Web   ·   Wiki   ·   Activities   ·   Blog   ·   Lists   ·   Chat   ·   Meeting   ·   Bugs   ·   Git   ·   Translate   ·   Archive   ·   People   ·   Donate
summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorSebastian Silva <sebastian@sugarlabs.org>2011-12-02 20:57:56 (GMT)
committer Sebastian Silva <sebastian@sugarlabs.org>2011-12-02 20:57:56 (GMT)
commit39e7a3959f9bc95bc191ea1ac1ea06db63bf592a (patch)
tree57d3d7c2db5d1177b861074be37e89c66f864c63
parentda635c3c97fd93af408c795cdf27adf306604ff1 (diff)
change mercurial version
-rw-r--r--[l---------]websdk/mercurial/__version__.py3
-rw-r--r--[l---------]websdk/mercurial/ancestor.py92
-rw-r--r--[l---------]websdk/mercurial/archival.py285
-rw-r--r--websdk/mercurial/bookmarks.py213
-rw-r--r--[l---------]websdk/mercurial/bundlerepo.py364
-rw-r--r--[l---------]websdk/mercurial/byterange.py463
-rw-r--r--[l---------]websdk/mercurial/changegroup.py257
-rw-r--r--[l---------]websdk/mercurial/changelog.py245
-rw-r--r--[l---------]websdk/mercurial/cmdutil.py1272
-rw-r--r--[l---------]websdk/mercurial/commands.py5673
-rw-r--r--websdk/mercurial/commandserver.py237
-rw-r--r--[l---------]websdk/mercurial/config.py150
-rw-r--r--[l---------]websdk/mercurial/context.py1138
-rw-r--r--[l---------]websdk/mercurial/copies.py268
-rw-r--r--[l---------]websdk/mercurial/dagparser.py475
-rw-r--r--websdk/mercurial/dagutil.py277
-rw-r--r--[l---------]websdk/mercurial/demandimport.py156
-rw-r--r--[l---------]websdk/mercurial/dirstate.py726
-rw-r--r--[l---------]websdk/mercurial/discovery.py193
-rw-r--r--[l---------]websdk/mercurial/dispatch.py739
-rw-r--r--[l---------]websdk/mercurial/encoding.py174
-rw-r--r--[l---------]websdk/mercurial/error.py91
-rw-r--r--[l---------]websdk/mercurial/extensions.py356
-rw-r--r--[l---------]websdk/mercurial/fancyopts.py118
-rw-r--r--[l---------]websdk/mercurial/filelog.py93
-rw-r--r--[l---------]websdk/mercurial/filemerge.py272
-rw-r--r--websdk/mercurial/fileset.py440
-rw-r--r--[l---------]websdk/mercurial/graphmod.py140
-rw-r--r--[l---------]websdk/mercurial/hbisect.py259
-rw-r--r--[l---------]websdk/mercurial/help.py110
-rw-r--r--websdk/mercurial/help/config.txt1312
-rw-r--r--websdk/mercurial/help/dates.txt36
-rw-r--r--websdk/mercurial/help/diffs.txt29
-rw-r--r--websdk/mercurial/help/environment.txt101
-rw-r--r--websdk/mercurial/help/extensions.txt33
-rw-r--r--websdk/mercurial/help/filesets.txt65
-rw-r--r--websdk/mercurial/help/glossary.txt386
-rw-r--r--websdk/mercurial/help/hgignore.txt80
-rw-r--r--websdk/mercurial/help/hgweb.txt47
-rw-r--r--websdk/mercurial/help/merge-tools.txt110
-rw-r--r--websdk/mercurial/help/multirevs.txt13
-rw-r--r--websdk/mercurial/help/patterns.txt53
-rw-r--r--websdk/mercurial/help/revisions.txt29
-rw-r--r--websdk/mercurial/help/revsets.txt122
-rw-r--r--websdk/mercurial/help/subrepos.txt135
-rw-r--r--websdk/mercurial/help/templates.txt40
-rw-r--r--websdk/mercurial/help/urls.txt66
-rw-r--r--[l---------]websdk/mercurial/hg.py582
-rw-r--r--[l---------]websdk/mercurial/hgweb/__init__.py32
-rw-r--r--[l---------]websdk/mercurial/hgweb/common.py187
-rw-r--r--[l---------]websdk/mercurial/hgweb/hgweb_mod.py306
-rw-r--r--[l---------]websdk/mercurial/hgweb/hgwebdir_mod.py400
-rw-r--r--[l---------]websdk/mercurial/hgweb/protocol.py98
-rw-r--r--[l---------]websdk/mercurial/hgweb/request.py148
-rw-r--r--[l---------]websdk/mercurial/hgweb/server.py321
-rw-r--r--[l---------]websdk/mercurial/hgweb/webcommands.py843
-rw-r--r--[l---------]websdk/mercurial/hgweb/webutil.py270
-rw-r--r--[l---------]websdk/mercurial/hgweb/wsgicgi.py82
-rw-r--r--[l---------]websdk/mercurial/hook.py174
-rw-r--r--websdk/mercurial/httpclient/__init__.py711
-rw-r--r--websdk/mercurial/httpclient/socketutil.py127
-rw-r--r--websdk/mercurial/httpclient/tests/__init__.py1
-rw-r--r--websdk/mercurial/httpclient/tests/simple_http_test.py401
-rw-r--r--websdk/mercurial/httpclient/tests/test_bogus_responses.py68
-rw-r--r--websdk/mercurial/httpclient/tests/test_chunked_transfer.py153
-rw-r--r--websdk/mercurial/httpclient/tests/test_proxy_support.py135
-rw-r--r--websdk/mercurial/httpclient/tests/test_ssl.py93
-rw-r--r--websdk/mercurial/httpclient/tests/util.py195
-rw-r--r--websdk/mercurial/httpconnection.py284
-rw-r--r--[l---------]websdk/mercurial/httprepo.py248
-rw-r--r--[l---------]websdk/mercurial/i18n.py64
-rw-r--r--[l---------]websdk/mercurial/ignore.py106
-rw-r--r--[l---------]websdk/mercurial/keepalive.py767
-rw-r--r--websdk/mercurial/locale/da/LC_MESSAGES/hg.mobin0 -> 229004 bytes
-rw-r--r--websdk/mercurial/locale/de/LC_MESSAGES/hg.mobin0 -> 360765 bytes
-rw-r--r--websdk/mercurial/locale/el/LC_MESSAGES/hg.mobin0 -> 44585 bytes
-rw-r--r--websdk/mercurial/locale/fr/LC_MESSAGES/hg.mobin0 -> 42392 bytes
-rw-r--r--websdk/mercurial/locale/it/LC_MESSAGES/hg.mobin0 -> 205979 bytes
-rw-r--r--websdk/mercurial/locale/ja/LC_MESSAGES/hg.mobin0 -> 596022 bytes
-rw-r--r--websdk/mercurial/locale/pt_BR/LC_MESSAGES/hg.mobin0 -> 747919 bytes
-rw-r--r--websdk/mercurial/locale/ro/LC_MESSAGES/hg.mobin0 -> 93643 bytes
-rw-r--r--websdk/mercurial/locale/ru/LC_MESSAGES/hg.mobin0 -> 499849 bytes
-rw-r--r--websdk/mercurial/locale/sv/LC_MESSAGES/hg.mobin0 -> 211916 bytes
-rw-r--r--websdk/mercurial/locale/zh_CN/LC_MESSAGES/hg.mobin0 -> 27190 bytes
-rw-r--r--websdk/mercurial/locale/zh_TW/LC_MESSAGES/hg.mobin0 -> 52870 bytes
-rw-r--r--[l---------]websdk/mercurial/localrepo.py2102
-rw-r--r--[l---------]websdk/mercurial/lock.py138
-rw-r--r--[l---------]websdk/mercurial/lsprof.py110
-rw-r--r--[l---------]websdk/mercurial/lsprofcalltree.py87
-rw-r--r--[l---------]websdk/mercurial/mail.py234
-rw-r--r--[l---------]websdk/mercurial/manifest.py205
-rw-r--r--[l---------]websdk/mercurial/match.py338
-rw-r--r--[l---------]websdk/mercurial/mdiff.py288
-rw-r--r--[l---------]websdk/mercurial/merge.py575
-rw-r--r--[l---------]websdk/mercurial/minirst.py688
-rw-r--r--[l---------]websdk/mercurial/node.py19
-rw-r--r--[l---------]websdk/mercurial/parser.py96
-rw-r--r--[l---------]websdk/mercurial/patch.py1870
-rw-r--r--[l---------]websdk/mercurial/posix.py417
-rw-r--r--[l---------]websdk/mercurial/pushkey.py35
-rw-r--r--websdk/mercurial/py3kcompat.py72
-rw-r--r--[l---------]websdk/mercurial/repair.py159
-rw-r--r--[l---------]websdk/mercurial/repo.py41
-rw-r--r--[l---------]websdk/mercurial/revlog.py1281
-rw-r--r--[l---------]websdk/mercurial/revset.py1142
-rw-r--r--websdk/mercurial/scmutil.py803
-rw-r--r--websdk/mercurial/setdiscovery.py194
-rw-r--r--[l---------]websdk/mercurial/similar.py104
-rw-r--r--[l---------]websdk/mercurial/simplemerge.py454
-rw-r--r--[l---------]websdk/mercurial/sshrepo.py226
-rw-r--r--[l---------]websdk/mercurial/sshserver.py152
-rw-r--r--websdk/mercurial/sslutil.py130
-rw-r--r--[l---------]websdk/mercurial/statichttprepo.py139
-rw-r--r--[l---------]websdk/mercurial/store.py428
-rw-r--r--[l---------]websdk/mercurial/strutil.py35
-rw-r--r--[l---------]websdk/mercurial/subrepo.py1127
-rw-r--r--[l---------]websdk/mercurial/tags.py293
-rw-r--r--[l---------]websdk/mercurial/templatefilters.py373
-rw-r--r--[l---------]websdk/mercurial/templatekw.py321
-rw-r--r--[l---------]websdk/mercurial/templater.py393
-rw-r--r--websdk/mercurial/templates/atom/bookmarkentry.tmpl8
-rw-r--r--websdk/mercurial/templates/atom/bookmarks.tmpl11
-rw-r--r--websdk/mercurial/templates/atom/changelog.tmpl10
-rw-r--r--websdk/mercurial/templates/atom/changelogentry.tmpl16
-rw-r--r--websdk/mercurial/templates/atom/error.tmpl17
-rw-r--r--websdk/mercurial/templates/atom/filelog.tmpl8
-rw-r--r--websdk/mercurial/templates/atom/header.tmpl2
-rw-r--r--websdk/mercurial/templates/atom/map13
-rw-r--r--websdk/mercurial/templates/atom/tagentry.tmpl8
-rw-r--r--websdk/mercurial/templates/atom/tags.tmpl11
-rw-r--r--websdk/mercurial/templates/coal/header.tmpl7
-rw-r--r--websdk/mercurial/templates/coal/map214
-rw-r--r--websdk/mercurial/templates/gitweb/bookmarks.tmpl32
-rw-r--r--websdk/mercurial/templates/gitweb/branches.tmpl32
-rw-r--r--websdk/mercurial/templates/gitweb/changelog.tmpl41
-rw-r--r--websdk/mercurial/templates/gitweb/changelogentry.tmpl14
-rw-r--r--websdk/mercurial/templates/gitweb/changeset.tmpl53
-rw-r--r--websdk/mercurial/templates/gitweb/error.tmpl33
-rw-r--r--websdk/mercurial/templates/gitweb/fileannotate.tmpl65
-rw-r--r--websdk/mercurial/templates/gitweb/filediff.tmpl50
-rw-r--r--websdk/mercurial/templates/gitweb/filelog.tmpl42
-rw-r--r--websdk/mercurial/templates/gitweb/filerevision.tmpl64
-rw-r--r--websdk/mercurial/templates/gitweb/footer.tmpl12
-rw-r--r--websdk/mercurial/templates/gitweb/graph.tmpl128
-rw-r--r--websdk/mercurial/templates/gitweb/header.tmpl8
-rw-r--r--websdk/mercurial/templates/gitweb/help.tmpl33
-rw-r--r--websdk/mercurial/templates/gitweb/helptopics.tmpl39
-rw-r--r--websdk/mercurial/templates/gitweb/index.tmpl26
-rw-r--r--websdk/mercurial/templates/gitweb/manifest.tmpl41
-rw-r--r--websdk/mercurial/templates/gitweb/map272
-rw-r--r--websdk/mercurial/templates/gitweb/notfound.tmpl18
-rw-r--r--websdk/mercurial/templates/gitweb/search.tmpl39
-rw-r--r--websdk/mercurial/templates/gitweb/shortlog.tmpl42
-rw-r--r--websdk/mercurial/templates/gitweb/summary.tmpl66
-rw-r--r--websdk/mercurial/templates/gitweb/tags.tmpl32
-rw-r--r--websdk/mercurial/templates/map-cmdline.bisect25
-rw-r--r--websdk/mercurial/templates/map-cmdline.changelog17
-rw-r--r--websdk/mercurial/templates/map-cmdline.compact12
-rw-r--r--websdk/mercurial/templates/map-cmdline.default25
-rw-r--r--websdk/mercurial/templates/map-cmdline.xml20
-rw-r--r--websdk/mercurial/templates/monoblue/bookmarks.tmpl38
-rw-r--r--websdk/mercurial/templates/monoblue/branches.tmpl38
-rw-r--r--websdk/mercurial/templates/monoblue/changelog.tmpl42
-rw-r--r--websdk/mercurial/templates/monoblue/changelogentry.tmpl6
-rw-r--r--websdk/mercurial/templates/monoblue/changeset.tmpl65
-rw-r--r--websdk/mercurial/templates/monoblue/error.tmpl36
-rw-r--r--websdk/mercurial/templates/monoblue/fileannotate.tmpl65
-rw-r--r--websdk/mercurial/templates/monoblue/filediff.tmpl56
-rw-r--r--websdk/mercurial/templates/monoblue/filelog.tmpl51
-rw-r--r--websdk/mercurial/templates/monoblue/filerevision.tmpl65
-rw-r--r--websdk/mercurial/templates/monoblue/footer.tmpl23
-rw-r--r--websdk/mercurial/templates/monoblue/graph.tmpl125
-rw-r--r--websdk/mercurial/templates/monoblue/header.tmpl7
-rw-r--r--websdk/mercurial/templates/monoblue/help.tmpl38
-rw-r--r--websdk/mercurial/templates/monoblue/helptopics.tmpl45
-rw-r--r--websdk/mercurial/templates/monoblue/index.tmpl39
-rw-r--r--websdk/mercurial/templates/monoblue/manifest.tmpl53
-rw-r--r--websdk/mercurial/templates/monoblue/map240
-rw-r--r--websdk/mercurial/templates/monoblue/notfound.tmpl37
-rw-r--r--websdk/mercurial/templates/monoblue/search.tmpl36
-rw-r--r--websdk/mercurial/templates/monoblue/shortlog.tmpl44
-rw-r--r--websdk/mercurial/templates/monoblue/summary.tmpl76
-rw-r--r--websdk/mercurial/templates/monoblue/tags.tmpl38
-rw-r--r--websdk/mercurial/templates/paper/bookmarks.tmpl49
-rw-r--r--websdk/mercurial/templates/paper/branches.tmpl60
-rw-r--r--websdk/mercurial/templates/paper/changeset.tmpl87
-rw-r--r--websdk/mercurial/templates/paper/diffstat.tmpl8
-rw-r--r--websdk/mercurial/templates/paper/error.tmpl45
-rw-r--r--websdk/mercurial/templates/paper/fileannotate.tmpl82
-rw-r--r--websdk/mercurial/templates/paper/filediff.tmpl77
-rw-r--r--websdk/mercurial/templates/paper/filelog.tmpl73
-rw-r--r--websdk/mercurial/templates/paper/filelogentry.tmpl5
-rw-r--r--websdk/mercurial/templates/paper/filerevision.tmpl76
-rw-r--r--websdk/mercurial/templates/paper/footer.tmpl5
-rw-r--r--websdk/mercurial/templates/paper/graph.tmpl141
-rw-r--r--websdk/mercurial/templates/paper/header.tmpl7
-rw-r--r--websdk/mercurial/templates/paper/help.tmpl44
-rw-r--r--websdk/mercurial/templates/paper/helptopics.tmpl49
-rw-r--r--websdk/mercurial/templates/paper/index.tmpl26
-rw-r--r--websdk/mercurial/templates/paper/manifest.tmpl58
-rw-r--r--websdk/mercurial/templates/paper/map213
-rw-r--r--websdk/mercurial/templates/paper/notfound.tmpl12
-rw-r--r--websdk/mercurial/templates/paper/search.tmpl55
-rw-r--r--websdk/mercurial/templates/paper/shortlog.tmpl70
-rw-r--r--websdk/mercurial/templates/paper/shortlogentry.tmpl5
-rw-r--r--websdk/mercurial/templates/paper/tags.tmpl49
-rw-r--r--websdk/mercurial/templates/raw/changeset.tmpl9
-rw-r--r--websdk/mercurial/templates/raw/error.tmpl2
-rw-r--r--websdk/mercurial/templates/raw/fileannotate.tmpl5
-rw-r--r--websdk/mercurial/templates/raw/filediff.tmpl5
-rw-r--r--websdk/mercurial/templates/raw/index.tmpl2
-rw-r--r--websdk/mercurial/templates/raw/manifest.tmpl3
-rw-r--r--websdk/mercurial/templates/raw/map30
-rw-r--r--websdk/mercurial/templates/raw/notfound.tmpl2
-rw-r--r--websdk/mercurial/templates/rss/bookmarkentry.tmpl6
-rw-r--r--websdk/mercurial/templates/rss/bookmarks.tmpl6
-rw-r--r--websdk/mercurial/templates/rss/changelog.tmpl6
-rw-r--r--websdk/mercurial/templates/rss/changelogentry.tmpl7
-rw-r--r--websdk/mercurial/templates/rss/error.tmpl10
-rw-r--r--websdk/mercurial/templates/rss/filelog.tmpl6
-rw-r--r--websdk/mercurial/templates/rss/filelogentry.tmpl7
-rw-r--r--websdk/mercurial/templates/rss/header.tmpl5
-rw-r--r--websdk/mercurial/templates/rss/map12
-rw-r--r--websdk/mercurial/templates/rss/tagentry.tmpl6
-rw-r--r--websdk/mercurial/templates/rss/tags.tmpl6
-rw-r--r--websdk/mercurial/templates/spartan/branches.tmpl27
-rw-r--r--websdk/mercurial/templates/spartan/changelog.tmpl44
-rw-r--r--websdk/mercurial/templates/spartan/changelogentry.tmpl25
-rw-r--r--websdk/mercurial/templates/spartan/changeset.tmpl52
-rw-r--r--websdk/mercurial/templates/spartan/error.tmpl15
-rw-r--r--websdk/mercurial/templates/spartan/fileannotate.tmpl49
-rw-r--r--websdk/mercurial/templates/spartan/filediff.tmpl37
-rw-r--r--websdk/mercurial/templates/spartan/filelog.tmpl29
-rw-r--r--websdk/mercurial/templates/spartan/filelogentry.tmpl25
-rw-r--r--websdk/mercurial/templates/spartan/filerevision.tmpl47
-rw-r--r--websdk/mercurial/templates/spartan/footer.tmpl9
-rw-r--r--websdk/mercurial/templates/spartan/graph.tmpl96
-rw-r--r--websdk/mercurial/templates/spartan/header.tmpl7
-rw-r--r--websdk/mercurial/templates/spartan/index.tmpl19
-rw-r--r--websdk/mercurial/templates/spartan/manifest.tmpl29
-rw-r--r--websdk/mercurial/templates/spartan/map183
-rw-r--r--websdk/mercurial/templates/spartan/notfound.tmpl12
-rw-r--r--websdk/mercurial/templates/spartan/search.tmpl37
-rw-r--r--websdk/mercurial/templates/spartan/shortlog.tmpl44
-rw-r--r--websdk/mercurial/templates/spartan/shortlogentry.tmpl7
-rw-r--r--websdk/mercurial/templates/spartan/tags.tmpl27
-rw-r--r--websdk/mercurial/templates/static/background.pngbin0 -> 603 bytes
-rw-r--r--websdk/mercurial/templates/static/coal-file.pngbin0 -> 273 bytes
-rw-r--r--websdk/mercurial/templates/static/coal-folder.pngbin0 -> 284 bytes
-rw-r--r--websdk/mercurial/templates/static/excanvas.js19
-rw-r--r--websdk/mercurial/templates/static/hgicon.pngbin0 -> 792 bytes
-rw-r--r--websdk/mercurial/templates/static/hglogo.pngbin0 -> 4123 bytes
-rw-r--r--websdk/mercurial/templates/static/mercurial.js249
-rw-r--r--websdk/mercurial/templates/static/style-coal.css288
-rw-r--r--websdk/mercurial/templates/static/style-gitweb.css128
-rw-r--r--websdk/mercurial/templates/static/style-monoblue.css479
-rw-r--r--websdk/mercurial/templates/static/style-paper.css277
-rw-r--r--websdk/mercurial/templates/static/style.css105
-rw-r--r--websdk/mercurial/templates/template-vars.txt42
-rw-r--r--[l---------]websdk/mercurial/transaction.py184
-rw-r--r--websdk/mercurial/treediscovery.py150
-rw-r--r--[l---------]websdk/mercurial/ui.py735
-rw-r--r--[l---------]websdk/mercurial/url.py474
-rw-r--r--[l---------]websdk/mercurial/util.py1742
-rw-r--r--[l---------]websdk/mercurial/verify.py304
-rw-r--r--[l---------]websdk/mercurial/win32.py453
-rw-r--r--[l---------]websdk/mercurial/windows.py317
-rw-r--r--websdk/mercurial/wireproto.py607
268 files changed, 51394 insertions, 80 deletions
diff --git a/websdk/mercurial/__version__.py b/websdk/mercurial/__version__.py
index 02cef30..bc32403 120000..100644
--- a/websdk/mercurial/__version__.py
+++ b/websdk/mercurial/__version__.py
@@ -1 +1,2 @@
-/usr/share/pyshared/mercurial/__version__.py \ No newline at end of file
+# this file is autogenerated by setup.py
+version = "2.0.1"
diff --git a/websdk/mercurial/ancestor.py b/websdk/mercurial/ancestor.py
index 5be3ec0..867d683 120000..100644
--- a/websdk/mercurial/ancestor.py
+++ b/websdk/mercurial/ancestor.py
@@ -1 +1,91 @@
-/usr/share/pyshared/mercurial/ancestor.py \ No newline at end of file
+# ancestor.py - generic DAG ancestor algorithm for mercurial
+#
+# Copyright 2006 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+import heapq
+
+def ancestor(a, b, pfunc):
+ """
+ Returns the common ancestor of a and b that is furthest from a
+ root (as measured by longest path) or None if no ancestor is
+ found. If there are multiple common ancestors at the same
+ distance, the first one found is returned.
+
+ pfunc must return a list of parent vertices for a given vertex
+ """
+
+ if a == b:
+ return a
+
+ a, b = sorted([a, b])
+
+ # find depth from root of all ancestors
+ # depth is stored as a negative for heapq
+ parentcache = {}
+ visit = [a, b]
+ depth = {}
+ while visit:
+ vertex = visit[-1]
+ pl = pfunc(vertex)
+ parentcache[vertex] = pl
+ if not pl:
+ depth[vertex] = 0
+ visit.pop()
+ else:
+ for p in pl:
+ if p == a or p == b: # did we find a or b as a parent?
+ return p # we're done
+ if p not in depth:
+ visit.append(p)
+ if visit[-1] == vertex:
+ # -(maximum distance of parents + 1)
+ depth[vertex] = min([depth[p] for p in pl]) - 1
+ visit.pop()
+
+ # traverse ancestors in order of decreasing distance from root
+ def ancestors(vertex):
+ h = [(depth[vertex], vertex)]
+ seen = set()
+ while h:
+ d, n = heapq.heappop(h)
+ if n not in seen:
+ seen.add(n)
+ yield (d, n)
+ for p in parentcache[n]:
+ heapq.heappush(h, (depth[p], p))
+
+ def generations(vertex):
+ sg, s = None, set()
+ for g, v in ancestors(vertex):
+ if g != sg:
+ if sg:
+ yield sg, s
+ sg, s = g, set((v,))
+ else:
+ s.add(v)
+ yield sg, s
+
+ x = generations(a)
+ y = generations(b)
+ gx = x.next()
+ gy = y.next()
+
+ # increment each ancestor list until it is closer to root than
+ # the other, or they match
+ try:
+ while True:
+ if gx[0] == gy[0]:
+ for v in gx[1]:
+ if v in gy[1]:
+ return v
+ gy = y.next()
+ gx = x.next()
+ elif gx[0] > gy[0]:
+ gy = y.next()
+ else:
+ gx = x.next()
+ except StopIteration:
+ return None
diff --git a/websdk/mercurial/archival.py b/websdk/mercurial/archival.py
index d99f47f..a7c271d 120000..100644
--- a/websdk/mercurial/archival.py
+++ b/websdk/mercurial/archival.py
@@ -1 +1,284 @@
-/usr/share/pyshared/mercurial/archival.py \ No newline at end of file
+# archival.py - revision archival for mercurial
+#
+# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from i18n import _
+from node import hex
+import cmdutil
+import scmutil, util, encoding
+import cStringIO, os, tarfile, time, zipfile
+import zlib, gzip
+
+def tidyprefix(dest, kind, prefix):
+ '''choose prefix to use for names in archive. make sure prefix is
+ safe for consumers.'''
+
+ if prefix:
+ prefix = util.normpath(prefix)
+ else:
+ if not isinstance(dest, str):
+ raise ValueError('dest must be string if no prefix')
+ prefix = os.path.basename(dest)
+ lower = prefix.lower()
+ for sfx in exts.get(kind, []):
+ if lower.endswith(sfx):
+ prefix = prefix[:-len(sfx)]
+ break
+ lpfx = os.path.normpath(util.localpath(prefix))
+ prefix = util.pconvert(lpfx)
+ if not prefix.endswith('/'):
+ prefix += '/'
+ if prefix.startswith('../') or os.path.isabs(lpfx) or '/../' in prefix:
+ raise util.Abort(_('archive prefix contains illegal components'))
+ return prefix
+
+exts = {
+ 'tar': ['.tar'],
+ 'tbz2': ['.tbz2', '.tar.bz2'],
+ 'tgz': ['.tgz', '.tar.gz'],
+ 'zip': ['.zip'],
+ }
+
+def guesskind(dest):
+ for kind, extensions in exts.iteritems():
+ if util.any(dest.endswith(ext) for ext in extensions):
+ return kind
+ return None
+
+
+class tarit(object):
+ '''write archive to tar file or stream. can write uncompressed,
+ or compress with gzip or bzip2.'''
+
+ class GzipFileWithTime(gzip.GzipFile):
+
+ def __init__(self, *args, **kw):
+ timestamp = None
+ if 'timestamp' in kw:
+ timestamp = kw.pop('timestamp')
+ if timestamp is None:
+ self.timestamp = time.time()
+ else:
+ self.timestamp = timestamp
+ gzip.GzipFile.__init__(self, *args, **kw)
+
+ def _write_gzip_header(self):
+ self.fileobj.write('\037\213') # magic header
+ self.fileobj.write('\010') # compression method
+ # Python 2.6 deprecates self.filename
+ fname = getattr(self, 'name', None) or self.filename
+ if fname and fname.endswith('.gz'):
+ fname = fname[:-3]
+ flags = 0
+ if fname:
+ flags = gzip.FNAME
+ self.fileobj.write(chr(flags))
+ gzip.write32u(self.fileobj, long(self.timestamp))
+ self.fileobj.write('\002')
+ self.fileobj.write('\377')
+ if fname:
+ self.fileobj.write(fname + '\000')
+
+ def __init__(self, dest, mtime, kind=''):
+ self.mtime = mtime
+ self.fileobj = None
+
+ def taropen(name, mode, fileobj=None):
+ if kind == 'gz':
+ mode = mode[0]
+ if not fileobj:
+ fileobj = open(name, mode + 'b')
+ gzfileobj = self.GzipFileWithTime(name, mode + 'b',
+ zlib.Z_BEST_COMPRESSION,
+ fileobj, timestamp=mtime)
+ self.fileobj = gzfileobj
+ return tarfile.TarFile.taropen(name, mode, gzfileobj)
+ else:
+ self.fileobj = fileobj
+ return tarfile.open(name, mode + kind, fileobj)
+
+ if isinstance(dest, str):
+ self.z = taropen(dest, mode='w:')
+ else:
+ # Python 2.5-2.5.1 have a regression that requires a name arg
+ self.z = taropen(name='', mode='w|', fileobj=dest)
+
+ def addfile(self, name, mode, islink, data):
+ i = tarfile.TarInfo(name)
+ i.mtime = self.mtime
+ i.size = len(data)
+ if islink:
+ i.type = tarfile.SYMTYPE
+ i.mode = 0777
+ i.linkname = data
+ data = None
+ i.size = 0
+ else:
+ i.mode = mode
+ data = cStringIO.StringIO(data)
+ self.z.addfile(i, data)
+
+ def done(self):
+ self.z.close()
+ if self.fileobj:
+ self.fileobj.close()
+
+class tellable(object):
+ '''provide tell method for zipfile.ZipFile when writing to http
+ response file object.'''
+
+ def __init__(self, fp):
+ self.fp = fp
+ self.offset = 0
+
+ def __getattr__(self, key):
+ return getattr(self.fp, key)
+
+ def write(self, s):
+ self.fp.write(s)
+ self.offset += len(s)
+
+ def tell(self):
+ return self.offset
+
+class zipit(object):
+ '''write archive to zip file or stream. can write uncompressed,
+ or compressed with deflate.'''
+
+ def __init__(self, dest, mtime, compress=True):
+ if not isinstance(dest, str):
+ try:
+ dest.tell()
+ except (AttributeError, IOError):
+ dest = tellable(dest)
+ self.z = zipfile.ZipFile(dest, 'w',
+ compress and zipfile.ZIP_DEFLATED or
+ zipfile.ZIP_STORED)
+
+ # Python's zipfile module emits deprecation warnings if we try
+ # to store files with a date before 1980.
+ epoch = 315532800 # calendar.timegm((1980, 1, 1, 0, 0, 0, 1, 1, 0))
+ if mtime < epoch:
+ mtime = epoch
+
+ self.date_time = time.gmtime(mtime)[:6]
+
+ def addfile(self, name, mode, islink, data):
+ i = zipfile.ZipInfo(name, self.date_time)
+ i.compress_type = self.z.compression
+ # unzip will not honor unix file modes unless file creator is
+ # set to unix (id 3).
+ i.create_system = 3
+ ftype = 0x8000 # UNX_IFREG in unzip source code
+ if islink:
+ mode = 0777
+ ftype = 0xa000 # UNX_IFLNK in unzip source code
+ i.external_attr = (mode | ftype) << 16L
+ self.z.writestr(i, data)
+
+ def done(self):
+ self.z.close()
+
+class fileit(object):
+ '''write archive as files in directory.'''
+
+ def __init__(self, name, mtime):
+ self.basedir = name
+ self.opener = scmutil.opener(self.basedir)
+
+ def addfile(self, name, mode, islink, data):
+ if islink:
+ self.opener.symlink(data, name)
+ return
+ f = self.opener(name, "w", atomictemp=True)
+ f.write(data)
+ f.close()
+ destfile = os.path.join(self.basedir, name)
+ os.chmod(destfile, mode)
+
+ def done(self):
+ pass
+
+archivers = {
+ 'files': fileit,
+ 'tar': tarit,
+ 'tbz2': lambda name, mtime: tarit(name, mtime, 'bz2'),
+ 'tgz': lambda name, mtime: tarit(name, mtime, 'gz'),
+ 'uzip': lambda name, mtime: zipit(name, mtime, False),
+ 'zip': zipit,
+ }
+
+def archive(repo, dest, node, kind, decode=True, matchfn=None,
+ prefix=None, mtime=None, subrepos=False):
+ '''create archive of repo as it was at node.
+
+ dest can be name of directory, name of archive file, or file
+ object to write archive to.
+
+ kind is type of archive to create.
+
+ decode tells whether to put files through decode filters from
+ hgrc.
+
+ matchfn is function to filter names of files to write to archive.
+
+ prefix is name of path to put before every archive member.'''
+
+ if kind == 'files':
+ if prefix:
+ raise util.Abort(_('cannot give prefix when archiving to files'))
+ else:
+ prefix = tidyprefix(dest, kind, prefix)
+
+ def write(name, mode, islink, getdata):
+ if matchfn and not matchfn(name):
+ return
+ data = getdata()
+ if decode:
+ data = repo.wwritedata(name, data)
+ archiver.addfile(prefix + name, mode, islink, data)
+
+ if kind not in archivers:
+ raise util.Abort(_("unknown archive type '%s'") % kind)
+
+ ctx = repo[node]
+ archiver = archivers[kind](dest, mtime or ctx.date()[0])
+
+ if repo.ui.configbool("ui", "archivemeta", True):
+ def metadata():
+ base = 'repo: %s\nnode: %s\nbranch: %s\n' % (
+ repo[0].hex(), hex(node), encoding.fromlocal(ctx.branch()))
+
+ tags = ''.join('tag: %s\n' % t for t in ctx.tags()
+ if repo.tagtype(t) == 'global')
+ if not tags:
+ repo.ui.pushbuffer()
+ opts = {'template': '{latesttag}\n{latesttagdistance}',
+ 'style': '', 'patch': None, 'git': None}
+ cmdutil.show_changeset(repo.ui, repo, opts).show(ctx)
+ ltags, dist = repo.ui.popbuffer().split('\n')
+ tags = ''.join('latesttag: %s\n' % t for t in ltags.split(':'))
+ tags += 'latesttagdistance: %s\n' % dist
+
+ return base + tags
+
+ write('.hg_archival.txt', 0644, False, metadata)
+
+ total = len(ctx.manifest())
+ repo.ui.progress(_('archiving'), 0, unit=_('files'), total=total)
+ for i, f in enumerate(ctx):
+ ff = ctx.flags(f)
+ write(f, 'x' in ff and 0755 or 0644, 'l' in ff, ctx[f].data)
+ repo.ui.progress(_('archiving'), i + 1, item=f,
+ unit=_('files'), total=total)
+ repo.ui.progress(_('archiving'), None)
+
+ if subrepos:
+ for subpath in ctx.substate:
+ sub = ctx.sub(subpath)
+ sub.archive(repo.ui, archiver, prefix)
+
+ archiver.done()
diff --git a/websdk/mercurial/bookmarks.py b/websdk/mercurial/bookmarks.py
new file mode 100644
index 0000000..587cefc
--- /dev/null
+++ b/websdk/mercurial/bookmarks.py
@@ -0,0 +1,213 @@
+# Mercurial bookmark support code
+#
+# Copyright 2008 David Soria Parra <dsp@php.net>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from mercurial.i18n import _
+from mercurial.node import hex
+from mercurial import encoding, error, util
+import errno, os
+
+def valid(mark):
+ for c in (':', '\0', '\n', '\r'):
+ if c in mark:
+ return False
+ return True
+
+def read(repo):
+ '''Parse .hg/bookmarks file and return a dictionary
+
+ Bookmarks are stored as {HASH}\\s{NAME}\\n (localtags format) values
+ in the .hg/bookmarks file.
+ Read the file and return a (name=>nodeid) dictionary
+ '''
+ bookmarks = {}
+ try:
+ for line in repo.opener('bookmarks'):
+ line = line.strip()
+ if not line:
+ continue
+ if ' ' not in line:
+ repo.ui.warn(_('malformed line in .hg/bookmarks: %r\n') % line)
+ continue
+ sha, refspec = line.split(' ', 1)
+ refspec = encoding.tolocal(refspec)
+ try:
+ bookmarks[refspec] = repo.changelog.lookup(sha)
+ except error.RepoLookupError:
+ pass
+ except IOError, inst:
+ if inst.errno != errno.ENOENT:
+ raise
+ return bookmarks
+
+def readcurrent(repo):
+ '''Get the current bookmark
+
+ If we use gittishsh branches we have a current bookmark that
+ we are on. This function returns the name of the bookmark. It
+ is stored in .hg/bookmarks.current
+ '''
+ mark = None
+ try:
+ file = repo.opener('bookmarks.current')
+ except IOError, inst:
+ if inst.errno != errno.ENOENT:
+ raise
+ return None
+ try:
+ # No readline() in posixfile_nt, reading everything is cheap
+ mark = encoding.tolocal((file.readlines() or [''])[0])
+ if mark == '' or mark not in repo._bookmarks:
+ mark = None
+ finally:
+ file.close()
+ return mark
+
+def write(repo):
+ '''Write bookmarks
+
+ Write the given bookmark => hash dictionary to the .hg/bookmarks file
+ in a format equal to those of localtags.
+
+ We also store a backup of the previous state in undo.bookmarks that
+ can be copied back on rollback.
+ '''
+ refs = repo._bookmarks
+
+ if repo._bookmarkcurrent not in refs:
+ setcurrent(repo, None)
+ for mark in refs.keys():
+ if not valid(mark):
+ raise util.Abort(_("bookmark '%s' contains illegal "
+ "character" % mark))
+
+ wlock = repo.wlock()
+ try:
+
+ file = repo.opener('bookmarks', 'w', atomictemp=True)
+ for refspec, node in refs.iteritems():
+ file.write("%s %s\n" % (hex(node), encoding.fromlocal(refspec)))
+ file.close()
+
+ # touch 00changelog.i so hgweb reloads bookmarks (no lock needed)
+ try:
+ os.utime(repo.sjoin('00changelog.i'), None)
+ except OSError:
+ pass
+
+ finally:
+ wlock.release()
+
+def setcurrent(repo, mark):
+ '''Set the name of the bookmark that we are currently on
+
+ Set the name of the bookmark that we are on (hg update <bookmark>).
+ The name is recorded in .hg/bookmarks.current
+ '''
+ current = repo._bookmarkcurrent
+ if current == mark:
+ return
+
+ if mark not in repo._bookmarks:
+ mark = ''
+ if not valid(mark):
+ raise util.Abort(_("bookmark '%s' contains illegal "
+ "character" % mark))
+
+ wlock = repo.wlock()
+ try:
+ file = repo.opener('bookmarks.current', 'w', atomictemp=True)
+ file.write(encoding.fromlocal(mark))
+ file.close()
+ finally:
+ wlock.release()
+ repo._bookmarkcurrent = mark
+
+def updatecurrentbookmark(repo, oldnode, curbranch):
+ try:
+ update(repo, oldnode, repo.branchtags()[curbranch])
+ except KeyError:
+ if curbranch == "default": # no default branch!
+ update(repo, oldnode, repo.lookup("tip"))
+ else:
+ raise util.Abort(_("branch %s not found") % curbranch)
+
+def update(repo, parents, node):
+ marks = repo._bookmarks
+ update = False
+ mark = repo._bookmarkcurrent
+ if mark and marks[mark] in parents:
+ old = repo[marks[mark]]
+ new = repo[node]
+ if new in old.descendants():
+ marks[mark] = new.node()
+ update = True
+ if update:
+ repo._writebookmarks(marks)
+
+def listbookmarks(repo):
+ # We may try to list bookmarks on a repo type that does not
+ # support it (e.g., statichttprepository).
+ marks = getattr(repo, '_bookmarks', {})
+
+ d = {}
+ for k, v in marks.iteritems():
+ d[k] = hex(v)
+ return d
+
+def pushbookmark(repo, key, old, new):
+ w = repo.wlock()
+ try:
+ marks = repo._bookmarks
+ if hex(marks.get(key, '')) != old:
+ return False
+ if new == '':
+ del marks[key]
+ else:
+ if new not in repo:
+ return False
+ marks[key] = repo[new].node()
+ write(repo)
+ return True
+ finally:
+ w.release()
+
+def updatefromremote(ui, repo, remote):
+ ui.debug("checking for updated bookmarks\n")
+ rb = remote.listkeys('bookmarks')
+ changed = False
+ for k in rb.keys():
+ if k in repo._bookmarks:
+ nr, nl = rb[k], repo._bookmarks[k]
+ if nr in repo:
+ cr = repo[nr]
+ cl = repo[nl]
+ if cl.rev() >= cr.rev():
+ continue
+ if cr in cl.descendants():
+ repo._bookmarks[k] = cr.node()
+ changed = True
+ ui.status(_("updating bookmark %s\n") % k)
+ else:
+ ui.warn(_("not updating divergent"
+ " bookmark %s\n") % k)
+ if changed:
+ write(repo)
+
+def diff(ui, repo, remote):
+ ui.status(_("searching for changed bookmarks\n"))
+
+ lmarks = repo.listkeys('bookmarks')
+ rmarks = remote.listkeys('bookmarks')
+
+ diff = sorted(set(rmarks) - set(lmarks))
+ for k in diff:
+ ui.write(" %-25s %s\n" % (k, rmarks[k][:12]))
+
+ if len(diff) <= 0:
+ ui.status(_("no changed bookmarks found\n"))
+ return 1
+ return 0
diff --git a/websdk/mercurial/bundlerepo.py b/websdk/mercurial/bundlerepo.py
index 8da29ad..5ee67b9 120000..100644
--- a/websdk/mercurial/bundlerepo.py
+++ b/websdk/mercurial/bundlerepo.py
@@ -1 +1,363 @@
-/usr/share/pyshared/mercurial/bundlerepo.py \ No newline at end of file
+# bundlerepo.py - repository class for viewing uncompressed bundles
+#
+# Copyright 2006, 2007 Benoit Boissinot <bboissin@gmail.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+"""Repository class for viewing uncompressed bundles.
+
+This provides a read-only repository interface to bundles as if they
+were part of the actual repository.
+"""
+
+from node import nullid
+from i18n import _
+import os, tempfile, shutil
+import changegroup, util, mdiff, discovery
+import localrepo, changelog, manifest, filelog, revlog, error
+
class bundlerevlog(revlog.revlog):
    """A revlog augmented, in memory, with the revisions of a bundle.

    Revisions already in the on-disk revlog are served normally;
    revisions that only exist in the bundle are read back from the
    bundle stream on demand.  The overlay is strictly read-only: all
    mutating revlog operations raise NotImplementedError.
    """

    def __init__(self, opener, indexfile, bundle, linkmapper):
        # How it works:
        # to retrieve a revision, we need to know the offset of
        # the revision in the bundle (an unbundle object).
        #
        # We store this offset in the index (start).  A rev that came
        # from the bundle (rather than from the underlying revlog) is
        # recognized by its presence in self.basemap, which also
        # records the node its delta is based on.
        #
        revlog.revlog.__init__(self, opener, indexfile)
        self.bundle = bundle
        # rev number -> node the stored delta is based against
        self.basemap = {}
        n = len(self)
        chain = None
        # walk every delta chunk in the bundle and graft it onto the index
        while True:
            chunkdata = bundle.deltachunk(chain)
            if not chunkdata:
                break
            node = chunkdata['node']
            p1 = chunkdata['p1']
            p2 = chunkdata['p2']
            cs = chunkdata['cs']
            deltabase = chunkdata['deltabase']
            delta = chunkdata['delta']

            # the bundle stream position just past the delta gives us
            # where the delta payload started
            size = len(delta)
            start = bundle.tell() - size

            link = linkmapper(cs)
            if node in self.nodemap:
                # this can happen if two branches make the same change
                chain = node
                continue

            for p in (p1, p2):
                if not p in self.nodemap:
                    raise error.LookupError(p, self.indexfile,
                                            _("unknown parent"))
            # start, size, full unc. size, base (unused), link, p1, p2, node
            e = (revlog.offset_type(start, 0), size, -1, -1, link,
                 self.rev(p1), self.rev(p2), node)
            self.basemap[n] = deltabase
            self.index.insert(-1, e)
            self.nodemap[node] = n
            chain = node
            n += 1

    def inbundle(self, rev):
        """is rev from the bundle"""
        if rev < 0:
            return False
        return rev in self.basemap
    def bundlebase(self, rev):
        # node the bundle-held delta for *rev* applies against
        return self.basemap[rev]
    def _chunk(self, rev):
        # Warning: in case of bundle, the diff is against bundlebase,
        # not against rev - 1
        # XXX: could use some caching
        if not self.inbundle(rev):
            return revlog.revlog._chunk(self, rev)
        self.bundle.seek(self.start(rev))
        return self.bundle.read(self.length(rev))

    def revdiff(self, rev1, rev2):
        """return or calculate a delta between two revisions"""
        if self.inbundle(rev1) and self.inbundle(rev2):
            # hot path for bundle
            revb = self.rev(self.bundlebase(rev2))
            if revb == rev1:
                return self._chunk(rev2)
        elif not self.inbundle(rev1) and not self.inbundle(rev2):
            return revlog.revlog.revdiff(self, rev1, rev2)

        # fall back to a full-text diff between the two revisions
        return mdiff.textdiff(self.revision(self.node(rev1)),
                              self.revision(self.node(rev2)))

    def revision(self, node):
        """Return the uncompressed full text of *node*.

        For bundle-held revisions the text is rebuilt by walking the
        delta chain down to a base that lives in the revlog (or in the
        one-entry cache) and re-applying the deltas in order.
        """
        if node == nullid:
            return ""

        text = None
        chain = []
        iter_node = node
        rev = self.rev(iter_node)
        # reconstruct the revision if it is from a changegroup
        while self.inbundle(rev):
            if self._cache and self._cache[0] == iter_node:
                text = self._cache[2]
                break
            chain.append(rev)
            iter_node = self.bundlebase(rev)
            rev = self.rev(iter_node)
        if text is None:
            text = revlog.revlog.revision(self, iter_node)

        # re-apply the deltas top-down (chain was collected bottom-up)
        while chain:
            delta = self._chunk(chain.pop())
            text = mdiff.patches(text, [delta])

        p1, p2 = self.parents(node)
        if node != revlog.hash(text, p1, p2):
            raise error.RevlogError(_("integrity check failed on %s:%d")
                                    % (self.datafile, self.rev(node)))

        self._cache = (node, self.rev(node), text)
        return text

    # the overlay is read-only: every mutating operation is refused
    def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
        raise NotImplementedError
    def addgroup(self, revs, linkmapper, transaction):
        raise NotImplementedError
    def strip(self, rev, minlink):
        raise NotImplementedError
    def checksize(self):
        raise NotImplementedError
+
class bundlechangelog(bundlerevlog, changelog.changelog):
    """Changelog overlaid with the changesets carried by a bundle."""

    def __init__(self, opener, bundle):
        changelog.changelog.__init__(self, opener)
        # changelog revisions link to themselves, so the link mapper
        # is the identity
        identity = lambda x: x
        bundlerevlog.__init__(self, opener, self.indexfile, bundle,
                              identity)
+
class bundlemanifest(bundlerevlog, manifest.manifest):
    """Manifest log overlaid with the manifests carried by a bundle."""

    def __init__(self, opener, bundle, linkmapper):
        # initialize the on-disk manifest first, then graft the
        # bundle-held revisions on top of it
        manifest.manifest.__init__(self, opener)
        bundlerevlog.__init__(self, opener, self.indexfile, bundle,
                              linkmapper)
+
class bundlefilelog(bundlerevlog, filelog.filelog):
    """Filelog overlaid with the file revisions carried by a bundle."""

    def __init__(self, opener, path, bundle, linkmapper, repo):
        filelog.filelog.__init__(self, opener, path)
        bundlerevlog.__init__(self, opener, self.indexfile, bundle,
                              linkmapper)
        # kept so _file can hand out sibling filelogs that also see
        # the bundle contents
        self._repo = repo

    def _file(self, f):
        # Bug fix: the base implementation (filelog.filelog._file)
        # returns the filelog for *f*; this override called
        # self._repo.file(f) but dropped the result, so callers
        # following renames received None.  Return the repo's filelog,
        # which is bundle-aware.
        return self._repo.file(f)
+
class bundlerepository(localrepo.localrepository):
    """A read-only repository view over a parent repo plus a bundle.

    If *path* does not name a usable repository, a throwaway empty
    parent is created in a temp dir so the bundle can still be
    inspected.  Compressed bundles are first rewritten uncompressed to
    a temp file because random seeking is needed.
    """

    def __init__(self, ui, path, bundlename):
        self._tempparent = None
        try:
            localrepo.localrepository.__init__(self, ui, path)
        except error.RepoError:
            # no parent repo at *path*: fabricate an empty one
            self._tempparent = tempfile.mkdtemp()
            localrepo.instance(ui, self._tempparent, 1)
            localrepo.localrepository.__init__(self, ui, self._tempparent)

        if path:
            self._url = 'bundle:' + util.expandpath(path) + '+' + bundlename
        else:
            self._url = 'bundle:' + bundlename

        self.tempfile = None
        f = util.posixfile(bundlename, "rb")
        self.bundle = changegroup.readbundle(f, bundlename)
        if self.bundle.compressed():
            # decompress once into a seekable HG10UN temp file and
            # reopen the bundle from there
            fdtemp, temp = tempfile.mkstemp(prefix="hg-bundle-",
                                            suffix=".hg10un", dir=self.path)
            self.tempfile = temp
            fptemp = os.fdopen(fdtemp, 'wb')

            try:
                fptemp.write("HG10UN")
                while True:
                    chunk = self.bundle.read(2**18)
                    if not chunk:
                        break
                    fptemp.write(chunk)
            finally:
                fptemp.close()

            f = util.posixfile(self.tempfile, "rb")
            self.bundle = changegroup.readbundle(f, bundlename)

        # dict with the mapping 'filename' -> position in the bundle
        self.bundlefilespos = {}

    @util.propertycache
    def changelog(self):
        # consume the header if it exists
        self.bundle.changelogheader()
        c = bundlechangelog(self.sopener, self.bundle)
        # reading the changelog leaves the stream at the manifest part;
        # record that position (overwrites the manstart propertycache)
        self.manstart = self.bundle.tell()
        return c

    @util.propertycache
    def manifest(self):
        self.bundle.seek(self.manstart)
        # consume the header if it exists
        self.bundle.manifestheader()
        m = bundlemanifest(self.sopener, self.bundle, self.changelog.rev)
        # stream now sits at the filelog part
        self.filestart = self.bundle.tell()
        return m

    @util.propertycache
    def manstart(self):
        # force the changelog to be parsed; it sets self.manstart
        self.changelog
        return self.manstart

    @util.propertycache
    def filestart(self):
        # force the manifest to be parsed; it sets self.filestart
        self.manifest
        return self.filestart

    def url(self):
        return self._url

    def file(self, f):
        """Return the (bundle-aware) filelog for path *f*."""
        if not self.bundlefilespos:
            # first call: index the start offset of every filelog
            # section in the bundle
            self.bundle.seek(self.filestart)
            while True:
                chunkdata = self.bundle.filelogheader()
                if not chunkdata:
                    break
                fname = chunkdata['filename']
                self.bundlefilespos[fname] = self.bundle.tell()
                # skip over this file's delta chunks
                while True:
                    c = self.bundle.deltachunk(None)
                    if not c:
                        break

        if f[0] == '/':
            f = f[1:]
        if f in self.bundlefilespos:
            self.bundle.seek(self.bundlefilespos[f])
            return bundlefilelog(self.sopener, f, self.bundle,
                                 self.changelog.rev, self)
        else:
            # file untouched by the bundle: plain filelog suffices
            return filelog.filelog(self.sopener, f)

    def close(self):
        """Close assigned bundle file immediately."""
        self.bundle.close()
        if self.tempfile is not None:
            os.unlink(self.tempfile)
        if self._tempparent:
            shutil.rmtree(self._tempparent, True)

    def cancopy(self):
        return False

    def getcwd(self):
        return os.getcwd() # always outside the repo

    def _writebranchcache(self, branches, tip, tiprev):
        # don't overwrite the disk cache with bundle-augmented data
        pass
+
def instance(ui, path, create):
    """Repository factory for 'bundle:' URLs and plain bundle files.

    Creating a bundle repository is not supported.  The parent
    repository path defaults to the configured bundle.mainreporoot,
    relativized against the cwd so output stays free of long
    temp-dir names.
    """
    if create:
        raise util.Abort(_('cannot create new bundle repository'))
    parentpath = ui.config("bundle", "mainreporoot", "")
    if parentpath:
        # Try to make the full path relative so we get a nice, short URL.
        # In particular, we don't want temp dir names in test outputs.
        cwd = os.getcwd()
        if parentpath == cwd:
            parentpath = ''
        else:
            prefix = os.path.join(cwd, '')
            if parentpath.startswith(prefix):
                parentpath = parentpath[len(prefix):]
    u = util.url(path)
    path = u.localpath()
    if u.scheme == 'bundle':
        # 'bundle:parent+bundlefile' or just 'bundle:bundlefile'
        parts = path.split("+", 1)
        if len(parts) == 1:
            repopath, bundlename = parentpath, parts[0]
        else:
            repopath, bundlename = parts
    else:
        repopath, bundlename = parentpath, path
    return bundlerepository(ui, repopath, bundlename)
+
def getremotechanges(ui, repo, other, onlyheads=None, bundlename=None,
                     force=False):
    '''obtains a bundle of changes incoming from other

    "onlyheads" restricts the returned changes to those reachable from the
    specified heads.
    "bundlename", if given, stores the bundle to this file path permanently;
    otherwise it's stored to a temp file and gets deleted again when you call
    the returned "cleanupfn".
    "force" indicates whether to proceed on unrelated repos.

    Returns a tuple (local, csets, cleanupfn):

    "local" is a local repo from which to obtain the actual incoming changesets; it
    is a bundlerepo for the obtained bundle when the original "other" is remote.
    "csets" lists the incoming changeset node ids.
    "cleanupfn" must be called without arguments when you're done processing the
    changes; it closes both the original "other" and the one returned here.
    '''
    # (nodes common to both, whether anything is incoming, remote heads)
    tmp = discovery.findcommonincoming(repo, other, heads=onlyheads, force=force)
    common, incoming, rheads = tmp
    if not incoming:
        # nothing to pull: remove a stale bundle file, best-effort
        try:
            if bundlename:
                os.unlink(bundlename)
        except OSError:
            pass
        return other, [], other.close

    bundle = None
    bundlerepo = None
    localrepo = other
    if bundlename or not other.local():
        # create a bundle (uncompressed if other repo is not local)

        # pick the newest changegroup protocol the peer supports
        if other.capable('getbundle'):
            cg = other.getbundle('incoming', common=common, heads=rheads)
        elif onlyheads is None and not other.capable('changegroupsubset'):
            # compat with older servers when pulling all remote heads
            cg = other.changegroup(incoming, "incoming")
            rheads = None
        else:
            cg = other.changegroupsubset(incoming, rheads, 'incoming')
        bundletype = other.local() and "HG10BZ" or "HG10UN"
        fname = bundle = changegroup.writebundle(cg, bundlename, bundletype)
        # keep written bundle?
        if bundlename:
            bundle = None
        if not other.local():
            # use the created uncompressed bundlerepo
            localrepo = bundlerepo = bundlerepository(ui, repo.root, fname)
            # this repo contains local and other now, so filter out local again
            common = repo.heads()

    csets = localrepo.changelog.findmissing(common, rheads)

    def cleanup():
        # close the bundlerepo (if any), delete the temp bundle file
        # (if any), and close the original peer
        if bundlerepo:
            bundlerepo.close()
        if bundle:
            os.unlink(bundle)
        other.close()

    return (localrepo, csets, cleanup)
+
diff --git a/websdk/mercurial/byterange.py b/websdk/mercurial/byterange.py
index a9d1349..baa67d4 120000..100644
--- a/websdk/mercurial/byterange.py
+++ b/websdk/mercurial/byterange.py
@@ -1 +1,462 @@
-/usr/share/pyshared/mercurial/byterange.py \ No newline at end of file
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the
+# Free Software Foundation, Inc.,
+# 59 Temple Place, Suite 330,
+# Boston, MA 02111-1307 USA
+
+# This file is part of urlgrabber, a high-level cross-protocol url-grabber
+# Copyright 2002-2004 Michael D. Stenner, Ryan Tomayko
+
+# $Id: byterange.py,v 1.9 2005/02/14 21:55:07 mstenner Exp $
+
+import os
+import stat
+import urllib
+import urllib2
+import email.Utils
+
class RangeError(IOError):
    """Error raised when an unsatisfiable range is requested."""
+
class HTTPRangeHandler(urllib2.BaseHandler):
    """urllib2 handler that enables HTTP Range requests.

    The Range header is a plain HTTP feature; all this handler does is
    teach urllib2 that a "206 Partial Content" response is success and
    that a "416 Requested Range Not Satisfiable" is a RangeError.

    Install with:
        opener = urllib2.build_opener(HTTPRangeHandler())
        urllib2.install_opener(opener)
    then set req.headers['Range'] = 'bytes=30-50' on a Request.
    """

    def http_error_206(self, req, fp, code, msg, hdrs):
        # 206 Partial Content: wrap the body exactly like a 200 reply
        resp = urllib.addinfourl(fp, hdrs, req.get_full_url())
        resp.code = code
        resp.msg = msg
        return resp

    def http_error_416(self, req, fp, code, msg, hdrs):
        # the server rejected the requested byte range
        raise RangeError('Requested Range Not Satisfiable')
+
class RangeableFileObject(object):
    """File object wrapper to enable raw range handling.
    This was implemented primarilary for handling range
    specifications for file:// urls. This object effectively makes
    a file object look like it consists only of a range of bytes in
    the stream.

    Examples:
        # expose 10 bytes, starting at byte position 20, from
        # /etc/aliases.
        >>> fo = RangeableFileObject(file('/etc/passwd', 'r'), (20,30))
        # seek seeks within the range (to position 23 in this case)
        >>> fo.seek(3)
        # tell tells where your at _within the range_ (position 3 in
        # this case)
        >>> fo.tell()
        # read EOFs if an attempt is made to read past the last
        # byte in the range. the following will return only 7 bytes.
        >>> fo.read(30)
    """

    def __init__(self, fo, rangetup):
        """Create a RangeableFileObject.
        fo       -- a file like object. only the read() method need be
                    supported but supporting an optimized seek() is
                    preferable.
        rangetup -- a (firstbyte,lastbyte) tuple specifying the range
                    to work over.
        The file object provided is assumed to be at byte offset 0.
        """
        self.fo = fo
        # lastbyte is '' (meaning: to EOF) or an exclusive int offset
        (self.firstbyte, self.lastbyte) = range_tuple_normalize(rangetup)
        # realpos tracks the absolute offset in the underlying file
        self.realpos = 0
        self._do_seek(self.firstbyte)

    def __getattr__(self, name):
        """This effectively allows us to wrap at the instance level.
        Any attribute not found in _this_ object will be searched for
        in self.fo. This includes methods."""
        return getattr(self.fo, name)

    def tell(self):
        """Return the position within the range.
        This is different from fo.seek in that position 0 is the
        first byte position of the range tuple. For example, if
        this object was created with a range tuple of (500,899),
        tell() will return 0 when at byte position 500 of the file.
        """
        return (self.realpos - self.firstbyte)

    def seek(self, offset, whence=0):
        """Seek within the byte range.
        Positioning is identical to that described under tell().
        """
        assert whence in (0, 1, 2)
        if whence == 0: # absolute seek
            realoffset = self.firstbyte + offset
        elif whence == 1: # relative seek
            realoffset = self.realpos + offset
        elif whence == 2: # absolute from end of file
            # XXX: are we raising the right Error here?
            raise IOError('seek from end of file not supported.')

        # do not allow seek past lastbyte in range
        if self.lastbyte and (realoffset >= self.lastbyte):
            realoffset = self.lastbyte

        self._do_seek(realoffset - self.realpos)

    def read(self, size=-1):
        """Read within the range.
        This method will limit the size read based on the range.
        """
        size = self._calc_read_size(size)
        rslt = self.fo.read(size)
        self.realpos += len(rslt)
        return rslt

    def readline(self, size=-1):
        """Read lines within the range.
        This method will limit the size read based on the range.
        """
        size = self._calc_read_size(size)
        rslt = self.fo.readline(size)
        self.realpos += len(rslt)
        return rslt

    def _calc_read_size(self, size):
        """Handles calculating the amount of data to read based on
        the range.  A size of -1 (read all) is clamped to the number
        of bytes remaining in the range.
        """
        if self.lastbyte:
            if size > -1:
                if ((self.realpos + size) >= self.lastbyte):
                    size = (self.lastbyte - self.realpos)
            else:
                size = (self.lastbyte - self.realpos)
        return size

    def _do_seek(self, offset):
        """Seek based on whether wrapped object supports seek().
        offset is relative to the current position (self.realpos).
        """
        assert offset >= 0
        seek = getattr(self.fo, 'seek', self._poor_mans_seek)
        seek(self.realpos + offset)
        self.realpos += offset

    def _poor_mans_seek(self, offset):
        """Seek by calling the wrapped file objects read() method.
        This is used for file like objects that do not have native
        seek support. The wrapped objects read() method is called
        to manually seek to the desired position.
        offset -- read this number of bytes from the wrapped
                  file object.
        raise RangeError if we encounter EOF before reaching the
        specified offset.
        """
        pos = 0
        bufsize = 1024
        while pos < offset:
            if (pos + bufsize) > offset:
                bufsize = offset - pos
            buf = self.fo.read(bufsize)
            if len(buf) != bufsize:
                raise RangeError('Requested Range Not Satisfiable')
            pos += bufsize
+
class FileRangeHandler(urllib2.FileHandler):
    """FileHandler subclass that adds Range support.
    This class handles Range headers exactly like an HTTP
    server would.
    """
    def open_local_file(self, req):
        import mimetypes
        import email
        host = req.get_host()
        file = req.get_selector()
        localfile = urllib.url2pathname(file)
        stats = os.stat(localfile)
        size = stats[stat.ST_SIZE]
        modified = email.Utils.formatdate(stats[stat.ST_MTIME])
        mtype = mimetypes.guess_type(file)[0]
        if host:
            host, port = urllib.splitport(host)
            # NOTE: `socket` resolves via the module-level import that
            # appears further down this file; it has already executed
            # by the time this method can be called.
            if port or socket.gethostbyname(host) not in self.get_names():
                raise urllib2.URLError('file not on local host')
        fo = open(localfile,'rb')
        brange = req.headers.get('Range', None)
        brange = range_header_to_tuple(brange)
        # () means the header was present but malformed
        assert brange != ()
        if brange:
            (fb, lb) = brange
            if lb == '':
                # open-ended range: serve through end of file
                lb = size
            if fb < 0 or fb > size or lb > size:
                raise RangeError('Requested Range Not Satisfiable')
            size = (lb - fb)
            fo = RangeableFileObject(fo, (fb, lb))
        headers = email.message_from_string(
            'Content-Type: %s\nContent-Length: %d\nLast-Modified: %s\n' %
            (mtype or 'text/plain', size, modified))
        return urllib.addinfourl(fo, headers, 'file:'+file)
+
+
+# FTP Range Support
+# Unfortunately, a large amount of base FTP code had to be copied
+# from urllib and urllib2 in order to insert the FTP REST command.
+# Code modifications for range support have been commented as
+# follows:
+# -- range support modifications start/end here
+
+from urllib import splitport, splituser, splitpasswd, splitattr, \
+ unquote, addclosehook, addinfourl
+import ftplib
+import socket
+import sys
+import mimetypes
+import email
+
class FTPRangeHandler(urllib2.FTPHandler):
    """FTPHandler subclass honoring the Range header via FTP REST.

    The bulk of ftp_open is copied from urllib2; only the sections
    marked 'range support modifications' differ, translating a Range
    header into an FTP REST offset and/or a RangeableFileObject.
    """
    def ftp_open(self, req):
        host = req.get_host()
        if not host:
            raise IOError('ftp error', 'no host given')
        host, port = splitport(host)
        if port is None:
            port = ftplib.FTP_PORT
        else:
            port = int(port)

        # username/password handling
        user, host = splituser(host)
        if user:
            user, passwd = splitpasswd(user)
        else:
            passwd = None
        host = unquote(host)
        user = unquote(user or '')
        passwd = unquote(passwd or '')

        try:
            host = socket.gethostbyname(host)
        except socket.error, msg:
            raise urllib2.URLError(msg)
        path, attrs = splitattr(req.get_selector())
        dirs = path.split('/')
        dirs = map(unquote, dirs)
        dirs, file = dirs[:-1], dirs[-1]
        if dirs and not dirs[0]:
            dirs = dirs[1:]
        try:
            fw = self.connect_ftp(user, passwd, host, port, dirs)
            # 'I' = binary transfer for a file, 'D' = directory listing
            type = file and 'I' or 'D'
            for attr in attrs:
                attr, value = splitattr(attr)
                if attr.lower() == 'type' and \
                   value in ('a', 'A', 'i', 'I', 'd', 'D'):
                    type = value.upper()

            # -- range support modifications start here
            rest = None
            range_tup = range_header_to_tuple(req.headers.get('Range', None))
            assert range_tup != ()
            if range_tup:
                (fb, lb) = range_tup
                if fb > 0:
                    # resume transfer at the first requested byte
                    rest = fb
            # -- range support modifications end here

            fp, retrlen = fw.retrfile(file, type, rest)

            # -- range support modifications start here
            if range_tup:
                (fb, lb) = range_tup
                if lb == '':
                    # open-ended range: need the real length to bound it
                    if retrlen is None or retrlen == 0:
                        raise RangeError('Requested Range Not Satisfiable due'
                                         ' to unobtainable file length.')
                    lb = retrlen
                    retrlen = lb - fb
                    if retrlen < 0:
                        # beginning of range is larger than file
                        raise RangeError('Requested Range Not Satisfiable')
                else:
                    retrlen = lb - fb
                    # server already skipped to fb (REST), so expose
                    # only the first retrlen bytes of the stream
                    fp = RangeableFileObject(fp, (0, retrlen))
            # -- range support modifications end here

            headers = ""
            mtype = mimetypes.guess_type(req.get_full_url())[0]
            if mtype:
                headers += "Content-Type: %s\n" % mtype
            if retrlen is not None and retrlen >= 0:
                headers += "Content-Length: %d\n" % retrlen
            headers = email.message_from_string(headers)
            return addinfourl(fp, headers, req.get_full_url())
        except ftplib.all_errors, msg:
            raise IOError('ftp error', msg), sys.exc_info()[2]

    def connect_ftp(self, user, passwd, host, port, dirs):
        # use the REST-capable ftpwrapper defined below
        fw = ftpwrapper(user, passwd, host, port, dirs)
        return fw
+
class ftpwrapper(urllib.ftpwrapper):
    # range support note:
    # this ftpwrapper code is copied directly from
    # urllib. The only enhancement is to add the rest
    # argument and pass it on to ftp.ntransfercmd
    def retrfile(self, file, type, rest=None):
        """Retrieve *file*; *rest* is the REST (resume) byte offset.

        Returns (file-like object, length-or-None).  Falls back to a
        directory listing when the file retrieval is refused.
        """
        self.endtransfer()
        if type in ('d', 'D'):
            cmd = 'TYPE A'
            isdir = 1
        else:
            cmd = 'TYPE ' + type
            isdir = 0
        try:
            self.ftp.voidcmd(cmd)
        except ftplib.all_errors:
            # connection may have gone stale: reconnect and retry once
            self.init()
            self.ftp.voidcmd(cmd)
        conn = None
        if file and not isdir:
            # Use nlst to see if the file exists at all
            try:
                self.ftp.nlst(file)
            except ftplib.error_perm, reason:
                raise IOError('ftp error', reason), sys.exc_info()[2]
            # Restore the transfer mode!
            self.ftp.voidcmd(cmd)
            # Try to retrieve as a file
            try:
                cmd = 'RETR ' + file
                conn = self.ftp.ntransfercmd(cmd, rest)
            except ftplib.error_perm, reason:
                if str(reason).startswith('501'):
                    # workaround for REST not supported error
                    fp, retrlen = self.retrfile(file, type)
                    fp = RangeableFileObject(fp, (rest,''))
                    return (fp, retrlen)
                elif not str(reason).startswith('550'):
                    raise IOError('ftp error', reason), sys.exc_info()[2]
        if not conn:
            # Set transfer mode to ASCII!
            self.ftp.voidcmd('TYPE A')
            # Try a directory listing
            if file:
                cmd = 'LIST ' + file
            else:
                cmd = 'LIST'
            conn = self.ftp.ntransfercmd(cmd)
        self.busy = 1
        # Pass back both a suitably decorated object and a retrieval length
        return (addclosehook(conn[0].makefile('rb'),
                             self.endtransfer), conn[1])
+
+
+####################################################################
+# Range Tuple Functions
+# XXX: These range tuple functions might go better in a class.
+
# lazily-compiled Range header pattern, cached at module level
_rangere = None
def range_header_to_tuple(range_header):
    """Parse a "bytes=first-last" Range header into (firstbyte, lastbyte).

    The inclusive lastbyte from the header is converted to an exclusive
    offset; an omitted lastbyte comes back as ''.  Returns None when
    range_header is None and () when the header does not match the
    range syntax.
    """
    global _rangere
    if range_header is None:
        return None
    if _rangere is None:
        import re
        _rangere = re.compile(r'^bytes=(\d{1,})-(\d*)')
    match = _rangere.match(range_header)
    if not match:
        return ()
    tup = range_tuple_normalize(match.group(1, 2))
    if tup and tup[1]:
        # header values are inclusive; internal form is exclusive
        tup = (tup[0], tup[1] + 1)
    return tup
+
def range_tuple_to_header(range_tup):
    """Render a (firstbyte, lastbyte) tuple as a Range header value.

    Returns "bytes=<first>-<last>" with the exclusive lastbyte
    converted back to the header's inclusive form, or None when no
    Range header is needed (whole file).
    """
    if range_tup is None:
        return None
    range_tup = range_tuple_normalize(range_tup)
    if not range_tup:
        return None
    first, last = range_tup
    if last:
        # exclusive -> inclusive
        last = last - 1
    return 'bytes=%s-%s' % (first, last)
+
def range_tuple_normalize(range_tup):
    """Normalize a (first_byte, last_byte) range tuple.

    The first element becomes an int; the second becomes '' (meaning:
    through the last byte) or an int.  The whole-file range (0, '')
    normalizes to None.  Raises RangeError for an inverted range.
    """
    if range_tup is None:
        return None
    # first byte: missing/empty means start of file
    first = range_tup[0]
    first = 0 if first in (None, '') else int(first)
    # last byte: a missing element or None means open-ended ('')
    try:
        last = range_tup[1]
    except IndexError:
        last = ''
    else:
        if last is None:
            last = ''
        elif last != '':
            last = int(last)
    # the entire file needs no range at all
    if (first, last) == (0, ''):
        return None
    # reject inverted ranges
    if last < first:
        raise RangeError('Invalid byte range: %s-%s' % (first, last))
    return (first, last)
diff --git a/websdk/mercurial/changegroup.py b/websdk/mercurial/changegroup.py
index 6ea0821..7ee7b3f 120000..100644
--- a/websdk/mercurial/changegroup.py
+++ b/websdk/mercurial/changegroup.py
@@ -1 +1,256 @@
-/usr/share/pyshared/mercurial/changegroup.py \ No newline at end of file
+# changegroup.py - Mercurial changegroup manipulation functions
+#
+# Copyright 2006 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from i18n import _
+from node import nullrev
+import mdiff, util
+import struct, os, bz2, zlib, tempfile
+
+_BUNDLE10_DELTA_HEADER = "20s20s20s20s"
+
def readexactly(stream, n):
    '''read n bytes from stream.read and abort if less was available'''
    data = stream.read(n)
    if len(data) < n:
        # a short read means the peer hung up or the file is truncated
        raise util.Abort(_("stream ended unexpectedly"
                           " (got %d bytes, expected %d)")
                         % (len(data), n))
    return data
+
def getchunk(stream):
    """Return the next changegroup chunk from *stream* as a string.

    A chunk is a 4-byte big-endian length (which counts the 4 header
    bytes themselves) followed by the payload; a stored length of 0
    is a terminator and yields "".
    """
    lengthdata = readexactly(stream, 4)
    length = struct.unpack(">l", lengthdata)[0]
    if length <= 4:
        if length:
            # lengths 1-4 (and negatives) cannot describe a real chunk
            raise util.Abort(_("invalid chunk length %d") % length)
        return ""
    return readexactly(stream, length - 4)
+
def chunkheader(length):
    """Return the 4-byte big-endian header for a payload of *length* bytes."""
    # the stored length includes the 4 header bytes themselves
    return struct.pack(">l", length + 4)
+
def closechunk():
    """Return the header of a zero-length chunk, which terminates a group."""
    return struct.pack(">l", 0)
+
class nocompress(object):
    """Compressor-shaped object that passes data through unchanged."""

    def compress(self, data):
        return data

    def flush(self):
        return ""
+
# Map bundle type name -> (on-disk header, compressor factory).  Note
# that HG10BZ's header is just "HG10": the bz2 stream itself begins
# with "BZ", completing the 6-byte magic (see decompressor(), which
# re-injects "BZ" when decoding).
bundletypes = {
    "": ("", nocompress), # only when using unbundle on ssh and old http servers
                          # since the unification ssh accepts a header but there
                          # is no capability signaling it.
    "HG10UN": ("HG10UN", nocompress),
    "HG10BZ": ("HG10", lambda: bz2.BZ2Compressor()),
    "HG10GZ": ("HG10GZ", lambda: zlib.compressobj()),
}

# hgweb uses this list to communicate its preferred type
bundlepriority = ['HG10GZ', 'HG10BZ', 'HG10UN']
+
def writebundle(cg, filename, bundletype):
    """Write a bundle file and return its filename.

    Existing files will not be overwritten.
    If no filename is specified, a temporary file is created.
    bz2 compression can be turned off.
    The bundle file will be deleted in case of errors.
    """

    fh = None
    cleanup = None
    try:
        if filename:
            fh = open(filename, "wb")
        else:
            fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
            fh = os.fdopen(fd, "wb")
        # remember the path so the finally block can delete it on error;
        # reset to None once the write completed successfully
        cleanup = filename

        header, compressor = bundletypes[bundletype]
        fh.write(header)
        z = compressor()

        # parse the changegroup data, otherwise we will block
        # in case of sshrepo because we don't know the end of the stream

        # an empty chunkgroup is the end of the changegroup
        # a changegroup has at least 2 chunkgroups (changelog and manifest).
        # after that, an empty chunkgroup is the end of the changegroup
        empty = False
        count = 0
        while not empty or count <= 2:
            empty = True
            count += 1
            while True:
                chunk = getchunk(cg)
                if not chunk:
                    break
                empty = False
                fh.write(z.compress(chunkheader(len(chunk))))
                pos = 0
                # feed the compressor in 1MB slices to bound memory use
                while pos < len(chunk):
                    next = pos + 2**20
                    fh.write(z.compress(chunk[pos:next]))
                    pos = next
            fh.write(z.compress(closechunk()))
        fh.write(z.flush())
        cleanup = None
        return filename
    finally:
        if fh is not None:
            fh.close()
        if cleanup is not None:
            os.unlink(cleanup)
+
def decompressor(fh, alg):
    """Wrap stream *fh* with a decompressor for bundle algorithm *alg*.

    'UN' returns the stream unchanged; 'GZ' and 'BZ' return a
    chunkbuffer over a decompressing generator.  Unknown algorithms
    abort.
    """
    if alg == 'UN':
        return fh
    elif alg == 'GZ':
        def generator(f):
            zd = zlib.decompressobj()
            for chunk in f:
                yield zd.decompress(chunk)
    elif alg == 'BZ':
        def generator(f):
            zd = bz2.BZ2Decompressor()
            # the "BZ" bytes were consumed as part of the 6-byte bundle
            # header (readbundle); re-inject them for the decompressor
            zd.decompress("BZ")
            for chunk in util.filechunkiter(f, 4096):
                yield zd.decompress(chunk)
    else:
        raise util.Abort("unknown bundle compression '%s'" % alg)
    return util.chunkbuffer(generator(fh))
+
class unbundle10(object):
    """Reader for a version-10 changegroup stream.

    Thin wrapper around a (possibly decompressed) stream providing the
    chunk-level parsing used by bundle consumers.  `callback`, when
    set, is invoked once per non-empty chunk (progress reporting).
    """
    deltaheader = _BUNDLE10_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    def __init__(self, fh, alg):
        self._stream = decompressor(fh, alg)
        self._type = alg
        self.callback = None
    def compressed(self):
        # True unless the stream is the raw 'UN' encoding
        return self._type != 'UN'
    def read(self, l):
        return self._stream.read(l)
    def seek(self, pos):
        return self._stream.seek(pos)
    def tell(self):
        return self._stream.tell()
    def close(self):
        return self._stream.close()

    def chunklength(self):
        """Read a chunk header and return the payload length (0 at end)."""
        d = readexactly(self._stream, 4)
        l = struct.unpack(">l", d)[0]
        if l <= 4:
            if l:
                raise util.Abort(_("invalid chunk length %d") % l)
            return 0
        if self.callback:
            self.callback()
        return l - 4

    def changelogheader(self):
        """v10 does not have a changelog header chunk"""
        return {}

    def manifestheader(self):
        """v10 does not have a manifest header chunk"""
        return {}

    def filelogheader(self):
        """return the header of the filelogs chunk, v10 only has the filename"""
        l = self.chunklength()
        if not l:
            return {}
        fname = readexactly(self._stream, l)
        return dict(filename=fname)

    def _deltaheader(self, headertuple, prevnode):
        # v10 deltas are implicitly based on the previous node in the
        # stream, or on p1 for the first delta of a group
        node, p1, p2, cs = headertuple
        if prevnode is None:
            deltabase = p1
        else:
            deltabase = prevnode
        return node, p1, p2, deltabase, cs

    def deltachunk(self, prevnode):
        """Read one delta chunk; returns {} at the end of the group."""
        l = self.chunklength()
        if not l:
            return {}
        headerdata = readexactly(self._stream, self.deltaheadersize)
        header = struct.unpack(self.deltaheader, headerdata)
        delta = readexactly(self._stream, l - self.deltaheadersize)
        node, p1, p2, deltabase, cs = self._deltaheader(header, prevnode)
        return dict(node=node, p1=p1, p2=p2, cs=cs,
                    deltabase=deltabase, delta=delta)
+
class headerlessfixup(object):
    """File-like wrapper that replays already-consumed header bytes.

    readbundle() must sniff the first bytes of a stream; when the
    stream turns out to be a headerless changegroup, those sniffed
    bytes are pushed back through this wrapper so the consumer sees
    the complete stream again.
    """

    def __init__(self, fh, h):
        self._h = h
        self._fh = fh

    def read(self, n):
        if not self._h:
            # buffered bytes exhausted: read straight from the stream
            return readexactly(self._fh, n)
        buffered, self._h = self._h[:n], self._h[n:]
        if len(buffered) < n:
            buffered += readexactly(self._fh, n - len(buffered))
        return buffered
+
def readbundle(fh, fname):
    """Sniff the 6-byte bundle magic on *fh* and return an unbundler.

    Headerless changegroup data (which starts with a NUL byte from the
    big-endian chunk length) is accepted by re-injecting the sniffed
    bytes and treating the stream as HG10UN.  Aborts for non-bundle
    data or an unknown bundle version.
    """
    header = readexactly(fh, 6)

    if not fname:
        fname = "stream"
    if not header.startswith('HG') and header.startswith('\0'):
        # raw changegroup: push the sniffed bytes back onto the stream
        fh = headerlessfixup(fh, header)
        header = "HG10UN"

    magic = header[0:2]
    version = header[2:4]
    alg = header[4:6]

    if magic != 'HG':
        raise util.Abort(_('%s: not a Mercurial bundle') % fname)
    if version != '10':
        raise util.Abort(_('%s: unknown bundle version %s') % (fname, version))
    return unbundle10(fh, alg)
+
class bundle10(object):
    """Writer-side helper producing version-10 changegroup chunks.

    *lookup* maps (revlog, node) to the linked changelog node stored in
    each delta header.
    """
    deltaheader = _BUNDLE10_DELTA_HEADER
    def __init__(self, lookup):
        self._lookup = lookup
    def close(self):
        # a zero-length chunk terminates the current group
        return closechunk()
    def fileheader(self, fname):
        return chunkheader(len(fname)) + fname
    def revchunk(self, revlog, rev, prev):
        """Yield the chunk (header, meta, delta) for *rev* against *prev*."""
        node = revlog.node(rev)
        p1, p2 = revlog.parentrevs(rev)
        base = prev

        prefix = ''
        if base == nullrev:
            # no previous rev to delta against: emit the full text,
            # framed as a trivial diff
            delta = revlog.revision(node)
            prefix = mdiff.trivialdiffheader(len(delta))
        else:
            delta = revlog.revdiff(base, rev)
        linknode = self._lookup(revlog, node)
        p1n, p2n = revlog.parents(node)
        basenode = revlog.node(base)
        meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode)
        meta += prefix
        l = len(meta) + len(delta)
        yield chunkheader(l)
        yield meta
        yield delta
    def builddeltaheader(self, node, p1n, p2n, basenode, linknode):
        # do nothing with basenode, it is implicitly the previous one in HG10
        return struct.pack(self.deltaheader, node, p1n, p2n, linknode)
diff --git a/websdk/mercurial/changelog.py b/websdk/mercurial/changelog.py
index 2b80448..55e23c5 120000..100644
--- a/websdk/mercurial/changelog.py
+++ b/websdk/mercurial/changelog.py
@@ -1 +1,244 @@
-/usr/share/pyshared/mercurial/changelog.py \ No newline at end of file
+# changelog.py - changelog class for mercurial
+#
+# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from node import bin, hex, nullid
+from i18n import _
+import util, error, revlog, encoding
+
+def _string_escape(text):
+ """
+ >>> d = {'nl': chr(10), 'bs': chr(92), 'cr': chr(13), 'nul': chr(0)}
+ >>> s = "ab%(nl)scd%(bs)s%(bs)sn%(nul)sab%(cr)scd%(bs)s%(nl)s" % d
+ >>> s
+ 'ab\\ncd\\\\\\\\n\\x00ab\\rcd\\\\\\n'
+ >>> res = _string_escape(s)
+ >>> s == res.decode('string_escape')
+ True
+ """
+ # subset of the string_escape codec
+ text = text.replace('\\', '\\\\').replace('\n', '\\n').replace('\r', '\\r')
+ return text.replace('\0', '\\0')
+
+def decodeextra(text):
+ extra = {}
+ for l in text.split('\0'):
+ if l:
+ k, v = l.decode('string_escape').split(':', 1)
+ extra[k] = v
+ return extra
+
+def encodeextra(d):
+ # keys must be sorted to produce a deterministic changelog entry
+ items = [_string_escape('%s:%s' % (k, d[k])) for k in sorted(d)]
+ return "\0".join(items)
+
+class appender(object):
+ '''the changelog index must be updated last on disk, so we use this class
+ to delay writes to it'''
+ def __init__(self, fp, buf):
+ self.data = buf
+ self.fp = fp
+ self.offset = fp.tell()
+ self.size = util.fstat(fp).st_size
+
+ def end(self):
+ return self.size + len("".join(self.data))
+ def tell(self):
+ return self.offset
+ def flush(self):
+ pass
+ def close(self):
+ self.fp.close()
+
+ def seek(self, offset, whence=0):
+ '''virtual file offset spans real file and data'''
+ if whence == 0:
+ self.offset = offset
+ elif whence == 1:
+ self.offset += offset
+ elif whence == 2:
+ self.offset = self.end() + offset
+ if self.offset < self.size:
+ self.fp.seek(self.offset)
+
+ def read(self, count=-1):
+ '''only trick here is reads that span real file and data'''
+ ret = ""
+ if self.offset < self.size:
+ s = self.fp.read(count)
+ ret = s
+ self.offset += len(s)
+ if count > 0:
+ count -= len(s)
+ if count != 0:
+ doff = self.offset - self.size
+ self.data.insert(0, "".join(self.data))
+ del self.data[1:]
+ s = self.data[0][doff:doff + count]
+ self.offset += len(s)
+ ret += s
+ return ret
+
+ def write(self, s):
+ self.data.append(str(s))
+ self.offset += len(s)
+
+def delayopener(opener, target, divert, buf):
+ def o(name, mode='r'):
+ if name != target:
+ return opener(name, mode)
+ if divert:
+ return opener(name + ".a", mode.replace('a', 'w'))
+ # otherwise, divert to memory
+ return appender(opener(name, mode), buf)
+ return o
+
+class changelog(revlog.revlog):
+ def __init__(self, opener):
+ revlog.revlog.__init__(self, opener, "00changelog.i")
+ if self._initempty:
+ # changelogs don't benefit from generaldelta
+ self.version &= ~revlog.REVLOGGENERALDELTA
+ self._generaldelta = False
+ self._realopener = opener
+ self._delayed = False
+ self._divert = False
+ # hiddenrevs: revs that should be hidden by command and tools
+ self.hiddenrevs = set()
+
+ def delayupdate(self):
+ "delay visibility of index updates to other readers"
+ self._delayed = True
+ self._divert = (len(self) == 0)
+ self._delaybuf = []
+ self.opener = delayopener(self._realopener, self.indexfile,
+ self._divert, self._delaybuf)
+
+ def finalize(self, tr):
+ "finalize index updates"
+ self._delayed = False
+ self.opener = self._realopener
+ # move redirected index data back into place
+ if self._divert:
+ nfile = self.opener(self.indexfile + ".a")
+ n = nfile.name
+ nfile.close()
+ util.rename(n, n[:-2])
+ elif self._delaybuf:
+ fp = self.opener(self.indexfile, 'a')
+ fp.write("".join(self._delaybuf))
+ fp.close()
+ self._delaybuf = []
+ # split when we're done
+ self.checkinlinesize(tr)
+
+ def readpending(self, file):
+ r = revlog.revlog(self.opener, file)
+ self.index = r.index
+ self.nodemap = r.nodemap
+ self._chunkcache = r._chunkcache
+
+ def writepending(self):
+ "create a file containing the unfinalized state for pretxnchangegroup"
+ if self._delaybuf:
+ # make a temporary copy of the index
+ fp1 = self._realopener(self.indexfile)
+ fp2 = self._realopener(self.indexfile + ".a", "w")
+ fp2.write(fp1.read())
+ # add pending data
+ fp2.write("".join(self._delaybuf))
+ fp2.close()
+ # switch modes so finalize can simply rename
+ self._delaybuf = []
+ self._divert = True
+
+ if self._divert:
+ return True
+
+ return False
+
+ def checkinlinesize(self, tr, fp=None):
+ if not self._delayed:
+ revlog.revlog.checkinlinesize(self, tr, fp)
+
+ def read(self, node):
+ """
+ format used:
+ nodeid\n : manifest node in ascii
+ user\n : user, no \n or \r allowed
+ time tz extra\n : date (time is int or float, timezone is int)
+ : extra is metadatas, encoded and separated by '\0'
+ : older versions ignore it
+ files\n\n : files modified by the cset, no \n or \r allowed
+ (.*) : comment (free text, ideally utf-8)
+
+ changelog v0 doesn't use extra
+ """
+ text = self.revision(node)
+ if not text:
+ return (nullid, "", (0, 0), [], "", {'branch': 'default'})
+ last = text.index("\n\n")
+ desc = encoding.tolocal(text[last + 2:])
+ l = text[:last].split('\n')
+ manifest = bin(l[0])
+ user = encoding.tolocal(l[1])
+
+ extra_data = l[2].split(' ', 2)
+ if len(extra_data) != 3:
+ time = float(extra_data.pop(0))
+ try:
+ # various tools did silly things with the time zone field.
+ timezone = int(extra_data[0])
+ except ValueError:
+ timezone = 0
+ extra = {}
+ else:
+ time, timezone, extra = extra_data
+ time, timezone = float(time), int(timezone)
+ extra = decodeextra(extra)
+ if not extra.get('branch'):
+ extra['branch'] = 'default'
+ files = l[3:]
+ return (manifest, user, (time, timezone), files, desc, extra)
+
+ def add(self, manifest, files, desc, transaction, p1, p2,
+ user, date=None, extra=None):
+ # Convert to UTF-8 encoded bytestrings as the very first
+ # thing: calling any method on a localstr object will turn it
+ # into a str object and the cached UTF-8 string is thus lost.
+ user, desc = encoding.fromlocal(user), encoding.fromlocal(desc)
+
+ user = user.strip()
+ # An empty username or a username with a "\n" will make the
+ # revision text contain two "\n\n" sequences -> corrupt
+ # repository since read cannot unpack the revision.
+ if not user:
+ raise error.RevlogError(_("empty username"))
+ if "\n" in user:
+ raise error.RevlogError(_("username %s contains a newline")
+ % repr(user))
+
+ # strip trailing whitespace and leading and trailing empty lines
+ desc = '\n'.join([l.rstrip() for l in desc.splitlines()]).strip('\n')
+
+ if date:
+ parseddate = "%d %d" % util.parsedate(date)
+ else:
+ parseddate = "%d %d" % util.makedate()
+ if extra:
+ branch = extra.get("branch")
+ if branch in ("default", ""):
+ del extra["branch"]
+ elif branch in (".", "null", "tip"):
+ raise error.RevlogError(_('the name \'%s\' is reserved')
+ % branch)
+ if extra:
+ extra = encodeextra(extra)
+ parseddate = "%s %s" % (parseddate, extra)
+ l = [hex(manifest), user, parseddate] + sorted(files) + ["", desc]
+ text = "\n".join(l)
+ return self.addrevision(text, transaction, len(self), p1, p2)
diff --git a/websdk/mercurial/cmdutil.py b/websdk/mercurial/cmdutil.py
index 26624d7..dfc1094 120000..100644
--- a/websdk/mercurial/cmdutil.py
+++ b/websdk/mercurial/cmdutil.py
@@ -1 +1,1271 @@
-/usr/share/pyshared/mercurial/cmdutil.py \ No newline at end of file
+# cmdutil.py - help for command processing in mercurial
+#
+# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from node import hex, nullid, nullrev, short
+from i18n import _
+import os, sys, errno, re, tempfile
+import util, scmutil, templater, patch, error, templatekw, revlog, copies
+import match as matchmod
+import subrepo
+
+def parsealiases(cmd):
+ return cmd.lstrip("^").split("|")
+
+def findpossible(cmd, table, strict=False):
+ """
+ Return cmd -> (aliases, command table entry)
+ for each matching command.
+ Return debug commands (or their aliases) only if no normal command matches.
+ """
+ choice = {}
+ debugchoice = {}
+
+ if cmd in table:
+ # short-circuit exact matches, "log" alias beats "^log|history"
+ keys = [cmd]
+ else:
+ keys = table.keys()
+
+ for e in keys:
+ aliases = parsealiases(e)
+ found = None
+ if cmd in aliases:
+ found = cmd
+ elif not strict:
+ for a in aliases:
+ if a.startswith(cmd):
+ found = a
+ break
+ if found is not None:
+ if aliases[0].startswith("debug") or found.startswith("debug"):
+ debugchoice[found] = (aliases, table[e])
+ else:
+ choice[found] = (aliases, table[e])
+
+ if not choice and debugchoice:
+ choice = debugchoice
+
+ return choice
+
+def findcmd(cmd, table, strict=True):
+ """Return (aliases, command table entry) for command string."""
+ choice = findpossible(cmd, table, strict)
+
+ if cmd in choice:
+ return choice[cmd]
+
+ if len(choice) > 1:
+ clist = choice.keys()
+ clist.sort()
+ raise error.AmbiguousCommand(cmd, clist)
+
+ if choice:
+ return choice.values()[0]
+
+ raise error.UnknownCommand(cmd)
+
+def findrepo(p):
+ while not os.path.isdir(os.path.join(p, ".hg")):
+ oldp, p = p, os.path.dirname(p)
+ if p == oldp:
+ return None
+
+ return p
+
+def bailifchanged(repo):
+ if repo.dirstate.p2() != nullid:
+ raise util.Abort(_('outstanding uncommitted merge'))
+ modified, added, removed, deleted = repo.status()[:4]
+ if modified or added or removed or deleted:
+ raise util.Abort(_("outstanding uncommitted changes"))
+ ctx = repo[None]
+ for s in ctx.substate:
+ if ctx.sub(s).dirty():
+ raise util.Abort(_("uncommitted changes in subrepo %s") % s)
+
+def logmessage(ui, opts):
+ """ get the log message according to -m and -l option """
+ message = opts.get('message')
+ logfile = opts.get('logfile')
+
+ if message and logfile:
+ raise util.Abort(_('options --message and --logfile are mutually '
+ 'exclusive'))
+ if not message and logfile:
+ try:
+ if logfile == '-':
+ message = ui.fin.read()
+ else:
+ message = '\n'.join(util.readfile(logfile).splitlines())
+ except IOError, inst:
+ raise util.Abort(_("can't read commit message '%s': %s") %
+ (logfile, inst.strerror))
+ return message
+
+def loglimit(opts):
+ """get the log limit according to option -l/--limit"""
+ limit = opts.get('limit')
+ if limit:
+ try:
+ limit = int(limit)
+ except ValueError:
+ raise util.Abort(_('limit must be a positive integer'))
+ if limit <= 0:
+ raise util.Abort(_('limit must be positive'))
+ else:
+ limit = None
+ return limit
+
+def makefilename(repo, pat, node, desc=None,
+ total=None, seqno=None, revwidth=None, pathname=None):
+ node_expander = {
+ 'H': lambda: hex(node),
+ 'R': lambda: str(repo.changelog.rev(node)),
+ 'h': lambda: short(node),
+ 'm': lambda: re.sub('[^\w]', '_', str(desc))
+ }
+ expander = {
+ '%': lambda: '%',
+ 'b': lambda: os.path.basename(repo.root),
+ }
+
+ try:
+ if node:
+ expander.update(node_expander)
+ if node:
+ expander['r'] = (lambda:
+ str(repo.changelog.rev(node)).zfill(revwidth or 0))
+ if total is not None:
+ expander['N'] = lambda: str(total)
+ if seqno is not None:
+ expander['n'] = lambda: str(seqno)
+ if total is not None and seqno is not None:
+ expander['n'] = lambda: str(seqno).zfill(len(str(total)))
+ if pathname is not None:
+ expander['s'] = lambda: os.path.basename(pathname)
+ expander['d'] = lambda: os.path.dirname(pathname) or '.'
+ expander['p'] = lambda: pathname
+
+ newname = []
+ patlen = len(pat)
+ i = 0
+ while i < patlen:
+ c = pat[i]
+ if c == '%':
+ i += 1
+ c = pat[i]
+ c = expander[c]()
+ newname.append(c)
+ i += 1
+ return ''.join(newname)
+ except KeyError, inst:
+ raise util.Abort(_("invalid format spec '%%%s' in output filename") %
+ inst.args[0])
+
+def makefileobj(repo, pat, node=None, desc=None, total=None,
+ seqno=None, revwidth=None, mode='wb', pathname=None):
+
+ writable = mode not in ('r', 'rb')
+
+ if not pat or pat == '-':
+ fp = writable and repo.ui.fout or repo.ui.fin
+ if util.safehasattr(fp, 'fileno'):
+ return os.fdopen(os.dup(fp.fileno()), mode)
+ else:
+ # if this fp can't be duped properly, return
+ # a dummy object that can be closed
+ class wrappedfileobj(object):
+ noop = lambda x: None
+ def __init__(self, f):
+ self.f = f
+ def __getattr__(self, attr):
+ if attr == 'close':
+ return self.noop
+ else:
+ return getattr(self.f, attr)
+
+ return wrappedfileobj(fp)
+ if util.safehasattr(pat, 'write') and writable:
+ return pat
+ if util.safehasattr(pat, 'read') and 'r' in mode:
+ return pat
+ return open(makefilename(repo, pat, node, desc, total, seqno, revwidth,
+ pathname),
+ mode)
+
+def openrevlog(repo, cmd, file_, opts):
+ """opens the changelog, manifest, a filelog or a given revlog"""
+ cl = opts['changelog']
+ mf = opts['manifest']
+ msg = None
+ if cl and mf:
+ msg = _('cannot specify --changelog and --manifest at the same time')
+ elif cl or mf:
+ if file_:
+ msg = _('cannot specify filename with --changelog or --manifest')
+ elif not repo:
+ msg = _('cannot specify --changelog or --manifest '
+ 'without a repository')
+ if msg:
+ raise util.Abort(msg)
+
+ r = None
+ if repo:
+ if cl:
+ r = repo.changelog
+ elif mf:
+ r = repo.manifest
+ elif file_:
+ filelog = repo.file(file_)
+ if len(filelog):
+ r = filelog
+ if not r:
+ if not file_:
+ raise error.CommandError(cmd, _('invalid arguments'))
+ if not os.path.isfile(file_):
+ raise util.Abort(_("revlog '%s' not found") % file_)
+ r = revlog.revlog(scmutil.opener(os.getcwd(), audit=False),
+ file_[:-2] + ".i")
+ return r
+
+def copy(ui, repo, pats, opts, rename=False):
+ # called with the repo lock held
+ #
+ # hgsep => pathname that uses "/" to separate directories
+ # ossep => pathname that uses os.sep to separate directories
+ cwd = repo.getcwd()
+ targets = {}
+ after = opts.get("after")
+ dryrun = opts.get("dry_run")
+ wctx = repo[None]
+
+ def walkpat(pat):
+ srcs = []
+ badstates = after and '?' or '?r'
+ m = scmutil.match(repo[None], [pat], opts, globbed=True)
+ for abs in repo.walk(m):
+ state = repo.dirstate[abs]
+ rel = m.rel(abs)
+ exact = m.exact(abs)
+ if state in badstates:
+ if exact and state == '?':
+ ui.warn(_('%s: not copying - file is not managed\n') % rel)
+ if exact and state == 'r':
+ ui.warn(_('%s: not copying - file has been marked for'
+ ' remove\n') % rel)
+ continue
+ # abs: hgsep
+ # rel: ossep
+ srcs.append((abs, rel, exact))
+ return srcs
+
+ # abssrc: hgsep
+ # relsrc: ossep
+ # otarget: ossep
+ def copyfile(abssrc, relsrc, otarget, exact):
+ abstarget = scmutil.canonpath(repo.root, cwd, otarget)
+ reltarget = repo.pathto(abstarget, cwd)
+ target = repo.wjoin(abstarget)
+ src = repo.wjoin(abssrc)
+ state = repo.dirstate[abstarget]
+
+ scmutil.checkportable(ui, abstarget)
+
+ # check for collisions
+ prevsrc = targets.get(abstarget)
+ if prevsrc is not None:
+ ui.warn(_('%s: not overwriting - %s collides with %s\n') %
+ (reltarget, repo.pathto(abssrc, cwd),
+ repo.pathto(prevsrc, cwd)))
+ return
+
+ # check for overwrites
+ exists = os.path.lexists(target)
+ if not after and exists or after and state in 'mn':
+ if not opts['force']:
+ ui.warn(_('%s: not overwriting - file exists\n') %
+ reltarget)
+ return
+
+ if after:
+ if not exists:
+ if rename:
+ ui.warn(_('%s: not recording move - %s does not exist\n') %
+ (relsrc, reltarget))
+ else:
+ ui.warn(_('%s: not recording copy - %s does not exist\n') %
+ (relsrc, reltarget))
+ return
+ elif not dryrun:
+ try:
+ if exists:
+ os.unlink(target)
+ targetdir = os.path.dirname(target) or '.'
+ if not os.path.isdir(targetdir):
+ os.makedirs(targetdir)
+ util.copyfile(src, target)
+ srcexists = True
+ except IOError, inst:
+ if inst.errno == errno.ENOENT:
+ ui.warn(_('%s: deleted in working copy\n') % relsrc)
+ srcexists = False
+ else:
+ ui.warn(_('%s: cannot copy - %s\n') %
+ (relsrc, inst.strerror))
+ return True # report a failure
+
+ if ui.verbose or not exact:
+ if rename:
+ ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
+ else:
+ ui.status(_('copying %s to %s\n') % (relsrc, reltarget))
+
+ targets[abstarget] = abssrc
+
+ # fix up dirstate
+ scmutil.dirstatecopy(ui, repo, wctx, abssrc, abstarget,
+ dryrun=dryrun, cwd=cwd)
+ if rename and not dryrun:
+ if not after and srcexists:
+ util.unlinkpath(repo.wjoin(abssrc))
+ wctx.forget([abssrc])
+
+ # pat: ossep
+ # dest ossep
+ # srcs: list of (hgsep, hgsep, ossep, bool)
+ # return: function that takes hgsep and returns ossep
+ def targetpathfn(pat, dest, srcs):
+ if os.path.isdir(pat):
+ abspfx = scmutil.canonpath(repo.root, cwd, pat)
+ abspfx = util.localpath(abspfx)
+ if destdirexists:
+ striplen = len(os.path.split(abspfx)[0])
+ else:
+ striplen = len(abspfx)
+ if striplen:
+ striplen += len(os.sep)
+ res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
+ elif destdirexists:
+ res = lambda p: os.path.join(dest,
+ os.path.basename(util.localpath(p)))
+ else:
+ res = lambda p: dest
+ return res
+
+ # pat: ossep
+ # dest ossep
+ # srcs: list of (hgsep, hgsep, ossep, bool)
+ # return: function that takes hgsep and returns ossep
+ def targetpathafterfn(pat, dest, srcs):
+ if matchmod.patkind(pat):
+ # a mercurial pattern
+ res = lambda p: os.path.join(dest,
+ os.path.basename(util.localpath(p)))
+ else:
+ abspfx = scmutil.canonpath(repo.root, cwd, pat)
+ if len(abspfx) < len(srcs[0][0]):
+ # A directory. Either the target path contains the last
+ # component of the source path or it does not.
+ def evalpath(striplen):
+ score = 0
+ for s in srcs:
+ t = os.path.join(dest, util.localpath(s[0])[striplen:])
+ if os.path.lexists(t):
+ score += 1
+ return score
+
+ abspfx = util.localpath(abspfx)
+ striplen = len(abspfx)
+ if striplen:
+ striplen += len(os.sep)
+ if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
+ score = evalpath(striplen)
+ striplen1 = len(os.path.split(abspfx)[0])
+ if striplen1:
+ striplen1 += len(os.sep)
+ if evalpath(striplen1) > score:
+ striplen = striplen1
+ res = lambda p: os.path.join(dest,
+ util.localpath(p)[striplen:])
+ else:
+ # a file
+ if destdirexists:
+ res = lambda p: os.path.join(dest,
+ os.path.basename(util.localpath(p)))
+ else:
+ res = lambda p: dest
+ return res
+
+
+ pats = scmutil.expandpats(pats)
+ if not pats:
+ raise util.Abort(_('no source or destination specified'))
+ if len(pats) == 1:
+ raise util.Abort(_('no destination specified'))
+ dest = pats.pop()
+ destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
+ if not destdirexists:
+ if len(pats) > 1 or matchmod.patkind(pats[0]):
+ raise util.Abort(_('with multiple sources, destination must be an '
+ 'existing directory'))
+ if util.endswithsep(dest):
+ raise util.Abort(_('destination %s is not a directory') % dest)
+
+ tfn = targetpathfn
+ if after:
+ tfn = targetpathafterfn
+ copylist = []
+ for pat in pats:
+ srcs = walkpat(pat)
+ if not srcs:
+ continue
+ copylist.append((tfn(pat, dest, srcs), srcs))
+ if not copylist:
+ raise util.Abort(_('no files to copy'))
+
+ errors = 0
+ for targetpath, srcs in copylist:
+ for abssrc, relsrc, exact in srcs:
+ if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
+ errors += 1
+
+ if errors:
+ ui.warn(_('(consider using --after)\n'))
+
+ return errors != 0
+
+def service(opts, parentfn=None, initfn=None, runfn=None, logfile=None,
+ runargs=None, appendpid=False):
+ '''Run a command as a service.'''
+
+ if opts['daemon'] and not opts['daemon_pipefds']:
+ # Signal child process startup with file removal
+ lockfd, lockpath = tempfile.mkstemp(prefix='hg-service-')
+ os.close(lockfd)
+ try:
+ if not runargs:
+ runargs = util.hgcmd() + sys.argv[1:]
+ runargs.append('--daemon-pipefds=%s' % lockpath)
+ # Don't pass --cwd to the child process, because we've already
+ # changed directory.
+ for i in xrange(1, len(runargs)):
+ if runargs[i].startswith('--cwd='):
+ del runargs[i]
+ break
+ elif runargs[i].startswith('--cwd'):
+ del runargs[i:i + 2]
+ break
+ def condfn():
+ return not os.path.exists(lockpath)
+ pid = util.rundetached(runargs, condfn)
+ if pid < 0:
+ raise util.Abort(_('child process failed to start'))
+ finally:
+ try:
+ os.unlink(lockpath)
+ except OSError, e:
+ if e.errno != errno.ENOENT:
+ raise
+ if parentfn:
+ return parentfn(pid)
+ else:
+ return
+
+ if initfn:
+ initfn()
+
+ if opts['pid_file']:
+ mode = appendpid and 'a' or 'w'
+ fp = open(opts['pid_file'], mode)
+ fp.write(str(os.getpid()) + '\n')
+ fp.close()
+
+ if opts['daemon_pipefds']:
+ lockpath = opts['daemon_pipefds']
+ try:
+ os.setsid()
+ except AttributeError:
+ pass
+ os.unlink(lockpath)
+ util.hidewindow()
+ sys.stdout.flush()
+ sys.stderr.flush()
+
+ nullfd = os.open(util.nulldev, os.O_RDWR)
+ logfilefd = nullfd
+ if logfile:
+ logfilefd = os.open(logfile, os.O_RDWR | os.O_CREAT | os.O_APPEND)
+ os.dup2(nullfd, 0)
+ os.dup2(logfilefd, 1)
+ os.dup2(logfilefd, 2)
+ if nullfd not in (0, 1, 2):
+ os.close(nullfd)
+ if logfile and logfilefd not in (0, 1, 2):
+ os.close(logfilefd)
+
+ if runfn:
+ return runfn()
+
+def export(repo, revs, template='hg-%h.patch', fp=None, switch_parent=False,
+ opts=None):
+ '''export changesets as hg patches.'''
+
+ total = len(revs)
+ revwidth = max([len(str(rev)) for rev in revs])
+
+ def single(rev, seqno, fp):
+ ctx = repo[rev]
+ node = ctx.node()
+ parents = [p.node() for p in ctx.parents() if p]
+ branch = ctx.branch()
+ if switch_parent:
+ parents.reverse()
+ prev = (parents and parents[0]) or nullid
+
+ shouldclose = False
+ if not fp:
+ desc_lines = ctx.description().rstrip().split('\n')
+ desc = desc_lines[0] #Commit always has a first line.
+ fp = makefileobj(repo, template, node, desc=desc, total=total,
+ seqno=seqno, revwidth=revwidth, mode='ab')
+ if fp != template:
+ shouldclose = True
+ if fp != sys.stdout and util.safehasattr(fp, 'name'):
+ repo.ui.note("%s\n" % fp.name)
+
+ fp.write("# HG changeset patch\n")
+ fp.write("# User %s\n" % ctx.user())
+ fp.write("# Date %d %d\n" % ctx.date())
+ if branch and branch != 'default':
+ fp.write("# Branch %s\n" % branch)
+ fp.write("# Node ID %s\n" % hex(node))
+ fp.write("# Parent %s\n" % hex(prev))
+ if len(parents) > 1:
+ fp.write("# Parent %s\n" % hex(parents[1]))
+ fp.write(ctx.description().rstrip())
+ fp.write("\n\n")
+
+ for chunk in patch.diff(repo, prev, node, opts=opts):
+ fp.write(chunk)
+
+ if shouldclose:
+ fp.close()
+
+ for seqno, rev in enumerate(revs):
+ single(rev, seqno + 1, fp)
+
+def diffordiffstat(ui, repo, diffopts, node1, node2, match,
+ changes=None, stat=False, fp=None, prefix='',
+ listsubrepos=False):
+ '''show diff or diffstat.'''
+ if fp is None:
+ write = ui.write
+ else:
+ def write(s, **kw):
+ fp.write(s)
+
+ if stat:
+ diffopts = diffopts.copy(context=0)
+ width = 80
+ if not ui.plain():
+ width = ui.termwidth()
+ chunks = patch.diff(repo, node1, node2, match, changes, diffopts,
+ prefix=prefix)
+ for chunk, label in patch.diffstatui(util.iterlines(chunks),
+ width=width,
+ git=diffopts.git):
+ write(chunk, label=label)
+ else:
+ for chunk, label in patch.diffui(repo, node1, node2, match,
+ changes, diffopts, prefix=prefix):
+ write(chunk, label=label)
+
+ if listsubrepos:
+ ctx1 = repo[node1]
+ ctx2 = repo[node2]
+ for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
+ if node2 is not None:
+ node2 = ctx2.substate[subpath][1]
+ submatch = matchmod.narrowmatcher(subpath, match)
+ sub.diff(diffopts, node2, submatch, changes=changes,
+ stat=stat, fp=fp, prefix=prefix)
+
+class changeset_printer(object):
+ '''show changeset information when templating not requested.'''
+
+ def __init__(self, ui, repo, patch, diffopts, buffered):
+ self.ui = ui
+ self.repo = repo
+ self.buffered = buffered
+ self.patch = patch
+ self.diffopts = diffopts
+ self.header = {}
+ self.hunk = {}
+ self.lastheader = None
+ self.footer = None
+
+ def flush(self, rev):
+ if rev in self.header:
+ h = self.header[rev]
+ if h != self.lastheader:
+ self.lastheader = h
+ self.ui.write(h)
+ del self.header[rev]
+ if rev in self.hunk:
+ self.ui.write(self.hunk[rev])
+ del self.hunk[rev]
+ return 1
+ return 0
+
+ def close(self):
+ if self.footer:
+ self.ui.write(self.footer)
+
+ def show(self, ctx, copies=None, matchfn=None, **props):
+ if self.buffered:
+ self.ui.pushbuffer()
+ self._show(ctx, copies, matchfn, props)
+ self.hunk[ctx.rev()] = self.ui.popbuffer(labeled=True)
+ else:
+ self._show(ctx, copies, matchfn, props)
+
+ def _show(self, ctx, copies, matchfn, props):
+ '''show a single changeset or file revision'''
+ changenode = ctx.node()
+ rev = ctx.rev()
+
+ if self.ui.quiet:
+ self.ui.write("%d:%s\n" % (rev, short(changenode)),
+ label='log.node')
+ return
+
+ log = self.repo.changelog
+ date = util.datestr(ctx.date())
+
+ hexfunc = self.ui.debugflag and hex or short
+
+ parents = [(p, hexfunc(log.node(p)))
+ for p in self._meaningful_parentrevs(log, rev)]
+
+ self.ui.write(_("changeset: %d:%s\n") % (rev, hexfunc(changenode)),
+ label='log.changeset')
+
+ branch = ctx.branch()
+ # don't show the default branch name
+ if branch != 'default':
+ self.ui.write(_("branch: %s\n") % branch,
+ label='log.branch')
+ for bookmark in self.repo.nodebookmarks(changenode):
+ self.ui.write(_("bookmark: %s\n") % bookmark,
+ label='log.bookmark')
+ for tag in self.repo.nodetags(changenode):
+ self.ui.write(_("tag: %s\n") % tag,
+ label='log.tag')
+ for parent in parents:
+ self.ui.write(_("parent: %d:%s\n") % parent,
+ label='log.parent')
+
+ if self.ui.debugflag:
+ mnode = ctx.manifestnode()
+ self.ui.write(_("manifest: %d:%s\n") %
+ (self.repo.manifest.rev(mnode), hex(mnode)),
+ label='ui.debug log.manifest')
+ self.ui.write(_("user: %s\n") % ctx.user(),
+ label='log.user')
+ self.ui.write(_("date: %s\n") % date,
+ label='log.date')
+
+ if self.ui.debugflag:
+ files = self.repo.status(log.parents(changenode)[0], changenode)[:3]
+ for key, value in zip([_("files:"), _("files+:"), _("files-:")],
+ files):
+ if value:
+ self.ui.write("%-12s %s\n" % (key, " ".join(value)),
+ label='ui.debug log.files')
+ elif ctx.files() and self.ui.verbose:
+ self.ui.write(_("files: %s\n") % " ".join(ctx.files()),
+ label='ui.note log.files')
+ if copies and self.ui.verbose:
+ copies = ['%s (%s)' % c for c in copies]
+ self.ui.write(_("copies: %s\n") % ' '.join(copies),
+ label='ui.note log.copies')
+
+ extra = ctx.extra()
+ if extra and self.ui.debugflag:
+ for key, value in sorted(extra.items()):
+ self.ui.write(_("extra: %s=%s\n")
+ % (key, value.encode('string_escape')),
+ label='ui.debug log.extra')
+
+ description = ctx.description().strip()
+ if description:
+ if self.ui.verbose:
+ self.ui.write(_("description:\n"),
+ label='ui.note log.description')
+ self.ui.write(description,
+ label='ui.note log.description')
+ self.ui.write("\n\n")
+ else:
+ self.ui.write(_("summary: %s\n") %
+ description.splitlines()[0],
+ label='log.summary')
+ self.ui.write("\n")
+
+ self.showpatch(changenode, matchfn)
+
+ def showpatch(self, node, matchfn):
+ if not matchfn:
+ matchfn = self.patch
+ if matchfn:
+ stat = self.diffopts.get('stat')
+ diff = self.diffopts.get('patch')
+ diffopts = patch.diffopts(self.ui, self.diffopts)
+ prev = self.repo.changelog.parents(node)[0]
+ if stat:
+ diffordiffstat(self.ui, self.repo, diffopts, prev, node,
+ match=matchfn, stat=True)
+ if diff:
+ if stat:
+ self.ui.write("\n")
+ diffordiffstat(self.ui, self.repo, diffopts, prev, node,
+ match=matchfn, stat=False)
+ self.ui.write("\n")
+
+ def _meaningful_parentrevs(self, log, rev):
+ """Return list of meaningful (or all if debug) parentrevs for rev.
+
+ For merges (two non-nullrev revisions) both parents are meaningful.
+ Otherwise the first parent revision is considered meaningful if it
+ is not the preceding revision.
+ """
+ parents = log.parentrevs(rev)
+ if not self.ui.debugflag and parents[1] == nullrev:
+ if parents[0] >= rev - 1:
+ parents = []
+ else:
+ parents = [parents[0]]
+ return parents
+
+
+class changeset_templater(changeset_printer):
+ '''format changeset information.'''
+
+ def __init__(self, ui, repo, patch, diffopts, mapfile, buffered):
+ changeset_printer.__init__(self, ui, repo, patch, diffopts, buffered)
+ formatnode = ui.debugflag and (lambda x: x) or (lambda x: x[:12])
+ defaulttempl = {
+ 'parent': '{rev}:{node|formatnode} ',
+ 'manifest': '{rev}:{node|formatnode}',
+ 'file_copy': '{name} ({source})',
+ 'extra': '{key}={value|stringescape}'
+ }
+ # filecopy is preserved for compatibility reasons
+ defaulttempl['filecopy'] = defaulttempl['file_copy']
+ self.t = templater.templater(mapfile, {'formatnode': formatnode},
+ cache=defaulttempl)
+ self.cache = {}
+
+ def use_template(self, t):
+ '''set template string to use'''
+ self.t.cache['changeset'] = t
+
+ def _meaningful_parentrevs(self, ctx):
+ """Return list of meaningful (or all if debug) parentrevs for rev.
+ """
+ parents = ctx.parents()
+ if len(parents) > 1:
+ return parents
+ if self.ui.debugflag:
+ return [parents[0], self.repo['null']]
+ if parents[0].rev() >= ctx.rev() - 1:
+ return []
+ return parents
+
+ def _show(self, ctx, copies, matchfn, props):
+ '''show a single changeset or file revision'''
+
+ showlist = templatekw.showlist
+
+ # showparents() behaviour depends on ui trace level which
+ # causes unexpected behaviours at templating level and makes
+ # it harder to extract it in a standalone function. Its
+ # behaviour cannot be changed so leave it here for now.
+ def showparents(**args):
+ ctx = args['ctx']
+ parents = [[('rev', p.rev()), ('node', p.hex())]
+ for p in self._meaningful_parentrevs(ctx)]
+ return showlist('parent', parents, **args)
+
+ props = props.copy()
+ props.update(templatekw.keywords)
+ props['parents'] = showparents
+ props['templ'] = self.t
+ props['ctx'] = ctx
+ props['repo'] = self.repo
+ props['revcache'] = {'copies': copies}
+ props['cache'] = self.cache
+
+ # find correct templates for current mode
+
+ tmplmodes = [
+ (True, None),
+ (self.ui.verbose, 'verbose'),
+ (self.ui.quiet, 'quiet'),
+ (self.ui.debugflag, 'debug'),
+ ]
+
+ types = {'header': '', 'footer':'', 'changeset': 'changeset'}
+ for mode, postfix in tmplmodes:
+ for type in types:
+ cur = postfix and ('%s_%s' % (type, postfix)) or type
+ if mode and cur in self.t:
+ types[type] = cur
+
+ try:
+
+ # write header
+ if types['header']:
+ h = templater.stringify(self.t(types['header'], **props))
+ if self.buffered:
+ self.header[ctx.rev()] = h
+ else:
+ if self.lastheader != h:
+ self.lastheader = h
+ self.ui.write(h)
+
+ # write changeset metadata, then patch if requested
+ key = types['changeset']
+ self.ui.write(templater.stringify(self.t(key, **props)))
+ self.showpatch(ctx.node(), matchfn)
+
+ if types['footer']:
+ if not self.footer:
+ self.footer = templater.stringify(self.t(types['footer'],
+ **props))
+
+ except KeyError, inst:
+ msg = _("%s: no key named '%s'")
+ raise util.Abort(msg % (self.t.mapfile, inst.args[0]))
+ except SyntaxError, inst:
+ raise util.Abort('%s: %s' % (self.t.mapfile, inst.args[0]))
+
+def show_changeset(ui, repo, opts, buffered=False):
+ """show one changeset using template or regular display.
+
+ Display format will be the first non-empty hit of:
+ 1. option 'template'
+ 2. option 'style'
+ 3. [ui] setting 'logtemplate'
+ 4. [ui] setting 'style'
+ If all of these values are either the unset or the empty string,
+ regular display via changeset_printer() is done.
+ """
+ # options
+ patch = False
+ if opts.get('patch') or opts.get('stat'):
+ patch = scmutil.matchall(repo)
+
+ tmpl = opts.get('template')
+ style = None
+ if tmpl:
+ tmpl = templater.parsestring(tmpl, quoted=False)
+ else:
+ style = opts.get('style')
+
+ # ui settings
+ if not (tmpl or style):
+ tmpl = ui.config('ui', 'logtemplate')
+ if tmpl:
+ tmpl = templater.parsestring(tmpl)
+ else:
+ style = util.expandpath(ui.config('ui', 'style', ''))
+
+ if not (tmpl or style):
+ return changeset_printer(ui, repo, patch, opts, buffered)
+
+ mapfile = None
+ if style and not tmpl:
+ mapfile = style
+ if not os.path.split(mapfile)[0]:
+ mapname = (templater.templatepath('map-cmdline.' + mapfile)
+ or templater.templatepath(mapfile))
+ if mapname:
+ mapfile = mapname
+
+ try:
+ t = changeset_templater(ui, repo, patch, opts, mapfile, buffered)
+ except SyntaxError, inst:
+ raise util.Abort(inst.args[0])
+ if tmpl:
+ t.use_template(tmpl)
+ return t
+
def finddate(ui, repo, date):
    """Find the tipmost changeset that matches the given date spec"""

    datematch = util.matchdate(date)
    matcher = scmutil.matchall(repo)
    # rev -> date tuple for every changeset whose date matched
    hits = {}

    def collect(ctx, fns):
        when = ctx.date()
        if datematch(when[0]):
            hits[ctx.rev()] = when

    # default range '-1:0' walks tip-first, so the first hit is the tipmost
    for ctx in walkchangerevs(repo, matcher, {'rev': None}, collect):
        rev = ctx.rev()
        if rev in hits:
            ui.status(_("Found revision %s from %s\n") %
                      (rev, util.datestr(hits[rev])))
            return str(rev)

    raise util.Abort(_("revision matching date not found"))
+
def walkchangerevs(repo, match, opts, prepare):
    '''Iterate over files and the revs in which they changed.

    Callers most commonly need to iterate backwards over the history
    in which they are interested. Doing so has awful (quadratic-looking)
    performance, so we use iterators in a "windowed" way.

    We walk a window of revisions in the desired order. Within the
    window, we first walk forwards to gather data, then in the desired
    order (usually backwards) to display it.

    This function returns an iterator yielding contexts. Before
    yielding each context, the iterator will first call the prepare
    function on each context in the window in forward order.'''

    def increasing_windows(start, end, windowsize=8, sizelimit=512):
        # yield (start, length) pairs covering start..end in either
        # direction, doubling the window size up to sizelimit
        if start < end:
            while start < end:
                yield start, min(windowsize, end - start)
                start += windowsize
                if windowsize < sizelimit:
                    windowsize *= 2
        else:
            while start > end:
                yield start, min(windowsize, start - end - 1)
                start -= windowsize
                if windowsize < sizelimit:
                    windowsize *= 2

    follow = opts.get('follow') or opts.get('follow_first')

    # empty repository: nothing to walk
    if not len(repo):
        return []

    if follow:
        defrange = '%s:0' % repo['.'].rev()
    else:
        defrange = '-1:0'
    revs = scmutil.revrange(repo, opts['rev'] or [defrange])
    if not revs:
        return []
    wanted = set()
    # slow path: patterns, or explicit files combined with --removed,
    # force scanning ctx.files() of every changeset in range
    slowpath = match.anypats() or (match.files() and opts.get('removed'))
    fncache = {}
    change = util.cachefunc(repo.changectx)

    # First step is to fill wanted, the set of revisions that we want to yield.
    # When it does not induce extra cost, we also fill fncache for revisions in
    # wanted: a cache of filenames that were changed (ctx.files()) and that
    # match the file filtering conditions.

    if not slowpath and not match.files():
        # No files, no patterns.  Display all revs.
        wanted = set(revs)
    # copy sources discovered while following renames; consumed by iterfiles()
    copies = []

    if not slowpath:
        # We only have to read through the filelog to find wanted revisions

        minrev, maxrev = min(revs), max(revs)
        def filerevgen(filelog, last):
            """
            Only files, no patterns.  Check the history of each file.

            Examines filelog entries within minrev, maxrev linkrev range
            Returns an iterator yielding (linkrev, parentlinkrevs, copied)
            tuples in backwards order
            """
            cl_count = len(repo)
            revs = []
            for j in xrange(0, last + 1):
                linkrev = filelog.linkrev(j)
                if linkrev < minrev:
                    continue
                # only yield rev for which we have the changelog, it can
                # happen while doing "hg log" during a pull or commit
                if linkrev >= cl_count:
                    break

                parentlinkrevs = []
                for p in filelog.parentrevs(j):
                    if p != nullrev:
                        parentlinkrevs.append(filelog.linkrev(p))
                n = filelog.node(j)
                revs.append((linkrev, parentlinkrevs,
                             follow and filelog.renamed(n)))

            return reversed(revs)
        def iterfiles():
            # explicit files first, then any (name, node) copy sources
            # appended to `copies` while this loop is running
            for filename in match.files():
                yield filename, None
            for filename_node in copies:
                yield filename_node
        for file_, node in iterfiles():
            filelog = repo.file(file_)
            if not len(filelog):
                if node is None:
                    # A zero count may be a directory or deleted file, so
                    # try to find matching entries on the slow path.
                    if follow:
                        raise util.Abort(
                            _('cannot follow nonexistent file: "%s"') % file_)
                    slowpath = True
                    break
                else:
                    continue

            if node is None:
                last = len(filelog) - 1
            else:
                last = filelog.rev(node)


            # keep track of all ancestors of the file
            ancestors = set([filelog.linkrev(last)])

            # iterate from latest to oldest revision
            for rev, flparentlinkrevs, copied in filerevgen(filelog, last):
                if not follow:
                    if rev > maxrev:
                        continue
                else:
                    # Note that last might not be the first interesting
                    # rev to us:
                    # if the file has been changed after maxrev, we'll
                    # have linkrev(last) > maxrev, and we still need
                    # to explore the file graph
                    if rev not in ancestors:
                        continue
                    # XXX insert 1327 fix here
                    if flparentlinkrevs:
                        ancestors.update(flparentlinkrevs)

                fncache.setdefault(rev, []).append(file_)
                wanted.add(rev)
                if copied:
                    copies.append(copied)
    if slowpath:
        # We have to read the changelog to match filenames against
        # changed files

        if follow:
            raise util.Abort(_('can only follow copies/renames for explicit '
                               'filenames'))

        # The slow path checks files modified in every changeset.
        for i in sorted(revs):
            ctx = change(i)
            matches = filter(match, ctx.files())
            if matches:
                fncache[i] = matches
                wanted.add(i)

    class followfilter(object):
        # Decides whether a revision belongs to the history being
        # followed; walks forward (descendants) or backward (parents)
        # relative to the first revision it is fed.
        def __init__(self, onlyfirst=False):
            self.startrev = nullrev
            self.roots = set()
            self.onlyfirst = onlyfirst

        def match(self, rev):
            def realparents(rev):
                if self.onlyfirst:
                    return repo.changelog.parentrevs(rev)[0:1]
                else:
                    return filter(lambda x: x != nullrev,
                                  repo.changelog.parentrevs(rev))

            if self.startrev == nullrev:
                self.startrev = rev
                return True

            if rev > self.startrev:
                # forward: all descendants
                if not self.roots:
                    self.roots.add(self.startrev)
                for parent in realparents(rev):
                    if parent in self.roots:
                        self.roots.add(rev)
                        return True
            else:
                # backwards: all parents
                if not self.roots:
                    self.roots.update(realparents(self.startrev))
                if rev in self.roots:
                    self.roots.remove(rev)
                    self.roots.update(realparents(rev))
                    return True

            return False

    # it might be worthwhile to do this in the iterator if the rev range
    # is descending and the prune args are all within that range
    for rev in opts.get('prune', ()):
        rev = repo.changelog.rev(repo.lookup(rev))
        ff = followfilter()
        stop = min(revs[0], revs[-1])
        for x in xrange(rev, stop - 1, -1):
            if ff.match(x):
                wanted.discard(x)

    # Now that wanted is correctly initialized, we can iterate over the
    # revision range, yielding only revisions in wanted.
    def iterate():
        if follow and not match.files():
            ff = followfilter(onlyfirst=opts.get('follow_first'))
            def want(rev):
                return ff.match(rev) and rev in wanted
        else:
            def want(rev):
                return rev in wanted

        for i, window in increasing_windows(0, len(revs)):
            nrevs = [rev for rev in revs[i:i + window] if want(rev)]
            # first pass, forward order: let the caller gather data
            for rev in sorted(nrevs):
                fns = fncache.get(rev)
                ctx = change(rev)
                if not fns:
                    def fns_generator():
                        for f in ctx.files():
                            if match(f):
                                yield f
                    fns = fns_generator()
                prepare(ctx, fns)
            # second pass, requested order: yield the contexts
            for rev in nrevs:
                yield change(rev)
    return iterate()
+
def add(ui, repo, match, dryrun, listsubrepos, prefix):
    """Schedule matched files for addition at the next commit.

    Walks the working directory with ``match``, records additions unless
    ``dryrun`` is set, optionally recurses into subrepositories, and
    returns the list of names that could not be added.
    """
    join = lambda f: os.path.join(prefix, f)
    bad = []
    # wrap match.bad so failed lookups are collected here and still
    # reported through the original callback
    oldbad = match.bad
    match.bad = lambda x, y: bad.append(x) or oldbad(x, y)
    names = []
    wctx = repo[None]
    cca = None
    abort, warn = scmutil.checkportabilityalert(ui)
    if abort or warn:
        cca = scmutil.casecollisionauditor(ui, abort, wctx)
    for f in repo.walk(match):
        exact = match.exact(f)
        # add explicitly-named files even if already tracked elsewhere;
        # otherwise only files unknown to the dirstate
        if exact or f not in repo.dirstate:
            if cca:
                cca(f)
            names.append(f)
            if ui.verbose or not exact:
                ui.status(_('adding %s\n') % match.rel(join(f)))

    if listsubrepos:
        for subpath in wctx.substate:
            sub = wctx.sub(subpath)
            try:
                submatch = matchmod.narrowmatcher(subpath, match)
                bad.extend(sub.add(ui, submatch, dryrun, prefix))
            except error.LookupError:
                # a subrepo recorded in .hgsub may be absent on disk
                ui.status(_("skipping missing subrepository: %s\n")
                          % join(subpath))

    if not dryrun:
        rejected = wctx.add(names, prefix)
        # only explicitly requested files count as failures
        bad.extend(f for f in rejected if f in match.files())
    return bad
+
def duplicatecopies(repo, rev, p1, p2):
    "Reproduce copies found in the source revision in the dirstate for grafts"
    # Replay the copy/rename records of changeset `rev` on top of the
    # p1/p2 working-directory parents.
    found, diverged = copies.copies(repo, repo[rev], repo[p1], repo[p2], True)
    srcmanifest = repo[rev].manifest()
    p1manifest = repo[p1].manifest()
    for dst, src in found.iteritems():
        if dst not in srcmanifest:
            continue
        if src in srcmanifest or src in p1manifest:
            repo.dirstate.copy(src, dst)
        if (src in p1manifest and src not in srcmanifest
            and dst in p1manifest):
            repo.dirstate.remove(src)
+
def commit(ui, repo, commitfunc, pats, opts):
    '''commit the specified files or all outstanding changes'''
    # normalize a user-supplied date string before anything reads it
    if opts.get('date'):
        opts['date'] = util.parsedate(opts.get('date'))
    message = logmessage(ui, opts)

    # extract addremove carefully -- this function can be called from a command
    # that doesn't support addremove
    if opts.get('addremove'):
        scmutil.addremove(repo, pats, opts)

    matcher = scmutil.match(repo[None], pats, opts)
    return commitfunc(ui, repo, message, matcher, opts)
+
def commiteditor(repo, ctx, subs):
    """Return the changeset description, invoking the editor when empty."""
    if not ctx.description():
        return commitforceeditor(repo, ctx, subs)
    return ctx.description()
+
def commitforceeditor(repo, ctx, subs):
    """Run the user's editor to collect a commit message for ctx.

    The buffer is pre-filled with the existing description (if any) plus
    'HG:' helper lines describing the pending commit; those lines are
    stripped from the edited result.  Aborts on an empty message.
    """
    edittext = []
    modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
    if ctx.description():
        edittext.append(ctx.description())
    edittext.append("")
    edittext.append("") # Empty line between message and comments.
    edittext.append(_("HG: Enter commit message."
                      " Lines beginning with 'HG:' are removed."))
    edittext.append(_("HG: Leave message empty to abort commit."))
    edittext.append("HG: --")
    edittext.append(_("HG: user: %s") % ctx.user())
    if ctx.p2():
        edittext.append(_("HG: branch merge"))
    if ctx.branch():
        edittext.append(_("HG: branch '%s'") % ctx.branch())
    edittext.extend([_("HG: subrepo %s") % s for s in subs])
    edittext.extend([_("HG: added %s") % f for f in added])
    edittext.extend([_("HG: changed %s") % f for f in modified])
    edittext.extend([_("HG: removed %s") % f for f in removed])
    if not added and not modified and not removed:
        edittext.append(_("HG: no files changed"))
    edittext.append("")
    # run editor in the repository root
    olddir = os.getcwd()
    os.chdir(repo.root)
    text = repo.ui.edit("\n".join(edittext), ctx.user())
    # drop every 'HG:' helper line the user left in place
    text = re.sub("(?m)^HG:.*(\n|$)", "", text)
    os.chdir(olddir)

    if not text.strip():
        raise util.Abort(_("empty commit message"))

    return text
+
def command(table):
    '''returns a function object bound to table which can be used as
    a decorator for populating table as a command table'''

    def cmd(name, options, synopsis=None):
        def decorator(func):
            # store (func, options-copy[, synopsis]); copy the options
            # list so later mutation by the caller cannot leak in
            entry = (func, options[:])
            if synopsis:
                entry = entry + (synopsis,)
            table[name] = entry
            return func
        return decorator

    return cmd
diff --git a/websdk/mercurial/commands.py b/websdk/mercurial/commands.py
index dcb9954..7ed52fb 120000..100644
--- a/websdk/mercurial/commands.py
+++ b/websdk/mercurial/commands.py
@@ -1 +1,5672 @@
-/usr/share/pyshared/mercurial/commands.py \ No newline at end of file
+# commands.py - command processing for mercurial
+#
+# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from node import hex, bin, nullid, nullrev, short
+from lock import release
+from i18n import _, gettext
+import os, re, difflib, time, tempfile, errno
+import hg, scmutil, util, revlog, extensions, copies, error, bookmarks
+import patch, help, url, encoding, templatekw, discovery
+import archival, changegroup, cmdutil, hbisect
+import sshserver, hgweb, hgweb.server, commandserver
+import merge as mergemod
+import minirst, revset, fileset
+import dagparser, context, simplemerge
+import random, setdiscovery, treediscovery, dagutil
+
# command name -> (function, options[, synopsis]) entries, populated by
# the @command decorator below
table = {}

command = cmdutil.command(table)

# common command options

# options accepted by every command (option tuples are
# (shortname, longname, default, help[, value-label]))
globalopts = [
    ('R', 'repository', '',
     _('repository root directory or name of overlay bundle file'),
     _('REPO')),
    ('', 'cwd', '',
     _('change working directory'), _('DIR')),
    ('y', 'noninteractive', None,
     _('do not prompt, automatically pick the first choice for all prompts')),
    ('q', 'quiet', None, _('suppress output')),
    ('v', 'verbose', None, _('enable additional output')),
    ('', 'config', [],
     _('set/override config option (use \'section.name=value\')'),
     _('CONFIG')),
    ('', 'debug', None, _('enable debugging output')),
    ('', 'debugger', None, _('start debugger')),
    ('', 'encoding', encoding.encoding, _('set the charset encoding'),
     _('ENCODE')),
    ('', 'encodingmode', encoding.encodingmode,
     _('set the charset encoding mode'), _('MODE')),
    ('', 'traceback', None, _('always print a traceback on exception')),
    ('', 'time', None, _('time how long the command takes')),
    ('', 'profile', None, _('print command execution profile')),
    ('', 'version', None, _('output version information and exit')),
    ('h', 'help', None, _('display help and exit')),
]

dryrunopts = [('n', 'dry-run', None,
               _('do not perform actions, just print output'))]

# options shared by commands that talk to a remote repository
remoteopts = [
    ('e', 'ssh', '',
     _('specify ssh command to use'), _('CMD')),
    ('', 'remotecmd', '',
     _('specify hg command to run on the remote side'), _('CMD')),
    ('', 'insecure', None,
     _('do not verify server certificate (ignoring web.cacerts config)')),
]

# options shared by commands that walk the working directory
walkopts = [
    ('I', 'include', [],
     _('include names matching the given patterns'), _('PATTERN')),
    ('X', 'exclude', [],
     _('exclude names matching the given patterns'), _('PATTERN')),
]

commitopts = [
    ('m', 'message', '',
     _('use text as commit message'), _('TEXT')),
    ('l', 'logfile', '',
     _('read commit message from file'), _('FILE')),
]

commitopts2 = [
    ('d', 'date', '',
     _('record the specified date as commit date'), _('DATE')),
    ('u', 'user', '',
     _('record the specified user as committer'), _('USER')),
]

templateopts = [
    ('', 'style', '',
     _('display using template map file'), _('STYLE')),
    ('', 'template', '',
     _('display with template'), _('TEMPLATE')),
]

logopts = [
    ('p', 'patch', None, _('show patch')),
    ('g', 'git', None, _('use git extended diff format')),
    ('l', 'limit', '',
     _('limit number of changes displayed'), _('NUM')),
    ('M', 'no-merges', None, _('do not show merges')),
    ('', 'stat', None, _('output diffstat-style summary of changes')),
] + templateopts

diffopts = [
    ('a', 'text', None, _('treat all files as text')),
    ('g', 'git', None, _('use git extended diff format')),
    ('', 'nodates', None, _('omit dates from diff headers'))
]

diffopts2 = [
    ('p', 'show-function', None, _('show which function each change is in')),
    ('', 'reverse', None, _('produce a diff that undoes the changes')),
    ('w', 'ignore-all-space', None,
     _('ignore white space when comparing lines')),
    ('b', 'ignore-space-change', None,
     _('ignore changes in the amount of white space')),
    ('B', 'ignore-blank-lines', None,
     _('ignore changes whose lines are all blank')),
    ('U', 'unified', '',
     _('number of lines of context to show'), _('NUM')),
    ('', 'stat', None, _('output diffstat-style summary of changes')),
]

mergetoolopts = [
    ('t', 'tool', '', _('specify merge tool')),
]

similarityopts = [
    ('s', 'similarity', '',
     _('guess renamed files by similarity (0<=s<=100)'), _('SIMILARITY'))
]

subrepoopts = [
    ('S', 'subrepos', None,
     _('recurse into subrepositories'))
]
+
+# Commands start here, listed alphabetically
+
@command('^add',
    walkopts + subrepoopts + dryrunopts,
    _('[OPTION]... [FILE]...'))
def add(ui, repo, *pats, **opts):
    """add the specified files on the next commit

    Schedule files to be version controlled and added to the
    repository.

    The files will be added to the repository at the next commit. To
    undo an add before that, see :hg:`forget`.

    If no names are given, add all files to the repository.

    .. container:: verbose

       An example showing how new (unknown) files are added
       automatically by :hg:`add`::

         $ ls
         foo.c
         $ hg status
         ? foo.c
         $ hg add
         adding foo.c
         $ hg status
         A foo.c

    Returns 0 if all files are successfully added.
    """

    matcher = scmutil.match(repo[None], pats, opts)
    rejected = cmdutil.add(ui, repo, matcher, opts.get('dry_run'),
                           opts.get('subrepos'), prefix="")
    if rejected:
        return 1
    return 0
+
@command('addremove',
    similarityopts + walkopts + dryrunopts,
    _('[OPTION]... [FILE]...'))
def addremove(ui, repo, *pats, **opts):
    """add all new files, delete all missing files

    Add all new files and remove all missing files from the
    repository.

    New files are ignored if they match any of the patterns in
    ``.hgignore``. As with add, these changes take effect at the next
    commit.

    Use the -s/--similarity option to detect renamed files. With a
    parameter greater than 0, this compares every removed file with
    every added file and records those similar enough as renames. This
    option takes a percentage between 0 (disabled) and 100 (files must
    be identical) as its parameter. Detecting renamed files this way
    can be expensive. After using this option, :hg:`status -C` can be
    used to check which files were identified as moved or renamed.

    Returns 0 if all files are successfully added.
    """
    # --similarity defaults to 100 (identical files only)
    raw = opts.get('similarity') or 100
    try:
        sim = float(raw)
    except ValueError:
        raise util.Abort(_('similarity must be a number'))
    if not 0 <= sim <= 100:
        raise util.Abort(_('similarity must be between 0 and 100'))
    return scmutil.addremove(repo, pats, opts, similarity=sim / 100.0)
+
@command('^annotate|blame',
    [('r', 'rev', '', _('annotate the specified revision'), _('REV')),
    ('', 'follow', None,
     _('follow copies/renames and list the filename (DEPRECATED)')),
    ('', 'no-follow', None, _("don't follow copies and renames")),
    ('a', 'text', None, _('treat all files as text')),
    ('u', 'user', None, _('list the author (long with -v)')),
    ('f', 'file', None, _('list the filename')),
    ('d', 'date', None, _('list the date (short with -q)')),
    ('n', 'number', None, _('list the revision number (default)')),
    ('c', 'changeset', None, _('list the changeset')),
    ('l', 'line-number', None, _('show line number at the first appearance'))
    ] + walkopts,
    _('[-r REV] [-f] [-a] [-u] [-d] [-n] [-c] [-l] FILE...'))
def annotate(ui, repo, *pats, **opts):
    """show changeset information by line for each file

    List changes in files, showing the revision id responsible for
    each line

    This command is useful for discovering when a change was made and
    by whom.

    Without the -a/--text option, annotate will avoid processing files
    it detects as binary. With -a, annotate will annotate the file
    anyway, although the results will probably be neither useful
    nor desirable.

    Returns 0 on success.
    """
    if opts.get('follow'):
        # --follow is deprecated and now just an alias for -f/--file
        # to mimic the behavior of Mercurial before version 1.5
        opts['file'] = True

    datefunc = ui.quiet and util.shortdate or util.datestr
    getdate = util.cachefunc(lambda x: datefunc(x[0].date()))

    if not pats:
        raise util.Abort(_('at least one filename or pattern is required'))

    # one (option-name, column-separator, formatter) triple per column;
    # each formatter takes an (fctx, lineno) pair from fctx.annotate()
    opmap = [('user', ' ', lambda x: ui.shortuser(x[0].user())),
             ('number', ' ', lambda x: str(x[0].rev())),
             ('changeset', ' ', lambda x: short(x[0].node())),
             ('date', ' ', getdate),
             ('file', ' ', lambda x: x[0].path()),
             ('line_number', ':', lambda x: str(x[1])),
            ]

    # no column requested: default to the revision number
    if (not opts.get('user') and not opts.get('changeset')
        and not opts.get('date') and not opts.get('file')):
        opts['number'] = True

    linenumber = opts.get('line_number') is not None
    if linenumber and (not opts.get('changeset')) and (not opts.get('number')):
        raise util.Abort(_('at least one of -n/-c is required for -l'))

    funcmap = [(func, sep) for op, sep, func in opmap if opts.get(op)]
    funcmap[0] = (funcmap[0][0], '') # no separator in front of first column

    def bad(x, y):
        raise util.Abort("%s: %s" % (x, y))

    ctx = scmutil.revsingle(repo, opts.get('rev'))
    m = scmutil.match(ctx, pats, opts)
    m.bad = bad
    follow = not opts.get('no_follow')
    for abs in ctx.walk(m):
        fctx = ctx[abs]
        if not opts.get('text') and util.binary(fctx.data()):
            ui.write(_("%s: binary file\n") % ((pats and m.rel(abs)) or abs))
            continue

        lines = fctx.annotate(follow=follow, linenumber=linenumber)
        # pieces[i] is the list of formatted cells for column i, padded
        # to that column's widest cell so columns line up
        pieces = []

        for f, sep in funcmap:
            l = [f(n) for n, dummy in lines]
            if l:
                sized = [(x, encoding.colwidth(x)) for x in l]
                ml = max([w for x, w in sized])
                pieces.append(["%s%s%s" % (sep, ' ' * (ml - w), x)
                               for x, w in sized])

        if pieces:
            # zip(*pieces) transposes columns back into per-line tuples
            for p, l in zip(zip(*pieces), lines):
                ui.write("%s: %s" % ("".join(p), l[1]))
+
@command('archive',
    [('', 'no-decode', None, _('do not pass files through decoders')),
    ('p', 'prefix', '', _('directory prefix for files in archive'),
     _('PREFIX')),
    ('r', 'rev', '', _('revision to distribute'), _('REV')),
    ('t', 'type', '', _('type of distribution to create'), _('TYPE')),
    ] + subrepoopts + walkopts,
    _('[OPTION]... DEST'))
def archive(ui, repo, dest, **opts):
    '''create an unversioned archive of a repository revision

    By default, the revision used is the parent of the working
    directory; use -r/--rev to specify a different revision.

    The archive type is automatically detected based on file
    extension (or override using -t/--type).

    .. container:: verbose

      Examples:

      - create a zip file containing the 1.0 release::

          hg archive -r 1.0 project-1.0.zip

      - create a tarball excluding .hg files::

          hg archive project.tar.gz -X ".hg*"

    Valid types are:

    :``files``: a directory full of files (default)
    :``tar``:   tar archive, uncompressed
    :``tbz2``:  tar archive, compressed using bzip2
    :``tgz``:   tar archive, compressed using gzip
    :``uzip``:  zip archive, uncompressed
    :``zip``:   zip archive, compressed using deflate

    The exact name of the destination archive or directory is given
    using a format string; see :hg:`help export` for details.

    Each member added to an archive file has a directory prefix
    prepended. Use -p/--prefix to specify a format string for the
    prefix. The default is the basename of the archive, with suffixes
    removed.

    Returns 0 on success.
    '''

    ctx = scmutil.revsingle(repo, opts.get('rev'))
    if not ctx:
        raise util.Abort(_('no working directory: please specify a revision'))
    node = ctx.node()
    # expand format-string escapes (e.g. %h) in the destination name
    dest = cmdutil.makefilename(repo, dest, node)
    if os.path.realpath(dest) == repo.root:
        raise util.Abort(_('repository root cannot be destination'))

    kind = opts.get('type') or archival.guesskind(dest) or 'files'
    prefix = opts.get('prefix')

    # '-' means stream the archive to stdout
    if dest == '-':
        if kind == 'files':
            raise util.Abort(_('cannot archive plain files to stdout'))
        dest = cmdutil.makefileobj(repo, dest)
        if not prefix:
            prefix = os.path.basename(repo.root) + '-%h'

    prefix = cmdutil.makefilename(repo, prefix, node)
    matchfn = scmutil.match(ctx, [], opts)
    archival.archive(repo, dest, node, kind, not opts.get('no_decode'),
                     matchfn, prefix, subrepos=opts.get('subrepos'))
+
@command('backout',
    [('', 'merge', None, _('merge with old dirstate parent after backout')),
    ('', 'parent', '',
     _('parent to choose when backing out merge (DEPRECATED)'), _('REV')),
    ('r', 'rev', '', _('revision to backout'), _('REV')),
    ] + mergetoolopts + walkopts + commitopts + commitopts2,
    _('[OPTION]... [-r] REV'))
def backout(ui, repo, node=None, rev=None, **opts):
    '''reverse effect of earlier changeset

    Prepare a new changeset with the effect of REV undone in the
    current working directory.

    If REV is the parent of the working directory, then this new changeset
    is committed automatically. Otherwise, hg needs to merge the
    changes and the merged result is left uncommitted.

    .. note::
       backout cannot be used to fix either an unwanted or
       incorrect merge.

    .. container:: verbose

      By default, the pending changeset will have one parent,
      maintaining a linear history. With --merge, the pending
      changeset will instead have two parents: the old parent of the
      working directory and a new child of REV that simply undoes REV.

      Before version 1.7, the behavior without --merge was equivalent
      to specifying --merge followed by :hg:`update --clean .` to
      cancel the merge and leave the child of REV as a head to be
      merged separately.

    See :hg:`help dates` for a list of formats valid for -d/--date.

    Returns 0 on success.
    '''
    # REV may arrive positionally (node) or via -r (rev); accept either
    if rev and node:
        raise util.Abort(_("please specify just one revision"))

    if not rev:
        rev = node

    if not rev:
        raise util.Abort(_("please specify a revision to backout"))

    date = opts.get('date')
    if date:
        opts['date'] = util.parsedate(date)

    cmdutil.bailifchanged(repo)
    node = scmutil.revsingle(repo, rev).node()

    op1, op2 = repo.dirstate.parents()
    # the target must be an ancestor of the working directory parent
    a = repo.changelog.ancestor(op1, node)
    if a != node:
        raise util.Abort(_('cannot backout change on a different branch'))

    p1, p2 = repo.changelog.parents(node)
    if p1 == nullid:
        raise util.Abort(_('cannot backout a change with no parents'))
    if p2 != nullid:
        # backing out a merge requires --parent to pick which side to keep
        if not opts.get('parent'):
            raise util.Abort(_('cannot backout a merge changeset'))
        p = repo.lookup(opts['parent'])
        if p not in (p1, p2):
            raise util.Abort(_('%s is not a parent of %s') %
                             (short(p), short(node)))
        parent = p
    else:
        if opts.get('parent'):
            raise util.Abort(_('cannot use --parent on non-merge changeset'))
        parent = p1

    # the backout should appear on the same branch
    branch = repo.dirstate.branch()
    hg.clean(repo, node, show_stats=False)
    repo.dirstate.setbranch(branch)
    # revert the working directory to the chosen parent of REV,
    # effectively undoing REV's changes
    revert_opts = opts.copy()
    revert_opts['date'] = None
    revert_opts['all'] = True
    revert_opts['rev'] = hex(parent)
    revert_opts['no_backup'] = None
    revert(ui, repo, **revert_opts)
    if not opts.get('merge') and op1 != node:
        # linear-history mode: commit happens on update below, not here
        try:
            ui.setconfig('ui', 'forcemerge', opts.get('tool', ''))
            return hg.update(repo, op1)
        finally:
            ui.setconfig('ui', 'forcemerge', '')

    commit_opts = opts.copy()
    commit_opts['addremove'] = False
    if not commit_opts['message'] and not commit_opts['logfile']:
        # we don't translate commit messages
        commit_opts['message'] = "Backed out changeset %s" % short(node)
        commit_opts['force_editor'] = True
    commit(ui, repo, **commit_opts)
    def nice(node):
        return '%d:%s' % (repo.changelog.rev(node), short(node))
    ui.status(_('changeset %s backs out changeset %s\n') %
              (nice(repo.changelog.tip()), nice(node)))
    if opts.get('merge') and op1 != node:
        # --merge: merge the backout changeset with the old dirstate parent
        hg.clean(repo, op1, show_stats=False)
        ui.status(_('merging with changeset %s\n')
                  % nice(repo.changelog.tip()))
        try:
            ui.setconfig('ui', 'forcemerge', opts.get('tool', ''))
            return hg.merge(repo, hex(repo.changelog.tip()))
        finally:
            ui.setconfig('ui', 'forcemerge', '')
    return 0
+
@command('bisect',
    [('r', 'reset', False, _('reset bisect state')),
    ('g', 'good', False, _('mark changeset good')),
    ('b', 'bad', False, _('mark changeset bad')),
    ('s', 'skip', False, _('skip testing changeset')),
    ('e', 'extend', False, _('extend the bisect range')),
    ('c', 'command', '', _('use command to check changeset state'), _('CMD')),
    ('U', 'noupdate', False, _('do not update to target'))],
    _("[-gbsr] [-U] [-c CMD] [REV]"))
def bisect(ui, repo, rev=None, extra=None, command=None,
               reset=None, good=None, bad=None, skip=None, extend=None,
               noupdate=None):
    """subdivision search of changesets

    This command helps to find changesets which introduce problems. To
    use, mark the earliest changeset you know exhibits the problem as
    bad, then mark the latest changeset which is free from the problem
    as good. Bisect will update your working directory to a revision
    for testing (unless the -U/--noupdate option is specified). Once
    you have performed tests, mark the working directory as good or
    bad, and bisect will either update to another candidate changeset
    or announce that it has found the bad revision.

    As a shortcut, you can also use the revision argument to mark a
    revision as good or bad without checking it out first.

    If you supply a command, it will be used for automatic bisection.
    Its exit status will be used to mark revisions as good or bad:
    status 0 means good, 125 means to skip the revision, 127
    (command not found) will abort the bisection, and any other
    non-zero exit status means the revision is bad.

    .. container:: verbose

      Some examples:

      - start a bisection with known bad revision 12, and good revision 34::

          hg bisect --bad 34
          hg bisect --good 12

      - advance the current bisection by marking current revision as good or
        bad::

          hg bisect --good
          hg bisect --bad

      - mark the current revision, or a known revision, to be skipped (eg. if
        that revision is not usable because of another issue)::

          hg bisect --skip
          hg bisect --skip 23

      - forget the current bisection::

          hg bisect --reset

      - use 'make && make tests' to automatically find the first broken
        revision::

          hg bisect --reset
          hg bisect --bad 34
          hg bisect --good 12
          hg bisect --command 'make && make tests'

      - see all changesets whose states are already known in the current
        bisection::

          hg log -r "bisect(pruned)"

      - see all changesets that took part in the current bisection::

          hg log -r "bisect(range)"

      - with the graphlog extension, you can even get a nice graph::

          hg log --graph -r "bisect(range)"

      See :hg:`help revsets` for more about the `bisect()` keyword.

    Returns 0 on success.
    """
    def extendbisectrange(nodes, good):
        # bisect is incomplete when it ends on a merge node and
        # one of the parent was not checked.
        parents = repo[nodes[0]].parents()
        if len(parents) > 1:
            side = good and state['bad'] or state['good']
            num = len(set(i.node() for i in parents) & set(side))
            if num == 1:
                return parents[0].ancestor(parents[1])
        return None

    def print_result(nodes, good):
        # report the narrowed-down revision(s) to the user
        displayer = cmdutil.show_changeset(ui, repo, {})
        if len(nodes) == 1:
            # narrowed it down to a single revision
            if good:
                ui.write(_("The first good revision is:\n"))
            else:
                ui.write(_("The first bad revision is:\n"))
            displayer.show(repo[nodes[0]])
            extendnode = extendbisectrange(nodes, good)
            if extendnode is not None:
                ui.write(_('Not all ancestors of this changeset have been'
                           ' checked.\nUse bisect --extend to continue the '
                           'bisection from\nthe common ancestor, %s.\n')
                         % extendnode)
        else:
            # multiple possible revisions
            if good:
                ui.write(_("Due to skipped revisions, the first "
                        "good revision could be any of:\n"))
            else:
                ui.write(_("Due to skipped revisions, the first "
                        "bad revision could be any of:\n"))
            for n in nodes:
                displayer.show(repo[n])
        displayer.close()

    def check_state(state, interactive=True):
        # a bisection needs at least one good and one bad revision
        if not state['good'] or not state['bad']:
            if (good or bad or skip or reset) and interactive:
                return
            if not state['good']:
                raise util.Abort(_('cannot bisect (no known good revisions)'))
            else:
                raise util.Abort(_('cannot bisect (no known bad revisions)'))
        return True

    # backward compatibility
    if rev in "good bad reset init".split():
        ui.warn(_("(use of 'hg bisect <cmd>' is deprecated)\n"))
        cmd, rev, extra = rev, extra, None
        if cmd == "good":
            good = True
        elif cmd == "bad":
            bad = True
        else:
            reset = True
    elif extra or good + bad + skip + reset + extend + bool(command) > 1:
        raise util.Abort(_('incompatible arguments'))

    if reset:
        p = repo.join("bisect.state")
        if os.path.exists(p):
            os.unlink(p)
        return

    state = hbisect.load_state(repo)

    if command:
        # automatic mode: run the command repeatedly, classifying each
        # candidate by its exit status until the culprit is isolated
        changesets = 1
        try:
            while changesets:
                # update state
                status = util.system(command, out=ui.fout)
                if status == 125:
                    transition = "skip"
                elif status == 0:
                    transition = "good"
                # status < 0 means process was killed
                elif status == 127:
                    raise util.Abort(_("failed to execute %s") % command)
                elif status < 0:
                    raise util.Abort(_("%s killed") % command)
                else:
                    transition = "bad"
                ctx = scmutil.revsingle(repo, rev)
                rev = None # clear for future iterations
                state[transition].append(ctx.node())
                ui.status(_('Changeset %d:%s: %s\n') % (ctx, ctx, transition))
                check_state(state, interactive=False)
                # bisect
                nodes, changesets, good = hbisect.bisect(repo.changelog, state)
                # update to next check
                cmdutil.bailifchanged(repo)
                hg.clean(repo, nodes[0], show_stats=False)
        finally:
            hbisect.save_state(repo, state)
        print_result(nodes, good)
        return

    # update state

    if rev:
        nodes = [repo.lookup(i) for i in scmutil.revrange(repo, [rev])]
    else:
        nodes = [repo.lookup('.')]

    if good or bad or skip:
        if good:
            state['good'] += nodes
        elif bad:
            state['bad'] += nodes
        elif skip:
            state['skip'] += nodes
        hbisect.save_state(repo, state)

    if not check_state(state):
        return

    # actually bisect
    nodes, changesets, good = hbisect.bisect(repo.changelog, state)
    if extend:
        if not changesets:
            extendnode = extendbisectrange(nodes, good)
            if extendnode is not None:
                # i18n fix: apply the % substitution to the *translated*
                # string; formatting inside _() made the catalog lookup
                # miss for every language
                ui.write(_("Extending search to changeset %d:%s\n")
                         % (extendnode.rev(), extendnode))
                if noupdate:
                    return
                cmdutil.bailifchanged(repo)
                return hg.clean(repo, extendnode.node())
        raise util.Abort(_("nothing to extend"))

    if changesets == 0:
        print_result(nodes, good)
    else:
        assert len(nodes) == 1 # only a single node can be tested next
        node = nodes[0]
        # compute the approximate number of remaining tests
        tests, size = 0, 2
        while size <= changesets:
            tests, size = tests + 1, size * 2
        rev = repo.changelog.rev(node)
        ui.write(_("Testing changeset %d:%s "
                   "(%d changesets remaining, ~%d tests)\n")
                 % (rev, short(node), changesets, tests))
        if not noupdate:
            cmdutil.bailifchanged(repo)
            return hg.clean(repo, node)
+
+@command('bookmarks',
+ [('f', 'force', False, _('force')),
+ ('r', 'rev', '', _('revision'), _('REV')),
+ ('d', 'delete', False, _('delete a given bookmark')),
+ ('m', 'rename', '', _('rename a given bookmark'), _('NAME')),
+ ('i', 'inactive', False, _('do not mark a new bookmark active'))],
+ _('hg bookmarks [-f] [-d] [-i] [-m NAME] [-r REV] [NAME]'))
+def bookmark(ui, repo, mark=None, rev=None, force=False, delete=False,
+ rename=None, inactive=False):
+ '''track a line of development with movable markers
+
+ Bookmarks are pointers to certain commits that move when
+ committing. Bookmarks are local. They can be renamed, copied and
+ deleted. It is possible to use bookmark names in :hg:`merge` and
+ :hg:`update` to merge and update respectively to a given bookmark.
+
+ You can use :hg:`bookmark NAME` to set a bookmark on the working
+ directory's parent revision with the given name. If you specify
+ a revision using -r REV (where REV may be an existing bookmark),
+ the bookmark is assigned to that revision.
+
+ Bookmarks can be pushed and pulled between repositories (see :hg:`help
+ push` and :hg:`help pull`). This requires both the local and remote
+ repositories to support bookmarks. For versions prior to 1.8, this means
+ the bookmarks extension must be enabled.
+ '''
+ hexfn = ui.debugflag and hex or short
+ marks = repo._bookmarks
+ cur = repo.changectx('.').node()
+
+ if rename:
+ if rename not in marks:
+ raise util.Abort(_("bookmark '%s' does not exist") % rename)
+ if mark in marks and not force:
+ raise util.Abort(_("bookmark '%s' already exists "
+ "(use -f to force)") % mark)
+ if mark is None:
+ raise util.Abort(_("new bookmark name required"))
+ marks[mark] = marks[rename]
+ if repo._bookmarkcurrent == rename and not inactive:
+ bookmarks.setcurrent(repo, mark)
+ del marks[rename]
+ bookmarks.write(repo)
+ return
+
+ if delete:
+ if mark is None:
+ raise util.Abort(_("bookmark name required"))
+ if mark not in marks:
+ raise util.Abort(_("bookmark '%s' does not exist") % mark)
+ if mark == repo._bookmarkcurrent:
+ bookmarks.setcurrent(repo, None)
+ del marks[mark]
+ bookmarks.write(repo)
+ return
+
+ if mark is not None:
+ if "\n" in mark:
+ raise util.Abort(_("bookmark name cannot contain newlines"))
+ mark = mark.strip()
+ if not mark:
+ raise util.Abort(_("bookmark names cannot consist entirely of "
+ "whitespace"))
+ if inactive and mark == repo._bookmarkcurrent:
+ bookmarks.setcurrent(repo, None)
+ return
+ if mark in marks and not force:
+ raise util.Abort(_("bookmark '%s' already exists "
+ "(use -f to force)") % mark)
+ if ((mark in repo.branchtags() or mark == repo.dirstate.branch())
+ and not force):
+ raise util.Abort(
+ _("a bookmark cannot have the name of an existing branch"))
+ if rev:
+ marks[mark] = repo.lookup(rev)
+ else:
+ marks[mark] = repo.changectx('.').node()
+ if not inactive and repo.changectx('.').node() == marks[mark]:
+ bookmarks.setcurrent(repo, mark)
+ bookmarks.write(repo)
+ return
+
+ if mark is None:
+ if rev:
+ raise util.Abort(_("bookmark name required"))
+ if len(marks) == 0:
+ ui.status(_("no bookmarks set\n"))
+ else:
+ for bmark, n in sorted(marks.iteritems()):
+ current = repo._bookmarkcurrent
+ if bmark == current and n == cur:
+ prefix, label = '*', 'bookmarks.current'
+ else:
+ prefix, label = ' ', ''
+
+ if ui.quiet:
+ ui.write("%s\n" % bmark, label=label)
+ else:
+ ui.write(" %s %-25s %d:%s\n" % (
+ prefix, bmark, repo.changelog.rev(n), hexfn(n)),
+ label=label)
+ return
+
+@command('branch',
+ [('f', 'force', None,
+ _('set branch name even if it shadows an existing branch')),
+ ('C', 'clean', None, _('reset branch name to parent branch name'))],
+ _('[-fC] [NAME]'))
+def branch(ui, repo, label=None, **opts):
+ """set or show the current branch name
+
+ With no argument, show the current branch name. With one argument,
+ set the working directory branch name (the branch will not exist
+ in the repository until the next commit). Standard practice
+ recommends that primary development take place on the 'default'
+ branch.
+
+ Unless -f/--force is specified, branch will not let you set a
+ branch name that already exists, even if it's inactive.
+
+ Use -C/--clean to reset the working directory branch to that of
+ the parent of the working directory, negating a previous branch
+ change.
+
+ Use the command :hg:`update` to switch to an existing branch. Use
+ :hg:`commit --close-branch` to mark this branch as closed.
+
+ .. note::
+ Branch names are permanent. Use :hg:`bookmark` to create a
+ light-weight bookmark instead. See :hg:`help glossary` for more
+ information about named branches and bookmarks.
+
+ Returns 0 on success.
+ """
+
+ if opts.get('clean'):
+ label = repo[None].p1().branch()
+ repo.dirstate.setbranch(label)
+ ui.status(_('reset working directory to branch %s\n') % label)
+ elif label:
+ if not opts.get('force') and label in repo.branchtags():
+ if label not in [p.branch() for p in repo.parents()]:
+ raise util.Abort(_('a branch of the same name already exists'),
+ # i18n: "it" refers to an existing branch
+ hint=_("use 'hg update' to switch to it"))
+ repo.dirstate.setbranch(label)
+ ui.status(_('marked working directory as branch %s\n') % label)
+ else:
+ ui.write("%s\n" % repo.dirstate.branch())
+
+@command('branches',
+ [('a', 'active', False, _('show only branches that have unmerged heads')),
+ ('c', 'closed', False, _('show normal and closed branches'))],
+ _('[-ac]'))
+def branches(ui, repo, active=False, closed=False):
+ """list repository named branches
+
+ List the repository's named branches, indicating which ones are
+ inactive. If -c/--closed is specified, also list branches which have
+ been marked closed (see :hg:`commit --close-branch`).
+
+ If -a/--active is specified, only show active branches. A branch
+ is considered active if it contains repository heads.
+
+ Use the command :hg:`update` to switch to an existing branch.
+
+ Returns 0.
+ """
+
+ hexfunc = ui.debugflag and hex or short
+ activebranches = [repo[n].branch() for n in repo.heads()]
+ def testactive(tag, node):
+ realhead = tag in activebranches
+ open = node in repo.branchheads(tag, closed=False)
+ return realhead and open
+ branches = sorted([(testactive(tag, node), repo.changelog.rev(node), tag)
+ for tag, node in repo.branchtags().items()],
+ reverse=True)
+
+ for isactive, node, tag in branches:
+ if (not active) or isactive:
+ if ui.quiet:
+ ui.write("%s\n" % tag)
+ else:
+ hn = repo.lookup(node)
+ if isactive:
+ label = 'branches.active'
+ notice = ''
+ elif hn not in repo.branchheads(tag, closed=False):
+ if not closed:
+ continue
+ label = 'branches.closed'
+ notice = _(' (closed)')
+ else:
+ label = 'branches.inactive'
+ notice = _(' (inactive)')
+ if tag == repo.dirstate.branch():
+ label = 'branches.current'
+ rev = str(node).rjust(31 - encoding.colwidth(tag))
+ rev = ui.label('%s:%s' % (rev, hexfunc(hn)), 'log.changeset')
+ tag = ui.label(tag, label)
+ ui.write("%s %s%s\n" % (tag, rev, notice))
+
+@command('bundle',
+ [('f', 'force', None, _('run even when the destination is unrelated')),
+ ('r', 'rev', [], _('a changeset intended to be added to the destination'),
+ _('REV')),
+ ('b', 'branch', [], _('a specific branch you would like to bundle'),
+ _('BRANCH')),
+ ('', 'base', [],
+ _('a base changeset assumed to be available at the destination'),
+ _('REV')),
+ ('a', 'all', None, _('bundle all changesets in the repository')),
+ ('t', 'type', 'bzip2', _('bundle compression type to use'), _('TYPE')),
+ ] + remoteopts,
+ _('[-f] [-t TYPE] [-a] [-r REV]... [--base REV]... FILE [DEST]'))
+def bundle(ui, repo, fname, dest=None, **opts):
+ """create a changegroup file
+
+ Generate a compressed changegroup file collecting changesets not
+ known to be in another repository.
+
+ If you omit the destination repository, then hg assumes the
+ destination will have all the nodes you specify with --base
+ parameters. To create a bundle containing all changesets, use
+ -a/--all (or --base null).
+
+ You can change compression method with the -t/--type option.
+ The available compression methods are: none, bzip2, and
+ gzip (by default, bundles are compressed using bzip2).
+
+ The bundle file can then be transferred using conventional means
+ and applied to another repository with the unbundle or pull
+ command. This is useful when direct push and pull are not
+ available or when exporting an entire repository is undesirable.
+
+ Applying bundles preserves all changeset contents including
+ permissions, copy/rename information, and revision history.
+
+ Returns 0 on success, 1 if no changes found.
+ """
+ revs = None
+ if 'rev' in opts:
+ revs = scmutil.revrange(repo, opts['rev'])
+
+ if opts.get('all'):
+ base = ['null']
+ else:
+ base = scmutil.revrange(repo, opts.get('base'))
+ if base:
+ if dest:
+ raise util.Abort(_("--base is incompatible with specifying "
+ "a destination"))
+ common = [repo.lookup(rev) for rev in base]
+ heads = revs and map(repo.lookup, revs) or revs
+ else:
+ dest = ui.expandpath(dest or 'default-push', dest or 'default')
+ dest, branches = hg.parseurl(dest, opts.get('branch'))
+ other = hg.peer(repo, opts, dest)
+ revs, checkout = hg.addbranchrevs(repo, other, branches, revs)
+ heads = revs and map(repo.lookup, revs) or revs
+ common, outheads = discovery.findcommonoutgoing(repo, other,
+ onlyheads=heads,
+ force=opts.get('force'))
+
+ cg = repo.getbundle('bundle', common=common, heads=heads)
+ if not cg:
+ ui.status(_("no changes found\n"))
+ return 1
+
+ bundletype = opts.get('type', 'bzip2').lower()
+ btypes = {'none': 'HG10UN', 'bzip2': 'HG10BZ', 'gzip': 'HG10GZ'}
+ bundletype = btypes.get(bundletype)
+ if bundletype not in changegroup.bundletypes:
+ raise util.Abort(_('unknown bundle type specified with --type'))
+
+ changegroup.writebundle(cg, fname, bundletype)
+
+@command('cat',
+ [('o', 'output', '',
+ _('print output to file with formatted name'), _('FORMAT')),
+ ('r', 'rev', '', _('print the given revision'), _('REV')),
+ ('', 'decode', None, _('apply any matching decode filter')),
+ ] + walkopts,
+ _('[OPTION]... FILE...'))
+def cat(ui, repo, file1, *pats, **opts):
+ """output the current or given revision of files
+
+ Print the specified files as they were at the given revision. If
+ no revision is given, the parent of the working directory is used,
+ or tip if no revision is checked out.
+
+ Output may be to a file, in which case the name of the file is
+ given using a format string. The formatting rules are the same as
+ for the export command, with the following additions:
+
+ :``%s``: basename of file being printed
+ :``%d``: dirname of file being printed, or '.' if in repository root
+ :``%p``: root-relative path name of file being printed
+
+ Returns 0 on success.
+ """
+ ctx = scmutil.revsingle(repo, opts.get('rev'))
+ err = 1
+ m = scmutil.match(ctx, (file1,) + pats, opts)
+ for abs in ctx.walk(m):
+ fp = cmdutil.makefileobj(repo, opts.get('output'), ctx.node(),
+ pathname=abs)
+ data = ctx[abs].data()
+ if opts.get('decode'):
+ data = repo.wwritedata(abs, data)
+ fp.write(data)
+ fp.close()
+ err = 0
+ return err
+
+@command('^clone',
+ [('U', 'noupdate', None,
+ _('the clone will include an empty working copy (only a repository)')),
+ ('u', 'updaterev', '', _('revision, tag or branch to check out'), _('REV')),
+ ('r', 'rev', [], _('include the specified changeset'), _('REV')),
+ ('b', 'branch', [], _('clone only the specified branch'), _('BRANCH')),
+ ('', 'pull', None, _('use pull protocol to copy metadata')),
+ ('', 'uncompressed', None, _('use uncompressed transfer (fast over LAN)')),
+ ] + remoteopts,
+ _('[OPTION]... SOURCE [DEST]'))
+def clone(ui, source, dest=None, **opts):
+ """make a copy of an existing repository
+
+ Create a copy of an existing repository in a new directory.
+
+ If no destination directory name is specified, it defaults to the
+ basename of the source.
+
+ The location of the source is added to the new repository's
+ ``.hg/hgrc`` file, as the default to be used for future pulls.
+
+ Only local paths and ``ssh://`` URLs are supported as
+ destinations. For ``ssh://`` destinations, no working directory or
+ ``.hg/hgrc`` will be created on the remote side.
+
+ To pull only a subset of changesets, specify one or more revisions
+ identifiers with -r/--rev or branches with -b/--branch. The
+ resulting clone will contain only the specified changesets and
+ their ancestors. These options (or 'clone src#rev dest') imply
+ --pull, even for local source repositories. Note that specifying a
+ tag will include the tagged changeset but not the changeset
+ containing the tag.
+
+ To check out a particular version, use -u/--update, or
+ -U/--noupdate to create a clone with no working directory.
+
+ .. container:: verbose
+
+ For efficiency, hardlinks are used for cloning whenever the
+ source and destination are on the same filesystem (note this
+ applies only to the repository data, not to the working
+ directory). Some filesystems, such as AFS, implement hardlinking
+ incorrectly, but do not report errors. In these cases, use the
+ --pull option to avoid hardlinking.
+
+ In some cases, you can clone repositories and the working
+ directory using full hardlinks with ::
+
+ $ cp -al REPO REPOCLONE
+
+ This is the fastest way to clone, but it is not always safe. The
+ operation is not atomic (making sure REPO is not modified during
+ the operation is up to you) and you have to make sure your
+ editor breaks hardlinks (Emacs and most Linux Kernel tools do
+ so). Also, this is not compatible with certain extensions that
+ place their metadata under the .hg directory, such as mq.
+
+ Mercurial will update the working directory to the first applicable
+ revision from this list:
+
+ a) null if -U or the source repository has no changesets
+ b) if -u . and the source repository is local, the first parent of
+ the source repository's working directory
+ c) the changeset specified with -u (if a branch name, this means the
+ latest head of that branch)
+ d) the changeset specified with -r
+ e) the tipmost head specified with -b
+ f) the tipmost head specified with the url#branch source syntax
+ g) the tipmost head of the default branch
+ h) tip
+
+ Examples:
+
+ - clone a remote repository to a new directory named hg/::
+
+ hg clone http://selenic.com/hg
+
+ - create a lightweight local clone::
+
+ hg clone project/ project-feature/
+
+ - clone from an absolute path on an ssh server (note double-slash)::
+
+ hg clone ssh://user@server//home/projects/alpha/
+
+ - do a high-speed clone over a LAN while checking out a
+ specified version::
+
+ hg clone --uncompressed http://server/repo -u 1.5
+
+ - create a repository without changesets after a particular revision::
+
+ hg clone -r 04e544 experimental/ good/
+
+ - clone (and track) a particular named branch::
+
+ hg clone http://selenic.com/hg#stable
+
+ See :hg:`help urls` for details on specifying URLs.
+
+ Returns 0 on success.
+ """
+ if opts.get('noupdate') and opts.get('updaterev'):
+ raise util.Abort(_("cannot specify both --noupdate and --updaterev"))
+
+ r = hg.clone(ui, opts, source, dest,
+ pull=opts.get('pull'),
+ stream=opts.get('uncompressed'),
+ rev=opts.get('rev'),
+ update=opts.get('updaterev') or not opts.get('noupdate'),
+ branch=opts.get('branch'))
+
+ return r is None
+
+@command('^commit|ci',
+ [('A', 'addremove', None,
+ _('mark new/missing files as added/removed before committing')),
+ ('', 'close-branch', None,
+ _('mark a branch as closed, hiding it from the branch list')),
+ ] + walkopts + commitopts + commitopts2 + subrepoopts,
+ _('[OPTION]... [FILE]...'))
+def commit(ui, repo, *pats, **opts):
+ """commit the specified files or all outstanding changes
+
+ Commit changes to the given files into the repository. Unlike a
+ centralized SCM, this operation is a local operation. See
+ :hg:`push` for a way to actively distribute your changes.
+
+ If a list of files is omitted, all changes reported by :hg:`status`
+ will be committed.
+
+ If you are committing the result of a merge, do not provide any
+ filenames or -I/-X filters.
+
+ If no commit message is specified, Mercurial starts your
+ configured editor where you can enter a message. In case your
+ commit fails, you will find a backup of your message in
+ ``.hg/last-message.txt``.
+
+ See :hg:`help dates` for a list of formats valid for -d/--date.
+
+ Returns 0 on success, 1 if nothing changed.
+ """
+ if opts.get('subrepos'):
+ # Let --subrepos on the command line override config setting.
+ ui.setconfig('ui', 'commitsubrepos', True)
+
+ extra = {}
+ if opts.get('close_branch'):
+ if repo['.'].node() not in repo.branchheads():
+ # The topo heads set is included in the branch heads set of the
+ # current branch, so it's sufficient to test branchheads
+ raise util.Abort(_('can only close branch heads'))
+ extra['close'] = 1
+ e = cmdutil.commiteditor
+ if opts.get('force_editor'):
+ e = cmdutil.commitforceeditor
+
+ def commitfunc(ui, repo, message, match, opts):
+ return repo.commit(message, opts.get('user'), opts.get('date'), match,
+ editor=e, extra=extra)
+
+ branch = repo[None].branch()
+ bheads = repo.branchheads(branch)
+
+ node = cmdutil.commit(ui, repo, commitfunc, pats, opts)
+ if not node:
+ stat = repo.status(match=scmutil.match(repo[None], pats, opts))
+ if stat[3]:
+ ui.status(_("nothing changed (%d missing files, see 'hg status')\n")
+ % len(stat[3]))
+ else:
+ ui.status(_("nothing changed\n"))
+ return 1
+
+ ctx = repo[node]
+ parents = ctx.parents()
+
+ if (bheads and node not in bheads and not
+ [x for x in parents if x.node() in bheads and x.branch() == branch]):
+ ui.status(_('created new head\n'))
+ # The message is not printed for initial roots. For the other
+ # changesets, it is printed in the following situations:
+ #
+ # Par column: for the 2 parents with ...
+ # N: null or no parent
+ # B: parent is on another named branch
+ # C: parent is a regular non head changeset
+ # H: parent was a branch head of the current branch
+ # Msg column: whether we print "created new head" message
+ # In the following, it is assumed that there already exists some
+ # initial branch heads of the current branch, otherwise nothing is
+ # printed anyway.
+ #
+ # Par Msg Comment
+ # NN y additional topo root
+ #
+ # BN y additional branch root
+ # CN y additional topo head
+ # HN n usual case
+ #
+ # BB y weird additional branch root
+ # CB y branch merge
+ # HB n merge with named branch
+ #
+ # CC y additional head from merge
+ # CH n merge with a head
+ #
+ # HH n head merge: head count decreases
+
+ if not opts.get('close_branch'):
+ for r in parents:
+ if r.extra().get('close') and r.branch() == branch:
+ ui.status(_('reopening closed branch head %d\n') % r)
+
+ if ui.debugflag:
+ ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx.hex()))
+ elif ui.verbose:
+ ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx))
+
+@command('copy|cp',
+ [('A', 'after', None, _('record a copy that has already occurred')),
+ ('f', 'force', None, _('forcibly copy over an existing managed file')),
+ ] + walkopts + dryrunopts,
+ _('[OPTION]... [SOURCE]... DEST'))
+def copy(ui, repo, *pats, **opts):
+ """mark files as copied for the next commit
+
+ Mark dest as having copies of source files. If dest is a
+ directory, copies are put in that directory. If dest is a file,
+ the source must be a single file.
+
+ By default, this command copies the contents of files as they
+ exist in the working directory. If invoked with -A/--after, the
+ operation is recorded, but no copying is performed.
+
+ This command takes effect with the next commit. To undo a copy
+ before that, see :hg:`revert`.
+
+ Returns 0 on success, 1 if errors are encountered.
+ """
+ wlock = repo.wlock(False)
+ try:
+ return cmdutil.copy(ui, repo, pats, opts)
+ finally:
+ wlock.release()
+
+@command('debugancestor', [], _('[INDEX] REV1 REV2'))
+def debugancestor(ui, repo, *args):
+ """find the ancestor revision of two revisions in a given index"""
+ if len(args) == 3:
+ index, rev1, rev2 = args
+ r = revlog.revlog(scmutil.opener(os.getcwd(), audit=False), index)
+ lookup = r.lookup
+ elif len(args) == 2:
+ if not repo:
+ raise util.Abort(_("there is no Mercurial repository here "
+ "(.hg not found)"))
+ rev1, rev2 = args
+ r = repo.changelog
+ lookup = repo.lookup
+ else:
+ raise util.Abort(_('either two or three arguments required'))
+ a = r.ancestor(lookup(rev1), lookup(rev2))
+ ui.write("%d:%s\n" % (r.rev(a), hex(a)))
+
+@command('debugbuilddag',
+ [('m', 'mergeable-file', None, _('add single file mergeable changes')),
+ ('o', 'overwritten-file', None, _('add single file all revs overwrite')),
+ ('n', 'new-file', None, _('add new file at each rev'))],
+ _('[OPTION]... [TEXT]'))
+def debugbuilddag(ui, repo, text=None,
+ mergeable_file=False,
+ overwritten_file=False,
+ new_file=False):
+ """builds a repo with a given DAG from scratch in the current empty repo
+
+ The description of the DAG is read from stdin if not given on the
+ command line.
+
+ Elements:
+
+ - "+n" is a linear run of n nodes based on the current default parent
+ - "." is a single node based on the current default parent
+ - "$" resets the default parent to null (implied at the start);
+ otherwise the default parent is always the last node created
+ - "<p" sets the default parent to the backref p
+ - "*p" is a fork at parent p, which is a backref
+ - "*p1/p2" is a merge of parents p1 and p2, which are backrefs
+ - "/p2" is a merge of the preceding node and p2
+ - ":tag" defines a local tag for the preceding node
+ - "@branch" sets the named branch for subsequent nodes
+ - "#...\\n" is a comment up to the end of the line
+
+ Whitespace between the above elements is ignored.
+
+ A backref is either
+
+ - a number n, which references the node curr-n, where curr is the current
+ node, or
+ - the name of a local tag you placed earlier using ":tag", or
+ - empty to denote the default parent.
+
+ All string valued-elements are either strictly alphanumeric, or must
+ be enclosed in double quotes ("..."), with "\\" as escape character.
+ """
+
+ if text is None:
+ ui.status(_("reading DAG from stdin\n"))
+ text = ui.fin.read()
+
+ cl = repo.changelog
+ if len(cl) > 0:
+ raise util.Abort(_('repository is not empty'))
+
+ # determine number of revs in DAG
+ total = 0
+ for type, data in dagparser.parsedag(text):
+ if type == 'n':
+ total += 1
+
+ if mergeable_file:
+ linesperrev = 2
+ # make a file with k lines per rev
+ initialmergedlines = [str(i) for i in xrange(0, total * linesperrev)]
+ initialmergedlines.append("")
+
+ tags = []
+
+ tr = repo.transaction("builddag")
+ try:
+
+ at = -1
+ atbranch = 'default'
+ nodeids = []
+ ui.progress(_('building'), 0, unit=_('revisions'), total=total)
+ for type, data in dagparser.parsedag(text):
+ if type == 'n':
+ ui.note('node %s\n' % str(data))
+ id, ps = data
+
+ files = []
+ fctxs = {}
+
+ p2 = None
+ if mergeable_file:
+ fn = "mf"
+ p1 = repo[ps[0]]
+ if len(ps) > 1:
+ p2 = repo[ps[1]]
+ pa = p1.ancestor(p2)
+ base, local, other = [x[fn].data() for x in pa, p1, p2]
+ m3 = simplemerge.Merge3Text(base, local, other)
+ ml = [l.strip() for l in m3.merge_lines()]
+ ml.append("")
+ elif at > 0:
+ ml = p1[fn].data().split("\n")
+ else:
+ ml = initialmergedlines
+ ml[id * linesperrev] += " r%i" % id
+ mergedtext = "\n".join(ml)
+ files.append(fn)
+ fctxs[fn] = context.memfilectx(fn, mergedtext)
+
+ if overwritten_file:
+ fn = "of"
+ files.append(fn)
+ fctxs[fn] = context.memfilectx(fn, "r%i\n" % id)
+
+ if new_file:
+ fn = "nf%i" % id
+ files.append(fn)
+ fctxs[fn] = context.memfilectx(fn, "r%i\n" % id)
+ if len(ps) > 1:
+ if not p2:
+ p2 = repo[ps[1]]
+ for fn in p2:
+ if fn.startswith("nf"):
+ files.append(fn)
+ fctxs[fn] = p2[fn]
+
+ def fctxfn(repo, cx, path):
+ return fctxs.get(path)
+
+ if len(ps) == 0 or ps[0] < 0:
+ pars = [None, None]
+ elif len(ps) == 1:
+ pars = [nodeids[ps[0]], None]
+ else:
+ pars = [nodeids[p] for p in ps]
+ cx = context.memctx(repo, pars, "r%i" % id, files, fctxfn,
+ date=(id, 0),
+ user="debugbuilddag",
+ extra={'branch': atbranch})
+ nodeid = repo.commitctx(cx)
+ nodeids.append(nodeid)
+ at = id
+ elif type == 'l':
+ id, name = data
+ ui.note('tag %s\n' % name)
+ tags.append("%s %s\n" % (hex(repo.changelog.node(id)), name))
+ elif type == 'a':
+ ui.note('branch %s\n' % data)
+ atbranch = data
+ ui.progress(_('building'), id, unit=_('revisions'), total=total)
+ tr.close()
+ finally:
+ ui.progress(_('building'), None)
+ tr.release()
+
+ if tags:
+ repo.opener.write("localtags", "".join(tags))
+
+@command('debugbundle', [('a', 'all', None, _('show all details'))], _('FILE'))
+def debugbundle(ui, bundlepath, all=None, **opts):
+ """lists the contents of a bundle"""
+ f = url.open(ui, bundlepath)
+ try:
+ gen = changegroup.readbundle(f, bundlepath)
+ if all:
+ ui.write("format: id, p1, p2, cset, delta base, len(delta)\n")
+
+ def showchunks(named):
+ ui.write("\n%s\n" % named)
+ chain = None
+ while True:
+ chunkdata = gen.deltachunk(chain)
+ if not chunkdata:
+ break
+ node = chunkdata['node']
+ p1 = chunkdata['p1']
+ p2 = chunkdata['p2']
+ cs = chunkdata['cs']
+ deltabase = chunkdata['deltabase']
+ delta = chunkdata['delta']
+ ui.write("%s %s %s %s %s %s\n" %
+ (hex(node), hex(p1), hex(p2),
+ hex(cs), hex(deltabase), len(delta)))
+ chain = node
+
+ chunkdata = gen.changelogheader()
+ showchunks("changelog")
+ chunkdata = gen.manifestheader()
+ showchunks("manifest")
+ while True:
+ chunkdata = gen.filelogheader()
+ if not chunkdata:
+ break
+ fname = chunkdata['filename']
+ showchunks(fname)
+ else:
+ chunkdata = gen.changelogheader()
+ chain = None
+ while True:
+ chunkdata = gen.deltachunk(chain)
+ if not chunkdata:
+ break
+ node = chunkdata['node']
+ ui.write("%s\n" % hex(node))
+ chain = node
+ finally:
+ f.close()
+
+@command('debugcheckstate', [], '')
+def debugcheckstate(ui, repo):
+ """validate the correctness of the current dirstate"""
+ parent1, parent2 = repo.dirstate.parents()
+ m1 = repo[parent1].manifest()
+ m2 = repo[parent2].manifest()
+ errors = 0
+ for f in repo.dirstate:
+ state = repo.dirstate[f]
+ if state in "nr" and f not in m1:
+ ui.warn(_("%s in state %s, but not in manifest1\n") % (f, state))
+ errors += 1
+ if state in "a" and f in m1:
+ ui.warn(_("%s in state %s, but also in manifest1\n") % (f, state))
+ errors += 1
+ if state in "m" and f not in m1 and f not in m2:
+ ui.warn(_("%s in state %s, but not in either manifest\n") %
+ (f, state))
+ errors += 1
+ for f in m1:
+ state = repo.dirstate[f]
+ if state not in "nrm":
+ ui.warn(_("%s in manifest1, but listed as state %s") % (f, state))
+ errors += 1
+ if errors:
+ error = _(".hg/dirstate inconsistent with current parent's manifest")
+ raise util.Abort(error)
+
+@command('debugcommands', [], _('[COMMAND]'))
+def debugcommands(ui, cmd='', *args):
+ """list all available commands and options"""
+ for cmd, vals in sorted(table.iteritems()):
+ cmd = cmd.split('|')[0].strip('^')
+ opts = ', '.join([i[1] for i in vals[1]])
+ ui.write('%s: %s\n' % (cmd, opts))
+
+@command('debugcomplete',
+ [('o', 'options', None, _('show the command options'))],
+ _('[-o] CMD'))
+def debugcomplete(ui, cmd='', **opts):
+ """returns the completion list associated with the given command"""
+
+ if opts.get('options'):
+ options = []
+ otables = [globalopts]
+ if cmd:
+ aliases, entry = cmdutil.findcmd(cmd, table, False)
+ otables.append(entry[1])
+ for t in otables:
+ for o in t:
+ if "(DEPRECATED)" in o[3]:
+ continue
+ if o[0]:
+ options.append('-%s' % o[0])
+ options.append('--%s' % o[1])
+ ui.write("%s\n" % "\n".join(options))
+ return
+
+ cmdlist = cmdutil.findpossible(cmd, table)
+ if ui.verbose:
+ cmdlist = [' '.join(c[0]) for c in cmdlist.values()]
+ ui.write("%s\n" % "\n".join(sorted(cmdlist)))
+
+@command('debugdag',
+ [('t', 'tags', None, _('use tags as labels')),
+ ('b', 'branches', None, _('annotate with branch names')),
+ ('', 'dots', None, _('use dots for runs')),
+ ('s', 'spaces', None, _('separate elements by spaces'))],
+ _('[OPTION]... [FILE [REV]...]'))
+def debugdag(ui, repo, file_=None, *revs, **opts):
+ """format the changelog or an index DAG as a concise textual description
+
+ If you pass a revlog index, the revlog's DAG is emitted. If you list
+ revision numbers, they get labelled in the output as rN.
+
+ Otherwise, the changelog DAG of the current repo is emitted.
+ """
+ spaces = opts.get('spaces')
+ dots = opts.get('dots')
+ if file_:
+ rlog = revlog.revlog(scmutil.opener(os.getcwd(), audit=False), file_)
+ revs = set((int(r) for r in revs))
+ def events():
+ for r in rlog:
+ yield 'n', (r, list(set(p for p in rlog.parentrevs(r) if p != -1)))
+ if r in revs:
+ yield 'l', (r, "r%i" % r)
+ elif repo:
+ cl = repo.changelog
+ tags = opts.get('tags')
+ branches = opts.get('branches')
+ if tags:
+ labels = {}
+ for l, n in repo.tags().items():
+ labels.setdefault(cl.rev(n), []).append(l)
+ def events():
+ b = "default"
+ for r in cl:
+ if branches:
+ newb = cl.read(cl.node(r))[5]['branch']
+ if newb != b:
+ yield 'a', newb
+ b = newb
+ yield 'n', (r, list(set(p for p in cl.parentrevs(r) if p != -1)))
+ if tags:
+ ls = labels.get(r)
+ if ls:
+ for l in ls:
+ yield 'l', (r, l)
+ else:
+ raise util.Abort(_('need repo for changelog dag'))
+
+ for line in dagparser.dagtextlines(events(),
+ addspaces=spaces,
+ wraplabels=True,
+ wrapannotations=True,
+ wrapnonlinear=dots,
+ usedots=dots,
+ maxlinewidth=70):
+ ui.write(line)
+ ui.write("\n")
+
+@command('debugdata',
+ [('c', 'changelog', False, _('open changelog')),
+ ('m', 'manifest', False, _('open manifest'))],
+ _('-c|-m|FILE REV'))
+def debugdata(ui, repo, file_, rev = None, **opts):
+ """dump the contents of a data file revision"""
+ if opts.get('changelog') or opts.get('manifest'):
+ file_, rev = None, file_
+ elif rev is None:
+ raise error.CommandError('debugdata', _('invalid arguments'))
+ r = cmdutil.openrevlog(repo, 'debugdata', file_, opts)
+ try:
+ ui.write(r.revision(r.lookup(rev)))
+ except KeyError:
+ raise util.Abort(_('invalid revision identifier %s') % rev)
+
+@command('debugdate',
+ [('e', 'extended', None, _('try extended date formats'))],
+ _('[-e] DATE [RANGE]'))
+def debugdate(ui, date, range=None, **opts):
+ """parse and display a date"""
+ if opts["extended"]:
+ d = util.parsedate(date, util.extendeddateformats)
+ else:
+ d = util.parsedate(date)
+ ui.write("internal: %s %s\n" % d)
+ ui.write("standard: %s\n" % util.datestr(d))
+ if range:
+ m = util.matchdate(range)
+ ui.write("match: %s\n" % m(d[0]))
+
+@command('debugdiscovery',
+ [('', 'old', None, _('use old-style discovery')),
+ ('', 'nonheads', None,
+ _('use old-style discovery with non-heads included')),
+ ] + remoteopts,
+ _('[-l REV] [-r REV] [-b BRANCH]... [OTHER]'))
+def debugdiscovery(ui, repo, remoteurl="default", **opts):
+ """runs the changeset discovery protocol in isolation"""
+ remoteurl, branches = hg.parseurl(ui.expandpath(remoteurl), opts.get('branch'))
+ remote = hg.peer(repo, opts, remoteurl)
+ ui.status(_('comparing with %s\n') % util.hidepassword(remoteurl))
+
+ # make sure tests are repeatable
+ random.seed(12323)
+
+ def doit(localheads, remoteheads):
+ if opts.get('old'):
+ if localheads:
+ raise util.Abort('cannot use localheads with old style discovery')
+ common, _in, hds = treediscovery.findcommonincoming(repo, remote,
+ force=True)
+ common = set(common)
+ if not opts.get('nonheads'):
+ ui.write("unpruned common: %s\n" % " ".join([short(n)
+ for n in common]))
+ dag = dagutil.revlogdag(repo.changelog)
+ all = dag.ancestorset(dag.internalizeall(common))
+ common = dag.externalizeall(dag.headsetofconnecteds(all))
+ else:
+ common, any, hds = setdiscovery.findcommonheads(ui, repo, remote)
+ common = set(common)
+ rheads = set(hds)
+ lheads = set(repo.heads())
+ ui.write("common heads: %s\n" % " ".join([short(n) for n in common]))
+ if lheads <= common:
+ ui.write("local is subset\n")
+ elif rheads <= common:
+ ui.write("remote is subset\n")
+
+ serverlogs = opts.get('serverlog')
+ if serverlogs:
+ for filename in serverlogs:
+ logfile = open(filename, 'r')
+ try:
+ line = logfile.readline()
+ while line:
+ parts = line.strip().split(';')
+ op = parts[1]
+ if op == 'cg':
+ pass
+ elif op == 'cgss':
+ doit(parts[2].split(' '), parts[3].split(' '))
+ elif op == 'unb':
+ doit(parts[3].split(' '), parts[2].split(' '))
+ line = logfile.readline()
+ finally:
+ logfile.close()
+
+ else:
+ remoterevs, _checkout = hg.addbranchrevs(repo, remote, branches,
+ opts.get('remote_head'))
+ localrevs = opts.get('local_head')
+ doit(localrevs, remoterevs)
+
+@command('debugfileset', [], ('REVSPEC'))
+def debugfileset(ui, repo, expr):
+ '''parse and apply a fileset specification'''
+ if ui.verbose:
+ tree = fileset.parse(expr)[0]
+ ui.note(tree, "\n")
+
+ for f in fileset.getfileset(repo[None], expr):
+ ui.write("%s\n" % f)
+
+@command('debugfsinfo', [], _('[PATH]'))
+def debugfsinfo(ui, path = "."):
+ """show information detected about current filesystem"""
+ util.writefile('.debugfsinfo', '')
+ ui.write('exec: %s\n' % (util.checkexec(path) and 'yes' or 'no'))
+ ui.write('symlink: %s\n' % (util.checklink(path) and 'yes' or 'no'))
+ ui.write('case-sensitive: %s\n' % (util.checkcase('.debugfsinfo')
+ and 'yes' or 'no'))
+ os.unlink('.debugfsinfo')
+
+@command('debuggetbundle',
+ [('H', 'head', [], _('id of head node'), _('ID')),
+ ('C', 'common', [], _('id of common node'), _('ID')),
+ ('t', 'type', 'bzip2', _('bundle compression type to use'), _('TYPE'))],
+ _('REPO FILE [-H|-C ID]...'))
+def debuggetbundle(ui, repopath, bundlepath, head=None, common=None, **opts):
+ """retrieves a bundle from a repo
+
+ Every ID must be a full-length hex node id string. Saves the bundle to the
+ given file.
+ """
+ repo = hg.peer(ui, opts, repopath)
+ if not repo.capable('getbundle'):
+ raise util.Abort("getbundle() not supported by target repository")
+ args = {}
+ if common:
+ args['common'] = [bin(s) for s in common]
+ if head:
+ args['heads'] = [bin(s) for s in head]
+ bundle = repo.getbundle('debug', **args)
+
+ bundletype = opts.get('type', 'bzip2').lower()
+ btypes = {'none': 'HG10UN', 'bzip2': 'HG10BZ', 'gzip': 'HG10GZ'}
+ bundletype = btypes.get(bundletype)
+ if bundletype not in changegroup.bundletypes:
+ raise util.Abort(_('unknown bundle type specified with --type'))
+ changegroup.writebundle(bundle, bundlepath, bundletype)
+
+@command('debugignore', [], '')
+def debugignore(ui, repo, *values, **opts):
+ """display the combined ignore pattern"""
+ ignore = repo.dirstate._ignore
+ includepat = getattr(ignore, 'includepat', None)
+ if includepat is not None:
+ ui.write("%s\n" % includepat)
+ else:
+ raise util.Abort(_("no ignore patterns found"))
+
+@command('debugindex',
+ [('c', 'changelog', False, _('open changelog')),
+ ('m', 'manifest', False, _('open manifest')),
+ ('f', 'format', 0, _('revlog format'), _('FORMAT'))],
+ _('[-f FORMAT] -c|-m|FILE'))
+def debugindex(ui, repo, file_ = None, **opts):
+ """dump the contents of an index file"""
+ r = cmdutil.openrevlog(repo, 'debugindex', file_, opts)
+ format = opts.get('format', 0)
+ if format not in (0, 1):
+ raise util.Abort(_("unknown format %d") % format)
+
+ generaldelta = r.version & revlog.REVLOGGENERALDELTA
+ if generaldelta:
+ basehdr = ' delta'
+ else:
+ basehdr = ' base'
+
+ if format == 0:
+ ui.write(" rev offset length " + basehdr + " linkrev"
+ " nodeid p1 p2\n")
+ elif format == 1:
+ ui.write(" rev flag offset length"
+ " size " + basehdr + " link p1 p2 nodeid\n")
+
+ for i in r:
+ node = r.node(i)
+ if generaldelta:
+ base = r.deltaparent(i)
+ else:
+ base = r.chainbase(i)
+ if format == 0:
+ try:
+ pp = r.parents(node)
+ except:
+ pp = [nullid, nullid]
+ ui.write("% 6d % 9d % 7d % 6d % 7d %s %s %s\n" % (
+ i, r.start(i), r.length(i), base, r.linkrev(i),
+ short(node), short(pp[0]), short(pp[1])))
+ elif format == 1:
+ pr = r.parentrevs(i)
+ ui.write("% 6d %04x % 8d % 8d % 8d % 6d % 6d % 6d % 6d %s\n" % (
+ i, r.flags(i), r.start(i), r.length(i), r.rawsize(i),
+ base, r.linkrev(i), pr[0], pr[1], short(node)))
+
+@command('debugindexdot', [], _('FILE'))
+def debugindexdot(ui, repo, file_):
+ """dump an index DAG as a graphviz dot file"""
+ r = None
+ if repo:
+ filelog = repo.file(file_)
+ if len(filelog):
+ r = filelog
+ if not r:
+ r = revlog.revlog(scmutil.opener(os.getcwd(), audit=False), file_)
+ ui.write("digraph G {\n")
+ for i in r:
+ node = r.node(i)
+ pp = r.parents(node)
+ ui.write("\t%d -> %d\n" % (r.rev(pp[0]), i))
+ if pp[1] != nullid:
+ ui.write("\t%d -> %d\n" % (r.rev(pp[1]), i))
+ ui.write("}\n")
+
+@command('debuginstall', [], '')
+def debuginstall(ui):
+ '''test Mercurial installation
+
+ Returns 0 on success.
+ '''
+
+ def writetemp(contents):
+ (fd, name) = tempfile.mkstemp(prefix="hg-debuginstall-")
+ f = os.fdopen(fd, "wb")
+ f.write(contents)
+ f.close()
+ return name
+
+ problems = 0
+
+ # encoding
+ ui.status(_("Checking encoding (%s)...\n") % encoding.encoding)
+ try:
+ encoding.fromlocal("test")
+ except util.Abort, inst:
+ ui.write(" %s\n" % inst)
+ ui.write(_(" (check that your locale is properly set)\n"))
+ problems += 1
+
+ # compiled modules
+ ui.status(_("Checking installed modules (%s)...\n")
+ % os.path.dirname(__file__))
+ try:
+ import bdiff, mpatch, base85, osutil
+ dir(bdiff), dir(mpatch), dir(base85), dir(osutil) # quiet pyflakes
+ except Exception, inst:
+ ui.write(" %s\n" % inst)
+ ui.write(_(" One or more extensions could not be found"))
+ ui.write(_(" (check that you compiled the extensions)\n"))
+ problems += 1
+
+ # templates
+ import templater
+ p = templater.templatepath()
+ ui.status(_("Checking templates (%s)...\n") % ' '.join(p))
+ try:
+ templater.templater(templater.templatepath("map-cmdline.default"))
+ except Exception, inst:
+ ui.write(" %s\n" % inst)
+ ui.write(_(" (templates seem to have been installed incorrectly)\n"))
+ problems += 1
+
+ # editor
+ ui.status(_("Checking commit editor...\n"))
+ editor = ui.geteditor()
+ cmdpath = util.findexe(editor) or util.findexe(editor.split()[0])
+ if not cmdpath:
+ if editor == 'vi':
+ ui.write(_(" No commit editor set and can't find vi in PATH\n"))
+ ui.write(_(" (specify a commit editor in your configuration"
+ " file)\n"))
+ else:
+ ui.write(_(" Can't find editor '%s' in PATH\n") % editor)
+ ui.write(_(" (specify a commit editor in your configuration"
+ " file)\n"))
+ problems += 1
+
+ # check username
+ ui.status(_("Checking username...\n"))
+ try:
+ ui.username()
+ except util.Abort, e:
+ ui.write(" %s\n" % e)
+ ui.write(_(" (specify a username in your configuration file)\n"))
+ problems += 1
+
+ if not problems:
+ ui.status(_("No problems detected\n"))
+ else:
+ ui.write(_("%s problems detected,"
+ " please check your install!\n") % problems)
+
+ return problems
+
+@command('debugknown', [], _('REPO ID...'))
+def debugknown(ui, repopath, *ids, **opts):
+ """test whether node ids are known to a repo
+
+ Every ID must be a full-length hex node id string. Returns a list of 0s and 1s
+ indicating unknown/known.
+ """
+ repo = hg.peer(ui, opts, repopath)
+ if not repo.capable('known'):
+ raise util.Abort("known() not supported by target repository")
+ flags = repo.known([bin(s) for s in ids])
+ ui.write("%s\n" % ("".join([f and "1" or "0" for f in flags])))
+
+@command('debugpushkey', [], _('REPO NAMESPACE [KEY OLD NEW]'))
+def debugpushkey(ui, repopath, namespace, *keyinfo, **opts):
+ '''access the pushkey key/value protocol
+
+ With two args, list the keys in the given namespace.
+
+ With five args, set a key to new if it currently is set to old.
+ Reports success or failure.
+ '''
+
+ target = hg.peer(ui, {}, repopath)
+ if keyinfo:
+ key, old, new = keyinfo
+ r = target.pushkey(namespace, key, old, new)
+ ui.status(str(r) + '\n')
+ return not r
+ else:
+ for k, v in target.listkeys(namespace).iteritems():
+ ui.write("%s\t%s\n" % (k.encode('string-escape'),
+ v.encode('string-escape')))
+
+@command('debugrebuildstate',
+ [('r', 'rev', '', _('revision to rebuild to'), _('REV'))],
+ _('[-r REV] [REV]'))
+def debugrebuildstate(ui, repo, rev="tip"):
+ """rebuild the dirstate as it would look like for the given revision"""
+ ctx = scmutil.revsingle(repo, rev)
+ wlock = repo.wlock()
+ try:
+ repo.dirstate.rebuild(ctx.node(), ctx.manifest())
+ finally:
+ wlock.release()
+
+@command('debugrename',
+ [('r', 'rev', '', _('revision to debug'), _('REV'))],
+ _('[-r REV] FILE'))
+def debugrename(ui, repo, file1, *pats, **opts):
+ """dump rename information"""
+
+ ctx = scmutil.revsingle(repo, opts.get('rev'))
+ m = scmutil.match(ctx, (file1,) + pats, opts)
+ for abs in ctx.walk(m):
+ fctx = ctx[abs]
+ o = fctx.filelog().renamed(fctx.filenode())
+ rel = m.rel(abs)
+ if o:
+ ui.write(_("%s renamed from %s:%s\n") % (rel, o[0], hex(o[1])))
+ else:
+ ui.write(_("%s not renamed\n") % rel)
+
+@command('debugrevlog',
+ [('c', 'changelog', False, _('open changelog')),
+ ('m', 'manifest', False, _('open manifest')),
+ ('d', 'dump', False, _('dump index data'))],
+ _('-c|-m|FILE'))
+def debugrevlog(ui, repo, file_ = None, **opts):
+ """show data and statistics about a revlog"""
+ r = cmdutil.openrevlog(repo, 'debugrevlog', file_, opts)
+
+ if opts.get("dump"):
+ numrevs = len(r)
+ ui.write("# rev p1rev p2rev start end deltastart base p1 p2"
+ " rawsize totalsize compression heads\n")
+ ts = 0
+ heads = set()
+ for rev in xrange(numrevs):
+ dbase = r.deltaparent(rev)
+ if dbase == -1:
+ dbase = rev
+ cbase = r.chainbase(rev)
+ p1, p2 = r.parentrevs(rev)
+ rs = r.rawsize(rev)
+ ts = ts + rs
+ heads -= set(r.parentrevs(rev))
+ heads.add(rev)
+ ui.write("%d %d %d %d %d %d %d %d %d %d %d %d %d\n" %
+ (rev, p1, p2, r.start(rev), r.end(rev),
+ r.start(dbase), r.start(cbase),
+ r.start(p1), r.start(p2),
+ rs, ts, ts / r.end(rev), len(heads)))
+ return 0
+
+ v = r.version
+ format = v & 0xFFFF
+ flags = []
+ gdelta = False
+ if v & revlog.REVLOGNGINLINEDATA:
+ flags.append('inline')
+ if v & revlog.REVLOGGENERALDELTA:
+ gdelta = True
+ flags.append('generaldelta')
+ if not flags:
+ flags = ['(none)']
+
+ nummerges = 0
+ numfull = 0
+ numprev = 0
+ nump1 = 0
+ nump2 = 0
+ numother = 0
+ nump1prev = 0
+ nump2prev = 0
+ chainlengths = []
+
+ datasize = [None, 0, 0L]
+ fullsize = [None, 0, 0L]
+ deltasize = [None, 0, 0L]
+
+ def addsize(size, l):
+ if l[0] is None or size < l[0]:
+ l[0] = size
+ if size > l[1]:
+ l[1] = size
+ l[2] += size
+
+ numrevs = len(r)
+ for rev in xrange(numrevs):
+ p1, p2 = r.parentrevs(rev)
+ delta = r.deltaparent(rev)
+ if format > 0:
+ addsize(r.rawsize(rev), datasize)
+ if p2 != nullrev:
+ nummerges += 1
+ size = r.length(rev)
+ if delta == nullrev:
+ chainlengths.append(0)
+ numfull += 1
+ addsize(size, fullsize)
+ else:
+ chainlengths.append(chainlengths[delta] + 1)
+ addsize(size, deltasize)
+ if delta == rev - 1:
+ numprev += 1
+ if delta == p1:
+ nump1prev += 1
+ elif delta == p2:
+ nump2prev += 1
+ elif delta == p1:
+ nump1 += 1
+ elif delta == p2:
+ nump2 += 1
+ elif delta != nullrev:
+ numother += 1
+
+ numdeltas = numrevs - numfull
+ numoprev = numprev - nump1prev - nump2prev
+ totalrawsize = datasize[2]
+ datasize[2] /= numrevs
+ fulltotal = fullsize[2]
+ fullsize[2] /= numfull
+ deltatotal = deltasize[2]
+ deltasize[2] /= numrevs - numfull
+ totalsize = fulltotal + deltatotal
+ avgchainlen = sum(chainlengths) / numrevs
+ compratio = totalrawsize / totalsize
+
+ basedfmtstr = '%%%dd\n'
+ basepcfmtstr = '%%%dd %s(%%5.2f%%%%)\n'
+
+ def dfmtstr(max):
+ return basedfmtstr % len(str(max))
+ def pcfmtstr(max, padding=0):
+ return basepcfmtstr % (len(str(max)), ' ' * padding)
+
+ def pcfmt(value, total):
+ return (value, 100 * float(value) / total)
+
+ ui.write('format : %d\n' % format)
+ ui.write('flags : %s\n' % ', '.join(flags))
+
+ ui.write('\n')
+ fmt = pcfmtstr(totalsize)
+ fmt2 = dfmtstr(totalsize)
+ ui.write('revisions : ' + fmt2 % numrevs)
+ ui.write(' merges : ' + fmt % pcfmt(nummerges, numrevs))
+ ui.write(' normal : ' + fmt % pcfmt(numrevs - nummerges, numrevs))
+ ui.write('revisions : ' + fmt2 % numrevs)
+ ui.write(' full : ' + fmt % pcfmt(numfull, numrevs))
+ ui.write(' deltas : ' + fmt % pcfmt(numdeltas, numrevs))
+ ui.write('revision size : ' + fmt2 % totalsize)
+ ui.write(' full : ' + fmt % pcfmt(fulltotal, totalsize))
+ ui.write(' deltas : ' + fmt % pcfmt(deltatotal, totalsize))
+
+ ui.write('\n')
+ fmt = dfmtstr(max(avgchainlen, compratio))
+ ui.write('avg chain length : ' + fmt % avgchainlen)
+ ui.write('compression ratio : ' + fmt % compratio)
+
+ if format > 0:
+ ui.write('\n')
+ ui.write('uncompressed data size (min/max/avg) : %d / %d / %d\n'
+ % tuple(datasize))
+ ui.write('full revision size (min/max/avg) : %d / %d / %d\n'
+ % tuple(fullsize))
+ ui.write('delta size (min/max/avg) : %d / %d / %d\n'
+ % tuple(deltasize))
+
+ if numdeltas > 0:
+ ui.write('\n')
+ fmt = pcfmtstr(numdeltas)
+ fmt2 = pcfmtstr(numdeltas, 4)
+ ui.write('deltas against prev : ' + fmt % pcfmt(numprev, numdeltas))
+ if numprev > 0:
+ ui.write(' where prev = p1 : ' + fmt2 % pcfmt(nump1prev, numprev))
+ ui.write(' where prev = p2 : ' + fmt2 % pcfmt(nump2prev, numprev))
+ ui.write(' other : ' + fmt2 % pcfmt(numoprev, numprev))
+ if gdelta:
+ ui.write('deltas against p1 : ' + fmt % pcfmt(nump1, numdeltas))
+ ui.write('deltas against p2 : ' + fmt % pcfmt(nump2, numdeltas))
+ ui.write('deltas against other : ' + fmt % pcfmt(numother, numdeltas))
+
+@command('debugrevspec', [], ('REVSPEC'))
+def debugrevspec(ui, repo, expr):
+ '''parse and apply a revision specification'''
+ if ui.verbose:
+ tree = revset.parse(expr)[0]
+ ui.note(tree, "\n")
+ newtree = revset.findaliases(ui, tree)
+ if newtree != tree:
+ ui.note(newtree, "\n")
+ func = revset.match(ui, expr)
+ for c in func(repo, range(len(repo))):
+ ui.write("%s\n" % c)
+
+@command('debugsetparents', [], _('REV1 [REV2]'))
+def debugsetparents(ui, repo, rev1, rev2=None):
+ """manually set the parents of the current working directory
+
+ This is useful for writing repository conversion tools, but should
+ be used with care.
+
+ Returns 0 on success.
+ """
+
+ r1 = scmutil.revsingle(repo, rev1).node()
+ r2 = scmutil.revsingle(repo, rev2, 'null').node()
+
+ wlock = repo.wlock()
+ try:
+ repo.dirstate.setparents(r1, r2)
+ finally:
+ wlock.release()
+
+@command('debugstate',
+ [('', 'nodates', None, _('do not display the saved mtime')),
+ ('', 'datesort', None, _('sort by saved mtime'))],
+ _('[OPTION]...'))
+def debugstate(ui, repo, nodates=None, datesort=None):
+ """show the contents of the current dirstate"""
+ timestr = ""
+ showdate = not nodates
+ if datesort:
+ keyfunc = lambda x: (x[1][3], x[0]) # sort by mtime, then by filename
+ else:
+ keyfunc = None # sort by filename
+ for file_, ent in sorted(repo.dirstate._map.iteritems(), key=keyfunc):
+ if showdate:
+ if ent[3] == -1:
+ # Pad or slice to locale representation
+ locale_len = len(time.strftime("%Y-%m-%d %H:%M:%S ",
+ time.localtime(0)))
+ timestr = 'unset'
+ timestr = (timestr[:locale_len] +
+ ' ' * (locale_len - len(timestr)))
+ else:
+ timestr = time.strftime("%Y-%m-%d %H:%M:%S ",
+ time.localtime(ent[3]))
+ if ent[1] & 020000:
+ mode = 'lnk'
+ else:
+ mode = '%3o' % (ent[1] & 0777)
+ ui.write("%c %s %10d %s%s\n" % (ent[0], mode, ent[2], timestr, file_))
+ for f in repo.dirstate.copies():
+ ui.write(_("copy: %s -> %s\n") % (repo.dirstate.copied(f), f))
+
+@command('debugsub',
+ [('r', 'rev', '',
+ _('revision to check'), _('REV'))],
+ _('[-r REV] [REV]'))
+def debugsub(ui, repo, rev=None):
+ ctx = scmutil.revsingle(repo, rev, None)
+ for k, v in sorted(ctx.substate.items()):
+ ui.write('path %s\n' % k)
+ ui.write(' source %s\n' % v[0])
+ ui.write(' revision %s\n' % v[1])
+
+@command('debugwalk', walkopts, _('[OPTION]... [FILE]...'))
+def debugwalk(ui, repo, *pats, **opts):
+ """show how files match on given patterns"""
+ m = scmutil.match(repo[None], pats, opts)
+ items = list(repo.walk(m))
+ if not items:
+ return
+ fmt = 'f %%-%ds %%-%ds %%s' % (
+ max([len(abs) for abs in items]),
+ max([len(m.rel(abs)) for abs in items]))
+ for abs in items:
+ line = fmt % (abs, m.rel(abs), m.exact(abs) and 'exact' or '')
+ ui.write("%s\n" % line.rstrip())
+
+@command('debugwireargs',
+ [('', 'three', '', 'three'),
+ ('', 'four', '', 'four'),
+ ('', 'five', '', 'five'),
+ ] + remoteopts,
+ _('REPO [OPTIONS]... [ONE [TWO]]'))
+def debugwireargs(ui, repopath, *vals, **opts):
+ repo = hg.peer(ui, opts, repopath)
+ for opt in remoteopts:
+ del opts[opt[1]]
+ args = {}
+ for k, v in opts.iteritems():
+ if v:
+ args[k] = v
+ # run twice to check that we don't mess up the stream for the next command
+ res1 = repo.debugwireargs(*vals, **args)
+ res2 = repo.debugwireargs(*vals, **args)
+ ui.write("%s\n" % res1)
+ if res1 != res2:
+ ui.warn("%s\n" % res2)
+
+@command('^diff',
+ [('r', 'rev', [], _('revision'), _('REV')),
+ ('c', 'change', '', _('change made by revision'), _('REV'))
+ ] + diffopts + diffopts2 + walkopts + subrepoopts,
+ _('[OPTION]... ([-c REV] | [-r REV1 [-r REV2]]) [FILE]...'))
+def diff(ui, repo, *pats, **opts):
+ """diff repository (or selected files)
+
+ Show differences between revisions for the specified files.
+
+ Differences between files are shown using the unified diff format.
+
+ .. note::
+ diff may generate unexpected results for merges, as it will
+ default to comparing against the working directory's first
+ parent changeset if no revisions are specified.
+
+ When two revision arguments are given, then changes are shown
+ between those revisions. If only one revision is specified then
+ that revision is compared to the working directory, and, when no
+ revisions are specified, the working directory files are compared
+ to its parent.
+
+ Alternatively you can specify -c/--change with a revision to see
+ the changes in that changeset relative to its first parent.
+
+ Without the -a/--text option, diff will avoid generating diffs of
+ files it detects as binary. With -a, diff will generate a diff
+ anyway, probably with undesirable results.
+
+ Use the -g/--git option to generate diffs in the git extended diff
+ format. For more information, read :hg:`help diffs`.
+
+ .. container:: verbose
+
+ Examples:
+
+ - compare a file in the current working directory to its parent::
+
+ hg diff foo.c
+
+ - compare two historical versions of a directory, with rename info::
+
+ hg diff --git -r 1.0:1.2 lib/
+
+ - get change stats relative to the last change on some date::
+
+ hg diff --stat -r "date('may 2')"
+
+ - diff all newly-added files that contain a keyword::
+
+ hg diff "set:added() and grep(GNU)"
+
+ - compare a revision and its parents::
+
+ hg diff -c 9353 # compare against first parent
+ hg diff -r 9353^:9353 # same using revset syntax
+ hg diff -r 9353^2:9353 # compare against the second parent
+
+ Returns 0 on success.
+ """
+
+ revs = opts.get('rev')
+ change = opts.get('change')
+ stat = opts.get('stat')
+ reverse = opts.get('reverse')
+
+ if revs and change:
+ msg = _('cannot specify --rev and --change at the same time')
+ raise util.Abort(msg)
+ elif change:
+ node2 = scmutil.revsingle(repo, change, None).node()
+ node1 = repo[node2].p1().node()
+ else:
+ node1, node2 = scmutil.revpair(repo, revs)
+
+ if reverse:
+ node1, node2 = node2, node1
+
+ diffopts = patch.diffopts(ui, opts)
+ m = scmutil.match(repo[node2], pats, opts)
+ cmdutil.diffordiffstat(ui, repo, diffopts, node1, node2, m, stat=stat,
+ listsubrepos=opts.get('subrepos'))
+
+@command('^export',
+ [('o', 'output', '',
+ _('print output to file with formatted name'), _('FORMAT')),
+ ('', 'switch-parent', None, _('diff against the second parent')),
+ ('r', 'rev', [], _('revisions to export'), _('REV')),
+ ] + diffopts,
+ _('[OPTION]... [-o OUTFILESPEC] REV...'))
+def export(ui, repo, *changesets, **opts):
+ """dump the header and diffs for one or more changesets
+
+ Print the changeset header and diffs for one or more revisions.
+
+ The information shown in the changeset header is: author, date,
+ branch name (if non-default), changeset hash, parent(s) and commit
+ comment.
+
+ .. note::
+ export may generate unexpected diff output for merge
+ changesets, as it will compare the merge changeset against its
+ first parent only.
+
+ Output may be to a file, in which case the name of the file is
+ given using a format string. The formatting rules are as follows:
+
+ :``%%``: literal "%" character
+ :``%H``: changeset hash (40 hexadecimal digits)
+ :``%N``: number of patches being generated
+ :``%R``: changeset revision number
+ :``%b``: basename of the exporting repository
+ :``%h``: short-form changeset hash (12 hexadecimal digits)
+ :``%m``: first line of the commit message (only alphanumeric characters)
+ :``%n``: zero-padded sequence number, starting at 1
+ :``%r``: zero-padded changeset revision number
+
+ Without the -a/--text option, export will avoid generating diffs
+ of files it detects as binary. With -a, export will generate a
+ diff anyway, probably with undesirable results.
+
+ Use the -g/--git option to generate diffs in the git extended diff
+ format. See :hg:`help diffs` for more information.
+
+ With the --switch-parent option, the diff will be against the
+ second parent. It can be useful to review a merge.
+
+ .. container:: verbose
+
+ Examples:
+
+ - use export and import to transplant a bugfix to the current
+ branch::
+
+ hg export -r 9353 | hg import -
+
+ - export all the changesets between two revisions to a file with
+ rename information::
+
+ hg export --git -r 123:150 > changes.txt
+
+ - split outgoing changes into a series of patches with
+ descriptive names::
+
+ hg export -r "outgoing()" -o "%n-%m.patch"
+
+ Returns 0 on success.
+ """
+ changesets += tuple(opts.get('rev', []))
+ if not changesets:
+ raise util.Abort(_("export requires at least one changeset"))
+ revs = scmutil.revrange(repo, changesets)
+ if len(revs) > 1:
+ ui.note(_('exporting patches:\n'))
+ else:
+ ui.note(_('exporting patch:\n'))
+ cmdutil.export(repo, revs, template=opts.get('output'),
+ switch_parent=opts.get('switch_parent'),
+ opts=patch.diffopts(ui, opts))
+
+@command('^forget', walkopts, _('[OPTION]... FILE...'))
+def forget(ui, repo, *pats, **opts):
+ """forget the specified files on the next commit
+
+ Mark the specified files so they will no longer be tracked
+ after the next commit.
+
+ This only removes files from the current branch, not from the
+ entire project history, and it does not delete them from the
+ working directory.
+
+ To undo a forget before the next commit, see :hg:`add`.
+
+ .. container:: verbose
+
+ Examples:
+
+ - forget newly-added binary files::
+
+ hg forget "set:added() and binary()"
+
+ - forget files that would be excluded by .hgignore::
+
+ hg forget "set:hgignore()"
+
+ Returns 0 on success.
+ """
+
+ if not pats:
+ raise util.Abort(_('no files specified'))
+
+ m = scmutil.match(repo[None], pats, opts)
+ s = repo.status(match=m, clean=True)
+ forget = sorted(s[0] + s[1] + s[3] + s[6])
+ errs = 0
+
+ for f in m.files():
+ if f not in repo.dirstate and not os.path.isdir(m.rel(f)):
+ if os.path.exists(m.rel(f)):
+ ui.warn(_('not removing %s: file is already untracked\n')
+ % m.rel(f))
+ errs = 1
+
+ for f in forget:
+ if ui.verbose or not m.exact(f):
+ ui.status(_('removing %s\n') % m.rel(f))
+
+ repo[None].forget(forget)
+ return errs
+
+@command(
+ 'graft',
+ [('c', 'continue', False, _('resume interrupted graft')),
+ ('e', 'edit', False, _('invoke editor on commit messages')),
+ ('D', 'currentdate', False,
+ _('record the current date as commit date')),
+ ('U', 'currentuser', False,
+ _('record the current user as committer'), _('DATE'))]
+ + commitopts2 + mergetoolopts,
+ _('[OPTION]... REVISION...'))
+def graft(ui, repo, *revs, **opts):
+ '''copy changes from other branches onto the current branch
+
+ This command uses Mercurial's merge logic to copy individual
+ changes from other branches without merging branches in the
+ history graph. This is sometimes known as 'backporting' or
+ 'cherry-picking'. By default, graft will copy user, date, and
+ description from the source changesets.
+
+ Changesets that are ancestors of the current revision, that have
+ already been grafted, or that are merges will be skipped.
+
+ If a graft merge results in conflicts, the graft process is
+ aborted so that the current merge can be manually resolved. Once
+ all conflicts are addressed, the graft process can be continued
+ with the -c/--continue option.
+
+ .. note::
+ The -c/--continue option does not reapply earlier options.
+
+ .. container:: verbose
+
+ Examples:
+
+ - copy a single change to the stable branch and edit its description::
+
+ hg update stable
+ hg graft --edit 9393
+
+ - graft a range of changesets with one exception, updating dates::
+
+ hg graft -D "2085::2093 and not 2091"
+
+ - continue a graft after resolving conflicts::
+
+ hg graft -c
+
+ - show the source of a grafted changeset::
+
+ hg log --debug -r tip
+
+ Returns 0 on successful completion.
+ '''
+
+ if not opts.get('user') and opts.get('currentuser'):
+ opts['user'] = ui.username()
+ if not opts.get('date') and opts.get('currentdate'):
+ opts['date'] = "%d %d" % util.makedate()
+
+ editor = None
+ if opts.get('edit'):
+ editor = cmdutil.commitforceeditor
+
+ cont = False
+ if opts['continue']:
+ cont = True
+ if revs:
+ raise util.Abort(_("can't specify --continue and revisions"))
+ # read in unfinished revisions
+ try:
+ nodes = repo.opener.read('graftstate').splitlines()
+ revs = [repo[node].rev() for node in nodes]
+ except IOError, inst:
+ if inst.errno != errno.ENOENT:
+ raise
+ raise util.Abort(_("no graft state found, can't continue"))
+ else:
+ cmdutil.bailifchanged(repo)
+ if not revs:
+ raise util.Abort(_('no revisions specified'))
+ revs = scmutil.revrange(repo, revs)
+
+ # check for merges
+ for ctx in repo.set('%ld and merge()', revs):
+ ui.warn(_('skipping ungraftable merge revision %s\n') % ctx.rev())
+ revs.remove(ctx.rev())
+ if not revs:
+ return -1
+
+ # check for ancestors of dest branch
+ for ctx in repo.set('::. and %ld', revs):
+ ui.warn(_('skipping ancestor revision %s\n') % ctx.rev())
+ revs.remove(ctx.rev())
+ if not revs:
+ return -1
+
+ # analyze revs for earlier grafts
+ ids = {}
+ for ctx in repo.set("%ld", revs):
+ ids[ctx.hex()] = ctx.rev()
+ n = ctx.extra().get('source')
+ if n:
+ ids[n] = ctx.rev()
+
+ # check ancestors for earlier grafts
+ ui.debug('scanning for duplicate grafts\n')
+ for ctx in repo.set("::. - ::%ld", revs):
+ n = ctx.extra().get('source')
+ if n in ids:
+ r = repo[n].rev()
+ if r in revs:
+ ui.warn(_('skipping already grafted revision %s\n') % r)
+ revs.remove(r)
+ elif ids[n] in revs:
+ ui.warn(_('skipping already grafted revision %s '
+ '(same origin %d)\n') % (ids[n], r))
+ revs.remove(ids[n])
+ elif ctx.hex() in ids:
+ r = ids[ctx.hex()]
+ ui.warn(_('skipping already grafted revision %s '
+ '(was grafted from %d)\n') % (r, ctx.rev()))
+ revs.remove(r)
+ if not revs:
+ return -1
+
+ for pos, ctx in enumerate(repo.set("%ld", revs)):
+ current = repo['.']
+ ui.status(_('grafting revision %s\n') % ctx.rev())
+
+ # we don't merge the first commit when continuing
+ if not cont:
+ # perform the graft merge with p1(rev) as 'ancestor'
+ try:
+ # ui.forcemerge is an internal variable, do not document
+ repo.ui.setconfig('ui', 'forcemerge', opts.get('tool', ''))
+ stats = mergemod.update(repo, ctx.node(), True, True, False,
+ ctx.p1().node())
+ finally:
+ ui.setconfig('ui', 'forcemerge', '')
+ # drop the second merge parent
+ repo.dirstate.setparents(current.node(), nullid)
+ repo.dirstate.write()
+ # fix up dirstate for copies and renames
+ cmdutil.duplicatecopies(repo, ctx.rev(), current.node(), nullid)
+ # report any conflicts
+ if stats and stats[3] > 0:
+ # write out state for --continue
+ nodelines = [repo[rev].hex() + "\n" for rev in revs[pos:]]
+ repo.opener.write('graftstate', ''.join(nodelines))
+ raise util.Abort(
+ _("unresolved conflicts, can't continue"),
+ hint=_('use hg resolve and hg graft --continue'))
+ else:
+ cont = False
+
+ # commit
+ source = ctx.extra().get('source')
+ if not source:
+ source = ctx.hex()
+ extra = {'source': source}
+ user = ctx.user()
+ if opts.get('user'):
+ user = opts['user']
+ date = ctx.date()
+ if opts.get('date'):
+ date = opts['date']
+ repo.commit(text=ctx.description(), user=user,
+ date=date, extra=extra, editor=editor)
+
+ # remove state when we complete successfully
+ if os.path.exists(repo.join('graftstate')):
+ util.unlinkpath(repo.join('graftstate'))
+
+ return 0
+
@command('grep',
    [('0', 'print0', None, _('end fields with NUL')),
    ('', 'all', None, _('print all revisions that match')),
    ('a', 'text', None, _('treat all files as text')),
    ('f', 'follow', None,
     _('follow changeset history,'
       ' or file history across copies and renames')),
    ('i', 'ignore-case', None, _('ignore case when matching')),
    ('l', 'files-with-matches', None,
     _('print only filenames and revisions that match')),
    ('n', 'line-number', None, _('print matching line numbers')),
    ('r', 'rev', [],
     _('only search files changed within revision range'), _('REV')),
    ('u', 'user', None, _('list the author (long with -v)')),
    ('d', 'date', None, _('list the date (short with -q)')),
    ] + walkopts,
    _('[OPTION]... PATTERN [FILE]...'))
def grep(ui, repo, pattern, *pats, **opts):
    """search for a pattern in specified files and revisions

    Search revisions of files for a regular expression.

    This command behaves differently than Unix grep. It only accepts
    Python/Perl regexps. It searches repository history, not the
    working directory. It always prints the revision number in which a
    match appears.

    By default, grep only prints output for the first revision of a
    file in which it finds a match. To get it to print every revision
    that contains a change in match status ("-" for a match that
    becomes a non-match, or "+" for a non-match that becomes a match),
    use the --all flag.

    Returns 0 if a match is found, 1 otherwise.
    """
    reflags = 0
    if opts.get('ignore_case'):
        reflags |= re.I
    try:
        regexp = re.compile(pattern, reflags)
    except re.error, inst:
        ui.warn(_("grep: invalid match pattern: %s\n") % inst)
        return 1
    # output field separator / record terminator; NUL-delimited with --print0
    sep, eol = ':', '\n'
    if opts.get('print0'):
        sep = eol = '\0'

    # cache filelog objects: the same file is typically read at many revs
    getfile = util.lrucachefunc(repo.file)

    def matchlines(body):
        # Yield (linenum, colstart, colend, line) for every regexp match in
        # body.  linenum is 1-based; columns are offsets within the line.
        begin = 0
        linenum = 0
        while True:
            match = regexp.search(body, begin)
            if not match:
                break
            mstart, mend = match.span()
            linenum += body.count('\n', begin, mstart) + 1
            # rfind/find return -1 on failure, so the "+ 1 or ..." idiom
            # falls back to the start/end of the body
            lstart = body.rfind('\n', begin, mstart) + 1 or begin
            begin = body.find('\n', mend) + 1 or len(body) + 1
            lend = begin - 1
            yield linenum, mstart - lstart, mend - lstart, body[lstart:lend]

    class linestate(object):
        # One matching line within one file revision.  __eq__ compares only
        # the line text so difflib can detect match-state changes between a
        # revision and its parent.
        def __init__(self, line, linenum, colstart, colend):
            self.line = line
            self.linenum = linenum
            self.colstart = colstart
            self.colend = colend

        def __hash__(self):
            return hash((self.linenum, self.line))

        def __eq__(self, other):
            return self.line == other.line

    # matches[rev][fn] -> [linestate, ...]; copies[rev][fn] -> copy source
    matches = {}
    copies = {}
    def grepbody(fn, rev, body):
        # record all matching lines of fn at rev into matches[rev][fn]
        matches[rev].setdefault(fn, [])
        m = matches[rev][fn]
        for lnum, cstart, cend, line in matchlines(body):
            s = linestate(line, lnum, cstart, cend)
            m.append(s)

    def difflinestates(a, b):
        # yield ('+'/'-', linestate) for matching lines added to or removed
        # from the parent state a relative to the child state b (--all mode)
        sm = difflib.SequenceMatcher(None, a, b)
        for tag, alo, ahi, blo, bhi in sm.get_opcodes():
            if tag == 'insert':
                for i in xrange(blo, bhi):
                    yield ('+', b[i])
            elif tag == 'delete':
                for i in xrange(alo, ahi):
                    yield ('-', a[i])
            elif tag == 'replace':
                for i in xrange(alo, ahi):
                    yield ('-', a[i])
                for i in xrange(blo, bhi):
                    yield ('+', b[i])

    def display(fn, ctx, pstates, states):
        # Print the matches for one file at one changeset.
        # Returns True if anything was printed.
        rev = ctx.rev()
        datefunc = ui.quiet and util.shortdate or util.datestr
        found = False
        filerevmatches = {}
        def binary():
            flog = getfile(fn)
            return util.binary(flog.read(ctx.filenode(fn)))

        if opts.get('all'):
            iter = difflinestates(pstates, states)
        else:
            iter = [('', l) for l in states]
        for change, l in iter:
            cols = [fn, str(rev)]
            before, match, after = None, None, None
            if opts.get('line_number'):
                cols.append(str(l.linenum))
            if opts.get('all'):
                cols.append(change)
            if opts.get('user'):
                cols.append(ui.shortuser(ctx.user()))
            if opts.get('date'):
                cols.append(datefunc(ctx.date()))
            if opts.get('files_with_matches'):
                # -l: at most one output line per (file, rev) pair
                c = (fn, rev)
                if c in filerevmatches:
                    continue
                filerevmatches[c] = 1
            else:
                before = l.line[:l.colstart]
                match = l.line[l.colstart:l.colend]
                after = l.line[l.colend:]
            ui.write(sep.join(cols))
            if before is not None:
                if not opts.get('text') and binary():
                    ui.write(sep + " Binary file matches")
                else:
                    ui.write(sep + before)
                    ui.write(match, label='grep.match')
                    ui.write(after)
            ui.write(eol)
            found = True
        return found

    skip = {}
    revfiles = {}
    matchfn = scmutil.match(repo[None], pats, opts)
    found = False
    follow = opts.get('follow')

    def prep(ctx, fns):
        # walkchangerevs callback: collect match state for the files touched
        # in ctx and for their (copy-aware, with --follow) parent versions
        rev = ctx.rev()
        pctx = ctx.p1()
        parent = pctx.rev()
        matches.setdefault(rev, {})
        matches.setdefault(parent, {})
        files = revfiles.setdefault(rev, [])
        for fn in fns:
            flog = getfile(fn)
            try:
                fnode = ctx.filenode(fn)
            except error.LookupError:
                continue

            copied = flog.renamed(fnode)
            copy = follow and copied and copied[0]
            if copy:
                copies.setdefault(rev, {})[fn] = copy
            if fn in skip:
                # already reported a newer revision of this file;
                # propagate the skip across the rename
                if copy:
                    skip[copy] = True
                continue
            files.append(fn)

            if fn not in matches[rev]:
                grepbody(fn, rev, flog.read(fnode))

            pfn = copy or fn
            if pfn not in matches[parent]:
                try:
                    fnode = pctx.filenode(pfn)
                    grepbody(pfn, parent, flog.read(fnode))
                except error.LookupError:
                    pass

    for ctx in cmdutil.walkchangerevs(repo, matchfn, opts, prep):
        rev = ctx.rev()
        parent = ctx.p1().rev()
        for fn in sorted(revfiles.get(rev, [])):
            states = matches[rev][fn]
            copy = copies.get(rev, {}).get(fn)
            if fn in skip:
                if copy:
                    skip[copy] = True
                continue
            pstates = matches.get(parent, {}).get(copy or fn, [])
            if pstates or states:
                r = display(fn, ctx, pstates, states)
                found = found or r
                if r and not opts.get('all'):
                    # without --all only the newest matching revision of
                    # each file (and of its copy source) is reported
                    skip[fn] = True
                    if copy:
                        skip[copy] = True
        # drop state for revisions we are done with to bound memory use
        del matches[rev]
        del revfiles[rev]

    return not found
+
@command('heads',
    [('r', 'rev', '',
      _('show only heads which are descendants of STARTREV'), _('STARTREV')),
    ('t', 'topo', False, _('show topological heads only')),
    ('a', 'active', False, _('show active branchheads only (DEPRECATED)')),
    ('c', 'closed', False, _('show normal and closed branch heads')),
    ] + templateopts,
    _('[-ac] [-r STARTREV] [REV]...'))
def heads(ui, repo, *branchrevs, **opts):
    """show current repository heads or show branch heads

    With no arguments, show all repository branch heads.

    Repository "heads" are changesets with no child changesets. They are
    where development generally takes place and are the usual targets
    for update and merge operations. Branch heads are changesets that have
    no child changeset on the same branch.

    If one or more REVs are given, only branch heads on the branches
    associated with the specified changesets are shown. This means
    that you can use :hg:`heads foo` to see the heads on a branch
    named ``foo``.

    If -c/--closed is specified, also show branch heads marked closed
    (see :hg:`commit --close-branch`).

    If STARTREV is specified, only those heads that are descendants of
    STARTREV will be displayed.

    If -t/--topo is specified, named branch mechanics will be ignored and only
    changesets without children will be shown.

    Returns 0 if matching heads are found, 1 if not.
    """

    start = None
    # 'rev' is always present in opts ('' by default); revsingle with an
    # empty spec falls back to the working context, whose node() is None.
    if 'rev' in opts:
        start = scmutil.revsingle(repo, opts['rev'], None).node()

    if opts.get('topo'):
        # topological heads: no named-branch logic at all
        heads = [repo[h] for h in repo.heads(start)]
    else:
        # collect the branch heads of every named branch
        heads = []
        for branch in repo.branchmap():
            heads += repo.branchheads(branch, start, opts.get('closed'))
        heads = [repo[h] for h in heads]

    if branchrevs:
        # restrict to the branches of the given revisions
        branches = set(repo[br].branch() for br in branchrevs)
        heads = [h for h in heads if h.branch() in branches]

    if opts.get('active') and branchrevs:
        # DEPRECATED --active: keep only heads that are also DAG heads
        dagheads = repo.heads(start)
        heads = [h for h in heads if h.node() in dagheads]

    if branchrevs:
        # warn about requested branches that yielded no heads
        haveheads = set(h.branch() for h in heads)
        if branches - haveheads:
            headless = ', '.join(b for b in branches - haveheads)
            msg = _('no open branch heads found on branches %s')
            if opts.get('rev'):
                # format after translating so the constant string is the
                # msgid looked up in the catalog (formatting first made
                # the message untranslatable)
                msg += _(' (started at %s)') % opts['rev']
            ui.warn((msg + '\n') % headless)

    if not heads:
        return 1

    # newest first
    heads = sorted(heads, key=lambda x: -x.rev())
    displayer = cmdutil.show_changeset(ui, repo, opts)
    for ctx in heads:
        displayer.show(ctx)
    displayer.close()
+
+@command('help',
+ [('e', 'extension', None, _('show only help for extensions')),
+ ('c', 'command', None, _('show only help for commands'))],
+ _('[-ec] [TOPIC]'))
+def help_(ui, name=None, unknowncmd=False, full=True, **opts):
+ """show help for a given topic or a help overview
+
+ With no arguments, print a list of commands with short help messages.
+
+ Given a topic, extension, or command name, print help for that
+ topic.
+
+ Returns 0 if successful.
+ """
+
+ textwidth = min(ui.termwidth(), 80) - 2
+
+ def optrst(options):
+ data = []
+ multioccur = False
+ for option in options:
+ if len(option) == 5:
+ shortopt, longopt, default, desc, optlabel = option
+ else:
+ shortopt, longopt, default, desc = option
+ optlabel = _("VALUE") # default label
+
+ if _("DEPRECATED") in desc and not ui.verbose:
+ continue
+
+ so = ''
+ if shortopt:
+ so = '-' + shortopt
+ lo = '--' + longopt
+ if default:
+ desc += _(" (default: %s)") % default
+
+ if isinstance(default, list):
+ lo += " %s [+]" % optlabel
+ multioccur = True
+ elif (default is not None) and not isinstance(default, bool):
+ lo += " %s" % optlabel
+
+ data.append((so, lo, desc))
+
+ rst = minirst.maketable(data, 1)
+
+ if multioccur:
+ rst += _("\n[+] marked option can be specified multiple times\n")
+
+ return rst
+
+ # list all option lists
+ def opttext(optlist, width):
+ rst = ''
+ if not optlist:
+ return ''
+
+ for title, options in optlist:
+ rst += '\n%s\n' % title
+ if options:
+ rst += "\n"
+ rst += optrst(options)
+ rst += '\n'
+
+ return '\n' + minirst.format(rst, width)
+
+ def addglobalopts(optlist, aliases):
+ if ui.quiet:
+ return []
+
+ if ui.verbose:
+ optlist.append((_("global options:"), globalopts))
+ if name == 'shortlist':
+ optlist.append((_('use "hg help" for the full list '
+ 'of commands'), ()))
+ else:
+ if name == 'shortlist':
+ msg = _('use "hg help" for the full list of commands '
+ 'or "hg -v" for details')
+ elif name and not full:
+ msg = _('use "hg help %s" to show the full help text' % name)
+ elif aliases:
+ msg = _('use "hg -v help%s" to show builtin aliases and '
+ 'global options') % (name and " " + name or "")
+ else:
+ msg = _('use "hg -v help %s" to show more info') % name
+ optlist.append((msg, ()))
+
+ def helpcmd(name):
+ try:
+ aliases, entry = cmdutil.findcmd(name, table, strict=unknowncmd)
+ except error.AmbiguousCommand, inst:
+ # py3k fix: except vars can't be used outside the scope of the
+ # except block, nor can be used inside a lambda. python issue4617
+ prefix = inst.args[0]
+ select = lambda c: c.lstrip('^').startswith(prefix)
+ helplist(select)
+ return
+
+ # check if it's an invalid alias and display its error if it is
+ if getattr(entry[0], 'badalias', False):
+ if not unknowncmd:
+ entry[0](ui)
+ return
+
+ rst = ""
+
+ # synopsis
+ if len(entry) > 2:
+ if entry[2].startswith('hg'):
+ rst += "%s\n" % entry[2]
+ else:
+ rst += 'hg %s %s\n' % (aliases[0], entry[2])
+ else:
+ rst += 'hg %s\n' % aliases[0]
+
+ # aliases
+ if full and not ui.quiet and len(aliases) > 1:
+ rst += _("\naliases: %s\n") % ', '.join(aliases[1:])
+
+ # description
+ doc = gettext(entry[0].__doc__)
+ if not doc:
+ doc = _("(no help text available)")
+ if util.safehasattr(entry[0], 'definition'): # aliased command
+ if entry[0].definition.startswith('!'): # shell alias
+ doc = _('shell alias for::\n\n %s') % entry[0].definition[1:]
+ else:
+ doc = _('alias for: hg %s\n\n%s') % (entry[0].definition, doc)
+ if ui.quiet or not full:
+ doc = doc.splitlines()[0]
+ rst += "\n" + doc + "\n"
+
+ # check if this command shadows a non-trivial (multi-line)
+ # extension help text
+ try:
+ mod = extensions.find(name)
+ doc = gettext(mod.__doc__) or ''
+ if '\n' in doc.strip():
+ msg = _('use "hg help -e %s" to show help for '
+ 'the %s extension') % (name, name)
+ rst += '\n%s\n' % msg
+ except KeyError:
+ pass
+
+ # options
+ if not ui.quiet and entry[1]:
+ rst += '\noptions:\n\n'
+ rst += optrst(entry[1])
+
+ if ui.verbose:
+ rst += '\nglobal options:\n\n'
+ rst += optrst(globalopts)
+
+ keep = ui.verbose and ['verbose'] or []
+ formatted, pruned = minirst.format(rst, textwidth, keep=keep)
+ ui.write(formatted)
+
+ if not ui.verbose:
+ if not full:
+ ui.write(_('\nuse "hg help %s" to show the full help text\n')
+ % name)
+ elif not ui.quiet:
+ ui.write(_('\nuse "hg -v help %s" to show more info\n') % name)
+
+
+ def helplist(select=None):
+ # list of commands
+ if name == "shortlist":
+ header = _('basic commands:\n\n')
+ else:
+ header = _('list of commands:\n\n')
+
+ h = {}
+ cmds = {}
+ for c, e in table.iteritems():
+ f = c.split("|", 1)[0]
+ if select and not select(f):
+ continue
+ if (not select and name != 'shortlist' and
+ e[0].__module__ != __name__):
+ continue
+ if name == "shortlist" and not f.startswith("^"):
+ continue
+ f = f.lstrip("^")
+ if not ui.debugflag and f.startswith("debug"):
+ continue
+ doc = e[0].__doc__
+ if doc and 'DEPRECATED' in doc and not ui.verbose:
+ continue
+ doc = gettext(doc)
+ if not doc:
+ doc = _("(no help text available)")
+ h[f] = doc.splitlines()[0].rstrip()
+ cmds[f] = c.lstrip("^")
+
+ if not h:
+ ui.status(_('no commands defined\n'))
+ return
+
+ ui.status(header)
+ fns = sorted(h)
+ m = max(map(len, fns))
+ for f in fns:
+ if ui.verbose:
+ commands = cmds[f].replace("|",", ")
+ ui.write(" %s:\n %s\n"%(commands, h[f]))
+ else:
+ ui.write('%s\n' % (util.wrap(h[f], textwidth,
+ initindent=' %-*s ' % (m, f),
+ hangindent=' ' * (m + 4))))
+
+ if not name:
+ text = help.listexts(_('enabled extensions:'), extensions.enabled())
+ if text:
+ ui.write("\n%s" % minirst.format(text, textwidth))
+
+ ui.write(_("\nadditional help topics:\n\n"))
+ topics = []
+ for names, header, doc in help.helptable:
+ topics.append((sorted(names, key=len, reverse=True)[0], header))
+ topics_len = max([len(s[0]) for s in topics])
+ for t, desc in topics:
+ ui.write(" %-*s %s\n" % (topics_len, t, desc))
+
+ optlist = []
+ addglobalopts(optlist, True)
+ ui.write(opttext(optlist, textwidth))
+
+ def helptopic(name):
+ for names, header, doc in help.helptable:
+ if name in names:
+ break
+ else:
+ raise error.UnknownCommand(name)
+
+ # description
+ if not doc:
+ doc = _("(no help text available)")
+ if util.safehasattr(doc, '__call__'):
+ doc = doc()
+
+ ui.write("%s\n\n" % header)
+ ui.write("%s" % minirst.format(doc, textwidth, indent=4))
+ try:
+ cmdutil.findcmd(name, table)
+ ui.write(_('\nuse "hg help -c %s" to see help for '
+ 'the %s command\n') % (name, name))
+ except error.UnknownCommand:
+ pass
+
+ def helpext(name):
+ try:
+ mod = extensions.find(name)
+ doc = gettext(mod.__doc__) or _('no help text available')
+ except KeyError:
+ mod = None
+ doc = extensions.disabledext(name)
+ if not doc:
+ raise error.UnknownCommand(name)
+
+ if '\n' not in doc:
+ head, tail = doc, ""
+ else:
+ head, tail = doc.split('\n', 1)
+ ui.write(_('%s extension - %s\n\n') % (name.split('.')[-1], head))
+ if tail:
+ ui.write(minirst.format(tail, textwidth))
+ ui.status('\n')
+
+ if mod:
+ try:
+ ct = mod.cmdtable
+ except AttributeError:
+ ct = {}
+ modcmds = set([c.split('|', 1)[0] for c in ct])
+ helplist(modcmds.__contains__)
+ else:
+ ui.write(_('use "hg help extensions" for information on enabling '
+ 'extensions\n'))
+
+ def helpextcmd(name):
+ cmd, ext, mod = extensions.disabledcmd(ui, name, ui.config('ui', 'strict'))
+ doc = gettext(mod.__doc__).splitlines()[0]
+
+ msg = help.listexts(_("'%s' is provided by the following "
+ "extension:") % cmd, {ext: doc}, indent=4)
+ ui.write(minirst.format(msg, textwidth))
+ ui.write('\n')
+ ui.write(_('use "hg help extensions" for information on enabling '
+ 'extensions\n'))
+
+ if name and name != 'shortlist':
+ i = None
+ if unknowncmd:
+ queries = (helpextcmd,)
+ elif opts.get('extension'):
+ queries = (helpext,)
+ elif opts.get('command'):
+ queries = (helpcmd,)
+ else:
+ queries = (helptopic, helpcmd, helpext, helpextcmd)
+ for f in queries:
+ try:
+ f(name)
+ i = None
+ break
+ except error.UnknownCommand, inst:
+ i = inst
+ if i:
+ raise i
+ else:
+ # program name
+ ui.status(_("Mercurial Distributed SCM\n"))
+ ui.status('\n')
+ helplist()
+
+
@command('identify|id',
    [('r', 'rev', '',
     _('identify the specified revision'), _('REV')),
    ('n', 'num', None, _('show local revision number')),
    ('i', 'id', None, _('show global revision id')),
    ('b', 'branch', None, _('show branch')),
    ('t', 'tags', None, _('show tags')),
    ('B', 'bookmarks', None, _('show bookmarks'))],
    _('[-nibtB] [-r REV] [SOURCE]'))
def identify(ui, repo, source=None, rev=None,
             num=None, id=None, branch=None, tags=None, bookmarks=None):
    """identify the working copy or specified revision

    Print a summary identifying the repository state at REV using one or
    two parent hash identifiers, followed by a "+" if the working
    directory has uncommitted changes, the branch name (if not default),
    a list of tags, and a list of bookmarks.

    When REV is not given, print a summary of the current state of the
    repository.

    Specifying a path to a repository root or Mercurial bundle will
    cause lookup to operate on that repository/bundle.

    .. container:: verbose

      Examples:

      - generate a build identifier for the working directory::

          hg id --id > build-id.dat

      - find the revision corresponding to a tag::

          hg id -n -r 1.3

      - check the most recent revision of a remote repository::

          hg id -r tip http://selenic.com/hg/

    Returns 0 if successful.
    """

    if not repo and not source:
        raise util.Abort(_("there is no Mercurial repository here "
                           "(.hg not found)"))

    hexfunc = ui.debugflag and hex or short
    # no selector flag given: print the default summary (id plus branch/
    # tag/bookmark decorations)
    default = not (num or id or branch or tags or bookmarks)
    output = []
    revs = []

    if source:
        # operate on the given repository/bundle instead of the local repo
        source, branches = hg.parseurl(ui.expandpath(source))
        repo = hg.peer(ui, {}, source)
        revs, checkout = hg.addbranchrevs(repo, repo, branches, None)

    if not repo.local():
        # remote peer: only the global id (and bookmarks via listkeys)
        # can be queried over the wire protocol
        if num or branch or tags:
            raise util.Abort(
                _("can't query remote revision number, branch, or tags"))
        if not rev and revs:
            rev = revs[0]
        if not rev:
            rev = "tip"

        remoterev = repo.lookup(rev)
        if default or id:
            output = [hexfunc(remoterev)]

        def getbms():
            # bookmarks pointing at remoterev, if the peer supports them
            bms = []

            if 'bookmarks' in repo.listkeys('namespaces'):
                hexremoterev = hex(remoterev)
                bms = [bm for bm, bmr in repo.listkeys('bookmarks').iteritems()
                       if bmr == hexremoterev]

            return bms

        if bookmarks:
            output.extend(getbms())
        elif default and not ui.quiet:
            # multiple bookmarks for a single parent separated by '/'
            bm = '/'.join(getbms())
            if bm:
                output.append(bm)
    else:
        if not rev:
            # working directory: identify by its parent(s), with a "+"
            # suffix when there are uncommitted changes
            ctx = repo[None]
            parents = ctx.parents()
            changed = ""
            if default or id or num:
                changed = util.any(repo.status()) and "+" or ""
            if default or id:
                output = ["%s%s" %
                  ('+'.join([hexfunc(p.node()) for p in parents]), changed)]
            if num:
                output.append("%s%s" %
                  ('+'.join([str(p.rev()) for p in parents]), changed))
        else:
            ctx = scmutil.revsingle(repo, rev)
            if default or id:
                output = [hexfunc(ctx.node())]
            if num:
                output.append(str(ctx.rev()))

        if default and not ui.quiet:
            # decorate the default summary; 'default' branch is implied
            b = ctx.branch()
            if b != 'default':
                output.append("(%s)" % b)

            # multiple tags for a single parent separated by '/'
            t = '/'.join(ctx.tags())
            if t:
                output.append(t)

            # multiple bookmarks for a single parent separated by '/'
            bm = '/'.join(ctx.bookmarks())
            if bm:
                output.append(bm)
        else:
            if branch:
                output.append(ctx.branch())

            if tags:
                output.extend(ctx.tags())

            if bookmarks:
                output.extend(ctx.bookmarks())

    ui.write("%s\n" % ' '.join(output))
+
@command('import|patch',
    [('p', 'strip', 1,
     _('directory strip option for patch. This has the same '
       'meaning as the corresponding patch option'), _('NUM')),
    ('b', 'base', '', _('base path (DEPRECATED)'), _('PATH')),
    ('e', 'edit', False, _('invoke editor on commit messages')),
    ('f', 'force', None, _('skip check for outstanding uncommitted changes')),
    ('', 'no-commit', None,
     _("don't commit, just update the working directory")),
    ('', 'bypass', None,
     _("apply patch without touching the working directory")),
    ('', 'exact', None,
     _('apply patch to the nodes from which it was generated')),
    ('', 'import-branch', None,
     _('use any branch information in patch (implied by --exact)'))] +
    commitopts + commitopts2 + similarityopts,
    _('[OPTION]... PATCH...'))
def import_(ui, repo, patch1=None, *patches, **opts):
    """import an ordered set of patches

    Import a list of patches and commit them individually (unless
    --no-commit is specified).

    If there are outstanding changes in the working directory, import
    will abort unless given the -f/--force flag.

    You can import a patch straight from a mail message. Even patches
    as attachments work (to use the body part, it must have type
    text/plain or text/x-patch). From and Subject headers of email
    message are used as default committer and commit message. All
    text/plain body parts before first diff are added to commit
    message.

    If the imported patch was generated by :hg:`export`, user and
    description from patch override values from message headers and
    body. Values given on command line with -m/--message and -u/--user
    override these.

    If --exact is specified, import will set the working directory to
    the parent of each patch before applying it, and will abort if the
    resulting changeset has a different ID than the one recorded in
    the patch. This may happen due to character set problems or other
    deficiencies in the text patch format.

    Use --bypass to apply and commit patches directly to the
    repository, not touching the working directory. Without --exact,
    patches will be applied on top of the working directory parent
    revision.

    With -s/--similarity, hg will attempt to discover renames and
    copies in the patch in the same way as 'addremove'.

    To read a patch from standard input, use "-" as the patch name. If
    a URL is specified, the patch will be downloaded from it.
    See :hg:`help dates` for a list of formats valid for -d/--date.

    .. container:: verbose

      Examples:

      - import a traditional patch from a website and detect renames::

          hg import -s 80 http://example.com/bugfix.patch

      - import a changeset from an hgweb server::

          hg import http://www.selenic.com/hg/rev/5ca8c111e9aa

      - import all the patches in an Unix-style mbox::

          hg import incoming-patches.mbox

      - attempt to exactly restore an exported changeset (not always
        possible)::

          hg import --exact proposed-fix.patch

    Returns 0 on success.
    """

    if not patch1:
        raise util.Abort(_('need at least one patch to import'))

    patches = (patch1,) + patches

    date = opts.get('date')
    if date:
        opts['date'] = util.parsedate(date)

    editor = cmdutil.commiteditor
    if opts.get('edit'):
        editor = cmdutil.commitforceeditor

    # --bypass commits straight into the store without touching the
    # working directory; several options are incompatible with that
    update = not opts.get('bypass')
    if not update and opts.get('no_commit'):
        raise util.Abort(_('cannot use --no-commit with --bypass'))
    try:
        sim = float(opts.get('similarity') or 0)
    except ValueError:
        raise util.Abort(_('similarity must be a number'))
    if sim < 0 or sim > 100:
        raise util.Abort(_('similarity must be between 0 and 100'))
    if sim and not update:
        raise util.Abort(_('cannot use --similarity with --bypass'))

    if (opts.get('exact') or not opts.get('force')) and update:
        cmdutil.bailifchanged(repo)

    base = opts["base"]
    strip = opts["strip"]
    wlock = lock = tr = None
    # commit messages collected in --no-commit mode, saved at the end
    msgs = []

    def checkexact(repo, n, nodeid):
        # with --exact, the committed node must match the id recorded in
        # the patch header; otherwise undo the commit and abort
        if opts.get('exact') and hex(n) != nodeid:
            repo.rollback()
            raise util.Abort(_('patch is damaged or loses information'))

    def tryone(ui, hunk, parents):
        # apply a single patch; returns (status message or None, new node)
        tmpname, message, user, date, branch, nodeid, p1, p2 = \
            patch.extract(ui, hunk)

        if not tmpname:
            return (None, None)
        msg = _('applied to working directory')

        try:
            cmdline_message = cmdutil.logmessage(ui, opts)
            if cmdline_message:
                # pickup the cmdline msg
                message = cmdline_message
            elif message:
                # pickup the patch msg
                message = message.strip()
            else:
                # launch the editor
                message = None
            ui.debug('message:\n%s\n' % message)

            # decide which parents the patch applies on top of
            if len(parents) == 1:
                parents.append(repo[nullid])
            if opts.get('exact'):
                if not nodeid or not p1:
                    raise util.Abort(_('not a Mercurial patch'))
                p1 = repo[p1]
                p2 = repo[p2 or nullid]
            elif p2:
                try:
                    p1 = repo[p1]
                    p2 = repo[p2]
                    # Without any options, consider p2 only if the
                    # patch is being applied on top of the recorded
                    # first parent.
                    if p1 != parents[0]:
                        p1 = parents[0]
                        p2 = repo[nullid]
                except error.RepoError:
                    p1, p2 = parents
            else:
                p1, p2 = parents

            n = None
            if update:
                # apply to the working directory, updating to the patch's
                # recorded parents first when they differ (--exact or a
                # merge patch)
                if p1 != parents[0]:
                    hg.clean(repo, p1.node())
                if p2 != parents[1]:
                    repo.dirstate.setparents(p1.node(), p2.node())

                if opts.get('exact') or opts.get('import_branch'):
                    repo.dirstate.setbranch(branch or 'default')

                files = set()
                patch.patch(ui, repo, tmpname, strip=strip, files=files,
                            eolmode=None, similarity=sim / 100.0)
                files = list(files)
                if opts.get('no_commit'):
                    if message:
                        msgs.append(message)
                else:
                    if opts.get('exact') or p2:
                        # If you got here, you either use --force and know what
                        # you are doing or used --exact or a merge patch while
                        # being updated to its first parent.
                        m = None
                    else:
                        m = scmutil.matchfiles(repo, files or [])
                    n = repo.commit(message, opts.get('user') or user,
                                    opts.get('date') or date, match=m,
                                    editor=editor)
                    checkexact(repo, n, nodeid)
            else:
                # --bypass: patch into a filestore and commit an in-memory
                # context, leaving the working directory untouched
                if opts.get('exact') or opts.get('import_branch'):
                    branch = branch or 'default'
                else:
                    branch = p1.branch()
                store = patch.filestore()
                try:
                    files = set()
                    try:
                        patch.patchrepo(ui, repo, p1, store, tmpname, strip,
                                        files, eolmode=None)
                    except patch.PatchError, e:
                        raise util.Abort(str(e))
                    memctx = patch.makememctx(repo, (p1.node(), p2.node()),
                                              message,
                                              opts.get('user') or user,
                                              opts.get('date') or date,
                                              branch, files, store,
                                              editor=cmdutil.commiteditor)
                    repo.savecommitmessage(memctx.description())
                    n = memctx.commit()
                    checkexact(repo, n, nodeid)
                finally:
                    store.close()
            if n:
                # i18n: refers to a short changeset id
                msg = _('created %s') % short(n)
            return (msg, n)
        finally:
            # tmpname is the temporary file patch.extract wrote the hunk to
            os.unlink(tmpname)

    try:
        try:
            wlock = repo.wlock()
            lock = repo.lock()
            # one transaction covers all patches: either all are imported
            # or none are
            tr = repo.transaction('import')
            parents = repo.parents()
            for patchurl in patches:
                if patchurl == '-':
                    ui.status(_('applying patch from stdin\n'))
                    patchfile = ui.fin
                    patchurl = 'stdin'      # for error message
                else:
                    patchurl = os.path.join(base, patchurl)
                    ui.status(_('applying %s\n') % patchurl)
                    patchfile = url.open(ui, patchurl)

                haspatch = False
                for hunk in patch.split(patchfile):
                    (msg, node) = tryone(ui, hunk, parents)
                    if msg:
                        haspatch = True
                        ui.note(msg + '\n')
                    # subsequent hunks stack on top of what was just applied
                    if update or opts.get('exact'):
                        parents = repo.parents()
                    else:
                        parents = [repo[node]]

                if not haspatch:
                    raise util.Abort(_('%s: no diffs found') % patchurl)

            tr.close()
            if msgs:
                repo.savecommitmessage('\n* * *\n'.join(msgs))
        except:
            # wlock.release() indirectly calls dirstate.write(): since
            # we're crashing, we do not want to change the working dir
            # parent after all, so make sure it writes nothing
            repo.dirstate.invalidate()
            raise
        finally:
            if tr:
                tr.release()
            release(lock, wlock)
+
@command('incoming|in',
    [('f', 'force', None,
     _('run even if remote repository is unrelated')),
    ('n', 'newest-first', None, _('show newest record first')),
    ('', 'bundle', '',
     _('file to store the bundles into'), _('FILE')),
    ('r', 'rev', [], _('a remote changeset intended to be added'), _('REV')),
    ('B', 'bookmarks', False, _("compare bookmarks")),
    ('b', 'branch', [],
     _('a specific branch you would like to pull'), _('BRANCH')),
    ] + logopts + remoteopts + subrepoopts,
    _('[-p] [-n] [-M] [-f] [-r REV]... [--bundle FILENAME] [SOURCE]'))
def incoming(ui, repo, source="default", **opts):
    """show new changesets found in source

    Show new changesets found in the specified path/URL or the default
    pull location. These are the changesets that would have been pulled
    if a pull at the time you issued this command.

    For remote repository, using --bundle avoids downloading the
    changesets twice if the incoming is followed by a pull.

    See pull for valid source format details.

    Returns 0 if there are incoming changes, 1 otherwise.
    """
    # --bundle writes one bundle file; that cannot cover subrepos too
    if opts.get('bundle') and opts.get('subrepos'):
        raise util.Abort(_('cannot combine --bundle and --subrepos'))

    if not opts.get('bookmarks'):
        # regular changeset comparison; _subtoppath lets subrepos resolve
        # their own relative pull paths during the operation
        repo._subtoppath = ui.expandpath(source)
        try:
            return hg.incoming(ui, repo, source, opts)
        finally:
            del repo._subtoppath

    # -B/--bookmarks: compare bookmarks with the remote instead
    source, branches = hg.parseurl(ui.expandpath(source), opts.get('branch'))
    remote = hg.peer(repo, opts, source)
    if 'bookmarks' not in remote.listkeys('namespaces'):
        ui.warn(_("remote doesn't support bookmarks\n"))
        return 0
    ui.status(_('comparing with %s\n') % util.hidepassword(source))
    return bookmarks.diff(ui, repo, remote)
+
+
@command('^init', remoteopts, _('[-e CMD] [--remotecmd CMD] [DEST]'))
def init(ui, dest=".", **opts):
    """create a new repository in the given directory

    Initialize a new repository in the given directory. If the given
    directory does not exist, it will be created.

    If no directory is given, the current directory is used.

    It is possible to specify an ``ssh://`` URL as the destination.
    See :hg:`help urls` for more information.

    Returns 0 on success.
    """
    # create=True asks peer() to build the repository at the target path
    destpath = ui.expandpath(dest)
    hg.peer(ui, opts, destpath, create=True)
+
@command('locate',
    [('r', 'rev', '', _('search the repository as it is in REV'), _('REV')),
    ('0', 'print0', None, _('end filenames with NUL, for use with xargs')),
    ('f', 'fullpath', None, _('print complete paths from the filesystem root')),
    ] + walkopts,
    _('[OPTION]... [PATTERN]...'))
def locate(ui, repo, *pats, **opts):
    """locate files matching specific patterns

    Print files under Mercurial control in the working directory whose
    names match the given patterns.

    By default, this command searches all directories in the working
    directory. To search just the current directory and its
    subdirectories, use "--include .".

    If no patterns are given to match, this command prints the names
    of all files under Mercurial control in the working directory.

    If you want to feed the output of this command into the "xargs"
    command, use the -0 option to both this command and "xargs". This
    will avoid the problem of "xargs" treating single filenames that
    contain whitespace as multiple filenames.

    Returns 0 if a match is found, 1 otherwise.
    """
    # NUL-terminate names with --print0 (for xargs -0)
    if opts.get('print0'):
        end = '\0'
    else:
        end = '\n'
    rev = scmutil.revsingle(repo, opts.get('rev'), None).node()

    found = False
    matcher = scmutil.match(repo[rev], pats, opts, default='relglob')
    # silence "no such file" complaints for non-matching patterns
    matcher.bad = lambda x, y: False
    for fname in repo[rev].walk(matcher):
        # in the working context, skip files not tracked by the dirstate
        if not rev and fname not in repo.dirstate:
            continue
        if opts.get('fullpath'):
            ui.write(repo.wjoin(fname), end)
        else:
            ui.write(((pats and matcher.rel(fname)) or fname), end)
        found = True

    if found:
        return 0
    return 1
+
+@command('^log|history',
+ [('f', 'follow', None,
+ _('follow changeset history, or file history across copies and renames')),
+ ('', 'follow-first', None,
+ _('only follow the first parent of merge changesets')),
+ ('d', 'date', '', _('show revisions matching date spec'), _('DATE')),
+ ('C', 'copies', None, _('show copied files')),
+ ('k', 'keyword', [],
+ _('do case-insensitive search for a given text'), _('TEXT')),
+ ('r', 'rev', [], _('show the specified revision or range'), _('REV')),
+ ('', 'removed', None, _('include revisions where files were removed')),
+ ('m', 'only-merges', None, _('show only merges')),
+ ('u', 'user', [], _('revisions committed by user'), _('USER')),
+ ('', 'only-branch', [],
+ _('show only changesets within the given named branch (DEPRECATED)'),
+ _('BRANCH')),
+ ('b', 'branch', [],
+ _('show changesets within the given named branch'), _('BRANCH')),
+ ('P', 'prune', [],
+ _('do not display revision or any of its ancestors'), _('REV')),
+ ('', 'hidden', False, _('show hidden changesets')),
+ ] + logopts + walkopts,
+ _('[OPTION]... [FILE]'))
+def log(ui, repo, *pats, **opts):
+ """show revision history of entire repository or files
+
+ Print the revision history of the specified files or the entire
+ project.
+
+ If no revision range is specified, the default is ``tip:0`` unless
+ --follow is set, in which case the working directory parent is
+ used as the starting revision.
+
+ File history is shown without following rename or copy history of
+ files. Use -f/--follow with a filename to follow history across
+ renames and copies. --follow without a filename will only show
+ ancestors or descendants of the starting revision.
+
+ By default this command prints revision number and changeset id,
+ tags, non-trivial parents, user, date and time, and a summary for
+ each commit. When the -v/--verbose switch is used, the list of
+ changed files and full commit message are shown.
+
+ .. note::
+ log -p/--patch may generate unexpected diff output for merge
+ changesets, as it will only compare the merge changeset against
+ its first parent. Also, only files different from BOTH parents
+ will appear in files:.
+
+ .. note::
+ for performance reasons, log FILE may omit duplicate changes
+ made on branches and will not show deletions. To see all
+ changes including duplicates and deletions, use the --removed
+ switch.
+
+ .. container:: verbose
+
+ Some examples:
+
+ - changesets with full descriptions and file lists::
+
+ hg log -v
+
+ - changesets ancestral to the working directory::
+
+ hg log -f
+
+ - last 10 commits on the current branch::
+
+ hg log -l 10 -b .
+
+ - changesets showing all modifications of a file, including removals::
+
+ hg log --removed file.c
+
+ - all changesets that touch a directory, with diffs, excluding merges::
+
+ hg log -Mp lib/
+
+ - all revision numbers that match a keyword::
+
+ hg log -k bug --template "{rev}\\n"
+
+ - check if a given changeset is included in a tagged release::
+
+ hg log -r "a21ccf and ancestor(1.9)"
+
+ - find all changesets by some user in a date range::
+
+ hg log -k alice -d "may 2008 to jul 2008"
+
+ - summary of all changesets after the last tag::
+
+ hg log -r "last(tagged())::" --template "{desc|firstline}\\n"
+
+ See :hg:`help dates` for a list of formats valid for -d/--date.
+
+ See :hg:`help revisions` and :hg:`help revsets` for more about
+ specifying revisions.
+
+ Returns 0 on success.
+ """
+
+ matchfn = scmutil.match(repo[None], pats, opts)
+ limit = cmdutil.loglimit(opts)
+ count = 0
+
+ endrev = None
+ if opts.get('copies') and opts.get('rev'):
+ endrev = max(scmutil.revrange(repo, opts.get('rev'))) + 1
+
+ df = False
+ if opts["date"]:
+ df = util.matchdate(opts["date"])
+
+ branches = opts.get('branch', []) + opts.get('only_branch', [])
+ opts['branch'] = [repo.lookupbranch(b) for b in branches]
+
+ displayer = cmdutil.show_changeset(ui, repo, opts, True)
+ def prep(ctx, fns):
+ rev = ctx.rev()
+ parents = [p for p in repo.changelog.parentrevs(rev)
+ if p != nullrev]
+ if opts.get('no_merges') and len(parents) == 2:
+ return
+ if opts.get('only_merges') and len(parents) != 2:
+ return
+ if opts.get('branch') and ctx.branch() not in opts['branch']:
+ return
+ if not opts.get('hidden') and ctx.hidden():
+ return
+ if df and not df(ctx.date()[0]):
+ return
+ if opts['user'] and not [k for k in opts['user']
+ if k.lower() in ctx.user().lower()]:
+ return
+ if opts.get('keyword'):
+ for k in [kw.lower() for kw in opts['keyword']]:
+ if (k in ctx.user().lower() or
+ k in ctx.description().lower() or
+ k in " ".join(ctx.files()).lower()):
+ break
+ else:
+ return
+
+ copies = None
+ if opts.get('copies') and rev:
+ copies = []
+ getrenamed = templatekw.getrenamedfn(repo, endrev=endrev)
+ for fn in ctx.files():
+ rename = getrenamed(fn, rev)
+ if rename:
+ copies.append((fn, rename[0]))
+
+ revmatchfn = None
+ if opts.get('patch') or opts.get('stat'):
+ if opts.get('follow') or opts.get('follow_first'):
+ # note: this might be wrong when following through merges
+ revmatchfn = scmutil.match(repo[None], fns, default='path')
+ else:
+ revmatchfn = matchfn
+
+ displayer.show(ctx, copies=copies, matchfn=revmatchfn)
+
+ for ctx in cmdutil.walkchangerevs(repo, matchfn, opts, prep):
+ if count == limit:
+ break
+ if displayer.flush(ctx.rev()):
+ count += 1
+ displayer.close()
+
+@command('manifest',
+ [('r', 'rev', '', _('revision to display'), _('REV')),
+ ('', 'all', False, _("list files from all revisions"))],
+ _('[-r REV]'))
+def manifest(ui, repo, node=None, rev=None, **opts):
+ """output the current or given revision of the project manifest
+
+ Print a list of version controlled files for the given revision.
+ If no revision is given, the first parent of the working directory
+ is used, or the null revision if no revision is checked out.
+
+ With -v, print file permissions, symlink and executable bits.
+ With --debug, print file revision hashes.
+
+ If option --all is specified, the list of all files from all revisions
+ is printed. This includes deleted and renamed files.
+
+ Returns 0 on success.
+ """
+ if opts.get('all'):
+ if rev or node:
+ raise util.Abort(_("can't specify a revision with --all"))
+
+ res = []
+ prefix = "data/"
+ suffix = ".i"
+ plen = len(prefix)
+ slen = len(suffix)
+ lock = repo.lock()
+ try:
+ for fn, b, size in repo.store.datafiles():
+ if size != 0 and fn[-slen:] == suffix and fn[:plen] == prefix:
+ res.append(fn[plen:-slen])
+ finally:
+ lock.release()
+ for f in sorted(res):
+ ui.write("%s\n" % f)
+ return
+
+ if rev and node:
+ raise util.Abort(_("please specify just one revision"))
+
+ if not node:
+ node = rev
+
+ decor = {'l':'644 @ ', 'x':'755 * ', '':'644 '}
+ ctx = scmutil.revsingle(repo, node)
+ for f in ctx:
+ if ui.debugflag:
+ ui.write("%40s " % hex(ctx.manifest()[f]))
+ if ui.verbose:
+ ui.write(decor[ctx.flags(f)])
+ ui.write("%s\n" % f)
+
+@command('^merge',
+ [('f', 'force', None, _('force a merge with outstanding changes')),
+ ('r', 'rev', '', _('revision to merge'), _('REV')),
+ ('P', 'preview', None,
+ _('review revisions to merge (no merge is performed)'))
+ ] + mergetoolopts,
+ _('[-P] [-f] [[-r] REV]'))
+def merge(ui, repo, node=None, **opts):
+ """merge working directory with another revision
+
+ The current working directory is updated with all changes made in
+ the requested revision since the last common predecessor revision.
+
+ Files that changed between either parent are marked as changed for
+ the next commit and a commit must be performed before any further
+ updates to the repository are allowed. The next commit will have
+ two parents.
+
+ ``--tool`` can be used to specify the merge tool used for file
+ merges. It overrides the HGMERGE environment variable and your
+ configuration files. See :hg:`help merge-tools` for options.
+
+ If no revision is specified, the working directory's parent is a
+ head revision, and the current branch contains exactly one other
+ head, the other head is merged with by default. Otherwise, an
+ explicit revision with which to merge must be provided.
+
+ :hg:`resolve` must be used to resolve unresolved files.
+
+ To undo an uncommitted merge, use :hg:`update --clean .` which
+ will check out a clean copy of the original merge parent, losing
+ all changes.
+
+ Returns 0 on success, 1 if there are unresolved files.
+ """
+
+ if opts.get('rev') and node:
+ raise util.Abort(_("please specify just one revision"))
+ if not node:
+ node = opts.get('rev')
+
+ if not node:
+ branch = repo[None].branch()
+ bheads = repo.branchheads(branch)
+ if len(bheads) > 2:
+ raise util.Abort(_("branch '%s' has %d heads - "
+ "please merge with an explicit rev")
+ % (branch, len(bheads)),
+ hint=_("run 'hg heads .' to see heads"))
+
+ parent = repo.dirstate.p1()
+ if len(bheads) == 1:
+ if len(repo.heads()) > 1:
+ raise util.Abort(_("branch '%s' has one head - "
+ "please merge with an explicit rev")
+ % branch,
+ hint=_("run 'hg heads' to see all heads"))
+ msg = _('there is nothing to merge')
+ if parent != repo.lookup(repo[None].branch()):
+ msg = _('%s - use "hg update" instead') % msg
+ raise util.Abort(msg)
+
+ if parent not in bheads:
+ raise util.Abort(_('working directory not at a head revision'),
+ hint=_("use 'hg update' or merge with an "
+ "explicit revision"))
+ node = parent == bheads[0] and bheads[-1] or bheads[0]
+ else:
+ node = scmutil.revsingle(repo, node).node()
+
+ if opts.get('preview'):
+ # find nodes that are ancestors of p2 but not of p1
+ p1 = repo.lookup('.')
+ p2 = repo.lookup(node)
+ nodes = repo.changelog.findmissing(common=[p1], heads=[p2])
+
+ displayer = cmdutil.show_changeset(ui, repo, opts)
+ for node in nodes:
+ displayer.show(repo[node])
+ displayer.close()
+ return 0
+
+ try:
+ # ui.forcemerge is an internal variable, do not document
+ repo.ui.setconfig('ui', 'forcemerge', opts.get('tool', ''))
+ return hg.merge(repo, node, force=opts.get('force'))
+ finally:
+ ui.setconfig('ui', 'forcemerge', '')
+
+@command('outgoing|out',
+ [('f', 'force', None, _('run even when the destination is unrelated')),
+ ('r', 'rev', [],
+ _('a changeset intended to be included in the destination'), _('REV')),
+ ('n', 'newest-first', None, _('show newest record first')),
+ ('B', 'bookmarks', False, _('compare bookmarks')),
+ ('b', 'branch', [], _('a specific branch you would like to push'),
+ _('BRANCH')),
+ ] + logopts + remoteopts + subrepoopts,
+ _('[-M] [-p] [-n] [-f] [-r REV]... [DEST]'))
+def outgoing(ui, repo, dest=None, **opts):
+ """show changesets not found in the destination
+
+ Show changesets not found in the specified destination repository
+ or the default push location. These are the changesets that would
+ be pushed if a push was requested.
+
+ See pull for details of valid destination formats.
+
+ Returns 0 if there are outgoing changes, 1 otherwise.
+ """
+
+ if opts.get('bookmarks'):
+ dest = ui.expandpath(dest or 'default-push', dest or 'default')
+ dest, branches = hg.parseurl(dest, opts.get('branch'))
+ other = hg.peer(repo, opts, dest)
+ if 'bookmarks' not in other.listkeys('namespaces'):
+ ui.warn(_("remote doesn't support bookmarks\n"))
+ return 0
+ ui.status(_('comparing with %s\n') % util.hidepassword(dest))
+ return bookmarks.diff(ui, other, repo)
+
+ repo._subtoppath = ui.expandpath(dest or 'default-push', dest or 'default')
+ try:
+ return hg.outgoing(ui, repo, dest, opts)
+ finally:
+ del repo._subtoppath
+
+@command('parents',
+ [('r', 'rev', '', _('show parents of the specified revision'), _('REV')),
+ ] + templateopts,
+ _('[-r REV] [FILE]'))
+def parents(ui, repo, file_=None, **opts):
+ """show the parents of the working directory or revision
+
+ Print the working directory's parent revisions. If a revision is
+ given via -r/--rev, the parent of that revision will be printed.
+ If a file argument is given, the revision in which the file was
+ last changed (before the working directory revision or the
+ argument to --rev if given) is printed.
+
+ Returns 0 on success.
+ """
+
+ ctx = scmutil.revsingle(repo, opts.get('rev'), None)
+
+ if file_:
+ m = scmutil.match(ctx, (file_,), opts)
+ if m.anypats() or len(m.files()) != 1:
+ raise util.Abort(_('can only specify an explicit filename'))
+ file_ = m.files()[0]
+ filenodes = []
+ for cp in ctx.parents():
+ if not cp:
+ continue
+ try:
+ filenodes.append(cp.filenode(file_))
+ except error.LookupError:
+ pass
+ if not filenodes:
+ raise util.Abort(_("'%s' not found in manifest!") % file_)
+ fl = repo.file(file_)
+ p = [repo.lookup(fl.linkrev(fl.rev(fn))) for fn in filenodes]
+ else:
+ p = [cp.node() for cp in ctx.parents()]
+
+ displayer = cmdutil.show_changeset(ui, repo, opts)
+ for n in p:
+ if n != nullid:
+ displayer.show(repo[n])
+ displayer.close()
+
+@command('paths', [], _('[NAME]'))
+def paths(ui, repo, search=None):
+ """show aliases for remote repositories
+
+ Show definition of symbolic path name NAME. If no name is given,
+ show definition of all available names.
+
+ Option -q/--quiet suppresses all output when searching for NAME
+ and shows only the path names when listing all definitions.
+
+ Path names are defined in the [paths] section of your
+ configuration file and in ``/etc/mercurial/hgrc``. If run inside a
+ repository, ``.hg/hgrc`` is used, too.
+
+ The path names ``default`` and ``default-push`` have a special
+ meaning. When performing a push or pull operation, they are used
+ as fallbacks if no location is specified on the command-line.
+ When ``default-push`` is set, it will be used for push and
+ ``default`` will be used for pull; otherwise ``default`` is used
+ as the fallback for both. When cloning a repository, the clone
+ source is written as ``default`` in ``.hg/hgrc``. Note that
+ ``default`` and ``default-push`` apply to all inbound (e.g.
+ :hg:`incoming`) and outbound (e.g. :hg:`outgoing`, :hg:`email` and
+ :hg:`bundle`) operations.
+
+ See :hg:`help urls` for more information.
+
+ Returns 0 on success.
+ """
+ if search:
+ for name, path in ui.configitems("paths"):
+ if name == search:
+ ui.status("%s\n" % util.hidepassword(path))
+ return
+ if not ui.quiet:
+ ui.warn(_("not found!\n"))
+ return 1
+ else:
+ for name, path in ui.configitems("paths"):
+ if ui.quiet:
+ ui.write("%s\n" % name)
+ else:
+ ui.write("%s = %s\n" % (name, util.hidepassword(path)))
+
+def postincoming(ui, repo, modheads, optupdate, checkout):
+ if modheads == 0:
+ return
+ if optupdate:
+ try:
+ return hg.update(repo, checkout)
+ except util.Abort, inst:
+ ui.warn(_("not updating: %s\n" % str(inst)))
+ return 0
+ if modheads > 1:
+ currentbranchheads = len(repo.branchheads())
+ if currentbranchheads == modheads:
+ ui.status(_("(run 'hg heads' to see heads, 'hg merge' to merge)\n"))
+ elif currentbranchheads > 1:
+ ui.status(_("(run 'hg heads .' to see heads, 'hg merge' to merge)\n"))
+ else:
+ ui.status(_("(run 'hg heads' to see heads)\n"))
+ else:
+ ui.status(_("(run 'hg update' to get a working copy)\n"))
+
+@command('^pull',
+ [('u', 'update', None,
+ _('update to new branch head if changesets were pulled')),
+ ('f', 'force', None, _('run even when remote repository is unrelated')),
+ ('r', 'rev', [], _('a remote changeset intended to be added'), _('REV')),
+ ('B', 'bookmark', [], _("bookmark to pull"), _('BOOKMARK')),
+ ('b', 'branch', [], _('a specific branch you would like to pull'),
+ _('BRANCH')),
+ ] + remoteopts,
+ _('[-u] [-f] [-r REV]... [-e CMD] [--remotecmd CMD] [SOURCE]'))
+def pull(ui, repo, source="default", **opts):
+ """pull changes from the specified source
+
+ Pull changes from a remote repository to a local one.
+
+ This finds all changes from the repository at the specified path
+ or URL and adds them to a local repository (the current one unless
+ -R is specified). By default, this does not update the copy of the
+ project in the working directory.
+
+ Use :hg:`incoming` if you want to see what would have been added
+ by a pull at the time you issued this command. If you then decide
+ to add those changes to the repository, you should use :hg:`pull
+ -r X` where ``X`` is the last changeset listed by :hg:`incoming`.
+
+ If SOURCE is omitted, the 'default' path will be used.
+ See :hg:`help urls` for more information.
+
+ Returns 0 on success, 1 if an update had unresolved files.
+ """
+ source, branches = hg.parseurl(ui.expandpath(source), opts.get('branch'))
+ other = hg.peer(repo, opts, source)
+ ui.status(_('pulling from %s\n') % util.hidepassword(source))
+ revs, checkout = hg.addbranchrevs(repo, other, branches, opts.get('rev'))
+
+ if opts.get('bookmark'):
+ if not revs:
+ revs = []
+ rb = other.listkeys('bookmarks')
+ for b in opts['bookmark']:
+ if b not in rb:
+ raise util.Abort(_('remote bookmark %s not found!') % b)
+ revs.append(rb[b])
+
+ if revs:
+ try:
+ revs = [other.lookup(rev) for rev in revs]
+ except error.CapabilityError:
+ err = _("other repository doesn't support revision lookup, "
+ "so a rev cannot be specified.")
+ raise util.Abort(err)
+
+ modheads = repo.pull(other, heads=revs, force=opts.get('force'))
+ bookmarks.updatefromremote(ui, repo, other)
+ if checkout:
+ checkout = str(repo.changelog.rev(other.lookup(checkout)))
+ repo._subtoppath = source
+ try:
+ ret = postincoming(ui, repo, modheads, opts.get('update'), checkout)
+
+ finally:
+ del repo._subtoppath
+
+ # update specified bookmarks
+ if opts.get('bookmark'):
+ for b in opts['bookmark']:
+ # explicit pull overrides local bookmark if any
+ ui.status(_("importing bookmark %s\n") % b)
+ repo._bookmarks[b] = repo[rb[b]].node()
+ bookmarks.write(repo)
+
+ return ret
+
+@command('^push',
+ [('f', 'force', None, _('force push')),
+ ('r', 'rev', [],
+ _('a changeset intended to be included in the destination'),
+ _('REV')),
+ ('B', 'bookmark', [], _("bookmark to push"), _('BOOKMARK')),
+ ('b', 'branch', [],
+ _('a specific branch you would like to push'), _('BRANCH')),
+ ('', 'new-branch', False, _('allow pushing a new branch')),
+ ] + remoteopts,
+ _('[-f] [-r REV]... [-e CMD] [--remotecmd CMD] [DEST]'))
+def push(ui, repo, dest=None, **opts):
+ """push changes to the specified destination
+
+ Push changesets from the local repository to the specified
+ destination.
+
+ This operation is symmetrical to pull: it is identical to a pull
+ in the destination repository from the current one.
+
+ By default, push will not allow creation of new heads at the
+ destination, since multiple heads would make it unclear which head
+ to use. In this situation, it is recommended to pull and merge
+ before pushing.
+
+ Use --new-branch if you want to allow push to create a new named
+ branch that is not present at the destination. This allows you to
+ only create a new branch without forcing other changes.
+
+ Use -f/--force to override the default behavior and push all
+ changesets on all branches.
+
+ If -r/--rev is used, the specified revision and all its ancestors
+ will be pushed to the remote repository.
+
+ Please see :hg:`help urls` for important details about ``ssh://``
+ URLs. If DESTINATION is omitted, a default path will be used.
+
+ Returns 0 if push was successful, 1 if nothing to push.
+ """
+
+ if opts.get('bookmark'):
+ for b in opts['bookmark']:
+ # translate -B options to -r so changesets get pushed
+ if b in repo._bookmarks:
+ opts.setdefault('rev', []).append(b)
+ else:
+ # if we try to push a deleted bookmark, translate it to null
+ # this lets simultaneous -r, -b options continue working
+ opts.setdefault('rev', []).append("null")
+
+ dest = ui.expandpath(dest or 'default-push', dest or 'default')
+ dest, branches = hg.parseurl(dest, opts.get('branch'))
+ ui.status(_('pushing to %s\n') % util.hidepassword(dest))
+ revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get('rev'))
+ other = hg.peer(repo, opts, dest)
+ if revs:
+ revs = [repo.lookup(rev) for rev in revs]
+
+ repo._subtoppath = dest
+ try:
+ # push subrepos depth-first for coherent ordering
+ c = repo['']
+ subs = c.substate # only repos that are committed
+ for s in sorted(subs):
+ if not c.sub(s).push(opts.get('force')):
+ return False
+ finally:
+ del repo._subtoppath
+ result = repo.push(other, opts.get('force'), revs=revs,
+ newbranch=opts.get('new_branch'))
+
+ result = (result == 0)
+
+ if opts.get('bookmark'):
+ rb = other.listkeys('bookmarks')
+ for b in opts['bookmark']:
+ # explicit push overrides remote bookmark if any
+ if b in repo._bookmarks:
+ ui.status(_("exporting bookmark %s\n") % b)
+ new = repo[b].hex()
+ elif b in rb:
+ ui.status(_("deleting remote bookmark %s\n") % b)
+ new = '' # delete
+ else:
+ ui.warn(_('bookmark %s does not exist on the local '
+ 'or remote repository!\n') % b)
+ return 2
+ old = rb.get(b, '')
+ r = other.pushkey('bookmarks', b, old, new)
+ if not r:
+ ui.warn(_('updating bookmark %s failed!\n') % b)
+ if not result:
+ result = 2
+
+ return result
+
+@command('recover', [])
+def recover(ui, repo):
+ """roll back an interrupted transaction
+
+ Recover from an interrupted commit or pull.
+
+ This command tries to fix the repository status after an
+ interrupted operation. It should only be necessary when Mercurial
+ suggests it.
+
+ Returns 0 if successful, 1 if nothing to recover or verify fails.
+ """
+ if repo.recover():
+ return hg.verify(repo)
+ return 1
+
+@command('^remove|rm',
+ [('A', 'after', None, _('record delete for missing files')),
+ ('f', 'force', None,
+ _('remove (and delete) file even if added or modified')),
+ ] + walkopts,
+ _('[OPTION]... FILE...'))
+def remove(ui, repo, *pats, **opts):
+ """remove the specified files on the next commit
+
+ Schedule the indicated files for removal from the current branch.
+
+ This command schedules the files to be removed at the next commit.
+ To undo a remove before that, see :hg:`revert`. To undo added
+ files, see :hg:`forget`.
+
+ .. container:: verbose
+
+ -A/--after can be used to remove only files that have already
+ been deleted, -f/--force can be used to force deletion, and -Af
+ can be used to remove files from the next revision without
+ deleting them from the working directory.
+
+ The following table details the behavior of remove for different
+ file states (columns) and option combinations (rows). The file
+ states are Added [A], Clean [C], Modified [M] and Missing [!]
+ (as reported by :hg:`status`). The actions are Warn, Remove
+ (from branch) and Delete (from disk):
+
+ ======= == == == ==
+ A C M !
+ ======= == == == ==
+ none W RD W R
+ -f R RD RD R
+ -A W W W R
+ -Af R R R R
+ ======= == == == ==
+
+ Note that remove never deletes files in Added [A] state from the
+ working directory, not even if option --force is specified.
+
+ Returns 0 on success, 1 if any warnings encountered.
+ """
+
+ ret = 0
+ after, force = opts.get('after'), opts.get('force')
+ if not pats and not after:
+ raise util.Abort(_('no files specified'))
+
+ m = scmutil.match(repo[None], pats, opts)
+ s = repo.status(match=m, clean=True)
+ modified, added, deleted, clean = s[0], s[1], s[3], s[6]
+
+ for f in m.files():
+ if f not in repo.dirstate and not os.path.isdir(m.rel(f)):
+ if os.path.exists(m.rel(f)):
+ ui.warn(_('not removing %s: file is untracked\n') % m.rel(f))
+ ret = 1
+
+ if force:
+ list = modified + deleted + clean + added
+ elif after:
+ list = deleted
+ for f in modified + added + clean:
+ ui.warn(_('not removing %s: file still exists (use -f'
+ ' to force removal)\n') % m.rel(f))
+ ret = 1
+ else:
+ list = deleted + clean
+ for f in modified:
+ ui.warn(_('not removing %s: file is modified (use -f'
+ ' to force removal)\n') % m.rel(f))
+ ret = 1
+ for f in added:
+ ui.warn(_('not removing %s: file has been marked for add'
+ ' (use forget to undo)\n') % m.rel(f))
+ ret = 1
+
+ for f in sorted(list):
+ if ui.verbose or not m.exact(f):
+ ui.status(_('removing %s\n') % m.rel(f))
+
+ wlock = repo.wlock()
+ try:
+ if not after:
+ for f in list:
+ if f in added:
+ continue # we never unlink added files on remove
+ try:
+ util.unlinkpath(repo.wjoin(f))
+ except OSError, inst:
+ if inst.errno != errno.ENOENT:
+ raise
+ repo[None].forget(list)
+ finally:
+ wlock.release()
+
+ return ret
+
+@command('rename|move|mv',
+ [('A', 'after', None, _('record a rename that has already occurred')),
+ ('f', 'force', None, _('forcibly copy over an existing managed file')),
+ ] + walkopts + dryrunopts,
+ _('[OPTION]... SOURCE... DEST'))
+def rename(ui, repo, *pats, **opts):
+ """rename files; equivalent of copy + remove
+
+ Mark dest as copies of sources; mark sources for deletion. If dest
+ is a directory, copies are put in that directory. If dest is a
+ file, there can only be one source.
+
+ By default, this command copies the contents of files as they
+ exist in the working directory. If invoked with -A/--after, the
+ operation is recorded, but no copying is performed.
+
+ This command takes effect at the next commit. To undo a rename
+ before that, see :hg:`revert`.
+
+ Returns 0 on success, 1 if errors are encountered.
+ """
+ wlock = repo.wlock(False)
+ try:
+ return cmdutil.copy(ui, repo, pats, opts, rename=True)
+ finally:
+ wlock.release()
+
+@command('resolve',
+ [('a', 'all', None, _('select all unresolved files')),
+ ('l', 'list', None, _('list state of files needing merge')),
+ ('m', 'mark', None, _('mark files as resolved')),
+ ('u', 'unmark', None, _('mark files as unresolved')),
+ ('n', 'no-status', None, _('hide status prefix'))]
+ + mergetoolopts + walkopts,
+ _('[OPTION]... [FILE]...'))
+def resolve(ui, repo, *pats, **opts):
+ """redo merges or set/view the merge status of files
+
+ Merges with unresolved conflicts are often the result of
+ non-interactive merging using the ``internal:merge`` configuration
+ setting, or a command-line merge tool like ``diff3``. The resolve
+ command is used to manage the files involved in a merge, after
+ :hg:`merge` has been run, and before :hg:`commit` is run (i.e. the
+ working directory must have two parents).
+
+ The resolve command can be used in the following ways:
+
+ - :hg:`resolve [--tool TOOL] FILE...`: attempt to re-merge the specified
+ files, discarding any previous merge attempts. Re-merging is not
+ performed for files already marked as resolved. Use ``--all/-a``
+ to select all unresolved files. ``--tool`` can be used to specify
+ the merge tool used for the given files. It overrides the HGMERGE
+ environment variable and your configuration files. Previous file
+ contents are saved with a ``.orig`` suffix.
+
+ - :hg:`resolve -m [FILE]`: mark a file as having been resolved
+ (e.g. after having manually fixed-up the files). The default is
+ to mark all unresolved files.
+
+ - :hg:`resolve -u [FILE]...`: mark a file as unresolved. The
+ default is to mark all resolved files.
+
+ - :hg:`resolve -l`: list files which had or still have conflicts.
+ In the printed list, ``U`` = unresolved and ``R`` = resolved.
+
+ Note that Mercurial will not let you commit files with unresolved
+ merge conflicts. You must use :hg:`resolve -m ...` before you can
+ commit after a conflicting merge.
+
+ Returns 0 on success, 1 if any files fail a resolve attempt.
+ """
+
+ all, mark, unmark, show, nostatus = \
+ [opts.get(o) for o in 'all mark unmark list no_status'.split()]
+
+ if (show and (mark or unmark)) or (mark and unmark):
+ raise util.Abort(_("too many options specified"))
+ if pats and all:
+ raise util.Abort(_("can't specify --all and patterns"))
+ if not (all or pats or show or mark or unmark):
+ raise util.Abort(_('no files or directories specified; '
+ 'use --all to remerge all files'))
+
+ ms = mergemod.mergestate(repo)
+ m = scmutil.match(repo[None], pats, opts)
+ ret = 0
+
+ for f in ms:
+ if m(f):
+ if show:
+ if nostatus:
+ ui.write("%s\n" % f)
+ else:
+ ui.write("%s %s\n" % (ms[f].upper(), f),
+ label='resolve.' +
+ {'u': 'unresolved', 'r': 'resolved'}[ms[f]])
+ elif mark:
+ ms.mark(f, "r")
+ elif unmark:
+ ms.mark(f, "u")
+ else:
+ wctx = repo[None]
+ mctx = wctx.parents()[-1]
+
+ # backup pre-resolve (merge uses .orig for its own purposes)
+ a = repo.wjoin(f)
+ util.copyfile(a, a + ".resolve")
+
+ try:
+ # resolve file
+ ui.setconfig('ui', 'forcemerge', opts.get('tool', ''))
+ if ms.resolve(f, wctx, mctx):
+ ret = 1
+ finally:
+ ui.setconfig('ui', 'forcemerge', '')
+
+ # replace filemerge's .orig file with our resolve file
+ util.rename(a + ".resolve", a + ".orig")
+
+ ms.commit()
+ return ret
+
+@command('revert',
+ [('a', 'all', None, _('revert all changes when no arguments given')),
+ ('d', 'date', '', _('tipmost revision matching date'), _('DATE')),
+ ('r', 'rev', '', _('revert to the specified revision'), _('REV')),
+ ('C', 'no-backup', None, _('do not save backup copies of files')),
+ ] + walkopts + dryrunopts,
+ _('[OPTION]... [-r REV] [NAME]...'))
+def revert(ui, repo, *pats, **opts):
+ """restore files to their checkout state
+
+ .. note::
+ To check out earlier revisions, you should use :hg:`update REV`.
+ To cancel a merge (and lose your changes), use :hg:`update --clean .`.
+
+ With no revision specified, revert the specified files or directories
+ to the contents they had in the parent of the working directory.
+ This restores the contents of files to an unmodified
+ state and unschedules adds, removes, copies, and renames. If the
+ working directory has two parents, you must explicitly specify a
+ revision.
+
+ Using the -r/--rev or -d/--date options, revert the given files or
+ directories to their states as of a specific revision. Because
+ revert does not change the working directory parents, this will
+ cause these files to appear modified. This can be helpful to "back
+ out" some or all of an earlier change. See :hg:`backout` for a
+ related method.
+
+ Modified files are saved with a .orig suffix before reverting.
+ To disable these backups, use --no-backup.
+
+ See :hg:`help dates` for a list of formats valid for -d/--date.
+
+ Returns 0 on success.
+ """
+
+ if opts.get("date"):
+ if opts.get("rev"):
+ raise util.Abort(_("you can't specify a revision and a date"))
+ opts["rev"] = cmdutil.finddate(ui, repo, opts["date"])
+
+ parent, p2 = repo.dirstate.parents()
+ if not opts.get('rev') and p2 != nullid:
+ # revert after merge is a trap for new users (issue2915)
+ raise util.Abort(_('uncommitted merge with no revision specified'),
+ hint=_('use "hg update" or see "hg help revert"'))
+
+ ctx = scmutil.revsingle(repo, opts.get('rev'))
+ node = ctx.node()
+
+ if not pats and not opts.get('all'):
+ msg = _("no files or directories specified")
+ if p2 != nullid:
+ hint = _("uncommitted merge, use --all to discard all changes,"
+ " or 'hg update -C .' to abort the merge")
+ raise util.Abort(msg, hint=hint)
+ dirty = util.any(repo.status())
+ if node != parent:
+ if dirty:
+ hint = _("uncommitted changes, use --all to discard all"
+ " changes, or 'hg update %s' to update") % ctx.rev()
+ else:
+ hint = _("use --all to revert all files,"
+ " or 'hg update %s' to update") % ctx.rev()
+ elif dirty:
+ hint = _("uncommitted changes, use --all to discard all changes")
+ else:
+ hint = _("use --all to revert all files")
+ raise util.Abort(msg, hint=hint)
+
+ mf = ctx.manifest()
+ if node == parent:
+ pmf = mf
+ else:
+ pmf = None
+
+ # need all matching names in dirstate and manifest of target rev,
+ # so have to walk both. do not print errors if files exist in one
+ # but not other.
+
+ names = {}
+
+ wlock = repo.wlock()
+ try:
+ # walk dirstate.
+
+ m = scmutil.match(repo[None], pats, opts)
+ m.bad = lambda x, y: False
+ for abs in repo.walk(m):
+ names[abs] = m.rel(abs), m.exact(abs)
+
+ # walk target manifest.
+
+ def badfn(path, msg):
+ if path in names:
+ return
+ if path in repo[node].substate:
+ ui.warn("%s: %s\n" % (m.rel(path),
+ 'reverting subrepos is unsupported'))
+ return
+ path_ = path + '/'
+ for f in names:
+ if f.startswith(path_):
+ return
+ ui.warn("%s: %s\n" % (m.rel(path), msg))
+
+ m = scmutil.match(repo[node], pats, opts)
+ m.bad = badfn
+ for abs in repo[node].walk(m):
+ if abs not in names:
+ names[abs] = m.rel(abs), m.exact(abs)
+
+ m = scmutil.matchfiles(repo, names)
+ changes = repo.status(match=m)[:4]
+ modified, added, removed, deleted = map(set, changes)
+
+ # if f is a rename, also revert the source
+ cwd = repo.getcwd()
+ for f in added:
+ src = repo.dirstate.copied(f)
+ if src and src not in names and repo.dirstate[src] == 'r':
+ removed.add(src)
+ names[src] = (repo.pathto(src, cwd), True)
+
+ def removeforget(abs):
+ if repo.dirstate[abs] == 'a':
+ return _('forgetting %s\n')
+ return _('removing %s\n')
+
+ revert = ([], _('reverting %s\n'))
+ add = ([], _('adding %s\n'))
+ remove = ([], removeforget)
+ undelete = ([], _('undeleting %s\n'))
+
+ disptable = (
+ # dispatch table:
+ # file state
+ # action if in target manifest
+ # action if not in target manifest
+ # make backup if in target manifest
+ # make backup if not in target manifest
+ (modified, revert, remove, True, True),
+ (added, revert, remove, True, False),
+ (removed, undelete, None, False, False),
+ (deleted, revert, remove, False, False),
+ )
+
+ for abs, (rel, exact) in sorted(names.items()):
+ mfentry = mf.get(abs)
+ target = repo.wjoin(abs)
+ def handle(xlist, dobackup):
+ xlist[0].append(abs)
+ if (dobackup and not opts.get('no_backup') and
+ os.path.lexists(target)):
+ bakname = "%s.orig" % rel
+ ui.note(_('saving current version of %s as %s\n') %
+ (rel, bakname))
+ if not opts.get('dry_run'):
+ util.rename(target, bakname)
+ if ui.verbose or not exact:
+ msg = xlist[1]
+ if not isinstance(msg, basestring):
+ msg = msg(abs)
+ ui.status(msg % rel)
+ for table, hitlist, misslist, backuphit, backupmiss in disptable:
+ if abs not in table:
+ continue
+ # file has changed in dirstate
+ if mfentry:
+ handle(hitlist, backuphit)
+ elif misslist is not None:
+ handle(misslist, backupmiss)
+ break
+ else:
+ if abs not in repo.dirstate:
+ if mfentry:
+ handle(add, True)
+ elif exact:
+ ui.warn(_('file not managed: %s\n') % rel)
+ continue
+ # file has not changed in dirstate
+ if node == parent:
+ if exact:
+ ui.warn(_('no changes needed to %s\n') % rel)
+ continue
+ if pmf is None:
+ # only need parent manifest in this unlikely case,
+ # so do not read by default
+ pmf = repo[parent].manifest()
+ if abs in pmf and mfentry:
+ # if version of file is same in parent and target
+ # manifests, do nothing
+ if (pmf[abs] != mfentry or
+ pmf.flags(abs) != mf.flags(abs)):
+ handle(revert, False)
+ else:
+ handle(remove, False)
+
+ if not opts.get('dry_run'):
+ def checkout(f):
+ fc = ctx[f]
+ repo.wwrite(f, fc.data(), fc.flags())
+
+ audit_path = scmutil.pathauditor(repo.root)
+ for f in remove[0]:
+ if repo.dirstate[f] == 'a':
+ repo.dirstate.drop(f)
+ continue
+ audit_path(f)
+ try:
+ util.unlinkpath(repo.wjoin(f))
+ except OSError:
+ pass
+ repo.dirstate.remove(f)
+
+ normal = None
+ if node == parent:
+ # We're reverting to our parent. If possible, we'd like status
+ # to report the file as clean. We have to use normallookup for
+ # merges to avoid losing information about merged/dirty files.
+ if p2 != nullid:
+ normal = repo.dirstate.normallookup
+ else:
+ normal = repo.dirstate.normal
+ for f in revert[0]:
+ checkout(f)
+ if normal:
+ normal(f)
+
+ for f in add[0]:
+ checkout(f)
+ repo.dirstate.add(f)
+
+ normal = repo.dirstate.normallookup
+ if node == parent and p2 == nullid:
+ normal = repo.dirstate.normal
+ for f in undelete[0]:
+ checkout(f)
+ normal(f)
+
+ finally:
+ wlock.release()
+
+@command('rollback', dryrunopts +
+ [('f', 'force', False, _('ignore safety measures'))])
+def rollback(ui, repo, **opts):
+ """roll back the last transaction (dangerous)
+
+ This command should be used with care. There is only one level of
+ rollback, and there is no way to undo a rollback. It will also
+ restore the dirstate at the time of the last transaction, losing
+ any dirstate changes since that time. This command does not alter
+ the working directory.
+
+ Transactions are used to encapsulate the effects of all commands
+ that create new changesets or propagate existing changesets into a
+ repository. For example, the following commands are transactional,
+ and their effects can be rolled back:
+
+ - commit
+ - import
+ - pull
+ - push (with this repository as the destination)
+ - unbundle
+
+ It's possible to lose data with rollback: commit, update back to
+ an older changeset, and then rollback. The update removes the
+ changes you committed from the working directory, and rollback
+ removes them from history. To avoid data loss, you must pass
+ --force in this case.
+
+ This command is not intended for use on public repositories. Once
+ changes are visible for pull by other users, rolling a transaction
+ back locally is ineffective (someone else may already have pulled
+ the changes). Furthermore, a race is possible with readers of the
+ repository; for example an in-progress pull from the repository
+ may fail if a rollback is performed.
+
+ Returns 0 on success, 1 if no rollback data is available.
+ """
+ # all real work is delegated to the repo object; --force bypasses
+ # the "would lose commits" safety check described above
+ return repo.rollback(dryrun=opts.get('dry_run'),
+ force=opts.get('force'))
+
+@command('root', [])
+def root(ui, repo):
+ """print the root (top) of the current working directory
+
+ Print the root directory of the current repository.
+
+ Returns 0 on success.
+ """
+ # repo.root is already an absolute path; just echo it
+ ui.write(repo.root + "\n")
+
+@command('^serve',
+ [('A', 'accesslog', '', _('name of access log file to write to'),
+ _('FILE')),
+ ('d', 'daemon', None, _('run server in background')),
+ ('', 'daemon-pipefds', '', _('used internally by daemon mode'), _('NUM')),
+ ('E', 'errorlog', '', _('name of error log file to write to'), _('FILE')),
+ # use string type, then we can check if something was passed
+ ('p', 'port', '', _('port to listen on (default: 8000)'), _('PORT')),
+ ('a', 'address', '', _('address to listen on (default: all interfaces)'),
+ _('ADDR')),
+ ('', 'prefix', '', _('prefix path to serve from (default: server root)'),
+ _('PREFIX')),
+ ('n', 'name', '',
+ _('name to show in web pages (default: working directory)'), _('NAME')),
+ ('', 'web-conf', '',
+ _('name of the hgweb config file (see "hg help hgweb")'), _('FILE')),
+ ('', 'webdir-conf', '', _('name of the hgweb config file (DEPRECATED)'),
+ _('FILE')),
+ ('', 'pid-file', '', _('name of file to write process ID to'), _('FILE')),
+ ('', 'stdio', None, _('for remote clients')),
+ ('', 'cmdserver', '', _('for remote clients'), _('MODE')),
+ ('t', 'templates', '', _('web templates to use'), _('TEMPLATE')),
+ ('', 'style', '', _('template style to use'), _('STYLE')),
+ ('6', 'ipv6', None, _('use IPv6 in addition to IPv4')),
+ ('', 'certificate', '', _('SSL certificate file'), _('FILE'))],
+ _('[OPTION]...'))
+def serve(ui, repo, **opts):
+ """start stand-alone webserver
+
+ Start a local HTTP repository browser and pull server. You can use
+ this for ad-hoc sharing and browsing of repositories. It is
+ recommended to use a real web server to serve a repository for
+ longer periods of time.
+
+ Please note that the server does not implement access control.
+ This means that, by default, anybody can read from the server and
+ nobody can write to it by default. Set the ``web.allow_push``
+ option to ``*`` to allow everybody to push to the server. You
+ should use a real web server if you need to authenticate users.
+
+ By default, the server logs accesses to stdout and errors to
+ stderr. Use the -A/--accesslog and -E/--errorlog options to log to
+ files.
+
+ To have the server choose a free port number to listen on, specify
+ a port number of 0; in this case, the server will print the port
+ number it uses.
+
+ Returns 0 on success.
+ """
+
+ # --stdio (ssh protocol) and --cmdserver are mutually exclusive
+ # transports; each takes over the process's stdio streams
+ if opts["stdio"] and opts["cmdserver"]:
+ raise util.Abort(_("cannot use --stdio with --cmdserver"))
+
+ # serve is an optionalrepo command, so repo may be None here
+ def checkrepo():
+ if repo is None:
+ raise error.RepoError(_("There is no Mercurial repository here"
+ " (.hg not found)"))
+
+ if opts["stdio"]:
+ checkrepo()
+ s = sshserver.sshserver(ui, repo)
+ s.serve_forever()
+
+ if opts["cmdserver"]:
+ checkrepo()
+ s = commandserver.server(ui, repo, opts["cmdserver"])
+ return s.serve()
+
+ # this way we can check if something was given in the command-line
+ if opts.get('port'):
+ opts['port'] = util.getport(opts.get('port'))
+
+ baseui = repo and repo.baseui or ui
+ # copy explicitly-passed CLI options into the [web] config section so
+ # hgweb picks them up through the normal configuration path
+ optlist = ("name templates style address port prefix ipv6"
+ " accesslog errorlog certificate encoding")
+ for o in optlist.split():
+ val = opts.get(o, '')
+ if val in (None, ''): # should check against default options instead
+ continue
+ baseui.setconfig("web", o, val)
+ if repo and repo.ui != baseui:
+ repo.ui.setconfig("web", o, val)
+
+ # a web-conf/webdir-conf file takes precedence; otherwise serve the
+ # current repository's root
+ o = opts.get('web_conf') or opts.get('webdir_conf')
+ if not o:
+ if not repo:
+ raise error.RepoError(_("There is no Mercurial repository"
+ " here (.hg not found)"))
+ o = repo.root
+
+ app = hgweb.hgweb(o, baseui=ui)
+
+ # small adapter handed to cmdutil.service() so --daemon / --pid-file
+ # handling is shared with other long-running commands
+ class service(object):
+ def init(self):
+ util.setsignalhandler()
+ self.httpd = hgweb.server.create_server(ui, app)
+
+ if opts['port'] and not ui.verbose:
+ return
+
+ if self.httpd.prefix:
+ prefix = self.httpd.prefix.strip('/') + '/'
+ else:
+ prefix = ''
+
+ port = ':%d' % self.httpd.port
+ if port == ':80':
+ port = ''
+
+ bindaddr = self.httpd.addr
+ if bindaddr == '0.0.0.0':
+ bindaddr = '*'
+ elif ':' in bindaddr: # IPv6
+ bindaddr = '[%s]' % bindaddr
+
+ fqaddr = self.httpd.fqaddr
+ if ':' in fqaddr:
+ fqaddr = '[%s]' % fqaddr
+ # with an explicit --port the message is informational only;
+ # with port 0 the user needs it to learn the chosen port
+ if opts['port']:
+ write = ui.status
+ else:
+ write = ui.write
+ write(_('listening at http://%s%s/%s (bound to %s:%d)\n') %
+ (fqaddr, port, prefix, bindaddr, self.httpd.port))
+
+ def run(self):
+ self.httpd.serve_forever()
+
+ service = service()
+
+ cmdutil.service(opts, initfn=service.init, runfn=service.run)
+
+@command('showconfig|debugconfig',
+ [('u', 'untrusted', None, _('show untrusted configuration options'))],
+ _('[-u] [NAME]...'))
+def showconfig(ui, repo, *values, **opts):
+ """show combined config settings from all hgrc files
+
+ With no arguments, print names and values of all config items.
+
+ With one argument of the form section.name, print just the value
+ of that config item.
+
+ With multiple arguments, print names and values of all config
+ items with matching section names.
+
+ With --debug, the source (filename and line number) is printed
+ for each config item.
+
+ Returns 0 on success.
+ """
+
+ for f in scmutil.rcpath():
+ ui.debug('read config from: %s\n' % f)
+ untrusted = bool(opts.get('untrusted'))
+ if values:
+ # arguments containing a '.' select single items; the rest select
+ # whole sections -- mixing them (or giving two items) is ambiguous
+ sections = [v for v in values if '.' not in v]
+ items = [v for v in values if '.' in v]
+ if len(items) > 1 or items and sections:
+ raise util.Abort(_('only one config item permitted'))
+ for section, name, value in ui.walkconfig(untrusted=untrusted):
+ # keep output one-line-per-item even for multi-line values
+ value = str(value).replace('\n', '\\n')
+ sectname = section + '.' + name
+ if values:
+ for v in values:
+ if v == section:
+ ui.debug('%s: ' %
+ ui.configsource(section, name, untrusted))
+ ui.write('%s=%s\n' % (sectname, value))
+ elif v == sectname:
+ ui.debug('%s: ' %
+ ui.configsource(section, name, untrusted))
+ ui.write(value, '\n')
+ else:
+ ui.debug('%s: ' %
+ ui.configsource(section, name, untrusted))
+ ui.write('%s=%s\n' % (sectname, value))
+
+@command('^status|st',
+ [('A', 'all', None, _('show status of all files')),
+ ('m', 'modified', None, _('show only modified files')),
+ ('a', 'added', None, _('show only added files')),
+ ('r', 'removed', None, _('show only removed files')),
+ ('d', 'deleted', None, _('show only deleted (but tracked) files')),
+ ('c', 'clean', None, _('show only files without changes')),
+ ('u', 'unknown', None, _('show only unknown (not tracked) files')),
+ ('i', 'ignored', None, _('show only ignored files')),
+ ('n', 'no-status', None, _('hide status prefix')),
+ ('C', 'copies', None, _('show source of copied files')),
+ ('0', 'print0', None, _('end filenames with NUL, for use with xargs')),
+ ('', 'rev', [], _('show difference from revision'), _('REV')),
+ ('', 'change', '', _('list the changed files of a revision'), _('REV')),
+ ] + walkopts + subrepoopts,
+ _('[OPTION]... [FILE]...'))
+def status(ui, repo, *pats, **opts):
+ """show changed files in the working directory
+
+ Show status of files in the repository. If names are given, only
+ files that match are shown. Files that are clean or ignored or
+ the source of a copy/move operation, are not listed unless
+ -c/--clean, -i/--ignored, -C/--copies or -A/--all are given.
+ Unless options described with "show only ..." are given, the
+ options -mardu are used.
+
+ Option -q/--quiet hides untracked (unknown and ignored) files
+ unless explicitly requested with -u/--unknown or -i/--ignored.
+
+ .. note::
+ status may appear to disagree with diff if permissions have
+ changed or a merge has occurred. The standard diff format does
+ not report permission changes and diff only reports changes
+ relative to one merge parent.
+
+ If one revision is given, it is used as the base revision.
+ If two revisions are given, the differences between them are
+ shown. The --change option can also be used as a shortcut to list
+ the changed files of a revision from its first parent.
+
+ The codes used to show the status of files are::
+
+ M = modified
+ A = added
+ R = removed
+ C = clean
+ ! = missing (deleted by non-hg command, but still tracked)
+ ? = not tracked
+ I = ignored
+ = origin of the previous file listed as A (added)
+
+ .. container:: verbose
+
+ Examples:
+
+ - show changes in the working directory relative to a changeset:
+
+ hg status --rev 9353
+
+ - show all changes including copies in an existing changeset::
+
+ hg status --copies --change 9353
+
+ - get a NUL separated list of added files, suitable for xargs::
+
+ hg status -an0
+
+ Returns 0 on success.
+ """
+
+ revs = opts.get('rev')
+ change = opts.get('change')
+
+ # --change REV is shorthand for diffing REV against its first parent
+ if revs and change:
+ msg = _('cannot specify --rev and --change at the same time')
+ raise util.Abort(msg)
+ elif change:
+ node2 = repo.lookup(change)
+ node1 = repo[node2].p1().node()
+ else:
+ node1, node2 = scmutil.revpair(repo, revs)
+
+ cwd = (pats and repo.getcwd()) or ''
+ end = opts.get('print0') and '\0' or '\n'
+ copy = {}
+ # order matters: it must match the tuple returned by repo.status()
+ # and the 'MAR!?IC' status-letter string below
+ states = 'modified added removed deleted unknown ignored clean'.split()
+ show = [k for k in states if opts.get(k)]
+ if opts.get('all'):
+ show += ui.quiet and (states[:4] + ['clean']) or states
+ if not show:
+ show = ui.quiet and states[:4] or states[:5]
+
+ stat = repo.status(node1, node2, scmutil.match(repo[node2], pats, opts),
+ 'ignored' in show, 'clean' in show, 'unknown' in show,
+ opts.get('subrepos'))
+ changestates = zip(states, 'MAR!?IC', stat)
+
+ if (opts.get('all') or opts.get('copies')) and not opts.get('no_status'):
+ # build a map of copy/rename destinations so the origin can be
+ # printed (indented) under each added file
+ ctxn = repo[nullid]
+ ctx1 = repo[node1]
+ ctx2 = repo[node2]
+ added = stat[1]
+ if node2 is None:
+ added = stat[0] + stat[1] # merged?
+
+ for k, v in copies.copies(repo, ctx1, ctx2, ctxn)[0].iteritems():
+ if k in added:
+ copy[k] = v
+ elif v in added:
+ copy[v] = k
+
+ for state, char, files in changestates:
+ if state in show:
+ format = "%s %%s%s" % (char, end)
+ if opts.get('no_status'):
+ format = "%%s%s" % end
+
+ for f in files:
+ ui.write(format % repo.pathto(f, cwd),
+ label='status.' + state)
+ if f in copy:
+ ui.write(' %s%s' % (repo.pathto(copy[f], cwd), end),
+ label='status.copied')
+
+@command('^summary|sum',
+ [('', 'remote', None, _('check for push and pull'))], '[--remote]')
+def summary(ui, repo, **opts):
+ """summarize working directory state
+
+ This generates a brief summary of the working directory state,
+ including parents, branch, commit status, and available updates.
+
+ With the --remote option, this will check the default paths for
+ incoming and outgoing changes. This can be time-consuming.
+
+ Returns 0 on success.
+ """
+
+ ctx = repo[None]
+ parents = ctx.parents()
+ pnode = parents[0].node()
+ marks = []
+
+ for p in parents:
+ # label with log.changeset (instead of log.parent) since this
+ # shows a working directory parent *changeset*:
+ ui.write(_('parent: %d:%s ') % (p.rev(), str(p)),
+ label='log.changeset')
+ ui.write(' '.join(p.tags()), label='log.tag')
+ if p.bookmarks():
+ marks.extend(p.bookmarks())
+ if p.rev() == -1:
+ if not len(repo):
+ ui.write(_(' (empty repository)'))
+ else:
+ ui.write(_(' (no revision checked out)'))
+ ui.write('\n')
+ if p.description():
+ ui.status(' ' + p.description().splitlines()[0].strip() + '\n',
+ label='log.summary')
+
+ branch = ctx.branch()
+ bheads = repo.branchheads(branch)
+ # the default branch is only shown in verbose mode (ui.status);
+ # any other branch name is always shown (ui.write)
+ m = _('branch: %s\n') % branch
+ if branch != 'default':
+ ui.write(m, label='log.branch')
+ else:
+ ui.status(m, label='log.branch')
+
+ if marks:
+ current = repo._bookmarkcurrent
+ ui.write(_('bookmarks:'), label='log.bookmark')
+ if current is not None:
+ try:
+ marks.remove(current)
+ ui.write(' *' + current, label='bookmarks.current')
+ except ValueError:
+ # current bookmark not in parent ctx marks
+ pass
+ for m in marks:
+ ui.write(' ' + m, label='log.bookmark')
+ ui.write('\n', label='log.bookmark')
+
+ st = list(repo.status(unknown=True))[:6]
+
+ # split 'removed' entries that are actually the source of a rename
+ # from plain copies, so they can be counted separately below
+ c = repo.dirstate.copies()
+ copied, renamed = [], []
+ for d, s in c.iteritems():
+ if s in st[2]:
+ st[2].remove(s)
+ renamed.append(d)
+ else:
+ copied.append(d)
+ if d in st[1]:
+ st[1].remove(d)
+ st.insert(3, renamed)
+ st.insert(4, copied)
+
+ ms = mergemod.mergestate(repo)
+ st.append([f for f in ms if ms[f] == 'u'])
+
+ subs = [s for s in ctx.substate if ctx.sub(s).dirty()]
+ st.append(subs)
+
+ # labels is index-aligned with st after the inserts/appends above:
+ # modified, added, removed, renamed, copied, deleted, unknown,
+ # ignored, unresolved, subrepos
+ labels = [ui.label(_('%d modified'), 'status.modified'),
+ ui.label(_('%d added'), 'status.added'),
+ ui.label(_('%d removed'), 'status.removed'),
+ ui.label(_('%d renamed'), 'status.copied'),
+ ui.label(_('%d copied'), 'status.copied'),
+ ui.label(_('%d deleted'), 'status.deleted'),
+ ui.label(_('%d unknown'), 'status.unknown'),
+ ui.label(_('%d ignored'), 'status.ignored'),
+ ui.label(_('%d unresolved'), 'resolve.unresolved'),
+ ui.label(_('%d subrepos'), 'status.modified')]
+ t = []
+ for s, l in zip(st, labels):
+ if s:
+ t.append(l % len(s))
+
+ t = ', '.join(t)
+ cleanworkdir = False
+
+ if len(parents) > 1:
+ t += _(' (merge)')
+ elif branch != parents[0].branch():
+ t += _(' (new branch)')
+ elif (parents[0].extra().get('close') and
+ pnode in repo.branchheads(branch, closed=True)):
+ t += _(' (head closed)')
+ elif not (st[0] or st[1] or st[2] or st[3] or st[4] or st[9]):
+ t += _(' (clean)')
+ cleanworkdir = True
+ elif pnode not in bheads:
+ t += _(' (new branch head)')
+
+ if cleanworkdir:
+ ui.status(_('commit: %s\n') % t.strip())
+ else:
+ ui.write(_('commit: %s\n') % t.strip())
+
+ # all ancestors of branch heads - all ancestors of parent = new csets
+ new = [0] * len(repo)
+ cl = repo.changelog
+ for a in [cl.rev(n) for n in bheads]:
+ new[a] = 1
+ for a in cl.ancestors(*[cl.rev(n) for n in bheads]):
+ new[a] = 1
+ for a in [p.rev() for p in parents]:
+ if a >= 0:
+ new[a] = 0
+ for a in cl.ancestors(*[p.rev() for p in parents]):
+ new[a] = 0
+ new = sum(new)
+
+ if new == 0:
+ ui.status(_('update: (current)\n'))
+ elif pnode not in bheads:
+ ui.write(_('update: %d new changesets (update)\n') % new)
+ else:
+ ui.write(_('update: %d new changesets, %d branch heads (merge)\n') %
+ (new, len(bheads)))
+
+ if opts.get('remote'):
+ # compare with the default pull path, then (if different) the
+ # default push path; buffer repo output to keep the summary terse
+ t = []
+ source, branches = hg.parseurl(ui.expandpath('default'))
+ other = hg.peer(repo, {}, source)
+ revs, checkout = hg.addbranchrevs(repo, other, branches, opts.get('rev'))
+ ui.debug('comparing with %s\n' % util.hidepassword(source))
+ repo.ui.pushbuffer()
+ commoninc = discovery.findcommonincoming(repo, other)
+ _common, incoming, _rheads = commoninc
+ repo.ui.popbuffer()
+ if incoming:
+ t.append(_('1 or more incoming'))
+
+ dest, branches = hg.parseurl(ui.expandpath('default-push', 'default'))
+ revs, checkout = hg.addbranchrevs(repo, repo, branches, None)
+ if source != dest:
+ other = hg.peer(repo, {}, dest)
+ commoninc = None
+ ui.debug('comparing with %s\n' % util.hidepassword(dest))
+ repo.ui.pushbuffer()
+ common, outheads = discovery.findcommonoutgoing(repo, other,
+ commoninc=commoninc)
+ repo.ui.popbuffer()
+ o = repo.changelog.findmissing(common=common, heads=outheads)
+ if o:
+ t.append(_('%d outgoing') % len(o))
+ if 'bookmarks' in other.listkeys('namespaces'):
+ lmarks = repo.listkeys('bookmarks')
+ rmarks = other.listkeys('bookmarks')
+ diff = set(rmarks) - set(lmarks)
+ if len(diff) > 0:
+ t.append(_('%d incoming bookmarks') % len(diff))
+ diff = set(lmarks) - set(rmarks)
+ if len(diff) > 0:
+ t.append(_('%d outgoing bookmarks') % len(diff))
+
+ if t:
+ ui.write(_('remote: %s\n') % (', '.join(t)))
+ else:
+ ui.status(_('remote: (synced)\n'))
+
+@command('tag',
+ [('f', 'force', None, _('force tag')),
+ ('l', 'local', None, _('make the tag local')),
+ ('r', 'rev', '', _('revision to tag'), _('REV')),
+ ('', 'remove', None, _('remove a tag')),
+ # -l/--local is already there, commitopts cannot be used
+ ('e', 'edit', None, _('edit commit message')),
+ ('m', 'message', '', _('use <text> as commit message'), _('TEXT')),
+ ] + commitopts2,
+ _('[-f] [-l] [-m TEXT] [-d DATE] [-u USER] [-r REV] NAME...'))
+def tag(ui, repo, name1, *names, **opts):
+ """add one or more tags for the current or given revision
+
+ Name a particular revision using <name>.
+
+ Tags are used to name particular revisions of the repository and are
+ very useful to compare different revisions, to go back to significant
+ earlier versions or to mark branch points as releases, etc. Changing
+ an existing tag is normally disallowed; use -f/--force to override.
+
+ If no revision is given, the parent of the working directory is
+ used, or tip if no revision is checked out.
+
+ To facilitate version control, distribution, and merging of tags,
+ they are stored as a file named ".hgtags" which is managed similarly
+ to other project files and can be hand-edited if necessary. This
+ also means that tagging creates a new commit. The file
+ ".hg/localtags" is used for local tags (not shared among
+ repositories).
+
+ Tag commits are usually made at the head of a branch. If the parent
+ of the working directory is not a branch head, :hg:`tag` aborts; use
+ -f/--force to force the tag commit to be based on a non-head
+ changeset.
+
+ See :hg:`help dates` for a list of formats valid for -d/--date.
+
+ Since tag names have priority over branch names during revision
+ lookup, using an existing branch name as a tag name is discouraged.
+
+ Returns 0 on success.
+ """
+
+ rev_ = "."
+ # validate all names up front: no duplicates, no reserved names,
+ # no whitespace-only names
+ names = [t.strip() for t in (name1,) + names]
+ if len(names) != len(set(names)):
+ raise util.Abort(_('tag names must be unique'))
+ for n in names:
+ if n in ['tip', '.', 'null']:
+ raise util.Abort(_("the name '%s' is reserved") % n)
+ if not n:
+ raise util.Abort(_('tag names cannot consist entirely of whitespace'))
+ if opts.get('rev') and opts.get('remove'):
+ raise util.Abort(_("--rev and --remove are incompatible"))
+ if opts.get('rev'):
+ rev_ = opts['rev']
+ message = opts.get('message')
+ if opts.get('remove'):
+ # removal is recorded as tagging the nullid revision
+ expectedtype = opts.get('local') and 'local' or 'global'
+ for n in names:
+ if not repo.tagtype(n):
+ raise util.Abort(_("tag '%s' does not exist") % n)
+ if repo.tagtype(n) != expectedtype:
+ if expectedtype == 'global':
+ raise util.Abort(_("tag '%s' is not a global tag") % n)
+ else:
+ raise util.Abort(_("tag '%s' is not a local tag") % n)
+ rev_ = nullid
+ if not message:
+ # we don't translate commit messages
+ message = 'Removed tag %s' % ', '.join(names)
+ elif not opts.get('force'):
+ for n in names:
+ if n in repo.tags():
+ raise util.Abort(_("tag '%s' already exists "
+ "(use -f to force)") % n)
+ if not opts.get('local'):
+ # a global tag creates a commit, so enforce the usual commit
+ # preconditions: no uncommitted merge, parent is a branch head
+ p1, p2 = repo.dirstate.parents()
+ if p2 != nullid:
+ raise util.Abort(_('uncommitted merge'))
+ bheads = repo.branchheads()
+ if not opts.get('force') and bheads and p1 not in bheads:
+ raise util.Abort(_('not at a branch head (use -f to force)'))
+ r = scmutil.revsingle(repo, rev_).node()
+
+ if not message:
+ # we don't translate commit messages
+ message = ('Added tag %s for changeset %s' %
+ (', '.join(names), short(r)))
+
+ date = opts.get('date')
+ if date:
+ date = util.parsedate(date)
+
+ if opts.get('edit'):
+ message = ui.edit(message, ui.username())
+
+ repo.tag(names, r, message, opts.get('local'), opts.get('user'), date)
+
+@command('tags', [], '')
+def tags(ui, repo):
+ """list repository tags
+
+ This lists both regular and local tags. When the -v/--verbose
+ switch is used, a third column "local" is printed for local tags.
+
+ Returns 0 on success.
+ """
+
+ # full 40-char hashes with --debug, short hashes otherwise
+ hexfunc = ui.debugflag and hex or short
+ tagtype = ""
+
+ for t, n in reversed(repo.tagslist()):
+ if ui.quiet:
+ ui.write("%s\n" % t, label='tags.normal')
+ continue
+
+ hn = hexfunc(n)
+ r = "%5d:%s" % (repo.changelog.rev(n), hn)
+ rev = ui.label(r, 'log.changeset')
+ # pad by display width (not len) so wide characters align
+ spaces = " " * (30 - encoding.colwidth(t))
+
+ tag = ui.label(t, 'tags.normal')
+ if ui.verbose:
+ if repo.tagtype(t) == 'local':
+ tagtype = " local"
+ tag = ui.label(t, 'tags.local')
+ else:
+ tagtype = ""
+ ui.write("%s%s %s%s\n" % (tag, spaces, rev, tagtype))
+
+@command('tip',
+ [('p', 'patch', None, _('show patch')),
+ ('g', 'git', None, _('use git extended diff format')),
+ ] + templateopts,
+ _('[-p] [-g]'))
+def tip(ui, repo, **opts):
+ """show the tip revision
+
+ The tip revision (usually just called the tip) is the changeset
+ most recently added to the repository (and therefore the most
+ recently changed head).
+
+ If you have just made a commit, that commit will be the tip. If
+ you have just pulled changes from another repository, the tip of
+ that repository becomes the current tip. The "tip" tag is special
+ and cannot be renamed or assigned to a different changeset.
+
+ Returns 0 on success.
+ """
+ # the tip is simply the highest revision number: len(repo) - 1
+ displayer = cmdutil.show_changeset(ui, repo, opts)
+ displayer.show(repo[len(repo) - 1])
+ displayer.close()
+
+@command('unbundle',
+ [('u', 'update', None,
+ _('update to new branch head if changesets were unbundled'))],
+ _('[-u] FILE...'))
+def unbundle(ui, repo, fname1, *fnames, **opts):
+ """apply one or more changegroup files
+
+ Apply one or more compressed changegroup files generated by the
+ bundle command.
+
+ Returns 0 on success, 1 if an update has unresolved files.
+ """
+ fnames = (fname1,) + fnames
+
+ # hold the repo lock across all bundles so they apply atomically
+ # with respect to other writers
+ lock = repo.lock()
+ wc = repo['.']
+ try:
+ for fname in fnames:
+ f = url.open(ui, fname)
+ gen = changegroup.readbundle(f, fname)
+ modheads = repo.addchangegroup(gen, 'unbundle', 'bundle:' + fname,
+ lock=lock)
+ bookmarks.updatecurrentbookmark(repo, wc.node(), wc.branch())
+ finally:
+ lock.release()
+ # modheads tells postincoming whether -u should trigger an update
+ return postincoming(ui, repo, modheads, opts.get('update'), None)
+
+@command('^update|up|checkout|co',
+ [('C', 'clean', None, _('discard uncommitted changes (no backup)')),
+ ('c', 'check', None,
+ _('update across branches if no uncommitted changes')),
+ ('d', 'date', '', _('tipmost revision matching date'), _('DATE')),
+ ('r', 'rev', '', _('revision'), _('REV'))],
+ _('[-c] [-C] [-d DATE] [[-r] REV]'))
+def update(ui, repo, node=None, rev=None, clean=False, date=None, check=False):
+ """update working directory (or switch revisions)
+
+ Update the repository's working directory to the specified
+ changeset. If no changeset is specified, update to the tip of the
+ current named branch.
+
+ If the changeset is not a descendant of the working directory's
+ parent, the update is aborted. With the -c/--check option, the
+ working directory is checked for uncommitted changes; if none are
+ found, the working directory is updated to the specified
+ changeset.
+
+ Update sets the working directory's parent revison to the specified
+ changeset (see :hg:`help parents`).
+
+ The following rules apply when the working directory contains
+ uncommitted changes:
+
+ 1. If neither -c/--check nor -C/--clean is specified, and if
+ the requested changeset is an ancestor or descendant of
+ the working directory's parent, the uncommitted changes
+ are merged into the requested changeset and the merged
+ result is left uncommitted. If the requested changeset is
+ not an ancestor or descendant (that is, it is on another
+ branch), the update is aborted and the uncommitted changes
+ are preserved.
+
+ 2. With the -c/--check option, the update is aborted and the
+ uncommitted changes are preserved.
+
+ 3. With the -C/--clean option, uncommitted changes are discarded and
+ the working directory is updated to the requested changeset.
+
+ Use null as the changeset to remove the working directory (like
+ :hg:`clone -U`).
+
+ If you want to revert just one file to an older revision, use
+ :hg:`revert [-r REV] NAME`.
+
+ See :hg:`help dates` for a list of formats valid for -d/--date.
+
+ Returns 0 on success, 1 if there are unresolved files.
+ """
+ # the revision may arrive either positionally (node) or via -r (rev);
+ # accepting both at once would be ambiguous
+ if rev and node:
+ raise util.Abort(_("please specify just one revision"))
+
+ if rev is None or rev == '':
+ rev = node
+
+ # if we defined a bookmark, we have to remember the original bookmark name
+ brev = rev
+ rev = scmutil.revsingle(repo, rev, rev).rev()
+
+ if check and clean:
+ raise util.Abort(_("cannot specify both -c/--check and -C/--clean"))
+
+ if check:
+ # we could use dirty() but we can ignore merge and branch trivia
+ c = repo[None]
+ if c.modified() or c.added() or c.removed():
+ raise util.Abort(_("uncommitted local changes"))
+
+ if date:
+ if rev is not None:
+ raise util.Abort(_("you can't specify a revision and a date"))
+ rev = cmdutil.finddate(ui, repo, date)
+
+ # --check falls through to a clean update here because the dirty
+ # cases were already rejected above
+ if clean or check:
+ ret = hg.clean(repo, rev)
+ else:
+ ret = hg.update(repo, rev)
+
+ if brev in repo._bookmarks:
+ bookmarks.setcurrent(repo, brev)
+
+ return ret
+
+@command('verify', [])
+def verify(ui, repo):
+ """verify the integrity of the repository
+
+ Verify the integrity of the current repository.
+
+ This will perform an extensive check of the repository's
+ integrity, validating the hashes and checksums of each entry in
+ the changelog, manifest, and tracked files, as well as the
+ integrity of their crosslinks and indices.
+
+ Returns 0 on success, 1 if errors are encountered.
+ """
+ # all verification logic lives in the hg module
+ return hg.verify(repo)
+
+@command('version', [])
+def version_(ui):
+ """output version and copyright information"""
+ # trailing underscore avoids shadowing any 'version' name;
+ # the copyright blurb goes through ui.status so -q hides it
+ ui.write(_("Mercurial Distributed SCM (version %s)\n")
+ % util.version())
+ ui.status(_(
+ "(see http://mercurial.selenic.com for more information)\n"
+ "\nCopyright (C) 2005-2011 Matt Mackall and others\n"
+ "This is free software; see the source for copying conditions. "
+ "There is NO\nwarranty; "
+ "not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n"
+ ))
+
+# commands that never need a repository object (repo will be None)
+norepo = ("clone init version help debugcommands debugcomplete"
+ " debugdate debuginstall debugfsinfo debugpushkey debugwireargs"
+ " debugknown debuggetbundle debugbundle")
+# commands for which the repository argument is optional
+optionalrepo = ("identify paths serve showconfig debugancestor debugdag"
+ " debugdata debugindex debugindexdot debugrevlog")
diff --git a/websdk/mercurial/commandserver.py b/websdk/mercurial/commandserver.py
new file mode 100644
index 0000000..27d589d
--- /dev/null
+++ b/websdk/mercurial/commandserver.py
@@ -0,0 +1,237 @@
+# commandserver.py - communicate with Mercurial's API over a pipe
+#
+# Copyright Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from i18n import _
+import struct
+import sys, os
+import dispatch, encoding, util
+
+# module-level log destination; stays None (log() is a no-op) unless
+# server.__init__ configures it from the cmdserver.log setting
+logfile = None
+
+def log(*args):
+ # debug logging helper: write each argument's str() to the configured
+ # log target, flushing so output interleaves usefully with the protocol
+ if not logfile:
+ return
+
+ for a in args:
+ logfile.write(str(a))
+
+ logfile.flush()
+
+class channeledoutput(object):
+ """
+ Write data from in_ to out in the following format:
+
+ data length (unsigned int),
+ data
+ """
+ def __init__(self, in_, out, channel):
+ self.in_ = in_
+ self.out = out
+ self.channel = channel
+
+ def write(self, data):
+ if not data:
+ return
+ # frame header: one-byte channel identifier + big-endian uint32 length
+ self.out.write(struct.pack('>cI', self.channel, len(data)))
+ self.out.write(data)
+ self.out.flush()
+
+ def __getattr__(self, attr):
+ # refuse isatty/fileno so callers treat this as a plain pipe;
+ # delegate any other attribute to the wrapped stream
+ if attr in ('isatty', 'fileno'):
+ raise AttributeError, attr
+ return getattr(self.in_, attr)
+
+class channeledinput(object):
+ """
+ Read data from in_.
+
+ Requests for input are written to out in the following format:
+ channel identifier - 'I' for plain input, 'L' line based (1 byte)
+ how many bytes to send at most (unsigned int),
+
+ The client replies with:
+ data length (unsigned int), 0 meaning EOF
+ data
+ """
+
+ maxchunksize = 4 * 1024
+
+ def __init__(self, in_, out, channel):
+ self.in_ = in_
+ self.out = out
+ self.channel = channel
+
+ def read(self, size=-1):
+ if size < 0:
+ # if we need to consume all the clients input, ask for 4k chunks
+ # so the pipe doesn't fill up risking a deadlock
+ size = self.maxchunksize
+ s = self._read(size, self.channel)
+ buf = s
+ while s:
+ s = self._read(size, self.channel)
+ buf += s
+
+ return buf
+ else:
+ return self._read(size, self.channel)
+
+ def _read(self, size, channel):
+ # one request/response round trip with the client; returns '' on EOF
+ if not size:
+ return ''
+ assert size > 0
+
+ # tell the client we need at most size bytes
+ self.out.write(struct.pack('>cI', channel, size))
+ self.out.flush()
+
+ length = self.in_.read(4)
+ length = struct.unpack('>I', length)[0]
+ if not length:
+ return ''
+ else:
+ return self.in_.read(length)
+
+ def readline(self, size=-1):
+ if size < 0:
+ size = self.maxchunksize
+ s = self._read(size, 'L')
+ buf = s
+ # keep asking for more until there's either no more or
+ # we got a full line
+ while s and s[-1] != '\n':
+ s = self._read(size, 'L')
+ buf += s
+
+ return buf
+ else:
+ return self._read(size, 'L')
+
+ def __iter__(self):
+ return self
+
+ def next(self):
+ # py2 iterator protocol: yield lines until EOF
+ l = self.readline()
+ if not l:
+ raise StopIteration
+ return l
+
+ def __getattr__(self, attr):
+ # same pipe-masquerading behavior as channeledoutput
+ if attr in ('isatty', 'fileno'):
+ raise AttributeError, attr
+ return getattr(self.in_, attr)
+
+class server(object):
+ """
+ Listens for commands on stdin, runs them and writes the output on a channel
+ based stream to stdout.
+ """
+ def __init__(self, ui, repo, mode):
+ # remember the starting directory so --cwd commands can be undone
+ self.cwd = os.getcwd()
+
+ logpath = ui.config("cmdserver", "log", None)
+ if logpath:
+ global logfile
+ if logpath == '-':
+ # write log on a special 'd'ebug channel
+ logfile = channeledoutput(sys.stdout, sys.stdout, 'd')
+ else:
+ logfile = open(logpath, 'a')
+
+ # the ui here is really the repo ui so take its baseui so we don't end up
+ # with its local configuration
+ self.ui = repo.baseui
+ self.repo = repo
+ self.repoui = repo.ui
+
+ if mode == 'pipe':
+ # one channel object per protocol channel: e(rror), o(utput),
+ # I(nput) and r(esult), all multiplexed over stdout/stdin
+ self.cerr = channeledoutput(sys.stderr, sys.stdout, 'e')
+ self.cout = channeledoutput(sys.stdout, sys.stdout, 'o')
+ self.cin = channeledinput(sys.stdin, sys.stdout, 'I')
+ self.cresult = channeledoutput(sys.stdout, sys.stdout, 'r')
+
+ self.client = sys.stdin
+ else:
+ raise util.Abort(_('unknown mode %s') % mode)
+
+ def _read(self, size):
+ if not size:
+ return ''
+
+ data = self.client.read(size)
+
+ # is the other end closed?
+ if not data:
+ raise EOFError()
+
+ return data
+
+ def runcommand(self):
+ """ reads a list of \0 terminated arguments, executes
+ and writes the return code to the result channel """
+
+ length = struct.unpack('>I', self._read(4))[0]
+ if not length:
+ args = []
+ else:
+ args = self._read(length).split('\0')
+
+ # copy the uis so changes (e.g. --config or --verbose) don't
+ # persist between requests
+ copiedui = self.ui.copy()
+ self.repo.baseui = copiedui
+ self.repo.ui = self.repo.dirstate._ui = self.repoui.copy()
+ self.repo.invalidate()
+
+ req = dispatch.request(args[:], copiedui, self.repo, self.cin,
+ self.cout, self.cerr)
+
+ ret = dispatch.dispatch(req) or 0 # might return None
+
+ # restore old cwd
+ if '--cwd' in args:
+ os.chdir(self.cwd)
+
+ # result channel carries the exit code as a signed big-endian int
+ self.cresult.write(struct.pack('>i', int(ret)))
+
+ def getencoding(self):
+ """ writes the current encoding to the result channel """
+ self.cresult.write(encoding.encoding)
+
+ def serveone(self):
+ # read one command name and dispatch it via the capabilities table;
+ # returns False (ending the serve loop) on an empty command
+ cmd = self.client.readline()[:-1]
+ if cmd:
+ handler = self.capabilities.get(cmd)
+ if handler:
+ handler(self)
+ else:
+ # clients are expected to check what commands are supported by
+ # looking at the servers capabilities
+ raise util.Abort(_('unknown command %s') % cmd)
+
+ return cmd != ''
+
+ capabilities = {'runcommand' : runcommand,
+ 'getencoding' : getencoding}
+
+ def serve(self):
+ hellomsg = 'capabilities: ' + ' '.join(self.capabilities.keys())
+ hellomsg += '\n'
+ hellomsg += 'encoding: ' + encoding.encoding
+
+ # write the hello msg in -one- chunk
+ self.cout.write(hellomsg)
+
+ try:
+ while self.serveone():
+ pass
+ except EOFError:
+ # we'll get here if the client disconnected while we were reading
+ # its request
+ return 1
+
+ return 0
diff --git a/websdk/mercurial/config.py b/websdk/mercurial/config.py
index 25ec40e..2556ef1 120000..100644
--- a/websdk/mercurial/config.py
+++ b/websdk/mercurial/config.py
@@ -1 +1,149 @@
-/usr/share/pyshared/mercurial/config.py \ No newline at end of file
+# config.py - configuration parsing for Mercurial
+#
+# Copyright 2009 Matt Mackall <mpm@selenic.com> and others
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from i18n import _
+import error, util
+import re, os, errno
+
class sortdict(dict):
    """A dict subclass that remembers insertion order.

    Iteration and items() follow insertion order; re-assigning an
    existing key moves it to the end of that order.
    """

    def __init__(self, data=None):
        self._list = []
        if data:
            self.update(data)

    def copy(self):
        return sortdict(self)

    def __setitem__(self, key, val):
        # re-inserting an existing key moves it to the back of the order
        if key in self:
            self._list.remove(key)
        self._list.append(key)
        dict.__setitem__(self, key, val)

    def __iter__(self):
        return iter(self._list)

    def update(self, src):
        for key in src:
            self[key] = src[key]

    def clear(self):
        dict.clear(self)
        self._list = []

    def items(self):
        return [(key, self[key]) for key in self._list]

    def __delitem__(self, key):
        dict.__delitem__(self, key)
        self._list.remove(key)
+
class config(object):
    """An ordered parser/container for hgrc/ini-style configuration.

    Data is held as {section: sortdict(item -> value)} plus a parallel
    {(section, item): "file:line"} map recording where each value came
    from (see source()).
    """
    def __init__(self, data=None):
        self._data = {}
        self._source = {}
        if data:
            # copy the per-section sortdicts so the new config is
            # independent of `data`
            for k in data._data:
                self._data[k] = data[k].copy()
            self._source = data._source.copy()
    def copy(self):
        return config(self)
    def __contains__(self, section):
        return section in self._data
    def __getitem__(self, section):
        # missing sections read as an empty mapping, never raise
        return self._data.get(section, {})
    def __iter__(self):
        for d in self.sections():
            yield d
    def update(self, src):
        # overlay another config: src's items win per section
        for s in src:
            if s not in self:
                self._data[s] = sortdict()
            self._data[s].update(src._data[s])
        self._source.update(src._source)
    def get(self, section, item, default=None):
        return self._data.get(section, {}).get(item, default)
    def source(self, section, item):
        """Return the "file:line" where (section, item) was set, or ""."""
        return self._source.get((section, item), "")
    def sections(self):
        return sorted(self._data.keys())
    def items(self, section):
        return self._data.get(section, {}).items()
    def set(self, section, item, value, source=""):
        if section not in self:
            self._data[section] = sortdict()
        self._data[section][item] = value
        self._source[(section, item)] = source

    def parse(self, src, data, sections=None, remap=None, include=None):
        """Parse `data` (the text of file `src`) into this config.

        sections: if given, load only items from these sections.
        remap:    optional dict renaming sections as they are read.
        include:  callback invoked for %include directives.
        Raises error.ParseError on a line matching no known form.
        """
        sectionre = re.compile(r'\[([^\[]+)\]')
        itemre = re.compile(r'([^=\s][^=]*?)\s*=\s*(.*\S|)')
        contre = re.compile(r'\s+(\S|\S.*\S)\s*$')
        emptyre = re.compile(r'(;|#|\s*$)')
        commentre = re.compile(r'(;|#)')
        unsetre = re.compile(r'%unset\s+(\S+)')
        includere = re.compile(r'%include\s+(\S|\S.*\S)\s*$')
        section = ""
        item = None
        line = 0
        cont = False  # inside a continuation of the previous item's value?

        for l in data.splitlines(True):
            line += 1
            if cont:
                if commentre.match(l):
                    continue
                m = contre.match(l)
                if m:
                    if sections and section not in sections:
                        continue
                    # append the continuation text to the current value
                    v = self.get(section, item) + "\n" + m.group(1)
                    self.set(section, item, v, "%s:%d" % (src, line))
                    continue
                item = None
                cont = False
            m = includere.match(l)
            if m:
                # %include paths are resolved relative to the including file
                inc = util.expandpath(m.group(1))
                base = os.path.dirname(src)
                inc = os.path.normpath(os.path.join(base, inc))
                if include:
                    try:
                        include(inc, remap=remap, sections=sections)
                    except IOError, inst:
                        # a missing include file is tolerated; any other
                        # I/O failure is a parse error
                        if inst.errno != errno.ENOENT:
                            raise error.ParseError(_("cannot include %s (%s)")
                                                   % (inc, inst.strerror),
                                                   "%s:%s" % (src, line))
                continue
            if emptyre.match(l):
                continue
            m = sectionre.match(l)
            if m:
                section = m.group(1)
                if remap:
                    section = remap.get(section, section)
                if section not in self:
                    self._data[section] = sortdict()
                continue
            m = itemre.match(l)
            if m:
                item = m.group(1)
                cont = True  # following indented lines extend this value
                if sections and section not in sections:
                    continue
                self.set(section, item, m.group(2), "%s:%d" % (src, line))
                continue
            m = unsetre.match(l)
            if m:
                name = m.group(1)
                if sections and section not in sections:
                    continue
                if self.get(section, name) is not None:
                    del self._data[section][name]
                continue

            raise error.ParseError(l.rstrip(), ("%s:%s" % (src, line)))

    def read(self, path, fp=None, sections=None, remap=None):
        """Read and parse the config file at `path` (or file object fp).

        Passes self.read as the %include callback so includes recurse.
        """
        if not fp:
            fp = util.posixfile(path)
        self.parse(path, fp.read(), sections, remap, self.read)
diff --git a/websdk/mercurial/context.py b/websdk/mercurial/context.py
index 8d686b7..58f4385 120000..100644
--- a/websdk/mercurial/context.py
+++ b/websdk/mercurial/context.py
@@ -1 +1,1137 @@
-/usr/share/pyshared/mercurial/context.py \ No newline at end of file
+# context.py - changeset and file context objects for mercurial
+#
+# Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from node import nullid, nullrev, short, hex
+from i18n import _
+import ancestor, bdiff, error, util, scmutil, subrepo, patch, encoding
+import match as matchmod
+import os, errno, stat
+
+propertycache = util.propertycache
+
+class changectx(object):
+ """A changecontext object makes access to data related to a particular
+ changeset convenient."""
+ def __init__(self, repo, changeid=''):
+ """changeid is a revision number, node, or tag"""
+ if changeid == '':
+ changeid = '.'
+ self._repo = repo
+ if isinstance(changeid, (long, int)):
+ self._rev = changeid
+ self._node = self._repo.changelog.node(changeid)
+ else:
+ self._node = self._repo.lookup(changeid)
+ self._rev = self._repo.changelog.rev(self._node)
+
+ def __str__(self):
+ return short(self.node())
+
+ def __int__(self):
+ return self.rev()
+
+ def __repr__(self):
+ return "<changectx %s>" % str(self)
+
+ def __hash__(self):
+ try:
+ return hash(self._rev)
+ except AttributeError:
+ return id(self)
+
+ def __eq__(self, other):
+ try:
+ return self._rev == other._rev
+ except AttributeError:
+ return False
+
+ def __ne__(self, other):
+ return not (self == other)
+
+ def __nonzero__(self):
+ return self._rev != nullrev
+
+ @propertycache
+ def _changeset(self):
+ return self._repo.changelog.read(self.node())
+
+ @propertycache
+ def _manifest(self):
+ return self._repo.manifest.read(self._changeset[0])
+
+ @propertycache
+ def _manifestdelta(self):
+ return self._repo.manifest.readdelta(self._changeset[0])
+
+ @propertycache
+ def _parents(self):
+ p = self._repo.changelog.parentrevs(self._rev)
+ if p[1] == nullrev:
+ p = p[:-1]
+ return [changectx(self._repo, x) for x in p]
+
+ @propertycache
+ def substate(self):
+ return subrepo.state(self, self._repo.ui)
+
+ def __contains__(self, key):
+ return key in self._manifest
+
+ def __getitem__(self, key):
+ return self.filectx(key)
+
+ def __iter__(self):
+ for f in sorted(self._manifest):
+ yield f
+
+ def changeset(self):
+ return self._changeset
+ def manifest(self):
+ return self._manifest
+ def manifestnode(self):
+ return self._changeset[0]
+
+ def rev(self):
+ return self._rev
+ def node(self):
+ return self._node
+ def hex(self):
+ return hex(self._node)
+ def user(self):
+ return self._changeset[1]
+ def date(self):
+ return self._changeset[2]
+ def files(self):
+ return self._changeset[3]
+ def description(self):
+ return self._changeset[4]
+ def branch(self):
+ return encoding.tolocal(self._changeset[5].get("branch"))
+ def extra(self):
+ return self._changeset[5]
+ def tags(self):
+ return self._repo.nodetags(self._node)
+ def bookmarks(self):
+ return self._repo.nodebookmarks(self._node)
+ def hidden(self):
+ return self._rev in self._repo.changelog.hiddenrevs
+
+ def parents(self):
+ """return contexts for each parent changeset"""
+ return self._parents
+
+ def p1(self):
+ return self._parents[0]
+
+ def p2(self):
+ if len(self._parents) == 2:
+ return self._parents[1]
+ return changectx(self._repo, -1)
+
+ def children(self):
+ """return contexts for each child changeset"""
+ c = self._repo.changelog.children(self._node)
+ return [changectx(self._repo, x) for x in c]
+
+ def ancestors(self):
+ for a in self._repo.changelog.ancestors(self._rev):
+ yield changectx(self._repo, a)
+
+ def descendants(self):
+ for d in self._repo.changelog.descendants(self._rev):
+ yield changectx(self._repo, d)
+
+ def _fileinfo(self, path):
+ if '_manifest' in self.__dict__:
+ try:
+ return self._manifest[path], self._manifest.flags(path)
+ except KeyError:
+ raise error.LookupError(self._node, path,
+ _('not found in manifest'))
+ if '_manifestdelta' in self.__dict__ or path in self.files():
+ if path in self._manifestdelta:
+ return self._manifestdelta[path], self._manifestdelta.flags(path)
+ node, flag = self._repo.manifest.find(self._changeset[0], path)
+ if not node:
+ raise error.LookupError(self._node, path,
+ _('not found in manifest'))
+
+ return node, flag
+
+ def filenode(self, path):
+ return self._fileinfo(path)[0]
+
+ def flags(self, path):
+ try:
+ return self._fileinfo(path)[1]
+ except error.LookupError:
+ return ''
+
+ def filectx(self, path, fileid=None, filelog=None):
+ """get a file context from this changeset"""
+ if fileid is None:
+ fileid = self.filenode(path)
+ return filectx(self._repo, path, fileid=fileid,
+ changectx=self, filelog=filelog)
+
+ def ancestor(self, c2):
+ """
+ return the ancestor context of self and c2
+ """
+ # deal with workingctxs
+ n2 = c2._node
+ if n2 is None:
+ n2 = c2._parents[0]._node
+ n = self._repo.changelog.ancestor(self._node, n2)
+ return changectx(self._repo, n)
+
+ def walk(self, match):
+ fset = set(match.files())
+ # for dirstate.walk, files=['.'] means "walk the whole tree".
+ # follow that here, too
+ fset.discard('.')
+ for fn in self:
+ for ffn in fset:
+ # match if the file is the exact name or a directory
+ if ffn == fn or fn.startswith("%s/" % ffn):
+ fset.remove(ffn)
+ break
+ if match(fn):
+ yield fn
+ for fn in sorted(fset):
+ if match.bad(fn, _('no such file in rev %s') % self) and match(fn):
+ yield fn
+
+ def sub(self, path):
+ return subrepo.subrepo(self, path)
+
+ def match(self, pats=[], include=None, exclude=None, default='glob'):
+ r = self._repo
+ return matchmod.match(r.root, r.getcwd(), pats,
+ include, exclude, default,
+ auditor=r.auditor, ctx=self)
+
+ def diff(self, ctx2=None, match=None, **opts):
+ """Returns a diff generator for the given contexts and matcher"""
+ if ctx2 is None:
+ ctx2 = self.p1()
+ if ctx2 is not None and not isinstance(ctx2, changectx):
+ ctx2 = self._repo[ctx2]
+ diffopts = patch.diffopts(self._repo.ui, opts)
+ return patch.diff(self._repo, ctx2.node(), self.node(),
+ match=match, opts=diffopts)
+
+class filectx(object):
+ """A filecontext object makes access to data related to a particular
+ filerevision convenient."""
+ def __init__(self, repo, path, changeid=None, fileid=None,
+ filelog=None, changectx=None):
+ """changeid can be a changeset revision, node, or tag.
+ fileid can be a file revision or node."""
+ self._repo = repo
+ self._path = path
+
+ assert (changeid is not None
+ or fileid is not None
+ or changectx is not None), \
+ ("bad args: changeid=%r, fileid=%r, changectx=%r"
+ % (changeid, fileid, changectx))
+
+ if filelog:
+ self._filelog = filelog
+
+ if changeid is not None:
+ self._changeid = changeid
+ if changectx is not None:
+ self._changectx = changectx
+ if fileid is not None:
+ self._fileid = fileid
+
+ @propertycache
+ def _changectx(self):
+ return changectx(self._repo, self._changeid)
+
+ @propertycache
+ def _filelog(self):
+ return self._repo.file(self._path)
+
+ @propertycache
+ def _changeid(self):
+ if '_changectx' in self.__dict__:
+ return self._changectx.rev()
+ else:
+ return self._filelog.linkrev(self._filerev)
+
+ @propertycache
+ def _filenode(self):
+ if '_fileid' in self.__dict__:
+ return self._filelog.lookup(self._fileid)
+ else:
+ return self._changectx.filenode(self._path)
+
+ @propertycache
+ def _filerev(self):
+ return self._filelog.rev(self._filenode)
+
+ @propertycache
+ def _repopath(self):
+ return self._path
+
+ def __nonzero__(self):
+ try:
+ self._filenode
+ return True
+ except error.LookupError:
+ # file is missing
+ return False
+
+ def __str__(self):
+ return "%s@%s" % (self.path(), short(self.node()))
+
+ def __repr__(self):
+ return "<filectx %s>" % str(self)
+
+ def __hash__(self):
+ try:
+ return hash((self._path, self._filenode))
+ except AttributeError:
+ return id(self)
+
+ def __eq__(self, other):
+ try:
+ return (self._path == other._path
+ and self._filenode == other._filenode)
+ except AttributeError:
+ return False
+
+ def __ne__(self, other):
+ return not (self == other)
+
+ def filectx(self, fileid):
+ '''opens an arbitrary revision of the file without
+ opening a new filelog'''
+ return filectx(self._repo, self._path, fileid=fileid,
+ filelog=self._filelog)
+
+ def filerev(self):
+ return self._filerev
+ def filenode(self):
+ return self._filenode
+ def flags(self):
+ return self._changectx.flags(self._path)
+ def filelog(self):
+ return self._filelog
+
+ def rev(self):
+ if '_changectx' in self.__dict__:
+ return self._changectx.rev()
+ if '_changeid' in self.__dict__:
+ return self._changectx.rev()
+ return self._filelog.linkrev(self._filerev)
+
+ def linkrev(self):
+ return self._filelog.linkrev(self._filerev)
+ def node(self):
+ return self._changectx.node()
+ def hex(self):
+ return hex(self.node())
+ def user(self):
+ return self._changectx.user()
+ def date(self):
+ return self._changectx.date()
+ def files(self):
+ return self._changectx.files()
+ def description(self):
+ return self._changectx.description()
+ def branch(self):
+ return self._changectx.branch()
+ def extra(self):
+ return self._changectx.extra()
+ def manifest(self):
+ return self._changectx.manifest()
+ def changectx(self):
+ return self._changectx
+
+ def data(self):
+ return self._filelog.read(self._filenode)
+ def path(self):
+ return self._path
+ def size(self):
+ return self._filelog.size(self._filerev)
+
+ def cmp(self, fctx):
+ """compare with other file context
+
+ returns True if different than fctx.
+ """
+ if (fctx._filerev is None and self._repo._encodefilterpats
+ or self.size() == fctx.size()):
+ return self._filelog.cmp(self._filenode, fctx.data())
+
+ return True
+
+ def renamed(self):
+ """check if file was actually renamed in this changeset revision
+
+ If rename logged in file revision, we report copy for changeset only
+ if file revisions linkrev points back to the changeset in question
+ or both changeset parents contain different file revisions.
+ """
+
+ renamed = self._filelog.renamed(self._filenode)
+ if not renamed:
+ return renamed
+
+ if self.rev() == self.linkrev():
+ return renamed
+
+ name = self.path()
+ fnode = self._filenode
+ for p in self._changectx.parents():
+ try:
+ if fnode == p.filenode(name):
+ return None
+ except error.LookupError:
+ pass
+ return renamed
+
+ def parents(self):
+ p = self._path
+ fl = self._filelog
+ pl = [(p, n, fl) for n in self._filelog.parents(self._filenode)]
+
+ r = self._filelog.renamed(self._filenode)
+ if r:
+ pl[0] = (r[0], r[1], None)
+
+ return [filectx(self._repo, p, fileid=n, filelog=l)
+ for p, n, l in pl if n != nullid]
+
+ def p1(self):
+ return self.parents()[0]
+
+ def p2(self):
+ p = self.parents()
+ if len(p) == 2:
+ return p[1]
+ return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
+
+ def children(self):
+ # hard for renames
+ c = self._filelog.children(self._filenode)
+ return [filectx(self._repo, self._path, fileid=x,
+ filelog=self._filelog) for x in c]
+
+ def annotate(self, follow=False, linenumber=None):
+ '''returns a list of tuples of (ctx, line) for each line
+ in the file, where ctx is the filectx of the node where
+ that line was last changed.
+ This returns tuples of ((ctx, linenumber), line) for each line,
+ if "linenumber" parameter is NOT "None".
+ In such tuples, linenumber means one at the first appearance
+ in the managed file.
+ To reduce annotation cost,
+ this returns fixed value(False is used) as linenumber,
+ if "linenumber" parameter is "False".'''
+
+ def decorate_compat(text, rev):
+ return ([rev] * len(text.splitlines()), text)
+
+ def without_linenumber(text, rev):
+ return ([(rev, False)] * len(text.splitlines()), text)
+
+ def with_linenumber(text, rev):
+ size = len(text.splitlines())
+ return ([(rev, i) for i in xrange(1, size + 1)], text)
+
+ decorate = (((linenumber is None) and decorate_compat) or
+ (linenumber and with_linenumber) or
+ without_linenumber)
+
+ def pair(parent, child):
+ for a1, a2, b1, b2 in bdiff.blocks(parent[1], child[1]):
+ child[0][b1:b2] = parent[0][a1:a2]
+ return child
+
+ getlog = util.lrucachefunc(lambda x: self._repo.file(x))
+ def getctx(path, fileid):
+ log = path == self._path and self._filelog or getlog(path)
+ return filectx(self._repo, path, fileid=fileid, filelog=log)
+ getctx = util.lrucachefunc(getctx)
+
+ def parents(f):
+ # we want to reuse filectx objects as much as possible
+ p = f._path
+ if f._filerev is None: # working dir
+ pl = [(n.path(), n.filerev()) for n in f.parents()]
+ else:
+ pl = [(p, n) for n in f._filelog.parentrevs(f._filerev)]
+
+ if follow:
+ r = f.renamed()
+ if r:
+ pl[0] = (r[0], getlog(r[0]).rev(r[1]))
+
+ return [getctx(p, n) for p, n in pl if n != nullrev]
+
+ # use linkrev to find the first changeset where self appeared
+ if self.rev() != self.linkrev():
+ base = self.filectx(self.filerev())
+ else:
+ base = self
+
+ # This algorithm would prefer to be recursive, but Python is a
+ # bit recursion-hostile. Instead we do an iterative
+ # depth-first search.
+
+ visit = [base]
+ hist = {}
+ pcache = {}
+ needed = {base: 1}
+ while visit:
+ f = visit[-1]
+ if f not in pcache:
+ pcache[f] = parents(f)
+
+ ready = True
+ pl = pcache[f]
+ for p in pl:
+ if p not in hist:
+ ready = False
+ visit.append(p)
+ needed[p] = needed.get(p, 0) + 1
+ if ready:
+ visit.pop()
+ curr = decorate(f.data(), f)
+ for p in pl:
+ curr = pair(hist[p], curr)
+ if needed[p] == 1:
+ del hist[p]
+ else:
+ needed[p] -= 1
+
+ hist[f] = curr
+ pcache[f] = []
+
+ return zip(hist[base][0], hist[base][1].splitlines(True))
+
+ def ancestor(self, fc2, actx=None):
+ """
+ find the common ancestor file context, if any, of self, and fc2
+
+ If actx is given, it must be the changectx of the common ancestor
+ of self's and fc2's respective changesets.
+ """
+
+ if actx is None:
+ actx = self.changectx().ancestor(fc2.changectx())
+
+ # the trivial case: changesets are unrelated, files must be too
+ if not actx:
+ return None
+
+ # the easy case: no (relevant) renames
+ if fc2.path() == self.path() and self.path() in actx:
+ return actx[self.path()]
+ acache = {}
+
+ # prime the ancestor cache for the working directory
+ for c in (self, fc2):
+ if c._filerev is None:
+ pl = [(n.path(), n.filenode()) for n in c.parents()]
+ acache[(c._path, None)] = pl
+
+ flcache = {self._repopath:self._filelog, fc2._repopath:fc2._filelog}
+ def parents(vertex):
+ if vertex in acache:
+ return acache[vertex]
+ f, n = vertex
+ if f not in flcache:
+ flcache[f] = self._repo.file(f)
+ fl = flcache[f]
+ pl = [(f, p) for p in fl.parents(n) if p != nullid]
+ re = fl.renamed(n)
+ if re:
+ pl.append(re)
+ acache[vertex] = pl
+ return pl
+
+ a, b = (self._path, self._filenode), (fc2._path, fc2._filenode)
+ v = ancestor.ancestor(a, b, parents)
+ if v:
+ f, n = v
+ return filectx(self._repo, f, fileid=n, filelog=flcache[f])
+
+ return None
+
+ def ancestors(self):
+ visit = {}
+ c = self
+ while True:
+ for parent in c.parents():
+ visit[(parent.rev(), parent.node())] = parent
+ if not visit:
+ break
+ c = visit.pop(max(visit))
+ yield c
+
+class workingctx(changectx):
+ """A workingctx object makes access to data related to
+ the current working directory convenient.
+ date - any valid date string or (unixtime, offset), or None.
+ user - username string, or None.
+ extra - a dictionary of extra values, or None.
+ changes - a list of file lists as returned by localrepo.status()
+ or None to use the repository status.
+ """
+ def __init__(self, repo, text="", user=None, date=None, extra=None,
+ changes=None):
+ self._repo = repo
+ self._rev = None
+ self._node = None
+ self._text = text
+ if date:
+ self._date = util.parsedate(date)
+ if user:
+ self._user = user
+ if changes:
+ self._status = list(changes[:4])
+ self._unknown = changes[4]
+ self._ignored = changes[5]
+ self._clean = changes[6]
+ else:
+ self._unknown = None
+ self._ignored = None
+ self._clean = None
+
+ self._extra = {}
+ if extra:
+ self._extra = extra.copy()
+ if 'branch' not in self._extra:
+ try:
+ branch = encoding.fromlocal(self._repo.dirstate.branch())
+ except UnicodeDecodeError:
+ raise util.Abort(_('branch name not in UTF-8!'))
+ self._extra['branch'] = branch
+ if self._extra['branch'] == '':
+ self._extra['branch'] = 'default'
+
+ def __str__(self):
+ return str(self._parents[0]) + "+"
+
+ def __repr__(self):
+ return "<workingctx %s>" % str(self)
+
+ def __nonzero__(self):
+ return True
+
+ def __contains__(self, key):
+ return self._repo.dirstate[key] not in "?r"
+
+ def _buildflagfunc(self):
+ # Create a fallback function for getting file flags when the
+ # filesystem doesn't support them
+
+ copiesget = self._repo.dirstate.copies().get
+
+ if len(self._parents) < 2:
+ # when we have one parent, it's easy: copy from parent
+ man = self._parents[0].manifest()
+ def func(f):
+ f = copiesget(f, f)
+ return man.flags(f)
+ else:
+ # merges are tricky: we try to reconstruct the unstored
+ # result from the merge (issue1802)
+ p1, p2 = self._parents
+ pa = p1.ancestor(p2)
+ m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
+
+ def func(f):
+ f = copiesget(f, f) # may be wrong for merges with copies
+ fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
+ if fl1 == fl2:
+ return fl1
+ if fl1 == fla:
+ return fl2
+ if fl2 == fla:
+ return fl1
+ return '' # punt for conflicts
+
+ return func
+
+ @propertycache
+ def _flagfunc(self):
+ return self._repo.dirstate.flagfunc(self._buildflagfunc)
+
+ @propertycache
+ def _manifest(self):
+ """generate a manifest corresponding to the working directory"""
+
+ if self._unknown is None:
+ self.status(unknown=True)
+
+ man = self._parents[0].manifest().copy()
+ if len(self._parents) > 1:
+ man2 = self.p2().manifest()
+ def getman(f):
+ if f in man:
+ return man
+ return man2
+ else:
+ getman = lambda f: man
+
+ copied = self._repo.dirstate.copies()
+ ff = self._flagfunc
+ modified, added, removed, deleted = self._status
+ unknown = self._unknown
+ for i, l in (("a", added), ("m", modified), ("u", unknown)):
+ for f in l:
+ orig = copied.get(f, f)
+ man[f] = getman(orig).get(orig, nullid) + i
+ try:
+ man.set(f, ff(f))
+ except OSError:
+ pass
+
+ for f in deleted + removed:
+ if f in man:
+ del man[f]
+
+ return man
+
+ def __iter__(self):
+ d = self._repo.dirstate
+ for f in d:
+ if d[f] != 'r':
+ yield f
+
+ @propertycache
+ def _status(self):
+ return self._repo.status()[:4]
+
+ @propertycache
+ def _user(self):
+ return self._repo.ui.username()
+
+ @propertycache
+ def _date(self):
+ return util.makedate()
+
+ @propertycache
+ def _parents(self):
+ p = self._repo.dirstate.parents()
+ if p[1] == nullid:
+ p = p[:-1]
+ self._parents = [changectx(self._repo, x) for x in p]
+ return self._parents
+
+ def status(self, ignored=False, clean=False, unknown=False):
+ """Explicit status query
+ Unless this method is used to query the working copy status, the
+ _status property will implicitly read the status using its default
+ arguments."""
+ stat = self._repo.status(ignored=ignored, clean=clean, unknown=unknown)
+ self._unknown = self._ignored = self._clean = None
+ if unknown:
+ self._unknown = stat[4]
+ if ignored:
+ self._ignored = stat[5]
+ if clean:
+ self._clean = stat[6]
+ self._status = stat[:4]
+ return stat
+
+ def manifest(self):
+ return self._manifest
+ def user(self):
+ return self._user or self._repo.ui.username()
+ def date(self):
+ return self._date
+ def description(self):
+ return self._text
+ def files(self):
+ return sorted(self._status[0] + self._status[1] + self._status[2])
+
+ def modified(self):
+ return self._status[0]
+ def added(self):
+ return self._status[1]
+ def removed(self):
+ return self._status[2]
+ def deleted(self):
+ return self._status[3]
+ def unknown(self):
+ assert self._unknown is not None # must call status first
+ return self._unknown
+ def ignored(self):
+ assert self._ignored is not None # must call status first
+ return self._ignored
+ def clean(self):
+ assert self._clean is not None # must call status first
+ return self._clean
+ def branch(self):
+ return encoding.tolocal(self._extra['branch'])
+ def extra(self):
+ return self._extra
+
+ def tags(self):
+ t = []
+ for p in self.parents():
+ t.extend(p.tags())
+ return t
+
+ def bookmarks(self):
+ b = []
+ for p in self.parents():
+ b.extend(p.bookmarks())
+ return b
+
+ def children(self):
+ return []
+
+ def flags(self, path):
+ if '_manifest' in self.__dict__:
+ try:
+ return self._manifest.flags(path)
+ except KeyError:
+ return ''
+
+ try:
+ return self._flagfunc(path)
+ except OSError:
+ return ''
+
+ def filectx(self, path, filelog=None):
+ """get a file context from the working directory"""
+ return workingfilectx(self._repo, path, workingctx=self,
+ filelog=filelog)
+
+ def ancestor(self, c2):
+ """return the ancestor context of self and c2"""
+ return self._parents[0].ancestor(c2) # punt on two parents for now
+
+ def walk(self, match):
+ return sorted(self._repo.dirstate.walk(match, self.substate.keys(),
+ True, False))
+
+ def dirty(self, missing=False):
+ "check whether a working directory is modified"
+ # check subrepos first
+ for s in self.substate:
+ if self.sub(s).dirty():
+ return True
+ # check current working dir
+ return (self.p2() or self.branch() != self.p1().branch() or
+ self.modified() or self.added() or self.removed() or
+ (missing and self.deleted()))
+
+ def add(self, list, prefix=""):
+ join = lambda f: os.path.join(prefix, f)
+ wlock = self._repo.wlock()
+ ui, ds = self._repo.ui, self._repo.dirstate
+ try:
+ rejected = []
+ for f in list:
+ scmutil.checkportable(ui, join(f))
+ p = self._repo.wjoin(f)
+ try:
+ st = os.lstat(p)
+ except OSError:
+ ui.warn(_("%s does not exist!\n") % join(f))
+ rejected.append(f)
+ continue
+ if st.st_size > 10000000:
+ ui.warn(_("%s: up to %d MB of RAM may be required "
+ "to manage this file\n"
+ "(use 'hg revert %s' to cancel the "
+ "pending addition)\n")
+ % (f, 3 * st.st_size // 1000000, join(f)))
+ if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
+ ui.warn(_("%s not added: only files and symlinks "
+ "supported currently\n") % join(f))
+ rejected.append(p)
+ elif ds[f] in 'amn':
+ ui.warn(_("%s already tracked!\n") % join(f))
+ elif ds[f] == 'r':
+ ds.normallookup(f)
+ else:
+ ds.add(f)
+ return rejected
+ finally:
+ wlock.release()
+
+ def forget(self, files):
+ wlock = self._repo.wlock()
+ try:
+ for f in files:
+ if self._repo.dirstate[f] != 'a':
+ self._repo.dirstate.remove(f)
+ elif f not in self._repo.dirstate:
+ self._repo.ui.warn(_("%s not tracked!\n") % f)
+ else:
+ self._repo.dirstate.drop(f)
+ finally:
+ wlock.release()
+
+ def ancestors(self):
+ for a in self._repo.changelog.ancestors(
+ *[p.rev() for p in self._parents]):
+ yield changectx(self._repo, a)
+
+ def undelete(self, list):
+ pctxs = self.parents()
+ wlock = self._repo.wlock()
+ try:
+ for f in list:
+ if self._repo.dirstate[f] != 'r':
+ self._repo.ui.warn(_("%s not removed!\n") % f)
+ else:
+ fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
+ t = fctx.data()
+ self._repo.wwrite(f, t, fctx.flags())
+ self._repo.dirstate.normal(f)
+ finally:
+ wlock.release()
+
+ def copy(self, source, dest):
+ p = self._repo.wjoin(dest)
+ if not os.path.lexists(p):
+ self._repo.ui.warn(_("%s does not exist!\n") % dest)
+ elif not (os.path.isfile(p) or os.path.islink(p)):
+ self._repo.ui.warn(_("copy failed: %s is not a file or a "
+ "symbolic link\n") % dest)
+ else:
+ wlock = self._repo.wlock()
+ try:
+ if self._repo.dirstate[dest] in '?r':
+ self._repo.dirstate.add(dest)
+ self._repo.dirstate.copy(source, dest)
+ finally:
+ wlock.release()
+
class workingfilectx(filectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""
    def __init__(self, repo, path, filelog=None, workingctx=None):
        """path is the repository-relative path of the file; filelog and
        workingctx may be passed in to avoid recomputing them."""
        self._repo = repo
        self._path = path
        # a working-directory file has no committed identity yet
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog:
            self._filelog = filelog
        if workingctx:
            self._changectx = workingctx

    @propertycache
    def _changectx(self):
        # lazily bind to the current working context when none was given
        return workingctx(self._repo)

    def __nonzero__(self):
        return True

    def __str__(self):
        return "%s@%s" % (self.path(), self._changectx)

    def __repr__(self):
        return "<workingfilectx %s>" % str(self)

    def data(self):
        # read the file from the working directory (with filters applied)
        return self._repo.wread(self._path)
    def renamed(self):
        rp = self._repo.dirstate.copied(self._path)
        if not rp:
            return None
        # (copy source path, its filenode in the first parent, or nullid
        # when the source is not in that manifest)
        return rp, self._changectx._parents[0]._manifest.get(rp, nullid)

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        def filenode(ctx, path):
            return ctx._manifest.get(path, nullid)

        path = self._path
        fl = self._filelog
        pcl = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            pl = [renamed + (None,)]
        else:
            pl = [(path, filenode(pcl[0], path), fl)]

        for pc in pcl[1:]:
            pl.append((path, filenode(pc, path), fl))

        # drop parents in whose manifest the file does not exist
        return [filectx(self._repo, p, fileid=n, filelog=l)
                for p, n, l in pl if n != nullid]

    def children(self):
        return []

    def size(self):
        return os.lstat(self._repo.wjoin(self._path)).st_size
    def date(self):
        t, tz = self._changectx.date()
        try:
            # prefer the file's on-disk mtime, keeping the changectx's tz
            return (int(os.lstat(self._repo.wjoin(self._path)).st_mtime), tz)
        except OSError, err:
            if err.errno != errno.ENOENT:
                raise
            # file vanished: fall back to the changectx's own date
            return (t, tz)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a wfctx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)
+
class memctx(object):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time while
    related files data and is made available through a callback
    mechanism. 'repo' is the current localrepo, 'parents' is a
    sequence of two parent revisions identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but calls order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn raises an
    IOError. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to current
    repository username, date is the commit date in any format
    supported by util.parsedate() and defaults to current date, extra
    is a dictionary of metadata or is left empty.
    """
    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None):
        self._repo = repo
        self._rev = None
        self._node = None
        self._text = text
        self._date = date and util.parsedate(date) or util.makedate()
        self._user = user
        # normalize missing parents to the null node
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [changectx(self._repo, p) for p in (p1, p2)]
        files = sorted(set(files))
        # status layout: [modified, added, removed, deleted, unknown]
        # NOTE(review): only 5 entries, yet ignored()/clean() index 5/6
        # and would raise IndexError if called -- confirm upstream intent
        self._status = [files, [], [], [], []]
        self._filectxfn = filectxfn

        self._extra = extra and extra.copy() or {}
        if self._extra.get('branch', '') == '':
            self._extra['branch'] = 'default'

    def __str__(self):
        return str(self._parents[0]) + "+"

    def __int__(self):
        return self._rev

    def __nonzero__(self):
        return True

    def __getitem__(self, key):
        return self.filectx(key)

    def p1(self):
        return self._parents[0]
    def p2(self):
        return self._parents[1]

    def user(self):
        return self._user or self._repo.ui.username()
    def date(self):
        return self._date
    def description(self):
        return self._text
    def files(self):
        # all touched files are recorded in the "modified" slot
        return self.modified()
    def modified(self):
        return self._status[0]
    def added(self):
        return self._status[1]
    def removed(self):
        return self._status[2]
    def deleted(self):
        return self._status[3]
    def unknown(self):
        return self._status[4]
    def ignored(self):
        return self._status[5]
    def clean(self):
        return self._status[6]
    def branch(self):
        return encoding.tolocal(self._extra['branch'])
    def extra(self):
        return self._extra
    def flags(self, f):
        return self[f].flags()

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def filectx(self, path, filelog=None):
        """get a file context from the working directory"""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)
+
class memfilectx(object):
    """memfilectx represents an in-memory file to commit.

    See memctx for more details.
    """
    def __init__(self, path, data, islink=False, isexec=False, copied=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        self._path = path
        self._data = data
        flags = ''
        if islink:
            flags += 'l'
        if isexec:
            flags += 'x'
        self._flags = flags
        if copied:
            self._copied = (copied, nullid)
        else:
            self._copied = None

    def __nonzero__(self):
        return True

    def __str__(self):
        return "%s@%s" % (self.path(), self._changectx)

    def path(self):
        return self._path

    def data(self):
        return self._data

    def flags(self):
        return self._flags

    def isexec(self):
        return 'x' in self._flags

    def islink(self):
        return 'l' in self._flags

    def renamed(self):
        return self._copied
diff --git a/websdk/mercurial/copies.py b/websdk/mercurial/copies.py
index 4da6846..abd16fa 120000..100644
--- a/websdk/mercurial/copies.py
+++ b/websdk/mercurial/copies.py
@@ -1 +1,267 @@
-/usr/share/pyshared/mercurial/copies.py \ No newline at end of file
+# copies.py - copy detection for Mercurial
+#
+# Copyright 2008 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+import util
+import heapq
+
+def _nonoverlap(d1, d2, d3):
+ "Return list of elements in d1 not in d2 or d3"
+ return sorted([d for d in d1 if d not in d3 and d not in d2])
+
+def _dirname(f):
+ s = f.rfind("/")
+ if s == -1:
+ return ""
+ return f[:s]
+
+def _dirs(files):
+ d = set()
+ for f in files:
+ f = _dirname(f)
+ while f not in d:
+ d.add(f)
+ f = _dirname(f)
+ return d
+
+def _findlimit(repo, a, b):
+ """Find the earliest revision that's an ancestor of a or b but not both,
+ None if no such revision exists.
+ """
+ # basic idea:
+ # - mark a and b with different sides
+ # - if a parent's children are all on the same side, the parent is
+ # on that side, otherwise it is on no side
+ # - walk the graph in topological order with the help of a heap;
+ # - add unseen parents to side map
+ # - clear side of any parent that has children on different sides
+ # - track number of interesting revs that might still be on a side
+ # - track the lowest interesting rev seen
+ # - quit when interesting revs is zero
+
+ cl = repo.changelog
+ working = len(cl) # pseudo rev for the working directory
+ if a is None:
+ a = working
+ if b is None:
+ b = working
+
+ side = {a: -1, b: 1}
+ visit = [-a, -b]
+ heapq.heapify(visit)
+ interesting = len(visit)
+ hascommonancestor = False
+ limit = working
+
+ while interesting:
+ r = -heapq.heappop(visit)
+ if r == working:
+ parents = [cl.rev(p) for p in repo.dirstate.parents()]
+ else:
+ parents = cl.parentrevs(r)
+ for p in parents:
+ if p < 0:
+ continue
+ if p not in side:
+ # first time we see p; add it to visit
+ side[p] = side[r]
+ if side[p]:
+ interesting += 1
+ heapq.heappush(visit, -p)
+ elif side[p] and side[p] != side[r]:
+ # p was interesting but now we know better
+ side[p] = 0
+ interesting -= 1
+ hascommonancestor = True
+ if side[r]:
+ limit = r # lowest rev visited
+ interesting -= 1
+
+ if not hascommonancestor:
+ return None
+ return limit
+
+def copies(repo, c1, c2, ca, checkdirs=False):
+ """
+ Find moves and copies between context c1 and c2
+ """
+ # avoid silly behavior for update from empty dir
+ if not c1 or not c2 or c1 == c2:
+ return {}, {}
+
+ # avoid silly behavior for parent -> working dir
+ if c2.node() is None and c1.node() == repo.dirstate.p1():
+ return repo.dirstate.copies(), {}
+
+ limit = _findlimit(repo, c1.rev(), c2.rev())
+ if limit is None:
+ # no common ancestor, no copies
+ return {}, {}
+ m1 = c1.manifest()
+ m2 = c2.manifest()
+ ma = ca.manifest()
+
+ def makectx(f, n):
+ if len(n) != 20: # in a working context?
+ if c1.rev() is None:
+ return c1.filectx(f)
+ return c2.filectx(f)
+ return repo.filectx(f, fileid=n)
+
+ ctx = util.lrucachefunc(makectx)
+ copy = {}
+ fullcopy = {}
+ diverge = {}
+
+ def related(f1, f2, limit):
+ # Walk back to common ancestor to see if the two files originate
+ # from the same file. Since workingfilectx's rev() is None it messes
+ # up the integer comparison logic, hence the pre-step check for
+ # None (f1 and f2 can only be workingfilectx's initially).
+
+ if f1 == f2:
+ return f1 # a match
+
+ g1, g2 = f1.ancestors(), f2.ancestors()
+ try:
+ f1r, f2r = f1.rev(), f2.rev()
+
+ if f1r is None:
+ f1 = g1.next()
+ if f2r is None:
+ f2 = g2.next()
+
+ while True:
+ f1r, f2r = f1.rev(), f2.rev()
+ if f1r > f2r:
+ f1 = g1.next()
+ elif f2r > f1r:
+ f2 = g2.next()
+ elif f1 == f2:
+ return f1 # a match
+ elif f1r == f2r or f1r < limit or f2r < limit:
+ return False # copy no longer relevant
+ except StopIteration:
+ return False
+
+ def checkcopies(f, m1, m2):
+ '''check possible copies of f from m1 to m2'''
+ of = None
+ seen = set([f])
+ for oc in ctx(f, m1[f]).ancestors():
+ ocr = oc.rev()
+ of = oc.path()
+ if of in seen:
+ # check limit late - grab last rename before
+ if ocr < limit:
+ break
+ continue
+ seen.add(of)
+
+ fullcopy[f] = of # remember for dir rename detection
+ if of not in m2:
+ continue # no match, keep looking
+ if m2[of] == ma.get(of):
+ break # no merge needed, quit early
+ c2 = ctx(of, m2[of])
+ cr = related(oc, c2, ca.rev())
+ if cr and (of == f or of == c2.path()): # non-divergent
+ copy[f] = of
+ of = None
+ break
+
+ if of in ma:
+ diverge.setdefault(of, []).append(f)
+
+ repo.ui.debug(" searching for copies back to rev %d\n" % limit)
+
+ u1 = _nonoverlap(m1, m2, ma)
+ u2 = _nonoverlap(m2, m1, ma)
+
+ if u1:
+ repo.ui.debug(" unmatched files in local:\n %s\n"
+ % "\n ".join(u1))
+ if u2:
+ repo.ui.debug(" unmatched files in other:\n %s\n"
+ % "\n ".join(u2))
+
+ for f in u1:
+ checkcopies(f, m1, m2)
+ for f in u2:
+ checkcopies(f, m2, m1)
+
+ diverge2 = set()
+ for of, fl in diverge.items():
+ if len(fl) == 1 or of in c2:
+ del diverge[of] # not actually divergent, or not a rename
+ else:
+ diverge2.update(fl) # reverse map for below
+
+ if fullcopy:
+ repo.ui.debug(" all copies found (* = to merge, ! = divergent):\n")
+ for f in fullcopy:
+ note = ""
+ if f in copy:
+ note += "*"
+ if f in diverge2:
+ note += "!"
+ repo.ui.debug(" %s -> %s %s\n" % (f, fullcopy[f], note))
+ del diverge2
+
+ if not fullcopy or not checkdirs:
+ return copy, diverge
+
+ repo.ui.debug(" checking for directory renames\n")
+
+ # generate a directory move map
+ d1, d2 = _dirs(m1), _dirs(m2)
+ invalid = set()
+ dirmove = {}
+
+ # examine each file copy for a potential directory move, which is
+ # when all the files in a directory are moved to a new directory
+ for dst, src in fullcopy.iteritems():
+ dsrc, ddst = _dirname(src), _dirname(dst)
+ if dsrc in invalid:
+ # already seen to be uninteresting
+ continue
+ elif dsrc in d1 and ddst in d1:
+ # directory wasn't entirely moved locally
+ invalid.add(dsrc)
+ elif dsrc in d2 and ddst in d2:
+ # directory wasn't entirely moved remotely
+ invalid.add(dsrc)
+ elif dsrc in dirmove and dirmove[dsrc] != ddst:
+ # files from the same directory moved to two different places
+ invalid.add(dsrc)
+ else:
+ # looks good so far
+ dirmove[dsrc + "/"] = ddst + "/"
+
+ for i in invalid:
+ if i in dirmove:
+ del dirmove[i]
+ del d1, d2, invalid
+
+ if not dirmove:
+ return copy, diverge
+
+ for d in dirmove:
+ repo.ui.debug(" dir %s -> %s\n" % (d, dirmove[d]))
+
+ # check unaccounted nonoverlapping files against directory moves
+ for f in u1 + u2:
+ if f not in fullcopy:
+ for d in dirmove:
+ if f.startswith(d):
+ # new file added in a directory that was moved, move it
+ df = dirmove[d] + f[len(d):]
+ if df not in copy:
+ copy[f] = df
+ repo.ui.debug(" file %s -> %s\n" % (f, copy[f]))
+ break
+
+ return copy, diverge
diff --git a/websdk/mercurial/dagparser.py b/websdk/mercurial/dagparser.py
index f94cbde..e02faa5 120000..100644
--- a/websdk/mercurial/dagparser.py
+++ b/websdk/mercurial/dagparser.py
@@ -1 +1,474 @@
-/usr/share/pyshared/mercurial/dagparser.py \ No newline at end of file
+# dagparser.py - parser and generator for concise description of DAGs
+#
+# Copyright 2010 Peter Arrenbrecht <peter@arrenbrecht.ch>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+import re, string
+import util
+from i18n import _
+
+def parsedag(desc):
+ '''parses a DAG from a concise textual description; generates events
+
+ "+n" is a linear run of n nodes based on the current default parent
+ "." is a single node based on the current default parent
+ "$" resets the default parent to -1 (implied at the start);
+ otherwise the default parent is always the last node created
+ "<p" sets the default parent to the backref p
+ "*p" is a fork at parent p, where p is a backref
+ "*p1/p2/.../pn" is a merge of parents p1..pn, where the pi are backrefs
+ "/p2/.../pn" is a merge of the preceding node and p2..pn
+ ":name" defines a label for the preceding node; labels can be redefined
+ "@text" emits an annotation event for text
+ "!command" emits an action event for the current node
+ "!!my command\n" is like "!", but to the end of the line
+ "#...\n" is a comment up to the end of the line
+
+ Whitespace between the above elements is ignored.
+
+ A backref is either
+ * a number n, which references the node curr-n, where curr is the current
+ node, or
+ * the name of a label you placed earlier using ":name", or
+ * empty to denote the default parent.
+
+ All string valued-elements are either strictly alphanumeric, or must
+ be enclosed in double quotes ("..."), with "\" as escape character.
+
+ Generates sequence of
+
+ ('n', (id, [parentids])) for node creation
+ ('l', (id, labelname)) for labels on nodes
+ ('a', text) for annotations
+ ('c', command) for actions (!)
+ ('C', command) for line actions (!!)
+
+ Examples
+ --------
+
+ Example of a complex graph (output not shown for brevity):
+
+ >>> len(list(parsedag("""
+ ...
+ ... +3 # 3 nodes in linear run
+ ... :forkhere # a label for the last of the 3 nodes from above
+ ... +5 # 5 more nodes on one branch
+ ... :mergethis # label again
+ ... <forkhere # set default parent to labelled fork node
+ ... +10 # 10 more nodes on a parallel branch
+ ... @stable # following nodes will be annotated as "stable"
+ ... +5 # 5 nodes in stable
+ ... !addfile # custom command; could trigger new file in next node
+ ... +2 # two more nodes
+ ... /mergethis # merge last node with labelled node
+ ... +4 # 4 more nodes descending from merge node
+ ...
+ ... """)))
+ 34
+
+ Empty list:
+
+ >>> list(parsedag(""))
+ []
+
+ A simple linear run:
+
+ >>> list(parsedag("+3"))
+ [('n', (0, [-1])), ('n', (1, [0])), ('n', (2, [1]))]
+
+ Some non-standard ways to define such runs:
+
+ >>> list(parsedag("+1+2"))
+ [('n', (0, [-1])), ('n', (1, [0])), ('n', (2, [1]))]
+
+ >>> list(parsedag("+1*1*"))
+ [('n', (0, [-1])), ('n', (1, [0])), ('n', (2, [1]))]
+
+ >>> list(parsedag("*"))
+ [('n', (0, [-1]))]
+
+ >>> list(parsedag("..."))
+ [('n', (0, [-1])), ('n', (1, [0])), ('n', (2, [1]))]
+
+ A fork and a join, using numeric back references:
+
+ >>> list(parsedag("+2*2*/2"))
+ [('n', (0, [-1])), ('n', (1, [0])), ('n', (2, [0])), ('n', (3, [2, 1]))]
+
+ >>> list(parsedag("+2<2+1/2"))
+ [('n', (0, [-1])), ('n', (1, [0])), ('n', (2, [0])), ('n', (3, [2, 1]))]
+
+ Placing a label:
+
+ >>> list(parsedag("+1 :mylabel +1"))
+ [('n', (0, [-1])), ('l', (0, 'mylabel')), ('n', (1, [0]))]
+
+ An empty label (silly, really):
+
+ >>> list(parsedag("+1:+1"))
+ [('n', (0, [-1])), ('l', (0, '')), ('n', (1, [0]))]
+
+ Fork and join, but with labels instead of numeric back references:
+
+ >>> list(parsedag("+1:f +1:p2 *f */p2"))
+ [('n', (0, [-1])), ('l', (0, 'f')), ('n', (1, [0])), ('l', (1, 'p2')),
+ ('n', (2, [0])), ('n', (3, [2, 1]))]
+
+ >>> list(parsedag("+1:f +1:p2 <f +1 /p2"))
+ [('n', (0, [-1])), ('l', (0, 'f')), ('n', (1, [0])), ('l', (1, 'p2')),
+ ('n', (2, [0])), ('n', (3, [2, 1]))]
+
+ Restarting from the root:
+
+ >>> list(parsedag("+1 $ +1"))
+ [('n', (0, [-1])), ('n', (1, [-1]))]
+
+ Annotations, which are meant to introduce sticky state for subsequent nodes:
+
+ >>> list(parsedag("+1 @ann +1"))
+ [('n', (0, [-1])), ('a', 'ann'), ('n', (1, [0]))]
+
+ >>> list(parsedag('+1 @"my annotation" +1'))
+ [('n', (0, [-1])), ('a', 'my annotation'), ('n', (1, [0]))]
+
+ Commands, which are meant to operate on the most recently created node:
+
+ >>> list(parsedag("+1 !cmd +1"))
+ [('n', (0, [-1])), ('c', 'cmd'), ('n', (1, [0]))]
+
+ >>> list(parsedag('+1 !"my command" +1'))
+ [('n', (0, [-1])), ('c', 'my command'), ('n', (1, [0]))]
+
+ >>> list(parsedag('+1 !!my command line\\n +1'))
+ [('n', (0, [-1])), ('C', 'my command line'), ('n', (1, [0]))]
+
+ Comments, which extend to the end of the line:
+
+ >>> list(parsedag('+1 # comment\\n+1'))
+ [('n', (0, [-1])), ('n', (1, [0]))]
+
+ Error:
+
+ >>> try: list(parsedag('+1 bad'))
+ ... except Exception, e: print e
+ invalid character in dag description: bad...
+
+ '''
+ if not desc:
+ return
+
+ wordchars = string.ascii_letters + string.digits
+
+ labels = {}
+ p1 = -1
+ r = 0
+
+ def resolve(ref):
+ if not ref:
+ return p1
+ elif ref[0] in string.digits:
+ return r - int(ref)
+ else:
+ return labels[ref]
+
+ chiter = (c for c in desc)
+
+ def nextch():
+ try:
+ return chiter.next()
+ except StopIteration:
+ return '\0'
+
+ def nextrun(c, allow):
+ s = ''
+ while c in allow:
+ s += c
+ c = nextch()
+ return c, s
+
+ def nextdelimited(c, limit, escape):
+ s = ''
+ while c != limit:
+ if c == escape:
+ c = nextch()
+ s += c
+ c = nextch()
+ return nextch(), s
+
+ def nextstring(c):
+ if c == '"':
+ return nextdelimited(nextch(), '"', '\\')
+ else:
+ return nextrun(c, wordchars)
+
+ c = nextch()
+ while c != '\0':
+ while c in string.whitespace:
+ c = nextch()
+ if c == '.':
+ yield 'n', (r, [p1])
+ p1 = r
+ r += 1
+ c = nextch()
+ elif c == '+':
+ c, digs = nextrun(nextch(), string.digits)
+ n = int(digs)
+ for i in xrange(0, n):
+ yield 'n', (r, [p1])
+ p1 = r
+ r += 1
+ elif c in '*/':
+ if c == '*':
+ c = nextch()
+ c, pref = nextstring(c)
+ prefs = [pref]
+ while c == '/':
+ c, pref = nextstring(nextch())
+ prefs.append(pref)
+ ps = [resolve(ref) for ref in prefs]
+ yield 'n', (r, ps)
+ p1 = r
+ r += 1
+ elif c == '<':
+ c, ref = nextstring(nextch())
+ p1 = resolve(ref)
+ elif c == ':':
+ c, name = nextstring(nextch())
+ labels[name] = p1
+ yield 'l', (p1, name)
+ elif c == '@':
+ c, text = nextstring(nextch())
+ yield 'a', text
+ elif c == '!':
+ c = nextch()
+ if c == '!':
+ cmd = ''
+ c = nextch()
+ while c not in '\n\r\0':
+ cmd += c
+ c = nextch()
+ yield 'C', cmd
+ else:
+ c, cmd = nextstring(c)
+ yield 'c', cmd
+ elif c == '#':
+ while c not in '\n\r\0':
+ c = nextch()
+ elif c == '$':
+ p1 = -1
+ c = nextch()
+ elif c == '\0':
+ return # in case it was preceded by whitespace
+ else:
+ s = ''
+ i = 0
+ while c != '\0' and i < 10:
+ s += c
+ i += 1
+ c = nextch()
+ raise util.Abort(_("invalid character in dag description: %s...") % s)
+
+def dagtextlines(events,
+ addspaces=True,
+ wraplabels=False,
+ wrapannotations=False,
+ wrapcommands=False,
+ wrapnonlinear=False,
+ usedots=False,
+ maxlinewidth=70):
+ '''generates single lines for dagtext()'''
+
+ def wrapstring(text):
+ if re.match("^[0-9a-z]*$", text):
+ return text
+ return '"' + text.replace('\\', '\\\\').replace('"', '\"') + '"'
+
+ def gen():
+ labels = {}
+ run = 0
+ wantr = 0
+ needroot = False
+ for kind, data in events:
+ if kind == 'n':
+ r, ps = data
+
+ # sanity check
+ if r != wantr:
+ raise util.Abort(_("expected id %i, got %i") % (wantr, r))
+ if not ps:
+ ps = [-1]
+ else:
+ for p in ps:
+ if p >= r:
+ raise util.Abort(_("parent id %i is larger than "
+ "current id %i") % (p, r))
+ wantr += 1
+
+ # new root?
+ p1 = r - 1
+ if len(ps) == 1 and ps[0] == -1:
+ if needroot:
+ if run:
+ yield '+' + str(run)
+ run = 0
+ if wrapnonlinear:
+ yield '\n'
+ yield '$'
+ p1 = -1
+ else:
+ needroot = True
+ if len(ps) == 1 and ps[0] == p1:
+ if usedots:
+ yield "."
+ else:
+ run += 1
+ else:
+ if run:
+ yield '+' + str(run)
+ run = 0
+ if wrapnonlinear:
+ yield '\n'
+ prefs = []
+ for p in ps:
+ if p == p1:
+ prefs.append('')
+ elif p in labels:
+ prefs.append(labels[p])
+ else:
+ prefs.append(str(r - p))
+ yield '*' + '/'.join(prefs)
+ else:
+ if run:
+ yield '+' + str(run)
+ run = 0
+ if kind == 'l':
+ rid, name = data
+ labels[rid] = name
+ yield ':' + name
+ if wraplabels:
+ yield '\n'
+ elif kind == 'c':
+ yield '!' + wrapstring(data)
+ if wrapcommands:
+ yield '\n'
+ elif kind == 'C':
+ yield '!!' + data
+ yield '\n'
+ elif kind == 'a':
+ if wrapannotations:
+ yield '\n'
+ yield '@' + wrapstring(data)
+ elif kind == '#':
+ yield '#' + data
+ yield '\n'
+ else:
+ raise util.Abort(_("invalid event type in dag: %s")
+ % str((type, data)))
+ if run:
+ yield '+' + str(run)
+
+ line = ''
+ for part in gen():
+ if part == '\n':
+ if line:
+ yield line
+ line = ''
+ else:
+ if len(line) + len(part) >= maxlinewidth:
+ yield line
+ line = ''
+ elif addspaces and line and part != '.':
+ line += ' '
+ line += part
+ if line:
+ yield line
+
+def dagtext(dag,
+ addspaces=True,
+ wraplabels=False,
+ wrapannotations=False,
+ wrapcommands=False,
+ wrapnonlinear=False,
+ usedots=False,
+ maxlinewidth=70):
+ '''generates lines of a textual representation for a dag event stream
+
+ events should generate what parsedag() does, so:
+
+ ('n', (id, [parentids])) for node creation
+ ('l', (id, labelname)) for labels on nodes
+ ('a', text) for annotations
+ ('c', text) for commands
+ ('C', text) for line commands ('!!')
+ ('#', text) for comment lines
+
+ Parent nodes must come before child nodes.
+
+ Examples
+ --------
+
+ Linear run:
+
+ >>> dagtext([('n', (0, [-1])), ('n', (1, [0]))])
+ '+2'
+
+ Two roots:
+
+ >>> dagtext([('n', (0, [-1])), ('n', (1, [-1]))])
+ '+1 $ +1'
+
+ Fork and join:
+
+ >>> dagtext([('n', (0, [-1])), ('n', (1, [0])), ('n', (2, [0])),
+ ... ('n', (3, [2, 1]))])
+ '+2 *2 */2'
+
+ Fork and join with labels:
+
+ >>> dagtext([('n', (0, [-1])), ('l', (0, 'f')), ('n', (1, [0])),
+ ... ('l', (1, 'p2')), ('n', (2, [0])), ('n', (3, [2, 1]))])
+ '+1 :f +1 :p2 *f */p2'
+
+ Annotations:
+
+ >>> dagtext([('n', (0, [-1])), ('a', 'ann'), ('n', (1, [0]))])
+ '+1 @ann +1'
+
+ >>> dagtext([('n', (0, [-1])), ('a', 'my annotation'), ('n', (1, [0]))])
+ '+1 @"my annotation" +1'
+
+ Commands:
+
+ >>> dagtext([('n', (0, [-1])), ('c', 'cmd'), ('n', (1, [0]))])
+ '+1 !cmd +1'
+
+ >>> dagtext([('n', (0, [-1])), ('c', 'my command'), ('n', (1, [0]))])
+ '+1 !"my command" +1'
+
+ >>> dagtext([('n', (0, [-1])), ('C', 'my command line'), ('n', (1, [0]))])
+ '+1 !!my command line\\n+1'
+
+ Comments:
+
+ >>> dagtext([('n', (0, [-1])), ('#', ' comment'), ('n', (1, [0]))])
+ '+1 # comment\\n+1'
+
+ >>> dagtext([])
+ ''
+
+ Combining parsedag and dagtext:
+
+ >>> dagtext(parsedag('+1 :f +1 :p2 *f */p2'))
+ '+1 :f +1 :p2 *f */p2'
+
+ '''
+ return "\n".join(dagtextlines(dag,
+ addspaces,
+ wraplabels,
+ wrapannotations,
+ wrapcommands,
+ wrapnonlinear,
+ usedots,
+ maxlinewidth))
diff --git a/websdk/mercurial/dagutil.py b/websdk/mercurial/dagutil.py
new file mode 100644
index 0000000..7908e63
--- /dev/null
+++ b/websdk/mercurial/dagutil.py
@@ -0,0 +1,277 @@
+# dagutil.py - dag utilities for mercurial
+#
+# Copyright 2010 Benoit Boissinot <bboissin@gmail.com>
+# and Peter Arrenbrecht <peter@arrenbrecht.ch>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from node import nullrev
+from i18n import _
+
+
+class basedag(object):
+ '''generic interface for DAGs
+
+ terms:
+ "ix" (short for index) identifies a nodes internally,
+ "id" identifies one externally.
+
+ All params are ixs unless explicitly suffixed otherwise.
+ Pluralized params are lists or sets.
+ '''
+
+ def __init__(self):
+ self._inverse = None
+
+ def nodeset(self):
+ '''set of all node idxs'''
+ raise NotImplementedError()
+
+ def heads(self):
+ '''list of head ixs'''
+ raise NotImplementedError()
+
+ def parents(self, ix):
+ '''list of parents ixs of ix'''
+ raise NotImplementedError()
+
+ def inverse(self):
+ '''inverse DAG, where parents becomes children, etc.'''
+ raise NotImplementedError()
+
+ def ancestorset(self, starts, stops=None):
+ '''
+ set of all ancestors of starts (incl), but stop walk at stops (excl)
+ '''
+ raise NotImplementedError()
+
+ def descendantset(self, starts, stops=None):
+ '''
+ set of all descendants of starts (incl), but stop walk at stops (excl)
+ '''
+ return self.inverse().ancestorset(starts, stops)
+
+ def headsetofconnecteds(self, ixs):
+ '''
+ subset of connected list of ixs so that no node has a descendant in it
+
+ By "connected list" we mean that if an ancestor and a descendant are in
+ the list, then so is at least one path connecting them.
+ '''
+ raise NotImplementedError()
+
+ def externalize(self, ix):
+ '''return a list of (or set if given a set) of node ids'''
+ return self._externalize(ix)
+
+ def externalizeall(self, ixs):
+ '''return a list of (or set if given a set) of node ids'''
+ ids = self._externalizeall(ixs)
+ if isinstance(ixs, set):
+ return set(ids)
+ return list(ids)
+
+ def internalize(self, id):
+ '''return a list of (or set if given a set) of node ixs'''
+ return self._internalize(id)
+
+ def internalizeall(self, ids, filterunknown=False):
+ '''return a list of (or set if given a set) of node ids'''
+ ixs = self._internalizeall(ids, filterunknown)
+ if isinstance(ids, set):
+ return set(ixs)
+ return list(ixs)
+
+
+class genericdag(basedag):
+ '''generic implementations for DAGs'''
+
+ def ancestorset(self, starts, stops=None):
+ stops = stops and set(stops) or set()
+ seen = set()
+ pending = list(starts)
+ while pending:
+ n = pending.pop()
+ if n not in seen and n not in stops:
+ seen.add(n)
+ pending.extend(self.parents(n))
+ return seen
+
+ def headsetofconnecteds(self, ixs):
+ hds = set(ixs)
+ if not hds:
+ return hds
+ for n in ixs:
+ for p in self.parents(n):
+ hds.discard(p)
+ assert hds
+ return hds
+
+
+class revlogbaseddag(basedag):
+ '''generic dag interface to a revlog'''
+
+ def __init__(self, revlog, nodeset):
+ basedag.__init__(self)
+ self._revlog = revlog
+ self._heads = None
+ self._nodeset = nodeset
+
+ def nodeset(self):
+ return self._nodeset
+
+ def heads(self):
+ if self._heads is None:
+ self._heads = self._getheads()
+ return self._heads
+
+ def _externalize(self, ix):
+ return self._revlog.index[ix][7]
+ def _externalizeall(self, ixs):
+ idx = self._revlog.index
+ return [idx[i][7] for i in ixs]
+
+ def _internalize(self, id):
+ ix = self._revlog.rev(id)
+ if ix == nullrev:
+ raise LookupError(id, self._revlog.indexfile, _('nullid'))
+ return ix
+ def _internalizeall(self, ids, filterunknown):
+ rl = self._revlog
+ if filterunknown:
+ return [r for r in map(rl.nodemap.get, ids)
+ if r is not None and r != nullrev]
+ return map(self._internalize, ids)
+
+
+class revlogdag(revlogbaseddag):
+ '''dag interface to a revlog'''
+
+ def __init__(self, revlog):
+ revlogbaseddag.__init__(self, revlog, set(xrange(len(revlog))))
+
+ def _getheads(self):
+ return [r for r in self._revlog.headrevs() if r != nullrev]
+
+ def parents(self, ix):
+ rlog = self._revlog
+ idx = rlog.index
+ revdata = idx[ix]
+ prev = revdata[5]
+ if prev != nullrev:
+ prev2 = revdata[6]
+ if prev2 == nullrev:
+ return [prev]
+ return [prev, prev2]
+ prev2 = revdata[6]
+ if prev2 != nullrev:
+ return [prev2]
+ return []
+
+ def inverse(self):
+ if self._inverse is None:
+ self._inverse = inverserevlogdag(self)
+ return self._inverse
+
+ def ancestorset(self, starts, stops=None):
+ rlog = self._revlog
+ idx = rlog.index
+ stops = stops and set(stops) or set()
+ seen = set()
+ pending = list(starts)
+ while pending:
+ rev = pending.pop()
+ if rev not in seen and rev not in stops:
+ seen.add(rev)
+ revdata = idx[rev]
+ for i in [5, 6]:
+ prev = revdata[i]
+ if prev != nullrev:
+ pending.append(prev)
+ return seen
+
+ def headsetofconnecteds(self, ixs):
+ if not ixs:
+ return set()
+ rlog = self._revlog
+ idx = rlog.index
+ headrevs = set(ixs)
+ for rev in ixs:
+ revdata = idx[rev]
+ for i in [5, 6]:
+ prev = revdata[i]
+ if prev != nullrev:
+ headrevs.discard(prev)
+ assert headrevs
+ return headrevs
+
+ def linearize(self, ixs):
+ '''linearize and topologically sort a list of revisions
+
+ The linearization process tries to create long runs of revs where
+ a child rev comes immediately after its first parent. This is done by
+ visiting the heads of the given revs in inverse topological order,
+ and for each visited rev, visiting its second parent, then its first
+ parent, then adding the rev itself to the output list.
+ '''
+ sorted = []
+ visit = list(self.headsetofconnecteds(ixs))
+ visit.sort(reverse=True)
+ finished = set()
+
+ while visit:
+ cur = visit.pop()
+ if cur < 0:
+ cur = -cur - 1
+ if cur not in finished:
+ sorted.append(cur)
+ finished.add(cur)
+ else:
+ visit.append(-cur - 1)
+ visit += [p for p in self.parents(cur)
+ if p in ixs and p not in finished]
+ assert len(sorted) == len(ixs)
+ return sorted
+
+
+class inverserevlogdag(revlogbaseddag, genericdag):
+ '''inverse of an existing revlog dag; see revlogdag.inverse()'''
+
+ def __init__(self, orig):
+ revlogbaseddag.__init__(self, orig._revlog, orig._nodeset)
+ self._orig = orig
+ self._children = {}
+ self._roots = []
+ self._walkfrom = len(self._revlog) - 1
+
+ def _walkto(self, walkto):
+ rev = self._walkfrom
+ cs = self._children
+ roots = self._roots
+ idx = self._revlog.index
+ while rev >= walkto:
+ data = idx[rev]
+ isroot = True
+ for prev in [data[5], data[6]]: # parent revs
+ if prev != nullrev:
+ cs.setdefault(prev, []).append(rev)
+ isroot = False
+ if isroot:
+ roots.append(rev)
+ rev -= 1
+ self._walkfrom = rev
+
+ def _getheads(self):
+ self._walkto(nullrev)
+ return self._roots
+
+ def parents(self, ix):
+ if ix is None:
+ return []
+ if ix <= self._walkfrom:
+ self._walkto(ix)
+ return self._children.get(ix, [])
+
+ def inverse(self):
+ return self._orig
diff --git a/websdk/mercurial/demandimport.py b/websdk/mercurial/demandimport.py
index 2498fbb..e439487 120000..100644
--- a/websdk/mercurial/demandimport.py
+++ b/websdk/mercurial/demandimport.py
@@ -1 +1,155 @@
-/usr/share/pyshared/mercurial/demandimport.py \ No newline at end of file
+# demandimport.py - global demand-loading of modules for Mercurial
+#
+# Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+'''
+demandimport - automatic demandloading of modules
+
+To enable this module, do:
+
+ import demandimport; demandimport.enable()
+
+Imports of the following forms will be demand-loaded:
+
+ import a, b.c
+ import a.b as c
+ from a import b,c # a will be loaded immediately
+
+These imports will not be delayed:
+
+ from a import *
+ b = __import__(a)
+'''
+
+import __builtin__
+_origimport = __import__
+
+nothing = object()
+
+try:
+ _origimport(__builtin__.__name__, {}, {}, None, -1)
+except TypeError: # no level argument
+ def _import(name, globals, locals, fromlist, level):
+ "call _origimport with no level argument"
+ return _origimport(name, globals, locals, fromlist)
+else:
+ _import = _origimport
+
+class _demandmod(object):
+ """module demand-loader and proxy"""
+ def __init__(self, name, globals, locals):
+ if '.' in name:
+ head, rest = name.split('.', 1)
+ after = [rest]
+ else:
+ head = name
+ after = []
+ object.__setattr__(self, "_data", (head, globals, locals, after))
+ object.__setattr__(self, "_module", None)
+ def _extend(self, name):
+ """add to the list of submodules to load"""
+ self._data[3].append(name)
+ def _load(self):
+ if not self._module:
+ head, globals, locals, after = self._data
+ mod = _origimport(head, globals, locals)
+ # load submodules
+ def subload(mod, p):
+ h, t = p, None
+ if '.' in p:
+ h, t = p.split('.', 1)
+ if getattr(mod, h, nothing) is nothing:
+ setattr(mod, h, _demandmod(p, mod.__dict__, mod.__dict__))
+ elif t:
+ subload(getattr(mod, h), t)
+
+ for x in after:
+ subload(mod, x)
+
+ # are we in the locals dictionary still?
+ if locals and locals.get(head) == self:
+ locals[head] = mod
+ object.__setattr__(self, "_module", mod)
+
+ def __repr__(self):
+ if self._module:
+ return "<proxied module '%s'>" % self._data[0]
+ return "<unloaded module '%s'>" % self._data[0]
+ def __call__(self, *args, **kwargs):
+ raise TypeError("%s object is not callable" % repr(self))
+ def __getattribute__(self, attr):
+ if attr in ('_data', '_extend', '_load', '_module'):
+ return object.__getattribute__(self, attr)
+ self._load()
+ return getattr(self._module, attr)
+ def __setattr__(self, attr, val):
+ self._load()
+ setattr(self._module, attr, val)
+
+def _demandimport(name, globals=None, locals=None, fromlist=None, level=-1):
+ if not locals or name in ignore or fromlist == ('*',):
+ # these cases we can't really delay
+ return _import(name, globals, locals, fromlist, level)
+ elif not fromlist:
+ # import a [as b]
+ if '.' in name: # a.b
+ base, rest = name.split('.', 1)
+ # email.__init__ loading email.mime
+ if globals and globals.get('__name__', None) == base:
+ return _import(name, globals, locals, fromlist, level)
+ # if a is already demand-loaded, add b to its submodule list
+ if base in locals:
+ if isinstance(locals[base], _demandmod):
+ locals[base]._extend(rest)
+ return locals[base]
+ return _demandmod(name, globals, locals)
+ else:
+ if level != -1:
+ # from . import b,c,d or from .a import b,c,d
+ return _origimport(name, globals, locals, fromlist, level)
+ # from a import b,c,d
+ mod = _origimport(name, globals, locals)
+ # recurse down the module chain
+ for comp in name.split('.')[1:]:
+ if getattr(mod, comp, nothing) is nothing:
+ setattr(mod, comp, _demandmod(comp, mod.__dict__, mod.__dict__))
+ mod = getattr(mod, comp)
+ for x in fromlist:
+ # set requested submodules for demand load
+ if getattr(mod, x, nothing) is nothing:
+ setattr(mod, x, _demandmod(x, mod.__dict__, locals))
+ return mod
+
+ignore = [
+ '_hashlib',
+ '_xmlplus',
+ 'fcntl',
+ 'win32com.gen_py',
+ '_winreg', # 2.7 mimetypes needs immediate ImportError
+ 'pythoncom',
+ # imported by tarfile, not available under Windows
+ 'pwd',
+ 'grp',
+ # imported by profile, itself imported by hotshot.stats,
+ # not available under Windows
+ 'resource',
+ # this trips up many extension authors
+ 'gtk',
+ # setuptools' pkg_resources.py expects "from __main__ import x" to
+ # raise ImportError if x not defined
+ '__main__',
+ '_ssl', # conditional imports in the stdlib, issue1964
+ 'rfc822',
+ 'mimetools',
+ ]
+
+def enable():
+ "enable global demand-loading of modules"
+ __builtin__.__import__ = _demandimport
+
+def disable():
+ "disable global demand-loading of modules"
+ __builtin__.__import__ = _origimport
diff --git a/websdk/mercurial/dirstate.py b/websdk/mercurial/dirstate.py
index 95a0490..50af321 120000..100644
--- a/websdk/mercurial/dirstate.py
+++ b/websdk/mercurial/dirstate.py
@@ -1 +1,725 @@
-/usr/share/pyshared/mercurial/dirstate.py \ No newline at end of file
+# dirstate.py - working directory tracking for mercurial
+#
+# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from node import nullid
+from i18n import _
+import scmutil, util, ignore, osutil, parsers, encoding
+import struct, os, stat, errno
+import cStringIO
+
+_format = ">cllll"
+propertycache = util.propertycache
+
+def _finddirs(path):
+ pos = path.rfind('/')
+ while pos != -1:
+ yield path[:pos]
+ pos = path.rfind('/', 0, pos)
+
+def _incdirs(dirs, path):
+ for base in _finddirs(path):
+ if base in dirs:
+ dirs[base] += 1
+ return
+ dirs[base] = 1
+
+def _decdirs(dirs, path):
+ for base in _finddirs(path):
+ if dirs[base] > 1:
+ dirs[base] -= 1
+ return
+ del dirs[base]
+
+class dirstate(object):
+
+ def __init__(self, opener, ui, root, validate):
+ '''Create a new dirstate object.
+
+ opener is an open()-like callable that can be used to open the
+ dirstate file; root is the root of the directory tracked by
+ the dirstate.
+ '''
+ self._opener = opener
+ self._validate = validate
+ self._root = root
+ self._rootdir = os.path.join(root, '')
+ self._dirty = False
+ self._dirtypl = False
+ self._lastnormaltime = None
+ self._ui = ui
+
+ @propertycache
+ def _map(self):
+ '''Return the dirstate contents as a map from filename to
+ (state, mode, size, time).'''
+ self._read()
+ return self._map
+
+ @propertycache
+ def _copymap(self):
+ self._read()
+ return self._copymap
+
+ @propertycache
+ def _foldmap(self):
+ f = {}
+ for name in self._map:
+ f[util.normcase(name)] = name
+ return f
+
+ @propertycache
+ def _branch(self):
+ try:
+ return self._opener.read("branch").strip() or "default"
+ except IOError:
+ return "default"
+
+ @propertycache
+ def _pl(self):
+ try:
+ fp = self._opener("dirstate")
+ st = fp.read(40)
+ fp.close()
+ l = len(st)
+ if l == 40:
+ return st[:20], st[20:40]
+ elif l > 0 and l < 40:
+ raise util.Abort(_('working directory state appears damaged!'))
+ except IOError, err:
+ if err.errno != errno.ENOENT:
+ raise
+ return [nullid, nullid]
+
+ @propertycache
+ def _dirs(self):
+ dirs = {}
+ for f, s in self._map.iteritems():
+ if s[0] != 'r':
+ _incdirs(dirs, f)
+ return dirs
+
+ @propertycache
+ def _ignore(self):
+ files = [self._join('.hgignore')]
+ for name, path in self._ui.configitems("ui"):
+ if name == 'ignore' or name.startswith('ignore.'):
+ files.append(util.expandpath(path))
+ return ignore.ignore(self._root, files, self._ui.warn)
+
+ @propertycache
+ def _slash(self):
+ return self._ui.configbool('ui', 'slash') and os.sep != '/'
+
+ @propertycache
+ def _checklink(self):
+ return util.checklink(self._root)
+
+ @propertycache
+ def _checkexec(self):
+ return util.checkexec(self._root)
+
+ @propertycache
+ def _checkcase(self):
+ return not util.checkcase(self._join('.hg'))
+
+ def _join(self, f):
+ # much faster than os.path.join()
+ # it's safe because f is always a relative path
+ return self._rootdir + f
+
+ def flagfunc(self, buildfallback):
+ if self._checklink and self._checkexec:
+ def f(x):
+ p = self._join(x)
+ if os.path.islink(p):
+ return 'l'
+ if util.isexec(p):
+ return 'x'
+ return ''
+ return f
+
+ fallback = buildfallback()
+ if self._checklink:
+ def f(x):
+ if os.path.islink(self._join(x)):
+ return 'l'
+ if 'x' in fallback(x):
+ return 'x'
+ return ''
+ return f
+ if self._checkexec:
+ def f(x):
+ if 'l' in fallback(x):
+ return 'l'
+ if util.isexec(self._join(x)):
+ return 'x'
+ return ''
+ return f
+ else:
+ return fallback
+
+ def getcwd(self):
+ cwd = os.getcwd()
+ if cwd == self._root:
+ return ''
+ # self._root ends with a path separator if self._root is '/' or 'C:\'
+ rootsep = self._root
+ if not util.endswithsep(rootsep):
+ rootsep += os.sep
+ if cwd.startswith(rootsep):
+ return cwd[len(rootsep):]
+ else:
+ # we're outside the repo. return an absolute path.
+ return cwd
+
+ def pathto(self, f, cwd=None):
+ if cwd is None:
+ cwd = self.getcwd()
+ path = util.pathto(self._root, cwd, f)
+ if self._slash:
+ return util.normpath(path)
+ return path
+
+ def __getitem__(self, key):
+ '''Return the current state of key (a filename) in the dirstate.
+
+ States are:
+ n normal
+ m needs merging
+ r marked for removal
+ a marked for addition
+ ? not tracked
+ '''
+ return self._map.get(key, ("?",))[0]
+
+ def __contains__(self, key):
+ return key in self._map
+
+ def __iter__(self):
+ for x in sorted(self._map):
+ yield x
+
+ def parents(self):
+ return [self._validate(p) for p in self._pl]
+
+ def p1(self):
+ return self._validate(self._pl[0])
+
+ def p2(self):
+ return self._validate(self._pl[1])
+
+ def branch(self):
+ return encoding.tolocal(self._branch)
+
+ def setparents(self, p1, p2=nullid):
+ self._dirty = self._dirtypl = True
+ self._pl = p1, p2
+
+ def setbranch(self, branch):
+ if branch in ['tip', '.', 'null']:
+ raise util.Abort(_('the name \'%s\' is reserved') % branch)
+ self._branch = encoding.fromlocal(branch)
+ self._opener.write("branch", self._branch + '\n')
+
+ def _read(self):
+ self._map = {}
+ self._copymap = {}
+ try:
+ st = self._opener.read("dirstate")
+ except IOError, err:
+ if err.errno != errno.ENOENT:
+ raise
+ return
+ if not st:
+ return
+
+ p = parsers.parse_dirstate(self._map, self._copymap, st)
+ if not self._dirtypl:
+ self._pl = p
+
+ def invalidate(self):
+ for a in ("_map", "_copymap", "_foldmap", "_branch", "_pl", "_dirs",
+ "_ignore"):
+ if a in self.__dict__:
+ delattr(self, a)
+ self._lastnormaltime = None
+ self._dirty = False
+
+ def copy(self, source, dest):
+ """Mark dest as a copy of source. Unmark dest if source is None."""
+ if source == dest:
+ return
+ self._dirty = True
+ if source is not None:
+ self._copymap[dest] = source
+ elif dest in self._copymap:
+ del self._copymap[dest]
+
+ def copied(self, file):
+ return self._copymap.get(file, None)
+
+ def copies(self):
+ return self._copymap
+
+ def _droppath(self, f):
+ if self[f] not in "?r" and "_dirs" in self.__dict__:
+ _decdirs(self._dirs, f)
+
+ def _addpath(self, f, check=False):
+ oldstate = self[f]
+ if check or oldstate == "r":
+ scmutil.checkfilename(f)
+ if f in self._dirs:
+ raise util.Abort(_('directory %r already in dirstate') % f)
+ # shadows
+ for d in _finddirs(f):
+ if d in self._dirs:
+ break
+ if d in self._map and self[d] != 'r':
+ raise util.Abort(
+ _('file %r in dirstate clashes with %r') % (d, f))
+ if oldstate in "?r" and "_dirs" in self.__dict__:
+ _incdirs(self._dirs, f)
+
+ def normal(self, f):
+ '''Mark a file normal and clean.'''
+ self._dirty = True
+ self._addpath(f)
+ s = os.lstat(self._join(f))
+ mtime = int(s.st_mtime)
+ self._map[f] = ('n', s.st_mode, s.st_size, mtime)
+ if f in self._copymap:
+ del self._copymap[f]
+ if mtime > self._lastnormaltime:
+ # Remember the most recent modification timeslot for status(),
+ # to make sure we won't miss future size-preserving file content
+ # modifications that happen within the same timeslot.
+ self._lastnormaltime = mtime
+
+ def normallookup(self, f):
+ '''Mark a file normal, but possibly dirty.'''
+ if self._pl[1] != nullid and f in self._map:
+ # if there is a merge going on and the file was either
+ # in state 'm' (-1) or coming from other parent (-2) before
+ # being removed, restore that state.
+ entry = self._map[f]
+ if entry[0] == 'r' and entry[2] in (-1, -2):
+ source = self._copymap.get(f)
+ if entry[2] == -1:
+ self.merge(f)
+ elif entry[2] == -2:
+ self.otherparent(f)
+ if source:
+ self.copy(source, f)
+ return
+ if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
+ return
+ self._dirty = True
+ self._addpath(f)
+ self._map[f] = ('n', 0, -1, -1)
+ if f in self._copymap:
+ del self._copymap[f]
+
+ def otherparent(self, f):
+ '''Mark as coming from the other parent, always dirty.'''
+ if self._pl[1] == nullid:
+ raise util.Abort(_("setting %r to other parent "
+ "only allowed in merges") % f)
+ self._dirty = True
+ self._addpath(f)
+ self._map[f] = ('n', 0, -2, -1)
+ if f in self._copymap:
+ del self._copymap[f]
+
+ def add(self, f):
+ '''Mark a file added.'''
+ self._dirty = True
+ self._addpath(f, True)
+ self._map[f] = ('a', 0, -1, -1)
+ if f in self._copymap:
+ del self._copymap[f]
+
+ def remove(self, f):
+ '''Mark a file removed.'''
+ self._dirty = True
+ self._droppath(f)
+ size = 0
+ if self._pl[1] != nullid and f in self._map:
+ # backup the previous state
+ entry = self._map[f]
+ if entry[0] == 'm': # merge
+ size = -1
+ elif entry[0] == 'n' and entry[2] == -2: # other parent
+ size = -2
+ self._map[f] = ('r', 0, size, 0)
+ if size == 0 and f in self._copymap:
+ del self._copymap[f]
+
+ def merge(self, f):
+ '''Mark a file merged.'''
+ self._dirty = True
+ s = os.lstat(self._join(f))
+ self._addpath(f)
+ self._map[f] = ('m', s.st_mode, s.st_size, int(s.st_mtime))
+ if f in self._copymap:
+ del self._copymap[f]
+
+ def drop(self, f):
+ '''Drop a file from the dirstate'''
+ if f in self._map:
+ self._dirty = True
+ self._droppath(f)
+ del self._map[f]
+
+ def _normalize(self, path, isknown):
+ normed = util.normcase(path)
+ folded = self._foldmap.get(normed, None)
+ if folded is None:
+ if isknown or not os.path.lexists(os.path.join(self._root, path)):
+ folded = path
+ else:
+ folded = self._foldmap.setdefault(normed,
+ util.fspath(path, self._root))
+ return folded
+
+ def normalize(self, path, isknown=False):
+ '''
+ normalize the case of a pathname when on a casefolding filesystem
+
+ isknown specifies whether the filename came from walking the
+ disk, to avoid extra filesystem access
+
+ The normalized case is determined based on the following precedence:
+
+ - version of name already stored in the dirstate
+ - version of name stored on disk
+ - version provided via command arguments
+ '''
+
+ if self._checkcase:
+ return self._normalize(path, isknown)
+ return path
+
+ def clear(self):
+ self._map = {}
+ if "_dirs" in self.__dict__:
+ delattr(self, "_dirs")
+ self._copymap = {}
+ self._pl = [nullid, nullid]
+ self._lastnormaltime = None
+ self._dirty = True
+
+ def rebuild(self, parent, files):
+ self.clear()
+ for f in files:
+ if 'x' in files.flags(f):
+ self._map[f] = ('n', 0777, -1, 0)
+ else:
+ self._map[f] = ('n', 0666, -1, 0)
+ self._pl = (parent, nullid)
+ self._dirty = True
+
+ def write(self):
+ if not self._dirty:
+ return
+ st = self._opener("dirstate", "w", atomictemp=True)
+
+ # use the modification time of the newly created temporary file as the
+ # filesystem's notion of 'now'
+ now = int(util.fstat(st).st_mtime)
+
+ cs = cStringIO.StringIO()
+ copymap = self._copymap
+ pack = struct.pack
+ write = cs.write
+ write("".join(self._pl))
+ for f, e in self._map.iteritems():
+ if e[0] == 'n' and e[3] == now:
+ # The file was last modified "simultaneously" with the current
+ # write to dirstate (i.e. within the same second for file-
+ # systems with a granularity of 1 sec). This commonly happens
+ # for at least a couple of files on 'update'.
+ # The user could change the file without changing its size
+ # within the same second. Invalidate the file's stat data in
+ # dirstate, forcing future 'status' calls to compare the
+ # contents of the file. This prevents mistakenly treating such
+ # files as clean.
+ e = (e[0], 0, -1, -1) # mark entry as 'unset'
+ self._map[f] = e
+
+ if f in copymap:
+ f = "%s\0%s" % (f, copymap[f])
+ e = pack(_format, e[0], e[1], e[2], e[3], len(f))
+ write(e)
+ write(f)
+ st.write(cs.getvalue())
+ st.close()
+ self._lastnormaltime = None
+ self._dirty = self._dirtypl = False
+
+ def _dirignore(self, f):
+ if f == '.':
+ return False
+ if self._ignore(f):
+ return True
+ for p in _finddirs(f):
+ if self._ignore(p):
+ return True
+ return False
+
+ def walk(self, match, subrepos, unknown, ignored):
+ '''
+ Walk recursively through the directory tree, finding all files
+ matched by match.
+
+ Return a dict mapping filename to stat-like object (either
+ mercurial.osutil.stat instance or return value of os.stat()).
+ '''
+
+ def fwarn(f, msg):
+ self._ui.warn('%s: %s\n' % (self.pathto(f), msg))
+ return False
+
+ def badtype(mode):
+ kind = _('unknown')
+ if stat.S_ISCHR(mode):
+ kind = _('character device')
+ elif stat.S_ISBLK(mode):
+ kind = _('block device')
+ elif stat.S_ISFIFO(mode):
+ kind = _('fifo')
+ elif stat.S_ISSOCK(mode):
+ kind = _('socket')
+ elif stat.S_ISDIR(mode):
+ kind = _('directory')
+ return _('unsupported file type (type is %s)') % kind
+
+ ignore = self._ignore
+ dirignore = self._dirignore
+ if ignored:
+ ignore = util.never
+ dirignore = util.never
+ elif not unknown:
+ # if unknown and ignored are False, skip step 2
+ ignore = util.always
+ dirignore = util.always
+
+ matchfn = match.matchfn
+ badfn = match.bad
+ dmap = self._map
+ normpath = util.normpath
+ listdir = osutil.listdir
+ lstat = os.lstat
+ getkind = stat.S_IFMT
+ dirkind = stat.S_IFDIR
+ regkind = stat.S_IFREG
+ lnkkind = stat.S_IFLNK
+ join = self._join
+ work = []
+ wadd = work.append
+
+ exact = skipstep3 = False
+ if matchfn == match.exact: # match.exact
+ exact = True
+ dirignore = util.always # skip step 2
+ elif match.files() and not match.anypats(): # match.match, no patterns
+ skipstep3 = True
+
+ if self._checkcase:
+ normalize = self._normalize
+ skipstep3 = False
+ else:
+ normalize = lambda x, y: x
+
+ files = sorted(match.files())
+ subrepos.sort()
+ i, j = 0, 0
+ while i < len(files) and j < len(subrepos):
+ subpath = subrepos[j] + "/"
+ if files[i] < subpath:
+ i += 1
+ continue
+ while i < len(files) and files[i].startswith(subpath):
+ del files[i]
+ j += 1
+
+ if not files or '.' in files:
+ files = ['']
+ results = dict.fromkeys(subrepos)
+ results['.hg'] = None
+
+ # step 1: find all explicit files
+ for ff in files:
+ nf = normalize(normpath(ff), False)
+ if nf in results:
+ continue
+
+ try:
+ st = lstat(join(nf))
+ kind = getkind(st.st_mode)
+ if kind == dirkind:
+ skipstep3 = False
+ if nf in dmap:
+ #file deleted on disk but still in dirstate
+ results[nf] = None
+ match.dir(nf)
+ if not dirignore(nf):
+ wadd(nf)
+ elif kind == regkind or kind == lnkkind:
+ results[nf] = st
+ else:
+ badfn(ff, badtype(kind))
+ if nf in dmap:
+ results[nf] = None
+ except OSError, inst:
+ if nf in dmap: # does it exactly match a file?
+ results[nf] = None
+ else: # does it match a directory?
+ prefix = nf + "/"
+ for fn in dmap:
+ if fn.startswith(prefix):
+ match.dir(nf)
+ skipstep3 = False
+ break
+ else:
+ badfn(ff, inst.strerror)
+
+ # step 2: visit subdirectories
+ while work:
+ nd = work.pop()
+ skip = None
+ if nd == '.':
+ nd = ''
+ else:
+ skip = '.hg'
+ try:
+ entries = listdir(join(nd), stat=True, skip=skip)
+ except OSError, inst:
+ if inst.errno == errno.EACCES:
+ fwarn(nd, inst.strerror)
+ continue
+ raise
+ for f, kind, st in entries:
+ nf = normalize(nd and (nd + "/" + f) or f, True)
+ if nf not in results:
+ if kind == dirkind:
+ if not ignore(nf):
+ match.dir(nf)
+ wadd(nf)
+ if nf in dmap and matchfn(nf):
+ results[nf] = None
+ elif kind == regkind or kind == lnkkind:
+ if nf in dmap:
+ if matchfn(nf):
+ results[nf] = st
+ elif matchfn(nf) and not ignore(nf):
+ results[nf] = st
+ elif nf in dmap and matchfn(nf):
+ results[nf] = None
+
+ # step 3: report unseen items in the dmap hash
+ if not skipstep3 and not exact:
+ visit = sorted([f for f in dmap if f not in results and matchfn(f)])
+ for nf, st in zip(visit, util.statfiles([join(i) for i in visit])):
+ if not st is None and not getkind(st.st_mode) in (regkind, lnkkind):
+ st = None
+ results[nf] = st
+ for s in subrepos:
+ del results[s]
+ del results['.hg']
+ return results
+
+ def status(self, match, subrepos, ignored, clean, unknown):
+ '''Determine the status of the working copy relative to the
+ dirstate and return a tuple of lists (unsure, modified, added,
+ removed, deleted, unknown, ignored, clean), where:
+
+ unsure:
+ files that might have been modified since the dirstate was
+ written, but need to be read to be sure (size is the same
+ but mtime differs)
+ modified:
+ files that have definitely been modified since the dirstate
+ was written (different size or mode)
+ added:
+ files that have been explicitly added with hg add
+ removed:
+ files that have been explicitly removed with hg remove
+ deleted:
+ files that have been deleted through other means ("missing")
+ unknown:
+ files not in the dirstate that are not ignored
+ ignored:
+ files not in the dirstate that are ignored
+ (by _dirignore())
+ clean:
+ files that have definitely not been modified since the
+ dirstate was written
+ '''
+ listignored, listclean, listunknown = ignored, clean, unknown
+ lookup, modified, added, unknown, ignored = [], [], [], [], []
+ removed, deleted, clean = [], [], []
+
+ dmap = self._map
+ ladd = lookup.append # aka "unsure"
+ madd = modified.append
+ aadd = added.append
+ uadd = unknown.append
+ iadd = ignored.append
+ radd = removed.append
+ dadd = deleted.append
+ cadd = clean.append
+
+ lnkkind = stat.S_IFLNK
+
+ for fn, st in self.walk(match, subrepos, listunknown,
+ listignored).iteritems():
+ if fn not in dmap:
+ if (listignored or match.exact(fn)) and self._dirignore(fn):
+ if listignored:
+ iadd(fn)
+ elif listunknown:
+ uadd(fn)
+ continue
+
+ state, mode, size, time = dmap[fn]
+
+ if not st and state in "nma":
+ dadd(fn)
+ elif state == 'n':
+ # The "mode & lnkkind != lnkkind or self._checklink"
+ # lines are an expansion of "islink => checklink"
+ # where islink means "is this a link?" and checklink
+ # means "can we check links?".
+ mtime = int(st.st_mtime)
+ if (size >= 0 and
+ (size != st.st_size
+ or ((mode ^ st.st_mode) & 0100 and self._checkexec))
+ and (mode & lnkkind != lnkkind or self._checklink)
+ or size == -2 # other parent
+ or fn in self._copymap):
+ madd(fn)
+ elif (mtime != time
+ and (mode & lnkkind != lnkkind or self._checklink)):
+ ladd(fn)
+ elif mtime == self._lastnormaltime:
+ # fn may have been changed in the same timeslot without
+ # changing its size. This can happen if we quickly do
+ # multiple commits in a single transaction.
+ # Force lookup, so we don't miss such a racy file change.
+ ladd(fn)
+ elif listclean:
+ cadd(fn)
+ elif state == 'm':
+ madd(fn)
+ elif state == 'a':
+ aadd(fn)
+ elif state == 'r':
+ radd(fn)
+
+ return (lookup, modified, added, removed, deleted, unknown, ignored,
+ clean)
diff --git a/websdk/mercurial/discovery.py b/websdk/mercurial/discovery.py
index 77607e3..83b0c43 120000..100644
--- a/websdk/mercurial/discovery.py
+++ b/websdk/mercurial/discovery.py
@@ -1 +1,192 @@
-/usr/share/pyshared/mercurial/discovery.py \ No newline at end of file
+# discovery.py - protocol changeset discovery functions
+#
+# Copyright 2010 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from node import nullid, short
+from i18n import _
+import util, setdiscovery, treediscovery
+
+def findcommonincoming(repo, remote, heads=None, force=False):
+ """Return a tuple (common, anyincoming, heads) used to identify the common
+ subset of nodes between repo and remote.
+
+ "common" is a list of (at least) the heads of the common subset.
+ "anyincoming" is testable as a boolean indicating if any nodes are missing
+ locally. If remote does not support getbundle, this actually is a list of
+ roots of the nodes that would be incoming, to be supplied to
+ changegroupsubset. No code except for pull should be relying on this fact
+ any longer.
+ "heads" is either the supplied heads, or else the remote's heads.
+
+ If you pass heads and they are all known locally, the reponse lists justs
+ these heads in "common" and in "heads".
+
+ Please use findcommonoutgoing to compute the set of outgoing nodes to give
+ extensions a good hook into outgoing.
+ """
+
+ if not remote.capable('getbundle'):
+ return treediscovery.findcommonincoming(repo, remote, heads, force)
+
+ if heads:
+ allknown = True
+ nm = repo.changelog.nodemap
+ for h in heads:
+ if nm.get(h) is None:
+ allknown = False
+ break
+ if allknown:
+ return (heads, False, heads)
+
+ res = setdiscovery.findcommonheads(repo.ui, repo, remote,
+ abortwhenunrelated=not force)
+ common, anyinc, srvheads = res
+ return (list(common), anyinc, heads or list(srvheads))
+
+def findcommonoutgoing(repo, other, onlyheads=None, force=False, commoninc=None):
+ '''Return a tuple (common, anyoutgoing, heads) used to identify the set
+ of nodes present in repo but not in other.
+
+ If onlyheads is given, only nodes ancestral to nodes in onlyheads (inclusive)
+ are included. If you already know the local repo's heads, passing them in
+ onlyheads is faster than letting them be recomputed here.
+
+ If commoninc is given, it must the the result of a prior call to
+ findcommonincoming(repo, other, force) to avoid recomputing it here.
+
+ The returned tuple is meant to be passed to changelog.findmissing.'''
+ common, _any, _hds = commoninc or findcommonincoming(repo, other, force=force)
+ return (common, onlyheads or repo.heads())
+
+def prepush(repo, remote, force, revs, newbranch):
+ '''Analyze the local and remote repositories and determine which
+ changesets need to be pushed to the remote. Return value depends
+ on circumstances:
+
+ If we are not going to push anything, return a tuple (None,
+ outgoing) where outgoing is 0 if there are no outgoing
+ changesets and 1 if there are, but we refuse to push them
+ (e.g. would create new remote heads).
+
+ Otherwise, return a tuple (changegroup, remoteheads), where
+ changegroup is a readable file-like object whose read() returns
+ successive changegroup chunks ready to be sent over the wire and
+ remoteheads is the list of remote heads.'''
+ commoninc = findcommonincoming(repo, remote, force=force)
+ common, revs = findcommonoutgoing(repo, remote, onlyheads=revs,
+ commoninc=commoninc, force=force)
+ _common, inc, remoteheads = commoninc
+
+ cl = repo.changelog
+ outg = cl.findmissing(common, revs)
+
+ if not outg:
+ repo.ui.status(_("no changes found\n"))
+ return None, 1
+
+ if not force and remoteheads != [nullid]:
+ if remote.capable('branchmap'):
+ # Check for each named branch if we're creating new remote heads.
+ # To be a remote head after push, node must be either:
+ # - unknown locally
+ # - a local outgoing head descended from update
+ # - a remote head that's known locally and not
+ # ancestral to an outgoing head
+
+ # 1. Create set of branches involved in the push.
+ branches = set(repo[n].branch() for n in outg)
+
+ # 2. Check for new branches on the remote.
+ remotemap = remote.branchmap()
+ newbranches = branches - set(remotemap)
+ if newbranches and not newbranch: # new branch requires --new-branch
+ branchnames = ', '.join(sorted(newbranches))
+ raise util.Abort(_("push creates new remote branches: %s!")
+ % branchnames,
+ hint=_("use 'hg push --new-branch' to create"
+ " new remote branches"))
+ branches.difference_update(newbranches)
+
+ # 3. Construct the initial oldmap and newmap dicts.
+ # They contain information about the remote heads before and
+ # after the push, respectively.
+ # Heads not found locally are not included in either dict,
+ # since they won't be affected by the push.
+ # unsynced contains all branches with incoming changesets.
+ oldmap = {}
+ newmap = {}
+ unsynced = set()
+ for branch in branches:
+ remotebrheads = remotemap[branch]
+ prunedbrheads = [h for h in remotebrheads if h in cl.nodemap]
+ oldmap[branch] = prunedbrheads
+ newmap[branch] = list(prunedbrheads)
+ if len(remotebrheads) > len(prunedbrheads):
+ unsynced.add(branch)
+
+ # 4. Update newmap with outgoing changes.
+ # This will possibly add new heads and remove existing ones.
+ ctxgen = (repo[n] for n in outg)
+ repo._updatebranchcache(newmap, ctxgen)
+
+ else:
+ # 1-4b. old servers: Check for new topological heads.
+ # Construct {old,new}map with branch = None (topological branch).
+ # (code based on _updatebranchcache)
+ oldheads = set(h for h in remoteheads if h in cl.nodemap)
+ newheads = oldheads.union(outg)
+ if len(newheads) > 1:
+ for latest in reversed(outg):
+ if latest not in newheads:
+ continue
+ minhrev = min(cl.rev(h) for h in newheads)
+ reachable = cl.reachable(latest, cl.node(minhrev))
+ reachable.remove(latest)
+ newheads.difference_update(reachable)
+ branches = set([None])
+ newmap = {None: newheads}
+ oldmap = {None: oldheads}
+ unsynced = inc and branches or set()
+
+ # 5. Check for new heads.
+ # If there are more heads after the push than before, a suitable
+ # error message, depending on unsynced status, is displayed.
+ error = None
+ for branch in branches:
+ newhs = set(newmap[branch])
+ oldhs = set(oldmap[branch])
+ if len(newhs) > len(oldhs):
+ dhs = list(newhs - oldhs)
+ if error is None:
+ if branch not in ('default', None):
+ error = _("push creates new remote head %s "
+ "on branch '%s'!") % (short(dhs[0]), branch)
+ else:
+ error = _("push creates new remote head %s!"
+ ) % short(dhs[0])
+ if branch in unsynced:
+ hint = _("you should pull and merge or "
+ "use push -f to force")
+ else:
+ hint = _("did you forget to merge? "
+ "use push -f to force")
+ if branch is not None:
+ repo.ui.note(_("new remote heads on branch '%s'\n") % branch)
+ for h in dhs:
+ repo.ui.note(_("new remote head %s\n") % short(h))
+ if error:
+ raise util.Abort(error, hint=hint)
+
+ # 6. Check for unsynced changes on involved branches.
+ if unsynced:
+ repo.ui.warn(_("note: unsynced remote changes!\n"))
+
+ if revs is None:
+ # use the fast path, no race possible on push
+ cg = repo._changegroup(outg, 'push')
+ else:
+ cg = repo.getbundle('push', heads=revs, common=common)
+ return cg, remoteheads
diff --git a/websdk/mercurial/dispatch.py b/websdk/mercurial/dispatch.py
index 611f8a9..f678c45 120000..100644
--- a/websdk/mercurial/dispatch.py
+++ b/websdk/mercurial/dispatch.py
@@ -1 +1,738 @@
-/usr/share/pyshared/mercurial/dispatch.py \ No newline at end of file
+# dispatch.py - command dispatching for mercurial
+#
+# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from i18n import _
+import os, sys, atexit, signal, pdb, socket, errno, shlex, time, traceback, re
+import util, commands, hg, fancyopts, extensions, hook, error
+import cmdutil, encoding
+import ui as uimod
+
+class request(object):
+ def __init__(self, args, ui=None, repo=None, fin=None, fout=None, ferr=None):
+ self.args = args
+ self.ui = ui
+ self.repo = repo
+
+ # input/output/error streams
+ self.fin = fin
+ self.fout = fout
+ self.ferr = ferr
+
+def run():
+ "run the command in sys.argv"
+ sys.exit(dispatch(request(sys.argv[1:])))
+
+def dispatch(req):
+ "run the command specified in req.args"
+ if req.ferr:
+ ferr = req.ferr
+ elif req.ui:
+ ferr = req.ui.ferr
+ else:
+ ferr = sys.stderr
+
+ try:
+ if not req.ui:
+ req.ui = uimod.ui()
+ if '--traceback' in req.args:
+ req.ui.setconfig('ui', 'traceback', 'on')
+
+ # set ui streams from the request
+ if req.fin:
+ req.ui.fin = req.fin
+ if req.fout:
+ req.ui.fout = req.fout
+ if req.ferr:
+ req.ui.ferr = req.ferr
+ except util.Abort, inst:
+ ferr.write(_("abort: %s\n") % inst)
+ if inst.hint:
+ ferr.write(_("(%s)\n") % inst.hint)
+ return -1
+ except error.ParseError, inst:
+ if len(inst.args) > 1:
+ ferr.write(_("hg: parse error at %s: %s\n") %
+ (inst.args[1], inst.args[0]))
+ else:
+ ferr.write(_("hg: parse error: %s\n") % inst.args[0])
+ return -1
+
+ return _runcatch(req)
+
+def _runcatch(req):
+ def catchterm(*args):
+ raise error.SignalInterrupt
+
+ ui = req.ui
+ try:
+ for name in 'SIGBREAK', 'SIGHUP', 'SIGTERM':
+ num = getattr(signal, name, None)
+ if num:
+ signal.signal(num, catchterm)
+ except ValueError:
+ pass # happens if called in a thread
+
+ try:
+ try:
+ # enter the debugger before command execution
+ if '--debugger' in req.args:
+ ui.warn(_("entering debugger - "
+ "type c to continue starting hg or h for help\n"))
+ pdb.set_trace()
+ try:
+ return _dispatch(req)
+ finally:
+ ui.flush()
+ except:
+ # enter the debugger when we hit an exception
+ if '--debugger' in req.args:
+ traceback.print_exc()
+ pdb.post_mortem(sys.exc_info()[2])
+ ui.traceback()
+ raise
+
+ # Global exception handling, alphabetically
+ # Mercurial-specific first, followed by built-in and library exceptions
+ except error.AmbiguousCommand, inst:
+ ui.warn(_("hg: command '%s' is ambiguous:\n %s\n") %
+ (inst.args[0], " ".join(inst.args[1])))
+ except error.ParseError, inst:
+ if len(inst.args) > 1:
+ ui.warn(_("hg: parse error at %s: %s\n") %
+ (inst.args[1], inst.args[0]))
+ else:
+ ui.warn(_("hg: parse error: %s\n") % inst.args[0])
+ return -1
+ except error.LockHeld, inst:
+ if inst.errno == errno.ETIMEDOUT:
+ reason = _('timed out waiting for lock held by %s') % inst.locker
+ else:
+ reason = _('lock held by %s') % inst.locker
+ ui.warn(_("abort: %s: %s\n") % (inst.desc or inst.filename, reason))
+ except error.LockUnavailable, inst:
+ ui.warn(_("abort: could not lock %s: %s\n") %
+ (inst.desc or inst.filename, inst.strerror))
+ except error.CommandError, inst:
+ if inst.args[0]:
+ ui.warn(_("hg %s: %s\n") % (inst.args[0], inst.args[1]))
+ commands.help_(ui, inst.args[0], full=False, command=True)
+ else:
+ ui.warn(_("hg: %s\n") % inst.args[1])
+ commands.help_(ui, 'shortlist')
+ except error.OutOfBandError, inst:
+ ui.warn(_("abort: remote error:\n"))
+ ui.warn(''.join(inst.args))
+ except error.RepoError, inst:
+ ui.warn(_("abort: %s!\n") % inst)
+ if inst.hint:
+ ui.warn(_("(%s)\n") % inst.hint)
+ except error.ResponseError, inst:
+ ui.warn(_("abort: %s") % inst.args[0])
+ if not isinstance(inst.args[1], basestring):
+ ui.warn(" %r\n" % (inst.args[1],))
+ elif not inst.args[1]:
+ ui.warn(_(" empty string\n"))
+ else:
+ ui.warn("\n%r\n" % util.ellipsis(inst.args[1]))
+ except error.RevlogError, inst:
+ ui.warn(_("abort: %s!\n") % inst)
+ except error.SignalInterrupt:
+ ui.warn(_("killed!\n"))
+ except error.UnknownCommand, inst:
+ ui.warn(_("hg: unknown command '%s'\n") % inst.args[0])
+ try:
+ # check if the command is in a disabled extension
+ # (but don't check for extensions themselves)
+ commands.help_(ui, inst.args[0], unknowncmd=True)
+ except error.UnknownCommand:
+ commands.help_(ui, 'shortlist')
+ except util.Abort, inst:
+ ui.warn(_("abort: %s\n") % inst)
+ if inst.hint:
+ ui.warn(_("(%s)\n") % inst.hint)
+ except ImportError, inst:
+ ui.warn(_("abort: %s!\n") % inst)
+ m = str(inst).split()[-1]
+ if m in "mpatch bdiff".split():
+ ui.warn(_("(did you forget to compile extensions?)\n"))
+ elif m in "zlib".split():
+ ui.warn(_("(is your Python install correct?)\n"))
+ except IOError, inst:
+ if util.safehasattr(inst, "code"):
+ ui.warn(_("abort: %s\n") % inst)
+ elif util.safehasattr(inst, "reason"):
+ try: # usually it is in the form (errno, strerror)
+ reason = inst.reason.args[1]
+ except (AttributeError, IndexError):
+ # it might be anything, for example a string
+ reason = inst.reason
+ ui.warn(_("abort: error: %s\n") % reason)
+ elif util.safehasattr(inst, "args") and inst.args[0] == errno.EPIPE:
+ if ui.debugflag:
+ ui.warn(_("broken pipe\n"))
+ elif getattr(inst, "strerror", None):
+ if getattr(inst, "filename", None):
+ ui.warn(_("abort: %s: %s\n") % (inst.strerror, inst.filename))
+ else:
+ ui.warn(_("abort: %s\n") % inst.strerror)
+ else:
+ raise
+ except OSError, inst:
+ if getattr(inst, "filename", None):
+ ui.warn(_("abort: %s: %s\n") % (inst.strerror, inst.filename))
+ else:
+ ui.warn(_("abort: %s\n") % inst.strerror)
+ except KeyboardInterrupt:
+ try:
+ ui.warn(_("interrupted!\n"))
+ except IOError, inst:
+ if inst.errno == errno.EPIPE:
+ if ui.debugflag:
+ ui.warn(_("\nbroken pipe\n"))
+ else:
+ raise
+ except MemoryError:
+ ui.warn(_("abort: out of memory\n"))
+ except SystemExit, inst:
+ # Commands shouldn't sys.exit directly, but give a return code.
+ # Just in case, catch this and pass the exit code to the caller.
+ return inst.code
+ except socket.error, inst:
+ ui.warn(_("abort: %s\n") % inst.args[-1])
+ except:
+ ui.warn(_("** unknown exception encountered,"
+ " please report by visiting\n"))
+ ui.warn(_("** http://mercurial.selenic.com/wiki/BugTracker\n"))
+ ui.warn(_("** Python %s\n") % sys.version.replace('\n', ''))
+ ui.warn(_("** Mercurial Distributed SCM (version %s)\n")
+ % util.version())
+ ui.warn(_("** Extensions loaded: %s\n")
+ % ", ".join([x[0] for x in extensions.extensions()]))
+ raise
+
+ return -1
+
+def aliasargs(fn, givenargs):
+ args = getattr(fn, 'args', [])
+ if args and givenargs:
+ cmd = ' '.join(map(util.shellquote, args))
+
+ nums = []
+ def replacer(m):
+ num = int(m.group(1)) - 1
+ nums.append(num)
+ return givenargs[num]
+ cmd = re.sub(r'\$(\d+|\$)', replacer, cmd)
+ givenargs = [x for i, x in enumerate(givenargs)
+ if i not in nums]
+ args = shlex.split(cmd)
+ return args + givenargs
+
+class cmdalias(object):
+ def __init__(self, name, definition, cmdtable):
+ self.name = self.cmd = name
+ self.cmdname = ''
+ self.definition = definition
+ self.args = []
+ self.opts = []
+ self.help = ''
+ self.norepo = True
+ self.badalias = False
+
+ try:
+ aliases, entry = cmdutil.findcmd(self.name, cmdtable)
+ for alias, e in cmdtable.iteritems():
+ if e is entry:
+ self.cmd = alias
+ break
+ self.shadows = True
+ except error.UnknownCommand:
+ self.shadows = False
+
+ if not self.definition:
+ def fn(ui, *args):
+ ui.warn(_("no definition for alias '%s'\n") % self.name)
+ return 1
+ self.fn = fn
+ self.badalias = True
+
+ return
+
+ if self.definition.startswith('!'):
+ self.shell = True
+ def fn(ui, *args):
+ env = {'HG_ARGS': ' '.join((self.name,) + args)}
+ def _checkvar(m):
+ if m.groups()[0] == '$':
+ return m.group()
+ elif int(m.groups()[0]) <= len(args):
+ return m.group()
+ else:
+ ui.debug("No argument found for substitution "
+ "of %i variable in alias '%s' definition."
+ % (int(m.groups()[0]), self.name))
+ return ''
+ cmd = re.sub(r'\$(\d+|\$)', _checkvar, self.definition[1:])
+ replace = dict((str(i + 1), arg) for i, arg in enumerate(args))
+ replace['0'] = self.name
+ replace['@'] = ' '.join(args)
+ cmd = util.interpolate(r'\$', replace, cmd, escape_prefix=True)
+ return util.system(cmd, environ=env, out=ui.fout)
+ self.fn = fn
+ return
+
+ args = shlex.split(self.definition)
+ self.cmdname = cmd = args.pop(0)
+ args = map(util.expandpath, args)
+
+ for invalidarg in ("--cwd", "-R", "--repository", "--repo"):
+ if _earlygetopt([invalidarg], args):
+ def fn(ui, *args):
+ ui.warn(_("error in definition for alias '%s': %s may only "
+ "be given on the command line\n")
+ % (self.name, invalidarg))
+ return 1
+
+ self.fn = fn
+ self.badalias = True
+ return
+
+ try:
+ tableentry = cmdutil.findcmd(cmd, cmdtable, False)[1]
+ if len(tableentry) > 2:
+ self.fn, self.opts, self.help = tableentry
+ else:
+ self.fn, self.opts = tableentry
+
+ self.args = aliasargs(self.fn, args)
+ if cmd not in commands.norepo.split(' '):
+ self.norepo = False
+ if self.help.startswith("hg " + cmd):
+ # drop prefix in old-style help lines so hg shows the alias
+ self.help = self.help[4 + len(cmd):]
+ self.__doc__ = self.fn.__doc__
+
+ except error.UnknownCommand:
+ def fn(ui, *args):
+ ui.warn(_("alias '%s' resolves to unknown command '%s'\n") \
+ % (self.name, cmd))
+ try:
+ # check if the command is in a disabled extension
+ commands.help_(ui, cmd, unknowncmd=True)
+ except error.UnknownCommand:
+ pass
+ return 1
+ self.fn = fn
+ self.badalias = True
+ except error.AmbiguousCommand:
+ def fn(ui, *args):
+ ui.warn(_("alias '%s' resolves to ambiguous command '%s'\n") \
+ % (self.name, cmd))
+ return 1
+ self.fn = fn
+ self.badalias = True
+
+ def __call__(self, ui, *args, **opts):
+ if self.shadows:
+ ui.debug("alias '%s' shadows command '%s'\n" %
+ (self.name, self.cmdname))
+
+ if util.safehasattr(self, 'shell'):
+ return self.fn(ui, *args, **opts)
+ else:
+ try:
+ util.checksignature(self.fn)(ui, *args, **opts)
+ except error.SignatureError:
+ args = ' '.join([self.cmdname] + self.args)
+ ui.debug("alias '%s' expands to '%s'\n" % (self.name, args))
+ raise
+
+def addaliases(ui, cmdtable):
+ # aliases are processed after extensions have been loaded, so they
+ # may use extension commands. Aliases can also use other alias definitions,
+ # but only if they have been defined prior to the current definition.
+ for alias, definition in ui.configitems('alias'):
+ aliasdef = cmdalias(alias, definition, cmdtable)
+
+ try:
+ olddef = cmdtable[aliasdef.cmd][0]
+ if olddef.definition == aliasdef.definition:
+ continue
+ except (KeyError, AttributeError):
+ # definition might not exist or it might not be a cmdalias
+ pass
+
+ cmdtable[aliasdef.name] = (aliasdef, aliasdef.opts, aliasdef.help)
+ if aliasdef.norepo:
+ commands.norepo += ' %s' % alias
+
+def _parse(ui, args):
+ options = {}
+ cmdoptions = {}
+
+ try:
+ args = fancyopts.fancyopts(args, commands.globalopts, options)
+ except fancyopts.getopt.GetoptError, inst:
+ raise error.CommandError(None, inst)
+
+ if args:
+ cmd, args = args[0], args[1:]
+ aliases, entry = cmdutil.findcmd(cmd, commands.table,
+ ui.config("ui", "strict"))
+ cmd = aliases[0]
+ args = aliasargs(entry[0], args)
+ defaults = ui.config("defaults", cmd)
+ if defaults:
+ args = map(util.expandpath, shlex.split(defaults)) + args
+ c = list(entry[1])
+ else:
+ cmd = None
+ c = []
+
+ # combine global options into local
+ for o in commands.globalopts:
+ c.append((o[0], o[1], options[o[1]], o[3]))
+
+ try:
+ args = fancyopts.fancyopts(args, c, cmdoptions, True)
+ except fancyopts.getopt.GetoptError, inst:
+ raise error.CommandError(cmd, inst)
+
+ # separate global options back out
+ for o in commands.globalopts:
+ n = o[1]
+ options[n] = cmdoptions[n]
+ del cmdoptions[n]
+
+ return (cmd, cmd and entry[0] or None, args, options, cmdoptions)
+
+def _parseconfig(ui, config):
+ """parse the --config options from the command line"""
+ configs = []
+
+ for cfg in config:
+ try:
+ name, value = cfg.split('=', 1)
+ section, name = name.split('.', 1)
+ if not section or not name:
+ raise IndexError
+ ui.setconfig(section, name, value)
+ configs.append((section, name, value))
+ except (IndexError, ValueError):
+ raise util.Abort(_('malformed --config option: %r '
+ '(use --config section.name=value)') % cfg)
+
+ return configs
+
+def _earlygetopt(aliases, args):
+ """Return list of values for an option (or aliases).
+
+ The values are listed in the order they appear in args.
+ The options and values are removed from args.
+ """
+ try:
+ argcount = args.index("--")
+ except ValueError:
+ argcount = len(args)
+ shortopts = [opt for opt in aliases if len(opt) == 2]
+ values = []
+ pos = 0
+ while pos < argcount:
+ if args[pos] in aliases:
+ if pos + 1 >= argcount:
+ # ignore and let getopt report an error if there is no value
+ break
+ del args[pos]
+ values.append(args.pop(pos))
+ argcount -= 2
+ elif args[pos][:2] in shortopts:
+ # short option can have no following space, e.g. hg log -Rfoo
+ values.append(args.pop(pos)[2:])
+ argcount -= 1
+ else:
+ pos += 1
+ return values
+
+def runcommand(lui, repo, cmd, fullargs, ui, options, d, cmdpats, cmdoptions):
+ # run pre-hook, and abort if it fails
+ ret = hook.hook(lui, repo, "pre-%s" % cmd, False, args=" ".join(fullargs),
+ pats=cmdpats, opts=cmdoptions)
+ if ret:
+ return ret
+ ret = _runcommand(ui, options, cmd, d)
+ # run post-hook, passing command result
+ hook.hook(lui, repo, "post-%s" % cmd, False, args=" ".join(fullargs),
+ result=ret, pats=cmdpats, opts=cmdoptions)
+ return ret
+
+def _getlocal(ui, rpath):
+ """Return (path, local ui object) for the given target path.
+
+ Takes paths in [cwd]/.hg/hgrc into account.
+ """
+ try:
+ wd = os.getcwd()
+ except OSError, e:
+ raise util.Abort(_("error getting current working directory: %s") %
+ e.strerror)
+ path = cmdutil.findrepo(wd) or ""
+ if not path:
+ lui = ui
+ else:
+ lui = ui.copy()
+ lui.readconfig(os.path.join(path, ".hg", "hgrc"), path)
+
+ if rpath and rpath[-1]:
+ path = lui.expandpath(rpath[-1])
+ lui = ui.copy()
+ lui.readconfig(os.path.join(path, ".hg", "hgrc"), path)
+
+ return path, lui
+
+def _checkshellalias(lui, ui, args):
+ norepo = commands.norepo
+ options = {}
+
+ try:
+ args = fancyopts.fancyopts(args, commands.globalopts, options)
+ except fancyopts.getopt.GetoptError:
+ return
+
+ if not args:
+ return
+
+ cmdtable = commands.table.copy()
+ addaliases(lui, cmdtable)
+
+ cmd = args[0]
+ try:
+ aliases, entry = cmdutil.findcmd(cmd, cmdtable, lui.config("ui", "strict"))
+ except (error.AmbiguousCommand, error.UnknownCommand):
+ commands.norepo = norepo
+ return
+
+ cmd = aliases[0]
+ fn = entry[0]
+
+ if cmd and util.safehasattr(fn, 'shell'):
+ d = lambda: fn(ui, *args[1:])
+ return lambda: runcommand(lui, None, cmd, args[:1], ui, options, d, [], {})
+
+ commands.norepo = norepo
+
+_loaded = set()
+def _dispatch(req):
+ args = req.args
+ ui = req.ui
+
+ # read --config before doing anything else
+ # (e.g. to change trust settings for reading .hg/hgrc)
+ cfgs = _parseconfig(ui, _earlygetopt(['--config'], args))
+
+ # check for cwd
+ cwd = _earlygetopt(['--cwd'], args)
+ if cwd:
+ os.chdir(cwd[-1])
+
+ rpath = _earlygetopt(["-R", "--repository", "--repo"], args)
+ path, lui = _getlocal(ui, rpath)
+
+ # Now that we're operating in the right directory/repository with
+ # the right config settings, check for shell aliases
+ shellaliasfn = _checkshellalias(lui, ui, args)
+ if shellaliasfn:
+ return shellaliasfn()
+
+ # Configure extensions in phases: uisetup, extsetup, cmdtable, and
+ # reposetup. Programs like TortoiseHg will call _dispatch several
+ # times so we keep track of configured extensions in _loaded.
+ extensions.loadall(lui)
+ exts = [ext for ext in extensions.extensions() if ext[0] not in _loaded]
+ # Propagate any changes to lui.__class__ by extensions
+ ui.__class__ = lui.__class__
+
+ # (uisetup and extsetup are handled in extensions.loadall)
+
+ for name, module in exts:
+ cmdtable = getattr(module, 'cmdtable', {})
+ overrides = [cmd for cmd in cmdtable if cmd in commands.table]
+ if overrides:
+ ui.warn(_("extension '%s' overrides commands: %s\n")
+ % (name, " ".join(overrides)))
+ commands.table.update(cmdtable)
+ _loaded.add(name)
+
+ # (reposetup is handled in hg.repository)
+
+ addaliases(lui, commands.table)
+
+ # check for fallback encoding
+ fallback = lui.config('ui', 'fallbackencoding')
+ if fallback:
+ encoding.fallbackencoding = fallback
+
+ fullargs = args
+ cmd, func, args, options, cmdoptions = _parse(lui, args)
+
+ if options["config"]:
+ raise util.Abort(_("option --config may not be abbreviated!"))
+ if options["cwd"]:
+ raise util.Abort(_("option --cwd may not be abbreviated!"))
+ if options["repository"]:
+ raise util.Abort(_(
+ "Option -R has to be separated from other options (e.g. not -qR) "
+ "and --repository may only be abbreviated as --repo!"))
+
+ if options["encoding"]:
+ encoding.encoding = options["encoding"]
+ if options["encodingmode"]:
+ encoding.encodingmode = options["encodingmode"]
+ if options["time"]:
+ def get_times():
+ t = os.times()
+ if t[4] == 0.0: # Windows leaves this as zero, so use time.clock()
+ t = (t[0], t[1], t[2], t[3], time.clock())
+ return t
+ s = get_times()
+ def print_time():
+ t = get_times()
+ ui.warn(_("Time: real %.3f secs (user %.3f+%.3f sys %.3f+%.3f)\n") %
+ (t[4]-s[4], t[0]-s[0], t[2]-s[2], t[1]-s[1], t[3]-s[3]))
+ atexit.register(print_time)
+
+ uis = set([ui, lui])
+
+ if req.repo:
+ uis.add(req.repo.ui)
+
+ # copy configs that were passed on the cmdline (--config) to the repo ui
+ for cfg in cfgs:
+ req.repo.ui.setconfig(*cfg)
+
+ if options['verbose'] or options['debug'] or options['quiet']:
+ for opt in ('verbose', 'debug', 'quiet'):
+ val = str(bool(options[opt]))
+ for ui_ in uis:
+ ui_.setconfig('ui', opt, val)
+
+ if options['traceback']:
+ for ui_ in uis:
+ ui_.setconfig('ui', 'traceback', 'on')
+
+ if options['noninteractive']:
+ for ui_ in uis:
+ ui_.setconfig('ui', 'interactive', 'off')
+
+ if cmdoptions.get('insecure', False):
+ for ui_ in uis:
+ ui_.setconfig('web', 'cacerts', '')
+
+ if options['version']:
+ return commands.version_(ui)
+ if options['help']:
+ return commands.help_(ui, cmd)
+ elif not cmd:
+ return commands.help_(ui, 'shortlist')
+
+ repo = None
+ cmdpats = args[:]
+ if cmd not in commands.norepo.split():
+ # use the repo from the request only if we don't have -R
+ if not rpath and not cwd:
+ repo = req.repo
+
+ if repo:
+ # set the descriptors of the repo ui to those of ui
+ repo.ui.fin = ui.fin
+ repo.ui.fout = ui.fout
+ repo.ui.ferr = ui.ferr
+ else:
+ try:
+ repo = hg.repository(ui, path=path)
+ if not repo.local():
+ raise util.Abort(_("repository '%s' is not local") % path)
+ repo.ui.setconfig("bundle", "mainreporoot", repo.root)
+ except error.RequirementError:
+ raise
+ except error.RepoError:
+ if cmd not in commands.optionalrepo.split():
+ if args and not path: # try to infer -R from command args
+ repos = map(cmdutil.findrepo, args)
+ guess = repos[0]
+ if guess and repos.count(guess) == len(repos):
+ req.args = ['--repository', guess] + fullargs
+ return _dispatch(req)
+ if not path:
+ raise error.RepoError(_("no repository found in '%s'"
+ " (.hg not found)") % os.getcwd())
+ raise
+ if repo:
+ ui = repo.ui
+ args.insert(0, repo)
+ elif rpath:
+ ui.warn(_("warning: --repository ignored\n"))
+
+ msg = ' '.join(' ' in a and repr(a) or a for a in fullargs)
+ ui.log("command", msg + "\n")
+ d = lambda: util.checksignature(func)(ui, *args, **cmdoptions)
+ try:
+ return runcommand(lui, repo, cmd, fullargs, ui, options, d,
+ cmdpats, cmdoptions)
+ finally:
+ if repo and repo != req.repo:
+ repo.close()
+
+def _runcommand(ui, options, cmd, cmdfunc):
+ def checkargs():
+ try:
+ return cmdfunc()
+ except error.SignatureError:
+ raise error.CommandError(cmd, _("invalid arguments"))
+
+ if options['profile']:
+ format = ui.config('profiling', 'format', default='text')
+
+ if not format in ['text', 'kcachegrind']:
+ ui.warn(_("unrecognized profiling format '%s'"
+ " - Ignored\n") % format)
+ format = 'text'
+
+ output = ui.config('profiling', 'output')
+
+ if output:
+ path = ui.expandpath(output)
+ ostream = open(path, 'wb')
+ else:
+ ostream = sys.stderr
+
+ try:
+ from mercurial import lsprof
+ except ImportError:
+ raise util.Abort(_(
+ 'lsprof not available - install from '
+ 'http://codespeak.net/svn/user/arigo/hack/misc/lsprof/'))
+ p = lsprof.Profiler()
+ p.enable(subcalls=True)
+ try:
+ return checkargs()
+ finally:
+ p.disable()
+
+ if format == 'kcachegrind':
+ import lsprofcalltree
+ calltree = lsprofcalltree.KCacheGrind(p)
+ calltree.output(ostream)
+ else:
+ # format == 'text'
+ stats = lsprof.Stats(p.getstats())
+ stats.sort()
+ stats.pprint(top=10, file=ostream, climit=5)
+
+ if output:
+ ostream.close()
+ else:
+ return checkargs()
diff --git a/websdk/mercurial/encoding.py b/websdk/mercurial/encoding.py
index 6af0112..f6197b2 120000..100644
--- a/websdk/mercurial/encoding.py
+++ b/websdk/mercurial/encoding.py
@@ -1 +1,173 @@
-/usr/share/pyshared/mercurial/encoding.py \ No newline at end of file
+# encoding.py - character transcoding support for Mercurial
+#
+# Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+import error
+import unicodedata, locale, os
+
+def _getpreferredencoding():
+ '''
+ On darwin, getpreferredencoding ignores the locale environment and
+ always returns mac-roman. http://bugs.python.org/issue6202 fixes this
+ for Python 2.7 and up. This is the same corrected code for earlier
+ Python versions.
+
+ However, we can't use a version check for this method, as some distributions
+ patch Python to fix this. Instead, we use it as a 'fixer' for the mac-roman
+ encoding, as it is unlikely that this encoding is the one actually expected.
+ '''
+ try:
+ locale.CODESET
+ except AttributeError:
+ # Fall back to parsing environment variables :-(
+ return locale.getdefaultlocale()[1]
+
+ oldloc = locale.setlocale(locale.LC_CTYPE)
+ locale.setlocale(locale.LC_CTYPE, "")
+ result = locale.nl_langinfo(locale.CODESET)
+ locale.setlocale(locale.LC_CTYPE, oldloc)
+
+ return result
+
+_encodingfixers = {
+ '646': lambda: 'ascii',
+ 'ANSI_X3.4-1968': lambda: 'ascii',
+ 'mac-roman': _getpreferredencoding
+}
+
+try:
+ encoding = os.environ.get("HGENCODING")
+ if not encoding:
+ encoding = locale.getpreferredencoding() or 'ascii'
+ encoding = _encodingfixers.get(encoding, lambda: encoding)()
+except locale.Error:
+ encoding = 'ascii'
+encodingmode = os.environ.get("HGENCODINGMODE", "strict")
+fallbackencoding = 'ISO-8859-1'
+
+class localstr(str):
+ '''This class allows strings that are unmodified to be
+ round-tripped to the local encoding and back'''
+ def __new__(cls, u, l):
+ s = str.__new__(cls, l)
+ s._utf8 = u
+ return s
+ def __hash__(self):
+ return hash(self._utf8) # avoid collisions in local string space
+
+def tolocal(s):
+ """
+ Convert a string from internal UTF-8 to local encoding
+
+ All internal strings should be UTF-8 but some repos before the
+ implementation of locale support may contain latin1 or possibly
+ other character sets. We attempt to decode everything strictly
+ using UTF-8, then Latin-1, and failing that, we use UTF-8 and
+ replace unknown characters.
+
+ The localstr class is used to cache the known UTF-8 encoding of
+ strings next to their local representation to allow lossless
+ round-trip conversion back to UTF-8.
+
+ >>> u = 'foo: \\xc3\\xa4' # utf-8
+ >>> l = tolocal(u)
+ >>> l
+ 'foo: ?'
+ >>> fromlocal(l)
+ 'foo: \\xc3\\xa4'
+ >>> u2 = 'foo: \\xc3\\xa1'
+ >>> d = { l: 1, tolocal(u2): 2 }
+ >>> d # no collision
+ {'foo: ?': 1, 'foo: ?': 2}
+ >>> 'foo: ?' in d
+ False
+ >>> l1 = 'foo: \\xe4' # historical latin1 fallback
+ >>> l = tolocal(l1)
+ >>> l
+ 'foo: ?'
+ >>> fromlocal(l) # magically in utf-8
+ 'foo: \\xc3\\xa4'
+ """
+
+ for e in ('UTF-8', fallbackencoding):
+ try:
+ u = s.decode(e) # attempt strict decoding
+ r = u.encode(encoding, "replace")
+ if u == r.decode(encoding):
+ # r is a safe, non-lossy encoding of s
+ return r
+ elif e == 'UTF-8':
+ return localstr(s, r)
+ else:
+ return localstr(u.encode('UTF-8'), r)
+
+ except LookupError, k:
+ raise error.Abort("%s, please check your locale settings" % k)
+ except UnicodeDecodeError:
+ pass
+ u = s.decode("utf-8", "replace") # last ditch
+ return u.encode(encoding, "replace") # can't round-trip
+
+def fromlocal(s):
+ """
+ Convert a string from the local character encoding to UTF-8
+
+ We attempt to decode strings using the encoding mode set by
+ HGENCODINGMODE, which defaults to 'strict'. In this mode, unknown
+ characters will cause an error message. Other modes include
+ 'replace', which replaces unknown characters with a special
+ Unicode character, and 'ignore', which drops the character.
+ """
+
+ # can we do a lossless round-trip?
+ if isinstance(s, localstr):
+ return s._utf8
+
+ try:
+ return s.decode(encoding, encodingmode).encode("utf-8")
+ except UnicodeDecodeError, inst:
+ sub = s[max(0, inst.start - 10):inst.start + 10]
+ raise error.Abort("decoding near '%s': %s!" % (sub, inst))
+ except LookupError, k:
+ raise error.Abort("%s, please check your locale settings" % k)
+
+# How to treat ambiguous-width characters. Set to 'wide' to treat as wide.
+wide = (os.environ.get("HGENCODINGAMBIGUOUS", "narrow") == "wide"
+ and "WFA" or "WF")
+
+def colwidth(s):
+ "Find the column width of a string for display in the local encoding"
+ return ucolwidth(s.decode(encoding, 'replace'))
+
+def ucolwidth(d):
+ "Find the column width of a Unicode string for display"
+ eaw = getattr(unicodedata, 'east_asian_width', None)
+ if eaw is not None:
+ return sum([eaw(c) in wide and 2 or 1 for c in d])
+ return len(d)
+
+def getcols(s, start, c):
+ '''Use colwidth to find a c-column substring of s starting at byte
+ index start'''
+ for x in xrange(start + c, len(s)):
+ t = s[start:x]
+ if colwidth(t) == c:
+ return t
+
+def lower(s):
+ "best-effort encoding-aware case-folding of local string s"
+ try:
+ if isinstance(s, localstr):
+ u = s._utf8.decode("utf-8")
+ else:
+ u = s.decode(encoding, encodingmode)
+
+ lu = u.lower()
+ if u == lu:
+ return s # preserve localstring
+ return lu.encode(encoding)
+ except UnicodeError:
+ return s.lower() # we don't know how to fold this except in ASCII
diff --git a/websdk/mercurial/error.py b/websdk/mercurial/error.py
index 06ca817..462b971 120000..100644
--- a/websdk/mercurial/error.py
+++ b/websdk/mercurial/error.py
@@ -1 +1,90 @@
-/usr/share/pyshared/mercurial/error.py \ No newline at end of file
+# error.py - Mercurial exceptions
+#
+# Copyright 2005-2008 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+"""Mercurial exceptions.
+
+This allows us to catch exceptions at higher levels without forcing
+imports.
+"""
+
+# Do not import anything here, please
+
+class RevlogError(Exception):
+ pass
+
+class LookupError(RevlogError, KeyError):
+ def __init__(self, name, index, message):
+ self.name = name
+ if isinstance(name, str) and len(name) == 20:
+ from node import short
+ name = short(name)
+ RevlogError.__init__(self, '%s@%s: %s' % (index, name, message))
+
+ def __str__(self):
+ return RevlogError.__str__(self)
+
+class CommandError(Exception):
+ """Exception raised on errors in parsing the command line."""
+
+class Abort(Exception):
+ """Raised if a command needs to print an error and exit."""
+ def __init__(self, *args, **kw):
+ Exception.__init__(self, *args)
+ self.hint = kw.get('hint')
+
+class ConfigError(Abort):
+ 'Exception raised when parsing config files'
+
+class OutOfBandError(Exception):
+ 'Exception raised when a remote repo reports failure'
+
+class ParseError(Exception):
+ 'Exception raised when parsing config files (msg[, pos])'
+
+class RepoError(Exception):
+ def __init__(self, *args, **kw):
+ Exception.__init__(self, *args)
+ self.hint = kw.get('hint')
+
+class RepoLookupError(RepoError):
+ pass
+
+class CapabilityError(RepoError):
+ pass
+
+class RequirementError(RepoError):
+ """Exception raised if .hg/requires has an unknown entry."""
+ pass
+
+class LockError(IOError):
+ def __init__(self, errno, strerror, filename, desc):
+ IOError.__init__(self, errno, strerror, filename)
+ self.desc = desc
+
+class LockHeld(LockError):
+ def __init__(self, errno, filename, desc, locker):
+ LockError.__init__(self, errno, 'Lock held', filename, desc)
+ self.locker = locker
+
+class LockUnavailable(LockError):
+ pass
+
+class ResponseError(Exception):
+ """Raised to print an error with part of output and exit."""
+
+class UnknownCommand(Exception):
+ """Exception raised if command is not in the command table."""
+
+class AmbiguousCommand(Exception):
+ """Exception raised if command shortcut matches more than one command."""
+
+# derived from KeyboardInterrupt to simplify some breakout code
+class SignalInterrupt(KeyboardInterrupt):
+ """Exception raised on SIGTERM and SIGHUP."""
+
+class SignatureError(Exception):
+ pass
diff --git a/websdk/mercurial/extensions.py b/websdk/mercurial/extensions.py
index 85006cf..9cfb49d 120000..100644
--- a/websdk/mercurial/extensions.py
+++ b/websdk/mercurial/extensions.py
@@ -1 +1,355 @@
-/usr/share/pyshared/mercurial/extensions.py \ No newline at end of file
+# extensions.py - extension handling for mercurial
+#
+# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+import imp, os
+import util, cmdutil, error
+from i18n import _, gettext
+
# module-level registry of loaded extensions: name -> module (None while loading)
_extensions = {}
# load order of extension names; preserved so setup hooks run in order
_order = []
# extensions that are now part of core and must not be loaded again
_ignore = ['hbisect', 'bookmarks', 'parentrevspec']

def extensions():
    '''yield (name, module) for every successfully loaded extension'''
    for name in _order:
        module = _extensions[name]
        if module:
            yield name, module

def find(name):
    '''return module with given extension name'''
    mod = None
    try:
        mod = _extensions[name]
    except KeyError:
        # fall back to matching by path-like suffix (e.g. "dir/name")
        for k, v in _extensions.iteritems():
            if k.endswith('.' + name) or k.endswith('/' + name):
                mod = v
                break
    if not mod:
        raise KeyError(name)
    return mod

def loadpath(path, module_name):
    '''load a python module or package from an explicit filesystem path'''
    # dots would be interpreted as package separators by the import machinery
    module_name = module_name.replace('.', '_')
    path = util.expandpath(path)
    if os.path.isdir(path):
        # module/__init__.py style
        d, f = os.path.split(path.rstrip('/'))
        fd, fpath, desc = imp.find_module(f, [d])
        return imp.load_module(module_name, fd, fpath, desc)
    else:
        return imp.load_source(module_name, path)
+
def load(ui, name, path):
    '''load the extension named `name` (optionally from `path`) and
    register it; returns the module, or None if the name is ignored'''
    # unused ui argument kept for backwards compatibility
    if name.startswith('hgext.') or name.startswith('hgext/'):
        shortname = name[6:]
    else:
        shortname = name
    if shortname in _ignore:
        return None
    if shortname in _extensions:
        return _extensions[shortname]
    # mark as "being loaded" so recursive loads short-circuit
    _extensions[shortname] = None
    if path:
        # the module will be loaded in sys.modules
        # choose an unique name so that it doesn't
        # conflicts with other modules
        mod = loadpath(path, 'hgext.%s' % name)
    else:
        def importh(name):
            # walk down dotted components since __import__ returns the root
            mod = __import__(name)
            components = name.split('.')
            for comp in components[1:]:
                mod = getattr(mod, comp)
            return mod
        try:
            mod = importh("hgext.%s" % name)
        except ImportError, err:
            ui.debug('could not import hgext.%s (%s): trying %s\n'
                     % (name, err, name))
            mod = importh(name)
    _extensions[shortname] = mod
    _order.append(shortname)
    return mod
+
def loadall(ui):
    '''load all extensions listed in the [extensions] config section,
    then run their uisetup() and extsetup() hooks in load order'''
    result = ui.configitems("extensions")
    newindex = len(_order)
    for (name, path) in result:
        if path:
            # a path starting with '!' explicitly disables the extension
            if path[0] == '!':
                continue
        try:
            load(ui, name, path)
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            # a broken extension must not prevent hg from starting
            if path:
                ui.warn(_("*** failed to import extension %s from %s: %s\n")
                        % (name, path, inst))
            else:
                ui.warn(_("*** failed to import extension %s: %s\n")
                        % (name, inst))
            if ui.traceback():
                return 1

    # run uisetup for all newly loaded extensions before any extsetup
    for name in _order[newindex:]:
        uisetup = getattr(_extensions[name], 'uisetup', None)
        if uisetup:
            uisetup(ui)

    for name in _order[newindex:]:
        extsetup = getattr(_extensions[name], 'extsetup', None)
        if extsetup:
            try:
                extsetup(ui)
            except TypeError:
                if extsetup.func_code.co_argcount != 0:
                    raise
                extsetup() # old extsetup with no ui argument
+
def wrapcommand(table, command, wrapper):
    '''Wrap the command named `command' in table

    Replace command in the command table with wrapper. The wrapped command will
    be inserted into the command table specified by the table argument.

    The wrapper will be called like

        wrapper(orig, *args, **kwargs)

    where orig is the original (wrapped) function, and *args, **kwargs
    are the arguments passed to it.

    Returns the original command table entry.
    '''
    assert util.safehasattr(wrapper, '__call__')
    # findcmd resolves aliases; recover the actual table key for the entry
    aliases, entry = cmdutil.findcmd(command, table)
    for alias, e in table.iteritems():
        if e is entry:
            key = alias
            break

    origfn = entry[0]
    def wrap(*args, **kwargs):
        # checksignature converts mismatched call signatures into clean errors
        return util.checksignature(wrapper)(
            util.checksignature(origfn), *args, **kwargs)

    # preserve help text and origin of the wrapped command
    wrap.__doc__ = getattr(origfn, '__doc__')
    wrap.__module__ = getattr(origfn, '__module__')

    newentry = list(entry)
    newentry[0] = wrap
    table[key] = tuple(newentry)
    return entry
+
def wrapfunction(container, funcname, wrapper):
    '''Wrap the function named funcname in container

    Replace the funcname member in the given container with the specified
    wrapper. The container is typically a module, class, or instance.

    The wrapper will be called like

        wrapper(orig, *args, **kwargs)

    where orig is the original (wrapped) function, and *args, **kwargs
    are the arguments passed to it.

    Wrapping methods of the repository object is not recommended since
    it conflicts with extensions that extend the repository by
    subclassing. All extensions that need to extend methods of
    localrepository should use this subclassing trick: namely,
    reposetup() should look like

        def reposetup(ui, repo):
            class myrepo(repo.__class__):
                def whatever(self, *args, **kwargs):
                    [...extension stuff...]
                    super(myrepo, self).whatever(*args, **kwargs)
                    [...extension stuff...]

            repo.__class__ = myrepo

    In general, combining wrapfunction() with subclassing does not
    work. Since you cannot control what other extensions are loaded by
    your end users, you should play nicely with others by using the
    subclass trick.

    Returns the original (unwrapped) function.
    '''
    assert util.safehasattr(wrapper, '__call__')
    def wrap(*args, **kwargs):
        # origfn is bound below, before the wrapper can ever be invoked
        return wrapper(origfn, *args, **kwargs)

    origfn = getattr(container, funcname)
    assert util.safehasattr(origfn, '__call__')
    setattr(container, funcname, wrap)
    return origfn
+
def _disabledpaths(strip_init=False):
    '''find paths of disabled extensions. returns a dict of {name: path}
    removes /__init__.py from packages if strip_init is True'''
    import hgext
    extpath = os.path.dirname(os.path.abspath(hgext.__file__))
    try: # might not be a filesystem path
        files = os.listdir(extpath)
    except OSError:
        return {}

    exts = {}
    for e in files:
        if e.endswith('.py'):
            # plain module: name is the filename without extension
            name = e.rsplit('.', 1)[0]
            path = os.path.join(extpath, e)
        else:
            # possible package: only counts if it has an __init__.py
            name = e
            path = os.path.join(extpath, e, '__init__.py')
            if not os.path.exists(path):
                continue
        if strip_init:
            path = os.path.dirname(path)
        # skip already-enabled extensions and the hgext package marker
        if name in exts or name in _order or name == '__init__':
            continue
        exts[name] = path
    return exts
+
+def _moduledoc(file):
+ '''return the top-level python documentation for the given file
+
+ Loosely inspired by pydoc.source_synopsis(), but rewritten to
+ handle triple quotes and to return the whole text instead of just
+ the synopsis'''
+ result = []
+
+ line = file.readline()
+ while line[:1] == '#' or not line.strip():
+ line = file.readline()
+ if not line:
+ break
+
+ start = line[:3]
+ if start == '"""' or start == "'''":
+ line = line[3:]
+ while line:
+ if line.rstrip().endswith(start):
+ line = line.split(start)[0]
+ if line:
+ result.append(line)
+ break
+ elif not line:
+ return None # unmatched delimiter
+ result.append(line)
+ line = file.readline()
+ else:
+ return None
+
+ return ''.join(result)
+
def _disabledhelp(path):
    '''retrieve help synopsis of a disabled extension (without importing)'''
    try:
        file = open(path)
    except IOError:
        # unreadable file: no help available, report nothing
        return
    else:
        doc = _moduledoc(file)
        file.close()

    if doc: # extracting localized synopsis
        # only the first line of the (translated) module docstring
        return gettext(doc).splitlines()[0]
    else:
        return _('(no help text available)')
+
def disabled():
    '''find disabled extensions from hgext. returns a dict of {name: desc}'''
    try:
        # frozen builds ship a pre-generated index instead of source files
        from hgext import __index__
        return dict((name, gettext(desc))
                    for name, desc in __index__.docs.iteritems()
                    if name not in _order)
    except ImportError:
        pass

    paths = _disabledpaths()
    if not paths:
        return None

    exts = {}
    for name, path in paths.iteritems():
        doc = _disabledhelp(path)
        if doc:
            exts[name] = doc

    return exts

def disabledext(name):
    '''find a specific disabled extension from hgext. returns desc'''
    try:
        from hgext import __index__
        if name in _order:  # enabled
            return
        else:
            return gettext(__index__.docs.get(name))
    except ImportError:
        pass

    paths = _disabledpaths()
    if name in paths:
        return _disabledhelp(paths[name])

def disabledcmd(ui, cmd, strict=False):
    '''import disabled extensions until cmd is found.
    returns (cmdname, extname, doc)'''

    paths = _disabledpaths(strip_init=True)
    if not paths:
        raise error.UnknownCommand(cmd)

    def findcmd(cmd, name, path):
        # best-effort probe: any failure simply means "not in this extension"
        try:
            mod = loadpath(path, 'hgext.%s' % name)
        except Exception:
            return
        try:
            aliases, entry = cmdutil.findcmd(cmd,
                getattr(mod, 'cmdtable', {}), strict)
        except (error.AmbiguousCommand, error.UnknownCommand):
            return
        except Exception:
            ui.warn(_('warning: error finding commands in %s\n') % path)
            ui.traceback()
            return
        # prefer an alias that extends the abbreviated cmd the user typed
        for c in aliases:
            if c.startswith(cmd):
                cmd = c
                break
        else:
            cmd = aliases[0]
        return (cmd, name, mod)

    # first, search for an extension with the same name as the command
    path = paths.pop(cmd, None)
    if path:
        ext = findcmd(cmd, cmd, path)
        if ext:
            return ext

    # otherwise, interrogate each extension until there's a match
    for name, path in paths.iteritems():
        ext = findcmd(cmd, name, path)
        if ext:
            return ext

    raise error.UnknownCommand(cmd)

def enabled():
    '''return a dict of {name: desc} of extensions'''
    exts = {}
    for ename, ext in extensions():
        doc = (gettext(ext.__doc__) or _('(no help text available)'))
        # strip any package prefix from the displayed name
        ename = ename.split('.')[-1]
        exts[ename] = doc.splitlines()[0].strip()

    return exts
diff --git a/websdk/mercurial/fancyopts.py b/websdk/mercurial/fancyopts.py
index bce1d13..ae18083 120000..100644
--- a/websdk/mercurial/fancyopts.py
+++ b/websdk/mercurial/fancyopts.py
@@ -1 +1,117 @@
-/usr/share/pyshared/mercurial/fancyopts.py \ No newline at end of file
+# fancyopts.py - better command line parsing
+#
+# Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+import getopt
+
def gnugetopt(args, options, longoptions):
    """Parse options mostly like getopt.gnu_getopt.

    This is different from getopt.gnu_getopt in that an argument of - will
    become an argument of - instead of vanishing completely.
    """
    # everything after a bare '--' is kept verbatim as positional arguments
    trailing = []
    if '--' in args:
        cut = args.index('--')
        trailing = args[cut + 1:]
        args = args[:cut]

    opts, remaining = getopt.getopt(args, options, longoptions)
    positional = []
    while remaining:
        token = remaining.pop(0)
        if len(token) > 1 and token[:1] == '-':
            # looks like an option: push it back and reparse the tail
            remaining.insert(0, token)
            more, remaining = getopt.getopt(remaining, options, longoptions)
            opts.extend(more)
        else:
            # plain argument (including a lone '-'): keep it
            positional.append(token)
    positional.extend(trailing)
    return opts, positional
+
+
def fancyopts(args, options, state, gnu=False):
    """
    read args, parse options, and store options in state

    each option is a tuple of:

      short option or ''
      long option
      default value
      description
      option value label(optional)

    option types include:

      boolean or none - option sets variable in state to true
      string - parameter string is stored in state
      list - parameter string is added to a list
      integer - parameter strings is stored as int
      function - call function with parameter

    non-option args are returned
    """
    namelist = []
    shortlist = ''
    argmap = {}
    defmap = {}

    for option in options:
        if len(option) == 5:
            short, name, default, comment, dummy = option
        else:
            short, name, default, comment = option
        # convert opts to getopt format
        oname = name
        name = name.replace('-', '_')

        argmap['-' + short] = argmap['--' + oname] = name
        defmap[name] = default

        # copy defaults to state
        if isinstance(default, list):
            state[name] = default[:]  # copy so the caller's list isn't mutated
        elif callable(default):
            state[name] = None
        else:
            state[name] = default

        # does it take a parameter? identity checks are deliberate:
        # 0 and '' are real defaults that DO take a value, unlike
        # None/True/False which mark boolean flags
        if not (default is None or default is True or default is False):
            if short:
                short += ':'
            if oname:
                oname += '='
        if short:
            shortlist += short
        if oname:
            namelist.append(oname)

    # parse arguments
    parse = gnugetopt if gnu else getopt.getopt
    opts, args = parse(args, shortlist, namelist)

    # transfer result to state, converting according to the default's type
    for opt, val in opts:
        name = argmap[opt]
        t = type(defmap[name])
        if t is type(fancyopts):
            state[name] = defmap[name](val)     # function default: call it
        elif t is type(1):
            state[name] = int(val)              # integer default: convert
        elif t is type(''):
            state[name] = val                   # string default: store as-is
        elif t is type([]):
            state[name].append(val)             # list default: accumulate
        elif t is type(None) or t is type(False):
            state[name] = True                  # flag: presence means True

    # return unparsed args
    return args
diff --git a/websdk/mercurial/filelog.py b/websdk/mercurial/filelog.py
index 302d041..33f3d0c 120000..100644
--- a/websdk/mercurial/filelog.py
+++ b/websdk/mercurial/filelog.py
@@ -1 +1,92 @@
-/usr/share/pyshared/mercurial/filelog.py \ No newline at end of file
+# filelog.py - file history class for mercurial
+#
+# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+import revlog
+import re
+
+_mdre = re.compile('\1\n')
+def _parsemeta(text):
+ """return (metadatadict, keylist, metadatasize)"""
+ # text can be buffer, so we can't use .startswith or .index
+ if text[:2] != '\1\n':
+ return None, None, None
+ s = _mdre.search(text, 2).start()
+ mtext = text[2:s]
+ meta = {}
+ keys = []
+ for l in mtext.splitlines():
+ k, v = l.split(": ", 1)
+ meta[k] = v
+ keys.append(k)
+ return meta, keys, (s + 2)
+
+def _packmeta(meta, keys=None):
+ if not keys:
+ keys = sorted(meta.iterkeys())
+ return "".join("%s: %s\n" % (k, meta[k]) for k in keys)
+
class filelog(revlog.revlog):
    # revlog holding the history of a single tracked file, stored under
    # the "data/" prefix of the store
    def __init__(self, opener, path):
        revlog.revlog.__init__(self, opener,
                               "/".join(("data", path + ".i")))

    def read(self, node):
        # return the revision text with any leading copy-metadata
        # block ("\1\n...\1\n") stripped
        t = self.revision(node)
        if not t.startswith('\1\n'):
            return t
        s = t.index('\1\n', 2)
        return t[s + 2:]

    def add(self, text, meta, transaction, link, p1=None, p2=None):
        # texts that happen to start with "\1\n" must be escaped with an
        # (empty) metadata block so read() can round-trip them
        if meta or text.startswith('\1\n'):
            text = "\1\n%s\1\n%s" % (_packmeta(meta), text)
        return self.addrevision(text, transaction, link, p1, p2)

    def renamed(self, node):
        # return (oldpath, oldnode) if this revision records a copy/rename,
        # else False; only parentless revisions can carry copy metadata
        if self.parents(node)[0] != revlog.nullid:
            return False
        t = self.revision(node)
        m = _parsemeta(t)[0]
        if m and "copy" in m:
            return (m["copy"], revlog.bin(m["copyrev"]))
        return False

    def size(self, rev):
        """return the size of a given revision"""

        # for revisions with renames, we have to go the slow way
        node = self.node(rev)
        if self.renamed(node):
            return len(self.read(node))

        # XXX if self.read(node).startswith("\1\n"), this returns (size+4)
        return revlog.revlog.size(self, rev)

    def cmp(self, node, text):
        """compare text with a given file revision

        returns True if text is different than what is stored.
        """

        # escape texts starting with the metadata sentinel, mirroring add()
        t = text
        if text.startswith('\1\n'):
            t = '\1\n\1\n' + text

        samehashes = not revlog.revlog.cmp(self, node, t)
        if samehashes:
            return False

        # renaming a file produces a different hash, even if the data
        # remains unchanged. Check if it's the case (slow):
        if self.renamed(node):
            t2 = self.read(node)
            return t2 != text

        return True

    def _file(self, f):
        # open a sibling filelog through the same opener
        return filelog(self.opener, f)
diff --git a/websdk/mercurial/filemerge.py b/websdk/mercurial/filemerge.py
index 9179d38..bff2158 120000..100644
--- a/websdk/mercurial/filemerge.py
+++ b/websdk/mercurial/filemerge.py
@@ -1 +1,271 @@
-/usr/share/pyshared/mercurial/filemerge.py \ No newline at end of file
+# filemerge.py - file-level merge handling for Mercurial
+#
+# Copyright 2006, 2007, 2008 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from node import short
+from i18n import _
+import util, simplemerge, match, error
+import os, tempfile, re, filecmp
+
def _toolstr(ui, tool, part, default=""):
    # read a string setting "tool.part" from the [merge-tools] section
    return ui.config("merge-tools", tool + "." + part, default)

def _toolbool(ui, tool, part, default=False):
    # read a boolean setting "tool.part" from the [merge-tools] section
    return ui.configbool("merge-tools", tool + "." + part, default)

def _toollist(ui, tool, part, default=[]):
    # read a list setting "tool.part" from the [merge-tools] section
    return ui.configlist("merge-tools", tool + "." + part, default)

# names of the built-in merge strategies
_internal = ['internal:' + s
             for s in 'fail local other merge prompt dump'.split()]
+
def _findtool(ui, tool):
    '''resolve a merge tool name to an executable path (or the name
    itself for internal tools); returns None if not found'''
    if tool in _internal:
        return tool
    # on Windows, tools may be located via registry keys
    for kn in ("regkey", "regkeyalt"):
        k = _toolstr(ui, tool, kn)
        if not k:
            continue
        p = util.lookupreg(k, _toolstr(ui, tool, "regname"))
        if p:
            p = util.findexe(p + _toolstr(ui, tool, "regappend"))
            if p:
                return p
    # fall back to the configured executable (default: the tool name)
    exe = _toolstr(ui, tool, "executable", tool)
    return util.findexe(util.expandpath(exe))
+
def _picktool(repo, ui, path, binary, symlink):
    '''choose a merge tool for the given file; returns (toolname, quoted
    executable path or None)'''
    def check(tool, pat, symlink, binary):
        # verify the tool exists and can handle this file's properties
        tmsg = tool
        if pat:
            tmsg += " specified for " + pat
        if not _findtool(ui, tool):
            if pat: # explicitly requested tool deserves a warning
                ui.warn(_("couldn't find merge tool %s\n") % tmsg)
            else: # configured but non-existing tools are more silent
                ui.note(_("couldn't find merge tool %s\n") % tmsg)
        elif symlink and not _toolbool(ui, tool, "symlink"):
            ui.warn(_("tool %s can't handle symlinks\n") % tmsg)
        elif binary and not _toolbool(ui, tool, "binary"):
            ui.warn(_("tool %s can't handle binary\n") % tmsg)
        elif not util.gui() and _toolbool(ui, tool, "gui"):
            ui.warn(_("tool %s requires a GUI\n") % tmsg)
        else:
            return True
        return False

    # forcemerge comes from command line arguments, highest priority
    force = ui.config('ui', 'forcemerge')
    if force:
        toolpath = _findtool(ui, force)
        if toolpath:
            return (force, '"' + toolpath + '"')
        else:
            # mimic HGMERGE if given tool not found
            return (force, force)

    # HGMERGE takes next precedence
    hgmerge = os.environ.get("HGMERGE")
    if hgmerge:
        return (hgmerge, hgmerge)

    # then patterns
    for pat, tool in ui.configitems("merge-patterns"):
        mf = match.match(repo.root, '', [pat])
        if mf(path) and check(tool, pat, symlink, False):
            toolpath = _findtool(ui, tool)
            return (tool, '"' + toolpath + '"')

    # then merge tools
    tools = {}
    for k, v in ui.configitems("merge-tools"):
        t = k.split('.')[0]
        if t not in tools:
            tools[t] = int(_toolstr(ui, t, "priority", "0"))
    names = tools.keys()
    # sort by descending priority
    tools = sorted([(-p, t) for t, p in tools.items()])
    uimerge = ui.config("ui", "merge")
    if uimerge:
        if uimerge not in names:
            return (uimerge, uimerge)
        tools.insert(0, (None, uimerge)) # highest priority
    tools.append((None, "hgmerge")) # the old default, if found
    for p, t in tools:
        if check(t, None, symlink, binary):
            toolpath = _findtool(ui, t)
            return (t, '"' + toolpath + '"')
    # internal merge as last resort
    return (not (symlink or binary) and "internal:merge" or None, None)
+
+def _eoltype(data):
+ "Guess the EOL type of a file"
+ if '\0' in data: # binary
+ return None
+ if '\r\n' in data: # Windows
+ return '\r\n'
+ if '\r' in data: # Old Mac
+ return '\r'
+ if '\n' in data: # UNIX
+ return '\n'
+ return None # unknown
+
def _matcheol(file, origfile):
    "Convert EOL markers in a file to match origfile"
    tostyle = _eoltype(util.readfile(origfile))
    if tostyle:
        data = util.readfile(file)
        style = _eoltype(data)
        if style:
            newdata = data.replace(style, tostyle)
            # avoid rewriting the file when nothing changed
            if newdata != data:
                util.writefile(file, newdata)
+
def filemerge(repo, mynode, orig, fcd, fco, fca):
    """perform a 3-way merge in the working directory

    mynode = parent node before merge
    orig = original local filename before merge
    fco = other file context
    fca = ancestor file context
    fcd = local file context for current/destination file

    Returns None if the files were identical, 0 on a successful merge
    and 1 if the merge failed or is unresolved.
    """

    def temp(prefix, ctx):
        # write ctx's data to a temp file and return its name
        pre = "%s~%s." % (os.path.basename(ctx.path()), prefix)
        (fd, name) = tempfile.mkstemp(prefix=pre)
        data = repo.wwritedata(ctx.path(), ctx.data())
        f = os.fdopen(fd, "wb")
        f.write(data)
        f.close()
        return name

    def isbin(ctx):
        try:
            return util.binary(ctx.data())
        except IOError:
            return False

    if not fco.cmp(fcd): # files identical?
        return None

    ui = repo.ui
    fd = fcd.path()
    binary = isbin(fcd) or isbin(fco) or isbin(fca)
    symlink = 'l' in fcd.flags() + fco.flags()
    tool, toolpath = _picktool(repo, ui, fd, binary, symlink)
    ui.debug("picked tool '%s' for %s (binary %s symlink %s)\n" %
             (tool, fd, binary, symlink))

    # handle the internal strategies that need no external process
    if not tool or tool == 'internal:prompt':
        tool = "internal:local"
        if ui.promptchoice(_(" no tool found to merge %s\n"
                             "keep (l)ocal or take (o)ther?") % fd,
                           (_("&Local"), _("&Other")), 0):
            tool = "internal:other"
    if tool == "internal:local":
        return 0
    if tool == "internal:other":
        repo.wwrite(fd, fco.data(), fco.flags())
        return 0
    if tool == "internal:fail":
        return 1

    # do the actual merge
    a = repo.wjoin(fd)
    b = temp("base", fca)
    c = temp("other", fco)
    out = ""
    back = a + ".orig"
    util.copyfile(a, back)

    if orig != fco.path():
        ui.status(_("merging %s and %s to %s\n") % (orig, fco.path(), fd))
    else:
        ui.status(_("merging %s\n") % fd)

    ui.debug("my %s other %s ancestor %s\n" % (fcd, fco, fca))

    # do we attempt to simplemerge first?
    try:
        premerge = _toolbool(ui, tool, "premerge", not (binary or symlink))
    except error.ConfigError:
        # non-boolean premerge values: only 'keep' is accepted
        premerge = _toolstr(ui, tool, "premerge").lower()
        valid = 'keep'.split()
        if premerge not in valid:
            _valid = ', '.join(["'" + v + "'" for v in valid])
            raise error.ConfigError(_("%s.premerge not valid "
                                      "('%s' is neither boolean nor %s)") %
                                    (tool, premerge, _valid))

    if premerge:
        r = simplemerge.simplemerge(ui, a, b, c, quiet=True)
        if not r:
            ui.debug(" premerge successful\n")
            os.unlink(back)
            os.unlink(b)
            os.unlink(c)
            return 0
        if premerge != 'keep':
            util.copyfile(back, a) # restore from backup and try again

    # environment exposed to external merge tools
    env = dict(HG_FILE=fd,
               HG_MY_NODE=short(mynode),
               HG_OTHER_NODE=str(fco.changectx()),
               HG_BASE_NODE=str(fca.changectx()),
               HG_MY_ISLINK='l' in fcd.flags(),
               HG_OTHER_ISLINK='l' in fco.flags(),
               HG_BASE_ISLINK='l' in fca.flags())

    if tool == "internal:merge":
        r = simplemerge.simplemerge(ui, a, b, c, label=['local', 'other'])
    elif tool == 'internal:dump':
        a = repo.wjoin(fd)
        util.copyfile(a, a + ".local")
        repo.wwrite(fd + ".other", fco.data(), fco.flags())
        repo.wwrite(fd + ".base", fca.data(), fca.flags())
        return 1 # unresolved
    else:
        args = _toolstr(ui, tool, "args", '$local $base $other')
        if "$output" in args:
            out, a = a, back # read input from backup, write to original
        replace = dict(local=a, base=b, other=c, output=out)
        args = util.interpolate(r'\$', replace, args,
                                lambda s: '"%s"' % util.localpath(s))
        r = util.system(toolpath + ' ' + args, cwd=repo.root, environ=env,
                        out=ui.fout)

    # post-merge sanity checks configured per tool
    if not r and (_toolbool(ui, tool, "checkconflicts") or
                  'conflicts' in _toollist(ui, tool, "check")):
        if re.search("^(<<<<<<< .*|=======|>>>>>>> .*)$", fcd.data(),
                     re.MULTILINE):
            r = 1

    checked = False
    if 'prompt' in _toollist(ui, tool, "check"):
        checked = True
        if ui.promptchoice(_("was merge of '%s' successful (yn)?") % fd,
                           (_("&Yes"), _("&No")), 1):
            r = 1

    if not r and not checked and (_toolbool(ui, tool, "checkchanged") or
                                  'changed' in _toollist(ui, tool, "check")):
        # unchanged output may indicate the tool silently did nothing
        if filecmp.cmp(repo.wjoin(fd), back):
            if ui.promptchoice(_(" output file %s appears unchanged\n"
                                 "was merge successful (yn)?") % fd,
                               (_("&Yes"), _("&No")), 1):
                r = 1

    if _toolbool(ui, tool, "fixeol"):
        _matcheol(repo.wjoin(fd), back)

    if r:
        ui.warn(_("merging %s failed!\n") % fd)
    else:
        os.unlink(back)

    os.unlink(b)
    os.unlink(c)
    return r
diff --git a/websdk/mercurial/fileset.py b/websdk/mercurial/fileset.py
new file mode 100644
index 0000000..95a2c17
--- /dev/null
+++ b/websdk/mercurial/fileset.py
@@ -0,0 +1,440 @@
+# fileset.py - file set queries for mercurial
+#
+# Copyright 2010 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+import parser, error, util, merge, re
+from i18n import _
+
# token table consumed by parser.parser: maps each token to
# (binding strength, prefix rule, infix rule) — see mercurial's parser module
elements = {
    "(": (20, ("group", 1, ")"), ("func", 1, ")")),
    "-": (5, ("negate", 19), ("minus", 5)),
    "not": (10, ("not", 10)),
    "!": (10, ("not", 10)),
    "and": (5, None, ("and", 5)),
    "&": (5, None, ("and", 5)),
    "or": (4, None, ("or", 4)),
    "|": (4, None, ("or", 4)),
    "+": (4, None, ("or", 4)),
    ",": (2, None, ("list", 2)),
    ")": (0, None, None),
    "symbol": (0, ("symbol",), None),
    "string": (0, ("string",), None),
    "end": (0, None, None),
}

# words treated as operators rather than symbols by the tokenizer
keywords = set(['and', 'or', 'not'])

# characters allowed in bare (unquoted) glob-like symbols
globchars = ".*{}[]?/\\"
+
def tokenize(program):
    '''generate (type, value, position) tokens for a fileset expression;
    the stream always ends with an ('end', None, pos) token'''
    pos, l = 0, len(program)
    while pos < l:
        c = program[pos]
        if c.isspace(): # skip inter-token whitespace
            pass
        elif c in "(),-|&+!": # handle simple operators
            yield (c, None, pos)
        elif (c in '"\'' or c == 'r' and
              program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
            if c == 'r':
                # raw string: no escape processing
                pos += 1
                c = program[pos]
                decode = lambda x: x
            else:
                decode = lambda x: x.decode('string-escape')
            pos += 1
            s = pos
            while pos < l: # find closing quote
                d = program[pos]
                if d == '\\': # skip over escaped characters
                    pos += 2
                    continue
                if d == c:
                    yield ('string', decode(program[s:pos]), s)
                    break
                pos += 1
            else:
                raise error.ParseError(_("unterminated string"), s)
        elif c.isalnum() or c in globchars or ord(c) > 127:
            # gather up a symbol/keyword
            s = pos
            pos += 1
            while pos < l: # find end of symbol
                d = program[pos]
                if not (d.isalnum() or d in globchars or ord(d) > 127):
                    break
                pos += 1
            sym = program[s:pos]
            if sym in keywords: # operator keywords
                yield (sym, None, s)
            else:
                yield ('symbol', sym, s)
            # back up one: the shared pos += 1 below advances past the
            # character that terminated the symbol
            pos -= 1
        else:
            raise error.ParseError(_("syntax error"), pos)
        pos += 1
    yield ('end', None, pos)

# module-level parser entry point built from the grammar above
parse = parser.parser(tokenize, elements).parse
+
def getstring(x, err):
    '''extract the text of a parsed string or bare symbol node, raising
    ParseError(err) for anything else'''
    if x and x[0] in ('string', 'symbol'):
        return x[1]
    raise error.ParseError(err)
+
def getset(mctx, x):
    # evaluate a parse tree node against mctx via the methods table
    if not x:
        raise error.ParseError(_("missing argument"))
    return methods[x[0]](mctx, *x[1:])

def stringset(mctx, x):
    # a bare string/symbol acts as a match pattern over the subset
    m = mctx.matcher([x])
    return [f for f in mctx.subset if m(f)]

def andset(mctx, x, y):
    # evaluate y within the subset produced by x
    return getset(mctx.narrow(getset(mctx, x)), y)

def orset(mctx, x, y):
    # needs optimizing
    xl = getset(mctx, x)
    yl = getset(mctx, y)
    return xl + [f for f in yl if f not in xl]

def notset(mctx, x):
    s = set(getset(mctx, x))
    return [r for r in mctx.subset if r not in s]

def listset(mctx, a, b):
    # bare comma-lists are only meaningful inside function arguments
    raise error.ParseError(_("can't use a list in this context"))
+
# the predicates below index into mctx.status(), whose tuple layout is
# (modified, added, removed, deleted, unknown, ignored, clean)
def modified(mctx, x):
    """``modified()``
    File that is modified according to status.
    """
    # i18n: "modified" is a keyword
    getargs(x, 0, 0, _("modified takes no arguments"))
    s = mctx.status()[0]
    return [f for f in mctx.subset if f in s]

def added(mctx, x):
    """``added()``
    File that is added according to status.
    """
    # i18n: "added" is a keyword
    getargs(x, 0, 0, _("added takes no arguments"))
    s = mctx.status()[1]
    return [f for f in mctx.subset if f in s]

def removed(mctx, x):
    """``removed()``
    File that is removed according to status.
    """
    # i18n: "removed" is a keyword
    getargs(x, 0, 0, _("removed takes no arguments"))
    s = mctx.status()[2]
    return [f for f in mctx.subset if f in s]

def deleted(mctx, x):
    """``deleted()``
    File that is deleted according to status.
    """
    # i18n: "deleted" is a keyword
    getargs(x, 0, 0, _("deleted takes no arguments"))
    s = mctx.status()[3]
    return [f for f in mctx.subset if f in s]

def unknown(mctx, x):
    """``unknown()``
    File that is unknown according to status. These files will only be
    considered if this predicate is used.
    """
    # i18n: "unknown" is a keyword
    getargs(x, 0, 0, _("unknown takes no arguments"))
    s = mctx.status()[4]
    return [f for f in mctx.subset if f in s]

def ignored(mctx, x):
    """``ignored()``
    File that is ignored according to status. These files will only be
    considered if this predicate is used.
    """
    # i18n: "ignored" is a keyword
    getargs(x, 0, 0, _("ignored takes no arguments"))
    s = mctx.status()[5]
    return [f for f in mctx.subset if f in s]

def clean(mctx, x):
    """``clean()``
    File that is clean according to status.
    """
    # i18n: "clean" is a keyword
    getargs(x, 0, 0, _("clean takes no arguments"))
    s = mctx.status()[6]
    return [f for f in mctx.subset if f in s]
+
def func(mctx, a, b):
    # dispatch a parsed function call to its predicate in the symbols table
    if a[0] == 'symbol' and a[1] in symbols:
        return symbols[a[1]](mctx, b)
    raise error.ParseError(_("not a function: %s") % a[1])
+
def getlist(x):
    '''flatten a parsed comma-list node into a python list of its
    element nodes; a non-list node becomes a one-element list'''
    if not x:
        return []
    if x[0] != 'list':
        return [x]
    # 'list' nodes nest to the left: ('list', rest, last)
    return getlist(x[1]) + [x[2]]
+
def getargs(x, min, max, err):
    # flatten the argument node and enforce an arity range [min, max]
    l = getlist(x)
    if len(l) < min or len(l) > max:
        raise error.ParseError(err)
    return l
+
def binary(mctx, x):
    """``binary()``
    File that appears to be binary (contains NUL bytes).
    """
    # i18n: "binary" is a keyword
    getargs(x, 0, 0, _("binary takes no arguments"))
    return [f for f in mctx.subset if util.binary(mctx.ctx[f].data())]

def exec_(mctx, x):
    """``exec()``
    File that is marked as executable.
    """
    # i18n: "exec" is a keyword
    getargs(x, 0, 0, _("exec takes no arguments"))
    return [f for f in mctx.subset if mctx.ctx.flags(f) == 'x']

def symlink(mctx, x):
    """``symlink()``
    File that is marked as a symlink.
    """
    # i18n: "symlink" is a keyword
    getargs(x, 0, 0, _("symlink takes no arguments"))
    return [f for f in mctx.subset if mctx.ctx.flags(f) == 'l']

def resolved(mctx, x):
    """``resolved()``
    File that is marked resolved according to the resolve state.
    """
    # i18n: "resolved" is a keyword
    getargs(x, 0, 0, _("resolved takes no arguments"))
    # resolve state only exists for the working directory (rev() is None)
    if mctx.ctx.rev() is not None:
        return []
    ms = merge.mergestate(mctx.ctx._repo)
    return [f for f in mctx.subset if f in ms and ms[f] == 'r']

def unresolved(mctx, x):
    """``unresolved()``
    File that is marked unresolved according to the resolve state.
    """
    # i18n: "unresolved" is a keyword
    getargs(x, 0, 0, _("unresolved takes no arguments"))
    if mctx.ctx.rev() is not None:
        return []
    ms = merge.mergestate(mctx.ctx._repo)
    return [f for f in mctx.subset if f in ms and ms[f] == 'u']

def hgignore(mctx, x):
    """``hgignore()``
    File that matches the active .hgignore pattern.
    """
    getargs(x, 0, 0, _("hgignore takes no arguments"))
    ignore = mctx.ctx._repo.dirstate._ignore
    return [f for f in mctx.subset if ignore(f)]

def grep(mctx, x):
    """``grep(regex)``
    File contains the given regular expression.
    """
    pat = getstring(x, _("grep requires a pattern"))
    r = re.compile(pat)
    return [f for f in mctx.subset if r.search(mctx.ctx[f].data())]
+
+_units = dict(k=2**10, K=2**10, kB=2**10, KB=2**10,
+ M=2**20, MB=2**20, G=2**30, GB=2**30)
+
+def _sizetoint(s):
+ try:
+ s = s.strip()
+ for k, v in _units.items():
+ if s.endswith(k):
+ return int(float(s[:-len(k)]) * v)
+ return int(s)
+ except ValueError:
+ raise error.ParseError(_("couldn't parse size: %s") % s)
+
+def _sizetomax(s):
+ try:
+ s = s.strip()
+ for k, v in _units.items():
+ if s.endswith(k):
+ # max(4k) = 5k - 1, max(4.5k) = 4.6k - 1
+ n = s[:-len(k)]
+ inc = 1.0
+ if "." in n:
+ inc /= 10 ** len(n.split(".")[1])
+ return int((float(n) + inc) * v) - 1
+ # no extension, this is a precise value
+ return int(s)
+ except ValueError:
+ raise error.ParseError(_("couldn't parse size: %s") % s)
+
def size(mctx, x):
    """``size(expression)``
    File size matches the given expression. Examples:

    - 1k (files from 1024 to 2047 bytes)
    - < 20k (files less than 20480 bytes)
    - >= .5MB (files at least 524288 bytes)
    - 4k - 1MB (files from 4096 bytes to 1048576 bytes)
    """

    # i18n: "size" is a keyword
    expr = getstring(x, _("size requires an expression")).strip()
    if '-' in expr: # do we have a range?
        a, b = expr.split('-', 1)
        a = _sizetoint(a)
        b = _sizetoint(b)
        m = lambda x: x >= a and x <= b
    elif expr.startswith("<="):
        a = _sizetoint(expr[2:])
        m = lambda x: x <= a
    elif expr.startswith("<"):
        a = _sizetoint(expr[1:])
        m = lambda x: x < a
    elif expr.startswith(">="):
        a = _sizetoint(expr[2:])
        m = lambda x: x >= a
    elif expr.startswith(">"):
        a = _sizetoint(expr[1:])
        m = lambda x: x > a
    elif expr[0].isdigit() or expr[0] == '.':
        # bugfix: isdigit was previously referenced without calling it
        # (``expr[0].isdigit``), so the bound method object was always
        # truthy, this branch was taken unconditionally and the error
        # case below was unreachable
        a = _sizetoint(expr)
        b = _sizetomax(expr)
        m = lambda x: x >= a and x <= b
    else:
        raise error.ParseError(_("couldn't parse size: %s") % expr)

    return [f for f in mctx.subset if m(mctx.ctx[f].size())]
+
def encoding(mctx, x):
    """``encoding(name)``
    File can be successfully decoded with the given character
    encoding. May not be useful for encodings other than ASCII and
    UTF-8.
    """

    # i18n: "encoding" is a keyword
    enc = getstring(x, _("encoding requires an encoding name"))

    s = []
    for f in mctx.subset:
        d = mctx.ctx[f].data()
        try:
            d.decode(enc)
        except LookupError:
            # the codec itself doesn't exist: abort rather than skip
            raise util.Abort(_("unknown encoding '%s'") % enc)
        except UnicodeDecodeError:
            # file isn't valid in this encoding: exclude it
            continue
        s.append(f)

    return s
+
def copied(mctx, x):
    """``copied()``
    File that is recorded as being copied.
    """
    # i18n: "copied" is a keyword
    getargs(x, 0, 0, _("copied takes no arguments"))
    s = []
    for f in mctx.subset:
        # a filectx parent with a different path records a copy/rename
        p = mctx.ctx[f].parents()
        if p and p[0].path() != f:
            s.append(f)
    return s
+
# predicate name -> implementation; consulted by func() above
symbols = {
    'added': added,
    'binary': binary,
    'clean': clean,
    'copied': copied,
    'deleted': deleted,
    'encoding': encoding,
    'exec': exec_,
    'grep': grep,
    'ignored': ignored,
    'hgignore': hgignore,
    'modified': modified,
    'removed': removed,
    'resolved': resolved,
    'size': size,
    'symlink': symlink,
    'unknown': unknown,
    'unresolved': unresolved,
}

# parse tree node type -> evaluator; consulted by getset()
methods = {
    'string': stringset,
    'symbol': stringset,
    'and': andset,
    'or': orset,
    'list': listset,
    'group': getset,
    'not': notset,
    'func': func,
}
+
class matchctx(object):
    '''evaluation context: a changectx plus the candidate file subset and
    an optional precomputed status tuple'''
    def __init__(self, ctx, subset=None, status=None):
        self.ctx = ctx
        self.subset = subset
        self._status = status

    def status(self):
        # precomputed repo status, or None when no predicate needed it
        return self._status

    def matcher(self, patterns):
        return self.ctx.match(patterns)

    def filter(self, files):
        # keep only files that are part of the current candidate subset
        wanted = self.subset
        return [f for f in files if f in wanted]

    def narrow(self, files):
        # derive a context restricted to the given files
        return matchctx(self.ctx, self.filter(files), self._status)
+
+def _intree(funcs, tree):
+ if isinstance(tree, tuple):
+ if tree[0] == 'func' and tree[1][0] == 'symbol':
+ if tree[1][1] in funcs:
+ return True
+ for s in tree[1:]:
+ if _intree(funcs, s):
+ return True
+ return False
+
def getfileset(ctx, expr):
    '''parse and evaluate a fileset expression against a changectx,
    returning the list of matching files'''
    tree, pos = parse(expr)
    if (pos != len(expr)):
        raise error.ParseError(_("invalid token"), pos)

    # do we need status info?
    if _intree(['modified', 'added', 'removed', 'deleted',
                'unknown', 'ignored', 'clean'], tree):
        # unknown/ignored files are expensive: only request them if used
        unknown = _intree(['unknown'], tree)
        ignored = _intree(['ignored'], tree)

        r = ctx._repo
        status = r.status(ctx.p1(), ctx,
                          unknown=unknown, ignored=ignored, clean=True)
        # candidate set is every file mentioned in any status category
        subset = []
        for c in status:
            subset.extend(c)
    else:
        status = None
        subset = ctx.walk(ctx.match([]))

    return getset(matchctx(ctx, subset, status), tree)
+
+# tell hggettext to extract docstrings from these functions:
+i18nfunctions = symbols.values()
diff --git a/websdk/mercurial/graphmod.py b/websdk/mercurial/graphmod.py
index 0fda9ed..314f2b8 120000..100644
--- a/websdk/mercurial/graphmod.py
+++ b/websdk/mercurial/graphmod.py
@@ -1 +1,139 @@
-/usr/share/pyshared/mercurial/graphmod.py \ No newline at end of file
+# Revision graph generator for Mercurial
+#
+# Copyright 2008 Dirkjan Ochtman <dirkjan@ochtman.nl>
+# Copyright 2007 Joel Rosdahl <joel@rosdahl.net>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+"""supports walking the history as DAGs suitable for graphical output
+
+The most basic format we use is that of::
+
+ (id, type, data, [parentids])
+
+The node and parent ids are arbitrary integers which identify a node in the
+context of the graph returned. Type is a constant specifying the node type.
+Data depends on type.
+"""
+
+from mercurial.node import nullrev
+
+CHANGESET = 'C'
+
+def dagwalker(repo, revs):
+ """cset DAG generator yielding (id, CHANGESET, ctx, [parentids]) tuples
+
+ This generator function walks through revisions (which should be ordered
+ from bigger to lower). It returns a tuple for each node. The node and parent
+ ids are arbitrary integers which identify a node in the context of the graph
+ returned.
+ """
+ if not revs:
+ return
+
+ cl = repo.changelog
+ lowestrev = min(revs)
+ gpcache = {}
+
+ knownrevs = set(revs)
+ for rev in revs:
+ ctx = repo[rev]
+ parents = sorted(set([p.rev() for p in ctx.parents()
+ if p.rev() in knownrevs]))
+ mpars = [p.rev() for p in ctx.parents() if
+ p.rev() != nullrev and p.rev() not in parents]
+
+ for mpar in mpars:
+ gp = gpcache.get(mpar)
+ if gp is None:
+ gp = gpcache[mpar] = grandparent(cl, lowestrev, revs, mpar)
+ if not gp:
+ parents.append(mpar)
+ else:
+ parents.extend(g for g in gp if g not in parents)
+
+ yield (ctx.rev(), CHANGESET, ctx, parents)
+
+def nodes(repo, nodes):
+ """cset DAG generator yielding (id, CHANGESET, ctx, [parentids]) tuples
+
+ This generator function walks the given nodes. It only returns parents
+ that are in nodes, too.
+ """
+ include = set(nodes)
+ for node in nodes:
+ ctx = repo[node]
+ parents = set([p.rev() for p in ctx.parents() if p.node() in include])
+ yield (ctx.rev(), CHANGESET, ctx, sorted(parents))
+
+def colored(dag):
+ """annotates a DAG with colored edge information
+
+ For each DAG node this function emits tuples::
+
+ (id, type, data, (col, color), [(col, nextcol, color)])
+
+ with the following new elements:
+
+ - Tuple (col, color) with column and color index for the current node
+ - A list of tuples indicating the edges between the current node and its
+ parents.
+ """
+ seen = []
+ colors = {}
+ newcolor = 1
+ for (cur, type, data, parents) in dag:
+
+ # Compute seen and next
+ if cur not in seen:
+ seen.append(cur) # new head
+ colors[cur] = newcolor
+ newcolor += 1
+
+ col = seen.index(cur)
+ color = colors.pop(cur)
+ next = seen[:]
+
+ # Add parents to next
+ addparents = [p for p in parents if p not in next]
+ next[col:col + 1] = addparents
+
+ # Set colors for the parents
+ for i, p in enumerate(addparents):
+ if not i:
+ colors[p] = color
+ else:
+ colors[p] = newcolor
+ newcolor += 1
+
+ # Add edges to the graph
+ edges = []
+ for ecol, eid in enumerate(seen):
+ if eid in next:
+ edges.append((ecol, next.index(eid), colors[eid]))
+ elif eid == cur:
+ for p in parents:
+ edges.append((ecol, next.index(p), color))
+
+ # Yield and move on
+ yield (cur, type, data, (col, color), edges)
+ seen = next
+
+def grandparent(cl, lowestrev, roots, head):
+ """Return all ancestors of head in roots which revision is
+ greater or equal to lowestrev.
+ """
+ pending = set([head])
+ seen = set()
+ kept = set()
+ llowestrev = max(nullrev, lowestrev)
+ while pending:
+ r = pending.pop()
+ if r >= llowestrev and r not in seen:
+ if r in roots:
+ kept.add(r)
+ else:
+ pending.update([p for p in cl.parentrevs(r)])
+ seen.add(r)
+ return sorted(kept)
diff --git a/websdk/mercurial/hbisect.py b/websdk/mercurial/hbisect.py
index a38c013..bce6030 120000..100644
--- a/websdk/mercurial/hbisect.py
+++ b/websdk/mercurial/hbisect.py
@@ -1 +1,258 @@
-/usr/share/pyshared/mercurial/hbisect.py \ No newline at end of file
+# changelog bisection for mercurial
+#
+# Copyright 2007 Matt Mackall
+# Copyright 2005, 2006 Benoit Boissinot <benoit.boissinot@ens-lyon.org>
+#
+# Inspired by git bisect, extension skeleton taken from mq.py.
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+import os, error
+from i18n import _
+from node import short, hex
+import util
+
+def bisect(changelog, state):
+ """find the next node (if any) for testing during a bisect search.
+ returns a (nodes, number, good) tuple.
+
+ 'nodes' is the final result of the bisect if 'number' is 0.
+ Otherwise 'number' indicates the remaining possible candidates for
+ the search and 'nodes' contains the next bisect target.
+ 'good' is True if bisect is searching for a first good changeset, False
+ if searching for a first bad one.
+ """
+
+ clparents = changelog.parentrevs
+ skip = set([changelog.rev(n) for n in state['skip']])
+
+ def buildancestors(bad, good):
+ # only the earliest bad revision matters
+ badrev = min([changelog.rev(n) for n in bad])
+ goodrevs = [changelog.rev(n) for n in good]
+ goodrev = min(goodrevs)
+ # build visit array
+ ancestors = [None] * (len(changelog) + 1) # an extra for [-1]
+
+ # set nodes descended from goodrevs
+ for rev in goodrevs:
+ ancestors[rev] = []
+ for rev in xrange(goodrev + 1, len(changelog)):
+ for prev in clparents(rev):
+ if ancestors[prev] == []:
+ ancestors[rev] = []
+
+ # clear good revs from array
+ for rev in goodrevs:
+ ancestors[rev] = None
+ for rev in xrange(len(changelog), goodrev, -1):
+ if ancestors[rev] is None:
+ for prev in clparents(rev):
+ ancestors[prev] = None
+
+ if ancestors[badrev] is None:
+ return badrev, None
+ return badrev, ancestors
+
+ good = False
+ badrev, ancestors = buildancestors(state['bad'], state['good'])
+ if not ancestors: # looking for bad to good transition?
+ good = True
+ badrev, ancestors = buildancestors(state['good'], state['bad'])
+ bad = changelog.node(badrev)
+ if not ancestors: # now we're confused
+ if len(state['bad']) == 1 and len(state['good']) == 1:
+ raise util.Abort(_("starting revisions are not directly related"))
+ raise util.Abort(_("inconsistent state, %s:%s is good and bad")
+ % (badrev, short(bad)))
+
+ # build children dict
+ children = {}
+ visit = [badrev]
+ candidates = []
+ while visit:
+ rev = visit.pop(0)
+ if ancestors[rev] == []:
+ candidates.append(rev)
+ for prev in clparents(rev):
+ if prev != -1:
+ if prev in children:
+ children[prev].append(rev)
+ else:
+ children[prev] = [rev]
+ visit.append(prev)
+
+ candidates.sort()
+ # have we narrowed it down to one entry?
+ # or have all other possible candidates besides 'bad' been skipped?
+ tot = len(candidates)
+ unskipped = [c for c in candidates if (c not in skip) and (c != badrev)]
+ if tot == 1 or not unskipped:
+ return ([changelog.node(rev) for rev in candidates], 0, good)
+ perfect = tot // 2
+
+ # find the best node to test
+ best_rev = None
+ best_len = -1
+ poison = set()
+ for rev in candidates:
+ if rev in poison:
+ # poison children
+ poison.update(children.get(rev, []))
+ continue
+
+ a = ancestors[rev] or [rev]
+ ancestors[rev] = None
+
+ x = len(a) # number of ancestors
+ y = tot - x # number of non-ancestors
+ value = min(x, y) # how good is this test?
+ if value > best_len and rev not in skip:
+ best_len = value
+ best_rev = rev
+ if value == perfect: # found a perfect candidate? quit early
+ break
+
+ if y < perfect and rev not in skip: # all downhill from here?
+ # poison children
+ poison.update(children.get(rev, []))
+ continue
+
+ for c in children.get(rev, []):
+ if ancestors[c]:
+ ancestors[c] = list(set(ancestors[c] + a))
+ else:
+ ancestors[c] = a + [c]
+
+ assert best_rev is not None
+ best_node = changelog.node(best_rev)
+
+ return ([best_node], tot, good)
+
+
+def load_state(repo):
+ state = {'good': [], 'bad': [], 'skip': []}
+ if os.path.exists(repo.join("bisect.state")):
+ for l in repo.opener("bisect.state"):
+ kind, node = l[:-1].split()
+ node = repo.lookup(node)
+ if kind not in state:
+ raise util.Abort(_("unknown bisect kind %s") % kind)
+ state[kind].append(node)
+ return state
+
+
+def save_state(repo, state):
+ f = repo.opener("bisect.state", "w", atomictemp=True)
+ wlock = repo.wlock()
+ try:
+ for kind in state:
+ for node in state[kind]:
+ f.write("%s %s\n" % (kind, hex(node)))
+ f.close()
+ finally:
+ wlock.release()
+
+def get(repo, status):
+ """
+ Return a list of revision(s) that match the given status:
+
+ - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
+ - ``goods``, ``bads`` : csets topologicaly good/bad
+ - ``range`` : csets taking part in the bisection
+ - ``pruned`` : csets that are goods, bads or skipped
+ - ``untested`` : csets whose fate is yet unknown
+ - ``ignored`` : csets ignored due to DAG topology
+ """
+ state = load_state(repo)
+ if status in ('good', 'bad', 'skip'):
+ return [repo.changelog.rev(n) for n in state[status]]
+ else:
+ # In the following sets, we do *not* call 'bisect()' with more
+ # than one level of recursion, because that can be very, very
+ # time consuming. Instead, we always develop the expression as
+ # much as possible.
+
+ # 'range' is all csets that make the bisection:
+ # - have a good ancestor and a bad descendant, or conversely
+ # that's because the bisection can go either way
+ range = '( bisect(bad)::bisect(good) | bisect(good)::bisect(bad) )'
+
+ _t = [c.rev() for c in repo.set('bisect(good)::bisect(bad)')]
+ # The sets of topologically good or bad csets
+ if len(_t) == 0:
+ # Goods are topologically after bads
+ goods = 'bisect(good)::' # Pruned good csets
+ bads = '::bisect(bad)' # Pruned bad csets
+ else:
+ # Goods are topologically before bads
+ goods = '::bisect(good)' # Pruned good csets
+ bads = 'bisect(bad)::' # Pruned bad csets
+
+ # 'pruned' is all csets whose fate is already known: good, bad, skip
+ skips = 'bisect(skip)' # Pruned skipped csets
+ pruned = '( (%s) | (%s) | (%s) )' % (goods, bads, skips)
+
+ # 'untested' is all csets that are in 'range', but not in 'pruned'
+ untested = '( (%s) - (%s) )' % (range, pruned)
+
+ # 'ignored' is all csets that were not used during the bisection
+ # due to DAG topology, but may however have had an impact.
+ # E.g., a branch merged between bads and goods, but whose
+ # branch point is outside of the range.
+ iba = '::bisect(bad) - ::bisect(good)' # Ignored bads' ancestors
+ iga = '::bisect(good) - ::bisect(bad)' # Ignored goods' ancestors
+ ignored = '( ( (%s) | (%s) ) - (%s) )' % (iba, iga, range)
+
+ if status == 'range':
+ return [c.rev() for c in repo.set(range)]
+ elif status == 'pruned':
+ return [c.rev() for c in repo.set(pruned)]
+ elif status == 'untested':
+ return [c.rev() for c in repo.set(untested)]
+ elif status == 'ignored':
+ return [c.rev() for c in repo.set(ignored)]
+ elif status == "goods":
+ return [c.rev() for c in repo.set(goods)]
+ elif status == "bads":
+ return [c.rev() for c in repo.set(bads)]
+
+ else:
+ raise error.ParseError(_('invalid bisect state'))
+
+def label(repo, node, short=False):
+ rev = repo.changelog.rev(node)
+
+ # Try explicit sets
+ if rev in get(repo, 'good'):
+ # i18n: bisect changeset status
+ return _('good')
+ if rev in get(repo, 'bad'):
+ # i18n: bisect changeset status
+ return _('bad')
+ if rev in get(repo, 'skip'):
+ # i18n: bisect changeset status
+ return _('skipped')
+ if rev in get(repo, 'untested'):
+ # i18n: bisect changeset status
+ return _('untested')
+ if rev in get(repo, 'ignored'):
+ # i18n: bisect changeset status
+ return _('ignored')
+
+ # Try implicit sets
+ if rev in get(repo, 'goods'):
+ # i18n: bisect changeset status
+ return _('good (implicit)')
+ if rev in get(repo, 'bads'):
+ # i18n: bisect changeset status
+ return _('bad (implicit)')
+
+ return None
+
+def shortlabel(label):
+ if label:
+ return label[0].upper()
+
+ return None
diff --git a/websdk/mercurial/help.py b/websdk/mercurial/help.py
index 687e967..26da99c 120000..100644
--- a/websdk/mercurial/help.py
+++ b/websdk/mercurial/help.py
@@ -1 +1,109 @@
-/usr/share/pyshared/mercurial/help.py \ No newline at end of file
+# help.py - help data for mercurial
+#
+# Copyright 2006 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from i18n import gettext, _
+import sys, os
+import extensions, revset, fileset, templatekw, templatefilters
+import util
+
+def listexts(header, exts, indent=1):
+ '''return a text listing of the given extensions'''
+ if not exts:
+ return ''
+ maxlength = max(len(e) for e in exts)
+ result = '\n%s\n\n' % header
+ for name, desc in sorted(exts.iteritems()):
+ result += '%s%-*s %s\n' % (' ' * indent, maxlength + 2,
+ ':%s:' % name, desc)
+ return result
+
+def extshelp():
+ doc = loaddoc('extensions')()
+ doc += listexts(_('enabled extensions:'), extensions.enabled())
+ doc += listexts(_('disabled extensions:'), extensions.disabled())
+ return doc
+
+def loaddoc(topic):
+ """Return a delayed loader for help/topic.txt."""
+
+ def loader():
+ if util.mainfrozen():
+ module = sys.executable
+ else:
+ module = __file__
+ base = os.path.dirname(module)
+
+ for dir in ('.', '..'):
+ docdir = os.path.join(base, dir, 'help')
+ if os.path.isdir(docdir):
+ break
+
+ path = os.path.join(docdir, topic + ".txt")
+ doc = gettext(util.readfile(path))
+ for rewriter in helphooks.get(topic, []):
+ doc = rewriter(topic, doc)
+ return doc
+
+ return loader
+
+helptable = sorted([
+ (["config", "hgrc"], _("Configuration Files"), loaddoc('config')),
+ (["dates"], _("Date Formats"), loaddoc('dates')),
+ (["patterns"], _("File Name Patterns"), loaddoc('patterns')),
+ (['environment', 'env'], _('Environment Variables'),
+ loaddoc('environment')),
+ (['revs', 'revisions'], _('Specifying Single Revisions'),
+ loaddoc('revisions')),
+ (['mrevs', 'multirevs'], _('Specifying Multiple Revisions'),
+ loaddoc('multirevs')),
+ (['revset', 'revsets'], _("Specifying Revision Sets"), loaddoc('revsets')),
+ (['fileset', 'filesets'], _("Specifying File Sets"), loaddoc('filesets')),
+ (['diffs'], _('Diff Formats'), loaddoc('diffs')),
+ (['merge-tools'], _('Merge Tools'), loaddoc('merge-tools')),
+ (['templating', 'templates'], _('Template Usage'),
+ loaddoc('templates')),
+ (['urls'], _('URL Paths'), loaddoc('urls')),
+ (["extensions"], _("Using additional features"), extshelp),
+ (["subrepo", "subrepos"], _("Subrepositories"), loaddoc('subrepos')),
+ (["hgweb"], _("Configuring hgweb"), loaddoc('hgweb')),
+ (["glossary"], _("Glossary"), loaddoc('glossary')),
+ (["hgignore", "ignore"], _("syntax for Mercurial ignore files"),
+ loaddoc('hgignore')),
+])
+
+# Map topics to lists of callable taking the current topic help and
+# returning the updated version
+helphooks = {}
+
+def addtopichook(topic, rewriter):
+ helphooks.setdefault(topic, []).append(rewriter)
+
+def makeitemsdoc(topic, doc, marker, items):
+ """Extract docstring from the items key to function mapping, build a
+ single documentation block and use it to overwrite the marker in doc
+ """
+ entries = []
+ for name in sorted(items):
+ text = (items[name].__doc__ or '').rstrip()
+ if not text:
+ continue
+ text = gettext(text)
+ lines = text.splitlines()
+ lines[1:] = [(' ' + l.strip()) for l in lines[1:]]
+ entries.append('\n'.join(lines))
+ entries = '\n\n'.join(entries)
+ return doc.replace(marker, entries)
+
+def addtopicsymbols(topic, marker, symbols):
+ def add(topic, doc):
+ return makeitemsdoc(topic, doc, marker, symbols)
+ addtopichook(topic, add)
+
+addtopicsymbols('filesets', '.. predicatesmarker', fileset.symbols)
+addtopicsymbols('revsets', '.. predicatesmarker', revset.symbols)
+addtopicsymbols('templates', '.. keywordsmarker', templatekw.keywords)
+addtopicsymbols('templates', '.. filtersmarker', templatefilters.filters)
diff --git a/websdk/mercurial/help/config.txt b/websdk/mercurial/help/config.txt
new file mode 100644
index 0000000..d259600
--- /dev/null
+++ b/websdk/mercurial/help/config.txt
@@ -0,0 +1,1312 @@
+The Mercurial system uses a set of configuration files to control
+aspects of its behavior.
+
+The configuration files use a simple ini-file format. A configuration
+file consists of sections, led by a ``[section]`` header and followed
+by ``name = value`` entries::
+
+ [ui]
+ username = Firstname Lastname <firstname.lastname@example.net>
+ verbose = True
+
+The above entries will be referred to as ``ui.username`` and
+``ui.verbose``, respectively. See the Syntax section below.
+
+Files
+-----
+
+Mercurial reads configuration data from several files, if they exist.
+These files do not exist by default and you will have to create the
+appropriate configuration files yourself: global configuration like
+the username setting is typically put into
+``%USERPROFILE%\mercurial.ini`` or ``$HOME/.hgrc`` and local
+configuration is put into the per-repository ``<repo>/.hg/hgrc`` file.
+
+The names of these files depend on the system on which Mercurial is
+installed. ``*.rc`` files from a single directory are read in
+alphabetical order, later ones overriding earlier ones. Where multiple
+paths are given below, settings from earlier paths override later
+ones.
+
+| (Unix, Windows) ``<repo>/.hg/hgrc``
+
+ Per-repository configuration options that only apply in a
+ particular repository. This file is not version-controlled, and
+ will not get transferred during a "clone" operation. Options in
+ this file override options in all other configuration files. On
+ Unix, most of this file will be ignored if it doesn't belong to a
+ trusted user or to a trusted group. See the documentation for the
+ ``[trusted]`` section below for more details.
+
+| (Unix) ``$HOME/.hgrc``
+| (Windows) ``%USERPROFILE%\.hgrc``
+| (Windows) ``%USERPROFILE%\Mercurial.ini``
+| (Windows) ``%HOME%\.hgrc``
+| (Windows) ``%HOME%\Mercurial.ini``
+
+ Per-user configuration file(s), for the user running Mercurial. On
+ Windows 9x, ``%HOME%`` is replaced by ``%APPDATA%``. Options in these
+ files apply to all Mercurial commands executed by this user in any
+ directory. Options in these files override per-system and per-installation
+ options.
+
+| (Unix) ``/etc/mercurial/hgrc``
+| (Unix) ``/etc/mercurial/hgrc.d/*.rc``
+
+ Per-system configuration files, for the system on which Mercurial
+ is running. Options in these files apply to all Mercurial commands
+ executed by any user in any directory. Options in these files
+ override per-installation options.
+
+| (Unix) ``<install-root>/etc/mercurial/hgrc``
+| (Unix) ``<install-root>/etc/mercurial/hgrc.d/*.rc``
+
+ Per-installation configuration files, searched for in the
+ directory where Mercurial is installed. ``<install-root>`` is the
+ parent directory of the **hg** executable (or symlink) being run. For
+ example, if installed in ``/shared/tools/bin/hg``, Mercurial will look
+ in ``/shared/tools/etc/mercurial/hgrc``. Options in these files apply
+ to all Mercurial commands executed by any user in any directory.
+
+| (Windows) ``<install-dir>\Mercurial.ini`` **or**
+| (Windows) ``<install-dir>\hgrc.d\*.rc`` **or**
+| (Windows) ``HKEY_LOCAL_MACHINE\SOFTWARE\Mercurial``
+
+ Per-installation/system configuration files, for the system on
+ which Mercurial is running. Options in these files apply to all
+ Mercurial commands executed by any user in any directory. Registry
+ keys contain PATH-like strings, every part of which must reference
+ a ``Mercurial.ini`` file or be a directory where ``*.rc`` files will
+ be read. Mercurial checks each of these locations in the specified
+ order until one or more configuration files are detected. If the
+ pywin32 extensions are not installed, Mercurial will only look for
+ site-wide configuration in ``C:\Mercurial\Mercurial.ini``.
+
+Syntax
+------
+
+A configuration file consists of sections, led by a ``[section]`` header
+and followed by ``name = value`` entries (sometimes called
+``configuration keys``)::
+
+ [spam]
+ eggs=ham
+ green=
+ eggs
+
+Each line contains one entry. If the lines that follow are indented,
+they are treated as continuations of that entry. Leading whitespace is
+removed from values. Empty lines are skipped. Lines beginning with
+``#`` or ``;`` are ignored and may be used to provide comments.
+
+Configuration keys can be set multiple times, in which case Mercurial
+will use the value that was configured last. As an example::
+
+ [spam]
+ eggs=large
+ ham=serrano
+ eggs=small
+
+This would set the configuration key named ``eggs`` to ``small``.
+
+It is also possible to define a section multiple times. A section can
+be redefined on the same and/or on different configuration files. For
+example::
+
+ [foo]
+ eggs=large
+ ham=serrano
+ eggs=small
+
+ [bar]
+ eggs=ham
+ green=
+ eggs
+
+ [foo]
+ ham=prosciutto
+ eggs=medium
+ bread=toasted
+
+This would set the ``eggs``, ``ham``, and ``bread`` configuration keys
+of the ``foo`` section to ``medium``, ``prosciutto``, and ``toasted``,
+respectively. As you can see the only thing that matters is the last
+value that was set for each of the configuration keys.
+
+If a configuration key is set multiple times in different
+configuration files the final value will depend on the order in which
+the different configuration files are read, with settings from earlier
+paths overriding later ones as described on the ``Files`` section
+above.
+
+A line of the form ``%include file`` will include ``file`` into the
+current configuration file. The inclusion is recursive, which means
+that included files can include other files. Filenames are relative to
+the configuration file in which the ``%include`` directive is found.
+Environment variables and ``~user`` constructs are expanded in
+``file``. This lets you do something like::
+
+ %include ~/.hgrc.d/$HOST.rc
+
+to include a different configuration file on each computer you use.
+
+A line with ``%unset name`` will remove ``name`` from the current
+section, if it has been set previously.
+
+The values are either free-form text strings, lists of text strings,
+or Boolean values. Boolean values can be set to true using any of "1",
+"yes", "true", or "on" and to false using "0", "no", "false", or "off"
+(all case insensitive).
+
+List values are separated by whitespace or comma, except when values are
+placed in double quotation marks::
+
+ allow_read = "John Doe, PhD", brian, betty
+
+Quotation marks can be escaped by prefixing them with a backslash. Only
+quotation marks at the beginning of a word are counted as a quotation
+(e.g., ``foo"bar baz`` is the list of ``foo"bar`` and ``baz``).
+
+Sections
+--------
+
+This section describes the different sections that may appear in a
+Mercurial configuration file, the purpose of each section, its possible
+keys, and their possible values.
+
+``alias``
+"""""""""
+
+Defines command aliases.
+Aliases allow you to define your own commands in terms of other
+commands (or aliases), optionally including arguments. Positional
+arguments in the form of ``$1``, ``$2``, etc in the alias definition
+are expanded by Mercurial before execution. Positional arguments not
+already used by ``$N`` in the definition are put at the end of the
+command to be executed.
+
+Alias definitions consist of lines of the form::
+
+ <alias> = <command> [<argument>]...
+
+For example, this definition::
+
+ latest = log --limit 5
+
+creates a new command ``latest`` that shows only the five most recent
+changesets. You can define subsequent aliases using earlier ones::
+
+ stable5 = latest -b stable
+
+.. note:: It is possible to create aliases with the same names as
+ existing commands, which will then override the original
+ definitions. This is almost always a bad idea!
+
+An alias can start with an exclamation point (``!``) to make it a
+shell alias. A shell alias is executed with the shell and will let you
+run arbitrary commands. As an example, ::
+
+ echo = !echo
+
+will let you do ``hg echo foo`` to have ``foo`` printed in your
+terminal. A better example might be::
+
+ purge = !$HG status --no-status --unknown -0 | xargs -0 rm
+
+which will make ``hg purge`` delete all unknown files in the
+repository in the same manner as the purge extension.
+
+Shell aliases are executed in an environment where ``$HG`` expand to
+the path of the Mercurial that was used to execute the alias. This is
+useful when you want to call further Mercurial commands in a shell
+alias, as was done above for the purge alias. In addition,
+``$HG_ARGS`` expand to the arguments given to Mercurial. In the ``hg
+echo foo`` call above, ``$HG_ARGS`` would expand to ``echo foo``.
+
+.. note:: Some global configuration options such as ``-R`` are
+ processed before shell aliases and will thus not be passed to
+ aliases.
+
+``auth``
+""""""""
+
+Authentication credentials for HTTP authentication. This section
+allows you to store usernames and passwords for use when logging
+*into* HTTP servers. See the ``[web]`` configuration section if
+you want to configure *who* can login to your HTTP server.
+
+Each line has the following format::
+
+ <name>.<argument> = <value>
+
+where ``<name>`` is used to group arguments into authentication
+entries. Example::
+
+ foo.prefix = hg.intevation.org/mercurial
+ foo.username = foo
+ foo.password = bar
+ foo.schemes = http https
+
+ bar.prefix = secure.example.org
+ bar.key = path/to/file.key
+ bar.cert = path/to/file.cert
+ bar.schemes = https
+
+Supported arguments:
+
+``prefix``
+ Either ``*`` or a URI prefix with or without the scheme part.
+ The authentication entry with the longest matching prefix is used
+ (where ``*`` matches everything and counts as a match of length
+ 1). If the prefix doesn't include a scheme, the match is performed
+ against the URI with its scheme stripped as well, and the schemes
+ argument, q.v., is then subsequently consulted.
+
+``username``
+ Optional. Username to authenticate with. If not given, and the
+ remote site requires basic or digest authentication, the user will
+ be prompted for it. Environment variables are expanded in the
+ username letting you do ``foo.username = $USER``. If the URI
+ includes a username, only ``[auth]`` entries with a matching
+ username or without a username will be considered.
+
+``password``
+ Optional. Password to authenticate with. If not given, and the
+ remote site requires basic or digest authentication, the user
+ will be prompted for it.
+
+``key``
+ Optional. PEM encoded client certificate key file. Environment
+ variables are expanded in the filename.
+
+``cert``
+ Optional. PEM encoded client certificate chain file. Environment
+ variables are expanded in the filename.
+
+``schemes``
+ Optional. Space separated list of URI schemes to use this
+ authentication entry with. Only used if the prefix doesn't include
+ a scheme. Supported schemes are http and https. They will match
+ static-http and static-https respectively, as well.
+ Default: https.
+
+If no suitable authentication entry is found, the user is prompted
+for credentials as usual if required by the remote.
+
+
+``decode/encode``
+"""""""""""""""""
+
+Filters for transforming files on checkout/checkin. This would
+typically be used for newline processing or other
+localization/canonicalization of files.
+
+Filters consist of a filter pattern followed by a filter command.
+Filter patterns are globs by default, rooted at the repository root.
+For example, to match any file ending in ``.txt`` in the root
+directory only, use the pattern ``*.txt``. To match any file ending
+in ``.c`` anywhere in the repository, use the pattern ``**.c``.
+For each file only the first matching filter applies.
+
+The filter command can start with a specifier, either ``pipe:`` or
+``tempfile:``. If no specifier is given, ``pipe:`` is used by default.
+
+A ``pipe:`` command must accept data on stdin and return the transformed
+data on stdout.
+
+Pipe example::
+
+ [encode]
+ # uncompress gzip files on checkin to improve delta compression
+ # note: not necessarily a good idea, just an example
+ *.gz = pipe: gunzip
+
+ [decode]
+ # recompress gzip files when writing them to the working dir (we
+ # can safely omit "pipe:", because it's the default)
+ *.gz = gzip
+
+A ``tempfile:`` command is a template. The string ``INFILE`` is replaced
+with the name of a temporary file that contains the data to be
+filtered by the command. The string ``OUTFILE`` is replaced with the name
+of an empty temporary file, where the filtered data must be written by
+the command.
+
+.. note:: The tempfile mechanism is recommended for Windows systems,
+ where the standard shell I/O redirection operators often have
+ strange effects and may corrupt the contents of your files.
+
+This filter mechanism is used internally by the ``eol`` extension to
+translate line ending characters between Windows (CRLF) and Unix (LF)
+format. We suggest you use the ``eol`` extension for convenience.
+
+
+``defaults``
+""""""""""""
+
+(defaults are deprecated. Don't use them. Use aliases instead)
+
+Use the ``[defaults]`` section to define command defaults, i.e. the
+default options/arguments to pass to the specified commands.
+
+The following example makes :hg:`log` run in verbose mode, and
+:hg:`status` show only the modified files, by default::
+
+ [defaults]
+ log = -v
+ status = -m
+
+The actual commands, instead of their aliases, must be used when
+defining command defaults. The command defaults will also be applied
+to the aliases of the commands defined.
+
+
+``diff``
+""""""""
+
+Settings used when displaying diffs. Everything except for ``unified`` is a
+Boolean and defaults to False.
+
+``git``
+ Use git extended diff format.
+
+``nodates``
+ Don't include dates in diff headers.
+
+``showfunc``
+ Show which function each change is in.
+
+``ignorews``
+ Ignore white space when comparing lines.
+
+``ignorewsamount``
+ Ignore changes in the amount of white space.
+
+``ignoreblanklines``
+ Ignore changes whose lines are all blank.
+
+``unified``
+ Number of lines of context to show.
+
+``email``
+"""""""""
+
+Settings for extensions that send email messages.
+
+``from``
+ Optional. Email address to use in "From" header and SMTP envelope
+ of outgoing messages.
+
+``to``
+ Optional. Comma-separated list of recipients' email addresses.
+
+``cc``
+ Optional. Comma-separated list of carbon copy recipients'
+ email addresses.
+
+``bcc``
+ Optional. Comma-separated list of blind carbon copy recipients'
+ email addresses.
+
+``method``
+ Optional. Method to use to send email messages. If value is ``smtp``
+ (default), use SMTP (see the ``[smtp]`` section for configuration).
+ Otherwise, use as name of program to run that acts like sendmail
+ (takes ``-f`` option for sender, list of recipients on command line,
+ message on stdin). Normally, setting this to ``sendmail`` or
+ ``/usr/sbin/sendmail`` is enough to use sendmail to send messages.
+
+``charsets``
+ Optional. Comma-separated list of character sets considered
+ convenient for recipients. Addresses, headers, and parts not
+ containing patches of outgoing messages will be encoded in the
+ first character set to which conversion from local encoding
+ (``$HGENCODING``, ``ui.fallbackencoding``) succeeds. If correct
+ conversion fails, the text in question is sent as is. Defaults to
+ empty (explicit) list.
+
+ Order of outgoing email character sets:
+
+ 1. ``us-ascii``: always first, regardless of settings
+ 2. ``email.charsets``: in order given by user
+ 3. ``ui.fallbackencoding``: if not in email.charsets
+ 4. ``$HGENCODING``: if not in email.charsets
+ 5. ``utf-8``: always last, regardless of settings
+
+Email example::
+
+ [email]
+ from = Joseph User <joe.user@example.com>
+ method = /usr/sbin/sendmail
+ # charsets for western Europeans
+ # us-ascii, utf-8 omitted, as they are tried first and last
+ charsets = iso-8859-1, iso-8859-15, windows-1252
+
+
+``extensions``
+""""""""""""""
+
+Mercurial has an extension mechanism for adding new features. To
+enable an extension, create an entry for it in this section.
+
+If you know that the extension is already in Python's search path,
+you can give the name of the module, followed by ``=``, with nothing
+after the ``=``.
+
+Otherwise, give a name that you choose, followed by ``=``, followed by
+the path to the ``.py`` file (including the file name extension) that
+defines the extension.
+
+To explicitly disable an extension that is enabled in an hgrc of
+broader scope, prepend its path with ``!``, as in ``foo = !/ext/path``
+or ``foo = !`` when path is not supplied.
+
+Example for ``~/.hgrc``::
+
+ [extensions]
+ # (the mq extension will get loaded from Mercurial's path)
+ mq =
+ # (this extension will get loaded from the file specified)
+ myfeature = ~/.hgext/myfeature.py
+
+
+``hostfingerprints``
+""""""""""""""""""""
+
+Fingerprints of the certificates of known HTTPS servers.
+An HTTPS connection to a server with a fingerprint configured here will
+only succeed if the server's certificate matches the fingerprint.
+This is very similar to how ssh known hosts works.
+The fingerprint is the SHA-1 hash value of the DER encoded certificate.
+The CA chain and web.cacerts are not used for servers with a fingerprint.
+
+For example::
+
+ [hostfingerprints]
+ hg.intevation.org = 38:76:52:7c:87:26:9a:8f:4a:f8:d3:de:08:45:3b:ea:d6:4b:ee:cc
+
+This feature is only supported when using Python 2.6 or later.
+
+
+``format``
+""""""""""
+
+``usestore``
+ Enable or disable the "store" repository format which improves
+ compatibility with systems that fold case or otherwise mangle
+ filenames. Enabled by default. Disabling this option will allow
+ you to store longer filenames in some situations at the expense of
+ compatibility and ensures that the on-disk format of newly created
+ repositories will be compatible with Mercurial before version 0.9.4.
+
+``usefncache``
+ Enable or disable the "fncache" repository format which enhances
+ the "store" repository format (which has to be enabled to use
+ fncache) to allow longer filenames and avoids using Windows
+ reserved names, e.g. "nul". Enabled by default. Disabling this
+ option ensures that the on-disk format of newly created
+ repositories will be compatible with Mercurial before version 1.1.
+
+``dotencode``
+ Enable or disable the "dotencode" repository format which enhances
+ the "fncache" repository format (which has to be enabled to use
+ dotencode) to avoid issues with filenames starting with ._ on
+ Mac OS X and spaces on Windows. Enabled by default. Disabling this
+ option ensures that the on-disk format of newly created
+ repositories will be compatible with Mercurial before version 1.7.
+
+``merge-patterns``
+""""""""""""""""""
+
+This section specifies merge tools to associate with particular file
+patterns. Tools matched here will take precedence over the default
+merge tool. Patterns are globs by default, rooted at the repository
+root.
+
+Example::
+
+ [merge-patterns]
+ **.c = kdiff3
+ **.jpg = myimgmerge
+
+``merge-tools``
+"""""""""""""""
+
+This section configures external merge tools to use for file-level
+merges.
+
+Example ``~/.hgrc``::
+
+ [merge-tools]
+ # Override stock tool location
+ kdiff3.executable = ~/bin/kdiff3
+ # Specify command line
+ kdiff3.args = $base $local $other -o $output
+ # Give higher priority
+ kdiff3.priority = 1
+
+ # Define new tool
+ myHtmlTool.args = -m $local $other $base $output
+ myHtmlTool.regkey = Software\FooSoftware\HtmlMerge
+ myHtmlTool.priority = 1
+
+Supported arguments:
+
+``priority``
+ The priority in which to evaluate this tool.
+ Default: 0.
+
+``executable``
+ Either just the name of the executable or its pathname. On Windows,
+ the path can use environment variables with ${ProgramFiles} syntax.
+ Default: the tool name.
+
+``args``
+ The arguments to pass to the tool executable. You can refer to the
+ files being merged as well as the output file through these
+ variables: ``$base``, ``$local``, ``$other``, ``$output``.
+ Default: ``$local $base $other``
+
+``premerge``
+ Attempt to run internal non-interactive 3-way merge tool before
+ launching external tool. Options are ``true``, ``false``, or ``keep``
+ to leave markers in the file if the premerge fails.
+ Default: True
+
+``binary``
+ This tool can merge binary files. Defaults to False, unless tool
+ was selected by file pattern match.
+
+``symlink``
+ This tool can merge symlinks. Defaults to False, even if tool was
+ selected by file pattern match.
+
+``check``
+ A list of merge success-checking options:
+
+ ``changed``
+ Ask whether merge was successful when the merged file shows no changes.
+ ``conflicts``
+ Check whether there are conflicts even though the tool reported success.
+ ``prompt``
+ Always prompt for merge success, regardless of success reported by tool.
+
+``checkchanged``
+ True is equivalent to ``check = changed``.
+ Default: False
+
+``checkconflicts``
+ True is equivalent to ``check = conflicts``.
+ Default: False
+
+``fixeol``
+ Attempt to fix up EOL changes caused by the merge tool.
+ Default: False
+
+``gui``
+ This tool requires a graphical interface to run. Default: False
+
+``regkey``
+ Windows registry key which describes install location of this
+ tool. Mercurial will search for this key first under
+ ``HKEY_CURRENT_USER`` and then under ``HKEY_LOCAL_MACHINE``.
+ Default: None
+
+``regkeyalt``
+ An alternate Windows registry key to try if the first key is not
+ found. The alternate key uses the same ``regname`` and ``regappend``
+ semantics of the primary key. The most common use for this key
+ is to search for 32bit applications on 64bit operating systems.
+ Default: None
+
+``regname``
+ Name of value to read from specified registry key. Defaults to the
+ unnamed (default) value.
+
+``regappend``
+ String to append to the value read from the registry, typically
+ the executable name of the tool.
+ Default: None
+
+
+``hooks``
+"""""""""
+
+Commands or Python functions that get automatically executed by
+various actions such as starting or finishing a commit. Multiple
+hooks can be run for the same action by appending a suffix to the
+action. Overriding a site-wide hook can be done by changing its
+value or setting it to an empty string.
+
+Example ``.hg/hgrc``::
+
+ [hooks]
+ # update working directory after adding changesets
+ changegroup.update = hg update
+ # do not use the site-wide hook
+ incoming =
+ incoming.email = /my/email/hook
+ incoming.autobuild = /my/build/hook
+
+Most hooks are run with environment variables set that give useful
+additional information. For each hook below, the environment
+variables it is passed are listed with names of the form ``$HG_foo``.
+
+``changegroup``
+ Run after a changegroup has been added via push, pull or unbundle.
+ ID of the first new changeset is in ``$HG_NODE``. URL from which
+ changes came is in ``$HG_URL``.
+
+``commit``
+ Run after a changeset has been created in the local repository. ID
+ of the newly created changeset is in ``$HG_NODE``. Parent changeset
+ IDs are in ``$HG_PARENT1`` and ``$HG_PARENT2``.
+
+``incoming``
+ Run after a changeset has been pulled, pushed, or unbundled into
+ the local repository. The ID of the newly arrived changeset is in
+ ``$HG_NODE``. URL that was the source of changes is in ``$HG_URL``.
+
+``outgoing``
+ Run after sending changes from local repository to another. ID of
+ first changeset sent is in ``$HG_NODE``. Source of operation is in
+ ``$HG_SOURCE``; see "preoutgoing" hook for description.
+
+``post-<command>``
+ Run after successful invocations of the associated command. The
+ contents of the command line are passed as ``$HG_ARGS`` and the result
+ code in ``$HG_RESULT``. Parsed command line arguments are passed as
+ ``$HG_PATS`` and ``$HG_OPTS``. These contain string representations of
+ the python data internally passed to <command>. ``$HG_OPTS`` is a
+ dictionary of options (with unspecified options set to their defaults).
+ ``$HG_PATS`` is a list of arguments. Hook failure is ignored.
+
+``pre-<command>``
+ Run before executing the associated command. The contents of the
+ command line are passed as ``$HG_ARGS``. Parsed command line arguments
+ are passed as ``$HG_PATS`` and ``$HG_OPTS``. These contain string
+ representations of the data internally passed to <command>. ``$HG_OPTS``
+ is a dictionary of options (with unspecified options set to their
+ defaults). ``$HG_PATS`` is a list of arguments. If the hook returns
+ failure, the command doesn't execute and Mercurial returns the failure
+ code.
+
+``prechangegroup``
+ Run before a changegroup is added via push, pull or unbundle. Exit
+ status 0 allows the changegroup to proceed. Non-zero status will
+ cause the push, pull or unbundle to fail. URL from which changes
+ will come is in ``$HG_URL``.
+
+``precommit``
+ Run before starting a local commit. Exit status 0 allows the
+ commit to proceed. Non-zero status will cause the commit to fail.
+ Parent changeset IDs are in ``$HG_PARENT1`` and ``$HG_PARENT2``.
+
+``prelistkeys``
+ Run before listing pushkeys (like bookmarks) in the
+ repository. Non-zero status will cause failure. The key namespace is
+ in ``$HG_NAMESPACE``.
+
+``preoutgoing``
+ Run before collecting changes to send from the local repository to
+ another. Non-zero status will cause failure. This lets you prevent
+ pull over HTTP or SSH. It also prevents local pull, push
+ (outbound) or bundle commands, but it is not effective there, since
+ you can simply copy the files instead. Source of operation is in
+ ``$HG_SOURCE``. If "serve", operation is happening on behalf of remote
+ SSH or HTTP repository. If "push", "pull" or "bundle", operation
+ is happening on behalf of repository on same system.
+
+``prepushkey``
+ Run before a pushkey (like a bookmark) is added to the
+ repository. Non-zero status will cause the key to be rejected. The
+ key namespace is in ``$HG_NAMESPACE``, the key is in ``$HG_KEY``,
+ the old value (if any) is in ``$HG_OLD``, and the new value is in
+ ``$HG_NEW``.
+
+``pretag``
+ Run before creating a tag. Exit status 0 allows the tag to be
+ created. Non-zero status will cause the tag to fail. ID of
+ changeset to tag is in ``$HG_NODE``. Name of tag is in ``$HG_TAG``. Tag is
+ local if ``$HG_LOCAL=1``, in repository if ``$HG_LOCAL=0``.
+
+``pretxnchangegroup``
+ Run after a changegroup has been added via push, pull or unbundle,
+ but before the transaction has been committed. Changegroup is
+ visible to hook program. This lets you validate incoming changes
+ before accepting them. Passed the ID of the first new changeset in
+ ``$HG_NODE``. Exit status 0 allows the transaction to commit. Non-zero
+ status will cause the transaction to be rolled back and the push,
+ pull or unbundle will fail. URL that was source of changes is in
+ ``$HG_URL``.
+
+``pretxncommit``
+ Run after a changeset has been created but the transaction not yet
+ committed. Changeset is visible to hook program. This lets you
+ validate commit message and changes. Exit status 0 allows the
+ commit to proceed. Non-zero status will cause the transaction to
+ be rolled back. ID of changeset is in ``$HG_NODE``. Parent changeset
+ IDs are in ``$HG_PARENT1`` and ``$HG_PARENT2``.
+
+``preupdate``
+ Run before updating the working directory. Exit status 0 allows
+ the update to proceed. Non-zero status will prevent the update.
+ Changeset ID of first new parent is in ``$HG_PARENT1``. If merge, ID
+ of second new parent is in ``$HG_PARENT2``.
+
+``listkeys``
+ Run after listing pushkeys (like bookmarks) in the repository. The
+ key namespace is in ``$HG_NAMESPACE``. ``$HG_VALUES`` is a
+ dictionary containing the keys and values.
+
+``pushkey``
+ Run after a pushkey (like a bookmark) is added to the
+ repository. The key namespace is in ``$HG_NAMESPACE``, the key is in
+ ``$HG_KEY``, the old value (if any) is in ``$HG_OLD``, and the new
+ value is in ``$HG_NEW``.
+
+``tag``
+ Run after a tag is created. ID of tagged changeset is in ``$HG_NODE``.
+ Name of tag is in ``$HG_TAG``. Tag is local if ``$HG_LOCAL=1``, in
+ repository if ``$HG_LOCAL=0``.
+
+``update``
+ Run after updating the working directory. Changeset ID of first
+ new parent is in ``$HG_PARENT1``. If merge, ID of second new parent is
+ in ``$HG_PARENT2``. If the update succeeded, ``$HG_ERROR=0``. If the
+ update failed (e.g. because conflicts not resolved), ``$HG_ERROR=1``.
+
+.. note:: It is generally better to use standard hooks rather than the
+ generic pre- and post- command hooks as they are guaranteed to be
+ called in the appropriate contexts for influencing transactions.
+ Also, hooks like "commit" will be called in all contexts that
+ generate a commit (e.g. tag) and not just the commit command.
+
+.. note:: Environment variables with empty values may not be passed to
+ hooks on platforms such as Windows. As an example, ``$HG_PARENT2``
+ will have an empty value under Unix-like platforms for non-merge
+ changesets, while it will not be available at all under Windows.
+
+The syntax for Python hooks is as follows::
+
+ hookname = python:modulename.submodule.callable
+ hookname = python:/path/to/python/module.py:callable
+
+Python hooks are run within the Mercurial process. Each hook is
+called with at least three keyword arguments: a ui object (keyword
+``ui``), a repository object (keyword ``repo``), and a ``hooktype``
+keyword that tells what kind of hook is used. Arguments listed as
+environment variables above are passed as keyword arguments, with no
+``HG_`` prefix, and names in lower case.
+
+If a Python hook returns a "true" value or raises an exception, this
+is treated as a failure.
+
+
+``http_proxy``
+""""""""""""""
+
+Used to access web-based Mercurial repositories through a HTTP
+proxy.
+
+``host``
+ Host name and (optional) port of the proxy server, for example
+ "myproxy:8000".
+
+``no``
+ Optional. Comma-separated list of host names that should bypass
+ the proxy.
+
+``passwd``
+ Optional. Password to authenticate with at the proxy server.
+
+``user``
+ Optional. User name to authenticate with at the proxy server.
+
+``always``
+ Optional. Always use the proxy, even for localhost and any entries
+ in ``http_proxy.no``. True or False. Default: False.
+
+``smtp``
+""""""""
+
+Configuration for extensions that need to send email messages.
+
+``host``
+ Host name of mail server, e.g. "mail.example.com".
+
+``port``
+ Optional. Port to connect to on mail server. Default: 25.
+
+``tls``
+ Optional. Method to enable TLS when connecting to mail server: starttls,
+ smtps or none. Default: none.
+
+``username``
+ Optional. User name for authenticating with the SMTP server.
+ Default: none.
+
+``password``
+ Optional. Password for authenticating with the SMTP server. If not
+ specified, interactive sessions will prompt the user for a
+ password; non-interactive sessions will fail. Default: none.
+
+``local_hostname``
+ Optional. It's the hostname that the sender can use to identify
+ itself to the MTA.
+
+
+``patch``
+"""""""""
+
+Settings used when applying patches, for instance through the 'import'
+command or with Mercurial Queues extension.
+
+``eol``
+ When set to 'strict' patch content and patched files end of lines
+ are preserved. When set to ``lf`` or ``crlf``, both files end of
+ lines are ignored when patching and the result line endings are
+ normalized to either LF (Unix) or CRLF (Windows). When set to
+ ``auto``, end of lines are again ignored while patching but line
+ endings in patched files are normalized to their original setting
+ on a per-file basis. If target file does not exist or has no end
+ of line, patch line endings are preserved.
+ Default: strict.
+
+
+``paths``
+"""""""""
+
+Assigns symbolic names to repositories. The left side is the
+symbolic name, and the right gives the directory or URL that is the
+location of the repository. Default paths can be declared by setting
+the following entries.
+
+``default``
+ Directory or URL to use when pulling if no source is specified.
+ Default is set to repository from which the current repository was
+ cloned.
+
+``default-push``
+ Optional. Directory or URL to use when pushing if no destination
+ is specified.
+
+
+``profiling``
+"""""""""""""
+
+Specifies profiling format and file output. In this section
+description, 'profiling data' stands for the raw data collected
+during profiling, while 'profiling report' stands for a statistical
+text report generated from the profiling data. The profiling is done
+using lsprof.
+
+``format``
+ Profiling format.
+ Default: text.
+
+ ``text``
+ Generate a profiling report. When saving to a file, it should be
+ noted that only the report is saved, and the profiling data is
+ not kept.
+ ``kcachegrind``
+ Format profiling data for kcachegrind use: when saving to a
+ file, the generated file can directly be loaded into
+ kcachegrind.
+
+``output``
+ File path where profiling data or report should be saved. If the
+ file exists, it is replaced. Default: None, data is printed on
+ stderr.
+
+``revsetalias``
+"""""""""""""""
+
+Alias definitions for revsets. See :hg:`help revsets` for details.
+
+``server``
+""""""""""
+
+Controls generic server settings.
+
+``uncompressed``
+ Whether to allow clients to clone a repository using the
+ uncompressed streaming protocol. This transfers about 40% more
+ data than a regular clone, but uses less memory and CPU on both
+ server and client. Over a LAN (100 Mbps or better) or a very fast
+ WAN, an uncompressed streaming clone is a lot faster (~10x) than a
+ regular clone. Over most WAN connections (anything slower than
+ about 6 Mbps), uncompressed streaming is slower, because of the
+ extra data transfer overhead. This mode will also temporarily hold
+ the write lock while determining what data to transfer.
+ Default is True.
+
+``validate``
+ Whether to validate the completeness of pushed changesets by
+ checking that all new file revisions specified in manifests are
+ present. Default is False.
+
+``subpaths``
+""""""""""""
+
+Defines subrepositories source locations rewriting rules of the form::
+
+ <pattern> = <replacement>
+
+Where ``pattern`` is a regular expression matching the source and
+``replacement`` is the replacement string used to rewrite it. Groups
+can be matched in ``pattern`` and referenced in ``replacements``. For
+instance::
+
+ http://server/(.*)-hg/ = http://hg.server/\1/
+
+rewrites ``http://server/foo-hg/`` into ``http://hg.server/foo/``.
+
+All patterns are applied in definition order.
+
+``trusted``
+"""""""""""
+
+Mercurial will not use the settings in the
+``.hg/hgrc`` file from a repository if it doesn't belong to a trusted
+user or to a trusted group, as various hgrc features allow arbitrary
+commands to be run. This issue is often encountered when configuring
+hooks or extensions for shared repositories or servers. However,
+the web interface will use some safe settings from the ``[web]``
+section.
+
+This section specifies what users and groups are trusted. The
+current user is always trusted. To trust everybody, list a user or a
+group with name ``*``. These settings must be placed in an
+*already-trusted file* to take effect, such as ``$HOME/.hgrc`` of the
+user or service running Mercurial.
+
+``users``
+ Comma-separated list of trusted users.
+
+``groups``
+ Comma-separated list of trusted groups.
+
+
+``ui``
+""""""
+
+User interface controls.
+
+``archivemeta``
+ Whether to include the .hg_archival.txt file containing meta data
+ (hashes for the repository base and for tip) in archives created
+ by the :hg:`archive` command or downloaded via hgweb.
+ Default is True.
+
+``askusername``
+ Whether to prompt for a username when committing. If True, and
+ neither ``$HGUSER`` nor ``$EMAIL`` has been specified, then the user will
+ be prompted to enter a username. If no username is entered, the
+ default ``USER@HOST`` is used instead.
+ Default is False.
+
+``commitsubrepos``
+ Whether to commit modified subrepositories when committing the
+ parent repository. If False and one subrepository has uncommitted
+ changes, abort the commit.
+ Default is False.
+
+``debug``
+ Print debugging information. True or False. Default is False.
+
+``editor``
+ The editor to use during a commit. Default is ``$EDITOR`` or ``vi``.
+
+``fallbackencoding``
+ Encoding to try if it's not possible to decode the changelog using
+ UTF-8. Default is ISO-8859-1.
+
+``ignore``
+ A file to read per-user ignore patterns from. This file should be
+ in the same format as a repository-wide .hgignore file. This
+ option supports hook syntax, so if you want to specify multiple
+ ignore files, you can do so by setting something like
+ ``ignore.other = ~/.hgignore2``. For details of the ignore file
+ format, see the ``hgignore(5)`` man page.
+
+``interactive``
+ Whether to allow prompting the user. True or False. Default is True.
+
+``logtemplate``
+ Template string for commands that print changesets.
+
+``merge``
+ The conflict resolution program to use during a manual merge.
+ For more information on merge tools see :hg:`help merge-tools`.
+ For configuring merge tools see the ``[merge-tools]`` section.
+
+``portablefilenames``
+ Check for portable filenames. Can be ``warn``, ``ignore`` or ``abort``.
+ Default is ``warn``.
+ If set to ``warn`` (or ``true``), a warning message is printed on POSIX
+ platforms, if a file with a non-portable filename is added (e.g. a file
+ with a name that can't be created on Windows because it contains reserved
+ parts like ``AUX``, reserved characters like ``:``, or would cause a case
+ collision with an existing file).
+ If set to ``ignore`` (or ``false``), no warning is printed.
+ If set to ``abort``, the command is aborted.
+ On Windows, this configuration option is ignored and the command aborted.
+
+``quiet``
+ Reduce the amount of output printed. True or False. Default is False.
+
+``remotecmd``
+ remote command to use for clone/push/pull operations. Default is ``hg``.
+
+``report_untrusted``
+ Warn if a ``.hg/hgrc`` file is ignored due to not being owned by a
+ trusted user or group. True or False. Default is True.
+
+``slash``
+ Display paths using a slash (``/``) as the path separator. This
+ only makes a difference on systems where the default path
+ separator is not the slash character (e.g. Windows uses the
+ backslash character (``\``)).
+ Default is False.
+
+``ssh``
+ command to use for SSH connections. Default is ``ssh``.
+
+``strict``
+ Require exact command names, instead of allowing unambiguous
+ abbreviations. True or False. Default is False.
+
+``style``
+ Name of style to use for command output.
+
+``timeout``
+ The timeout used when a lock is held (in seconds), a negative value
+ means no timeout. Default is 600.
+
+``traceback``
+ Mercurial always prints a traceback when an unknown exception
+ occurs. Setting this to True will make Mercurial print a traceback
+ on all exceptions, even those recognized by Mercurial (such as
+ IOError or MemoryError). Default is False.
+
+``username``
+ The committer of a changeset created when running "commit".
+ Typically a person's name and email address, e.g. ``Fred Widget
+ <fred@example.com>``. Default is ``$EMAIL`` or ``username@hostname``. If
+ the username in hgrc is empty, it has to be specified manually or
+ in a different hgrc file (e.g. ``$HOME/.hgrc``, if the admin set
+ ``username =`` in the system hgrc). Environment variables in the
+ username are expanded.
+
+``verbose``
+ Increase the amount of output printed. True or False. Default is False.
+
+
+``web``
+"""""""
+
+Web interface configuration. The settings in this section apply to
+both the builtin webserver (started by :hg:`serve`) and the script you
+run through a webserver (``hgweb.cgi`` and the derivatives for FastCGI
+and WSGI).
+
+The Mercurial webserver does no authentication (it does not prompt for
+usernames and passwords to validate *who* users are), but it does do
+authorization (it grants or denies access for *authenticated users*
+based on settings in this section). You must either configure your
+webserver to do authentication for you, or disable the authorization
+checks.
+
+For a quick setup in a trusted environment, e.g., a private LAN, where
+you want it to accept pushes from anybody, you can use the following
+command line::
+
+ $ hg --config web.allow_push=* --config web.push_ssl=False serve
+
+Note that this will allow anybody to push anything to the server and
+that this should not be used for public servers.
+
+The full set of options is:
+
+``accesslog``
+ Where to output the access log. Default is stdout.
+
+``address``
+ Interface address to bind to. Default is all.
+
+``allow_archive``
+ List of archive format (bz2, gz, zip) allowed for downloading.
+ Default is empty.
+
+``allowbz2``
+ (DEPRECATED) Whether to allow .tar.bz2 downloading of repository
+ revisions.
+ Default is False.
+
+``allowgz``
+ (DEPRECATED) Whether to allow .tar.gz downloading of repository
+ revisions.
+ Default is False.
+
+``allowpull``
+ Whether to allow pulling from the repository. Default is True.
+
+``allow_push``
+ Whether to allow pushing to the repository. If empty or not set,
+ push is not allowed. If the special value ``*``, any remote user can
+ push, including unauthenticated users. Otherwise, the remote user
+ must have been authenticated, and the authenticated user name must
+ be present in this list. The contents of the allow_push list are
+ examined after the deny_push list.
+
+``guessmime``
+ Control MIME types for raw download of file content.
+ Set to True to let hgweb guess the content type from the file
+ extension. This will serve HTML files as ``text/html`` and might
+ allow cross-site scripting attacks when serving untrusted
+ repositories. Default is False.
+
+``allow_read``
+ If the user has not already been denied repository access due to
+ the contents of deny_read, this list determines whether to grant
+ repository access to the user. If this list is not empty, and the
+ user is unauthenticated or not present in the list, then access is
+ denied for the user. If the list is empty or not set, then access
+ is permitted to all users by default. Setting allow_read to the
+ special value ``*`` is equivalent to it not being set (i.e. access
+ is permitted to all users). The contents of the allow_read list are
+ examined after the deny_read list.
+
+``allowzip``
+ (DEPRECATED) Whether to allow .zip downloading of repository
+ revisions. Default is False. This feature creates temporary files.
+
+``baseurl``
+ Base URL to use when publishing URLs in other locations, so
+ third-party tools like email notification hooks can construct
+ URLs. Example: ``http://hgserver/repos/``.
+
+``cacerts``
+ Path to file containing a list of PEM encoded certificate
+ authority certificates. Environment variables and ``~user``
+ constructs are expanded in the filename. If specified on the
+ client, then it will verify the identity of remote HTTPS servers
+ with these certificates. The form must be as follows::
+
+ -----BEGIN CERTIFICATE-----
+ ... (certificate in base64 PEM encoding) ...
+ -----END CERTIFICATE-----
+ -----BEGIN CERTIFICATE-----
+ ... (certificate in base64 PEM encoding) ...
+ -----END CERTIFICATE-----
+
+ This feature is only supported when using Python 2.6 or later. If you wish
+ to use it with earlier versions of Python, install the backported
+ version of the ssl library that is available from
+ ``http://pypi.python.org``.
+
+ You can use OpenSSL's CA certificate file if your platform has one.
+ On most Linux systems this will be ``/etc/ssl/certs/ca-certificates.crt``.
+ Otherwise you will have to generate this file manually.
+
+ To disable SSL verification temporarily, specify ``--insecure`` from
+ command line.
+
+``cache``
+ Whether to support caching in hgweb. Defaults to True.
+
+``contact``
+ Name or email address of the person in charge of the repository.
+ Defaults to ui.username or ``$EMAIL`` or "unknown" if unset or empty.
+
+``deny_push``
+ Whether to deny pushing to the repository. If empty or not set,
+ push is not denied. If the special value ``*``, all remote users are
+ denied push. Otherwise, unauthenticated users are all denied, and
+ any authenticated user name present in this list is also denied. The
+ contents of the deny_push list are examined before the allow_push list.
+
+``deny_read``
+ Whether to deny reading/viewing of the repository. If this list is
+ not empty, unauthenticated users are all denied, and any
+ authenticated user name present in this list is also denied access to
+ the repository. If set to the special value ``*``, all remote users
+ are denied access (rarely needed ;). If deny_read is empty or not set,
+ the determination of repository access depends on the presence and
+ content of the allow_read list (see description). If both
+ deny_read and allow_read are empty or not set, then access is
+ permitted to all users by default. If the repository is being
+ served via hgwebdir, denied users will not be able to see it in
+ the list of repositories. The contents of the deny_read list have
+ priority over (are examined before) the contents of the allow_read
+ list.
+
+``descend``
+ hgwebdir indexes will not descend into subdirectories. Only repositories
+ directly in the current path will be shown (other repositories are still
+ available from the index corresponding to their containing path).
+
+``description``
+ Textual description of the repository's purpose or contents.
+ Default is "unknown".
+
+``encoding``
+ Character encoding name. Default is the current locale charset.
+ Example: "UTF-8"
+
+``errorlog``
+ Where to output the error log. Default is stderr.
+
+``hidden``
+ Whether to hide the repository in the hgwebdir index.
+ Default is False.
+
+``ipv6``
+ Whether to use IPv6. Default is False.
+
+``logoimg``
+ File name of the logo image that some templates display on each page.
+ The file name is relative to ``staticurl``. That is, the full path to
+ the logo image is "staticurl/logoimg".
+ If unset, ``hglogo.png`` will be used.
+
+``logourl``
+ Base URL to use for logos. If unset, ``http://mercurial.selenic.com/``
+ will be used.
+
+``name``
+ Repository name to use in the web interface. Default is current
+ working directory.
+
+``maxchanges``
+ Maximum number of changes to list on the changelog. Default is 10.
+
+``maxfiles``
+ Maximum number of files to list per changeset. Default is 10.
+
+``port``
+ Port to listen on. Default is 8000.
+
+``prefix``
+ Prefix path to serve from. Default is '' (server root).
+
+``push_ssl``
+ Whether to require that inbound pushes be transported over SSL to
+ prevent password sniffing. Default is True.
+
+``staticurl``
+ Base URL to use for static files. If unset, static files (e.g. the
+ hgicon.png favicon) will be served by the CGI script itself. Use
+ this setting to serve them directly with the HTTP server.
+ Example: ``http://hgserver/static/``.
+
+``stripes``
+ How many lines a "zebra stripe" should span in multiline output.
+ Default is 1; set to 0 to disable.
+
+``style``
+ Which template map style to use.
+
+``templates``
+ Where to find the HTML templates. Default is install path.
diff --git a/websdk/mercurial/help/dates.txt b/websdk/mercurial/help/dates.txt
new file mode 100644
index 0000000..5f48979
--- /dev/null
+++ b/websdk/mercurial/help/dates.txt
@@ -0,0 +1,36 @@
+Some commands allow the user to specify a date, e.g.:
+
+- backout, commit, import, tag: Specify the commit date.
+- log, revert, update: Select revision(s) by date.
+
+Many date formats are valid. Here are some examples:
+
+- ``Wed Dec 6 13:18:29 2006`` (local timezone assumed)
+- ``Dec 6 13:18 -0600`` (year assumed, time offset provided)
+- ``Dec 6 13:18 UTC`` (UTC and GMT are aliases for +0000)
+- ``Dec 6`` (midnight)
+- ``13:18`` (today assumed)
+- ``3:39`` (3:39AM assumed)
+- ``3:39pm`` (15:39)
+- ``2006-12-06 13:18:29`` (ISO 8601 format)
+- ``2006-12-6 13:18``
+- ``2006-12-6``
+- ``12-6``
+- ``12/6``
+- ``12/6/6`` (Dec 6 2006)
+
+Lastly, there is Mercurial's internal format:
+
+- ``1165432709 0`` (Wed Dec 6 13:18:29 2006 UTC)
+
+This is the internal representation format for dates. The first number
+is the number of seconds since the epoch (1970-01-01 00:00 UTC). The
+second is the offset of the local timezone, in seconds west of UTC
+(negative if the timezone is east of UTC).
+
+The log command also accepts date ranges:
+
+- ``<DATE`` - at or before a given date/time
+- ``>DATE`` - on or after a given date/time
+- ``DATE to DATE`` - a date range, inclusive
+- ``-DAYS`` - within a given number of days of today
diff --git a/websdk/mercurial/help/diffs.txt b/websdk/mercurial/help/diffs.txt
new file mode 100644
index 0000000..9ede0a5
--- /dev/null
+++ b/websdk/mercurial/help/diffs.txt
@@ -0,0 +1,29 @@
+Mercurial's default format for showing changes between two versions of
+a file is compatible with the unified format of GNU diff, which can be
+used by GNU patch and many other standard tools.
+
+While this standard format is often enough, it does not encode the
+following information:
+
+- executable status and other permission bits
+- copy or rename information
+- changes in binary files
+- creation or deletion of empty files
+
+Mercurial also supports the extended diff format from the git VCS
+which addresses these limitations. The git diff format is not produced
+by default because a few widespread tools still do not understand this
+format.
+
+This means that when generating diffs from a Mercurial repository
+(e.g. with :hg:`export`), you should be careful about things like file
+copies and renames or other things mentioned above, because when
+applying a standard diff to a different repository, this extra
+information is lost. Mercurial's internal operations (like push and
+pull) are not affected by this, because they use an internal binary
+format for communicating changes.
+
+To make Mercurial produce the git extended diff format, use the --git
+option available for many commands, or set 'git = True' in the [diff]
+section of your configuration file. You do not need to set this option
+when importing diffs in this format or using them in the mq extension.
diff --git a/websdk/mercurial/help/environment.txt b/websdk/mercurial/help/environment.txt
new file mode 100644
index 0000000..5e6771f
--- /dev/null
+++ b/websdk/mercurial/help/environment.txt
@@ -0,0 +1,101 @@
+HG
+ Path to the 'hg' executable, automatically passed when running
+ hooks, extensions or external tools. If unset or empty, this is
+ the hg executable's name if it's frozen, or an executable named
+ 'hg' (with %PATHEXT% [defaulting to COM/EXE/BAT/CMD] extensions on
+ Windows) is searched for.
+
+HGEDITOR
+ This is the name of the editor to run when committing. See EDITOR.
+
+ (deprecated, use configuration file)
+
+HGENCODING
+ This overrides the default locale setting detected by Mercurial.
+ This setting is used to convert data including usernames,
+ changeset descriptions, tag names, and branches. This setting can
+ be overridden with the --encoding command-line option.
+
+HGENCODINGMODE
+ This sets Mercurial's behavior for handling unknown characters
+ while transcoding user input. The default is "strict", which
+ causes Mercurial to abort if it can't map a character. Other
+ settings include "replace", which replaces unknown characters, and
+ "ignore", which drops them. This setting can be overridden with
+ the --encodingmode command-line option.
+
+HGENCODINGAMBIGUOUS
+ This sets Mercurial's behavior for handling characters with
+ "ambiguous" widths like accented Latin characters with East Asian
+ fonts. By default, Mercurial assumes ambiguous characters are
+ narrow, set this variable to "wide" if such characters cause
+ formatting problems.
+
+HGMERGE
+ An executable to use for resolving merge conflicts. The program
+ will be executed with three arguments: local file, remote file,
+ ancestor file.
+
+ (deprecated, use configuration file)
+
+HGRCPATH
+ A list of files or directories to search for configuration
+ files. Item separator is ":" on Unix, ";" on Windows. If HGRCPATH
+ is not set, platform default search path is used. If empty, only
+ the .hg/hgrc from the current repository is read.
+
+ For each element in HGRCPATH:
+
+ - if it's a directory, all files ending with .rc are added
+ - otherwise, the file itself will be added
+
+HGPLAIN
+ When set, this disables any configuration settings that might
+ change Mercurial's default output. This includes encoding,
+ defaults, verbose mode, debug mode, quiet mode, tracebacks, and
+ localization. This can be useful when scripting against Mercurial
+ in the face of existing user configuration.
+
+ Equivalent options set via command line flags or environment
+ variables are not overridden.
+
+HGPLAINEXCEPT
+ This is a comma-separated list of features to preserve when
+ HGPLAIN is enabled. Currently the only value supported is "i18n",
+ which preserves internationalization in plain mode.
+
+ Setting HGPLAINEXCEPT to anything (even an empty string) will
+ enable plain mode.
+
+HGUSER
+ This is the string used as the author of a commit. If not set,
+ available values will be considered in this order:
+
+ - HGUSER (deprecated)
+ - configuration files from the HGRCPATH
+ - EMAIL
+ - interactive prompt
+ - LOGNAME (with ``@hostname`` appended)
+
+ (deprecated, use configuration file)
+
+EMAIL
+ May be used as the author of a commit; see HGUSER.
+
+LOGNAME
+ May be used as the author of a commit; see HGUSER.
+
+VISUAL
+ This is the name of the editor to use when committing. See EDITOR.
+
+EDITOR
+ Sometimes Mercurial needs to open a text file in an editor for a
+ user to modify, for example when writing commit messages. The
+ editor it uses is determined by looking at the environment
+ variables HGEDITOR, VISUAL and EDITOR, in that order. The first
+ non-empty one is chosen. If all of them are empty, the editor
+ defaults to 'vi'.
+
+PYTHONPATH
+ This is used by Python to find imported modules and may need to be
+ set appropriately if this Mercurial is not installed system-wide.
diff --git a/websdk/mercurial/help/extensions.txt b/websdk/mercurial/help/extensions.txt
new file mode 100644
index 0000000..f3d2992
--- /dev/null
+++ b/websdk/mercurial/help/extensions.txt
@@ -0,0 +1,33 @@
+Mercurial has the ability to add new features through the use of
+extensions. Extensions may add new commands, add options to
+existing commands, change the default behavior of commands, or
+implement hooks.
+
+Extensions are not loaded by default for a variety of reasons:
+they can increase startup overhead; they may be meant for advanced
+usage only; they may provide potentially dangerous abilities (such
+as letting you destroy or modify history); they might not be ready
+for prime time; or they may alter some usual behaviors of stock
+Mercurial. It is thus up to the user to activate extensions as
+needed.
+
+To enable the "foo" extension, either shipped with Mercurial or in the
+Python search path, create an entry for it in your configuration file,
+like this::
+
+ [extensions]
+ foo =
+
+You may also specify the full path to an extension::
+
+ [extensions]
+ myfeature = ~/.hgext/myfeature.py
+
+To explicitly disable an extension enabled in a configuration file of
+broader scope, prepend its path with !::
+
+ [extensions]
+ # disabling extension bar residing in /path/to/extension/bar.py
+ bar = !/path/to/extension/bar.py
+ # ditto, but no path was supplied for extension baz
+ baz = !
diff --git a/websdk/mercurial/help/filesets.txt b/websdk/mercurial/help/filesets.txt
new file mode 100644
index 0000000..c929d79
--- /dev/null
+++ b/websdk/mercurial/help/filesets.txt
@@ -0,0 +1,65 @@
+Mercurial supports a functional language for selecting a set of
+files.
+
+Like other file patterns, this pattern type is indicated by a prefix,
+'set:'. The language supports a number of predicates which are joined
+by infix operators. Parentheses can be used for grouping.
+
+Identifiers such as filenames or patterns must be quoted with single
+or double quotes if they contain characters outside of
+``[.*{}[]?/\_a-zA-Z0-9\x80-\xff]`` or if they match one of the
+predefined predicates. This generally applies to file patterns other
+than globs and arguments for predicates.
+
+Special characters can be used in quoted identifiers by escaping them,
+e.g., ``\n`` is interpreted as a newline. To prevent them from being
+interpreted, strings can be prefixed with ``r``, e.g. ``r'...'``.
+
+There is a single prefix operator:
+
+``not x``
+ Files not in x. Short form is ``! x``.
+
+These are the supported infix operators:
+
+``x and y``
+ The intersection of files in x and y. Short form is ``x & y``.
+
+``x or y``
+ The union of files in x and y. There are two alternative short
+ forms: ``x | y`` and ``x + y``.
+
+``x - y``
+ Files in x but not in y.
+
+The following predicates are supported:
+
+.. predicatesmarker
+
+Some sample queries:
+
+- Show status of files that appear to be binary in the working directory::
+
+ hg status -A "set:binary()"
+
+- Forget files that are in .hgignore but are already tracked::
+
+ hg forget "set:hgignore() and not ignored()"
+
+- Find text files that contain a string::
+
+ hg locate "set:grep(magic) and not binary()"
+
+- Find C files in a non-standard encoding::
+
+ hg locate "set:**.c and not encoding(ascii)"
+
+- Revert copies of large binary files::
+
+ hg revert "set:copied() and binary() and size('>1M')"
+
+- Remove files listed in foo.lst that contain the letter a or b::
+
+ hg remove "set: 'listfile:foo.lst' and (**a* or **b*)"
+
+See also :hg:`help patterns`.
diff --git a/websdk/mercurial/help/glossary.txt b/websdk/mercurial/help/glossary.txt
new file mode 100644
index 0000000..074a31d
--- /dev/null
+++ b/websdk/mercurial/help/glossary.txt
@@ -0,0 +1,386 @@
+Ancestor
+ Any changeset that can be reached by an unbroken chain of parent
+ changesets from a given changeset. More precisely, the ancestors
+ of a changeset can be defined by two properties: a parent of a
+ changeset is an ancestor, and a parent of an ancestor is an
+ ancestor. See also: 'Descendant'.
+
+Bookmark
+ Bookmarks are pointers to certain commits that move when
+ committing. They are similar to tags in that it is possible to use
+ bookmark names in all places where Mercurial expects a changeset
+ ID, e.g., with :hg:`update`. Unlike tags, bookmarks move along
+ when you make a commit.
+
+ Bookmarks can be renamed, copied and deleted. Bookmarks are local,
+ unless they are explicitly pushed or pulled between repositories.
+ Pushing and pulling bookmarks allow you to collaborate with others
+ on a branch without creating a named branch.
+
+Branch
+ (Noun) A child changeset that has been created from a parent that
+ is not a head. These are known as topological branches, see
+ 'Branch, topological'. If a topological branch is named, it becomes
+ a named branch. If a topological branch is not named, it becomes
+ an anonymous branch. See 'Branch, anonymous' and 'Branch, named'.
+
+ Branches may be created when changes are pulled from or pushed to
+ a remote repository, since new heads may be created by these
+ operations. Note that the term branch can also be used informally
+ to describe a development process in which certain development is
+ done independently of other development. This is sometimes done
+ explicitly with a named branch, but it can also be done locally,
+ using bookmarks or clones and anonymous branches.
+
+ Example: "The experimental branch".
+
+ (Verb) The action of creating a child changeset which results in
+ its parent having more than one child.
+
+ Example: "I'm going to branch at X".
+
+Branch, anonymous
+ Every time a new child changeset is created from a parent that is not
+ a head and the name of the branch is not changed, a new anonymous
+ branch is created.
+
+Branch, closed
+ A named branch whose branch heads have all been closed.
+
+Branch, default
+ The branch assigned to a changeset when no name has previously been
+ assigned.
+
+Branch head
+ See 'Head, branch'.
+
+Branch, inactive
+ If a named branch has no topological heads, it is considered to be
+ inactive. As an example, a feature branch becomes inactive when it
+ is merged into the default branch. The :hg:`branches` command
+ shows inactive branches by default, though they can be hidden with
+ :hg:`branches --active`.
+
+ NOTE: this concept is deprecated because it is too implicit.
+ Branches should now be explicitly closed using :hg:`commit
+ --close-branch` when they are no longer needed.
+
+Branch, named
+ A collection of changesets which have the same branch name. By
+ default, children of a changeset in a named branch belong to the
+ same named branch. A child can be explicitly assigned to a
+ different branch. See :hg:`help branch`, :hg:`help branches` and
+ :hg:`commit --close-branch` for more information on managing
+ branches.
+
+ Named branches can be thought of as a kind of namespace, dividing
+ the collection of changesets that comprise the repository into a
+ collection of disjoint subsets. A named branch is not necessarily
+ a topological branch. If a new named branch is created from the
+ head of another named branch, or the default branch, but no
+ further changesets are added to that previous branch, then that
+ previous branch will be a branch in name only.
+
+Branch tip
+ See 'Tip, branch'.
+
+Branch, topological
+ Every time a new child changeset is created from a parent that is
+ not a head, a new topological branch is created. If a topological
+ branch is named, it becomes a named branch. If a topological
+ branch is not named, it becomes an anonymous branch of the
+ current, possibly default, branch.
+
+Changelog
+ A record of the changesets in the order in which they were added
+ to the repository. This includes details such as changeset id,
+ author, commit message, date, and list of changed files.
+
+Changeset
+ A snapshot of the state of the repository used to record a change.
+
+Changeset, child
+ The converse of parent changeset: if P is a parent of C, then C is
+ a child of P. There is no limit to the number of children that a
+ changeset may have.
+
+Changeset id
+ A SHA-1 hash that uniquely identifies a changeset. It may be
+ represented as either a "long" 40 hexadecimal digit string, or a
+ "short" 12 hexadecimal digit string.
+
+Changeset, merge
+ A changeset with two parents. This occurs when a merge is
+ committed.
+
+Changeset, parent
+ A revision upon which a child changeset is based. Specifically, a
+ parent changeset of a changeset C is a changeset whose node
+ immediately precedes C in the DAG. Changesets have at most two
+ parents.
+
+Checkout
+ (Noun) The working directory being updated to a specific
+ revision. This use should probably be avoided where possible, as
+ changeset is much more appropriate than checkout in this context.
+
+ Example: "I'm using checkout X."
+
+ (Verb) Updating the working directory to a specific changeset. See
+ :hg:`help update`.
+
+ Example: "I'm going to check out changeset X."
+
+Child changeset
+ See 'Changeset, child'.
+
+Close changeset
+ See 'Changeset, close'.
+
+Closed branch
+ See 'Branch, closed'.
+
+Clone
+ (Noun) An entire or partial copy of a repository. The partial
+ clone must be in the form of a revision and its ancestors.
+
+ Example: "Is your clone up to date?".
+
+ (Verb) The process of creating a clone, using :hg:`clone`.
+
+ Example: "I'm going to clone the repository".
+
+Closed branch head
+ See 'Head, closed branch'.
+
+Commit
+ (Noun) A synonym for changeset.
+
+ Example: "Is the bug fixed in your recent commit?"
+
+ (Verb) The act of recording changes to a repository. When files
+ are committed in a working directory, Mercurial finds the
+ differences between the committed files and their parent
+ changeset, creating a new changeset in the repository.
+
+ Example: "You should commit those changes now."
+
+Cset
+ A common abbreviation of the term changeset.
+
+DAG
+ The repository of changesets of a distributed version control
+ system (DVCS) can be described as a directed acyclic graph (DAG),
+ consisting of nodes and edges, where nodes correspond to
+ changesets and edges imply a parent -> child relation. This graph
+ can be visualized by graphical tools such as :hg:`glog`
+ (graphlog). In Mercurial, the DAG is limited by the requirement
+ for children to have at most two parents.
+
+Default branch
+ See 'Branch, default'.
+
+Descendant
+ Any changeset that can be reached by a chain of child changesets
+ from a given changeset. More precisely, the descendants of a
+ changeset can be defined by two properties: the child of a
+ changeset is a descendant, and the child of a descendant is a
+ descendant. See also: 'Ancestor'.
+
+Diff
+ (Noun) The difference between the contents and attributes of files
+ in two changesets or a changeset and the current working
+ directory. The difference is usually represented in a standard
+ form called a "diff" or "patch". The "git diff" format is used
+ when the changes include copies, renames, or changes to file
+ attributes, none of which can be represented/handled by classic
+ "diff" and "patch".
+
+ Example: "Did you see my correction in the diff?"
+
+ (Verb) Diffing two changesets is the action of creating a diff or
+ patch.
+
+ Example: "If you diff with changeset X, you will see what I mean."
+
+Directory, working
+ The working directory represents the state of the files tracked by
+ Mercurial, that will be recorded in the next commit. The working
+ directory initially corresponds to the snapshot at an existing
+ changeset, known as the parent of the working directory. See
+ 'Parent, working directory'. The state may be modified by changes
+ to the files introduced manually or by a merge. The repository
+ metadata exists in the .hg directory inside the working directory.
+
+Graph
+ See DAG and :hg:`help graphlog`.
+
+Head
+ The term 'head' may be used to refer to both a branch head or a
+ repository head, depending on the context. See 'Head, branch' and
+ 'Head, repository' for specific definitions.
+
+ Heads are where development generally takes place and are the
+ usual targets for update and merge operations.
+
+Head, branch
+ A changeset with no descendants on the same named branch.
+
+Head, closed branch
+ A changeset that marks a head as no longer interesting. The closed
+ head is no longer listed by :hg:`heads`. A branch is considered
+ closed when all its heads are closed and consequently is not
+ listed by :hg:`branches`.
+
+Head, repository
+ A topological head which has not been closed.
+
+Head, topological
+ A changeset with no children in the repository.
+
+History, immutable
+ Once committed, changesets cannot be altered. Extensions which
+ appear to change history actually create new changesets that
+ replace existing ones, and then destroy the old changesets. Doing
+ so in public repositories can result in old changesets being
+ reintroduced to the repository.
+
+History, rewriting
+ The changesets in a repository are immutable. However, extensions
+ to Mercurial can be used to alter the repository, usually in such
+ a way as to preserve changeset contents.
+
+Immutable history
+ See 'History, immutable'.
+
+Merge changeset
+ See 'Changeset, merge'.
+
+Manifest
+ Each changeset has a manifest, which is the list of files that are
+ tracked by the changeset.
+
+Merge
+ Used to bring together divergent branches of work. When you update
+ to a changeset and then merge another changeset, you bring the
+ history of the latter changeset into your working directory. Once
+ conflicts are resolved (and marked), this merge may be committed
+ as a merge changeset, bringing two branches together in the DAG.
+
+Named branch
+ See 'Branch, named'.
+
+Null changeset
+ The empty changeset. It is the parent state of newly-initialized
+ repositories and repositories with no checked out revision. It is
+ thus the parent of root changesets and the effective ancestor when
+ merging unrelated changesets. Can be specified by the alias 'null'
+ or by the changeset ID '000000000000'.
+
+Parent
+ See 'Changeset, parent'.
+
+Parent changeset
+ See 'Changeset, parent'.
+
+Parent, working directory
+ The working directory parent reflects a virtual revision which is
+ the child of the changeset (or two changesets with an uncommitted
+ merge) shown by :hg:`parents`. This is changed with
+ :hg:`update`. Other commands to see the working directory parent
+ are :hg:`summary` and :hg:`id`. Can be specified by the alias ".".
+
+Patch
+ (Noun) The product of a diff operation.
+
+ Example: "I've sent you my patch."
+
+ (Verb) The process of using a patch file to transform one
+ changeset into another.
+
+ Example: "You will need to patch that revision."
+
+Pull
+ An operation in which changesets in a remote repository which are
+ not in the local repository are brought into the local
+ repository. Note that this operation without special arguments
+ only updates the repository, it does not update the files in the
+ working directory. See :hg:`help pull`.
+
+Push
+ An operation in which changesets in a local repository which are
+ not in a remote repository are sent to the remote repository. Note
+ that this operation only adds changesets which have been committed
+ locally to the remote repository. Uncommitted changes are not
+ sent. See :hg:`help push`.
+
+Repository
+ The metadata describing all recorded states of a collection of
+ files. Each recorded state is represented by a changeset. A
+ repository is usually (but not always) found in the ``.hg``
+ subdirectory of a working directory. Any recorded state can be
+ recreated by "updating" a working directory to a specific
+ changeset.
+
+Repository head
+ See 'Head, repository'.
+
+Revision
+ A state of the repository at some point in time. Earlier revisions
+ can be updated to by using :hg:`update`. See also 'Revision
+ number'; See also 'Changeset'.
+
+Revision number
+ This integer uniquely identifies a changeset in a specific
+ repository. It represents the order in which changesets were added
+ to a repository, starting with revision number 0. Note that the
+ revision number may be different in each clone of a repository. To
+ identify changesets uniquely between different clones, see
+ 'Changeset id'.
+
+Revlog
+ History storage mechanism used by Mercurial. It is a form of delta
+ encoding, with occasional full revision of data followed by delta
+ of each successive revision. It includes data and an index
+ pointing to the data.
+
+Rewriting history
+ See 'History, rewriting'.
+
+Root
+ A changeset that has only the null changeset as its parent. Most
+ repositories have only a single root changeset.
+
+Tag
+ An alternative name given to a changeset. Tags can be used in all
+ places where Mercurial expects a changeset ID, e.g., with
+ :hg:`update`. The creation of a tag is stored in the history and
+ will thus automatically be shared with others using push and pull.
+
+Tip
+ The changeset with the highest revision number. It is the changeset
+ most recently added in a repository.
+
+Tip, branch
+ The head of a given branch with the highest revision number. When
+ a branch name is used as a revision identifier, it refers to the
+ branch tip. See also 'Branch, head'. Note that because revision
+ numbers may be different in different repository clones, the
+ branch tip may be different in different cloned repositories.
+
+Update
+ (Noun) Another synonym of changeset.
+
+ Example: "I've pushed an update".
+
+ (Verb) This term is usually used to describe updating the state of
+ the working directory to that of a specific changeset. See
+ :hg:`help update`.
+
+ Example: "You should update".
+
+Working directory
+ See 'Directory, working'.
+
+Working directory parent
+ See 'Parent, working directory'.
diff --git a/websdk/mercurial/help/hgignore.txt b/websdk/mercurial/help/hgignore.txt
new file mode 100644
index 0000000..538f4b5
--- /dev/null
+++ b/websdk/mercurial/help/hgignore.txt
@@ -0,0 +1,80 @@
+Synopsis
+--------
+
+The Mercurial system uses a file called ``.hgignore`` in the root
+directory of a repository to control its behavior when it searches
+for files that it is not currently tracking.
+
+Description
+-----------
+
+The working directory of a Mercurial repository will often contain
+files that should not be tracked by Mercurial. These include backup
+files created by editors and build products created by compilers.
+These files can be ignored by listing them in a ``.hgignore`` file in
+the root of the working directory. The ``.hgignore`` file must be
+created manually. It is typically put under version control, so that
+the settings will propagate to other repositories with push and pull.
+
+An untracked file is ignored if its path relative to the repository
+root directory, or any prefix path of that path, is matched against
+any pattern in ``.hgignore``.
+
+For example, say we have an untracked file, ``file.c``, at
+``a/b/file.c`` inside our repository. Mercurial will ignore ``file.c``
+if any pattern in ``.hgignore`` matches ``a/b/file.c``, ``a/b`` or ``a``.
+
+In addition, a Mercurial configuration file can reference a set of
+per-user or global ignore files. See the ``ignore`` configuration
+key on the ``[ui]`` section of :hg:`help config` for details of how to
+configure these files.
+
+To control Mercurial's handling of files that it manages, many
+commands support the ``-I`` and ``-X`` options; see
+:hg:`help <command>` and :hg:`help patterns` for details.
+
+Syntax
+------
+
+An ignore file is a plain text file consisting of a list of patterns,
+with one pattern per line. Empty lines are skipped. The ``#``
+character is treated as a comment character, and the ``\`` character
+is treated as an escape character.
+
+Mercurial supports several pattern syntaxes. The default syntax used
+is Python/Perl-style regular expressions.
+
+To change the syntax used, use a line of the following form::
+
+ syntax: NAME
+
+where ``NAME`` is one of the following:
+
+``regexp``
+ Regular expression, Python/Perl syntax.
+``glob``
+ Shell-style glob.
+
+The chosen syntax stays in effect when parsing all patterns that
+follow, until another syntax is selected.
+
+Neither glob nor regexp patterns are rooted. A glob-syntax pattern of
+the form ``*.c`` will match a file ending in ``.c`` in any directory,
+and a regexp pattern of the form ``\.c$`` will do the same. To root a
+regexp pattern, start it with ``^``.
+
+Example
+-------
+
+Here is an example ignore file. ::
+
+ # use glob syntax.
+ syntax: glob
+
+ *.elc
+ *.pyc
+ *~
+
+ # switch to regexp syntax.
+ syntax: regexp
+ ^\.pc/
diff --git a/websdk/mercurial/help/hgweb.txt b/websdk/mercurial/help/hgweb.txt
new file mode 100644
index 0000000..fa901e6
--- /dev/null
+++ b/websdk/mercurial/help/hgweb.txt
@@ -0,0 +1,47 @@
+Mercurial's internal web server, hgweb, can serve either a single
+repository, or a collection of them. In the latter case, a special
+configuration file can be used to specify the repository paths to use
+and global web configuration options.
+
+This file uses the same syntax as other Mercurial configuration files,
+but only the following sections are recognized:
+
+ - web
+ - paths
+ - collections
+
+The ``web`` section can specify all the settings described in the web
+section of the hgrc(5) documentation. See :hg:`help config` for
+information on where to find the manual page.
+
+The ``paths`` section provides mappings of physical repository
+paths to virtual ones. For instance::
+
+ [paths]
+ projects/a = /foo/bar
+ projects/b = /baz/quux
+ web/root = /real/root/*
+ / = /real/root2/*
+ virtual/root2 = /real/root2/**
+
+- The first two entries make two repositories in different directories
+ appear under the same directory in the web interface
+- The third entry maps every Mercurial repository found in '/real/root'
+ into 'web/root'. This format is preferred over the [collections] one,
+ since using absolute paths as configuration keys is not supported on every
+ platform (especially on Windows).
+- The fourth entry is a special case mapping all repositories in
+ '/real/root2' in the root of the virtual directory.
+- The fifth entry recursively finds all repositories under the real
+ root, and maps their relative paths under the virtual root.
+
+The ``collections`` section provides mappings of trees of physical
+repository paths to virtual ones, though the paths syntax is generally
+preferred. For instance::
+
+ [collections]
+ /foo = /foo
+
+Here, the left side will be stripped off all repositories found in the
+right side. Thus ``/foo/bar`` and ``/foo/quux/baz`` will be listed as
+``bar`` and ``quux/baz`` respectively.
diff --git a/websdk/mercurial/help/merge-tools.txt b/websdk/mercurial/help/merge-tools.txt
new file mode 100644
index 0000000..7324fe4
--- /dev/null
+++ b/websdk/mercurial/help/merge-tools.txt
@@ -0,0 +1,110 @@
+To merge files Mercurial uses merge tools.
+
+A merge tool combines two different versions of a file into a merged
+file. Merge tools are given the two files and the greatest common
+ancestor of the two file versions, so they can determine the changes
+made on both branches.
+
+Merge tools are used both for :hg:`resolve`, :hg:`merge`, :hg:`update`,
+:hg:`backout` and in several extensions.
+
+Usually, the merge tool tries to automatically reconcile the files by
+combining all non-overlapping changes that occurred separately in
+the two different evolutions of the same initial base file. Furthermore, some
+interactive merge programs make it easier to manually resolve
+conflicting merges, either in a graphical way, or by inserting some
+conflict markers. Mercurial does not include any interactive merge
+programs but relies on external tools for that.
+
+Available merge tools
+"""""""""""""""""""""
+
+External merge tools and their properties are configured in the
+merge-tools configuration section - see hgrc(5) - but they can often just
+be named by their executable.
+
+A merge tool is generally usable if its executable can be found on the
+system and if it can handle the merge. The executable is found if it
+is an absolute or relative executable path or the name of an
+application in the executable search path. The tool is assumed to be
+able to handle the merge if it can handle symlinks if the file is a
+symlink, if it can handle binary files if the file is binary, and if a
+GUI is available if the tool requires a GUI.
+
+There are some internal merge tools which can be used. The internal
+merge tools are:
+
+``internal:merge``
+ Uses the internal non-interactive simple merge algorithm for merging
+ files. If there are any conflicts, it will fail, leaving markers in
+ the partially merged file.
+
+``internal:fail``
+ Rather than attempting to merge files that were modified on both
+ branches, it marks them as unresolved. The resolve command must be
+ used to resolve these conflicts.
+
+``internal:local``
+ Uses the local version of files as the merged version.
+
+``internal:other``
+ Uses the other version of files as the merged version.
+
+``internal:prompt``
+ Asks the user which of the local or the other version to keep as
+ the merged version.
+
+``internal:dump``
+ Creates three versions of the files to merge, containing the
+ contents of local, other and base. These files can then be used to
+ perform a merge manually. If the file to be merged is named
+ ``a.txt``, these files will accordingly be named ``a.txt.local``,
+ ``a.txt.other`` and ``a.txt.base`` and they will be placed in the
+ same directory as ``a.txt``.
+
+Internal tools are always available and do not require a GUI but will by default
+not handle symlinks or binary files.
+
+Choosing a merge tool
+"""""""""""""""""""""
+
+Mercurial uses these rules when deciding which merge tool to use:
+
+1. If a tool has been specified with the --tool option to merge or resolve, it
+ is used. If it is the name of a tool in the merge-tools configuration, its
+ configuration is used. Otherwise the specified tool must be executable by
+ the shell.
+
+2. If the ``HGMERGE`` environment variable is present, its value is used and
+ must be executable by the shell.
+
+3. If the filename of the file to be merged matches any of the patterns in the
+ merge-patterns configuration section, the first usable merge tool
+ corresponding to a matching pattern is used. Here, binary capabilities of the
+ merge tool are not considered.
+
+4. If ui.merge is set it will be considered next. If the value is not the name
+ of a configured tool, the specified value is used and must be executable by
+ the shell. Otherwise the named tool is used if it is usable.
+
+5. If any usable merge tools are present in the merge-tools configuration
+ section, the one with the highest priority is used.
+
+6. If a program named ``hgmerge`` can be found on the system, it is used - but
+ it will by default not be used for symlinks and binary files.
+
+7. If the file to be merged is not binary and is not a symlink, then
+ ``internal:merge`` is used.
+
+8. The merge of the file fails and must be resolved before commit.
+
+.. note::
+ After selecting a merge program, Mercurial will by default attempt
+ to merge the files using a simple merge algorithm first. Only if it doesn't
+ succeed because of conflicting changes will Mercurial actually execute the
+ merge program. Whether to use the simple merge algorithm first can be
+ controlled by the premerge setting of the merge tool. Premerge is enabled by
+ default unless the file is binary or a symlink.
+
+See the merge-tools and ui sections of hgrc(5) for details on the
+configuration of merge tools.
diff --git a/websdk/mercurial/help/multirevs.txt b/websdk/mercurial/help/multirevs.txt
new file mode 100644
index 0000000..c8a2833
--- /dev/null
+++ b/websdk/mercurial/help/multirevs.txt
@@ -0,0 +1,13 @@
+When Mercurial accepts more than one revision, they may be specified
+individually, or provided as a topologically continuous range,
+separated by the ":" character.
+
+The syntax of range notation is [BEGIN]:[END], where BEGIN and END are
+revision identifiers. Both BEGIN and END are optional. If BEGIN is not
+specified, it defaults to revision number 0. If END is not specified,
+it defaults to the tip. The range ":" thus means "all revisions".
+
+If BEGIN is greater than END, revisions are treated in reverse order.
+
+A range acts as a closed interval. This means that a range of 3:5
+gives 3, 4 and 5. Similarly, a range of 9:6 gives 9, 8, 7, and 6.
diff --git a/websdk/mercurial/help/patterns.txt b/websdk/mercurial/help/patterns.txt
new file mode 100644
index 0000000..c13453e
--- /dev/null
+++ b/websdk/mercurial/help/patterns.txt
@@ -0,0 +1,53 @@
+Mercurial accepts several notations for identifying one or more files
+at a time.
+
+By default, Mercurial treats filenames as shell-style extended glob
+patterns.
+
+Alternate pattern notations must be specified explicitly.
+
+To use a plain path name without any pattern matching, start it with
+``path:``. These path names must completely match starting at the
+current repository root.
+
+To use an extended glob, start a name with ``glob:``. Globs are rooted
+at the current directory; a glob such as ``*.c`` will only match files
+in the current directory ending with ``.c``.
+
+The supported glob syntax extensions are ``**`` to match any string
+across path separators and ``{a,b}`` to mean "a or b".
+
+To use a Perl/Python regular expression, start a name with ``re:``.
+Regexp pattern matching is anchored at the root of the repository.
+
+To read name patterns from a file, use ``listfile:`` or ``listfile0:``.
+The latter expects null delimited patterns while the former expects line
+feeds. Each string read from the file is itself treated as a file
+pattern.
+
+Plain examples::
+
+ path:foo/bar a name bar in a directory named foo in the root
+ of the repository
+ path:path:name a file or directory named "path:name"
+
+Glob examples::
+
+ glob:*.c any name ending in ".c" in the current directory
+ *.c any name ending in ".c" in the current directory
+ **.c any name ending in ".c" in any subdirectory of the
+ current directory including itself.
+ foo/*.c any name ending in ".c" in the directory foo
+ foo/**.c any name ending in ".c" in any subdirectory of foo
+ including itself.
+
+Regexp examples::
+
+ re:.*\.c$ any name ending in ".c", anywhere in the repository
+
+File examples::
+
+ listfile:list.txt read list from list.txt with one file pattern per line
+ listfile0:list.txt read list from list.txt with null byte delimiters
+
+See also :hg:`help filesets`.
diff --git a/websdk/mercurial/help/revisions.txt b/websdk/mercurial/help/revisions.txt
new file mode 100644
index 0000000..309f8e2
--- /dev/null
+++ b/websdk/mercurial/help/revisions.txt
@@ -0,0 +1,29 @@
+Mercurial supports several ways to specify individual revisions.
+
+A plain integer is treated as a revision number. Negative integers are
+treated as sequential offsets from the tip, with -1 denoting the tip,
+-2 denoting the revision prior to the tip, and so forth.
+
+A 40-digit hexadecimal string is treated as a unique revision
+identifier.
+
+A hexadecimal string less than 40 characters long is treated as a
+unique revision identifier and is referred to as a short-form
+identifier. A short-form identifier is only valid if it is the prefix
+of exactly one full-length identifier.
+
+Any other string is treated as a tag or branch name. A tag name is a
+symbolic name associated with a revision identifier. A branch name
+denotes the tipmost revision of that branch. Tag and branch names must
+not contain the ":" character.
+
+The reserved name "tip" is a special tag that always identifies the
+most recent revision.
+
+The reserved name "null" indicates the null revision. This is the
+revision of an empty repository, and the parent of revision 0.
+
+The reserved name "." indicates the working directory parent. If no
+working directory is checked out, it is equivalent to null. If an
+uncommitted merge is in progress, "." is the revision of the first
+parent.
diff --git a/websdk/mercurial/help/revsets.txt b/websdk/mercurial/help/revsets.txt
new file mode 100644
index 0000000..48838c3
--- /dev/null
+++ b/websdk/mercurial/help/revsets.txt
@@ -0,0 +1,122 @@
+Mercurial supports a functional language for selecting a set of
+revisions.
+
+The language supports a number of predicates which are joined by infix
+operators. Parentheses can be used for grouping.
+
+Identifiers such as branch names must be quoted with single or double
+quotes if they contain characters outside of
+``[._a-zA-Z0-9\x80-\xff]`` or if they match one of the predefined
+predicates.
+
+Special characters can be used in quoted identifiers by escaping them,
+e.g., ``\n`` is interpreted as a newline. To prevent them from being
+interpreted, strings can be prefixed with ``r``, e.g. ``r'...'``.
+
+There is a single prefix operator:
+
+``not x``
+ Changesets not in x. Short form is ``! x``.
+
+These are the supported infix operators:
+
+``x::y``
+ A DAG range, meaning all changesets that are descendants of x and
+ ancestors of y, including x and y themselves. If the first endpoint
+ is left out, this is equivalent to ``ancestors(y)``, if the second
+ is left out it is equivalent to ``descendants(x)``.
+
+ An alternative syntax is ``x..y``.
+
+``x:y``
+ All changesets with revision numbers between x and y, both
+ inclusive. Either endpoint can be left out, they default to 0 and
+ tip.
+
+``x and y``
+ The intersection of changesets in x and y. Short form is ``x & y``.
+
+``x or y``
+ The union of changesets in x and y. There are two alternative short
+ forms: ``x | y`` and ``x + y``.
+
+``x - y``
+ Changesets in x but not in y.
+
+``x^n``
+ The nth parent of x, n == 0, 1, or 2.
+ For n == 0, x; for n == 1, the first parent of each changeset in x;
+ for n == 2, the second parent of each changeset in x.
+
+``x~n``
+ The nth first ancestor of x; ``x~0`` is x; ``x~3`` is ``x^^^``.
+
+There is a single postfix operator:
+
+``x^``
+ Equivalent to ``x^1``, the first parent of each changeset in x.
+
+
+The following predicates are supported:
+
+.. predicatesmarker
+
+New predicates (known as "aliases") can be defined, using any combination of
+existing predicates or other aliases. An alias definition looks like::
+
+ <alias> = <definition>
+
+in the ``revsetalias`` section of a Mercurial configuration file. Arguments
+of the form `$1`, `$2`, etc. are substituted from the alias into the
+definition.
+
+For example,
+
+::
+
+ [revsetalias]
+ h = heads()
+ d($1) = sort($1, date)
+ rs($1, $2) = reverse(sort($1, $2))
+
+defines three aliases, ``h``, ``d``, and ``rs``. ``rs(0:tip, author)`` is
+exactly equivalent to ``reverse(sort(0:tip, author))``.
+
+Command line equivalents for :hg:`log`::
+
+ -f -> ::.
+ -d x -> date(x)
+ -k x -> keyword(x)
+ -m -> merge()
+ -u x -> user(x)
+ -b x -> branch(x)
+ -P x -> !::x
+ -l x -> limit(expr, x)
+
+Some sample queries:
+
+- Changesets on the default branch::
+
+ hg log -r "branch(default)"
+
+- Changesets on the default branch since tag 1.5 (excluding merges)::
+
+ hg log -r "branch(default) and 1.5:: and not merge()"
+
+- Open branch heads::
+
+ hg log -r "head() and not closed()"
+
+- Changesets between tags 1.3 and 1.5 mentioning "bug" that affect
+ ``hgext/*``::
+
+ hg log -r "1.3::1.5 and keyword(bug) and file('hgext/*')"
+
+- Changesets committed in May 2008, sorted by user::
+
+ hg log -r "sort(date('May 2008'), user)"
+
+- Changesets mentioning "bug" or "issue" that are not in a tagged
+ release::
+
+ hg log -r "(keyword(bug) or keyword(issue)) and not ancestors(tagged())"
diff --git a/websdk/mercurial/help/subrepos.txt b/websdk/mercurial/help/subrepos.txt
new file mode 100644
index 0000000..df14173
--- /dev/null
+++ b/websdk/mercurial/help/subrepos.txt
@@ -0,0 +1,135 @@
+Subrepositories let you nest external repositories or projects into a
+parent Mercurial repository, and make commands operate on them as a
+group.
+
+Mercurial currently supports Mercurial, Git, and Subversion
+subrepositories.
+
+Subrepositories are made of three components:
+
+1. Nested repository checkouts. They can appear anywhere in the
+ parent working directory.
+
+2. Nested repository references. They are defined in ``.hgsub`` and
+ tell where the subrepository checkouts come from. Mercurial
+ subrepositories are referenced like:
+
+ path/to/nested = https://example.com/nested/repo/path
+
+ Git and Subversion subrepos are also supported:
+
+ path/to/nested = [git]git://example.com/nested/repo/path
+ path/to/nested = [svn]https://example.com/nested/trunk/path
+
+ where ``path/to/nested`` is the checkout location relative to the
+ parent Mercurial root, and ``https://example.com/nested/repo/path``
+ is the source repository path. The source can also reference a
+ filesystem path.
+
+ Note that ``.hgsub`` does not exist by default in Mercurial
+ repositories, you have to create and add it to the parent
+ repository before using subrepositories.
+
+3. Nested repository states. They are defined in ``.hgsubstate`` and
+ capture whatever information is required to restore the
+ subrepositories to the state they were committed in a parent
+ repository changeset. Mercurial automatically records the nested
+ repositories' states when committing in the parent repository.
+
+ .. note::
+ The ``.hgsubstate`` file should not be edited manually.
+
+
+Adding a Subrepository
+----------------------
+
+If ``.hgsub`` does not exist, create it and add it to the parent
+repository. Clone or check out the external projects where you want them
+to live in the parent repository. Edit ``.hgsub`` and add the
+subrepository entry as described above. At this point, the
+subrepository is tracked and the next commit will record its state in
+``.hgsubstate`` and bind it to the committed changeset.
+
+Synchronizing a Subrepository
+-----------------------------
+
+Subrepos do not automatically track the latest changeset of their
+sources. Instead, they are updated to the changeset that corresponds
+with the changeset checked out in the top-level changeset. This is so
+developers always get a consistent set of compatible code and
+libraries when they update.
+
+Thus, updating subrepos is a manual process. Simply check out the target
+subrepo at the desired revision, test in the top-level repo, then
+commit in the parent repository to record the new combination.
+
+Deleting a Subrepository
+------------------------
+
+To remove a subrepository from the parent repository, delete its
+reference from ``.hgsub``, then remove its files.
+
+Interaction with Mercurial Commands
+-----------------------------------
+
+:add: add does not recurse in subrepos unless -S/--subrepos is
+ specified. Git and Subversion subrepositories are currently
+ silently ignored.
+
+:archive: archive does not recurse in subrepositories unless
+ -S/--subrepos is specified.
+
+:commit: commit creates a consistent snapshot of the state of the
+ entire project and its subrepositories. If any subrepositories
+ have been modified, Mercurial will abort. Mercurial can be made
+ to instead commit all modified subrepositories by specifying
+ -S/--subrepos, or setting "ui.commitsubrepos=True" in a
+ configuration file (see :hg:`help config`). After there are no
+ longer any modified subrepositories, it records their state and
+ finally commits it in the parent repository.
+
+:diff: diff does not recurse in subrepos unless -S/--subrepos is
+ specified. Changes are displayed as usual, on the subrepositories
+ elements. Git and Subversion subrepositories are currently
+ silently ignored.
+
+:incoming: incoming does not recurse in subrepos unless -S/--subrepos
+ is specified. Git and Subversion subrepositories are currently
+ silently ignored.
+
+:outgoing: outgoing does not recurse in subrepos unless -S/--subrepos
+ is specified. Git and Subversion subrepositories are currently
+ silently ignored.
+
+:pull: pull is not recursive since it is not clear what to pull prior
+ to running :hg:`update`. Listing and retrieving all
+ subrepositories changes referenced by the parent repository pulled
+ changesets is expensive at best, impossible in the Subversion
+ case.
+
+:push: Mercurial will automatically push all subrepositories first
+ when the parent repository is being pushed. This ensures new
+ subrepository changes are available when referenced by top-level
+ repositories. Push is a no-op for Subversion subrepositories.
+
+:status: status does not recurse into subrepositories unless
+ -S/--subrepos is specified. Subrepository changes are displayed as
+ regular Mercurial changes on the subrepository
+ elements. Subversion subrepositories are currently silently
+ ignored.
+
+:update: update restores the subrepos in the state they were
+ originally committed in target changeset. If the recorded
+ changeset is not available in the current subrepository, Mercurial
+ will pull it in first before updating. This means that updating
+ can require network access when using subrepositories.
+
+Remapping Subrepositories Sources
+---------------------------------
+
+A subrepository source location may change during a project's life,
+invalidating references stored in the parent repository history. To
+fix this, rewriting rules can be defined in parent repository ``hgrc``
+file or in Mercurial configuration. See the ``[subpaths]`` section in
+hgrc(5) for more details.
+
diff --git a/websdk/mercurial/help/templates.txt b/websdk/mercurial/help/templates.txt
new file mode 100644
index 0000000..ffc7b34
--- /dev/null
+++ b/websdk/mercurial/help/templates.txt
@@ -0,0 +1,40 @@
+Mercurial allows you to customize output of commands through
+templates. You can either pass in a template from the command
+line, via the --template option, or select an existing
+template-style (--style).
+
+You can customize output for any "log-like" command: log,
+outgoing, incoming, tip, parents, heads and glog.
+
+Four styles are packaged with Mercurial: default (the style used
+when no explicit preference is passed), compact, changelog,
+and xml.
+Usage::
+
+ $ hg log -r1 --style changelog
+
+A template is a piece of text, with markup to invoke variable
+expansion::
+
+ $ hg log -r1 --template "{node}\n"
+ b56ce7b07c52de7d5fd79fb89701ea538af65746
+
+Strings in curly braces are called keywords. The availability of
+keywords depends on the exact context of the templater. These
+keywords are usually available for templating a log-like command:
+
+.. keywordsmarker
+
+The "date" keyword does not produce human-readable output. If you
+want to use a date in your output, you can use a filter to process
+it. Filters are functions which return a string based on the input
+variable. Be sure to use the stringify filter first when you're
+applying a string-input filter to a list-like input variable.
+You can also use a chain of filters to get the desired output::
+
+ $ hg tip --template "{date|isodate}\n"
+ 2008-08-21 18:22 +0000
+
+List of filters:
+
+.. filtersmarker
diff --git a/websdk/mercurial/help/urls.txt b/websdk/mercurial/help/urls.txt
new file mode 100644
index 0000000..016d5bd
--- /dev/null
+++ b/websdk/mercurial/help/urls.txt
@@ -0,0 +1,66 @@
+Valid URLs are of the form::
+
+ local/filesystem/path[#revision]
+ file://local/filesystem/path[#revision]
+ http://[user[:pass]@]host[:port]/[path][#revision]
+ https://[user[:pass]@]host[:port]/[path][#revision]
+ ssh://[user@]host[:port]/[path][#revision]
+
+Paths in the local filesystem can either point to Mercurial
+repositories or to bundle files (as created by :hg:`bundle` or :hg:`
+incoming --bundle`). See also :hg:`help paths`.
+
+An optional identifier after # indicates a particular branch, tag, or
+changeset to use from the remote repository. See also :hg:`help
+revisions`.
+
+Some features, such as pushing to http:// and https:// URLs are only
+possible if the feature is explicitly enabled on the remote Mercurial
+server.
+
+Note that the security of HTTPS URLs depends on proper configuration of
+web.cacerts.
+
+Some notes about using SSH with Mercurial:
+
+- SSH requires an accessible shell account on the destination machine
+ and a copy of hg in the remote path or specified with remotecmd.
+- path is relative to the remote user's home directory by default. Use
+ an extra slash at the start of a path to specify an absolute path::
+
+ ssh://example.com//tmp/repository
+
+- Mercurial doesn't use its own compression via SSH; the right thing
+ to do is to configure it in your ~/.ssh/config, e.g.::
+
+ Host *.mylocalnetwork.example.com
+ Compression no
+ Host *
+ Compression yes
+
+ Alternatively specify "ssh -C" as your ssh command in your
+ configuration file or with the --ssh command line option.
+
+These URLs can all be stored in your configuration file with path
+aliases under the [paths] section like so::
+
+ [paths]
+ alias1 = URL1
+ alias2 = URL2
+ ...
+
+You can then use the alias for any command that uses a URL (for
+example :hg:`pull alias1` will be treated as :hg:`pull URL1`).
+
+Two path aliases are special because they are used as defaults when
+you do not provide the URL to a command:
+
+default:
+ When you create a repository with hg clone, the clone command saves
+ the location of the source repository as the new repository's
+ 'default' path. This is then used when you omit path from push- and
+ pull-like commands (including incoming and outgoing).
+
+default-push:
+ The push command will look for a path named 'default-push', and
+ prefer it over 'default' if both are defined.
diff --git a/websdk/mercurial/hg.py b/websdk/mercurial/hg.py
index 061265c..bea0424 120000..100644
--- a/websdk/mercurial/hg.py
+++ b/websdk/mercurial/hg.py
@@ -1 +1,581 @@
-/usr/share/pyshared/mercurial/hg.py \ No newline at end of file
+# hg.py - repository classes for mercurial
+#
+# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from i18n import _
+from lock import release
+from node import hex, nullid
+import localrepo, bundlerepo, httprepo, sshrepo, statichttprepo, bookmarks
+import lock, util, extensions, error, node
+import cmdutil, discovery
+import merge as mergemod
+import verify as verifymod
+import errno, os, shutil
+
+def _local(path):
+ path = util.expandpath(util.urllocalpath(path))
+ return (os.path.isfile(path) and bundlerepo or localrepo)
+
+def addbranchrevs(lrepo, repo, branches, revs):
+ hashbranch, branches = branches
+ if not hashbranch and not branches:
+ return revs or None, revs and revs[0] or None
+ revs = revs and list(revs) or []
+ if not repo.capable('branchmap'):
+ if branches:
+ raise util.Abort(_("remote branch lookup not supported"))
+ revs.append(hashbranch)
+ return revs, revs[0]
+ branchmap = repo.branchmap()
+
+ def primary(branch):
+ if branch == '.':
+ if not lrepo or not lrepo.local():
+ raise util.Abort(_("dirstate branch not accessible"))
+ branch = lrepo.dirstate.branch()
+ if branch in branchmap:
+ revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
+ return True
+ else:
+ return False
+
+ for branch in branches:
+ if not primary(branch):
+ raise error.RepoLookupError(_("unknown branch '%s'") % branch)
+ if hashbranch:
+ if not primary(hashbranch):
+ revs.append(hashbranch)
+ return revs, revs[0]
+
+def parseurl(path, branches=None):
+ '''parse url#branch, returning (url, (branch, branches))'''
+
+ u = util.url(path)
+ branch = None
+ if u.fragment:
+ branch = u.fragment
+ u.fragment = None
+ return str(u), (branch, branches or [])
+
+schemes = {
+ 'bundle': bundlerepo,
+ 'file': _local,
+ 'http': httprepo,
+ 'https': httprepo,
+ 'ssh': sshrepo,
+ 'static-http': statichttprepo,
+}
+
+def _peerlookup(path):
+ u = util.url(path)
+ scheme = u.scheme or 'file'
+ thing = schemes.get(scheme) or schemes['file']
+ try:
+ return thing(path)
+ except TypeError:
+ return thing
+
+def islocal(repo):
+ '''return true if repo or path is local'''
+ if isinstance(repo, str):
+ try:
+ return _peerlookup(repo).islocal(repo)
+ except AttributeError:
+ return False
+ return repo.local()
+
+def repository(ui, path='', create=False):
+ """return a repository object for the specified path"""
+ repo = _peerlookup(path).instance(ui, path, create)
+ ui = getattr(repo, "ui", ui)
+ for name, module in extensions.extensions():
+ hook = getattr(module, 'reposetup', None)
+ if hook:
+ hook(ui, repo)
+ return repo
+
+def peer(uiorrepo, opts, path, create=False):
+ '''return a repository peer for the specified path'''
+ rui = remoteui(uiorrepo, opts)
+ return repository(rui, path, create)
+
+def defaultdest(source):
+ '''return default destination of clone if none is given'''
+ return os.path.basename(os.path.normpath(source))
+
+def share(ui, source, dest=None, update=True):
+ '''create a shared repository'''
+
+ if not islocal(source):
+ raise util.Abort(_('can only share local repositories'))
+
+ if not dest:
+ dest = defaultdest(source)
+ else:
+ dest = ui.expandpath(dest)
+
+ if isinstance(source, str):
+ origsource = ui.expandpath(source)
+ source, branches = parseurl(origsource)
+ srcrepo = repository(ui, source)
+ rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
+ else:
+ srcrepo = source
+ origsource = source = srcrepo.url()
+ checkout = None
+
+ sharedpath = srcrepo.sharedpath # if our source is already sharing
+
+ root = os.path.realpath(dest)
+ roothg = os.path.join(root, '.hg')
+
+ if os.path.exists(roothg):
+ raise util.Abort(_('destination already exists'))
+
+ if not os.path.isdir(root):
+ os.mkdir(root)
+ util.makedir(roothg, notindexed=True)
+
+ requirements = ''
+ try:
+ requirements = srcrepo.opener.read('requires')
+ except IOError, inst:
+ if inst.errno != errno.ENOENT:
+ raise
+
+ requirements += 'shared\n'
+ util.writefile(os.path.join(roothg, 'requires'), requirements)
+ util.writefile(os.path.join(roothg, 'sharedpath'), sharedpath)
+
+ r = repository(ui, root)
+
+ default = srcrepo.ui.config('paths', 'default')
+ if default:
+ fp = r.opener("hgrc", "w", text=True)
+ fp.write("[paths]\n")
+ fp.write("default = %s\n" % default)
+ fp.close()
+
+ if update:
+ r.ui.status(_("updating working directory\n"))
+ if update is not True:
+ checkout = update
+ for test in (checkout, 'default', 'tip'):
+ if test is None:
+ continue
+ try:
+ uprev = r.lookup(test)
+ break
+ except error.RepoLookupError:
+ continue
+ _update(r, uprev)
+
+def copystore(ui, srcrepo, destpath):
+ '''copy files from store of srcrepo in destpath
+
+ returns destlock
+ '''
+ destlock = None
+ try:
+ hardlink = None
+ num = 0
+ for f in srcrepo.store.copylist():
+ src = os.path.join(srcrepo.sharedpath, f)
+ dst = os.path.join(destpath, f)
+ dstbase = os.path.dirname(dst)
+ if dstbase and not os.path.exists(dstbase):
+ os.mkdir(dstbase)
+ if os.path.exists(src):
+ if dst.endswith('data'):
+ # lock to avoid premature writing to the target
+ destlock = lock.lock(os.path.join(dstbase, "lock"))
+ hardlink, n = util.copyfiles(src, dst, hardlink)
+ num += n
+ if hardlink:
+ ui.debug("linked %d files\n" % num)
+ else:
+ ui.debug("copied %d files\n" % num)
+ return destlock
+ except:
+ release(destlock)
+ raise
+
+def clone(ui, peeropts, source, dest=None, pull=False, rev=None,
+ update=True, stream=False, branch=None):
+ """Make a copy of an existing repository.
+
+ Create a copy of an existing repository in a new directory. The
+ source and destination are URLs, as passed to the repository
+ function. Returns a pair of repository objects, the source and
+ newly created destination.
+
+ The location of the source is added to the new repository's
+ .hg/hgrc file, as the default to be used for future pulls and
+ pushes.
+
+ If an exception is raised, the partly cloned/updated destination
+ repository will be deleted.
+
+ Arguments:
+
+ source: repository object or URL
+
+ dest: URL of destination repository to create (defaults to base
+ name of source repository)
+
+ pull: always pull from source repository, even in local case
+
+ stream: stream raw data uncompressed from repository (fast over
+ LAN, slow over WAN)
+
+ rev: revision to clone up to (implies pull=True)
+
+ update: update working directory after clone completes, if
+ destination is local repository (True means update to default rev,
+ anything else is treated as a revision)
+
+ branch: branches to clone
+ """
+
+ if isinstance(source, str):
+ origsource = ui.expandpath(source)
+ source, branch = parseurl(origsource, branch)
+ srcrepo = repository(remoteui(ui, peeropts), source)
+ else:
+ srcrepo = source
+ branch = (None, branch or [])
+ origsource = source = srcrepo.url()
+ rev, checkout = addbranchrevs(srcrepo, srcrepo, branch, rev)
+
+ if dest is None:
+ dest = defaultdest(source)
+ ui.status(_("destination directory: %s\n") % dest)
+ else:
+ dest = ui.expandpath(dest)
+
+ dest = util.urllocalpath(dest)
+ source = util.urllocalpath(source)
+
+ if os.path.exists(dest):
+ if not os.path.isdir(dest):
+ raise util.Abort(_("destination '%s' already exists") % dest)
+ elif os.listdir(dest):
+ raise util.Abort(_("destination '%s' is not empty") % dest)
+
+ class DirCleanup(object):
+ def __init__(self, dir_):
+ self.rmtree = shutil.rmtree
+ self.dir_ = dir_
+ def close(self):
+ self.dir_ = None
+ def cleanup(self):
+ if self.dir_:
+ self.rmtree(self.dir_, True)
+
+ srclock = destlock = dircleanup = None
+ try:
+ abspath = origsource
+ if islocal(origsource):
+ abspath = os.path.abspath(util.urllocalpath(origsource))
+
+ if islocal(dest):
+ dircleanup = DirCleanup(dest)
+
+ copy = False
+ if srcrepo.cancopy() and islocal(dest):
+ copy = not pull and not rev
+
+ if copy:
+ try:
+ # we use a lock here because if we race with commit, we
+ # can end up with extra data in the cloned revlogs that's
+ # not pointed to by changesets, thus causing verify to
+ # fail
+ srclock = srcrepo.lock(wait=False)
+ except error.LockError:
+ copy = False
+
+ if copy:
+ srcrepo.hook('preoutgoing', throw=True, source='clone')
+ hgdir = os.path.realpath(os.path.join(dest, ".hg"))
+ if not os.path.exists(dest):
+ os.mkdir(dest)
+ else:
+ # only clean up directories we create ourselves
+ dircleanup.dir_ = hgdir
+ try:
+ destpath = hgdir
+ util.makedir(destpath, notindexed=True)
+ except OSError, inst:
+ if inst.errno == errno.EEXIST:
+ dircleanup.close()
+ raise util.Abort(_("destination '%s' already exists")
+ % dest)
+ raise
+
+ destlock = copystore(ui, srcrepo, destpath)
+
+ # we need to re-init the repo after manually copying the data
+ # into it
+ destrepo = repository(remoteui(ui, peeropts), dest)
+ srcrepo.hook('outgoing', source='clone',
+ node=node.hex(node.nullid))
+ else:
+ try:
+ destrepo = repository(remoteui(ui, peeropts), dest,
+ create=True)
+ except OSError, inst:
+ if inst.errno == errno.EEXIST:
+ dircleanup.close()
+ raise util.Abort(_("destination '%s' already exists")
+ % dest)
+ raise
+
+ revs = None
+ if rev:
+ if not srcrepo.capable('lookup'):
+ raise util.Abort(_("src repository does not support "
+ "revision lookup and so doesn't "
+ "support clone by revision"))
+ revs = [srcrepo.lookup(r) for r in rev]
+ checkout = revs[0]
+ if destrepo.local():
+ destrepo.clone(srcrepo, heads=revs, stream=stream)
+ elif srcrepo.local():
+ srcrepo.push(destrepo, revs=revs)
+ else:
+ raise util.Abort(_("clone from remote to remote not supported"))
+
+ if dircleanup:
+ dircleanup.close()
+
+ if destrepo.local():
+ fp = destrepo.opener("hgrc", "w", text=True)
+ fp.write("[paths]\n")
+ u = util.url(abspath)
+ u.passwd = None
+ defaulturl = str(u)
+ fp.write("default = %s\n" % defaulturl)
+ fp.close()
+
+ destrepo.ui.setconfig('paths', 'default', defaulturl)
+
+ if update:
+ if update is not True:
+ checkout = update
+ if srcrepo.local():
+ checkout = srcrepo.lookup(update)
+ for test in (checkout, 'default', 'tip'):
+ if test is None:
+ continue
+ try:
+ uprev = destrepo.lookup(test)
+ break
+ except error.RepoLookupError:
+ continue
+ bn = destrepo[uprev].branch()
+ destrepo.ui.status(_("updating to branch %s\n") % bn)
+ _update(destrepo, uprev)
+
+ # clone all bookmarks
+ if destrepo.local() and srcrepo.capable("pushkey"):
+ rb = srcrepo.listkeys('bookmarks')
+ for k, n in rb.iteritems():
+ try:
+ m = destrepo.lookup(n)
+ destrepo._bookmarks[k] = m
+ except error.RepoLookupError:
+ pass
+ if rb:
+ bookmarks.write(destrepo)
+ elif srcrepo.local() and destrepo.capable("pushkey"):
+ for k, n in srcrepo._bookmarks.iteritems():
+ destrepo.pushkey('bookmarks', k, '', hex(n))
+
+ return srcrepo, destrepo
+ finally:
+ release(srclock, destlock)
+ if dircleanup is not None:
+ dircleanup.cleanup()
+
+def _showstats(repo, stats):
+ repo.ui.status(_("%d files updated, %d files merged, "
+ "%d files removed, %d files unresolved\n") % stats)
+
+def update(repo, node):
+ """update the working directory to node, merging linear changes"""
+ stats = mergemod.update(repo, node, False, False, None)
+ _showstats(repo, stats)
+ if stats[3]:
+ repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
+ return stats[3] > 0
+
+# naming conflict in clone()
+_update = update
+
+def clean(repo, node, show_stats=True):
+ """forcibly switch the working directory to node, clobbering changes"""
+ stats = mergemod.update(repo, node, False, True, None)
+ if show_stats:
+ _showstats(repo, stats)
+ return stats[3] > 0
+
+def merge(repo, node, force=None, remind=True):
+ """Branch merge with node, resolving changes. Return true if any
+ unresolved conflicts."""
+ stats = mergemod.update(repo, node, True, force, False)
+ _showstats(repo, stats)
+ if stats[3]:
+ repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
+ "or 'hg update -C .' to abandon\n"))
+ elif remind:
+ repo.ui.status(_("(branch merge, don't forget to commit)\n"))
+ return stats[3] > 0
+
+def _incoming(displaychlist, subreporecurse, ui, repo, source,
+ opts, buffered=False):
+ """
+ Helper for incoming / gincoming.
+ displaychlist gets called with
+ (remoterepo, incomingchangesetlist, displayer) parameters,
+ and is supposed to contain only code that can't be unified.
+ """
+ source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
+ other = peer(repo, opts, source)
+ ui.status(_('comparing with %s\n') % util.hidepassword(source))
+ revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))
+
+ if revs:
+ revs = [other.lookup(rev) for rev in revs]
+ other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other,
+ revs, opts["bundle"], opts["force"])
+ try:
+ if not chlist:
+ ui.status(_("no changes found\n"))
+ return subreporecurse()
+
+ displayer = cmdutil.show_changeset(ui, other, opts, buffered)
+
+ # XXX once graphlog extension makes it into core,
+ # should be replaced by a if graph/else
+ displaychlist(other, chlist, displayer)
+
+ displayer.close()
+ finally:
+ cleanupfn()
+ subreporecurse()
+ return 0 # exit code is zero since we found incoming changes
+
+def incoming(ui, repo, source, opts):
+ def subreporecurse():
+ ret = 1
+ if opts.get('subrepos'):
+ ctx = repo[None]
+ for subpath in sorted(ctx.substate):
+ sub = ctx.sub(subpath)
+ ret = min(ret, sub.incoming(ui, source, opts))
+ return ret
+
+ def display(other, chlist, displayer):
+ limit = cmdutil.loglimit(opts)
+ if opts.get('newest_first'):
+ chlist.reverse()
+ count = 0
+ for n in chlist:
+ if limit is not None and count >= limit:
+ break
+ parents = [p for p in other.changelog.parents(n) if p != nullid]
+ if opts.get('no_merges') and len(parents) == 2:
+ continue
+ count += 1
+ displayer.show(other[n])
+ return _incoming(display, subreporecurse, ui, repo, source, opts)
+
+def _outgoing(ui, repo, dest, opts):
+ dest = ui.expandpath(dest or 'default-push', dest or 'default')
+ dest, branches = parseurl(dest, opts.get('branch'))
+ ui.status(_('comparing with %s\n') % util.hidepassword(dest))
+ revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev'))
+ if revs:
+ revs = [repo.lookup(rev) for rev in revs]
+
+ other = peer(repo, opts, dest)
+ common, outheads = discovery.findcommonoutgoing(repo, other, revs,
+ force=opts.get('force'))
+ o = repo.changelog.findmissing(common, outheads)
+ if not o:
+ ui.status(_("no changes found\n"))
+ return None
+ return o
+
+def outgoing(ui, repo, dest, opts):
+ def recurse():
+ ret = 1
+ if opts.get('subrepos'):
+ ctx = repo[None]
+ for subpath in sorted(ctx.substate):
+ sub = ctx.sub(subpath)
+ ret = min(ret, sub.outgoing(ui, dest, opts))
+ return ret
+
+ limit = cmdutil.loglimit(opts)
+ o = _outgoing(ui, repo, dest, opts)
+ if o is None:
+ return recurse()
+
+ if opts.get('newest_first'):
+ o.reverse()
+ displayer = cmdutil.show_changeset(ui, repo, opts)
+ count = 0
+ for n in o:
+ if limit is not None and count >= limit:
+ break
+ parents = [p for p in repo.changelog.parents(n) if p != nullid]
+ if opts.get('no_merges') and len(parents) == 2:
+ continue
+ count += 1
+ displayer.show(repo[n])
+ displayer.close()
+ recurse()
+ return 0 # exit code is zero since we found outgoing changes
+
+def revert(repo, node, choose):
+ """revert changes to revision in node without updating dirstate"""
+ return mergemod.update(repo, node, False, True, choose)[3] > 0
+
+def verify(repo):
+ """verify the consistency of a repository"""
+ return verifymod.verify(repo)
+
+def remoteui(src, opts):
+ 'build a remote ui from ui or repo and opts'
+ if util.safehasattr(src, 'baseui'): # looks like a repository
+ dst = src.baseui.copy() # drop repo-specific config
+ src = src.ui # copy target options from repo
+ else: # assume it's a global ui object
+ dst = src.copy() # keep all global options
+
+ # copy ssh-specific options
+ for o in 'ssh', 'remotecmd':
+ v = opts.get(o) or src.config('ui', o)
+ if v:
+ dst.setconfig("ui", o, v)
+
+ # copy bundle-specific options
+ r = src.config('bundle', 'mainreporoot')
+ if r:
+ dst.setconfig('bundle', 'mainreporoot', r)
+
+ # copy selected local settings to the remote ui
+ for sect in ('auth', 'hostfingerprints', 'http_proxy'):
+ for key, val in src.configitems(sect):
+ dst.setconfig(sect, key, val)
+ v = src.config('web', 'cacerts')
+ if v:
+ dst.setconfig('web', 'cacerts', util.expandpath(v))
+
+ return dst
diff --git a/websdk/mercurial/hgweb/__init__.py b/websdk/mercurial/hgweb/__init__.py
index 4131dec..dd4d089 120000..100644
--- a/websdk/mercurial/hgweb/__init__.py
+++ b/websdk/mercurial/hgweb/__init__.py
@@ -1 +1,31 @@
-/usr/share/pyshared/mercurial/hgweb/__init__.py \ No newline at end of file
+# hgweb/__init__.py - web interface to a mercurial repository
+#
+# Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
+# Copyright 2005 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+import os
+import hgweb_mod, hgwebdir_mod
+
+def hgweb(config, name=None, baseui=None):
+ '''create an hgweb wsgi object
+
+ config can be one of:
+ - repo object (single repo view)
+ - path to repo (single repo view)
+ - path to config file (multi-repo view)
+ - dict of virtual:real pairs (multi-repo view)
+ - list of virtual:real tuples (multi-repo view)
+ '''
+
+ if ((isinstance(config, str) and not os.path.isdir(config)) or
+ isinstance(config, dict) or isinstance(config, list)):
+ # create a multi-dir interface
+ return hgwebdir_mod.hgwebdir(config, baseui=baseui)
+ return hgweb_mod.hgweb(config, name=name, baseui=baseui)
+
+def hgwebdir(config, baseui=None):
+ return hgwebdir_mod.hgwebdir(config, baseui=baseui)
+
diff --git a/websdk/mercurial/hgweb/common.py b/websdk/mercurial/hgweb/common.py
index 6bb6c19..2c2c76c 120000..100644
--- a/websdk/mercurial/hgweb/common.py
+++ b/websdk/mercurial/hgweb/common.py
@@ -1 +1,186 @@
-/usr/share/pyshared/mercurial/hgweb/common.py \ No newline at end of file
+# hgweb/common.py - Utility functions needed by hgweb_mod and hgwebdir_mod
+#
+# Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
+# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+import errno, mimetypes, os
+
+HTTP_OK = 200
+HTTP_NOT_MODIFIED = 304
+HTTP_BAD_REQUEST = 400
+HTTP_UNAUTHORIZED = 401
+HTTP_FORBIDDEN = 403
+HTTP_NOT_FOUND = 404
+HTTP_METHOD_NOT_ALLOWED = 405
+HTTP_SERVER_ERROR = 500
+
+
+def checkauthz(hgweb, req, op):
+ '''Check permission for operation based on request data (including
+ authentication info). Return if op allowed, else raise an ErrorResponse
+ exception.'''
+
+ user = req.env.get('REMOTE_USER')
+
+ deny_read = hgweb.configlist('web', 'deny_read')
+ if deny_read and (not user or deny_read == ['*'] or user in deny_read):
+ raise ErrorResponse(HTTP_UNAUTHORIZED, 'read not authorized')
+
+ allow_read = hgweb.configlist('web', 'allow_read')
+ result = (not allow_read) or (allow_read == ['*'])
+ if not (result or user in allow_read):
+ raise ErrorResponse(HTTP_UNAUTHORIZED, 'read not authorized')
+
+ if op == 'pull' and not hgweb.allowpull:
+ raise ErrorResponse(HTTP_UNAUTHORIZED, 'pull not authorized')
+ elif op == 'pull' or op is None: # op is None for interface requests
+ return
+
+ # enforce that you can only push using POST requests
+ if req.env['REQUEST_METHOD'] != 'POST':
+ msg = 'push requires POST request'
+ raise ErrorResponse(HTTP_METHOD_NOT_ALLOWED, msg)
+
+ # require ssl by default for pushing, auth info cannot be sniffed
+ # and replayed
+ scheme = req.env.get('wsgi.url_scheme')
+ if hgweb.configbool('web', 'push_ssl', True) and scheme != 'https':
+ raise ErrorResponse(HTTP_OK, 'ssl required')
+
+ deny = hgweb.configlist('web', 'deny_push')
+ if deny and (not user or deny == ['*'] or user in deny):
+ raise ErrorResponse(HTTP_UNAUTHORIZED, 'push not authorized')
+
+ allow = hgweb.configlist('web', 'allow_push')
+ result = allow and (allow == ['*'] or user in allow)
+ if not result:
+ raise ErrorResponse(HTTP_UNAUTHORIZED, 'push not authorized')
+
+# Hooks for hgweb permission checks; extensions can add hooks here.
+# Each hook is invoked like this: hook(hgweb, request, operation),
+# where operation is either read, pull or push. Hooks should either
+# raise an ErrorResponse exception, or just return.
+#
+# It is possible to do both authentication and authorization through
+# this.
+permhooks = [checkauthz]
+
+
+class ErrorResponse(Exception):
+ def __init__(self, code, message=None, headers=[]):
+ if message is None:
+ message = _statusmessage(code)
+ Exception.__init__(self)
+ self.code = code
+ self.message = message
+ self.headers = headers
+ def __str__(self):
+ return self.message
+
+class continuereader(object):
+ def __init__(self, f, write):
+ self.f = f
+ self._write = write
+ self.continued = False
+
+ def read(self, amt=-1):
+ if not self.continued:
+ self.continued = True
+ self._write('HTTP/1.1 100 Continue\r\n\r\n')
+ return self.f.read(amt)
+
+ def __getattr__(self, attr):
+ if attr in ('close', 'readline', 'readlines', '__iter__'):
+ return getattr(self.f, attr)
+ raise AttributeError()
+
+def _statusmessage(code):
+ from BaseHTTPServer import BaseHTTPRequestHandler
+ responses = BaseHTTPRequestHandler.responses
+ return responses.get(code, ('Error', 'Unknown error'))[0]
+
+def statusmessage(code, message=None):
+ return '%d %s' % (code, message or _statusmessage(code))
+
+def get_stat(spath):
+ """stat changelog if it exists, spath otherwise"""
+ cl_path = os.path.join(spath, "00changelog.i")
+ if os.path.exists(cl_path):
+ return os.stat(cl_path)
+ else:
+ return os.stat(spath)
+
+def get_mtime(spath):
+ return get_stat(spath).st_mtime
+
+def staticfile(directory, fname, req):
+ """return a file inside directory with guessed Content-Type header
+
+ fname always uses '/' as directory separator and isn't allowed to
+ contain unusual path components.
+ Content-Type is guessed using the mimetypes module.
+ Return an empty string if fname is illegal or file not found.
+
+ """
+ parts = fname.split('/')
+ for part in parts:
+ if (part in ('', os.curdir, os.pardir) or
+ os.sep in part or os.altsep is not None and os.altsep in part):
+ return ""
+ fpath = os.path.join(*parts)
+ if isinstance(directory, str):
+ directory = [directory]
+ for d in directory:
+ path = os.path.join(d, fpath)
+ if os.path.exists(path):
+ break
+ try:
+ os.stat(path)
+ ct = mimetypes.guess_type(path)[0] or "text/plain"
+ req.respond(HTTP_OK, ct, length = os.path.getsize(path))
+ fp = open(path, 'rb')
+ data = fp.read()
+ fp.close()
+ return data
+ except TypeError:
+ raise ErrorResponse(HTTP_SERVER_ERROR, 'illegal filename')
+ except OSError, err:
+ if err.errno == errno.ENOENT:
+ raise ErrorResponse(HTTP_NOT_FOUND)
+ else:
+ raise ErrorResponse(HTTP_SERVER_ERROR, err.strerror)
+
+def paritygen(stripecount, offset=0):
+ """count parity of horizontal stripes for easier reading"""
+ if stripecount and offset:
+ # account for offset, e.g. due to building the list in reverse
+ count = (stripecount + offset) % stripecount
+ parity = (stripecount + offset) / stripecount & 1
+ else:
+ count = 0
+ parity = 0
+ while True:
+ yield parity
+ count += 1
+ if stripecount and count >= stripecount:
+ parity = 1 - parity
+ count = 0
+
+def get_contact(config):
+ """Return repo contact information or empty string.
+
+ web.contact is the primary source, but if that is not set, try
+ ui.username or $EMAIL as a fallback to display something useful.
+ """
+ return (config("web", "contact") or
+ config("ui", "username") or
+ os.environ.get("EMAIL") or "")
+
+def caching(web, req):
+ tag = str(web.mtime)
+ if req.env.get('HTTP_IF_NONE_MATCH') == tag:
+ raise ErrorResponse(HTTP_NOT_MODIFIED)
+ req.headers.append(('ETag', tag))
diff --git a/websdk/mercurial/hgweb/hgweb_mod.py b/websdk/mercurial/hgweb/hgweb_mod.py
index 473f623..9f8f3ff 120000..100644
--- a/websdk/mercurial/hgweb/hgweb_mod.py
+++ b/websdk/mercurial/hgweb/hgweb_mod.py
@@ -1 +1,305 @@
-/usr/share/pyshared/mercurial/hgweb/hgweb_mod.py \ No newline at end of file
+# hgweb/hgweb_mod.py - Web interface for a repository.
+#
+# Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
+# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+import os
+from mercurial import ui, hg, hook, error, encoding, templater, util
+from common import get_stat, ErrorResponse, permhooks, caching
+from common import HTTP_OK, HTTP_NOT_MODIFIED, HTTP_BAD_REQUEST
+from common import HTTP_NOT_FOUND, HTTP_SERVER_ERROR
+from request import wsgirequest
+import webcommands, protocol, webutil
+
+perms = {
+ 'changegroup': 'pull',
+ 'changegroupsubset': 'pull',
+ 'getbundle': 'pull',
+ 'stream_out': 'pull',
+ 'listkeys': 'pull',
+ 'unbundle': 'push',
+ 'pushkey': 'push',
+}
+
+class hgweb(object):
+ def __init__(self, repo, name=None, baseui=None):
+ if isinstance(repo, str):
+ if baseui:
+ u = baseui.copy()
+ else:
+ u = ui.ui()
+ self.repo = hg.repository(u, repo)
+ else:
+ self.repo = repo
+
+ self.repo.ui.setconfig('ui', 'report_untrusted', 'off')
+ self.repo.ui.setconfig('ui', 'interactive', 'off')
+ hook.redirect(True)
+ self.mtime = -1
+ self.size = -1
+ self.reponame = name
+ self.archives = 'zip', 'gz', 'bz2'
+ self.stripecount = 1
+ # a repo owner may set web.templates in .hg/hgrc to get any file
+ # readable by the user running the CGI script
+ self.templatepath = self.config('web', 'templates')
+
+ # The CGI scripts are often run by a user different from the repo owner.
+ # Trust the settings from the .hg/hgrc files by default.
+ def config(self, section, name, default=None, untrusted=True):
+ return self.repo.ui.config(section, name, default,
+ untrusted=untrusted)
+
+ def configbool(self, section, name, default=False, untrusted=True):
+ return self.repo.ui.configbool(section, name, default,
+ untrusted=untrusted)
+
+ def configlist(self, section, name, default=None, untrusted=True):
+ return self.repo.ui.configlist(section, name, default,
+ untrusted=untrusted)
+
+ def refresh(self, request=None):
+ if request:
+ self.repo.ui.environ = request.env
+ st = get_stat(self.repo.spath)
+ # compare changelog size in addition to mtime to catch
+ # rollbacks made less than a second ago
+ if st.st_mtime != self.mtime or st.st_size != self.size:
+ self.mtime = st.st_mtime
+ self.size = st.st_size
+ self.repo = hg.repository(self.repo.ui, self.repo.root)
+ self.maxchanges = int(self.config("web", "maxchanges", 10))
+ self.stripecount = int(self.config("web", "stripes", 1))
+ self.maxshortchanges = int(self.config("web", "maxshortchanges", 60))
+ self.maxfiles = int(self.config("web", "maxfiles", 10))
+ self.allowpull = self.configbool("web", "allowpull", True)
+ encoding.encoding = self.config("web", "encoding",
+ encoding.encoding)
+
+ def run(self):
+ if not os.environ.get('GATEWAY_INTERFACE', '').startswith("CGI/1."):
+ raise RuntimeError("This function is only intended to be "
+ "called while running as a CGI script.")
+ import mercurial.hgweb.wsgicgi as wsgicgi
+ wsgicgi.launch(self)
+
+ def __call__(self, env, respond):
+ req = wsgirequest(env, respond)
+ return self.run_wsgi(req)
+
+ def run_wsgi(self, req):
+
+ self.refresh(req)
+
+ # work with CGI variables to create coherent structure
+ # use SCRIPT_NAME, PATH_INFO and QUERY_STRING as well as our REPO_NAME
+
+ req.url = req.env['SCRIPT_NAME']
+ if not req.url.endswith('/'):
+ req.url += '/'
+ if 'REPO_NAME' in req.env:
+ req.url += req.env['REPO_NAME'] + '/'
+
+ if 'PATH_INFO' in req.env:
+ parts = req.env['PATH_INFO'].strip('/').split('/')
+ repo_parts = req.env.get('REPO_NAME', '').split('/')
+ if parts[:len(repo_parts)] == repo_parts:
+ parts = parts[len(repo_parts):]
+ query = '/'.join(parts)
+ else:
+ query = req.env['QUERY_STRING'].split('&', 1)[0]
+ query = query.split(';', 1)[0]
+
+ # process this if it's a protocol request
+ # protocol bits don't need to create any URLs
+ # and the clients always use the old URL structure
+
+ cmd = req.form.get('cmd', [''])[0]
+ if protocol.iscmd(cmd):
+ try:
+ if query:
+ raise ErrorResponse(HTTP_NOT_FOUND)
+ if cmd in perms:
+ self.check_perm(req, perms[cmd])
+ return protocol.call(self.repo, req, cmd)
+ except ErrorResponse, inst:
+ # A client that sends unbundle without 100-continue will
+ # break if we respond early.
+ if (cmd == 'unbundle' and
+ (req.env.get('HTTP_EXPECT',
+ '').lower() != '100-continue') or
+ req.env.get('X-HgHttp2', '')):
+ req.drain()
+ req.respond(inst, protocol.HGTYPE)
+ return '0\n%s\n' % inst.message
+
+ # translate user-visible url structure to internal structure
+
+ args = query.split('/', 2)
+ if 'cmd' not in req.form and args and args[0]:
+
+ cmd = args.pop(0)
+ style = cmd.rfind('-')
+ if style != -1:
+ req.form['style'] = [cmd[:style]]
+ cmd = cmd[style + 1:]
+
+ # avoid accepting e.g. style parameter as command
+ if util.safehasattr(webcommands, cmd):
+ req.form['cmd'] = [cmd]
+ else:
+ cmd = ''
+
+ if cmd == 'static':
+ req.form['file'] = ['/'.join(args)]
+ else:
+ if args and args[0]:
+ node = args.pop(0)
+ req.form['node'] = [node]
+ if args:
+ req.form['file'] = args
+
+ ua = req.env.get('HTTP_USER_AGENT', '')
+ if cmd == 'rev' and 'mercurial' in ua:
+ req.form['style'] = ['raw']
+
+ if cmd == 'archive':
+ fn = req.form['node'][0]
+ for type_, spec in self.archive_specs.iteritems():
+ ext = spec[2]
+ if fn.endswith(ext):
+ req.form['node'] = [fn[:-len(ext)]]
+ req.form['type'] = [type_]
+
+ # process the web interface request
+
+ try:
+ tmpl = self.templater(req)
+ ctype = tmpl('mimetype', encoding=encoding.encoding)
+ ctype = templater.stringify(ctype)
+
+ # check read permissions for non-static content
+ if cmd != 'static':
+ self.check_perm(req, None)
+
+ if cmd == '':
+ req.form['cmd'] = [tmpl.cache['default']]
+ cmd = req.form['cmd'][0]
+
+ if self.configbool('web', 'cache', True):
+ caching(self, req) # sets ETag header or raises NOT_MODIFIED
+ if cmd not in webcommands.__all__:
+ msg = 'no such method: %s' % cmd
+ raise ErrorResponse(HTTP_BAD_REQUEST, msg)
+ elif cmd == 'file' and 'raw' in req.form.get('style', []):
+ self.ctype = ctype
+ content = webcommands.rawfile(self, req, tmpl)
+ else:
+ content = getattr(webcommands, cmd)(self, req, tmpl)
+ req.respond(HTTP_OK, ctype)
+
+ return content
+
+ except error.LookupError, err:
+ req.respond(HTTP_NOT_FOUND, ctype)
+ msg = str(err)
+ if 'manifest' not in msg:
+ msg = 'revision not found: %s' % err.name
+ return tmpl('error', error=msg)
+ except (error.RepoError, error.RevlogError), inst:
+ req.respond(HTTP_SERVER_ERROR, ctype)
+ return tmpl('error', error=str(inst))
+ except ErrorResponse, inst:
+ req.respond(inst, ctype)
+ if inst.code == HTTP_NOT_MODIFIED:
+ # Not allowed to return a body on a 304
+ return ['']
+ return tmpl('error', error=inst.message)
+
+ def templater(self, req):
+
+ # determine scheme, port and server name
+ # this is needed to create absolute urls
+
+ proto = req.env.get('wsgi.url_scheme')
+ if proto == 'https':
+ proto = 'https'
+ default_port = "443"
+ else:
+ proto = 'http'
+ default_port = "80"
+
+ port = req.env["SERVER_PORT"]
+ port = port != default_port and (":" + port) or ""
+ urlbase = '%s://%s%s' % (proto, req.env['SERVER_NAME'], port)
+ logourl = self.config("web", "logourl", "http://mercurial.selenic.com/")
+ logoimg = self.config("web", "logoimg", "hglogo.png")
+ staticurl = self.config("web", "staticurl") or req.url + 'static/'
+ if not staticurl.endswith('/'):
+ staticurl += '/'
+
+ # some functions for the templater
+
+ def header(**map):
+ yield tmpl('header', encoding=encoding.encoding, **map)
+
+ def footer(**map):
+ yield tmpl("footer", **map)
+
+ def motd(**map):
+ yield self.config("web", "motd", "")
+
+ # figure out which style to use
+
+ vars = {}
+ styles = (
+ req.form.get('style', [None])[0],
+ self.config('web', 'style'),
+ 'paper',
+ )
+ style, mapfile = templater.stylemap(styles, self.templatepath)
+ if style == styles[0]:
+ vars['style'] = style
+
+ start = req.url[-1] == '?' and '&' or '?'
+ sessionvars = webutil.sessionvars(vars, start)
+
+ if not self.reponame:
+ self.reponame = (self.config("web", "name")
+ or req.env.get('REPO_NAME')
+ or req.url.strip('/') or self.repo.root)
+
+ # create the templater
+
+ tmpl = templater.templater(mapfile,
+ defaults={"url": req.url,
+ "logourl": logourl,
+ "logoimg": logoimg,
+ "staticurl": staticurl,
+ "urlbase": urlbase,
+ "repo": self.reponame,
+ "header": header,
+ "footer": footer,
+ "motd": motd,
+ "sessionvars": sessionvars
+ })
+ return tmpl
+
+ def archivelist(self, nodeid):
+ allowed = self.configlist("web", "allow_archive")
+ for i, spec in self.archive_specs.iteritems():
+ if i in allowed or self.configbool("web", "allow" + i):
+ yield {"type" : i, "extension" : spec[2], "node" : nodeid}
+
+ archive_specs = {
+ 'bz2': ('application/x-bzip2', 'tbz2', '.tar.bz2', None),
+ 'gz': ('application/x-gzip', 'tgz', '.tar.gz', None),
+ 'zip': ('application/zip', 'zip', '.zip', None),
+ }
+
+ def check_perm(self, req, op):
+ for hook in permhooks:
+ hook(self, req, op)
diff --git a/websdk/mercurial/hgweb/hgwebdir_mod.py b/websdk/mercurial/hgweb/hgwebdir_mod.py
index cdbca6c..a22a00b 120000..100644
--- a/websdk/mercurial/hgweb/hgwebdir_mod.py
+++ b/websdk/mercurial/hgweb/hgwebdir_mod.py
@@ -1 +1,399 @@
-/usr/share/pyshared/mercurial/hgweb/hgwebdir_mod.py \ No newline at end of file
+# hgweb/hgwebdir_mod.py - Web interface for a directory of repositories.
+#
+# Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
+# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+import os, re, time
+from mercurial.i18n import _
+from mercurial import ui, hg, scmutil, util, templater
+from mercurial import error, encoding
+from common import ErrorResponse, get_mtime, staticfile, paritygen, \
+ get_contact, HTTP_OK, HTTP_NOT_FOUND, HTTP_SERVER_ERROR
+from hgweb_mod import hgweb
+from request import wsgirequest
+import webutil
+
+def cleannames(items):
+ return [(util.pconvert(name).strip('/'), path) for name, path in items]
+
+def findrepos(paths):
+ repos = []
+ for prefix, root in cleannames(paths):
+ roothead, roottail = os.path.split(root)
+ # "foo = /bar/*" causes every subrepo of /bar/ to be
+ # mounted as foo/subrepo
+ # and "foo = /bar/**" also recurses into the subdirectories,
+ # remember to use it without working dir.
+ try:
+ recurse = {'*': False, '**': True}[roottail]
+ except KeyError:
+ repos.append((prefix, root))
+ continue
+ roothead = os.path.normpath(os.path.abspath(roothead))
+ paths = scmutil.walkrepos(roothead, followsym=True, recurse=recurse)
+ repos.extend(urlrepos(prefix, roothead, paths))
+ return repos
+
+def urlrepos(prefix, roothead, paths):
+ """yield url paths and filesystem paths from a list of repo paths
+
+ >>> conv = lambda seq: [(v, util.pconvert(p)) for v,p in seq]
+ >>> conv(urlrepos('hg', '/opt', ['/opt/r', '/opt/r/r', '/opt']))
+ [('hg/r', '/opt/r'), ('hg/r/r', '/opt/r/r'), ('hg', '/opt')]
+ >>> conv(urlrepos('', '/opt', ['/opt/r', '/opt/r/r', '/opt']))
+ [('r', '/opt/r'), ('r/r', '/opt/r/r'), ('', '/opt')]
+ """
+ for path in paths:
+ path = os.path.normpath(path)
+ yield (prefix + '/' +
+ util.pconvert(path[len(roothead):]).lstrip('/')).strip('/'), path
+
+def geturlcgivars(baseurl, port):
+ """
+ Extract CGI variables from baseurl
+
+ >>> geturlcgivars("http://host.org/base", "80")
+ ('host.org', '80', '/base')
+ >>> geturlcgivars("http://host.org:8000/base", "80")
+ ('host.org', '8000', '/base')
+ >>> geturlcgivars('/base', 8000)
+ ('', '8000', '/base')
+ >>> geturlcgivars("base", '8000')
+ ('', '8000', '/base')
+ >>> geturlcgivars("http://host", '8000')
+ ('host', '8000', '/')
+ >>> geturlcgivars("http://host/", '8000')
+ ('host', '8000', '/')
+ """
+ u = util.url(baseurl)
+ name = u.host or ''
+ if u.port:
+ port = u.port
+ path = u.path or ""
+ if not path.startswith('/'):
+ path = '/' + path
+
+ return name, str(port), path
+
+class hgwebdir(object):
+ refreshinterval = 20
+
+ def __init__(self, conf, baseui=None):
+ self.conf = conf
+ self.baseui = baseui
+ self.lastrefresh = 0
+ self.motd = None
+ self.refresh()
+
+ def refresh(self):
+ if self.lastrefresh + self.refreshinterval > time.time():
+ return
+
+ if self.baseui:
+ u = self.baseui.copy()
+ else:
+ u = ui.ui()
+ u.setconfig('ui', 'report_untrusted', 'off')
+ u.setconfig('ui', 'interactive', 'off')
+
+ if not isinstance(self.conf, (dict, list, tuple)):
+ map = {'paths': 'hgweb-paths'}
+ if not os.path.exists(self.conf):
+ raise util.Abort(_('config file %s not found!') % self.conf)
+ u.readconfig(self.conf, remap=map, trust=True)
+ paths = []
+ for name, ignored in u.configitems('hgweb-paths'):
+ for path in u.configlist('hgweb-paths', name):
+ paths.append((name, path))
+ elif isinstance(self.conf, (list, tuple)):
+ paths = self.conf
+ elif isinstance(self.conf, dict):
+ paths = self.conf.items()
+
+ repos = findrepos(paths)
+ for prefix, root in u.configitems('collections'):
+ prefix = util.pconvert(prefix)
+ for path in scmutil.walkrepos(root, followsym=True):
+ repo = os.path.normpath(path)
+ name = util.pconvert(repo)
+ if name.startswith(prefix):
+ name = name[len(prefix):]
+ repos.append((name.lstrip('/'), repo))
+
+ self.repos = repos
+ self.ui = u
+ encoding.encoding = self.ui.config('web', 'encoding',
+ encoding.encoding)
+ self.style = self.ui.config('web', 'style', 'paper')
+ self.templatepath = self.ui.config('web', 'templates', None)
+ self.stripecount = self.ui.config('web', 'stripes', 1)
+ if self.stripecount:
+ self.stripecount = int(self.stripecount)
+ self._baseurl = self.ui.config('web', 'baseurl')
+ self.lastrefresh = time.time()
+
+ def run(self):
+ if not os.environ.get('GATEWAY_INTERFACE', '').startswith("CGI/1."):
+ raise RuntimeError("This function is only intended to be "
+ "called while running as a CGI script.")
+ import mercurial.hgweb.wsgicgi as wsgicgi
+ wsgicgi.launch(self)
+
+ def __call__(self, env, respond):
+ req = wsgirequest(env, respond)
+ return self.run_wsgi(req)
+
+ def read_allowed(self, ui, req):
+ """Check allow_read and deny_read config options of a repo's ui object
+ to determine user permissions. By default, with neither option set (or
+ both empty), allow all users to read the repo. There are two ways a
+ user can be denied read access: (1) deny_read is not empty, and the
+ user is unauthenticated or deny_read contains user (or *), and (2)
+ allow_read is not empty and the user is not in allow_read. Return True
+ if user is allowed to read the repo, else return False."""
+
+ user = req.env.get('REMOTE_USER')
+
+ deny_read = ui.configlist('web', 'deny_read', untrusted=True)
+ if deny_read and (not user or deny_read == ['*'] or user in deny_read):
+ return False
+
+ allow_read = ui.configlist('web', 'allow_read', untrusted=True)
+ # by default, allow reading if no allow_read option has been set
+ if (not allow_read) or (allow_read == ['*']) or (user in allow_read):
+ return True
+
+ return False
+
+ def run_wsgi(self, req):
+ try:
+ try:
+ self.refresh()
+
+ virtual = req.env.get("PATH_INFO", "").strip('/')
+ tmpl = self.templater(req)
+ ctype = tmpl('mimetype', encoding=encoding.encoding)
+ ctype = templater.stringify(ctype)
+
+ # a static file
+ if virtual.startswith('static/') or 'static' in req.form:
+ if virtual.startswith('static/'):
+ fname = virtual[7:]
+ else:
+ fname = req.form['static'][0]
+ static = templater.templatepath('static')
+ return (staticfile(static, fname, req),)
+
+ # top-level index
+ elif not virtual:
+ req.respond(HTTP_OK, ctype)
+ return self.makeindex(req, tmpl)
+
+ # nested indexes and hgwebs
+
+ repos = dict(self.repos)
+ virtualrepo = virtual
+ while virtualrepo:
+ real = repos.get(virtualrepo)
+ if real:
+ req.env['REPO_NAME'] = virtualrepo
+ try:
+ repo = hg.repository(self.ui, real)
+ return hgweb(repo).run_wsgi(req)
+ except IOError, inst:
+ msg = inst.strerror
+ raise ErrorResponse(HTTP_SERVER_ERROR, msg)
+ except error.RepoError, inst:
+ raise ErrorResponse(HTTP_SERVER_ERROR, str(inst))
+
+ up = virtualrepo.rfind('/')
+ if up < 0:
+ break
+ virtualrepo = virtualrepo[:up]
+
+ # browse subdirectories
+ subdir = virtual + '/'
+ if [r for r in repos if r.startswith(subdir)]:
+ req.respond(HTTP_OK, ctype)
+ return self.makeindex(req, tmpl, subdir)
+
+ # prefixes not found
+ req.respond(HTTP_NOT_FOUND, ctype)
+ return tmpl("notfound", repo=virtual)
+
+ except ErrorResponse, err:
+ req.respond(err, ctype)
+ return tmpl('error', error=err.message or '')
+ finally:
+ tmpl = None
+
+ def makeindex(self, req, tmpl, subdir=""):
+
+ def archivelist(ui, nodeid, url):
+ allowed = ui.configlist("web", "allow_archive", untrusted=True)
+ archives = []
+ for i in [('zip', '.zip'), ('gz', '.tar.gz'), ('bz2', '.tar.bz2')]:
+ if i[0] in allowed or ui.configbool("web", "allow" + i[0],
+ untrusted=True):
+ archives.append({"type" : i[0], "extension": i[1],
+ "node": nodeid, "url": url})
+ return archives
+
+ def rawentries(subdir="", **map):
+
+ descend = self.ui.configbool('web', 'descend', True)
+ for name, path in self.repos:
+
+ if not name.startswith(subdir):
+ continue
+ name = name[len(subdir):]
+ if not descend and '/' in name:
+ continue
+
+ u = self.ui.copy()
+ try:
+ u.readconfig(os.path.join(path, '.hg', 'hgrc'))
+ except Exception, e:
+ u.warn(_('error reading %s/.hg/hgrc: %s\n') % (path, e))
+ continue
+ def get(section, name, default=None):
+ return u.config(section, name, default, untrusted=True)
+
+ if u.configbool("web", "hidden", untrusted=True):
+ continue
+
+ if not self.read_allowed(u, req):
+ continue
+
+ parts = [name]
+ if 'PATH_INFO' in req.env:
+ parts.insert(0, req.env['PATH_INFO'].rstrip('/'))
+ if req.env['SCRIPT_NAME']:
+ parts.insert(0, req.env['SCRIPT_NAME'])
+ url = re.sub(r'/+', '/', '/'.join(parts) + '/')
+
+ # update time with local timezone
+ try:
+ r = hg.repository(self.ui, path)
+ except IOError:
+ u.warn(_('error accessing repository at %s\n') % path)
+ continue
+ except error.RepoError:
+ u.warn(_('error accessing repository at %s\n') % path)
+ continue
+ try:
+ d = (get_mtime(r.spath), util.makedate()[1])
+ except OSError:
+ continue
+
+ contact = get_contact(get)
+ description = get("web", "description", "")
+ name = get("web", "name", name)
+ row = dict(contact=contact or "unknown",
+ contact_sort=contact.upper() or "unknown",
+ name=name,
+ name_sort=name,
+ url=url,
+ description=description or "unknown",
+ description_sort=description.upper() or "unknown",
+ lastchange=d,
+ lastchange_sort=d[1]-d[0],
+ archives=archivelist(u, "tip", url))
+ yield row
+
+ sortdefault = None, False
+ def entries(sortcolumn="", descending=False, subdir="", **map):
+ rows = rawentries(subdir=subdir, **map)
+
+ if sortcolumn and sortdefault != (sortcolumn, descending):
+ sortkey = '%s_sort' % sortcolumn
+ rows = sorted(rows, key=lambda x: x[sortkey],
+ reverse=descending)
+ for row, parity in zip(rows, paritygen(self.stripecount)):
+ row['parity'] = parity
+ yield row
+
+ self.refresh()
+ sortable = ["name", "description", "contact", "lastchange"]
+ sortcolumn, descending = sortdefault
+ if 'sort' in req.form:
+ sortcolumn = req.form['sort'][0]
+ descending = sortcolumn.startswith('-')
+ if descending:
+ sortcolumn = sortcolumn[1:]
+ if sortcolumn not in sortable:
+ sortcolumn = ""
+
+ sort = [("sort_%s" % column,
+ "%s%s" % ((not descending and column == sortcolumn)
+ and "-" or "", column))
+ for column in sortable]
+
+ self.refresh()
+ self.updatereqenv(req.env)
+
+ return tmpl("index", entries=entries, subdir=subdir,
+ sortcolumn=sortcolumn, descending=descending,
+ **dict(sort))
+
+ def templater(self, req):
+
+ def header(**map):
+ yield tmpl('header', encoding=encoding.encoding, **map)
+
+ def footer(**map):
+ yield tmpl("footer", **map)
+
+ def motd(**map):
+ if self.motd is not None:
+ yield self.motd
+ else:
+ yield config('web', 'motd', '')
+
+ def config(section, name, default=None, untrusted=True):
+ return self.ui.config(section, name, default, untrusted)
+
+ self.updatereqenv(req.env)
+
+ url = req.env.get('SCRIPT_NAME', '')
+ if not url.endswith('/'):
+ url += '/'
+
+ vars = {}
+ styles = (
+ req.form.get('style', [None])[0],
+ config('web', 'style'),
+ 'paper'
+ )
+ style, mapfile = templater.stylemap(styles, self.templatepath)
+ if style == styles[0]:
+ vars['style'] = style
+
+ start = url[-1] == '?' and '&' or '?'
+ sessionvars = webutil.sessionvars(vars, start)
+ logourl = config('web', 'logourl', 'http://mercurial.selenic.com/')
+ logoimg = config('web', 'logoimg', 'hglogo.png')
+ staticurl = config('web', 'staticurl') or url + 'static/'
+ if not staticurl.endswith('/'):
+ staticurl += '/'
+
+ tmpl = templater.templater(mapfile,
+ defaults={"header": header,
+ "footer": footer,
+ "motd": motd,
+ "url": url,
+ "logourl": logourl,
+ "logoimg": logoimg,
+ "staticurl": staticurl,
+ "sessionvars": sessionvars})
+ return tmpl
+
+ def updatereqenv(self, env):
+ if self._baseurl is not None:
+ name, port, path = geturlcgivars(self._baseurl, env['SERVER_PORT'])
+ env['SERVER_NAME'] = name
+ env['SERVER_PORT'] = port
+ env['SCRIPT_NAME'] = path
diff --git a/websdk/mercurial/hgweb/protocol.py b/websdk/mercurial/hgweb/protocol.py
index c5d1fb4..84aa6b1 120000..100644
--- a/websdk/mercurial/hgweb/protocol.py
+++ b/websdk/mercurial/hgweb/protocol.py
@@ -1 +1,97 @@
-/usr/share/pyshared/mercurial/hgweb/protocol.py \ No newline at end of file
+#
+# Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
+# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+import cgi, cStringIO, zlib, urllib
+from mercurial import util, wireproto
+from common import HTTP_OK
+
+HGTYPE = 'application/mercurial-0.1'
+HGERRTYPE = 'application/hg-error'
+
+class webproto(object):
+ def __init__(self, req, ui):
+ self.req = req
+ self.response = ''
+ self.ui = ui
+ def getargs(self, args):
+ knownargs = self._args()
+ data = {}
+ keys = args.split()
+ for k in keys:
+ if k == '*':
+ star = {}
+ for key in knownargs.keys():
+ if key != 'cmd' and key not in keys:
+ star[key] = knownargs[key][0]
+ data['*'] = star
+ else:
+ data[k] = knownargs[k][0]
+ return [data[k] for k in keys]
+ def _args(self):
+ args = self.req.form.copy()
+ chunks = []
+ i = 1
+ while True:
+ h = self.req.env.get('HTTP_X_HGARG_' + str(i))
+ if h is None:
+ break
+ chunks += [h]
+ i += 1
+ args.update(cgi.parse_qs(''.join(chunks), keep_blank_values=True))
+ return args
+ def getfile(self, fp):
+ length = int(self.req.env['CONTENT_LENGTH'])
+ for s in util.filechunkiter(self.req, limit=length):
+ fp.write(s)
+ def redirect(self):
+ self.oldio = self.ui.fout, self.ui.ferr
+ self.ui.ferr = self.ui.fout = cStringIO.StringIO()
+ def restore(self):
+ val = self.ui.fout.getvalue()
+ self.ui.ferr, self.ui.fout = self.oldio
+ return val
+ def groupchunks(self, cg):
+ z = zlib.compressobj()
+ while True:
+ chunk = cg.read(4096)
+ if not chunk:
+ break
+ yield z.compress(chunk)
+ yield z.flush()
+ def _client(self):
+ return 'remote:%s:%s:%s' % (
+ self.req.env.get('wsgi.url_scheme') or 'http',
+ urllib.quote(self.req.env.get('REMOTE_HOST', '')),
+ urllib.quote(self.req.env.get('REMOTE_USER', '')))
+
+def iscmd(cmd):
+ return cmd in wireproto.commands
+
+def call(repo, req, cmd):
+ p = webproto(req, repo.ui)
+ rsp = wireproto.dispatch(repo, p, cmd)
+ if isinstance(rsp, str):
+ req.respond(HTTP_OK, HGTYPE, length=len(rsp))
+ return [rsp]
+ elif isinstance(rsp, wireproto.streamres):
+ req.respond(HTTP_OK, HGTYPE)
+ return rsp.gen
+ elif isinstance(rsp, wireproto.pushres):
+ val = p.restore()
+ req.respond(HTTP_OK, HGTYPE)
+ return ['%d\n%s' % (rsp.res, val)]
+ elif isinstance(rsp, wireproto.pusherr):
+ # drain the incoming bundle
+ req.drain()
+ p.restore()
+ rsp = '0\n%s\n' % rsp.res
+ req.respond(HTTP_OK, HGTYPE, length=len(rsp))
+ return [rsp]
+ elif isinstance(rsp, wireproto.ooberror):
+ rsp = rsp.message
+ req.respond(HTTP_OK, HGERRTYPE, length=len(rsp))
+ return [rsp]
diff --git a/websdk/mercurial/hgweb/request.py b/websdk/mercurial/hgweb/request.py
index 6113c10..1fddc95 120000..100644
--- a/websdk/mercurial/hgweb/request.py
+++ b/websdk/mercurial/hgweb/request.py
@@ -1 +1,147 @@
-/usr/share/pyshared/mercurial/hgweb/request.py \ No newline at end of file
+# hgweb/request.py - An http request from either CGI or the standalone server.
+#
+# Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
+# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+import socket, cgi, errno
+from mercurial import util
+from common import ErrorResponse, statusmessage, HTTP_NOT_MODIFIED
+
+shortcuts = {
+ 'cl': [('cmd', ['changelog']), ('rev', None)],
+ 'sl': [('cmd', ['shortlog']), ('rev', None)],
+ 'cs': [('cmd', ['changeset']), ('node', None)],
+ 'f': [('cmd', ['file']), ('filenode', None)],
+ 'fl': [('cmd', ['filelog']), ('filenode', None)],
+ 'fd': [('cmd', ['filediff']), ('node', None)],
+ 'fa': [('cmd', ['annotate']), ('filenode', None)],
+ 'mf': [('cmd', ['manifest']), ('manifest', None)],
+ 'ca': [('cmd', ['archive']), ('node', None)],
+ 'tags': [('cmd', ['tags'])],
+ 'tip': [('cmd', ['changeset']), ('node', ['tip'])],
+ 'static': [('cmd', ['static']), ('file', None)]
+}
+
+def normalize(form):
+ # first expand the shortcuts
+ for k in shortcuts.iterkeys():
+ if k in form:
+ for name, value in shortcuts[k]:
+ if value is None:
+ value = form[k]
+ form[name] = value
+ del form[k]
+ # And strip the values
+ for k, v in form.iteritems():
+ form[k] = [i.strip() for i in v]
+ return form
+
+class wsgirequest(object):
+ def __init__(self, wsgienv, start_response):
+ version = wsgienv['wsgi.version']
+ if (version < (1, 0)) or (version >= (2, 0)):
+ raise RuntimeError("Unknown and unsupported WSGI version %d.%d"
+ % version)
+ self.inp = wsgienv['wsgi.input']
+ self.err = wsgienv['wsgi.errors']
+ self.threaded = wsgienv['wsgi.multithread']
+ self.multiprocess = wsgienv['wsgi.multiprocess']
+ self.run_once = wsgienv['wsgi.run_once']
+ self.env = wsgienv
+ self.form = normalize(cgi.parse(self.inp,
+ self.env,
+ keep_blank_values=1))
+ self._start_response = start_response
+ self.server_write = None
+ self.headers = []
+
+ def __iter__(self):
+ return iter([])
+
+ def read(self, count=-1):
+ return self.inp.read(count)
+
+ def drain(self):
+ '''need to read all data from request, httplib is half-duplex'''
+ length = int(self.env.get('CONTENT_LENGTH') or 0)
+ for s in util.filechunkiter(self.inp, limit=length):
+ pass
+
+ def respond(self, status, type=None, filename=None, length=0):
+ if self._start_response is not None:
+
+ self.httphdr(type, filename, length)
+ if not self.headers:
+ raise RuntimeError("request.write called before headers sent")
+
+ for k, v in self.headers:
+ if not isinstance(v, str):
+ raise TypeError('header value must be string: %r' % v)
+
+ if isinstance(status, ErrorResponse):
+ self.header(status.headers)
+ if status.code == HTTP_NOT_MODIFIED:
+ # RFC 2616 Section 10.3.5: 304 Not Modified has cases where
+ # it MUST NOT include any headers other than these and no
+ # body
+ self.headers = [(k, v) for (k, v) in self.headers if
+ k in ('Date', 'ETag', 'Expires',
+ 'Cache-Control', 'Vary')]
+ status = statusmessage(status.code, status.message)
+ elif status == 200:
+ status = '200 Script output follows'
+ elif isinstance(status, int):
+ status = statusmessage(status)
+
+ self.server_write = self._start_response(status, self.headers)
+ self._start_response = None
+ self.headers = []
+
+ def write(self, thing):
+ if util.safehasattr(thing, "__iter__"):
+ for part in thing:
+ self.write(part)
+ else:
+ thing = str(thing)
+ try:
+ self.server_write(thing)
+ except socket.error, inst:
+ if inst[0] != errno.ECONNRESET:
+ raise
+
+ def writelines(self, lines):
+ for line in lines:
+ self.write(line)
+
+ def flush(self):
+ return None
+
+ def close(self):
+ return None
+
+ def header(self, headers=[('Content-Type','text/html')]):
+ self.headers.extend(headers)
+
+ def httphdr(self, type=None, filename=None, length=0, headers={}):
+ headers = headers.items()
+ if type is not None:
+ headers.append(('Content-Type', type))
+ if filename:
+ filename = (filename.split('/')[-1]
+ .replace('\\', '\\\\').replace('"', '\\"'))
+ headers.append(('Content-Disposition',
+ 'inline; filename="%s"' % filename))
+ if length:
+ headers.append(('Content-Length', str(length)))
+ self.header(headers)
+
+def wsgiapplication(app_maker):
+ '''For compatibility with old CGI scripts. A plain hgweb() or hgwebdir()
+ can and should now be used as a WSGI application.'''
+ application = app_maker()
+ def run_wsgi(env, respond):
+ return application(env, respond)
+ return run_wsgi
diff --git a/websdk/mercurial/hgweb/server.py b/websdk/mercurial/hgweb/server.py
index 92f92ad..3ad99f4 120000..100644
--- a/websdk/mercurial/hgweb/server.py
+++ b/websdk/mercurial/hgweb/server.py
@@ -1 +1,320 @@
-/usr/share/pyshared/mercurial/hgweb/server.py \ No newline at end of file
+# hgweb/server.py - The standalone hg web server.
+#
+# Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
+# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+import os, sys, errno, urllib, BaseHTTPServer, socket, SocketServer, traceback
+from mercurial import util, error
+from mercurial.hgweb import common
+from mercurial.i18n import _
+
+def _splitURI(uri):
+ """ Return path and query splited from uri
+
+ Just like CGI environment, the path is unquoted, the query is
+ not.
+ """
+ if '?' in uri:
+ path, query = uri.split('?', 1)
+ else:
+ path, query = uri, ''
+ return urllib.unquote(path), query
+
+class _error_logger(object):
+ def __init__(self, handler):
+ self.handler = handler
+ def flush(self):
+ pass
+ def write(self, str):
+ self.writelines(str.split('\n'))
+ def writelines(self, seq):
+ for msg in seq:
+ self.handler.log_error("HG error: %s", msg)
+
+class _httprequesthandler(BaseHTTPServer.BaseHTTPRequestHandler):
+
+ url_scheme = 'http'
+
+ @staticmethod
+ def preparehttpserver(httpserver, ssl_cert):
+ """Prepare .socket of new HTTPServer instance"""
+ pass
+
+ def __init__(self, *args, **kargs):
+ self.protocol_version = 'HTTP/1.1'
+ BaseHTTPServer.BaseHTTPRequestHandler.__init__(self, *args, **kargs)
+
+ def _log_any(self, fp, format, *args):
+ fp.write("%s - - [%s] %s\n" % (self.client_address[0],
+ self.log_date_time_string(),
+ format % args))
+ fp.flush()
+
+ def log_error(self, format, *args):
+ self._log_any(self.server.errorlog, format, *args)
+
+ def log_message(self, format, *args):
+ self._log_any(self.server.accesslog, format, *args)
+
+ def log_request(self, code='-', size='-'):
+ xheaders = [h for h in self.headers.items() if h[0].startswith('x-')]
+ self.log_message('"%s" %s %s%s',
+ self.requestline, str(code), str(size),
+ ''.join([' %s:%s' % h for h in sorted(xheaders)]))
+
+ def do_write(self):
+ try:
+ self.do_hgweb()
+ except socket.error, inst:
+ if inst[0] != errno.EPIPE:
+ raise
+
+ def do_POST(self):
+ try:
+ self.do_write()
+ except Exception:
+ self._start_response("500 Internal Server Error", [])
+ self._write("Internal Server Error")
+ tb = "".join(traceback.format_exception(*sys.exc_info()))
+ self.log_error("Exception happened during processing "
+ "request '%s':\n%s", self.path, tb)
+
+ def do_GET(self):
+ self.do_POST()
+
+ def do_hgweb(self):
+ path, query = _splitURI(self.path)
+
+ env = {}
+ env['GATEWAY_INTERFACE'] = 'CGI/1.1'
+ env['REQUEST_METHOD'] = self.command
+ env['SERVER_NAME'] = self.server.server_name
+ env['SERVER_PORT'] = str(self.server.server_port)
+ env['REQUEST_URI'] = self.path
+ env['SCRIPT_NAME'] = self.server.prefix
+ env['PATH_INFO'] = path[len(self.server.prefix):]
+ env['REMOTE_HOST'] = self.client_address[0]
+ env['REMOTE_ADDR'] = self.client_address[0]
+ if query:
+ env['QUERY_STRING'] = query
+
+ if self.headers.typeheader is None:
+ env['CONTENT_TYPE'] = self.headers.type
+ else:
+ env['CONTENT_TYPE'] = self.headers.typeheader
+ length = self.headers.getheader('content-length')
+ if length:
+ env['CONTENT_LENGTH'] = length
+ for header in [h for h in self.headers.keys()
+ if h not in ('content-type', 'content-length')]:
+ hkey = 'HTTP_' + header.replace('-', '_').upper()
+ hval = self.headers.getheader(header)
+ hval = hval.replace('\n', '').strip()
+ if hval:
+ env[hkey] = hval
+ env['SERVER_PROTOCOL'] = self.request_version
+ env['wsgi.version'] = (1, 0)
+ env['wsgi.url_scheme'] = self.url_scheme
+ if env.get('HTTP_EXPECT', '').lower() == '100-continue':
+ self.rfile = common.continuereader(self.rfile, self.wfile.write)
+
+ env['wsgi.input'] = self.rfile
+ env['wsgi.errors'] = _error_logger(self)
+ env['wsgi.multithread'] = isinstance(self.server,
+ SocketServer.ThreadingMixIn)
+ env['wsgi.multiprocess'] = isinstance(self.server,
+ SocketServer.ForkingMixIn)
+ env['wsgi.run_once'] = 0
+
+ self.close_connection = True
+ self.saved_status = None
+ self.saved_headers = []
+ self.sent_headers = False
+ self.length = None
+ for chunk in self.server.application(env, self._start_response):
+ self._write(chunk)
+
+ def send_headers(self):
+ if not self.saved_status:
+ raise AssertionError("Sending headers before "
+ "start_response() called")
+ saved_status = self.saved_status.split(None, 1)
+ saved_status[0] = int(saved_status[0])
+ self.send_response(*saved_status)
+ should_close = True
+ for h in self.saved_headers:
+ self.send_header(*h)
+ if h[0].lower() == 'content-length':
+ should_close = False
+ self.length = int(h[1])
+ # The value of the Connection header is a list of case-insensitive
+ # tokens separated by commas and optional whitespace.
+ if 'close' in [token.strip().lower() for token in
+ self.headers.get('connection', '').split(',')]:
+ should_close = True
+ if should_close:
+ self.send_header('Connection', 'close')
+ self.close_connection = should_close
+ self.end_headers()
+ self.sent_headers = True
+
+ def _start_response(self, http_status, headers, exc_info=None):
+ code, msg = http_status.split(None, 1)
+ code = int(code)
+ self.saved_status = http_status
+ bad_headers = ('connection', 'transfer-encoding')
+ self.saved_headers = [h for h in headers
+ if h[0].lower() not in bad_headers]
+ return self._write
+
+ def _write(self, data):
+ if not self.saved_status:
+ raise AssertionError("data written before start_response() called")
+ elif not self.sent_headers:
+ self.send_headers()
+ if self.length is not None:
+ if len(data) > self.length:
+ raise AssertionError("Content-length header sent, but more "
+ "bytes than specified are being written.")
+ self.length = self.length - len(data)
+ self.wfile.write(data)
+ self.wfile.flush()
+
+class _httprequesthandleropenssl(_httprequesthandler):
+ """HTTPS handler based on pyOpenSSL"""
+
+ url_scheme = 'https'
+
+ @staticmethod
+ def preparehttpserver(httpserver, ssl_cert):
+ try:
+ import OpenSSL
+ OpenSSL.SSL.Context
+ except ImportError:
+ raise util.Abort(_("SSL support is unavailable"))
+ ctx = OpenSSL.SSL.Context(OpenSSL.SSL.SSLv23_METHOD)
+ ctx.use_privatekey_file(ssl_cert)
+ ctx.use_certificate_file(ssl_cert)
+ sock = socket.socket(httpserver.address_family, httpserver.socket_type)
+ httpserver.socket = OpenSSL.SSL.Connection(ctx, sock)
+ httpserver.server_bind()
+ httpserver.server_activate()
+
+ def setup(self):
+ self.connection = self.request
+ self.rfile = socket._fileobject(self.request, "rb", self.rbufsize)
+ self.wfile = socket._fileobject(self.request, "wb", self.wbufsize)
+
+ def do_write(self):
+ import OpenSSL
+ try:
+ _httprequesthandler.do_write(self)
+ except OpenSSL.SSL.SysCallError, inst:
+ if inst.args[0] != errno.EPIPE:
+ raise
+
+ def handle_one_request(self):
+ import OpenSSL
+ try:
+ _httprequesthandler.handle_one_request(self)
+ except (OpenSSL.SSL.SysCallError, OpenSSL.SSL.ZeroReturnError):
+ self.close_connection = True
+ pass
+
+class _httprequesthandlerssl(_httprequesthandler):
+ """HTTPS handler based on Pythons ssl module (introduced in 2.6)"""
+
+ url_scheme = 'https'
+
+ @staticmethod
+ def preparehttpserver(httpserver, ssl_cert):
+ try:
+ import ssl
+ ssl.wrap_socket
+ except ImportError:
+ raise util.Abort(_("SSL support is unavailable"))
+ httpserver.socket = ssl.wrap_socket(httpserver.socket, server_side=True,
+ certfile=ssl_cert, ssl_version=ssl.PROTOCOL_SSLv23)
+
+ def setup(self):
+ self.connection = self.request
+ self.rfile = socket._fileobject(self.request, "rb", self.rbufsize)
+ self.wfile = socket._fileobject(self.request, "wb", self.wbufsize)
+
+try:
+ from threading import activeCount
+ activeCount() # silence pyflakes
+ _mixin = SocketServer.ThreadingMixIn
+except ImportError:
+ if util.safehasattr(os, "fork"):
+ _mixin = SocketServer.ForkingMixIn
+ else:
+ class _mixin(object):
+ pass
+
+def openlog(opt, default):
+ if opt and opt != '-':
+ return open(opt, 'a')
+ return default
+
+class MercurialHTTPServer(object, _mixin, BaseHTTPServer.HTTPServer):
+
+ # SO_REUSEADDR has broken semantics on windows
+ if os.name == 'nt':
+ allow_reuse_address = 0
+
+ def __init__(self, ui, app, addr, handler, **kwargs):
+ BaseHTTPServer.HTTPServer.__init__(self, addr, handler, **kwargs)
+ self.daemon_threads = True
+ self.application = app
+
+ handler.preparehttpserver(self, ui.config('web', 'certificate'))
+
+ prefix = ui.config('web', 'prefix', '')
+ if prefix:
+ prefix = '/' + prefix.strip('/')
+ self.prefix = prefix
+
+ alog = openlog(ui.config('web', 'accesslog', '-'), sys.stdout)
+ elog = openlog(ui.config('web', 'errorlog', '-'), sys.stderr)
+ self.accesslog = alog
+ self.errorlog = elog
+
+ self.addr, self.port = self.socket.getsockname()[0:2]
+ self.fqaddr = socket.getfqdn(addr[0])
+
+class IPv6HTTPServer(MercurialHTTPServer):
+ address_family = getattr(socket, 'AF_INET6', None)
+ def __init__(self, *args, **kwargs):
+ if self.address_family is None:
+ raise error.RepoError(_('IPv6 is not available on this system'))
+ super(IPv6HTTPServer, self).__init__(*args, **kwargs)
+
+def create_server(ui, app):
+
+ if ui.config('web', 'certificate'):
+ if sys.version_info >= (2, 6):
+ handler = _httprequesthandlerssl
+ else:
+ handler = _httprequesthandleropenssl
+ else:
+ handler = _httprequesthandler
+
+ if ui.configbool('web', 'ipv6'):
+ cls = IPv6HTTPServer
+ else:
+ cls = MercurialHTTPServer
+
+ # ugly hack due to python issue5853 (for threaded use)
+ import mimetypes; mimetypes.init()
+
+ address = ui.config('web', 'address', '')
+ port = util.getport(ui.config('web', 'port', 8000))
+ try:
+ return cls(ui, app, (address, port), handler)
+ except socket.error, inst:
+ raise util.Abort(_("cannot start server at '%s:%d': %s")
+ % (address, port, inst.args[1]))
diff --git a/websdk/mercurial/hgweb/webcommands.py b/websdk/mercurial/hgweb/webcommands.py
index 29141b6..fc43ca3 120000..100644
--- a/websdk/mercurial/hgweb/webcommands.py
+++ b/websdk/mercurial/hgweb/webcommands.py
@@ -1 +1,842 @@
-/usr/share/pyshared/mercurial/hgweb/webcommands.py \ No newline at end of file
+#
+# Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
+# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+import os, mimetypes, re, cgi, copy
+import webutil
+from mercurial import error, encoding, archival, templater, templatefilters
+from mercurial.node import short, hex
+from mercurial.util import binary
+from common import paritygen, staticfile, get_contact, ErrorResponse
+from common import HTTP_OK, HTTP_FORBIDDEN, HTTP_NOT_FOUND
+from mercurial import graphmod
+from mercurial import help as helpmod
+from mercurial.i18n import _
+
+# __all__ is populated with the allowed commands. Be sure to add to it if
+# you're adding a new command, or the new command won't work.
+
+__all__ = [
+ 'log', 'rawfile', 'file', 'changelog', 'shortlog', 'changeset', 'rev',
+ 'manifest', 'tags', 'bookmarks', 'branches', 'summary', 'filediff', 'diff',
+ 'annotate', 'filelog', 'archive', 'static', 'graph', 'help',
+]
+
+def log(web, req, tmpl):
+ if 'file' in req.form and req.form['file'][0]:
+ return filelog(web, req, tmpl)
+ else:
+ return changelog(web, req, tmpl)
+
+def rawfile(web, req, tmpl):
+ guessmime = web.configbool('web', 'guessmime', False)
+
+ path = webutil.cleanpath(web.repo, req.form.get('file', [''])[0])
+ if not path:
+ content = manifest(web, req, tmpl)
+ req.respond(HTTP_OK, web.ctype)
+ return content
+
+ try:
+ fctx = webutil.filectx(web.repo, req)
+ except error.LookupError, inst:
+ try:
+ content = manifest(web, req, tmpl)
+ req.respond(HTTP_OK, web.ctype)
+ return content
+ except ErrorResponse:
+ raise inst
+
+ path = fctx.path()
+ text = fctx.data()
+ mt = 'application/binary'
+ if guessmime:
+ mt = mimetypes.guess_type(path)[0]
+ if mt is None:
+ mt = binary(text) and 'application/binary' or 'text/plain'
+ if mt.startswith('text/'):
+ mt += '; charset="%s"' % encoding.encoding
+
+ req.respond(HTTP_OK, mt, path, len(text))
+ return [text]
+
+def _filerevision(web, tmpl, fctx):
+ f = fctx.path()
+ text = fctx.data()
+ parity = paritygen(web.stripecount)
+
+ if binary(text):
+ mt = mimetypes.guess_type(f)[0] or 'application/octet-stream'
+ text = '(binary:%s)' % mt
+
+ def lines():
+ for lineno, t in enumerate(text.splitlines(True)):
+ yield {"line": t,
+ "lineid": "l%d" % (lineno + 1),
+ "linenumber": "% 6d" % (lineno + 1),
+ "parity": parity.next()}
+
+ return tmpl("filerevision",
+ file=f,
+ path=webutil.up(f),
+ text=lines(),
+ rev=fctx.rev(),
+ node=fctx.hex(),
+ author=fctx.user(),
+ date=fctx.date(),
+ desc=fctx.description(),
+ branch=webutil.nodebranchnodefault(fctx),
+ parent=webutil.parents(fctx),
+ child=webutil.children(fctx),
+ rename=webutil.renamelink(fctx),
+ permissions=fctx.manifest().flags(f))
+
+def file(web, req, tmpl):
+ path = webutil.cleanpath(web.repo, req.form.get('file', [''])[0])
+ if not path:
+ return manifest(web, req, tmpl)
+ try:
+ return _filerevision(web, tmpl, webutil.filectx(web.repo, req))
+ except error.LookupError, inst:
+ try:
+ return manifest(web, req, tmpl)
+ except ErrorResponse:
+ raise inst
+
+def _search(web, req, tmpl):
+
+ query = req.form['rev'][0]
+ revcount = web.maxchanges
+ if 'revcount' in req.form:
+ revcount = int(req.form.get('revcount', [revcount])[0])
+ revcount = max(revcount, 1)
+ tmpl.defaults['sessionvars']['revcount'] = revcount
+
+ lessvars = copy.copy(tmpl.defaults['sessionvars'])
+ lessvars['revcount'] = max(revcount / 2, 1)
+ lessvars['rev'] = query
+ morevars = copy.copy(tmpl.defaults['sessionvars'])
+ morevars['revcount'] = revcount * 2
+ morevars['rev'] = query
+
+ def changelist(**map):
+ count = 0
+ qw = query.lower().split()
+
+ def revgen():
+ for i in xrange(len(web.repo) - 1, 0, -100):
+ l = []
+ for j in xrange(max(0, i - 100), i + 1):
+ ctx = web.repo[j]
+ l.append(ctx)
+ l.reverse()
+ for e in l:
+ yield e
+
+ for ctx in revgen():
+ miss = 0
+ for q in qw:
+ if not (q in ctx.user().lower() or
+ q in ctx.description().lower() or
+ q in " ".join(ctx.files()).lower()):
+ miss = 1
+ break
+ if miss:
+ continue
+
+ count += 1
+ n = ctx.node()
+ showtags = webutil.showtag(web.repo, tmpl, 'changelogtag', n)
+ files = webutil.listfilediffs(tmpl, ctx.files(), n, web.maxfiles)
+
+ yield tmpl('searchentry',
+ parity=parity.next(),
+ author=ctx.user(),
+ parent=webutil.parents(ctx),
+ child=webutil.children(ctx),
+ changelogtag=showtags,
+ desc=ctx.description(),
+ date=ctx.date(),
+ files=files,
+ rev=ctx.rev(),
+ node=hex(n),
+ tags=webutil.nodetagsdict(web.repo, n),
+ bookmarks=webutil.nodebookmarksdict(web.repo, n),
+ inbranch=webutil.nodeinbranch(web.repo, ctx),
+ branches=webutil.nodebranchdict(web.repo, ctx))
+
+ if count >= revcount:
+ break
+
+ tip = web.repo['tip']
+ parity = paritygen(web.stripecount)
+
+ return tmpl('search', query=query, node=tip.hex(),
+ entries=changelist, archives=web.archivelist("tip"),
+ morevars=morevars, lessvars=lessvars)
+
+def changelog(web, req, tmpl, shortlog=False):
+
+ if 'node' in req.form:
+ ctx = webutil.changectx(web.repo, req)
+ else:
+ if 'rev' in req.form:
+ hi = req.form['rev'][0]
+ else:
+ hi = len(web.repo) - 1
+ try:
+ ctx = web.repo[hi]
+ except error.RepoError:
+ return _search(web, req, tmpl) # XXX redirect to 404 page?
+
+ def changelist(limit=0, **map):
+ l = [] # build a list in forward order for efficiency
+ for i in xrange(start, end):
+ ctx = web.repo[i]
+ n = ctx.node()
+ showtags = webutil.showtag(web.repo, tmpl, 'changelogtag', n)
+ files = webutil.listfilediffs(tmpl, ctx.files(), n, web.maxfiles)
+
+ l.insert(0, {"parity": parity.next(),
+ "author": ctx.user(),
+ "parent": webutil.parents(ctx, i - 1),
+ "child": webutil.children(ctx, i + 1),
+ "changelogtag": showtags,
+ "desc": ctx.description(),
+ "date": ctx.date(),
+ "files": files,
+ "rev": i,
+ "node": hex(n),
+ "tags": webutil.nodetagsdict(web.repo, n),
+ "bookmarks": webutil.nodebookmarksdict(web.repo, n),
+ "inbranch": webutil.nodeinbranch(web.repo, ctx),
+ "branches": webutil.nodebranchdict(web.repo, ctx)
+ })
+
+ if limit > 0:
+ l = l[:limit]
+
+ for e in l:
+ yield e
+
+ revcount = shortlog and web.maxshortchanges or web.maxchanges
+ if 'revcount' in req.form:
+ revcount = int(req.form.get('revcount', [revcount])[0])
+ revcount = max(revcount, 1)
+ tmpl.defaults['sessionvars']['revcount'] = revcount
+
+ lessvars = copy.copy(tmpl.defaults['sessionvars'])
+ lessvars['revcount'] = max(revcount / 2, 1)
+ morevars = copy.copy(tmpl.defaults['sessionvars'])
+ morevars['revcount'] = revcount * 2
+
+ count = len(web.repo)
+ pos = ctx.rev()
+ start = max(0, pos - revcount + 1)
+ end = min(count, start + revcount)
+ pos = end - 1
+ parity = paritygen(web.stripecount, offset=start - end)
+
+ changenav = webutil.revnavgen(pos, revcount, count, web.repo.changectx)
+
+ return tmpl(shortlog and 'shortlog' or 'changelog', changenav=changenav,
+ node=ctx.hex(), rev=pos, changesets=count,
+ entries=lambda **x: changelist(limit=0,**x),
+ latestentry=lambda **x: changelist(limit=1,**x),
+ archives=web.archivelist("tip"), revcount=revcount,
+ morevars=morevars, lessvars=lessvars)
+
+def shortlog(web, req, tmpl):
+ return changelog(web, req, tmpl, shortlog = True)
+
+def changeset(web, req, tmpl):
+ ctx = webutil.changectx(web.repo, req)
+ showtags = webutil.showtag(web.repo, tmpl, 'changesettag', ctx.node())
+ showbookmarks = webutil.showbookmark(web.repo, tmpl, 'changesetbookmark',
+ ctx.node())
+ showbranch = webutil.nodebranchnodefault(ctx)
+
+ files = []
+ parity = paritygen(web.stripecount)
+ for f in ctx.files():
+ template = f in ctx and 'filenodelink' or 'filenolink'
+ files.append(tmpl(template,
+ node=ctx.hex(), file=f,
+ parity=parity.next()))
+
+ style = web.config('web', 'style', 'paper')
+ if 'style' in req.form:
+ style = req.form['style'][0]
+
+ parity = paritygen(web.stripecount)
+ diffs = webutil.diffs(web.repo, tmpl, ctx, None, parity, style)
+
+ parity = paritygen(web.stripecount)
+ diffstatgen = webutil.diffstatgen(ctx)
+ diffstat = webutil.diffstat(tmpl, ctx, diffstatgen, parity)
+
+ return tmpl('changeset',
+ diff=diffs,
+ rev=ctx.rev(),
+ node=ctx.hex(),
+ parent=webutil.parents(ctx),
+ child=webutil.children(ctx),
+ changesettag=showtags,
+ changesetbookmark=showbookmarks,
+ changesetbranch=showbranch,
+ author=ctx.user(),
+ desc=ctx.description(),
+ date=ctx.date(),
+ files=files,
+ diffsummary=lambda **x: webutil.diffsummary(diffstatgen),
+ diffstat=diffstat,
+ archives=web.archivelist(ctx.hex()),
+ tags=webutil.nodetagsdict(web.repo, ctx.node()),
+ bookmarks=webutil.nodebookmarksdict(web.repo, ctx.node()),
+ branch=webutil.nodebranchnodefault(ctx),
+ inbranch=webutil.nodeinbranch(web.repo, ctx),
+ branches=webutil.nodebranchdict(web.repo, ctx))
+
+rev = changeset
+
+def manifest(web, req, tmpl):
+ ctx = webutil.changectx(web.repo, req)
+ path = webutil.cleanpath(web.repo, req.form.get('file', [''])[0])
+ mf = ctx.manifest()
+ node = ctx.node()
+
+ files = {}
+ dirs = {}
+ parity = paritygen(web.stripecount)
+
+ if path and path[-1] != "/":
+ path += "/"
+ l = len(path)
+ abspath = "/" + path
+
+ for f, n in mf.iteritems():
+ if f[:l] != path:
+ continue
+ remain = f[l:]
+ elements = remain.split('/')
+ if len(elements) == 1:
+ files[remain] = f
+ else:
+ h = dirs # need to retain ref to dirs (root)
+ for elem in elements[0:-1]:
+ if elem not in h:
+ h[elem] = {}
+ h = h[elem]
+ if len(h) > 1:
+ break
+ h[None] = None # denotes files present
+
+ if mf and not files and not dirs:
+ raise ErrorResponse(HTTP_NOT_FOUND, 'path not found: ' + path)
+
+ def filelist(**map):
+ for f in sorted(files):
+ full = files[f]
+
+ fctx = ctx.filectx(full)
+ yield {"file": full,
+ "parity": parity.next(),
+ "basename": f,
+ "date": fctx.date(),
+ "size": fctx.size(),
+ "permissions": mf.flags(full)}
+
+ def dirlist(**map):
+ for d in sorted(dirs):
+
+ emptydirs = []
+ h = dirs[d]
+ while isinstance(h, dict) and len(h) == 1:
+ k, v = h.items()[0]
+ if v:
+ emptydirs.append(k)
+ h = v
+
+ path = "%s%s" % (abspath, d)
+ yield {"parity": parity.next(),
+ "path": path,
+ "emptydirs": "/".join(emptydirs),
+ "basename": d}
+
+ return tmpl("manifest",
+ rev=ctx.rev(),
+ node=hex(node),
+ path=abspath,
+ up=webutil.up(abspath),
+ upparity=parity.next(),
+ fentries=filelist,
+ dentries=dirlist,
+ archives=web.archivelist(hex(node)),
+ tags=webutil.nodetagsdict(web.repo, node),
+ bookmarks=webutil.nodebookmarksdict(web.repo, node),
+ inbranch=webutil.nodeinbranch(web.repo, ctx),
+ branches=webutil.nodebranchdict(web.repo, ctx))
+
+def tags(web, req, tmpl):
+ i = web.repo.tagslist()
+ i.reverse()
+ parity = paritygen(web.stripecount)
+
+ def entries(notip=False, limit=0, **map):
+ count = 0
+ for k, n in i:
+ if notip and k == "tip":
+ continue
+ if limit > 0 and count >= limit:
+ continue
+ count = count + 1
+ yield {"parity": parity.next(),
+ "tag": k,
+ "date": web.repo[n].date(),
+ "node": hex(n)}
+
+ return tmpl("tags",
+ node=hex(web.repo.changelog.tip()),
+ entries=lambda **x: entries(False, 0, **x),
+ entriesnotip=lambda **x: entries(True, 0, **x),
+ latestentry=lambda **x: entries(True, 1, **x))
+
+def bookmarks(web, req, tmpl):
+ i = web.repo._bookmarks.items()
+ parity = paritygen(web.stripecount)
+
+ def entries(limit=0, **map):
+ count = 0
+ for k, n in sorted(i):
+ if limit > 0 and count >= limit:
+ continue
+ count = count + 1
+ yield {"parity": parity.next(),
+ "bookmark": k,
+ "date": web.repo[n].date(),
+ "node": hex(n)}
+
+ return tmpl("bookmarks",
+ node=hex(web.repo.changelog.tip()),
+ entries=lambda **x: entries(0, **x),
+ latestentry=lambda **x: entries(1, **x))
+
+def branches(web, req, tmpl):
+ tips = (web.repo[n] for t, n in web.repo.branchtags().iteritems())
+ heads = web.repo.heads()
+ parity = paritygen(web.stripecount)
+ sortkey = lambda ctx: ('close' not in ctx.extra(), ctx.rev())
+
+ def entries(limit, **map):
+ count = 0
+ for ctx in sorted(tips, key=sortkey, reverse=True):
+ if limit > 0 and count >= limit:
+ return
+ count += 1
+ if not web.repo.branchheads(ctx.branch()):
+ status = 'closed'
+ elif ctx.node() not in heads:
+ status = 'inactive'
+ else:
+ status = 'open'
+ yield {'parity': parity.next(),
+ 'branch': ctx.branch(),
+ 'status': status,
+ 'node': ctx.hex(),
+ 'date': ctx.date()}
+
+ return tmpl('branches', node=hex(web.repo.changelog.tip()),
+ entries=lambda **x: entries(0, **x),
+ latestentry=lambda **x: entries(1, **x))
+
+def summary(web, req, tmpl):
+ i = web.repo.tagslist()
+ i.reverse()
+
+ def tagentries(**map):
+ parity = paritygen(web.stripecount)
+ count = 0
+ for k, n in i:
+ if k == "tip": # skip tip
+ continue
+
+ count += 1
+ if count > 10: # limit to 10 tags
+ break
+
+ yield tmpl("tagentry",
+ parity=parity.next(),
+ tag=k,
+ node=hex(n),
+ date=web.repo[n].date())
+
+ def bookmarks(**map):
+ parity = paritygen(web.stripecount)
+ b = web.repo._bookmarks.items()
+ for k, n in sorted(b)[:10]: # limit to 10 bookmarks
+ yield {'parity': parity.next(),
+ 'bookmark': k,
+ 'date': web.repo[n].date(),
+ 'node': hex(n)}
+
+ def branches(**map):
+ parity = paritygen(web.stripecount)
+
+ b = web.repo.branchtags()
+ l = [(-web.repo.changelog.rev(n), n, t) for t, n in b.iteritems()]
+ for r, n, t in sorted(l):
+ yield {'parity': parity.next(),
+ 'branch': t,
+ 'node': hex(n),
+ 'date': web.repo[n].date()}
+
+ def changelist(**map):
+ parity = paritygen(web.stripecount, offset=start - end)
+ l = [] # build a list in forward order for efficiency
+ for i in xrange(start, end):
+ ctx = web.repo[i]
+ n = ctx.node()
+ hn = hex(n)
+
+ l.insert(0, tmpl(
+ 'shortlogentry',
+ parity=parity.next(),
+ author=ctx.user(),
+ desc=ctx.description(),
+ date=ctx.date(),
+ rev=i,
+ node=hn,
+ tags=webutil.nodetagsdict(web.repo, n),
+ bookmarks=webutil.nodebookmarksdict(web.repo, n),
+ inbranch=webutil.nodeinbranch(web.repo, ctx),
+ branches=webutil.nodebranchdict(web.repo, ctx)))
+
+ yield l
+
+ tip = web.repo['tip']
+ count = len(web.repo)
+ start = max(0, count - web.maxchanges)
+ end = min(count, start + web.maxchanges)
+
+ return tmpl("summary",
+ desc=web.config("web", "description", "unknown"),
+ owner=get_contact(web.config) or "unknown",
+ lastchange=tip.date(),
+ tags=tagentries,
+ bookmarks=bookmarks,
+ branches=branches,
+ shortlog=changelist,
+ node=tip.hex(),
+ archives=web.archivelist("tip"))
+
+def filediff(web, req, tmpl):
+ fctx, ctx = None, None
+ try:
+ fctx = webutil.filectx(web.repo, req)
+ except LookupError:
+ ctx = webutil.changectx(web.repo, req)
+ path = webutil.cleanpath(web.repo, req.form['file'][0])
+ if path not in ctx.files():
+ raise
+
+ if fctx is not None:
+ n = fctx.node()
+ path = fctx.path()
+ else:
+ n = ctx.node()
+ # path already defined in except clause
+
+ parity = paritygen(web.stripecount)
+ style = web.config('web', 'style', 'paper')
+ if 'style' in req.form:
+ style = req.form['style'][0]
+
+ diffs = webutil.diffs(web.repo, tmpl, fctx or ctx, [path], parity, style)
+ rename = fctx and webutil.renamelink(fctx) or []
+ ctx = fctx and fctx or ctx
+ return tmpl("filediff",
+ file=path,
+ node=hex(n),
+ rev=ctx.rev(),
+ date=ctx.date(),
+ desc=ctx.description(),
+ author=ctx.user(),
+ rename=rename,
+ branch=webutil.nodebranchnodefault(ctx),
+ parent=webutil.parents(ctx),
+ child=webutil.children(ctx),
+ diff=diffs)
+
+diff = filediff
+
+def annotate(web, req, tmpl):
+ fctx = webutil.filectx(web.repo, req)
+ f = fctx.path()
+ parity = paritygen(web.stripecount)
+
+ def annotate(**map):
+ last = None
+ if binary(fctx.data()):
+ mt = (mimetypes.guess_type(fctx.path())[0]
+ or 'application/octet-stream')
+ lines = enumerate([((fctx.filectx(fctx.filerev()), 1),
+ '(binary:%s)' % mt)])
+ else:
+ lines = enumerate(fctx.annotate(follow=True, linenumber=True))
+ for lineno, ((f, targetline), l) in lines:
+ fnode = f.filenode()
+
+ if last != fnode:
+ last = fnode
+
+ yield {"parity": parity.next(),
+ "node": f.hex(),
+ "rev": f.rev(),
+ "author": f.user(),
+ "desc": f.description(),
+ "file": f.path(),
+ "targetline": targetline,
+ "line": l,
+ "lineid": "l%d" % (lineno + 1),
+ "linenumber": "% 6d" % (lineno + 1),
+ "revdate": f.date()}
+
+ return tmpl("fileannotate",
+ file=f,
+ annotate=annotate,
+ path=webutil.up(f),
+ rev=fctx.rev(),
+ node=fctx.hex(),
+ author=fctx.user(),
+ date=fctx.date(),
+ desc=fctx.description(),
+ rename=webutil.renamelink(fctx),
+ branch=webutil.nodebranchnodefault(fctx),
+ parent=webutil.parents(fctx),
+ child=webutil.children(fctx),
+ permissions=fctx.manifest().flags(f))
+
def filelog(web, req, tmpl):
    """hgweb command: render the 'filelog' template (history of one file).

    If the file does not exist at the requested revision, fall back to
    the latest file revision at or before it; re-raise LookupError when
    the file never existed or the revision predates it.
    """
    try:
        fctx = webutil.filectx(web.repo, req)
        f = fctx.path()
        fl = fctx.filelog()
    except error.LookupError:
        f = webutil.cleanpath(web.repo, req.form['file'][0])
        fl = web.repo.file(f)
        numrevs = len(fl)
        if not numrevs: # file doesn't exist at all
            raise
        rev = webutil.changectx(web.repo, req).rev()
        first = fl.linkrev(0)
        if rev < first: # current rev is from before file existed
            raise
        # walk backwards to the newest file revision linked at or below rev
        frev = numrevs - 1
        while fl.linkrev(frev) > rev:
            frev -= 1
        fctx = web.repo.filectx(f, fl.linkrev(frev))

    revcount = web.maxshortchanges
    if 'revcount' in req.form:
        revcount = int(req.form.get('revcount', [revcount])[0])
        revcount = max(revcount, 1)
        tmpl.defaults['sessionvars']['revcount'] = revcount

    # 'less'/'more' links keep all query vars but halve/double revcount
    lessvars = copy.copy(tmpl.defaults['sessionvars'])
    lessvars['revcount'] = max(revcount / 2, 1)
    morevars = copy.copy(tmpl.defaults['sessionvars'])
    morevars['revcount'] = revcount * 2

    count = fctx.filerev() + 1
    start = max(0, fctx.filerev() - revcount + 1) # first rev on this page
    end = min(count, start + revcount) # last rev on this page
    parity = paritygen(web.stripecount, offset=start - end)

    def entries(limit=0, **map):
        l = []

        repo = web.repo
        for i in xrange(start, end):
            iterfctx = fctx.filectx(i)

            # insert at the front so newest revisions come first
            l.insert(0, {"parity": parity.next(),
                         "filerev": i,
                         "file": f,
                         "node": iterfctx.hex(),
                         "author": iterfctx.user(),
                         "date": iterfctx.date(),
                         "rename": webutil.renamelink(iterfctx),
                         "parent": webutil.parents(iterfctx),
                         "child": webutil.children(iterfctx),
                         "desc": iterfctx.description(),
                         "tags": webutil.nodetagsdict(repo, iterfctx.node()),
                         "bookmarks": webutil.nodebookmarksdict(
                             repo, iterfctx.node()),
                         "branch": webutil.nodebranchnodefault(iterfctx),
                         "inbranch": webutil.nodeinbranch(repo, iterfctx),
                         "branches": webutil.nodebranchdict(repo, iterfctx)})

        if limit > 0:
            l = l[:limit]

        for e in l:
            yield e

    nodefunc = lambda x: fctx.filectx(fileid=x)
    nav = webutil.revnavgen(end - 1, revcount, count, nodefunc)
    return tmpl("filelog", file=f, node=fctx.hex(), nav=nav,
                entries=lambda **x: entries(limit=0, **x),
                latestentry=lambda **x: entries(limit=1, **x),
                revcount=revcount, morevars=morevars, lessvars=lessvars)
+
def archive(web, req, tmpl):
    """hgweb command: stream an archive (zip/tar...) of one revision.

    The archive type must be both supported by this hgweb instance and
    enabled via web.allow_archive / web.allow<type> configuration.
    Writes the archive directly to the request and returns no body.
    """
    type_ = req.form.get('type', [None])[0]
    allowed = web.configlist("web", "allow_archive")
    key = req.form['node'][0]

    if type_ not in web.archives:
        msg = 'Unsupported archive type: %s' % type_
        raise ErrorResponse(HTTP_NOT_FOUND, msg)

    if not ((type_ in allowed or
        web.configbool("web", "allow" + type_, False))):
        msg = 'Archive type not allowed: %s' % type_
        raise ErrorResponse(HTTP_FORBIDDEN, msg)

    # sanitize the repo name for use in the download filename
    reponame = re.sub(r"\W+", "-", os.path.basename(web.reponame))
    cnode = web.repo.lookup(key)
    arch_version = key
    # use the short hash when the key is already a node or the moving 'tip'
    if cnode == key or key == 'tip':
        arch_version = short(cnode)
    name = "%s-%s" % (reponame, arch_version)
    mimetype, artype, extension, encoding = web.archive_specs[type_]
    headers = [
        ('Content-Type', mimetype),
        ('Content-Disposition', 'attachment; filename=%s%s' % (name, extension))
        ]
    if encoding:
        headers.append(('Content-Encoding', encoding))
    req.header(headers)
    req.respond(HTTP_OK)
    archival.archive(web.repo, req, cnode, artype, prefix=name)
    return []
+
+
def static(web, req, tmpl):
    """hgweb command: serve a static file (stylesheets, icons, ...).

    Files are looked up in the template path's 'static' subdirectories
    unless web.static overrides the search path.
    """
    fname = req.form['file'][0]
    # a repo owner may set web.static in .hg/hgrc to get any file
    # readable by the user running the CGI script
    static = web.config("web", "static", None, untrusted=False)
    if not static:
        tp = web.templatepath or templater.templatepath()
        if isinstance(tp, str):
            tp = [tp]
        static = [os.path.join(p, 'static') for p in tp]
    return [staticfile(static, fname, req)]
+
def graph(web, req, tmpl):
    """hgweb command: render the 'graph' template (DAG visualisation).

    Builds per-changeset JS data (vertex, edges, metadata) for the
    revisions from startrev down to downrev.
    """
    rev = webutil.changectx(web.repo, req).rev()
    bg_height = 39  # pixel height of one graph row
    revcount = web.maxshortchanges
    if 'revcount' in req.form:
        revcount = int(req.form.get('revcount', [revcount])[0])
        revcount = max(revcount, 1)
        tmpl.defaults['sessionvars']['revcount'] = revcount

    # 'less'/'more' links keep all query vars but halve/double revcount
    lessvars = copy.copy(tmpl.defaults['sessionvars'])
    lessvars['revcount'] = max(revcount / 2, 1)
    morevars = copy.copy(tmpl.defaults['sessionvars'])
    morevars['revcount'] = revcount * 2

    max_rev = len(web.repo) - 1
    revcount = min(max_rev, revcount)
    revnode = web.repo.changelog.node(rev)
    revnode_hex = hex(revnode)
    uprev = min(max_rev, rev + revcount)
    downrev = max(0, rev - revcount)
    count = len(web.repo)
    changenav = webutil.revnavgen(rev, revcount, count, web.repo.changectx)
    startrev = rev
    # if starting revision is less than 60 set it to uprev
    if rev < web.maxshortchanges:
        startrev = uprev

    dag = graphmod.dagwalker(web.repo, range(startrev, downrev - 1, -1))
    tree = list(graphmod.colored(dag))
    canvasheight = (len(tree) + 1) * bg_height - 27
    data = []
    for (id, type, ctx, vtx, edges) in tree:
        if type != graphmod.CHANGESET:
            continue
        node = str(ctx)
        age = templatefilters.age(ctx.date())
        desc = templatefilters.firstline(ctx.description())
        desc = cgi.escape(templatefilters.nonempty(desc))
        user = cgi.escape(templatefilters.person(ctx.user()))
        branch = ctx.branch()
        # pair the branch name with whether this node is the branch tip
        branch = branch, web.repo.branchtags().get(branch) == ctx.node()
        data.append((node, vtx, edges, desc, user, age, branch, ctx.tags(),
                     ctx.bookmarks()))

    return tmpl('graph', rev=rev, revcount=revcount, uprev=uprev,
                lessvars=lessvars, morevars=morevars, downrev=downrev,
                canvasheight=canvasheight, jsdata=data, bg_height=bg_height,
                node=revnode_hex, changenav=changenav)
+
def _getdoc(e):
    """Return the first docstring line of the command in table entry e,
    or a placeholder when the command has no help text."""
    doc = e[0].__doc__
    if doc:
        return doc.split('\n')[0]
    return _('(no help text available)')
+
def help(web, req, tmpl):
    """hgweb command: render built-in help.

    Without a topic ('node' form field) renders the index of topics and
    commands; with one, captures the CLI help text for that topic.
    NOTE: intentionally shadows the 'help' builtin -- the name must match
    the hgweb command name.
    """
    from mercurial import commands # avoid cycle

    topicname = req.form.get('node', [None])[0]
    if not topicname:
        def topics(**map):
            for entries, summary, _ in helpmod.helptable:
                # the longest alias is the canonical topic name
                entries = sorted(entries, key=len)
                yield {'topic': entries[-1], 'summary': summary}

        early, other = [], []
        primary = lambda s: s.split('|')[0]
        for c, e in commands.table.iteritems():
            doc = _getdoc(e)
            if 'DEPRECATED' in doc or c.startswith('debug'):
                continue
            cmd = primary(c)
            # a leading '^' marks frequently-used ("early") commands
            if cmd.startswith('^'):
                early.append((cmd[1:], doc))
            else:
                other.append((cmd, doc))

        early.sort()
        other.sort()

        def earlycommands(**map):
            for c, doc in early:
                yield {'topic': c, 'summary': doc}

        def othercommands(**map):
            for c, doc in other:
                yield {'topic': c, 'summary': doc}

        return tmpl('helptopics', topics=topics, earlycommands=earlycommands,
                    othercommands=othercommands, title='Index')

    # capture the CLI help output for one topic into a buffer
    u = webutil.wsgiui()
    u.pushbuffer()
    try:
        commands.help_(u, topicname)
    except error.UnknownCommand:
        raise ErrorResponse(HTTP_NOT_FOUND)
    doc = u.popbuffer()
    return tmpl('help', topic=topicname, doc=doc)
diff --git a/websdk/mercurial/hgweb/webutil.py b/websdk/mercurial/hgweb/webutil.py
index 5ff5492..bad7cdd 120000..100644
--- a/websdk/mercurial/hgweb/webutil.py
+++ b/websdk/mercurial/hgweb/webutil.py
@@ -1 +1,269 @@
-/usr/share/pyshared/mercurial/hgweb/webutil.py \ No newline at end of file
+# hgweb/webutil.py - utility library for the web interface.
+#
+# Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
+# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+import os, copy
+from mercurial import match, patch, scmutil, error, ui, util
+from mercurial.i18n import _
+from mercurial.node import hex, nullid
+
def up(p):
    """Return the parent directory of path p as a slash-terminated path.

    The argument is normalized to start with '/' and any trailing slash
    is stripped before taking the dirname; the root stays '/'.
    """
    path = p
    if path[0] != "/":
        path = "/" + path
    if path[-1] == "/":
        path = path[:-1]
    parent = os.path.dirname(path)
    return "/" if parent == "/" else parent + "/"
+
def revnavgen(pos, pagelen, limit, nodefunc):
    """Build before/after navigation links around revision 'pos'.

    pagelen is the page size, limit the total revision count, and
    nodefunc maps a revision to a context whose node is linked.
    Returns a one-element tuple holding {'before': gen, 'after': gen}.
    """
    def seq(factor, limit=None):
        # yields the jump distances: pagelen, then 1, 3, 10, 30, 100, ...
        if limit:
            yield limit
            if limit >= 20 and limit <= 40:
                yield 50
        else:
            yield 1 * factor
            yield 3 * factor
        for f in seq(factor * 10):
            yield f

    navbefore = []
    navafter = []

    last = 0
    for f in seq(1, pagelen):
        # skip jumps smaller than a page or not strictly increasing
        if f < pagelen or f <= last:
            continue
        if f > limit:
            break
        last = f
        if pos + f < limit:
            navafter.append(("+%d" % f, hex(nodefunc(pos + f).node())))
        if pos - f >= 0:
            navbefore.insert(0, ("-%d" % f, hex(nodefunc(pos - f).node())))

    navafter.append(("tip", "tip"))
    try:
        navbefore.insert(0, ("(0)", hex(nodefunc('0').node())))
    except error.RepoError:
        # empty repository: no revision 0 to link to
        pass

    def gen(l):
        def f(**map):
            for label, node in l:
                yield {"label": label, "node": node}
        return f

    return (dict(before=gen(navbefore), after=gen(navafter)),)
+
+def _siblings(siblings=[], hiderev=None):
+ siblings = [s for s in siblings if s.node() != nullid]
+ if len(siblings) == 1 and siblings[0].rev() == hiderev:
+ return
+ for s in siblings:
+ d = {'node': s.hex(), 'rev': s.rev()}
+ d['user'] = s.user()
+ d['date'] = s.date()
+ d['description'] = s.description()
+ d['branch'] = s.branch()
+ if util.safehasattr(s, 'path'):
+ d['file'] = s.path()
+ yield d
+
def parents(ctx, hide=None):
    """Yield template mappings for ctx's parents, hiding revision 'hide'
    when it is the only parent."""
    return _siblings(ctx.parents(), hide)
+
def children(ctx, hide=None):
    """Yield template mappings for ctx's children, hiding revision 'hide'
    when it is the only child."""
    return _siblings(ctx.children(), hide)
+
def renamelink(fctx):
    """Return rename-source template data for fctx: a one-element list of
    {'file': source path, 'node': hex source node}, or [] when the file
    context is not a rename."""
    renamed = fctx.renamed()
    if not renamed:
        return []
    return [dict(file=renamed[0], node=hex(renamed[1]))]
+
def nodetagsdict(repo, node):
    """Expose node's tags as a list of {'name': tag} template mappings."""
    return [dict(name=tag) for tag in repo.nodetags(node)]
+
def nodebookmarksdict(repo, node):
    """Expose node's bookmarks as a list of {'name': mark} mappings."""
    return [dict(name=mark) for mark in repo.nodebookmarks(node)]
+
def nodebranchdict(repo, ctx):
    """Return [{'name': branch}] when ctx is the tip of its branch,
    otherwise [].

    In an empty repo ctx.node() is nullid and ctx.branch() is 'default',
    while branchtags() is an empty dict -- dict.get keeps the lookup
    from raising there.
    """
    branch = ctx.branch()
    if repo.branchtags().get(branch) == ctx.node():
        return [{"name": branch}]
    return []
+
def nodeinbranch(repo, ctx):
    """Return [{'name': branch}] when ctx is on a named (non-default)
    branch without being that branch's tip, otherwise []."""
    branch = ctx.branch()
    if branch == 'default':
        return []
    if repo.branchtags().get(branch) == ctx.node():
        return []
    return [{"name": branch}]
+
def nodebranchnodefault(ctx):
    """Return [{'name': branch}] for ctx's branch, or [] when it is the
    'default' branch (which templates leave unlabelled)."""
    branch = ctx.branch()
    return [] if branch == 'default' else [{"name": branch}]
+
def showtag(repo, tmpl, t1, node=nullid, **args):
    """Yield template t1 once per tag attached to node."""
    for t in repo.nodetags(node):
        yield tmpl(t1, tag=t, **args)
+
def showbookmark(repo, tmpl, t1, node=nullid, **args):
    """Yield template t1 once per bookmark attached to node."""
    for t in repo.nodebookmarks(node):
        yield tmpl(t1, bookmark=t, **args)
+
def cleanpath(repo, path):
    """Normalize a request path to a repo-relative path.

    canonpath also audits the path, rejecting anything escaping the
    repository root.
    """
    path = path.lstrip('/')
    return scmutil.canonpath(repo.root, '', path)
+
def changectx(repo, req):
    """Return the changectx named by the request's 'node' or 'manifest'
    form field, defaulting to 'tip'.

    Ids that fail as changeset lookups are retried as manifest nodes and
    mapped back to the linked changeset.
    """
    changeid = "tip"
    if 'node' in req.form:
        changeid = req.form['node'][0]
    elif 'manifest' in req.form:
        changeid = req.form['manifest'][0]

    try:
        ctx = repo[changeid]
    except error.RepoError:
        # fall back: treat the id as a manifest node
        man = repo.manifest
        ctx = repo[man.linkrev(man.rev(man.lookup(changeid)))]

    return ctx
+
def filectx(repo, req):
    """Return the filectx for the request's 'file' at its 'node' (or
    'filenode') form field.

    When the id is not a changeset, it is retried as a file revision id.
    """
    path = cleanpath(repo, req.form['file'][0])
    if 'node' in req.form:
        changeid = req.form['node'][0]
    else:
        changeid = req.form['filenode'][0]
    try:
        fctx = repo[changeid][path]
    except error.RepoError:
        # the id is a file node rather than a changeset id
        fctx = repo.filectx(path, fileid=changeid)

    return fctx
+
def listfilediffs(tmpl, files, node, max):
    """Yield a 'filedifflink' template entry for each of the first *max*
    files, followed by a 'fileellipses' entry when the list was cut."""
    truncated = len(files) > max
    for name in files[:max]:
        yield tmpl('filedifflink', node=hex(node), file=name)
    if truncated:
        yield tmpl('fileellipses')
+
def diffs(repo, tmpl, ctx, files, parity, style):
    """Yield one 'diffblock' template per file changed between ctx and
    its first parent.

    files, when given, restricts the diff to exactly those paths.
    style 'raw' keeps the 'diff ...' header line inside each block.
    """

    def countgen():
        # infinite 1-based counter for diff block numbers
        start = 1
        while True:
            yield start
            start += 1

    blockcount = countgen()
    def prettyprintlines(diff):
        # classify every diff line so the template can style it
        blockno = blockcount.next()
        for lineno, l in enumerate(diff.splitlines(True)):
            lineno = "%d.%d" % (blockno, lineno + 1)
            if l.startswith('+'):
                ltype = "difflineplus"
            elif l.startswith('-'):
                ltype = "difflineminus"
            elif l.startswith('@'):
                ltype = "difflineat"
            else:
                ltype = "diffline"
            yield tmpl(ltype,
                       line=l,
                       lineid="l%s" % lineno,
                       linenumber="% 8s" % lineno)

    if files:
        m = match.exact(repo.root, repo.getcwd(), files)
    else:
        m = match.always(repo.root, repo.getcwd())

    diffopts = patch.diffopts(repo.ui, untrusted=True)
    parents = ctx.parents()
    node1 = parents and parents[0].node() or nullid
    node2 = ctx.node()

    block = []
    for chunk in patch.diff(repo, node1, node2, m, opts=diffopts):
        if chunk.startswith('diff') and block:
            # a new file's diff begins: flush the previous file's block
            yield tmpl('diffblock', parity=parity.next(),
                       lines=prettyprintlines(''.join(block)))
            block = []
        if chunk.startswith('diff') and style != 'raw':
            # drop the 'diff ...' header line unless raw style is wanted
            chunk = ''.join(chunk.splitlines(True)[1:])
        block.append(chunk)
    yield tmpl('diffblock', parity=parity.next(),
               lines=prettyprintlines(''.join(block)))
+
def diffstatgen(ctx):
    '''Generator function that provides the diffstat data.'''

    # computed once, then yielded forever so multiple consumers
    # (diffsummary, diffstat) can share the same result
    stats = patch.diffstatdata(util.iterlines(ctx.diff()))
    maxname, maxtotal, addtotal, removetotal, binary = patch.diffstatsum(stats)
    while True:
        yield stats, maxname, maxtotal, addtotal, removetotal, binary
+
def diffsummary(statgen):
    '''Return a short summary of the diff.'''

    # statgen is a diffstatgen generator; pull one (shared) result
    stats, maxname, maxtotal, addtotal, removetotal, binary = statgen.next()
    return _(' %d files changed, %d insertions(+), %d deletions(-)\n') % (
             len(stats), addtotal, removetotal)
+
def diffstat(tmpl, ctx, statgen, parity):
    '''Return a diffstat template for each file in the diff.'''

    stats, maxname, maxtotal, addtotal, removetotal, binary = statgen.next()
    files = ctx.files()

    def pct(i):
        # percentage of the largest per-file change, for the bar widths
        if maxtotal == 0:
            return 0
        return (float(i) / maxtotal) * 100

    fileno = 0
    for filename, adds, removes, isbinary in stats:
        # files not touched by this ctx (e.g. merge leftovers) get no link
        template = filename in files and 'diffstatlink' or 'diffstatnolink'
        total = adds + removes
        fileno += 1
        yield tmpl(template, node=ctx.hex(), file=filename, fileno=fileno,
                   total=total, addpct=pct(adds), removepct=pct(removes),
                   parity=parity.next())
+
class sessionvars(object):
    """Mapping of URL query variables, iterable as template mappings.

    'start' is the separator emitted before the first variable
    (normally '?'); later variables use '&'.
    """
    def __init__(self, vars, start='?'):
        self.start = start
        self.vars = vars
    def __getitem__(self, key):
        return self.vars[key]
    def __setitem__(self, key, value):
        self.vars[key] = value
    def __copy__(self):
        # shallow-copy the dict so copies (lessvars/morevars) can diverge
        return sessionvars(copy.copy(self.vars), self.start)
    def __iter__(self):
        separator = self.start
        for key, value in self.vars.iteritems():
            yield {'name': key, 'value': str(value), 'separator': separator}
            separator = '&'
+
class wsgiui(ui.ui):
    # default termwidth breaks under mod_wsgi
    def termwidth(self):
        # fixed width: there is no terminal behind a WSGI request
        return 80
diff --git a/websdk/mercurial/hgweb/wsgicgi.py b/websdk/mercurial/hgweb/wsgicgi.py
index fa03838..5d18d8e 120000..100644
--- a/websdk/mercurial/hgweb/wsgicgi.py
+++ b/websdk/mercurial/hgweb/wsgicgi.py
@@ -1 +1,81 @@
-/usr/share/pyshared/mercurial/hgweb/wsgicgi.py \ No newline at end of file
+# hgweb/wsgicgi.py - CGI->WSGI translator
+#
+# Copyright 2006 Eric Hopper <hopper@omnifarious.org>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+#
+# This was originally copied from the public domain code at
+# http://www.python.org/dev/peps/pep-0333/#the-server-gateway-side
+
+import os, sys
+from mercurial import util
+from mercurial.hgweb import common
+
def launch(application):
    """Run a WSGI application as a CGI script.

    Translates the CGI environment (os.environ, stdin/stdout) into a
    WSGI environ, calls the application, and streams its output with a
    CGI 'Status:' header line.  Based on the PEP 333 reference server.
    """
    # CRLF-sensitive protocols require binary-mode stdio (Windows)
    util.setbinary(sys.stdin)
    util.setbinary(sys.stdout)

    environ = dict(os.environ.iteritems())
    environ.setdefault('PATH_INFO', '')
    if environ.get('SERVER_SOFTWARE', '').startswith('Microsoft-IIS'):
        # IIS includes script_name in path_info
        scriptname = environ['SCRIPT_NAME']
        if environ['PATH_INFO'].startswith(scriptname):
            environ['PATH_INFO'] = environ['PATH_INFO'][len(scriptname):]

    stdin = sys.stdin
    if environ.get('HTTP_EXPECT', '').lower() == '100-continue':
        # delay reading the body until the client is told to continue
        stdin = common.continuereader(stdin, sys.stdout.write)

    environ['wsgi.input'] = stdin
    environ['wsgi.errors'] = sys.stderr
    environ['wsgi.version'] = (1, 0)
    environ['wsgi.multithread'] = False
    environ['wsgi.multiprocess'] = True
    environ['wsgi.run_once'] = True

    if environ.get('HTTPS', 'off').lower() in ('on', '1', 'yes'):
        environ['wsgi.url_scheme'] = 'https'
    else:
        environ['wsgi.url_scheme'] = 'http'

    headers_set = []
    headers_sent = []
    out = sys.stdout

    def write(data):
        if not headers_set:
            raise AssertionError("write() before start_response()")

        elif not headers_sent:
            # Before the first output, send the stored headers
            status, response_headers = headers_sent[:] = headers_set
            out.write('Status: %s\r\n' % status)
            for header in response_headers:
                out.write('%s: %s\r\n' % header)
            out.write('\r\n')

        out.write(data)
        out.flush()

    def start_response(status, response_headers, exc_info=None):
        if exc_info:
            try:
                if headers_sent:
                    # Re-raise original exception if headers sent
                    raise exc_info[0](exc_info[1], exc_info[2])
            finally:
                exc_info = None # avoid dangling circular ref
        elif headers_set:
            raise AssertionError("Headers already set!")

        headers_set[:] = [status, response_headers]
        return write

    content = application(environ, start_response)
    try:
        for chunk in content:
            write(chunk)
    finally:
        # per WSGI, close() must be called on the iterable if present
        getattr(content, 'close', lambda : None)()
diff --git a/websdk/mercurial/hook.py b/websdk/mercurial/hook.py
index 6d162db..88987ee 120000..100644
--- a/websdk/mercurial/hook.py
+++ b/websdk/mercurial/hook.py
@@ -1 +1,173 @@
-/usr/share/pyshared/mercurial/hook.py \ No newline at end of file
+# hook.py - hook support for mercurial
+#
+# Copyright 2007 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from i18n import _
+import os, sys
+import extensions, util
+
def _pythonhook(ui, repo, name, hname, funcname, args, throw):
    '''call python hook. hook is callable object, looked up as
    name in python module. if callable returns "true", hook
    fails, else passes. if hook raises exception, treated as
    hook failure. exception propagates if throw is "true".

    reason for "true" meaning "hook failed" is so that
    unmodified commands (e.g. mercurial.commands.update) can
    be run as hooks without wrappers to convert return values.'''

    ui.note(_("calling hook %s: %s\n") % (hname, funcname))
    obj = funcname
    if not util.safehasattr(obj, '__call__'):
        # funcname is a 'module.attr' dotted string: import the module
        # and resolve the attribute chain
        d = funcname.rfind('.')
        if d == -1:
            raise util.Abort(_('%s hook is invalid ("%s" not in '
                               'a module)') % (hname, funcname))
        modname = funcname[:d]
        oldpaths = sys.path
        if util.mainfrozen():
            # binary installs require sys.path manipulation
            modpath, modfile = os.path.split(modname)
            if modpath and modfile:
                sys.path = sys.path[:] + [modpath]
                modname = modfile
        try:
            obj = __import__(modname)
        except ImportError:
            e1 = sys.exc_type, sys.exc_value, sys.exc_traceback
            try:
                # extensions are loaded with hgext_ prefix
                obj = __import__("hgext_%s" % modname)
            except ImportError:
                e2 = sys.exc_type, sys.exc_value, sys.exc_traceback
                if ui.tracebackflag:
                    ui.warn(_('exception from first failed import attempt:\n'))
                ui.traceback(e1)
                if ui.tracebackflag:
                    ui.warn(_('exception from second failed import attempt:\n'))
                ui.traceback(e2)
                raise util.Abort(_('%s hook is invalid '
                                   '(import of "%s" failed)') %
                                 (hname, modname))
        sys.path = oldpaths
        try:
            for p in funcname.split('.')[1:]:
                obj = getattr(obj, p)
        except AttributeError:
            raise util.Abort(_('%s hook is invalid '
                               '("%s" is not defined)') %
                             (hname, funcname))
        if not util.safehasattr(obj, '__call__'):
            raise util.Abort(_('%s hook is invalid '
                               '("%s" is not callable)') %
                             (hname, funcname))
    try:
        try:
            # redirect IO descriptors the the ui descriptors so hooks
            # that write directly to these don't mess up the command
            # protocol when running through the command server
            old = sys.stdout, sys.stderr, sys.stdin
            sys.stdout, sys.stderr, sys.stdin = ui.fout, ui.ferr, ui.fin

            r = obj(ui=ui, repo=repo, hooktype=name, **args)
        except KeyboardInterrupt:
            raise
        except Exception, exc:
            if isinstance(exc, util.Abort):
                ui.warn(_('error: %s hook failed: %s\n') %
                        (hname, exc.args[0]))
            else:
                ui.warn(_('error: %s hook raised an exception: '
                          '%s\n') % (hname, exc))
            if throw:
                raise
            ui.traceback()
            return True
    finally:
        sys.stdout, sys.stderr, sys.stdin = old
    if r:
        if throw:
            raise util.Abort(_('%s hook failed') % hname)
        ui.warn(_('warning: %s hook failed\n') % hname)
    return r
+
def _exthook(ui, repo, name, cmd, args, throw):
    """Run an external (shell command) hook.

    Hook arguments are exported as HG_* environment variables.  Returns
    the command's failure status; raises Abort instead when throw is set.
    """
    ui.note(_("running hook %s: %s\n") % (name, cmd))

    env = {}
    for k, v in args.iteritems():
        if util.safehasattr(v, '__call__'):
            # lazily-computed argument: evaluate it now
            v = v()
        if isinstance(v, dict):
            # make the dictionary element order stable across Python
            # implementations
            v = ('{' +
                 ', '.join('%r: %r' % i for i in sorted(v.iteritems())) +
                 '}')
        env['HG_' + k.upper()] = v

    if repo:
        cwd = repo.root
    else:
        cwd = os.getcwd()
    if 'HG_URL' in env and env['HG_URL'].startswith('remote:http'):
        # HTTP wire protocol: route hook output through the ui object
        r = util.system(cmd, environ=env, cwd=cwd, out=ui)
    else:
        r = util.system(cmd, environ=env, cwd=cwd, out=ui.fout)
    if r:
        desc, r = util.explainexit(r)
        if throw:
            raise util.Abort(_('%s hook %s') % (name, desc))
        ui.warn(_('warning: %s hook %s\n') % (name, desc))
    return r
+
# module-level flag: when true, hook() temporarily redirects the real
# stdout to stderr so hook output cannot corrupt wire protocols
_redirect = False
def redirect(state):
    """Set the global stdout-redirection flag (consulted by hook())."""
    global _redirect
    _redirect = state
+
def hook(ui, repo, name, throw=False, **args):
    """Run all configured hooks matching 'name'.

    Python hooks (callables or 'python:' specs) and external shell hooks
    are both supported; the result is the OR of all hook results
    (True/nonzero means some hook failed).
    """
    r = False

    oldstdout = -1
    if _redirect:
        try:
            stdoutno = sys.__stdout__.fileno()
            stderrno = sys.__stderr__.fileno()
            # temporarily redirect stdout to stderr, if possible
            if stdoutno >= 0 and stderrno >= 0:
                sys.__stdout__.flush()
                oldstdout = os.dup(stdoutno)
                os.dup2(stderrno, stdoutno)
        except AttributeError:
            # __stdout/err__ doesn't have fileno(), it's not a real file
            pass

    try:
        for hname, cmd in ui.configitems('hooks'):
            # hook names are matched on the part before the first '.'
            if hname.split('.')[0] != name or not cmd:
                continue
            if util.safehasattr(cmd, '__call__'):
                r = _pythonhook(ui, repo, name, hname, cmd, args, throw) or r
            elif cmd.startswith('python:'):
                if cmd.count(':') >= 2:
                    # 'python:/path/to/file.py:funcname' form
                    path, cmd = cmd[7:].rsplit(':', 1)
                    path = util.expandpath(path)
                    if repo:
                        path = os.path.join(repo.root, path)
                    mod = extensions.loadpath(path, 'hghook.%s' % hname)
                    hookfn = getattr(mod, cmd)
                else:
                    # 'python:module.funcname' form
                    hookfn = cmd[7:].strip()
                r = _pythonhook(ui, repo, name, hname, hookfn, args, throw) or r
            else:
                r = _exthook(ui, repo, hname, cmd, args, throw) or r
    finally:
        # restore the real stdout file descriptor
        if _redirect and oldstdout >= 0:
            os.dup2(oldstdout, stdoutno)
            os.close(oldstdout)

    return r
diff --git a/websdk/mercurial/httpclient/__init__.py b/websdk/mercurial/httpclient/__init__.py
new file mode 100644
index 0000000..3200edd
--- /dev/null
+++ b/websdk/mercurial/httpclient/__init__.py
@@ -0,0 +1,711 @@
+# Copyright 2010, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+"""Improved HTTP/1.1 client library
+
+This library contains an HTTPConnection which is similar to the one in
+httplib, but has several additional features:
+
+ * supports keepalives natively
+ * uses select() to block for incoming data
+ * notices when the server responds early to a request
+ * implements ssl inline instead of in a different class
+"""
+
+import cStringIO
+import errno
+import httplib
+import logging
+import rfc822
+import select
+import socket
+
+import socketutil
+
+logger = logging.getLogger(__name__)
+
+__all__ = ['HTTPConnection', 'HTTPResponse']
+
+HTTP_VER_1_0 = 'HTTP/1.0'
+HTTP_VER_1_1 = 'HTTP/1.1'
+
+_LEN_CLOSE_IS_END = -1
+
+OUTGOING_BUFFER_SIZE = 1 << 15
+INCOMING_BUFFER_SIZE = 1 << 20
+
+HDR_ACCEPT_ENCODING = 'accept-encoding'
+HDR_CONNECTION_CTRL = 'connection'
+HDR_CONTENT_LENGTH = 'content-length'
+HDR_XFER_ENCODING = 'transfer-encoding'
+
+XFER_ENCODING_CHUNKED = 'chunked'
+
+CONNECTION_CLOSE = 'close'
+
+EOL = '\r\n'
+_END_HEADERS = EOL * 2
+
+# Based on some searching around, 1 second seems like a reasonable
+# default here.
+TIMEOUT_ASSUME_CONTINUE = 1
+TIMEOUT_DEFAULT = None
+
+
class HTTPResponse(object):
    """Response from an HTTP server.

    The response will continue to load as available. If you need the
    complete response before continuing, check the .complete() method.
    """
    def __init__(self, sock, timeout):
        self.sock = sock
        # raw bytes received before the end of the headers was seen
        self.raw_response = ''
        # decoded body so far; None until headers are parsed
        self._body = None
        self._headers_len = 0
        # expected body length; _LEN_CLOSE_IS_END means "until close"
        self._content_len = 0
        self.headers = None
        self.will_close = False
        self.status_line = ''
        self.status = None
        self.http_version = None
        self.reason = None
        # chunked transfer-encoding parser state
        self._chunked = False
        self._chunked_done = False
        self._chunked_until_next = 0
        self._chunked_skip_bytes = 0
        self._chunked_preloaded_block = None

        # read() / readline() cursor into _body
        self._read_location = 0
        # detected line ending; may fall back to '\n' or '\r' for
        # non-conforming servers
        self._eol = EOL

        self._timeout = timeout

    @property
    def _end_headers(self):
        return self._eol * 2

    def complete(self):
        """Returns true if this response is completely loaded.

        Note that if this is a connection where complete means the
        socket is closed, this will nearly always return False, even
        in cases where all the data has actually been loaded.
        """
        if self._chunked:
            return self._chunked_done
        if self._content_len == _LEN_CLOSE_IS_END:
            return False
        return self._body is not None and len(self._body) >= self._content_len

    def readline(self):
        """Read a single line from the response body.

        This may block until either a line ending is found or the
        response is complete.
        """
        eol = self._body.find('\n', self._read_location)
        while eol == -1 and not self.complete():
            self._select()
            eol = self._body.find('\n', self._read_location)
        if eol != -1:
            eol += 1
        else:
            eol = len(self._body)
        data = self._body[self._read_location:eol]
        self._read_location = eol
        return data

    def read(self, length=None):
        # if length is None, unbounded read
        while (not self.complete()  # never select on a finished read
               and (not length
                    # unbounded, so we wait for complete()
                    or (self._read_location + length) > len(self._body))):
            self._select()
        if not length:
            length = len(self._body) - self._read_location
        elif len(self._body) < (self._read_location + length):
            length = len(self._body) - self._read_location
        r = self._body[self._read_location:self._read_location + length]
        self._read_location += len(r)
        if self.complete() and self.will_close:
            self.sock.close()
        return r

    def _select(self):
        """Block for incoming data and feed it to the parser.

        Returns False when the socket closed and the body is complete,
        True otherwise.  Raises HTTPTimeoutException on timeout.
        """
        r, _, _ = select.select([self.sock], [], [], self._timeout)
        if not r:
            # socket was not readable. If the response is not complete
            # and we're not a _LEN_CLOSE_IS_END response, raise a timeout.
            # If we are a _LEN_CLOSE_IS_END response and we have no data,
            # raise a timeout.
            if not (self.complete() or
                    (self._content_len == _LEN_CLOSE_IS_END and self._body)):
                logger.info('timed out with timeout of %s', self._timeout)
                raise HTTPTimeoutException('timeout reading data')
            logger.info('cl: %r body: %r', self._content_len, self._body)
        try:
            data = self.sock.recv(INCOMING_BUFFER_SIZE)
            # If the socket was readable and no data was read, that
            # means the socket was closed. If this isn't a
            # _CLOSE_IS_END socket, then something is wrong if we're
            # here (we shouldn't enter _select() if the response is
            # complete), so abort.
            if not data and self._content_len != _LEN_CLOSE_IS_END:
                raise HTTPRemoteClosedError(
                    'server appears to have closed the socket mid-response')
        except socket.sslerror, e:
            if e.args[0] != socket.SSL_ERROR_WANT_READ:
                raise
            logger.debug('SSL_WANT_READ in _select, should retry later')
            return True
        logger.debug('response read %d data during _select', len(data))
        if not data:
            if self.headers and self._content_len == _LEN_CLOSE_IS_END:
                # connection closed: the body is now complete
                self._content_len = len(self._body)
            return False
        else:
            self._load_response(data)
            return True

    def _chunked_parsedata(self, data):
        """Incrementally decode chunked transfer-encoding into _body."""
        if self._chunked_preloaded_block:
            # prepend leftover bytes saved by a previous partial parse
            data = self._chunked_preloaded_block + data
            self._chunked_preloaded_block = None
        while data:
            logger.debug('looping with %d data remaining', len(data))
            # Slice out anything we should skip
            if self._chunked_skip_bytes:
                if len(data) <= self._chunked_skip_bytes:
                    self._chunked_skip_bytes -= len(data)
                    data = ''
                    break
                else:
                    data = data[self._chunked_skip_bytes:]
                    self._chunked_skip_bytes = 0

            # determine how much is until the next chunk
            if self._chunked_until_next:
                amt = self._chunked_until_next
                logger.debug('reading remaining %d of existing chunk', amt)
                self._chunked_until_next = 0
                body = data
            else:
                try:
                    amt, body = data.split(self._eol, 1)
                except ValueError:
                    # incomplete chunk-size line: save it for next time
                    self._chunked_preloaded_block = data
                    logger.debug('saving %r as a preloaded block for chunked',
                                 self._chunked_preloaded_block)
                    return
                amt = int(amt, base=16)
                logger.debug('reading chunk of length %d', amt)
                if amt == 0:
                    # zero-length chunk terminates the body
                    self._chunked_done = True

            # read through end of what we have or the chunk
            self._body += body[:amt]
            if len(body) >= amt:
                data = body[amt:]
                # skip the CRLF trailing each chunk
                self._chunked_skip_bytes = len(self._eol)
            else:
                self._chunked_until_next = amt - len(body)
                self._chunked_skip_bytes = 0
                data = ''

    def _load_response(self, data):
        """Feed raw received bytes into header/body parsing."""
        if self._chunked:
            self._chunked_parsedata(data)
            return
        elif self._body is not None:
            self._body += data
            return

        # We haven't seen end of headers yet
        self.raw_response += data
        # This is a bogus server with bad line endings
        if self._eol not in self.raw_response:
            for bad_eol in ('\n', '\r'):
                if (bad_eol in self.raw_response
                    # verify that bad_eol is not the end of the incoming data
                    # as this could be a response line that just got
                    # split between \r and \n.
                    and (self.raw_response.index(bad_eol) <
                         (len(self.raw_response) - 1))):
                    logger.info('bogus line endings detected, '
                                'using %r for EOL', bad_eol)
                    self._eol = bad_eol
                    break
        # exit early if not at end of headers
        if self._end_headers not in self.raw_response or self.headers:
            return

        # handle 100-continue response
        hdrs, body = self.raw_response.split(self._end_headers, 1)
        http_ver, status = hdrs.split(' ', 1)
        if status.startswith('100'):
            self.raw_response = body
            logger.debug('continue seen, setting body to %r', body)
            return

        # arriving here means we should parse response headers
        # as all headers have arrived completely
        hdrs, body = self.raw_response.split(self._end_headers, 1)
        del self.raw_response
        if self._eol in hdrs:
            self.status_line, hdrs = hdrs.split(self._eol, 1)
        else:
            self.status_line = hdrs
            hdrs = ''
        # TODO HTTP < 1.0 support
        (self.http_version, self.status,
         self.reason) = self.status_line.split(' ', 2)
        self.status = int(self.status)
        if self._eol != EOL:
            # normalize to CRLF so rfc822 can parse the headers
            hdrs = hdrs.replace(self._eol, '\r\n')
        headers = rfc822.Message(cStringIO.StringIO(hdrs))
        if HDR_CONTENT_LENGTH in headers:
            self._content_len = int(headers[HDR_CONTENT_LENGTH])
        if self.http_version == HTTP_VER_1_0:
            self.will_close = True
        elif HDR_CONNECTION_CTRL in headers:
            self.will_close = (
                headers[HDR_CONNECTION_CTRL].lower() == CONNECTION_CLOSE)
            if self._content_len == 0:
                self._content_len = _LEN_CLOSE_IS_END
        if (HDR_XFER_ENCODING in headers
            and headers[HDR_XFER_ENCODING].lower() == XFER_ENCODING_CHUNKED):
            self._body = ''
            self._chunked_parsedata(body)
            self._chunked = True
        if self._body is None:
            self._body = body
        self.headers = headers
+
+
+class HTTPConnection(object):
+ """Connection to a single http server.
+
+ Supports 100-continue and keepalives natively. Uses select() for
+ non-blocking socket operations.
+ """
+ http_version = HTTP_VER_1_1
+ response_class = HTTPResponse
+
+ def __init__(self, host, port=None, use_ssl=None, ssl_validator=None,
+ timeout=TIMEOUT_DEFAULT,
+ continue_timeout=TIMEOUT_ASSUME_CONTINUE,
+ proxy_hostport=None, **ssl_opts):
+ """Create a new HTTPConnection.
+
+ Args:
+ host: The host to which we'll connect.
+ port: Optional. The port over which we'll connect. Default 80 for
+ non-ssl, 443 for ssl.
+ use_ssl: Optional. Whether to use ssl. Defaults to False if port is
+ not 443, true if port is 443.
+ ssl_validator: a function(socket) to validate the ssl cert
+ timeout: Optional. Connection timeout, default is TIMEOUT_DEFAULT.
+ continue_timeout: Optional. Timeout for waiting on an expected
+ "100 Continue" response. Default is TIMEOUT_ASSUME_CONTINUE.
+ proxy_hostport: Optional. Tuple of (host, port) to use as an http
+ proxy for the connection. Default is to not use a proxy.
+ """
+ if port is None and host.count(':') == 1 or ']:' in host:
+ host, port = host.rsplit(':', 1)
+ port = int(port)
+ if '[' in host:
+ host = host[1:-1]
+ if use_ssl is None and port is None:
+ use_ssl = False
+ port = 80
+ elif use_ssl is None:
+ use_ssl = (port == 443)
+ elif port is None:
+ port = (use_ssl and 443 or 80)
+ self.port = port
+ if use_ssl and not socketutil.have_ssl:
+ raise Exception('ssl requested but unavailable on this Python')
+ self.ssl = use_ssl
+ self.ssl_opts = ssl_opts
+ self._ssl_validator = ssl_validator
+ self.host = host
+ self.sock = None
+ self._current_response = None
+ self._current_response_taken = False
+ if proxy_hostport is None:
+ self._proxy_host = self._proxy_port = None
+ else:
+ self._proxy_host, self._proxy_port = proxy_hostport
+
+ self.timeout = timeout
+ self.continue_timeout = continue_timeout
+
+ def _connect(self):
+ """Connect to the host and port specified in __init__."""
+ if self.sock:
+ return
+ if self._proxy_host is not None:
+ logger.info('Connecting to http proxy %s:%s',
+ self._proxy_host, self._proxy_port)
+ sock = socketutil.create_connection((self._proxy_host,
+ self._proxy_port))
+ if self.ssl:
+ # TODO proxy header support
+ data = self.buildheaders('CONNECT', '%s:%d' % (self.host,
+ self.port),
+ {}, HTTP_VER_1_0)
+ sock.send(data)
+ sock.setblocking(0)
+ r = self.response_class(sock, self.timeout)
+ timeout_exc = HTTPTimeoutException(
+ 'Timed out waiting for CONNECT response from proxy')
+ while not r.complete():
+ try:
+ if not r._select():
+ raise timeout_exc
+ except HTTPTimeoutException:
+ # This raise/except pattern looks goofy, but
+ # _select can raise the timeout as well as the
+ # loop body. I wish it wasn't this convoluted,
+ # but I don't have a better solution
+ # immediately handy.
+ raise timeout_exc
+ if r.status != 200:
+ raise HTTPProxyConnectFailedException(
+ 'Proxy connection failed: %d %s' % (r.status,
+ r.read()))
+ logger.info('CONNECT (for SSL) to %s:%s via proxy succeeded.',
+ self.host, self.port)
+ else:
+ sock = socketutil.create_connection((self.host, self.port))
+ if self.ssl:
+ logger.debug('wrapping socket for ssl with options %r',
+ self.ssl_opts)
+ sock = socketutil.wrap_socket(sock, **self.ssl_opts)
+ if self._ssl_validator:
+ self._ssl_validator(sock)
+ sock.setblocking(0)
+ self.sock = sock
+
+ def buildheaders(self, method, path, headers, http_ver):
+ if self.ssl and self.port == 443 or self.port == 80:
+ # default port for protocol, so leave it out
+ hdrhost = self.host
+ else:
+ # include nonstandard port in header
+ if ':' in self.host: # must be IPv6
+ hdrhost = '[%s]:%d' % (self.host, self.port)
+ else:
+ hdrhost = '%s:%d' % (self.host, self.port)
+ if self._proxy_host and not self.ssl:
+ # When talking to a regular http proxy we must send the
+ # full URI, but in all other cases we must not (although
+ # technically RFC 2616 says servers must accept our
+ # request if we screw up, experimentally few do that
+ # correctly.)
+ assert path[0] == '/', 'path must start with a /'
+ path = 'http://%s%s' % (hdrhost, path)
+ outgoing = ['%s %s %s%s' % (method, path, http_ver, EOL)]
+ headers['host'] = ('Host', hdrhost)
+ headers[HDR_ACCEPT_ENCODING] = (HDR_ACCEPT_ENCODING, 'identity')
+ for hdr, val in headers.itervalues():
+ outgoing.append('%s: %s%s' % (hdr, val, EOL))
+ outgoing.append(EOL)
+ return ''.join(outgoing)
+
+ def close(self):
+ """Close the connection to the server.
+
+ This is a no-op if the connection is already closed. The
+ connection may automatically close if requested by the server
+ or required by the nature of a response.
+ """
+ if self.sock is None:
+ return
+ self.sock.close()
+ self.sock = None
+ logger.info('closed connection to %s on %s', self.host, self.port)
+
+ def busy(self):
+ """Returns True if this connection object is currently in use.
+
+ If a response is still pending, this will return True, even if
+ the request has finished sending. In the future,
+ HTTPConnection may transparently juggle multiple connections
+ to the server, in which case this will be useful to detect if
+ any of those connections is ready for use.
+ """
+ cr = self._current_response
+ if cr is not None:
+ if self._current_response_taken:
+ if cr.will_close:
+ self.sock = None
+ self._current_response = None
+ return False
+ elif cr.complete():
+ self._current_response = None
+ return False
+ return True
+ return False
+
+ def request(self, method, path, body=None, headers={},
+ expect_continue=False):
+ """Send a request to the server.
+
+ For increased flexibility, this does not return the response
+ object. Future versions of HTTPConnection that juggle multiple
+ sockets will be able to send (for example) 5 requests all at
+ once, and then let the requests arrive as data is
+ available. Use the `getresponse()` method to retrieve the
+ response.
+ """
+ if self.busy():
+ raise httplib.CannotSendRequest(
+ 'Can not send another request before '
+ 'current response is read!')
+ self._current_response_taken = False
+
+ logger.info('sending %s request for %s to %s on port %s',
+ method, path, self.host, self.port)
+ hdrs = dict((k.lower(), (k, v)) for k, v in headers.iteritems())
+ if hdrs.get('expect', ('', ''))[1].lower() == '100-continue':
+ expect_continue = True
+ elif expect_continue:
+ hdrs['expect'] = ('Expect', '100-Continue')
+
+ chunked = False
+ if body and HDR_CONTENT_LENGTH not in hdrs:
+ if getattr(body, '__len__', False):
+ hdrs[HDR_CONTENT_LENGTH] = (HDR_CONTENT_LENGTH, len(body))
+ elif getattr(body, 'read', False):
+ hdrs[HDR_XFER_ENCODING] = (HDR_XFER_ENCODING,
+ XFER_ENCODING_CHUNKED)
+ chunked = True
+ else:
+ raise BadRequestData('body has no __len__() nor read()')
+
+ self._connect()
+ outgoing_headers = self.buildheaders(
+ method, path, hdrs, self.http_version)
+ response = None
+ first = True
+
+ def reconnect(where):
+ logger.info('reconnecting during %s', where)
+ self.close()
+ self._connect()
+
+ while ((outgoing_headers or body)
+ and not (response and response.complete())):
+ select_timeout = self.timeout
+ out = outgoing_headers or body
+ blocking_on_continue = False
+ if expect_continue and not outgoing_headers and not (
+ response and response.headers):
+ logger.info(
+ 'waiting up to %s seconds for'
+ ' continue response from server',
+ self.continue_timeout)
+ select_timeout = self.continue_timeout
+ blocking_on_continue = True
+ out = False
+ if out:
+ w = [self.sock]
+ else:
+ w = []
+ r, w, x = select.select([self.sock], w, [], select_timeout)
+ # if we were expecting a 100 continue and it's been long
+ # enough, just go ahead and assume it's ok. This is the
+ # recommended behavior from the RFC.
+ if r == w == x == []:
+ if blocking_on_continue:
+ expect_continue = False
+ logger.info('no response to continue expectation from '
+ 'server, optimistically sending request body')
+ else:
+ raise HTTPTimeoutException('timeout sending data')
+ # TODO exceptional conditions with select? (what would those be?)
+ # TODO if the response is loading, must we finish sending at all?
+ #
+ # Certainly not if it's going to close the connection and/or
+ # the response is already done...I think.
+ was_first = first
+
+ # incoming data
+ if r:
+ try:
+ try:
+ data = r[0].recv(INCOMING_BUFFER_SIZE)
+ except socket.sslerror, e:
+ if e.args[0] != socket.SSL_ERROR_WANT_READ:
+ raise
+ logger.debug(
+ 'SSL_WANT_READ while sending data, retrying...')
+ continue
+ if not data:
+ logger.info('socket appears closed in read')
+ self.sock = None
+ self._current_response = None
+ # This if/elif ladder is a bit subtle,
+ # comments in each branch should help.
+ if response is not None and (
+ response.complete() or
+ response._content_len == _LEN_CLOSE_IS_END):
+ # Server responded completely and then
+ # closed the socket. We should just shut
+ # things down and let the caller get their
+ # response.
+ logger.info('Got an early response, '
+ 'aborting remaining request.')
+ break
+ elif was_first and response is None:
+ # Most likely a keepalive that got killed
+ # on the server's end. Commonly happens
+ # after getting a really large response
+ # from the server.
+ logger.info(
+ 'Connection appeared closed in read on first'
+ ' request loop iteration, will retry.')
+ reconnect('read')
+ continue
+ else:
+ # We didn't just send the first data hunk,
+ # and either have a partial response or no
+ # response at all. There's really nothing
+ # meaningful we can do here.
+ raise HTTPStateError(
+ 'Connection appears closed after '
+ 'some request data was written, but the '
+ 'response was missing or incomplete!')
+ logger.debug('read %d bytes in request()', len(data))
+ if response is None:
+ response = self.response_class(r[0], self.timeout)
+ response._load_response(data)
+ # Jump to the next select() call so we load more
+ # data if the server is still sending us content.
+ continue
+ except socket.error, e:
+ if e[0] != errno.EPIPE and not was_first:
+ raise
+ if (response._content_len
+ and response._content_len != _LEN_CLOSE_IS_END):
+ outgoing_headers = sent_data + outgoing_headers
+ reconnect('read')
+
+ # outgoing data
+ if w and out:
+ try:
+ if getattr(out, 'read', False):
+ data = out.read(OUTGOING_BUFFER_SIZE)
+ if not data:
+ continue
+ if len(data) < OUTGOING_BUFFER_SIZE:
+ if chunked:
+ body = '0' + EOL + EOL
+ else:
+ body = None
+ if chunked:
+ out = hex(len(data))[2:] + EOL + data + EOL
+ else:
+ out = data
+ amt = w[0].send(out)
+ except socket.error, e:
+ if e[0] == socket.SSL_ERROR_WANT_WRITE and self.ssl:
+ # This means that SSL hasn't flushed its buffer into
+ # the socket yet.
+ # TODO: find a way to block on ssl flushing its buffer
+ # similar to selecting on a raw socket.
+ continue
+ elif (e[0] not in (errno.ECONNRESET, errno.EPIPE)
+ and not first):
+ raise
+ reconnect('write')
+ amt = self.sock.send(out)
+ logger.debug('sent %d', amt)
+ first = False
+ # stash data we think we sent in case the socket breaks
+ # when we read from it
+ if was_first:
+ sent_data = out[:amt]
+ if out is body:
+ body = out[amt:]
+ else:
+ outgoing_headers = out[amt:]
+
+ # close if the server response said to or responded before eating
+ # the whole request
+ if response is None:
+ response = self.response_class(self.sock, self.timeout)
+ complete = response.complete()
+ data_left = bool(outgoing_headers or body)
+ if data_left:
+ logger.info('stopped sending request early, '
+ 'will close the socket to be safe.')
+ response.will_close = True
+ if response.will_close:
+ # The socket will be closed by the response, so we disown
+ # the socket
+ self.sock = None
+ self._current_response = response
+
+ def getresponse(self):
+ if self._current_response is None:
+ raise httplib.ResponseNotReady()
+ r = self._current_response
+ while r.headers is None:
+ r._select()
+ if r.will_close:
+ self.sock = None
+ self._current_response = None
+ elif r.complete():
+ self._current_response = None
+ else:
+ self._current_response_taken = True
+ return r
+
+
+class HTTPTimeoutException(httplib.HTTPException):
+ """A timeout occurred while waiting on the server."""
+
+
+class BadRequestData(httplib.HTTPException):
+ """Request body object has neither __len__ nor read."""
+
+
+class HTTPProxyConnectFailedException(httplib.HTTPException):
+ """Connecting to the HTTP proxy failed."""
+
+
+class HTTPStateError(httplib.HTTPException):
+ """Invalid internal state encountered."""
+
+
+class HTTPRemoteClosedError(httplib.HTTPException):
+ """The server closed the remote socket in the middle of a response."""
+# no-check-code
diff --git a/websdk/mercurial/httpclient/socketutil.py b/websdk/mercurial/httpclient/socketutil.py
new file mode 100644
index 0000000..6f06a3c
--- /dev/null
+++ b/websdk/mercurial/httpclient/socketutil.py
@@ -0,0 +1,127 @@
+# Copyright 2010, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+"""Abstraction to simplify socket use for Python < 2.6
+
+This will attempt to use the ssl module and the new
+socket.create_connection method, but fall back to the old
+methods if those are unavailable.
+"""
+import logging
+import socket
+
+logger = logging.getLogger(__name__)
+
+try:
+ import ssl
+ ssl.wrap_socket # make demandimporters load the module
+ have_ssl = True
+except ImportError:
+ import httplib
+ import urllib2
+ have_ssl = getattr(urllib2, 'HTTPSHandler', False)
+ ssl = False
+
+
+try:
+ create_connection = socket.create_connection
+except AttributeError:
+ def create_connection(address):
+ host, port = address
+ msg = "getaddrinfo returns an empty list"
+ sock = None
+ for res in socket.getaddrinfo(host, port, 0,
+ socket.SOCK_STREAM):
+ af, socktype, proto, _canonname, sa = res
+ try:
+ sock = socket.socket(af, socktype, proto)
+ logger.info("connect: (%s, %s)", host, port)
+ sock.connect(sa)
+ except socket.error, msg:
+ logger.info('connect fail: %s %s', host, port)
+ if sock:
+ sock.close()
+ sock = None
+ continue
+ break
+ if not sock:
+ raise socket.error, msg
+ return sock
+
+if ssl:
+ wrap_socket = ssl.wrap_socket
+ CERT_NONE = ssl.CERT_NONE
+ CERT_OPTIONAL = ssl.CERT_OPTIONAL
+ CERT_REQUIRED = ssl.CERT_REQUIRED
+else:
+ class FakeSocket(httplib.FakeSocket):
+ """Socket wrapper that supports SSL.
+ """
+ # backport the behavior from Python 2.6, which is to busy wait
+ # on the socket instead of anything nice. Sigh.
+ # See http://bugs.python.org/issue3890 for more info.
+ def recv(self, buflen=1024, flags=0):
+ """ssl-aware wrapper around socket.recv
+ """
+ if flags != 0:
+ raise ValueError(
+ "non-zero flags not allowed in calls to recv() on %s" %
+ self.__class__)
+ while True:
+ try:
+ return self._ssl.read(buflen)
+ except socket.sslerror, x:
+ if x.args[0] == socket.SSL_ERROR_WANT_READ:
+ continue
+ else:
+ raise x
+
+ _PROTOCOL_SSLv23 = 2
+
+ CERT_NONE = 0
+ CERT_OPTIONAL = 1
+ CERT_REQUIRED = 2
+
+ def wrap_socket(sock, keyfile=None, certfile=None,
+ server_side=False, cert_reqs=CERT_NONE,
+ ssl_version=_PROTOCOL_SSLv23, ca_certs=None,
+ do_handshake_on_connect=True,
+ suppress_ragged_eofs=True):
+ if cert_reqs != CERT_NONE and ca_certs:
+ raise CertificateValidationUnsupported(
+ 'SSL certificate validation requires the ssl module'
+ '(included in Python 2.6 and later.)')
+ sslob = socket.ssl(sock)
+ # borrow httplib's workaround for no ssl.wrap_socket
+ sock = FakeSocket(sock, sslob)
+ return sock
+
+
+class CertificateValidationUnsupported(Exception):
+ """Exception raised when cert validation is requested but unavailable."""
+# no-check-code
diff --git a/websdk/mercurial/httpclient/tests/__init__.py b/websdk/mercurial/httpclient/tests/__init__.py
new file mode 100644
index 0000000..84b3a07
--- /dev/null
+++ b/websdk/mercurial/httpclient/tests/__init__.py
@@ -0,0 +1 @@
+# no-check-code
diff --git a/websdk/mercurial/httpclient/tests/simple_http_test.py b/websdk/mercurial/httpclient/tests/simple_http_test.py
new file mode 100644
index 0000000..f933d50
--- /dev/null
+++ b/websdk/mercurial/httpclient/tests/simple_http_test.py
@@ -0,0 +1,401 @@
+# Copyright 2010, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+import socket
+import unittest
+
+import http
+
+# relative import to ease embedding the library
+import util
+
+
+class SimpleHttpTest(util.HttpTestBase, unittest.TestCase):
+
+ def _run_simple_test(self, host, server_data, expected_req, expected_data):
+ con = http.HTTPConnection(host)
+ con._connect()
+ con.sock.data = server_data
+ con.request('GET', '/')
+
+ self.assertStringEqual(expected_req, con.sock.sent)
+ self.assertEqual(expected_data, con.getresponse().read())
+
+ def test_broken_data_obj(self):
+ con = http.HTTPConnection('1.2.3.4:80')
+ con._connect()
+ self.assertRaises(http.BadRequestData,
+ con.request, 'POST', '/', body=1)
+
+ def test_no_keepalive_http_1_0(self):
+ expected_request_one = """GET /remote/.hg/requires HTTP/1.1
+Host: localhost:9999
+range: bytes=0-
+accept-encoding: identity
+accept: application/mercurial-0.1
+user-agent: mercurial/proto-1.0
+
+""".replace('\n', '\r\n')
+ expected_response_headers = """HTTP/1.0 200 OK
+Server: SimpleHTTP/0.6 Python/2.6.1
+Date: Sun, 01 May 2011 13:56:57 GMT
+Content-type: application/octet-stream
+Content-Length: 33
+Last-Modified: Sun, 01 May 2011 13:56:56 GMT
+
+""".replace('\n', '\r\n')
+ expected_response_body = """revlogv1
+store
+fncache
+dotencode
+"""
+ con = http.HTTPConnection('localhost:9999')
+ con._connect()
+ con.sock.data = [expected_response_headers, expected_response_body]
+ con.request('GET', '/remote/.hg/requires',
+ headers={'accept-encoding': 'identity',
+ 'range': 'bytes=0-',
+ 'accept': 'application/mercurial-0.1',
+ 'user-agent': 'mercurial/proto-1.0',
+ })
+ self.assertStringEqual(expected_request_one, con.sock.sent)
+ self.assertEqual(con.sock.closed, False)
+ self.assertNotEqual(con.sock.data, [])
+ self.assert_(con.busy())
+ resp = con.getresponse()
+ self.assertStringEqual(resp.read(), expected_response_body)
+ self.failIf(con.busy())
+ self.assertEqual(con.sock, None)
+ self.assertEqual(resp.sock.data, [])
+ self.assert_(resp.sock.closed)
+
+ def test_multiline_header(self):
+ con = http.HTTPConnection('1.2.3.4:80')
+ con._connect()
+ con.sock.data = ['HTTP/1.1 200 OK\r\n',
+ 'Server: BogusServer 1.0\r\n',
+ 'Multiline: Value\r\n',
+ ' Rest of value\r\n',
+ 'Content-Length: 10\r\n',
+ '\r\n'
+ '1234567890'
+ ]
+ con.request('GET', '/')
+
+ expected_req = ('GET / HTTP/1.1\r\n'
+ 'Host: 1.2.3.4\r\n'
+ 'accept-encoding: identity\r\n\r\n')
+
+ self.assertEqual(('1.2.3.4', 80), con.sock.sa)
+ self.assertEqual(expected_req, con.sock.sent)
+ resp = con.getresponse()
+ self.assertEqual('1234567890', resp.read())
+ self.assertEqual(['Value\n Rest of value'],
+ resp.headers.getheaders('multiline'))
+ # Socket should not be closed
+ self.assertEqual(resp.sock.closed, False)
+ self.assertEqual(con.sock.closed, False)
+
+ def testSimpleRequest(self):
+ con = http.HTTPConnection('1.2.3.4:80')
+ con._connect()
+ con.sock.data = ['HTTP/1.1 200 OK\r\n',
+ 'Server: BogusServer 1.0\r\n',
+ 'MultiHeader: Value\r\n'
+ 'MultiHeader: Other Value\r\n'
+ 'MultiHeader: One More!\r\n'
+ 'Content-Length: 10\r\n',
+ '\r\n'
+ '1234567890'
+ ]
+ con.request('GET', '/')
+
+ expected_req = ('GET / HTTP/1.1\r\n'
+ 'Host: 1.2.3.4\r\n'
+ 'accept-encoding: identity\r\n\r\n')
+
+ self.assertEqual(('1.2.3.4', 80), con.sock.sa)
+ self.assertEqual(expected_req, con.sock.sent)
+ resp = con.getresponse()
+ self.assertEqual('1234567890', resp.read())
+ self.assertEqual(['Value', 'Other Value', 'One More!'],
+ resp.headers.getheaders('multiheader'))
+ self.assertEqual(['BogusServer 1.0'],
+ resp.headers.getheaders('server'))
+
+ def testHeaderlessResponse(self):
+ con = http.HTTPConnection('1.2.3.4', use_ssl=False)
+ con._connect()
+ con.sock.data = ['HTTP/1.1 200 OK\r\n',
+ '\r\n'
+ '1234567890'
+ ]
+ con.request('GET', '/')
+
+ expected_req = ('GET / HTTP/1.1\r\n'
+ 'Host: 1.2.3.4\r\n'
+ 'accept-encoding: identity\r\n\r\n')
+
+ self.assertEqual(('1.2.3.4', 80), con.sock.sa)
+ self.assertEqual(expected_req, con.sock.sent)
+ resp = con.getresponse()
+ self.assertEqual('1234567890', resp.read())
+ self.assertEqual({}, dict(resp.headers))
+ self.assertEqual(resp.status, 200)
+
+ def testReadline(self):
+ con = http.HTTPConnection('1.2.3.4')
+ con._connect()
+ # make sure it trickles in one byte at a time
+ # so that we touch all the cases in readline
+ con.sock.data = list(''.join(
+ ['HTTP/1.1 200 OK\r\n',
+ 'Server: BogusServer 1.0\r\n',
+ 'Connection: Close\r\n',
+ '\r\n'
+ '1\n2\nabcdefg\n4\n5']))
+
+ expected_req = ('GET / HTTP/1.1\r\n'
+ 'Host: 1.2.3.4\r\n'
+ 'accept-encoding: identity\r\n\r\n')
+
+ con.request('GET', '/')
+ self.assertEqual(('1.2.3.4', 80), con.sock.sa)
+ self.assertEqual(expected_req, con.sock.sent)
+ r = con.getresponse()
+ for expected in ['1\n', '2\n', 'abcdefg\n', '4\n', '5']:
+ actual = r.readline()
+ self.assertEqual(expected, actual,
+ 'Expected %r, got %r' % (expected, actual))
+
+ def testIPv6(self):
+ self._run_simple_test('[::1]:8221',
+ ['HTTP/1.1 200 OK\r\n',
+ 'Server: BogusServer 1.0\r\n',
+ 'Content-Length: 10',
+ '\r\n\r\n'
+ '1234567890'],
+ ('GET / HTTP/1.1\r\n'
+ 'Host: [::1]:8221\r\n'
+ 'accept-encoding: identity\r\n\r\n'),
+ '1234567890')
+ self._run_simple_test('::2',
+ ['HTTP/1.1 200 OK\r\n',
+ 'Server: BogusServer 1.0\r\n',
+ 'Content-Length: 10',
+ '\r\n\r\n'
+ '1234567890'],
+ ('GET / HTTP/1.1\r\n'
+ 'Host: ::2\r\n'
+ 'accept-encoding: identity\r\n\r\n'),
+ '1234567890')
+ self._run_simple_test('[::3]:443',
+ ['HTTP/1.1 200 OK\r\n',
+ 'Server: BogusServer 1.0\r\n',
+ 'Content-Length: 10',
+ '\r\n\r\n'
+ '1234567890'],
+ ('GET / HTTP/1.1\r\n'
+ 'Host: ::3\r\n'
+ 'accept-encoding: identity\r\n\r\n'),
+ '1234567890')
+
+ def testEarlyContinueResponse(self):
+ con = http.HTTPConnection('1.2.3.4:80')
+ con._connect()
+ sock = con.sock
+ sock.data = ['HTTP/1.1 403 Forbidden\r\n',
+ 'Server: BogusServer 1.0\r\n',
+ 'Content-Length: 18',
+ '\r\n\r\n'
+ "You can't do that."]
+ expected_req = self.doPost(con, expect_body=False)
+ self.assertEqual(('1.2.3.4', 80), sock.sa)
+ self.assertStringEqual(expected_req, sock.sent)
+ self.assertEqual("You can't do that.", con.getresponse().read())
+ self.assertEqual(sock.closed, True)
+
+ def testDeniedAfterContinueTimeoutExpires(self):
+ con = http.HTTPConnection('1.2.3.4:80')
+ con._connect()
+ sock = con.sock
+ sock.data = ['HTTP/1.1 403 Forbidden\r\n',
+ 'Server: BogusServer 1.0\r\n',
+ 'Content-Length: 18\r\n',
+ 'Connection: close',
+ '\r\n\r\n'
+ "You can't do that."]
+ sock.read_wait_sentinel = 'Dear server, send response!'
+ sock.close_on_empty = True
+ # send enough data out that we'll chunk it into multiple
+ # blocks and the socket will close before we can send the
+ # whole request.
+ post_body = ('This is some POST data\n' * 1024 * 32 +
+ 'Dear server, send response!\n' +
+ 'This is some POST data\n' * 1024 * 32)
+ expected_req = self.doPost(con, expect_body=False,
+ body_to_send=post_body)
+ self.assertEqual(('1.2.3.4', 80), sock.sa)
+ self.assert_('POST data\n' in sock.sent)
+ self.assert_('Dear server, send response!\n' in sock.sent)
+ # We expect not all of our data was sent.
+ self.assertNotEqual(sock.sent, expected_req)
+ self.assertEqual("You can't do that.", con.getresponse().read())
+ self.assertEqual(sock.closed, True)
+
+ def testPostData(self):
+ con = http.HTTPConnection('1.2.3.4:80')
+ con._connect()
+ sock = con.sock
+ sock.read_wait_sentinel = 'POST data'
+ sock.early_data = ['HTTP/1.1 100 Co', 'ntinue\r\n\r\n']
+ sock.data = ['HTTP/1.1 200 OK\r\n',
+ 'Server: BogusServer 1.0\r\n',
+ 'Content-Length: 16',
+ '\r\n\r\n',
+ "You can do that."]
+ expected_req = self.doPost(con, expect_body=True)
+ self.assertEqual(('1.2.3.4', 80), sock.sa)
+ self.assertEqual(expected_req, sock.sent)
+ self.assertEqual("You can do that.", con.getresponse().read())
+ self.assertEqual(sock.closed, False)
+
+ def testServerWithoutContinue(self):
+ con = http.HTTPConnection('1.2.3.4:80')
+ con._connect()
+ sock = con.sock
+ sock.read_wait_sentinel = 'POST data'
+ sock.data = ['HTTP/1.1 200 OK\r\n',
+ 'Server: BogusServer 1.0\r\n',
+ 'Content-Length: 16',
+ '\r\n\r\n',
+ "You can do that."]
+ expected_req = self.doPost(con, expect_body=True)
+ self.assertEqual(('1.2.3.4', 80), sock.sa)
+ self.assertEqual(expected_req, sock.sent)
+ self.assertEqual("You can do that.", con.getresponse().read())
+ self.assertEqual(sock.closed, False)
+
+ def testServerWithSlowContinue(self):
+ con = http.HTTPConnection('1.2.3.4:80')
+ con._connect()
+ sock = con.sock
+ sock.read_wait_sentinel = 'POST data'
+ sock.data = ['HTTP/1.1 100 ', 'Continue\r\n\r\n',
+ 'HTTP/1.1 200 OK\r\n',
+ 'Server: BogusServer 1.0\r\n',
+ 'Content-Length: 16',
+ '\r\n\r\n',
+ "You can do that."]
+ expected_req = self.doPost(con, expect_body=True)
+ self.assertEqual(('1.2.3.4', 80), sock.sa)
+ self.assertEqual(expected_req, sock.sent)
+ resp = con.getresponse()
+ self.assertEqual("You can do that.", resp.read())
+ self.assertEqual(200, resp.status)
+ self.assertEqual(sock.closed, False)
+
+ def testSlowConnection(self):
+ con = http.HTTPConnection('1.2.3.4:80')
+ con._connect()
+ # simulate one byte arriving at a time, to check for various
+ # corner cases
+ con.sock.data = list('HTTP/1.1 200 OK\r\n'
+ 'Server: BogusServer 1.0\r\n'
+ 'Content-Length: 10'
+ '\r\n\r\n'
+ '1234567890')
+ con.request('GET', '/')
+
+ expected_req = ('GET / HTTP/1.1\r\n'
+ 'Host: 1.2.3.4\r\n'
+ 'accept-encoding: identity\r\n\r\n')
+
+ self.assertEqual(('1.2.3.4', 80), con.sock.sa)
+ self.assertEqual(expected_req, con.sock.sent)
+ self.assertEqual('1234567890', con.getresponse().read())
+
+ def testTimeout(self):
+ con = http.HTTPConnection('1.2.3.4:80')
+ con._connect()
+ con.sock.data = []
+ con.request('GET', '/')
+ self.assertRaises(http.HTTPTimeoutException,
+ con.getresponse)
+
+ expected_req = ('GET / HTTP/1.1\r\n'
+ 'Host: 1.2.3.4\r\n'
+ 'accept-encoding: identity\r\n\r\n')
+
+ self.assertEqual(('1.2.3.4', 80), con.sock.sa)
+ self.assertEqual(expected_req, con.sock.sent)
+
+ def test_conn_keep_alive_but_server_close_anyway(self):
+ sockets = []
+ def closingsocket(*args, **kwargs):
+ s = util.MockSocket(*args, **kwargs)
+ sockets.append(s)
+ s.data = ['HTTP/1.1 200 OK\r\n',
+ 'Server: BogusServer 1.0\r\n',
+ 'Connection: Keep-Alive\r\n',
+ 'Content-Length: 16',
+ '\r\n\r\n',
+ 'You can do that.']
+ s.close_on_empty = True
+ return s
+
+ socket.socket = closingsocket
+ con = http.HTTPConnection('1.2.3.4:80')
+ con._connect()
+ con.request('GET', '/')
+ r1 = con.getresponse()
+ r1.read()
+ self.assertFalse(con.sock.closed)
+ self.assert_(con.sock.remote_closed)
+ con.request('GET', '/')
+ self.assertEqual(2, len(sockets))
+
+ def test_server_closes_before_end_of_body(self):
+ con = http.HTTPConnection('1.2.3.4:80')
+ con._connect()
+ s = con.sock
+ s.data = ['HTTP/1.1 200 OK\r\n',
+ 'Server: BogusServer 1.0\r\n',
+ 'Connection: Keep-Alive\r\n',
+ 'Content-Length: 16',
+ '\r\n\r\n',
+ 'You can '] # Note: this is shorter than content-length
+ s.close_on_empty = True
+ con.request('GET', '/')
+ r1 = con.getresponse()
+ self.assertRaises(http.HTTPRemoteClosedError, r1.read)
+
+ def test_no_response_raises_response_not_ready(self):
+ con = http.HTTPConnection('foo')
+ self.assertRaises(http.httplib.ResponseNotReady, con.getresponse)
+# no-check-code
diff --git a/websdk/mercurial/httpclient/tests/test_bogus_responses.py b/websdk/mercurial/httpclient/tests/test_bogus_responses.py
new file mode 100644
index 0000000..486e770
--- /dev/null
+++ b/websdk/mercurial/httpclient/tests/test_bogus_responses.py
@@ -0,0 +1,68 @@
+# Copyright 2010, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+"""Tests against malformed responses.
+
+Server implementations that respond with only LF instead of CRLF have
+been observed. Checking against ones that use only CR is a hedge
+against that potential insanity.
+"""
+import unittest
+
+import http
+
+# relative import to ease embedding the library
+import util
+
+
+class SimpleHttpTest(util.HttpTestBase, unittest.TestCase):
+
+ def bogusEOL(self, eol):
+ con = http.HTTPConnection('1.2.3.4:80')
+ con._connect()
+ con.sock.data = ['HTTP/1.1 200 OK%s' % eol,
+ 'Server: BogusServer 1.0%s' % eol,
+ 'Content-Length: 10',
+ eol * 2,
+ '1234567890']
+ con.request('GET', '/')
+
+ expected_req = ('GET / HTTP/1.1\r\n'
+ 'Host: 1.2.3.4\r\n'
+ 'accept-encoding: identity\r\n\r\n')
+
+ self.assertEqual(('1.2.3.4', 80), con.sock.sa)
+ self.assertEqual(expected_req, con.sock.sent)
+ self.assertEqual('1234567890', con.getresponse().read())
+
+ def testOnlyLinefeed(self):
+ self.bogusEOL('\n')
+
+ def testOnlyCarriageReturn(self):
+ self.bogusEOL('\r')
+# no-check-code
diff --git a/websdk/mercurial/httpclient/tests/test_chunked_transfer.py b/websdk/mercurial/httpclient/tests/test_chunked_transfer.py
new file mode 100644
index 0000000..88244b5
--- /dev/null
+++ b/websdk/mercurial/httpclient/tests/test_chunked_transfer.py
@@ -0,0 +1,153 @@
+# Copyright 2010, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+import cStringIO
+import unittest
+
+import http
+
+# relative import to ease embedding the library
+import util
+
+
+def chunkedblock(x, eol='\r\n'):
+ r"""Make a chunked transfer-encoding block.
+
+ >>> chunkedblock('hi')
+ '2\r\nhi\r\n'
+ >>> chunkedblock('hi' * 10)
+ '14\r\nhihihihihihihihihihi\r\n'
+ >>> chunkedblock('hi', eol='\n')
+ '2\nhi\n'
+ """
+ return ''.join((hex(len(x))[2:], eol, x, eol))
+
+
+class ChunkedTransferTest(util.HttpTestBase, unittest.TestCase):
+ def testChunkedUpload(self):
+ con = http.HTTPConnection('1.2.3.4:80')
+ con._connect()
+ sock = con.sock
+ sock.read_wait_sentinel = '0\r\n\r\n'
+ sock.data = ['HTTP/1.1 200 OK\r\n',
+ 'Server: BogusServer 1.0\r\n',
+ 'Content-Length: 6',
+ '\r\n\r\n',
+ "Thanks"]
+
+ zz = 'zz\n'
+ con.request('POST', '/', body=cStringIO.StringIO(
+ (zz * (0x8010 / 3)) + 'end-of-body'))
+ expected_req = ('POST / HTTP/1.1\r\n'
+ 'transfer-encoding: chunked\r\n'
+ 'Host: 1.2.3.4\r\n'
+ 'accept-encoding: identity\r\n\r\n')
+ expected_req += chunkedblock('zz\n' * (0x8000 / 3) + 'zz')
+ expected_req += chunkedblock(
+ '\n' + 'zz\n' * ((0x1b - len('end-of-body')) / 3) + 'end-of-body')
+ expected_req += '0\r\n\r\n'
+ self.assertEqual(('1.2.3.4', 80), sock.sa)
+ self.assertStringEqual(expected_req, sock.sent)
+ self.assertEqual("Thanks", con.getresponse().read())
+ self.assertEqual(sock.closed, False)
+
+ def testChunkedDownload(self):
+ con = http.HTTPConnection('1.2.3.4:80')
+ con._connect()
+ sock = con.sock
+ sock.data = ['HTTP/1.1 200 OK\r\n',
+ 'Server: BogusServer 1.0\r\n',
+ 'transfer-encoding: chunked',
+ '\r\n\r\n',
+ chunkedblock('hi '),
+ chunkedblock('there'),
+ chunkedblock(''),
+ ]
+ con.request('GET', '/')
+ self.assertStringEqual('hi there', con.getresponse().read())
+
+ def testChunkedDownloadBadEOL(self):
+ con = http.HTTPConnection('1.2.3.4:80')
+ con._connect()
+ sock = con.sock
+ sock.data = ['HTTP/1.1 200 OK\n',
+ 'Server: BogusServer 1.0\n',
+ 'transfer-encoding: chunked',
+ '\n\n',
+ chunkedblock('hi ', eol='\n'),
+ chunkedblock('there', eol='\n'),
+ chunkedblock('', eol='\n'),
+ ]
+ con.request('GET', '/')
+ self.assertStringEqual('hi there', con.getresponse().read())
+
+ def testChunkedDownloadPartialChunkBadEOL(self):
+ con = http.HTTPConnection('1.2.3.4:80')
+ con._connect()
+ sock = con.sock
+ sock.data = ['HTTP/1.1 200 OK\n',
+ 'Server: BogusServer 1.0\n',
+ 'transfer-encoding: chunked',
+ '\n\n',
+ chunkedblock('hi ', eol='\n'),
+ ] + list(chunkedblock('there\n' * 5, eol='\n')) + [
+ chunkedblock('', eol='\n')]
+ con.request('GET', '/')
+ self.assertStringEqual('hi there\nthere\nthere\nthere\nthere\n',
+ con.getresponse().read())
+
+ def testChunkedDownloadPartialChunk(self):
+ con = http.HTTPConnection('1.2.3.4:80')
+ con._connect()
+ sock = con.sock
+ sock.data = ['HTTP/1.1 200 OK\r\n',
+ 'Server: BogusServer 1.0\r\n',
+ 'transfer-encoding: chunked',
+ '\r\n\r\n',
+ chunkedblock('hi '),
+ ] + list(chunkedblock('there\n' * 5)) + [chunkedblock('')]
+ con.request('GET', '/')
+ self.assertStringEqual('hi there\nthere\nthere\nthere\nthere\n',
+ con.getresponse().read())
+
+ def testChunkedDownloadEarlyHangup(self):
+ con = http.HTTPConnection('1.2.3.4:80')
+ con._connect()
+ sock = con.sock
+ broken = chunkedblock('hi'*20)[:-1]
+ sock.data = ['HTTP/1.1 200 OK\r\n',
+ 'Server: BogusServer 1.0\r\n',
+ 'transfer-encoding: chunked',
+ '\r\n\r\n',
+ broken,
+ ]
+ sock.close_on_empty = True
+ con.request('GET', '/')
+ resp = con.getresponse()
+ self.assertRaises(http.HTTPRemoteClosedError, resp.read)
+# no-check-code
diff --git a/websdk/mercurial/httpclient/tests/test_proxy_support.py b/websdk/mercurial/httpclient/tests/test_proxy_support.py
new file mode 100644
index 0000000..1526a9a
--- /dev/null
+++ b/websdk/mercurial/httpclient/tests/test_proxy_support.py
@@ -0,0 +1,135 @@
+# Copyright 2010, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+import unittest
+import socket
+
+import http
+
+# relative import to ease embedding the library
+import util
+
+
+def make_preloaded_socket(data):
+ """Make a socket pre-loaded with data so it can be read during connect.
+
+ Useful for https proxy tests because we have to read from the
+ socket during _connect rather than later on.
+ """
+ def s(*args, **kwargs):
+ sock = util.MockSocket(*args, **kwargs)
+ sock.early_data = data[:]
+ return sock
+ return s
+
+
+class ProxyHttpTest(util.HttpTestBase, unittest.TestCase):
+
+ def _run_simple_test(self, host, server_data, expected_req, expected_data):
+ con = http.HTTPConnection(host)
+ con._connect()
+ con.sock.data = server_data
+ con.request('GET', '/')
+
+ self.assertEqual(expected_req, con.sock.sent)
+ self.assertEqual(expected_data, con.getresponse().read())
+
+ def testSimpleRequest(self):
+ con = http.HTTPConnection('1.2.3.4:80',
+ proxy_hostport=('magicproxy', 4242))
+ con._connect()
+ con.sock.data = ['HTTP/1.1 200 OK\r\n',
+ 'Server: BogusServer 1.0\r\n',
+ 'MultiHeader: Value\r\n'
+ 'MultiHeader: Other Value\r\n'
+ 'MultiHeader: One More!\r\n'
+ 'Content-Length: 10\r\n',
+ '\r\n'
+ '1234567890'
+ ]
+ con.request('GET', '/')
+
+ expected_req = ('GET http://1.2.3.4/ HTTP/1.1\r\n'
+ 'Host: 1.2.3.4\r\n'
+ 'accept-encoding: identity\r\n\r\n')
+
+ self.assertEqual(('127.0.0.42', 4242), con.sock.sa)
+ self.assertStringEqual(expected_req, con.sock.sent)
+ resp = con.getresponse()
+ self.assertEqual('1234567890', resp.read())
+ self.assertEqual(['Value', 'Other Value', 'One More!'],
+ resp.headers.getheaders('multiheader'))
+ self.assertEqual(['BogusServer 1.0'],
+ resp.headers.getheaders('server'))
+
+ def testSSLRequest(self):
+ con = http.HTTPConnection('1.2.3.4:443',
+ proxy_hostport=('magicproxy', 4242))
+ socket.socket = make_preloaded_socket(
+ ['HTTP/1.1 200 OK\r\n',
+ 'Server: BogusServer 1.0\r\n',
+ 'Content-Length: 10\r\n',
+ '\r\n'
+ '1234567890'])
+ con._connect()
+ con.sock.data = ['HTTP/1.1 200 OK\r\n',
+ 'Server: BogusServer 1.0\r\n',
+ 'Content-Length: 10\r\n',
+ '\r\n'
+ '1234567890'
+ ]
+ connect_sent = con.sock.sent
+ con.sock.sent = ''
+ con.request('GET', '/')
+
+ expected_connect = ('CONNECT 1.2.3.4:443 HTTP/1.0\r\n'
+ 'Host: 1.2.3.4\r\n'
+ 'accept-encoding: identity\r\n'
+ '\r\n')
+ expected_request = ('GET / HTTP/1.1\r\n'
+ 'Host: 1.2.3.4\r\n'
+ 'accept-encoding: identity\r\n\r\n')
+
+ self.assertEqual(('127.0.0.42', 4242), con.sock.sa)
+ self.assertStringEqual(expected_connect, connect_sent)
+ self.assertStringEqual(expected_request, con.sock.sent)
+ resp = con.getresponse()
+ self.assertEqual(resp.status, 200)
+ self.assertEqual('1234567890', resp.read())
+ self.assertEqual(['BogusServer 1.0'],
+ resp.headers.getheaders('server'))
+
+ def testSSLProxyFailure(self):
+ con = http.HTTPConnection('1.2.3.4:443',
+ proxy_hostport=('magicproxy', 4242))
+ socket.socket = make_preloaded_socket(
+ ['HTTP/1.1 407 Proxy Authentication Required\r\n\r\n'])
+ self.assertRaises(http.HTTPProxyConnectFailedException, con._connect)
+ self.assertRaises(http.HTTPProxyConnectFailedException,
+ con.request, 'GET', '/')
+# no-check-code
diff --git a/websdk/mercurial/httpclient/tests/test_ssl.py b/websdk/mercurial/httpclient/tests/test_ssl.py
new file mode 100644
index 0000000..5799a8f
--- /dev/null
+++ b/websdk/mercurial/httpclient/tests/test_ssl.py
@@ -0,0 +1,93 @@
+# Copyright 2011, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+import unittest
+
+import http
+
+# relative import to ease embedding the library
+import util
+
+
+
+class HttpSslTest(util.HttpTestBase, unittest.TestCase):
+ def testSslRereadRequired(self):
+ con = http.HTTPConnection('1.2.3.4:443')
+ con._connect()
+ # extend the list instead of assign because of how
+ # MockSSLSocket works.
+ con.sock.data = ['HTTP/1.1 200 OK\r\n',
+ 'Server: BogusServer 1.0\r\n',
+ 'MultiHeader: Value\r\n'
+ 'MultiHeader: Other Value\r\n'
+ 'MultiHeader: One More!\r\n'
+ 'Content-Length: 10\r\n',
+ '\r\n'
+ '1234567890'
+ ]
+ con.request('GET', '/')
+
+ expected_req = ('GET / HTTP/1.1\r\n'
+ 'Host: 1.2.3.4\r\n'
+ 'accept-encoding: identity\r\n\r\n')
+
+ self.assertEqual(('1.2.3.4', 443), con.sock.sa)
+ self.assertEqual(expected_req, con.sock.sent)
+ resp = con.getresponse()
+ self.assertEqual('1234567890', resp.read())
+ self.assertEqual(['Value', 'Other Value', 'One More!'],
+ resp.headers.getheaders('multiheader'))
+ self.assertEqual(['BogusServer 1.0'],
+ resp.headers.getheaders('server'))
+
+ def testSslRereadInEarlyResponse(self):
+ con = http.HTTPConnection('1.2.3.4:443')
+ con._connect()
+ con.sock.early_data = ['HTTP/1.1 200 OK\r\n',
+ 'Server: BogusServer 1.0\r\n',
+ 'MultiHeader: Value\r\n'
+ 'MultiHeader: Other Value\r\n'
+ 'MultiHeader: One More!\r\n'
+ 'Content-Length: 10\r\n',
+ '\r\n'
+ '1234567890'
+ ]
+
+ expected_req = self.doPost(con, False)
+ self.assertEqual(None, con.sock,
+ 'Connection should have disowned socket')
+
+ resp = con.getresponse()
+ self.assertEqual(('1.2.3.4', 443), resp.sock.sa)
+ self.assertEqual(expected_req, resp.sock.sent)
+ self.assertEqual('1234567890', resp.read())
+ self.assertEqual(['Value', 'Other Value', 'One More!'],
+ resp.headers.getheaders('multiheader'))
+ self.assertEqual(['BogusServer 1.0'],
+ resp.headers.getheaders('server'))
+# no-check-code
diff --git a/websdk/mercurial/httpclient/tests/util.py b/websdk/mercurial/httpclient/tests/util.py
new file mode 100644
index 0000000..bbc3d87
--- /dev/null
+++ b/websdk/mercurial/httpclient/tests/util.py
@@ -0,0 +1,195 @@
+# Copyright 2010, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+import difflib
+import socket
+
+import http
+
+
+class MockSocket(object):
+ """Mock non-blocking socket object.
+
+ This is ONLY capable of mocking a nonblocking socket.
+
+ Attributes:
+ early_data: data to always send as soon as end of headers is seen
+ data: a list of strings to return on recv(), with the
+ assumption that the socket would block between each
+ string in the list.
+ read_wait_sentinel: data that must be written to the socket before
+ beginning the response.
+ close_on_empty: If true, close the socket when it runs out of data
+ for the client.
+ """
+ def __init__(self, af, socktype, proto):
+ self.af = af
+ self.socktype = socktype
+ self.proto = proto
+
+ self.early_data = []
+ self.data = []
+ self.remote_closed = self.closed = False
+ self.close_on_empty = False
+ self.sent = ''
+ self.read_wait_sentinel = http._END_HEADERS
+
+ def close(self):
+ self.closed = True
+
+ def connect(self, sa):
+ self.sa = sa
+
+ def setblocking(self, timeout):
+ assert timeout == 0
+
+ def recv(self, amt=-1):
+ if self.early_data:
+ datalist = self.early_data
+ elif not self.data:
+ return ''
+ else:
+ datalist = self.data
+ if amt == -1:
+ return datalist.pop(0)
+ data = datalist.pop(0)
+ if len(data) > amt:
+ datalist.insert(0, data[amt:])
+ if not self.data and not self.early_data and self.close_on_empty:
+ self.remote_closed = True
+ return data[:amt]
+
+ @property
+ def ready_for_read(self):
+ return ((self.early_data and http._END_HEADERS in self.sent)
+ or (self.read_wait_sentinel in self.sent and self.data)
+ or self.closed or self.remote_closed)
+
+ def send(self, data):
+ # this is a horrible mock, but nothing needs us to raise the
+ # correct exception yet
+ assert not self.closed, 'attempted to write to a closed socket'
+ assert not self.remote_closed, ('attempted to write to a'
+ ' socket closed by the server')
+ if len(data) > 8192:
+ data = data[:8192]
+ self.sent += data
+ return len(data)
+
+
+def mockselect(r, w, x, timeout=0):
+ """Simple mock for select()
+ """
+ readable = filter(lambda s: s.ready_for_read, r)
+ return readable, w[:], []
+
+
+class MockSSLSocket(object):
+ def __init__(self, sock):
+ self._sock = sock
+ self._fail_recv = True
+
+ def __getattr__(self, key):
+ return getattr(self._sock, key)
+
+ def __setattr__(self, key, value):
+ if key not in ('_sock', '_fail_recv'):
+ return setattr(self._sock, key, value)
+ return object.__setattr__(self, key, value)
+
+ def recv(self, amt=-1):
+ try:
+ if self._fail_recv:
+ raise socket.sslerror(socket.SSL_ERROR_WANT_READ)
+ return self._sock.recv(amt=amt)
+ finally:
+ self._fail_recv = not self._fail_recv
+
+
+def mocksslwrap(sock, keyfile=None, certfile=None,
+ server_side=False, cert_reqs=http.socketutil.CERT_NONE,
+ ssl_version=None, ca_certs=None,
+ do_handshake_on_connect=True,
+ suppress_ragged_eofs=True):
+ return MockSSLSocket(sock)
+
+
+def mockgetaddrinfo(host, port, unused, streamtype):
+ assert unused == 0
+ assert streamtype == socket.SOCK_STREAM
+ if host.count('.') != 3:
+ host = '127.0.0.42'
+ return [(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP, '',
+ (host, port))]
+
+
+class HttpTestBase(object):
+ def setUp(self):
+ self.orig_socket = socket.socket
+ socket.socket = MockSocket
+
+ self.orig_getaddrinfo = socket.getaddrinfo
+ socket.getaddrinfo = mockgetaddrinfo
+
+ self.orig_select = http.select.select
+ http.select.select = mockselect
+
+ self.orig_sslwrap = http.socketutil.wrap_socket
+ http.socketutil.wrap_socket = mocksslwrap
+
+ def tearDown(self):
+ socket.socket = self.orig_socket
+ http.select.select = self.orig_select
+ http.socketutil.wrap_socket = self.orig_sslwrap
+ socket.getaddrinfo = self.orig_getaddrinfo
+
+ def assertStringEqual(self, l, r):
+ try:
+ self.assertEqual(l, r, ('failed string equality check, '
+ 'see stdout for details'))
+ except:
+ add_nl = lambda li: map(lambda x: x + '\n', li)
+ print 'failed expectation:'
+ print ''.join(difflib.unified_diff(
+ add_nl(l.splitlines()), add_nl(r.splitlines()),
+ fromfile='expected', tofile='got'))
+ raise
+
+ def doPost(self, con, expect_body, body_to_send='This is some POST data'):
+ con.request('POST', '/', body=body_to_send,
+ expect_continue=True)
+ expected_req = ('POST / HTTP/1.1\r\n'
+ 'Host: 1.2.3.4\r\n'
+ 'content-length: %d\r\n'
+ 'Expect: 100-Continue\r\n'
+ 'accept-encoding: identity\r\n\r\n' %
+ len(body_to_send))
+ if expect_body:
+ expected_req += body_to_send
+ return expected_req
+# no-check-code
diff --git a/websdk/mercurial/httpconnection.py b/websdk/mercurial/httpconnection.py
new file mode 100644
index 0000000..c5fb058
--- /dev/null
+++ b/websdk/mercurial/httpconnection.py
@@ -0,0 +1,284 @@
+# httpconnection.py - urllib2 handler for new http support
+#
+# Copyright 2005, 2006, 2007, 2008 Matt Mackall <mpm@selenic.com>
+# Copyright 2006, 2007 Alexis S. L. Carvalho <alexis@cecm.usp.br>
+# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
+# Copyright 2011 Google, Inc.
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+import logging
+import socket
+import urllib
+import urllib2
+import os
+
+from mercurial import httpclient
+from mercurial import sslutil
+from mercurial import util
+from mercurial.i18n import _
+
+# moved here from url.py to avoid a cycle
+class httpsendfile(object):
+ """This is a wrapper around the objects returned by python's "open".
+
+ Its purpose is to send file-like objects via HTTP.
+ It do however not define a __len__ attribute because the length
+ might be more than Py_ssize_t can handle.
+ """
+
+ def __init__(self, ui, *args, **kwargs):
+ # We can't just "self._data = open(*args, **kwargs)" here because there
+ # is an "open" function defined in this module that shadows the global
+ # one
+ self.ui = ui
+ self._data = open(*args, **kwargs)
+ self.seek = self._data.seek
+ self.close = self._data.close
+ self.write = self._data.write
+ self.length = os.fstat(self._data.fileno()).st_size
+ self._pos = 0
+ self._total = self.length / 1024 * 2
+
+ def read(self, *args, **kwargs):
+ try:
+ ret = self._data.read(*args, **kwargs)
+ except EOFError:
+ self.ui.progress(_('sending'), None)
+ self._pos += len(ret)
+ # We pass double the max for total because we currently have
+ # to send the bundle twice in the case of a server that
+ # requires authentication. Since we can't know until we try
+ # once whether authentication will be required, just lie to
+ # the user and maybe the push succeeds suddenly at 50%.
+ self.ui.progress(_('sending'), self._pos / 1024,
+ unit=_('kb'), total=self._total)
+ return ret
+
+# moved here from url.py to avoid a cycle
+def readauthforuri(ui, uri, user):
+ # Read configuration
+ config = dict()
+ for key, val in ui.configitems('auth'):
+ if '.' not in key:
+ ui.warn(_("ignoring invalid [auth] key '%s'\n") % key)
+ continue
+ group, setting = key.rsplit('.', 1)
+ gdict = config.setdefault(group, dict())
+ if setting in ('username', 'cert', 'key'):
+ val = util.expandpath(val)
+ gdict[setting] = val
+
+ # Find the best match
+ if '://' in uri:
+ scheme, hostpath = uri.split('://', 1)
+ else:
+ # py2.4.1 doesn't provide the full URI
+ scheme, hostpath = 'http', uri
+ bestuser = None
+ bestlen = 0
+ bestauth = None
+ for group, auth in config.iteritems():
+ if user and user != auth.get('username', user):
+ # If a username was set in the URI, the entry username
+ # must either match it or be unset
+ continue
+ prefix = auth.get('prefix')
+ if not prefix:
+ continue
+ p = prefix.split('://', 1)
+ if len(p) > 1:
+ schemes, prefix = [p[0]], p[1]
+ else:
+ schemes = (auth.get('schemes') or 'https').split()
+ if (prefix == '*' or hostpath.startswith(prefix)) and \
+ (len(prefix) > bestlen or (len(prefix) == bestlen and \
+ not bestuser and 'username' in auth)) \
+ and scheme in schemes:
+ bestlen = len(prefix)
+ bestauth = group, auth
+ bestuser = auth.get('username')
+ if user and not bestuser:
+ auth['username'] = user
+ return bestauth
+
+# Mercurial (at least until we can remove the old codepath) requires
+# that the http response object be sufficiently file-like, so we
+# provide a close() method here.
+class HTTPResponse(httpclient.HTTPResponse):
+ def close(self):
+ pass
+
+class HTTPConnection(httpclient.HTTPConnection):
+ response_class = HTTPResponse
+ def request(self, method, uri, body=None, headers={}):
+ if isinstance(body, httpsendfile):
+ body.seek(0)
+ httpclient.HTTPConnection.request(self, method, uri, body=body,
+ headers=headers)
+
+
+_configuredlogging = False
+LOGFMT = '%(levelname)s:%(name)s:%(lineno)d:%(message)s'
+# Subclass BOTH of these because otherwise urllib2 "helpfully"
+# reinserts them since it notices we don't include any subclasses of
+# them.
+class http2handler(urllib2.HTTPHandler, urllib2.HTTPSHandler):
+ def __init__(self, ui, pwmgr):
+ global _configuredlogging
+ urllib2.AbstractHTTPHandler.__init__(self)
+ self.ui = ui
+ self.pwmgr = pwmgr
+ self._connections = {}
+ loglevel = ui.config('ui', 'http2debuglevel', default=None)
+ if loglevel and not _configuredlogging:
+ _configuredlogging = True
+ logger = logging.getLogger('mercurial.httpclient')
+ logger.setLevel(getattr(logging, loglevel.upper()))
+ handler = logging.StreamHandler()
+ handler.setFormatter(logging.Formatter(LOGFMT))
+ logger.addHandler(handler)
+
+ def close_all(self):
+ """Close and remove all connection objects being kept for reuse."""
+ for openconns in self._connections.values():
+ for conn in openconns:
+ conn.close()
+ self._connections = {}
+
+ # shamelessly borrowed from urllib2.AbstractHTTPHandler
+ def do_open(self, http_class, req, use_ssl):
+ """Return an addinfourl object for the request, using http_class.
+
+ http_class must implement the HTTPConnection API from httplib.
+ The addinfourl return value is a file-like object. It also
+ has methods and attributes including:
+ - info(): return a mimetools.Message object for the headers
+ - geturl(): return the original request URL
+ - code: HTTP status code
+ """
+ # If using a proxy, the host returned by get_host() is
+ # actually the proxy. On Python 2.6.1, the real destination
+ # hostname is encoded in the URI in the urllib2 request
+ # object. On Python 2.6.5, it's stored in the _tunnel_host
+ # attribute which has no accessor.
+ tunhost = getattr(req, '_tunnel_host', None)
+ host = req.get_host()
+ if tunhost:
+ proxyhost = host
+ host = tunhost
+ elif req.has_proxy():
+ proxyhost = req.get_host()
+ host = req.get_selector().split('://', 1)[1].split('/', 1)[0]
+ else:
+ proxyhost = None
+
+ if proxyhost:
+ if ':' in proxyhost:
+ # Note: this means we'll explode if we try and use an
+ # IPv6 http proxy. This isn't a regression, so we
+ # won't worry about it for now.
+ proxyhost, proxyport = proxyhost.rsplit(':', 1)
+ else:
+ proxyport = 3128 # squid default
+ proxy = (proxyhost, proxyport)
+ else:
+ proxy = None
+
+ if not host:
+ raise urllib2.URLError('no host given')
+
+ connkey = use_ssl, host, proxy
+ allconns = self._connections.get(connkey, [])
+ conns = [c for c in allconns if not c.busy()]
+ if conns:
+ h = conns[0]
+ else:
+ if allconns:
+ self.ui.debug('all connections for %s busy, making a new '
+ 'one\n' % host)
+ timeout = None
+ if req.timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
+ timeout = req.timeout
+ h = http_class(host, timeout=timeout, proxy_hostport=proxy)
+ self._connections.setdefault(connkey, []).append(h)
+
+ headers = dict(req.headers)
+ headers.update(req.unredirected_hdrs)
+ headers = dict(
+ (name.title(), val) for name, val in headers.items())
+ try:
+ path = req.get_selector()
+ if '://' in path:
+ path = path.split('://', 1)[1].split('/', 1)[1]
+ if path[0] != '/':
+ path = '/' + path
+ h.request(req.get_method(), path, req.data, headers)
+ r = h.getresponse()
+ except socket.error, err: # XXX what error?
+ raise urllib2.URLError(err)
+
+ # Pick apart the HTTPResponse object to get the addinfourl
+ # object initialized properly.
+ r.recv = r.read
+
+ resp = urllib.addinfourl(r, r.headers, req.get_full_url())
+ resp.code = r.status
+ resp.msg = r.reason
+ return resp
+
+ # httplib always uses the given host/port as the socket connect
+ # target, and then allows full URIs in the request path, which it
+ # then observes and treats as a signal to do proxying instead.
+ def http_open(self, req):
+ if req.get_full_url().startswith('https'):
+ return self.https_open(req)
+ return self.do_open(HTTPConnection, req, False)
+
+ def https_open(self, req):
+ # req.get_full_url() does not contain credentials and we may
+ # need them to match the certificates.
+ url = req.get_full_url()
+ user, password = self.pwmgr.find_stored_password(url)
+ res = readauthforuri(self.ui, url, user)
+ if res:
+ group, auth = res
+ self.auth = auth
+ self.ui.debug("using auth.%s.* for authentication\n" % group)
+ else:
+ self.auth = None
+ return self.do_open(self._makesslconnection, req, True)
+
+ def _makesslconnection(self, host, port=443, *args, **kwargs):
+ keyfile = None
+ certfile = None
+
+ if args: # key_file
+ keyfile = args.pop(0)
+ if args: # cert_file
+ certfile = args.pop(0)
+
+ # if the user has specified different key/cert files in
+ # hgrc, we prefer these
+ if self.auth and 'key' in self.auth and 'cert' in self.auth:
+ keyfile = self.auth['key']
+ certfile = self.auth['cert']
+
+ # let host port take precedence
+ if ':' in host and '[' not in host or ']:' in host:
+ host, port = host.rsplit(':', 1)
+ port = int(port)
+ if '[' in host:
+ host = host[1:-1]
+
+ if keyfile:
+ kwargs['keyfile'] = keyfile
+ if certfile:
+ kwargs['certfile'] = certfile
+
+ kwargs.update(sslutil.sslkwargs(self.ui, host))
+
+ con = HTTPConnection(host, port, use_ssl=True,
+ ssl_validator=sslutil.validator(self.ui, host),
+ **kwargs)
+ return con
diff --git a/websdk/mercurial/httprepo.py b/websdk/mercurial/httprepo.py
index ed742e6..9ee2eae 120000..100644
--- a/websdk/mercurial/httprepo.py
+++ b/websdk/mercurial/httprepo.py
@@ -1 +1,247 @@
-/usr/share/pyshared/mercurial/httprepo.py \ No newline at end of file
+# httprepo.py - HTTP repository proxy classes for mercurial
+#
+# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
+# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from node import nullid
+from i18n import _
+import changegroup, statichttprepo, error, httpconnection, url, util, wireproto
+import os, urllib, urllib2, zlib, httplib
+import errno, socket
+
+def zgenerator(f):
+ zd = zlib.decompressobj()
+ try:
+ for chunk in util.filechunkiter(f):
+ while chunk:
+ yield zd.decompress(chunk, 2**18)
+ chunk = zd.unconsumed_tail
+ except httplib.HTTPException:
+ raise IOError(None, _('connection ended unexpectedly'))
+ yield zd.flush()
+
+class httprepository(wireproto.wirerepository):
+ def __init__(self, ui, path):
+ self.path = path
+ self.caps = None
+ self.handler = None
+ self.urlopener = None
+ u = util.url(path)
+ if u.query or u.fragment:
+ raise util.Abort(_('unsupported URL component: "%s"') %
+ (u.query or u.fragment))
+
+ # urllib cannot handle URLs with embedded user or passwd
+ self._url, authinfo = u.authinfo()
+
+ self.ui = ui
+ self.ui.debug('using %s\n' % self._url)
+
+ self.urlopener = url.opener(ui, authinfo)
+
+ def __del__(self):
+ if self.urlopener:
+ for h in self.urlopener.handlers:
+ h.close()
+ getattr(h, "close_all", lambda : None)()
+
+ def url(self):
+ return self.path
+
+ # look up capabilities only when needed
+
+ def _fetchcaps(self):
+ self.caps = set(self._call('capabilities').split())
+
+ def get_caps(self):
+ if self.caps is None:
+ try:
+ self._fetchcaps()
+ except error.RepoError:
+ self.caps = set()
+ self.ui.debug('capabilities: %s\n' %
+ (' '.join(self.caps or ['none'])))
+ return self.caps
+
+ capabilities = property(get_caps)
+
+ def lock(self):
+ raise util.Abort(_('operation not supported over http'))
+
+ def _callstream(self, cmd, **args):
+ if cmd == 'pushkey':
+ args['data'] = ''
+ data = args.pop('data', None)
+ size = 0
+ if util.safehasattr(data, 'length'):
+ size = data.length
+ elif data is not None:
+ size = len(data)
+ headers = args.pop('headers', {})
+
+ if size and self.ui.configbool('ui', 'usehttp2', False):
+ headers['Expect'] = '100-Continue'
+ headers['X-HgHttp2'] = '1'
+
+ self.ui.debug("sending %s command\n" % cmd)
+ q = [('cmd', cmd)]
+ headersize = 0
+ if len(args) > 0:
+ httpheader = self.capable('httpheader')
+ if httpheader:
+ headersize = int(httpheader.split(',')[0])
+ if headersize > 0:
+ # The headers can typically carry more data than the URL.
+ encargs = urllib.urlencode(sorted(args.items()))
+ headerfmt = 'X-HgArg-%s'
+ contentlen = headersize - len(headerfmt % '000' + ': \r\n')
+ headernum = 0
+ for i in xrange(0, len(encargs), contentlen):
+ headernum += 1
+ header = headerfmt % str(headernum)
+ headers[header] = encargs[i:i + contentlen]
+ varyheaders = [headerfmt % str(h) for h in range(1, headernum + 1)]
+ headers['Vary'] = ','.join(varyheaders)
+ else:
+ q += sorted(args.items())
+ qs = '?%s' % urllib.urlencode(q)
+ cu = "%s%s" % (self._url, qs)
+ req = urllib2.Request(cu, data, headers)
+ if data is not None:
+ self.ui.debug("sending %s bytes\n" % size)
+ req.add_unredirected_header('Content-Length', '%d' % size)
+ try:
+ resp = self.urlopener.open(req)
+ except urllib2.HTTPError, inst:
+ if inst.code == 401:
+ raise util.Abort(_('authorization failed'))
+ raise
+ except httplib.HTTPException, inst:
+ self.ui.debug('http error while sending %s command\n' % cmd)
+ self.ui.traceback()
+ raise IOError(None, inst)
+ except IndexError:
+ # this only happens with Python 2.3, later versions raise URLError
+ raise util.Abort(_('http error, possibly caused by proxy setting'))
+ # record the url we got redirected to
+ resp_url = resp.geturl()
+ if resp_url.endswith(qs):
+ resp_url = resp_url[:-len(qs)]
+ if self._url.rstrip('/') != resp_url.rstrip('/'):
+ if not self.ui.quiet:
+ self.ui.warn(_('real URL is %s\n') % resp_url)
+ self._url = resp_url
+ try:
+ proto = resp.getheader('content-type')
+ except AttributeError:
+ proto = resp.headers.get('content-type', '')
+
+ safeurl = util.hidepassword(self._url)
+ if proto.startswith('application/hg-error'):
+ raise error.OutOfBandError(resp.read())
+ # accept old "text/plain" and "application/hg-changegroup" for now
+ if not (proto.startswith('application/mercurial-') or
+ proto.startswith('text/plain') or
+ proto.startswith('application/hg-changegroup')):
+ self.ui.debug("requested URL: '%s'\n" % util.hidepassword(cu))
+ raise error.RepoError(
+ _("'%s' does not appear to be an hg repository:\n"
+ "---%%<--- (%s)\n%s\n---%%<---\n")
+ % (safeurl, proto or 'no content-type', resp.read()))
+
+ if proto.startswith('application/mercurial-'):
+ try:
+ version = proto.split('-', 1)[1]
+ version_info = tuple([int(n) for n in version.split('.')])
+ except ValueError:
+ raise error.RepoError(_("'%s' sent a broken Content-Type "
+ "header (%s)") % (safeurl, proto))
+ if version_info > (0, 1):
+ raise error.RepoError(_("'%s' uses newer protocol %s") %
+ (safeurl, version))
+
+ return resp
+
+ def _call(self, cmd, **args):
+ fp = self._callstream(cmd, **args)
+ try:
+ return fp.read()
+ finally:
+ # if using keepalive, allow connection to be reused
+ fp.close()
+
+ def _callpush(self, cmd, cg, **args):
+ # have to stream bundle to a temp file because we do not have
+ # http 1.1 chunked transfer.
+
+ types = self.capable('unbundle')
+ try:
+ types = types.split(',')
+ except AttributeError:
+ # servers older than d1b16a746db6 will send 'unbundle' as a
+ # boolean capability. They only support headerless/uncompressed
+ # bundles.
+ types = [""]
+ for x in types:
+ if x in changegroup.bundletypes:
+ type = x
+ break
+
+ tempname = changegroup.writebundle(cg, None, type)
+ fp = httpconnection.httpsendfile(self.ui, tempname, "rb")
+ headers = {'Content-Type': 'application/mercurial-0.1'}
+
+ try:
+ try:
+ r = self._call(cmd, data=fp, headers=headers, **args)
+ vals = r.split('\n', 1)
+ if len(vals) < 2:
+ raise error.ResponseError(_("unexpected response:"), r)
+ return vals
+ except socket.error, err:
+ if err.args[0] in (errno.ECONNRESET, errno.EPIPE):
+ raise util.Abort(_('push failed: %s') % err.args[1])
+ raise util.Abort(err.args[1])
+ finally:
+ fp.close()
+ os.unlink(tempname)
+
+ def _abort(self, exception):
+ raise exception
+
+ def _decompress(self, stream):
+ return util.chunkbuffer(zgenerator(stream))
+
+class httpsrepository(httprepository):
+ def __init__(self, ui, path):
+ if not url.has_https:
+ raise util.Abort(_('Python support for SSL and HTTPS '
+ 'is not installed'))
+ httprepository.__init__(self, ui, path)
+
+def instance(ui, path, create):
+ if create:
+ raise util.Abort(_('cannot create new http repository'))
+ try:
+ if path.startswith('https:'):
+ inst = httpsrepository(ui, path)
+ else:
+ inst = httprepository(ui, path)
+ try:
+ # Try to do useful work when checking compatibility.
+ # Usually saves a roundtrip since we want the caps anyway.
+ inst._fetchcaps()
+ except error.RepoError:
+ # No luck, try older compatibility check.
+ inst.between([(nullid, nullid)])
+ return inst
+ except error.RepoError, httpexception:
+ try:
+ r = statichttprepo.instance(ui, "static-" + path, create)
+ ui.note('(falling back to static-http)\n')
+ return r
+ except error.RepoError:
+ raise httpexception # use the original http RepoError instead
diff --git a/websdk/mercurial/i18n.py b/websdk/mercurial/i18n.py
index 6a9a8f0..b06a02e 120000..100644
--- a/websdk/mercurial/i18n.py
+++ b/websdk/mercurial/i18n.py
@@ -1 +1,63 @@
-/usr/share/pyshared/mercurial/i18n.py \ No newline at end of file
+# i18n.py - internationalization support for mercurial
+#
+# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+import encoding
+import gettext, sys, os
+
+# modelled after templater.templatepath:
+if getattr(sys, 'frozen', None) is not None:
+ module = sys.executable
+else:
+ module = __file__
+
+base = os.path.dirname(module)
+for dir in ('.', '..'):
+ localedir = os.path.join(base, dir, 'locale')
+ if os.path.isdir(localedir):
+ break
+
+t = gettext.translation('hg', localedir, fallback=True)
+
+def gettext(message):
+ """Translate message.
+
+ The message is looked up in the catalog to get a Unicode string,
+ which is encoded in the local encoding before being returned.
+
+ Important: message is restricted to characters in the encoding
+ given by sys.getdefaultencoding() which is most likely 'ascii'.
+ """
+ # If message is None, t.ugettext will return u'None' as the
+ # translation whereas our callers expect us to return None.
+ if message is None:
+ return message
+
+ paragraphs = message.split('\n\n')
+ # Be careful not to translate the empty string -- it holds the
+ # meta data of the .po file.
+ u = u'\n\n'.join([p and t.ugettext(p) or '' for p in paragraphs])
+ try:
+ # encoding.tolocal cannot be used since it will first try to
+ # decode the Unicode string. Calling u.decode(enc) really
+ # means u.encode(sys.getdefaultencoding()).decode(enc). Since
+ # the Python encoding defaults to 'ascii', this fails if the
+ # translated string use non-ASCII characters.
+ return u.encode(encoding.encoding, "replace")
+ except LookupError:
+ # An unknown encoding results in a LookupError.
+ return message
+
+def _plain():
+ if 'HGPLAIN' not in os.environ and 'HGPLAINEXCEPT' not in os.environ:
+ return False
+ exceptions = os.environ.get('HGPLAINEXCEPT', '').strip().split(',')
+ return 'i18n' not in exceptions
+
+if _plain():
+ _ = lambda message: message
+else:
+ _ = gettext
diff --git a/websdk/mercurial/ignore.py b/websdk/mercurial/ignore.py
index 5920262..9c2e9cf 120000..100644
--- a/websdk/mercurial/ignore.py
+++ b/websdk/mercurial/ignore.py
@@ -1 +1,105 @@
-/usr/share/pyshared/mercurial/ignore.py \ No newline at end of file
+# ignore.py - ignored file handling for mercurial
+#
+# Copyright 2007 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from i18n import _
+import util, match
+import re
+
+_commentre = None
+
+def ignorepats(lines):
+ '''parse lines (iterable) of .hgignore text, returning a tuple of
+ (patterns, parse errors). These patterns should be given to compile()
+ to be validated and converted into a match function.'''
+ syntaxes = {'re': 'relre:', 'regexp': 'relre:', 'glob': 'relglob:'}
+ syntax = 'relre:'
+ patterns = []
+ warnings = []
+
+ for line in lines:
+ if "#" in line:
+ global _commentre
+ if not _commentre:
+ _commentre = re.compile(r'((^|[^\\])(\\\\)*)#.*')
+ # remove comments prefixed by an even number of escapes
+ line = _commentre.sub(r'\1', line)
+ # fixup properly escaped comments that survived the above
+ line = line.replace("\\#", "#")
+ line = line.rstrip()
+ if not line:
+ continue
+
+ if line.startswith('syntax:'):
+ s = line[7:].strip()
+ try:
+ syntax = syntaxes[s]
+ except KeyError:
+ warnings.append(_("ignoring invalid syntax '%s'") % s)
+ continue
+ pat = syntax + line
+ for s, rels in syntaxes.iteritems():
+ if line.startswith(rels):
+ pat = line
+ break
+ elif line.startswith(s+':'):
+ pat = rels + line[len(s)+1:]
+ break
+ patterns.append(pat)
+
+ return patterns, warnings
+
+def ignore(root, files, warn):
+ '''return matcher covering patterns in 'files'.
+
+ the files parsed for patterns include:
+ .hgignore in the repository root
+ any additional files specified in the [ui] section of ~/.hgrc
+
+ trailing white space is dropped.
+ the escape character is backslash.
+ comments start with #.
+ empty lines are skipped.
+
+ lines can be of the following formats:
+
+ syntax: regexp # defaults following lines to non-rooted regexps
+ syntax: glob # defaults following lines to non-rooted globs
+ re:pattern # non-rooted regular expression
+ glob:pattern # non-rooted glob
+ pattern # pattern of the current default type'''
+
+ pats = {}
+ for f in files:
+ try:
+ pats[f] = []
+ fp = open(f)
+ pats[f], warnings = ignorepats(fp)
+ fp.close()
+ for warning in warnings:
+ warn("%s: %s\n" % (f, warning))
+ except IOError, inst:
+ if f != files[0]:
+ warn(_("skipping unreadable ignore file '%s': %s\n") %
+ (f, inst.strerror))
+
+ allpats = []
+ for patlist in pats.values():
+ allpats.extend(patlist)
+ if not allpats:
+ return util.never
+
+ try:
+ ignorefunc = match.match(root, '', [], allpats)
+ except util.Abort:
+ # Re-raise an exception where the src is the right file
+ for f, patlist in pats.iteritems():
+ try:
+ match.match(root, '', [], patlist)
+ except util.Abort, inst:
+ raise util.Abort('%s: %s' % (f, inst[0]))
+
+ return ignorefunc
diff --git a/websdk/mercurial/keepalive.py b/websdk/mercurial/keepalive.py
index e159a6f..0402dfb 120000..100644
--- a/websdk/mercurial/keepalive.py
+++ b/websdk/mercurial/keepalive.py
@@ -1 +1,766 @@
-/usr/share/pyshared/mercurial/keepalive.py \ No newline at end of file
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the
+# Free Software Foundation, Inc.,
+# 59 Temple Place, Suite 330,
+# Boston, MA 02111-1307 USA
+
+# This file is part of urlgrabber, a high-level cross-protocol url-grabber
+# Copyright 2002-2004 Michael D. Stenner, Ryan Tomayko
+
+# Modified by Benoit Boissinot:
+# - fix for digest auth (inspired from urllib2.py @ Python v2.4)
+# Modified by Dirkjan Ochtman:
+# - import md5 function from a local util module
+# Modified by Martin Geisler:
+# - moved md5 function from local util module to this module
+# Modified by Augie Fackler:
+# - add safesend method and use it to prevent broken pipe errors
+# on large POST requests
+
+"""An HTTP handler for urllib2 that supports HTTP 1.1 and keepalive.
+
+>>> import urllib2
+>>> from keepalive import HTTPHandler
+>>> keepalive_handler = HTTPHandler()
+>>> opener = urllib2.build_opener(keepalive_handler)
+>>> urllib2.install_opener(opener)
+>>>
+>>> fo = urllib2.urlopen('http://www.python.org')
+
+If a connection to a given host is requested, and all of the existing
+connections are still in use, another connection will be opened. If
+the handler tries to use an existing connection but it fails in some
+way, it will be closed and removed from the pool.
+
+To remove the handler, simply re-run build_opener with no arguments, and
+install that opener.
+
+You can explicitly close connections by using the close_connection()
+method of the returned file-like object (described below) or you can
+use the handler methods:
+
+ close_connection(host)
+ close_all()
+ open_connections()
+
+NOTE: using the close_connection and close_all methods of the handler
+should be done with care when using multiple threads.
+ * there is nothing that prevents another thread from creating new
+ connections immediately after connections are closed
+ * no checks are done to prevent in-use connections from being closed
+
+>>> keepalive_handler.close_all()
+
+EXTRA ATTRIBUTES AND METHODS
+
+ Upon a status of 200, the object returned has a few additional
+ attributes and methods, which should not be used if you want to
+ remain consistent with the normal urllib2-returned objects:
+
+ close_connection() - close the connection to the host
+ readlines() - you know, readlines()
+ status - the return status (ie 404)
+ reason - english translation of status (ie 'File not found')
+
+ If you want the best of both worlds, use this inside an
+ AttributeError-catching try:
+
+ >>> try: status = fo.status
+ >>> except AttributeError: status = None
+
+ Unfortunately, these are ONLY there if status == 200, so it's not
+ easy to distinguish between non-200 responses. The reason is that
+ urllib2 tries to do clever things with error codes 301, 302, 401,
+ and 407, and it wraps the object upon return.
+
+ For python versions earlier than 2.4, you can avoid this fancy error
+ handling by setting the module-level global HANDLE_ERRORS to zero.
+ You see, prior to 2.4, it's the HTTP Handler's job to determine what
+ to handle specially, and what to just pass up. HANDLE_ERRORS == 0
+ means "pass everything up". In python 2.4, however, this job no
+ longer belongs to the HTTP Handler and is now done by a NEW handler,
+ HTTPErrorProcessor. Here's the bottom line:
+
+ python version < 2.4
+ HANDLE_ERRORS == 1 (default) pass up 200, treat the rest as
+ errors
+ HANDLE_ERRORS == 0 pass everything up, error processing is
+ left to the calling code
+ python version >= 2.4
+ HANDLE_ERRORS == 1 pass up 200, treat the rest as errors
+ HANDLE_ERRORS == 0 (default) pass everything up, let the
+ other handlers (specifically,
+ HTTPErrorProcessor) decide what to do
+
+ In practice, setting the variable either way makes little difference
+ in python 2.4, so for the most consistent behavior across versions,
+ you probably just want to use the defaults, which will give you
+ exceptions on errors.
+
+"""
+
+# $Id: keepalive.py,v 1.14 2006/04/04 21:00:32 mstenner Exp $
+
+import errno
+import httplib
+import socket
+import thread
+import urllib2
+
+DEBUG = None
+
+import sys
+if sys.version_info < (2, 4):
+ HANDLE_ERRORS = 1
+else: HANDLE_ERRORS = 0
+
+class ConnectionManager(object):
+ """
+ The connection manager must be able to:
+ * keep track of all existing
+ """
+ def __init__(self):
+ self._lock = thread.allocate_lock()
+ self._hostmap = {} # map hosts to a list of connections
+ self._connmap = {} # map connections to host
+ self._readymap = {} # map connection to ready state
+
+ def add(self, host, connection, ready):
+ self._lock.acquire()
+ try:
+ if not host in self._hostmap:
+ self._hostmap[host] = []
+ self._hostmap[host].append(connection)
+ self._connmap[connection] = host
+ self._readymap[connection] = ready
+ finally:
+ self._lock.release()
+
+ def remove(self, connection):
+ self._lock.acquire()
+ try:
+ try:
+ host = self._connmap[connection]
+ except KeyError:
+ pass
+ else:
+ del self._connmap[connection]
+ del self._readymap[connection]
+ self._hostmap[host].remove(connection)
+ if not self._hostmap[host]: del self._hostmap[host]
+ finally:
+ self._lock.release()
+
+ def set_ready(self, connection, ready):
+ try:
+ self._readymap[connection] = ready
+ except KeyError:
+ pass
+
+ def get_ready_conn(self, host):
+ conn = None
+ self._lock.acquire()
+ try:
+ if host in self._hostmap:
+ for c in self._hostmap[host]:
+ if self._readymap[c]:
+ self._readymap[c] = 0
+ conn = c
+ break
+ finally:
+ self._lock.release()
+ return conn
+
+ def get_all(self, host=None):
+ if host:
+ return list(self._hostmap.get(host, []))
+ else:
+ return dict(self._hostmap)
+
+class KeepAliveHandler(object):
+ def __init__(self):
+ self._cm = ConnectionManager()
+
+ #### Connection Management
+ def open_connections(self):
+ """return a list of connected hosts and the number of connections
+ to each. [('foo.com:80', 2), ('bar.org', 1)]"""
+ return [(host, len(li)) for (host, li) in self._cm.get_all().items()]
+
+ def close_connection(self, host):
+ """close connection(s) to <host>
+ host is the host:port spec, as in 'www.cnn.com:8080' as passed in.
+ no error occurs if there is no connection to that host."""
+ for h in self._cm.get_all(host):
+ self._cm.remove(h)
+ h.close()
+
+ def close_all(self):
+ """close all open connections"""
+ for host, conns in self._cm.get_all().iteritems():
+ for h in conns:
+ self._cm.remove(h)
+ h.close()
+
+ def _request_closed(self, request, host, connection):
+ """tells us that this request is now closed and the the
+ connection is ready for another request"""
+ self._cm.set_ready(connection, 1)
+
+ def _remove_connection(self, host, connection, close=0):
+ if close:
+ connection.close()
+ self._cm.remove(connection)
+
+ #### Transaction Execution
+ def http_open(self, req):
+ return self.do_open(HTTPConnection, req)
+
+ def do_open(self, http_class, req):
+ host = req.get_host()
+ if not host:
+ raise urllib2.URLError('no host given')
+
+ try:
+ h = self._cm.get_ready_conn(host)
+ while h:
+ r = self._reuse_connection(h, req, host)
+
+ # if this response is non-None, then it worked and we're
+ # done. Break out, skipping the else block.
+ if r:
+ break
+
+ # connection is bad - possibly closed by server
+ # discard it and ask for the next free connection
+ h.close()
+ self._cm.remove(h)
+ h = self._cm.get_ready_conn(host)
+ else:
+ # no (working) free connections were found. Create a new one.
+ h = http_class(host)
+ if DEBUG:
+ DEBUG.info("creating new connection to %s (%d)",
+ host, id(h))
+ self._cm.add(host, h, 0)
+ self._start_transaction(h, req)
+ r = h.getresponse()
+ except (socket.error, httplib.HTTPException), err:
+ raise urllib2.URLError(err)
+
+ # if not a persistent connection, don't try to reuse it
+ if r.will_close:
+ self._cm.remove(h)
+
+ if DEBUG:
+ DEBUG.info("STATUS: %s, %s", r.status, r.reason)
+ r._handler = self
+ r._host = host
+ r._url = req.get_full_url()
+ r._connection = h
+ r.code = r.status
+ r.headers = r.msg
+ r.msg = r.reason
+
+ if r.status == 200 or not HANDLE_ERRORS:
+ return r
+ else:
+ return self.parent.error('http', req, r,
+ r.status, r.msg, r.headers)
+
+ def _reuse_connection(self, h, req, host):
+ """start the transaction with a re-used connection
+ return a response object (r) upon success or None on failure.
+ This DOES not close or remove bad connections in cases where
+ it returns. However, if an unexpected exception occurs, it
+ will close and remove the connection before re-raising.
+ """
+ try:
+ self._start_transaction(h, req)
+ r = h.getresponse()
+ # note: just because we got something back doesn't mean it
+ # worked. We'll check the version below, too.
+ except (socket.error, httplib.HTTPException):
+ r = None
+ except:
+ # adding this block just in case we've missed
+ # something we will still raise the exception, but
+ # lets try and close the connection and remove it
+ # first. We previously got into a nasty loop
+ # where an exception was uncaught, and so the
+ # connection stayed open. On the next try, the
+ # same exception was raised, etc. The tradeoff is
+ # that it's now possible this call will raise
+ # a DIFFERENT exception
+ if DEBUG:
+ DEBUG.error("unexpected exception - closing "
+ "connection to %s (%d)", host, id(h))
+ self._cm.remove(h)
+ h.close()
+ raise
+
+ if r is None or r.version == 9:
+ # httplib falls back to assuming HTTP 0.9 if it gets a
+ # bad header back. This is most likely to happen if
+ # the socket has been closed by the server since we
+ # last used the connection.
+ if DEBUG:
+ DEBUG.info("failed to re-use connection to %s (%d)",
+ host, id(h))
+ r = None
+ else:
+ if DEBUG:
+ DEBUG.info("re-using connection to %s (%d)", host, id(h))
+
+ return r
+
+ def _start_transaction(self, h, req):
+ # What follows mostly reimplements HTTPConnection.request()
+ # except it adds self.parent.addheaders in the mix.
+ headers = req.headers.copy()
+ if sys.version_info >= (2, 4):
+ headers.update(req.unredirected_hdrs)
+ headers.update(self.parent.addheaders)
+ headers = dict((n.lower(), v) for n, v in headers.items())
+ skipheaders = {}
+ for n in ('host', 'accept-encoding'):
+ if n in headers:
+ skipheaders['skip_' + n.replace('-', '_')] = 1
+ try:
+ if req.has_data():
+ data = req.get_data()
+ h.putrequest('POST', req.get_selector(), **skipheaders)
+ if 'content-type' not in headers:
+ h.putheader('Content-type',
+ 'application/x-www-form-urlencoded')
+ if 'content-length' not in headers:
+ h.putheader('Content-length', '%d' % len(data))
+ else:
+ h.putrequest('GET', req.get_selector(), **skipheaders)
+ except (socket.error), err:
+ raise urllib2.URLError(err)
+ for k, v in headers.items():
+ h.putheader(k, v)
+ h.endheaders()
+ if req.has_data():
+ h.send(data)
+
+class HTTPHandler(KeepAliveHandler, urllib2.HTTPHandler):
+ pass
+
+class HTTPResponse(httplib.HTTPResponse):
+ # we need to subclass HTTPResponse in order to
+ # 1) add readline() and readlines() methods
+ # 2) add close_connection() methods
+ # 3) add info() and geturl() methods
+
+ # in order to add readline(), read must be modified to deal with a
+ # buffer. example: readline must read a buffer and then spit back
+ # one line at a time. The only real alternative is to read one
+ # BYTE at a time (ick). Once something has been read, it can't be
+ # put back (ok, maybe it can, but that's even uglier than this),
+ # so if you THEN do a normal read, you must first take stuff from
+ # the buffer.
+
+ # the read method wraps the original to accomodate buffering,
+ # although read() never adds to the buffer.
+ # Both readline and readlines have been stolen with almost no
+ # modification from socket.py
+
+
+ def __init__(self, sock, debuglevel=0, strict=0, method=None):
+ if method: # the httplib in python 2.3 uses the method arg
+ httplib.HTTPResponse.__init__(self, sock, debuglevel, method)
+ else: # 2.2 doesn't
+ httplib.HTTPResponse.__init__(self, sock, debuglevel)
+ self.fileno = sock.fileno
+ self.code = None
+ self._rbuf = ''
+ self._rbufsize = 8096
+ self._handler = None # inserted by the handler later
+ self._host = None # (same)
+ self._url = None # (same)
+ self._connection = None # (same)
+
+ _raw_read = httplib.HTTPResponse.read
+
+ def close(self):
+ if self.fp:
+ self.fp.close()
+ self.fp = None
+ if self._handler:
+ self._handler._request_closed(self, self._host,
+ self._connection)
+
+ def close_connection(self):
+ self._handler._remove_connection(self._host, self._connection, close=1)
+ self.close()
+
+ def info(self):
+ return self.headers
+
+ def geturl(self):
+ return self._url
+
+ def read(self, amt=None):
+ # the _rbuf test is only in this first if for speed. It's not
+ # logically necessary
+ if self._rbuf and not amt is None:
+ L = len(self._rbuf)
+ if amt > L:
+ amt -= L
+ else:
+ s = self._rbuf[:amt]
+ self._rbuf = self._rbuf[amt:]
+ return s
+
+ s = self._rbuf + self._raw_read(amt)
+ self._rbuf = ''
+ return s
+
+ # stolen from Python SVN #68532 to fix issue1088
+ def _read_chunked(self, amt):
+ chunk_left = self.chunk_left
+ value = ''
+
+ # XXX This accumulates chunks by repeated string concatenation,
+ # which is not efficient as the number or size of chunks gets big.
+ while True:
+ if chunk_left is None:
+ line = self.fp.readline()
+ i = line.find(';')
+ if i >= 0:
+ line = line[:i] # strip chunk-extensions
+ try:
+ chunk_left = int(line, 16)
+ except ValueError:
+ # close the connection as protocol synchronisation is
+ # probably lost
+ self.close()
+ raise httplib.IncompleteRead(value)
+ if chunk_left == 0:
+ break
+ if amt is None:
+ value += self._safe_read(chunk_left)
+ elif amt < chunk_left:
+ value += self._safe_read(amt)
+ self.chunk_left = chunk_left - amt
+ return value
+ elif amt == chunk_left:
+ value += self._safe_read(amt)
+ self._safe_read(2) # toss the CRLF at the end of the chunk
+ self.chunk_left = None
+ return value
+ else:
+ value += self._safe_read(chunk_left)
+ amt -= chunk_left
+
+ # we read the whole chunk, get another
+ self._safe_read(2) # toss the CRLF at the end of the chunk
+ chunk_left = None
+
+ # read and discard trailer up to the CRLF terminator
+ ### note: we shouldn't have any trailers!
+ while True:
+ line = self.fp.readline()
+ if not line:
+ # a vanishingly small number of sites EOF without
+ # sending the trailer
+ break
+ if line == '\r\n':
+ break
+
+ # we read everything; close the "file"
+ self.close()
+
+ return value
+
+ def readline(self, limit=-1):
+ i = self._rbuf.find('\n')
+ while i < 0 and not (0 < limit <= len(self._rbuf)):
+ new = self._raw_read(self._rbufsize)
+ if not new:
+ break
+ i = new.find('\n')
+ if i >= 0:
+ i = i + len(self._rbuf)
+ self._rbuf = self._rbuf + new
+ if i < 0:
+ i = len(self._rbuf)
+ else:
+ i = i + 1
+ if 0 <= limit < len(self._rbuf):
+ i = limit
+ data, self._rbuf = self._rbuf[:i], self._rbuf[i:]
+ return data
+
+ def readlines(self, sizehint = 0):
+ total = 0
+ list = []
+ while True:
+ line = self.readline()
+ if not line:
+ break
+ list.append(line)
+ total += len(line)
+ if sizehint and total >= sizehint:
+ break
+ return list
+
+def safesend(self, str):
+ """Send `str' to the server.
+
+ Shamelessly ripped off from httplib to patch a bad behavior.
+ """
+ # _broken_pipe_resp is an attribute we set in this function
+ # if the socket is closed while we're sending data but
+ # the server sent us a response before hanging up.
+ # In that case, we want to pretend to send the rest of the
+ # outgoing data, and then let the user use getresponse()
+ # (which we wrap) to get this last response before
+ # opening a new socket.
+ if getattr(self, '_broken_pipe_resp', None) is not None:
+ return
+
+ if self.sock is None:
+ if self.auto_open:
+ self.connect()
+ else:
+ raise httplib.NotConnected()
+
+ # send the data to the server. if we get a broken pipe, then close
+ # the socket. we want to reconnect when somebody tries to send again.
+ #
+ # NOTE: we DO propagate the error, though, because we cannot simply
+ # ignore the error... the caller will know if they can retry.
+ if self.debuglevel > 0:
+ print "send:", repr(str)
+ try:
+ blocksize = 8192
+ read = getattr(str, 'read', None)
+ if read is not None:
+ if self.debuglevel > 0:
+ print "sendIng a read()able"
+ data = read(blocksize)
+ while data:
+ self.sock.sendall(data)
+ data = read(blocksize)
+ else:
+ self.sock.sendall(str)
+ except socket.error, v:
+ reraise = True
+ if v[0] == errno.EPIPE: # Broken pipe
+ if self._HTTPConnection__state == httplib._CS_REQ_SENT:
+ self._broken_pipe_resp = None
+ self._broken_pipe_resp = self.getresponse()
+ reraise = False
+ self.close()
+ if reraise:
+ raise
+
+def wrapgetresponse(cls):
+ """Wraps getresponse in cls with a broken-pipe sane version.
+ """
+ def safegetresponse(self):
+ # In safesend() we might set the _broken_pipe_resp
+ # attribute, in which case the socket has already
+ # been closed and we just need to give them the response
+ # back. Otherwise, we use the normal response path.
+ r = getattr(self, '_broken_pipe_resp', None)
+ if r is not None:
+ return r
+ return cls.getresponse(self)
+ safegetresponse.__doc__ = cls.getresponse.__doc__
+ return safegetresponse
+
+class HTTPConnection(httplib.HTTPConnection):
+ # use the modified response class
+ response_class = HTTPResponse
+ send = safesend
+ getresponse = wrapgetresponse(httplib.HTTPConnection)
+
+
+#########################################################################
+##### TEST FUNCTIONS
+#########################################################################
+
+def error_handler(url):
+ global HANDLE_ERRORS
+ orig = HANDLE_ERRORS
+ keepalive_handler = HTTPHandler()
+ opener = urllib2.build_opener(keepalive_handler)
+ urllib2.install_opener(opener)
+ pos = {0: 'off', 1: 'on'}
+ for i in (0, 1):
+ print " fancy error handling %s (HANDLE_ERRORS = %i)" % (pos[i], i)
+ HANDLE_ERRORS = i
+ try:
+ fo = urllib2.urlopen(url)
+ fo.read()
+ fo.close()
+ try:
+ status, reason = fo.status, fo.reason
+ except AttributeError:
+ status, reason = None, None
+ except IOError, e:
+ print " EXCEPTION: %s" % e
+ raise
+ else:
+ print " status = %s, reason = %s" % (status, reason)
+ HANDLE_ERRORS = orig
+ hosts = keepalive_handler.open_connections()
+ print "open connections:", hosts
+ keepalive_handler.close_all()
+
def md5(s):
    """Compatibility shim for the md5 constructor.

    Prefers hashlib (Python >= 2.5) and falls back to the legacy md5
    module.  On first use it rebinds the module-level name ``md5`` to
    the real constructor so subsequent calls skip the import dance.
    """
    try:
        from hashlib import md5 as _md5
    except ImportError:
        from md5 import md5 as _md5
    global md5
    md5 = _md5
    return _md5(s)
+
+def continuity(url):
+ format = '%25s: %s'
+
+ # first fetch the file with the normal http handler
+ opener = urllib2.build_opener()
+ urllib2.install_opener(opener)
+ fo = urllib2.urlopen(url)
+ foo = fo.read()
+ fo.close()
+ m = md5.new(foo)
+ print format % ('normal urllib', m.hexdigest())
+
+ # now install the keepalive handler and try again
+ opener = urllib2.build_opener(HTTPHandler())
+ urllib2.install_opener(opener)
+
+ fo = urllib2.urlopen(url)
+ foo = fo.read()
+ fo.close()
+ m = md5.new(foo)
+ print format % ('keepalive read', m.hexdigest())
+
+ fo = urllib2.urlopen(url)
+ foo = ''
+ while True:
+ f = fo.readline()
+ if f:
+ foo = foo + f
+ else: break
+ fo.close()
+ m = md5.new(foo)
+ print format % ('keepalive readline', m.hexdigest())
+
def comp(N, url):
    """Test helper: time N fetches of *url* with the stock urllib2
    handlers versus the keepalive handler and print the speedup."""
    print ' making %i connections to:\n %s' % (N, url)

    sys.stdout.write(' first using the normal urllib handlers')
    # first use normal opener
    opener = urllib2.build_opener()
    urllib2.install_opener(opener)
    t1 = fetch(N, url)
    print ' TIME: %.3f s' % t1

    sys.stdout.write(' now using the keepalive handler ')
    # now install the keepalive handler and try again
    opener = urllib2.build_opener(HTTPHandler())
    urllib2.install_opener(opener)
    t2 = fetch(N, url)
    print ' TIME: %.3f s' % t2
    print ' improvement factor: %.2f' % (t1 / t2)
+
def fetch(N, url, delay=0):
    """Fetch *url* N times (sleeping *delay* seconds between requests)
    and return the total elapsed wall-clock time in seconds.

    Warns if any fetch returned a body of a different length than the
    first one, which would indicate a truncated/corrupted read.
    """
    import time
    lens = []
    starttime = time.time()
    for i in range(N):
        if delay and i > 0:
            time.sleep(delay)
        fo = urllib2.urlopen(url)
        foo = fo.read()
        fo.close()
        lens.append(len(foo))
    diff = time.time() - starttime

    j = 0
    for i in lens[1:]:
        j = j + 1
        if not i == lens[0]:
            print "WARNING: inconsistent length on read %i: %i" % (j, i)

    return diff
+
def test_timeout(url):
    """Test helper: fetch *url*, wait 20s for the server to drop the
    idle keep-alive connection, then fetch again and verify the handler
    transparently reconnects and returns identical data."""
    global DEBUG
    dbbackup = DEBUG
    class FakeLogger(object):
        # route all log levels to stdout so the reconnect is visible
        def debug(self, msg, *args):
            print msg % args
        info = warning = error = debug
    DEBUG = FakeLogger()
    print " fetching the file to establish a connection"
    fo = urllib2.urlopen(url)
    data1 = fo.read()
    fo.close()

    i = 20
    print " waiting %i seconds for the server to close the connection" % i
    while i > 0:
        sys.stdout.write('\r %2i' % i)
        sys.stdout.flush()
        time.sleep(1)
        i -= 1
    sys.stderr.write('\r')

    print " fetching the file a second time"
    fo = urllib2.urlopen(url)
    data2 = fo.read()
    fo.close()

    if data1 == data2:
        print ' data are identical'
    else:
        print ' ERROR: DATA DIFFER'

    DEBUG = dbbackup
+
+
def test(url, N=10):
    """Run the full keepalive self-test suite against *url*:
    error handling, data continuity, speed comparison (N fetches),
    and the dropped-connection/reconnect check."""
    print "checking error hander (do this on a non-200)"
    try: error_handler(url)
    except IOError:
        print "exiting - exception will prevent further tests"
        sys.exit()
    print
    print "performing continuity test (making sure stuff isn't corrupted)"
    continuity(url)
    print
    print "performing speed comparison"
    comp(N, url)
    print
    print "performing dropped-connection check"
    test_timeout(url)
+
+if __name__ == '__main__':
+ import time
+ import sys
+ try:
+ N = int(sys.argv[1])
+ url = sys.argv[2]
+ except:
+ print "%s <integer> <url>" % sys.argv[0]
+ else:
+ test(url, N)
diff --git a/websdk/mercurial/locale/da/LC_MESSAGES/hg.mo b/websdk/mercurial/locale/da/LC_MESSAGES/hg.mo
new file mode 100644
index 0000000..20c7ec1
--- /dev/null
+++ b/websdk/mercurial/locale/da/LC_MESSAGES/hg.mo
Binary files differ
diff --git a/websdk/mercurial/locale/de/LC_MESSAGES/hg.mo b/websdk/mercurial/locale/de/LC_MESSAGES/hg.mo
new file mode 100644
index 0000000..88f9e80
--- /dev/null
+++ b/websdk/mercurial/locale/de/LC_MESSAGES/hg.mo
Binary files differ
diff --git a/websdk/mercurial/locale/el/LC_MESSAGES/hg.mo b/websdk/mercurial/locale/el/LC_MESSAGES/hg.mo
new file mode 100644
index 0000000..92eb59d
--- /dev/null
+++ b/websdk/mercurial/locale/el/LC_MESSAGES/hg.mo
Binary files differ
diff --git a/websdk/mercurial/locale/fr/LC_MESSAGES/hg.mo b/websdk/mercurial/locale/fr/LC_MESSAGES/hg.mo
new file mode 100644
index 0000000..efa851e
--- /dev/null
+++ b/websdk/mercurial/locale/fr/LC_MESSAGES/hg.mo
Binary files differ
diff --git a/websdk/mercurial/locale/it/LC_MESSAGES/hg.mo b/websdk/mercurial/locale/it/LC_MESSAGES/hg.mo
new file mode 100644
index 0000000..67c147b
--- /dev/null
+++ b/websdk/mercurial/locale/it/LC_MESSAGES/hg.mo
Binary files differ
diff --git a/websdk/mercurial/locale/ja/LC_MESSAGES/hg.mo b/websdk/mercurial/locale/ja/LC_MESSAGES/hg.mo
new file mode 100644
index 0000000..2daf2d5
--- /dev/null
+++ b/websdk/mercurial/locale/ja/LC_MESSAGES/hg.mo
Binary files differ
diff --git a/websdk/mercurial/locale/pt_BR/LC_MESSAGES/hg.mo b/websdk/mercurial/locale/pt_BR/LC_MESSAGES/hg.mo
new file mode 100644
index 0000000..282ff96
--- /dev/null
+++ b/websdk/mercurial/locale/pt_BR/LC_MESSAGES/hg.mo
Binary files differ
diff --git a/websdk/mercurial/locale/ro/LC_MESSAGES/hg.mo b/websdk/mercurial/locale/ro/LC_MESSAGES/hg.mo
new file mode 100644
index 0000000..c120a34
--- /dev/null
+++ b/websdk/mercurial/locale/ro/LC_MESSAGES/hg.mo
Binary files differ
diff --git a/websdk/mercurial/locale/ru/LC_MESSAGES/hg.mo b/websdk/mercurial/locale/ru/LC_MESSAGES/hg.mo
new file mode 100644
index 0000000..eda573c
--- /dev/null
+++ b/websdk/mercurial/locale/ru/LC_MESSAGES/hg.mo
Binary files differ
diff --git a/websdk/mercurial/locale/sv/LC_MESSAGES/hg.mo b/websdk/mercurial/locale/sv/LC_MESSAGES/hg.mo
new file mode 100644
index 0000000..1d53822
--- /dev/null
+++ b/websdk/mercurial/locale/sv/LC_MESSAGES/hg.mo
Binary files differ
diff --git a/websdk/mercurial/locale/zh_CN/LC_MESSAGES/hg.mo b/websdk/mercurial/locale/zh_CN/LC_MESSAGES/hg.mo
new file mode 100644
index 0000000..9f55d4e
--- /dev/null
+++ b/websdk/mercurial/locale/zh_CN/LC_MESSAGES/hg.mo
Binary files differ
diff --git a/websdk/mercurial/locale/zh_TW/LC_MESSAGES/hg.mo b/websdk/mercurial/locale/zh_TW/LC_MESSAGES/hg.mo
new file mode 100644
index 0000000..5615355
--- /dev/null
+++ b/websdk/mercurial/locale/zh_TW/LC_MESSAGES/hg.mo
Binary files differ
diff --git a/websdk/mercurial/localrepo.py b/websdk/mercurial/localrepo.py
index 74d1c61..ce89f5e 120000..100644
--- a/websdk/mercurial/localrepo.py
+++ b/websdk/mercurial/localrepo.py
@@ -1 +1,2101 @@
-/usr/share/pyshared/mercurial/localrepo.py \ No newline at end of file
+# localrepo.py - read/write repository class for mercurial
+#
+# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from node import bin, hex, nullid, nullrev, short
+from i18n import _
+import repo, changegroup, subrepo, discovery, pushkey
+import changelog, dirstate, filelog, manifest, context, bookmarks
+import lock, transaction, store, encoding
+import scmutil, util, extensions, hook, error, revset
+import match as matchmod
+import merge as mergemod
+import tags as tagsmod
+from lock import release
+import weakref, errno, os, time, inspect
+propertycache = util.propertycache
+filecache = scmutil.filecache
+
+class localrepository(repo.repository):
+ capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
+ 'known', 'getbundle'))
+ supportedformats = set(('revlogv1', 'generaldelta'))
+ supported = supportedformats | set(('store', 'fncache', 'shared',
+ 'dotencode'))
+
+ def __init__(self, baseui, path=None, create=False):
+ repo.repository.__init__(self)
+ self.root = os.path.realpath(util.expandpath(path))
+ self.path = os.path.join(self.root, ".hg")
+ self.origroot = path
+ self.auditor = scmutil.pathauditor(self.root, self._checknested)
+ self.opener = scmutil.opener(self.path)
+ self.wopener = scmutil.opener(self.root)
+ self.baseui = baseui
+ self.ui = baseui.copy()
+
+ try:
+ self.ui.readconfig(self.join("hgrc"), self.root)
+ extensions.loadall(self.ui)
+ except IOError:
+ pass
+
+ if not os.path.isdir(self.path):
+ if create:
+ if not os.path.exists(path):
+ util.makedirs(path)
+ util.makedir(self.path, notindexed=True)
+ requirements = ["revlogv1"]
+ if self.ui.configbool('format', 'usestore', True):
+ os.mkdir(os.path.join(self.path, "store"))
+ requirements.append("store")
+ if self.ui.configbool('format', 'usefncache', True):
+ requirements.append("fncache")
+ if self.ui.configbool('format', 'dotencode', True):
+ requirements.append('dotencode')
+ # create an invalid changelog
+ self.opener.append(
+ "00changelog.i",
+ '\0\0\0\2' # represents revlogv2
+ ' dummy changelog to prevent using the old repo layout'
+ )
+ if self.ui.configbool('format', 'generaldelta', False):
+ requirements.append("generaldelta")
+ requirements = set(requirements)
+ else:
+ raise error.RepoError(_("repository %s not found") % path)
+ elif create:
+ raise error.RepoError(_("repository %s already exists") % path)
+ else:
+ try:
+ requirements = scmutil.readrequires(self.opener, self.supported)
+ except IOError, inst:
+ if inst.errno != errno.ENOENT:
+ raise
+ requirements = set()
+
+ self.sharedpath = self.path
+ try:
+ s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
+ if not os.path.exists(s):
+ raise error.RepoError(
+ _('.hg/sharedpath points to nonexistent directory %s') % s)
+ self.sharedpath = s
+ except IOError, inst:
+ if inst.errno != errno.ENOENT:
+ raise
+
+ self.store = store.store(requirements, self.sharedpath, scmutil.opener)
+ self.spath = self.store.path
+ self.sopener = self.store.opener
+ self.sjoin = self.store.join
+ self.opener.createmode = self.store.createmode
+ self._applyrequirements(requirements)
+ if create:
+ self._writerequirements()
+
+
+ self._branchcache = None
+ self._branchcachetip = None
+ self.filterpats = {}
+ self._datafilters = {}
+ self._transref = self._lockref = self._wlockref = None
+
+ # A cache for various files under .hg/ that tracks file changes,
+ # (used by the filecache decorator)
+ #
+ # Maps a property name to its util.filecacheentry
+ self._filecache = {}
+
+ def _applyrequirements(self, requirements):
+ self.requirements = requirements
+ openerreqs = set(('revlogv1', 'generaldelta'))
+ self.sopener.options = dict((r, 1) for r in requirements
+ if r in openerreqs)
+
+ def _writerequirements(self):
+ reqfile = self.opener("requires", "w")
+ for r in self.requirements:
+ reqfile.write("%s\n" % r)
+ reqfile.close()
+
    def _checknested(self, path):
        """Determine if path is a legal nested repository.

        *path* is an absolute filesystem path; returns True only when it
        lies inside this repo's root and matches (or nests within) a
        subrepo recorded in the working copy's .hgsub state.
        """
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = os.sep.join(parts)
            if prefix in ctx.substate:
                if prefix == subpath:
                    # exact subrepo path: legal
                    return True
                else:
                    # nested below a subrepo: delegate to that subrepo
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                # walk up one directory component and retry
                parts.pop()
        return False
+
+ @filecache('bookmarks')
+ def _bookmarks(self):
+ return bookmarks.read(self)
+
+ @filecache('bookmarks.current')
+ def _bookmarkcurrent(self):
+ return bookmarks.readcurrent(self)
+
+ def _writebookmarks(self, marks):
+ bookmarks.write(self)
+
+ @filecache('00changelog.i', True)
+ def changelog(self):
+ c = changelog.changelog(self.sopener)
+ if 'HG_PENDING' in os.environ:
+ p = os.environ['HG_PENDING']
+ if p.startswith(self.root):
+ c.readpending('00changelog.i.a')
+ return c
+
+ @filecache('00manifest.i', True)
+ def manifest(self):
+ return manifest.manifest(self.sopener)
+
    @filecache('dirstate')
    def dirstate(self):
        """The repository's dirstate, cached against the .hg/dirstate file.

        The validate callback maps a working-copy parent that is missing
        from the changelog to nullid (warning once) instead of aborting,
        so a damaged dirstate stays usable.
        """
        warned = [0]
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)
+
+ def __getitem__(self, changeid):
+ if changeid is None:
+ return context.workingctx(self)
+ return context.changectx(self, changeid)
+
+ def __contains__(self, changeid):
+ try:
+ return bool(self.lookup(changeid))
+ except error.RepoLookupError:
+ return False
+
+ def __nonzero__(self):
+ return True
+
+ def __len__(self):
+ return len(self.changelog)
+
+ def __iter__(self):
+ for i in xrange(len(self)):
+ yield i
+
+ def set(self, expr, *args):
+ '''
+ Yield a context for each matching revision, after doing arg
+ replacement via revset.formatspec
+ '''
+
+ expr = revset.formatspec(expr, *args)
+ m = revset.match(None, expr)
+ for r in m(self, range(len(self))):
+ yield self[r]
+
+ def url(self):
+ return 'file:' + self.root
+
+ def hook(self, name, throw=False, **args):
+ return hook.hook(self.ui, self, name, throw, **args)
+
+ tag_disallowed = ':\r\n'
+
    def _tag(self, names, node, message, local, user, date, extra={}):
        """Low-level worker for tag(): write one or more tags for *node*.

        *names* may be a single string or a sequence of tag names.  Local
        tags go to .hg/localtags in the local charset; global tags are
        appended to .hgtags (UTF-8) and committed.  Runs the pretag hook
        before and the tag hook after; returns the tag commit's node, or
        None for local tags.

        NOTE(review): 'extra' has a mutable default dict; safe only as
        long as it is never mutated here or by self.commit().
        """
        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        # reject tag names containing any disallowed character (':\r\n')
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            # Append "<hex node> <name>" lines at the end of fp; when the
            # tag already exists, first re-record its old node so history
            # of the tag is preserved in the file.
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if self._tagscache.tagtypes and name in self._tagscache.tagtypes:
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        # commit only the .hgtags change
        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode
+
+ def tag(self, names, node, message, local, user, date):
+ '''tag a revision with one or more symbolic names.
+
+ names is a list of strings or, when adding a single tag, names may be a
+ string.
+
+ if local is True, the tags are stored in a per-repository file.
+ otherwise, they are stored in the .hgtags file, and a new
+ changeset is committed with the change.
+
+ keyword arguments:
+
+ local: whether to store tags in non-version-controlled file
+ (default False)
+
+ message: commit message to use if committing
+
+ user: name of user to use if committing
+
+ date: date tuple to use if committing'''
+
+ if not local:
+ for x in self.status()[:5]:
+ if '.hgtags' in x:
+ raise util.Abort(_('working copy of .hgtags is changed '
+ '(please commit .hgtags manually)'))
+
+ self.tags() # instantiate the cache
+ self._tag(names, node, message, local, user, date)
+
+ @propertycache
+ def _tagscache(self):
+ '''Returns a tagscache object that contains various tags related caches.'''
+
+ # This simplifies its cache management by having one decorated
+ # function (this one) and the rest simply fetch things from it.
+ class tagscache(object):
+ def __init__(self):
+ # These two define the set of tags for this repository. tags
+ # maps tag name to node; tagtypes maps tag name to 'global' or
+ # 'local'. (Global tags are defined by .hgtags across all
+ # heads, and local tags are defined in .hg/localtags.)
+ # They constitute the in-memory cache of tags.
+ self.tags = self.tagtypes = None
+
+ self.nodetagscache = self.tagslist = None
+
+ cache = tagscache()
+ cache.tags, cache.tagtypes = self._findtags()
+
+ return cache
+
+ def tags(self):
+ '''return a mapping of tag to node'''
+ return self._tagscache.tags
+
+ def _findtags(self):
+ '''Do the hard work of finding tags. Return a pair of dicts
+ (tags, tagtypes) where tags maps tag name to node, and tagtypes
+ maps tag name to a string like \'global\' or \'local\'.
+ Subclasses or extensions are free to add their own tags, but
+ should be aware that the returned dicts will be retained for the
+ duration of the localrepo object.'''
+
+ # XXX what tagtype should subclasses/extensions use? Currently
+ # mq and bookmarks add tags, but do not set the tagtype at all.
+ # Should each extension invent its own tag type? Should there
+ # be one tagtype for all such "virtual" tags? Or is the status
+ # quo fine?
+
+ alltags = {} # map tag name to (node, hist)
+ tagtypes = {}
+
+ tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
+ tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
+
+ # Build the return dicts. Have to re-encode tag names because
+ # the tags module always uses UTF-8 (in order not to lose info
+ # writing to the cache), but the rest of Mercurial wants them in
+ # local encoding.
+ tags = {}
+ for (name, (node, hist)) in alltags.iteritems():
+ if node != nullid:
+ try:
+ # ignore tags to unknown nodes
+ self.changelog.lookup(node)
+ tags[encoding.tolocal(name)] = node
+ except error.LookupError:
+ pass
+ tags['tip'] = self.changelog.tip()
+ tagtypes = dict([(encoding.tolocal(name), value)
+ for (name, value) in tagtypes.iteritems()])
+ return (tags, tagtypes)
+
+ def tagtype(self, tagname):
+ '''
+ return the type of the given tag. result can be:
+
+ 'local' : a local tag
+ 'global' : a global tag
+ None : tag does not exist
+ '''
+
+ return self._tagscache.tagtypes.get(tagname)
+
+ def tagslist(self):
+ '''return a list of tags ordered by revision'''
+ if not self._tagscache.tagslist:
+ l = []
+ for t, n in self.tags().iteritems():
+ r = self.changelog.rev(n)
+ l.append((r, t, n))
+ self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
+
+ return self._tagscache.tagslist
+
+ def nodetags(self, node):
+ '''return the tags associated with a node'''
+ if not self._tagscache.nodetagscache:
+ nodetagscache = {}
+ for t, n in self.tags().iteritems():
+ nodetagscache.setdefault(n, []).append(t)
+ for tags in nodetagscache.itervalues():
+ tags.sort()
+ self._tagscache.nodetagscache = nodetagscache
+ return self._tagscache.nodetagscache.get(node, [])
+
+ def nodebookmarks(self, node):
+ marks = []
+ for bookmark, n in self._bookmarks.iteritems():
+ if n == node:
+ marks.append(bookmark)
+ return sorted(marks)
+
+ def _branchtags(self, partial, lrev):
+ # TODO: rename this function?
+ tiprev = len(self) - 1
+ if lrev != tiprev:
+ ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
+ self._updatebranchcache(partial, ctxgen)
+ self._writebranchcache(partial, self.changelog.tip(), tiprev)
+
+ return partial
+
    def updatebranchcache(self):
        """Bring the in-memory branch-heads cache up to the current tip.

        Fast path: cache already matches tip.  Otherwise start from the
        on-disk cache (or the previous in-memory one) and fold in the
        revisions added since.
        """
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            return self._branchcache

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            # no usable previous state (e.g. after a strip): reread disk
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just tips)
        self._branchcache = partial
+
+ def branchmap(self):
+ '''returns a dictionary {branch: [branchheads]}'''
+ self.updatebranchcache()
+ return self._branchcache
+
+ def branchtags(self):
+ '''return a dict where branch names map to the tipmost head of
+ the branch, open heads come before closed'''
+ bt = {}
+ for bn, heads in self.branchmap().iteritems():
+ tip = heads[-1]
+ for h in reversed(heads):
+ if 'close' not in self.changelog.read(h)[5]:
+ tip = h
+ break
+ bt[bn] = tip
+ return bt
+
    def _readbranchcache(self):
        """Read .hg/cache/branchheads from disk.

        Returns (partial, last, lrev): a {branch: [head nodes]} dict plus
        the tip node/rev the file was written at.  Any problem (missing,
        unreadable or stale file) yields an empty cache so callers simply
        rebuild from scratch.
        """
        partial = {}
        try:
            f = self.opener("cache/branchheads")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            # first line: "<tip hex> <tip rev>"
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            # remaining lines: "<node hex> <branch label>"
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                label = encoding.tolocal(label.strip())
                partial.setdefault(label, []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            # a corrupt cache is not fatal: fall back to an empty one
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev
+
+ def _writebranchcache(self, branches, tip, tiprev):
+ try:
+ f = self.opener("cache/branchheads", "w", atomictemp=True)
+ f.write("%s %s\n" % (hex(tip), tiprev))
+ for label, nodes in branches.iteritems():
+ for node in nodes:
+ f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
+ f.close()
+ except (IOError, OSError):
+ pass
+
    def _updatebranchcache(self, partial, ctxgen):
        """Fold the changectxs from *ctxgen* into the {branch: heads}
        dict *partial* (modified in place), pruning entries that are no
        longer true heads."""
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            bheads.extend(newnodes)
            if len(bheads) <= 1:
                continue
            bheads = sorted(bheads, key=lambda x: self[x].rev())
            # starting from tip means fewer passes over reachable
            while newnodes:
                latest = newnodes.pop()
                if latest not in bheads:
                    continue
                minbhrev = self[bheads[0]].node()
                reachable = self.changelog.reachable(latest, minbhrev)
                reachable.remove(latest)
                if reachable:
                    # anything reachable from 'latest' is not a head
                    bheads = [b for b in bheads if b not in reachable]
            partial[branch] = bheads
+
    def lookup(self, key):
        """Resolve *key* to a binary changelog node.

        Accepted forms, tried in order: integer revision, '.', 'null',
        'tip', full hex/binary node, bookmark, tag, branch name, and
        finally an unambiguous hex prefix.

        Raises error.RepoLookupError when nothing matches, or
        error.Abort when key is an unknown dirstate parent.
        """
        if isinstance(key, int):
            return self.changelog.node(key)
        elif key == '.':
            return self.dirstate.p1()
        elif key == 'null':
            return nullid
        elif key == 'tip':
            return self.changelog.tip()
        n = self.changelog._match(key)
        if n:
            return n
        if key in self._bookmarks:
            return self._bookmarks[key]
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n

        # can't find key, check if it might have come from damaged dirstate
        if key in self.dirstate.parents():
            raise error.Abort(_("working directory has unknown parent '%s'!")
                              % short(key))
        try:
            # show binary nodes as hex in the error message
            if len(key) == 20:
                key = hex(key)
        except TypeError:
            pass
        raise error.RepoLookupError(_("unknown revision '%s'") % key)
+
+ def lookupbranch(self, key, remote=None):
+ repo = remote or self
+ if key in repo.branchmap():
+ return key
+
+ repo = (remote and remote.local()) and remote or self
+ return repo[key].branch()
+
+ def known(self, nodes):
+ nm = self.changelog.nodemap
+ return [(n in nm) for n in nodes]
+
+ def local(self):
+ return self
+
+ def join(self, f):
+ return os.path.join(self.path, f)
+
+ def wjoin(self, f):
+ return os.path.join(self.root, f)
+
+ def file(self, f):
+ if f[0] == '/':
+ f = f[1:]
+ return filelog.filelog(self.sopener, f)
+
+ def changectx(self, changeid):
+ return self[changeid]
+
+ def parents(self, changeid=None):
+ '''get list of changectxs for parents of changeid'''
+ return self[changeid].parents()
+
+ def filectx(self, path, changeid=None, fileid=None):
+ """changeid can be a changeset revision, node, or tag.
+ fileid can be a file revision or node."""
+ return context.filectx(self, path, changeid, fileid)
+
+ def getcwd(self):
+ return self.dirstate.getcwd()
+
+ def pathto(self, f, cwd=None):
+ return self.dirstate.pathto(f, cwd)
+
+ def wfile(self, f, mode='r'):
+ return self.wopener(f, mode)
+
+ def _link(self, f):
+ return os.path.islink(self.wjoin(f))
+
    def _loadfilter(self, filter):
        """Build and memoize the (matcher, filterfn, params) triples for
        the config section *filter* ('encode' or 'decode').

        Commands starting with a registered data-filter name use that
        Python filter; anything else is run as a shell command through
        util.filter.  Results are cached in self.filterpats.
        """
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    # '!' disables filtering for this pattern
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    # fall back to piping the data through a shell command
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]
+
+ def _filter(self, filterpats, filename, data):
+ for mf, fn, cmd in filterpats:
+ if mf(filename):
+ self.ui.debug("filtering %s through %s\n" % (filename, cmd))
+ data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
+ break
+
+ return data
+
+ @propertycache
+ def _encodefilterpats(self):
+ return self._loadfilter('encode')
+
+ @propertycache
+ def _decodefilterpats(self):
+ return self._loadfilter('decode')
+
+ def adddatafilter(self, name, filter):
+ self._datafilters[name] = filter
+
+ def wread(self, filename):
+ if self._link(filename):
+ data = os.readlink(self.wjoin(filename))
+ else:
+ data = self.wopener.read(filename)
+ return self._filter(self._encodefilterpats, filename, data)
+
+ def wwrite(self, filename, data, flags):
+ data = self._filter(self._decodefilterpats, filename, data)
+ if 'l' in flags:
+ self.wopener.symlink(data, filename)
+ else:
+ self.wopener.write(filename, data)
+ if 'x' in flags:
+ util.setflags(self.wjoin(filename), False, True)
+
+ def wwritedata(self, filename, data):
+ return self._filter(self._decodefilterpats, filename, data)
+
    def transaction(self, desc):
        """Open (or nest into) a store transaction described by *desc*.

        Returns a transaction object; when one is already running, a
        nested transaction is returned instead.  Aborts if an abandoned
        journal from a crashed transaction is found on disk.
        """
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        # snapshot dirstate/branch/bookmarks so rollback can restore them
        journalfiles = self._writejournal(desc)
        renames = [(x, undoname(x)) for x in journalfiles]

        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        # weakref: the transaction must not be kept alive by the repo
        self._transref = weakref.ref(tr)
        return tr
+
+ def _writejournal(self, desc):
+ # save dirstate for rollback
+ try:
+ ds = self.opener.read("dirstate")
+ except IOError:
+ ds = ""
+ self.opener.write("journal.dirstate", ds)
+ self.opener.write("journal.branch",
+ encoding.fromlocal(self.dirstate.branch()))
+ self.opener.write("journal.desc",
+ "%d\n%s\n" % (len(self), desc))
+
+ bkname = self.join('bookmarks')
+ if os.path.exists(bkname):
+ util.copyfile(bkname, self.join('journal.bookmarks'))
+ else:
+ self.opener.write('journal.bookmarks', '')
+
+ return (self.sjoin('journal'), self.join('journal.dirstate'),
+ self.join('journal.branch'), self.join('journal.desc'),
+ self.join('journal.bookmarks'))
+
+ def recover(self):
+ lock = self.lock()
+ try:
+ if os.path.exists(self.sjoin("journal")):
+ self.ui.status(_("rolling back interrupted transaction\n"))
+ transaction.rollback(self.sopener, self.sjoin("journal"),
+ self.ui.warn)
+ self.invalidate()
+ return True
+ else:
+ self.ui.warn(_("no interrupted transaction available\n"))
+ return False
+ finally:
+ lock.release()
+
+ def rollback(self, dryrun=False, force=False):
+ wlock = lock = None
+ try:
+ wlock = self.wlock()
+ lock = self.lock()
+ if os.path.exists(self.sjoin("undo")):
+ return self._rollback(dryrun, force)
+ else:
+ self.ui.warn(_("no rollback information available\n"))
+ return 1
+ finally:
+ release(lock, wlock)
+
    def _rollback(self, dryrun, force):
        """Worker for rollback(): undo the last transaction.

        Reads undo.desc to describe what is being undone, refuses (unless
        *force*) to roll back a commit while another revision is checked
        out, then restores the store and — when the working-copy parents
        vanished — the saved dirstate/branch.  Returns 0 on success.
        """
        ui = self.ui
        try:
            # undo.desc format: "<old length>\n<desc>[\n<detail>]"
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
        if os.path.exists(self.join('undo.bookmarks')):
            util.rename(self.join('undo.bookmarks'),
                        self.join('bookmarks'))
        self.invalidate()

        # only restore dirstate/branch when the rollback removed the
        # working copy's parent revision(s)
        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            util.rename(self.join('undo.dirstate'), self.join('dirstate'))
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(branch)
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            self.destroyed()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        return 0
+
+ def invalidatecaches(self):
+ try:
+ delattr(self, '_tagscache')
+ except AttributeError:
+ pass
+
+ self._branchcache = None # in UTF-8
+ self._branchcachetip = None
+
+ def invalidatedirstate(self):
+ '''Invalidates the dirstate, causing the next call to dirstate
+ to check if it was modified since the last time it was read,
+ rereading it if it has.
+
+ This is different to dirstate.invalidate() that it doesn't always
+ rereads the dirstate. Use dirstate.invalidate() if you want to
+ explicitly read the dirstate again (i.e. restoring it to a previous
+ known good state).'''
+ try:
+ delattr(self, 'dirstate')
+ except AttributeError:
+ pass
+
+ def invalidate(self):
+ for k in self._filecache:
+ # dirstate is invalidated separately in invalidatedirstate()
+ if k == 'dirstate':
+ continue
+
+ try:
+ delattr(self, k)
+ except AttributeError:
+ pass
+ self.invalidatecaches()
+
+ def _lock(self, lockname, wait, releasefn, acquirefn, desc):
+ try:
+ l = lock.lock(lockname, 0, releasefn, desc=desc)
+ except error.LockHeld, inst:
+ if not wait:
+ raise
+ self.ui.warn(_("waiting for lock on %s held by %r\n") %
+ (desc, inst.locker))
+ # default to 600 seconds timeout
+ l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
+ releasefn, desc=desc)
+ if acquirefn:
+ acquirefn()
+ return l
+
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.)'''
        # reuse the existing lock if it is still held (re-entrant)
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            # on release: flush the store and refresh filecache entries;
            # dirstate is handled by wlock's releasefn instead
            self.store.write()
            for k, ce in self._filecache.items():
                if k == 'dirstate':
                    continue
                ce.refresh()

        l = self._lock(self.sjoin("lock"), wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l
+
def wlock(self, wait=True):
    '''Lock the non-store parts of the repository (everything under
    .hg except .hg/store) and return a weak reference to the lock.
    Use this before modifying files in .hg.'''
    # reuse an existing working-dir lock held by this process
    l = self._wlockref and self._wlockref()
    if l is not None and l.held:
        l.lock()
        return l

    def unlock():
        # write the dirstate out and refresh its cache entry on release
        self.dirstate.write()
        ce = self._filecache.get('dirstate')
        if ce:
            ce.refresh()

    l = self._lock(self.join("wlock"), wait, unlock,
                   self.invalidatedirstate, _('working directory of %s') %
                   self.origroot)
    # weak reference only, so the lock can be garbage collected/released
    self._wlockref = weakref.ref(l)
    return l
+
def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
    """
    commit an individual file as part of a larger transaction

    fctx -- filectx of the file being committed
    manifest1/manifest2 -- manifests of the two parents
    linkrev -- revision number the new filelog entry links back to
    tr -- transaction (proxy) the write is part of
    changelist -- list mutated in place: the filename is appended when
    the file actually changed
    Returns the new filelog node (or the unchanged parent node).
    """

    fname = fctx.path()
    text = fctx.data()
    flog = self.file(fname)
    fparent1 = manifest1.get(fname, nullid)
    fparent2 = fparent2o = manifest2.get(fname, nullid)

    meta = {}
    copy = fctx.renamed()
    if copy and copy[0] != fname:
        # Mark the new revision of this file as a copy of another
        # file. This copy data will effectively act as a parent
        # of this new revision. If this is a merge, the first
        # parent will be the nullid (meaning "look up the copy data")
        # and the second one will be the other parent. For example:
        #
        # 0 --- 1 --- 3 rev1 changes file foo
        # \ / rev2 renames foo to bar and changes it
        # \- 2 -/ rev3 should have bar with all changes and
        # should record that bar descends from
        # bar in rev2 and foo in rev1
        #
        # this allows this merge to succeed:
        #
        # 0 --- 1 --- 3 rev4 reverts the content change from rev2
        # \ / merging rev3 and rev4 should use bar@rev2
        # \- 2 --- 4 as the merge base
        #

        cfname = copy[0]
        crev = manifest1.get(cfname)
        newfparent = fparent2

        if manifest2: # branch merge
            if fparent2 == nullid or crev is None: # copied on remote side
                if cfname in manifest2:
                    crev = manifest2[cfname]
                    newfparent = fparent1

        # find source in nearest ancestor if we've lost track
        if not crev:
            self.ui.debug(" %s: searching for copy revision for %s\n" %
                          (fname, cfname))
            for ancestor in self[None].ancestors():
                if cfname in ancestor:
                    crev = ancestor[cfname].filenode()
                    break

        if crev:
            self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
            meta["copy"] = cfname
            meta["copyrev"] = hex(crev)
            fparent1, fparent2 = nullid, newfparent
        else:
            self.ui.warn(_("warning: can't find ancestor for '%s' "
                           "copied from '%s'!\n") % (fname, cfname))

    elif fparent2 != nullid:
        # is one parent an ancestor of the other?
        fparentancestor = flog.ancestor(fparent1, fparent2)
        if fparentancestor == fparent1:
            fparent1, fparent2 = fparent2, nullid
        elif fparentancestor == fparent2:
            fparent2 = nullid

    # is the file changed?
    if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
        changelist.append(fname)
        return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

    # are just the flags changed during merge?
    if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
        changelist.append(fname)

    return fparent1
+
def commit(self, text="", user=None, date=None, match=None, force=False,
           editor=False, extra=None):
    """Add a new revision to current repository.

    Revision information is gathered from the working directory,
    match can be used to filter the committed files. If editor is
    supplied, it is called to get a commit message.

    Returns the node of the new changeset, or None when there was
    nothing to commit.
    """
    # NOTE: the default used to be ``extra={}``; a shared mutable default
    # dict is a classic Python pitfall (a caller mutating it would leak
    # state into every later call), so we use a None sentinel instead.
    if extra is None:
        extra = {}

    def fail(f, msg):
        raise util.Abort('%s: %s' % (f, msg))

    if not match:
        match = matchmod.always(self.root, '')

    if not force:
        vdirs = []
        match.dir = vdirs.append
        match.bad = fail

    wlock = self.wlock()
    try:
        wctx = self[None]
        merge = len(wctx.parents()) > 1

        # a merge commit must include everything that changed
        if (not force and merge and match and
            (match.files() or match.anypats())):
            raise util.Abort(_('cannot partially commit a merge '
                               '(do not specify files or patterns)'))

        changes = self.status(match=match, clean=force)
        if force:
            changes[0].extend(changes[6]) # mq may commit unchanged files

        # check subrepos
        subs = []
        removedsubs = set()
        if '.hgsub' in wctx:
            # only manage subrepos and .hgsubstate if .hgsub is present
            for p in wctx.parents():
                removedsubs.update(s for s in p.substate if match(s))
            for s in wctx.substate:
                removedsubs.discard(s)
                if match(s) and wctx.sub(s).dirty():
                    subs.append(s)
            if (subs or removedsubs):
                if (not match('.hgsub') and
                    '.hgsub' in (wctx.modified() + wctx.added())):
                    raise util.Abort(
                        _("can't commit subrepos without .hgsub"))
                if '.hgsubstate' not in changes[0]:
                    changes[0].insert(0, '.hgsubstate')
                if '.hgsubstate' in changes[2]:
                    changes[2].remove('.hgsubstate')
        elif '.hgsub' in changes[2]:
            # clean up .hgsubstate when .hgsub is removed
            if ('.hgsubstate' in wctx and
                '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                changes[2].insert(0, '.hgsubstate')

        if subs and not self.ui.configbool('ui', 'commitsubrepos', False):
            changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
            if changedsubs:
                raise util.Abort(_("uncommitted changes in subrepo %s")
                                 % changedsubs[0],
                                 hint=_("use --subrepos for recursive commit"))

        # make sure all explicit patterns are matched
        if not force and match.files():
            matched = set(changes[0] + changes[1] + changes[2])

            for f in match.files():
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in changes[3]: # missing
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

        # nothing to commit (unless forced, closing a branch head, merging,
        # or switching named branch)
        if (not force and not extra.get("close") and not merge
            and not (changes[0] or changes[1] or changes[2])
            and wctx.branch() == wctx.p1().branch()):
            return None

        ms = mergemod.mergestate(self)
        for f in changes[0]:
            if f in ms and ms[f] == 'u':
                raise util.Abort(_("unresolved merge conflicts "
                                   "(see hg help resolve)"))

        cctx = context.workingctx(self, text, user, date, extra, changes)
        if editor:
            cctx._text = editor(self, cctx, subs)
        edited = (text != cctx._text)

        # commit subs
        if subs or removedsubs:
            state = wctx.substate.copy()
            for s in sorted(subs):
                sub = wctx.sub(s)
                self.ui.status(_('committing subrepository %s\n') %
                               subrepo.subrelpath(sub))
                sr = sub.commit(cctx._text, user, date)
                state[s] = (state[s][0], sr)
            subrepo.writestate(self, state)

        # Save commit message in case this transaction gets rolled back
        # (e.g. by a pretxncommit hook). Leave the content alone on
        # the assumption that the user will use the same editor again.
        msgfn = self.savecommitmessage(cctx._text)

        p1, p2 = self.dirstate.parents()
        hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
        try:
            self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
            ret = self.commitctx(cctx, True)
        except:
            # tell the user where the edited message was saved, then
            # re-raise so rollback/abort handling proceeds normally
            if edited:
                self.ui.write(
                    _('note: commit message saved in %s\n') % msgfn)
            raise

        # update bookmarks, dirstate and mergestate
        bookmarks.update(self, p1, ret)
        for f in changes[0] + changes[1]:
            self.dirstate.normal(f)
        for f in changes[2]:
            self.dirstate.drop(f)
        self.dirstate.setparents(ret)
        ms.reset()
    finally:
        wlock.release()

    self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
    return ret
+
def commitctx(self, ctx, error=False):
    """Add a new revision to current repository.
    Revision information is passed via the context argument.

    When ``error`` is true, an IOError on an individual file is fatal
    rather than treated as a removal. Returns the new changelog node.
    """

    tr = lock = None
    removed = list(ctx.removed())
    p1, p2 = ctx.p1(), ctx.p2()
    user = ctx.user()

    lock = self.lock()
    try:
        tr = self.transaction("commit")
        trp = weakref.proxy(tr)

        if ctx.files():
            m1 = p1.manifest().copy()
            m2 = p2.manifest()

            # check in files
            new = {}
            changed = []
            linkrev = len(self)
            for f in sorted(ctx.modified() + ctx.added()):
                self.ui.note(f + "\n")
                try:
                    fctx = ctx[f]
                    new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                              changed)
                    m1.set(f, fctx.flags())
                except OSError, inst:
                    self.ui.warn(_("trouble committing %s!\n") % f)
                    raise
                except IOError, inst:
                    # a missing file (ENOENT) is normally treated as a
                    # removal unless the caller asked for strict errors
                    errcode = getattr(inst, 'errno', errno.ENOENT)
                    if error or errcode and errcode != errno.ENOENT:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        removed.append(f)

            # update manifest
            m1.update(new)
            removed = [f for f in sorted(removed) if f in m1 or f in m2]
            drop = [f for f in removed if f in m1]
            for f in drop:
                del m1[f]
            mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                   p2.manifestnode(), (new, drop))
            files = changed + removed
        else:
            # no file changes: reuse the first parent's manifest
            mn = p1.manifestnode()
            files = []

        # update changelog
        self.changelog.delayupdate()
        n = self.changelog.add(mn, files, ctx.description(),
                               trp, p1.node(), p2.node(),
                               user, ctx.date(), ctx.extra().copy())
        # lazily expose pending data to the pretxncommit hook
        p = lambda: self.changelog.writepending() and self.root or ""
        xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
        self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                  parent2=xp2, pending=p)
        self.changelog.finalize(trp)
        tr.close()

        if self._branchcache:
            self.updatebranchcache()
        return n
    finally:
        if tr:
            tr.release()
        lock.release()
+
def destroyed(self):
    """Inform the repository that nodes have been destroyed.

    Intended for use by strip and rollback, so there's a common place
    for anything that has to be done after destroying history.

    XXX it might be nice if we could take the list of destroyed nodes,
    but there is no easy way for rollback() to provide that.

    Invalidating now keeps the persistent tag cache honest: it only has
    to worry about destroyed heads immediately after a strip/rollback,
    which in turn guarantees that "cachetip == currenttip" (comparing
    both rev and node) always means no nodes were added or destroyed.

    XXX this is suboptimal when qrefresh'ing: we strip the current head,
    refresh the tag cache, then immediately add a new head. But doing it
    this way appears necessary for the "instant tag cache retrieval"
    case to work.
    """
    self.invalidatecaches()
+
def walk(self, match, node=None):
    """Recursively find all files matched by ``match`` in the given
    changeset (or in the working directory when ``node`` is None)."""
    ctx = self[node]
    return ctx.walk(match)
+
def status(self, node1='.', node2=None, match=None,
           ignored=False, clean=False, unknown=False,
           listsubrepos=False):
    """return status of files between two nodes or node and working directory

    If node1 is None, use the first dirstate parent instead.
    If node2 is None, compare node1 with working directory.

    Returns a 7-tuple of sorted lists:
    (modified, added, removed, deleted, unknown, ignored, clean).
    The unknown/ignored/clean lists are only populated when the
    corresponding flag is set.
    """

    def mfmatches(ctx):
        # restrict a copy of ctx's manifest to files accepted by match
        mf = ctx.manifest().copy()
        for fn in mf.keys():
            if not match(fn):
                del mf[fn]
        return mf

    if isinstance(node1, context.changectx):
        ctx1 = node1
    else:
        ctx1 = self[node1]
    if isinstance(node2, context.changectx):
        ctx2 = node2
    else:
        ctx2 = self[node2]

    working = ctx2.rev() is None
    parentworking = working and ctx1 == self['.']
    match = match or matchmod.always(self.root, self.getcwd())
    listignored, listclean, listunknown = ignored, clean, unknown

    # load earliest manifest first for caching reasons
    if not working and ctx2.rev() < ctx1.rev():
        ctx2.manifest()

    if not parentworking:
        def bad(f, msg):
            # only warn about files truly absent from ctx1
            if f not in ctx1:
                self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
        match.bad = bad

    if working: # we need to scan the working dir
        subrepos = []
        if '.hgsub' in self.dirstate:
            subrepos = ctx2.substate.keys()
        s = self.dirstate.status(match, subrepos, listignored,
                                 listclean, listunknown)
        cmp, modified, added, removed, deleted, unknown, ignored, clean = s

        # check for any possibly clean files
        if parentworking and cmp:
            fixup = []
            # do a full compare of any files that might have changed
            for f in sorted(cmp):
                if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                    or ctx1[f].cmp(ctx2[f])):
                    modified.append(f)
                else:
                    fixup.append(f)

            # update dirstate for files that are actually clean
            if fixup:
                if listclean:
                    clean += fixup

                try:
                    # updating the dirstate is optional
                    # so we don't wait on the lock
                    wlock = self.wlock(False)
                    try:
                        for f in fixup:
                            self.dirstate.normal(f)
                    finally:
                        wlock.release()
                except error.LockError:
                    pass

    if not parentworking:
        mf1 = mfmatches(ctx1)
        if working:
            # we are comparing working dir against non-parent
            # generate a pseudo-manifest for the working dir
            mf2 = mfmatches(self['.'])
            for f in cmp + modified + added:
                mf2[f] = None
                mf2.set(f, ctx2.flags(f))
            for f in removed:
                if f in mf2:
                    del mf2[f]
        else:
            # we are comparing two revisions
            deleted, unknown, ignored = [], [], []
            mf2 = mfmatches(ctx2)

        modified, added, clean = [], [], []
        for fn in mf2:
            if fn in mf1:
                # present in both: modified, or clean; a None node in mf2
                # (pseudo-manifest) forces a content compare
                if (fn not in deleted and
                    (mf1.flags(fn) != mf2.flags(fn) or
                     (mf1[fn] != mf2[fn] and
                      (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                    modified.append(fn)
                elif listclean:
                    clean.append(fn)
                del mf1[fn]
            elif fn not in deleted:
                added.append(fn)
        # anything left in mf1 was removed in ctx2
        removed = mf1.keys()

    if working and modified and not self.dirstate._checklink:
        # Symlink placeholders may get non-symlink-like contents
        # via user error or dereferencing by NFS or Samba servers,
        # so we filter out any placeholders that don't look like a
        # symlink
        sane = []
        for f in modified:
            if ctx2.flags(f) == 'l':
                d = ctx2[f].data()
                if len(d) >= 1024 or '\n' in d or util.binary(d):
                    self.ui.debug('ignoring suspect symlink placeholder'
                                  ' "%s"\n' % f)
                    continue
            sane.append(f)
        modified = sane

    r = modified, added, removed, deleted, unknown, ignored, clean

    if listsubrepos:
        for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
            if working:
                rev2 = None
            else:
                rev2 = ctx2.substate[subpath][1]
            try:
                submatch = matchmod.narrowmatcher(subpath, match)
                s = sub.status(rev2, match=submatch, ignored=listignored,
                               clean=listclean, unknown=listunknown,
                               listsubrepos=True)
                for rfiles, sfiles in zip(r, s):
                    rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
            except error.LookupError:
                self.ui.status(_("skipping missing subrepository: %s\n")
                               % subpath)

    for l in r:
        l.sort()
    return r
+
def heads(self, start=None):
    """Return the changelog heads reachable from ``start`` (all heads
    when start is None), ordered by revision number, newest first."""
    bynode = self.changelog.heads(start)
    torev = self.changelog.rev
    # the branch cache hands heads back lowest-first; callers expect
    # rev-descending order
    return sorted(bynode, key=torev, reverse=True)
+
def branchheads(self, branch=None, start=None, closed=False):
    '''return a (possibly filtered) list of heads for the given branch

    Heads are returned in topological order, from newest to oldest.
    If branch is None, use the dirstate branch.
    If start is not None, return only heads reachable from start.
    If closed is True, return heads that are marked as closed as well.
    '''
    if branch is None:
        branch = self[None].branch()
    branches = self.branchmap()
    if branch not in branches:
        return []
    # the cache returns heads ordered lowest to highest
    bheads = list(reversed(branches[branch]))
    if start is not None:
        # filter out the heads that cannot be reached from startrev
        fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
        bheads = [h for h in bheads if h in fbheads]
    if not closed:
        # drop heads whose closing changeset carries the 'close' extra
        bheads = [h for h in bheads if
                  ('close' not in self.changelog.read(h)[5])]
    return bheads
+
def branches(self, nodes):
    """For every starting node (default: the tip), follow first parents
    back to the nearest merge or root and return a list of
    (start, stop, p1, p2) tuples describing each linear run."""
    cl = self.changelog
    if not nodes:
        nodes = [cl.tip()]
    result = []
    for start in nodes:
        node = start
        while True:
            p1, p2 = cl.parents(node)
            if p2 != nullid or p1 == nullid:
                # stop walking at a merge changeset or at the root
                result.append((start, node, p1, p2))
                break
            node = p1
    return result
+
def between(self, pairs):
    """For each (top, bottom) pair, return the nodes found at
    exponentially growing first-parent distances (1, 2, 4, ...) while
    walking from top toward bottom; one list per pair."""
    result = []

    for top, bottom in pairs:
        sample, node, steps, nextmark = [], top, 0, 1

        while node != bottom and node != nullid:
            parent = self.changelog.parents(node)[0]
            if steps == nextmark:
                # record this node and double the distance to the next one
                sample.append(node)
                nextmark *= 2
            node = parent
            steps += 1

        result.append(sample)

    return result
+
def pull(self, remote, heads=None, force=False):
    """Pull changes from ``remote`` into this repository.

    heads -- optional list of remote heads to limit the pull to
    force -- passed through to discovery (pull even unrelated repos)
    Returns the integer result of addchangegroup(), or 0 when there
    was nothing to fetch.
    """
    lock = self.lock()
    try:
        tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                           force=force)
        common, fetch, rheads = tmp
        if not fetch:
            self.ui.status(_("no changes found\n"))
            result = 0
        else:
            if heads is None and list(common) == [nullid]:
                self.ui.status(_("requesting all changes\n"))
            elif heads is None and remote.capable('changegroupsubset'):
                # issue1320, avoid a race if remote changed after discovery
                heads = rheads

            # pick the richest protocol the remote supports
            if remote.capable('getbundle'):
                cg = remote.getbundle('pull', common=common,
                                      heads=heads or rheads)
            elif heads is None:
                cg = remote.changegroup(fetch, 'pull')
            elif not remote.capable('changegroupsubset'):
                raise util.Abort(_("partial pull cannot be done because "
                                   "other repository doesn't support "
                                   "changegroupsubset."))
            else:
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            result = self.addchangegroup(cg, 'pull', remote.url(),
                                         lock=lock)
    finally:
        lock.release()

    return result
+
def checkpush(self, force, revs):
    """Validate a push before it happens.

    Extensions can override this function if additional checks have to
    be performed before pushing, or call it if they override the push
    command. The default implementation accepts every push.
    """
+
def push(self, remote, force=False, revs=None, newbranch=False):
    '''Push outgoing changesets (limited by revs) from the current
    repository to remote. Return an integer:
    - 0 means HTTP error *or* nothing to push
    - 1 means we pushed and remote head count is unchanged *or*
    we have outgoing changesets but refused to push
    - other values as described by addchangegroup()
    '''
    # there are two ways to push to remote repo:
    #
    # addchangegroup assumes local user can lock remote
    # repo (local filesystem, old ssh servers).
    #
    # unbundle assumes local user cannot lock remote repo (new ssh
    # servers, http servers).

    self.checkpush(force, revs)
    lock = None
    unbundle = remote.capable('unbundle')
    if not unbundle:
        lock = remote.lock()
    try:
        # cg is None when there is nothing to push
        cg, remote_heads = discovery.prepush(self, remote, force, revs,
                                             newbranch)
        ret = remote_heads
        if cg is not None:
            if unbundle:
                # local repo finds heads on server, finds out what
                # revs it must push. once revs transferred, if server
                # finds it has different heads (someone else won
                # commit/push race), server aborts.
                if force:
                    remote_heads = ['force']
                # ssh: return remote's addchangegroup()
                # http: return remote's addchangegroup() or 0 for error
                ret = remote.unbundle(cg, remote_heads, 'push')
            else:
                # we return an integer indicating remote head count change
                ret = remote.addchangegroup(cg, 'push', self.url(),
                                            lock=lock)
    finally:
        if lock is not None:
            lock.release()

    # push any local bookmarks whose remote counterpart is an ancestor
    self.ui.debug("checking for updated bookmarks\n")
    rb = remote.listkeys('bookmarks')
    for k in rb.keys():
        if k in self._bookmarks:
            nr, nl = rb[k], hex(self._bookmarks[k])
            if nr in self:
                cr = self[nr]
                cl = self[nl]
                if cl in cr.descendants():
                    r = remote.pushkey('bookmarks', k, nr, nl)
                    if r:
                        self.ui.status(_("updating bookmark %s\n") % k)
                    else:
                        self.ui.warn(_('updating bookmark %s'
                                       ' failed!\n') % k)

    return ret
+
def changegroupinfo(self, nodes, source):
    """Report how many changesets are about to be bundled; list each
    one when debug output is enabled."""
    ui = self.ui
    if ui.verbose or source == 'bundle':
        ui.status(_("%d changesets found\n") % len(nodes))
    if ui.debugflag:
        ui.debug("list of changesets:\n")
        for node in nodes:
            ui.debug("%s\n" % hex(node))
+
def changegroupsubset(self, bases, heads, source):
    """Compute a changegroup consisting of all the nodes that are
    descendants of any of the bases and ancestors of any of the heads.
    Return a chunkbuffer object whose read() method will return
    successive changegroup chunks.

    It is fairly complex as determining which filenodes and which
    manifest nodes need to be included for the changeset to be complete
    is non-trivial.

    Another wrinkle is doing the reverse, figuring out which changeset in
    the changegroup a particular filenode or manifestnode belongs to.
    """
    cl = self.changelog
    if not bases:
        bases = [nullid]
    csets, bases, heads = cl.nodesbetween(bases, heads)
    # We assume that all ancestors of bases are known
    common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
    return self._changegroupsubset(common, csets, heads, source)
+
def getbundle(self, source, heads=None, common=None):
    """Like changegroupsubset, but returns the set difference between the
    ancestors of heads and the ancestors common.

    If heads is None, use the local heads. If common is None, use [nullid].

    The nodes in common might not all be known locally due to the way the
    current discovery protocol works. Returns None when there is nothing
    missing to bundle.
    """
    cl = self.changelog
    if common:
        # silently drop nodes we do not know about
        nm = cl.nodemap
        common = [n for n in common if n in nm]
    else:
        common = [nullid]
    if not heads:
        heads = cl.heads()
    common, missing = cl.findcommonmissing(common, heads)
    if not missing:
        return None
    return self._changegroupsubset(common, missing, heads, source)
+
def _changegroupsubset(self, commonrevs, csets, heads, source):
    """Build a changegroup for ``csets`` assuming the recipient already
    has every revision whose linkrev is in ``commonrevs``.

    Falls back to the simpler _changegroup() fast path when all repo
    heads are requested. Returns an unbundler wrapping a lazy chunk
    generator.
    """

    cl = self.changelog
    mf = self.manifest
    mfs = {} # needed manifests
    fnodes = {} # needed file nodes
    changedfiles = set()
    # mutable cells shared with the lookup callback below
    fstate = ['', {}]
    count = [0]

    # can we go through the fast path ?
    heads.sort()
    if heads == sorted(self.heads()):
        return self._changegroup(csets, source)

    # slow path
    self.hook('preoutgoing', throw=True, source=source)
    self.changegroupinfo(csets, source)

    # filter any nodes that claim to be part of the known set
    def prune(revlog, missing):
        return [n for n in missing
                if revlog.linkrev(revlog.rev(n)) not in commonrevs]

    def lookup(revlog, x):
        # called back by the bundler for every node it emits; doubles as
        # the place where we harvest the manifests/filenodes needed later
        if revlog == cl:
            c = cl.read(x)
            changedfiles.update(c[3])
            mfs.setdefault(c[0], x)
            count[0] += 1
            self.ui.progress(_('bundling'), count[0],
                             unit=_('changesets'), total=len(csets))
            return x
        elif revlog == mf:
            clnode = mfs[x]
            mdata = mf.readfast(x)
            for f in changedfiles:
                if f in mdata:
                    fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
            count[0] += 1
            self.ui.progress(_('bundling'), count[0],
                             unit=_('manifests'), total=len(mfs))
            return mfs[x]
        else:
            self.ui.progress(
                _('bundling'), count[0], item=fstate[0],
                unit=_('files'), total=len(changedfiles))
            return fstate[1][x]

    bundler = changegroup.bundle10(lookup)
    reorder = self.ui.config('bundle', 'reorder', 'auto')
    if reorder == 'auto':
        reorder = None
    else:
        reorder = util.parsebool(reorder)

    def gengroup():
        # Create a changenode group generator that will call our functions
        # back to lookup the owning changenode and collect information.
        for chunk in cl.group(csets, bundler, reorder=reorder):
            yield chunk
        self.ui.progress(_('bundling'), None)

        # Create a generator for the manifestnodes that calls our lookup
        # and data collection functions back.
        count[0] = 0
        for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
            yield chunk
        self.ui.progress(_('bundling'), None)

        mfs.clear()

        # Go through all our files in order sorted by name.
        count[0] = 0
        for fname in sorted(changedfiles):
            filerevlog = self.file(fname)
            if not len(filerevlog):
                raise util.Abort(_("empty or missing revlog for %s") % fname)
            fstate[0] = fname
            fstate[1] = fnodes.pop(fname, {})

            nodelist = prune(filerevlog, fstate[1])
            if nodelist:
                count[0] += 1
                yield bundler.fileheader(fname)
                for chunk in filerevlog.group(nodelist, bundler, reorder):
                    yield chunk

        # Signal that no more groups are left.
        yield bundler.close()
        self.ui.progress(_('bundling'), None)

        if csets:
            self.hook('outgoing', node=hex(csets[0]), source=source)

    return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
+
def changegroup(self, basenodes, source):
    """Return a changegroup of all descendants of ``basenodes``.

    Delegates to changegroupsubset() rather than bundling directly, to
    avoid the race described in issue1320.
    """
    allheads = self.heads()
    return self.changegroupsubset(basenodes, allheads, source)
+
def _changegroup(self, nodes, source):
    """Compute the changegroup of all nodes that we have that a recipient
    doesn't. Return a chunkbuffer object whose read() method will return
    successive changegroup chunks.

    This is much easier than the previous function as we can assume that
    the recipient has any changenode we aren't sending them.

    nodes is the set of nodes to send"""

    cl = self.changelog
    mf = self.manifest
    mfs = {}
    changedfiles = set()
    # mutable cells shared with the lookup callback below
    fstate = ['']
    count = [0]

    self.hook('preoutgoing', throw=True, source=source)
    self.changegroupinfo(nodes, source)

    revset = set([cl.rev(n) for n in nodes])

    def gennodelst(log):
        # every revision of log whose introducing changeset is outgoing
        return [log.node(r) for r in log if log.linkrev(r) in revset]

    def lookup(revlog, x):
        # bundler callback: map a node to its owning changeset node and
        # record progress along the way
        if revlog == cl:
            c = cl.read(x)
            changedfiles.update(c[3])
            mfs.setdefault(c[0], x)
            count[0] += 1
            self.ui.progress(_('bundling'), count[0],
                             unit=_('changesets'), total=len(nodes))
            return x
        elif revlog == mf:
            count[0] += 1
            self.ui.progress(_('bundling'), count[0],
                             unit=_('manifests'), total=len(mfs))
            return cl.node(revlog.linkrev(revlog.rev(x)))
        else:
            self.ui.progress(
                _('bundling'), count[0], item=fstate[0],
                total=len(changedfiles), unit=_('files'))
            return cl.node(revlog.linkrev(revlog.rev(x)))

    bundler = changegroup.bundle10(lookup)
    reorder = self.ui.config('bundle', 'reorder', 'auto')
    if reorder == 'auto':
        reorder = None
    else:
        reorder = util.parsebool(reorder)

    def gengroup():
        '''yield a sequence of changegroup chunks (strings)'''
        # construct a list of all changed files

        for chunk in cl.group(nodes, bundler, reorder=reorder):
            yield chunk
        self.ui.progress(_('bundling'), None)

        count[0] = 0
        for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
            yield chunk
        self.ui.progress(_('bundling'), None)

        count[0] = 0
        for fname in sorted(changedfiles):
            filerevlog = self.file(fname)
            if not len(filerevlog):
                raise util.Abort(_("empty or missing revlog for %s") % fname)
            fstate[0] = fname
            nodelist = gennodelst(filerevlog)
            if nodelist:
                count[0] += 1
                yield bundler.fileheader(fname)
                for chunk in filerevlog.group(nodelist, bundler, reorder):
                    yield chunk
        yield bundler.close()
        self.ui.progress(_('bundling'), None)

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

    return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
+
def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
    """Add the changegroup returned by source.read() to this repo.
    srctype is a string like 'push', 'pull', or 'unbundle'. url is
    the URL of the repo where this changegroup is coming from.
    If lock is not None, the function takes ownership of the lock
    and releases it after the changegroup is added.

    Return an integer summarizing the change to this repo:
    - nothing changed or no source: 0
    - more heads than before: 1+added heads (2..n)
    - fewer heads than before: -1-removed heads (-2..-n)
    - number of heads stays the same: 1
    """
    def csmap(x):
        self.ui.debug("add changeset %s\n" % short(x))
        return len(cl)

    def revmap(x):
        return cl.rev(x)

    if not source:
        return 0

    self.hook('prechangegroup', throw=True, source=srctype, url=url)

    changesets = files = revisions = 0
    efiles = set()

    # write changelog data to temp files so concurrent readers will not see
    # inconsistent view
    cl = self.changelog
    cl.delayupdate()
    oldheads = cl.heads()

    tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
    try:
        trp = weakref.proxy(tr)
        # pull off the changeset group
        self.ui.status(_("adding changesets\n"))
        clstart = len(cl)
        class prog(object):
            # reusable progress callback; fields are rebound per phase
            step = _('changesets')
            count = 1
            ui = self.ui
            total = None
            def __call__(self):
                self.ui.progress(self.step, self.count, unit=_('chunks'),
                                 total=self.total)
                self.count += 1
        pr = prog()
        source.callback = pr

        source.changelogheader()
        if (cl.addgroup(source, csmap, trp) is None
            and not emptyok):
            raise util.Abort(_("received changelog group is empty"))
        clend = len(cl)
        changesets = clend - clstart
        for c in xrange(clstart, clend):
            efiles.update(self[c].files())
        # from here on, efiles is a count used for progress totals
        efiles = len(efiles)
        self.ui.progress(_('changesets'), None)

        # pull off the manifest group
        self.ui.status(_("adding manifests\n"))
        pr.step = _('manifests')
        pr.count = 1
        pr.total = changesets # manifests <= changesets
        # no need to check for empty manifest group here:
        # if the result of the merge of 1 and 2 is the same in 3 and 4,
        # no new manifest will be created and the manifest group will
        # be empty during the pull
        source.manifestheader()
        self.manifest.addgroup(source, revmap, trp)
        self.ui.progress(_('manifests'), None)

        needfiles = {}
        if self.ui.configbool('server', 'validate', default=False):
            # validate incoming csets have their manifests
            for cset in xrange(clstart, clend):
                mfest = self.changelog.read(self.changelog.node(cset))[0]
                mfest = self.manifest.readdelta(mfest)
                # store file nodes we must see
                for f, n in mfest.iteritems():
                    needfiles.setdefault(f, set()).add(n)

        # process the files
        self.ui.status(_("adding file changes\n"))
        pr.step = _('files')
        pr.count = 1
        pr.total = efiles
        source.callback = None

        while True:
            chunkdata = source.filelogheader()
            if not chunkdata:
                break
            f = chunkdata["filename"]
            self.ui.debug("adding %s revisions\n" % f)
            pr()
            fl = self.file(f)
            o = len(fl)
            if fl.addgroup(source, revmap, trp) is None:
                raise util.Abort(_("received file revlog group is empty"))
            revisions += len(fl) - o
            files += 1
            if f in needfiles:
                needs = needfiles[f]
                for new in xrange(o, len(fl)):
                    n = fl.node(new)
                    if n in needs:
                        needs.remove(n)
                if not needs:
                    del needfiles[f]
        self.ui.progress(_('files'), None)

        for f, needs in needfiles.iteritems():
            fl = self.file(f)
            for n in needs:
                try:
                    fl.rev(n)
                except error.LookupError:
                    raise util.Abort(
                        _('missing file data for %s:%s - run hg verify') %
                        (f, hex(n)))

        dh = 0
        if oldheads:
            heads = cl.heads()
            dh = len(heads) - len(oldheads)
            for h in heads:
                # new heads that close a branch do not count
                if h not in oldheads and 'close' in self[h].extra():
                    dh -= 1
        htext = ""
        if dh:
            htext = _(" (%+d heads)") % dh

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, htext))

        if changesets > 0:
            # lazily expose pending data to the pretxnchangegroup hook
            p = lambda: cl.writepending() and self.root or ""
            self.hook('pretxnchangegroup', throw=True,
                      node=hex(cl.node(clstart)), source=srctype,
                      url=url, pending=p)

        # make changelog see real files again
        cl.finalize(trp)

        tr.close()
    finally:
        tr.release()
        if lock:
            lock.release()

    if changesets > 0:
        # forcefully update the on-disk branch cache
        self.ui.debug("updating the branch cache\n")
        self.updatebranchcache()
        self.hook("changegroup", node=hex(cl.node(clstart)),
                  source=srctype, url=url)

        for i in xrange(clstart, clend):
            self.hook("incoming", node=hex(cl.node(i)),
                      source=srctype, url=url)

    # never return 0 here:
    if dh < 0:
        return dh - 1
    else:
        return dh + 1
+
def stream_in(self, remote, requirements):
    """Clone by copying raw store files streamed from ``remote``.

    requirements -- set of format requirements to adopt, merged with our
    own non-format requirements on success.
    Returns len(self.heads()) + 1, mirroring addchangegroup()'s
    convention of never returning 0 on success.
    """
    lock = self.lock()
    try:
        fp = remote.stream_out()
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
        self.ui.status(_('streaming all changes\n'))
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
        for i in xrange(total_files):
            # XXX doesn't support '\n' or '\r' in filenames
            l = fp.readline()
            try:
                name, size = l.split('\0', 1)
                size = int(size)
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
            # for backwards compat, name was partially encoded
            ofp = self.sopener(store.decodedir(name), 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        if elapsed <= 0:
            # guard against a zero/negative clock delta in the rate math
            elapsed = 0.001
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))

        # new requirements = old non-format requirements + new format-related
        # requirements from the streamed-in repository
        requirements.update(set(self.requirements) - self.supportedformats)
        self._applyrequirements(requirements)
        self._writerequirements()

        self.invalidate()
        return len(self.heads()) + 1
    finally:
        lock.release()
+
def clone(self, remote, heads=None, stream=False):
    '''clone remote repository.

    keyword arguments:
    heads: list of revs to clone (forces use of pull); None or an
    empty list means all revisions
    stream: use streaming clone if possible'''

    # NOTE: the default used to be ``heads=[]``; a mutable default
    # argument is shared between calls, so a None sentinel is used
    # instead (behavior is unchanged: falsy means "all heads").
    if heads is None:
        heads = []

    # now, all clients that can request uncompressed clones can
    # read repo formats supported by all servers that can serve
    # them.

    # if revlog format changes, client will have to check version
    # and format flags on "stream" capability, and use
    # uncompressed only if compatible.

    if stream and not heads:
        # 'stream' means remote revlog format is revlogv1 only
        if remote.capable('stream'):
            return self.stream_in(remote, set(('revlogv1',)))
        # otherwise, 'streamreqs' contains the remote revlog format
        streamreqs = remote.capable('streamreqs')
        if streamreqs:
            streamreqs = set(streamreqs.split(','))
            # if we support it, stream in and adjust our requirements
            if not streamreqs - self.supportedformats:
                return self.stream_in(remote, streamreqs)
    return self.pull(remote, heads)
+
def pushkey(self, namespace, key, old, new):
    """Update ``key`` in ``namespace`` from ``old`` to ``new`` through
    the pushkey protocol, firing the prepushkey hook (which may veto by
    raising) before and the pushkey hook after. Returns the protocol's
    result value."""
    self.hook('prepushkey', throw=True, namespace=namespace, key=key,
              old=old, new=new)
    result = pushkey.push(self, namespace, key, old, new)
    self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
              ret=result)
    return result
+
def listkeys(self, namespace):
    """Return the pushkey values published under ``namespace``, firing
    the prelistkeys hook before and the listkeys hook after."""
    self.hook('prelistkeys', throw=True, namespace=namespace)
    result = pushkey.list(self, namespace)
    self.hook('listkeys', namespace=namespace, values=result)
    return result
+
def debugwireargs(self, one, two, three=None, four=None, five=None):
    '''used to test argument passing over the wire'''
    args = (one, two, three, four, five)
    return ' '.join('%s' % a for a in args)
+
def savecommitmessage(self, text):
    """Write ``text`` to .hg/last-message.txt (so an aborted commit's
    message survives) and return the repo-relative path to it."""
    msgfile = self.opener('last-message.txt', 'wb')
    try:
        msgfile.write(text)
    finally:
        msgfile.close()
    # strip "<root>/" to obtain a repository-relative name
    relname = msgfile.name[len(self.root) + 1:]
    return self.pathto(relname)
+
+# used to avoid circular references so destructors work
+def aftertrans(files):
+ renamefiles = [tuple(t) for t in files]
+ def a():
+ for src, dest in renamefiles:
+ util.rename(src, dest)
+ return a
+
def undoname(fn):
    """Map a journal file path to its undo counterpart.

    e.g. ``.hg/journal.dirstate`` becomes ``.hg/undo.dirstate``.
    The basename must start with 'journal'.
    """
    directory, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(directory, 'undo' + name[len('journal'):])
+
def instance(ui, path, create):
    """repository-class entry point: open (or create) the local
    repository at ``path``, which may be a file:// URL."""
    localpath = util.urllocalpath(path)
    return localrepository(ui, localpath, create)
+
def islocal(path):
    """Repositories handled by this module are always local."""
    return True
diff --git a/websdk/mercurial/lock.py b/websdk/mercurial/lock.py
index 39206c3..bf33f02 120000..100644
--- a/websdk/mercurial/lock.py
+++ b/websdk/mercurial/lock.py
@@ -1 +1,137 @@
-/usr/share/pyshared/mercurial/lock.py \ No newline at end of file
+# lock.py - simple advisory locking scheme for mercurial
+#
+# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+import util, error
+import errno, os, socket, time
+import warnings
+
+class lock(object):
+ '''An advisory lock held by one process to control access to a set
+ of files. Non-cooperating processes or incorrectly written scripts
+ can ignore Mercurial's locking scheme and stomp all over the
+ repository, so don't do that.
+
+ Typically used via localrepository.lock() to lock the repository
+ store (.hg/store/) or localrepository.wlock() to lock everything
+ else under .hg/.'''
+
+ # lock is symlink on platforms that support it, file on others.
+
+ # symlink is used because create of directory entry and contents
+ # are atomic even over nfs.
+
+ # old-style lock: symlink to pid
+ # new-style lock: symlink to hostname:pid
+
+ _host = None
+
+ def __init__(self, file, timeout=-1, releasefn=None, desc=None):
+ self.f = file
+ self.held = 0
+ self.timeout = timeout
+ self.releasefn = releasefn
+ self.desc = desc
+ self.lock()
+
+ def __del__(self):
+ if self.held:
+ warnings.warn("use lock.release instead of del lock",
+ category=DeprecationWarning,
+ stacklevel=2)
+
+ # ensure the lock will be removed
+ # even if recursive locking did occur
+ self.held = 1
+
+ self.release()
+
+ def lock(self):
+ timeout = self.timeout
+ while True:
+ try:
+ self.trylock()
+ return 1
+ except error.LockHeld, inst:
+ if timeout != 0:
+ time.sleep(1)
+ if timeout > 0:
+ timeout -= 1
+ continue
+ raise error.LockHeld(errno.ETIMEDOUT, inst.filename, self.desc,
+ inst.locker)
+
+ def trylock(self):
+ if self.held:
+ self.held += 1
+ return
+ if lock._host is None:
+ lock._host = socket.gethostname()
+ lockname = '%s:%s' % (lock._host, os.getpid())
+ while not self.held:
+ try:
+ util.makelock(lockname, self.f)
+ self.held = 1
+ except (OSError, IOError), why:
+ if why.errno == errno.EEXIST:
+ locker = self.testlock()
+ if locker is not None:
+ raise error.LockHeld(errno.EAGAIN, self.f, self.desc,
+ locker)
+ else:
+ raise error.LockUnavailable(why.errno, why.strerror,
+ why.filename, self.desc)
+
+ def testlock(self):
+ """return id of locker if lock is valid, else None.
+
+ If old-style lock, we cannot tell what machine locker is on.
+ with new-style lock, if locker is on this machine, we can
+ see if locker is alive. If locker is on this machine but
+ not alive, we can safely break lock.
+
+ The lock file is only deleted when None is returned.
+
+ """
+ locker = util.readlock(self.f)
+ try:
+ host, pid = locker.split(":", 1)
+ except ValueError:
+ return locker
+ if host != lock._host:
+ return locker
+ try:
+ pid = int(pid)
+ except ValueError:
+ return locker
+ if util.testpid(pid):
+ return locker
+ # if locker dead, break lock. must do this with another lock
+ # held, or can race and break valid lock.
+ try:
+ l = lock(self.f + '.break', timeout=0)
+ util.unlink(self.f)
+ l.release()
+ except error.LockError:
+ return locker
+
+ def release(self):
+ if self.held > 1:
+ self.held -= 1
+ elif self.held == 1:
+ self.held = 0
+ if self.releasefn:
+ self.releasefn()
+ try:
+ util.unlink(self.f)
+ except OSError:
+ pass
+
+def release(*locks):
+ for lock in locks:
+ if lock is not None:
+ lock.release()
+
diff --git a/websdk/mercurial/lsprof.py b/websdk/mercurial/lsprof.py
index 2496191..7ce6f67 120000..100644
--- a/websdk/mercurial/lsprof.py
+++ b/websdk/mercurial/lsprof.py
@@ -1 +1,109 @@
-/usr/share/pyshared/mercurial/lsprof.py \ No newline at end of file
+import sys
+from _lsprof import Profiler, profiler_entry
+
+__all__ = ['profile', 'Stats']
+
+def profile(f, *args, **kwds):
+ """XXX docstring"""
+ p = Profiler()
+ p.enable(subcalls=True, builtins=True)
+ try:
+ f(*args, **kwds)
+ finally:
+ p.disable()
+ return Stats(p.getstats())
+
+
+class Stats(object):
+ """XXX docstring"""
+
+ def __init__(self, data):
+ self.data = data
+
+ def sort(self, crit="inlinetime"):
+ """XXX docstring"""
+ if crit not in profiler_entry.__dict__:
+ raise ValueError("Can't sort by %s" % crit)
+ self.data.sort(key=lambda x: getattr(x, crit), reverse=True)
+ for e in self.data:
+ if e.calls:
+ e.calls.sort(key=lambda x: getattr(x, crit), reverse=True)
+
+ def pprint(self, top=None, file=None, limit=None, climit=None):
+ """XXX docstring"""
+ if file is None:
+ file = sys.stdout
+ d = self.data
+ if top is not None:
+ d = d[:top]
+ cols = "% 12s %12s %11.4f %11.4f %s\n"
+ hcols = "% 12s %12s %12s %12s %s\n"
+ file.write(hcols % ("CallCount", "Recursive", "Total(ms)",
+ "Inline(ms)", "module:lineno(function)"))
+ count = 0
+ for e in d:
+ file.write(cols % (e.callcount, e.reccallcount, e.totaltime,
+ e.inlinetime, label(e.code)))
+ count += 1
+ if limit is not None and count == limit:
+ return
+ ccount = 0
+ if e.calls:
+ for se in e.calls:
+ file.write(cols % ("+%s" % se.callcount, se.reccallcount,
+ se.totaltime, se.inlinetime,
+ "+%s" % label(se.code)))
+ count += 1
+ ccount += 1
+ if limit is not None and count == limit:
+ return
+ if climit is not None and ccount == climit:
+ break
+
+ def freeze(self):
+ """Replace all references to code objects with string
+ descriptions; this makes it possible to pickle the instance."""
+
+ # this code is probably rather ickier than it needs to be!
+ for i in range(len(self.data)):
+ e = self.data[i]
+ if not isinstance(e.code, str):
+ self.data[i] = type(e)((label(e.code),) + e[1:])
+ if e.calls:
+ for j in range(len(e.calls)):
+ se = e.calls[j]
+ if not isinstance(se.code, str):
+ e.calls[j] = type(se)((label(se.code),) + se[1:])
+
+_fn2mod = {}
+
+def label(code):
+ if isinstance(code, str):
+ return code
+ try:
+ mname = _fn2mod[code.co_filename]
+ except KeyError:
+ for k, v in list(sys.modules.iteritems()):
+ if v is None:
+ continue
+ if not isinstance(getattr(v, '__file__', None), str):
+ continue
+ if v.__file__.startswith(code.co_filename):
+ mname = _fn2mod[code.co_filename] = k
+ break
+ else:
+ mname = _fn2mod[code.co_filename] = '<%s>' % code.co_filename
+
+ return '%s:%d(%s)' % (mname, code.co_firstlineno, code.co_name)
+
+
+if __name__ == '__main__':
+ import os
+ sys.argv = sys.argv[1:]
+ if not sys.argv:
+ print >> sys.stderr, "usage: lsprof.py <script> <arguments...>"
+ sys.exit(2)
+ sys.path.insert(0, os.path.abspath(os.path.dirname(sys.argv[0])))
+ stats = profile(execfile, sys.argv[0], globals(), locals())
+ stats.sort()
+ stats.pprint()
diff --git a/websdk/mercurial/lsprofcalltree.py b/websdk/mercurial/lsprofcalltree.py
index 1ec89c8..358b951 120000..100644
--- a/websdk/mercurial/lsprofcalltree.py
+++ b/websdk/mercurial/lsprofcalltree.py
@@ -1 +1,86 @@
-/usr/share/pyshared/mercurial/lsprofcalltree.py \ No newline at end of file
+"""
+lsprofcalltree.py - lsprof output which is readable by kcachegrind
+
+Authors:
+ * David Allouche <david <at> allouche.net>
+ * Jp Calderone & Itamar Shtull-Trauring
+ * Johan Dahlin
+
+This software may be used and distributed according to the terms
+of the GNU General Public License, incorporated herein by reference.
+"""
+
+def label(code):
+ if isinstance(code, str):
+ return '~' + code # built-in functions ('~' sorts at the end)
+ else:
+ return '%s %s:%d' % (code.co_name,
+ code.co_filename,
+ code.co_firstlineno)
+
+class KCacheGrind(object):
+ def __init__(self, profiler):
+ self.data = profiler.getstats()
+ self.out_file = None
+
+ def output(self, out_file):
+ self.out_file = out_file
+ print >> out_file, 'events: Ticks'
+ self._print_summary()
+ for entry in self.data:
+ self._entry(entry)
+
+ def _print_summary(self):
+ max_cost = 0
+ for entry in self.data:
+ totaltime = int(entry.totaltime * 1000)
+ max_cost = max(max_cost, totaltime)
+ print >> self.out_file, 'summary: %d' % (max_cost,)
+
+ def _entry(self, entry):
+ out_file = self.out_file
+
+ code = entry.code
+ #print >> out_file, 'ob=%s' % (code.co_filename,)
+ if isinstance(code, str):
+ print >> out_file, 'fi=~'
+ else:
+ print >> out_file, 'fi=%s' % (code.co_filename,)
+ print >> out_file, 'fn=%s' % (label(code),)
+
+ inlinetime = int(entry.inlinetime * 1000)
+ if isinstance(code, str):
+ print >> out_file, '0 ', inlinetime
+ else:
+ print >> out_file, '%d %d' % (code.co_firstlineno, inlinetime)
+
+ # recursive calls are counted in entry.calls
+ if entry.calls:
+ calls = entry.calls
+ else:
+ calls = []
+
+ if isinstance(code, str):
+ lineno = 0
+ else:
+ lineno = code.co_firstlineno
+
+ for subentry in calls:
+ self._subentry(lineno, subentry)
+ print >> out_file
+
+ def _subentry(self, lineno, subentry):
+ out_file = self.out_file
+ code = subentry.code
+ #print >> out_file, 'cob=%s' % (code.co_filename,)
+ print >> out_file, 'cfn=%s' % (label(code),)
+ if isinstance(code, str):
+ print >> out_file, 'cfi=~'
+ print >> out_file, 'calls=%d 0' % (subentry.callcount,)
+ else:
+ print >> out_file, 'cfi=%s' % (code.co_filename,)
+ print >> out_file, 'calls=%d %d' % (
+ subentry.callcount, code.co_firstlineno)
+
+ totaltime = int(subentry.totaltime * 1000)
+ print >> out_file, '%d %d' % (lineno, totaltime)
diff --git a/websdk/mercurial/mail.py b/websdk/mercurial/mail.py
index d53690f..0e822c8 120000..100644
--- a/websdk/mercurial/mail.py
+++ b/websdk/mercurial/mail.py
@@ -1 +1,233 @@
-/usr/share/pyshared/mercurial/mail.py \ No newline at end of file
+# mail.py - mail sending bits for mercurial
+#
+# Copyright 2006 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from i18n import _
+import util, encoding
+import os, smtplib, socket, quopri
+import email.Header, email.MIMEText, email.Utils
+
+_oldheaderinit = email.Header.Header.__init__
+def _unifiedheaderinit(self, *args, **kw):
+ """
+ Python2.7 introduces a backwards incompatible change
+ (Python issue1974, r70772) in email.Generator.Generator code:
+ pre-2.7 code passed "continuation_ws='\t'" to the Header
+ constructor, and 2.7 removed this parameter.
+
+ Default argument is continuation_ws=' ', which means that the
+ behaviour is different in <2.7 and 2.7
+
+ We consider the 2.7 behaviour to be preferable, but need
+ to have an unified behaviour for versions 2.4 to 2.7
+ """
+ # override continuation_ws
+ kw['continuation_ws'] = ' '
+ _oldheaderinit(self, *args, **kw)
+
+email.Header.Header.__dict__['__init__'] = _unifiedheaderinit
+
+def _smtp(ui):
+ '''build an smtp connection and return a function to send mail'''
+ local_hostname = ui.config('smtp', 'local_hostname')
+ tls = ui.config('smtp', 'tls', 'none')
+ # backward compatible: when tls = true, we use starttls.
+ starttls = tls == 'starttls' or util.parsebool(tls)
+ smtps = tls == 'smtps'
+ if (starttls or smtps) and not util.safehasattr(socket, 'ssl'):
+ raise util.Abort(_("can't use TLS: Python SSL support not installed"))
+ if smtps:
+ ui.note(_('(using smtps)\n'))
+ s = smtplib.SMTP_SSL(local_hostname=local_hostname)
+ else:
+ s = smtplib.SMTP(local_hostname=local_hostname)
+ mailhost = ui.config('smtp', 'host')
+ if not mailhost:
+ raise util.Abort(_('smtp.host not configured - cannot send mail'))
+ mailport = util.getport(ui.config('smtp', 'port', 25))
+ ui.note(_('sending mail: smtp host %s, port %s\n') %
+ (mailhost, mailport))
+ s.connect(host=mailhost, port=mailport)
+ if starttls:
+ ui.note(_('(using starttls)\n'))
+ s.ehlo()
+ s.starttls()
+ s.ehlo()
+ username = ui.config('smtp', 'username')
+ password = ui.config('smtp', 'password')
+ if username and not password:
+ password = ui.getpass()
+ if username and password:
+ ui.note(_('(authenticating to mail server as %s)\n') %
+ (username))
+ try:
+ s.login(username, password)
+ except smtplib.SMTPException, inst:
+ raise util.Abort(inst)
+
+ def send(sender, recipients, msg):
+ try:
+ return s.sendmail(sender, recipients, msg)
+ except smtplib.SMTPRecipientsRefused, inst:
+ recipients = [r[1] for r in inst.recipients.values()]
+ raise util.Abort('\n' + '\n'.join(recipients))
+ except smtplib.SMTPException, inst:
+ raise util.Abort(inst)
+
+ return send
+
+def _sendmail(ui, sender, recipients, msg):
+ '''send mail using sendmail.'''
+ program = ui.config('email', 'method')
+ cmdline = '%s -f %s %s' % (program, util.email(sender),
+ ' '.join(map(util.email, recipients)))
+ ui.note(_('sending mail: %s\n') % cmdline)
+ fp = util.popen(cmdline, 'w')
+ fp.write(msg)
+ ret = fp.close()
+ if ret:
+ raise util.Abort('%s %s' % (
+ os.path.basename(program.split(None, 1)[0]),
+ util.explainexit(ret)[0]))
+
+def connect(ui):
+ '''make a mail connection. return a function to send mail.
+ call as sendmail(sender, list-of-recipients, msg).'''
+ if ui.config('email', 'method', 'smtp') == 'smtp':
+ return _smtp(ui)
+ return lambda s, r, m: _sendmail(ui, s, r, m)
+
+def sendmail(ui, sender, recipients, msg):
+ send = connect(ui)
+ return send(sender, recipients, msg)
+
+def validateconfig(ui):
+ '''determine if we have enough config data to try sending email.'''
+ method = ui.config('email', 'method', 'smtp')
+ if method == 'smtp':
+ if not ui.config('smtp', 'host'):
+ raise util.Abort(_('smtp specified as email transport, '
+ 'but no smtp host configured'))
+ else:
+ if not util.findexe(method):
+ raise util.Abort(_('%r specified as email transport, '
+ 'but not in PATH') % method)
+
+def mimetextpatch(s, subtype='plain', display=False):
+ '''If patch in utf-8 transfer-encode it.'''
+
+ enc = None
+ for line in s.splitlines():
+ if len(line) > 950:
+ s = quopri.encodestring(s)
+ enc = "quoted-printable"
+ break
+
+ cs = 'us-ascii'
+ if not display:
+ try:
+ s.decode('us-ascii')
+ except UnicodeDecodeError:
+ try:
+ s.decode('utf-8')
+ cs = 'utf-8'
+ except UnicodeDecodeError:
+ # We'll go with us-ascii as a fallback.
+ pass
+
+ msg = email.MIMEText.MIMEText(s, subtype, cs)
+ if enc:
+ del msg['Content-Transfer-Encoding']
+ msg['Content-Transfer-Encoding'] = enc
+ return msg
+
+def _charsets(ui):
+ '''Obtains charsets to send mail parts not containing patches.'''
+ charsets = [cs.lower() for cs in ui.configlist('email', 'charsets')]
+ fallbacks = [encoding.fallbackencoding.lower(),
+ encoding.encoding.lower(), 'utf-8']
+ for cs in fallbacks: # find unique charsets while keeping order
+ if cs not in charsets:
+ charsets.append(cs)
+ return [cs for cs in charsets if not cs.endswith('ascii')]
+
+def _encode(ui, s, charsets):
+ '''Returns (converted) string, charset tuple.
+ Finds out best charset by cycling through sendcharsets in descending
+ order. Tries both encoding and fallbackencoding for input. Only as
+ last resort send as is in fake ascii.
+ Caveat: Do not use for mail parts containing patches!'''
+ try:
+ s.decode('ascii')
+ except UnicodeDecodeError:
+ sendcharsets = charsets or _charsets(ui)
+ for ics in (encoding.encoding, encoding.fallbackencoding):
+ try:
+ u = s.decode(ics)
+ except UnicodeDecodeError:
+ continue
+ for ocs in sendcharsets:
+ try:
+ return u.encode(ocs), ocs
+ except UnicodeEncodeError:
+ pass
+ except LookupError:
+ ui.warn(_('ignoring invalid sendcharset: %s\n') % ocs)
+ # if ascii, or all conversion attempts fail, send (broken) ascii
+ return s, 'us-ascii'
+
+def headencode(ui, s, charsets=None, display=False):
+ '''Returns RFC-2047 compliant header from given string.'''
+ if not display:
+ # split into words?
+ s, cs = _encode(ui, s, charsets)
+ return str(email.Header.Header(s, cs))
+ return s
+
+def _addressencode(ui, name, addr, charsets=None):
+ name = headencode(ui, name, charsets)
+ try:
+ acc, dom = addr.split('@')
+ acc = acc.encode('ascii')
+ dom = dom.decode(encoding.encoding).encode('idna')
+ addr = '%s@%s' % (acc, dom)
+ except UnicodeDecodeError:
+ raise util.Abort(_('invalid email address: %s') % addr)
+ except ValueError:
+ try:
+ # too strict?
+ addr = addr.encode('ascii')
+ except UnicodeDecodeError:
+ raise util.Abort(_('invalid local address: %s') % addr)
+ return email.Utils.formataddr((name, addr))
+
+def addressencode(ui, address, charsets=None, display=False):
+ '''Turns address into RFC-2047 compliant header.'''
+ if display or not address:
+ return address or ''
+ name, addr = email.Utils.parseaddr(address)
+ return _addressencode(ui, name, addr, charsets)
+
+def addrlistencode(ui, addrs, charsets=None, display=False):
+ '''Turns a list of addresses into a list of RFC-2047 compliant headers.
+ A single element of input list may contain multiple addresses, but output
+ always has one address per item'''
+ if display:
+ return [a.strip() for a in addrs if a.strip()]
+
+ result = []
+ for name, addr in email.Utils.getaddresses(addrs):
+ if name or addr:
+ result.append(_addressencode(ui, name, addr, charsets))
+ return result
+
+def mimeencode(ui, s, charsets=None, display=False):
+ '''creates mime text object, encodes it if needed, and sets
+ charset and transfer-encoding accordingly.'''
+ cs = 'us-ascii'
+ if not display:
+ s, cs = _encode(ui, s, charsets)
+ return email.MIMEText.MIMEText(s, 'plain', cs)
diff --git a/websdk/mercurial/manifest.py b/websdk/mercurial/manifest.py
index d1ab529..7c3781c 120000..100644
--- a/websdk/mercurial/manifest.py
+++ b/websdk/mercurial/manifest.py
@@ -1 +1,204 @@
-/usr/share/pyshared/mercurial/manifest.py \ No newline at end of file
+# manifest.py - manifest revision class for mercurial
+#
+# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from i18n import _
+import mdiff, parsers, error, revlog
+import array, struct
+
+class manifestdict(dict):
+ def __init__(self, mapping=None, flags=None):
+ if mapping is None:
+ mapping = {}
+ if flags is None:
+ flags = {}
+ dict.__init__(self, mapping)
+ self._flags = flags
+ def flags(self, f):
+ return self._flags.get(f, "")
+ def set(self, f, flags):
+ self._flags[f] = flags
+ def copy(self):
+ return manifestdict(self, dict.copy(self._flags))
+
+class manifest(revlog.revlog):
+ def __init__(self, opener):
+ self._mancache = None
+ revlog.revlog.__init__(self, opener, "00manifest.i")
+
+ def parse(self, lines):
+ mfdict = manifestdict()
+ parsers.parse_manifest(mfdict, mfdict._flags, lines)
+ return mfdict
+
+ def readdelta(self, node):
+ r = self.rev(node)
+ return self.parse(mdiff.patchtext(self.revdiff(self.deltaparent(r), r)))
+
+ def readfast(self, node):
+ '''use the faster of readdelta or read'''
+ r = self.rev(node)
+ deltaparent = self.deltaparent(r)
+ if deltaparent != revlog.nullrev and deltaparent in self.parentrevs(r):
+ return self.readdelta(node)
+ return self.read(node)
+
+ def read(self, node):
+ if node == revlog.nullid:
+ return manifestdict() # don't upset local cache
+ if self._mancache and self._mancache[0] == node:
+ return self._mancache[1]
+ text = self.revision(node)
+ arraytext = array.array('c', text)
+ mapping = self.parse(text)
+ self._mancache = (node, mapping, arraytext)
+ return mapping
+
+ def _search(self, m, s, lo=0, hi=None):
+ '''return a tuple (start, end) that says where to find s within m.
+
+ If the string is found m[start:end] are the line containing
+ that string. If start == end the string was not found and
+ they indicate the proper sorted insertion point. This was
+ taken from bisect_left, and modified to find line start/end as
+ it goes along.
+
+ m should be a buffer or a string
+ s is a string'''
+ def advance(i, c):
+ while i < lenm and m[i] != c:
+ i += 1
+ return i
+ if not s:
+ return (lo, lo)
+ lenm = len(m)
+ if not hi:
+ hi = lenm
+ while lo < hi:
+ mid = (lo + hi) // 2
+ start = mid
+ while start > 0 and m[start - 1] != '\n':
+ start -= 1
+ end = advance(start, '\0')
+ if m[start:end] < s:
+ # we know that after the null there are 40 bytes of sha1
+ # this translates to the bisect lo = mid + 1
+ lo = advance(end + 40, '\n') + 1
+ else:
+ # this translates to the bisect hi = mid
+ hi = start
+ end = advance(lo, '\0')
+ found = m[lo:end]
+ if s == found:
+ # we know that after the null there are 40 bytes of sha1
+ end = advance(end + 40, '\n')
+ return (lo, end + 1)
+ else:
+ return (lo, lo)
+
+ def find(self, node, f):
+ '''look up entry for a single file efficiently.
+ return (node, flags) pair if found, (None, None) if not.'''
+ if self._mancache and self._mancache[0] == node:
+ return self._mancache[1].get(f), self._mancache[1].flags(f)
+ text = self.revision(node)
+ start, end = self._search(text, f)
+ if start == end:
+ return None, None
+ l = text[start:end]
+ f, n = l.split('\0')
+ return revlog.bin(n[:40]), n[40:-1]
+
+ def add(self, map, transaction, link, p1=None, p2=None,
+ changed=None):
+ # apply the changes collected during the bisect loop to our addlist
+ # return a delta suitable for addrevision
+ def addlistdelta(addlist, x):
+ # start from the bottom up
+ # so changes to the offsets don't mess things up.
+ for start, end, content in reversed(x):
+ if content:
+ addlist[start:end] = array.array('c', content)
+ else:
+ del addlist[start:end]
+ return "".join(struct.pack(">lll", start, end, len(content)) + content
+ for start, end, content in x)
+
+ def checkforbidden(l):
+ for f in l:
+ if '\n' in f or '\r' in f:
+ raise error.RevlogError(
+ _("'\\n' and '\\r' disallowed in filenames: %r") % f)
+
+ # if we're using the cache, make sure it is valid and
+ # parented by the same node we're diffing against
+ if not (changed and self._mancache and p1 and self._mancache[0] == p1):
+ files = sorted(map)
+ checkforbidden(files)
+
+ # if this is changed to support newlines in filenames,
+ # be sure to check the templates/ dir again (especially *-raw.tmpl)
+ hex, flags = revlog.hex, map.flags
+ text = ''.join("%s\0%s%s\n" % (f, hex(map[f]), flags(f))
+ for f in files)
+ arraytext = array.array('c', text)
+ cachedelta = None
+ else:
+ added, removed = changed
+ addlist = self._mancache[2]
+
+ checkforbidden(added)
+ # combine the changed lists into one list for sorting
+ work = [(x, False) for x in added]
+ work.extend((x, True) for x in removed)
+ # this could use heapq.merge() (from python2.6+) or equivalent
+ # since the lists are already sorted
+ work.sort()
+
+ delta = []
+ dstart = None
+ dend = None
+ dline = [""]
+ start = 0
+ # zero copy representation of addlist as a buffer
+ addbuf = buffer(addlist)
+
+ # start with a readonly loop that finds the offset of
+ # each line and creates the deltas
+ for f, todelete in work:
+ # bs will either be the index of the item or the insert point
+ start, end = self._search(addbuf, f, start)
+ if not todelete:
+ l = "%s\0%s%s\n" % (f, revlog.hex(map[f]), map.flags(f))
+ else:
+ if start == end:
+ # item we want to delete was not found, error out
+ raise AssertionError(
+ _("failed to remove %s from manifest") % f)
+ l = ""
+ if dstart is not None and dstart <= start and dend >= start:
+ if dend < end:
+ dend = end
+ if l:
+ dline.append(l)
+ else:
+ if dstart is not None:
+ delta.append([dstart, dend, "".join(dline)])
+ dstart = start
+ dend = end
+ dline = [l]
+
+ if dstart is not None:
+ delta.append([dstart, dend, "".join(dline)])
+ # apply the delta to the addlist, and get a delta for addrevision
+ cachedelta = (self.rev(p1), addlistdelta(addlist, delta))
+ arraytext = addlist
+ text = buffer(arraytext)
+
+ n = self.addrevision(text, transaction, link, p1, p2, cachedelta)
+ self._mancache = (n, map, arraytext)
+
+ return n
diff --git a/websdk/mercurial/match.py b/websdk/mercurial/match.py
index 14782e1..17dd752 120000..100644
--- a/websdk/mercurial/match.py
+++ b/websdk/mercurial/match.py
@@ -1 +1,337 @@
-/usr/share/pyshared/mercurial/match.py \ No newline at end of file
+# match.py - filename matching
+#
+# Copyright 2008, 2009 Matt Mackall <mpm@selenic.com> and others
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+import re
+import scmutil, util, fileset
+from i18n import _
+
+def _expandsets(pats, ctx):
+ '''convert set: patterns into a list of files in the given context'''
+ fset = set()
+ other = []
+
+ for kind, expr in pats:
+ if kind == 'set':
+ if not ctx:
+ raise util.Abort("fileset expression with no context")
+ s = fileset.getfileset(ctx, expr)
+ fset.update(s)
+ continue
+ other.append((kind, expr))
+ return fset, other
+
+class match(object):
+ def __init__(self, root, cwd, patterns, include=[], exclude=[],
+ default='glob', exact=False, auditor=None, ctx=None):
+ """build an object to match a set of file patterns
+
+ arguments:
+ root - the canonical root of the tree you're matching against
+ cwd - the current working directory, if relevant
+ patterns - patterns to find
+ include - patterns to include
+ exclude - patterns to exclude
+ default - if a pattern in names has no explicit type, assume this one
+ exact - patterns are actually literals
+
+ a pattern is one of:
+ 'glob:<glob>' - a glob relative to cwd
+ 're:<regexp>' - a regular expression
+ 'path:<path>' - a path relative to canonroot
+ 'relglob:<glob>' - an unrooted glob (*.c matches C files in all dirs)
+ 'relpath:<path>' - a path relative to cwd
+ 'relre:<regexp>' - a regexp that needn't match the start of a name
+ 'set:<fileset>' - a fileset expression
+ '<something>' - a pattern of the specified default type
+ """
+
+ self._root = root
+ self._cwd = cwd
+ self._files = []
+ self._anypats = bool(include or exclude)
+ self._ctx = ctx
+
+ if include:
+ pats = _normalize(include, 'glob', root, cwd, auditor)
+ self.includepat, im = _buildmatch(ctx, pats, '(?:/|$)')
+ if exclude:
+ pats = _normalize(exclude, 'glob', root, cwd, auditor)
+ self.excludepat, em = _buildmatch(ctx, pats, '(?:/|$)')
+ if exact:
+ self._files = patterns
+ pm = self.exact
+ elif patterns:
+ pats = _normalize(patterns, default, root, cwd, auditor)
+ self._files = _roots(pats)
+ self._anypats = self._anypats or _anypats(pats)
+ self.patternspat, pm = _buildmatch(ctx, pats, '$')
+
+ if patterns or exact:
+ if include:
+ if exclude:
+ m = lambda f: im(f) and not em(f) and pm(f)
+ else:
+ m = lambda f: im(f) and pm(f)
+ else:
+ if exclude:
+ m = lambda f: not em(f) and pm(f)
+ else:
+ m = pm
+ else:
+ if include:
+ if exclude:
+ m = lambda f: im(f) and not em(f)
+ else:
+ m = im
+ else:
+ if exclude:
+ m = lambda f: not em(f)
+ else:
+ m = lambda f: True
+
+ self.matchfn = m
+ self._fmap = set(self._files)
+
+ def __call__(self, fn):
+ return self.matchfn(fn)
+ def __iter__(self):
+ for f in self._files:
+ yield f
+ def bad(self, f, msg):
+ '''callback for each explicit file that can't be
+ found/accessed, with an error message
+ '''
+ pass
+ def dir(self, f):
+ pass
+ def missing(self, f):
+ pass
+ def exact(self, f):
+ return f in self._fmap
+ def rel(self, f):
+ return util.pathto(self._root, self._cwd, f)
+ def files(self):
+ return self._files
+ def anypats(self):
+ return self._anypats
+
+class exact(match):
+ def __init__(self, root, cwd, files):
+ match.__init__(self, root, cwd, files, exact = True)
+
+class always(match):
+ def __init__(self, root, cwd):
+ match.__init__(self, root, cwd, [])
+
+class narrowmatcher(match):
+ """Adapt a matcher to work on a subdirectory only.
+
+ The paths are remapped to remove/insert the path as needed:
+
+ >>> m1 = match('root', '', ['a.txt', 'sub/b.txt'])
+ >>> m2 = narrowmatcher('sub', m1)
+ >>> bool(m2('a.txt'))
+ False
+ >>> bool(m2('b.txt'))
+ True
+ >>> bool(m2.matchfn('a.txt'))
+ False
+ >>> bool(m2.matchfn('b.txt'))
+ True
+ >>> m2.files()
+ ['b.txt']
+ >>> m2.exact('b.txt')
+ True
+ >>> m2.rel('b.txt')
+ 'b.txt'
+ >>> def bad(f, msg):
+ ... print "%s: %s" % (f, msg)
+ >>> m1.bad = bad
+ >>> m2.bad('x.txt', 'No such file')
+ sub/x.txt: No such file
+ """
+
+ def __init__(self, path, matcher):
+ self._root = matcher._root
+ self._cwd = matcher._cwd
+ self._path = path
+ self._matcher = matcher
+
+ self._files = [f[len(path) + 1:] for f in matcher._files
+ if f.startswith(path + "/")]
+ self._anypats = matcher._anypats
+ self.matchfn = lambda fn: matcher.matchfn(self._path + "/" + fn)
+ self._fmap = set(self._files)
+
+ def bad(self, f, msg):
+ self._matcher.bad(self._path + "/" + f, msg)
+
+def patkind(pat):
+ return _patsplit(pat, None)[0]
+
+def _patsplit(pat, default):
+ """Split a string into an optional pattern kind prefix and the
+ actual pattern."""
+ if ':' in pat:
+ kind, val = pat.split(':', 1)
+ if kind in ('re', 'glob', 'path', 'relglob', 'relpath', 'relre',
+ 'listfile', 'listfile0', 'set'):
+ return kind, val
+ return default, pat
+
+def _globre(pat):
+ "convert a glob pattern into a regexp"
+ i, n = 0, len(pat)
+ res = ''
+ group = 0
+ escape = re.escape
+ def peek():
+ return i < n and pat[i]
+ while i < n:
+ c = pat[i]
+ i += 1
+ if c not in '*?[{},\\':
+ res += escape(c)
+ elif c == '*':
+ if peek() == '*':
+ i += 1
+ res += '.*'
+ else:
+ res += '[^/]*'
+ elif c == '?':
+ res += '.'
+ elif c == '[':
+ j = i
+ if j < n and pat[j] in '!]':
+ j += 1
+ while j < n and pat[j] != ']':
+ j += 1
+ if j >= n:
+ res += '\\['
+ else:
+ stuff = pat[i:j].replace('\\','\\\\')
+ i = j + 1
+ if stuff[0] == '!':
+ stuff = '^' + stuff[1:]
+ elif stuff[0] == '^':
+ stuff = '\\' + stuff
+ res = '%s[%s]' % (res, stuff)
+ elif c == '{':
+ group += 1
+ res += '(?:'
+ elif c == '}' and group:
+ res += ')'
+ group -= 1
+ elif c == ',' and group:
+ res += '|'
+ elif c == '\\':
+ p = peek()
+ if p:
+ i += 1
+ res += escape(p)
+ else:
+ res += escape(c)
+ else:
+ res += escape(c)
+ return res
+
+def _regex(kind, name, tail):
+ '''convert a pattern into a regular expression'''
+ if not name:
+ return ''
+ if kind == 're':
+ return name
+ elif kind == 'path':
+ return '^' + re.escape(name) + '(?:/|$)'
+ elif kind == 'relglob':
+ return '(?:|.*/)' + _globre(name) + tail
+ elif kind == 'relpath':
+ return re.escape(name) + '(?:/|$)'
+ elif kind == 'relre':
+ if name.startswith('^'):
+ return name
+ return '.*' + name
+ return _globre(name) + tail
+
+def _buildmatch(ctx, pats, tail):
+ fset, pats = _expandsets(pats, ctx)
+ if not pats:
+ return "", fset.__contains__
+
+ pat, mf = _buildregexmatch(pats, tail)
+ if fset:
+ return pat, lambda f: f in fset or mf(f)
+ return pat, mf
+
+def _buildregexmatch(pats, tail):
+ """build a matching function from a set of patterns"""
+ try:
+ pat = '(?:%s)' % '|'.join([_regex(k, p, tail) for (k, p) in pats])
+ if len(pat) > 20000:
+ raise OverflowError()
+ return pat, re.compile(pat).match
+ except OverflowError:
+ # We're using a Python with a tiny regex engine and we
+ # made it explode, so we'll divide the pattern list in two
+ # until it works
+ l = len(pats)
+ if l < 2:
+ raise
+ pata, a = _buildregexmatch(pats[:l//2], tail)
+ patb, b = _buildregexmatch(pats[l//2:], tail)
+ return pat, lambda s: a(s) or b(s)
+ except re.error:
+ for k, p in pats:
+ try:
+ re.compile('(?:%s)' % _regex(k, p, tail))
+ except re.error:
+ raise util.Abort(_("invalid pattern (%s): %s") % (k, p))
+ raise util.Abort(_("invalid pattern"))
+
+def _normalize(names, default, root, cwd, auditor):
+ pats = []
+ for kind, name in [_patsplit(p, default) for p in names]:
+ if kind in ('glob', 'relpath'):
+ name = scmutil.canonpath(root, cwd, name, auditor)
+ elif kind in ('relglob', 'path'):
+ name = util.normpath(name)
+ elif kind in ('listfile', 'listfile0'):
+ try:
+ files = util.readfile(name)
+ if kind == 'listfile0':
+ files = files.split('\0')
+ else:
+ files = files.splitlines()
+ files = [f for f in files if f]
+ except EnvironmentError:
+ raise util.Abort(_("unable to read file list (%s)") % name)
+ pats += _normalize(files, default, root, cwd, auditor)
+ continue
+
+ pats.append((kind, name))
+ return pats
+
+def _roots(patterns):
+ r = []
+ for kind, name in patterns:
+ if kind == 'glob': # find the non-glob prefix
+ root = []
+ for p in name.split('/'):
+ if '[' in p or '{' in p or '*' in p or '?' in p:
+ break
+ root.append(p)
+ r.append('/'.join(root) or '.')
+ elif kind in ('relpath', 'path'):
+ r.append(name or '.')
+ elif kind == 'relglob':
+ r.append('.')
+ return r
+
+def _anypats(patterns):
+ for kind, name in patterns:
+ if kind in ('glob', 're', 'relglob', 'relre'):
+ return True
diff --git a/websdk/mercurial/mdiff.py b/websdk/mercurial/mdiff.py
index 458f756..55ea89a 120000..100644
--- a/websdk/mercurial/mdiff.py
+++ b/websdk/mercurial/mdiff.py
@@ -1 +1,287 @@
-/usr/share/pyshared/mercurial/mdiff.py \ No newline at end of file
+# mdiff.py - diff and patch routines for mercurial
+#
+# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from i18n import _
+import bdiff, mpatch, util
+import re, struct
+
+def splitnewlines(text):
+ '''like str.splitlines, but only split on newlines.'''
+ lines = [l + '\n' for l in text.split('\n')]
+ if lines:
+ if lines[-1] == '\n':
+ lines.pop()
+ else:
+ lines[-1] = lines[-1][:-1]
+ return lines
+
+class diffopts(object):
+ '''context is the number of context lines
+ text treats all files as text
+ showfunc enables diff -p output
+ git enables the git extended patch format
+ nodates removes dates from diff headers
+ ignorews ignores all whitespace changes in the diff
+ ignorewsamount ignores changes in the amount of whitespace
+ ignoreblanklines ignores changes whose lines are all blank
+ upgrade generates git diffs to avoid data loss
+ '''
+
+ defaults = {
+ 'context': 3,
+ 'text': False,
+ 'showfunc': False,
+ 'git': False,
+ 'nodates': False,
+ 'ignorews': False,
+ 'ignorewsamount': False,
+ 'ignoreblanklines': False,
+ 'upgrade': False,
+ }
+
+ __slots__ = defaults.keys()
+
+ def __init__(self, **opts):
+ for k in self.__slots__:
+ v = opts.get(k)
+ if v is None:
+ v = self.defaults[k]
+ setattr(self, k, v)
+
+ try:
+ self.context = int(self.context)
+ except ValueError:
+ raise util.Abort(_('diff context lines count must be '
+ 'an integer, not %r') % self.context)
+
+ def copy(self, **kwargs):
+ opts = dict((k, getattr(self, k)) for k in self.defaults)
+ opts.update(kwargs)
+ return diffopts(**opts)
+
+defaultopts = diffopts()
+
+def wsclean(opts, text, blank=True):
+ if opts.ignorews:
+ text = re.sub('[ \t\r]+', '', text)
+ elif opts.ignorewsamount:
+ text = re.sub('[ \t\r]+', ' ', text)
+ text = text.replace(' \n', '\n')
+ if blank and opts.ignoreblanklines:
+ text = re.sub('\n+', '\n', text).strip('\n')
+ return text
+
+def diffline(revs, a, b, opts):
+ parts = ['diff']
+ if opts.git:
+ parts.append('--git')
+ if revs and not opts.git:
+ parts.append(' '.join(["-r %s" % rev for rev in revs]))
+ if opts.git:
+ parts.append('a/%s' % a)
+ parts.append('b/%s' % b)
+ else:
+ parts.append(a)
+ return ' '.join(parts) + '\n'
+
+def unidiff(a, ad, b, bd, fn1, fn2, r=None, opts=defaultopts):
+ def datetag(date, addtab=True):
+ if not opts.git and not opts.nodates:
+ return '\t%s\n' % date
+ if addtab and ' ' in fn1:
+ return '\t\n'
+ return '\n'
+
+ if not a and not b:
+ return ""
+ epoch = util.datestr((0, 0))
+
+ if not opts.text and (util.binary(a) or util.binary(b)):
+ if a and b and len(a) == len(b) and a == b:
+ return ""
+ l = ['Binary file %s has changed\n' % fn1]
+ elif not a:
+ b = splitnewlines(b)
+ if a is None:
+ l1 = '--- /dev/null%s' % datetag(epoch, False)
+ else:
+ l1 = "--- %s%s" % ("a/" + fn1, datetag(ad))
+ l2 = "+++ %s%s" % ("b/" + fn2, datetag(bd))
+ l3 = "@@ -0,0 +1,%d @@\n" % len(b)
+ l = [l1, l2, l3] + ["+" + e for e in b]
+ elif not b:
+ a = splitnewlines(a)
+ l1 = "--- %s%s" % ("a/" + fn1, datetag(ad))
+ if b is None:
+ l2 = '+++ /dev/null%s' % datetag(epoch, False)
+ else:
+ l2 = "+++ %s%s" % ("b/" + fn2, datetag(bd))
+ l3 = "@@ -1,%d +0,0 @@\n" % len(a)
+ l = [l1, l2, l3] + ["-" + e for e in a]
+ else:
+ al = splitnewlines(a)
+ bl = splitnewlines(b)
+ l = list(_unidiff(a, b, al, bl, opts=opts))
+ if not l:
+ return ""
+
+ l.insert(0, "--- a/%s%s" % (fn1, datetag(ad)))
+ l.insert(1, "+++ b/%s%s" % (fn2, datetag(bd)))
+
+ for ln in xrange(len(l)):
+ if l[ln][-1] != '\n':
+ l[ln] += "\n\ No newline at end of file\n"
+
+ if r:
+ l.insert(0, diffline(r, fn1, fn2, opts))
+
+ return "".join(l)
+
+# creates a headerless unified diff
+# t1 and t2 are the text to be diffed
+# l1 and l2 are the text broken up into lines
+def _unidiff(t1, t2, l1, l2, opts=defaultopts):
+ def contextend(l, len):
+ ret = l + opts.context
+ if ret > len:
+ ret = len
+ return ret
+
+ def contextstart(l):
+ ret = l - opts.context
+ if ret < 0:
+ return 0
+ return ret
+
+ lastfunc = [0, '']
+ def yieldhunk(hunk):
+ (astart, a2, bstart, b2, delta) = hunk
+ aend = contextend(a2, len(l1))
+ alen = aend - astart
+ blen = b2 - bstart + aend - a2
+
+ func = ""
+ if opts.showfunc:
+ lastpos, func = lastfunc
+ # walk backwards from the start of the context up to the start of
+ # the previous hunk context until we find a line starting with an
+ # alphanumeric char.
+ for i in xrange(astart - 1, lastpos - 1, -1):
+ if l1[i][0].isalnum():
+ func = ' ' + l1[i].rstrip()[:40]
+ lastfunc[1] = func
+ break
+ # by recording this hunk's starting point as the next place to
+ # start looking for function lines, we avoid reading any line in
+ # the file more than once.
+ lastfunc[0] = astart
+
+ # zero-length hunk ranges report their start line as one less
+ if alen:
+ astart += 1
+ if blen:
+ bstart += 1
+
+ yield "@@ -%d,%d +%d,%d @@%s\n" % (astart, alen,
+ bstart, blen, func)
+ for x in delta:
+ yield x
+ for x in xrange(a2, aend):
+ yield ' ' + l1[x]
+
+ # bdiff.blocks gives us the matching sequences in the files. The loop
+ # below finds the spaces between those matching sequences and translates
+ # them into diff output.
+ #
+ if opts.ignorews or opts.ignorewsamount:
+ t1 = wsclean(opts, t1, False)
+ t2 = wsclean(opts, t2, False)
+
+ diff = bdiff.blocks(t1, t2)
+ hunk = None
+ for i, s1 in enumerate(diff):
+ # The first match is special.
+ # we've either found a match starting at line 0 or a match later
+ # in the file. If it starts later, old and new below will both be
+ # empty and we'll continue to the next match.
+ if i > 0:
+ s = diff[i - 1]
+ else:
+ s = [0, 0, 0, 0]
+ delta = []
+ a1 = s[1]
+ a2 = s1[0]
+ b1 = s[3]
+ b2 = s1[2]
+
+ old = l1[a1:a2]
+ new = l2[b1:b2]
+
+ # bdiff sometimes gives huge matches past eof, this check eats them,
+ # and deals with the special first match case described above
+ if not old and not new:
+ continue
+
+ if opts.ignoreblanklines:
+ if wsclean(opts, "".join(old)) == wsclean(opts, "".join(new)):
+ continue
+
+ astart = contextstart(a1)
+ bstart = contextstart(b1)
+ prev = None
+ if hunk:
+ # join with the previous hunk if it falls inside the context
+ if astart < hunk[1] + opts.context + 1:
+ prev = hunk
+ astart = hunk[1]
+ bstart = hunk[3]
+ else:
+ for x in yieldhunk(hunk):
+ yield x
+ if prev:
+ # we've joined the previous hunk, record the new ending points.
+ hunk[1] = a2
+ hunk[3] = b2
+ delta = hunk[4]
+ else:
+ # create a new hunk
+ hunk = [astart, a2, bstart, b2, delta]
+
+ delta[len(delta):] = [' ' + x for x in l1[astart:a1]]
+ delta[len(delta):] = ['-' + x for x in old]
+ delta[len(delta):] = ['+' + x for x in new]
+
+ if hunk:
+ for x in yieldhunk(hunk):
+ yield x
+
+def patchtext(bin):
+ pos = 0
+ t = []
+ while pos < len(bin):
+ p1, p2, l = struct.unpack(">lll", bin[pos:pos + 12])
+ pos += 12
+ t.append(bin[pos:pos + l])
+ pos += l
+ return "".join(t)
+
+def patch(a, bin):
+ if len(a) == 0:
+ # skip over trivial delta header
+ return buffer(bin, 12)
+ return mpatch.patches(a, [bin])
+
+# similar to difflib.SequenceMatcher.get_matching_blocks
+def get_matching_blocks(a, b):
+ return [(d[0], d[2], d[1] - d[0]) for d in bdiff.blocks(a, b)]
+
+def trivialdiffheader(length):
+ return struct.pack(">lll", 0, 0, length)
+
+patches = mpatch.patches
+patchedsize = mpatch.patchedsize
+textdiff = bdiff.bdiff
diff --git a/websdk/mercurial/merge.py b/websdk/mercurial/merge.py
index 9338027..3f4f65c 120000..100644
--- a/websdk/mercurial/merge.py
+++ b/websdk/mercurial/merge.py
@@ -1 +1,574 @@
-/usr/share/pyshared/mercurial/merge.py \ No newline at end of file
+# merge.py - directory-level update/merge handling for Mercurial
+#
+# Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from node import nullid, nullrev, hex, bin
+from i18n import _
+import scmutil, util, filemerge, copies, subrepo, encoding
+import errno, os, shutil
+
+class mergestate(object):
+ '''track 3-way merge state of individual files'''
+ def __init__(self, repo):
+ self._repo = repo
+ self._dirty = False
+ self._read()
+ def reset(self, node=None):
+ self._state = {}
+ if node:
+ self._local = node
+ shutil.rmtree(self._repo.join("merge"), True)
+ self._dirty = False
+ def _read(self):
+ self._state = {}
+ try:
+ f = self._repo.opener("merge/state")
+ for i, l in enumerate(f):
+ if i == 0:
+ self._local = bin(l[:-1])
+ else:
+ bits = l[:-1].split("\0")
+ self._state[bits[0]] = bits[1:]
+ f.close()
+ except IOError, err:
+ if err.errno != errno.ENOENT:
+ raise
+ self._dirty = False
+ def commit(self):
+ if self._dirty:
+ f = self._repo.opener("merge/state", "w")
+ f.write(hex(self._local) + "\n")
+ for d, v in self._state.iteritems():
+ f.write("\0".join([d] + v) + "\n")
+ f.close()
+ self._dirty = False
+ def add(self, fcl, fco, fca, fd, flags):
+ hash = util.sha1(fcl.path()).hexdigest()
+ self._repo.opener.write("merge/" + hash, fcl.data())
+ self._state[fd] = ['u', hash, fcl.path(), fca.path(),
+ hex(fca.filenode()), fco.path(), flags]
+ self._dirty = True
+ def __contains__(self, dfile):
+ return dfile in self._state
+ def __getitem__(self, dfile):
+ return self._state[dfile][0]
+ def __iter__(self):
+ l = self._state.keys()
+ l.sort()
+ for f in l:
+ yield f
+ def mark(self, dfile, state):
+ self._state[dfile][0] = state
+ self._dirty = True
+ def resolve(self, dfile, wctx, octx):
+ if self[dfile] == 'r':
+ return 0
+ state, hash, lfile, afile, anode, ofile, flags = self._state[dfile]
+ f = self._repo.opener("merge/" + hash)
+ self._repo.wwrite(dfile, f.read(), flags)
+ f.close()
+ fcd = wctx[dfile]
+ fco = octx[ofile]
+ fca = self._repo.filectx(afile, fileid=anode)
+ r = filemerge.filemerge(self._repo, self._local, lfile, fcd, fco, fca)
+ if r is None:
+ # no real conflict
+ del self._state[dfile]
+ elif not r:
+ self.mark(dfile, 'r')
+ return r
+
+def _checkunknown(wctx, mctx, folding):
+ "check for collisions between unknown files and files in mctx"
+ if folding:
+ foldf = util.normcase
+ else:
+ foldf = lambda fn: fn
+ folded = {}
+ for fn in mctx:
+ folded[foldf(fn)] = fn
+ for fn in wctx.unknown():
+ f = foldf(fn)
+ if f in folded and mctx[folded[f]].cmp(wctx[f]):
+ raise util.Abort(_("untracked file in working directory differs"
+ " from file in requested revision: '%s'") % fn)
+
+def _checkcollision(mctx):
+ "check for case folding collisions in the destination context"
+ folded = {}
+ for fn in mctx:
+ fold = encoding.lower(fn)
+ if fold in folded:
+ raise util.Abort(_("case-folding collision between %s and %s")
+ % (fn, folded[fold]))
+ folded[fold] = fn
+
+def _forgetremoved(wctx, mctx, branchmerge):
+ """
+ Forget removed files
+
+ If we're jumping between revisions (as opposed to merging), and if
+ neither the working directory nor the target rev has the file,
+ then we need to remove it from the dirstate, to prevent the
+ dirstate from listing the file when it is no longer in the
+ manifest.
+
+ If we're merging, and the other revision has removed a file
+ that is not present in the working directory, we need to mark it
+ as removed.
+ """
+
+ action = []
+ state = branchmerge and 'r' or 'f'
+ for f in wctx.deleted():
+ if f not in mctx:
+ action.append((f, state))
+
+ if not branchmerge:
+ for f in wctx.removed():
+ if f not in mctx:
+ action.append((f, "f"))
+
+ return action
+
+def manifestmerge(repo, p1, p2, pa, overwrite, partial):
+ """
+ Merge p1 and p2 with ancestor pa and generate merge action list
+
+ overwrite = whether we clobber working files
+ partial = function to filter file lists
+ """
+
+ def fmerge(f, f2, fa):
+ """merge flags"""
+ a, m, n = ma.flags(fa), m1.flags(f), m2.flags(f2)
+ if m == n: # flags agree
+ return m # unchanged
+ if m and n and not a: # flags set, don't agree, differ from parent
+ r = repo.ui.promptchoice(
+ _(" conflicting flags for %s\n"
+ "(n)one, e(x)ec or sym(l)ink?") % f,
+ (_("&None"), _("E&xec"), _("Sym&link")), 0)
+ if r == 1:
+ return "x" # Exec
+ if r == 2:
+ return "l" # Symlink
+ return ""
+ if m and m != a: # changed from a to m
+ return m
+ if n and n != a: # changed from a to n
+ return n
+ return '' # flag was cleared
+
+ def act(msg, m, f, *args):
+ repo.ui.debug(" %s: %s -> %s\n" % (f, msg, m))
+ action.append((f, m) + args)
+
+ action, copy = [], {}
+
+ if overwrite:
+ pa = p1
+ elif pa == p2: # backwards
+ pa = p1.p1()
+ elif pa and repo.ui.configbool("merge", "followcopies", True):
+ dirs = repo.ui.configbool("merge", "followdirs", True)
+ copy, diverge = copies.copies(repo, p1, p2, pa, dirs)
+ for of, fl in diverge.iteritems():
+ act("divergent renames", "dr", of, fl)
+
+ repo.ui.note(_("resolving manifests\n"))
+ repo.ui.debug(" overwrite %s partial %s\n" % (overwrite, bool(partial)))
+ repo.ui.debug(" ancestor %s local %s remote %s\n" % (pa, p1, p2))
+
+ m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
+ copied = set(copy.values())
+
+ if '.hgsubstate' in m1:
+ # check whether sub state is modified
+ for s in p1.substate:
+ if p1.sub(s).dirty():
+ m1['.hgsubstate'] += "+"
+ break
+
+ # Compare manifests
+ for f, n in m1.iteritems():
+ if partial and not partial(f):
+ continue
+ if f in m2:
+ rflags = fmerge(f, f, f)
+ a = ma.get(f, nullid)
+ if n == m2[f] or m2[f] == a: # same or local newer
+ # is file locally modified or flags need changing?
+ # dirstate flags may need to be made current
+ if m1.flags(f) != rflags or n[20:]:
+ act("update permissions", "e", f, rflags)
+ elif n == a: # remote newer
+ act("remote is newer", "g", f, rflags)
+ else: # both changed
+ act("versions differ", "m", f, f, f, rflags, False)
+ elif f in copied: # files we'll deal with on m2 side
+ pass
+ elif f in copy:
+ f2 = copy[f]
+ if f2 not in m2: # directory rename
+ act("remote renamed directory to " + f2, "d",
+ f, None, f2, m1.flags(f))
+ else: # case 2 A,B/B/B or case 4,21 A/B/B
+ act("local copied/moved to " + f2, "m",
+ f, f2, f, fmerge(f, f2, f2), False)
+ elif f in ma: # clean, a different, no remote
+ if n != ma[f]:
+ if repo.ui.promptchoice(
+ _(" local changed %s which remote deleted\n"
+ "use (c)hanged version or (d)elete?") % f,
+ (_("&Changed"), _("&Delete")), 0):
+ act("prompt delete", "r", f)
+ else:
+ act("prompt keep", "a", f)
+ elif n[20:] == "a": # added, no remote
+ act("remote deleted", "f", f)
+ elif n[20:] != "u":
+ act("other deleted", "r", f)
+
+ for f, n in m2.iteritems():
+ if partial and not partial(f):
+ continue
+ if f in m1 or f in copied: # files already visited
+ continue
+ if f in copy:
+ f2 = copy[f]
+ if f2 not in m1: # directory rename
+ act("local renamed directory to " + f2, "d",
+ None, f, f2, m2.flags(f))
+ elif f2 in m2: # rename case 1, A/A,B/A
+ act("remote copied to " + f, "m",
+ f2, f, f, fmerge(f2, f, f2), False)
+ else: # case 3,20 A/B/A
+ act("remote moved to " + f, "m",
+ f2, f, f, fmerge(f2, f, f2), True)
+ elif f not in ma:
+ act("remote created", "g", f, m2.flags(f))
+ elif n != ma[f]:
+ if repo.ui.promptchoice(
+ _("remote changed %s which local deleted\n"
+ "use (c)hanged version or leave (d)eleted?") % f,
+ (_("&Changed"), _("&Deleted")), 0) == 0:
+ act("prompt recreating", "g", f, m2.flags(f))
+
+ return action
+
+def actionkey(a):
+ return a[1] == 'r' and -1 or 0, a
+
+def applyupdates(repo, action, wctx, mctx, actx, overwrite):
+ """apply the merge action list to the working directory
+
+ wctx is the working copy context
+ mctx is the context to be merged into the working copy
+ actx is the context of the common ancestor
+
+ Return a tuple of counts (updated, merged, removed, unresolved) that
+ describes how many files were affected by the update.
+ """
+
+ updated, merged, removed, unresolved = 0, 0, 0, 0
+ ms = mergestate(repo)
+ ms.reset(wctx.p1().node())
+ moves = []
+ action.sort(key=actionkey)
+
+ # prescan for merges
+ for a in action:
+ f, m = a[:2]
+ if m == 'm': # merge
+ f2, fd, flags, move = a[2:]
+ if f == '.hgsubstate': # merged internally
+ continue
+ repo.ui.debug("preserving %s for resolve of %s\n" % (f, fd))
+ fcl = wctx[f]
+ fco = mctx[f2]
+ if mctx == actx: # backwards, use working dir parent as ancestor
+ if fcl.parents():
+ fca = fcl.p1()
+ else:
+ fca = repo.filectx(f, fileid=nullrev)
+ else:
+ fca = fcl.ancestor(fco, actx)
+ if not fca:
+ fca = repo.filectx(f, fileid=nullrev)
+ ms.add(fcl, fco, fca, fd, flags)
+ if f != fd and move:
+ moves.append(f)
+
+ audit = scmutil.pathauditor(repo.root)
+
+ # remove renamed files after safely stored
+ for f in moves:
+ if os.path.lexists(repo.wjoin(f)):
+ repo.ui.debug("removing %s\n" % f)
+ audit(f)
+ os.unlink(repo.wjoin(f))
+
+ numupdates = len(action)
+ for i, a in enumerate(action):
+ f, m = a[:2]
+ repo.ui.progress(_('updating'), i + 1, item=f, total=numupdates,
+ unit=_('files'))
+ if f and f[0] == "/":
+ continue
+ if m == "r": # remove
+ repo.ui.note(_("removing %s\n") % f)
+ audit(f)
+ if f == '.hgsubstate': # subrepo states need updating
+ subrepo.submerge(repo, wctx, mctx, wctx, overwrite)
+ try:
+ util.unlinkpath(repo.wjoin(f))
+ except OSError, inst:
+ if inst.errno != errno.ENOENT:
+ repo.ui.warn(_("update failed to remove %s: %s!\n") %
+ (f, inst.strerror))
+ removed += 1
+ elif m == "m": # merge
+ if f == '.hgsubstate': # subrepo states need updating
+ subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx), overwrite)
+ continue
+ f2, fd, flags, move = a[2:]
+ repo.wopener.audit(fd)
+ r = ms.resolve(fd, wctx, mctx)
+ if r is not None and r > 0:
+ unresolved += 1
+ else:
+ if r is None:
+ updated += 1
+ else:
+ merged += 1
+ util.setflags(repo.wjoin(fd), 'l' in flags, 'x' in flags)
+ if (move and repo.dirstate.normalize(fd) != f
+ and os.path.lexists(repo.wjoin(f))):
+ repo.ui.debug("removing %s\n" % f)
+ audit(f)
+ os.unlink(repo.wjoin(f))
+ elif m == "g": # get
+ flags = a[2]
+ repo.ui.note(_("getting %s\n") % f)
+ t = mctx.filectx(f).data()
+ repo.wwrite(f, t, flags)
+ t = None
+ updated += 1
+ if f == '.hgsubstate': # subrepo states need updating
+ subrepo.submerge(repo, wctx, mctx, wctx, overwrite)
+ elif m == "d": # directory rename
+ f2, fd, flags = a[2:]
+ if f:
+ repo.ui.note(_("moving %s to %s\n") % (f, fd))
+ audit(f)
+ t = wctx.filectx(f).data()
+ repo.wwrite(fd, t, flags)
+ util.unlinkpath(repo.wjoin(f))
+ if f2:
+ repo.ui.note(_("getting %s to %s\n") % (f2, fd))
+ t = mctx.filectx(f2).data()
+ repo.wwrite(fd, t, flags)
+ updated += 1
+ elif m == "dr": # divergent renames
+ fl = a[2]
+ repo.ui.warn(_("note: possible conflict - %s was renamed "
+ "multiple times to:\n") % f)
+ for nf in fl:
+ repo.ui.warn(" %s\n" % nf)
+ elif m == "e": # exec
+ flags = a[2]
+ repo.wopener.audit(f)
+ util.setflags(repo.wjoin(f), 'l' in flags, 'x' in flags)
+ ms.commit()
+ repo.ui.progress(_('updating'), None, total=numupdates, unit=_('files'))
+
+ return updated, merged, removed, unresolved
+
+def recordupdates(repo, action, branchmerge):
+ "record merge actions to the dirstate"
+
+ for a in action:
+ f, m = a[:2]
+ if m == "r": # remove
+ if branchmerge:
+ repo.dirstate.remove(f)
+ else:
+ repo.dirstate.drop(f)
+ elif m == "a": # re-add
+ if not branchmerge:
+ repo.dirstate.add(f)
+ elif m == "f": # forget
+ repo.dirstate.drop(f)
+ elif m == "e": # exec change
+ repo.dirstate.normallookup(f)
+ elif m == "g": # get
+ if branchmerge:
+ repo.dirstate.otherparent(f)
+ else:
+ repo.dirstate.normal(f)
+ elif m == "m": # merge
+ f2, fd, flag, move = a[2:]
+ if branchmerge:
+ # We've done a branch merge, mark this file as merged
+ # so that we properly record the merger later
+ repo.dirstate.merge(fd)
+ if f != f2: # copy/rename
+ if move:
+ repo.dirstate.remove(f)
+ if f != fd:
+ repo.dirstate.copy(f, fd)
+ else:
+ repo.dirstate.copy(f2, fd)
+ else:
+ # We've update-merged a locally modified file, so
+ # we set the dirstate to emulate a normal checkout
+ # of that file some time in the past. Thus our
+ # merge will appear as a normal local file
+ # modification.
+ if f2 == fd: # file not locally copied/moved
+ repo.dirstate.normallookup(fd)
+ if move:
+ repo.dirstate.drop(f)
+ elif m == "d": # directory rename
+ f2, fd, flag = a[2:]
+ if not f2 and f not in repo.dirstate:
+ # untracked file moved
+ continue
+ if branchmerge:
+ repo.dirstate.add(fd)
+ if f:
+ repo.dirstate.remove(f)
+ repo.dirstate.copy(f, fd)
+ if f2:
+ repo.dirstate.copy(f2, fd)
+ else:
+ repo.dirstate.normal(fd)
+ if f:
+ repo.dirstate.drop(f)
+
+def update(repo, node, branchmerge, force, partial, ancestor=None):
+ """
+ Perform a merge between the working directory and the given node
+
+ node = the node to update to, or None if unspecified
+ branchmerge = whether to merge between branches
+ force = whether to force branch merging or file overwriting
+ partial = a function to filter file lists (dirstate not updated)
+
+ The table below shows all the behaviors of the update command
+ given the -c and -C or no options, whether the working directory
+ is dirty, whether a revision is specified, and the relationship of
+ the parent rev to the target rev (linear, on the same named
+ branch, or on another named branch).
+
+ This logic is tested by test-update-branches.t.
+
+ -c -C dirty rev | linear same cross
+ n n n n | ok (1) x
+ n n n y | ok ok ok
+ n n y * | merge (2) (2)
+ n y * * | --- discard ---
+ y n y * | --- (3) ---
+ y n n * | --- ok ---
+ y y * * | --- (4) ---
+
+ x = can't happen
+ * = don't-care
+ 1 = abort: crosses branches (use 'hg merge' or 'hg update -c')
+ 2 = abort: crosses branches (use 'hg merge' to merge or
+ use 'hg update -C' to discard changes)
+ 3 = abort: uncommitted local changes
+ 4 = incompatible options (checked in commands.py)
+
+ Return the same tuple as applyupdates().
+ """
+
+ onode = node
+ wlock = repo.wlock()
+ try:
+ wc = repo[None]
+ if node is None:
+ # tip of current branch
+ try:
+ node = repo.branchtags()[wc.branch()]
+ except KeyError:
+ if wc.branch() == "default": # no default branch!
+ node = repo.lookup("tip") # update to tip
+ else:
+ raise util.Abort(_("branch %s not found") % wc.branch())
+ overwrite = force and not branchmerge
+ pl = wc.parents()
+ p1, p2 = pl[0], repo[node]
+ if ancestor:
+ pa = repo[ancestor]
+ else:
+ pa = p1.ancestor(p2)
+
+ fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)
+
+ ### check phase
+ if not overwrite and len(pl) > 1:
+ raise util.Abort(_("outstanding uncommitted merges"))
+ if branchmerge:
+ if pa == p2:
+ raise util.Abort(_("merging with a working directory ancestor"
+ " has no effect"))
+ elif pa == p1:
+ if p1.branch() == p2.branch():
+ raise util.Abort(_("nothing to merge (use 'hg update'"
+ " or check 'hg heads')"))
+ if not force and (wc.files() or wc.deleted()):
+ raise util.Abort(_("outstanding uncommitted changes "
+ "(use 'hg status' to list changes)"))
+ for s in wc.substate:
+ if wc.sub(s).dirty():
+ raise util.Abort(_("outstanding uncommitted changes in "
+ "subrepository '%s'") % s)
+
+ elif not overwrite:
+ if pa == p1 or pa == p2: # linear
+ pass # all good
+ elif wc.dirty(missing=True):
+ raise util.Abort(_("crosses branches (merge branches or use"
+ " --clean to discard changes)"))
+ elif onode is None:
+ raise util.Abort(_("crosses branches (merge branches or update"
+ " --check to force update)"))
+ else:
+ # Allow jumping branches if clean and specific rev given
+ overwrite = True
+
+ ### calculate phase
+ action = []
+ wc.status(unknown=True) # prime cache
+ folding = not util.checkcase(repo.path)
+ if not force:
+ _checkunknown(wc, p2, folding)
+ if folding:
+ _checkcollision(p2)
+ action += _forgetremoved(wc, p2, branchmerge)
+ action += manifestmerge(repo, wc, p2, pa, overwrite, partial)
+
+ ### apply phase
+ if not branchmerge: # just jump to the new rev
+ fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
+ if not partial:
+ repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
+
+ stats = applyupdates(repo, action, wc, p2, pa, overwrite)
+
+ if not partial:
+ repo.dirstate.setparents(fp1, fp2)
+ recordupdates(repo, action, branchmerge)
+ if not branchmerge:
+ repo.dirstate.setbranch(p2.branch())
+ finally:
+ wlock.release()
+
+ if not partial:
+ repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
+ return stats
diff --git a/websdk/mercurial/minirst.py b/websdk/mercurial/minirst.py
index b09835a..f375def 120000..100644
--- a/websdk/mercurial/minirst.py
+++ b/websdk/mercurial/minirst.py
@@ -1 +1,687 @@
-/usr/share/pyshared/mercurial/minirst.py \ No newline at end of file
+# minirst.py - minimal reStructuredText parser
+#
+# Copyright 2009, 2010 Matt Mackall <mpm@selenic.com> and others
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+"""simplified reStructuredText parser.
+
+This parser knows just enough about reStructuredText to parse the
+Mercurial docstrings.
+
+It cheats in a major way: nested blocks are not really nested. They
+are just indented blocks that look like they are nested. This relies
+on the user to keep the right indentation for the blocks.
+
+Remember to update http://mercurial.selenic.com/wiki/HelpStyleGuide
+when adding support for new constructs.
+"""
+
+import re
+import util, encoding
+from i18n import _
+
+def replace(text, substs):
+ '''
+ Apply a list of (find, replace) pairs to a text.
+
+ >>> replace("foo bar", [('f', 'F'), ('b', 'B')])
+ 'Foo Bar'
+ >>> encoding.encoding = 'latin1'
+ >>> replace('\\x81\\\\', [('\\\\', '/')])
+ '\\x81/'
+ >>> encoding.encoding = 'shiftjis'
+ >>> replace('\\x81\\\\', [('\\\\', '/')])
+ '\\x81\\\\'
+ '''
+
+ # some character encodings (cp932 for Japanese, at least) use
+ # ASCII characters other than control/alphabet/digit as a part of
+ # multi-bytes characters, so direct replacing with such characters
+ # on strings in local encoding causes invalid byte sequences.
+ utext = text.decode(encoding.encoding)
+ for f, t in substs:
+ utext = utext.replace(f, t)
+ return utext.encode(encoding.encoding)
+
+_blockre = re.compile(r"\n(?:\s*\n)+")
+
+def findblocks(text):
+ """Find continuous blocks of lines in text.
+
+ Returns a list of dictionaries representing the blocks. Each block
+ has an 'indent' field and a 'lines' field.
+ """
+ blocks = []
+ for b in _blockre.split(text.lstrip('\n').rstrip()):
+ lines = b.splitlines()
+ if lines:
+ indent = min((len(l) - len(l.lstrip())) for l in lines)
+ lines = [l[indent:] for l in lines]
+ blocks.append(dict(indent=indent, lines=lines))
+ return blocks
+
+def findliteralblocks(blocks):
+ """Finds literal blocks and adds a 'type' field to the blocks.
+
+ Literal blocks are given the type 'literal', all other blocks are
+ given the type 'paragraph'.
+ """
+ i = 0
+ while i < len(blocks):
+ # Searching for a block that looks like this:
+ #
+ # +------------------------------+
+ # | paragraph |
+ # | (ends with "::") |
+ # +------------------------------+
+ # +---------------------------+
+ # | indented literal block |
+ # +---------------------------+
+ blocks[i]['type'] = 'paragraph'
+ if blocks[i]['lines'][-1].endswith('::') and i + 1 < len(blocks):
+ indent = blocks[i]['indent']
+ adjustment = blocks[i + 1]['indent'] - indent
+
+ if blocks[i]['lines'] == ['::']:
+ # Expanded form: remove block
+ del blocks[i]
+ i -= 1
+ elif blocks[i]['lines'][-1].endswith(' ::'):
+ # Partially minimized form: remove space and both
+ # colons.
+ blocks[i]['lines'][-1] = blocks[i]['lines'][-1][:-3]
+ else:
+ # Fully minimized form: remove just one colon.
+ blocks[i]['lines'][-1] = blocks[i]['lines'][-1][:-1]
+
+ # List items are formatted with a hanging indent. We must
+ # correct for this here while we still have the original
+ # information on the indentation of the subsequent literal
+ # blocks available.
+ m = _bulletre.match(blocks[i]['lines'][0])
+ if m:
+ indent += m.end()
+ adjustment -= m.end()
+
+ # Mark the following indented blocks.
+ while i + 1 < len(blocks) and blocks[i + 1]['indent'] > indent:
+ blocks[i + 1]['type'] = 'literal'
+ blocks[i + 1]['indent'] -= adjustment
+ i += 1
+ i += 1
+ return blocks
+
+_bulletre = re.compile(r'(-|[0-9A-Za-z]+\.|\(?[0-9A-Za-z]+\)|\|) ')
+_optionre = re.compile(r'^(-([a-zA-Z0-9]), )?(--[a-z0-9-]+)'
+ r'((.*) +)(.*)$')
+_fieldre = re.compile(r':(?![: ])([^:]*)(?<! ):[ ]+(.*)')
+_definitionre = re.compile(r'[^ ]')
+_tablere = re.compile(r'(=+\s+)*=+')
+
+def splitparagraphs(blocks):
+ """Split paragraphs into lists."""
+ # Tuples with (list type, item regexp, single line items?). Order
+ # matters: definition lists has the least specific regexp and must
+ # come last.
+ listtypes = [('bullet', _bulletre, True),
+ ('option', _optionre, True),
+ ('field', _fieldre, True),
+ ('definition', _definitionre, False)]
+
+ def match(lines, i, itemre, singleline):
+ """Does itemre match an item at line i?
+
+ A list item can be followed by an indented line or another list
+ item (but only if singleline is True).
+ """
+ line1 = lines[i]
+ line2 = i + 1 < len(lines) and lines[i + 1] or ''
+ if not itemre.match(line1):
+ return False
+ if singleline:
+ return line2 == '' or line2[0] == ' ' or itemre.match(line2)
+ else:
+ return line2.startswith(' ')
+
+ i = 0
+ while i < len(blocks):
+ if blocks[i]['type'] == 'paragraph':
+ lines = blocks[i]['lines']
+ for type, itemre, singleline in listtypes:
+ if match(lines, 0, itemre, singleline):
+ items = []
+ for j, line in enumerate(lines):
+ if match(lines, j, itemre, singleline):
+ items.append(dict(type=type, lines=[],
+ indent=blocks[i]['indent']))
+ items[-1]['lines'].append(line)
+ blocks[i:i + 1] = items
+ break
+ i += 1
+ return blocks
+
+_fieldwidth = 12
+
+def updatefieldlists(blocks):
+ """Find key and maximum key width for field lists."""
+ i = 0
+ while i < len(blocks):
+ if blocks[i]['type'] != 'field':
+ i += 1
+ continue
+
+ keywidth = 0
+ j = i
+ while j < len(blocks) and blocks[j]['type'] == 'field':
+ m = _fieldre.match(blocks[j]['lines'][0])
+ key, rest = m.groups()
+ blocks[j]['lines'][0] = rest
+ blocks[j]['key'] = key
+ keywidth = max(keywidth, len(key))
+ j += 1
+
+ for block in blocks[i:j]:
+ block['keywidth'] = keywidth
+ i = j + 1
+
+ return blocks
+
+def updateoptionlists(blocks):
+ i = 0
+ while i < len(blocks):
+ if blocks[i]['type'] != 'option':
+ i += 1
+ continue
+
+ optstrwidth = 0
+ j = i
+ while j < len(blocks) and blocks[j]['type'] == 'option':
+ m = _optionre.match(blocks[j]['lines'][0])
+
+ shortoption = m.group(2)
+ group3 = m.group(3)
+ longoption = group3[2:].strip()
+ desc = m.group(6).strip()
+ longoptionarg = m.group(5).strip()
+ blocks[j]['lines'][0] = desc
+
+ noshortop = ''
+ if not shortoption:
+ noshortop = ' '
+
+ opt = "%s%s" % (shortoption and "-%s " % shortoption or '',
+ ("%s--%s %s") % (noshortop, longoption,
+ longoptionarg))
+ opt = opt.rstrip()
+ blocks[j]['optstr'] = opt
+ optstrwidth = max(optstrwidth, encoding.colwidth(opt))
+ j += 1
+
+ for block in blocks[i:j]:
+ block['optstrwidth'] = optstrwidth
+ i = j + 1
+ return blocks
+
+def prunecontainers(blocks, keep):
+ """Prune unwanted containers.
+
+ The blocks must have a 'type' field, i.e., they should have been
+ run through findliteralblocks first.
+ """
+ pruned = []
+ i = 0
+ while i + 1 < len(blocks):
+ # Searching for a block that looks like this:
+ #
+ # +-------+---------------------------+
+ # | ".. container ::" type |
+ # +---+ |
+ # | blocks |
+ # +-------------------------------+
+ if (blocks[i]['type'] == 'paragraph' and
+ blocks[i]['lines'][0].startswith('.. container::')):
+ indent = blocks[i]['indent']
+ adjustment = blocks[i + 1]['indent'] - indent
+ containertype = blocks[i]['lines'][0][15:]
+ prune = containertype not in keep
+ if prune:
+ pruned.append(containertype)
+
+ # Always delete "..container:: type" block
+ del blocks[i]
+ j = i
+ i -= 1
+ while j < len(blocks) and blocks[j]['indent'] > indent:
+ if prune:
+ del blocks[j]
+ else:
+ blocks[j]['indent'] -= adjustment
+ j += 1
+ i += 1
+ return blocks, pruned
+
+_sectionre = re.compile(r"""^([-=`:.'"~^_*+#])\1+$""")
+
+def findtables(blocks):
+ '''Find simple tables
+
+ Only simple one-line table elements are supported
+ '''
+
+ for block in blocks:
+ # Searching for a block that looks like this:
+ #
+ # === ==== ===
+ # A B C
+ # === ==== === <- optional
+ # 1 2 3
+ # x y z
+ # === ==== ===
+ if (block['type'] == 'paragraph' and
+ len(block['lines']) > 2 and
+ _tablere.match(block['lines'][0]) and
+ block['lines'][0] == block['lines'][-1]):
+ block['type'] = 'table'
+ block['header'] = False
+ div = block['lines'][0]
+
+ # column markers are ASCII so we can calculate column
+ # position in bytes
+ columns = [x for x in xrange(len(div))
+ if div[x] == '=' and (x == 0 or div[x - 1] == ' ')]
+ rows = []
+ for l in block['lines'][1:-1]:
+ if l == div:
+ block['header'] = True
+ continue
+ row = []
+ # we measure columns not in bytes or characters but in
+ # colwidth which makes things tricky
+ pos = columns[0] # leading whitespace is bytes
+ for n, start in enumerate(columns):
+ if n + 1 < len(columns):
+ width = columns[n + 1] - start
+ v = encoding.getcols(l, pos, width) # gather columns
+ pos += len(v) # calculate byte position of end
+ row.append(v.strip())
+ else:
+ row.append(l[pos:].strip())
+ rows.append(row)
+
+ block['table'] = rows
+
+ return blocks
+
+def findsections(blocks):
+ """Finds sections.
+
+ The blocks must have a 'type' field, i.e., they should have been
+ run through findliteralblocks first.
+ """
+ for block in blocks:
+ # Searching for a block that looks like this:
+ #
+ # +------------------------------+
+ # | Section title |
+ # | ------------- |
+ # +------------------------------+
+ if (block['type'] == 'paragraph' and
+ len(block['lines']) == 2 and
+ encoding.colwidth(block['lines'][0]) == len(block['lines'][1]) and
+ _sectionre.match(block['lines'][1])):
+ block['underline'] = block['lines'][1][0]
+ block['type'] = 'section'
+ del block['lines'][1]
+ return blocks
+
+def inlineliterals(blocks):
+ substs = [('``', '"')]
+ for b in blocks:
+ if b['type'] in ('paragraph', 'section'):
+ b['lines'] = [replace(l, substs) for l in b['lines']]
+ return blocks
+
+def hgrole(blocks):
+ substs = [(':hg:`', '"hg '), ('`', '"')]
+ for b in blocks:
+ if b['type'] in ('paragraph', 'section'):
+ # Turn :hg:`command` into "hg command". This also works
+ # when there is a line break in the command and relies on
+ # the fact that we have no stray back-quotes in the input
+ # (run the blocks through inlineliterals first).
+ b['lines'] = [replace(l, substs) for l in b['lines']]
+ return blocks
+
+def addmargins(blocks):
+ """Adds empty blocks for vertical spacing.
+
+ This groups bullets, options, and definitions together with no vertical
+ space between them, and adds an empty block between all other blocks.
+ """
+ i = 1
+ while i < len(blocks):
+ if (blocks[i]['type'] == blocks[i - 1]['type'] and
+ blocks[i]['type'] in ('bullet', 'option', 'field')):
+ i += 1
+ else:
+ blocks.insert(i, dict(lines=[''], indent=0, type='margin'))
+ i += 2
+ return blocks
+
+def prunecomments(blocks):
+ """Remove comments."""
+ i = 0
+ while i < len(blocks):
+ b = blocks[i]
+ if b['type'] == 'paragraph' and (b['lines'][0].startswith('.. ') or
+ b['lines'] == ['..']):
+ del blocks[i]
+ if i < len(blocks) and blocks[i]['type'] == 'margin':
+ del blocks[i]
+ else:
+ i += 1
+ return blocks
+
+_admonitionre = re.compile(r"\.\. (admonition|attention|caution|danger|"
+ r"error|hint|important|note|tip|warning)::",
+ flags=re.IGNORECASE)
+
+def findadmonitions(blocks):
+ """
+ Makes the type of the block an admonition block if
+ the first line is an admonition directive
+ """
+ i = 0
+ while i < len(blocks):
+ m = _admonitionre.match(blocks[i]['lines'][0])
+ if m:
+ blocks[i]['type'] = 'admonition'
+ admonitiontitle = blocks[i]['lines'][0][3:m.end() - 2].lower()
+
+ firstline = blocks[i]['lines'][0][m.end() + 1:]
+ if firstline:
+ blocks[i]['lines'].insert(1, ' ' + firstline)
+
+ blocks[i]['admonitiontitle'] = admonitiontitle
+ del blocks[i]['lines'][0]
+ i = i + 1
+ return blocks
+
+_admonitiontitles = {'attention': _('Attention:'),
+ 'caution': _('Caution:'),
+ 'danger': _('!Danger!') ,
+ 'error': _('Error:'),
+ 'hint': _('Hint:'),
+ 'important': _('Important:'),
+ 'note': _('Note:'),
+ 'tip': _('Tip:'),
+ 'warning': _('Warning!')}
+
+def formatoption(block, width):
+ desc = ' '.join(map(str.strip, block['lines']))
+ colwidth = encoding.colwidth(block['optstr'])
+ usablewidth = width - 1
+ hanging = block['optstrwidth']
+ initindent = '%s%s ' % (block['optstr'], ' ' * ((hanging - colwidth)))
+ hangindent = ' ' * (encoding.colwidth(initindent) + 1)
+ return ' %s\n' % (util.wrap(desc, usablewidth,
+ initindent=initindent,
+ hangindent=hangindent))
+
+def formatblock(block, width):
+ """Format a block according to width."""
+ if width <= 0:
+ width = 78
+ indent = ' ' * block['indent']
+ if block['type'] == 'admonition':
+ admonition = _admonitiontitles[block['admonitiontitle']]
+ hang = len(block['lines'][-1]) - len(block['lines'][-1].lstrip())
+
+ defindent = indent + hang * ' '
+ text = ' '.join(map(str.strip, block['lines']))
+ return '%s\n%s\n' % (indent + admonition,
+ util.wrap(text, width=width,
+ initindent=defindent,
+ hangindent=defindent))
+ if block['type'] == 'margin':
+ return '\n'
+ if block['type'] == 'literal':
+ indent += ' '
+ return indent + ('\n' + indent).join(block['lines']) + '\n'
+ if block['type'] == 'section':
+ underline = encoding.colwidth(block['lines'][0]) * block['underline']
+ return "%s%s\n%s%s\n" % (indent, block['lines'][0],indent, underline)
+ if block['type'] == 'table':
+ table = block['table']
+ # compute column widths
+ widths = [max([encoding.colwidth(e) for e in c]) for c in zip(*table)]
+ text = ''
+ span = sum(widths) + len(widths) - 1
+ indent = ' ' * block['indent']
+ hang = ' ' * (len(indent) + span - widths[-1])
+
+ for row in table:
+ l = []
+ for w, v in zip(widths, row):
+ pad = ' ' * (w - encoding.colwidth(v))
+ l.append(v + pad)
+ l = ' '.join(l)
+ l = util.wrap(l, width=width, initindent=indent, hangindent=hang)
+ if not text and block['header']:
+ text = l + '\n' + indent + '-' * (min(width, span)) + '\n'
+ else:
+ text += l + "\n"
+ return text
+ if block['type'] == 'definition':
+ term = indent + block['lines'][0]
+ hang = len(block['lines'][-1]) - len(block['lines'][-1].lstrip())
+ defindent = indent + hang * ' '
+ text = ' '.join(map(str.strip, block['lines'][1:]))
+ return '%s\n%s\n' % (term, util.wrap(text, width=width,
+ initindent=defindent,
+ hangindent=defindent))
+ subindent = indent
+ if block['type'] == 'bullet':
+ if block['lines'][0].startswith('| '):
+ # Remove bullet for line blocks and add no extra
+            # indentation.
+ block['lines'][0] = block['lines'][0][2:]
+ else:
+ m = _bulletre.match(block['lines'][0])
+ subindent = indent + m.end() * ' '
+ elif block['type'] == 'field':
+ keywidth = block['keywidth']
+ key = block['key']
+
+ subindent = indent + _fieldwidth * ' '
+ if len(key) + 2 > _fieldwidth:
+ # key too large, use full line width
+ key = key.ljust(width)
+ elif keywidth + 2 < _fieldwidth:
+ # all keys are small, add only two spaces
+ key = key.ljust(keywidth + 2)
+ subindent = indent + (keywidth + 2) * ' '
+ else:
+ # mixed sizes, use fieldwidth for this one
+ key = key.ljust(_fieldwidth)
+ block['lines'][0] = key + block['lines'][0]
+ elif block['type'] == 'option':
+ return formatoption(block, width)
+
+ text = ' '.join(map(str.strip, block['lines']))
+ return util.wrap(text, width=width,
+ initindent=indent,
+ hangindent=subindent) + '\n'
+
+def formathtml(blocks):
+ """Format RST blocks as HTML"""
+
+ out = []
+ headernest = ''
+ listnest = []
+
+ def openlist(start, level):
+ if not listnest or listnest[-1][0] != start:
+ listnest.append((start, level))
+ out.append('<%s>\n' % start)
+
+ blocks = [b for b in blocks if b['type'] != 'margin']
+
+ for pos, b in enumerate(blocks):
+ btype = b['type']
+ level = b['indent']
+ lines = b['lines']
+
+ if btype == 'admonition':
+ admonition = _admonitiontitles[b['admonitiontitle']]
+ text = ' '.join(map(str.strip, lines))
+ out.append('<p>\n<b>%s</b> %s\n</p>\n' % (admonition, text))
+ elif btype == 'paragraph':
+ out.append('<p>\n%s\n</p>\n' % '\n'.join(lines))
+ elif btype == 'margin':
+ pass
+ elif btype == 'literal':
+ out.append('<pre>\n%s\n</pre>\n' % '\n'.join(lines))
+ elif btype == 'section':
+ i = b['underline']
+ if i not in headernest:
+ headernest += i
+ level = headernest.index(i) + 1
+ out.append('<h%d>%s</h%d>\n' % (level, lines[0], level))
+ elif btype == 'table':
+ table = b['table']
+ t = []
+ for row in table:
+ l = []
+ for v in zip(row):
+ if not t:
+ l.append('<th>%s</th>' % v)
+ else:
+ l.append('<td>%s</td>' % v)
+ t.append(' <tr>%s</tr>\n' % ''.join(l))
+ out.append('<table>\n%s</table>\n' % ''.join(t))
+ elif btype == 'definition':
+ openlist('dl', level)
+ term = lines[0]
+ text = ' '.join(map(str.strip, lines[1:]))
+ out.append(' <dt>%s\n <dd>%s\n' % (term, text))
+ elif btype == 'bullet':
+ bullet, head = lines[0].split(' ', 1)
+ if bullet == '-':
+ openlist('ul', level)
+ else:
+ openlist('ol', level)
+ out.append(' <li> %s\n' % ' '.join([head] + lines[1:]))
+ elif btype == 'field':
+ openlist('dl', level)
+ key = b['key']
+ text = ' '.join(map(str.strip, lines))
+ out.append(' <dt>%s\n <dd>%s\n' % (key, text))
+ elif btype == 'option':
+ openlist('dl', level)
+ opt = b['optstr']
+ desc = ' '.join(map(str.strip, lines))
+ out.append(' <dt>%s\n <dd>%s\n' % (opt, desc))
+
+ # close lists if indent level of next block is lower
+ if listnest:
+ start, level = listnest[-1]
+ if pos == len(blocks) - 1:
+ out.append('</%s>\n' % start)
+ listnest.pop()
+ else:
+ nb = blocks[pos + 1]
+ ni = nb['indent']
+ if (ni < level or
+ (ni == level and
+ nb['type'] not in 'definition bullet field option')):
+ out.append('</%s>\n' % start)
+ listnest.pop()
+
+ return ''.join(out)
+
+def parse(text, indent=0, keep=None):
+ """Parse text into a list of blocks"""
+ pruned = []
+ blocks = findblocks(text)
+ for b in blocks:
+ b['indent'] += indent
+ blocks = findliteralblocks(blocks)
+ blocks = findtables(blocks)
+ blocks, pruned = prunecontainers(blocks, keep or [])
+ blocks = findsections(blocks)
+ blocks = inlineliterals(blocks)
+ blocks = hgrole(blocks)
+ blocks = splitparagraphs(blocks)
+ blocks = updatefieldlists(blocks)
+ blocks = updateoptionlists(blocks)
+ blocks = addmargins(blocks)
+ blocks = prunecomments(blocks)
+ blocks = findadmonitions(blocks)
+ return blocks, pruned
+
+def formatblocks(blocks, width):
+ text = ''.join(formatblock(b, width) for b in blocks)
+ return text
+
+def format(text, width=80, indent=0, keep=None, style='plain'):
+ """Parse and format the text according to width."""
+ blocks, pruned = parse(text, indent, keep or [])
+ if style == 'html':
+ text = formathtml(blocks)
+ else:
+ text = ''.join(formatblock(b, width) for b in blocks)
+ if keep is None:
+ return text
+ else:
+ return text, pruned
+
+def getsections(blocks):
+ '''return a list of (section name, nesting level, blocks) tuples'''
+ nest = ""
+ level = 0
+ secs = []
+ for b in blocks:
+ if b['type'] == 'section':
+ i = b['underline']
+ if i not in nest:
+ nest += i
+ level = nest.index(i) + 1
+ nest = nest[:level]
+ secs.append((b['lines'][0], level, [b]))
+ else:
+ if not secs:
+ # add an initial empty section
+ secs = [('', 0, [])]
+ secs[-1][2].append(b)
+ return secs
+
+def decorateblocks(blocks, width):
+ '''generate a list of (section name, line text) pairs for search'''
+ lines = []
+ for s in getsections(blocks):
+ section = s[0]
+ text = formatblocks(s[2], width)
+ lines.append([(section, l) for l in text.splitlines(True)])
+ return lines
+
+def maketable(data, indent=0, header=False):
+ '''Generate an RST table for the given table data'''
+
+ widths = [max(encoding.colwidth(e) for e in c) for c in zip(*data)]
+ indent = ' ' * indent
+ div = indent + ' '.join('=' * w for w in widths) + '\n'
+
+ out = [div]
+ for row in data:
+ l = []
+ for w, v in zip(widths, row):
+ pad = ' ' * (w - encoding.colwidth(v))
+ l.append(v + pad)
+ out.append(indent + ' '.join(l) + "\n")
+ if header and len(data) > 1:
+ out.insert(2, div)
+ out.append(div)
+ return ''.join(out)
diff --git a/websdk/mercurial/node.py b/websdk/mercurial/node.py
index e7e4e04..9debeaf 120000..100644
--- a/websdk/mercurial/node.py
+++ b/websdk/mercurial/node.py
@@ -1 +1,18 @@
-/usr/share/pyshared/mercurial/node.py \ No newline at end of file
+# node.py - basic nodeid manipulation for mercurial
+#
+# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+import binascii
+
+nullrev = -1
+nullid = "\0" * 20
+
+# This ugly style has a noticeable effect in manifest parsing
+hex = binascii.hexlify
+bin = binascii.unhexlify
+
+def short(node):
+ return hex(node[:6])
diff --git a/websdk/mercurial/parser.py b/websdk/mercurial/parser.py
index 80b6d10..5c32faa 120000..100644
--- a/websdk/mercurial/parser.py
+++ b/websdk/mercurial/parser.py
@@ -1 +1,95 @@
-/usr/share/pyshared/mercurial/parser.py \ No newline at end of file
+# parser.py - simple top-down operator precedence parser for mercurial
+#
+# Copyright 2010 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+# see http://effbot.org/zone/simple-top-down-parsing.htm and
+# http://eli.thegreenplace.net/2010/01/02/top-down-operator-precedence-parsing/
+# for background
+
+# takes a tokenizer and elements
+# tokenizer is an iterator that returns type, value pairs
+# elements is a mapping of types to binding strength, prefix and infix actions
+# an action is a tree node name, a tree label, and an optional match
+# __call__(program) parses program into a labelled tree
+
+import error
+from i18n import _
+
+class parser(object):
+ def __init__(self, tokenizer, elements, methods=None):
+ self._tokenizer = tokenizer
+ self._elements = elements
+ self._methods = methods
+ self.current = None
+ def _advance(self):
+ 'advance the tokenizer'
+ t = self.current
+ try:
+ self.current = self._iter.next()
+ except StopIteration:
+ pass
+ return t
+ def _match(self, m, pos):
+ 'make sure the tokenizer matches an end condition'
+ if self.current[0] != m:
+ raise error.ParseError(_("unexpected token: %s") % self.current[0],
+ self.current[2])
+ self._advance()
+ def _parse(self, bind=0):
+ token, value, pos = self._advance()
+ # handle prefix rules on current token
+ prefix = self._elements[token][1]
+ if not prefix:
+ raise error.ParseError(_("not a prefix: %s") % token, pos)
+ if len(prefix) == 1:
+ expr = (prefix[0], value)
+ else:
+ if len(prefix) > 2 and prefix[2] == self.current[0]:
+ self._match(prefix[2], pos)
+ expr = (prefix[0], None)
+ else:
+ expr = (prefix[0], self._parse(prefix[1]))
+ if len(prefix) > 2:
+ self._match(prefix[2], pos)
+ # gather tokens until we meet a lower binding strength
+ while bind < self._elements[self.current[0]][0]:
+ token, value, pos = self._advance()
+ e = self._elements[token]
+ # check for suffix - next token isn't a valid prefix
+ if len(e) == 4 and not self._elements[self.current[0]][1]:
+ suffix = e[3]
+ expr = (suffix[0], expr)
+ else:
+ # handle infix rules
+ if len(e) < 3 or not e[2]:
+ raise error.ParseError(_("not an infix: %s") % token, pos)
+ infix = e[2]
+ if len(infix) == 3 and infix[2] == self.current[0]:
+ self._match(infix[2], pos)
+ expr = (infix[0], expr, (None))
+ else:
+ expr = (infix[0], expr, self._parse(infix[1]))
+ if len(infix) == 3:
+ self._match(infix[2], pos)
+ return expr
+ def parse(self, message):
+ 'generate a parse tree from a message'
+ self._iter = self._tokenizer(message)
+ self._advance()
+ res = self._parse()
+ token, value, pos = self.current
+ return res, pos
+ def eval(self, tree):
+ 'recursively evaluate a parse tree using node methods'
+ if not isinstance(tree, tuple):
+ return tree
+ return self._methods[tree[0]](*[self.eval(t) for t in tree[1:]])
+ def __call__(self, message):
+ 'parse a message into a parse tree and evaluate if methods given'
+ t = self.parse(message)
+ if self._methods:
+ return self.eval(t)
+ return t
diff --git a/websdk/mercurial/patch.py b/websdk/mercurial/patch.py
index 87748b9..5e0c6ef 120000..100644
--- a/websdk/mercurial/patch.py
+++ b/websdk/mercurial/patch.py
@@ -1 +1,1869 @@
-/usr/share/pyshared/mercurial/patch.py \ No newline at end of file
+# patch.py - patch file parsing routines
+#
+# Copyright 2006 Brendan Cully <brendan@kublai.com>
+# Copyright 2007 Chris Mason <chris.mason@oracle.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+import cStringIO, email.Parser, os, errno, re
+import tempfile, zlib, shutil
+
+from i18n import _
+from node import hex, nullid, short
+import base85, mdiff, scmutil, util, diffhelpers, copies, encoding, error
+import context
+
+gitre = re.compile('diff --git a/(.*) b/(.*)')
+
+class PatchError(Exception):
+ pass
+
+
+# public functions
+
+def split(stream):
+ '''return an iterator of individual patches from a stream'''
+ def isheader(line, inheader):
+ if inheader and line[0] in (' ', '\t'):
+ # continuation
+ return True
+ if line[0] in (' ', '-', '+'):
+ # diff line - don't check for header pattern in there
+ return False
+ l = line.split(': ', 1)
+ return len(l) == 2 and ' ' not in l[0]
+
+ def chunk(lines):
+ return cStringIO.StringIO(''.join(lines))
+
+ def hgsplit(stream, cur):
+ inheader = True
+
+ for line in stream:
+ if not line.strip():
+ inheader = False
+ if not inheader and line.startswith('# HG changeset patch'):
+ yield chunk(cur)
+ cur = []
+ inheader = True
+
+ cur.append(line)
+
+ if cur:
+ yield chunk(cur)
+
+ def mboxsplit(stream, cur):
+ for line in stream:
+ if line.startswith('From '):
+ for c in split(chunk(cur[1:])):
+ yield c
+ cur = []
+
+ cur.append(line)
+
+ if cur:
+ for c in split(chunk(cur[1:])):
+ yield c
+
+ def mimesplit(stream, cur):
+ def msgfp(m):
+ fp = cStringIO.StringIO()
+ g = email.Generator.Generator(fp, mangle_from_=False)
+ g.flatten(m)
+ fp.seek(0)
+ return fp
+
+ for line in stream:
+ cur.append(line)
+ c = chunk(cur)
+
+ m = email.Parser.Parser().parse(c)
+ if not m.is_multipart():
+ yield msgfp(m)
+ else:
+ ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
+ for part in m.walk():
+ ct = part.get_content_type()
+ if ct not in ok_types:
+ continue
+ yield msgfp(part)
+
+ def headersplit(stream, cur):
+ inheader = False
+
+ for line in stream:
+ if not inheader and isheader(line, inheader):
+ yield chunk(cur)
+ cur = []
+ inheader = True
+ if inheader and not isheader(line, inheader):
+ inheader = False
+
+ cur.append(line)
+
+ if cur:
+ yield chunk(cur)
+
+ def remainder(cur):
+ yield chunk(cur)
+
+ class fiter(object):
+ def __init__(self, fp):
+ self.fp = fp
+
+ def __iter__(self):
+ return self
+
+ def next(self):
+ l = self.fp.readline()
+ if not l:
+ raise StopIteration
+ return l
+
+ inheader = False
+ cur = []
+
+ mimeheaders = ['content-type']
+
+ if not util.safehasattr(stream, 'next'):
+ # http responses, for example, have readline but not next
+ stream = fiter(stream)
+
+ for line in stream:
+ cur.append(line)
+ if line.startswith('# HG changeset patch'):
+ return hgsplit(stream, cur)
+ elif line.startswith('From '):
+ return mboxsplit(stream, cur)
+ elif isheader(line, inheader):
+ inheader = True
+ if line.split(':', 1)[0].lower() in mimeheaders:
+ # let email parser handle this
+ return mimesplit(stream, cur)
+ elif line.startswith('--- ') and inheader:
+ # No evil headers seen by diff start, split by hand
+ return headersplit(stream, cur)
+ # Not enough info, keep reading
+
+ # if we are here, we have a very plain patch
+ return remainder(cur)
+
+def extract(ui, fileobj):
+ '''extract patch from data read from fileobj.
+
+ patch can be a normal patch or contained in an email message.
+
+ return tuple (filename, message, user, date, branch, node, p1, p2).
+ Any item in the returned tuple can be None. If filename is None,
+ fileobj did not contain a patch. Caller must unlink filename when done.'''
+
+ # attempt to detect the start of a patch
+ # (this heuristic is borrowed from quilt)
+ diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
+ r'retrieving revision [0-9]+(\.[0-9]+)*$|'
+ r'---[ \t].*?^\+\+\+[ \t]|'
+ r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
+
+ fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
+ tmpfp = os.fdopen(fd, 'w')
+ try:
+ msg = email.Parser.Parser().parse(fileobj)
+
+ subject = msg['Subject']
+ user = msg['From']
+ if not subject and not user:
+ # Not an email, restore parsed headers if any
+ subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
+
+ gitsendmail = 'git-send-email' in msg.get('X-Mailer', '')
+ # should try to parse msg['Date']
+ date = None
+ nodeid = None
+ branch = None
+ parents = []
+
+ if subject:
+ if subject.startswith('[PATCH'):
+ pend = subject.find(']')
+ if pend >= 0:
+ subject = subject[pend + 1:].lstrip()
+ subject = re.sub(r'\n[ \t]+', ' ', subject)
+ ui.debug('Subject: %s\n' % subject)
+ if user:
+ ui.debug('From: %s\n' % user)
+ diffs_seen = 0
+ ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
+ message = ''
+ for part in msg.walk():
+ content_type = part.get_content_type()
+ ui.debug('Content-Type: %s\n' % content_type)
+ if content_type not in ok_types:
+ continue
+ payload = part.get_payload(decode=True)
+ m = diffre.search(payload)
+ if m:
+ hgpatch = False
+ hgpatchheader = False
+ ignoretext = False
+
+ ui.debug('found patch at byte %d\n' % m.start(0))
+ diffs_seen += 1
+ cfp = cStringIO.StringIO()
+ for line in payload[:m.start(0)].splitlines():
+ if line.startswith('# HG changeset patch') and not hgpatch:
+ ui.debug('patch generated by hg export\n')
+ hgpatch = True
+ hgpatchheader = True
+ # drop earlier commit message content
+ cfp.seek(0)
+ cfp.truncate()
+ subject = None
+ elif hgpatchheader:
+ if line.startswith('# User '):
+ user = line[7:]
+ ui.debug('From: %s\n' % user)
+ elif line.startswith("# Date "):
+ date = line[7:]
+ elif line.startswith("# Branch "):
+ branch = line[9:]
+ elif line.startswith("# Node ID "):
+ nodeid = line[10:]
+ elif line.startswith("# Parent "):
+ parents.append(line[10:])
+ elif not line.startswith("# "):
+ hgpatchheader = False
+ elif line == '---' and gitsendmail:
+ ignoretext = True
+ if not hgpatchheader and not ignoretext:
+ cfp.write(line)
+ cfp.write('\n')
+ message = cfp.getvalue()
+ if tmpfp:
+ tmpfp.write(payload)
+ if not payload.endswith('\n'):
+ tmpfp.write('\n')
+ elif not diffs_seen and message and content_type == 'text/plain':
+ message += '\n' + payload
+ except:
+ tmpfp.close()
+ os.unlink(tmpname)
+ raise
+
+ if subject and not message.startswith(subject):
+ message = '%s\n%s' % (subject, message)
+ tmpfp.close()
+ if not diffs_seen:
+ os.unlink(tmpname)
+ return None, message, user, date, branch, None, None, None
+ p1 = parents and parents.pop(0) or None
+ p2 = parents and parents.pop(0) or None
+ return tmpname, message, user, date, branch, nodeid, p1, p2
+
+class patchmeta(object):
+ """Patched file metadata
+
+ 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
+ or COPY. 'path' is patched file path. 'oldpath' is set to the
+ origin file when 'op' is either COPY or RENAME, None otherwise. If
+ file mode is changed, 'mode' is a tuple (islink, isexec) where
+ 'islink' is True if the file is a symlink and 'isexec' is True if
+ the file is executable. Otherwise, 'mode' is None.
+ """
+ def __init__(self, path):
+ self.path = path
+ self.oldpath = None
+ self.mode = None
+ self.op = 'MODIFY'
+ self.binary = False
+
+ def setmode(self, mode):
+ islink = mode & 020000
+ isexec = mode & 0100
+ self.mode = (islink, isexec)
+
+ def copy(self):
+ other = patchmeta(self.path)
+ other.oldpath = self.oldpath
+ other.mode = self.mode
+ other.op = self.op
+ other.binary = self.binary
+ return other
+
+ def __repr__(self):
+ return "<patchmeta %s %r>" % (self.op, self.path)
+
+def readgitpatch(lr):
+ """extract git-style metadata about patches from <patchname>"""
+
+ # Filter patch for git information
+ gp = None
+ gitpatches = []
+ for line in lr:
+ line = line.rstrip(' \r\n')
+ if line.startswith('diff --git'):
+ m = gitre.match(line)
+ if m:
+ if gp:
+ gitpatches.append(gp)
+ dst = m.group(2)
+ gp = patchmeta(dst)
+ elif gp:
+ if line.startswith('--- '):
+ gitpatches.append(gp)
+ gp = None
+ continue
+ if line.startswith('rename from '):
+ gp.op = 'RENAME'
+ gp.oldpath = line[12:]
+ elif line.startswith('rename to '):
+ gp.path = line[10:]
+ elif line.startswith('copy from '):
+ gp.op = 'COPY'
+ gp.oldpath = line[10:]
+ elif line.startswith('copy to '):
+ gp.path = line[8:]
+ elif line.startswith('deleted file'):
+ gp.op = 'DELETE'
+ elif line.startswith('new file mode '):
+ gp.op = 'ADD'
+ gp.setmode(int(line[-6:], 8))
+ elif line.startswith('new mode '):
+ gp.setmode(int(line[-6:], 8))
+ elif line.startswith('GIT binary patch'):
+ gp.binary = True
+ if gp:
+ gitpatches.append(gp)
+
+ return gitpatches
+
+class linereader(object):
+ # simple class to allow pushing lines back into the input stream
+ def __init__(self, fp):
+ self.fp = fp
+ self.buf = []
+
+ def push(self, line):
+ if line is not None:
+ self.buf.append(line)
+
+ def readline(self):
+ if self.buf:
+ l = self.buf[0]
+ del self.buf[0]
+ return l
+ return self.fp.readline()
+
+ def __iter__(self):
+ while True:
+ l = self.readline()
+ if not l:
+ break
+ yield l
+
+class abstractbackend(object):
+ def __init__(self, ui):
+ self.ui = ui
+
+ def getfile(self, fname):
+ """Return target file data and flags as a (data, (islink,
+ isexec)) tuple.
+ """
+ raise NotImplementedError
+
+ def setfile(self, fname, data, mode, copysource):
+ """Write data to target file fname and set its mode. mode is a
+ (islink, isexec) tuple. If data is None, the file content should
+ be left unchanged. If the file is modified after being copied,
+ copysource is set to the original file name.
+ """
+ raise NotImplementedError
+
+ def unlink(self, fname):
+ """Unlink target file."""
+ raise NotImplementedError
+
+ def writerej(self, fname, failed, total, lines):
+ """Write rejected lines for fname. total is the number of hunks
+ which failed to apply and total the total number of hunks for this
+ files.
+ """
+ pass
+
+ def exists(self, fname):
+ raise NotImplementedError
+
+class fsbackend(abstractbackend):
+ def __init__(self, ui, basedir):
+ super(fsbackend, self).__init__(ui)
+ self.opener = scmutil.opener(basedir)
+
+ def _join(self, f):
+ return os.path.join(self.opener.base, f)
+
+ def getfile(self, fname):
+ path = self._join(fname)
+ if os.path.islink(path):
+ return (os.readlink(path), (True, False))
+ isexec = False
+ try:
+ isexec = os.lstat(path).st_mode & 0100 != 0
+ except OSError, e:
+ if e.errno != errno.ENOENT:
+ raise
+ return (self.opener.read(fname), (False, isexec))
+
+ def setfile(self, fname, data, mode, copysource):
+ islink, isexec = mode
+ if data is None:
+ util.setflags(self._join(fname), islink, isexec)
+ return
+ if islink:
+ self.opener.symlink(data, fname)
+ else:
+ self.opener.write(fname, data)
+ if isexec:
+ util.setflags(self._join(fname), False, True)
+
+ def unlink(self, fname):
+ try:
+ util.unlinkpath(self._join(fname))
+ except OSError, inst:
+ if inst.errno != errno.ENOENT:
+ raise
+
+ def writerej(self, fname, failed, total, lines):
+ fname = fname + ".rej"
+ self.ui.warn(
+ _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
+ (failed, total, fname))
+ fp = self.opener(fname, 'w')
+ fp.writelines(lines)
+ fp.close()
+
+ def exists(self, fname):
+ return os.path.lexists(self._join(fname))
+
+class workingbackend(fsbackend):
+ def __init__(self, ui, repo, similarity):
+ super(workingbackend, self).__init__(ui, repo.root)
+ self.repo = repo
+ self.similarity = similarity
+ self.removed = set()
+ self.changed = set()
+ self.copied = []
+
+ def _checkknown(self, fname):
+ if self.repo.dirstate[fname] == '?' and self.exists(fname):
+ raise PatchError(_('cannot patch %s: file is not tracked') % fname)
+
+ def setfile(self, fname, data, mode, copysource):
+ self._checkknown(fname)
+ super(workingbackend, self).setfile(fname, data, mode, copysource)
+ if copysource is not None:
+ self.copied.append((copysource, fname))
+ self.changed.add(fname)
+
+ def unlink(self, fname):
+ self._checkknown(fname)
+ super(workingbackend, self).unlink(fname)
+ self.removed.add(fname)
+ self.changed.add(fname)
+
+ def close(self):
+ wctx = self.repo[None]
+ addremoved = set(self.changed)
+ for src, dst in self.copied:
+ scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
+ addremoved.discard(src)
+ if (not self.similarity) and self.removed:
+ wctx.forget(sorted(self.removed))
+ if addremoved:
+ cwd = self.repo.getcwd()
+ if cwd:
+ addremoved = [util.pathto(self.repo.root, cwd, f)
+ for f in addremoved]
+ scmutil.addremove(self.repo, addremoved, similarity=self.similarity)
+ return sorted(self.changed)
+
+class filestore(object):
+ def __init__(self, maxsize=None):
+ self.opener = None
+ self.files = {}
+ self.created = 0
+ self.maxsize = maxsize
+ if self.maxsize is None:
+ self.maxsize = 4*(2**20)
+ self.size = 0
+ self.data = {}
+
+ def setfile(self, fname, data, mode, copied=None):
+ if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
+ self.data[fname] = (data, mode, copied)
+ self.size += len(data)
+ else:
+ if self.opener is None:
+ root = tempfile.mkdtemp(prefix='hg-patch-')
+ self.opener = scmutil.opener(root)
+ # Avoid filename issues with these simple names
+ fn = str(self.created)
+ self.opener.write(fn, data)
+ self.created += 1
+ self.files[fname] = (fn, mode, copied)
+
+ def getfile(self, fname):
+ if fname in self.data:
+ return self.data[fname]
+ if not self.opener or fname not in self.files:
+ raise IOError()
+ fn, mode, copied = self.files[fname]
+ return self.opener.read(fn), mode, copied
+
+ def close(self):
+ if self.opener:
+ shutil.rmtree(self.opener.base)
+
+class repobackend(abstractbackend):
+ def __init__(self, ui, repo, ctx, store):
+ super(repobackend, self).__init__(ui)
+ self.repo = repo
+ self.ctx = ctx
+ self.store = store
+ self.changed = set()
+ self.removed = set()
+ self.copied = {}
+
+ def _checkknown(self, fname):
+ if fname not in self.ctx:
+ raise PatchError(_('cannot patch %s: file is not tracked') % fname)
+
+ def getfile(self, fname):
+ try:
+ fctx = self.ctx[fname]
+ except error.LookupError:
+ raise IOError()
+ flags = fctx.flags()
+ return fctx.data(), ('l' in flags, 'x' in flags)
+
+ def setfile(self, fname, data, mode, copysource):
+ if copysource:
+ self._checkknown(copysource)
+ if data is None:
+ data = self.ctx[fname].data()
+ self.store.setfile(fname, data, mode, copysource)
+ self.changed.add(fname)
+ if copysource:
+ self.copied[fname] = copysource
+
+ def unlink(self, fname):
+ self._checkknown(fname)
+ self.removed.add(fname)
+
+ def exists(self, fname):
+ return fname in self.ctx
+
+ def close(self):
+ return self.changed | self.removed
+
+# @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
+unidesc = re.compile('@@ -(\d+)(,(\d+))? \+(\d+)(,(\d+))? @@')
+contextdesc = re.compile('(---|\*\*\*) (\d+)(,(\d+))? (---|\*\*\*)')
+eolmodes = ['strict', 'crlf', 'lf', 'auto']
+
class patchfile(object):
    """Applies the hunks of one patch to a single target file.

    Tracks line offsets, fuzz and skew while applying; hunks that do
    not apply are collected in self.rej and written out by write_rej()
    as a patch(1)-style .rej file.
    """

    def __init__(self, ui, gp, backend, store, eolmode='strict'):
        self.fname = gp.path
        self.eolmode = eolmode
        self.eol = None
        self.backend = backend
        self.ui = ui
        self.lines = []
        self.exists = False
        self.missing = True
        self.mode = gp.mode
        self.copysource = gp.oldpath
        self.create = gp.op in ('ADD', 'COPY', 'RENAME')
        self.remove = gp.op == 'DELETE'
        try:
            if self.copysource is None:
                data, mode = backend.getfile(self.fname)
                self.exists = True
            else:
                # copies/renames read their content from the store
                data, mode = store.getfile(self.copysource)[:2]
                self.exists = backend.exists(self.fname)
            self.missing = False
            if data:
                self.lines = mdiff.splitnewlines(data)
            if self.mode is None:
                self.mode = mode
            if self.lines:
                # Normalize line endings
                if self.lines[0].endswith('\r\n'):
                    self.eol = '\r\n'
                elif self.lines[0].endswith('\n'):
                    self.eol = '\n'
                if eolmode != 'strict':
                    nlines = []
                    for l in self.lines:
                        if l.endswith('\r\n'):
                            l = l[:-2] + '\n'
                        nlines.append(l)
                    self.lines = nlines
        except IOError:
            if self.create:
                self.missing = False
            if self.mode is None:
                self.mode = (False, False)
        if self.missing:
            self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)

        self.hash = {}
        self.dirty = 0
        self.offset = 0
        self.skew = 0
        self.rej = []
        self.fileprinted = False
        self.printfile(False)
        self.hunks = 0

    def writelines(self, fname, lines, mode):
        """Write 'lines' through the backend, restoring the requested
        EOL convention first (unless eolmode is 'strict')."""
        if self.eolmode == 'auto':
            eol = self.eol
        elif self.eolmode == 'crlf':
            eol = '\r\n'
        else:
            eol = '\n'

        if self.eolmode != 'strict' and eol and eol != '\n':
            rawlines = []
            for l in lines:
                if l and l[-1] == '\n':
                    l = l[:-1] + eol
                rawlines.append(l)
            lines = rawlines

        self.backend.setfile(fname, ''.join(lines), mode, self.copysource)

    def printfile(self, warn):
        """Print 'patching file <name>' at most once per file."""
        if self.fileprinted:
            return
        if warn or self.ui.verbose:
            self.fileprinted = True
        s = _("patching file %s\n") % self.fname
        if warn:
            self.ui.warn(s)
        else:
            self.ui.note(s)

    def findlines(self, l, linenum):
        # looks through the hash and finds candidate lines. The
        # result is a list of line numbers sorted based on distance
        # from linenum
        cand = self.hash.get(l, [])
        if len(cand) > 1:
            # resort our list of potentials forward then back.
            cand.sort(key=lambda x: abs(x - linenum))
        return cand

    def write_rej(self):
        # our rejects are a little different from patch(1). This always
        # creates rejects in the same form as the original patch. A file
        # header is inserted so that you can run the reject through patch again
        # without having to type the filename.
        if not self.rej:
            return
        base = os.path.basename(self.fname)
        lines = ["--- %s\n+++ %s\n" % (base, base)]
        for x in self.rej:
            for l in x.hunk:
                lines.append(l)
                if l[-1] != '\n':
                    lines.append("\n\ No newline at end of file\n")
        self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)

    def apply(self, h):
        """Apply one hunk; return 0 on clean apply, the fuzz level when
        fuzz was needed, or -1 when the hunk was rejected."""
        if not h.complete():
            raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
                             (h.number, h.desc, len(h.a), h.lena, len(h.b),
                              h.lenb))

        self.hunks += 1

        if self.missing:
            self.rej.append(h)
            return -1

        if self.exists and self.create:
            if self.copysource:
                # BUGFIX: apply '%' outside _() so the untranslated
                # template is looked up before substitution
                self.ui.warn(_("cannot create %s: destination already "
                               "exists\n") % self.fname)
            else:
                self.ui.warn(_("file %s already exists\n") % self.fname)
            self.rej.append(h)
            return -1

        if isinstance(h, binhunk):
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                newlines = h.new()  # compute once, use for both updates
                self.lines[:] = newlines
                self.offset += len(newlines)
                self.dirty = True
            return 0

        horig = h
        if (self.eolmode in ('crlf', 'lf')
            or self.eolmode == 'auto' and self.eol):
            # If new eols are going to be normalized, then normalize
            # hunk data before patching. Otherwise, preserve input
            # line-endings.
            h = h.getnormalized()

        # fast case first, no offsets, no fuzz
        old = h.old()
        start = h.starta + self.offset
        # zero length hunk ranges already have their start decremented
        if h.lena:
            start -= 1
        orig_start = start
        # if there's skew we want to emit the "(offset %d lines)" even
        # when the hunk cleanly applies at start + skew, so skip the
        # fast case code
        if self.skew == 0 and diffhelpers.testhunk(old, self.lines, start) == 0:
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                self.lines[start : start + h.lena] = h.new()
                self.offset += h.lenb - h.lena
                self.dirty = True
            return 0

        # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
        self.hash = {}
        for x, s in enumerate(self.lines):
            self.hash.setdefault(s, []).append(x)
        if h.hunk[-1][0] != ' ':
            # if the hunk tried to put something at the bottom of the file
            # override the start line and use eof here
            search_start = len(self.lines)
        else:
            search_start = orig_start + self.skew

        for fuzzlen in xrange(3):
            for toponly in [True, False]:
                old = h.old(fuzzlen, toponly)

                cand = self.findlines(old[0][1:], search_start)
                for l in cand:
                    if diffhelpers.testhunk(old, self.lines, l) == 0:
                        newlines = h.new(fuzzlen, toponly)
                        self.lines[l : l + len(old)] = newlines
                        self.offset += len(newlines) - len(old)
                        self.skew = l - orig_start
                        self.dirty = True
                        offset = l - orig_start - fuzzlen
                        if fuzzlen:
                            msg = _("Hunk #%d succeeded at %d "
                                    "with fuzz %d "
                                    "(offset %d lines).\n")
                            self.printfile(True)
                            self.ui.warn(msg %
                                (h.number, l + 1, fuzzlen, offset))
                        else:
                            msg = _("Hunk #%d succeeded at %d "
                                    "(offset %d lines).\n")
                            self.ui.note(msg % (h.number, l + 1, offset))
                        return fuzzlen
        self.printfile(True)
        self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
        self.rej.append(horig)
        return -1

    def close(self):
        """Flush pending changes and rejects; return the reject count."""
        if self.dirty:
            self.writelines(self.fname, self.lines, self.mode)
        self.write_rej()
        return len(self.rej)
+
class hunk(object):
    """One hunk of a text patch, parsed from either unified or context
    diff format.

    After parsing, self.a holds the old-side lines (with their '-'/' '
    marker characters kept) and self.b the new-side lines (markers
    stripped); patchfile.apply() matches self.a against the target and
    splices in self.b.
    """
    def __init__(self, desc, num, lr, context):
        self.number = num           # 1-based hunk number, for messages
        self.desc = desc            # the '@@ ...' / '***' header line
        self.hunk = [desc]          # raw hunk lines, header included
        self.a = []                 # old block, markers preserved
        self.b = []                 # new block, markers stripped
        self.starta = self.lena = None
        self.startb = self.lenb = None
        # lr is None when getnormalized() builds a dummy to fill by hand
        if lr is not None:
            if context:
                self.read_context_hunk(lr)
            else:
                self.read_unified_hunk(lr)

    def getnormalized(self):
        """Return a copy with line endings normalized to LF."""

        def normalize(lines):
            nlines = []
            for line in lines:
                if line.endswith('\r\n'):
                    line = line[:-2] + '\n'
                nlines.append(line)
            return nlines

        # Dummy object, it is rebuilt manually
        nh = hunk(self.desc, self.number, None, None)
        nh.number = self.number
        nh.desc = self.desc
        nh.hunk = self.hunk
        nh.a = normalize(self.a)
        nh.b = normalize(self.b)
        nh.starta = self.starta
        nh.startb = self.startb
        nh.lena = self.lena
        nh.lenb = self.lenb
        return nh

    def read_unified_hunk(self, lr):
        # parse the '@@ -start,len +start,len @@' header
        m = unidesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, foo, self.lena, self.startb, foo2, self.lenb = m.groups()
        # a missing length defaults to 1 per the unified diff format
        if self.lena is None:
            self.lena = 1
        else:
            self.lena = int(self.lena)
        if self.lenb is None:
            self.lenb = 1
        else:
            self.lenb = int(self.lenb)
        self.starta = int(self.starta)
        self.startb = int(self.startb)
        diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a, self.b)
        # if we hit eof before finishing out the hunk, the last line will
        # be zero length. Lets try to fix it up.
        while len(self.hunk[-1]) == 0:
            del self.hunk[-1]
            del self.a[-1]
            del self.b[-1]
            self.lena -= 1
            self.lenb -= 1
        self._fixnewline(lr)

    def read_context_hunk(self, lr):
        # context format: '***' old block, then '---' new block; the
        # two are merged back into a single unified-style self.hunk
        self.desc = lr.readline()
        m = contextdesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        foo, self.starta, foo2, aend, foo3 = m.groups()
        self.starta = int(self.starta)
        if aend is None:
            aend = self.starta
        self.lena = int(aend) - self.starta
        if self.starta:
            self.lena += 1
        # read the old ('***') block
        for x in xrange(self.lena):
            l = lr.readline()
            if l.startswith('---'):
                # lines addition, old block is empty
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('- ') or l.startswith('! '):
                u = '-' + s
            elif l.startswith(' '):
                u = ' ' + s
            else:
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.a.append(u)
            self.hunk.append(u)

        l = lr.readline()
        if l.startswith('\ '):
            # '\ No newline at end of file': drop the phantom newline
            s = self.a[-1][:-1]
            self.a[-1] = s
            self.hunk[-1] = s
            l = lr.readline()
        m = contextdesc.match(l)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        foo, self.startb, foo2, bend, foo3 = m.groups()
        self.startb = int(self.startb)
        if bend is None:
            bend = self.startb
        self.lenb = int(bend) - self.startb
        if self.startb:
            self.lenb += 1
        hunki = 1
        # read the new ('---') block, interleaving additions back into
        # self.hunk at the right position relative to deletions
        for x in xrange(self.lenb):
            l = lr.readline()
            if l.startswith('\ '):
                # XXX: the only way to hit this is with an invalid line range.
                # The no-eol marker is not counted in the line range, but I
                # guess there are diff(1) out there which behave differently.
                s = self.b[-1][:-1]
                self.b[-1] = s
                self.hunk[hunki - 1] = s
                continue
            if not l:
                # line deletions, new block is empty and we hit EOF
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('+ ') or l.startswith('! '):
                u = '+' + s
            elif l.startswith(' '):
                u = ' ' + s
            elif len(self.b) == 0:
                # line deletions, new block is empty
                lr.push(l)
                break
            else:
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.b.append(s)
            # advance hunki past matching/deleted lines, inserting the
            # new line where it belongs
            while True:
                if hunki >= len(self.hunk):
                    h = ""
                else:
                    h = self.hunk[hunki]
                hunki += 1
                if h == u:
                    break
                elif h.startswith('-'):
                    continue
                else:
                    self.hunk.insert(hunki - 1, u)
                    break

        if not self.a:
            # this happens when lines were only added to the hunk
            for x in self.hunk:
                if x.startswith('-') or x.startswith(' '):
                    self.a.append(x)
        if not self.b:
            # this happens when lines were only deleted from the hunk
            for x in self.hunk:
                if x.startswith('+') or x.startswith(' '):
                    self.b.append(x[1:])
        # @@ -start,len +start,len @@
        self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
                                               self.startb, self.lenb)
        self.hunk[0] = self.desc
        self._fixnewline(lr)

    def _fixnewline(self, lr):
        # consume a trailing '\ No newline at end of file' marker, if any
        l = lr.readline()
        if l.startswith('\ '):
            diffhelpers.fix_newline(self.hunk, self.a, self.b)
        else:
            lr.push(l)

    def complete(self):
        # True when both sides were fully read
        return len(self.a) == self.lena and len(self.b) == self.lenb

    def fuzzit(self, l, fuzz, toponly):
        # this removes context lines from the top and bottom of list 'l'. It
        # checks the hunk to make sure only context lines are removed, and then
        # returns a new shortened list of lines.
        fuzz = min(fuzz, len(l)-1)
        if fuzz:
            top = 0
            bot = 0
            hlen = len(self.hunk)
            for x in xrange(hlen - 1):
                # the hunk starts with the @@ line, so use x+1
                if self.hunk[x + 1][0] == ' ':
                    top += 1
                else:
                    break
            if not toponly:
                for x in xrange(hlen - 1):
                    if self.hunk[hlen - bot - 1][0] == ' ':
                        bot += 1
                    else:
                        break

            # top and bot now count context in the hunk
            # adjust them if either one is short
            context = max(top, bot, 3)
            if bot < context:
                bot = max(0, fuzz - (context - bot))
            else:
                bot = min(fuzz, bot)
            if top < context:
                top = max(0, fuzz - (context - top))
            else:
                top = min(fuzz, top)

            return l[top:len(l)-bot]
        return l

    def old(self, fuzz=0, toponly=False):
        # old-side lines, optionally shrunk by fuzz
        return self.fuzzit(self.a, fuzz, toponly)

    def new(self, fuzz=0, toponly=False):
        # new-side lines, optionally shrunk by fuzz
        return self.fuzzit(self.b, fuzz, toponly)
+
class binhunk(object):
    'A binary patch file. Only understands literals so far.'
    def __init__(self, lr):
        self.text = None
        self.hunk = ['GIT binary patch\n']
        self._read(lr)

    def complete(self):
        # parsing succeeded once the payload was decoded
        return self.text is not None

    def new(self):
        # whole-file replacement: the decoded content as a single "line"
        return [self.text]

    def _read(self, lr):
        """Parse a 'literal <size>' git binary hunk: base85-decode each
        line (the leading char encodes its decoded length), inflate and
        verify the advertised size."""
        line = lr.readline()
        self.hunk.append(line)
        while line and not line.startswith('literal '):
            line = lr.readline()
            self.hunk.append(line)
        if not line:
            raise PatchError(_('could not extract binary patch'))
        size = int(line[8:].rstrip())
        dec = []
        line = lr.readline()
        self.hunk.append(line)
        while len(line) > 1:
            l = line[0]
            # 'A'-'Z' encode lengths 1-26, 'a'-'z' lengths 27-52
            if l <= 'Z' and l >= 'A':
                l = ord(l) - ord('A') + 1
            else:
                l = ord(l) - ord('a') + 27
            dec.append(base85.b85decode(line[1:-1])[:l])
            line = lr.readline()
            self.hunk.append(line)
        text = zlib.decompress(''.join(dec))
        if len(text) != size:
            # BUGFIX: the format arguments must be a parenthesized tuple;
            # '... % len(text), size' applied % to len(text) alone, which
            # raised TypeError and passed 'size' as a stray exception arg
            raise PatchError(_('binary patch is %d bytes, not %d') %
                             (len(text), size))
        self.text = text
+
+def parsefilename(str):
+ # --- filename \t|space stuff
+ s = str[4:].rstrip('\r\n')
+ i = s.find('\t')
+ if i < 0:
+ i = s.find(' ')
+ if i < 0:
+ return s
+ return s[:i]
+
def pathstrip(path, strip):
    '''Strip 'strip' leading slash-separated components from 'path'.

    Returns (stripped prefix, remaining path); the prefix is lstripped
    and the remainder rstripped, mirroring patch(1)'s -p handling.
    Raises PatchError when the path has fewer components than 'strip'.
    '''
    if strip == 0:
        return '', path.rstrip()
    end = len(path)
    i = 0
    for done in range(strip):
        i = path.find('/', i)
        if i == -1:
            raise PatchError(_("unable to strip away %d of %d dirs from %s") %
                             (strip - done, strip, path))
        i += 1
        # a run of slashes ('a//b') counts as a single separator
        while i < end - 1 and path[i] == '/':
            i += 1
    return path[:i].lstrip(), path[i:].rstrip()
+
def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip):
    """Build a patchmeta for a plain (non-git) patch from its file
    headers and first hunk, picking which path to patch and whether the
    operation is an ADD or DELETE."""
    nulla = afile_orig == "/dev/null"
    nullb = bfile_orig == "/dev/null"
    # creation/deletion shows /dev/null plus a 0,0 hunk range
    create = nulla and hunk.starta == 0 and hunk.lena == 0
    remove = nullb and hunk.startb == 0 and hunk.lenb == 0
    abase, afile = pathstrip(afile_orig, strip)
    gooda = not nulla and backend.exists(afile)
    bbase, bfile = pathstrip(bfile_orig, strip)
    if afile == bfile:
        goodb = gooda
    else:
        goodb = not nullb and backend.exists(bfile)
    missing = not goodb and not gooda and not create

    # some diff programs apparently produce patches where the afile is
    # not /dev/null, but afile starts with bfile
    abasedir = afile[:afile.rfind('/') + 1]
    bbasedir = bfile[:bfile.rfind('/') + 1]
    if (missing and abasedir == bbasedir and afile.startswith(bfile)
        and hunk.starta == 0 and hunk.lena == 0):
        create = True
        missing = False

    # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
    # diff is between a file and its backup. In this case, the original
    # file should be patched (see original mpatch code).
    isbackup = (abase == bbase and bfile.startswith(afile))
    fname = None
    if not missing:
        # prefer the side that exists; with both, respect backup naming
        if gooda and goodb:
            fname = isbackup and afile or bfile
        elif gooda:
            fname = afile

    if not fname:
        # neither side exists yet: fall back to header names
        if not nullb:
            fname = isbackup and afile or bfile
        elif not nulla:
            fname = afile
        else:
            raise PatchError(_("undefined source and destination files"))

    gp = patchmeta(fname)
    if create:
        gp.op = 'ADD'
    elif remove:
        gp.op = 'DELETE'
    return gp
+
def scangitpatch(lr, firstline):
    """
    Git patches can emit:
    - rename a to b
    - change b
    - copy a to c
    - change c

    We cannot apply this sequence as-is, the renamed 'a' could not be
    found for it would have been renamed already. And we cannot copy
    from 'b' instead because 'b' would have been changed already. So
    we scan the git patch for copy and rename commands so we can
    perform the copies ahead of time.
    """
    pos = 0
    try:
        pos = lr.fp.tell()
        fp = lr.fp
    except IOError:
        # unseekable input (e.g. a pipe): buffer the remainder so the
        # stream can be rewound after the scan
        fp = cStringIO.StringIO(lr.fp.read())
    gitlr = linereader(fp)
    gitlr.push(firstline)
    gitpatches = readgitpatch(gitlr)
    # rewind so the main parser re-reads the patch from where it was
    fp.seek(pos)
    return gitpatches
+
def iterhunks(fp):
    """Read a patch and yield the following events:
    - ("file", afile, bfile, firsthunk): select a new target file.
    - ("hunk", hunk): a new hunk is ready to be applied, follows a
    "file" event.
    - ("git", gitchanges): current diff is in git format, gitchanges
    maps filenames to gitpatch records. Unique event.
    """
    afile = ""
    bfile = ""
    state = None
    hunknum = 0
    emitfile = newfile = False
    gitpatches = None

    # our states
    BFILE = 1
    # context: None = unknown yet, True = context diff, False = unified
    context = None
    lr = linereader(fp)

    while True:
        x = lr.readline()
        if not x:
            break
        if state == BFILE and (
            (not context and x[0] == '@')
            or (context is not False and x.startswith('***************'))
            or x.startswith('GIT binary patch')):
            gp = None
            if (gitpatches and
                (gitpatches[-1][0] == afile or gitpatches[-1][1] == bfile)):
                # this hunk belongs to the pending git metadata entry
                gp = gitpatches.pop()[2]
            if x.startswith('GIT binary patch'):
                h = binhunk(lr)
            else:
                if context is None and x.startswith('***************'):
                    context = True
                h = hunk(x, hunknum + 1, lr, context)
            hunknum += 1
            if emitfile:
                emitfile = False
                yield 'file', (afile, bfile, h, gp and gp.copy() or None)
            yield 'hunk', h
        elif x.startswith('diff --git'):
            m = gitre.match(x)
            if not m:
                continue
            if not gitpatches:
                # scan whole input for git metadata
                gitpatches = [('a/' + gp.path, 'b/' + gp.path, gp) for gp
                              in scangitpatch(lr, x)]
                yield 'git', [g[2].copy() for g in gitpatches
                              if g[2].op in ('COPY', 'RENAME')]
                gitpatches.reverse()
            afile = 'a/' + m.group(1)
            bfile = 'b/' + m.group(2)
            # flush metadata-only entries that precede this file
            while afile != gitpatches[-1][0] and bfile != gitpatches[-1][1]:
                gp = gitpatches.pop()[2]
                yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
            gp = gitpatches[-1][2]
            # copy/rename + modify should modify target, not source
            if gp.op in ('COPY', 'DELETE', 'RENAME', 'ADD') or gp.mode:
                afile = bfile
            newfile = True
        elif x.startswith('---'):
            # check for a unified diff
            l2 = lr.readline()
            if not l2.startswith('+++'):
                lr.push(l2)
                continue
            newfile = True
            context = False
            afile = parsefilename(x)
            bfile = parsefilename(l2)
        elif x.startswith('***'):
            # check for a context diff
            l2 = lr.readline()
            if not l2.startswith('---'):
                lr.push(l2)
                continue
            l3 = lr.readline()
            lr.push(l3)
            if not l3.startswith("***************"):
                lr.push(l2)
                continue
            newfile = True
            context = True
            afile = parsefilename(x)
            bfile = parsefilename(l2)

        if newfile:
            # defer the 'file' event until the first hunk is parsed
            newfile = False
            emitfile = True
            state = BFILE
            hunknum = 0

    # emit remaining hunkless git entries (pure renames/copies/modes)
    while gitpatches:
        gp = gitpatches.pop()[2]
        yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
+
def applydiff(ui, fp, backend, store, strip=1, eolmode='strict'):
    """Reads a patch from fp and tries to apply it.

    Returns 0 for a clean patch, -1 if any rejects were found and 1 if
    there was any fuzz.

    If 'eolmode' is 'strict', the patch content and patched file are
    read in binary mode. Otherwise, line endings are ignored when
    patching then normalized according to 'eolmode'.
    """
    # thin wrapper: run the generic driver with the default patchfile
    # factory
    kwargs = {'strip': strip, 'eolmode': eolmode}
    return _applydiff(ui, fp, patchfile, backend, store, **kwargs)
+
def _applydiff(ui, fp, patcher, backend, store, strip=1,
               eolmode='strict'):
    # Core application loop, driven by iterhunks() events.  Returns 0
    # on clean apply, 1 if any hunk needed fuzz, -1 if any hunk was
    # rejected.

    def pstrip(p):
        # git metadata paths carry an extra 'a/'/'b/' prefix, so one
        # fewer component is stripped than for header paths
        return pathstrip(p, strip - 1)[1]

    rejects = 0
    err = 0
    current_file = None

    for state, values in iterhunks(fp):
        if state == 'hunk':
            if not current_file:
                continue
            ret = current_file.apply(values)
            if ret > 0:
                # applied, but with fuzz
                err = 1
        elif state == 'file':
            # flush the previous file before switching targets
            if current_file:
                rejects += current_file.close()
            current_file = None
            afile, bfile, first_hunk, gp = values
            if gp:
                path = pstrip(gp.path)
                gp.path = pstrip(gp.path)
                if gp.oldpath:
                    gp.oldpath = pstrip(gp.oldpath)
            else:
                gp = makepatchmeta(backend, afile, bfile, first_hunk, strip)
            if gp.op == 'RENAME':
                backend.unlink(gp.oldpath)
            if not first_hunk:
                # metadata-only change: create/delete/copy/mode flip
                if gp.op == 'DELETE':
                    backend.unlink(gp.path)
                    continue
                data, mode = None, None
                if gp.op in ('RENAME', 'COPY'):
                    data, mode = store.getfile(gp.oldpath)[:2]
                if gp.mode:
                    mode = gp.mode
                    if gp.op == 'ADD':
                        # Added files without content have no hunk and
                        # must be created
                        data = ''
                if data or mode:
                    if (gp.op in ('ADD', 'RENAME', 'COPY')
                        and backend.exists(gp.path)):
                        raise PatchError(_("cannot create %s: destination "
                                           "already exists") % gp.path)
                    backend.setfile(gp.path, data, mode, gp.oldpath)
                continue
            try:
                current_file = patcher(ui, gp, backend, store,
                                       eolmode=eolmode)
            except PatchError, inst:
                ui.warn(str(inst) + '\n')
                current_file = None
                rejects += 1
                continue
        elif state == 'git':
            # stash copy/rename sources away before they get modified
            for gp in values:
                path = pstrip(gp.oldpath)
                data, mode = backend.getfile(path)
                store.setfile(path, data, mode)
        else:
            raise util.Abort(_('unsupported parser state: %s') % state)

    if current_file:
        rejects += current_file.close()

    if rejects:
        return -1
    return err
+
def _externalpatch(ui, repo, patcher, patchname, strip, files,
                   similarity):
    """use <patcher> to apply <patchname> to the working directory.
    returns whether patch was applied with fuzz factor."""

    fuzz = False
    args = []
    # ROBUSTNESS: initialize before the loop so a 'with fuzz'/'FAILED'
    # line arriving before any 'patching file' line cannot raise
    # UnboundLocalError on pf/printed_file
    pf = None
    printed_file = False
    cwd = repo.root
    if cwd:
        args.append('-d %s' % util.shellquote(cwd))
    fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
                                         util.shellquote(patchname)))
    try:
        for line in fp:
            line = line.rstrip()
            ui.note(line + '\n')
            if line.startswith('patching file '):
                pf = util.parsepatchoutput(line)
                printed_file = False
                files.add(pf)
            elif line.find('with fuzz') >= 0:
                fuzz = True
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
            elif line.find('saving rejects to file') >= 0:
                ui.warn(line + '\n')
            elif line.find('FAILED') >= 0:
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
    finally:
        if files:
            # register adds/removes (with rename detection) even when
            # the patch command fails part-way
            cfiles = list(files)
            cwd = repo.getcwd()
            if cwd:
                cfiles = [util.pathto(repo.root, cwd, f)
                          for f in cfiles]
            scmutil.addremove(repo, cfiles, similarity=similarity)
    code = fp.close()
    if code:
        raise PatchError(_("patch command failed: %s") %
                         util.explainexit(code)[0])
    return fuzz
+
def patchbackend(ui, backend, patchobj, strip, files=None, eolmode='strict'):
    """Apply 'patchobj' (a path or an open file object) through 'backend'.

    Fills 'files' with the touched file names.  Returns True when the
    patch applied with fuzz, False on a clean apply; raises PatchError
    when any hunk was rejected.
    """
    if files is None:
        files = set()
    if eolmode is None:
        # fall back to the user's configured EOL handling
        eolmode = ui.config('patch', 'eol', 'strict')
    if eolmode.lower() not in eolmodes:
        raise util.Abort(_('unsupported line endings type: %s') % eolmode)
    eolmode = eolmode.lower()

    store = filestore()
    try:
        # a path was given: open it ourselves
        fp = open(patchobj, 'rb')
    except TypeError:
        # not a path: assume an already-open file object
        fp = patchobj
    try:
        status = applydiff(ui, fp, backend, store, strip=strip,
                           eolmode=eolmode)
    finally:
        if fp != patchobj:
            # only close what we opened ourselves
            fp.close()
        files.update(backend.close())
        store.close()
    if status < 0:
        raise PatchError(_('patch failed to apply'))
    return status > 0
+
def internalpatch(ui, repo, patchobj, strip, files=None, eolmode='strict',
                  similarity=0):
    """use builtin patch to apply <patchobj> to the working directory.
    returns whether patch was applied with fuzz factor."""
    # patch straight into the working directory
    return patchbackend(ui, workingbackend(ui, repo, similarity), patchobj,
                        strip, files, eolmode)
+
def patchrepo(ui, repo, ctx, store, patchobj, strip, files=None,
              eolmode='strict'):
    """Apply <patchobj> on top of changectx 'ctx', writing results into
    'store' instead of the working directory."""
    return patchbackend(ui, repobackend(ui, repo, ctx, store), patchobj,
                        strip, files, eolmode)
+
def makememctx(repo, parents, text, user, date, branch, files, store,
               editor=None):
    """Build an in-memory changectx whose file contents are served from
    'store'; optionally run 'editor' to finalize the commit message."""
    def storefilectx(repo, memctx, path):
        # presumably store.getfile returns (data, (islink, isexec), copied)
        data, (islink, isexec), copied = store.getfile(path)
        return context.memfilectx(path, data, islink=islink, isexec=isexec,
                                  copied=copied)
    if branch:
        extra = {'branch': encoding.fromlocal(branch)}
    else:
        extra = {}
    ctx = context.memctx(repo, parents, text, files, storefilectx, user,
                         date, extra)
    if editor:
        ctx._text = editor(repo, ctx, [])
    return ctx
+
+def patch(ui, repo, patchname, strip=1, files=None, eolmode='strict',
+ similarity=0):
+ """Apply <patchname> to the working directory.
+
+ 'eolmode' specifies how end of lines should be handled. It can be:
+ - 'strict': inputs are read in binary mode, EOLs are preserved
+ - 'crlf': EOLs are ignored when patching and reset to CRLF
+ - 'lf': EOLs are ignored when patching and reset to LF
+ - None: get it from user settings, default to 'strict'
+ 'eolmode' is ignored when using an external patcher program.
+
+ Returns whether patch was applied with fuzz factor.
+ """
+ patcher = ui.config('ui', 'patch')
+ if files is None:
+ files = set()
+ try:
+ if patcher:
+ return _externalpatch(ui, repo, patcher, patchname, strip,
+ files, similarity)
+ return internalpatch(ui, repo, patchname, strip, files, eolmode,
+ similarity)
+ except PatchError, err:
+ raise util.Abort(str(err))
+
def changedfiles(ui, repo, patchpath, strip=1):
    """Return the set of file paths the patch at 'patchpath' touches,
    without applying it."""
    backend = fsbackend(ui, repo.root)
    fp = open(patchpath, 'rb')
    try:
        changed = set()
        for state, values in iterhunks(fp):
            # only 'file' events carry path information
            if state in ('hunk', 'git'):
                continue
            if state != 'file':
                raise util.Abort(_('unsupported parser state: %s') % state)
            afile, bfile, first_hunk, gp = values
            if gp:
                # strip the extra 'a/'/'b/' prefix of git metadata paths
                gp.path = pathstrip(gp.path, strip - 1)[1]
                if gp.oldpath:
                    gp.oldpath = pathstrip(gp.oldpath, strip - 1)[1]
            else:
                gp = makepatchmeta(backend, afile, bfile, first_hunk, strip)
            changed.add(gp.path)
            if gp.op == 'RENAME':
                changed.add(gp.oldpath)
        return changed
    finally:
        fp.close()
+
def b85diff(to, tn):
    '''print base85-encoded binary diff'''
    def gitindex(text):
        # git blob id: sha1 over 'blob <len>\0' header plus content
        if not text:
            return hex(nullid)
        s = util.sha1('blob %d\0' % len(text))
        s.update(text)
        return s.hexdigest()

    def fmtline(line):
        # the leading char encodes the decoded length: A-Z -> 1-26,
        # a-z -> 27-52
        n = len(line)
        if n <= 26:
            marker = chr(ord('A') + n - 1)
        else:
            marker = chr(n - 26 + ord('a') - 1)
        return '%c%s\n' % (marker, base85.b85encode(line, True))

    def chunk(text, csize=52):
        for start in xrange(0, len(text), csize):
            yield text[start:start + csize]

    tohash = gitindex(to)
    tnhash = gitindex(tn)
    if tohash == tnhash:
        return ""

    # TODO: deltas
    ret = ['index %s..%s\nGIT binary patch\nliteral %s\n' %
           (tohash, tnhash, len(tn))]
    for piece in chunk(zlib.compress(tn)):
        ret.append(fmtline(piece))
    ret.append('\n')
    return ''.join(ret)
+
# internal control-flow exception: raised by diff()'s losedata callback
# and caught in diff() to restart output in git format
class GitDiffRequired(Exception):
    pass
+
def diffopts(ui, opts=None, untrusted=False):
    """Build an mdiff.diffopts from command options plus the [diff]
    configuration section."""
    def get(key, name=None, getter=ui.configbool):
        # a truthy command-line option overrides the configuration
        v = opts and opts.get(key)
        if v:
            return v
        return getter('diff', name or key, None, untrusted=untrusted)
    return mdiff.diffopts(
        text=opts and opts.get('text'),
        git=get('git'),
        nodates=get('nodates'),
        showfunc=get('show_function', 'showfunc'),
        ignorews=get('ignore_all_space', 'ignorews'),
        ignorewsamount=get('ignore_space_change', 'ignorewsamount'),
        ignoreblanklines=get('ignore_blank_lines', 'ignoreblanklines'),
        context=get('unified', getter=ui.config))
+
def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
         losedatafn=None, prefix=''):
    '''yields diff of changes to files between two nodes, or node and
    working directory.

    if node1 is None, use first dirstate parent instead.
    if node2 is None, compare node1 with working directory.

    losedatafn(**kwarg) is a callable run when opts.upgrade=True and
    every time some change cannot be represented with the current
    patch format. Return False to upgrade to git patch format, True to
    accept the loss or raise an exception to abort the diff. It is
    called with the name of current file being diffed as 'fn'. If set
    to None, patches will always be upgraded to git format when
    necessary.

    prefix is a filename prefix that is prepended to all filenames on
    display (used for subrepos).
    '''

    if opts is None:
        opts = mdiff.defaultopts

    if not node1 and not node2:
        node1 = repo.dirstate.p1()

    def lrugetfilectx():
        # small LRU cache (20 entries) of filelogs keyed by path
        cache = {}
        order = []
        def getfilectx(f, ctx):
            fctx = ctx.filectx(f, filelog=cache.get(f))
            if f not in cache:
                if len(cache) > 20:
                    del cache[order.pop(0)]
                cache[f] = fctx.filelog()
            else:
                order.remove(f)
            order.append(f)
            return fctx
        return getfilectx
    getfilectx = lrugetfilectx()

    ctx1 = repo[node1]
    ctx2 = repo[node2]

    if not changes:
        changes = repo.status(ctx1, ctx2, match=match)
    modified, added, removed = changes[:3]

    if not modified and not added and not removed:
        return []

    revs = None
    if not repo.ui.quiet:
        hexfunc = repo.ui.debugflag and hex or short
        revs = [hexfunc(node) for node in [node1, node2] if node]

    copy = {}
    if opts.git or opts.upgrade:
        # copy/rename information is only needed for git-style diffs
        copy = copies.copies(repo, ctx1, ctx2, repo[nullid])[0]

    difffn = lambda opts, losedata: trydiff(repo, revs, ctx1, ctx2,
                modified, added, removed, copy, getfilectx, opts, losedata, prefix)
    if opts.upgrade and not opts.git:
        try:
            def losedata(fn):
                # caller vetoes the data loss -> switch to git format
                if not losedatafn or not losedatafn(fn=fn):
                    raise GitDiffRequired()
            # Buffer the whole output until we are sure it can be generated
            return list(difffn(opts.copy(git=False), losedata))
        except GitDiffRequired:
            return difffn(opts.copy(git=True), None)
    else:
        return difffn(opts, None)
+
def difflabel(func, *args, **kw):
    '''yields 2-tuples of (output, label) based on the output of func()'''
    headprefixes = [('diff', 'diff.diffline'),
                    ('copy', 'diff.extended'),
                    ('rename', 'diff.extended'),
                    ('old', 'diff.extended'),
                    ('new', 'diff.extended'),
                    ('deleted', 'diff.extended'),
                    ('---', 'diff.file_a'),
                    ('+++', 'diff.file_b')]
    textprefixes = [('@', 'diff.hunk'),
                    ('-', 'diff.deleted'),
                    ('+', 'diff.inserted')]
    inheader = False
    for chunk in func(*args, **kw):
        for n, line in enumerate(chunk.split('\n')):
            if n:
                # re-emit the newline swallowed by split()
                yield ('\n', '')
            # header tracking: a header starts at any line that does not
            # look like hunk content, and ends at the next '@@' line
            if inheader:
                if line.startswith('@'):
                    inheader = False
            elif line and line[0] not in ' +-@\\':
                inheader = True
            stripline = line
            if not inheader and line and line[0] in '+-':
                # highlight trailing whitespace, but only in changed lines
                stripline = line.rstrip()
            prefixes = inheader and headprefixes or textprefixes
            matched = False
            for prefix, label in prefixes:
                if stripline.startswith(prefix):
                    yield (stripline, label)
                    matched = True
                    break
            if not matched:
                yield (line, '')
            if line != stripline:
                yield (line[len(stripline):], 'diff.trailingwhitespace')
+
def diffui(*args, **kw):
    '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
    # same arguments as diff(); labels come from difflabel()'s tables
    return difflabel(diff, *args, **kw)
+
+
+def _addmodehdr(header, omode, nmode):
+ if omode != nmode:
+ header.append('old mode %s\n' % omode)
+ header.append('new mode %s\n' % nmode)
+
def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
            copy, getfilectx, opts, losedatafn, prefix):
    # Generate the actual diff chunks for the given file sets.  Calls
    # losedatafn(fn) whenever a change cannot be represented in the
    # plain (non-git) patch format.

    def join(f):
        return os.path.join(prefix, f)

    date1 = util.datestr(ctx1.date())
    man1 = ctx1.manifest()

    gone = set()
    # flag char -> git mode string
    gitmode = {'l': '120000', 'x': '100755', '': '100644'}

    # reverse mapping of the copy dict (destination -> source and back)
    copyto = dict([(v, k) for k, v in copy.items()])

    if opts.git:
        revs = None

    for f in sorted(modified + added + removed):
        to = None
        tn = None
        dodiff = True
        header = []
        if f in man1:
            to = getfilectx(f, ctx1).data()
        if f not in removed:
            tn = getfilectx(f, ctx2).data()
        a, b = f, f
        if opts.git or losedatafn:
            if f in added:
                mode = gitmode[ctx2.flags(f)]
                if f in copy or f in copyto:
                    # the addition is really a copy or a rename
                    if opts.git:
                        if f in copy:
                            a = copy[f]
                        else:
                            a = copyto[f]
                        omode = gitmode[man1.flags(a)]
                        _addmodehdr(header, omode, mode)
                        if a in removed and a not in gone:
                            op = 'rename'
                            gone.add(a)
                        else:
                            op = 'copy'
                        header.append('%s from %s\n' % (op, join(a)))
                        header.append('%s to %s\n' % (op, join(f)))
                        to = getfilectx(a, ctx1).data()
                    else:
                        losedatafn(f)
                else:
                    if opts.git:
                        header.append('new file mode %s\n' % mode)
                    elif ctx2.flags(f):
                        losedatafn(f)
                # In theory, if tn was copied or renamed we should check
                # if the source is binary too but the copy record already
                # forces git mode.
                if util.binary(tn):
                    if opts.git:
                        dodiff = 'binary'
                    else:
                        losedatafn(f)
                if not opts.git and not tn:
                    # regular diffs cannot represent new empty file
                    losedatafn(f)
            elif f in removed:
                if opts.git:
                    # have we already reported a copy above?
                    if ((f in copy and copy[f] in added
                         and copyto[copy[f]] == f) or
                        (f in copyto and copyto[f] in added
                         and copy[copyto[f]] == f)):
                        dodiff = False
                    else:
                        header.append('deleted file mode %s\n' %
                                      gitmode[man1.flags(f)])
                elif not to or util.binary(to):
                    # regular diffs cannot represent empty file deletion
                    losedatafn(f)
            else:
                # plain modification: emit mode headers / detect binary
                oflag = man1.flags(f)
                nflag = ctx2.flags(f)
                binary = util.binary(to) or util.binary(tn)
                if opts.git:
                    _addmodehdr(header, gitmode[oflag], gitmode[nflag])
                    if binary:
                        dodiff = 'binary'
                elif binary or nflag != oflag:
                    losedatafn(f)
        if opts.git:
            header.insert(0, mdiff.diffline(revs, join(a), join(b), opts))

        if dodiff:
            if dodiff == 'binary':
                text = b85diff(to, tn)
            else:
                text = mdiff.unidiff(to, date1,
                                    # ctx2 date may be dynamic
                                    tn, util.datestr(ctx2.date()),
                                    join(a), join(b), revs, opts=opts)
            # mode-only changes have a header but no text
            if header and (text or len(header) > 1):
                yield ''.join(header)
            if text:
                yield text
+
def diffstatsum(stats):
    """Collapse diffstatdata() tuples into overall totals.

    Returns (maxfile, maxtotal, addtotal, removetotal, binary)."""
    maxfile = maxtotal = addtotal = removetotal = 0
    binary = False
    for filename, adds, removes, isbinary in stats:
        maxfile = max(maxfile, encoding.colwidth(filename))
        maxtotal = max(maxtotal, adds + removes)
        addtotal += adds
        removetotal += removes
        binary = binary or isbinary

    return maxfile, maxtotal, addtotal, removetotal, binary
+
def diffstatdata(lines):
    """Parse diff output lines into [(filename, adds, removes, isbinary)]."""
    diffre = re.compile(r'^diff .*-r [a-z0-9]+\s(.*)$')

    results = []
    filename, adds, removes, isbinary = None, 0, 0, False

    for line in lines:
        if line.startswith('diff'):
            # a new file header: flush the previous file's counters
            if filename:
                results.append((filename, adds, removes, isbinary))
            # set numbers to 0 anyway when starting new file
            adds, removes, isbinary = 0, 0, False
            if line.startswith('diff --git'):
                filename = gitre.search(line).group(1)
            elif line.startswith('diff -r'):
                # format: "diff -r ... -r ... filename"
                filename = diffre.search(line).group(1)
        elif line.startswith('+') and not line.startswith('+++'):
            adds += 1
        elif line.startswith('-') and not line.startswith('---'):
            removes += 1
        elif (line.startswith('GIT binary patch') or
              line.startswith('Binary file')):
            isbinary = True
    # flush the last file
    if filename:
        results.append((filename, adds, removes, isbinary))
    return results
+
def diffstat(lines, width=80, git=False):
    """Render diff 'lines' as a classic diffstat histogram string."""
    stats = diffstatdata(lines)
    maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)

    countwidth = len(str(maxtotal))
    if hasbinary and countwidth < 3:
        # leave room for the 'Bin' marker
        countwidth = 3
    graphwidth = width - countwidth - maxname - 6
    if graphwidth < 10:
        graphwidth = 10

    def scale(i):
        if maxtotal <= graphwidth:
            return i
        # If diffstat runs out of room it doesn't print anything,
        # which isn't very useful, so always print at least one + or -
        # if there were at least some changes.
        return max(i * graphwidth // maxtotal, int(bool(i)))

    output = []
    for filename, adds, removes, isbinary in stats:
        count = isbinary and 'Bin' or (adds + removes)
        pluses = '+' * scale(adds)
        minuses = '-' * scale(removes)
        output.append(' %s%s | %*s %s%s\n' %
                      (filename, ' ' * (maxname - encoding.colwidth(filename)),
                       countwidth, count, pluses, minuses))

    if stats:
        output.append(_(' %d files changed, %d insertions(+), %d deletions(-)\n')
                      % (len(stats), totaladds, totalremoves))

    return ''.join(output)
+
def diffstatui(*args, **kw):
    '''like diffstat(), but yields 2-tuples of (output, label) for
    ui.write()
    '''
    for row in diffstat(*args, **kw).splitlines():
        if row and row[-1] in '+-':
            # histogram row: split the graph off and label its two runs
            name, graph = row.rsplit(' ', 1)
            yield (name + ' ', '')
            for pattern, label in ((r'\++', 'diffstat.inserted'),
                                   (r'-+', 'diffstat.deleted')):
                match = re.search(pattern, graph)
                if match:
                    yield (match.group(0), label)
        else:
            yield (row, '')
        yield ('\n', '')
diff --git a/websdk/mercurial/posix.py b/websdk/mercurial/posix.py
index 08491f6..46c3705 120000..100644
--- a/websdk/mercurial/posix.py
+++ b/websdk/mercurial/posix.py
@@ -1 +1,416 @@
-/usr/share/pyshared/mercurial/posix.py \ No newline at end of file
+# posix.py - Posix utility function implementations for Mercurial
+#
+# Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from i18n import _
+import os, sys, errno, stat, getpass, pwd, grp, tempfile, unicodedata
+
+posixfile = open
+nulldev = '/dev/null'
+normpath = os.path.normpath
+samestat = os.path.samestat
+oslink = os.link
+unlink = os.unlink
+rename = os.rename
+expandglobs = False
+
+umask = os.umask(0)
+os.umask(umask)
+
+def openhardlinks():
+ '''return true if it is safe to hold open file handles to hardlinks'''
+ return True
+
+def nlinks(name):
+ '''return number of hardlinks for the given file'''
+ return os.lstat(name).st_nlink
+
+def parsepatchoutput(output_line):
+ """parses the output produced by patch and returns the filename"""
+ pf = output_line[14:]
+ if os.sys.platform == 'OpenVMS':
+ if pf[0] == '`':
+ pf = pf[1:-1] # Remove the quotes
+ else:
+ if pf.startswith("'") and pf.endswith("'") and " " in pf:
+ pf = pf[1:-1] # Remove the quotes
+ return pf
+
+def sshargs(sshcmd, host, user, port):
+ '''Build argument list for ssh'''
+ args = user and ("%s@%s" % (user, host)) or host
+ return port and ("%s -p %s" % (args, port)) or args
+
+def isexec(f):
+ """check whether a file is executable"""
+ return (os.lstat(f).st_mode & 0100 != 0)
+
+def setflags(f, l, x):
+ s = os.lstat(f).st_mode
+ if l:
+ if not stat.S_ISLNK(s):
+ # switch file to link
+ fp = open(f)
+ data = fp.read()
+ fp.close()
+ os.unlink(f)
+ try:
+ os.symlink(data, f)
+ except OSError:
+ # failed to make a link, rewrite file
+ fp = open(f, "w")
+ fp.write(data)
+ fp.close()
+ # no chmod needed at this point
+ return
+ if stat.S_ISLNK(s):
+ # switch link to file
+ data = os.readlink(f)
+ os.unlink(f)
+ fp = open(f, "w")
+ fp.write(data)
+ fp.close()
+ s = 0666 & ~umask # avoid restatting for chmod
+
+ sx = s & 0100
+ if x and not sx:
+ # Turn on +x for every +r bit when making a file executable
+ # and obey umask.
+ os.chmod(f, s | (s & 0444) >> 2 & ~umask)
+ elif not x and sx:
+ # Turn off all +x bits
+ os.chmod(f, s & 0666)
+
+def copymode(src, dst, mode=None):
+ '''Copy the file mode from the file at path src to dst.
+ If src doesn't exist, we're using mode instead. If mode is None, we're
+ using umask.'''
+ try:
+ st_mode = os.lstat(src).st_mode & 0777
+ except OSError, inst:
+ if inst.errno != errno.ENOENT:
+ raise
+ st_mode = mode
+ if st_mode is None:
+ st_mode = ~umask
+ st_mode &= 0666
+ os.chmod(dst, st_mode)
+
+def checkexec(path):
+ """
+ Check whether the given path is on a filesystem with UNIX-like exec flags
+
+ Requires a directory (like /foo/.hg)
+ """
+
+ # VFAT on some Linux versions can flip mode but it doesn't persist
+ # a FS remount. Frequently we can detect it if files are created
+ # with exec bit on.
+
+ try:
+ EXECFLAGS = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
+ fh, fn = tempfile.mkstemp(dir=path, prefix='hg-checkexec-')
+ try:
+ os.close(fh)
+ m = os.stat(fn).st_mode & 0777
+ new_file_has_exec = m & EXECFLAGS
+ os.chmod(fn, m ^ EXECFLAGS)
+ exec_flags_cannot_flip = ((os.stat(fn).st_mode & 0777) == m)
+ finally:
+ os.unlink(fn)
+ except (IOError, OSError):
+ # we don't care, the user probably won't be able to commit anyway
+ return False
+ return not (new_file_has_exec or exec_flags_cannot_flip)
+
+def checklink(path):
+ """check whether the given path is on a symlink-capable filesystem"""
+ # mktemp is not racy because symlink creation will fail if the
+ # file already exists
+ name = tempfile.mktemp(dir=path, prefix='hg-checklink-')
+ try:
+ os.symlink(".", name)
+ os.unlink(name)
+ return True
+ except (OSError, AttributeError):
+ return False
+
+def checkosfilename(path):
+ '''Check that the base-relative path is a valid filename on this platform.
+ Returns None if the path is ok, or a UI string describing the problem.'''
+ pass # on posix platforms, every path is ok
+
+def setbinary(fd):
+ pass
+
+def pconvert(path):
+ return path
+
+def localpath(path):
+ return path
+
+def samefile(fpath1, fpath2):
+ """Returns whether path1 and path2 refer to the same file. This is only
+ guaranteed to work for files, not directories."""
+ return os.path.samefile(fpath1, fpath2)
+
+def samedevice(fpath1, fpath2):
+ """Returns whether fpath1 and fpath2 are on the same device. This is only
+ guaranteed to work for files, not directories."""
+ st1 = os.lstat(fpath1)
+ st2 = os.lstat(fpath2)
+ return st1.st_dev == st2.st_dev
+
+# os.path.normcase is a no-op, which doesn't help us on non-native filesystems
+def normcase(path):
+ return path.lower()
+
+if sys.platform == 'darwin':
+ import fcntl # only needed on darwin, missing on jython
+
+ def normcase(path):
+ try:
+ u = path.decode('utf-8')
+ except UnicodeDecodeError:
+ # percent-encode any characters that don't round-trip
+ p2 = path.decode('utf-8', 'ignore').encode('utf-8')
+ s = ""
+ pos = 0
+ for c in path:
+ if p2[pos:pos + 1] == c:
+ s += c
+ pos += 1
+ else:
+ s += "%%%02X" % ord(c)
+ u = s.decode('utf-8')
+
+ # Decompose then lowercase (HFS+ technote specifies lower)
+ return unicodedata.normalize('NFD', u).lower().encode('utf-8')
+
+ def realpath(path):
+ '''
+ Returns the true, canonical file system path equivalent to the given
+ path.
+
+ Equivalent means, in this case, resulting in the same, unique
+ file system link to the path. Every file system entry, whether a file,
+ directory, hard link or symbolic link or special, will have a single
+ path preferred by the system, but may allow multiple, differing path
+ lookups to point to it.
+
+ Most regular UNIX file systems only allow a file system entry to be
+ looked up by its distinct path. Obviously, this does not apply to case
+ insensitive file systems, whether case preserving or not. The most
+ complex issue to deal with is file systems transparently reencoding the
+ path, such as the non-standard Unicode normalisation required for HFS+
+ and HFSX.
+ '''
+ # Constants copied from /usr/include/sys/fcntl.h
+ F_GETPATH = 50
+ O_SYMLINK = 0x200000
+
+ try:
+ fd = os.open(path, O_SYMLINK)
+ except OSError, err:
+ if err.errno == errno.ENOENT:
+ return path
+ raise
+
+ try:
+ return fcntl.fcntl(fd, F_GETPATH, '\0' * 1024).rstrip('\0')
+ finally:
+ os.close(fd)
+elif sys.version_info < (2, 4, 2, 'final'):
+ # Workaround for http://bugs.python.org/issue1213894 (os.path.realpath
+ # didn't resolve symlinks that were the first component of the path.)
+ def realpath(path):
+ if os.path.isabs(path):
+ return os.path.realpath(path)
+ else:
+ return os.path.realpath('./' + path)
+else:
+ # Fallback to the likely inadequate Python builtin function.
+ realpath = os.path.realpath
+
+def shellquote(s):
+ if os.sys.platform == 'OpenVMS':
+ return '"%s"' % s
+ else:
+ return "'%s'" % s.replace("'", "'\\''")
+
+def quotecommand(cmd):
+ return cmd
+
+def popen(command, mode='r'):
+ return os.popen(command, mode)
+
+def testpid(pid):
+ '''return False if pid dead, True if running or not sure'''
+ if os.sys.platform == 'OpenVMS':
+ return True
+ try:
+ os.kill(pid, 0)
+ return True
+ except OSError, inst:
+ return inst.errno != errno.ESRCH
+
+def explainexit(code):
+ """return a 2-tuple (desc, code) describing a subprocess status
+ (codes from kill are negative - not os.system/wait encoding)"""
+ if code >= 0:
+ return _("exited with status %d") % code, code
+ return _("killed by signal %d") % -code, -code
+
+def isowner(st):
+ """Return True if the stat object st is from the current user."""
+ return st.st_uid == os.getuid()
+
+def findexe(command):
+ '''Find executable for command searching like which does.
+ If command is a basename then PATH is searched for command.
+ PATH isn't searched if command is an absolute or relative path.
+ If command isn't found None is returned.'''
+ if sys.platform == 'OpenVMS':
+ return command
+
+ def findexisting(executable):
+ 'Will return executable if existing file'
+ if os.path.isfile(executable) and os.access(executable, os.X_OK):
+ return executable
+ return None
+
+ if os.sep in command:
+ return findexisting(command)
+
+ for path in os.environ.get('PATH', '').split(os.pathsep):
+ executable = findexisting(os.path.join(path, command))
+ if executable is not None:
+ return executable
+ return None
+
+def setsignalhandler():
+ pass
+
+def statfiles(files):
+ 'Stat each file in files and yield stat or None if file does not exist.'
+ lstat = os.lstat
+ for nf in files:
+ try:
+ st = lstat(nf)
+ except OSError, err:
+ if err.errno not in (errno.ENOENT, errno.ENOTDIR):
+ raise
+ st = None
+ yield st
+
+def getuser():
+ '''return name of current user'''
+ return getpass.getuser()
+
+def username(uid=None):
+ """Return the name of the user with the given uid.
+
+ If uid is None, return the name of the current user."""
+
+ if uid is None:
+ uid = os.getuid()
+ try:
+ return pwd.getpwuid(uid)[0]
+ except KeyError:
+ return str(uid)
+
+def groupname(gid=None):
+ """Return the name of the group with the given gid.
+
+ If gid is None, return the name of the current group."""
+
+ if gid is None:
+ gid = os.getgid()
+ try:
+ return grp.getgrgid(gid)[0]
+ except KeyError:
+ return str(gid)
+
+def groupmembers(name):
+ """Return the list of members of the group with the given
+ name, KeyError if the group does not exist.
+ """
+ return list(grp.getgrnam(name).gr_mem)
+
+def spawndetached(args):
+ return os.spawnvp(os.P_NOWAIT | getattr(os, 'P_DETACH', 0),
+ args[0], args)
+
+def gethgcmd():
+ return sys.argv[:1]
+
+def termwidth():
+ try:
+ import termios, array, fcntl
+ for dev in (sys.stderr, sys.stdout, sys.stdin):
+ try:
+ try:
+ fd = dev.fileno()
+ except AttributeError:
+ continue
+ if not os.isatty(fd):
+ continue
+ arri = fcntl.ioctl(fd, termios.TIOCGWINSZ, '\0' * 8)
+ width = array.array('h', arri)[1]
+ if width > 0:
+ return width
+ except ValueError:
+ pass
+ except IOError, e:
+ if e[0] == errno.EINVAL:
+ pass
+ else:
+ raise
+ except ImportError:
+ pass
+ return 80
+
+def makedir(path, notindexed):
+ os.mkdir(path)
+
+def unlinkpath(f):
+ """unlink and remove the directory if it is empty"""
+ os.unlink(f)
+ # try removing directories that might now be empty
+ try:
+ os.removedirs(os.path.dirname(f))
+ except OSError:
+ pass
+
+def lookupreg(key, name=None, scope=None):
+ return None
+
+def hidewindow():
+ """Hide current shell window.
+
+ Used to hide the window opened when starting asynchronous
+ child process under Windows, unneeded on other systems.
+ """
+ pass
+
+class cachestat(object):
+ def __init__(self, path):
+ self.stat = os.stat(path)
+
+ def cacheable(self):
+ return bool(self.stat.st_ino)
+
+ def __eq__(self, other):
+ try:
+ return self.stat == other.stat
+ except AttributeError:
+ return False
+
+ def __ne__(self, other):
+ return not self == other
+
+def executablepath():
+ return None # available on Windows only
diff --git a/websdk/mercurial/pushkey.py b/websdk/mercurial/pushkey.py
index 7cd8bd0..2343319 120000..100644
--- a/websdk/mercurial/pushkey.py
+++ b/websdk/mercurial/pushkey.py
@@ -1 +1,34 @@
-/usr/share/pyshared/mercurial/pushkey.py \ No newline at end of file
+# pushkey.py - dispatching for pushing and pulling keys
+#
+# Copyright 2010 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+import bookmarks
+
+def _nslist(repo):
+ n = {}
+ for k in _namespaces:
+ n[k] = ""
+ return n
+
+_namespaces = {"namespaces": (lambda *x: False, _nslist),
+ "bookmarks": (bookmarks.pushbookmark, bookmarks.listbookmarks)}
+
+def register(namespace, pushkey, listkeys):
+ _namespaces[namespace] = (pushkey, listkeys)
+
+def _get(namespace):
+ return _namespaces.get(namespace, (lambda *x: False, lambda *x: {}))
+
+def push(repo, namespace, key, old, new):
+ '''should succeed iff value was old'''
+ pk = _get(namespace)[0]
+ return pk(repo, key, old, new)
+
+def list(repo, namespace):
+ '''return a dict'''
+ lk = _get(namespace)[1]
+ return lk(repo)
+
diff --git a/websdk/mercurial/py3kcompat.py b/websdk/mercurial/py3kcompat.py
new file mode 100644
index 0000000..8843e9e
--- /dev/null
+++ b/websdk/mercurial/py3kcompat.py
@@ -0,0 +1,72 @@
+# py3kcompat.py - compatibility definitions for running hg in py3k
+#
+# Copyright 2010 Renato Cunha <renatoc@gmail.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+import os, builtins
+
+from numbers import Number
+
+def bytesformatter(format, args):
+ '''Custom implementation of a formatter for bytestrings.
+
+ This function currently relias on the string formatter to do the
+ formatting and always returns bytes objects.
+
+ >>> bytesformatter(20, 10)
+ 0
+ >>> bytesformatter('unicode %s, %s!', ('string', 'foo'))
+ b'unicode string, foo!'
+ >>> bytesformatter(b'test %s', 'me')
+ b'test me'
+ >>> bytesformatter('test %s', 'me')
+ b'test me'
+ >>> bytesformatter(b'test %s', b'me')
+ b'test me'
+ >>> bytesformatter('test %s', b'me')
+ b'test me'
+ >>> bytesformatter('test %d: %s', (1, b'result'))
+ b'test 1: result'
+ '''
+ # The current implementation just converts from bytes to unicode, do
+ # what's needed and then convert the results back to bytes.
+ # Another alternative is to use the Python C API implementation.
+ if isinstance(format, Number):
+ # If the fixer erroneously passes a number remainder operation to
+ # bytesformatter, we just return the correct operation
+ return format % args
+ if isinstance(format, bytes):
+ format = format.decode('utf-8', 'surrogateescape')
+ if isinstance(args, bytes):
+ args = args.decode('utf-8', 'surrogateescape')
+ if isinstance(args, tuple):
+ newargs = []
+ for arg in args:
+ if isinstance(arg, bytes):
+ arg = arg.decode('utf-8', 'surrogateescape')
+ newargs.append(arg)
+ args = tuple(newargs)
+ ret = format % args
+ return ret.encode('utf-8', 'surrogateescape')
+builtins.bytesformatter = bytesformatter
+
+# Create bytes equivalents for os.environ values
+for key in list(os.environ.keys()):
+ # UTF-8 is fine for us
+ bkey = key.encode('utf-8', 'surrogateescape')
+ bvalue = os.environ[key].encode('utf-8', 'surrogateescape')
+ os.environ[bkey] = bvalue
+
+origord = builtins.ord
+def fakeord(char):
+ if isinstance(char, int):
+ return char
+ return origord(char)
+builtins.ord = fakeord
+
+if __name__ == '__main__':
+ import doctest
+ doctest.testmod()
+
diff --git a/websdk/mercurial/repair.py b/websdk/mercurial/repair.py
index 008f132..c95dff1 120000..100644
--- a/websdk/mercurial/repair.py
+++ b/websdk/mercurial/repair.py
@@ -1 +1,158 @@
-/usr/share/pyshared/mercurial/repair.py \ No newline at end of file
+# repair.py - functions for repository repair for mercurial
+#
+# Copyright 2005, 2006 Chris Mason <mason@suse.com>
+# Copyright 2007 Matt Mackall
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from mercurial import changegroup, bookmarks
+from mercurial.node import short
+from mercurial.i18n import _
+import os
+
+def _bundle(repo, bases, heads, node, suffix, compress=True):
+ """create a bundle with the specified revisions as a backup"""
+ cg = repo.changegroupsubset(bases, heads, 'strip')
+ backupdir = repo.join("strip-backup")
+ if not os.path.isdir(backupdir):
+ os.mkdir(backupdir)
+ name = os.path.join(backupdir, "%s-%s.hg" % (short(node), suffix))
+ if compress:
+ bundletype = "HG10BZ"
+ else:
+ bundletype = "HG10UN"
+ return changegroup.writebundle(cg, name, bundletype)
+
+def _collectfiles(repo, striprev):
+ """find out the filelogs affected by the strip"""
+ files = set()
+
+ for x in xrange(striprev, len(repo)):
+ files.update(repo[x].files())
+
+ return sorted(files)
+
+def _collectbrokencsets(repo, files, striprev):
+ """return the changesets which will be broken by the truncation"""
+ s = set()
+ def collectone(revlog):
+ links = (revlog.linkrev(i) for i in revlog)
+ # find the truncation point of the revlog
+ for lrev in links:
+ if lrev >= striprev:
+ break
+ # see if any revision after this point has a linkrev
+ # less than striprev (those will be broken by strip)
+ for lrev in links:
+ if lrev < striprev:
+ s.add(lrev)
+
+ collectone(repo.manifest)
+ for fname in files:
+ collectone(repo.file(fname))
+
+ return s
+
+def strip(ui, repo, node, backup="all"):
+ cl = repo.changelog
+ # TODO delete the undo files, and handle undo of merge sets
+ striprev = cl.rev(node)
+
+ keeppartialbundle = backup == 'strip'
+
+ # Some revisions with rev > striprev may not be descendants of striprev.
+ # We have to find these revisions and put them in a bundle, so that
+ # we can restore them after the truncations.
+ # To create the bundle we use repo.changegroupsubset which requires
+ # the list of heads and bases of the set of interesting revisions.
+ # (head = revision in the set that has no descendant in the set;
+ # base = revision in the set that has no ancestor in the set)
+ tostrip = set(cl.descendants(striprev))
+ tostrip.add(striprev)
+
+ files = _collectfiles(repo, striprev)
+ saverevs = _collectbrokencsets(repo, files, striprev)
+
+ # compute heads
+ saveheads = set(saverevs)
+ for r in xrange(striprev + 1, len(cl)):
+ if r not in tostrip:
+ saverevs.add(r)
+ saveheads.difference_update(cl.parentrevs(r))
+ saveheads.add(r)
+ saveheads = [cl.node(r) for r in saveheads]
+
+ # compute base nodes
+ if saverevs:
+ descendants = set(cl.descendants(*saverevs))
+ saverevs.difference_update(descendants)
+ savebases = [cl.node(r) for r in saverevs]
+
+ bm = repo._bookmarks
+ updatebm = []
+ for m in bm:
+ rev = repo[bm[m]].rev()
+ if rev in tostrip:
+ updatebm.append(m)
+
+ # create a changegroup for all the branches we need to keep
+ backupfile = None
+ if backup == "all":
+ backupfile = _bundle(repo, [node], cl.heads(), node, 'backup')
+ repo.ui.status(_("saved backup bundle to %s\n") % backupfile)
+ if saveheads or savebases:
+ # do not compress partial bundle if we remove it from disk later
+ chgrpfile = _bundle(repo, savebases, saveheads, node, 'temp',
+ compress=keeppartialbundle)
+
+ mfst = repo.manifest
+
+ tr = repo.transaction("strip")
+ offset = len(tr.entries)
+
+ try:
+ tr.startgroup()
+ cl.strip(striprev, tr)
+ mfst.strip(striprev, tr)
+ for fn in files:
+ repo.file(fn).strip(striprev, tr)
+ tr.endgroup()
+
+ try:
+ for i in xrange(offset, len(tr.entries)):
+ file, troffset, ignore = tr.entries[i]
+ repo.sopener(file, 'a').truncate(troffset)
+ tr.close()
+ except:
+ tr.abort()
+ raise
+
+ if saveheads or savebases:
+ ui.note(_("adding branch\n"))
+ f = open(chgrpfile, "rb")
+ gen = changegroup.readbundle(f, chgrpfile)
+ if not repo.ui.verbose:
+ # silence internal shuffling chatter
+ repo.ui.pushbuffer()
+ repo.addchangegroup(gen, 'strip', 'bundle:' + chgrpfile, True)
+ if not repo.ui.verbose:
+ repo.ui.popbuffer()
+ f.close()
+ if not keeppartialbundle:
+ os.unlink(chgrpfile)
+
+ for m in updatebm:
+ bm[m] = repo['.'].node()
+ bookmarks.write(repo)
+
+ except:
+ if backupfile:
+ ui.warn(_("strip failed, full bundle stored in '%s'\n")
+ % backupfile)
+ elif saveheads:
+ ui.warn(_("strip failed, partial bundle stored in '%s'\n")
+ % chgrpfile)
+ raise
+
+ repo.destroyed()
diff --git a/websdk/mercurial/repo.py b/websdk/mercurial/repo.py
index 10de77a..60de133 120000..100644
--- a/websdk/mercurial/repo.py
+++ b/websdk/mercurial/repo.py
@@ -1 +1,40 @@
-/usr/share/pyshared/mercurial/repo.py \ No newline at end of file
+# repo.py - repository base classes for mercurial
+#
+# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
+# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from i18n import _
+import error
+
+class repository(object):
+ def capable(self, name):
+ '''tell whether repo supports named capability.
+ return False if not supported.
+ if boolean capability, return True.
+ if string capability, return string.'''
+ if name in self.capabilities:
+ return True
+ name_eq = name + '='
+ for cap in self.capabilities:
+ if cap.startswith(name_eq):
+ return cap[len(name_eq):]
+ return False
+
+ def requirecap(self, name, purpose):
+ '''raise an exception if the given capability is not present'''
+ if not self.capable(name):
+ raise error.CapabilityError(
+ _('cannot %s; remote repository does not '
+ 'support the %r capability') % (purpose, name))
+
+ def local(self):
+ return False
+
+ def cancopy(self):
+ return self.local()
+
+ def close(self):
+ pass
diff --git a/websdk/mercurial/revlog.py b/websdk/mercurial/revlog.py
index 665cc96..02b553f 120000..100644
--- a/websdk/mercurial/revlog.py
+++ b/websdk/mercurial/revlog.py
@@ -1 +1,1280 @@
-/usr/share/pyshared/mercurial/revlog.py \ No newline at end of file
+# revlog.py - storage back-end for mercurial
+#
+# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+"""Storage back-end for Mercurial.
+
+This provides efficient delta storage with O(1) retrieve and append
+and O(changes) merge between branches.
+"""
+
+# import stuff from node for others to import from revlog
+from node import bin, hex, nullid, nullrev
+from i18n import _
+import ancestor, mdiff, parsers, error, util, dagutil
+import struct, zlib, errno
+
+_pack = struct.pack
+_unpack = struct.unpack
+_compress = zlib.compress
+_decompress = zlib.decompress
+_sha = util.sha1
+
+# revlog header flags
+REVLOGV0 = 0
+REVLOGNG = 1
+REVLOGNGINLINEDATA = (1 << 16)
+REVLOGGENERALDELTA = (1 << 17)
+REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
+REVLOG_DEFAULT_FORMAT = REVLOGNG
+REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
+REVLOGNG_FLAGS = REVLOGNGINLINEDATA | REVLOGGENERALDELTA
+
+# revlog index flags
+REVIDX_KNOWN_FLAGS = 0
+
+# max size of revlog with inline data
+_maxinline = 131072
+_chunksize = 1048576
+
+RevlogError = error.RevlogError
+LookupError = error.LookupError
+
+def getoffset(q):
+ return int(q >> 16)
+
+def gettype(q):
+ return int(q & 0xFFFF)
+
+def offset_type(offset, type):
+ return long(long(offset) << 16 | type)
+
+nullhash = _sha(nullid)
+
+def hash(text, p1, p2):
+ """generate a hash from the given text and its parent hashes
+
+ This hash combines both the current file contents and its history
+ in a manner that makes it easy to distinguish nodes with the same
+ content in the revision graph.
+ """
+ # As of now, if one of the parent node is null, p2 is null
+ if p2 == nullid:
+ # deep copy of a hash is faster than creating one
+ s = nullhash.copy()
+ s.update(p1)
+ else:
+ # none of the parent nodes are nullid
+ l = [p1, p2]
+ l.sort()
+ s = _sha(l[0])
+ s.update(l[1])
+ s.update(text)
+ return s.digest()
+
+def compress(text):
+ """ generate a possibly-compressed representation of text """
+ if not text:
+ return ("", text)
+ l = len(text)
+ bin = None
+ if l < 44:
+ pass
+ elif l > 1000000:
+ # zlib makes an internal copy, thus doubling memory usage for
+ # large files, so lets do this in pieces
+ z = zlib.compressobj()
+ p = []
+ pos = 0
+ while pos < l:
+ pos2 = pos + 2**20
+ p.append(z.compress(text[pos:pos2]))
+ pos = pos2
+ p.append(z.flush())
+ if sum(map(len, p)) < l:
+ bin = "".join(p)
+ else:
+ bin = _compress(text)
+ if bin is None or len(bin) > l:
+ if text[0] == '\0':
+ return ("", text)
+ return ('u', text)
+ return ("", bin)
+
+def decompress(bin):
+ """ decompress the given input """
+ if not bin:
+ return bin
+ t = bin[0]
+ if t == '\0':
+ return bin
+ if t == 'x':
+ return _decompress(bin)
+ if t == 'u':
+ return bin[1:]
+ raise RevlogError(_("unknown compression type %r") % t)
+
+indexformatv0 = ">4l20s20s20s"
+v0shaoffset = 56
+
+class revlogoldio(object):
+ def __init__(self):
+ self.size = struct.calcsize(indexformatv0)
+
+ def parseindex(self, data, inline):
+ s = self.size
+ index = []
+ nodemap = {nullid: nullrev}
+ n = off = 0
+ l = len(data)
+ while off + s <= l:
+ cur = data[off:off + s]
+ off += s
+ e = _unpack(indexformatv0, cur)
+ # transform to revlogv1 format
+ e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
+ nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
+ index.append(e2)
+ nodemap[e[6]] = n
+ n += 1
+
+ # add the magic null revision at -1
+ index.append((0, 0, 0, -1, -1, -1, -1, nullid))
+
+ return index, nodemap, None
+
+ def packentry(self, entry, node, version, rev):
+ if gettype(entry[0]):
+ raise RevlogError(_("index entry flags need RevlogNG"))
+ e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
+ node(entry[5]), node(entry[6]), entry[7])
+ return _pack(indexformatv0, *e2)
+
+# index ng:
+# 6 bytes: offset
+# 2 bytes: flags
+# 4 bytes: compressed length
+# 4 bytes: uncompressed length
+# 4 bytes: base rev
+# 4 bytes: link rev
+# 4 bytes: parent 1 rev
+# 4 bytes: parent 2 rev
+# 32 bytes: nodeid
+indexformatng = ">Qiiiiii20s12x"
+ngshaoffset = 32
+versionformat = ">I"
+
+class revlogio(object):
+ def __init__(self):
+ self.size = struct.calcsize(indexformatng)
+
+ def parseindex(self, data, inline):
+ # call the C implementation to parse the index data
+ index, cache = parsers.parse_index2(data, inline)
+ return index, None, cache
+
+ def packentry(self, entry, node, version, rev):
+ p = _pack(indexformatng, *entry)
+ if rev == 0:
+ p = _pack(versionformat, version) + p[4:]
+ return p
+
+class revlog(object):
+ """
+ the underlying revision storage object
+
+ A revlog consists of two parts, an index and the revision data.
+
+ The index is a file with a fixed record size containing
+ information on each revision, including its nodeid (hash), the
+ nodeids of its parents, the position and offset of its data within
+ the data file, and the revision it's based on. Finally, each entry
+ contains a linkrev entry that can serve as a pointer to external
+ data.
+
+ The revision data itself is a linear collection of data chunks.
+ Each chunk represents a revision and is usually represented as a
+ delta against the previous chunk. To bound lookup time, runs of
+ deltas are limited to about 2 times the length of the original
+ version data. This makes retrieval of a version proportional to
+ its size, or O(1) relative to the number of revisions.
+
+ Both pieces of the revlog are written to in an append-only
+ fashion, which means we never need to rewrite a file to insert or
+ remove data, and can use some simple techniques to avoid the need
+ for locking while reading.
+ """
+ def __init__(self, opener, indexfile):
+ """
+ create a revlog object
+
+ opener is a function that abstracts the file opening operation
+ and can be used to implement COW semantics or the like.
+ """
+ self.indexfile = indexfile
+ self.datafile = indexfile[:-2] + ".d"
+ self.opener = opener
+ self._cache = None
+ self._basecache = (0, 0)
+ self._chunkcache = (0, '')
+ self.index = []
+ self._pcache = {}
+ self._nodecache = {nullid: nullrev}
+ self._nodepos = None
+
+ v = REVLOG_DEFAULT_VERSION
+ opts = getattr(opener, 'options', None)
+ if opts is not None:
+ if 'revlogv1' in opts:
+ if 'generaldelta' in opts:
+ v |= REVLOGGENERALDELTA
+ else:
+ v = 0
+
+ i = ''
+ self._initempty = True
+ try:
+ f = self.opener(self.indexfile)
+ i = f.read()
+ f.close()
+ if len(i) > 0:
+ v = struct.unpack(versionformat, i[:4])[0]
+ self._initempty = False
+ except IOError, inst:
+ if inst.errno != errno.ENOENT:
+ raise
+
+ self.version = v
+ self._inline = v & REVLOGNGINLINEDATA
+ self._generaldelta = v & REVLOGGENERALDELTA
+ flags = v & ~0xFFFF
+ fmt = v & 0xFFFF
+ if fmt == REVLOGV0 and flags:
+ raise RevlogError(_("index %s unknown flags %#04x for format v0")
+ % (self.indexfile, flags >> 16))
+ elif fmt == REVLOGNG and flags & ~REVLOGNG_FLAGS:
+ raise RevlogError(_("index %s unknown flags %#04x for revlogng")
+ % (self.indexfile, flags >> 16))
+ elif fmt > REVLOGNG:
+ raise RevlogError(_("index %s unknown format %d")
+ % (self.indexfile, fmt))
+
+ self._io = revlogio()
+ if self.version == REVLOGV0:
+ self._io = revlogoldio()
+ try:
+ d = self._io.parseindex(i, self._inline)
+ except (ValueError, IndexError):
+ raise RevlogError(_("index %s is corrupted") % (self.indexfile))
+ self.index, nodemap, self._chunkcache = d
+ if nodemap is not None:
+ self.nodemap = self._nodecache = nodemap
+ if not self._chunkcache:
+ self._chunkclear()
+
+ def tip(self):
+ return self.node(len(self.index) - 2)
+ def __len__(self):
+ return len(self.index) - 1
+ def __iter__(self):
+ for i in xrange(len(self)):
+ yield i
+
+ @util.propertycache
+ def nodemap(self):
+ self.rev(self.node(0))
+ return self._nodecache
+
+ def rev(self, node):
+ try:
+ return self._nodecache[node]
+ except KeyError:
+ n = self._nodecache
+ i = self.index
+ p = self._nodepos
+ if p is None:
+ p = len(i) - 2
+ for r in xrange(p, -1, -1):
+ v = i[r][7]
+ n[v] = r
+ if v == node:
+ self._nodepos = r - 1
+ return r
+ raise LookupError(node, self.indexfile, _('no node'))
+
+ def node(self, rev):
+ return self.index[rev][7]
+ def linkrev(self, rev):
+ return self.index[rev][4]
+ def parents(self, node):
+ i = self.index
+ d = i[self.rev(node)]
+ return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
+ def parentrevs(self, rev):
+ return self.index[rev][5:7]
+ def start(self, rev):
+ return int(self.index[rev][0] >> 16)
+ def end(self, rev):
+ return self.start(rev) + self.length(rev)
+ def length(self, rev):
+ return self.index[rev][1]
+ def chainbase(self, rev):
+ index = self.index
+ base = index[rev][3]
+ while base != rev:
+ rev = base
+ base = index[rev][3]
+ return base
+ def flags(self, rev):
+ return self.index[rev][0] & 0xFFFF
+ def rawsize(self, rev):
+ """return the length of the uncompressed text for a given revision"""
+ l = self.index[rev][2]
+ if l >= 0:
+ return l
+
+ t = self.revision(self.node(rev))
+ return len(t)
+ size = rawsize
+
+ def reachable(self, node, stop=None):
+ """return the set of all nodes ancestral to a given node, including
+ the node itself, stopping when stop is matched"""
+ reachable = set((node,))
+ visit = [node]
+ if stop:
+ stopn = self.rev(stop)
+ else:
+ stopn = 0
+ while visit:
+ n = visit.pop(0)
+ if n == stop:
+ continue
+ if n == nullid:
+ continue
+ for p in self.parents(n):
+ if self.rev(p) < stopn:
+ continue
+ if p not in reachable:
+ reachable.add(p)
+ visit.append(p)
+ return reachable
+
+ def ancestors(self, *revs):
+ """Generate the ancestors of 'revs' in reverse topological order.
+
+ Yield a sequence of revision numbers starting with the parents
+ of each revision in revs, i.e., each revision is *not* considered
+ an ancestor of itself. Results are in breadth-first order:
+ parents of each rev in revs, then parents of those, etc. Result
+ does not include the null revision."""
+ visit = list(revs)
+ seen = set([nullrev])
+ while visit:
+ for parent in self.parentrevs(visit.pop(0)):
+ if parent not in seen:
+ visit.append(parent)
+ seen.add(parent)
+ yield parent
+
+ def descendants(self, *revs):
+ """Generate the descendants of 'revs' in revision order.
+
+ Yield a sequence of revision numbers starting with a child of
+ some rev in revs, i.e., each revision is *not* considered a
+ descendant of itself. Results are ordered by revision number (a
+ topological sort)."""
+ first = min(revs)
+ if first == nullrev:
+ for i in self:
+ yield i
+ return
+
+ seen = set(revs)
+ for i in xrange(first + 1, len(self)):
+ for x in self.parentrevs(i):
+ if x != nullrev and x in seen:
+ seen.add(i)
+ yield i
+ break
+
+ def findcommonmissing(self, common=None, heads=None):
+ """Return a tuple of the ancestors of common and the ancestors of heads
+ that are not ancestors of common.
+
+ More specifically, the second element is a list of nodes N such that
+ every N satisfies the following constraints:
+
+ 1. N is an ancestor of some node in 'heads'
+ 2. N is not an ancestor of any node in 'common'
+
+ The list is sorted by revision number, meaning it is
+ topologically sorted.
+
+ 'heads' and 'common' are both lists of node IDs. If heads is
+ not supplied, uses all of the revlog's heads. If common is not
+ supplied, uses nullid."""
+ if common is None:
+ common = [nullid]
+ if heads is None:
+ heads = self.heads()
+
+ # work in revision numbers from here on
+ common = [self.rev(n) for n in common]
+ heads = [self.rev(n) for n in heads]
+
+ # we want the ancestors, but inclusive
+ has = set(self.ancestors(*common))
+ has.add(nullrev)
+ has.update(common)
+
+ # take all ancestors from heads that aren't in has
+ missing = set()
+ visit = [r for r in heads if r not in has]
+ while visit:
+ r = visit.pop(0)
+ if r in missing:
+ continue
+ else:
+ missing.add(r)
+ for p in self.parentrevs(r):
+ if p not in has:
+ visit.append(p)
+ missing = list(missing)
+ missing.sort()
+ return has, [self.node(r) for r in missing]
+
+ def findmissing(self, common=None, heads=None):
+ """Return the ancestors of heads that are not ancestors of common.
+
+ More specifically, return a list of nodes N such that every N
+ satisfies the following constraints:
+
+ 1. N is an ancestor of some node in 'heads'
+ 2. N is not an ancestor of any node in 'common'
+
+ The list is sorted by revision number, meaning it is
+ topologically sorted.
+
+ 'heads' and 'common' are both lists of node IDs. If heads is
+ not supplied, uses all of the revlog's heads. If common is not
+ supplied, uses nullid."""
+ # thin wrapper: discard the 'common ancestors' half of the tuple
+ _common, missing = self.findcommonmissing(common, heads)
+ return missing
+
+ def nodesbetween(self, roots=None, heads=None):
+ """Return a topological path from 'roots' to 'heads'.
+
+ Return a tuple (nodes, outroots, outheads) where 'nodes' is a
+ topologically sorted list of all nodes N that satisfy both of
+ these constraints:
+
+ 1. N is a descendant of some node in 'roots'
+ 2. N is an ancestor of some node in 'heads'
+
+ Every node is considered to be both a descendant and an ancestor
+ of itself, so every reachable node in 'roots' and 'heads' will be
+ included in 'nodes'.
+
+ 'outroots' is the list of reachable nodes in 'roots', i.e., the
+ subset of 'roots' that is returned in 'nodes'. Likewise,
+ 'outheads' is the subset of 'heads' that is also in 'nodes'.
+
+ 'roots' and 'heads' are both lists of node IDs. If 'roots' is
+ unspecified, uses nullid as the only root. If 'heads' is
+ unspecified, uses list of all of the revlog's heads."""
+ # shared empty result for every early-out below
+ nonodes = ([], [], [])
+ if roots is not None:
+ roots = list(roots)
+ if not roots:
+ return nonodes
+ lowestrev = min([self.rev(n) for n in roots])
+ else:
+ roots = [nullid] # Everybody's a descendant of nullid
+ lowestrev = nullrev
+ if (lowestrev == nullrev) and (heads is None):
+ # We want _all_ the nodes!
+ return ([self.node(r) for r in self], [nullid], list(self.heads()))
+ if heads is None:
+ # All nodes are ancestors, so the latest ancestor is the last
+ # node.
+ highestrev = len(self) - 1
+ # Set ancestors to None to signal that every node is an ancestor.
+ ancestors = None
+ # Set heads to an empty dictionary for later discovery of heads
+ heads = {}
+ else:
+ heads = list(heads)
+ if not heads:
+ return nonodes
+ ancestors = set()
+ # Turn heads into a dictionary so we can remove 'fake' heads.
+ # Also, later we will be using it to filter out the heads we can't
+ # find from roots.
+ heads = dict.fromkeys(heads, False)
+ # Start at the top and keep marking parents until we're done.
+ nodestotag = set(heads)
+ # Remember where the top was so we can use it as a limit later.
+ highestrev = max([self.rev(n) for n in nodestotag])
+ while nodestotag:
+ # grab a node to tag
+ n = nodestotag.pop()
+ # Never tag nullid
+ if n == nullid:
+ continue
+ # A node's revision number represents its place in a
+ # topologically sorted list of nodes.
+ r = self.rev(n)
+ if r >= lowestrev:
+ if n not in ancestors:
+ # If we are possibly a descendant of one of the roots
+ # and we haven't already been marked as an ancestor
+ ancestors.add(n) # Mark as ancestor
+ # Add non-nullid parents to list of nodes to tag.
+ nodestotag.update([p for p in self.parents(n) if
+ p != nullid])
+ elif n in heads: # We've seen it before, is it a fake head?
+ # So it is, real heads should not be the ancestors of
+ # any other heads.
+ heads.pop(n)
+ if not ancestors:
+ return nonodes
+ # Now that we have our set of ancestors, we want to remove any
+ # roots that are not ancestors.
+
+ # If one of the roots was nullid, everything is included anyway.
+ if lowestrev > nullrev:
+ # But, since we weren't, let's recompute the lowest rev to not
+ # include roots that aren't ancestors.
+
+ # Filter out roots that aren't ancestors of heads
+ roots = [n for n in roots if n in ancestors]
+ # Recompute the lowest revision
+ if roots:
+ lowestrev = min([self.rev(n) for n in roots])
+ else:
+ # No more roots? Return empty list
+ return nonodes
+ else:
+ # We are descending from nullid, and don't need to care about
+ # any other roots.
+ lowestrev = nullrev
+ roots = [nullid]
+ # Transform our roots list into a set.
+ descendants = set(roots)
+ # Also, keep the original roots so we can filter out roots that aren't
+ # 'real' roots (i.e. are descended from other roots).
+ roots = descendants.copy()
+ # Our topologically sorted list of output nodes.
+ orderedout = []
+ # Don't start at nullid since we don't want nullid in our output list,
+ # and if nullid shows up in descedents, empty parents will look like
+ # they're descendants.
+ for r in xrange(max(lowestrev, 0), highestrev + 1):
+ n = self.node(r)
+ isdescendant = False
+ if lowestrev == nullrev: # Everybody is a descendant of nullid
+ isdescendant = True
+ elif n in descendants:
+ # n is already a descendant
+ isdescendant = True
+ # This check only needs to be done here because all the roots
+ # will start being marked is descendants before the loop.
+ if n in roots:
+ # If n was a root, check if it's a 'real' root.
+ p = tuple(self.parents(n))
+ # If any of its parents are descendants, it's not a root.
+ if (p[0] in descendants) or (p[1] in descendants):
+ roots.remove(n)
+ else:
+ p = tuple(self.parents(n))
+ # A node is a descendant if either of its parents are
+ # descendants. (We seeded the dependents list with the roots
+ # up there, remember?)
+ if (p[0] in descendants) or (p[1] in descendants):
+ descendants.add(n)
+ isdescendant = True
+ if isdescendant and ((ancestors is None) or (n in ancestors)):
+ # Only include nodes that are both descendants and ancestors.
+ orderedout.append(n)
+ if (ancestors is not None) and (n in heads):
+ # We're trying to figure out which heads are reachable
+ # from roots.
+ # Mark this head as having been reached
+ heads[n] = True
+ elif ancestors is None:
+ # Otherwise, we're trying to discover the heads.
+ # Assume this is a head because if it isn't, the next step
+ # will eventually remove it.
+ heads[n] = True
+ # But, obviously its parents aren't.
+ for p in self.parents(n):
+ heads.pop(p, None)
+ # keep only the heads that were actually reached from the roots
+ heads = [n for n, flag in heads.iteritems() if flag]
+ roots = list(roots)
+ assert orderedout
+ assert roots
+ assert heads
+ return (orderedout, roots, heads)
+
+ def headrevs(self):
+ # Return the revision numbers of all head revisions (revs with no
+ # children). A rev is a head unless some index entry names it as a
+ # parent; index fields 5 and 6 appear to be the two parent revs.
+ count = len(self)
+ if not count:
+ return [nullrev]
+ ishead = [1] * (count + 1)
+ index = self.index
+ for r in xrange(count):
+ e = index[r]
+ # clear the head flag of both parents of r
+ ishead[e[5]] = ishead[e[6]] = 0
+ return [r for r in xrange(count) if ishead[r]]
+
+ def heads(self, start=None, stop=None):
+ """return the list of all nodes that have no children
+
+ if start is specified, only heads that are descendants of
+ start will be returned
+ if stop is specified, it will consider all the revs from stop
+ as if they had no children
+ """
+ if start is None and stop is None:
+ # fast path: unconstrained query delegates to headrevs()
+ if not len(self):
+ return [nullid]
+ return [self.node(r) for r in self.headrevs()]
+
+ if start is None:
+ start = nullid
+ if stop is None:
+ stop = []
+ stoprevs = set([self.rev(n) for n in stop])
+ startrev = self.rev(start)
+ reachable = set((startrev,))
+ heads = set((startrev,))
+
+ # single forward sweep: a rev reachable from start displaces its
+ # parents as heads, unless it is in the stop set
+ parentrevs = self.parentrevs
+ for r in xrange(startrev + 1, len(self)):
+ for p in parentrevs(r):
+ if p in reachable:
+ if r not in stoprevs:
+ reachable.add(r)
+ heads.add(r)
+ if p in heads and p not in stoprevs:
+ heads.remove(p)
+
+ return [self.node(r) for r in heads]
+
+ def children(self, node):
+ """find the children of a given node"""
+ c = []
+ p = self.rev(node)
+ # children can only have higher revision numbers than their parent
+ for r in range(p + 1, len(self)):
+ prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
+ if prevs:
+ for pr in prevs:
+ if pr == p:
+ c.append(self.node(r))
+ elif p == nullrev:
+ # parentless revisions are children of the null revision
+ c.append(self.node(r))
+ return c
+
+ def descendant(self, start, end):
+ # True if rev 'end' is a descendant of rev 'start'
+ if start == nullrev:
+ return True
+ for i in self.descendants(start):
+ if i == end:
+ return True
+ elif i > end:
+ # descendants() yields in increasing order: 'end' cannot appear
+ break
+ return False
+
+ def ancestor(self, a, b):
+ """calculate the least common ancestor of nodes a and b"""
+
+ # fast path, check if it is a descendant
+ a, b = self.rev(a), self.rev(b)
+ start, end = sorted((a, b))
+ if self.descendant(start, end):
+ return self.node(start)
+
+ def parents(rev):
+ return [p for p in self.parentrevs(rev) if p != nullrev]
+
+ # fall back to the generic DAG algorithm in the ancestor module
+ c = ancestor.ancestor(a, b, parents)
+ if c is None:
+ return nullid
+
+ return self.node(c)
+
+ def _match(self, id):
+ # Resolve an exact identifier (int rev, binary node, str(rev), or
+ # full 40-char hex node) to a binary node; return None on no match.
+ if isinstance(id, (long, int)):
+ # rev
+ return self.node(id)
+ if len(id) == 20:
+ # possibly a binary node
+ # odds of a binary node being all hex in ASCII are 1 in 10**25
+ try:
+ node = id
+ self.rev(node) # quick search the index
+ return node
+ except LookupError:
+ pass # may be partial hex id
+ try:
+ # str(rev)
+ rev = int(id)
+ if str(rev) != id:
+ raise ValueError
+ if rev < 0:
+ # negative revs count from the end, Python-slice style
+ rev = len(self) + rev
+ if rev < 0 or rev >= len(self):
+ raise ValueError
+ return self.node(rev)
+ except (ValueError, OverflowError):
+ pass
+ if len(id) == 40:
+ try:
+ # a full hex nodeid?
+ node = bin(id)
+ self.rev(node)
+ return node
+ except (TypeError, LookupError):
+ pass
+
+ def _partialmatch(self, id):
+ # Resolve an abbreviated hex node prefix; result is memoized in
+ # self._pcache. Returns None when nothing matches, raises
+ # LookupError when the prefix is ambiguous.
+ if id in self._pcache:
+ return self._pcache[id]
+
+ if len(id) < 40:
+ try:
+ # hex(node)[:...]
+ l = len(id) // 2 # grab an even number of digits
+ prefix = bin(id[:l * 2])
+ # e[7] appears to be the binary node of each index entry
+ nl = [e[7] for e in self.index if e[7].startswith(prefix)]
+ # re-filter in hex to honor an odd trailing digit
+ nl = [n for n in nl if hex(n).startswith(id)]
+ if len(nl) > 0:
+ if len(nl) == 1:
+ self._pcache[id] = nl[0]
+ return nl[0]
+ raise LookupError(id, self.indexfile,
+ _('ambiguous identifier'))
+ return None
+ except TypeError:
+ pass
+
+ def lookup(self, id):
+ """locate a node based on:
+ - revision number or str(revision number)
+ - nodeid or subset of hex nodeid
+ """
+ # exact forms first, then abbreviated-prefix match
+ n = self._match(id)
+ if n is not None:
+ return n
+ n = self._partialmatch(id)
+ if n:
+ return n
+
+ raise LookupError(id, self.indexfile, _('no match found'))
+
+ def cmp(self, node, text):
+ """compare text with a given file revision
+
+ returns True if text is different than what is stored.
+ """
+ # 'hash' here is the revlog module's node-hash helper (defined
+ # earlier in this file), not the builtin hash()
+ p1, p2 = self.parents(node)
+ return hash(text, p1, p2) != node
+
+ def _addchunk(self, offset, data):
+ # Merge 'data' into the single-window chunk cache when it extends the
+ # cached region contiguously and stays under _chunksize; otherwise
+ # replace the cache with the new window.
+ o, d = self._chunkcache
+ # try to add to existing cache
+ if o + len(d) == offset and len(d) + len(data) < _chunksize:
+ self._chunkcache = o, d + data
+ else:
+ self._chunkcache = offset, data
+
+ def _loadchunk(self, offset, length):
+ # Read 'length' bytes at 'offset' from disk (index file when inline,
+ # data file otherwise), reading ahead to warm the chunk cache.
+ if self._inline:
+ df = self.opener(self.indexfile)
+ else:
+ df = self.opener(self.datafile)
+
+ readahead = max(65536, length)
+ df.seek(offset)
+ d = df.read(readahead)
+ df.close()
+ self._addchunk(offset, d)
+ if readahead > length:
+ return d[:length]
+ return d
+
+ def _getchunk(self, offset, length):
+ # Serve a byte range from the cache when fully contained, else load
+ # it from disk via _loadchunk.
+ o, d = self._chunkcache
+ l = len(d)
+
+ # is it in the cache?
+ cachestart = offset - o
+ cacheend = cachestart + length
+ if cachestart >= 0 and cacheend <= l:
+ if cachestart == 0 and cacheend == l:
+ return d # avoid a copy
+ return d[cachestart:cacheend]
+
+ return self._loadchunk(offset, length)
+
+ def _chunkraw(self, startrev, endrev):
+ # Raw (still compressed) bytes spanning revisions startrev..endrev.
+ start = self.start(startrev)
+ length = self.end(endrev) - start
+ if self._inline:
+ # inline revlogs interleave index entries with data; skip them
+ start += (startrev + 1) * self._io.size
+ return self._getchunk(start, length)
+
+ def _chunk(self, rev):
+ # decompressed payload of a single revision
+ return decompress(self._chunkraw(rev, rev))
+
+ def _chunkbase(self, rev):
+ # base chunk of a delta chain; presumably a subclass hook — by
+ # default identical to _chunk (TODO confirm against subclasses)
+ return self._chunk(rev)
+
+ def _chunkclear(self):
+ # drop the cached window entirely
+ self._chunkcache = (0, '')
+
+ def deltaparent(self, rev):
+ """return deltaparent of the given revision"""
+ # index field 3 stores the delta base of the entry
+ base = self.index[rev][3]
+ if base == rev:
+ # self-based entry: stored as a full text, no delta parent
+ return nullrev
+ elif self._generaldelta:
+ return base
+ else:
+ # classic revlogs always delta against the previous revision
+ return rev - 1
+
+ def revdiff(self, rev1, rev2):
+ """return or calculate a delta between two revisions"""
+ # fast path: the stored delta for rev2 is already against rev1
+ if rev1 != nullrev and self.deltaparent(rev2) == rev1:
+ return self._chunk(rev2)
+
+ return mdiff.textdiff(self.revision(self.node(rev1)),
+ self.revision(self.node(rev2)))
+
+ def revision(self, node):
+ """return an uncompressed revision of a given node"""
+ cachedrev = None
+ if node == nullid:
+ return ""
+ if self._cache:
+ # _cache is (node, rev, text); full hit returns immediately,
+ # otherwise the cached rev may still shorten the delta chain
+ if self._cache[0] == node:
+ return self._cache[2]
+ cachedrev = self._cache[1]
+
+ # look up what we need to read
+ text = None
+ rev = self.rev(node)
+
+ # check rev flags
+ if self.flags(rev) & ~REVIDX_KNOWN_FLAGS:
+ raise RevlogError(_('incompatible revision flag %x') %
+ (self.flags(rev) & ~REVIDX_KNOWN_FLAGS))
+
+ # build delta chain
+ chain = []
+ index = self.index # for performance
+ generaldelta = self._generaldelta
+ iterrev = rev
+ e = index[iterrev]
+ # walk back until we hit a full text (base == rev) or the cached rev
+ while iterrev != e[3] and iterrev != cachedrev:
+ chain.append(iterrev)
+ if generaldelta:
+ iterrev = e[3]
+ else:
+ iterrev -= 1
+ e = index[iterrev]
+ chain.reverse()
+ base = iterrev
+
+ if iterrev == cachedrev:
+ # cache hit
+ text = self._cache[2]
+
+ # drop cache to save memory
+ self._cache = None
+
+ # warm the chunk cache with the whole span base..rev in one read
+ self._chunkraw(base, rev)
+ if text is None:
+ text = self._chunkbase(base)
+
+ # apply the deltas in chain order on top of the base text
+ bins = [self._chunk(r) for r in chain]
+ text = mdiff.patches(text, bins)
+
+ text = self._checkhash(text, node, rev)
+
+ self._cache = (node, rev, text)
+ return text
+
+ def _checkhash(self, text, node, rev):
+ # verify that the reconstructed text hashes back to 'node'
+ p1, p2 = self.parents(node)
+ if node != hash(text, p1, p2):
+ raise RevlogError(_("integrity check failed on %s:%d")
+ % (self.indexfile, rev))
+ return text
+
+ def checkinlinesize(self, tr, fp=None):
+ # Migrate an inline revlog (data interleaved in the index file) to a
+ # separate data file once it grows past _maxinline. No-op otherwise.
+ if not self._inline or (self.start(-2) + self.length(-2)) < _maxinline:
+ return
+
+ trinfo = tr.find(self.indexfile)
+ if trinfo is None:
+ raise RevlogError(_("%s not found in the transaction")
+ % self.indexfile)
+
+ trindex = trinfo[2]
+ dataoff = self.start(trindex)
+
+ # register the new data file so the transaction can roll it back
+ tr.add(self.datafile, dataoff)
+
+ if fp:
+ fp.flush()
+ fp.close()
+
+ # copy every revision's raw chunk into the new data file
+ df = self.opener(self.datafile, 'w')
+ try:
+ for r in self:
+ df.write(self._chunkraw(r, r))
+ finally:
+ df.close()
+
+ # rewrite the index without the inline flag/data
+ fp = self.opener(self.indexfile, 'w', atomictemp=True)
+ self.version &= ~(REVLOGNGINLINEDATA)
+ self._inline = False
+ for i in self:
+ e = self._io.packentry(self.index[i], self.node, self.version, i)
+ fp.write(e)
+
+ # if we don't call close, the temp file will never replace the
+ # real index
+ fp.close()
+
+ tr.replace(self.indexfile, trindex * self._io.size)
+ # cached offsets refer to the old layout; invalidate
+ self._chunkclear()
+
+ def addrevision(self, text, transaction, link, p1, p2, cachedelta=None):
+ """add a revision to the log
+
+ text - the revision data to add
+ transaction - the transaction object used for rollback
+ link - the linkrev data to add
+ p1, p2 - the parent nodeids of the revision
+ cachedelta - an optional precomputed delta
+ """
+ node = hash(text, p1, p2)
+ if node in self.nodemap:
+ # already stored; adding again would be a no-op
+ return node
+
+ dfh = None
+ if not self._inline:
+ dfh = self.opener(self.datafile, "a")
+ ifh = self.opener(self.indexfile, "a+")
+ try:
+ return self._addrevision(node, text, transaction, link, p1, p2,
+ cachedelta, ifh, dfh)
+ finally:
+ if dfh:
+ dfh.close()
+ ifh.close()
+
+ def _addrevision(self, node, text, transaction, link, p1, p2,
+ cachedelta, ifh, dfh):
+ """internal function to add revisions to the log
+
+ see addrevision for argument descriptions.
+ invariants:
+ - text is optional (can be None); if not set, cachedelta must be set.
+ if both are set, they must correspond to eachother.
+ """
+ # one-element list so the closures below can rebind the lazily
+ # reconstructed full text
+ btext = [text]
+ def buildtext():
+ if btext[0] is not None:
+ return btext[0]
+ # flush any pending writes here so we can read it in revision
+ if dfh:
+ dfh.flush()
+ ifh.flush()
+ basetext = self.revision(self.node(cachedelta[0]))
+ btext[0] = mdiff.patch(basetext, cachedelta[1])
+ chk = hash(btext[0], p1, p2)
+ if chk != node:
+ raise RevlogError(_("consistency error in delta"))
+ return btext[0]
+
+ def builddelta(rev):
+ # can we use the cached delta?
+ if cachedelta and cachedelta[0] == rev:
+ delta = cachedelta[1]
+ else:
+ t = buildtext()
+ ptext = self.revision(self.node(rev))
+ delta = mdiff.textdiff(ptext, t)
+ # compress() returns (header, payload)
+ data = compress(delta)
+ l = len(data[1]) + len(data[0])
+ if basecache[0] == rev:
+ chainbase = basecache[1]
+ else:
+ chainbase = self.chainbase(rev)
+ # distance from the chain's base text — used to bound chain length
+ dist = l + offset - self.start(chainbase)
+ if self._generaldelta:
+ base = rev
+ else:
+ base = chainbase
+ return dist, l, data, base, chainbase
+
+ curr = len(self)
+ prev = curr - 1
+ base = chainbase = curr
+ offset = self.end(prev)
+ flags = 0
+ d = None
+ basecache = self._basecache
+ p1r, p2r = self.rev(p1), self.rev(p2)
+
+ # should we try to build a delta?
+ if prev != nullrev:
+ if self._generaldelta:
+ # prefer deltaing against a parent at or past the cached base
+ if p1r >= basecache[1]:
+ d = builddelta(p1r)
+ elif p2r >= basecache[1]:
+ d = builddelta(p2r)
+ else:
+ d = builddelta(prev)
+ else:
+ d = builddelta(prev)
+ dist, l, data, base, chainbase = d
+
+ # full versions are inserted when the needed deltas
+ # become comparable to the uncompressed text
+ if text is None:
+ textlen = mdiff.patchedsize(self.rawsize(cachedelta[0]),
+ cachedelta[1])
+ else:
+ textlen = len(text)
+ if d is None or dist > textlen * 2:
+ # store the full text instead of a too-long delta chain
+ text = buildtext()
+ data = compress(text)
+ l = len(data[1]) + len(data[0])
+ base = chainbase = curr
+
+ e = (offset_type(offset, flags), l, textlen,
+ base, link, p1r, p2r, node)
+ # insert before the sentinel entry at the end of the index
+ self.index.insert(-1, e)
+ self.nodemap[node] = curr
+
+ entry = self._io.packentry(e, self.node, self.version, curr)
+ if not self._inline:
+ transaction.add(self.datafile, offset)
+ transaction.add(self.indexfile, curr * len(entry))
+ if data[0]:
+ dfh.write(data[0])
+ dfh.write(data[1])
+ dfh.flush()
+ ifh.write(entry)
+ else:
+ offset += curr * self._io.size
+ transaction.add(self.indexfile, offset, curr)
+ ifh.write(entry)
+ ifh.write(data[0])
+ ifh.write(data[1])
+ # inline revlogs may need conversion once they grow too large
+ self.checkinlinesize(transaction, ifh)
+
+ if type(text) == str: # only accept immutable objects
+ self._cache = (node, curr, text)
+ self._basecache = (curr, chainbase)
+ return node
+
+ def group(self, nodelist, bundler, reorder=None):
+ """Calculate a delta group, yielding a sequence of changegroup chunks
+ (strings).
+
+ Given a list of changeset revs, return a set of deltas and
+ metadata corresponding to nodes. The first delta is
+ first parent(nodelist[0]) -> nodelist[0], the receiver is
+ guaranteed to have this parent as it has all history before
+ these changesets. In the case firstparent is nullrev the
+ changegroup starts with a full revision.
+ """
+
+ # if we don't have any revisions touched by these changesets, bail
+ if len(nodelist) == 0:
+ yield bundler.close()
+ return
+
+ # for generaldelta revlogs, we linearize the revs; this will both be
+ # much quicker and generate a much smaller bundle
+ if (self._generaldelta and reorder is not False) or reorder:
+ dag = dagutil.revlogdag(self)
+ revs = set(self.rev(n) for n in nodelist)
+ revs = dag.linearize(revs)
+ else:
+ revs = sorted([self.rev(n) for n in nodelist])
+
+ # add the parent of the first rev
+ p = self.parentrevs(revs[0])[0]
+ revs.insert(0, p)
+
+ # build deltas
+ for r in xrange(len(revs) - 1):
+ # each chunk deltas against the previous rev in the ordering
+ prev, curr = revs[r], revs[r + 1]
+ for c in bundler.revchunk(self, curr, prev):
+ yield c
+
+ yield bundler.close()
+
+ def addgroup(self, bundle, linkmapper, transaction):
+ """
+ add a delta group
+
+ given a set of deltas, add them to the revision log. the
+ first delta is against its parent, which should be in our
+ log, the rest are against the previous delta.
+ """
+
+ # track the base of the current delta log
+ node = None
+
+ r = len(self)
+ end = 0
+ if r:
+ end = self.end(r - 1)
+ ifh = self.opener(self.indexfile, "a+")
+ isize = r * self._io.size
+ # register current file sizes so the transaction can truncate back
+ if self._inline:
+ transaction.add(self.indexfile, end + isize, r)
+ dfh = None
+ else:
+ transaction.add(self.indexfile, isize, r)
+ transaction.add(self.datafile, end)
+ dfh = self.opener(self.datafile, "a")
+
+ try:
+ # loop through our set of deltas
+ chain = None
+ while True:
+ chunkdata = bundle.deltachunk(chain)
+ if not chunkdata:
+ break
+ node = chunkdata['node']
+ p1 = chunkdata['p1']
+ p2 = chunkdata['p2']
+ cs = chunkdata['cs']
+ deltabase = chunkdata['deltabase']
+ delta = chunkdata['delta']
+
+ link = linkmapper(cs)
+ if node in self.nodemap:
+ # this can happen if two branches make the same change
+ chain = node
+ continue
+
+ for p in (p1, p2):
+ if not p in self.nodemap:
+ raise LookupError(p, self.indexfile,
+ _('unknown parent'))
+
+ if deltabase not in self.nodemap:
+ raise LookupError(deltabase, self.indexfile,
+ _('unknown delta base'))
+
+ baserev = self.rev(deltabase)
+ chain = self._addrevision(node, None, transaction, link,
+ p1, p2, (baserev, delta), ifh, dfh)
+ if not dfh and not self._inline:
+ # addrevision switched from inline to conventional
+ # reopen the index
+ ifh.close()
+ dfh = self.opener(self.datafile, "a")
+ ifh = self.opener(self.indexfile, "a")
+ finally:
+ if dfh:
+ dfh.close()
+ ifh.close()
+
+ # the last node added, or None if the group was empty
+ return node
+
+ def strip(self, minlink, transaction):
+ """truncate the revlog on the first revision with a linkrev >= minlink
+
+ This function is called when we're stripping revision minlink and
+ its descendants from the repository.
+
+ We have to remove all revisions with linkrev >= minlink, because
+ the equivalent changelog revisions will be renumbered after the
+ strip.
+
+ So we truncate the revlog on the first of these revisions, and
+ trust that the caller has saved the revisions that shouldn't be
+ removed and that it'll readd them after this truncation.
+ """
+ if len(self) == 0:
+ return
+
+ # find the first revision whose linkrev (index field 4) >= minlink
+ for rev in self:
+ if self.index[rev][4] >= minlink:
+ break
+ else:
+ # nothing to strip
+ return
+
+ # first truncate the files on disk
+ end = self.start(rev)
+ if not self._inline:
+ transaction.add(self.datafile, end)
+ end = rev * self._io.size
+ else:
+ # inline: index entries and data are interleaved in one file
+ end += rev * self._io.size
+
+ transaction.add(self.indexfile, end)
+
+ # then reset internal state in memory to forget those revisions
+ self._cache = None
+ self._chunkclear()
+ for x in xrange(rev, len(self)):
+ del self.nodemap[self.node(x)]
+
+ # keep the trailing sentinel entry, drop everything from 'rev' on
+ del self.index[rev:-1]
+
+ def checksize(self):
+ # Return (dd, di): the number of unexpected surplus bytes in the data
+ # file and index file respectively (0 when sizes are consistent, and
+ # when a file is simply missing).
+ expected = 0
+ if len(self):
+ expected = max(0, self.end(len(self) - 1))
+
+ try:
+ f = self.opener(self.datafile)
+ f.seek(0, 2)
+ actual = f.tell()
+ f.close()
+ dd = actual - expected
+ except IOError, inst:
+ # a missing data file is fine (inline or empty revlog)
+ if inst.errno != errno.ENOENT:
+ raise
+ dd = 0
+
+ try:
+ f = self.opener(self.indexfile)
+ f.seek(0, 2)
+ actual = f.tell()
+ f.close()
+ s = self._io.size
+ i = max(0, actual // s)
+ di = actual - (i * s)
+ if self._inline:
+ # inline: expected index size also includes all revision data
+ databytes = 0
+ for r in self:
+ databytes += max(0, self.length(r))
+ dd = 0
+ di = actual - len(self) * s - databytes
+ except IOError, inst:
+ if inst.errno != errno.ENOENT:
+ raise
+ di = 0
+
+ return (dd, di)
+
+ def files(self):
+ # files backing this revlog: the index, plus the data file when not
+ # stored inline
+ res = [self.indexfile]
+ if not self._inline:
+ res.append(self.datafile)
+ return res
diff --git a/websdk/mercurial/revset.py b/websdk/mercurial/revset.py
index 2ee671e..d63c49d 120000..100644
--- a/websdk/mercurial/revset.py
+++ b/websdk/mercurial/revset.py
@@ -1 +1,1141 @@
-/usr/share/pyshared/mercurial/revset.py \ No newline at end of file
+# revset.py - revision set queries for mercurial
+#
+# Copyright 2010 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+import re
+import parser, util, error, discovery, hbisect
+import node as nodemod
+import bookmarks as bookmarksmod
+import match as matchmod
+from i18n import _
+
+ # Parser table for the revset grammar, consumed by the generic
+ # 'parser' module. Each entry maps a token to
+ # (binding strength, prefix rule(s), infix rule, suffix rule).
+ elements = {
+ "(": (20, ("group", 1, ")"), ("func", 1, ")")),
+ "~": (18, None, ("ancestor", 18)),
+ "^": (18, None, ("parent", 18), ("parentpost", 18)),
+ "-": (5, ("negate", 19), ("minus", 5)),
+ "::": (17, ("dagrangepre", 17), ("dagrange", 17),
+ ("dagrangepost", 17)),
+ "..": (17, ("dagrangepre", 17), ("dagrange", 17),
+ ("dagrangepost", 17)),
+ ":": (15, ("rangepre", 15), ("range", 15), ("rangepost", 15)),
+ "not": (10, ("not", 10)),
+ "!": (10, ("not", 10)),
+ "and": (5, None, ("and", 5)),
+ "&": (5, None, ("and", 5)),
+ "or": (4, None, ("or", 4)),
+ "|": (4, None, ("or", 4)),
+ "+": (4, None, ("or", 4)),
+ ",": (2, None, ("list", 2)),
+ ")": (0, None, None),
+ "symbol": (0, ("symbol",), None),
+ "string": (0, ("string",), None),
+ "end": (0, None, None),
+ }
+
+ # words that are operators rather than symbols in the grammar
+ keywords = set(['and', 'or', 'not'])
+
+ def tokenize(program):
+ # Lex a revset expression into (type, value, position) tuples; the
+ # stream always ends with an ('end', None, pos) token.
+ pos, l = 0, len(program)
+ while pos < l:
+ c = program[pos]
+ if c.isspace(): # skip inter-token whitespace
+ pass
+ elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
+ yield ('::', None, pos)
+ pos += 1 # skip ahead
+ elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
+ yield ('..', None, pos)
+ pos += 1 # skip ahead
+ elif c in "():,-|&+!~^": # handle simple operators
+ yield (c, None, pos)
+ elif (c in '"\'' or c == 'r' and
+ program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
+ if c == 'r':
+ # raw string: take the contents literally, no unescaping
+ pos += 1
+ c = program[pos]
+ decode = lambda x: x
+ else:
+ decode = lambda x: x.decode('string-escape')
+ pos += 1
+ s = pos
+ while pos < l: # find closing quote
+ d = program[pos]
+ if d == '\\': # skip over escaped characters
+ pos += 2
+ continue
+ if d == c:
+ yield ('string', decode(program[s:pos]), s)
+ break
+ pos += 1
+ else:
+ raise error.ParseError(_("unterminated string"), s)
+ elif c.isalnum() or c in '._' or ord(c) > 127: # gather up a symbol/keyword
+ s = pos
+ pos += 1
+ while pos < l: # find end of symbol
+ d = program[pos]
+ if not (d.isalnum() or d in "._" or ord(d) > 127):
+ break
+ if d == '.' and program[pos - 1] == '.': # special case for ..
+ pos -= 1
+ break
+ pos += 1
+ sym = program[s:pos]
+ if sym in keywords: # operator keywords
+ yield (sym, None, s)
+ else:
+ yield ('symbol', sym, s)
+ # back up: the main loop's pos += 1 will move past the last char
+ pos -= 1
+ else:
+ raise error.ParseError(_("syntax error"), pos)
+ pos += 1
+ yield ('end', None, pos)
+
+# helpers
+
+ # helpers for unpacking parse-tree nodes (tuples of the form
+ # (type, value, ...)) produced by the parser
+
+ def getstring(x, err):
+ # require a 'string' or 'symbol' node and return its text
+ if x and (x[0] == 'string' or x[0] == 'symbol'):
+ return x[1]
+ raise error.ParseError(err)
+
+ def getlist(x):
+ # flatten a right-nested 'list' node into a Python list of nodes
+ if not x:
+ return []
+ if x[0] == 'list':
+ return getlist(x[1]) + [x[2]]
+ return [x]
+
+ def getargs(x, min, max, err):
+ # flatten an argument list and enforce an arity range
+ l = getlist(x)
+ if len(l) < min or len(l) > max:
+ raise error.ParseError(err)
+ return l
+
+ def getset(repo, subset, x):
+ # evaluate parse-tree node 'x' against 'subset' via the methods table
+ if not x:
+ raise error.ParseError(_("missing argument"))
+ return methods[x[0]](repo, subset, *x[1:])
+
+# operator methods
+
+ def stringset(repo, subset, x):
+ # resolve a literal revision identifier and intersect with subset
+ x = repo[x].rev()
+ if x == -1 and len(subset) == len(repo):
+ return [-1]
+ if len(subset) == len(repo) or x in subset:
+ return [x]
+ return []
+
+ def symbolset(repo, subset, x):
+ # a bare symbol that names a predicate is an error; otherwise treat
+ # it like a string revision identifier
+ if x in symbols:
+ raise error.ParseError(_("can't use %s here") % x)
+ return stringset(repo, subset, x)
+
+ def rangeset(repo, subset, x, y):
+ # 'x:y' — evaluate the endpoints against the full repo when they are
+ # empty within the subset, then clip the numeric range to the subset
+ m = getset(repo, subset, x)
+ if not m:
+ m = getset(repo, range(len(repo)), x)
+
+ n = getset(repo, subset, y)
+ if not n:
+ n = getset(repo, range(len(repo)), y)
+
+ if not m or not n:
+ return []
+ m, n = m[0], n[-1]
+
+ if m < n:
+ r = range(m, n + 1)
+ else:
+ # reversed range when the left endpoint is the larger rev
+ r = range(m, n - 1, -1)
+ s = set(subset)
+ return [x for x in r if x in s]
+
+ def andset(repo, subset, x, y):
+ # 'x and y': evaluate y within the result of x
+ return getset(repo, getset(repo, subset, x), y)
+
+ def orset(repo, subset, x, y):
+ # 'x or y': keep x's results, then add y's results not already present
+ xl = getset(repo, subset, x)
+ s = set(xl)
+ yl = getset(repo, [r for r in subset if r not in s], y)
+ return xl + yl
+
+ def notset(repo, subset, x):
+ # 'not x': the subset minus x's results, preserving subset order
+ s = set(getset(repo, subset, x))
+ return [r for r in subset if r not in s]
+
+ def listset(repo, subset, a, b):
+ # a bare comma-list is only legal as function arguments
+ raise error.ParseError(_("can't use a list in this context"))
+
+ def func(repo, subset, a, b):
+ # dispatch 'name(args)' through the symbols table
+ if a[0] == 'symbol' and a[1] in symbols:
+ return symbols[a[1]](repo, subset, b)
+ raise error.ParseError(_("not a function: %s") % a[1])
+
+# functions
+
+ def adds(repo, subset, x):
+ """``adds(pattern)``
+ Changesets that add a file matching pattern.
+ """
+ # i18n: "adds" is a keyword
+ pat = getstring(x, _("adds requires a pattern"))
+ # field 1 of repo.status() holds added files
+ return checkstatus(repo, subset, pat, 1)
+
+ def ancestor(repo, subset, x):
+ """``ancestor(single, single)``
+ Greatest common ancestor of the two changesets.
+ """
+ # i18n: "ancestor" is a keyword
+ l = getargs(x, 2, 2, _("ancestor requires two arguments"))
+ r = range(len(repo))
+ a = getset(repo, r, l[0])
+ b = getset(repo, r, l[1])
+ if len(a) != 1 or len(b) != 1:
+ # i18n: "ancestor" is a keyword
+ raise error.ParseError(_("ancestor arguments must be single revisions"))
+ an = [repo[a[0]].ancestor(repo[b[0]]).rev()]
+
+ return [r for r in an if r in subset]
+
+ def ancestors(repo, subset, x):
+ """``ancestors(set)``
+ Changesets that are ancestors of a changeset in set.
+ """
+ args = getset(repo, range(len(repo)), x)
+ if not args:
+ return []
+ # changelog.ancestors excludes the revs themselves, so union them in
+ s = set(repo.changelog.ancestors(*args)) | set(args)
+ return [r for r in subset if r in s]
+
+ def ancestorspec(repo, subset, x, n):
+ """``set~n``
+ Changesets that are the Nth ancestor (first parents only) of a changeset in set.
+ """
+ try:
+ n = int(n[1])
+ except (TypeError, ValueError):
+ raise error.ParseError(_("~ expects a number"))
+ ps = set()
+ cl = repo.changelog
+ for r in getset(repo, subset, x):
+ # follow the first-parent chain n steps back
+ for i in range(n):
+ r = cl.parentrevs(r)[0]
+ ps.add(r)
+ return [r for r in subset if r in ps]
+
+ def author(repo, subset, x):
+ """``author(string)``
+ Alias for ``user(string)``.
+ """
+ # i18n: "author" is a keyword
+ # case-insensitive substring match against the committer field
+ n = getstring(x, _("author requires a string")).lower()
+ return [r for r in subset if n in repo[r].user().lower()]
+
+ def bisect(repo, subset, x):
+ """``bisect(string)``
+ Changesets marked in the specified bisect status:
+
+ - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
+ - ``goods``, ``bads`` : csets topologicaly good/bad
+ - ``range`` : csets taking part in the bisection
+ - ``pruned`` : csets that are goods, bads or skipped
+ - ``untested`` : csets whose fate is yet unknown
+ - ``ignored`` : csets ignored due to DAG topology
+ """
+ status = getstring(x, _("bisect requires a string")).lower()
+ return [r for r in subset if r in hbisect.get(repo, status)]
+
+ # Backward-compatibility
+ # - no help entry so that we do not advertise it any more
+ def bisected(repo, subset, x):
+ return bisect(repo, subset, x)
+
+ def bookmark(repo, subset, x):
+ """``bookmark([name])``
+ The named bookmark or all bookmarks.
+ """
+ # i18n: "bookmark" is a keyword
+ args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
+ if args:
+ bm = getstring(args[0],
+ # i18n: "bookmark" is a keyword
+ _('the argument to bookmark must be a string'))
+ bmrev = bookmarksmod.listbookmarks(repo).get(bm, None)
+ if not bmrev:
+ raise util.Abort(_("bookmark '%s' does not exist") % bm)
+ bmrev = repo[bmrev].rev()
+ return [r for r in subset if r == bmrev]
+ # no argument: collect the revs of every bookmark
+ bms = set([repo[r].rev()
+ for r in bookmarksmod.listbookmarks(repo).values()])
+ return [r for r in subset if r in bms]
+
+ def branch(repo, subset, x):
+ """``branch(string or set)``
+ All changesets belonging to the given branch or the branches of the given
+ changesets.
+ """
+ try:
+ # first try the argument as a literal branch name
+ b = getstring(x, '')
+ if b in repo.branchmap():
+ return [r for r in subset if repo[r].branch() == b]
+ except error.ParseError:
+ # not a string, but another revspec, e.g. tip()
+ pass
+
+ # otherwise: the branches of the changesets the argument evaluates to
+ s = getset(repo, range(len(repo)), x)
+ b = set()
+ for r in s:
+ b.add(repo[r].branch())
+ s = set(s)
+ return [r for r in subset if r in s or repo[r].branch() in b]
+
def checkstatus(repo, subset, pat, field):
    # Shared helper for status-based predicates (modifies() uses field 0,
    # removes() uses field 2): keep revisions whose status relative to
    # their first parent lists a file matching pat in the given field.
    m = matchmod.match(repo.root, repo.getcwd(), [pat])
    s = []
    # A pattern without wildcards names exactly one file; plain membership
    # tests then suffice and the matcher can be bypassed.
    fast = (m.files() == [pat])
    for r in subset:
        c = repo[r]
        if fast:
            if pat not in c.files():
                continue
        else:
            for f in c.files():
                if m(f):
                    break
            else:
                # no changed file matches: skip the expensive status() call
                continue
        files = repo.status(c.p1().node(), c.node())[field]
        if fast:
            if pat in files:
                s.append(r)
        else:
            for f in files:
                if m(f):
                    s.append(r)
                    break
    return s
+
def children(repo, subset, x):
    """``children(set)``
    Child changesets of changesets in set.
    """
    cs = set()
    cl = repo.changelog
    s = set(getset(repo, range(len(repo)), x))
    # scan every revision: r is a child if either of its parents is in s
    for r in xrange(0, len(repo)):
        for p in cl.parentrevs(r):
            if p in s:
                cs.add(r)
    return [r for r in subset if r in cs]
+
def closed(repo, subset, x):
    """``closed()``
    Changeset is closed.
    """
    # i18n: "closed" is a keyword
    getargs(x, 0, 0, _("closed takes no arguments"))
    result = []
    for r in subset:
        # a closed changeset records a truthy 'close' entry in its extras
        if repo[r].extra().get('close'):
            result.append(r)
    return result
+
def contains(repo, subset, x):
    """``contains(pattern)``
    Revision contains a file matching pattern. See :hg:`help patterns`
    for information about file patterns.
    """
    # i18n: "contains" is a keyword
    pat = getstring(x, _("contains requires a pattern"))
    m = matchmod.match(repo.root, repo.getcwd(), [pat])
    s = []
    if m.files() == [pat]:
        # literal filename: direct manifest membership test per revision
        for r in subset:
            if pat in repo[r]:
                s.append(r)
    else:
        # wildcard pattern: scan each revision's manifest for a match
        for r in subset:
            for f in repo[r].manifest():
                if m(f):
                    s.append(r)
                    break
    return s
+
def date(repo, subset, x):
    """``date(interval)``
    Changesets within the interval, see :hg:`help dates`.
    """
    # i18n: "date" is a keyword
    ds = getstring(x, _("date requires a string"))
    # matchdate builds a predicate tested against the commit timestamp
    dm = util.matchdate(ds)
    return [r for r in subset if dm(repo[r].date()[0])]
+
def desc(repo, subset, x):
    """``desc(string)``
    Search commit message for string. The match is case-insensitive.
    """
    # i18n: "desc" is a keyword
    needle = getstring(x, _("desc requires a string")).lower()
    # case-insensitive substring search over each commit description
    return [r for r in subset
            if needle in repo[r].description().lower()]
+
def descendants(repo, subset, x):
    """``descendants(set)``
    Changesets which are descendants of changesets in set.
    """
    args = getset(repo, range(len(repo)), x)
    if not args:
        return []
    # union args back in so each changeset counts as its own descendant
    s = set(repo.changelog.descendants(*args)) | set(args)
    return [r for r in subset if r in s]
+
def filelog(repo, subset, x):
    """``filelog(pattern)``
    Changesets connected to the specified filelog.
    """

    pat = getstring(x, _("filelog requires a pattern"))
    m = matchmod.match(repo.root, repo.getcwd(), [pat], default='relpath')

    # Without wildcards the pattern names files directly; otherwise scan
    # the working context for matching files. Either way the filelog walk
    # that follows is identical, so share it.
    if not m.anypats():
        candidates = m.files()
    else:
        candidates = [f for f in repo[None] if m(f)]

    linkrevs = set()
    for f in candidates:
        fl = repo.file(f)
        for fr in fl:
            linkrevs.add(fl.linkrev(fr))

    return [r for r in subset if r in linkrevs]
+
def first(repo, subset, x):
    """``first(set, [n])``
    An alias for limit().
    """
    # thin alias; argument validation happens inside limit()
    return limit(repo, subset, x)
+
def follow(repo, subset, x):
    """``follow([file])``
    An alias for ``::.`` (ancestors of the working copy's first parent).
    If a filename is specified, the history of the given file is followed,
    including copies.
    """
    # i18n: "follow" is a keyword
    l = getargs(x, 0, 1, _("follow takes no arguments or a filename"))
    p = repo['.'].rev()
    if l:
        x = getstring(l[0], _("follow expected a filename"))
        if x in repo['.']:
            # file-context ancestors follow the file's history (incl. copies)
            s = set(ctx.rev() for ctx in repo['.'][x].ancestors())
        else:
            # file not present in the working copy's parent: empty result
            return []
    else:
        s = set(repo.changelog.ancestors(p))

    # '.' itself is always part of the result
    s |= set([p])
    return [r for r in subset if r in s]
+
def followfile(repo, subset, x):
    """``follow()``
    An alias for ``::.`` (ancestors of the working copy's first parent).
    """
    # NOTE(review): not registered in the symbols table below — follow()
    # already handles the zero-argument form, so this looks like dead code.
    # i18n: "follow" is a keyword
    getargs(x, 0, 0, _("follow takes no arguments"))
    p = repo['.'].rev()
    s = set(repo.changelog.ancestors(p)) | set([p])
    return [r for r in subset if r in s]
+
def getall(repo, subset, x):
    """``all()``
    All changesets, the same as ``0:tip``.
    """
    # i18n: "all" is a keyword
    getargs(x, 0, 0, _("all takes no arguments"))
    # the incoming subset already spans everything under consideration
    return subset
+
def grep(repo, subset, x):
    """``grep(regex)``
    Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')``
    to ensure special escape characters are handled correctly. Unlike
    ``keyword(string)``, the match is case-sensitive.
    """
    try:
        # i18n: "grep" is a keyword
        gr = re.compile(getstring(x, _("grep requires a string")))
    except re.error, e:
        # surface bad user regexes as parse errors, not tracebacks
        raise error.ParseError(_('invalid match pattern: %s') % e)
    l = []
    for r in subset:
        c = repo[r]
        # search the changed file names, the user, and the description
        for e in c.files() + [c.user(), c.description()]:
            if gr.search(e):
                l.append(r)
                break
    return l
+
def hasfile(repo, subset, x):
    """``file(pattern)``
    Changesets affecting files matched by pattern.
    """
    # i18n: "file" is a keyword
    pat = getstring(x, _("file requires a pattern"))
    m = matchmod.match(repo.root, repo.getcwd(), [pat])
    # keep revisions where at least one changed file matches the pattern
    return [r for r in subset
            if any(m(f) for f in repo[r].files())]
+
def head(repo, subset, x):
    """``head()``
    Changeset is a named branch head.
    """
    # i18n: "head" is a keyword
    getargs(x, 0, 0, _("head takes no arguments"))
    hs = set()
    # branchmap maps branch name -> list of head nodes
    for b, ls in repo.branchmap().iteritems():
        hs.update(repo[h].rev() for h in ls)
    return [r for r in subset if r in hs]
+
def heads(repo, subset, x):
    """``heads(set)``
    Members of set with no children in set.
    """
    s = getset(repo, subset, x)
    # a head is any member that is not the parent of another member
    ps = set(parents(repo, subset, x))
    return [r for r in s if r not in ps]
+
def keyword(repo, subset, x):
    """``keyword(string)``
    Search commit message, user name, and names of changed files for
    string. The match is case-insensitive.
    """
    # i18n: "keyword" is a keyword
    needle = getstring(x, _("keyword requires a string")).lower()

    def haystack(c):
        # combined searchable text for one changeset
        return " ".join(c.files() + [c.user(), c.description()]).lower()

    return [r for r in subset if needle in haystack(repo[r])]
+
def limit(repo, subset, x):
    """``limit(set, [n])``
    First n members of set, defaulting to 1.
    """
    # i18n: "limit" is a keyword
    l = getargs(x, 1, 2, _("limit requires one or two arguments"))
    try:
        lim = 1
        if len(l) == 2:
            # i18n: "limit" is a keyword
            lim = int(getstring(l[1], _("limit requires a number")))
    except (TypeError, ValueError):
        # i18n: "limit" is a keyword
        raise error.ParseError(_("limit expects a number"))
    ss = set(subset)
    # evaluate the inner set over the whole repo, truncate to the first
    # lim entries, then keep only those also present in subset
    os = getset(repo, range(len(repo)), l[0])[:lim]
    return [r for r in os if r in ss]
+
def last(repo, subset, x):
    """``last(set, [n])``
    Last n members of set, defaulting to 1.
    """
    # i18n: "last" is a keyword
    l = getargs(x, 1, 2, _("last requires one or two arguments"))
    try:
        lim = 1
        if len(l) == 2:
            # i18n: "last" is a keyword
            lim = int(getstring(l[1], _("last requires a number")))
    except (TypeError, ValueError):
        # i18n: "last" is a keyword
        raise error.ParseError(_("last expects a number"))
    ss = set(subset)
    # like limit(), but take the trailing lim entries of the inner set
    os = getset(repo, range(len(repo)), l[0])[-lim:]
    return [r for r in os if r in ss]
+
def maxrev(repo, subset, x):
    """``max(set)``
    Changeset with highest revision number in set.
    """
    candidates = getset(repo, range(len(repo)), x)
    if not candidates:
        return []
    top = max(candidates)
    # only report the maximum if it survives the subset filter
    return [top] if top in subset else []
+
def merge(repo, subset, x):
    """``merge()``
    Changeset is a merge changeset.
    """
    # i18n: "merge" is a keyword
    getargs(x, 0, 0, _("merge takes no arguments"))
    cl = repo.changelog
    # a merge has a non-null (-1) second parent
    return [r for r in subset if cl.parentrevs(r)[1] != -1]
+
def minrev(repo, subset, x):
    """``min(set)``
    Changeset with lowest revision number in set.
    """
    candidates = getset(repo, range(len(repo)), x)
    if not candidates:
        return []
    bottom = min(candidates)
    # only report the minimum if it survives the subset filter
    return [bottom] if bottom in subset else []
+
def modifies(repo, subset, x):
    """``modifies(pattern)``
    Changesets modifying files matched by pattern.
    """
    # i18n: "modifies" is a keyword
    pat = getstring(x, _("modifies requires a pattern"))
    # field 0 of repo.status() holds the modified files
    return checkstatus(repo, subset, pat, 0)
+
def node(repo, subset, x):
    """``id(string)``
    Revision non-ambiguously specified by the given hex string prefix.
    """
    # i18n: "id" is a keyword
    l = getargs(x, 1, 1, _("id requires one argument"))
    # i18n: "id" is a keyword
    n = getstring(l[0], _("id requires a string"))
    if len(n) == 40:
        # full 40-character hex node: direct lookup
        rn = repo[n].rev()
    else:
        # shorter prefix: resolve through the changelog's partial matcher
        rn = repo.changelog.rev(repo.changelog._partialmatch(n))
    return [r for r in subset if r == rn]
+
def outgoing(repo, subset, x):
    """``outgoing([path])``
    Changesets not found in the specified destination repository, or the
    default push location.
    """
    import hg # avoid start-up nasties
    # i18n: "outgoing" is a keyword
    l = getargs(x, 0, 1, _("outgoing takes one or no arguments"))
    # i18n: "outgoing" is a keyword
    dest = l and getstring(l[0], _("outgoing requires a repository path")) or ''
    # fall back to the configured default-push/default paths
    dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest)
    revs, checkout = hg.addbranchrevs(repo, repo, branches, [])
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    other = hg.peer(repo, {}, dest)
    # silence the discovery progress output
    repo.ui.pushbuffer()
    common, outheads = discovery.findcommonoutgoing(repo, other, onlyheads=revs)
    repo.ui.popbuffer()
    cl = repo.changelog
    o = set([cl.rev(r) for r in repo.changelog.findmissing(common, outheads)])
    return [r for r in subset if r in o]
+
def p1(repo, subset, x):
    """``p1([set])``
    First parent of changesets in set, or the working directory.
    """
    if x is None:
        # repo[None] is the working directory context
        p = repo[x].p1().rev()
        return [r for r in subset if r == p]

    ps = set()
    cl = repo.changelog
    for r in getset(repo, range(len(repo)), x):
        ps.add(cl.parentrevs(r)[0])
    return [r for r in subset if r in ps]
+
def p2(repo, subset, x):
    """``p2([set])``
    Second parent of changesets in set, or the working directory.
    """
    if x is None:
        # repo[None] is the working directory context
        ps = repo[x].parents()
        try:
            p = ps[1].rev()
            return [r for r in subset if r == p]
        except IndexError:
            # working directory is not a merge: no second parent
            return []

    ps = set()
    cl = repo.changelog
    for r in getset(repo, range(len(repo)), x):
        # parentrevs()[1] is -1 (null) for non-merges; -1 never appears
        # in subset, so such entries are harmless
        ps.add(cl.parentrevs(r)[1])
    return [r for r in subset if r in ps]
+
def parents(repo, subset, x):
    """``parents([set])``
    The set of all parents for all changesets in set, or the working directory.
    """
    if x is None:
        # repo[None] is the working directory context
        ps = tuple(p.rev() for p in repo[x].parents())
        return [r for r in subset if r in ps]

    ps = set()
    cl = repo.changelog
    for r in getset(repo, range(len(repo)), x):
        ps.update(cl.parentrevs(r))
    return [r for r in subset if r in ps]
+
def parentspec(repo, subset, x, n):
    """``set^0``
    The set.
    ``set^1`` (or ``set^``), ``set^2``
    First or second parent, respectively, of all changesets in set.
    """
    try:
        n = int(n[1])
        if n not in (0, 1, 2):
            raise ValueError
    except (TypeError, ValueError):
        raise error.ParseError(_("^ expects a number 0, 1, or 2"))
    ps = set()
    cl = repo.changelog
    for r in getset(repo, subset, x):
        if n == 0:
            ps.add(r)
        elif n == 1:
            ps.add(cl.parentrevs(r)[0])
        elif n == 2:
            parents = cl.parentrevs(r)
            # NOTE(review): parentrevs appears to always return a pair
            # (second entry -1 for non-merges, cf. merge() above), so this
            # length guard looks defensive — confirm before relying on it.
            if len(parents) > 1:
                ps.add(parents[1])
    return [r for r in subset if r in ps]
+
def present(repo, subset, x):
    """``present(set)``
    An empty set, if any revision in set isn't found; otherwise,
    all revisions in set.
    """
    try:
        return getset(repo, subset, x)
    except error.RepoLookupError:
        # unknown revisions produce an empty set instead of an error
        return []
+
def removes(repo, subset, x):
    """``removes(pattern)``
    Changesets which remove files matching pattern.
    """
    # i18n: "removes" is a keyword
    pat = getstring(x, _("removes requires a pattern"))
    # field 2 of repo.status() holds the removed files
    return checkstatus(repo, subset, pat, 2)
+
def rev(repo, subset, x):
    """``rev(number)``
    Revision with the given numeric identifier.
    """
    # i18n: "rev" is a keyword
    l = getargs(x, 1, 1, _("rev requires one argument"))
    try:
        # i18n: "rev" is a keyword
        l = int(getstring(l[0], _("rev requires a number")))
    except (TypeError, ValueError):
        # i18n: "rev" is a keyword
        raise error.ParseError(_("rev expects a number"))
    # at most one member of subset can equal the requested revision
    return [r for r in subset if r == l]
+
def reverse(repo, subset, x):
    """``reverse(set)``
    Reverse order of set.
    """
    # materialize the set and return it in reversed order
    return list(reversed(getset(repo, subset, x)))
+
def roots(repo, subset, x):
    """``roots(set)``
    Changesets with no parent changeset in set.
    """
    s = getset(repo, subset, x)
    # a root is any member that is not a child of another member
    cs = set(children(repo, subset, x))
    return [r for r in s if r not in cs]
+
def sort(repo, subset, x):
    """``sort(set[, [-]key...])``
    Sort set by keys. The default sort order is ascending, specify a key
    as ``-key`` to sort in descending order.

    The keys can be:

    - ``rev`` for the revision number,
    - ``branch`` for the branch name,
    - ``desc`` for the commit message (description),
    - ``user`` for user name (``author`` can be used as an alias),
    - ``date`` for the commit date
    """
    # i18n: "sort" is a keyword
    l = getargs(x, 1, 2, _("sort requires one or two arguments"))
    keys = "rev"
    if len(l) == 2:
        keys = getstring(l[1], _("sort spec must be a string"))

    s = l[0]
    keys = keys.split()
    l = []
    def invert(s):
        # map a string to its ordering-inverse so an ascending sort of
        # the inverted key yields descending order of the original
        return "".join(chr(255 - ord(c)) for c in s)
    for r in getset(repo, subset, s):
        c = repo[r]
        # build one composite sort key per revision, one entry per key
        e = []
        for k in keys:
            if k == 'rev':
                e.append(r)
            elif k == '-rev':
                e.append(-r)
            elif k == 'branch':
                e.append(c.branch())
            elif k == '-branch':
                e.append(invert(c.branch()))
            elif k == 'desc':
                e.append(c.description())
            elif k == '-desc':
                e.append(invert(c.description()))
            elif k in ('user', 'author'):
                # was `k in 'user author'`: substring matching silently
                # accepted bogus keys such as 'r' or 'use' instead of
                # raising ParseError below
                e.append(c.user())
            elif k in ('-user', '-author'):
                e.append(invert(c.user()))
            elif k == 'date':
                e.append(c.date()[0])
            elif k == '-date':
                e.append(-c.date()[0])
            else:
                raise error.ParseError(_("unknown sort key %r") % k)
        # trailing element carries the revision so it can be recovered
        # after sorting (and breaks ties deterministically)
        e.append(r)
        l.append(e)
    l.sort()
    return [e[-1] for e in l]
+
def tag(repo, subset, x):
    """``tag([name])``
    The specified tag by name, or all tagged revisions if no name is given.
    """
    # i18n: "tag" is a keyword
    args = getargs(x, 0, 1, _("tag takes one or no arguments"))
    cl = repo.changelog
    if args:
        tn = getstring(args[0],
                       # i18n: "tag" is a keyword
                       _('the argument to tag must be a string'))
        if not repo.tags().get(tn, None):
            raise util.Abort(_("tag '%s' does not exist") % tn)
        s = set([cl.rev(n) for t, n in repo.tagslist() if t == tn])
    else:
        # all tags except the implicit 'tip' tag
        s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
    return [r for r in subset if r in s]
+
def tagged(repo, subset, x):
    # backward-compatible spelling of tag()
    return tag(repo, subset, x)

def user(repo, subset, x):
    """``user(string)``
    User name contains string. The match is case-insensitive.
    """
    # alias: identical semantics to author()
    return author(repo, subset, x)
+
# Map of predicate names usable in revset expressions to their
# implementations; several entries are aliases sharing one function.
symbols = {
    "adds": adds,
    "all": getall,
    "ancestor": ancestor,
    "ancestors": ancestors,
    "author": author,
    "bisect": bisect,
    "bisected": bisected,
    "bookmark": bookmark,
    "branch": branch,
    "children": children,
    "closed": closed,
    "contains": contains,
    "date": date,
    "desc": desc,
    "descendants": descendants,
    "file": hasfile,
    "filelog": filelog,
    "first": first,
    "follow": follow,
    "grep": grep,
    "head": head,
    "heads": heads,
    "id": node,
    "keyword": keyword,
    "last": last,
    "limit": limit,
    "max": maxrev,
    "merge": merge,
    "min": minrev,
    "modifies": modifies,
    "outgoing": outgoing,
    "p1": p1,
    "p2": p2,
    "parents": parents,
    "present": present,
    "removes": removes,
    "rev": rev,
    "reverse": reverse,
    "roots": roots,
    "sort": sort,
    "tag": tag,
    "tagged": tagged,
    "user": user,
}
+
# Map of parse-tree operator names to the functions that evaluate them.
methods = {
    "range": rangeset,
    "string": stringset,
    "symbol": symbolset,
    "and": andset,
    "or": orset,
    "not": notset,
    "list": listset,
    "func": func,
    "ancestor": ancestorspec,
    "parent": parentspec,
    "parentpost": p1,
}
+
def optimize(x, small):
    """Rewrite parse tree x into an equivalent, cheaper-to-evaluate tree.

    Returns (weight, tree): weight is a cost estimate used below to put
    the cheaper operand of an 'and' first. small hints that the
    expression is expected to produce a small result set.
    """
    if x is None:
        return 0, x

    smallbonus = 1
    if small:
        smallbonus = .5

    op = x[0]
    if op == 'minus':
        # a - b  ->  a and not b
        return optimize(('and', x[1], ('not', x[2])), small)
    elif op == 'dagrange':
        # a::b  ->  descendants(a) and ancestors(b)
        return optimize(('and', ('func', ('symbol', 'descendants'), x[1]),
                         ('func', ('symbol', 'ancestors'), x[2])), small)
    elif op == 'dagrangepre':
        return optimize(('func', ('symbol', 'ancestors'), x[1]), small)
    elif op == 'dagrangepost':
        return optimize(('func', ('symbol', 'descendants'), x[1]), small)
    elif op == 'rangepre':
        return optimize(('range', ('string', '0'), x[1]), small)
    elif op == 'rangepost':
        return optimize(('range', x[1], ('string', 'tip')), small)
    elif op == 'negate':
        return optimize(('string',
                         '-' + getstring(x[1], _("can't negate that"))), small)
    elif op in 'string symbol negate':
        return smallbonus, x # single revisions are small
    elif op == 'and' or op == 'dagrange':
        wa, ta = optimize(x[1], True)
        wb, tb = optimize(x[2], True)
        w = min(wa, wb)
        # evaluate the cheaper side first so it narrows the subset early
        if wa > wb:
            return w, (op, tb, ta)
        return w, (op, ta, tb)
    elif op == 'or':
        wa, ta = optimize(x[1], False)
        wb, tb = optimize(x[2], False)
        if wb < wa:
            wb, wa = wa, wb
        return max(wa, wb), (op, ta, tb)
    elif op == 'not':
        o = optimize(x[1], not small)
        return o[0], (op, o[1])
    elif op == 'parentpost':
        o = optimize(x[1], small)
        return o[0], (op, o[1])
    elif op == 'group':
        return optimize(x[1], small)
    elif op in 'range list parent ancestorspec':
        if op == 'parent':
            # x^:y means (x^) : y, not x ^ (:y)
            post = ('parentpost', x[1])
            if x[2][0] == 'dagrangepre':
                return optimize(('dagrange', post, x[2][1]), small)
            elif x[2][0] == 'rangepre':
                return optimize(('range', post, x[2][1]), small)

        wa, ta = optimize(x[1], small)
        wb, tb = optimize(x[2], small)
        return wa + wb, (op, ta, tb)
    elif op == 'func':
        f = getstring(x[1], _("not a symbol"))
        wa, ta = optimize(x[2], small)
        # NOTE(review): the `f in "<string>"` tests below do substring
        # matching (e.g. f == 'rev' matches "reverse limit first"); this
        # only skews the cost heuristic, it cannot affect correctness.
        if f in ("author branch closed date desc file grep keyword "
                 "outgoing user"):
            w = 10 # slow
        elif f in "modifies adds removes":
            w = 30 # slower
        elif f == "contains":
            w = 100 # very slow
        elif f == "ancestor":
            w = 1 * smallbonus
        elif f in "reverse limit first":
            w = 0
        elif f in "sort":
            w = 10 # assume most sorts look at changelog
        else:
            w = 1
        return w + wa, (op, x[1], ta)
    return 1, x
+
class revsetalias(object):
    # Matches a 'name(arg1, arg2)' alias declaration.  Raw string so the
    # backslash-escaped parentheses stay valid: '\(' in a plain string is
    # an invalid escape sequence (deprecated, an error in newer Pythons);
    # the compiled pattern is byte-for-byte identical.
    funcre = re.compile(r'^([^(]+)\(([^)]+)\)$')
    args = None

    def __init__(self, name, value):
        '''Aliases like:

        h = heads(default)
        b($1) = ancestors($1) - ancestors(default)
        '''
        if isinstance(name, tuple): # parameter substitution
            self.tree = name
            self.replacement = value
        else: # alias definition
            m = self.funcre.search(name)
            if m:
                self.tree = ('func', ('symbol', m.group(1)))
                self.args = [x.strip() for x in m.group(2).split(',')]
                for arg in self.args:
                    # quote each parameter name so parse() sees it as a
                    # string placeholder, substituted later in process()
                    value = value.replace(arg, repr(arg))
            else:
                self.tree = ('symbol', name)

            self.replacement, pos = parse(value)
            if pos != len(value):
                raise error.ParseError(_('invalid token'), pos)

    def process(self, tree):
        # Recursively substitute this alias into a parsed revset tree.
        if isinstance(tree, tuple):
            if self.args is None:
                if tree == self.tree:
                    return self.replacement
            elif tree[:2] == self.tree:
                l = getlist(tree[2])
                if len(l) != len(self.args):
                    raise error.ParseError(
                        _('invalid number of arguments: %s') % len(l))
                result = self.replacement
                for a, v in zip(self.args, l):
                    valalias = revsetalias(('string', a), v)
                    result = valalias.process(result)
                return result
            return tuple(map(self.process, tree))
        return tree
+
def findaliases(ui, tree):
    # Apply every [revsetalias] configuration entry to the parse tree.
    for k, v in ui.configitems('revsetalias'):
        alias = revsetalias(k, v)
        tree = alias.process(tree)
    return tree

# revset expression parser entry point; returns (tree, pos)
parse = parser.parser(tokenize, elements).parse
+
def match(ui, spec):
    # Compile a revset expression into a callable mfunc(repo, subset)
    # returning the matching revisions from subset.
    if not spec:
        raise error.ParseError(_("empty query"))
    tree, pos = parse(spec)
    if (pos != len(spec)):
        raise error.ParseError(_("invalid token"), pos)
    if ui:
        # expand user-configured aliases before optimizing
        tree = findaliases(ui, tree)
    weight, tree = optimize(tree, True)
    def mfunc(repo, subset):
        return getset(repo, subset, tree)
    return mfunc
+
def formatspec(expr, *args):
    '''
    This is a convenience function for using revsets internally, and
    escapes arguments appropriately. Aliases are intentionally ignored
    so that intended expression behavior isn't accidentally subverted.

    Supported arguments:

    %r = revset expression, parenthesized
    %d = int(arg), no quoting
    %s = string(arg), escaped and single-quoted
    %b = arg.branch(), escaped and single-quoted
    %n = hex(arg), single-quoted
    %% = a literal '%'

    Prefixing the type with 'l' specifies a parenthesized list of that type.

    >>> formatspec('%r:: and %lr', '10 or 11', ("this()", "that()"))
    '(10 or 11):: and ((this()) or (that()))'
    >>> formatspec('%d:: and not %d::', 10, 20)
    '10:: and not 20::'
    >>> formatspec('%ld or %ld', [], [1])
    '(0-0) or 1'
    >>> formatspec('keyword(%s)', 'foo\\xe9')
    "keyword('foo\\\\xe9')"
    >>> b = lambda: 'default'
    >>> b.branch = b
    >>> formatspec('branch(%b)', b)
    "branch('default')"
    >>> formatspec('root(%ls)', ['a', 'b', 'c', 'd'])
    "root((('a' or 'b') or ('c' or 'd')))"
    '''

    def quote(s):
        # single-quoted repr so the revset parser sees a string literal
        return repr(str(s))

    def argtype(c, arg):
        # render one argument according to its format character
        if c == 'd':
            return str(int(arg))
        elif c == 's':
            return quote(arg)
        elif c == 'r':
            parse(arg) # make sure syntax errors are confined
            return '(%s)' % arg
        elif c == 'n':
            return quote(nodemod.hex(arg))
        elif c == 'b':
            return quote(arg.branch())

    def listexp(s, t):
        "balance a list s of type t to limit parse tree depth"
        l = len(s)
        if l == 0:
            return '(0-0)' # a minimal way to represent an empty set
        if l == 1:
            return argtype(t, s[0])
        # explicit floor division: a bare '/' would produce a float under
        # Python 3 semantics and break the slicing below; '//' is
        # identical for ints under Python 2
        m = l // 2
        return '(%s or %s)' % (listexp(s[:m], t), listexp(s[m:], t))

    ret = ''
    pos = 0
    arg = 0
    while pos < len(expr):
        c = expr[pos]
        if c == '%':
            pos += 1
            d = expr[pos]
            if d == '%':
                ret += d
            elif d in 'dsnbr':
                ret += argtype(d, args[arg])
                arg += 1
            elif d == 'l':
                # a list of some type
                pos += 1
                d = expr[pos]
                ret += listexp(args[arg], d)
                arg += 1
            else:
                raise util.Abort('unexpected revspec format character %s' % d)
        else:
            ret += c
        pos += 1

    return ret
+
# tell hggettext to extract docstrings from these functions:
# (the predicate docstrings serve as translatable user-facing help text)
i18nfunctions = symbols.values()
diff --git a/websdk/mercurial/scmutil.py b/websdk/mercurial/scmutil.py
new file mode 100644
index 0000000..d8d08a5
--- /dev/null
+++ b/websdk/mercurial/scmutil.py
@@ -0,0 +1,803 @@
+# scmutil.py - Mercurial core utility functions
+#
+# Copyright Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from i18n import _
+import util, error, osutil, revset, similar, encoding
+import match as matchmod
+import os, errno, re, stat, sys, glob
+
def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    # carriage returns and newlines would corrupt the dirstate/manifest
    for banned in ('\r', '\n'):
        if banned in f:
            raise util.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
+
def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config'''
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    if abort or warn:
        # checkwinfilename returns a message describing the problem, or None
        msg = util.checkwinfilename(f)
        if msg:
            msg = "%s: %r" % (msg, f)
            if abort:
                raise util.Abort(msg)
            ui.warn(_("warning: %s\n") % msg)
+
def checkportabilityalert(ui):
    '''check if the user's config requests nothing, a warning, or abort for
    non-portable filenames'''
    val = ui.config('ui', 'portablefilenames', 'warn')
    lval = val.lower()
    bval = util.parsebool(val)
    # aborting is forced on Windows regardless of the configured value
    abort = os.name == 'nt' or lval == 'abort'
    warn = bval or lval == 'warn'
    # reject anything that is not a boolean, 'warn', 'abort' or 'ignore'
    if bval is None and not (warn or abort or lval == 'ignore'):
        raise error.ConfigError(
            _("ui.portablefilenames value is invalid ('%s')") % val)
    return abort, warn
+
class casecollisionauditor(object):
    '''Detect filenames that differ only in case.

    Tracks the case-folded form of every name seen so far; reporting a
    new name whose folded form was already seen with a different
    spelling either warns or aborts, depending on the abort flag.
    '''

    def __init__(self, ui, abort, existingiter):
        self._ui = ui
        self._abort = abort
        # folded name -> original spelling for all names seen so far
        self._map = {}
        for f in existingiter:
            self._map[encoding.lower(f)] = f

    def __call__(self, f):
        fl = encoding.lower(f)
        # local alias; the original bound this to the name 'map',
        # shadowing the builtin
        seen = self._map
        if fl in seen and seen[fl] != f:
            msg = _('possible case-folding collision for %s') % f
            if self._abort:
                raise util.Abort(msg)
            self._ui.warn(_("warning: %s\n") % msg)
        seen[fl] = f
+
class pathauditor(object):
    '''ensure that a filesystem path contains no banned components.
    the following properties of a path are checked:

    - ends with a directory separator
    - under top-level .hg
    - starts at the root of a windows drive
    - contains ".."
    - traverses a symlink (e.g. a/symlink_here/b)
    - inside a nested repository (a callback can be used to approve
      some nested repositories, e.g., subrepositories)
    '''

    def __init__(self, root, callback=None):
        # caches of paths/directories already proven safe
        self.audited = set()
        self.auditeddir = set()
        self.root = root
        # optional hook that may approve an otherwise-banned nested repo
        self.callback = callback

    def __call__(self, path):
        '''Check the relative path.
        path may contain a pattern (e.g. foodir/**.txt)'''

        if path in self.audited:
            return
        # AIX ignores "/" at end of path, others raise EISDIR.
        if util.endswithsep(path):
            raise util.Abort(_("path ends in directory separator: %s") % path)
        normpath = os.path.normcase(path)
        parts = util.splitpath(normpath)
        # reject drive-absolute paths, top-level .hg and '..' components
        if (os.path.splitdrive(path)[0]
            or parts[0].lower() in ('.hg', '.hg.', '')
            or os.pardir in parts):
            raise util.Abort(_("path contains illegal component: %s") % path)
        if '.hg' in path.lower():
            lparts = [p.lower() for p in parts]
            for p in '.hg', '.hg.':
                if p in lparts[1:]:
                    pos = lparts.index(p)
                    base = os.path.join(*parts[:pos])
                    raise util.Abort(_('path %r is inside nested repo %r')
                                     % (path, base))

        # walk each ancestor directory, innermost first, checking for
        # symlinks and nested repositories
        parts.pop()
        prefixes = []
        while parts:
            prefix = os.sep.join(parts)
            if prefix in self.auditeddir:
                break
            curpath = os.path.join(self.root, prefix)
            try:
                st = os.lstat(curpath)
            except OSError, err:
                # EINVAL can be raised as invalid path syntax under win32.
                # They must be ignored for patterns can be checked too.
                if err.errno not in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL):
                    raise
            else:
                if stat.S_ISLNK(st.st_mode):
                    raise util.Abort(
                        _('path %r traverses symbolic link %r')
                        % (path, prefix))
                elif (stat.S_ISDIR(st.st_mode) and
                      os.path.isdir(os.path.join(curpath, '.hg'))):
                    if not self.callback or not self.callback(curpath):
                        raise util.Abort(_('path %r is inside nested repo %r') %
                                         (path, prefix))
            prefixes.append(prefix)
            parts.pop()

        self.audited.add(path)
        # only add prefixes to the cache after checking everything: we don't
        # want to add "foo/bar/baz" before checking if there's a "foo/.hg"
        self.auditeddir.update(prefixes)
+
class abstractopener(object):
    """Abstract base class; cannot be instantiated"""

    def __init__(self, *args, **kwargs):
        '''Prevent instantiation; don't call this from subclasses.'''
        raise NotImplementedError('attempted instantiating ' + str(type(self)))

    def read(self, path):
        # return the full binary contents of path; always closes the file
        fp = self(path, 'rb')
        try:
            return fp.read()
        finally:
            fp.close()

    def write(self, path, data):
        # replace the contents of path with data
        fp = self(path, 'wb')
        try:
            return fp.write(data)
        finally:
            fp.close()

    def append(self, path, data):
        # append data to path, creating the file if necessary
        fp = self(path, 'ab')
        try:
            return fp.write(data)
        finally:
            fp.close()
+
class opener(abstractopener):
    '''Open files relative to a base directory

    This class is used to hide the details of COW semantics and
    remote file access from higher level code.
    '''
    def __init__(self, base, audit=True):
        self.base = base
        self._audit = audit
        if audit:
            self.auditor = pathauditor(base)
        else:
            # auditing disabled: accept every path unconditionally
            self.auditor = util.always
        self.createmode = None
        # whether st_nlink counts can be trusted; decided on first write
        self._trustnlink = None

    @util.propertycache
    def _cansymlink(self):
        # cached per instance: does the fs under base support symlinks?
        return util.checklink(self.base)

    def _fixfilemode(self, name):
        # apply the requested create mode, masked to rw permission bits
        if self.createmode is None:
            return
        os.chmod(name, self.createmode & 0666)

    def __call__(self, path, mode="r", text=False, atomictemp=False):
        if self._audit:
            r = util.checkosfilename(path)
            if r:
                raise util.Abort("%s: %r" % (r, path))
        self.auditor(path)
        f = os.path.join(self.base, path)

        if not text and "b" not in mode:
            mode += "b" # for that other OS

        nlink = -1
        dirname, basename = os.path.split(f)
        # If basename is empty, then the path is malformed because it points
        # to a directory. Let the posixfile() call below raise IOError.
        if basename and mode not in ('r', 'rb'):
            if atomictemp:
                if not os.path.isdir(dirname):
                    util.makedirs(dirname, self.createmode)
                return util.atomictempfile(f, mode, self.createmode)
            try:
                if 'w' in mode:
                    util.unlink(f)
                    nlink = 0
                else:
                    # nlinks() may behave differently for files on Windows
                    # shares if the file is open.
                    fd = util.posixfile(f)
                    nlink = util.nlinks(f)
                    if nlink < 1:
                        nlink = 2 # force mktempcopy (issue1922)
                    fd.close()
            except (OSError, IOError), e:
                if e.errno != errno.ENOENT:
                    raise
                nlink = 0
                if not os.path.isdir(dirname):
                    util.makedirs(dirname, self.createmode)
            if nlink > 0:
                if self._trustnlink is None:
                    self._trustnlink = nlink > 1 or util.checknlink(f)
                if nlink > 1 or not self._trustnlink:
                    # break hardlinks before writing (copy-on-write)
                    util.rename(util.mktempcopy(f), f)
        fp = util.posixfile(f, mode)
        if nlink == 0:
            # freshly created file: apply the requested create mode
            self._fixfilemode(f)
        return fp

    def symlink(self, src, dst):
        self.auditor(dst)
        linkname = os.path.join(self.base, dst)
        try:
            # remove any stale file or link at the destination first
            os.unlink(linkname)
        except OSError:
            pass

        dirname = os.path.dirname(linkname)
        if not os.path.exists(dirname):
            util.makedirs(dirname, self.createmode)

        if self._cansymlink:
            try:
                os.symlink(src, linkname)
            except OSError, err:
                raise OSError(err.errno, _('could not symlink to %r: %s') %
                              (src, err.strerror), linkname)
        else:
            # no symlink support: store the link target in a regular file
            f = self(dst, "w")
            f.write(src)
            f.close()
            self._fixfilemode(dst)

    def audit(self, path):
        # public hook to run the path auditor on a relative path
        self.auditor(path)
+
class filteropener(abstractopener):
    '''Wrapper opener for filtering filenames with a function.'''

    def __init__(self, opener, filter):
        # filter: callable mapping the requested path to the one opened
        self._filter = filter
        self._orig = opener

    def __call__(self, path, *args, **kwargs):
        return self._orig(self._filter(path), *args, **kwargs)
+
def canonpath(root, cwd, myname, auditor=None):
    '''return the canonical path of myname, given cwd and root'''
    if util.endswithsep(root):
        rootsep = root
    else:
        rootsep = root + os.sep
    name = myname
    if not os.path.isabs(name):
        name = os.path.join(root, cwd, name)
    name = os.path.normpath(name)
    if auditor is None:
        auditor = pathauditor(root)
    if name != rootsep and name.startswith(rootsep):
        # common case: name lies textually under root
        name = name[len(rootsep):]
        auditor(name)
        return util.pconvert(name)
    elif name == root:
        return ''
    else:
        # Determine whether `name' is in the hierarchy at or beneath `root',
        # by iterating name=dirname(name) until that causes no change (can't
        # check name == '/', because that doesn't work on windows). For each
        # `name', compare dev/inode numbers. If they match, the list `rel'
        # holds the reversed list of components making up the relative file
        # name we want.
        root_st = os.stat(root)
        rel = []
        while True:
            try:
                name_st = os.stat(name)
            except OSError:
                break
            if util.samestat(name_st, root_st):
                if not rel:
                    # name was actually the same as root (maybe a symlink)
                    return ''
                rel.reverse()
                name = os.path.join(*rel)
                auditor(name)
                return util.pconvert(name)
            dirname, basename = os.path.split(name)
            rel.append(basename)
            if dirname == name:
                # reached the filesystem root without matching
                break
            name = dirname

        raise util.Abort('%s not under root' % myname)
+
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, recursively.'''
    def errhandler(err):
        # only errors on the starting path itself are fatal
        if err.filename == path:
            raise err
    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:
        def adddir(dirlst, dirname):
            # record dirname's stat; returns True when it was not seen
            # before (dev/inode comparison guards against symlink loops)
            match = False
            dirstat = os.stat(dirname)
            for lstdirstat in dirlst:
                if samestat(dirstat, lstdirstat):
                    match = True
                    break
            if not match:
                dirlst.append(dirstat)
            return not match
    else:
        # without samestat we cannot detect loops, so disable symlinks
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if '.hg' in dirs:
            yield root # found a repository
            qroot = os.path.join(root, '.hg', 'patches')
            if os.path.isdir(os.path.join(qroot, '.hg')):
                yield qroot # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove('.hg')
            else:
                dirs[:] = [] # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        # walk symlinked dirs ourselves; os.walk won't
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs
+
def osrcpath():
    '''return default os-specific hgrc search path'''
    # system-wide entries first, then the per-user ones, all normalized
    entries = systemrcpath() + userrcpath()
    return [os.path.normpath(entry) for entry in entries]
+
# cached result of rcpath(); computed lazily on first call
_rcpath = None

def rcpath():
    '''return hgrc search path. if env var HGRCPATH is set, use it.
    for each item in path, if directory, use files ending in .rc,
    else use item.
    make HGRCPATH empty to only look in .hg/hgrc of current repo.
    if no HGRCPATH, use default os-specific path.'''
    global _rcpath
    if _rcpath is None:
        if 'HGRCPATH' in os.environ:
            _rcpath = []
            for p in os.environ['HGRCPATH'].split(os.pathsep):
                # empty entries are silently skipped
                if not p:
                    continue
                p = util.expandpath(p)
                if os.path.isdir(p):
                    # a directory contributes all of its *.rc files
                    for f, kind in osutil.listdir(p):
                        if f.endswith('.rc'):
                            _rcpath.append(os.path.join(p, f))
                else:
                    _rcpath.append(p)
        else:
            _rcpath = osrcpath()
    return _rcpath
+
if os.name != 'nt':

    def rcfiles(path):
        # hgrc plus every *.rc file under hgrc.d (missing dir is fine)
        rcs = [os.path.join(path, 'hgrc')]
        rcdir = os.path.join(path, 'hgrc.d')
        try:
            rcs.extend([os.path.join(rcdir, f)
                        for f, kind in osutil.listdir(rcdir)
                        if f.endswith(".rc")])
        except OSError:
            pass
        return rcs

    def systemrcpath():
        '''return system-wide hgrc candidates (install prefix, then /etc).'''
        path = []
        # old mod_python does not set sys.argv
        if len(getattr(sys, 'argv', [])) > 0:
            p = os.path.dirname(os.path.dirname(sys.argv[0]))
            path.extend(rcfiles(os.path.join(p, 'etc/mercurial')))
        path.extend(rcfiles('/etc/mercurial'))
        return path

    def userrcpath():
        '''return per-user hgrc location.'''
        return [os.path.expanduser('~/.hgrc')]

else:

    # HKEY_LOCAL_MACHINE handle constant for registry lookups (Py2 long)
    _HKEY_LOCAL_MACHINE = 0x80000002L

    def systemrcpath():
        '''return default os-specific hgrc search path'''
        rcpath = []
        filename = util.executablepath()
        # Use mercurial.ini found in directory with hg.exe
        progrc = os.path.join(os.path.dirname(filename), 'mercurial.ini')
        if os.path.isfile(progrc):
            rcpath.append(progrc)
            return rcpath
        # Use hgrc.d found in directory with hg.exe
        progrcd = os.path.join(os.path.dirname(filename), 'hgrc.d')
        if os.path.isdir(progrcd):
            for f, kind in osutil.listdir(progrcd):
                if f.endswith('.rc'):
                    rcpath.append(os.path.join(progrcd, f))
            return rcpath
        # else look for a system rcpath in the registry
        value = util.lookupreg('SOFTWARE\\Mercurial', None,
                               _HKEY_LOCAL_MACHINE)
        if not isinstance(value, str) or not value:
            return rcpath
        value = value.replace('/', os.sep)
        for p in value.split(os.pathsep):
            if p.lower().endswith('mercurial.ini'):
                rcpath.append(p)
            elif os.path.isdir(p):
                for f, kind in osutil.listdir(p):
                    if f.endswith('.rc'):
                        rcpath.append(os.path.join(p, f))
        return rcpath

    def userrcpath():
        '''return os-specific hgrc search path to the user dir'''
        home = os.path.expanduser('~')
        path = [os.path.join(home, 'mercurial.ini'),
                os.path.join(home, '.hgrc')]
        userprofile = os.environ.get('USERPROFILE')
        if userprofile:
            path.append(os.path.join(userprofile, 'mercurial.ini'))
            path.append(os.path.join(userprofile, '.hgrc'))
        return path
+
def revsingle(repo, revspec, default='.'):
    '''Resolve *revspec* to a single changectx (the last of the set).

    Falls back to *default* when no spec is given; aborts on an empty
    revision set.
    '''
    if not revspec:
        return repo[default]
    resolved = revrange(repo, [revspec])
    if not resolved:
        raise util.Abort(_('empty revision set'))
    return repo[resolved[-1]]
+
def revpair(repo, revs):
    '''Resolve a list of revision specs to a (node1, node2) pair.

    With no usable specs, the working directory's first parent is paired
    with None; a single resolved revision also pairs with None.
    '''
    if not revs:
        return repo.dirstate.p1(), None

    resolved = revrange(repo, revs)
    if not resolved:
        return repo.dirstate.p1(), None

    first = repo.lookup(resolved[0])
    if len(resolved) == 1:
        return first, None
    return first, repo.lookup(resolved[-1])
+
# separator for old-style START:END revision ranges
_revrangesep = ':'

def revrange(repo, revs):
    """Yield revision as strings from a list of revision specifications."""

    def revfix(repo, val, defval):
        # empty spec half (but not 0) falls back to defval when given
        if not val and val != 0 and defval is not None:
            return defval
        return repo.changelog.rev(repo.lookup(val))

    seen, l = set(), []
    for spec in revs:
        # attempt to parse old-style ranges first to deal with
        # things like old-tag which contain query metacharacters
        try:
            if isinstance(spec, int):
                seen.add(spec)
                l.append(spec)
                continue

            if _revrangesep in spec:
                # old-style numeric/name range, possibly descending
                start, end = spec.split(_revrangesep, 1)
                start = revfix(repo, start, 0)
                end = revfix(repo, end, len(repo) - 1)
                step = start > end and -1 or 1
                for rev in xrange(start, end + step, step):
                    if rev in seen:
                        continue
                    seen.add(rev)
                    l.append(rev)
                continue
            elif spec and spec in repo: # single unquoted rev
                rev = revfix(repo, spec, None)
                if rev in seen:
                    continue
                seen.add(rev)
                l.append(rev)
                continue
        except error.RepoLookupError:
            pass

        # fall through to new-style queries if old-style fails
        m = revset.match(repo.ui, spec)
        for r in m(repo, range(len(repo))):
            if r not in seen:
                l.append(r)
        seen.update(l)

    return l
+
def expandpats(pats):
    '''Expand bare glob patterns for shells that do not expand them.

    Patterns with an explicit kind prefix (e.g. 're:', 'glob:') are kept
    verbatim; a bare pattern that matches nothing is also kept as-is.
    '''
    if not util.expandglobs:
        return list(pats)
    expanded = []
    for pat in pats:
        kind, name = matchmod._patsplit(pat, None)
        if kind is not None:
            # explicitly typed pattern: never glob-expand
            expanded.append(pat)
            continue
        try:
            matches = glob.glob(name)
        except re.error:
            matches = [name]
        expanded.extend(matches if matches else [pat])
    return expanded
+
def match(ctx, pats=None, opts=None, globbed=False, default='relpath'):
    """Build a match object for *ctx* from command-line pats/opts.

    pats: list of patterns; None or ("",) mean "no patterns".
    opts: option dict; only 'include' and 'exclude' are read here.
    globbed: set when the shell already expanded globs.
    default: default pattern kind; glob expansion only happens for
        'relpath'.

    The matcher's ``bad`` callback is wired to warn on the repo's ui.

    Note: the former mutable default arguments (``pats=[]``, ``opts={}``)
    are replaced with ``None`` sentinels -- behaviorally identical for all
    callers and immune to accidental shared-state mutation.
    """
    if pats is None:
        pats = []
    if opts is None:
        opts = {}
    if pats == ("",):
        pats = []
    if not globbed and default == 'relpath':
        pats = expandpats(pats or [])

    m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
                  default)
    def badfn(f, msg):
        ctx._repo.ui.warn("%s: %s\n" % (m.rel(f), msg))
    m.bad = badfn
    return m
+
def matchall(repo):
    '''Return a matcher accepting every file in *repo*.'''
    root, cwd = repo.root, repo.getcwd()
    return matchmod.always(root, cwd)
+
def matchfiles(repo, files):
    '''Return a matcher accepting exactly the given *files* in *repo*.'''
    root, cwd = repo.root, repo.getcwd()
    return matchmod.exact(root, cwd, files)
+
def addremove(repo, pats=[], opts={}, dry_run=None, similarity=None):
    '''Add new files and forget missing ones, optionally detecting renames.

    pats/opts select the files considered; dry_run and similarity fall
    back to the corresponding opts entries when not given explicitly.
    similarity > 0 enables rename detection between removed and added
    files.
    '''
    if dry_run is None:
        dry_run = opts.get('dry_run')
    if similarity is None:
        similarity = float(opts.get('similarity') or 0)
    # we'd use status here, except handling of symlinks and ignore is tricky
    added, unknown, deleted, removed = [], [], [], []
    audit_path = pathauditor(repo.root)
    m = match(repo[None], pats, opts)
    for abs in repo.walk(m):
        target = repo.wjoin(abs)
        good = True
        try:
            # reject paths escaping the repo (symlinks, .hg components)
            audit_path(abs)
        except (OSError, util.Abort):
            good = False
        rel = m.rel(abs)
        exact = m.exact(abs)
        if good and abs not in repo.dirstate:
            # on disk but untracked -> to be added
            unknown.append(abs)
            if repo.ui.verbose or not exact:
                repo.ui.status(_('adding %s\n') % ((pats and rel) or abs))
        elif repo.dirstate[abs] != 'r' and (not good or not os.path.lexists(target)
            or (os.path.isdir(target) and not os.path.islink(target))):
            # tracked but gone from disk (or replaced by a real dir)
            deleted.append(abs)
            if repo.ui.verbose or not exact:
                repo.ui.status(_('removing %s\n') % ((pats and rel) or abs))
        # for finding renames
        elif repo.dirstate[abs] == 'r':
            removed.append(abs)
        elif repo.dirstate[abs] == 'a':
            added.append(abs)
    copies = {}
    if similarity > 0:
        # pair up disappeared files with new ones by content similarity
        for old, new, score in similar.findrenames(repo,
                added + unknown, removed + deleted, similarity):
            if repo.ui.verbose or not m.exact(old) or not m.exact(new):
                repo.ui.status(_('recording removal of %s as rename to %s '
                                 '(%d%% similar)\n') %
                               (m.rel(old), m.rel(new), score * 100))
            copies[new] = old

    if not dry_run:
        wctx = repo[None]
        wlock = repo.wlock()
        try:
            wctx.forget(deleted)
            wctx.add(unknown)
            for new, old in copies.iteritems():
                wctx.copy(old, new)
        finally:
            wlock.release()
+
def updatedir(ui, repo, patches, similarity=0):
    '''Update dirstate after patch application according to metadata'''
    # patches maps filename -> git patch metadata (or a false value);
    # returns the sorted list of affected files
    if not patches:
        return []
    copies = []
    removes = set()
    cfiles = patches.keys()
    cwd = repo.getcwd()
    if cwd:
        # make paths cwd-relative for addremove below
        cfiles = [util.pathto(repo.root, cwd, f) for f in patches.keys()]
    for f in patches:
        gp = patches[f]
        if not gp:
            continue
        if gp.op == 'RENAME':
            copies.append((gp.oldpath, gp.path))
            removes.add(gp.oldpath)
        elif gp.op == 'COPY':
            copies.append((gp.oldpath, gp.path))
        elif gp.op == 'DELETE':
            removes.add(gp.path)

    wctx = repo[None]
    for src, dst in copies:
        dirstatecopy(ui, repo, wctx, src, dst, cwd=cwd)
    if (not similarity) and removes:
        # with similarity, removals are handled by addremove instead
        wctx.remove(sorted(removes), True)

    for f in patches:
        gp = patches[f]
        if gp and gp.mode:
            islink, isexec = gp.mode
            dst = repo.wjoin(gp.path)
            # patch won't create empty files
            if gp.op == 'ADD' and not os.path.lexists(dst):
                flags = (isexec and 'x' or '') + (islink and 'l' or '')
                repo.wwrite(gp.path, '', flags)
            util.setflags(dst, islink, isexec)
    addremove(repo, cfiles, similarity=similarity)
    # Py2: dict.keys() returns a list we can extend in place
    files = patches.keys()
    files.extend([r for r in removes if r not in files])
    return sorted(files)
+
def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    # follow an existing copy chain back to the original source
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc: # copying back a copy?
        if repo.dirstate[dst] not in 'mn' and not dryrun:
            repo.dirstate.normallookup(dst)
    else:
        if repo.dirstate[origsrc] == 'a' and origsrc == src:
            # source was only added, never committed: no copy data possible
            if not ui.quiet:
                ui.warn(_("%s has not been committed yet, so no copy "
                          "data will be stored for %s.\n")
                        % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
            if repo.dirstate[dst] in '?r' and not dryrun:
                wctx.add([dst])
        elif not dryrun:
            wctx.copy(origsrc, dst)
+
def readrequires(opener, supported):
    '''Reads and parses .hg/requires and checks if all entries found
    are in the list of supported features.

    Returns the set of requirement strings; raises RequirementError on a
    corrupt file or on features this Mercurial does not support.
    '''
    requirements = set(opener.read("requires").splitlines())
    unknown = []
    for feature in requirements:
        if feature in supported:
            continue
        # a blank or oddly-formed entry means the file itself is bogus
        if not feature or not feature[0].isalnum():
            raise error.RequirementError(_(".hg/requires file is corrupt"))
        unknown.append(feature)
    if unknown:
        unknown.sort()
        raise error.RequirementError(_("unknown repository format: "
            "requires features '%s' (upgrade Mercurial)") % "', '".join(unknown))
    return requirements
+
class filecacheentry(object):
    '''Stat-based validity token for a single file under .hg/.

    Wraps util.cachestat for *path* and remembers whether the underlying
    filesystem can reliably report replacement (cacheable).
    '''
    def __init__(self, path):
        self.path = path
        self.cachestat = filecacheentry.stat(self.path)

        if self.cachestat:
            self._cacheable = self.cachestat.cacheable()
        else:
            # None means we don't know yet
            self._cacheable = None

    def refresh(self):
        # re-stat so later changed() calls compare against the current state
        if self.cacheable():
            self.cachestat = filecacheentry.stat(self.path)

    def cacheable(self):
        if self._cacheable is not None:
            return self._cacheable

        # we don't know yet, assume it is for now
        return True

    def changed(self):
        '''Return True when the file may have changed since the last stat.'''
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecacheentry.stat(self.path)

        # we may not know if it's cacheable yet, check again now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

        # check again
        if not self._cacheable:
            return True

        if self.cachestat != newstat:
            self.cachestat = newstat
            return True
        else:
            return False

    @staticmethod
    def stat(path):
        # returns None when the file does not exist (ENOENT swallowed);
        # other OS errors propagate
        try:
            return util.cachestat(path)
        except OSError, e:
            if e.errno != errno.ENOENT:
                raise
+
class filecache(object):
    '''A property like decorator that tracks a file under .hg/ for updates.

    Records stat info when called in _filecache.

    On subsequent calls, compares old stat info with new info, and recreates
    the object when needed, updating the new stat info in _filecache.

    Mercurial either atomic renames or appends for files under .hg,
    so to ensure the cache is reliable we need the filesystem to be able
    to tell us if a file has been replaced. If it can't, we fallback to
    recreating the object on every call (essentially the same behaviour as
    propertycache).'''
    def __init__(self, path, instore=False):
        # path: file name relative to .hg/ (or the store when instore)
        self.path = path
        self.instore = instore

    def __call__(self, func):
        # used as a decorator: remember the wrapped factory function
        self.func = func
        self.name = func.__name__
        return self

    def __get__(self, obj, type=None):
        entry = obj._filecache.get(self.name)

        if entry:
            # cached: rebuild only if the backing file changed
            if entry.changed():
                entry.obj = self.func(obj)
        else:
            path = self.instore and obj.sjoin(self.path) or obj.join(self.path)

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(path)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        # shadow the descriptor on the instance for fast subsequent access
        setattr(obj, self.name, entry.obj)
        return entry.obj
diff --git a/websdk/mercurial/setdiscovery.py b/websdk/mercurial/setdiscovery.py
new file mode 100644
index 0000000..bbb1b18
--- /dev/null
+++ b/websdk/mercurial/setdiscovery.py
@@ -0,0 +1,194 @@
+# setdiscovery.py - improved discovery of common nodeset for mercurial
+#
+# Copyright 2010 Benoit Boissinot <bboissin@gmail.com>
+# and Peter Arrenbrecht <peter@arrenbrecht.ch>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from node import nullid
+from i18n import _
+import random, collections, util, dagutil
+
def _updatesample(dag, nodes, sample, always, quicksamplesize=0):
    '''Walk from the heads of *nodes* (or the whole graph when empty),
    adding nodes at exponentially growing distances (1, 2, 4, ...) to
    *sample*; nodes in *always* are skipped.  Stops early once *sample*
    reaches quicksamplesize, when given.'''
    # if nodes is empty we scan the entire graph
    if nodes:
        heads = dag.headsetofconnecteds(nodes)
    else:
        heads = dag.heads()
    dist = {}
    visit = collections.deque(heads)
    seen = set()
    factor = 1
    while visit:
        curr = visit.popleft()
        if curr in seen:
            continue
        d = dist.setdefault(curr, 1)
        # factor doubles so only nodes at distance 1, 2, 4, ... get sampled
        if d > factor:
            factor *= 2
        if d == factor:
            if curr not in always: # need this check for the early exit below
                sample.add(curr)
                if quicksamplesize and (len(sample) >= quicksamplesize):
                    return
        seen.add(curr)
        for p in dag.parents(curr):
            if not nodes or p in nodes:
                dist.setdefault(p, d + 1)
                visit.append(p)
+
+def _setupsample(dag, nodes, size):
+ if len(nodes) <= size:
+ return set(nodes), None, 0
+ always = dag.headsetofconnecteds(nodes)
+ desiredlen = size - len(always)
+ if desiredlen <= 0:
+ # This could be bad if there are very many heads, all unknown to the
+ # server. We're counting on long request support here.
+ return always, None, desiredlen
+ return always, set(), desiredlen
+
def _takequicksample(dag, nodes, size, initial):
    '''Take a cheap sample of *nodes*, scanning down from the heads only.'''
    always, sample, budget = _setupsample(dag, nodes, size)
    if sample is None:
        # node set already small enough, or heads exhausted the budget
        return always
    # on the initial round scan the whole graph, afterwards only *nodes*
    fromset = None if initial else nodes
    _updatesample(dag, fromset, sample, always, quicksamplesize=budget)
    sample.update(always)
    return sample
+
def _takefullsample(dag, nodes, size):
    '''Sample *nodes* scanning from both directions (heads and roots).'''
    always, sample, budget = _setupsample(dag, nodes, size)
    if sample is None:
        return always
    # walk down from the heads, then up from the roots
    _updatesample(dag, nodes, sample, always)
    _updatesample(dag.inverse(), nodes, sample, always)
    assert sample
    excess = len(sample) - budget
    if excess > 0:
        # too many candidates: keep a random subset of the right size
        sample = set(random.sample(sample, budget))
    elif excess < 0:
        # too few: pad with random not-yet-chosen nodes
        sample.update(random.sample(list(nodes - sample - always), -excess))
    sample.update(always)
    return sample
+
def findcommonheads(ui, local, remote,
                    initialsamplesize=100,
                    fullsamplesize=200,
                    abortwhenunrelated=True):
    '''Return a tuple (common, anyincoming, remoteheads) used to identify
    missing nodes from or in remote.

    shortcutlocal determines whether we try use direct access to localrepo if
    remote is actually local.
    '''
    roundtrips = 0
    cl = local.changelog
    dag = dagutil.revlogdag(cl)

    # early exit if we know all the specified remote heads already
    ui.debug("query 1; heads\n")
    roundtrips += 1
    ownheads = dag.heads()
    sample = ownheads
    if remote.local():
        # stopgap until we have a proper localpeer that supports batch()
        srvheadhashes = remote.heads()
        yesno = remote.known(dag.externalizeall(sample))
    elif remote.capable('batch'):
        # one round trip for both heads and known
        batch = remote.batch()
        srvheadhashesref = batch.heads()
        yesnoref = batch.known(dag.externalizeall(sample))
        batch.submit()
        srvheadhashes = srvheadhashesref.value
        yesno = yesnoref.value
    else:
        # compatibility with pre-batch, but post-known remotes during 1.9 devel
        srvheadhashes = remote.heads()
        sample = []

    if cl.tip() == nullid:
        # empty local repo: everything remote is incoming
        if srvheadhashes != [nullid]:
            return [nullid], True, srvheadhashes
        return [nullid], False, []

    # start actual discovery (we note this before the next "if" for
    # compatibility reasons)
    ui.status(_("searching for changes\n"))

    srvheads = dag.internalizeall(srvheadhashes, filterunknown=True)
    if len(srvheads) == len(srvheadhashes):
        ui.debug("all remote heads known locally\n")
        return (srvheadhashes, False, srvheadhashes,)

    if sample and util.all(yesno):
        ui.note(_("all local heads known remotely\n"))
        ownheadhashes = dag.externalizeall(ownheads)
        return (ownheadhashes, True, srvheadhashes,)

    # full blown discovery
    undecided = dag.nodeset() # own nodes where I don't know if remote knows them
    common = set() # own nodes I know we both know
    missing = set() # own nodes I know remote lacks

    # treat remote heads (and maybe own heads) as a first implicit sample response
    common.update(dag.ancestorset(srvheads))
    undecided.difference_update(common)

    full = False
    while undecided:

        if sample:
            # a known node implies all its ancestors are known; an unknown
            # node implies all its descendants are unknown
            commoninsample = set(n for i, n in enumerate(sample) if yesno[i])
            common.update(dag.ancestorset(commoninsample, common))

            missinginsample = [n for i, n in enumerate(sample) if not yesno[i]]
            missing.update(dag.descendantset(missinginsample, missing))

            undecided.difference_update(missing)
            undecided.difference_update(common)

        if not undecided:
            break

        if full:
            ui.note(_("sampling from both directions\n"))
            sample = _takefullsample(dag, undecided, size=fullsamplesize)
        elif common:
            # use cheapish initial sample
            ui.debug("taking initial sample\n")
            sample = _takefullsample(dag, undecided, size=fullsamplesize)
        else:
            # use even cheaper initial sample
            ui.debug("taking quick initial sample\n")
            sample = _takequicksample(dag, undecided, size=initialsamplesize,
                                      initial=True)

        roundtrips += 1
        ui.progress(_('searching'), roundtrips, unit=_('queries'))
        ui.debug("query %i; still undecided: %i, sample size is: %i\n"
                 % (roundtrips, len(undecided), len(sample)))
        # indices between sample and externalized version must match
        sample = list(sample)
        yesno = remote.known(dag.externalizeall(sample))
        full = True

    result = dag.headsetofconnecteds(common)
    ui.progress(_('searching'), None)
    ui.debug("%d total queries\n" % roundtrips)

    if not result and srvheadhashes != [nullid]:
        if abortwhenunrelated:
            raise util.Abort(_("repository is unrelated"))
        else:
            ui.warn(_("warning: repository is unrelated\n"))
            return (set([nullid]), True, srvheadhashes,)

    anyincoming = (srvheadhashes != [nullid])
    return dag.externalizeall(result), anyincoming, srvheadhashes
diff --git a/websdk/mercurial/similar.py b/websdk/mercurial/similar.py
index bc480f5..b18795b 120000..100644
--- a/websdk/mercurial/similar.py
+++ b/websdk/mercurial/similar.py
@@ -1 +1,103 @@
-/usr/share/pyshared/mercurial/similar.py \ No newline at end of file
+# similar.py - mechanisms for finding similar files
+#
+# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from i18n import _
+import util
+import mdiff
+import bdiff
+
def _findexactmatches(repo, added, removed):
    '''find renamed files that have no changes

    Takes a list of new filectxs and a list of removed filectxs, and yields
    (before, after) tuples of exact matches.
    '''
    numfiles = len(added) + len(removed)

    # Index the removed files by the SHA-1 digest of their content.
    byhash = {}
    for idx, fctx in enumerate(removed):
        repo.ui.progress(_('searching for exact renames'), idx,
                         total=numfiles)
        byhash[util.sha1(fctx.data()).digest()] = fctx

    # An added file whose digest appears in the index is an exact rename.
    for idx, fctx in enumerate(added):
        repo.ui.progress(_('searching for exact renames'), idx + len(removed),
                         total=numfiles)
        digest = util.sha1(fctx.data()).digest()
        match = byhash.get(digest)
        if match is not None:
            yield (match, fctx)

    # Done
    repo.ui.progress(_('searching for exact renames'), None)
+
def _findsimilarmatches(repo, added, removed, threshold):
    '''find potentially renamed files based on similar file content

    Takes a list of new filectxs and a list of removed filectxs, and yields
    (before, after, score) tuples of partial matches.
    '''
    # best (source, score) seen so far for each added file
    copies = {}
    for i, r in enumerate(removed):
        repo.ui.progress(_('searching for similar files'), i, total=len(removed))

        # lazily load text
        @util.cachefunc
        def data():
            orig = r.data()
            return orig, mdiff.splitnewlines(orig)

        def score(text):
            # similarity = fraction of bytes shared with the removed file
            orig, lines = data()
            # bdiff.blocks() returns blocks of matching lines
            # count the number of bytes in each
            equal = 0
            matches = bdiff.blocks(text, orig)
            for x1, x2, y1, y2 in matches:
                for line in lines[y1:y2]:
                    equal += len(line)

            lengths = len(text) + len(orig)
            return equal * 2.0 / lengths

        for a in added:
            # only keep this pairing if it beats the best score so far
            # (ties resolved in favor of the later removed file)
            bestscore = copies.get(a, (None, threshold))[1]
            myscore = score(a.data())
            if myscore >= bestscore:
                copies[a] = (r, myscore)
    repo.ui.progress(_('searching'), None)

    for dest, v in copies.iteritems():
        source, score = v
        yield source, dest, score
+
def findrenames(repo, added, removed, threshold):
    '''find renamed files -- yields (before, after, score) tuples'''
    pctx = repo['.']
    wctx = repo[None]

    # Zero length files will be frequently unrelated to each other, and
    # tracking the deletion/addition of such a file will probably cause more
    # harm than good. We strip them out here to avoid matching them later on.
    newfiles = set()
    for fp in added:
        fctx = wctx[fp]
        if fctx.size() > 0:
            newfiles.add(fctx)
    oldfiles = set()
    for fp in removed:
        if fp in pctx and pctx[fp].size() > 0:
            oldfiles.add(pctx[fp])

    # Exact (content-identical) matches first.
    for (old, new) in _findexactmatches(repo,
            sorted(newfiles), sorted(oldfiles)):
        newfiles.remove(new)
        yield (old.path(), new.path(), 1.0)

    # If the user requested similar files to be matched, search for them also.
    if threshold < 1.0:
        for (old, new, score) in _findsimilarmatches(repo,
                sorted(newfiles), sorted(oldfiles), threshold):
            yield (old.path(), new.path(), score)
+
diff --git a/websdk/mercurial/simplemerge.py b/websdk/mercurial/simplemerge.py
index 9c7e7a1..3d63d85 120000..100644
--- a/websdk/mercurial/simplemerge.py
+++ b/websdk/mercurial/simplemerge.py
@@ -1 +1,453 @@
-/usr/share/pyshared/mercurial/simplemerge.py \ No newline at end of file
+# Copyright (C) 2004, 2005 Canonical Ltd
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+# mbp: "you know that thing where cvs gives you conflict markers?"
+# s: "i hate that."
+
+from i18n import _
+import scmutil, util, mdiff
+import sys, os
+
class CantReprocessAndShowBase(Exception):
    """Raised when both conflict reprocessing and a base marker are requested."""
+
def intersect(ra, rb):
    """Given two ranges return the range where they intersect or None.

    >>> intersect((0, 10), (0, 6))
    (0, 6)
    >>> intersect((0, 10), (5, 15))
    (5, 10)
    >>> intersect((0, 10), (10, 15))
    >>> intersect((0, 9), (10, 15))
    >>> intersect((0, 9), (7, 15))
    (7, 9)
    """
    assert ra[0] <= ra[1]
    assert rb[0] <= rb[1]

    lo = max(ra[0], rb[0])
    hi = min(ra[1], rb[1])
    # empty or touching-only ranges do not intersect
    return (lo, hi) if lo < hi else None
+
def compare_range(a, astart, aend, b, bstart, bend):
    """Compare a[astart:aend] == b[bstart:bend], without slicing.

    Uses ``range`` + ``all()`` instead of the original ``xrange`` and a
    redundant ``for/else`` -- same result, and the code also runs on
    Python 3 (``xrange`` is Python-2-only).
    """
    if (aend - astart) != (bend - bstart):
        return False
    return all(a[ia] == b[ib]
               for ia, ib in zip(range(astart, aend), range(bstart, bend)))
+
class Merge3Text(object):
    """3-way merge of texts.

    Given strings BASE, OTHER, THIS, tries to produce a combined text
    incorporating the changes from both BASE->OTHER and BASE->THIS."""
    def __init__(self, basetext, atext, btext, base=None, a=None, b=None):
        # the *text values are whole strings; base/a/b are the same data
        # pre-split into lines (computed here when not supplied)
        self.basetext = basetext
        self.atext = atext
        self.btext = btext
        if base is None:
            base = mdiff.splitnewlines(basetext)
        if a is None:
            a = mdiff.splitnewlines(atext)
        if b is None:
            b = mdiff.splitnewlines(btext)
        self.base = base
        self.a = a
        self.b = b

    def merge_lines(self,
                    name_a=None,
                    name_b=None,
                    name_base=None,
                    start_marker='<<<<<<<',
                    mid_marker='=======',
                    end_marker='>>>>>>>',
                    base_marker=None,
                    reprocess=False):
        """Return merge in cvs-like form.

        Yields output lines; sets self.conflicts when any conflict
        region was emitted.
        """
        self.conflicts = False
        # match the line-ending style of the first line of `a`
        newline = '\n'
        if len(self.a) > 0:
            if self.a[0].endswith('\r\n'):
                newline = '\r\n'
            elif self.a[0].endswith('\r'):
                newline = '\r'
        if base_marker and reprocess:
            raise CantReprocessAndShowBase()
        if name_a:
            start_marker = start_marker + ' ' + name_a
        if name_b:
            end_marker = end_marker + ' ' + name_b
        if name_base and base_marker:
            base_marker = base_marker + ' ' + name_base
        merge_regions = self.merge_regions()
        if reprocess is True:
            merge_regions = self.reprocess_merge_regions(merge_regions)
        for t in merge_regions:
            what = t[0]
            if what == 'unchanged':
                for i in range(t[1], t[2]):
                    yield self.base[i]
            elif what == 'a' or what == 'same':
                for i in range(t[1], t[2]):
                    yield self.a[i]
            elif what == 'b':
                for i in range(t[1], t[2]):
                    yield self.b[i]
            elif what == 'conflict':
                self.conflicts = True
                yield start_marker + newline
                for i in range(t[3], t[4]):
                    yield self.a[i]
                if base_marker is not None:
                    yield base_marker + newline
                    for i in range(t[1], t[2]):
                        yield self.base[i]
                yield mid_marker + newline
                for i in range(t[5], t[6]):
                    yield self.b[i]
                yield end_marker + newline
            else:
                raise ValueError(what)

    def merge_annotated(self):
        """Return merge with conflicts, showing origin of lines.

        Most useful for debugging merge.
        """
        for t in self.merge_regions():
            what = t[0]
            if what == 'unchanged':
                for i in range(t[1], t[2]):
                    yield 'u | ' + self.base[i]
            elif what == 'a' or what == 'same':
                for i in range(t[1], t[2]):
                    yield what[0] + ' | ' + self.a[i]
            elif what == 'b':
                for i in range(t[1], t[2]):
                    yield 'b | ' + self.b[i]
            elif what == 'conflict':
                yield '<<<<\n'
                for i in range(t[3], t[4]):
                    yield 'A | ' + self.a[i]
                yield '----\n'
                for i in range(t[5], t[6]):
                    yield 'B | ' + self.b[i]
                yield '>>>>\n'
            else:
                raise ValueError(what)

    def merge_groups(self):
        """Yield sequence of line groups.  Each one is a tuple:

        'unchanged', lines
             Lines unchanged from base

        'a', lines
             Lines taken from a

        'same', lines
             Lines taken from a (and equal to b)

        'b', lines
             Lines taken from b

        'conflict', base_lines, a_lines, b_lines
             Lines from base were changed to either a or b and conflict.
        """
        for t in self.merge_regions():
            what = t[0]
            if what == 'unchanged':
                yield what, self.base[t[1]:t[2]]
            elif what == 'a' or what == 'same':
                yield what, self.a[t[1]:t[2]]
            elif what == 'b':
                yield what, self.b[t[1]:t[2]]
            elif what == 'conflict':
                yield (what,
                       self.base[t[1]:t[2]],
                       self.a[t[3]:t[4]],
                       self.b[t[5]:t[6]])
            else:
                raise ValueError(what)

    def merge_regions(self):
        """Return sequences of matching and conflicting regions.

        This returns tuples, where the first value says what kind we
        have:

        'unchanged', start, end
             Take a region of base[start:end]

        'same', astart, aend
             b and a are different from base but give the same result

        'a', start, end
             Non-clashing insertion from a[start:end]

        Method is as follows:

        The two sequences align only on regions which match the base
        and both descendants.  These are found by doing a two-way diff
        of each one against the base, and then finding the
        intersections between those regions.  These "sync regions"
        are by definition unchanged in both and easily dealt with.

        The regions in between can be in any of three cases:
        conflicted, or changed on only one side.
        """

        # section a[0:ia] has been disposed of, etc
        iz = ia = ib = 0

        for zmatch, zend, amatch, aend, bmatch, bend in self.find_sync_regions():
            #print 'match base [%d:%d]' % (zmatch, zend)

            matchlen = zend - zmatch
            assert matchlen >= 0
            assert matchlen == (aend - amatch)
            assert matchlen == (bend - bmatch)

            # the gaps before this sync region on each side
            len_a = amatch - ia
            len_b = bmatch - ib
            len_base = zmatch - iz
            assert len_a >= 0
            assert len_b >= 0
            assert len_base >= 0

            #print 'unmatched a=%d, b=%d' % (len_a, len_b)

            if len_a or len_b:
                # try to avoid actually slicing the lists
                equal_a = compare_range(self.a, ia, amatch,
                                        self.base, iz, zmatch)
                equal_b = compare_range(self.b, ib, bmatch,
                                        self.base, iz, zmatch)
                same = compare_range(self.a, ia, amatch,
                                     self.b, ib, bmatch)

                if same:
                    yield 'same', ia, amatch
                elif equal_a and not equal_b:
                    yield 'b', ib, bmatch
                elif equal_b and not equal_a:
                    yield 'a', ia, amatch
                elif not equal_a and not equal_b:
                    yield 'conflict', iz, zmatch, ia, amatch, ib, bmatch
                else:
                    raise AssertionError("can't handle a=b=base but unmatched")

                ia = amatch
                ib = bmatch
            iz = zmatch

            # if the same part of the base was deleted on both sides
            # that's OK, we can just skip it.

            if matchlen > 0:
                assert ia == amatch
                assert ib == bmatch
                assert iz == zmatch

                yield 'unchanged', zmatch, zend
                iz = zend
                ia = aend
                ib = bend

    def reprocess_merge_regions(self, merge_regions):
        """Where there are conflict regions, remove the agreed lines.

        Lines where both A and B have made the same changes are
        eliminated.
        """
        for region in merge_regions:
            if region[0] != "conflict":
                yield region
                continue
            type, iz, zmatch, ia, amatch, ib, bmatch = region
            a_region = self.a[ia:amatch]
            b_region = self.b[ib:bmatch]
            # matching blocks between the two conflicting regions become
            # 'same' regions; the leftovers stay conflicts
            matches = mdiff.get_matching_blocks(''.join(a_region),
                                                ''.join(b_region))
            next_a = ia
            next_b = ib
            for region_ia, region_ib, region_len in matches[:-1]:
                region_ia += ia
                region_ib += ib
                reg = self.mismatch_region(next_a, region_ia, next_b,
                                           region_ib)
                if reg is not None:
                    yield reg
                yield 'same', region_ia, region_len + region_ia
                next_a = region_ia + region_len
                next_b = region_ib + region_len
            reg = self.mismatch_region(next_a, amatch, next_b, bmatch)
            if reg is not None:
                yield reg

    def mismatch_region(next_a, region_ia, next_b, region_ib):
        # helper for reprocess_merge_regions: remaining unmatched span,
        # if any, is a conflict (base indices unknown, hence None)
        if next_a < region_ia or next_b < region_ib:
            return 'conflict', None, None, next_a, region_ia, next_b, region_ib
    mismatch_region = staticmethod(mismatch_region)

    def find_sync_regions(self):
        """Return a list of sync regions, where both descendants match the base.

        Generates a list of (base1, base2, a1, a2, b1, b2).  There is
        always a zero-length sync region at the end of all the files.
        """

        ia = ib = 0
        amatches = mdiff.get_matching_blocks(self.basetext, self.atext)
        bmatches = mdiff.get_matching_blocks(self.basetext, self.btext)
        len_a = len(amatches)
        len_b = len(bmatches)

        sl = []

        while ia < len_a and ib < len_b:
            abase, amatch, alen = amatches[ia]
            bbase, bmatch, blen = bmatches[ib]

            # there is an unconflicted block at i; how long does it
            # extend?  until whichever one ends earlier.
            i = intersect((abase, abase + alen), (bbase, bbase + blen))
            if i:
                intbase = i[0]
                intend = i[1]
                intlen = intend - intbase

                # found a match of base[i[0], i[1]]; this may be less than
                # the region that matches in either one
                assert intlen <= alen
                assert intlen <= blen
                assert abase <= intbase
                assert bbase <= intbase

                asub = amatch + (intbase - abase)
                bsub = bmatch + (intbase - bbase)
                aend = asub + intlen
                bend = bsub + intlen

                assert self.base[intbase:intend] == self.a[asub:aend], \
                       (self.base[intbase:intend], self.a[asub:aend])

                assert self.base[intbase:intend] == self.b[bsub:bend]

                sl.append((intbase, intend,
                           asub, aend,
                           bsub, bend))

            # advance whichever one ends first in the base text
            if (abase + alen) < (bbase + blen):
                ia += 1
            else:
                ib += 1

        # sentinel zero-length sync region at the end of all three texts
        intbase = len(self.base)
        abase = len(self.a)
        bbase = len(self.b)
        sl.append((intbase, intbase, abase, abase, bbase, bbase))

        return sl

    def find_unconflicted(self):
        """Return a list of ranges in base that are not conflicted."""
        am = mdiff.get_matching_blocks(self.basetext, self.atext)
        bm = mdiff.get_matching_blocks(self.basetext, self.btext)

        unc = []

        while am and bm:
            # there is an unconflicted block at i; how long does it
            # extend?  until whichever one ends earlier.
            a1 = am[0][0]
            a2 = a1 + am[0][2]
            b1 = bm[0][0]
            b2 = b1 + bm[0][2]
            i = intersect((a1, a2), (b1, b2))
            if i:
                unc.append(i)

            if a2 < b2:
                del am[0]
            else:
                del bm[0]

        return unc
+
def simplemerge(ui, local, base, other, **opts):
    '''Perform a 3-way merge of local/base/other files.

    Writes the merged result over *local* (or to stdout with --print).
    Returns 1 on conflicts or unreadable input, None on clean merge.
    '''
    def readfile(filename):
        f = open(filename, "rb")
        text = f.read()
        f.close()
        # refuse binary input unless --text was given
        if util.binary(text):
            msg = _("%s looks like a binary file.") % filename
            if not opts.get('quiet'):
                ui.warn(_('warning: %s\n') % msg)
            if not opts.get('text'):
                raise util.Abort(msg)
        return text

    # conflict marker labels default to the file names
    name_a = local
    name_b = other
    labels = opts.get('label', [])
    if labels:
        name_a = labels.pop(0)
    if labels:
        name_b = labels.pop(0)
    if labels:
        raise util.Abort(_("can only specify two labels."))

    try:
        localtext = readfile(local)
        basetext = readfile(base)
        othertext = readfile(other)
    except util.Abort:
        return 1

    local = os.path.realpath(local)
    if not opts.get('print'):
        # atomictemp ensures local is replaced only on successful close
        opener = scmutil.opener(os.path.dirname(local))
        out = opener(os.path.basename(local), "w", atomictemp=True)
    else:
        out = sys.stdout

    reprocess = not opts.get('no_minimal')

    m3 = Merge3Text(basetext, localtext, othertext)
    for line in m3.merge_lines(name_a=name_a, name_b=name_b,
                               reprocess=reprocess):
        out.write(line)

    if not opts.get('print'):
        out.close()

    if m3.conflicts:
        if not opts.get('quiet'):
            ui.warn(_("warning: conflicts during merge.\n"))
        return 1
diff --git a/websdk/mercurial/sshrepo.py b/websdk/mercurial/sshrepo.py
index abd4fbc..16a4992 120000..100644
--- a/websdk/mercurial/sshrepo.py
+++ b/websdk/mercurial/sshrepo.py
@@ -1 +1,225 @@
-/usr/share/pyshared/mercurial/sshrepo.py \ No newline at end of file
+# sshrepo.py - ssh repository proxy class for mercurial
+#
+# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from i18n import _
+import util, error, wireproto
+
+class remotelock(object):
+ def __init__(self, repo):
+ self.repo = repo
+ def release(self):
+ self.repo.unlock()
+ self.repo = None
+ def __del__(self):
+ if self.repo:
+ self.release()
+
+class sshrepository(wireproto.wirerepository):
+ def __init__(self, ui, path, create=False):
+ self._url = path
+ self.ui = ui
+
+ u = util.url(path, parsequery=False, parsefragment=False)
+ if u.scheme != 'ssh' or not u.host or u.path is None:
+ self._abort(error.RepoError(_("couldn't parse location %s") % path))
+
+ self.user = u.user
+ if u.passwd is not None:
+ self._abort(error.RepoError(_("password in URL not supported")))
+ self.host = u.host
+ self.port = u.port
+ self.path = u.path or "."
+
+ sshcmd = self.ui.config("ui", "ssh", "ssh")
+ remotecmd = self.ui.config("ui", "remotecmd", "hg")
+
+ args = util.sshargs(sshcmd, self.host, self.user, self.port)
+
+ if create:
+ cmd = '%s %s "%s init %s"'
+ cmd = cmd % (sshcmd, args, remotecmd, self.path)
+
+ ui.note(_('running %s\n') % cmd)
+ res = util.system(cmd)
+ if res != 0:
+ self._abort(error.RepoError(_("could not create remote repo")))
+
+ self.validate_repo(ui, sshcmd, args, remotecmd)
+
+ def url(self):
+ return self._url
+
+ def validate_repo(self, ui, sshcmd, args, remotecmd):
+ # cleanup up previous run
+ self.cleanup()
+
+ cmd = '%s %s "%s -R %s serve --stdio"'
+ cmd = cmd % (sshcmd, args, remotecmd, self.path)
+
+ cmd = util.quotecommand(cmd)
+ ui.note(_('running %s\n') % cmd)
+ self.pipeo, self.pipei, self.pipee = util.popen3(cmd)
+
+ # skip any noise generated by remote shell
+ self._callstream("hello")
+ r = self._callstream("between", pairs=("%s-%s" % ("0"*40, "0"*40)))
+ lines = ["", "dummy"]
+ max_noise = 500
+ while lines[-1] and max_noise:
+ l = r.readline()
+ self.readerr()
+ if lines[-1] == "1\n" and l == "\n":
+ break
+ if l:
+ ui.debug("remote: ", l)
+ lines.append(l)
+ max_noise -= 1
+ else:
+ self._abort(error.RepoError(_("no suitable response from remote hg")))
+
+ self.capabilities = set()
+ for l in reversed(lines):
+ if l.startswith("capabilities:"):
+ self.capabilities.update(l[:-1].split(":")[1].split())
+ break
+
+ def readerr(self):
+ while True:
+ size = util.fstat(self.pipee).st_size
+ if size == 0:
+ break
+ s = self.pipee.read(size)
+ if not s:
+ break
+ for l in s.splitlines():
+ self.ui.status(_("remote: "), l, '\n')
+
+ def _abort(self, exception):
+ self.cleanup()
+ raise exception
+
+ def cleanup(self):
+ try:
+ self.pipeo.close()
+ self.pipei.close()
+ # read the error descriptor until EOF
+ for l in self.pipee:
+ self.ui.status(_("remote: "), l)
+ self.pipee.close()
+ except:
+ pass
+
+ __del__ = cleanup
+
+ def _callstream(self, cmd, **args):
+ self.ui.debug("sending %s command\n" % cmd)
+ self.pipeo.write("%s\n" % cmd)
+ _func, names = wireproto.commands[cmd]
+ keys = names.split()
+ wireargs = {}
+ for k in keys:
+ if k == '*':
+ wireargs['*'] = args
+ break
+ else:
+ wireargs[k] = args[k]
+ del args[k]
+ for k, v in sorted(wireargs.iteritems()):
+ self.pipeo.write("%s %d\n" % (k, len(v)))
+ if isinstance(v, dict):
+ for dk, dv in v.iteritems():
+ self.pipeo.write("%s %d\n" % (dk, len(dv)))
+ self.pipeo.write(dv)
+ else:
+ self.pipeo.write(v)
+ self.pipeo.flush()
+
+ return self.pipei
+
+ def _call(self, cmd, **args):
+ self._callstream(cmd, **args)
+ return self._recv()
+
+ def _callpush(self, cmd, fp, **args):
+ r = self._call(cmd, **args)
+ if r:
+ return '', r
+ while True:
+ d = fp.read(4096)
+ if not d:
+ break
+ self._send(d)
+ self._send("", flush=True)
+ r = self._recv()
+ if r:
+ return '', r
+ return self._recv(), ''
+
+ def _decompress(self, stream):
+ return stream
+
+ def _recv(self):
+ l = self.pipei.readline()
+ if l == '\n':
+ err = []
+ while True:
+ line = self.pipee.readline()
+ if line == '-\n':
+ break
+ err.extend([line])
+ if len(err) > 0:
+ # strip the trailing newline added to the last line server-side
+ err[-1] = err[-1][:-1]
+ self._abort(error.OutOfBandError(*err))
+ self.readerr()
+ try:
+ l = int(l)
+ except ValueError:
+ self._abort(error.ResponseError(_("unexpected response:"), l))
+ return self.pipei.read(l)
+
+ def _send(self, data, flush=False):
+ self.pipeo.write("%d\n" % len(data))
+ if data:
+ self.pipeo.write(data)
+ if flush:
+ self.pipeo.flush()
+ self.readerr()
+
+ def lock(self):
+ self._call("lock")
+ return remotelock(self)
+
+ def unlock(self):
+ self._call("unlock")
+
+ def addchangegroup(self, cg, source, url, lock=None):
+ '''Send a changegroup to the remote server. Return an integer
+ similar to unbundle(). DEPRECATED, since it requires locking the
+ remote.'''
+ d = self._call("addchangegroup")
+ if d:
+ self._abort(error.RepoError(_("push refused: %s") % d))
+ while True:
+ d = cg.read(4096)
+ if not d:
+ break
+ self.pipeo.write(d)
+ self.readerr()
+
+ self.pipeo.flush()
+
+ self.readerr()
+ r = self._recv()
+ if not r:
+ return 1
+ try:
+ return int(r)
+ except ValueError:
+ self._abort(error.ResponseError(_("unexpected response:"), r))
+
+instance = sshrepository
diff --git a/websdk/mercurial/sshserver.py b/websdk/mercurial/sshserver.py
index 53d3f09..25f42fe 120000..100644
--- a/websdk/mercurial/sshserver.py
+++ b/websdk/mercurial/sshserver.py
@@ -1 +1,151 @@
-/usr/share/pyshared/mercurial/sshserver.py \ No newline at end of file
+# sshserver.py - ssh protocol server support for mercurial
+#
+# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+import util, hook, wireproto, changegroup
+import os, sys
+
+class sshserver(object):
+ def __init__(self, ui, repo):
+ self.ui = ui
+ self.repo = repo
+ self.lock = None
+ self.fin = ui.fin
+ self.fout = ui.fout
+
+ hook.redirect(True)
+ ui.fout = repo.ui.fout = ui.ferr
+
+ # Prevent insertion/deletion of CRs
+ util.setbinary(self.fin)
+ util.setbinary(self.fout)
+
+ def getargs(self, args):
+ data = {}
+ keys = args.split()
+ for n in xrange(len(keys)):
+ argline = self.fin.readline()[:-1]
+ arg, l = argline.split()
+ if arg not in keys:
+ raise util.Abort("unexpected parameter %r" % arg)
+ if arg == '*':
+ star = {}
+ for k in xrange(int(l)):
+ argline = self.fin.readline()[:-1]
+ arg, l = argline.split()
+ val = self.fin.read(int(l))
+ star[arg] = val
+ data['*'] = star
+ else:
+ val = self.fin.read(int(l))
+ data[arg] = val
+ return [data[k] for k in keys]
+
+ def getarg(self, name):
+ return self.getargs(name)[0]
+
+ def getfile(self, fpout):
+ self.sendresponse('')
+ count = int(self.fin.readline())
+ while count:
+ fpout.write(self.fin.read(count))
+ count = int(self.fin.readline())
+
+ def redirect(self):
+ pass
+
+ def groupchunks(self, changegroup):
+ while True:
+ d = changegroup.read(4096)
+ if not d:
+ break
+ yield d
+
+ def sendresponse(self, v):
+ self.fout.write("%d\n" % len(v))
+ self.fout.write(v)
+ self.fout.flush()
+
+ def sendstream(self, source):
+ for chunk in source.gen:
+ self.fout.write(chunk)
+ self.fout.flush()
+
+ def sendpushresponse(self, rsp):
+ self.sendresponse('')
+ self.sendresponse(str(rsp.res))
+
+ def sendpusherror(self, rsp):
+ self.sendresponse(rsp.res)
+
+ def sendooberror(self, rsp):
+ self.ui.ferr.write('%s\n-\n' % rsp.message)
+ self.ui.ferr.flush()
+ self.fout.write('\n')
+ self.fout.flush()
+
+ def serve_forever(self):
+ try:
+ while self.serve_one():
+ pass
+ finally:
+ if self.lock is not None:
+ self.lock.release()
+ sys.exit(0)
+
+ handlers = {
+ str: sendresponse,
+ wireproto.streamres: sendstream,
+ wireproto.pushres: sendpushresponse,
+ wireproto.pusherr: sendpusherror,
+ wireproto.ooberror: sendooberror,
+ }
+
+ def serve_one(self):
+ cmd = self.fin.readline()[:-1]
+ if cmd and cmd in wireproto.commands:
+ rsp = wireproto.dispatch(self.repo, self, cmd)
+ self.handlers[rsp.__class__](self, rsp)
+ elif cmd:
+ impl = getattr(self, 'do_' + cmd, None)
+ if impl:
+ r = impl()
+ if r is not None:
+ self.sendresponse(r)
+ else: self.sendresponse("")
+ return cmd != ''
+
+ def do_lock(self):
+ '''DEPRECATED - allowing remote client to lock repo is not safe'''
+
+ self.lock = self.repo.lock()
+ return ""
+
+ def do_unlock(self):
+ '''DEPRECATED'''
+
+ if self.lock:
+ self.lock.release()
+ self.lock = None
+ return ""
+
+ def do_addchangegroup(self):
+ '''DEPRECATED'''
+
+ if not self.lock:
+ self.sendresponse("not locked")
+ return
+
+ self.sendresponse("")
+ cg = changegroup.unbundle10(self.fin, "UN")
+ r = self.repo.addchangegroup(cg, 'serve', self._client(),
+ lock=self.lock)
+ return str(r)
+
+ def _client(self):
+ client = os.environ.get('SSH_CLIENT', '').split(' ', 1)[0]
+ return 'remote:ssh:' + client
diff --git a/websdk/mercurial/sslutil.py b/websdk/mercurial/sslutil.py
new file mode 100644
index 0000000..63317ea
--- /dev/null
+++ b/websdk/mercurial/sslutil.py
@@ -0,0 +1,130 @@
+# sslutil.py - SSL handling for mercurial
+#
+# Copyright 2005, 2006, 2007, 2008 Matt Mackall <mpm@selenic.com>
+# Copyright 2006, 2007 Alexis S. L. Carvalho <alexis@cecm.usp.br>
+# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+import os
+
+from mercurial import util
+from mercurial.i18n import _
+try:
+ # avoid using deprecated/broken FakeSocket in python 2.6
+ import ssl
+ ssl_wrap_socket = ssl.wrap_socket
+ CERT_REQUIRED = ssl.CERT_REQUIRED
+except ImportError:
+ CERT_REQUIRED = 2
+
+ import socket, httplib
+
+ def ssl_wrap_socket(sock, key_file, cert_file,
+ cert_reqs=CERT_REQUIRED, ca_certs=None):
+ if not util.safehasattr(socket, 'ssl'):
+ raise util.Abort(_('Python SSL support not found'))
+ if ca_certs:
+ raise util.Abort(_(
+ 'certificate checking requires Python 2.6'))
+
+ ssl = socket.ssl(sock, key_file, cert_file)
+ return httplib.FakeSocket(sock, ssl)
+
+def _verifycert(cert, hostname):
+ '''Verify that cert (in socket.getpeercert() format) matches hostname.
+ CRLs is not handled.
+
+ Returns error message if any problems are found and None on success.
+ '''
+ if not cert:
+ return _('no certificate received')
+ dnsname = hostname.lower()
+ def matchdnsname(certname):
+ return (certname == dnsname or
+ '.' in dnsname and certname == '*.' + dnsname.split('.', 1)[1])
+
+ san = cert.get('subjectAltName', [])
+ if san:
+ certnames = [value.lower() for key, value in san if key == 'DNS']
+ for name in certnames:
+ if matchdnsname(name):
+ return None
+ if certnames:
+ return _('certificate is for %s') % ', '.join(certnames)
+
+ # subject is only checked when subjectAltName is empty
+ for s in cert.get('subject', []):
+ key, value = s[0]
+ if key == 'commonName':
+ try:
+ # 'subject' entries are unicode
+ certname = value.lower().encode('ascii')
+ except UnicodeEncodeError:
+ return _('IDN in certificate not supported')
+ if matchdnsname(certname):
+ return None
+ return _('certificate is for %s') % certname
+ return _('no commonName or subjectAltName found in certificate')
+
+
+# CERT_REQUIRED means fetch the cert from the server all the time AND
+# validate it against the CA store provided in web.cacerts.
+#
+# We COMPLETELY ignore CERT_REQUIRED on Python <= 2.5, as it's totally
+# busted on those versions.
+
+def sslkwargs(ui, host):
+ cacerts = ui.config('web', 'cacerts')
+ hostfingerprint = ui.config('hostfingerprints', host)
+ if cacerts and not hostfingerprint:
+ cacerts = util.expandpath(cacerts)
+ if not os.path.exists(cacerts):
+ raise util.Abort(_('could not find web.cacerts: %s') % cacerts)
+ return {'ca_certs': cacerts,
+ 'cert_reqs': CERT_REQUIRED,
+ }
+ return {}
+
+class validator(object):
+ def __init__(self, ui, host):
+ self.ui = ui
+ self.host = host
+
+ def __call__(self, sock):
+ host = self.host
+ cacerts = self.ui.config('web', 'cacerts')
+ hostfingerprint = self.ui.config('hostfingerprints', host)
+ if cacerts and not hostfingerprint:
+ msg = _verifycert(sock.getpeercert(), host)
+ if msg:
+ raise util.Abort(_('%s certificate error: %s '
+ '(use --insecure to connect '
+ 'insecurely)') % (host, msg))
+ self.ui.debug('%s certificate successfully verified\n' % host)
+ else:
+ if getattr(sock, 'getpeercert', False):
+ peercert = sock.getpeercert(True)
+ peerfingerprint = util.sha1(peercert).hexdigest()
+ nicefingerprint = ":".join([peerfingerprint[x:x + 2]
+ for x in xrange(0, len(peerfingerprint), 2)])
+ if hostfingerprint:
+ if peerfingerprint.lower() != \
+ hostfingerprint.replace(':', '').lower():
+ raise util.Abort(_('invalid certificate for %s '
+ 'with fingerprint %s') %
+ (host, nicefingerprint))
+ self.ui.debug('%s certificate matched fingerprint %s\n' %
+ (host, nicefingerprint))
+ else:
+ self.ui.warn(_('warning: %s certificate '
+ 'with fingerprint %s not verified '
+ '(check hostfingerprints or web.cacerts '
+ 'config setting)\n') %
+ (host, nicefingerprint))
+ else: # python 2.5 ?
+ if hostfingerprint:
+ raise util.Abort(_("host fingerprint for %s can't be "
+ "verified (Python too old)") % host)
+ self.ui.warn(_("warning: certificate for %s can't be "
+ "verified (Python too old)\n") % host)
diff --git a/websdk/mercurial/statichttprepo.py b/websdk/mercurial/statichttprepo.py
index 1f173fb..4d3933a 120000..100644
--- a/websdk/mercurial/statichttprepo.py
+++ b/websdk/mercurial/statichttprepo.py
@@ -1 +1,138 @@
-/usr/share/pyshared/mercurial/statichttprepo.py \ No newline at end of file
+# statichttprepo.py - simple http repository class for mercurial
+#
+# This provides read-only repo access to repositories exported via static http
+#
+# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from i18n import _
+import changelog, byterange, url, error
+import localrepo, manifest, util, scmutil, store
+import urllib, urllib2, errno
+
+class httprangereader(object):
+ def __init__(self, url, opener):
+ # we assume opener has HTTPRangeHandler
+ self.url = url
+ self.pos = 0
+ self.opener = opener
+ self.name = url
+ def seek(self, pos):
+ self.pos = pos
+ def read(self, bytes=None):
+ req = urllib2.Request(self.url)
+ end = ''
+ if bytes:
+ end = self.pos + bytes - 1
+ req.add_header('Range', 'bytes=%d-%s' % (self.pos, end))
+
+ try:
+ f = self.opener.open(req)
+ data = f.read()
+ # Python 2.6+ defines a getcode() function, and 2.4 and
+ # 2.5 appear to always have an undocumented code attribute
+ # set. If we can't read either of those, fall back to 206
+ # and hope for the best.
+ code = getattr(f, 'getcode', lambda : getattr(f, 'code', 206))()
+ except urllib2.HTTPError, inst:
+ num = inst.code == 404 and errno.ENOENT or None
+ raise IOError(num, inst)
+ except urllib2.URLError, inst:
+ raise IOError(None, inst.reason[1])
+
+ if code == 200:
+ # HTTPRangeHandler does nothing if remote does not support
+ # Range headers and returns the full entity. Let's slice it.
+ if bytes:
+ data = data[self.pos:self.pos + bytes]
+ else:
+ data = data[self.pos:]
+ elif bytes:
+ data = data[:bytes]
+ self.pos += len(data)
+ return data
+ def __iter__(self):
+ return iter(self.read().splitlines(1))
+ def close(self):
+ pass
+
+def build_opener(ui, authinfo):
+ # urllib cannot handle URLs with embedded user or passwd
+ urlopener = url.opener(ui, authinfo)
+ urlopener.add_handler(byterange.HTTPRangeHandler())
+
+ class statichttpopener(scmutil.abstractopener):
+ def __init__(self, base):
+ self.base = base
+
+ def __call__(self, path, mode="r", atomictemp=None):
+ if mode not in ('r', 'rb'):
+ raise IOError('Permission denied')
+ f = "/".join((self.base, urllib.quote(path)))
+ return httprangereader(f, urlopener)
+
+ return statichttpopener
+
+class statichttprepository(localrepo.localrepository):
+ def __init__(self, ui, path):
+ self._url = path
+ self.ui = ui
+
+ self.root = path
+ u = util.url(path.rstrip('/') + "/.hg")
+ self.path, authinfo = u.authinfo()
+
+ opener = build_opener(ui, authinfo)
+ self.opener = opener(self.path)
+
+ try:
+ requirements = scmutil.readrequires(self.opener, self.supported)
+ except IOError, inst:
+ if inst.errno != errno.ENOENT:
+ raise
+ requirements = set()
+
+ # check if it is a non-empty old-style repository
+ try:
+ fp = self.opener("00changelog.i")
+ fp.read(1)
+ fp.close()
+ except IOError, inst:
+ if inst.errno != errno.ENOENT:
+ raise
+ # we do not care about empty old-style repositories here
+ msg = _("'%s' does not appear to be an hg repository") % path
+ raise error.RepoError(msg)
+
+ # setup store
+ self.store = store.store(requirements, self.path, opener)
+ self.spath = self.store.path
+ self.sopener = self.store.opener
+ self.sjoin = self.store.join
+
+ self.manifest = manifest.manifest(self.sopener)
+ self.changelog = changelog.changelog(self.sopener)
+ self._tags = None
+ self.nodetagscache = None
+ self._branchcache = None
+ self._branchcachetip = None
+ self.encodepats = None
+ self.decodepats = None
+ self.capabilities.difference_update(["pushkey"])
+ self._filecache = {}
+
+ def url(self):
+ return self._url
+
+ def local(self):
+ return False
+
+ def lock(self, wait=True):
+ raise util.Abort(_('cannot lock static-http repository'))
+
+def instance(ui, path, create):
+ if create:
+ raise util.Abort(_('cannot create new static-http repository'))
+ return statichttprepository(ui, path[7:])
diff --git a/websdk/mercurial/store.py b/websdk/mercurial/store.py
index 5f2639a..fac0802 120000..100644
--- a/websdk/mercurial/store.py
+++ b/websdk/mercurial/store.py
@@ -1 +1,427 @@
-/usr/share/pyshared/mercurial/store.py \ No newline at end of file
+# store.py - repository store handling for Mercurial
+#
+# Copyright 2008 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from i18n import _
+import osutil, scmutil, util
+import os, stat
+
+_sha = util.sha1
+
+# This avoids a collision between a file named foo and a dir named
+# foo.i or foo.d
+def encodedir(path):
+ '''
+ >>> encodedir('data/foo.i')
+ 'data/foo.i'
+ >>> encodedir('data/foo.i/bla.i')
+ 'data/foo.i.hg/bla.i'
+ >>> encodedir('data/foo.i.hg/bla.i')
+ 'data/foo.i.hg.hg/bla.i'
+ '''
+ if not path.startswith('data/'):
+ return path
+ return (path
+ .replace(".hg/", ".hg.hg/")
+ .replace(".i/", ".i.hg/")
+ .replace(".d/", ".d.hg/"))
+
+def decodedir(path):
+ '''
+ >>> decodedir('data/foo.i')
+ 'data/foo.i'
+ >>> decodedir('data/foo.i.hg/bla.i')
+ 'data/foo.i/bla.i'
+ >>> decodedir('data/foo.i.hg.hg/bla.i')
+ 'data/foo.i.hg/bla.i'
+ '''
+ if not path.startswith('data/') or ".hg/" not in path:
+ return path
+ return (path
+ .replace(".d.hg/", ".d/")
+ .replace(".i.hg/", ".i/")
+ .replace(".hg.hg/", ".hg/"))
+
+def _buildencodefun():
+ '''
+ >>> enc, dec = _buildencodefun()
+
+ >>> enc('nothing/special.txt')
+ 'nothing/special.txt'
+ >>> dec('nothing/special.txt')
+ 'nothing/special.txt'
+
+ >>> enc('HELLO')
+ '_h_e_l_l_o'
+ >>> dec('_h_e_l_l_o')
+ 'HELLO'
+
+ >>> enc('hello:world?')
+ 'hello~3aworld~3f'
+ >>> dec('hello~3aworld~3f')
+ 'hello:world?'
+
+ >>> enc('the\x07quick\xADshot')
+ 'the~07quick~adshot'
+ >>> dec('the~07quick~adshot')
+ 'the\\x07quick\\xadshot'
+ '''
+ e = '_'
+ winreserved = [ord(x) for x in '\\:*?"<>|']
+ cmap = dict([(chr(x), chr(x)) for x in xrange(127)])
+ for x in (range(32) + range(126, 256) + winreserved):
+ cmap[chr(x)] = "~%02x" % x
+ for x in range(ord("A"), ord("Z")+1) + [ord(e)]:
+ cmap[chr(x)] = e + chr(x).lower()
+ dmap = {}
+ for k, v in cmap.iteritems():
+ dmap[v] = k
+ def decode(s):
+ i = 0
+ while i < len(s):
+ for l in xrange(1, 4):
+ try:
+ yield dmap[s[i:i + l]]
+ i += l
+ break
+ except KeyError:
+ pass
+ else:
+ raise KeyError
+ return (lambda s: "".join([cmap[c] for c in encodedir(s)]),
+ lambda s: decodedir("".join(list(decode(s)))))
+
+encodefilename, decodefilename = _buildencodefun()
+
+def _buildlowerencodefun():
+ '''
+ >>> f = _buildlowerencodefun()
+ >>> f('nothing/special.txt')
+ 'nothing/special.txt'
+ >>> f('HELLO')
+ 'hello'
+ >>> f('hello:world?')
+ 'hello~3aworld~3f'
+ >>> f('the\x07quick\xADshot')
+ 'the~07quick~adshot'
+ '''
+ winreserved = [ord(x) for x in '\\:*?"<>|']
+ cmap = dict([(chr(x), chr(x)) for x in xrange(127)])
+ for x in (range(32) + range(126, 256) + winreserved):
+ cmap[chr(x)] = "~%02x" % x
+ for x in range(ord("A"), ord("Z")+1):
+ cmap[chr(x)] = chr(x).lower()
+ return lambda s: "".join([cmap[c] for c in s])
+
+lowerencode = _buildlowerencodefun()
+
+_winreservednames = '''con prn aux nul
+ com1 com2 com3 com4 com5 com6 com7 com8 com9
+ lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
+def _auxencode(path, dotencode):
+ '''
+ Encodes filenames containing names reserved by Windows or which end in
+ period or space. Does not touch other single reserved characters c.
+ Specifically, c in '\\:*?"<>|' or ord(c) <= 31 are *not* encoded here.
+ Additionally encodes space or period at the beginning, if dotencode is
+ True.
+ path is assumed to be all lowercase.
+
+ >>> _auxencode('.foo/aux.txt/txt.aux/con/prn/nul/foo.', True)
+ '~2efoo/au~78.txt/txt.aux/co~6e/pr~6e/nu~6c/foo~2e'
+ >>> _auxencode('.com1com2/lpt9.lpt4.lpt1/conprn/foo.', False)
+ '.com1com2/lp~749.lpt4.lpt1/conprn/foo~2e'
+ >>> _auxencode('foo. ', True)
+ 'foo.~20'
+ >>> _auxencode(' .foo', True)
+ '~20.foo'
+ '''
+ res = []
+ for n in path.split('/'):
+ if n:
+ base = n.split('.')[0]
+ if base and (base in _winreservednames):
+ # encode third letter ('aux' -> 'au~78')
+ ec = "~%02x" % ord(n[2])
+ n = n[0:2] + ec + n[3:]
+ if n[-1] in '. ':
+ # encode last period or space ('foo...' -> 'foo..~2e')
+ n = n[:-1] + "~%02x" % ord(n[-1])
+ if dotencode and n[0] in '. ':
+ n = "~%02x" % ord(n[0]) + n[1:]
+ res.append(n)
+ return '/'.join(res)
+
+_maxstorepathlen = 120
+_dirprefixlen = 8
+_maxshortdirslen = 8 * (_dirprefixlen + 1) - 4
+def _hybridencode(path, auxencode):
+ '''encodes path with a length limit
+
+ Encodes all paths that begin with 'data/', according to the following.
+
+ Default encoding (reversible):
+
+ Encodes all uppercase letters 'X' as '_x'. All reserved or illegal
+ characters are encoded as '~xx', where xx is the two digit hex code
+ of the character (see encodefilename).
+ Relevant path components consisting of Windows reserved filenames are
+ masked by encoding the third character ('aux' -> 'au~78', see auxencode).
+
+ Hashed encoding (not reversible):
+
+ If the default-encoded path is longer than _maxstorepathlen, a
+ non-reversible hybrid hashing of the path is done instead.
+ This encoding uses up to _dirprefixlen characters of all directory
+ levels of the lowerencoded path, but not more levels than can fit into
+ _maxshortdirslen.
+ Then follows the filler followed by the sha digest of the full path.
+ The filler is the beginning of the basename of the lowerencoded path
+ (the basename is everything after the last path separator). The filler
+ is as long as possible, filling in characters from the basename until
+ the encoded path has _maxstorepathlen characters (or all chars of the
+ basename have been taken).
+ The extension (e.g. '.i' or '.d') is preserved.
+
+ The string 'data/' at the beginning is replaced with 'dh/', if the hashed
+ encoding was used.
+ '''
+ if not path.startswith('data/'):
+ return path
+ # escape directories ending with .i and .d
+ path = encodedir(path)
+ ndpath = path[len('data/'):]
+ res = 'data/' + auxencode(encodefilename(ndpath))
+ if len(res) > _maxstorepathlen:
+ digest = _sha(path).hexdigest()
+ aep = auxencode(lowerencode(ndpath))
+ _root, ext = os.path.splitext(aep)
+ parts = aep.split('/')
+ basename = parts[-1]
+ sdirs = []
+ for p in parts[:-1]:
+ d = p[:_dirprefixlen]
+ if d[-1] in '. ':
+ # Windows can't access dirs ending in period or space
+ d = d[:-1] + '_'
+ t = '/'.join(sdirs) + '/' + d
+ if len(t) > _maxshortdirslen:
+ break
+ sdirs.append(d)
+ dirs = '/'.join(sdirs)
+ if len(dirs) > 0:
+ dirs += '/'
+ res = 'dh/' + dirs + digest + ext
+ spaceleft = _maxstorepathlen - len(res)
+ if spaceleft > 0:
+ filler = basename[:spaceleft]
+ res = 'dh/' + dirs + filler + digest + ext
+ return res
+
+def _calcmode(path):
+ try:
+ # files in .hg/ will be created using this mode
+ mode = os.stat(path).st_mode
+ # avoid some useless chmods
+ if (0777 & ~util.umask) == (0777 & mode):
+ mode = None
+ except OSError:
+ mode = None
+ return mode
+
+_data = 'data 00manifest.d 00manifest.i 00changelog.d 00changelog.i'
+
+class basicstore(object):
+ '''base class for local repository stores'''
+ def __init__(self, path, openertype):
+ self.path = path
+ self.createmode = _calcmode(path)
+ op = openertype(self.path)
+ op.createmode = self.createmode
+ self.opener = scmutil.filteropener(op, encodedir)
+
+ def join(self, f):
+ return self.path + '/' + encodedir(f)
+
+ def _walk(self, relpath, recurse):
+ '''yields (unencoded, encoded, size)'''
+ path = self.path
+ if relpath:
+ path += '/' + relpath
+ striplen = len(self.path) + 1
+ l = []
+ if os.path.isdir(path):
+ visit = [path]
+ while visit:
+ p = visit.pop()
+ for f, kind, st in osutil.listdir(p, stat=True):
+ fp = p + '/' + f
+ if kind == stat.S_IFREG and f[-2:] in ('.d', '.i'):
+ n = util.pconvert(fp[striplen:])
+ l.append((decodedir(n), n, st.st_size))
+ elif kind == stat.S_IFDIR and recurse:
+ visit.append(fp)
+ return sorted(l)
+
+ def datafiles(self):
+ return self._walk('data', True)
+
+ def walk(self):
+ '''yields (unencoded, encoded, size)'''
+ # yield data files first
+ for x in self.datafiles():
+ yield x
+ # yield manifest before changelog
+ for x in reversed(self._walk('', False)):
+ yield x
+
+ def copylist(self):
+ return ['requires'] + _data.split()
+
+ def write(self):
+ pass
+
+class encodedstore(basicstore):
+ def __init__(self, path, openertype):
+ self.path = path + '/store'
+ self.createmode = _calcmode(self.path)
+ op = openertype(self.path)
+ op.createmode = self.createmode
+ self.opener = scmutil.filteropener(op, encodefilename)
+
+ def datafiles(self):
+ for a, b, size in self._walk('data', True):
+ try:
+ a = decodefilename(a)
+ except KeyError:
+ a = None
+ yield a, b, size
+
+ def join(self, f):
+ return self.path + '/' + encodefilename(f)
+
+ def copylist(self):
+ return (['requires', '00changelog.i'] +
+ ['store/' + f for f in _data.split()])
+
+class fncache(object):
+ # the filename used to be partially encoded
+ # hence the encodedir/decodedir dance
+ def __init__(self, opener):
+ self.opener = opener
+ self.entries = None
+ self._dirty = False
+
+ def _load(self):
+ '''fill the entries from the fncache file'''
+ self.entries = set()
+ self._dirty = False
+ try:
+ fp = self.opener('fncache', mode='rb')
+ except IOError:
+ # skip nonexistent file
+ return
+ for n, line in enumerate(fp):
+ if (len(line) < 2) or (line[-1] != '\n'):
+ t = _('invalid entry in fncache, line %s') % (n + 1)
+ raise util.Abort(t)
+ self.entries.add(decodedir(line[:-1]))
+ fp.close()
+
+ def rewrite(self, files):
+ fp = self.opener('fncache', mode='wb')
+ for p in files:
+ fp.write(encodedir(p) + '\n')
+ fp.close()
+ self.entries = set(files)
+ self._dirty = False
+
+ def write(self):
+ if not self._dirty:
+ return
+ fp = self.opener('fncache', mode='wb', atomictemp=True)
+ for p in self.entries:
+ fp.write(encodedir(p) + '\n')
+ fp.close()
+ self._dirty = False
+
+ def add(self, fn):
+ if self.entries is None:
+ self._load()
+ if fn not in self.entries:
+ self._dirty = True
+ self.entries.add(fn)
+
+ def __contains__(self, fn):
+ if self.entries is None:
+ self._load()
+ return fn in self.entries
+
+ def __iter__(self):
+ if self.entries is None:
+ self._load()
+ return iter(self.entries)
+
+class _fncacheopener(scmutil.abstractopener):
+ def __init__(self, op, fnc, encode):
+ self.opener = op
+ self.fncache = fnc
+ self.encode = encode
+
+ def __call__(self, path, mode='r', *args, **kw):
+ if mode not in ('r', 'rb') and path.startswith('data/'):
+ self.fncache.add(path)
+ return self.opener(self.encode(path), mode, *args, **kw)
+
+class fncachestore(basicstore):
+ def __init__(self, path, openertype, encode):
+ self.encode = encode
+ self.path = path + '/store'
+ self.createmode = _calcmode(self.path)
+ op = openertype(self.path)
+ op.createmode = self.createmode
+ fnc = fncache(op)
+ self.fncache = fnc
+ self.opener = _fncacheopener(op, fnc, encode)
+
+ def join(self, f):
+ return self.path + '/' + self.encode(f)
+
+ def datafiles(self):
+ rewrite = False
+ existing = []
+ spath = self.path
+ for f in self.fncache:
+ ef = self.encode(f)
+ try:
+ st = os.stat(spath + '/' + ef)
+ yield f, ef, st.st_size
+ existing.append(f)
+ except OSError:
+ # nonexistent entry
+ rewrite = True
+ if rewrite:
+ # rewrite fncache to remove nonexistent entries
+ # (may be caused by rollback / strip)
+ self.fncache.rewrite(existing)
+
+ def copylist(self):
+ d = ('data dh fncache'
+ ' 00manifest.d 00manifest.i 00changelog.d 00changelog.i')
+ return (['requires', '00changelog.i'] +
+ ['store/' + f for f in d.split()])
+
+ def write(self):
+ self.fncache.write()
+
+def store(requirements, path, openertype):
+ if 'store' in requirements:
+ if 'fncache' in requirements:
+ auxencode = lambda f: _auxencode(f, 'dotencode' in requirements)
+ encode = lambda f: _hybridencode(f, auxencode)
+ return fncachestore(path, openertype, encode)
+ return encodedstore(path, openertype)
+ return basicstore(path, openertype)
diff --git a/websdk/mercurial/strutil.py b/websdk/mercurial/strutil.py
index b93316c..b33fb6b 120000..100644
--- a/websdk/mercurial/strutil.py
+++ b/websdk/mercurial/strutil.py
@@ -1 +1,34 @@
-/usr/share/pyshared/mercurial/strutil.py \ No newline at end of file
+# strutil.py - string utilities for Mercurial
+#
+# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+def findall(haystack, needle, start=0, end=None):
+ if end is None:
+ end = len(haystack)
+ if end < 0:
+ end += len(haystack)
+ if start < 0:
+ start += len(haystack)
+ while start < end:
+ c = haystack.find(needle, start, end)
+ if c == -1:
+ break
+ yield c
+ start = c + 1
+
+def rfindall(haystack, needle, start=0, end=None):
+ if end is None:
+ end = len(haystack)
+ if end < 0:
+ end += len(haystack)
+ if start < 0:
+ start += len(haystack)
+ while end >= 0:
+ c = haystack.rfind(needle, start, end)
+ if c == -1:
+ break
+ yield c
+ end = c - 1
diff --git a/websdk/mercurial/subrepo.py b/websdk/mercurial/subrepo.py
index 5a38894..1e7c9b2 120000..100644
--- a/websdk/mercurial/subrepo.py
+++ b/websdk/mercurial/subrepo.py
@@ -1 +1,1126 @@
-/usr/share/pyshared/mercurial/subrepo.py \ No newline at end of file
+# subrepo.py - sub-repository handling for Mercurial
+#
+# Copyright 2009-2010 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+import errno, os, re, xml.dom.minidom, shutil, posixpath
+import stat, subprocess, tarfile
+from i18n import _
+import config, scmutil, util, node, error, cmdutil, bookmarks
+hg = None
+propertycache = util.propertycache
+
+nullstate = ('', '', 'empty')
+
+def state(ctx, ui):
+ """return a state dict, mapping subrepo paths configured in .hgsub
+ to tuple: (source from .hgsub, revision from .hgsubstate, kind
+ (key in types dict))
+ """
+ p = config.config()
+ def read(f, sections=None, remap=None):
+ if f in ctx:
+ try:
+ data = ctx[f].data()
+ except IOError, err:
+ if err.errno != errno.ENOENT:
+ raise
+ # handle missing subrepo spec files as removed
+ ui.warn(_("warning: subrepo spec file %s not found\n") % f)
+ return
+ p.parse(f, data, sections, remap, read)
+ else:
+ raise util.Abort(_("subrepo spec file %s not found") % f)
+
+ if '.hgsub' in ctx:
+ read('.hgsub')
+
+ for path, src in ui.configitems('subpaths'):
+ p.set('subpaths', path, src, ui.configsource('subpaths', path))
+
+ rev = {}
+ if '.hgsubstate' in ctx:
+ try:
+ for l in ctx['.hgsubstate'].data().splitlines():
+ revision, path = l.split(" ", 1)
+ rev[path] = revision
+ except IOError, err:
+ if err.errno != errno.ENOENT:
+ raise
+
+ def remap(src):
+ for pattern, repl in p.items('subpaths'):
+ # Turn r'C:\foo\bar' into r'C:\\foo\\bar' since re.sub
+ # does a string decode.
+ repl = repl.encode('string-escape')
+ # However, we still want to allow back references to go
+ # through unharmed, so we turn r'\\1' into r'\1'. Again,
+ # extra escapes are needed because re.sub string decodes.
+ repl = re.sub(r'\\\\([0-9]+)', r'\\\1', repl)
+ try:
+ src = re.sub(pattern, repl, src, 1)
+ except re.error, e:
+ raise util.Abort(_("bad subrepository pattern in %s: %s")
+ % (p.source('subpaths', pattern), e))
+ return src
+
+ state = {}
+ for path, src in p[''].items():
+ kind = 'hg'
+ if src.startswith('['):
+ if ']' not in src:
+ raise util.Abort(_('missing ] in subrepo source'))
+ kind, src = src.split(']', 1)
+ kind = kind[1:]
+ src = src.lstrip() # strip any extra whitespace after ']'
+
+ if not util.url(src).isabs():
+ parent = _abssource(ctx._repo, abort=False)
+ if parent:
+ parent = util.url(parent)
+ parent.path = posixpath.join(parent.path or '', src)
+ parent.path = posixpath.normpath(parent.path)
+ joined = str(parent)
+ # Remap the full joined path and use it if it changes,
+ # else remap the original source.
+ remapped = remap(joined)
+ if remapped == joined:
+ src = remap(src)
+ else:
+ src = remapped
+
+ src = remap(src)
+ state[path] = (src.strip(), rev.get(path, ''), kind)
+
+ return state
+
+def writestate(repo, state):
+ """rewrite .hgsubstate in (outer) repo with these subrepo states"""
+ lines = ['%s %s\n' % (state[s][1], s) for s in sorted(state)]
+ repo.wwrite('.hgsubstate', ''.join(lines), '')
+
+def submerge(repo, wctx, mctx, actx, overwrite):
+ """delegated from merge.applyupdates: merging of .hgsubstate file
+ in working context, merging context and ancestor context"""
+ if mctx == actx: # backwards?
+ actx = wctx.p1()
+ s1 = wctx.substate
+ s2 = mctx.substate
+ sa = actx.substate
+ sm = {}
+
+ repo.ui.debug("subrepo merge %s %s %s\n" % (wctx, mctx, actx))
+
+ def debug(s, msg, r=""):
+ if r:
+ r = "%s:%s:%s" % r
+ repo.ui.debug(" subrepo %s: %s %s\n" % (s, msg, r))
+
+ for s, l in s1.items():
+ a = sa.get(s, nullstate)
+ ld = l # local state with possible dirty flag for compares
+ if wctx.sub(s).dirty():
+ ld = (l[0], l[1] + "+")
+ if wctx == actx: # overwrite
+ a = ld
+
+ if s in s2:
+ r = s2[s]
+ if ld == r or r == a: # no change or local is newer
+ sm[s] = l
+ continue
+ elif ld == a: # other side changed
+ debug(s, "other changed, get", r)
+ wctx.sub(s).get(r, overwrite)
+ sm[s] = r
+ elif ld[0] != r[0]: # sources differ
+ if repo.ui.promptchoice(
+ _(' subrepository sources for %s differ\n'
+ 'use (l)ocal source (%s) or (r)emote source (%s)?')
+ % (s, l[0], r[0]),
+ (_('&Local'), _('&Remote')), 0):
+ debug(s, "prompt changed, get", r)
+ wctx.sub(s).get(r, overwrite)
+ sm[s] = r
+ elif ld[1] == a[1]: # local side is unchanged
+ debug(s, "other side changed, get", r)
+ wctx.sub(s).get(r, overwrite)
+ sm[s] = r
+ else:
+ debug(s, "both sides changed, merge with", r)
+ wctx.sub(s).merge(r)
+ sm[s] = l
+ elif ld == a: # remote removed, local unchanged
+ debug(s, "remote removed, remove")
+ wctx.sub(s).remove()
+ elif a == nullstate: # not present in remote or ancestor
+ debug(s, "local added, keep")
+ sm[s] = l
+ continue
+ else:
+ if repo.ui.promptchoice(
+ _(' local changed subrepository %s which remote removed\n'
+ 'use (c)hanged version or (d)elete?') % s,
+ (_('&Changed'), _('&Delete')), 0):
+ debug(s, "prompt remove")
+ wctx.sub(s).remove()
+
+ for s, r in sorted(s2.items()):
+ if s in s1:
+ continue
+ elif s not in sa:
+ debug(s, "remote added, get", r)
+ mctx.sub(s).get(r)
+ sm[s] = r
+ elif r != sa[s]:
+ if repo.ui.promptchoice(
+ _(' remote changed subrepository %s which local removed\n'
+ 'use (c)hanged version or (d)elete?') % s,
+ (_('&Changed'), _('&Delete')), 0) == 0:
+ debug(s, "prompt recreate", r)
+ wctx.sub(s).get(r)
+ sm[s] = r
+
+ # record merged .hgsubstate
+ writestate(repo, sm)
+
+def _updateprompt(ui, sub, dirty, local, remote):
+ if dirty:
+ msg = (_(' subrepository sources for %s differ\n'
+ 'use (l)ocal source (%s) or (r)emote source (%s)?\n')
+ % (subrelpath(sub), local, remote))
+ else:
+ msg = (_(' subrepository sources for %s differ (in checked out version)\n'
+ 'use (l)ocal source (%s) or (r)emote source (%s)?\n')
+ % (subrelpath(sub), local, remote))
+ return ui.promptchoice(msg, (_('&Local'), _('&Remote')), 0)
+
+def reporelpath(repo):
+ """return path to this (sub)repo as seen from outermost repo"""
+ parent = repo
+ while util.safehasattr(parent, '_subparent'):
+ parent = parent._subparent
+ p = parent.root.rstrip(os.sep)
+ return repo.root[len(p) + 1:]
+
+def subrelpath(sub):
+ """return path to this subrepo as seen from outermost repo"""
+ if util.safehasattr(sub, '_relpath'):
+ return sub._relpath
+ if not util.safehasattr(sub, '_repo'):
+ return sub._path
+ return reporelpath(sub._repo)
+
+def _abssource(repo, push=False, abort=True):
+ """return pull/push path of repo - either based on parent repo .hgsub info
+ or on the top repo config. Abort or return None if no source found."""
+ if util.safehasattr(repo, '_subparent'):
+ source = util.url(repo._subsource)
+ if source.isabs():
+ return str(source)
+ source.path = posixpath.normpath(source.path)
+ parent = _abssource(repo._subparent, push, abort=False)
+ if parent:
+ parent = util.url(util.pconvert(parent))
+ parent.path = posixpath.join(parent.path or '', source.path)
+ parent.path = posixpath.normpath(parent.path)
+ return str(parent)
+ else: # recursion reached top repo
+ if util.safehasattr(repo, '_subtoppath'):
+ return repo._subtoppath
+ if push and repo.ui.config('paths', 'default-push'):
+ return repo.ui.config('paths', 'default-push')
+ if repo.ui.config('paths', 'default'):
+ return repo.ui.config('paths', 'default')
+ if abort:
+ raise util.Abort(_("default path for subrepository %s not found") %
+ reporelpath(repo))
+
+def itersubrepos(ctx1, ctx2):
+ """find subrepos in ctx1 or ctx2"""
+ # Create a (subpath, ctx) mapping where we prefer subpaths from
+ # ctx1. The subpaths from ctx2 are important when the .hgsub file
+ # has been modified (in ctx2) but not yet committed (in ctx1).
+ subpaths = dict.fromkeys(ctx2.substate, ctx2)
+ subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
+ for subpath, ctx in sorted(subpaths.iteritems()):
+ yield subpath, ctx.sub(subpath)
+
+def subrepo(ctx, path):
+ """return instance of the right subrepo class for subrepo in path"""
+ # subrepo inherently violates our import layering rules
+ # because it wants to make repo objects from deep inside the stack
+ # so we manually delay the circular imports to not break
+ # scripts that don't use our demand-loading
+ global hg
+ import hg as h
+ hg = h
+
+ scmutil.pathauditor(ctx._repo.root)(path)
+ state = ctx.substate.get(path, nullstate)
+ if state[2] not in types:
+ raise util.Abort(_('unknown subrepo type %s') % state[2])
+ return types[state[2]](ctx, path, state[:2])
+
+# subrepo classes need to implement the following abstract class:
+
+class abstractsubrepo(object):
+
+ def dirty(self, ignoreupdate=False):
+ """returns true if the dirstate of the subrepo is dirty or does not
+ match current stored state. If ignoreupdate is true, only check
+ whether the subrepo has uncommitted changes in its dirstate.
+ """
+ raise NotImplementedError
+
+ def checknested(self, path):
+ """check if path is a subrepository within this repository"""
+ return False
+
+ def commit(self, text, user, date):
+ """commit the current changes to the subrepo with the given
+ log message. Use given user and date if possible. Return the
+ new state of the subrepo.
+ """
+ raise NotImplementedError
+
+ def remove(self):
+ """remove the subrepo
+
+ (should verify the dirstate is not dirty first)
+ """
+ raise NotImplementedError
+
+ def get(self, state, overwrite=False):
+ """run whatever commands are needed to put the subrepo into
+ this state
+ """
+ raise NotImplementedError
+
+ def merge(self, state):
+ """merge currently-saved state with the new state."""
+ raise NotImplementedError
+
+ def push(self, force):
+ """perform whatever action is analogous to 'hg push'
+
+ This may be a no-op on some systems.
+ """
+ raise NotImplementedError
+
+ def add(self, ui, match, dryrun, prefix):
+ return []
+
+ def status(self, rev2, **opts):
+ return [], [], [], [], [], [], []
+
+ def diff(self, diffopts, node2, match, prefix, **opts):
+ pass
+
+ def outgoing(self, ui, dest, opts):
+ return 1
+
+ def incoming(self, ui, source, opts):
+ return 1
+
+ def files(self):
+ """return filename iterator"""
+ raise NotImplementedError
+
+ def filedata(self, name):
+ """return file data"""
+ raise NotImplementedError
+
+ def fileflags(self, name):
+ """return file flags"""
+ return ''
+
+ def archive(self, ui, archiver, prefix):
+ files = self.files()
+ total = len(files)
+ relpath = subrelpath(self)
+ ui.progress(_('archiving (%s)') % relpath, 0,
+ unit=_('files'), total=total)
+ for i, name in enumerate(files):
+ flags = self.fileflags(name)
+ mode = 'x' in flags and 0755 or 0644
+ symlink = 'l' in flags
+ archiver.addfile(os.path.join(prefix, self._path, name),
+ mode, symlink, self.filedata(name))
+ ui.progress(_('archiving (%s)') % relpath, i + 1,
+ unit=_('files'), total=total)
+ ui.progress(_('archiving (%s)') % relpath, None)
+
+
+class hgsubrepo(abstractsubrepo):
+ def __init__(self, ctx, path, state):
+ self._path = path
+ self._state = state
+ r = ctx._repo
+ root = r.wjoin(path)
+ create = False
+ if not os.path.exists(os.path.join(root, '.hg')):
+ create = True
+ util.makedirs(root)
+ self._repo = hg.repository(r.ui, root, create=create)
+ self._initrepo(r, state[0], create)
+
+ def _initrepo(self, parentrepo, source, create):
+ self._repo._subparent = parentrepo
+ self._repo._subsource = source
+
+ if create:
+ fp = self._repo.opener("hgrc", "w", text=True)
+ fp.write('[paths]\n')
+
+ def addpathconfig(key, value):
+ if value:
+ fp.write('%s = %s\n' % (key, value))
+ self._repo.ui.setconfig('paths', key, value)
+
+ defpath = _abssource(self._repo, abort=False)
+ defpushpath = _abssource(self._repo, True, abort=False)
+ addpathconfig('default', defpath)
+ if defpath != defpushpath:
+ addpathconfig('default-push', defpushpath)
+ fp.close()
+
+ def add(self, ui, match, dryrun, prefix):
+ return cmdutil.add(ui, self._repo, match, dryrun, True,
+ os.path.join(prefix, self._path))
+
+ def status(self, rev2, **opts):
+ try:
+ rev1 = self._state[1]
+ ctx1 = self._repo[rev1]
+ ctx2 = self._repo[rev2]
+ return self._repo.status(ctx1, ctx2, **opts)
+ except error.RepoLookupError, inst:
+ self._repo.ui.warn(_('warning: error "%s" in subrepository "%s"\n')
+ % (inst, subrelpath(self)))
+ return [], [], [], [], [], [], []
+
+ def diff(self, diffopts, node2, match, prefix, **opts):
+ try:
+ node1 = node.bin(self._state[1])
+ # We currently expect node2 to come from substate and be
+ # in hex format
+ if node2 is not None:
+ node2 = node.bin(node2)
+ cmdutil.diffordiffstat(self._repo.ui, self._repo, diffopts,
+ node1, node2, match,
+ prefix=os.path.join(prefix, self._path),
+ listsubrepos=True, **opts)
+ except error.RepoLookupError, inst:
+ self._repo.ui.warn(_('warning: error "%s" in subrepository "%s"\n')
+ % (inst, subrelpath(self)))
+
+ def archive(self, ui, archiver, prefix):
+ self._get(self._state + ('hg',))
+ abstractsubrepo.archive(self, ui, archiver, prefix)
+
+ rev = self._state[1]
+ ctx = self._repo[rev]
+ for subpath in ctx.substate:
+ s = subrepo(ctx, subpath)
+ s.archive(ui, archiver, os.path.join(prefix, self._path))
+
+ def dirty(self, ignoreupdate=False):
+ r = self._state[1]
+ if r == '' and not ignoreupdate: # no state recorded
+ return True
+ w = self._repo[None]
+ if r != w.p1().hex() and not ignoreupdate:
+ # different version checked out
+ return True
+ return w.dirty() # working directory changed
+
+ def checknested(self, path):
+ return self._repo._checknested(self._repo.wjoin(path))
+
+ def commit(self, text, user, date):
+ # don't bother committing in the subrepo if it's only been
+ # updated
+ if not self.dirty(True):
+ return self._repo['.'].hex()
+ self._repo.ui.debug("committing subrepo %s\n" % subrelpath(self))
+ n = self._repo.commit(text, user, date)
+ if not n:
+ return self._repo['.'].hex() # different version checked out
+ return node.hex(n)
+
+ def remove(self):
+ # we can't fully delete the repository as it may contain
+ # local-only history
+ self._repo.ui.note(_('removing subrepo %s\n') % subrelpath(self))
+ hg.clean(self._repo, node.nullid, False)
+
+ def _get(self, state):
+ source, revision, kind = state
+ if revision not in self._repo:
+ self._repo._subsource = source
+ srcurl = _abssource(self._repo)
+ other = hg.peer(self._repo.ui, {}, srcurl)
+ if len(self._repo) == 0:
+ self._repo.ui.status(_('cloning subrepo %s from %s\n')
+ % (subrelpath(self), srcurl))
+ parentrepo = self._repo._subparent
+ shutil.rmtree(self._repo.path)
+ other, self._repo = hg.clone(self._repo._subparent.ui, {}, other,
+ self._repo.root, update=False)
+ self._initrepo(parentrepo, source, create=True)
+ else:
+ self._repo.ui.status(_('pulling subrepo %s from %s\n')
+ % (subrelpath(self), srcurl))
+ self._repo.pull(other)
+ bookmarks.updatefromremote(self._repo.ui, self._repo, other)
+
+ def get(self, state, overwrite=False):
+ self._get(state)
+ source, revision, kind = state
+ self._repo.ui.debug("getting subrepo %s\n" % self._path)
+ hg.clean(self._repo, revision, False)
+
+ def merge(self, state):
+ self._get(state)
+ cur = self._repo['.']
+ dst = self._repo[state[1]]
+ anc = dst.ancestor(cur)
+
+ def mergefunc():
+ if anc == cur:
+ self._repo.ui.debug("updating subrepo %s\n" % subrelpath(self))
+ hg.update(self._repo, state[1])
+ elif anc == dst:
+ self._repo.ui.debug("skipping subrepo %s\n" % subrelpath(self))
+ else:
+ self._repo.ui.debug("merging subrepo %s\n" % subrelpath(self))
+ hg.merge(self._repo, state[1], remind=False)
+
+ wctx = self._repo[None]
+ if self.dirty():
+ if anc != dst:
+ if _updateprompt(self._repo.ui, self, wctx.dirty(), cur, dst):
+ mergefunc()
+ else:
+ mergefunc()
+ else:
+ mergefunc()
+
+ def push(self, force):
+ # push subrepos depth-first for coherent ordering
+ c = self._repo['']
+ subs = c.substate # only repos that are committed
+ for s in sorted(subs):
+ if not c.sub(s).push(force):
+ return False
+
+ dsturl = _abssource(self._repo, True)
+ self._repo.ui.status(_('pushing subrepo %s to %s\n') %
+ (subrelpath(self), dsturl))
+ other = hg.peer(self._repo.ui, {}, dsturl)
+ return self._repo.push(other, force)
+
+ def outgoing(self, ui, dest, opts):
+ return hg.outgoing(ui, self._repo, _abssource(self._repo, True), opts)
+
+ def incoming(self, ui, source, opts):
+ return hg.incoming(ui, self._repo, _abssource(self._repo, False), opts)
+
+ def files(self):
+ rev = self._state[1]
+ ctx = self._repo[rev]
+ return ctx.manifest()
+
+ def filedata(self, name):
+ rev = self._state[1]
+ return self._repo[rev][name].data()
+
+ def fileflags(self, name):
+ rev = self._state[1]
+ ctx = self._repo[rev]
+ return ctx.flags(name)
+
+
+class svnsubrepo(abstractsubrepo):
+ def __init__(self, ctx, path, state):
+ self._path = path
+ self._state = state
+ self._ctx = ctx
+ self._ui = ctx._repo.ui
+ self._exe = util.findexe('svn')
+ if not self._exe:
+ raise util.Abort(_("'svn' executable not found for subrepo '%s'")
+ % self._path)
+
+ def _svncommand(self, commands, filename='', failok=False):
+ cmd = [self._exe]
+ extrakw = {}
+ if not self._ui.interactive():
+ # Making stdin be a pipe should prevent svn from behaving
+ # interactively even if we can't pass --non-interactive.
+ extrakw['stdin'] = subprocess.PIPE
+ # Starting in svn 1.5 --non-interactive is a global flag
+ # instead of being per-command, but we need to support 1.4 so
+ # we have to be intelligent about what commands take
+ # --non-interactive.
+ if commands[0] in ('update', 'checkout', 'commit'):
+ cmd.append('--non-interactive')
+ cmd.extend(commands)
+ if filename is not None:
+ path = os.path.join(self._ctx._repo.origroot, self._path, filename)
+ cmd.append(path)
+ env = dict(os.environ)
+ # Avoid localized output, preserve current locale for everything else.
+ env['LC_MESSAGES'] = 'C'
+ p = subprocess.Popen(cmd, bufsize=-1, close_fds=util.closefds,
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE,
+ universal_newlines=True, env=env, **extrakw)
+ stdout, stderr = p.communicate()
+ stderr = stderr.strip()
+ if not failok:
+ if p.returncode:
+ raise util.Abort(stderr or 'exited with code %d' % p.returncode)
+ if stderr:
+ self._ui.warn(stderr + '\n')
+ return stdout, stderr
+
+ @propertycache
+ def _svnversion(self):
+ output, err = self._svncommand(['--version'], filename=None)
+ m = re.search(r'^svn,\s+version\s+(\d+)\.(\d+)', output)
+ if not m:
+ raise util.Abort(_('cannot retrieve svn tool version'))
+ return (int(m.group(1)), int(m.group(2)))
+
+ def _wcrevs(self):
+ # Get the working directory revision as well as the last
+ # commit revision so we can compare the subrepo state with
+ # both. We used to store the working directory one.
+ output, err = self._svncommand(['info', '--xml'])
+ doc = xml.dom.minidom.parseString(output)
+ entries = doc.getElementsByTagName('entry')
+ lastrev, rev = '0', '0'
+ if entries:
+ rev = str(entries[0].getAttribute('revision')) or '0'
+ commits = entries[0].getElementsByTagName('commit')
+ if commits:
+ lastrev = str(commits[0].getAttribute('revision')) or '0'
+ return (lastrev, rev)
+
+ def _wcrev(self):
+ return self._wcrevs()[0]
+
+ def _wcchanged(self):
+ """Return (changes, extchanges) where changes is True
+ if the working directory was changed, and extchanges is
+ True if any of these changes concern an external entry.
+ """
+ output, err = self._svncommand(['status', '--xml'])
+ externals, changes = [], []
+ doc = xml.dom.minidom.parseString(output)
+ for e in doc.getElementsByTagName('entry'):
+ s = e.getElementsByTagName('wc-status')
+ if not s:
+ continue
+ item = s[0].getAttribute('item')
+ props = s[0].getAttribute('props')
+ path = e.getAttribute('path')
+ if item == 'external':
+ externals.append(path)
+ if (item not in ('', 'normal', 'unversioned', 'external')
+ or props not in ('', 'none', 'normal')):
+ changes.append(path)
+ for path in changes:
+ for ext in externals:
+ if path == ext or path.startswith(ext + os.sep):
+ return True, True
+ return bool(changes), False
+
+ def dirty(self, ignoreupdate=False):
+ if not self._wcchanged()[0]:
+ if self._state[1] in self._wcrevs() or ignoreupdate:
+ return False
+ return True
+
+ def commit(self, text, user, date):
+ # user and date are out of our hands since svn is centralized
+ changed, extchanged = self._wcchanged()
+ if not changed:
+ return self._wcrev()
+ if extchanged:
+ # Do not try to commit externals
+ raise util.Abort(_('cannot commit svn externals'))
+ commitinfo, err = self._svncommand(['commit', '-m', text])
+ self._ui.status(commitinfo)
+ newrev = re.search('Committed revision ([0-9]+).', commitinfo)
+ if not newrev:
+ raise util.Abort(commitinfo.splitlines()[-1])
+ newrev = newrev.groups()[0]
+ self._ui.status(self._svncommand(['update', '-r', newrev])[0])
+ return newrev
+
+ def remove(self):
+ if self.dirty():
+            self._ui.warn(_('not removing repo %s because '
+                            'it has changes.\n') % self._path)
+ return
+ self._ui.note(_('removing subrepo %s\n') % self._path)
+
+ def onerror(function, path, excinfo):
+ if function is not os.remove:
+ raise
+ # read-only files cannot be unlinked under Windows
+ s = os.stat(path)
+ if (s.st_mode & stat.S_IWRITE) != 0:
+ raise
+ os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
+ os.remove(path)
+
+ path = self._ctx._repo.wjoin(self._path)
+ shutil.rmtree(path, onerror=onerror)
+ try:
+ os.removedirs(os.path.dirname(path))
+ except OSError:
+ pass
+
+ def get(self, state, overwrite=False):
+ if overwrite:
+ self._svncommand(['revert', '--recursive'])
+ args = ['checkout']
+ if self._svnversion >= (1, 5):
+ args.append('--force')
+ # The revision must be specified at the end of the URL to properly
+ # update to a directory which has since been deleted and recreated.
+ args.append('%s@%s' % (state[0], state[1]))
+ status, err = self._svncommand(args, failok=True)
+ if not re.search('Checked out revision [0-9]+.', status):
+ if ('is already a working copy for a different URL' in err
+ and (self._wcchanged() == (False, False))):
+ # obstructed but clean working copy, so just blow it away.
+ self.remove()
+ self.get(state, overwrite=False)
+ return
+ raise util.Abort((status or err).splitlines()[-1])
+ self._ui.status(status)
+
+ def merge(self, state):
+ old = self._state[1]
+ new = state[1]
+ if new != self._wcrev():
+ dirty = old == self._wcrev() or self._wcchanged()[0]
+ if _updateprompt(self._ui, self, dirty, self._wcrev(), new):
+ self.get(state, False)
+
+ def push(self, force):
+ # push is a no-op for SVN
+ return True
+
+ def files(self):
+        output = self._svncommand(['list'])[0]
+ # This works because svn forbids \n in filenames.
+ return output.splitlines()
+
+ def filedata(self, name):
+        return self._svncommand(['cat'], name)[0]
+
+
+class gitsubrepo(abstractsubrepo):
+ def __init__(self, ctx, path, state):
+ # TODO add git version check.
+ self._state = state
+ self._ctx = ctx
+ self._path = path
+ self._relpath = os.path.join(reporelpath(ctx._repo), path)
+ self._abspath = ctx._repo.wjoin(path)
+ self._subparent = ctx._repo
+ self._ui = ctx._repo.ui
+
+ def _gitcommand(self, commands, env=None, stream=False):
+ return self._gitdir(commands, env=env, stream=stream)[0]
+
+ def _gitdir(self, commands, env=None, stream=False):
+ return self._gitnodir(commands, env=env, stream=stream,
+ cwd=self._abspath)
+
+ def _gitnodir(self, commands, env=None, stream=False, cwd=None):
+ """Calls the git command
+
+ The methods tries to call the git command. versions previor to 1.6.0
+ are not supported and very probably fail.
+ """
+ self._ui.debug('%s: git %s\n' % (self._relpath, ' '.join(commands)))
+ # unless ui.quiet is set, print git's stderr,
+ # which is mostly progress and useful info
+ errpipe = None
+ if self._ui.quiet:
+ errpipe = open(os.devnull, 'w')
+ p = subprocess.Popen(['git'] + commands, bufsize=-1, cwd=cwd, env=env,
+ close_fds=util.closefds,
+ stdout=subprocess.PIPE, stderr=errpipe)
+ if stream:
+ return p.stdout, None
+
+ retdata = p.stdout.read().strip()
+ # wait for the child to exit to avoid race condition.
+ p.wait()
+
+ if p.returncode != 0 and p.returncode != 1:
+ # there are certain error codes that are ok
+ command = commands[0]
+ if command in ('cat-file', 'symbolic-ref'):
+ return retdata, p.returncode
+ # for all others, abort
+ raise util.Abort('git %s error %d in %s' %
+ (command, p.returncode, self._relpath))
+
+ return retdata, p.returncode
+
+ def _gitmissing(self):
+ return not os.path.exists(os.path.join(self._abspath, '.git'))
+
+ def _gitstate(self):
+ return self._gitcommand(['rev-parse', 'HEAD'])
+
+ def _gitcurrentbranch(self):
+ current, err = self._gitdir(['symbolic-ref', 'HEAD', '--quiet'])
+ if err:
+ current = None
+ return current
+
+ def _gitremote(self, remote):
+ out = self._gitcommand(['remote', 'show', '-n', remote])
+ line = out.split('\n')[1]
+ i = line.index('URL: ') + len('URL: ')
+ return line[i:]
+
+ def _githavelocally(self, revision):
+ out, code = self._gitdir(['cat-file', '-e', revision])
+ return code == 0
+
+ def _gitisancestor(self, r1, r2):
+ base = self._gitcommand(['merge-base', r1, r2])
+ return base == r1
+
+ def _gitisbare(self):
+ return self._gitcommand(['config', '--bool', 'core.bare']) == 'true'
+
+ def _gitupdatestat(self):
+ """This must be run before git diff-index.
+ diff-index only looks at changes to file stat;
+ this command looks at file contents and updates the stat."""
+ self._gitcommand(['update-index', '-q', '--refresh'])
+
+ def _gitbranchmap(self):
+ '''returns 2 things:
+ a map from git branch to revision
+ a map from revision to branches'''
+ branch2rev = {}
+ rev2branch = {}
+
+ out = self._gitcommand(['for-each-ref', '--format',
+ '%(objectname) %(refname)'])
+ for line in out.split('\n'):
+ revision, ref = line.split(' ')
+ if (not ref.startswith('refs/heads/') and
+ not ref.startswith('refs/remotes/')):
+ continue
+ if ref.startswith('refs/remotes/') and ref.endswith('/HEAD'):
+ continue # ignore remote/HEAD redirects
+ branch2rev[ref] = revision
+ rev2branch.setdefault(revision, []).append(ref)
+ return branch2rev, rev2branch
+
+ def _gittracking(self, branches):
+ 'return map of remote branch to local tracking branch'
+ # assumes no more than one local tracking branch for each remote
+ tracking = {}
+ for b in branches:
+ if b.startswith('refs/remotes/'):
+ continue
+ bname = b.split('/', 2)[2]
+ remote = self._gitcommand(['config', 'branch.%s.remote' % bname])
+ if remote:
+ ref = self._gitcommand(['config', 'branch.%s.merge' % bname])
+ tracking['refs/remotes/%s/%s' %
+ (remote, ref.split('/', 2)[2])] = b
+ return tracking
+
+ def _abssource(self, source):
+ if '://' not in source:
+ # recognize the scp syntax as an absolute source
+ colon = source.find(':')
+ if colon != -1 and '/' not in source[:colon]:
+ return source
+ self._subsource = source
+ return _abssource(self)
+
+ def _fetch(self, source, revision):
+ if self._gitmissing():
+ source = self._abssource(source)
+ self._ui.status(_('cloning subrepo %s from %s\n') %
+ (self._relpath, source))
+ self._gitnodir(['clone', source, self._abspath])
+ if self._githavelocally(revision):
+ return
+ self._ui.status(_('pulling subrepo %s from %s\n') %
+ (self._relpath, self._gitremote('origin')))
+ # try only origin: the originally cloned repo
+ self._gitcommand(['fetch'])
+ if not self._githavelocally(revision):
+ raise util.Abort(_("revision %s does not exist in subrepo %s\n") %
+ (revision, self._relpath))
+
+ def dirty(self, ignoreupdate=False):
+ if self._gitmissing():
+ return self._state[1] != ''
+ if self._gitisbare():
+ return True
+ if not ignoreupdate and self._state[1] != self._gitstate():
+ # different version checked out
+ return True
+ # check for staged changes or modified files; ignore untracked files
+ self._gitupdatestat()
+ out, code = self._gitdir(['diff-index', '--quiet', 'HEAD'])
+ return code == 1
+
+ def get(self, state, overwrite=False):
+ source, revision, kind = state
+ if not revision:
+ self.remove()
+ return
+ self._fetch(source, revision)
+ # if the repo was set to be bare, unbare it
+ if self._gitisbare():
+ self._gitcommand(['config', 'core.bare', 'false'])
+ if self._gitstate() == revision:
+ self._gitcommand(['reset', '--hard', 'HEAD'])
+ return
+ elif self._gitstate() == revision:
+ if overwrite:
+ # first reset the index to unmark new files for commit, because
+ # reset --hard will otherwise throw away files added for commit,
+ # not just unmark them.
+ self._gitcommand(['reset', 'HEAD'])
+ self._gitcommand(['reset', '--hard', 'HEAD'])
+ return
+ branch2rev, rev2branch = self._gitbranchmap()
+
+ def checkout(args):
+ cmd = ['checkout']
+ if overwrite:
+ # first reset the index to unmark new files for commit, because
+ # the -f option will otherwise throw away files added for
+ # commit, not just unmark them.
+ self._gitcommand(['reset', 'HEAD'])
+ cmd.append('-f')
+ self._gitcommand(cmd + args)
+
+ def rawcheckout():
+ # no branch to checkout, check it out with no branch
+ self._ui.warn(_('checking out detached HEAD in subrepo %s\n') %
+ self._relpath)
+ self._ui.warn(_('check out a git branch if you intend '
+ 'to make changes\n'))
+ checkout(['-q', revision])
+
+ if revision not in rev2branch:
+ rawcheckout()
+ return
+ branches = rev2branch[revision]
+ firstlocalbranch = None
+ for b in branches:
+ if b == 'refs/heads/master':
+ # master trumps all other branches
+ checkout(['refs/heads/master'])
+ return
+ if not firstlocalbranch and not b.startswith('refs/remotes/'):
+ firstlocalbranch = b
+ if firstlocalbranch:
+ checkout([firstlocalbranch])
+ return
+
+ tracking = self._gittracking(branch2rev.keys())
+ # choose a remote branch already tracked if possible
+ remote = branches[0]
+ if remote not in tracking:
+ for b in branches:
+ if b in tracking:
+ remote = b
+ break
+
+ if remote not in tracking:
+ # create a new local tracking branch
+ local = remote.split('/', 2)[2]
+ checkout(['-b', local, remote])
+ elif self._gitisancestor(branch2rev[tracking[remote]], remote):
+ # When updating to a tracked remote branch,
+ # if the local tracking branch is downstream of it,
+ # a normal `git pull` would have performed a "fast-forward merge"
+ # which is equivalent to updating the local branch to the remote.
+ # Since we are only looking at branching at update, we need to
+ # detect this situation and perform this action lazily.
+ if tracking[remote] != self._gitcurrentbranch():
+ checkout([tracking[remote]])
+ self._gitcommand(['merge', '--ff', remote])
+ else:
+ # a real merge would be required, just checkout the revision
+ rawcheckout()
+
+ def commit(self, text, user, date):
+ if self._gitmissing():
+ raise util.Abort(_("subrepo %s is missing") % self._relpath)
+ cmd = ['commit', '-a', '-m', text]
+ env = os.environ.copy()
+ if user:
+ cmd += ['--author', user]
+ if date:
+ # git's date parser silently ignores when seconds < 1e9
+ # convert to ISO8601
+ env['GIT_AUTHOR_DATE'] = util.datestr(date,
+ '%Y-%m-%dT%H:%M:%S %1%2')
+ self._gitcommand(cmd, env=env)
+ # make sure commit works otherwise HEAD might not exist under certain
+ # circumstances
+ return self._gitstate()
+
+ def merge(self, state):
+ source, revision, kind = state
+ self._fetch(source, revision)
+ base = self._gitcommand(['merge-base', revision, self._state[1]])
+ self._gitupdatestat()
+ out, code = self._gitdir(['diff-index', '--quiet', 'HEAD'])
+
+ def mergefunc():
+ if base == revision:
+ self.get(state) # fast forward merge
+ elif base != self._state[1]:
+ self._gitcommand(['merge', '--no-commit', revision])
+
+ if self.dirty():
+ if self._gitstate() != revision:
+ dirty = self._gitstate() == self._state[1] or code != 0
+ if _updateprompt(self._ui, self, dirty,
+ self._state[1][:7], revision[:7]):
+ mergefunc()
+ else:
+ mergefunc()
+
+ def push(self, force):
+ if not self._state[1]:
+ return True
+ if self._gitmissing():
+ raise util.Abort(_("subrepo %s is missing") % self._relpath)
+ # if a branch in origin contains the revision, nothing to do
+ branch2rev, rev2branch = self._gitbranchmap()
+ if self._state[1] in rev2branch:
+ for b in rev2branch[self._state[1]]:
+ if b.startswith('refs/remotes/origin/'):
+ return True
+ for b, revision in branch2rev.iteritems():
+ if b.startswith('refs/remotes/origin/'):
+ if self._gitisancestor(self._state[1], revision):
+ return True
+ # otherwise, try to push the currently checked out branch
+ cmd = ['push']
+ if force:
+ cmd.append('--force')
+
+ current = self._gitcurrentbranch()
+ if current:
+ # determine if the current branch is even useful
+ if not self._gitisancestor(self._state[1], current):
+ self._ui.warn(_('unrelated git branch checked out '
+ 'in subrepo %s\n') % self._relpath)
+ return False
+ self._ui.status(_('pushing branch %s of subrepo %s\n') %
+ (current.split('/', 2)[2], self._relpath))
+ self._gitcommand(cmd + ['origin', current])
+ return True
+ else:
+ self._ui.warn(_('no branch checked out in subrepo %s\n'
+ 'cannot push revision %s') %
+ (self._relpath, self._state[1]))
+ return False
+
+ def remove(self):
+ if self._gitmissing():
+ return
+ if self.dirty():
+ self._ui.warn(_('not removing repo %s because '
+ 'it has changes.\n') % self._relpath)
+ return
+ # we can't fully delete the repository as it may contain
+ # local-only history
+ self._ui.note(_('removing subrepo %s\n') % self._relpath)
+ self._gitcommand(['config', 'core.bare', 'true'])
+ for f in os.listdir(self._abspath):
+ if f == '.git':
+ continue
+ path = os.path.join(self._abspath, f)
+ if os.path.isdir(path) and not os.path.islink(path):
+ shutil.rmtree(path)
+ else:
+ os.remove(path)
+
+ def archive(self, ui, archiver, prefix):
+ source, revision = self._state
+ if not revision:
+ return
+ self._fetch(source, revision)
+
+ # Parse git's native archive command.
+ # This should be much faster than manually traversing the trees
+ # and objects with many subprocess calls.
+ tarstream = self._gitcommand(['archive', revision], stream=True)
+ tar = tarfile.open(fileobj=tarstream, mode='r|')
+ relpath = subrelpath(self)
+ ui.progress(_('archiving (%s)') % relpath, 0, unit=_('files'))
+ for i, info in enumerate(tar):
+ if info.isdir():
+ continue
+ if info.issym():
+ data = info.linkname
+ else:
+ data = tar.extractfile(info).read()
+ archiver.addfile(os.path.join(prefix, self._path, info.name),
+ info.mode, info.issym(), data)
+ ui.progress(_('archiving (%s)') % relpath, i + 1,
+ unit=_('files'))
+ ui.progress(_('archiving (%s)') % relpath, None)
+
+
+ def status(self, rev2, **opts):
+ rev1 = self._state[1]
+ if self._gitmissing() or not rev1:
+ # if the repo is missing, return no results
+ return [], [], [], [], [], [], []
+ modified, added, removed = [], [], []
+ self._gitupdatestat()
+ if rev2:
+ command = ['diff-tree', rev1, rev2]
+ else:
+ command = ['diff-index', rev1]
+ out = self._gitcommand(command)
+ for line in out.split('\n'):
+ tab = line.find('\t')
+ if tab == -1:
+ continue
+ status, f = line[tab - 1], line[tab + 1:]
+ if status == 'M':
+ modified.append(f)
+ elif status == 'A':
+ added.append(f)
+ elif status == 'D':
+ removed.append(f)
+
+ deleted = unknown = ignored = clean = []
+ return modified, added, removed, deleted, unknown, ignored, clean
+
+types = {
+ 'hg': hgsubrepo,
+ 'svn': svnsubrepo,
+ 'git': gitsubrepo,
+ }
diff --git a/websdk/mercurial/tags.py b/websdk/mercurial/tags.py
index bda3a6e..0ef4192 120000..100644
--- a/websdk/mercurial/tags.py
+++ b/websdk/mercurial/tags.py
@@ -1 +1,292 @@
-/usr/share/pyshared/mercurial/tags.py \ No newline at end of file
+# tags.py - read tag info from local repository
+#
+# Copyright 2009 Matt Mackall <mpm@selenic.com>
+# Copyright 2009 Greg Ward <greg@gerg.ca>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+# Currently this module only deals with reading and caching tags.
+# Eventually, it could take care of updating (adding/removing/moving)
+# tags too.
+
+from node import nullid, bin, hex, short
+from i18n import _
+import encoding
+import error
+import errno
+
+def findglobaltags(ui, repo, alltags, tagtypes):
+ '''Find global tags in repo by reading .hgtags from every head that
+ has a distinct version of it, using a cache to avoid excess work.
+ Updates the dicts alltags, tagtypes in place: alltags maps tag name
+ to (node, hist) pair (see _readtags() below), and tagtypes maps tag
+ name to tag type ("global" in this case).'''
+ # This is so we can be lazy and assume alltags contains only global
+ # tags when we pass it to _writetagcache().
+ assert len(alltags) == len(tagtypes) == 0, \
+ "findglobaltags() should be called first"
+
+ (heads, tagfnode, cachetags, shouldwrite) = _readtagcache(ui, repo)
+ if cachetags is not None:
+ assert not shouldwrite
+ # XXX is this really 100% correct? are there oddball special
+ # cases where a global tag should outrank a local tag but won't,
+ # because cachetags does not contain rank info?
+ _updatetags(cachetags, 'global', alltags, tagtypes)
+ return
+
+ seen = set() # set of fnode
+ fctx = None
+ for head in reversed(heads): # oldest to newest
+ assert head in repo.changelog.nodemap, \
+ "tag cache returned bogus head %s" % short(head)
+
+ fnode = tagfnode.get(head)
+ if fnode and fnode not in seen:
+ seen.add(fnode)
+ if not fctx:
+ fctx = repo.filectx('.hgtags', fileid=fnode)
+ else:
+ fctx = fctx.filectx(fnode)
+
+ filetags = _readtags(ui, repo, fctx.data().splitlines(), fctx)
+ _updatetags(filetags, 'global', alltags, tagtypes)
+
+ # and update the cache (if necessary)
+ if shouldwrite:
+ _writetagcache(ui, repo, heads, tagfnode, alltags)
+
+def readlocaltags(ui, repo, alltags, tagtypes):
+    '''Read local tags in repo. Update alltags and tagtypes.'''
+    try:
+        data = repo.opener.read("localtags")
+    except IOError, inst:
+        # a missing localtags file simply means there are no local tags;
+        # any other I/O error is propagated
+        if inst.errno != errno.ENOENT:
+            raise
+        return
+
+    # localtags is in the local encoding; re-encode to UTF-8 on
+    # input for consistency with the rest of this module.
+    filetags = _readtags(
+        ui, repo, data.splitlines(), "localtags",
+        recode=encoding.fromlocal)
+    _updatetags(filetags, "local", alltags, tagtypes)
+
+def _readtags(ui, repo, lines, fn, recode=None):
+    '''Read tag definitions from a file (or any source of lines).
+    Return a mapping from tag name to (node, hist): node is the node id
+    from the last line read for that name, and hist is the list of node
+    ids previously associated with it (in file order). All node ids are
+    binary, not hex.
+
+    fn is used only in warning messages; recode, if given, is applied
+    to each tag name (e.g. to convert local encoding to UTF-8).
+    Malformed lines are warned about and skipped, never fatal.'''
+
+    filetags = {} # map tag name to (node, hist)
+    count = 0     # current line number, for warnings
+
+    def warn(msg):
+        ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
+
+    for line in lines:
+        count += 1
+        if not line:
+            continue
+        # each line is "<hex node> <tag name>"
+        try:
+            (nodehex, name) = line.split(" ", 1)
+        except ValueError:
+            warn(_("cannot parse entry"))
+            continue
+        name = name.strip()
+        if recode:
+            name = recode(name)
+        try:
+            nodebin = bin(nodehex)
+        except TypeError:
+            # bin() raises TypeError on malformed hex input
+            warn(_("node '%s' is not well formed") % nodehex)
+            continue
+
+        # update filetags: the latest line for a name wins, earlier
+        # nodes are pushed onto its history
+        hist = []
+        if name in filetags:
+            n, hist = filetags[name]
+            hist.append(n)
+        filetags[name] = (nodebin, hist)
+    return filetags
+
+def _updatetags(filetags, tagtype, alltags, tagtypes):
+    '''Incorporate the tag info read from one file into the two
+    dictionaries, alltags and tagtypes, that contain all tag
+    info (global across all heads plus local).
+
+    filetags maps tag name to (node, hist) as returned by _readtags();
+    tagtype is e.g. "global" or "local".  Both alltags and tagtypes
+    are updated in place.'''
+
+    for name, nodehist in filetags.iteritems():
+        if name not in alltags:
+            alltags[name] = nodehist
+            tagtypes[name] = tagtype
+            continue
+
+        # we prefer alltags[name] if:
+        #  it supersedes us OR
+        #  mutual supersedes and it has a higher rank
+        # otherwise we win because we're tip-most
+        anode, ahist = nodehist
+        bnode, bhist = alltags[name]
+        if (bnode != anode and anode in bhist and
+            (bnode not in ahist or len(bhist) > len(ahist))):
+            anode = bnode
+        # merge histories, preserving order and uniqueness
+        ahist.extend([n for n in bhist if n not in ahist])
+        alltags[name] = anode, ahist
+        tagtypes[name] = tagtype
+
+
+# The tag cache only stores info about heads, not the tag contents
+# from each head. I.e. it doesn't try to squeeze out the maximum
+# performance, but is simpler and has a better chance of actually
+# working correctly. And this gives the biggest performance win: it
+# avoids looking up .hgtags in the manifest for every head, and it
+# can avoid calling heads() at all if there have been no changes to
+# the repo.
+
+def _readtagcache(ui, repo):
+ '''Read the tag cache and return a tuple (heads, fnodes, cachetags,
+ shouldwrite). If the cache is completely up-to-date, cachetags is a
+ dict of the form returned by _readtags(); otherwise, it is None and
+ heads and fnodes are set. In that case, heads is the list of all
+ heads currently in the repository (ordered from tip to oldest) and
+ fnodes is a mapping from head to .hgtags filenode. If those two are
+ set, caller is responsible for reading tag info from each head.'''
+
+ try:
+ cachefile = repo.opener('cache/tags', 'r')
+ # force reading the file for static-http
+ cachelines = iter(cachefile)
+ except IOError:
+ cachefile = None
+
+ # The cache file consists of lines like
+ # <headrev> <headnode> [<tagnode>]
+ # where <headrev> and <headnode> redundantly identify a repository
+ # head from the time the cache was written, and <tagnode> is the
+ # filenode of .hgtags on that head. Heads with no .hgtags file will
+ # have no <tagnode>. The cache is ordered from tip to oldest (which
+ # is part of why <headrev> is there: a quick visual check is all
+ # that's required to ensure correct order).
+ #
+ # This information is enough to let us avoid the most expensive part
+ # of finding global tags, which is looking up <tagnode> in the
+ # manifest for each head.
+ cacherevs = [] # list of headrev
+ cacheheads = [] # list of headnode
+ cachefnode = {} # map headnode to filenode
+ if cachefile:
+ try:
+ for line in cachelines:
+ if line == "\n":
+ break
+ line = line.rstrip().split()
+ cacherevs.append(int(line[0]))
+ headnode = bin(line[1])
+ cacheheads.append(headnode)
+ if len(line) == 3:
+ fnode = bin(line[2])
+ cachefnode[headnode] = fnode
+ except Exception:
+ # corruption of the tags cache, just recompute it
+ ui.warn(_('.hg/cache/tags is corrupt, rebuilding it\n'))
+ cacheheads = []
+ cacherevs = []
+ cachefnode = {}
+
+ tipnode = repo.changelog.tip()
+ tiprev = len(repo.changelog) - 1
+
+ # Case 1 (common): tip is the same, so nothing has changed.
+ # (Unchanged tip trivially means no changesets have been added.
+ # But, thanks to localrepository.destroyed(), it also means none
+ # have been destroyed by strip or rollback.)
+ if cacheheads and cacheheads[0] == tipnode and cacherevs[0] == tiprev:
+ tags = _readtags(ui, repo, cachelines, cachefile.name)
+ cachefile.close()
+ return (None, None, tags, False)
+ if cachefile:
+ cachefile.close() # ignore rest of file
+
+ repoheads = repo.heads()
+ # Case 2 (uncommon): empty repo; get out quickly and don't bother
+ # writing an empty cache.
+ if repoheads == [nullid]:
+ return ([], {}, {}, False)
+
+ # Case 3 (uncommon): cache file missing or empty.
+
+ # Case 4 (uncommon): tip rev decreased. This should only happen
+ # when we're called from localrepository.destroyed(). Refresh the
+ # cache so future invocations will not see disappeared heads in the
+ # cache.
+
+ # Case 5 (common): tip has changed, so we've added/replaced heads.
+
+ # As it happens, the code to handle cases 3, 4, 5 is the same.
+
+ # N.B. in case 4 (nodes destroyed), "new head" really means "newly
+ # exposed".
+ newheads = [head
+ for head in repoheads
+ if head not in set(cacheheads)]
+
+ # Now we have to lookup the .hgtags filenode for every new head.
+ # This is the most expensive part of finding tags, so performance
+ # depends primarily on the size of newheads. Worst case: no cache
+ # file, so newheads == repoheads.
+ for head in newheads:
+ cctx = repo[head]
+ try:
+ fnode = cctx.filenode('.hgtags')
+ cachefnode[head] = fnode
+ except error.LookupError:
+ # no .hgtags file on this head
+ pass
+
+ # Caller has to iterate over all heads, but can use the filenodes in
+ # cachefnode to get to each .hgtags revision quickly.
+ return (repoheads, cachefnode, None, True)
+
+def _writetagcache(ui, repo, heads, tagfnode, cachetags):
+    """Write the tag cache ('cache/tags'): one "<rev> <headnode>
+    [<fnode>]" line per head, a blank separator line, then one
+    "<node> <name>" line per tag.  Failure to open or close the cache
+    file is silently ignored -- the cache is only an optimization.
+    """
+    try:
+        cachefile = repo.opener('cache/tags', 'w', atomictemp=True)
+    except (OSError, IOError):
+        return
+
+    realheads = repo.heads()    # for sanity checks below
+    for head in heads:
+        # temporary sanity checks; these can probably be removed
+        # once this code has been in crew for a few weeks
+        assert head in repo.changelog.nodemap, \
+               'trying to write non-existent node %s to tag cache' % short(head)
+        assert head in realheads, \
+               'trying to write non-head %s to tag cache' % short(head)
+        assert head != nullid, \
+               'trying to write nullid to tag cache'
+
+        # This can't fail because of the first assert above. When/if we
+        # remove that assert, we might want to catch LookupError here
+        # and downgrade it to a warning.
+        rev = repo.changelog.rev(head)
+
+        fnode = tagfnode.get(head)
+        if fnode:
+            cachefile.write('%d %s %s\n' % (rev, hex(head), hex(fnode)))
+        else:
+            cachefile.write('%d %s\n' % (rev, hex(head)))
+
+    # Tag names in the cache are in UTF-8 -- which is the whole reason
+    # we keep them in UTF-8 throughout this module. If we converted
+    # them to local encoding on input, we would lose info writing them
+    # to the cache.
+    cachefile.write('\n')
+    for (name, (node, hist)) in cachetags.iteritems():
+        cachefile.write("%s %s\n" % (hex(node), name))
+
+    try:
+        cachefile.close()
+    except (OSError, IOError):
+        pass
diff --git a/websdk/mercurial/templatefilters.py b/websdk/mercurial/templatefilters.py
index 4b8749b..b9c1131 120000..100644
--- a/websdk/mercurial/templatefilters.py
+++ b/websdk/mercurial/templatefilters.py
@@ -1 +1,372 @@
-/usr/share/pyshared/mercurial/templatefilters.py \ No newline at end of file
+# templatefilters.py - common template expansion filters
+#
+# Copyright 2005-2008 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+import cgi, re, os, time, urllib
+import encoding, node, util
+import hbisect
+
+def addbreaks(text):
+ """:addbreaks: Any text. Add an XHTML "<br />" tag before the end of
+ every line except the last.
+ """
+ return text.replace('\n', '<br/>\n')
+
+agescales = [("year", 3600 * 24 * 365),
+ ("month", 3600 * 24 * 30),
+ ("week", 3600 * 24 * 7),
+ ("day", 3600 * 24),
+ ("hour", 3600),
+ ("minute", 60),
+ ("second", 1)]
+
+def age(date):
+    """:age: Date. Returns a human-readable date/time difference between the
+    given date/time and the current date/time.
+    """
+
+    def plural(t, c):
+        if c == 1:
+            return t
+        return t + "s"
+    def fmt(t, c):
+        # e.g. fmt("hour", 3) -> "3 hours"
+        return "%d %s" % (c, plural(t, c))
+
+    now = time.time()
+    then = date[0]
+    future = False
+    if then > now:
+        future = True
+        delta = max(1, int(then - now))
+        # more than ~30 years ahead: give up on precision
+        if delta > agescales[0][1] * 30:
+            return 'in the distant future'
+    else:
+        delta = max(1, int(now - then))
+        # more than two years ago: just show the date
+        if delta > agescales[0][1] * 2:
+            return util.shortdate(date)
+
+    # pick the largest scale with at least two whole units; the
+    # "second" scale (s == 1) always matches, so a value is returned
+    for t, s in agescales:
+        n = delta // s
+        if n >= 2 or s == 1:
+            if future:
+                return '%s from now' % fmt(t, n)
+            return '%s ago' % fmt(t, n)
+
+def basename(path):
+ """:basename: Any text. Treats the text as a path, and returns the last
+ component of the path after splitting by the path separator
+ (ignoring trailing separators). For example, "foo/bar/baz" becomes
+ "baz" and "foo/bar//" becomes "bar".
+ """
+ return os.path.basename(path)
+
+def datefilter(text):
+ """:date: Date. Returns a date in a Unix date format, including the
+ timezone: "Mon Sep 04 15:13:13 2006 0700".
+ """
+ return util.datestr(text)
+
+def domain(author):
+ """:domain: Any text. Finds the first string that looks like an email
+ address, and extracts just the domain component. Example: ``User
+ <user@example.com>`` becomes ``example.com``.
+ """
+ f = author.find('@')
+ if f == -1:
+ return ''
+ author = author[f + 1:]
+ f = author.find('>')
+ if f >= 0:
+ author = author[:f]
+ return author
+
+def email(text):
+ """:email: Any text. Extracts the first string that looks like an email
+ address. Example: ``User <user@example.com>`` becomes
+ ``user@example.com``.
+ """
+ return util.email(text)
+
+def escape(text):
+ """:escape: Any text. Replaces the special XML/XHTML characters "&", "<"
+ and ">" with XML entities.
+ """
+ return cgi.escape(text, True)
+
+para_re = None
+space_re = None
+
+def fill(text, width):
+    '''fill many paragraphs.
+
+    Splits text into paragraphs (separated by blank lines or bullet
+    markers), re-wraps each to the given width, and rejoins them with
+    their original separators preserved.
+    '''
+    global para_re, space_re
+    # compile the regexes lazily, once per process
+    if para_re is None:
+        para_re = re.compile('(\n\n|\n\\s*[-*]\\s*)', re.M)
+        space_re = re.compile(r' +')
+
+    def findparas():
+        # yield (paragraph, separator) pairs covering all of `text`
+        start = 0
+        while True:
+            m = para_re.search(text, start)
+            if not m:
+                # last paragraph: split off trailing whitespace so it is
+                # re-emitted unchanged; round-trip through unicode so the
+                # split is correct in multi-byte encodings
+                uctext = unicode(text[start:], encoding.encoding)
+                w = len(uctext)
+                while 0 < w and uctext[w - 1].isspace():
+                    w -= 1
+                yield (uctext[:w].encode(encoding.encoding),
+                       uctext[w:].encode(encoding.encoding))
+                break
+            yield text[start:m.start(0)], m.group(1)
+            start = m.end(1)
+
+    return "".join([space_re.sub(' ', util.wrap(para, width=width)) + rest
+                    for para, rest in findparas()])
+
+def fill68(text):
+ """:fill68: Any text. Wraps the text to fit in 68 columns."""
+ return fill(text, 68)
+
+def fill76(text):
+ """:fill76: Any text. Wraps the text to fit in 76 columns."""
+ return fill(text, 76)
+
+def firstline(text):
+ """:firstline: Any text. Returns the first line of text."""
+ try:
+ return text.splitlines(True)[0].rstrip('\r\n')
+ except IndexError:
+ return ''
+
+def hexfilter(text):
+ """:hex: Any text. Convert a binary Mercurial node identifier into
+ its long hexadecimal representation.
+ """
+ return node.hex(text)
+
+def hgdate(text):
+ """:hgdate: Date. Returns the date as a pair of numbers: "1157407993
+ 25200" (Unix timestamp, timezone offset).
+ """
+ return "%d %d" % text
+
+def isodate(text):
+ """:isodate: Date. Returns the date in ISO 8601 format: "2009-08-18 13:00
+ +0200".
+ """
+ return util.datestr(text, '%Y-%m-%d %H:%M %1%2')
+
+def isodatesec(text):
+ """:isodatesec: Date. Returns the date in ISO 8601 format, including
+ seconds: "2009-08-18 13:00:13 +0200". See also the rfc3339date
+ filter.
+ """
+ return util.datestr(text, '%Y-%m-%d %H:%M:%S %1%2')
+
+def indent(text, prefix):
+    '''indent each non-empty line of text after first with prefix.'''
+    lines = text.splitlines()
+    num_lines = len(lines)
+    # remember whether the input ended with a newline so the output
+    # reproduces it exactly
+    endswithnewline = text[-1:] == '\n'
+    def indenter():
+        for i in xrange(num_lines):
+            l = lines[i]
+            # the first line and whitespace-only lines get no prefix
+            if i and l.strip():
+                yield prefix
+            yield l
+            if i < num_lines - 1 or endswithnewline:
+                yield '\n'
+    return "".join(indenter())
+
+def json(obj):
+    """Serialize obj to a JSON-formatted string.
+
+    Handles None/booleans, numbers, byte and unicode strings, and
+    (recursively) any mapping (object with .keys()) or iterable.  Byte
+    strings are decoded from the local encoding before escaping.
+    Raises TypeError for unsupported types.
+    """
+    if obj is None or obj is False or obj is True:
+        return {None: 'null', False: 'false', True: 'true'}[obj]
+    elif isinstance(obj, int) or isinstance(obj, float):
+        return str(obj)
+    elif isinstance(obj, str):
+        # byte string: decode first so jsonescape sees unicode
+        u = unicode(obj, encoding.encoding, 'replace')
+        return '"%s"' % jsonescape(u)
+    elif isinstance(obj, unicode):
+        return '"%s"' % jsonescape(obj)
+    elif util.safehasattr(obj, 'keys'):
+        out = []
+        for k, v in obj.iteritems():
+            s = '%s: %s' % (json(k), json(v))
+            out.append(s)
+        return '{' + ', '.join(out) + '}'
+    elif util.safehasattr(obj, '__iter__'):
+        out = []
+        for i in obj:
+            out.append(json(i))
+        return '[' + ', '.join(out) + ']'
+    else:
+        raise TypeError('cannot encode type %s' % obj.__class__.__name__)
+
+def _uescape(c):
+ if ord(c) < 0x80:
+ return c
+ else:
+ return '\\u%04x' % ord(c)
+
+_escapes = [
+ ('\\', '\\\\'), ('"', '\\"'), ('\t', '\\t'), ('\n', '\\n'),
+ ('\r', '\\r'), ('\f', '\\f'), ('\b', '\\b'),
+]
+
+def jsonescape(s):
+ for k, v in _escapes:
+ s = s.replace(k, v)
+ return ''.join(_uescape(c) for c in s)
+
+def localdate(text):
+ """:localdate: Date. Converts a date to local date."""
+ return (text[0], util.makedate()[1])
+
+def nonempty(str):
+ """:nonempty: Any text. Returns '(none)' if the string is empty."""
+ return str or "(none)"
+
+def obfuscate(text):
+ """:obfuscate: Any text. Returns the input text rendered as a sequence of
+ XML entities.
+ """
+ text = unicode(text, encoding.encoding, 'replace')
+ return ''.join(['&#%d;' % ord(c) for c in text])
+
+def permissions(flags):
+ if "l" in flags:
+ return "lrwxrwxrwx"
+ if "x" in flags:
+ return "-rwxr-xr-x"
+ return "-rw-r--r--"
+
+def person(author):
+ """:person: Any text. Returns the text before an email address."""
+ if not '@' in author:
+ return author
+ f = author.find('<')
+ if f != -1:
+ return author[:f].rstrip()
+ f = author.find('@')
+ return author[:f].replace('.', ' ')
+
+def rfc3339date(text):
+ """:rfc3339date: Date. Returns a date using the Internet date format
+ specified in RFC 3339: "2009-08-18T13:00:13+02:00".
+ """
+ return util.datestr(text, "%Y-%m-%dT%H:%M:%S%1:%2")
+
+def rfc822date(text):
+ """:rfc822date: Date. Returns a date using the same format used in email
+ headers: "Tue, 18 Aug 2009 13:00:13 +0200".
+ """
+ return util.datestr(text, "%a, %d %b %Y %H:%M:%S %1%2")
+
+def short(text):
+ """:short: Changeset hash. Returns the short form of a changeset hash,
+ i.e. a 12 hexadecimal digit string.
+ """
+ return text[:12]
+
+def shortbisect(text):
+ """:shortbisect: Any text. Treats `text` as a bisection status, and
+ returns a single-character representing the status (G: good, B: bad,
+ S: skipped, U: untested, I: ignored). Returns single space if `text`
+ is not a valid bisection status.
+ """
+ return hbisect.shortlabel(text) or ' '
+
+def shortdate(text):
+ """:shortdate: Date. Returns a date like "2006-09-18"."""
+ return util.shortdate(text)
+
+def stringescape(text):
+ return text.encode('string_escape')
+
+def stringify(thing):
+ """:stringify: Any type. Turns the value into text by converting values into
+ text and concatenating them.
+ """
+ if util.safehasattr(thing, '__iter__') and not isinstance(thing, str):
+ return "".join([stringify(t) for t in thing if t is not None])
+ return str(thing)
+
+def strip(text):
+ """:strip: Any text. Strips all leading and trailing whitespace."""
+ return text.strip()
+
+def stripdir(text):
+ """:stripdir: Treat the text as path and strip a directory level, if
+ possible. For example, "foo" and "foo/bar" becomes "foo".
+ """
+ dir = os.path.dirname(text)
+ if dir == "":
+ return os.path.basename(text)
+ else:
+ return dir
+
+def tabindent(text):
+ """:tabindent: Any text. Returns the text, with every line except the
+ first starting with a tab character.
+ """
+ return indent(text, '\t')
+
+def urlescape(text):
+ """:urlescape: Any text. Escapes all "special" characters. For example,
+ "foo bar" becomes "foo%20bar".
+ """
+ return urllib.quote(text)
+
+def userfilter(text):
+ """:user: Any text. Returns the user portion of an email address."""
+ return util.shortuser(text)
+
+def xmlescape(text):
+ text = (text
+ .replace('&', '&amp;')
+ .replace('<', '&lt;')
+ .replace('>', '&gt;')
+ .replace('"', '&quot;')
+ .replace("'", '&#39;')) # &apos; invalid in HTML
+ return re.sub('[\x00-\x08\x0B\x0C\x0E-\x1F]', ' ', text)
+
+filters = {
+ "addbreaks": addbreaks,
+ "age": age,
+ "basename": basename,
+ "date": datefilter,
+ "domain": domain,
+ "email": email,
+ "escape": escape,
+ "fill68": fill68,
+ "fill76": fill76,
+ "firstline": firstline,
+ "hex": hexfilter,
+ "hgdate": hgdate,
+ "isodate": isodate,
+ "isodatesec": isodatesec,
+ "json": json,
+ "jsonescape": jsonescape,
+ "localdate": localdate,
+ "nonempty": nonempty,
+ "obfuscate": obfuscate,
+ "permissions": permissions,
+ "person": person,
+ "rfc3339date": rfc3339date,
+ "rfc822date": rfc822date,
+ "short": short,
+ "shortbisect": shortbisect,
+ "shortdate": shortdate,
+ "stringescape": stringescape,
+ "stringify": stringify,
+ "strip": strip,
+ "stripdir": stripdir,
+ "tabindent": tabindent,
+ "urlescape": urlescape,
+ "user": userfilter,
+ "xmlescape": xmlescape,
+}
+
+# tell hggettext to extract docstrings from these functions:
+i18nfunctions = filters.values()
diff --git a/websdk/mercurial/templatekw.py b/websdk/mercurial/templatekw.py
index eb2353e..a236d87 120000..100644
--- a/websdk/mercurial/templatekw.py
+++ b/websdk/mercurial/templatekw.py
@@ -1 +1,320 @@
-/usr/share/pyshared/mercurial/templatekw.py \ No newline at end of file
+# templatekw.py - common changeset template keywords
+#
+# Copyright 2005-2009 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from node import hex
+import patch, util, error
+import hbisect
+
+def showlist(name, values, plural=None, **args):
+ '''expand set of values.
+ name is name of key in template map.
+ values is list of strings or dicts.
+ plural is plural of name, if not simply name + 's'.
+
+ expansion works like this, given name 'foo'.
+
+ if values is empty, expand 'no_foos'.
+
+ if 'foo' not in template map, return values as a string,
+ joined by space.
+
+ expand 'start_foos'.
+
+ for each value, expand 'foo'. if 'last_foo' in template
+ map, expand it instead of 'foo' for last key.
+
+ expand 'end_foos'.
+ '''
+ templ = args['templ']
+ if plural:
+ names = plural
+ else: names = name + 's'
+ if not values:
+ noname = 'no_' + names
+ if noname in templ:
+ yield templ(noname, **args)
+ return
+ if name not in templ:
+ if isinstance(values[0], str):
+ yield ' '.join(values)
+ else:
+ for v in values:
+ yield dict(v, **args)
+ return
+ startname = 'start_' + names
+ if startname in templ:
+ yield templ(startname, **args)
+ vargs = args.copy()
+ def one(v, tag=name):
+ try:
+ vargs.update(v)
+ except (AttributeError, ValueError):
+ try:
+ for a, b in v:
+ vargs[a] = b
+ except ValueError:
+ vargs[name] = v
+ return templ(tag, **vargs)
+ lastname = 'last_' + name
+ if lastname in templ:
+ last = values.pop()
+ else:
+ last = None
+ for v in values:
+ yield one(v)
+ if last is not None:
+ yield one(last, tag=lastname)
+ endname = 'end_' + names
+ if endname in templ:
+ yield templ(endname, **args)
+
+def getfiles(repo, ctx, revcache):
+ if 'files' not in revcache:
+ revcache['files'] = repo.status(ctx.p1().node(), ctx.node())[:3]
+ return revcache['files']
+
+def getlatesttags(repo, ctx, cache):
+ '''return date, distance and name for the latest tag of rev'''
+
+ if 'latesttags' not in cache:
+ # Cache mapping from rev to a tuple with tag date, tag
+ # distance and tag name
+ cache['latesttags'] = {-1: (0, 0, 'null')}
+ latesttags = cache['latesttags']
+
+ rev = ctx.rev()
+ todo = [rev]
+ while todo:
+ rev = todo.pop()
+ if rev in latesttags:
+ continue
+ ctx = repo[rev]
+ tags = [t for t in ctx.tags() if repo.tagtype(t) == 'global']
+ if tags:
+ latesttags[rev] = ctx.date()[0], 0, ':'.join(sorted(tags))
+ continue
+ try:
+ # The tuples are laid out so the right one can be found by
+ # comparison.
+ pdate, pdist, ptag = max(
+ latesttags[p.rev()] for p in ctx.parents())
+ except KeyError:
+ # Cache miss - recurse
+ todo.append(rev)
+ todo.extend(p.rev() for p in ctx.parents())
+ continue
+ latesttags[rev] = pdate, pdist + 1, ptag
+ return latesttags[rev]
+
+def getrenamedfn(repo, endrev=None):
+ rcache = {}
+ if endrev is None:
+ endrev = len(repo)
+
+ def getrenamed(fn, rev):
+ '''looks up all renames for a file (up to endrev) the first
+ time the file is given. It indexes on the changerev and only
+ parses the manifest if linkrev != changerev.
+ Returns rename info for fn at changerev rev.'''
+ if fn not in rcache:
+ rcache[fn] = {}
+ fl = repo.file(fn)
+ for i in fl:
+ lr = fl.linkrev(i)
+ renamed = fl.renamed(fl.node(i))
+ rcache[fn][lr] = renamed
+ if lr >= endrev:
+ break
+ if rev in rcache[fn]:
+ return rcache[fn][rev]
+
+ # If linkrev != rev (i.e. rev not found in rcache) fallback to
+ # filectx logic.
+ try:
+ return repo[rev][fn].renamed()
+ except error.LookupError:
+ return None
+
+ return getrenamed
+
+
+def showauthor(repo, ctx, templ, **args):
+ """:author: String. The unmodified author of the changeset."""
+ return ctx.user()
+
+def showbisect(repo, ctx, templ, **args):
+ """:bisect: String. The changeset bisection status."""
+ return hbisect.label(repo, ctx.node())
+
+def showbranch(**args):
+ """:branch: String. The name of the branch on which the changeset was
+ committed.
+ """
+ return args['ctx'].branch()
+
+def showbranches(**args):
+ """:branches: List of strings. The name of the branch on which the
+ changeset was committed. Will be empty if the branch name was
+ default.
+ """
+ branch = args['ctx'].branch()
+ if branch != 'default':
+ return showlist('branch', [branch], plural='branches', **args)
+
+def showbookmarks(**args):
+ """:bookmarks: List of strings. Any bookmarks associated with the
+ changeset.
+ """
+ bookmarks = args['ctx'].bookmarks()
+ return showlist('bookmark', bookmarks, **args)
+
+def showchildren(**args):
+ """:children: List of strings. The children of the changeset."""
+ ctx = args['ctx']
+ childrevs = ['%d:%s' % (cctx, cctx) for cctx in ctx.children()]
+ return showlist('children', childrevs, **args)
+
+def showdate(repo, ctx, templ, **args):
+ """:date: Date information. The date when the changeset was committed."""
+ return ctx.date()
+
+def showdescription(repo, ctx, templ, **args):
+ """:desc: String. The text of the changeset description."""
+ return ctx.description().strip()
+
+def showdiffstat(repo, ctx, templ, **args):
+ """:diffstat: String. Statistics of changes with the following format:
+ "modified files: +added/-removed lines"
+ """
+ stats = patch.diffstatdata(util.iterlines(ctx.diff()))
+ maxname, maxtotal, adds, removes, binary = patch.diffstatsum(stats)
+ return '%s: +%s/-%s' % (len(stats), adds, removes)
+
+def showextras(**args):
+ templ = args['templ']
+ for key, value in sorted(args['ctx'].extra().items()):
+ args = args.copy()
+ args.update(dict(key=key, value=value))
+ yield templ('extra', **args)
+
+def showfileadds(**args):
+ """:file_adds: List of strings. Files added by this changeset."""
+ repo, ctx, revcache = args['repo'], args['ctx'], args['revcache']
+ return showlist('file_add', getfiles(repo, ctx, revcache)[1], **args)
+
+def showfilecopies(**args):
+ """:file_copies: List of strings. Files copied in this changeset with
+ their sources.
+ """
+ cache, ctx = args['cache'], args['ctx']
+ copies = args['revcache'].get('copies')
+ if copies is None:
+ if 'getrenamed' not in cache:
+ cache['getrenamed'] = getrenamedfn(args['repo'])
+ copies = []
+ getrenamed = cache['getrenamed']
+ for fn in ctx.files():
+ rename = getrenamed(fn, ctx.rev())
+ if rename:
+ copies.append((fn, rename[0]))
+
+ c = [{'name': x[0], 'source': x[1]} for x in copies]
+ return showlist('file_copy', c, plural='file_copies', **args)
+
+# showfilecopiesswitch() displays file copies only if copy records are
+# provided before calling the templater, usually with a --copies
+# command line switch.
+def showfilecopiesswitch(**args):
+ """:file_copies_switch: List of strings. Like "file_copies" but displayed
+ only if the --copied switch is set.
+ """
+ copies = args['revcache'].get('copies') or []
+ c = [{'name': x[0], 'source': x[1]} for x in copies]
+ return showlist('file_copy', c, plural='file_copies', **args)
+
+def showfiledels(**args):
+ """:file_dels: List of strings. Files removed by this changeset."""
+ repo, ctx, revcache = args['repo'], args['ctx'], args['revcache']
+ return showlist('file_del', getfiles(repo, ctx, revcache)[2], **args)
+
+def showfilemods(**args):
+ """:file_mods: List of strings. Files modified by this changeset."""
+ repo, ctx, revcache = args['repo'], args['ctx'], args['revcache']
+ return showlist('file_mod', getfiles(repo, ctx, revcache)[0], **args)
+
+def showfiles(**args):
+ """:files: List of strings. All files modified, added, or removed by this
+ changeset.
+ """
+ return showlist('file', args['ctx'].files(), **args)
+
+def showlatesttag(repo, ctx, templ, cache, **args):
+ """:latesttag: String. Most recent global tag in the ancestors of this
+ changeset.
+ """
+ return getlatesttags(repo, ctx, cache)[2]
+
+def showlatesttagdistance(repo, ctx, templ, cache, **args):
+ """:latesttagdistance: Integer. Longest path to the latest tag."""
+ return getlatesttags(repo, ctx, cache)[1]
+
+def showmanifest(**args):
+ repo, ctx, templ = args['repo'], args['ctx'], args['templ']
+ args = args.copy()
+ args.update(dict(rev=repo.manifest.rev(ctx.changeset()[0]),
+ node=hex(ctx.changeset()[0])))
+ return templ('manifest', **args)
+
+def shownode(repo, ctx, templ, **args):
+ """:node: String. The changeset identification hash, as a 40 hexadecimal
+ digit string.
+ """
+ return ctx.hex()
+
+def showrev(repo, ctx, templ, **args):
+ """:rev: Integer. The repository-local changeset revision number."""
+ return ctx.rev()
+
+def showtags(**args):
+ """:tags: List of strings. Any tags associated with the changeset."""
+ return showlist('tag', args['ctx'].tags(), **args)
+
+# keywords are callables like:
+# fn(repo, ctx, templ, cache, revcache, **args)
+# with:
+# repo - current repository instance
+# ctx - the changectx being displayed
+# templ - the templater instance
+# cache - a cache dictionary for the whole templater run
+# revcache - a cache dictionary for the current revision
+keywords = {
+ 'author': showauthor,
+ 'bisect': showbisect,
+ 'branch': showbranch,
+ 'branches': showbranches,
+ 'bookmarks': showbookmarks,
+ 'children': showchildren,
+ 'date': showdate,
+ 'desc': showdescription,
+ 'diffstat': showdiffstat,
+ 'extras': showextras,
+ 'file_adds': showfileadds,
+ 'file_copies': showfilecopies,
+ 'file_copies_switch': showfilecopiesswitch,
+ 'file_dels': showfiledels,
+ 'file_mods': showfilemods,
+ 'files': showfiles,
+ 'latesttag': showlatesttag,
+ 'latesttagdistance': showlatesttagdistance,
+ 'manifest': showmanifest,
+ 'node': shownode,
+ 'rev': showrev,
+ 'tags': showtags,
+}
+
+# tell hggettext to extract docstrings from these functions:
+i18nfunctions = keywords.values()
diff --git a/websdk/mercurial/templater.py b/websdk/mercurial/templater.py
index 7b17be5..a35a3e2 120000..100644
--- a/websdk/mercurial/templater.py
+++ b/websdk/mercurial/templater.py
@@ -1 +1,392 @@
-/usr/share/pyshared/mercurial/templater.py \ No newline at end of file
+# templater.py - template expansion for output
+#
+# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from i18n import _
+import sys, os
+import util, config, templatefilters, parser, error
+
+# template parsing
+
+elements = {
+ "(": (20, ("group", 1, ")"), ("func", 1, ")")),
+ ",": (2, None, ("list", 2)),
+ "|": (5, None, ("|", 5)),
+ "%": (6, None, ("%", 6)),
+ ")": (0, None, None),
+ "symbol": (0, ("symbol",), None),
+ "string": (0, ("string",), None),
+ "end": (0, None, None),
+}
+
+def tokenizer(data):
+ program, start, end = data
+ pos = start
+ while pos < end:
+ c = program[pos]
+ if c.isspace(): # skip inter-token whitespace
+ pass
+ elif c in "(,)%|": # handle simple operators
+ yield (c, None, pos)
+ elif (c in '"\'' or c == 'r' and
+ program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
+ if c == 'r':
+ pos += 1
+ c = program[pos]
+ decode = lambda x: x
+ else:
+ decode = lambda x: x.decode('string-escape')
+ pos += 1
+ s = pos
+ while pos < end: # find closing quote
+ d = program[pos]
+ if d == '\\': # skip over escaped characters
+ pos += 2
+ continue
+ if d == c:
+ yield ('string', decode(program[s:pos]), s)
+ break
+ pos += 1
+ else:
+ raise error.ParseError(_("unterminated string"), s)
+ elif c.isalnum() or c in '_':
+ s = pos
+ pos += 1
+ while pos < end: # find end of symbol
+ d = program[pos]
+ if not (d.isalnum() or d == "_"):
+ break
+ pos += 1
+ sym = program[s:pos]
+ yield ('symbol', sym, s)
+ pos -= 1
+ elif c == '}':
+ pos += 1
+ break
+ else:
+ raise error.ParseError(_("syntax error"), pos)
+ pos += 1
+ yield ('end', None, pos)
+
+def compiletemplate(tmpl, context):
+ parsed = []
+ pos, stop = 0, len(tmpl)
+ p = parser.parser(tokenizer, elements)
+
+ while pos < stop:
+ n = tmpl.find('{', pos)
+ if n < 0:
+ parsed.append(("string", tmpl[pos:]))
+ break
+ if n > 0 and tmpl[n - 1] == '\\':
+ # escaped
+ parsed.append(("string", tmpl[pos:n - 1] + "{"))
+ pos = n + 1
+ continue
+ if n > pos:
+ parsed.append(("string", tmpl[pos:n]))
+
+ pd = [tmpl, n + 1, stop]
+ parseres, pos = p.parse(pd)
+ parsed.append(parseres)
+
+ return [compileexp(e, context) for e in parsed]
+
+def compileexp(exp, context):
+ t = exp[0]
+ if t in methods:
+ return methods[t](exp, context)
+ raise error.ParseError(_("unknown method '%s'") % t)
+
+# template evaluation
+
+def getsymbol(exp):
+ if exp[0] == 'symbol':
+ return exp[1]
+ raise error.ParseError(_("expected a symbol"))
+
+def getlist(x):
+ if not x:
+ return []
+ if x[0] == 'list':
+ return getlist(x[1]) + [x[2]]
+ return [x]
+
+def getfilter(exp, context):
+ f = getsymbol(exp)
+ if f not in context._filters:
+ raise error.ParseError(_("unknown function '%s'") % f)
+ return context._filters[f]
+
+def gettemplate(exp, context):
+ if exp[0] == 'string':
+ return compiletemplate(exp[1], context)
+ if exp[0] == 'symbol':
+ return context._load(exp[1])
+ raise error.ParseError(_("expected template specifier"))
+
+def runstring(context, mapping, data):
+ return data
+
+def runsymbol(context, mapping, key):
+ v = mapping.get(key)
+ if v is None:
+ v = context._defaults.get(key, '')
+ if util.safehasattr(v, '__call__'):
+ return v(**mapping)
+ return v
+
+def buildfilter(exp, context):
+ func, data = compileexp(exp[1], context)
+ filt = getfilter(exp[2], context)
+ return (runfilter, (func, data, filt))
+
+def runfilter(context, mapping, data):
+ func, data, filt = data
+ return filt(func(context, mapping, data))
+
+def buildmap(exp, context):
+ func, data = compileexp(exp[1], context)
+ ctmpl = gettemplate(exp[2], context)
+ return (runmap, (func, data, ctmpl))
+
+def runmap(context, mapping, data):
+ func, data, ctmpl = data
+ d = func(context, mapping, data)
+ lm = mapping.copy()
+
+ for i in d:
+ if isinstance(i, dict):
+ lm.update(i)
+ for f, d in ctmpl:
+ yield f(context, lm, d)
+ else:
+ # v is not an iterable of dicts, this happen when 'key'
+ # has been fully expanded already and format is useless.
+ # If so, return the expanded value.
+ yield i
+
+def buildfunc(exp, context):
+ n = getsymbol(exp[1])
+ args = [compileexp(x, context) for x in getlist(exp[2])]
+ if n in funcs:
+ f = funcs[n]
+ return (f, args)
+ if n in context._filters:
+ if len(args) != 1:
+ raise error.ParseError(_("filter %s expects one argument") % n)
+ f = context._filters[n]
+ return (runfilter, (args[0][0], args[0][1], f))
+
+methods = {
+ "string": lambda e, c: (runstring, e[1]),
+ "symbol": lambda e, c: (runsymbol, e[1]),
+ "group": lambda e, c: compileexp(e[1], c),
+# ".": buildmember,
+ "|": buildfilter,
+ "%": buildmap,
+ "func": buildfunc,
+ }
+
+funcs = {
+}
+
+# template engine
+
+path = ['templates', '../templates']
+stringify = templatefilters.stringify
+
+def _flatten(thing):
+ '''yield a single stream from a possibly nested set of iterators'''
+ if isinstance(thing, str):
+ yield thing
+ elif not util.safehasattr(thing, '__iter__'):
+ if thing is not None:
+ yield str(thing)
+ else:
+ for i in thing:
+ if isinstance(i, str):
+ yield i
+ elif not util.safehasattr(i, '__iter__'):
+ if i is not None:
+ yield str(i)
+ elif i is not None:
+ for j in _flatten(i):
+ yield j
+
+def parsestring(s, quoted=True):
+ '''parse a string using simple c-like syntax.
+ string must be in quotes if quoted is True.'''
+ if quoted:
+ if len(s) < 2 or s[0] != s[-1]:
+ raise SyntaxError(_('unmatched quotes'))
+ return s[1:-1].decode('string_escape')
+
+ return s.decode('string_escape')
+
+class engine(object):
+ '''template expansion engine.
+
+ template expansion works like this. a map file contains key=value
+ pairs. if value is quoted, it is treated as string. otherwise, it
+ is treated as name of template file.
+
+ templater is asked to expand a key in map. it looks up key, and
+ looks for strings like this: {foo}. it expands {foo} by looking up
+ foo in map, and substituting it. expansion is recursive: it stops
+ when there is no more {foo} to replace.
+
+ expansion also allows formatting and filtering.
+
+ format uses key to expand each item in list. syntax is
+ {key%format}.
+
+ filter uses function to transform value. syntax is
+ {key|filter1|filter2|...}.'''
+
+ def __init__(self, loader, filters={}, defaults={}):
+ self._loader = loader
+ self._filters = filters
+ self._defaults = defaults
+ self._cache = {}
+
+ def _load(self, t):
+ '''load, parse, and cache a template'''
+ if t not in self._cache:
+ self._cache[t] = compiletemplate(self._loader(t), self)
+ return self._cache[t]
+
+ def process(self, t, mapping):
+ '''Perform expansion. t is name of map element to expand.
+ mapping contains added elements for use during expansion. Is a
+ generator.'''
+ return _flatten(func(self, mapping, data) for func, data in
+ self._load(t))
+
+engines = {'default': engine}
+
+class templater(object):
+
+ def __init__(self, mapfile, filters={}, defaults={}, cache={},
+ minchunk=1024, maxchunk=65536):
+ '''set up template engine.
+ mapfile is name of file to read map definitions from.
+ filters is dict of functions. each transforms a value into another.
+ defaults is dict of default map definitions.'''
+ self.mapfile = mapfile or 'template'
+ self.cache = cache.copy()
+ self.map = {}
+ self.base = (mapfile and os.path.dirname(mapfile)) or ''
+ self.filters = templatefilters.filters.copy()
+ self.filters.update(filters)
+ self.defaults = defaults
+ self.minchunk, self.maxchunk = minchunk, maxchunk
+ self.ecache = {}
+
+ if not mapfile:
+ return
+ if not os.path.exists(mapfile):
+ raise util.Abort(_('style not found: %s') % mapfile)
+
+ conf = config.config()
+ conf.read(mapfile)
+
+ for key, val in conf[''].items():
+ if val[0] in "'\"":
+ try:
+ self.cache[key] = parsestring(val)
+ except SyntaxError, inst:
+ raise SyntaxError('%s: %s' %
+ (conf.source('', key), inst.args[0]))
+ else:
+ val = 'default', val
+ if ':' in val[1]:
+ val = val[1].split(':', 1)
+ self.map[key] = val[0], os.path.join(self.base, val[1])
+
+ def __contains__(self, key):
+ return key in self.cache or key in self.map
+
+ def load(self, t):
+ '''Get the template for the given template name. Use a local cache.'''
+ if not t in self.cache:
+ try:
+ self.cache[t] = util.readfile(self.map[t][1])
+ except KeyError, inst:
+ raise util.Abort(_('"%s" not in template map') % inst.args[0])
+ except IOError, inst:
+ raise IOError(inst.args[0], _('template file %s: %s') %
+ (self.map[t][1], inst.args[1]))
+ return self.cache[t]
+
+ def __call__(self, t, **mapping):
+ ttype = t in self.map and self.map[t][0] or 'default'
+ if ttype not in self.ecache:
+ self.ecache[ttype] = engines[ttype](self.load,
+ self.filters, self.defaults)
+ proc = self.ecache[ttype]
+
+ stream = proc.process(t, mapping)
+ if self.minchunk:
+ stream = util.increasingchunks(stream, min=self.minchunk,
+ max=self.maxchunk)
+ return stream
+
+def templatepath(name=None):
+ '''return location of template file or directory (if no name).
+ returns None if not found.'''
+ normpaths = []
+
+ # executable version (py2exe) doesn't support __file__
+ if util.mainfrozen():
+ module = sys.executable
+ else:
+ module = __file__
+ for f in path:
+ if f.startswith('/'):
+ p = f
+ else:
+ fl = f.split('/')
+ p = os.path.join(os.path.dirname(module), *fl)
+ if name:
+ p = os.path.join(p, name)
+ if name and os.path.exists(p):
+ return os.path.normpath(p)
+ elif os.path.isdir(p):
+ normpaths.append(os.path.normpath(p))
+
+ return normpaths
+
+def stylemap(styles, paths=None):
+ """Return path to mapfile for a given style.
+
+ Searches mapfile in the following locations:
+ 1. templatepath/style/map
+ 2. templatepath/map-style
+ 3. templatepath/map
+ """
+
+ if paths is None:
+ paths = templatepath()
+ elif isinstance(paths, str):
+ paths = [paths]
+
+ if isinstance(styles, str):
+ styles = [styles]
+
+ for style in styles:
+ if not style:
+ continue
+ locations = [os.path.join(style, 'map'), 'map-' + style]
+ locations.append('map')
+
+ for path in paths:
+ for location in locations:
+ mapfile = os.path.join(path, location)
+ if os.path.isfile(mapfile):
+ return style, mapfile
+
+ raise RuntimeError("No hgweb templates found in %r" % paths)
diff --git a/websdk/mercurial/templates/atom/bookmarkentry.tmpl b/websdk/mercurial/templates/atom/bookmarkentry.tmpl
new file mode 100644
index 0000000..3edef27
--- /dev/null
+++ b/websdk/mercurial/templates/atom/bookmarkentry.tmpl
@@ -0,0 +1,8 @@
+ <entry>
+ <title>{bookmark|escape}</title>
+ <link rel="alternate" href="{urlbase}{url}rev/{node|short}"/>
+ <id>{urlbase}{url}#bookmark-{node}</id>
+ <updated>{date|rfc3339date}</updated>
+ <published>{date|rfc3339date}</published>
+ <content type="text">{bookmark|strip|escape}</content>
+ </entry>
diff --git a/websdk/mercurial/templates/atom/bookmarks.tmpl b/websdk/mercurial/templates/atom/bookmarks.tmpl
new file mode 100644
index 0000000..547c3a9
--- /dev/null
+++ b/websdk/mercurial/templates/atom/bookmarks.tmpl
@@ -0,0 +1,11 @@
+{header}
+ <id>{urlbase}{url}</id>
+ <link rel="self" href="{urlbase}{url}atom-bookmarks"/>
+ <link rel="alternate" href="{urlbase}{url}bookmarks"/>
+ <title>{repo|escape}: bookmarks</title>
+ <summary>{repo|escape} bookmark history</summary>
+ <author><name>Mercurial SCM</name></author>
+ {latestentry%feedupdated}
+
+{entries%bookmarkentry}
+</feed>
diff --git a/websdk/mercurial/templates/atom/changelog.tmpl b/websdk/mercurial/templates/atom/changelog.tmpl
new file mode 100644
index 0000000..29902ab
--- /dev/null
+++ b/websdk/mercurial/templates/atom/changelog.tmpl
@@ -0,0 +1,10 @@
+{header}
+ <!-- Changelog -->
+ <id>{urlbase}{url}</id>
+ <link rel="self" href="{urlbase}{url}atom-log"/>
+ <link rel="alternate" href="{urlbase}{url}"/>
+ <title>{repo|escape} Changelog</title>
+ {latestentry%feedupdated}
+
+{entries%changelogentry}
+</feed>
diff --git a/websdk/mercurial/templates/atom/changelogentry.tmpl b/websdk/mercurial/templates/atom/changelogentry.tmpl
new file mode 100644
index 0000000..068b5ea
--- /dev/null
+++ b/websdk/mercurial/templates/atom/changelogentry.tmpl
@@ -0,0 +1,16 @@
+ <entry>
+ <title>{desc|strip|firstline|strip|escape|nonempty}</title>
+ <id>{urlbase}{url}#changeset-{node}</id>
+ <link href="{urlbase}{url}rev/{node|short}"/>
+ <author>
+ <name>{author|person|escape}</name>
+ <email>{author|email|obfuscate}</email>
+ </author>
+ <updated>{date|rfc3339date}</updated>
+ <published>{date|rfc3339date}</published>
+ <content type="xhtml">
+ <div xmlns="http://www.w3.org/1999/xhtml">
+ <pre xml:space="preserve">{desc|escape|nonempty}</pre>
+ </div>
+ </content>
+ </entry>
diff --git a/websdk/mercurial/templates/atom/error.tmpl b/websdk/mercurial/templates/atom/error.tmpl
new file mode 100644
index 0000000..5735fba
--- /dev/null
+++ b/websdk/mercurial/templates/atom/error.tmpl
@@ -0,0 +1,17 @@
+{header}
+ <!-- Error -->
+ <id>{urlbase}{url}</id>
+ <link rel="self" href="{urlbase}{url}atom-log"/>
+ <link rel="alternate" href="{urlbase}{url}"/>
+ <title>Error</title>
+ <updated>1970-01-01T00:00:00+00:00</updated>
+ <entry>
+ <title>Error</title>
+ <id>http://mercurial.selenic.com/#error</id>
+ <author>
+ <name>mercurial</name>
+ </author>
+ <updated>1970-01-01T00:00:00+00:00</updated>
+ <content type="text">{error|escape}</content>
+ </entry>
+</feed>
diff --git a/websdk/mercurial/templates/atom/filelog.tmpl b/websdk/mercurial/templates/atom/filelog.tmpl
new file mode 100644
index 0000000..99d4e9b
--- /dev/null
+++ b/websdk/mercurial/templates/atom/filelog.tmpl
@@ -0,0 +1,8 @@
+{header}
+ <id>{urlbase}{url}atom-log/tip/{file|escape}</id>
+ <link rel="self" href="{urlbase}{url}atom-log/tip/{file|urlescape}"/>
+ <title>{repo|escape}: {file|escape} history</title>
+ {latestentry%feedupdated}
+
+{entries%changelogentry}
+</feed>
diff --git a/websdk/mercurial/templates/atom/header.tmpl b/websdk/mercurial/templates/atom/header.tmpl
new file mode 100644
index 0000000..90ffceb
--- /dev/null
+++ b/websdk/mercurial/templates/atom/header.tmpl
@@ -0,0 +1,2 @@
+<?xml version="1.0" encoding="{encoding}"?>
+<feed xmlns="http://www.w3.org/2005/Atom"> \ No newline at end of file
diff --git a/websdk/mercurial/templates/atom/map b/websdk/mercurial/templates/atom/map
new file mode 100644
index 0000000..b4d9324
--- /dev/null
+++ b/websdk/mercurial/templates/atom/map
@@ -0,0 +1,13 @@
+default = 'changelog'
+feedupdated = '<updated>{date|rfc3339date}</updated>'
+mimetype = 'application/atom+xml; charset={encoding}'
+header = header.tmpl
+changelog = changelog.tmpl
+changelogentry = changelogentry.tmpl
+filelog = filelog.tmpl
+filelogentry = filelogentry.tmpl
+tags = tags.tmpl
+tagentry = tagentry.tmpl
+bookmarks = bookmarks.tmpl
+bookmarkentry = bookmarkentry.tmpl
+error = error.tmpl
diff --git a/websdk/mercurial/templates/atom/tagentry.tmpl b/websdk/mercurial/templates/atom/tagentry.tmpl
new file mode 100644
index 0000000..857df12
--- /dev/null
+++ b/websdk/mercurial/templates/atom/tagentry.tmpl
@@ -0,0 +1,8 @@
+ <entry>
+ <title>{tag|escape}</title>
+ <link rel="alternate" href="{urlbase}{url}rev/{node|short}"/>
+ <id>{urlbase}{url}#tag-{node}</id>
+ <updated>{date|rfc3339date}</updated>
+ <published>{date|rfc3339date}</published>
+ <content type="text">{tag|strip|escape}</content>
+ </entry>
diff --git a/websdk/mercurial/templates/atom/tags.tmpl b/websdk/mercurial/templates/atom/tags.tmpl
new file mode 100644
index 0000000..82294ec
--- /dev/null
+++ b/websdk/mercurial/templates/atom/tags.tmpl
@@ -0,0 +1,11 @@
+{header}
+ <id>{urlbase}{url}</id>
+ <link rel="self" href="{urlbase}{url}atom-tags"/>
+ <link rel="alternate" href="{urlbase}{url}tags"/>
+ <title>{repo|escape}: tags</title>
+ <summary>{repo|escape} tag history</summary>
+ <author><name>Mercurial SCM</name></author>
+ {latestentry%feedupdated}
+
+{entriesnotip%tagentry}
+</feed>
diff --git a/websdk/mercurial/templates/coal/header.tmpl b/websdk/mercurial/templates/coal/header.tmpl
new file mode 100644
index 0000000..f44f73a
--- /dev/null
+++ b/websdk/mercurial/templates/coal/header.tmpl
@@ -0,0 +1,7 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en-US">
+<head>
+<link rel="icon" href="{staticurl}hgicon.png" type="image/png" />
+<meta name="robots" content="index, nofollow" />
+<link rel="stylesheet" href="{staticurl}style-coal.css" type="text/css" />
+<script type="text/javascript" src="{staticurl}mercurial.js"></script>
diff --git a/websdk/mercurial/templates/coal/map b/websdk/mercurial/templates/coal/map
new file mode 100644
index 0000000..39d04e7
--- /dev/null
+++ b/websdk/mercurial/templates/coal/map
@@ -0,0 +1,214 @@
+default = 'shortlog'
+
+mimetype = 'text/html; charset={encoding}'
+header = header.tmpl
+footer = ../paper/footer.tmpl
+search = ../paper/search.tmpl
+
+changelog = ../paper/shortlog.tmpl
+shortlog = ../paper/shortlog.tmpl
+shortlogentry = ../paper/shortlogentry.tmpl
+graph = ../paper/graph.tmpl
+
+help = ../paper/help.tmpl
+helptopics = ../paper/helptopics.tmpl
+
+helpentry = '<tr><td><a href="{url}help/{topic|escape}{sessionvars%urlparameter}">{topic|escape}</a></td><td>{summary|escape}</td></tr>'
+
+naventry = '<a href="{url}log/{node|short}{sessionvars%urlparameter}">{label|escape}</a> '
+navshortentry = '<a href="{url}shortlog/{node|short}{sessionvars%urlparameter}">{label|escape}</a> '
+navgraphentry = '<a href="{url}graph/{node|short}{sessionvars%urlparameter}">{label|escape}</a> '
+filenaventry = '<a href="{url}log/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{label|escape}</a> '
+filedifflink = '<a href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{file|escape}</a> '
+filenodelink = '<a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{file|escape}</a> '
+filenolink = '{file|escape} '
+fileellipses = '...'
+diffstatlink = ../paper/diffstat.tmpl
+diffstatnolink = ../paper/diffstat.tmpl
+changelogentry = ../paper/shortlogentry.tmpl
+searchentry = ../paper/shortlogentry.tmpl
+changeset = ../paper/changeset.tmpl
+manifest = ../paper/manifest.tmpl
+
+nav = '{before%naventry} {after%naventry}'
+navshort = '{before%navshortentry}{after%navshortentry}'
+navgraph = '{before%navgraphentry}{after%navgraphentry}'
+filenav = '{before%filenaventry}{after%filenaventry}'
+
+direntry = '
+ <tr class="fileline parity{parity}">
+ <td class="name">
+ <a href="{url}file/{node|short}{path|urlescape}{sessionvars%urlparameter}">
+ <img src="{staticurl}coal-folder.png" alt="dir."/> {basename|escape}/
+ </a>
+ <a href="{url}file/{node|short}{path|urlescape}/{emptydirs|urlescape}{sessionvars%urlparameter}">
+ {emptydirs|escape}
+ </a>
+ </td>
+ <td class="size"></td>
+ <td class="permissions">drwxr-xr-x</td>
+ </tr>'
+
+fileentry = '
+ <tr class="fileline parity{parity}">
+ <td class="filename">
+ <a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">
+ <img src="{staticurl}coal-file.png" alt="file"/> {basename|escape}
+ </a>
+ </td>
+ <td class="size">{size}</td>
+ <td class="permissions">{permissions|permissions}</td>
+ </tr>'
+
+filerevision = ../paper/filerevision.tmpl
+fileannotate = ../paper/fileannotate.tmpl
+filediff = ../paper/filediff.tmpl
+filelog = ../paper/filelog.tmpl
+fileline = '
+ <div class="parity{parity} source"><a href="#{lineid}" id="{lineid}">{linenumber}</a> {line|escape}</div>'
+filelogentry = ../paper/filelogentry.tmpl
+
+annotateline = '
+ <tr class="parity{parity}">
+ <td class="annotate">
+ <a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}#{targetline}"
+ title="{node|short}: {desc|escape|firstline}">{author|user}@{rev}</a>
+ </td>
+ <td class="source"><a href="#{lineid}" id="{lineid}">{linenumber}</a> {line|escape}</td>
+ </tr>'
+
+diffblock = '<div class="source bottomline parity{parity}"><pre>{lines}</pre></div>'
+difflineplus = '<a href="#{lineid}" id="{lineid}">{linenumber}</a> <span class="plusline">{line|escape}</span>'
+difflineminus = '<a href="#{lineid}" id="{lineid}">{linenumber}</a> <span class="minusline">{line|escape}</span>'
+difflineat = '<a href="#{lineid}" id="{lineid}">{linenumber}</a> <span class="atline">{line|escape}</span>'
+diffline = '<a href="#{lineid}" id="{lineid}">{linenumber}</a> {line|escape}'
+
+changelogparent = '
+ <tr>
+ <th class="parent">parent {rev}:</th>
+ <td class="parent"><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a></td>
+ </tr>'
+
+changesetparent = '<a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a> '
+
+filerevparent = '<a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{rename%filerename}{node|short}</a> '
+filerevchild = '<a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{node|short}</a> '
+
+filerename = '{file|escape}@'
+filelogrename = '
+ <span class="base">
+ base
+ <a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">
+ {file|escape}@{node|short}
+ </a>
+ </span>'
+fileannotateparent = '
+ <tr>
+ <td class="metatag">parent:</td>
+ <td>
+ <a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}">
+ {rename%filerename}{node|short}
+ </a>
+ </td>
+ </tr>'
+changesetchild = ' <a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a>'
+changelogchild = '
+ <tr>
+ <th class="child">child</th>
+ <td class="child">
+ <a href="{url}rev/{node|short}{sessionvars%urlparameter}">
+ {node|short}
+ </a>
+ </td>
+ </tr>'
+fileannotatechild = '
+ <tr>
+ <td class="metatag">child:</td>
+ <td>
+ <a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}">
+ {node|short}
+ </a>
+ </td>
+ </tr>'
+tags = ../paper/tags.tmpl
+tagentry = '
+ <tr class="tagEntry parity{parity}">
+ <td>
+ <a href="{url}rev/{node|short}{sessionvars%urlparameter}">
+ {tag|escape}
+ </a>
+ </td>
+ <td class="node">
+ {node|short}
+ </td>
+ </tr>'
+bookmarks = ../paper/bookmarks.tmpl
+bookmarkentry = '
+ <tr class="tagEntry parity{parity}">
+ <td>
+ <a href="{url}rev/{node|short}{sessionvars%urlparameter}">
+ {bookmark|escape}
+ </a>
+ </td>
+ <td class="node">
+ {node|short}
+ </td>
+ </tr>'
+branches = ../paper/branches.tmpl
+branchentry = '
+ <tr class="tagEntry parity{parity}">
+ <td>
+ <a href="{url}shortlog/{node|short}{sessionvars%urlparameter}" class="{status}">
+ {branch|escape}
+ </a>
+ </td>
+ <td class="node">
+ {node|short}
+ </td>
+ </tr>'
+changelogtag = '<span class="tag">{name|escape}</span> '
+changesettag = '<span class="tag">{tag|escape}</span> '
+changesetbookmark = '<span class="tag">{bookmark|escape}</span> '
+changelogbranchhead = '<span class="branchhead">{name|escape}</span> '
+changelogbranchname = '<span class="branchname">{name|escape}</span> '
+
+filediffparent = '
+ <tr>
+ <th class="parent">parent {rev}:</th>
+ <td class="parent"><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a></td>
+ </tr>'
+filelogparent = '
+ <tr>
+ <th>parent {rev}:</th>
+ <td><a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{node|short}</a></td>
+ </tr>'
+filediffchild = '
+ <tr>
+ <th class="child">child {rev}:</th>
+ <td class="child"><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a>
+ </td>
+ </tr>'
+filelogchild = '
+ <tr>
+ <th>child {rev}:</th>
+ <td><a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{node|short}</a></td>
+ </tr>'
+
+indexentry = '
+ <tr class="parity{parity}">
+ <td><a href="{url}{sessionvars%urlparameter}">{name|escape}</a></td>
+ <td>{description}</td>
+ <td>{contact|obfuscate}</td>
+ <td class="age">{lastchange|rfc822date}</td>
+ <td class="indexlinks">{archives%indexarchiveentry}</td>
+ </tr>\n'
+indexarchiveentry = '<a href="{url}archive/{node|short}{extension|urlescape}">&nbsp;&darr;{type|escape}</a>'
+index = ../paper/index.tmpl
+archiveentry = '
+ <li>
+ <a href="{url}archive/{node|short}{extension|urlescape}">{type|escape}</a>
+ </li>'
+notfound = ../paper/notfound.tmpl
+error = ../paper/error.tmpl
+urlparameter = '{separator}{name}={value|urlescape}'
+hiddenformentry = '<input type="hidden" name="{name}" value="{value|escape}" />'
diff --git a/websdk/mercurial/templates/gitweb/bookmarks.tmpl b/websdk/mercurial/templates/gitweb/bookmarks.tmpl
new file mode 100644
index 0000000..fc0e66a
--- /dev/null
+++ b/websdk/mercurial/templates/gitweb/bookmarks.tmpl
@@ -0,0 +1,32 @@
+{header}
+<title>{repo|escape}: Bookmarks</title>
+<link rel="alternate" type="application/atom+xml"
+ href="{url}atom-bookmarks" title="Atom feed for {repo|escape}"/>
+<link rel="alternate" type="application/rss+xml"
+ href="{url}rss-bookmarks" title="RSS feed for {repo|escape}"/>
+</head>
+<body>
+
+<div class="page_header">
+<a href="{logourl}" title="Mercurial" style="float: right;">Mercurial</a><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / bookmarks
+</div>
+
+<div class="page_nav">
+<a href="{url}summary{sessionvars%urlparameter}">summary</a> |
+<a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a> |
+<a href="{url}log{sessionvars%urlparameter}">changelog</a> |
+<a href="{url}graph{sessionvars%urlparameter}">graph</a> |
+<a href="{url}tags{sessionvars%urlparameter}">tags</a> |
+bookmarks |
+<a href="{url}branches{sessionvars%urlparameter}">branches</a> |
+<a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a> |
+<a href="{url}help{sessionvars%urlparameter}">help</a>
+<br/>
+</div>
+
+<div class="title">&nbsp;</div>
+<table cellspacing="0">
+{entries%bookmarkentry}
+</table>
+
+{footer}
diff --git a/websdk/mercurial/templates/gitweb/branches.tmpl b/websdk/mercurial/templates/gitweb/branches.tmpl
new file mode 100644
index 0000000..89f5457
--- /dev/null
+++ b/websdk/mercurial/templates/gitweb/branches.tmpl
@@ -0,0 +1,32 @@
+{header}
+<title>{repo|escape}: Branches</title>
+<link rel="alternate" type="application/atom+xml"
+ href="{url}atom-tags" title="Atom feed for {repo|escape}"/>
+<link rel="alternate" type="application/rss+xml"
+ href="{url}rss-tags" title="RSS feed for {repo|escape}"/>
+</head>
+<body>
+
+<div class="page_header">
+<a href="{logourl}" title="Mercurial" style="float: right;">Mercurial</a><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / branches
+</div>
+
+<div class="page_nav">
+<a href="{url}summary{sessionvars%urlparameter}">summary</a> |
+<a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a> |
+<a href="{url}log{sessionvars%urlparameter}">changelog</a> |
+<a href="{url}graph{sessionvars%urlparameter}">graph</a> |
+<a href="{url}tags{sessionvars%urlparameter}">tags</a> |
+<a href="{url}bookmarks{sessionvars%urlparameter}">bookmarks</a> |
+branches |
+<a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a> |
+<a href="{url}help{sessionvars%urlparameter}">help</a>
+<br/>
+</div>
+
+<div class="title">&nbsp;</div>
+<table cellspacing="0">
+{entries%branchentry}
+</table>
+
+{footer}
diff --git a/websdk/mercurial/templates/gitweb/changelog.tmpl b/websdk/mercurial/templates/gitweb/changelog.tmpl
new file mode 100644
index 0000000..56d16d0
--- /dev/null
+++ b/websdk/mercurial/templates/gitweb/changelog.tmpl
@@ -0,0 +1,41 @@
+{header}
+<title>{repo|escape}: Changelog</title>
+<link rel="alternate" type="application/atom+xml"
+ href="{url}atom-log" title="Atom feed for {repo|escape}"/>
+<link rel="alternate" type="application/rss+xml"
+ href="{url}rss-log" title="RSS feed for {repo|escape}"/>
+</head>
+<body>
+
+<div class="page_header">
+<a href="{logourl}" title="Mercurial" style="float: right;">Mercurial</a><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / changelog
+</div>
+
+<form action="{url}log">
+{sessionvars%hiddenformentry}
+<div class="search">
+<input type="text" name="rev" />
+</div>
+</form>
+
+<div class="page_nav">
+<a href="{url}summary{sessionvars%urlparameter}">summary</a> |
+<a href="{url}shortlog/{rev}{sessionvars%urlparameter}">shortlog</a> |
+changelog |
+<a href="{url}graph{sessionvars%urlparameter}">graph</a> |
+<a href="{url}tags{sessionvars%urlparameter}">tags</a> |
+<a href="{url}bookmarks{sessionvars%urlparameter}">bookmarks</a> |
+<a href="{url}branches{sessionvars%urlparameter}">branches</a> |
+<a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a>{archives%archiveentry} |
+<a href="{url}help{sessionvars%urlparameter}">help</a>
+<br/>
+{changenav%nav}<br/>
+</div>
+
+{entries%changelogentry}
+
+<div class="page_nav">
+{changenav%nav}<br/>
+</div>
+
+{footer}
diff --git a/websdk/mercurial/templates/gitweb/changelogentry.tmpl b/websdk/mercurial/templates/gitweb/changelogentry.tmpl
new file mode 100644
index 0000000..3935de6
--- /dev/null
+++ b/websdk/mercurial/templates/gitweb/changelogentry.tmpl
@@ -0,0 +1,14 @@
+<div>
+<a class="title" href="{url}rev/{node|short}{sessionvars%urlparameter}"><span class="age">{date|rfc822date}</span>{desc|strip|firstline|escape|nonempty}<span class="logtags"> {inbranch%inbranchtag}{branches%branchtag}{tags%tagtag}{bookmarks%bookmarktag}</span></a>
+</div>
+<div class="title_text">
+<div class="log_link">
+<a href="{url}rev/{node|short}{sessionvars%urlparameter}">changeset</a><br/>
+</div>
+<i>{author|obfuscate} [{date|rfc822date}] rev {rev}</i><br/>
+</div>
+<div class="log_body">
+{desc|strip|escape|addbreaks|nonempty}
+<br/>
+<br/>
+</div>
diff --git a/websdk/mercurial/templates/gitweb/changeset.tmpl b/websdk/mercurial/templates/gitweb/changeset.tmpl
new file mode 100644
index 0000000..7c8f731
--- /dev/null
+++ b/websdk/mercurial/templates/gitweb/changeset.tmpl
@@ -0,0 +1,53 @@
+{header}
+<title>{repo|escape}: changeset {rev}:{node|short}</title>
+<link rel="alternate" type="application/atom+xml"
+ href="{url}atom-log" title="Atom feed for {repo|escape}"/>
+<link rel="alternate" type="application/rss+xml"
+ href="{url}rss-log" title="RSS feed for {repo|escape}"/>
+</head>
+<body>
+
+<div class="page_header">
+<a href="{logourl}" title="Mercurial" style="float: right;">Mercurial</a><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / changeset
+</div>
+
+<div class="page_nav">
+<a href="{url}summary{sessionvars%urlparameter}">summary</a> |
+<a href="{url}shortlog/{rev}{sessionvars%urlparameter}">shortlog</a> |
+<a href="{url}log/{rev}{sessionvars%urlparameter}">changelog</a> |
+<a href="{url}graph{sessionvars%urlparameter}">graph</a> |
+<a href="{url}tags{sessionvars%urlparameter}">tags</a> |
+<a href="{url}bookmarks{sessionvars%urlparameter}">bookmarks</a> |
+<a href="{url}branches{sessionvars%urlparameter}">branches</a> |
+<a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a> |
+changeset |
+<a href="{url}raw-rev/{node|short}">raw</a> {archives%archiveentry} |
+<a href="{url}help{sessionvars%urlparameter}">help</a>
+<br/>
+</div>
+
+<div>
+<a class="title" href="{url}raw-rev/{node|short}">{desc|strip|escape|firstline|nonempty} <span class="logtags">{inbranch%inbranchtag}{branches%branchtag}{tags%tagtag}{bookmarks%bookmarktag}</span></a>
+</div>
+<div class="title_text">
+<table cellspacing="0">
+<tr><td>author</td><td>{author|obfuscate}</td></tr>
+<tr><td></td><td class="date age">{date|rfc822date}</td></tr>
+{branch%changesetbranch}
+<tr><td>changeset {rev}</td><td style="font-family:monospace">{node|short}</td></tr>
+{parent%changesetparent}
+{child%changesetchild}
+</table></div>
+
+<div class="page_body">
+{desc|strip|escape|addbreaks|nonempty}
+</div>
+<div class="list_head"></div>
+<div class="title_text">
+<table cellspacing="0">
+{files}
+</table></div>
+
+<div class="page_body">{diff}</div>
+
+{footer}
diff --git a/websdk/mercurial/templates/gitweb/error.tmpl b/websdk/mercurial/templates/gitweb/error.tmpl
new file mode 100644
index 0000000..b59046f
--- /dev/null
+++ b/websdk/mercurial/templates/gitweb/error.tmpl
@@ -0,0 +1,33 @@
+{header}
+<title>{repo|escape}: Error</title>
+<link rel="alternate" type="application/atom+xml"
+ href="{url}atom-log" title="Atom feed for {repo|escape}"/>
+<link rel="alternate" type="application/rss+xml"
+ href="{url}rss-log" title="RSS feed for {repo|escape}"/>
+</head>
+<body>
+
+<div class="page_header">
+<a href="{logourl}" title="Mercurial" style="float: right;">Mercurial</a><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / error
+</div>
+
+<div class="page_nav">
+<a href="{url}summary{sessionvars%urlparameter}">summary</a> |
+<a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a> |
+<a href="{url}log{sessionvars%urlparameter}">changelog</a> |
+<a href="{url}tags{sessionvars%urlparameter}">tags</a> |
+<a href="{url}bookmarks{sessionvars%urlparameter}">bookmarks</a> |
+<a href="{url}branches{sessionvars%urlparameter}">branches</a> |
+<a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a> |
+<a href="{url}help{sessionvars%urlparameter}">help</a>
+<br/>
+</div>
+
+<div class="page_body">
+<br/>
+<i>An error occurred while processing your request</i><br/>
+<br/>
+{error|escape}
+</div>
+
+{footer}
diff --git a/websdk/mercurial/templates/gitweb/fileannotate.tmpl b/websdk/mercurial/templates/gitweb/fileannotate.tmpl
new file mode 100644
index 0000000..abb1aed
--- /dev/null
+++ b/websdk/mercurial/templates/gitweb/fileannotate.tmpl
@@ -0,0 +1,65 @@
+{header}
+<title>{repo|escape}: {file|escape}@{node|short} (annotated)</title>
+<link rel="alternate" type="application/atom+xml"
+ href="{url}atom-log" title="Atom feed for {repo|escape}"/>
+<link rel="alternate" type="application/rss+xml"
+ href="{url}rss-log" title="RSS feed for {repo|escape}"/>
+</head>
+<body>
+
+<div class="page_header">
+<a href="{logourl}" title="Mercurial" style="float: right;">Mercurial</a><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / annotate
+</div>
+
+<div class="page_nav">
+<a href="{url}summary{sessionvars%urlparameter}">summary</a> |
+<a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a> |
+<a href="{url}log{sessionvars%urlparameter}">changelog</a> |
+<a href="{url}graph{sessionvars%urlparameter}">graph</a> |
+<a href="{url}tags{sessionvars%urlparameter}">tags</a> |
+<a href="{url}bookmarks{sessionvars%urlparameter}">bookmarks</a> |
+<a href="{url}branches{sessionvars%urlparameter}">branches</a> |
+<a href="{url}file/{node|short}{path|urlescape}{sessionvars%urlparameter}">files</a> |
+<a href="{url}rev/{node|short}{sessionvars%urlparameter}">changeset</a> |
+<a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">file</a> |
+<a href="{url}file/tip/{file|urlescape}{sessionvars%urlparameter}">latest</a> |
+<a href="{url}log/{node|short}/{file|urlescape}{sessionvars%urlparameter}">revisions</a> |
+annotate |
+<a href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">diff</a> |
+<a href="{url}raw-annotate/{node|short}/{file|urlescape}">raw</a> |
+<a href="{url}help{sessionvars%urlparameter}">help</a>
+<br/>
+</div>
+
+<div class="title">{file|escape}</div>
+
+<div class="title_text">
+<table cellspacing="0">
+<tr>
+ <td>author</td>
+ <td>{author|obfuscate}</td></tr>
+<tr>
+ <td></td>
+ <td class="date age">{date|rfc822date}</td></tr>
+{branch%filerevbranch}
+<tr>
+ <td>changeset {rev}</td>
+ <td style="font-family:monospace"><a class="list" href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a></td></tr>
+{parent%fileannotateparent}
+{child%fileannotatechild}
+<tr>
+ <td>permissions</td>
+ <td style="font-family:monospace">{permissions|permissions}</td></tr>
+</table>
+</div>
+
+<div class="page_path">
+{desc|strip|escape|addbreaks|nonempty}
+</div>
+<div class="page_body">
+<table>
+{annotate%annotateline}
+</table>
+</div>
+
+{footer}
diff --git a/websdk/mercurial/templates/gitweb/filediff.tmpl b/websdk/mercurial/templates/gitweb/filediff.tmpl
new file mode 100644
index 0000000..5450adb
--- /dev/null
+++ b/websdk/mercurial/templates/gitweb/filediff.tmpl
@@ -0,0 +1,50 @@
+{header}
+<title>{repo|escape}: diff {file|escape}</title>
+<link rel="alternate" type="application/atom+xml"
+ href="{url}atom-log" title="Atom feed for {repo|escape}"/>
+<link rel="alternate" type="application/rss+xml"
+ href="{url}rss-log" title="RSS feed for {repo|escape}"/>
+</head>
+<body>
+
+<div class="page_header">
+<a href="{logourl}" title="Mercurial" style="float: right;">Mercurial</a><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / diff
+</div>
+
+<div class="page_nav">
+<a href="{url}summary{sessionvars%urlparameter}">summary</a> |
+<a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a> |
+<a href="{url}log{sessionvars%urlparameter}">changelog</a> |
+<a href="{url}graph{sessionvars%urlparameter}">graph</a> |
+<a href="{url}tags{sessionvars%urlparameter}">tags</a> |
+<a href="{url}bookmarks{sessionvars%urlparameter}">bookmarks</a> |
+<a href="{url}branches{sessionvars%urlparameter}">branches</a> |
+<a href="{url}file/{node|short}{path|urlescape}{sessionvars%urlparameter}">files</a> |
+<a href="{url}rev/{node|short}{sessionvars%urlparameter}">changeset</a> |
+<a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">file</a> |
+<a href="{url}file/tip/{file|urlescape}{sessionvars%urlparameter}">latest</a> |
+<a href="{url}log/{node|short}/{file|urlescape}{sessionvars%urlparameter}">revisions</a> |
+<a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}">annotate</a> |
+diff |
+<a href="{url}raw-diff/{node|short}/{file|urlescape}">raw</a> |
+<a href="{url}help{sessionvars%urlparameter}">help</a><br/>
+</div>
+
+<div class="title">{file|escape}</div>
+
+<table>
+{branch%filerevbranch}
+<tr>
+ <td>changeset {rev}</td>
+ <td style="font-family:monospace"><a class="list" href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a></td></tr>
+{parent%filediffparent}
+{child%filediffchild}
+</table>
+
+<div class="list_head"></div>
+
+<div class="page_body">
+{diff}
+</div>
+
+{footer}
diff --git a/websdk/mercurial/templates/gitweb/filelog.tmpl b/websdk/mercurial/templates/gitweb/filelog.tmpl
new file mode 100644
index 0000000..52c042b
--- /dev/null
+++ b/websdk/mercurial/templates/gitweb/filelog.tmpl
@@ -0,0 +1,42 @@
+{header}
+<title>{repo|escape}: File revisions</title>
+<link rel="alternate" type="application/atom+xml"
+ href="{url}atom-log" title="Atom feed for {repo|escape}"/>
+<link rel="alternate" type="application/rss+xml"
+ href="{url}rss-log" title="RSS feed for {repo|escape}"/>
+</head>
+<body>
+
+<div class="page_header">
+<a href="{logourl}" title="Mercurial" style="float: right;">Mercurial</a><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / file revisions
+</div>
+
+<div class="page_nav">
+<a href="{url}summary{sessionvars%urlparameter}">summary</a> |
+<a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a> |
+<a href="{url}log{sessionvars%urlparameter}">changelog</a> |
+<a href="{url}graph{sessionvars%urlparameter}">graph</a> |
+<a href="{url}tags{sessionvars%urlparameter}">tags</a> |
+<a href="{url}bookmarks{sessionvars%urlparameter}">bookmarks</a> |
+<a href="{url}branches{sessionvars%urlparameter}">branches</a> |
+<a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">file</a> |
+revisions |
+<a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}">annotate</a> |
+<a href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">diff</a> |
+<a href="{url}rss-log/tip/{file|urlescape}">rss</a> |
+<a href="{url}help{sessionvars%urlparameter}">help</a>
+<br/>
+{nav%filenav}
+</div>
+
+<div class="title" >{file|escape}</div>
+
+<table>
+{entries%filelogentry}
+</table>
+
+<div class="page_nav">
+{nav%filenav}
+</div>
+
+{footer}
diff --git a/websdk/mercurial/templates/gitweb/filerevision.tmpl b/websdk/mercurial/templates/gitweb/filerevision.tmpl
new file mode 100644
index 0000000..966d254
--- /dev/null
+++ b/websdk/mercurial/templates/gitweb/filerevision.tmpl
@@ -0,0 +1,64 @@
+{header}
+<title>{repo|escape}: {file|escape}@{node|short}</title>
+<link rel="alternate" type="application/atom+xml"
+ href="{url}atom-log" title="Atom feed for {repo|escape}"/>
+<link rel="alternate" type="application/rss+xml"
+ href="{url}rss-log" title="RSS feed for {repo|escape}"/>
+</head>
+<body>
+
+<div class="page_header">
+<a href="{logourl}" title="Mercurial" style="float: right;">Mercurial</a><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / file revision
+</div>
+
+<div class="page_nav">
+<a href="{url}summary{sessionvars%urlparameter}">summary</a> |
+<a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a> |
+<a href="{url}log{sessionvars%urlparameter}">changelog</a> |
+<a href="{url}graph{sessionvars%urlparameter}">graph</a> |
+<a href="{url}tags{sessionvars%urlparameter}">tags</a> |
+<a href="{url}bookmarks{sessionvars%urlparameter}">bookmarks</a> |
+<a href="{url}branches{sessionvars%urlparameter}">branches</a> |
+<a href="{url}file/{node|short}{path|urlescape}{sessionvars%urlparameter}">files</a> |
+<a href="{url}rev/{node|short}{sessionvars%urlparameter}">changeset</a> |
+file |
+<a href="{url}file/tip/{file|urlescape}{sessionvars%urlparameter}">latest</a> |
+<a href="{url}log/{node|short}/{file|urlescape}{sessionvars%urlparameter}">revisions</a> |
+<a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}">annotate</a> |
+<a href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">diff</a> |
+<a href="{url}raw-file/{node|short}/{file|urlescape}">raw</a> |
+<a href="{url}help{sessionvars%urlparameter}">help</a>
+<br/>
+</div>
+
+<div class="title">{file|escape}</div>
+
+<div class="title_text">
+<table cellspacing="0">
+<tr>
+ <td>author</td>
+ <td>{author|obfuscate}</td></tr>
+<tr>
+ <td></td>
+ <td class="date age">{date|rfc822date}</td></tr>
+{branch%filerevbranch}
+<tr>
+ <td>changeset {rev}</td>
+ <td style="font-family:monospace"><a class="list" href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a></td></tr>
+{parent%filerevparent}
+{child%filerevchild}
+<tr>
+ <td>permissions</td>
+ <td style="font-family:monospace">{permissions|permissions}</td></tr>
+</table>
+</div>
+
+<div class="page_path">
+{desc|strip|escape|addbreaks|nonempty}
+</div>
+
+<div class="page_body">
+{text%fileline}
+</div>
+
+{footer}
diff --git a/websdk/mercurial/templates/gitweb/footer.tmpl b/websdk/mercurial/templates/gitweb/footer.tmpl
new file mode 100644
index 0000000..298800b
--- /dev/null
+++ b/websdk/mercurial/templates/gitweb/footer.tmpl
@@ -0,0 +1,12 @@
+<script type="text/javascript">process_dates()</script>
+<div class="page_footer">
+<div class="page_footer_text">{repo|escape}</div>
+<div class="rss_logo">
+<a href="{url}rss-log">RSS</a>
+<a href="{url}atom-log">Atom</a>
+</div>
+<br />
+{motd}
+</div>
+</body>
+</html>
diff --git a/websdk/mercurial/templates/gitweb/graph.tmpl b/websdk/mercurial/templates/gitweb/graph.tmpl
new file mode 100644
index 0000000..0ddcf18
--- /dev/null
+++ b/websdk/mercurial/templates/gitweb/graph.tmpl
@@ -0,0 +1,128 @@
+{header}
+<title>{repo|escape}: Graph</title>
+<link rel="alternate" type="application/atom+xml"
+ href="{url}atom-log" title="Atom feed for {repo|escape}"/>
+<link rel="alternate" type="application/rss+xml"
+ href="{url}rss-log" title="RSS feed for {repo|escape}"/>
+<!--[if IE]><script type="text/javascript" src="{staticurl}excanvas.js"></script><![endif]-->
+</head>
+<body>
+
+<div class="page_header">
+<a href="{logourl}" title="Mercurial" style="float: right;">Mercurial</a><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / graph
+</div>
+
+<form action="{url}log">
+{sessionvars%hiddenformentry}
+<div class="search">
+<input type="text" name="rev" />
+</div>
+</form>
+<div class="page_nav">
+<a href="{url}summary{sessionvars%urlparameter}">summary</a> |
+<a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a> |
+<a href="{url}log/{rev}{sessionvars%urlparameter}">changelog</a> |
+graph |
+<a href="{url}tags{sessionvars%urlparameter}">tags</a> |
+<a href="{url}bookmarks{sessionvars%urlparameter}">bookmarks</a> |
+<a href="{url}branches{sessionvars%urlparameter}">branches</a> |
+<a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a> |
+<a href="{url}help{sessionvars%urlparameter}">help</a>
+<br/>
+<a href="{url}graph/{rev}{lessvars%urlparameter}">less</a>
+<a href="{url}graph/{rev}{morevars%urlparameter}">more</a>
+| {changenav%navgraph}<br/>
+</div>
+
+<div class="title">&nbsp;</div>
+
+<noscript>The revision graph only works with JavaScript-enabled browsers.</noscript>
+
+<div id="wrapper">
+<ul id="nodebgs"></ul>
+<canvas id="graph" width="480" height="{canvasheight}"></canvas>
+<ul id="graphnodes"></ul>
+</div>
+
+<script>
+<!-- hide script content
+
+var data = {jsdata|json};
+var graph = new Graph();
+graph.scale({bg_height});
+
+graph.edge = function(x0, y0, x1, y1, color) \{
+
+ this.setColor(color, 0.0, 0.65);
+ this.ctx.beginPath();
+ this.ctx.moveTo(x0, y0);
+ this.ctx.lineTo(x1, y1);
+ this.ctx.stroke();
+
+}
+
+var revlink = '<li style="_STYLE"><span class="desc">';
+revlink += '<a class="list" href="{url}rev/_NODEID{sessionvars%urlparameter}" title="_NODEID"><b>_DESC</b></a>';
+revlink += '</span> _TAGS';
+revlink += '<span class="info">_DATE, by _USER</span></li>';
+
+graph.vertex = function(x, y, color, parity, cur) \{
+
+ this.ctx.beginPath();
+ color = this.setColor(color, 0.25, 0.75);
+ this.ctx.arc(x, y, radius, 0, Math.PI * 2, true);
+ this.ctx.fill();
+
+ var bg = '<li class="bg parity' + parity + '"></li>';
+ var left = (this.columns + 1) * this.bg_height;
+ var nstyle = 'padding-left: ' + left + 'px;';
+ var item = revlink.replace(/_STYLE/, nstyle);
+ item = item.replace(/_PARITY/, 'parity' + parity);
+ item = item.replace(/_NODEID/, cur[0]);
+ item = item.replace(/_NODEID/, cur[0]);
+ item = item.replace(/_DESC/, cur[3]);
+ item = item.replace(/_USER/, cur[4]);
+ item = item.replace(/_DATE/, cur[5]);
+
+ var tagspan = '';
+ if (cur[7].length || cur[8].length || (cur[6][0] != 'default' || cur[6][1])) \{
+ tagspan = '<span class="logtags">';
+ if (cur[6][1]) \{
+ tagspan += '<span class="branchtag" title="' + cur[6][0] + '">';
+ tagspan += cur[6][0] + '</span> ';
+ } else if (!cur[6][1] && cur[6][0] != 'default') \{
+ tagspan += '<span class="inbranchtag" title="' + cur[6][0] + '">';
+ tagspan += cur[6][0] + '</span> ';
+ }
+ if (cur[7].length) \{
+ for (var t in cur[7]) \{
+ var tag = cur[7][t];
+ tagspan += '<span class="tagtag">' + tag + '</span> ';
+ }
+ }
+ if (cur[8].length) \{
+ for (var t in cur[8]) \{
+ var bookmark = cur[8][t];
+ tagspan += '<span class="bookmarktag">' + bookmark + '</span> ';
+ }
+ }
+ tagspan += '</span>';
+ }
+
+ item = item.replace(/_TAGS/, tagspan);
+ return [bg, item];
+
+}
+
+graph.render(data);
+
+// stop hiding script -->
+</script>
+
+<div class="page_nav">
+<a href="{url}graph/{rev}{lessvars%urlparameter}">less</a>
+<a href="{url}graph/{rev}{morevars%urlparameter}">more</a>
+| {changenav%navgraph}
+</div>
+
+{footer}
diff --git a/websdk/mercurial/templates/gitweb/header.tmpl b/websdk/mercurial/templates/gitweb/header.tmpl
new file mode 100644
index 0000000..8074dde
--- /dev/null
+++ b/websdk/mercurial/templates/gitweb/header.tmpl
@@ -0,0 +1,8 @@
+<?xml version="1.0" encoding="{encoding}"?>
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en-US" lang="en-US">
+<head>
+<link rel="icon" href="{staticurl}hgicon.png" type="image/png" />
+<meta name="robots" content="index, nofollow"/>
+<link rel="stylesheet" href="{staticurl}style-gitweb.css" type="text/css" />
+<script type="text/javascript" src="{staticurl}mercurial.js"></script>
diff --git a/websdk/mercurial/templates/gitweb/help.tmpl b/websdk/mercurial/templates/gitweb/help.tmpl
new file mode 100644
index 0000000..2e90b82
--- /dev/null
+++ b/websdk/mercurial/templates/gitweb/help.tmpl
@@ -0,0 +1,33 @@
+{header}
+<title>{repo|escape}: Help</title>
+<link rel="alternate" type="application/atom+xml"
+ href="{url}atom-tags" title="Atom feed for {repo|escape}"/>
+<link rel="alternate" type="application/rss+xml"
+ href="{url}rss-tags" title="RSS feed for {repo|escape}"/>
+</head>
+<body>
+
+<div class="page_header">
+<a href="{logourl}" title="Mercurial" style="float: right;">Mercurial</a><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / help
+</div>
+
+<div class="page_nav">
+<a href="{url}summary{sessionvars%urlparameter}">summary</a> |
+<a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a> |
+<a href="{url}log{sessionvars%urlparameter}">changelog</a> |
+<a href="{url}graph{sessionvars%urlparameter}">graph</a> |
+<a href="{url}tags{sessionvars%urlparameter}">tags</a> |
+<a href="{url}bookmarks{sessionvars%urlparameter}">bookmarks</a> |
+<a href="{url}branches{sessionvars%urlparameter}">branches</a> |
+<a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a> |
+help
+<br/>
+</div>
+
+<div class="title">&nbsp;</div>
+
+<pre>
+{doc|escape}
+</pre>
+
+{footer}
diff --git a/websdk/mercurial/templates/gitweb/helptopics.tmpl b/websdk/mercurial/templates/gitweb/helptopics.tmpl
new file mode 100644
index 0000000..2f6c26d
--- /dev/null
+++ b/websdk/mercurial/templates/gitweb/helptopics.tmpl
@@ -0,0 +1,39 @@
+{header}
+<title>{repo|escape}: Help</title>
+<link rel="alternate" type="application/atom+xml"
+ href="{url}atom-tags" title="Atom feed for {repo|escape}"/>
+<link rel="alternate" type="application/rss+xml"
+ href="{url}rss-tags" title="RSS feed for {repo|escape}"/>
+</head>
+<body>
+
+<div class="page_header">
+<a href="{logourl}" title="Mercurial" style="float: right;">Mercurial</a><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / help
+</div>
+
+<div class="page_nav">
+<a href="{url}summary{sessionvars%urlparameter}">summary</a> |
+<a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a> |
+<a href="{url}log{sessionvars%urlparameter}">changelog</a> |
+<a href="{url}graph{sessionvars%urlparameter}">graph</a> |
+<a href="{url}tags{sessionvars%urlparameter}">tags</a> |
+<a href="{url}bookmarks{sessionvars%urlparameter}">bookmarks</a> |
+<a href="{url}branches{sessionvars%urlparameter}">branches</a> |
+<a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a> |
+help
+<br/>
+</div>
+
+<div class="title">&nbsp;</div>
+<table cellspacing="0">
+<tr><td colspan="2"><h2><a name="topics" href="#topics">Topics</a></h2></td></tr>
+{topics % helpentry}
+
+<tr><td colspan="2"><h2><a name="main" href="#main">Main Commands</a></h2></td></tr>
+{earlycommands % helpentry}
+
+<tr><td colspan="2"><h2><a name="other" href="#other">Other Commands</a></h2></td></tr>
+{othercommands % helpentry}
+</table>
+
+{footer}
diff --git a/websdk/mercurial/templates/gitweb/index.tmpl b/websdk/mercurial/templates/gitweb/index.tmpl
new file mode 100644
index 0000000..3e0f4a2
--- /dev/null
+++ b/websdk/mercurial/templates/gitweb/index.tmpl
@@ -0,0 +1,26 @@
+{header}
+<title>Mercurial repositories index</title>
+</head>
+<body>
+
+<div class="page_header">
+ <a href="{logourl}" title="Mercurial" style="float: right;">Mercurial</a>
+ Repositories list
+</div>
+
+<table cellspacing="0">
+ <tr>
+ <td><a href="?sort={sort_name}">Name</a></td>
+ <td><a href="?sort={sort_description}">Description</a></td>
+ <td><a href="?sort={sort_contact}">Contact</a></td>
+ <td><a href="?sort={sort_lastchange}">Last modified</a></td>
+ <td>&nbsp;</td>
+ <td>&nbsp;</td>
+ </tr>
+ {entries%indexentry}
+</table>
+<div class="page_footer">
+{motd}
+</div>
+</body>
+</html>
diff --git a/websdk/mercurial/templates/gitweb/manifest.tmpl b/websdk/mercurial/templates/gitweb/manifest.tmpl
new file mode 100644
index 0000000..41e8c81
--- /dev/null
+++ b/websdk/mercurial/templates/gitweb/manifest.tmpl
@@ -0,0 +1,41 @@
+{header}
+<title>{repo|escape}: files</title>
+<link rel="alternate" type="application/atom+xml"
+ href="{url}atom-log" title="Atom feed for {repo|escape}"/>
+<link rel="alternate" type="application/rss+xml"
+ href="{url}rss-log" title="RSS feed for {repo|escape}"/>
+</head>
+<body>
+
+<div class="page_header">
+<a href="{logourl}" title="Mercurial" style="float: right;">Mercurial</a><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / files
+</div>
+
+<div class="page_nav">
+<a href="{url}summary{sessionvars%urlparameter}">summary</a> |
+<a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a> |
+<a href="{url}log{sessionvars%urlparameter}">changelog</a> |
+<a href="{url}graph{sessionvars%urlparameter}">graph</a> |
+<a href="{url}tags{sessionvars%urlparameter}">tags</a> |
+<a href="{url}bookmarks{sessionvars%urlparameter}">bookmarks</a> |
+<a href="{url}branches{sessionvars%urlparameter}">branches</a> |
+files |
+<a href="{url}rev/{node|short}{sessionvars%urlparameter}">changeset</a> {archives%archiveentry} |
+<a href="{url}help{sessionvars%urlparameter}">help</a>
+<br/>
+</div>
+
+<div class="title">{path|escape} <span class="logtags">{inbranch%inbranchtag}{branches%branchtag}{tags%tagtag}{bookmarks%bookmarktag}</span></div>
+<table cellspacing="0">
+<tr class="parity{upparity}">
+<td style="font-family:monospace">drwxr-xr-x</td>
+<td style="font-family:monospace"></td>
+<td style="font-family:monospace"></td>
+<td><a href="{url}file/{node|short}{up|urlescape}{sessionvars%urlparameter}">[up]</a></td>
+<td class="link">&nbsp;</td>
+</tr>
+{dentries%direntry}
+{fentries%fileentry}
+</table>
+
+{footer}
diff --git a/websdk/mercurial/templates/gitweb/map b/websdk/mercurial/templates/gitweb/map
new file mode 100644
index 0000000..0d1db2f
--- /dev/null
+++ b/websdk/mercurial/templates/gitweb/map
@@ -0,0 +1,272 @@
+default = 'summary'
+mimetype = 'text/html; charset={encoding}'
+header = header.tmpl
+footer = footer.tmpl
+search = search.tmpl
+changelog = changelog.tmpl
+summary = summary.tmpl
+error = error.tmpl
+notfound = notfound.tmpl
+
+help = help.tmpl
+helptopics = helptopics.tmpl
+
+helpentry = '<tr><td><a href="{url}help/{topic|escape}{sessionvars%urlparameter}">{topic|escape}</a></td><td>{summary|escape}</td></tr>'
+
+naventry = '<a href="{url}log/{node|short}{sessionvars%urlparameter}">{label|escape}</a> '
+navshortentry = '<a href="{url}shortlog/{node|short}{sessionvars%urlparameter}">{label|escape}</a> '
+navgraphentry = '<a href="{url}graph/{node|short}{sessionvars%urlparameter}">{label|escape}</a> '
+filenaventry = '<a href="{url}log/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{label|escape}</a> '
+filedifflink = '<a href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{file|escape}</a> '
+filenodelink = '
+ <tr class="parity{parity}">
+ <td><a class="list" href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{file|escape}</a></td>
+ <td></td>
+ <td class="link">
+ <a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">file</a> |
+ <a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}">annotate</a> |
+ <a href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">diff</a> |
+ <a href="{url}log/{node|short}/{file|urlescape}{sessionvars%urlparameter}">revisions</a>
+ </td>
+ </tr>'
+filenolink = '
+ <tr class="parity{parity}">
+ <td><a class="list" href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{file|escape}</a></td>
+ <td></td>
+ <td class="link">
+ file |
+ annotate |
+ <a href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">diff</a> |
+ <a href="{url}log/{node|short}/{file|urlescape}{sessionvars%urlparameter}">revisions</a>
+ </td>
+ </tr>'
+
+nav = '{before%naventry} {after%naventry}'
+navshort = '{before%navshortentry}{after%navshortentry}'
+navgraph = '{before%navgraphentry}{after%navgraphentry}'
+filenav = '{before%filenaventry}{after%filenaventry}'
+
+fileellipses = '...'
+changelogentry = changelogentry.tmpl
+searchentry = changelogentry.tmpl
+changeset = changeset.tmpl
+manifest = manifest.tmpl
+direntry = '
+ <tr class="parity{parity}">
+ <td style="font-family:monospace">drwxr-xr-x</td>
+ <td style="font-family:monospace"></td>
+ <td style="font-family:monospace"></td>
+ <td>
+ <a href="{url}file/{node|short}{path|urlescape}{sessionvars%urlparameter}">{basename|escape}</a>
+ <a href="{url}file/{node|short}{path|urlescape}/{emptydirs|urlescape}{sessionvars%urlparameter}">{emptydirs|escape}</a>
+ </td>
+ <td class="link">
+ <a href="{url}file/{node|short}{path|urlescape}{sessionvars%urlparameter}">files</a>
+ </td>
+ </tr>'
+fileentry = '
+ <tr class="parity{parity}">
+ <td style="font-family:monospace">{permissions|permissions}</td>
+ <td style="font-family:monospace" align=right>{date|isodate}</td>
+ <td style="font-family:monospace" align=right>{size}</td>
+ <td class="list">
+ <a class="list" href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{basename|escape}</a>
+ </td>
+ <td class="link">
+ <a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">file</a> |
+ <a href="{url}log/{node|short}/{file|urlescape}{sessionvars%urlparameter}">revisions</a> |
+ <a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}">annotate</a>
+ </td>
+ </tr>'
+filerevision = filerevision.tmpl
+fileannotate = fileannotate.tmpl
+filediff = filediff.tmpl
+filelog = filelog.tmpl
+fileline = '
+ <div style="font-family:monospace" class="parity{parity}">
+ <pre><a class="linenr" href="#{lineid}" id="{lineid}">{linenumber}</a> {line|escape}</pre>
+ </div>'
+annotateline = '
+ <tr style="font-family:monospace" class="parity{parity}">
+ <td class="linenr" style="text-align: right;">
+ <a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}#l{targetline}"
+ title="{node|short}: {desc|escape|firstline}">{author|user}@{rev}</a>
+ </td>
+ <td><pre><a class="linenr" href="#{lineid}" id="{lineid}">{linenumber}</a></pre></td>
+ <td><pre>{line|escape}</pre></td>
+ </tr>'
+difflineplus = '<span style="color:#008800;"><a class="linenr" href="#{lineid}" id="{lineid}">{linenumber}</a> {line|escape}</span>'
+difflineminus = '<span style="color:#cc0000;"><a class="linenr" href="#{lineid}" id="{lineid}">{linenumber}</a> {line|escape}</span>'
+difflineat = '<span style="color:#990099;"><a class="linenr" href="#{lineid}" id="{lineid}">{linenumber}</a> {line|escape}</span>'
+diffline = '<span><a class="linenr" href="#{lineid}" id="{lineid}">{linenumber}</a> {line|escape}</span>'
+changelogparent = '
+ <tr>
+ <th class="parent">parent {rev}:</th>
+ <td class="parent">
+ <a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a>
+ </td>
+ </tr>'
+changesetbranch = '<tr><td>branch</td><td>{name}</td></tr>'
+changesetparent = '
+ <tr>
+ <td>parent {rev}</td>
+ <td style="font-family:monospace">
+ <a class="list" href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a>
+ </td>
+ </tr>'
+filerevbranch = '<tr><td>branch</td><td>{name}</td></tr>'
+filerevparent = '
+ <tr>
+ <td>parent {rev}</td>
+ <td style="font-family:monospace">
+ <a class="list" href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">
+ {rename%filerename}{node|short}
+ </a>
+ </td>
+ </tr>'
+filerename = '{file|escape}@'
+filelogrename = '| <a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">base</a>'
+fileannotateparent = '
+ <tr>
+ <td>parent {rev}</td>
+ <td style="font-family:monospace">
+ <a class="list" href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}">
+ {rename%filerename}{node|short}
+ </a>
+ </td>
+ </tr>'
+changelogchild = '
+ <tr>
+ <th class="child">child {rev}:</th>
+ <td class="child"><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a></td>
+ </tr>'
+changesetchild = '
+ <tr>
+ <td>child {rev}</td>
+ <td style="font-family:monospace">
+ <a class="list" href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a>
+ </td>
+ </tr>'
+filerevchild = '
+ <tr>
+ <td>child {rev}</td>
+ <td style="font-family:monospace">
+ <a class="list" href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{node|short}</a></td>
+ </tr>'
+fileannotatechild = '
+ <tr>
+ <td>child {rev}</td>
+ <td style="font-family:monospace">
+ <a class="list" href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{node|short}</a></td>
+ </tr>'
+tags = tags.tmpl
+tagentry = '
+ <tr class="parity{parity}">
+ <td class="age"><i class="age">{date|rfc822date}</i></td>
+ <td><a class="list" href="{url}rev/{node|short}{sessionvars%urlparameter}"><b>{tag|escape}</b></a></td>
+ <td class="link">
+ <a href="{url}rev/{node|short}{sessionvars%urlparameter}">changeset</a> |
+ <a href="{url}log/{node|short}{sessionvars%urlparameter}">changelog</a> |
+ <a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a>
+ </td>
+ </tr>'
+bookmarks = bookmarks.tmpl
+bookmarkentry = '
+ <tr class="parity{parity}">
+ <td class="age"><i class="age">{date|rfc822date}</i></td>
+ <td><a class="list" href="{url}rev/{node|short}{sessionvars%urlparameter}"><b>{bookmark|escape}</b></a></td>
+ <td class="link">
+ <a href="{url}rev/{node|short}{sessionvars%urlparameter}">changeset</a> |
+ <a href="{url}log/{node|short}{sessionvars%urlparameter}">changelog</a> |
+ <a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a>
+ </td>
+ </tr>'
+branches = branches.tmpl
+branchentry = '
+ <tr class="parity{parity}">
+ <td class="age"><i class="age">{date|rfc822date}</i></td>
+ <td><a class="list" href="{url}shortlog/{node|short}{sessionvars%urlparameter}"><b>{node|short}</b></a></td>
+ <td class="{status}">{branch|escape}</td>
+ <td class="link">
+ <a href="{url}changeset/{node|short}{sessionvars%urlparameter}">changeset</a> |
+ <a href="{url}log/{node|short}{sessionvars%urlparameter}">changelog</a> |
+ <a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a>
+ </td>
+ </tr>'
+diffblock = '<pre>{lines}</pre>'
+filediffparent = '
+ <tr>
+ <td>parent {rev}</td>
+ <td style="font-family:monospace">
+ <a class="list" href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">
+ {node|short}
+ </a>
+ </td>
+ </tr>'
+filelogparent = '
+ <tr>
+ <td align="right">parent {rev}:&nbsp;</td>
+ <td><a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{node|short}</a></td>
+ </tr>'
+filediffchild = '
+ <tr>
+ <td>child {rev}</td>
+ <td style="font-family:monospace">
+ <a class="list" href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{node|short}</a>
+ </td>
+ </tr>'
+filelogchild = '
+ <tr>
+ <td align="right">child {rev}:&nbsp;</td>
+    <td><a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{node|short}</a></td>
+ </tr>'
+shortlog = shortlog.tmpl
+graph = graph.tmpl
+tagtag = '<span class="tagtag" title="{name}">{name}</span> '
+branchtag = '<span class="branchtag" title="{name}">{name}</span> '
+inbranchtag = '<span class="inbranchtag" title="{name}">{name}</span> '
+bookmarktag = '<span class="bookmarktag" title="{name}">{name}</span> '
+shortlogentry = '
+ <tr class="parity{parity}">
+ <td class="age"><i class="age">{date|rfc822date}</i></td>
+ <td><i>{author|person}</i></td>
+ <td>
+ <a class="list" href="{url}rev/{node|short}{sessionvars%urlparameter}">
+ <b>{desc|strip|firstline|escape|nonempty}</b>
+ <span class="logtags">{inbranch%inbranchtag}{branches%branchtag}{tags%tagtag}{bookmarks%bookmarktag}</span>
+ </a>
+ </td>
+ <td class="link" nowrap>
+ <a href="{url}rev/{node|short}{sessionvars%urlparameter}">changeset</a> |
+ <a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a>
+ </td>
+ </tr>'
+filelogentry = '
+ <tr class="parity{parity}">
+ <td class="age"><i class="age">{date|rfc822date}</i></td>
+ <td>
+ <a class="list" href="{url}rev/{node|short}{sessionvars%urlparameter}">
+ <b>{desc|strip|firstline|escape|nonempty}</b>
+ </a>
+ </td>
+ <td class="link">
+ <a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">file</a>&nbsp;|&nbsp;<a href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">diff</a>&nbsp;|&nbsp;<a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}">annotate</a> {rename%filelogrename}</td>
+ </tr>'
+archiveentry = ' | <a href="{url}archive/{node|short}{extension}">{type|escape}</a> '
+indexentry = '
+ <tr class="parity{parity}">
+ <td>
+ <a class="list" href="{url}{sessionvars%urlparameter}">
+ <b>{name|escape}</b>
+ </a>
+ </td>
+ <td>{description}</td>
+ <td>{contact|obfuscate}</td>
+ <td class="age">{lastchange|rfc822date}</td>
+ <td class="indexlinks">{archives%indexarchiveentry}</td>
+ <td><div class="rss_logo"><a href="{url}rss-log">RSS</a> <a href="{url}atom-log">Atom</a></div></td>
+ </tr>\n'
+indexarchiveentry = ' <a href="{url}archive/{node|short}{extension}">{type|escape}</a> '
+index = index.tmpl
+urlparameter = '{separator}{name}={value|urlescape}'
+hiddenformentry = '<input type="hidden" name="{name}" value="{value|escape}" />'
diff --git a/websdk/mercurial/templates/gitweb/notfound.tmpl b/websdk/mercurial/templates/gitweb/notfound.tmpl
new file mode 100644
index 0000000..e9f74a3
--- /dev/null
+++ b/websdk/mercurial/templates/gitweb/notfound.tmpl
@@ -0,0 +1,18 @@
+{header}
+<title>Mercurial repository not found</title>
+</head>
+
+<body>
+
+<div class="page_header">
+<a href="{logourl}" title="Mercurial" style="float: right;">Mercurial</a> Not found: {repo|escape}
+</div>
+
+<div class="page_body">
+The specified repository "{repo|escape}" is unknown, sorry.
+<br/>
+<br/>
+Please go back to the <a href="{url}">main repository list page</a>.
+</div>
+
+{footer}
diff --git a/websdk/mercurial/templates/gitweb/search.tmpl b/websdk/mercurial/templates/gitweb/search.tmpl
new file mode 100644
index 0000000..2d77ed6
--- /dev/null
+++ b/websdk/mercurial/templates/gitweb/search.tmpl
@@ -0,0 +1,39 @@
+{header}
+<title>{repo|escape}: Search</title>
+<link rel="alternate" type="application/atom+xml"
+ href="{url}atom-log" title="Atom feed for {repo|escape}"/>
+<link rel="alternate" type="application/rss+xml"
+ href="{url}rss-log" title="RSS feed for {repo|escape}"/>
+</head>
+<body>
+
+<div class="page_header">
+<a href="{logourl}" title="Mercurial" style="float: right;">Mercurial</a><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / search
+
+<form action="{url}log">
+{sessionvars%hiddenformentry}
+<div class="search">
+<input type="text" name="rev" value="{query|escape}" />
+</div>
+</form>
+</div>
+
+<div class="page_nav">
+<a href="{url}summary{sessionvars%urlparameter}">summary</a> |
+<a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a> |
+<a href="{url}log{sessionvars%urlparameter}">changelog</a> |
+<a href="{url}graph{sessionvars%urlparameter}">graph</a> |
+<a href="{url}tags{sessionvars%urlparameter}">tags</a> |
+<a href="{url}bookmarks{sessionvars%urlparameter}">bookmarks</a> |
+<a href="{url}branches{sessionvars%urlparameter}">branches</a> |
+<a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a>{archives%archiveentry}
+ |
+ <a href="{url}help{sessionvars%urlparameter}">help</a>
+<br/>
+</div>
+
+<div class="title">searching for {query|escape}</div>
+
+{entries}
+
+{footer}
diff --git a/websdk/mercurial/templates/gitweb/shortlog.tmpl b/websdk/mercurial/templates/gitweb/shortlog.tmpl
new file mode 100644
index 0000000..2e89dbc
--- /dev/null
+++ b/websdk/mercurial/templates/gitweb/shortlog.tmpl
@@ -0,0 +1,42 @@
+{header}
+<title>{repo|escape}: Shortlog</title>
+<link rel="alternate" type="application/atom+xml"
+ href="{url}atom-log" title="Atom feed for {repo|escape}"/>
+<link rel="alternate" type="application/rss+xml"
+ href="{url}rss-log" title="RSS feed for {repo|escape}"/>
+</head>
+<body>
+
+<div class="page_header">
+<a href="{logourl}" title="Mercurial" style="float: right;">Mercurial</a><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / shortlog
+</div>
+
+<form action="{url}log">
+{sessionvars%hiddenformentry}
+<div class="search">
+<input type="text" name="rev" />
+</div>
+</form>
+<div class="page_nav">
+<a href="{url}summary{sessionvars%urlparameter}">summary</a> |
+shortlog |
+<a href="{url}log/{rev}{sessionvars%urlparameter}">changelog</a> |
+<a href="{url}graph{sessionvars%urlparameter}">graph</a> |
+<a href="{url}tags{sessionvars%urlparameter}">tags</a> |
+<a href="{url}bookmarks{sessionvars%urlparameter}">bookmarks</a> |
+<a href="{url}branches{sessionvars%urlparameter}">branches</a> |
+<a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a>{archives%archiveentry} |
+<a href="{url}help{sessionvars%urlparameter}">help</a>
+<br/>{changenav%navshort}<br/>
+</div>
+
+<div class="title">&nbsp;</div>
+<table cellspacing="0">
+{entries%shortlogentry}
+</table>
+
+<div class="page_nav">
+{changenav%navshort}
+</div>
+
+{footer}
diff --git a/websdk/mercurial/templates/gitweb/summary.tmpl b/websdk/mercurial/templates/gitweb/summary.tmpl
new file mode 100644
index 0000000..79f42a1
--- /dev/null
+++ b/websdk/mercurial/templates/gitweb/summary.tmpl
@@ -0,0 +1,66 @@
+{header}
+<title>{repo|escape}: Summary</title>
+<link rel="alternate" type="application/atom+xml"
+ href="{url}atom-log" title="Atom feed for {repo|escape}"/>
+<link rel="alternate" type="application/rss+xml"
+ href="{url}rss-log" title="RSS feed for {repo|escape}"/>
+</head>
+<body>
+
+<div class="page_header">
+<a href="{logourl}" title="Mercurial" style="float: right;">Mercurial</a><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / summary
+
+<form action="{url}log">
+{sessionvars%hiddenformentry}
+<div class="search">
+<input type="text" name="rev" />
+</div>
+</form>
+</div>
+
+<div class="page_nav">
+summary |
+<a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a> |
+<a href="{url}log{sessionvars%urlparameter}">changelog</a> |
+<a href="{url}graph{sessionvars%urlparameter}">graph</a> |
+<a href="{url}tags{sessionvars%urlparameter}">tags</a> |
+<a href="{url}bookmarks{sessionvars%urlparameter}">bookmarks</a> |
+<a href="{url}branches{sessionvars%urlparameter}">branches</a> |
+<a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a>{archives%archiveentry} |
+<a href="{url}help{sessionvars%urlparameter}">help</a>
+<br/>
+</div>
+
+<div class="title">&nbsp;</div>
+<table cellspacing="0">
+<tr><td>description</td><td>{desc}</td></tr>
+<tr><td>owner</td><td>{owner|obfuscate}</td></tr>
+<tr><td>last change</td><td>{lastchange|rfc822date}</td></tr>
+</table>
+
+<div><a class="title" href="{url}shortlog{sessionvars%urlparameter}">changes</a></div>
+<table cellspacing="0">
+{shortlog}
+<tr class="light"><td colspan="4"><a class="list" href="{url}shortlog{sessionvars%urlparameter}">...</a></td></tr>
+</table>
+
+<div><a class="title" href="{url}tags{sessionvars%urlparameter}">tags</a></div>
+<table cellspacing="0">
+{tags}
+<tr class="light"><td colspan="3"><a class="list" href="{url}tags{sessionvars%urlparameter}">...</a></td></tr>
+</table>
+
+<div><a class="title" href="{url}bookmarks{sessionvars%urlparameter}">bookmarks</a></div>
+<table cellspacing="0">
+{bookmarks%bookmarkentry}
+<tr class="light"><td colspan="3"><a class="list" href="{url}bookmarks{sessionvars%urlparameter}">...</a></td></tr>
+</table>
+
+<div><a class="title" href="#">branches</a></div>
+<table cellspacing="0">
+{branches%branchentry}
+<tr class="light">
+ <td colspan="4"><a class="list" href="#">...</a></td>
+</tr>
+</table>
+{footer}
diff --git a/websdk/mercurial/templates/gitweb/tags.tmpl b/websdk/mercurial/templates/gitweb/tags.tmpl
new file mode 100644
index 0000000..54cbe06
--- /dev/null
+++ b/websdk/mercurial/templates/gitweb/tags.tmpl
@@ -0,0 +1,32 @@
+{header}
+<title>{repo|escape}: Tags</title>
+<link rel="alternate" type="application/atom+xml"
+ href="{url}atom-tags" title="Atom feed for {repo|escape}"/>
+<link rel="alternate" type="application/rss+xml"
+ href="{url}rss-tags" title="RSS feed for {repo|escape}"/>
+</head>
+<body>
+
+<div class="page_header">
+<a href="{logourl}" title="Mercurial" style="float: right;">Mercurial</a><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / tags
+</div>
+
+<div class="page_nav">
+<a href="{url}summary{sessionvars%urlparameter}">summary</a> |
+<a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a> |
+<a href="{url}log{sessionvars%urlparameter}">changelog</a> |
+<a href="{url}graph{sessionvars%urlparameter}">graph</a> |
+tags |
+<a href="{url}bookmarks{sessionvars%urlparameter}">bookmarks</a> |
+<a href="{url}branches{sessionvars%urlparameter}">branches</a> |
+<a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a> |
+<a href="{url}help{sessionvars%urlparameter}">help</a>
+<br/>
+</div>
+
+<div class="title">&nbsp;</div>
+<table cellspacing="0">
+{entries%tagentry}
+</table>
+
+{footer}
diff --git a/websdk/mercurial/templates/map-cmdline.bisect b/websdk/mercurial/templates/map-cmdline.bisect
new file mode 100644
index 0000000..37c5ffb
--- /dev/null
+++ b/websdk/mercurial/templates/map-cmdline.bisect
@@ -0,0 +1,25 @@
+changeset = 'changeset: {rev}:{node|short}\nbisect: {bisect}\n{branches}{bookmarks}{tags}{parents}user: {author}\ndate: {date|date}\nsummary: {desc|firstline}\n\n'
+changeset_quiet = '{bisect|shortbisect} {rev}:{node|short}\n'
+changeset_verbose = 'changeset: {rev}:{node|short}\nbisect: {bisect}\n{branches}{bookmarks}{tags}{parents}user: {author}\ndate: {date|date}\n{files}{file_copies_switch}description:\n{desc|strip}\n\n\n'
+changeset_debug = 'changeset: {rev}:{node}\nbisect: {bisect}\n{branches}{bookmarks}{tags}{parents}{manifest}user: {author}\ndate: {date|date}\n{file_mods}{file_adds}{file_dels}{file_copies_switch}{extras}description:\n{desc|strip}\n\n\n'
+start_files = 'files: '
+file = ' {file}'
+end_files = '\n'
+start_file_mods = 'files: '
+file_mod = ' {file_mod}'
+end_file_mods = '\n'
+start_file_adds = 'files+: '
+file_add = ' {file_add}'
+end_file_adds = '\n'
+start_file_dels = 'files-: '
+file_del = ' {file_del}'
+end_file_dels = '\n'
+start_file_copies = 'copies: '
+file_copy = ' {name} ({source})'
+end_file_copies = '\n'
+parent = 'parent: {rev}:{node|formatnode}\n'
+manifest = 'manifest: {rev}:{node}\n'
+branch = 'branch: {branch}\n'
+tag = 'tag: {tag}\n'
+bookmark = 'bookmark: {bookmark}\n'
+extra = 'extra: {key}={value|stringescape}\n'
diff --git a/websdk/mercurial/templates/map-cmdline.changelog b/websdk/mercurial/templates/map-cmdline.changelog
new file mode 100644
index 0000000..f54134a
--- /dev/null
+++ b/websdk/mercurial/templates/map-cmdline.changelog
@@ -0,0 +1,17 @@
+header = '{date|shortdate} {author|person} <{author|email}>\n\n'
+header_verbose = ''
+changeset = '\t* {files|stringify|fill68|tabindent}{desc|fill68|tabindent|strip}\n\t[{node|short}]{tags}{branches}\n\n'
+changeset_quiet = '\t* {desc|firstline|fill68|tabindent|strip}\n\n'
+changeset_verbose = '{date|isodate} {author|person} <{author|email}> ({node|short}{tags}{branches})\n\n\t* {file_adds|stringify|fill68|tabindent}{file_dels|stringify|fill68|tabindent}{files|stringify|fill68|tabindent}{desc|fill68|tabindent|strip}\n\n'
+start_tags = ' ['
+tag = '{tag}, '
+last_tag = '{tag}]'
+start_branches = ' <'
+branch = '{branch}, '
+last_branch = '{branch}>'
+file = '{file}, '
+last_file = '{file}:\n\t'
+file_add = '{file_add}, '
+last_file_add = '{file_add}: new file.\n* '
+file_del = '{file_del}, '
+last_file_del = '{file_del}: deleted file.\n* '
diff --git a/websdk/mercurial/templates/map-cmdline.compact b/websdk/mercurial/templates/map-cmdline.compact
new file mode 100644
index 0000000..0a71639
--- /dev/null
+++ b/websdk/mercurial/templates/map-cmdline.compact
@@ -0,0 +1,12 @@
+changeset = '{rev}{tags}{bookmarks}{parents} {node|short} {date|isodate} {author|user}\n {desc|firstline|strip}\n\n'
+changeset_quiet = '{rev}:{node|short}\n'
+changeset_verbose = '{rev}{tags}{parents} {node|short} {date|isodate} {author}\n {desc|strip}\n\n'
+start_tags = '['
+tag = '{tag},'
+last_tag = '{tag}]'
+start_parents = ':'
+parent = '{rev},'
+last_parent = '{rev}'
+start_bookmarks = '['
+bookmark = '{bookmark},'
+last_bookmark = '{bookmark}]'
diff --git a/websdk/mercurial/templates/map-cmdline.default b/websdk/mercurial/templates/map-cmdline.default
new file mode 100644
index 0000000..aeb695b
--- /dev/null
+++ b/websdk/mercurial/templates/map-cmdline.default
@@ -0,0 +1,25 @@
+changeset = 'changeset: {rev}:{node|short}\n{branches}{bookmarks}{tags}{parents}user: {author}\ndate: {date|date}\nsummary: {desc|firstline}\n\n'
+changeset_quiet = '{rev}:{node|short}\n'
+changeset_verbose = 'changeset: {rev}:{node|short}\n{branches}{bookmarks}{tags}{parents}user: {author}\ndate: {date|date}\n{files}{file_copies_switch}description:\n{desc|strip}\n\n\n'
+changeset_debug = 'changeset: {rev}:{node}\n{branches}{bookmarks}{tags}{parents}{manifest}user: {author}\ndate: {date|date}\n{file_mods}{file_adds}{file_dels}{file_copies_switch}{extras}description:\n{desc|strip}\n\n\n'
+start_files = 'files: '
+file = ' {file}'
+end_files = '\n'
+start_file_mods = 'files: '
+file_mod = ' {file_mod}'
+end_file_mods = '\n'
+start_file_adds = 'files+: '
+file_add = ' {file_add}'
+end_file_adds = '\n'
+start_file_dels = 'files-: '
+file_del = ' {file_del}'
+end_file_dels = '\n'
+start_file_copies = 'copies: '
+file_copy = ' {name} ({source})'
+end_file_copies = '\n'
+parent = 'parent: {rev}:{node|formatnode}\n'
+manifest = 'manifest: {rev}:{node}\n'
+branch = 'branch: {branch}\n'
+tag = 'tag: {tag}\n'
+bookmark = 'bookmark: {bookmark}\n'
+extra = 'extra: {key}={value|stringescape}\n'
diff --git a/websdk/mercurial/templates/map-cmdline.xml b/websdk/mercurial/templates/map-cmdline.xml
new file mode 100644
index 0000000..ea1cfc6
--- /dev/null
+++ b/websdk/mercurial/templates/map-cmdline.xml
@@ -0,0 +1,20 @@
+header = '<?xml version="1.0"?>\n<log>\n'
+footer = '</log>\n'
+
+changeset = '<logentry revision="{rev}" node="{node}">\n{branches}{bookmarks}{tags}{parents}<author email="{author|email|xmlescape}">{author|person|xmlescape}</author>\n<date>{date|rfc3339date}</date>\n<msg xml:space="preserve">{desc|xmlescape}</msg>\n</logentry>\n'
+changeset_verbose = '<logentry revision="{rev}" node="{node}">\n{branches}{bookmarks}{tags}{parents}<author email="{author|email|xmlescape}">{author|person|xmlescape}</author>\n<date>{date|rfc3339date}</date>\n<msg xml:space="preserve">{desc|xmlescape}</msg>\n<paths>\n{file_adds}{file_dels}{file_mods}</paths>\n{file_copies}</logentry>\n'
+changeset_debug = '<logentry revision="{rev}" node="{node}">\n{branches}{bookmarks}{tags}{parents}<author email="{author|email|xmlescape}">{author|person|xmlescape}</author>\n<date>{date|rfc3339date}</date>\n<msg xml:space="preserve">{desc|xmlescape}</msg>\n<paths>\n{file_adds}{file_dels}{file_mods}</paths>\n{file_copies}{extras}</logentry>\n'
+
+file_add = '<path action="A">{file_add|xmlescape}</path>\n'
+file_mod = '<path action="M">{file_mod|xmlescape}</path>\n'
+file_del = '<path action="R">{file_del|xmlescape}</path>\n'
+
+start_file_copies = '<copies>\n'
+file_copy = '<copy source="{source|xmlescape}">{name|xmlescape}</copy>\n'
+end_file_copies = '</copies>\n'
+
+parent = '<parent revision="{rev}" node="{node}" />\n'
+branch = '<branch>{branch|xmlescape}</branch>\n'
+tag = '<tag>{tag|xmlescape}</tag>\n'
+bookmark = '<bookmark>{bookmark|xmlescape}</bookmark>\n'
+extra = '<extra key="{key|xmlescape}">{value|xmlescape}</extra>\n'
diff --git a/websdk/mercurial/templates/monoblue/bookmarks.tmpl b/websdk/mercurial/templates/monoblue/bookmarks.tmpl
new file mode 100644
index 0000000..7fb79ee
--- /dev/null
+++ b/websdk/mercurial/templates/monoblue/bookmarks.tmpl
@@ -0,0 +1,38 @@
+{header}
+ <title>{repo|escape}: Bookmarks</title>
+ <link rel="alternate" type="application/atom+xml" href="{url}atom-log" title="Atom feed for {repo|escape}"/>
+ <link rel="alternate" type="application/rss+xml" href="{url}rss-log" title="RSS feed for {repo|escape}"/>
+</head>
+
+<body>
+<div id="container">
+ <div class="page-header">
+ <h1><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / Bookmarks</h1>
+
+ <form action="{url}log">
+ {sessionvars%hiddenformentry}
+ <dl class="search">
+ <dt><label>Search: </label></dt>
+ <dd><input type="text" name="rev" /></dd>
+ </dl>
+ </form>
+
+ <ul class="page-nav">
+ <li><a href="{url}summary{sessionvars%urlparameter}">summary</a></li>
+ <li><a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a></li>
+ <li><a href="{url}changelog{sessionvars%urlparameter}">changelog</a></li>
+ <li><a href="{url}graph/{node|short}{sessionvars%urlparameter}">graph</a></li>
+ <li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li>
+ <li class="current">bookmarks</li>
+ <li><a href="{url}branches{sessionvars%urlparameter}">branches</a></li>
+ <li><a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a></li>
+ <li><a href="{url}help{sessionvars%urlparameter}">help</a></li>
+ </ul>
+ </div>
+
+ <h2 class="no-link no-border">bookmarks</h2>
+ <table cellspacing="0">
+{entries%bookmarkentry}
+ </table>
+
+{footer}
diff --git a/websdk/mercurial/templates/monoblue/branches.tmpl b/websdk/mercurial/templates/monoblue/branches.tmpl
new file mode 100644
index 0000000..969e5a1
--- /dev/null
+++ b/websdk/mercurial/templates/monoblue/branches.tmpl
@@ -0,0 +1,38 @@
+{header}
+ <title>{repo|escape}: Branches</title>
+ <link rel="alternate" type="application/atom+xml" href="{url}atom-log" title="Atom feed for {repo|escape}"/>
+ <link rel="alternate" type="application/rss+xml" href="{url}rss-log" title="RSS feed for {repo|escape}"/>
+</head>
+
+<body>
+<div id="container">
+ <div class="page-header">
+ <h1><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / Branches</h1>
+
+ <form action="{url}log">
+ {sessionvars%hiddenformentry}
+ <dl class="search">
+ <dt><label>Search: </label></dt>
+ <dd><input type="text" name="rev" /></dd>
+ </dl>
+ </form>
+
+ <ul class="page-nav">
+ <li><a href="{url}summary{sessionvars%urlparameter}">summary</a></li>
+ <li><a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a></li>
+ <li><a href="{url}changelog{sessionvars%urlparameter}">changelog</a></li>
+ <li><a href="{url}graph/{node|short}{sessionvars%urlparameter}">graph</a></li>
+ <li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li>
+ <li><a href="{url}bookmarks{sessionvars%urlparameter}">bookmarks</a></li>
+ <li class="current">branches</li>
+ <li><a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a></li>
+ <li><a href="{url}help{sessionvars%urlparameter}">help</a></li>
+ </ul>
+ </div>
+
+ <h2 class="no-link no-border">branches</h2>
+ <table cellspacing="0">
+{entries%branchentry}
+ </table>
+
+{footer}
diff --git a/websdk/mercurial/templates/monoblue/changelog.tmpl b/websdk/mercurial/templates/monoblue/changelog.tmpl
new file mode 100644
index 0000000..430c388
--- /dev/null
+++ b/websdk/mercurial/templates/monoblue/changelog.tmpl
@@ -0,0 +1,42 @@
+{header}
+ <title>{repo|escape}: changelog</title>
+ <link rel="alternate" type="application/atom+xml" href="{url}atom-log" title="Atom feed for {repo|escape}"/>
+ <link rel="alternate" type="application/rss+xml" href="{url}rss-log" title="RSS feed for {repo|escape}"/>
+</head>
+
+<body>
+<div id="container">
+ <div class="page-header">
+ <h1><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / changelog</h1>
+
+ <form action="{url}log">
+ {sessionvars%hiddenformentry}
+ <dl class="search">
+ <dt><label>Search: </label></dt>
+ <dd><input type="text" name="rev" /></dd>
+ </dl>
+ </form>
+
+ <ul class="page-nav">
+ <li><a href="{url}summary{sessionvars%urlparameter}">summary</a></li>
+ <li><a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a></li>
+ <li class="current">changelog</li>
+ <li><a href="{url}graph/{node|short}{sessionvars%urlparameter}">graph</a></li>
+ <li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li>
+ <li><a href="{url}bookmarks{sessionvars%urlparameter}">bookmarks</a></li>
+ <li><a href="{url}branches{sessionvars%urlparameter}">branches</a></li>
+ <li><a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a>{archives%archiveentry}</li>
+ <li><a href="{url}help{sessionvars%urlparameter}">help</a></li>
+ </ul>
+ </div>
+
+ <h2 class="no-link no-border">changelog</h2>
+ <div>
+ {entries%changelogentry}
+ </div>
+
+ <div class="page-path">
+{changenav%nav}
+ </div>
+
+{footer}
diff --git a/websdk/mercurial/templates/monoblue/changelogentry.tmpl b/websdk/mercurial/templates/monoblue/changelogentry.tmpl
new file mode 100644
index 0000000..1c04f07
--- /dev/null
+++ b/websdk/mercurial/templates/monoblue/changelogentry.tmpl
@@ -0,0 +1,6 @@
+<h3 class="changelog"><a class="title" href="{url}rev/{node|short}{sessionvars%urlparameter}">{desc|strip|firstline|escape|nonempty}<span class="logtags"> {inbranch%inbranchtag}{branches%branchtag}{tags%tagtag}{bookmarks%bookmarktag}</span></a></h3>
+<ul class="changelog-entry">
+ <li class="age">{date|rfc822date}</li>
+ <li>by <span class="name">{author|obfuscate}</span> <span class="revdate">[{date|rfc822date}] rev {rev}</span></li>
+ <li class="description">{desc|strip|escape|addbreaks|nonempty}</li>
+</ul>
diff --git a/websdk/mercurial/templates/monoblue/changeset.tmpl b/websdk/mercurial/templates/monoblue/changeset.tmpl
new file mode 100644
index 0000000..56d7bb0
--- /dev/null
+++ b/websdk/mercurial/templates/monoblue/changeset.tmpl
@@ -0,0 +1,65 @@
+{header}
+<title>{repo|escape}: changeset {rev}:{node|short}</title>
+ <link rel="alternate" type="application/atom+xml" href="{url}atom-log" title="Atom feed for {repo|escape}"/>
+ <link rel="alternate" type="application/rss+xml" href="{url}rss-log" title="RSS feed for {repo|escape}"/>
+</head>
+
+<body>
+<div id="container">
+ <div class="page-header">
+ <h1><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / files</h1>
+
+ <form action="{url}log">
+ {sessionvars%hiddenformentry}
+ <dl class="search">
+ <dt><label>Search: </label></dt>
+ <dd><input type="text" name="rev" /></dd>
+ </dl>
+ </form>
+
+ <ul class="page-nav">
+ <li><a href="{url}summary{sessionvars%urlparameter}">summary</a></li>
+ <li><a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a></li>
+ <li><a href="{url}changelog{sessionvars%urlparameter}">changelog</a></li>
+ <li><a href="{url}graph/{node|short}{sessionvars%urlparameter}">graph</a></li>
+ <li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li>
+ <li><a href="{url}bookmarks{sessionvars%urlparameter}">bookmarks</a></li>
+ <li><a href="{url}branches{sessionvars%urlparameter}">branches</a></li>
+ <li><a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a></li>
+ <li><a href="{url}help{sessionvars%urlparameter}">help</a></li>
+ </ul>
+ </div>
+
+ <ul class="submenu">
+ <li class="current">changeset</li>
+ <li><a href="{url}raw-rev/{node|short}">raw</a> {archives%archiveentry}</li>
+ </ul>
+
+ <h2 class="no-link no-border">changeset</h2>
+
+ <h3 class="changeset"><a href="{url}raw-rev/{node|short}">{desc|strip|escape|firstline|nonempty} <span class="logtags">{inbranch%inbranchtag}{branches%branchtag}{tags%tagtag}{bookmarks%bookmarktag}</span></a></h3>
+ <p class="changeset-age age">{date|rfc822date}</p>
+
+ <dl class="overview">
+ <dt>author</dt>
+ <dd>{author|obfuscate}</dd>
+ <dt>date</dt>
+ <dd>{date|rfc822date}</dd>
+ {branch%changesetbranch}
+ <dt>changeset {rev}</dt>
+ <dd>{node|short}</dd>
+ {parent%changesetparent}
+ {child%changesetchild}
+ </dl>
+
+ <p class="description">{desc|strip|escape|addbreaks|nonempty}</p>
+
+ <table>
+ {files}
+ </table>
+
+ <div class="diff">
+ {diff}
+ </div>
+
+{footer}
diff --git a/websdk/mercurial/templates/monoblue/error.tmpl b/websdk/mercurial/templates/monoblue/error.tmpl
new file mode 100644
index 0000000..edbef72
--- /dev/null
+++ b/websdk/mercurial/templates/monoblue/error.tmpl
@@ -0,0 +1,36 @@
+{header}
+ <title>{repo|escape}: Error</title>
+ <link rel="alternate" type="application/atom+xml" href="{url}atom-log" title="Atom feed for {repo|escape}"/>
+ <link rel="alternate" type="application/rss+xml" href="{url}rss-log" title="RSS feed for {repo|escape}"/>
+</head>
+
+<body>
+<div id="container">
+ <div class="page-header">
+ <h1><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / Not found: {repo|escape}</h1>
+
+ <form action="{url}log">
+ {sessionvars%hiddenformentry}
+ <dl class="search">
+ <dt><label>Search: </label></dt>
+ <dd><input type="text" name="rev" /></dd>
+ </dl>
+ </form>
+
+ <ul class="page-nav">
+ <li class="current">summary</li>
+ <li><a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a></li>
+ <li><a href="{url}log{sessionvars%urlparameter}">changelog</a></li>
+ <li><a href="{url}graph/{node|short}{sessionvars%urlparameter}">graph</a></li>
+ <li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li>
+ <li><a href="{url}bookmarks{sessionvars%urlparameter}">bookmarks</a></li>
+ <li><a href="{url}branches{sessionvars%urlparameter}">branches</a></li>
+ <li><a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a></li>
+ <li><a href="{url}help{sessionvars%urlparameter}">help</a></li>
+ </ul>
+ </div>
+
+ <h2 class="no-link no-border">An error occurred while processing your request</h2>
+ <p class="normal">{error|escape}</p>
+
+{footer}
diff --git a/websdk/mercurial/templates/monoblue/fileannotate.tmpl b/websdk/mercurial/templates/monoblue/fileannotate.tmpl
new file mode 100644
index 0000000..fb067c1
--- /dev/null
+++ b/websdk/mercurial/templates/monoblue/fileannotate.tmpl
@@ -0,0 +1,65 @@
+{header}
+<title>{repo|escape}: {file|escape}@{node|short} (annotated)</title>
+ <link rel="alternate" type="application/atom+xml" href="{url}atom-log" title="Atom feed for {repo|escape}"/>
+ <link rel="alternate" type="application/rss+xml" href="{url}rss-log" title="RSS feed for {repo|escape}"/>
+</head>
+
+<body>
+<div id="container">
+ <div class="page-header">
+ <h1><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / annotate</h1>
+
+ <form action="{url}log">
+ {sessionvars%hiddenformentry}
+ <dl class="search">
+ <dt><label>Search: </label></dt>
+ <dd><input type="text" name="rev" /></dd>
+ </dl>
+ </form>
+
+ <ul class="page-nav">
+ <li><a href="{url}summary{sessionvars%urlparameter}">summary</a></li>
+ <li><a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a></li>
+ <li><a href="{url}log{sessionvars%urlparameter}">changelog</a></li>
+ <li><a href="{url}graph/{node|short}{sessionvars%urlparameter}">graph</a></li>
+ <li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li>
+ <li><a href="{url}bookmarks{sessionvars%urlparameter}">bookmarks</a></li>
+ <li><a href="{url}branches{sessionvars%urlparameter}">branches</a></li>
+ <li><a href="{url}file/{node|short}{path|urlescape}{sessionvars%urlparameter}">files</a></li>
+ <li><a href="{url}help{sessionvars%urlparameter}">help</a></li>
+ </ul>
+ </div>
+
+ <ul class="submenu">
+ <li><a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">file</a></li>
+ <li><a href="{url}log/{node|short}/{file|urlescape}{sessionvars%urlparameter}">revisions</a></li>
+ <li class="current">annotate</li>
+ <li><a href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">diff</a></li>
+ <li><a href="{url}raw-annotate/{node|short}/{file|urlescape}">raw</a></li>
+ </ul>
+
+ <h2 class="no-link no-border">{file|escape}@{node|short} (annotated)</h2>
+ <h3 class="changeset">{file|escape}</h3>
+ <p class="changeset-age age">{date|rfc822date}</p>
+
+ <dl class="overview">
+ <dt>author</dt>
+ <dd>{author|obfuscate}</dd>
+ <dt>date</dt>
+ <dd>{date|rfc822date}</dd>
+ {branch%filerevbranch}
+ <dt>changeset {rev}</dt>
+ <dd><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a></dd>
+ {parent%fileannotateparent}
+ {child%fileannotatechild}
+ <dt>permissions</dt>
+ <dd>{permissions|permissions}</dd>
+ </dl>
+
+ <p class="description">{desc|strip|escape|addbreaks|nonempty}</p>
+
+ <table class="annotated">
+ {annotate%annotateline}
+ </table>
+
+{footer}
diff --git a/websdk/mercurial/templates/monoblue/filediff.tmpl b/websdk/mercurial/templates/monoblue/filediff.tmpl
new file mode 100644
index 0000000..e6298f9
--- /dev/null
+++ b/websdk/mercurial/templates/monoblue/filediff.tmpl
@@ -0,0 +1,56 @@
+{header}
+<title>{repo|escape}: diff {file|escape}</title>
+ <link rel="alternate" type="application/atom+xml" href="{url}atom-log" title="Atom feed for {repo|escape}"/>
+ <link rel="alternate" type="application/rss+xml" href="{url}rss-log" title="RSS feed for {repo|escape}"/>
+</head>
+
+<body>
+<div id="container">
+ <div class="page-header">
+ <h1><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / file diff</h1>
+
+ <form action="{url}log">
+ {sessionvars%hiddenformentry}
+ <dl class="search">
+ <dt><label>Search: </label></dt>
+ <dd><input type="text" name="rev" /></dd>
+ </dl>
+ </form>
+
+ <ul class="page-nav">
+ <li><a href="{url}summary{sessionvars%urlparameter}">summary</a></li>
+ <li><a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a></li>
+ <li><a href="{url}log{sessionvars%urlparameter}">changelog</a></li>
+ <li><a href="{url}graph/{node|short}{sessionvars%urlparameter}">graph</a></li>
+ <li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li>
+ <li><a href="{url}bookmarks{sessionvars%urlparameter}">bookmarks</a></li>
+ <li><a href="{url}branches{sessionvars%urlparameter}">branches</a></li>
+ <li><a href="{url}file/{node|short}{path|urlescape}{sessionvars%urlparameter}">files</a></li>
+ <li><a href="{url}help{sessionvars%urlparameter}">help</a></li>
+ </ul>
+ </div>
+
+ <ul class="submenu">
+ <li><a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">file</a></li>
+ <li><a href="{url}log/{node|short}/{file|urlescape}{sessionvars%urlparameter}">revisions</a></li>
+ <li><a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}">annotate</a></li>
+ <li class="current">diff</li>
+ <li><a href="{url}raw-diff/{node|short}/{file|urlescape}">raw</a></li>
+ </ul>
+
+ <h2 class="no-link no-border">diff: {file|escape}</h2>
+ <h3 class="changeset">{file|escape}</h3>
+
+ <dl class="overview">
+ {branch%filerevbranch}
+ <dt>changeset {rev}</dt>
+ <dd><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a></dd>
+ {parent%filediffparent}
+ {child%filediffchild}
+ </dl>
+
+ <div class="diff">
+ {diff}
+ </div>
+
+{footer}
diff --git a/websdk/mercurial/templates/monoblue/filelog.tmpl b/websdk/mercurial/templates/monoblue/filelog.tmpl
new file mode 100644
index 0000000..0e77f9f
--- /dev/null
+++ b/websdk/mercurial/templates/monoblue/filelog.tmpl
@@ -0,0 +1,51 @@
+{header}
+<title>{repo|escape}: File revisions</title>
+ <link rel="alternate" type="application/atom+xml" href="{url}atom-log" title="Atom feed for {repo|escape}"/>
+ <link rel="alternate" type="application/rss+xml" href="{url}rss-log" title="RSS feed for {repo|escape}"/>
+</head>
+
+<body>
+<div id="container">
+ <div class="page-header">
+ <h1><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / file revisions</h1>
+
+ <form action="{url}log">
+ {sessionvars%hiddenformentry}
+ <dl class="search">
+ <dt><label>Search: </label></dt>
+ <dd><input type="text" name="rev" /></dd>
+ </dl>
+ </form>
+
+ <ul class="page-nav">
+ <li><a href="{url}summary{sessionvars%urlparameter}">summary</a></li>
+ <li><a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a></li>
+ <li><a href="{url}log{sessionvars%urlparameter}">changelog</a></li>
+ <li><a href="{url}graph/{node|short}{sessionvars%urlparameter}">graph</a></li>
+ <li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li>
+ <li><a href="{url}bookmarks{sessionvars%urlparameter}">bookmarks</a></li>
+ <li><a href="{url}branches{sessionvars%urlparameter}">branches</a></li>
+ <li><a href="{url}file/{node|short}{path|urlescape}{sessionvars%urlparameter}">files</a></li>
+ <li><a href="{url}help{sessionvars%urlparameter}">help</a></li>
+ </ul>
+ </div>
+
+ <ul class="submenu">
+ <li><a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">file</a></li>
+ <li class="current">revisions</li>
+ <li><a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}">annotate</a></li>
+ <li><a href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">diff</a></li>
+ <li><a href="{url}rss-log/tip/{file|urlescape}">rss</a></li>
+ </ul>
+
+ <h2 class="no-link no-border">{file|urlescape}</h2>
+
+ <table>
+ {entries%filelogentry}
+ </table>
+
+ <div class="page-path">
+ {nav%filenav}
+ </div>
+
+{footer}
diff --git a/websdk/mercurial/templates/monoblue/filerevision.tmpl b/websdk/mercurial/templates/monoblue/filerevision.tmpl
new file mode 100644
index 0000000..7aaec8b
--- /dev/null
+++ b/websdk/mercurial/templates/monoblue/filerevision.tmpl
@@ -0,0 +1,65 @@
+{header}
+<title>{repo|escape}: {file|escape}@{node|short}</title>
+ <link rel="alternate" type="application/atom+xml" href="{url}atom-log" title="Atom feed for {repo|escape}"/>
+ <link rel="alternate" type="application/rss+xml" href="{url}rss-log" title="RSS feed for {repo|escape}"/>
+</head>
+
+<body>
+<div id="container">
+ <div class="page-header">
+ <h1><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / file revision</h1>
+
+ <form action="{url}log">
+ {sessionvars%hiddenformentry}
+ <dl class="search">
+ <dt><label>Search: </label></dt>
+ <dd><input type="text" name="rev" /></dd>
+ </dl>
+ </form>
+
+ <ul class="page-nav">
+ <li><a href="{url}summary{sessionvars%urlparameter}">summary</a></li>
+ <li><a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a></li>
+ <li><a href="{url}changelog{sessionvars%urlparameter}">changelog</a></li>
+ <li><a href="{url}graph/{node|short}{sessionvars%urlparameter}">graph</a></li>
+ <li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li>
+ <li><a href="{url}bookmarks{sessionvars%urlparameter}">bookmarks</a></li>
+ <li><a href="{url}branches{sessionvars%urlparameter}">branches</a></li>
+ <li><a href="{url}file/{node|short}{path|urlescape}{sessionvars%urlparameter}">files</a></li>
+ <li><a href="{url}help{sessionvars%urlparameter}">help</a></li>
+ </ul>
+ </div>
+
+ <ul class="submenu">
+ <li class="current">file</li>
+ <li><a href="{url}log/{node|short}/{file|urlescape}{sessionvars%urlparameter}">revisions</a></li>
+ <li><a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}">annotate</a></li>
+ <li><a href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">diff</a></li>
+ <li><a href="{url}raw-file/{node|short}/{file|urlescape}">raw</a></li>
+ </ul>
+
+ <h2 class="no-link no-border">{file|escape}@{node|short}</h2>
+ <h3 class="changeset">{file|escape}</h3>
+ <p class="changeset-age age">{date|rfc822date}</p>
+
+ <dl class="overview">
+ <dt>author</dt>
+ <dd>{author|obfuscate}</dd>
+ <dt>date</dt>
+ <dd>{date|rfc822date}</dd>
+ {branch%filerevbranch}
+ <dt>changeset {rev}</dt>
+ <dd><a class="list" href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a></dd>
+ {parent%filerevparent}
+ {child%filerevchild}
+ <dt>permissions</dt>
+ <dd>{permissions|permissions}</dd>
+ </dl>
+
+ <p class="description">{desc|strip|escape|addbreaks|nonempty}</p>
+
+ <div class="source">
+ {text%fileline}
+ </div>
+
+{footer}
diff --git a/websdk/mercurial/templates/monoblue/footer.tmpl b/websdk/mercurial/templates/monoblue/footer.tmpl
new file mode 100644
index 0000000..c19bdae
--- /dev/null
+++ b/websdk/mercurial/templates/monoblue/footer.tmpl
@@ -0,0 +1,23 @@
+ <script type="text/javascript">process_dates()</script>
+ <div class="page-footer">
+ <p>Mercurial Repository: {repo|escape}</p>
+ <ul class="rss-logo">
+ <li><a href="{url}rss-log">RSS</a></li>
+ <li><a href="{url}atom-log">Atom</a></li>
+ </ul>
+ {motd}
+ </div>
+
+ <div id="powered-by">
+ <p><a href="{logourl}" title="Mercurial"><img src="{staticurl}{logoimg}" width=75 height=90 border=0 alt="mercurial"></a></p>
+ </div>
+
+ <div id="corner-top-left"></div>
+ <div id="corner-top-right"></div>
+ <div id="corner-bottom-left"></div>
+ <div id="corner-bottom-right"></div>
+
+</div>
+
+</body>
+</html>
diff --git a/websdk/mercurial/templates/monoblue/graph.tmpl b/websdk/mercurial/templates/monoblue/graph.tmpl
new file mode 100644
index 0000000..08fd79d
--- /dev/null
+++ b/websdk/mercurial/templates/monoblue/graph.tmpl
@@ -0,0 +1,125 @@
+{header}
+ <title>{repo|escape}: graph</title>
+ <link rel="alternate" type="application/atom+xml" href="{url}atom-log" title="Atom feed for {repo|escape}"/>
+ <link rel="alternate" type="application/rss+xml" href="{url}rss-log" title="RSS feed for {repo|escape}"/>
+ <!--[if IE]><script type="text/javascript" src="{staticurl}excanvas.js"></script><![endif]-->
+</head>
+
+<body>
+<div id="container">
+ <div class="page-header">
+ <h1><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / graph</h1>
+
+ <form action="{url}log">
+ {sessionvars%hiddenformentry}
+ <dl class="search">
+ <dt><label>Search: </label></dt>
+ <dd><input type="text" name="rev" /></dd>
+ </dl>
+ </form>
+
+ <ul class="page-nav">
+ <li><a href="{url}summary{sessionvars%urlparameter}">summary</a></li>
+ <li><a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a></li>
+ <li><a href="{url}changelog{sessionvars%urlparameter}">changelog</a></li>
+ <li class="current">graph</li>
+ <li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li>
+ <li><a href="{url}bookmarks{sessionvars%urlparameter}">bookmarks</a></li>
+ <li><a href="{url}branches{sessionvars%urlparameter}">branches</a></li>
+ <li><a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a></li>
+ <li><a href="{url}help{sessionvars%urlparameter}">help</a></li>
+ </ul>
+ </div>
+
+ <h2 class="no-link no-border">graph</h2>
+
+ <div id="noscript">The revision graph only works with JavaScript-enabled browsers.</div>
+ <div id="wrapper">
+ <ul id="nodebgs"></ul>
+ <canvas id="graph" width="480" height="{canvasheight}"></canvas>
+ <ul id="graphnodes"></ul>
+ </div>
+
+ <script>
+ <!-- hide script content
+
+ document.getElementById('noscript').style.display = 'none';
+
+ var data = {jsdata|json};
+ var graph = new Graph();
+ graph.scale({bg_height});
+
+ graph.edge = function(x0, y0, x1, y1, color) \{
+
+ this.setColor(color, 0.0, 0.65);
+ this.ctx.beginPath();
+ this.ctx.moveTo(x0, y0);
+ this.ctx.lineTo(x1, y1);
+ this.ctx.stroke();
+
+ }
+
+ var revlink = '<li style="_STYLE"><span class="desc">';
+ revlink += '<a href="{url}rev/_NODEID{sessionvars%urlparameter}" title="_NODEID">_DESC</a>';
+ revlink += '</span>_TAGS<span class="info">_DATE, by _USER</span></li>';
+
+ graph.vertex = function(x, y, color, parity, cur) \{
+
+ this.ctx.beginPath();
+ color = this.setColor(color, 0.25, 0.75);
+ this.ctx.arc(x, y, radius, 0, Math.PI * 2, true);
+ this.ctx.fill();
+
+ var bg = '<li class="bg parity' + parity + '"></li>';
+ var left = (this.columns + 1) * this.bg_height;
+ var nstyle = 'padding-left: ' + left + 'px;';
+ var item = revlink.replace(/_STYLE/, nstyle);
+ item = item.replace(/_PARITY/, 'parity' + parity);
+ item = item.replace(/_NODEID/, cur[0]);
+ item = item.replace(/_NODEID/, cur[0]);
+ item = item.replace(/_DESC/, cur[3]);
+ item = item.replace(/_USER/, cur[4]);
+ item = item.replace(/_DATE/, cur[5]);
+
+ var tagspan = '';
+ if (cur[7].length || cur[8].length || (cur[6][0] != 'default' || cur[6][1])) \{
+ tagspan = '<span class="logtags">';
+ if (cur[6][1]) \{
+ tagspan += '<span class="branchtag" title="' + cur[6][0] + '">';
+ tagspan += cur[6][0] + '</span> ';
+ } else if (!cur[6][1] && cur[6][0] != 'default') \{
+ tagspan += '<span class="inbranchtag" title="' + cur[6][0] + '">';
+ tagspan += cur[6][0] + '</span> ';
+ }
+ if (cur[7].length) \{
+ for (var t in cur[7]) \{
+ var tag = cur[7][t];
+ tagspan += '<span class="tagtag">' + tag + '</span> ';
+ }
+ }
+ if (cur[8].length) \{
+ for (var t in cur[8]) \{
+ var bookmark = cur[8][t];
+ tagspan += '<span class="bookmarktag">' + bookmark + '</span> ';
+ }
+ }
+ tagspan += '</span>';
+ }
+
+ item = item.replace(/_TAGS/, tagspan);
+ return [bg, item];
+
+ }
+
+ graph.render(data);
+
+ // stop hiding script -->
+ </script>
+
+ <div class="page-path">
+ <a href="{url}graph/{rev}{lessvars%urlparameter}">less</a>
+ <a href="{url}graph/{rev}{morevars%urlparameter}">more</a>
+ | {changenav%navgraph}
+ </div>
+
+{footer}
diff --git a/websdk/mercurial/templates/monoblue/header.tmpl b/websdk/mercurial/templates/monoblue/header.tmpl
new file mode 100644
index 0000000..0c1f580
--- /dev/null
+++ b/websdk/mercurial/templates/monoblue/header.tmpl
@@ -0,0 +1,7 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
+<head>
+ <link rel="icon" href="{staticurl}hgicon.png" type="image/png" />
+ <meta name="robots" content="index, nofollow"/>
+ <link rel="stylesheet" href="{staticurl}style-monoblue.css" type="text/css" />
+ <script type="text/javascript" src="{staticurl}mercurial.js"></script>
diff --git a/websdk/mercurial/templates/monoblue/help.tmpl b/websdk/mercurial/templates/monoblue/help.tmpl
new file mode 100644
index 0000000..33468f2
--- /dev/null
+++ b/websdk/mercurial/templates/monoblue/help.tmpl
@@ -0,0 +1,38 @@
+{header}
+ <title>{repo|escape}: Help</title>
+ <link rel="alternate" type="application/atom+xml" href="{url}atom-log" title="Atom feed for {repo|escape}"/>
+ <link rel="alternate" type="application/rss+xml" href="{url}rss-log" title="RSS feed for {repo|escape}"/>
+</head>
+
+<body>
+<div id="container">
+ <div class="page-header">
+ <h1><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / Help</h1>
+
+ <form action="{url}log">
+ {sessionvars%hiddenformentry}
+ <dl class="search">
+ <dt><label>Search: </label></dt>
+ <dd><input type="text" name="rev" /></dd>
+ </dl>
+ </form>
+
+ <ul class="page-nav">
+ <li><a href="{url}summary{sessionvars%urlparameter}">summary</a></li>
+ <li><a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a></li>
+ <li><a href="{url}changelog{sessionvars%urlparameter}">changelog</a></li>
+ <li><a href="{url}graph/{node|short}{sessionvars%urlparameter}">graph</a></li>
+ <li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li>
+ <li><a href="{url}bookmarks{sessionvars%urlparameter}">bookmarks</a></li>
+ <li><a href="{url}branches{sessionvars%urlparameter}">branches</a></li>
+ <li><a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a></li>
+ <li class="current">help</li>
+ </ul>
+ </div>
+
+ <h2 class="no-link no-border">help</h2>
+ <pre>
+ {doc|escape}
+ </pre>
+
+{footer}
diff --git a/websdk/mercurial/templates/monoblue/helptopics.tmpl b/websdk/mercurial/templates/monoblue/helptopics.tmpl
new file mode 100644
index 0000000..ca4ca9b
--- /dev/null
+++ b/websdk/mercurial/templates/monoblue/helptopics.tmpl
@@ -0,0 +1,45 @@
+{header}
+ <title>{repo|escape}: Help</title>
+ <link rel="alternate" type="application/atom+xml" href="{url}atom-log" title="Atom feed for {repo|escape}"/>
+ <link rel="alternate" type="application/rss+xml" href="{url}rss-log" title="RSS feed for {repo|escape}"/>
+</head>
+
+<body>
+<div id="container">
+ <div class="page-header">
+ <h1><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / Help</h1>
+
+ <form action="{url}log">
+ {sessionvars%hiddenformentry}
+ <dl class="search">
+ <dt><label>Search: </label></dt>
+ <dd><input type="text" name="rev" /></dd>
+ </dl>
+ </form>
+
+ <ul class="page-nav">
+ <li><a href="{url}summary{sessionvars%urlparameter}">summary</a></li>
+ <li><a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a></li>
+ <li><a href="{url}changelog{sessionvars%urlparameter}">changelog</a></li>
+ <li><a href="{url}graph/{node|short}{sessionvars%urlparameter}">graph</a></li>
+ <li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li>
+ <li><a href="{url}bookmarks{sessionvars%urlparameter}">bookmarks</a></li>
+ <li><a href="{url}branches{sessionvars%urlparameter}">branches</a></li>
+ <li><a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a></li>
+ <li class="current">help</li>
+ </ul>
+ </div>
+
+ <h2 class="no-link no-border">help</h2>
+ <table cellspacing="0">
+ <tr><td colspan="2"><h2><a name="main" href="#topics">Topics</a></h2></td></tr>
+ {topics % helpentry}
+
+ <tr><td colspan="2"><h2><a name="main" href="#main">Main Commands</a></h2></td></tr>
+ {earlycommands % helpentry}
+
+ <tr><td colspan="2"><h2><a name="other" href="#other">Other Commands</a></h2></td></tr>
+ {othercommands % helpentry}
+ </table>
+
+{footer}
diff --git a/websdk/mercurial/templates/monoblue/index.tmpl b/websdk/mercurial/templates/monoblue/index.tmpl
new file mode 100644
index 0000000..e5086f7
--- /dev/null
+++ b/websdk/mercurial/templates/monoblue/index.tmpl
@@ -0,0 +1,39 @@
+{header}
+ <title>{repo|escape}: Mercurial repositories index</title>
+</head>
+
+<body>
+<div id="container">
+ <div class="page-header">
+ <h1>Mercurial Repositories</h1>
+ <ul class="page-nav">
+ </ul>
+ </div>
+
+ <table cellspacing="0">
+ <tr>
+ <td><a href="?sort={sort_name}">Name</a></td>
+ <td><a href="?sort={sort_description}">Description</a></td>
+ <td><a href="?sort={sort_contact}">Contact</a></td>
+ <td><a href="?sort={sort_lastchange}">Last modified</a></td>
+ <td>&nbsp;</td>
+ <td>&nbsp;</td>
+ </tr>
+ {entries%indexentry}
+ </table>
+ <div class="page-footer">
+ {motd}
+ </div>
+
+ <div id="powered-by">
+ <p><a href="{logourl}" title="Mercurial"><img src="{staticurl}{logoimg}" width=75 height=90 border=0 alt="mercurial"></a></p>
+ </div>
+
+ <div id="corner-top-left"></div>
+ <div id="corner-top-right"></div>
+ <div id="corner-bottom-left"></div>
+ <div id="corner-bottom-right"></div>
+
+</div>
+</body>
+</html>
diff --git a/websdk/mercurial/templates/monoblue/manifest.tmpl b/websdk/mercurial/templates/monoblue/manifest.tmpl
new file mode 100644
index 0000000..c99af7c
--- /dev/null
+++ b/websdk/mercurial/templates/monoblue/manifest.tmpl
@@ -0,0 +1,53 @@
+{header}
+<title>{repo|escape}: files</title>
+ <link rel="alternate" type="application/atom+xml" href="{url}atom-log" title="Atom feed for {repo|escape}"/>
+ <link rel="alternate" type="application/rss+xml" href="{url}rss-log" title="RSS feed for {repo|escape}"/>
+</head>
+
+<body>
+<div id="container">
+ <div class="page-header">
+ <h1><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / files</h1>
+
+ <form action="{url}log">
+ {sessionvars%hiddenformentry}
+ <dl class="search">
+ <dt><label>Search: </label></dt>
+ <dd><input type="text" name="rev" /></dd>
+ </dl>
+ </form>
+
+ <ul class="page-nav">
+ <li><a href="{url}summary{sessionvars%urlparameter}">summary</a></li>
+ <li><a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a></li>
+ <li><a href="{url}changelog{sessionvars%urlparameter}">changelog</a></li>
+ <li><a href="{url}graph/{node|short}{sessionvars%urlparameter}">graph</a></li>
+ <li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li>
+ <li><a href="{url}bookmarks{sessionvars%urlparameter}">bookmarks</a></li>
+ <li><a href="{url}branches{sessionvars%urlparameter}">branches</a></li>
+ <li class="current">files</li>
+ <li><a href="{url}help{sessionvars%urlparameter}">help</a></li>
+ </ul>
+ </div>
+
+ <ul class="submenu">
+ <li><a href="{url}rev/{node|short}{sessionvars%urlparameter}">changeset</a></li>
+ {archives%archiveentry}
+ </ul>
+
+ <h2 class="no-link no-border">files</h2>
+ <p class="files">{path|escape} <span class="logtags">{inbranch%inbranchtag}{branches%branchtag}{tags%tagtag}{bookmarks%bookmarktag}</span></p>
+
+ <table>
+ <tr class="parity{upparity}">
+ <td>drwxr-xr-x</td>
+ <td></td>
+ <td></td>
+ <td><a href="{url}file/{node|short}{up|urlescape}{sessionvars%urlparameter}">[up]</a></td>
+ <td class="link">&nbsp;</td>
+ </tr>
+ {dentries%direntry}
+ {fentries%fileentry}
+ </table>
+
+{footer}
diff --git a/websdk/mercurial/templates/monoblue/map b/websdk/mercurial/templates/monoblue/map
new file mode 100644
index 0000000..7fa26c9
--- /dev/null
+++ b/websdk/mercurial/templates/monoblue/map
@@ -0,0 +1,240 @@
+default = 'summary'
+mimetype = 'text/html; charset={encoding}'
+header = header.tmpl
+footer = footer.tmpl
+search = search.tmpl
+changelog = changelog.tmpl
+summary = summary.tmpl
+error = error.tmpl
+notfound = notfound.tmpl
+
+help = help.tmpl
+helptopics = helptopics.tmpl
+
+helpentry = '<tr><td><a href="{url}help/{topic|escape}{sessionvars%urlparameter}">{topic|escape}</a></td><td>{summary|escape}</td></tr>'
+
+naventry = '<a href="{url}log/{node|short}{sessionvars%urlparameter}">{label|escape}</a> '
+navshortentry = '<a href="{url}shortlog/{node|short}{sessionvars%urlparameter}">{label|escape}</a> '
+navgraphentry = '<a href="{url}graph/{node|short}{sessionvars%urlparameter}">{label|escape}</a> '
+filenaventry = '<a href="{url}log/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{label|escape}</a>'
+filedifflink = '<a href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{file|escape}</a> '
+filenodelink = '
+ <tr class="parity{parity}">
+ <td><a href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{file|escape}</a></td>
+ <td></td>
+ <td>
+ <a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">file</a> |
+ <a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}">annotate</a> |
+ <a href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">diff</a> |
+ <a href="{url}log/{node|short}/{file|urlescape}{sessionvars%urlparameter}">revisions</a>
+ </td>
+ </tr>'
+filenolink = '
+ <tr class="parity{parity}">
+ <td><a href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{file|escape}</a></td>
+ <td></td>
+ <td>
+ file |
+ annotate |
+ <a href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">diff</a> |
+ <a href="{url}log/{node|short}/{file|urlescape}{sessionvars%urlparameter}">revisions</a>
+ </td>
+ </tr>'
+
+nav = '{before%naventry} {after%naventry}'
+navshort = '{before%navshortentry}{after%navshortentry}'
+navgraph = '{before%navgraphentry}{after%navgraphentry}'
+filenav = '{before%filenaventry}{after%filenaventry}'
+
+fileellipses = '...'
+changelogentry = changelogentry.tmpl
+searchentry = changelogentry.tmpl
+changeset = changeset.tmpl
+manifest = manifest.tmpl
+direntry = '
+ <tr class="parity{parity}">
+ <td>drwxr-xr-x</td>
+ <td></td>
+ <td></td>
+ <td><a href="{url}file/{node|short}{path|urlescape}{sessionvars%urlparameter}">{basename|escape}</a></td>
+ <td><a href="{url}file/{node|short}{path|urlescape}{sessionvars%urlparameter}">files</a></td>
+ </tr>'
+fileentry = '
+ <tr class="parity{parity}">
+ <td>{permissions|permissions}</td>
+ <td>{date|isodate}</td>
+ <td>{size}</td>
+ <td><a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{basename|escape}</a></td>
+ <td>
+ <a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">file</a> |
+ <a href="{url}log/{node|short}/{file|urlescape}{sessionvars%urlparameter}">revisions</a> |
+ <a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}">annotate</a>
+ </td>
+ </tr>'
+filerevision = filerevision.tmpl
+fileannotate = fileannotate.tmpl
+filediff = filediff.tmpl
+filelog = filelog.tmpl
+fileline = '
+ <div style="font-family:monospace" class="parity{parity}">
+ <pre><a class="linenr" href="#{lineid}" id="{lineid}">{linenumber}</a> {line|escape}</pre>
+ </div>'
+annotateline = '
+ <tr class="parity{parity}">
+ <td class="linenr">
+ <a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}#{targetline}"
+ title="{node|short}: {desc|escape|firstline}">{author|user}@{rev}</a>
+ </td>
+ <td class="lineno">
+ <a href="#{lineid}" id="{lineid}">{linenumber}</a>
+ </td>
+ <td class="source">{line|escape}</td>
+ </tr>'
+difflineplus = '<span style="color:#008800;"><a class="linenr" href="#{lineid}" id="{lineid}">{linenumber}</a> {line|escape}</span>'
+difflineminus = '<span style="color:#cc0000;"><a class="linenr" href="#{lineid}" id="{lineid}">{linenumber}</a> {line|escape}</span>'
+difflineat = '<span style="color:#990099;"><a class="linenr" href="#{lineid}" id="{lineid}">{linenumber}</a> {line|escape}</span>'
+diffline = '<span><a class="linenr" href="#{lineid}" id="{lineid}">{linenumber}</a> {line|escape}</span>'
+changelogparent = '
+ <tr>
+ <th class="parent">parent {rev}:</th>
+ <td class="parent">
+ <a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a>
+ </td>
+ </tr>'
+changesetbranch = '<dt>branch</dt><dd>{name}</dd>'
+changesetparent = '
+ <dt>parent {rev}</dt>
+ <dd><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a></dd>'
+filerevbranch = '<dt>branch</dt><dd>{name}</dd>'
+filerevparent = '
+ <dt>parent {rev}</dt>
+ <dd>
+ <a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">
+ {rename%filerename}{node|short}
+ </a>
+ </dd>'
+filerename = '{file|escape}@'
+filelogrename = '| <a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">base</a>'
+fileannotateparent = '
+ <dt>parent {rev}</dt>
+ <dd>
+ <a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}">
+ {rename%filerename}{node|short}
+ </a>
+ </dd>'
+changelogchild = '
+ <dt>child {rev}:</dt>
+ <dd><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a></dd>'
+changesetchild = '
+ <dt>child {rev}</dt>
+ <dd><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a></dd>'
+filerevchild = '
+ <dt>child {rev}</dt>
+ <dd>
+ <a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{node|short}</a>
+ </dd>'
+fileannotatechild = '
+ <dt>child {rev}</dt>
+ <dd>
+ <a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{node|short}</a>
+ </dd>'
+tags = tags.tmpl
+tagentry = '
+ <tr class="parity{parity}">
+ <td class="nowrap age">{date|rfc822date}</td>
+ <td><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{tag|escape}</a></td>
+ <td class="nowrap">
+ <a href="{url}rev/{node|short}{sessionvars%urlparameter}">changeset</a> |
+ <a href="{url}log/{node|short}{sessionvars%urlparameter}">changelog</a> |
+ <a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a>
+ </td>
+ </tr>'
+bookmarks = bookmarks.tmpl
+bookmarkentry = '
+ <tr class="parity{parity}">
+ <td class="nowrap date">{date|rfc822date}</td>
+ <td><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{bookmark|escape}</a></td>
+ <td class="nowrap">
+ <a href="{url}rev/{node|short}{sessionvars%urlparameter}">changeset</a> |
+ <a href="{url}log/{node|short}{sessionvars%urlparameter}">changelog</a> |
+ <a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a>
+ </td>
+ </tr>'
+branches = branches.tmpl
+branchentry = '
+ <tr class="parity{parity}">
+ <td class="nowrap age">{date|rfc822date}</td>
+ <td><a href="{url}shortlog/{node|short}{sessionvars%urlparameter}">{node|short}</a></td>
+ <td class="{status}">{branch|escape}</td>
+ <td class="nowrap">
+ <a href="{url}rev/{node|short}{sessionvars%urlparameter}">changeset</a> |
+ <a href="{url}log/{node|short}{sessionvars%urlparameter}">changelog</a> |
+ <a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a>
+ </td>
+ </tr>'
+diffblock = '<pre>{lines}</pre>'
+filediffparent = '
+ <dt>parent {rev}</dt>
+ <dd><a href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{node|short}</a></dd>'
+filelogparent = '
+ <tr>
+ <td align="right">parent {rev}:&nbsp;</td>
+ <td><a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{node|short}</a></td>
+ </tr>'
+filediffchild = '
+ <dt>child {rev}</dt>
+ <dd><a href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{node|short}</a></dd>'
+filelogchild = '
+ <tr>
+ <td align="right">child {rev}:&nbsp;</td>
+ <td><a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{node|short}</a></td>
+ </tr>'
+shortlog = shortlog.tmpl
+tagtag = '<span class="tagtag" title="{name}">{name}</span> '
+branchtag = '<span class="branchtag" title="{name}">{name}</span> '
+inbranchtag = '<span class="inbranchtag" title="{name}">{name}</span> '
+bookmarktag = '<span class="bookmarktag" title="{name}">{name}</span> '
+shortlogentry = '
+ <tr class="parity{parity}">
+ <td class="nowrap age">{date|rfc822date}</td>
+ <td>{author|person}</td>
+ <td>
+ <a href="{url}rev/{node|short}{sessionvars%urlparameter}">
+ {desc|strip|firstline|escape|nonempty}
+ <span class="logtags">{inbranch%inbranchtag}{branches%branchtag}{tags%tagtag}{bookmarks%bookmarktag}</span>
+ </a>
+ </td>
+ <td class="nowrap">
+ <a href="{url}rev/{node|short}{sessionvars%urlparameter}">changeset</a> |
+ <a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a>
+ </td>
+ </tr>'
+filelogentry = '
+ <tr class="parity{parity}">
+ <td class="nowrap age">{date|rfc822date}</td>
+ <td><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{desc|strip|firstline|escape|nonempty}</a></td>
+ <td class="nowrap">
+ <a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">file</a>&nbsp;|&nbsp;<a href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">diff</a>&nbsp;|&nbsp;<a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}">annotate</a>
+ {rename%filelogrename}
+ </td>
+ </tr>'
+archiveentry = '<li><a href="{url}archive/{node|short}{extension}">{type|escape}</a></li>'
+indexentry = '
+ <tr class="parity{parity}">
+ <td><a href="{url}{sessionvars%urlparameter}">{name|escape}</a></td>
+ <td>{description}</td>
+ <td>{contact|obfuscate}</td>
+ <td class="age">{lastchange|rfc822date}</td>
+ <td class="indexlinks">{archives%indexarchiveentry}</td>
+ <td>
+ <div class="rss_logo">
+ <a href="{url}rss-log">RSS</a>
+ <a href="{url}atom-log">Atom</a>
+ </div>
+ </td>
+ </tr>\n'
+indexarchiveentry = '<a href="{url}archive/{node|short}{extension}">{type|escape}</a> '
+index = index.tmpl
+urlparameter = '{separator}{name}={value|urlescape}'
+hiddenformentry = '<input type="hidden" name="{name}" value="{value|escape}" />'
+graph = graph.tmpl
diff --git a/websdk/mercurial/templates/monoblue/notfound.tmpl b/websdk/mercurial/templates/monoblue/notfound.tmpl
new file mode 100644
index 0000000..38df584
--- /dev/null
+++ b/websdk/mercurial/templates/monoblue/notfound.tmpl
@@ -0,0 +1,37 @@
+{header}
+ <title>{repo|escape}: Mercurial repository not found</title>
+ <link rel="alternate" type="application/atom+xml" href="{url}atom-log" title="Atom feed for {repo|escape}"/>
+ <link rel="alternate" type="application/rss+xml" href="{url}rss-log" title="RSS feed for {repo|escape}"/>
+</head>
+
+<body>
+<div id="container">
+ <div class="page-header">
+ <h1><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / Not found: {repo|escape}</h1>
+
+ <form action="{url}log">
+ {sessionvars%hiddenformentry}
+ <dl class="search">
+ <dt><label>Search: </label></dt>
+ <dd><input type="text" name="rev" /></dd>
+ </dl>
+ </form>
+
+ <ul class="page-nav">
+ <li class="current">summary</li>
+ <li><a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a></li>
+ <li><a href="{url}log{sessionvars%urlparameter}">changelog</a></li>
+ <li><a href="{url}graph/{node|short}{sessionvars%urlparameter}">graph</a></li>
+ <li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li>
+ <li><a href="{url}bookmarks{sessionvars%urlparameter}">bookmarks</a></li>
+ <li><a href="{url}branches{sessionvars%urlparameter}">branches</a></li>
+ <li><a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a>{archives%archiveentry}</li>
+ <li><a href="{url}help{sessionvars%urlparameter}">help</a></li>
+ </ul>
+ </div>
+
+ <h2 class="no-link no-border">Not Found</h2>
+ <p class="normal">The specified repository "{repo|escape}" is unknown, sorry.</p>
+ <p class="normal">Please go back to the <a href="{url}">main repository list page</a>.</p>
+
+{footer}
diff --git a/websdk/mercurial/templates/monoblue/search.tmpl b/websdk/mercurial/templates/monoblue/search.tmpl
new file mode 100644
index 0000000..2be816a
--- /dev/null
+++ b/websdk/mercurial/templates/monoblue/search.tmpl
@@ -0,0 +1,36 @@
+{header}
+ <title>{repo|escape}: Search</title>
+ <link rel="alternate" type="application/atom+xml" href="{url}atom-log" title="Atom feed for {repo|escape}"/>
+ <link rel="alternate" type="application/rss+xml" href="{url}rss-log" title="RSS feed for {repo|escape}"/>
+</head>
+
+<body>
+<div id="container">
+ <div class="page-header">
+ <h1><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / search</h1>
+
+ <form action="{url}log">
+ {sessionvars%hiddenformentry}
+ <dl class="search">
+ <dt><label>Search: </label></dt>
+ <dd><input type="text" name="rev" value="{query|escape}" /></dd>
+ </dl>
+ </form>
+
+ <ul class="page-nav">
+ <li><a href="{url}summary{sessionvars%urlparameter}">summary</a></li>
+ <li><a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a></li>
+ <li><a href="{url}log{sessionvars%urlparameter}">changelog</a></li>
+ <li><a href="{url}graph/{node|short}{sessionvars%urlparameter}">graph</a></li>
+ <li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li>
+ <li><a href="{url}bookmarks{sessionvars%urlparameter}">bookmarks</a></li>
+ <li><a href="{url}branches{sessionvars%urlparameter}">branches</a></li>
+ <li><a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a>{archives%archiveentry}</li>
+ <li><a href="{url}help{sessionvars%urlparameter}">help</a></li>
+ </ul>
+ </div>
+
+ <h2 class="no-link no-border">searching for {query|escape}</h2>
+ {entries}
+
+{footer}
diff --git a/websdk/mercurial/templates/monoblue/shortlog.tmpl b/websdk/mercurial/templates/monoblue/shortlog.tmpl
new file mode 100644
index 0000000..56c4584
--- /dev/null
+++ b/websdk/mercurial/templates/monoblue/shortlog.tmpl
@@ -0,0 +1,44 @@
+{header}
+ <title>{repo|escape}: shortlog</title>
+ <link rel="alternate" type="application/atom+xml" href="{url}atom-log" title="Atom feed for {repo|escape}"/>
+ <link rel="alternate" type="application/rss+xml" href="{url}rss-log" title="RSS feed for {repo|escape}"/>
+</head>
+
+<body>
+<div id="container">
+ <div class="page-header">
+ <h1><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / shortlog</h1>
+
+ <form action="{url}log">
+ {sessionvars%hiddenformentry}
+ <dl class="search">
+ <dt><label>Search: </label></dt>
+ <dd><input type="text" name="rev" /></dd>
+ </dl>
+ </form>
+
+ <ul class="page-nav">
+ <li><a href="{url}summary{sessionvars%urlparameter}">summary</a></li>
+ <li class="current">shortlog</li>
+ <li><a href="{url}log{sessionvars%urlparameter}">changelog</a></li>
+ <li><a href="{url}graph/{node|short}{sessionvars%urlparameter}">graph</a></li>
+ <li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li>
+ <li><a href="{url}bookmarks{sessionvars%urlparameter}">bookmarks</a></li>
+ <li><a href="{url}branches{sessionvars%urlparameter}">branches</a></li>
+ <li><a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a></li>
+ {archives%archiveentry}
+ <li><a href="{url}help{sessionvars%urlparameter}">help</a></li>
+ </ul>
+ </div>
+
+ <h2 class="no-link no-border">shortlog</h2>
+
+ <table>
+{entries%shortlogentry}
+ </table>
+
+ <div class="page-path">
+ {changenav%navshort}
+ </div>
+
+{footer}
diff --git a/websdk/mercurial/templates/monoblue/summary.tmpl b/websdk/mercurial/templates/monoblue/summary.tmpl
new file mode 100644
index 0000000..9528dc6
--- /dev/null
+++ b/websdk/mercurial/templates/monoblue/summary.tmpl
@@ -0,0 +1,76 @@
+{header}
+ <title>{repo|escape}: Summary</title>
+ <link rel="alternate" type="application/atom+xml" href="{url}atom-log" title="Atom feed for {repo|escape}"/>
+ <link rel="alternate" type="application/rss+xml" href="{url}rss-log" title="RSS feed for {repo|escape}"/>
+</head>
+
+<body>
+<div id="container">
+ <div class="page-header">
+ <h1><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / summary</h1>
+
+ <form action="{url}log">
+ {sessionvars%hiddenformentry}
+ <dl class="search">
+ <dt><label>Search: </label></dt>
+ <dd><input type="text" name="rev" /></dd>
+ </dl>
+ </form>
+
+ <ul class="page-nav">
+ <li class="current">summary</li>
+ <li><a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a></li>
+ <li><a href="{url}log{sessionvars%urlparameter}">changelog</a></li>
+ <li><a href="{url}graph/{node|short}{sessionvars%urlparameter}">graph</a></li>
+ <li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li>
+ <li><a href="{url}bookmarks{sessionvars%urlparameter}">bookmarks</a></li>
+ <li><a href="{url}branches{sessionvars%urlparameter}">branches</a></li>
+ <li><a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a></li>
+ <li><a href="{url}help{sessionvars%urlparameter}">help</a></li>
+ </ul>
+ </div>
+
+ <h2 class="no-link no-border">Mercurial Repository Overview</h2>
+ <dl class="overview">
+ <dt>name</dt>
+ <dd>{repo|escape}</dd>
+ <dt>description</dt>
+ <dd>{desc}</dd>
+ <dt>owner</dt>
+ <dd>{owner|obfuscate}</dd>
+ <dt>last change</dt>
+ <dd>{lastchange|rfc822date}</dd>
+ </dl>
+
+ <h2><a href="{url}shortlog{sessionvars%urlparameter}">Changes</a></h2>
+ <table>
+{shortlog}
+ <tr class="light">
+ <td colspan="4"><a class="list" href="{url}shortlog{sessionvars%urlparameter}">...</a></td>
+ </tr>
+ </table>
+
+ <h2><a href="{url}tags{sessionvars%urlparameter}">Tags</a></h2>
+ <table>
+{tags}
+ <tr class="light">
+ <td colspan="3"><a class="list" href="{url}tags{sessionvars%urlparameter}">...</a></td>
+ </tr>
+ </table>
+
+ <h2><a href="{url}bookmarks{sessionvars%urlparameter}">Bookmarks</a></h2>
+ <table>
+ {bookmarks%bookmarkentry}
+ <tr class="light">
+ <td colspan="3"><a class="list" href="{url}bookmarks{sessionvars%urlparameter}">...</a></td>
+ </tr>
+ </table>
+
+ <h2 class="no-link">Branches</h2>
+ <table>
+ {branches%branchentry}
+ <tr class="light">
+ <td colspan="4"><a class="list" href="#">...</a></td>
+ </tr>
+ </table>
+{footer}
diff --git a/websdk/mercurial/templates/monoblue/tags.tmpl b/websdk/mercurial/templates/monoblue/tags.tmpl
new file mode 100644
index 0000000..0c8f93d
--- /dev/null
+++ b/websdk/mercurial/templates/monoblue/tags.tmpl
@@ -0,0 +1,38 @@
+{header}
+ <title>{repo|escape}: Tags</title>
+ <link rel="alternate" type="application/atom+xml" href="{url}atom-log" title="Atom feed for {repo|escape}"/>
+ <link rel="alternate" type="application/rss+xml" href="{url}rss-log" title="RSS feed for {repo|escape}"/>
+</head>
+
+<body>
+<div id="container">
+ <div class="page-header">
+ <h1><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / Tags</h1>
+
+ <form action="{url}log">
+ {sessionvars%hiddenformentry}
+ <dl class="search">
+ <dt><label>Search: </label></dt>
+ <dd><input type="text" name="rev" /></dd>
+ </dl>
+ </form>
+
+ <ul class="page-nav">
+ <li><a href="{url}summary{sessionvars%urlparameter}">summary</a></li>
+ <li><a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a></li>
+ <li><a href="{url}changelog{sessionvars%urlparameter}">changelog</a></li>
+ <li><a href="{url}graph/{node|short}{sessionvars%urlparameter}">graph</a></li>
+ <li class="current">tags</li>
+ <li><a href="{url}bookmarks{sessionvars%urlparameter}">bookmarks</a></li>
+ <li><a href="{url}branches{sessionvars%urlparameter}">branches</a></li>
+ <li><a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a></li>
+ <li><a href="{url}help{sessionvars%urlparameter}">help</a></li>
+ </ul>
+ </div>
+
+ <h2 class="no-link no-border">tags</h2>
+ <table cellspacing="0">
+{entries%tagentry}
+ </table>
+
+{footer}
diff --git a/websdk/mercurial/templates/paper/bookmarks.tmpl b/websdk/mercurial/templates/paper/bookmarks.tmpl
new file mode 100644
index 0000000..5989243
--- /dev/null
+++ b/websdk/mercurial/templates/paper/bookmarks.tmpl
@@ -0,0 +1,49 @@
+{header}
+<title>{repo|escape}: bookmarks</title>
+<link rel="alternate" type="application/atom+xml"
+ href="{url}atom-bookmarks" title="Atom feed for {repo|escape}: bookmarks" />
+<link rel="alternate" type="application/rss+xml"
+ href="{url}rss-bookmarks" title="RSS feed for {repo|escape}: bookmarks" />
+</head>
+<body>
+
+<div class="container">
+<div class="menu">
+<div class="logo">
+<a href="{logourl}">
+<img src="{staticurl}{logoimg}" alt="mercurial" /></a>
+</div>
+<ul>
+<li><a href="{url}shortlog{sessionvars%urlparameter}">log</a></li>
+<li><a href="{url}graph{sessionvars%urlparameter}">graph</a></li>
+<li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li>
+<li class="active">bookmarks</li>
+<li><a href="{url}branches{sessionvars%urlparameter}">branches</a></li>
+</ul>
+<ul>
+<li><a href="{url}help{sessionvars%urlparameter}">help</a></li>
+</ul>
+</div>
+
+<div class="main">
+<h2><a href="{url}{sessionvars%urlparameter}">{repo|escape}</a></h2>
+<h3>bookmarks</h3>
+
+<form class="search" action="{url}log">
+{sessionvars%hiddenformentry}
+<p><input name="rev" id="search1" type="text" size="30" /></p>
+<div id="hint">find changesets by author, revision,
+files, or words in the commit message</div>
+</form>
+
+<table class="bigtable">
+<tr>
+ <th>bookmark</th>
+ <th>node</th>
+</tr>
+{entries%bookmarkentry}
+</table>
+</div>
+</div>
+
+{footer}
diff --git a/websdk/mercurial/templates/paper/branches.tmpl b/websdk/mercurial/templates/paper/branches.tmpl
new file mode 100644
index 0000000..b26a04d
--- /dev/null
+++ b/websdk/mercurial/templates/paper/branches.tmpl
@@ -0,0 +1,60 @@
+{header}
+<title>{repo|escape}: branches</title>
+<link rel="alternate" type="application/atom+xml"
+ href="{url}atom-tags" title="Atom feed for {repo|escape}: branches" />
+<link rel="alternate" type="application/rss+xml"
+ href="{url}rss-tags" title="RSS feed for {repo|escape}: branches" />
+</head>
+<body>
+
+<div class="container">
+<div class="menu">
+<div class="logo">
+<a href="{logourl}">
+<img src="{staticurl}{logoimg}" alt="mercurial" /></a>
+</div>
+<ul>
+<li><a href="{url}shortlog{sessionvars%urlparameter}">log</a></li>
+<li><a href="{url}graph{sessionvars%urlparameter}">graph</a></li>
+<li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li>
+<li><a href="{url}bookmarks{sessionvars%urlparameter}">bookmarks</a></li>
+<li class="active">branches</li>
+</ul>
+<ul>
+ <li><a href="{url}help{sessionvars%urlparameter}">help</a></li>
+</ul>
+</div>
+
+<div class="main">
+<h2><a href="{url}{sessionvars%urlparameter}">{repo|escape}</a></h2>
+<h3>branches</h3>
+
+<form class="search" action="{url}log">
+{sessionvars%hiddenformentry}
+<p><input name="rev" id="search1" type="text" size="30" /></p>
+<div id="hint">find changesets by author, revision,
+files, or words in the commit message</div>
+</form>
+
+<table class="bigtable">
+<tr>
+ <th>branch</th>
+ <th>node</th>
+</tr>
+{entries %
+' <tr class="tagEntry parity{parity}">
+ <td>
+ <a href="{url}shortlog/{node|short}{sessionvars%urlparameter}" class="{status}">
+ {branch|escape}
+ </a>
+ </td>
+ <td class="node">
+ {node|short}
+ </td>
+ </tr>'
+}
+</table>
+</div>
+</div>
+
+{footer}
diff --git a/websdk/mercurial/templates/paper/changeset.tmpl b/websdk/mercurial/templates/paper/changeset.tmpl
new file mode 100644
index 0000000..b7cc62b
--- /dev/null
+++ b/websdk/mercurial/templates/paper/changeset.tmpl
@@ -0,0 +1,87 @@
+{header}
+<title>{repo|escape}: {node|short}</title>
+</head>
+<body>
+<div class="container">
+<div class="menu">
+<div class="logo">
+<a href="{logourl}">
+<img src="{staticurl}{logoimg}" alt="mercurial" /></a>
+</div>
+<ul>
+ <li><a href="{url}shortlog/{node|short}{sessionvars%urlparameter}">log</a></li>
+ <li><a href="{url}graph/{node|short}{sessionvars%urlparameter}">graph</a></li>
+ <li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li>
+ <li><a href="{url}bookmarks{sessionvars%urlparameter}">bookmarks</a></li>
+ <li><a href="{url}branches{sessionvars%urlparameter}">branches</a></li>
+</ul>
+<ul>
+ <li class="active">changeset</li>
+ <li><a href="{url}raw-rev/{node|short}{sessionvars%urlparameter}">raw</a></li>
+ <li><a href="{url}file/{node|short}{sessionvars%urlparameter}">browse</a></li>
+</ul>
+<ul>
+ {archives%archiveentry}
+</ul>
+<ul>
+ <li><a href="{url}help{sessionvars%urlparameter}">help</a></li>
+</ul>
+</div>
+
+<div class="main">
+
+<h2><a href="{url}{sessionvars%urlparameter}">{repo|escape}</a></h2>
+<h3>changeset {rev}:{node|short} {changesetbranch%changelogbranchname} {changesettag} {changesetbookmark}</h3>
+
+<form class="search" action="{url}log">
+{sessionvars%hiddenformentry}
+<p><input name="rev" id="search1" type="text" size="30" /></p>
+<div id="hint">find changesets by author, revision,
+files, or words in the commit message</div>
+</form>
+
+<div class="description">{desc|strip|escape|nonempty}</div>
+
+<table id="changesetEntry">
+<tr>
+ <th class="author">author</th>
+ <td class="author">{author|obfuscate}</td>
+</tr>
+<tr>
+ <th class="date">date</th>
+ <td class="date age">{date|rfc822date}</td></tr>
+<tr>
+ <th class="author">parents</th>
+ <td class="author">{parent%changesetparent}</td>
+</tr>
+<tr>
+ <th class="author">children</th>
+ <td class="author">{child%changesetchild}</td>
+</tr>
+<tr>
+ <th class="files">files</th>
+ <td class="files">{files}</td>
+</tr>
+<tr>
+ <th class="diffstat">diffstat</th>
+ <td class="diffstat">
+ {diffsummary}
+      <a id="diffstatexpand" href="javascript:showDiffstat()">[<tt>+</tt>]</a>
+ <div id="diffstatdetails" style="display:none;">
+       <a href="javascript:hideDiffstat()">[<tt>-</tt>]</a>
+ <p>
+ <table>{diffstat}</table>
+ </div>
+ </td>
+</tr>
+</table>
+
+<div class="overflow">
+<div class="sourcefirst"> line diff</div>
+
+{diff}
+</div>
+
+</div>
+</div>
+{footer}
diff --git a/websdk/mercurial/templates/paper/diffstat.tmpl b/websdk/mercurial/templates/paper/diffstat.tmpl
new file mode 100644
index 0000000..90261c9
--- /dev/null
+++ b/websdk/mercurial/templates/paper/diffstat.tmpl
@@ -0,0 +1,8 @@
+ <tr class="parity{parity}">
+ <td class="diffstat-file"><a href="#l{fileno}.1">{file|escape}</a></td>
+ <td class="diffstat-total" align="right">{total}</td>
+ <td class="diffstat-graph">
+ <span class="diffstat-add" style="width:{addpct}%;">&nbsp;</span>
+ <span class="diffstat-remove" style="width:{removepct}%;">&nbsp;</span>
+ </td>
+ </tr>
diff --git a/websdk/mercurial/templates/paper/error.tmpl b/websdk/mercurial/templates/paper/error.tmpl
new file mode 100644
index 0000000..1aca07c
--- /dev/null
+++ b/websdk/mercurial/templates/paper/error.tmpl
@@ -0,0 +1,45 @@
+{header}
+<title>{repo|escape}: error</title>
+</head>
+<body>
+
+<div class="container">
+<div class="menu">
+<div class="logo">
+<a href="{logourl}">
+<img src="{staticurl}{logoimg}" width="75" height="90" border="0" alt="mercurial" /></a>
+</div>
+<ul>
+<li><a href="{url}shortlog{sessionvars%urlparameter}">log</a></li>
+<li><a href="{url}graph{sessionvars%urlparameter}">graph</a></li>
+<li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li>
+<li><a href="{url}bookmarks{sessionvars%urlparameter}">bookmarks</a></li>
+<li><a href="{url}branches{sessionvars%urlparameter}">branches</a></li>
+<li><a href="{url}help{sessionvars%urlparameter}">help</a></li>
+</ul>
+</div>
+
+<div class="main">
+
+<h2><a href="{url}{sessionvars%urlparameter}">{repo|escape}</a></h2>
+<h3>error</h3>
+
+<form class="search" action="{url}log">
+{sessionvars%hiddenformentry}
+<p><input name="rev" id="search1" type="text" size="30" /></p>
+<div id="hint">find changesets by author, revision,
+files, or words in the commit message</div>
+</form>
+
+<div class="description">
+<p>
+An error occurred while processing your request:
+</p>
+<p>
+{error|escape}
+</p>
+</div>
+</div>
+</div>
+
+{footer}
diff --git a/websdk/mercurial/templates/paper/fileannotate.tmpl b/websdk/mercurial/templates/paper/fileannotate.tmpl
new file mode 100644
index 0000000..cb02b71
--- /dev/null
+++ b/websdk/mercurial/templates/paper/fileannotate.tmpl
@@ -0,0 +1,82 @@
+{header}
+<title>{repo|escape}: {file|escape} annotate</title>
+</head>
+<body>
+
+<div class="container">
+<div class="menu">
+<div class="logo">
+<a href="{logourl}">
+<img src="{staticurl}{logoimg}" alt="mercurial" /></a>
+</div>
+<ul>
+<li><a href="{url}shortlog/{node|short}{sessionvars%urlparameter}">log</a></li>
+<li><a href="{url}graph/{node|short}{sessionvars%urlparameter}">graph</a></li>
+<li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li>
+<li><a href="{url}bookmarks{sessionvars%urlparameter}">bookmarks</a></li>
+<li><a href="{url}branches{sessionvars%urlparameter}">branches</a></li>
+</ul>
+
+<ul>
+<li><a href="{url}rev/{node|short}{sessionvars%urlparameter}">changeset</a></li>
+<li><a href="{url}file/{node|short}{path|urlescape}{sessionvars%urlparameter}">browse</a></li>
+</ul>
+<ul>
+<li><a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">file</a></li>
+<li><a href="{url}file/tip/{file|urlescape}{sessionvars%urlparameter}">latest</a></li>
+<li><a href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">diff</a></li>
+<li class="active">annotate</li>
+<li><a href="{url}log/{node|short}/{file|urlescape}{sessionvars%urlparameter}">file log</a></li>
+<li><a href="{url}raw-annotate/{node|short}/{file|urlescape}">raw</a></li>
+</ul>
+<ul>
+<li><a href="{url}help{sessionvars%urlparameter}">help</a></li>
+</ul>
+</div>
+
+<div class="main">
+<h2><a href="{url}{sessionvars%urlparameter}">{repo|escape}</a></h2>
+<h3>annotate {file|escape} @ {rev}:{node|short}</h3>
+
+<form class="search" action="{url}log">
+{sessionvars%hiddenformentry}
+<p><input name="rev" id="search1" type="text" size="30" /></p>
+<div id="hint">find changesets by author, revision,
+files, or words in the commit message</div>
+</form>
+
+<div class="description">{desc|strip|escape|nonempty}</div>
+
+<table id="changesetEntry">
+<tr>
+ <th class="author">author</th>
+ <td class="author">{author|obfuscate}</td>
+</tr>
+<tr>
+ <th class="date">date</th>
+ <td class="date age">{date|rfc822date}</td>
+</tr>
+<tr>
+ <th class="author">parents</th>
+ <td class="author">{parent%filerevparent}</td>
+</tr>
+<tr>
+ <th class="author">children</th>
+ <td class="author">{child%filerevchild}</td>
+</tr>
+{changesettag}
+</table>
+
+<div class="overflow">
+<table class="bigtable">
+<tr>
+ <th class="annotate">rev</th>
+ <th class="line">&nbsp;&nbsp;line source</th>
+</tr>
+{annotate%annotateline}
+</table>
+</div>
+</div>
+</div>
+
+{footer}
diff --git a/websdk/mercurial/templates/paper/filediff.tmpl b/websdk/mercurial/templates/paper/filediff.tmpl
new file mode 100644
index 0000000..43524c4
--- /dev/null
+++ b/websdk/mercurial/templates/paper/filediff.tmpl
@@ -0,0 +1,77 @@
+{header}
+<title>{repo|escape}: {file|escape} diff</title>
+</head>
+<body>
+
+<div class="container">
+<div class="menu">
+<div class="logo">
+<a href="{logourl}">
+<img src="{staticurl}{logoimg}" alt="mercurial" /></a>
+</div>
+<ul>
+<li><a href="{url}shortlog/{node|short}{sessionvars%urlparameter}">log</a></li>
+<li><a href="{url}graph/{node|short}{sessionvars%urlparameter}">graph</a></li>
+<li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li>
+<li><a href="{url}bookmarks{sessionvars%urlparameter}">bookmarks</a></li>
+<li><a href="{url}branches{sessionvars%urlparameter}">branches</a></li>
+</ul>
+<ul>
+<li><a href="{url}rev/{node|short}{sessionvars%urlparameter}">changeset</a></li>
+<li><a href="{url}file/{node|short}{path|urlescape}{sessionvars%urlparameter}">browse</a></li>
+</ul>
+<ul>
+<li><a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">file</a></li>
+<li><a href="{url}file/tip/{file|urlescape}{sessionvars%urlparameter}">latest</a></li>
+<li class="active">diff</li>
+<li><a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}">annotate</a></li>
+<li><a href="{url}log/{node|short}/{file|urlescape}{sessionvars%urlparameter}">file log</a></li>
+<li><a href="{url}raw-file/{node|short}/{file|urlescape}">raw</a></li>
+</ul>
+<ul>
+<li><a href="{url}help{sessionvars%urlparameter}">help</a></li>
+</ul>
+</div>
+
+<div class="main">
+<h2><a href="{url}{sessionvars%urlparameter}">{repo|escape}</a></h2>
+<h3>diff {file|escape} @ {rev}:{node|short}</h3>
+
+<form class="search" action="{url}log">
+<p>{sessionvars%hiddenformentry}</p>
+<p><input name="rev" id="search1" type="text" size="30" /></p>
+<div id="hint">find changesets by author, revision,
+files, or words in the commit message</div>
+</form>
+
+<div class="description">{desc|strip|escape|nonempty}</div>
+
+<table id="changesetEntry">
+<tr>
+ <th>author</th>
+ <td>{author|obfuscate}</td>
+</tr>
+<tr>
+ <th>date</th>
+ <td class="date age">{date|rfc822date}</td>
+</tr>
+<tr>
+ <th>parents</th>
+ <td>{parent%filerevparent}</td>
+</tr>
+<tr>
+ <th>children</th>
+ <td>{child%filerevchild}</td>
+</tr>
+{changesettag}
+</table>
+
+<div class="overflow">
+<div class="sourcefirst"> line diff</div>
+
+{diff}
+</div>
+</div>
+</div>
+
+{footer}
diff --git a/websdk/mercurial/templates/paper/filelog.tmpl b/websdk/mercurial/templates/paper/filelog.tmpl
new file mode 100644
index 0000000..4880d78
--- /dev/null
+++ b/websdk/mercurial/templates/paper/filelog.tmpl
@@ -0,0 +1,73 @@
+{header}
+<title>{repo|escape}: {file|escape} history</title>
+<link rel="alternate" type="application/atom+xml"
+ href="{url}atom-log/tip/{file|urlescape}" title="Atom feed for {repo|escape}:{file}" />
+<link rel="alternate" type="application/rss+xml"
+ href="{url}rss-log/tip/{file|urlescape}" title="RSS feed for {repo|escape}:{file}" />
+</head>
+<body>
+
+<div class="container">
+<div class="menu">
+<div class="logo">
+<a href="{logourl}">
+<img src="{staticurl}{logoimg}" alt="mercurial" /></a>
+</div>
+<ul>
+<li><a href="{url}shortlog/{node|short}{sessionvars%urlparameter}">log</a></li>
+<li><a href="{url}graph/{node|short}{sessionvars%urlparameter}">graph</a></li>
+<li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li>
+<li><a href="{url}bookmarks{sessionvars%urlparameter}">bookmarks</a></li>
+<li><a href="{url}branches{sessionvars%urlparameter}">branches</a></li>
+</ul>
+<ul>
+<li><a href="{url}rev/{node|short}{sessionvars%urlparameter}">changeset</a></li>
+<li><a href="{url}file/{node|short}{path|urlescape}{sessionvars%urlparameter}">browse</a></li>
+</ul>
+<ul>
+<li><a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">file</a></li>
+<li><a href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">diff</a></li>
+<li><a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}">annotate</a></li>
+<li class="active">file log</li>
+<li><a href="{url}raw-file/{node|short}/{file|urlescape}">raw</a></li>
+</ul>
+<ul>
+<li><a href="{url}help{sessionvars%urlparameter}">help</a></li>
+</ul>
+</div>
+
+<div class="main">
+<h2><a href="{url}{sessionvars%urlparameter}">{repo|escape}</a></h2>
+<h3>log {file|escape}</h3>
+
+<form class="search" action="{url}log">
+{sessionvars%hiddenformentry}
+<p><input name="rev" id="search1" type="text" size="30" /></p>
+<div id="hint">find changesets by author, revision,
+files, or words in the commit message</div>
+</form>
+
+<div class="navigate">
+<a href="{url}log/{node|short}/{file|urlescape}{lessvars%urlparameter}">less</a>
+<a href="{url}log/{node|short}/{file|urlescape}{morevars%urlparameter}">more</a>
+| {nav%filenav}</div>
+
+<table class="bigtable">
+ <tr>
+ <th class="age">age</th>
+ <th class="author">author</th>
+ <th class="description">description</th>
+ </tr>
+{entries%filelogentry}
+</table>
+
+<div class="navigate">
+<a href="{url}log/{node|short}/{file|urlescape}{lessvars%urlparameter}">less</a>
+<a href="{url}log/{node|short}/{file|urlescape}{morevars%urlparameter}">more</a>
+| {nav%filenav}
+</div>
+
+</div>
+</div>
+
+{footer}
diff --git a/websdk/mercurial/templates/paper/filelogentry.tmpl b/websdk/mercurial/templates/paper/filelogentry.tmpl
new file mode 100644
index 0000000..8b4ae84
--- /dev/null
+++ b/websdk/mercurial/templates/paper/filelogentry.tmpl
@@ -0,0 +1,5 @@
+ <tr class="parity{parity}">
+ <td class="age">{date|rfc822date}</td>
+ <td class="author">{author|person}</td>
+ <td class="description"><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{desc|strip|firstline|escape|nonempty}</a>{inbranch%changelogbranchname}{branches%changelogbranchhead}{tags%changelogtag}{rename%filelogrename}</td>
+ </tr>
diff --git a/websdk/mercurial/templates/paper/filerevision.tmpl b/websdk/mercurial/templates/paper/filerevision.tmpl
new file mode 100644
index 0000000..cdadf48
--- /dev/null
+++ b/websdk/mercurial/templates/paper/filerevision.tmpl
@@ -0,0 +1,76 @@
+{header}
+<title>{repo|escape}: {node|short} {file|escape}</title>
+</head>
+<body>
+
+<div class="container">
+<div class="menu">
+<div class="logo">
+<a href="{logourl}">
+<img src="{staticurl}{logoimg}" alt="mercurial" /></a>
+</div>
+<ul>
+<li><a href="{url}shortlog/{node|short}{sessionvars%urlparameter}">log</a></li>
+<li><a href="{url}graph/{node|short}{sessionvars%urlparameter}">graph</a></li>
+<li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li>
+<li><a href="{url}branches{sessionvars%urlparameter}">branches</a></li>
+</ul>
+<ul>
+<li><a href="{url}rev/{node|short}{sessionvars%urlparameter}">changeset</a></li>
+<li><a href="{url}file/{node|short}{path|urlescape}{sessionvars%urlparameter}">browse</a></li>
+</ul>
+<ul>
+<li class="active">file</li>
+<li><a href="{url}file/tip/{file|urlescape}{sessionvars%urlparameter}">latest</a></li>
+<li><a href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">diff</a></li>
+<li><a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}">annotate</a></li>
+<li><a href="{url}log/{node|short}/{file|urlescape}{sessionvars%urlparameter}">file log</a></li>
+<li><a href="{url}raw-file/{node|short}/{file|urlescape}">raw</a></li>
+</ul>
+<ul>
+<li><a href="{url}help{sessionvars%urlparameter}">help</a></li>
+</ul>
+</div>
+
+<div class="main">
+<h2><a href="{url}{sessionvars%urlparameter}">{repo|escape}</a></h2>
+<h3>view {file|escape} @ {rev}:{node|short}</h3>
+
+<form class="search" action="{url}log">
+{sessionvars%hiddenformentry}
+<p><input name="rev" id="search1" type="text" size="30" /></p>
+<div id="hint">find changesets by author, revision,
+files, or words in the commit message</div>
+</form>
+
+<div class="description">{desc|strip|escape|nonempty}</div>
+
+<table id="changesetEntry">
+<tr>
+ <th class="author">author</th>
+ <td class="author">{author|obfuscate}</td>
+</tr>
+<tr>
+ <th class="date">date</th>
+ <td class="date age">{date|rfc822date}</td>
+</tr>
+<tr>
+ <th class="author">parents</th>
+ <td class="author">{parent%filerevparent}</td>
+</tr>
+<tr>
+ <th class="author">children</th>
+ <td class="author">{child%filerevchild}</td>
+</tr>
+{changesettag}
+</table>
+
+<div class="overflow">
+<div class="sourcefirst"> line source</div>
+{text%fileline}
+<div class="sourcelast"></div>
+</div>
+</div>
+</div>
+
+{footer}
diff --git a/websdk/mercurial/templates/paper/footer.tmpl b/websdk/mercurial/templates/paper/footer.tmpl
new file mode 100644
index 0000000..4eb2d46
--- /dev/null
+++ b/websdk/mercurial/templates/paper/footer.tmpl
@@ -0,0 +1,5 @@
+<script type="text/javascript">process_dates()</script>
+{motd}
+
+</body>
+</html>
diff --git a/websdk/mercurial/templates/paper/graph.tmpl b/websdk/mercurial/templates/paper/graph.tmpl
new file mode 100644
index 0000000..80b44ac
--- /dev/null
+++ b/websdk/mercurial/templates/paper/graph.tmpl
@@ -0,0 +1,141 @@
+{header}
+<title>{repo|escape}: revision graph</title>
+<link rel="alternate" type="application/atom+xml"
+ href="{url}atom-log" title="Atom feed for {repo|escape}: log" />
+<link rel="alternate" type="application/rss+xml"
+ href="{url}rss-log" title="RSS feed for {repo|escape}: log" />
+<!--[if IE]><script type="text/javascript" src="{staticurl}excanvas.js"></script><![endif]-->
+</head>
+<body>
+
+<div class="container">
+<div class="menu">
+<div class="logo">
+<a href="{logourl}">
+<img src="{staticurl}{logoimg}" alt="mercurial" /></a>
+</div>
+<ul>
+<li><a href="{url}shortlog/{node|short}{sessionvars%urlparameter}">log</a></li>
+<li class="active">graph</li>
+<li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li>
+<li><a href="{url}bookmarks{sessionvars%urlparameter}">bookmarks</a></li>
+<li><a href="{url}branches{sessionvars%urlparameter}">branches</a></li>
+</ul>
+<ul>
+<li><a href="{url}rev/{node|short}{sessionvars%urlparameter}">changeset</a></li>
+<li><a href="{url}file/{node|short}{path|urlescape}{sessionvars%urlparameter}">browse</a></li>
+</ul>
+<ul>
+ <li><a href="{url}help{sessionvars%urlparameter}">help</a></li>
+</ul>
+</div>
+
+<div class="main">
+<h2><a href="{url}{sessionvars%urlparameter}">{repo|escape}</a></h2>
+<h3>graph</h3>
+
+<form class="search" action="{url}log">
+{sessionvars%hiddenformentry}
+<p><input name="rev" id="search1" type="text" size="30" /></p>
+<div id="hint">find changesets by author, revision,
+files, or words in the commit message</div>
+</form>
+
+<div class="navigate">
+<a href="{url}graph/{rev}{lessvars%urlparameter}">less</a>
+<a href="{url}graph/{rev}{morevars%urlparameter}">more</a>
+| rev {rev}: {changenav%navgraph}
+</div>
+
+<noscript><p>The revision graph only works with JavaScript-enabled browsers.</p></noscript>
+
+<div id="wrapper">
+<ul id="nodebgs"></ul>
+<canvas id="graph" width="480" height="{canvasheight}"></canvas>
+<ul id="graphnodes"></ul>
+</div>
+
+<script type="text/javascript">
+<!-- hide script content
+
+var data = {jsdata|json};
+var graph = new Graph();
+graph.scale({bg_height});
+
+graph.edge = function(x0, y0, x1, y1, color) \{
+
+ this.setColor(color, 0.0, 0.65);
+ this.ctx.beginPath();
+ this.ctx.moveTo(x0, y0);
+ this.ctx.lineTo(x1, y1);
+ this.ctx.stroke();
+
+}
+
+var revlink = '<li style="_STYLE"><span class="desc">';
+revlink += '<a href="{url}rev/_NODEID{sessionvars%urlparameter}" title="_NODEID">_DESC</a>';
+revlink += '</span>_TAGS<span class="info">_DATE, by _USER</span></li>';
+
+graph.vertex = function(x, y, color, parity, cur) \{
+
+ this.ctx.beginPath();
+ color = this.setColor(color, 0.25, 0.75);
+ this.ctx.arc(x, y, radius, 0, Math.PI * 2, true);
+ this.ctx.fill();
+
+ var bg = '<li class="bg parity' + parity + '"></li>';
+ var left = (this.columns + 1) * this.bg_height;
+ var nstyle = 'padding-left: ' + left + 'px;';
+ var item = revlink.replace(/_STYLE/, nstyle);
+ item = item.replace(/_PARITY/, 'parity' + parity);
+ item = item.replace(/_NODEID/, cur[0]);
+ item = item.replace(/_NODEID/, cur[0]);
+ item = item.replace(/_DESC/, cur[3]);
+ item = item.replace(/_USER/, cur[4]);
+ item = item.replace(/_DATE/, cur[5]);
+
+ var tagspan = '';
+ if (cur[7].length || cur[8].length || (cur[6][0] != 'default' || cur[6][1])) \{
+ tagspan = '<span class="logtags">';
+ if (cur[6][1]) \{
+ tagspan += '<span class="branchhead" title="' + cur[6][0] + '">';
+ tagspan += cur[6][0] + '</span> ';
+ } else if (!cur[6][1] && cur[6][0] != 'default') \{
+ tagspan += '<span class="branchname" title="' + cur[6][0] + '">';
+ tagspan += cur[6][0] + '</span> ';
+ }
+ if (cur[7].length) \{
+ for (var t in cur[7]) \{
+ var tag = cur[7][t];
+ tagspan += '<span class="tag">' + tag + '</span> ';
+ }
+ }
+ if (cur[8].length) \{
+ for (var b in cur[8]) \{
+ var bookmark = cur[8][b];
+ tagspan += '<span class="tag">' + bookmark + '</span> ';
+ }
+ }
+ tagspan += '</span>';
+ }
+
+ item = item.replace(/_TAGS/, tagspan);
+ return [bg, item];
+
+}
+
+graph.render(data);
+
+// stop hiding script -->
+</script>
+
+<div class="navigate">
+<a href="{url}graph/{rev}{lessvars%urlparameter}">less</a>
+<a href="{url}graph/{rev}{morevars%urlparameter}">more</a>
+| rev {rev}: {changenav%navgraph}
+</div>
+
+</div>
+</div>
+
+{footer}
diff --git a/websdk/mercurial/templates/paper/header.tmpl b/websdk/mercurial/templates/paper/header.tmpl
new file mode 100644
index 0000000..f24d768
--- /dev/null
+++ b/websdk/mercurial/templates/paper/header.tmpl
@@ -0,0 +1,7 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en-US">
+<head>
+<link rel="icon" href="{staticurl}hgicon.png" type="image/png" />
+<meta name="robots" content="index, nofollow" />
+<link rel="stylesheet" href="{staticurl}style-paper.css" type="text/css" />
+<script type="text/javascript" src="{staticurl}mercurial.js"></script>
diff --git a/websdk/mercurial/templates/paper/help.tmpl b/websdk/mercurial/templates/paper/help.tmpl
new file mode 100644
index 0000000..487e1f2
--- /dev/null
+++ b/websdk/mercurial/templates/paper/help.tmpl
@@ -0,0 +1,44 @@
+{header}
+<title>Help: {topic}</title>
+<link rel="alternate" type="application/atom+xml"
+ href="{url}atom-tags" title="Atom feed for {repo|escape}" />
+<link rel="alternate" type="application/rss+xml"
+ href="{url}rss-tags" title="RSS feed for {repo|escape}" />
+</head>
+<body>
+
+<div class="container">
+<div class="menu">
+<div class="logo">
+<a href="{logourl}">
+<img src="{staticurl}{logoimg}" alt="mercurial" /></a>
+</div>
+<ul>
+<li><a href="{url}shortlog{sessionvars%urlparameter}">log</a></li>
+<li><a href="{url}graph{sessionvars%urlparameter}">graph</a></li>
+<li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li>
+<li><a href="{url}bookmarks{sessionvars%urlparameter}">bookmarks</a></li>
+<li><a href="{url}branches{sessionvars%urlparameter}">branches</a></li>
+</ul>
+<ul>
+ <li class="active">help</li>
+</ul>
+</div>
+
+<div class="main">
+<h2><a href="{url}{sessionvars%urlparameter}">{repo|escape}</a></h2>
+<h3>Help: {topic}</h3>
+
+<form class="search" action="{url}log">
+{sessionvars%hiddenformentry}
+<p><input name="rev" id="search1" type="text" size="30" /></p>
+<div id="hint">find changesets by author, revision,
+files, or words in the commit message</div>
+</form>
+<pre>
+{doc|escape}
+</pre>
+</div>
+</div>
+
+{footer}
diff --git a/websdk/mercurial/templates/paper/helptopics.tmpl b/websdk/mercurial/templates/paper/helptopics.tmpl
new file mode 100644
index 0000000..a723b53
--- /dev/null
+++ b/websdk/mercurial/templates/paper/helptopics.tmpl
@@ -0,0 +1,49 @@
+{header}
+<title>Help: {title}</title>
+<link rel="alternate" type="application/atom+xml"
+ href="{url}atom-tags" title="Atom feed for {repo|escape}" />
+<link rel="alternate" type="application/rss+xml"
+ href="{url}rss-tags" title="RSS feed for {repo|escape}" />
+</head>
+<body>
+
+<div class="container">
+<div class="menu">
+<div class="logo">
+<a href="{logourl}">
+<img src="{staticurl}{logoimg}" alt="mercurial" /></a>
+</div>
+<ul>
+<li><a href="{url}shortlog{sessionvars%urlparameter}">log</a></li>
+<li><a href="{url}graph{sessionvars%urlparameter}">graph</a></li>
+<li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li>
+<li><a href="{url}bookmarks{sessionvars%urlparameter}">bookmarks</a></li>
+<li><a href="{url}branches{sessionvars%urlparameter}">branches</a></li>
+</ul>
+<ul>
+<li class="active">help</li>
+</ul>
+</div>
+
+<div class="main">
+<h2><a href="{url}{sessionvars%urlparameter}">{repo|escape}</a></h2>
+<form class="search" action="{url}log">
+{sessionvars%hiddenformentry}
+<p><input name="rev" id="search1" type="text" size="30" /></p>
+<div id="hint">find changesets by author, revision,
+files, or words in the commit message</div>
+</form>
+<table class="bigtable">
+<tr><td colspan="2"><h2><a name="main" href="#topics">Topics</a></h2></td></tr>
+{topics % helpentry}
+
+<tr><td colspan="2"><h2><a name="main" href="#main">Main Commands</a></h2></td></tr>
+{earlycommands % helpentry}
+
+<tr><td colspan="2"><h2><a name="other" href="#other">Other Commands</a></h2></td></tr>
+{othercommands % helpentry}
+</table>
+</div>
+</div>
+
+{footer}
diff --git a/websdk/mercurial/templates/paper/index.tmpl b/websdk/mercurial/templates/paper/index.tmpl
new file mode 100644
index 0000000..04b4ffb
--- /dev/null
+++ b/websdk/mercurial/templates/paper/index.tmpl
@@ -0,0 +1,26 @@
+{header}
+<title>Mercurial repositories index</title>
+</head>
+<body>
+
+<div class="container">
+<div class="menu">
+<a href="{logourl}">
+<img src="{staticurl}{logoimg}" width=75 height=90 border=0 alt="mercurial" /></a>
+</div>
+<div class="main">
+<h2>Mercurial Repositories</h2>
+
+<table class="bigtable">
+ <tr>
+ <th><a href="?sort={sort_name}">Name</a></th>
+ <th><a href="?sort={sort_description}">Description</a></th>
+ <th><a href="?sort={sort_contact}">Contact</a></th>
+ <th><a href="?sort={sort_lastchange}">Last modified</a></th>
+ <th>&nbsp;</th>
+ </tr>
+ {entries%indexentry}
+</table>
+</div>
+</div>
+{footer}
diff --git a/websdk/mercurial/templates/paper/manifest.tmpl b/websdk/mercurial/templates/paper/manifest.tmpl
new file mode 100644
index 0000000..3dc7e77
--- /dev/null
+++ b/websdk/mercurial/templates/paper/manifest.tmpl
@@ -0,0 +1,58 @@
+{header}
+<title>{repo|escape}: {node|short} {path|escape}</title>
+</head>
+<body>
+
+<div class="container">
+<div class="menu">
+<div class="logo">
+<a href="{logourl}">
+<img src="{staticurl}{logoimg}" alt="mercurial" /></a>
+</div>
+<ul>
+<li><a href="{url}shortlog/{node|short}{sessionvars%urlparameter}">log</a></li>
+<li><a href="{url}graph/{node|short}{sessionvars%urlparameter}">graph</a></li>
+<li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li>
+<li><a href="{url}bookmarks{sessionvars%urlparameter}">bookmarks</a></li>
+<li><a href="{url}branches{sessionvars%urlparameter}">branches</a></li>
+</ul>
+<ul>
+<li><a href="{url}rev/{node|short}{sessionvars%urlparameter}">changeset</a></li>
+<li class="active">browse</li>
+</ul>
+<ul>
+{archives%archiveentry}
+</ul>
+<ul>
+ <li><a href="{url}help{sessionvars%urlparameter}">help</a></li>
+</ul>
+</div>
+
+<div class="main">
+<h2><a href="{url}{sessionvars%urlparameter}">{repo|escape}</a></h2>
+<h3>directory {path|escape} @ {rev}:{node|short} {tags%changelogtag}</h3>
+
+<form class="search" action="{url}log">
+{sessionvars%hiddenformentry}
+<p><input name="rev" id="search1" type="text" size="30" /></p>
+<div id="hint">find changesets by author, revision,
+files, or words in the commit message</div>
+</form>
+
+<table class="bigtable">
+<tr>
+ <th class="name">name</th>
+ <th class="size">size</th>
+ <th class="permissions">permissions</th>
+</tr>
+<tr class="fileline parity{upparity}">
+ <td class="name"><a href="{url}file/{node|short}{up|urlescape}{sessionvars%urlparameter}">[up]</a></td>
+ <td class="size"></td>
+ <td class="permissions">drwxr-xr-x</td>
+</tr>
+{dentries%direntry}
+{fentries%fileentry}
+</table>
+</div>
+</div>
+{footer}
diff --git a/websdk/mercurial/templates/paper/map b/websdk/mercurial/templates/paper/map
new file mode 100644
index 0000000..42042e8
--- /dev/null
+++ b/websdk/mercurial/templates/paper/map
@@ -0,0 +1,213 @@
+default = 'shortlog'
+
+mimetype = 'text/html; charset={encoding}'
+header = header.tmpl
+footer = footer.tmpl
+search = search.tmpl
+
+changelog = shortlog.tmpl
+shortlog = shortlog.tmpl
+shortlogentry = shortlogentry.tmpl
+graph = graph.tmpl
+help = help.tmpl
+helptopics = helptopics.tmpl
+
+helpentry = '<tr><td><a href="{url}help/{topic|escape}{sessionvars%urlparameter}">{topic|escape}</a></td><td>{summary|escape}</td></tr>'
+
+naventry = '<a href="{url}log/{node|short}{sessionvars%urlparameter}">{label|escape}</a> '
+navshortentry = '<a href="{url}shortlog/{node|short}{sessionvars%urlparameter}">{label|escape}</a> '
+navgraphentry = '<a href="{url}graph/{node|short}{sessionvars%urlparameter}">{label|escape}</a> '
+filenaventry = '<a href="{url}log/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{label|escape}</a> '
+filedifflink = '<a href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{file|escape}</a> '
+filenodelink = '<a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{file|escape}</a> '
+filenolink = '{file|escape} '
+fileellipses = '...'
+diffstatlink = diffstat.tmpl
+diffstatnolink = diffstat.tmpl
+changelogentry = shortlogentry.tmpl
+searchentry = shortlogentry.tmpl
+changeset = changeset.tmpl
+manifest = manifest.tmpl
+
+nav = '{before%naventry} {after%naventry}'
+navshort = '{before%navshortentry}{after%navshortentry}'
+navgraph = '{before%navgraphentry}{after%navgraphentry}'
+filenav = '{before%filenaventry}{after%filenaventry}'
+
+direntry = '
+ <tr class="fileline parity{parity}">
+ <td class="name">
+ <a href="{url}file/{node|short}{path|urlescape}{sessionvars%urlparameter}">
+ <img src="{staticurl}coal-folder.png" alt="dir."/> {basename|escape}/
+ </a>
+ <a href="{url}file/{node|short}{path|urlescape}/{emptydirs|urlescape}{sessionvars%urlparameter}">
+ {emptydirs|escape}
+ </a>
+ </td>
+ <td class="size"></td>
+ <td class="permissions">drwxr-xr-x</td>
+ </tr>'
+
+fileentry = '
+ <tr class="fileline parity{parity}">
+ <td class="filename">
+ <a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">
+ <img src="{staticurl}coal-file.png" alt="file"/> {basename|escape}
+ </a>
+ </td>
+ <td class="size">{size}</td>
+ <td class="permissions">{permissions|permissions}</td>
+ </tr>'
+
+filerevision = filerevision.tmpl
+fileannotate = fileannotate.tmpl
+filediff = filediff.tmpl
+filelog = filelog.tmpl
+fileline = '
+ <div class="parity{parity} source"><a href="#{lineid}" id="{lineid}">{linenumber}</a> {line|escape}</div>'
+filelogentry = filelogentry.tmpl
+
+annotateline = '
+ <tr class="parity{parity}">
+ <td class="annotate">
+ <a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}#{targetline}"
+ title="{node|short}: {desc|escape|firstline}">{author|user}@{rev}</a>
+ </td>
+ <td class="source"><a href="#{lineid}" id="{lineid}">{linenumber}</a> {line|escape}</td>
+ </tr>'
+
+diffblock = '<div class="source bottomline parity{parity}"><pre>{lines}</pre></div>'
+difflineplus = '<a href="#{lineid}" id="{lineid}">{linenumber}</a> <span class="plusline">{line|escape}</span>'
+difflineminus = '<a href="#{lineid}" id="{lineid}">{linenumber}</a> <span class="minusline">{line|escape}</span>'
+difflineat = '<a href="#{lineid}" id="{lineid}">{linenumber}</a> <span class="atline">{line|escape}</span>'
+diffline = '<a href="#{lineid}" id="{lineid}">{linenumber}</a> {line|escape}'
+
+changelogparent = '
+ <tr>
+ <th class="parent">parent {rev}:</th>
+ <td class="parent"><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a></td>
+ </tr>'
+
+changesetparent = '<a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a> '
+
+filerevparent = '<a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{rename%filerename}{node|short}</a> '
+filerevchild = '<a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{node|short}</a> '
+
+filerename = '{file|escape}@'
+filelogrename = '
+ <span class="base">
+ base
+ <a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">
+ {file|escape}@{node|short}
+ </a>
+ </span>'
+fileannotateparent = '
+ <tr>
+ <td class="metatag">parent:</td>
+ <td>
+ <a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}">
+ {rename%filerename}{node|short}
+ </a>
+ </td>
+ </tr>'
+changesetchild = ' <a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a>'
+changelogchild = '
+ <tr>
+ <th class="child">child</th>
+ <td class="child">
+ <a href="{url}rev/{node|short}{sessionvars%urlparameter}">
+ {node|short}
+ </a>
+ </td>
+ </tr>'
+fileannotatechild = '
+ <tr>
+ <td class="metatag">child:</td>
+ <td>
+ <a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}">
+ {node|short}
+ </a>
+ </td>
+ </tr>'
+tags = tags.tmpl
+tagentry = '
+ <tr class="tagEntry parity{parity}">
+ <td>
+ <a href="{url}rev/{node|short}{sessionvars%urlparameter}">
+ {tag|escape}
+ </a>
+ </td>
+ <td class="node">
+ {node|short}
+ </td>
+ </tr>'
+bookmarks = bookmarks.tmpl
+bookmarkentry = '
+ <tr class="tagEntry parity{parity}">
+ <td>
+ <a href="{url}rev/{node|short}{sessionvars%urlparameter}">
+ {bookmark|escape}
+ </a>
+ </td>
+ <td class="node">
+ {node|short}
+ </td>
+ </tr>'
+branches = branches.tmpl
+branchentry = '
+ <tr class="tagEntry parity{parity}">
+ <td>
+ <a href="{url}shortlog/{node|short}{sessionvars%urlparameter}" class="{status}">
+ {branch|escape}
+ </a>
+ </td>
+ <td class="node">
+ {node|short}
+ </td>
+ </tr>'
+changelogtag = '<span class="tag">{name|escape}</span> '
+changesettag = '<span class="tag">{tag|escape}</span> '
+changesetbookmark = '<span class="tag">{bookmark|escape}</span> '
+changelogbranchhead = '<span class="branchhead">{name|escape}</span> '
+changelogbranchname = '<span class="branchname">{name|escape}</span> '
+
+filediffparent = '
+ <tr>
+ <th class="parent">parent {rev}:</th>
+ <td class="parent"><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a></td>
+ </tr>'
+filelogparent = '
+ <tr>
+ <th>parent {rev}:</th>
+ <td><a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{node|short}</a></td>
+ </tr>'
+filediffchild = '
+ <tr>
+ <th class="child">child {rev}:</th>
+ <td class="child"><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a>
+ </td>
+ </tr>'
+filelogchild = '
+ <tr>
+ <th>child {rev}:</th>
+ <td><a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{node|short}</a></td>
+ </tr>'
+
+indexentry = '
+ <tr class="parity{parity}">
+ <td><a href="{url}{sessionvars%urlparameter}">{name|escape}</a></td>
+ <td>{description}</td>
+ <td>{contact|obfuscate}</td>
+ <td class="age">{lastchange|rfc822date}</td>
+ <td class="indexlinks">{archives%indexarchiveentry}</td>
+ </tr>\n'
+indexarchiveentry = '<a href="{url}archive/{node|short}{extension|urlescape}">&nbsp;&darr;{type|escape}</a>'
+index = index.tmpl
+archiveentry = '
+ <li>
+ <a href="{url}archive/{node|short}{extension|urlescape}">{type|escape}</a>
+ </li>'
+notfound = notfound.tmpl
+error = error.tmpl
+urlparameter = '{separator}{name}={value|urlescape}'
+hiddenformentry = '<input type="hidden" name="{name}" value="{value|escape}" />'
diff --git a/websdk/mercurial/templates/paper/notfound.tmpl b/websdk/mercurial/templates/paper/notfound.tmpl
new file mode 100644
index 0000000..e9e6ba4
--- /dev/null
+++ b/websdk/mercurial/templates/paper/notfound.tmpl
@@ -0,0 +1,12 @@
+{header}
+<title>Mercurial repository not found</title>
+</head>
+<body>
+
+<h2>Mercurial repository not found</h2>
+
+The specified repository "{repo|escape}" is unknown, sorry.
+
+Please go back to the <a href="{url}">main repository list page</a>.
+
+{footer}
diff --git a/websdk/mercurial/templates/paper/search.tmpl b/websdk/mercurial/templates/paper/search.tmpl
new file mode 100644
index 0000000..feee155
--- /dev/null
+++ b/websdk/mercurial/templates/paper/search.tmpl
@@ -0,0 +1,55 @@
+{header}
+<title>{repo|escape}: searching for {query|escape}</title>
+</head>
+<body>
+
+<div class="container">
+<div class="menu">
+<div class="logo">
+<a href="{logourl}">
+<img src="{staticurl}{logoimg}" width=75 height=90 border=0 alt="mercurial"></a>
+</div>
+<ul>
+<li><a href="{url}shortlog{sessionvars%urlparameter}">log</a></li>
+<li><a href="{url}graph{sessionvars%urlparameter}">graph</a></li>
+<li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li>
+<li><a href="{url}bookmarks{sessionvars%urlparameter}">bookmarks</a></li>
+<li><a href="{url}branches{sessionvars%urlparameter}">branches</a></li>
+<li><a href="{url}help{sessionvars%urlparameter}">help</a></li>
+</ul>
+</div>
+
+<div class="main">
+<h2><a href="{url}{sessionvars%urlparameter}">{repo|escape}</a></h2>
+<h3>searching for '{query|escape}'</h3>
+
+<form class="search" action="{url}log">
+{sessionvars%hiddenformentry}
+<p><input name="rev" id="search1" type="text" size="30"></p>
+<div id="hint">find changesets by author, revision,
+files, or words in the commit message</div>
+</form>
+
+<div class="navigate">
+<a href="{url}search/{lessvars%urlparameter}">less</a>
+<a href="{url}search/{morevars%urlparameter}">more</a>
+</div>
+
+<table class="bigtable">
+ <tr>
+ <th class="age">age</th>
+ <th class="author">author</th>
+ <th class="description">description</th>
+ </tr>
+{entries}
+</table>
+
+<div class="navigate">
+<a href="{url}search/{lessvars%urlparameter}">less</a>
+<a href="{url}search/{morevars%urlparameter}">more</a>
+</div>
+
+</div>
+</div>
+
+{footer}
diff --git a/websdk/mercurial/templates/paper/shortlog.tmpl b/websdk/mercurial/templates/paper/shortlog.tmpl
new file mode 100644
index 0000000..b69d4b0
--- /dev/null
+++ b/websdk/mercurial/templates/paper/shortlog.tmpl
@@ -0,0 +1,70 @@
+{header}
+<title>{repo|escape}: log</title>
+<link rel="alternate" type="application/atom+xml"
+ href="{url}atom-log" title="Atom feed for {repo|escape}" />
+<link rel="alternate" type="application/rss+xml"
+ href="{url}rss-log" title="RSS feed for {repo|escape}" />
+</head>
+<body>
+
+<div class="container">
+<div class="menu">
+<div class="logo">
+<a href="{logourl}">
+<img src="{staticurl}{logoimg}" alt="mercurial" /></a>
+</div>
+<ul>
+<li class="active">log</li>
+<li><a href="{url}graph/{node|short}{sessionvars%urlparameter}">graph</a></li>
+<li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li>
+<li><a href="{url}bookmarks{sessionvars%urlparameter}">bookmarks</a></li>
+<li><a href="{url}branches{sessionvars%urlparameter}">branches</a></li>
+</ul>
+<ul>
+<li><a href="{url}rev/{node|short}{sessionvars%urlparameter}">changeset</a></li>
+<li><a href="{url}file/{node|short}{path|urlescape}{sessionvars%urlparameter}">browse</a></li>
+</ul>
+<ul>
+{archives%archiveentry}
+</ul>
+<ul>
+ <li><a href="{url}help{sessionvars%urlparameter}">help</a></li>
+</ul>
+</div>
+
+<div class="main">
+<h2><a href="{url}{sessionvars%urlparameter}">{repo|escape}</a></h2>
+<h3>log</h3>
+
+<form class="search" action="{url}log">
+{sessionvars%hiddenformentry}
+<p><input name="rev" id="search1" type="text" size="30" /></p>
+<div id="hint">find changesets by author, revision,
+files, or words in the commit message</div>
+</form>
+
+<div class="navigate">
+<a href="{url}shortlog/{rev}{lessvars%urlparameter}">less</a>
+<a href="{url}shortlog/{rev}{morevars%urlparameter}">more</a>
+| rev {rev}: {changenav%navshort}
+</div>
+
+<table class="bigtable">
+ <tr>
+ <th class="age">age</th>
+ <th class="author">author</th>
+ <th class="description">description</th>
+ </tr>
+{entries%shortlogentry}
+</table>
+
+<div class="navigate">
+<a href="{url}shortlog/{rev}{lessvars%urlparameter}">less</a>
+<a href="{url}shortlog/{rev}{morevars%urlparameter}">more</a>
+| rev {rev}: {changenav%navshort}
+</div>
+
+</div>
+</div>
+
+{footer}
diff --git a/websdk/mercurial/templates/paper/shortlogentry.tmpl b/websdk/mercurial/templates/paper/shortlogentry.tmpl
new file mode 100644
index 0000000..09e94c0
--- /dev/null
+++ b/websdk/mercurial/templates/paper/shortlogentry.tmpl
@@ -0,0 +1,5 @@
+ <tr class="parity{parity}">
+ <td class="age">{date|rfc822date}</td>
+ <td class="author">{author|person}</td>
+ <td class="description"><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{desc|strip|firstline|escape|nonempty}</a>{inbranch%changelogbranchname}{branches%changelogbranchhead}{tags % '<span class="tag">{name|escape}</span> '}{bookmarks % '<span class="tag">{name|escape}</span> '}</td>
+ </tr>
diff --git a/websdk/mercurial/templates/paper/tags.tmpl b/websdk/mercurial/templates/paper/tags.tmpl
new file mode 100644
index 0000000..9b4e623
--- /dev/null
+++ b/websdk/mercurial/templates/paper/tags.tmpl
@@ -0,0 +1,49 @@
+{header}
+<title>{repo|escape}: tags</title>
+<link rel="alternate" type="application/atom+xml"
+ href="{url}atom-tags" title="Atom feed for {repo|escape}: tags" />
+<link rel="alternate" type="application/rss+xml"
+ href="{url}rss-tags" title="RSS feed for {repo|escape}: tags" />
+</head>
+<body>
+
+<div class="container">
+<div class="menu">
+<div class="logo">
+<a href="{logourl}">
+<img src="{staticurl}{logoimg}" alt="mercurial" /></a>
+</div>
+<ul>
+<li><a href="{url}shortlog{sessionvars%urlparameter}">log</a></li>
+<li><a href="{url}graph{sessionvars%urlparameter}">graph</a></li>
+<li class="active">tags</li>
+<li><a href="{url}bookmarks{sessionvars%urlparameter}">bookmarks</a></li>
+<li><a href="{url}branches{sessionvars%urlparameter}">branches</a></li>
+</ul>
+<ul>
+<li><a href="{url}help{sessionvars%urlparameter}">help</a></li>
+</ul>
+</div>
+
+<div class="main">
+<h2><a href="{url}{sessionvars%urlparameter}">{repo|escape}</a></h2>
+<h3>tags</h3>
+
+<form class="search" action="{url}log">
+{sessionvars%hiddenformentry}
+<p><input name="rev" id="search1" type="text" size="30" /></p>
+<div id="hint">find changesets by author, revision,
+files, or words in the commit message</div>
+</form>
+
+<table class="bigtable">
+<tr>
+ <th>tag</th>
+ <th>node</th>
+</tr>
+{entries%tagentry}
+</table>
+</div>
+</div>
+
+{footer}
diff --git a/websdk/mercurial/templates/raw/changeset.tmpl b/websdk/mercurial/templates/raw/changeset.tmpl
new file mode 100644
index 0000000..b59d99b
--- /dev/null
+++ b/websdk/mercurial/templates/raw/changeset.tmpl
@@ -0,0 +1,9 @@
+{header}
+# HG changeset patch
+# User {author}
+# Date {date|hgdate}
+# Node ID {node}
+{parent%changesetparent}
+{desc}
+
+{diff}
diff --git a/websdk/mercurial/templates/raw/error.tmpl b/websdk/mercurial/templates/raw/error.tmpl
new file mode 100644
index 0000000..9407c13
--- /dev/null
+++ b/websdk/mercurial/templates/raw/error.tmpl
@@ -0,0 +1,2 @@
+{header}
+error: {error}
diff --git a/websdk/mercurial/templates/raw/fileannotate.tmpl b/websdk/mercurial/templates/raw/fileannotate.tmpl
new file mode 100644
index 0000000..ad1bed6
--- /dev/null
+++ b/websdk/mercurial/templates/raw/fileannotate.tmpl
@@ -0,0 +1,5 @@
+{header}
+{annotate%annotateline}
+{footer}
+
+
diff --git a/websdk/mercurial/templates/raw/filediff.tmpl b/websdk/mercurial/templates/raw/filediff.tmpl
new file mode 100644
index 0000000..c4014bc
--- /dev/null
+++ b/websdk/mercurial/templates/raw/filediff.tmpl
@@ -0,0 +1,5 @@
+{header}
+{diff}
+{footer}
+
+
diff --git a/websdk/mercurial/templates/raw/index.tmpl b/websdk/mercurial/templates/raw/index.tmpl
new file mode 100644
index 0000000..29d7c9e
--- /dev/null
+++ b/websdk/mercurial/templates/raw/index.tmpl
@@ -0,0 +1,2 @@
+{header}
+{entries%indexentry}
diff --git a/websdk/mercurial/templates/raw/manifest.tmpl b/websdk/mercurial/templates/raw/manifest.tmpl
new file mode 100644
index 0000000..8d4a934
--- /dev/null
+++ b/websdk/mercurial/templates/raw/manifest.tmpl
@@ -0,0 +1,3 @@
+{header}
+{dentries%direntry}{fentries%fileentry}
+{footer}
diff --git a/websdk/mercurial/templates/raw/map b/websdk/mercurial/templates/raw/map
new file mode 100644
index 0000000..ed5c35b
--- /dev/null
+++ b/websdk/mercurial/templates/raw/map
@@ -0,0 +1,30 @@
+mimetype = 'text/plain; charset={encoding}'
+header = ''
+footer = ''
+changeset = changeset.tmpl
+difflineplus = '{line}'
+difflineminus = '{line}'
+difflineat = '{line}'
+diffline = '{line}'
+changesetparent = '# Parent {node}'
+changesetchild = '# Child {node}'
+filenodelink = ''
+filenolink = ''
+fileline = '{line}'
+diffblock = '{lines}'
+filediff = filediff.tmpl
+fileannotate = fileannotate.tmpl
+annotateline = '{author|user}@{rev}: {line}'
+manifest = manifest.tmpl
+direntry = 'drwxr-xr-x {basename}\n'
+fileentry = '{permissions|permissions} {size} {basename}\n'
+index = index.tmpl
+notfound = notfound.tmpl
+error = error.tmpl
+indexentry = '{url}\n'
+tags = '{entries%tagentry}'
+tagentry = '{tag} {node}\n'
+bookmarks = '{entries%bookmarkentry}'
+bookmarkentry = '{bookmark} {node}\n'
+branches = '{entries%branchentry}'
+branchentry = '{branch} {node} {status}\n'
diff --git a/websdk/mercurial/templates/raw/notfound.tmpl b/websdk/mercurial/templates/raw/notfound.tmpl
new file mode 100644
index 0000000..a7b3251
--- /dev/null
+++ b/websdk/mercurial/templates/raw/notfound.tmpl
@@ -0,0 +1,2 @@
+{header}
+error: repository {repo} not found
diff --git a/websdk/mercurial/templates/rss/bookmarkentry.tmpl b/websdk/mercurial/templates/rss/bookmarkentry.tmpl
new file mode 100644
index 0000000..93b5614
--- /dev/null
+++ b/websdk/mercurial/templates/rss/bookmarkentry.tmpl
@@ -0,0 +1,6 @@
+<item>
+ <title>{bookmark|escape}</title>
+ <link>{urlbase}{url}rev/{node|short}</link>
+ <description><![CDATA[{bookmark|strip|escape|addbreaks}]]></description>
+ <pubDate>{date|rfc822date}</pubDate>
+</item>
diff --git a/websdk/mercurial/templates/rss/bookmarks.tmpl b/websdk/mercurial/templates/rss/bookmarks.tmpl
new file mode 100644
index 0000000..8cc8172
--- /dev/null
+++ b/websdk/mercurial/templates/rss/bookmarks.tmpl
@@ -0,0 +1,6 @@
+{header}
+ <title>{repo|escape}: bookmarks </title>
+ <description>{repo|escape} bookmark history</description>
+ {entries%bookmarkentry}
+ </channel>
+</rss>
diff --git a/websdk/mercurial/templates/rss/changelog.tmpl b/websdk/mercurial/templates/rss/changelog.tmpl
new file mode 100644
index 0000000..65b96ad
--- /dev/null
+++ b/websdk/mercurial/templates/rss/changelog.tmpl
@@ -0,0 +1,6 @@
+{header}
+ <title>{repo|escape} Changelog</title>
+ <description>{repo|escape} Changelog</description>
+ {entries%changelogentry}
+ </channel>
+</rss> \ No newline at end of file
diff --git a/websdk/mercurial/templates/rss/changelogentry.tmpl b/websdk/mercurial/templates/rss/changelogentry.tmpl
new file mode 100644
index 0000000..12fe8e0
--- /dev/null
+++ b/websdk/mercurial/templates/rss/changelogentry.tmpl
@@ -0,0 +1,7 @@
+<item>
+ <title>{desc|strip|firstline|strip|escape}</title>
+ <guid isPermaLink="true">{urlbase}{url}rev/{node|short}</guid>
+ <description><![CDATA[{desc|strip|escape|addbreaks|nonempty}]]></description>
+ <author>{author|obfuscate}</author>
+ <pubDate>{date|rfc822date}</pubDate>
+</item>
diff --git a/websdk/mercurial/templates/rss/error.tmpl b/websdk/mercurial/templates/rss/error.tmpl
new file mode 100644
index 0000000..87e6009
--- /dev/null
+++ b/websdk/mercurial/templates/rss/error.tmpl
@@ -0,0 +1,10 @@
+{header}
+ <title>Error</title>
+ <description>Error</description>
+ <item>
+ <title>Error</title>
+ <description>{error|escape}</description>
+ <guid>http://mercurial.selenic.com/#error</guid>
+ </item>
+ </channel>
+</rss>
diff --git a/websdk/mercurial/templates/rss/filelog.tmpl b/websdk/mercurial/templates/rss/filelog.tmpl
new file mode 100644
index 0000000..31f4dc7
--- /dev/null
+++ b/websdk/mercurial/templates/rss/filelog.tmpl
@@ -0,0 +1,6 @@
+{header}
+ <title>{repo|escape}: {file|escape} history</title>
+ <description>{file|escape} revision history</description>
+ {entries%filelogentry}
+ </channel>
+</rss>
diff --git a/websdk/mercurial/templates/rss/filelogentry.tmpl b/websdk/mercurial/templates/rss/filelogentry.tmpl
new file mode 100644
index 0000000..51cd3af
--- /dev/null
+++ b/websdk/mercurial/templates/rss/filelogentry.tmpl
@@ -0,0 +1,7 @@
+<item>
+ <title>{desc|strip|firstline|strip|escape}</title>
+ <link>{urlbase}{url}log{node|short}/{file|urlescape}</link>
+ <description><![CDATA[{desc|strip|escape|addbreaks|nonempty}]]></description>
+ <author>{author|obfuscate}</author>
+ <pubDate>{date|rfc822date}</pubDate>
+</item>
diff --git a/websdk/mercurial/templates/rss/header.tmpl b/websdk/mercurial/templates/rss/header.tmpl
new file mode 100644
index 0000000..ed29196
--- /dev/null
+++ b/websdk/mercurial/templates/rss/header.tmpl
@@ -0,0 +1,5 @@
+<?xml version="1.0" encoding="{encoding}"?>
+<rss version="2.0">
+ <channel>
+ <link>{urlbase}{url}</link>
+ <language>en-us</language>
diff --git a/websdk/mercurial/templates/rss/map b/websdk/mercurial/templates/rss/map
new file mode 100644
index 0000000..0f141be
--- /dev/null
+++ b/websdk/mercurial/templates/rss/map
@@ -0,0 +1,12 @@
+default = 'changelog'
+mimetype = 'text/xml; charset={encoding}'
+header = header.tmpl
+changelog = changelog.tmpl
+changelogentry = changelogentry.tmpl
+filelog = filelog.tmpl
+filelogentry = filelogentry.tmpl
+tags = tags.tmpl
+tagentry = tagentry.tmpl
+bookmarks = bookmarks.tmpl
+bookmarkentry = bookmarkentry.tmpl
+error = error.tmpl
diff --git a/websdk/mercurial/templates/rss/tagentry.tmpl b/websdk/mercurial/templates/rss/tagentry.tmpl
new file mode 100644
index 0000000..42fa038
--- /dev/null
+++ b/websdk/mercurial/templates/rss/tagentry.tmpl
@@ -0,0 +1,6 @@
+<item>
+ <title>{tag|escape}</title>
+ <link>{urlbase}{url}rev/{node|short}</link>
+ <description><![CDATA[{tag|strip|escape|addbreaks}]]></description>
+ <pubDate>{date|rfc822date}</pubDate>
+</item>
diff --git a/websdk/mercurial/templates/rss/tags.tmpl b/websdk/mercurial/templates/rss/tags.tmpl
new file mode 100644
index 0000000..93f1e96
--- /dev/null
+++ b/websdk/mercurial/templates/rss/tags.tmpl
@@ -0,0 +1,6 @@
+{header}
+ <title>{repo|escape}: tags </title>
+ <description>{repo|escape} tag history</description>
+ {entriesnotip%tagentry}
+ </channel>
+</rss>
diff --git a/websdk/mercurial/templates/spartan/branches.tmpl b/websdk/mercurial/templates/spartan/branches.tmpl
new file mode 100644
index 0000000..43e3bdb
--- /dev/null
+++ b/websdk/mercurial/templates/spartan/branches.tmpl
@@ -0,0 +1,27 @@
+{header}
+<title>{repo|escape}: branches</title>
+<link rel="alternate" type="application/atom+xml"
+ href="{url}atom-branches" title="Atom feed for {repo|escape}: branches">
+<link rel="alternate" type="application/rss+xml"
+ href="{url}rss-branches" title="RSS feed for {repo|escape}: branches">
+</head>
+<body>
+
+<div class="buttons">
+<a href="{url}log{sessionvars%urlparameter}">changelog</a>
+<a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a>
+<a href="{url}graph{sessionvars%urlparameter}">graph</a>
+<a href="{url}tags{sessionvars%urlparameter}">tags</a>
+<a href="{url}file/{node|short}/{sessionvars%urlparameter}">files</a>
+<a href="{url}help{sessionvars%urlparameter}">help</a>
+<a type="application/rss+xml" href="{url}rss-branches">rss</a>
+<a type="application/atom+xml" href="{url}atom-branches">atom</a>
+</div>
+
+<h2>branches:</h2>
+
+<ul id="tagEntries">
+{entries%branchentry}
+</ul>
+
+{footer}
diff --git a/websdk/mercurial/templates/spartan/changelog.tmpl b/websdk/mercurial/templates/spartan/changelog.tmpl
new file mode 100644
index 0000000..466e681
--- /dev/null
+++ b/websdk/mercurial/templates/spartan/changelog.tmpl
@@ -0,0 +1,44 @@
+{header}
+<title>{repo|escape}: changelog</title>
+<link rel="alternate" type="application/atom+xml"
+ href="{url}atom-log" title="Atom feed for {repo|escape}">
+<link rel="alternate" type="application/rss+xml"
+ href="{url}rss-log" title="RSS feed for {repo|escape}">
+</head>
+<body>
+
+<div class="buttons">
+<a href="{url}shortlog/{rev}{sessionvars%urlparameter}">shortlog</a>
+<a href="{url}graph{sessionvars%urlparameter}">graph</a>
+<a href="{url}tags{sessionvars%urlparameter}">tags</a>
+<a href="{url}branches{sessionvars%urlparameter}">branches</a>
+<a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a>
+{archives%archiveentry}
+<a href="{url}help{sessionvars%urlparameter}">help</a>
+<a type="application/rss+xml" href="{url}rss-log">rss</a>
+<a type="application/atom+xml" href="{url}atom-log" title="Atom feed for {repo|escape}">atom</a>
+</div>
+
+<h2>changelog for {repo|escape}</h2>
+
+<form action="{url}log">
+{sessionvars%hiddenformentry}
+<p>
+<label for="search1">search:</label>
+<input name="rev" id="search1" type="text" size="30">
+navigate: <small class="navigate">{changenav%nav}</small>
+</p>
+</form>
+
+{entries%changelogentry}
+
+<form action="{url}log">
+{sessionvars%hiddenformentry}
+<p>
+<label for="search2">search:</label>
+<input name="rev" id="search2" type="text" size="30">
+navigate: <small class="navigate">{changenav%nav}</small>
+</p>
+</form>
+
+{footer}
diff --git a/websdk/mercurial/templates/spartan/changelogentry.tmpl b/websdk/mercurial/templates/spartan/changelogentry.tmpl
new file mode 100644
index 0000000..f506b94
--- /dev/null
+++ b/websdk/mercurial/templates/spartan/changelogentry.tmpl
@@ -0,0 +1,25 @@
+<table class="logEntry parity{parity}">
+ <tr>
+ <th><span class="age">{date|rfc822date}</span>:</th>
+ <th class="firstline">{desc|strip|firstline|escape|nonempty}</th>
+ </tr>
+ <tr>
+ <th class="revision">changeset {rev}:</th>
+ <td class="node"><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a></td>
+ </tr>
+ {parent%changelogparent}
+ {child%changelogchild}
+ {changelogtag}
+ <tr>
+ <th class="author">author:</th>
+ <td class="author">{author|obfuscate}</td>
+ </tr>
+ <tr>
+ <th class="date">date:</th>
+ <td class="date">{date|rfc822date}</td>
+ </tr>
+ <tr>
+ <th class="files"><a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a>:</th>
+ <td class="files">{files}</td>
+ </tr>
+</table>
diff --git a/websdk/mercurial/templates/spartan/changeset.tmpl b/websdk/mercurial/templates/spartan/changeset.tmpl
new file mode 100644
index 0000000..b357126
--- /dev/null
+++ b/websdk/mercurial/templates/spartan/changeset.tmpl
@@ -0,0 +1,52 @@
+{header}
+<title>{repo|escape}: changeset {node|short}</title>
+</head>
+<body>
+
+<div class="buttons">
+<a href="{url}log/{rev}{sessionvars%urlparameter}">changelog</a>
+<a href="{url}shortlog/{rev}{sessionvars%urlparameter}">shortlog</a>
+<a href="{url}graph{sessionvars%urlparameter}">graph</a>
+<a href="{url}tags{sessionvars%urlparameter}">tags</a>
+<a href="{url}branches{sessionvars%urlparameter}">branches</a>
+<a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a>
+<a href="{url}raw-rev/{node|short}">raw</a>
+{archives%archiveentry}
+<a href="{url}help{sessionvars%urlparameter}">help</a>
+</div>
+
+<h2>changeset: {desc|strip|escape|firstline|nonempty}</h2>
+
+<table id="changesetEntry">
+<tr>
+ <th class="changeset">changeset {rev}:</th>
+ <td class="changeset"><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a></td>
+</tr>
+{parent%changesetparent}
+{child%changesetchild}
+{changesettag}
+<tr>
+ <th class="author">author:</th>
+ <td class="author">{author|obfuscate}</td>
+</tr>
+<tr>
+ <th class="date">date:</th>
+ <td class="date age">{date|rfc822date}</td>
+</tr>
+<tr>
+ <th class="files">files:</th>
+ <td class="files">{files}</td>
+</tr>
+<tr>
+ <th class="description">description:</th>
+ <td class="description">{desc|strip|escape|addbreaks|nonempty}</td>
+</tr>
+</table>
+
+<div id="changesetDiff">
+{diff}
+</div>
+
+{footer}
+
+
diff --git a/websdk/mercurial/templates/spartan/error.tmpl b/websdk/mercurial/templates/spartan/error.tmpl
new file mode 100644
index 0000000..fc2c788
--- /dev/null
+++ b/websdk/mercurial/templates/spartan/error.tmpl
@@ -0,0 +1,15 @@
+{header}
+<title>Mercurial Error</title>
+</head>
+<body>
+
+<h2>Mercurial Error</h2>
+
+<p>
+An error occurred while processing your request:
+</p>
+<p>
+{error|escape}
+</p>
+
+{footer}
diff --git a/websdk/mercurial/templates/spartan/fileannotate.tmpl b/websdk/mercurial/templates/spartan/fileannotate.tmpl
new file mode 100644
index 0000000..5d6a72b
--- /dev/null
+++ b/websdk/mercurial/templates/spartan/fileannotate.tmpl
@@ -0,0 +1,49 @@
+{header}
+<title>{repo|escape}: {file|escape} annotate</title>
+</head>
+<body>
+
+<div class="buttons">
+<a href="{url}log/{rev}{sessionvars%urlparameter}">changelog</a>
+<a href="{url}shortlog/{rev}{sessionvars%urlparameter}">shortlog</a>
+<a href="{url}graph{sessionvars%urlparameter}">graph</a>
+<a href="{url}tags{sessionvars%urlparameter}">tags</a>
+<a href="{url}branches{sessionvars%urlparameter}">branches</a>
+<a href="{url}rev/{node|short}{sessionvars%urlparameter}">changeset</a>
+<a href="{url}file/{node|short}{path|urlescape}{sessionvars%urlparameter}">files</a>
+<a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">file</a>
+<a href="{url}log/{node|short}/{file|urlescape}{sessionvars%urlparameter}">revisions</a>
+<a href="{url}raw-annotate/{node|short}/{file|urlescape}">raw</a>
+<a href="{url}help{sessionvars%urlparameter}">help</a>
+</div>
+
+<h2>Annotate {file|escape}</h2>
+
+<table>
+<tr>
+ <td class="metatag">changeset {rev}:</td>
+ <td><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a></td></tr>
+{parent%fileannotateparent}
+{child%fileannotatechild}
+<tr>
+ <td class="metatag">author:</td>
+ <td>{author|obfuscate}</td></tr>
+<tr>
+ <td class="metatag">date:</td>
+ <td class="date age">{date|rfc822date}</td>
+</tr>
+<tr>
+ <td class="metatag">permissions:</td>
+ <td>{permissions|permissions}</td>
+</tr>
+<tr>
+ <td class="metatag">description:</td>
+ <td>{desc|strip|escape|addbreaks|nonempty}</td>
+</tr>
+</table>
+
+<table cellspacing="0" cellpadding="0">
+{annotate%annotateline}
+</table>
+
+{footer}
diff --git a/websdk/mercurial/templates/spartan/filediff.tmpl b/websdk/mercurial/templates/spartan/filediff.tmpl
new file mode 100644
index 0000000..7640cba
--- /dev/null
+++ b/websdk/mercurial/templates/spartan/filediff.tmpl
@@ -0,0 +1,37 @@
+{header}
+<title>{repo|escape}: {file|escape} diff</title>
+</head>
+<body>
+
+<div class="buttons">
+<a href="{url}log/{rev}{sessionvars%urlparameter}">changelog</a>
+<a href="{url}shortlog/{rev}{sessionvars%urlparameter}">shortlog</a>
+<a href="{url}graph{sessionvars%urlparameter}">graph</a>
+<a href="{url}tags{sessionvars%urlparameter}">tags</a>
+<a href="{url}branches{sessionvars%urlparameter}">branches</a>
+<a href="{url}rev/{node|short}{sessionvars%urlparameter}">changeset</a>
+<a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">file</a>
+<a href="{url}log/{node|short}/{file|urlescape}{sessionvars%urlparameter}">revisions</a>
+<a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}">annotate</a>
+<a href="{url}raw-diff/{node|short}/{file|urlescape}">raw</a>
+<a href="{url}help{sessionvars%urlparameter}">help</a>
+</div>
+
+<h2>{file|escape}</h2>
+
+<table id="filediffEntry">
+<tr>
+ <th class="revision">revision {rev}:</th>
+ <td class="revision"><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a></td>
+</tr>
+{parent%filediffparent}
+{child%filediffchild}
+</table>
+
+<div id="fileDiff">
+{diff}
+</div>
+
+{footer}
+
+
diff --git a/websdk/mercurial/templates/spartan/filelog.tmpl b/websdk/mercurial/templates/spartan/filelog.tmpl
new file mode 100644
index 0000000..d003f79
--- /dev/null
+++ b/websdk/mercurial/templates/spartan/filelog.tmpl
@@ -0,0 +1,29 @@
+{header}
+<title>{repo|escape}: {file|escape} history</title>
+<link rel="alternate" type="application/atom+xml"
+ href="{url}atom-log/tip/{file|urlescape}" title="Atom feed for {repo|escape}:{file}">
+<link rel="alternate" type="application/rss+xml"
+ href="{url}rss-log/tip/{file|urlescape}" title="RSS feed for {repo|escape}:{file}">
+</head>
+<body>
+
+<div class="buttons">
+<a href="{url}log{sessionvars%urlparameter}">changelog</a>
+<a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a>
+<a href="{url}graph{sessionvars%urlparameter}">graph</a>
+<a href="{url}tags{sessionvars%urlparameter}">tags</a>
+<a href="{url}branches{sessionvars%urlparameter}">branches</a>
+<a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">file</a>
+<a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}">annotate</a>
+<a href="{url}help{sessionvars%urlparameter}">help</a>
+<a type="application/rss+xml" href="{url}rss-log/tip/{file|urlescape}">rss</a>
+<a type="application/atom+xml" href="{url}atom-log/tip/{file|urlescape}" title="Atom feed for {repo|escape}:{file}">atom</a>
+</div>
+
+<h2>{file|escape} revision history</h2>
+
+<p>navigate: <small class="navigate">{nav%filenav}</small></p>
+
+{entries%filelogentry}
+
+{footer}
diff --git a/websdk/mercurial/templates/spartan/filelogentry.tmpl b/websdk/mercurial/templates/spartan/filelogentry.tmpl
new file mode 100644
index 0000000..baff52d
--- /dev/null
+++ b/websdk/mercurial/templates/spartan/filelogentry.tmpl
@@ -0,0 +1,25 @@
+<table class="logEntry parity{parity}">
+ <tr>
+ <th><span class="age">{date|rfc822date}</span>:</th>
+ <th class="firstline"><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{desc|strip|firstline|escape|nonempty}</a></th>
+ </tr>
+ <tr>
+ <th class="revision">revision {filerev}:</td>
+ <td class="node">
+ <a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{node|short}</a>
+ <a href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">(diff)</a>
+ <a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}">(annotate)</a>
+ </td>
+ </tr>
+ {rename%filelogrename}
+ <tr>
+ <th class="author">author:</th>
+ <td class="author">{author|obfuscate}</td>
+ </tr>
+ <tr>
+ <th class="date">date:</th>
+ <td class="date">{date|rfc822date}</td>
+ </tr>
+</table>
+
+
diff --git a/websdk/mercurial/templates/spartan/filerevision.tmpl b/websdk/mercurial/templates/spartan/filerevision.tmpl
new file mode 100644
index 0000000..548d3c6
--- /dev/null
+++ b/websdk/mercurial/templates/spartan/filerevision.tmpl
@@ -0,0 +1,47 @@
+{header}
+<title>{repo|escape}:{file|escape}</title>
+</head>
+<body>
+
+<div class="buttons">
+<a href="{url}log/{rev}{sessionvars%urlparameter}">changelog</a>
+<a href="{url}shortlog/{rev}{sessionvars%urlparameter}">shortlog</a>
+<a href="{url}graph{sessionvars%urlparameter}">graph</a>
+<a href="{url}tags{sessionvars%urlparameter}">tags</a>
+<a href="{url}branches{sessionvars%urlparameter}">branches</a>
+<a href="{url}rev/{node|short}{sessionvars%urlparameter}">changeset</a>
+<a href="{url}file/{node|short}{path|urlescape}{sessionvars%urlparameter}">files</a>
+<a href="{url}log/{node|short}/{file|urlescape}{sessionvars%urlparameter}">revisions</a>
+<a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}">annotate</a>
+<a href="{url}raw-file/{node|short}/{file|urlescape}">raw</a>
+<a href="{url}help{sessionvars%urlparameter}">help</a>
+</div>
+
+<h2>{file|escape}</h2>
+
+<table>
+<tr>
+ <td class="metatag">changeset {rev}:</td>
+ <td><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a></td></tr>
+{parent%filerevparent}
+{child%filerevchild}
+<tr>
+ <td class="metatag">author:</td>
+ <td>{author|obfuscate}</td></tr>
+<tr>
+ <td class="metatag">date:</td>
+ <td class="date age">{date|rfc822date}</td></tr>
+<tr>
+ <td class="metatag">permissions:</td>
+ <td>{permissions|permissions}</td></tr>
+<tr>
+ <td class="metatag">description:</td>
+ <td>{desc|strip|escape|addbreaks|nonempty}</td>
+</tr>
+</table>
+
+<pre>
+{text%fileline}
+</pre>
+
+{footer}
diff --git a/websdk/mercurial/templates/spartan/footer.tmpl b/websdk/mercurial/templates/spartan/footer.tmpl
new file mode 100644
index 0000000..665bdb2
--- /dev/null
+++ b/websdk/mercurial/templates/spartan/footer.tmpl
@@ -0,0 +1,9 @@
+<script type="text/javascript">process_dates()</script>
+{motd}
+<div class="logo">
+<a href="{logourl}">
+<img src="{staticurl}{logoimg}" width=75 height=90 border=0 alt="mercurial"></a>
+</div>
+
+</body>
+</html>
diff --git a/websdk/mercurial/templates/spartan/graph.tmpl b/websdk/mercurial/templates/spartan/graph.tmpl
new file mode 100644
index 0000000..67736e0
--- /dev/null
+++ b/websdk/mercurial/templates/spartan/graph.tmpl
@@ -0,0 +1,96 @@
+{header}
+<title>{repo|escape}: graph</title>
+<link rel="alternate" type="application/atom+xml"
+ href="{url}atom-tags" title="Atom feed for {repo|escape}: tags">
+<link rel="alternate" type="application/rss+xml"
+ href="{url}rss-tags" title="RSS feed for {repo|escape}: tags">
+<!--[if IE]><script type="text/javascript" src="{staticurl}excanvas.js"></script><![endif]-->
+</head>
+<body>
+
+<div class="buttons">
+<a href="{url}log{sessionvars%urlparameter}">changelog</a>
+<a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a>
+<a href="{url}tags{sessionvars%urlparameter}">tags</a>
+<a href="{url}branches{sessionvars%urlparameter}">branches</a>
+<a href="{url}file/{node|short}/{sessionvars%urlparameter}">files</a>
+<a href="{url}help{sessionvars%urlparameter}">help</a>
+</div>
+
+<h2>graph</h2>
+
+<form action="{url}log">
+{sessionvars%hiddenformentry}
+<p>
+<label for="search1">search:</label>
+<input name="rev" id="search1" type="text" size="30">
+navigate: <small class="navigate">{changenav%navgraph}</small>
+</p>
+</form>
+
+<noscript>The revision graph only works with JavaScript-enabled browsers.</noscript>
+
+<div id="wrapper">
+<ul id="nodebgs"></ul>
+<canvas id="graph" width="480" height="{canvasheight}"></canvas>
+<ul id="graphnodes"></ul>
+</div>
+
+<script type="text/javascript">
+<!-- hide script content
+
+var data = {jsdata|json};
+var graph = new Graph();
+graph.scale({bg_height});
+
+graph.edge = function(x0, y0, x1, y1, color) \{
+
+ this.setColor(color, 0.0, 0.65);
+ this.ctx.beginPath();
+ this.ctx.moveTo(x0, y0);
+ this.ctx.lineTo(x1, y1);
+ this.ctx.stroke();
+
+}
+
+var revlink = '<li style="_STYLE"><span class="desc">';
+revlink += '<a href="{url}rev/_NODEID{sessionvars%urlparameter}" title="_NODEID">_DESC</a>';
+revlink += '</span><span class="info">_DATE, by _USER</span></li>';
+
+graph.vertex = function(x, y, color, parity, cur) \{
+
+ this.ctx.beginPath();
+ color = this.setColor(color, 0.25, 0.75);
+ this.ctx.arc(x, y, radius, 0, Math.PI * 2, true);
+ this.ctx.fill();
+
+ var bg = '<li class="bg parity' + parity + '"></li>';
+ var left = (this.columns + 1) * this.bg_height;
+ var nstyle = 'padding-left: ' + left + 'px;';
+ var item = revlink.replace(/_STYLE/, nstyle);
+ item = item.replace(/_PARITY/, 'parity' + parity);
+ item = item.replace(/_NODEID/, cur[0]);
+ item = item.replace(/_NODEID/, cur[0]);
+ item = item.replace(/_DESC/, cur[3]);
+ item = item.replace(/_USER/, cur[4]);
+ item = item.replace(/_DATE/, cur[5]);
+
+ return [bg, item];
+
+}
+
+graph.render(data);
+
+// stop hiding script -->
+</script>
+
+<form action="{url}log">
+{sessionvars%hiddenformentry}
+<p>
+<label for="search1">search:</label>
+<input name="rev" id="search1" type="text" size="30">
+navigate: <small class="navigate">{changenav%navgraph}</small>
+</p>
+</form>
+
+{footer}
diff --git a/websdk/mercurial/templates/spartan/header.tmpl b/websdk/mercurial/templates/spartan/header.tmpl
new file mode 100644
index 0000000..cd40d80
--- /dev/null
+++ b/websdk/mercurial/templates/spartan/header.tmpl
@@ -0,0 +1,7 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
+<html>
+<head>
+<link rel="icon" href="{staticurl}hgicon.png" type="image/png">
+<meta name="robots" content="index, nofollow" />
+<link rel="stylesheet" href="{staticurl}style.css" type="text/css" />
+<script type="text/javascript" src="{staticurl}mercurial.js"></script>
diff --git a/websdk/mercurial/templates/spartan/index.tmpl b/websdk/mercurial/templates/spartan/index.tmpl
new file mode 100644
index 0000000..f399813
--- /dev/null
+++ b/websdk/mercurial/templates/spartan/index.tmpl
@@ -0,0 +1,19 @@
+{header}
+<title>Mercurial repositories index</title>
+</head>
+<body>
+
+<h2>Mercurial Repositories</h2>
+
+<table>
+ <tr>
+ <td><a href="?sort={sort_name}">Name</a></td>
+ <td><a href="?sort={sort_description}">Description</a></td>
+ <td><a href="?sort={sort_contact}">Contact</a></td>
+ <td><a href="?sort={sort_lastchange}">Last modified</a></td>
+ <td>&nbsp;</td>
+ </tr>
+ {entries%indexentry}
+</table>
+
+{footer}
diff --git a/websdk/mercurial/templates/spartan/manifest.tmpl b/websdk/mercurial/templates/spartan/manifest.tmpl
new file mode 100644
index 0000000..24a0973
--- /dev/null
+++ b/websdk/mercurial/templates/spartan/manifest.tmpl
@@ -0,0 +1,29 @@
+{header}
+<title>{repo|escape}: files for changeset {node|short}</title>
+</head>
+<body>
+
+<div class="buttons">
+<a href="{url}log/{rev}{sessionvars%urlparameter}">changelog</a>
+<a href="{url}shortlog/{rev}{sessionvars%urlparameter}">shortlog</a>
+<a href="{url}graph{sessionvars%urlparameter}">graph</a>
+<a href="{url}tags{sessionvars%urlparameter}">tags</a>
+<a href="{url}branches{sessionvars%urlparameter}">branches</a>
+<a href="{url}rev/{node|short}{sessionvars%urlparameter}">changeset</a>
+{archives%archiveentry}
+<a href="{url}help{sessionvars%urlparameter}">help</a>
+</div>
+
+<h2>files for changeset {node|short}: {path|escape}</h2>
+
+<table cellpadding="0" cellspacing="0">
+<tr class="parity{upparity}">
+ <td><tt>drwxr-xr-x</tt>&nbsp;
+ <td>&nbsp;
+ <td>&nbsp;
+ <td><a href="{url}file/{node|short}{up|urlescape}{sessionvars%urlparameter}">[up]</a>
+</tr>
+{dentries%direntry}
+{fentries%fileentry}
+</table>
+{footer}
diff --git a/websdk/mercurial/templates/spartan/map b/websdk/mercurial/templates/spartan/map
new file mode 100644
index 0000000..8280723
--- /dev/null
+++ b/websdk/mercurial/templates/spartan/map
@@ -0,0 +1,183 @@
+default = 'shortlog'
+mimetype = 'text/html; charset={encoding}'
+header = header.tmpl
+footer = footer.tmpl
+search = search.tmpl
+changelog = changelog.tmpl
+shortlog = shortlog.tmpl
+shortlogentry = shortlogentry.tmpl
+graph = graph.tmpl
+naventry = '<a href="{url}log/{node|short}{sessionvars%urlparameter}">{label|escape}</a> '
+navshortentry = '<a href="{url}shortlog/{node|short}{sessionvars%urlparameter}">{label|escape}</a> '
+navgraphentry = '<a href="{url}graph/{node|short}{sessionvars%urlparameter}">{label|escape}</a> '
+filenaventry = '<a href="{url}log/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{label|escape}</a> '
+filedifflink = '<a href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{file|escape}</a> '
+filenodelink = '<a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{file|escape}</a> '
+filenolink = '{file|escape} '
+fileellipses = '...'
+changelogentry = changelogentry.tmpl
+searchentry = changelogentry.tmpl
+changeset = changeset.tmpl
+manifest = manifest.tmpl
+
+nav = '{before%naventry} {after%naventry}'
+navshort = '{before%navshortentry}{after%navshortentry}'
+navgraph = '{before%navgraphentry}{after%navgraphentry}'
+filenav = '{before%filenaventry}{after%filenaventry}'
+
+direntry = '
+ <tr class="parity{parity}">
+ <td><tt>drwxr-xr-x</tt>&nbsp;
+ <td>&nbsp;
+ <td>&nbsp;
+ <td>
+ <a href="{url}file/{node|short}{path|urlescape}{sessionvars%urlparameter}">{basename|escape}/</a>
+ <a href="{url}file/{node|short}{path|urlescape}/{emptydirs|urlescape}{sessionvars%urlparameter}">
+ {emptydirs|urlescape}
+ </a>'
+
+fileentry = '
+ <tr class="parity{parity}">
+ <td><tt>{permissions|permissions}</tt>&nbsp;
+ <td align=right><tt class="date">{date|isodate}</tt>&nbsp;
+ <td align=right><tt>{size}</tt>&nbsp;
+ <td><a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{basename|escape}</a>'
+
+filerevision = filerevision.tmpl
+fileannotate = fileannotate.tmpl
+filediff = filediff.tmpl
+filelog = filelog.tmpl
+fileline = '<div class="parity{parity}"><a class="lineno" href="#{lineid}" id="{lineid}">{linenumber}</a>&nbsp;{line|escape}</div>'
+filelogentry = filelogentry.tmpl
+
+# The &nbsp; ensures that all table cells have content (even if there
+# is an empty line in the annotated file), which in turn ensures that
+# all table rows have equal height.
+annotateline = '
+ <tr class="parity{parity}">
+ <td class="annotate">
+ <a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}#l{targetline}"
+ title="{node|short}: {desc|escape|firstline}">{author|user}@{rev}</a>
+ </td>
+ <td>
+ <a class="lineno" href="#{lineid}" id="{lineid}">{linenumber}</a>
+ </td>
+ <td><pre>&nbsp;{line|escape}</pre></td>
+ </tr>'
+difflineplus = '<span class="plusline"><a class="lineno" href="#{lineid}" id="{lineid}">{linenumber}</a>{line|escape}</span>'
+difflineminus = '<span class="minusline"><a class="lineno" href="#{lineid}" id="{lineid}">{linenumber}</a>{line|escape}</span>'
+difflineat = '<span class="atline"><a class="lineno" href="#{lineid}" id="{lineid}">{linenumber}</a>{line|escape}</span>'
+diffline = '<a class="lineno" href="#{lineid}" id="{lineid}">{linenumber}</a>{line|escape}'
+changelogparent = '
+ <tr>
+ <th class="parent">parent {rev}:</th>
+ <td class="parent">
+ <a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a>
+ </td>
+ </tr>'
+changesetparent = '
+ <tr>
+ <th class="parent">parent {rev}:</th>
+ <td class="parent"><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a></td>
+ </tr>'
+filerevparent = '
+ <tr>
+ <td class="metatag">parent:</td>
+ <td>
+ <a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">
+ {rename%filerename}{node|short}
+ </a>
+ </td>
+ </tr>'
+filerename = '{file|escape}@'
+filelogrename = '
+ <tr>
+ <th>base:</th>
+ <td>
+ <a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">
+ {file|escape}@{node|short}
+ </a>
+ </td>
+ </tr>'
+fileannotateparent = '
+ <tr>
+ <td class="metatag">parent:</td>
+ <td>
+ <a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}">
+ {rename%filerename}{node|short}
+ </a>
+ </td>
+ </tr>'
+changesetchild = '
+ <tr>
+ <th class="child">child {rev}:</th>
+ <td class="child"><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a></td>
+ </tr>'
+changelogchild = '
+ <tr>
+ <th class="child">child {rev}:</th>
+ <td class="child"><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a></td>
+ </tr>'
+filerevchild = '
+ <tr>
+ <td class="metatag">child:</td>
+ <td><a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{node|short}</a></td>
+ </tr>'
+fileannotatechild = '
+ <tr>
+ <td class="metatag">child:</td>
+ <td><a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{node|short}</a></td>
+ </tr>'
+tags = tags.tmpl
+tagentry = '
+ <li class="tagEntry parity{parity}">
+ <tt class="node">{node}</tt>
+ <a href="{url}rev/{node|short}{sessionvars%urlparameter}">{tag|escape}</a>
+ </li>'
+branches = branches.tmpl
+branchentry = '
+ <li class="tagEntry parity{parity}">
+ <tt class="node">{node}</tt>
+ <a href="{url}shortlog/{node|short}{sessionvars%urlparameter}" class="{status}">{branch|escape}</a>
+ </li>'
+diffblock = '<pre class="parity{parity}">{lines}</pre>'
+changelogtag = '<tr><th class="tag">tag:</th><td class="tag">{tag|escape}</td></tr>'
+changesettag = '<tr><th class="tag">tag:</th><td class="tag">{tag|escape}</td></tr>'
+filediffparent = '
+ <tr>
+ <th class="parent">parent {rev}:</th>
+ <td class="parent"><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a></td>
+ </tr>'
+filelogparent = '
+ <tr>
+ <th>parent {rev}:</th>
+ <td><a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{node|short}</a></td>
+ </tr>'
+filediffchild = '
+ <tr>
+ <th class="child">child {rev}:</th>
+ <td class="child"><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a></td>
+ </tr>'
+filelogchild = '
+ <tr>
+ <th>child {rev}:</th>
+ <td><a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{node|short}</a></td>
+ </tr>'
+indexentry = '
+ <tr class="parity{parity}">
+ <td><a href="{url}{sessionvars%urlparameter}">{name|escape}</a></td>
+ <td>{description}</td>
+ <td>{contact|obfuscate}</td>
+ <td class="age">{lastchange|rfc822date}</td>
+ <td class="indexlinks">
+ <a href="{url}rss-log">RSS</a>
+ <a href="{url}atom-log">Atom</a>
+ {archives%archiveentry}
+ </td>
+ </tr>'
+index = index.tmpl
+archiveentry = '<a href="{url}archive/{node|short}{extension|urlescape}">{type|escape}</a> '
+notfound = notfound.tmpl
+error = error.tmpl
+urlparameter = '{separator}{name}={value|urlescape}'
+hiddenformentry = '<input type="hidden" name="{name}" value="{value|escape}" />'
diff --git a/websdk/mercurial/templates/spartan/notfound.tmpl b/websdk/mercurial/templates/spartan/notfound.tmpl
new file mode 100644
index 0000000..e9e6ba4
--- /dev/null
+++ b/websdk/mercurial/templates/spartan/notfound.tmpl
@@ -0,0 +1,12 @@
+{header}
+<title>Mercurial repository not found</title>
+</head>
+<body>
+
+<h2>Mercurial repository not found</h2>
+
+The specified repository "{repo|escape}" is unknown, sorry.
+
+Please go back to the <a href="{url}">main repository list page</a>.
+
+{footer}
diff --git a/websdk/mercurial/templates/spartan/search.tmpl b/websdk/mercurial/templates/spartan/search.tmpl
new file mode 100644
index 0000000..9ace1eb
--- /dev/null
+++ b/websdk/mercurial/templates/spartan/search.tmpl
@@ -0,0 +1,37 @@
+{header}
+<title>{repo|escape}: searching for {query|escape}</title>
+</head>
+<body>
+
+<div class="buttons">
+<a href="{url}log{sessionvars%urlparameter}">changelog</a>
+<a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a>
+<a href="{url}graph{sessionvars%urlparameter}">graph</a>
+<a href="{url}tags{sessionvars%urlparameter}">tags</a>
+<a href="{url}branches{sessionvars%urlparameter}">branches</a>
+<a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a>
+{archives%archiveentry}
+<a href="{url}help{sessionvars%urlparameter}">help</a>
+</div>
+
+<h2>searching for {query|escape}</h2>
+
+<form>
+{sessionvars%hiddenformentry}
+<p>
+search:
+<input name="rev" type="text" width="30" value="{query|escape}">
+</p>
+</form>
+
+{entries}
+
+<form>
+{sessionvars%hiddenformentry}
+<p>
+search:
+<input name="rev" type="text" width="30" value="{query|escape}">
+</p>
+</form>
+
+{footer}
diff --git a/websdk/mercurial/templates/spartan/shortlog.tmpl b/websdk/mercurial/templates/spartan/shortlog.tmpl
new file mode 100644
index 0000000..1f70a53
--- /dev/null
+++ b/websdk/mercurial/templates/spartan/shortlog.tmpl
@@ -0,0 +1,44 @@
+{header}
+<title>{repo|escape}: shortlog</title>
+<link rel="alternate" type="application/atom+xml"
+ href="{url}atom-log" title="Atom feed for {repo|escape}">
+<link rel="alternate" type="application/rss+xml"
+ href="{url}rss-log" title="RSS feed for {repo|escape}">
+</head>
+<body>
+
+<div class="buttons">
+<a href="{url}log/{rev}{sessionvars%urlparameter}">changelog</a>
+<a href="{url}graph{sessionvars%urlparameter}">graph</a>
+<a href="{url}tags{sessionvars%urlparameter}">tags</a>
+<a href="{url}branches{sessionvars%urlparameter}">branches</a>
+<a href="{url}file/{node|short}/{sessionvars%urlparameter}">files</a>
+{archives%archiveentry}
+<a href="{url}help{sessionvars%urlparameter}">help</a>
+<a type="application/rss+xml" href="{url}rss-log">rss</a>
+<a type="application/rss+xml" href="{url}atom-log" title="Atom feed for {repo|escape}">atom</a>
+</div>
+
+<h2>shortlog for {repo|escape}</h2>
+
+<form action="{url}log">
+{sessionvars%hiddenformentry}
+<p>
+<label for="search1">search:</label>
+<input name="rev" id="search1" type="text" size="30">
+navigate: <small class="navigate">{changenav%navshort}</small>
+</p>
+</form>
+
+{entries%shortlogentry}
+
+<form action="{url}log">
+{sessionvars%hiddenformentry}
+<p>
+<label for="search2">search:</label>
+<input name="rev" id="search2" type="text" size="30">
+navigate: <small class="navigate">{changenav%navshort}</small>
+</p>
+</form>
+
+{footer}
diff --git a/websdk/mercurial/templates/spartan/shortlogentry.tmpl b/websdk/mercurial/templates/spartan/shortlogentry.tmpl
new file mode 100644
index 0000000..d104753
--- /dev/null
+++ b/websdk/mercurial/templates/spartan/shortlogentry.tmpl
@@ -0,0 +1,7 @@
+<table class="slogEntry parity{parity}">
+ <tr>
+ <td class="age">{date|rfc822date}</td>
+ <td class="author">{author|person}</td>
+ <td class="node"><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{desc|strip|firstline|escape|nonempty}</a></td>
+ </tr>
+</table>
diff --git a/websdk/mercurial/templates/spartan/tags.tmpl b/websdk/mercurial/templates/spartan/tags.tmpl
new file mode 100644
index 0000000..4c41551
--- /dev/null
+++ b/websdk/mercurial/templates/spartan/tags.tmpl
@@ -0,0 +1,27 @@
+{header}
+<title>{repo|escape}: tags</title>
+<link rel="alternate" type="application/atom+xml"
+ href="{url}atom-tags" title="Atom feed for {repo|escape}: tags">
+<link rel="alternate" type="application/rss+xml"
+ href="{url}rss-tags" title="RSS feed for {repo|escape}: tags">
+</head>
+<body>
+
+<div class="buttons">
+<a href="{url}log{sessionvars%urlparameter}">changelog</a>
+<a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a>
+<a href="{url}graph{sessionvars%urlparameter}">graph</a>
+<a href="{url}branches{sessionvars%urlparameter}">branches</a>
+<a href="{url}file/{node|short}/{sessionvars%urlparameter}">files</a>
+<a href="{url}help{sessionvars%urlparameter}">help</a>
+<a type="application/rss+xml" href="{url}rss-tags">rss</a>
+<a type="application/atom+xml" href="{url}atom-tags">atom</a>
+</div>
+
+<h2>tags:</h2>
+
+<ul id="tagEntries">
+{entries%tagentry}
+</ul>
+
+{footer}
diff --git a/websdk/mercurial/templates/static/background.png b/websdk/mercurial/templates/static/background.png
new file mode 100644
index 0000000..af8a0aa
--- /dev/null
+++ b/websdk/mercurial/templates/static/background.png
Binary files differ
diff --git a/websdk/mercurial/templates/static/coal-file.png b/websdk/mercurial/templates/static/coal-file.png
new file mode 100644
index 0000000..7ecf463
--- /dev/null
+++ b/websdk/mercurial/templates/static/coal-file.png
Binary files differ
diff --git a/websdk/mercurial/templates/static/coal-folder.png b/websdk/mercurial/templates/static/coal-folder.png
new file mode 100644
index 0000000..d1b8ecc
--- /dev/null
+++ b/websdk/mercurial/templates/static/coal-folder.png
Binary files differ
diff --git a/websdk/mercurial/templates/static/excanvas.js b/websdk/mercurial/templates/static/excanvas.js
new file mode 100644
index 0000000..9d71658
--- /dev/null
+++ b/websdk/mercurial/templates/static/excanvas.js
@@ -0,0 +1,19 @@
+if(!window.CanvasRenderingContext2D){(function(){var I=Math,i=I.round,L=I.sin,M=I.cos,m=10,A=m/2,Q={init:function(a){var b=a||document;if(/MSIE/.test(navigator.userAgent)&&!window.opera){var c=this;b.attachEvent("onreadystatechange",function(){c.r(b)})}},r:function(a){if(a.readyState=="complete"){if(!a.namespaces["s"]){a.namespaces.add("g_vml_","urn:schemas-microsoft-com:vml")}var b=a.createStyleSheet();b.cssText="canvas{display:inline-block;overflow:hidden;text-align:left;width:300px;height:150px}g_vml_\\:*{behavior:url(#default#VML)}";
+var c=a.getElementsByTagName("canvas");for(var d=0;d<c.length;d++){if(!c[d].getContext){this.initElement(c[d])}}}},q:function(a){var b=a.outerHTML,c=a.ownerDocument.createElement(b);if(b.slice(-2)!="/>"){var d="/"+a.tagName,e;while((e=a.nextSibling)&&e.tagName!=d){e.removeNode()}if(e){e.removeNode()}}a.parentNode.replaceChild(c,a);return c},initElement:function(a){a=this.q(a);a.getContext=function(){if(this.l){return this.l}return this.l=new K(this)};a.attachEvent("onpropertychange",V);a.attachEvent("onresize",
+W);var b=a.attributes;if(b.width&&b.width.specified){a.style.width=b.width.nodeValue+"px"}else{a.width=a.clientWidth}if(b.height&&b.height.specified){a.style.height=b.height.nodeValue+"px"}else{a.height=a.clientHeight}return a}};function V(a){var b=a.srcElement;switch(a.propertyName){case "width":b.style.width=b.attributes.width.nodeValue+"px";b.getContext().clearRect();break;case "height":b.style.height=b.attributes.height.nodeValue+"px";b.getContext().clearRect();break}}function W(a){var b=a.srcElement;
+if(b.firstChild){b.firstChild.style.width=b.clientWidth+"px";b.firstChild.style.height=b.clientHeight+"px"}}Q.init();var R=[];for(var E=0;E<16;E++){for(var F=0;F<16;F++){R[E*16+F]=E.toString(16)+F.toString(16)}}function J(){return[[1,0,0],[0,1,0],[0,0,1]]}function G(a,b){var c=J();for(var d=0;d<3;d++){for(var e=0;e<3;e++){var g=0;for(var h=0;h<3;h++){g+=a[d][h]*b[h][e]}c[d][e]=g}}return c}function N(a,b){b.fillStyle=a.fillStyle;b.lineCap=a.lineCap;b.lineJoin=a.lineJoin;b.lineWidth=a.lineWidth;b.miterLimit=
+a.miterLimit;b.shadowBlur=a.shadowBlur;b.shadowColor=a.shadowColor;b.shadowOffsetX=a.shadowOffsetX;b.shadowOffsetY=a.shadowOffsetY;b.strokeStyle=a.strokeStyle;b.d=a.d;b.e=a.e}function O(a){var b,c=1;a=String(a);if(a.substring(0,3)=="rgb"){var d=a.indexOf("(",3),e=a.indexOf(")",d+1),g=a.substring(d+1,e).split(",");b="#";for(var h=0;h<3;h++){b+=R[Number(g[h])]}if(g.length==4&&a.substr(3,1)=="a"){c=g[3]}}else{b=a}return[b,c]}function S(a){switch(a){case "butt":return"flat";case "round":return"round";
+case "square":default:return"square"}}function K(a){this.a=J();this.m=[];this.k=[];this.c=[];this.strokeStyle="#000";this.fillStyle="#000";this.lineWidth=1;this.lineJoin="miter";this.lineCap="butt";this.miterLimit=m*1;this.globalAlpha=1;this.canvas=a;var b=a.ownerDocument.createElement("div");b.style.width=a.clientWidth+"px";b.style.height=a.clientHeight+"px";b.style.overflow="hidden";b.style.position="absolute";a.appendChild(b);this.j=b;this.d=1;this.e=1}var j=K.prototype;j.clearRect=function(){this.j.innerHTML=
+"";this.c=[]};j.beginPath=function(){this.c=[]};j.moveTo=function(a,b){this.c.push({type:"moveTo",x:a,y:b});this.f=a;this.g=b};j.lineTo=function(a,b){this.c.push({type:"lineTo",x:a,y:b});this.f=a;this.g=b};j.bezierCurveTo=function(a,b,c,d,e,g){this.c.push({type:"bezierCurveTo",cp1x:a,cp1y:b,cp2x:c,cp2y:d,x:e,y:g});this.f=e;this.g=g};j.quadraticCurveTo=function(a,b,c,d){var e=this.f+0.6666666666666666*(a-this.f),g=this.g+0.6666666666666666*(b-this.g),h=e+(c-this.f)/3,l=g+(d-this.g)/3;this.bezierCurveTo(e,
+g,h,l,c,d)};j.arc=function(a,b,c,d,e,g){c*=m;var h=g?"at":"wa",l=a+M(d)*c-A,n=b+L(d)*c-A,o=a+M(e)*c-A,f=b+L(e)*c-A;if(l==o&&!g){l+=0.125}this.c.push({type:h,x:a,y:b,radius:c,xStart:l,yStart:n,xEnd:o,yEnd:f})};j.rect=function(a,b,c,d){this.moveTo(a,b);this.lineTo(a+c,b);this.lineTo(a+c,b+d);this.lineTo(a,b+d);this.closePath()};j.strokeRect=function(a,b,c,d){this.beginPath();this.moveTo(a,b);this.lineTo(a+c,b);this.lineTo(a+c,b+d);this.lineTo(a,b+d);this.closePath();this.stroke()};j.fillRect=function(a,
+b,c,d){this.beginPath();this.moveTo(a,b);this.lineTo(a+c,b);this.lineTo(a+c,b+d);this.lineTo(a,b+d);this.closePath();this.fill()};j.createLinearGradient=function(a,b,c,d){var e=new H("gradient");return e};j.createRadialGradient=function(a,b,c,d,e,g){var h=new H("gradientradial");h.n=c;h.o=g;h.i.x=a;h.i.y=b;return h};j.drawImage=function(a,b){var c,d,e,g,h,l,n,o,f=a.runtimeStyle.width,k=a.runtimeStyle.height;a.runtimeStyle.width="auto";a.runtimeStyle.height="auto";var q=a.width,r=a.height;a.runtimeStyle.width=
+f;a.runtimeStyle.height=k;if(arguments.length==3){c=arguments[1];d=arguments[2];h=(l=0);n=(e=q);o=(g=r)}else if(arguments.length==5){c=arguments[1];d=arguments[2];e=arguments[3];g=arguments[4];h=(l=0);n=q;o=r}else if(arguments.length==9){h=arguments[1];l=arguments[2];n=arguments[3];o=arguments[4];c=arguments[5];d=arguments[6];e=arguments[7];g=arguments[8]}else{throw"Invalid number of arguments";}var s=this.b(c,d),t=[],v=10,w=10;t.push(" <g_vml_:group",' coordsize="',m*v,",",m*w,'"',' coordorigin="0,0"',
+' style="width:',v,";height:",w,";position:absolute;");if(this.a[0][0]!=1||this.a[0][1]){var x=[];x.push("M11='",this.a[0][0],"',","M12='",this.a[1][0],"',","M21='",this.a[0][1],"',","M22='",this.a[1][1],"',","Dx='",i(s.x/m),"',","Dy='",i(s.y/m),"'");var p=s,y=this.b(c+e,d),z=this.b(c,d+g),B=this.b(c+e,d+g);p.x=Math.max(p.x,y.x,z.x,B.x);p.y=Math.max(p.y,y.y,z.y,B.y);t.push("padding:0 ",i(p.x/m),"px ",i(p.y/m),"px 0;filter:progid:DXImageTransform.Microsoft.Matrix(",x.join(""),", sizingmethod='clip');")}else{t.push("top:",
+i(s.y/m),"px;left:",i(s.x/m),"px;")}t.push(' ">','<g_vml_:image src="',a.src,'"',' style="width:',m*e,";"," height:",m*g,';"',' cropleft="',h/q,'"',' croptop="',l/r,'"',' cropright="',(q-h-n)/q,'"',' cropbottom="',(r-l-o)/r,'"'," />","</g_vml_:group>");this.j.insertAdjacentHTML("BeforeEnd",t.join(""))};j.stroke=function(a){var b=[],c=O(a?this.fillStyle:this.strokeStyle),d=c[0],e=c[1]*this.globalAlpha,g=10,h=10;b.push("<g_vml_:shape",' fillcolor="',d,'"',' filled="',Boolean(a),'"',' style="position:absolute;width:',
+g,";height:",h,';"',' coordorigin="0 0" coordsize="',m*g," ",m*h,'"',' stroked="',!a,'"',' strokeweight="',this.lineWidth,'"',' strokecolor="',d,'"',' path="');var l={x:null,y:null},n={x:null,y:null};for(var o=0;o<this.c.length;o++){var f=this.c[o];if(f.type=="moveTo"){b.push(" m ");var k=this.b(f.x,f.y);b.push(i(k.x),",",i(k.y))}else if(f.type=="lineTo"){b.push(" l ");var k=this.b(f.x,f.y);b.push(i(k.x),",",i(k.y))}else if(f.type=="close"){b.push(" x ")}else if(f.type=="bezierCurveTo"){b.push(" c ");
+var k=this.b(f.x,f.y),q=this.b(f.cp1x,f.cp1y),r=this.b(f.cp2x,f.cp2y);b.push(i(q.x),",",i(q.y),",",i(r.x),",",i(r.y),",",i(k.x),",",i(k.y))}else if(f.type=="at"||f.type=="wa"){b.push(" ",f.type," ");var k=this.b(f.x,f.y),s=this.b(f.xStart,f.yStart),t=this.b(f.xEnd,f.yEnd);b.push(i(k.x-this.d*f.radius),",",i(k.y-this.e*f.radius)," ",i(k.x+this.d*f.radius),",",i(k.y+this.e*f.radius)," ",i(s.x),",",i(s.y)," ",i(t.x),",",i(t.y))}if(k){if(l.x==null||k.x<l.x){l.x=k.x}if(n.x==null||k.x>n.x){n.x=k.x}if(l.y==
+null||k.y<l.y){l.y=k.y}if(n.y==null||k.y>n.y){n.y=k.y}}}b.push(' ">');if(typeof this.fillStyle=="object"){var v={x:"50%",y:"50%"},w=n.x-l.x,x=n.y-l.y,p=w>x?w:x;v.x=i(this.fillStyle.i.x/w*100+50)+"%";v.y=i(this.fillStyle.i.y/x*100+50)+"%";var y=[];if(this.fillStyle.p=="gradientradial"){var z=this.fillStyle.n/p*100,B=this.fillStyle.o/p*100-z}else{var z=0,B=100}var C={offset:null,color:null},D={offset:null,color:null};this.fillStyle.h.sort(function(T,U){return T.offset-U.offset});for(var o=0;o<this.fillStyle.h.length;o++){var u=
+this.fillStyle.h[o];y.push(u.offset*B+z,"% ",u.color,",");if(u.offset>C.offset||C.offset==null){C.offset=u.offset;C.color=u.color}if(u.offset<D.offset||D.offset==null){D.offset=u.offset;D.color=u.color}}y.pop();b.push("<g_vml_:fill",' color="',D.color,'"',' color2="',C.color,'"',' type="',this.fillStyle.p,'"',' focusposition="',v.x,", ",v.y,'"',' colors="',y.join(""),'"',' opacity="',e,'" />')}else if(a){b.push('<g_vml_:fill color="',d,'" opacity="',e,'" />')}else{b.push("<g_vml_:stroke",' opacity="',
+e,'"',' joinstyle="',this.lineJoin,'"',' miterlimit="',this.miterLimit,'"',' endcap="',S(this.lineCap),'"',' weight="',this.lineWidth,'px"',' color="',d,'" />')}b.push("</g_vml_:shape>");this.j.insertAdjacentHTML("beforeEnd",b.join(""));this.c=[]};j.fill=function(){this.stroke(true)};j.closePath=function(){this.c.push({type:"close"})};j.b=function(a,b){return{x:m*(a*this.a[0][0]+b*this.a[1][0]+this.a[2][0])-A,y:m*(a*this.a[0][1]+b*this.a[1][1]+this.a[2][1])-A}};j.save=function(){var a={};N(this,a);
+this.k.push(a);this.m.push(this.a);this.a=G(J(),this.a)};j.restore=function(){N(this.k.pop(),this);this.a=this.m.pop()};j.translate=function(a,b){var c=[[1,0,0],[0,1,0],[a,b,1]];this.a=G(c,this.a)};j.rotate=function(a){var b=M(a),c=L(a),d=[[b,c,0],[-c,b,0],[0,0,1]];this.a=G(d,this.a)};j.scale=function(a,b){this.d*=a;this.e*=b;var c=[[a,0,0],[0,b,0],[0,0,1]];this.a=G(c,this.a)};j.clip=function(){};j.arcTo=function(){};j.createPattern=function(){return new P};function H(a){this.p=a;this.n=0;this.o=
+0;this.h=[];this.i={x:0,y:0}}H.prototype.addColorStop=function(a,b){b=O(b);this.h.push({offset:1-a,color:b})};function P(){}G_vmlCanvasManager=Q;CanvasRenderingContext2D=K;CanvasGradient=H;CanvasPattern=P})()};
diff --git a/websdk/mercurial/templates/static/hgicon.png b/websdk/mercurial/templates/static/hgicon.png
new file mode 100644
index 0000000..60effbc
--- /dev/null
+++ b/websdk/mercurial/templates/static/hgicon.png
Binary files differ
diff --git a/websdk/mercurial/templates/static/hglogo.png b/websdk/mercurial/templates/static/hglogo.png
new file mode 100644
index 0000000..adc6e65
--- /dev/null
+++ b/websdk/mercurial/templates/static/hglogo.png
Binary files differ
diff --git a/websdk/mercurial/templates/static/mercurial.js b/websdk/mercurial/templates/static/mercurial.js
new file mode 100644
index 0000000..3bc27a4
--- /dev/null
+++ b/websdk/mercurial/templates/static/mercurial.js
@@ -0,0 +1,249 @@
+// mercurial.js - JavaScript utility functions
+//
+// Rendering of branch DAGs on the client side
+// Display of elapsed time
+// Show or hide diffstat
+//
+// Copyright 2008 Dirkjan Ochtman <dirkjan AT ochtman DOT nl>
+// Copyright 2006 Alexander Schremmer <alex AT alexanderweb DOT de>
+//
+// derived from code written by Scott James Remnant <scott@ubuntu.com>
+// Copyright 2005 Canonical Ltd.
+//
+// This software may be used and distributed according to the terms
+// of the GNU General Public License, incorporated herein by reference.
+
+var colors = [
+ [ 1.0, 0.0, 0.0 ],
+ [ 1.0, 1.0, 0.0 ],
+ [ 0.0, 1.0, 0.0 ],
+ [ 0.0, 1.0, 1.0 ],
+ [ 0.0, 0.0, 1.0 ],
+ [ 1.0, 0.0, 1.0 ]
+];
+
+function Graph() {
+
+ this.canvas = document.getElementById('graph');
+ if (navigator.userAgent.indexOf('MSIE') >= 0) this.canvas = window.G_vmlCanvasManager.initElement(this.canvas);
+ this.ctx = this.canvas.getContext('2d');
+ this.ctx.strokeStyle = 'rgb(0, 0, 0)';
+ this.ctx.fillStyle = 'rgb(0, 0, 0)';
+ this.cur = [0, 0];
+ this.line_width = 3;
+ this.bg = [0, 4];
+ this.cell = [2, 0];
+ this.columns = 0;
+ this.revlink = '';
+
+ this.scale = function(height) {
+ this.bg_height = height;
+ this.box_size = Math.floor(this.bg_height / 1.2);
+ this.cell_height = this.box_size;
+ }
+
+ function colorPart(num) {
+ num *= 255
+ num = num < 0 ? 0 : num;
+ num = num > 255 ? 255 : num;
+ var digits = Math.round(num).toString(16);
+ if (num < 16) {
+ return '0' + digits;
+ } else {
+ return digits;
+ }
+ }
+
+ this.setColor = function(color, bg, fg) {
+
+ // Set the colour.
+ //
+ // Picks a distinct colour based on an internal wheel; the bg
+ // parameter provides the value that should be assigned to the 'zero'
+ // colours and the fg parameter provides the multiplier that should be
+ // applied to the foreground colours.
+
+ color %= colors.length;
+ var red = (colors[color][0] * fg) || bg;
+ var green = (colors[color][1] * fg) || bg;
+ var blue = (colors[color][2] * fg) || bg;
+ red = Math.round(red * 255);
+ green = Math.round(green * 255);
+ blue = Math.round(blue * 255);
+ var s = 'rgb(' + red + ', ' + green + ', ' + blue + ')';
+ this.ctx.strokeStyle = s;
+ this.ctx.fillStyle = s;
+ return s;
+
+ }
+
+ this.render = function(data) {
+
+ var backgrounds = '';
+ var nodedata = '';
+
+ for (var i in data) {
+
+ var parity = i % 2;
+ this.cell[1] += this.bg_height;
+ this.bg[1] += this.bg_height;
+
+ var cur = data[i];
+ var node = cur[1];
+ var edges = cur[2];
+ var fold = false;
+
+ for (var j in edges) {
+
+ line = edges[j];
+ start = line[0];
+ end = line[1];
+ color = line[2];
+
+ if (end > this.columns || start > this.columns) {
+ this.columns += 1;
+ }
+
+ if (start == this.columns && start > end) {
+ var fold = true;
+ }
+
+ x0 = this.cell[0] + this.box_size * start + this.box_size / 2;
+ y0 = this.bg[1] - this.bg_height / 2;
+ x1 = this.cell[0] + this.box_size * end + this.box_size / 2;
+ y1 = this.bg[1] + this.bg_height / 2;
+
+ this.edge(x0, y0, x1, y1, color);
+
+ }
+
+ // Draw the revision node in the right column
+
+ column = node[0]
+ color = node[1]
+
+ radius = this.box_size / 8;
+ x = this.cell[0] + this.box_size * column + this.box_size / 2;
+ y = this.bg[1] - this.bg_height / 2;
+ var add = this.vertex(x, y, color, parity, cur);
+ backgrounds += add[0];
+ nodedata += add[1];
+
+ if (fold) this.columns -= 1;
+
+ }
+
+ document.getElementById('nodebgs').innerHTML += backgrounds;
+ document.getElementById('graphnodes').innerHTML += nodedata;
+
+ }
+
+}
+
+
+process_dates = (function(document, RegExp, Math, isNaN, Date, _false, _true){
+
+ // derived from code from mercurial/templatefilter.py
+
+ var scales = {
+ 'year': 365 * 24 * 60 * 60,
+ 'month': 30 * 24 * 60 * 60,
+ 'week': 7 * 24 * 60 * 60,
+ 'day': 24 * 60 * 60,
+ 'hour': 60 * 60,
+ 'minute': 60,
+ 'second': 1
+ };
+
+ function format(count, string){
+ var ret = count + ' ' + string;
+ if (count > 1){
+ ret = ret + 's';
+ }
+ return ret;
+ }
+
+ function shortdate(date){
+ var ret = date.getFullYear() + '-';
+ // getMonth() gives a 0-11 result
+ var month = date.getMonth() + 1;
+ if (month <= 9){
+ ret += '0' + month;
+ } else {
+ ret += month;
+ }
+ ret += '-';
+ var day = date.getDate();
+ if (day <= 9){
+ ret += '0' + day;
+ } else {
+ ret += day;
+ }
+ return ret;
+ }
+
+ function age(datestr){
+ var now = new Date();
+ var once = new Date(datestr);
+ if (isNaN(once.getTime())){
+ // parsing error
+ return datestr;
+ }
+
+ var delta = Math.floor((now.getTime() - once.getTime()) / 1000);
+
+ var future = _false;
+ if (delta < 0){
+ future = _true;
+ delta = -delta;
+ if (delta > (30 * scales.year)){
+ return "in the distant future";
+ }
+ }
+
+ if (delta > (2 * scales.year)){
+ return shortdate(once);
+ }
+
+ for (unit in scales){
+ var s = scales[unit];
+ var n = Math.floor(delta / s);
+ if ((n >= 2) || (s == 1)){
+ if (future){
+ return format(n, unit) + ' from now';
+ } else {
+ return format(n, unit) + ' ago';
+ }
+ }
+ }
+ }
+
+ return function(){
+ var nodes = document.getElementsByTagName('*');
+ var ageclass = new RegExp('\\bage\\b');
+ var dateclass = new RegExp('\\bdate\\b');
+ for (var i=0; i<nodes.length; ++i){
+ var node = nodes[i];
+ var classes = node.className;
+ if (ageclass.test(classes)){
+ var agevalue = age(node.textContent);
+ if (dateclass.test(classes)){
+ // We want both: date + (age)
+ node.textContent += ' ('+agevalue+')';
+ } else {
+ node.textContent = agevalue;
+ }
+ }
+ }
+ }
+})(document, RegExp, Math, isNaN, Date, false, true)
+
+function showDiffstat() {
+ document.getElementById('diffstatdetails').style.display = 'inline';
+ document.getElementById('diffstatexpand').style.display = 'none';
+}
+
+function hideDiffstat() {
+ document.getElementById('diffstatdetails').style.display = 'none';
+ document.getElementById('diffstatexpand').style.display = 'inline';
+}
diff --git a/websdk/mercurial/templates/static/style-coal.css b/websdk/mercurial/templates/static/style-coal.css
new file mode 100644
index 0000000..33fb0e6
--- /dev/null
+++ b/websdk/mercurial/templates/static/style-coal.css
@@ -0,0 +1,288 @@
+body {
+ margin: 0;
+ padding: 0;
+ background: black url(background.png) repeat-x;
+ font-family: sans-serif;
+}
+
+.container {
+ padding-right: 150px;
+}
+
+.main {
+ position: relative;
+ background: white;
+ padding: 2em;
+ border-right: 15px solid black;
+ border-bottom: 15px solid black;
+}
+
+#.main {
+ width: 98%;
+}
+
+.overflow {
+ width: 100%;
+ overflow: auto;
+}
+
+.menu {
+ background: #999;
+ padding: 10px;
+ width: 75px;
+ margin: 0;
+ font-size: 80%;
+ text-align: left;
+ position: fixed;
+ top: 27px;
+ left: auto;
+ right: 27px;
+}
+
+#.menu {
+ position: absolute !important;
+ top:expression(eval(document.body.scrollTop + 27));
+}
+
+.menu ul {
+ list-style: none;
+ padding: 0;
+ margin: 10px 0 0 0;
+}
+
+.menu li {
+ margin-bottom: 3px;
+ padding: 2px 4px;
+ background: white;
+ color: black;
+ font-weight: normal;
+}
+
+.menu li.active {
+ background: black;
+ color: white;
+}
+
+.menu img {
+ width: 75px;
+ height: 90px;
+ border: 0;
+}
+
+.menu a { color: black; display: block; }
+
+.search {
+ position: absolute;
+ top: .7em;
+ right: 2em;
+}
+
+form.search div#hint {
+ display: none;
+ position: absolute;
+ top: 40px;
+ right: 0px;
+ width: 190px;
+ padding: 5px;
+ background: #ffc;
+ font-size: 70%;
+ border: 1px solid yellow;
+ -moz-border-radius: 5px; /* this works only in camino/firefox */
+ -webkit-border-radius: 5px; /* this is just for Safari */
+}
+
+form.search:hover div#hint { display: block; }
+
+a { text-decoration:none; }
+.age { white-space:nowrap; }
+.date { white-space:nowrap; }
+.indexlinks { white-space:nowrap; }
+.parity0 { background-color: #f0f0f0; }
+.parity1 { background-color: white; }
+.plusline { color: green; }
+.minusline { color: #dc143c; } /* crimson */
+.atline { color: purple; }
+
+.diffstat-file {
+ white-space: nowrap;
+ font-size: 90%;
+}
+.diffstat-total {
+ white-space: nowrap;
+ font-size: 90%;
+}
+.diffstat-graph {
+ width: 100%;
+}
+.diffstat-add {
+ background-color: green;
+ float: left;
+}
+.diffstat-remove {
+ background-color: red;
+ float: left;
+}
+
+.navigate {
+ text-align: right;
+ font-size: 60%;
+ margin: 1em 0;
+}
+
+.tag {
+ color: #999;
+ font-size: 70%;
+ font-weight: normal;
+ margin-left: .5em;
+ vertical-align: baseline;
+}
+
+.branchhead {
+ color: #000;
+ font-size: 80%;
+ font-weight: normal;
+ margin-left: .5em;
+ vertical-align: baseline;
+}
+
+ul#graphnodes .branchhead {
+ font-size: 75%;
+}
+
+.branchname {
+ color: #000;
+ font-size: 60%;
+ font-weight: normal;
+ margin-left: .5em;
+ vertical-align: baseline;
+}
+
+h3 .branchname {
+ font-size: 80%;
+}
+
+/* Common */
+pre { margin: 0; }
+
+h2 { font-size: 120%; border-bottom: 1px solid #999; }
+h2 a { color: #000; }
+h3 {
+ margin-top: -.7em;
+ font-size: 100%;
+}
+
+/* log and tags tables */
+.bigtable {
+ border-bottom: 1px solid #999;
+ border-collapse: collapse;
+ font-size: 90%;
+ width: 100%;
+ font-weight: normal;
+ text-align: left;
+}
+
+.bigtable td {
+ vertical-align: top;
+}
+
+.bigtable th {
+ padding: 1px 4px;
+ border-bottom: 1px solid #999;
+}
+.bigtable tr { border: none; }
+.bigtable .age { width: 6em; }
+.bigtable .author { width: 12em; }
+.bigtable .description { }
+.bigtable .description .base { font-size: 70%; float: right; line-height: 1.66; }
+.bigtable .node { width: 5em; font-family: monospace;}
+.bigtable .lineno { width: 2em; text-align: right;}
+.bigtable .lineno a { color: #999; font-size: smaller; font-family: monospace;}
+.bigtable .permissions { width: 8em; text-align: left;}
+.bigtable .size { width: 5em; text-align: right; }
+.bigtable .annotate { text-align: right; }
+.bigtable td.annotate { font-size: smaller; }
+.bigtable td.source { font-size: inherit; }
+
+.source, .sourcefirst, .sourcelast {
+ font-family: monospace;
+ white-space: pre;
+ padding: 1px 4px;
+ font-size: 90%;
+}
+.sourcefirst { border-bottom: 1px solid #999; font-weight: bold; }
+.sourcelast { border-top: 1px solid #999; }
+.source a { color: #999; font-size: smaller; font-family: monospace;}
+.bottomline { border-bottom: 1px solid #999; }
+
+.fileline { font-family: monospace; }
+.fileline img { border: 0; }
+
+.tagEntry .closed { color: #99f; }
+
+/* Changeset entry */
+#changesetEntry {
+ border-collapse: collapse;
+ font-size: 90%;
+ width: 100%;
+ margin-bottom: 1em;
+}
+
+#changesetEntry th {
+ padding: 1px 4px;
+ width: 4em;
+ text-align: right;
+ font-weight: normal;
+ color: #999;
+ margin-right: .5em;
+ vertical-align: top;
+}
+
+div.description {
+ border-left: 3px solid #999;
+ margin: 1em 0 1em 0;
+ padding: .3em;
+ white-space: pre;
+ font-family: monospace;
+}
+
+/* Graph */
+div#wrapper {
+ position: relative;
+ border-top: 1px solid black;
+ border-bottom: 1px solid black;
+ margin: 0;
+ padding: 0;
+}
+
+canvas {
+ position: absolute;
+ z-index: 5;
+ top: -0.7em;
+ margin: 0;
+}
+
+ul#graphnodes {
+ position: absolute;
+ z-index: 10;
+ top: -1.0em;
+ list-style: none inside none;
+ padding: 0;
+}
+
+ul#nodebgs {
+ list-style: none inside none;
+ padding: 0;
+ margin: 0;
+ top: -0.7em;
+}
+
+ul#graphnodes li, ul#nodebgs li {
+ height: 39px;
+}
+
+ul#graphnodes li .info {
+ display: block;
+ font-size: 70%;
+ position: relative;
+ top: -3px;
+}
diff --git a/websdk/mercurial/templates/static/style-gitweb.css b/websdk/mercurial/templates/static/style-gitweb.css
new file mode 100644
index 0000000..82f5ef5
--- /dev/null
+++ b/websdk/mercurial/templates/static/style-gitweb.css
@@ -0,0 +1,128 @@
+body { font-family: sans-serif; font-size: 12px; margin:0px; border:solid #d9d8d1; border-width:1px; margin:10px; }
+a { color:#0000cc; }
+a:hover, a:visited, a:active { color:#880000; }
+div.page_header { height:25px; padding:8px; font-size:18px; font-weight:bold; background-color:#d9d8d1; }
+div.page_header a:visited { color:#0000cc; }
+div.page_header a:hover { color:#880000; }
+div.page_nav { padding:8px; }
+div.page_nav a:visited { color:#0000cc; }
+div.page_path { padding:8px; border:solid #d9d8d1; border-width:0px 0px 1px}
+div.page_footer { padding:4px 8px; background-color: #d9d8d1; }
+div.page_footer_text { float:left; color:#555555; font-style:italic; }
+div.page_body { padding:8px; }
+div.title, a.title {
+ display:block; padding:6px 8px;
+ font-weight:bold; background-color:#edece6; text-decoration:none; color:#000000;
+}
+a.title:hover { background-color: #d9d8d1; }
+div.title_text { padding:6px 0px; border: solid #d9d8d1; border-width:0px 0px 1px; }
+div.log_body { padding:8px 8px 8px 150px; }
+.age { white-space:nowrap; }
+span.age { position:relative; float:left; width:142px; font-style:italic; }
+div.log_link {
+ padding:0px 8px;
+ font-size:10px; font-family:sans-serif; font-style:normal;
+ position:relative; float:left; width:136px;
+}
+div.list_head { padding:6px 8px 4px; border:solid #d9d8d1; border-width:1px 0px 0px; font-style:italic; }
+a.list { text-decoration:none; color:#000000; }
+a.list:hover { text-decoration:underline; color:#880000; }
+table { padding:8px 4px; }
+th { padding:2px 5px; font-size:12px; text-align:left; }
+tr.light:hover, .parity0:hover { background-color:#edece6; }
+tr.dark, .parity1 { background-color:#f6f6f0; }
+tr.dark:hover, .parity1:hover { background-color:#edece6; }
+td { padding:2px 5px; font-size:12px; vertical-align:top; }
+td.closed { background-color: #99f; }
+td.link { padding:2px 5px; font-family:sans-serif; font-size:10px; }
+td.indexlinks { white-space: nowrap; }
+td.indexlinks a {
+ padding: 2px 5px; line-height: 10px;
+ border: 1px solid;
+ color: #ffffff; background-color: #7777bb;
+ border-color: #aaaadd #333366 #333366 #aaaadd;
+ font-weight: bold; text-align: center; text-decoration: none;
+ font-size: 10px;
+}
+td.indexlinks a:hover { background-color: #6666aa; }
+div.pre { font-family:monospace; font-size:12px; white-space:pre; }
+div.diff_info { font-family:monospace; color:#000099; background-color:#edece6; font-style:italic; }
+div.index_include { border:solid #d9d8d1; border-width:0px 0px 1px; padding:12px 8px; }
+div.search { margin:4px 8px; position:absolute; top:56px; right:12px }
+.linenr { color:#999999; text-decoration:none }
+div.rss_logo { float: right; white-space: nowrap; }
+div.rss_logo a {
+ padding:3px 6px; line-height:10px;
+ border:1px solid; border-color:#fcc7a5 #7d3302 #3e1a01 #ff954e;
+ color:#ffffff; background-color:#ff6600;
+ font-weight:bold; font-family:sans-serif; font-size:10px;
+ text-align:center; text-decoration:none;
+}
+div.rss_logo a:hover { background-color:#ee5500; }
+pre { margin: 0; }
+span.logtags span {
+ padding: 0px 4px;
+ font-size: 10px;
+ font-weight: normal;
+ border: 1px solid;
+ background-color: #ffaaff;
+ border-color: #ffccff #ff00ee #ff00ee #ffccff;
+}
+span.logtags span.tagtag {
+ background-color: #ffffaa;
+ border-color: #ffffcc #ffee00 #ffee00 #ffffcc;
+}
+span.logtags span.branchtag {
+ background-color: #aaffaa;
+ border-color: #ccffcc #00cc33 #00cc33 #ccffcc;
+}
+span.logtags span.inbranchtag {
+ background-color: #d5dde6;
+ border-color: #e3ecf4 #9398f4 #9398f4 #e3ecf4;
+}
+span.logtags span.bookmarktag {
+ background-color: #afdffa;
+ border-color: #ccecff #46ace6 #46ace6 #ccecff;
+}
+
+/* Graph */
+div#wrapper {
+ position: relative;
+ margin: 0;
+ padding: 0;
+ margin-top: 3px;
+}
+
+canvas {
+ position: absolute;
+ z-index: 5;
+ top: -0.9em;
+ margin: 0;
+}
+
+ul#nodebgs {
+ list-style: none inside none;
+ padding: 0;
+ margin: 0;
+ top: -0.7em;
+}
+
+ul#graphnodes li, ul#nodebgs li {
+ height: 39px;
+}
+
+ul#graphnodes {
+ position: absolute;
+ z-index: 10;
+ top: -0.8em;
+ list-style: none inside none;
+ padding: 0;
+}
+
+ul#graphnodes li .info {
+ display: block;
+ font-size: 100%;
+ position: relative;
+ top: -3px;
+ font-style: italic;
+}
diff --git a/websdk/mercurial/templates/static/style-monoblue.css b/websdk/mercurial/templates/static/style-monoblue.css
new file mode 100644
index 0000000..c015061
--- /dev/null
+++ b/websdk/mercurial/templates/static/style-monoblue.css
@@ -0,0 +1,479 @@
+/*** Initial Settings ***/
+* {
+ margin: 0;
+ padding: 0;
+ font-weight: normal;
+ font-style: normal;
+}
+
+html {
+ font-size: 100%;
+ font-family: sans-serif;
+}
+
+body {
+ font-size: 77%;
+ margin: 15px 50px;
+ background: #4B4B4C;
+}
+
+a {
+ color:#0000cc;
+ text-decoration: none;
+}
+/*** end of Initial Settings ***/
+
+
+/** common settings **/
+div#container {
+ background: #FFFFFF;
+ position: relative;
+ color: #666;
+}
+
+div.page-header {
+ padding: 50px 20px 0;
+ background: #006699 top left repeat-x;
+ position: relative;
+}
+ div.page-header h1 {
+ margin: 10px 0 30px;
+ font-size: 1.8em;
+ font-weight: bold;
+ font-family: osaka,'MS P Gothic', Georgia, serif;
+ letter-spacing: 1px;
+ color: #DDD;
+ }
+ div.page-header h1 a {
+ font-weight: bold;
+ color: #FFF;
+ }
+ div.page-header a {
+ text-decoration: none;
+ }
+
+ div.page-header form {
+ position: absolute;
+ margin-bottom: 2px;
+ bottom: 0;
+ right: 20px;
+ }
+ div.page-header form label {
+ color: #DDD;
+ }
+ div.page-header form input {
+ padding: 2px;
+ border: solid 1px #DDD;
+ }
+ div.page-header form dl {
+ overflow: hidden;
+ }
+ div.page-header form dl dt {
+ font-size: 1.2em;
+ }
+ div.page-header form dl dt,
+ div.page-header form dl dd {
+ margin: 0 0 0 5px;
+ float: left;
+ height: 24px;
+ line-height: 20px;
+ }
+
+ ul.page-nav {
+ margin: 10px 0 0 0;
+ list-style-type: none;
+ overflow: hidden;
+ width: 900px;
+ }
+ ul.page-nav li {
+ margin: 0 2px 0 0;
+ float: left;
+ width: 80px;
+ height: 24px;
+ font-size: 1.1em;
+ line-height: 24px;
+ text-align: center;
+ }
+ ul.page-nav li.current {
+ background: #FFF;
+ }
+ ul.page-nav li a {
+ height: 24px;
+ color: #666;
+ background: #DDD;
+ display: block;
+ text-decoration: none;
+ }
+ ul.page-nav li a:hover {
+ color:#333;
+ background: #FFF;
+ }
+
+ul.submenu {
+ margin: 10px 0 -10px 20px;
+ list-style-type: none;
+}
+ul.submenu li {
+ margin: 0 10px 0 0;
+ font-size: 1.2em;
+ display: inline;
+}
+
+h2 {
+ margin: 20px 0 10px;
+ height: 30px;
+ line-height: 30px;
+ text-indent: 20px;
+ background: #FFF;
+ font-size: 1.2em;
+ border-top: dotted 1px #D5E1E6;
+ font-weight: bold;
+}
+h2.no-link {
+ color:#006699;
+}
+h2.no-border {
+ color: #FFF;
+ background: #006699;
+ border: 0;
+}
+h2 a {
+ font-weight:bold;
+ color:#006699;
+}
+
+div.page-path {
+ text-align: right;
+ padding: 20px 30px 10px 0;
+ border:solid #d9d8d1;
+ border-width:0px 0px 1px;
+ font-size: 1.2em;
+}
+
+div.page-footer {
+ margin: 50px 0 0;
+ position: relative;
+}
+ div.page-footer p {
+ position: relative;
+ left: 20px;
+ bottom: 5px;
+ font-size: 1.2em;
+ }
+
+ ul.rss-logo {
+ position: absolute;
+ top: -10px;
+ right: 20px;
+ height: 20px;
+ list-style-type: none;
+ }
+ ul.rss-logo li {
+ display: inline;
+ }
+ ul.rss-logo li a {
+ padding: 3px 6px;
+ line-height: 10px;
+ border:1px solid;
+ border-color:#fcc7a5 #7d3302 #3e1a01 #ff954e;
+ color:#ffffff;
+ background-color:#ff6600;
+ font-weight:bold;
+ font-family:sans-serif;
+ font-size:10px;
+ text-align:center;
+ text-decoration:none;
+ }
+ div.rss-logo li a:hover {
+ background-color:#ee5500;
+ }
+
+p.normal {
+ margin: 20px 0 20px 30px;
+ font-size: 1.2em;
+}
+
+table {
+ margin: 10px 0 0 20px;
+ width: 95%;
+ border-collapse: collapse;
+}
+table tr td {
+ font-size: 1.1em;
+}
+table tr td.nowrap {
+ white-space: nowrap;
+}
+table tr td.closed {
+ background-color: #99f;
+}
+/*
+table tr.parity0:hover,
+table tr.parity1:hover {
+ background: #D5E1E6;
+}
+*/
+table tr.parity0 {
+ background: #F1F6F7;
+}
+table tr.parity1 {
+ background: #FFFFFF;
+}
+table tr td {
+ padding: 5px 5px;
+}
+table.annotated tr td {
+ padding: 0px 5px;
+}
+
+span.logtags span {
+ padding: 2px 6px;
+ font-weight: normal;
+ font-size: 11px;
+ border: 1px solid;
+ background-color: #ffaaff;
+ border-color: #ffccff #ff00ee #ff00ee #ffccff;
+}
+span.logtags span.tagtag {
+ background-color: #ffffaa;
+ border-color: #ffffcc #ffee00 #ffee00 #ffffcc;
+}
+span.logtags span.branchtag {
+ background-color: #aaffaa;
+ border-color: #ccffcc #00cc33 #00cc33 #ccffcc;
+}
+span.logtags span.inbranchtag {
+ background-color: #d5dde6;
+ border-color: #e3ecf4 #9398f4 #9398f4 #e3ecf4;
+}
+span.logtags span.bookmarktag {
+ background-color: #afdffa;
+ border-color: #ccecff #46ace6 #46ace6 #ccecff;
+}
+
+div.diff pre {
+ margin: 10px 0 0 0;
+}
+div.diff pre span {
+ font-family: monospace;
+ white-space: pre;
+ font-size: 1.2em;
+ padding: 3px 0;
+}
+td.source {
+ white-space: pre;
+ font-family: monospace;
+ margin: 10px 30px 0;
+ font-size: 1.2em;
+ font-family: monospace;
+}
+ div.source div.parity0,
+ div.source div.parity1 {
+ padding: 1px;
+ font-size: 1.2em;
+ }
+ div.source div.parity0 {
+ background: #F1F6F7;
+ }
+ div.source div.parity1 {
+ background: #FFFFFF;
+ }
+div.parity0:hover,
+div.parity1:hover {
+ background: #D5E1E6;
+}
+.linenr {
+ color: #999;
+ text-align: right;
+}
+.lineno {
+ text-align: right;
+}
+.lineno a {
+ color: #999;
+}
+td.linenr {
+ width: 60px;
+}
+
+div#powered-by {
+ position: absolute;
+ width: 75px;
+ top: 15px;
+ right: 20px;
+ font-size: 1.2em;
+}
+div#powered-by a {
+ color: #EEE;
+ text-decoration: none;
+}
+div#powered-by a:hover {
+ text-decoration: underline;
+}
+/*
+div#monoblue-corner-top-left {
+ position: absolute;
+ top: 0;
+ left: 0;
+ width: 10px;
+ height: 10px;
+ background: url(./monoblue-corner.png) top left no-repeat !important;
+ background: none;
+}
+div#monoblue-corner-top-right {
+ position: absolute;
+ top: 0;
+ right: 0;
+ width: 10px;
+ height: 10px;
+ background: url(./monoblue-corner.png) top right no-repeat !important;
+ background: none;
+}
+div#monoblue-corner-bottom-left {
+ position: absolute;
+ bottom: 0;
+ left: 0;
+ width: 10px;
+ height: 10px;
+ background: url(./monoblue-corner.png) bottom left no-repeat !important;
+ background: none;
+}
+div#monoblue-corner-bottom-right {
+ position: absolute;
+ bottom: 0;
+ right: 0;
+ width: 10px;
+ height: 10px;
+ background: url(./monoblue-corner.png) bottom right no-repeat !important;
+ background: none;
+}
+*/
+/** end of common settings **/
+
+/** summary **/
+dl.overview {
+ margin: 0 0 0 30px;
+ font-size: 1.1em;
+ overflow: hidden;
+}
+ dl.overview dt,
+ dl.overview dd {
+ margin: 5px 0;
+ float: left;
+ }
+ dl.overview dt {
+ clear: left;
+ font-weight: bold;
+ width: 150px;
+ }
+/** end of summary **/
+
+/** chagelog **/
+h3.changelog {
+ margin: 20px 0 5px 30px;
+ padding: 0 0 2px;
+ font-size: 1.4em;
+ border-bottom: dotted 1px #D5E1E6;
+}
+ul.changelog-entry {
+ margin: 0 0 10px 30px;
+ list-style-type: none;
+ position: relative;
+}
+ul.changelog-entry li span.revdate {
+ font-size: 1.1em;
+}
+ul.changelog-entry li.age {
+ position: absolute;
+ top: -25px;
+ right: 10px;
+ font-size: 1.4em;
+ color: #CCC;
+ font-weight: bold;
+ font-style: italic;
+}
+ul.changelog-entry li span.name {
+ font-size: 1.2em;
+ font-weight: bold;
+}
+ul.changelog-entry li.description {
+ margin: 10px 0 0;
+ font-size: 1.1em;
+}
+/** end of changelog **/
+
+/** file **/
+p.files {
+ margin: 0 0 0 20px;
+ font-size: 2.0em;
+ font-weight: bold;
+}
+/** end of file **/
+
+/** changeset **/
+h3.changeset {
+ margin: 20px 0 5px 20px;
+ padding: 0 0 2px;
+ font-size: 1.6em;
+ border-bottom: dotted 1px #D5E1E6;
+}
+p.changeset-age {
+ position: relative;
+}
+p.changeset-age span {
+ position: absolute;
+ top: -25px;
+ right: 10px;
+ font-size: 1.4em;
+ color: #CCC;
+ font-weight: bold;
+ font-style: italic;
+}
+p.description {
+ margin: 10px 30px 0 30px;
+ padding: 10px;
+ border: solid 1px #CCC;
+ font-size: 1.2em;
+}
+/** end of changeset **/
+
+/** canvas **/
+div#wrapper {
+ position: relative;
+ font-size: 1.2em;
+}
+
+canvas {
+ position: absolute;
+ z-index: 5;
+ top: -0.7em;
+}
+
+ul#nodebgs li.parity0 {
+ background: #F1F6F7;
+}
+
+ul#nodebgs li.parity1 {
+ background: #FFFFFF;
+}
+
+ul#graphnodes {
+ position: absolute;
+ z-index: 10;
+ top: 7px;
+ list-style: none inside none;
+}
+
+ul#nodebgs {
+ list-style: none inside none;
+}
+
+ul#graphnodes li, ul#nodebgs li {
+ height: 39px;
+}
+
+ul#graphnodes li .info {
+ display: block;
+ position: relative;
+}
+/** end of canvas **/
diff --git a/websdk/mercurial/templates/static/style-paper.css b/websdk/mercurial/templates/static/style-paper.css
new file mode 100644
index 0000000..6c8b1c2
--- /dev/null
+++ b/websdk/mercurial/templates/static/style-paper.css
@@ -0,0 +1,277 @@
+body {
+ margin: 0;
+ padding: 0;
+ background: white;
+ font-family: sans-serif;
+}
+
+.container {
+ padding-left: 115px;
+}
+
+.main {
+ position: relative;
+ background: white;
+ padding: 2em 2em 2em 0;
+}
+
+#.main {
+ width: 98%;
+}
+
+.overflow {
+ width: 100%;
+ overflow: auto;
+}
+
+.menu {
+ width: 90px;
+ margin: 0;
+ font-size: 80%;
+ text-align: left;
+ position: absolute;
+ top: 20px;
+ left: 20px;
+ right: auto;
+}
+
+.menu ul {
+ list-style: none;
+ padding: 0;
+ margin: 10px 0 0 0;
+ border-left: 2px solid #999;
+}
+
+.menu li {
+ margin-bottom: 3px;
+ padding: 2px 4px;
+ background: white;
+ color: black;
+ font-weight: normal;
+}
+
+.menu li.active {
+ font-weight: bold;
+}
+
+.menu img {
+ width: 75px;
+ height: 90px;
+ border: 0;
+}
+
+.menu a { color: black; display: block; }
+
+.search {
+ position: absolute;
+ top: .7em;
+ right: 2em;
+}
+
+form.search div#hint {
+ display: none;
+ position: absolute;
+ top: 40px;
+ right: 0px;
+ width: 190px;
+ padding: 5px;
+ background: #ffc;
+ font-size: 70%;
+ border: 1px solid yellow;
+ -moz-border-radius: 5px; /* this works only in camino/firefox */
+ -webkit-border-radius: 5px; /* this is just for Safari */
+}
+
+form.search:hover div#hint { display: block; }
+
+a { text-decoration:none; }
+.age { white-space:nowrap; }
+.date { white-space:nowrap; }
+.indexlinks { white-space:nowrap; }
+.parity0 { background-color: #f0f0f0; }
+.parity1 { background-color: white; }
+.plusline { color: green; }
+.minusline { color: #dc143c; } /* crimson */
+.atline { color: purple; }
+
+.diffstat-file {
+ white-space: nowrap;
+ font-size: 90%;
+}
+.diffstat-total {
+ white-space: nowrap;
+ font-size: 90%;
+}
+.diffstat-graph {
+ width: 100%;
+}
+.diffstat-add {
+ background-color: green;
+ float: left;
+}
+.diffstat-remove {
+ background-color: red;
+ float: left;
+}
+
+.navigate {
+ text-align: right;
+ font-size: 60%;
+ margin: 1em 0;
+}
+
+.tag {
+ color: #999;
+ font-size: 70%;
+ font-weight: normal;
+ margin-left: .5em;
+ vertical-align: baseline;
+}
+
+.branchhead {
+ color: #000;
+ font-size: 80%;
+ font-weight: normal;
+ margin-left: .5em;
+ vertical-align: baseline;
+}
+
+ul#graphnodes .branchhead {
+ font-size: 75%;
+}
+
+.branchname {
+ color: #000;
+ font-size: 60%;
+ font-weight: normal;
+ margin-left: .5em;
+ vertical-align: baseline;
+}
+
+h3 .branchname {
+ font-size: 80%;
+}
+
+/* Common */
+pre { margin: 0; }
+
+h2 { font-size: 120%; border-bottom: 1px solid #999; }
+h2 a { color: #000; }
+h3 {
+ margin-top: -.7em;
+ font-size: 100%;
+}
+
+/* log and tags tables */
+.bigtable {
+ border-bottom: 1px solid #999;
+ border-collapse: collapse;
+ font-size: 90%;
+ width: 100%;
+ font-weight: normal;
+ text-align: left;
+}
+
+.bigtable td {
+ vertical-align: top;
+}
+
+.bigtable th {
+ padding: 1px 4px;
+ border-bottom: 1px solid #999;
+}
+.bigtable tr { border: none; }
+.bigtable .age { width: 7em; }
+.bigtable .author { width: 12em; }
+.bigtable .description { }
+.bigtable .description .base { font-size: 70%; float: right; line-height: 1.66; }
+.bigtable .node { width: 5em; font-family: monospace;}
+.bigtable .permissions { width: 8em; text-align: left;}
+.bigtable .size { width: 5em; text-align: right; }
+.bigtable .annotate { text-align: right; }
+.bigtable td.annotate { font-size: smaller; }
+.bigtable td.source { font-size: inherit; }
+
+.source, .sourcefirst, .sourcelast {
+ font-family: monospace;
+ white-space: pre;
+ padding: 1px 4px;
+ font-size: 90%;
+}
+.sourcefirst { border-bottom: 1px solid #999; font-weight: bold; }
+.sourcelast { border-top: 1px solid #999; }
+.source a { color: #999; font-size: smaller; font-family: monospace;}
+.bottomline { border-bottom: 1px solid #999; }
+
+.fileline { font-family: monospace; }
+.fileline img { border: 0; }
+
+.tagEntry .closed { color: #99f; }
+
+/* Changeset entry */
+#changesetEntry {
+ border-collapse: collapse;
+ font-size: 90%;
+ width: 100%;
+ margin-bottom: 1em;
+}
+
+#changesetEntry th {
+ padding: 1px 4px;
+ width: 4em;
+ text-align: right;
+ font-weight: normal;
+ color: #999;
+ margin-right: .5em;
+ vertical-align: top;
+}
+
+div.description {
+ border-left: 2px solid #999;
+ margin: 1em 0 1em 0;
+ padding: .3em;
+ white-space: pre;
+ font-family: monospace;
+}
+
+/* Graph */
+div#wrapper {
+ position: relative;
+ border-top: 1px solid black;
+ border-bottom: 1px solid black;
+ margin: 0;
+ padding: 0;
+}
+
+canvas {
+ position: absolute;
+ z-index: 5;
+ top: -0.7em;
+ margin: 0;
+}
+
+ul#graphnodes {
+ position: absolute;
+ z-index: 10;
+ top: -1.0em;
+ list-style: none inside none;
+ padding: 0;
+}
+
+ul#nodebgs {
+ list-style: none inside none;
+ padding: 0;
+ margin: 0;
+ top: -0.7em;
+}
+
+ul#graphnodes li, ul#nodebgs li {
+ height: 39px;
+}
+
+ul#graphnodes li .info {
+ display: block;
+ font-size: 70%;
+ position: relative;
+ top: -3px;
+}
diff --git a/websdk/mercurial/templates/static/style.css b/websdk/mercurial/templates/static/style.css
new file mode 100644
index 0000000..66bd96d
--- /dev/null
+++ b/websdk/mercurial/templates/static/style.css
@@ -0,0 +1,105 @@
+a { text-decoration:none; }
+.age { white-space:nowrap; }
+.date { white-space:nowrap; }
+.indexlinks { white-space:nowrap; }
+.parity0 { background-color: #ddd; }
+.parity1 { background-color: #eee; }
+.lineno { width: 60px; color: #aaa; font-size: smaller;
+ text-align: right; }
+.plusline { color: green; }
+.minusline { color: red; }
+.atline { color: purple; }
+.annotate { font-size: smaller; text-align: right; padding-right: 1em; }
+.buttons a {
+ background-color: #666;
+ padding: 2pt;
+ color: white;
+ font-family: sans;
+ font-weight: bold;
+}
+.navigate a {
+ background-color: #ccc;
+ padding: 2pt;
+ font-family: sans;
+ color: black;
+}
+
+.metatag {
+ background-color: #888;
+ color: white;
+ text-align: right;
+}
+
+/* Common */
+pre { margin: 0; }
+
+.logo {
+ float: right;
+ clear: right;
+}
+
+/* Changelog/Filelog entries */
+.logEntry { width: 100%; }
+.logEntry .age { width: 15%; }
+.logEntry th { font-weight: normal; text-align: right; vertical-align: top; }
+.logEntry th.age, .logEntry th.firstline { font-weight: bold; }
+.logEntry th.firstline { text-align: left; width: inherit; }
+
+/* Shortlog entries */
+.slogEntry { width: 100%; }
+.slogEntry .age { width: 8em; }
+.slogEntry td { font-weight: normal; text-align: left; vertical-align: top; }
+.slogEntry td.author { width: 15em; }
+
+/* Tag entries */
+#tagEntries { list-style: none; margin: 0; padding: 0; }
+#tagEntries .tagEntry { list-style: none; margin: 0; padding: 0; }
+
+/* Changeset entry */
+#changesetEntry { }
+#changesetEntry th { font-weight: normal; background-color: #888; color: #fff; text-align: right; }
+#changesetEntry th.files, #changesetEntry th.description { vertical-align: top; }
+
+/* File diff view */
+#filediffEntry { }
+#filediffEntry th { font-weight: normal; background-color: #888; color: #fff; text-align: right; }
+
+/* Graph */
+div#wrapper {
+ position: relative;
+ margin: 0;
+ padding: 0;
+}
+
+canvas {
+ position: absolute;
+ z-index: 5;
+ top: -0.6em;
+ margin: 0;
+}
+
+ul#nodebgs {
+ list-style: none inside none;
+ padding: 0;
+ margin: 0;
+ top: -0.7em;
+}
+
+ul#graphnodes li, ul#nodebgs li {
+ height: 39px;
+}
+
+ul#graphnodes {
+ position: absolute;
+ z-index: 10;
+ top: -0.85em;
+ list-style: none inside none;
+ padding: 0;
+}
+
+ul#graphnodes li .info {
+ display: block;
+ font-size: 70%;
+ position: relative;
+ top: -1px;
+}
diff --git a/websdk/mercurial/templates/template-vars.txt b/websdk/mercurial/templates/template-vars.txt
new file mode 100644
index 0000000..772284d
--- /dev/null
+++ b/websdk/mercurial/templates/template-vars.txt
@@ -0,0 +1,42 @@
+repo the name of the repo
+rev a changeset.manifest revision
+node a changeset node
+changesets total number of changesets
+file a filename
+filerev a file revision
+filerevs total number of file revisions
+up the directory of the relevant file
+path a path in the manifest, starting with "/"
+basename a short pathname
+date a date string
+age age in hours, days, etc
+line a line of text (escaped)
+desc a description (escaped, with breaks)
+shortdesc a short description (escaped)
+author a name or email addressv(obfuscated)
+parent a list of the parent
+child a list of the children
+tags a list of tag
+
+header the global page header
+footer the global page footer
+
+files a list of file links
+file_copies a list of pairs of name, source filenames
+dirs a set of directory links
+diff a diff of one or more files
+annotate an annotated file
+entries the entries relevant to the page
+
+url base url of hgweb interface
+logourl base url of logo
+staticurl base url for static resources
+
+
+Templates and commands:
+ changelog(rev) - a page for browsing changesets
+ naventry - a link for jumping to a changeset number
+ filenodelink - jump to file diff
+ fileellipses - printed after maxfiles
+ changelogentry - an entry in the log
+ manifest - browse a manifest as a directory tree
diff --git a/websdk/mercurial/transaction.py b/websdk/mercurial/transaction.py
index 72e2f41..d197295 120000..100644
--- a/websdk/mercurial/transaction.py
+++ b/websdk/mercurial/transaction.py
@@ -1 +1,183 @@
-/usr/share/pyshared/mercurial/transaction.py \ No newline at end of file
+# transaction.py - simple journalling scheme for mercurial
+#
+# This transaction scheme is intended to gracefully handle program
+# errors and interruptions. More serious failures like system crashes
+# can be recovered with an fsck-like tool. As the whole repository is
+# effectively log-structured, this should amount to simply truncating
+# anything that isn't referenced in the changelog.
+#
+# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from i18n import _
+import os, errno
+import error, util
+
+def active(func):
+ def _active(self, *args, **kwds):
+ if self.count == 0:
+ raise error.Abort(_(
+ 'cannot use transaction when it is already committed/aborted'))
+ return func(self, *args, **kwds)
+ return _active
+
+def _playback(journal, report, opener, entries, unlink=True):
+ for f, o, ignore in entries:
+ if o or not unlink:
+ try:
+ fp = opener(f, 'a')
+ fp.truncate(o)
+ fp.close()
+ except IOError:
+ report(_("failed to truncate %s\n") % f)
+ raise
+ else:
+ try:
+ fp = opener(f)
+ fn = fp.name
+ fp.close()
+ util.unlink(fn)
+ except (IOError, OSError), inst:
+ if inst.errno != errno.ENOENT:
+ raise
+ util.unlink(journal)
+
+class transaction(object):
+ def __init__(self, report, opener, journal, after=None, createmode=None):
+ self.count = 1
+ self.usages = 1
+ self.report = report
+ self.opener = opener
+ self.after = after
+ self.entries = []
+ self.map = {}
+ self.journal = journal
+ self._queue = []
+
+ self.file = util.posixfile(self.journal, "w")
+ if createmode is not None:
+ os.chmod(self.journal, createmode & 0666)
+
+ def __del__(self):
+ if self.journal:
+ self._abort()
+
+ @active
+ def startgroup(self):
+ self._queue.append([])
+
+ @active
+ def endgroup(self):
+ q = self._queue.pop()
+ d = ''.join(['%s\0%d\n' % (x[0], x[1]) for x in q])
+ self.entries.extend(q)
+ self.file.write(d)
+ self.file.flush()
+
+ @active
+ def add(self, file, offset, data=None):
+ if file in self.map:
+ return
+ if self._queue:
+ self._queue[-1].append((file, offset, data))
+ return
+
+ self.entries.append((file, offset, data))
+ self.map[file] = len(self.entries) - 1
+ # add enough data to the journal to do the truncate
+ self.file.write("%s\0%d\n" % (file, offset))
+ self.file.flush()
+
+ @active
+ def find(self, file):
+ if file in self.map:
+ return self.entries[self.map[file]]
+ return None
+
+ @active
+ def replace(self, file, offset, data=None):
+ '''
+ replace can only replace already committed entries
+ that are not pending in the queue
+ '''
+
+ if file not in self.map:
+ raise KeyError(file)
+ index = self.map[file]
+ self.entries[index] = (file, offset, data)
+ self.file.write("%s\0%d\n" % (file, offset))
+ self.file.flush()
+
+ @active
+ def nest(self):
+ self.count += 1
+ self.usages += 1
+ return self
+
+ def release(self):
+ if self.count > 0:
+ self.usages -= 1
+ # if the transaction scopes are left without being closed, fail
+ if self.count > 0 and self.usages == 0:
+ self._abort()
+
+ def running(self):
+ return self.count > 0
+
+ @active
+ def close(self):
+ '''commit the transaction'''
+ self.count -= 1
+ if self.count != 0:
+ return
+ self.file.close()
+ self.entries = []
+ if self.after:
+ self.after()
+ if os.path.isfile(self.journal):
+ util.unlink(self.journal)
+ self.journal = None
+
+ @active
+ def abort(self):
+ '''abort the transaction (generally called on error, or when the
+ transaction is not explicitly committed before going out of
+ scope)'''
+ self._abort()
+
+ def _abort(self):
+ self.count = 0
+ self.usages = 0
+ self.file.close()
+
+ try:
+ if not self.entries:
+ if self.journal:
+ util.unlink(self.journal)
+ return
+
+ self.report(_("transaction abort!\n"))
+
+ try:
+ _playback(self.journal, self.report, self.opener,
+ self.entries, False)
+ self.report(_("rollback completed\n"))
+ except:
+ self.report(_("rollback failed - please run hg recover\n"))
+ finally:
+ self.journal = None
+
+
+def rollback(opener, file, report):
+ entries = []
+
+ fp = util.posixfile(file)
+ lines = fp.readlines()
+ fp.close()
+ for l in lines:
+ f, o = l.split('\0')
+ entries.append((f, int(o), None))
+
+ _playback(file, report, opener, entries)
diff --git a/websdk/mercurial/treediscovery.py b/websdk/mercurial/treediscovery.py
new file mode 100644
index 0000000..49011c9
--- /dev/null
+++ b/websdk/mercurial/treediscovery.py
@@ -0,0 +1,150 @@
+# discovery.py - protocol changeset discovery functions
+#
+# Copyright 2010 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from node import nullid, short
+from i18n import _
+import util, error
+
+def findcommonincoming(repo, remote, heads=None, force=False):
+ """Return a tuple (common, fetch, heads) used to identify the common
+ subset of nodes between repo and remote.
+
+ "common" is a list of (at least) the heads of the common subset.
+ "fetch" is a list of roots of the nodes that would be incoming, to be
+ supplied to changegroupsubset.
+ "heads" is either the supplied heads, or else the remote's heads.
+ """
+
+ m = repo.changelog.nodemap
+ search = []
+ fetch = set()
+ seen = set()
+ seenbranch = set()
+ base = set()
+
+ if not heads:
+ heads = remote.heads()
+
+ if repo.changelog.tip() == nullid:
+ base.add(nullid)
+ if heads != [nullid]:
+ return [nullid], [nullid], list(heads)
+ return [nullid], [], heads
+
+ # assume we're closer to the tip than the root
+ # and start by examining the heads
+ repo.ui.status(_("searching for changes\n"))
+
+ unknown = []
+ for h in heads:
+ if h not in m:
+ unknown.append(h)
+ else:
+ base.add(h)
+
+ if not unknown:
+ return list(base), [], list(heads)
+
+ req = set(unknown)
+ reqcnt = 0
+
+ # search through remote branches
+ # a 'branch' here is a linear segment of history, with four parts:
+ # head, root, first parent, second parent
+ # (a branch always has two parents (or none) by definition)
+ unknown = remote.branches(unknown)
+ while unknown:
+ r = []
+ while unknown:
+ n = unknown.pop(0)
+ if n[0] in seen:
+ continue
+
+ repo.ui.debug("examining %s:%s\n"
+ % (short(n[0]), short(n[1])))
+ if n[0] == nullid: # found the end of the branch
+ pass
+ elif n in seenbranch:
+ repo.ui.debug("branch already found\n")
+ continue
+ elif n[1] and n[1] in m: # do we know the base?
+ repo.ui.debug("found incomplete branch %s:%s\n"
+ % (short(n[0]), short(n[1])))
+ search.append(n[0:2]) # schedule branch range for scanning
+ seenbranch.add(n)
+ else:
+ if n[1] not in seen and n[1] not in fetch:
+ if n[2] in m and n[3] in m:
+ repo.ui.debug("found new changeset %s\n" %
+ short(n[1]))
+ fetch.add(n[1]) # earliest unknown
+ for p in n[2:4]:
+ if p in m:
+ base.add(p) # latest known
+
+ for p in n[2:4]:
+ if p not in req and p not in m:
+ r.append(p)
+ req.add(p)
+ seen.add(n[0])
+
+ if r:
+ reqcnt += 1
+ repo.ui.progress(_('searching'), reqcnt, unit=_('queries'))
+ repo.ui.debug("request %d: %s\n" %
+ (reqcnt, " ".join(map(short, r))))
+ for p in xrange(0, len(r), 10):
+ for b in remote.branches(r[p:p + 10]):
+ repo.ui.debug("received %s:%s\n" %
+ (short(b[0]), short(b[1])))
+ unknown.append(b)
+
+ # do binary search on the branches we found
+ while search:
+ newsearch = []
+ reqcnt += 1
+ repo.ui.progress(_('searching'), reqcnt, unit=_('queries'))
+ for n, l in zip(search, remote.between(search)):
+ l.append(n[1])
+ p = n[0]
+ f = 1
+ for i in l:
+ repo.ui.debug("narrowing %d:%d %s\n" % (f, len(l), short(i)))
+ if i in m:
+ if f <= 2:
+ repo.ui.debug("found new branch changeset %s\n" %
+ short(p))
+ fetch.add(p)
+ base.add(i)
+ else:
+ repo.ui.debug("narrowed branch search to %s:%s\n"
+ % (short(p), short(i)))
+ newsearch.append((p, i))
+ break
+ p, f = i, f * 2
+ search = newsearch
+
+ # sanity check our fetch list
+ for f in fetch:
+ if f in m:
+ raise error.RepoError(_("already have changeset ")
+ + short(f[:4]))
+
+ base = list(base)
+ if base == [nullid]:
+ if force:
+ repo.ui.warn(_("warning: repository is unrelated\n"))
+ else:
+ raise util.Abort(_("repository is unrelated"))
+
+ repo.ui.debug("found new changesets starting at " +
+ " ".join([short(f) for f in fetch]) + "\n")
+
+ repo.ui.progress(_('searching'), None)
+ repo.ui.debug("%d total queries\n" % reqcnt)
+
+ return base, list(fetch), heads
diff --git a/websdk/mercurial/ui.py b/websdk/mercurial/ui.py
index 43fe077..ef76206 120000..100644
--- a/websdk/mercurial/ui.py
+++ b/websdk/mercurial/ui.py
@@ -1 +1,734 @@
-/usr/share/pyshared/mercurial/ui.py \ No newline at end of file
+# ui.py - user interface bits for mercurial
+#
+# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from i18n import _
+import errno, getpass, os, socket, sys, tempfile, traceback
+import config, scmutil, util, error
+
+class ui(object):
+ def __init__(self, src=None):
+ self._buffers = []
+ self.quiet = self.verbose = self.debugflag = self.tracebackflag = False
+ self._reportuntrusted = True
+ self._ocfg = config.config() # overlay
+ self._tcfg = config.config() # trusted
+ self._ucfg = config.config() # untrusted
+ self._trustusers = set()
+ self._trustgroups = set()
+
+ if src:
+ self.fout = src.fout
+ self.ferr = src.ferr
+ self.fin = src.fin
+
+ self._tcfg = src._tcfg.copy()
+ self._ucfg = src._ucfg.copy()
+ self._ocfg = src._ocfg.copy()
+ self._trustusers = src._trustusers.copy()
+ self._trustgroups = src._trustgroups.copy()
+ self.environ = src.environ
+ self.fixconfig()
+ else:
+ self.fout = sys.stdout
+ self.ferr = sys.stderr
+ self.fin = sys.stdin
+
+ # shared read-only environment
+ self.environ = os.environ
+ # we always trust global config files
+ for f in scmutil.rcpath():
+ self.readconfig(f, trust=True)
+
+ def copy(self):
+ return self.__class__(self)
+
+ def _trusted(self, fp, f):
+ st = util.fstat(fp)
+ if util.isowner(st):
+ return True
+
+ tusers, tgroups = self._trustusers, self._trustgroups
+ if '*' in tusers or '*' in tgroups:
+ return True
+
+ user = util.username(st.st_uid)
+ group = util.groupname(st.st_gid)
+ if user in tusers or group in tgroups or user == util.username():
+ return True
+
+ if self._reportuntrusted:
+ self.warn(_('Not trusting file %s from untrusted '
+ 'user %s, group %s\n') % (f, user, group))
+ return False
+
+ def readconfig(self, filename, root=None, trust=False,
+ sections=None, remap=None):
+ try:
+ fp = open(filename)
+ except IOError:
+ if not sections: # ignore unless we were looking for something
+ return
+ raise
+
+ cfg = config.config()
+ trusted = sections or trust or self._trusted(fp, filename)
+
+ try:
+ cfg.read(filename, fp, sections=sections, remap=remap)
+ fp.close()
+ except error.ConfigError, inst:
+ if trusted:
+ raise
+ self.warn(_("Ignored: %s\n") % str(inst))
+
+ if self.plain():
+ for k in ('debug', 'fallbackencoding', 'quiet', 'slash',
+ 'logtemplate', 'style',
+ 'traceback', 'verbose'):
+ if k in cfg['ui']:
+ del cfg['ui'][k]
+ for k, v in cfg.items('defaults'):
+ del cfg['defaults'][k]
+ # Don't remove aliases from the configuration if in the exceptionlist
+ if self.plain('alias'):
+ for k, v in cfg.items('alias'):
+ del cfg['alias'][k]
+
+ if trusted:
+ self._tcfg.update(cfg)
+ self._tcfg.update(self._ocfg)
+ self._ucfg.update(cfg)
+ self._ucfg.update(self._ocfg)
+
+ if root is None:
+ root = os.path.expanduser('~')
+ self.fixconfig(root=root)
+
+ def fixconfig(self, root=None, section=None):
+ if section in (None, 'paths'):
+ # expand vars and ~
+ # translate paths relative to root (or home) into absolute paths
+ root = root or os.getcwd()
+ for c in self._tcfg, self._ucfg, self._ocfg:
+ for n, p in c.items('paths'):
+ if not p:
+ continue
+ if '%%' in p:
+ self.warn(_("(deprecated '%%' in path %s=%s from %s)\n")
+ % (n, p, self.configsource('paths', n)))
+ p = p.replace('%%', '%')
+ p = util.expandpath(p)
+ if not util.hasscheme(p) and not os.path.isabs(p):
+ p = os.path.normpath(os.path.join(root, p))
+ c.set("paths", n, p)
+
+ if section in (None, 'ui'):
+ # update ui options
+ self.debugflag = self.configbool('ui', 'debug')
+ self.verbose = self.debugflag or self.configbool('ui', 'verbose')
+ self.quiet = not self.debugflag and self.configbool('ui', 'quiet')
+ if self.verbose and self.quiet:
+ self.quiet = self.verbose = False
+ self._reportuntrusted = self.debugflag or self.configbool("ui",
+ "report_untrusted", True)
+ self.tracebackflag = self.configbool('ui', 'traceback', False)
+
+ if section in (None, 'trusted'):
+ # update trust information
+ self._trustusers.update(self.configlist('trusted', 'users'))
+ self._trustgroups.update(self.configlist('trusted', 'groups'))
+
+ def setconfig(self, section, name, value, overlay=True):
+ if overlay:
+ self._ocfg.set(section, name, value)
+ self._tcfg.set(section, name, value)
+ self._ucfg.set(section, name, value)
+ self.fixconfig(section=section)
+
+ def _data(self, untrusted):
+ return untrusted and self._ucfg or self._tcfg
+
+ def configsource(self, section, name, untrusted=False):
+ return self._data(untrusted).source(section, name) or 'none'
+
+ def config(self, section, name, default=None, untrusted=False):
+ if isinstance(name, list):
+ alternates = name
+ else:
+ alternates = [name]
+
+ for n in alternates:
+ value = self._data(untrusted).get(section, name, None)
+ if value is not None:
+ name = n
+ break
+ else:
+ value = default
+
+ if self.debugflag and not untrusted and self._reportuntrusted:
+ uvalue = self._ucfg.get(section, name)
+ if uvalue is not None and uvalue != value:
+ self.debug("ignoring untrusted configuration option "
+ "%s.%s = %s\n" % (section, name, uvalue))
+ return value
+
+ def configpath(self, section, name, default=None, untrusted=False):
+ 'get a path config item, expanded relative to repo root or config file'
+ v = self.config(section, name, default, untrusted)
+ if v is None:
+ return None
+ if not os.path.isabs(v) or "://" not in v:
+ src = self.configsource(section, name, untrusted)
+ if ':' in src:
+ base = os.path.dirname(src.rsplit(':')[0])
+ v = os.path.join(base, os.path.expanduser(v))
+ return v
+
+ def configbool(self, section, name, default=False, untrusted=False):
+ """parse a configuration element as a boolean
+
+ >>> u = ui(); s = 'foo'
+ >>> u.setconfig(s, 'true', 'yes')
+ >>> u.configbool(s, 'true')
+ True
+ >>> u.setconfig(s, 'false', 'no')
+ >>> u.configbool(s, 'false')
+ False
+ >>> u.configbool(s, 'unknown')
+ False
+ >>> u.configbool(s, 'unknown', True)
+ True
+ >>> u.setconfig(s, 'invalid', 'somevalue')
+ >>> u.configbool(s, 'invalid')
+ Traceback (most recent call last):
+ ...
+ ConfigError: foo.invalid is not a boolean ('somevalue')
+ """
+
+ v = self.config(section, name, None, untrusted)
+ if v is None:
+ return default
+ if isinstance(v, bool):
+ return v
+ b = util.parsebool(v)
+ if b is None:
+ raise error.ConfigError(_("%s.%s is not a boolean ('%s')")
+ % (section, name, v))
+ return b
+
+ def configint(self, section, name, default=None, untrusted=False):
+ """parse a configuration element as an integer
+
+ >>> u = ui(); s = 'foo'
+ >>> u.setconfig(s, 'int1', '42')
+ >>> u.configint(s, 'int1')
+ 42
+ >>> u.setconfig(s, 'int2', '-42')
+ >>> u.configint(s, 'int2')
+ -42
+ >>> u.configint(s, 'unknown', 7)
+ 7
+ >>> u.setconfig(s, 'invalid', 'somevalue')
+ >>> u.configint(s, 'invalid')
+ Traceback (most recent call last):
+ ...
+ ConfigError: foo.invalid is not an integer ('somevalue')
+ """
+
+ v = self.config(section, name, None, untrusted)
+ if v is None:
+ return default
+ try:
+ return int(v)
+ except ValueError:
+ raise error.ConfigError(_("%s.%s is not an integer ('%s')")
+ % (section, name, v))
+
+ def configlist(self, section, name, default=None, untrusted=False):
+ """parse a configuration element as a list of comma/space separated
+ strings
+
+ >>> u = ui(); s = 'foo'
+ >>> u.setconfig(s, 'list1', 'this,is "a small" ,test')
+ >>> u.configlist(s, 'list1')
+ ['this', 'is', 'a small', 'test']
+ """
+
+ def _parse_plain(parts, s, offset):
+ whitespace = False
+ while offset < len(s) and (s[offset].isspace() or s[offset] == ','):
+ whitespace = True
+ offset += 1
+ if offset >= len(s):
+ return None, parts, offset
+ if whitespace:
+ parts.append('')
+ if s[offset] == '"' and not parts[-1]:
+ return _parse_quote, parts, offset + 1
+ elif s[offset] == '"' and parts[-1][-1] == '\\':
+ parts[-1] = parts[-1][:-1] + s[offset]
+ return _parse_plain, parts, offset + 1
+ parts[-1] += s[offset]
+ return _parse_plain, parts, offset + 1
+
+ def _parse_quote(parts, s, offset):
+ if offset < len(s) and s[offset] == '"': # ""
+ parts.append('')
+ offset += 1
+ while offset < len(s) and (s[offset].isspace() or
+ s[offset] == ','):
+ offset += 1
+ return _parse_plain, parts, offset
+
+ while offset < len(s) and s[offset] != '"':
+ if (s[offset] == '\\' and offset + 1 < len(s)
+ and s[offset + 1] == '"'):
+ offset += 1
+ parts[-1] += '"'
+ else:
+ parts[-1] += s[offset]
+ offset += 1
+
+ if offset >= len(s):
+ real_parts = _configlist(parts[-1])
+ if not real_parts:
+ parts[-1] = '"'
+ else:
+ real_parts[0] = '"' + real_parts[0]
+ parts = parts[:-1]
+ parts.extend(real_parts)
+ return None, parts, offset
+
+ offset += 1
+ while offset < len(s) and s[offset] in [' ', ',']:
+ offset += 1
+
+ if offset < len(s):
+ if offset + 1 == len(s) and s[offset] == '"':
+ parts[-1] += '"'
+ offset += 1
+ else:
+ parts.append('')
+ else:
+ return None, parts, offset
+
+ return _parse_plain, parts, offset
+
+ def _configlist(s):
+ s = s.rstrip(' ,')
+ if not s:
+ return []
+ parser, parts, offset = _parse_plain, [''], 0
+ while parser:
+ parser, parts, offset = parser(parts, s, offset)
+ return parts
+
+ result = self.config(section, name, untrusted=untrusted)
+ if result is None:
+ result = default or []
+ if isinstance(result, basestring):
+ result = _configlist(result.lstrip(' ,\n'))
+ if result is None:
+ result = default or []
+ return result
+
+ def has_section(self, section, untrusted=False):
+ '''tell whether section exists in config.'''
+ return section in self._data(untrusted)
+
+ def configitems(self, section, untrusted=False):
+ items = self._data(untrusted).items(section)
+ if self.debugflag and not untrusted and self._reportuntrusted:
+ for k, v in self._ucfg.items(section):
+ if self._tcfg.get(section, k) != v:
+ self.debug("ignoring untrusted configuration option "
+ "%s.%s = %s\n" % (section, k, v))
+ return items
+
+ def walkconfig(self, untrusted=False):
+ cfg = self._data(untrusted)
+ for section in cfg.sections():
+ for name, value in self.configitems(section, untrusted):
+ yield section, name, value
+
+ def plain(self, feature=None):
+ '''is plain mode active?
+
+ Plain mode means that all configuration variables which affect
+ the behavior and output of Mercurial should be
+ ignored. Additionally, the output should be stable,
+ reproducible and suitable for use in scripts or applications.
+
+ The only way to trigger plain mode is by setting either the
+ `HGPLAIN' or `HGPLAINEXCEPT' environment variables.
+
+ The return value can either be
+ - False if HGPLAIN is not set, or feature is in HGPLAINEXCEPT
+ - True otherwise
+ '''
+ if 'HGPLAIN' not in os.environ and 'HGPLAINEXCEPT' not in os.environ:
+ return False
+ exceptions = os.environ.get('HGPLAINEXCEPT', '').strip().split(',')
+ if feature and exceptions:
+ return feature not in exceptions
+ return True
+
+ def username(self):
+ """Return default username to be used in commits.
+
+ Searched in this order: $HGUSER, [ui] section of hgrcs, $EMAIL
+ and stop searching if one of these is set.
+ If not found and ui.askusername is True, ask the user, else use
+ ($LOGNAME or $USER or $LNAME or $USERNAME) + "@full.hostname".
+ """
+ user = os.environ.get("HGUSER")
+ if user is None:
+ user = self.config("ui", "username")
+ if user is not None:
+ user = os.path.expandvars(user)
+ if user is None:
+ user = os.environ.get("EMAIL")
+ if user is None and self.configbool("ui", "askusername"):
+ user = self.prompt(_("enter a commit username:"), default=None)
+ if user is None and not self.interactive():
+ try:
+ user = '%s@%s' % (util.getuser(), socket.getfqdn())
+ self.warn(_("No username found, using '%s' instead\n") % user)
+ except KeyError:
+ pass
+ if not user:
+ raise util.Abort(_('no username supplied (see "hg help config")'))
+ if "\n" in user:
+ raise util.Abort(_("username %s contains a newline\n") % repr(user))
+ return user
+
+ def shortuser(self, user):
+ """Return a short representation of a user name or email address."""
+ if not self.verbose:
+ user = util.shortuser(user)
+ return user
+
+ def expandpath(self, loc, default=None):
+ """Return repository location relative to cwd or from [paths]"""
+ if util.hasscheme(loc) or os.path.isdir(os.path.join(loc, '.hg')):
+ return loc
+
+ path = self.config('paths', loc)
+ if not path and default is not None:
+ path = self.config('paths', default)
+ return path or loc
+
+ def pushbuffer(self):
+ self._buffers.append([])
+
+ def popbuffer(self, labeled=False):
+ '''pop the last buffer and return the buffered output
+
+ If labeled is True, any labels associated with buffered
+ output will be handled. By default, this has no effect
+ on the output returned, but extensions and GUI tools may
+ handle this argument and returned styled output. If output
+ is being buffered so it can be captured and parsed or
+ processed, labeled should not be set to True.
+ '''
+ return "".join(self._buffers.pop())
+
+ def write(self, *args, **opts):
+ '''write args to output
+
+ By default, this method simply writes to the buffer or stdout,
+ but extensions or GUI tools may override this method,
+ write_err(), popbuffer(), and label() to style output from
+ various parts of hg.
+
+ An optional keyword argument, "label", can be passed in.
+ This should be a string containing label names separated by
+ space. Label names take the form of "topic.type". For example,
+ ui.debug() issues a label of "ui.debug".
+
+ When labeling output for a specific command, a label of
+ "cmdname.type" is recommended. For example, status issues
+ a label of "status.modified" for modified files.
+ '''
+ if self._buffers:
+ self._buffers[-1].extend([str(a) for a in args])
+ else:
+ for a in args:
+ self.fout.write(str(a))
+
+ def write_err(self, *args, **opts):
+ try:
+ if not getattr(self.fout, 'closed', False):
+ self.fout.flush()
+ for a in args:
+ self.ferr.write(str(a))
+ # stderr may be buffered under win32 when redirected to files,
+ # including stdout.
+ if not getattr(self.ferr, 'closed', False):
+ self.ferr.flush()
+ except IOError, inst:
+ if inst.errno not in (errno.EPIPE, errno.EIO):
+ raise
+
+ def flush(self):
+ try: self.fout.flush()
+ except: pass
+ try: self.ferr.flush()
+ except: pass
+
+ def interactive(self):
+ '''is interactive input allowed?
+
+ An interactive session is a session where input can be reasonably read
+ from `sys.stdin'. If this function returns false, any attempt to read
+ from stdin should fail with an error, unless a sensible default has been
+ specified.
+
+ Interactiveness is triggered by the value of the `ui.interactive'
+ configuration variable or - if it is unset - when `sys.stdin' points
+ to a terminal device.
+
+ This function refers to input only; for output, see `ui.formatted()'.
+ '''
+ i = self.configbool("ui", "interactive", None)
+ if i is None:
+ # some environments replace stdin without implementing isatty
+ # usually those are non-interactive
+ return util.isatty(self.fin)
+
+ return i
+
+ def termwidth(self):
+ '''how wide is the terminal in columns?
+ '''
+ if 'COLUMNS' in os.environ:
+ try:
+ return int(os.environ['COLUMNS'])
+ except ValueError:
+ pass
+ return util.termwidth()
+
+ def formatted(self):
+ '''should formatted output be used?
+
+ It is often desirable to format the output to suite the output medium.
+ Examples of this are truncating long lines or colorizing messages.
+ However, this is not often not desirable when piping output into other
+ utilities, e.g. `grep'.
+
+ Formatted output is triggered by the value of the `ui.formatted'
+ configuration variable or - if it is unset - when `sys.stdout' points
+ to a terminal device. Please note that `ui.formatted' should be
+ considered an implementation detail; it is not intended for use outside
+ Mercurial or its extensions.
+
+ This function refers to output only; for input, see `ui.interactive()'.
+ This function always returns false when in plain mode, see `ui.plain()'.
+ '''
+ if self.plain():
+ return False
+
+ i = self.configbool("ui", "formatted", None)
+ if i is None:
+ # some environments replace stdout without implementing isatty
+ # usually those are non-interactive
+ return util.isatty(self.fout)
+
+ return i
+
+ def _readline(self, prompt=''):
+ if util.isatty(self.fin):
+ try:
+ # magically add command line editing support, where
+ # available
+ import readline
+ # force demandimport to really load the module
+ readline.read_history_file
+ # windows sometimes raises something other than ImportError
+ except Exception:
+ pass
+
+ # call write() so output goes through subclassed implementation
+ # e.g. color extension on Windows
+ self.write(prompt)
+
+ # instead of trying to emulate raw_input, swap (self.fin,
+ # self.fout) with (sys.stdin, sys.stdout)
+ oldin = sys.stdin
+ oldout = sys.stdout
+ sys.stdin = self.fin
+ sys.stdout = self.fout
+ line = raw_input(' ')
+ sys.stdin = oldin
+ sys.stdout = oldout
+
+ # When stdin is in binary mode on Windows, it can cause
+ # raw_input() to emit an extra trailing carriage return
+ if os.linesep == '\r\n' and line and line[-1] == '\r':
+ line = line[:-1]
+ return line
+
+ def prompt(self, msg, default="y"):
+ """Prompt user with msg, read response.
+ If ui is not interactive, the default is returned.
+ """
+ if not self.interactive():
+ self.write(msg, ' ', default, "\n")
+ return default
+ try:
+ r = self._readline(self.label(msg, 'ui.prompt'))
+ if not r:
+ return default
+ return r
+ except EOFError:
+ raise util.Abort(_('response expected'))
+
+ def promptchoice(self, msg, choices, default=0):
+ """Prompt user with msg, read response, and ensure it matches
+ one of the provided choices. The index of the choice is returned.
+ choices is a sequence of acceptable responses with the format:
+ ('&None', 'E&xec', 'Sym&link') Responses are case insensitive.
+ If ui is not interactive, the default is returned.
+ """
+ resps = [s[s.index('&')+1].lower() for s in choices]
+ while True:
+ r = self.prompt(msg, resps[default])
+ if r.lower() in resps:
+ return resps.index(r.lower())
+ self.write(_("unrecognized response\n"))
+
+ def getpass(self, prompt=None, default=None):
+ if not self.interactive():
+ return default
+ try:
+ return getpass.getpass(prompt or _('password: '))
+ except EOFError:
+ raise util.Abort(_('response expected'))
+ def status(self, *msg, **opts):
+ '''write status message to output (if ui.quiet is False)
+
+ This adds an output label of "ui.status".
+ '''
+ if not self.quiet:
+ opts['label'] = opts.get('label', '') + ' ui.status'
+ self.write(*msg, **opts)
+ def warn(self, *msg, **opts):
+ '''write warning message to output (stderr)
+
+ This adds an output label of "ui.warning".
+ '''
+ opts['label'] = opts.get('label', '') + ' ui.warning'
+ self.write_err(*msg, **opts)
+ def note(self, *msg, **opts):
+ '''write note to output (if ui.verbose is True)
+
+ This adds an output label of "ui.note".
+ '''
+ if self.verbose:
+ opts['label'] = opts.get('label', '') + ' ui.note'
+ self.write(*msg, **opts)
+ def debug(self, *msg, **opts):
+ '''write debug message to output (if ui.debugflag is True)
+
+ This adds an output label of "ui.debug".
+ '''
+ if self.debugflag:
+ opts['label'] = opts.get('label', '') + ' ui.debug'
+ self.write(*msg, **opts)
+ def edit(self, text, user):
+ (fd, name) = tempfile.mkstemp(prefix="hg-editor-", suffix=".txt",
+ text=True)
+ try:
+ f = os.fdopen(fd, "w")
+ f.write(text)
+ f.close()
+
+ editor = self.geteditor()
+
+ util.system("%s \"%s\"" % (editor, name),
+ environ={'HGUSER': user},
+ onerr=util.Abort, errprefix=_("edit failed"),
+ out=self.fout)
+
+ f = open(name)
+ t = f.read()
+ f.close()
+ finally:
+ os.unlink(name)
+
+ return t
+
+ def traceback(self, exc=None):
+ '''print exception traceback if traceback printing enabled.
+ only to call in exception handler. returns true if traceback
+ printed.'''
+ if self.tracebackflag:
+ if exc:
+ traceback.print_exception(exc[0], exc[1], exc[2], file=self.ferr)
+ else:
+ traceback.print_exc(file=self.ferr)
+ return self.tracebackflag
+
+ def geteditor(self):
+ '''return editor to use'''
+ return (os.environ.get("HGEDITOR") or
+ self.config("ui", "editor") or
+ os.environ.get("VISUAL") or
+ os.environ.get("EDITOR", "vi"))
+
+ def progress(self, topic, pos, item="", unit="", total=None):
+ '''show a progress message
+
+ With stock hg, this is simply a debug message that is hidden
+ by default, but with extensions or GUI tools it may be
+ visible. 'topic' is the current operation, 'item' is a
+ non-numeric marker of the current position (ie the currently
+ in-process file), 'pos' is the current numeric position (ie
+ revision, bytes, etc.), unit is a corresponding unit label,
+ and total is the highest expected pos.
+
+ Multiple nested topics may be active at a time.
+
+ All topics should be marked closed by setting pos to None at
+ termination.
+ '''
+
+ if pos is None or not self.debugflag:
+ return
+
+ if unit:
+ unit = ' ' + unit
+ if item:
+ item = ' ' + item
+
+ if total:
+ pct = 100.0 * pos / total
+ self.debug('%s:%s %s/%s%s (%4.2f%%)\n'
+ % (topic, item, pos, total, unit, pct))
+ else:
+ self.debug('%s:%s %s%s\n' % (topic, item, pos, unit))
+
+ def log(self, service, message):
+ '''hook for logging facility extensions
+
+ service should be a readily-identifiable subsystem, which will
+ allow filtering.
+ message should be a newline-terminated string to log.
+ '''
+ pass
+
+ def label(self, msg, label):
+ '''style msg based on supplied label
+
+ Like ui.write(), this just returns msg unchanged, but extensions
+ and GUI tools can override it to allow styling output without
+ writing it.
+
+ ui.write(s, 'label') is equivalent to
+ ui.write(ui.label(s, 'label')).
+ '''
+ return msg
diff --git a/websdk/mercurial/url.py b/websdk/mercurial/url.py
index e2784a9..2430d3a 120000..100644
--- a/websdk/mercurial/url.py
+++ b/websdk/mercurial/url.py
@@ -1 +1,473 @@
-/usr/share/pyshared/mercurial/url.py \ No newline at end of file
+# url.py - HTTP handling for mercurial
+#
+# Copyright 2005, 2006, 2007, 2008 Matt Mackall <mpm@selenic.com>
+# Copyright 2006, 2007 Alexis S. L. Carvalho <alexis@cecm.usp.br>
+# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+import urllib, urllib2, httplib, os, socket, cStringIO
+from i18n import _
+import keepalive, util, sslutil
+import httpconnection as httpconnectionmod
+
+class passwordmgr(urllib2.HTTPPasswordMgrWithDefaultRealm):
+ def __init__(self, ui):
+ urllib2.HTTPPasswordMgrWithDefaultRealm.__init__(self)
+ self.ui = ui
+
+ def find_user_password(self, realm, authuri):
+ authinfo = urllib2.HTTPPasswordMgrWithDefaultRealm.find_user_password(
+ self, realm, authuri)
+ user, passwd = authinfo
+ if user and passwd:
+ self._writedebug(user, passwd)
+ return (user, passwd)
+
+ if not user or not passwd:
+ res = httpconnectionmod.readauthforuri(self.ui, authuri, user)
+ if res:
+ group, auth = res
+ user, passwd = auth.get('username'), auth.get('password')
+ self.ui.debug("using auth.%s.* for authentication\n" % group)
+ if not user or not passwd:
+ if not self.ui.interactive():
+ raise util.Abort(_('http authorization required'))
+
+ self.ui.write(_("http authorization required\n"))
+ self.ui.write(_("realm: %s\n") % realm)
+ if user:
+ self.ui.write(_("user: %s\n") % user)
+ else:
+ user = self.ui.prompt(_("user:"), default=None)
+
+ if not passwd:
+ passwd = self.ui.getpass()
+
+ self.add_password(realm, authuri, user, passwd)
+ self._writedebug(user, passwd)
+ return (user, passwd)
+
+ def _writedebug(self, user, passwd):
+ msg = _('http auth: user %s, password %s\n')
+ self.ui.debug(msg % (user, passwd and '*' * len(passwd) or 'not set'))
+
+ def find_stored_password(self, authuri):
+ return urllib2.HTTPPasswordMgrWithDefaultRealm.find_user_password(
+ self, None, authuri)
+
+class proxyhandler(urllib2.ProxyHandler):
+ def __init__(self, ui):
+ proxyurl = ui.config("http_proxy", "host") or os.getenv('http_proxy')
+ # XXX proxyauthinfo = None
+
+ if proxyurl:
+ # proxy can be proper url or host[:port]
+ if not (proxyurl.startswith('http:') or
+ proxyurl.startswith('https:')):
+ proxyurl = 'http://' + proxyurl + '/'
+ proxy = util.url(proxyurl)
+ if not proxy.user:
+ proxy.user = ui.config("http_proxy", "user")
+ proxy.passwd = ui.config("http_proxy", "passwd")
+
+ # see if we should use a proxy for this url
+ no_list = ["localhost", "127.0.0.1"]
+ no_list.extend([p.lower() for
+ p in ui.configlist("http_proxy", "no")])
+ no_list.extend([p.strip().lower() for
+ p in os.getenv("no_proxy", '').split(',')
+ if p.strip()])
+ # "http_proxy.always" config is for running tests on localhost
+ if ui.configbool("http_proxy", "always"):
+ self.no_list = []
+ else:
+ self.no_list = no_list
+
+ proxyurl = str(proxy)
+ proxies = {'http': proxyurl, 'https': proxyurl}
+ ui.debug('proxying through http://%s:%s\n' %
+ (proxy.host, proxy.port))
+ else:
+ proxies = {}
+
+ # urllib2 takes proxy values from the environment and those
+ # will take precedence if found. So, if there's a config entry
+ # defining a proxy, drop the environment ones
+ if ui.config("http_proxy", "host"):
+ for env in ["HTTP_PROXY", "http_proxy", "no_proxy"]:
+ try:
+ if env in os.environ:
+ del os.environ[env]
+ except OSError:
+ pass
+
+ urllib2.ProxyHandler.__init__(self, proxies)
+ self.ui = ui
+
+ def proxy_open(self, req, proxy, type_):
+ host = req.get_host().split(':')[0]
+ if host in self.no_list:
+ return None
+
+ # work around a bug in Python < 2.4.2
+ # (it leaves a "\n" at the end of Proxy-authorization headers)
+ baseclass = req.__class__
+ class _request(baseclass):
+ def add_header(self, key, val):
+ if key.lower() == 'proxy-authorization':
+ val = val.strip()
+ return baseclass.add_header(self, key, val)
+ req.__class__ = _request
+
+ return urllib2.ProxyHandler.proxy_open(self, req, proxy, type_)
+
+def _gen_sendfile(orgsend):
+ def _sendfile(self, data):
+ # send a file
+ if isinstance(data, httpconnectionmod.httpsendfile):
+ # if auth required, some data sent twice, so rewind here
+ data.seek(0)
+ for chunk in util.filechunkiter(data):
+ orgsend(self, chunk)
+ else:
+ orgsend(self, data)
+ return _sendfile
+
+has_https = util.safehasattr(urllib2, 'HTTPSHandler')
+if has_https:
+ try:
+ _create_connection = socket.create_connection
+ except AttributeError:
+ _GLOBAL_DEFAULT_TIMEOUT = object()
+
+ def _create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT,
+ source_address=None):
+ # lifted from Python 2.6
+
+ msg = "getaddrinfo returns an empty list"
+ host, port = address
+ for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):
+ af, socktype, proto, canonname, sa = res
+ sock = None
+ try:
+ sock = socket.socket(af, socktype, proto)
+ if timeout is not _GLOBAL_DEFAULT_TIMEOUT:
+ sock.settimeout(timeout)
+ if source_address:
+ sock.bind(source_address)
+ sock.connect(sa)
+ return sock
+
+ except socket.error, msg:
+ if sock is not None:
+ sock.close()
+
+ raise socket.error, msg
+
+class httpconnection(keepalive.HTTPConnection):
+ # must be able to send big bundle as stream.
+ send = _gen_sendfile(keepalive.HTTPConnection.send)
+
+ def connect(self):
+ if has_https and self.realhostport: # use CONNECT proxy
+ self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ self.sock.connect((self.host, self.port))
+ if _generic_proxytunnel(self):
+ # we do not support client x509 certificates
+ self.sock = sslutil.ssl_wrap_socket(self.sock, None, None)
+ else:
+ keepalive.HTTPConnection.connect(self)
+
+ def getresponse(self):
+ proxyres = getattr(self, 'proxyres', None)
+ if proxyres:
+ if proxyres.will_close:
+ self.close()
+ self.proxyres = None
+ return proxyres
+ return keepalive.HTTPConnection.getresponse(self)
+
+# general transaction handler to support different ways to handle
+# HTTPS proxying before and after Python 2.6.3.
+def _generic_start_transaction(handler, h, req):
+ tunnel_host = getattr(req, '_tunnel_host', None)
+ if tunnel_host:
+ if tunnel_host[:7] not in ['http://', 'https:/']:
+ tunnel_host = 'https://' + tunnel_host
+ new_tunnel = True
+ else:
+ tunnel_host = req.get_selector()
+ new_tunnel = False
+
+ if new_tunnel or tunnel_host == req.get_full_url(): # has proxy
+ u = util.url(tunnel_host)
+ if new_tunnel or u.scheme == 'https': # only use CONNECT for HTTPS
+ h.realhostport = ':'.join([u.host, (u.port or '443')])
+ h.headers = req.headers.copy()
+ h.headers.update(handler.parent.addheaders)
+ return
+
+ h.realhostport = None
+ h.headers = None
+
+def _generic_proxytunnel(self):
+ proxyheaders = dict(
+ [(x, self.headers[x]) for x in self.headers
+ if x.lower().startswith('proxy-')])
+ self._set_hostport(self.host, self.port)
+ self.send('CONNECT %s HTTP/1.0\r\n' % self.realhostport)
+ for header in proxyheaders.iteritems():
+ self.send('%s: %s\r\n' % header)
+ self.send('\r\n')
+
+ # majority of the following code is duplicated from
+ # httplib.HTTPConnection as there are no adequate places to
+ # override functions to provide the needed functionality
+ res = self.response_class(self.sock,
+ strict=self.strict,
+ method=self._method)
+
+ while True:
+ version, status, reason = res._read_status()
+ if status != httplib.CONTINUE:
+ break
+ while True:
+ skip = res.fp.readline().strip()
+ if not skip:
+ break
+ res.status = status
+ res.reason = reason.strip()
+
+ if res.status == 200:
+ while True:
+ line = res.fp.readline()
+ if line == '\r\n':
+ break
+ return True
+
+ if version == 'HTTP/1.0':
+ res.version = 10
+ elif version.startswith('HTTP/1.'):
+ res.version = 11
+ elif version == 'HTTP/0.9':
+ res.version = 9
+ else:
+ raise httplib.UnknownProtocol(version)
+
+ if res.version == 9:
+ res.length = None
+ res.chunked = 0
+ res.will_close = 1
+ res.msg = httplib.HTTPMessage(cStringIO.StringIO())
+ return False
+
+ res.msg = httplib.HTTPMessage(res.fp)
+ res.msg.fp = None
+
+ # are we using the chunked-style of transfer encoding?
+ trenc = res.msg.getheader('transfer-encoding')
+ if trenc and trenc.lower() == "chunked":
+ res.chunked = 1
+ res.chunk_left = None
+ else:
+ res.chunked = 0
+
+ # will the connection close at the end of the response?
+ res.will_close = res._check_close()
+
+ # do we have a Content-Length?
+ # NOTE: RFC 2616, S4.4, #3 says we ignore this if tr_enc is "chunked"
+ length = res.msg.getheader('content-length')
+ if length and not res.chunked:
+ try:
+ res.length = int(length)
+ except ValueError:
+ res.length = None
+ else:
+ if res.length < 0: # ignore nonsensical negative lengths
+ res.length = None
+ else:
+ res.length = None
+
+ # does the body have a fixed length? (of zero)
+ if (status == httplib.NO_CONTENT or status == httplib.NOT_MODIFIED or
+ 100 <= status < 200 or # 1xx codes
+ res._method == 'HEAD'):
+ res.length = 0
+
+ # if the connection remains open, and we aren't using chunked, and
+ # a content-length was not provided, then assume that the connection
+ # WILL close.
+ if (not res.will_close and
+ not res.chunked and
+ res.length is None):
+ res.will_close = 1
+
+ self.proxyres = res
+
+ return False
+
+class httphandler(keepalive.HTTPHandler):
+ def http_open(self, req):
+ return self.do_open(httpconnection, req)
+
+ def _start_transaction(self, h, req):
+ _generic_start_transaction(self, h, req)
+ return keepalive.HTTPHandler._start_transaction(self, h, req)
+
+if has_https:
+ class httpsconnection(httplib.HTTPSConnection):
+ response_class = keepalive.HTTPResponse
+ # must be able to send big bundle as stream.
+ send = _gen_sendfile(keepalive.safesend)
+ getresponse = keepalive.wrapgetresponse(httplib.HTTPSConnection)
+
+ def connect(self):
+ self.sock = _create_connection((self.host, self.port))
+
+ host = self.host
+ if self.realhostport: # use CONNECT proxy
+ _generic_proxytunnel(self)
+ host = self.realhostport.rsplit(':', 1)[0]
+ self.sock = sslutil.ssl_wrap_socket(
+ self.sock, self.key_file, self.cert_file,
+ **sslutil.sslkwargs(self.ui, host))
+ sslutil.validator(self.ui, host)(self.sock)
+
+ class httpshandler(keepalive.KeepAliveHandler, urllib2.HTTPSHandler):
+ def __init__(self, ui):
+ keepalive.KeepAliveHandler.__init__(self)
+ urllib2.HTTPSHandler.__init__(self)
+ self.ui = ui
+ self.pwmgr = passwordmgr(self.ui)
+
+ def _start_transaction(self, h, req):
+ _generic_start_transaction(self, h, req)
+ return keepalive.KeepAliveHandler._start_transaction(self, h, req)
+
+ def https_open(self, req):
+ # req.get_full_url() does not contain credentials and we may
+ # need them to match the certificates.
+ url = req.get_full_url()
+ user, password = self.pwmgr.find_stored_password(url)
+ res = httpconnectionmod.readauthforuri(self.ui, url, user)
+ if res:
+ group, auth = res
+ self.auth = auth
+ self.ui.debug("using auth.%s.* for authentication\n" % group)
+ else:
+ self.auth = None
+ return self.do_open(self._makeconnection, req)
+
+ def _makeconnection(self, host, port=None, *args, **kwargs):
+ keyfile = None
+ certfile = None
+
+ if len(args) >= 1: # key_file
+ keyfile = args[0]
+ if len(args) >= 2: # cert_file
+ certfile = args[1]
+ args = args[2:]
+
+ # if the user has specified different key/cert files in
+ # hgrc, we prefer these
+ if self.auth and 'key' in self.auth and 'cert' in self.auth:
+ keyfile = self.auth['key']
+ certfile = self.auth['cert']
+
+ conn = httpsconnection(host, port, keyfile, certfile, *args, **kwargs)
+ conn.ui = self.ui
+ return conn
+
+class httpdigestauthhandler(urllib2.HTTPDigestAuthHandler):
+ def __init__(self, *args, **kwargs):
+ urllib2.HTTPDigestAuthHandler.__init__(self, *args, **kwargs)
+ self.retried_req = None
+
+ def reset_retry_count(self):
+ # Python 2.6.5 will call this on 401 or 407 errors and thus loop
+ # forever. We disable reset_retry_count completely and reset in
+ # http_error_auth_reqed instead.
+ pass
+
+ def http_error_auth_reqed(self, auth_header, host, req, headers):
+ # Reset the retry counter once for each request.
+ if req is not self.retried_req:
+ self.retried_req = req
+ self.retried = 0
+ # In python < 2.5 AbstractDigestAuthHandler raises a ValueError if
+ # it doesn't know about the auth type requested. This can happen if
+ # somebody is using BasicAuth and types a bad password.
+ try:
+ return urllib2.HTTPDigestAuthHandler.http_error_auth_reqed(
+ self, auth_header, host, req, headers)
+ except ValueError, inst:
+ arg = inst.args[0]
+ if arg.startswith("AbstractDigestAuthHandler doesn't know "):
+ return
+ raise
+
+class httpbasicauthhandler(urllib2.HTTPBasicAuthHandler):
+ def __init__(self, *args, **kwargs):
+ urllib2.HTTPBasicAuthHandler.__init__(self, *args, **kwargs)
+ self.retried_req = None
+
+ def reset_retry_count(self):
+ # Python 2.6.5 will call this on 401 or 407 errors and thus loop
+ # forever. We disable reset_retry_count completely and reset in
+ # http_error_auth_reqed instead.
+ pass
+
+ def http_error_auth_reqed(self, auth_header, host, req, headers):
+ # Reset the retry counter once for each request.
+ if req is not self.retried_req:
+ self.retried_req = req
+ self.retried = 0
+ return urllib2.HTTPBasicAuthHandler.http_error_auth_reqed(
+ self, auth_header, host, req, headers)
+
+handlerfuncs = []
+
+def opener(ui, authinfo=None):
+ '''
+ construct an opener suitable for urllib2
+ authinfo will be added to the password manager
+ '''
+ if ui.configbool('ui', 'usehttp2', False):
+ handlers = [httpconnectionmod.http2handler(ui, passwordmgr(ui))]
+ else:
+ handlers = [httphandler()]
+ if has_https:
+ handlers.append(httpshandler(ui))
+
+ handlers.append(proxyhandler(ui))
+
+ passmgr = passwordmgr(ui)
+ if authinfo is not None:
+ passmgr.add_password(*authinfo)
+ user, passwd = authinfo[2:4]
+ ui.debug('http auth: user %s, password %s\n' %
+ (user, passwd and '*' * len(passwd) or 'not set'))
+
+ handlers.extend((httpbasicauthhandler(passmgr),
+ httpdigestauthhandler(passmgr)))
+ handlers.extend([h(ui, passmgr) for h in handlerfuncs])
+ opener = urllib2.build_opener(*handlers)
+
+ # 1.0 here is the _protocol_ version
+ opener.addheaders = [('User-agent', 'mercurial/proto-1.0')]
+ opener.addheaders.append(('Accept', 'application/mercurial-0.1'))
+ return opener
+
+def open(ui, url_, data=None):
+ u = util.url(url_)
+ if u.scheme:
+ u.scheme = u.scheme.lower()
+ url_, authinfo = u.authinfo()
+ else:
+ path = util.normpath(os.path.abspath(url_))
+ url_ = 'file://' + urllib.pathname2url(path)
+ authinfo = None
+ return opener(ui, authinfo).open(url_, data)
diff --git a/websdk/mercurial/util.py b/websdk/mercurial/util.py
index e86996a..7366614 120000..100644
--- a/websdk/mercurial/util.py
+++ b/websdk/mercurial/util.py
@@ -1 +1,1741 @@
-/usr/share/pyshared/mercurial/util.py \ No newline at end of file
+# util.py - Mercurial utility functions and platform specfic implementations
+#
+# Copyright 2005 K. Thananchayan <thananck@yahoo.com>
+# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+"""Mercurial utility functions and platform specfic implementations.
+
+This contains helper routines that are independent of the SCM core and
+hide platform-specific details from the core.
+"""
+
+from i18n import _
+import error, osutil, encoding
+import errno, re, shutil, sys, tempfile, traceback
+import os, time, datetime, calendar, textwrap, signal
+import imp, socket, urllib
+
+if os.name == 'nt':
+ import windows as platform
+else:
+ import posix as platform
+
+cachestat = platform.cachestat
+checkexec = platform.checkexec
+checklink = platform.checklink
+copymode = platform.copymode
+executablepath = platform.executablepath
+expandglobs = platform.expandglobs
+explainexit = platform.explainexit
+findexe = platform.findexe
+gethgcmd = platform.gethgcmd
+getuser = platform.getuser
+groupmembers = platform.groupmembers
+groupname = platform.groupname
+hidewindow = platform.hidewindow
+isexec = platform.isexec
+isowner = platform.isowner
+localpath = platform.localpath
+lookupreg = platform.lookupreg
+makedir = platform.makedir
+nlinks = platform.nlinks
+normpath = platform.normpath
+normcase = platform.normcase
+nulldev = platform.nulldev
+openhardlinks = platform.openhardlinks
+oslink = platform.oslink
+parsepatchoutput = platform.parsepatchoutput
+pconvert = platform.pconvert
+popen = platform.popen
+posixfile = platform.posixfile
+quotecommand = platform.quotecommand
+realpath = platform.realpath
+rename = platform.rename
+samedevice = platform.samedevice
+samefile = platform.samefile
+samestat = platform.samestat
+setbinary = platform.setbinary
+setflags = platform.setflags
+setsignalhandler = platform.setsignalhandler
+shellquote = platform.shellquote
+spawndetached = platform.spawndetached
+sshargs = platform.sshargs
+statfiles = platform.statfiles
+termwidth = platform.termwidth
+testpid = platform.testpid
+umask = platform.umask
+unlink = platform.unlink
+unlinkpath = platform.unlinkpath
+username = platform.username
+
+# Python compatibility
+
+def sha1(s=''):
+ '''
+ Low-overhead wrapper around Python's SHA support
+
+ >>> f = _fastsha1
+ >>> a = sha1()
+ >>> a = f()
+ >>> a.hexdigest()
+ 'da39a3ee5e6b4b0d3255bfef95601890afd80709'
+ '''
+
+ return _fastsha1(s)
+
+_notset = object()
+def safehasattr(thing, attr):
+ return getattr(thing, attr, _notset) is not _notset
+
+def _fastsha1(s=''):
+ # This function will import sha1 from hashlib or sha (whichever is
+ # available) and overwrite itself with it on the first call.
+ # Subsequent calls will go directly to the imported function.
+ if sys.version_info >= (2, 5):
+ from hashlib import sha1 as _sha1
+ else:
+ from sha import sha as _sha1
+ global _fastsha1, sha1
+ _fastsha1 = sha1 = _sha1
+ return _sha1(s)
+
+import __builtin__
+
+if sys.version_info[0] < 3:
+ def fakebuffer(sliceable, offset=0):
+ return sliceable[offset:]
+else:
+ def fakebuffer(sliceable, offset=0):
+ return memoryview(sliceable)[offset:]
+try:
+ buffer
+except NameError:
+ __builtin__.buffer = fakebuffer
+
+import subprocess
+closefds = os.name == 'posix'
+
+def popen2(cmd, env=None, newlines=False):
+ # Setting bufsize to -1 lets the system decide the buffer size.
+ # The default for bufsize is 0, meaning unbuffered. This leads to
+ # poor performance on Mac OS X: http://bugs.python.org/issue4194
+ p = subprocess.Popen(cmd, shell=True, bufsize=-1,
+ close_fds=closefds,
+ stdin=subprocess.PIPE, stdout=subprocess.PIPE,
+ universal_newlines=newlines,
+ env=env)
+ return p.stdin, p.stdout
+
+def popen3(cmd, env=None, newlines=False):
+ p = subprocess.Popen(cmd, shell=True, bufsize=-1,
+ close_fds=closefds,
+ stdin=subprocess.PIPE, stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ universal_newlines=newlines,
+ env=env)
+ return p.stdin, p.stdout, p.stderr
+
+def version():
+ """Return version information if available."""
+ try:
+ import __version__
+ return __version__.version
+ except ImportError:
+ return 'unknown'
+
+# used by parsedate
+defaultdateformats = (
+ '%Y-%m-%d %H:%M:%S',
+ '%Y-%m-%d %I:%M:%S%p',
+ '%Y-%m-%d %H:%M',
+ '%Y-%m-%d %I:%M%p',
+ '%Y-%m-%d',
+ '%m-%d',
+ '%m/%d',
+ '%m/%d/%y',
+ '%m/%d/%Y',
+ '%a %b %d %H:%M:%S %Y',
+ '%a %b %d %I:%M:%S%p %Y',
+ '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
+ '%b %d %H:%M:%S %Y',
+ '%b %d %I:%M:%S%p %Y',
+ '%b %d %H:%M:%S',
+ '%b %d %I:%M:%S%p',
+ '%b %d %H:%M',
+ '%b %d %I:%M%p',
+ '%b %d %Y',
+ '%b %d',
+ '%H:%M:%S',
+ '%I:%M:%S%p',
+ '%H:%M',
+ '%I:%M%p',
+)
+
+extendeddateformats = defaultdateformats + (
+ "%Y",
+ "%Y-%m",
+ "%b",
+ "%b %Y",
+ )
+
+def cachefunc(func):
+ '''cache the result of function calls'''
+ # XXX doesn't handle keywords args
+ cache = {}
+ if func.func_code.co_argcount == 1:
+ # we gain a small amount of time because
+ # we don't need to pack/unpack the list
+ def f(arg):
+ if arg not in cache:
+ cache[arg] = func(arg)
+ return cache[arg]
+ else:
+ def f(*args):
+ if args not in cache:
+ cache[args] = func(*args)
+ return cache[args]
+
+ return f
+
+def lrucachefunc(func):
+ '''cache most recent results of function calls'''
+ cache = {}
+ order = []
+ if func.func_code.co_argcount == 1:
+ def f(arg):
+ if arg not in cache:
+ if len(cache) > 20:
+ del cache[order.pop(0)]
+ cache[arg] = func(arg)
+ else:
+ order.remove(arg)
+ order.append(arg)
+ return cache[arg]
+ else:
+ def f(*args):
+ if args not in cache:
+ if len(cache) > 20:
+ del cache[order.pop(0)]
+ cache[args] = func(*args)
+ else:
+ order.remove(args)
+ order.append(args)
+ return cache[args]
+
+ return f
+
+class propertycache(object):
+ def __init__(self, func):
+ self.func = func
+ self.name = func.__name__
+ def __get__(self, obj, type=None):
+ result = self.func(obj)
+ setattr(obj, self.name, result)
+ return result
+
+def pipefilter(s, cmd):
+ '''filter string S through command CMD, returning its output'''
+ p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
+ stdin=subprocess.PIPE, stdout=subprocess.PIPE)
+ pout, perr = p.communicate(s)
+ return pout
+
+def tempfilter(s, cmd):
+ '''filter string S through a pair of temporary files with CMD.
+ CMD is used as a template to create the real command to be run,
+ with the strings INFILE and OUTFILE replaced by the real names of
+ the temporary files generated.'''
+ inname, outname = None, None
+ try:
+ infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
+ fp = os.fdopen(infd, 'wb')
+ fp.write(s)
+ fp.close()
+ outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
+ os.close(outfd)
+ cmd = cmd.replace('INFILE', inname)
+ cmd = cmd.replace('OUTFILE', outname)
+ code = os.system(cmd)
+ if sys.platform == 'OpenVMS' and code & 1:
+ code = 0
+ if code:
+ raise Abort(_("command '%s' failed: %s") %
+ (cmd, explainexit(code)))
+ fp = open(outname, 'rb')
+ r = fp.read()
+ fp.close()
+ return r
+ finally:
+ try:
+ if inname:
+ os.unlink(inname)
+ except OSError:
+ pass
+ try:
+ if outname:
+ os.unlink(outname)
+ except OSError:
+ pass
+
+filtertable = {
+ 'tempfile:': tempfilter,
+ 'pipe:': pipefilter,
+ }
+
+def filter(s, cmd):
+ "filter a string through a command that transforms its input to its output"
+ for name, fn in filtertable.iteritems():
+ if cmd.startswith(name):
+ return fn(s, cmd[len(name):].lstrip())
+ return pipefilter(s, cmd)
+
+def binary(s):
+ """return true if a string is binary data"""
+ return bool(s and '\0' in s)
+
+def increasingchunks(source, min=1024, max=65536):
+ '''return no less than min bytes per chunk while data remains,
+ doubling min after each chunk until it reaches max'''
+ def log2(x):
+ if not x:
+ return 0
+ i = 0
+ while x:
+ x >>= 1
+ i += 1
+ return i - 1
+
+ buf = []
+ blen = 0
+ for chunk in source:
+ buf.append(chunk)
+ blen += len(chunk)
+ if blen >= min:
+ if min < max:
+ min = min << 1
+ nmin = 1 << log2(blen)
+ if nmin > min:
+ min = nmin
+ if min > max:
+ min = max
+ yield ''.join(buf)
+ blen = 0
+ buf = []
+ if buf:
+ yield ''.join(buf)
+
+Abort = error.Abort
+
+def always(fn):
+ return True
+
+def never(fn):
+ return False
+
+def pathto(root, n1, n2):
+ '''return the relative path from one place to another.
+ root should use os.sep to separate directories
+ n1 should use os.sep to separate directories
+ n2 should use "/" to separate directories
+ returns an os.sep-separated path.
+
+ If n1 is a relative path, it's assumed it's
+ relative to root.
+ n2 should always be relative to root.
+ '''
+ if not n1:
+ return localpath(n2)
+ if os.path.isabs(n1):
+ if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
+ return os.path.join(root, localpath(n2))
+ n2 = '/'.join((pconvert(root), n2))
+ a, b = splitpath(n1), n2.split('/')
+ a.reverse()
+ b.reverse()
+ while a and b and a[-1] == b[-1]:
+ a.pop()
+ b.pop()
+ b.reverse()
+ return os.sep.join((['..'] * len(a)) + b) or '.'
+
# Cached path of the 'hg' executable; populated lazily by hgexecutable().
_hgexecutable = None

def mainfrozen():
    """return True if we are a frozen executable.

    The code supports py2exe (most common, Windows only) and tools/freeze
    (portable, not much used).
    """
    return (safehasattr(sys, "frozen") or # new py2exe
            safehasattr(sys, "importers") or # old py2exe
            imp.is_frozen("__main__")) # tools/freeze
+
def hgexecutable():
    """return location of the 'hg' executable.

    Defaults to $HG or 'hg' in the search path.

    The result is cached in the module-level _hgexecutable; the probing
    below runs only on the first call.
    """
    if _hgexecutable is None:
        hg = os.environ.get('HG')
        mainmod = sys.modules['__main__']
        if hg:
            # explicit override via environment
            _sethgexecutable(hg)
        elif mainfrozen():
            # frozen binaries: the interpreter *is* hg
            _sethgexecutable(sys.executable)
        elif os.path.basename(getattr(mainmod, '__file__', '')) == 'hg':
            # running from the 'hg' script itself
            _sethgexecutable(mainmod.__file__)
        else:
            # fall back to searching PATH (findexe is defined elsewhere
            # in this module), then to argv[0]'s basename
            exe = findexe('hg') or os.path.basename(sys.argv[0])
            _sethgexecutable(exe)
    return _hgexecutable
+
def _sethgexecutable(path):
    """set location of the 'hg' executable (updates the module cache)"""
    global _hgexecutable
    _hgexecutable = path
+
def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None, out=None):
    '''enhanced shell command execution.
    run with environment maybe modified, maybe in different dir.

    if command fails and onerr is None, return status. if ui object,
    print error message and return status, else raise onerr object as
    exception.

    if out is specified, it is assumed to be a file-like object that has a
    write() method. stdout and stderr will be redirected to out.'''
    try:
        # keep our own buffered output ordered before the child's
        sys.stdout.flush()
    except Exception:
        pass
    def py2shell(val):
        'convert python object into string that is useful to shell'
        if val is None or val is False:
            return '0'
        if val is True:
            return '1'
        return str(val)
    origcmd = cmd
    # quotecommand is platform-specific (defined elsewhere in this module)
    cmd = quotecommand(cmd)
    env = dict(os.environ)
    env.update((k, py2shell(v)) for k, v in environ.iteritems())
    # let hooks/child scripts re-invoke the same hg
    env['HG'] = hgexecutable()
    if out is None or out == sys.__stdout__:
        # child inherits our stdout/stderr directly
        rc = subprocess.call(cmd, shell=True, close_fds=closefds,
                             env=env, cwd=cwd)
    else:
        # capture both streams and funnel them through out.write()
        proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                                env=env, cwd=cwd, stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
        for line in proc.stdout:
            out.write(line)
        proc.wait()
        rc = proc.returncode
    if sys.platform == 'OpenVMS' and rc & 1:
        # on OpenVMS odd status values mean success
        rc = 0
    if rc and onerr:
        errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
                            explainexit(rc)[0])
        if errprefix:
            errmsg = '%s: %s' % (errprefix, errmsg)
        try:
            # duck-typing: a ui-like object gets a warning, anything
            # else is treated as an exception class and raised
            onerr.warn(errmsg + '\n')
        except AttributeError:
            raise onerr(errmsg)
    return rc
+
def checksignature(func):
    '''wrap a function with code to check for calling errors'''
    def check(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            # a traceback depth of 1 means the TypeError came from the
            # call itself (bad arguments), not from inside func
            if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
                raise error.SignatureError
            raise

    return check
+
def copyfile(src, dest):
    "copy a file, preserving mode and atime/mtime"
    if os.path.islink(src):
        # recreate the symlink rather than copying its target
        try:
            os.unlink(dest)
        except OSError:
            pass
        os.symlink(os.readlink(src), dest)
    else:
        try:
            shutil.copyfile(src, dest)
            shutil.copymode(src, dest)
        except shutil.Error, inst:
            # surface copy failures as a user-level Abort
            raise Abort(str(inst))
+
def copyfiles(src, dst, hardlink=None):
    """Copy a directory tree using hardlinks if possible

    Returns a (hardlink, count) pair: whether hardlinking is still being
    attempted, and how many files were copied/linked.
    """

    if hardlink is None:
        # only try hardlinks when src and dst live on the same device
        hardlink = (os.stat(src).st_dev ==
                    os.stat(os.path.dirname(dst)).st_dev)

    num = 0
    if os.path.isdir(src):
        os.mkdir(dst)
        # osutil.listdir yields (name, kind) pairs (project helper)
        for name, kind in osutil.listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            # recurse; a failed link below disables hardlinking for the rest
            hardlink, n = copyfiles(srcname, dstname, hardlink)
            num += n
    else:
        if hardlink:
            try:
                oslink(src, dst)
            except (IOError, OSError):
                # linking failed (cross-device, unsupported FS, ...):
                # fall back to plain copies from here on
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        num += 1

    return hardlink, num
+
_winreservednames = '''con prn aux nul
    com1 com2 com3 com4 com5 com6 com7 com8 com9
    lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
_winreservedchars = ':*?"<>|'
def checkwinfilename(path):
    '''Check that the base-relative path is a valid filename on Windows.
    Returns None if the path is ok, or a UI string describing the problem.

    >>> checkwinfilename("just/a/normal/path")
    >>> checkwinfilename("foo/bar/con.xml")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/con.xml/bar")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/xml.con")
    >>> checkwinfilename("foo/bar/AUX/bla.txt")
    "filename contains 'AUX', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/bla:.txt")
    "filename contains ':', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/b\07la.txt")
    "filename contains '\\\\x07', which is invalid on Windows"
    >>> checkwinfilename("foo/bar/bla ")
    "filename ends with ' ', which is not allowed on Windows"
    >>> checkwinfilename("../bar")
    '''
    # normalize both separator styles, then vet each path component
    for component in path.replace('\\', '/').split('/'):
        if not component:
            continue
        for ch in component:
            if ch in _winreservedchars:
                return _("filename contains '%s', which is reserved "
                         "on Windows") % ch
            if ord(ch) <= 31:
                return _("filename contains %r, which is invalid "
                         "on Windows") % ch
        # device names are reserved regardless of extension (con.xml)
        stem = component.split('.')[0]
        if stem and stem.lower() in _winreservednames:
            return _("filename contains '%s', which is reserved "
                     "on Windows") % stem
        # trailing dot/space is rejected; the substring test deliberately
        # lets the '.' and '..' components through
        last = component[-1]
        if last in '. ' and component not in '..':
            return _("filename ends with '%s', which is not allowed "
                     "on Windows") % last
+
# Pick the filename validator for the current OS: Windows gets the strict
# rules above; elsewhere defer to the platform module (imported at the top
# of this file; presumably a no-op on posix -- confirm there).
if os.name == 'nt':
    checkosfilename = checkwinfilename
else:
    checkosfilename = platform.checkosfilename
+
def makelock(info, pathname):
    """Create a lock file at pathname carrying *info*.

    Preferred form is a symlink whose target is *info* (atomic and
    readable without opening); falls back to an exclusively-created
    regular file where symlinks are unavailable.
    """
    try:
        return os.symlink(info, pathname)
    except OSError, why:
        if why.errno == errno.EEXIST:
            # lock already held: propagate
            raise
        # other errors (e.g. FS without symlink support): fall through
    except AttributeError: # no symlink in os
        pass

    # O_EXCL makes creation fail if the lock already exists
    ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
    os.write(ld, info)
    os.close(ld)
+
def readlock(pathname):
    """Return the info stored in the lock file at pathname.

    Mirror of makelock(): read the symlink target if it is one,
    otherwise read the regular file's contents.
    """
    try:
        return os.readlink(pathname)
    except OSError, why:
        # EINVAL: not a symlink; ENOSYS: symlinks unsupported here
        if why.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
    except AttributeError: # no symlink in os
        pass
    fp = posixfile(pathname)
    r = fp.read()
    fp.close()
    return r
+
def fstat(fp):
    '''stat file object that may not have fileno method.'''
    try:
        fileno = fp.fileno
    except AttributeError:
        # file-like object without a real descriptor: stat by name
        return os.stat(fp.name)
    return os.fstat(fileno())
+
+# File system features
+
def checkcase(path):
    """
    Check whether the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.
    """
    st1 = os.stat(path)
    head, tail = os.path.split(path)
    # build the same path with the final component's case flipped
    flipped = os.path.join(head, tail.upper())
    if flipped == path:
        flipped = os.path.join(head, tail.lower())
    try:
        st2 = os.stat(flipped)
    except OSError:
        # the case-flipped name does not exist: case-sensitive
        return True
    # same stat result means both names hit the same file: insensitive
    return st2 != st1
+
# Cache of os.listdir() results keyed by normalized directory path; shared
# across fspath() calls. NOTE: never invalidated within a process.
_fspathcache = {}
def fspath(name, root):
    '''Get name in the case stored in the filesystem

    The name is either relative to root, or it is an absolute path starting
    with root. Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).

    Returns None if name does not exist under root.
    '''
    # If name is absolute, make it relative
    if name.lower().startswith(root.lower()):
        l = len(root)
        # NOTE(review): assumes name is strictly longer than root here;
        # name == root would raise IndexError -- confirm callers.
        if name[l] == os.sep or name[l] == os.altsep:
            l = l + 1
        name = name[l:]

    if not os.path.lexists(os.path.join(root, name)):
        return None

    seps = os.sep
    if os.altsep:
        seps = seps + os.altsep
    # Protect backslashes for the regex character classes below.
    # FIX: the result of replace() was previously discarded (strings are
    # immutable), leaving '\' unescaped inside [...] on Windows.
    seps = seps.replace('\\', '\\\\')
    # alternate runs of non-separator and separator characters
    pattern = re.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
    dir = os.path.normcase(os.path.normpath(root))
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            # separators are passed through unchanged
            result.append(sep)
            continue

        if dir not in _fspathcache:
            _fspathcache[dir] = os.listdir(dir)
        contents = _fspathcache[dir]

        # find the directory entry that case-folds to this component
        lpart = part.lower()
        lenp = len(part)
        for n in contents:
            if lenp == len(n) and n.lower() == lpart:
                result.append(n)
                break
        else:
            # Cannot happen, as the file exists!
            result.append(part)
        dir = os.path.join(dir, lpart)

    return ''.join(result)
+
def checknlink(testfile):
    '''check whether hardlink count reporting works properly'''

    # testfile may be open, so we need a separate file for checking to
    # work around issue2543 (or testfile may get lost on Samba shares)
    f1 = testfile + ".hgtmp1"
    if os.path.lexists(f1):
        # leftover probe file from a previous run: give up rather than
        # clobber it
        return False
    try:
        # posixfile is the platform open() wrapper (defined elsewhere)
        posixfile(f1, 'w').close()
    except IOError:
        return False

    f2 = testfile + ".hgtmp2"
    fd = None
    try:
        try:
            # oslink is the platform hardlink helper (defined elsewhere)
            oslink(f1, f2)
        except OSError:
            return False

        # nlinks() may behave differently for files on Windows shares if
        # the file is open.
        fd = posixfile(f2)
        return nlinks(f2) > 1
    finally:
        if fd is not None:
            fd.close()
        # best-effort cleanup of both probe files
        for f in (f1, f2):
            try:
                os.unlink(f)
            except OSError:
                pass

    return False
+
def endswithsep(path):
    '''Check path ends with os.sep or os.altsep.

    Note: the result is truthy/falsy rather than strictly boolean when
    os.altsep is unset (matches historical behavior).
    '''
    if path.endswith(os.sep):
        return True
    return os.altsep and path.endswith(os.altsep)
+
def splitpath(path):
    '''Split path by os.sep.
    Note that this function does not use os.altsep because this is
    an alternative of simple "xxx.split(os.sep)".
    It is recommended to use os.path.normpath() before using this
    function if need.'''
    return path.split(os.sep)
+
def gui():
    '''Are we running in a GUI?'''
    if sys.platform == 'darwin':
        if 'SSH_CONNECTION' in os.environ:
            # handle SSH access to a box where the user is logged in
            return False
        elif getattr(osutil, 'isgui', None):
            # check if a CoreGraphics session is available
            return osutil.isgui()
        else:
            # pure build; use a safe default
            return True
    else:
        # Windows always has a GUI; elsewhere require an X display.
        # NOTE: may return the DISPLAY string itself (truthy), not True.
        return os.name == "nt" or os.environ.get("DISPLAY")
+
def mktempcopy(name, emptyok=False, createmode=None):
    """Create a temporary file with the same contents from name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    d, fn = os.path.split(name)
    # create the temp file alongside the original so a later rename stays
    # on the same filesystem (atomic)
    fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want. If the original file already exists, just copy
    # its mode. Otherwise, manually obey umask.
    copymode(name, temp, createmode)
    if emptyok:
        return temp
    try:
        try:
            ifp = posixfile(name, "rb")
        except IOError, inst:
            # no original to copy: an empty temp file is the right answer
            if inst.errno == errno.ENOENT:
                return temp
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        ofp = posixfile(temp, "wb")
        for chunk in filechunkiter(ifp):
            ofp.write(chunk)
        ifp.close()
        ofp.close()
    except:
        # best-effort cleanup of the temp file, then re-raise the
        # original error (the bare except is deliberate here)
        try: os.unlink(temp)
        except: pass
        raise
    return temp
+
class atomictempfile(object):
    '''writeable file object that atomically updates a file

    All writes will go to a temporary copy of the original file. Call
    close() when you are done writing, and atomictempfile will rename
    the temporary copy to the original name, making the changes
    visible. If the object is destroyed without being closed, all your
    writes are discarded.
    '''
    def __init__(self, name, mode='w+b', createmode=None):
        self.__name = name # permanent name
        # writes land in a same-directory temp copy until close()
        self._tempname = mktempcopy(name, emptyok=('w' in mode),
                                    createmode=createmode)
        self._fp = posixfile(self._tempname, mode)

        # delegated methods
        self.write = self._fp.write
        self.fileno = self._fp.fileno

    def close(self):
        """Commit: flush the temp copy and rename it over the target."""
        if not self._fp.closed:
            self._fp.close()
            # rename() is the platform atomic-rename helper
            rename(self._tempname, localpath(self.__name))

    def discard(self):
        """Abandon all writes and remove the temp copy."""
        if not self._fp.closed:
            try:
                os.unlink(self._tempname)
            except OSError:
                pass
            self._fp.close()

    def __del__(self):
        # unclosed object: discard rather than leave a stray temp file
        if safehasattr(self, '_fp'): # constructor actually did something
            self.discard()
+
def makedirs(name, mode=None):
    """recursive directory creation with parent mode inheritance"""
    try:
        os.mkdir(name)
    except OSError, err:
        if err.errno == errno.EEXIST:
            # already there: nothing to do
            return
        if err.errno != errno.ENOENT or not name:
            raise
        # missing parent: create it first, then retry
        parent = os.path.dirname(os.path.abspath(name))
        if parent == name:
            # reached the filesystem root without success
            raise
        makedirs(parent, mode)
        os.mkdir(name)
    if mode is not None:
        os.chmod(name, mode)
+
def readfile(path):
    """Return the entire contents of the file at *path* (binary mode)."""
    fp = open(path, 'rb')
    try:
        data = fp.read()
    finally:
        fp.close()
    return data
+
def writefile(path, text):
    """Replace the contents of the file at path with text (binary mode)."""
    fp = open(path, 'wb')
    try:
        fp.write(text)
    finally:
        fp.close()
+
def appendfile(path, text):
    """Append text to the file at path (binary mode; creates it if absent)."""
    fp = open(path, 'ab')
    try:
        fp.write(text)
    finally:
        fp.close()
+
class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks."""
        def _capped(chunks):
            # re-slice any chunk bigger than 1MB into 256KB pieces so a
            # single huge chunk never dominates memory
            for chunk in chunks:
                if len(chunk) > 2 ** 20:
                    offset = 0
                    total = len(chunk)
                    while offset < total:
                        yield chunk[offset:offset + 2 ** 18]
                        offset += 2 ** 18
                else:
                    yield chunk
        self.iter = _capped(in_iter)
        self._queue = []

    def read(self, l):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry."""
        remaining = l
        pieces = []
        queue = self._queue
        while remaining > 0:
            if not queue:
                # pull roughly 256KB ahead from the source iterator
                budget = 2 ** 18
                for chunk in self.iter:
                    queue.append(chunk)
                    budget -= len(chunk)
                    if budget <= 0:
                        break
                if not queue:
                    # source exhausted: short read
                    break

            chunk = queue.pop(0)
            remaining -= len(chunk)
            if remaining < 0:
                # took too much: push the surplus back for next time
                queue.insert(0, chunk[remaining:])
                pieces.append(chunk[:remaining])
            else:
                pieces.append(chunk)

        return ''.join(pieces)
+
def filechunkiter(f, size=65536, limit=None):
    """Create a generator that produces the data in the file size
    (default 65536) bytes at a time, up to optional limit (default is
    to read all data). Chunks may be less than size bytes if the
    chunk is the last chunk in the file, or the file is a socket or
    some other type of file that sometimes reads less data than is
    requested."""
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        nbytes = size
        if limit is not None:
            nbytes = min(limit, size)
        # the 'and' short-circuits so we never issue a zero-byte read
        data = nbytes and f.read(nbytes)
        if not data:
            break
        if limit:
            limit -= len(data)
        yield data
+
def makedate():
    """Return 'now' as a (unixtime, tzoffset) pair.

    tzoffset is the local zone's distance from UTC in seconds
    (positive west of Greenwich, matching unixtime = localtime + offset).
    """
    now = time.time()
    if now < 0:
        hint = _("check your clock")
        raise Abort(_("negative timestamp: %d") % now, hint=hint)
    # UTC wallclock minus local wallclock at the same instant
    skew = (datetime.datetime.utcfromtimestamp(now) -
            datetime.datetime.fromtimestamp(now))
    return now, skew.days * 86400 + skew.seconds
+
def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
    """represent a (unixtime, offset) tuple as a localized time.
    unixtime is seconds since the epoch, and offset is the time zone's
    number of seconds away from UTC.

    %1 and %2 in the format expand to the +HH and MM halves of the
    numeric time zone.  With no date, the current time is used.
    """
    t, tz = date or makedate()
    if t < 0:
        t = 0 # time.gmtime(lt) fails on Windows for lt < -43200
        tz = 0
    if "%1" in format or "%2" in format:
        # offsets west of UTC are positive, hence the inverted sign
        if tz > 0:
            sign = "-"
        else:
            sign = "+"
        minutes = abs(tz) // 60
        format = format.replace("%1", "%c%02d" % (sign, minutes // 60))
        format = format.replace("%2", "%02d" % (minutes % 60))
    try:
        t = time.gmtime(float(t) - tz)
    except ValueError:
        # time was out of range
        t = time.gmtime(sys.maxint)
    return time.strftime(format, t)
+
def shortdate(date=None):
    """turn (timestamp, tzoff) tuple into iso 8601 date."""
    return datestr(date, format='%Y-%m-%d')
+
def strdate(string, format, defaults=[]):
    """parse a localized time string and return a (unixtime, offset) tuple.
    if the string cannot be parsed, ValueError is raised.

    defaults maps element groups ("S", "M", "HI", "d", "mb", "yY") to
    (biased, today) value pairs used to fill in anything the format
    does not mention.
    """
    def tzoffset(text):
        # a trailing "+HHMM"/"-HHMM" token, or "GMT"/"UTC", gives the
        # offset in seconds; anything else means "local time"
        token = text.split()[-1]
        if token[0] in "+-" and len(token) == 5 and token[1:].isdigit():
            if token[0] == "+":
                sign = 1
            else:
                sign = -1
            hours = int(token[1:3])
            minutes = int(token[3:5])
            return -sign * (hours * 60 + minutes) * 60
        if token in ("GMT", "UTC"):
            return 0
        return None

    # NOTE: unixtime = localunixtime + offset
    offset = tzoffset(string)
    date = string
    if offset is not None:
        # drop the recognized zone token before strptime sees the string
        date = " ".join(string.split()[:-1])

    # pad the date with defaults for every element the format lacks,
    # walking from most to least specific
    usenow = False # default to using biased defaults
    for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
        if not any(("%" + p) in format for p in part):
            date += "@" + defaults[part][usenow]
            format += "@%" + part[0]
        else:
            # We've found a specific time element, less specific time
            # elements are relative to today
            usenow = True

    timetuple = time.strptime(date, format)
    localunixtime = int(calendar.timegm(timetuple))
    if offset is None:
        # local timezone
        unixtime = int(time.mktime(timetuple))
        offset = unixtime - localunixtime
    else:
        unixtime = localunixtime + offset
    return unixtime, offset
+
+def parsedate(date, formats=None, bias={}):
+ """parse a localized date/time and return a (unixtime, offset) tuple.
+
+ The date may be a "unixtime offset" string or in one of the specified
+ formats. If the date already is a (unixtime, offset) tuple, it is returned.
+ """
+ if not date:
+ return 0, 0
+ if isinstance(date, tuple) and len(date) == 2:
+ return date
+ if not formats:
+ formats = defaultdateformats
+ date = date.strip()
+ try:
+ when, offset = map(int, date.split(' '))
+ except ValueError:
+ # fill out defaults
+ now = makedate()
+ defaults = {}
+ for part in ("d", "mb", "yY", "HI", "M", "S"):
+ # this piece is for rounding the specific end of unknowns
+ b = bias.get(part)
+ if b is None:
+ if part[0] in "HMS":
+ b = "00"
+ else:
+ b = "0"
+
+ # this piece is for matching the generic end to today's date
+ n = datestr(now, "%" + part[0])
+
+ defaults[part] = (b, n)
+
+ for format in formats:
+ try:
+ when, offset = strdate(date, format, defaults)
+ except (ValueError, OverflowError):
+ pass
+ else:
+ break
+ else:
+ raise Abort(_('invalid date: %r') % date)
+ # validate explicit (probably user-specified) date and
+ # time zone offset. values must fit in signed 32 bits for
+ # current 32-bit linux runtimes. timezones go from UTC-12
+ # to UTC+14
+ if abs(when) > 0x7fffffff:
+ raise Abort(_('date exceeds 32 bits: %d') % when)
+ if when < 0:
+ raise Abort(_('negative date value: %d') % when)
+ if offset < -50400 or offset > 43200:
+ raise Abort(_('impossible time zone offset: %d') % offset)
+ return when, offset
+
def matchdate(date):
    """Return a function that matches a given date match specifier

    Formats include:

    '{date}' match a given date to the accuracy provided

    '<{date}' on or before a given date

    '>{date}' on or after a given date

    >>> p1 = parsedate("10:29:59")
    >>> p2 = parsedate("10:30:00")
    >>> p3 = parsedate("10:30:59")
    >>> p4 = parsedate("10:31:00")
    >>> p5 = parsedate("Sep 15 10:30:00 1999")
    >>> f = matchdate("10:30")
    >>> f(p1[0])
    False
    >>> f(p2[0])
    True
    >>> f(p3[0])
    True
    >>> f(p4[0])
    False
    >>> f(p5[0])
    False
    """

    def lower(date):
        # round every unspecified element down (Jan 1st, 00:00:00)
        # so this is the inclusive start of the matching range
        d = dict(mb="1", d="1")
        return parsedate(date, extendeddateformats, d)[0]

    def upper(date):
        # round every unspecified element up so this is the inclusive
        # end of the range; probe decreasing day counts until one exists
        # in the target month
        d = dict(mb="12", HI="23", M="59", S="59")
        for days in ("31", "30", "29"):
            try:
                d["d"] = days
                return parsedate(date, extendeddateformats, d)[0]
            except Abort:
                # FIX: was a bare except:, which also swallowed
                # KeyboardInterrupt/SystemExit; parsedate signals an
                # unparseable date with Abort only
                pass
        d["d"] = "28"
        return parsedate(date, extendeddateformats, d)[0]

    date = date.strip()

    if not date:
        raise Abort(_("dates cannot consist entirely of whitespace"))
    elif date[0] == "<":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '<DATE'"))
        when = upper(date[1:])
        return lambda x: x <= when
    elif date[0] == ">":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '>DATE'"))
        when = lower(date[1:])
        return lambda x: x >= when
    elif date[0] == "-":
        # "-N": everything within the last N days
        try:
            days = int(date[1:])
        except ValueError:
            raise Abort(_("invalid day spec: %s") % date[1:])
        if days < 0:
            raise Abort(_("%s must be nonnegative (see 'hg help dates')")
                        % date[1:])
        when = makedate()[0] - days * 3600 * 24
        return lambda x: x >= when
    elif " to " in date:
        # explicit inclusive range
        a, b = date.split(" to ")
        start, stop = lower(a), upper(b)
        return lambda x: x >= start and x <= stop
    else:
        # bare date: match anything within its span of accuracy
        start, stop = lower(date), upper(date)
        return lambda x: x >= start and x <= stop
+
def shortuser(user):
    """Return a short representation of a user name or email address."""
    # drop the mail domain, then anything before '<', then trailing
    # words, then anything after the first dot -- in that order
    user = user.partition('@')[0]
    head, sep, tail = user.partition('<')
    if sep:
        user = tail
    user = user.partition(' ')[0]
    user = user.partition('.')[0]
    return user
+
def email(author):
    '''get email of author.'''
    # slice between the first '<' and the closing '>'; if either is
    # missing the slice degrades gracefully (whole string when neither)
    stop = author.find('>')
    if stop == -1:
        stop = None
    return author[author.find('<') + 1:stop]
+
+def _ellipsis(text, maxlength):
+ if len(text) <= maxlength:
+ return text, False
+ else:
+ return "%s..." % (text[:maxlength - 3]), True
+
def ellipsis(text, maxlength=400):
    """Trim string to at most maxlength (default: 400) characters."""
    try:
        # use unicode not to split at intermediate multi-byte sequence
        utext, truncated = _ellipsis(text.decode(encoding.encoding),
                                     maxlength)
        if not truncated:
            return text
        return utext.encode(encoding.encoding)
    except (UnicodeDecodeError, UnicodeEncodeError):
        # undecodable input: fall back to a raw byte-level trim
        return _ellipsis(text, maxlength)[0]
+
def bytecount(nbytes):
    '''return byte count formatted as readable string, with units'''

    # (multiplier, divisor, format): the first row whose threshold
    # (divisor * multiplier) fits the value wins, so larger counts get
    # fewer decimals
    units = (
        (100, 1 << 30, _('%.0f GB')),
        (10, 1 << 30, _('%.1f GB')),
        (1, 1 << 30, _('%.2f GB')),
        (100, 1 << 20, _('%.0f MB')),
        (10, 1 << 20, _('%.1f MB')),
        (1, 1 << 20, _('%.2f MB')),
        (100, 1 << 10, _('%.0f KB')),
        (10, 1 << 10, _('%.1f KB')),
        (1, 1 << 10, _('%.2f KB')),
        (1, 1, _('%.0f bytes')),
        )

    for multiplier, divisor, format in units:
        if nbytes >= divisor * multiplier:
            return format % (nbytes / float(divisor))
    # smaller than every threshold (i.e. < 1 byte): plain byte count
    return units[-1][2] % nbytes
+
def uirepr(s):
    """repr() variant for UI output."""
    raw = repr(s)
    # Avoid double backslash in Windows path repr()
    return raw.replace('\\\\', '\\')
+
+# delay import of textwrap
+def MBTextWrapper(**kwargs):
+ class tw(textwrap.TextWrapper):
+ """
+ Extend TextWrapper for width-awareness.
+
+ Neither number of 'bytes' in any encoding nor 'characters' is
+ appropriate to calculate terminal columns for specified string.
+
+ Original TextWrapper implementation uses built-in 'len()' directly,
+ so overriding is needed to use width information of each characters.
+
+ In addition, characters classified into 'ambiguous' width are
+ treated as wide in east asian area, but as narrow in other.
+
+ This requires use decision to determine width of such characters.
+ """
+ def __init__(self, **kwargs):
+ textwrap.TextWrapper.__init__(self, **kwargs)
+
+ # for compatibility between 2.4 and 2.6
+ if getattr(self, 'drop_whitespace', None) is None:
+ self.drop_whitespace = kwargs.get('drop_whitespace', True)
+
+ def _cutdown(self, ucstr, space_left):
+ l = 0
+ colwidth = encoding.ucolwidth
+ for i in xrange(len(ucstr)):
+ l += colwidth(ucstr[i])
+ if space_left < l:
+ return (ucstr[:i], ucstr[i:])
+ return ucstr, ''
+
+ # overriding of base class
+ def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
+ space_left = max(width - cur_len, 1)
+
+ if self.break_long_words:
+ cut, res = self._cutdown(reversed_chunks[-1], space_left)
+ cur_line.append(cut)
+ reversed_chunks[-1] = res
+ elif not cur_line:
+ cur_line.append(reversed_chunks.pop())
+
+ # this overriding code is imported from TextWrapper of python 2.6
+ # to calculate columns of string by 'encoding.ucolwidth()'
+ def _wrap_chunks(self, chunks):
+ colwidth = encoding.ucolwidth
+
+ lines = []
+ if self.width <= 0:
+ raise ValueError("invalid width %r (must be > 0)" % self.width)
+
+ # Arrange in reverse order so items can be efficiently popped
+ # from a stack of chucks.
+ chunks.reverse()
+
+ while chunks:
+
+ # Start the list of chunks that will make up the current line.
+ # cur_len is just the length of all the chunks in cur_line.
+ cur_line = []
+ cur_len = 0
+
+ # Figure out which static string will prefix this line.
+ if lines:
+ indent = self.subsequent_indent
+ else:
+ indent = self.initial_indent
+
+ # Maximum width for this line.
+ width = self.width - len(indent)
+
+ # First chunk on line is whitespace -- drop it, unless this
+ # is the very beginning of the text (ie. no lines started yet).
+ if self.drop_whitespace and chunks[-1].strip() == '' and lines:
+ del chunks[-1]
+
+ while chunks:
+ l = colwidth(chunks[-1])
+
+ # Can at least squeeze this chunk onto the current line.
+ if cur_len + l <= width:
+ cur_line.append(chunks.pop())
+ cur_len += l
+
+ # Nope, this line is full.
+ else:
+ break
+
+ # The current line is full, and the next chunk is too big to
+ # fit on *any* line (not just this one).
+ if chunks and colwidth(chunks[-1]) > width:
+ self._handle_long_word(chunks, cur_line, cur_len, width)
+
+ # If the last chunk on this line is all whitespace, drop it.
+ if (self.drop_whitespace and
+ cur_line and cur_line[-1].strip() == ''):
+ del cur_line[-1]
+
+ # Convert current line back to a string and store it in list
+ # of all lines (return value).
+ if cur_line:
+ lines.append(indent + ''.join(cur_line))
+
+ return lines
+
+ global MBTextWrapper
+ MBTextWrapper = tw
+ return tw(**kwargs)
+
def wrap(line, width, initindent='', hangindent=''):
    """Wrap line to width columns, honoring wide characters.

    initindent prefixes the first line, hangindent every following line.
    Input is decoded with the local encoding and the result re-encoded.
    """
    maxindent = max(len(hangindent), len(initindent))
    if width <= maxindent:
        # adjust for weird terminal size
        width = max(78, maxindent + 1)
    line = line.decode(encoding.encoding, encoding.encodingmode)
    initindent = initindent.decode(encoding.encoding, encoding.encodingmode)
    hangindent = hangindent.decode(encoding.encoding, encoding.encodingmode)
    wrapper = MBTextWrapper(width=width,
                            initial_indent=initindent,
                            subsequent_indent=hangindent)
    return wrapper.fill(line).encode(encoding.encoding)
+
def iterlines(iterator):
    """Yield the individual lines of every chunk produced by iterator."""
    for block in iterator:
        lines = block.splitlines()
        for ln in lines:
            yield ln
+
def expandpath(path):
    """Expand environment variables, then ~user constructs, in path."""
    expanded = os.path.expandvars(path)
    return os.path.expanduser(expanded)
+
def hgcmd():
    """Return the command used to execute current hg

    This is different from hgexecutable() because on Windows we want
    to avoid things opening new shell windows like batch files, so we
    get either the python call or current executable.
    """
    if mainfrozen():
        # frozen binary: the executable itself is the command
        return [sys.executable]
    # gethgcmd is platform-specific (defined elsewhere in this module)
    return gethgcmd()
+
def rundetached(args, condfn):
    """Execute the argument list in a detached process.

    condfn is a callable which is called repeatedly and should return
    True once the child process is known to have started successfully.
    At this point, the child process PID is returned. If the child
    process fails to start or finishes before condfn() evaluates to
    True, return -1.
    """
    # Windows case is easier because the child process is either
    # successfully starting and validating the condition or exiting
    # on failure. We just poll on its PID. On Unix, if the child
    # process fails to start, it will be left in a zombie state until
    # the parent wait on it, which we cannot do since we expect a long
    # running process on success. Instead we listen for SIGCHLD telling
    # us our child process terminated.
    terminated = set()
    def handler(signum, frame):
        # reap the child and remember its (pid, status)
        terminated.add(os.wait())
    prevhandler = None
    # SIGCHLD does not exist on all platforms (e.g. Windows)
    SIGCHLD = getattr(signal, 'SIGCHLD', None)
    if SIGCHLD is not None:
        prevhandler = signal.signal(SIGCHLD, handler)
    try:
        # spawndetached is platform-specific (defined elsewhere)
        pid = spawndetached(args)
        while not condfn():
            # re-check condfn after the liveness test to avoid a race
            # with a child that started and then exited quickly
            if ((pid in terminated or not testpid(pid))
                and not condfn()):
                return -1
            time.sleep(0.1)
        return pid
    finally:
        if prevhandler is not None:
            # prevhandler is only set when SIGCHLD exists
            signal.signal(signal.SIGCHLD, prevhandler)
+
# any()/all() exist as builtins from Python 2.5; provide pure-Python
# fallbacks so this module keeps working on 2.4.
try:
    any, all = any, all
except NameError:
    def any(iterable):
        for i in iterable:
            if i:
                return True
        return False

    def all(iterable):
        for i in iterable:
            if not i:
                return False
        return True
+
def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
    """Return the result of interpolating items in the mapping into string s.

    prefix is a single character string, or a two character string with
    a backslash as the first character if the prefix needs to be escaped in
    a regular expression.

    fn is an optional function that will be applied to the replacement text
    just before replacement.

    escape_prefix is an optional flag that allows using doubled prefix for
    its escaping.

    NOTE(review): mapping keys are spliced into the pattern unescaped, so
    they are interpreted as regular-expression alternatives -- callers are
    expected to pass regex-safe keys.
    """
    fn = fn or (lambda s: s)
    if escape_prefix:
        # FIX: work on a copy so the prefix-escape entry added below does
        # not leak into the caller's mapping
        mapping = mapping.copy()
    patterns = '|'.join(mapping.keys())
    if escape_prefix:
        patterns += '|' + prefix
        if len(prefix) > 1:
            # drop the leading backslash of an escaped prefix
            prefix_char = prefix[1:]
        else:
            prefix_char = prefix
        # a doubled prefix expands to the literal prefix character
        mapping[prefix_char] = prefix_char
    r = re.compile(r'%s(%s)' % (prefix, patterns))
    # group() includes the one-character prefix; strip it for the lookup
    return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
+
def getport(port):
    """Return the port for a given network service.

    If port is an integer, it's returned as is. If it's a string, it's
    looked up using socket.getservbyname(). If there's no matching
    service, util.Abort is raised.
    """
    try:
        return int(port)
    except ValueError:
        # not numeric: fall through to a service-name lookup
        pass

    try:
        return socket.getservbyname(port)
    except socket.error:
        raise Abort(_("no port number associated with service '%s'") % port)
+
# Recognized boolean spellings (lowercase) and their values.
_booleans = dict.fromkeys('1 yes true on always'.split(), True)
_booleans.update(dict.fromkeys('0 no false off never'.split(), False))

def parsebool(s):
    """Parse s into a boolean.

    If s is not a valid boolean, returns None.
    """
    return _booleans.get(s.lower())
+
+_hexdig = '0123456789ABCDEFabcdef'
+_hextochr = dict((a + b, chr(int(a + b, 16)))
+ for a in _hexdig for b in _hexdig)
+
+def _urlunquote(s):
+ """unquote('abc%20def') -> 'abc def'."""
+ res = s.split('%')
+ # fastpath
+ if len(res) == 1:
+ return s
+ s = res[0]
+ for item in res[1:]:
+ try:
+ s += _hextochr[item[:2]] + item[2:]
+ except KeyError:
+ s += '%' + item
+ except UnicodeDecodeError:
+ s += unichr(int(item[:2], 16)) + item[2:]
+ return s
+
+class url(object):
+ r"""Reliable URL parser.
+
+ This parses URLs and provides attributes for the following
+ components:
+
+ <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
+
+ Missing components are set to None. The only exception is
+ fragment, which is set to '' if present but empty.
+
+ If parsefragment is False, fragment is included in query. If
+ parsequery is False, query is included in path. If both are
+ False, both fragment and query are included in path.
+
+ See http://www.ietf.org/rfc/rfc2396.txt for more information.
+
+ Note that for backward compatibility reasons, bundle URLs do not
+ take host names. That means 'bundle://../' has a path of '../'.
+
+ Examples:
+
+ >>> url('http://www.ietf.org/rfc/rfc2396.txt')
+ <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
+ >>> url('ssh://[::1]:2200//home/joe/repo')
+ <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
+ >>> url('file:///home/joe/repo')
+ <url scheme: 'file', path: '/home/joe/repo'>
+ >>> url('file:///c:/temp/foo/')
+ <url scheme: 'file', path: 'c:/temp/foo/'>
+ >>> url('bundle:foo')
+ <url scheme: 'bundle', path: 'foo'>
+ >>> url('bundle://../foo')
+ <url scheme: 'bundle', path: '../foo'>
+ >>> url(r'c:\foo\bar')
+ <url path: 'c:\\foo\\bar'>
+ >>> url(r'\\blah\blah\blah')
+ <url path: '\\\\blah\\blah\\blah'>
+ >>> url(r'\\blah\blah\blah#baz')
+ <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
+
+ Authentication credentials:
+
+ >>> url('ssh://joe:xyz@x/repo')
+ <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
+ >>> url('ssh://joe@x/repo')
+ <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
+
+ Query strings and fragments:
+
+ >>> url('http://host/a?b#c')
+ <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
+ >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
+ <url scheme: 'http', host: 'host', path: 'a?b#c'>
+ """
+
+ _safechars = "!~*'()+"
+ _safepchars = "/!~*'()+"
+ _matchscheme = re.compile(r'^[a-zA-Z0-9+.\-]+:').match
+
    def __init__(self, path, parsequery=True, parsefragment=True):
        """Parse *path* into scheme/user/passwd/host/port/path/query/fragment.

        parsequery/parsefragment disable splitting on '?' / '#'.  The
        parse proceeds by repeatedly chopping recognized pieces off the
        front or back of *path*; statement order is significant.
        """
        # We slowly chomp away at path until we have only the path left
        self.scheme = self.user = self.passwd = self.host = None
        self.port = self.path = self.query = self.fragment = None
        self._localpath = True      # True until a scheme is recognized
        self._hostport = ''         # host:port before the port was split off
        self._origpath = path       # untouched input, used by localpath()

        if parsefragment and '#' in path:
            path, self.fragment = path.split('#', 1)
            if not path:
                path = None

        # special case for Windows drive letters and UNC paths
        if hasdriveletter(path) or path.startswith(r'\\'):
            self.path = path
            return

        # For compatibility reasons, we can't handle bundle paths as
        # normal URLS
        if path.startswith('bundle:'):
            self.scheme = 'bundle'
            path = path[7:]
            if path.startswith('//'):
                path = path[2:]
            self.path = path
            return

        if self._matchscheme(path):
            parts = path.split(':', 1)
            if parts[0]:
                self.scheme, path = parts
                self._localpath = False

        if not path:
            path = None
            if self._localpath:
                self.path = ''
                return
        else:
            if self._localpath:
                self.path = path
                return

            if parsequery and '?' in path:
                path, self.query = path.split('?', 1)
                if not path:
                    path = None
                if not self.query:
                    self.query = None

            # // is required to specify a host/authority
            if path and path.startswith('//'):
                parts = path[2:].split('/', 1)
                if len(parts) > 1:
                    self.host, path = parts
                    path = path  # no-op kept as-is; path already holds the tail
                else:
                    self.host = parts[0]
                    path = None
                if not self.host:
                    self.host = None
                    # path of file:///d is /d
                    # path of file:///d:/ is d:/, not /d:/
                    if path and not hasdriveletter(path):
                        path = '/' + path

            if self.host and '@' in self.host:
                # rsplit: the user part itself may contain '@'
                self.user, self.host = self.host.rsplit('@', 1)
                if ':' in self.user:
                    self.user, self.passwd = self.user.split(':', 1)
                if not self.host:
                    self.host = None

            # Don't split on colons in IPv6 addresses without ports
            if (self.host and ':' in self.host and
                not (self.host.startswith('[') and self.host.endswith(']'))):
                self._hostport = self.host
                self.host, self.port = self.host.rsplit(':', 1)
                if not self.host:
                    self.host = None

            if (self.host and self.scheme == 'file' and
                self.host not in ('localhost', '127.0.0.1', '[::1]')):
                raise Abort(_('file:// URLs can only refer to localhost'))

        self.path = path

        # leave the query string escaped
        for a in ('user', 'passwd', 'host', 'port',
                  'path', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                setattr(self, a, _urlunquote(v))
+
+ def __repr__(self):
+ attrs = []
+ for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
+ 'query', 'fragment'):
+ v = getattr(self, a)
+ if v is not None:
+ attrs.append('%s: %r' % (a, v))
+ return '<url %s>' % ', '.join(attrs)
+
+ def __str__(self):
+ r"""Join the URL's components back into a URL string.
+
+ Examples:
+
+ >>> str(url('http://user:pw@host:80/?foo#bar'))
+ 'http://user:pw@host:80/?foo#bar'
+ >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
+ 'http://user:pw@host:80/?foo=bar&baz=42'
+ >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
+ 'http://user:pw@host:80/?foo=bar%3dbaz'
+ >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
+ 'ssh://user:pw@[::1]:2200//home/joe#'
+ >>> str(url('http://localhost:80//'))
+ 'http://localhost:80//'
+ >>> str(url('http://localhost:80/'))
+ 'http://localhost:80/'
+ >>> str(url('http://localhost:80'))
+ 'http://localhost:80/'
+ >>> str(url('bundle:foo'))
+ 'bundle:foo'
+ >>> str(url('bundle://../foo'))
+ 'bundle:../foo'
+ >>> str(url('path'))
+ 'path'
+ >>> str(url('file:///tmp/foo/bar'))
+ 'file:///tmp/foo/bar'
+ >>> print url(r'bundle:foo\bar')
+ bundle:foo\bar
+ """
+ if self._localpath:
+ s = self.path
+ if self.scheme == 'bundle':
+ s = 'bundle:' + s
+ if self.fragment:
+ s += '#' + self.fragment
+ return s
+
+ s = self.scheme + ':'
+ if self.user or self.passwd or self.host:
+ s += '//'
+ elif self.scheme and (not self.path or self.path.startswith('/')):
+ s += '//'
+ if self.user:
+ s += urllib.quote(self.user, safe=self._safechars)
+ if self.passwd:
+ s += ':' + urllib.quote(self.passwd, safe=self._safechars)
+ if self.user or self.passwd:
+ s += '@'
+ if self.host:
+ if not (self.host.startswith('[') and self.host.endswith(']')):
+ s += urllib.quote(self.host)
+ else:
+ s += self.host
+ if self.port:
+ s += ':' + urllib.quote(self.port)
+ if self.host:
+ s += '/'
+ if self.path:
+ # TODO: similar to the query string, we should not unescape the
+ # path when we store it, the path might contain '%2f' = '/',
+ # which we should *not* escape.
+ s += urllib.quote(self.path, safe=self._safepchars)
+ if self.query:
+ # we store the query in escaped form.
+ s += '?' + self.query
+ if self.fragment is not None:
+ s += '#' + urllib.quote(self.fragment, safe=self._safepchars)
+ return s
+
+ def authinfo(self):
+ user, passwd = self.user, self.passwd
+ try:
+ self.user, self.passwd = None, None
+ s = str(self)
+ finally:
+ self.user, self.passwd = user, passwd
+ if not self.user:
+ return (s, None)
+ # authinfo[1] is passed to urllib2 password manager, and its
+ # URIs must not contain credentials. The host is passed in the
+ # URIs list because Python < 2.4.3 uses only that to search for
+ # a password.
+ return (s, (None, (s, self.host),
+ self.user, self.passwd or ''))
+
+ def isabs(self):
+ if self.scheme and self.scheme != 'file':
+ return True # remote URL
+ if hasdriveletter(self.path):
+ return True # absolute for our purposes - can't be joined()
+ if self.path.startswith(r'\\'):
+ return True # Windows UNC path
+ if self.path.startswith('/'):
+ return True # POSIX-style
+ return False
+
+ def localpath(self):
+ if self.scheme == 'file' or self.scheme == 'bundle':
+ path = self.path or '/'
+ # For Windows, we need to promote hosts containing drive
+ # letters to paths with drive letters.
+ if hasdriveletter(self._hostport):
+ path = self._hostport + '/' + self.path
+ elif (self.host is not None and self.path
+ and not hasdriveletter(path)):
+ path = '/' + path
+ return path
+ return self._origpath
+
def hasscheme(path):
    """Report whether *path* parses as a URL with an explicit scheme."""
    parsed = url(path)
    return bool(parsed.scheme)
+
def hasdriveletter(path):
    """Return True if *path* begins with a Windows drive letter ("c:...").

    Tolerates None and the empty string (returns False): url.__init__
    can hand this function path = None after stripping a fragment, and
    slicing None would raise TypeError.
    """
    return bool(path) and path[1:2] == ':' and path[0:1].isalpha()
+
def urllocalpath(path):
    """Parse *path* as a URL (without query/fragment splitting) and
    return its local filesystem form."""
    parsed = url(path, parsequery=False, parsefragment=False)
    return parsed.localpath()
+
def hidepassword(u):
    """Return *u* with any password component replaced by '***'."""
    parsed = url(u)
    if parsed.passwd:
        parsed.passwd = '***'
    return str(parsed)
+
def removeauth(u):
    """Return *u* with user and password components stripped."""
    parsed = url(u)
    parsed.user = None
    parsed.passwd = None
    return str(parsed)
+
def isatty(fd):
    """Return fd.isatty(), or False for objects without an isatty method."""
    try:
        result = fd.isatty()
    except AttributeError:
        result = False
    return result
diff --git a/websdk/mercurial/verify.py b/websdk/mercurial/verify.py
index b0896e0..3e53862 120000..100644
--- a/websdk/mercurial/verify.py
+++ b/websdk/mercurial/verify.py
@@ -1 +1,303 @@
-/usr/share/pyshared/mercurial/verify.py \ No newline at end of file
+# verify.py - repository integrity checking for Mercurial
+#
+# Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from node import nullid, short
+from i18n import _
+import os
+import revlog, util, error
+
def verify(repo):
    """Run an integrity check on *repo* while holding its store lock.

    Delegates to _verify(); the lock is released even if verification
    raises.
    """
    storelock = repo.lock()
    try:
        return _verify(repo)
    finally:
        storelock.release()
+
+def _verify(repo):
+ mflinkrevs = {}
+ filelinkrevs = {}
+ filenodes = {}
+ revisions = 0
+ badrevs = set()
+ errors = [0]
+ warnings = [0]
+ ui = repo.ui
+ cl = repo.changelog
+ mf = repo.manifest
+ lrugetctx = util.lrucachefunc(repo.changectx)
+
+ if not repo.cancopy():
+ raise util.Abort(_("cannot verify bundle or remote repos"))
+
+ def err(linkrev, msg, filename=None):
+ if linkrev is not None:
+ badrevs.add(linkrev)
+ else:
+ linkrev = '?'
+ msg = "%s: %s" % (linkrev, msg)
+ if filename:
+ msg = "%s@%s" % (filename, msg)
+ ui.warn(" " + msg + "\n")
+ errors[0] += 1
+
+ def exc(linkrev, msg, inst, filename=None):
+ if isinstance(inst, KeyboardInterrupt):
+ ui.warn(_("interrupted"))
+ raise
+ if not str(inst):
+ inst = repr(inst)
+ err(linkrev, "%s: %s" % (msg, inst), filename)
+
+ def warn(msg):
+ ui.warn(msg + "\n")
+ warnings[0] += 1
+
+ def checklog(obj, name, linkrev):
+ if not len(obj) and (havecl or havemf):
+ err(linkrev, _("empty or missing %s") % name)
+ return
+
+ d = obj.checksize()
+ if d[0]:
+ err(None, _("data length off by %d bytes") % d[0], name)
+ if d[1]:
+ err(None, _("index contains %d extra bytes") % d[1], name)
+
+ if obj.version != revlog.REVLOGV0:
+ if not revlogv1:
+ warn(_("warning: `%s' uses revlog format 1") % name)
+ elif revlogv1:
+ warn(_("warning: `%s' uses revlog format 0") % name)
+
+ def checkentry(obj, i, node, seen, linkrevs, f):
+ lr = obj.linkrev(obj.rev(node))
+ if lr < 0 or (havecl and lr not in linkrevs):
+ if lr < 0 or lr >= len(cl):
+ msg = _("rev %d points to nonexistent changeset %d")
+ else:
+ msg = _("rev %d points to unexpected changeset %d")
+ err(None, msg % (i, lr), f)
+ if linkrevs:
+ if f and len(linkrevs) > 1:
+ try:
+ # attempt to filter down to real linkrevs
+ linkrevs = [l for l in linkrevs
+ if lrugetctx(l)[f].filenode() == node]
+ except:
+ pass
+ warn(_(" (expected %s)") % " ".join(map(str, linkrevs)))
+ lr = None # can't be trusted
+
+ try:
+ p1, p2 = obj.parents(node)
+ if p1 not in seen and p1 != nullid:
+ err(lr, _("unknown parent 1 %s of %s") %
+ (short(p1), short(n)), f)
+ if p2 not in seen and p2 != nullid:
+ err(lr, _("unknown parent 2 %s of %s") %
+ (short(p2), short(p1)), f)
+ except Exception, inst:
+ exc(lr, _("checking parents of %s") % short(node), inst, f)
+
+ if node in seen:
+ err(lr, _("duplicate revision %d (%d)") % (i, seen[n]), f)
+ seen[n] = i
+ return lr
+
+ if os.path.exists(repo.sjoin("journal")):
+ ui.warn(_("abandoned transaction found - run hg recover\n"))
+
+ revlogv1 = cl.version != revlog.REVLOGV0
+ if ui.verbose or not revlogv1:
+ ui.status(_("repository uses revlog format %d\n") %
+ (revlogv1 and 1 or 0))
+
+ havecl = len(cl) > 0
+ havemf = len(mf) > 0
+
+ ui.status(_("checking changesets\n"))
+ seen = {}
+ checklog(cl, "changelog", 0)
+ total = len(repo)
+ for i in repo:
+ ui.progress(_('checking'), i, total=total, unit=_('changesets'))
+ n = cl.node(i)
+ checkentry(cl, i, n, seen, [i], "changelog")
+
+ try:
+ changes = cl.read(n)
+ mflinkrevs.setdefault(changes[0], []).append(i)
+ for f in changes[3]:
+ filelinkrevs.setdefault(f, []).append(i)
+ except Exception, inst:
+ exc(i, _("unpacking changeset %s") % short(n), inst)
+ ui.progress(_('checking'), None)
+
+ ui.status(_("checking manifests\n"))
+ seen = {}
+ checklog(mf, "manifest", 0)
+ total = len(mf)
+ for i in mf:
+ ui.progress(_('checking'), i, total=total, unit=_('manifests'))
+ n = mf.node(i)
+ lr = checkentry(mf, i, n, seen, mflinkrevs.get(n, []), "manifest")
+ if n in mflinkrevs:
+ del mflinkrevs[n]
+ else:
+ err(lr, _("%s not in changesets") % short(n), "manifest")
+
+ try:
+ for f, fn in mf.readdelta(n).iteritems():
+ if not f:
+ err(lr, _("file without name in manifest"))
+ elif f != "/dev/null":
+ filenodes.setdefault(f, {}).setdefault(fn, lr)
+ except Exception, inst:
+ exc(lr, _("reading manifest delta %s") % short(n), inst)
+ ui.progress(_('checking'), None)
+
+ ui.status(_("crosschecking files in changesets and manifests\n"))
+
+ total = len(mflinkrevs) + len(filelinkrevs) + len(filenodes)
+ count = 0
+ if havemf:
+ for c, m in sorted([(c, m) for m in mflinkrevs
+ for c in mflinkrevs[m]]):
+ count += 1
+ if m == nullid:
+ continue
+ ui.progress(_('crosschecking'), count, total=total)
+ err(c, _("changeset refers to unknown manifest %s") % short(m))
+ mflinkrevs = None # del is bad here due to scope issues
+
+ for f in sorted(filelinkrevs):
+ count += 1
+ ui.progress(_('crosschecking'), count, total=total)
+ if f not in filenodes:
+ lr = filelinkrevs[f][0]
+ err(lr, _("in changeset but not in manifest"), f)
+
+ if havecl:
+ for f in sorted(filenodes):
+ count += 1
+ ui.progress(_('crosschecking'), count, total=total)
+ if f not in filelinkrevs:
+ try:
+ fl = repo.file(f)
+ lr = min([fl.linkrev(fl.rev(n)) for n in filenodes[f]])
+ except:
+ lr = None
+ err(lr, _("in manifest but not in changeset"), f)
+
+ ui.progress(_('crosschecking'), None)
+
+ ui.status(_("checking files\n"))
+
+ storefiles = set()
+ for f, f2, size in repo.store.datafiles():
+ if not f:
+ err(None, _("cannot decode filename '%s'") % f2)
+ elif size > 0 or not revlogv1:
+ storefiles.add(f)
+
+ files = sorted(set(filenodes) | set(filelinkrevs))
+ total = len(files)
+ for i, f in enumerate(files):
+ ui.progress(_('checking'), i, item=f, total=total)
+ try:
+ linkrevs = filelinkrevs[f]
+ except KeyError:
+ # in manifest but not in changelog
+ linkrevs = []
+
+ if linkrevs:
+ lr = linkrevs[0]
+ else:
+ lr = None
+
+ try:
+ fl = repo.file(f)
+ except error.RevlogError, e:
+ err(lr, _("broken revlog! (%s)") % e, f)
+ continue
+
+ for ff in fl.files():
+ try:
+ storefiles.remove(ff)
+ except KeyError:
+ err(lr, _("missing revlog!"), ff)
+
+ checklog(fl, f, lr)
+ seen = {}
+ rp = None
+ for i in fl:
+ revisions += 1
+ n = fl.node(i)
+ lr = checkentry(fl, i, n, seen, linkrevs, f)
+ if f in filenodes:
+ if havemf and n not in filenodes[f]:
+ err(lr, _("%s not in manifests") % (short(n)), f)
+ else:
+ del filenodes[f][n]
+
+ # verify contents
+ try:
+ l = len(fl.read(n))
+ rp = fl.renamed(n)
+ if l != fl.size(i):
+ if len(fl.revision(n)) != fl.size(i):
+ err(lr, _("unpacked size is %s, %s expected") %
+ (l, fl.size(i)), f)
+ except Exception, inst:
+ exc(lr, _("unpacking %s") % short(n), inst, f)
+
+ # check renames
+ try:
+ if rp:
+ if lr is not None and ui.verbose:
+ ctx = lrugetctx(lr)
+ found = False
+ for pctx in ctx.parents():
+ if rp[0] in pctx:
+ found = True
+ break
+ if not found:
+ warn(_("warning: copy source of '%s' not"
+ " in parents of %s") % (f, ctx))
+ fl2 = repo.file(rp[0])
+ if not len(fl2):
+ err(lr, _("empty or missing copy source revlog %s:%s")
+ % (rp[0], short(rp[1])), f)
+ elif rp[1] == nullid:
+ ui.note(_("warning: %s@%s: copy source"
+ " revision is nullid %s:%s\n")
+ % (f, lr, rp[0], short(rp[1])))
+ else:
+ fl2.rev(rp[1])
+ except Exception, inst:
+ exc(lr, _("checking rename of %s") % short(n), inst, f)
+
+ # cross-check
+ if f in filenodes:
+ fns = [(lr, n) for n, lr in filenodes[f].iteritems()]
+ for lr, node in sorted(fns):
+ err(lr, _("%s in manifests not found") % short(node), f)
+ ui.progress(_('checking'), None)
+
+ for f in storefiles:
+ warn(_("warning: orphan revlog '%s'") % f)
+
+ ui.status(_("%d files, %d changesets, %d total revisions\n") %
+ (len(files), len(cl), revisions))
+ if warnings[0]:
+ ui.warn(_("%d warnings encountered!\n") % warnings[0])
+ if errors[0]:
+ ui.warn(_("%d integrity errors encountered!\n") % errors[0])
+ if badrevs:
+ ui.warn(_("(first damaged changeset appears to be %d)\n")
+ % min(badrevs))
+ return 1
diff --git a/websdk/mercurial/win32.py b/websdk/mercurial/win32.py
index f45a5c5..e886caf 120000..100644
--- a/websdk/mercurial/win32.py
+++ b/websdk/mercurial/win32.py
@@ -1 +1,452 @@
-/usr/share/pyshared/mercurial/win32.py \ No newline at end of file
+# win32.py - utility functions that use win32 API
+#
+# Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+import encoding
+import ctypes, errno, os, struct, subprocess, random
+
+_kernel32 = ctypes.windll.kernel32
+_advapi32 = ctypes.windll.advapi32
+_user32 = ctypes.windll.user32
+
+_BOOL = ctypes.c_long
+_WORD = ctypes.c_ushort
+_DWORD = ctypes.c_ulong
+_UINT = ctypes.c_uint
+_LONG = ctypes.c_long
+_LPCSTR = _LPSTR = ctypes.c_char_p
+_HANDLE = ctypes.c_void_p
+_HWND = _HANDLE
+
+_INVALID_HANDLE_VALUE = _HANDLE(-1).value
+
+# GetLastError
+_ERROR_SUCCESS = 0
+_ERROR_INVALID_PARAMETER = 87
+_ERROR_INSUFFICIENT_BUFFER = 122
+
+# WPARAM is defined as UINT_PTR (unsigned type)
+# LPARAM is defined as LONG_PTR (signed type)
+if ctypes.sizeof(ctypes.c_long) == ctypes.sizeof(ctypes.c_void_p):
+ _WPARAM = ctypes.c_ulong
+ _LPARAM = ctypes.c_long
+elif ctypes.sizeof(ctypes.c_longlong) == ctypes.sizeof(ctypes.c_void_p):
+ _WPARAM = ctypes.c_ulonglong
+ _LPARAM = ctypes.c_longlong
+
class _FILETIME(ctypes.Structure):
    """ctypes mirror of the Win32 FILETIME struct (64-bit timestamp
    split into two 32-bit halves)."""
    _fields_ = [('dwLowDateTime', _DWORD),
                ('dwHighDateTime', _DWORD)]
+
class _BY_HANDLE_FILE_INFORMATION(ctypes.Structure):
    """ctypes mirror of BY_HANDLE_FILE_INFORMATION, filled by
    GetFileInformationByHandle (see _getfileinfo below)."""
    _fields_ = [('dwFileAttributes', _DWORD),
                ('ftCreationTime', _FILETIME),
                ('ftLastAccessTime', _FILETIME),
                ('ftLastWriteTime', _FILETIME),
                ('dwVolumeSerialNumber', _DWORD),
                ('nFileSizeHigh', _DWORD),
                ('nFileSizeLow', _DWORD),
                ('nNumberOfLinks', _DWORD),
                ('nFileIndexHigh', _DWORD),
                ('nFileIndexLow', _DWORD)]
+
+# CreateFile
+_FILE_SHARE_READ = 0x00000001
+_FILE_SHARE_WRITE = 0x00000002
+_FILE_SHARE_DELETE = 0x00000004
+
+_OPEN_EXISTING = 3
+
+# SetFileAttributes
+_FILE_ATTRIBUTE_NORMAL = 0x80
+_FILE_ATTRIBUTE_NOT_CONTENT_INDEXED = 0x2000
+
+# Process Security and Access Rights
+_PROCESS_QUERY_INFORMATION = 0x0400
+
+# GetExitCodeProcess
+_STILL_ACTIVE = 259
+
+# registry
+_HKEY_CURRENT_USER = 0x80000001L
+_HKEY_LOCAL_MACHINE = 0x80000002L
+_KEY_READ = 0x20019
+_REG_SZ = 1
+_REG_DWORD = 4
+
class _STARTUPINFO(ctypes.Structure):
    """ctypes mirror of STARTUPINFOA, passed to CreateProcessA
    (see spawndetached below)."""
    _fields_ = [('cb', _DWORD),
                ('lpReserved', _LPSTR),
                ('lpDesktop', _LPSTR),
                ('lpTitle', _LPSTR),
                ('dwX', _DWORD),
                ('dwY', _DWORD),
                ('dwXSize', _DWORD),
                ('dwYSize', _DWORD),
                ('dwXCountChars', _DWORD),
                ('dwYCountChars', _DWORD),
                ('dwFillAttribute', _DWORD),
                ('dwFlags', _DWORD),
                ('wShowWindow', _WORD),
                ('cbReserved2', _WORD),
                ('lpReserved2', ctypes.c_char_p),
                ('hStdInput', _HANDLE),
                ('hStdOutput', _HANDLE),
                ('hStdError', _HANDLE)]
+
class _PROCESS_INFORMATION(ctypes.Structure):
    """ctypes mirror of PROCESS_INFORMATION, filled by CreateProcessA."""
    _fields_ = [('hProcess', _HANDLE),
                ('hThread', _HANDLE),
                ('dwProcessId', _DWORD),
                ('dwThreadId', _DWORD)]
+
+_DETACHED_PROCESS = 0x00000008
+_STARTF_USESHOWWINDOW = 0x00000001
+_SW_HIDE = 0
+
class _COORD(ctypes.Structure):
    """ctypes mirror of the Win32 console COORD struct."""
    _fields_ = [('X', ctypes.c_short),
                ('Y', ctypes.c_short)]
+
class _SMALL_RECT(ctypes.Structure):
    """ctypes mirror of the Win32 console SMALL_RECT struct."""
    _fields_ = [('Left', ctypes.c_short),
                ('Top', ctypes.c_short),
                ('Right', ctypes.c_short),
                ('Bottom', ctypes.c_short)]
+
class _CONSOLE_SCREEN_BUFFER_INFO(ctypes.Structure):
    """ctypes mirror of CONSOLE_SCREEN_BUFFER_INFO, filled by
    GetConsoleScreenBufferInfo (see termwidth below)."""
    _fields_ = [('dwSize', _COORD),
                ('dwCursorPosition', _COORD),
                ('wAttributes', _WORD),
                ('srWindow', _SMALL_RECT),
                ('dwMaximumWindowSize', _COORD)]
+
+_STD_ERROR_HANDLE = _DWORD(-12).value
+
+# types of parameters of C functions used (required by pypy)
+
+_kernel32.CreateFileA.argtypes = [_LPCSTR, _DWORD, _DWORD, ctypes.c_void_p,
+ _DWORD, _DWORD, _HANDLE]
+_kernel32.CreateFileA.restype = _HANDLE
+
+_kernel32.GetFileInformationByHandle.argtypes = [_HANDLE, ctypes.c_void_p]
+_kernel32.GetFileInformationByHandle.restype = _BOOL
+
+_kernel32.CloseHandle.argtypes = [_HANDLE]
+_kernel32.CloseHandle.restype = _BOOL
+
+try:
+ _kernel32.CreateHardLinkA.argtypes = [_LPCSTR, _LPCSTR, ctypes.c_void_p]
+ _kernel32.CreateHardLinkA.restype = _BOOL
+except AttributeError:
+ pass
+
+_kernel32.SetFileAttributesA.argtypes = [_LPCSTR, _DWORD]
+_kernel32.SetFileAttributesA.restype = _BOOL
+
+_kernel32.OpenProcess.argtypes = [_DWORD, _BOOL, _DWORD]
+_kernel32.OpenProcess.restype = _HANDLE
+
+_kernel32.GetExitCodeProcess.argtypes = [_HANDLE, ctypes.c_void_p]
+_kernel32.GetExitCodeProcess.restype = _BOOL
+
+_kernel32.GetLastError.argtypes = []
+_kernel32.GetLastError.restype = _DWORD
+
+_kernel32.GetModuleFileNameA.argtypes = [_HANDLE, ctypes.c_void_p, _DWORD]
+_kernel32.GetModuleFileNameA.restype = _DWORD
+
+_kernel32.CreateProcessA.argtypes = [_LPCSTR, _LPCSTR, ctypes.c_void_p,
+ ctypes.c_void_p, _BOOL, _DWORD, ctypes.c_void_p, _LPCSTR, ctypes.c_void_p,
+ ctypes.c_void_p]
+_kernel32.CreateProcessA.restype = _BOOL
+
+_kernel32.ExitProcess.argtypes = [_UINT]
+_kernel32.ExitProcess.restype = None
+
+_kernel32.GetCurrentProcessId.argtypes = []
+_kernel32.GetCurrentProcessId.restype = _DWORD
+
+_SIGNAL_HANDLER = ctypes.WINFUNCTYPE(_BOOL, _DWORD)
+_kernel32.SetConsoleCtrlHandler.argtypes = [_SIGNAL_HANDLER, _BOOL]
+_kernel32.SetConsoleCtrlHandler.restype = _BOOL
+
+_kernel32.GetStdHandle.argtypes = [_DWORD]
+_kernel32.GetStdHandle.restype = _HANDLE
+
+_kernel32.GetConsoleScreenBufferInfo.argtypes = [_HANDLE, ctypes.c_void_p]
+_kernel32.GetConsoleScreenBufferInfo.restype = _BOOL
+
+_advapi32.RegOpenKeyExA.argtypes = [_HANDLE, _LPCSTR, _DWORD, _DWORD,
+ ctypes.c_void_p]
+_advapi32.RegOpenKeyExA.restype = _LONG
+
+_advapi32.RegQueryValueExA.argtypes = [_HANDLE, _LPCSTR, ctypes.c_void_p,
+ ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
+_advapi32.RegQueryValueExA.restype = _LONG
+
+_advapi32.RegCloseKey.argtypes = [_HANDLE]
+_advapi32.RegCloseKey.restype = _LONG
+
+_advapi32.GetUserNameA.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
+_advapi32.GetUserNameA.restype = _BOOL
+
+_user32.GetWindowThreadProcessId.argtypes = [_HANDLE, ctypes.c_void_p]
+_user32.GetWindowThreadProcessId.restype = _DWORD
+
+_user32.ShowWindow.argtypes = [_HANDLE, ctypes.c_int]
+_user32.ShowWindow.restype = _BOOL
+
+_WNDENUMPROC = ctypes.WINFUNCTYPE(_BOOL, _HWND, _LPARAM)
+_user32.EnumWindows.argtypes = [_WNDENUMPROC, _LPARAM]
+_user32.EnumWindows.restype = _BOOL
+
def _raiseoserror(name):
    """Convert the calling thread's last Win32 error into an OSError
    whose message names *name*."""
    winerr = ctypes.WinError()
    raise OSError(winerr.errno, '%s: %s' % (name, winerr.strerror))
+
def _getfileinfo(name):
    """Return a _BY_HANDLE_FILE_INFORMATION for the file *name*.

    The file is opened with zero access rights and full sharing so other
    readers/writers are not disturbed.  Raises OSError if the file
    cannot be opened or queried.
    """
    fh = _kernel32.CreateFileA(name, 0,
            _FILE_SHARE_READ | _FILE_SHARE_WRITE | _FILE_SHARE_DELETE,
            None, _OPEN_EXISTING, 0, None)
    if fh == _INVALID_HANDLE_VALUE:
        _raiseoserror(name)
    try:
        fi = _BY_HANDLE_FILE_INFORMATION()
        if not _kernel32.GetFileInformationByHandle(fh, ctypes.byref(fi)):
            _raiseoserror(name)
        return fi
    finally:
        # always release the handle, even if the query failed
        _kernel32.CloseHandle(fh)
+
def oslink(src, dst):
    """Create a hard link *dst* pointing at *src*; raise OSError on failure."""
    try:
        if not _kernel32.CreateHardLinkA(dst, src, None):
            _raiseoserror(src)
    except AttributeError: # Wine doesn't support this function
        _raiseoserror(src)
+
def nlinks(name):
    '''return number of hardlinks for the given file'''
    info = _getfileinfo(name)
    return info.nNumberOfLinks
+
def samefile(fpath1, fpath2):
    '''Returns whether fpath1 and fpath2 refer to the same file. This is only
    guaranteed to work for files, not directories.'''
    info1 = _getfileinfo(fpath1)
    info2 = _getfileinfo(fpath2)
    # identical volume plus identical 64-bit file index => same file
    if info1.dwVolumeSerialNumber != info2.dwVolumeSerialNumber:
        return False
    return (info1.nFileIndexHigh == info2.nFileIndexHigh
            and info1.nFileIndexLow == info2.nFileIndexLow)
+
def samedevice(fpath1, fpath2):
    '''Returns whether fpath1 and fpath2 are on the same device. This is only
    guaranteed to work for files, not directories.'''
    serial1 = _getfileinfo(fpath1).dwVolumeSerialNumber
    serial2 = _getfileinfo(fpath2).dwVolumeSerialNumber
    return serial1 == serial2
+
def testpid(pid):
    '''return True if pid is still running or unable to
    determine, False otherwise'''
    # PROCESS_QUERY_INFORMATION is sufficient to read the exit code
    h = _kernel32.OpenProcess(_PROCESS_QUERY_INFORMATION, False, pid)
    if h:
        try:
            status = _DWORD()
            if _kernel32.GetExitCodeProcess(h, ctypes.byref(status)):
                # a process that has not exited reports STILL_ACTIVE
                return status.value == _STILL_ACTIVE
        finally:
            _kernel32.CloseHandle(h)
    # OpenProcess failed: unless the pid itself was invalid, assume the
    # process exists but we lack the rights to query it
    return _kernel32.GetLastError() != _ERROR_INVALID_PARAMETER
+
def lookupreg(key, valname=None, scope=None):
    ''' Look up a key/value name in the Windows registry.

    valname: value name. If unspecified, the default value for the key
    is used.
    scope: optionally specify scope for registry lookup, this can be
    a sequence of scopes to look up in order. Default (CURRENT_USER,
    LOCAL_MACHINE).

    Returns the value as a local-encoded string (REG_SZ) or an int
    (REG_DWORD), or None if the value was not found in any scope.
    '''
    byref = ctypes.byref
    if scope is None:
        scope = (_HKEY_CURRENT_USER, _HKEY_LOCAL_MACHINE)
    elif not isinstance(scope, (list, tuple)):
        scope = (scope,)
    for s in scope:
        kh = _HANDLE()
        res = _advapi32.RegOpenKeyExA(s, key, 0, _KEY_READ, ctypes.byref(kh))
        if res != _ERROR_SUCCESS:
            continue
        try:
            size = _DWORD(600)
            # 'valtype', not 'type': don't shadow the builtin
            valtype = _DWORD()
            buf = ctypes.create_string_buffer(size.value + 1)
            res = _advapi32.RegQueryValueExA(kh.value, valname, None,
                                             byref(valtype), buf, byref(size))
            if res != _ERROR_SUCCESS:
                # try the next scope; finally still closes the key
                continue
            if valtype.value == _REG_SZ:
                # never let a Unicode string escape into the wild
                return encoding.tolocal(buf.value.encode('UTF-8'))
            elif valtype.value == _REG_DWORD:
                fmt = '<L'
                # 'raw', not 's': don't clobber the scope loop variable
                raw = ctypes.string_at(byref(buf), struct.calcsize(fmt))
                return struct.unpack(fmt, raw)[0]
        finally:
            _advapi32.RegCloseKey(kh.value)
+
def executablepath():
    '''return full path of hg.exe'''
    size = 600
    buf = ctypes.create_string_buffer(size + 1)
    # 'length', not 'len': don't shadow the builtin
    length = _kernel32.GetModuleFileNameA(None, ctypes.byref(buf), size)
    if length == 0:
        raise ctypes.WinError()
    elif length == size:
        # a result exactly the buffer size means the path was truncated
        raise ctypes.WinError(_ERROR_INSUFFICIENT_BUFFER)
    return buf.value
+
def getuser():
    '''return name of current user'''
    size = _DWORD(300)
    namebuf = ctypes.create_string_buffer(size.value + 1)
    ok = _advapi32.GetUserNameA(ctypes.byref(namebuf), ctypes.byref(size))
    if not ok:
        raise ctypes.WinError()
    return namebuf.value
+
+_signalhandler = []
+
def setsignalhandler():
    '''Register a termination handler for console events including
    CTRL+C. python signal handlers do not work well with socket
    operations.
    '''
    def handler(event):
        # any console control event terminates the process
        _kernel32.ExitProcess(1)

    if _signalhandler:
        return # already registered
    h = _SIGNAL_HANDLER(handler)
    _signalhandler.append(h) # needed to prevent garbage collection
    if not _kernel32.SetConsoleCtrlHandler(h, True):
        raise ctypes.WinError()
+
def hidewindow():
    """Hide any top-level window owned by the current process."""

    def callback(hwnd, pid):
        # called by EnumWindows for each top-level window; hide the
        # first one belonging to our pid
        wpid = _DWORD()
        _user32.GetWindowThreadProcessId(hwnd, ctypes.byref(wpid))
        if pid == wpid.value:
            _user32.ShowWindow(hwnd, _SW_HIDE)
            return False # stop enumerating windows
        return True

    pid = _kernel32.GetCurrentProcessId()
    _user32.EnumWindows(_WNDENUMPROC(callback), pid)
+
def termwidth():
    """Return the usable width of the console, 79 as a fallback."""
    # cmd.exe does not handle CR like a unix console, the CR is
    # counted in the line length. On 80 columns consoles, if 80
    # characters are written, the following CR won't apply on the
    # current line but on the new one. Keep room for it.
    width = 79
    # Query stderr to avoid problems with redirections
    screenbuf = _kernel32.GetStdHandle(
                  _STD_ERROR_HANDLE) # don't close the handle returned
    if screenbuf is None or screenbuf == _INVALID_HANDLE_VALUE:
        return width
    csbi = _CONSOLE_SCREEN_BUFFER_INFO()
    if not _kernel32.GetConsoleScreenBufferInfo(
                        screenbuf, ctypes.byref(csbi)):
        return width
    width = csbi.srWindow.Right - csbi.srWindow.Left
    return width
+
def spawndetached(args):
    """Spawn *args* as a fully detached process; return its pid."""
    # No standard library function really spawns a fully detached
    # process under win32 because they allocate pipes or other objects
    # to handle standard streams communications. Passing these objects
    # to the child process requires handle inheritance to be enabled
    # which makes really detached processes impossible.
    si = _STARTUPINFO()
    si.cb = ctypes.sizeof(_STARTUPINFO)
    si.dwFlags = _STARTF_USESHOWWINDOW
    si.wShowWindow = _SW_HIDE

    pi = _PROCESS_INFORMATION()

    # build a Win32 environment block: NUL-terminated "k=v" entries,
    # terminated by an extra NUL (an empty block is a single NUL)
    env = ''
    for k in os.environ:
        env += "%s=%s\0" % (k, os.environ[k])
    if not env:
        env = '\0'
    env += '\0'

    args = subprocess.list2cmdline(args)
    # Not running the command in shell mode makes python26 hang when
    # writing to hgweb output socket.
    comspec = os.environ.get("COMSPEC", "cmd.exe")
    args = comspec + " /c " + args

    res = _kernel32.CreateProcessA(
        None, args, None, None, False, _DETACHED_PROCESS,
        env, os.getcwd(), ctypes.byref(si), ctypes.byref(pi))
    if not res:
        raise ctypes.WinError()

    return pi.dwProcessId
+
def unlink(f):
    '''try to implement POSIX' unlink semantics on Windows'''

    # POSIX allows to unlink and rename open files. Windows has serious
    # problems with doing that:
    # - Calling os.unlink (or os.rename) on a file f fails if f or any
    #   hardlinked copy of f has been opened with Python's open(). There is no
    #   way such a file can be deleted or renamed on Windows (other than
    #   scheduling the delete or rename for the next reboot).
    # - Calling os.unlink on a file that has been opened with Mercurial's
    #   posixfile (or comparable methods) will delay the actual deletion of
    #   the file for as long as the file is held open. The filename is blocked
    #   during that time and cannot be used for recreating a new file under
    #   that same name ("zombie file"). Directories containing such zombie files
    #   cannot be removed or moved.
    # A file that has been opened with posixfile can be renamed, so we rename
    # f to a random temporary name before calling os.unlink on it. This allows
    # callers to recreate f immediately while having other readers do their
    # implicit zombie filename blocking on a temporary name.

    for tries in xrange(10):
        temp = '%s-%08x' % (f, random.randint(0, 0xffffffff))
        try:
            os.rename(f, temp)  # raises OSError EEXIST if temp exists
            break
        except OSError, e:
            if e.errno != errno.EEXIST:
                raise
    else:
        # all 10 candidate names were taken
        raise IOError, (errno.EEXIST, "No usable temporary filename found")

    try:
        os.unlink(temp)
    except OSError:
        # The unlink might have failed because the READONLY attribute may have
        # been set on the original file. Rename works fine with READONLY set,
        # but not os.unlink. Reset all attributes and try again.
        _kernel32.SetFileAttributesA(temp, _FILE_ATTRIBUTE_NORMAL)
        try:
            os.unlink(temp)
        except OSError:
            # The unlink might have failed due to some very rude AV-Scanners.
            # Leaking a tempfile is the lesser evil than aborting here and
            # leaving some potentially serious inconsistencies.
            pass
+
def makedir(path, notindexed):
    """Create directory *path*; if notindexed, exclude it from the
    Windows content indexer."""
    os.mkdir(path)
    if notindexed:
        _kernel32.SetFileAttributesA(path, _FILE_ATTRIBUTE_NOT_CONTENT_INDEXED)
diff --git a/websdk/mercurial/windows.py b/websdk/mercurial/windows.py
index 73c48ea..a5270db 120000..100644
--- a/websdk/mercurial/windows.py
+++ b/websdk/mercurial/windows.py
@@ -1 +1,316 @@
-/usr/share/pyshared/mercurial/windows.py \ No newline at end of file
+# windows.py - Windows utility function implementations for Mercurial
+#
+# Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+from i18n import _
+import osutil
+import errno, msvcrt, os, re, sys
+
+import win32
+executablepath = win32.executablepath
+getuser = win32.getuser
+hidewindow = win32.hidewindow
+lookupreg = win32.lookupreg
+makedir = win32.makedir
+nlinks = win32.nlinks
+oslink = win32.oslink
+samedevice = win32.samedevice
+samefile = win32.samefile
+setsignalhandler = win32.setsignalhandler
+spawndetached = win32.spawndetached
+termwidth = win32.termwidth
+testpid = win32.testpid
+unlink = win32.unlink
+
+nulldev = 'NUL:'
+umask = 002
+
+# wrap osutil.posixfile to provide friendlier exceptions
+def posixfile(name, mode='r', buffering=-1):
+ try:
+ return osutil.posixfile(name, mode, buffering)
+ except WindowsError, err:
+ raise IOError(err.errno, '%s: %s' % (name, err.strerror))
+posixfile.__doc__ = osutil.posixfile.__doc__
+
+class winstdout(object):
+ '''stdout on windows misbehaves if sent through a pipe'''
+
+ def __init__(self, fp):
+ self.fp = fp
+
+ def __getattr__(self, key):
+ return getattr(self.fp, key)
+
+ def close(self):
+ try:
+ self.fp.close()
+ except IOError:
+ pass
+
+ def write(self, s):
+ try:
+ # This is workaround for "Not enough space" error on
+ # writing large size of data to console.
+ limit = 16000
+ l = len(s)
+ start = 0
+ self.softspace = 0
+ while start < l:
+ end = start + limit
+ self.fp.write(s[start:end])
+ start = end
+ except IOError, inst:
+ if inst.errno != 0:
+ raise
+ self.close()
+ raise IOError(errno.EPIPE, 'Broken pipe')
+
+ def flush(self):
+ try:
+ return self.fp.flush()
+ except IOError, inst:
+ if inst.errno != errno.EINVAL:
+ raise
+ self.close()
+ raise IOError(errno.EPIPE, 'Broken pipe')
+
+sys.__stdout__ = sys.stdout = winstdout(sys.stdout)
+
+def _is_win_9x():
+ '''return true if run on windows 95, 98 or me.'''
+ try:
+ return sys.getwindowsversion()[3] == 1
+ except AttributeError:
+ return 'command' in os.environ.get('comspec', '')
+
+def openhardlinks():
+ return not _is_win_9x()
+
+def parsepatchoutput(output_line):
+ """parses the output produced by patch and returns the filename"""
+ pf = output_line[14:]
+ if pf[0] == '`':
+ pf = pf[1:-1] # Remove the quotes
+ return pf
+
+def sshargs(sshcmd, host, user, port):
+ '''Build argument list for ssh or Plink'''
+ pflag = 'plink' in sshcmd.lower() and '-P' or '-p'
+ args = user and ("%s@%s" % (user, host)) or host
+ return port and ("%s %s %s" % (args, pflag, port)) or args
+
+def setflags(f, l, x):
+ pass
+
+def copymode(src, dst, mode=None):
+ pass
+
+def checkexec(path):
+ return False
+
+def checklink(path):
+ return False
+
+def setbinary(fd):
+ # When run without console, pipes may expose invalid
+ # fileno(), usually set to -1.
+ fno = getattr(fd, 'fileno', None)
+ if fno is not None and fno() >= 0:
+ msvcrt.setmode(fno(), os.O_BINARY)
+
+def pconvert(path):
+ return '/'.join(path.split(os.sep))
+
+def localpath(path):
+ return path.replace('/', '\\')
+
+def normpath(path):
+ return pconvert(os.path.normpath(path))
+
+normcase = os.path.normcase
+
+def realpath(path):
+ '''
+ Returns the true, canonical file system path equivalent to the given
+ path.
+ '''
+ # TODO: There may be a more clever way to do this that also handles other,
+ # less common file systems.
+ return os.path.normpath(os.path.normcase(os.path.realpath(path)))
+
+def samestat(s1, s2):
+ return False
+
+# A sequence of backslashes is special iff it precedes a double quote:
+# - if there's an even number of backslashes, the double quote is not
+# quoted (i.e. it ends the quoted region)
+# - if there's an odd number of backslashes, the double quote is quoted
+# - in both cases, every pair of backslashes is unquoted into a single
+# backslash
+# (See http://msdn2.microsoft.com/en-us/library/a1y7w461.aspx )
+# So, to quote a string, we must surround it in double quotes, double
+ # the number of backslashes that precede double quotes and add another
+# backslash before every double quote (being careful with the double
+# quote we've appended to the end)
+_quotere = None
+def shellquote(s):
+ global _quotere
+ if _quotere is None:
+ _quotere = re.compile(r'(\\*)("|\\$)')
+ return '"%s"' % _quotere.sub(r'\1\1\\\2', s)
+
+def quotecommand(cmd):
+ """Build a command string suitable for os.popen* calls."""
+ if sys.version_info < (2, 7, 1):
+ # Python versions since 2.7.1 do this extra quoting themselves
+ return '"' + cmd + '"'
+ return cmd
+
+def popen(command, mode='r'):
+ # Work around "popen spawned process may not write to stdout
+ # under windows"
+ # http://bugs.python.org/issue1366
+ command += " 2> %s" % nulldev
+ return os.popen(quotecommand(command), mode)
+
+def explainexit(code):
+ return _("exited with status %d") % code, code
+
+# if you change this stub into a real check, please try to implement the
+# username and groupname functions above, too.
+def isowner(st):
+ return True
+
+def findexe(command):
+ '''Find executable for command searching like cmd.exe does.
+ If command is a basename then PATH is searched for command.
+ PATH isn't searched if command is an absolute or relative path.
+ An extension from PATHEXT is found and added if not present.
+ If command isn't found None is returned.'''
+ pathext = os.environ.get('PATHEXT', '.COM;.EXE;.BAT;.CMD')
+ pathexts = [ext for ext in pathext.lower().split(os.pathsep)]
+ if os.path.splitext(command)[1].lower() in pathexts:
+ pathexts = ['']
+
+ def findexisting(pathcommand):
+ 'Will append extension (if needed) and return existing file'
+ for ext in pathexts:
+ executable = pathcommand + ext
+ if os.path.exists(executable):
+ return executable
+ return None
+
+ if os.sep in command:
+ return findexisting(command)
+
+ for path in os.environ.get('PATH', '').split(os.pathsep):
+ executable = findexisting(os.path.join(path, command))
+ if executable is not None:
+ return executable
+ return findexisting(os.path.expanduser(os.path.expandvars(command)))
+
+def statfiles(files):
+ '''Stat each file in files and yield stat or None if file does not exist.
+ Cluster and cache stat per directory to minimize number of OS stat calls.'''
+ ncase = os.path.normcase
+ dircache = {} # dirname -> filename -> status | None if file does not exist
+ for nf in files:
+ nf = ncase(nf)
+ dir, base = os.path.split(nf)
+ if not dir:
+ dir = '.'
+ cache = dircache.get(dir, None)
+ if cache is None:
+ try:
+ dmap = dict([(ncase(n), s)
+ for n, k, s in osutil.listdir(dir, True)])
+ except OSError, err:
+ # handle directory not found in Python version prior to 2.5
+ # Python <= 2.4 returns native Windows code 3 in errno
+ # Python >= 2.5 returns ENOENT and adds winerror field
+ # EINVAL is raised if dir is not a directory.
+ if err.errno not in (3, errno.ENOENT, errno.EINVAL,
+ errno.ENOTDIR):
+ raise
+ dmap = {}
+ cache = dircache.setdefault(dir, dmap)
+ yield cache.get(base, None)
+
+def username(uid=None):
+ """Return the name of the user with the given uid.
+
+ If uid is None, return the name of the current user."""
+ return None
+
+def groupname(gid=None):
+ """Return the name of the group with the given gid.
+
+ If gid is None, return the name of the current group."""
+ return None
+
+def _removedirs(name):
+ """special version of os.removedirs that does not remove symlinked
+ directories or junction points if they actually contain files"""
+ if osutil.listdir(name):
+ return
+ os.rmdir(name)
+ head, tail = os.path.split(name)
+ if not tail:
+ head, tail = os.path.split(head)
+ while head and tail:
+ try:
+ if osutil.listdir(head):
+ return
+ os.rmdir(head)
+ except (ValueError, OSError):
+ break
+ head, tail = os.path.split(head)
+
+def unlinkpath(f):
+ """unlink and remove the directory if it is empty"""
+ unlink(f)
+ # try removing directories that might now be empty
+ try:
+ _removedirs(os.path.dirname(f))
+ except OSError:
+ pass
+
+def rename(src, dst):
+ '''atomically rename file src to dst, replacing dst if it exists'''
+ try:
+ os.rename(src, dst)
+ except OSError, e:
+ if e.errno != errno.EEXIST:
+ raise
+ unlink(dst)
+ os.rename(src, dst)
+
+def gethgcmd():
+ return [sys.executable] + sys.argv[:1]
+
+def termwidth():
+ # cmd.exe does not handle CR like a unix console, the CR is
+ # counted in the line length. On 80 columns consoles, if 80
+ # characters are written, the following CR won't apply on the
+ # current line but on the new one. Keep room for it.
+ return 79
+
+def groupmembers(name):
+ # Don't support groups on Windows for now
+ raise KeyError()
+
+def isexec(f):
+ return False
+
+class cachestat(object):
+ def __init__(self, path):
+ pass
+
+ def cacheable(self):
+ return False
+
+expandglobs = True
diff --git a/websdk/mercurial/wireproto.py b/websdk/mercurial/wireproto.py
new file mode 100644
index 0000000..d189004
--- /dev/null
+++ b/websdk/mercurial/wireproto.py
@@ -0,0 +1,607 @@
+# wireproto.py - generic wire protocol support functions
+#
+# Copyright 2005-2010 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+import urllib, tempfile, os, sys
+from i18n import _
+from node import bin, hex
+import changegroup as changegroupmod
+import repo, error, encoding, util, store
+
+# abstract batching support
+
+class future(object):
+ '''placeholder for a value to be set later'''
+ def set(self, value):
+ if util.safehasattr(self, 'value'):
+ raise error.RepoError("future is already set")
+ self.value = value
+
+class batcher(object):
+ '''base class for batches of commands submittable in a single request
+
+ All methods invoked on instances of this class are simply queued and return
+ a future for the result. Once you call submit(), all the queued calls are
+ performed and the results set in their respective futures.
+ '''
+ def __init__(self):
+ self.calls = []
+ def __getattr__(self, name):
+ def call(*args, **opts):
+ resref = future()
+ self.calls.append((name, args, opts, resref,))
+ return resref
+ return call
+ def submit(self):
+ pass
+
+class localbatch(batcher):
+ '''performs the queued calls directly'''
+ def __init__(self, local):
+ batcher.__init__(self)
+ self.local = local
+ def submit(self):
+ for name, args, opts, resref in self.calls:
+ resref.set(getattr(self.local, name)(*args, **opts))
+
+class remotebatch(batcher):
+ '''batches the queued calls; uses as few roundtrips as possible'''
+ def __init__(self, remote):
+ '''remote must support _submitbatch(encbatch) and _submitone(op, encargs)'''
+ batcher.__init__(self)
+ self.remote = remote
+ def submit(self):
+ req, rsp = [], []
+ for name, args, opts, resref in self.calls:
+ mtd = getattr(self.remote, name)
+ batchablefn = getattr(mtd, 'batchable', None)
+ if batchablefn is not None:
+ batchable = batchablefn(mtd.im_self, *args, **opts)
+ encargsorres, encresref = batchable.next()
+ if encresref:
+ req.append((name, encargsorres,))
+ rsp.append((batchable, encresref, resref,))
+ else:
+ resref.set(encargsorres)
+ else:
+ if req:
+ self._submitreq(req, rsp)
+ req, rsp = [], []
+ resref.set(mtd(*args, **opts))
+ if req:
+ self._submitreq(req, rsp)
+ def _submitreq(self, req, rsp):
+ encresults = self.remote._submitbatch(req)
+ for encres, r in zip(encresults, rsp):
+ batchable, encresref, resref = r
+ encresref.set(encres)
+ resref.set(batchable.next())
+
+def batchable(f):
+ '''annotation for batchable methods
+
+ Such methods must implement a coroutine as follows:
+
+ @batchable
+ def sample(self, one, two=None):
+ # Handle locally computable results first:
+ if not one:
+ yield "a local result", None
+ # Build list of encoded arguments suitable for your wire protocol:
+ encargs = [('one', encode(one),), ('two', encode(two),)]
+ # Create future for injection of encoded result:
+ encresref = future()
+ # Return encoded arguments and future:
+ yield encargs, encresref
+ # Assuming the future to be filled with the result from the batched request
+ # now. Decode it:
+ yield decode(encresref.value)
+
+ The decorator returns a function which wraps this coroutine as a plain method,
+ but adds the original method as an attribute called "batchable", which is
+ used by remotebatch to split the call into separate encoding and decoding
+ phases.
+ '''
+ def plain(*args, **opts):
+ batchable = f(*args, **opts)
+ encargsorres, encresref = batchable.next()
+ if not encresref:
+ return encargsorres # a local result in this case
+ self = args[0]
+ encresref.set(self._submitone(f.func_name, encargsorres))
+ return batchable.next()
+ setattr(plain, 'batchable', f)
+ return plain
+
+# list of nodes encoding / decoding
+
+def decodelist(l, sep=' '):
+ if l:
+ return map(bin, l.split(sep))
+ return []
+
+def encodelist(l, sep=' '):
+ return sep.join(map(hex, l))
+
+# batched call argument encoding
+
+def escapearg(plain):
+ return (plain
+ .replace(':', '::')
+ .replace(',', ':,')
+ .replace(';', ':;')
+ .replace('=', ':='))
+
+def unescapearg(escaped):
+ return (escaped
+ .replace(':=', '=')
+ .replace(':;', ';')
+ .replace(':,', ',')
+ .replace('::', ':'))
+
+# client side
+
+def todict(**args):
+ return args
+
+class wirerepository(repo.repository):
+
+ def batch(self):
+ return remotebatch(self)
+ def _submitbatch(self, req):
+ cmds = []
+ for op, argsdict in req:
+ args = ','.join('%s=%s' % p for p in argsdict.iteritems())
+ cmds.append('%s %s' % (op, args))
+ rsp = self._call("batch", cmds=';'.join(cmds))
+ return rsp.split(';')
+ def _submitone(self, op, args):
+ return self._call(op, **args)
+
+ @batchable
+ def lookup(self, key):
+ self.requirecap('lookup', _('look up remote revision'))
+ f = future()
+ yield todict(key=encoding.fromlocal(key)), f
+ d = f.value
+ success, data = d[:-1].split(" ", 1)
+ if int(success):
+ yield bin(data)
+ self._abort(error.RepoError(data))
+
+ @batchable
+ def heads(self):
+ f = future()
+ yield {}, f
+ d = f.value
+ try:
+ yield decodelist(d[:-1])
+ except ValueError:
+ self._abort(error.ResponseError(_("unexpected response:"), d))
+
+ @batchable
+ def known(self, nodes):
+ f = future()
+ yield todict(nodes=encodelist(nodes)), f
+ d = f.value
+ try:
+ yield [bool(int(f)) for f in d]
+ except ValueError:
+ self._abort(error.ResponseError(_("unexpected response:"), d))
+
+ @batchable
+ def branchmap(self):
+ f = future()
+ yield {}, f
+ d = f.value
+ try:
+ branchmap = {}
+ for branchpart in d.splitlines():
+ branchname, branchheads = branchpart.split(' ', 1)
+ branchname = encoding.tolocal(urllib.unquote(branchname))
+ branchheads = decodelist(branchheads)
+ branchmap[branchname] = branchheads
+ yield branchmap
+ except TypeError:
+ self._abort(error.ResponseError(_("unexpected response:"), d))
+
+ def branches(self, nodes):
+ n = encodelist(nodes)
+ d = self._call("branches", nodes=n)
+ try:
+ br = [tuple(decodelist(b)) for b in d.splitlines()]
+ return br
+ except ValueError:
+ self._abort(error.ResponseError(_("unexpected response:"), d))
+
+ def between(self, pairs):
+ batch = 8 # avoid giant requests
+ r = []
+ for i in xrange(0, len(pairs), batch):
+ n = " ".join([encodelist(p, '-') for p in pairs[i:i + batch]])
+ d = self._call("between", pairs=n)
+ try:
+ r.extend(l and decodelist(l) or [] for l in d.splitlines())
+ except ValueError:
+ self._abort(error.ResponseError(_("unexpected response:"), d))
+ return r
+
+ @batchable
+ def pushkey(self, namespace, key, old, new):
+ if not self.capable('pushkey'):
+ yield False, None
+ f = future()
+ yield todict(namespace=encoding.fromlocal(namespace),
+ key=encoding.fromlocal(key),
+ old=encoding.fromlocal(old),
+ new=encoding.fromlocal(new)), f
+ d = f.value
+ try:
+ d = bool(int(d))
+ except ValueError:
+ raise error.ResponseError(
+ _('push failed (unexpected response):'), d)
+ yield d
+
+ @batchable
+ def listkeys(self, namespace):
+ if not self.capable('pushkey'):
+ yield {}, None
+ f = future()
+ yield todict(namespace=encoding.fromlocal(namespace)), f
+ d = f.value
+ r = {}
+ for l in d.splitlines():
+ k, v = l.split('\t')
+ r[encoding.tolocal(k)] = encoding.tolocal(v)
+ yield r
+
+ def stream_out(self):
+ return self._callstream('stream_out')
+
+ def changegroup(self, nodes, kind):
+ n = encodelist(nodes)
+ f = self._callstream("changegroup", roots=n)
+ return changegroupmod.unbundle10(self._decompress(f), 'UN')
+
+ def changegroupsubset(self, bases, heads, kind):
+ self.requirecap('changegroupsubset', _('look up remote changes'))
+ bases = encodelist(bases)
+ heads = encodelist(heads)
+ f = self._callstream("changegroupsubset",
+ bases=bases, heads=heads)
+ return changegroupmod.unbundle10(self._decompress(f), 'UN')
+
+ def getbundle(self, source, heads=None, common=None):
+ self.requirecap('getbundle', _('look up remote changes'))
+ opts = {}
+ if heads is not None:
+ opts['heads'] = encodelist(heads)
+ if common is not None:
+ opts['common'] = encodelist(common)
+ f = self._callstream("getbundle", **opts)
+ return changegroupmod.unbundle10(self._decompress(f), 'UN')
+
+ def unbundle(self, cg, heads, source):
+ '''Send cg (a readable file-like object representing the
+ changegroup to push, typically a chunkbuffer object) to the
+ remote server as a bundle. Return an integer indicating the
+ result of the push (see localrepository.addchangegroup()).'''
+
+ if heads != ['force'] and self.capable('unbundlehash'):
+ heads = encodelist(['hashed',
+ util.sha1(''.join(sorted(heads))).digest()])
+ else:
+ heads = encodelist(heads)
+
+ ret, output = self._callpush("unbundle", cg, heads=heads)
+ if ret == "":
+ raise error.ResponseError(
+ _('push failed:'), output)
+ try:
+ ret = int(ret)
+ except ValueError:
+ raise error.ResponseError(
+ _('push failed (unexpected response):'), ret)
+
+ for l in output.splitlines(True):
+ self.ui.status(_('remote: '), l)
+ return ret
+
+ def debugwireargs(self, one, two, three=None, four=None, five=None):
+ # don't pass optional arguments left at their default value
+ opts = {}
+ if three is not None:
+ opts['three'] = three
+ if four is not None:
+ opts['four'] = four
+ return self._call('debugwireargs', one=one, two=two, **opts)
+
+# server side
+
+class streamres(object):
+ def __init__(self, gen):
+ self.gen = gen
+
+class pushres(object):
+ def __init__(self, res):
+ self.res = res
+
+class pusherr(object):
+ def __init__(self, res):
+ self.res = res
+
+class ooberror(object):
+ def __init__(self, message):
+ self.message = message
+
+def dispatch(repo, proto, command):
+ func, spec = commands[command]
+ args = proto.getargs(spec)
+ return func(repo, proto, *args)
+
+def options(cmd, keys, others):
+ opts = {}
+ for k in keys:
+ if k in others:
+ opts[k] = others[k]
+ del others[k]
+ if others:
+ sys.stderr.write("abort: %s got unexpected arguments %s\n"
+ % (cmd, ",".join(others)))
+ return opts
+
+def batch(repo, proto, cmds, others):
+ res = []
+ for pair in cmds.split(';'):
+ op, args = pair.split(' ', 1)
+ vals = {}
+ for a in args.split(','):
+ if a:
+ n, v = a.split('=')
+ vals[n] = unescapearg(v)
+ func, spec = commands[op]
+ if spec:
+ keys = spec.split()
+ data = {}
+ for k in keys:
+ if k == '*':
+ star = {}
+ for key in vals.keys():
+ if key not in keys:
+ star[key] = vals[key]
+ data['*'] = star
+ else:
+ data[k] = vals[k]
+ result = func(repo, proto, *[data[k] for k in keys])
+ else:
+ result = func(repo, proto)
+ if isinstance(result, ooberror):
+ return result
+ res.append(escapearg(result))
+ return ';'.join(res)
+
+def between(repo, proto, pairs):
+ pairs = [decodelist(p, '-') for p in pairs.split(" ")]
+ r = []
+ for b in repo.between(pairs):
+ r.append(encodelist(b) + "\n")
+ return "".join(r)
+
+def branchmap(repo, proto):
+ branchmap = repo.branchmap()
+ heads = []
+ for branch, nodes in branchmap.iteritems():
+ branchname = urllib.quote(encoding.fromlocal(branch))
+ branchnodes = encodelist(nodes)
+ heads.append('%s %s' % (branchname, branchnodes))
+ return '\n'.join(heads)
+
+def branches(repo, proto, nodes):
+ nodes = decodelist(nodes)
+ r = []
+ for b in repo.branches(nodes):
+ r.append(encodelist(b) + "\n")
+ return "".join(r)
+
+def capabilities(repo, proto):
+ caps = ('lookup changegroupsubset branchmap pushkey known getbundle '
+ 'unbundlehash batch').split()
+ if _allowstream(repo.ui):
+ requiredformats = repo.requirements & repo.supportedformats
+ # if our local revlogs are just revlogv1, add 'stream' cap
+ if not requiredformats - set(('revlogv1',)):
+ caps.append('stream')
+ # otherwise, add 'streamreqs' detailing our local revlog format
+ else:
+ caps.append('streamreqs=%s' % ','.join(requiredformats))
+ caps.append('unbundle=%s' % ','.join(changegroupmod.bundlepriority))
+ caps.append('httpheader=1024')
+ return ' '.join(caps)
+
+def changegroup(repo, proto, roots):
+ nodes = decodelist(roots)
+ cg = repo.changegroup(nodes, 'serve')
+ return streamres(proto.groupchunks(cg))
+
+def changegroupsubset(repo, proto, bases, heads):
+ bases = decodelist(bases)
+ heads = decodelist(heads)
+ cg = repo.changegroupsubset(bases, heads, 'serve')
+ return streamres(proto.groupchunks(cg))
+
+def debugwireargs(repo, proto, one, two, others):
+ # only accept optional args from the known set
+ opts = options('debugwireargs', ['three', 'four'], others)
+ return repo.debugwireargs(one, two, **opts)
+
+def getbundle(repo, proto, others):
+ opts = options('getbundle', ['heads', 'common'], others)
+ for k, v in opts.iteritems():
+ opts[k] = decodelist(v)
+ cg = repo.getbundle('serve', **opts)
+ return streamres(proto.groupchunks(cg))
+
+def heads(repo, proto):
+ h = repo.heads()
+ return encodelist(h) + "\n"
+
+def hello(repo, proto):
+ '''the hello command returns a set of lines describing various
+ interesting things about the server, in an RFC822-like format.
+ Currently the only one defined is "capabilities", which
+ consists of a line in the form:
+
+ capabilities: space separated list of tokens
+ '''
+ return "capabilities: %s\n" % (capabilities(repo, proto))
+
+def listkeys(repo, proto, namespace):
+ d = repo.listkeys(encoding.tolocal(namespace)).items()
+ t = '\n'.join(['%s\t%s' % (encoding.fromlocal(k), encoding.fromlocal(v))
+ for k, v in d])
+ return t
+
+def lookup(repo, proto, key):
+ try:
+ r = hex(repo.lookup(encoding.tolocal(key)))
+ success = 1
+ except Exception, inst:
+ r = str(inst)
+ success = 0
+ return "%s %s\n" % (success, r)
+
+def known(repo, proto, nodes, others):
+ return ''.join(b and "1" or "0" for b in repo.known(decodelist(nodes)))
+
+def pushkey(repo, proto, namespace, key, old, new):
+ # compatibility with pre-1.8 clients which were accidentally
+ # sending raw binary nodes rather than utf-8-encoded hex
+ if len(new) == 20 and new.encode('string-escape') != new:
+ # looks like it could be a binary node
+ try:
+ new.decode('utf-8')
+ new = encoding.tolocal(new) # but cleanly decodes as UTF-8
+ except UnicodeDecodeError:
+ pass # binary, leave unmodified
+ else:
+ new = encoding.tolocal(new) # normal path
+
+ r = repo.pushkey(encoding.tolocal(namespace), encoding.tolocal(key),
+ encoding.tolocal(old), new)
+ return '%s\n' % int(r)
+
+def _allowstream(ui):
+ return ui.configbool('server', 'uncompressed', True, untrusted=True)
+
+def stream(repo, proto):
+ '''If the server supports streaming clone, it advertises the "stream"
+ capability with a value representing the version and flags of the repo
+ it is serving. Client checks to see if it understands the format.
+
+ The format is simple: the server writes out a line with the amount
+ of files, then the total amount of bytes to be transferred (separated
+ by a space). Then, for each file, the server first writes the filename
+ and filesize (separated by the null character), then the file contents.
+ '''
+
+ if not _allowstream(repo.ui):
+ return '1\n'
+
+ entries = []
+ total_bytes = 0
+ try:
+ # get consistent snapshot of repo, lock during scan
+ lock = repo.lock()
+ try:
+ repo.ui.debug('scanning\n')
+ for name, ename, size in repo.store.walk():
+ entries.append((name, size))
+ total_bytes += size
+ finally:
+ lock.release()
+ except error.LockError:
+ return '2\n' # error: 2
+
+ def streamer(repo, entries, total):
+ '''stream out all metadata files in repository.'''
+ yield '0\n' # success
+ repo.ui.debug('%d files, %d bytes to transfer\n' %
+ (len(entries), total_bytes))
+ yield '%d %d\n' % (len(entries), total_bytes)
+ for name, size in entries:
+ repo.ui.debug('sending %s (%d bytes)\n' % (name, size))
+ # partially encode name over the wire for backwards compat
+ yield '%s\0%d\n' % (store.encodedir(name), size)
+ for chunk in util.filechunkiter(repo.sopener(name), limit=size):
+ yield chunk
+
+ return streamres(streamer(repo, entries, total_bytes))
+
+def unbundle(repo, proto, heads):
+ their_heads = decodelist(heads)
+
+ def check_heads():
+ heads = repo.heads()
+ heads_hash = util.sha1(''.join(sorted(heads))).digest()
+ return (their_heads == ['force'] or their_heads == heads or
+ their_heads == ['hashed', heads_hash])
+
+ proto.redirect()
+
+ # fail early if possible
+ if not check_heads():
+ return pusherr('unsynced changes')
+
+ # write bundle data to temporary file because it can be big
+ fd, tempname = tempfile.mkstemp(prefix='hg-unbundle-')
+ fp = os.fdopen(fd, 'wb+')
+ r = 0
+ try:
+ proto.getfile(fp)
+ lock = repo.lock()
+ try:
+ if not check_heads():
+ # someone else committed/pushed/unbundled while we
+ # were transferring data
+ return pusherr('unsynced changes')
+
+ # push can proceed
+ fp.seek(0)
+ gen = changegroupmod.readbundle(fp, None)
+
+ try:
+ r = repo.addchangegroup(gen, 'serve', proto._client(),
+ lock=lock)
+ except util.Abort, inst:
+ sys.stderr.write("abort: %s\n" % inst)
+ finally:
+ lock.release()
+ return pushres(r)
+
+ finally:
+ fp.close()
+ os.unlink(tempname)
+
+commands = {
+ 'batch': (batch, 'cmds *'),
+ 'between': (between, 'pairs'),
+ 'branchmap': (branchmap, ''),
+ 'branches': (branches, 'nodes'),
+ 'capabilities': (capabilities, ''),
+ 'changegroup': (changegroup, 'roots'),
+ 'changegroupsubset': (changegroupsubset, 'bases heads'),
+ 'debugwireargs': (debugwireargs, 'one two *'),
+ 'getbundle': (getbundle, '*'),
+ 'heads': (heads, ''),
+ 'hello': (hello, ''),
+ 'known': (known, 'nodes *'),
+ 'listkeys': (listkeys, 'namespace'),
+ 'lookup': (lookup, 'key'),
+ 'pushkey': (pushkey, 'namespace key old new'),
+ 'stream_out': (stream, ''),
+ 'unbundle': (unbundle, 'heads'),
+}