From 39e7a3959f9bc95bc191ea1ac1ea06db63bf592a Mon Sep 17 00:00:00 2001 From: Sebastian Silva Date: Fri, 02 Dec 2011 20:57:56 +0000 Subject: change mercurial version --- diff --git a/websdk/mercurial/__version__.py b/websdk/mercurial/__version__.py index 02cef30..bc32403 120000..100644 --- a/websdk/mercurial/__version__.py +++ b/websdk/mercurial/__version__.py @@ -1 +1,2 @@ -/usr/share/pyshared/mercurial/__version__.py \ No newline at end of file +# this file is autogenerated by setup.py +version = "2.0.1" diff --git a/websdk/mercurial/ancestor.py b/websdk/mercurial/ancestor.py index 5be3ec0..867d683 120000..100644 --- a/websdk/mercurial/ancestor.py +++ b/websdk/mercurial/ancestor.py @@ -1 +1,91 @@ -/usr/share/pyshared/mercurial/ancestor.py \ No newline at end of file +# ancestor.py - generic DAG ancestor algorithm for mercurial +# +# Copyright 2006 Matt Mackall +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +import heapq + +def ancestor(a, b, pfunc): + """ + Returns the common ancestor of a and b that is furthest from a + root (as measured by longest path) or None if no ancestor is + found. If there are multiple common ancestors at the same + distance, the first one found is returned. + + pfunc must return a list of parent vertices for a given vertex + """ + + if a == b: + return a + + a, b = sorted([a, b]) + + # find depth from root of all ancestors + # depth is stored as a negative for heapq + parentcache = {} + visit = [a, b] + depth = {} + while visit: + vertex = visit[-1] + pl = pfunc(vertex) + parentcache[vertex] = pl + if not pl: + depth[vertex] = 0 + visit.pop() + else: + for p in pl: + if p == a or p == b: # did we find a or b as a parent? 
+ return p # we're done + if p not in depth: + visit.append(p) + if visit[-1] == vertex: + # -(maximum distance of parents + 1) + depth[vertex] = min([depth[p] for p in pl]) - 1 + visit.pop() + + # traverse ancestors in order of decreasing distance from root + def ancestors(vertex): + h = [(depth[vertex], vertex)] + seen = set() + while h: + d, n = heapq.heappop(h) + if n not in seen: + seen.add(n) + yield (d, n) + for p in parentcache[n]: + heapq.heappush(h, (depth[p], p)) + + def generations(vertex): + sg, s = None, set() + for g, v in ancestors(vertex): + if g != sg: + if sg: + yield sg, s + sg, s = g, set((v,)) + else: + s.add(v) + yield sg, s + + x = generations(a) + y = generations(b) + gx = x.next() + gy = y.next() + + # increment each ancestor list until it is closer to root than + # the other, or they match + try: + while True: + if gx[0] == gy[0]: + for v in gx[1]: + if v in gy[1]: + return v + gy = y.next() + gx = x.next() + elif gx[0] > gy[0]: + gy = y.next() + else: + gx = x.next() + except StopIteration: + return None diff --git a/websdk/mercurial/archival.py b/websdk/mercurial/archival.py index d99f47f..a7c271d 120000..100644 --- a/websdk/mercurial/archival.py +++ b/websdk/mercurial/archival.py @@ -1 +1,284 @@ -/usr/share/pyshared/mercurial/archival.py \ No newline at end of file +# archival.py - revision archival for mercurial +# +# Copyright 2006 Vadim Gelfer +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +from i18n import _ +from node import hex +import cmdutil +import scmutil, util, encoding +import cStringIO, os, tarfile, time, zipfile +import zlib, gzip + +def tidyprefix(dest, kind, prefix): + '''choose prefix to use for names in archive. 
make sure prefix is + safe for consumers.''' + + if prefix: + prefix = util.normpath(prefix) + else: + if not isinstance(dest, str): + raise ValueError('dest must be string if no prefix') + prefix = os.path.basename(dest) + lower = prefix.lower() + for sfx in exts.get(kind, []): + if lower.endswith(sfx): + prefix = prefix[:-len(sfx)] + break + lpfx = os.path.normpath(util.localpath(prefix)) + prefix = util.pconvert(lpfx) + if not prefix.endswith('/'): + prefix += '/' + if prefix.startswith('../') or os.path.isabs(lpfx) or '/../' in prefix: + raise util.Abort(_('archive prefix contains illegal components')) + return prefix + +exts = { + 'tar': ['.tar'], + 'tbz2': ['.tbz2', '.tar.bz2'], + 'tgz': ['.tgz', '.tar.gz'], + 'zip': ['.zip'], + } + +def guesskind(dest): + for kind, extensions in exts.iteritems(): + if util.any(dest.endswith(ext) for ext in extensions): + return kind + return None + + +class tarit(object): + '''write archive to tar file or stream. can write uncompressed, + or compress with gzip or bzip2.''' + + class GzipFileWithTime(gzip.GzipFile): + + def __init__(self, *args, **kw): + timestamp = None + if 'timestamp' in kw: + timestamp = kw.pop('timestamp') + if timestamp is None: + self.timestamp = time.time() + else: + self.timestamp = timestamp + gzip.GzipFile.__init__(self, *args, **kw) + + def _write_gzip_header(self): + self.fileobj.write('\037\213') # magic header + self.fileobj.write('\010') # compression method + # Python 2.6 deprecates self.filename + fname = getattr(self, 'name', None) or self.filename + if fname and fname.endswith('.gz'): + fname = fname[:-3] + flags = 0 + if fname: + flags = gzip.FNAME + self.fileobj.write(chr(flags)) + gzip.write32u(self.fileobj, long(self.timestamp)) + self.fileobj.write('\002') + self.fileobj.write('\377') + if fname: + self.fileobj.write(fname + '\000') + + def __init__(self, dest, mtime, kind=''): + self.mtime = mtime + self.fileobj = None + + def taropen(name, mode, fileobj=None): + if kind == 'gz': + 
mode = mode[0] + if not fileobj: + fileobj = open(name, mode + 'b') + gzfileobj = self.GzipFileWithTime(name, mode + 'b', + zlib.Z_BEST_COMPRESSION, + fileobj, timestamp=mtime) + self.fileobj = gzfileobj + return tarfile.TarFile.taropen(name, mode, gzfileobj) + else: + self.fileobj = fileobj + return tarfile.open(name, mode + kind, fileobj) + + if isinstance(dest, str): + self.z = taropen(dest, mode='w:') + else: + # Python 2.5-2.5.1 have a regression that requires a name arg + self.z = taropen(name='', mode='w|', fileobj=dest) + + def addfile(self, name, mode, islink, data): + i = tarfile.TarInfo(name) + i.mtime = self.mtime + i.size = len(data) + if islink: + i.type = tarfile.SYMTYPE + i.mode = 0777 + i.linkname = data + data = None + i.size = 0 + else: + i.mode = mode + data = cStringIO.StringIO(data) + self.z.addfile(i, data) + + def done(self): + self.z.close() + if self.fileobj: + self.fileobj.close() + +class tellable(object): + '''provide tell method for zipfile.ZipFile when writing to http + response file object.''' + + def __init__(self, fp): + self.fp = fp + self.offset = 0 + + def __getattr__(self, key): + return getattr(self.fp, key) + + def write(self, s): + self.fp.write(s) + self.offset += len(s) + + def tell(self): + return self.offset + +class zipit(object): + '''write archive to zip file or stream. can write uncompressed, + or compressed with deflate.''' + + def __init__(self, dest, mtime, compress=True): + if not isinstance(dest, str): + try: + dest.tell() + except (AttributeError, IOError): + dest = tellable(dest) + self.z = zipfile.ZipFile(dest, 'w', + compress and zipfile.ZIP_DEFLATED or + zipfile.ZIP_STORED) + + # Python's zipfile module emits deprecation warnings if we try + # to store files with a date before 1980. 
+ epoch = 315532800 # calendar.timegm((1980, 1, 1, 0, 0, 0, 1, 1, 0)) + if mtime < epoch: + mtime = epoch + + self.date_time = time.gmtime(mtime)[:6] + + def addfile(self, name, mode, islink, data): + i = zipfile.ZipInfo(name, self.date_time) + i.compress_type = self.z.compression + # unzip will not honor unix file modes unless file creator is + # set to unix (id 3). + i.create_system = 3 + ftype = 0x8000 # UNX_IFREG in unzip source code + if islink: + mode = 0777 + ftype = 0xa000 # UNX_IFLNK in unzip source code + i.external_attr = (mode | ftype) << 16L + self.z.writestr(i, data) + + def done(self): + self.z.close() + +class fileit(object): + '''write archive as files in directory.''' + + def __init__(self, name, mtime): + self.basedir = name + self.opener = scmutil.opener(self.basedir) + + def addfile(self, name, mode, islink, data): + if islink: + self.opener.symlink(data, name) + return + f = self.opener(name, "w", atomictemp=True) + f.write(data) + f.close() + destfile = os.path.join(self.basedir, name) + os.chmod(destfile, mode) + + def done(self): + pass + +archivers = { + 'files': fileit, + 'tar': tarit, + 'tbz2': lambda name, mtime: tarit(name, mtime, 'bz2'), + 'tgz': lambda name, mtime: tarit(name, mtime, 'gz'), + 'uzip': lambda name, mtime: zipit(name, mtime, False), + 'zip': zipit, + } + +def archive(repo, dest, node, kind, decode=True, matchfn=None, + prefix=None, mtime=None, subrepos=False): + '''create archive of repo as it was at node. + + dest can be name of directory, name of archive file, or file + object to write archive to. + + kind is type of archive to create. + + decode tells whether to put files through decode filters from + hgrc. + + matchfn is function to filter names of files to write to archive. 
+ + prefix is name of path to put before every archive member.''' + + if kind == 'files': + if prefix: + raise util.Abort(_('cannot give prefix when archiving to files')) + else: + prefix = tidyprefix(dest, kind, prefix) + + def write(name, mode, islink, getdata): + if matchfn and not matchfn(name): + return + data = getdata() + if decode: + data = repo.wwritedata(name, data) + archiver.addfile(prefix + name, mode, islink, data) + + if kind not in archivers: + raise util.Abort(_("unknown archive type '%s'") % kind) + + ctx = repo[node] + archiver = archivers[kind](dest, mtime or ctx.date()[0]) + + if repo.ui.configbool("ui", "archivemeta", True): + def metadata(): + base = 'repo: %s\nnode: %s\nbranch: %s\n' % ( + repo[0].hex(), hex(node), encoding.fromlocal(ctx.branch())) + + tags = ''.join('tag: %s\n' % t for t in ctx.tags() + if repo.tagtype(t) == 'global') + if not tags: + repo.ui.pushbuffer() + opts = {'template': '{latesttag}\n{latesttagdistance}', + 'style': '', 'patch': None, 'git': None} + cmdutil.show_changeset(repo.ui, repo, opts).show(ctx) + ltags, dist = repo.ui.popbuffer().split('\n') + tags = ''.join('latesttag: %s\n' % t for t in ltags.split(':')) + tags += 'latesttagdistance: %s\n' % dist + + return base + tags + + write('.hg_archival.txt', 0644, False, metadata) + + total = len(ctx.manifest()) + repo.ui.progress(_('archiving'), 0, unit=_('files'), total=total) + for i, f in enumerate(ctx): + ff = ctx.flags(f) + write(f, 'x' in ff and 0755 or 0644, 'l' in ff, ctx[f].data) + repo.ui.progress(_('archiving'), i + 1, item=f, + unit=_('files'), total=total) + repo.ui.progress(_('archiving'), None) + + if subrepos: + for subpath in ctx.substate: + sub = ctx.sub(subpath) + sub.archive(repo.ui, archiver, prefix) + + archiver.done() diff --git a/websdk/mercurial/bookmarks.py b/websdk/mercurial/bookmarks.py new file mode 100644 index 0000000..587cefc --- /dev/null +++ b/websdk/mercurial/bookmarks.py @@ -0,0 +1,213 @@ +# Mercurial bookmark support code +# +# 
Copyright 2008 David Soria Parra +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +from mercurial.i18n import _ +from mercurial.node import hex +from mercurial import encoding, error, util +import errno, os + +def valid(mark): + for c in (':', '\0', '\n', '\r'): + if c in mark: + return False + return True + +def read(repo): + '''Parse .hg/bookmarks file and return a dictionary + + Bookmarks are stored as {HASH}\\s{NAME}\\n (localtags format) values + in the .hg/bookmarks file. + Read the file and return a (name=>nodeid) dictionary + ''' + bookmarks = {} + try: + for line in repo.opener('bookmarks'): + line = line.strip() + if not line: + continue + if ' ' not in line: + repo.ui.warn(_('malformed line in .hg/bookmarks: %r\n') % line) + continue + sha, refspec = line.split(' ', 1) + refspec = encoding.tolocal(refspec) + try: + bookmarks[refspec] = repo.changelog.lookup(sha) + except error.RepoLookupError: + pass + except IOError, inst: + if inst.errno != errno.ENOENT: + raise + return bookmarks + +def readcurrent(repo): + '''Get the current bookmark + + If we use gittishsh branches we have a current bookmark that + we are on. This function returns the name of the bookmark. It + is stored in .hg/bookmarks.current + ''' + mark = None + try: + file = repo.opener('bookmarks.current') + except IOError, inst: + if inst.errno != errno.ENOENT: + raise + return None + try: + # No readline() in posixfile_nt, reading everything is cheap + mark = encoding.tolocal((file.readlines() or [''])[0]) + if mark == '' or mark not in repo._bookmarks: + mark = None + finally: + file.close() + return mark + +def write(repo): + '''Write bookmarks + + Write the given bookmark => hash dictionary to the .hg/bookmarks file + in a format equal to those of localtags. + + We also store a backup of the previous state in undo.bookmarks that + can be copied back on rollback. 
+ ''' + refs = repo._bookmarks + + if repo._bookmarkcurrent not in refs: + setcurrent(repo, None) + for mark in refs.keys(): + if not valid(mark): + raise util.Abort(_("bookmark '%s' contains illegal " + "character" % mark)) + + wlock = repo.wlock() + try: + + file = repo.opener('bookmarks', 'w', atomictemp=True) + for refspec, node in refs.iteritems(): + file.write("%s %s\n" % (hex(node), encoding.fromlocal(refspec))) + file.close() + + # touch 00changelog.i so hgweb reloads bookmarks (no lock needed) + try: + os.utime(repo.sjoin('00changelog.i'), None) + except OSError: + pass + + finally: + wlock.release() + +def setcurrent(repo, mark): + '''Set the name of the bookmark that we are currently on + + Set the name of the bookmark that we are on (hg update ). + The name is recorded in .hg/bookmarks.current + ''' + current = repo._bookmarkcurrent + if current == mark: + return + + if mark not in repo._bookmarks: + mark = '' + if not valid(mark): + raise util.Abort(_("bookmark '%s' contains illegal " + "character" % mark)) + + wlock = repo.wlock() + try: + file = repo.opener('bookmarks.current', 'w', atomictemp=True) + file.write(encoding.fromlocal(mark)) + file.close() + finally: + wlock.release() + repo._bookmarkcurrent = mark + +def updatecurrentbookmark(repo, oldnode, curbranch): + try: + update(repo, oldnode, repo.branchtags()[curbranch]) + except KeyError: + if curbranch == "default": # no default branch! + update(repo, oldnode, repo.lookup("tip")) + else: + raise util.Abort(_("branch %s not found") % curbranch) + +def update(repo, parents, node): + marks = repo._bookmarks + update = False + mark = repo._bookmarkcurrent + if mark and marks[mark] in parents: + old = repo[marks[mark]] + new = repo[node] + if new in old.descendants(): + marks[mark] = new.node() + update = True + if update: + repo._writebookmarks(marks) + +def listbookmarks(repo): + # We may try to list bookmarks on a repo type that does not + # support it (e.g., statichttprepository). 
+ marks = getattr(repo, '_bookmarks', {}) + + d = {} + for k, v in marks.iteritems(): + d[k] = hex(v) + return d + +def pushbookmark(repo, key, old, new): + w = repo.wlock() + try: + marks = repo._bookmarks + if hex(marks.get(key, '')) != old: + return False + if new == '': + del marks[key] + else: + if new not in repo: + return False + marks[key] = repo[new].node() + write(repo) + return True + finally: + w.release() + +def updatefromremote(ui, repo, remote): + ui.debug("checking for updated bookmarks\n") + rb = remote.listkeys('bookmarks') + changed = False + for k in rb.keys(): + if k in repo._bookmarks: + nr, nl = rb[k], repo._bookmarks[k] + if nr in repo: + cr = repo[nr] + cl = repo[nl] + if cl.rev() >= cr.rev(): + continue + if cr in cl.descendants(): + repo._bookmarks[k] = cr.node() + changed = True + ui.status(_("updating bookmark %s\n") % k) + else: + ui.warn(_("not updating divergent" + " bookmark %s\n") % k) + if changed: + write(repo) + +def diff(ui, repo, remote): + ui.status(_("searching for changed bookmarks\n")) + + lmarks = repo.listkeys('bookmarks') + rmarks = remote.listkeys('bookmarks') + + diff = sorted(set(rmarks) - set(lmarks)) + for k in diff: + ui.write(" %-25s %s\n" % (k, rmarks[k][:12])) + + if len(diff) <= 0: + ui.status(_("no changed bookmarks found\n")) + return 1 + return 0 diff --git a/websdk/mercurial/bundlerepo.py b/websdk/mercurial/bundlerepo.py index 8da29ad..5ee67b9 120000..100644 --- a/websdk/mercurial/bundlerepo.py +++ b/websdk/mercurial/bundlerepo.py @@ -1 +1,363 @@ -/usr/share/pyshared/mercurial/bundlerepo.py \ No newline at end of file +# bundlerepo.py - repository class for viewing uncompressed bundles +# +# Copyright 2006, 2007 Benoit Boissinot +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +"""Repository class for viewing uncompressed bundles. 
+ +This provides a read-only repository interface to bundles as if they +were part of the actual repository. +""" + +from node import nullid +from i18n import _ +import os, tempfile, shutil +import changegroup, util, mdiff, discovery +import localrepo, changelog, manifest, filelog, revlog, error + +class bundlerevlog(revlog.revlog): + def __init__(self, opener, indexfile, bundle, linkmapper): + # How it works: + # to retrieve a revision, we need to know the offset of + # the revision in the bundle (an unbundle object). + # + # We store this offset in the index (start), to differentiate a + # rev in the bundle and from a rev in the revlog, we check + # len(index[r]). If the tuple is bigger than 7, it is a bundle + # (it is bigger since we store the node to which the delta is) + # + revlog.revlog.__init__(self, opener, indexfile) + self.bundle = bundle + self.basemap = {} + n = len(self) + chain = None + while True: + chunkdata = bundle.deltachunk(chain) + if not chunkdata: + break + node = chunkdata['node'] + p1 = chunkdata['p1'] + p2 = chunkdata['p2'] + cs = chunkdata['cs'] + deltabase = chunkdata['deltabase'] + delta = chunkdata['delta'] + + size = len(delta) + start = bundle.tell() - size + + link = linkmapper(cs) + if node in self.nodemap: + # this can happen if two branches make the same change + chain = node + continue + + for p in (p1, p2): + if not p in self.nodemap: + raise error.LookupError(p, self.indexfile, + _("unknown parent")) + # start, size, full unc. 
size, base (unused), link, p1, p2, node + e = (revlog.offset_type(start, 0), size, -1, -1, link, + self.rev(p1), self.rev(p2), node) + self.basemap[n] = deltabase + self.index.insert(-1, e) + self.nodemap[node] = n + chain = node + n += 1 + + def inbundle(self, rev): + """is rev from the bundle""" + if rev < 0: + return False + return rev in self.basemap + def bundlebase(self, rev): + return self.basemap[rev] + def _chunk(self, rev): + # Warning: in case of bundle, the diff is against bundlebase, + # not against rev - 1 + # XXX: could use some caching + if not self.inbundle(rev): + return revlog.revlog._chunk(self, rev) + self.bundle.seek(self.start(rev)) + return self.bundle.read(self.length(rev)) + + def revdiff(self, rev1, rev2): + """return or calculate a delta between two revisions""" + if self.inbundle(rev1) and self.inbundle(rev2): + # hot path for bundle + revb = self.rev(self.bundlebase(rev2)) + if revb == rev1: + return self._chunk(rev2) + elif not self.inbundle(rev1) and not self.inbundle(rev2): + return revlog.revlog.revdiff(self, rev1, rev2) + + return mdiff.textdiff(self.revision(self.node(rev1)), + self.revision(self.node(rev2))) + + def revision(self, node): + """return an uncompressed revision of a given""" + if node == nullid: + return "" + + text = None + chain = [] + iter_node = node + rev = self.rev(iter_node) + # reconstruct the revision if it is from a changegroup + while self.inbundle(rev): + if self._cache and self._cache[0] == iter_node: + text = self._cache[2] + break + chain.append(rev) + iter_node = self.bundlebase(rev) + rev = self.rev(iter_node) + if text is None: + text = revlog.revlog.revision(self, iter_node) + + while chain: + delta = self._chunk(chain.pop()) + text = mdiff.patches(text, [delta]) + + p1, p2 = self.parents(node) + if node != revlog.hash(text, p1, p2): + raise error.RevlogError(_("integrity check failed on %s:%d") + % (self.datafile, self.rev(node))) + + self._cache = (node, self.rev(node), text) + return text + + 
def addrevision(self, text, transaction, link, p1=None, p2=None, d=None): + raise NotImplementedError + def addgroup(self, revs, linkmapper, transaction): + raise NotImplementedError + def strip(self, rev, minlink): + raise NotImplementedError + def checksize(self): + raise NotImplementedError + +class bundlechangelog(bundlerevlog, changelog.changelog): + def __init__(self, opener, bundle): + changelog.changelog.__init__(self, opener) + linkmapper = lambda x: x + bundlerevlog.__init__(self, opener, self.indexfile, bundle, + linkmapper) + +class bundlemanifest(bundlerevlog, manifest.manifest): + def __init__(self, opener, bundle, linkmapper): + manifest.manifest.__init__(self, opener) + bundlerevlog.__init__(self, opener, self.indexfile, bundle, + linkmapper) + +class bundlefilelog(bundlerevlog, filelog.filelog): + def __init__(self, opener, path, bundle, linkmapper, repo): + filelog.filelog.__init__(self, opener, path) + bundlerevlog.__init__(self, opener, self.indexfile, bundle, + linkmapper) + self._repo = repo + + def _file(self, f): + self._repo.file(f) + +class bundlerepository(localrepo.localrepository): + def __init__(self, ui, path, bundlename): + self._tempparent = None + try: + localrepo.localrepository.__init__(self, ui, path) + except error.RepoError: + self._tempparent = tempfile.mkdtemp() + localrepo.instance(ui, self._tempparent, 1) + localrepo.localrepository.__init__(self, ui, self._tempparent) + + if path: + self._url = 'bundle:' + util.expandpath(path) + '+' + bundlename + else: + self._url = 'bundle:' + bundlename + + self.tempfile = None + f = util.posixfile(bundlename, "rb") + self.bundle = changegroup.readbundle(f, bundlename) + if self.bundle.compressed(): + fdtemp, temp = tempfile.mkstemp(prefix="hg-bundle-", + suffix=".hg10un", dir=self.path) + self.tempfile = temp + fptemp = os.fdopen(fdtemp, 'wb') + + try: + fptemp.write("HG10UN") + while True: + chunk = self.bundle.read(2**18) + if not chunk: + break + fptemp.write(chunk) + finally: + 
fptemp.close() + + f = util.posixfile(self.tempfile, "rb") + self.bundle = changegroup.readbundle(f, bundlename) + + # dict with the mapping 'filename' -> position in the bundle + self.bundlefilespos = {} + + @util.propertycache + def changelog(self): + # consume the header if it exists + self.bundle.changelogheader() + c = bundlechangelog(self.sopener, self.bundle) + self.manstart = self.bundle.tell() + return c + + @util.propertycache + def manifest(self): + self.bundle.seek(self.manstart) + # consume the header if it exists + self.bundle.manifestheader() + m = bundlemanifest(self.sopener, self.bundle, self.changelog.rev) + self.filestart = self.bundle.tell() + return m + + @util.propertycache + def manstart(self): + self.changelog + return self.manstart + + @util.propertycache + def filestart(self): + self.manifest + return self.filestart + + def url(self): + return self._url + + def file(self, f): + if not self.bundlefilespos: + self.bundle.seek(self.filestart) + while True: + chunkdata = self.bundle.filelogheader() + if not chunkdata: + break + fname = chunkdata['filename'] + self.bundlefilespos[fname] = self.bundle.tell() + while True: + c = self.bundle.deltachunk(None) + if not c: + break + + if f[0] == '/': + f = f[1:] + if f in self.bundlefilespos: + self.bundle.seek(self.bundlefilespos[f]) + return bundlefilelog(self.sopener, f, self.bundle, + self.changelog.rev, self) + else: + return filelog.filelog(self.sopener, f) + + def close(self): + """Close assigned bundle file immediately.""" + self.bundle.close() + if self.tempfile is not None: + os.unlink(self.tempfile) + if self._tempparent: + shutil.rmtree(self._tempparent, True) + + def cancopy(self): + return False + + def getcwd(self): + return os.getcwd() # always outside the repo + + def _writebranchcache(self, branches, tip, tiprev): + # don't overwrite the disk cache with bundle-augmented data + pass + +def instance(ui, path, create): + if create: + raise util.Abort(_('cannot create new bundle 
repository')) + parentpath = ui.config("bundle", "mainreporoot", "") + if parentpath: + # Try to make the full path relative so we get a nice, short URL. + # In particular, we don't want temp dir names in test outputs. + cwd = os.getcwd() + if parentpath == cwd: + parentpath = '' + else: + cwd = os.path.join(cwd,'') + if parentpath.startswith(cwd): + parentpath = parentpath[len(cwd):] + u = util.url(path) + path = u.localpath() + if u.scheme == 'bundle': + s = path.split("+", 1) + if len(s) == 1: + repopath, bundlename = parentpath, s[0] + else: + repopath, bundlename = s + else: + repopath, bundlename = parentpath, path + return bundlerepository(ui, repopath, bundlename) + +def getremotechanges(ui, repo, other, onlyheads=None, bundlename=None, + force=False): + '''obtains a bundle of changes incoming from other + + "onlyheads" restricts the returned changes to those reachable from the + specified heads. + "bundlename", if given, stores the bundle to this file path permanently; + otherwise it's stored to a temp file and gets deleted again when you call + the returned "cleanupfn". + "force" indicates whether to proceed on unrelated repos. + + Returns a tuple (local, csets, cleanupfn): + + "local" is a local repo from which to obtain the actual incoming changesets; it + is a bundlerepo for the obtained bundle when the original "other" is remote. + "csets" lists the incoming changeset node ids. + "cleanupfn" must be called without arguments when you're done processing the + changes; it closes both the original "other" and the one returned here. 
+ ''' + tmp = discovery.findcommonincoming(repo, other, heads=onlyheads, force=force) + common, incoming, rheads = tmp + if not incoming: + try: + if bundlename: + os.unlink(bundlename) + except OSError: + pass + return other, [], other.close + + bundle = None + bundlerepo = None + localrepo = other + if bundlename or not other.local(): + # create a bundle (uncompressed if other repo is not local) + + if other.capable('getbundle'): + cg = other.getbundle('incoming', common=common, heads=rheads) + elif onlyheads is None and not other.capable('changegroupsubset'): + # compat with older servers when pulling all remote heads + cg = other.changegroup(incoming, "incoming") + rheads = None + else: + cg = other.changegroupsubset(incoming, rheads, 'incoming') + bundletype = other.local() and "HG10BZ" or "HG10UN" + fname = bundle = changegroup.writebundle(cg, bundlename, bundletype) + # keep written bundle? + if bundlename: + bundle = None + if not other.local(): + # use the created uncompressed bundlerepo + localrepo = bundlerepo = bundlerepository(ui, repo.root, fname) + # this repo contains local and other now, so filter out local again + common = repo.heads() + + csets = localrepo.changelog.findmissing(common, rheads) + + def cleanup(): + if bundlerepo: + bundlerepo.close() + if bundle: + os.unlink(bundle) + other.close() + + return (localrepo, csets, cleanup) + diff --git a/websdk/mercurial/byterange.py b/websdk/mercurial/byterange.py index a9d1349..baa67d4 120000..100644 --- a/websdk/mercurial/byterange.py +++ b/websdk/mercurial/byterange.py @@ -1 +1,462 @@ -/usr/share/pyshared/mercurial/byterange.py \ No newline at end of file +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. 
+# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the +# Free Software Foundation, Inc., +# 59 Temple Place, Suite 330, +# Boston, MA 02111-1307 USA + +# This file is part of urlgrabber, a high-level cross-protocol url-grabber +# Copyright 2002-2004 Michael D. Stenner, Ryan Tomayko + +# $Id: byterange.py,v 1.9 2005/02/14 21:55:07 mstenner Exp $ + +import os +import stat +import urllib +import urllib2 +import email.Utils + +class RangeError(IOError): + """Error raised when an unsatisfiable range is requested.""" + pass + +class HTTPRangeHandler(urllib2.BaseHandler): + """Handler that enables HTTP Range headers. + + This was extremely simple. The Range header is a HTTP feature to + begin with so all this class does is tell urllib2 that the + "206 Partial Content" reponse from the HTTP server is what we + expected. + + Example: + import urllib2 + import byterange + + range_handler = range.HTTPRangeHandler() + opener = urllib2.build_opener(range_handler) + + # install it + urllib2.install_opener(opener) + + # create Request and set Range header + req = urllib2.Request('http://www.python.org/') + req.header['Range'] = 'bytes=30-50' + f = urllib2.urlopen(req) + """ + + def http_error_206(self, req, fp, code, msg, hdrs): + # 206 Partial Content Response + r = urllib.addinfourl(fp, hdrs, req.get_full_url()) + r.code = code + r.msg = msg + return r + + def http_error_416(self, req, fp, code, msg, hdrs): + # HTTP's Range Not Satisfiable error + raise RangeError('Requested Range Not Satisfiable') + +class RangeableFileObject(object): + """File object wrapper to enable raw range handling. 
+ This was implemented primarilary for handling range + specifications for file:// urls. This object effectively makes + a file object look like it consists only of a range of bytes in + the stream. + + Examples: + # expose 10 bytes, starting at byte position 20, from + # /etc/aliases. + >>> fo = RangeableFileObject(file('/etc/passwd', 'r'), (20,30)) + # seek seeks within the range (to position 23 in this case) + >>> fo.seek(3) + # tell tells where your at _within the range_ (position 3 in + # this case) + >>> fo.tell() + # read EOFs if an attempt is made to read past the last + # byte in the range. the following will return only 7 bytes. + >>> fo.read(30) + """ + + def __init__(self, fo, rangetup): + """Create a RangeableFileObject. + fo -- a file like object. only the read() method need be + supported but supporting an optimized seek() is + preferable. + rangetup -- a (firstbyte,lastbyte) tuple specifying the range + to work over. + The file object provided is assumed to be at byte offset 0. + """ + self.fo = fo + (self.firstbyte, self.lastbyte) = range_tuple_normalize(rangetup) + self.realpos = 0 + self._do_seek(self.firstbyte) + + def __getattr__(self, name): + """This effectively allows us to wrap at the instance level. + Any attribute not found in _this_ object will be searched for + in self.fo. This includes methods.""" + return getattr(self.fo, name) + + def tell(self): + """Return the position within the range. + This is different from fo.seek in that position 0 is the + first byte position of the range tuple. For example, if + this object was created with a range tuple of (500,899), + tell() will return 0 when at byte position 500 of the file. + """ + return (self.realpos - self.firstbyte) + + def seek(self, offset, whence=0): + """Seek within the byte range. + Positioning is identical to that described under tell(). 
+ """ + assert whence in (0, 1, 2) + if whence == 0: # absolute seek + realoffset = self.firstbyte + offset + elif whence == 1: # relative seek + realoffset = self.realpos + offset + elif whence == 2: # absolute from end of file + # XXX: are we raising the right Error here? + raise IOError('seek from end of file not supported.') + + # do not allow seek past lastbyte in range + if self.lastbyte and (realoffset >= self.lastbyte): + realoffset = self.lastbyte + + self._do_seek(realoffset - self.realpos) + + def read(self, size=-1): + """Read within the range. + This method will limit the size read based on the range. + """ + size = self._calc_read_size(size) + rslt = self.fo.read(size) + self.realpos += len(rslt) + return rslt + + def readline(self, size=-1): + """Read lines within the range. + This method will limit the size read based on the range. + """ + size = self._calc_read_size(size) + rslt = self.fo.readline(size) + self.realpos += len(rslt) + return rslt + + def _calc_read_size(self, size): + """Handles calculating the amount of data to read based on + the range. + """ + if self.lastbyte: + if size > -1: + if ((self.realpos + size) >= self.lastbyte): + size = (self.lastbyte - self.realpos) + else: + size = (self.lastbyte - self.realpos) + return size + + def _do_seek(self, offset): + """Seek based on whether wrapped object supports seek(). + offset is relative to the current position (self.realpos). + """ + assert offset >= 0 + seek = getattr(self.fo, 'seek', self._poor_mans_seek) + seek(self.realpos + offset) + self.realpos += offset + + def _poor_mans_seek(self, offset): + """Seek by calling the wrapped file objects read() method. + This is used for file like objects that do not have native + seek support. The wrapped objects read() method is called + to manually seek to the desired position. + offset -- read this number of bytes from the wrapped + file object. + raise RangeError if we encounter EOF before reaching the + specified offset. 
+ """ + pos = 0 + bufsize = 1024 + while pos < offset: + if (pos + bufsize) > offset: + bufsize = offset - pos + buf = self.fo.read(bufsize) + if len(buf) != bufsize: + raise RangeError('Requested Range Not Satisfiable') + pos += bufsize + +class FileRangeHandler(urllib2.FileHandler): + """FileHandler subclass that adds Range support. + This class handles Range headers exactly like an HTTP + server would. + """ + def open_local_file(self, req): + import mimetypes + import email + host = req.get_host() + file = req.get_selector() + localfile = urllib.url2pathname(file) + stats = os.stat(localfile) + size = stats[stat.ST_SIZE] + modified = email.Utils.formatdate(stats[stat.ST_MTIME]) + mtype = mimetypes.guess_type(file)[0] + if host: + host, port = urllib.splitport(host) + if port or socket.gethostbyname(host) not in self.get_names(): + raise urllib2.URLError('file not on local host') + fo = open(localfile,'rb') + brange = req.headers.get('Range', None) + brange = range_header_to_tuple(brange) + assert brange != () + if brange: + (fb, lb) = brange + if lb == '': + lb = size + if fb < 0 or fb > size or lb > size: + raise RangeError('Requested Range Not Satisfiable') + size = (lb - fb) + fo = RangeableFileObject(fo, (fb, lb)) + headers = email.message_from_string( + 'Content-Type: %s\nContent-Length: %d\nLast-Modified: %s\n' % + (mtype or 'text/plain', size, modified)) + return urllib.addinfourl(fo, headers, 'file:'+file) + + +# FTP Range Support +# Unfortunately, a large amount of base FTP code had to be copied +# from urllib and urllib2 in order to insert the FTP REST command. 
+# Code modifications for range support have been commented as +# follows: +# -- range support modifications start/end here + +from urllib import splitport, splituser, splitpasswd, splitattr, \ + unquote, addclosehook, addinfourl +import ftplib +import socket +import sys +import mimetypes +import email + +class FTPRangeHandler(urllib2.FTPHandler): + def ftp_open(self, req): + host = req.get_host() + if not host: + raise IOError('ftp error', 'no host given') + host, port = splitport(host) + if port is None: + port = ftplib.FTP_PORT + else: + port = int(port) + + # username/password handling + user, host = splituser(host) + if user: + user, passwd = splitpasswd(user) + else: + passwd = None + host = unquote(host) + user = unquote(user or '') + passwd = unquote(passwd or '') + + try: + host = socket.gethostbyname(host) + except socket.error, msg: + raise urllib2.URLError(msg) + path, attrs = splitattr(req.get_selector()) + dirs = path.split('/') + dirs = map(unquote, dirs) + dirs, file = dirs[:-1], dirs[-1] + if dirs and not dirs[0]: + dirs = dirs[1:] + try: + fw = self.connect_ftp(user, passwd, host, port, dirs) + type = file and 'I' or 'D' + for attr in attrs: + attr, value = splitattr(attr) + if attr.lower() == 'type' and \ + value in ('a', 'A', 'i', 'I', 'd', 'D'): + type = value.upper() + + # -- range support modifications start here + rest = None + range_tup = range_header_to_tuple(req.headers.get('Range', None)) + assert range_tup != () + if range_tup: + (fb, lb) = range_tup + if fb > 0: + rest = fb + # -- range support modifications end here + + fp, retrlen = fw.retrfile(file, type, rest) + + # -- range support modifications start here + if range_tup: + (fb, lb) = range_tup + if lb == '': + if retrlen is None or retrlen == 0: + raise RangeError('Requested Range Not Satisfiable due' + ' to unobtainable file length.') + lb = retrlen + retrlen = lb - fb + if retrlen < 0: + # beginning of range is larger than file + raise RangeError('Requested Range Not 
Satisfiable') + else: + retrlen = lb - fb + fp = RangeableFileObject(fp, (0, retrlen)) + # -- range support modifications end here + + headers = "" + mtype = mimetypes.guess_type(req.get_full_url())[0] + if mtype: + headers += "Content-Type: %s\n" % mtype + if retrlen is not None and retrlen >= 0: + headers += "Content-Length: %d\n" % retrlen + headers = email.message_from_string(headers) + return addinfourl(fp, headers, req.get_full_url()) + except ftplib.all_errors, msg: + raise IOError('ftp error', msg), sys.exc_info()[2] + + def connect_ftp(self, user, passwd, host, port, dirs): + fw = ftpwrapper(user, passwd, host, port, dirs) + return fw + +class ftpwrapper(urllib.ftpwrapper): + # range support note: + # this ftpwrapper code is copied directly from + # urllib. The only enhancement is to add the rest + # argument and pass it on to ftp.ntransfercmd + def retrfile(self, file, type, rest=None): + self.endtransfer() + if type in ('d', 'D'): + cmd = 'TYPE A' + isdir = 1 + else: + cmd = 'TYPE ' + type + isdir = 0 + try: + self.ftp.voidcmd(cmd) + except ftplib.all_errors: + self.init() + self.ftp.voidcmd(cmd) + conn = None + if file and not isdir: + # Use nlst to see if the file exists at all + try: + self.ftp.nlst(file) + except ftplib.error_perm, reason: + raise IOError('ftp error', reason), sys.exc_info()[2] + # Restore the transfer mode! + self.ftp.voidcmd(cmd) + # Try to retrieve as a file + try: + cmd = 'RETR ' + file + conn = self.ftp.ntransfercmd(cmd, rest) + except ftplib.error_perm, reason: + if str(reason).startswith('501'): + # workaround for REST not supported error + fp, retrlen = self.retrfile(file, type) + fp = RangeableFileObject(fp, (rest,'')) + return (fp, retrlen) + elif not str(reason).startswith('550'): + raise IOError('ftp error', reason), sys.exc_info()[2] + if not conn: + # Set transfer mode to ASCII! 
+ self.ftp.voidcmd('TYPE A') + # Try a directory listing + if file: + cmd = 'LIST ' + file + else: + cmd = 'LIST' + conn = self.ftp.ntransfercmd(cmd) + self.busy = 1 + # Pass back both a suitably decorated object and a retrieval length + return (addclosehook(conn[0].makefile('rb'), + self.endtransfer), conn[1]) + + +#################################################################### +# Range Tuple Functions +# XXX: These range tuple functions might go better in a class. + +_rangere = None +def range_header_to_tuple(range_header): + """Get a (firstbyte,lastbyte) tuple from a Range header value. + + Range headers have the form "bytes=-". This + function pulls the firstbyte and lastbyte values and returns + a (firstbyte,lastbyte) tuple. If lastbyte is not specified in + the header value, it is returned as an empty string in the + tuple. + + Return None if range_header is None + Return () if range_header does not conform to the range spec + pattern. + + """ + global _rangere + if range_header is None: + return None + if _rangere is None: + import re + _rangere = re.compile(r'^bytes=(\d{1,})-(\d*)') + match = _rangere.match(range_header) + if match: + tup = range_tuple_normalize(match.group(1, 2)) + if tup and tup[1]: + tup = (tup[0], tup[1]+1) + return tup + return () + +def range_tuple_to_header(range_tup): + """Convert a range tuple to a Range header value. + Return a string of the form "bytes=-" or None + if no range is needed. + """ + if range_tup is None: + return None + range_tup = range_tuple_normalize(range_tup) + if range_tup: + if range_tup[1]: + range_tup = (range_tup[0], range_tup[1] - 1) + return 'bytes=%s-%s' % range_tup + +def range_tuple_normalize(range_tup): + """Normalize a (first_byte,last_byte) range tuple. + Return a tuple whose first element is guaranteed to be an int + and whose second element will be '' (meaning: the last byte) or + an int. 
Finally, return None if the normalized tuple == (0,'') + as that is equivelant to retrieving the entire file. + """ + if range_tup is None: + return None + # handle first byte + fb = range_tup[0] + if fb in (None, ''): + fb = 0 + else: + fb = int(fb) + # handle last byte + try: + lb = range_tup[1] + except IndexError: + lb = '' + else: + if lb is None: + lb = '' + elif lb != '': + lb = int(lb) + # check if range is over the entire file + if (fb, lb) == (0, ''): + return None + # check that the range is valid + if lb < fb: + raise RangeError('Invalid byte range: %s-%s' % (fb, lb)) + return (fb, lb) diff --git a/websdk/mercurial/changegroup.py b/websdk/mercurial/changegroup.py index 6ea0821..7ee7b3f 120000..100644 --- a/websdk/mercurial/changegroup.py +++ b/websdk/mercurial/changegroup.py @@ -1 +1,256 @@ -/usr/share/pyshared/mercurial/changegroup.py \ No newline at end of file +# changegroup.py - Mercurial changegroup manipulation functions +# +# Copyright 2006 Matt Mackall +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. 
+ +from i18n import _ +from node import nullrev +import mdiff, util +import struct, os, bz2, zlib, tempfile + +_BUNDLE10_DELTA_HEADER = "20s20s20s20s" + +def readexactly(stream, n): + '''read n bytes from stream.read and abort if less was available''' + s = stream.read(n) + if len(s) < n: + raise util.Abort(_("stream ended unexpectedly" + " (got %d bytes, expected %d)") + % (len(s), n)) + return s + +def getchunk(stream): + """return the next chunk from stream as a string""" + d = readexactly(stream, 4) + l = struct.unpack(">l", d)[0] + if l <= 4: + if l: + raise util.Abort(_("invalid chunk length %d") % l) + return "" + return readexactly(stream, l - 4) + +def chunkheader(length): + """return a changegroup chunk header (string)""" + return struct.pack(">l", length + 4) + +def closechunk(): + """return a changegroup chunk header (string) for a zero-length chunk""" + return struct.pack(">l", 0) + +class nocompress(object): + def compress(self, x): + return x + def flush(self): + return "" + +bundletypes = { + "": ("", nocompress), # only when using unbundle on ssh and old http servers + # since the unification ssh accepts a header but there + # is no capability signaling it. + "HG10UN": ("HG10UN", nocompress), + "HG10BZ": ("HG10", lambda: bz2.BZ2Compressor()), + "HG10GZ": ("HG10GZ", lambda: zlib.compressobj()), +} + +# hgweb uses this list to communicate its preferred type +bundlepriority = ['HG10GZ', 'HG10BZ', 'HG10UN'] + +def writebundle(cg, filename, bundletype): + """Write a bundle file and return its filename. + + Existing files will not be overwritten. + If no filename is specified, a temporary file is created. + bz2 compression can be turned off. + The bundle file will be deleted in case of errors. 
+ """ + + fh = None + cleanup = None + try: + if filename: + fh = open(filename, "wb") + else: + fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg") + fh = os.fdopen(fd, "wb") + cleanup = filename + + header, compressor = bundletypes[bundletype] + fh.write(header) + z = compressor() + + # parse the changegroup data, otherwise we will block + # in case of sshrepo because we don't know the end of the stream + + # an empty chunkgroup is the end of the changegroup + # a changegroup has at least 2 chunkgroups (changelog and manifest). + # after that, an empty chunkgroup is the end of the changegroup + empty = False + count = 0 + while not empty or count <= 2: + empty = True + count += 1 + while True: + chunk = getchunk(cg) + if not chunk: + break + empty = False + fh.write(z.compress(chunkheader(len(chunk)))) + pos = 0 + while pos < len(chunk): + next = pos + 2**20 + fh.write(z.compress(chunk[pos:next])) + pos = next + fh.write(z.compress(closechunk())) + fh.write(z.flush()) + cleanup = None + return filename + finally: + if fh is not None: + fh.close() + if cleanup is not None: + os.unlink(cleanup) + +def decompressor(fh, alg): + if alg == 'UN': + return fh + elif alg == 'GZ': + def generator(f): + zd = zlib.decompressobj() + for chunk in f: + yield zd.decompress(chunk) + elif alg == 'BZ': + def generator(f): + zd = bz2.BZ2Decompressor() + zd.decompress("BZ") + for chunk in util.filechunkiter(f, 4096): + yield zd.decompress(chunk) + else: + raise util.Abort("unknown bundle compression '%s'" % alg) + return util.chunkbuffer(generator(fh)) + +class unbundle10(object): + deltaheader = _BUNDLE10_DELTA_HEADER + deltaheadersize = struct.calcsize(deltaheader) + def __init__(self, fh, alg): + self._stream = decompressor(fh, alg) + self._type = alg + self.callback = None + def compressed(self): + return self._type != 'UN' + def read(self, l): + return self._stream.read(l) + def seek(self, pos): + return self._stream.seek(pos) + def tell(self): + return 
self._stream.tell() + def close(self): + return self._stream.close() + + def chunklength(self): + d = readexactly(self._stream, 4) + l = struct.unpack(">l", d)[0] + if l <= 4: + if l: + raise util.Abort(_("invalid chunk length %d") % l) + return 0 + if self.callback: + self.callback() + return l - 4 + + def changelogheader(self): + """v10 does not have a changelog header chunk""" + return {} + + def manifestheader(self): + """v10 does not have a manifest header chunk""" + return {} + + def filelogheader(self): + """return the header of the filelogs chunk, v10 only has the filename""" + l = self.chunklength() + if not l: + return {} + fname = readexactly(self._stream, l) + return dict(filename=fname) + + def _deltaheader(self, headertuple, prevnode): + node, p1, p2, cs = headertuple + if prevnode is None: + deltabase = p1 + else: + deltabase = prevnode + return node, p1, p2, deltabase, cs + + def deltachunk(self, prevnode): + l = self.chunklength() + if not l: + return {} + headerdata = readexactly(self._stream, self.deltaheadersize) + header = struct.unpack(self.deltaheader, headerdata) + delta = readexactly(self._stream, l - self.deltaheadersize) + node, p1, p2, deltabase, cs = self._deltaheader(header, prevnode) + return dict(node=node, p1=p1, p2=p2, cs=cs, + deltabase=deltabase, delta=delta) + +class headerlessfixup(object): + def __init__(self, fh, h): + self._h = h + self._fh = fh + def read(self, n): + if self._h: + d, self._h = self._h[:n], self._h[n:] + if len(d) < n: + d += readexactly(self._fh, n - len(d)) + return d + return readexactly(self._fh, n) + +def readbundle(fh, fname): + header = readexactly(fh, 6) + + if not fname: + fname = "stream" + if not header.startswith('HG') and header.startswith('\0'): + fh = headerlessfixup(fh, header) + header = "HG10UN" + + magic, version, alg = header[0:2], header[2:4], header[4:6] + + if magic != 'HG': + raise util.Abort(_('%s: not a Mercurial bundle') % fname) + if version != '10': + raise util.Abort(_('%s: 
unknown bundle version %s') % (fname, version)) + return unbundle10(fh, alg) + +class bundle10(object): + deltaheader = _BUNDLE10_DELTA_HEADER + def __init__(self, lookup): + self._lookup = lookup + def close(self): + return closechunk() + def fileheader(self, fname): + return chunkheader(len(fname)) + fname + def revchunk(self, revlog, rev, prev): + node = revlog.node(rev) + p1, p2 = revlog.parentrevs(rev) + base = prev + + prefix = '' + if base == nullrev: + delta = revlog.revision(node) + prefix = mdiff.trivialdiffheader(len(delta)) + else: + delta = revlog.revdiff(base, rev) + linknode = self._lookup(revlog, node) + p1n, p2n = revlog.parents(node) + basenode = revlog.node(base) + meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode) + meta += prefix + l = len(meta) + len(delta) + yield chunkheader(l) + yield meta + yield delta + def builddeltaheader(self, node, p1n, p2n, basenode, linknode): + # do nothing with basenode, it is implicitly the previous one in HG10 + return struct.pack(self.deltaheader, node, p1n, p2n, linknode) diff --git a/websdk/mercurial/changelog.py b/websdk/mercurial/changelog.py index 2b80448..55e23c5 120000..100644 --- a/websdk/mercurial/changelog.py +++ b/websdk/mercurial/changelog.py @@ -1 +1,244 @@ -/usr/share/pyshared/mercurial/changelog.py \ No newline at end of file +# changelog.py - changelog class for mercurial +# +# Copyright 2005-2007 Matt Mackall +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. 
+ +from node import bin, hex, nullid +from i18n import _ +import util, error, revlog, encoding + +def _string_escape(text): + """ + >>> d = {'nl': chr(10), 'bs': chr(92), 'cr': chr(13), 'nul': chr(0)} + >>> s = "ab%(nl)scd%(bs)s%(bs)sn%(nul)sab%(cr)scd%(bs)s%(nl)s" % d + >>> s + 'ab\\ncd\\\\\\\\n\\x00ab\\rcd\\\\\\n' + >>> res = _string_escape(s) + >>> s == res.decode('string_escape') + True + """ + # subset of the string_escape codec + text = text.replace('\\', '\\\\').replace('\n', '\\n').replace('\r', '\\r') + return text.replace('\0', '\\0') + +def decodeextra(text): + extra = {} + for l in text.split('\0'): + if l: + k, v = l.decode('string_escape').split(':', 1) + extra[k] = v + return extra + +def encodeextra(d): + # keys must be sorted to produce a deterministic changelog entry + items = [_string_escape('%s:%s' % (k, d[k])) for k in sorted(d)] + return "\0".join(items) + +class appender(object): + '''the changelog index must be updated last on disk, so we use this class + to delay writes to it''' + def __init__(self, fp, buf): + self.data = buf + self.fp = fp + self.offset = fp.tell() + self.size = util.fstat(fp).st_size + + def end(self): + return self.size + len("".join(self.data)) + def tell(self): + return self.offset + def flush(self): + pass + def close(self): + self.fp.close() + + def seek(self, offset, whence=0): + '''virtual file offset spans real file and data''' + if whence == 0: + self.offset = offset + elif whence == 1: + self.offset += offset + elif whence == 2: + self.offset = self.end() + offset + if self.offset < self.size: + self.fp.seek(self.offset) + + def read(self, count=-1): + '''only trick here is reads that span real file and data''' + ret = "" + if self.offset < self.size: + s = self.fp.read(count) + ret = s + self.offset += len(s) + if count > 0: + count -= len(s) + if count != 0: + doff = self.offset - self.size + self.data.insert(0, "".join(self.data)) + del self.data[1:] + s = self.data[0][doff:doff + count] + self.offset += 
len(s) + ret += s + return ret + + def write(self, s): + self.data.append(str(s)) + self.offset += len(s) + +def delayopener(opener, target, divert, buf): + def o(name, mode='r'): + if name != target: + return opener(name, mode) + if divert: + return opener(name + ".a", mode.replace('a', 'w')) + # otherwise, divert to memory + return appender(opener(name, mode), buf) + return o + +class changelog(revlog.revlog): + def __init__(self, opener): + revlog.revlog.__init__(self, opener, "00changelog.i") + if self._initempty: + # changelogs don't benefit from generaldelta + self.version &= ~revlog.REVLOGGENERALDELTA + self._generaldelta = False + self._realopener = opener + self._delayed = False + self._divert = False + # hiddenrevs: revs that should be hidden by command and tools + self.hiddenrevs = set() + + def delayupdate(self): + "delay visibility of index updates to other readers" + self._delayed = True + self._divert = (len(self) == 0) + self._delaybuf = [] + self.opener = delayopener(self._realopener, self.indexfile, + self._divert, self._delaybuf) + + def finalize(self, tr): + "finalize index updates" + self._delayed = False + self.opener = self._realopener + # move redirected index data back into place + if self._divert: + nfile = self.opener(self.indexfile + ".a") + n = nfile.name + nfile.close() + util.rename(n, n[:-2]) + elif self._delaybuf: + fp = self.opener(self.indexfile, 'a') + fp.write("".join(self._delaybuf)) + fp.close() + self._delaybuf = [] + # split when we're done + self.checkinlinesize(tr) + + def readpending(self, file): + r = revlog.revlog(self.opener, file) + self.index = r.index + self.nodemap = r.nodemap + self._chunkcache = r._chunkcache + + def writepending(self): + "create a file containing the unfinalized state for pretxnchangegroup" + if self._delaybuf: + # make a temporary copy of the index + fp1 = self._realopener(self.indexfile) + fp2 = self._realopener(self.indexfile + ".a", "w") + fp2.write(fp1.read()) + # add pending data + 
fp2.write("".join(self._delaybuf)) + fp2.close() + # switch modes so finalize can simply rename + self._delaybuf = [] + self._divert = True + + if self._divert: + return True + + return False + + def checkinlinesize(self, tr, fp=None): + if not self._delayed: + revlog.revlog.checkinlinesize(self, tr, fp) + + def read(self, node): + """ + format used: + nodeid\n : manifest node in ascii + user\n : user, no \n or \r allowed + time tz extra\n : date (time is int or float, timezone is int) + : extra is metadatas, encoded and separated by '\0' + : older versions ignore it + files\n\n : files modified by the cset, no \n or \r allowed + (.*) : comment (free text, ideally utf-8) + + changelog v0 doesn't use extra + """ + text = self.revision(node) + if not text: + return (nullid, "", (0, 0), [], "", {'branch': 'default'}) + last = text.index("\n\n") + desc = encoding.tolocal(text[last + 2:]) + l = text[:last].split('\n') + manifest = bin(l[0]) + user = encoding.tolocal(l[1]) + + extra_data = l[2].split(' ', 2) + if len(extra_data) != 3: + time = float(extra_data.pop(0)) + try: + # various tools did silly things with the time zone field. + timezone = int(extra_data[0]) + except ValueError: + timezone = 0 + extra = {} + else: + time, timezone, extra = extra_data + time, timezone = float(time), int(timezone) + extra = decodeextra(extra) + if not extra.get('branch'): + extra['branch'] = 'default' + files = l[3:] + return (manifest, user, (time, timezone), files, desc, extra) + + def add(self, manifest, files, desc, transaction, p1, p2, + user, date=None, extra=None): + # Convert to UTF-8 encoded bytestrings as the very first + # thing: calling any method on a localstr object will turn it + # into a str object and the cached UTF-8 string is thus lost. 
+ user, desc = encoding.fromlocal(user), encoding.fromlocal(desc) + + user = user.strip() + # An empty username or a username with a "\n" will make the + # revision text contain two "\n\n" sequences -> corrupt + # repository since read cannot unpack the revision. + if not user: + raise error.RevlogError(_("empty username")) + if "\n" in user: + raise error.RevlogError(_("username %s contains a newline") + % repr(user)) + + # strip trailing whitespace and leading and trailing empty lines + desc = '\n'.join([l.rstrip() for l in desc.splitlines()]).strip('\n') + + if date: + parseddate = "%d %d" % util.parsedate(date) + else: + parseddate = "%d %d" % util.makedate() + if extra: + branch = extra.get("branch") + if branch in ("default", ""): + del extra["branch"] + elif branch in (".", "null", "tip"): + raise error.RevlogError(_('the name \'%s\' is reserved') + % branch) + if extra: + extra = encodeextra(extra) + parseddate = "%s %s" % (parseddate, extra) + l = [hex(manifest), user, parseddate] + sorted(files) + ["", desc] + text = "\n".join(l) + return self.addrevision(text, transaction, len(self), p1, p2) diff --git a/websdk/mercurial/cmdutil.py b/websdk/mercurial/cmdutil.py index 26624d7..dfc1094 120000..100644 --- a/websdk/mercurial/cmdutil.py +++ b/websdk/mercurial/cmdutil.py @@ -1 +1,1271 @@ -/usr/share/pyshared/mercurial/cmdutil.py \ No newline at end of file +# cmdutil.py - help for command processing in mercurial +# +# Copyright 2005-2007 Matt Mackall +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. 
+ +from node import hex, nullid, nullrev, short +from i18n import _ +import os, sys, errno, re, tempfile +import util, scmutil, templater, patch, error, templatekw, revlog, copies +import match as matchmod +import subrepo + +def parsealiases(cmd): + return cmd.lstrip("^").split("|") + +def findpossible(cmd, table, strict=False): + """ + Return cmd -> (aliases, command table entry) + for each matching command. + Return debug commands (or their aliases) only if no normal command matches. + """ + choice = {} + debugchoice = {} + + if cmd in table: + # short-circuit exact matches, "log" alias beats "^log|history" + keys = [cmd] + else: + keys = table.keys() + + for e in keys: + aliases = parsealiases(e) + found = None + if cmd in aliases: + found = cmd + elif not strict: + for a in aliases: + if a.startswith(cmd): + found = a + break + if found is not None: + if aliases[0].startswith("debug") or found.startswith("debug"): + debugchoice[found] = (aliases, table[e]) + else: + choice[found] = (aliases, table[e]) + + if not choice and debugchoice: + choice = debugchoice + + return choice + +def findcmd(cmd, table, strict=True): + """Return (aliases, command table entry) for command string.""" + choice = findpossible(cmd, table, strict) + + if cmd in choice: + return choice[cmd] + + if len(choice) > 1: + clist = choice.keys() + clist.sort() + raise error.AmbiguousCommand(cmd, clist) + + if choice: + return choice.values()[0] + + raise error.UnknownCommand(cmd) + +def findrepo(p): + while not os.path.isdir(os.path.join(p, ".hg")): + oldp, p = p, os.path.dirname(p) + if p == oldp: + return None + + return p + +def bailifchanged(repo): + if repo.dirstate.p2() != nullid: + raise util.Abort(_('outstanding uncommitted merge')) + modified, added, removed, deleted = repo.status()[:4] + if modified or added or removed or deleted: + raise util.Abort(_("outstanding uncommitted changes")) + ctx = repo[None] + for s in ctx.substate: + if ctx.sub(s).dirty(): + raise 
util.Abort(_("uncommitted changes in subrepo %s") % s) + +def logmessage(ui, opts): + """ get the log message according to -m and -l option """ + message = opts.get('message') + logfile = opts.get('logfile') + + if message and logfile: + raise util.Abort(_('options --message and --logfile are mutually ' + 'exclusive')) + if not message and logfile: + try: + if logfile == '-': + message = ui.fin.read() + else: + message = '\n'.join(util.readfile(logfile).splitlines()) + except IOError, inst: + raise util.Abort(_("can't read commit message '%s': %s") % + (logfile, inst.strerror)) + return message + +def loglimit(opts): + """get the log limit according to option -l/--limit""" + limit = opts.get('limit') + if limit: + try: + limit = int(limit) + except ValueError: + raise util.Abort(_('limit must be a positive integer')) + if limit <= 0: + raise util.Abort(_('limit must be positive')) + else: + limit = None + return limit + +def makefilename(repo, pat, node, desc=None, + total=None, seqno=None, revwidth=None, pathname=None): + node_expander = { + 'H': lambda: hex(node), + 'R': lambda: str(repo.changelog.rev(node)), + 'h': lambda: short(node), + 'm': lambda: re.sub('[^\w]', '_', str(desc)) + } + expander = { + '%': lambda: '%', + 'b': lambda: os.path.basename(repo.root), + } + + try: + if node: + expander.update(node_expander) + if node: + expander['r'] = (lambda: + str(repo.changelog.rev(node)).zfill(revwidth or 0)) + if total is not None: + expander['N'] = lambda: str(total) + if seqno is not None: + expander['n'] = lambda: str(seqno) + if total is not None and seqno is not None: + expander['n'] = lambda: str(seqno).zfill(len(str(total))) + if pathname is not None: + expander['s'] = lambda: os.path.basename(pathname) + expander['d'] = lambda: os.path.dirname(pathname) or '.' 
+ expander['p'] = lambda: pathname + + newname = [] + patlen = len(pat) + i = 0 + while i < patlen: + c = pat[i] + if c == '%': + i += 1 + c = pat[i] + c = expander[c]() + newname.append(c) + i += 1 + return ''.join(newname) + except KeyError, inst: + raise util.Abort(_("invalid format spec '%%%s' in output filename") % + inst.args[0]) + +def makefileobj(repo, pat, node=None, desc=None, total=None, + seqno=None, revwidth=None, mode='wb', pathname=None): + + writable = mode not in ('r', 'rb') + + if not pat or pat == '-': + fp = writable and repo.ui.fout or repo.ui.fin + if util.safehasattr(fp, 'fileno'): + return os.fdopen(os.dup(fp.fileno()), mode) + else: + # if this fp can't be duped properly, return + # a dummy object that can be closed + class wrappedfileobj(object): + noop = lambda x: None + def __init__(self, f): + self.f = f + def __getattr__(self, attr): + if attr == 'close': + return self.noop + else: + return getattr(self.f, attr) + + return wrappedfileobj(fp) + if util.safehasattr(pat, 'write') and writable: + return pat + if util.safehasattr(pat, 'read') and 'r' in mode: + return pat + return open(makefilename(repo, pat, node, desc, total, seqno, revwidth, + pathname), + mode) + +def openrevlog(repo, cmd, file_, opts): + """opens the changelog, manifest, a filelog or a given revlog""" + cl = opts['changelog'] + mf = opts['manifest'] + msg = None + if cl and mf: + msg = _('cannot specify --changelog and --manifest at the same time') + elif cl or mf: + if file_: + msg = _('cannot specify filename with --changelog or --manifest') + elif not repo: + msg = _('cannot specify --changelog or --manifest ' + 'without a repository') + if msg: + raise util.Abort(msg) + + r = None + if repo: + if cl: + r = repo.changelog + elif mf: + r = repo.manifest + elif file_: + filelog = repo.file(file_) + if len(filelog): + r = filelog + if not r: + if not file_: + raise error.CommandError(cmd, _('invalid arguments')) + if not os.path.isfile(file_): + raise 
util.Abort(_("revlog '%s' not found") % file_) + r = revlog.revlog(scmutil.opener(os.getcwd(), audit=False), + file_[:-2] + ".i") + return r + +def copy(ui, repo, pats, opts, rename=False): + # called with the repo lock held + # + # hgsep => pathname that uses "/" to separate directories + # ossep => pathname that uses os.sep to separate directories + cwd = repo.getcwd() + targets = {} + after = opts.get("after") + dryrun = opts.get("dry_run") + wctx = repo[None] + + def walkpat(pat): + srcs = [] + badstates = after and '?' or '?r' + m = scmutil.match(repo[None], [pat], opts, globbed=True) + for abs in repo.walk(m): + state = repo.dirstate[abs] + rel = m.rel(abs) + exact = m.exact(abs) + if state in badstates: + if exact and state == '?': + ui.warn(_('%s: not copying - file is not managed\n') % rel) + if exact and state == 'r': + ui.warn(_('%s: not copying - file has been marked for' + ' remove\n') % rel) + continue + # abs: hgsep + # rel: ossep + srcs.append((abs, rel, exact)) + return srcs + + # abssrc: hgsep + # relsrc: ossep + # otarget: ossep + def copyfile(abssrc, relsrc, otarget, exact): + abstarget = scmutil.canonpath(repo.root, cwd, otarget) + reltarget = repo.pathto(abstarget, cwd) + target = repo.wjoin(abstarget) + src = repo.wjoin(abssrc) + state = repo.dirstate[abstarget] + + scmutil.checkportable(ui, abstarget) + + # check for collisions + prevsrc = targets.get(abstarget) + if prevsrc is not None: + ui.warn(_('%s: not overwriting - %s collides with %s\n') % + (reltarget, repo.pathto(abssrc, cwd), + repo.pathto(prevsrc, cwd))) + return + + # check for overwrites + exists = os.path.lexists(target) + if not after and exists or after and state in 'mn': + if not opts['force']: + ui.warn(_('%s: not overwriting - file exists\n') % + reltarget) + return + + if after: + if not exists: + if rename: + ui.warn(_('%s: not recording move - %s does not exist\n') % + (relsrc, reltarget)) + else: + ui.warn(_('%s: not recording copy - %s does not exist\n') % + (relsrc, 
reltarget)) + return + elif not dryrun: + try: + if exists: + os.unlink(target) + targetdir = os.path.dirname(target) or '.' + if not os.path.isdir(targetdir): + os.makedirs(targetdir) + util.copyfile(src, target) + srcexists = True + except IOError, inst: + if inst.errno == errno.ENOENT: + ui.warn(_('%s: deleted in working copy\n') % relsrc) + srcexists = False + else: + ui.warn(_('%s: cannot copy - %s\n') % + (relsrc, inst.strerror)) + return True # report a failure + + if ui.verbose or not exact: + if rename: + ui.status(_('moving %s to %s\n') % (relsrc, reltarget)) + else: + ui.status(_('copying %s to %s\n') % (relsrc, reltarget)) + + targets[abstarget] = abssrc + + # fix up dirstate + scmutil.dirstatecopy(ui, repo, wctx, abssrc, abstarget, + dryrun=dryrun, cwd=cwd) + if rename and not dryrun: + if not after and srcexists: + util.unlinkpath(repo.wjoin(abssrc)) + wctx.forget([abssrc]) + + # pat: ossep + # dest ossep + # srcs: list of (hgsep, hgsep, ossep, bool) + # return: function that takes hgsep and returns ossep + def targetpathfn(pat, dest, srcs): + if os.path.isdir(pat): + abspfx = scmutil.canonpath(repo.root, cwd, pat) + abspfx = util.localpath(abspfx) + if destdirexists: + striplen = len(os.path.split(abspfx)[0]) + else: + striplen = len(abspfx) + if striplen: + striplen += len(os.sep) + res = lambda p: os.path.join(dest, util.localpath(p)[striplen:]) + elif destdirexists: + res = lambda p: os.path.join(dest, + os.path.basename(util.localpath(p))) + else: + res = lambda p: dest + return res + + # pat: ossep + # dest ossep + # srcs: list of (hgsep, hgsep, ossep, bool) + # return: function that takes hgsep and returns ossep + def targetpathafterfn(pat, dest, srcs): + if matchmod.patkind(pat): + # a mercurial pattern + res = lambda p: os.path.join(dest, + os.path.basename(util.localpath(p))) + else: + abspfx = scmutil.canonpath(repo.root, cwd, pat) + if len(abspfx) < len(srcs[0][0]): + # A directory. 
Either the target path contains the last + # component of the source path or it does not. + def evalpath(striplen): + score = 0 + for s in srcs: + t = os.path.join(dest, util.localpath(s[0])[striplen:]) + if os.path.lexists(t): + score += 1 + return score + + abspfx = util.localpath(abspfx) + striplen = len(abspfx) + if striplen: + striplen += len(os.sep) + if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])): + score = evalpath(striplen) + striplen1 = len(os.path.split(abspfx)[0]) + if striplen1: + striplen1 += len(os.sep) + if evalpath(striplen1) > score: + striplen = striplen1 + res = lambda p: os.path.join(dest, + util.localpath(p)[striplen:]) + else: + # a file + if destdirexists: + res = lambda p: os.path.join(dest, + os.path.basename(util.localpath(p))) + else: + res = lambda p: dest + return res + + + pats = scmutil.expandpats(pats) + if not pats: + raise util.Abort(_('no source or destination specified')) + if len(pats) == 1: + raise util.Abort(_('no destination specified')) + dest = pats.pop() + destdirexists = os.path.isdir(dest) and not os.path.islink(dest) + if not destdirexists: + if len(pats) > 1 or matchmod.patkind(pats[0]): + raise util.Abort(_('with multiple sources, destination must be an ' + 'existing directory')) + if util.endswithsep(dest): + raise util.Abort(_('destination %s is not a directory') % dest) + + tfn = targetpathfn + if after: + tfn = targetpathafterfn + copylist = [] + for pat in pats: + srcs = walkpat(pat) + if not srcs: + continue + copylist.append((tfn(pat, dest, srcs), srcs)) + if not copylist: + raise util.Abort(_('no files to copy')) + + errors = 0 + for targetpath, srcs in copylist: + for abssrc, relsrc, exact in srcs: + if copyfile(abssrc, relsrc, targetpath(abssrc), exact): + errors += 1 + + if errors: + ui.warn(_('(consider using --after)\n')) + + return errors != 0 + +def service(opts, parentfn=None, initfn=None, runfn=None, logfile=None, + runargs=None, appendpid=False): + '''Run a command as a service.''' + 
+ if opts['daemon'] and not opts['daemon_pipefds']: + # Signal child process startup with file removal + lockfd, lockpath = tempfile.mkstemp(prefix='hg-service-') + os.close(lockfd) + try: + if not runargs: + runargs = util.hgcmd() + sys.argv[1:] + runargs.append('--daemon-pipefds=%s' % lockpath) + # Don't pass --cwd to the child process, because we've already + # changed directory. + for i in xrange(1, len(runargs)): + if runargs[i].startswith('--cwd='): + del runargs[i] + break + elif runargs[i].startswith('--cwd'): + del runargs[i:i + 2] + break + def condfn(): + return not os.path.exists(lockpath) + pid = util.rundetached(runargs, condfn) + if pid < 0: + raise util.Abort(_('child process failed to start')) + finally: + try: + os.unlink(lockpath) + except OSError, e: + if e.errno != errno.ENOENT: + raise + if parentfn: + return parentfn(pid) + else: + return + + if initfn: + initfn() + + if opts['pid_file']: + mode = appendpid and 'a' or 'w' + fp = open(opts['pid_file'], mode) + fp.write(str(os.getpid()) + '\n') + fp.close() + + if opts['daemon_pipefds']: + lockpath = opts['daemon_pipefds'] + try: + os.setsid() + except AttributeError: + pass + os.unlink(lockpath) + util.hidewindow() + sys.stdout.flush() + sys.stderr.flush() + + nullfd = os.open(util.nulldev, os.O_RDWR) + logfilefd = nullfd + if logfile: + logfilefd = os.open(logfile, os.O_RDWR | os.O_CREAT | os.O_APPEND) + os.dup2(nullfd, 0) + os.dup2(logfilefd, 1) + os.dup2(logfilefd, 2) + if nullfd not in (0, 1, 2): + os.close(nullfd) + if logfile and logfilefd not in (0, 1, 2): + os.close(logfilefd) + + if runfn: + return runfn() + +def export(repo, revs, template='hg-%h.patch', fp=None, switch_parent=False, + opts=None): + '''export changesets as hg patches.''' + + total = len(revs) + revwidth = max([len(str(rev)) for rev in revs]) + + def single(rev, seqno, fp): + ctx = repo[rev] + node = ctx.node() + parents = [p.node() for p in ctx.parents() if p] + branch = ctx.branch() + if switch_parent: + 
parents.reverse() + prev = (parents and parents[0]) or nullid + + shouldclose = False + if not fp: + desc_lines = ctx.description().rstrip().split('\n') + desc = desc_lines[0] #Commit always has a first line. + fp = makefileobj(repo, template, node, desc=desc, total=total, + seqno=seqno, revwidth=revwidth, mode='ab') + if fp != template: + shouldclose = True + if fp != sys.stdout and util.safehasattr(fp, 'name'): + repo.ui.note("%s\n" % fp.name) + + fp.write("# HG changeset patch\n") + fp.write("# User %s\n" % ctx.user()) + fp.write("# Date %d %d\n" % ctx.date()) + if branch and branch != 'default': + fp.write("# Branch %s\n" % branch) + fp.write("# Node ID %s\n" % hex(node)) + fp.write("# Parent %s\n" % hex(prev)) + if len(parents) > 1: + fp.write("# Parent %s\n" % hex(parents[1])) + fp.write(ctx.description().rstrip()) + fp.write("\n\n") + + for chunk in patch.diff(repo, prev, node, opts=opts): + fp.write(chunk) + + if shouldclose: + fp.close() + + for seqno, rev in enumerate(revs): + single(rev, seqno + 1, fp) + +def diffordiffstat(ui, repo, diffopts, node1, node2, match, + changes=None, stat=False, fp=None, prefix='', + listsubrepos=False): + '''show diff or diffstat.''' + if fp is None: + write = ui.write + else: + def write(s, **kw): + fp.write(s) + + if stat: + diffopts = diffopts.copy(context=0) + width = 80 + if not ui.plain(): + width = ui.termwidth() + chunks = patch.diff(repo, node1, node2, match, changes, diffopts, + prefix=prefix) + for chunk, label in patch.diffstatui(util.iterlines(chunks), + width=width, + git=diffopts.git): + write(chunk, label=label) + else: + for chunk, label in patch.diffui(repo, node1, node2, match, + changes, diffopts, prefix=prefix): + write(chunk, label=label) + + if listsubrepos: + ctx1 = repo[node1] + ctx2 = repo[node2] + for subpath, sub in subrepo.itersubrepos(ctx1, ctx2): + if node2 is not None: + node2 = ctx2.substate[subpath][1] + submatch = matchmod.narrowmatcher(subpath, match) + sub.diff(diffopts, node2, submatch, 
changes=changes, + stat=stat, fp=fp, prefix=prefix) + +class changeset_printer(object): + '''show changeset information when templating not requested.''' + + def __init__(self, ui, repo, patch, diffopts, buffered): + self.ui = ui + self.repo = repo + self.buffered = buffered + self.patch = patch + self.diffopts = diffopts + self.header = {} + self.hunk = {} + self.lastheader = None + self.footer = None + + def flush(self, rev): + if rev in self.header: + h = self.header[rev] + if h != self.lastheader: + self.lastheader = h + self.ui.write(h) + del self.header[rev] + if rev in self.hunk: + self.ui.write(self.hunk[rev]) + del self.hunk[rev] + return 1 + return 0 + + def close(self): + if self.footer: + self.ui.write(self.footer) + + def show(self, ctx, copies=None, matchfn=None, **props): + if self.buffered: + self.ui.pushbuffer() + self._show(ctx, copies, matchfn, props) + self.hunk[ctx.rev()] = self.ui.popbuffer(labeled=True) + else: + self._show(ctx, copies, matchfn, props) + + def _show(self, ctx, copies, matchfn, props): + '''show a single changeset or file revision''' + changenode = ctx.node() + rev = ctx.rev() + + if self.ui.quiet: + self.ui.write("%d:%s\n" % (rev, short(changenode)), + label='log.node') + return + + log = self.repo.changelog + date = util.datestr(ctx.date()) + + hexfunc = self.ui.debugflag and hex or short + + parents = [(p, hexfunc(log.node(p))) + for p in self._meaningful_parentrevs(log, rev)] + + self.ui.write(_("changeset: %d:%s\n") % (rev, hexfunc(changenode)), + label='log.changeset') + + branch = ctx.branch() + # don't show the default branch name + if branch != 'default': + self.ui.write(_("branch: %s\n") % branch, + label='log.branch') + for bookmark in self.repo.nodebookmarks(changenode): + self.ui.write(_("bookmark: %s\n") % bookmark, + label='log.bookmark') + for tag in self.repo.nodetags(changenode): + self.ui.write(_("tag: %s\n") % tag, + label='log.tag') + for parent in parents: + self.ui.write(_("parent: %d:%s\n") % parent, + 
label='log.parent') + + if self.ui.debugflag: + mnode = ctx.manifestnode() + self.ui.write(_("manifest: %d:%s\n") % + (self.repo.manifest.rev(mnode), hex(mnode)), + label='ui.debug log.manifest') + self.ui.write(_("user: %s\n") % ctx.user(), + label='log.user') + self.ui.write(_("date: %s\n") % date, + label='log.date') + + if self.ui.debugflag: + files = self.repo.status(log.parents(changenode)[0], changenode)[:3] + for key, value in zip([_("files:"), _("files+:"), _("files-:")], + files): + if value: + self.ui.write("%-12s %s\n" % (key, " ".join(value)), + label='ui.debug log.files') + elif ctx.files() and self.ui.verbose: + self.ui.write(_("files: %s\n") % " ".join(ctx.files()), + label='ui.note log.files') + if copies and self.ui.verbose: + copies = ['%s (%s)' % c for c in copies] + self.ui.write(_("copies: %s\n") % ' '.join(copies), + label='ui.note log.copies') + + extra = ctx.extra() + if extra and self.ui.debugflag: + for key, value in sorted(extra.items()): + self.ui.write(_("extra: %s=%s\n") + % (key, value.encode('string_escape')), + label='ui.debug log.extra') + + description = ctx.description().strip() + if description: + if self.ui.verbose: + self.ui.write(_("description:\n"), + label='ui.note log.description') + self.ui.write(description, + label='ui.note log.description') + self.ui.write("\n\n") + else: + self.ui.write(_("summary: %s\n") % + description.splitlines()[0], + label='log.summary') + self.ui.write("\n") + + self.showpatch(changenode, matchfn) + + def showpatch(self, node, matchfn): + if not matchfn: + matchfn = self.patch + if matchfn: + stat = self.diffopts.get('stat') + diff = self.diffopts.get('patch') + diffopts = patch.diffopts(self.ui, self.diffopts) + prev = self.repo.changelog.parents(node)[0] + if stat: + diffordiffstat(self.ui, self.repo, diffopts, prev, node, + match=matchfn, stat=True) + if diff: + if stat: + self.ui.write("\n") + diffordiffstat(self.ui, self.repo, diffopts, prev, node, + match=matchfn, stat=False) + 
self.ui.write("\n") + + def _meaningful_parentrevs(self, log, rev): + """Return list of meaningful (or all if debug) parentrevs for rev. + + For merges (two non-nullrev revisions) both parents are meaningful. + Otherwise the first parent revision is considered meaningful if it + is not the preceding revision. + """ + parents = log.parentrevs(rev) + if not self.ui.debugflag and parents[1] == nullrev: + if parents[0] >= rev - 1: + parents = [] + else: + parents = [parents[0]] + return parents + + +class changeset_templater(changeset_printer): + '''format changeset information.''' + + def __init__(self, ui, repo, patch, diffopts, mapfile, buffered): + changeset_printer.__init__(self, ui, repo, patch, diffopts, buffered) + formatnode = ui.debugflag and (lambda x: x) or (lambda x: x[:12]) + defaulttempl = { + 'parent': '{rev}:{node|formatnode} ', + 'manifest': '{rev}:{node|formatnode}', + 'file_copy': '{name} ({source})', + 'extra': '{key}={value|stringescape}' + } + # filecopy is preserved for compatibility reasons + defaulttempl['filecopy'] = defaulttempl['file_copy'] + self.t = templater.templater(mapfile, {'formatnode': formatnode}, + cache=defaulttempl) + self.cache = {} + + def use_template(self, t): + '''set template string to use''' + self.t.cache['changeset'] = t + + def _meaningful_parentrevs(self, ctx): + """Return list of meaningful (or all if debug) parentrevs for rev. + """ + parents = ctx.parents() + if len(parents) > 1: + return parents + if self.ui.debugflag: + return [parents[0], self.repo['null']] + if parents[0].rev() >= ctx.rev() - 1: + return [] + return parents + + def _show(self, ctx, copies, matchfn, props): + '''show a single changeset or file revision''' + + showlist = templatekw.showlist + + # showparents() behaviour depends on ui trace level which + # causes unexpected behaviours at templating level and makes + # it harder to extract it in a standalone function. Its + # behaviour cannot be changed so leave it here for now. 
+ def showparents(**args): + ctx = args['ctx'] + parents = [[('rev', p.rev()), ('node', p.hex())] + for p in self._meaningful_parentrevs(ctx)] + return showlist('parent', parents, **args) + + props = props.copy() + props.update(templatekw.keywords) + props['parents'] = showparents + props['templ'] = self.t + props['ctx'] = ctx + props['repo'] = self.repo + props['revcache'] = {'copies': copies} + props['cache'] = self.cache + + # find correct templates for current mode + + tmplmodes = [ + (True, None), + (self.ui.verbose, 'verbose'), + (self.ui.quiet, 'quiet'), + (self.ui.debugflag, 'debug'), + ] + + types = {'header': '', 'footer':'', 'changeset': 'changeset'} + for mode, postfix in tmplmodes: + for type in types: + cur = postfix and ('%s_%s' % (type, postfix)) or type + if mode and cur in self.t: + types[type] = cur + + try: + + # write header + if types['header']: + h = templater.stringify(self.t(types['header'], **props)) + if self.buffered: + self.header[ctx.rev()] = h + else: + if self.lastheader != h: + self.lastheader = h + self.ui.write(h) + + # write changeset metadata, then patch if requested + key = types['changeset'] + self.ui.write(templater.stringify(self.t(key, **props))) + self.showpatch(ctx.node(), matchfn) + + if types['footer']: + if not self.footer: + self.footer = templater.stringify(self.t(types['footer'], + **props)) + + except KeyError, inst: + msg = _("%s: no key named '%s'") + raise util.Abort(msg % (self.t.mapfile, inst.args[0])) + except SyntaxError, inst: + raise util.Abort('%s: %s' % (self.t.mapfile, inst.args[0])) + +def show_changeset(ui, repo, opts, buffered=False): + """show one changeset using template or regular display. + + Display format will be the first non-empty hit of: + 1. option 'template' + 2. option 'style' + 3. [ui] setting 'logtemplate' + 4. [ui] setting 'style' + If all of these values are either the unset or the empty string, + regular display via changeset_printer() is done. 
+ """ + # options + patch = False + if opts.get('patch') or opts.get('stat'): + patch = scmutil.matchall(repo) + + tmpl = opts.get('template') + style = None + if tmpl: + tmpl = templater.parsestring(tmpl, quoted=False) + else: + style = opts.get('style') + + # ui settings + if not (tmpl or style): + tmpl = ui.config('ui', 'logtemplate') + if tmpl: + tmpl = templater.parsestring(tmpl) + else: + style = util.expandpath(ui.config('ui', 'style', '')) + + if not (tmpl or style): + return changeset_printer(ui, repo, patch, opts, buffered) + + mapfile = None + if style and not tmpl: + mapfile = style + if not os.path.split(mapfile)[0]: + mapname = (templater.templatepath('map-cmdline.' + mapfile) + or templater.templatepath(mapfile)) + if mapname: + mapfile = mapname + + try: + t = changeset_templater(ui, repo, patch, opts, mapfile, buffered) + except SyntaxError, inst: + raise util.Abort(inst.args[0]) + if tmpl: + t.use_template(tmpl) + return t + +def finddate(ui, repo, date): + """Find the tipmost changeset that matches the given date spec""" + + df = util.matchdate(date) + m = scmutil.matchall(repo) + results = {} + + def prep(ctx, fns): + d = ctx.date() + if df(d[0]): + results[ctx.rev()] = d + + for ctx in walkchangerevs(repo, m, {'rev': None}, prep): + rev = ctx.rev() + if rev in results: + ui.status(_("Found revision %s from %s\n") % + (rev, util.datestr(results[rev]))) + return str(rev) + + raise util.Abort(_("revision matching date not found")) + +def walkchangerevs(repo, match, opts, prepare): + '''Iterate over files and the revs in which they changed. + + Callers most commonly need to iterate backwards over the history + in which they are interested. Doing so has awful (quadratic-looking) + performance, so we use iterators in a "windowed" way. + + We walk a window of revisions in the desired order. Within the + window, we first walk forwards to gather data, then in the desired + order (usually backwards) to display it. 
+ + This function returns an iterator yielding contexts. Before + yielding each context, the iterator will first call the prepare + function on each context in the window in forward order.''' + + def increasing_windows(start, end, windowsize=8, sizelimit=512): + if start < end: + while start < end: + yield start, min(windowsize, end - start) + start += windowsize + if windowsize < sizelimit: + windowsize *= 2 + else: + while start > end: + yield start, min(windowsize, start - end - 1) + start -= windowsize + if windowsize < sizelimit: + windowsize *= 2 + + follow = opts.get('follow') or opts.get('follow_first') + + if not len(repo): + return [] + + if follow: + defrange = '%s:0' % repo['.'].rev() + else: + defrange = '-1:0' + revs = scmutil.revrange(repo, opts['rev'] or [defrange]) + if not revs: + return [] + wanted = set() + slowpath = match.anypats() or (match.files() and opts.get('removed')) + fncache = {} + change = util.cachefunc(repo.changectx) + + # First step is to fill wanted, the set of revisions that we want to yield. + # When it does not induce extra cost, we also fill fncache for revisions in + # wanted: a cache of filenames that were changed (ctx.files()) and that + # match the file filtering conditions. + + if not slowpath and not match.files(): + # No files, no patterns. Display all revs. + wanted = set(revs) + copies = [] + + if not slowpath: + # We only have to read through the filelog to find wanted revisions + + minrev, maxrev = min(revs), max(revs) + def filerevgen(filelog, last): + """ + Only files, no patterns. Check the history of each file. 
+ + Examines filelog entries within minrev, maxrev linkrev range + Returns an iterator yielding (linkrev, parentlinkrevs, copied) + tuples in backwards order + """ + cl_count = len(repo) + revs = [] + for j in xrange(0, last + 1): + linkrev = filelog.linkrev(j) + if linkrev < minrev: + continue + # only yield rev for which we have the changelog, it can + # happen while doing "hg log" during a pull or commit + if linkrev >= cl_count: + break + + parentlinkrevs = [] + for p in filelog.parentrevs(j): + if p != nullrev: + parentlinkrevs.append(filelog.linkrev(p)) + n = filelog.node(j) + revs.append((linkrev, parentlinkrevs, + follow and filelog.renamed(n))) + + return reversed(revs) + def iterfiles(): + for filename in match.files(): + yield filename, None + for filename_node in copies: + yield filename_node + for file_, node in iterfiles(): + filelog = repo.file(file_) + if not len(filelog): + if node is None: + # A zero count may be a directory or deleted file, so + # try to find matching entries on the slow path. 
+ if follow: + raise util.Abort( + _('cannot follow nonexistent file: "%s"') % file_) + slowpath = True + break + else: + continue + + if node is None: + last = len(filelog) - 1 + else: + last = filelog.rev(node) + + + # keep track of all ancestors of the file + ancestors = set([filelog.linkrev(last)]) + + # iterate from latest to oldest revision + for rev, flparentlinkrevs, copied in filerevgen(filelog, last): + if not follow: + if rev > maxrev: + continue + else: + # Note that last might not be the first interesting + # rev to us: + # if the file has been changed after maxrev, we'll + # have linkrev(last) > maxrev, and we still need + # to explore the file graph + if rev not in ancestors: + continue + # XXX insert 1327 fix here + if flparentlinkrevs: + ancestors.update(flparentlinkrevs) + + fncache.setdefault(rev, []).append(file_) + wanted.add(rev) + if copied: + copies.append(copied) + if slowpath: + # We have to read the changelog to match filenames against + # changed files + + if follow: + raise util.Abort(_('can only follow copies/renames for explicit ' + 'filenames')) + + # The slow path checks files modified in every changeset. 
+ for i in sorted(revs): + ctx = change(i) + matches = filter(match, ctx.files()) + if matches: + fncache[i] = matches + wanted.add(i) + + class followfilter(object): + def __init__(self, onlyfirst=False): + self.startrev = nullrev + self.roots = set() + self.onlyfirst = onlyfirst + + def match(self, rev): + def realparents(rev): + if self.onlyfirst: + return repo.changelog.parentrevs(rev)[0:1] + else: + return filter(lambda x: x != nullrev, + repo.changelog.parentrevs(rev)) + + if self.startrev == nullrev: + self.startrev = rev + return True + + if rev > self.startrev: + # forward: all descendants + if not self.roots: + self.roots.add(self.startrev) + for parent in realparents(rev): + if parent in self.roots: + self.roots.add(rev) + return True + else: + # backwards: all parents + if not self.roots: + self.roots.update(realparents(self.startrev)) + if rev in self.roots: + self.roots.remove(rev) + self.roots.update(realparents(rev)) + return True + + return False + + # it might be worthwhile to do this in the iterator if the rev range + # is descending and the prune args are all within that range + for rev in opts.get('prune', ()): + rev = repo.changelog.rev(repo.lookup(rev)) + ff = followfilter() + stop = min(revs[0], revs[-1]) + for x in xrange(rev, stop - 1, -1): + if ff.match(x): + wanted.discard(x) + + # Now that wanted is correctly initialized, we can iterate over the + # revision range, yielding only revisions in wanted. 
+ def iterate(): + if follow and not match.files(): + ff = followfilter(onlyfirst=opts.get('follow_first')) + def want(rev): + return ff.match(rev) and rev in wanted + else: + def want(rev): + return rev in wanted + + for i, window in increasing_windows(0, len(revs)): + nrevs = [rev for rev in revs[i:i + window] if want(rev)] + for rev in sorted(nrevs): + fns = fncache.get(rev) + ctx = change(rev) + if not fns: + def fns_generator(): + for f in ctx.files(): + if match(f): + yield f + fns = fns_generator() + prepare(ctx, fns) + for rev in nrevs: + yield change(rev) + return iterate() + +def add(ui, repo, match, dryrun, listsubrepos, prefix): + join = lambda f: os.path.join(prefix, f) + bad = [] + oldbad = match.bad + match.bad = lambda x, y: bad.append(x) or oldbad(x, y) + names = [] + wctx = repo[None] + cca = None + abort, warn = scmutil.checkportabilityalert(ui) + if abort or warn: + cca = scmutil.casecollisionauditor(ui, abort, wctx) + for f in repo.walk(match): + exact = match.exact(f) + if exact or f not in repo.dirstate: + if cca: + cca(f) + names.append(f) + if ui.verbose or not exact: + ui.status(_('adding %s\n') % match.rel(join(f))) + + if listsubrepos: + for subpath in wctx.substate: + sub = wctx.sub(subpath) + try: + submatch = matchmod.narrowmatcher(subpath, match) + bad.extend(sub.add(ui, submatch, dryrun, prefix)) + except error.LookupError: + ui.status(_("skipping missing subrepository: %s\n") + % join(subpath)) + + if not dryrun: + rejected = wctx.add(names, prefix) + bad.extend(f for f in rejected if f in match.files()) + return bad + +def duplicatecopies(repo, rev, p1, p2): + "Reproduce copies found in the source revision in the dirstate for grafts" + # Here we simulate the copies and renames in the source changeset + cop, diver = copies.copies(repo, repo[rev], repo[p1], repo[p2], True) + m1 = repo[rev].manifest() + m2 = repo[p1].manifest() + for k, v in cop.iteritems(): + if k in m1: + if v in m1 or v in m2: + repo.dirstate.copy(v, k) + if v in 
m2 and v not in m1 and k in m2: + repo.dirstate.remove(v) + +def commit(ui, repo, commitfunc, pats, opts): + '''commit the specified files or all outstanding changes''' + date = opts.get('date') + if date: + opts['date'] = util.parsedate(date) + message = logmessage(ui, opts) + + # extract addremove carefully -- this function can be called from a command + # that doesn't support addremove + if opts.get('addremove'): + scmutil.addremove(repo, pats, opts) + + return commitfunc(ui, repo, message, + scmutil.match(repo[None], pats, opts), opts) + +def commiteditor(repo, ctx, subs): + if ctx.description(): + return ctx.description() + return commitforceeditor(repo, ctx, subs) + +def commitforceeditor(repo, ctx, subs): + edittext = [] + modified, added, removed = ctx.modified(), ctx.added(), ctx.removed() + if ctx.description(): + edittext.append(ctx.description()) + edittext.append("") + edittext.append("") # Empty line between message and comments. + edittext.append(_("HG: Enter commit message." 
+ " Lines beginning with 'HG:' are removed.")) + edittext.append(_("HG: Leave message empty to abort commit.")) + edittext.append("HG: --") + edittext.append(_("HG: user: %s") % ctx.user()) + if ctx.p2(): + edittext.append(_("HG: branch merge")) + if ctx.branch(): + edittext.append(_("HG: branch '%s'") % ctx.branch()) + edittext.extend([_("HG: subrepo %s") % s for s in subs]) + edittext.extend([_("HG: added %s") % f for f in added]) + edittext.extend([_("HG: changed %s") % f for f in modified]) + edittext.extend([_("HG: removed %s") % f for f in removed]) + if not added and not modified and not removed: + edittext.append(_("HG: no files changed")) + edittext.append("") + # run editor in the repository root + olddir = os.getcwd() + os.chdir(repo.root) + text = repo.ui.edit("\n".join(edittext), ctx.user()) + text = re.sub("(?m)^HG:.*(\n|$)", "", text) + os.chdir(olddir) + + if not text.strip(): + raise util.Abort(_("empty commit message")) + + return text + +def command(table): + '''returns a function object bound to table which can be used as + a decorator for populating table as a command table''' + + def cmd(name, options, synopsis=None): + def decorator(func): + if synopsis: + table[name] = func, options[:], synopsis + else: + table[name] = func, options[:] + return func + return decorator + + return cmd diff --git a/websdk/mercurial/commands.py b/websdk/mercurial/commands.py index dcb9954..7ed52fb 120000..100644 --- a/websdk/mercurial/commands.py +++ b/websdk/mercurial/commands.py @@ -1 +1,5672 @@ -/usr/share/pyshared/mercurial/commands.py \ No newline at end of file +# commands.py - command processing for mercurial +# +# Copyright 2005-2007 Matt Mackall +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. 
+ +from node import hex, bin, nullid, nullrev, short +from lock import release +from i18n import _, gettext +import os, re, difflib, time, tempfile, errno +import hg, scmutil, util, revlog, extensions, copies, error, bookmarks +import patch, help, url, encoding, templatekw, discovery +import archival, changegroup, cmdutil, hbisect +import sshserver, hgweb, hgweb.server, commandserver +import merge as mergemod +import minirst, revset, fileset +import dagparser, context, simplemerge +import random, setdiscovery, treediscovery, dagutil + +table = {} + +command = cmdutil.command(table) + +# common command options + +globalopts = [ + ('R', 'repository', '', + _('repository root directory or name of overlay bundle file'), + _('REPO')), + ('', 'cwd', '', + _('change working directory'), _('DIR')), + ('y', 'noninteractive', None, + _('do not prompt, automatically pick the first choice for all prompts')), + ('q', 'quiet', None, _('suppress output')), + ('v', 'verbose', None, _('enable additional output')), + ('', 'config', [], + _('set/override config option (use \'section.name=value\')'), + _('CONFIG')), + ('', 'debug', None, _('enable debugging output')), + ('', 'debugger', None, _('start debugger')), + ('', 'encoding', encoding.encoding, _('set the charset encoding'), + _('ENCODE')), + ('', 'encodingmode', encoding.encodingmode, + _('set the charset encoding mode'), _('MODE')), + ('', 'traceback', None, _('always print a traceback on exception')), + ('', 'time', None, _('time how long the command takes')), + ('', 'profile', None, _('print command execution profile')), + ('', 'version', None, _('output version information and exit')), + ('h', 'help', None, _('display help and exit')), +] + +dryrunopts = [('n', 'dry-run', None, + _('do not perform actions, just print output'))] + +remoteopts = [ + ('e', 'ssh', '', + _('specify ssh command to use'), _('CMD')), + ('', 'remotecmd', '', + _('specify hg command to run on the remote side'), _('CMD')), + ('', 'insecure', None, + 
_('do not verify server certificate (ignoring web.cacerts config)')), +] + +walkopts = [ + ('I', 'include', [], + _('include names matching the given patterns'), _('PATTERN')), + ('X', 'exclude', [], + _('exclude names matching the given patterns'), _('PATTERN')), +] + +commitopts = [ + ('m', 'message', '', + _('use text as commit message'), _('TEXT')), + ('l', 'logfile', '', + _('read commit message from file'), _('FILE')), +] + +commitopts2 = [ + ('d', 'date', '', + _('record the specified date as commit date'), _('DATE')), + ('u', 'user', '', + _('record the specified user as committer'), _('USER')), +] + +templateopts = [ + ('', 'style', '', + _('display using template map file'), _('STYLE')), + ('', 'template', '', + _('display with template'), _('TEMPLATE')), +] + +logopts = [ + ('p', 'patch', None, _('show patch')), + ('g', 'git', None, _('use git extended diff format')), + ('l', 'limit', '', + _('limit number of changes displayed'), _('NUM')), + ('M', 'no-merges', None, _('do not show merges')), + ('', 'stat', None, _('output diffstat-style summary of changes')), +] + templateopts + +diffopts = [ + ('a', 'text', None, _('treat all files as text')), + ('g', 'git', None, _('use git extended diff format')), + ('', 'nodates', None, _('omit dates from diff headers')) +] + +diffopts2 = [ + ('p', 'show-function', None, _('show which function each change is in')), + ('', 'reverse', None, _('produce a diff that undoes the changes')), + ('w', 'ignore-all-space', None, + _('ignore white space when comparing lines')), + ('b', 'ignore-space-change', None, + _('ignore changes in the amount of white space')), + ('B', 'ignore-blank-lines', None, + _('ignore changes whose lines are all blank')), + ('U', 'unified', '', + _('number of lines of context to show'), _('NUM')), + ('', 'stat', None, _('output diffstat-style summary of changes')), +] + +mergetoolopts = [ + ('t', 'tool', '', _('specify merge tool')), +] + +similarityopts = [ + ('s', 'similarity', '', + _('guess 
renamed files by similarity (0<=s<=100)'), _('SIMILARITY')) +] + +subrepoopts = [ + ('S', 'subrepos', None, + _('recurse into subrepositories')) +] + +# Commands start here, listed alphabetically + +@command('^add', + walkopts + subrepoopts + dryrunopts, + _('[OPTION]... [FILE]...')) +def add(ui, repo, *pats, **opts): + """add the specified files on the next commit + + Schedule files to be version controlled and added to the + repository. + + The files will be added to the repository at the next commit. To + undo an add before that, see :hg:`forget`. + + If no names are given, add all files to the repository. + + .. container:: verbose + + An example showing how new (unknown) files are added + automatically by :hg:`add`:: + + $ ls + foo.c + $ hg status + ? foo.c + $ hg add + adding foo.c + $ hg status + A foo.c + + Returns 0 if all files are successfully added. + """ + + m = scmutil.match(repo[None], pats, opts) + rejected = cmdutil.add(ui, repo, m, opts.get('dry_run'), + opts.get('subrepos'), prefix="") + return rejected and 1 or 0 + +@command('addremove', + similarityopts + walkopts + dryrunopts, + _('[OPTION]... [FILE]...')) +def addremove(ui, repo, *pats, **opts): + """add all new files, delete all missing files + + Add all new files and remove all missing files from the + repository. + + New files are ignored if they match any of the patterns in + ``.hgignore``. As with add, these changes take effect at the next + commit. + + Use the -s/--similarity option to detect renamed files. With a + parameter greater than 0, this compares every removed file with + every added file and records those similar enough as renames. This + option takes a percentage between 0 (disabled) and 100 (files must + be identical) as its parameter. Detecting renamed files this way + can be expensive. After using this option, :hg:`status -C` can be + used to check which files were identified as moved or renamed. + + Returns 0 if all files are successfully added. 
+ """ + try: + sim = float(opts.get('similarity') or 100) + except ValueError: + raise util.Abort(_('similarity must be a number')) + if sim < 0 or sim > 100: + raise util.Abort(_('similarity must be between 0 and 100')) + return scmutil.addremove(repo, pats, opts, similarity=sim / 100.0) + +@command('^annotate|blame', + [('r', 'rev', '', _('annotate the specified revision'), _('REV')), + ('', 'follow', None, + _('follow copies/renames and list the filename (DEPRECATED)')), + ('', 'no-follow', None, _("don't follow copies and renames")), + ('a', 'text', None, _('treat all files as text')), + ('u', 'user', None, _('list the author (long with -v)')), + ('f', 'file', None, _('list the filename')), + ('d', 'date', None, _('list the date (short with -q)')), + ('n', 'number', None, _('list the revision number (default)')), + ('c', 'changeset', None, _('list the changeset')), + ('l', 'line-number', None, _('show line number at the first appearance')) + ] + walkopts, + _('[-r REV] [-f] [-a] [-u] [-d] [-n] [-c] [-l] FILE...')) +def annotate(ui, repo, *pats, **opts): + """show changeset information by line for each file + + List changes in files, showing the revision id responsible for + each line + + This command is useful for discovering when a change was made and + by whom. + + Without the -a/--text option, annotate will avoid processing files + it detects as binary. With -a, annotate will annotate the file + anyway, although the results will probably be neither useful + nor desirable. + + Returns 0 on success. 
+ """ + if opts.get('follow'): + # --follow is deprecated and now just an alias for -f/--file + # to mimic the behavior of Mercurial before version 1.5 + opts['file'] = True + + datefunc = ui.quiet and util.shortdate or util.datestr + getdate = util.cachefunc(lambda x: datefunc(x[0].date())) + + if not pats: + raise util.Abort(_('at least one filename or pattern is required')) + + opmap = [('user', ' ', lambda x: ui.shortuser(x[0].user())), + ('number', ' ', lambda x: str(x[0].rev())), + ('changeset', ' ', lambda x: short(x[0].node())), + ('date', ' ', getdate), + ('file', ' ', lambda x: x[0].path()), + ('line_number', ':', lambda x: str(x[1])), + ] + + if (not opts.get('user') and not opts.get('changeset') + and not opts.get('date') and not opts.get('file')): + opts['number'] = True + + linenumber = opts.get('line_number') is not None + if linenumber and (not opts.get('changeset')) and (not opts.get('number')): + raise util.Abort(_('at least one of -n/-c is required for -l')) + + funcmap = [(func, sep) for op, sep, func in opmap if opts.get(op)] + funcmap[0] = (funcmap[0][0], '') # no separator in front of first column + + def bad(x, y): + raise util.Abort("%s: %s" % (x, y)) + + ctx = scmutil.revsingle(repo, opts.get('rev')) + m = scmutil.match(ctx, pats, opts) + m.bad = bad + follow = not opts.get('no_follow') + for abs in ctx.walk(m): + fctx = ctx[abs] + if not opts.get('text') and util.binary(fctx.data()): + ui.write(_("%s: binary file\n") % ((pats and m.rel(abs)) or abs)) + continue + + lines = fctx.annotate(follow=follow, linenumber=linenumber) + pieces = [] + + for f, sep in funcmap: + l = [f(n) for n, dummy in lines] + if l: + sized = [(x, encoding.colwidth(x)) for x in l] + ml = max([w for x, w in sized]) + pieces.append(["%s%s%s" % (sep, ' ' * (ml - w), x) + for x, w in sized]) + + if pieces: + for p, l in zip(zip(*pieces), lines): + ui.write("%s: %s" % ("".join(p), l[1])) + +@command('archive', + [('', 'no-decode', None, _('do not pass files through 
decoders')), + ('p', 'prefix', '', _('directory prefix for files in archive'), + _('PREFIX')), + ('r', 'rev', '', _('revision to distribute'), _('REV')), + ('t', 'type', '', _('type of distribution to create'), _('TYPE')), + ] + subrepoopts + walkopts, + _('[OPTION]... DEST')) +def archive(ui, repo, dest, **opts): + '''create an unversioned archive of a repository revision + + By default, the revision used is the parent of the working + directory; use -r/--rev to specify a different revision. + + The archive type is automatically detected based on file + extension (or override using -t/--type). + + .. container:: verbose + + Examples: + + - create a zip file containing the 1.0 release:: + + hg archive -r 1.0 project-1.0.zip + + - create a tarball excluding .hg files:: + + hg archive project.tar.gz -X ".hg*" + + Valid types are: + + :``files``: a directory full of files (default) + :``tar``: tar archive, uncompressed + :``tbz2``: tar archive, compressed using bzip2 + :``tgz``: tar archive, compressed using gzip + :``uzip``: zip archive, uncompressed + :``zip``: zip archive, compressed using deflate + + The exact name of the destination archive or directory is given + using a format string; see :hg:`help export` for details. + + Each member added to an archive file has a directory prefix + prepended. Use -p/--prefix to specify a format string for the + prefix. The default is the basename of the archive, with suffixes + removed. + + Returns 0 on success. 
+ ''' + + ctx = scmutil.revsingle(repo, opts.get('rev')) + if not ctx: + raise util.Abort(_('no working directory: please specify a revision')) + node = ctx.node() + dest = cmdutil.makefilename(repo, dest, node) + if os.path.realpath(dest) == repo.root: + raise util.Abort(_('repository root cannot be destination')) + + kind = opts.get('type') or archival.guesskind(dest) or 'files' + prefix = opts.get('prefix') + + if dest == '-': + if kind == 'files': + raise util.Abort(_('cannot archive plain files to stdout')) + dest = cmdutil.makefileobj(repo, dest) + if not prefix: + prefix = os.path.basename(repo.root) + '-%h' + + prefix = cmdutil.makefilename(repo, prefix, node) + matchfn = scmutil.match(ctx, [], opts) + archival.archive(repo, dest, node, kind, not opts.get('no_decode'), + matchfn, prefix, subrepos=opts.get('subrepos')) + +@command('backout', + [('', 'merge', None, _('merge with old dirstate parent after backout')), + ('', 'parent', '', + _('parent to choose when backing out merge (DEPRECATED)'), _('REV')), + ('r', 'rev', '', _('revision to backout'), _('REV')), + ] + mergetoolopts + walkopts + commitopts + commitopts2, + _('[OPTION]... [-r] REV')) +def backout(ui, repo, node=None, rev=None, **opts): + '''reverse effect of earlier changeset + + Prepare a new changeset with the effect of REV undone in the + current working directory. + + If REV is the parent of the working directory, then this new changeset + is committed automatically. Otherwise, hg needs to merge the + changes and the merged result is left uncommitted. + + .. note:: + backout cannot be used to fix either an unwanted or + incorrect merge. + + .. container:: verbose + + By default, the pending changeset will have one parent, + maintaining a linear history. With --merge, the pending + changeset will instead have two parents: the old parent of the + working directory and a new child of REV that simply undoes REV. 
+ + Before version 1.7, the behavior without --merge was equivalent + to specifying --merge followed by :hg:`update --clean .` to + cancel the merge and leave the child of REV as a head to be + merged separately. + + See :hg:`help dates` for a list of formats valid for -d/--date. + + Returns 0 on success. + ''' + if rev and node: + raise util.Abort(_("please specify just one revision")) + + if not rev: + rev = node + + if not rev: + raise util.Abort(_("please specify a revision to backout")) + + date = opts.get('date') + if date: + opts['date'] = util.parsedate(date) + + cmdutil.bailifchanged(repo) + node = scmutil.revsingle(repo, rev).node() + + op1, op2 = repo.dirstate.parents() + a = repo.changelog.ancestor(op1, node) + if a != node: + raise util.Abort(_('cannot backout change on a different branch')) + + p1, p2 = repo.changelog.parents(node) + if p1 == nullid: + raise util.Abort(_('cannot backout a change with no parents')) + if p2 != nullid: + if not opts.get('parent'): + raise util.Abort(_('cannot backout a merge changeset')) + p = repo.lookup(opts['parent']) + if p not in (p1, p2): + raise util.Abort(_('%s is not a parent of %s') % + (short(p), short(node))) + parent = p + else: + if opts.get('parent'): + raise util.Abort(_('cannot use --parent on non-merge changeset')) + parent = p1 + + # the backout should appear on the same branch + branch = repo.dirstate.branch() + hg.clean(repo, node, show_stats=False) + repo.dirstate.setbranch(branch) + revert_opts = opts.copy() + revert_opts['date'] = None + revert_opts['all'] = True + revert_opts['rev'] = hex(parent) + revert_opts['no_backup'] = None + revert(ui, repo, **revert_opts) + if not opts.get('merge') and op1 != node: + try: + ui.setconfig('ui', 'forcemerge', opts.get('tool', '')) + return hg.update(repo, op1) + finally: + ui.setconfig('ui', 'forcemerge', '') + + commit_opts = opts.copy() + commit_opts['addremove'] = False + if not commit_opts['message'] and not commit_opts['logfile']: + # we don't translate 
commit messages + commit_opts['message'] = "Backed out changeset %s" % short(node) + commit_opts['force_editor'] = True + commit(ui, repo, **commit_opts) + def nice(node): + return '%d:%s' % (repo.changelog.rev(node), short(node)) + ui.status(_('changeset %s backs out changeset %s\n') % + (nice(repo.changelog.tip()), nice(node))) + if opts.get('merge') and op1 != node: + hg.clean(repo, op1, show_stats=False) + ui.status(_('merging with changeset %s\n') + % nice(repo.changelog.tip())) + try: + ui.setconfig('ui', 'forcemerge', opts.get('tool', '')) + return hg.merge(repo, hex(repo.changelog.tip())) + finally: + ui.setconfig('ui', 'forcemerge', '') + return 0 + +@command('bisect', + [('r', 'reset', False, _('reset bisect state')), + ('g', 'good', False, _('mark changeset good')), + ('b', 'bad', False, _('mark changeset bad')), + ('s', 'skip', False, _('skip testing changeset')), + ('e', 'extend', False, _('extend the bisect range')), + ('c', 'command', '', _('use command to check changeset state'), _('CMD')), + ('U', 'noupdate', False, _('do not update to target'))], + _("[-gbsr] [-U] [-c CMD] [REV]")) +def bisect(ui, repo, rev=None, extra=None, command=None, + reset=None, good=None, bad=None, skip=None, extend=None, + noupdate=None): + """subdivision search of changesets + + This command helps to find changesets which introduce problems. To + use, mark the earliest changeset you know exhibits the problem as + bad, then mark the latest changeset which is free from the problem + as good. Bisect will update your working directory to a revision + for testing (unless the -U/--noupdate option is specified). Once + you have performed tests, mark the working directory as good or + bad, and bisect will either update to another candidate changeset + or announce that it has found the bad revision. + + As a shortcut, you can also use the revision argument to mark a + revision as good or bad without checking it out first. 
+ + If you supply a command, it will be used for automatic bisection. + Its exit status will be used to mark revisions as good or bad: + status 0 means good, 125 means to skip the revision, 127 + (command not found) will abort the bisection, and any other + non-zero exit status means the revision is bad. + + .. container:: verbose + + Some examples: + + - start a bisection with known bad revision 12, and good revision 34:: + + hg bisect --bad 34 + hg bisect --good 12 + + - advance the current bisection by marking current revision as good or + bad:: + + hg bisect --good + hg bisect --bad + + - mark the current revision, or a known revision, to be skipped (eg. if + that revision is not usable because of another issue):: + + hg bisect --skip + hg bisect --skip 23 + + - forget the current bisection:: + + hg bisect --reset + + - use 'make && make tests' to automatically find the first broken + revision:: + + hg bisect --reset + hg bisect --bad 34 + hg bisect --good 12 + hg bisect --command 'make && make tests' + + - see all changesets whose states are already known in the current + bisection:: + + hg log -r "bisect(pruned)" + + - see all changesets that took part in the current bisection:: + + hg log -r "bisect(range)" + + - with the graphlog extension, you can even get a nice graph:: + + hg log --graph -r "bisect(range)" + + See :hg:`help revsets` for more about the `bisect()` keyword. + + Returns 0 on success. + """ + def extendbisectrange(nodes, good): + # bisect is incomplete when it ends on a merge node and + # one of the parent was not checked. 
+ parents = repo[nodes[0]].parents() + if len(parents) > 1: + side = good and state['bad'] or state['good'] + num = len(set(i.node() for i in parents) & set(side)) + if num == 1: + return parents[0].ancestor(parents[1]) + return None + + def print_result(nodes, good): + displayer = cmdutil.show_changeset(ui, repo, {}) + if len(nodes) == 1: + # narrowed it down to a single revision + if good: + ui.write(_("The first good revision is:\n")) + else: + ui.write(_("The first bad revision is:\n")) + displayer.show(repo[nodes[0]]) + extendnode = extendbisectrange(nodes, good) + if extendnode is not None: + ui.write(_('Not all ancestors of this changeset have been' + ' checked.\nUse bisect --extend to continue the ' + 'bisection from\nthe common ancestor, %s.\n') + % extendnode) + else: + # multiple possible revisions + if good: + ui.write(_("Due to skipped revisions, the first " + "good revision could be any of:\n")) + else: + ui.write(_("Due to skipped revisions, the first " + "bad revision could be any of:\n")) + for n in nodes: + displayer.show(repo[n]) + displayer.close() + + def check_state(state, interactive=True): + if not state['good'] or not state['bad']: + if (good or bad or skip or reset) and interactive: + return + if not state['good']: + raise util.Abort(_('cannot bisect (no known good revisions)')) + else: + raise util.Abort(_('cannot bisect (no known bad revisions)')) + return True + + # backward compatibility + if rev in "good bad reset init".split(): + ui.warn(_("(use of 'hg bisect ' is deprecated)\n")) + cmd, rev, extra = rev, extra, None + if cmd == "good": + good = True + elif cmd == "bad": + bad = True + else: + reset = True + elif extra or good + bad + skip + reset + extend + bool(command) > 1: + raise util.Abort(_('incompatible arguments')) + + if reset: + p = repo.join("bisect.state") + if os.path.exists(p): + os.unlink(p) + return + + state = hbisect.load_state(repo) + + if command: + changesets = 1 + try: + while changesets: + # update state + 
status = util.system(command, out=ui.fout) + if status == 125: + transition = "skip" + elif status == 0: + transition = "good" + # status < 0 means process was killed + elif status == 127: + raise util.Abort(_("failed to execute %s") % command) + elif status < 0: + raise util.Abort(_("%s killed") % command) + else: + transition = "bad" + ctx = scmutil.revsingle(repo, rev) + rev = None # clear for future iterations + state[transition].append(ctx.node()) + ui.status(_('Changeset %d:%s: %s\n') % (ctx, ctx, transition)) + check_state(state, interactive=False) + # bisect + nodes, changesets, good = hbisect.bisect(repo.changelog, state) + # update to next check + cmdutil.bailifchanged(repo) + hg.clean(repo, nodes[0], show_stats=False) + finally: + hbisect.save_state(repo, state) + print_result(nodes, good) + return + + # update state + + if rev: + nodes = [repo.lookup(i) for i in scmutil.revrange(repo, [rev])] + else: + nodes = [repo.lookup('.')] + + if good or bad or skip: + if good: + state['good'] += nodes + elif bad: + state['bad'] += nodes + elif skip: + state['skip'] += nodes + hbisect.save_state(repo, state) + + if not check_state(state): + return + + # actually bisect + nodes, changesets, good = hbisect.bisect(repo.changelog, state) + if extend: + if not changesets: + extendnode = extendbisectrange(nodes, good) + if extendnode is not None: + ui.write(_("Extending search to changeset %d:%s\n" + % (extendnode.rev(), extendnode))) + if noupdate: + return + cmdutil.bailifchanged(repo) + return hg.clean(repo, extendnode.node()) + raise util.Abort(_("nothing to extend")) + + if changesets == 0: + print_result(nodes, good) + else: + assert len(nodes) == 1 # only a single node can be tested next + node = nodes[0] + # compute the approximate number of remaining tests + tests, size = 0, 2 + while size <= changesets: + tests, size = tests + 1, size * 2 + rev = repo.changelog.rev(node) + ui.write(_("Testing changeset %d:%s " + "(%d changesets remaining, ~%d tests)\n") + % 
(rev, short(node), changesets, tests)) + if not noupdate: + cmdutil.bailifchanged(repo) + return hg.clean(repo, node) + +@command('bookmarks', + [('f', 'force', False, _('force')), + ('r', 'rev', '', _('revision'), _('REV')), + ('d', 'delete', False, _('delete a given bookmark')), + ('m', 'rename', '', _('rename a given bookmark'), _('NAME')), + ('i', 'inactive', False, _('do not mark a new bookmark active'))], + _('hg bookmarks [-f] [-d] [-i] [-m NAME] [-r REV] [NAME]')) +def bookmark(ui, repo, mark=None, rev=None, force=False, delete=False, + rename=None, inactive=False): + '''track a line of development with movable markers + + Bookmarks are pointers to certain commits that move when + committing. Bookmarks are local. They can be renamed, copied and + deleted. It is possible to use bookmark names in :hg:`merge` and + :hg:`update` to merge and update respectively to a given bookmark. + + You can use :hg:`bookmark NAME` to set a bookmark on the working + directory's parent revision with the given name. If you specify + a revision using -r REV (where REV may be an existing bookmark), + the bookmark is assigned to that revision. + + Bookmarks can be pushed and pulled between repositories (see :hg:`help + push` and :hg:`help pull`). This requires both the local and remote + repositories to support bookmarks. For versions prior to 1.8, this means + the bookmarks extension must be enabled. 
+ ''' + hexfn = ui.debugflag and hex or short + marks = repo._bookmarks + cur = repo.changectx('.').node() + + if rename: + if rename not in marks: + raise util.Abort(_("bookmark '%s' does not exist") % rename) + if mark in marks and not force: + raise util.Abort(_("bookmark '%s' already exists " + "(use -f to force)") % mark) + if mark is None: + raise util.Abort(_("new bookmark name required")) + marks[mark] = marks[rename] + if repo._bookmarkcurrent == rename and not inactive: + bookmarks.setcurrent(repo, mark) + del marks[rename] + bookmarks.write(repo) + return + + if delete: + if mark is None: + raise util.Abort(_("bookmark name required")) + if mark not in marks: + raise util.Abort(_("bookmark '%s' does not exist") % mark) + if mark == repo._bookmarkcurrent: + bookmarks.setcurrent(repo, None) + del marks[mark] + bookmarks.write(repo) + return + + if mark is not None: + if "\n" in mark: + raise util.Abort(_("bookmark name cannot contain newlines")) + mark = mark.strip() + if not mark: + raise util.Abort(_("bookmark names cannot consist entirely of " + "whitespace")) + if inactive and mark == repo._bookmarkcurrent: + bookmarks.setcurrent(repo, None) + return + if mark in marks and not force: + raise util.Abort(_("bookmark '%s' already exists " + "(use -f to force)") % mark) + if ((mark in repo.branchtags() or mark == repo.dirstate.branch()) + and not force): + raise util.Abort( + _("a bookmark cannot have the name of an existing branch")) + if rev: + marks[mark] = repo.lookup(rev) + else: + marks[mark] = repo.changectx('.').node() + if not inactive and repo.changectx('.').node() == marks[mark]: + bookmarks.setcurrent(repo, mark) + bookmarks.write(repo) + return + + if mark is None: + if rev: + raise util.Abort(_("bookmark name required")) + if len(marks) == 0: + ui.status(_("no bookmarks set\n")) + else: + for bmark, n in sorted(marks.iteritems()): + current = repo._bookmarkcurrent + if bmark == current and n == cur: + prefix, label = '*', 'bookmarks.current' 
+ else: + prefix, label = ' ', '' + + if ui.quiet: + ui.write("%s\n" % bmark, label=label) + else: + ui.write(" %s %-25s %d:%s\n" % ( + prefix, bmark, repo.changelog.rev(n), hexfn(n)), + label=label) + return + +@command('branch', + [('f', 'force', None, + _('set branch name even if it shadows an existing branch')), + ('C', 'clean', None, _('reset branch name to parent branch name'))], + _('[-fC] [NAME]')) +def branch(ui, repo, label=None, **opts): + """set or show the current branch name + + With no argument, show the current branch name. With one argument, + set the working directory branch name (the branch will not exist + in the repository until the next commit). Standard practice + recommends that primary development take place on the 'default' + branch. + + Unless -f/--force is specified, branch will not let you set a + branch name that already exists, even if it's inactive. + + Use -C/--clean to reset the working directory branch to that of + the parent of the working directory, negating a previous branch + change. + + Use the command :hg:`update` to switch to an existing branch. Use + :hg:`commit --close-branch` to mark this branch as closed. + + .. note:: + Branch names are permanent. Use :hg:`bookmark` to create a + light-weight bookmark instead. See :hg:`help glossary` for more + information about named branches and bookmarks. + + Returns 0 on success. 
+ """ + + if opts.get('clean'): + label = repo[None].p1().branch() + repo.dirstate.setbranch(label) + ui.status(_('reset working directory to branch %s\n') % label) + elif label: + if not opts.get('force') and label in repo.branchtags(): + if label not in [p.branch() for p in repo.parents()]: + raise util.Abort(_('a branch of the same name already exists'), + # i18n: "it" refers to an existing branch + hint=_("use 'hg update' to switch to it")) + repo.dirstate.setbranch(label) + ui.status(_('marked working directory as branch %s\n') % label) + else: + ui.write("%s\n" % repo.dirstate.branch()) + +@command('branches', + [('a', 'active', False, _('show only branches that have unmerged heads')), + ('c', 'closed', False, _('show normal and closed branches'))], + _('[-ac]')) +def branches(ui, repo, active=False, closed=False): + """list repository named branches + + List the repository's named branches, indicating which ones are + inactive. If -c/--closed is specified, also list branches which have + been marked closed (see :hg:`commit --close-branch`). + + If -a/--active is specified, only show active branches. A branch + is considered active if it contains repository heads. + + Use the command :hg:`update` to switch to an existing branch. + + Returns 0. 
+ """ + + hexfunc = ui.debugflag and hex or short + activebranches = [repo[n].branch() for n in repo.heads()] + def testactive(tag, node): + realhead = tag in activebranches + open = node in repo.branchheads(tag, closed=False) + return realhead and open + branches = sorted([(testactive(tag, node), repo.changelog.rev(node), tag) + for tag, node in repo.branchtags().items()], + reverse=True) + + for isactive, node, tag in branches: + if (not active) or isactive: + if ui.quiet: + ui.write("%s\n" % tag) + else: + hn = repo.lookup(node) + if isactive: + label = 'branches.active' + notice = '' + elif hn not in repo.branchheads(tag, closed=False): + if not closed: + continue + label = 'branches.closed' + notice = _(' (closed)') + else: + label = 'branches.inactive' + notice = _(' (inactive)') + if tag == repo.dirstate.branch(): + label = 'branches.current' + rev = str(node).rjust(31 - encoding.colwidth(tag)) + rev = ui.label('%s:%s' % (rev, hexfunc(hn)), 'log.changeset') + tag = ui.label(tag, label) + ui.write("%s %s%s\n" % (tag, rev, notice)) + +@command('bundle', + [('f', 'force', None, _('run even when the destination is unrelated')), + ('r', 'rev', [], _('a changeset intended to be added to the destination'), + _('REV')), + ('b', 'branch', [], _('a specific branch you would like to bundle'), + _('BRANCH')), + ('', 'base', [], + _('a base changeset assumed to be available at the destination'), + _('REV')), + ('a', 'all', None, _('bundle all changesets in the repository')), + ('t', 'type', 'bzip2', _('bundle compression type to use'), _('TYPE')), + ] + remoteopts, + _('[-f] [-t TYPE] [-a] [-r REV]... [--base REV]... FILE [DEST]')) +def bundle(ui, repo, fname, dest=None, **opts): + """create a changegroup file + + Generate a compressed changegroup file collecting changesets not + known to be in another repository. + + If you omit the destination repository, then hg assumes the + destination will have all the nodes you specify with --base + parameters. 
To create a bundle containing all changesets, use + -a/--all (or --base null). + + You can change compression method with the -t/--type option. + The available compression methods are: none, bzip2, and + gzip (by default, bundles are compressed using bzip2). + + The bundle file can then be transferred using conventional means + and applied to another repository with the unbundle or pull + command. This is useful when direct push and pull are not + available or when exporting an entire repository is undesirable. + + Applying bundles preserves all changeset contents including + permissions, copy/rename information, and revision history. + + Returns 0 on success, 1 if no changes found. + """ + revs = None + if 'rev' in opts: + revs = scmutil.revrange(repo, opts['rev']) + + if opts.get('all'): + base = ['null'] + else: + base = scmutil.revrange(repo, opts.get('base')) + if base: + if dest: + raise util.Abort(_("--base is incompatible with specifying " + "a destination")) + common = [repo.lookup(rev) for rev in base] + heads = revs and map(repo.lookup, revs) or revs + else: + dest = ui.expandpath(dest or 'default-push', dest or 'default') + dest, branches = hg.parseurl(dest, opts.get('branch')) + other = hg.peer(repo, opts, dest) + revs, checkout = hg.addbranchrevs(repo, other, branches, revs) + heads = revs and map(repo.lookup, revs) or revs + common, outheads = discovery.findcommonoutgoing(repo, other, + onlyheads=heads, + force=opts.get('force')) + + cg = repo.getbundle('bundle', common=common, heads=heads) + if not cg: + ui.status(_("no changes found\n")) + return 1 + + bundletype = opts.get('type', 'bzip2').lower() + btypes = {'none': 'HG10UN', 'bzip2': 'HG10BZ', 'gzip': 'HG10GZ'} + bundletype = btypes.get(bundletype) + if bundletype not in changegroup.bundletypes: + raise util.Abort(_('unknown bundle type specified with --type')) + + changegroup.writebundle(cg, fname, bundletype) + +@command('cat', + [('o', 'output', '', + _('print output to file with formatted 
name'), _('FORMAT')), + ('r', 'rev', '', _('print the given revision'), _('REV')), + ('', 'decode', None, _('apply any matching decode filter')), + ] + walkopts, + _('[OPTION]... FILE...')) +def cat(ui, repo, file1, *pats, **opts): + """output the current or given revision of files + + Print the specified files as they were at the given revision. If + no revision is given, the parent of the working directory is used, + or tip if no revision is checked out. + + Output may be to a file, in which case the name of the file is + given using a format string. The formatting rules are the same as + for the export command, with the following additions: + + :``%s``: basename of file being printed + :``%d``: dirname of file being printed, or '.' if in repository root + :``%p``: root-relative path name of file being printed + + Returns 0 on success. + """ + ctx = scmutil.revsingle(repo, opts.get('rev')) + err = 1 + m = scmutil.match(ctx, (file1,) + pats, opts) + for abs in ctx.walk(m): + fp = cmdutil.makefileobj(repo, opts.get('output'), ctx.node(), + pathname=abs) + data = ctx[abs].data() + if opts.get('decode'): + data = repo.wwritedata(abs, data) + fp.write(data) + fp.close() + err = 0 + return err + +@command('^clone', + [('U', 'noupdate', None, + _('the clone will include an empty working copy (only a repository)')), + ('u', 'updaterev', '', _('revision, tag or branch to check out'), _('REV')), + ('r', 'rev', [], _('include the specified changeset'), _('REV')), + ('b', 'branch', [], _('clone only the specified branch'), _('BRANCH')), + ('', 'pull', None, _('use pull protocol to copy metadata')), + ('', 'uncompressed', None, _('use uncompressed transfer (fast over LAN)')), + ] + remoteopts, + _('[OPTION]... SOURCE [DEST]')) +def clone(ui, source, dest=None, **opts): + """make a copy of an existing repository + + Create a copy of an existing repository in a new directory. + + If no destination directory name is specified, it defaults to the + basename of the source. 
+ + The location of the source is added to the new repository's + ``.hg/hgrc`` file, as the default to be used for future pulls. + + Only local paths and ``ssh://`` URLs are supported as + destinations. For ``ssh://`` destinations, no working directory or + ``.hg/hgrc`` will be created on the remote side. + + To pull only a subset of changesets, specify one or more revisions + identifiers with -r/--rev or branches with -b/--branch. The + resulting clone will contain only the specified changesets and + their ancestors. These options (or 'clone src#rev dest') imply + --pull, even for local source repositories. Note that specifying a + tag will include the tagged changeset but not the changeset + containing the tag. + + To check out a particular version, use -u/--update, or + -U/--noupdate to create a clone with no working directory. + + .. container:: verbose + + For efficiency, hardlinks are used for cloning whenever the + source and destination are on the same filesystem (note this + applies only to the repository data, not to the working + directory). Some filesystems, such as AFS, implement hardlinking + incorrectly, but do not report errors. In these cases, use the + --pull option to avoid hardlinking. + + In some cases, you can clone repositories and the working + directory using full hardlinks with :: + + $ cp -al REPO REPOCLONE + + This is the fastest way to clone, but it is not always safe. The + operation is not atomic (making sure REPO is not modified during + the operation is up to you) and you have to make sure your + editor breaks hardlinks (Emacs and most Linux Kernel tools do + so). Also, this is not compatible with certain extensions that + place their metadata under the .hg directory, such as mq. + + Mercurial will update the working directory to the first applicable + revision from this list: + + a) null if -U or the source repository has no changesets + b) if -u . 
and the source repository is local, the first parent of + the source repository's working directory + c) the changeset specified with -u (if a branch name, this means the + latest head of that branch) + d) the changeset specified with -r + e) the tipmost head specified with -b + f) the tipmost head specified with the url#branch source syntax + g) the tipmost head of the default branch + h) tip + + Examples: + + - clone a remote repository to a new directory named hg/:: + + hg clone http://selenic.com/hg + + - create a lightweight local clone:: + + hg clone project/ project-feature/ + + - clone from an absolute path on an ssh server (note double-slash):: + + hg clone ssh://user@server//home/projects/alpha/ + + - do a high-speed clone over a LAN while checking out a + specified version:: + + hg clone --uncompressed http://server/repo -u 1.5 + + - create a repository without changesets after a particular revision:: + + hg clone -r 04e544 experimental/ good/ + + - clone (and track) a particular named branch:: + + hg clone http://selenic.com/hg#stable + + See :hg:`help urls` for details on specifying URLs. + + Returns 0 on success. + """ + if opts.get('noupdate') and opts.get('updaterev'): + raise util.Abort(_("cannot specify both --noupdate and --updaterev")) + + r = hg.clone(ui, opts, source, dest, + pull=opts.get('pull'), + stream=opts.get('uncompressed'), + rev=opts.get('rev'), + update=opts.get('updaterev') or not opts.get('noupdate'), + branch=opts.get('branch')) + + return r is None + +@command('^commit|ci', + [('A', 'addremove', None, + _('mark new/missing files as added/removed before committing')), + ('', 'close-branch', None, + _('mark a branch as closed, hiding it from the branch list')), + ] + walkopts + commitopts + commitopts2 + subrepoopts, + _('[OPTION]... [FILE]...')) +def commit(ui, repo, *pats, **opts): + """commit the specified files or all outstanding changes + + Commit changes to the given files into the repository. 
Unlike a + centralized SCM, this operation is a local operation. See + :hg:`push` for a way to actively distribute your changes. + + If a list of files is omitted, all changes reported by :hg:`status` + will be committed. + + If you are committing the result of a merge, do not provide any + filenames or -I/-X filters. + + If no commit message is specified, Mercurial starts your + configured editor where you can enter a message. In case your + commit fails, you will find a backup of your message in + ``.hg/last-message.txt``. + + See :hg:`help dates` for a list of formats valid for -d/--date. + + Returns 0 on success, 1 if nothing changed. + """ + if opts.get('subrepos'): + # Let --subrepos on the command line overide config setting. + ui.setconfig('ui', 'commitsubrepos', True) + + extra = {} + if opts.get('close_branch'): + if repo['.'].node() not in repo.branchheads(): + # The topo heads set is included in the branch heads set of the + # current branch, so it's sufficient to test branchheads + raise util.Abort(_('can only close branch heads')) + extra['close'] = 1 + e = cmdutil.commiteditor + if opts.get('force_editor'): + e = cmdutil.commitforceeditor + + def commitfunc(ui, repo, message, match, opts): + return repo.commit(message, opts.get('user'), opts.get('date'), match, + editor=e, extra=extra) + + branch = repo[None].branch() + bheads = repo.branchheads(branch) + + node = cmdutil.commit(ui, repo, commitfunc, pats, opts) + if not node: + stat = repo.status(match=scmutil.match(repo[None], pats, opts)) + if stat[3]: + ui.status(_("nothing changed (%d missing files, see 'hg status')\n") + % len(stat[3])) + else: + ui.status(_("nothing changed\n")) + return 1 + + ctx = repo[node] + parents = ctx.parents() + + if (bheads and node not in bheads and not + [x for x in parents if x.node() in bheads and x.branch() == branch]): + ui.status(_('created new head\n')) + # The message is not printed for initial roots. 
For the other + # changesets, it is printed in the following situations: + # + # Par column: for the 2 parents with ... + # N: null or no parent + # B: parent is on another named branch + # C: parent is a regular non head changeset + # H: parent was a branch head of the current branch + # Msg column: whether we print "created new head" message + # In the following, it is assumed that there already exists some + # initial branch heads of the current branch, otherwise nothing is + # printed anyway. + # + # Par Msg Comment + # NN y additional topo root + # + # BN y additional branch root + # CN y additional topo head + # HN n usual case + # + # BB y weird additional branch root + # CB y branch merge + # HB n merge with named branch + # + # CC y additional head from merge + # CH n merge with a head + # + # HH n head merge: head count decreases + + if not opts.get('close_branch'): + for r in parents: + if r.extra().get('close') and r.branch() == branch: + ui.status(_('reopening closed branch head %d\n') % r) + + if ui.debugflag: + ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx.hex())) + elif ui.verbose: + ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx)) + +@command('copy|cp', + [('A', 'after', None, _('record a copy that has already occurred')), + ('f', 'force', None, _('forcibly copy over an existing managed file')), + ] + walkopts + dryrunopts, + _('[OPTION]... [SOURCE]... DEST')) +def copy(ui, repo, *pats, **opts): + """mark files as copied for the next commit + + Mark dest as having copies of source files. If dest is a + directory, copies are put in that directory. If dest is a file, + the source must be a single file. + + By default, this command copies the contents of files as they + exist in the working directory. If invoked with -A/--after, the + operation is recorded, but no copying is performed. + + This command takes effect with the next commit. To undo a copy + before that, see :hg:`revert`. 
+ + Returns 0 on success, 1 if errors are encountered. + """ + wlock = repo.wlock(False) + try: + return cmdutil.copy(ui, repo, pats, opts) + finally: + wlock.release() + +@command('debugancestor', [], _('[INDEX] REV1 REV2')) +def debugancestor(ui, repo, *args): + """find the ancestor revision of two revisions in a given index""" + if len(args) == 3: + index, rev1, rev2 = args + r = revlog.revlog(scmutil.opener(os.getcwd(), audit=False), index) + lookup = r.lookup + elif len(args) == 2: + if not repo: + raise util.Abort(_("there is no Mercurial repository here " + "(.hg not found)")) + rev1, rev2 = args + r = repo.changelog + lookup = repo.lookup + else: + raise util.Abort(_('either two or three arguments required')) + a = r.ancestor(lookup(rev1), lookup(rev2)) + ui.write("%d:%s\n" % (r.rev(a), hex(a))) + +@command('debugbuilddag', + [('m', 'mergeable-file', None, _('add single file mergeable changes')), + ('o', 'overwritten-file', None, _('add single file all revs overwrite')), + ('n', 'new-file', None, _('add new file at each rev'))], + _('[OPTION]... [TEXT]')) +def debugbuilddag(ui, repo, text=None, + mergeable_file=False, + overwritten_file=False, + new_file=False): + """builds a repo with a given DAG from scratch in the current empty repo + + The description of the DAG is read from stdin if not given on the + command line. + + Elements: + + - "+n" is a linear run of n nodes based on the current default parent + - "." 
is a single node based on the current default parent + - "$" resets the default parent to null (implied at the start); + otherwise the default parent is always the last node created + - " 0: + raise util.Abort(_('repository is not empty')) + + # determine number of revs in DAG + total = 0 + for type, data in dagparser.parsedag(text): + if type == 'n': + total += 1 + + if mergeable_file: + linesperrev = 2 + # make a file with k lines per rev + initialmergedlines = [str(i) for i in xrange(0, total * linesperrev)] + initialmergedlines.append("") + + tags = [] + + tr = repo.transaction("builddag") + try: + + at = -1 + atbranch = 'default' + nodeids = [] + ui.progress(_('building'), 0, unit=_('revisions'), total=total) + for type, data in dagparser.parsedag(text): + if type == 'n': + ui.note('node %s\n' % str(data)) + id, ps = data + + files = [] + fctxs = {} + + p2 = None + if mergeable_file: + fn = "mf" + p1 = repo[ps[0]] + if len(ps) > 1: + p2 = repo[ps[1]] + pa = p1.ancestor(p2) + base, local, other = [x[fn].data() for x in pa, p1, p2] + m3 = simplemerge.Merge3Text(base, local, other) + ml = [l.strip() for l in m3.merge_lines()] + ml.append("") + elif at > 0: + ml = p1[fn].data().split("\n") + else: + ml = initialmergedlines + ml[id * linesperrev] += " r%i" % id + mergedtext = "\n".join(ml) + files.append(fn) + fctxs[fn] = context.memfilectx(fn, mergedtext) + + if overwritten_file: + fn = "of" + files.append(fn) + fctxs[fn] = context.memfilectx(fn, "r%i\n" % id) + + if new_file: + fn = "nf%i" % id + files.append(fn) + fctxs[fn] = context.memfilectx(fn, "r%i\n" % id) + if len(ps) > 1: + if not p2: + p2 = repo[ps[1]] + for fn in p2: + if fn.startswith("nf"): + files.append(fn) + fctxs[fn] = p2[fn] + + def fctxfn(repo, cx, path): + return fctxs.get(path) + + if len(ps) == 0 or ps[0] < 0: + pars = [None, None] + elif len(ps) == 1: + pars = [nodeids[ps[0]], None] + else: + pars = [nodeids[p] for p in ps] + cx = context.memctx(repo, pars, "r%i" % id, files, fctxfn, + 
date=(id, 0), + user="debugbuilddag", + extra={'branch': atbranch}) + nodeid = repo.commitctx(cx) + nodeids.append(nodeid) + at = id + elif type == 'l': + id, name = data + ui.note('tag %s\n' % name) + tags.append("%s %s\n" % (hex(repo.changelog.node(id)), name)) + elif type == 'a': + ui.note('branch %s\n' % data) + atbranch = data + ui.progress(_('building'), id, unit=_('revisions'), total=total) + tr.close() + finally: + ui.progress(_('building'), None) + tr.release() + + if tags: + repo.opener.write("localtags", "".join(tags)) + +@command('debugbundle', [('a', 'all', None, _('show all details'))], _('FILE')) +def debugbundle(ui, bundlepath, all=None, **opts): + """lists the contents of a bundle""" + f = url.open(ui, bundlepath) + try: + gen = changegroup.readbundle(f, bundlepath) + if all: + ui.write("format: id, p1, p2, cset, delta base, len(delta)\n") + + def showchunks(named): + ui.write("\n%s\n" % named) + chain = None + while True: + chunkdata = gen.deltachunk(chain) + if not chunkdata: + break + node = chunkdata['node'] + p1 = chunkdata['p1'] + p2 = chunkdata['p2'] + cs = chunkdata['cs'] + deltabase = chunkdata['deltabase'] + delta = chunkdata['delta'] + ui.write("%s %s %s %s %s %s\n" % + (hex(node), hex(p1), hex(p2), + hex(cs), hex(deltabase), len(delta))) + chain = node + + chunkdata = gen.changelogheader() + showchunks("changelog") + chunkdata = gen.manifestheader() + showchunks("manifest") + while True: + chunkdata = gen.filelogheader() + if not chunkdata: + break + fname = chunkdata['filename'] + showchunks(fname) + else: + chunkdata = gen.changelogheader() + chain = None + while True: + chunkdata = gen.deltachunk(chain) + if not chunkdata: + break + node = chunkdata['node'] + ui.write("%s\n" % hex(node)) + chain = node + finally: + f.close() + +@command('debugcheckstate', [], '') +def debugcheckstate(ui, repo): + """validate the correctness of the current dirstate""" + parent1, parent2 = repo.dirstate.parents() + m1 = repo[parent1].manifest() + m2 = 
repo[parent2].manifest() + errors = 0 + for f in repo.dirstate: + state = repo.dirstate[f] + if state in "nr" and f not in m1: + ui.warn(_("%s in state %s, but not in manifest1\n") % (f, state)) + errors += 1 + if state in "a" and f in m1: + ui.warn(_("%s in state %s, but also in manifest1\n") % (f, state)) + errors += 1 + if state in "m" and f not in m1 and f not in m2: + ui.warn(_("%s in state %s, but not in either manifest\n") % + (f, state)) + errors += 1 + for f in m1: + state = repo.dirstate[f] + if state not in "nrm": + ui.warn(_("%s in manifest1, but listed as state %s") % (f, state)) + errors += 1 + if errors: + error = _(".hg/dirstate inconsistent with current parent's manifest") + raise util.Abort(error) + +@command('debugcommands', [], _('[COMMAND]')) +def debugcommands(ui, cmd='', *args): + """list all available commands and options""" + for cmd, vals in sorted(table.iteritems()): + cmd = cmd.split('|')[0].strip('^') + opts = ', '.join([i[1] for i in vals[1]]) + ui.write('%s: %s\n' % (cmd, opts)) + +@command('debugcomplete', + [('o', 'options', None, _('show the command options'))], + _('[-o] CMD')) +def debugcomplete(ui, cmd='', **opts): + """returns the completion list associated with the given command""" + + if opts.get('options'): + options = [] + otables = [globalopts] + if cmd: + aliases, entry = cmdutil.findcmd(cmd, table, False) + otables.append(entry[1]) + for t in otables: + for o in t: + if "(DEPRECATED)" in o[3]: + continue + if o[0]: + options.append('-%s' % o[0]) + options.append('--%s' % o[1]) + ui.write("%s\n" % "\n".join(options)) + return + + cmdlist = cmdutil.findpossible(cmd, table) + if ui.verbose: + cmdlist = [' '.join(c[0]) for c in cmdlist.values()] + ui.write("%s\n" % "\n".join(sorted(cmdlist))) + +@command('debugdag', + [('t', 'tags', None, _('use tags as labels')), + ('b', 'branches', None, _('annotate with branch names')), + ('', 'dots', None, _('use dots for runs')), + ('s', 'spaces', None, _('separate elements by 
spaces'))], + _('[OPTION]... [FILE [REV]...]')) +def debugdag(ui, repo, file_=None, *revs, **opts): + """format the changelog or an index DAG as a concise textual description + + If you pass a revlog index, the revlog's DAG is emitted. If you list + revision numbers, they get labelled in the output as rN. + + Otherwise, the changelog DAG of the current repo is emitted. + """ + spaces = opts.get('spaces') + dots = opts.get('dots') + if file_: + rlog = revlog.revlog(scmutil.opener(os.getcwd(), audit=False), file_) + revs = set((int(r) for r in revs)) + def events(): + for r in rlog: + yield 'n', (r, list(set(p for p in rlog.parentrevs(r) if p != -1))) + if r in revs: + yield 'l', (r, "r%i" % r) + elif repo: + cl = repo.changelog + tags = opts.get('tags') + branches = opts.get('branches') + if tags: + labels = {} + for l, n in repo.tags().items(): + labels.setdefault(cl.rev(n), []).append(l) + def events(): + b = "default" + for r in cl: + if branches: + newb = cl.read(cl.node(r))[5]['branch'] + if newb != b: + yield 'a', newb + b = newb + yield 'n', (r, list(set(p for p in cl.parentrevs(r) if p != -1))) + if tags: + ls = labels.get(r) + if ls: + for l in ls: + yield 'l', (r, l) + else: + raise util.Abort(_('need repo for changelog dag')) + + for line in dagparser.dagtextlines(events(), + addspaces=spaces, + wraplabels=True, + wrapannotations=True, + wrapnonlinear=dots, + usedots=dots, + maxlinewidth=70): + ui.write(line) + ui.write("\n") + +@command('debugdata', + [('c', 'changelog', False, _('open changelog')), + ('m', 'manifest', False, _('open manifest'))], + _('-c|-m|FILE REV')) +def debugdata(ui, repo, file_, rev = None, **opts): + """dump the contents of a data file revision""" + if opts.get('changelog') or opts.get('manifest'): + file_, rev = None, file_ + elif rev is None: + raise error.CommandError('debugdata', _('invalid arguments')) + r = cmdutil.openrevlog(repo, 'debugdata', file_, opts) + try: + ui.write(r.revision(r.lookup(rev))) + except KeyError: + 
raise util.Abort(_('invalid revision identifier %s') % rev) + +@command('debugdate', + [('e', 'extended', None, _('try extended date formats'))], + _('[-e] DATE [RANGE]')) +def debugdate(ui, date, range=None, **opts): + """parse and display a date""" + if opts["extended"]: + d = util.parsedate(date, util.extendeddateformats) + else: + d = util.parsedate(date) + ui.write("internal: %s %s\n" % d) + ui.write("standard: %s\n" % util.datestr(d)) + if range: + m = util.matchdate(range) + ui.write("match: %s\n" % m(d[0])) + +@command('debugdiscovery', + [('', 'old', None, _('use old-style discovery')), + ('', 'nonheads', None, + _('use old-style discovery with non-heads included')), + ] + remoteopts, + _('[-l REV] [-r REV] [-b BRANCH]... [OTHER]')) +def debugdiscovery(ui, repo, remoteurl="default", **opts): + """runs the changeset discovery protocol in isolation""" + remoteurl, branches = hg.parseurl(ui.expandpath(remoteurl), opts.get('branch')) + remote = hg.peer(repo, opts, remoteurl) + ui.status(_('comparing with %s\n') % util.hidepassword(remoteurl)) + + # make sure tests are repeatable + random.seed(12323) + + def doit(localheads, remoteheads): + if opts.get('old'): + if localheads: + raise util.Abort('cannot use localheads with old style discovery') + common, _in, hds = treediscovery.findcommonincoming(repo, remote, + force=True) + common = set(common) + if not opts.get('nonheads'): + ui.write("unpruned common: %s\n" % " ".join([short(n) + for n in common])) + dag = dagutil.revlogdag(repo.changelog) + all = dag.ancestorset(dag.internalizeall(common)) + common = dag.externalizeall(dag.headsetofconnecteds(all)) + else: + common, any, hds = setdiscovery.findcommonheads(ui, repo, remote) + common = set(common) + rheads = set(hds) + lheads = set(repo.heads()) + ui.write("common heads: %s\n" % " ".join([short(n) for n in common])) + if lheads <= common: + ui.write("local is subset\n") + elif rheads <= common: + ui.write("remote is subset\n") + + serverlogs = 
opts.get('serverlog') + if serverlogs: + for filename in serverlogs: + logfile = open(filename, 'r') + try: + line = logfile.readline() + while line: + parts = line.strip().split(';') + op = parts[1] + if op == 'cg': + pass + elif op == 'cgss': + doit(parts[2].split(' '), parts[3].split(' ')) + elif op == 'unb': + doit(parts[3].split(' '), parts[2].split(' ')) + line = logfile.readline() + finally: + logfile.close() + + else: + remoterevs, _checkout = hg.addbranchrevs(repo, remote, branches, + opts.get('remote_head')) + localrevs = opts.get('local_head') + doit(localrevs, remoterevs) + +@command('debugfileset', [], ('REVSPEC')) +def debugfileset(ui, repo, expr): + '''parse and apply a fileset specification''' + if ui.verbose: + tree = fileset.parse(expr)[0] + ui.note(tree, "\n") + + for f in fileset.getfileset(repo[None], expr): + ui.write("%s\n" % f) + +@command('debugfsinfo', [], _('[PATH]')) +def debugfsinfo(ui, path = "."): + """show information detected about current filesystem""" + util.writefile('.debugfsinfo', '') + ui.write('exec: %s\n' % (util.checkexec(path) and 'yes' or 'no')) + ui.write('symlink: %s\n' % (util.checklink(path) and 'yes' or 'no')) + ui.write('case-sensitive: %s\n' % (util.checkcase('.debugfsinfo') + and 'yes' or 'no')) + os.unlink('.debugfsinfo') + +@command('debuggetbundle', + [('H', 'head', [], _('id of head node'), _('ID')), + ('C', 'common', [], _('id of common node'), _('ID')), + ('t', 'type', 'bzip2', _('bundle compression type to use'), _('TYPE'))], + _('REPO FILE [-H|-C ID]...')) +def debuggetbundle(ui, repopath, bundlepath, head=None, common=None, **opts): + """retrieves a bundle from a repo + + Every ID must be a full-length hex node id string. Saves the bundle to the + given file. 
+ """ + repo = hg.peer(ui, opts, repopath) + if not repo.capable('getbundle'): + raise util.Abort("getbundle() not supported by target repository") + args = {} + if common: + args['common'] = [bin(s) for s in common] + if head: + args['heads'] = [bin(s) for s in head] + bundle = repo.getbundle('debug', **args) + + bundletype = opts.get('type', 'bzip2').lower() + btypes = {'none': 'HG10UN', 'bzip2': 'HG10BZ', 'gzip': 'HG10GZ'} + bundletype = btypes.get(bundletype) + if bundletype not in changegroup.bundletypes: + raise util.Abort(_('unknown bundle type specified with --type')) + changegroup.writebundle(bundle, bundlepath, bundletype) + +@command('debugignore', [], '') +def debugignore(ui, repo, *values, **opts): + """display the combined ignore pattern""" + ignore = repo.dirstate._ignore + includepat = getattr(ignore, 'includepat', None) + if includepat is not None: + ui.write("%s\n" % includepat) + else: + raise util.Abort(_("no ignore patterns found")) + +@command('debugindex', + [('c', 'changelog', False, _('open changelog')), + ('m', 'manifest', False, _('open manifest')), + ('f', 'format', 0, _('revlog format'), _('FORMAT'))], + _('[-f FORMAT] -c|-m|FILE')) +def debugindex(ui, repo, file_ = None, **opts): + """dump the contents of an index file""" + r = cmdutil.openrevlog(repo, 'debugindex', file_, opts) + format = opts.get('format', 0) + if format not in (0, 1): + raise util.Abort(_("unknown format %d") % format) + + generaldelta = r.version & revlog.REVLOGGENERALDELTA + if generaldelta: + basehdr = ' delta' + else: + basehdr = ' base' + + if format == 0: + ui.write(" rev offset length " + basehdr + " linkrev" + " nodeid p1 p2\n") + elif format == 1: + ui.write(" rev flag offset length" + " size " + basehdr + " link p1 p2 nodeid\n") + + for i in r: + node = r.node(i) + if generaldelta: + base = r.deltaparent(i) + else: + base = r.chainbase(i) + if format == 0: + try: + pp = r.parents(node) + except: + pp = [nullid, nullid] + ui.write("% 6d % 9d % 7d % 6d % 7d 
%s %s %s\n" % ( + i, r.start(i), r.length(i), base, r.linkrev(i), + short(node), short(pp[0]), short(pp[1]))) + elif format == 1: + pr = r.parentrevs(i) + ui.write("% 6d %04x % 8d % 8d % 8d % 6d % 6d % 6d % 6d %s\n" % ( + i, r.flags(i), r.start(i), r.length(i), r.rawsize(i), + base, r.linkrev(i), pr[0], pr[1], short(node))) + +@command('debugindexdot', [], _('FILE')) +def debugindexdot(ui, repo, file_): + """dump an index DAG as a graphviz dot file""" + r = None + if repo: + filelog = repo.file(file_) + if len(filelog): + r = filelog + if not r: + r = revlog.revlog(scmutil.opener(os.getcwd(), audit=False), file_) + ui.write("digraph G {\n") + for i in r: + node = r.node(i) + pp = r.parents(node) + ui.write("\t%d -> %d\n" % (r.rev(pp[0]), i)) + if pp[1] != nullid: + ui.write("\t%d -> %d\n" % (r.rev(pp[1]), i)) + ui.write("}\n") + +@command('debuginstall', [], '') +def debuginstall(ui): + '''test Mercurial installation + + Returns 0 on success. + ''' + + def writetemp(contents): + (fd, name) = tempfile.mkstemp(prefix="hg-debuginstall-") + f = os.fdopen(fd, "wb") + f.write(contents) + f.close() + return name + + problems = 0 + + # encoding + ui.status(_("Checking encoding (%s)...\n") % encoding.encoding) + try: + encoding.fromlocal("test") + except util.Abort, inst: + ui.write(" %s\n" % inst) + ui.write(_(" (check that your locale is properly set)\n")) + problems += 1 + + # compiled modules + ui.status(_("Checking installed modules (%s)...\n") + % os.path.dirname(__file__)) + try: + import bdiff, mpatch, base85, osutil + dir(bdiff), dir(mpatch), dir(base85), dir(osutil) # quiet pyflakes + except Exception, inst: + ui.write(" %s\n" % inst) + ui.write(_(" One or more extensions could not be found")) + ui.write(_(" (check that you compiled the extensions)\n")) + problems += 1 + + # templates + import templater + p = templater.templatepath() + ui.status(_("Checking templates (%s)...\n") % ' '.join(p)) + try: + 
templater.templater(templater.templatepath("map-cmdline.default")) + except Exception, inst: + ui.write(" %s\n" % inst) + ui.write(_(" (templates seem to have been installed incorrectly)\n")) + problems += 1 + + # editor + ui.status(_("Checking commit editor...\n")) + editor = ui.geteditor() + cmdpath = util.findexe(editor) or util.findexe(editor.split()[0]) + if not cmdpath: + if editor == 'vi': + ui.write(_(" No commit editor set and can't find vi in PATH\n")) + ui.write(_(" (specify a commit editor in your configuration" + " file)\n")) + else: + ui.write(_(" Can't find editor '%s' in PATH\n") % editor) + ui.write(_(" (specify a commit editor in your configuration" + " file)\n")) + problems += 1 + + # check username + ui.status(_("Checking username...\n")) + try: + ui.username() + except util.Abort, e: + ui.write(" %s\n" % e) + ui.write(_(" (specify a username in your configuration file)\n")) + problems += 1 + + if not problems: + ui.status(_("No problems detected\n")) + else: + ui.write(_("%s problems detected," + " please check your install!\n") % problems) + + return problems + +@command('debugknown', [], _('REPO ID...')) +def debugknown(ui, repopath, *ids, **opts): + """test whether node ids are known to a repo + + Every ID must be a full-length hex node id string. Returns a list of 0s and 1s + indicating unknown/known. + """ + repo = hg.peer(ui, opts, repopath) + if not repo.capable('known'): + raise util.Abort("known() not supported by target repository") + flags = repo.known([bin(s) for s in ids]) + ui.write("%s\n" % ("".join([f and "1" or "0" for f in flags]))) + +@command('debugpushkey', [], _('REPO NAMESPACE [KEY OLD NEW]')) +def debugpushkey(ui, repopath, namespace, *keyinfo, **opts): + '''access the pushkey key/value protocol + + With two args, list the keys in the given namespace. + + With five args, set a key to new if it currently is set to old. + Reports success or failure. 
+ ''' + + target = hg.peer(ui, {}, repopath) + if keyinfo: + key, old, new = keyinfo + r = target.pushkey(namespace, key, old, new) + ui.status(str(r) + '\n') + return not r + else: + for k, v in target.listkeys(namespace).iteritems(): + ui.write("%s\t%s\n" % (k.encode('string-escape'), + v.encode('string-escape'))) + +@command('debugrebuildstate', + [('r', 'rev', '', _('revision to rebuild to'), _('REV'))], + _('[-r REV] [REV]')) +def debugrebuildstate(ui, repo, rev="tip"): + """rebuild the dirstate as it would look like for the given revision""" + ctx = scmutil.revsingle(repo, rev) + wlock = repo.wlock() + try: + repo.dirstate.rebuild(ctx.node(), ctx.manifest()) + finally: + wlock.release() + +@command('debugrename', + [('r', 'rev', '', _('revision to debug'), _('REV'))], + _('[-r REV] FILE')) +def debugrename(ui, repo, file1, *pats, **opts): + """dump rename information""" + + ctx = scmutil.revsingle(repo, opts.get('rev')) + m = scmutil.match(ctx, (file1,) + pats, opts) + for abs in ctx.walk(m): + fctx = ctx[abs] + o = fctx.filelog().renamed(fctx.filenode()) + rel = m.rel(abs) + if o: + ui.write(_("%s renamed from %s:%s\n") % (rel, o[0], hex(o[1]))) + else: + ui.write(_("%s not renamed\n") % rel) + +@command('debugrevlog', + [('c', 'changelog', False, _('open changelog')), + ('m', 'manifest', False, _('open manifest')), + ('d', 'dump', False, _('dump index data'))], + _('-c|-m|FILE')) +def debugrevlog(ui, repo, file_ = None, **opts): + """show data and statistics about a revlog""" + r = cmdutil.openrevlog(repo, 'debugrevlog', file_, opts) + + if opts.get("dump"): + numrevs = len(r) + ui.write("# rev p1rev p2rev start end deltastart base p1 p2" + " rawsize totalsize compression heads\n") + ts = 0 + heads = set() + for rev in xrange(numrevs): + dbase = r.deltaparent(rev) + if dbase == -1: + dbase = rev + cbase = r.chainbase(rev) + p1, p2 = r.parentrevs(rev) + rs = r.rawsize(rev) + ts = ts + rs + heads -= set(r.parentrevs(rev)) + heads.add(rev) + ui.write("%d %d %d 
%d %d %d %d %d %d %d %d %d %d\n" % + (rev, p1, p2, r.start(rev), r.end(rev), + r.start(dbase), r.start(cbase), + r.start(p1), r.start(p2), + rs, ts, ts / r.end(rev), len(heads))) + return 0 + + v = r.version + format = v & 0xFFFF + flags = [] + gdelta = False + if v & revlog.REVLOGNGINLINEDATA: + flags.append('inline') + if v & revlog.REVLOGGENERALDELTA: + gdelta = True + flags.append('generaldelta') + if not flags: + flags = ['(none)'] + + nummerges = 0 + numfull = 0 + numprev = 0 + nump1 = 0 + nump2 = 0 + numother = 0 + nump1prev = 0 + nump2prev = 0 + chainlengths = [] + + datasize = [None, 0, 0L] + fullsize = [None, 0, 0L] + deltasize = [None, 0, 0L] + + def addsize(size, l): + if l[0] is None or size < l[0]: + l[0] = size + if size > l[1]: + l[1] = size + l[2] += size + + numrevs = len(r) + for rev in xrange(numrevs): + p1, p2 = r.parentrevs(rev) + delta = r.deltaparent(rev) + if format > 0: + addsize(r.rawsize(rev), datasize) + if p2 != nullrev: + nummerges += 1 + size = r.length(rev) + if delta == nullrev: + chainlengths.append(0) + numfull += 1 + addsize(size, fullsize) + else: + chainlengths.append(chainlengths[delta] + 1) + addsize(size, deltasize) + if delta == rev - 1: + numprev += 1 + if delta == p1: + nump1prev += 1 + elif delta == p2: + nump2prev += 1 + elif delta == p1: + nump1 += 1 + elif delta == p2: + nump2 += 1 + elif delta != nullrev: + numother += 1 + + numdeltas = numrevs - numfull + numoprev = numprev - nump1prev - nump2prev + totalrawsize = datasize[2] + datasize[2] /= numrevs + fulltotal = fullsize[2] + fullsize[2] /= numfull + deltatotal = deltasize[2] + deltasize[2] /= numrevs - numfull + totalsize = fulltotal + deltatotal + avgchainlen = sum(chainlengths) / numrevs + compratio = totalrawsize / totalsize + + basedfmtstr = '%%%dd\n' + basepcfmtstr = '%%%dd %s(%%5.2f%%%%)\n' + + def dfmtstr(max): + return basedfmtstr % len(str(max)) + def pcfmtstr(max, padding=0): + return basepcfmtstr % (len(str(max)), ' ' * padding) + + def pcfmt(value, 
total): + return (value, 100 * float(value) / total) + + ui.write('format : %d\n' % format) + ui.write('flags : %s\n' % ', '.join(flags)) + + ui.write('\n') + fmt = pcfmtstr(totalsize) + fmt2 = dfmtstr(totalsize) + ui.write('revisions : ' + fmt2 % numrevs) + ui.write(' merges : ' + fmt % pcfmt(nummerges, numrevs)) + ui.write(' normal : ' + fmt % pcfmt(numrevs - nummerges, numrevs)) + ui.write('revisions : ' + fmt2 % numrevs) + ui.write(' full : ' + fmt % pcfmt(numfull, numrevs)) + ui.write(' deltas : ' + fmt % pcfmt(numdeltas, numrevs)) + ui.write('revision size : ' + fmt2 % totalsize) + ui.write(' full : ' + fmt % pcfmt(fulltotal, totalsize)) + ui.write(' deltas : ' + fmt % pcfmt(deltatotal, totalsize)) + + ui.write('\n') + fmt = dfmtstr(max(avgchainlen, compratio)) + ui.write('avg chain length : ' + fmt % avgchainlen) + ui.write('compression ratio : ' + fmt % compratio) + + if format > 0: + ui.write('\n') + ui.write('uncompressed data size (min/max/avg) : %d / %d / %d\n' + % tuple(datasize)) + ui.write('full revision size (min/max/avg) : %d / %d / %d\n' + % tuple(fullsize)) + ui.write('delta size (min/max/avg) : %d / %d / %d\n' + % tuple(deltasize)) + + if numdeltas > 0: + ui.write('\n') + fmt = pcfmtstr(numdeltas) + fmt2 = pcfmtstr(numdeltas, 4) + ui.write('deltas against prev : ' + fmt % pcfmt(numprev, numdeltas)) + if numprev > 0: + ui.write(' where prev = p1 : ' + fmt2 % pcfmt(nump1prev, numprev)) + ui.write(' where prev = p2 : ' + fmt2 % pcfmt(nump2prev, numprev)) + ui.write(' other : ' + fmt2 % pcfmt(numoprev, numprev)) + if gdelta: + ui.write('deltas against p1 : ' + fmt % pcfmt(nump1, numdeltas)) + ui.write('deltas against p2 : ' + fmt % pcfmt(nump2, numdeltas)) + ui.write('deltas against other : ' + fmt % pcfmt(numother, numdeltas)) + +@command('debugrevspec', [], ('REVSPEC')) +def debugrevspec(ui, repo, expr): + '''parse and apply a revision specification''' + if ui.verbose: + tree = revset.parse(expr)[0] + ui.note(tree, "\n") + newtree = 
revset.findaliases(ui, tree) + if newtree != tree: + ui.note(newtree, "\n") + func = revset.match(ui, expr) + for c in func(repo, range(len(repo))): + ui.write("%s\n" % c) + +@command('debugsetparents', [], _('REV1 [REV2]')) +def debugsetparents(ui, repo, rev1, rev2=None): + """manually set the parents of the current working directory + + This is useful for writing repository conversion tools, but should + be used with care. + + Returns 0 on success. + """ + + r1 = scmutil.revsingle(repo, rev1).node() + r2 = scmutil.revsingle(repo, rev2, 'null').node() + + wlock = repo.wlock() + try: + repo.dirstate.setparents(r1, r2) + finally: + wlock.release() + +@command('debugstate', + [('', 'nodates', None, _('do not display the saved mtime')), + ('', 'datesort', None, _('sort by saved mtime'))], + _('[OPTION]...')) +def debugstate(ui, repo, nodates=None, datesort=None): + """show the contents of the current dirstate""" + timestr = "" + showdate = not nodates + if datesort: + keyfunc = lambda x: (x[1][3], x[0]) # sort by mtime, then by filename + else: + keyfunc = None # sort by filename + for file_, ent in sorted(repo.dirstate._map.iteritems(), key=keyfunc): + if showdate: + if ent[3] == -1: + # Pad or slice to locale representation + locale_len = len(time.strftime("%Y-%m-%d %H:%M:%S ", + time.localtime(0))) + timestr = 'unset' + timestr = (timestr[:locale_len] + + ' ' * (locale_len - len(timestr))) + else: + timestr = time.strftime("%Y-%m-%d %H:%M:%S ", + time.localtime(ent[3])) + if ent[1] & 020000: + mode = 'lnk' + else: + mode = '%3o' % (ent[1] & 0777) + ui.write("%c %s %10d %s%s\n" % (ent[0], mode, ent[2], timestr, file_)) + for f in repo.dirstate.copies(): + ui.write(_("copy: %s -> %s\n") % (repo.dirstate.copied(f), f)) + +@command('debugsub', + [('r', 'rev', '', + _('revision to check'), _('REV'))], + _('[-r REV] [REV]')) +def debugsub(ui, repo, rev=None): + ctx = scmutil.revsingle(repo, rev, None) + for k, v in sorted(ctx.substate.items()): + ui.write('path %s\n' % 
k) + ui.write(' source %s\n' % v[0]) + ui.write(' revision %s\n' % v[1]) + +@command('debugwalk', walkopts, _('[OPTION]... [FILE]...')) +def debugwalk(ui, repo, *pats, **opts): + """show how files match on given patterns""" + m = scmutil.match(repo[None], pats, opts) + items = list(repo.walk(m)) + if not items: + return + fmt = 'f %%-%ds %%-%ds %%s' % ( + max([len(abs) for abs in items]), + max([len(m.rel(abs)) for abs in items])) + for abs in items: + line = fmt % (abs, m.rel(abs), m.exact(abs) and 'exact' or '') + ui.write("%s\n" % line.rstrip()) + +@command('debugwireargs', + [('', 'three', '', 'three'), + ('', 'four', '', 'four'), + ('', 'five', '', 'five'), + ] + remoteopts, + _('REPO [OPTIONS]... [ONE [TWO]]')) +def debugwireargs(ui, repopath, *vals, **opts): + repo = hg.peer(ui, opts, repopath) + for opt in remoteopts: + del opts[opt[1]] + args = {} + for k, v in opts.iteritems(): + if v: + args[k] = v + # run twice to check that we don't mess up the stream for the next command + res1 = repo.debugwireargs(*vals, **args) + res2 = repo.debugwireargs(*vals, **args) + ui.write("%s\n" % res1) + if res1 != res2: + ui.warn("%s\n" % res2) + +@command('^diff', + [('r', 'rev', [], _('revision'), _('REV')), + ('c', 'change', '', _('change made by revision'), _('REV')) + ] + diffopts + diffopts2 + walkopts + subrepoopts, + _('[OPTION]... ([-c REV] | [-r REV1 [-r REV2]]) [FILE]...')) +def diff(ui, repo, *pats, **opts): + """diff repository (or selected files) + + Show differences between revisions for the specified files. + + Differences between files are shown using the unified diff format. + + .. note:: + diff may generate unexpected results for merges, as it will + default to comparing against the working directory's first + parent changeset if no revisions are specified. + + When two revision arguments are given, then changes are shown + between those revisions. 
If only one revision is specified then + that revision is compared to the working directory, and, when no + revisions are specified, the working directory files are compared + to its parent. + + Alternatively you can specify -c/--change with a revision to see + the changes in that changeset relative to its first parent. + + Without the -a/--text option, diff will avoid generating diffs of + files it detects as binary. With -a, diff will generate a diff + anyway, probably with undesirable results. + + Use the -g/--git option to generate diffs in the git extended diff + format. For more information, read :hg:`help diffs`. + + .. container:: verbose + + Examples: + + - compare a file in the current working directory to its parent:: + + hg diff foo.c + + - compare two historical versions of a directory, with rename info:: + + hg diff --git -r 1.0:1.2 lib/ + + - get change stats relative to the last change on some date:: + + hg diff --stat -r "date('may 2')" + + - diff all newly-added files that contain a keyword:: + + hg diff "set:added() and grep(GNU)" + + - compare a revision and its parents:: + + hg diff -c 9353 # compare against first parent + hg diff -r 9353^:9353 # same using revset syntax + hg diff -r 9353^2:9353 # compare against the second parent + + Returns 0 on success. 
+ """ + + revs = opts.get('rev') + change = opts.get('change') + stat = opts.get('stat') + reverse = opts.get('reverse') + + if revs and change: + msg = _('cannot specify --rev and --change at the same time') + raise util.Abort(msg) + elif change: + node2 = scmutil.revsingle(repo, change, None).node() + node1 = repo[node2].p1().node() + else: + node1, node2 = scmutil.revpair(repo, revs) + + if reverse: + node1, node2 = node2, node1 + + diffopts = patch.diffopts(ui, opts) + m = scmutil.match(repo[node2], pats, opts) + cmdutil.diffordiffstat(ui, repo, diffopts, node1, node2, m, stat=stat, + listsubrepos=opts.get('subrepos')) + +@command('^export', + [('o', 'output', '', + _('print output to file with formatted name'), _('FORMAT')), + ('', 'switch-parent', None, _('diff against the second parent')), + ('r', 'rev', [], _('revisions to export'), _('REV')), + ] + diffopts, + _('[OPTION]... [-o OUTFILESPEC] REV...')) +def export(ui, repo, *changesets, **opts): + """dump the header and diffs for one or more changesets + + Print the changeset header and diffs for one or more revisions. + + The information shown in the changeset header is: author, date, + branch name (if non-default), changeset hash, parent(s) and commit + comment. + + .. note:: + export may generate unexpected diff output for merge + changesets, as it will compare the merge changeset against its + first parent only. + + Output may be to a file, in which case the name of the file is + given using a format string. 
The formatting rules are as follows: + + :``%%``: literal "%" character + :``%H``: changeset hash (40 hexadecimal digits) + :``%N``: number of patches being generated + :``%R``: changeset revision number + :``%b``: basename of the exporting repository + :``%h``: short-form changeset hash (12 hexadecimal digits) + :``%m``: first line of the commit message (only alphanumeric characters) + :``%n``: zero-padded sequence number, starting at 1 + :``%r``: zero-padded changeset revision number + + Without the -a/--text option, export will avoid generating diffs + of files it detects as binary. With -a, export will generate a + diff anyway, probably with undesirable results. + + Use the -g/--git option to generate diffs in the git extended diff + format. See :hg:`help diffs` for more information. + + With the --switch-parent option, the diff will be against the + second parent. It can be useful to review a merge. + + .. container:: verbose + + Examples: + + - use export and import to transplant a bugfix to the current + branch:: + + hg export -r 9353 | hg import - + + - export all the changesets between two revisions to a file with + rename information:: + + hg export --git -r 123:150 > changes.txt + + - split outgoing changes into a series of patches with + descriptive names:: + + hg export -r "outgoing()" -o "%n-%m.patch" + + Returns 0 on success. + """ + changesets += tuple(opts.get('rev', [])) + if not changesets: + raise util.Abort(_("export requires at least one changeset")) + revs = scmutil.revrange(repo, changesets) + if len(revs) > 1: + ui.note(_('exporting patches:\n')) + else: + ui.note(_('exporting patch:\n')) + cmdutil.export(repo, revs, template=opts.get('output'), + switch_parent=opts.get('switch_parent'), + opts=patch.diffopts(ui, opts)) + +@command('^forget', walkopts, _('[OPTION]... 
FILE...')) +def forget(ui, repo, *pats, **opts): + """forget the specified files on the next commit + + Mark the specified files so they will no longer be tracked + after the next commit. + + This only removes files from the current branch, not from the + entire project history, and it does not delete them from the + working directory. + + To undo a forget before the next commit, see :hg:`add`. + + .. container:: verbose + + Examples: + + - forget newly-added binary files:: + + hg forget "set:added() and binary()" + + - forget files that would be excluded by .hgignore:: + + hg forget "set:hgignore()" + + Returns 0 on success. + """ + + if not pats: + raise util.Abort(_('no files specified')) + + m = scmutil.match(repo[None], pats, opts) + s = repo.status(match=m, clean=True) + forget = sorted(s[0] + s[1] + s[3] + s[6]) + errs = 0 + + for f in m.files(): + if f not in repo.dirstate and not os.path.isdir(m.rel(f)): + if os.path.exists(m.rel(f)): + ui.warn(_('not removing %s: file is already untracked\n') + % m.rel(f)) + errs = 1 + + for f in forget: + if ui.verbose or not m.exact(f): + ui.status(_('removing %s\n') % m.rel(f)) + + repo[None].forget(forget) + return errs + +@command( + 'graft', + [('c', 'continue', False, _('resume interrupted graft')), + ('e', 'edit', False, _('invoke editor on commit messages')), + ('D', 'currentdate', False, + _('record the current date as commit date')), + ('U', 'currentuser', False, + _('record the current user as committer'), _('DATE'))] + + commitopts2 + mergetoolopts, + _('[OPTION]... REVISION...')) +def graft(ui, repo, *revs, **opts): + '''copy changes from other branches onto the current branch + + This command uses Mercurial's merge logic to copy individual + changes from other branches without merging branches in the + history graph. This is sometimes known as 'backporting' or + 'cherry-picking'. By default, graft will copy user, date, and + description from the source changesets. 
+ + Changesets that are ancestors of the current revision, that have + already been grafted, or that are merges will be skipped. + + If a graft merge results in conflicts, the graft process is + aborted so that the current merge can be manually resolved. Once + all conflicts are addressed, the graft process can be continued + with the -c/--continue option. + + .. note:: + The -c/--continue option does not reapply earlier options. + + .. container:: verbose + + Examples: + + - copy a single change to the stable branch and edit its description:: + + hg update stable + hg graft --edit 9393 + + - graft a range of changesets with one exception, updating dates:: + + hg graft -D "2085::2093 and not 2091" + + - continue a graft after resolving conflicts:: + + hg graft -c + + - show the source of a grafted changeset:: + + hg log --debug -r tip + + Returns 0 on successful completion. + ''' + + if not opts.get('user') and opts.get('currentuser'): + opts['user'] = ui.username() + if not opts.get('date') and opts.get('currentdate'): + opts['date'] = "%d %d" % util.makedate() + + editor = None + if opts.get('edit'): + editor = cmdutil.commitforceeditor + + cont = False + if opts['continue']: + cont = True + if revs: + raise util.Abort(_("can't specify --continue and revisions")) + # read in unfinished revisions + try: + nodes = repo.opener.read('graftstate').splitlines() + revs = [repo[node].rev() for node in nodes] + except IOError, inst: + if inst.errno != errno.ENOENT: + raise + raise util.Abort(_("no graft state found, can't continue")) + else: + cmdutil.bailifchanged(repo) + if not revs: + raise util.Abort(_('no revisions specified')) + revs = scmutil.revrange(repo, revs) + + # check for merges + for ctx in repo.set('%ld and merge()', revs): + ui.warn(_('skipping ungraftable merge revision %s\n') % ctx.rev()) + revs.remove(ctx.rev()) + if not revs: + return -1 + + # check for ancestors of dest branch + for ctx in repo.set('::. 
and %ld', revs): + ui.warn(_('skipping ancestor revision %s\n') % ctx.rev()) + revs.remove(ctx.rev()) + if not revs: + return -1 + + # analyze revs for earlier grafts + ids = {} + for ctx in repo.set("%ld", revs): + ids[ctx.hex()] = ctx.rev() + n = ctx.extra().get('source') + if n: + ids[n] = ctx.rev() + + # check ancestors for earlier grafts + ui.debug('scanning for duplicate grafts\n') + for ctx in repo.set("::. - ::%ld", revs): + n = ctx.extra().get('source') + if n in ids: + r = repo[n].rev() + if r in revs: + ui.warn(_('skipping already grafted revision %s\n') % r) + revs.remove(r) + elif ids[n] in revs: + ui.warn(_('skipping already grafted revision %s ' + '(same origin %d)\n') % (ids[n], r)) + revs.remove(ids[n]) + elif ctx.hex() in ids: + r = ids[ctx.hex()] + ui.warn(_('skipping already grafted revision %s ' + '(was grafted from %d)\n') % (r, ctx.rev())) + revs.remove(r) + if not revs: + return -1 + + for pos, ctx in enumerate(repo.set("%ld", revs)): + current = repo['.'] + ui.status(_('grafting revision %s\n') % ctx.rev()) + + # we don't merge the first commit when continuing + if not cont: + # perform the graft merge with p1(rev) as 'ancestor' + try: + # ui.forcemerge is an internal variable, do not document + repo.ui.setconfig('ui', 'forcemerge', opts.get('tool', '')) + stats = mergemod.update(repo, ctx.node(), True, True, False, + ctx.p1().node()) + finally: + ui.setconfig('ui', 'forcemerge', '') + # drop the second merge parent + repo.dirstate.setparents(current.node(), nullid) + repo.dirstate.write() + # fix up dirstate for copies and renames + cmdutil.duplicatecopies(repo, ctx.rev(), current.node(), nullid) + # report any conflicts + if stats and stats[3] > 0: + # write out state for --continue + nodelines = [repo[rev].hex() + "\n" for rev in revs[pos:]] + repo.opener.write('graftstate', ''.join(nodelines)) + raise util.Abort( + _("unresolved conflicts, can't continue"), + hint=_('use hg resolve and hg graft --continue')) + else: + cont = False + + # 
commit + source = ctx.extra().get('source') + if not source: + source = ctx.hex() + extra = {'source': source} + user = ctx.user() + if opts.get('user'): + user = opts['user'] + date = ctx.date() + if opts.get('date'): + date = opts['date'] + repo.commit(text=ctx.description(), user=user, + date=date, extra=extra, editor=editor) + + # remove state when we complete successfully + if os.path.exists(repo.join('graftstate')): + util.unlinkpath(repo.join('graftstate')) + + return 0 + +@command('grep', + [('0', 'print0', None, _('end fields with NUL')), + ('', 'all', None, _('print all revisions that match')), + ('a', 'text', None, _('treat all files as text')), + ('f', 'follow', None, + _('follow changeset history,' + ' or file history across copies and renames')), + ('i', 'ignore-case', None, _('ignore case when matching')), + ('l', 'files-with-matches', None, + _('print only filenames and revisions that match')), + ('n', 'line-number', None, _('print matching line numbers')), + ('r', 'rev', [], + _('only search files changed within revision range'), _('REV')), + ('u', 'user', None, _('list the author (long with -v)')), + ('d', 'date', None, _('list the date (short with -q)')), + ] + walkopts, + _('[OPTION]... PATTERN [FILE]...')) +def grep(ui, repo, pattern, *pats, **opts): + """search for a pattern in specified files and revisions + + Search revisions of files for a regular expression. + + This command behaves differently than Unix grep. It only accepts + Python/Perl regexps. It searches repository history, not the + working directory. It always prints the revision number in which a + match appears. + + By default, grep only prints output for the first revision of a + file in which it finds a match. To get it to print every revision + that contains a change in match status ("-" for a match that + becomes a non-match, or "+" for a non-match that becomes a match), + use the --all flag. + + Returns 0 if a match is found, 1 otherwise. 
+ """ + reflags = 0 + if opts.get('ignore_case'): + reflags |= re.I + try: + regexp = re.compile(pattern, reflags) + except re.error, inst: + ui.warn(_("grep: invalid match pattern: %s\n") % inst) + return 1 + sep, eol = ':', '\n' + if opts.get('print0'): + sep = eol = '\0' + + getfile = util.lrucachefunc(repo.file) + + def matchlines(body): + begin = 0 + linenum = 0 + while True: + match = regexp.search(body, begin) + if not match: + break + mstart, mend = match.span() + linenum += body.count('\n', begin, mstart) + 1 + lstart = body.rfind('\n', begin, mstart) + 1 or begin + begin = body.find('\n', mend) + 1 or len(body) + 1 + lend = begin - 1 + yield linenum, mstart - lstart, mend - lstart, body[lstart:lend] + + class linestate(object): + def __init__(self, line, linenum, colstart, colend): + self.line = line + self.linenum = linenum + self.colstart = colstart + self.colend = colend + + def __hash__(self): + return hash((self.linenum, self.line)) + + def __eq__(self, other): + return self.line == other.line + + matches = {} + copies = {} + def grepbody(fn, rev, body): + matches[rev].setdefault(fn, []) + m = matches[rev][fn] + for lnum, cstart, cend, line in matchlines(body): + s = linestate(line, lnum, cstart, cend) + m.append(s) + + def difflinestates(a, b): + sm = difflib.SequenceMatcher(None, a, b) + for tag, alo, ahi, blo, bhi in sm.get_opcodes(): + if tag == 'insert': + for i in xrange(blo, bhi): + yield ('+', b[i]) + elif tag == 'delete': + for i in xrange(alo, ahi): + yield ('-', a[i]) + elif tag == 'replace': + for i in xrange(alo, ahi): + yield ('-', a[i]) + for i in xrange(blo, bhi): + yield ('+', b[i]) + + def display(fn, ctx, pstates, states): + rev = ctx.rev() + datefunc = ui.quiet and util.shortdate or util.datestr + found = False + filerevmatches = {} + def binary(): + flog = getfile(fn) + return util.binary(flog.read(ctx.filenode(fn))) + + if opts.get('all'): + iter = difflinestates(pstates, states) + else: + iter = [('', l) for l in states] + for 
change, l in iter: + cols = [fn, str(rev)] + before, match, after = None, None, None + if opts.get('line_number'): + cols.append(str(l.linenum)) + if opts.get('all'): + cols.append(change) + if opts.get('user'): + cols.append(ui.shortuser(ctx.user())) + if opts.get('date'): + cols.append(datefunc(ctx.date())) + if opts.get('files_with_matches'): + c = (fn, rev) + if c in filerevmatches: + continue + filerevmatches[c] = 1 + else: + before = l.line[:l.colstart] + match = l.line[l.colstart:l.colend] + after = l.line[l.colend:] + ui.write(sep.join(cols)) + if before is not None: + if not opts.get('text') and binary(): + ui.write(sep + " Binary file matches") + else: + ui.write(sep + before) + ui.write(match, label='grep.match') + ui.write(after) + ui.write(eol) + found = True + return found + + skip = {} + revfiles = {} + matchfn = scmutil.match(repo[None], pats, opts) + found = False + follow = opts.get('follow') + + def prep(ctx, fns): + rev = ctx.rev() + pctx = ctx.p1() + parent = pctx.rev() + matches.setdefault(rev, {}) + matches.setdefault(parent, {}) + files = revfiles.setdefault(rev, []) + for fn in fns: + flog = getfile(fn) + try: + fnode = ctx.filenode(fn) + except error.LookupError: + continue + + copied = flog.renamed(fnode) + copy = follow and copied and copied[0] + if copy: + copies.setdefault(rev, {})[fn] = copy + if fn in skip: + if copy: + skip[copy] = True + continue + files.append(fn) + + if fn not in matches[rev]: + grepbody(fn, rev, flog.read(fnode)) + + pfn = copy or fn + if pfn not in matches[parent]: + try: + fnode = pctx.filenode(pfn) + grepbody(pfn, parent, flog.read(fnode)) + except error.LookupError: + pass + + for ctx in cmdutil.walkchangerevs(repo, matchfn, opts, prep): + rev = ctx.rev() + parent = ctx.p1().rev() + for fn in sorted(revfiles.get(rev, [])): + states = matches[rev][fn] + copy = copies.get(rev, {}).get(fn) + if fn in skip: + if copy: + skip[copy] = True + continue + pstates = matches.get(parent, {}).get(copy or fn, []) + if 
pstates or states: + r = display(fn, ctx, pstates, states) + found = found or r + if r and not opts.get('all'): + skip[fn] = True + if copy: + skip[copy] = True + del matches[rev] + del revfiles[rev] + + return not found + +@command('heads', + [('r', 'rev', '', + _('show only heads which are descendants of STARTREV'), _('STARTREV')), + ('t', 'topo', False, _('show topological heads only')), + ('a', 'active', False, _('show active branchheads only (DEPRECATED)')), + ('c', 'closed', False, _('show normal and closed branch heads')), + ] + templateopts, + _('[-ac] [-r STARTREV] [REV]...')) +def heads(ui, repo, *branchrevs, **opts): + """show current repository heads or show branch heads + + With no arguments, show all repository branch heads. + + Repository "heads" are changesets with no child changesets. They are + where development generally takes place and are the usual targets + for update and merge operations. Branch heads are changesets that have + no child changeset on the same branch. + + If one or more REVs are given, only branch heads on the branches + associated with the specified changesets are shown. This means + that you can use :hg:`heads foo` to see the heads on a branch + named ``foo``. + + If -c/--closed is specified, also show branch heads marked closed + (see :hg:`commit --close-branch`). + + If STARTREV is specified, only those heads that are descendants of + STARTREV will be displayed. + + If -t/--topo is specified, named branch mechanics will be ignored and only + changesets without children will be shown. + + Returns 0 if matching heads are found, 1 if not. 
+ """ + + start = None + if 'rev' in opts: + start = scmutil.revsingle(repo, opts['rev'], None).node() + + if opts.get('topo'): + heads = [repo[h] for h in repo.heads(start)] + else: + heads = [] + for branch in repo.branchmap(): + heads += repo.branchheads(branch, start, opts.get('closed')) + heads = [repo[h] for h in heads] + + if branchrevs: + branches = set(repo[br].branch() for br in branchrevs) + heads = [h for h in heads if h.branch() in branches] + + if opts.get('active') and branchrevs: + dagheads = repo.heads(start) + heads = [h for h in heads if h.node() in dagheads] + + if branchrevs: + haveheads = set(h.branch() for h in heads) + if branches - haveheads: + headless = ', '.join(b for b in branches - haveheads) + msg = _('no open branch heads found on branches %s') + if opts.get('rev'): + msg += _(' (started at %s)' % opts['rev']) + ui.warn((msg + '\n') % headless) + + if not heads: + return 1 + + heads = sorted(heads, key=lambda x: -x.rev()) + displayer = cmdutil.show_changeset(ui, repo, opts) + for ctx in heads: + displayer.show(ctx) + displayer.close() + +@command('help', + [('e', 'extension', None, _('show only help for extensions')), + ('c', 'command', None, _('show only help for commands'))], + _('[-ec] [TOPIC]')) +def help_(ui, name=None, unknowncmd=False, full=True, **opts): + """show help for a given topic or a help overview + + With no arguments, print a list of commands with short help messages. + + Given a topic, extension, or command name, print help for that + topic. + + Returns 0 if successful. 
+ """ + + textwidth = min(ui.termwidth(), 80) - 2 + + def optrst(options): + data = [] + multioccur = False + for option in options: + if len(option) == 5: + shortopt, longopt, default, desc, optlabel = option + else: + shortopt, longopt, default, desc = option + optlabel = _("VALUE") # default label + + if _("DEPRECATED") in desc and not ui.verbose: + continue + + so = '' + if shortopt: + so = '-' + shortopt + lo = '--' + longopt + if default: + desc += _(" (default: %s)") % default + + if isinstance(default, list): + lo += " %s [+]" % optlabel + multioccur = True + elif (default is not None) and not isinstance(default, bool): + lo += " %s" % optlabel + + data.append((so, lo, desc)) + + rst = minirst.maketable(data, 1) + + if multioccur: + rst += _("\n[+] marked option can be specified multiple times\n") + + return rst + + # list all option lists + def opttext(optlist, width): + rst = '' + if not optlist: + return '' + + for title, options in optlist: + rst += '\n%s\n' % title + if options: + rst += "\n" + rst += optrst(options) + rst += '\n' + + return '\n' + minirst.format(rst, width) + + def addglobalopts(optlist, aliases): + if ui.quiet: + return [] + + if ui.verbose: + optlist.append((_("global options:"), globalopts)) + if name == 'shortlist': + optlist.append((_('use "hg help" for the full list ' + 'of commands'), ())) + else: + if name == 'shortlist': + msg = _('use "hg help" for the full list of commands ' + 'or "hg -v" for details') + elif name and not full: + msg = _('use "hg help %s" to show the full help text' % name) + elif aliases: + msg = _('use "hg -v help%s" to show builtin aliases and ' + 'global options') % (name and " " + name or "") + else: + msg = _('use "hg -v help %s" to show more info') % name + optlist.append((msg, ())) + + def helpcmd(name): + try: + aliases, entry = cmdutil.findcmd(name, table, strict=unknowncmd) + except error.AmbiguousCommand, inst: + # py3k fix: except vars can't be used outside the scope of the + # except block, 
nor can be used inside a lambda. python issue4617 + prefix = inst.args[0] + select = lambda c: c.lstrip('^').startswith(prefix) + helplist(select) + return + + # check if it's an invalid alias and display its error if it is + if getattr(entry[0], 'badalias', False): + if not unknowncmd: + entry[0](ui) + return + + rst = "" + + # synopsis + if len(entry) > 2: + if entry[2].startswith('hg'): + rst += "%s\n" % entry[2] + else: + rst += 'hg %s %s\n' % (aliases[0], entry[2]) + else: + rst += 'hg %s\n' % aliases[0] + + # aliases + if full and not ui.quiet and len(aliases) > 1: + rst += _("\naliases: %s\n") % ', '.join(aliases[1:]) + + # description + doc = gettext(entry[0].__doc__) + if not doc: + doc = _("(no help text available)") + if util.safehasattr(entry[0], 'definition'): # aliased command + if entry[0].definition.startswith('!'): # shell alias + doc = _('shell alias for::\n\n %s') % entry[0].definition[1:] + else: + doc = _('alias for: hg %s\n\n%s') % (entry[0].definition, doc) + if ui.quiet or not full: + doc = doc.splitlines()[0] + rst += "\n" + doc + "\n" + + # check if this command shadows a non-trivial (multi-line) + # extension help text + try: + mod = extensions.find(name) + doc = gettext(mod.__doc__) or '' + if '\n' in doc.strip(): + msg = _('use "hg help -e %s" to show help for ' + 'the %s extension') % (name, name) + rst += '\n%s\n' % msg + except KeyError: + pass + + # options + if not ui.quiet and entry[1]: + rst += '\noptions:\n\n' + rst += optrst(entry[1]) + + if ui.verbose: + rst += '\nglobal options:\n\n' + rst += optrst(globalopts) + + keep = ui.verbose and ['verbose'] or [] + formatted, pruned = minirst.format(rst, textwidth, keep=keep) + ui.write(formatted) + + if not ui.verbose: + if not full: + ui.write(_('\nuse "hg help %s" to show the full help text\n') + % name) + elif not ui.quiet: + ui.write(_('\nuse "hg -v help %s" to show more info\n') % name) + + + def helplist(select=None): + # list of commands + if name == "shortlist": + header = 
_('basic commands:\n\n') + else: + header = _('list of commands:\n\n') + + h = {} + cmds = {} + for c, e in table.iteritems(): + f = c.split("|", 1)[0] + if select and not select(f): + continue + if (not select and name != 'shortlist' and + e[0].__module__ != __name__): + continue + if name == "shortlist" and not f.startswith("^"): + continue + f = f.lstrip("^") + if not ui.debugflag and f.startswith("debug"): + continue + doc = e[0].__doc__ + if doc and 'DEPRECATED' in doc and not ui.verbose: + continue + doc = gettext(doc) + if not doc: + doc = _("(no help text available)") + h[f] = doc.splitlines()[0].rstrip() + cmds[f] = c.lstrip("^") + + if not h: + ui.status(_('no commands defined\n')) + return + + ui.status(header) + fns = sorted(h) + m = max(map(len, fns)) + for f in fns: + if ui.verbose: + commands = cmds[f].replace("|",", ") + ui.write(" %s:\n %s\n"%(commands, h[f])) + else: + ui.write('%s\n' % (util.wrap(h[f], textwidth, + initindent=' %-*s ' % (m, f), + hangindent=' ' * (m + 4)))) + + if not name: + text = help.listexts(_('enabled extensions:'), extensions.enabled()) + if text: + ui.write("\n%s" % minirst.format(text, textwidth)) + + ui.write(_("\nadditional help topics:\n\n")) + topics = [] + for names, header, doc in help.helptable: + topics.append((sorted(names, key=len, reverse=True)[0], header)) + topics_len = max([len(s[0]) for s in topics]) + for t, desc in topics: + ui.write(" %-*s %s\n" % (topics_len, t, desc)) + + optlist = [] + addglobalopts(optlist, True) + ui.write(opttext(optlist, textwidth)) + + def helptopic(name): + for names, header, doc in help.helptable: + if name in names: + break + else: + raise error.UnknownCommand(name) + + # description + if not doc: + doc = _("(no help text available)") + if util.safehasattr(doc, '__call__'): + doc = doc() + + ui.write("%s\n\n" % header) + ui.write("%s" % minirst.format(doc, textwidth, indent=4)) + try: + cmdutil.findcmd(name, table) + ui.write(_('\nuse "hg help -c %s" to see help for ' + 'the 
%s command\n') % (name, name)) + except error.UnknownCommand: + pass + + def helpext(name): + try: + mod = extensions.find(name) + doc = gettext(mod.__doc__) or _('no help text available') + except KeyError: + mod = None + doc = extensions.disabledext(name) + if not doc: + raise error.UnknownCommand(name) + + if '\n' not in doc: + head, tail = doc, "" + else: + head, tail = doc.split('\n', 1) + ui.write(_('%s extension - %s\n\n') % (name.split('.')[-1], head)) + if tail: + ui.write(minirst.format(tail, textwidth)) + ui.status('\n') + + if mod: + try: + ct = mod.cmdtable + except AttributeError: + ct = {} + modcmds = set([c.split('|', 1)[0] for c in ct]) + helplist(modcmds.__contains__) + else: + ui.write(_('use "hg help extensions" for information on enabling ' + 'extensions\n')) + + def helpextcmd(name): + cmd, ext, mod = extensions.disabledcmd(ui, name, ui.config('ui', 'strict')) + doc = gettext(mod.__doc__).splitlines()[0] + + msg = help.listexts(_("'%s' is provided by the following " + "extension:") % cmd, {ext: doc}, indent=4) + ui.write(minirst.format(msg, textwidth)) + ui.write('\n') + ui.write(_('use "hg help extensions" for information on enabling ' + 'extensions\n')) + + if name and name != 'shortlist': + i = None + if unknowncmd: + queries = (helpextcmd,) + elif opts.get('extension'): + queries = (helpext,) + elif opts.get('command'): + queries = (helpcmd,) + else: + queries = (helptopic, helpcmd, helpext, helpextcmd) + for f in queries: + try: + f(name) + i = None + break + except error.UnknownCommand, inst: + i = inst + if i: + raise i + else: + # program name + ui.status(_("Mercurial Distributed SCM\n")) + ui.status('\n') + helplist() + + +@command('identify|id', + [('r', 'rev', '', + _('identify the specified revision'), _('REV')), + ('n', 'num', None, _('show local revision number')), + ('i', 'id', None, _('show global revision id')), + ('b', 'branch', None, _('show branch')), + ('t', 'tags', None, _('show tags')), + ('B', 'bookmarks', None, _('show 
bookmarks'))], + _('[-nibtB] [-r REV] [SOURCE]')) +def identify(ui, repo, source=None, rev=None, + num=None, id=None, branch=None, tags=None, bookmarks=None): + """identify the working copy or specified revision + + Print a summary identifying the repository state at REV using one or + two parent hash identifiers, followed by a "+" if the working + directory has uncommitted changes, the branch name (if not default), + a list of tags, and a list of bookmarks. + + When REV is not given, print a summary of the current state of the + repository. + + Specifying a path to a repository root or Mercurial bundle will + cause lookup to operate on that repository/bundle. + + .. container:: verbose + + Examples: + + - generate a build identifier for the working directory:: + + hg id --id > build-id.dat + + - find the revision corresponding to a tag:: + + hg id -n -r 1.3 + + - check the most recent revision of a remote repository:: + + hg id -r tip http://selenic.com/hg/ + + Returns 0 if successful. 
+ """ + + if not repo and not source: + raise util.Abort(_("there is no Mercurial repository here " + "(.hg not found)")) + + hexfunc = ui.debugflag and hex or short + default = not (num or id or branch or tags or bookmarks) + output = [] + revs = [] + + if source: + source, branches = hg.parseurl(ui.expandpath(source)) + repo = hg.peer(ui, {}, source) + revs, checkout = hg.addbranchrevs(repo, repo, branches, None) + + if not repo.local(): + if num or branch or tags: + raise util.Abort( + _("can't query remote revision number, branch, or tags")) + if not rev and revs: + rev = revs[0] + if not rev: + rev = "tip" + + remoterev = repo.lookup(rev) + if default or id: + output = [hexfunc(remoterev)] + + def getbms(): + bms = [] + + if 'bookmarks' in repo.listkeys('namespaces'): + hexremoterev = hex(remoterev) + bms = [bm for bm, bmr in repo.listkeys('bookmarks').iteritems() + if bmr == hexremoterev] + + return bms + + if bookmarks: + output.extend(getbms()) + elif default and not ui.quiet: + # multiple bookmarks for a single parent separated by '/' + bm = '/'.join(getbms()) + if bm: + output.append(bm) + else: + if not rev: + ctx = repo[None] + parents = ctx.parents() + changed = "" + if default or id or num: + changed = util.any(repo.status()) and "+" or "" + if default or id: + output = ["%s%s" % + ('+'.join([hexfunc(p.node()) for p in parents]), changed)] + if num: + output.append("%s%s" % + ('+'.join([str(p.rev()) for p in parents]), changed)) + else: + ctx = scmutil.revsingle(repo, rev) + if default or id: + output = [hexfunc(ctx.node())] + if num: + output.append(str(ctx.rev())) + + if default and not ui.quiet: + b = ctx.branch() + if b != 'default': + output.append("(%s)" % b) + + # multiple tags for a single parent separated by '/' + t = '/'.join(ctx.tags()) + if t: + output.append(t) + + # multiple bookmarks for a single parent separated by '/' + bm = '/'.join(ctx.bookmarks()) + if bm: + output.append(bm) + else: + if branch: + output.append(ctx.branch()) + + 
if tags: + output.extend(ctx.tags()) + + if bookmarks: + output.extend(ctx.bookmarks()) + + ui.write("%s\n" % ' '.join(output)) + +@command('import|patch', + [('p', 'strip', 1, + _('directory strip option for patch. This has the same ' + 'meaning as the corresponding patch option'), _('NUM')), + ('b', 'base', '', _('base path (DEPRECATED)'), _('PATH')), + ('e', 'edit', False, _('invoke editor on commit messages')), + ('f', 'force', None, _('skip check for outstanding uncommitted changes')), + ('', 'no-commit', None, + _("don't commit, just update the working directory")), + ('', 'bypass', None, + _("apply patch without touching the working directory")), + ('', 'exact', None, + _('apply patch to the nodes from which it was generated')), + ('', 'import-branch', None, + _('use any branch information in patch (implied by --exact)'))] + + commitopts + commitopts2 + similarityopts, + _('[OPTION]... PATCH...')) +def import_(ui, repo, patch1=None, *patches, **opts): + """import an ordered set of patches + + Import a list of patches and commit them individually (unless + --no-commit is specified). + + If there are outstanding changes in the working directory, import + will abort unless given the -f/--force flag. + + You can import a patch straight from a mail message. Even patches + as attachments work (to use the body part, it must have type + text/plain or text/x-patch). From and Subject headers of email + message are used as default committer and commit message. All + text/plain body parts before first diff are added to commit + message. + + If the imported patch was generated by :hg:`export`, user and + description from patch override values from message headers and + body. Values given on command line with -m/--message and -u/--user + override these. + + If --exact is specified, import will set the working directory to + the parent of each patch before applying it, and will abort if the + resulting changeset has a different ID than the one recorded in + the patch. 
This may happen due to character set problems or other + deficiencies in the text patch format. + + Use --bypass to apply and commit patches directly to the + repository, not touching the working directory. Without --exact, + patches will be applied on top of the working directory parent + revision. + + With -s/--similarity, hg will attempt to discover renames and + copies in the patch in the same way as 'addremove'. + + To read a patch from standard input, use "-" as the patch name. If + a URL is specified, the patch will be downloaded from it. + See :hg:`help dates` for a list of formats valid for -d/--date. + + .. container:: verbose + + Examples: + + - import a traditional patch from a website and detect renames:: + + hg import -s 80 http://example.com/bugfix.patch + + - import a changeset from an hgweb server:: + + hg import http://www.selenic.com/hg/rev/5ca8c111e9aa + + - import all the patches in an Unix-style mbox:: + + hg import incoming-patches.mbox + + - attempt to exactly restore an exported changeset (not always + possible):: + + hg import --exact proposed-fix.patch + + Returns 0 on success. 
+ """ + + if not patch1: + raise util.Abort(_('need at least one patch to import')) + + patches = (patch1,) + patches + + date = opts.get('date') + if date: + opts['date'] = util.parsedate(date) + + editor = cmdutil.commiteditor + if opts.get('edit'): + editor = cmdutil.commitforceeditor + + update = not opts.get('bypass') + if not update and opts.get('no_commit'): + raise util.Abort(_('cannot use --no-commit with --bypass')) + try: + sim = float(opts.get('similarity') or 0) + except ValueError: + raise util.Abort(_('similarity must be a number')) + if sim < 0 or sim > 100: + raise util.Abort(_('similarity must be between 0 and 100')) + if sim and not update: + raise util.Abort(_('cannot use --similarity with --bypass')) + + if (opts.get('exact') or not opts.get('force')) and update: + cmdutil.bailifchanged(repo) + + base = opts["base"] + strip = opts["strip"] + wlock = lock = tr = None + msgs = [] + + def checkexact(repo, n, nodeid): + if opts.get('exact') and hex(n) != nodeid: + repo.rollback() + raise util.Abort(_('patch is damaged or loses information')) + + def tryone(ui, hunk, parents): + tmpname, message, user, date, branch, nodeid, p1, p2 = \ + patch.extract(ui, hunk) + + if not tmpname: + return (None, None) + msg = _('applied to working directory') + + try: + cmdline_message = cmdutil.logmessage(ui, opts) + if cmdline_message: + # pickup the cmdline msg + message = cmdline_message + elif message: + # pickup the patch msg + message = message.strip() + else: + # launch the editor + message = None + ui.debug('message:\n%s\n' % message) + + if len(parents) == 1: + parents.append(repo[nullid]) + if opts.get('exact'): + if not nodeid or not p1: + raise util.Abort(_('not a Mercurial patch')) + p1 = repo[p1] + p2 = repo[p2 or nullid] + elif p2: + try: + p1 = repo[p1] + p2 = repo[p2] + # Without any options, consider p2 only if the + # patch is being applied on top of the recorded + # first parent. 
+ if p1 != parents[0]: + p1 = parents[0] + p2 = repo[nullid] + except error.RepoError: + p1, p2 = parents + else: + p1, p2 = parents + + n = None + if update: + if p1 != parents[0]: + hg.clean(repo, p1.node()) + if p2 != parents[1]: + repo.dirstate.setparents(p1.node(), p2.node()) + + if opts.get('exact') or opts.get('import_branch'): + repo.dirstate.setbranch(branch or 'default') + + files = set() + patch.patch(ui, repo, tmpname, strip=strip, files=files, + eolmode=None, similarity=sim / 100.0) + files = list(files) + if opts.get('no_commit'): + if message: + msgs.append(message) + else: + if opts.get('exact') or p2: + # If you got here, you either use --force and know what + # you are doing or used --exact or a merge patch while + # being updated to its first parent. + m = None + else: + m = scmutil.matchfiles(repo, files or []) + n = repo.commit(message, opts.get('user') or user, + opts.get('date') or date, match=m, + editor=editor) + checkexact(repo, n, nodeid) + else: + if opts.get('exact') or opts.get('import_branch'): + branch = branch or 'default' + else: + branch = p1.branch() + store = patch.filestore() + try: + files = set() + try: + patch.patchrepo(ui, repo, p1, store, tmpname, strip, + files, eolmode=None) + except patch.PatchError, e: + raise util.Abort(str(e)) + memctx = patch.makememctx(repo, (p1.node(), p2.node()), + message, + opts.get('user') or user, + opts.get('date') or date, + branch, files, store, + editor=cmdutil.commiteditor) + repo.savecommitmessage(memctx.description()) + n = memctx.commit() + checkexact(repo, n, nodeid) + finally: + store.close() + if n: + # i18n: refers to a short changeset id + msg = _('created %s') % short(n) + return (msg, n) + finally: + os.unlink(tmpname) + + try: + try: + wlock = repo.wlock() + lock = repo.lock() + tr = repo.transaction('import') + parents = repo.parents() + for patchurl in patches: + if patchurl == '-': + ui.status(_('applying patch from stdin\n')) + patchfile = ui.fin + patchurl = 'stdin' # for 
error message + else: + patchurl = os.path.join(base, patchurl) + ui.status(_('applying %s\n') % patchurl) + patchfile = url.open(ui, patchurl) + + haspatch = False + for hunk in patch.split(patchfile): + (msg, node) = tryone(ui, hunk, parents) + if msg: + haspatch = True + ui.note(msg + '\n') + if update or opts.get('exact'): + parents = repo.parents() + else: + parents = [repo[node]] + + if not haspatch: + raise util.Abort(_('%s: no diffs found') % patchurl) + + tr.close() + if msgs: + repo.savecommitmessage('\n* * *\n'.join(msgs)) + except: + # wlock.release() indirectly calls dirstate.write(): since + # we're crashing, we do not want to change the working dir + # parent after all, so make sure it writes nothing + repo.dirstate.invalidate() + raise + finally: + if tr: + tr.release() + release(lock, wlock) + +@command('incoming|in', + [('f', 'force', None, + _('run even if remote repository is unrelated')), + ('n', 'newest-first', None, _('show newest record first')), + ('', 'bundle', '', + _('file to store the bundles into'), _('FILE')), + ('r', 'rev', [], _('a remote changeset intended to be added'), _('REV')), + ('B', 'bookmarks', False, _("compare bookmarks")), + ('b', 'branch', [], + _('a specific branch you would like to pull'), _('BRANCH')), + ] + logopts + remoteopts + subrepoopts, + _('[-p] [-n] [-M] [-f] [-r REV]... [--bundle FILENAME] [SOURCE]')) +def incoming(ui, repo, source="default", **opts): + """show new changesets found in source + + Show new changesets found in the specified path/URL or the default + pull location. These are the changesets that would have been pulled + if a pull at the time you issued this command. + + For remote repository, using --bundle avoids downloading the + changesets twice if the incoming is followed by a pull. + + See pull for valid source format details. + + Returns 0 if there are incoming changes, 1 otherwise. 
+ """ + if opts.get('bundle') and opts.get('subrepos'): + raise util.Abort(_('cannot combine --bundle and --subrepos')) + + if opts.get('bookmarks'): + source, branches = hg.parseurl(ui.expandpath(source), + opts.get('branch')) + other = hg.peer(repo, opts, source) + if 'bookmarks' not in other.listkeys('namespaces'): + ui.warn(_("remote doesn't support bookmarks\n")) + return 0 + ui.status(_('comparing with %s\n') % util.hidepassword(source)) + return bookmarks.diff(ui, repo, other) + + repo._subtoppath = ui.expandpath(source) + try: + return hg.incoming(ui, repo, source, opts) + finally: + del repo._subtoppath + + +@command('^init', remoteopts, _('[-e CMD] [--remotecmd CMD] [DEST]')) +def init(ui, dest=".", **opts): + """create a new repository in the given directory + + Initialize a new repository in the given directory. If the given + directory does not exist, it will be created. + + If no directory is given, the current directory is used. + + It is possible to specify an ``ssh://`` URL as the destination. + See :hg:`help urls` for more information. + + Returns 0 on success. + """ + hg.peer(ui, opts, ui.expandpath(dest), create=True) + +@command('locate', + [('r', 'rev', '', _('search the repository as it is in REV'), _('REV')), + ('0', 'print0', None, _('end filenames with NUL, for use with xargs')), + ('f', 'fullpath', None, _('print complete paths from the filesystem root')), + ] + walkopts, + _('[OPTION]... [PATTERN]...')) +def locate(ui, repo, *pats, **opts): + """locate files matching specific patterns + + Print files under Mercurial control in the working directory whose + names match the given patterns. + + By default, this command searches all directories in the working + directory. To search just the current directory and its + subdirectories, use "--include .". + + If no patterns are given to match, this command prints the names + of all files under Mercurial control in the working directory. 
+ + If you want to feed the output of this command into the "xargs" + command, use the -0 option to both this command and "xargs". This + will avoid the problem of "xargs" treating single filenames that + contain whitespace as multiple filenames. + + Returns 0 if a match is found, 1 otherwise. + """ + end = opts.get('print0') and '\0' or '\n' + rev = scmutil.revsingle(repo, opts.get('rev'), None).node() + + ret = 1 + m = scmutil.match(repo[rev], pats, opts, default='relglob') + m.bad = lambda x, y: False + for abs in repo[rev].walk(m): + if not rev and abs not in repo.dirstate: + continue + if opts.get('fullpath'): + ui.write(repo.wjoin(abs), end) + else: + ui.write(((pats and m.rel(abs)) or abs), end) + ret = 0 + + return ret + +@command('^log|history', + [('f', 'follow', None, + _('follow changeset history, or file history across copies and renames')), + ('', 'follow-first', None, + _('only follow the first parent of merge changesets')), + ('d', 'date', '', _('show revisions matching date spec'), _('DATE')), + ('C', 'copies', None, _('show copied files')), + ('k', 'keyword', [], + _('do case-insensitive search for a given text'), _('TEXT')), + ('r', 'rev', [], _('show the specified revision or range'), _('REV')), + ('', 'removed', None, _('include revisions where files were removed')), + ('m', 'only-merges', None, _('show only merges')), + ('u', 'user', [], _('revisions committed by user'), _('USER')), + ('', 'only-branch', [], + _('show only changesets within the given named branch (DEPRECATED)'), + _('BRANCH')), + ('b', 'branch', [], + _('show changesets within the given named branch'), _('BRANCH')), + ('P', 'prune', [], + _('do not display revision or any of its ancestors'), _('REV')), + ('', 'hidden', False, _('show hidden changesets')), + ] + logopts + walkopts, + _('[OPTION]... [FILE]')) +def log(ui, repo, *pats, **opts): + """show revision history of entire repository or files + + Print the revision history of the specified files or the entire + project. 
+ + If no revision range is specified, the default is ``tip:0`` unless + --follow is set, in which case the working directory parent is + used as the starting revision. + + File history is shown without following rename or copy history of + files. Use -f/--follow with a filename to follow history across + renames and copies. --follow without a filename will only show + ancestors or descendants of the starting revision. + + By default this command prints revision number and changeset id, + tags, non-trivial parents, user, date and time, and a summary for + each commit. When the -v/--verbose switch is used, the list of + changed files and full commit message are shown. + + .. note:: + log -p/--patch may generate unexpected diff output for merge + changesets, as it will only compare the merge changeset against + its first parent. Also, only files different from BOTH parents + will appear in files:. + + .. note:: + for performance reasons, log FILE may omit duplicate changes + made on branches and will not show deletions. To see all + changes including duplicates and deletions, use the --removed + switch. + + .. container:: verbose + + Some examples: + + - changesets with full descriptions and file lists:: + + hg log -v + + - changesets ancestral to the working directory:: + + hg log -f + + - last 10 commits on the current branch:: + + hg log -l 10 -b . 
+ + - changesets showing all modifications of a file, including removals:: + + hg log --removed file.c + + - all changesets that touch a directory, with diffs, excluding merges:: + + hg log -Mp lib/ + + - all revision numbers that match a keyword:: + + hg log -k bug --template "{rev}\\n" + + - check if a given changeset is included is a tagged release:: + + hg log -r "a21ccf and ancestor(1.9)" + + - find all changesets by some user in a date range:: + + hg log -k alice -d "may 2008 to jul 2008" + + - summary of all changesets after the last tag:: + + hg log -r "last(tagged())::" --template "{desc|firstline}\\n" + + See :hg:`help dates` for a list of formats valid for -d/--date. + + See :hg:`help revisions` and :hg:`help revsets` for more about + specifying revisions. + + Returns 0 on success. + """ + + matchfn = scmutil.match(repo[None], pats, opts) + limit = cmdutil.loglimit(opts) + count = 0 + + endrev = None + if opts.get('copies') and opts.get('rev'): + endrev = max(scmutil.revrange(repo, opts.get('rev'))) + 1 + + df = False + if opts["date"]: + df = util.matchdate(opts["date"]) + + branches = opts.get('branch', []) + opts.get('only_branch', []) + opts['branch'] = [repo.lookupbranch(b) for b in branches] + + displayer = cmdutil.show_changeset(ui, repo, opts, True) + def prep(ctx, fns): + rev = ctx.rev() + parents = [p for p in repo.changelog.parentrevs(rev) + if p != nullrev] + if opts.get('no_merges') and len(parents) == 2: + return + if opts.get('only_merges') and len(parents) != 2: + return + if opts.get('branch') and ctx.branch() not in opts['branch']: + return + if not opts.get('hidden') and ctx.hidden(): + return + if df and not df(ctx.date()[0]): + return + if opts['user'] and not [k for k in opts['user'] + if k.lower() in ctx.user().lower()]: + return + if opts.get('keyword'): + for k in [kw.lower() for kw in opts['keyword']]: + if (k in ctx.user().lower() or + k in ctx.description().lower() or + k in " ".join(ctx.files()).lower()): + break + else: + 
return + + copies = None + if opts.get('copies') and rev: + copies = [] + getrenamed = templatekw.getrenamedfn(repo, endrev=endrev) + for fn in ctx.files(): + rename = getrenamed(fn, rev) + if rename: + copies.append((fn, rename[0])) + + revmatchfn = None + if opts.get('patch') or opts.get('stat'): + if opts.get('follow') or opts.get('follow_first'): + # note: this might be wrong when following through merges + revmatchfn = scmutil.match(repo[None], fns, default='path') + else: + revmatchfn = matchfn + + displayer.show(ctx, copies=copies, matchfn=revmatchfn) + + for ctx in cmdutil.walkchangerevs(repo, matchfn, opts, prep): + if count == limit: + break + if displayer.flush(ctx.rev()): + count += 1 + displayer.close() + +@command('manifest', + [('r', 'rev', '', _('revision to display'), _('REV')), + ('', 'all', False, _("list files from all revisions"))], + _('[-r REV]')) +def manifest(ui, repo, node=None, rev=None, **opts): + """output the current or given revision of the project manifest + + Print a list of version controlled files for the given revision. + If no revision is given, the first parent of the working directory + is used, or the null revision if no revision is checked out. + + With -v, print file permissions, symlink and executable bits. + With --debug, print file revision hashes. + + If option --all is specified, the list of all files from all revisions + is printed. This includes deleted and renamed files. + + Returns 0 on success. 
+ """ + if opts.get('all'): + if rev or node: + raise util.Abort(_("can't specify a revision with --all")) + + res = [] + prefix = "data/" + suffix = ".i" + plen = len(prefix) + slen = len(suffix) + lock = repo.lock() + try: + for fn, b, size in repo.store.datafiles(): + if size != 0 and fn[-slen:] == suffix and fn[:plen] == prefix: + res.append(fn[plen:-slen]) + finally: + lock.release() + for f in sorted(res): + ui.write("%s\n" % f) + return + + if rev and node: + raise util.Abort(_("please specify just one revision")) + + if not node: + node = rev + + decor = {'l':'644 @ ', 'x':'755 * ', '':'644 '} + ctx = scmutil.revsingle(repo, node) + for f in ctx: + if ui.debugflag: + ui.write("%40s " % hex(ctx.manifest()[f])) + if ui.verbose: + ui.write(decor[ctx.flags(f)]) + ui.write("%s\n" % f) + +@command('^merge', + [('f', 'force', None, _('force a merge with outstanding changes')), + ('r', 'rev', '', _('revision to merge'), _('REV')), + ('P', 'preview', None, + _('review revisions to merge (no merge is performed)')) + ] + mergetoolopts, + _('[-P] [-f] [[-r] REV]')) +def merge(ui, repo, node=None, **opts): + """merge working directory with another revision + + The current working directory is updated with all changes made in + the requested revision since the last common predecessor revision. + + Files that changed between either parent are marked as changed for + the next commit and a commit must be performed before any further + updates to the repository are allowed. The next commit will have + two parents. + + ``--tool`` can be used to specify the merge tool used for file + merges. It overrides the HGMERGE environment variable and your + configuration files. See :hg:`help merge-tools` for options. + + If no revision is specified, the working directory's parent is a + head revision, and the current branch contains exactly one other + head, the other head is merged with by default. Otherwise, an + explicit revision with which to merge with must be provided. 
+ + :hg:`resolve` must be used to resolve unresolved files. + + To undo an uncommitted merge, use :hg:`update --clean .` which + will check out a clean copy of the original merge parent, losing + all changes. + + Returns 0 on success, 1 if there are unresolved files. + """ + + if opts.get('rev') and node: + raise util.Abort(_("please specify just one revision")) + if not node: + node = opts.get('rev') + + if not node: + branch = repo[None].branch() + bheads = repo.branchheads(branch) + if len(bheads) > 2: + raise util.Abort(_("branch '%s' has %d heads - " + "please merge with an explicit rev") + % (branch, len(bheads)), + hint=_("run 'hg heads .' to see heads")) + + parent = repo.dirstate.p1() + if len(bheads) == 1: + if len(repo.heads()) > 1: + raise util.Abort(_("branch '%s' has one head - " + "please merge with an explicit rev") + % branch, + hint=_("run 'hg heads' to see all heads")) + msg = _('there is nothing to merge') + if parent != repo.lookup(repo[None].branch()): + msg = _('%s - use "hg update" instead') % msg + raise util.Abort(msg) + + if parent not in bheads: + raise util.Abort(_('working directory not at a head revision'), + hint=_("use 'hg update' or merge with an " + "explicit revision")) + node = parent == bheads[0] and bheads[-1] or bheads[0] + else: + node = scmutil.revsingle(repo, node).node() + + if opts.get('preview'): + # find nodes that are ancestors of p2 but not of p1 + p1 = repo.lookup('.') + p2 = repo.lookup(node) + nodes = repo.changelog.findmissing(common=[p1], heads=[p2]) + + displayer = cmdutil.show_changeset(ui, repo, opts) + for node in nodes: + displayer.show(repo[node]) + displayer.close() + return 0 + + try: + # ui.forcemerge is an internal variable, do not document + repo.ui.setconfig('ui', 'forcemerge', opts.get('tool', '')) + return hg.merge(repo, node, force=opts.get('force')) + finally: + ui.setconfig('ui', 'forcemerge', '') + +@command('outgoing|out', + [('f', 'force', None, _('run even when the destination is 
unrelated')), + ('r', 'rev', [], + _('a changeset intended to be included in the destination'), _('REV')), + ('n', 'newest-first', None, _('show newest record first')), + ('B', 'bookmarks', False, _('compare bookmarks')), + ('b', 'branch', [], _('a specific branch you would like to push'), + _('BRANCH')), + ] + logopts + remoteopts + subrepoopts, + _('[-M] [-p] [-n] [-f] [-r REV]... [DEST]')) +def outgoing(ui, repo, dest=None, **opts): + """show changesets not found in the destination + + Show changesets not found in the specified destination repository + or the default push location. These are the changesets that would + be pushed if a push was requested. + + See pull for details of valid destination formats. + + Returns 0 if there are outgoing changes, 1 otherwise. + """ + + if opts.get('bookmarks'): + dest = ui.expandpath(dest or 'default-push', dest or 'default') + dest, branches = hg.parseurl(dest, opts.get('branch')) + other = hg.peer(repo, opts, dest) + if 'bookmarks' not in other.listkeys('namespaces'): + ui.warn(_("remote doesn't support bookmarks\n")) + return 0 + ui.status(_('comparing with %s\n') % util.hidepassword(dest)) + return bookmarks.diff(ui, other, repo) + + repo._subtoppath = ui.expandpath(dest or 'default-push', dest or 'default') + try: + return hg.outgoing(ui, repo, dest, opts) + finally: + del repo._subtoppath + +@command('parents', + [('r', 'rev', '', _('show parents of the specified revision'), _('REV')), + ] + templateopts, + _('[-r REV] [FILE]')) +def parents(ui, repo, file_=None, **opts): + """show the parents of the working directory or revision + + Print the working directory's parent revisions. If a revision is + given via -r/--rev, the parent of that revision will be printed. + If a file argument is given, the revision in which the file was + last changed (before the working directory revision or the + argument to --rev if given) is printed. + + Returns 0 on success. 
+ """ + + ctx = scmutil.revsingle(repo, opts.get('rev'), None) + + if file_: + m = scmutil.match(ctx, (file_,), opts) + if m.anypats() or len(m.files()) != 1: + raise util.Abort(_('can only specify an explicit filename')) + file_ = m.files()[0] + filenodes = [] + for cp in ctx.parents(): + if not cp: + continue + try: + filenodes.append(cp.filenode(file_)) + except error.LookupError: + pass + if not filenodes: + raise util.Abort(_("'%s' not found in manifest!") % file_) + fl = repo.file(file_) + p = [repo.lookup(fl.linkrev(fl.rev(fn))) for fn in filenodes] + else: + p = [cp.node() for cp in ctx.parents()] + + displayer = cmdutil.show_changeset(ui, repo, opts) + for n in p: + if n != nullid: + displayer.show(repo[n]) + displayer.close() + +@command('paths', [], _('[NAME]')) +def paths(ui, repo, search=None): + """show aliases for remote repositories + + Show definition of symbolic path name NAME. If no name is given, + show definition of all available names. + + Option -q/--quiet suppresses all output when searching for NAME + and shows only the path names when listing all definitions. + + Path names are defined in the [paths] section of your + configuration file and in ``/etc/mercurial/hgrc``. If run inside a + repository, ``.hg/hgrc`` is used, too. + + The path names ``default`` and ``default-push`` have a special + meaning. When performing a push or pull operation, they are used + as fallbacks if no location is specified on the command-line. + When ``default-push`` is set, it will be used for push and + ``default`` will be used for pull; otherwise ``default`` is used + as the fallback for both. When cloning a repository, the clone + source is written as ``default`` in ``.hg/hgrc``. Note that + ``default`` and ``default-push`` apply to all inbound (e.g. + :hg:`incoming`) and outbound (e.g. :hg:`outgoing`, :hg:`email` and + :hg:`bundle`) operations. + + See :hg:`help urls` for more information. + + Returns 0 on success. 
+ """ + if search: + for name, path in ui.configitems("paths"): + if name == search: + ui.status("%s\n" % util.hidepassword(path)) + return + if not ui.quiet: + ui.warn(_("not found!\n")) + return 1 + else: + for name, path in ui.configitems("paths"): + if ui.quiet: + ui.write("%s\n" % name) + else: + ui.write("%s = %s\n" % (name, util.hidepassword(path))) + +def postincoming(ui, repo, modheads, optupdate, checkout): + if modheads == 0: + return + if optupdate: + try: + return hg.update(repo, checkout) + except util.Abort, inst: + ui.warn(_("not updating: %s\n" % str(inst))) + return 0 + if modheads > 1: + currentbranchheads = len(repo.branchheads()) + if currentbranchheads == modheads: + ui.status(_("(run 'hg heads' to see heads, 'hg merge' to merge)\n")) + elif currentbranchheads > 1: + ui.status(_("(run 'hg heads .' to see heads, 'hg merge' to merge)\n")) + else: + ui.status(_("(run 'hg heads' to see heads)\n")) + else: + ui.status(_("(run 'hg update' to get a working copy)\n")) + +@command('^pull', + [('u', 'update', None, + _('update to new branch head if changesets were pulled')), + ('f', 'force', None, _('run even when remote repository is unrelated')), + ('r', 'rev', [], _('a remote changeset intended to be added'), _('REV')), + ('B', 'bookmark', [], _("bookmark to pull"), _('BOOKMARK')), + ('b', 'branch', [], _('a specific branch you would like to pull'), + _('BRANCH')), + ] + remoteopts, + _('[-u] [-f] [-r REV]... [-e CMD] [--remotecmd CMD] [SOURCE]')) +def pull(ui, repo, source="default", **opts): + """pull changes from the specified source + + Pull changes from a remote repository to a local one. + + This finds all changes from the repository at the specified path + or URL and adds them to a local repository (the current one unless + -R is specified). By default, this does not update the copy of the + project in the working directory. + + Use :hg:`incoming` if you want to see what would have been added + by a pull at the time you issued this command. 
If you then decide + to add those changes to the repository, you should use :hg:`pull + -r X` where ``X`` is the last changeset listed by :hg:`incoming`. + + If SOURCE is omitted, the 'default' path will be used. + See :hg:`help urls` for more information. + + Returns 0 on success, 1 if an update had unresolved files. + """ + source, branches = hg.parseurl(ui.expandpath(source), opts.get('branch')) + other = hg.peer(repo, opts, source) + ui.status(_('pulling from %s\n') % util.hidepassword(source)) + revs, checkout = hg.addbranchrevs(repo, other, branches, opts.get('rev')) + + if opts.get('bookmark'): + if not revs: + revs = [] + rb = other.listkeys('bookmarks') + for b in opts['bookmark']: + if b not in rb: + raise util.Abort(_('remote bookmark %s not found!') % b) + revs.append(rb[b]) + + if revs: + try: + revs = [other.lookup(rev) for rev in revs] + except error.CapabilityError: + err = _("other repository doesn't support revision lookup, " + "so a rev cannot be specified.") + raise util.Abort(err) + + modheads = repo.pull(other, heads=revs, force=opts.get('force')) + bookmarks.updatefromremote(ui, repo, other) + if checkout: + checkout = str(repo.changelog.rev(other.lookup(checkout))) + repo._subtoppath = source + try: + ret = postincoming(ui, repo, modheads, opts.get('update'), checkout) + + finally: + del repo._subtoppath + + # update specified bookmarks + if opts.get('bookmark'): + for b in opts['bookmark']: + # explicit pull overrides local bookmark if any + ui.status(_("importing bookmark %s\n") % b) + repo._bookmarks[b] = repo[rb[b]].node() + bookmarks.write(repo) + + return ret + +@command('^push', + [('f', 'force', None, _('force push')), + ('r', 'rev', [], + _('a changeset intended to be included in the destination'), + _('REV')), + ('B', 'bookmark', [], _("bookmark to push"), _('BOOKMARK')), + ('b', 'branch', [], + _('a specific branch you would like to push'), _('BRANCH')), + ('', 'new-branch', False, _('allow pushing a new branch')), + ] + 
remoteopts, + _('[-f] [-r REV]... [-e CMD] [--remotecmd CMD] [DEST]')) +def push(ui, repo, dest=None, **opts): + """push changes to the specified destination + + Push changesets from the local repository to the specified + destination. + + This operation is symmetrical to pull: it is identical to a pull + in the destination repository from the current one. + + By default, push will not allow creation of new heads at the + destination, since multiple heads would make it unclear which head + to use. In this situation, it is recommended to pull and merge + before pushing. + + Use --new-branch if you want to allow push to create a new named + branch that is not present at the destination. This allows you to + only create a new branch without forcing other changes. + + Use -f/--force to override the default behavior and push all + changesets on all branches. + + If -r/--rev is used, the specified revision and all its ancestors + will be pushed to the remote repository. + + Please see :hg:`help urls` for important details about ``ssh://`` + URLs. If DESTINATION is omitted, a default path will be used. + + Returns 0 if push was successful, 1 if nothing to push. 
+ """ + + if opts.get('bookmark'): + for b in opts['bookmark']: + # translate -B options to -r so changesets get pushed + if b in repo._bookmarks: + opts.setdefault('rev', []).append(b) + else: + # if we try to push a deleted bookmark, translate it to null + # this lets simultaneous -r, -b options continue working + opts.setdefault('rev', []).append("null") + + dest = ui.expandpath(dest or 'default-push', dest or 'default') + dest, branches = hg.parseurl(dest, opts.get('branch')) + ui.status(_('pushing to %s\n') % util.hidepassword(dest)) + revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get('rev')) + other = hg.peer(repo, opts, dest) + if revs: + revs = [repo.lookup(rev) for rev in revs] + + repo._subtoppath = dest + try: + # push subrepos depth-first for coherent ordering + c = repo[''] + subs = c.substate # only repos that are committed + for s in sorted(subs): + if not c.sub(s).push(opts.get('force')): + return False + finally: + del repo._subtoppath + result = repo.push(other, opts.get('force'), revs=revs, + newbranch=opts.get('new_branch')) + + result = (result == 0) + + if opts.get('bookmark'): + rb = other.listkeys('bookmarks') + for b in opts['bookmark']: + # explicit push overrides remote bookmark if any + if b in repo._bookmarks: + ui.status(_("exporting bookmark %s\n") % b) + new = repo[b].hex() + elif b in rb: + ui.status(_("deleting remote bookmark %s\n") % b) + new = '' # delete + else: + ui.warn(_('bookmark %s does not exist on the local ' + 'or remote repository!\n') % b) + return 2 + old = rb.get(b, '') + r = other.pushkey('bookmarks', b, old, new) + if not r: + ui.warn(_('updating bookmark %s failed!\n') % b) + if not result: + result = 2 + + return result + +@command('recover', []) +def recover(ui, repo): + """roll back an interrupted transaction + + Recover from an interrupted commit or pull. + + This command tries to fix the repository status after an + interrupted operation. 
It should only be necessary when Mercurial + suggests it. + + Returns 0 if successful, 1 if nothing to recover or verify fails. + """ + if repo.recover(): + return hg.verify(repo) + return 1 + +@command('^remove|rm', + [('A', 'after', None, _('record delete for missing files')), + ('f', 'force', None, + _('remove (and delete) file even if added or modified')), + ] + walkopts, + _('[OPTION]... FILE...')) +def remove(ui, repo, *pats, **opts): + """remove the specified files on the next commit + + Schedule the indicated files for removal from the current branch. + + This command schedules the files to be removed at the next commit. + To undo a remove before that, see :hg:`revert`. To undo added + files, see :hg:`forget`. + + .. container:: verbose + + -A/--after can be used to remove only files that have already + been deleted, -f/--force can be used to force deletion, and -Af + can be used to remove files from the next revision without + deleting them from the working directory. + + The following table details the behavior of remove for different + file states (columns) and option combinations (rows). The file + states are Added [A], Clean [C], Modified [M] and Missing [!] + (as reported by :hg:`status`). The actions are Warn, Remove + (from branch) and Delete (from disk): + + ======= == == == == + A C M ! + ======= == == == == + none W RD W R + -f R RD RD R + -A W W W R + -Af R R R R + ======= == == == == + + Note that remove never deletes files in Added [A] state from the + working directory, not even if option --force is specified. + + Returns 0 on success, 1 if any warnings encountered. 
+ """ + + ret = 0 + after, force = opts.get('after'), opts.get('force') + if not pats and not after: + raise util.Abort(_('no files specified')) + + m = scmutil.match(repo[None], pats, opts) + s = repo.status(match=m, clean=True) + modified, added, deleted, clean = s[0], s[1], s[3], s[6] + + for f in m.files(): + if f not in repo.dirstate and not os.path.isdir(m.rel(f)): + if os.path.exists(m.rel(f)): + ui.warn(_('not removing %s: file is untracked\n') % m.rel(f)) + ret = 1 + + if force: + list = modified + deleted + clean + added + elif after: + list = deleted + for f in modified + added + clean: + ui.warn(_('not removing %s: file still exists (use -f' + ' to force removal)\n') % m.rel(f)) + ret = 1 + else: + list = deleted + clean + for f in modified: + ui.warn(_('not removing %s: file is modified (use -f' + ' to force removal)\n') % m.rel(f)) + ret = 1 + for f in added: + ui.warn(_('not removing %s: file has been marked for add' + ' (use forget to undo)\n') % m.rel(f)) + ret = 1 + + for f in sorted(list): + if ui.verbose or not m.exact(f): + ui.status(_('removing %s\n') % m.rel(f)) + + wlock = repo.wlock() + try: + if not after: + for f in list: + if f in added: + continue # we never unlink added files on remove + try: + util.unlinkpath(repo.wjoin(f)) + except OSError, inst: + if inst.errno != errno.ENOENT: + raise + repo[None].forget(list) + finally: + wlock.release() + + return ret + +@command('rename|move|mv', + [('A', 'after', None, _('record a rename that has already occurred')), + ('f', 'force', None, _('forcibly copy over an existing managed file')), + ] + walkopts + dryrunopts, + _('[OPTION]... SOURCE... DEST')) +def rename(ui, repo, *pats, **opts): + """rename files; equivalent of copy + remove + + Mark dest as copies of sources; mark sources for deletion. If dest + is a directory, copies are put in that directory. If dest is a + file, there can only be one source. 
+ + By default, this command copies the contents of files as they + exist in the working directory. If invoked with -A/--after, the + operation is recorded, but no copying is performed. + + This command takes effect at the next commit. To undo a rename + before that, see :hg:`revert`. + + Returns 0 on success, 1 if errors are encountered. + """ + wlock = repo.wlock(False) + try: + return cmdutil.copy(ui, repo, pats, opts, rename=True) + finally: + wlock.release() + +@command('resolve', + [('a', 'all', None, _('select all unresolved files')), + ('l', 'list', None, _('list state of files needing merge')), + ('m', 'mark', None, _('mark files as resolved')), + ('u', 'unmark', None, _('mark files as unresolved')), + ('n', 'no-status', None, _('hide status prefix'))] + + mergetoolopts + walkopts, + _('[OPTION]... [FILE]...')) +def resolve(ui, repo, *pats, **opts): + """redo merges or set/view the merge status of files + + Merges with unresolved conflicts are often the result of + non-interactive merging using the ``internal:merge`` configuration + setting, or a command-line merge tool like ``diff3``. The resolve + command is used to manage the files involved in a merge, after + :hg:`merge` has been run, and before :hg:`commit` is run (i.e. the + working directory must have two parents). + + The resolve command can be used in the following ways: + + - :hg:`resolve [--tool TOOL] FILE...`: attempt to re-merge the specified + files, discarding any previous merge attempts. Re-merging is not + performed for files already marked as resolved. Use ``--all/-a`` + to select all unresolved files. ``--tool`` can be used to specify + the merge tool used for the given files. It overrides the HGMERGE + environment variable and your configuration files. Previous file + contents are saved with a ``.orig`` suffix. + + - :hg:`resolve -m [FILE]`: mark a file as having been resolved + (e.g. after having manually fixed-up the files). The default is + to mark all unresolved files. 
+ + - :hg:`resolve -u [FILE]...`: mark a file as unresolved. The + default is to mark all resolved files. + + - :hg:`resolve -l`: list files which had or still have conflicts. + In the printed list, ``U`` = unresolved and ``R`` = resolved. + + Note that Mercurial will not let you commit files with unresolved + merge conflicts. You must use :hg:`resolve -m ...` before you can + commit after a conflicting merge. + + Returns 0 on success, 1 if any files fail a resolve attempt. + """ + + all, mark, unmark, show, nostatus = \ + [opts.get(o) for o in 'all mark unmark list no_status'.split()] + + if (show and (mark or unmark)) or (mark and unmark): + raise util.Abort(_("too many options specified")) + if pats and all: + raise util.Abort(_("can't specify --all and patterns")) + if not (all or pats or show or mark or unmark): + raise util.Abort(_('no files or directories specified; ' + 'use --all to remerge all files')) + + ms = mergemod.mergestate(repo) + m = scmutil.match(repo[None], pats, opts) + ret = 0 + + for f in ms: + if m(f): + if show: + if nostatus: + ui.write("%s\n" % f) + else: + ui.write("%s %s\n" % (ms[f].upper(), f), + label='resolve.' 
+ + {'u': 'unresolved', 'r': 'resolved'}[ms[f]]) + elif mark: + ms.mark(f, "r") + elif unmark: + ms.mark(f, "u") + else: + wctx = repo[None] + mctx = wctx.parents()[-1] + + # backup pre-resolve (merge uses .orig for its own purposes) + a = repo.wjoin(f) + util.copyfile(a, a + ".resolve") + + try: + # resolve file + ui.setconfig('ui', 'forcemerge', opts.get('tool', '')) + if ms.resolve(f, wctx, mctx): + ret = 1 + finally: + ui.setconfig('ui', 'forcemerge', '') + + # replace filemerge's .orig file with our resolve file + util.rename(a + ".resolve", a + ".orig") + + ms.commit() + return ret + +@command('revert', + [('a', 'all', None, _('revert all changes when no arguments given')), + ('d', 'date', '', _('tipmost revision matching date'), _('DATE')), + ('r', 'rev', '', _('revert to the specified revision'), _('REV')), + ('C', 'no-backup', None, _('do not save backup copies of files')), + ] + walkopts + dryrunopts, + _('[OPTION]... [-r REV] [NAME]...')) +def revert(ui, repo, *pats, **opts): + """restore files to their checkout state + + .. note:: + To check out earlier revisions, you should use :hg:`update REV`. + To cancel a merge (and lose your changes), use :hg:`update --clean .`. + + With no revision specified, revert the specified files or directories + to the contents they had in the parent of the working directory. + This restores the contents of files to an unmodified + state and unschedules adds, removes, copies, and renames. If the + working directory has two parents, you must explicitly specify a + revision. + + Using the -r/--rev or -d/--date options, revert the given files or + directories to their states as of a specific revision. Because + revert does not change the working directory parents, this will + cause these files to appear modified. This can be helpful to "back + out" some or all of an earlier change. See :hg:`backout` for a + related method. + + Modified files are saved with a .orig suffix before reverting. 
+ To disable these backups, use --no-backup. + + See :hg:`help dates` for a list of formats valid for -d/--date. + + Returns 0 on success. + """ + + if opts.get("date"): + if opts.get("rev"): + raise util.Abort(_("you can't specify a revision and a date")) + opts["rev"] = cmdutil.finddate(ui, repo, opts["date"]) + + parent, p2 = repo.dirstate.parents() + if not opts.get('rev') and p2 != nullid: + # revert after merge is a trap for new users (issue2915) + raise util.Abort(_('uncommitted merge with no revision specified'), + hint=_('use "hg update" or see "hg help revert"')) + + ctx = scmutil.revsingle(repo, opts.get('rev')) + node = ctx.node() + + if not pats and not opts.get('all'): + msg = _("no files or directories specified") + if p2 != nullid: + hint = _("uncommitted merge, use --all to discard all changes," + " or 'hg update -C .' to abort the merge") + raise util.Abort(msg, hint=hint) + dirty = util.any(repo.status()) + if node != parent: + if dirty: + hint = _("uncommitted changes, use --all to discard all" + " changes, or 'hg update %s' to update") % ctx.rev() + else: + hint = _("use --all to revert all files," + " or 'hg update %s' to update") % ctx.rev() + elif dirty: + hint = _("uncommitted changes, use --all to discard all changes") + else: + hint = _("use --all to revert all files") + raise util.Abort(msg, hint=hint) + + mf = ctx.manifest() + if node == parent: + pmf = mf + else: + pmf = None + + # need all matching names in dirstate and manifest of target rev, + # so have to walk both. do not print errors if files exist in one + # but not other. + + names = {} + + wlock = repo.wlock() + try: + # walk dirstate. + + m = scmutil.match(repo[None], pats, opts) + m.bad = lambda x, y: False + for abs in repo.walk(m): + names[abs] = m.rel(abs), m.exact(abs) + + # walk target manifest. 
+ + def badfn(path, msg): + if path in names: + return + if path in repo[node].substate: + ui.warn("%s: %s\n" % (m.rel(path), + 'reverting subrepos is unsupported')) + return + path_ = path + '/' + for f in names: + if f.startswith(path_): + return + ui.warn("%s: %s\n" % (m.rel(path), msg)) + + m = scmutil.match(repo[node], pats, opts) + m.bad = badfn + for abs in repo[node].walk(m): + if abs not in names: + names[abs] = m.rel(abs), m.exact(abs) + + m = scmutil.matchfiles(repo, names) + changes = repo.status(match=m)[:4] + modified, added, removed, deleted = map(set, changes) + + # if f is a rename, also revert the source + cwd = repo.getcwd() + for f in added: + src = repo.dirstate.copied(f) + if src and src not in names and repo.dirstate[src] == 'r': + removed.add(src) + names[src] = (repo.pathto(src, cwd), True) + + def removeforget(abs): + if repo.dirstate[abs] == 'a': + return _('forgetting %s\n') + return _('removing %s\n') + + revert = ([], _('reverting %s\n')) + add = ([], _('adding %s\n')) + remove = ([], removeforget) + undelete = ([], _('undeleting %s\n')) + + disptable = ( + # dispatch table: + # file state + # action if in target manifest + # action if not in target manifest + # make backup if in target manifest + # make backup if not in target manifest + (modified, revert, remove, True, True), + (added, revert, remove, True, False), + (removed, undelete, None, False, False), + (deleted, revert, remove, False, False), + ) + + for abs, (rel, exact) in sorted(names.items()): + mfentry = mf.get(abs) + target = repo.wjoin(abs) + def handle(xlist, dobackup): + xlist[0].append(abs) + if (dobackup and not opts.get('no_backup') and + os.path.lexists(target)): + bakname = "%s.orig" % rel + ui.note(_('saving current version of %s as %s\n') % + (rel, bakname)) + if not opts.get('dry_run'): + util.rename(target, bakname) + if ui.verbose or not exact: + msg = xlist[1] + if not isinstance(msg, basestring): + msg = msg(abs) + ui.status(msg % rel) + for table, 
hitlist, misslist, backuphit, backupmiss in disptable: + if abs not in table: + continue + # file has changed in dirstate + if mfentry: + handle(hitlist, backuphit) + elif misslist is not None: + handle(misslist, backupmiss) + break + else: + if abs not in repo.dirstate: + if mfentry: + handle(add, True) + elif exact: + ui.warn(_('file not managed: %s\n') % rel) + continue + # file has not changed in dirstate + if node == parent: + if exact: + ui.warn(_('no changes needed to %s\n') % rel) + continue + if pmf is None: + # only need parent manifest in this unlikely case, + # so do not read by default + pmf = repo[parent].manifest() + if abs in pmf and mfentry: + # if version of file is same in parent and target + # manifests, do nothing + if (pmf[abs] != mfentry or + pmf.flags(abs) != mf.flags(abs)): + handle(revert, False) + else: + handle(remove, False) + + if not opts.get('dry_run'): + def checkout(f): + fc = ctx[f] + repo.wwrite(f, fc.data(), fc.flags()) + + audit_path = scmutil.pathauditor(repo.root) + for f in remove[0]: + if repo.dirstate[f] == 'a': + repo.dirstate.drop(f) + continue + audit_path(f) + try: + util.unlinkpath(repo.wjoin(f)) + except OSError: + pass + repo.dirstate.remove(f) + + normal = None + if node == parent: + # We're reverting to our parent. If possible, we'd like status + # to report the file as clean. We have to use normallookup for + # merges to avoid losing information about merged/dirty files. 
+ if p2 != nullid: + normal = repo.dirstate.normallookup + else: + normal = repo.dirstate.normal + for f in revert[0]: + checkout(f) + if normal: + normal(f) + + for f in add[0]: + checkout(f) + repo.dirstate.add(f) + + normal = repo.dirstate.normallookup + if node == parent and p2 == nullid: + normal = repo.dirstate.normal + for f in undelete[0]: + checkout(f) + normal(f) + + finally: + wlock.release() + +@command('rollback', dryrunopts + + [('f', 'force', False, _('ignore safety measures'))]) +def rollback(ui, repo, **opts): + """roll back the last transaction (dangerous) + + This command should be used with care. There is only one level of + rollback, and there is no way to undo a rollback. It will also + restore the dirstate at the time of the last transaction, losing + any dirstate changes since that time. This command does not alter + the working directory. + + Transactions are used to encapsulate the effects of all commands + that create new changesets or propagate existing changesets into a + repository. For example, the following commands are transactional, + and their effects can be rolled back: + + - commit + - import + - pull + - push (with this repository as the destination) + - unbundle + + It's possible to lose data with rollback: commit, update back to + an older changeset, and then rollback. The update removes the + changes you committed from the working directory, and rollback + removes them from history. To avoid data loss, you must pass + --force in this case. + + This command is not intended for use on public repositories. Once + changes are visible for pull by other users, rolling a transaction + back locally is ineffective (someone else may already have pulled + the changes). Furthermore, a race is possible with readers of the + repository; for example an in-progress pull from the repository + may fail if a rollback is performed. + + Returns 0 on success, 1 if no rollback data is available. 
+ """ + return repo.rollback(dryrun=opts.get('dry_run'), + force=opts.get('force')) + +@command('root', []) +def root(ui, repo): + """print the root (top) of the current working directory + + Print the root directory of the current repository. + + Returns 0 on success. + """ + ui.write(repo.root + "\n") + +@command('^serve', + [('A', 'accesslog', '', _('name of access log file to write to'), + _('FILE')), + ('d', 'daemon', None, _('run server in background')), + ('', 'daemon-pipefds', '', _('used internally by daemon mode'), _('NUM')), + ('E', 'errorlog', '', _('name of error log file to write to'), _('FILE')), + # use string type, then we can check if something was passed + ('p', 'port', '', _('port to listen on (default: 8000)'), _('PORT')), + ('a', 'address', '', _('address to listen on (default: all interfaces)'), + _('ADDR')), + ('', 'prefix', '', _('prefix path to serve from (default: server root)'), + _('PREFIX')), + ('n', 'name', '', + _('name to show in web pages (default: working directory)'), _('NAME')), + ('', 'web-conf', '', + _('name of the hgweb config file (see "hg help hgweb")'), _('FILE')), + ('', 'webdir-conf', '', _('name of the hgweb config file (DEPRECATED)'), + _('FILE')), + ('', 'pid-file', '', _('name of file to write process ID to'), _('FILE')), + ('', 'stdio', None, _('for remote clients')), + ('', 'cmdserver', '', _('for remote clients'), _('MODE')), + ('t', 'templates', '', _('web templates to use'), _('TEMPLATE')), + ('', 'style', '', _('template style to use'), _('STYLE')), + ('6', 'ipv6', None, _('use IPv6 in addition to IPv4')), + ('', 'certificate', '', _('SSL certificate file'), _('FILE'))], + _('[OPTION]...')) +def serve(ui, repo, **opts): + """start stand-alone webserver + + Start a local HTTP repository browser and pull server. You can use + this for ad-hoc sharing and browsing of repositories. It is + recommended to use a real web server to serve a repository for + longer periods of time. 
+ + Please note that the server does not implement access control. + This means that, by default, anybody can read from the server and + nobody can write to it by default. Set the ``web.allow_push`` + option to ``*`` to allow everybody to push to the server. You + should use a real web server if you need to authenticate users. + + By default, the server logs accesses to stdout and errors to + stderr. Use the -A/--accesslog and -E/--errorlog options to log to + files. + + To have the server choose a free port number to listen on, specify + a port number of 0; in this case, the server will print the port + number it uses. + + Returns 0 on success. + """ + + if opts["stdio"] and opts["cmdserver"]: + raise util.Abort(_("cannot use --stdio with --cmdserver")) + + def checkrepo(): + if repo is None: + raise error.RepoError(_("There is no Mercurial repository here" + " (.hg not found)")) + + if opts["stdio"]: + checkrepo() + s = sshserver.sshserver(ui, repo) + s.serve_forever() + + if opts["cmdserver"]: + checkrepo() + s = commandserver.server(ui, repo, opts["cmdserver"]) + return s.serve() + + # this way we can check if something was given in the command-line + if opts.get('port'): + opts['port'] = util.getport(opts.get('port')) + + baseui = repo and repo.baseui or ui + optlist = ("name templates style address port prefix ipv6" + " accesslog errorlog certificate encoding") + for o in optlist.split(): + val = opts.get(o, '') + if val in (None, ''): # should check against default options instead + continue + baseui.setconfig("web", o, val) + if repo and repo.ui != baseui: + repo.ui.setconfig("web", o, val) + + o = opts.get('web_conf') or opts.get('webdir_conf') + if not o: + if not repo: + raise error.RepoError(_("There is no Mercurial repository" + " here (.hg not found)")) + o = repo.root + + app = hgweb.hgweb(o, baseui=ui) + + class service(object): + def init(self): + util.setsignalhandler() + self.httpd = hgweb.server.create_server(ui, app) + + if opts['port'] and not 
ui.verbose: + return + + if self.httpd.prefix: + prefix = self.httpd.prefix.strip('/') + '/' + else: + prefix = '' + + port = ':%d' % self.httpd.port + if port == ':80': + port = '' + + bindaddr = self.httpd.addr + if bindaddr == '0.0.0.0': + bindaddr = '*' + elif ':' in bindaddr: # IPv6 + bindaddr = '[%s]' % bindaddr + + fqaddr = self.httpd.fqaddr + if ':' in fqaddr: + fqaddr = '[%s]' % fqaddr + if opts['port']: + write = ui.status + else: + write = ui.write + write(_('listening at http://%s%s/%s (bound to %s:%d)\n') % + (fqaddr, port, prefix, bindaddr, self.httpd.port)) + + def run(self): + self.httpd.serve_forever() + + service = service() + + cmdutil.service(opts, initfn=service.init, runfn=service.run) + +@command('showconfig|debugconfig', + [('u', 'untrusted', None, _('show untrusted configuration options'))], + _('[-u] [NAME]...')) +def showconfig(ui, repo, *values, **opts): + """show combined config settings from all hgrc files + + With no arguments, print names and values of all config items. + + With one argument of the form section.name, print just the value + of that config item. + + With multiple arguments, print names and values of all config + items with matching section names. + + With --debug, the source (filename and line number) is printed + for each config item. + + Returns 0 on success. + """ + + for f in scmutil.rcpath(): + ui.debug('read config from: %s\n' % f) + untrusted = bool(opts.get('untrusted')) + if values: + sections = [v for v in values if '.' not in v] + items = [v for v in values if '.' in v] + if len(items) > 1 or items and sections: + raise util.Abort(_('only one config item permitted')) + for section, name, value in ui.walkconfig(untrusted=untrusted): + value = str(value).replace('\n', '\\n') + sectname = section + '.' 
@command('^status|st',
    [('A', 'all', None, _('show status of all files')),
     ('m', 'modified', None, _('show only modified files')),
     ('a', 'added', None, _('show only added files')),
     ('r', 'removed', None, _('show only removed files')),
     ('d', 'deleted', None, _('show only deleted (but tracked) files')),
     ('c', 'clean', None, _('show only files without changes')),
     ('u', 'unknown', None, _('show only unknown (not tracked) files')),
     ('i', 'ignored', None, _('show only ignored files')),
     ('n', 'no-status', None, _('hide status prefix')),
     ('C', 'copies', None, _('show source of copied files')),
     ('0', 'print0', None, _('end filenames with NUL, for use with xargs')),
     ('', 'rev', [], _('show difference from revision'), _('REV')),
     ('', 'change', '', _('list the changed files of a revision'), _('REV')),
    ] + walkopts + subrepoopts,
    _('[OPTION]... [FILE]...'))
def status(ui, repo, *pats, **opts):
    """show changed files in the working directory

    Show status of files in the repository. If names are given, only
    files that match are shown. Files that are clean or ignored or
    the source of a copy/move operation, are not listed unless
    -c/--clean, -i/--ignored, -C/--copies or -A/--all are given.
    Unless options described with "show only ..." are given, the
    options -mardu are used.

    Option -q/--quiet hides untracked (unknown and ignored) files
    unless explicitly requested with -u/--unknown or -i/--ignored.

    .. note::
       status may appear to disagree with diff if permissions have
       changed or a merge has occurred. The standard diff format does
       not report permission changes and diff only reports changes
       relative to one merge parent.

    If one revision is given, it is used as the base revision.
    If two revisions are given, the differences between them are
    shown. The --change option can also be used as a shortcut to list
    the changed files of a revision from its first parent.

    The codes used to show the status of files are::

      M = modified
      A = added
      R = removed
      C = clean
      ! = missing (deleted by non-hg command, but still tracked)
      ? = not tracked
      I = ignored
        = origin of the previous file listed as A (added)

    .. container:: verbose

      Examples:

      - show changes in the working directory relative to a changeset::

          hg status --rev 9353

      - show all changes including copies in an existing changeset::

          hg status --copies --change 9353

      - get a NUL separated list of added files, suitable for xargs::

          hg status -an0

    Returns 0 on success.
    """

    revs = opts.get('rev')
    change = opts.get('change')

    if revs and change:
        msg = _('cannot specify --rev and --change at the same time')
        raise util.Abort(msg)
    elif change:
        # --change REV compares REV against its first parent
        node2 = repo.lookup(change)
        node1 = repo[node2].p1().node()
    else:
        # 0, 1 or 2 --rev arguments; revpair defaults node2 to the
        # working directory (None) when fewer than two are given
        node1, node2 = scmutil.revpair(repo, revs)

    cwd = (pats and repo.getcwd()) or ''
    # --print0 terminates each name with NUL instead of newline (xargs -0)
    end = opts.get('print0') and '\0' or '\n'
    copy = {}
    # order matters: it must line up with the 'MAR!?IC' status characters
    # and with the tuple returned by repo.status() below
    states = 'modified added removed deleted unknown ignored clean'.split()
    show = [k for k in states if opts.get(k)]
    if opts.get('all'):
        show += ui.quiet and (states[:4] + ['clean']) or states
    if not show:
        # default selection: -mard quiet, -mardu otherwise
        show = ui.quiet and states[:4] or states[:5]

    stat = repo.status(node1, node2, scmutil.match(repo[node2], pats, opts),
                       'ignored' in show, 'clean' in show, 'unknown' in show,
                       opts.get('subrepos'))
    changestates = zip(states, 'MAR!?IC', stat)

    # command-line hyphens become underscores in opts ('no-status' ->
    # 'no_status'); copy detection is skipped when the prefix is hidden
    # because the "  source" continuation line would be ambiguous
    if (opts.get('all') or opts.get('copies')) and not opts.get('no_status'):
        ctxn = repo[nullid]
        ctx1 = repo[node1]
        ctx2 = repo[node2]
        added = stat[1]
        if node2 is None:
            # comparing against the working dir: treat modified files as
            # possible copy targets too (they may come from a merge)
            added = stat[0] + stat[1] # merged?

        for k, v in copies.copies(repo, ctx1, ctx2, ctxn)[0].iteritems():
            if k in added:
                copy[k] = v
            elif v in added:
                copy[v] = k

    for state, char, files in changestates:
        if state in show:
            format = "%s %%s%s" % (char, end)
            if opts.get('no_status'):
                format = "%%s%s" % end

            for f in files:
                ui.write(format % repo.pathto(f, cwd),
                         label='status.' + state)
                if f in copy:
                    # continuation line naming the copy/rename source
                    ui.write('  %s%s' % (repo.pathto(copy[f], cwd), end),
                             label='status.copied')
@command('^summary|sum',
    [('', 'remote', None, _('check for push and pull'))], '[--remote]')
def summary(ui, repo, **opts):
    """summarize working directory state

    This generates a brief summary of the working directory state,
    including parents, branch, commit status, and available updates.

    With the --remote option, this will check the default paths for
    incoming and outgoing changes. This can be time-consuming.

    Returns 0 on success.
    """

    ctx = repo[None]
    parents = ctx.parents()
    pnode = parents[0].node()
    marks = []

    for p in parents:
        # label with log.changeset (instead of log.parent) since this
        # shows a working directory parent *changeset*:
        ui.write(_('parent: %d:%s ') % (p.rev(), str(p)),
                 label='log.changeset')
        ui.write(' '.join(p.tags()), label='log.tag')
        if p.bookmarks():
            marks.extend(p.bookmarks())
        if p.rev() == -1:
            if not len(repo):
                ui.write(_(' (empty repository)'))
            else:
                ui.write(_(' (no revision checked out)'))
        ui.write('\n')
        if p.description():
            ui.status(' ' + p.description().splitlines()[0].strip() + '\n',
                      label='log.summary')

    branch = ctx.branch()
    bheads = repo.branchheads(branch)
    m = _('branch: %s\n') % branch
    if branch != 'default':
        ui.write(m, label='log.branch')
    else:
        # the default branch is only shown in verbose/normal mode
        ui.status(m, label='log.branch')

    if marks:
        current = repo._bookmarkcurrent
        ui.write(_('bookmarks:'), label='log.bookmark')
        if current is not None:
            try:
                marks.remove(current)
                ui.write(' *' + current, label='bookmarks.current')
            except ValueError:
                # current bookmark not in parent ctx marks
                pass
        for m in marks:
            ui.write(' ' + m, label='log.bookmark')
        ui.write('\n', label='log.bookmark')

    # st starts as [modified, added, removed, deleted, unknown, ignored]
    st = list(repo.status(unknown=True))[:6]

    # split copies out of 'added': renamed if the source was removed,
    # plain copied otherwise; the source is dropped from the removed list
    c = repo.dirstate.copies()
    copied, renamed = [], []
    for d, s in c.iteritems():
        if s in st[2]:
            st[2].remove(s)
            renamed.append(d)
        else:
            copied.append(d)
        if d in st[1]:
            st[1].remove(d)
    # after these inserts st is:
    # [modified, added, removed, renamed, copied, deleted, unknown, ignored]
    st.insert(3, renamed)
    st.insert(4, copied)

    # index 8: unresolved merge conflicts
    ms = mergemod.mergestate(repo)
    st.append([f for f in ms if ms[f] == 'u'])

    # index 9: dirty subrepos
    subs = [s for s in ctx.substate if ctx.sub(s).dirty()]
    st.append(subs)

    # labels must stay parallel to the st indexes built above
    labels = [ui.label(_('%d modified'), 'status.modified'),
              ui.label(_('%d added'), 'status.added'),
              ui.label(_('%d removed'), 'status.removed'),
              ui.label(_('%d renamed'), 'status.copied'),
              ui.label(_('%d copied'), 'status.copied'),
              ui.label(_('%d deleted'), 'status.deleted'),
              ui.label(_('%d unknown'), 'status.unknown'),
              ui.label(_('%d ignored'), 'status.ignored'),
              ui.label(_('%d unresolved'), 'resolve.unresolved'),
              ui.label(_('%d subrepos'), 'status.modified')]
    t = []
    for s, l in zip(st, labels):
        if s:
            t.append(l % len(s))

    t = ', '.join(t)
    cleanworkdir = False

    if len(parents) > 1:
        t += _(' (merge)')
    elif branch != parents[0].branch():
        t += _(' (new branch)')
    elif (parents[0].extra().get('close') and
          pnode in repo.branchheads(branch, closed=True)):
        t += _(' (head closed)')
    elif not (st[0] or st[1] or st[2] or st[3] or st[4] or st[9]):
        # nothing modified/added/removed/renamed/copied and no dirty subrepos
        t += _(' (clean)')
        cleanworkdir = True
    elif pnode not in bheads:
        t += _(' (new branch head)')

    if cleanworkdir:
        ui.status(_('commit: %s\n') % t.strip())
    else:
        ui.write(_('commit: %s\n') % t.strip())

    # all ancestors of branch heads - all ancestors of parent = new csets
    new = [0] * len(repo)
    cl = repo.changelog
    for a in [cl.rev(n) for n in bheads]:
        new[a] = 1
    for a in cl.ancestors(*[cl.rev(n) for n in bheads]):
        new[a] = 1
    for a in [p.rev() for p in parents]:
        if a >= 0:
            new[a] = 0
    for a in cl.ancestors(*[p.rev() for p in parents]):
        new[a] = 0
    new = sum(new)

    if new == 0:
        ui.status(_('update: (current)\n'))
    elif pnode not in bheads:
        ui.write(_('update: %d new changesets (update)\n') % new)
    else:
        ui.write(_('update: %d new changesets, %d branch heads (merge)\n') %
                 (new, len(bheads)))

    if opts.get('remote'):
        t = []
        source, branches = hg.parseurl(ui.expandpath('default'))
        other = hg.peer(repo, {}, source)
        revs, checkout = hg.addbranchrevs(repo, other, branches,
                                          opts.get('rev'))
        ui.debug('comparing with %s\n' % util.hidepassword(source))
        # buffer the discovery chatter so only our summary lines show
        repo.ui.pushbuffer()
        commoninc = discovery.findcommonincoming(repo, other)
        _common, incoming, _rheads = commoninc
        repo.ui.popbuffer()
        if incoming:
            t.append(_('1 or more incoming'))

        dest, branches = hg.parseurl(ui.expandpath('default-push', 'default'))
        revs, checkout = hg.addbranchrevs(repo, repo, branches, None)
        if source != dest:
            # separate push target: redo discovery against it
            other = hg.peer(repo, {}, dest)
            commoninc = None
            ui.debug('comparing with %s\n' % util.hidepassword(dest))
        repo.ui.pushbuffer()
        common, outheads = discovery.findcommonoutgoing(repo, other,
                                                        commoninc=commoninc)
        repo.ui.popbuffer()
        o = repo.changelog.findmissing(common=common, heads=outheads)
        if o:
            t.append(_('%d outgoing') % len(o))
        if 'bookmarks' in other.listkeys('namespaces'):
            lmarks = repo.listkeys('bookmarks')
            rmarks = other.listkeys('bookmarks')
            diff = set(rmarks) - set(lmarks)
            if len(diff) > 0:
                t.append(_('%d incoming bookmarks') % len(diff))
            diff = set(lmarks) - set(rmarks)
            if len(diff) > 0:
                t.append(_('%d outgoing bookmarks') % len(diff))

        if t:
            ui.write(_('remote: %s\n') % (', '.join(t)))
        else:
            ui.status(_('remote: (synced)\n'))
@command('tag',
    [('f', 'force', None, _('force tag')),
     ('l', 'local', None, _('make the tag local')),
     ('r', 'rev', '', _('revision to tag'), _('REV')),
     ('', 'remove', None, _('remove a tag')),
     # -l/--local is already there, commitopts cannot be used
     ('e', 'edit', None, _('edit commit message')),
     ('m', 'message', '', _('use as commit message'), _('TEXT')),
    ] + commitopts2,
    _('[-f] [-l] [-m TEXT] [-d DATE] [-u USER] [-r REV] NAME...'))
def tag(ui, repo, name1, *names, **opts):
    """add one or more tags for the current or given revision

    Name a particular revision using .

    Tags are used to name particular revisions of the repository and are
    very useful to compare different revisions, to go back to significant
    earlier versions or to mark branch points as releases, etc. Changing
    an existing tag is normally disallowed; use -f/--force to override.

    If no revision is given, the parent of the working directory is
    used, or tip if no revision is checked out.

    To facilitate version control, distribution, and merging of tags,
    they are stored as a file named ".hgtags" which is managed similarly
    to other project files and can be hand-edited if necessary. This
    also means that tagging creates a new commit. The file
    ".hg/localtags" is used for local tags (not shared among
    repositories).

    Tag commits are usually made at the head of a branch. If the parent
    of the working directory is not a branch head, :hg:`tag` aborts; use
    -f/--force to force the tag commit to be based on a non-head
    changeset.

    See :hg:`help dates` for a list of formats valid for -d/--date.

    Since tag names have priority over branch names during revision
    lookup, using an existing branch name as a tag name is discouraged.

    Returns 0 on success.
    """
    # NOTE(review): the first docstring paragraph reads "using ." --
    # angle-bracketed text ("using <name>") appears to have been stripped
    # when this file was extracted; confirm against the upstream source.

    rev_ = "."  # default: tag the working directory parent
    names = [t.strip() for t in (name1,) + names]
    if len(names) != len(set(names)):
        raise util.Abort(_('tag names must be unique'))
    for n in names:
        # reserved names would shadow revision identifiers
        if n in ['tip', '.', 'null']:
            raise util.Abort(_("the name '%s' is reserved") % n)
        if not n:
            raise util.Abort(
                _('tag names cannot consist entirely of whitespace'))
    if opts.get('rev') and opts.get('remove'):
        raise util.Abort(_("--rev and --remove are incompatible"))
    if opts.get('rev'):
        rev_ = opts['rev']
    message = opts.get('message')
    if opts.get('remove'):
        # removal = re-tagging the name to the null revision
        expectedtype = opts.get('local') and 'local' or 'global'
        for n in names:
            if not repo.tagtype(n):
                raise util.Abort(_("tag '%s' does not exist") % n)
            if repo.tagtype(n) != expectedtype:
                if expectedtype == 'global':
                    raise util.Abort(_("tag '%s' is not a global tag") % n)
                else:
                    raise util.Abort(_("tag '%s' is not a local tag") % n)
        rev_ = nullid
        if not message:
            # we don't translate commit messages
            message = 'Removed tag %s' % ', '.join(names)
    elif not opts.get('force'):
        for n in names:
            if n in repo.tags():
                raise util.Abort(_("tag '%s' already exists "
                                   "(use -f to force)") % n)
    if not opts.get('local'):
        # global tags create a commit; check the working dir is commitable
        p1, p2 = repo.dirstate.parents()
        if p2 != nullid:
            raise util.Abort(_('uncommitted merge'))
        bheads = repo.branchheads()
        if not opts.get('force') and bheads and p1 not in bheads:
            raise util.Abort(_('not at a branch head (use -f to force)'))
    r = scmutil.revsingle(repo, rev_).node()

    if not message:
        # we don't translate commit messages
        message = ('Added tag %s for changeset %s' %
                   (', '.join(names), short(r)))

    date = opts.get('date')
    if date:
        date = util.parsedate(date)

    if opts.get('edit'):
        message = ui.edit(message, ui.username())

    repo.tag(names, r, message, opts.get('local'), opts.get('user'), date)
@command('tip',
    [('p', 'patch', None, _('show patch')),
     ('g', 'git', None, _('use git extended diff format')),
    ] + templateopts,
    _('[-p] [-g]'))
def tip(ui, repo, **opts):
    """show the tip revision

    The tip revision (usually just called the tip) is the changeset
    most recently added to the repository (and therefore the most
    recently changed head).

    If you have just made a commit, that commit will be the tip. If
    you have just pulled changes from another repository, the tip of
    that repository becomes the current tip. The "tip" tag is special
    and cannot be renamed or assigned to a different changeset.

    Returns 0 on success.
    """
    # the tip is always the highest-numbered revision: len(repo) - 1
    tiprev = len(repo) - 1
    displayer = cmdutil.show_changeset(ui, repo, opts)
    displayer.show(repo[tiprev])
    displayer.close()
+ """ + fnames = (fname1,) + fnames + + lock = repo.lock() + wc = repo['.'] + try: + for fname in fnames: + f = url.open(ui, fname) + gen = changegroup.readbundle(f, fname) + modheads = repo.addchangegroup(gen, 'unbundle', 'bundle:' + fname, + lock=lock) + bookmarks.updatecurrentbookmark(repo, wc.node(), wc.branch()) + finally: + lock.release() + return postincoming(ui, repo, modheads, opts.get('update'), None) + +@command('^update|up|checkout|co', + [('C', 'clean', None, _('discard uncommitted changes (no backup)')), + ('c', 'check', None, + _('update across branches if no uncommitted changes')), + ('d', 'date', '', _('tipmost revision matching date'), _('DATE')), + ('r', 'rev', '', _('revision'), _('REV'))], + _('[-c] [-C] [-d DATE] [[-r] REV]')) +def update(ui, repo, node=None, rev=None, clean=False, date=None, check=False): + """update working directory (or switch revisions) + + Update the repository's working directory to the specified + changeset. If no changeset is specified, update to the tip of the + current named branch. + + If the changeset is not a descendant of the working directory's + parent, the update is aborted. With the -c/--check option, the + working directory is checked for uncommitted changes; if none are + found, the working directory is updated to the specified + changeset. + + Update sets the working directory's parent revison to the specified + changeset (see :hg:`help parents`). + + The following rules apply when the working directory contains + uncommitted changes: + + 1. If neither -c/--check nor -C/--clean is specified, and if + the requested changeset is an ancestor or descendant of + the working directory's parent, the uncommitted changes + are merged into the requested changeset and the merged + result is left uncommitted. If the requested changeset is + not an ancestor or descendant (that is, it is on another + branch), the update is aborted and the uncommitted changes + are preserved. + + 2. 
With the -c/--check option, the update is aborted and the + uncommitted changes are preserved. + + 3. With the -C/--clean option, uncommitted changes are discarded and + the working directory is updated to the requested changeset. + + Use null as the changeset to remove the working directory (like + :hg:`clone -U`). + + If you want to revert just one file to an older revision, use + :hg:`revert [-r REV] NAME`. + + See :hg:`help dates` for a list of formats valid for -d/--date. + + Returns 0 on success, 1 if there are unresolved files. + """ + if rev and node: + raise util.Abort(_("please specify just one revision")) + + if rev is None or rev == '': + rev = node + + # if we defined a bookmark, we have to remember the original bookmark name + brev = rev + rev = scmutil.revsingle(repo, rev, rev).rev() + + if check and clean: + raise util.Abort(_("cannot specify both -c/--check and -C/--clean")) + + if check: + # we could use dirty() but we can ignore merge and branch trivia + c = repo[None] + if c.modified() or c.added() or c.removed(): + raise util.Abort(_("uncommitted local changes")) + + if date: + if rev is not None: + raise util.Abort(_("you can't specify a revision and a date")) + rev = cmdutil.finddate(ui, repo, date) + + if clean or check: + ret = hg.clean(repo, rev) + else: + ret = hg.update(repo, rev) + + if brev in repo._bookmarks: + bookmarks.setcurrent(repo, brev) + + return ret + +@command('verify', []) +def verify(ui, repo): + """verify the integrity of the repository + + Verify the integrity of the current repository. + + This will perform an extensive check of the repository's + integrity, validating the hashes and checksums of each entry in + the changelog, manifest, and tracked files, as well as the + integrity of their crosslinks and indices. + + Returns 0 on success, 1 if errors are encountered. 
+ """ + return hg.verify(repo) + +@command('version', []) +def version_(ui): + """output version and copyright information""" + ui.write(_("Mercurial Distributed SCM (version %s)\n") + % util.version()) + ui.status(_( + "(see http://mercurial.selenic.com for more information)\n" + "\nCopyright (C) 2005-2011 Matt Mackall and others\n" + "This is free software; see the source for copying conditions. " + "There is NO\nwarranty; " + "not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n" + )) + +norepo = ("clone init version help debugcommands debugcomplete" + " debugdate debuginstall debugfsinfo debugpushkey debugwireargs" + " debugknown debuggetbundle debugbundle") +optionalrepo = ("identify paths serve showconfig debugancestor debugdag" + " debugdata debugindex debugindexdot debugrevlog") diff --git a/websdk/mercurial/commandserver.py b/websdk/mercurial/commandserver.py new file mode 100644 index 0000000..27d589d --- /dev/null +++ b/websdk/mercurial/commandserver.py @@ -0,0 +1,237 @@ +# commandserver.py - communicate with Mercurial's API over a pipe +# +# Copyright Matt Mackall +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. 
class channeledoutput(object):
    """
    Write data from in_ to out in the following format:

    data length (unsigned int),
    data

    Each write() emits one frame on *out*: a 1-byte channel identifier,
    a big-endian unsigned 32-bit payload length, then the payload.
    Attribute access not handled here is delegated to *in_*, so the
    object can stand in for a regular file object.
    """
    def __init__(self, in_, out, channel):
        self.in_ = in_          # object attribute lookups are delegated to
        self.out = out          # stream the framed data is written to
        self.channel = channel  # single-character channel identifier

    def write(self, data):
        """Frame *data* onto the output stream; empty writes emit nothing."""
        if not data:
            return
        # '>cI' = channel byte + big-endian unsigned 32-bit payload length
        self.out.write(struct.pack('>cI', self.channel, len(data)))
        self.out.write(data)
        self.out.flush()

    def __getattr__(self, attr):
        # Pretend not to be a tty and hide the underlying descriptor so
        # callers treat this as a plain pipe; everything else falls
        # through to the wrapped input object.
        if attr in ('isatty', 'fileno'):
            # fix: use the call form of raise -- identical behavior, but
            # 'raise AttributeError, attr' is Python-2-only statement
            # syntax and breaks any Python 3 tooling that parses the file
            raise AttributeError(attr)
        return getattr(self.in_, attr)
class server(object):
    """
    Listens for commands on stdin, runs them and writes the output on a channel
    based stream to stdout.
    """
    def __init__(self, ui, repo, mode):
        # remember the starting directory so --cwd commands can be undone
        self.cwd = os.getcwd()

        logpath = ui.config("cmdserver", "log", None)
        if logpath:
            # module-level logfile is shared with the log() helper
            global logfile
            if logpath == '-':
                # write log on a special 'd'ebug channel
                logfile = channeledoutput(sys.stdout, sys.stdout, 'd')
            else:
                logfile = open(logpath, 'a')

        # the ui here is really the repo ui so take its baseui so we don't end up
        # with its local configuration
        self.ui = repo.baseui
        self.repo = repo
        self.repoui = repo.ui

        if mode == 'pipe':
            # all channels share stdout as the transport; the channel
            # byte tells the client which stream a frame belongs to
            self.cerr = channeledoutput(sys.stderr, sys.stdout, 'e')
            self.cout = channeledoutput(sys.stdout, sys.stdout, 'o')
            self.cin = channeledinput(sys.stdin, sys.stdout, 'I')
            self.cresult = channeledoutput(sys.stdout, sys.stdout, 'r')

            self.client = sys.stdin
        else:
            raise util.Abort(_('unknown mode %s') % mode)

    def _read(self, size):
        # Read exactly the requested request bytes from the client;
        # an empty read means the client hung up.
        if not size:
            return ''

        data = self.client.read(size)

        # is the other end closed?
        if not data:
            raise EOFError()

        return data

    def runcommand(self):
        """ reads a list of \0 terminated arguments, executes
        and writes the return code to the result channel """

        # request framing: 32-bit big-endian length, then NUL-separated args
        length = struct.unpack('>I', self._read(4))[0]
        if not length:
            args = []
        else:
            args = self._read(length).split('\0')

        # copy the uis so changes (e.g. --config or --verbose) don't
        # persist between requests
        copiedui = self.ui.copy()
        self.repo.baseui = copiedui
        self.repo.ui = self.repo.dirstate._ui = self.repoui.copy()
        self.repo.invalidate()

        req = dispatch.request(args[:], copiedui, self.repo, self.cin,
                               self.cout, self.cerr)

        ret = dispatch.dispatch(req) or 0 # might return None

        # restore old cwd
        if '--cwd' in args:
            os.chdir(self.cwd)

        # result frame: signed 32-bit exit code on the 'r' channel
        self.cresult.write(struct.pack('>i', int(ret)))

    def getencoding(self):
        """ writes the current encoding to the result channel """
        self.cresult.write(encoding.encoding)

    def serveone(self):
        # one request = one newline-terminated command name, dispatched
        # through the class-level capabilities table
        cmd = self.client.readline()[:-1]
        if cmd:
            handler = self.capabilities.get(cmd)
            if handler:
                handler(self)
            else:
                # clients are expected to check what commands are supported by
                # looking at the servers capabilities
                raise util.Abort(_('unknown command %s') % cmd)

        return cmd != ''

    # command name -> handler; defined at class-body scope so the entries
    # are the plain functions above and must be called as handler(self)
    capabilities = {'runcommand' : runcommand,
                    'getencoding' : getencoding}

    def serve(self):
        hellomsg = 'capabilities: ' + ' '.join(self.capabilities.keys())
        hellomsg += '\n'
        hellomsg += 'encoding: ' + encoding.encoding

        # write the hello msg in -one- chunk
        self.cout.write(hellomsg)

        try:
            while self.serveone():
                pass
        except EOFError:
            # we'll get here if the client disconnected while we were reading
            # its request
            return 1

        return 0
class sortdict(dict):
    """A dict subclass that remembers insertion order.

    Iteration, items() and copy() all follow the order in which keys
    were inserted; re-assigning an existing key moves it to the end.
    """

    def __init__(self, data=None):
        self._list = []
        if data:
            self.update(data)

    def copy(self):
        return sortdict(self)

    def __setitem__(self, key, val):
        # a key that already exists is moved to the end of the order
        if key in self:
            self._list.remove(key)
        self._list.append(key)
        dict.__setitem__(self, key, val)

    def __iter__(self):
        return iter(self._list)

    def update(self, src):
        for key in src:
            self[key] = src[key]

    def clear(self):
        dict.clear(self)
        self._list = []

    def items(self):
        return [(key, self[key]) for key in self._list]

    def __delitem__(self, key):
        dict.__delitem__(self, key)
        self._list.remove(key)
    def read(self, path, fp=None, sections=None, remap=None):
        # Parse the config file at *path* into this object.  *fp* may
        # supply an already-open file object; otherwise the file is
        # opened here.  self.read itself is passed to parse() as the
        # %include callback, so included files are read recursively
        # with the same sections/remap settings.
        if not fp:
            fp = util.posixfile(path)
        self.parse(path, fp.read(), sections, remap, self.read)
class changectx(object):
    """A changecontext object makes access to data related to a particular
    changeset convenient."""
    def __init__(self, repo, changeid=''):
        """changeid is a revision number, node, or tag"""
        if changeid == '':
            changeid = '.'
        self._repo = repo
        if isinstance(changeid, (long, int)):
            self._rev = changeid
            self._node = self._repo.changelog.node(changeid)
        else:
            self._node = self._repo.lookup(changeid)
            self._rev = self._repo.changelog.rev(self._node)

    def __str__(self):
        return short(self.node())

    def __int__(self):
        return self.rev()

    def __repr__(self):
        # NOTE(review): upstream returns "<changectx %s>" % str(self); the
        # angle-bracketed text appears to have been lost when this file
        # was extracted -- confirm against the upstream source.
        return "" % str(self)

    def __hash__(self):
        try:
            return hash(self._rev)
        except AttributeError:
            # no _rev yet (partially constructed object): fall back to id
            return id(self)

    def __eq__(self, other):
        try:
            return self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __nonzero__(self):
        # the null revision is falsy
        return self._rev != nullrev

    @propertycache
    def _changeset(self):
        # lazily-read changelog entry tuple for this revision
        return self._repo.changelog.read(self.node())

    @propertycache
    def _manifest(self):
        return self._repo.manifest.read(self._changeset[0])

    @propertycache
    def _manifestdelta(self):
        return self._repo.manifest.readdelta(self._changeset[0])

    @propertycache
    def _parents(self):
        p = self._repo.changelog.parentrevs(self._rev)
        if p[1] == nullrev:
            # drop the null second parent for non-merge changesets
            p = p[:-1]
        return [changectx(self._repo, x) for x in p]

    @propertycache
    def substate(self):
        return subrepo.state(self, self._repo.ui)

    def __contains__(self, key):
        return key in self._manifest

    def __getitem__(self, key):
        return self.filectx(key)

    def __iter__(self):
        # yield tracked file names in sorted order
        for f in sorted(self._manifest):
            yield f

    # thin accessors over the cached changelog entry; the tuple layout
    # is (manifest node, user, date, files, description, extra)
    def changeset(self):
        return self._changeset
    def manifest(self):
        return self._manifest
    def manifestnode(self):
        return self._changeset[0]

    def rev(self):
        return self._rev
    def node(self):
        return self._node
    def hex(self):
        return hex(self._node)
    def user(self):
        return self._changeset[1]
    def date(self):
        return self._changeset[2]
    def files(self):
        return self._changeset[3]
    def description(self):
        return self._changeset[4]
    def branch(self):
        return encoding.tolocal(self._changeset[5].get("branch"))
    def extra(self):
        return self._changeset[5]
    def tags(self):
        return self._repo.nodetags(self._node)
    def bookmarks(self):
        return self._repo.nodebookmarks(self._node)
    def hidden(self):
        return self._rev in self._repo.changelog.hiddenrevs

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        return self._parents[0]

    def p2(self):
        if len(self._parents) == 2:
            return self._parents[1]
        # non-merge: second parent is the null changeset
        return changectx(self._repo, -1)

    def children(self):
        """return contexts for each child changeset"""
        c = self._repo.changelog.children(self._node)
        return [changectx(self._repo, x) for x in c]

    def ancestors(self):
        for a in self._repo.changelog.ancestors(self._rev):
            yield changectx(self._repo, a)

    def descendants(self):
        for d in self._repo.changelog.descendants(self._rev):
            yield changectx(self._repo, d)

    def _fileinfo(self, path):
        # Return (filenode, flags) for path, preferring whichever of the
        # full manifest / manifest delta is already cached to avoid
        # reading the other.
        if '_manifest' in self.__dict__:
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.LookupError(self._node, path,
                                        _('not found in manifest'))
        if '_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (self._manifestdelta[path],
                        self._manifestdelta.flags(path))
        # fall back to a targeted search of the manifest revlog
        node, flag = self._repo.manifest.find(self._changeset[0], path)
        if not node:
            raise error.LookupError(self._node, path,
                                    _('not found in manifest'))

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            # file not in the manifest: no flags
            return ''

    def filectx(self, path, fileid=None, filelog=None):
        """get a file context from this changeset"""
        if fileid is None:
            fileid = self.filenode(path)
        return filectx(self._repo, path, fileid=fileid,
                       changectx=self, filelog=filelog)

    def ancestor(self, c2):
        """
        return the ancestor context of self and c2
        """
        # deal with workingctxs
        n2 = c2._node
        if n2 is None:
            n2 = c2._parents[0]._node
        n = self._repo.changelog.ancestor(self._node, n2)
        return changectx(self._repo, n)

    def walk(self, match):
        fset = set(match.files())
        # for dirstate.walk, files=['.'] means "walk the whole tree".
        # follow that here, too
        fset.discard('.')
        for fn in self:
            for ffn in fset:
                # match if the file is the exact name or a directory
                if ffn == fn or fn.startswith("%s/" % ffn):
                    fset.remove(ffn)
                    break
            if match(fn):
                yield fn
        # anything left in fset was explicitly requested but does not
        # exist in this revision; report it via match.bad
        for fn in sorted(fset):
            if match.bad(fn, _('no such file in rev %s') % self) and match(fn):
                yield fn

    def sub(self, path):
        return subrepo.subrepo(self, path)

    def match(self, pats=[], include=None, exclude=None, default='glob'):
        r = self._repo
        return matchmod.match(r.root, r.getcwd(), pats,
                              include, exclude, default,
                              auditor=r.auditor, ctx=self)

    def diff(self, ctx2=None, match=None, **opts):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            # default: diff against the first parent
            ctx2 = self.p1()
        if ctx2 is not None and not isinstance(ctx2, changectx):
            ctx2 = self._repo[ctx2]
        diffopts = patch.diffopts(self._repo.ui, opts)
        return patch.diff(self._repo, ctx2.node(), self.node(),
                          match=match, opts=diffopts)
+ fileid can be a file revision or node.""" + self._repo = repo + self._path = path + + assert (changeid is not None + or fileid is not None + or changectx is not None), \ + ("bad args: changeid=%r, fileid=%r, changectx=%r" + % (changeid, fileid, changectx)) + + if filelog: + self._filelog = filelog + + if changeid is not None: + self._changeid = changeid + if changectx is not None: + self._changectx = changectx + if fileid is not None: + self._fileid = fileid + + @propertycache + def _changectx(self): + return changectx(self._repo, self._changeid) + + @propertycache + def _filelog(self): + return self._repo.file(self._path) + + @propertycache + def _changeid(self): + if '_changectx' in self.__dict__: + return self._changectx.rev() + else: + return self._filelog.linkrev(self._filerev) + + @propertycache + def _filenode(self): + if '_fileid' in self.__dict__: + return self._filelog.lookup(self._fileid) + else: + return self._changectx.filenode(self._path) + + @propertycache + def _filerev(self): + return self._filelog.rev(self._filenode) + + @propertycache + def _repopath(self): + return self._path + + def __nonzero__(self): + try: + self._filenode + return True + except error.LookupError: + # file is missing + return False + + def __str__(self): + return "%s@%s" % (self.path(), short(self.node())) + + def __repr__(self): + return "" % str(self) + + def __hash__(self): + try: + return hash((self._path, self._filenode)) + except AttributeError: + return id(self) + + def __eq__(self, other): + try: + return (self._path == other._path + and self._filenode == other._filenode) + except AttributeError: + return False + + def __ne__(self, other): + return not (self == other) + + def filectx(self, fileid): + '''opens an arbitrary revision of the file without + opening a new filelog''' + return filectx(self._repo, self._path, fileid=fileid, + filelog=self._filelog) + + def filerev(self): + return self._filerev + def filenode(self): + return self._filenode + def flags(self): 
+ return self._changectx.flags(self._path) + def filelog(self): + return self._filelog + + def rev(self): + if '_changectx' in self.__dict__: + return self._changectx.rev() + if '_changeid' in self.__dict__: + return self._changectx.rev() + return self._filelog.linkrev(self._filerev) + + def linkrev(self): + return self._filelog.linkrev(self._filerev) + def node(self): + return self._changectx.node() + def hex(self): + return hex(self.node()) + def user(self): + return self._changectx.user() + def date(self): + return self._changectx.date() + def files(self): + return self._changectx.files() + def description(self): + return self._changectx.description() + def branch(self): + return self._changectx.branch() + def extra(self): + return self._changectx.extra() + def manifest(self): + return self._changectx.manifest() + def changectx(self): + return self._changectx + + def data(self): + return self._filelog.read(self._filenode) + def path(self): + return self._path + def size(self): + return self._filelog.size(self._filerev) + + def cmp(self, fctx): + """compare with other file context + + returns True if different than fctx. + """ + if (fctx._filerev is None and self._repo._encodefilterpats + or self.size() == fctx.size()): + return self._filelog.cmp(self._filenode, fctx.data()) + + return True + + def renamed(self): + """check if file was actually renamed in this changeset revision + + If rename logged in file revision, we report copy for changeset only + if file revisions linkrev points back to the changeset in question + or both changeset parents contain different file revisions. 
+ """ + + renamed = self._filelog.renamed(self._filenode) + if not renamed: + return renamed + + if self.rev() == self.linkrev(): + return renamed + + name = self.path() + fnode = self._filenode + for p in self._changectx.parents(): + try: + if fnode == p.filenode(name): + return None + except error.LookupError: + pass + return renamed + + def parents(self): + p = self._path + fl = self._filelog + pl = [(p, n, fl) for n in self._filelog.parents(self._filenode)] + + r = self._filelog.renamed(self._filenode) + if r: + pl[0] = (r[0], r[1], None) + + return [filectx(self._repo, p, fileid=n, filelog=l) + for p, n, l in pl if n != nullid] + + def p1(self): + return self.parents()[0] + + def p2(self): + p = self.parents() + if len(p) == 2: + return p[1] + return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog) + + def children(self): + # hard for renames + c = self._filelog.children(self._filenode) + return [filectx(self._repo, self._path, fileid=x, + filelog=self._filelog) for x in c] + + def annotate(self, follow=False, linenumber=None): + '''returns a list of tuples of (ctx, line) for each line + in the file, where ctx is the filectx of the node where + that line was last changed. + This returns tuples of ((ctx, linenumber), line) for each line, + if "linenumber" parameter is NOT "None". + In such tuples, linenumber means one at the first appearance + in the managed file. 
+ To reduce annotation cost, + this returns fixed value(False is used) as linenumber, + if "linenumber" parameter is "False".''' + + def decorate_compat(text, rev): + return ([rev] * len(text.splitlines()), text) + + def without_linenumber(text, rev): + return ([(rev, False)] * len(text.splitlines()), text) + + def with_linenumber(text, rev): + size = len(text.splitlines()) + return ([(rev, i) for i in xrange(1, size + 1)], text) + + decorate = (((linenumber is None) and decorate_compat) or + (linenumber and with_linenumber) or + without_linenumber) + + def pair(parent, child): + for a1, a2, b1, b2 in bdiff.blocks(parent[1], child[1]): + child[0][b1:b2] = parent[0][a1:a2] + return child + + getlog = util.lrucachefunc(lambda x: self._repo.file(x)) + def getctx(path, fileid): + log = path == self._path and self._filelog or getlog(path) + return filectx(self._repo, path, fileid=fileid, filelog=log) + getctx = util.lrucachefunc(getctx) + + def parents(f): + # we want to reuse filectx objects as much as possible + p = f._path + if f._filerev is None: # working dir + pl = [(n.path(), n.filerev()) for n in f.parents()] + else: + pl = [(p, n) for n in f._filelog.parentrevs(f._filerev)] + + if follow: + r = f.renamed() + if r: + pl[0] = (r[0], getlog(r[0]).rev(r[1])) + + return [getctx(p, n) for p, n in pl if n != nullrev] + + # use linkrev to find the first changeset where self appeared + if self.rev() != self.linkrev(): + base = self.filectx(self.filerev()) + else: + base = self + + # This algorithm would prefer to be recursive, but Python is a + # bit recursion-hostile. Instead we do an iterative + # depth-first search. 
+ + visit = [base] + hist = {} + pcache = {} + needed = {base: 1} + while visit: + f = visit[-1] + if f not in pcache: + pcache[f] = parents(f) + + ready = True + pl = pcache[f] + for p in pl: + if p not in hist: + ready = False + visit.append(p) + needed[p] = needed.get(p, 0) + 1 + if ready: + visit.pop() + curr = decorate(f.data(), f) + for p in pl: + curr = pair(hist[p], curr) + if needed[p] == 1: + del hist[p] + else: + needed[p] -= 1 + + hist[f] = curr + pcache[f] = [] + + return zip(hist[base][0], hist[base][1].splitlines(True)) + + def ancestor(self, fc2, actx=None): + """ + find the common ancestor file context, if any, of self, and fc2 + + If actx is given, it must be the changectx of the common ancestor + of self's and fc2's respective changesets. + """ + + if actx is None: + actx = self.changectx().ancestor(fc2.changectx()) + + # the trivial case: changesets are unrelated, files must be too + if not actx: + return None + + # the easy case: no (relevant) renames + if fc2.path() == self.path() and self.path() in actx: + return actx[self.path()] + acache = {} + + # prime the ancestor cache for the working directory + for c in (self, fc2): + if c._filerev is None: + pl = [(n.path(), n.filenode()) for n in c.parents()] + acache[(c._path, None)] = pl + + flcache = {self._repopath:self._filelog, fc2._repopath:fc2._filelog} + def parents(vertex): + if vertex in acache: + return acache[vertex] + f, n = vertex + if f not in flcache: + flcache[f] = self._repo.file(f) + fl = flcache[f] + pl = [(f, p) for p in fl.parents(n) if p != nullid] + re = fl.renamed(n) + if re: + pl.append(re) + acache[vertex] = pl + return pl + + a, b = (self._path, self._filenode), (fc2._path, fc2._filenode) + v = ancestor.ancestor(a, b, parents) + if v: + f, n = v + return filectx(self._repo, f, fileid=n, filelog=flcache[f]) + + return None + + def ancestors(self): + visit = {} + c = self + while True: + for parent in c.parents(): + visit[(parent.rev(), parent.node())] = parent + if not 
visit: + break + c = visit.pop(max(visit)) + yield c + +class workingctx(changectx): + """A workingctx object makes access to data related to + the current working directory convenient. + date - any valid date string or (unixtime, offset), or None. + user - username string, or None. + extra - a dictionary of extra values, or None. + changes - a list of file lists as returned by localrepo.status() + or None to use the repository status. + """ + def __init__(self, repo, text="", user=None, date=None, extra=None, + changes=None): + self._repo = repo + self._rev = None + self._node = None + self._text = text + if date: + self._date = util.parsedate(date) + if user: + self._user = user + if changes: + self._status = list(changes[:4]) + self._unknown = changes[4] + self._ignored = changes[5] + self._clean = changes[6] + else: + self._unknown = None + self._ignored = None + self._clean = None + + self._extra = {} + if extra: + self._extra = extra.copy() + if 'branch' not in self._extra: + try: + branch = encoding.fromlocal(self._repo.dirstate.branch()) + except UnicodeDecodeError: + raise util.Abort(_('branch name not in UTF-8!')) + self._extra['branch'] = branch + if self._extra['branch'] == '': + self._extra['branch'] = 'default' + + def __str__(self): + return str(self._parents[0]) + "+" + + def __repr__(self): + return "" % str(self) + + def __nonzero__(self): + return True + + def __contains__(self, key): + return self._repo.dirstate[key] not in "?r" + + def _buildflagfunc(self): + # Create a fallback function for getting file flags when the + # filesystem doesn't support them + + copiesget = self._repo.dirstate.copies().get + + if len(self._parents) < 2: + # when we have one parent, it's easy: copy from parent + man = self._parents[0].manifest() + def func(f): + f = copiesget(f, f) + return man.flags(f) + else: + # merges are tricky: we try to reconstruct the unstored + # result from the merge (issue1802) + p1, p2 = self._parents + pa = p1.ancestor(p2) + m1, m2, ma 
= p1.manifest(), p2.manifest(), pa.manifest() + + def func(f): + f = copiesget(f, f) # may be wrong for merges with copies + fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f) + if fl1 == fl2: + return fl1 + if fl1 == fla: + return fl2 + if fl2 == fla: + return fl1 + return '' # punt for conflicts + + return func + + @propertycache + def _flagfunc(self): + return self._repo.dirstate.flagfunc(self._buildflagfunc) + + @propertycache + def _manifest(self): + """generate a manifest corresponding to the working directory""" + + if self._unknown is None: + self.status(unknown=True) + + man = self._parents[0].manifest().copy() + if len(self._parents) > 1: + man2 = self.p2().manifest() + def getman(f): + if f in man: + return man + return man2 + else: + getman = lambda f: man + + copied = self._repo.dirstate.copies() + ff = self._flagfunc + modified, added, removed, deleted = self._status + unknown = self._unknown + for i, l in (("a", added), ("m", modified), ("u", unknown)): + for f in l: + orig = copied.get(f, f) + man[f] = getman(orig).get(orig, nullid) + i + try: + man.set(f, ff(f)) + except OSError: + pass + + for f in deleted + removed: + if f in man: + del man[f] + + return man + + def __iter__(self): + d = self._repo.dirstate + for f in d: + if d[f] != 'r': + yield f + + @propertycache + def _status(self): + return self._repo.status()[:4] + + @propertycache + def _user(self): + return self._repo.ui.username() + + @propertycache + def _date(self): + return util.makedate() + + @propertycache + def _parents(self): + p = self._repo.dirstate.parents() + if p[1] == nullid: + p = p[:-1] + self._parents = [changectx(self._repo, x) for x in p] + return self._parents + + def status(self, ignored=False, clean=False, unknown=False): + """Explicit status query + Unless this method is used to query the working copy status, the + _status property will implicitly read the status using its default + arguments.""" + stat = self._repo.status(ignored=ignored, clean=clean, 
unknown=unknown) + self._unknown = self._ignored = self._clean = None + if unknown: + self._unknown = stat[4] + if ignored: + self._ignored = stat[5] + if clean: + self._clean = stat[6] + self._status = stat[:4] + return stat + + def manifest(self): + return self._manifest + def user(self): + return self._user or self._repo.ui.username() + def date(self): + return self._date + def description(self): + return self._text + def files(self): + return sorted(self._status[0] + self._status[1] + self._status[2]) + + def modified(self): + return self._status[0] + def added(self): + return self._status[1] + def removed(self): + return self._status[2] + def deleted(self): + return self._status[3] + def unknown(self): + assert self._unknown is not None # must call status first + return self._unknown + def ignored(self): + assert self._ignored is not None # must call status first + return self._ignored + def clean(self): + assert self._clean is not None # must call status first + return self._clean + def branch(self): + return encoding.tolocal(self._extra['branch']) + def extra(self): + return self._extra + + def tags(self): + t = [] + for p in self.parents(): + t.extend(p.tags()) + return t + + def bookmarks(self): + b = [] + for p in self.parents(): + b.extend(p.bookmarks()) + return b + + def children(self): + return [] + + def flags(self, path): + if '_manifest' in self.__dict__: + try: + return self._manifest.flags(path) + except KeyError: + return '' + + try: + return self._flagfunc(path) + except OSError: + return '' + + def filectx(self, path, filelog=None): + """get a file context from the working directory""" + return workingfilectx(self._repo, path, workingctx=self, + filelog=filelog) + + def ancestor(self, c2): + """return the ancestor context of self and c2""" + return self._parents[0].ancestor(c2) # punt on two parents for now + + def walk(self, match): + return sorted(self._repo.dirstate.walk(match, self.substate.keys(), + True, False)) + + def dirty(self, 
missing=False): + "check whether a working directory is modified" + # check subrepos first + for s in self.substate: + if self.sub(s).dirty(): + return True + # check current working dir + return (self.p2() or self.branch() != self.p1().branch() or + self.modified() or self.added() or self.removed() or + (missing and self.deleted())) + + def add(self, list, prefix=""): + join = lambda f: os.path.join(prefix, f) + wlock = self._repo.wlock() + ui, ds = self._repo.ui, self._repo.dirstate + try: + rejected = [] + for f in list: + scmutil.checkportable(ui, join(f)) + p = self._repo.wjoin(f) + try: + st = os.lstat(p) + except OSError: + ui.warn(_("%s does not exist!\n") % join(f)) + rejected.append(f) + continue + if st.st_size > 10000000: + ui.warn(_("%s: up to %d MB of RAM may be required " + "to manage this file\n" + "(use 'hg revert %s' to cancel the " + "pending addition)\n") + % (f, 3 * st.st_size // 1000000, join(f))) + if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)): + ui.warn(_("%s not added: only files and symlinks " + "supported currently\n") % join(f)) + rejected.append(p) + elif ds[f] in 'amn': + ui.warn(_("%s already tracked!\n") % join(f)) + elif ds[f] == 'r': + ds.normallookup(f) + else: + ds.add(f) + return rejected + finally: + wlock.release() + + def forget(self, files): + wlock = self._repo.wlock() + try: + for f in files: + if self._repo.dirstate[f] != 'a': + self._repo.dirstate.remove(f) + elif f not in self._repo.dirstate: + self._repo.ui.warn(_("%s not tracked!\n") % f) + else: + self._repo.dirstate.drop(f) + finally: + wlock.release() + + def ancestors(self): + for a in self._repo.changelog.ancestors( + *[p.rev() for p in self._parents]): + yield changectx(self._repo, a) + + def undelete(self, list): + pctxs = self.parents() + wlock = self._repo.wlock() + try: + for f in list: + if self._repo.dirstate[f] != 'r': + self._repo.ui.warn(_("%s not removed!\n") % f) + else: + fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f] + t = 
fctx.data() + self._repo.wwrite(f, t, fctx.flags()) + self._repo.dirstate.normal(f) + finally: + wlock.release() + + def copy(self, source, dest): + p = self._repo.wjoin(dest) + if not os.path.lexists(p): + self._repo.ui.warn(_("%s does not exist!\n") % dest) + elif not (os.path.isfile(p) or os.path.islink(p)): + self._repo.ui.warn(_("copy failed: %s is not a file or a " + "symbolic link\n") % dest) + else: + wlock = self._repo.wlock() + try: + if self._repo.dirstate[dest] in '?r': + self._repo.dirstate.add(dest) + self._repo.dirstate.copy(source, dest) + finally: + wlock.release() + +class workingfilectx(filectx): + """A workingfilectx object makes access to data related to a particular + file in the working directory convenient.""" + def __init__(self, repo, path, filelog=None, workingctx=None): + """changeid can be a changeset revision, node, or tag. + fileid can be a file revision or node.""" + self._repo = repo + self._path = path + self._changeid = None + self._filerev = self._filenode = None + + if filelog: + self._filelog = filelog + if workingctx: + self._changectx = workingctx + + @propertycache + def _changectx(self): + return workingctx(self._repo) + + def __nonzero__(self): + return True + + def __str__(self): + return "%s@%s" % (self.path(), self._changectx) + + def __repr__(self): + return "" % str(self) + + def data(self): + return self._repo.wread(self._path) + def renamed(self): + rp = self._repo.dirstate.copied(self._path) + if not rp: + return None + return rp, self._changectx._parents[0]._manifest.get(rp, nullid) + + def parents(self): + '''return parent filectxs, following copies if necessary''' + def filenode(ctx, path): + return ctx._manifest.get(path, nullid) + + path = self._path + fl = self._filelog + pcl = self._changectx._parents + renamed = self.renamed() + + if renamed: + pl = [renamed + (None,)] + else: + pl = [(path, filenode(pcl[0], path), fl)] + + for pc in pcl[1:]: + pl.append((path, filenode(pc, path), fl)) + + return 
[filectx(self._repo, p, fileid=n, filelog=l) + for p, n, l in pl if n != nullid] + + def children(self): + return [] + + def size(self): + return os.lstat(self._repo.wjoin(self._path)).st_size + def date(self): + t, tz = self._changectx.date() + try: + return (int(os.lstat(self._repo.wjoin(self._path)).st_mtime), tz) + except OSError, err: + if err.errno != errno.ENOENT: + raise + return (t, tz) + + def cmp(self, fctx): + """compare with other file context + + returns True if different than fctx. + """ + # fctx should be a filectx (not a wfctx) + # invert comparison to reuse the same code path + return fctx.cmp(self) + +class memctx(object): + """Use memctx to perform in-memory commits via localrepo.commitctx(). + + Revision information is supplied at initialization time while + related files data and is made available through a callback + mechanism. 'repo' is the current localrepo, 'parents' is a + sequence of two parent revisions identifiers (pass None for every + missing parent), 'text' is the commit message and 'files' lists + names of files touched by the revision (normalized and relative to + repository root). + + filectxfn(repo, memctx, path) is a callable receiving the + repository, the current memctx object and the normalized path of + requested file, relative to repository root. It is fired by the + commit function for every file in 'files', but calls order is + undefined. If the file is available in the revision being + committed (updated or added), filectxfn returns a memfilectx + object. If the file was removed, filectxfn raises an + IOError. Moved files are represented by marking the source file + removed and the new file added with copy information (see + memfilectx). + + user receives the committer name and defaults to current + repository username, date is the commit date in any format + supported by util.parsedate() and defaults to current date, extra + is a dictionary of metadata or is left empty. 
+ """ + def __init__(self, repo, parents, text, files, filectxfn, user=None, + date=None, extra=None): + self._repo = repo + self._rev = None + self._node = None + self._text = text + self._date = date and util.parsedate(date) or util.makedate() + self._user = user + parents = [(p or nullid) for p in parents] + p1, p2 = parents + self._parents = [changectx(self._repo, p) for p in (p1, p2)] + files = sorted(set(files)) + self._status = [files, [], [], [], []] + self._filectxfn = filectxfn + + self._extra = extra and extra.copy() or {} + if self._extra.get('branch', '') == '': + self._extra['branch'] = 'default' + + def __str__(self): + return str(self._parents[0]) + "+" + + def __int__(self): + return self._rev + + def __nonzero__(self): + return True + + def __getitem__(self, key): + return self.filectx(key) + + def p1(self): + return self._parents[0] + def p2(self): + return self._parents[1] + + def user(self): + return self._user or self._repo.ui.username() + def date(self): + return self._date + def description(self): + return self._text + def files(self): + return self.modified() + def modified(self): + return self._status[0] + def added(self): + return self._status[1] + def removed(self): + return self._status[2] + def deleted(self): + return self._status[3] + def unknown(self): + return self._status[4] + def ignored(self): + return self._status[5] + def clean(self): + return self._status[6] + def branch(self): + return encoding.tolocal(self._extra['branch']) + def extra(self): + return self._extra + def flags(self, f): + return self[f].flags() + + def parents(self): + """return contexts for each parent changeset""" + return self._parents + + def filectx(self, path, filelog=None): + """get a file context from the working directory""" + return self._filectxfn(self._repo, self, path) + + def commit(self): + """commit context to the repo""" + return self._repo.commitctx(self) + +class memfilectx(object): + """memfilectx represents an in-memory file to commit. 
+ + See memctx for more details. + """ + def __init__(self, path, data, islink=False, isexec=False, copied=None): + """ + path is the normalized file path relative to repository root. + data is the file content as a string. + islink is True if the file is a symbolic link. + isexec is True if the file is executable. + copied is the source file path if current file was copied in the + revision being committed, or None.""" + self._path = path + self._data = data + self._flags = (islink and 'l' or '') + (isexec and 'x' or '') + self._copied = None + if copied: + self._copied = (copied, nullid) + + def __nonzero__(self): + return True + def __str__(self): + return "%s@%s" % (self.path(), self._changectx) + def path(self): + return self._path + def data(self): + return self._data + def flags(self): + return self._flags + def isexec(self): + return 'x' in self._flags + def islink(self): + return 'l' in self._flags + def renamed(self): + return self._copied diff --git a/websdk/mercurial/copies.py b/websdk/mercurial/copies.py index 4da6846..abd16fa 120000..100644 --- a/websdk/mercurial/copies.py +++ b/websdk/mercurial/copies.py @@ -1 +1,267 @@ -/usr/share/pyshared/mercurial/copies.py \ No newline at end of file +# copies.py - copy detection for Mercurial +# +# Copyright 2008 Matt Mackall +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +import util +import heapq + +def _nonoverlap(d1, d2, d3): + "Return list of elements in d1 not in d2 or d3" + return sorted([d for d in d1 if d not in d3 and d not in d2]) + +def _dirname(f): + s = f.rfind("/") + if s == -1: + return "" + return f[:s] + +def _dirs(files): + d = set() + for f in files: + f = _dirname(f) + while f not in d: + d.add(f) + f = _dirname(f) + return d + +def _findlimit(repo, a, b): + """Find the earliest revision that's an ancestor of a or b but not both, + None if no such revision exists. 
+ """ + # basic idea: + # - mark a and b with different sides + # - if a parent's children are all on the same side, the parent is + # on that side, otherwise it is on no side + # - walk the graph in topological order with the help of a heap; + # - add unseen parents to side map + # - clear side of any parent that has children on different sides + # - track number of interesting revs that might still be on a side + # - track the lowest interesting rev seen + # - quit when interesting revs is zero + + cl = repo.changelog + working = len(cl) # pseudo rev for the working directory + if a is None: + a = working + if b is None: + b = working + + side = {a: -1, b: 1} + visit = [-a, -b] + heapq.heapify(visit) + interesting = len(visit) + hascommonancestor = False + limit = working + + while interesting: + r = -heapq.heappop(visit) + if r == working: + parents = [cl.rev(p) for p in repo.dirstate.parents()] + else: + parents = cl.parentrevs(r) + for p in parents: + if p < 0: + continue + if p not in side: + # first time we see p; add it to visit + side[p] = side[r] + if side[p]: + interesting += 1 + heapq.heappush(visit, -p) + elif side[p] and side[p] != side[r]: + # p was interesting but now we know better + side[p] = 0 + interesting -= 1 + hascommonancestor = True + if side[r]: + limit = r # lowest rev visited + interesting -= 1 + + if not hascommonancestor: + return None + return limit + +def copies(repo, c1, c2, ca, checkdirs=False): + """ + Find moves and copies between context c1 and c2 + """ + # avoid silly behavior for update from empty dir + if not c1 or not c2 or c1 == c2: + return {}, {} + + # avoid silly behavior for parent -> working dir + if c2.node() is None and c1.node() == repo.dirstate.p1(): + return repo.dirstate.copies(), {} + + limit = _findlimit(repo, c1.rev(), c2.rev()) + if limit is None: + # no common ancestor, no copies + return {}, {} + m1 = c1.manifest() + m2 = c2.manifest() + ma = ca.manifest() + + def makectx(f, n): + if len(n) != 20: # in a 
working context? + if c1.rev() is None: + return c1.filectx(f) + return c2.filectx(f) + return repo.filectx(f, fileid=n) + + ctx = util.lrucachefunc(makectx) + copy = {} + fullcopy = {} + diverge = {} + + def related(f1, f2, limit): + # Walk back to common ancestor to see if the two files originate + # from the same file. Since workingfilectx's rev() is None it messes + # up the integer comparison logic, hence the pre-step check for + # None (f1 and f2 can only be workingfilectx's initially). + + if f1 == f2: + return f1 # a match + + g1, g2 = f1.ancestors(), f2.ancestors() + try: + f1r, f2r = f1.rev(), f2.rev() + + if f1r is None: + f1 = g1.next() + if f2r is None: + f2 = g2.next() + + while True: + f1r, f2r = f1.rev(), f2.rev() + if f1r > f2r: + f1 = g1.next() + elif f2r > f1r: + f2 = g2.next() + elif f1 == f2: + return f1 # a match + elif f1r == f2r or f1r < limit or f2r < limit: + return False # copy no longer relevant + except StopIteration: + return False + + def checkcopies(f, m1, m2): + '''check possible copies of f from m1 to m2''' + of = None + seen = set([f]) + for oc in ctx(f, m1[f]).ancestors(): + ocr = oc.rev() + of = oc.path() + if of in seen: + # check limit late - grab last rename before + if ocr < limit: + break + continue + seen.add(of) + + fullcopy[f] = of # remember for dir rename detection + if of not in m2: + continue # no match, keep looking + if m2[of] == ma.get(of): + break # no merge needed, quit early + c2 = ctx(of, m2[of]) + cr = related(oc, c2, ca.rev()) + if cr and (of == f or of == c2.path()): # non-divergent + copy[f] = of + of = None + break + + if of in ma: + diverge.setdefault(of, []).append(f) + + repo.ui.debug(" searching for copies back to rev %d\n" % limit) + + u1 = _nonoverlap(m1, m2, ma) + u2 = _nonoverlap(m2, m1, ma) + + if u1: + repo.ui.debug(" unmatched files in local:\n %s\n" + % "\n ".join(u1)) + if u2: + repo.ui.debug(" unmatched files in other:\n %s\n" + % "\n ".join(u2)) + + for f in u1: + checkcopies(f, m1, m2) + 
for f in u2: + checkcopies(f, m2, m1) + + diverge2 = set() + for of, fl in diverge.items(): + if len(fl) == 1 or of in c2: + del diverge[of] # not actually divergent, or not a rename + else: + diverge2.update(fl) # reverse map for below + + if fullcopy: + repo.ui.debug(" all copies found (* = to merge, ! = divergent):\n") + for f in fullcopy: + note = "" + if f in copy: + note += "*" + if f in diverge2: + note += "!" + repo.ui.debug(" %s -> %s %s\n" % (f, fullcopy[f], note)) + del diverge2 + + if not fullcopy or not checkdirs: + return copy, diverge + + repo.ui.debug(" checking for directory renames\n") + + # generate a directory move map + d1, d2 = _dirs(m1), _dirs(m2) + invalid = set() + dirmove = {} + + # examine each file copy for a potential directory move, which is + # when all the files in a directory are moved to a new directory + for dst, src in fullcopy.iteritems(): + dsrc, ddst = _dirname(src), _dirname(dst) + if dsrc in invalid: + # already seen to be uninteresting + continue + elif dsrc in d1 and ddst in d1: + # directory wasn't entirely moved locally + invalid.add(dsrc) + elif dsrc in d2 and ddst in d2: + # directory wasn't entirely moved remotely + invalid.add(dsrc) + elif dsrc in dirmove and dirmove[dsrc] != ddst: + # files from the same directory moved to two different places + invalid.add(dsrc) + else: + # looks good so far + dirmove[dsrc + "/"] = ddst + "/" + + for i in invalid: + if i in dirmove: + del dirmove[i] + del d1, d2, invalid + + if not dirmove: + return copy, diverge + + for d in dirmove: + repo.ui.debug(" dir %s -> %s\n" % (d, dirmove[d])) + + # check unaccounted nonoverlapping files against directory moves + for f in u1 + u2: + if f not in fullcopy: + for d in dirmove: + if f.startswith(d): + # new file added in a directory that was moved, move it + df = dirmove[d] + f[len(d):] + if df not in copy: + copy[f] = df + repo.ui.debug(" file %s -> %s\n" % (f, copy[f])) + break + + return copy, diverge diff --git 
a/websdk/mercurial/dagparser.py b/websdk/mercurial/dagparser.py index f94cbde..e02faa5 120000..100644 --- a/websdk/mercurial/dagparser.py +++ b/websdk/mercurial/dagparser.py @@ -1 +1,474 @@ -/usr/share/pyshared/mercurial/dagparser.py \ No newline at end of file +# dagparser.py - parser and generator for concise description of DAGs +# +# Copyright 2010 Peter Arrenbrecht +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +import re, string +import util +from i18n import _ + +def parsedag(desc): + '''parses a DAG from a concise textual description; generates events + + "+n" is a linear run of n nodes based on the current default parent + "." is a single node based on the current default parent + "$" resets the default parent to -1 (implied at the start); + otherwise the default parent is always the last node created + ">> len(list(parsedag(""" + ... + ... +3 # 3 nodes in linear run + ... :forkhere # a label for the last of the 3 nodes from above + ... +5 # 5 more nodes on one branch + ... :mergethis # label again + ... 
>> list(parsedag("")) + [] + + A simple linear run: + + >>> list(parsedag("+3")) + [('n', (0, [-1])), ('n', (1, [0])), ('n', (2, [1]))] + + Some non-standard ways to define such runs: + + >>> list(parsedag("+1+2")) + [('n', (0, [-1])), ('n', (1, [0])), ('n', (2, [1]))] + + >>> list(parsedag("+1*1*")) + [('n', (0, [-1])), ('n', (1, [0])), ('n', (2, [1]))] + + >>> list(parsedag("*")) + [('n', (0, [-1]))] + + >>> list(parsedag("...")) + [('n', (0, [-1])), ('n', (1, [0])), ('n', (2, [1]))] + + A fork and a join, using numeric back references: + + >>> list(parsedag("+2*2*/2")) + [('n', (0, [-1])), ('n', (1, [0])), ('n', (2, [0])), ('n', (3, [2, 1]))] + + >>> list(parsedag("+2<2+1/2")) + [('n', (0, [-1])), ('n', (1, [0])), ('n', (2, [0])), ('n', (3, [2, 1]))] + + Placing a label: + + >>> list(parsedag("+1 :mylabel +1")) + [('n', (0, [-1])), ('l', (0, 'mylabel')), ('n', (1, [0]))] + + An empty label (silly, really): + + >>> list(parsedag("+1:+1")) + [('n', (0, [-1])), ('l', (0, '')), ('n', (1, [0]))] + + Fork and join, but with labels instead of numeric back references: + + >>> list(parsedag("+1:f +1:p2 *f */p2")) + [('n', (0, [-1])), ('l', (0, 'f')), ('n', (1, [0])), ('l', (1, 'p2')), + ('n', (2, [0])), ('n', (3, [2, 1]))] + + >>> list(parsedag("+1:f +1:p2 >> list(parsedag("+1 $ +1")) + [('n', (0, [-1])), ('n', (1, [-1]))] + + Annotations, which are meant to introduce sticky state for subsequent nodes: + + >>> list(parsedag("+1 @ann +1")) + [('n', (0, [-1])), ('a', 'ann'), ('n', (1, [0]))] + + >>> list(parsedag('+1 @"my annotation" +1')) + [('n', (0, [-1])), ('a', 'my annotation'), ('n', (1, [0]))] + + Commands, which are meant to operate on the most recently created node: + + >>> list(parsedag("+1 !cmd +1")) + [('n', (0, [-1])), ('c', 'cmd'), ('n', (1, [0]))] + + >>> list(parsedag('+1 !"my command" +1')) + [('n', (0, [-1])), ('c', 'my command'), ('n', (1, [0]))] + + >>> list(parsedag('+1 !!my command line\\n +1')) + [('n', (0, [-1])), ('C', 'my command line'), ('n', (1, 
[0]))] + + Comments, which extend to the end of the line: + + >>> list(parsedag('+1 # comment\\n+1')) + [('n', (0, [-1])), ('n', (1, [0]))] + + Error: + + >>> try: list(parsedag('+1 bad')) + ... except Exception, e: print e + invalid character in dag description: bad... + + ''' + if not desc: + return + + wordchars = string.ascii_letters + string.digits + + labels = {} + p1 = -1 + r = 0 + + def resolve(ref): + if not ref: + return p1 + elif ref[0] in string.digits: + return r - int(ref) + else: + return labels[ref] + + chiter = (c for c in desc) + + def nextch(): + try: + return chiter.next() + except StopIteration: + return '\0' + + def nextrun(c, allow): + s = '' + while c in allow: + s += c + c = nextch() + return c, s + + def nextdelimited(c, limit, escape): + s = '' + while c != limit: + if c == escape: + c = nextch() + s += c + c = nextch() + return nextch(), s + + def nextstring(c): + if c == '"': + return nextdelimited(nextch(), '"', '\\') + else: + return nextrun(c, wordchars) + + c = nextch() + while c != '\0': + while c in string.whitespace: + c = nextch() + if c == '.': + yield 'n', (r, [p1]) + p1 = r + r += 1 + c = nextch() + elif c == '+': + c, digs = nextrun(nextch(), string.digits) + n = int(digs) + for i in xrange(0, n): + yield 'n', (r, [p1]) + p1 = r + r += 1 + elif c in '*/': + if c == '*': + c = nextch() + c, pref = nextstring(c) + prefs = [pref] + while c == '/': + c, pref = nextstring(nextch()) + prefs.append(pref) + ps = [resolve(ref) for ref in prefs] + yield 'n', (r, ps) + p1 = r + r += 1 + elif c == '<': + c, ref = nextstring(nextch()) + p1 = resolve(ref) + elif c == ':': + c, name = nextstring(nextch()) + labels[name] = p1 + yield 'l', (p1, name) + elif c == '@': + c, text = nextstring(nextch()) + yield 'a', text + elif c == '!': + c = nextch() + if c == '!': + cmd = '' + c = nextch() + while c not in '\n\r\0': + cmd += c + c = nextch() + yield 'C', cmd + else: + c, cmd = nextstring(c) + yield 'c', cmd + elif c == '#': + while c not in 
'\n\r\0': + c = nextch() + elif c == '$': + p1 = -1 + c = nextch() + elif c == '\0': + return # in case it was preceded by whitespace + else: + s = '' + i = 0 + while c != '\0' and i < 10: + s += c + i += 1 + c = nextch() + raise util.Abort(_("invalid character in dag description: %s...") % s) + +def dagtextlines(events, + addspaces=True, + wraplabels=False, + wrapannotations=False, + wrapcommands=False, + wrapnonlinear=False, + usedots=False, + maxlinewidth=70): + '''generates single lines for dagtext()''' + + def wrapstring(text): + if re.match("^[0-9a-z]*$", text): + return text + return '"' + text.replace('\\', '\\\\').replace('"', '\"') + '"' + + def gen(): + labels = {} + run = 0 + wantr = 0 + needroot = False + for kind, data in events: + if kind == 'n': + r, ps = data + + # sanity check + if r != wantr: + raise util.Abort(_("expected id %i, got %i") % (wantr, r)) + if not ps: + ps = [-1] + else: + for p in ps: + if p >= r: + raise util.Abort(_("parent id %i is larger than " + "current id %i") % (p, r)) + wantr += 1 + + # new root? + p1 = r - 1 + if len(ps) == 1 and ps[0] == -1: + if needroot: + if run: + yield '+' + str(run) + run = 0 + if wrapnonlinear: + yield '\n' + yield '$' + p1 = -1 + else: + needroot = True + if len(ps) == 1 and ps[0] == p1: + if usedots: + yield "." + else: + run += 1 + else: + if run: + yield '+' + str(run) + run = 0 + if wrapnonlinear: + yield '\n' + prefs = [] + for p in ps: + if p == p1: + prefs.append('') + elif p in labels: + prefs.append(labels[p]) + else: + prefs.append(str(r - p)) + yield '*' + '/'.join(prefs) + else: + if run: + yield '+' + str(run) + run = 0 + if kind == 'l': + rid, name = data + labels[rid] = name + yield ':' + name + if wraplabels: + yield '\n' + elif kind == 'c': + yield '!' + wrapstring(data) + if wrapcommands: + yield '\n' + elif kind == 'C': + yield '!!' 
+ data + yield '\n' + elif kind == 'a': + if wrapannotations: + yield '\n' + yield '@' + wrapstring(data) + elif kind == '#': + yield '#' + data + yield '\n' + else: + raise util.Abort(_("invalid event type in dag: %s") + % str((type, data))) + if run: + yield '+' + str(run) + + line = '' + for part in gen(): + if part == '\n': + if line: + yield line + line = '' + else: + if len(line) + len(part) >= maxlinewidth: + yield line + line = '' + elif addspaces and line and part != '.': + line += ' ' + line += part + if line: + yield line + +def dagtext(dag, + addspaces=True, + wraplabels=False, + wrapannotations=False, + wrapcommands=False, + wrapnonlinear=False, + usedots=False, + maxlinewidth=70): + '''generates lines of a textual representation for a dag event stream + + events should generate what parsedag() does, so: + + ('n', (id, [parentids])) for node creation + ('l', (id, labelname)) for labels on nodes + ('a', text) for annotations + ('c', text) for commands + ('C', text) for line commands ('!!') + ('#', text) for comment lines + + Parent nodes must come before child nodes. + + Examples + -------- + + Linear run: + + >>> dagtext([('n', (0, [-1])), ('n', (1, [0]))]) + '+2' + + Two roots: + + >>> dagtext([('n', (0, [-1])), ('n', (1, [-1]))]) + '+1 $ +1' + + Fork and join: + + >>> dagtext([('n', (0, [-1])), ('n', (1, [0])), ('n', (2, [0])), + ... ('n', (3, [2, 1]))]) + '+2 *2 */2' + + Fork and join with labels: + + >>> dagtext([('n', (0, [-1])), ('l', (0, 'f')), ('n', (1, [0])), + ... 
('l', (1, 'p2')), ('n', (2, [0])), ('n', (3, [2, 1]))]) + '+1 :f +1 :p2 *f */p2' + + Annotations: + + >>> dagtext([('n', (0, [-1])), ('a', 'ann'), ('n', (1, [0]))]) + '+1 @ann +1' + + >>> dagtext([('n', (0, [-1])), ('a', 'my annotation'), ('n', (1, [0]))]) + '+1 @"my annotation" +1' + + Commands: + + >>> dagtext([('n', (0, [-1])), ('c', 'cmd'), ('n', (1, [0]))]) + '+1 !cmd +1' + + >>> dagtext([('n', (0, [-1])), ('c', 'my command'), ('n', (1, [0]))]) + '+1 !"my command" +1' + + >>> dagtext([('n', (0, [-1])), ('C', 'my command line'), ('n', (1, [0]))]) + '+1 !!my command line\\n+1' + + Comments: + + >>> dagtext([('n', (0, [-1])), ('#', ' comment'), ('n', (1, [0]))]) + '+1 # comment\\n+1' + + >>> dagtext([]) + '' + + Combining parsedag and dagtext: + + >>> dagtext(parsedag('+1 :f +1 :p2 *f */p2')) + '+1 :f +1 :p2 *f */p2' + + ''' + return "\n".join(dagtextlines(dag, + addspaces, + wraplabels, + wrapannotations, + wrapcommands, + wrapnonlinear, + usedots, + maxlinewidth)) diff --git a/websdk/mercurial/dagutil.py b/websdk/mercurial/dagutil.py new file mode 100644 index 0000000..7908e63 --- /dev/null +++ b/websdk/mercurial/dagutil.py @@ -0,0 +1,277 @@ +# dagutil.py - dag utilities for mercurial +# +# Copyright 2010 Benoit Boissinot +# and Peter Arrenbrecht +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +from node import nullrev +from i18n import _ + + +class basedag(object): + '''generic interface for DAGs + + terms: + "ix" (short for index) identifies a nodes internally, + "id" identifies one externally. + + All params are ixs unless explicitly suffixed otherwise. + Pluralized params are lists or sets. 
+ ''' + + def __init__(self): + self._inverse = None + + def nodeset(self): + '''set of all node idxs''' + raise NotImplementedError() + + def heads(self): + '''list of head ixs''' + raise NotImplementedError() + + def parents(self, ix): + '''list of parents ixs of ix''' + raise NotImplementedError() + + def inverse(self): + '''inverse DAG, where parents becomes children, etc.''' + raise NotImplementedError() + + def ancestorset(self, starts, stops=None): + ''' + set of all ancestors of starts (incl), but stop walk at stops (excl) + ''' + raise NotImplementedError() + + def descendantset(self, starts, stops=None): + ''' + set of all descendants of starts (incl), but stop walk at stops (excl) + ''' + return self.inverse().ancestorset(starts, stops) + + def headsetofconnecteds(self, ixs): + ''' + subset of connected list of ixs so that no node has a descendant in it + + By "connected list" we mean that if an ancestor and a descendant are in + the list, then so is at least one path connecting them. 
+ ''' + raise NotImplementedError() + + def externalize(self, ix): + '''return a list of (or set if given a set) of node ids''' + return self._externalize(ix) + + def externalizeall(self, ixs): + '''return a list of (or set if given a set) of node ids''' + ids = self._externalizeall(ixs) + if isinstance(ixs, set): + return set(ids) + return list(ids) + + def internalize(self, id): + '''return a list of (or set if given a set) of node ixs''' + return self._internalize(id) + + def internalizeall(self, ids, filterunknown=False): + '''return a list of (or set if given a set) of node ids''' + ixs = self._internalizeall(ids, filterunknown) + if isinstance(ids, set): + return set(ixs) + return list(ixs) + + +class genericdag(basedag): + '''generic implementations for DAGs''' + + def ancestorset(self, starts, stops=None): + stops = stops and set(stops) or set() + seen = set() + pending = list(starts) + while pending: + n = pending.pop() + if n not in seen and n not in stops: + seen.add(n) + pending.extend(self.parents(n)) + return seen + + def headsetofconnecteds(self, ixs): + hds = set(ixs) + if not hds: + return hds + for n in ixs: + for p in self.parents(n): + hds.discard(p) + assert hds + return hds + + +class revlogbaseddag(basedag): + '''generic dag interface to a revlog''' + + def __init__(self, revlog, nodeset): + basedag.__init__(self) + self._revlog = revlog + self._heads = None + self._nodeset = nodeset + + def nodeset(self): + return self._nodeset + + def heads(self): + if self._heads is None: + self._heads = self._getheads() + return self._heads + + def _externalize(self, ix): + return self._revlog.index[ix][7] + def _externalizeall(self, ixs): + idx = self._revlog.index + return [idx[i][7] for i in ixs] + + def _internalize(self, id): + ix = self._revlog.rev(id) + if ix == nullrev: + raise LookupError(id, self._revlog.indexfile, _('nullid')) + return ix + def _internalizeall(self, ids, filterunknown): + rl = self._revlog + if filterunknown: + return [r for r 
in map(rl.nodemap.get, ids) + if r is not None and r != nullrev] + return map(self._internalize, ids) + + +class revlogdag(revlogbaseddag): + '''dag interface to a revlog''' + + def __init__(self, revlog): + revlogbaseddag.__init__(self, revlog, set(xrange(len(revlog)))) + + def _getheads(self): + return [r for r in self._revlog.headrevs() if r != nullrev] + + def parents(self, ix): + rlog = self._revlog + idx = rlog.index + revdata = idx[ix] + prev = revdata[5] + if prev != nullrev: + prev2 = revdata[6] + if prev2 == nullrev: + return [prev] + return [prev, prev2] + prev2 = revdata[6] + if prev2 != nullrev: + return [prev2] + return [] + + def inverse(self): + if self._inverse is None: + self._inverse = inverserevlogdag(self) + return self._inverse + + def ancestorset(self, starts, stops=None): + rlog = self._revlog + idx = rlog.index + stops = stops and set(stops) or set() + seen = set() + pending = list(starts) + while pending: + rev = pending.pop() + if rev not in seen and rev not in stops: + seen.add(rev) + revdata = idx[rev] + for i in [5, 6]: + prev = revdata[i] + if prev != nullrev: + pending.append(prev) + return seen + + def headsetofconnecteds(self, ixs): + if not ixs: + return set() + rlog = self._revlog + idx = rlog.index + headrevs = set(ixs) + for rev in ixs: + revdata = idx[rev] + for i in [5, 6]: + prev = revdata[i] + if prev != nullrev: + headrevs.discard(prev) + assert headrevs + return headrevs + + def linearize(self, ixs): + '''linearize and topologically sort a list of revisions + + The linearization process tries to create long runs of revs where + a child rev comes immediately after its first parent. This is done by + visiting the heads of the given revs in inverse topological order, + and for each visited rev, visiting its second parent, then its first + parent, then adding the rev itself to the output list. 
+ ''' + sorted = [] + visit = list(self.headsetofconnecteds(ixs)) + visit.sort(reverse=True) + finished = set() + + while visit: + cur = visit.pop() + if cur < 0: + cur = -cur - 1 + if cur not in finished: + sorted.append(cur) + finished.add(cur) + else: + visit.append(-cur - 1) + visit += [p for p in self.parents(cur) + if p in ixs and p not in finished] + assert len(sorted) == len(ixs) + return sorted + + +class inverserevlogdag(revlogbaseddag, genericdag): + '''inverse of an existing revlog dag; see revlogdag.inverse()''' + + def __init__(self, orig): + revlogbaseddag.__init__(self, orig._revlog, orig._nodeset) + self._orig = orig + self._children = {} + self._roots = [] + self._walkfrom = len(self._revlog) - 1 + + def _walkto(self, walkto): + rev = self._walkfrom + cs = self._children + roots = self._roots + idx = self._revlog.index + while rev >= walkto: + data = idx[rev] + isroot = True + for prev in [data[5], data[6]]: # parent revs + if prev != nullrev: + cs.setdefault(prev, []).append(rev) + isroot = False + if isroot: + roots.append(rev) + rev -= 1 + self._walkfrom = rev + + def _getheads(self): + self._walkto(nullrev) + return self._roots + + def parents(self, ix): + if ix is None: + return [] + if ix <= self._walkfrom: + self._walkto(ix) + return self._children.get(ix, []) + + def inverse(self): + return self._orig diff --git a/websdk/mercurial/demandimport.py b/websdk/mercurial/demandimport.py index 2498fbb..e439487 120000..100644 --- a/websdk/mercurial/demandimport.py +++ b/websdk/mercurial/demandimport.py @@ -1 +1,155 @@ -/usr/share/pyshared/mercurial/demandimport.py \ No newline at end of file +# demandimport.py - global demand-loading of modules for Mercurial +# +# Copyright 2006, 2007 Matt Mackall +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. 
+ +''' +demandimport - automatic demandloading of modules + +To enable this module, do: + + import demandimport; demandimport.enable() + +Imports of the following forms will be demand-loaded: + + import a, b.c + import a.b as c + from a import b,c # a will be loaded immediately + +These imports will not be delayed: + + from a import * + b = __import__(a) +''' + +import __builtin__ +_origimport = __import__ + +nothing = object() + +try: + _origimport(__builtin__.__name__, {}, {}, None, -1) +except TypeError: # no level argument + def _import(name, globals, locals, fromlist, level): + "call _origimport with no level argument" + return _origimport(name, globals, locals, fromlist) +else: + _import = _origimport + +class _demandmod(object): + """module demand-loader and proxy""" + def __init__(self, name, globals, locals): + if '.' in name: + head, rest = name.split('.', 1) + after = [rest] + else: + head = name + after = [] + object.__setattr__(self, "_data", (head, globals, locals, after)) + object.__setattr__(self, "_module", None) + def _extend(self, name): + """add to the list of submodules to load""" + self._data[3].append(name) + def _load(self): + if not self._module: + head, globals, locals, after = self._data + mod = _origimport(head, globals, locals) + # load submodules + def subload(mod, p): + h, t = p, None + if '.' in p: + h, t = p.split('.', 1) + if getattr(mod, h, nothing) is nothing: + setattr(mod, h, _demandmod(p, mod.__dict__, mod.__dict__)) + elif t: + subload(getattr(mod, h), t) + + for x in after: + subload(mod, x) + + # are we in the locals dictionary still? 
+ if locals and locals.get(head) == self: + locals[head] = mod + object.__setattr__(self, "_module", mod) + + def __repr__(self): + if self._module: + return "" % self._data[0] + return "" % self._data[0] + def __call__(self, *args, **kwargs): + raise TypeError("%s object is not callable" % repr(self)) + def __getattribute__(self, attr): + if attr in ('_data', '_extend', '_load', '_module'): + return object.__getattribute__(self, attr) + self._load() + return getattr(self._module, attr) + def __setattr__(self, attr, val): + self._load() + setattr(self._module, attr, val) + +def _demandimport(name, globals=None, locals=None, fromlist=None, level=-1): + if not locals or name in ignore or fromlist == ('*',): + # these cases we can't really delay + return _import(name, globals, locals, fromlist, level) + elif not fromlist: + # import a [as b] + if '.' in name: # a.b + base, rest = name.split('.', 1) + # email.__init__ loading email.mime + if globals and globals.get('__name__', None) == base: + return _import(name, globals, locals, fromlist, level) + # if a is already demand-loaded, add b to its submodule list + if base in locals: + if isinstance(locals[base], _demandmod): + locals[base]._extend(rest) + return locals[base] + return _demandmod(name, globals, locals) + else: + if level != -1: + # from . 
import b,c,d or from .a import b,c,d + return _origimport(name, globals, locals, fromlist, level) + # from a import b,c,d + mod = _origimport(name, globals, locals) + # recurse down the module chain + for comp in name.split('.')[1:]: + if getattr(mod, comp, nothing) is nothing: + setattr(mod, comp, _demandmod(comp, mod.__dict__, mod.__dict__)) + mod = getattr(mod, comp) + for x in fromlist: + # set requested submodules for demand load + if getattr(mod, x, nothing) is nothing: + setattr(mod, x, _demandmod(x, mod.__dict__, locals)) + return mod + +ignore = [ + '_hashlib', + '_xmlplus', + 'fcntl', + 'win32com.gen_py', + '_winreg', # 2.7 mimetypes needs immediate ImportError + 'pythoncom', + # imported by tarfile, not available under Windows + 'pwd', + 'grp', + # imported by profile, itself imported by hotshot.stats, + # not available under Windows + 'resource', + # this trips up many extension authors + 'gtk', + # setuptools' pkg_resources.py expects "from __main__ import x" to + # raise ImportError if x not defined + '__main__', + '_ssl', # conditional imports in the stdlib, issue1964 + 'rfc822', + 'mimetools', + ] + +def enable(): + "enable global demand-loading of modules" + __builtin__.__import__ = _demandimport + +def disable(): + "disable global demand-loading of modules" + __builtin__.__import__ = _origimport diff --git a/websdk/mercurial/dirstate.py b/websdk/mercurial/dirstate.py index 95a0490..50af321 120000..100644 --- a/websdk/mercurial/dirstate.py +++ b/websdk/mercurial/dirstate.py @@ -1 +1,725 @@ -/usr/share/pyshared/mercurial/dirstate.py \ No newline at end of file +# dirstate.py - working directory tracking for mercurial +# +# Copyright 2005-2007 Matt Mackall +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. 
+ +from node import nullid +from i18n import _ +import scmutil, util, ignore, osutil, parsers, encoding +import struct, os, stat, errno +import cStringIO + +_format = ">cllll" +propertycache = util.propertycache + +def _finddirs(path): + pos = path.rfind('/') + while pos != -1: + yield path[:pos] + pos = path.rfind('/', 0, pos) + +def _incdirs(dirs, path): + for base in _finddirs(path): + if base in dirs: + dirs[base] += 1 + return + dirs[base] = 1 + +def _decdirs(dirs, path): + for base in _finddirs(path): + if dirs[base] > 1: + dirs[base] -= 1 + return + del dirs[base] + +class dirstate(object): + + def __init__(self, opener, ui, root, validate): + '''Create a new dirstate object. + + opener is an open()-like callable that can be used to open the + dirstate file; root is the root of the directory tracked by + the dirstate. + ''' + self._opener = opener + self._validate = validate + self._root = root + self._rootdir = os.path.join(root, '') + self._dirty = False + self._dirtypl = False + self._lastnormaltime = None + self._ui = ui + + @propertycache + def _map(self): + '''Return the dirstate contents as a map from filename to + (state, mode, size, time).''' + self._read() + return self._map + + @propertycache + def _copymap(self): + self._read() + return self._copymap + + @propertycache + def _foldmap(self): + f = {} + for name in self._map: + f[util.normcase(name)] = name + return f + + @propertycache + def _branch(self): + try: + return self._opener.read("branch").strip() or "default" + except IOError: + return "default" + + @propertycache + def _pl(self): + try: + fp = self._opener("dirstate") + st = fp.read(40) + fp.close() + l = len(st) + if l == 40: + return st[:20], st[20:40] + elif l > 0 and l < 40: + raise util.Abort(_('working directory state appears damaged!')) + except IOError, err: + if err.errno != errno.ENOENT: + raise + return [nullid, nullid] + + @propertycache + def _dirs(self): + dirs = {} + for f, s in self._map.iteritems(): + if s[0] != 'r': + 
_incdirs(dirs, f) + return dirs + + @propertycache + def _ignore(self): + files = [self._join('.hgignore')] + for name, path in self._ui.configitems("ui"): + if name == 'ignore' or name.startswith('ignore.'): + files.append(util.expandpath(path)) + return ignore.ignore(self._root, files, self._ui.warn) + + @propertycache + def _slash(self): + return self._ui.configbool('ui', 'slash') and os.sep != '/' + + @propertycache + def _checklink(self): + return util.checklink(self._root) + + @propertycache + def _checkexec(self): + return util.checkexec(self._root) + + @propertycache + def _checkcase(self): + return not util.checkcase(self._join('.hg')) + + def _join(self, f): + # much faster than os.path.join() + # it's safe because f is always a relative path + return self._rootdir + f + + def flagfunc(self, buildfallback): + if self._checklink and self._checkexec: + def f(x): + p = self._join(x) + if os.path.islink(p): + return 'l' + if util.isexec(p): + return 'x' + return '' + return f + + fallback = buildfallback() + if self._checklink: + def f(x): + if os.path.islink(self._join(x)): + return 'l' + if 'x' in fallback(x): + return 'x' + return '' + return f + if self._checkexec: + def f(x): + if 'l' in fallback(x): + return 'l' + if util.isexec(self._join(x)): + return 'x' + return '' + return f + else: + return fallback + + def getcwd(self): + cwd = os.getcwd() + if cwd == self._root: + return '' + # self._root ends with a path separator if self._root is '/' or 'C:\' + rootsep = self._root + if not util.endswithsep(rootsep): + rootsep += os.sep + if cwd.startswith(rootsep): + return cwd[len(rootsep):] + else: + # we're outside the repo. return an absolute path. + return cwd + + def pathto(self, f, cwd=None): + if cwd is None: + cwd = self.getcwd() + path = util.pathto(self._root, cwd, f) + if self._slash: + return util.normpath(path) + return path + + def __getitem__(self, key): + '''Return the current state of key (a filename) in the dirstate. 
+ + States are: + n normal + m needs merging + r marked for removal + a marked for addition + ? not tracked + ''' + return self._map.get(key, ("?",))[0] + + def __contains__(self, key): + return key in self._map + + def __iter__(self): + for x in sorted(self._map): + yield x + + def parents(self): + return [self._validate(p) for p in self._pl] + + def p1(self): + return self._validate(self._pl[0]) + + def p2(self): + return self._validate(self._pl[1]) + + def branch(self): + return encoding.tolocal(self._branch) + + def setparents(self, p1, p2=nullid): + self._dirty = self._dirtypl = True + self._pl = p1, p2 + + def setbranch(self, branch): + if branch in ['tip', '.', 'null']: + raise util.Abort(_('the name \'%s\' is reserved') % branch) + self._branch = encoding.fromlocal(branch) + self._opener.write("branch", self._branch + '\n') + + def _read(self): + self._map = {} + self._copymap = {} + try: + st = self._opener.read("dirstate") + except IOError, err: + if err.errno != errno.ENOENT: + raise + return + if not st: + return + + p = parsers.parse_dirstate(self._map, self._copymap, st) + if not self._dirtypl: + self._pl = p + + def invalidate(self): + for a in ("_map", "_copymap", "_foldmap", "_branch", "_pl", "_dirs", + "_ignore"): + if a in self.__dict__: + delattr(self, a) + self._lastnormaltime = None + self._dirty = False + + def copy(self, source, dest): + """Mark dest as a copy of source. 
Unmark dest if source is None.""" + if source == dest: + return + self._dirty = True + if source is not None: + self._copymap[dest] = source + elif dest in self._copymap: + del self._copymap[dest] + + def copied(self, file): + return self._copymap.get(file, None) + + def copies(self): + return self._copymap + + def _droppath(self, f): + if self[f] not in "?r" and "_dirs" in self.__dict__: + _decdirs(self._dirs, f) + + def _addpath(self, f, check=False): + oldstate = self[f] + if check or oldstate == "r": + scmutil.checkfilename(f) + if f in self._dirs: + raise util.Abort(_('directory %r already in dirstate') % f) + # shadows + for d in _finddirs(f): + if d in self._dirs: + break + if d in self._map and self[d] != 'r': + raise util.Abort( + _('file %r in dirstate clashes with %r') % (d, f)) + if oldstate in "?r" and "_dirs" in self.__dict__: + _incdirs(self._dirs, f) + + def normal(self, f): + '''Mark a file normal and clean.''' + self._dirty = True + self._addpath(f) + s = os.lstat(self._join(f)) + mtime = int(s.st_mtime) + self._map[f] = ('n', s.st_mode, s.st_size, mtime) + if f in self._copymap: + del self._copymap[f] + if mtime > self._lastnormaltime: + # Remember the most recent modification timeslot for status(), + # to make sure we won't miss future size-preserving file content + # modifications that happen within the same timeslot. + self._lastnormaltime = mtime + + def normallookup(self, f): + '''Mark a file normal, but possibly dirty.''' + if self._pl[1] != nullid and f in self._map: + # if there is a merge going on and the file was either + # in state 'm' (-1) or coming from other parent (-2) before + # being removed, restore that state. 
+ entry = self._map[f] + if entry[0] == 'r' and entry[2] in (-1, -2): + source = self._copymap.get(f) + if entry[2] == -1: + self.merge(f) + elif entry[2] == -2: + self.otherparent(f) + if source: + self.copy(source, f) + return + if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2: + return + self._dirty = True + self._addpath(f) + self._map[f] = ('n', 0, -1, -1) + if f in self._copymap: + del self._copymap[f] + + def otherparent(self, f): + '''Mark as coming from the other parent, always dirty.''' + if self._pl[1] == nullid: + raise util.Abort(_("setting %r to other parent " + "only allowed in merges") % f) + self._dirty = True + self._addpath(f) + self._map[f] = ('n', 0, -2, -1) + if f in self._copymap: + del self._copymap[f] + + def add(self, f): + '''Mark a file added.''' + self._dirty = True + self._addpath(f, True) + self._map[f] = ('a', 0, -1, -1) + if f in self._copymap: + del self._copymap[f] + + def remove(self, f): + '''Mark a file removed.''' + self._dirty = True + self._droppath(f) + size = 0 + if self._pl[1] != nullid and f in self._map: + # backup the previous state + entry = self._map[f] + if entry[0] == 'm': # merge + size = -1 + elif entry[0] == 'n' and entry[2] == -2: # other parent + size = -2 + self._map[f] = ('r', 0, size, 0) + if size == 0 and f in self._copymap: + del self._copymap[f] + + def merge(self, f): + '''Mark a file merged.''' + self._dirty = True + s = os.lstat(self._join(f)) + self._addpath(f) + self._map[f] = ('m', s.st_mode, s.st_size, int(s.st_mtime)) + if f in self._copymap: + del self._copymap[f] + + def drop(self, f): + '''Drop a file from the dirstate''' + if f in self._map: + self._dirty = True + self._droppath(f) + del self._map[f] + + def _normalize(self, path, isknown): + normed = util.normcase(path) + folded = self._foldmap.get(normed, None) + if folded is None: + if isknown or not os.path.lexists(os.path.join(self._root, path)): + folded = path + else: + folded = self._foldmap.setdefault(normed, + 
util.fspath(path, self._root)) + return folded + + def normalize(self, path, isknown=False): + ''' + normalize the case of a pathname when on a casefolding filesystem + + isknown specifies whether the filename came from walking the + disk, to avoid extra filesystem access + + The normalized case is determined based on the following precedence: + + - version of name already stored in the dirstate + - version of name stored on disk + - version provided via command arguments + ''' + + if self._checkcase: + return self._normalize(path, isknown) + return path + + def clear(self): + self._map = {} + if "_dirs" in self.__dict__: + delattr(self, "_dirs") + self._copymap = {} + self._pl = [nullid, nullid] + self._lastnormaltime = None + self._dirty = True + + def rebuild(self, parent, files): + self.clear() + for f in files: + if 'x' in files.flags(f): + self._map[f] = ('n', 0777, -1, 0) + else: + self._map[f] = ('n', 0666, -1, 0) + self._pl = (parent, nullid) + self._dirty = True + + def write(self): + if not self._dirty: + return + st = self._opener("dirstate", "w", atomictemp=True) + + # use the modification time of the newly created temporary file as the + # filesystem's notion of 'now' + now = int(util.fstat(st).st_mtime) + + cs = cStringIO.StringIO() + copymap = self._copymap + pack = struct.pack + write = cs.write + write("".join(self._pl)) + for f, e in self._map.iteritems(): + if e[0] == 'n' and e[3] == now: + # The file was last modified "simultaneously" with the current + # write to dirstate (i.e. within the same second for file- + # systems with a granularity of 1 sec). This commonly happens + # for at least a couple of files on 'update'. + # The user could change the file without changing its size + # within the same second. Invalidate the file's stat data in + # dirstate, forcing future 'status' calls to compare the + # contents of the file. This prevents mistakenly treating such + # files as clean. 
+ e = (e[0], 0, -1, -1) # mark entry as 'unset' + self._map[f] = e + + if f in copymap: + f = "%s\0%s" % (f, copymap[f]) + e = pack(_format, e[0], e[1], e[2], e[3], len(f)) + write(e) + write(f) + st.write(cs.getvalue()) + st.close() + self._lastnormaltime = None + self._dirty = self._dirtypl = False + + def _dirignore(self, f): + if f == '.': + return False + if self._ignore(f): + return True + for p in _finddirs(f): + if self._ignore(p): + return True + return False + + def walk(self, match, subrepos, unknown, ignored): + ''' + Walk recursively through the directory tree, finding all files + matched by match. + + Return a dict mapping filename to stat-like object (either + mercurial.osutil.stat instance or return value of os.stat()). + ''' + + def fwarn(f, msg): + self._ui.warn('%s: %s\n' % (self.pathto(f), msg)) + return False + + def badtype(mode): + kind = _('unknown') + if stat.S_ISCHR(mode): + kind = _('character device') + elif stat.S_ISBLK(mode): + kind = _('block device') + elif stat.S_ISFIFO(mode): + kind = _('fifo') + elif stat.S_ISSOCK(mode): + kind = _('socket') + elif stat.S_ISDIR(mode): + kind = _('directory') + return _('unsupported file type (type is %s)') % kind + + ignore = self._ignore + dirignore = self._dirignore + if ignored: + ignore = util.never + dirignore = util.never + elif not unknown: + # if unknown and ignored are False, skip step 2 + ignore = util.always + dirignore = util.always + + matchfn = match.matchfn + badfn = match.bad + dmap = self._map + normpath = util.normpath + listdir = osutil.listdir + lstat = os.lstat + getkind = stat.S_IFMT + dirkind = stat.S_IFDIR + regkind = stat.S_IFREG + lnkkind = stat.S_IFLNK + join = self._join + work = [] + wadd = work.append + + exact = skipstep3 = False + if matchfn == match.exact: # match.exact + exact = True + dirignore = util.always # skip step 2 + elif match.files() and not match.anypats(): # match.match, no patterns + skipstep3 = True + + if self._checkcase: + normalize = 
self._normalize + skipstep3 = False + else: + normalize = lambda x, y: x + + files = sorted(match.files()) + subrepos.sort() + i, j = 0, 0 + while i < len(files) and j < len(subrepos): + subpath = subrepos[j] + "/" + if files[i] < subpath: + i += 1 + continue + while i < len(files) and files[i].startswith(subpath): + del files[i] + j += 1 + + if not files or '.' in files: + files = [''] + results = dict.fromkeys(subrepos) + results['.hg'] = None + + # step 1: find all explicit files + for ff in files: + nf = normalize(normpath(ff), False) + if nf in results: + continue + + try: + st = lstat(join(nf)) + kind = getkind(st.st_mode) + if kind == dirkind: + skipstep3 = False + if nf in dmap: + #file deleted on disk but still in dirstate + results[nf] = None + match.dir(nf) + if not dirignore(nf): + wadd(nf) + elif kind == regkind or kind == lnkkind: + results[nf] = st + else: + badfn(ff, badtype(kind)) + if nf in dmap: + results[nf] = None + except OSError, inst: + if nf in dmap: # does it exactly match a file? + results[nf] = None + else: # does it match a directory? 
+ prefix = nf + "/" + for fn in dmap: + if fn.startswith(prefix): + match.dir(nf) + skipstep3 = False + break + else: + badfn(ff, inst.strerror) + + # step 2: visit subdirectories + while work: + nd = work.pop() + skip = None + if nd == '.': + nd = '' + else: + skip = '.hg' + try: + entries = listdir(join(nd), stat=True, skip=skip) + except OSError, inst: + if inst.errno == errno.EACCES: + fwarn(nd, inst.strerror) + continue + raise + for f, kind, st in entries: + nf = normalize(nd and (nd + "/" + f) or f, True) + if nf not in results: + if kind == dirkind: + if not ignore(nf): + match.dir(nf) + wadd(nf) + if nf in dmap and matchfn(nf): + results[nf] = None + elif kind == regkind or kind == lnkkind: + if nf in dmap: + if matchfn(nf): + results[nf] = st + elif matchfn(nf) and not ignore(nf): + results[nf] = st + elif nf in dmap and matchfn(nf): + results[nf] = None + + # step 3: report unseen items in the dmap hash + if not skipstep3 and not exact: + visit = sorted([f for f in dmap if f not in results and matchfn(f)]) + for nf, st in zip(visit, util.statfiles([join(i) for i in visit])): + if not st is None and not getkind(st.st_mode) in (regkind, lnkkind): + st = None + results[nf] = st + for s in subrepos: + del results[s] + del results['.hg'] + return results + + def status(self, match, subrepos, ignored, clean, unknown): + '''Determine the status of the working copy relative to the + dirstate and return a tuple of lists (unsure, modified, added, + removed, deleted, unknown, ignored, clean), where: + + unsure: + files that might have been modified since the dirstate was + written, but need to be read to be sure (size is the same + but mtime differs) + modified: + files that have definitely been modified since the dirstate + was written (different size or mode) + added: + files that have been explicitly added with hg add + removed: + files that have been explicitly removed with hg remove + deleted: + files that have been deleted through other means ("missing") + 
unknown: + files not in the dirstate that are not ignored + ignored: + files not in the dirstate that are ignored + (by _dirignore()) + clean: + files that have definitely not been modified since the + dirstate was written + ''' + listignored, listclean, listunknown = ignored, clean, unknown + lookup, modified, added, unknown, ignored = [], [], [], [], [] + removed, deleted, clean = [], [], [] + + dmap = self._map + ladd = lookup.append # aka "unsure" + madd = modified.append + aadd = added.append + uadd = unknown.append + iadd = ignored.append + radd = removed.append + dadd = deleted.append + cadd = clean.append + + lnkkind = stat.S_IFLNK + + for fn, st in self.walk(match, subrepos, listunknown, + listignored).iteritems(): + if fn not in dmap: + if (listignored or match.exact(fn)) and self._dirignore(fn): + if listignored: + iadd(fn) + elif listunknown: + uadd(fn) + continue + + state, mode, size, time = dmap[fn] + + if not st and state in "nma": + dadd(fn) + elif state == 'n': + # The "mode & lnkkind != lnkkind or self._checklink" + # lines are an expansion of "islink => checklink" + # where islink means "is this a link?" and checklink + # means "can we check links?". + mtime = int(st.st_mtime) + if (size >= 0 and + (size != st.st_size + or ((mode ^ st.st_mode) & 0100 and self._checkexec)) + and (mode & lnkkind != lnkkind or self._checklink) + or size == -2 # other parent + or fn in self._copymap): + madd(fn) + elif (mtime != time + and (mode & lnkkind != lnkkind or self._checklink)): + ladd(fn) + elif mtime == self._lastnormaltime: + # fn may have been changed in the same timeslot without + # changing its size. This can happen if we quickly do + # multiple commits in a single transaction. + # Force lookup, so we don't miss such a racy file change. 
+ ladd(fn) + elif listclean: + cadd(fn) + elif state == 'm': + madd(fn) + elif state == 'a': + aadd(fn) + elif state == 'r': + radd(fn) + + return (lookup, modified, added, removed, deleted, unknown, ignored, + clean) diff --git a/websdk/mercurial/discovery.py b/websdk/mercurial/discovery.py index 77607e3..83b0c43 120000..100644 --- a/websdk/mercurial/discovery.py +++ b/websdk/mercurial/discovery.py @@ -1 +1,192 @@ -/usr/share/pyshared/mercurial/discovery.py \ No newline at end of file +# discovery.py - protocol changeset discovery functions +# +# Copyright 2010 Matt Mackall +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +from node import nullid, short +from i18n import _ +import util, setdiscovery, treediscovery + +def findcommonincoming(repo, remote, heads=None, force=False): + """Return a tuple (common, anyincoming, heads) used to identify the common + subset of nodes between repo and remote. + + "common" is a list of (at least) the heads of the common subset. + "anyincoming" is testable as a boolean indicating if any nodes are missing + locally. If remote does not support getbundle, this actually is a list of + roots of the nodes that would be incoming, to be supplied to + changegroupsubset. No code except for pull should be relying on this fact + any longer. + "heads" is either the supplied heads, or else the remote's heads. + + If you pass heads and they are all known locally, the reponse lists justs + these heads in "common" and in "heads". + + Please use findcommonoutgoing to compute the set of outgoing nodes to give + extensions a good hook into outgoing. 
+ """ + + if not remote.capable('getbundle'): + return treediscovery.findcommonincoming(repo, remote, heads, force) + + if heads: + allknown = True + nm = repo.changelog.nodemap + for h in heads: + if nm.get(h) is None: + allknown = False + break + if allknown: + return (heads, False, heads) + + res = setdiscovery.findcommonheads(repo.ui, repo, remote, + abortwhenunrelated=not force) + common, anyinc, srvheads = res + return (list(common), anyinc, heads or list(srvheads)) + +def findcommonoutgoing(repo, other, onlyheads=None, force=False, commoninc=None): + '''Return a tuple (common, anyoutgoing, heads) used to identify the set + of nodes present in repo but not in other. + + If onlyheads is given, only nodes ancestral to nodes in onlyheads (inclusive) + are included. If you already know the local repo's heads, passing them in + onlyheads is faster than letting them be recomputed here. + + If commoninc is given, it must the the result of a prior call to + findcommonincoming(repo, other, force) to avoid recomputing it here. + + The returned tuple is meant to be passed to changelog.findmissing.''' + common, _any, _hds = commoninc or findcommonincoming(repo, other, force=force) + return (common, onlyheads or repo.heads()) + +def prepush(repo, remote, force, revs, newbranch): + '''Analyze the local and remote repositories and determine which + changesets need to be pushed to the remote. Return value depends + on circumstances: + + If we are not going to push anything, return a tuple (None, + outgoing) where outgoing is 0 if there are no outgoing + changesets and 1 if there are, but we refuse to push them + (e.g. would create new remote heads). 
+ + Otherwise, return a tuple (changegroup, remoteheads), where + changegroup is a readable file-like object whose read() returns + successive changegroup chunks ready to be sent over the wire and + remoteheads is the list of remote heads.''' + commoninc = findcommonincoming(repo, remote, force=force) + common, revs = findcommonoutgoing(repo, remote, onlyheads=revs, + commoninc=commoninc, force=force) + _common, inc, remoteheads = commoninc + + cl = repo.changelog + outg = cl.findmissing(common, revs) + + if not outg: + repo.ui.status(_("no changes found\n")) + return None, 1 + + if not force and remoteheads != [nullid]: + if remote.capable('branchmap'): + # Check for each named branch if we're creating new remote heads. + # To be a remote head after push, node must be either: + # - unknown locally + # - a local outgoing head descended from update + # - a remote head that's known locally and not + # ancestral to an outgoing head + + # 1. Create set of branches involved in the push. + branches = set(repo[n].branch() for n in outg) + + # 2. Check for new branches on the remote. + remotemap = remote.branchmap() + newbranches = branches - set(remotemap) + if newbranches and not newbranch: # new branch requires --new-branch + branchnames = ', '.join(sorted(newbranches)) + raise util.Abort(_("push creates new remote branches: %s!") + % branchnames, + hint=_("use 'hg push --new-branch' to create" + " new remote branches")) + branches.difference_update(newbranches) + + # 3. Construct the initial oldmap and newmap dicts. + # They contain information about the remote heads before and + # after the push, respectively. + # Heads not found locally are not included in either dict, + # since they won't be affected by the push. + # unsynced contains all branches with incoming changesets. 
+ oldmap = {} + newmap = {} + unsynced = set() + for branch in branches: + remotebrheads = remotemap[branch] + prunedbrheads = [h for h in remotebrheads if h in cl.nodemap] + oldmap[branch] = prunedbrheads + newmap[branch] = list(prunedbrheads) + if len(remotebrheads) > len(prunedbrheads): + unsynced.add(branch) + + # 4. Update newmap with outgoing changes. + # This will possibly add new heads and remove existing ones. + ctxgen = (repo[n] for n in outg) + repo._updatebranchcache(newmap, ctxgen) + + else: + # 1-4b. old servers: Check for new topological heads. + # Construct {old,new}map with branch = None (topological branch). + # (code based on _updatebranchcache) + oldheads = set(h for h in remoteheads if h in cl.nodemap) + newheads = oldheads.union(outg) + if len(newheads) > 1: + for latest in reversed(outg): + if latest not in newheads: + continue + minhrev = min(cl.rev(h) for h in newheads) + reachable = cl.reachable(latest, cl.node(minhrev)) + reachable.remove(latest) + newheads.difference_update(reachable) + branches = set([None]) + newmap = {None: newheads} + oldmap = {None: oldheads} + unsynced = inc and branches or set() + + # 5. Check for new heads. + # If there are more heads after the push than before, a suitable + # error message, depending on unsynced status, is displayed. + error = None + for branch in branches: + newhs = set(newmap[branch]) + oldhs = set(oldmap[branch]) + if len(newhs) > len(oldhs): + dhs = list(newhs - oldhs) + if error is None: + if branch not in ('default', None): + error = _("push creates new remote head %s " + "on branch '%s'!") % (short(dhs[0]), branch) + else: + error = _("push creates new remote head %s!" + ) % short(dhs[0]) + if branch in unsynced: + hint = _("you should pull and merge or " + "use push -f to force") + else: + hint = _("did you forget to merge? 
" + "use push -f to force") + if branch is not None: + repo.ui.note(_("new remote heads on branch '%s'\n") % branch) + for h in dhs: + repo.ui.note(_("new remote head %s\n") % short(h)) + if error: + raise util.Abort(error, hint=hint) + + # 6. Check for unsynced changes on involved branches. + if unsynced: + repo.ui.warn(_("note: unsynced remote changes!\n")) + + if revs is None: + # use the fast path, no race possible on push + cg = repo._changegroup(outg, 'push') + else: + cg = repo.getbundle('push', heads=revs, common=common) + return cg, remoteheads diff --git a/websdk/mercurial/dispatch.py b/websdk/mercurial/dispatch.py index 611f8a9..f678c45 120000..100644 --- a/websdk/mercurial/dispatch.py +++ b/websdk/mercurial/dispatch.py @@ -1 +1,738 @@ -/usr/share/pyshared/mercurial/dispatch.py \ No newline at end of file +# dispatch.py - command dispatching for mercurial +# +# Copyright 2005-2007 Matt Mackall +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. 
+ +from i18n import _ +import os, sys, atexit, signal, pdb, socket, errno, shlex, time, traceback, re +import util, commands, hg, fancyopts, extensions, hook, error +import cmdutil, encoding +import ui as uimod + +class request(object): + def __init__(self, args, ui=None, repo=None, fin=None, fout=None, ferr=None): + self.args = args + self.ui = ui + self.repo = repo + + # input/output/error streams + self.fin = fin + self.fout = fout + self.ferr = ferr + +def run(): + "run the command in sys.argv" + sys.exit(dispatch(request(sys.argv[1:]))) + +def dispatch(req): + "run the command specified in req.args" + if req.ferr: + ferr = req.ferr + elif req.ui: + ferr = req.ui.ferr + else: + ferr = sys.stderr + + try: + if not req.ui: + req.ui = uimod.ui() + if '--traceback' in req.args: + req.ui.setconfig('ui', 'traceback', 'on') + + # set ui streams from the request + if req.fin: + req.ui.fin = req.fin + if req.fout: + req.ui.fout = req.fout + if req.ferr: + req.ui.ferr = req.ferr + except util.Abort, inst: + ferr.write(_("abort: %s\n") % inst) + if inst.hint: + ferr.write(_("(%s)\n") % inst.hint) + return -1 + except error.ParseError, inst: + if len(inst.args) > 1: + ferr.write(_("hg: parse error at %s: %s\n") % + (inst.args[1], inst.args[0])) + else: + ferr.write(_("hg: parse error: %s\n") % inst.args[0]) + return -1 + + return _runcatch(req) + +def _runcatch(req): + def catchterm(*args): + raise error.SignalInterrupt + + ui = req.ui + try: + for name in 'SIGBREAK', 'SIGHUP', 'SIGTERM': + num = getattr(signal, name, None) + if num: + signal.signal(num, catchterm) + except ValueError: + pass # happens if called in a thread + + try: + try: + # enter the debugger before command execution + if '--debugger' in req.args: + ui.warn(_("entering debugger - " + "type c to continue starting hg or h for help\n")) + pdb.set_trace() + try: + return _dispatch(req) + finally: + ui.flush() + except: + # enter the debugger when we hit an exception + if '--debugger' in req.args: + 
traceback.print_exc() + pdb.post_mortem(sys.exc_info()[2]) + ui.traceback() + raise + + # Global exception handling, alphabetically + # Mercurial-specific first, followed by built-in and library exceptions + except error.AmbiguousCommand, inst: + ui.warn(_("hg: command '%s' is ambiguous:\n %s\n") % + (inst.args[0], " ".join(inst.args[1]))) + except error.ParseError, inst: + if len(inst.args) > 1: + ui.warn(_("hg: parse error at %s: %s\n") % + (inst.args[1], inst.args[0])) + else: + ui.warn(_("hg: parse error: %s\n") % inst.args[0]) + return -1 + except error.LockHeld, inst: + if inst.errno == errno.ETIMEDOUT: + reason = _('timed out waiting for lock held by %s') % inst.locker + else: + reason = _('lock held by %s') % inst.locker + ui.warn(_("abort: %s: %s\n") % (inst.desc or inst.filename, reason)) + except error.LockUnavailable, inst: + ui.warn(_("abort: could not lock %s: %s\n") % + (inst.desc or inst.filename, inst.strerror)) + except error.CommandError, inst: + if inst.args[0]: + ui.warn(_("hg %s: %s\n") % (inst.args[0], inst.args[1])) + commands.help_(ui, inst.args[0], full=False, command=True) + else: + ui.warn(_("hg: %s\n") % inst.args[1]) + commands.help_(ui, 'shortlist') + except error.OutOfBandError, inst: + ui.warn(_("abort: remote error:\n")) + ui.warn(''.join(inst.args)) + except error.RepoError, inst: + ui.warn(_("abort: %s!\n") % inst) + if inst.hint: + ui.warn(_("(%s)\n") % inst.hint) + except error.ResponseError, inst: + ui.warn(_("abort: %s") % inst.args[0]) + if not isinstance(inst.args[1], basestring): + ui.warn(" %r\n" % (inst.args[1],)) + elif not inst.args[1]: + ui.warn(_(" empty string\n")) + else: + ui.warn("\n%r\n" % util.ellipsis(inst.args[1])) + except error.RevlogError, inst: + ui.warn(_("abort: %s!\n") % inst) + except error.SignalInterrupt: + ui.warn(_("killed!\n")) + except error.UnknownCommand, inst: + ui.warn(_("hg: unknown command '%s'\n") % inst.args[0]) + try: + # check if the command is in a disabled extension + # (but don't 
check for extensions themselves) + commands.help_(ui, inst.args[0], unknowncmd=True) + except error.UnknownCommand: + commands.help_(ui, 'shortlist') + except util.Abort, inst: + ui.warn(_("abort: %s\n") % inst) + if inst.hint: + ui.warn(_("(%s)\n") % inst.hint) + except ImportError, inst: + ui.warn(_("abort: %s!\n") % inst) + m = str(inst).split()[-1] + if m in "mpatch bdiff".split(): + ui.warn(_("(did you forget to compile extensions?)\n")) + elif m in "zlib".split(): + ui.warn(_("(is your Python install correct?)\n")) + except IOError, inst: + if util.safehasattr(inst, "code"): + ui.warn(_("abort: %s\n") % inst) + elif util.safehasattr(inst, "reason"): + try: # usually it is in the form (errno, strerror) + reason = inst.reason.args[1] + except (AttributeError, IndexError): + # it might be anything, for example a string + reason = inst.reason + ui.warn(_("abort: error: %s\n") % reason) + elif util.safehasattr(inst, "args") and inst.args[0] == errno.EPIPE: + if ui.debugflag: + ui.warn(_("broken pipe\n")) + elif getattr(inst, "strerror", None): + if getattr(inst, "filename", None): + ui.warn(_("abort: %s: %s\n") % (inst.strerror, inst.filename)) + else: + ui.warn(_("abort: %s\n") % inst.strerror) + else: + raise + except OSError, inst: + if getattr(inst, "filename", None): + ui.warn(_("abort: %s: %s\n") % (inst.strerror, inst.filename)) + else: + ui.warn(_("abort: %s\n") % inst.strerror) + except KeyboardInterrupt: + try: + ui.warn(_("interrupted!\n")) + except IOError, inst: + if inst.errno == errno.EPIPE: + if ui.debugflag: + ui.warn(_("\nbroken pipe\n")) + else: + raise + except MemoryError: + ui.warn(_("abort: out of memory\n")) + except SystemExit, inst: + # Commands shouldn't sys.exit directly, but give a return code. + # Just in case catch this and and pass exit code to caller. 
+ return inst.code + except socket.error, inst: + ui.warn(_("abort: %s\n") % inst.args[-1]) + except: + ui.warn(_("** unknown exception encountered," + " please report by visiting\n")) + ui.warn(_("** http://mercurial.selenic.com/wiki/BugTracker\n")) + ui.warn(_("** Python %s\n") % sys.version.replace('\n', '')) + ui.warn(_("** Mercurial Distributed SCM (version %s)\n") + % util.version()) + ui.warn(_("** Extensions loaded: %s\n") + % ", ".join([x[0] for x in extensions.extensions()])) + raise + + return -1 + +def aliasargs(fn, givenargs): + args = getattr(fn, 'args', []) + if args and givenargs: + cmd = ' '.join(map(util.shellquote, args)) + + nums = [] + def replacer(m): + num = int(m.group(1)) - 1 + nums.append(num) + return givenargs[num] + cmd = re.sub(r'\$(\d+|\$)', replacer, cmd) + givenargs = [x for i, x in enumerate(givenargs) + if i not in nums] + args = shlex.split(cmd) + return args + givenargs + +class cmdalias(object): + def __init__(self, name, definition, cmdtable): + self.name = self.cmd = name + self.cmdname = '' + self.definition = definition + self.args = [] + self.opts = [] + self.help = '' + self.norepo = True + self.badalias = False + + try: + aliases, entry = cmdutil.findcmd(self.name, cmdtable) + for alias, e in cmdtable.iteritems(): + if e is entry: + self.cmd = alias + break + self.shadows = True + except error.UnknownCommand: + self.shadows = False + + if not self.definition: + def fn(ui, *args): + ui.warn(_("no definition for alias '%s'\n") % self.name) + return 1 + self.fn = fn + self.badalias = True + + return + + if self.definition.startswith('!'): + self.shell = True + def fn(ui, *args): + env = {'HG_ARGS': ' '.join((self.name,) + args)} + def _checkvar(m): + if m.groups()[0] == '$': + return m.group() + elif int(m.groups()[0]) <= len(args): + return m.group() + else: + ui.debug("No argument found for substitution " + "of %i variable in alias '%s' definition." 
+ % (int(m.groups()[0]), self.name)) + return '' + cmd = re.sub(r'\$(\d+|\$)', _checkvar, self.definition[1:]) + replace = dict((str(i + 1), arg) for i, arg in enumerate(args)) + replace['0'] = self.name + replace['@'] = ' '.join(args) + cmd = util.interpolate(r'\$', replace, cmd, escape_prefix=True) + return util.system(cmd, environ=env, out=ui.fout) + self.fn = fn + return + + args = shlex.split(self.definition) + self.cmdname = cmd = args.pop(0) + args = map(util.expandpath, args) + + for invalidarg in ("--cwd", "-R", "--repository", "--repo"): + if _earlygetopt([invalidarg], args): + def fn(ui, *args): + ui.warn(_("error in definition for alias '%s': %s may only " + "be given on the command line\n") + % (self.name, invalidarg)) + return 1 + + self.fn = fn + self.badalias = True + return + + try: + tableentry = cmdutil.findcmd(cmd, cmdtable, False)[1] + if len(tableentry) > 2: + self.fn, self.opts, self.help = tableentry + else: + self.fn, self.opts = tableentry + + self.args = aliasargs(self.fn, args) + if cmd not in commands.norepo.split(' '): + self.norepo = False + if self.help.startswith("hg " + cmd): + # drop prefix in old-style help lines so hg shows the alias + self.help = self.help[4 + len(cmd):] + self.__doc__ = self.fn.__doc__ + + except error.UnknownCommand: + def fn(ui, *args): + ui.warn(_("alias '%s' resolves to unknown command '%s'\n") \ + % (self.name, cmd)) + try: + # check if the command is in a disabled extension + commands.help_(ui, cmd, unknowncmd=True) + except error.UnknownCommand: + pass + return 1 + self.fn = fn + self.badalias = True + except error.AmbiguousCommand: + def fn(ui, *args): + ui.warn(_("alias '%s' resolves to ambiguous command '%s'\n") \ + % (self.name, cmd)) + return 1 + self.fn = fn + self.badalias = True + + def __call__(self, ui, *args, **opts): + if self.shadows: + ui.debug("alias '%s' shadows command '%s'\n" % + (self.name, self.cmdname)) + + if util.safehasattr(self, 'shell'): + return self.fn(ui, *args, **opts) + 
else: + try: + util.checksignature(self.fn)(ui, *args, **opts) + except error.SignatureError: + args = ' '.join([self.cmdname] + self.args) + ui.debug("alias '%s' expands to '%s'\n" % (self.name, args)) + raise + +def addaliases(ui, cmdtable): + # aliases are processed after extensions have been loaded, so they + # may use extension commands. Aliases can also use other alias definitions, + # but only if they have been defined prior to the current definition. + for alias, definition in ui.configitems('alias'): + aliasdef = cmdalias(alias, definition, cmdtable) + + try: + olddef = cmdtable[aliasdef.cmd][0] + if olddef.definition == aliasdef.definition: + continue + except (KeyError, AttributeError): + # definition might not exist or it might not be a cmdalias + pass + + cmdtable[aliasdef.name] = (aliasdef, aliasdef.opts, aliasdef.help) + if aliasdef.norepo: + commands.norepo += ' %s' % alias + +def _parse(ui, args): + options = {} + cmdoptions = {} + + try: + args = fancyopts.fancyopts(args, commands.globalopts, options) + except fancyopts.getopt.GetoptError, inst: + raise error.CommandError(None, inst) + + if args: + cmd, args = args[0], args[1:] + aliases, entry = cmdutil.findcmd(cmd, commands.table, + ui.config("ui", "strict")) + cmd = aliases[0] + args = aliasargs(entry[0], args) + defaults = ui.config("defaults", cmd) + if defaults: + args = map(util.expandpath, shlex.split(defaults)) + args + c = list(entry[1]) + else: + cmd = None + c = [] + + # combine global options into local + for o in commands.globalopts: + c.append((o[0], o[1], options[o[1]], o[3])) + + try: + args = fancyopts.fancyopts(args, c, cmdoptions, True) + except fancyopts.getopt.GetoptError, inst: + raise error.CommandError(cmd, inst) + + # separate global options back out + for o in commands.globalopts: + n = o[1] + options[n] = cmdoptions[n] + del cmdoptions[n] + + return (cmd, cmd and entry[0] or None, args, options, cmdoptions) + +def _parseconfig(ui, config): + """parse the --config 
options from the command line""" + configs = [] + + for cfg in config: + try: + name, value = cfg.split('=', 1) + section, name = name.split('.', 1) + if not section or not name: + raise IndexError + ui.setconfig(section, name, value) + configs.append((section, name, value)) + except (IndexError, ValueError): + raise util.Abort(_('malformed --config option: %r ' + '(use --config section.name=value)') % cfg) + + return configs + +def _earlygetopt(aliases, args): + """Return list of values for an option (or aliases). + + The values are listed in the order they appear in args. + The options and values are removed from args. + """ + try: + argcount = args.index("--") + except ValueError: + argcount = len(args) + shortopts = [opt for opt in aliases if len(opt) == 2] + values = [] + pos = 0 + while pos < argcount: + if args[pos] in aliases: + if pos + 1 >= argcount: + # ignore and let getopt report an error if there is no value + break + del args[pos] + values.append(args.pop(pos)) + argcount -= 2 + elif args[pos][:2] in shortopts: + # short option can have no following space, e.g. hg log -Rfoo + values.append(args.pop(pos)[2:]) + argcount -= 1 + else: + pos += 1 + return values + +def runcommand(lui, repo, cmd, fullargs, ui, options, d, cmdpats, cmdoptions): + # run pre-hook, and abort if it fails + ret = hook.hook(lui, repo, "pre-%s" % cmd, False, args=" ".join(fullargs), + pats=cmdpats, opts=cmdoptions) + if ret: + return ret + ret = _runcommand(ui, options, cmd, d) + # run post-hook, passing command result + hook.hook(lui, repo, "post-%s" % cmd, False, args=" ".join(fullargs), + result=ret, pats=cmdpats, opts=cmdoptions) + return ret + +def _getlocal(ui, rpath): + """Return (path, local ui object) for the given target path. + + Takes paths in [cwd]/.hg/hgrc into account." 
+ """ + try: + wd = os.getcwd() + except OSError, e: + raise util.Abort(_("error getting current working directory: %s") % + e.strerror) + path = cmdutil.findrepo(wd) or "" + if not path: + lui = ui + else: + lui = ui.copy() + lui.readconfig(os.path.join(path, ".hg", "hgrc"), path) + + if rpath and rpath[-1]: + path = lui.expandpath(rpath[-1]) + lui = ui.copy() + lui.readconfig(os.path.join(path, ".hg", "hgrc"), path) + + return path, lui + +def _checkshellalias(lui, ui, args): + norepo = commands.norepo + options = {} + + try: + args = fancyopts.fancyopts(args, commands.globalopts, options) + except fancyopts.getopt.GetoptError: + return + + if not args: + return + + cmdtable = commands.table.copy() + addaliases(lui, cmdtable) + + cmd = args[0] + try: + aliases, entry = cmdutil.findcmd(cmd, cmdtable, lui.config("ui", "strict")) + except (error.AmbiguousCommand, error.UnknownCommand): + commands.norepo = norepo + return + + cmd = aliases[0] + fn = entry[0] + + if cmd and util.safehasattr(fn, 'shell'): + d = lambda: fn(ui, *args[1:]) + return lambda: runcommand(lui, None, cmd, args[:1], ui, options, d, [], {}) + + commands.norepo = norepo + +_loaded = set() +def _dispatch(req): + args = req.args + ui = req.ui + + # read --config before doing anything else + # (e.g. to change trust settings for reading .hg/hgrc) + cfgs = _parseconfig(ui, _earlygetopt(['--config'], args)) + + # check for cwd + cwd = _earlygetopt(['--cwd'], args) + if cwd: + os.chdir(cwd[-1]) + + rpath = _earlygetopt(["-R", "--repository", "--repo"], args) + path, lui = _getlocal(ui, rpath) + + # Now that we're operating in the right directory/repository with + # the right config settings, check for shell aliases + shellaliasfn = _checkshellalias(lui, ui, args) + if shellaliasfn: + return shellaliasfn() + + # Configure extensions in phases: uisetup, extsetup, cmdtable, and + # reposetup. Programs like TortoiseHg will call _dispatch several + # times so we keep track of configured extensions in _loaded. 
+ extensions.loadall(lui) + exts = [ext for ext in extensions.extensions() if ext[0] not in _loaded] + # Propagate any changes to lui.__class__ by extensions + ui.__class__ = lui.__class__ + + # (uisetup and extsetup are handled in extensions.loadall) + + for name, module in exts: + cmdtable = getattr(module, 'cmdtable', {}) + overrides = [cmd for cmd in cmdtable if cmd in commands.table] + if overrides: + ui.warn(_("extension '%s' overrides commands: %s\n") + % (name, " ".join(overrides))) + commands.table.update(cmdtable) + _loaded.add(name) + + # (reposetup is handled in hg.repository) + + addaliases(lui, commands.table) + + # check for fallback encoding + fallback = lui.config('ui', 'fallbackencoding') + if fallback: + encoding.fallbackencoding = fallback + + fullargs = args + cmd, func, args, options, cmdoptions = _parse(lui, args) + + if options["config"]: + raise util.Abort(_("option --config may not be abbreviated!")) + if options["cwd"]: + raise util.Abort(_("option --cwd may not be abbreviated!")) + if options["repository"]: + raise util.Abort(_( + "Option -R has to be separated from other options (e.g. 
not -qR) " + "and --repository may only be abbreviated as --repo!")) + + if options["encoding"]: + encoding.encoding = options["encoding"] + if options["encodingmode"]: + encoding.encodingmode = options["encodingmode"] + if options["time"]: + def get_times(): + t = os.times() + if t[4] == 0.0: # Windows leaves this as zero, so use time.clock() + t = (t[0], t[1], t[2], t[3], time.clock()) + return t + s = get_times() + def print_time(): + t = get_times() + ui.warn(_("Time: real %.3f secs (user %.3f+%.3f sys %.3f+%.3f)\n") % + (t[4]-s[4], t[0]-s[0], t[2]-s[2], t[1]-s[1], t[3]-s[3])) + atexit.register(print_time) + + uis = set([ui, lui]) + + if req.repo: + uis.add(req.repo.ui) + + # copy configs that were passed on the cmdline (--config) to the repo ui + for cfg in cfgs: + req.repo.ui.setconfig(*cfg) + + if options['verbose'] or options['debug'] or options['quiet']: + for opt in ('verbose', 'debug', 'quiet'): + val = str(bool(options[opt])) + for ui_ in uis: + ui_.setconfig('ui', opt, val) + + if options['traceback']: + for ui_ in uis: + ui_.setconfig('ui', 'traceback', 'on') + + if options['noninteractive']: + for ui_ in uis: + ui_.setconfig('ui', 'interactive', 'off') + + if cmdoptions.get('insecure', False): + for ui_ in uis: + ui_.setconfig('web', 'cacerts', '') + + if options['version']: + return commands.version_(ui) + if options['help']: + return commands.help_(ui, cmd) + elif not cmd: + return commands.help_(ui, 'shortlist') + + repo = None + cmdpats = args[:] + if cmd not in commands.norepo.split(): + # use the repo from the request only if we don't have -R + if not rpath and not cwd: + repo = req.repo + + if repo: + # set the descriptors of the repo ui to those of ui + repo.ui.fin = ui.fin + repo.ui.fout = ui.fout + repo.ui.ferr = ui.ferr + else: + try: + repo = hg.repository(ui, path=path) + if not repo.local(): + raise util.Abort(_("repository '%s' is not local") % path) + repo.ui.setconfig("bundle", "mainreporoot", repo.root) + except 
error.RequirementError: + raise + except error.RepoError: + if cmd not in commands.optionalrepo.split(): + if args and not path: # try to infer -R from command args + repos = map(cmdutil.findrepo, args) + guess = repos[0] + if guess and repos.count(guess) == len(repos): + req.args = ['--repository', guess] + fullargs + return _dispatch(req) + if not path: + raise error.RepoError(_("no repository found in '%s'" + " (.hg not found)") % os.getcwd()) + raise + if repo: + ui = repo.ui + args.insert(0, repo) + elif rpath: + ui.warn(_("warning: --repository ignored\n")) + + msg = ' '.join(' ' in a and repr(a) or a for a in fullargs) + ui.log("command", msg + "\n") + d = lambda: util.checksignature(func)(ui, *args, **cmdoptions) + try: + return runcommand(lui, repo, cmd, fullargs, ui, options, d, + cmdpats, cmdoptions) + finally: + if repo and repo != req.repo: + repo.close() + +def _runcommand(ui, options, cmd, cmdfunc): + def checkargs(): + try: + return cmdfunc() + except error.SignatureError: + raise error.CommandError(cmd, _("invalid arguments")) + + if options['profile']: + format = ui.config('profiling', 'format', default='text') + + if not format in ['text', 'kcachegrind']: + ui.warn(_("unrecognized profiling format '%s'" + " - Ignored\n") % format) + format = 'text' + + output = ui.config('profiling', 'output') + + if output: + path = ui.expandpath(output) + ostream = open(path, 'wb') + else: + ostream = sys.stderr + + try: + from mercurial import lsprof + except ImportError: + raise util.Abort(_( + 'lsprof not available - install from ' + 'http://codespeak.net/svn/user/arigo/hack/misc/lsprof/')) + p = lsprof.Profiler() + p.enable(subcalls=True) + try: + return checkargs() + finally: + p.disable() + + if format == 'kcachegrind': + import lsprofcalltree + calltree = lsprofcalltree.KCacheGrind(p) + calltree.output(ostream) + else: + # format == 'text' + stats = lsprof.Stats(p.getstats()) + stats.sort() + stats.pprint(top=10, file=ostream, climit=5) + + if output: + 
ostream.close() + else: + return checkargs() diff --git a/websdk/mercurial/encoding.py b/websdk/mercurial/encoding.py index 6af0112..f6197b2 120000..100644 --- a/websdk/mercurial/encoding.py +++ b/websdk/mercurial/encoding.py @@ -1 +1,173 @@ -/usr/share/pyshared/mercurial/encoding.py \ No newline at end of file +# encoding.py - character transcoding support for Mercurial +# +# Copyright 2005-2009 Matt Mackall and others +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +import error +import unicodedata, locale, os + +def _getpreferredencoding(): + ''' + On darwin, getpreferredencoding ignores the locale environment and + always returns mac-roman. http://bugs.python.org/issue6202 fixes this + for Python 2.7 and up. This is the same corrected code for earlier + Python versions. + + However, we can't use a version check for this method, as some distributions + patch Python to fix this. Instead, we use it as a 'fixer' for the mac-roman + encoding, as it is unlikely that this encoding is the actually expected. 
+ ''' + try: + locale.CODESET + except AttributeError: + # Fall back to parsing environment variables :-( + return locale.getdefaultlocale()[1] + + oldloc = locale.setlocale(locale.LC_CTYPE) + locale.setlocale(locale.LC_CTYPE, "") + result = locale.nl_langinfo(locale.CODESET) + locale.setlocale(locale.LC_CTYPE, oldloc) + + return result + +_encodingfixers = { + '646': lambda: 'ascii', + 'ANSI_X3.4-1968': lambda: 'ascii', + 'mac-roman': _getpreferredencoding +} + +try: + encoding = os.environ.get("HGENCODING") + if not encoding: + encoding = locale.getpreferredencoding() or 'ascii' + encoding = _encodingfixers.get(encoding, lambda: encoding)() +except locale.Error: + encoding = 'ascii' +encodingmode = os.environ.get("HGENCODINGMODE", "strict") +fallbackencoding = 'ISO-8859-1' + +class localstr(str): + '''This class allows strings that are unmodified to be + round-tripped to the local encoding and back''' + def __new__(cls, u, l): + s = str.__new__(cls, l) + s._utf8 = u + return s + def __hash__(self): + return hash(self._utf8) # avoid collisions in local string space + +def tolocal(s): + """ + Convert a string from internal UTF-8 to local encoding + + All internal strings should be UTF-8 but some repos before the + implementation of locale support may contain latin1 or possibly + other character sets. We attempt to decode everything strictly + using UTF-8, then Latin-1, and failing that, we use UTF-8 and + replace unknown characters. + + The localstr class is used to cache the known UTF-8 encoding of + strings next to their local representation to allow lossless + round-trip conversion back to UTF-8. + + >>> u = 'foo: \\xc3\\xa4' # utf-8 + >>> l = tolocal(u) + >>> l + 'foo: ?' + >>> fromlocal(l) + 'foo: \\xc3\\xa4' + >>> u2 = 'foo: \\xc3\\xa1' + >>> d = { l: 1, tolocal(u2): 2 } + >>> d # no collision + {'foo: ?': 1, 'foo: ?': 2} + >>> 'foo: ?' in d + False + >>> l1 = 'foo: \\xe4' # historical latin1 fallback + >>> l = tolocal(l1) + >>> l + 'foo: ?' 
+ >>> fromlocal(l) # magically in utf-8 + 'foo: \\xc3\\xa4' + """ + + for e in ('UTF-8', fallbackencoding): + try: + u = s.decode(e) # attempt strict decoding + r = u.encode(encoding, "replace") + if u == r.decode(encoding): + # r is a safe, non-lossy encoding of s + return r + elif e == 'UTF-8': + return localstr(s, r) + else: + return localstr(u.encode('UTF-8'), r) + + except LookupError, k: + raise error.Abort("%s, please check your locale settings" % k) + except UnicodeDecodeError: + pass + u = s.decode("utf-8", "replace") # last ditch + return u.encode(encoding, "replace") # can't round-trip + +def fromlocal(s): + """ + Convert a string from the local character encoding to UTF-8 + + We attempt to decode strings using the encoding mode set by + HGENCODINGMODE, which defaults to 'strict'. In this mode, unknown + characters will cause an error message. Other modes include + 'replace', which replaces unknown characters with a special + Unicode character, and 'ignore', which drops the character. + """ + + # can we do a lossless round-trip? + if isinstance(s, localstr): + return s._utf8 + + try: + return s.decode(encoding, encodingmode).encode("utf-8") + except UnicodeDecodeError, inst: + sub = s[max(0, inst.start - 10):inst.start + 10] + raise error.Abort("decoding near '%s': %s!" % (sub, inst)) + except LookupError, k: + raise error.Abort("%s, please check your locale settings" % k) + +# How to treat ambiguous-width characters. Set to 'wide' to treat as wide. 
+wide = (os.environ.get("HGENCODINGAMBIGUOUS", "narrow") == "wide" + and "WFA" or "WF") + +def colwidth(s): + "Find the column width of a string for display in the local encoding" + return ucolwidth(s.decode(encoding, 'replace')) + +def ucolwidth(d): + "Find the column width of a Unicode string for display" + eaw = getattr(unicodedata, 'east_asian_width', None) + if eaw is not None: + return sum([eaw(c) in wide and 2 or 1 for c in d]) + return len(d) + +def getcols(s, start, c): + '''Use colwidth to find a c-column substring of s starting at byte + index start''' + for x in xrange(start + c, len(s)): + t = s[start:x] + if colwidth(t) == c: + return t + +def lower(s): + "best-effort encoding-aware case-folding of local string s" + try: + if isinstance(s, localstr): + u = s._utf8.decode("utf-8") + else: + u = s.decode(encoding, encodingmode) + + lu = u.lower() + if u == lu: + return s # preserve localstring + return lu.encode(encoding) + except UnicodeError: + return s.lower() # we don't know how to fold this except in ASCII diff --git a/websdk/mercurial/error.py b/websdk/mercurial/error.py index 06ca817..462b971 120000..100644 --- a/websdk/mercurial/error.py +++ b/websdk/mercurial/error.py @@ -1 +1,90 @@ -/usr/share/pyshared/mercurial/error.py \ No newline at end of file +# error.py - Mercurial exceptions +# +# Copyright 2005-2008 Matt Mackall +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +"""Mercurial exceptions. + +This allows us to catch exceptions at higher levels without forcing +imports. 
+""" + +# Do not import anything here, please + +class RevlogError(Exception): + pass + +class LookupError(RevlogError, KeyError): + def __init__(self, name, index, message): + self.name = name + if isinstance(name, str) and len(name) == 20: + from node import short + name = short(name) + RevlogError.__init__(self, '%s@%s: %s' % (index, name, message)) + + def __str__(self): + return RevlogError.__str__(self) + +class CommandError(Exception): + """Exception raised on errors in parsing the command line.""" + +class Abort(Exception): + """Raised if a command needs to print an error and exit.""" + def __init__(self, *args, **kw): + Exception.__init__(self, *args) + self.hint = kw.get('hint') + +class ConfigError(Abort): + 'Exception raised when parsing config files' + +class OutOfBandError(Exception): + 'Exception raised when a remote repo reports failure' + +class ParseError(Exception): + 'Exception raised when parsing config files (msg[, pos])' + +class RepoError(Exception): + def __init__(self, *args, **kw): + Exception.__init__(self, *args) + self.hint = kw.get('hint') + +class RepoLookupError(RepoError): + pass + +class CapabilityError(RepoError): + pass + +class RequirementError(RepoError): + """Exception raised if .hg/requires has an unknown entry.""" + pass + +class LockError(IOError): + def __init__(self, errno, strerror, filename, desc): + IOError.__init__(self, errno, strerror, filename) + self.desc = desc + +class LockHeld(LockError): + def __init__(self, errno, filename, desc, locker): + LockError.__init__(self, errno, 'Lock held', filename, desc) + self.locker = locker + +class LockUnavailable(LockError): + pass + +class ResponseError(Exception): + """Raised to print an error with part of output and exit.""" + +class UnknownCommand(Exception): + """Exception raised if command is not in the command table.""" + +class AmbiguousCommand(Exception): + """Exception raised if command shortcut matches more than one command.""" + +# derived from KeyboardInterrupt 
to simplify some breakout code +class SignalInterrupt(KeyboardInterrupt): + """Exception raised on SIGTERM and SIGHUP.""" + +class SignatureError(Exception): + pass diff --git a/websdk/mercurial/extensions.py b/websdk/mercurial/extensions.py index 85006cf..9cfb49d 120000..100644 --- a/websdk/mercurial/extensions.py +++ b/websdk/mercurial/extensions.py @@ -1 +1,355 @@ -/usr/share/pyshared/mercurial/extensions.py \ No newline at end of file +# extensions.py - extension handling for mercurial +# +# Copyright 2005-2007 Matt Mackall +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +import imp, os +import util, cmdutil, error +from i18n import _, gettext + +_extensions = {} +_order = [] +_ignore = ['hbisect', 'bookmarks', 'parentrevspec'] + +def extensions(): + for name in _order: + module = _extensions[name] + if module: + yield name, module + +def find(name): + '''return module with given extension name''' + mod = None + try: + mod = _extensions[name] + except KeyError: + for k, v in _extensions.iteritems(): + if k.endswith('.' 
+ name) or k.endswith('/' + name): + mod = v + break + if not mod: + raise KeyError(name) + return mod + +def loadpath(path, module_name): + module_name = module_name.replace('.', '_') + path = util.expandpath(path) + if os.path.isdir(path): + # module/__init__.py style + d, f = os.path.split(path.rstrip('/')) + fd, fpath, desc = imp.find_module(f, [d]) + return imp.load_module(module_name, fd, fpath, desc) + else: + return imp.load_source(module_name, path) + +def load(ui, name, path): + # unused ui argument kept for backwards compatibility + if name.startswith('hgext.') or name.startswith('hgext/'): + shortname = name[6:] + else: + shortname = name + if shortname in _ignore: + return None + if shortname in _extensions: + return _extensions[shortname] + _extensions[shortname] = None + if path: + # the module will be loaded in sys.modules + # choose an unique name so that it doesn't + # conflicts with other modules + mod = loadpath(path, 'hgext.%s' % name) + else: + def importh(name): + mod = __import__(name) + components = name.split('.') + for comp in components[1:]: + mod = getattr(mod, comp) + return mod + try: + mod = importh("hgext.%s" % name) + except ImportError, err: + ui.debug('could not import hgext.%s (%s): trying %s\n' + % (name, err, name)) + mod = importh(name) + _extensions[shortname] = mod + _order.append(shortname) + return mod + +def loadall(ui): + result = ui.configitems("extensions") + newindex = len(_order) + for (name, path) in result: + if path: + if path[0] == '!': + continue + try: + load(ui, name, path) + except KeyboardInterrupt: + raise + except Exception, inst: + if path: + ui.warn(_("*** failed to import extension %s from %s: %s\n") + % (name, path, inst)) + else: + ui.warn(_("*** failed to import extension %s: %s\n") + % (name, inst)) + if ui.traceback(): + return 1 + + for name in _order[newindex:]: + uisetup = getattr(_extensions[name], 'uisetup', None) + if uisetup: + uisetup(ui) + + for name in _order[newindex:]: + extsetup = 
getattr(_extensions[name], 'extsetup', None) + if extsetup: + try: + extsetup(ui) + except TypeError: + if extsetup.func_code.co_argcount != 0: + raise + extsetup() # old extsetup with no ui argument + +def wrapcommand(table, command, wrapper): + '''Wrap the command named `command' in table + + Replace command in the command table with wrapper. The wrapped command will + be inserted into the command table specified by the table argument. + + The wrapper will be called like + + wrapper(orig, *args, **kwargs) + + where orig is the original (wrapped) function, and *args, **kwargs + are the arguments passed to it. + ''' + assert util.safehasattr(wrapper, '__call__') + aliases, entry = cmdutil.findcmd(command, table) + for alias, e in table.iteritems(): + if e is entry: + key = alias + break + + origfn = entry[0] + def wrap(*args, **kwargs): + return util.checksignature(wrapper)( + util.checksignature(origfn), *args, **kwargs) + + wrap.__doc__ = getattr(origfn, '__doc__') + wrap.__module__ = getattr(origfn, '__module__') + + newentry = list(entry) + newentry[0] = wrap + table[key] = tuple(newentry) + return entry + +def wrapfunction(container, funcname, wrapper): + '''Wrap the function named funcname in container + + Replace the funcname member in the given container with the specified + wrapper. The container is typically a module, class, or instance. + + The wrapper will be called like + + wrapper(orig, *args, **kwargs) + + where orig is the original (wrapped) function, and *args, **kwargs + are the arguments passed to it. + + Wrapping methods of the repository object is not recommended since + it conflicts with extensions that extend the repository by + subclassing. All extensions that need to extend methods of + localrepository should use this subclassing trick: namely, + reposetup() should look like + + def reposetup(ui, repo): + class myrepo(repo.__class__): + def whatever(self, *args, **kwargs): + [...extension stuff...] 
+ super(myrepo, self).whatever(*args, **kwargs) + [...extension stuff...] + + repo.__class__ = myrepo + + In general, combining wrapfunction() with subclassing does not + work. Since you cannot control what other extensions are loaded by + your end users, you should play nicely with others by using the + subclass trick. + ''' + assert util.safehasattr(wrapper, '__call__') + def wrap(*args, **kwargs): + return wrapper(origfn, *args, **kwargs) + + origfn = getattr(container, funcname) + assert util.safehasattr(origfn, '__call__') + setattr(container, funcname, wrap) + return origfn + +def _disabledpaths(strip_init=False): + '''find paths of disabled extensions. returns a dict of {name: path} + removes /__init__.py from packages if strip_init is True''' + import hgext + extpath = os.path.dirname(os.path.abspath(hgext.__file__)) + try: # might not be a filesystem path + files = os.listdir(extpath) + except OSError: + return {} + + exts = {} + for e in files: + if e.endswith('.py'): + name = e.rsplit('.', 1)[0] + path = os.path.join(extpath, e) + else: + name = e + path = os.path.join(extpath, e, '__init__.py') + if not os.path.exists(path): + continue + if strip_init: + path = os.path.dirname(path) + if name in exts or name in _order or name == '__init__': + continue + exts[name] = path + return exts + +def _moduledoc(file): + '''return the top-level python documentation for the given file + + Loosely inspired by pydoc.source_synopsis(), but rewritten to + handle triple quotes and to return the whole text instead of just + the synopsis''' + result = [] + + line = file.readline() + while line[:1] == '#' or not line.strip(): + line = file.readline() + if not line: + break + + start = line[:3] + if start == '"""' or start == "'''": + line = line[3:] + while line: + if line.rstrip().endswith(start): + line = line.split(start)[0] + if line: + result.append(line) + break + elif not line: + return None # unmatched delimiter + result.append(line) + line = file.readline() + 
else: + return None + + return ''.join(result) + +def _disabledhelp(path): + '''retrieve help synopsis of a disabled extension (without importing)''' + try: + file = open(path) + except IOError: + return + else: + doc = _moduledoc(file) + file.close() + + if doc: # extracting localized synopsis + return gettext(doc).splitlines()[0] + else: + return _('(no help text available)') + +def disabled(): + '''find disabled extensions from hgext. returns a dict of {name: desc}''' + try: + from hgext import __index__ + return dict((name, gettext(desc)) + for name, desc in __index__.docs.iteritems() + if name not in _order) + except ImportError: + pass + + paths = _disabledpaths() + if not paths: + return None + + exts = {} + for name, path in paths.iteritems(): + doc = _disabledhelp(path) + if doc: + exts[name] = doc + + return exts + +def disabledext(name): + '''find a specific disabled extension from hgext. returns desc''' + try: + from hgext import __index__ + if name in _order: # enabled + return + else: + return gettext(__index__.docs.get(name)) + except ImportError: + pass + + paths = _disabledpaths() + if name in paths: + return _disabledhelp(paths[name]) + +def disabledcmd(ui, cmd, strict=False): + '''import disabled extensions until cmd is found. 
+ returns (cmdname, extname, doc)''' + + paths = _disabledpaths(strip_init=True) + if not paths: + raise error.UnknownCommand(cmd) + + def findcmd(cmd, name, path): + try: + mod = loadpath(path, 'hgext.%s' % name) + except Exception: + return + try: + aliases, entry = cmdutil.findcmd(cmd, + getattr(mod, 'cmdtable', {}), strict) + except (error.AmbiguousCommand, error.UnknownCommand): + return + except Exception: + ui.warn(_('warning: error finding commands in %s\n') % path) + ui.traceback() + return + for c in aliases: + if c.startswith(cmd): + cmd = c + break + else: + cmd = aliases[0] + return (cmd, name, mod) + + # first, search for an extension with the same name as the command + path = paths.pop(cmd, None) + if path: + ext = findcmd(cmd, cmd, path) + if ext: + return ext + + # otherwise, interrogate each extension until there's a match + for name, path in paths.iteritems(): + ext = findcmd(cmd, name, path) + if ext: + return ext + + raise error.UnknownCommand(cmd) + +def enabled(): + '''return a dict of {name: desc} of extensions''' + exts = {} + for ename, ext in extensions(): + doc = (gettext(ext.__doc__) or _('(no help text available)')) + ename = ename.split('.')[-1] + exts[ename] = doc.splitlines()[0].strip() + + return exts diff --git a/websdk/mercurial/fancyopts.py b/websdk/mercurial/fancyopts.py index bce1d13..ae18083 120000..100644 --- a/websdk/mercurial/fancyopts.py +++ b/websdk/mercurial/fancyopts.py @@ -1 +1,117 @@ -/usr/share/pyshared/mercurial/fancyopts.py \ No newline at end of file +# fancyopts.py - better command line parsing +# +# Copyright 2005-2009 Matt Mackall and others +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +import getopt + +def gnugetopt(args, options, longoptions): + """Parse options mostly like getopt.gnu_getopt. 
+ + This is different from getopt.gnu_getopt in that an argument of - will + become an argument of - instead of vanishing completely. + """ + extraargs = [] + if '--' in args: + stopindex = args.index('--') + extraargs = args[stopindex + 1:] + args = args[:stopindex] + opts, parseargs = getopt.getopt(args, options, longoptions) + args = [] + while parseargs: + arg = parseargs.pop(0) + if arg and arg[0] == '-' and len(arg) > 1: + parseargs.insert(0, arg) + topts, newparseargs = getopt.getopt(parseargs, options, longoptions) + opts = opts + topts + parseargs = newparseargs + else: + args.append(arg) + args.extend(extraargs) + return opts, args + + +def fancyopts(args, options, state, gnu=False): + """ + read args, parse options, and store options in state + + each option is a tuple of: + + short option or '' + long option + default value + description + option value label(optional) + + option types include: + + boolean or none - option sets variable in state to true + string - parameter string is stored in state + list - parameter string is added to a list + integer - parameter strings is stored as int + function - call function with parameter + + non-option args are returned + """ + namelist = [] + shortlist = '' + argmap = {} + defmap = {} + + for option in options: + if len(option) == 5: + short, name, default, comment, dummy = option + else: + short, name, default, comment = option + # convert opts to getopt format + oname = name + name = name.replace('-', '_') + + argmap['-' + short] = argmap['--' + oname] = name + defmap[name] = default + + # copy defaults to state + if isinstance(default, list): + state[name] = default[:] + elif getattr(default, '__call__', False): + state[name] = None + else: + state[name] = default + + # does it take a parameter? 
+ if not (default is None or default is True or default is False): + if short: + short += ':' + if oname: + oname += '=' + if short: + shortlist += short + if name: + namelist.append(oname) + + # parse arguments + if gnu: + parse = gnugetopt + else: + parse = getopt.getopt + opts, args = parse(args, shortlist, namelist) + + # transfer result to state + for opt, val in opts: + name = argmap[opt] + t = type(defmap[name]) + if t is type(fancyopts): + state[name] = defmap[name](val) + elif t is type(1): + state[name] = int(val) + elif t is type(''): + state[name] = val + elif t is type([]): + state[name].append(val) + elif t is type(None) or t is type(False): + state[name] = True + + # return unparsed args + return args diff --git a/websdk/mercurial/filelog.py b/websdk/mercurial/filelog.py index 302d041..33f3d0c 120000..100644 --- a/websdk/mercurial/filelog.py +++ b/websdk/mercurial/filelog.py @@ -1 +1,92 @@ -/usr/share/pyshared/mercurial/filelog.py \ No newline at end of file +# filelog.py - file history class for mercurial +# +# Copyright 2005-2007 Matt Mackall +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. 
+ +import revlog +import re + +_mdre = re.compile('\1\n') +def _parsemeta(text): + """return (metadatadict, keylist, metadatasize)""" + # text can be buffer, so we can't use .startswith or .index + if text[:2] != '\1\n': + return None, None, None + s = _mdre.search(text, 2).start() + mtext = text[2:s] + meta = {} + keys = [] + for l in mtext.splitlines(): + k, v = l.split(": ", 1) + meta[k] = v + keys.append(k) + return meta, keys, (s + 2) + +def _packmeta(meta, keys=None): + if not keys: + keys = sorted(meta.iterkeys()) + return "".join("%s: %s\n" % (k, meta[k]) for k in keys) + +class filelog(revlog.revlog): + def __init__(self, opener, path): + revlog.revlog.__init__(self, opener, + "/".join(("data", path + ".i"))) + + def read(self, node): + t = self.revision(node) + if not t.startswith('\1\n'): + return t + s = t.index('\1\n', 2) + return t[s + 2:] + + def add(self, text, meta, transaction, link, p1=None, p2=None): + if meta or text.startswith('\1\n'): + text = "\1\n%s\1\n%s" % (_packmeta(meta), text) + return self.addrevision(text, transaction, link, p1, p2) + + def renamed(self, node): + if self.parents(node)[0] != revlog.nullid: + return False + t = self.revision(node) + m = _parsemeta(t)[0] + if m and "copy" in m: + return (m["copy"], revlog.bin(m["copyrev"])) + return False + + def size(self, rev): + """return the size of a given revision""" + + # for revisions with renames, we have to go the slow way + node = self.node(rev) + if self.renamed(node): + return len(self.read(node)) + + # XXX if self.read(node).startswith("\1\n"), this returns (size+4) + return revlog.revlog.size(self, rev) + + def cmp(self, node, text): + """compare text with a given file revision + + returns True if text is different than what is stored. 
+ """ + + t = text + if text.startswith('\1\n'): + t = '\1\n\1\n' + text + + samehashes = not revlog.revlog.cmp(self, node, t) + if samehashes: + return False + + # renaming a file produces a different hash, even if the data + # remains unchanged. Check if it's the case (slow): + if self.renamed(node): + t2 = self.read(node) + return t2 != text + + return True + + def _file(self, f): + return filelog(self.opener, f) diff --git a/websdk/mercurial/filemerge.py b/websdk/mercurial/filemerge.py index 9179d38..bff2158 120000..100644 --- a/websdk/mercurial/filemerge.py +++ b/websdk/mercurial/filemerge.py @@ -1 +1,271 @@ -/usr/share/pyshared/mercurial/filemerge.py \ No newline at end of file +# filemerge.py - file-level merge handling for Mercurial +# +# Copyright 2006, 2007, 2008 Matt Mackall +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +from node import short +from i18n import _ +import util, simplemerge, match, error +import os, tempfile, re, filecmp + +def _toolstr(ui, tool, part, default=""): + return ui.config("merge-tools", tool + "." + part, default) + +def _toolbool(ui, tool, part, default=False): + return ui.configbool("merge-tools", tool + "." + part, default) + +def _toollist(ui, tool, part, default=[]): + return ui.configlist("merge-tools", tool + "." 
+ part, default) + +_internal = ['internal:' + s + for s in 'fail local other merge prompt dump'.split()] + +def _findtool(ui, tool): + if tool in _internal: + return tool + for kn in ("regkey", "regkeyalt"): + k = _toolstr(ui, tool, kn) + if not k: + continue + p = util.lookupreg(k, _toolstr(ui, tool, "regname")) + if p: + p = util.findexe(p + _toolstr(ui, tool, "regappend")) + if p: + return p + exe = _toolstr(ui, tool, "executable", tool) + return util.findexe(util.expandpath(exe)) + +def _picktool(repo, ui, path, binary, symlink): + def check(tool, pat, symlink, binary): + tmsg = tool + if pat: + tmsg += " specified for " + pat + if not _findtool(ui, tool): + if pat: # explicitly requested tool deserves a warning + ui.warn(_("couldn't find merge tool %s\n") % tmsg) + else: # configured but non-existing tools are more silent + ui.note(_("couldn't find merge tool %s\n") % tmsg) + elif symlink and not _toolbool(ui, tool, "symlink"): + ui.warn(_("tool %s can't handle symlinks\n") % tmsg) + elif binary and not _toolbool(ui, tool, "binary"): + ui.warn(_("tool %s can't handle binary\n") % tmsg) + elif not util.gui() and _toolbool(ui, tool, "gui"): + ui.warn(_("tool %s requires a GUI\n") % tmsg) + else: + return True + return False + + # forcemerge comes from command line arguments, highest priority + force = ui.config('ui', 'forcemerge') + if force: + toolpath = _findtool(ui, force) + if toolpath: + return (force, '"' + toolpath + '"') + else: + # mimic HGMERGE if given tool not found + return (force, force) + + # HGMERGE takes next precedence + hgmerge = os.environ.get("HGMERGE") + if hgmerge: + return (hgmerge, hgmerge) + + # then patterns + for pat, tool in ui.configitems("merge-patterns"): + mf = match.match(repo.root, '', [pat]) + if mf(path) and check(tool, pat, symlink, False): + toolpath = _findtool(ui, tool) + return (tool, '"' + toolpath + '"') + + # then merge tools + tools = {} + for k, v in ui.configitems("merge-tools"): + t = k.split('.')[0] + if t not 
in tools: + tools[t] = int(_toolstr(ui, t, "priority", "0")) + names = tools.keys() + tools = sorted([(-p, t) for t, p in tools.items()]) + uimerge = ui.config("ui", "merge") + if uimerge: + if uimerge not in names: + return (uimerge, uimerge) + tools.insert(0, (None, uimerge)) # highest priority + tools.append((None, "hgmerge")) # the old default, if found + for p, t in tools: + if check(t, None, symlink, binary): + toolpath = _findtool(ui, t) + return (t, '"' + toolpath + '"') + # internal merge as last resort + return (not (symlink or binary) and "internal:merge" or None, None) + +def _eoltype(data): + "Guess the EOL type of a file" + if '\0' in data: # binary + return None + if '\r\n' in data: # Windows + return '\r\n' + if '\r' in data: # Old Mac + return '\r' + if '\n' in data: # UNIX + return '\n' + return None # unknown + +def _matcheol(file, origfile): + "Convert EOL markers in a file to match origfile" + tostyle = _eoltype(util.readfile(origfile)) + if tostyle: + data = util.readfile(file) + style = _eoltype(data) + if style: + newdata = data.replace(style, tostyle) + if newdata != data: + util.writefile(file, newdata) + +def filemerge(repo, mynode, orig, fcd, fco, fca): + """perform a 3-way merge in the working directory + + mynode = parent node before merge + orig = original local filename before merge + fco = other file context + fca = ancestor file context + fcd = local file context for current/destination file + """ + + def temp(prefix, ctx): + pre = "%s~%s." % (os.path.basename(ctx.path()), prefix) + (fd, name) = tempfile.mkstemp(prefix=pre) + data = repo.wwritedata(ctx.path(), ctx.data()) + f = os.fdopen(fd, "wb") + f.write(data) + f.close() + return name + + def isbin(ctx): + try: + return util.binary(ctx.data()) + except IOError: + return False + + if not fco.cmp(fcd): # files identical? 
+ return None + + ui = repo.ui + fd = fcd.path() + binary = isbin(fcd) or isbin(fco) or isbin(fca) + symlink = 'l' in fcd.flags() + fco.flags() + tool, toolpath = _picktool(repo, ui, fd, binary, symlink) + ui.debug("picked tool '%s' for %s (binary %s symlink %s)\n" % + (tool, fd, binary, symlink)) + + if not tool or tool == 'internal:prompt': + tool = "internal:local" + if ui.promptchoice(_(" no tool found to merge %s\n" + "keep (l)ocal or take (o)ther?") % fd, + (_("&Local"), _("&Other")), 0): + tool = "internal:other" + if tool == "internal:local": + return 0 + if tool == "internal:other": + repo.wwrite(fd, fco.data(), fco.flags()) + return 0 + if tool == "internal:fail": + return 1 + + # do the actual merge + a = repo.wjoin(fd) + b = temp("base", fca) + c = temp("other", fco) + out = "" + back = a + ".orig" + util.copyfile(a, back) + + if orig != fco.path(): + ui.status(_("merging %s and %s to %s\n") % (orig, fco.path(), fd)) + else: + ui.status(_("merging %s\n") % fd) + + ui.debug("my %s other %s ancestor %s\n" % (fcd, fco, fca)) + + # do we attempt to simplemerge first? 
+ try: + premerge = _toolbool(ui, tool, "premerge", not (binary or symlink)) + except error.ConfigError: + premerge = _toolstr(ui, tool, "premerge").lower() + valid = 'keep'.split() + if premerge not in valid: + _valid = ', '.join(["'" + v + "'" for v in valid]) + raise error.ConfigError(_("%s.premerge not valid " + "('%s' is neither boolean nor %s)") % + (tool, premerge, _valid)) + + if premerge: + r = simplemerge.simplemerge(ui, a, b, c, quiet=True) + if not r: + ui.debug(" premerge successful\n") + os.unlink(back) + os.unlink(b) + os.unlink(c) + return 0 + if premerge != 'keep': + util.copyfile(back, a) # restore from backup and try again + + env = dict(HG_FILE=fd, + HG_MY_NODE=short(mynode), + HG_OTHER_NODE=str(fco.changectx()), + HG_BASE_NODE=str(fca.changectx()), + HG_MY_ISLINK='l' in fcd.flags(), + HG_OTHER_ISLINK='l' in fco.flags(), + HG_BASE_ISLINK='l' in fca.flags()) + + if tool == "internal:merge": + r = simplemerge.simplemerge(ui, a, b, c, label=['local', 'other']) + elif tool == 'internal:dump': + a = repo.wjoin(fd) + util.copyfile(a, a + ".local") + repo.wwrite(fd + ".other", fco.data(), fco.flags()) + repo.wwrite(fd + ".base", fca.data(), fca.flags()) + return 1 # unresolved + else: + args = _toolstr(ui, tool, "args", '$local $base $other') + if "$output" in args: + out, a = a, back # read input from backup, write to original + replace = dict(local=a, base=b, other=c, output=out) + args = util.interpolate(r'\$', replace, args, + lambda s: '"%s"' % util.localpath(s)) + r = util.system(toolpath + ' ' + args, cwd=repo.root, environ=env, + out=ui.fout) + + if not r and (_toolbool(ui, tool, "checkconflicts") or + 'conflicts' in _toollist(ui, tool, "check")): + if re.search("^(<<<<<<< .*|=======|>>>>>>> .*)$", fcd.data(), + re.MULTILINE): + r = 1 + + checked = False + if 'prompt' in _toollist(ui, tool, "check"): + checked = True + if ui.promptchoice(_("was merge of '%s' successful (yn)?") % fd, + (_("&Yes"), _("&No")), 1): + r = 1 + + if not r and not 
checked and (_toolbool(ui, tool, "checkchanged") or + 'changed' in _toollist(ui, tool, "check")): + if filecmp.cmp(repo.wjoin(fd), back): + if ui.promptchoice(_(" output file %s appears unchanged\n" + "was merge successful (yn)?") % fd, + (_("&Yes"), _("&No")), 1): + r = 1 + + if _toolbool(ui, tool, "fixeol"): + _matcheol(repo.wjoin(fd), back) + + if r: + ui.warn(_("merging %s failed!\n") % fd) + else: + os.unlink(back) + + os.unlink(b) + os.unlink(c) + return r diff --git a/websdk/mercurial/fileset.py b/websdk/mercurial/fileset.py new file mode 100644 index 0000000..95a2c17 --- /dev/null +++ b/websdk/mercurial/fileset.py @@ -0,0 +1,440 @@ +# fileset.py - file set queries for mercurial +# +# Copyright 2010 Matt Mackall +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +import parser, error, util, merge, re +from i18n import _ + +elements = { + "(": (20, ("group", 1, ")"), ("func", 1, ")")), + "-": (5, ("negate", 19), ("minus", 5)), + "not": (10, ("not", 10)), + "!": (10, ("not", 10)), + "and": (5, None, ("and", 5)), + "&": (5, None, ("and", 5)), + "or": (4, None, ("or", 4)), + "|": (4, None, ("or", 4)), + "+": (4, None, ("or", 4)), + ",": (2, None, ("list", 2)), + ")": (0, None, None), + "symbol": (0, ("symbol",), None), + "string": (0, ("string",), None), + "end": (0, None, None), +} + +keywords = set(['and', 'or', 'not']) + +globchars = ".*{}[]?/\\" + +def tokenize(program): + pos, l = 0, len(program) + while pos < l: + c = program[pos] + if c.isspace(): # skip inter-token whitespace + pass + elif c in "(),-|&+!": # handle simple operators + yield (c, None, pos) + elif (c in '"\'' or c == 'r' and + program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings + if c == 'r': + pos += 1 + c = program[pos] + decode = lambda x: x + else: + decode = lambda x: x.decode('string-escape') + pos += 1 + s = pos + while pos < l: # find closing quote + d = program[pos] + if d == '\\': 
# skip over escaped characters + pos += 2 + continue + if d == c: + yield ('string', decode(program[s:pos]), s) + break + pos += 1 + else: + raise error.ParseError(_("unterminated string"), s) + elif c.isalnum() or c in globchars or ord(c) > 127: + # gather up a symbol/keyword + s = pos + pos += 1 + while pos < l: # find end of symbol + d = program[pos] + if not (d.isalnum() or d in globchars or ord(d) > 127): + break + pos += 1 + sym = program[s:pos] + if sym in keywords: # operator keywords + yield (sym, None, s) + else: + yield ('symbol', sym, s) + pos -= 1 + else: + raise error.ParseError(_("syntax error"), pos) + pos += 1 + yield ('end', None, pos) + +parse = parser.parser(tokenize, elements).parse + +def getstring(x, err): + if x and (x[0] == 'string' or x[0] == 'symbol'): + return x[1] + raise error.ParseError(err) + +def getset(mctx, x): + if not x: + raise error.ParseError(_("missing argument")) + return methods[x[0]](mctx, *x[1:]) + +def stringset(mctx, x): + m = mctx.matcher([x]) + return [f for f in mctx.subset if m(f)] + +def andset(mctx, x, y): + return getset(mctx.narrow(getset(mctx, x)), y) + +def orset(mctx, x, y): + # needs optimizing + xl = getset(mctx, x) + yl = getset(mctx, y) + return xl + [f for f in yl if f not in xl] + +def notset(mctx, x): + s = set(getset(mctx, x)) + return [r for r in mctx.subset if r not in s] + +def listset(mctx, a, b): + raise error.ParseError(_("can't use a list in this context")) + +def modified(mctx, x): + """``modified()`` + File that is modified according to status. + """ + # i18n: "modified" is a keyword + getargs(x, 0, 0, _("modified takes no arguments")) + s = mctx.status()[0] + return [f for f in mctx.subset if f in s] + +def added(mctx, x): + """``added()`` + File that is added according to status. 
+ """ + # i18n: "added" is a keyword + getargs(x, 0, 0, _("added takes no arguments")) + s = mctx.status()[1] + return [f for f in mctx.subset if f in s] + +def removed(mctx, x): + """``removed()`` + File that is removed according to status. + """ + # i18n: "removed" is a keyword + getargs(x, 0, 0, _("removed takes no arguments")) + s = mctx.status()[2] + return [f for f in mctx.subset if f in s] + +def deleted(mctx, x): + """``deleted()`` + File that is deleted according to status. + """ + # i18n: "deleted" is a keyword + getargs(x, 0, 0, _("deleted takes no arguments")) + s = mctx.status()[3] + return [f for f in mctx.subset if f in s] + +def unknown(mctx, x): + """``unknown()`` + File that is unknown according to status. These files will only be + considered if this predicate is used. + """ + # i18n: "unknown" is a keyword + getargs(x, 0, 0, _("unknown takes no arguments")) + s = mctx.status()[4] + return [f for f in mctx.subset if f in s] + +def ignored(mctx, x): + """``ignored()`` + File that is ignored according to status. These files will only be + considered if this predicate is used. + """ + # i18n: "ignored" is a keyword + getargs(x, 0, 0, _("ignored takes no arguments")) + s = mctx.status()[5] + return [f for f in mctx.subset if f in s] + +def clean(mctx, x): + """``clean()`` + File that is clean according to status. + """ + # i18n: "clean" is a keyword + getargs(x, 0, 0, _("clean takes no arguments")) + s = mctx.status()[6] + return [f for f in mctx.subset if f in s] + +def func(mctx, a, b): + if a[0] == 'symbol' and a[1] in symbols: + return symbols[a[1]](mctx, b) + raise error.ParseError(_("not a function: %s") % a[1]) + +def getlist(x): + if not x: + return [] + if x[0] == 'list': + return getlist(x[1]) + [x[2]] + return [x] + +def getargs(x, min, max, err): + l = getlist(x) + if len(l) < min or len(l) > max: + raise error.ParseError(err) + return l + +def binary(mctx, x): + """``binary()`` + File that appears to be binary (contains NUL bytes). 
+ """ + # i18n: "binary" is a keyword + getargs(x, 0, 0, _("binary takes no arguments")) + return [f for f in mctx.subset if util.binary(mctx.ctx[f].data())] + +def exec_(mctx, x): + """``exec()`` + File that is marked as executable. + """ + # i18n: "exec" is a keyword + getargs(x, 0, 0, _("exec takes no arguments")) + return [f for f in mctx.subset if mctx.ctx.flags(f) == 'x'] + +def symlink(mctx, x): + """``symlink()`` + File that is marked as a symlink. + """ + # i18n: "symlink" is a keyword + getargs(x, 0, 0, _("symlink takes no arguments")) + return [f for f in mctx.subset if mctx.ctx.flags(f) == 'l'] + +def resolved(mctx, x): + """``resolved()`` + File that is marked resolved according to the resolve state. + """ + # i18n: "resolved" is a keyword + getargs(x, 0, 0, _("resolved takes no arguments")) + if mctx.ctx.rev() is not None: + return [] + ms = merge.mergestate(mctx.ctx._repo) + return [f for f in mctx.subset if f in ms and ms[f] == 'r'] + +def unresolved(mctx, x): + """``unresolved()`` + File that is marked unresolved according to the resolve state. + """ + # i18n: "unresolved" is a keyword + getargs(x, 0, 0, _("unresolved takes no arguments")) + if mctx.ctx.rev() is not None: + return [] + ms = merge.mergestate(mctx.ctx._repo) + return [f for f in mctx.subset if f in ms and ms[f] == 'u'] + +def hgignore(mctx, x): + """``hgignore()`` + File that matches the active .hgignore pattern. + """ + getargs(x, 0, 0, _("hgignore takes no arguments")) + ignore = mctx.ctx._repo.dirstate._ignore + return [f for f in mctx.subset if ignore(f)] + +def grep(mctx, x): + """``grep(regex)`` + File contains the given regular expression. 
+ """ + pat = getstring(x, _("grep requires a pattern")) + r = re.compile(pat) + return [f for f in mctx.subset if r.search(mctx.ctx[f].data())] + +_units = dict(k=2**10, K=2**10, kB=2**10, KB=2**10, + M=2**20, MB=2**20, G=2**30, GB=2**30) + +def _sizetoint(s): + try: + s = s.strip() + for k, v in _units.items(): + if s.endswith(k): + return int(float(s[:-len(k)]) * v) + return int(s) + except ValueError: + raise error.ParseError(_("couldn't parse size: %s") % s) + +def _sizetomax(s): + try: + s = s.strip() + for k, v in _units.items(): + if s.endswith(k): + # max(4k) = 5k - 1, max(4.5k) = 4.6k - 1 + n = s[:-len(k)] + inc = 1.0 + if "." in n: + inc /= 10 ** len(n.split(".")[1]) + return int((float(n) + inc) * v) - 1 + # no extension, this is a precise value + return int(s) + except ValueError: + raise error.ParseError(_("couldn't parse size: %s") % s) + +def size(mctx, x): + """``size(expression)`` + File size matches the given expression. Examples: + + - 1k (files from 1024 to 2047 bytes) + - < 20k (files less than 20480 bytes) + - >= .5MB (files at least 524288 bytes) + - 4k - 1MB (files from 4096 bytes to 1048576 bytes) + """ + + # i18n: "size" is a keyword + expr = getstring(x, _("size requires an expression")).strip() + if '-' in expr: # do we have a range? 
+ a, b = expr.split('-', 1) + a = _sizetoint(a) + b = _sizetoint(b) + m = lambda x: x >= a and x <= b + elif expr.startswith("<="): + a = _sizetoint(expr[2:]) + m = lambda x: x <= a + elif expr.startswith("<"): + a = _sizetoint(expr[1:]) + m = lambda x: x < a + elif expr.startswith(">="): + a = _sizetoint(expr[2:]) + m = lambda x: x >= a + elif expr.startswith(">"): + a = _sizetoint(expr[1:]) + m = lambda x: x > a + elif expr[0].isdigit or expr[0] == '.': + a = _sizetoint(expr) + b = _sizetomax(expr) + m = lambda x: x >= a and x <= b + else: + raise error.ParseError(_("couldn't parse size: %s") % expr) + + return [f for f in mctx.subset if m(mctx.ctx[f].size())] + +def encoding(mctx, x): + """``encoding(name)`` + File can be successfully decoded with the given character + encoding. May not be useful for encodings other than ASCII and + UTF-8. + """ + + # i18n: "encoding" is a keyword + enc = getstring(x, _("encoding requires an encoding name")) + + s = [] + for f in mctx.subset: + d = mctx.ctx[f].data() + try: + d.decode(enc) + except LookupError: + raise util.Abort(_("unknown encoding '%s'") % enc) + except UnicodeDecodeError: + continue + s.append(f) + + return s + +def copied(mctx, x): + """``copied()`` + File that is recorded as being copied. 
+ """ + # i18n: "copied" is a keyword + getargs(x, 0, 0, _("copied takes no arguments")) + s = [] + for f in mctx.subset: + p = mctx.ctx[f].parents() + if p and p[0].path() != f: + s.append(f) + return s + +symbols = { + 'added': added, + 'binary': binary, + 'clean': clean, + 'copied': copied, + 'deleted': deleted, + 'encoding': encoding, + 'exec': exec_, + 'grep': grep, + 'ignored': ignored, + 'hgignore': hgignore, + 'modified': modified, + 'removed': removed, + 'resolved': resolved, + 'size': size, + 'symlink': symlink, + 'unknown': unknown, + 'unresolved': unresolved, +} + +methods = { + 'string': stringset, + 'symbol': stringset, + 'and': andset, + 'or': orset, + 'list': listset, + 'group': getset, + 'not': notset, + 'func': func, +} + +class matchctx(object): + def __init__(self, ctx, subset=None, status=None): + self.ctx = ctx + self.subset = subset + self._status = status + def status(self): + return self._status + def matcher(self, patterns): + return self.ctx.match(patterns) + def filter(self, files): + return [f for f in files if f in self.subset] + def narrow(self, files): + return matchctx(self.ctx, self.filter(files), self._status) + +def _intree(funcs, tree): + if isinstance(tree, tuple): + if tree[0] == 'func' and tree[1][0] == 'symbol': + if tree[1][1] in funcs: + return True + for s in tree[1:]: + if _intree(funcs, s): + return True + return False + +def getfileset(ctx, expr): + tree, pos = parse(expr) + if (pos != len(expr)): + raise error.ParseError(_("invalid token"), pos) + + # do we need status info? 
+ if _intree(['modified', 'added', 'removed', 'deleted', + 'unknown', 'ignored', 'clean'], tree): + unknown = _intree(['unknown'], tree) + ignored = _intree(['ignored'], tree) + + r = ctx._repo + status = r.status(ctx.p1(), ctx, + unknown=unknown, ignored=ignored, clean=True) + subset = [] + for c in status: + subset.extend(c) + else: + status = None + subset = ctx.walk(ctx.match([])) + + return getset(matchctx(ctx, subset, status), tree) + +# tell hggettext to extract docstrings from these functions: +i18nfunctions = symbols.values() diff --git a/websdk/mercurial/graphmod.py b/websdk/mercurial/graphmod.py index 0fda9ed..314f2b8 120000..100644 --- a/websdk/mercurial/graphmod.py +++ b/websdk/mercurial/graphmod.py @@ -1 +1,139 @@ -/usr/share/pyshared/mercurial/graphmod.py \ No newline at end of file +# Revision graph generator for Mercurial +# +# Copyright 2008 Dirkjan Ochtman +# Copyright 2007 Joel Rosdahl +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +"""supports walking the history as DAGs suitable for graphical output + +The most basic format we use is that of:: + + (id, type, data, [parentids]) + +The node and parent ids are arbitrary integers which identify a node in the +context of the graph returned. Type is a constant specifying the node type. +Data depends on type. +""" + +from mercurial.node import nullrev + +CHANGESET = 'C' + +def dagwalker(repo, revs): + """cset DAG generator yielding (id, CHANGESET, ctx, [parentids]) tuples + + This generator function walks through revisions (which should be ordered + from bigger to lower). It returns a tuple for each node. The node and parent + ids are arbitrary integers which identify a node in the context of the graph + returned. 
+ """ + if not revs: + return + + cl = repo.changelog + lowestrev = min(revs) + gpcache = {} + + knownrevs = set(revs) + for rev in revs: + ctx = repo[rev] + parents = sorted(set([p.rev() for p in ctx.parents() + if p.rev() in knownrevs])) + mpars = [p.rev() for p in ctx.parents() if + p.rev() != nullrev and p.rev() not in parents] + + for mpar in mpars: + gp = gpcache.get(mpar) + if gp is None: + gp = gpcache[mpar] = grandparent(cl, lowestrev, revs, mpar) + if not gp: + parents.append(mpar) + else: + parents.extend(g for g in gp if g not in parents) + + yield (ctx.rev(), CHANGESET, ctx, parents) + +def nodes(repo, nodes): + """cset DAG generator yielding (id, CHANGESET, ctx, [parentids]) tuples + + This generator function walks the given nodes. It only returns parents + that are in nodes, too. + """ + include = set(nodes) + for node in nodes: + ctx = repo[node] + parents = set([p.rev() for p in ctx.parents() if p.node() in include]) + yield (ctx.rev(), CHANGESET, ctx, sorted(parents)) + +def colored(dag): + """annotates a DAG with colored edge information + + For each DAG node this function emits tuples:: + + (id, type, data, (col, color), [(col, nextcol, color)]) + + with the following new elements: + + - Tuple (col, color) with column and color index for the current node + - A list of tuples indicating the edges between the current node and its + parents. 
+ """ + seen = [] + colors = {} + newcolor = 1 + for (cur, type, data, parents) in dag: + + # Compute seen and next + if cur not in seen: + seen.append(cur) # new head + colors[cur] = newcolor + newcolor += 1 + + col = seen.index(cur) + color = colors.pop(cur) + next = seen[:] + + # Add parents to next + addparents = [p for p in parents if p not in next] + next[col:col + 1] = addparents + + # Set colors for the parents + for i, p in enumerate(addparents): + if not i: + colors[p] = color + else: + colors[p] = newcolor + newcolor += 1 + + # Add edges to the graph + edges = [] + for ecol, eid in enumerate(seen): + if eid in next: + edges.append((ecol, next.index(eid), colors[eid])) + elif eid == cur: + for p in parents: + edges.append((ecol, next.index(p), color)) + + # Yield and move on + yield (cur, type, data, (col, color), edges) + seen = next + +def grandparent(cl, lowestrev, roots, head): + """Return all ancestors of head in roots which revision is + greater or equal to lowestrev. + """ + pending = set([head]) + seen = set() + kept = set() + llowestrev = max(nullrev, lowestrev) + while pending: + r = pending.pop() + if r >= llowestrev and r not in seen: + if r in roots: + kept.add(r) + else: + pending.update([p for p in cl.parentrevs(r)]) + seen.add(r) + return sorted(kept) diff --git a/websdk/mercurial/hbisect.py b/websdk/mercurial/hbisect.py index a38c013..bce6030 120000..100644 --- a/websdk/mercurial/hbisect.py +++ b/websdk/mercurial/hbisect.py @@ -1 +1,258 @@ -/usr/share/pyshared/mercurial/hbisect.py \ No newline at end of file +# changelog bisection for mercurial +# +# Copyright 2007 Matt Mackall +# Copyright 2005, 2006 Benoit Boissinot +# +# Inspired by git bisect, extension skeleton taken from mq.py. +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. 
+ +import os, error +from i18n import _ +from node import short, hex +import util + +def bisect(changelog, state): + """find the next node (if any) for testing during a bisect search. + returns a (nodes, number, good) tuple. + + 'nodes' is the final result of the bisect if 'number' is 0. + Otherwise 'number' indicates the remaining possible candidates for + the search and 'nodes' contains the next bisect target. + 'good' is True if bisect is searching for a first good changeset, False + if searching for a first bad one. + """ + + clparents = changelog.parentrevs + skip = set([changelog.rev(n) for n in state['skip']]) + + def buildancestors(bad, good): + # only the earliest bad revision matters + badrev = min([changelog.rev(n) for n in bad]) + goodrevs = [changelog.rev(n) for n in good] + goodrev = min(goodrevs) + # build visit array + ancestors = [None] * (len(changelog) + 1) # an extra for [-1] + + # set nodes descended from goodrevs + for rev in goodrevs: + ancestors[rev] = [] + for rev in xrange(goodrev + 1, len(changelog)): + for prev in clparents(rev): + if ancestors[prev] == []: + ancestors[rev] = [] + + # clear good revs from array + for rev in goodrevs: + ancestors[rev] = None + for rev in xrange(len(changelog), goodrev, -1): + if ancestors[rev] is None: + for prev in clparents(rev): + ancestors[prev] = None + + if ancestors[badrev] is None: + return badrev, None + return badrev, ancestors + + good = False + badrev, ancestors = buildancestors(state['bad'], state['good']) + if not ancestors: # looking for bad to good transition? 
+ good = True + badrev, ancestors = buildancestors(state['good'], state['bad']) + bad = changelog.node(badrev) + if not ancestors: # now we're confused + if len(state['bad']) == 1 and len(state['good']) == 1: + raise util.Abort(_("starting revisions are not directly related")) + raise util.Abort(_("inconsistent state, %s:%s is good and bad") + % (badrev, short(bad))) + + # build children dict + children = {} + visit = [badrev] + candidates = [] + while visit: + rev = visit.pop(0) + if ancestors[rev] == []: + candidates.append(rev) + for prev in clparents(rev): + if prev != -1: + if prev in children: + children[prev].append(rev) + else: + children[prev] = [rev] + visit.append(prev) + + candidates.sort() + # have we narrowed it down to one entry? + # or have all other possible candidates besides 'bad' have been skipped? + tot = len(candidates) + unskipped = [c for c in candidates if (c not in skip) and (c != badrev)] + if tot == 1 or not unskipped: + return ([changelog.node(rev) for rev in candidates], 0, good) + perfect = tot // 2 + + # find the best node to test + best_rev = None + best_len = -1 + poison = set() + for rev in candidates: + if rev in poison: + # poison children + poison.update(children.get(rev, [])) + continue + + a = ancestors[rev] or [rev] + ancestors[rev] = None + + x = len(a) # number of ancestors + y = tot - x # number of non-ancestors + value = min(x, y) # how good is this test? + if value > best_len and rev not in skip: + best_len = value + best_rev = rev + if value == perfect: # found a perfect candidate? quit early + break + + if y < perfect and rev not in skip: # all downhill from here? 
+                # poison children
+                poison.update(children.get(rev, []))
+                continue
+
+            for c in children.get(rev, []):
+                if ancestors[c]:
+                    ancestors[c] = list(set(ancestors[c] + a))
+                else:
+                    ancestors[c] = a + [c]
+
+    assert best_rev is not None
+    best_node = changelog.node(best_rev)
+
+    return ([best_node], tot, good)
+
+
+def load_state(repo):
+    state = {'good': [], 'bad': [], 'skip': []}
+    if os.path.exists(repo.join("bisect.state")):
+        for l in repo.opener("bisect.state"):
+            kind, node = l[:-1].split()
+            node = repo.lookup(node)
+            if kind not in state:
+                raise util.Abort(_("unknown bisect kind %s") % kind)
+            state[kind].append(node)
+    return state
+
+
+def save_state(repo, state):
+    f = repo.opener("bisect.state", "w", atomictemp=True)
+    wlock = repo.wlock()
+    try:
+        for kind in state:
+            for node in state[kind]:
+                f.write("%s %s\n" % (kind, hex(node)))
+        f.close()
+    finally:
+        wlock.release()
+
+def get(repo, status):
+    """
+    Return a list of revision(s) that match the given status:
+
+    - ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
+    - ``goods``, ``bads``       : csets topologically good/bad
+    - ``range``                 : csets taking part in the bisection
+    - ``pruned``                : csets that are goods, bads or skipped
+    - ``untested``              : csets whose fate is yet unknown
+    - ``ignored``               : csets ignored due to DAG topology
+    """
+    state = load_state(repo)
+    if status in ('good', 'bad', 'skip'):
+        return [repo.changelog.rev(n) for n in state[status]]
+    else:
+        # In the following sets, we do *not* call 'bisect()' with more
+        # than one level of recursion, because that can be very, very
+        # time consuming. Instead, we always develop the expression as
+        # much as possible.
+
+        # 'range' is all csets that make the bisection:
+        #   - have a good ancestor and a bad descendant, or conversely
+        # that's because the bisection can go either way
+        range = '( bisect(bad)::bisect(good) | bisect(good)::bisect(bad) )'
+
+        _t = [c.rev() for c in repo.set('bisect(good)::bisect(bad)')]
+        # The sets of topologically good or bad csets
+        if len(_t) == 0:
+            # Goods are topologically after bads
+            goods = 'bisect(good)::'    # Pruned good csets
+            bads  = '::bisect(bad)'     # Pruned bad csets
+        else:
+            # Goods are topologically before bads
+            goods = '::bisect(good)'    # Pruned good csets
+            bads  = 'bisect(bad)::'     # Pruned bad csets
+
+        # 'pruned' is all csets whose fate is already known: good, bad, skip
+        skips = 'bisect(skip)'          # Pruned skipped csets
+        pruned = '( (%s) | (%s) | (%s) )' % (goods, bads, skips)
+
+        # 'untested' is all csets that are in 'range', but not in 'pruned'
+        untested = '( (%s) - (%s) )' % (range, pruned)
+
+        # 'ignored' is all csets that were not used during the bisection
+        # due to DAG topology, but may however have had an impact.
+        # E.g., a branch merged between bads and goods, but whose branch-
+        # point is outside of the range.
+ iba = '::bisect(bad) - ::bisect(good)' # Ignored bads' ancestors + iga = '::bisect(good) - ::bisect(bad)' # Ignored goods' ancestors + ignored = '( ( (%s) | (%s) ) - (%s) )' % (iba, iga, range) + + if status == 'range': + return [c.rev() for c in repo.set(range)] + elif status == 'pruned': + return [c.rev() for c in repo.set(pruned)] + elif status == 'untested': + return [c.rev() for c in repo.set(untested)] + elif status == 'ignored': + return [c.rev() for c in repo.set(ignored)] + elif status == "goods": + return [c.rev() for c in repo.set(goods)] + elif status == "bads": + return [c.rev() for c in repo.set(bads)] + + else: + raise error.ParseError(_('invalid bisect state')) + +def label(repo, node, short=False): + rev = repo.changelog.rev(node) + + # Try explicit sets + if rev in get(repo, 'good'): + # i18n: bisect changeset status + return _('good') + if rev in get(repo, 'bad'): + # i18n: bisect changeset status + return _('bad') + if rev in get(repo, 'skip'): + # i18n: bisect changeset status + return _('skipped') + if rev in get(repo, 'untested'): + # i18n: bisect changeset status + return _('untested') + if rev in get(repo, 'ignored'): + # i18n: bisect changeset status + return _('ignored') + + # Try implicit sets + if rev in get(repo, 'goods'): + # i18n: bisect changeset status + return _('good (implicit)') + if rev in get(repo, 'bads'): + # i18n: bisect changeset status + return _('bad (implicit)') + + return None + +def shortlabel(label): + if label: + return label[0].upper() + + return None diff --git a/websdk/mercurial/help.py b/websdk/mercurial/help.py index 687e967..26da99c 120000..100644 --- a/websdk/mercurial/help.py +++ b/websdk/mercurial/help.py @@ -1 +1,109 @@ -/usr/share/pyshared/mercurial/help.py \ No newline at end of file +# help.py - help data for mercurial +# +# Copyright 2006 Matt Mackall +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. 
+ +from i18n import gettext, _ +import sys, os +import extensions, revset, fileset, templatekw, templatefilters +import util + +def listexts(header, exts, indent=1): + '''return a text listing of the given extensions''' + if not exts: + return '' + maxlength = max(len(e) for e in exts) + result = '\n%s\n\n' % header + for name, desc in sorted(exts.iteritems()): + result += '%s%-*s %s\n' % (' ' * indent, maxlength + 2, + ':%s:' % name, desc) + return result + +def extshelp(): + doc = loaddoc('extensions')() + doc += listexts(_('enabled extensions:'), extensions.enabled()) + doc += listexts(_('disabled extensions:'), extensions.disabled()) + return doc + +def loaddoc(topic): + """Return a delayed loader for help/topic.txt.""" + + def loader(): + if util.mainfrozen(): + module = sys.executable + else: + module = __file__ + base = os.path.dirname(module) + + for dir in ('.', '..'): + docdir = os.path.join(base, dir, 'help') + if os.path.isdir(docdir): + break + + path = os.path.join(docdir, topic + ".txt") + doc = gettext(util.readfile(path)) + for rewriter in helphooks.get(topic, []): + doc = rewriter(topic, doc) + return doc + + return loader + +helptable = sorted([ + (["config", "hgrc"], _("Configuration Files"), loaddoc('config')), + (["dates"], _("Date Formats"), loaddoc('dates')), + (["patterns"], _("File Name Patterns"), loaddoc('patterns')), + (['environment', 'env'], _('Environment Variables'), + loaddoc('environment')), + (['revs', 'revisions'], _('Specifying Single Revisions'), + loaddoc('revisions')), + (['mrevs', 'multirevs'], _('Specifying Multiple Revisions'), + loaddoc('multirevs')), + (['revset', 'revsets'], _("Specifying Revision Sets"), loaddoc('revsets')), + (['fileset', 'filesets'], _("Specifying File Sets"), loaddoc('filesets')), + (['diffs'], _('Diff Formats'), loaddoc('diffs')), + (['merge-tools'], _('Merge Tools'), loaddoc('merge-tools')), + (['templating', 'templates'], _('Template Usage'), + loaddoc('templates')), + (['urls'], _('URL Paths'), 
loaddoc('urls')), + (["extensions"], _("Using additional features"), extshelp), + (["subrepo", "subrepos"], _("Subrepositories"), loaddoc('subrepos')), + (["hgweb"], _("Configuring hgweb"), loaddoc('hgweb')), + (["glossary"], _("Glossary"), loaddoc('glossary')), + (["hgignore", "ignore"], _("syntax for Mercurial ignore files"), + loaddoc('hgignore')), +]) + +# Map topics to lists of callable taking the current topic help and +# returning the updated version +helphooks = {} + +def addtopichook(topic, rewriter): + helphooks.setdefault(topic, []).append(rewriter) + +def makeitemsdoc(topic, doc, marker, items): + """Extract docstring from the items key to function mapping, build a + .single documentation block and use it to overwrite the marker in doc + """ + entries = [] + for name in sorted(items): + text = (items[name].__doc__ or '').rstrip() + if not text: + continue + text = gettext(text) + lines = text.splitlines() + lines[1:] = [(' ' + l.strip()) for l in lines[1:]] + entries.append('\n'.join(lines)) + entries = '\n\n'.join(entries) + return doc.replace(marker, entries) + +def addtopicsymbols(topic, marker, symbols): + def add(topic, doc): + return makeitemsdoc(topic, doc, marker, symbols) + addtopichook(topic, add) + +addtopicsymbols('filesets', '.. predicatesmarker', fileset.symbols) +addtopicsymbols('revsets', '.. predicatesmarker', revset.symbols) +addtopicsymbols('templates', '.. keywordsmarker', templatekw.keywords) +addtopicsymbols('templates', '.. filtersmarker', templatefilters.filters) diff --git a/websdk/mercurial/help/config.txt b/websdk/mercurial/help/config.txt new file mode 100644 index 0000000..d259600 --- /dev/null +++ b/websdk/mercurial/help/config.txt @@ -0,0 +1,1312 @@ +The Mercurial system uses a set of configuration files to control +aspects of its behavior. + +The configuration files use a simple ini-file format. 
A configuration +file consists of sections, led by a ``[section]`` header and followed +by ``name = value`` entries:: + + [ui] + username = Firstname Lastname + verbose = True + +The above entries will be referred to as ``ui.username`` and +``ui.verbose``, respectively. See the Syntax section below. + +Files +----- + +Mercurial reads configuration data from several files, if they exist. +These files do not exist by default and you will have to create the +appropriate configuration files yourself: global configuration like +the username setting is typically put into +``%USERPROFILE%\mercurial.ini`` or ``$HOME/.hgrc`` and local +configuration is put into the per-repository ``/.hg/hgrc`` file. + +The names of these files depend on the system on which Mercurial is +installed. ``*.rc`` files from a single directory are read in +alphabetical order, later ones overriding earlier ones. Where multiple +paths are given below, settings from earlier paths override later +ones. + +| (Unix, Windows) ``/.hg/hgrc`` + + Per-repository configuration options that only apply in a + particular repository. This file is not version-controlled, and + will not get transferred during a "clone" operation. Options in + this file override options in all other configuration files. On + Unix, most of this file will be ignored if it doesn't belong to a + trusted user or to a trusted group. See the documentation for the + ``[trusted]`` section below for more details. + +| (Unix) ``$HOME/.hgrc`` +| (Windows) ``%USERPROFILE%\.hgrc`` +| (Windows) ``%USERPROFILE%\Mercurial.ini`` +| (Windows) ``%HOME%\.hgrc`` +| (Windows) ``%HOME%\Mercurial.ini`` + + Per-user configuration file(s), for the user running Mercurial. On + Windows 9x, ``%HOME%`` is replaced by ``%APPDATA%``. Options in these + files apply to all Mercurial commands executed by this user in any + directory. Options in these files override per-system and per-installation + options. 
+ +| (Unix) ``/etc/mercurial/hgrc`` +| (Unix) ``/etc/mercurial/hgrc.d/*.rc`` + + Per-system configuration files, for the system on which Mercurial + is running. Options in these files apply to all Mercurial commands + executed by any user in any directory. Options in these files + override per-installation options. + +| (Unix) ``/etc/mercurial/hgrc`` +| (Unix) ``/etc/mercurial/hgrc.d/*.rc`` + + Per-installation configuration files, searched for in the + directory where Mercurial is installed. ```` is the + parent directory of the **hg** executable (or symlink) being run. For + example, if installed in ``/shared/tools/bin/hg``, Mercurial will look + in ``/shared/tools/etc/mercurial/hgrc``. Options in these files apply + to all Mercurial commands executed by any user in any directory. + +| (Windows) ``\Mercurial.ini`` **or** +| (Windows) ``\hgrc.d\*.rc`` **or** +| (Windows) ``HKEY_LOCAL_MACHINE\SOFTWARE\Mercurial`` + + Per-installation/system configuration files, for the system on + which Mercurial is running. Options in these files apply to all + Mercurial commands executed by any user in any directory. Registry + keys contain PATH-like strings, every part of which must reference + a ``Mercurial.ini`` file or be a directory where ``*.rc`` files will + be read. Mercurial checks each of these locations in the specified + order until one or more configuration files are detected. If the + pywin32 extensions are not installed, Mercurial will only look for + site-wide configuration in ``C:\Mercurial\Mercurial.ini``. + +Syntax +------ + +A configuration file consists of sections, led by a ``[section]`` header +and followed by ``name = value`` entries (sometimes called +``configuration keys``):: + + [spam] + eggs=ham + green= + eggs + +Each line contains one entry. If the lines that follow are indented, +they are treated as continuations of that entry. Leading whitespace is +removed from values. Empty lines are skipped. 
Lines beginning with +``#`` or ``;`` are ignored and may be used to provide comments. + +Configuration keys can be set multiple times, in which case Mercurial +will use the value that was configured last. As an example:: + + [spam] + eggs=large + ham=serrano + eggs=small + +This would set the configuration key named ``eggs`` to ``small``. + +It is also possible to define a section multiple times. A section can +be redefined on the same and/or on different configuration files. For +example:: + + [foo] + eggs=large + ham=serrano + eggs=small + + [bar] + eggs=ham + green= + eggs + + [foo] + ham=prosciutto + eggs=medium + bread=toasted + +This would set the ``eggs``, ``ham``, and ``bread`` configuration keys +of the ``foo`` section to ``medium``, ``prosciutto``, and ``toasted``, +respectively. As you can see there only thing that matters is the last +value that was set for each of the configuration keys. + +If a configuration key is set multiple times in different +configuration files the final value will depend on the order in which +the different configuration files are read, with settings from earlier +paths overriding later ones as described on the ``Files`` section +above. + +A line of the form ``%include file`` will include ``file`` into the +current configuration file. The inclusion is recursive, which means +that included files can include other files. Filenames are relative to +the configuration file in which the ``%include`` directive is found. +Environment variables and ``~user`` constructs are expanded in +``file``. This lets you do something like:: + + %include ~/.hgrc.d/$HOST.rc + +to include a different configuration file on each computer you use. + +A line with ``%unset name`` will remove ``name`` from the current +section, if it has been set previously. + +The values are either free-form text strings, lists of text strings, +or Boolean values. 
Boolean values can be set to true using any of "1", +"yes", "true", or "on" and to false using "0", "no", "false", or "off" +(all case insensitive). + +List values are separated by whitespace or comma, except when values are +placed in double quotation marks:: + + allow_read = "John Doe, PhD", brian, betty + +Quotation marks can be escaped by prefixing them with a backslash. Only +quotation marks at the beginning of a word is counted as a quotation +(e.g., ``foo"bar baz`` is the list of ``foo"bar`` and ``baz``). + +Sections +-------- + +This section describes the different sections that may appear in a +Mercurial configuration file, the purpose of each section, its possible +keys, and their possible values. + +``alias`` +""""""""" + +Defines command aliases. +Aliases allow you to define your own commands in terms of other +commands (or aliases), optionally including arguments. Positional +arguments in the form of ``$1``, ``$2``, etc in the alias definition +are expanded by Mercurial before execution. Positional arguments not +already used by ``$N`` in the definition are put at the end of the +command to be executed. + +Alias definitions consist of lines of the form:: + + = []... + +For example, this definition:: + + latest = log --limit 5 + +creates a new command ``latest`` that shows only the five most recent +changesets. You can define subsequent aliases using earlier ones:: + + stable5 = latest -b stable + +.. note:: It is possible to create aliases with the same names as + existing commands, which will then override the original + definitions. This is almost always a bad idea! + +An alias can start with an exclamation point (``!``) to make it a +shell alias. A shell alias is executed with the shell and will let you +run arbitrary commands. As an example, :: + + echo = !echo + +will let you do ``hg echo foo`` to have ``foo`` printed in your +terminal. 
A better example might be:: + + purge = !$HG status --no-status --unknown -0 | xargs -0 rm + +which will make ``hg purge`` delete all unknown files in the +repository in the same manner as the purge extension. + +Shell aliases are executed in an environment where ``$HG`` expand to +the path of the Mercurial that was used to execute the alias. This is +useful when you want to call further Mercurial commands in a shell +alias, as was done above for the purge alias. In addition, +``$HG_ARGS`` expand to the arguments given to Mercurial. In the ``hg +echo foo`` call above, ``$HG_ARGS`` would expand to ``echo foo``. + +.. note:: Some global configuration options such as ``-R`` are + processed before shell aliases and will thus not be passed to + aliases. + +``auth`` +"""""""" + +Authentication credentials for HTTP authentication. This section +allows you to store usernames and passwords for use when logging +*into* HTTP servers. See the ``[web]`` configuration section if +you want to configure *who* can login to your HTTP server. + +Each line has the following format:: + + . = + +where ```` is used to group arguments into authentication +entries. Example:: + + foo.prefix = hg.intevation.org/mercurial + foo.username = foo + foo.password = bar + foo.schemes = http https + + bar.prefix = secure.example.org + bar.key = path/to/file.key + bar.cert = path/to/file.cert + bar.schemes = https + +Supported arguments: + +``prefix`` + Either ``*`` or a URI prefix with or without the scheme part. + The authentication entry with the longest matching prefix is used + (where ``*`` matches everything and counts as a match of length + 1). If the prefix doesn't include a scheme, the match is performed + against the URI with its scheme stripped as well, and the schemes + argument, q.v., is then subsequently consulted. + +``username`` + Optional. Username to authenticate with. If not given, and the + remote site requires basic or digest authentication, the user will + be prompted for it. 
Environment variables are expanded in the + username letting you do ``foo.username = $USER``. If the URI + includes a username, only ``[auth]`` entries with a matching + username or without a username will be considered. + +``password`` + Optional. Password to authenticate with. If not given, and the + remote site requires basic or digest authentication, the user + will be prompted for it. + +``key`` + Optional. PEM encoded client certificate key file. Environment + variables are expanded in the filename. + +``cert`` + Optional. PEM encoded client certificate chain file. Environment + variables are expanded in the filename. + +``schemes`` + Optional. Space separated list of URI schemes to use this + authentication entry with. Only used if the prefix doesn't include + a scheme. Supported schemes are http and https. They will match + static-http and static-https respectively, as well. + Default: https. + +If no suitable authentication entry is found, the user is prompted +for credentials as usual if required by the remote. + + +``decode/encode`` +""""""""""""""""" + +Filters for transforming files on checkout/checkin. This would +typically be used for newline processing or other +localization/canonicalization of files. + +Filters consist of a filter pattern followed by a filter command. +Filter patterns are globs by default, rooted at the repository root. +For example, to match any file ending in ``.txt`` in the root +directory only, use the pattern ``*.txt``. To match any file ending +in ``.c`` anywhere in the repository, use the pattern ``**.c``. +For each file only the first matching filter applies. + +The filter command can start with a specifier, either ``pipe:`` or +``tempfile:``. If no specifier is given, ``pipe:`` is used by default. + +A ``pipe:`` command must accept data on stdin and return the transformed +data on stdout. 
+ +Pipe example:: + + [encode] + # uncompress gzip files on checkin to improve delta compression + # note: not necessarily a good idea, just an example + *.gz = pipe: gunzip + + [decode] + # recompress gzip files when writing them to the working dir (we + # can safely omit "pipe:", because it's the default) + *.gz = gzip + +A ``tempfile:`` command is a template. The string ``INFILE`` is replaced +with the name of a temporary file that contains the data to be +filtered by the command. The string ``OUTFILE`` is replaced with the name +of an empty temporary file, where the filtered data must be written by +the command. + +.. note:: The tempfile mechanism is recommended for Windows systems, + where the standard shell I/O redirection operators often have + strange effects and may corrupt the contents of your files. + +This filter mechanism is used internally by the ``eol`` extension to +translate line ending characters between Windows (CRLF) and Unix (LF) +format. We suggest you use the ``eol`` extension for convenience. + + +``defaults`` +"""""""""""" + +(defaults are deprecated. Don't use them. Use aliases instead) + +Use the ``[defaults]`` section to define command defaults, i.e. the +default options/arguments to pass to the specified commands. + +The following example makes :hg:`log` run in verbose mode, and +:hg:`status` show only the modified files, by default:: + + [defaults] + log = -v + status = -m + +The actual commands, instead of their aliases, must be used when +defining command defaults. The command defaults will also be applied +to the aliases of the commands defined. + + +``diff`` +"""""""" + +Settings used when displaying diffs. Everything except for ``unified`` is a +Boolean and defaults to False. + +``git`` + Use git extended diff format. + +``nodates`` + Don't include dates in diff headers. + +``showfunc`` + Show which function each change is in. + +``ignorews`` + Ignore white space when comparing lines. 
+ +``ignorewsamount`` + Ignore changes in the amount of white space. + +``ignoreblanklines`` + Ignore changes whose lines are all blank. + +``unified`` + Number of lines of context to show. + +``email`` +""""""""" + +Settings for extensions that send email messages. + +``from`` + Optional. Email address to use in "From" header and SMTP envelope + of outgoing messages. + +``to`` + Optional. Comma-separated list of recipients' email addresses. + +``cc`` + Optional. Comma-separated list of carbon copy recipients' + email addresses. + +``bcc`` + Optional. Comma-separated list of blind carbon copy recipients' + email addresses. + +``method`` + Optional. Method to use to send email messages. If value is ``smtp`` + (default), use SMTP (see the ``[smtp]`` section for configuration). + Otherwise, use as name of program to run that acts like sendmail + (takes ``-f`` option for sender, list of recipients on command line, + message on stdin). Normally, setting this to ``sendmail`` or + ``/usr/sbin/sendmail`` is enough to use sendmail to send messages. + +``charsets`` + Optional. Comma-separated list of character sets considered + convenient for recipients. Addresses, headers, and parts not + containing patches of outgoing messages will be encoded in the + first character set to which conversion from local encoding + (``$HGENCODING``, ``ui.fallbackencoding``) succeeds. If correct + conversion fails, the text in question is sent as is. Defaults to + empty (explicit) list. + + Order of outgoing email character sets: + + 1. ``us-ascii``: always first, regardless of settings + 2. ``email.charsets``: in order given by user + 3. ``ui.fallbackencoding``: if not in email.charsets + 4. ``$HGENCODING``: if not in email.charsets + 5. 
``utf-8``: always last, regardless of settings + +Email example:: + + [email] + from = Joseph User + method = /usr/sbin/sendmail + # charsets for western Europeans + # us-ascii, utf-8 omitted, as they are tried first and last + charsets = iso-8859-1, iso-8859-15, windows-1252 + + +``extensions`` +"""""""""""""" + +Mercurial has an extension mechanism for adding new features. To +enable an extension, create an entry for it in this section. + +If you know that the extension is already in Python's search path, +you can give the name of the module, followed by ``=``, with nothing +after the ``=``. + +Otherwise, give a name that you choose, followed by ``=``, followed by +the path to the ``.py`` file (including the file name extension) that +defines the extension. + +To explicitly disable an extension that is enabled in an hgrc of +broader scope, prepend its path with ``!``, as in ``foo = !/ext/path`` +or ``foo = !`` when path is not supplied. + +Example for ``~/.hgrc``:: + + [extensions] + # (the mq extension will get loaded from Mercurial's path) + mq = + # (this extension will get loaded from the file specified) + myfeature = ~/.hgext/myfeature.py + + +``hostfingerprints`` +"""""""""""""""""""" + +Fingerprints of the certificates of known HTTPS servers. +A HTTPS connection to a server with a fingerprint configured here will +only succeed if the servers certificate matches the fingerprint. +This is very similar to how ssh known hosts works. +The fingerprint is the SHA-1 hash value of the DER encoded certificate. +The CA chain and web.cacerts is not used for servers with a fingerprint. + +For example:: + + [hostfingerprints] + hg.intevation.org = 38:76:52:7c:87:26:9a:8f:4a:f8:d3:de:08:45:3b:ea:d6:4b:ee:cc + +This feature is only supported when using Python 2.6 or later. + + +``format`` +"""""""""" + +``usestore`` + Enable or disable the "store" repository format which improves + compatibility with systems that fold case or otherwise mangle + filenames. 
Enabled by default. Disabling this option will allow + you to store longer filenames in some situations at the expense of + compatibility and ensures that the on-disk format of newly created + repositories will be compatible with Mercurial before version 0.9.4. + +``usefncache`` + Enable or disable the "fncache" repository format which enhances + the "store" repository format (which has to be enabled to use + fncache) to allow longer filenames and avoids using Windows + reserved names, e.g. "nul". Enabled by default. Disabling this + option ensures that the on-disk format of newly created + repositories will be compatible with Mercurial before version 1.1. + +``dotencode`` + Enable or disable the "dotencode" repository format which enhances + the "fncache" repository format (which has to be enabled to use + dotencode) to avoid issues with filenames starting with ._ on + Mac OS X and spaces on Windows. Enabled by default. Disabling this + option ensures that the on-disk format of newly created + repositories will be compatible with Mercurial before version 1.7. + +``merge-patterns`` +"""""""""""""""""" + +This section specifies merge tools to associate with particular file +patterns. Tools matched here will take precedence over the default +merge tool. Patterns are globs by default, rooted at the repository +root. + +Example:: + + [merge-patterns] + **.c = kdiff3 + **.jpg = myimgmerge + +``merge-tools`` +""""""""""""""" + +This section configures external merge tools to use for file-level +merges. + +Example ``~/.hgrc``:: + + [merge-tools] + # Override stock tool location + kdiff3.executable = ~/bin/kdiff3 + # Specify command line + kdiff3.args = $base $local $other -o $output + # Give higher priority + kdiff3.priority = 1 + + # Define new tool + myHtmlTool.args = -m $local $other $base $output + myHtmlTool.regkey = Software\FooSoftware\HtmlMerge + myHtmlTool.priority = 1 + +Supported arguments: + +``priority`` + The priority in which to evaluate this tool. 
+ Default: 0. + +``executable`` + Either just the name of the executable or its pathname. On Windows, + the path can use environment variables with ${ProgramFiles} syntax. + Default: the tool name. + +``args`` + The arguments to pass to the tool executable. You can refer to the + files being merged as well as the output file through these + variables: ``$base``, ``$local``, ``$other``, ``$output``. + Default: ``$local $base $other`` + +``premerge`` + Attempt to run internal non-interactive 3-way merge tool before + launching external tool. Options are ``true``, ``false``, or ``keep`` + to leave markers in the file if the premerge fails. + Default: True + +``binary`` + This tool can merge binary files. Defaults to False, unless tool + was selected by file pattern match. + +``symlink`` + This tool can merge symlinks. Defaults to False, even if tool was + selected by file pattern match. + +``check`` + A list of merge success-checking options: + + ``changed`` + Ask whether merge was successful when the merged file shows no changes. + ``conflicts`` + Check whether there are conflicts even though the tool reported success. + ``prompt`` + Always prompt for merge success, regardless of success reported by tool. + +``checkchanged`` + True is equivalent to ``check = changed``. + Default: False + +``checkconflicts`` + True is equivalent to ``check = conflicts``. + Default: False + +``fixeol`` + Attempt to fix up EOL changes caused by the merge tool. + Default: False + +``gui`` + This tool requires a graphical interface to run. Default: False + +``regkey`` + Windows registry key which describes install location of this + tool. Mercurial will search for this key first under + ``HKEY_CURRENT_USER`` and then under ``HKEY_LOCAL_MACHINE``. + Default: None + +``regkeyalt`` + An alternate Windows registry key to try if the first key is not + found. The alternate key uses the same ``regname`` and ``regappend`` + semantics of the primary key. 
The most common use for this key + is to search for 32bit applications on 64bit operating systems. + Default: None + +``regname`` + Name of value to read from specified registry key. Defaults to the + unnamed (default) value. + +``regappend`` + String to append to the value read from the registry, typically + the executable name of the tool. + Default: None + + +``hooks`` +""""""""" + +Commands or Python functions that get automatically executed by +various actions such as starting or finishing a commit. Multiple +hooks can be run for the same action by appending a suffix to the +action. Overriding a site-wide hook can be done by changing its +value or setting it to an empty string. + +Example ``.hg/hgrc``:: + + [hooks] + # update working directory after adding changesets + changegroup.update = hg update + # do not use the site-wide hook + incoming = + incoming.email = /my/email/hook + incoming.autobuild = /my/build/hook + +Most hooks are run with environment variables set that give useful +additional information. For each hook below, the environment +variables it is passed are listed with names of the form ``$HG_foo``. + +``changegroup`` + Run after a changegroup has been added via push, pull or unbundle. + ID of the first new changeset is in ``$HG_NODE``. URL from which + changes came is in ``$HG_URL``. + +``commit`` + Run after a changeset has been created in the local repository. ID + of the newly created changeset is in ``$HG_NODE``. Parent changeset + IDs are in ``$HG_PARENT1`` and ``$HG_PARENT2``. + +``incoming`` + Run after a changeset has been pulled, pushed, or unbundled into + the local repository. The ID of the newly arrived changeset is in + ``$HG_NODE``. URL that was source of changes came is in ``$HG_URL``. + +``outgoing`` + Run after sending changes from local repository to another. ID of + first changeset sent is in ``$HG_NODE``. Source of operation is in + ``$HG_SOURCE``; see "preoutgoing" hook for description. 
+ +``post-`` + Run after successful invocations of the associated command. The + contents of the command line are passed as ``$HG_ARGS`` and the result + code in ``$HG_RESULT``. Parsed command line arguments are passed as + ``$HG_PATS`` and ``$HG_OPTS``. These contain string representations of + the python data internally passed to . ``$HG_OPTS`` is a + dictionary of options (with unspecified options set to their defaults). + ``$HG_PATS`` is a list of arguments. Hook failure is ignored. + +``pre-`` + Run before executing the associated command. The contents of the + command line are passed as ``$HG_ARGS``. Parsed command line arguments + are passed as ``$HG_PATS`` and ``$HG_OPTS``. These contain string + representations of the data internally passed to . ``$HG_OPTS`` + is a dictionary of options (with unspecified options set to their + defaults). ``$HG_PATS`` is a list of arguments. If the hook returns + failure, the command doesn't execute and Mercurial returns the failure + code. + +``prechangegroup`` + Run before a changegroup is added via push, pull or unbundle. Exit + status 0 allows the changegroup to proceed. Non-zero status will + cause the push, pull or unbundle to fail. URL from which changes + will come is in ``$HG_URL``. + +``precommit`` + Run before starting a local commit. Exit status 0 allows the + commit to proceed. Non-zero status will cause the commit to fail. + Parent changeset IDs are in ``$HG_PARENT1`` and ``$HG_PARENT2``. + +``prelistkeys`` + Run before listing pushkeys (like bookmarks) in the + repository. Non-zero status will cause failure. The key namespace is + in ``$HG_NAMESPACE``. + +``preoutgoing`` + Run before collecting changes to send from the local repository to + another. Non-zero status will cause failure. This lets you prevent + pull over HTTP or SSH. Also prevents against local pull, push + (outbound) or bundle commands, but not effective, since you can + just copy files instead then. Source of operation is in + ``$HG_SOURCE``. 
If "serve", operation is happening on behalf of remote + SSH or HTTP repository. If "push", "pull" or "bundle", operation + is happening on behalf of repository on same system. + +``prepushkey`` + Run before a pushkey (like a bookmark) is added to the + repository. Non-zero status will cause the key to be rejected. The + key namespace is in ``$HG_NAMESPACE``, the key is in ``$HG_KEY``, + the old value (if any) is in ``$HG_OLD``, and the new value is in + ``$HG_NEW``. + +``pretag`` + Run before creating a tag. Exit status 0 allows the tag to be + created. Non-zero status will cause the tag to fail. ID of + changeset to tag is in ``$HG_NODE``. Name of tag is in ``$HG_TAG``. Tag is + local if ``$HG_LOCAL=1``, in repository if ``$HG_LOCAL=0``. + +``pretxnchangegroup`` + Run after a changegroup has been added via push, pull or unbundle, + but before the transaction has been committed. Changegroup is + visible to hook program. This lets you validate incoming changes + before accepting them. Passed the ID of the first new changeset in + ``$HG_NODE``. Exit status 0 allows the transaction to commit. Non-zero + status will cause the transaction to be rolled back and the push, + pull or unbundle will fail. URL that was source of changes is in + ``$HG_URL``. + +``pretxncommit`` + Run after a changeset has been created but the transaction not yet + committed. Changeset is visible to hook program. This lets you + validate commit message and changes. Exit status 0 allows the + commit to proceed. Non-zero status will cause the transaction to + be rolled back. ID of changeset is in ``$HG_NODE``. Parent changeset + IDs are in ``$HG_PARENT1`` and ``$HG_PARENT2``. + +``preupdate`` + Run before updating the working directory. Exit status 0 allows + the update to proceed. Non-zero status will prevent the update. + Changeset ID of first new parent is in ``$HG_PARENT1``. If merge, ID + of second new parent is in ``$HG_PARENT2``. 
+ +``listkeys`` + Run after listing pushkeys (like bookmarks) in the repository. The + key namespace is in ``$HG_NAMESPACE``. ``$HG_VALUES`` is a + dictionary containing the keys and values. + +``pushkey`` + Run after a pushkey (like a bookmark) is added to the + repository. The key namespace is in ``$HG_NAMESPACE``, the key is in + ``$HG_KEY``, the old value (if any) is in ``$HG_OLD``, and the new + value is in ``$HG_NEW``. + +``tag`` + Run after a tag is created. ID of tagged changeset is in ``$HG_NODE``. + Name of tag is in ``$HG_TAG``. Tag is local if ``$HG_LOCAL=1``, in + repository if ``$HG_LOCAL=0``. + +``update`` + Run after updating the working directory. Changeset ID of first + new parent is in ``$HG_PARENT1``. If merge, ID of second new parent is + in ``$HG_PARENT2``. If the update succeeded, ``$HG_ERROR=0``. If the + update failed (e.g. because conflicts not resolved), ``$HG_ERROR=1``. + +.. note:: It is generally better to use standard hooks rather than the + generic pre- and post- command hooks as they are guaranteed to be + called in the appropriate contexts for influencing transactions. + Also, hooks like "commit" will be called in all contexts that + generate a commit (e.g. tag) and not just the commit command. + +.. note:: Environment variables with empty values may not be passed to + hooks on platforms such as Windows. As an example, ``$HG_PARENT2`` + will have an empty value under Unix-like platforms for non-merge + changesets, while it will not be available at all under Windows. + +The syntax for Python hooks is as follows:: + + hookname = python:modulename.submodule.callable + hookname = python:/path/to/python/module.py:callable + +Python hooks are run within the Mercurial process. Each hook is +called with at least three keyword arguments: a ui object (keyword +``ui``), a repository object (keyword ``repo``), and a ``hooktype`` +keyword that tells what kind of hook is used. 
Arguments listed as +environment variables above are passed as keyword arguments, with no +``HG_`` prefix, and names in lower case. + +If a Python hook returns a "true" value or raises an exception, this +is treated as a failure. + + +``http_proxy`` +"""""""""""""" + +Used to access web-based Mercurial repositories through a HTTP +proxy. + +``host`` + Host name and (optional) port of the proxy server, for example + "myproxy:8000". + +``no`` + Optional. Comma-separated list of host names that should bypass + the proxy. + +``passwd`` + Optional. Password to authenticate with at the proxy server. + +``user`` + Optional. User name to authenticate with at the proxy server. + +``always`` + Optional. Always use the proxy, even for localhost and any entries + in ``http_proxy.no``. True or False. Default: False. + +``smtp`` +"""""""" + +Configuration for extensions that need to send email messages. + +``host`` + Host name of mail server, e.g. "mail.example.com". + +``port`` + Optional. Port to connect to on mail server. Default: 25. + +``tls`` + Optional. Method to enable TLS when connecting to mail server: starttls, + smtps or none. Default: none. + +``username`` + Optional. User name for authenticating with the SMTP server. + Default: none. + +``password`` + Optional. Password for authenticating with the SMTP server. If not + specified, interactive sessions will prompt the user for a + password; non-interactive sessions will fail. Default: none. + +``local_hostname`` + Optional. It's the hostname that the sender can use to identify + itself to the MTA. + + +``patch`` +""""""""" + +Settings used when applying patches, for instance through the 'import' +command or with Mercurial Queues extension. + +``eol`` + When set to 'strict' patch content and patched files end of lines + are preserved. When set to ``lf`` or ``crlf``, both files end of + lines are ignored when patching and the result line endings are + normalized to either LF (Unix) or CRLF (Windows). 
When set to + ``auto``, end of lines are again ignored while patching but line + endings in patched files are normalized to their original setting + on a per-file basis. If target file does not exist or has no end + of line, patch line endings are preserved. + Default: strict. + + +``paths`` +""""""""" + +Assigns symbolic names to repositories. The left side is the +symbolic name, and the right gives the directory or URL that is the +location of the repository. Default paths can be declared by setting +the following entries. + +``default`` + Directory or URL to use when pulling if no source is specified. + Default is set to repository from which the current repository was + cloned. + +``default-push`` + Optional. Directory or URL to use when pushing if no destination + is specified. + + +``profiling`` +""""""""""""" + +Specifies profiling format and file output. In this section +description, 'profiling data' stands for the raw data collected +during profiling, while 'profiling report' stands for a statistical +text report generated from the profiling data. The profiling is done +using lsprof. + +``format`` + Profiling format. + Default: text. + + ``text`` + Generate a profiling report. When saving to a file, it should be + noted that only the report is saved, and the profiling data is + not kept. + ``kcachegrind`` + Format profiling data for kcachegrind use: when saving to a + file, the generated file can directly be loaded into + kcachegrind. + +``output`` + File path where profiling data or report should be saved. If the + file exists, it is replaced. Default: None, data is printed on + stderr + +``revsetalias`` +""""""""""""""" + +Alias definitions for revsets. See :hg:`help revsets` for details. + +``server`` +"""""""""" + +Controls generic server settings. + +``uncompressed`` + Whether to allow clients to clone a repository using the + uncompressed streaming protocol. 
This transfers about 40% more
+  data than a regular clone, but uses less memory and CPU on both
+  server and client. Over a LAN (100 Mbps or better) or a very fast
+  WAN, an uncompressed streaming clone is a lot faster (~10x) than a
+  regular clone. Over most WAN connections (anything slower than
+  about 6 Mbps), uncompressed streaming is slower, because of the
+  extra data transfer overhead. This mode will also temporarily hold
+  the write lock while determining what data to transfer.
+  Default is True.
+
+``validate``
+  Whether to validate the completeness of pushed changesets by
+  checking that all new file revisions specified in manifests are
+  present. Default is False.
+
+``subpaths``
+""""""""""""
+
+Defines subrepositories source locations rewriting rules of the form::
+
+  <pattern> = <replacement>
+
+Where ``pattern`` is a regular expression matching the source and
+``replacement`` is the replacement string used to rewrite it. Groups
+can be matched in ``pattern`` and referenced in ``replacements``. For
+instance::
+
+  http://server/(.*)-hg/ = http://hg.server/\1/
+
+rewrites ``http://server/foo-hg/`` into ``http://hg.server/foo/``.
+
+All patterns are applied in definition order.
+
+``trusted``
+"""""""""""
+
+Mercurial will not use the settings in the
+``.hg/hgrc`` file from a repository if it doesn't belong to a trusted
+user or to a trusted group, as various hgrc features allow arbitrary
+commands to be run. This issue is often encountered when configuring
+hooks or extensions for shared repositories or servers. However,
+the web interface will use some safe settings from the ``[web]``
+section.
+
+This section specifies what users and groups are trusted. The
+current user is always trusted. To trust everybody, list a user or a
+group with name ``*``. These settings must be placed in an
+*already-trusted file* to take effect, such as ``$HOME/.hgrc`` of the
+user or service running Mercurial.
+
+``users``
+  Comma-separated list of trusted users. 
+ +``groups`` + Comma-separated list of trusted groups. + + +``ui`` +"""""" + +User interface controls. + +``archivemeta`` + Whether to include the .hg_archival.txt file containing meta data + (hashes for the repository base and for tip) in archives created + by the :hg:`archive` command or downloaded via hgweb. + Default is True. + +``askusername`` + Whether to prompt for a username when committing. If True, and + neither ``$HGUSER`` nor ``$EMAIL`` has been specified, then the user will + be prompted to enter a username. If no username is entered, the + default ``USER@HOST`` is used instead. + Default is False. + +``commitsubrepos`` + Whether to commit modified subrepositories when committing the + parent repository. If False and one subrepository has uncommitted + changes, abort the commit. + Default is False. + +``debug`` + Print debugging information. True or False. Default is False. + +``editor`` + The editor to use during a commit. Default is ``$EDITOR`` or ``vi``. + +``fallbackencoding`` + Encoding to try if it's not possible to decode the changelog using + UTF-8. Default is ISO-8859-1. + +``ignore`` + A file to read per-user ignore patterns from. This file should be + in the same format as a repository-wide .hgignore file. This + option supports hook syntax, so if you want to specify multiple + ignore files, you can do so by setting something like + ``ignore.other = ~/.hgignore2``. For details of the ignore file + format, see the ``hgignore(5)`` man page. + +``interactive`` + Allow to prompt the user. True or False. Default is True. + +``logtemplate`` + Template string for commands that print changesets. + +``merge`` + The conflict resolution program to use during a manual merge. + For more information on merge tools see :hg:`help merge-tools`. + For configuring merge tools see the ``[merge-tools]`` section. + +``portablefilenames`` + Check for portable filenames. Can be ``warn``, ``ignore`` or ``abort``. + Default is ``warn``. 
+  If set to ``warn`` (or ``true``), a warning message is printed on POSIX
+  platforms, if a file with a non-portable filename is added (e.g. a file
+  with a name that can't be created on Windows because it contains reserved
+  parts like ``AUX``, reserved characters like ``:``, or would cause a case
+  collision with an existing file).
+  If set to ``ignore`` (or ``false``), no warning is printed.
+  If set to ``abort``, the command is aborted.
+  On Windows, this configuration option is ignored and the command aborted.
+
+``quiet``
+  Reduce the amount of output printed. True or False. Default is False.
+
+``remotecmd``
+  remote command to use for clone/push/pull operations. Default is ``hg``.
+
+``report_untrusted``
+  Warn if a ``.hg/hgrc`` file is ignored due to not being owned by a
+  trusted user or group. True or False. Default is True.
+
+``slash``
+  Display paths using a slash (``/``) as the path separator. This
+  only makes a difference on systems where the default path
+  separator is not the slash character (e.g. Windows uses the
+  backslash character (``\``)).
+  Default is False.
+
+``ssh``
+  command to use for SSH connections. Default is ``ssh``.
+
+``strict``
+  Require exact command names, instead of allowing unambiguous
+  abbreviations. True or False. Default is False.
+
+``style``
+  Name of style to use for command output.
+
+``timeout``
+  The timeout used when a lock is held (in seconds), a negative value
+  means no timeout. Default is 600.
+
+``traceback``
+  Mercurial always prints a traceback when an unknown exception
+  occurs. Setting this to True will make Mercurial print a traceback
+  on all exceptions, even those recognized by Mercurial (such as
+  IOError or MemoryError). Default is False.
+
+``username``
+  The committer of a changeset created when running "commit".
+  Typically a person's name and email address, e.g. ``Fred Widget
+  <fred@widget.org>``. Default is ``$EMAIL`` or ``username@hostname``. 
If + the username in hgrc is empty, it has to be specified manually or + in a different hgrc file (e.g. ``$HOME/.hgrc``, if the admin set + ``username =`` in the system hgrc). Environment variables in the + username are expanded. + +``verbose`` + Increase the amount of output printed. True or False. Default is False. + + +``web`` +""""""" + +Web interface configuration. The settings in this section apply to +both the builtin webserver (started by :hg:`serve`) and the script you +run through a webserver (``hgweb.cgi`` and the derivatives for FastCGI +and WSGI). + +The Mercurial webserver does no authentication (it does not prompt for +usernames and passwords to validate *who* users are), but it does do +authorization (it grants or denies access for *authenticated users* +based on settings in this section). You must either configure your +webserver to do authentication for you, or disable the authorization +checks. + +For a quick setup in a trusted environment, e.g., a private LAN, where +you want it to accept pushes from anybody, you can use the following +command line:: + + $ hg --config web.allow_push=* --config web.push_ssl=False serve + +Note that this will allow anybody to push anything to the server and +that this should not be used for public servers. + +The full set of options is: + +``accesslog`` + Where to output the access log. Default is stdout. + +``address`` + Interface address to bind to. Default is all. + +``allow_archive`` + List of archive format (bz2, gz, zip) allowed for downloading. + Default is empty. + +``allowbz2`` + (DEPRECATED) Whether to allow .tar.bz2 downloading of repository + revisions. + Default is False. + +``allowgz`` + (DEPRECATED) Whether to allow .tar.gz downloading of repository + revisions. + Default is False. + +``allowpull`` + Whether to allow pulling from the repository. Default is True. + +``allow_push`` + Whether to allow pushing to the repository. If empty or not set, + push is not allowed. 
If the special value ``*``, any remote user can + push, including unauthenticated users. Otherwise, the remote user + must have been authenticated, and the authenticated user name must + be present in this list. The contents of the allow_push list are + examined after the deny_push list. + +``guessmime`` + Control MIME types for raw download of file content. + Set to True to let hgweb guess the content type from the file + extension. This will serve HTML files as ``text/html`` and might + allow cross-site scripting attacks when serving untrusted + repositories. Default is False. + +``allow_read`` + If the user has not already been denied repository access due to + the contents of deny_read, this list determines whether to grant + repository access to the user. If this list is not empty, and the + user is unauthenticated or not present in the list, then access is + denied for the user. If the list is empty or not set, then access + is permitted to all users by default. Setting allow_read to the + special value ``*`` is equivalent to it not being set (i.e. access + is permitted to all users). The contents of the allow_read list are + examined after the deny_read list. + +``allowzip`` + (DEPRECATED) Whether to allow .zip downloading of repository + revisions. Default is False. This feature creates temporary files. + +``baseurl`` + Base URL to use when publishing URLs in other locations, so + third-party tools like email notification hooks can construct + URLs. Example: ``http://hgserver/repos/``. + +``cacerts`` + Path to file containing a list of PEM encoded certificate + authority certificates. Environment variables and ``~user`` + constructs are expanded in the filename. If specified on the + client, then it will verify the identity of remote HTTPS servers + with these certificates. The form must be as follows:: + + -----BEGIN CERTIFICATE----- + ... (certificate in base64 PEM encoding) ... + -----END CERTIFICATE----- + -----BEGIN CERTIFICATE----- + ... 
(certificate in base64 PEM encoding) ... + -----END CERTIFICATE----- + + This feature is only supported when using Python 2.6 or later. If you wish + to use it with earlier versions of Python, install the backported + version of the ssl library that is available from + ``http://pypi.python.org``. + + You can use OpenSSL's CA certificate file if your platform has one. + On most Linux systems this will be ``/etc/ssl/certs/ca-certificates.crt``. + Otherwise you will have to generate this file manually. + + To disable SSL verification temporarily, specify ``--insecure`` from + command line. + +``cache`` + Whether to support caching in hgweb. Defaults to True. + +``contact`` + Name or email address of the person in charge of the repository. + Defaults to ui.username or ``$EMAIL`` or "unknown" if unset or empty. + +``deny_push`` + Whether to deny pushing to the repository. If empty or not set, + push is not denied. If the special value ``*``, all remote users are + denied push. Otherwise, unauthenticated users are all denied, and + any authenticated user name present in this list is also denied. The + contents of the deny_push list are examined before the allow_push list. + +``deny_read`` + Whether to deny reading/viewing of the repository. If this list is + not empty, unauthenticated users are all denied, and any + authenticated user name present in this list is also denied access to + the repository. If set to the special value ``*``, all remote users + are denied access (rarely needed ;). If deny_read is empty or not set, + the determination of repository access depends on the presence and + content of the allow_read list (see description). If both + deny_read and allow_read are empty or not set, then access is + permitted to all users by default. If the repository is being + served via hgwebdir, denied users will not be able to see it in + the list of repositories. 
The contents of the deny_read list have + priority over (are examined before) the contents of the allow_read + list. + +``descend`` + hgwebdir indexes will not descend into subdirectories. Only repositories + directly in the current path will be shown (other repositories are still + available from the index corresponding to their containing path). + +``description`` + Textual description of the repository's purpose or contents. + Default is "unknown". + +``encoding`` + Character encoding name. Default is the current locale charset. + Example: "UTF-8" + +``errorlog`` + Where to output the error log. Default is stderr. + +``hidden`` + Whether to hide the repository in the hgwebdir index. + Default is False. + +``ipv6`` + Whether to use IPv6. Default is False. + +``logoimg`` + File name of the logo image that some templates display on each page. + The file name is relative to ``staticurl``. That is, the full path to + the logo image is "staticurl/logoimg". + If unset, ``hglogo.png`` will be used. + +``logourl`` + Base URL to use for logos. If unset, ``http://mercurial.selenic.com/`` + will be used. + +``name`` + Repository name to use in the web interface. Default is current + working directory. + +``maxchanges`` + Maximum number of changes to list on the changelog. Default is 10. + +``maxfiles`` + Maximum number of files to list per changeset. Default is 10. + +``port`` + Port to listen on. Default is 8000. + +``prefix`` + Prefix path to serve from. Default is '' (server root). + +``push_ssl`` + Whether to require that inbound pushes be transported over SSL to + prevent password sniffing. Default is True. + +``staticurl`` + Base URL to use for static files. If unset, static files (e.g. the + hgicon.png favicon) will be served by the CGI script itself. Use + this setting to serve them directly with the HTTP server. + Example: ``http://hgserver/static/``. + +``stripes`` + How many lines a "zebra stripe" should span in multiline output. 
+ Default is 1; set to 0 to disable. + +``style`` + Which template map style to use. + +``templates`` + Where to find the HTML templates. Default is install path. diff --git a/websdk/mercurial/help/dates.txt b/websdk/mercurial/help/dates.txt new file mode 100644 index 0000000..5f48979 --- /dev/null +++ b/websdk/mercurial/help/dates.txt @@ -0,0 +1,36 @@ +Some commands allow the user to specify a date, e.g.: + +- backout, commit, import, tag: Specify the commit date. +- log, revert, update: Select revision(s) by date. + +Many date formats are valid. Here are some examples: + +- ``Wed Dec 6 13:18:29 2006`` (local timezone assumed) +- ``Dec 6 13:18 -0600`` (year assumed, time offset provided) +- ``Dec 6 13:18 UTC`` (UTC and GMT are aliases for +0000) +- ``Dec 6`` (midnight) +- ``13:18`` (today assumed) +- ``3:39`` (3:39AM assumed) +- ``3:39pm`` (15:39) +- ``2006-12-06 13:18:29`` (ISO 8601 format) +- ``2006-12-6 13:18`` +- ``2006-12-6`` +- ``12-6`` +- ``12/6`` +- ``12/6/6`` (Dec 6 2006) + +Lastly, there is Mercurial's internal format: + +- ``1165432709 0`` (Wed Dec 6 13:18:29 2006 UTC) + +This is the internal representation format for dates. The first number +is the number of seconds since the epoch (1970-01-01 00:00 UTC). The +second is the offset of the local timezone, in seconds west of UTC +(negative if the timezone is east of UTC). + +The log command also accepts date ranges: + +- ``DATE`` - on or after a given date/time +- ``DATE to DATE`` - a date range, inclusive +- ``-DAYS`` - within a given number of days of today diff --git a/websdk/mercurial/help/diffs.txt b/websdk/mercurial/help/diffs.txt new file mode 100644 index 0000000..9ede0a5 --- /dev/null +++ b/websdk/mercurial/help/diffs.txt @@ -0,0 +1,29 @@ +Mercurial's default format for showing changes between two versions of +a file is compatible with the unified format of GNU diff, which can be +used by GNU patch and many other standard tools. 
+ +While this standard format is often enough, it does not encode the +following information: + +- executable status and other permission bits +- copy or rename information +- changes in binary files +- creation or deletion of empty files + +Mercurial also supports the extended diff format from the git VCS +which addresses these limitations. The git diff format is not produced +by default because a few widespread tools still do not understand this +format. + +This means that when generating diffs from a Mercurial repository +(e.g. with :hg:`export`), you should be careful about things like file +copies and renames or other things mentioned above, because when +applying a standard diff to a different repository, this extra +information is lost. Mercurial's internal operations (like push and +pull) are not affected by this, because they use an internal binary +format for communicating changes. + +To make Mercurial produce the git extended diff format, use the --git +option available for many commands, or set 'git = True' in the [diff] +section of your configuration file. You do not need to set this option +when importing diffs in this format or using them in the mq extension. diff --git a/websdk/mercurial/help/environment.txt b/websdk/mercurial/help/environment.txt new file mode 100644 index 0000000..5e6771f --- /dev/null +++ b/websdk/mercurial/help/environment.txt @@ -0,0 +1,101 @@ +HG + Path to the 'hg' executable, automatically passed when running + hooks, extensions or external tools. If unset or empty, this is + the hg executable's name if it's frozen, or an executable named + 'hg' (with %PATHEXT% [defaulting to COM/EXE/BAT/CMD] extensions on + Windows) is searched. + +HGEDITOR + This is the name of the editor to run when committing. See EDITOR. + + (deprecated, use configuration file) + +HGENCODING + This overrides the default locale setting detected by Mercurial. 
+ This setting is used to convert data including usernames, + changeset descriptions, tag names, and branches. This setting can + be overridden with the --encoding command-line option. + +HGENCODINGMODE + This sets Mercurial's behavior for handling unknown characters + while transcoding user input. The default is "strict", which + causes Mercurial to abort if it can't map a character. Other + settings include "replace", which replaces unknown characters, and + "ignore", which drops them. This setting can be overridden with + the --encodingmode command-line option. + +HGENCODINGAMBIGUOUS + This sets Mercurial's behavior for handling characters with + "ambiguous" widths like accented Latin characters with East Asian + fonts. By default, Mercurial assumes ambiguous characters are + narrow, set this variable to "wide" if such characters cause + formatting problems. + +HGMERGE + An executable to use for resolving merge conflicts. The program + will be executed with three arguments: local file, remote file, + ancestor file. + + (deprecated, use configuration file) + +HGRCPATH + A list of files or directories to search for configuration + files. Item separator is ":" on Unix, ";" on Windows. If HGRCPATH + is not set, platform default search path is used. If empty, only + the .hg/hgrc from the current repository is read. + + For each element in HGRCPATH: + + - if it's a directory, all files ending with .rc are added + - otherwise, the file itself will be added + +HGPLAIN + When set, this disables any configuration settings that might + change Mercurial's default output. This includes encoding, + defaults, verbose mode, debug mode, quiet mode, tracebacks, and + localization. This can be useful when scripting against Mercurial + in the face of existing user configuration. + + Equivalent options set via command line flags or environment + variables are not overridden. + +HGPLAINEXCEPT + This is a comma-separated list of features to preserve when + HGPLAIN is enabled. 
Currently the only value supported is "i18n", + which preserves internationalization in plain mode. + + Setting HGPLAINEXCEPT to anything (even an empty string) will + enable plain mode. + +HGUSER + This is the string used as the author of a commit. If not set, + available values will be considered in this order: + + - HGUSER (deprecated) + - configuration files from the HGRCPATH + - EMAIL + - interactive prompt + - LOGNAME (with ``@hostname`` appended) + + (deprecated, use configuration file) + +EMAIL + May be used as the author of a commit; see HGUSER. + +LOGNAME + May be used as the author of a commit; see HGUSER. + +VISUAL + This is the name of the editor to use when committing. See EDITOR. + +EDITOR + Sometimes Mercurial needs to open a text file in an editor for a + user to modify, for example when writing commit messages. The + editor it uses is determined by looking at the environment + variables HGEDITOR, VISUAL and EDITOR, in that order. The first + non-empty one is chosen. If all of them are empty, the editor + defaults to 'vi'. + +PYTHONPATH + This is used by Python to find imported modules and may need to be + set appropriately if this Mercurial is not installed system-wide. diff --git a/websdk/mercurial/help/extensions.txt b/websdk/mercurial/help/extensions.txt new file mode 100644 index 0000000..f3d2992 --- /dev/null +++ b/websdk/mercurial/help/extensions.txt @@ -0,0 +1,33 @@ +Mercurial has the ability to add new features through the use of +extensions. Extensions may add new commands, add options to +existing commands, change the default behavior of commands, or +implement hooks. + +Extensions are not loaded by default for a variety of reasons: +they can increase startup overhead; they may be meant for advanced +usage only; they may provide potentially dangerous abilities (such +as letting you destroy or modify history); they might not be ready +for prime time; or they may alter some usual behaviors of stock +Mercurial. 
It is thus up to the user to activate extensions as +needed. + +To enable the "foo" extension, either shipped with Mercurial or in the +Python search path, create an entry for it in your configuration file, +like this:: + + [extensions] + foo = + +You may also specify the full path to an extension:: + + [extensions] + myfeature = ~/.hgext/myfeature.py + +To explicitly disable an extension enabled in a configuration file of +broader scope, prepend its path with !:: + + [extensions] + # disabling extension bar residing in /path/to/extension/bar.py + bar = !/path/to/extension/bar.py + # ditto, but no path was supplied for extension baz + baz = ! diff --git a/websdk/mercurial/help/filesets.txt b/websdk/mercurial/help/filesets.txt new file mode 100644 index 0000000..c929d79 --- /dev/null +++ b/websdk/mercurial/help/filesets.txt @@ -0,0 +1,65 @@ +Mercurial supports a functional language for selecting a set of +files. + +Like other file patterns, this pattern type is indicated by a prefix, +'set:'. The language supports a number of predicates which are joined +by infix operators. Parenthesis can be used for grouping. + +Identifiers such as filenames or patterns must be quoted with single +or double quotes if they contain characters outside of +``[.*{}[]?/\_a-zA-Z0-9\x80-\xff]`` or if they match one of the +predefined predicates. This generally applies to file patterns other +than globs and arguments for predicates. + +Special characters can be used in quoted identifiers by escaping them, +e.g., ``\n`` is interpreted as a newline. To prevent them from being +interpreted, strings can be prefixed with ``r``, e.g. ``r'...'``. + +There is a single prefix operator: + +``not x`` + Files not in x. Short form is ``! x``. + +These are the supported infix operators: + +``x and y`` + The intersection of files in x and y. Short form is ``x & y``. + +``x or y`` + The union of files in x and y. There are two alternative short + forms: ``x | y`` and ``x + y``. 
+ +``x - y`` + Files in x but not in y. + +The following predicates are supported: + +.. predicatesmarker + +Some sample queries: + +- Show status of files that appear to be binary in the working directory:: + + hg status -A "set:binary()" + +- Forget files that are in .hgignore but are already tracked:: + + hg forget "set:hgignore() and not ignored()" + +- Find text files that contain a string:: + + hg locate "set:grep(magic) and not binary()" + +- Find C files in a non-standard encoding:: + + hg locate "set:**.c and not encoding(ascii)" + +- Revert copies of large binary files:: + + hg revert "set:copied() and binary() and size('>1M')" + +- Remove files listed in foo.lst that contain the letter a or b:: + + hg remove "set: 'listfile:foo.lst' and (**a* or **b*)" + +See also :hg:`help patterns`. diff --git a/websdk/mercurial/help/glossary.txt b/websdk/mercurial/help/glossary.txt new file mode 100644 index 0000000..074a31d --- /dev/null +++ b/websdk/mercurial/help/glossary.txt @@ -0,0 +1,386 @@ +Ancestor + Any changeset that can be reached by an unbroken chain of parent + changesets from a given changeset. More precisely, the ancestors + of a changeset can be defined by two properties: a parent of a + changeset is an ancestor, and a parent of an ancestor is an + ancestor. See also: 'Descendant'. + +Bookmark + Bookmarks are pointers to certain commits that move when + committing. They are similar to tags in that it is possible to use + bookmark names in all places where Mercurial expects a changeset + ID, e.g., with :hg:`update`. Unlike tags, bookmarks move along + when you make a commit. + + Bookmarks can be renamed, copied and deleted. Bookmarks are local, + unless they are explicitly pushed or pulled between repositories. + Pushing and pulling bookmarks allow you to collaborate with others + on a branch without creating a named branch. + +Branch + (Noun) A child changeset that has been created from a parent that + is not a head. 
These are known as topological branches, see + 'Branch, topological'. If a topological branch is named, it becomes + a named branch. If a topological branch is not named, it becomes + an anonymous branch. See 'Branch, anonymous' and 'Branch, named'. + + Branches may be created when changes are pulled from or pushed to + a remote repository, since new heads may be created by these + operations. Note that the term branch can also be used informally + to describe a development process in which certain development is + done independently of other development. This is sometimes done + explicitly with a named branch, but it can also be done locally, + using bookmarks or clones and anonymous branches. + + Example: "The experimental branch". + + (Verb) The action of creating a child changeset which results in + its parent having more than one child. + + Example: "I'm going to branch at X". + +Branch, anonymous + Every time a new child changeset is created from a parent that is not + a head and the name of the branch is not changed, a new anonymous + branch is created. + +Branch, closed + A named branch whose branch heads have all been closed. + +Branch, default + The branch assigned to a changeset when no name has previously been + assigned. + +Branch head + See 'Head, branch'. + +Branch, inactive + If a named branch has no topological heads, it is considered to be + inactive. As an example, a feature branch becomes inactive when it + is merged into the default branch. The :hg:`branches` command + shows inactive branches by default, though they can be hidden with + :hg:`branches --active`. + + NOTE: this concept is deprecated because it is too implicit. + Branches should now be explicitly closed using :hg:`commit + --close-branch` when they are no longer needed. + +Branch, named + A collection of changesets which have the same branch name. By + default, children of a changeset in a named branch belong to the + same named branch. 
A child can be explicitly assigned to a + different branch. See :hg:`help branch`, :hg:`help branches` and + :hg:`commit --close-branch` for more information on managing + branches. + + Named branches can be thought of as a kind of namespace, dividing + the collection of changesets that comprise the repository into a + collection of disjoint subsets. A named branch is not necessarily + a topological branch. If a new named branch is created from the + head of another named branch, or the default branch, but no + further changesets are added to that previous branch, then that + previous branch will be a branch in name only. + +Branch tip + See 'Tip, branch'. + +Branch, topological + Every time a new child changeset is created from a parent that is + not a head, a new topological branch is created. If a topological + branch is named, it becomes a named branch. If a topological + branch is not named, it becomes an anonymous branch of the + current, possibly default, branch. + +Changelog + A record of the changesets in the order in which they were added + to the repository. This includes details such as changeset id, + author, commit message, date, and list of changed files. + +Changeset + A snapshot of the state of the repository used to record a change. + +Changeset, child + The converse of parent changeset: if P is a parent of C, then C is + a child of P. There is no limit to the number of children that a + changeset may have. + +Changeset id + A SHA-1 hash that uniquely identifies a changeset. It may be + represented as either a "long" 40 hexadecimal digit string, or a + "short" 12 hexadecimal digit string. + +Changeset, merge + A changeset with two parents. This occurs when a merge is + committed. + +Changeset, parent + A revision upon which a child changeset is based. Specifically, a + parent changeset of a changeset C is a changeset whose node + immediately precedes C in the DAG. Changesets have at most two + parents. 
+ +Checkout + (Noun) The working directory being updated to a specific + revision. This use should probably be avoided where possible, as + changeset is much more appropriate than checkout in this context. + + Example: "I'm using checkout X." + + (Verb) Updating the working directory to a specific changeset. See + :hg:`help update`. + + Example: "I'm going to check out changeset X." + +Child changeset + See 'Changeset, child'. + +Close changeset + See 'Changeset, close'. + +Closed branch + See 'Branch, closed'. + +Clone + (Noun) An entire or partial copy of a repository. The partial + clone must be in the form of a revision and its ancestors. + + Example: "Is your clone up to date?". + + (Verb) The process of creating a clone, using :hg:`clone`. + + Example: "I'm going to clone the repository". + +Closed branch head + See 'Head, closed branch'. + +Commit + (Noun) A synonym for changeset. + + Example: "Is the bug fixed in your recent commit?" + + (Verb) The act of recording changes to a repository. When files + are committed in a working directory, Mercurial finds the + differences between the committed files and their parent + changeset, creating a new changeset in the repository. + + Example: "You should commit those changes now." + +Cset + A common abbreviation of the term changeset. + +DAG + The repository of changesets of a distributed version control + system (DVCS) can be described as a directed acyclic graph (DAG), + consisting of nodes and edges, where nodes correspond to + changesets and edges imply a parent -> child relation. This graph + can be visualized by graphical tools such as :hg:`glog` + (graphlog). In Mercurial, the DAG is limited by the requirement + for children to have at most two parents. + +Default branch + See 'Branch, default'. + +Descendant + Any changeset that can be reached by a chain of child changesets + from a given changeset. 
More precisely, the descendants of a + changeset can be defined by two properties: the child of a + changeset is a descendant, and the child of a descendant is a + descendant. See also: 'Ancestor'. + +Diff + (Noun) The difference between the contents and attributes of files + in two changesets or a changeset and the current working + directory. The difference is usually represented in a standard + form called a "diff" or "patch". The "git diff" format is used + when the changes include copies, renames, or changes to file + attributes, none of which can be represented/handled by classic + "diff" and "patch". + + Example: "Did you see my correction in the diff?" + + (Verb) Diffing two changesets is the action of creating a diff or + patch. + + Example: "If you diff with changeset X, you will see what I mean." + +Directory, working + The working directory represents the state of the files tracked by + Mercurial, that will be recorded in the next commit. The working + directory initially corresponds to the snapshot at an existing + changeset, known as the parent of the working directory. See + 'Parent, working directory'. The state may be modified by changes + to the files introduced manually or by a merge. The repository + metadata exists in the .hg directory inside the working directory. + +Graph + See DAG and :hg:`help graphlog`. + +Head + The term 'head' may be used to refer to both a branch head or a + repository head, depending on the context. See 'Head, branch' and + 'Head, repository' for specific definitions. + + Heads are where development generally takes place and are the + usual targets for update and merge operations. + +Head, branch + A changeset with no descendants on the same named branch. + +Head, closed branch + A changeset that marks a head as no longer interesting. The closed + head is no longer listed by :hg:`heads`. A branch is considered + closed when all its heads are closed and consequently is not + listed by :hg:`branches`. 
+ +Head, repository + A topological head which has not been closed. + +Head, topological + A changeset with no children in the repository. + +History, immutable + Once committed, changesets cannot be altered. Extensions which + appear to change history actually create new changesets that + replace existing ones, and then destroy the old changesets. Doing + so in public repositories can result in old changesets being + reintroduced to the repository. + +History, rewriting + The changesets in a repository are immutable. However, extensions + to Mercurial can be used to alter the repository, usually in such + a way as to preserve changeset contents. + +Immutable history + See 'History, immutable'. + +Merge changeset + See 'Changeset, merge'. + +Manifest + Each changeset has a manifest, which is the list of files that are + tracked by the changeset. + +Merge + Used to bring together divergent branches of work. When you update + to a changeset and then merge another changeset, you bring the + history of the latter changeset into your working directory. Once + conflicts are resolved (and marked), this merge may be committed + as a merge changeset, bringing two branches together in the DAG. + +Named branch + See 'Branch, named'. + +Null changeset + The empty changeset. It is the parent state of newly-initialized + repositories and repositories with no checked out revision. It is + thus the parent of root changesets and the effective ancestor when + merging unrelated changesets. Can be specified by the alias 'null' + or by the changeset ID '000000000000'. + +Parent + See 'Changeset, parent'. + +Parent changeset + See 'Changeset, parent'. + +Parent, working directory + The working directory parent reflects a virtual revision which is + the child of the changeset (or two changesets with an uncommitted + merge) shown by :hg:`parents`. This is changed with + :hg:`update`. Other commands to see the working directory parent + are :hg:`summary` and :hg:`id`. 
Can be specified by the alias ".". + +Patch + (Noun) The product of a diff operation. + + Example: "I've sent you my patch." + + (Verb) The process of using a patch file to transform one + changeset into another. + + Example: "You will need to patch that revision." + +Pull + An operation in which changesets in a remote repository which are + not in the local repository are brought into the local + repository. Note that this operation without special arguments + only updates the repository, it does not update the files in the + working directory. See :hg:`help pull`. + +Push + An operation in which changesets in a local repository which are + not in a remote repository are sent to the remote repository. Note + that this operation only adds changesets which have been committed + locally to the remote repository. Uncommitted changes are not + sent. See :hg:`help push`. + +Repository + The metadata describing all recorded states of a collection of + files. Each recorded state is represented by a changeset. A + repository is usually (but not always) found in the ``.hg`` + subdirectory of a working directory. Any recorded state can be + recreated by "updating" a working directory to a specific + changeset. + +Repository head + See 'Head, repository'. + +Revision + A state of the repository at some point in time. Earlier revisions + can be updated to by using :hg:`update`. See also 'Revision + number'; See also 'Changeset'. + +Revision number + This integer uniquely identifies a changeset in a specific + repository. It represents the order in which changesets were added + to a repository, starting with revision number 0. Note that the + revision number may be different in each clone of a repository. To + identify changesets uniquely between different clones, see + 'Changeset id'. + +Revlog + History storage mechanism used by Mercurial. It is a form of delta + encoding, with occasional full revision of data followed by delta + of each successive revision. 
It includes data and an index
+    pointing to the data.
+
+Rewriting history
+    See 'History, rewriting'.
+
+Root
+    A changeset that has only the null changeset as its parent. Most
+    repositories have only a single root changeset.
+
+Tag
+    An alternative name given to a changeset. Tags can be used in all
+    places where Mercurial expects a changeset ID, e.g., with
+    :hg:`update`. The creation of a tag is stored in the history and
+    will thus automatically be shared with others using push and pull.
+
+Tip
+    The changeset with the highest revision number. It is the changeset
+    most recently added in a repository.
+
+Tip, branch
+    The head of a given branch with the highest revision number. When
+    a branch name is used as a revision identifier, it refers to the
+    branch tip. See also 'Head, branch'. Note that because revision
+    numbers may be different in different repository clones, the
+    branch tip may be different in different cloned repositories.
+
+Update
+    (Noun) Another synonym of changeset.
+
+    Example: "I've pushed an update".
+
+    (Verb) This term is usually used to describe updating the state of
+    the working directory to that of a specific changeset. See
+    :hg:`help update`.
+
+    Example: "You should update".
+
+Working directory
+    See 'Directory, working'.
+
+Working directory parent
+    See 'Parent, working directory'. diff --git a/websdk/mercurial/help/hgignore.txt b/websdk/mercurial/help/hgignore.txt new file mode 100644 index 0000000..538f4b5 --- /dev/null +++ b/websdk/mercurial/help/hgignore.txt @@ -0,0 +1,80 @@ +Synopsis +-------- + +The Mercurial system uses a file called ``.hgignore`` in the root +directory of a repository to control its behavior when it searches +for files that it is not currently tracking. + +Description +----------- + +The working directory of a Mercurial repository will often contain +files that should not be tracked by Mercurial. These include backup +files created by editors and build products created by compilers. 
+These files can be ignored by listing them in a ``.hgignore`` file in
+the root of the working directory. The ``.hgignore`` file must be
+created manually. It is typically put under version control, so that
+the settings will propagate to other repositories with push and pull.
+
+An untracked file is ignored if its path relative to the repository
+root directory, or any prefix path of that path, is matched against
+any pattern in ``.hgignore``.
+
+For example, say we have an untracked file, ``file.c``, at
+``a/b/file.c`` inside our repository. Mercurial will ignore ``file.c``
+if any pattern in ``.hgignore`` matches ``a/b/file.c``, ``a/b`` or ``a``.
+
+In addition, a Mercurial configuration file can reference a set of
+per-user or global ignore files. See the ``ignore`` configuration
+key on the ``[ui]`` section of :hg:`help config` for details of how to
+configure these files.
+
+To control Mercurial's handling of files that it manages, many
+commands support the ``-I`` and ``-X`` options; see
+:hg:`help <command>` and :hg:`help patterns` for details.
+
+Syntax
+------
+
+An ignore file is a plain text file consisting of a list of patterns,
+with one pattern per line. Empty lines are skipped. The ``#``
+character is treated as a comment character, and the ``\`` character
+is treated as an escape character.
+
+Mercurial supports several pattern syntaxes. The default syntax used
+is Python/Perl-style regular expressions.
+
+To change the syntax used, use a line of the following form::
+
+  syntax: NAME
+
+where ``NAME`` is one of the following:
+
+``regexp``
+  Regular expression, Python/Perl syntax.
+``glob``
+  Shell-style glob.
+
+The chosen syntax stays in effect when parsing all patterns that
+follow, until another syntax is selected.
+
+Neither glob nor regexp patterns are rooted. A glob-syntax pattern of
+the form ``*.c`` will match a file ending in ``.c`` in any directory,
+and a regexp pattern of the form ``\.c$`` will do the same. 
To root a +regexp pattern, start it with ``^``. + +Example +------- + +Here is an example ignore file. :: + + # use glob syntax. + syntax: glob + + *.elc + *.pyc + *~ + + # switch to regexp syntax. + syntax: regexp + ^\.pc/ diff --git a/websdk/mercurial/help/hgweb.txt b/websdk/mercurial/help/hgweb.txt new file mode 100644 index 0000000..fa901e6 --- /dev/null +++ b/websdk/mercurial/help/hgweb.txt @@ -0,0 +1,47 @@ +Mercurial's internal web server, hgweb, can serve either a single +repository, or a collection of them. In the latter case, a special +configuration file can be used to specify the repository paths to use +and global web configuration options. + +This file uses the same syntax as other Mercurial configuration files, +but only the following sections are recognized: + + - web + - paths + - collections + +The ``web`` section can specify all the settings described in the web +section of the hgrc(5) documentation. See :hg:`help config` for +information on where to find the manual page. + +The ``paths`` section provides mappings of physical repository +paths to virtual ones. For instance:: + + [paths] + projects/a = /foo/bar + projects/b = /baz/quux + web/root = /real/root/* + / = /real/root2/* + virtual/root2 = /real/root2/** + +- The first two entries make two repositories in different directories + appear under the same directory in the web interface +- The third entry maps every Mercurial repository found in '/real/root' + into 'web/root'. This format is preferred over the [collections] one, + since using absolute paths as configuration keys is not supported on every + platform (especially on Windows). +- The fourth entry is a special case mapping all repositories in + '/real/root2' in the root of the virtual directory. +- The fifth entry recursively finds all repositories under the real + root, and maps their relative paths under the virtual root. 
+ +The ``collections`` section provides mappings of trees of physical +repositories paths to virtual ones, though the paths syntax is generally +preferred. For instance:: + + [collections] + /foo = /foo + +Here, the left side will be stripped off all repositories found in the +right side. Thus ``/foo/bar`` and ``foo/quux/baz`` will be listed as +``bar`` and ``quux/baz`` respectively. diff --git a/websdk/mercurial/help/merge-tools.txt b/websdk/mercurial/help/merge-tools.txt new file mode 100644 index 0000000..7324fe4 --- /dev/null +++ b/websdk/mercurial/help/merge-tools.txt @@ -0,0 +1,110 @@ +To merge files Mercurial uses merge tools. + +A merge tool combines two different versions of a file into a merged +file. Merge tools are given the two files and the greatest common +ancestor of the two file versions, so they can determine the changes +made on both branches. + +Merge tools are used both for :hg:`resolve`, :hg:`merge`, :hg:`update`, +:hg:`backout` and in several extensions. + +Usually, the merge tool tries to automatically reconcile the files by +combining all non-overlapping changes that occurred separately in +the two different evolutions of the same initial base file. Furthermore, some +interactive merge programs make it easier to manually resolve +conflicting merges, either in a graphical way, or by inserting some +conflict markers. Mercurial does not include any interactive merge +programs but relies on external tools for that. + +Available merge tools +""""""""""""""""""""" + +External merge tools and their properties are configured in the +merge-tools configuration section - see hgrc(5) - but they can often just +be named by their executable. + +A merge tool is generally usable if its executable can be found on the +system and if it can handle the merge. The executable is found if it +is an absolute or relative executable path or the name of an +application in the executable search path. 
The tool is assumed to be +able to handle the merge if it can handle symlinks if the file is a +symlink, if it can handle binary files if the file is binary, and if a +GUI is available if the tool requires a GUI. + +There are some internal merge tools which can be used. The internal +merge tools are: + +``internal:merge`` + Uses the internal non-interactive simple merge algorithm for merging + files. It will fail if there are any conflicts and leave markers in + the partially merged file. + +``internal:fail`` + Rather than attempting to merge files that were modified on both + branches, it marks them as unresolved. The resolve command must be + used to resolve these conflicts. + +``internal:local`` + Uses the local version of files as the merged version. + +``internal:other`` + Uses the other version of files as the merged version. + +``internal:prompt`` + Asks the user which of the local or the other version to keep as + the merged version. + +``internal:dump`` + Creates three versions of the files to merge, containing the + contents of local, other and base. These files can then be used to + perform a merge manually. If the file to be merged is named + ``a.txt``, these files will accordingly be named ``a.txt.local``, + ``a.txt.other`` and ``a.txt.base`` and they will be placed in the + same directory as ``a.txt``. + +Internal tools are always available and do not require a GUI but will by default +not handle symlinks or binary files. + +Choosing a merge tool +""""""""""""""""""""" + +Mercurial uses these rules when deciding which merge tool to use: + +1. If a tool has been specified with the --tool option to merge or resolve, it + is used. If it is the name of a tool in the merge-tools configuration, its + configuration is used. Otherwise the specified tool must be executable by + the shell. + +2. If the ``HGMERGE`` environment variable is present, its value is used and + must be executable by the shell. + +3. 
If the filename of the file to be merged matches any of the patterns in the + merge-patterns configuration section, the first usable merge tool + corresponding to a matching pattern is used. Here, binary capabilities of the + merge tool are not considered. + +4. If ui.merge is set it will be considered next. If the value is not the name + of a configured tool, the specified value is used and must be executable by + the shell. Otherwise the named tool is used if it is usable. + +5. If any usable merge tools are present in the merge-tools configuration + section, the one with the highest priority is used. + +6. If a program named ``hgmerge`` can be found on the system, it is used - but + it will by default not be used for symlinks and binary files. + +7. If the file to be merged is not binary and is not a symlink, then + ``internal:merge`` is used. + +8. The merge of the file fails and must be resolved before commit. + +.. note:: + After selecting a merge program, Mercurial will by default attempt + to merge the files using a simple merge algorithm first. Only if it doesn't + succeed because of conflicting changes Mercurial will actually execute the + merge program. Whether to use the simple merge algorithm first can be + controlled by the premerge setting of the merge tool. Premerge is enabled by + default unless the file is binary or a symlink. + +See the merge-tools and ui sections of hgrc(5) for details on the +configuration of merge tools. diff --git a/websdk/mercurial/help/multirevs.txt b/websdk/mercurial/help/multirevs.txt new file mode 100644 index 0000000..c8a2833 --- /dev/null +++ b/websdk/mercurial/help/multirevs.txt @@ -0,0 +1,13 @@ +When Mercurial accepts more than one revision, they may be specified +individually, or provided as a topologically continuous range, +separated by the ":" character. + +The syntax of range notation is [BEGIN]:[END], where BEGIN and END are +revision identifiers. Both BEGIN and END are optional. 
If BEGIN is not +specified, it defaults to revision number 0. If END is not specified, +it defaults to the tip. The range ":" thus means "all revisions". + +If BEGIN is greater than END, revisions are treated in reverse order. + +A range acts as a closed interval. This means that a range of 3:5 +gives 3, 4 and 5. Similarly, a range of 9:6 gives 9, 8, 7, and 6. diff --git a/websdk/mercurial/help/patterns.txt b/websdk/mercurial/help/patterns.txt new file mode 100644 index 0000000..c13453e --- /dev/null +++ b/websdk/mercurial/help/patterns.txt @@ -0,0 +1,53 @@ +Mercurial accepts several notations for identifying one or more files +at a time. + +By default, Mercurial treats filenames as shell-style extended glob +patterns. + +Alternate pattern notations must be specified explicitly. + +To use a plain path name without any pattern matching, start it with +``path:``. These path names must completely match starting at the +current repository root. + +To use an extended glob, start a name with ``glob:``. Globs are rooted +at the current directory; a glob such as ``*.c`` will only match files +in the current directory ending with ``.c``. + +The supported glob syntax extensions are ``**`` to match any string +across path separators and ``{a,b}`` to mean "a or b". + +To use a Perl/Python regular expression, start a name with ``re:``. +Regexp pattern matching is anchored at the root of the repository. + +To read name patterns from a file, use ``listfile:`` or ``listfile0:``. +The latter expects null delimited patterns while the former expects line +feeds. Each string read from the file is itself treated as a file +pattern. 
+ +Plain examples:: + + path:foo/bar a name bar in a directory named foo in the root + of the repository + path:path:name a file or directory named "path:name" + +Glob examples:: + + glob:*.c any name ending in ".c" in the current directory + *.c any name ending in ".c" in the current directory + **.c any name ending in ".c" in any subdirectory of the + current directory including itself. + foo/*.c any name ending in ".c" in the directory foo + foo/**.c any name ending in ".c" in any subdirectory of foo + including itself. + +Regexp examples:: + + re:.*\.c$ any name ending in ".c", anywhere in the repository + +File examples:: + + listfile:list.txt read list from list.txt with one file pattern per line + listfile0:list.txt read list from list.txt with null byte delimiters + +See also :hg:`help filesets`. diff --git a/websdk/mercurial/help/revisions.txt b/websdk/mercurial/help/revisions.txt new file mode 100644 index 0000000..309f8e2 --- /dev/null +++ b/websdk/mercurial/help/revisions.txt @@ -0,0 +1,29 @@ +Mercurial supports several ways to specify individual revisions. + +A plain integer is treated as a revision number. Negative integers are +treated as sequential offsets from the tip, with -1 denoting the tip, +-2 denoting the revision prior to the tip, and so forth. + +A 40-digit hexadecimal string is treated as a unique revision +identifier. + +A hexadecimal string less than 40 characters long is treated as a +unique revision identifier and is referred to as a short-form +identifier. A short-form identifier is only valid if it is the prefix +of exactly one full-length identifier. + +Any other string is treated as a tag or branch name. A tag name is a +symbolic name associated with a revision identifier. A branch name +denotes the tipmost revision of that branch. Tag and branch names must +not contain the ":" character. + +The reserved name "tip" is a special tag that always identifies the +most recent revision. 
+ +The reserved name "null" indicates the null revision. This is the +revision of an empty repository, and the parent of revision 0. + +The reserved name "." indicates the working directory parent. If no +working directory is checked out, it is equivalent to null. If an +uncommitted merge is in progress, "." is the revision of the first +parent. diff --git a/websdk/mercurial/help/revsets.txt b/websdk/mercurial/help/revsets.txt new file mode 100644 index 0000000..48838c3 --- /dev/null +++ b/websdk/mercurial/help/revsets.txt @@ -0,0 +1,122 @@ +Mercurial supports a functional language for selecting a set of +revisions. + +The language supports a number of predicates which are joined by infix +operators. Parenthesis can be used for grouping. + +Identifiers such as branch names must be quoted with single or double +quotes if they contain characters outside of +``[._a-zA-Z0-9\x80-\xff]`` or if they match one of the predefined +predicates. + +Special characters can be used in quoted identifiers by escaping them, +e.g., ``\n`` is interpreted as a newline. To prevent them from being +interpreted, strings can be prefixed with ``r``, e.g. ``r'...'``. + +There is a single prefix operator: + +``not x`` + Changesets not in x. Short form is ``! x``. + +These are the supported infix operators: + +``x::y`` + A DAG range, meaning all changesets that are descendants of x and + ancestors of y, including x and y themselves. If the first endpoint + is left out, this is equivalent to ``ancestors(y)``, if the second + is left out it is equivalent to ``descendants(x)``. + + An alternative syntax is ``x..y``. + +``x:y`` + All changesets with revision numbers between x and y, both + inclusive. Either endpoint can be left out, they default to 0 and + tip. + +``x and y`` + The intersection of changesets in x and y. Short form is ``x & y``. + +``x or y`` + The union of changesets in x and y. There are two alternative short + forms: ``x | y`` and ``x + y``. 
+
+``x - y``
+  Changesets in x but not in y.
+
+``x^n``
+  The nth parent of x, n == 0, 1, or 2.
+  For n == 0, x; for n == 1, the first parent of each changeset in x;
+  for n == 2, the second parent of each changeset in x.
+
+``x~n``
+  The nth first ancestor of x; ``x~0`` is x; ``x~3`` is ``x^^^``.
+
+There is a single postfix operator:
+
+``x^``
+  Equivalent to ``x^1``, the first parent of each changeset in x.
+
+
+The following predicates are supported:
+
+.. predicatesmarker
+
+New predicates (known as "aliases") can be defined, using any combination of
+existing predicates or other aliases. An alias definition looks like::
+
+  <alias> = <definition>
+
+in the ``revsetalias`` section of a Mercurial configuration file. Arguments
+of the form `$1`, `$2`, etc. are substituted from the alias into the
+definition.
+
+For example,
+
+::
+
+  [revsetalias]
+  h = heads()
+  d($1) = sort($1, date)
+  rs($1, $2) = reverse(sort($1, $2))
+
+defines three aliases, ``h``, ``d``, and ``rs``. ``rs(0:tip, author)`` is
+exactly equivalent to ``reverse(sort(0:tip, author))``.
+
+Command line equivalents for :hg:`log`::
+
+  -f -> ::. 
+ -d x -> date(x) + -k x -> keyword(x) + -m -> merge() + -u x -> user(x) + -b x -> branch(x) + -P x -> !::x + -l x -> limit(expr, x) + +Some sample queries: + +- Changesets on the default branch:: + + hg log -r "branch(default)" + +- Changesets on the default branch since tag 1.5 (excluding merges):: + + hg log -r "branch(default) and 1.5:: and not merge()" + +- Open branch heads:: + + hg log -r "head() and not closed()" + +- Changesets between tags 1.3 and 1.5 mentioning "bug" that affect + ``hgext/*``:: + + hg log -r "1.3::1.5 and keyword(bug) and file('hgext/*')" + +- Changesets committed in May 2008, sorted by user:: + + hg log -r "sort(date('May 2008'), user)" + +- Changesets mentioning "bug" or "issue" that are not in a tagged + release:: + + hg log -r "(keyword(bug) or keyword(issue)) and not ancestors(tagged())" diff --git a/websdk/mercurial/help/subrepos.txt b/websdk/mercurial/help/subrepos.txt new file mode 100644 index 0000000..df14173 --- /dev/null +++ b/websdk/mercurial/help/subrepos.txt @@ -0,0 +1,135 @@ +Subrepositories let you nest external repositories or projects into a +parent Mercurial repository, and make commands operate on them as a +group. + +Mercurial currently supports Mercurial, Git, and Subversion +subrepositories. + +Subrepositories are made of three components: + +1. Nested repository checkouts. They can appear anywhere in the + parent working directory. + +2. Nested repository references. They are defined in ``.hgsub`` and + tell where the subrepository checkouts come from. Mercurial + subrepositories are referenced like: + + path/to/nested = https://example.com/nested/repo/path + + Git and Subversion subrepos are also supported: + + path/to/nested = [git]git://example.com/nested/repo/path + path/to/nested = [svn]https://example.com/nested/trunk/path + + where ``path/to/nested`` is the checkout location relatively to the + parent Mercurial root, and ``https://example.com/nested/repo/path`` + is the source repository path. 
The source can also reference a + filesystem path. + + Note that ``.hgsub`` does not exist by default in Mercurial + repositories, you have to create and add it to the parent + repository before using subrepositories. + +3. Nested repository states. They are defined in ``.hgsubstate`` and + capture whatever information is required to restore the + subrepositories to the state they were committed in a parent + repository changeset. Mercurial automatically record the nested + repositories states when committing in the parent repository. + + .. note:: + The ``.hgsubstate`` file should not be edited manually. + + +Adding a Subrepository +---------------------- + +If ``.hgsub`` does not exist, create it and add it to the parent +repository. Clone or checkout the external projects where you want it +to live in the parent repository. Edit ``.hgsub`` and add the +subrepository entry as described above. At this point, the +subrepository is tracked and the next commit will record its state in +``.hgsubstate`` and bind it to the committed changeset. + +Synchronizing a Subrepository +----------------------------- + +Subrepos do not automatically track the latest changeset of their +sources. Instead, they are updated to the changeset that corresponds +with the changeset checked out in the top-level changeset. This is so +developers always get a consistent set of compatible code and +libraries when they update. + +Thus, updating subrepos is a manual process. Simply check out target +subrepo at the desired revision, test in the top-level repo, then +commit in the parent repository to record the new combination. + +Deleting a Subrepository +------------------------ + +To remove a subrepository from the parent repository, delete its +reference from ``.hgsub``, then remove its files. + +Interaction with Mercurial Commands +----------------------------------- + +:add: add does not recurse in subrepos unless -S/--subrepos is + specified. 
Git and Subversion subrepositories are currently + silently ignored. + +:archive: archive does not recurse in subrepositories unless + -S/--subrepos is specified. + +:commit: commit creates a consistent snapshot of the state of the + entire project and its subrepositories. If any subrepositories + have been modified, Mercurial will abort. Mercurial can be made + to instead commit all modified subrepositories by specifying + -S/--subrepos, or setting "ui.commitsubrepos=True" in a + configuration file (see :hg:`help config`). After there are no + longer any modified subrepositories, it records their state and + finally commits it in the parent repository. + +:diff: diff does not recurse in subrepos unless -S/--subrepos is + specified. Changes are displayed as usual, on the subrepositories + elements. Git and Subversion subrepositories are currently + silently ignored. + +:incoming: incoming does not recurse in subrepos unless -S/--subrepos + is specified. Git and Subversion subrepositories are currently + silently ignored. + +:outgoing: outgoing does not recurse in subrepos unless -S/--subrepos + is specified. Git and Subversion subrepositories are currently + silently ignored. + +:pull: pull is not recursive since it is not clear what to pull prior + to running :hg:`update`. Listing and retrieving all + subrepositories changes referenced by the parent repository pulled + changesets is expensive at best, impossible in the Subversion + case. + +:push: Mercurial will automatically push all subrepositories first + when the parent repository is being pushed. This ensures new + subrepository changes are available when referenced by top-level + repositories. Push is a no-op for Subversion subrepositories. + +:status: status does not recurse into subrepositories unless + -S/--subrepos is specified. Subrepository changes are displayed as + regular Mercurial changes on the subrepository + elements. Subversion subrepositories are currently silently + ignored. 
+ +:update: update restores the subrepos in the state they were + originally committed in target changeset. If the recorded + changeset is not available in the current subrepository, Mercurial + will pull it in first before updating. This means that updating + can require network access when using subrepositories. + +Remapping Subrepositories Sources +--------------------------------- + +A subrepository source location may change during a project life, +invalidating references stored in the parent repository history. To +fix this, rewriting rules can be defined in parent repository ``hgrc`` +file or in Mercurial configuration. See the ``[subpaths]`` section in +hgrc(5) for more details. + diff --git a/websdk/mercurial/help/templates.txt b/websdk/mercurial/help/templates.txt new file mode 100644 index 0000000..ffc7b34 --- /dev/null +++ b/websdk/mercurial/help/templates.txt @@ -0,0 +1,40 @@ +Mercurial allows you to customize output of commands through +templates. You can either pass in a template from the command +line, via the --template option, or select an existing +template-style (--style). + +You can customize output for any "log-like" command: log, +outgoing, incoming, tip, parents, heads and glog. + +Four styles are packaged with Mercurial: default (the style used +when no explicit preference is passed), compact, changelog, +and xml. +Usage:: + + $ hg log -r1 --style changelog + +A template is a piece of text, with markup to invoke variable +expansion:: + + $ hg log -r1 --template "{node}\n" + b56ce7b07c52de7d5fd79fb89701ea538af65746 + +Strings in curly braces are called keywords. The availability of +keywords depends on the exact context of the templater. These +keywords are usually available for templating a log-like command: + +.. keywordsmarker + +The "date" keyword does not produce human-readable output. If you +want to use a date in your output, you can use a filter to process +it. Filters are functions which return a string based on the input +variable. 
Be sure to use the stringify filter first when you're +applying a string-input filter to a list-like input variable. +You can also use a chain of filters to get the desired output:: + + $ hg tip --template "{date|isodate}\n" + 2008-08-21 18:22 +0000 + +List of filters: + +.. filtersmarker diff --git a/websdk/mercurial/help/urls.txt b/websdk/mercurial/help/urls.txt new file mode 100644 index 0000000..016d5bd --- /dev/null +++ b/websdk/mercurial/help/urls.txt @@ -0,0 +1,66 @@ +Valid URLs are of the form:: + + local/filesystem/path[#revision] + file://local/filesystem/path[#revision] + http://[user[:pass]@]host[:port]/[path][#revision] + https://[user[:pass]@]host[:port]/[path][#revision] + ssh://[user@]host[:port]/[path][#revision] + +Paths in the local filesystem can either point to Mercurial +repositories or to bundle files (as created by :hg:`bundle` or :hg:` +incoming --bundle`). See also :hg:`help paths`. + +An optional identifier after # indicates a particular branch, tag, or +changeset to use from the remote repository. See also :hg:`help +revisions`. + +Some features, such as pushing to http:// and https:// URLs are only +possible if the feature is explicitly enabled on the remote Mercurial +server. + +Note that the security of HTTPS URLs depends on proper configuration of +web.cacerts. + +Some notes about using SSH with Mercurial: + +- SSH requires an accessible shell account on the destination machine + and a copy of hg in the remote path or specified with as remotecmd. +- path is relative to the remote user's home directory by default. 
Use + an extra slash at the start of a path to specify an absolute path:: + + ssh://example.com//tmp/repository + +- Mercurial doesn't use its own compression via SSH; the right thing + to do is to configure it in your ~/.ssh/config, e.g.:: + + Host *.mylocalnetwork.example.com + Compression no + Host * + Compression yes + + Alternatively specify "ssh -C" as your ssh command in your + configuration file or with the --ssh command line option. + +These URLs can all be stored in your configuration file with path +aliases under the [paths] section like so:: + + [paths] + alias1 = URL1 + alias2 = URL2 + ... + +You can then use the alias for any command that uses a URL (for +example :hg:`pull alias1` will be treated as :hg:`pull URL1`). + +Two path aliases are special because they are used as defaults when +you do not provide the URL to a command: + +default: + When you create a repository with hg clone, the clone command saves + the location of the source repository as the new repository's + 'default' path. This is then used when you omit path from push- and + pull-like commands (including incoming and outgoing). + +default-push: + The push command will look for a path named 'default-push', and + prefer it over 'default' if both are defined. diff --git a/websdk/mercurial/hg.py b/websdk/mercurial/hg.py index 061265c..bea0424 120000..100644 --- a/websdk/mercurial/hg.py +++ b/websdk/mercurial/hg.py @@ -1 +1,581 @@ -/usr/share/pyshared/mercurial/hg.py \ No newline at end of file +# hg.py - repository classes for mercurial +# +# Copyright 2005-2007 Matt Mackall +# Copyright 2006 Vadim Gelfer +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. 
+ +from i18n import _ +from lock import release +from node import hex, nullid +import localrepo, bundlerepo, httprepo, sshrepo, statichttprepo, bookmarks +import lock, util, extensions, error, node +import cmdutil, discovery +import merge as mergemod +import verify as verifymod +import errno, os, shutil + +def _local(path): + path = util.expandpath(util.urllocalpath(path)) + return (os.path.isfile(path) and bundlerepo or localrepo) + +def addbranchrevs(lrepo, repo, branches, revs): + hashbranch, branches = branches + if not hashbranch and not branches: + return revs or None, revs and revs[0] or None + revs = revs and list(revs) or [] + if not repo.capable('branchmap'): + if branches: + raise util.Abort(_("remote branch lookup not supported")) + revs.append(hashbranch) + return revs, revs[0] + branchmap = repo.branchmap() + + def primary(branch): + if branch == '.': + if not lrepo or not lrepo.local(): + raise util.Abort(_("dirstate branch not accessible")) + branch = lrepo.dirstate.branch() + if branch in branchmap: + revs.extend(node.hex(r) for r in reversed(branchmap[branch])) + return True + else: + return False + + for branch in branches: + if not primary(branch): + raise error.RepoLookupError(_("unknown branch '%s'") % branch) + if hashbranch: + if not primary(hashbranch): + revs.append(hashbranch) + return revs, revs[0] + +def parseurl(path, branches=None): + '''parse url#branch, returning (url, (branch, branches))''' + + u = util.url(path) + branch = None + if u.fragment: + branch = u.fragment + u.fragment = None + return str(u), (branch, branches or []) + +schemes = { + 'bundle': bundlerepo, + 'file': _local, + 'http': httprepo, + 'https': httprepo, + 'ssh': sshrepo, + 'static-http': statichttprepo, +} + +def _peerlookup(path): + u = util.url(path) + scheme = u.scheme or 'file' + thing = schemes.get(scheme) or schemes['file'] + try: + return thing(path) + except TypeError: + return thing + +def islocal(repo): + '''return true if repo or path is local''' + 
if isinstance(repo, str): + try: + return _peerlookup(repo).islocal(repo) + except AttributeError: + return False + return repo.local() + +def repository(ui, path='', create=False): + """return a repository object for the specified path""" + repo = _peerlookup(path).instance(ui, path, create) + ui = getattr(repo, "ui", ui) + for name, module in extensions.extensions(): + hook = getattr(module, 'reposetup', None) + if hook: + hook(ui, repo) + return repo + +def peer(uiorrepo, opts, path, create=False): + '''return a repository peer for the specified path''' + rui = remoteui(uiorrepo, opts) + return repository(rui, path, create) + +def defaultdest(source): + '''return default destination of clone if none is given''' + return os.path.basename(os.path.normpath(source)) + +def share(ui, source, dest=None, update=True): + '''create a shared repository''' + + if not islocal(source): + raise util.Abort(_('can only share local repositories')) + + if not dest: + dest = defaultdest(source) + else: + dest = ui.expandpath(dest) + + if isinstance(source, str): + origsource = ui.expandpath(source) + source, branches = parseurl(origsource) + srcrepo = repository(ui, source) + rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None) + else: + srcrepo = source + origsource = source = srcrepo.url() + checkout = None + + sharedpath = srcrepo.sharedpath # if our source is already sharing + + root = os.path.realpath(dest) + roothg = os.path.join(root, '.hg') + + if os.path.exists(roothg): + raise util.Abort(_('destination already exists')) + + if not os.path.isdir(root): + os.mkdir(root) + util.makedir(roothg, notindexed=True) + + requirements = '' + try: + requirements = srcrepo.opener.read('requires') + except IOError, inst: + if inst.errno != errno.ENOENT: + raise + + requirements += 'shared\n' + util.writefile(os.path.join(roothg, 'requires'), requirements) + util.writefile(os.path.join(roothg, 'sharedpath'), sharedpath) + + r = repository(ui, root) + + default = 
srcrepo.ui.config('paths', 'default') + if default: + fp = r.opener("hgrc", "w", text=True) + fp.write("[paths]\n") + fp.write("default = %s\n" % default) + fp.close() + + if update: + r.ui.status(_("updating working directory\n")) + if update is not True: + checkout = update + for test in (checkout, 'default', 'tip'): + if test is None: + continue + try: + uprev = r.lookup(test) + break + except error.RepoLookupError: + continue + _update(r, uprev) + +def copystore(ui, srcrepo, destpath): + '''copy files from store of srcrepo in destpath + + returns destlock + ''' + destlock = None + try: + hardlink = None + num = 0 + for f in srcrepo.store.copylist(): + src = os.path.join(srcrepo.sharedpath, f) + dst = os.path.join(destpath, f) + dstbase = os.path.dirname(dst) + if dstbase and not os.path.exists(dstbase): + os.mkdir(dstbase) + if os.path.exists(src): + if dst.endswith('data'): + # lock to avoid premature writing to the target + destlock = lock.lock(os.path.join(dstbase, "lock")) + hardlink, n = util.copyfiles(src, dst, hardlink) + num += n + if hardlink: + ui.debug("linked %d files\n" % num) + else: + ui.debug("copied %d files\n" % num) + return destlock + except: + release(destlock) + raise + +def clone(ui, peeropts, source, dest=None, pull=False, rev=None, + update=True, stream=False, branch=None): + """Make a copy of an existing repository. + + Create a copy of an existing repository in a new directory. The + source and destination are URLs, as passed to the repository + function. Returns a pair of repository objects, the source and + newly created destination. + + The location of the source is added to the new repository's + .hg/hgrc file, as the default to be used for future pulls and + pushes. + + If an exception is raised, the partly cloned/updated destination + repository will be deleted. 
+ + Arguments: + + source: repository object or URL + + dest: URL of destination repository to create (defaults to base + name of source repository) + + pull: always pull from source repository, even in local case + + stream: stream raw data uncompressed from repository (fast over + LAN, slow over WAN) + + rev: revision to clone up to (implies pull=True) + + update: update working directory after clone completes, if + destination is local repository (True means update to default rev, + anything else is treated as a revision) + + branch: branches to clone + """ + + if isinstance(source, str): + origsource = ui.expandpath(source) + source, branch = parseurl(origsource, branch) + srcrepo = repository(remoteui(ui, peeropts), source) + else: + srcrepo = source + branch = (None, branch or []) + origsource = source = srcrepo.url() + rev, checkout = addbranchrevs(srcrepo, srcrepo, branch, rev) + + if dest is None: + dest = defaultdest(source) + ui.status(_("destination directory: %s\n") % dest) + else: + dest = ui.expandpath(dest) + + dest = util.urllocalpath(dest) + source = util.urllocalpath(source) + + if os.path.exists(dest): + if not os.path.isdir(dest): + raise util.Abort(_("destination '%s' already exists") % dest) + elif os.listdir(dest): + raise util.Abort(_("destination '%s' is not empty") % dest) + + class DirCleanup(object): + def __init__(self, dir_): + self.rmtree = shutil.rmtree + self.dir_ = dir_ + def close(self): + self.dir_ = None + def cleanup(self): + if self.dir_: + self.rmtree(self.dir_, True) + + srclock = destlock = dircleanup = None + try: + abspath = origsource + if islocal(origsource): + abspath = os.path.abspath(util.urllocalpath(origsource)) + + if islocal(dest): + dircleanup = DirCleanup(dest) + + copy = False + if srcrepo.cancopy() and islocal(dest): + copy = not pull and not rev + + if copy: + try: + # we use a lock here because if we race with commit, we + # can end up with extra data in the cloned revlogs that's + # not pointed to by 
changesets, thus causing verify to + # fail + srclock = srcrepo.lock(wait=False) + except error.LockError: + copy = False + + if copy: + srcrepo.hook('preoutgoing', throw=True, source='clone') + hgdir = os.path.realpath(os.path.join(dest, ".hg")) + if not os.path.exists(dest): + os.mkdir(dest) + else: + # only clean up directories we create ourselves + dircleanup.dir_ = hgdir + try: + destpath = hgdir + util.makedir(destpath, notindexed=True) + except OSError, inst: + if inst.errno == errno.EEXIST: + dircleanup.close() + raise util.Abort(_("destination '%s' already exists") + % dest) + raise + + destlock = copystore(ui, srcrepo, destpath) + + # we need to re-init the repo after manually copying the data + # into it + destrepo = repository(remoteui(ui, peeropts), dest) + srcrepo.hook('outgoing', source='clone', + node=node.hex(node.nullid)) + else: + try: + destrepo = repository(remoteui(ui, peeropts), dest, + create=True) + except OSError, inst: + if inst.errno == errno.EEXIST: + dircleanup.close() + raise util.Abort(_("destination '%s' already exists") + % dest) + raise + + revs = None + if rev: + if not srcrepo.capable('lookup'): + raise util.Abort(_("src repository does not support " + "revision lookup and so doesn't " + "support clone by revision")) + revs = [srcrepo.lookup(r) for r in rev] + checkout = revs[0] + if destrepo.local(): + destrepo.clone(srcrepo, heads=revs, stream=stream) + elif srcrepo.local(): + srcrepo.push(destrepo, revs=revs) + else: + raise util.Abort(_("clone from remote to remote not supported")) + + if dircleanup: + dircleanup.close() + + if destrepo.local(): + fp = destrepo.opener("hgrc", "w", text=True) + fp.write("[paths]\n") + u = util.url(abspath) + u.passwd = None + defaulturl = str(u) + fp.write("default = %s\n" % defaulturl) + fp.close() + + destrepo.ui.setconfig('paths', 'default', defaulturl) + + if update: + if update is not True: + checkout = update + if srcrepo.local(): + checkout = srcrepo.lookup(update) + for test in 
(checkout, 'default', 'tip'): + if test is None: + continue + try: + uprev = destrepo.lookup(test) + break + except error.RepoLookupError: + continue + bn = destrepo[uprev].branch() + destrepo.ui.status(_("updating to branch %s\n") % bn) + _update(destrepo, uprev) + + # clone all bookmarks + if destrepo.local() and srcrepo.capable("pushkey"): + rb = srcrepo.listkeys('bookmarks') + for k, n in rb.iteritems(): + try: + m = destrepo.lookup(n) + destrepo._bookmarks[k] = m + except error.RepoLookupError: + pass + if rb: + bookmarks.write(destrepo) + elif srcrepo.local() and destrepo.capable("pushkey"): + for k, n in srcrepo._bookmarks.iteritems(): + destrepo.pushkey('bookmarks', k, '', hex(n)) + + return srcrepo, destrepo + finally: + release(srclock, destlock) + if dircleanup is not None: + dircleanup.cleanup() + +def _showstats(repo, stats): + repo.ui.status(_("%d files updated, %d files merged, " + "%d files removed, %d files unresolved\n") % stats) + +def update(repo, node): + """update the working directory to node, merging linear changes""" + stats = mergemod.update(repo, node, False, False, None) + _showstats(repo, stats) + if stats[3]: + repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n")) + return stats[3] > 0 + +# naming conflict in clone() +_update = update + +def clean(repo, node, show_stats=True): + """forcibly switch the working directory to node, clobbering changes""" + stats = mergemod.update(repo, node, False, True, None) + if show_stats: + _showstats(repo, stats) + return stats[3] > 0 + +def merge(repo, node, force=None, remind=True): + """Branch merge with node, resolving changes. Return true if any + unresolved conflicts.""" + stats = mergemod.update(repo, node, True, force, False) + _showstats(repo, stats) + if stats[3]: + repo.ui.status(_("use 'hg resolve' to retry unresolved file merges " + "or 'hg update -C .' 
to abandon\n")) + elif remind: + repo.ui.status(_("(branch merge, don't forget to commit)\n")) + return stats[3] > 0 + +def _incoming(displaychlist, subreporecurse, ui, repo, source, + opts, buffered=False): + """ + Helper for incoming / gincoming. + displaychlist gets called with + (remoterepo, incomingchangesetlist, displayer) parameters, + and is supposed to contain only code that can't be unified. + """ + source, branches = parseurl(ui.expandpath(source), opts.get('branch')) + other = peer(repo, opts, source) + ui.status(_('comparing with %s\n') % util.hidepassword(source)) + revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev')) + + if revs: + revs = [other.lookup(rev) for rev in revs] + other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other, + revs, opts["bundle"], opts["force"]) + try: + if not chlist: + ui.status(_("no changes found\n")) + return subreporecurse() + + displayer = cmdutil.show_changeset(ui, other, opts, buffered) + + # XXX once graphlog extension makes it into core, + # should be replaced by a if graph/else + displaychlist(other, chlist, displayer) + + displayer.close() + finally: + cleanupfn() + subreporecurse() + return 0 # exit code is zero since we found incoming changes + +def incoming(ui, repo, source, opts): + def subreporecurse(): + ret = 1 + if opts.get('subrepos'): + ctx = repo[None] + for subpath in sorted(ctx.substate): + sub = ctx.sub(subpath) + ret = min(ret, sub.incoming(ui, source, opts)) + return ret + + def display(other, chlist, displayer): + limit = cmdutil.loglimit(opts) + if opts.get('newest_first'): + chlist.reverse() + count = 0 + for n in chlist: + if limit is not None and count >= limit: + break + parents = [p for p in other.changelog.parents(n) if p != nullid] + if opts.get('no_merges') and len(parents) == 2: + continue + count += 1 + displayer.show(other[n]) + return _incoming(display, subreporecurse, ui, repo, source, opts) + +def _outgoing(ui, repo, dest, opts): + dest = 
ui.expandpath(dest or 'default-push', dest or 'default') + dest, branches = parseurl(dest, opts.get('branch')) + ui.status(_('comparing with %s\n') % util.hidepassword(dest)) + revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev')) + if revs: + revs = [repo.lookup(rev) for rev in revs] + + other = peer(repo, opts, dest) + common, outheads = discovery.findcommonoutgoing(repo, other, revs, + force=opts.get('force')) + o = repo.changelog.findmissing(common, outheads) + if not o: + ui.status(_("no changes found\n")) + return None + return o + +def outgoing(ui, repo, dest, opts): + def recurse(): + ret = 1 + if opts.get('subrepos'): + ctx = repo[None] + for subpath in sorted(ctx.substate): + sub = ctx.sub(subpath) + ret = min(ret, sub.outgoing(ui, dest, opts)) + return ret + + limit = cmdutil.loglimit(opts) + o = _outgoing(ui, repo, dest, opts) + if o is None: + return recurse() + + if opts.get('newest_first'): + o.reverse() + displayer = cmdutil.show_changeset(ui, repo, opts) + count = 0 + for n in o: + if limit is not None and count >= limit: + break + parents = [p for p in repo.changelog.parents(n) if p != nullid] + if opts.get('no_merges') and len(parents) == 2: + continue + count += 1 + displayer.show(repo[n]) + displayer.close() + recurse() + return 0 # exit code is zero since we found outgoing changes + +def revert(repo, node, choose): + """revert changes to revision in node without updating dirstate""" + return mergemod.update(repo, node, False, True, choose)[3] > 0 + +def verify(repo): + """verify the consistency of a repository""" + return verifymod.verify(repo) + +def remoteui(src, opts): + 'build a remote ui from ui or repo and opts' + if util.safehasattr(src, 'baseui'): # looks like a repository + dst = src.baseui.copy() # drop repo-specific config + src = src.ui # copy target options from repo + else: # assume it's a global ui object + dst = src.copy() # keep all global options + + # copy ssh-specific options + for o in 'ssh', 'remotecmd': + 
v = opts.get(o) or src.config('ui', o) + if v: + dst.setconfig("ui", o, v) + + # copy bundle-specific options + r = src.config('bundle', 'mainreporoot') + if r: + dst.setconfig('bundle', 'mainreporoot', r) + + # copy selected local settings to the remote ui + for sect in ('auth', 'hostfingerprints', 'http_proxy'): + for key, val in src.configitems(sect): + dst.setconfig(sect, key, val) + v = src.config('web', 'cacerts') + if v: + dst.setconfig('web', 'cacerts', util.expandpath(v)) + + return dst diff --git a/websdk/mercurial/hgweb/__init__.py b/websdk/mercurial/hgweb/__init__.py index 4131dec..dd4d089 120000..100644 --- a/websdk/mercurial/hgweb/__init__.py +++ b/websdk/mercurial/hgweb/__init__.py @@ -1 +1,31 @@ -/usr/share/pyshared/mercurial/hgweb/__init__.py \ No newline at end of file +# hgweb/__init__.py - web interface to a mercurial repository +# +# Copyright 21 May 2005 - (c) 2005 Jake Edge +# Copyright 2005 Matt Mackall +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. 
+ +import os +import hgweb_mod, hgwebdir_mod + +def hgweb(config, name=None, baseui=None): + '''create an hgweb wsgi object + + config can be one of: + - repo object (single repo view) + - path to repo (single repo view) + - path to config file (multi-repo view) + - dict of virtual:real pairs (multi-repo view) + - list of virtual:real tuples (multi-repo view) + ''' + + if ((isinstance(config, str) and not os.path.isdir(config)) or + isinstance(config, dict) or isinstance(config, list)): + # create a multi-dir interface + return hgwebdir_mod.hgwebdir(config, baseui=baseui) + return hgweb_mod.hgweb(config, name=name, baseui=baseui) + +def hgwebdir(config, baseui=None): + return hgwebdir_mod.hgwebdir(config, baseui=baseui) + diff --git a/websdk/mercurial/hgweb/common.py b/websdk/mercurial/hgweb/common.py index 6bb6c19..2c2c76c 120000..100644 --- a/websdk/mercurial/hgweb/common.py +++ b/websdk/mercurial/hgweb/common.py @@ -1 +1,186 @@ -/usr/share/pyshared/mercurial/hgweb/common.py \ No newline at end of file +# hgweb/common.py - Utility functions needed by hgweb_mod and hgwebdir_mod +# +# Copyright 21 May 2005 - (c) 2005 Jake Edge +# Copyright 2005, 2006 Matt Mackall +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +import errno, mimetypes, os + +HTTP_OK = 200 +HTTP_NOT_MODIFIED = 304 +HTTP_BAD_REQUEST = 400 +HTTP_UNAUTHORIZED = 401 +HTTP_FORBIDDEN = 403 +HTTP_NOT_FOUND = 404 +HTTP_METHOD_NOT_ALLOWED = 405 +HTTP_SERVER_ERROR = 500 + + +def checkauthz(hgweb, req, op): + '''Check permission for operation based on request data (including + authentication info). 
Return if op allowed, else raise an ErrorResponse + exception.''' + + user = req.env.get('REMOTE_USER') + + deny_read = hgweb.configlist('web', 'deny_read') + if deny_read and (not user or deny_read == ['*'] or user in deny_read): + raise ErrorResponse(HTTP_UNAUTHORIZED, 'read not authorized') + + allow_read = hgweb.configlist('web', 'allow_read') + result = (not allow_read) or (allow_read == ['*']) + if not (result or user in allow_read): + raise ErrorResponse(HTTP_UNAUTHORIZED, 'read not authorized') + + if op == 'pull' and not hgweb.allowpull: + raise ErrorResponse(HTTP_UNAUTHORIZED, 'pull not authorized') + elif op == 'pull' or op is None: # op is None for interface requests + return + + # enforce that you can only push using POST requests + if req.env['REQUEST_METHOD'] != 'POST': + msg = 'push requires POST request' + raise ErrorResponse(HTTP_METHOD_NOT_ALLOWED, msg) + + # require ssl by default for pushing, auth info cannot be sniffed + # and replayed + scheme = req.env.get('wsgi.url_scheme') + if hgweb.configbool('web', 'push_ssl', True) and scheme != 'https': + raise ErrorResponse(HTTP_OK, 'ssl required') + + deny = hgweb.configlist('web', 'deny_push') + if deny and (not user or deny == ['*'] or user in deny): + raise ErrorResponse(HTTP_UNAUTHORIZED, 'push not authorized') + + allow = hgweb.configlist('web', 'allow_push') + result = allow and (allow == ['*'] or user in allow) + if not result: + raise ErrorResponse(HTTP_UNAUTHORIZED, 'push not authorized') + +# Hooks for hgweb permission checks; extensions can add hooks here. +# Each hook is invoked like this: hook(hgweb, request, operation), +# where operation is either read, pull or push. Hooks should either +# raise an ErrorResponse exception, or just return. +# +# It is possible to do both authentication and authorization through +# this. 
+permhooks = [checkauthz] + + +class ErrorResponse(Exception): + def __init__(self, code, message=None, headers=[]): + if message is None: + message = _statusmessage(code) + Exception.__init__(self) + self.code = code + self.message = message + self.headers = headers + def __str__(self): + return self.message + +class continuereader(object): + def __init__(self, f, write): + self.f = f + self._write = write + self.continued = False + + def read(self, amt=-1): + if not self.continued: + self.continued = True + self._write('HTTP/1.1 100 Continue\r\n\r\n') + return self.f.read(amt) + + def __getattr__(self, attr): + if attr in ('close', 'readline', 'readlines', '__iter__'): + return getattr(self.f, attr) + raise AttributeError() + +def _statusmessage(code): + from BaseHTTPServer import BaseHTTPRequestHandler + responses = BaseHTTPRequestHandler.responses + return responses.get(code, ('Error', 'Unknown error'))[0] + +def statusmessage(code, message=None): + return '%d %s' % (code, message or _statusmessage(code)) + +def get_stat(spath): + """stat changelog if it exists, spath otherwise""" + cl_path = os.path.join(spath, "00changelog.i") + if os.path.exists(cl_path): + return os.stat(cl_path) + else: + return os.stat(spath) + +def get_mtime(spath): + return get_stat(spath).st_mtime + +def staticfile(directory, fname, req): + """return a file inside directory with guessed Content-Type header + + fname always uses '/' as directory separator and isn't allowed to + contain unusual path components. + Content-Type is guessed using the mimetypes module. + Return an empty string if fname is illegal or file not found. 
+ + """ + parts = fname.split('/') + for part in parts: + if (part in ('', os.curdir, os.pardir) or + os.sep in part or os.altsep is not None and os.altsep in part): + return "" + fpath = os.path.join(*parts) + if isinstance(directory, str): + directory = [directory] + for d in directory: + path = os.path.join(d, fpath) + if os.path.exists(path): + break + try: + os.stat(path) + ct = mimetypes.guess_type(path)[0] or "text/plain" + req.respond(HTTP_OK, ct, length = os.path.getsize(path)) + fp = open(path, 'rb') + data = fp.read() + fp.close() + return data + except TypeError: + raise ErrorResponse(HTTP_SERVER_ERROR, 'illegal filename') + except OSError, err: + if err.errno == errno.ENOENT: + raise ErrorResponse(HTTP_NOT_FOUND) + else: + raise ErrorResponse(HTTP_SERVER_ERROR, err.strerror) + +def paritygen(stripecount, offset=0): + """count parity of horizontal stripes for easier reading""" + if stripecount and offset: + # account for offset, e.g. due to building the list in reverse + count = (stripecount + offset) % stripecount + parity = (stripecount + offset) / stripecount & 1 + else: + count = 0 + parity = 0 + while True: + yield parity + count += 1 + if stripecount and count >= stripecount: + parity = 1 - parity + count = 0 + +def get_contact(config): + """Return repo contact information or empty string. + + web.contact is the primary source, but if that is not set, try + ui.username or $EMAIL as a fallback to display something useful. 
+ """ + return (config("web", "contact") or + config("ui", "username") or + os.environ.get("EMAIL") or "") + +def caching(web, req): + tag = str(web.mtime) + if req.env.get('HTTP_IF_NONE_MATCH') == tag: + raise ErrorResponse(HTTP_NOT_MODIFIED) + req.headers.append(('ETag', tag)) diff --git a/websdk/mercurial/hgweb/hgweb_mod.py b/websdk/mercurial/hgweb/hgweb_mod.py index 473f623..9f8f3ff 120000..100644 --- a/websdk/mercurial/hgweb/hgweb_mod.py +++ b/websdk/mercurial/hgweb/hgweb_mod.py @@ -1 +1,305 @@ -/usr/share/pyshared/mercurial/hgweb/hgweb_mod.py \ No newline at end of file +# hgweb/hgweb_mod.py - Web interface for a repository. +# +# Copyright 21 May 2005 - (c) 2005 Jake Edge +# Copyright 2005-2007 Matt Mackall +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +import os +from mercurial import ui, hg, hook, error, encoding, templater, util +from common import get_stat, ErrorResponse, permhooks, caching +from common import HTTP_OK, HTTP_NOT_MODIFIED, HTTP_BAD_REQUEST +from common import HTTP_NOT_FOUND, HTTP_SERVER_ERROR +from request import wsgirequest +import webcommands, protocol, webutil + +perms = { + 'changegroup': 'pull', + 'changegroupsubset': 'pull', + 'getbundle': 'pull', + 'stream_out': 'pull', + 'listkeys': 'pull', + 'unbundle': 'push', + 'pushkey': 'push', +} + +class hgweb(object): + def __init__(self, repo, name=None, baseui=None): + if isinstance(repo, str): + if baseui: + u = baseui.copy() + else: + u = ui.ui() + self.repo = hg.repository(u, repo) + else: + self.repo = repo + + self.repo.ui.setconfig('ui', 'report_untrusted', 'off') + self.repo.ui.setconfig('ui', 'interactive', 'off') + hook.redirect(True) + self.mtime = -1 + self.size = -1 + self.reponame = name + self.archives = 'zip', 'gz', 'bz2' + self.stripecount = 1 + # a repo owner may set web.templates in .hg/hgrc to get any file + # readable by the user running the CGI script + self.templatepath = 
self.config('web', 'templates') + + # The CGI scripts are often run by a user different from the repo owner. + # Trust the settings from the .hg/hgrc files by default. + def config(self, section, name, default=None, untrusted=True): + return self.repo.ui.config(section, name, default, + untrusted=untrusted) + + def configbool(self, section, name, default=False, untrusted=True): + return self.repo.ui.configbool(section, name, default, + untrusted=untrusted) + + def configlist(self, section, name, default=None, untrusted=True): + return self.repo.ui.configlist(section, name, default, + untrusted=untrusted) + + def refresh(self, request=None): + if request: + self.repo.ui.environ = request.env + st = get_stat(self.repo.spath) + # compare changelog size in addition to mtime to catch + # rollbacks made less than a second ago + if st.st_mtime != self.mtime or st.st_size != self.size: + self.mtime = st.st_mtime + self.size = st.st_size + self.repo = hg.repository(self.repo.ui, self.repo.root) + self.maxchanges = int(self.config("web", "maxchanges", 10)) + self.stripecount = int(self.config("web", "stripes", 1)) + self.maxshortchanges = int(self.config("web", "maxshortchanges", 60)) + self.maxfiles = int(self.config("web", "maxfiles", 10)) + self.allowpull = self.configbool("web", "allowpull", True) + encoding.encoding = self.config("web", "encoding", + encoding.encoding) + + def run(self): + if not os.environ.get('GATEWAY_INTERFACE', '').startswith("CGI/1."): + raise RuntimeError("This function is only intended to be " + "called while running as a CGI script.") + import mercurial.hgweb.wsgicgi as wsgicgi + wsgicgi.launch(self) + + def __call__(self, env, respond): + req = wsgirequest(env, respond) + return self.run_wsgi(req) + + def run_wsgi(self, req): + + self.refresh(req) + + # work with CGI variables to create coherent structure + # use SCRIPT_NAME, PATH_INFO and QUERY_STRING as well as our REPO_NAME + + req.url = req.env['SCRIPT_NAME'] + if not req.url.endswith('/'): 
+ req.url += '/' + if 'REPO_NAME' in req.env: + req.url += req.env['REPO_NAME'] + '/' + + if 'PATH_INFO' in req.env: + parts = req.env['PATH_INFO'].strip('/').split('/') + repo_parts = req.env.get('REPO_NAME', '').split('/') + if parts[:len(repo_parts)] == repo_parts: + parts = parts[len(repo_parts):] + query = '/'.join(parts) + else: + query = req.env['QUERY_STRING'].split('&', 1)[0] + query = query.split(';', 1)[0] + + # process this if it's a protocol request + # protocol bits don't need to create any URLs + # and the clients always use the old URL structure + + cmd = req.form.get('cmd', [''])[0] + if protocol.iscmd(cmd): + try: + if query: + raise ErrorResponse(HTTP_NOT_FOUND) + if cmd in perms: + self.check_perm(req, perms[cmd]) + return protocol.call(self.repo, req, cmd) + except ErrorResponse, inst: + # A client that sends unbundle without 100-continue will + # break if we respond early. + if (cmd == 'unbundle' and + (req.env.get('HTTP_EXPECT', + '').lower() != '100-continue') or + req.env.get('X-HgHttp2', '')): + req.drain() + req.respond(inst, protocol.HGTYPE) + return '0\n%s\n' % inst.message + + # translate user-visible url structure to internal structure + + args = query.split('/', 2) + if 'cmd' not in req.form and args and args[0]: + + cmd = args.pop(0) + style = cmd.rfind('-') + if style != -1: + req.form['style'] = [cmd[:style]] + cmd = cmd[style + 1:] + + # avoid accepting e.g. 
style parameter as command + if util.safehasattr(webcommands, cmd): + req.form['cmd'] = [cmd] + else: + cmd = '' + + if cmd == 'static': + req.form['file'] = ['/'.join(args)] + else: + if args and args[0]: + node = args.pop(0) + req.form['node'] = [node] + if args: + req.form['file'] = args + + ua = req.env.get('HTTP_USER_AGENT', '') + if cmd == 'rev' and 'mercurial' in ua: + req.form['style'] = ['raw'] + + if cmd == 'archive': + fn = req.form['node'][0] + for type_, spec in self.archive_specs.iteritems(): + ext = spec[2] + if fn.endswith(ext): + req.form['node'] = [fn[:-len(ext)]] + req.form['type'] = [type_] + + # process the web interface request + + try: + tmpl = self.templater(req) + ctype = tmpl('mimetype', encoding=encoding.encoding) + ctype = templater.stringify(ctype) + + # check read permissions non-static content + if cmd != 'static': + self.check_perm(req, None) + + if cmd == '': + req.form['cmd'] = [tmpl.cache['default']] + cmd = req.form['cmd'][0] + + if self.configbool('web', 'cache', True): + caching(self, req) # sets ETag header or raises NOT_MODIFIED + if cmd not in webcommands.__all__: + msg = 'no such method: %s' % cmd + raise ErrorResponse(HTTP_BAD_REQUEST, msg) + elif cmd == 'file' and 'raw' in req.form.get('style', []): + self.ctype = ctype + content = webcommands.rawfile(self, req, tmpl) + else: + content = getattr(webcommands, cmd)(self, req, tmpl) + req.respond(HTTP_OK, ctype) + + return content + + except error.LookupError, err: + req.respond(HTTP_NOT_FOUND, ctype) + msg = str(err) + if 'manifest' not in msg: + msg = 'revision not found: %s' % err.name + return tmpl('error', error=msg) + except (error.RepoError, error.RevlogError), inst: + req.respond(HTTP_SERVER_ERROR, ctype) + return tmpl('error', error=str(inst)) + except ErrorResponse, inst: + req.respond(inst, ctype) + if inst.code == HTTP_NOT_MODIFIED: + # Not allowed to return a body on a 304 + return [''] + return tmpl('error', error=inst.message) + + def templater(self, req): + + 
# determine scheme, port and server name + # this is needed to create absolute urls + + proto = req.env.get('wsgi.url_scheme') + if proto == 'https': + proto = 'https' + default_port = "443" + else: + proto = 'http' + default_port = "80" + + port = req.env["SERVER_PORT"] + port = port != default_port and (":" + port) or "" + urlbase = '%s://%s%s' % (proto, req.env['SERVER_NAME'], port) + logourl = self.config("web", "logourl", "http://mercurial.selenic.com/") + logoimg = self.config("web", "logoimg", "hglogo.png") + staticurl = self.config("web", "staticurl") or req.url + 'static/' + if not staticurl.endswith('/'): + staticurl += '/' + + # some functions for the templater + + def header(**map): + yield tmpl('header', encoding=encoding.encoding, **map) + + def footer(**map): + yield tmpl("footer", **map) + + def motd(**map): + yield self.config("web", "motd", "") + + # figure out which style to use + + vars = {} + styles = ( + req.form.get('style', [None])[0], + self.config('web', 'style'), + 'paper', + ) + style, mapfile = templater.stylemap(styles, self.templatepath) + if style == styles[0]: + vars['style'] = style + + start = req.url[-1] == '?' and '&' or '?' 
+ sessionvars = webutil.sessionvars(vars, start) + + if not self.reponame: + self.reponame = (self.config("web", "name") + or req.env.get('REPO_NAME') + or req.url.strip('/') or self.repo.root) + + # create the templater + + tmpl = templater.templater(mapfile, + defaults={"url": req.url, + "logourl": logourl, + "logoimg": logoimg, + "staticurl": staticurl, + "urlbase": urlbase, + "repo": self.reponame, + "header": header, + "footer": footer, + "motd": motd, + "sessionvars": sessionvars + }) + return tmpl + + def archivelist(self, nodeid): + allowed = self.configlist("web", "allow_archive") + for i, spec in self.archive_specs.iteritems(): + if i in allowed or self.configbool("web", "allow" + i): + yield {"type" : i, "extension" : spec[2], "node" : nodeid} + + archive_specs = { + 'bz2': ('application/x-bzip2', 'tbz2', '.tar.bz2', None), + 'gz': ('application/x-gzip', 'tgz', '.tar.gz', None), + 'zip': ('application/zip', 'zip', '.zip', None), + } + + def check_perm(self, req, op): + for hook in permhooks: + hook(self, req, op) diff --git a/websdk/mercurial/hgweb/hgwebdir_mod.py b/websdk/mercurial/hgweb/hgwebdir_mod.py index cdbca6c..a22a00b 120000..100644 --- a/websdk/mercurial/hgweb/hgwebdir_mod.py +++ b/websdk/mercurial/hgweb/hgwebdir_mod.py @@ -1 +1,399 @@ -/usr/share/pyshared/mercurial/hgweb/hgwebdir_mod.py \ No newline at end of file +# hgweb/hgwebdir_mod.py - Web interface for a directory of repositories. +# +# Copyright 21 May 2005 - (c) 2005 Jake Edge +# Copyright 2005, 2006 Matt Mackall +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. 
+ +import os, re, time +from mercurial.i18n import _ +from mercurial import ui, hg, scmutil, util, templater +from mercurial import error, encoding +from common import ErrorResponse, get_mtime, staticfile, paritygen, \ + get_contact, HTTP_OK, HTTP_NOT_FOUND, HTTP_SERVER_ERROR +from hgweb_mod import hgweb +from request import wsgirequest +import webutil + +def cleannames(items): + return [(util.pconvert(name).strip('/'), path) for name, path in items] + +def findrepos(paths): + repos = [] + for prefix, root in cleannames(paths): + roothead, roottail = os.path.split(root) + # "foo = /bar/*" makes every subrepo of /bar/ to be + # mounted as foo/subrepo + # and "foo = /bar/**" also recurses into the subdirectories, + # remember to use it without working dir. + try: + recurse = {'*': False, '**': True}[roottail] + except KeyError: + repos.append((prefix, root)) + continue + roothead = os.path.normpath(os.path.abspath(roothead)) + paths = scmutil.walkrepos(roothead, followsym=True, recurse=recurse) + repos.extend(urlrepos(prefix, roothead, paths)) + return repos + +def urlrepos(prefix, roothead, paths): + """yield url paths and filesystem paths from a list of repo paths + + >>> conv = lambda seq: [(v, util.pconvert(p)) for v,p in seq] + >>> conv(urlrepos('hg', '/opt', ['/opt/r', '/opt/r/r', '/opt'])) + [('hg/r', '/opt/r'), ('hg/r/r', '/opt/r/r'), ('hg', '/opt')] + >>> conv(urlrepos('', '/opt', ['/opt/r', '/opt/r/r', '/opt'])) + [('r', '/opt/r'), ('r/r', '/opt/r/r'), ('', '/opt')] + """ + for path in paths: + path = os.path.normpath(path) + yield (prefix + '/' + + util.pconvert(path[len(roothead):]).lstrip('/')).strip('/'), path + +def geturlcgivars(baseurl, port): + """ + Extract CGI variables from baseurl + + >>> geturlcgivars("http://host.org/base", "80") + ('host.org', '80', '/base') + >>> geturlcgivars("http://host.org:8000/base", "80") + ('host.org', '8000', '/base') + >>> geturlcgivars('/base', 8000) + ('', '8000', '/base') + >>> geturlcgivars("base", '8000') + 
('', '8000', '/base') + >>> geturlcgivars("http://host", '8000') + ('host', '8000', '/') + >>> geturlcgivars("http://host/", '8000') + ('host', '8000', '/') + """ + u = util.url(baseurl) + name = u.host or '' + if u.port: + port = u.port + path = u.path or "" + if not path.startswith('/'): + path = '/' + path + + return name, str(port), path + +class hgwebdir(object): + refreshinterval = 20 + + def __init__(self, conf, baseui=None): + self.conf = conf + self.baseui = baseui + self.lastrefresh = 0 + self.motd = None + self.refresh() + + def refresh(self): + if self.lastrefresh + self.refreshinterval > time.time(): + return + + if self.baseui: + u = self.baseui.copy() + else: + u = ui.ui() + u.setconfig('ui', 'report_untrusted', 'off') + u.setconfig('ui', 'interactive', 'off') + + if not isinstance(self.conf, (dict, list, tuple)): + map = {'paths': 'hgweb-paths'} + if not os.path.exists(self.conf): + raise util.Abort(_('config file %s not found!') % self.conf) + u.readconfig(self.conf, remap=map, trust=True) + paths = [] + for name, ignored in u.configitems('hgweb-paths'): + for path in u.configlist('hgweb-paths', name): + paths.append((name, path)) + elif isinstance(self.conf, (list, tuple)): + paths = self.conf + elif isinstance(self.conf, dict): + paths = self.conf.items() + + repos = findrepos(paths) + for prefix, root in u.configitems('collections'): + prefix = util.pconvert(prefix) + for path in scmutil.walkrepos(root, followsym=True): + repo = os.path.normpath(path) + name = util.pconvert(repo) + if name.startswith(prefix): + name = name[len(prefix):] + repos.append((name.lstrip('/'), repo)) + + self.repos = repos + self.ui = u + encoding.encoding = self.ui.config('web', 'encoding', + encoding.encoding) + self.style = self.ui.config('web', 'style', 'paper') + self.templatepath = self.ui.config('web', 'templates', None) + self.stripecount = self.ui.config('web', 'stripes', 1) + if self.stripecount: + self.stripecount = int(self.stripecount) + self._baseurl = 
self.ui.config('web', 'baseurl') + self.lastrefresh = time.time() + + def run(self): + if not os.environ.get('GATEWAY_INTERFACE', '').startswith("CGI/1."): + raise RuntimeError("This function is only intended to be " + "called while running as a CGI script.") + import mercurial.hgweb.wsgicgi as wsgicgi + wsgicgi.launch(self) + + def __call__(self, env, respond): + req = wsgirequest(env, respond) + return self.run_wsgi(req) + + def read_allowed(self, ui, req): + """Check allow_read and deny_read config options of a repo's ui object + to determine user permissions. By default, with neither option set (or + both empty), allow all users to read the repo. There are two ways a + user can be denied read access: (1) deny_read is not empty, and the + user is unauthenticated or deny_read contains user (or *), and (2) + allow_read is not empty and the user is not in allow_read. Return True + if user is allowed to read the repo, else return False.""" + + user = req.env.get('REMOTE_USER') + + deny_read = ui.configlist('web', 'deny_read', untrusted=True) + if deny_read and (not user or deny_read == ['*'] or user in deny_read): + return False + + allow_read = ui.configlist('web', 'allow_read', untrusted=True) + # by default, allow reading if no allow_read option has been set + if (not allow_read) or (allow_read == ['*']) or (user in allow_read): + return True + + return False + + def run_wsgi(self, req): + try: + try: + self.refresh() + + virtual = req.env.get("PATH_INFO", "").strip('/') + tmpl = self.templater(req) + ctype = tmpl('mimetype', encoding=encoding.encoding) + ctype = templater.stringify(ctype) + + # a static file + if virtual.startswith('static/') or 'static' in req.form: + if virtual.startswith('static/'): + fname = virtual[7:] + else: + fname = req.form['static'][0] + static = templater.templatepath('static') + return (staticfile(static, fname, req),) + + # top-level index + elif not virtual: + req.respond(HTTP_OK, ctype) + return self.makeindex(req, tmpl) + + # 
nested indexes and hgwebs + + repos = dict(self.repos) + virtualrepo = virtual + while virtualrepo: + real = repos.get(virtualrepo) + if real: + req.env['REPO_NAME'] = virtualrepo + try: + repo = hg.repository(self.ui, real) + return hgweb(repo).run_wsgi(req) + except IOError, inst: + msg = inst.strerror + raise ErrorResponse(HTTP_SERVER_ERROR, msg) + except error.RepoError, inst: + raise ErrorResponse(HTTP_SERVER_ERROR, str(inst)) + + up = virtualrepo.rfind('/') + if up < 0: + break + virtualrepo = virtualrepo[:up] + + # browse subdirectories + subdir = virtual + '/' + if [r for r in repos if r.startswith(subdir)]: + req.respond(HTTP_OK, ctype) + return self.makeindex(req, tmpl, subdir) + + # prefixes not found + req.respond(HTTP_NOT_FOUND, ctype) + return tmpl("notfound", repo=virtual) + + except ErrorResponse, err: + req.respond(err, ctype) + return tmpl('error', error=err.message or '') + finally: + tmpl = None + + def makeindex(self, req, tmpl, subdir=""): + + def archivelist(ui, nodeid, url): + allowed = ui.configlist("web", "allow_archive", untrusted=True) + archives = [] + for i in [('zip', '.zip'), ('gz', '.tar.gz'), ('bz2', '.tar.bz2')]: + if i[0] in allowed or ui.configbool("web", "allow" + i[0], + untrusted=True): + archives.append({"type" : i[0], "extension": i[1], + "node": nodeid, "url": url}) + return archives + + def rawentries(subdir="", **map): + + descend = self.ui.configbool('web', 'descend', True) + for name, path in self.repos: + + if not name.startswith(subdir): + continue + name = name[len(subdir):] + if not descend and '/' in name: + continue + + u = self.ui.copy() + try: + u.readconfig(os.path.join(path, '.hg', 'hgrc')) + except Exception, e: + u.warn(_('error reading %s/.hg/hgrc: %s\n') % (path, e)) + continue + def get(section, name, default=None): + return u.config(section, name, default, untrusted=True) + + if u.configbool("web", "hidden", untrusted=True): + continue + + if not self.read_allowed(u, req): + continue + + parts = [name] 
+ if 'PATH_INFO' in req.env: + parts.insert(0, req.env['PATH_INFO'].rstrip('/')) + if req.env['SCRIPT_NAME']: + parts.insert(0, req.env['SCRIPT_NAME']) + url = re.sub(r'/+', '/', '/'.join(parts) + '/') + + # update time with local timezone + try: + r = hg.repository(self.ui, path) + except IOError: + u.warn(_('error accessing repository at %s\n') % path) + continue + except error.RepoError: + u.warn(_('error accessing repository at %s\n') % path) + continue + try: + d = (get_mtime(r.spath), util.makedate()[1]) + except OSError: + continue + + contact = get_contact(get) + description = get("web", "description", "") + name = get("web", "name", name) + row = dict(contact=contact or "unknown", + contact_sort=contact.upper() or "unknown", + name=name, + name_sort=name, + url=url, + description=description or "unknown", + description_sort=description.upper() or "unknown", + lastchange=d, + lastchange_sort=d[1]-d[0], + archives=archivelist(u, "tip", url)) + yield row + + sortdefault = None, False + def entries(sortcolumn="", descending=False, subdir="", **map): + rows = rawentries(subdir=subdir, **map) + + if sortcolumn and sortdefault != (sortcolumn, descending): + sortkey = '%s_sort' % sortcolumn + rows = sorted(rows, key=lambda x: x[sortkey], + reverse=descending) + for row, parity in zip(rows, paritygen(self.stripecount)): + row['parity'] = parity + yield row + + self.refresh() + sortable = ["name", "description", "contact", "lastchange"] + sortcolumn, descending = sortdefault + if 'sort' in req.form: + sortcolumn = req.form['sort'][0] + descending = sortcolumn.startswith('-') + if descending: + sortcolumn = sortcolumn[1:] + if sortcolumn not in sortable: + sortcolumn = "" + + sort = [("sort_%s" % column, + "%s%s" % ((not descending and column == sortcolumn) + and "-" or "", column)) + for column in sortable] + + self.refresh() + self.updatereqenv(req.env) + + return tmpl("index", entries=entries, subdir=subdir, + sortcolumn=sortcolumn, descending=descending, + 
**dict(sort)) + + def templater(self, req): + + def header(**map): + yield tmpl('header', encoding=encoding.encoding, **map) + + def footer(**map): + yield tmpl("footer", **map) + + def motd(**map): + if self.motd is not None: + yield self.motd + else: + yield config('web', 'motd', '') + + def config(section, name, default=None, untrusted=True): + return self.ui.config(section, name, default, untrusted) + + self.updatereqenv(req.env) + + url = req.env.get('SCRIPT_NAME', '') + if not url.endswith('/'): + url += '/' + + vars = {} + styles = ( + req.form.get('style', [None])[0], + config('web', 'style'), + 'paper' + ) + style, mapfile = templater.stylemap(styles, self.templatepath) + if style == styles[0]: + vars['style'] = style + + start = url[-1] == '?' and '&' or '?' + sessionvars = webutil.sessionvars(vars, start) + logourl = config('web', 'logourl', 'http://mercurial.selenic.com/') + logoimg = config('web', 'logoimg', 'hglogo.png') + staticurl = config('web', 'staticurl') or url + 'static/' + if not staticurl.endswith('/'): + staticurl += '/' + + tmpl = templater.templater(mapfile, + defaults={"header": header, + "footer": footer, + "motd": motd, + "url": url, + "logourl": logourl, + "logoimg": logoimg, + "staticurl": staticurl, + "sessionvars": sessionvars}) + return tmpl + + def updatereqenv(self, env): + if self._baseurl is not None: + name, port, path = geturlcgivars(self._baseurl, env['SERVER_PORT']) + env['SERVER_NAME'] = name + env['SERVER_PORT'] = port + env['SCRIPT_NAME'] = path diff --git a/websdk/mercurial/hgweb/protocol.py b/websdk/mercurial/hgweb/protocol.py index c5d1fb4..84aa6b1 120000..100644 --- a/websdk/mercurial/hgweb/protocol.py +++ b/websdk/mercurial/hgweb/protocol.py @@ -1 +1,97 @@ -/usr/share/pyshared/mercurial/hgweb/protocol.py \ No newline at end of file +# +# Copyright 21 May 2005 - (c) 2005 Jake Edge +# Copyright 2005-2007 Matt Mackall +# +# This software may be used and distributed according to the terms of the +# GNU General Public 
License version 2 or any later version. + +import cgi, cStringIO, zlib, urllib +from mercurial import util, wireproto +from common import HTTP_OK + +HGTYPE = 'application/mercurial-0.1' +HGERRTYPE = 'application/hg-error' + +class webproto(object): + def __init__(self, req, ui): + self.req = req + self.response = '' + self.ui = ui + def getargs(self, args): + knownargs = self._args() + data = {} + keys = args.split() + for k in keys: + if k == '*': + star = {} + for key in knownargs.keys(): + if key != 'cmd' and key not in keys: + star[key] = knownargs[key][0] + data['*'] = star + else: + data[k] = knownargs[k][0] + return [data[k] for k in keys] + def _args(self): + args = self.req.form.copy() + chunks = [] + i = 1 + while True: + h = self.req.env.get('HTTP_X_HGARG_' + str(i)) + if h is None: + break + chunks += [h] + i += 1 + args.update(cgi.parse_qs(''.join(chunks), keep_blank_values=True)) + return args + def getfile(self, fp): + length = int(self.req.env['CONTENT_LENGTH']) + for s in util.filechunkiter(self.req, limit=length): + fp.write(s) + def redirect(self): + self.oldio = self.ui.fout, self.ui.ferr + self.ui.ferr = self.ui.fout = cStringIO.StringIO() + def restore(self): + val = self.ui.fout.getvalue() + self.ui.ferr, self.ui.fout = self.oldio + return val + def groupchunks(self, cg): + z = zlib.compressobj() + while True: + chunk = cg.read(4096) + if not chunk: + break + yield z.compress(chunk) + yield z.flush() + def _client(self): + return 'remote:%s:%s:%s' % ( + self.req.env.get('wsgi.url_scheme') or 'http', + urllib.quote(self.req.env.get('REMOTE_HOST', '')), + urllib.quote(self.req.env.get('REMOTE_USER', ''))) + +def iscmd(cmd): + return cmd in wireproto.commands + +def call(repo, req, cmd): + p = webproto(req, repo.ui) + rsp = wireproto.dispatch(repo, p, cmd) + if isinstance(rsp, str): + req.respond(HTTP_OK, HGTYPE, length=len(rsp)) + return [rsp] + elif isinstance(rsp, wireproto.streamres): + req.respond(HTTP_OK, HGTYPE) + return rsp.gen + elif 
isinstance(rsp, wireproto.pushres): + val = p.restore() + req.respond(HTTP_OK, HGTYPE) + return ['%d\n%s' % (rsp.res, val)] + elif isinstance(rsp, wireproto.pusherr): + # drain the incoming bundle + req.drain() + p.restore() + rsp = '0\n%s\n' % rsp.res + req.respond(HTTP_OK, HGTYPE, length=len(rsp)) + return [rsp] + elif isinstance(rsp, wireproto.ooberror): + rsp = rsp.message + req.respond(HTTP_OK, HGERRTYPE, length=len(rsp)) + return [rsp] diff --git a/websdk/mercurial/hgweb/request.py b/websdk/mercurial/hgweb/request.py index 6113c10..1fddc95 120000..100644 --- a/websdk/mercurial/hgweb/request.py +++ b/websdk/mercurial/hgweb/request.py @@ -1 +1,147 @@ -/usr/share/pyshared/mercurial/hgweb/request.py \ No newline at end of file +# hgweb/request.py - An http request from either CGI or the standalone server. +# +# Copyright 21 May 2005 - (c) 2005 Jake Edge +# Copyright 2005, 2006 Matt Mackall +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. 
+ +import socket, cgi, errno +from mercurial import util +from common import ErrorResponse, statusmessage, HTTP_NOT_MODIFIED + +shortcuts = { + 'cl': [('cmd', ['changelog']), ('rev', None)], + 'sl': [('cmd', ['shortlog']), ('rev', None)], + 'cs': [('cmd', ['changeset']), ('node', None)], + 'f': [('cmd', ['file']), ('filenode', None)], + 'fl': [('cmd', ['filelog']), ('filenode', None)], + 'fd': [('cmd', ['filediff']), ('node', None)], + 'fa': [('cmd', ['annotate']), ('filenode', None)], + 'mf': [('cmd', ['manifest']), ('manifest', None)], + 'ca': [('cmd', ['archive']), ('node', None)], + 'tags': [('cmd', ['tags'])], + 'tip': [('cmd', ['changeset']), ('node', ['tip'])], + 'static': [('cmd', ['static']), ('file', None)] +} + +def normalize(form): + # first expand the shortcuts + for k in shortcuts.iterkeys(): + if k in form: + for name, value in shortcuts[k]: + if value is None: + value = form[k] + form[name] = value + del form[k] + # And strip the values + for k, v in form.iteritems(): + form[k] = [i.strip() for i in v] + return form + +class wsgirequest(object): + def __init__(self, wsgienv, start_response): + version = wsgienv['wsgi.version'] + if (version < (1, 0)) or (version >= (2, 0)): + raise RuntimeError("Unknown and unsupported WSGI version %d.%d" + % version) + self.inp = wsgienv['wsgi.input'] + self.err = wsgienv['wsgi.errors'] + self.threaded = wsgienv['wsgi.multithread'] + self.multiprocess = wsgienv['wsgi.multiprocess'] + self.run_once = wsgienv['wsgi.run_once'] + self.env = wsgienv + self.form = normalize(cgi.parse(self.inp, + self.env, + keep_blank_values=1)) + self._start_response = start_response + self.server_write = None + self.headers = [] + + def __iter__(self): + return iter([]) + + def read(self, count=-1): + return self.inp.read(count) + + def drain(self): + '''need to read all data from request, httplib is half-duplex''' + length = int(self.env.get('CONTENT_LENGTH') or 0) + for s in util.filechunkiter(self.inp, limit=length): + pass + + def 
respond(self, status, type=None, filename=None, length=0): + if self._start_response is not None: + + self.httphdr(type, filename, length) + if not self.headers: + raise RuntimeError("request.write called before headers sent") + + for k, v in self.headers: + if not isinstance(v, str): + raise TypeError('header value must be string: %r' % v) + + if isinstance(status, ErrorResponse): + self.header(status.headers) + if status.code == HTTP_NOT_MODIFIED: + # RFC 2616 Section 10.3.5: 304 Not Modified has cases where + # it MUST NOT include any headers other than these and no + # body + self.headers = [(k, v) for (k, v) in self.headers if + k in ('Date', 'ETag', 'Expires', + 'Cache-Control', 'Vary')] + status = statusmessage(status.code, status.message) + elif status == 200: + status = '200 Script output follows' + elif isinstance(status, int): + status = statusmessage(status) + + self.server_write = self._start_response(status, self.headers) + self._start_response = None + self.headers = [] + + def write(self, thing): + if util.safehasattr(thing, "__iter__"): + for part in thing: + self.write(part) + else: + thing = str(thing) + try: + self.server_write(thing) + except socket.error, inst: + if inst[0] != errno.ECONNRESET: + raise + + def writelines(self, lines): + for line in lines: + self.write(line) + + def flush(self): + return None + + def close(self): + return None + + def header(self, headers=[('Content-Type','text/html')]): + self.headers.extend(headers) + + def httphdr(self, type=None, filename=None, length=0, headers={}): + headers = headers.items() + if type is not None: + headers.append(('Content-Type', type)) + if filename: + filename = (filename.split('/')[-1] + .replace('\\', '\\\\').replace('"', '\\"')) + headers.append(('Content-Disposition', + 'inline; filename="%s"' % filename)) + if length: + headers.append(('Content-Length', str(length))) + self.header(headers) + +def wsgiapplication(app_maker): + '''For compatibility with old CGI scripts. 
A plain hgweb() or hgwebdir() + can and should now be used as a WSGI application.''' + application = app_maker() + def run_wsgi(env, respond): + return application(env, respond) + return run_wsgi diff --git a/websdk/mercurial/hgweb/server.py b/websdk/mercurial/hgweb/server.py index 92f92ad..3ad99f4 120000..100644 --- a/websdk/mercurial/hgweb/server.py +++ b/websdk/mercurial/hgweb/server.py @@ -1 +1,320 @@ -/usr/share/pyshared/mercurial/hgweb/server.py \ No newline at end of file +# hgweb/server.py - The standalone hg web server. +# +# Copyright 21 May 2005 - (c) 2005 Jake Edge +# Copyright 2005-2007 Matt Mackall +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +import os, sys, errno, urllib, BaseHTTPServer, socket, SocketServer, traceback +from mercurial import util, error +from mercurial.hgweb import common +from mercurial.i18n import _ + +def _splitURI(uri): + """ Return path and query splited from uri + + Just like CGI environment, the path is unquoted, the query is + not. + """ + if '?' 
in uri: + path, query = uri.split('?', 1) + else: + path, query = uri, '' + return urllib.unquote(path), query + +class _error_logger(object): + def __init__(self, handler): + self.handler = handler + def flush(self): + pass + def write(self, str): + self.writelines(str.split('\n')) + def writelines(self, seq): + for msg in seq: + self.handler.log_error("HG error: %s", msg) + +class _httprequesthandler(BaseHTTPServer.BaseHTTPRequestHandler): + + url_scheme = 'http' + + @staticmethod + def preparehttpserver(httpserver, ssl_cert): + """Prepare .socket of new HTTPServer instance""" + pass + + def __init__(self, *args, **kargs): + self.protocol_version = 'HTTP/1.1' + BaseHTTPServer.BaseHTTPRequestHandler.__init__(self, *args, **kargs) + + def _log_any(self, fp, format, *args): + fp.write("%s - - [%s] %s\n" % (self.client_address[0], + self.log_date_time_string(), + format % args)) + fp.flush() + + def log_error(self, format, *args): + self._log_any(self.server.errorlog, format, *args) + + def log_message(self, format, *args): + self._log_any(self.server.accesslog, format, *args) + + def log_request(self, code='-', size='-'): + xheaders = [h for h in self.headers.items() if h[0].startswith('x-')] + self.log_message('"%s" %s %s%s', + self.requestline, str(code), str(size), + ''.join([' %s:%s' % h for h in sorted(xheaders)])) + + def do_write(self): + try: + self.do_hgweb() + except socket.error, inst: + if inst[0] != errno.EPIPE: + raise + + def do_POST(self): + try: + self.do_write() + except Exception: + self._start_response("500 Internal Server Error", []) + self._write("Internal Server Error") + tb = "".join(traceback.format_exception(*sys.exc_info())) + self.log_error("Exception happened during processing " + "request '%s':\n%s", self.path, tb) + + def do_GET(self): + self.do_POST() + + def do_hgweb(self): + path, query = _splitURI(self.path) + + env = {} + env['GATEWAY_INTERFACE'] = 'CGI/1.1' + env['REQUEST_METHOD'] = self.command + env['SERVER_NAME'] = 
self.server.server_name + env['SERVER_PORT'] = str(self.server.server_port) + env['REQUEST_URI'] = self.path + env['SCRIPT_NAME'] = self.server.prefix + env['PATH_INFO'] = path[len(self.server.prefix):] + env['REMOTE_HOST'] = self.client_address[0] + env['REMOTE_ADDR'] = self.client_address[0] + if query: + env['QUERY_STRING'] = query + + if self.headers.typeheader is None: + env['CONTENT_TYPE'] = self.headers.type + else: + env['CONTENT_TYPE'] = self.headers.typeheader + length = self.headers.getheader('content-length') + if length: + env['CONTENT_LENGTH'] = length + for header in [h for h in self.headers.keys() + if h not in ('content-type', 'content-length')]: + hkey = 'HTTP_' + header.replace('-', '_').upper() + hval = self.headers.getheader(header) + hval = hval.replace('\n', '').strip() + if hval: + env[hkey] = hval + env['SERVER_PROTOCOL'] = self.request_version + env['wsgi.version'] = (1, 0) + env['wsgi.url_scheme'] = self.url_scheme + if env.get('HTTP_EXPECT', '').lower() == '100-continue': + self.rfile = common.continuereader(self.rfile, self.wfile.write) + + env['wsgi.input'] = self.rfile + env['wsgi.errors'] = _error_logger(self) + env['wsgi.multithread'] = isinstance(self.server, + SocketServer.ThreadingMixIn) + env['wsgi.multiprocess'] = isinstance(self.server, + SocketServer.ForkingMixIn) + env['wsgi.run_once'] = 0 + + self.close_connection = True + self.saved_status = None + self.saved_headers = [] + self.sent_headers = False + self.length = None + for chunk in self.server.application(env, self._start_response): + self._write(chunk) + + def send_headers(self): + if not self.saved_status: + raise AssertionError("Sending headers before " + "start_response() called") + saved_status = self.saved_status.split(None, 1) + saved_status[0] = int(saved_status[0]) + self.send_response(*saved_status) + should_close = True + for h in self.saved_headers: + self.send_header(*h) + if h[0].lower() == 'content-length': + should_close = False + self.length = int(h[1]) 
+ # The value of the Connection header is a list of case-insensitive + # tokens separated by commas and optional whitespace. + if 'close' in [token.strip().lower() for token in + self.headers.get('connection', '').split(',')]: + should_close = True + if should_close: + self.send_header('Connection', 'close') + self.close_connection = should_close + self.end_headers() + self.sent_headers = True + + def _start_response(self, http_status, headers, exc_info=None): + code, msg = http_status.split(None, 1) + code = int(code) + self.saved_status = http_status + bad_headers = ('connection', 'transfer-encoding') + self.saved_headers = [h for h in headers + if h[0].lower() not in bad_headers] + return self._write + + def _write(self, data): + if not self.saved_status: + raise AssertionError("data written before start_response() called") + elif not self.sent_headers: + self.send_headers() + if self.length is not None: + if len(data) > self.length: + raise AssertionError("Content-length header sent, but more " + "bytes than specified are being written.") + self.length = self.length - len(data) + self.wfile.write(data) + self.wfile.flush() + +class _httprequesthandleropenssl(_httprequesthandler): + """HTTPS handler based on pyOpenSSL""" + + url_scheme = 'https' + + @staticmethod + def preparehttpserver(httpserver, ssl_cert): + try: + import OpenSSL + OpenSSL.SSL.Context + except ImportError: + raise util.Abort(_("SSL support is unavailable")) + ctx = OpenSSL.SSL.Context(OpenSSL.SSL.SSLv23_METHOD) + ctx.use_privatekey_file(ssl_cert) + ctx.use_certificate_file(ssl_cert) + sock = socket.socket(httpserver.address_family, httpserver.socket_type) + httpserver.socket = OpenSSL.SSL.Connection(ctx, sock) + httpserver.server_bind() + httpserver.server_activate() + + def setup(self): + self.connection = self.request + self.rfile = socket._fileobject(self.request, "rb", self.rbufsize) + self.wfile = socket._fileobject(self.request, "wb", self.wbufsize) + + def do_write(self): + import 
OpenSSL + try: + _httprequesthandler.do_write(self) + except OpenSSL.SSL.SysCallError, inst: + if inst.args[0] != errno.EPIPE: + raise + + def handle_one_request(self): + import OpenSSL + try: + _httprequesthandler.handle_one_request(self) + except (OpenSSL.SSL.SysCallError, OpenSSL.SSL.ZeroReturnError): + self.close_connection = True + pass + +class _httprequesthandlerssl(_httprequesthandler): + """HTTPS handler based on Pythons ssl module (introduced in 2.6)""" + + url_scheme = 'https' + + @staticmethod + def preparehttpserver(httpserver, ssl_cert): + try: + import ssl + ssl.wrap_socket + except ImportError: + raise util.Abort(_("SSL support is unavailable")) + httpserver.socket = ssl.wrap_socket(httpserver.socket, server_side=True, + certfile=ssl_cert, ssl_version=ssl.PROTOCOL_SSLv23) + + def setup(self): + self.connection = self.request + self.rfile = socket._fileobject(self.request, "rb", self.rbufsize) + self.wfile = socket._fileobject(self.request, "wb", self.wbufsize) + +try: + from threading import activeCount + activeCount() # silence pyflakes + _mixin = SocketServer.ThreadingMixIn +except ImportError: + if util.safehasattr(os, "fork"): + _mixin = SocketServer.ForkingMixIn + else: + class _mixin(object): + pass + +def openlog(opt, default): + if opt and opt != '-': + return open(opt, 'a') + return default + +class MercurialHTTPServer(object, _mixin, BaseHTTPServer.HTTPServer): + + # SO_REUSEADDR has broken semantics on windows + if os.name == 'nt': + allow_reuse_address = 0 + + def __init__(self, ui, app, addr, handler, **kwargs): + BaseHTTPServer.HTTPServer.__init__(self, addr, handler, **kwargs) + self.daemon_threads = True + self.application = app + + handler.preparehttpserver(self, ui.config('web', 'certificate')) + + prefix = ui.config('web', 'prefix', '') + if prefix: + prefix = '/' + prefix.strip('/') + self.prefix = prefix + + alog = openlog(ui.config('web', 'accesslog', '-'), sys.stdout) + elog = openlog(ui.config('web', 'errorlog', '-'), 
sys.stderr) + self.accesslog = alog + self.errorlog = elog + + self.addr, self.port = self.socket.getsockname()[0:2] + self.fqaddr = socket.getfqdn(addr[0]) + +class IPv6HTTPServer(MercurialHTTPServer): + address_family = getattr(socket, 'AF_INET6', None) + def __init__(self, *args, **kwargs): + if self.address_family is None: + raise error.RepoError(_('IPv6 is not available on this system')) + super(IPv6HTTPServer, self).__init__(*args, **kwargs) + +def create_server(ui, app): + + if ui.config('web', 'certificate'): + if sys.version_info >= (2, 6): + handler = _httprequesthandlerssl + else: + handler = _httprequesthandleropenssl + else: + handler = _httprequesthandler + + if ui.configbool('web', 'ipv6'): + cls = IPv6HTTPServer + else: + cls = MercurialHTTPServer + + # ugly hack due to python issue5853 (for threaded use) + import mimetypes; mimetypes.init() + + address = ui.config('web', 'address', '') + port = util.getport(ui.config('web', 'port', 8000)) + try: + return cls(ui, app, (address, port), handler) + except socket.error, inst: + raise util.Abort(_("cannot start server at '%s:%d': %s") + % (address, port, inst.args[1])) diff --git a/websdk/mercurial/hgweb/webcommands.py b/websdk/mercurial/hgweb/webcommands.py index 29141b6..fc43ca3 120000..100644 --- a/websdk/mercurial/hgweb/webcommands.py +++ b/websdk/mercurial/hgweb/webcommands.py @@ -1 +1,842 @@ -/usr/share/pyshared/mercurial/hgweb/webcommands.py \ No newline at end of file +# +# Copyright 21 May 2005 - (c) 2005 Jake Edge +# Copyright 2005-2007 Matt Mackall +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. 
+ +import os, mimetypes, re, cgi, copy +import webutil +from mercurial import error, encoding, archival, templater, templatefilters +from mercurial.node import short, hex +from mercurial.util import binary +from common import paritygen, staticfile, get_contact, ErrorResponse +from common import HTTP_OK, HTTP_FORBIDDEN, HTTP_NOT_FOUND +from mercurial import graphmod +from mercurial import help as helpmod +from mercurial.i18n import _ + +# __all__ is populated with the allowed commands. Be sure to add to it if +# you're adding a new command, or the new command won't work. + +__all__ = [ + 'log', 'rawfile', 'file', 'changelog', 'shortlog', 'changeset', 'rev', + 'manifest', 'tags', 'bookmarks', 'branches', 'summary', 'filediff', 'diff', + 'annotate', 'filelog', 'archive', 'static', 'graph', 'help', +] + +def log(web, req, tmpl): + if 'file' in req.form and req.form['file'][0]: + return filelog(web, req, tmpl) + else: + return changelog(web, req, tmpl) + +def rawfile(web, req, tmpl): + guessmime = web.configbool('web', 'guessmime', False) + + path = webutil.cleanpath(web.repo, req.form.get('file', [''])[0]) + if not path: + content = manifest(web, req, tmpl) + req.respond(HTTP_OK, web.ctype) + return content + + try: + fctx = webutil.filectx(web.repo, req) + except error.LookupError, inst: + try: + content = manifest(web, req, tmpl) + req.respond(HTTP_OK, web.ctype) + return content + except ErrorResponse: + raise inst + + path = fctx.path() + text = fctx.data() + mt = 'application/binary' + if guessmime: + mt = mimetypes.guess_type(path)[0] + if mt is None: + mt = binary(text) and 'application/binary' or 'text/plain' + if mt.startswith('text/'): + mt += '; charset="%s"' % encoding.encoding + + req.respond(HTTP_OK, mt, path, len(text)) + return [text] + +def _filerevision(web, tmpl, fctx): + f = fctx.path() + text = fctx.data() + parity = paritygen(web.stripecount) + + if binary(text): + mt = mimetypes.guess_type(f)[0] or 'application/octet-stream' + text = 
'(binary:%s)' % mt + + def lines(): + for lineno, t in enumerate(text.splitlines(True)): + yield {"line": t, + "lineid": "l%d" % (lineno + 1), + "linenumber": "% 6d" % (lineno + 1), + "parity": parity.next()} + + return tmpl("filerevision", + file=f, + path=webutil.up(f), + text=lines(), + rev=fctx.rev(), + node=fctx.hex(), + author=fctx.user(), + date=fctx.date(), + desc=fctx.description(), + branch=webutil.nodebranchnodefault(fctx), + parent=webutil.parents(fctx), + child=webutil.children(fctx), + rename=webutil.renamelink(fctx), + permissions=fctx.manifest().flags(f)) + +def file(web, req, tmpl): + path = webutil.cleanpath(web.repo, req.form.get('file', [''])[0]) + if not path: + return manifest(web, req, tmpl) + try: + return _filerevision(web, tmpl, webutil.filectx(web.repo, req)) + except error.LookupError, inst: + try: + return manifest(web, req, tmpl) + except ErrorResponse: + raise inst + +def _search(web, req, tmpl): + + query = req.form['rev'][0] + revcount = web.maxchanges + if 'revcount' in req.form: + revcount = int(req.form.get('revcount', [revcount])[0]) + revcount = max(revcount, 1) + tmpl.defaults['sessionvars']['revcount'] = revcount + + lessvars = copy.copy(tmpl.defaults['sessionvars']) + lessvars['revcount'] = max(revcount / 2, 1) + lessvars['rev'] = query + morevars = copy.copy(tmpl.defaults['sessionvars']) + morevars['revcount'] = revcount * 2 + morevars['rev'] = query + + def changelist(**map): + count = 0 + qw = query.lower().split() + + def revgen(): + for i in xrange(len(web.repo) - 1, 0, -100): + l = [] + for j in xrange(max(0, i - 100), i + 1): + ctx = web.repo[j] + l.append(ctx) + l.reverse() + for e in l: + yield e + + for ctx in revgen(): + miss = 0 + for q in qw: + if not (q in ctx.user().lower() or + q in ctx.description().lower() or + q in " ".join(ctx.files()).lower()): + miss = 1 + break + if miss: + continue + + count += 1 + n = ctx.node() + showtags = webutil.showtag(web.repo, tmpl, 'changelogtag', n) + files = 
webutil.listfilediffs(tmpl, ctx.files(), n, web.maxfiles) + + yield tmpl('searchentry', + parity=parity.next(), + author=ctx.user(), + parent=webutil.parents(ctx), + child=webutil.children(ctx), + changelogtag=showtags, + desc=ctx.description(), + date=ctx.date(), + files=files, + rev=ctx.rev(), + node=hex(n), + tags=webutil.nodetagsdict(web.repo, n), + bookmarks=webutil.nodebookmarksdict(web.repo, n), + inbranch=webutil.nodeinbranch(web.repo, ctx), + branches=webutil.nodebranchdict(web.repo, ctx)) + + if count >= revcount: + break + + tip = web.repo['tip'] + parity = paritygen(web.stripecount) + + return tmpl('search', query=query, node=tip.hex(), + entries=changelist, archives=web.archivelist("tip"), + morevars=morevars, lessvars=lessvars) + +def changelog(web, req, tmpl, shortlog=False): + + if 'node' in req.form: + ctx = webutil.changectx(web.repo, req) + else: + if 'rev' in req.form: + hi = req.form['rev'][0] + else: + hi = len(web.repo) - 1 + try: + ctx = web.repo[hi] + except error.RepoError: + return _search(web, req, tmpl) # XXX redirect to 404 page? 
+ + def changelist(limit=0, **map): + l = [] # build a list in forward order for efficiency + for i in xrange(start, end): + ctx = web.repo[i] + n = ctx.node() + showtags = webutil.showtag(web.repo, tmpl, 'changelogtag', n) + files = webutil.listfilediffs(tmpl, ctx.files(), n, web.maxfiles) + + l.insert(0, {"parity": parity.next(), + "author": ctx.user(), + "parent": webutil.parents(ctx, i - 1), + "child": webutil.children(ctx, i + 1), + "changelogtag": showtags, + "desc": ctx.description(), + "date": ctx.date(), + "files": files, + "rev": i, + "node": hex(n), + "tags": webutil.nodetagsdict(web.repo, n), + "bookmarks": webutil.nodebookmarksdict(web.repo, n), + "inbranch": webutil.nodeinbranch(web.repo, ctx), + "branches": webutil.nodebranchdict(web.repo, ctx) + }) + + if limit > 0: + l = l[:limit] + + for e in l: + yield e + + revcount = shortlog and web.maxshortchanges or web.maxchanges + if 'revcount' in req.form: + revcount = int(req.form.get('revcount', [revcount])[0]) + revcount = max(revcount, 1) + tmpl.defaults['sessionvars']['revcount'] = revcount + + lessvars = copy.copy(tmpl.defaults['sessionvars']) + lessvars['revcount'] = max(revcount / 2, 1) + morevars = copy.copy(tmpl.defaults['sessionvars']) + morevars['revcount'] = revcount * 2 + + count = len(web.repo) + pos = ctx.rev() + start = max(0, pos - revcount + 1) + end = min(count, start + revcount) + pos = end - 1 + parity = paritygen(web.stripecount, offset=start - end) + + changenav = webutil.revnavgen(pos, revcount, count, web.repo.changectx) + + return tmpl(shortlog and 'shortlog' or 'changelog', changenav=changenav, + node=ctx.hex(), rev=pos, changesets=count, + entries=lambda **x: changelist(limit=0,**x), + latestentry=lambda **x: changelist(limit=1,**x), + archives=web.archivelist("tip"), revcount=revcount, + morevars=morevars, lessvars=lessvars) + +def shortlog(web, req, tmpl): + return changelog(web, req, tmpl, shortlog = True) + +def changeset(web, req, tmpl): + ctx = 
webutil.changectx(web.repo, req) + showtags = webutil.showtag(web.repo, tmpl, 'changesettag', ctx.node()) + showbookmarks = webutil.showbookmark(web.repo, tmpl, 'changesetbookmark', + ctx.node()) + showbranch = webutil.nodebranchnodefault(ctx) + + files = [] + parity = paritygen(web.stripecount) + for f in ctx.files(): + template = f in ctx and 'filenodelink' or 'filenolink' + files.append(tmpl(template, + node=ctx.hex(), file=f, + parity=parity.next())) + + style = web.config('web', 'style', 'paper') + if 'style' in req.form: + style = req.form['style'][0] + + parity = paritygen(web.stripecount) + diffs = webutil.diffs(web.repo, tmpl, ctx, None, parity, style) + + parity = paritygen(web.stripecount) + diffstatgen = webutil.diffstatgen(ctx) + diffstat = webutil.diffstat(tmpl, ctx, diffstatgen, parity) + + return tmpl('changeset', + diff=diffs, + rev=ctx.rev(), + node=ctx.hex(), + parent=webutil.parents(ctx), + child=webutil.children(ctx), + changesettag=showtags, + changesetbookmark=showbookmarks, + changesetbranch=showbranch, + author=ctx.user(), + desc=ctx.description(), + date=ctx.date(), + files=files, + diffsummary=lambda **x: webutil.diffsummary(diffstatgen), + diffstat=diffstat, + archives=web.archivelist(ctx.hex()), + tags=webutil.nodetagsdict(web.repo, ctx.node()), + bookmarks=webutil.nodebookmarksdict(web.repo, ctx.node()), + branch=webutil.nodebranchnodefault(ctx), + inbranch=webutil.nodeinbranch(web.repo, ctx), + branches=webutil.nodebranchdict(web.repo, ctx)) + +rev = changeset + +def manifest(web, req, tmpl): + ctx = webutil.changectx(web.repo, req) + path = webutil.cleanpath(web.repo, req.form.get('file', [''])[0]) + mf = ctx.manifest() + node = ctx.node() + + files = {} + dirs = {} + parity = paritygen(web.stripecount) + + if path and path[-1] != "/": + path += "/" + l = len(path) + abspath = "/" + path + + for f, n in mf.iteritems(): + if f[:l] != path: + continue + remain = f[l:] + elements = remain.split('/') + if len(elements) == 1: + 
files[remain] = f + else: + h = dirs # need to retain ref to dirs (root) + for elem in elements[0:-1]: + if elem not in h: + h[elem] = {} + h = h[elem] + if len(h) > 1: + break + h[None] = None # denotes files present + + if mf and not files and not dirs: + raise ErrorResponse(HTTP_NOT_FOUND, 'path not found: ' + path) + + def filelist(**map): + for f in sorted(files): + full = files[f] + + fctx = ctx.filectx(full) + yield {"file": full, + "parity": parity.next(), + "basename": f, + "date": fctx.date(), + "size": fctx.size(), + "permissions": mf.flags(full)} + + def dirlist(**map): + for d in sorted(dirs): + + emptydirs = [] + h = dirs[d] + while isinstance(h, dict) and len(h) == 1: + k, v = h.items()[0] + if v: + emptydirs.append(k) + h = v + + path = "%s%s" % (abspath, d) + yield {"parity": parity.next(), + "path": path, + "emptydirs": "/".join(emptydirs), + "basename": d} + + return tmpl("manifest", + rev=ctx.rev(), + node=hex(node), + path=abspath, + up=webutil.up(abspath), + upparity=parity.next(), + fentries=filelist, + dentries=dirlist, + archives=web.archivelist(hex(node)), + tags=webutil.nodetagsdict(web.repo, node), + bookmarks=webutil.nodebookmarksdict(web.repo, node), + inbranch=webutil.nodeinbranch(web.repo, ctx), + branches=webutil.nodebranchdict(web.repo, ctx)) + +def tags(web, req, tmpl): + i = web.repo.tagslist() + i.reverse() + parity = paritygen(web.stripecount) + + def entries(notip=False, limit=0, **map): + count = 0 + for k, n in i: + if notip and k == "tip": + continue + if limit > 0 and count >= limit: + continue + count = count + 1 + yield {"parity": parity.next(), + "tag": k, + "date": web.repo[n].date(), + "node": hex(n)} + + return tmpl("tags", + node=hex(web.repo.changelog.tip()), + entries=lambda **x: entries(False, 0, **x), + entriesnotip=lambda **x: entries(True, 0, **x), + latestentry=lambda **x: entries(True, 1, **x)) + +def bookmarks(web, req, tmpl): + i = web.repo._bookmarks.items() + parity = paritygen(web.stripecount) + + def 
entries(limit=0, **map): + count = 0 + for k, n in sorted(i): + if limit > 0 and count >= limit: + continue + count = count + 1 + yield {"parity": parity.next(), + "bookmark": k, + "date": web.repo[n].date(), + "node": hex(n)} + + return tmpl("bookmarks", + node=hex(web.repo.changelog.tip()), + entries=lambda **x: entries(0, **x), + latestentry=lambda **x: entries(1, **x)) + +def branches(web, req, tmpl): + tips = (web.repo[n] for t, n in web.repo.branchtags().iteritems()) + heads = web.repo.heads() + parity = paritygen(web.stripecount) + sortkey = lambda ctx: ('close' not in ctx.extra(), ctx.rev()) + + def entries(limit, **map): + count = 0 + for ctx in sorted(tips, key=sortkey, reverse=True): + if limit > 0 and count >= limit: + return + count += 1 + if not web.repo.branchheads(ctx.branch()): + status = 'closed' + elif ctx.node() not in heads: + status = 'inactive' + else: + status = 'open' + yield {'parity': parity.next(), + 'branch': ctx.branch(), + 'status': status, + 'node': ctx.hex(), + 'date': ctx.date()} + + return tmpl('branches', node=hex(web.repo.changelog.tip()), + entries=lambda **x: entries(0, **x), + latestentry=lambda **x: entries(1, **x)) + +def summary(web, req, tmpl): + i = web.repo.tagslist() + i.reverse() + + def tagentries(**map): + parity = paritygen(web.stripecount) + count = 0 + for k, n in i: + if k == "tip": # skip tip + continue + + count += 1 + if count > 10: # limit to 10 tags + break + + yield tmpl("tagentry", + parity=parity.next(), + tag=k, + node=hex(n), + date=web.repo[n].date()) + + def bookmarks(**map): + parity = paritygen(web.stripecount) + b = web.repo._bookmarks.items() + for k, n in sorted(b)[:10]: # limit to 10 bookmarks + yield {'parity': parity.next(), + 'bookmark': k, + 'date': web.repo[n].date(), + 'node': hex(n)} + + def branches(**map): + parity = paritygen(web.stripecount) + + b = web.repo.branchtags() + l = [(-web.repo.changelog.rev(n), n, t) for t, n in b.iteritems()] + for r, n, t in sorted(l): + yield 
{'parity': parity.next(), + 'branch': t, + 'node': hex(n), + 'date': web.repo[n].date()} + + def changelist(**map): + parity = paritygen(web.stripecount, offset=start - end) + l = [] # build a list in forward order for efficiency + for i in xrange(start, end): + ctx = web.repo[i] + n = ctx.node() + hn = hex(n) + + l.insert(0, tmpl( + 'shortlogentry', + parity=parity.next(), + author=ctx.user(), + desc=ctx.description(), + date=ctx.date(), + rev=i, + node=hn, + tags=webutil.nodetagsdict(web.repo, n), + bookmarks=webutil.nodebookmarksdict(web.repo, n), + inbranch=webutil.nodeinbranch(web.repo, ctx), + branches=webutil.nodebranchdict(web.repo, ctx))) + + yield l + + tip = web.repo['tip'] + count = len(web.repo) + start = max(0, count - web.maxchanges) + end = min(count, start + web.maxchanges) + + return tmpl("summary", + desc=web.config("web", "description", "unknown"), + owner=get_contact(web.config) or "unknown", + lastchange=tip.date(), + tags=tagentries, + bookmarks=bookmarks, + branches=branches, + shortlog=changelist, + node=tip.hex(), + archives=web.archivelist("tip")) + +def filediff(web, req, tmpl): + fctx, ctx = None, None + try: + fctx = webutil.filectx(web.repo, req) + except LookupError: + ctx = webutil.changectx(web.repo, req) + path = webutil.cleanpath(web.repo, req.form['file'][0]) + if path not in ctx.files(): + raise + + if fctx is not None: + n = fctx.node() + path = fctx.path() + else: + n = ctx.node() + # path already defined in except clause + + parity = paritygen(web.stripecount) + style = web.config('web', 'style', 'paper') + if 'style' in req.form: + style = req.form['style'][0] + + diffs = webutil.diffs(web.repo, tmpl, fctx or ctx, [path], parity, style) + rename = fctx and webutil.renamelink(fctx) or [] + ctx = fctx and fctx or ctx + return tmpl("filediff", + file=path, + node=hex(n), + rev=ctx.rev(), + date=ctx.date(), + desc=ctx.description(), + author=ctx.user(), + rename=rename, + branch=webutil.nodebranchnodefault(ctx), + 
parent=webutil.parents(ctx), + child=webutil.children(ctx), + diff=diffs) + +diff = filediff + +def annotate(web, req, tmpl): + fctx = webutil.filectx(web.repo, req) + f = fctx.path() + parity = paritygen(web.stripecount) + + def annotate(**map): + last = None + if binary(fctx.data()): + mt = (mimetypes.guess_type(fctx.path())[0] + or 'application/octet-stream') + lines = enumerate([((fctx.filectx(fctx.filerev()), 1), + '(binary:%s)' % mt)]) + else: + lines = enumerate(fctx.annotate(follow=True, linenumber=True)) + for lineno, ((f, targetline), l) in lines: + fnode = f.filenode() + + if last != fnode: + last = fnode + + yield {"parity": parity.next(), + "node": f.hex(), + "rev": f.rev(), + "author": f.user(), + "desc": f.description(), + "file": f.path(), + "targetline": targetline, + "line": l, + "lineid": "l%d" % (lineno + 1), + "linenumber": "% 6d" % (lineno + 1), + "revdate": f.date()} + + return tmpl("fileannotate", + file=f, + annotate=annotate, + path=webutil.up(f), + rev=fctx.rev(), + node=fctx.hex(), + author=fctx.user(), + date=fctx.date(), + desc=fctx.description(), + rename=webutil.renamelink(fctx), + branch=webutil.nodebranchnodefault(fctx), + parent=webutil.parents(fctx), + child=webutil.children(fctx), + permissions=fctx.manifest().flags(f)) + +def filelog(web, req, tmpl): + + try: + fctx = webutil.filectx(web.repo, req) + f = fctx.path() + fl = fctx.filelog() + except error.LookupError: + f = webutil.cleanpath(web.repo, req.form['file'][0]) + fl = web.repo.file(f) + numrevs = len(fl) + if not numrevs: # file doesn't exist at all + raise + rev = webutil.changectx(web.repo, req).rev() + first = fl.linkrev(0) + if rev < first: # current rev is from before file existed + raise + frev = numrevs - 1 + while fl.linkrev(frev) > rev: + frev -= 1 + fctx = web.repo.filectx(f, fl.linkrev(frev)) + + revcount = web.maxshortchanges + if 'revcount' in req.form: + revcount = int(req.form.get('revcount', [revcount])[0]) + revcount = max(revcount, 1) + 
tmpl.defaults['sessionvars']['revcount'] = revcount + + lessvars = copy.copy(tmpl.defaults['sessionvars']) + lessvars['revcount'] = max(revcount / 2, 1) + morevars = copy.copy(tmpl.defaults['sessionvars']) + morevars['revcount'] = revcount * 2 + + count = fctx.filerev() + 1 + start = max(0, fctx.filerev() - revcount + 1) # first rev on this page + end = min(count, start + revcount) # last rev on this page + parity = paritygen(web.stripecount, offset=start - end) + + def entries(limit=0, **map): + l = [] + + repo = web.repo + for i in xrange(start, end): + iterfctx = fctx.filectx(i) + + l.insert(0, {"parity": parity.next(), + "filerev": i, + "file": f, + "node": iterfctx.hex(), + "author": iterfctx.user(), + "date": iterfctx.date(), + "rename": webutil.renamelink(iterfctx), + "parent": webutil.parents(iterfctx), + "child": webutil.children(iterfctx), + "desc": iterfctx.description(), + "tags": webutil.nodetagsdict(repo, iterfctx.node()), + "bookmarks": webutil.nodebookmarksdict( + repo, iterfctx.node()), + "branch": webutil.nodebranchnodefault(iterfctx), + "inbranch": webutil.nodeinbranch(repo, iterfctx), + "branches": webutil.nodebranchdict(repo, iterfctx)}) + + if limit > 0: + l = l[:limit] + + for e in l: + yield e + + nodefunc = lambda x: fctx.filectx(fileid=x) + nav = webutil.revnavgen(end - 1, revcount, count, nodefunc) + return tmpl("filelog", file=f, node=fctx.hex(), nav=nav, + entries=lambda **x: entries(limit=0, **x), + latestentry=lambda **x: entries(limit=1, **x), + revcount=revcount, morevars=morevars, lessvars=lessvars) + +def archive(web, req, tmpl): + type_ = req.form.get('type', [None])[0] + allowed = web.configlist("web", "allow_archive") + key = req.form['node'][0] + + if type_ not in web.archives: + msg = 'Unsupported archive type: %s' % type_ + raise ErrorResponse(HTTP_NOT_FOUND, msg) + + if not ((type_ in allowed or + web.configbool("web", "allow" + type_, False))): + msg = 'Archive type not allowed: %s' % type_ + raise 
ErrorResponse(HTTP_FORBIDDEN, msg) + + reponame = re.sub(r"\W+", "-", os.path.basename(web.reponame)) + cnode = web.repo.lookup(key) + arch_version = key + if cnode == key or key == 'tip': + arch_version = short(cnode) + name = "%s-%s" % (reponame, arch_version) + mimetype, artype, extension, encoding = web.archive_specs[type_] + headers = [ + ('Content-Type', mimetype), + ('Content-Disposition', 'attachment; filename=%s%s' % (name, extension)) + ] + if encoding: + headers.append(('Content-Encoding', encoding)) + req.header(headers) + req.respond(HTTP_OK) + archival.archive(web.repo, req, cnode, artype, prefix=name) + return [] + + +def static(web, req, tmpl): + fname = req.form['file'][0] + # a repo owner may set web.static in .hg/hgrc to get any file + # readable by the user running the CGI script + static = web.config("web", "static", None, untrusted=False) + if not static: + tp = web.templatepath or templater.templatepath() + if isinstance(tp, str): + tp = [tp] + static = [os.path.join(p, 'static') for p in tp] + return [staticfile(static, fname, req)] + +def graph(web, req, tmpl): + + rev = webutil.changectx(web.repo, req).rev() + bg_height = 39 + revcount = web.maxshortchanges + if 'revcount' in req.form: + revcount = int(req.form.get('revcount', [revcount])[0]) + revcount = max(revcount, 1) + tmpl.defaults['sessionvars']['revcount'] = revcount + + lessvars = copy.copy(tmpl.defaults['sessionvars']) + lessvars['revcount'] = max(revcount / 2, 1) + morevars = copy.copy(tmpl.defaults['sessionvars']) + morevars['revcount'] = revcount * 2 + + max_rev = len(web.repo) - 1 + revcount = min(max_rev, revcount) + revnode = web.repo.changelog.node(rev) + revnode_hex = hex(revnode) + uprev = min(max_rev, rev + revcount) + downrev = max(0, rev - revcount) + count = len(web.repo) + changenav = webutil.revnavgen(rev, revcount, count, web.repo.changectx) + startrev = rev + # if starting revision is less than 60 set it to uprev + if rev < web.maxshortchanges: + startrev = uprev 
+ + dag = graphmod.dagwalker(web.repo, range(startrev, downrev - 1, -1)) + tree = list(graphmod.colored(dag)) + canvasheight = (len(tree) + 1) * bg_height - 27 + data = [] + for (id, type, ctx, vtx, edges) in tree: + if type != graphmod.CHANGESET: + continue + node = str(ctx) + age = templatefilters.age(ctx.date()) + desc = templatefilters.firstline(ctx.description()) + desc = cgi.escape(templatefilters.nonempty(desc)) + user = cgi.escape(templatefilters.person(ctx.user())) + branch = ctx.branch() + branch = branch, web.repo.branchtags().get(branch) == ctx.node() + data.append((node, vtx, edges, desc, user, age, branch, ctx.tags(), + ctx.bookmarks())) + + return tmpl('graph', rev=rev, revcount=revcount, uprev=uprev, + lessvars=lessvars, morevars=morevars, downrev=downrev, + canvasheight=canvasheight, jsdata=data, bg_height=bg_height, + node=revnode_hex, changenav=changenav) + +def _getdoc(e): + doc = e[0].__doc__ + if doc: + doc = doc.split('\n')[0] + else: + doc = _('(no help text available)') + return doc + +def help(web, req, tmpl): + from mercurial import commands # avoid cycle + + topicname = req.form.get('node', [None])[0] + if not topicname: + def topics(**map): + for entries, summary, _ in helpmod.helptable: + entries = sorted(entries, key=len) + yield {'topic': entries[-1], 'summary': summary} + + early, other = [], [] + primary = lambda s: s.split('|')[0] + for c, e in commands.table.iteritems(): + doc = _getdoc(e) + if 'DEPRECATED' in doc or c.startswith('debug'): + continue + cmd = primary(c) + if cmd.startswith('^'): + early.append((cmd[1:], doc)) + else: + other.append((cmd, doc)) + + early.sort() + other.sort() + + def earlycommands(**map): + for c, doc in early: + yield {'topic': c, 'summary': doc} + + def othercommands(**map): + for c, doc in other: + yield {'topic': c, 'summary': doc} + + return tmpl('helptopics', topics=topics, earlycommands=earlycommands, + othercommands=othercommands, title='Index') + + u = webutil.wsgiui() + u.pushbuffer() + 
try: + commands.help_(u, topicname) + except error.UnknownCommand: + raise ErrorResponse(HTTP_NOT_FOUND) + doc = u.popbuffer() + return tmpl('help', topic=topicname, doc=doc) diff --git a/websdk/mercurial/hgweb/webutil.py b/websdk/mercurial/hgweb/webutil.py index 5ff5492..bad7cdd 120000..100644 --- a/websdk/mercurial/hgweb/webutil.py +++ b/websdk/mercurial/hgweb/webutil.py @@ -1 +1,269 @@ -/usr/share/pyshared/mercurial/hgweb/webutil.py \ No newline at end of file +# hgweb/webutil.py - utility library for the web interface. +# +# Copyright 21 May 2005 - (c) 2005 Jake Edge +# Copyright 2005-2007 Matt Mackall +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +import os, copy +from mercurial import match, patch, scmutil, error, ui, util +from mercurial.i18n import _ +from mercurial.node import hex, nullid + +def up(p): + if p[0] != "/": + p = "/" + p + if p[-1] == "/": + p = p[:-1] + up = os.path.dirname(p) + if up == "/": + return "/" + return up + "/" + +def revnavgen(pos, pagelen, limit, nodefunc): + def seq(factor, limit=None): + if limit: + yield limit + if limit >= 20 and limit <= 40: + yield 50 + else: + yield 1 * factor + yield 3 * factor + for f in seq(factor * 10): + yield f + + navbefore = [] + navafter = [] + + last = 0 + for f in seq(1, pagelen): + if f < pagelen or f <= last: + continue + if f > limit: + break + last = f + if pos + f < limit: + navafter.append(("+%d" % f, hex(nodefunc(pos + f).node()))) + if pos - f >= 0: + navbefore.insert(0, ("-%d" % f, hex(nodefunc(pos - f).node()))) + + navafter.append(("tip", "tip")) + try: + navbefore.insert(0, ("(0)", hex(nodefunc('0').node()))) + except error.RepoError: + pass + + def gen(l): + def f(**map): + for label, node in l: + yield {"label": label, "node": node} + return f + + return (dict(before=gen(navbefore), after=gen(navafter)),) + +def _siblings(siblings=[], hiderev=None): + siblings = [s for s in siblings if 
s.node() != nullid] + if len(siblings) == 1 and siblings[0].rev() == hiderev: + return + for s in siblings: + d = {'node': s.hex(), 'rev': s.rev()} + d['user'] = s.user() + d['date'] = s.date() + d['description'] = s.description() + d['branch'] = s.branch() + if util.safehasattr(s, 'path'): + d['file'] = s.path() + yield d + +def parents(ctx, hide=None): + return _siblings(ctx.parents(), hide) + +def children(ctx, hide=None): + return _siblings(ctx.children(), hide) + +def renamelink(fctx): + r = fctx.renamed() + if r: + return [dict(file=r[0], node=hex(r[1]))] + return [] + +def nodetagsdict(repo, node): + return [{"name": i} for i in repo.nodetags(node)] + +def nodebookmarksdict(repo, node): + return [{"name": i} for i in repo.nodebookmarks(node)] + +def nodebranchdict(repo, ctx): + branches = [] + branch = ctx.branch() + # If this is an empty repo, ctx.node() == nullid, + # ctx.branch() == 'default', but branchtags() is + # an empty dict. Using dict.get avoids a traceback. + if repo.branchtags().get(branch) == ctx.node(): + branches.append({"name": branch}) + return branches + +def nodeinbranch(repo, ctx): + branches = [] + branch = ctx.branch() + if branch != 'default' and repo.branchtags().get(branch) != ctx.node(): + branches.append({"name": branch}) + return branches + +def nodebranchnodefault(ctx): + branches = [] + branch = ctx.branch() + if branch != 'default': + branches.append({"name": branch}) + return branches + +def showtag(repo, tmpl, t1, node=nullid, **args): + for t in repo.nodetags(node): + yield tmpl(t1, tag=t, **args) + +def showbookmark(repo, tmpl, t1, node=nullid, **args): + for t in repo.nodebookmarks(node): + yield tmpl(t1, bookmark=t, **args) + +def cleanpath(repo, path): + path = path.lstrip('/') + return scmutil.canonpath(repo.root, '', path) + +def changectx(repo, req): + changeid = "tip" + if 'node' in req.form: + changeid = req.form['node'][0] + elif 'manifest' in req.form: + changeid = req.form['manifest'][0] + + try: + ctx = 
repo[changeid] + except error.RepoError: + man = repo.manifest + ctx = repo[man.linkrev(man.rev(man.lookup(changeid)))] + + return ctx + +def filectx(repo, req): + path = cleanpath(repo, req.form['file'][0]) + if 'node' in req.form: + changeid = req.form['node'][0] + else: + changeid = req.form['filenode'][0] + try: + fctx = repo[changeid][path] + except error.RepoError: + fctx = repo.filectx(path, fileid=changeid) + + return fctx + +def listfilediffs(tmpl, files, node, max): + for f in files[:max]: + yield tmpl('filedifflink', node=hex(node), file=f) + if len(files) > max: + yield tmpl('fileellipses') + +def diffs(repo, tmpl, ctx, files, parity, style): + + def countgen(): + start = 1 + while True: + yield start + start += 1 + + blockcount = countgen() + def prettyprintlines(diff): + blockno = blockcount.next() + for lineno, l in enumerate(diff.splitlines(True)): + lineno = "%d.%d" % (blockno, lineno + 1) + if l.startswith('+'): + ltype = "difflineplus" + elif l.startswith('-'): + ltype = "difflineminus" + elif l.startswith('@'): + ltype = "difflineat" + else: + ltype = "diffline" + yield tmpl(ltype, + line=l, + lineid="l%s" % lineno, + linenumber="% 8s" % lineno) + + if files: + m = match.exact(repo.root, repo.getcwd(), files) + else: + m = match.always(repo.root, repo.getcwd()) + + diffopts = patch.diffopts(repo.ui, untrusted=True) + parents = ctx.parents() + node1 = parents and parents[0].node() or nullid + node2 = ctx.node() + + block = [] + for chunk in patch.diff(repo, node1, node2, m, opts=diffopts): + if chunk.startswith('diff') and block: + yield tmpl('diffblock', parity=parity.next(), + lines=prettyprintlines(''.join(block))) + block = [] + if chunk.startswith('diff') and style != 'raw': + chunk = ''.join(chunk.splitlines(True)[1:]) + block.append(chunk) + yield tmpl('diffblock', parity=parity.next(), + lines=prettyprintlines(''.join(block))) + +def diffstatgen(ctx): + '''Generator function that provides the diffstat data.''' + + stats = 
patch.diffstatdata(util.iterlines(ctx.diff())) + maxname, maxtotal, addtotal, removetotal, binary = patch.diffstatsum(stats) + while True: + yield stats, maxname, maxtotal, addtotal, removetotal, binary + +def diffsummary(statgen): + '''Return a short summary of the diff.''' + + stats, maxname, maxtotal, addtotal, removetotal, binary = statgen.next() + return _(' %d files changed, %d insertions(+), %d deletions(-)\n') % ( + len(stats), addtotal, removetotal) + +def diffstat(tmpl, ctx, statgen, parity): + '''Return a diffstat template for each file in the diff.''' + + stats, maxname, maxtotal, addtotal, removetotal, binary = statgen.next() + files = ctx.files() + + def pct(i): + if maxtotal == 0: + return 0 + return (float(i) / maxtotal) * 100 + + fileno = 0 + for filename, adds, removes, isbinary in stats: + template = filename in files and 'diffstatlink' or 'diffstatnolink' + total = adds + removes + fileno += 1 + yield tmpl(template, node=ctx.hex(), file=filename, fileno=fileno, + total=total, addpct=pct(adds), removepct=pct(removes), + parity=parity.next()) + +class sessionvars(object): + def __init__(self, vars, start='?'): + self.start = start + self.vars = vars + def __getitem__(self, key): + return self.vars[key] + def __setitem__(self, key, value): + self.vars[key] = value + def __copy__(self): + return sessionvars(copy.copy(self.vars), self.start) + def __iter__(self): + separator = self.start + for key, value in self.vars.iteritems(): + yield {'name': key, 'value': str(value), 'separator': separator} + separator = '&' + +class wsgiui(ui.ui): + # default termwidth breaks under mod_wsgi + def termwidth(self): + return 80 diff --git a/websdk/mercurial/hgweb/wsgicgi.py b/websdk/mercurial/hgweb/wsgicgi.py index fa03838..5d18d8e 120000..100644 --- a/websdk/mercurial/hgweb/wsgicgi.py +++ b/websdk/mercurial/hgweb/wsgicgi.py @@ -1 +1,81 @@ -/usr/share/pyshared/mercurial/hgweb/wsgicgi.py \ No newline at end of file +# hgweb/wsgicgi.py - CGI->WSGI translator +# +# 
Copyright 2006 Eric Hopper +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. +# +# This was originally copied from the public domain code at +# http://www.python.org/dev/peps/pep-0333/#the-server-gateway-side + +import os, sys +from mercurial import util +from mercurial.hgweb import common + +def launch(application): + util.setbinary(sys.stdin) + util.setbinary(sys.stdout) + + environ = dict(os.environ.iteritems()) + environ.setdefault('PATH_INFO', '') + if environ.get('SERVER_SOFTWARE', '').startswith('Microsoft-IIS'): + # IIS includes script_name in path_info + scriptname = environ['SCRIPT_NAME'] + if environ['PATH_INFO'].startswith(scriptname): + environ['PATH_INFO'] = environ['PATH_INFO'][len(scriptname):] + + stdin = sys.stdin + if environ.get('HTTP_EXPECT', '').lower() == '100-continue': + stdin = common.continuereader(stdin, sys.stdout.write) + + environ['wsgi.input'] = stdin + environ['wsgi.errors'] = sys.stderr + environ['wsgi.version'] = (1, 0) + environ['wsgi.multithread'] = False + environ['wsgi.multiprocess'] = True + environ['wsgi.run_once'] = True + + if environ.get('HTTPS', 'off').lower() in ('on', '1', 'yes'): + environ['wsgi.url_scheme'] = 'https' + else: + environ['wsgi.url_scheme'] = 'http' + + headers_set = [] + headers_sent = [] + out = sys.stdout + + def write(data): + if not headers_set: + raise AssertionError("write() before start_response()") + + elif not headers_sent: + # Before the first output, send the stored headers + status, response_headers = headers_sent[:] = headers_set + out.write('Status: %s\r\n' % status) + for header in response_headers: + out.write('%s: %s\r\n' % header) + out.write('\r\n') + + out.write(data) + out.flush() + + def start_response(status, response_headers, exc_info=None): + if exc_info: + try: + if headers_sent: + # Re-raise original exception if headers sent + raise exc_info[0](exc_info[1], exc_info[2]) + finally: + 
exc_info = None # avoid dangling circular ref + elif headers_set: + raise AssertionError("Headers already set!") + + headers_set[:] = [status, response_headers] + return write + + content = application(environ, start_response) + try: + for chunk in content: + write(chunk) + finally: + getattr(content, 'close', lambda : None)() diff --git a/websdk/mercurial/hook.py b/websdk/mercurial/hook.py index 6d162db..88987ee 120000..100644 --- a/websdk/mercurial/hook.py +++ b/websdk/mercurial/hook.py @@ -1 +1,173 @@ -/usr/share/pyshared/mercurial/hook.py \ No newline at end of file +# hook.py - hook support for mercurial +# +# Copyright 2007 Matt Mackall +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +from i18n import _ +import os, sys +import extensions, util + +def _pythonhook(ui, repo, name, hname, funcname, args, throw): + '''call python hook. hook is callable object, looked up as + name in python module. if callable returns "true", hook + fails, else passes. if hook raises exception, treated as + hook failure. exception propagates if throw is "true". + + reason for "true" meaning "hook failed" is so that + unmodified commands (e.g. 
mercurial.commands.update) can + be run as hooks without wrappers to convert return values.''' + + ui.note(_("calling hook %s: %s\n") % (hname, funcname)) + obj = funcname + if not util.safehasattr(obj, '__call__'): + d = funcname.rfind('.') + if d == -1: + raise util.Abort(_('%s hook is invalid ("%s" not in ' + 'a module)') % (hname, funcname)) + modname = funcname[:d] + oldpaths = sys.path + if util.mainfrozen(): + # binary installs require sys.path manipulation + modpath, modfile = os.path.split(modname) + if modpath and modfile: + sys.path = sys.path[:] + [modpath] + modname = modfile + try: + obj = __import__(modname) + except ImportError: + e1 = sys.exc_type, sys.exc_value, sys.exc_traceback + try: + # extensions are loaded with hgext_ prefix + obj = __import__("hgext_%s" % modname) + except ImportError: + e2 = sys.exc_type, sys.exc_value, sys.exc_traceback + if ui.tracebackflag: + ui.warn(_('exception from first failed import attempt:\n')) + ui.traceback(e1) + if ui.tracebackflag: + ui.warn(_('exception from second failed import attempt:\n')) + ui.traceback(e2) + raise util.Abort(_('%s hook is invalid ' + '(import of "%s" failed)') % + (hname, modname)) + sys.path = oldpaths + try: + for p in funcname.split('.')[1:]: + obj = getattr(obj, p) + except AttributeError: + raise util.Abort(_('%s hook is invalid ' + '("%s" is not defined)') % + (hname, funcname)) + if not util.safehasattr(obj, '__call__'): + raise util.Abort(_('%s hook is invalid ' + '("%s" is not callable)') % + (hname, funcname)) + try: + try: + # redirect IO descriptors the the ui descriptors so hooks + # that write directly to these don't mess up the command + # protocol when running through the command server + old = sys.stdout, sys.stderr, sys.stdin + sys.stdout, sys.stderr, sys.stdin = ui.fout, ui.ferr, ui.fin + + r = obj(ui=ui, repo=repo, hooktype=name, **args) + except KeyboardInterrupt: + raise + except Exception, exc: + if isinstance(exc, util.Abort): + ui.warn(_('error: %s hook failed: 
%s\n') % + (hname, exc.args[0])) + else: + ui.warn(_('error: %s hook raised an exception: ' + '%s\n') % (hname, exc)) + if throw: + raise + ui.traceback() + return True + finally: + sys.stdout, sys.stderr, sys.stdin = old + if r: + if throw: + raise util.Abort(_('%s hook failed') % hname) + ui.warn(_('warning: %s hook failed\n') % hname) + return r + +def _exthook(ui, repo, name, cmd, args, throw): + ui.note(_("running hook %s: %s\n") % (name, cmd)) + + env = {} + for k, v in args.iteritems(): + if util.safehasattr(v, '__call__'): + v = v() + if isinstance(v, dict): + # make the dictionary element order stable across Python + # implementations + v = ('{' + + ', '.join('%r: %r' % i for i in sorted(v.iteritems())) + + '}') + env['HG_' + k.upper()] = v + + if repo: + cwd = repo.root + else: + cwd = os.getcwd() + if 'HG_URL' in env and env['HG_URL'].startswith('remote:http'): + r = util.system(cmd, environ=env, cwd=cwd, out=ui) + else: + r = util.system(cmd, environ=env, cwd=cwd, out=ui.fout) + if r: + desc, r = util.explainexit(r) + if throw: + raise util.Abort(_('%s hook %s') % (name, desc)) + ui.warn(_('warning: %s hook %s\n') % (name, desc)) + return r + +_redirect = False +def redirect(state): + global _redirect + _redirect = state + +def hook(ui, repo, name, throw=False, **args): + r = False + + oldstdout = -1 + if _redirect: + try: + stdoutno = sys.__stdout__.fileno() + stderrno = sys.__stderr__.fileno() + # temporarily redirect stdout to stderr, if possible + if stdoutno >= 0 and stderrno >= 0: + sys.__stdout__.flush() + oldstdout = os.dup(stdoutno) + os.dup2(stderrno, stdoutno) + except AttributeError: + # __stdout/err__ doesn't have fileno(), it's not a real file + pass + + try: + for hname, cmd in ui.configitems('hooks'): + if hname.split('.')[0] != name or not cmd: + continue + if util.safehasattr(cmd, '__call__'): + r = _pythonhook(ui, repo, name, hname, cmd, args, throw) or r + elif cmd.startswith('python:'): + if cmd.count(':') >= 2: + path, cmd = 
cmd[7:].rsplit(':', 1) + path = util.expandpath(path) + if repo: + path = os.path.join(repo.root, path) + mod = extensions.loadpath(path, 'hghook.%s' % hname) + hookfn = getattr(mod, cmd) + else: + hookfn = cmd[7:].strip() + r = _pythonhook(ui, repo, name, hname, hookfn, args, throw) or r + else: + r = _exthook(ui, repo, hname, cmd, args, throw) or r + finally: + if _redirect and oldstdout >= 0: + os.dup2(oldstdout, stdoutno) + os.close(oldstdout) + + return r diff --git a/websdk/mercurial/httpclient/__init__.py b/websdk/mercurial/httpclient/__init__.py new file mode 100644 index 0000000..3200edd --- /dev/null +++ b/websdk/mercurial/httpclient/__init__.py @@ -0,0 +1,711 @@ +# Copyright 2010, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. + +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +"""Improved HTTP/1.1 client library + +This library contains an HTTPConnection which is similar to the one in +httplib, but has several additional features: + + * supports keepalives natively + * uses select() to block for incoming data + * notices when the server responds early to a request + * implements ssl inline instead of in a different class +""" + +import cStringIO +import errno +import httplib +import logging +import rfc822 +import select +import socket + +import socketutil + +logger = logging.getLogger(__name__) + +__all__ = ['HTTPConnection', 'HTTPResponse'] + +HTTP_VER_1_0 = 'HTTP/1.0' +HTTP_VER_1_1 = 'HTTP/1.1' + +_LEN_CLOSE_IS_END = -1 + +OUTGOING_BUFFER_SIZE = 1 << 15 +INCOMING_BUFFER_SIZE = 1 << 20 + +HDR_ACCEPT_ENCODING = 'accept-encoding' +HDR_CONNECTION_CTRL = 'connection' +HDR_CONTENT_LENGTH = 'content-length' +HDR_XFER_ENCODING = 'transfer-encoding' + +XFER_ENCODING_CHUNKED = 'chunked' + +CONNECTION_CLOSE = 'close' + +EOL = '\r\n' +_END_HEADERS = EOL * 2 + +# Based on some searching around, 1 second seems like a reasonable +# default here. +TIMEOUT_ASSUME_CONTINUE = 1 +TIMEOUT_DEFAULT = None + + +class HTTPResponse(object): + """Response from an HTTP server. + + The response will continue to load as available. If you need the + complete response before continuing, check the .complete() method. 
+ """ + def __init__(self, sock, timeout): + self.sock = sock + self.raw_response = '' + self._body = None + self._headers_len = 0 + self._content_len = 0 + self.headers = None + self.will_close = False + self.status_line = '' + self.status = None + self.http_version = None + self.reason = None + self._chunked = False + self._chunked_done = False + self._chunked_until_next = 0 + self._chunked_skip_bytes = 0 + self._chunked_preloaded_block = None + + self._read_location = 0 + self._eol = EOL + + self._timeout = timeout + + @property + def _end_headers(self): + return self._eol * 2 + + def complete(self): + """Returns true if this response is completely loaded. + + Note that if this is a connection where complete means the + socket is closed, this will nearly always return False, even + in cases where all the data has actually been loaded. + """ + if self._chunked: + return self._chunked_done + if self._content_len == _LEN_CLOSE_IS_END: + return False + return self._body is not None and len(self._body) >= self._content_len + + def readline(self): + """Read a single line from the response body. + + This may block until either a line ending is found or the + response is complete. 
+ """ + eol = self._body.find('\n', self._read_location) + while eol == -1 and not self.complete(): + self._select() + eol = self._body.find('\n', self._read_location) + if eol != -1: + eol += 1 + else: + eol = len(self._body) + data = self._body[self._read_location:eol] + self._read_location = eol + return data + + def read(self, length=None): + # if length is None, unbounded read + while (not self.complete() # never select on a finished read + and (not length # unbounded, so we wait for complete() + or (self._read_location + length) > len(self._body))): + self._select() + if not length: + length = len(self._body) - self._read_location + elif len(self._body) < (self._read_location + length): + length = len(self._body) - self._read_location + r = self._body[self._read_location:self._read_location + length] + self._read_location += len(r) + if self.complete() and self.will_close: + self.sock.close() + return r + + def _select(self): + r, _, _ = select.select([self.sock], [], [], self._timeout) + if not r: + # socket was not readable. If the response is not complete + # and we're not a _LEN_CLOSE_IS_END response, raise a timeout. + # If we are a _LEN_CLOSE_IS_END response and we have no data, + # raise a timeout. + if not (self.complete() or + (self._content_len == _LEN_CLOSE_IS_END and self._body)): + logger.info('timed out with timeout of %s', self._timeout) + raise HTTPTimeoutException('timeout reading data') + logger.info('cl: %r body: %r', self._content_len, self._body) + try: + data = self.sock.recv(INCOMING_BUFFER_SIZE) + # If the socket was readable and no data was read, that + # means the socket was closed. If this isn't a + # _CLOSE_IS_END socket, then something is wrong if we're + # here (we shouldn't enter _select() if the response is + # complete), so abort. 
+ if not data and self._content_len != _LEN_CLOSE_IS_END: + raise HTTPRemoteClosedError( + 'server appears to have closed the socket mid-response') + except socket.sslerror, e: + if e.args[0] != socket.SSL_ERROR_WANT_READ: + raise + logger.debug('SSL_WANT_READ in _select, should retry later') + return True + logger.debug('response read %d data during _select', len(data)) + if not data: + if self.headers and self._content_len == _LEN_CLOSE_IS_END: + self._content_len = len(self._body) + return False + else: + self._load_response(data) + return True + + def _chunked_parsedata(self, data): + if self._chunked_preloaded_block: + data = self._chunked_preloaded_block + data + self._chunked_preloaded_block = None + while data: + logger.debug('looping with %d data remaining', len(data)) + # Slice out anything we should skip + if self._chunked_skip_bytes: + if len(data) <= self._chunked_skip_bytes: + self._chunked_skip_bytes -= len(data) + data = '' + break + else: + data = data[self._chunked_skip_bytes:] + self._chunked_skip_bytes = 0 + + # determine how much is until the next chunk + if self._chunked_until_next: + amt = self._chunked_until_next + logger.debug('reading remaining %d of existing chunk', amt) + self._chunked_until_next = 0 + body = data + else: + try: + amt, body = data.split(self._eol, 1) + except ValueError: + self._chunked_preloaded_block = data + logger.debug('saving %r as a preloaded block for chunked', + self._chunked_preloaded_block) + return + amt = int(amt, base=16) + logger.debug('reading chunk of length %d', amt) + if amt == 0: + self._chunked_done = True + + # read through end of what we have or the chunk + self._body += body[:amt] + if len(body) >= amt: + data = body[amt:] + self._chunked_skip_bytes = len(self._eol) + else: + self._chunked_until_next = amt - len(body) + self._chunked_skip_bytes = 0 + data = '' + + def _load_response(self, data): + if self._chunked: + self._chunked_parsedata(data) + return + elif self._body is not None: + 
self._body += data + return + + # We haven't seen end of headers yet + self.raw_response += data + # This is a bogus server with bad line endings + if self._eol not in self.raw_response: + for bad_eol in ('\n', '\r'): + if (bad_eol in self.raw_response + # verify that bad_eol is not the end of the incoming data + # as this could be a response line that just got + # split between \r and \n. + and (self.raw_response.index(bad_eol) < + (len(self.raw_response) - 1))): + logger.info('bogus line endings detected, ' + 'using %r for EOL', bad_eol) + self._eol = bad_eol + break + # exit early if not at end of headers + if self._end_headers not in self.raw_response or self.headers: + return + + # handle 100-continue response + hdrs, body = self.raw_response.split(self._end_headers, 1) + http_ver, status = hdrs.split(' ', 1) + if status.startswith('100'): + self.raw_response = body + logger.debug('continue seen, setting body to %r', body) + return + + # arriving here means we should parse response headers + # as all headers have arrived completely + hdrs, body = self.raw_response.split(self._end_headers, 1) + del self.raw_response + if self._eol in hdrs: + self.status_line, hdrs = hdrs.split(self._eol, 1) + else: + self.status_line = hdrs + hdrs = '' + # TODO HTTP < 1.0 support + (self.http_version, self.status, + self.reason) = self.status_line.split(' ', 2) + self.status = int(self.status) + if self._eol != EOL: + hdrs = hdrs.replace(self._eol, '\r\n') + headers = rfc822.Message(cStringIO.StringIO(hdrs)) + if HDR_CONTENT_LENGTH in headers: + self._content_len = int(headers[HDR_CONTENT_LENGTH]) + if self.http_version == HTTP_VER_1_0: + self.will_close = True + elif HDR_CONNECTION_CTRL in headers: + self.will_close = ( + headers[HDR_CONNECTION_CTRL].lower() == CONNECTION_CLOSE) + if self._content_len == 0: + self._content_len = _LEN_CLOSE_IS_END + if (HDR_XFER_ENCODING in headers + and headers[HDR_XFER_ENCODING].lower() == XFER_ENCODING_CHUNKED): + self._body = '' + 
self._chunked_parsedata(body) + self._chunked = True + if self._body is None: + self._body = body + self.headers = headers + + +class HTTPConnection(object): + """Connection to a single http server. + + Supports 100-continue and keepalives natively. Uses select() for + non-blocking socket operations. + """ + http_version = HTTP_VER_1_1 + response_class = HTTPResponse + + def __init__(self, host, port=None, use_ssl=None, ssl_validator=None, + timeout=TIMEOUT_DEFAULT, + continue_timeout=TIMEOUT_ASSUME_CONTINUE, + proxy_hostport=None, **ssl_opts): + """Create a new HTTPConnection. + + Args: + host: The host to which we'll connect. + port: Optional. The port over which we'll connect. Default 80 for + non-ssl, 443 for ssl. + use_ssl: Optional. Wether to use ssl. Defaults to False if port is + not 443, true if port is 443. + ssl_validator: a function(socket) to validate the ssl cert + timeout: Optional. Connection timeout, default is TIMEOUT_DEFAULT. + continue_timeout: Optional. Timeout for waiting on an expected + "100 Continue" response. Default is TIMEOUT_ASSUME_CONTINUE. + proxy_hostport: Optional. Tuple of (host, port) to use as an http + proxy for the connection. Default is to not use a proxy. 
+ """ + if port is None and host.count(':') == 1 or ']:' in host: + host, port = host.rsplit(':', 1) + port = int(port) + if '[' in host: + host = host[1:-1] + if use_ssl is None and port is None: + use_ssl = False + port = 80 + elif use_ssl is None: + use_ssl = (port == 443) + elif port is None: + port = (use_ssl and 443 or 80) + self.port = port + if use_ssl and not socketutil.have_ssl: + raise Exception('ssl requested but unavailable on this Python') + self.ssl = use_ssl + self.ssl_opts = ssl_opts + self._ssl_validator = ssl_validator + self.host = host + self.sock = None + self._current_response = None + self._current_response_taken = False + if proxy_hostport is None: + self._proxy_host = self._proxy_port = None + else: + self._proxy_host, self._proxy_port = proxy_hostport + + self.timeout = timeout + self.continue_timeout = continue_timeout + + def _connect(self): + """Connect to the host and port specified in __init__.""" + if self.sock: + return + if self._proxy_host is not None: + logger.info('Connecting to http proxy %s:%s', + self._proxy_host, self._proxy_port) + sock = socketutil.create_connection((self._proxy_host, + self._proxy_port)) + if self.ssl: + # TODO proxy header support + data = self.buildheaders('CONNECT', '%s:%d' % (self.host, + self.port), + {}, HTTP_VER_1_0) + sock.send(data) + sock.setblocking(0) + r = self.response_class(sock, self.timeout) + timeout_exc = HTTPTimeoutException( + 'Timed out waiting for CONNECT response from proxy') + while not r.complete(): + try: + if not r._select(): + raise timeout_exc + except HTTPTimeoutException: + # This raise/except pattern looks goofy, but + # _select can raise the timeout as well as the + # loop body. I wish it wasn't this convoluted, + # but I don't have a better solution + # immediately handy. 
+ raise timeout_exc + if r.status != 200: + raise HTTPProxyConnectFailedException( + 'Proxy connection failed: %d %s' % (r.status, + r.read())) + logger.info('CONNECT (for SSL) to %s:%s via proxy succeeded.', + self.host, self.port) + else: + sock = socketutil.create_connection((self.host, self.port)) + if self.ssl: + logger.debug('wrapping socket for ssl with options %r', + self.ssl_opts) + sock = socketutil.wrap_socket(sock, **self.ssl_opts) + if self._ssl_validator: + self._ssl_validator(sock) + sock.setblocking(0) + self.sock = sock + + def buildheaders(self, method, path, headers, http_ver): + if self.ssl and self.port == 443 or self.port == 80: + # default port for protocol, so leave it out + hdrhost = self.host + else: + # include nonstandard port in header + if ':' in self.host: # must be IPv6 + hdrhost = '[%s]:%d' % (self.host, self.port) + else: + hdrhost = '%s:%d' % (self.host, self.port) + if self._proxy_host and not self.ssl: + # When talking to a regular http proxy we must send the + # full URI, but in all other cases we must not (although + # technically RFC 2616 says servers must accept our + # request if we screw up, experimentally few do that + # correctly.) + assert path[0] == '/', 'path must start with a /' + path = 'http://%s%s' % (hdrhost, path) + outgoing = ['%s %s %s%s' % (method, path, http_ver, EOL)] + headers['host'] = ('Host', hdrhost) + headers[HDR_ACCEPT_ENCODING] = (HDR_ACCEPT_ENCODING, 'identity') + for hdr, val in headers.itervalues(): + outgoing.append('%s: %s%s' % (hdr, val, EOL)) + outgoing.append(EOL) + return ''.join(outgoing) + + def close(self): + """Close the connection to the server. + + This is a no-op if the connection is already closed. The + connection may automatically close if requessted by the server + or required by the nature of a response. 
+ """ + if self.sock is None: + return + self.sock.close() + self.sock = None + logger.info('closed connection to %s on %s', self.host, self.port) + + def busy(self): + """Returns True if this connection object is currently in use. + + If a response is still pending, this will return True, even if + the request has finished sending. In the future, + HTTPConnection may transparently juggle multiple connections + to the server, in which case this will be useful to detect if + any of those connections is ready for use. + """ + cr = self._current_response + if cr is not None: + if self._current_response_taken: + if cr.will_close: + self.sock = None + self._current_response = None + return False + elif cr.complete(): + self._current_response = None + return False + return True + return False + + def request(self, method, path, body=None, headers={}, + expect_continue=False): + """Send a request to the server. + + For increased flexibility, this does not return the response + object. Future versions of HTTPConnection that juggle multiple + sockets will be able to send (for example) 5 requests all at + once, and then let the requests arrive as data is + available. Use the `getresponse()` method to retrieve the + response. 
+ """ + if self.busy(): + raise httplib.CannotSendRequest( + 'Can not send another request before ' + 'current response is read!') + self._current_response_taken = False + + logger.info('sending %s request for %s to %s on port %s', + method, path, self.host, self.port) + hdrs = dict((k.lower(), (k, v)) for k, v in headers.iteritems()) + if hdrs.get('expect', ('', ''))[1].lower() == '100-continue': + expect_continue = True + elif expect_continue: + hdrs['expect'] = ('Expect', '100-Continue') + + chunked = False + if body and HDR_CONTENT_LENGTH not in hdrs: + if getattr(body, '__len__', False): + hdrs[HDR_CONTENT_LENGTH] = (HDR_CONTENT_LENGTH, len(body)) + elif getattr(body, 'read', False): + hdrs[HDR_XFER_ENCODING] = (HDR_XFER_ENCODING, + XFER_ENCODING_CHUNKED) + chunked = True + else: + raise BadRequestData('body has no __len__() nor read()') + + self._connect() + outgoing_headers = self.buildheaders( + method, path, hdrs, self.http_version) + response = None + first = True + + def reconnect(where): + logger.info('reconnecting during %s', where) + self.close() + self._connect() + + while ((outgoing_headers or body) + and not (response and response.complete())): + select_timeout = self.timeout + out = outgoing_headers or body + blocking_on_continue = False + if expect_continue and not outgoing_headers and not ( + response and response.headers): + logger.info( + 'waiting up to %s seconds for' + ' continue response from server', + self.continue_timeout) + select_timeout = self.continue_timeout + blocking_on_continue = True + out = False + if out: + w = [self.sock] + else: + w = [] + r, w, x = select.select([self.sock], w, [], select_timeout) + # if we were expecting a 100 continue and it's been long + # enough, just go ahead and assume it's ok. This is the + # recommended behavior from the RFC. 
+ if r == w == x == []: + if blocking_on_continue: + expect_continue = False + logger.info('no response to continue expectation from ' + 'server, optimistically sending request body') + else: + raise HTTPTimeoutException('timeout sending data') + # TODO exceptional conditions with select? (what are those be?) + # TODO if the response is loading, must we finish sending at all? + # + # Certainly not if it's going to close the connection and/or + # the response is already done...I think. + was_first = first + + # incoming data + if r: + try: + try: + data = r[0].recv(INCOMING_BUFFER_SIZE) + except socket.sslerror, e: + if e.args[0] != socket.SSL_ERROR_WANT_READ: + raise + logger.debug( + 'SSL_WANT_READ while sending data, retrying...') + continue + if not data: + logger.info('socket appears closed in read') + self.sock = None + self._current_response = None + # This if/elif ladder is a bit subtle, + # comments in each branch should help. + if response is not None and ( + response.complete() or + response._content_len == _LEN_CLOSE_IS_END): + # Server responded completely and then + # closed the socket. We should just shut + # things down and let the caller get their + # response. + logger.info('Got an early response, ' + 'aborting remaining request.') + break + elif was_first and response is None: + # Most likely a keepalive that got killed + # on the server's end. Commonly happens + # after getting a really large response + # from the server. + logger.info( + 'Connection appeared closed in read on first' + ' request loop iteration, will retry.') + reconnect('read') + continue + else: + # We didn't just send the first data hunk, + # and either have a partial response or no + # response at all. There's really nothing + # meaningful we can do here. 
+ raise HTTPStateError( + 'Connection appears closed after ' + 'some request data was written, but the ' + 'response was missing or incomplete!') + logger.debug('read %d bytes in request()', len(data)) + if response is None: + response = self.response_class(r[0], self.timeout) + response._load_response(data) + # Jump to the next select() call so we load more + # data if the server is still sending us content. + continue + except socket.error, e: + if e[0] != errno.EPIPE and not was_first: + raise + if (response._content_len + and response._content_len != _LEN_CLOSE_IS_END): + outgoing_headers = sent_data + outgoing_headers + reconnect('read') + + # outgoing data + if w and out: + try: + if getattr(out, 'read', False): + data = out.read(OUTGOING_BUFFER_SIZE) + if not data: + continue + if len(data) < OUTGOING_BUFFER_SIZE: + if chunked: + body = '0' + EOL + EOL + else: + body = None + if chunked: + out = hex(len(data))[2:] + EOL + data + EOL + else: + out = data + amt = w[0].send(out) + except socket.error, e: + if e[0] == socket.SSL_ERROR_WANT_WRITE and self.ssl: + # This means that SSL hasn't flushed its buffer into + # the socket yet. + # TODO: find a way to block on ssl flushing its buffer + # similar to selecting on a raw socket. 
+ continue + elif (e[0] not in (errno.ECONNRESET, errno.EPIPE) + and not first): + raise + reconnect('write') + amt = self.sock.send(out) + logger.debug('sent %d', amt) + first = False + # stash data we think we sent in case the socket breaks + # when we read from it + if was_first: + sent_data = out[:amt] + if out is body: + body = out[amt:] + else: + outgoing_headers = out[amt:] + + # close if the server response said to or responded before eating + # the whole request + if response is None: + response = self.response_class(self.sock, self.timeout) + complete = response.complete() + data_left = bool(outgoing_headers or body) + if data_left: + logger.info('stopped sending request early, ' + 'will close the socket to be safe.') + response.will_close = True + if response.will_close: + # The socket will be closed by the response, so we disown + # the socket + self.sock = None + self._current_response = response + + def getresponse(self): + if self._current_response is None: + raise httplib.ResponseNotReady() + r = self._current_response + while r.headers is None: + r._select() + if r.will_close: + self.sock = None + self._current_response = None + elif r.complete(): + self._current_response = None + else: + self._current_response_taken = True + return r + + +class HTTPTimeoutException(httplib.HTTPException): + """A timeout occurred while waiting on the server.""" + + +class BadRequestData(httplib.HTTPException): + """Request body object has neither __len__ nor read.""" + + +class HTTPProxyConnectFailedException(httplib.HTTPException): + """Connecting to the HTTP proxy failed.""" + + +class HTTPStateError(httplib.HTTPException): + """Invalid internal state encountered.""" + + +class HTTPRemoteClosedError(httplib.HTTPException): + """The server closed the remote socket in the middle of a response.""" +# no-check-code diff --git a/websdk/mercurial/httpclient/socketutil.py b/websdk/mercurial/httpclient/socketutil.py new file mode 100644 index 0000000..6f06a3c --- 
/dev/null +++ b/websdk/mercurial/httpclient/socketutil.py @@ -0,0 +1,127 @@ +# Copyright 2010, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. + +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +"""Abstraction to simplify socket use for Python < 2.6 + +This will attempt to use the ssl module and the new +socket.create_connection method, but fall back to the old +methods if those are unavailable. 
+""" +import logging +import socket + +logger = logging.getLogger(__name__) + +try: + import ssl + ssl.wrap_socket # make demandimporters load the module + have_ssl = True +except ImportError: + import httplib + import urllib2 + have_ssl = getattr(urllib2, 'HTTPSHandler', False) + ssl = False + + +try: + create_connection = socket.create_connection +except AttributeError: + def create_connection(address): + host, port = address + msg = "getaddrinfo returns an empty list" + sock = None + for res in socket.getaddrinfo(host, port, 0, + socket.SOCK_STREAM): + af, socktype, proto, _canonname, sa = res + try: + sock = socket.socket(af, socktype, proto) + logger.info("connect: (%s, %s)", host, port) + sock.connect(sa) + except socket.error, msg: + logger.info('connect fail: %s %s', host, port) + if sock: + sock.close() + sock = None + continue + break + if not sock: + raise socket.error, msg + return sock + +if ssl: + wrap_socket = ssl.wrap_socket + CERT_NONE = ssl.CERT_NONE + CERT_OPTIONAL = ssl.CERT_OPTIONAL + CERT_REQUIRED = ssl.CERT_REQUIRED +else: + class FakeSocket(httplib.FakeSocket): + """Socket wrapper that supports SSL. + """ + # backport the behavior from Python 2.6, which is to busy wait + # on the socket instead of anything nice. Sigh. + # See http://bugs.python.org/issue3890 for more info. 
+ def recv(self, buflen=1024, flags=0): + """ssl-aware wrapper around socket.recv + """ + if flags != 0: + raise ValueError( + "non-zero flags not allowed in calls to recv() on %s" % + self.__class__) + while True: + try: + return self._ssl.read(buflen) + except socket.sslerror, x: + if x.args[0] == socket.SSL_ERROR_WANT_READ: + continue + else: + raise x + + _PROTOCOL_SSLv23 = 2 + + CERT_NONE = 0 + CERT_OPTIONAL = 1 + CERT_REQUIRED = 2 + + def wrap_socket(sock, keyfile=None, certfile=None, + server_side=False, cert_reqs=CERT_NONE, + ssl_version=_PROTOCOL_SSLv23, ca_certs=None, + do_handshake_on_connect=True, + suppress_ragged_eofs=True): + if cert_reqs != CERT_NONE and ca_certs: + raise CertificateValidationUnsupported( + 'SSL certificate validation requires the ssl module' + '(included in Python 2.6 and later.)') + sslob = socket.ssl(sock) + # borrow httplib's workaround for no ssl.wrap_socket + sock = FakeSocket(sock, sslob) + return sock + + +class CertificateValidationUnsupported(Exception): + """Exception raised when cert validation is requested but unavailable.""" +# no-check-code diff --git a/websdk/mercurial/httpclient/tests/__init__.py b/websdk/mercurial/httpclient/tests/__init__.py new file mode 100644 index 0000000..84b3a07 --- /dev/null +++ b/websdk/mercurial/httpclient/tests/__init__.py @@ -0,0 +1 @@ +# no-check-code diff --git a/websdk/mercurial/httpclient/tests/simple_http_test.py b/websdk/mercurial/httpclient/tests/simple_http_test.py new file mode 100644 index 0000000..f933d50 --- /dev/null +++ b/websdk/mercurial/httpclient/tests/simple_http_test.py @@ -0,0 +1,401 @@ +# Copyright 2010, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. 
+# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. + +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+import socket +import unittest + +import http + +# relative import to ease embedding the library +import util + + +class SimpleHttpTest(util.HttpTestBase, unittest.TestCase): + + def _run_simple_test(self, host, server_data, expected_req, expected_data): + con = http.HTTPConnection(host) + con._connect() + con.sock.data = server_data + con.request('GET', '/') + + self.assertStringEqual(expected_req, con.sock.sent) + self.assertEqual(expected_data, con.getresponse().read()) + + def test_broken_data_obj(self): + con = http.HTTPConnection('1.2.3.4:80') + con._connect() + self.assertRaises(http.BadRequestData, + con.request, 'POST', '/', body=1) + + def test_no_keepalive_http_1_0(self): + expected_request_one = """GET /remote/.hg/requires HTTP/1.1 +Host: localhost:9999 +range: bytes=0- +accept-encoding: identity +accept: application/mercurial-0.1 +user-agent: mercurial/proto-1.0 + +""".replace('\n', '\r\n') + expected_response_headers = """HTTP/1.0 200 OK +Server: SimpleHTTP/0.6 Python/2.6.1 +Date: Sun, 01 May 2011 13:56:57 GMT +Content-type: application/octet-stream +Content-Length: 33 +Last-Modified: Sun, 01 May 2011 13:56:56 GMT + +""".replace('\n', '\r\n') + expected_response_body = """revlogv1 +store +fncache +dotencode +""" + con = http.HTTPConnection('localhost:9999') + con._connect() + con.sock.data = [expected_response_headers, expected_response_body] + con.request('GET', '/remote/.hg/requires', + headers={'accept-encoding': 'identity', + 'range': 'bytes=0-', + 'accept': 'application/mercurial-0.1', + 'user-agent': 'mercurial/proto-1.0', + }) + self.assertStringEqual(expected_request_one, con.sock.sent) + self.assertEqual(con.sock.closed, False) + self.assertNotEqual(con.sock.data, []) + self.assert_(con.busy()) + resp = con.getresponse() + self.assertStringEqual(resp.read(), expected_response_body) + self.failIf(con.busy()) + self.assertEqual(con.sock, None) + self.assertEqual(resp.sock.data, []) + self.assert_(resp.sock.closed) + + def 
test_multiline_header(self): + con = http.HTTPConnection('1.2.3.4:80') + con._connect() + con.sock.data = ['HTTP/1.1 200 OK\r\n', + 'Server: BogusServer 1.0\r\n', + 'Multiline: Value\r\n', + ' Rest of value\r\n', + 'Content-Length: 10\r\n', + '\r\n' + '1234567890' + ] + con.request('GET', '/') + + expected_req = ('GET / HTTP/1.1\r\n' + 'Host: 1.2.3.4\r\n' + 'accept-encoding: identity\r\n\r\n') + + self.assertEqual(('1.2.3.4', 80), con.sock.sa) + self.assertEqual(expected_req, con.sock.sent) + resp = con.getresponse() + self.assertEqual('1234567890', resp.read()) + self.assertEqual(['Value\n Rest of value'], + resp.headers.getheaders('multiline')) + # Socket should not be closed + self.assertEqual(resp.sock.closed, False) + self.assertEqual(con.sock.closed, False) + + def testSimpleRequest(self): + con = http.HTTPConnection('1.2.3.4:80') + con._connect() + con.sock.data = ['HTTP/1.1 200 OK\r\n', + 'Server: BogusServer 1.0\r\n', + 'MultiHeader: Value\r\n' + 'MultiHeader: Other Value\r\n' + 'MultiHeader: One More!\r\n' + 'Content-Length: 10\r\n', + '\r\n' + '1234567890' + ] + con.request('GET', '/') + + expected_req = ('GET / HTTP/1.1\r\n' + 'Host: 1.2.3.4\r\n' + 'accept-encoding: identity\r\n\r\n') + + self.assertEqual(('1.2.3.4', 80), con.sock.sa) + self.assertEqual(expected_req, con.sock.sent) + resp = con.getresponse() + self.assertEqual('1234567890', resp.read()) + self.assertEqual(['Value', 'Other Value', 'One More!'], + resp.headers.getheaders('multiheader')) + self.assertEqual(['BogusServer 1.0'], + resp.headers.getheaders('server')) + + def testHeaderlessResponse(self): + con = http.HTTPConnection('1.2.3.4', use_ssl=False) + con._connect() + con.sock.data = ['HTTP/1.1 200 OK\r\n', + '\r\n' + '1234567890' + ] + con.request('GET', '/') + + expected_req = ('GET / HTTP/1.1\r\n' + 'Host: 1.2.3.4\r\n' + 'accept-encoding: identity\r\n\r\n') + + self.assertEqual(('1.2.3.4', 80), con.sock.sa) + self.assertEqual(expected_req, con.sock.sent) + resp = con.getresponse() + 
self.assertEqual('1234567890', resp.read()) + self.assertEqual({}, dict(resp.headers)) + self.assertEqual(resp.status, 200) + + def testReadline(self): + con = http.HTTPConnection('1.2.3.4') + con._connect() + # make sure it trickles in one byte at a time + # so that we touch all the cases in readline + con.sock.data = list(''.join( + ['HTTP/1.1 200 OK\r\n', + 'Server: BogusServer 1.0\r\n', + 'Connection: Close\r\n', + '\r\n' + '1\n2\nabcdefg\n4\n5'])) + + expected_req = ('GET / HTTP/1.1\r\n' + 'Host: 1.2.3.4\r\n' + 'accept-encoding: identity\r\n\r\n') + + con.request('GET', '/') + self.assertEqual(('1.2.3.4', 80), con.sock.sa) + self.assertEqual(expected_req, con.sock.sent) + r = con.getresponse() + for expected in ['1\n', '2\n', 'abcdefg\n', '4\n', '5']: + actual = r.readline() + self.assertEqual(expected, actual, + 'Expected %r, got %r' % (expected, actual)) + + def testIPv6(self): + self._run_simple_test('[::1]:8221', + ['HTTP/1.1 200 OK\r\n', + 'Server: BogusServer 1.0\r\n', + 'Content-Length: 10', + '\r\n\r\n' + '1234567890'], + ('GET / HTTP/1.1\r\n' + 'Host: [::1]:8221\r\n' + 'accept-encoding: identity\r\n\r\n'), + '1234567890') + self._run_simple_test('::2', + ['HTTP/1.1 200 OK\r\n', + 'Server: BogusServer 1.0\r\n', + 'Content-Length: 10', + '\r\n\r\n' + '1234567890'], + ('GET / HTTP/1.1\r\n' + 'Host: ::2\r\n' + 'accept-encoding: identity\r\n\r\n'), + '1234567890') + self._run_simple_test('[::3]:443', + ['HTTP/1.1 200 OK\r\n', + 'Server: BogusServer 1.0\r\n', + 'Content-Length: 10', + '\r\n\r\n' + '1234567890'], + ('GET / HTTP/1.1\r\n' + 'Host: ::3\r\n' + 'accept-encoding: identity\r\n\r\n'), + '1234567890') + + def testEarlyContinueResponse(self): + con = http.HTTPConnection('1.2.3.4:80') + con._connect() + sock = con.sock + sock.data = ['HTTP/1.1 403 Forbidden\r\n', + 'Server: BogusServer 1.0\r\n', + 'Content-Length: 18', + '\r\n\r\n' + "You can't do that."] + expected_req = self.doPost(con, expect_body=False) + self.assertEqual(('1.2.3.4', 80), sock.sa) 
+ self.assertStringEqual(expected_req, sock.sent) + self.assertEqual("You can't do that.", con.getresponse().read()) + self.assertEqual(sock.closed, True) + + def testDeniedAfterContinueTimeoutExpires(self): + con = http.HTTPConnection('1.2.3.4:80') + con._connect() + sock = con.sock + sock.data = ['HTTP/1.1 403 Forbidden\r\n', + 'Server: BogusServer 1.0\r\n', + 'Content-Length: 18\r\n', + 'Connection: close', + '\r\n\r\n' + "You can't do that."] + sock.read_wait_sentinel = 'Dear server, send response!' + sock.close_on_empty = True + # send enough data out that we'll chunk it into multiple + # blocks and the socket will close before we can send the + # whole request. + post_body = ('This is some POST data\n' * 1024 * 32 + + 'Dear server, send response!\n' + + 'This is some POST data\n' * 1024 * 32) + expected_req = self.doPost(con, expect_body=False, + body_to_send=post_body) + self.assertEqual(('1.2.3.4', 80), sock.sa) + self.assert_('POST data\n' in sock.sent) + self.assert_('Dear server, send response!\n' in sock.sent) + # We expect not all of our data was sent. 
+ self.assertNotEqual(sock.sent, expected_req) + self.assertEqual("You can't do that.", con.getresponse().read()) + self.assertEqual(sock.closed, True) + + def testPostData(self): + con = http.HTTPConnection('1.2.3.4:80') + con._connect() + sock = con.sock + sock.read_wait_sentinel = 'POST data' + sock.early_data = ['HTTP/1.1 100 Co', 'ntinue\r\n\r\n'] + sock.data = ['HTTP/1.1 200 OK\r\n', + 'Server: BogusServer 1.0\r\n', + 'Content-Length: 16', + '\r\n\r\n', + "You can do that."] + expected_req = self.doPost(con, expect_body=True) + self.assertEqual(('1.2.3.4', 80), sock.sa) + self.assertEqual(expected_req, sock.sent) + self.assertEqual("You can do that.", con.getresponse().read()) + self.assertEqual(sock.closed, False) + + def testServerWithoutContinue(self): + con = http.HTTPConnection('1.2.3.4:80') + con._connect() + sock = con.sock + sock.read_wait_sentinel = 'POST data' + sock.data = ['HTTP/1.1 200 OK\r\n', + 'Server: BogusServer 1.0\r\n', + 'Content-Length: 16', + '\r\n\r\n', + "You can do that."] + expected_req = self.doPost(con, expect_body=True) + self.assertEqual(('1.2.3.4', 80), sock.sa) + self.assertEqual(expected_req, sock.sent) + self.assertEqual("You can do that.", con.getresponse().read()) + self.assertEqual(sock.closed, False) + + def testServerWithSlowContinue(self): + con = http.HTTPConnection('1.2.3.4:80') + con._connect() + sock = con.sock + sock.read_wait_sentinel = 'POST data' + sock.data = ['HTTP/1.1 100 ', 'Continue\r\n\r\n', + 'HTTP/1.1 200 OK\r\n', + 'Server: BogusServer 1.0\r\n', + 'Content-Length: 16', + '\r\n\r\n', + "You can do that."] + expected_req = self.doPost(con, expect_body=True) + self.assertEqual(('1.2.3.4', 80), sock.sa) + self.assertEqual(expected_req, sock.sent) + resp = con.getresponse() + self.assertEqual("You can do that.", resp.read()) + self.assertEqual(200, resp.status) + self.assertEqual(sock.closed, False) + + def testSlowConnection(self): + con = http.HTTPConnection('1.2.3.4:80') + con._connect() + # simulate one 
byte arriving at a time, to check for various + # corner cases + con.sock.data = list('HTTP/1.1 200 OK\r\n' + 'Server: BogusServer 1.0\r\n' + 'Content-Length: 10' + '\r\n\r\n' + '1234567890') + con.request('GET', '/') + + expected_req = ('GET / HTTP/1.1\r\n' + 'Host: 1.2.3.4\r\n' + 'accept-encoding: identity\r\n\r\n') + + self.assertEqual(('1.2.3.4', 80), con.sock.sa) + self.assertEqual(expected_req, con.sock.sent) + self.assertEqual('1234567890', con.getresponse().read()) + + def testTimeout(self): + con = http.HTTPConnection('1.2.3.4:80') + con._connect() + con.sock.data = [] + con.request('GET', '/') + self.assertRaises(http.HTTPTimeoutException, + con.getresponse) + + expected_req = ('GET / HTTP/1.1\r\n' + 'Host: 1.2.3.4\r\n' + 'accept-encoding: identity\r\n\r\n') + + self.assertEqual(('1.2.3.4', 80), con.sock.sa) + self.assertEqual(expected_req, con.sock.sent) + + def test_conn_keep_alive_but_server_close_anyway(self): + sockets = [] + def closingsocket(*args, **kwargs): + s = util.MockSocket(*args, **kwargs) + sockets.append(s) + s.data = ['HTTP/1.1 200 OK\r\n', + 'Server: BogusServer 1.0\r\n', + 'Connection: Keep-Alive\r\n', + 'Content-Length: 16', + '\r\n\r\n', + 'You can do that.'] + s.close_on_empty = True + return s + + socket.socket = closingsocket + con = http.HTTPConnection('1.2.3.4:80') + con._connect() + con.request('GET', '/') + r1 = con.getresponse() + r1.read() + self.assertFalse(con.sock.closed) + self.assert_(con.sock.remote_closed) + con.request('GET', '/') + self.assertEqual(2, len(sockets)) + + def test_server_closes_before_end_of_body(self): + con = http.HTTPConnection('1.2.3.4:80') + con._connect() + s = con.sock + s.data = ['HTTP/1.1 200 OK\r\n', + 'Server: BogusServer 1.0\r\n', + 'Connection: Keep-Alive\r\n', + 'Content-Length: 16', + '\r\n\r\n', + 'You can '] # Note: this is shorter than content-length + s.close_on_empty = True + con.request('GET', '/') + r1 = con.getresponse() + self.assertRaises(http.HTTPRemoteClosedError, r1.read) + 
+ def test_no_response_raises_response_not_ready(self): + con = http.HTTPConnection('foo') + self.assertRaises(http.httplib.ResponseNotReady, con.getresponse) +# no-check-code diff --git a/websdk/mercurial/httpclient/tests/test_bogus_responses.py b/websdk/mercurial/httpclient/tests/test_bogus_responses.py new file mode 100644 index 0000000..486e770 --- /dev/null +++ b/websdk/mercurial/httpclient/tests/test_bogus_responses.py @@ -0,0 +1,68 @@ +# Copyright 2010, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. + +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+"""Tests against malformed responses. + +Server implementations that respond with only LF instead of CRLF have +been observed. Checking against ones that use only CR is a hedge +against that potential insanit.y +""" +import unittest + +import http + +# relative import to ease embedding the library +import util + + +class SimpleHttpTest(util.HttpTestBase, unittest.TestCase): + + def bogusEOL(self, eol): + con = http.HTTPConnection('1.2.3.4:80') + con._connect() + con.sock.data = ['HTTP/1.1 200 OK%s' % eol, + 'Server: BogusServer 1.0%s' % eol, + 'Content-Length: 10', + eol * 2, + '1234567890'] + con.request('GET', '/') + + expected_req = ('GET / HTTP/1.1\r\n' + 'Host: 1.2.3.4\r\n' + 'accept-encoding: identity\r\n\r\n') + + self.assertEqual(('1.2.3.4', 80), con.sock.sa) + self.assertEqual(expected_req, con.sock.sent) + self.assertEqual('1234567890', con.getresponse().read()) + + def testOnlyLinefeed(self): + self.bogusEOL('\n') + + def testOnlyCarriageReturn(self): + self.bogusEOL('\r') +# no-check-code diff --git a/websdk/mercurial/httpclient/tests/test_chunked_transfer.py b/websdk/mercurial/httpclient/tests/test_chunked_transfer.py new file mode 100644 index 0000000..88244b5 --- /dev/null +++ b/websdk/mercurial/httpclient/tests/test_chunked_transfer.py @@ -0,0 +1,153 @@ +# Copyright 2010, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. 
nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. + +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +import cStringIO +import unittest + +import http + +# relative import to ease embedding the library +import util + + +def chunkedblock(x, eol='\r\n'): + r"""Make a chunked transfer-encoding block. 
+ + >>> chunkedblock('hi') + '2\r\nhi\r\n' + >>> chunkedblock('hi' * 10) + '14\r\nhihihihihihihihihihi\r\n' + >>> chunkedblock('hi', eol='\n') + '2\nhi\n' + """ + return ''.join((hex(len(x))[2:], eol, x, eol)) + + +class ChunkedTransferTest(util.HttpTestBase, unittest.TestCase): + def testChunkedUpload(self): + con = http.HTTPConnection('1.2.3.4:80') + con._connect() + sock = con.sock + sock.read_wait_sentinel = '0\r\n\r\n' + sock.data = ['HTTP/1.1 200 OK\r\n', + 'Server: BogusServer 1.0\r\n', + 'Content-Length: 6', + '\r\n\r\n', + "Thanks"] + + zz = 'zz\n' + con.request('POST', '/', body=cStringIO.StringIO( + (zz * (0x8010 / 3)) + 'end-of-body')) + expected_req = ('POST / HTTP/1.1\r\n' + 'transfer-encoding: chunked\r\n' + 'Host: 1.2.3.4\r\n' + 'accept-encoding: identity\r\n\r\n') + expected_req += chunkedblock('zz\n' * (0x8000 / 3) + 'zz') + expected_req += chunkedblock( + '\n' + 'zz\n' * ((0x1b - len('end-of-body')) / 3) + 'end-of-body') + expected_req += '0\r\n\r\n' + self.assertEqual(('1.2.3.4', 80), sock.sa) + self.assertStringEqual(expected_req, sock.sent) + self.assertEqual("Thanks", con.getresponse().read()) + self.assertEqual(sock.closed, False) + + def testChunkedDownload(self): + con = http.HTTPConnection('1.2.3.4:80') + con._connect() + sock = con.sock + sock.data = ['HTTP/1.1 200 OK\r\n', + 'Server: BogusServer 1.0\r\n', + 'transfer-encoding: chunked', + '\r\n\r\n', + chunkedblock('hi '), + chunkedblock('there'), + chunkedblock(''), + ] + con.request('GET', '/') + self.assertStringEqual('hi there', con.getresponse().read()) + + def testChunkedDownloadBadEOL(self): + con = http.HTTPConnection('1.2.3.4:80') + con._connect() + sock = con.sock + sock.data = ['HTTP/1.1 200 OK\n', + 'Server: BogusServer 1.0\n', + 'transfer-encoding: chunked', + '\n\n', + chunkedblock('hi ', eol='\n'), + chunkedblock('there', eol='\n'), + chunkedblock('', eol='\n'), + ] + con.request('GET', '/') + self.assertStringEqual('hi there', con.getresponse().read()) + + def 
testChunkedDownloadPartialChunkBadEOL(self): + con = http.HTTPConnection('1.2.3.4:80') + con._connect() + sock = con.sock + sock.data = ['HTTP/1.1 200 OK\n', + 'Server: BogusServer 1.0\n', + 'transfer-encoding: chunked', + '\n\n', + chunkedblock('hi ', eol='\n'), + ] + list(chunkedblock('there\n' * 5, eol='\n')) + [ + chunkedblock('', eol='\n')] + con.request('GET', '/') + self.assertStringEqual('hi there\nthere\nthere\nthere\nthere\n', + con.getresponse().read()) + + def testChunkedDownloadPartialChunk(self): + con = http.HTTPConnection('1.2.3.4:80') + con._connect() + sock = con.sock + sock.data = ['HTTP/1.1 200 OK\r\n', + 'Server: BogusServer 1.0\r\n', + 'transfer-encoding: chunked', + '\r\n\r\n', + chunkedblock('hi '), + ] + list(chunkedblock('there\n' * 5)) + [chunkedblock('')] + con.request('GET', '/') + self.assertStringEqual('hi there\nthere\nthere\nthere\nthere\n', + con.getresponse().read()) + + def testChunkedDownloadEarlyHangup(self): + con = http.HTTPConnection('1.2.3.4:80') + con._connect() + sock = con.sock + broken = chunkedblock('hi'*20)[:-1] + sock.data = ['HTTP/1.1 200 OK\r\n', + 'Server: BogusServer 1.0\r\n', + 'transfer-encoding: chunked', + '\r\n\r\n', + broken, + ] + sock.close_on_empty = True + con.request('GET', '/') + resp = con.getresponse() + self.assertRaises(http.HTTPRemoteClosedError, resp.read) +# no-check-code diff --git a/websdk/mercurial/httpclient/tests/test_proxy_support.py b/websdk/mercurial/httpclient/tests/test_proxy_support.py new file mode 100644 index 0000000..1526a9a --- /dev/null +++ b/websdk/mercurial/httpclient/tests/test_proxy_support.py @@ -0,0 +1,135 @@ +# Copyright 2010, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. 
+# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. + +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +import unittest +import socket + +import http + +# relative import to ease embedding the library +import util + + +def make_preloaded_socket(data): + """Make a socket pre-loaded with data so it can be read during connect. + + Useful for https proxy tests because we have to read from the + socket during _connect rather than later on. 
+ """ + def s(*args, **kwargs): + sock = util.MockSocket(*args, **kwargs) + sock.early_data = data[:] + return sock + return s + + +class ProxyHttpTest(util.HttpTestBase, unittest.TestCase): + + def _run_simple_test(self, host, server_data, expected_req, expected_data): + con = http.HTTPConnection(host) + con._connect() + con.sock.data = server_data + con.request('GET', '/') + + self.assertEqual(expected_req, con.sock.sent) + self.assertEqual(expected_data, con.getresponse().read()) + + def testSimpleRequest(self): + con = http.HTTPConnection('1.2.3.4:80', + proxy_hostport=('magicproxy', 4242)) + con._connect() + con.sock.data = ['HTTP/1.1 200 OK\r\n', + 'Server: BogusServer 1.0\r\n', + 'MultiHeader: Value\r\n' + 'MultiHeader: Other Value\r\n' + 'MultiHeader: One More!\r\n' + 'Content-Length: 10\r\n', + '\r\n' + '1234567890' + ] + con.request('GET', '/') + + expected_req = ('GET http://1.2.3.4/ HTTP/1.1\r\n' + 'Host: 1.2.3.4\r\n' + 'accept-encoding: identity\r\n\r\n') + + self.assertEqual(('127.0.0.42', 4242), con.sock.sa) + self.assertStringEqual(expected_req, con.sock.sent) + resp = con.getresponse() + self.assertEqual('1234567890', resp.read()) + self.assertEqual(['Value', 'Other Value', 'One More!'], + resp.headers.getheaders('multiheader')) + self.assertEqual(['BogusServer 1.0'], + resp.headers.getheaders('server')) + + def testSSLRequest(self): + con = http.HTTPConnection('1.2.3.4:443', + proxy_hostport=('magicproxy', 4242)) + socket.socket = make_preloaded_socket( + ['HTTP/1.1 200 OK\r\n', + 'Server: BogusServer 1.0\r\n', + 'Content-Length: 10\r\n', + '\r\n' + '1234567890']) + con._connect() + con.sock.data = ['HTTP/1.1 200 OK\r\n', + 'Server: BogusServer 1.0\r\n', + 'Content-Length: 10\r\n', + '\r\n' + '1234567890' + ] + connect_sent = con.sock.sent + con.sock.sent = '' + con.request('GET', '/') + + expected_connect = ('CONNECT 1.2.3.4:443 HTTP/1.0\r\n' + 'Host: 1.2.3.4\r\n' + 'accept-encoding: identity\r\n' + '\r\n') + expected_request = ('GET / 
HTTP/1.1\r\n' + 'Host: 1.2.3.4\r\n' + 'accept-encoding: identity\r\n\r\n') + + self.assertEqual(('127.0.0.42', 4242), con.sock.sa) + self.assertStringEqual(expected_connect, connect_sent) + self.assertStringEqual(expected_request, con.sock.sent) + resp = con.getresponse() + self.assertEqual(resp.status, 200) + self.assertEqual('1234567890', resp.read()) + self.assertEqual(['BogusServer 1.0'], + resp.headers.getheaders('server')) + + def testSSLProxyFailure(self): + con = http.HTTPConnection('1.2.3.4:443', + proxy_hostport=('magicproxy', 4242)) + socket.socket = make_preloaded_socket( + ['HTTP/1.1 407 Proxy Authentication Required\r\n\r\n']) + self.assertRaises(http.HTTPProxyConnectFailedException, con._connect) + self.assertRaises(http.HTTPProxyConnectFailedException, + con.request, 'GET', '/') +# no-check-code diff --git a/websdk/mercurial/httpclient/tests/test_ssl.py b/websdk/mercurial/httpclient/tests/test_ssl.py new file mode 100644 index 0000000..5799a8f --- /dev/null +++ b/websdk/mercurial/httpclient/tests/test_ssl.py @@ -0,0 +1,93 @@ +# Copyright 2011, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. 
+ +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +import unittest + +import http + +# relative import to ease embedding the library +import util + + + +class HttpSslTest(util.HttpTestBase, unittest.TestCase): + def testSslRereadRequired(self): + con = http.HTTPConnection('1.2.3.4:443') + con._connect() + # extend the list instead of assign because of how + # MockSSLSocket works. 
+ con.sock.data = ['HTTP/1.1 200 OK\r\n', + 'Server: BogusServer 1.0\r\n', + 'MultiHeader: Value\r\n' + 'MultiHeader: Other Value\r\n' + 'MultiHeader: One More!\r\n' + 'Content-Length: 10\r\n', + '\r\n' + '1234567890' + ] + con.request('GET', '/') + + expected_req = ('GET / HTTP/1.1\r\n' + 'Host: 1.2.3.4\r\n' + 'accept-encoding: identity\r\n\r\n') + + self.assertEqual(('1.2.3.4', 443), con.sock.sa) + self.assertEqual(expected_req, con.sock.sent) + resp = con.getresponse() + self.assertEqual('1234567890', resp.read()) + self.assertEqual(['Value', 'Other Value', 'One More!'], + resp.headers.getheaders('multiheader')) + self.assertEqual(['BogusServer 1.0'], + resp.headers.getheaders('server')) + + def testSslRereadInEarlyResponse(self): + con = http.HTTPConnection('1.2.3.4:443') + con._connect() + con.sock.early_data = ['HTTP/1.1 200 OK\r\n', + 'Server: BogusServer 1.0\r\n', + 'MultiHeader: Value\r\n' + 'MultiHeader: Other Value\r\n' + 'MultiHeader: One More!\r\n' + 'Content-Length: 10\r\n', + '\r\n' + '1234567890' + ] + + expected_req = self.doPost(con, False) + self.assertEqual(None, con.sock, + 'Connection should have disowned socket') + + resp = con.getresponse() + self.assertEqual(('1.2.3.4', 443), resp.sock.sa) + self.assertEqual(expected_req, resp.sock.sent) + self.assertEqual('1234567890', resp.read()) + self.assertEqual(['Value', 'Other Value', 'One More!'], + resp.headers.getheaders('multiheader')) + self.assertEqual(['BogusServer 1.0'], + resp.headers.getheaders('server')) +# no-check-code diff --git a/websdk/mercurial/httpclient/tests/util.py b/websdk/mercurial/httpclient/tests/util.py new file mode 100644 index 0000000..bbc3d87 --- /dev/null +++ b/websdk/mercurial/httpclient/tests/util.py @@ -0,0 +1,195 @@ +# Copyright 2010, Google Inc. +# All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. + +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +import difflib +import socket + +import http + + +class MockSocket(object): + """Mock non-blocking socket object. + + This is ONLY capable of mocking a nonblocking socket. + + Attributes: + early_data: data to always send as soon as end of headers is seen + data: a list of strings to return on recv(), with the + assumption that the socket would block between each + string in the list. + read_wait_sentinel: data that must be written to the socket before + beginning the response. 
+ close_on_empty: If true, close the socket when it runs out of data + for the client. + """ + def __init__(self, af, socktype, proto): + self.af = af + self.socktype = socktype + self.proto = proto + + self.early_data = [] + self.data = [] + self.remote_closed = self.closed = False + self.close_on_empty = False + self.sent = '' + self.read_wait_sentinel = http._END_HEADERS + + def close(self): + self.closed = True + + def connect(self, sa): + self.sa = sa + + def setblocking(self, timeout): + assert timeout == 0 + + def recv(self, amt=-1): + if self.early_data: + datalist = self.early_data + elif not self.data: + return '' + else: + datalist = self.data + if amt == -1: + return datalist.pop(0) + data = datalist.pop(0) + if len(data) > amt: + datalist.insert(0, data[amt:]) + if not self.data and not self.early_data and self.close_on_empty: + self.remote_closed = True + return data[:amt] + + @property + def ready_for_read(self): + return ((self.early_data and http._END_HEADERS in self.sent) + or (self.read_wait_sentinel in self.sent and self.data) + or self.closed or self.remote_closed) + + def send(self, data): + # this is a horrible mock, but nothing needs us to raise the + # correct exception yet + assert not self.closed, 'attempted to write to a closed socket' + assert not self.remote_closed, ('attempted to write to a' + ' socket closed by the server') + if len(data) > 8192: + data = data[:8192] + self.sent += data + return len(data) + + +def mockselect(r, w, x, timeout=0): + """Simple mock for select() + """ + readable = filter(lambda s: s.ready_for_read, r) + return readable, w[:], [] + + +class MockSSLSocket(object): + def __init__(self, sock): + self._sock = sock + self._fail_recv = True + + def __getattr__(self, key): + return getattr(self._sock, key) + + def __setattr__(self, key, value): + if key not in ('_sock', '_fail_recv'): + return setattr(self._sock, key, value) + return object.__setattr__(self, key, value) + + def recv(self, amt=-1): + try: + if 
self._fail_recv: + raise socket.sslerror(socket.SSL_ERROR_WANT_READ) + return self._sock.recv(amt=amt) + finally: + self._fail_recv = not self._fail_recv + + +def mocksslwrap(sock, keyfile=None, certfile=None, + server_side=False, cert_reqs=http.socketutil.CERT_NONE, + ssl_version=None, ca_certs=None, + do_handshake_on_connect=True, + suppress_ragged_eofs=True): + return MockSSLSocket(sock) + + +def mockgetaddrinfo(host, port, unused, streamtype): + assert unused == 0 + assert streamtype == socket.SOCK_STREAM + if host.count('.') != 3: + host = '127.0.0.42' + return [(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP, '', + (host, port))] + + +class HttpTestBase(object): + def setUp(self): + self.orig_socket = socket.socket + socket.socket = MockSocket + + self.orig_getaddrinfo = socket.getaddrinfo + socket.getaddrinfo = mockgetaddrinfo + + self.orig_select = http.select.select + http.select.select = mockselect + + self.orig_sslwrap = http.socketutil.wrap_socket + http.socketutil.wrap_socket = mocksslwrap + + def tearDown(self): + socket.socket = self.orig_socket + http.select.select = self.orig_select + http.socketutil.wrap_socket = self.orig_sslwrap + socket.getaddrinfo = self.orig_getaddrinfo + + def assertStringEqual(self, l, r): + try: + self.assertEqual(l, r, ('failed string equality check, ' + 'see stdout for details')) + except: + add_nl = lambda li: map(lambda x: x + '\n', li) + print 'failed expectation:' + print ''.join(difflib.unified_diff( + add_nl(l.splitlines()), add_nl(r.splitlines()), + fromfile='expected', tofile='got')) + raise + + def doPost(self, con, expect_body, body_to_send='This is some POST data'): + con.request('POST', '/', body=body_to_send, + expect_continue=True) + expected_req = ('POST / HTTP/1.1\r\n' + 'Host: 1.2.3.4\r\n' + 'content-length: %d\r\n' + 'Expect: 100-Continue\r\n' + 'accept-encoding: identity\r\n\r\n' % + len(body_to_send)) + if expect_body: + expected_req += body_to_send + return expected_req +# no-check-code diff 
--git a/websdk/mercurial/httpconnection.py b/websdk/mercurial/httpconnection.py new file mode 100644 index 0000000..c5fb058 --- /dev/null +++ b/websdk/mercurial/httpconnection.py @@ -0,0 +1,284 @@ +# httpconnection.py - urllib2 handler for new http support +# +# Copyright 2005, 2006, 2007, 2008 Matt Mackall +# Copyright 2006, 2007 Alexis S. L. Carvalho +# Copyright 2006 Vadim Gelfer +# Copyright 2011 Google, Inc. +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. +import logging +import socket +import urllib +import urllib2 +import os + +from mercurial import httpclient +from mercurial import sslutil +from mercurial import util +from mercurial.i18n import _ + +# moved here from url.py to avoid a cycle +class httpsendfile(object): + """This is a wrapper around the objects returned by python's "open". + + Its purpose is to send file-like objects via HTTP. + It do however not define a __len__ attribute because the length + might be more than Py_ssize_t can handle. + """ + + def __init__(self, ui, *args, **kwargs): + # We can't just "self._data = open(*args, **kwargs)" here because there + # is an "open" function defined in this module that shadows the global + # one + self.ui = ui + self._data = open(*args, **kwargs) + self.seek = self._data.seek + self.close = self._data.close + self.write = self._data.write + self.length = os.fstat(self._data.fileno()).st_size + self._pos = 0 + self._total = self.length / 1024 * 2 + + def read(self, *args, **kwargs): + try: + ret = self._data.read(*args, **kwargs) + except EOFError: + self.ui.progress(_('sending'), None) + self._pos += len(ret) + # We pass double the max for total because we currently have + # to send the bundle twice in the case of a server that + # requires authentication. Since we can't know until we try + # once whether authentication will be required, just lie to + # the user and maybe the push succeeds suddenly at 50%. 
+ self.ui.progress(_('sending'), self._pos / 1024, + unit=_('kb'), total=self._total) + return ret + +# moved here from url.py to avoid a cycle +def readauthforuri(ui, uri, user): + # Read configuration + config = dict() + for key, val in ui.configitems('auth'): + if '.' not in key: + ui.warn(_("ignoring invalid [auth] key '%s'\n") % key) + continue + group, setting = key.rsplit('.', 1) + gdict = config.setdefault(group, dict()) + if setting in ('username', 'cert', 'key'): + val = util.expandpath(val) + gdict[setting] = val + + # Find the best match + if '://' in uri: + scheme, hostpath = uri.split('://', 1) + else: + # py2.4.1 doesn't provide the full URI + scheme, hostpath = 'http', uri + bestuser = None + bestlen = 0 + bestauth = None + for group, auth in config.iteritems(): + if user and user != auth.get('username', user): + # If a username was set in the URI, the entry username + # must either match it or be unset + continue + prefix = auth.get('prefix') + if not prefix: + continue + p = prefix.split('://', 1) + if len(p) > 1: + schemes, prefix = [p[0]], p[1] + else: + schemes = (auth.get('schemes') or 'https').split() + if (prefix == '*' or hostpath.startswith(prefix)) and \ + (len(prefix) > bestlen or (len(prefix) == bestlen and \ + not bestuser and 'username' in auth)) \ + and scheme in schemes: + bestlen = len(prefix) + bestauth = group, auth + bestuser = auth.get('username') + if user and not bestuser: + auth['username'] = user + return bestauth + +# Mercurial (at least until we can remove the old codepath) requires +# that the http response object be sufficiently file-like, so we +# provide a close() method here. 
+class HTTPResponse(httpclient.HTTPResponse): + def close(self): + pass + +class HTTPConnection(httpclient.HTTPConnection): + response_class = HTTPResponse + def request(self, method, uri, body=None, headers={}): + if isinstance(body, httpsendfile): + body.seek(0) + httpclient.HTTPConnection.request(self, method, uri, body=body, + headers=headers) + + +_configuredlogging = False +LOGFMT = '%(levelname)s:%(name)s:%(lineno)d:%(message)s' +# Subclass BOTH of these because otherwise urllib2 "helpfully" +# reinserts them since it notices we don't include any subclasses of +# them. +class http2handler(urllib2.HTTPHandler, urllib2.HTTPSHandler): + def __init__(self, ui, pwmgr): + global _configuredlogging + urllib2.AbstractHTTPHandler.__init__(self) + self.ui = ui + self.pwmgr = pwmgr + self._connections = {} + loglevel = ui.config('ui', 'http2debuglevel', default=None) + if loglevel and not _configuredlogging: + _configuredlogging = True + logger = logging.getLogger('mercurial.httpclient') + logger.setLevel(getattr(logging, loglevel.upper())) + handler = logging.StreamHandler() + handler.setFormatter(logging.Formatter(LOGFMT)) + logger.addHandler(handler) + + def close_all(self): + """Close and remove all connection objects being kept for reuse.""" + for openconns in self._connections.values(): + for conn in openconns: + conn.close() + self._connections = {} + + # shamelessly borrowed from urllib2.AbstractHTTPHandler + def do_open(self, http_class, req, use_ssl): + """Return an addinfourl object for the request, using http_class. + + http_class must implement the HTTPConnection API from httplib. + The addinfourl return value is a file-like object. It also + has methods and attributes including: + - info(): return a mimetools.Message object for the headers + - geturl(): return the original request URL + - code: HTTP status code + """ + # If using a proxy, the host returned by get_host() is + # actually the proxy. 
On Python 2.6.1, the real destination + # hostname is encoded in the URI in the urllib2 request + # object. On Python 2.6.5, it's stored in the _tunnel_host + # attribute which has no accessor. + tunhost = getattr(req, '_tunnel_host', None) + host = req.get_host() + if tunhost: + proxyhost = host + host = tunhost + elif req.has_proxy(): + proxyhost = req.get_host() + host = req.get_selector().split('://', 1)[1].split('/', 1)[0] + else: + proxyhost = None + + if proxyhost: + if ':' in proxyhost: + # Note: this means we'll explode if we try and use an + # IPv6 http proxy. This isn't a regression, so we + # won't worry about it for now. + proxyhost, proxyport = proxyhost.rsplit(':', 1) + else: + proxyport = 3128 # squid default + proxy = (proxyhost, proxyport) + else: + proxy = None + + if not host: + raise urllib2.URLError('no host given') + + connkey = use_ssl, host, proxy + allconns = self._connections.get(connkey, []) + conns = [c for c in allconns if not c.busy()] + if conns: + h = conns[0] + else: + if allconns: + self.ui.debug('all connections for %s busy, making a new ' + 'one\n' % host) + timeout = None + if req.timeout is not socket._GLOBAL_DEFAULT_TIMEOUT: + timeout = req.timeout + h = http_class(host, timeout=timeout, proxy_hostport=proxy) + self._connections.setdefault(connkey, []).append(h) + + headers = dict(req.headers) + headers.update(req.unredirected_hdrs) + headers = dict( + (name.title(), val) for name, val in headers.items()) + try: + path = req.get_selector() + if '://' in path: + path = path.split('://', 1)[1].split('/', 1)[1] + if path[0] != '/': + path = '/' + path + h.request(req.get_method(), path, req.data, headers) + r = h.getresponse() + except socket.error, err: # XXX what error? + raise urllib2.URLError(err) + + # Pick apart the HTTPResponse object to get the addinfourl + # object initialized properly. 
+ r.recv = r.read + + resp = urllib.addinfourl(r, r.headers, req.get_full_url()) + resp.code = r.status + resp.msg = r.reason + return resp + + # httplib always uses the given host/port as the socket connect + # target, and then allows full URIs in the request path, which it + # then observes and treats as a signal to do proxying instead. + def http_open(self, req): + if req.get_full_url().startswith('https'): + return self.https_open(req) + return self.do_open(HTTPConnection, req, False) + + def https_open(self, req): + # req.get_full_url() does not contain credentials and we may + # need them to match the certificates. + url = req.get_full_url() + user, password = self.pwmgr.find_stored_password(url) + res = readauthforuri(self.ui, url, user) + if res: + group, auth = res + self.auth = auth + self.ui.debug("using auth.%s.* for authentication\n" % group) + else: + self.auth = None + return self.do_open(self._makesslconnection, req, True) + + def _makesslconnection(self, host, port=443, *args, **kwargs): + keyfile = None + certfile = None + + if args: # key_file + keyfile = args.pop(0) + if args: # cert_file + certfile = args.pop(0) + + # if the user has specified different key/cert files in + # hgrc, we prefer these + if self.auth and 'key' in self.auth and 'cert' in self.auth: + keyfile = self.auth['key'] + certfile = self.auth['cert'] + + # let host port take precedence + if ':' in host and '[' not in host or ']:' in host: + host, port = host.rsplit(':', 1) + port = int(port) + if '[' in host: + host = host[1:-1] + + if keyfile: + kwargs['keyfile'] = keyfile + if certfile: + kwargs['certfile'] = certfile + + kwargs.update(sslutil.sslkwargs(self.ui, host)) + + con = HTTPConnection(host, port, use_ssl=True, + ssl_validator=sslutil.validator(self.ui, host), + **kwargs) + return con diff --git a/websdk/mercurial/httprepo.py b/websdk/mercurial/httprepo.py index ed742e6..9ee2eae 120000..100644 --- a/websdk/mercurial/httprepo.py +++ b/websdk/mercurial/httprepo.py @@ -1 
+1,247 @@ -/usr/share/pyshared/mercurial/httprepo.py \ No newline at end of file +# httprepo.py - HTTP repository proxy classes for mercurial +# +# Copyright 2005, 2006 Matt Mackall +# Copyright 2006 Vadim Gelfer +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +from node import nullid +from i18n import _ +import changegroup, statichttprepo, error, httpconnection, url, util, wireproto +import os, urllib, urllib2, zlib, httplib +import errno, socket + +def zgenerator(f): + zd = zlib.decompressobj() + try: + for chunk in util.filechunkiter(f): + while chunk: + yield zd.decompress(chunk, 2**18) + chunk = zd.unconsumed_tail + except httplib.HTTPException: + raise IOError(None, _('connection ended unexpectedly')) + yield zd.flush() + +class httprepository(wireproto.wirerepository): + def __init__(self, ui, path): + self.path = path + self.caps = None + self.handler = None + self.urlopener = None + u = util.url(path) + if u.query or u.fragment: + raise util.Abort(_('unsupported URL component: "%s"') % + (u.query or u.fragment)) + + # urllib cannot handle URLs with embedded user or passwd + self._url, authinfo = u.authinfo() + + self.ui = ui + self.ui.debug('using %s\n' % self._url) + + self.urlopener = url.opener(ui, authinfo) + + def __del__(self): + if self.urlopener: + for h in self.urlopener.handlers: + h.close() + getattr(h, "close_all", lambda : None)() + + def url(self): + return self.path + + # look up capabilities only when needed + + def _fetchcaps(self): + self.caps = set(self._call('capabilities').split()) + + def get_caps(self): + if self.caps is None: + try: + self._fetchcaps() + except error.RepoError: + self.caps = set() + self.ui.debug('capabilities: %s\n' % + (' '.join(self.caps or ['none']))) + return self.caps + + capabilities = property(get_caps) + + def lock(self): + raise util.Abort(_('operation not supported over http')) + + def _callstream(self, cmd, 
**args): + if cmd == 'pushkey': + args['data'] = '' + data = args.pop('data', None) + size = 0 + if util.safehasattr(data, 'length'): + size = data.length + elif data is not None: + size = len(data) + headers = args.pop('headers', {}) + + if size and self.ui.configbool('ui', 'usehttp2', False): + headers['Expect'] = '100-Continue' + headers['X-HgHttp2'] = '1' + + self.ui.debug("sending %s command\n" % cmd) + q = [('cmd', cmd)] + headersize = 0 + if len(args) > 0: + httpheader = self.capable('httpheader') + if httpheader: + headersize = int(httpheader.split(',')[0]) + if headersize > 0: + # The headers can typically carry more data than the URL. + encargs = urllib.urlencode(sorted(args.items())) + headerfmt = 'X-HgArg-%s' + contentlen = headersize - len(headerfmt % '000' + ': \r\n') + headernum = 0 + for i in xrange(0, len(encargs), contentlen): + headernum += 1 + header = headerfmt % str(headernum) + headers[header] = encargs[i:i + contentlen] + varyheaders = [headerfmt % str(h) for h in range(1, headernum + 1)] + headers['Vary'] = ','.join(varyheaders) + else: + q += sorted(args.items()) + qs = '?%s' % urllib.urlencode(q) + cu = "%s%s" % (self._url, qs) + req = urllib2.Request(cu, data, headers) + if data is not None: + self.ui.debug("sending %s bytes\n" % size) + req.add_unredirected_header('Content-Length', '%d' % size) + try: + resp = self.urlopener.open(req) + except urllib2.HTTPError, inst: + if inst.code == 401: + raise util.Abort(_('authorization failed')) + raise + except httplib.HTTPException, inst: + self.ui.debug('http error while sending %s command\n' % cmd) + self.ui.traceback() + raise IOError(None, inst) + except IndexError: + # this only happens with Python 2.3, later versions raise URLError + raise util.Abort(_('http error, possibly caused by proxy setting')) + # record the url we got redirected to + resp_url = resp.geturl() + if resp_url.endswith(qs): + resp_url = resp_url[:-len(qs)] + if self._url.rstrip('/') != resp_url.rstrip('/'): + if not 
self.ui.quiet: + self.ui.warn(_('real URL is %s\n') % resp_url) + self._url = resp_url + try: + proto = resp.getheader('content-type') + except AttributeError: + proto = resp.headers.get('content-type', '') + + safeurl = util.hidepassword(self._url) + if proto.startswith('application/hg-error'): + raise error.OutOfBandError(resp.read()) + # accept old "text/plain" and "application/hg-changegroup" for now + if not (proto.startswith('application/mercurial-') or + proto.startswith('text/plain') or + proto.startswith('application/hg-changegroup')): + self.ui.debug("requested URL: '%s'\n" % util.hidepassword(cu)) + raise error.RepoError( + _("'%s' does not appear to be an hg repository:\n" + "---%%<--- (%s)\n%s\n---%%<---\n") + % (safeurl, proto or 'no content-type', resp.read())) + + if proto.startswith('application/mercurial-'): + try: + version = proto.split('-', 1)[1] + version_info = tuple([int(n) for n in version.split('.')]) + except ValueError: + raise error.RepoError(_("'%s' sent a broken Content-Type " + "header (%s)") % (safeurl, proto)) + if version_info > (0, 1): + raise error.RepoError(_("'%s' uses newer protocol %s") % + (safeurl, version)) + + return resp + + def _call(self, cmd, **args): + fp = self._callstream(cmd, **args) + try: + return fp.read() + finally: + # if using keepalive, allow connection to be reused + fp.close() + + def _callpush(self, cmd, cg, **args): + # have to stream bundle to a temp file because we do not have + # http 1.1 chunked transfer. + + types = self.capable('unbundle') + try: + types = types.split(',') + except AttributeError: + # servers older than d1b16a746db6 will send 'unbundle' as a + # boolean capability. They only support headerless/uncompressed + # bundles. 
+ types = [""] + for x in types: + if x in changegroup.bundletypes: + type = x + break + + tempname = changegroup.writebundle(cg, None, type) + fp = httpconnection.httpsendfile(self.ui, tempname, "rb") + headers = {'Content-Type': 'application/mercurial-0.1'} + + try: + try: + r = self._call(cmd, data=fp, headers=headers, **args) + vals = r.split('\n', 1) + if len(vals) < 2: + raise error.ResponseError(_("unexpected response:"), r) + return vals + except socket.error, err: + if err.args[0] in (errno.ECONNRESET, errno.EPIPE): + raise util.Abort(_('push failed: %s') % err.args[1]) + raise util.Abort(err.args[1]) + finally: + fp.close() + os.unlink(tempname) + + def _abort(self, exception): + raise exception + + def _decompress(self, stream): + return util.chunkbuffer(zgenerator(stream)) + +class httpsrepository(httprepository): + def __init__(self, ui, path): + if not url.has_https: + raise util.Abort(_('Python support for SSL and HTTPS ' + 'is not installed')) + httprepository.__init__(self, ui, path) + +def instance(ui, path, create): + if create: + raise util.Abort(_('cannot create new http repository')) + try: + if path.startswith('https:'): + inst = httpsrepository(ui, path) + else: + inst = httprepository(ui, path) + try: + # Try to do useful work when checking compatibility. + # Usually saves a roundtrip since we want the caps anyway. + inst._fetchcaps() + except error.RepoError: + # No luck, try older compatibility check. 
+ inst.between([(nullid, nullid)]) + return inst + except error.RepoError, httpexception: + try: + r = statichttprepo.instance(ui, "static-" + path, create) + ui.note('(falling back to static-http)\n') + return r + except error.RepoError: + raise httpexception # use the original http RepoError instead diff --git a/websdk/mercurial/i18n.py b/websdk/mercurial/i18n.py index 6a9a8f0..b06a02e 120000..100644 --- a/websdk/mercurial/i18n.py +++ b/websdk/mercurial/i18n.py @@ -1 +1,63 @@ -/usr/share/pyshared/mercurial/i18n.py \ No newline at end of file +# i18n.py - internationalization support for mercurial +# +# Copyright 2005, 2006 Matt Mackall +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +import encoding +import gettext, sys, os + +# modelled after templater.templatepath: +if getattr(sys, 'frozen', None) is not None: + module = sys.executable +else: + module = __file__ + +base = os.path.dirname(module) +for dir in ('.', '..'): + localedir = os.path.join(base, dir, 'locale') + if os.path.isdir(localedir): + break + +t = gettext.translation('hg', localedir, fallback=True) + +def gettext(message): + """Translate message. + + The message is looked up in the catalog to get a Unicode string, + which is encoded in the local encoding before being returned. + + Important: message is restricted to characters in the encoding + given by sys.getdefaultencoding() which is most likely 'ascii'. + """ + # If message is None, t.ugettext will return u'None' as the + # translation whereas our callers expect us to return None. + if message is None: + return message + + paragraphs = message.split('\n\n') + # Be careful not to translate the empty string -- it holds the + # meta data of the .po file. + u = u'\n\n'.join([p and t.ugettext(p) or '' for p in paragraphs]) + try: + # encoding.tolocal cannot be used since it will first try to + # decode the Unicode string. 
Calling u.decode(enc) really + # means u.encode(sys.getdefaultencoding()).decode(enc). Since + # the Python encoding defaults to 'ascii', this fails if the + # translated string use non-ASCII characters. + return u.encode(encoding.encoding, "replace") + except LookupError: + # An unknown encoding results in a LookupError. + return message + +def _plain(): + if 'HGPLAIN' not in os.environ and 'HGPLAINEXCEPT' not in os.environ: + return False + exceptions = os.environ.get('HGPLAINEXCEPT', '').strip().split(',') + return 'i18n' not in exceptions + +if _plain(): + _ = lambda message: message +else: + _ = gettext diff --git a/websdk/mercurial/ignore.py b/websdk/mercurial/ignore.py index 5920262..9c2e9cf 120000..100644 --- a/websdk/mercurial/ignore.py +++ b/websdk/mercurial/ignore.py @@ -1 +1,105 @@ -/usr/share/pyshared/mercurial/ignore.py \ No newline at end of file +# ignore.py - ignored file handling for mercurial +# +# Copyright 2007 Matt Mackall +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +from i18n import _ +import util, match +import re + +_commentre = None + +def ignorepats(lines): + '''parse lines (iterable) of .hgignore text, returning a tuple of + (patterns, parse errors). 
These patterns should be given to compile() + to be validated and converted into a match function.''' + syntaxes = {'re': 'relre:', 'regexp': 'relre:', 'glob': 'relglob:'} + syntax = 'relre:' + patterns = [] + warnings = [] + + for line in lines: + if "#" in line: + global _commentre + if not _commentre: + _commentre = re.compile(r'((^|[^\\])(\\\\)*)#.*') + # remove comments prefixed by an even number of escapes + line = _commentre.sub(r'\1', line) + # fixup properly escaped comments that survived the above + line = line.replace("\\#", "#") + line = line.rstrip() + if not line: + continue + + if line.startswith('syntax:'): + s = line[7:].strip() + try: + syntax = syntaxes[s] + except KeyError: + warnings.append(_("ignoring invalid syntax '%s'") % s) + continue + pat = syntax + line + for s, rels in syntaxes.iteritems(): + if line.startswith(rels): + pat = line + break + elif line.startswith(s+':'): + pat = rels + line[len(s)+1:] + break + patterns.append(pat) + + return patterns, warnings + +def ignore(root, files, warn): + '''return matcher covering patterns in 'files'. + + the files parsed for patterns include: + .hgignore in the repository root + any additional files specified in the [ui] section of ~/.hgrc + + trailing white space is dropped. + the escape character is backslash. + comments start with #. + empty lines are skipped. 
+ + lines can be of the following formats: + + syntax: regexp # defaults following lines to non-rooted regexps + syntax: glob # defaults following lines to non-rooted globs + re:pattern # non-rooted regular expression + glob:pattern # non-rooted glob + pattern # pattern of the current default type''' + + pats = {} + for f in files: + try: + pats[f] = [] + fp = open(f) + pats[f], warnings = ignorepats(fp) + fp.close() + for warning in warnings: + warn("%s: %s\n" % (f, warning)) + except IOError, inst: + if f != files[0]: + warn(_("skipping unreadable ignore file '%s': %s\n") % + (f, inst.strerror)) + + allpats = [] + for patlist in pats.values(): + allpats.extend(patlist) + if not allpats: + return util.never + + try: + ignorefunc = match.match(root, '', [], allpats) + except util.Abort: + # Re-raise an exception where the src is the right file + for f, patlist in pats.iteritems(): + try: + match.match(root, '', [], patlist) + except util.Abort, inst: + raise util.Abort('%s: %s' % (f, inst[0])) + + return ignorefunc diff --git a/websdk/mercurial/keepalive.py b/websdk/mercurial/keepalive.py index e159a6f..0402dfb 120000..100644 --- a/websdk/mercurial/keepalive.py +++ b/websdk/mercurial/keepalive.py @@ -1 +1,766 @@ -/usr/share/pyshared/mercurial/keepalive.py \ No newline at end of file +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. 
+# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the +# Free Software Foundation, Inc., +# 59 Temple Place, Suite 330, +# Boston, MA 02111-1307 USA + +# This file is part of urlgrabber, a high-level cross-protocol url-grabber +# Copyright 2002-2004 Michael D. Stenner, Ryan Tomayko + +# Modified by Benoit Boissinot: +# - fix for digest auth (inspired from urllib2.py @ Python v2.4) +# Modified by Dirkjan Ochtman: +# - import md5 function from a local util module +# Modified by Martin Geisler: +# - moved md5 function from local util module to this module +# Modified by Augie Fackler: +# - add safesend method and use it to prevent broken pipe errors +# on large POST requests + +"""An HTTP handler for urllib2 that supports HTTP 1.1 and keepalive. + +>>> import urllib2 +>>> from keepalive import HTTPHandler +>>> keepalive_handler = HTTPHandler() +>>> opener = urllib2.build_opener(keepalive_handler) +>>> urllib2.install_opener(opener) +>>> +>>> fo = urllib2.urlopen('http://www.python.org') + +If a connection to a given host is requested, and all of the existing +connections are still in use, another connection will be opened. If +the handler tries to use an existing connection but it fails in some +way, it will be closed and removed from the pool. + +To remove the handler, simply re-run build_opener with no arguments, and +install that opener. + +You can explicitly close connections by using the close_connection() +method of the returned file-like object (described below) or you can +use the handler methods: + + close_connection(host) + close_all() + open_connections() + +NOTE: using the close_connection and close_all methods of the handler +should be done with care when using multiple threads. 
+ * there is nothing that prevents another thread from creating new + connections immediately after connections are closed + * no checks are done to prevent in-use connections from being closed + +>>> keepalive_handler.close_all() + +EXTRA ATTRIBUTES AND METHODS + + Upon a status of 200, the object returned has a few additional + attributes and methods, which should not be used if you want to + remain consistent with the normal urllib2-returned objects: + + close_connection() - close the connection to the host + readlines() - you know, readlines() + status - the return status (ie 404) + reason - english translation of status (ie 'File not found') + + If you want the best of both worlds, use this inside an + AttributeError-catching try: + + >>> try: status = fo.status + >>> except AttributeError: status = None + + Unfortunately, these are ONLY there if status == 200, so it's not + easy to distinguish between non-200 responses. The reason is that + urllib2 tries to do clever things with error codes 301, 302, 401, + and 407, and it wraps the object upon return. + + For python versions earlier than 2.4, you can avoid this fancy error + handling by setting the module-level global HANDLE_ERRORS to zero. + You see, prior to 2.4, it's the HTTP Handler's job to determine what + to handle specially, and what to just pass up. HANDLE_ERRORS == 0 + means "pass everything up". In python 2.4, however, this job no + longer belongs to the HTTP Handler and is now done by a NEW handler, + HTTPErrorProcessor. 
Here's the bottom line: + + python version < 2.4 + HANDLE_ERRORS == 1 (default) pass up 200, treat the rest as + errors + HANDLE_ERRORS == 0 pass everything up, error processing is + left to the calling code + python version >= 2.4 + HANDLE_ERRORS == 1 pass up 200, treat the rest as errors + HANDLE_ERRORS == 0 (default) pass everything up, let the + other handlers (specifically, + HTTPErrorProcessor) decide what to do + + In practice, setting the variable either way makes little difference + in python 2.4, so for the most consistent behavior across versions, + you probably just want to use the defaults, which will give you + exceptions on errors. + +""" + +# $Id: keepalive.py,v 1.14 2006/04/04 21:00:32 mstenner Exp $ + +import errno +import httplib +import socket +import thread +import urllib2 + +DEBUG = None + +import sys +if sys.version_info < (2, 4): + HANDLE_ERRORS = 1 +else: HANDLE_ERRORS = 0 + +class ConnectionManager(object): + """ + The connection manager must be able to: + * keep track of all existing + """ + def __init__(self): + self._lock = thread.allocate_lock() + self._hostmap = {} # map hosts to a list of connections + self._connmap = {} # map connections to host + self._readymap = {} # map connection to ready state + + def add(self, host, connection, ready): + self._lock.acquire() + try: + if not host in self._hostmap: + self._hostmap[host] = [] + self._hostmap[host].append(connection) + self._connmap[connection] = host + self._readymap[connection] = ready + finally: + self._lock.release() + + def remove(self, connection): + self._lock.acquire() + try: + try: + host = self._connmap[connection] + except KeyError: + pass + else: + del self._connmap[connection] + del self._readymap[connection] + self._hostmap[host].remove(connection) + if not self._hostmap[host]: del self._hostmap[host] + finally: + self._lock.release() + + def set_ready(self, connection, ready): + try: + self._readymap[connection] = ready + except KeyError: + pass + + def 
get_ready_conn(self, host): + conn = None + self._lock.acquire() + try: + if host in self._hostmap: + for c in self._hostmap[host]: + if self._readymap[c]: + self._readymap[c] = 0 + conn = c + break + finally: + self._lock.release() + return conn + + def get_all(self, host=None): + if host: + return list(self._hostmap.get(host, [])) + else: + return dict(self._hostmap) + +class KeepAliveHandler(object): + def __init__(self): + self._cm = ConnectionManager() + + #### Connection Management + def open_connections(self): + """return a list of connected hosts and the number of connections + to each. [('foo.com:80', 2), ('bar.org', 1)]""" + return [(host, len(li)) for (host, li) in self._cm.get_all().items()] + + def close_connection(self, host): + """close connection(s) to + host is the host:port spec, as in 'www.cnn.com:8080' as passed in. + no error occurs if there is no connection to that host.""" + for h in self._cm.get_all(host): + self._cm.remove(h) + h.close() + + def close_all(self): + """close all open connections""" + for host, conns in self._cm.get_all().iteritems(): + for h in conns: + self._cm.remove(h) + h.close() + + def _request_closed(self, request, host, connection): + """tells us that this request is now closed and the the + connection is ready for another request""" + self._cm.set_ready(connection, 1) + + def _remove_connection(self, host, connection, close=0): + if close: + connection.close() + self._cm.remove(connection) + + #### Transaction Execution + def http_open(self, req): + return self.do_open(HTTPConnection, req) + + def do_open(self, http_class, req): + host = req.get_host() + if not host: + raise urllib2.URLError('no host given') + + try: + h = self._cm.get_ready_conn(host) + while h: + r = self._reuse_connection(h, req, host) + + # if this response is non-None, then it worked and we're + # done. Break out, skipping the else block. 
+ if r: + break + + # connection is bad - possibly closed by server + # discard it and ask for the next free connection + h.close() + self._cm.remove(h) + h = self._cm.get_ready_conn(host) + else: + # no (working) free connections were found. Create a new one. + h = http_class(host) + if DEBUG: + DEBUG.info("creating new connection to %s (%d)", + host, id(h)) + self._cm.add(host, h, 0) + self._start_transaction(h, req) + r = h.getresponse() + except (socket.error, httplib.HTTPException), err: + raise urllib2.URLError(err) + + # if not a persistent connection, don't try to reuse it + if r.will_close: + self._cm.remove(h) + + if DEBUG: + DEBUG.info("STATUS: %s, %s", r.status, r.reason) + r._handler = self + r._host = host + r._url = req.get_full_url() + r._connection = h + r.code = r.status + r.headers = r.msg + r.msg = r.reason + + if r.status == 200 or not HANDLE_ERRORS: + return r + else: + return self.parent.error('http', req, r, + r.status, r.msg, r.headers) + + def _reuse_connection(self, h, req, host): + """start the transaction with a re-used connection + return a response object (r) upon success or None on failure. + This DOES not close or remove bad connections in cases where + it returns. However, if an unexpected exception occurs, it + will close and remove the connection before re-raising. + """ + try: + self._start_transaction(h, req) + r = h.getresponse() + # note: just because we got something back doesn't mean it + # worked. We'll check the version below, too. + except (socket.error, httplib.HTTPException): + r = None + except: + # adding this block just in case we've missed + # something we will still raise the exception, but + # lets try and close the connection and remove it + # first. We previously got into a nasty loop + # where an exception was uncaught, and so the + # connection stayed open. On the next try, the + # same exception was raised, etc. 
The tradeoff is + # that it's now possible this call will raise + # a DIFFERENT exception + if DEBUG: + DEBUG.error("unexpected exception - closing " + "connection to %s (%d)", host, id(h)) + self._cm.remove(h) + h.close() + raise + + if r is None or r.version == 9: + # httplib falls back to assuming HTTP 0.9 if it gets a + # bad header back. This is most likely to happen if + # the socket has been closed by the server since we + # last used the connection. + if DEBUG: + DEBUG.info("failed to re-use connection to %s (%d)", + host, id(h)) + r = None + else: + if DEBUG: + DEBUG.info("re-using connection to %s (%d)", host, id(h)) + + return r + + def _start_transaction(self, h, req): + # What follows mostly reimplements HTTPConnection.request() + # except it adds self.parent.addheaders in the mix. + headers = req.headers.copy() + if sys.version_info >= (2, 4): + headers.update(req.unredirected_hdrs) + headers.update(self.parent.addheaders) + headers = dict((n.lower(), v) for n, v in headers.items()) + skipheaders = {} + for n in ('host', 'accept-encoding'): + if n in headers: + skipheaders['skip_' + n.replace('-', '_')] = 1 + try: + if req.has_data(): + data = req.get_data() + h.putrequest('POST', req.get_selector(), **skipheaders) + if 'content-type' not in headers: + h.putheader('Content-type', + 'application/x-www-form-urlencoded') + if 'content-length' not in headers: + h.putheader('Content-length', '%d' % len(data)) + else: + h.putrequest('GET', req.get_selector(), **skipheaders) + except (socket.error), err: + raise urllib2.URLError(err) + for k, v in headers.items(): + h.putheader(k, v) + h.endheaders() + if req.has_data(): + h.send(data) + +class HTTPHandler(KeepAliveHandler, urllib2.HTTPHandler): + pass + +class HTTPResponse(httplib.HTTPResponse): + # we need to subclass HTTPResponse in order to + # 1) add readline() and readlines() methods + # 2) add close_connection() methods + # 3) add info() and geturl() methods + + # in order to add readline(), read must 
be modified to deal with a + # buffer. example: readline must read a buffer and then spit back + # one line at a time. The only real alternative is to read one + # BYTE at a time (ick). Once something has been read, it can't be + # put back (ok, maybe it can, but that's even uglier than this), + # so if you THEN do a normal read, you must first take stuff from + # the buffer. + + # the read method wraps the original to accomodate buffering, + # although read() never adds to the buffer. + # Both readline and readlines have been stolen with almost no + # modification from socket.py + + + def __init__(self, sock, debuglevel=0, strict=0, method=None): + if method: # the httplib in python 2.3 uses the method arg + httplib.HTTPResponse.__init__(self, sock, debuglevel, method) + else: # 2.2 doesn't + httplib.HTTPResponse.__init__(self, sock, debuglevel) + self.fileno = sock.fileno + self.code = None + self._rbuf = '' + self._rbufsize = 8096 + self._handler = None # inserted by the handler later + self._host = None # (same) + self._url = None # (same) + self._connection = None # (same) + + _raw_read = httplib.HTTPResponse.read + + def close(self): + if self.fp: + self.fp.close() + self.fp = None + if self._handler: + self._handler._request_closed(self, self._host, + self._connection) + + def close_connection(self): + self._handler._remove_connection(self._host, self._connection, close=1) + self.close() + + def info(self): + return self.headers + + def geturl(self): + return self._url + + def read(self, amt=None): + # the _rbuf test is only in this first if for speed. 
It's not + # logically necessary + if self._rbuf and not amt is None: + L = len(self._rbuf) + if amt > L: + amt -= L + else: + s = self._rbuf[:amt] + self._rbuf = self._rbuf[amt:] + return s + + s = self._rbuf + self._raw_read(amt) + self._rbuf = '' + return s + + # stolen from Python SVN #68532 to fix issue1088 + def _read_chunked(self, amt): + chunk_left = self.chunk_left + value = '' + + # XXX This accumulates chunks by repeated string concatenation, + # which is not efficient as the number or size of chunks gets big. + while True: + if chunk_left is None: + line = self.fp.readline() + i = line.find(';') + if i >= 0: + line = line[:i] # strip chunk-extensions + try: + chunk_left = int(line, 16) + except ValueError: + # close the connection as protocol synchronisation is + # probably lost + self.close() + raise httplib.IncompleteRead(value) + if chunk_left == 0: + break + if amt is None: + value += self._safe_read(chunk_left) + elif amt < chunk_left: + value += self._safe_read(amt) + self.chunk_left = chunk_left - amt + return value + elif amt == chunk_left: + value += self._safe_read(amt) + self._safe_read(2) # toss the CRLF at the end of the chunk + self.chunk_left = None + return value + else: + value += self._safe_read(chunk_left) + amt -= chunk_left + + # we read the whole chunk, get another + self._safe_read(2) # toss the CRLF at the end of the chunk + chunk_left = None + + # read and discard trailer up to the CRLF terminator + ### note: we shouldn't have any trailers! 
+ while True: + line = self.fp.readline() + if not line: + # a vanishingly small number of sites EOF without + # sending the trailer + break + if line == '\r\n': + break + + # we read everything; close the "file" + self.close() + + return value + + def readline(self, limit=-1): + i = self._rbuf.find('\n') + while i < 0 and not (0 < limit <= len(self._rbuf)): + new = self._raw_read(self._rbufsize) + if not new: + break + i = new.find('\n') + if i >= 0: + i = i + len(self._rbuf) + self._rbuf = self._rbuf + new + if i < 0: + i = len(self._rbuf) + else: + i = i + 1 + if 0 <= limit < len(self._rbuf): + i = limit + data, self._rbuf = self._rbuf[:i], self._rbuf[i:] + return data + + def readlines(self, sizehint = 0): + total = 0 + list = [] + while True: + line = self.readline() + if not line: + break + list.append(line) + total += len(line) + if sizehint and total >= sizehint: + break + return list + +def safesend(self, str): + """Send `str' to the server. + + Shamelessly ripped off from httplib to patch a bad behavior. + """ + # _broken_pipe_resp is an attribute we set in this function + # if the socket is closed while we're sending data but + # the server sent us a response before hanging up. + # In that case, we want to pretend to send the rest of the + # outgoing data, and then let the user use getresponse() + # (which we wrap) to get this last response before + # opening a new socket. + if getattr(self, '_broken_pipe_resp', None) is not None: + return + + if self.sock is None: + if self.auto_open: + self.connect() + else: + raise httplib.NotConnected() + + # send the data to the server. if we get a broken pipe, then close + # the socket. we want to reconnect when somebody tries to send again. + # + # NOTE: we DO propagate the error, though, because we cannot simply + # ignore the error... the caller will know if they can retry. 
+ if self.debuglevel > 0: + print "send:", repr(str) + try: + blocksize = 8192 + read = getattr(str, 'read', None) + if read is not None: + if self.debuglevel > 0: + print "sendIng a read()able" + data = read(blocksize) + while data: + self.sock.sendall(data) + data = read(blocksize) + else: + self.sock.sendall(str) + except socket.error, v: + reraise = True + if v[0] == errno.EPIPE: # Broken pipe + if self._HTTPConnection__state == httplib._CS_REQ_SENT: + self._broken_pipe_resp = None + self._broken_pipe_resp = self.getresponse() + reraise = False + self.close() + if reraise: + raise + +def wrapgetresponse(cls): + """Wraps getresponse in cls with a broken-pipe sane version. + """ + def safegetresponse(self): + # In safesend() we might set the _broken_pipe_resp + # attribute, in which case the socket has already + # been closed and we just need to give them the response + # back. Otherwise, we use the normal response path. + r = getattr(self, '_broken_pipe_resp', None) + if r is not None: + return r + return cls.getresponse(self) + safegetresponse.__doc__ = cls.getresponse.__doc__ + return safegetresponse + +class HTTPConnection(httplib.HTTPConnection): + # use the modified response class + response_class = HTTPResponse + send = safesend + getresponse = wrapgetresponse(httplib.HTTPConnection) + + +######################################################################### +##### TEST FUNCTIONS +######################################################################### + +def error_handler(url): + global HANDLE_ERRORS + orig = HANDLE_ERRORS + keepalive_handler = HTTPHandler() + opener = urllib2.build_opener(keepalive_handler) + urllib2.install_opener(opener) + pos = {0: 'off', 1: 'on'} + for i in (0, 1): + print " fancy error handling %s (HANDLE_ERRORS = %i)" % (pos[i], i) + HANDLE_ERRORS = i + try: + fo = urllib2.urlopen(url) + fo.read() + fo.close() + try: + status, reason = fo.status, fo.reason + except AttributeError: + status, reason = None, None + except IOError, 
e: + print " EXCEPTION: %s" % e + raise + else: + print " status = %s, reason = %s" % (status, reason) + HANDLE_ERRORS = orig + hosts = keepalive_handler.open_connections() + print "open connections:", hosts + keepalive_handler.close_all() + +def md5(s): + try: + from hashlib import md5 as _md5 + except ImportError: + from md5 import md5 as _md5 + global md5 + md5 = _md5 + return _md5(s) + +def continuity(url): + format = '%25s: %s' + + # first fetch the file with the normal http handler + opener = urllib2.build_opener() + urllib2.install_opener(opener) + fo = urllib2.urlopen(url) + foo = fo.read() + fo.close() + m = md5.new(foo) + print format % ('normal urllib', m.hexdigest()) + + # now install the keepalive handler and try again + opener = urllib2.build_opener(HTTPHandler()) + urllib2.install_opener(opener) + + fo = urllib2.urlopen(url) + foo = fo.read() + fo.close() + m = md5.new(foo) + print format % ('keepalive read', m.hexdigest()) + + fo = urllib2.urlopen(url) + foo = '' + while True: + f = fo.readline() + if f: + foo = foo + f + else: break + fo.close() + m = md5.new(foo) + print format % ('keepalive readline', m.hexdigest()) + +def comp(N, url): + print ' making %i connections to:\n %s' % (N, url) + + sys.stdout.write(' first using the normal urllib handlers') + # first use normal opener + opener = urllib2.build_opener() + urllib2.install_opener(opener) + t1 = fetch(N, url) + print ' TIME: %.3f s' % t1 + + sys.stdout.write(' now using the keepalive handler ') + # now install the keepalive handler and try again + opener = urllib2.build_opener(HTTPHandler()) + urllib2.install_opener(opener) + t2 = fetch(N, url) + print ' TIME: %.3f s' % t2 + print ' improvement factor: %.2f' % (t1 / t2) + +def fetch(N, url, delay=0): + import time + lens = [] + starttime = time.time() + for i in range(N): + if delay and i > 0: + time.sleep(delay) + fo = urllib2.urlopen(url) + foo = fo.read() + fo.close() + lens.append(len(foo)) + diff = time.time() - starttime + + j = 0 + 
for i in lens[1:]: + j = j + 1 + if not i == lens[0]: + print "WARNING: inconsistent length on read %i: %i" % (j, i) + + return diff + +def test_timeout(url): + global DEBUG + dbbackup = DEBUG + class FakeLogger(object): + def debug(self, msg, *args): + print msg % args + info = warning = error = debug + DEBUG = FakeLogger() + print " fetching the file to establish a connection" + fo = urllib2.urlopen(url) + data1 = fo.read() + fo.close() + + i = 20 + print " waiting %i seconds for the server to close the connection" % i + while i > 0: + sys.stdout.write('\r %2i' % i) + sys.stdout.flush() + time.sleep(1) + i -= 1 + sys.stderr.write('\r') + + print " fetching the file a second time" + fo = urllib2.urlopen(url) + data2 = fo.read() + fo.close() + + if data1 == data2: + print ' data are identical' + else: + print ' ERROR: DATA DIFFER' + + DEBUG = dbbackup + + +def test(url, N=10): + print "checking error hander (do this on a non-200)" + try: error_handler(url) + except IOError: + print "exiting - exception will prevent further tests" + sys.exit() + print + print "performing continuity test (making sure stuff isn't corrupted)" + continuity(url) + print + print "performing speed comparison" + comp(N, url) + print + print "performing dropped-connection check" + test_timeout(url) + +if __name__ == '__main__': + import time + import sys + try: + N = int(sys.argv[1]) + url = sys.argv[2] + except: + print "%s " % sys.argv[0] + else: + test(url, N) diff --git a/websdk/mercurial/locale/da/LC_MESSAGES/hg.mo b/websdk/mercurial/locale/da/LC_MESSAGES/hg.mo new file mode 100644 index 0000000..20c7ec1 --- /dev/null +++ b/websdk/mercurial/locale/da/LC_MESSAGES/hg.mo Binary files differ diff --git a/websdk/mercurial/locale/de/LC_MESSAGES/hg.mo b/websdk/mercurial/locale/de/LC_MESSAGES/hg.mo new file mode 100644 index 0000000..88f9e80 --- /dev/null +++ b/websdk/mercurial/locale/de/LC_MESSAGES/hg.mo Binary files differ diff --git a/websdk/mercurial/locale/el/LC_MESSAGES/hg.mo 
b/websdk/mercurial/locale/el/LC_MESSAGES/hg.mo new file mode 100644 index 0000000..92eb59d --- /dev/null +++ b/websdk/mercurial/locale/el/LC_MESSAGES/hg.mo Binary files differ diff --git a/websdk/mercurial/locale/fr/LC_MESSAGES/hg.mo b/websdk/mercurial/locale/fr/LC_MESSAGES/hg.mo new file mode 100644 index 0000000..efa851e --- /dev/null +++ b/websdk/mercurial/locale/fr/LC_MESSAGES/hg.mo Binary files differ diff --git a/websdk/mercurial/locale/it/LC_MESSAGES/hg.mo b/websdk/mercurial/locale/it/LC_MESSAGES/hg.mo new file mode 100644 index 0000000..67c147b --- /dev/null +++ b/websdk/mercurial/locale/it/LC_MESSAGES/hg.mo Binary files differ diff --git a/websdk/mercurial/locale/ja/LC_MESSAGES/hg.mo b/websdk/mercurial/locale/ja/LC_MESSAGES/hg.mo new file mode 100644 index 0000000..2daf2d5 --- /dev/null +++ b/websdk/mercurial/locale/ja/LC_MESSAGES/hg.mo Binary files differ diff --git a/websdk/mercurial/locale/pt_BR/LC_MESSAGES/hg.mo b/websdk/mercurial/locale/pt_BR/LC_MESSAGES/hg.mo new file mode 100644 index 0000000..282ff96 --- /dev/null +++ b/websdk/mercurial/locale/pt_BR/LC_MESSAGES/hg.mo Binary files differ diff --git a/websdk/mercurial/locale/ro/LC_MESSAGES/hg.mo b/websdk/mercurial/locale/ro/LC_MESSAGES/hg.mo new file mode 100644 index 0000000..c120a34 --- /dev/null +++ b/websdk/mercurial/locale/ro/LC_MESSAGES/hg.mo Binary files differ diff --git a/websdk/mercurial/locale/ru/LC_MESSAGES/hg.mo b/websdk/mercurial/locale/ru/LC_MESSAGES/hg.mo new file mode 100644 index 0000000..eda573c --- /dev/null +++ b/websdk/mercurial/locale/ru/LC_MESSAGES/hg.mo Binary files differ diff --git a/websdk/mercurial/locale/sv/LC_MESSAGES/hg.mo b/websdk/mercurial/locale/sv/LC_MESSAGES/hg.mo new file mode 100644 index 0000000..1d53822 --- /dev/null +++ b/websdk/mercurial/locale/sv/LC_MESSAGES/hg.mo Binary files differ diff --git a/websdk/mercurial/locale/zh_CN/LC_MESSAGES/hg.mo b/websdk/mercurial/locale/zh_CN/LC_MESSAGES/hg.mo new file mode 100644 index 0000000..9f55d4e --- /dev/null +++ 
b/websdk/mercurial/locale/zh_CN/LC_MESSAGES/hg.mo Binary files differ diff --git a/websdk/mercurial/locale/zh_TW/LC_MESSAGES/hg.mo b/websdk/mercurial/locale/zh_TW/LC_MESSAGES/hg.mo new file mode 100644 index 0000000..5615355 --- /dev/null +++ b/websdk/mercurial/locale/zh_TW/LC_MESSAGES/hg.mo Binary files differ diff --git a/websdk/mercurial/localrepo.py b/websdk/mercurial/localrepo.py index 74d1c61..ce89f5e 120000..100644 --- a/websdk/mercurial/localrepo.py +++ b/websdk/mercurial/localrepo.py @@ -1 +1,2101 @@ -/usr/share/pyshared/mercurial/localrepo.py \ No newline at end of file +# localrepo.py - read/write repository class for mercurial +# +# Copyright 2005-2007 Matt Mackall +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +from node import bin, hex, nullid, nullrev, short +from i18n import _ +import repo, changegroup, subrepo, discovery, pushkey +import changelog, dirstate, filelog, manifest, context, bookmarks +import lock, transaction, store, encoding +import scmutil, util, extensions, hook, error, revset +import match as matchmod +import merge as mergemod +import tags as tagsmod +from lock import release +import weakref, errno, os, time, inspect +propertycache = util.propertycache +filecache = scmutil.filecache + +class localrepository(repo.repository): + capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey', + 'known', 'getbundle')) + supportedformats = set(('revlogv1', 'generaldelta')) + supported = supportedformats | set(('store', 'fncache', 'shared', + 'dotencode')) + + def __init__(self, baseui, path=None, create=False): + repo.repository.__init__(self) + self.root = os.path.realpath(util.expandpath(path)) + self.path = os.path.join(self.root, ".hg") + self.origroot = path + self.auditor = scmutil.pathauditor(self.root, self._checknested) + self.opener = scmutil.opener(self.path) + self.wopener = scmutil.opener(self.root) + self.baseui = 
baseui + self.ui = baseui.copy() + + try: + self.ui.readconfig(self.join("hgrc"), self.root) + extensions.loadall(self.ui) + except IOError: + pass + + if not os.path.isdir(self.path): + if create: + if not os.path.exists(path): + util.makedirs(path) + util.makedir(self.path, notindexed=True) + requirements = ["revlogv1"] + if self.ui.configbool('format', 'usestore', True): + os.mkdir(os.path.join(self.path, "store")) + requirements.append("store") + if self.ui.configbool('format', 'usefncache', True): + requirements.append("fncache") + if self.ui.configbool('format', 'dotencode', True): + requirements.append('dotencode') + # create an invalid changelog + self.opener.append( + "00changelog.i", + '\0\0\0\2' # represents revlogv2 + ' dummy changelog to prevent using the old repo layout' + ) + if self.ui.configbool('format', 'generaldelta', False): + requirements.append("generaldelta") + requirements = set(requirements) + else: + raise error.RepoError(_("repository %s not found") % path) + elif create: + raise error.RepoError(_("repository %s already exists") % path) + else: + try: + requirements = scmutil.readrequires(self.opener, self.supported) + except IOError, inst: + if inst.errno != errno.ENOENT: + raise + requirements = set() + + self.sharedpath = self.path + try: + s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n')) + if not os.path.exists(s): + raise error.RepoError( + _('.hg/sharedpath points to nonexistent directory %s') % s) + self.sharedpath = s + except IOError, inst: + if inst.errno != errno.ENOENT: + raise + + self.store = store.store(requirements, self.sharedpath, scmutil.opener) + self.spath = self.store.path + self.sopener = self.store.opener + self.sjoin = self.store.join + self.opener.createmode = self.store.createmode + self._applyrequirements(requirements) + if create: + self._writerequirements() + + + self._branchcache = None + self._branchcachetip = None + self.filterpats = {} + self._datafilters = {} + self._transref = 
self._lockref = self._wlockref = None + + # A cache for various files under .hg/ that tracks file changes, + # (used by the filecache decorator) + # + # Maps a property name to its util.filecacheentry + self._filecache = {} + + def _applyrequirements(self, requirements): + self.requirements = requirements + openerreqs = set(('revlogv1', 'generaldelta')) + self.sopener.options = dict((r, 1) for r in requirements + if r in openerreqs) + + def _writerequirements(self): + reqfile = self.opener("requires", "w") + for r in self.requirements: + reqfile.write("%s\n" % r) + reqfile.close() + + def _checknested(self, path): + """Determine if path is a legal nested repository.""" + if not path.startswith(self.root): + return False + subpath = path[len(self.root) + 1:] + + # XXX: Checking against the current working copy is wrong in + # the sense that it can reject things like + # + # $ hg cat -r 10 sub/x.txt + # + # if sub/ is no longer a subrepository in the working copy + # parent revision. + # + # However, it can of course also allow things that would have + # been rejected before, such as the above cat command if sub/ + # is a subrepository now, but was a normal directory before. + # The old path auditor would have rejected by mistake since it + # panics when it sees sub/.hg/. + # + # All in all, checking against the working copy seems sensible + # since we want to prevent access to nested repositories on + # the filesystem *now*. 
+ ctx = self[None] + parts = util.splitpath(subpath) + while parts: + prefix = os.sep.join(parts) + if prefix in ctx.substate: + if prefix == subpath: + return True + else: + sub = ctx.sub(prefix) + return sub.checknested(subpath[len(prefix) + 1:]) + else: + parts.pop() + return False + + @filecache('bookmarks') + def _bookmarks(self): + return bookmarks.read(self) + + @filecache('bookmarks.current') + def _bookmarkcurrent(self): + return bookmarks.readcurrent(self) + + def _writebookmarks(self, marks): + bookmarks.write(self) + + @filecache('00changelog.i', True) + def changelog(self): + c = changelog.changelog(self.sopener) + if 'HG_PENDING' in os.environ: + p = os.environ['HG_PENDING'] + if p.startswith(self.root): + c.readpending('00changelog.i.a') + return c + + @filecache('00manifest.i', True) + def manifest(self): + return manifest.manifest(self.sopener) + + @filecache('dirstate') + def dirstate(self): + warned = [0] + def validate(node): + try: + self.changelog.rev(node) + return node + except error.LookupError: + if not warned[0]: + warned[0] = True + self.ui.warn(_("warning: ignoring unknown" + " working parent %s!\n") % short(node)) + return nullid + + return dirstate.dirstate(self.opener, self.ui, self.root, validate) + + def __getitem__(self, changeid): + if changeid is None: + return context.workingctx(self) + return context.changectx(self, changeid) + + def __contains__(self, changeid): + try: + return bool(self.lookup(changeid)) + except error.RepoLookupError: + return False + + def __nonzero__(self): + return True + + def __len__(self): + return len(self.changelog) + + def __iter__(self): + for i in xrange(len(self)): + yield i + + def set(self, expr, *args): + ''' + Yield a context for each matching revision, after doing arg + replacement via revset.formatspec + ''' + + expr = revset.formatspec(expr, *args) + m = revset.match(None, expr) + for r in m(self, range(len(self))): + yield self[r] + + def url(self): + return 'file:' + self.root + + def 
hook(self, name, throw=False, **args): + return hook.hook(self.ui, self, name, throw, **args) + + tag_disallowed = ':\r\n' + + def _tag(self, names, node, message, local, user, date, extra={}): + if isinstance(names, str): + allchars = names + names = (names,) + else: + allchars = ''.join(names) + for c in self.tag_disallowed: + if c in allchars: + raise util.Abort(_('%r cannot be used in a tag name') % c) + + branches = self.branchmap() + for name in names: + self.hook('pretag', throw=True, node=hex(node), tag=name, + local=local) + if name in branches: + self.ui.warn(_("warning: tag %s conflicts with existing" + " branch name\n") % name) + + def writetags(fp, names, munge, prevtags): + fp.seek(0, 2) + if prevtags and prevtags[-1] != '\n': + fp.write('\n') + for name in names: + m = munge and munge(name) or name + if self._tagscache.tagtypes and name in self._tagscache.tagtypes: + old = self.tags().get(name, nullid) + fp.write('%s %s\n' % (hex(old), m)) + fp.write('%s %s\n' % (hex(node), m)) + fp.close() + + prevtags = '' + if local: + try: + fp = self.opener('localtags', 'r+') + except IOError: + fp = self.opener('localtags', 'a') + else: + prevtags = fp.read() + + # local tags are stored in the current charset + writetags(fp, names, None, prevtags) + for name in names: + self.hook('tag', node=hex(node), tag=name, local=local) + return + + try: + fp = self.wfile('.hgtags', 'rb+') + except IOError, e: + if e.errno != errno.ENOENT: + raise + fp = self.wfile('.hgtags', 'ab') + else: + prevtags = fp.read() + + # committed tags are stored in UTF-8 + writetags(fp, names, encoding.fromlocal, prevtags) + + fp.close() + + if '.hgtags' not in self.dirstate: + self[None].add(['.hgtags']) + + m = matchmod.exact(self.root, '', ['.hgtags']) + tagnode = self.commit(message, user, date, extra=extra, match=m) + + for name in names: + self.hook('tag', node=hex(node), tag=name, local=local) + + return tagnode + + def tag(self, names, node, message, local, user, date): + '''tag a 
revision with one or more symbolic names. + + names is a list of strings or, when adding a single tag, names may be a + string. + + if local is True, the tags are stored in a per-repository file. + otherwise, they are stored in the .hgtags file, and a new + changeset is committed with the change. + + keyword arguments: + + local: whether to store tags in non-version-controlled file + (default False) + + message: commit message to use if committing + + user: name of user to use if committing + + date: date tuple to use if committing''' + + if not local: + for x in self.status()[:5]: + if '.hgtags' in x: + raise util.Abort(_('working copy of .hgtags is changed ' + '(please commit .hgtags manually)')) + + self.tags() # instantiate the cache + self._tag(names, node, message, local, user, date) + + @propertycache + def _tagscache(self): + '''Returns a tagscache object that contains various tags related caches.''' + + # This simplifies its cache management by having one decorated + # function (this one) and the rest simply fetch things from it. + class tagscache(object): + def __init__(self): + # These two define the set of tags for this repository. tags + # maps tag name to node; tagtypes maps tag name to 'global' or + # 'local'. (Global tags are defined by .hgtags across all + # heads, and local tags are defined in .hg/localtags.) + # They constitute the in-memory cache of tags. + self.tags = self.tagtypes = None + + self.nodetagscache = self.tagslist = None + + cache = tagscache() + cache.tags, cache.tagtypes = self._findtags() + + return cache + + def tags(self): + '''return a mapping of tag to node''' + return self._tagscache.tags + + def _findtags(self): + '''Do the hard work of finding tags. Return a pair of dicts + (tags, tagtypes) where tags maps tag name to node, and tagtypes + maps tag name to a string like \'global\' or \'local\'. 
+ Subclasses or extensions are free to add their own tags, but + should be aware that the returned dicts will be retained for the + duration of the localrepo object.''' + + # XXX what tagtype should subclasses/extensions use? Currently + # mq and bookmarks add tags, but do not set the tagtype at all. + # Should each extension invent its own tag type? Should there + # be one tagtype for all such "virtual" tags? Or is the status + # quo fine? + + alltags = {} # map tag name to (node, hist) + tagtypes = {} + + tagsmod.findglobaltags(self.ui, self, alltags, tagtypes) + tagsmod.readlocaltags(self.ui, self, alltags, tagtypes) + + # Build the return dicts. Have to re-encode tag names because + # the tags module always uses UTF-8 (in order not to lose info + # writing to the cache), but the rest of Mercurial wants them in + # local encoding. + tags = {} + for (name, (node, hist)) in alltags.iteritems(): + if node != nullid: + try: + # ignore tags to unknown nodes + self.changelog.lookup(node) + tags[encoding.tolocal(name)] = node + except error.LookupError: + pass + tags['tip'] = self.changelog.tip() + tagtypes = dict([(encoding.tolocal(name), value) + for (name, value) in tagtypes.iteritems()]) + return (tags, tagtypes) + + def tagtype(self, tagname): + ''' + return the type of the given tag. 
result can be: + + 'local' : a local tag + 'global' : a global tag + None : tag does not exist + ''' + + return self._tagscache.tagtypes.get(tagname) + + def tagslist(self): + '''return a list of tags ordered by revision''' + if not self._tagscache.tagslist: + l = [] + for t, n in self.tags().iteritems(): + r = self.changelog.rev(n) + l.append((r, t, n)) + self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)] + + return self._tagscache.tagslist + + def nodetags(self, node): + '''return the tags associated with a node''' + if not self._tagscache.nodetagscache: + nodetagscache = {} + for t, n in self.tags().iteritems(): + nodetagscache.setdefault(n, []).append(t) + for tags in nodetagscache.itervalues(): + tags.sort() + self._tagscache.nodetagscache = nodetagscache + return self._tagscache.nodetagscache.get(node, []) + + def nodebookmarks(self, node): + marks = [] + for bookmark, n in self._bookmarks.iteritems(): + if n == node: + marks.append(bookmark) + return sorted(marks) + + def _branchtags(self, partial, lrev): + # TODO: rename this function? 
+ tiprev = len(self) - 1 + if lrev != tiprev: + ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1)) + self._updatebranchcache(partial, ctxgen) + self._writebranchcache(partial, self.changelog.tip(), tiprev) + + return partial + + def updatebranchcache(self): + tip = self.changelog.tip() + if self._branchcache is not None and self._branchcachetip == tip: + return self._branchcache + + oldtip = self._branchcachetip + self._branchcachetip = tip + if oldtip is None or oldtip not in self.changelog.nodemap: + partial, last, lrev = self._readbranchcache() + else: + lrev = self.changelog.rev(oldtip) + partial = self._branchcache + + self._branchtags(partial, lrev) + # this private cache holds all heads (not just tips) + self._branchcache = partial + + def branchmap(self): + '''returns a dictionary {branch: [branchheads]}''' + self.updatebranchcache() + return self._branchcache + + def branchtags(self): + '''return a dict where branch names map to the tipmost head of + the branch, open heads come before closed''' + bt = {} + for bn, heads in self.branchmap().iteritems(): + tip = heads[-1] + for h in reversed(heads): + if 'close' not in self.changelog.read(h)[5]: + tip = h + break + bt[bn] = tip + return bt + + def _readbranchcache(self): + partial = {} + try: + f = self.opener("cache/branchheads") + lines = f.read().split('\n') + f.close() + except (IOError, OSError): + return {}, nullid, nullrev + + try: + last, lrev = lines.pop(0).split(" ", 1) + last, lrev = bin(last), int(lrev) + if lrev >= len(self) or self[lrev].node() != last: + # invalidate the cache + raise ValueError('invalidating branch cache (tip differs)') + for l in lines: + if not l: + continue + node, label = l.split(" ", 1) + label = encoding.tolocal(label.strip()) + partial.setdefault(label, []).append(bin(node)) + except KeyboardInterrupt: + raise + except Exception, inst: + if self.ui.debugflag: + self.ui.warn(str(inst), '\n') + partial, last, lrev = {}, nullid, nullrev + return partial, last, lrev 
+ + def _writebranchcache(self, branches, tip, tiprev): + try: + f = self.opener("cache/branchheads", "w", atomictemp=True) + f.write("%s %s\n" % (hex(tip), tiprev)) + for label, nodes in branches.iteritems(): + for node in nodes: + f.write("%s %s\n" % (hex(node), encoding.fromlocal(label))) + f.close() + except (IOError, OSError): + pass + + def _updatebranchcache(self, partial, ctxgen): + # collect new branch entries + newbranches = {} + for c in ctxgen: + newbranches.setdefault(c.branch(), []).append(c.node()) + # if older branchheads are reachable from new ones, they aren't + # really branchheads. Note checking parents is insufficient: + # 1 (branch a) -> 2 (branch b) -> 3 (branch a) + for branch, newnodes in newbranches.iteritems(): + bheads = partial.setdefault(branch, []) + bheads.extend(newnodes) + if len(bheads) <= 1: + continue + bheads = sorted(bheads, key=lambda x: self[x].rev()) + # starting from tip means fewer passes over reachable + while newnodes: + latest = newnodes.pop() + if latest not in bheads: + continue + minbhrev = self[bheads[0]].node() + reachable = self.changelog.reachable(latest, minbhrev) + reachable.remove(latest) + if reachable: + bheads = [b for b in bheads if b not in reachable] + partial[branch] = bheads + + def lookup(self, key): + if isinstance(key, int): + return self.changelog.node(key) + elif key == '.': + return self.dirstate.p1() + elif key == 'null': + return nullid + elif key == 'tip': + return self.changelog.tip() + n = self.changelog._match(key) + if n: + return n + if key in self._bookmarks: + return self._bookmarks[key] + if key in self.tags(): + return self.tags()[key] + if key in self.branchtags(): + return self.branchtags()[key] + n = self.changelog._partialmatch(key) + if n: + return n + + # can't find key, check if it might have come from damaged dirstate + if key in self.dirstate.parents(): + raise error.Abort(_("working directory has unknown parent '%s'!") + % short(key)) + try: + if len(key) == 20: + key = 
hex(key) + except TypeError: + pass + raise error.RepoLookupError(_("unknown revision '%s'") % key) + + def lookupbranch(self, key, remote=None): + repo = remote or self + if key in repo.branchmap(): + return key + + repo = (remote and remote.local()) and remote or self + return repo[key].branch() + + def known(self, nodes): + nm = self.changelog.nodemap + return [(n in nm) for n in nodes] + + def local(self): + return self + + def join(self, f): + return os.path.join(self.path, f) + + def wjoin(self, f): + return os.path.join(self.root, f) + + def file(self, f): + if f[0] == '/': + f = f[1:] + return filelog.filelog(self.sopener, f) + + def changectx(self, changeid): + return self[changeid] + + def parents(self, changeid=None): + '''get list of changectxs for parents of changeid''' + return self[changeid].parents() + + def filectx(self, path, changeid=None, fileid=None): + """changeid can be a changeset revision, node, or tag. + fileid can be a file revision or node.""" + return context.filectx(self, path, changeid, fileid) + + def getcwd(self): + return self.dirstate.getcwd() + + def pathto(self, f, cwd=None): + return self.dirstate.pathto(f, cwd) + + def wfile(self, f, mode='r'): + return self.wopener(f, mode) + + def _link(self, f): + return os.path.islink(self.wjoin(f)) + + def _loadfilter(self, filter): + if filter not in self.filterpats: + l = [] + for pat, cmd in self.ui.configitems(filter): + if cmd == '!': + continue + mf = matchmod.match(self.root, '', [pat]) + fn = None + params = cmd + for name, filterfn in self._datafilters.iteritems(): + if cmd.startswith(name): + fn = filterfn + params = cmd[len(name):].lstrip() + break + if not fn: + fn = lambda s, c, **kwargs: util.filter(s, c) + # Wrap old filters not supporting keyword arguments + if not inspect.getargspec(fn)[2]: + oldfn = fn + fn = lambda s, c, **kwargs: oldfn(s, c) + l.append((mf, fn, params)) + self.filterpats[filter] = l + return self.filterpats[filter] + + def _filter(self, filterpats, 
filename, data): + for mf, fn, cmd in filterpats: + if mf(filename): + self.ui.debug("filtering %s through %s\n" % (filename, cmd)) + data = fn(data, cmd, ui=self.ui, repo=self, filename=filename) + break + + return data + + @propertycache + def _encodefilterpats(self): + return self._loadfilter('encode') + + @propertycache + def _decodefilterpats(self): + return self._loadfilter('decode') + + def adddatafilter(self, name, filter): + self._datafilters[name] = filter + + def wread(self, filename): + if self._link(filename): + data = os.readlink(self.wjoin(filename)) + else: + data = self.wopener.read(filename) + return self._filter(self._encodefilterpats, filename, data) + + def wwrite(self, filename, data, flags): + data = self._filter(self._decodefilterpats, filename, data) + if 'l' in flags: + self.wopener.symlink(data, filename) + else: + self.wopener.write(filename, data) + if 'x' in flags: + util.setflags(self.wjoin(filename), False, True) + + def wwritedata(self, filename, data): + return self._filter(self._decodefilterpats, filename, data) + + def transaction(self, desc): + tr = self._transref and self._transref() or None + if tr and tr.running(): + return tr.nest() + + # abort here if the journal already exists + if os.path.exists(self.sjoin("journal")): + raise error.RepoError( + _("abandoned transaction found - run hg recover")) + + journalfiles = self._writejournal(desc) + renames = [(x, undoname(x)) for x in journalfiles] + + tr = transaction.transaction(self.ui.warn, self.sopener, + self.sjoin("journal"), + aftertrans(renames), + self.store.createmode) + self._transref = weakref.ref(tr) + return tr + + def _writejournal(self, desc): + # save dirstate for rollback + try: + ds = self.opener.read("dirstate") + except IOError: + ds = "" + self.opener.write("journal.dirstate", ds) + self.opener.write("journal.branch", + encoding.fromlocal(self.dirstate.branch())) + self.opener.write("journal.desc", + "%d\n%s\n" % (len(self), desc)) + + bkname = 
self.join('bookmarks') + if os.path.exists(bkname): + util.copyfile(bkname, self.join('journal.bookmarks')) + else: + self.opener.write('journal.bookmarks', '') + + return (self.sjoin('journal'), self.join('journal.dirstate'), + self.join('journal.branch'), self.join('journal.desc'), + self.join('journal.bookmarks')) + + def recover(self): + lock = self.lock() + try: + if os.path.exists(self.sjoin("journal")): + self.ui.status(_("rolling back interrupted transaction\n")) + transaction.rollback(self.sopener, self.sjoin("journal"), + self.ui.warn) + self.invalidate() + return True + else: + self.ui.warn(_("no interrupted transaction available\n")) + return False + finally: + lock.release() + + def rollback(self, dryrun=False, force=False): + wlock = lock = None + try: + wlock = self.wlock() + lock = self.lock() + if os.path.exists(self.sjoin("undo")): + return self._rollback(dryrun, force) + else: + self.ui.warn(_("no rollback information available\n")) + return 1 + finally: + release(lock, wlock) + + def _rollback(self, dryrun, force): + ui = self.ui + try: + args = self.opener.read('undo.desc').splitlines() + (oldlen, desc, detail) = (int(args[0]), args[1], None) + if len(args) >= 3: + detail = args[2] + oldtip = oldlen - 1 + + if detail and ui.verbose: + msg = (_('repository tip rolled back to revision %s' + ' (undo %s: %s)\n') + % (oldtip, desc, detail)) + else: + msg = (_('repository tip rolled back to revision %s' + ' (undo %s)\n') + % (oldtip, desc)) + except IOError: + msg = _('rolling back unknown transaction\n') + desc = None + + if not force and self['.'] != self['tip'] and desc == 'commit': + raise util.Abort( + _('rollback of last commit while not checked out ' + 'may lose data'), hint=_('use -f to force')) + + ui.status(msg) + if dryrun: + return 0 + + parents = self.dirstate.parents() + transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn) + if os.path.exists(self.join('undo.bookmarks')): + util.rename(self.join('undo.bookmarks'), + 
self.join('bookmarks')) + self.invalidate() + + parentgone = (parents[0] not in self.changelog.nodemap or + parents[1] not in self.changelog.nodemap) + if parentgone: + util.rename(self.join('undo.dirstate'), self.join('dirstate')) + try: + branch = self.opener.read('undo.branch') + self.dirstate.setbranch(branch) + except IOError: + ui.warn(_('named branch could not be reset: ' + 'current branch is still \'%s\'\n') + % self.dirstate.branch()) + + self.dirstate.invalidate() + self.destroyed() + parents = tuple([p.rev() for p in self.parents()]) + if len(parents) > 1: + ui.status(_('working directory now based on ' + 'revisions %d and %d\n') % parents) + else: + ui.status(_('working directory now based on ' + 'revision %d\n') % parents) + return 0 + + def invalidatecaches(self): + try: + delattr(self, '_tagscache') + except AttributeError: + pass + + self._branchcache = None # in UTF-8 + self._branchcachetip = None + + def invalidatedirstate(self): + '''Invalidates the dirstate, causing the next call to dirstate + to check if it was modified since the last time it was read, + rereading it if it has. + + This is different to dirstate.invalidate() that it doesn't always + rereads the dirstate. Use dirstate.invalidate() if you want to + explicitly read the dirstate again (i.e. 
restoring it to a previous + known good state).''' + try: + delattr(self, 'dirstate') + except AttributeError: + pass + + def invalidate(self): + for k in self._filecache: + # dirstate is invalidated separately in invalidatedirstate() + if k == 'dirstate': + continue + + try: + delattr(self, k) + except AttributeError: + pass + self.invalidatecaches() + + def _lock(self, lockname, wait, releasefn, acquirefn, desc): + try: + l = lock.lock(lockname, 0, releasefn, desc=desc) + except error.LockHeld, inst: + if not wait: + raise + self.ui.warn(_("waiting for lock on %s held by %r\n") % + (desc, inst.locker)) + # default to 600 seconds timeout + l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")), + releasefn, desc=desc) + if acquirefn: + acquirefn() + return l + + def lock(self, wait=True): + '''Lock the repository store (.hg/store) and return a weak reference + to the lock. Use this before modifying the store (e.g. committing or + stripping). If you are opening a transaction, get a lock as well.)''' + l = self._lockref and self._lockref() + if l is not None and l.held: + l.lock() + return l + + def unlock(): + self.store.write() + for k, ce in self._filecache.items(): + if k == 'dirstate': + continue + ce.refresh() + + l = self._lock(self.sjoin("lock"), wait, unlock, + self.invalidate, _('repository %s') % self.origroot) + self._lockref = weakref.ref(l) + return l + + def wlock(self, wait=True): + '''Lock the non-store parts of the repository (everything under + .hg except .hg/store) and return a weak reference to the lock. 
+ Use this before modifying files in .hg.''' + l = self._wlockref and self._wlockref() + if l is not None and l.held: + l.lock() + return l + + def unlock(): + self.dirstate.write() + ce = self._filecache.get('dirstate') + if ce: + ce.refresh() + + l = self._lock(self.join("wlock"), wait, unlock, + self.invalidatedirstate, _('working directory of %s') % + self.origroot) + self._wlockref = weakref.ref(l) + return l + + def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist): + """ + commit an individual file as part of a larger transaction + """ + + fname = fctx.path() + text = fctx.data() + flog = self.file(fname) + fparent1 = manifest1.get(fname, nullid) + fparent2 = fparent2o = manifest2.get(fname, nullid) + + meta = {} + copy = fctx.renamed() + if copy and copy[0] != fname: + # Mark the new revision of this file as a copy of another + # file. This copy data will effectively act as a parent + # of this new revision. If this is a merge, the first + # parent will be the nullid (meaning "look up the copy data") + # and the second one will be the other parent. 
For example: + # + # 0 --- 1 --- 3 rev1 changes file foo + # \ / rev2 renames foo to bar and changes it + # \- 2 -/ rev3 should have bar with all changes and + # should record that bar descends from + # bar in rev2 and foo in rev1 + # + # this allows this merge to succeed: + # + # 0 --- 1 --- 3 rev4 reverts the content change from rev2 + # \ / merging rev3 and rev4 should use bar@rev2 + # \- 2 --- 4 as the merge base + # + + cfname = copy[0] + crev = manifest1.get(cfname) + newfparent = fparent2 + + if manifest2: # branch merge + if fparent2 == nullid or crev is None: # copied on remote side + if cfname in manifest2: + crev = manifest2[cfname] + newfparent = fparent1 + + # find source in nearest ancestor if we've lost track + if not crev: + self.ui.debug(" %s: searching for copy revision for %s\n" % + (fname, cfname)) + for ancestor in self[None].ancestors(): + if cfname in ancestor: + crev = ancestor[cfname].filenode() + break + + if crev: + self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev))) + meta["copy"] = cfname + meta["copyrev"] = hex(crev) + fparent1, fparent2 = nullid, newfparent + else: + self.ui.warn(_("warning: can't find ancestor for '%s' " + "copied from '%s'!\n") % (fname, cfname)) + + elif fparent2 != nullid: + # is one parent an ancestor of the other? + fparentancestor = flog.ancestor(fparent1, fparent2) + if fparentancestor == fparent1: + fparent1, fparent2 = fparent2, nullid + elif fparentancestor == fparent2: + fparent2 = nullid + + # is the file changed? + if fparent2 != nullid or flog.cmp(fparent1, text) or meta: + changelist.append(fname) + return flog.add(text, meta, tr, linkrev, fparent1, fparent2) + + # are just the flags changed during merge? + if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags(): + changelist.append(fname) + + return fparent1 + + def commit(self, text="", user=None, date=None, match=None, force=False, + editor=False, extra={}): + """Add a new revision to current repository. 
+ + Revision information is gathered from the working directory, + match can be used to filter the committed files. If editor is + supplied, it is called to get a commit message. + """ + + def fail(f, msg): + raise util.Abort('%s: %s' % (f, msg)) + + if not match: + match = matchmod.always(self.root, '') + + if not force: + vdirs = [] + match.dir = vdirs.append + match.bad = fail + + wlock = self.wlock() + try: + wctx = self[None] + merge = len(wctx.parents()) > 1 + + if (not force and merge and match and + (match.files() or match.anypats())): + raise util.Abort(_('cannot partially commit a merge ' + '(do not specify files or patterns)')) + + changes = self.status(match=match, clean=force) + if force: + changes[0].extend(changes[6]) # mq may commit unchanged files + + # check subrepos + subs = [] + removedsubs = set() + if '.hgsub' in wctx: + # only manage subrepos and .hgsubstate if .hgsub is present + for p in wctx.parents(): + removedsubs.update(s for s in p.substate if match(s)) + for s in wctx.substate: + removedsubs.discard(s) + if match(s) and wctx.sub(s).dirty(): + subs.append(s) + if (subs or removedsubs): + if (not match('.hgsub') and + '.hgsub' in (wctx.modified() + wctx.added())): + raise util.Abort( + _("can't commit subrepos without .hgsub")) + if '.hgsubstate' not in changes[0]: + changes[0].insert(0, '.hgsubstate') + if '.hgsubstate' in changes[2]: + changes[2].remove('.hgsubstate') + elif '.hgsub' in changes[2]: + # clean up .hgsubstate when .hgsub is removed + if ('.hgsubstate' in wctx and + '.hgsubstate' not in changes[0] + changes[1] + changes[2]): + changes[2].insert(0, '.hgsubstate') + + if subs and not self.ui.configbool('ui', 'commitsubrepos', False): + changedsubs = [s for s in subs if wctx.sub(s).dirty(True)] + if changedsubs: + raise util.Abort(_("uncommitted changes in subrepo %s") + % changedsubs[0], + hint=_("use --subrepos for recursive commit")) + + # make sure all explicit patterns are matched + if not force and match.files(): + 
matched = set(changes[0] + changes[1] + changes[2]) + + for f in match.files(): + if f == '.' or f in matched or f in wctx.substate: + continue + if f in changes[3]: # missing + fail(f, _('file not found!')) + if f in vdirs: # visited directory + d = f + '/' + for mf in matched: + if mf.startswith(d): + break + else: + fail(f, _("no match under directory!")) + elif f not in self.dirstate: + fail(f, _("file not tracked!")) + + if (not force and not extra.get("close") and not merge + and not (changes[0] or changes[1] or changes[2]) + and wctx.branch() == wctx.p1().branch()): + return None + + ms = mergemod.mergestate(self) + for f in changes[0]: + if f in ms and ms[f] == 'u': + raise util.Abort(_("unresolved merge conflicts " + "(see hg help resolve)")) + + cctx = context.workingctx(self, text, user, date, extra, changes) + if editor: + cctx._text = editor(self, cctx, subs) + edited = (text != cctx._text) + + # commit subs + if subs or removedsubs: + state = wctx.substate.copy() + for s in sorted(subs): + sub = wctx.sub(s) + self.ui.status(_('committing subrepository %s\n') % + subrepo.subrelpath(sub)) + sr = sub.commit(cctx._text, user, date) + state[s] = (state[s][0], sr) + subrepo.writestate(self, state) + + # Save commit message in case this transaction gets rolled back + # (e.g. by a pretxncommit hook). Leave the content alone on + # the assumption that the user will use the same editor again. 
+ msgfn = self.savecommitmessage(cctx._text) + + p1, p2 = self.dirstate.parents() + hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '') + try: + self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2) + ret = self.commitctx(cctx, True) + except: + if edited: + self.ui.write( + _('note: commit message saved in %s\n') % msgfn) + raise + + # update bookmarks, dirstate and mergestate + bookmarks.update(self, p1, ret) + for f in changes[0] + changes[1]: + self.dirstate.normal(f) + for f in changes[2]: + self.dirstate.drop(f) + self.dirstate.setparents(ret) + ms.reset() + finally: + wlock.release() + + self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2) + return ret + + def commitctx(self, ctx, error=False): + """Add a new revision to current repository. + Revision information is passed via the context argument. + """ + + tr = lock = None + removed = list(ctx.removed()) + p1, p2 = ctx.p1(), ctx.p2() + user = ctx.user() + + lock = self.lock() + try: + tr = self.transaction("commit") + trp = weakref.proxy(tr) + + if ctx.files(): + m1 = p1.manifest().copy() + m2 = p2.manifest() + + # check in files + new = {} + changed = [] + linkrev = len(self) + for f in sorted(ctx.modified() + ctx.added()): + self.ui.note(f + "\n") + try: + fctx = ctx[f] + new[f] = self._filecommit(fctx, m1, m2, linkrev, trp, + changed) + m1.set(f, fctx.flags()) + except OSError, inst: + self.ui.warn(_("trouble committing %s!\n") % f) + raise + except IOError, inst: + errcode = getattr(inst, 'errno', errno.ENOENT) + if error or errcode and errcode != errno.ENOENT: + self.ui.warn(_("trouble committing %s!\n") % f) + raise + else: + removed.append(f) + + # update manifest + m1.update(new) + removed = [f for f in sorted(removed) if f in m1 or f in m2] + drop = [f for f in removed if f in m1] + for f in drop: + del m1[f] + mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(), + p2.manifestnode(), (new, drop)) + files = changed + removed + else: + mn = p1.manifestnode() 
+ files = [] + + # update changelog + self.changelog.delayupdate() + n = self.changelog.add(mn, files, ctx.description(), + trp, p1.node(), p2.node(), + user, ctx.date(), ctx.extra().copy()) + p = lambda: self.changelog.writepending() and self.root or "" + xp1, xp2 = p1.hex(), p2 and p2.hex() or '' + self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1, + parent2=xp2, pending=p) + self.changelog.finalize(trp) + tr.close() + + if self._branchcache: + self.updatebranchcache() + return n + finally: + if tr: + tr.release() + lock.release() + + def destroyed(self): + '''Inform the repository that nodes have been destroyed. + Intended for use by strip and rollback, so there's a common + place for anything that has to be done after destroying history.''' + # XXX it might be nice if we could take the list of destroyed + # nodes, but I don't see an easy way for rollback() to do that + + # Ensure the persistent tag cache is updated. Doing it now + # means that the tag cache only has to worry about destroyed + # heads immediately after a strip/rollback. That in turn + # guarantees that "cachetip == currenttip" (comparing both rev + # and node) always means no nodes have been added or destroyed. + + # XXX this is suboptimal when qrefresh'ing: we strip the current + # head, refresh the tag cache, then immediately add a new head. + # But I think doing it this way is necessary for the "instant + # tag cache retrieval" case to work. + self.invalidatecaches() + + def walk(self, match, node=None): + ''' + walk recursively through the directory tree or a given + changeset, finding all files matched by the match + function + ''' + return self[node].walk(match) + + def status(self, node1='.', node2=None, match=None, + ignored=False, clean=False, unknown=False, + listsubrepos=False): + """return status of files between two nodes or node and working directory + + If node1 is None, use the first dirstate parent instead. + If node2 is None, compare node1 with working directory. 
+ """ + + def mfmatches(ctx): + mf = ctx.manifest().copy() + for fn in mf.keys(): + if not match(fn): + del mf[fn] + return mf + + if isinstance(node1, context.changectx): + ctx1 = node1 + else: + ctx1 = self[node1] + if isinstance(node2, context.changectx): + ctx2 = node2 + else: + ctx2 = self[node2] + + working = ctx2.rev() is None + parentworking = working and ctx1 == self['.'] + match = match or matchmod.always(self.root, self.getcwd()) + listignored, listclean, listunknown = ignored, clean, unknown + + # load earliest manifest first for caching reasons + if not working and ctx2.rev() < ctx1.rev(): + ctx2.manifest() + + if not parentworking: + def bad(f, msg): + if f not in ctx1: + self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg)) + match.bad = bad + + if working: # we need to scan the working dir + subrepos = [] + if '.hgsub' in self.dirstate: + subrepos = ctx2.substate.keys() + s = self.dirstate.status(match, subrepos, listignored, + listclean, listunknown) + cmp, modified, added, removed, deleted, unknown, ignored, clean = s + + # check for any possibly clean files + if parentworking and cmp: + fixup = [] + # do a full compare of any files that might have changed + for f in sorted(cmp): + if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f) + or ctx1[f].cmp(ctx2[f])): + modified.append(f) + else: + fixup.append(f) + + # update dirstate for files that are actually clean + if fixup: + if listclean: + clean += fixup + + try: + # updating the dirstate is optional + # so we don't wait on the lock + wlock = self.wlock(False) + try: + for f in fixup: + self.dirstate.normal(f) + finally: + wlock.release() + except error.LockError: + pass + + if not parentworking: + mf1 = mfmatches(ctx1) + if working: + # we are comparing working dir against non-parent + # generate a pseudo-manifest for the working dir + mf2 = mfmatches(self['.']) + for f in cmp + modified + added: + mf2[f] = None + mf2.set(f, ctx2.flags(f)) + for f in removed: + if f in mf2: + del mf2[f] + 
else: + # we are comparing two revisions + deleted, unknown, ignored = [], [], [] + mf2 = mfmatches(ctx2) + + modified, added, clean = [], [], [] + for fn in mf2: + if fn in mf1: + if (fn not in deleted and + (mf1.flags(fn) != mf2.flags(fn) or + (mf1[fn] != mf2[fn] and + (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))): + modified.append(fn) + elif listclean: + clean.append(fn) + del mf1[fn] + elif fn not in deleted: + added.append(fn) + removed = mf1.keys() + + if working and modified and not self.dirstate._checklink: + # Symlink placeholders may get non-symlink-like contents + # via user error or dereferencing by NFS or Samba servers, + # so we filter out any placeholders that don't look like a + # symlink + sane = [] + for f in modified: + if ctx2.flags(f) == 'l': + d = ctx2[f].data() + if len(d) >= 1024 or '\n' in d or util.binary(d): + self.ui.debug('ignoring suspect symlink placeholder' + ' "%s"\n' % f) + continue + sane.append(f) + modified = sane + + r = modified, added, removed, deleted, unknown, ignored, clean + + if listsubrepos: + for subpath, sub in subrepo.itersubrepos(ctx1, ctx2): + if working: + rev2 = None + else: + rev2 = ctx2.substate[subpath][1] + try: + submatch = matchmod.narrowmatcher(subpath, match) + s = sub.status(rev2, match=submatch, ignored=listignored, + clean=listclean, unknown=listunknown, + listsubrepos=True) + for rfiles, sfiles in zip(r, s): + rfiles.extend("%s/%s" % (subpath, f) for f in sfiles) + except error.LookupError: + self.ui.status(_("skipping missing subrepository: %s\n") + % subpath) + + for l in r: + l.sort() + return r + + def heads(self, start=None): + heads = self.changelog.heads(start) + # sort the output in rev descending order + return sorted(heads, key=self.changelog.rev, reverse=True) + + def branchheads(self, branch=None, start=None, closed=False): + '''return a (possibly filtered) list of heads for the given branch + + Heads are returned in topological order, from newest to oldest. 
+ If branch is None, use the dirstate branch. + If start is not None, return only heads reachable from start. + If closed is True, return heads that are marked as closed as well. + ''' + if branch is None: + branch = self[None].branch() + branches = self.branchmap() + if branch not in branches: + return [] + # the cache returns heads ordered lowest to highest + bheads = list(reversed(branches[branch])) + if start is not None: + # filter out the heads that cannot be reached from startrev + fbheads = set(self.changelog.nodesbetween([start], bheads)[2]) + bheads = [h for h in bheads if h in fbheads] + if not closed: + bheads = [h for h in bheads if + ('close' not in self.changelog.read(h)[5])] + return bheads + + def branches(self, nodes): + if not nodes: + nodes = [self.changelog.tip()] + b = [] + for n in nodes: + t = n + while True: + p = self.changelog.parents(n) + if p[1] != nullid or p[0] == nullid: + b.append((t, n, p[0], p[1])) + break + n = p[0] + return b + + def between(self, pairs): + r = [] + + for top, bottom in pairs: + n, l, i = top, [], 0 + f = 1 + + while n != bottom and n != nullid: + p = self.changelog.parents(n)[0] + if i == f: + l.append(n) + f = f * 2 + n = p + i += 1 + + r.append(l) + + return r + + def pull(self, remote, heads=None, force=False): + lock = self.lock() + try: + tmp = discovery.findcommonincoming(self, remote, heads=heads, + force=force) + common, fetch, rheads = tmp + if not fetch: + self.ui.status(_("no changes found\n")) + result = 0 + else: + if heads is None and list(common) == [nullid]: + self.ui.status(_("requesting all changes\n")) + elif heads is None and remote.capable('changegroupsubset'): + # issue1320, avoid a race if remote changed after discovery + heads = rheads + + if remote.capable('getbundle'): + cg = remote.getbundle('pull', common=common, + heads=heads or rheads) + elif heads is None: + cg = remote.changegroup(fetch, 'pull') + elif not remote.capable('changegroupsubset'): + raise util.Abort(_("partial pull 
cannot be done because " + "other repository doesn't support " + "changegroupsubset.")) + else: + cg = remote.changegroupsubset(fetch, heads, 'pull') + result = self.addchangegroup(cg, 'pull', remote.url(), + lock=lock) + finally: + lock.release() + + return result + + def checkpush(self, force, revs): + """Extensions can override this function if additional checks have + to be performed before pushing, or call it if they override push + command. + """ + pass + + def push(self, remote, force=False, revs=None, newbranch=False): + '''Push outgoing changesets (limited by revs) from the current + repository to remote. Return an integer: + - 0 means HTTP error *or* nothing to push + - 1 means we pushed and remote head count is unchanged *or* + we have outgoing changesets but refused to push + - other values as described by addchangegroup() + ''' + # there are two ways to push to remote repo: + # + # addchangegroup assumes local user can lock remote + # repo (local filesystem, old ssh servers). + # + # unbundle assumes local user cannot lock remote repo (new ssh + # servers, http servers). + + self.checkpush(force, revs) + lock = None + unbundle = remote.capable('unbundle') + if not unbundle: + lock = remote.lock() + try: + cg, remote_heads = discovery.prepush(self, remote, force, revs, + newbranch) + ret = remote_heads + if cg is not None: + if unbundle: + # local repo finds heads on server, finds out what + # revs it must push. once revs transferred, if server + # finds it has different heads (someone else won + # commit/push race), server aborts. 
+ if force: + remote_heads = ['force'] + # ssh: return remote's addchangegroup() + # http: return remote's addchangegroup() or 0 for error + ret = remote.unbundle(cg, remote_heads, 'push') + else: + # we return an integer indicating remote head count change + ret = remote.addchangegroup(cg, 'push', self.url(), + lock=lock) + finally: + if lock is not None: + lock.release() + + self.ui.debug("checking for updated bookmarks\n") + rb = remote.listkeys('bookmarks') + for k in rb.keys(): + if k in self._bookmarks: + nr, nl = rb[k], hex(self._bookmarks[k]) + if nr in self: + cr = self[nr] + cl = self[nl] + if cl in cr.descendants(): + r = remote.pushkey('bookmarks', k, nr, nl) + if r: + self.ui.status(_("updating bookmark %s\n") % k) + else: + self.ui.warn(_('updating bookmark %s' + ' failed!\n') % k) + + return ret + + def changegroupinfo(self, nodes, source): + if self.ui.verbose or source == 'bundle': + self.ui.status(_("%d changesets found\n") % len(nodes)) + if self.ui.debugflag: + self.ui.debug("list of changesets:\n") + for node in nodes: + self.ui.debug("%s\n" % hex(node)) + + def changegroupsubset(self, bases, heads, source): + """Compute a changegroup consisting of all the nodes that are + descendants of any of the bases and ancestors of any of the heads. + Return a chunkbuffer object whose read() method will return + successive changegroup chunks. + + It is fairly complex as determining which filenodes and which + manifest nodes need to be included for the changeset to be complete + is non-trivial. + + Another wrinkle is doing the reverse, figuring out which changeset in + the changegroup a particular filenode or manifestnode belongs to. 
+ """ + cl = self.changelog + if not bases: + bases = [nullid] + csets, bases, heads = cl.nodesbetween(bases, heads) + # We assume that all ancestors of bases are known + common = set(cl.ancestors(*[cl.rev(n) for n in bases])) + return self._changegroupsubset(common, csets, heads, source) + + def getbundle(self, source, heads=None, common=None): + """Like changegroupsubset, but returns the set difference between the + ancestors of heads and the ancestors common. + + If heads is None, use the local heads. If common is None, use [nullid]. + + The nodes in common might not all be known locally due to the way the + current discovery protocol works. + """ + cl = self.changelog + if common: + nm = cl.nodemap + common = [n for n in common if n in nm] + else: + common = [nullid] + if not heads: + heads = cl.heads() + common, missing = cl.findcommonmissing(common, heads) + if not missing: + return None + return self._changegroupsubset(common, missing, heads, source) + + def _changegroupsubset(self, commonrevs, csets, heads, source): + + cl = self.changelog + mf = self.manifest + mfs = {} # needed manifests + fnodes = {} # needed file nodes + changedfiles = set() + fstate = ['', {}] + count = [0] + + # can we go through the fast path ? 
+ heads.sort() + if heads == sorted(self.heads()): + return self._changegroup(csets, source) + + # slow path + self.hook('preoutgoing', throw=True, source=source) + self.changegroupinfo(csets, source) + + # filter any nodes that claim to be part of the known set + def prune(revlog, missing): + return [n for n in missing + if revlog.linkrev(revlog.rev(n)) not in commonrevs] + + def lookup(revlog, x): + if revlog == cl: + c = cl.read(x) + changedfiles.update(c[3]) + mfs.setdefault(c[0], x) + count[0] += 1 + self.ui.progress(_('bundling'), count[0], + unit=_('changesets'), total=len(csets)) + return x + elif revlog == mf: + clnode = mfs[x] + mdata = mf.readfast(x) + for f in changedfiles: + if f in mdata: + fnodes.setdefault(f, {}).setdefault(mdata[f], clnode) + count[0] += 1 + self.ui.progress(_('bundling'), count[0], + unit=_('manifests'), total=len(mfs)) + return mfs[x] + else: + self.ui.progress( + _('bundling'), count[0], item=fstate[0], + unit=_('files'), total=len(changedfiles)) + return fstate[1][x] + + bundler = changegroup.bundle10(lookup) + reorder = self.ui.config('bundle', 'reorder', 'auto') + if reorder == 'auto': + reorder = None + else: + reorder = util.parsebool(reorder) + + def gengroup(): + # Create a changenode group generator that will call our functions + # back to lookup the owning changenode and collect information. + for chunk in cl.group(csets, bundler, reorder=reorder): + yield chunk + self.ui.progress(_('bundling'), None) + + # Create a generator for the manifestnodes that calls our lookup + # and data collection functions back. + count[0] = 0 + for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder): + yield chunk + self.ui.progress(_('bundling'), None) + + mfs.clear() + + # Go through all our files in order sorted by name. 
+ count[0] = 0 + for fname in sorted(changedfiles): + filerevlog = self.file(fname) + if not len(filerevlog): + raise util.Abort(_("empty or missing revlog for %s") % fname) + fstate[0] = fname + fstate[1] = fnodes.pop(fname, {}) + + nodelist = prune(filerevlog, fstate[1]) + if nodelist: + count[0] += 1 + yield bundler.fileheader(fname) + for chunk in filerevlog.group(nodelist, bundler, reorder): + yield chunk + + # Signal that no more groups are left. + yield bundler.close() + self.ui.progress(_('bundling'), None) + + if csets: + self.hook('outgoing', node=hex(csets[0]), source=source) + + return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN') + + def changegroup(self, basenodes, source): + # to avoid a race we use changegroupsubset() (issue1320) + return self.changegroupsubset(basenodes, self.heads(), source) + + def _changegroup(self, nodes, source): + """Compute the changegroup of all nodes that we have that a recipient + doesn't. Return a chunkbuffer object whose read() method will return + successive changegroup chunks. + + This is much easier than the previous function as we can assume that + the recipient has any changenode we aren't sending them. 
+ + nodes is the set of nodes to send""" + + cl = self.changelog + mf = self.manifest + mfs = {} + changedfiles = set() + fstate = [''] + count = [0] + + self.hook('preoutgoing', throw=True, source=source) + self.changegroupinfo(nodes, source) + + revset = set([cl.rev(n) for n in nodes]) + + def gennodelst(log): + return [log.node(r) for r in log if log.linkrev(r) in revset] + + def lookup(revlog, x): + if revlog == cl: + c = cl.read(x) + changedfiles.update(c[3]) + mfs.setdefault(c[0], x) + count[0] += 1 + self.ui.progress(_('bundling'), count[0], + unit=_('changesets'), total=len(nodes)) + return x + elif revlog == mf: + count[0] += 1 + self.ui.progress(_('bundling'), count[0], + unit=_('manifests'), total=len(mfs)) + return cl.node(revlog.linkrev(revlog.rev(x))) + else: + self.ui.progress( + _('bundling'), count[0], item=fstate[0], + total=len(changedfiles), unit=_('files')) + return cl.node(revlog.linkrev(revlog.rev(x))) + + bundler = changegroup.bundle10(lookup) + reorder = self.ui.config('bundle', 'reorder', 'auto') + if reorder == 'auto': + reorder = None + else: + reorder = util.parsebool(reorder) + + def gengroup(): + '''yield a sequence of changegroup chunks (strings)''' + # construct a list of all changed files + + for chunk in cl.group(nodes, bundler, reorder=reorder): + yield chunk + self.ui.progress(_('bundling'), None) + + count[0] = 0 + for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder): + yield chunk + self.ui.progress(_('bundling'), None) + + count[0] = 0 + for fname in sorted(changedfiles): + filerevlog = self.file(fname) + if not len(filerevlog): + raise util.Abort(_("empty or missing revlog for %s") % fname) + fstate[0] = fname + nodelist = gennodelst(filerevlog) + if nodelist: + count[0] += 1 + yield bundler.fileheader(fname) + for chunk in filerevlog.group(nodelist, bundler, reorder): + yield chunk + yield bundler.close() + self.ui.progress(_('bundling'), None) + + if nodes: + self.hook('outgoing', node=hex(nodes[0]), 
source=source) + + return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN') + + def addchangegroup(self, source, srctype, url, emptyok=False, lock=None): + """Add the changegroup returned by source.read() to this repo. + srctype is a string like 'push', 'pull', or 'unbundle'. url is + the URL of the repo where this changegroup is coming from. + If lock is not None, the function takes ownership of the lock + and releases it after the changegroup is added. + + Return an integer summarizing the change to this repo: + - nothing changed or no source: 0 + - more heads than before: 1+added heads (2..n) + - fewer heads than before: -1-removed heads (-2..-n) + - number of heads stays the same: 1 + """ + def csmap(x): + self.ui.debug("add changeset %s\n" % short(x)) + return len(cl) + + def revmap(x): + return cl.rev(x) + + if not source: + return 0 + + self.hook('prechangegroup', throw=True, source=srctype, url=url) + + changesets = files = revisions = 0 + efiles = set() + + # write changelog data to temp files so concurrent readers will not see + # inconsistent view + cl = self.changelog + cl.delayupdate() + oldheads = cl.heads() + + tr = self.transaction("\n".join([srctype, util.hidepassword(url)])) + try: + trp = weakref.proxy(tr) + # pull off the changeset group + self.ui.status(_("adding changesets\n")) + clstart = len(cl) + class prog(object): + step = _('changesets') + count = 1 + ui = self.ui + total = None + def __call__(self): + self.ui.progress(self.step, self.count, unit=_('chunks'), + total=self.total) + self.count += 1 + pr = prog() + source.callback = pr + + source.changelogheader() + if (cl.addgroup(source, csmap, trp) is None + and not emptyok): + raise util.Abort(_("received changelog group is empty")) + clend = len(cl) + changesets = clend - clstart + for c in xrange(clstart, clend): + efiles.update(self[c].files()) + efiles = len(efiles) + self.ui.progress(_('changesets'), None) + + # pull off the manifest group + self.ui.status(_("adding 
manifests\n")) + pr.step = _('manifests') + pr.count = 1 + pr.total = changesets # manifests <= changesets + # no need to check for empty manifest group here: + # if the result of the merge of 1 and 2 is the same in 3 and 4, + # no new manifest will be created and the manifest group will + # be empty during the pull + source.manifestheader() + self.manifest.addgroup(source, revmap, trp) + self.ui.progress(_('manifests'), None) + + needfiles = {} + if self.ui.configbool('server', 'validate', default=False): + # validate incoming csets have their manifests + for cset in xrange(clstart, clend): + mfest = self.changelog.read(self.changelog.node(cset))[0] + mfest = self.manifest.readdelta(mfest) + # store file nodes we must see + for f, n in mfest.iteritems(): + needfiles.setdefault(f, set()).add(n) + + # process the files + self.ui.status(_("adding file changes\n")) + pr.step = _('files') + pr.count = 1 + pr.total = efiles + source.callback = None + + while True: + chunkdata = source.filelogheader() + if not chunkdata: + break + f = chunkdata["filename"] + self.ui.debug("adding %s revisions\n" % f) + pr() + fl = self.file(f) + o = len(fl) + if fl.addgroup(source, revmap, trp) is None: + raise util.Abort(_("received file revlog group is empty")) + revisions += len(fl) - o + files += 1 + if f in needfiles: + needs = needfiles[f] + for new in xrange(o, len(fl)): + n = fl.node(new) + if n in needs: + needs.remove(n) + if not needs: + del needfiles[f] + self.ui.progress(_('files'), None) + + for f, needs in needfiles.iteritems(): + fl = self.file(f) + for n in needs: + try: + fl.rev(n) + except error.LookupError: + raise util.Abort( + _('missing file data for %s:%s - run hg verify') % + (f, hex(n))) + + dh = 0 + if oldheads: + heads = cl.heads() + dh = len(heads) - len(oldheads) + for h in heads: + if h not in oldheads and 'close' in self[h].extra(): + dh -= 1 + htext = "" + if dh: + htext = _(" (%+d heads)") % dh + + self.ui.status(_("added %d changesets" + " with %d 
changes to %d files%s\n") + % (changesets, revisions, files, htext)) + + if changesets > 0: + p = lambda: cl.writepending() and self.root or "" + self.hook('pretxnchangegroup', throw=True, + node=hex(cl.node(clstart)), source=srctype, + url=url, pending=p) + + # make changelog see real files again + cl.finalize(trp) + + tr.close() + finally: + tr.release() + if lock: + lock.release() + + if changesets > 0: + # forcefully update the on-disk branch cache + self.ui.debug("updating the branch cache\n") + self.updatebranchcache() + self.hook("changegroup", node=hex(cl.node(clstart)), + source=srctype, url=url) + + for i in xrange(clstart, clend): + self.hook("incoming", node=hex(cl.node(i)), + source=srctype, url=url) + + # never return 0 here: + if dh < 0: + return dh - 1 + else: + return dh + 1 + + def stream_in(self, remote, requirements): + lock = self.lock() + try: + fp = remote.stream_out() + l = fp.readline() + try: + resp = int(l) + except ValueError: + raise error.ResponseError( + _('Unexpected response from remote server:'), l) + if resp == 1: + raise util.Abort(_('operation forbidden by server')) + elif resp == 2: + raise util.Abort(_('locking the remote repository failed')) + elif resp != 0: + raise util.Abort(_('the server sent an unknown error code')) + self.ui.status(_('streaming all changes\n')) + l = fp.readline() + try: + total_files, total_bytes = map(int, l.split(' ', 1)) + except (ValueError, TypeError): + raise error.ResponseError( + _('Unexpected response from remote server:'), l) + self.ui.status(_('%d files to transfer, %s of data\n') % + (total_files, util.bytecount(total_bytes))) + start = time.time() + for i in xrange(total_files): + # XXX doesn't support '\n' or '\r' in filenames + l = fp.readline() + try: + name, size = l.split('\0', 1) + size = int(size) + except (ValueError, TypeError): + raise error.ResponseError( + _('Unexpected response from remote server:'), l) + self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size))) + # for 
backwards compat, name was partially encoded + ofp = self.sopener(store.decodedir(name), 'w') + for chunk in util.filechunkiter(fp, limit=size): + ofp.write(chunk) + ofp.close() + elapsed = time.time() - start + if elapsed <= 0: + elapsed = 0.001 + self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') % + (util.bytecount(total_bytes), elapsed, + util.bytecount(total_bytes / elapsed))) + + # new requirements = old non-format requirements + new format-related + # requirements from the streamed-in repository + requirements.update(set(self.requirements) - self.supportedformats) + self._applyrequirements(requirements) + self._writerequirements() + + self.invalidate() + return len(self.heads()) + 1 + finally: + lock.release() + + def clone(self, remote, heads=[], stream=False): + '''clone remote repository. + + keyword arguments: + heads: list of revs to clone (forces use of pull) + stream: use streaming clone if possible''' + + # now, all clients that can request uncompressed clones can + # read repo formats supported by all servers that can serve + # them. + + # if revlog format changes, client will have to check version + # and format flags on "stream" capability, and use + # uncompressed only if compatible. 
+ + if stream and not heads: + # 'stream' means remote revlog format is revlogv1 only + if remote.capable('stream'): + return self.stream_in(remote, set(('revlogv1',))) + # otherwise, 'streamreqs' contains the remote revlog format + streamreqs = remote.capable('streamreqs') + if streamreqs: + streamreqs = set(streamreqs.split(',')) + # if we support it, stream in and adjust our requirements + if not streamreqs - self.supportedformats: + return self.stream_in(remote, streamreqs) + return self.pull(remote, heads) + + def pushkey(self, namespace, key, old, new): + self.hook('prepushkey', throw=True, namespace=namespace, key=key, + old=old, new=new) + ret = pushkey.push(self, namespace, key, old, new) + self.hook('pushkey', namespace=namespace, key=key, old=old, new=new, + ret=ret) + return ret + + def listkeys(self, namespace): + self.hook('prelistkeys', throw=True, namespace=namespace) + values = pushkey.list(self, namespace) + self.hook('listkeys', namespace=namespace, values=values) + return values + + def debugwireargs(self, one, two, three=None, four=None, five=None): + '''used to test argument passing over the wire''' + return "%s %s %s %s %s" % (one, two, three, four, five) + + def savecommitmessage(self, text): + fp = self.opener('last-message.txt', 'wb') + try: + fp.write(text) + finally: + fp.close() + return self.pathto(fp.name[len(self.root)+1:]) + +# used to avoid circular references so destructors work +def aftertrans(files): + renamefiles = [tuple(t) for t in files] + def a(): + for src, dest in renamefiles: + util.rename(src, dest) + return a + +def undoname(fn): + base, name = os.path.split(fn) + assert name.startswith('journal') + return os.path.join(base, name.replace('journal', 'undo', 1)) + +def instance(ui, path, create): + return localrepository(ui, util.urllocalpath(path), create) + +def islocal(path): + return True diff --git a/websdk/mercurial/lock.py b/websdk/mercurial/lock.py index 39206c3..bf33f02 120000..100644 --- 
a/websdk/mercurial/lock.py +++ b/websdk/mercurial/lock.py @@ -1 +1,137 @@ -/usr/share/pyshared/mercurial/lock.py \ No newline at end of file +# lock.py - simple advisory locking scheme for mercurial +# +# Copyright 2005, 2006 Matt Mackall +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +import util, error +import errno, os, socket, time +import warnings + +class lock(object): + '''An advisory lock held by one process to control access to a set + of files. Non-cooperating processes or incorrectly written scripts + can ignore Mercurial's locking scheme and stomp all over the + repository, so don't do that. + + Typically used via localrepository.lock() to lock the repository + store (.hg/store/) or localrepository.wlock() to lock everything + else under .hg/.''' + + # lock is symlink on platforms that support it, file on others. + + # symlink is used because create of directory entry and contents + # are atomic even over nfs. 
+ + # old-style lock: symlink to pid + # new-style lock: symlink to hostname:pid + + _host = None + + def __init__(self, file, timeout=-1, releasefn=None, desc=None): + self.f = file + self.held = 0 + self.timeout = timeout + self.releasefn = releasefn + self.desc = desc + self.lock() + + def __del__(self): + if self.held: + warnings.warn("use lock.release instead of del lock", + category=DeprecationWarning, + stacklevel=2) + + # ensure the lock will be removed + # even if recursive locking did occur + self.held = 1 + + self.release() + + def lock(self): + timeout = self.timeout + while True: + try: + self.trylock() + return 1 + except error.LockHeld, inst: + if timeout != 0: + time.sleep(1) + if timeout > 0: + timeout -= 1 + continue + raise error.LockHeld(errno.ETIMEDOUT, inst.filename, self.desc, + inst.locker) + + def trylock(self): + if self.held: + self.held += 1 + return + if lock._host is None: + lock._host = socket.gethostname() + lockname = '%s:%s' % (lock._host, os.getpid()) + while not self.held: + try: + util.makelock(lockname, self.f) + self.held = 1 + except (OSError, IOError), why: + if why.errno == errno.EEXIST: + locker = self.testlock() + if locker is not None: + raise error.LockHeld(errno.EAGAIN, self.f, self.desc, + locker) + else: + raise error.LockUnavailable(why.errno, why.strerror, + why.filename, self.desc) + + def testlock(self): + """return id of locker if lock is valid, else None. + + If old-style lock, we cannot tell what machine locker is on. + with new-style lock, if locker is on this machine, we can + see if locker is alive. If locker is on this machine but + not alive, we can safely break lock. + + The lock file is only deleted when None is returned. 
+ + """ + locker = util.readlock(self.f) + try: + host, pid = locker.split(":", 1) + except ValueError: + return locker + if host != lock._host: + return locker + try: + pid = int(pid) + except ValueError: + return locker + if util.testpid(pid): + return locker + # if locker dead, break lock. must do this with another lock + # held, or can race and break valid lock. + try: + l = lock(self.f + '.break', timeout=0) + util.unlink(self.f) + l.release() + except error.LockError: + return locker + + def release(self): + if self.held > 1: + self.held -= 1 + elif self.held == 1: + self.held = 0 + if self.releasefn: + self.releasefn() + try: + util.unlink(self.f) + except OSError: + pass + +def release(*locks): + for lock in locks: + if lock is not None: + lock.release() + diff --git a/websdk/mercurial/lsprof.py b/websdk/mercurial/lsprof.py index 2496191..7ce6f67 120000..100644 --- a/websdk/mercurial/lsprof.py +++ b/websdk/mercurial/lsprof.py @@ -1 +1,109 @@ -/usr/share/pyshared/mercurial/lsprof.py \ No newline at end of file +import sys +from _lsprof import Profiler, profiler_entry + +__all__ = ['profile', 'Stats'] + +def profile(f, *args, **kwds): + """XXX docstring""" + p = Profiler() + p.enable(subcalls=True, builtins=True) + try: + f(*args, **kwds) + finally: + p.disable() + return Stats(p.getstats()) + + +class Stats(object): + """XXX docstring""" + + def __init__(self, data): + self.data = data + + def sort(self, crit="inlinetime"): + """XXX docstring""" + if crit not in profiler_entry.__dict__: + raise ValueError("Can't sort by %s" % crit) + self.data.sort(key=lambda x: getattr(x, crit), reverse=True) + for e in self.data: + if e.calls: + e.calls.sort(key=lambda x: getattr(x, crit), reverse=True) + + def pprint(self, top=None, file=None, limit=None, climit=None): + """XXX docstring""" + if file is None: + file = sys.stdout + d = self.data + if top is not None: + d = d[:top] + cols = "% 12s %12s %11.4f %11.4f %s\n" + hcols = "% 12s %12s %12s %12s %s\n" + 
file.write(hcols % ("CallCount", "Recursive", "Total(ms)", + "Inline(ms)", "module:lineno(function)")) + count = 0 + for e in d: + file.write(cols % (e.callcount, e.reccallcount, e.totaltime, + e.inlinetime, label(e.code))) + count += 1 + if limit is not None and count == limit: + return + ccount = 0 + if e.calls: + for se in e.calls: + file.write(cols % ("+%s" % se.callcount, se.reccallcount, + se.totaltime, se.inlinetime, + "+%s" % label(se.code))) + count += 1 + ccount += 1 + if limit is not None and count == limit: + return + if climit is not None and ccount == climit: + break + + def freeze(self): + """Replace all references to code objects with string + descriptions; this makes it possible to pickle the instance.""" + + # this code is probably rather ickier than it needs to be! + for i in range(len(self.data)): + e = self.data[i] + if not isinstance(e.code, str): + self.data[i] = type(e)((label(e.code),) + e[1:]) + if e.calls: + for j in range(len(e.calls)): + se = e.calls[j] + if not isinstance(se.code, str): + e.calls[j] = type(se)((label(se.code),) + se[1:]) + +_fn2mod = {} + +def label(code): + if isinstance(code, str): + return code + try: + mname = _fn2mod[code.co_filename] + except KeyError: + for k, v in list(sys.modules.iteritems()): + if v is None: + continue + if not isinstance(getattr(v, '__file__', None), str): + continue + if v.__file__.startswith(code.co_filename): + mname = _fn2mod[code.co_filename] = k + break + else: + mname = _fn2mod[code.co_filename] = '<%s>' % code.co_filename + + return '%s:%d(%s)' % (mname, code.co_firstlineno, code.co_name) + + +if __name__ == '__main__': + import os + sys.argv = sys.argv[1:] + if not sys.argv: + print >> sys.stderr, "usage: lsprof.py diff --git a/websdk/mercurial/templates/coal/map b/websdk/mercurial/templates/coal/map new file mode 100644 index 0000000..39d04e7 --- /dev/null +++ b/websdk/mercurial/templates/coal/map @@ -0,0 +1,214 @@ +default = 'shortlog' + +mimetype = 'text/html; charset={encoding}' 
+header = header.tmpl +footer = ../paper/footer.tmpl +search = ../paper/search.tmpl + +changelog = ../paper/shortlog.tmpl +shortlog = ../paper/shortlog.tmpl +shortlogentry = ../paper/shortlogentry.tmpl +graph = ../paper/graph.tmpl + +help = ../paper/help.tmpl +helptopics = ../paper/helptopics.tmpl + +helpentry = '{topic|escape}{summary|escape}' + +naventry = '{label|escape} ' +navshortentry = '{label|escape} ' +navgraphentry = '{label|escape} ' +filenaventry = '{label|escape} ' +filedifflink = '{file|escape} ' +filenodelink = '{file|escape} ' +filenolink = '{file|escape} ' +fileellipses = '...' +diffstatlink = ../paper/diffstat.tmpl +diffstatnolink = ../paper/diffstat.tmpl +changelogentry = ../paper/shortlogentry.tmpl +searchentry = ../paper/shortlogentry.tmpl +changeset = ../paper/changeset.tmpl +manifest = ../paper/manifest.tmpl + +nav = '{before%naventry} {after%naventry}' +navshort = '{before%navshortentry}{after%navshortentry}' +navgraph = '{before%navgraphentry}{after%navgraphentry}' +filenav = '{before%filenaventry}{after%filenaventry}' + +direntry = ' + + + + dir. {basename|escape}/ + + + {emptydirs|escape} + + + + drwxr-xr-x + ' + +fileentry = ' + + + + file {basename|escape} + + + {size} + {permissions|permissions} + ' + +filerevision = ../paper/filerevision.tmpl +fileannotate = ../paper/fileannotate.tmpl +filediff = ../paper/filediff.tmpl +filelog = ../paper/filelog.tmpl +fileline = ' +
{linenumber} {line|escape}
' +filelogentry = ../paper/filelogentry.tmpl + +annotateline = ' + + + {author|user}@{rev} + + {linenumber} {line|escape} + ' + +diffblock = '
{lines}
' +difflineplus = '{linenumber} {line|escape}' +difflineminus = '{linenumber} {line|escape}' +difflineat = '{linenumber} {line|escape}' +diffline = '{linenumber} {line|escape}' + +changelogparent = ' + + parent {rev}: + {node|short} + ' + +changesetparent = '{node|short} ' + +filerevparent = '{rename%filerename}{node|short} ' +filerevchild = '{node|short} ' + +filerename = '{file|escape}@' +filelogrename = ' + + base + + {file|escape}@{node|short} + + ' +fileannotateparent = ' + + parent: + + + {rename%filerename}{node|short} + + + ' +changesetchild = ' {node|short}' +changelogchild = ' + + child + + + {node|short} + + + ' +fileannotatechild = ' + + child: + + + {node|short} + + + ' +tags = ../paper/tags.tmpl +tagentry = ' + + + + {tag|escape} + + + + {node|short} + + ' +bookmarks = ../paper/bookmarks.tmpl +bookmarkentry = ' + + + + {bookmark|escape} + + + + {node|short} + + ' +branches = ../paper/branches.tmpl +branchentry = ' + + + + {branch|escape} + + + + {node|short} + + ' +changelogtag = '{name|escape} ' +changesettag = '{tag|escape} ' +changesetbookmark = '{bookmark|escape} ' +changelogbranchhead = '{name|escape} ' +changelogbranchname = '{name|escape} ' + +filediffparent = ' + + parent {rev}: + {node|short} + ' +filelogparent = ' + + parent {rev}: + {node|short} + ' +filediffchild = ' + + child {rev}: + {node|short} + + ' +filelogchild = ' + + child {rev}: + {node|short} + ' + +indexentry = ' + + {name|escape} + {description} + {contact|obfuscate} + {lastchange|rfc822date} + {archives%indexarchiveentry} + \n' +indexarchiveentry = ' ↓{type|escape}' +index = ../paper/index.tmpl +archiveentry = ' +
  • + {type|escape} +
  • ' +notfound = ../paper/notfound.tmpl +error = ../paper/error.tmpl +urlparameter = '{separator}{name}={value|urlescape}' +hiddenformentry = '' diff --git a/websdk/mercurial/templates/gitweb/bookmarks.tmpl b/websdk/mercurial/templates/gitweb/bookmarks.tmpl new file mode 100644 index 0000000..fc0e66a --- /dev/null +++ b/websdk/mercurial/templates/gitweb/bookmarks.tmpl @@ -0,0 +1,32 @@ +{header} +{repo|escape}: Bookmarks + + + + + + + + + +
     
    + +{entries%bookmarkentry} +
    + +{footer} diff --git a/websdk/mercurial/templates/gitweb/branches.tmpl b/websdk/mercurial/templates/gitweb/branches.tmpl new file mode 100644 index 0000000..89f5457 --- /dev/null +++ b/websdk/mercurial/templates/gitweb/branches.tmpl @@ -0,0 +1,32 @@ +{header} +{repo|escape}: Branches + + + + + + + + + +
     
    + +{entries%branchentry} +
    + +{footer} diff --git a/websdk/mercurial/templates/gitweb/changelog.tmpl b/websdk/mercurial/templates/gitweb/changelog.tmpl new file mode 100644 index 0000000..56d16d0 --- /dev/null +++ b/websdk/mercurial/templates/gitweb/changelog.tmpl @@ -0,0 +1,41 @@ +{header} +{repo|escape}: Changelog + + + + + + + +
    +{sessionvars%hiddenformentry} + +
    + + + +{entries%changelogentry} + + + +{footer} diff --git a/websdk/mercurial/templates/gitweb/changelogentry.tmpl b/websdk/mercurial/templates/gitweb/changelogentry.tmpl new file mode 100644 index 0000000..3935de6 --- /dev/null +++ b/websdk/mercurial/templates/gitweb/changelogentry.tmpl @@ -0,0 +1,14 @@ + +
    + +{author|obfuscate} [{date|rfc822date}] rev {rev}
    +
    +
    +{desc|strip|escape|addbreaks|nonempty} +
    +
    +
    diff --git a/websdk/mercurial/templates/gitweb/changeset.tmpl b/websdk/mercurial/templates/gitweb/changeset.tmpl new file mode 100644 index 0000000..7c8f731 --- /dev/null +++ b/websdk/mercurial/templates/gitweb/changeset.tmpl @@ -0,0 +1,53 @@ +{header} +{repo|escape}: changeset {rev}:{node|short} + + + + + + + + + + +
    + + + +{branch%changesetbranch} + +{parent%changesetparent} +{child%changesetchild} +
    author{author|obfuscate}
    {date|rfc822date}
    changeset {rev}{node|short}
    + +
    +{desc|strip|escape|addbreaks|nonempty} +
    +
    +
    + +{files} +
    + +
    {diff}
    + +{footer} diff --git a/websdk/mercurial/templates/gitweb/error.tmpl b/websdk/mercurial/templates/gitweb/error.tmpl new file mode 100644 index 0000000..b59046f --- /dev/null +++ b/websdk/mercurial/templates/gitweb/error.tmpl @@ -0,0 +1,33 @@ +{header} +{repo|escape}: Error + + + + + + + + + +
    +
    +An error occurred while processing your request
    +
    +{error|escape} +
    + +{footer} diff --git a/websdk/mercurial/templates/gitweb/fileannotate.tmpl b/websdk/mercurial/templates/gitweb/fileannotate.tmpl new file mode 100644 index 0000000..abb1aed --- /dev/null +++ b/websdk/mercurial/templates/gitweb/fileannotate.tmpl @@ -0,0 +1,65 @@ +{header} +{repo|escape}: {file|escape}@{node|short} (annotated) + + + + + + + + + +
    {file|escape}
    + +
    + + + + + + + +{branch%filerevbranch} + + + +{parent%fileannotateparent} +{child%fileannotatechild} + + + +
    author{author|obfuscate}
    {date|rfc822date}
    changeset {rev}{node|short}
    permissions{permissions|permissions}
    +
    + +
    +{desc|strip|escape|addbreaks|nonempty} +
    +
    + +{annotate%annotateline} +
    +
    + +{footer} diff --git a/websdk/mercurial/templates/gitweb/filediff.tmpl b/websdk/mercurial/templates/gitweb/filediff.tmpl new file mode 100644 index 0000000..5450adb --- /dev/null +++ b/websdk/mercurial/templates/gitweb/filediff.tmpl @@ -0,0 +1,50 @@ +{header} +{repo|escape}: diff {file|escape} + + + + + + + + + +
    {file|escape}
    + + +{branch%filerevbranch} + + + +{parent%filediffparent} +{child%filediffchild} +
    changeset {rev}{node|short}
    + +
    + +
    +{diff} +
    + +{footer} diff --git a/websdk/mercurial/templates/gitweb/filelog.tmpl b/websdk/mercurial/templates/gitweb/filelog.tmpl new file mode 100644 index 0000000..52c042b --- /dev/null +++ b/websdk/mercurial/templates/gitweb/filelog.tmpl @@ -0,0 +1,42 @@ +{header} +{repo|escape}: File revisions + + + + + + + + + +
    {file|urlescape}
    + + +{entries%filelogentry} +
    + + + +{footer} diff --git a/websdk/mercurial/templates/gitweb/filerevision.tmpl b/websdk/mercurial/templates/gitweb/filerevision.tmpl new file mode 100644 index 0000000..966d254 --- /dev/null +++ b/websdk/mercurial/templates/gitweb/filerevision.tmpl @@ -0,0 +1,64 @@ +{header} +{repo|escape}: {file|escape}@{node|short} + + + + + + + + + +
    {file|escape}
    + +
    + + + + + + + +{branch%filerevbranch} + + + +{parent%filerevparent} +{child%filerevchild} + + + +
    author{author|obfuscate}
    {date|rfc822date}
    changeset {rev}{node|short}
    permissions{permissions|permissions}
    +
    + +
    +{desc|strip|escape|addbreaks|nonempty} +
    + +
    +{text%fileline} +
    + +{footer} diff --git a/websdk/mercurial/templates/gitweb/footer.tmpl b/websdk/mercurial/templates/gitweb/footer.tmpl new file mode 100644 index 0000000..298800b --- /dev/null +++ b/websdk/mercurial/templates/gitweb/footer.tmpl @@ -0,0 +1,12 @@ + + + + diff --git a/websdk/mercurial/templates/gitweb/graph.tmpl b/websdk/mercurial/templates/gitweb/graph.tmpl new file mode 100644 index 0000000..0ddcf18 --- /dev/null +++ b/websdk/mercurial/templates/gitweb/graph.tmpl @@ -0,0 +1,128 @@ +{header} +{repo|escape}: Graph + + + + + + + + +
    +{sessionvars%hiddenformentry} + +
    + + +
     
    + + + +
    +
      + +
        +
        + + + + + +{footer} diff --git a/websdk/mercurial/templates/gitweb/header.tmpl b/websdk/mercurial/templates/gitweb/header.tmpl new file mode 100644 index 0000000..8074dde --- /dev/null +++ b/websdk/mercurial/templates/gitweb/header.tmpl @@ -0,0 +1,8 @@ + + + + + + + + diff --git a/websdk/mercurial/templates/gitweb/help.tmpl b/websdk/mercurial/templates/gitweb/help.tmpl new file mode 100644 index 0000000..2e90b82 --- /dev/null +++ b/websdk/mercurial/templates/gitweb/help.tmpl @@ -0,0 +1,33 @@ +{header} +{repo|escape}: Branches + + + + + + + + + +
         
        + +
        +{doc|escape}
        +
        + +{footer} diff --git a/websdk/mercurial/templates/gitweb/helptopics.tmpl b/websdk/mercurial/templates/gitweb/helptopics.tmpl new file mode 100644 index 0000000..2f6c26d --- /dev/null +++ b/websdk/mercurial/templates/gitweb/helptopics.tmpl @@ -0,0 +1,39 @@ +{header} +{repo|escape}: Branches + + + + + + + + + +
         
        + + +{topics % helpentry} + + +{earlycommands % helpentry} + + +{othercommands % helpentry} +

        Topics

        Main Commands

        Other Commands

        + +{footer} diff --git a/websdk/mercurial/templates/gitweb/index.tmpl b/websdk/mercurial/templates/gitweb/index.tmpl new file mode 100644 index 0000000..3e0f4a2 --- /dev/null +++ b/websdk/mercurial/templates/gitweb/index.tmpl @@ -0,0 +1,26 @@ +{header} +Mercurial repositories index + + + + + + + + + + + + + + + {entries%indexentry} +
        NameDescriptionContactLast modified  
        + + + diff --git a/websdk/mercurial/templates/gitweb/manifest.tmpl b/websdk/mercurial/templates/gitweb/manifest.tmpl new file mode 100644 index 0000000..41e8c81 --- /dev/null +++ b/websdk/mercurial/templates/gitweb/manifest.tmpl @@ -0,0 +1,41 @@ +{header} +{repo|escape}: files + + + + + + + + + +
        {path|escape} {inbranch%inbranchtag}{branches%branchtag}{tags%tagtag}{bookmarks%bookmarktag}
        + + + + + + + + +{dentries%direntry} +{fentries%fileentry} +
        drwxr-xr-x[up]
        + +{footer} diff --git a/websdk/mercurial/templates/gitweb/map b/websdk/mercurial/templates/gitweb/map new file mode 100644 index 0000000..0d1db2f --- /dev/null +++ b/websdk/mercurial/templates/gitweb/map @@ -0,0 +1,272 @@ +default = 'summary' +mimetype = 'text/html; charset={encoding}' +header = header.tmpl +footer = footer.tmpl +search = search.tmpl +changelog = changelog.tmpl +summary = summary.tmpl +error = error.tmpl +notfound = notfound.tmpl + +help = help.tmpl +helptopics = helptopics.tmpl + +helpentry = '{topic|escape}{summary|escape}' + +naventry = '{label|escape} ' +navshortentry = '{label|escape} ' +navgraphentry = '{label|escape} ' +filenaventry = '{label|escape} ' +filedifflink = '{file|escape} ' +filenodelink = ' + + {file|escape} + + + file | + annotate | + diff | + revisions + + ' +filenolink = ' + + {file|escape} + + + file | + annotate | + diff | + revisions + + ' + +nav = '{before%naventry} {after%naventry}' +navshort = '{before%navshortentry}{after%navshortentry}' +navgraph = '{before%navgraphentry}{after%navgraphentry}' +filenav = '{before%filenaventry}{after%filenaventry}' + +fileellipses = '...' +changelogentry = changelogentry.tmpl +searchentry = changelogentry.tmpl +changeset = changeset.tmpl +manifest = manifest.tmpl +direntry = ' + + drwxr-xr-x + + + + {basename|escape} + {emptydirs|escape} + + + files + + ' +fileentry = ' + + {permissions|permissions} + {date|isodate} + {size} + + {basename|escape} + + + file | + revisions | + annotate + + ' +filerevision = filerevision.tmpl +fileannotate = fileannotate.tmpl +filediff = filediff.tmpl +filelog = filelog.tmpl +fileline = ' +
        +
        {linenumber} {line|escape}
        +
        ' +annotateline = ' + + + {author|user}@{rev} + +
        {linenumber}
        +
        {line|escape}
        + ' +difflineplus = '{linenumber} {line|escape}' +difflineminus = '{linenumber} {line|escape}' +difflineat = '{linenumber} {line|escape}' +diffline = '{linenumber} {line|escape}' +changelogparent = ' + + parent {rev}: + + {node|short} + + ' +changesetbranch = 'branch{name}' +changesetparent = ' + + parent {rev} + + {node|short} + + ' +filerevbranch = 'branch{name}' +filerevparent = ' + + parent {rev} + + + {rename%filerename}{node|short} + + + ' +filerename = '{file|escape}@' +filelogrename = '| base' +fileannotateparent = ' + + parent {rev} + + + {rename%filerename}{node|short} + + + ' +changelogchild = ' + + child {rev}: + {node|short} + ' +changesetchild = ' + + child {rev} + + {node|short} + + ' +filerevchild = ' + + child {rev} + + {node|short} + ' +fileannotatechild = ' + + child {rev} + + {node|short} + ' +tags = tags.tmpl +tagentry = ' + + {date|rfc822date} + {tag|escape} + + changeset | + changelog | + files + + ' +bookmarks = bookmarks.tmpl +bookmarkentry = ' + + {date|rfc822date} + {bookmark|escape} + + changeset | + changelog | + files + + ' +branches = branches.tmpl +branchentry = ' + + {date|rfc822date} + {node|short} + {branch|escape} + + changeset | + changelog | + files + + ' +diffblock = '
        {lines}
        ' +filediffparent = ' + + parent {rev} + + + {node|short} + + + ' +filelogparent = ' + + parent {rev}:  + {node|short} + ' +filediffchild = ' + + child {rev} + + {node|short} + + ' +filelogchild = ' + + child {rev}:  + {node|short} + ' +shortlog = shortlog.tmpl +graph = graph.tmpl +tagtag = '{name} ' +branchtag = '{name} ' +inbranchtag = '{name} ' +bookmarktag = '{name} ' +shortlogentry = ' + + {date|rfc822date} + {author|person} + + + {desc|strip|firstline|escape|nonempty} + {inbranch%inbranchtag}{branches%branchtag}{tags%tagtag}{bookmarks%bookmarktag} + + + + changeset | + files + + ' +filelogentry = ' + + {date|rfc822date} + + + {desc|strip|firstline|escape|nonempty} + + + + file | diff | annotate {rename%filelogrename} + ' +archiveentry = ' | {type|escape} ' +indexentry = ' + + + + {name|escape} + + + {description} + {contact|obfuscate} + {lastchange|rfc822date} + {archives%indexarchiveentry} + + \n' +indexarchiveentry = ' {type|escape} ' +index = index.tmpl +urlparameter = '{separator}{name}={value|urlescape}' +hiddenformentry = '' diff --git a/websdk/mercurial/templates/gitweb/notfound.tmpl b/websdk/mercurial/templates/gitweb/notfound.tmpl new file mode 100644 index 0000000..e9f74a3 --- /dev/null +++ b/websdk/mercurial/templates/gitweb/notfound.tmpl @@ -0,0 +1,18 @@ +{header} +Mercurial repository not found + + + + + + +
        +The specified repository "{repo|escape}" is unknown, sorry. +
        +
        +Please go back to the main repository list page. +
        + +{footer} diff --git a/websdk/mercurial/templates/gitweb/search.tmpl b/websdk/mercurial/templates/gitweb/search.tmpl new file mode 100644 index 0000000..2d77ed6 --- /dev/null +++ b/websdk/mercurial/templates/gitweb/search.tmpl @@ -0,0 +1,39 @@ +{header} +{repo|escape}: Search + + + + + + + + + +
        searching for {query|escape}
        + +{entries} + +{footer} diff --git a/websdk/mercurial/templates/gitweb/shortlog.tmpl b/websdk/mercurial/templates/gitweb/shortlog.tmpl new file mode 100644 index 0000000..2e89dbc --- /dev/null +++ b/websdk/mercurial/templates/gitweb/shortlog.tmpl @@ -0,0 +1,42 @@ +{header} +{repo|escape}: Shortlog + + + + + + + +
        +{sessionvars%hiddenformentry} + +
        + + +
         
        + +{entries%shortlogentry} +
        + + + +{footer} diff --git a/websdk/mercurial/templates/gitweb/summary.tmpl b/websdk/mercurial/templates/gitweb/summary.tmpl new file mode 100644 index 0000000..79f42a1 --- /dev/null +++ b/websdk/mercurial/templates/gitweb/summary.tmpl @@ -0,0 +1,66 @@ +{header} +{repo|escape}: Summary + + + + + + + + + +
         
        + + + + +
        description{desc}
        owner{owner|obfuscate}
        last change{lastchange|rfc822date}
        + + + +{shortlog} + +
        ...
        + + + +{tags} + +
        ...
        + + + +{bookmarks%bookmarkentry} + +
        ...
        + + + +{branches%branchentry} + + + +
        ...
        +{footer} diff --git a/websdk/mercurial/templates/gitweb/tags.tmpl b/websdk/mercurial/templates/gitweb/tags.tmpl new file mode 100644 index 0000000..54cbe06 --- /dev/null +++ b/websdk/mercurial/templates/gitweb/tags.tmpl @@ -0,0 +1,32 @@ +{header} +{repo|escape}: Tags + + + + + + + + + +
         
        + +{entries%tagentry} +
        + +{footer} diff --git a/websdk/mercurial/templates/map-cmdline.bisect b/websdk/mercurial/templates/map-cmdline.bisect new file mode 100644 index 0000000..37c5ffb --- /dev/null +++ b/websdk/mercurial/templates/map-cmdline.bisect @@ -0,0 +1,25 @@ +changeset = 'changeset: {rev}:{node|short}\nbisect: {bisect}\n{branches}{bookmarks}{tags}{parents}user: {author}\ndate: {date|date}\nsummary: {desc|firstline}\n\n' +changeset_quiet = '{bisect|shortbisect} {rev}:{node|short}\n' +changeset_verbose = 'changeset: {rev}:{node|short}\nbisect: {bisect}\n{branches}{bookmarks}{tags}{parents}user: {author}\ndate: {date|date}\n{files}{file_copies_switch}description:\n{desc|strip}\n\n\n' +changeset_debug = 'changeset: {rev}:{node}\nbisect: {bisect}\n{branches}{bookmarks}{tags}{parents}{manifest}user: {author}\ndate: {date|date}\n{file_mods}{file_adds}{file_dels}{file_copies_switch}{extras}description:\n{desc|strip}\n\n\n' +start_files = 'files: ' +file = ' {file}' +end_files = '\n' +start_file_mods = 'files: ' +file_mod = ' {file_mod}' +end_file_mods = '\n' +start_file_adds = 'files+: ' +file_add = ' {file_add}' +end_file_adds = '\n' +start_file_dels = 'files-: ' +file_del = ' {file_del}' +end_file_dels = '\n' +start_file_copies = 'copies: ' +file_copy = ' {name} ({source})' +end_file_copies = '\n' +parent = 'parent: {rev}:{node|formatnode}\n' +manifest = 'manifest: {rev}:{node}\n' +branch = 'branch: {branch}\n' +tag = 'tag: {tag}\n' +bookmark = 'bookmark: {bookmark}\n' +extra = 'extra: {key}={value|stringescape}\n' diff --git a/websdk/mercurial/templates/map-cmdline.changelog b/websdk/mercurial/templates/map-cmdline.changelog new file mode 100644 index 0000000..f54134a --- /dev/null +++ b/websdk/mercurial/templates/map-cmdline.changelog @@ -0,0 +1,17 @@ +header = '{date|shortdate} {author|person} <{author|email}>\n\n' +header_verbose = '' +changeset = '\t* {files|stringify|fill68|tabindent}{desc|fill68|tabindent|strip}\n\t[{node|short}]{tags}{branches}\n\n' +changeset_quiet = 
'\t* {desc|firstline|fill68|tabindent|strip}\n\n' +changeset_verbose = '{date|isodate} {author|person} <{author|email}> ({node|short}{tags}{branches})\n\n\t* {file_adds|stringify|fill68|tabindent}{file_dels|stringify|fill68|tabindent}{files|stringify|fill68|tabindent}{desc|fill68|tabindent|strip}\n\n' +start_tags = ' [' +tag = '{tag}, ' +last_tag = '{tag}]' +start_branches = ' <' +branch = '{branch}, ' +last_branch = '{branch}>' +file = '{file}, ' +last_file = '{file}:\n\t' +file_add = '{file_add}, ' +last_file_add = '{file_add}: new file.\n* ' +file_del = '{file_del}, ' +last_file_del = '{file_del}: deleted file.\n* ' diff --git a/websdk/mercurial/templates/map-cmdline.compact b/websdk/mercurial/templates/map-cmdline.compact new file mode 100644 index 0000000..0a71639 --- /dev/null +++ b/websdk/mercurial/templates/map-cmdline.compact @@ -0,0 +1,12 @@ +changeset = '{rev}{tags}{bookmarks}{parents} {node|short} {date|isodate} {author|user}\n {desc|firstline|strip}\n\n' +changeset_quiet = '{rev}:{node|short}\n' +changeset_verbose = '{rev}{tags}{parents} {node|short} {date|isodate} {author}\n {desc|strip}\n\n' +start_tags = '[' +tag = '{tag},' +last_tag = '{tag}]' +start_parents = ':' +parent = '{rev},' +last_parent = '{rev}' +start_bookmarks = '[' +bookmark = '{bookmark},' +last_bookmark = '{bookmark}]' diff --git a/websdk/mercurial/templates/map-cmdline.default b/websdk/mercurial/templates/map-cmdline.default new file mode 100644 index 0000000..aeb695b --- /dev/null +++ b/websdk/mercurial/templates/map-cmdline.default @@ -0,0 +1,25 @@ +changeset = 'changeset: {rev}:{node|short}\n{branches}{bookmarks}{tags}{parents}user: {author}\ndate: {date|date}\nsummary: {desc|firstline}\n\n' +changeset_quiet = '{rev}:{node|short}\n' +changeset_verbose = 'changeset: {rev}:{node|short}\n{branches}{bookmarks}{tags}{parents}user: {author}\ndate: {date|date}\n{files}{file_copies_switch}description:\n{desc|strip}\n\n\n' +changeset_debug = 'changeset: 
{rev}:{node}\n{branches}{bookmarks}{tags}{parents}{manifest}user: {author}\ndate: {date|date}\n{file_mods}{file_adds}{file_dels}{file_copies_switch}{extras}description:\n{desc|strip}\n\n\n' +start_files = 'files: ' +file = ' {file}' +end_files = '\n' +start_file_mods = 'files: ' +file_mod = ' {file_mod}' +end_file_mods = '\n' +start_file_adds = 'files+: ' +file_add = ' {file_add}' +end_file_adds = '\n' +start_file_dels = 'files-: ' +file_del = ' {file_del}' +end_file_dels = '\n' +start_file_copies = 'copies: ' +file_copy = ' {name} ({source})' +end_file_copies = '\n' +parent = 'parent: {rev}:{node|formatnode}\n' +manifest = 'manifest: {rev}:{node}\n' +branch = 'branch: {branch}\n' +tag = 'tag: {tag}\n' +bookmark = 'bookmark: {bookmark}\n' +extra = 'extra: {key}={value|stringescape}\n' diff --git a/websdk/mercurial/templates/map-cmdline.xml b/websdk/mercurial/templates/map-cmdline.xml new file mode 100644 index 0000000..ea1cfc6 --- /dev/null +++ b/websdk/mercurial/templates/map-cmdline.xml @@ -0,0 +1,20 @@ +header = '\n\n' +footer = '\n' + +changeset = '\n{branches}{bookmarks}{tags}{parents}{author|person|xmlescape}\n{date|rfc3339date}\n{desc|xmlescape}\n\n' +changeset_verbose = '\n{branches}{bookmarks}{tags}{parents}{author|person|xmlescape}\n{date|rfc3339date}\n{desc|xmlescape}\n\n{file_adds}{file_dels}{file_mods}\n{file_copies}\n' +changeset_debug = '\n{branches}{bookmarks}{tags}{parents}{author|person|xmlescape}\n{date|rfc3339date}\n{desc|xmlescape}\n\n{file_adds}{file_dels}{file_mods}\n{file_copies}{extras}\n' + +file_add = '{file_add|xmlescape}\n' +file_mod = '{file_mod|xmlescape}\n' +file_del = '{file_del|xmlescape}\n' + +start_file_copies = '\n' +file_copy = '{name|xmlescape}\n' +end_file_copies = '\n' + +parent = '\n' +branch = '{branch|xmlescape}\n' +tag = '{tag|xmlescape}\n' +bookmark = '{bookmark|xmlescape}\n' +extra = '{value|xmlescape}\n' diff --git a/websdk/mercurial/templates/monoblue/bookmarks.tmpl 
b/websdk/mercurial/templates/monoblue/bookmarks.tmpl new file mode 100644 index 0000000..7fb79ee --- /dev/null +++ b/websdk/mercurial/templates/monoblue/bookmarks.tmpl @@ -0,0 +1,38 @@ +{header} + {repo|escape}: Bookmarks + + + + + +
        + + + + +{entries%bookmarkentry} +
        + +{footer} diff --git a/websdk/mercurial/templates/monoblue/branches.tmpl b/websdk/mercurial/templates/monoblue/branches.tmpl new file mode 100644 index 0000000..969e5a1 --- /dev/null +++ b/websdk/mercurial/templates/monoblue/branches.tmpl @@ -0,0 +1,38 @@ +{header} + {repo|escape}: Branches + + + + + +
        + + + + +{entries%branchentry} +
        + +{footer} diff --git a/websdk/mercurial/templates/monoblue/changelog.tmpl b/websdk/mercurial/templates/monoblue/changelog.tmpl new file mode 100644 index 0000000..430c388 --- /dev/null +++ b/websdk/mercurial/templates/monoblue/changelog.tmpl @@ -0,0 +1,42 @@ +{header} + {repo|escape}: changelog + + + + + +
        + + + +
        + {entries%changelogentry} +
        + +
        +{changenav%nav} +
        + +{footer} diff --git a/websdk/mercurial/templates/monoblue/changelogentry.tmpl b/websdk/mercurial/templates/monoblue/changelogentry.tmpl new file mode 100644 index 0000000..1c04f07 --- /dev/null +++ b/websdk/mercurial/templates/monoblue/changelogentry.tmpl @@ -0,0 +1,6 @@ +

        {desc|strip|firstline|escape|nonempty} {inbranch%inbranchtag}{branches%branchtag}{tags%tagtag}{bookmarks%bookmarktag}

        +
          +
        • {date|rfc822date}
        • +
        • by {author|obfuscate} [{date|rfc822date}] rev {rev}
        • +
        • {desc|strip|escape|addbreaks|nonempty}
        • +
        diff --git a/websdk/mercurial/templates/monoblue/changeset.tmpl b/websdk/mercurial/templates/monoblue/changeset.tmpl new file mode 100644 index 0000000..56d7bb0 --- /dev/null +++ b/websdk/mercurial/templates/monoblue/changeset.tmpl @@ -0,0 +1,65 @@ +{header} +{repo|escape}: changeset {rev}:{node|short} + + + + + +
        + + + + + + +

        {desc|strip|escape|firstline|nonempty} {inbranch%inbranchtag}{branches%branchtag}{tags%tagtag}{bookmarks%bookmarktag}

        +

        {date|rfc822date}

        + +
        +
        author
        +
        {author|obfuscate}
        +
        date
        +
        {date|rfc822date}
        + {branch%changesetbranch} +
        changeset {rev}
        +
        {node|short}
        + {parent%changesetparent} + {child%changesetchild} +
        + +

        {desc|strip|escape|addbreaks|nonempty}

        + + + {files} +
        + +
        + {diff} +
        + +{footer} diff --git a/websdk/mercurial/templates/monoblue/error.tmpl b/websdk/mercurial/templates/monoblue/error.tmpl new file mode 100644 index 0000000..edbef72 --- /dev/null +++ b/websdk/mercurial/templates/monoblue/error.tmpl @@ -0,0 +1,36 @@ +{header} + {repo|escape}: Error + + + + + +
        + + + +

        {error|escape}

        + +{footer} diff --git a/websdk/mercurial/templates/monoblue/fileannotate.tmpl b/websdk/mercurial/templates/monoblue/fileannotate.tmpl new file mode 100644 index 0000000..fb067c1 --- /dev/null +++ b/websdk/mercurial/templates/monoblue/fileannotate.tmpl @@ -0,0 +1,65 @@ +{header} +{repo|escape}: {file|escape}@{node|short} (annotated) + + + + + +
        + + + + + +

        {file|escape}

        +

        {date|rfc822date}

        + +
        +
        author
        +
        {author|obfuscate}
        +
        date
        +
        {date|rfc822date}
        + {branch%filerevbranch} +
        changeset {rev}
        +
        {node|short}
        + {parent%fileannotateparent} + {child%fileannotatechild} +
        permissions
        +
        {permissions|permissions}
        +
        + +

        {desc|strip|escape|addbreaks|nonempty}

        + + + {annotate%annotateline} +
        + +{footer} diff --git a/websdk/mercurial/templates/monoblue/filediff.tmpl b/websdk/mercurial/templates/monoblue/filediff.tmpl new file mode 100644 index 0000000..e6298f9 --- /dev/null +++ b/websdk/mercurial/templates/monoblue/filediff.tmpl @@ -0,0 +1,56 @@ +{header} +{repo|escape}: diff {file|escape} + + + + + +
        + + + + + +

        {file|escape}

        + +
        + {branch%filerevbranch} +
        changeset {rev}
        +
        {node|short}
        + {parent%filediffparent} + {child%filediffchild} +
        + +
        + {diff} +
        + +{footer} diff --git a/websdk/mercurial/templates/monoblue/filelog.tmpl b/websdk/mercurial/templates/monoblue/filelog.tmpl new file mode 100644 index 0000000..0e77f9f --- /dev/null +++ b/websdk/mercurial/templates/monoblue/filelog.tmpl @@ -0,0 +1,51 @@ +{header} +{repo|escape}: File revisions + + + + + +
        + + + + + + + + {entries%filelogentry} +
        + +
        + {nav%filenav} +
        + +{footer} diff --git a/websdk/mercurial/templates/monoblue/filerevision.tmpl b/websdk/mercurial/templates/monoblue/filerevision.tmpl new file mode 100644 index 0000000..7aaec8b --- /dev/null +++ b/websdk/mercurial/templates/monoblue/filerevision.tmpl @@ -0,0 +1,65 @@ +{header} +{repo|escape}: {file|escape}@{node|short} + + + + + +
        + + + + + +

        {file|escape}

        +

        {date|rfc822date}

        + +
        +
        author
        +
        {author|obfuscate}
        +
        date
        +
        {date|rfc822date}
        + {branch%filerevbranch} +
        changeset {rev}
        +
        {node|short}
        + {parent%filerevparent} + {child%filerevchild} +
        permissions
        +
        {permissions|permissions}
        +
        + +

        {desc|strip|escape|addbreaks|nonempty}

        + +
        + {text%fileline} +
        + +{footer} diff --git a/websdk/mercurial/templates/monoblue/footer.tmpl b/websdk/mercurial/templates/monoblue/footer.tmpl new file mode 100644 index 0000000..c19bdae --- /dev/null +++ b/websdk/mercurial/templates/monoblue/footer.tmpl @@ -0,0 +1,23 @@ + + + +
        +

        mercurial

        +
        + +
        +
        +
        +
        + +
        + + + diff --git a/websdk/mercurial/templates/monoblue/graph.tmpl b/websdk/mercurial/templates/monoblue/graph.tmpl new file mode 100644 index 0000000..08fd79d --- /dev/null +++ b/websdk/mercurial/templates/monoblue/graph.tmpl @@ -0,0 +1,125 @@ +{header} + {repo|escape}: graph + + + + + + +
        + + + + +
        The revision graph only works with JavaScript-enabled browsers.
        +
        +
          + +
            +
            + + + +
            + less + more + | {changenav%navgraph} +
            + +{footer} diff --git a/websdk/mercurial/templates/monoblue/header.tmpl b/websdk/mercurial/templates/monoblue/header.tmpl new file mode 100644 index 0000000..0c1f580 --- /dev/null +++ b/websdk/mercurial/templates/monoblue/header.tmpl @@ -0,0 +1,7 @@ + + + + + + + diff --git a/websdk/mercurial/templates/monoblue/help.tmpl b/websdk/mercurial/templates/monoblue/help.tmpl new file mode 100644 index 0000000..33468f2 --- /dev/null +++ b/websdk/mercurial/templates/monoblue/help.tmpl @@ -0,0 +1,38 @@ +{header} + {repo|escape}: Branches + + + + + +
            + + + +
            +    {doc|escape}
            +    
            + +{footer} diff --git a/websdk/mercurial/templates/monoblue/helptopics.tmpl b/websdk/mercurial/templates/monoblue/helptopics.tmpl new file mode 100644 index 0000000..ca4ca9b --- /dev/null +++ b/websdk/mercurial/templates/monoblue/helptopics.tmpl @@ -0,0 +1,45 @@ +{header} + {repo|escape}: Branches + + + + + +
            + + + + + + {topics % helpentry} + + + {earlycommands % helpentry} + + + {othercommands % helpentry} +

            Topics

            Main Commands

            Other Commands

            + +{footer} diff --git a/websdk/mercurial/templates/monoblue/index.tmpl b/websdk/mercurial/templates/monoblue/index.tmpl new file mode 100644 index 0000000..e5086f7 --- /dev/null +++ b/websdk/mercurial/templates/monoblue/index.tmpl @@ -0,0 +1,39 @@ +{header} + {repo|escape}: Mercurial repositories index + + + +
            + + + + + + + + + + + + {entries%indexentry} +
            NameDescriptionContactLast modified  
            + + +
            +

            mercurial

            +
            + +
            +
            +
            +
            + +
            + + diff --git a/websdk/mercurial/templates/monoblue/manifest.tmpl b/websdk/mercurial/templates/monoblue/manifest.tmpl new file mode 100644 index 0000000..c99af7c --- /dev/null +++ b/websdk/mercurial/templates/monoblue/manifest.tmpl @@ -0,0 +1,53 @@ +{header} +{repo|escape}: files + + + + + +
            + + + + + +

            {path|escape} {inbranch%inbranchtag}{branches%branchtag}{tags%tagtag}{bookmarks%bookmarktag}

            + + + + + + + + + + {dentries%direntry} + {fentries%fileentry} +
            drwxr-xr-x[up]
            + +{footer} diff --git a/websdk/mercurial/templates/monoblue/map b/websdk/mercurial/templates/monoblue/map new file mode 100644 index 0000000..7fa26c9 --- /dev/null +++ b/websdk/mercurial/templates/monoblue/map @@ -0,0 +1,240 @@ +default = 'summary' +mimetype = 'text/html; charset={encoding}' +header = header.tmpl +footer = footer.tmpl +search = search.tmpl +changelog = changelog.tmpl +summary = summary.tmpl +error = error.tmpl +notfound = notfound.tmpl + +help = help.tmpl +helptopics = helptopics.tmpl + +helpentry = '{topic|escape}{summary|escape}' + +naventry = '{label|escape} ' +navshortentry = '{label|escape} ' +navgraphentry = '{label|escape} ' +filenaventry = '{label|escape}' +filedifflink = '{file|escape} ' +filenodelink = ' + + {file|escape} + + + file | + annotate | + diff | + revisions + + ' +filenolink = ' + + {file|escape} + + + file | + annotate | + diff | + revisions + + ' + +nav = '{before%naventry} {after%naventry}' +navshort = '{before%navshortentry}{after%navshortentry}' +navgraph = '{before%navgraphentry}{after%navgraphentry}' +filenav = '{before%filenaventry}{after%filenaventry}' + +fileellipses = '...' +changelogentry = changelogentry.tmpl +searchentry = changelogentry.tmpl +changeset = changeset.tmpl +manifest = manifest.tmpl +direntry = ' + + drwxr-xr-x + + + {basename|escape} + files + ' +fileentry = ' + + {permissions|permissions} + {date|isodate} + {size} + {basename|escape} + + file | + revisions | + annotate + + ' +filerevision = filerevision.tmpl +fileannotate = fileannotate.tmpl +filediff = filediff.tmpl +filelog = filelog.tmpl +fileline = ' +
            +
            {linenumber} {line|escape}
            +
            ' +annotateline = ' + + + {author|user}@{rev} + + + {linenumber} + + {line|escape} + ' +difflineplus = '{linenumber} {line|escape}' +difflineminus = '{linenumber} {line|escape}' +difflineat = '{linenumber} {line|escape}' +diffline = '{linenumber} {line|escape}' +changelogparent = ' + + parent {rev}: + + {node|short} + + ' +changesetbranch = '
            branch
            {name}
            ' +changesetparent = ' +
            parent {rev}
            +
            {node|short}
            ' +filerevbranch = '
            branch
            {name}
            ' +filerevparent = ' +
            parent {rev}
            +
            + + {rename%filerename}{node|short} + +
            ' +filerename = '{file|escape}@' +filelogrename = '| base' +fileannotateparent = ' +
            parent {rev}
            +
            + + {rename%filerename}{node|short} + +
            ' +changelogchild = ' +
            child {rev}:
            +
            {node|short}
            ' +changesetchild = ' +
            child {rev}
            +
            {node|short}
            ' +filerevchild = ' +
            child {rev}
            +
            + {node|short} +
            ' +fileannotatechild = ' +
            child {rev}
            +
            + {node|short} +
            ' +tags = tags.tmpl +tagentry = ' + + {date|rfc822date} + {tag|escape} + + changeset | + changelog | + files + + ' +bookmarks = bookmarks.tmpl +bookmarkentry = ' + + {date|rfc822date} + {bookmark|escape} + + changeset | + changelog | + files + + ' +branches = branches.tmpl +branchentry = ' + + {date|rfc822date} + {node|short} + {branch|escape} + + changeset | + changelog | + files + + ' +diffblock = '
            {lines}
            ' +filediffparent = ' +
            parent {rev}
            +
            {node|short}
            ' +filelogparent = ' + + parent {rev}:  + {node|short} + ' +filediffchild = ' +
            child {rev}
            +
            {node|short}
            ' +filelogchild = ' + + child {rev}:  + {node|short} + ' +shortlog = shortlog.tmpl +tagtag = '{name} ' +branchtag = '{name} ' +inbranchtag = '{name} ' +bookmarktag = '{name} ' +shortlogentry = ' + + {date|rfc822date} + {author|person} + + + {desc|strip|firstline|escape|nonempty} + {inbranch%inbranchtag}{branches%branchtag}{tags%tagtag}{bookmarks%bookmarktag} + + + + changeset | + files + + ' +filelogentry = ' + + {date|rfc822date} + {desc|strip|firstline|escape|nonempty} + + file | diff | annotate + {rename%filelogrename} + + ' +archiveentry = '
          • {type|escape}
          • ' +indexentry = ' + + {name|escape} + {description} + {contact|obfuscate} + {lastchange|rfc822date} + {archives%indexarchiveentry} + + + + \n' +indexarchiveentry = '{type|escape} ' +index = index.tmpl +urlparameter = '{separator}{name}={value|urlescape}' +hiddenformentry = '' +graph = graph.tmpl diff --git a/websdk/mercurial/templates/monoblue/notfound.tmpl b/websdk/mercurial/templates/monoblue/notfound.tmpl new file mode 100644 index 0000000..38df584 --- /dev/null +++ b/websdk/mercurial/templates/monoblue/notfound.tmpl @@ -0,0 +1,37 @@ +{header} + {repo|escape}: Mercurial repository not found + + + + + +
            + + + +

            The specified repository "{repo|escape}" is unknown, sorry.

            +

            Please go back to the main repository list page.

            + +{footer} diff --git a/websdk/mercurial/templates/monoblue/search.tmpl b/websdk/mercurial/templates/monoblue/search.tmpl new file mode 100644 index 0000000..2be816a --- /dev/null +++ b/websdk/mercurial/templates/monoblue/search.tmpl @@ -0,0 +1,36 @@ +{header} + {repo|escape}: Search + + + + + +
            + + + + {entries} + +{footer} diff --git a/websdk/mercurial/templates/monoblue/shortlog.tmpl b/websdk/mercurial/templates/monoblue/shortlog.tmpl new file mode 100644 index 0000000..56c4584 --- /dev/null +++ b/websdk/mercurial/templates/monoblue/shortlog.tmpl @@ -0,0 +1,44 @@ +{header} + {repo|escape}: shortlog + + + + + +
            + + + + + +{entries%shortlogentry} +
            + +
            + {changenav%navshort} +
            + +{footer} diff --git a/websdk/mercurial/templates/monoblue/summary.tmpl b/websdk/mercurial/templates/monoblue/summary.tmpl new file mode 100644 index 0000000..9528dc6 --- /dev/null +++ b/websdk/mercurial/templates/monoblue/summary.tmpl @@ -0,0 +1,76 @@ +{header} + {repo|escape}: Summary + + + + + +
            + + + +
            +
            name
            +
            {repo|escape}
            +
            description
            +
            {desc}
            +
            owner
            +
            {owner|obfuscate}
            +
            last change
            +
            {lastchange|rfc822date}
            +
            + +

            Changes

            + +{shortlog} + + + +
            ...
            + +

            Tags

            + +{tags} + + + +
            ...
            + +

            Bookmarks

            + + {bookmarks%bookmarkentry} + + + +
            ...
            + + + + {branches%branchentry} + + + +
            ...
            +{footer} diff --git a/websdk/mercurial/templates/monoblue/tags.tmpl b/websdk/mercurial/templates/monoblue/tags.tmpl new file mode 100644 index 0000000..0c8f93d --- /dev/null +++ b/websdk/mercurial/templates/monoblue/tags.tmpl @@ -0,0 +1,38 @@ +{header} + {repo|escape}: Tags + + + + + +
            + + + + +{entries%tagentry} +
            + +{footer} diff --git a/websdk/mercurial/templates/paper/bookmarks.tmpl b/websdk/mercurial/templates/paper/bookmarks.tmpl new file mode 100644 index 0000000..5989243 --- /dev/null +++ b/websdk/mercurial/templates/paper/bookmarks.tmpl @@ -0,0 +1,49 @@ +{header} +{repo|escape}: bookmarks + + + + + +
            + + +
            +

            {repo|escape}

            +

            bookmarks

            + + + + + + + + +{entries%bookmarkentry} +
            bookmarknode
            +
            +
            + +{footer} diff --git a/websdk/mercurial/templates/paper/branches.tmpl b/websdk/mercurial/templates/paper/branches.tmpl new file mode 100644 index 0000000..b26a04d --- /dev/null +++ b/websdk/mercurial/templates/paper/branches.tmpl @@ -0,0 +1,60 @@ +{header} +{repo|escape}: branches + + + + + +
            + + +
            +

            {repo|escape}

            +

            branches

            + + + + + + + + +{entries % +' + + + ' +} +
            branchnode
            + + {branch|escape} + + + {node|short} +
            +
            +
            + +{footer} diff --git a/websdk/mercurial/templates/paper/changeset.tmpl b/websdk/mercurial/templates/paper/changeset.tmpl new file mode 100644 index 0000000..b7cc62b --- /dev/null +++ b/websdk/mercurial/templates/paper/changeset.tmpl @@ -0,0 +1,87 @@ +{header} +{repo|escape}: {node|short} + + +
            + + +
            + +

            {repo|escape}

            +

            changeset {rev}:{node|short} {changesetbranch%changelogbranchname} {changesettag} {changesetbookmark}

            + + + +
            {desc|strip|escape|nonempty}
            + + + + + + + + + + + + + + + + + + + + + + + + + +
            author{author|obfuscate}
            date{date|rfc822date}
            parents{parent%changesetparent}
            children{child%changesetchild}
            files{files}
            diffstat + {diffsummary} + [+] + +
            + +
            +
            line diff
            + +{diff} +
            + +
            +
            +{footer} diff --git a/websdk/mercurial/templates/paper/diffstat.tmpl b/websdk/mercurial/templates/paper/diffstat.tmpl new file mode 100644 index 0000000..90261c9 --- /dev/null +++ b/websdk/mercurial/templates/paper/diffstat.tmpl @@ -0,0 +1,8 @@ + + {file|escape} + {total} + +   +   + + diff --git a/websdk/mercurial/templates/paper/error.tmpl b/websdk/mercurial/templates/paper/error.tmpl new file mode 100644 index 0000000..1aca07c --- /dev/null +++ b/websdk/mercurial/templates/paper/error.tmpl @@ -0,0 +1,45 @@ +{header} +{repo|escape}: error + + + +
            + + +
            + +

            {repo|escape}

            +

            error

            + + + +
            +

            +An error occurred while processing your request: +

            +

            +{error|escape} +

            +
            +
            +
            + +{footer} diff --git a/websdk/mercurial/templates/paper/fileannotate.tmpl b/websdk/mercurial/templates/paper/fileannotate.tmpl new file mode 100644 index 0000000..cb02b71 --- /dev/null +++ b/websdk/mercurial/templates/paper/fileannotate.tmpl @@ -0,0 +1,82 @@ +{header} +{repo|escape}: {file|escape} annotate + + + +
            + + +
            +

            {repo|escape}

            +

            annotate {file|escape} @ {rev}:{node|short}

            + + + +
            {desc|strip|escape|nonempty}
            + + + + + + + + + + + + + + + + + + +{changesettag} +
            author{author|obfuscate}
            date{date|rfc822date}
            parents{parent%filerevparent}
            children{child%filerevchild}
            + +
            + + + + + +{annotate%annotateline} +
            rev  line source
            +
            +
            +
            + +{footer} diff --git a/websdk/mercurial/templates/paper/filediff.tmpl b/websdk/mercurial/templates/paper/filediff.tmpl new file mode 100644 index 0000000..43524c4 --- /dev/null +++ b/websdk/mercurial/templates/paper/filediff.tmpl @@ -0,0 +1,77 @@ +{header} +{repo|escape}: {file|escape} diff + + + +
            + + +
            +

            {repo|escape}

            +

            diff {file|escape} @ {rev}:{node|short}

            + + + +
            {desc|strip|escape|nonempty}
            + + + + + + + + + + + + + + + + + + +{changesettag} +
            author{author|obfuscate}
            date{date|rfc822date}
            parents{parent%filerevparent}
            children{child%filerevchild}
            + +
            +
            line diff
            + +{diff} +
            +
            +
            + +{footer} diff --git a/websdk/mercurial/templates/paper/filelog.tmpl b/websdk/mercurial/templates/paper/filelog.tmpl new file mode 100644 index 0000000..4880d78 --- /dev/null +++ b/websdk/mercurial/templates/paper/filelog.tmpl @@ -0,0 +1,73 @@ +{header} +{repo|escape}: {file|escape} history + + + + + +
            + + +
            +

            {repo|escape}

            +

            log {file|escape}

            + + + + + + + + + + + +{entries%filelogentry} +
            ageauthordescription
            + + + +
            +
            + +{footer} diff --git a/websdk/mercurial/templates/paper/filelogentry.tmpl b/websdk/mercurial/templates/paper/filelogentry.tmpl new file mode 100644 index 0000000..8b4ae84 --- /dev/null +++ b/websdk/mercurial/templates/paper/filelogentry.tmpl @@ -0,0 +1,5 @@ + + {date|rfc822date} + {author|person} + {desc|strip|firstline|escape|nonempty}{inbranch%changelogbranchname}{branches%changelogbranchhead}{tags%changelogtag}{rename%filelogrename} + diff --git a/websdk/mercurial/templates/paper/filerevision.tmpl b/websdk/mercurial/templates/paper/filerevision.tmpl new file mode 100644 index 0000000..cdadf48 --- /dev/null +++ b/websdk/mercurial/templates/paper/filerevision.tmpl @@ -0,0 +1,76 @@ +{header} +{repo|escape}: {node|short} {file|escape} + + + +
            + + +
            +

            {repo|escape}

            +

            view {file|escape} @ {rev}:{node|short}

            + + + +
            {desc|strip|escape|nonempty}
            + + + + + + + + + + + + + + + + + + +{changesettag} +
            author{author|obfuscate}
            date{date|rfc822date}
            parents{parent%filerevparent}
            children{child%filerevchild}
            + +
            +
            line source
            +{text%fileline} +
            +
            +
            +
            + +{footer} diff --git a/websdk/mercurial/templates/paper/footer.tmpl b/websdk/mercurial/templates/paper/footer.tmpl new file mode 100644 index 0000000..4eb2d46 --- /dev/null +++ b/websdk/mercurial/templates/paper/footer.tmpl @@ -0,0 +1,5 @@ + +{motd} + + + diff --git a/websdk/mercurial/templates/paper/graph.tmpl b/websdk/mercurial/templates/paper/graph.tmpl new file mode 100644 index 0000000..80b44ac --- /dev/null +++ b/websdk/mercurial/templates/paper/graph.tmpl @@ -0,0 +1,141 @@ +{header} +{repo|escape}: revision graph + + + + + + +
            + + +
            +

            {repo|escape}

            +

            graph

            + + + + + + + +
            +
              + +
                +
                + + + + + +
                +
                + +{footer} diff --git a/websdk/mercurial/templates/paper/header.tmpl b/websdk/mercurial/templates/paper/header.tmpl new file mode 100644 index 0000000..f24d768 --- /dev/null +++ b/websdk/mercurial/templates/paper/header.tmpl @@ -0,0 +1,7 @@ + + + + + + + diff --git a/websdk/mercurial/templates/paper/help.tmpl b/websdk/mercurial/templates/paper/help.tmpl new file mode 100644 index 0000000..487e1f2 --- /dev/null +++ b/websdk/mercurial/templates/paper/help.tmpl @@ -0,0 +1,44 @@ +{header} +Help: {topic} + + + + + +
                + + +
                +

                {repo|escape}

                +

                Help: {topic}

                + + +
                +{doc|escape}
                +
                +
                +
                + +{footer} diff --git a/websdk/mercurial/templates/paper/helptopics.tmpl b/websdk/mercurial/templates/paper/helptopics.tmpl new file mode 100644 index 0000000..a723b53 --- /dev/null +++ b/websdk/mercurial/templates/paper/helptopics.tmpl @@ -0,0 +1,49 @@ +{header} +Help: {title} + + + + + +
                + + +
                +

                {repo|escape}

                + + + +{topics % helpentry} + + +{earlycommands % helpentry} + + +{othercommands % helpentry} +

                Topics

                Main Commands

                Other Commands

                +
                +
                + +{footer} diff --git a/websdk/mercurial/templates/paper/index.tmpl b/websdk/mercurial/templates/paper/index.tmpl new file mode 100644 index 0000000..04b4ffb --- /dev/null +++ b/websdk/mercurial/templates/paper/index.tmpl @@ -0,0 +1,26 @@ +{header} +Mercurial repositories index + + + +
                + +
                +

                Mercurial Repositories

                + + + + + + + + + + {entries%indexentry} +
                NameDescriptionContactLast modified 
                +
                +
                +{footer} diff --git a/websdk/mercurial/templates/paper/manifest.tmpl b/websdk/mercurial/templates/paper/manifest.tmpl new file mode 100644 index 0000000..3dc7e77 --- /dev/null +++ b/websdk/mercurial/templates/paper/manifest.tmpl @@ -0,0 +1,58 @@ +{header} +{repo|escape}: {node|short} {path|escape} + + + +
                + + +
                +

                {repo|escape}

                +

                directory {path|escape} @ {rev}:{node|short} {tags%changelogtag}

                + + + + + + + + + + + + + + +{dentries%direntry} +{fentries%fileentry} +
                namesizepermissions
                [up]drwxr-xr-x
                +
                +
                +{footer} diff --git a/websdk/mercurial/templates/paper/map b/websdk/mercurial/templates/paper/map new file mode 100644 index 0000000..42042e8 --- /dev/null +++ b/websdk/mercurial/templates/paper/map @@ -0,0 +1,213 @@ +default = 'shortlog' + +mimetype = 'text/html; charset={encoding}' +header = header.tmpl +footer = footer.tmpl +search = search.tmpl + +changelog = shortlog.tmpl +shortlog = shortlog.tmpl +shortlogentry = shortlogentry.tmpl +graph = graph.tmpl +help = help.tmpl +helptopics = helptopics.tmpl + +helpentry = '{topic|escape}{summary|escape}' + +naventry = '{label|escape} ' +navshortentry = '{label|escape} ' +navgraphentry = '{label|escape} ' +filenaventry = '{label|escape} ' +filedifflink = '{file|escape} ' +filenodelink = '{file|escape} ' +filenolink = '{file|escape} ' +fileellipses = '...' +diffstatlink = diffstat.tmpl +diffstatnolink = diffstat.tmpl +changelogentry = shortlogentry.tmpl +searchentry = shortlogentry.tmpl +changeset = changeset.tmpl +manifest = manifest.tmpl + +nav = '{before%naventry} {after%naventry}' +navshort = '{before%navshortentry}{after%navshortentry}' +navgraph = '{before%navgraphentry}{after%navgraphentry}' +filenav = '{before%filenaventry}{after%filenaventry}' + +direntry = ' + + + + dir. {basename|escape}/ + + + {emptydirs|escape} + + + + drwxr-xr-x + ' + +fileentry = ' + + + + file {basename|escape} + + + {size} + {permissions|permissions} + ' + +filerevision = filerevision.tmpl +fileannotate = fileannotate.tmpl +filediff = filediff.tmpl +filelog = filelog.tmpl +fileline = ' +
                {linenumber} {line|escape}
                ' +filelogentry = filelogentry.tmpl + +annotateline = ' + + + {author|user}@{rev} + + {linenumber} {line|escape} + ' + +diffblock = '
                {lines}
                ' +difflineplus = '{linenumber} {line|escape}' +difflineminus = '{linenumber} {line|escape}' +difflineat = '{linenumber} {line|escape}' +diffline = '{linenumber} {line|escape}' + +changelogparent = ' + + parent {rev}: + {node|short} + ' + +changesetparent = '{node|short} ' + +filerevparent = '{rename%filerename}{node|short} ' +filerevchild = '{node|short} ' + +filerename = '{file|escape}@' +filelogrename = ' + + base + + {file|escape}@{node|short} + + ' +fileannotateparent = ' + + parent: + + + {rename%filerename}{node|short} + + + ' +changesetchild = ' {node|short}' +changelogchild = ' + + child + + + {node|short} + + + ' +fileannotatechild = ' + + child: + + + {node|short} + + + ' +tags = tags.tmpl +tagentry = ' + + + + {tag|escape} + + + + {node|short} + + ' +bookmarks = bookmarks.tmpl +bookmarkentry = ' + + + + {bookmark|escape} + + + + {node|short} + + ' +branches = branches.tmpl +branchentry = ' + + + + {branch|escape} + + + + {node|short} + + ' +changelogtag = '{name|escape} ' +changesettag = '{tag|escape} ' +changesetbookmark = '{bookmark|escape} ' +changelogbranchhead = '{name|escape} ' +changelogbranchname = '{name|escape} ' + +filediffparent = ' + + parent {rev}: + {node|short} + ' +filelogparent = ' + + parent {rev}: + {node|short} + ' +filediffchild = ' + + child {rev}: + {node|short} + + ' +filelogchild = ' + + child {rev}: + {node|short} + ' + +indexentry = ' + + {name|escape} + {description} + {contact|obfuscate} + {lastchange|rfc822date} + {archives%indexarchiveentry} + \n' +indexarchiveentry = ' ↓{type|escape}' +index = index.tmpl +archiveentry = ' +
              • + {type|escape} +
              • ' +notfound = notfound.tmpl +error = error.tmpl +urlparameter = '{separator}{name}={value|urlescape}' +hiddenformentry = '' diff --git a/websdk/mercurial/templates/paper/notfound.tmpl b/websdk/mercurial/templates/paper/notfound.tmpl new file mode 100644 index 0000000..e9e6ba4 --- /dev/null +++ b/websdk/mercurial/templates/paper/notfound.tmpl @@ -0,0 +1,12 @@ +{header} +Mercurial repository not found + + + +

                Mercurial repository not found

                + +The specified repository "{repo|escape}" is unknown, sorry. + +Please go back to the main repository list page. + +{footer} diff --git a/websdk/mercurial/templates/paper/search.tmpl b/websdk/mercurial/templates/paper/search.tmpl new file mode 100644 index 0000000..feee155 --- /dev/null +++ b/websdk/mercurial/templates/paper/search.tmpl @@ -0,0 +1,55 @@ +{header} +{repo|escape}: searching for {query|escape} + + + +
                + + +
                +

                {repo|escape}

                +

                searching for '{query|escape}'

                + + + + + + + + + + + +{entries} +
                ageauthordescription
                + + + +
                +
                + +{footer} diff --git a/websdk/mercurial/templates/paper/shortlog.tmpl b/websdk/mercurial/templates/paper/shortlog.tmpl new file mode 100644 index 0000000..b69d4b0 --- /dev/null +++ b/websdk/mercurial/templates/paper/shortlog.tmpl @@ -0,0 +1,70 @@ +{header} +{repo|escape}: log + + + + + +
                + + +
                +

                {repo|escape}

                +

                log

                + + + + + + + + + + + +{entries%shortlogentry} +
                ageauthordescription
                + + + +
                +
                + +{footer} diff --git a/websdk/mercurial/templates/paper/shortlogentry.tmpl b/websdk/mercurial/templates/paper/shortlogentry.tmpl new file mode 100644 index 0000000..09e94c0 --- /dev/null +++ b/websdk/mercurial/templates/paper/shortlogentry.tmpl @@ -0,0 +1,5 @@ + + {date|rfc822date} + {author|person} + {desc|strip|firstline|escape|nonempty}{inbranch%changelogbranchname}{branches%changelogbranchhead}{tags % '{name|escape} '}{bookmarks % '{name|escape} '} + diff --git a/websdk/mercurial/templates/paper/tags.tmpl b/websdk/mercurial/templates/paper/tags.tmpl new file mode 100644 index 0000000..9b4e623 --- /dev/null +++ b/websdk/mercurial/templates/paper/tags.tmpl @@ -0,0 +1,49 @@ +{header} +{repo|escape}: tags + + + + + +
                + + +
                +

                {repo|escape}

                +

                tags

                + + + + + + + + +{entries%tagentry} +
                tagnode
                +
                +
                + +{footer} diff --git a/websdk/mercurial/templates/raw/changeset.tmpl b/websdk/mercurial/templates/raw/changeset.tmpl new file mode 100644 index 0000000..b59d99b --- /dev/null +++ b/websdk/mercurial/templates/raw/changeset.tmpl @@ -0,0 +1,9 @@ +{header} +# HG changeset patch +# User {author} +# Date {date|hgdate} +# Node ID {node} +{parent%changesetparent} +{desc} + +{diff} diff --git a/websdk/mercurial/templates/raw/error.tmpl b/websdk/mercurial/templates/raw/error.tmpl new file mode 100644 index 0000000..9407c13 --- /dev/null +++ b/websdk/mercurial/templates/raw/error.tmpl @@ -0,0 +1,2 @@ +{header} +error: {error} diff --git a/websdk/mercurial/templates/raw/fileannotate.tmpl b/websdk/mercurial/templates/raw/fileannotate.tmpl new file mode 100644 index 0000000..ad1bed6 --- /dev/null +++ b/websdk/mercurial/templates/raw/fileannotate.tmpl @@ -0,0 +1,5 @@ +{header} +{annotate%annotateline} +{footer} + + diff --git a/websdk/mercurial/templates/raw/filediff.tmpl b/websdk/mercurial/templates/raw/filediff.tmpl new file mode 100644 index 0000000..c4014bc --- /dev/null +++ b/websdk/mercurial/templates/raw/filediff.tmpl @@ -0,0 +1,5 @@ +{header} +{diff} +{footer} + + diff --git a/websdk/mercurial/templates/raw/index.tmpl b/websdk/mercurial/templates/raw/index.tmpl new file mode 100644 index 0000000..29d7c9e --- /dev/null +++ b/websdk/mercurial/templates/raw/index.tmpl @@ -0,0 +1,2 @@ +{header} +{entries%indexentry} diff --git a/websdk/mercurial/templates/raw/manifest.tmpl b/websdk/mercurial/templates/raw/manifest.tmpl new file mode 100644 index 0000000..8d4a934 --- /dev/null +++ b/websdk/mercurial/templates/raw/manifest.tmpl @@ -0,0 +1,3 @@ +{header} +{dentries%direntry}{fentries%fileentry} +{footer} diff --git a/websdk/mercurial/templates/raw/map b/websdk/mercurial/templates/raw/map new file mode 100644 index 0000000..ed5c35b --- /dev/null +++ b/websdk/mercurial/templates/raw/map @@ -0,0 +1,30 @@ +mimetype = 'text/plain; charset={encoding}' +header = '' 
+footer = '' +changeset = changeset.tmpl +difflineplus = '{line}' +difflineminus = '{line}' +difflineat = '{line}' +diffline = '{line}' +changesetparent = '# Parent {node}' +changesetchild = '# Child {node}' +filenodelink = '' +filenolink = '' +fileline = '{line}' +diffblock = '{lines}' +filediff = filediff.tmpl +fileannotate = fileannotate.tmpl +annotateline = '{author|user}@{rev}: {line}' +manifest = manifest.tmpl +direntry = 'drwxr-xr-x {basename}\n' +fileentry = '{permissions|permissions} {size} {basename}\n' +index = index.tmpl +notfound = notfound.tmpl +error = error.tmpl +indexentry = '{url}\n' +tags = '{entries%tagentry}' +tagentry = '{tag} {node}\n' +bookmarks = '{entries%bookmarkentry}' +bookmarkentry = '{bookmark} {node}\n' +branches = '{entries%branchentry}' +branchentry = '{branch} {node} {status}\n' diff --git a/websdk/mercurial/templates/raw/notfound.tmpl b/websdk/mercurial/templates/raw/notfound.tmpl new file mode 100644 index 0000000..a7b3251 --- /dev/null +++ b/websdk/mercurial/templates/raw/notfound.tmpl @@ -0,0 +1,2 @@ +{header} +error: repository {repo} not found diff --git a/websdk/mercurial/templates/rss/bookmarkentry.tmpl b/websdk/mercurial/templates/rss/bookmarkentry.tmpl new file mode 100644 index 0000000..93b5614 --- /dev/null +++ b/websdk/mercurial/templates/rss/bookmarkentry.tmpl @@ -0,0 +1,6 @@ + + {bookmark|escape} + {urlbase}{url}rev/{node|short} + + {date|rfc822date} + diff --git a/websdk/mercurial/templates/rss/bookmarks.tmpl b/websdk/mercurial/templates/rss/bookmarks.tmpl new file mode 100644 index 0000000..8cc8172 --- /dev/null +++ b/websdk/mercurial/templates/rss/bookmarks.tmpl @@ -0,0 +1,6 @@ +{header} + {repo|escape}: bookmarks + {repo|escape} bookmark history + {entries%bookmarkentry} + + diff --git a/websdk/mercurial/templates/rss/changelog.tmpl b/websdk/mercurial/templates/rss/changelog.tmpl new file mode 100644 index 0000000..65b96ad --- /dev/null +++ b/websdk/mercurial/templates/rss/changelog.tmpl @@ -0,0 +1,6 @@ 
+{header} + {repo|escape} Changelog + {repo|escape} Changelog + {entries%changelogentry} + + \ No newline at end of file diff --git a/websdk/mercurial/templates/rss/changelogentry.tmpl b/websdk/mercurial/templates/rss/changelogentry.tmpl new file mode 100644 index 0000000..12fe8e0 --- /dev/null +++ b/websdk/mercurial/templates/rss/changelogentry.tmpl @@ -0,0 +1,7 @@ + + {desc|strip|firstline|strip|escape} + {urlbase}{url}rev/{node|short} + + {author|obfuscate} + {date|rfc822date} + diff --git a/websdk/mercurial/templates/rss/error.tmpl b/websdk/mercurial/templates/rss/error.tmpl new file mode 100644 index 0000000..87e6009 --- /dev/null +++ b/websdk/mercurial/templates/rss/error.tmpl @@ -0,0 +1,10 @@ +{header} + Error + Error + + Error + {error|escape} + http://mercurial.selenic.com/#error + + + diff --git a/websdk/mercurial/templates/rss/filelog.tmpl b/websdk/mercurial/templates/rss/filelog.tmpl new file mode 100644 index 0000000..31f4dc7 --- /dev/null +++ b/websdk/mercurial/templates/rss/filelog.tmpl @@ -0,0 +1,6 @@ +{header} + {repo|escape}: {file|escape} history + {file|escape} revision history + {entries%filelogentry} + + diff --git a/websdk/mercurial/templates/rss/filelogentry.tmpl b/websdk/mercurial/templates/rss/filelogentry.tmpl new file mode 100644 index 0000000..51cd3af --- /dev/null +++ b/websdk/mercurial/templates/rss/filelogentry.tmpl @@ -0,0 +1,7 @@ + + {desc|strip|firstline|strip|escape} + {urlbase}{url}log{node|short}/{file|urlescape} + + {author|obfuscate} + {date|rfc822date} + diff --git a/websdk/mercurial/templates/rss/header.tmpl b/websdk/mercurial/templates/rss/header.tmpl new file mode 100644 index 0000000..ed29196 --- /dev/null +++ b/websdk/mercurial/templates/rss/header.tmpl @@ -0,0 +1,5 @@ + + + + {urlbase}{url} + en-us diff --git a/websdk/mercurial/templates/rss/map b/websdk/mercurial/templates/rss/map new file mode 100644 index 0000000..0f141be --- /dev/null +++ b/websdk/mercurial/templates/rss/map @@ -0,0 +1,12 @@ +default = 'changelog' 
+mimetype = 'text/xml; charset={encoding}' +header = header.tmpl +changelog = changelog.tmpl +changelogentry = changelogentry.tmpl +filelog = filelog.tmpl +filelogentry = filelogentry.tmpl +tags = tags.tmpl +tagentry = tagentry.tmpl +bookmarks = bookmarks.tmpl +bookmarkentry = bookmarkentry.tmpl +error = error.tmpl diff --git a/websdk/mercurial/templates/rss/tagentry.tmpl b/websdk/mercurial/templates/rss/tagentry.tmpl new file mode 100644 index 0000000..42fa038 --- /dev/null +++ b/websdk/mercurial/templates/rss/tagentry.tmpl @@ -0,0 +1,6 @@ + + {tag|escape} + {urlbase}{url}rev/{node|short} + + {date|rfc822date} + diff --git a/websdk/mercurial/templates/rss/tags.tmpl b/websdk/mercurial/templates/rss/tags.tmpl new file mode 100644 index 0000000..93f1e96 --- /dev/null +++ b/websdk/mercurial/templates/rss/tags.tmpl @@ -0,0 +1,6 @@ +{header} + {repo|escape}: tags + {repo|escape} tag history + {entriesnotip%tagentry} + + diff --git a/websdk/mercurial/templates/spartan/branches.tmpl b/websdk/mercurial/templates/spartan/branches.tmpl new file mode 100644 index 0000000..43e3bdb --- /dev/null +++ b/websdk/mercurial/templates/spartan/branches.tmpl @@ -0,0 +1,27 @@ +{header} +{repo|escape}: branches + + + + + + + +

                branches:

                + +
                  +{entries%branchentry} +
                + +{footer} diff --git a/websdk/mercurial/templates/spartan/changelog.tmpl b/websdk/mercurial/templates/spartan/changelog.tmpl new file mode 100644 index 0000000..466e681 --- /dev/null +++ b/websdk/mercurial/templates/spartan/changelog.tmpl @@ -0,0 +1,44 @@ +{header} +{repo|escape}: changelog + + + + + +
                +shortlog +graph +tags +branches +files +{archives%archiveentry} +help +rss +atom +
                + +

                changelog for {repo|escape}

                + +
                +{sessionvars%hiddenformentry} +

                + + +navigate: {changenav%nav} +

                +
                + +{entries%changelogentry} + +
                +{sessionvars%hiddenformentry} +

                + + +navigate: {changenav%nav} +

                +
                + +{footer} diff --git a/websdk/mercurial/templates/spartan/changelogentry.tmpl b/websdk/mercurial/templates/spartan/changelogentry.tmpl new file mode 100644 index 0000000..f506b94 --- /dev/null +++ b/websdk/mercurial/templates/spartan/changelogentry.tmpl @@ -0,0 +1,25 @@ + + + + + + + + + + {parent%changelogparent} + {child%changelogchild} + {changelogtag} + + + + + + + + + + + + +
                {date|rfc822date}:{desc|strip|firstline|escape|nonempty}
                changeset {rev}:{node|short}
                author:{author|obfuscate}
                date:{date|rfc822date}
                files:{files}
                diff --git a/websdk/mercurial/templates/spartan/changeset.tmpl b/websdk/mercurial/templates/spartan/changeset.tmpl new file mode 100644 index 0000000..b357126 --- /dev/null +++ b/websdk/mercurial/templates/spartan/changeset.tmpl @@ -0,0 +1,52 @@ +{header} +{repo|escape}: changeset {node|short} + + + +
                +changelog +shortlog +graph +tags +branches +files +raw +{archives%archiveentry} +help +
                + +

                changeset: {desc|strip|escape|firstline|nonempty}

                + + + + + + +{parent%changesetparent} +{child%changesetchild} +{changesettag} + + + + + + + + + + + + + + + + +
                changeset {rev}:{node|short}
                author:{author|obfuscate}
                date:{date|rfc822date}
                files:{files}
                description:{desc|strip|escape|addbreaks|nonempty}
                + +
                +{diff} +
                + +{footer} + + diff --git a/websdk/mercurial/templates/spartan/error.tmpl b/websdk/mercurial/templates/spartan/error.tmpl new file mode 100644 index 0000000..fc2c788 --- /dev/null +++ b/websdk/mercurial/templates/spartan/error.tmpl @@ -0,0 +1,15 @@ +{header} +Mercurial Error + + + +

                Mercurial Error

                + +

                +An error occurred while processing your request: +

                +

                +{error|escape} +

                + +{footer} diff --git a/websdk/mercurial/templates/spartan/fileannotate.tmpl b/websdk/mercurial/templates/spartan/fileannotate.tmpl new file mode 100644 index 0000000..5d6a72b --- /dev/null +++ b/websdk/mercurial/templates/spartan/fileannotate.tmpl @@ -0,0 +1,49 @@ +{header} +{repo|escape}: {file|escape} annotate + + + + + +

                Annotate {file|escape}

                + + + + + +{parent%fileannotateparent} +{child%fileannotatechild} + + + + + + + + + + + + + + + +
                changeset {rev}:{node|short}
                author:{author|obfuscate}
                date:{date|rfc822date}
                permissions:{permissions|permissions}
                description:{desc|strip|escape|addbreaks|nonempty}
                + + +{annotate%annotateline} +
                + +{footer} diff --git a/websdk/mercurial/templates/spartan/filediff.tmpl b/websdk/mercurial/templates/spartan/filediff.tmpl new file mode 100644 index 0000000..7640cba --- /dev/null +++ b/websdk/mercurial/templates/spartan/filediff.tmpl @@ -0,0 +1,37 @@ +{header} +{repo|escape}: {file|escape} diff + + + + + +

                {file|escape}

                + + + + + + +{parent%filediffparent} +{child%filediffchild} +
                revision {rev}:{node|short}
                + +
                +{diff} +
                + +{footer} + + diff --git a/websdk/mercurial/templates/spartan/filelog.tmpl b/websdk/mercurial/templates/spartan/filelog.tmpl new file mode 100644 index 0000000..d003f79 --- /dev/null +++ b/websdk/mercurial/templates/spartan/filelog.tmpl @@ -0,0 +1,29 @@ +{header} +{repo|escape}: {file|escape} history + + + + + + + +

                {file|escape} revision history

                + +

                navigate: {nav%filenav}

                + +{entries%filelogentry} + +{footer} diff --git a/websdk/mercurial/templates/spartan/filelogentry.tmpl b/websdk/mercurial/templates/spartan/filelogentry.tmpl new file mode 100644 index 0000000..baff52d --- /dev/null +++ b/websdk/mercurial/templates/spartan/filelogentry.tmpl @@ -0,0 +1,25 @@ + + + + + + + + + {rename%filelogrename} + + + + + + + + +
                {date|rfc822date}:{desc|strip|firstline|escape|nonempty}
                revision {filerev}: + + {node|short} + (diff) + (annotate) +
                author:{author|obfuscate}
                date:{date|rfc822date}
                + + diff --git a/websdk/mercurial/templates/spartan/filerevision.tmpl b/websdk/mercurial/templates/spartan/filerevision.tmpl new file mode 100644 index 0000000..548d3c6 --- /dev/null +++ b/websdk/mercurial/templates/spartan/filerevision.tmpl @@ -0,0 +1,47 @@ +{header} +{repo|escape}:{file|escape} + + + + + +

                {file|escape}

                + + + + + +{parent%filerevparent} +{child%filerevchild} + + + + + + + + + + + + + +
                changeset {rev}:{node|short}
                author:{author|obfuscate}
                date:{date|rfc822date}
                permissions:{permissions|permissions}
                description:{desc|strip|escape|addbreaks|nonempty}
                + +
                +{text%fileline}
                +
                + +{footer} diff --git a/websdk/mercurial/templates/spartan/footer.tmpl b/websdk/mercurial/templates/spartan/footer.tmpl new file mode 100644 index 0000000..665bdb2 --- /dev/null +++ b/websdk/mercurial/templates/spartan/footer.tmpl @@ -0,0 +1,9 @@ + +{motd} + + + + diff --git a/websdk/mercurial/templates/spartan/graph.tmpl b/websdk/mercurial/templates/spartan/graph.tmpl new file mode 100644 index 0000000..67736e0 --- /dev/null +++ b/websdk/mercurial/templates/spartan/graph.tmpl @@ -0,0 +1,96 @@ +{header} +{repo|escape}: graph + + + + + + + + +

                graph

                + +
                +{sessionvars%hiddenformentry} +

                + + +navigate: {changenav%navgraph} +

                +
                + + + +
                +
                  + +
                    +
                    + + + +
                    +{sessionvars%hiddenformentry} +

                    + + +navigate: {changenav%navgraph} +

                    +
                    + +{footer} diff --git a/websdk/mercurial/templates/spartan/header.tmpl b/websdk/mercurial/templates/spartan/header.tmpl new file mode 100644 index 0000000..cd40d80 --- /dev/null +++ b/websdk/mercurial/templates/spartan/header.tmpl @@ -0,0 +1,7 @@ + + + + + + + diff --git a/websdk/mercurial/templates/spartan/index.tmpl b/websdk/mercurial/templates/spartan/index.tmpl new file mode 100644 index 0000000..f399813 --- /dev/null +++ b/websdk/mercurial/templates/spartan/index.tmpl @@ -0,0 +1,19 @@ +{header} +Mercurial repositories index + + + +

                    Mercurial Repositories

                    + + + + + + + + + + {entries%indexentry} +
                    NameDescriptionContactLast modified 
                    + +{footer} diff --git a/websdk/mercurial/templates/spartan/manifest.tmpl b/websdk/mercurial/templates/spartan/manifest.tmpl new file mode 100644 index 0000000..24a0973 --- /dev/null +++ b/websdk/mercurial/templates/spartan/manifest.tmpl @@ -0,0 +1,29 @@ +{header} +{repo|escape}: files for changeset {node|short} + + + +
                    +changelog +shortlog +graph +tags +branches +changeset +{archives%archiveentry} +help +
                    + +

                    files for changeset {node|short}: {path|escape}

                    + + + + +{dentries%direntry} +{fentries%fileentry} +
                    drwxr-xr-x  +   +   + [up] +
                    +{footer} diff --git a/websdk/mercurial/templates/spartan/map b/websdk/mercurial/templates/spartan/map new file mode 100644 index 0000000..8280723 --- /dev/null +++ b/websdk/mercurial/templates/spartan/map @@ -0,0 +1,183 @@ +default = 'shortlog' +mimetype = 'text/html; charset={encoding}' +header = header.tmpl +footer = footer.tmpl +search = search.tmpl +changelog = changelog.tmpl +shortlog = shortlog.tmpl +shortlogentry = shortlogentry.tmpl +graph = graph.tmpl +naventry = '{label|escape} ' +navshortentry = '{label|escape} ' +navgraphentry = '{label|escape} ' +filenaventry = '{label|escape} ' +filedifflink = '{file|escape} ' +filenodelink = '{file|escape} ' +filenolink = '{file|escape} ' +fileellipses = '...' +changelogentry = changelogentry.tmpl +searchentry = changelogentry.tmpl +changeset = changeset.tmpl +manifest = manifest.tmpl + +nav = '{before%naventry} {after%naventry}' +navshort = '{before%navshortentry}{after%navshortentry}' +navgraph = '{before%navgraphentry}{after%navgraphentry}' +filenav = '{before%filenaventry}{after%filenaventry}' + +direntry = ' + + drwxr-xr-x  +   +   + + {basename|escape}/ + + {emptydirs|urlescape} + ' + +fileentry = ' + + {permissions|permissions}  + {date|isodate}  + {size}  + {basename|escape}' + +filerevision = filerevision.tmpl +fileannotate = fileannotate.tmpl +filediff = filediff.tmpl +filelog = filelog.tmpl +fileline = '
                    {linenumber} {line|escape}
                    ' +filelogentry = filelogentry.tmpl + +# The   ensures that all table cells have content (even if there +# is an empty line in the annotated file), which in turn ensures that +# all table rows have equal height. +annotateline = ' + + + {author|user}@{rev} + + + {linenumber} + +
                     {line|escape}
                    + ' +difflineplus = '{linenumber}{line|escape}' +difflineminus = '{linenumber}{line|escape}' +difflineat = '{linenumber}{line|escape}' +diffline = '{linenumber}{line|escape}' +changelogparent = ' + + parent {rev}: + + {node|short} + + ' +changesetparent = ' + + parent {rev}: + {node|short} + ' +filerevparent = ' + + parent: + + + {rename%filerename}{node|short} + + + ' +filerename = '{file|escape}@' +filelogrename = ' + + base: + + + {file|escape}@{node|short} + + + ' +fileannotateparent = ' + + parent: + + + {rename%filerename}{node|short} + + + ' +changesetchild = ' + + child {rev}: + {node|short} + ' +changelogchild = ' + + child {rev}: + {node|short} + ' +filerevchild = ' + + child: + {node|short} + ' +fileannotatechild = ' + + child: + {node|short} + ' +tags = tags.tmpl +tagentry = ' +
                  • + {node} + {tag|escape} +
                  • ' +branches = branches.tmpl +branchentry = ' +
                  • + {node} + {branch|escape} +
                  • ' +diffblock = '
                    {lines}
                    ' +changelogtag = 'tag:{tag|escape}' +changesettag = 'tag:{tag|escape}' +filediffparent = ' + + parent {rev}: + {node|short} + ' +filelogparent = ' + + parent {rev}: + {node|short} + ' +filediffchild = ' + + child {rev}: + {node|short} + ' +filelogchild = ' + + child {rev}: + {node|short} + ' +indexentry = ' + + {name|escape} + {description} + {contact|obfuscate} + {lastchange|rfc822date} + + RSS + Atom + {archives%archiveentry} + + ' +index = index.tmpl +archiveentry = '{type|escape} ' +notfound = notfound.tmpl +error = error.tmpl +urlparameter = '{separator}{name}={value|urlescape}' +hiddenformentry = '' diff --git a/websdk/mercurial/templates/spartan/notfound.tmpl b/websdk/mercurial/templates/spartan/notfound.tmpl new file mode 100644 index 0000000..e9e6ba4 --- /dev/null +++ b/websdk/mercurial/templates/spartan/notfound.tmpl @@ -0,0 +1,12 @@ +{header} +Mercurial repository not found + + + +

                    Mercurial repository not found

                    + +The specified repository "{repo|escape}" is unknown, sorry. + +Please go back to the main repository list page. + +{footer} diff --git a/websdk/mercurial/templates/spartan/search.tmpl b/websdk/mercurial/templates/spartan/search.tmpl new file mode 100644 index 0000000..9ace1eb --- /dev/null +++ b/websdk/mercurial/templates/spartan/search.tmpl @@ -0,0 +1,37 @@ +{header} +{repo|escape}: searching for {query|escape} + + + +
                    +changelog +shortlog +graph +tags +branches +files +{archives%archiveentry} +help +
                    + +

                    searching for {query|escape}

                    + +
                    +{sessionvars%hiddenformentry} +

                    +search: + +

                    +
                    + +{entries} + +
                    +{sessionvars%hiddenformentry} +

                    +search: + +

                    +
                    + +{footer} diff --git a/websdk/mercurial/templates/spartan/shortlog.tmpl b/websdk/mercurial/templates/spartan/shortlog.tmpl new file mode 100644 index 0000000..1f70a53 --- /dev/null +++ b/websdk/mercurial/templates/spartan/shortlog.tmpl @@ -0,0 +1,44 @@ +{header} +{repo|escape}: shortlog + + + + + +
                    +changelog +graph +tags +branches +files +{archives%archiveentry} +help +rss +atom +
                    + +

                    shortlog for {repo|escape}

                    + +
                    +{sessionvars%hiddenformentry} +

                    + + +navigate: {changenav%navshort} +

                    +
                    + +{entries%shortlogentry} + +
                    +{sessionvars%hiddenformentry} +

                    + + +navigate: {changenav%navshort} +

                    +
                    + +{footer} diff --git a/websdk/mercurial/templates/spartan/shortlogentry.tmpl b/websdk/mercurial/templates/spartan/shortlogentry.tmpl new file mode 100644 index 0000000..d104753 --- /dev/null +++ b/websdk/mercurial/templates/spartan/shortlogentry.tmpl @@ -0,0 +1,7 @@ + + + + + + +
                    {date|rfc822date}{author|person}{desc|strip|firstline|escape|nonempty}
                    diff --git a/websdk/mercurial/templates/spartan/tags.tmpl b/websdk/mercurial/templates/spartan/tags.tmpl new file mode 100644 index 0000000..4c41551 --- /dev/null +++ b/websdk/mercurial/templates/spartan/tags.tmpl @@ -0,0 +1,27 @@ +{header} +{repo|escape}: tags + + + + + + + +

                    tags:

                    + +
                      +{entries%tagentry} +
                    + +{footer} diff --git a/websdk/mercurial/templates/static/background.png b/websdk/mercurial/templates/static/background.png new file mode 100644 index 0000000..af8a0aa --- /dev/null +++ b/websdk/mercurial/templates/static/background.png Binary files differ diff --git a/websdk/mercurial/templates/static/coal-file.png b/websdk/mercurial/templates/static/coal-file.png new file mode 100644 index 0000000..7ecf463 --- /dev/null +++ b/websdk/mercurial/templates/static/coal-file.png Binary files differ diff --git a/websdk/mercurial/templates/static/coal-folder.png b/websdk/mercurial/templates/static/coal-folder.png new file mode 100644 index 0000000..d1b8ecc --- /dev/null +++ b/websdk/mercurial/templates/static/coal-folder.png Binary files differ diff --git a/websdk/mercurial/templates/static/excanvas.js b/websdk/mercurial/templates/static/excanvas.js new file mode 100644 index 0000000..9d71658 --- /dev/null +++ b/websdk/mercurial/templates/static/excanvas.js @@ -0,0 +1,19 @@ +if(!window.CanvasRenderingContext2D){(function(){var I=Math,i=I.round,L=I.sin,M=I.cos,m=10,A=m/2,Q={init:function(a){var b=a||document;if(/MSIE/.test(navigator.userAgent)&&!window.opera){var c=this;b.attachEvent("onreadystatechange",function(){c.r(b)})}},r:function(a){if(a.readyState=="complete"){if(!a.namespaces["s"]){a.namespaces.add("g_vml_","urn:schemas-microsoft-com:vml")}var b=a.createStyleSheet();b.cssText="canvas{display:inline-block;overflow:hidden;text-align:left;width:300px;height:150px}g_vml_\\:*{behavior:url(#default#VML)}"; +var c=a.getElementsByTagName("canvas");for(var d=0;d"){var d="/"+a.tagName,e;while((e=a.nextSibling)&&e.tagName!=d){e.removeNode()}if(e){e.removeNode()}}a.parentNode.replaceChild(c,a);return c},initElement:function(a){a=this.q(a);a.getContext=function(){if(this.l){return this.l}return this.l=new K(this)};a.attachEvent("onpropertychange",V);a.attachEvent("onresize", +W);var 
b=a.attributes;if(b.width&&b.width.specified){a.style.width=b.width.nodeValue+"px"}else{a.width=a.clientWidth}if(b.height&&b.height.specified){a.style.height=b.height.nodeValue+"px"}else{a.height=a.clientHeight}return a}};function V(a){var b=a.srcElement;switch(a.propertyName){case "width":b.style.width=b.attributes.width.nodeValue+"px";b.getContext().clearRect();break;case "height":b.style.height=b.attributes.height.nodeValue+"px";b.getContext().clearRect();break}}function W(a){var b=a.srcElement; +if(b.firstChild){b.firstChild.style.width=b.clientWidth+"px";b.firstChild.style.height=b.clientHeight+"px"}}Q.init();var R=[];for(var E=0;E<16;E++){for(var F=0;F<16;F++){R[E*16+F]=E.toString(16)+F.toString(16)}}function J(){return[[1,0,0],[0,1,0],[0,0,1]]}function G(a,b){var c=J();for(var d=0;d<3;d++){for(var e=0;e<3;e++){var g=0;for(var h=0;h<3;h++){g+=a[d][h]*b[h][e]}c[d][e]=g}}return c}function N(a,b){b.fillStyle=a.fillStyle;b.lineCap=a.lineCap;b.lineJoin=a.lineJoin;b.lineWidth=a.lineWidth;b.miterLimit= +a.miterLimit;b.shadowBlur=a.shadowBlur;b.shadowColor=a.shadowColor;b.shadowOffsetX=a.shadowOffsetX;b.shadowOffsetY=a.shadowOffsetY;b.strokeStyle=a.strokeStyle;b.d=a.d;b.e=a.e}function O(a){var b,c=1;a=String(a);if(a.substring(0,3)=="rgb"){var d=a.indexOf("(",3),e=a.indexOf(")",d+1),g=a.substring(d+1,e).split(",");b="#";for(var h=0;h<3;h++){b+=R[Number(g[h])]}if(g.length==4&&a.substr(3,1)=="a"){c=g[3]}}else{b=a}return[b,c]}function S(a){switch(a){case "butt":return"flat";case "round":return"round"; +case "square":default:return"square"}}function K(a){this.a=J();this.m=[];this.k=[];this.c=[];this.strokeStyle="#000";this.fillStyle="#000";this.lineWidth=1;this.lineJoin="miter";this.lineCap="butt";this.miterLimit=m*1;this.globalAlpha=1;this.canvas=a;var b=a.ownerDocument.createElement("div");b.style.width=a.clientWidth+"px";b.style.height=a.clientHeight+"px";b.style.overflow="hidden";b.style.position="absolute";a.appendChild(b);this.j=b;this.d=1;this.e=1}var 
j=K.prototype;j.clearRect=function(){this.j.innerHTML= +"";this.c=[]};j.beginPath=function(){this.c=[]};j.moveTo=function(a,b){this.c.push({type:"moveTo",x:a,y:b});this.f=a;this.g=b};j.lineTo=function(a,b){this.c.push({type:"lineTo",x:a,y:b});this.f=a;this.g=b};j.bezierCurveTo=function(a,b,c,d,e,g){this.c.push({type:"bezierCurveTo",cp1x:a,cp1y:b,cp2x:c,cp2y:d,x:e,y:g});this.f=e;this.g=g};j.quadraticCurveTo=function(a,b,c,d){var e=this.f+0.6666666666666666*(a-this.f),g=this.g+0.6666666666666666*(b-this.g),h=e+(c-this.f)/3,l=g+(d-this.g)/3;this.bezierCurveTo(e, +g,h,l,c,d)};j.arc=function(a,b,c,d,e,g){c*=m;var h=g?"at":"wa",l=a+M(d)*c-A,n=b+L(d)*c-A,o=a+M(e)*c-A,f=b+L(e)*c-A;if(l==o&&!g){l+=0.125}this.c.push({type:h,x:a,y:b,radius:c,xStart:l,yStart:n,xEnd:o,yEnd:f})};j.rect=function(a,b,c,d){this.moveTo(a,b);this.lineTo(a+c,b);this.lineTo(a+c,b+d);this.lineTo(a,b+d);this.closePath()};j.strokeRect=function(a,b,c,d){this.beginPath();this.moveTo(a,b);this.lineTo(a+c,b);this.lineTo(a+c,b+d);this.lineTo(a,b+d);this.closePath();this.stroke()};j.fillRect=function(a, +b,c,d){this.beginPath();this.moveTo(a,b);this.lineTo(a+c,b);this.lineTo(a+c,b+d);this.lineTo(a,b+d);this.closePath();this.fill()};j.createLinearGradient=function(a,b,c,d){var e=new H("gradient");return e};j.createRadialGradient=function(a,b,c,d,e,g){var h=new H("gradientradial");h.n=c;h.o=g;h.i.x=a;h.i.y=b;return h};j.drawImage=function(a,b){var c,d,e,g,h,l,n,o,f=a.runtimeStyle.width,k=a.runtimeStyle.height;a.runtimeStyle.width="auto";a.runtimeStyle.height="auto";var q=a.width,r=a.height;a.runtimeStyle.width= +f;a.runtimeStyle.height=k;if(arguments.length==3){c=arguments[1];d=arguments[2];h=(l=0);n=(e=q);o=(g=r)}else if(arguments.length==5){c=arguments[1];d=arguments[2];e=arguments[3];g=arguments[4];h=(l=0);n=q;o=r}else if(arguments.length==9){h=arguments[1];l=arguments[2];n=arguments[3];o=arguments[4];c=arguments[5];d=arguments[6];e=arguments[7];g=arguments[8]}else{throw"Invalid number of arguments";}var 
s=this.b(c,d),t=[],v=10,w=10;t.push(" ','","");this.j.insertAdjacentHTML("BeforeEnd",t.join(""))};j.stroke=function(a){var b=[],c=O(a?this.fillStyle:this.strokeStyle),d=c[0],e=c[1]*this.globalAlpha,g=10,h=10;b.push("n.x){n.x=k.x}if(l.y== +null||k.yn.y){n.y=k.y}}}b.push(' ">');if(typeof this.fillStyle=="object"){var v={x:"50%",y:"50%"},w=n.x-l.x,x=n.y-l.y,p=w>x?w:x;v.x=i(this.fillStyle.i.x/w*100+50)+"%";v.y=i(this.fillStyle.i.y/x*100+50)+"%";var y=[];if(this.fillStyle.p=="gradientradial"){var z=this.fillStyle.n/p*100,B=this.fillStyle.o/p*100-z}else{var z=0,B=100}var C={offset:null,color:null},D={offset:null,color:null};this.fillStyle.h.sort(function(T,U){return T.offset-U.offset});for(var o=0;oC.offset||C.offset==null){C.offset=u.offset;C.color=u.color}if(u.offset')}else if(a){b.push('')}else{b.push("')}b.push("");this.j.insertAdjacentHTML("beforeEnd",b.join(""));this.c=[]};j.fill=function(){this.stroke(true)};j.closePath=function(){this.c.push({type:"close"})};j.b=function(a,b){return{x:m*(a*this.a[0][0]+b*this.a[1][0]+this.a[2][0])-A,y:m*(a*this.a[0][1]+b*this.a[1][1]+this.a[2][1])-A}};j.save=function(){var a={};N(this,a); +this.k.push(a);this.m.push(this.a);this.a=G(J(),this.a)};j.restore=function(){N(this.k.pop(),this);this.a=this.m.pop()};j.translate=function(a,b){var c=[[1,0,0],[0,1,0],[a,b,1]];this.a=G(c,this.a)};j.rotate=function(a){var b=M(a),c=L(a),d=[[b,c,0],[-c,b,0],[0,0,1]];this.a=G(d,this.a)};j.scale=function(a,b){this.d*=a;this.e*=b;var c=[[a,0,0],[0,b,0],[0,0,1]];this.a=G(c,this.a)};j.clip=function(){};j.arcTo=function(){};j.createPattern=function(){return new P};function H(a){this.p=a;this.n=0;this.o= +0;this.h=[];this.i={x:0,y:0}}H.prototype.addColorStop=function(a,b){b=O(b);this.h.push({offset:1-a,color:b})};function P(){}G_vmlCanvasManager=Q;CanvasRenderingContext2D=K;CanvasGradient=H;CanvasPattern=P})()}; diff --git a/websdk/mercurial/templates/static/hgicon.png b/websdk/mercurial/templates/static/hgicon.png new file mode 100644 index 
0000000..60effbc --- /dev/null +++ b/websdk/mercurial/templates/static/hgicon.png Binary files differ diff --git a/websdk/mercurial/templates/static/hglogo.png b/websdk/mercurial/templates/static/hglogo.png new file mode 100644 index 0000000..adc6e65 --- /dev/null +++ b/websdk/mercurial/templates/static/hglogo.png Binary files differ diff --git a/websdk/mercurial/templates/static/mercurial.js b/websdk/mercurial/templates/static/mercurial.js new file mode 100644 index 0000000..3bc27a4 --- /dev/null +++ b/websdk/mercurial/templates/static/mercurial.js @@ -0,0 +1,249 @@ +// mercurial.js - JavaScript utility functions +// +// Rendering of branch DAGs on the client side +// Display of elapsed time +// Show or hide diffstat +// +// Copyright 2008 Dirkjan Ochtman +// Copyright 2006 Alexander Schremmer +// +// derived from code written by Scott James Remnant +// Copyright 2005 Canonical Ltd. +// +// This software may be used and distributed according to the terms +// of the GNU General Public License, incorporated herein by reference. + +var colors = [ + [ 1.0, 0.0, 0.0 ], + [ 1.0, 1.0, 0.0 ], + [ 0.0, 1.0, 0.0 ], + [ 0.0, 1.0, 1.0 ], + [ 0.0, 0.0, 1.0 ], + [ 1.0, 0.0, 1.0 ] +]; + +function Graph() { + + this.canvas = document.getElementById('graph'); + if (navigator.userAgent.indexOf('MSIE') >= 0) this.canvas = window.G_vmlCanvasManager.initElement(this.canvas); + this.ctx = this.canvas.getContext('2d'); + this.ctx.strokeStyle = 'rgb(0, 0, 0)'; + this.ctx.fillStyle = 'rgb(0, 0, 0)'; + this.cur = [0, 0]; + this.line_width = 3; + this.bg = [0, 4]; + this.cell = [2, 0]; + this.columns = 0; + this.revlink = ''; + + this.scale = function(height) { + this.bg_height = height; + this.box_size = Math.floor(this.bg_height / 1.2); + this.cell_height = this.box_size; + } + + function colorPart(num) { + num *= 255 + num = num < 0 ? 0 : num; + num = num > 255 ? 
255 : num; + var digits = Math.round(num).toString(16); + if (num < 16) { + return '0' + digits; + } else { + return digits; + } + } + + this.setColor = function(color, bg, fg) { + + // Set the colour. + // + // Picks a distinct colour based on an internal wheel; the bg + // parameter provides the value that should be assigned to the 'zero' + // colours and the fg parameter provides the multiplier that should be + // applied to the foreground colours. + + color %= colors.length; + var red = (colors[color][0] * fg) || bg; + var green = (colors[color][1] * fg) || bg; + var blue = (colors[color][2] * fg) || bg; + red = Math.round(red * 255); + green = Math.round(green * 255); + blue = Math.round(blue * 255); + var s = 'rgb(' + red + ', ' + green + ', ' + blue + ')'; + this.ctx.strokeStyle = s; + this.ctx.fillStyle = s; + return s; + + } + + this.render = function(data) { + + var backgrounds = ''; + var nodedata = ''; + + for (var i in data) { + + var parity = i % 2; + this.cell[1] += this.bg_height; + this.bg[1] += this.bg_height; + + var cur = data[i]; + var node = cur[1]; + var edges = cur[2]; + var fold = false; + + for (var j in edges) { + + line = edges[j]; + start = line[0]; + end = line[1]; + color = line[2]; + + if (end > this.columns || start > this.columns) { + this.columns += 1; + } + + if (start == this.columns && start > end) { + var fold = true; + } + + x0 = this.cell[0] + this.box_size * start + this.box_size / 2; + y0 = this.bg[1] - this.bg_height / 2; + x1 = this.cell[0] + this.box_size * end + this.box_size / 2; + y1 = this.bg[1] + this.bg_height / 2; + + this.edge(x0, y0, x1, y1, color); + + } + + // Draw the revision node in the right column + + column = node[0] + color = node[1] + + radius = this.box_size / 8; + x = this.cell[0] + this.box_size * column + this.box_size / 2; + y = this.bg[1] - this.bg_height / 2; + var add = this.vertex(x, y, color, parity, cur); + backgrounds += add[0]; + nodedata += add[1]; + + if (fold) this.columns -= 1; + + } 
+ + document.getElementById('nodebgs').innerHTML += backgrounds; + document.getElementById('graphnodes').innerHTML += nodedata; + + } + +} + + +process_dates = (function(document, RegExp, Math, isNaN, Date, _false, _true){ + + // derived from code from mercurial/templatefilter.py + + var scales = { + 'year': 365 * 24 * 60 * 60, + 'month': 30 * 24 * 60 * 60, + 'week': 7 * 24 * 60 * 60, + 'day': 24 * 60 * 60, + 'hour': 60 * 60, + 'minute': 60, + 'second': 1 + }; + + function format(count, string){ + var ret = count + ' ' + string; + if (count > 1){ + ret = ret + 's'; + } + return ret; + } + + function shortdate(date){ + var ret = date.getFullYear() + '-'; + // getMonth() gives a 0-11 result + var month = date.getMonth() + 1; + if (month <= 9){ + ret += '0' + month; + } else { + ret += month; + } + ret += '-'; + var day = date.getDate(); + if (day <= 9){ + ret += '0' + day; + } else { + ret += day; + } + return ret; + } + + function age(datestr){ + var now = new Date(); + var once = new Date(datestr); + if (isNaN(once.getTime())){ + // parsing error + return datestr; + } + + var delta = Math.floor((now.getTime() - once.getTime()) / 1000); + + var future = _false; + if (delta < 0){ + future = _true; + delta = -delta; + if (delta > (30 * scales.year)){ + return "in the distant future"; + } + } + + if (delta > (2 * scales.year)){ + return shortdate(once); + } + + for (unit in scales){ + var s = scales[unit]; + var n = Math.floor(delta / s); + if ((n >= 2) || (s == 1)){ + if (future){ + return format(n, unit) + ' from now'; + } else { + return format(n, unit) + ' ago'; + } + } + } + } + + return function(){ + var nodes = document.getElementsByTagName('*'); + var ageclass = new RegExp('\\bage\\b'); + var dateclass = new RegExp('\\bdate\\b'); + for (var i=0; i +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. 
+ +from i18n import _ +import os, errno +import error, util + +def active(func): + def _active(self, *args, **kwds): + if self.count == 0: + raise error.Abort(_( + 'cannot use transaction when it is already committed/aborted')) + return func(self, *args, **kwds) + return _active + +def _playback(journal, report, opener, entries, unlink=True): + for f, o, ignore in entries: + if o or not unlink: + try: + fp = opener(f, 'a') + fp.truncate(o) + fp.close() + except IOError: + report(_("failed to truncate %s\n") % f) + raise + else: + try: + fp = opener(f) + fn = fp.name + fp.close() + util.unlink(fn) + except (IOError, OSError), inst: + if inst.errno != errno.ENOENT: + raise + util.unlink(journal) + +class transaction(object): + def __init__(self, report, opener, journal, after=None, createmode=None): + self.count = 1 + self.usages = 1 + self.report = report + self.opener = opener + self.after = after + self.entries = [] + self.map = {} + self.journal = journal + self._queue = [] + + self.file = util.posixfile(self.journal, "w") + if createmode is not None: + os.chmod(self.journal, createmode & 0666) + + def __del__(self): + if self.journal: + self._abort() + + @active + def startgroup(self): + self._queue.append([]) + + @active + def endgroup(self): + q = self._queue.pop() + d = ''.join(['%s\0%d\n' % (x[0], x[1]) for x in q]) + self.entries.extend(q) + self.file.write(d) + self.file.flush() + + @active + def add(self, file, offset, data=None): + if file in self.map: + return + if self._queue: + self._queue[-1].append((file, offset, data)) + return + + self.entries.append((file, offset, data)) + self.map[file] = len(self.entries) - 1 + # add enough data to the journal to do the truncate + self.file.write("%s\0%d\n" % (file, offset)) + self.file.flush() + + @active + def find(self, file): + if file in self.map: + return self.entries[self.map[file]] + return None + + @active + def replace(self, file, offset, data=None): + ''' + replace can only replace already committed 
entries + that are not pending in the queue + ''' + + if file not in self.map: + raise KeyError(file) + index = self.map[file] + self.entries[index] = (file, offset, data) + self.file.write("%s\0%d\n" % (file, offset)) + self.file.flush() + + @active + def nest(self): + self.count += 1 + self.usages += 1 + return self + + def release(self): + if self.count > 0: + self.usages -= 1 + # if the transaction scopes are left without being closed, fail + if self.count > 0 and self.usages == 0: + self._abort() + + def running(self): + return self.count > 0 + + @active + def close(self): + '''commit the transaction''' + self.count -= 1 + if self.count != 0: + return + self.file.close() + self.entries = [] + if self.after: + self.after() + if os.path.isfile(self.journal): + util.unlink(self.journal) + self.journal = None + + @active + def abort(self): + '''abort the transaction (generally called on error, or when the + transaction is not explicitly committed before going out of + scope)''' + self._abort() + + def _abort(self): + self.count = 0 + self.usages = 0 + self.file.close() + + try: + if not self.entries: + if self.journal: + util.unlink(self.journal) + return + + self.report(_("transaction abort!\n")) + + try: + _playback(self.journal, self.report, self.opener, + self.entries, False) + self.report(_("rollback completed\n")) + except: + self.report(_("rollback failed - please run hg recover\n")) + finally: + self.journal = None + + +def rollback(opener, file, report): + entries = [] + + fp = util.posixfile(file) + lines = fp.readlines() + fp.close() + for l in lines: + f, o = l.split('\0') + entries.append((f, int(o), None)) + + _playback(file, report, opener, entries) diff --git a/websdk/mercurial/treediscovery.py b/websdk/mercurial/treediscovery.py new file mode 100644 index 0000000..49011c9 --- /dev/null +++ b/websdk/mercurial/treediscovery.py @@ -0,0 +1,150 @@ +# discovery.py - protocol changeset discovery functions +# +# Copyright 2010 Matt Mackall +# +# This 
software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +from node import nullid, short +from i18n import _ +import util, error + +def findcommonincoming(repo, remote, heads=None, force=False): + """Return a tuple (common, fetch, heads) used to identify the common + subset of nodes between repo and remote. + + "common" is a list of (at least) the heads of the common subset. + "fetch" is a list of roots of the nodes that would be incoming, to be + supplied to changegroupsubset. + "heads" is either the supplied heads, or else the remote's heads. + """ + + m = repo.changelog.nodemap + search = [] + fetch = set() + seen = set() + seenbranch = set() + base = set() + + if not heads: + heads = remote.heads() + + if repo.changelog.tip() == nullid: + base.add(nullid) + if heads != [nullid]: + return [nullid], [nullid], list(heads) + return [nullid], [], heads + + # assume we're closer to the tip than the root + # and start by examining the heads + repo.ui.status(_("searching for changes\n")) + + unknown = [] + for h in heads: + if h not in m: + unknown.append(h) + else: + base.add(h) + + if not unknown: + return list(base), [], list(heads) + + req = set(unknown) + reqcnt = 0 + + # search through remote branches + # a 'branch' here is a linear segment of history, with four parts: + # head, root, first parent, second parent + # (a branch always has two parents (or none) by definition) + unknown = remote.branches(unknown) + while unknown: + r = [] + while unknown: + n = unknown.pop(0) + if n[0] in seen: + continue + + repo.ui.debug("examining %s:%s\n" + % (short(n[0]), short(n[1]))) + if n[0] == nullid: # found the end of the branch + pass + elif n in seenbranch: + repo.ui.debug("branch already found\n") + continue + elif n[1] and n[1] in m: # do we know the base? 
+ repo.ui.debug("found incomplete branch %s:%s\n" + % (short(n[0]), short(n[1]))) + search.append(n[0:2]) # schedule branch range for scanning + seenbranch.add(n) + else: + if n[1] not in seen and n[1] not in fetch: + if n[2] in m and n[3] in m: + repo.ui.debug("found new changeset %s\n" % + short(n[1])) + fetch.add(n[1]) # earliest unknown + for p in n[2:4]: + if p in m: + base.add(p) # latest known + + for p in n[2:4]: + if p not in req and p not in m: + r.append(p) + req.add(p) + seen.add(n[0]) + + if r: + reqcnt += 1 + repo.ui.progress(_('searching'), reqcnt, unit=_('queries')) + repo.ui.debug("request %d: %s\n" % + (reqcnt, " ".join(map(short, r)))) + for p in xrange(0, len(r), 10): + for b in remote.branches(r[p:p + 10]): + repo.ui.debug("received %s:%s\n" % + (short(b[0]), short(b[1]))) + unknown.append(b) + + # do binary search on the branches we found + while search: + newsearch = [] + reqcnt += 1 + repo.ui.progress(_('searching'), reqcnt, unit=_('queries')) + for n, l in zip(search, remote.between(search)): + l.append(n[1]) + p = n[0] + f = 1 + for i in l: + repo.ui.debug("narrowing %d:%d %s\n" % (f, len(l), short(i))) + if i in m: + if f <= 2: + repo.ui.debug("found new branch changeset %s\n" % + short(p)) + fetch.add(p) + base.add(i) + else: + repo.ui.debug("narrowed branch search to %s:%s\n" + % (short(p), short(i))) + newsearch.append((p, i)) + break + p, f = i, f * 2 + search = newsearch + + # sanity check our fetch list + for f in fetch: + if f in m: + raise error.RepoError(_("already have changeset ") + + short(f[:4])) + + base = list(base) + if base == [nullid]: + if force: + repo.ui.warn(_("warning: repository is unrelated\n")) + else: + raise util.Abort(_("repository is unrelated")) + + repo.ui.debug("found new changesets starting at " + + " ".join([short(f) for f in fetch]) + "\n") + + repo.ui.progress(_('searching'), None) + repo.ui.debug("%d total queries\n" % reqcnt) + + return base, list(fetch), heads diff --git a/websdk/mercurial/ui.py 
b/websdk/mercurial/ui.py index 43fe077..ef76206 120000..100644 --- a/websdk/mercurial/ui.py +++ b/websdk/mercurial/ui.py @@ -1 +1,734 @@ -/usr/share/pyshared/mercurial/ui.py \ No newline at end of file +# ui.py - user interface bits for mercurial +# +# Copyright 2005-2007 Matt Mackall +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +from i18n import _ +import errno, getpass, os, socket, sys, tempfile, traceback +import config, scmutil, util, error + +class ui(object): + def __init__(self, src=None): + self._buffers = [] + self.quiet = self.verbose = self.debugflag = self.tracebackflag = False + self._reportuntrusted = True + self._ocfg = config.config() # overlay + self._tcfg = config.config() # trusted + self._ucfg = config.config() # untrusted + self._trustusers = set() + self._trustgroups = set() + + if src: + self.fout = src.fout + self.ferr = src.ferr + self.fin = src.fin + + self._tcfg = src._tcfg.copy() + self._ucfg = src._ucfg.copy() + self._ocfg = src._ocfg.copy() + self._trustusers = src._trustusers.copy() + self._trustgroups = src._trustgroups.copy() + self.environ = src.environ + self.fixconfig() + else: + self.fout = sys.stdout + self.ferr = sys.stderr + self.fin = sys.stdin + + # shared read-only environment + self.environ = os.environ + # we always trust global config files + for f in scmutil.rcpath(): + self.readconfig(f, trust=True) + + def copy(self): + return self.__class__(self) + + def _trusted(self, fp, f): + st = util.fstat(fp) + if util.isowner(st): + return True + + tusers, tgroups = self._trustusers, self._trustgroups + if '*' in tusers or '*' in tgroups: + return True + + user = util.username(st.st_uid) + group = util.groupname(st.st_gid) + if user in tusers or group in tgroups or user == util.username(): + return True + + if self._reportuntrusted: + self.warn(_('Not trusting file %s from untrusted ' + 'user %s, group %s\n') % (f, user, group)) + 
return False + + def readconfig(self, filename, root=None, trust=False, + sections=None, remap=None): + try: + fp = open(filename) + except IOError: + if not sections: # ignore unless we were looking for something + return + raise + + cfg = config.config() + trusted = sections or trust or self._trusted(fp, filename) + + try: + cfg.read(filename, fp, sections=sections, remap=remap) + fp.close() + except error.ConfigError, inst: + if trusted: + raise + self.warn(_("Ignored: %s\n") % str(inst)) + + if self.plain(): + for k in ('debug', 'fallbackencoding', 'quiet', 'slash', + 'logtemplate', 'style', + 'traceback', 'verbose'): + if k in cfg['ui']: + del cfg['ui'][k] + for k, v in cfg.items('defaults'): + del cfg['defaults'][k] + # Don't remove aliases from the configuration if in the exceptionlist + if self.plain('alias'): + for k, v in cfg.items('alias'): + del cfg['alias'][k] + + if trusted: + self._tcfg.update(cfg) + self._tcfg.update(self._ocfg) + self._ucfg.update(cfg) + self._ucfg.update(self._ocfg) + + if root is None: + root = os.path.expanduser('~') + self.fixconfig(root=root) + + def fixconfig(self, root=None, section=None): + if section in (None, 'paths'): + # expand vars and ~ + # translate paths relative to root (or home) into absolute paths + root = root or os.getcwd() + for c in self._tcfg, self._ucfg, self._ocfg: + for n, p in c.items('paths'): + if not p: + continue + if '%%' in p: + self.warn(_("(deprecated '%%' in path %s=%s from %s)\n") + % (n, p, self.configsource('paths', n))) + p = p.replace('%%', '%') + p = util.expandpath(p) + if not util.hasscheme(p) and not os.path.isabs(p): + p = os.path.normpath(os.path.join(root, p)) + c.set("paths", n, p) + + if section in (None, 'ui'): + # update ui options + self.debugflag = self.configbool('ui', 'debug') + self.verbose = self.debugflag or self.configbool('ui', 'verbose') + self.quiet = not self.debugflag and self.configbool('ui', 'quiet') + if self.verbose and self.quiet: + self.quiet = self.verbose = 
False + self._reportuntrusted = self.debugflag or self.configbool("ui", + "report_untrusted", True) + self.tracebackflag = self.configbool('ui', 'traceback', False) + + if section in (None, 'trusted'): + # update trust information + self._trustusers.update(self.configlist('trusted', 'users')) + self._trustgroups.update(self.configlist('trusted', 'groups')) + + def setconfig(self, section, name, value, overlay=True): + if overlay: + self._ocfg.set(section, name, value) + self._tcfg.set(section, name, value) + self._ucfg.set(section, name, value) + self.fixconfig(section=section) + + def _data(self, untrusted): + return untrusted and self._ucfg or self._tcfg + + def configsource(self, section, name, untrusted=False): + return self._data(untrusted).source(section, name) or 'none' + + def config(self, section, name, default=None, untrusted=False): + if isinstance(name, list): + alternates = name + else: + alternates = [name] + + for n in alternates: + value = self._data(untrusted).get(section, name, None) + if value is not None: + name = n + break + else: + value = default + + if self.debugflag and not untrusted and self._reportuntrusted: + uvalue = self._ucfg.get(section, name) + if uvalue is not None and uvalue != value: + self.debug("ignoring untrusted configuration option " + "%s.%s = %s\n" % (section, name, uvalue)) + return value + + def configpath(self, section, name, default=None, untrusted=False): + 'get a path config item, expanded relative to repo root or config file' + v = self.config(section, name, default, untrusted) + if v is None: + return None + if not os.path.isabs(v) or "://" not in v: + src = self.configsource(section, name, untrusted) + if ':' in src: + base = os.path.dirname(src.rsplit(':')[0]) + v = os.path.join(base, os.path.expanduser(v)) + return v + + def configbool(self, section, name, default=False, untrusted=False): + """parse a configuration element as a boolean + + >>> u = ui(); s = 'foo' + >>> u.setconfig(s, 'true', 'yes') + >>> 
u.configbool(s, 'true') + True + >>> u.setconfig(s, 'false', 'no') + >>> u.configbool(s, 'false') + False + >>> u.configbool(s, 'unknown') + False + >>> u.configbool(s, 'unknown', True) + True + >>> u.setconfig(s, 'invalid', 'somevalue') + >>> u.configbool(s, 'invalid') + Traceback (most recent call last): + ... + ConfigError: foo.invalid is not a boolean ('somevalue') + """ + + v = self.config(section, name, None, untrusted) + if v is None: + return default + if isinstance(v, bool): + return v + b = util.parsebool(v) + if b is None: + raise error.ConfigError(_("%s.%s is not a boolean ('%s')") + % (section, name, v)) + return b + + def configint(self, section, name, default=None, untrusted=False): + """parse a configuration element as an integer + + >>> u = ui(); s = 'foo' + >>> u.setconfig(s, 'int1', '42') + >>> u.configint(s, 'int1') + 42 + >>> u.setconfig(s, 'int2', '-42') + >>> u.configint(s, 'int2') + -42 + >>> u.configint(s, 'unknown', 7) + 7 + >>> u.setconfig(s, 'invalid', 'somevalue') + >>> u.configint(s, 'invalid') + Traceback (most recent call last): + ... 
+ ConfigError: foo.invalid is not an integer ('somevalue') + """ + + v = self.config(section, name, None, untrusted) + if v is None: + return default + try: + return int(v) + except ValueError: + raise error.ConfigError(_("%s.%s is not an integer ('%s')") + % (section, name, v)) + + def configlist(self, section, name, default=None, untrusted=False): + """parse a configuration element as a list of comma/space separated + strings + + >>> u = ui(); s = 'foo' + >>> u.setconfig(s, 'list1', 'this,is "a small" ,test') + >>> u.configlist(s, 'list1') + ['this', 'is', 'a small', 'test'] + """ + + def _parse_plain(parts, s, offset): + whitespace = False + while offset < len(s) and (s[offset].isspace() or s[offset] == ','): + whitespace = True + offset += 1 + if offset >= len(s): + return None, parts, offset + if whitespace: + parts.append('') + if s[offset] == '"' and not parts[-1]: + return _parse_quote, parts, offset + 1 + elif s[offset] == '"' and parts[-1][-1] == '\\': + parts[-1] = parts[-1][:-1] + s[offset] + return _parse_plain, parts, offset + 1 + parts[-1] += s[offset] + return _parse_plain, parts, offset + 1 + + def _parse_quote(parts, s, offset): + if offset < len(s) and s[offset] == '"': # "" + parts.append('') + offset += 1 + while offset < len(s) and (s[offset].isspace() or + s[offset] == ','): + offset += 1 + return _parse_plain, parts, offset + + while offset < len(s) and s[offset] != '"': + if (s[offset] == '\\' and offset + 1 < len(s) + and s[offset + 1] == '"'): + offset += 1 + parts[-1] += '"' + else: + parts[-1] += s[offset] + offset += 1 + + if offset >= len(s): + real_parts = _configlist(parts[-1]) + if not real_parts: + parts[-1] = '"' + else: + real_parts[0] = '"' + real_parts[0] + parts = parts[:-1] + parts.extend(real_parts) + return None, parts, offset + + offset += 1 + while offset < len(s) and s[offset] in [' ', ',']: + offset += 1 + + if offset < len(s): + if offset + 1 == len(s) and s[offset] == '"': + parts[-1] += '"' + offset += 1 + else: + 
parts.append('') + else: + return None, parts, offset + + return _parse_plain, parts, offset + + def _configlist(s): + s = s.rstrip(' ,') + if not s: + return [] + parser, parts, offset = _parse_plain, [''], 0 + while parser: + parser, parts, offset = parser(parts, s, offset) + return parts + + result = self.config(section, name, untrusted=untrusted) + if result is None: + result = default or [] + if isinstance(result, basestring): + result = _configlist(result.lstrip(' ,\n')) + if result is None: + result = default or [] + return result + + def has_section(self, section, untrusted=False): + '''tell whether section exists in config.''' + return section in self._data(untrusted) + + def configitems(self, section, untrusted=False): + items = self._data(untrusted).items(section) + if self.debugflag and not untrusted and self._reportuntrusted: + for k, v in self._ucfg.items(section): + if self._tcfg.get(section, k) != v: + self.debug("ignoring untrusted configuration option " + "%s.%s = %s\n" % (section, k, v)) + return items + + def walkconfig(self, untrusted=False): + cfg = self._data(untrusted) + for section in cfg.sections(): + for name, value in self.configitems(section, untrusted): + yield section, name, value + + def plain(self, feature=None): + '''is plain mode active? + + Plain mode means that all configuration variables which affect + the behavior and output of Mercurial should be + ignored. Additionally, the output should be stable, + reproducible and suitable for use in scripts or applications. + + The only way to trigger plain mode is by setting either the + `HGPLAIN' or `HGPLAINEXCEPT' environment variables. 
+ + The return value can either be + - False if HGPLAIN is not set, or feature is in HGPLAINEXCEPT + - True otherwise + ''' + if 'HGPLAIN' not in os.environ and 'HGPLAINEXCEPT' not in os.environ: + return False + exceptions = os.environ.get('HGPLAINEXCEPT', '').strip().split(',') + if feature and exceptions: + return feature not in exceptions + return True + + def username(self): + """Return default username to be used in commits. + + Searched in this order: $HGUSER, [ui] section of hgrcs, $EMAIL + and stop searching if one of these is set. + If not found and ui.askusername is True, ask the user, else use + ($LOGNAME or $USER or $LNAME or $USERNAME) + "@full.hostname". + """ + user = os.environ.get("HGUSER") + if user is None: + user = self.config("ui", "username") + if user is not None: + user = os.path.expandvars(user) + if user is None: + user = os.environ.get("EMAIL") + if user is None and self.configbool("ui", "askusername"): + user = self.prompt(_("enter a commit username:"), default=None) + if user is None and not self.interactive(): + try: + user = '%s@%s' % (util.getuser(), socket.getfqdn()) + self.warn(_("No username found, using '%s' instead\n") % user) + except KeyError: + pass + if not user: + raise util.Abort(_('no username supplied (see "hg help config")')) + if "\n" in user: + raise util.Abort(_("username %s contains a newline\n") % repr(user)) + return user + + def shortuser(self, user): + """Return a short representation of a user name or email address.""" + if not self.verbose: + user = util.shortuser(user) + return user + + def expandpath(self, loc, default=None): + """Return repository location relative to cwd or from [paths]""" + if util.hasscheme(loc) or os.path.isdir(os.path.join(loc, '.hg')): + return loc + + path = self.config('paths', loc) + if not path and default is not None: + path = self.config('paths', default) + return path or loc + + def pushbuffer(self): + self._buffers.append([]) + + def popbuffer(self, labeled=False): + '''pop 
the last buffer and return the buffered output + + If labeled is True, any labels associated with buffered + output will be handled. By default, this has no effect + on the output returned, but extensions and GUI tools may + handle this argument and returned styled output. If output + is being buffered so it can be captured and parsed or + processed, labeled should not be set to True. + ''' + return "".join(self._buffers.pop()) + + def write(self, *args, **opts): + '''write args to output + + By default, this method simply writes to the buffer or stdout, + but extensions or GUI tools may override this method, + write_err(), popbuffer(), and label() to style output from + various parts of hg. + + An optional keyword argument, "label", can be passed in. + This should be a string containing label names separated by + space. Label names take the form of "topic.type". For example, + ui.debug() issues a label of "ui.debug". + + When labeling output for a specific command, a label of + "cmdname.type" is recommended. For example, status issues + a label of "status.modified" for modified files. + ''' + if self._buffers: + self._buffers[-1].extend([str(a) for a in args]) + else: + for a in args: + self.fout.write(str(a)) + + def write_err(self, *args, **opts): + try: + if not getattr(self.fout, 'closed', False): + self.fout.flush() + for a in args: + self.ferr.write(str(a)) + # stderr may be buffered under win32 when redirected to files, + # including stdout. + if not getattr(self.ferr, 'closed', False): + self.ferr.flush() + except IOError, inst: + if inst.errno not in (errno.EPIPE, errno.EIO): + raise + + def flush(self): + try: self.fout.flush() + except: pass + try: self.ferr.flush() + except: pass + + def interactive(self): + '''is interactive input allowed? + + An interactive session is a session where input can be reasonably read + from `sys.stdin'. 
If this function returns false, any attempt to read + from stdin should fail with an error, unless a sensible default has been + specified. + + Interactiveness is triggered by the value of the `ui.interactive' + configuration variable or - if it is unset - when `sys.stdin' points + to a terminal device. + + This function refers to input only; for output, see `ui.formatted()'. + ''' + i = self.configbool("ui", "interactive", None) + if i is None: + # some environments replace stdin without implementing isatty + # usually those are non-interactive + return util.isatty(self.fin) + + return i + + def termwidth(self): + '''how wide is the terminal in columns? + ''' + if 'COLUMNS' in os.environ: + try: + return int(os.environ['COLUMNS']) + except ValueError: + pass + return util.termwidth() + + def formatted(self): + '''should formatted output be used? + + It is often desirable to format the output to suite the output medium. + Examples of this are truncating long lines or colorizing messages. + However, this is not often not desirable when piping output into other + utilities, e.g. `grep'. + + Formatted output is triggered by the value of the `ui.formatted' + configuration variable or - if it is unset - when `sys.stdout' points + to a terminal device. Please note that `ui.formatted' should be + considered an implementation detail; it is not intended for use outside + Mercurial or its extensions. + + This function refers to output only; for input, see `ui.interactive()'. + This function always returns false when in plain mode, see `ui.plain()'. 
+ ''' + if self.plain(): + return False + + i = self.configbool("ui", "formatted", None) + if i is None: + # some environments replace stdout without implementing isatty + # usually those are non-interactive + return util.isatty(self.fout) + + return i + + def _readline(self, prompt=''): + if util.isatty(self.fin): + try: + # magically add command line editing support, where + # available + import readline + # force demandimport to really load the module + readline.read_history_file + # windows sometimes raises something other than ImportError + except Exception: + pass + + # call write() so output goes through subclassed implementation + # e.g. color extension on Windows + self.write(prompt) + + # instead of trying to emulate raw_input, swap (self.fin, + # self.fout) with (sys.stdin, sys.stdout) + oldin = sys.stdin + oldout = sys.stdout + sys.stdin = self.fin + sys.stdout = self.fout + line = raw_input(' ') + sys.stdin = oldin + sys.stdout = oldout + + # When stdin is in binary mode on Windows, it can cause + # raw_input() to emit an extra trailing carriage return + if os.linesep == '\r\n' and line and line[-1] == '\r': + line = line[:-1] + return line + + def prompt(self, msg, default="y"): + """Prompt user with msg, read response. + If ui is not interactive, the default is returned. + """ + if not self.interactive(): + self.write(msg, ' ', default, "\n") + return default + try: + r = self._readline(self.label(msg, 'ui.prompt')) + if not r: + return default + return r + except EOFError: + raise util.Abort(_('response expected')) + + def promptchoice(self, msg, choices, default=0): + """Prompt user with msg, read response, and ensure it matches + one of the provided choices. The index of the choice is returned. + choices is a sequence of acceptable responses with the format: + ('&None', 'E&xec', 'Sym&link') Responses are case insensitive. + If ui is not interactive, the default is returned. 
+ """ + resps = [s[s.index('&')+1].lower() for s in choices] + while True: + r = self.prompt(msg, resps[default]) + if r.lower() in resps: + return resps.index(r.lower()) + self.write(_("unrecognized response\n")) + + def getpass(self, prompt=None, default=None): + if not self.interactive(): + return default + try: + return getpass.getpass(prompt or _('password: ')) + except EOFError: + raise util.Abort(_('response expected')) + def status(self, *msg, **opts): + '''write status message to output (if ui.quiet is False) + + This adds an output label of "ui.status". + ''' + if not self.quiet: + opts['label'] = opts.get('label', '') + ' ui.status' + self.write(*msg, **opts) + def warn(self, *msg, **opts): + '''write warning message to output (stderr) + + This adds an output label of "ui.warning". + ''' + opts['label'] = opts.get('label', '') + ' ui.warning' + self.write_err(*msg, **opts) + def note(self, *msg, **opts): + '''write note to output (if ui.verbose is True) + + This adds an output label of "ui.note". + ''' + if self.verbose: + opts['label'] = opts.get('label', '') + ' ui.note' + self.write(*msg, **opts) + def debug(self, *msg, **opts): + '''write debug message to output (if ui.debugflag is True) + + This adds an output label of "ui.debug". + ''' + if self.debugflag: + opts['label'] = opts.get('label', '') + ' ui.debug' + self.write(*msg, **opts) + def edit(self, text, user): + (fd, name) = tempfile.mkstemp(prefix="hg-editor-", suffix=".txt", + text=True) + try: + f = os.fdopen(fd, "w") + f.write(text) + f.close() + + editor = self.geteditor() + + util.system("%s \"%s\"" % (editor, name), + environ={'HGUSER': user}, + onerr=util.Abort, errprefix=_("edit failed"), + out=self.fout) + + f = open(name) + t = f.read() + f.close() + finally: + os.unlink(name) + + return t + + def traceback(self, exc=None): + '''print exception traceback if traceback printing enabled. + only to call in exception handler. 
returns true if traceback + printed.''' + if self.tracebackflag: + if exc: + traceback.print_exception(exc[0], exc[1], exc[2], file=self.ferr) + else: + traceback.print_exc(file=self.ferr) + return self.tracebackflag + + def geteditor(self): + '''return editor to use''' + return (os.environ.get("HGEDITOR") or + self.config("ui", "editor") or + os.environ.get("VISUAL") or + os.environ.get("EDITOR", "vi")) + + def progress(self, topic, pos, item="", unit="", total=None): + '''show a progress message + + With stock hg, this is simply a debug message that is hidden + by default, but with extensions or GUI tools it may be + visible. 'topic' is the current operation, 'item' is a + non-numeric marker of the current position (ie the currently + in-process file), 'pos' is the current numeric position (ie + revision, bytes, etc.), unit is a corresponding unit label, + and total is the highest expected pos. + + Multiple nested topics may be active at a time. + + All topics should be marked closed by setting pos to None at + termination. + ''' + + if pos is None or not self.debugflag: + return + + if unit: + unit = ' ' + unit + if item: + item = ' ' + item + + if total: + pct = 100.0 * pos / total + self.debug('%s:%s %s/%s%s (%4.2f%%)\n' + % (topic, item, pos, total, unit, pct)) + else: + self.debug('%s:%s %s%s\n' % (topic, item, pos, unit)) + + def log(self, service, message): + '''hook for logging facility extensions + + service should be a readily-identifiable subsystem, which will + allow filtering. + message should be a newline-terminated string to log. + ''' + pass + + def label(self, msg, label): + '''style msg based on supplied label + + Like ui.write(), this just returns msg unchanged, but extensions + and GUI tools can override it to allow styling output without + writing it. + + ui.write(s, 'label') is equivalent to + ui.write(ui.label(s, 'label')). 
+ ''' + return msg diff --git a/websdk/mercurial/url.py b/websdk/mercurial/url.py index e2784a9..2430d3a 120000..100644 --- a/websdk/mercurial/url.py +++ b/websdk/mercurial/url.py @@ -1 +1,473 @@ -/usr/share/pyshared/mercurial/url.py \ No newline at end of file +# url.py - HTTP handling for mercurial +# +# Copyright 2005, 2006, 2007, 2008 Matt Mackall +# Copyright 2006, 2007 Alexis S. L. Carvalho +# Copyright 2006 Vadim Gelfer +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +import urllib, urllib2, httplib, os, socket, cStringIO +from i18n import _ +import keepalive, util, sslutil +import httpconnection as httpconnectionmod + +class passwordmgr(urllib2.HTTPPasswordMgrWithDefaultRealm): + def __init__(self, ui): + urllib2.HTTPPasswordMgrWithDefaultRealm.__init__(self) + self.ui = ui + + def find_user_password(self, realm, authuri): + authinfo = urllib2.HTTPPasswordMgrWithDefaultRealm.find_user_password( + self, realm, authuri) + user, passwd = authinfo + if user and passwd: + self._writedebug(user, passwd) + return (user, passwd) + + if not user or not passwd: + res = httpconnectionmod.readauthforuri(self.ui, authuri, user) + if res: + group, auth = res + user, passwd = auth.get('username'), auth.get('password') + self.ui.debug("using auth.%s.* for authentication\n" % group) + if not user or not passwd: + if not self.ui.interactive(): + raise util.Abort(_('http authorization required')) + + self.ui.write(_("http authorization required\n")) + self.ui.write(_("realm: %s\n") % realm) + if user: + self.ui.write(_("user: %s\n") % user) + else: + user = self.ui.prompt(_("user:"), default=None) + + if not passwd: + passwd = self.ui.getpass() + + self.add_password(realm, authuri, user, passwd) + self._writedebug(user, passwd) + return (user, passwd) + + def _writedebug(self, user, passwd): + msg = _('http auth: user %s, password %s\n') + self.ui.debug(msg % (user, passwd and '*' * 
len(passwd) or 'not set')) + + def find_stored_password(self, authuri): + return urllib2.HTTPPasswordMgrWithDefaultRealm.find_user_password( + self, None, authuri) + +class proxyhandler(urllib2.ProxyHandler): + def __init__(self, ui): + proxyurl = ui.config("http_proxy", "host") or os.getenv('http_proxy') + # XXX proxyauthinfo = None + + if proxyurl: + # proxy can be proper url or host[:port] + if not (proxyurl.startswith('http:') or + proxyurl.startswith('https:')): + proxyurl = 'http://' + proxyurl + '/' + proxy = util.url(proxyurl) + if not proxy.user: + proxy.user = ui.config("http_proxy", "user") + proxy.passwd = ui.config("http_proxy", "passwd") + + # see if we should use a proxy for this url + no_list = ["localhost", "127.0.0.1"] + no_list.extend([p.lower() for + p in ui.configlist("http_proxy", "no")]) + no_list.extend([p.strip().lower() for + p in os.getenv("no_proxy", '').split(',') + if p.strip()]) + # "http_proxy.always" config is for running tests on localhost + if ui.configbool("http_proxy", "always"): + self.no_list = [] + else: + self.no_list = no_list + + proxyurl = str(proxy) + proxies = {'http': proxyurl, 'https': proxyurl} + ui.debug('proxying through http://%s:%s\n' % + (proxy.host, proxy.port)) + else: + proxies = {} + + # urllib2 takes proxy values from the environment and those + # will take precedence if found. 
So, if there's a config entry + # defining a proxy, drop the environment ones + if ui.config("http_proxy", "host"): + for env in ["HTTP_PROXY", "http_proxy", "no_proxy"]: + try: + if env in os.environ: + del os.environ[env] + except OSError: + pass + + urllib2.ProxyHandler.__init__(self, proxies) + self.ui = ui + + def proxy_open(self, req, proxy, type_): + host = req.get_host().split(':')[0] + if host in self.no_list: + return None + + # work around a bug in Python < 2.4.2 + # (it leaves a "\n" at the end of Proxy-authorization headers) + baseclass = req.__class__ + class _request(baseclass): + def add_header(self, key, val): + if key.lower() == 'proxy-authorization': + val = val.strip() + return baseclass.add_header(self, key, val) + req.__class__ = _request + + return urllib2.ProxyHandler.proxy_open(self, req, proxy, type_) + +def _gen_sendfile(orgsend): + def _sendfile(self, data): + # send a file + if isinstance(data, httpconnectionmod.httpsendfile): + # if auth required, some data sent twice, so rewind here + data.seek(0) + for chunk in util.filechunkiter(data): + orgsend(self, chunk) + else: + orgsend(self, data) + return _sendfile + +has_https = util.safehasattr(urllib2, 'HTTPSHandler') +if has_https: + try: + _create_connection = socket.create_connection + except AttributeError: + _GLOBAL_DEFAULT_TIMEOUT = object() + + def _create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT, + source_address=None): + # lifted from Python 2.6 + + msg = "getaddrinfo returns an empty list" + host, port = address + for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM): + af, socktype, proto, canonname, sa = res + sock = None + try: + sock = socket.socket(af, socktype, proto) + if timeout is not _GLOBAL_DEFAULT_TIMEOUT: + sock.settimeout(timeout) + if source_address: + sock.bind(source_address) + sock.connect(sa) + return sock + + except socket.error, msg: + if sock is not None: + sock.close() + + raise socket.error, msg + +class 
httpconnection(keepalive.HTTPConnection): + # must be able to send big bundle as stream. + send = _gen_sendfile(keepalive.HTTPConnection.send) + + def connect(self): + if has_https and self.realhostport: # use CONNECT proxy + self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + self.sock.connect((self.host, self.port)) + if _generic_proxytunnel(self): + # we do not support client x509 certificates + self.sock = sslutil.ssl_wrap_socket(self.sock, None, None) + else: + keepalive.HTTPConnection.connect(self) + + def getresponse(self): + proxyres = getattr(self, 'proxyres', None) + if proxyres: + if proxyres.will_close: + self.close() + self.proxyres = None + return proxyres + return keepalive.HTTPConnection.getresponse(self) + +# general transaction handler to support different ways to handle +# HTTPS proxying before and after Python 2.6.3. +def _generic_start_transaction(handler, h, req): + tunnel_host = getattr(req, '_tunnel_host', None) + if tunnel_host: + if tunnel_host[:7] not in ['http://', 'https:/']: + tunnel_host = 'https://' + tunnel_host + new_tunnel = True + else: + tunnel_host = req.get_selector() + new_tunnel = False + + if new_tunnel or tunnel_host == req.get_full_url(): # has proxy + u = util.url(tunnel_host) + if new_tunnel or u.scheme == 'https': # only use CONNECT for HTTPS + h.realhostport = ':'.join([u.host, (u.port or '443')]) + h.headers = req.headers.copy() + h.headers.update(handler.parent.addheaders) + return + + h.realhostport = None + h.headers = None + +def _generic_proxytunnel(self): + proxyheaders = dict( + [(x, self.headers[x]) for x in self.headers + if x.lower().startswith('proxy-')]) + self._set_hostport(self.host, self.port) + self.send('CONNECT %s HTTP/1.0\r\n' % self.realhostport) + for header in proxyheaders.iteritems(): + self.send('%s: %s\r\n' % header) + self.send('\r\n') + + # majority of the following code is duplicated from + # httplib.HTTPConnection as there are no adequate places to + # override functions to 
provide the needed functionality + res = self.response_class(self.sock, + strict=self.strict, + method=self._method) + + while True: + version, status, reason = res._read_status() + if status != httplib.CONTINUE: + break + while True: + skip = res.fp.readline().strip() + if not skip: + break + res.status = status + res.reason = reason.strip() + + if res.status == 200: + while True: + line = res.fp.readline() + if line == '\r\n': + break + return True + + if version == 'HTTP/1.0': + res.version = 10 + elif version.startswith('HTTP/1.'): + res.version = 11 + elif version == 'HTTP/0.9': + res.version = 9 + else: + raise httplib.UnknownProtocol(version) + + if res.version == 9: + res.length = None + res.chunked = 0 + res.will_close = 1 + res.msg = httplib.HTTPMessage(cStringIO.StringIO()) + return False + + res.msg = httplib.HTTPMessage(res.fp) + res.msg.fp = None + + # are we using the chunked-style of transfer encoding? + trenc = res.msg.getheader('transfer-encoding') + if trenc and trenc.lower() == "chunked": + res.chunked = 1 + res.chunk_left = None + else: + res.chunked = 0 + + # will the connection close at the end of the response? + res.will_close = res._check_close() + + # do we have a Content-Length? + # NOTE: RFC 2616, S4.4, #3 says we ignore this if tr_enc is "chunked" + length = res.msg.getheader('content-length') + if length and not res.chunked: + try: + res.length = int(length) + except ValueError: + res.length = None + else: + if res.length < 0: # ignore nonsensical negative lengths + res.length = None + else: + res.length = None + + # does the body have a fixed length? (of zero) + if (status == httplib.NO_CONTENT or status == httplib.NOT_MODIFIED or + 100 <= status < 200 or # 1xx codes + res._method == 'HEAD'): + res.length = 0 + + # if the connection remains open, and we aren't using chunked, and + # a content-length was not provided, then assume that the connection + # WILL close. 
+ if (not res.will_close and + not res.chunked and + res.length is None): + res.will_close = 1 + + self.proxyres = res + + return False + +class httphandler(keepalive.HTTPHandler): + def http_open(self, req): + return self.do_open(httpconnection, req) + + def _start_transaction(self, h, req): + _generic_start_transaction(self, h, req) + return keepalive.HTTPHandler._start_transaction(self, h, req) + +if has_https: + class httpsconnection(httplib.HTTPSConnection): + response_class = keepalive.HTTPResponse + # must be able to send big bundle as stream. + send = _gen_sendfile(keepalive.safesend) + getresponse = keepalive.wrapgetresponse(httplib.HTTPSConnection) + + def connect(self): + self.sock = _create_connection((self.host, self.port)) + + host = self.host + if self.realhostport: # use CONNECT proxy + _generic_proxytunnel(self) + host = self.realhostport.rsplit(':', 1)[0] + self.sock = sslutil.ssl_wrap_socket( + self.sock, self.key_file, self.cert_file, + **sslutil.sslkwargs(self.ui, host)) + sslutil.validator(self.ui, host)(self.sock) + + class httpshandler(keepalive.KeepAliveHandler, urllib2.HTTPSHandler): + def __init__(self, ui): + keepalive.KeepAliveHandler.__init__(self) + urllib2.HTTPSHandler.__init__(self) + self.ui = ui + self.pwmgr = passwordmgr(self.ui) + + def _start_transaction(self, h, req): + _generic_start_transaction(self, h, req) + return keepalive.KeepAliveHandler._start_transaction(self, h, req) + + def https_open(self, req): + # req.get_full_url() does not contain credentials and we may + # need them to match the certificates. 
+ url = req.get_full_url() + user, password = self.pwmgr.find_stored_password(url) + res = httpconnectionmod.readauthforuri(self.ui, url, user) + if res: + group, auth = res + self.auth = auth + self.ui.debug("using auth.%s.* for authentication\n" % group) + else: + self.auth = None + return self.do_open(self._makeconnection, req) + + def _makeconnection(self, host, port=None, *args, **kwargs): + keyfile = None + certfile = None + + if len(args) >= 1: # key_file + keyfile = args[0] + if len(args) >= 2: # cert_file + certfile = args[1] + args = args[2:] + + # if the user has specified different key/cert files in + # hgrc, we prefer these + if self.auth and 'key' in self.auth and 'cert' in self.auth: + keyfile = self.auth['key'] + certfile = self.auth['cert'] + + conn = httpsconnection(host, port, keyfile, certfile, *args, **kwargs) + conn.ui = self.ui + return conn + +class httpdigestauthhandler(urllib2.HTTPDigestAuthHandler): + def __init__(self, *args, **kwargs): + urllib2.HTTPDigestAuthHandler.__init__(self, *args, **kwargs) + self.retried_req = None + + def reset_retry_count(self): + # Python 2.6.5 will call this on 401 or 407 errors and thus loop + # forever. We disable reset_retry_count completely and reset in + # http_error_auth_reqed instead. + pass + + def http_error_auth_reqed(self, auth_header, host, req, headers): + # Reset the retry counter once for each request. + if req is not self.retried_req: + self.retried_req = req + self.retried = 0 + # In python < 2.5 AbstractDigestAuthHandler raises a ValueError if + # it doesn't know about the auth type requested. This can happen if + # somebody is using BasicAuth and types a bad password. 
+ try: + return urllib2.HTTPDigestAuthHandler.http_error_auth_reqed( + self, auth_header, host, req, headers) + except ValueError, inst: + arg = inst.args[0] + if arg.startswith("AbstractDigestAuthHandler doesn't know "): + return + raise + +class httpbasicauthhandler(urllib2.HTTPBasicAuthHandler): + def __init__(self, *args, **kwargs): + urllib2.HTTPBasicAuthHandler.__init__(self, *args, **kwargs) + self.retried_req = None + + def reset_retry_count(self): + # Python 2.6.5 will call this on 401 or 407 errors and thus loop + # forever. We disable reset_retry_count completely and reset in + # http_error_auth_reqed instead. + pass + + def http_error_auth_reqed(self, auth_header, host, req, headers): + # Reset the retry counter once for each request. + if req is not self.retried_req: + self.retried_req = req + self.retried = 0 + return urllib2.HTTPBasicAuthHandler.http_error_auth_reqed( + self, auth_header, host, req, headers) + +handlerfuncs = [] + +def opener(ui, authinfo=None): + ''' + construct an opener suitable for urllib2 + authinfo will be added to the password manager + ''' + if ui.configbool('ui', 'usehttp2', False): + handlers = [httpconnectionmod.http2handler(ui, passwordmgr(ui))] + else: + handlers = [httphandler()] + if has_https: + handlers.append(httpshandler(ui)) + + handlers.append(proxyhandler(ui)) + + passmgr = passwordmgr(ui) + if authinfo is not None: + passmgr.add_password(*authinfo) + user, passwd = authinfo[2:4] + ui.debug('http auth: user %s, password %s\n' % + (user, passwd and '*' * len(passwd) or 'not set')) + + handlers.extend((httpbasicauthhandler(passmgr), + httpdigestauthhandler(passmgr))) + handlers.extend([h(ui, passmgr) for h in handlerfuncs]) + opener = urllib2.build_opener(*handlers) + + # 1.0 here is the _protocol_ version + opener.addheaders = [('User-agent', 'mercurial/proto-1.0')] + opener.addheaders.append(('Accept', 'application/mercurial-0.1')) + return opener + +def open(ui, url_, data=None): + u = util.url(url_) + if 
u.scheme: + u.scheme = u.scheme.lower() + url_, authinfo = u.authinfo() + else: + path = util.normpath(os.path.abspath(url_)) + url_ = 'file://' + urllib.pathname2url(path) + authinfo = None + return opener(ui, authinfo).open(url_, data) diff --git a/websdk/mercurial/util.py b/websdk/mercurial/util.py index e86996a..7366614 120000..100644 --- a/websdk/mercurial/util.py +++ b/websdk/mercurial/util.py @@ -1 +1,1741 @@ -/usr/share/pyshared/mercurial/util.py \ No newline at end of file +# util.py - Mercurial utility functions and platform specfic implementations +# +# Copyright 2005 K. Thananchayan +# Copyright 2005-2007 Matt Mackall +# Copyright 2006 Vadim Gelfer +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +"""Mercurial utility functions and platform specfic implementations. + +This contains helper routines that are independent of the SCM core and +hide platform-specific details from the core. 
+""" + +from i18n import _ +import error, osutil, encoding +import errno, re, shutil, sys, tempfile, traceback +import os, time, datetime, calendar, textwrap, signal +import imp, socket, urllib + +if os.name == 'nt': + import windows as platform +else: + import posix as platform + +cachestat = platform.cachestat +checkexec = platform.checkexec +checklink = platform.checklink +copymode = platform.copymode +executablepath = platform.executablepath +expandglobs = platform.expandglobs +explainexit = platform.explainexit +findexe = platform.findexe +gethgcmd = platform.gethgcmd +getuser = platform.getuser +groupmembers = platform.groupmembers +groupname = platform.groupname +hidewindow = platform.hidewindow +isexec = platform.isexec +isowner = platform.isowner +localpath = platform.localpath +lookupreg = platform.lookupreg +makedir = platform.makedir +nlinks = platform.nlinks +normpath = platform.normpath +normcase = platform.normcase +nulldev = platform.nulldev +openhardlinks = platform.openhardlinks +oslink = platform.oslink +parsepatchoutput = platform.parsepatchoutput +pconvert = platform.pconvert +popen = platform.popen +posixfile = platform.posixfile +quotecommand = platform.quotecommand +realpath = platform.realpath +rename = platform.rename +samedevice = platform.samedevice +samefile = platform.samefile +samestat = platform.samestat +setbinary = platform.setbinary +setflags = platform.setflags +setsignalhandler = platform.setsignalhandler +shellquote = platform.shellquote +spawndetached = platform.spawndetached +sshargs = platform.sshargs +statfiles = platform.statfiles +termwidth = platform.termwidth +testpid = platform.testpid +umask = platform.umask +unlink = platform.unlink +unlinkpath = platform.unlinkpath +username = platform.username + +# Python compatibility + +def sha1(s=''): + ''' + Low-overhead wrapper around Python's SHA support + + >>> f = _fastsha1 + >>> a = sha1() + >>> a = f() + >>> a.hexdigest() + 'da39a3ee5e6b4b0d3255bfef95601890afd80709' + ''' 
+ + return _fastsha1(s) + +_notset = object() +def safehasattr(thing, attr): + return getattr(thing, attr, _notset) is not _notset + +def _fastsha1(s=''): + # This function will import sha1 from hashlib or sha (whichever is + # available) and overwrite itself with it on the first call. + # Subsequent calls will go directly to the imported function. + if sys.version_info >= (2, 5): + from hashlib import sha1 as _sha1 + else: + from sha import sha as _sha1 + global _fastsha1, sha1 + _fastsha1 = sha1 = _sha1 + return _sha1(s) + +import __builtin__ + +if sys.version_info[0] < 3: + def fakebuffer(sliceable, offset=0): + return sliceable[offset:] +else: + def fakebuffer(sliceable, offset=0): + return memoryview(sliceable)[offset:] +try: + buffer +except NameError: + __builtin__.buffer = fakebuffer + +import subprocess +closefds = os.name == 'posix' + +def popen2(cmd, env=None, newlines=False): + # Setting bufsize to -1 lets the system decide the buffer size. + # The default for bufsize is 0, meaning unbuffered. 
This leads to + # poor performance on Mac OS X: http://bugs.python.org/issue4194 + p = subprocess.Popen(cmd, shell=True, bufsize=-1, + close_fds=closefds, + stdin=subprocess.PIPE, stdout=subprocess.PIPE, + universal_newlines=newlines, + env=env) + return p.stdin, p.stdout + +def popen3(cmd, env=None, newlines=False): + p = subprocess.Popen(cmd, shell=True, bufsize=-1, + close_fds=closefds, + stdin=subprocess.PIPE, stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + universal_newlines=newlines, + env=env) + return p.stdin, p.stdout, p.stderr + +def version(): + """Return version information if available.""" + try: + import __version__ + return __version__.version + except ImportError: + return 'unknown' + +# used by parsedate +defaultdateformats = ( + '%Y-%m-%d %H:%M:%S', + '%Y-%m-%d %I:%M:%S%p', + '%Y-%m-%d %H:%M', + '%Y-%m-%d %I:%M%p', + '%Y-%m-%d', + '%m-%d', + '%m/%d', + '%m/%d/%y', + '%m/%d/%Y', + '%a %b %d %H:%M:%S %Y', + '%a %b %d %I:%M:%S%p %Y', + '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822" + '%b %d %H:%M:%S %Y', + '%b %d %I:%M:%S%p %Y', + '%b %d %H:%M:%S', + '%b %d %I:%M:%S%p', + '%b %d %H:%M', + '%b %d %I:%M%p', + '%b %d %Y', + '%b %d', + '%H:%M:%S', + '%I:%M:%S%p', + '%H:%M', + '%I:%M%p', +) + +extendeddateformats = defaultdateformats + ( + "%Y", + "%Y-%m", + "%b", + "%b %Y", + ) + +def cachefunc(func): + '''cache the result of function calls''' + # XXX doesn't handle keywords args + cache = {} + if func.func_code.co_argcount == 1: + # we gain a small amount of time because + # we don't need to pack/unpack the list + def f(arg): + if arg not in cache: + cache[arg] = func(arg) + return cache[arg] + else: + def f(*args): + if args not in cache: + cache[args] = func(*args) + return cache[args] + + return f + +def lrucachefunc(func): + '''cache most recent results of function calls''' + cache = {} + order = [] + if func.func_code.co_argcount == 1: + def f(arg): + if arg not in cache: + if len(cache) > 20: + del cache[order.pop(0)] + 
cache[arg] = func(arg) + else: + order.remove(arg) + order.append(arg) + return cache[arg] + else: + def f(*args): + if args not in cache: + if len(cache) > 20: + del cache[order.pop(0)] + cache[args] = func(*args) + else: + order.remove(args) + order.append(args) + return cache[args] + + return f + +class propertycache(object): + def __init__(self, func): + self.func = func + self.name = func.__name__ + def __get__(self, obj, type=None): + result = self.func(obj) + setattr(obj, self.name, result) + return result + +def pipefilter(s, cmd): + '''filter string S through command CMD, returning its output''' + p = subprocess.Popen(cmd, shell=True, close_fds=closefds, + stdin=subprocess.PIPE, stdout=subprocess.PIPE) + pout, perr = p.communicate(s) + return pout + +def tempfilter(s, cmd): + '''filter string S through a pair of temporary files with CMD. + CMD is used as a template to create the real command to be run, + with the strings INFILE and OUTFILE replaced by the real names of + the temporary files generated.''' + inname, outname = None, None + try: + infd, inname = tempfile.mkstemp(prefix='hg-filter-in-') + fp = os.fdopen(infd, 'wb') + fp.write(s) + fp.close() + outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-') + os.close(outfd) + cmd = cmd.replace('INFILE', inname) + cmd = cmd.replace('OUTFILE', outname) + code = os.system(cmd) + if sys.platform == 'OpenVMS' and code & 1: + code = 0 + if code: + raise Abort(_("command '%s' failed: %s") % + (cmd, explainexit(code))) + fp = open(outname, 'rb') + r = fp.read() + fp.close() + return r + finally: + try: + if inname: + os.unlink(inname) + except OSError: + pass + try: + if outname: + os.unlink(outname) + except OSError: + pass + +filtertable = { + 'tempfile:': tempfilter, + 'pipe:': pipefilter, + } + +def filter(s, cmd): + "filter a string through a command that transforms its input to its output" + for name, fn in filtertable.iteritems(): + if cmd.startswith(name): + return fn(s, cmd[len(name):].lstrip()) + 
return pipefilter(s, cmd) + +def binary(s): + """return true if a string is binary data""" + return bool(s and '\0' in s) + +def increasingchunks(source, min=1024, max=65536): + '''return no less than min bytes per chunk while data remains, + doubling min after each chunk until it reaches max''' + def log2(x): + if not x: + return 0 + i = 0 + while x: + x >>= 1 + i += 1 + return i - 1 + + buf = [] + blen = 0 + for chunk in source: + buf.append(chunk) + blen += len(chunk) + if blen >= min: + if min < max: + min = min << 1 + nmin = 1 << log2(blen) + if nmin > min: + min = nmin + if min > max: + min = max + yield ''.join(buf) + blen = 0 + buf = [] + if buf: + yield ''.join(buf) + +Abort = error.Abort + +def always(fn): + return True + +def never(fn): + return False + +def pathto(root, n1, n2): + '''return the relative path from one place to another. + root should use os.sep to separate directories + n1 should use os.sep to separate directories + n2 should use "/" to separate directories + returns an os.sep-separated path. + + If n1 is a relative path, it's assumed it's + relative to root. + n2 should always be relative to root. + ''' + if not n1: + return localpath(n2) + if os.path.isabs(n1): + if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]: + return os.path.join(root, localpath(n2)) + n2 = '/'.join((pconvert(root), n2)) + a, b = splitpath(n1), n2.split('/') + a.reverse() + b.reverse() + while a and b and a[-1] == b[-1]: + a.pop() + b.pop() + b.reverse() + return os.sep.join((['..'] * len(a)) + b) or '.' + +_hgexecutable = None + +def mainfrozen(): + """return True if we are a frozen executable. + + The code supports py2exe (most common, Windows only) and tools/freeze + (portable, not much used). + """ + return (safehasattr(sys, "frozen") or # new py2exe + safehasattr(sys, "importers") or # old py2exe + imp.is_frozen("__main__")) # tools/freeze + +def hgexecutable(): + """return location of the 'hg' executable. 
+ + Defaults to $HG or 'hg' in the search path. + """ + if _hgexecutable is None: + hg = os.environ.get('HG') + mainmod = sys.modules['__main__'] + if hg: + _sethgexecutable(hg) + elif mainfrozen(): + _sethgexecutable(sys.executable) + elif os.path.basename(getattr(mainmod, '__file__', '')) == 'hg': + _sethgexecutable(mainmod.__file__) + else: + exe = findexe('hg') or os.path.basename(sys.argv[0]) + _sethgexecutable(exe) + return _hgexecutable + +def _sethgexecutable(path): + """set location of the 'hg' executable""" + global _hgexecutable + _hgexecutable = path + +def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None, out=None): + '''enhanced shell command execution. + run with environment maybe modified, maybe in different dir. + + if command fails and onerr is None, return status. if ui object, + print error message and return status, else raise onerr object as + exception. + + if out is specified, it is assumed to be a file-like object that has a + write() method. stdout and stderr will be redirected to out.''' + try: + sys.stdout.flush() + except Exception: + pass + def py2shell(val): + 'convert python object into string that is useful to shell' + if val is None or val is False: + return '0' + if val is True: + return '1' + return str(val) + origcmd = cmd + cmd = quotecommand(cmd) + env = dict(os.environ) + env.update((k, py2shell(v)) for k, v in environ.iteritems()) + env['HG'] = hgexecutable() + if out is None or out == sys.__stdout__: + rc = subprocess.call(cmd, shell=True, close_fds=closefds, + env=env, cwd=cwd) + else: + proc = subprocess.Popen(cmd, shell=True, close_fds=closefds, + env=env, cwd=cwd, stdout=subprocess.PIPE, + stderr=subprocess.STDOUT) + for line in proc.stdout: + out.write(line) + proc.wait() + rc = proc.returncode + if sys.platform == 'OpenVMS' and rc & 1: + rc = 0 + if rc and onerr: + errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]), + explainexit(rc)[0]) + if errprefix: + errmsg = '%s: %s' % (errprefix, 
errmsg) + try: + onerr.warn(errmsg + '\n') + except AttributeError: + raise onerr(errmsg) + return rc + +def checksignature(func): + '''wrap a function with code to check for calling errors''' + def check(*args, **kwargs): + try: + return func(*args, **kwargs) + except TypeError: + if len(traceback.extract_tb(sys.exc_info()[2])) == 1: + raise error.SignatureError + raise + + return check + +def copyfile(src, dest): + "copy a file, preserving mode and atime/mtime" + if os.path.islink(src): + try: + os.unlink(dest) + except OSError: + pass + os.symlink(os.readlink(src), dest) + else: + try: + shutil.copyfile(src, dest) + shutil.copymode(src, dest) + except shutil.Error, inst: + raise Abort(str(inst)) + +def copyfiles(src, dst, hardlink=None): + """Copy a directory tree using hardlinks if possible""" + + if hardlink is None: + hardlink = (os.stat(src).st_dev == + os.stat(os.path.dirname(dst)).st_dev) + + num = 0 + if os.path.isdir(src): + os.mkdir(dst) + for name, kind in osutil.listdir(src): + srcname = os.path.join(src, name) + dstname = os.path.join(dst, name) + hardlink, n = copyfiles(srcname, dstname, hardlink) + num += n + else: + if hardlink: + try: + oslink(src, dst) + except (IOError, OSError): + hardlink = False + shutil.copy(src, dst) + else: + shutil.copy(src, dst) + num += 1 + + return hardlink, num + +_winreservednames = '''con prn aux nul + com1 com2 com3 com4 com5 com6 com7 com8 com9 + lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split() +_winreservedchars = ':*?"<>|' +def checkwinfilename(path): + '''Check that the base-relative path is a valid filename on Windows. + Returns None if the path is ok, or a UI string describing the problem. 
+ + >>> checkwinfilename("just/a/normal/path") + >>> checkwinfilename("foo/bar/con.xml") + "filename contains 'con', which is reserved on Windows" + >>> checkwinfilename("foo/con.xml/bar") + "filename contains 'con', which is reserved on Windows" + >>> checkwinfilename("foo/bar/xml.con") + >>> checkwinfilename("foo/bar/AUX/bla.txt") + "filename contains 'AUX', which is reserved on Windows" + >>> checkwinfilename("foo/bar/bla:.txt") + "filename contains ':', which is reserved on Windows" + >>> checkwinfilename("foo/bar/b\07la.txt") + "filename contains '\\\\x07', which is invalid on Windows" + >>> checkwinfilename("foo/bar/bla ") + "filename ends with ' ', which is not allowed on Windows" + >>> checkwinfilename("../bar") + ''' + for n in path.replace('\\', '/').split('/'): + if not n: + continue + for c in n: + if c in _winreservedchars: + return _("filename contains '%s', which is reserved " + "on Windows") % c + if ord(c) <= 31: + return _("filename contains %r, which is invalid " + "on Windows") % c + base = n.split('.')[0] + if base and base.lower() in _winreservednames: + return _("filename contains '%s', which is reserved " + "on Windows") % base + t = n[-1] + if t in '. 
' and n not in '..': + return _("filename ends with '%s', which is not allowed " + "on Windows") % t + +if os.name == 'nt': + checkosfilename = checkwinfilename +else: + checkosfilename = platform.checkosfilename + +def makelock(info, pathname): + try: + return os.symlink(info, pathname) + except OSError, why: + if why.errno == errno.EEXIST: + raise + except AttributeError: # no symlink in os + pass + + ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL) + os.write(ld, info) + os.close(ld) + +def readlock(pathname): + try: + return os.readlink(pathname) + except OSError, why: + if why.errno not in (errno.EINVAL, errno.ENOSYS): + raise + except AttributeError: # no symlink in os + pass + fp = posixfile(pathname) + r = fp.read() + fp.close() + return r + +def fstat(fp): + '''stat file object that may not have fileno method.''' + try: + return os.fstat(fp.fileno()) + except AttributeError: + return os.stat(fp.name) + +# File system features + +def checkcase(path): + """ + Check whether the given path is on a case-sensitive filesystem + + Requires a path (like /foo/.hg) ending with a foldable final + directory component. + """ + s1 = os.stat(path) + d, b = os.path.split(path) + p2 = os.path.join(d, b.upper()) + if path == p2: + p2 = os.path.join(d, b.lower()) + try: + s2 = os.stat(p2) + if s2 == s1: + return False + return True + except OSError: + return True + +_fspathcache = {} +def fspath(name, root): + '''Get name in the case stored in the filesystem + + The name is either relative to root, or it is an absolute path starting + with root. Note that this function is unnecessary, and should not be + called, for case-sensitive filesystems (simply because it's expensive). 
+ ''' + # If name is absolute, make it relative + if name.lower().startswith(root.lower()): + l = len(root) + if name[l] == os.sep or name[l] == os.altsep: + l = l + 1 + name = name[l:] + + if not os.path.lexists(os.path.join(root, name)): + return None + + seps = os.sep + if os.altsep: + seps = seps + os.altsep + # Protect backslashes. This gets silly very quickly. + seps.replace('\\','\\\\') + pattern = re.compile(r'([^%s]+)|([%s]+)' % (seps, seps)) + dir = os.path.normcase(os.path.normpath(root)) + result = [] + for part, sep in pattern.findall(name): + if sep: + result.append(sep) + continue + + if dir not in _fspathcache: + _fspathcache[dir] = os.listdir(dir) + contents = _fspathcache[dir] + + lpart = part.lower() + lenp = len(part) + for n in contents: + if lenp == len(n) and n.lower() == lpart: + result.append(n) + break + else: + # Cannot happen, as the file exists! + result.append(part) + dir = os.path.join(dir, lpart) + + return ''.join(result) + +def checknlink(testfile): + '''check whether hardlink count reporting works properly''' + + # testfile may be open, so we need a separate file for checking to + # work around issue2543 (or testfile may get lost on Samba shares) + f1 = testfile + ".hgtmp1" + if os.path.lexists(f1): + return False + try: + posixfile(f1, 'w').close() + except IOError: + return False + + f2 = testfile + ".hgtmp2" + fd = None + try: + try: + oslink(f1, f2) + except OSError: + return False + + # nlinks() may behave differently for files on Windows shares if + # the file is open. + fd = posixfile(f2) + return nlinks(f2) > 1 + finally: + if fd is not None: + fd.close() + for f in (f1, f2): + try: + os.unlink(f) + except OSError: + pass + + return False + +def endswithsep(path): + '''Check path ends with os.sep or os.altsep.''' + return path.endswith(os.sep) or os.altsep and path.endswith(os.altsep) + +def splitpath(path): + '''Split path by os.sep. 
+ Note that this function does not use os.altsep because this is + an alternative of simple "xxx.split(os.sep)". + It is recommended to use os.path.normpath() before using this + function if need.''' + return path.split(os.sep) + +def gui(): + '''Are we running in a GUI?''' + if sys.platform == 'darwin': + if 'SSH_CONNECTION' in os.environ: + # handle SSH access to a box where the user is logged in + return False + elif getattr(osutil, 'isgui', None): + # check if a CoreGraphics session is available + return osutil.isgui() + else: + # pure build; use a safe default + return True + else: + return os.name == "nt" or os.environ.get("DISPLAY") + +def mktempcopy(name, emptyok=False, createmode=None): + """Create a temporary file with the same contents from name + + The permission bits are copied from the original file. + + If the temporary file is going to be truncated immediately, you + can use emptyok=True as an optimization. + + Returns the name of the temporary file. + """ + d, fn = os.path.split(name) + fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d) + os.close(fd) + # Temporary files are created with mode 0600, which is usually not + # what we want. If the original file already exists, just copy + # its mode. Otherwise, manually obey umask. + copymode(name, temp, createmode) + if emptyok: + return temp + try: + try: + ifp = posixfile(name, "rb") + except IOError, inst: + if inst.errno == errno.ENOENT: + return temp + if not getattr(inst, 'filename', None): + inst.filename = name + raise + ofp = posixfile(temp, "wb") + for chunk in filechunkiter(ifp): + ofp.write(chunk) + ifp.close() + ofp.close() + except: + try: os.unlink(temp) + except: pass + raise + return temp + +class atomictempfile(object): + '''writeable file object that atomically updates a file + + All writes will go to a temporary copy of the original file. 
Call + close() when you are done writing, and atomictempfile will rename + the temporary copy to the original name, making the changes + visible. If the object is destroyed without being closed, all your + writes are discarded. + ''' + def __init__(self, name, mode='w+b', createmode=None): + self.__name = name # permanent name + self._tempname = mktempcopy(name, emptyok=('w' in mode), + createmode=createmode) + self._fp = posixfile(self._tempname, mode) + + # delegated methods + self.write = self._fp.write + self.fileno = self._fp.fileno + + def close(self): + if not self._fp.closed: + self._fp.close() + rename(self._tempname, localpath(self.__name)) + + def discard(self): + if not self._fp.closed: + try: + os.unlink(self._tempname) + except OSError: + pass + self._fp.close() + + def __del__(self): + if safehasattr(self, '_fp'): # constructor actually did something + self.discard() + +def makedirs(name, mode=None): + """recursive directory creation with parent mode inheritance""" + try: + os.mkdir(name) + except OSError, err: + if err.errno == errno.EEXIST: + return + if err.errno != errno.ENOENT or not name: + raise + parent = os.path.dirname(os.path.abspath(name)) + if parent == name: + raise + makedirs(parent, mode) + os.mkdir(name) + if mode is not None: + os.chmod(name, mode) + +def readfile(path): + fp = open(path, 'rb') + try: + return fp.read() + finally: + fp.close() + +def writefile(path, text): + fp = open(path, 'wb') + try: + fp.write(text) + finally: + fp.close() + +def appendfile(path, text): + fp = open(path, 'ab') + try: + fp.write(text) + finally: + fp.close() + +class chunkbuffer(object): + """Allow arbitrary sized chunks of data to be efficiently read from an + iterator over chunks of arbitrary size.""" + + def __init__(self, in_iter): + """in_iter is the iterator that's iterating over the input chunks. 
+ targetsize is how big a buffer to try to maintain.""" + def splitbig(chunks): + for chunk in chunks: + if len(chunk) > 2**20: + pos = 0 + while pos < len(chunk): + end = pos + 2 ** 18 + yield chunk[pos:end] + pos = end + else: + yield chunk + self.iter = splitbig(in_iter) + self._queue = [] + + def read(self, l): + """Read L bytes of data from the iterator of chunks of data. + Returns less than L bytes if the iterator runs dry.""" + left = l + buf = '' + queue = self._queue + while left > 0: + # refill the queue + if not queue: + target = 2**18 + for chunk in self.iter: + queue.append(chunk) + target -= len(chunk) + if target <= 0: + break + if not queue: + break + + chunk = queue.pop(0) + left -= len(chunk) + if left < 0: + queue.insert(0, chunk[left:]) + buf += chunk[:left] + else: + buf += chunk + + return buf + +def filechunkiter(f, size=65536, limit=None): + """Create a generator that produces the data in the file size + (default 65536) bytes at a time, up to optional limit (default is + to read all data). Chunks may be less than size bytes if the + chunk is the last chunk in the file, or the file is a socket or + some other type of file that sometimes reads less data than is + requested.""" + assert size >= 0 + assert limit is None or limit >= 0 + while True: + if limit is None: + nbytes = size + else: + nbytes = min(limit, size) + s = nbytes and f.read(nbytes) + if not s: + break + if limit: + limit -= len(s) + yield s + +def makedate(): + ct = time.time() + if ct < 0: + hint = _("check your clock") + raise Abort(_("negative timestamp: %d") % ct, hint=hint) + delta = (datetime.datetime.utcfromtimestamp(ct) - + datetime.datetime.fromtimestamp(ct)) + tz = delta.days * 86400 + delta.seconds + return ct, tz + +def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'): + """represent a (unixtime, offset) tuple as a localized time. + unixtime is seconds since the epoch, and offset is the time zone's + number of seconds away from UTC. 
if timezone is false, do not + append time zone to string.""" + t, tz = date or makedate() + if t < 0: + t = 0 # time.gmtime(lt) fails on Windows for lt < -43200 + tz = 0 + if "%1" in format or "%2" in format: + sign = (tz > 0) and "-" or "+" + minutes = abs(tz) // 60 + format = format.replace("%1", "%c%02d" % (sign, minutes // 60)) + format = format.replace("%2", "%02d" % (minutes % 60)) + try: + t = time.gmtime(float(t) - tz) + except ValueError: + # time was out of range + t = time.gmtime(sys.maxint) + s = time.strftime(format, t) + return s + +def shortdate(date=None): + """turn (timestamp, tzoff) tuple into iso 8631 date.""" + return datestr(date, format='%Y-%m-%d') + +def strdate(string, format, defaults=[]): + """parse a localized time string and return a (unixtime, offset) tuple. + if the string cannot be parsed, ValueError is raised.""" + def timezone(string): + tz = string.split()[-1] + if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit(): + sign = (tz[0] == "+") and 1 or -1 + hours = int(tz[1:3]) + minutes = int(tz[3:5]) + return -sign * (hours * 60 + minutes) * 60 + if tz == "GMT" or tz == "UTC": + return 0 + return None + + # NOTE: unixtime = localunixtime + offset + offset, date = timezone(string), string + if offset is not None: + date = " ".join(string.split()[:-1]) + + # add missing elements from defaults + usenow = False # default to using biased defaults + for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity + found = [True for p in part if ("%"+p) in format] + if not found: + date += "@" + defaults[part][usenow] + format += "@%" + part[0] + else: + # We've found a specific time element, less specific time + # elements are relative to today + usenow = True + + timetuple = time.strptime(date, format) + localunixtime = int(calendar.timegm(timetuple)) + if offset is None: + # local timezone + unixtime = int(time.mktime(timetuple)) + offset = unixtime - localunixtime + else: + unixtime = localunixtime + offset + return unixtime, 
offset + +def parsedate(date, formats=None, bias={}): + """parse a localized date/time and return a (unixtime, offset) tuple. + + The date may be a "unixtime offset" string or in one of the specified + formats. If the date already is a (unixtime, offset) tuple, it is returned. + """ + if not date: + return 0, 0 + if isinstance(date, tuple) and len(date) == 2: + return date + if not formats: + formats = defaultdateformats + date = date.strip() + try: + when, offset = map(int, date.split(' ')) + except ValueError: + # fill out defaults + now = makedate() + defaults = {} + for part in ("d", "mb", "yY", "HI", "M", "S"): + # this piece is for rounding the specific end of unknowns + b = bias.get(part) + if b is None: + if part[0] in "HMS": + b = "00" + else: + b = "0" + + # this piece is for matching the generic end to today's date + n = datestr(now, "%" + part[0]) + + defaults[part] = (b, n) + + for format in formats: + try: + when, offset = strdate(date, format, defaults) + except (ValueError, OverflowError): + pass + else: + break + else: + raise Abort(_('invalid date: %r') % date) + # validate explicit (probably user-specified) date and + # time zone offset. values must fit in signed 32 bits for + # current 32-bit linux runtimes. 
timezones go from UTC-12
+    # to UTC+14
+    if abs(when) > 0x7fffffff:
+        raise Abort(_('date exceeds 32 bits: %d') % when)
+    if when < 0:
+        raise Abort(_('negative date value: %d') % when)
+    if offset < -50400 or offset > 43200:
+        raise Abort(_('impossible time zone offset: %d') % offset)
+    return when, offset
+
+def matchdate(date):
+    """Return a function that matches a given date match specifier
+
+    Formats include:
+
+    '{date}' match a given date to the accuracy provided
+
+    '<{date}' on or before a given date
+
+    '>{date}' on or after a given date
+
+    >>> p1 = parsedate("10:29:59")
+    >>> p2 = parsedate("10:30:00")
+    >>> p3 = parsedate("10:30:59")
+    >>> p4 = parsedate("10:31:00")
+    >>> p5 = parsedate("Sep 15 10:30:00 1999")
+    >>> f = matchdate("10:30")
+    >>> f(p1[0])
+    False
+    >>> f(p2[0])
+    True
+    >>> f(p3[0])
+    True
+    >>> f(p4[0])
+    False
+    >>> f(p5[0])
+    False
+    """
+
+    def lower(date):
+        d = dict(mb="1", d="1")
+        return parsedate(date, extendeddateformats, d)[0]
+
+    def upper(date):
+        d = dict(mb="12", HI="23", M="59", S="59")
+        for days in ("31", "30", "29"):
+            try:
+                d["d"] = days
+                return parsedate(date, extendeddateformats, d)[0]
+            except:
+                pass
+        d["d"] = "28"
+        return parsedate(date, extendeddateformats, d)[0]
+
+    date = date.strip()
+
+    if not date:
+        raise Abort(_("dates cannot consist entirely of whitespace"))
+    elif date[0] == "<":
+        if not date[1:]:
+            raise Abort(_("invalid day spec, use '<DATE'"))
+        when = upper(date[1:])
+        return lambda x: x <= when
+    elif date[0] == ">":
+        if not date[1:]:
+            raise Abort(_("invalid day spec, use '>DATE'"))
+        when = lower(date[1:])
+        return lambda x: x >= when
+    elif date[0] == "-":
+        try:
+            days = int(date[1:])
+        except ValueError:
+            raise Abort(_("invalid day spec: %s") % date[1:])
+        if days < 0:
+            raise Abort(_("%s must be nonnegative (see 'hg help dates')")
+                % date[1:])
+        when = makedate()[0] - days * 3600 * 24
+        return lambda x: x >= when
+    elif " to " in date:
+        a, b = date.split(" to ")
+        start, stop = lower(a), upper(b)
+        return lambda x: x >= start and x <= stop
+    else:
+        start, stop = 
lower(date), upper(date) + return lambda x: x >= start and x <= stop + +def shortuser(user): + """Return a short representation of a user name or email address.""" + f = user.find('@') + if f >= 0: + user = user[:f] + f = user.find('<') + if f >= 0: + user = user[f + 1:] + f = user.find(' ') + if f >= 0: + user = user[:f] + f = user.find('.') + if f >= 0: + user = user[:f] + return user + +def email(author): + '''get email of author.''' + r = author.find('>') + if r == -1: + r = None + return author[author.find('<') + 1:r] + +def _ellipsis(text, maxlength): + if len(text) <= maxlength: + return text, False + else: + return "%s..." % (text[:maxlength - 3]), True + +def ellipsis(text, maxlength=400): + """Trim string to at most maxlength (default: 400) characters.""" + try: + # use unicode not to split at intermediate multi-byte sequence + utext, truncated = _ellipsis(text.decode(encoding.encoding), + maxlength) + if not truncated: + return text + return utext.encode(encoding.encoding) + except (UnicodeDecodeError, UnicodeEncodeError): + return _ellipsis(text, maxlength)[0] + +def bytecount(nbytes): + '''return byte count formatted as readable string, with units''' + + units = ( + (100, 1 << 30, _('%.0f GB')), + (10, 1 << 30, _('%.1f GB')), + (1, 1 << 30, _('%.2f GB')), + (100, 1 << 20, _('%.0f MB')), + (10, 1 << 20, _('%.1f MB')), + (1, 1 << 20, _('%.2f MB')), + (100, 1 << 10, _('%.0f KB')), + (10, 1 << 10, _('%.1f KB')), + (1, 1 << 10, _('%.2f KB')), + (1, 1, _('%.0f bytes')), + ) + + for multiplier, divisor, format in units: + if nbytes >= divisor * multiplier: + return format % (nbytes / float(divisor)) + return units[-1][2] % nbytes + +def uirepr(s): + # Avoid double backslash in Windows path repr() + return repr(s).replace('\\\\', '\\') + +# delay import of textwrap +def MBTextWrapper(**kwargs): + class tw(textwrap.TextWrapper): + """ + Extend TextWrapper for width-awareness. 
+ + Neither number of 'bytes' in any encoding nor 'characters' is + appropriate to calculate terminal columns for specified string. + + Original TextWrapper implementation uses built-in 'len()' directly, + so overriding is needed to use width information of each characters. + + In addition, characters classified into 'ambiguous' width are + treated as wide in east asian area, but as narrow in other. + + This requires use decision to determine width of such characters. + """ + def __init__(self, **kwargs): + textwrap.TextWrapper.__init__(self, **kwargs) + + # for compatibility between 2.4 and 2.6 + if getattr(self, 'drop_whitespace', None) is None: + self.drop_whitespace = kwargs.get('drop_whitespace', True) + + def _cutdown(self, ucstr, space_left): + l = 0 + colwidth = encoding.ucolwidth + for i in xrange(len(ucstr)): + l += colwidth(ucstr[i]) + if space_left < l: + return (ucstr[:i], ucstr[i:]) + return ucstr, '' + + # overriding of base class + def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width): + space_left = max(width - cur_len, 1) + + if self.break_long_words: + cut, res = self._cutdown(reversed_chunks[-1], space_left) + cur_line.append(cut) + reversed_chunks[-1] = res + elif not cur_line: + cur_line.append(reversed_chunks.pop()) + + # this overriding code is imported from TextWrapper of python 2.6 + # to calculate columns of string by 'encoding.ucolwidth()' + def _wrap_chunks(self, chunks): + colwidth = encoding.ucolwidth + + lines = [] + if self.width <= 0: + raise ValueError("invalid width %r (must be > 0)" % self.width) + + # Arrange in reverse order so items can be efficiently popped + # from a stack of chucks. + chunks.reverse() + + while chunks: + + # Start the list of chunks that will make up the current line. + # cur_len is just the length of all the chunks in cur_line. + cur_line = [] + cur_len = 0 + + # Figure out which static string will prefix this line. 
+ if lines: + indent = self.subsequent_indent + else: + indent = self.initial_indent + + # Maximum width for this line. + width = self.width - len(indent) + + # First chunk on line is whitespace -- drop it, unless this + # is the very beginning of the text (ie. no lines started yet). + if self.drop_whitespace and chunks[-1].strip() == '' and lines: + del chunks[-1] + + while chunks: + l = colwidth(chunks[-1]) + + # Can at least squeeze this chunk onto the current line. + if cur_len + l <= width: + cur_line.append(chunks.pop()) + cur_len += l + + # Nope, this line is full. + else: + break + + # The current line is full, and the next chunk is too big to + # fit on *any* line (not just this one). + if chunks and colwidth(chunks[-1]) > width: + self._handle_long_word(chunks, cur_line, cur_len, width) + + # If the last chunk on this line is all whitespace, drop it. + if (self.drop_whitespace and + cur_line and cur_line[-1].strip() == ''): + del cur_line[-1] + + # Convert current line back to a string and store it in list + # of all lines (return value). 
+ if cur_line: + lines.append(indent + ''.join(cur_line)) + + return lines + + global MBTextWrapper + MBTextWrapper = tw + return tw(**kwargs) + +def wrap(line, width, initindent='', hangindent=''): + maxindent = max(len(hangindent), len(initindent)) + if width <= maxindent: + # adjust for weird terminal size + width = max(78, maxindent + 1) + line = line.decode(encoding.encoding, encoding.encodingmode) + initindent = initindent.decode(encoding.encoding, encoding.encodingmode) + hangindent = hangindent.decode(encoding.encoding, encoding.encodingmode) + wrapper = MBTextWrapper(width=width, + initial_indent=initindent, + subsequent_indent=hangindent) + return wrapper.fill(line).encode(encoding.encoding) + +def iterlines(iterator): + for chunk in iterator: + for line in chunk.splitlines(): + yield line + +def expandpath(path): + return os.path.expanduser(os.path.expandvars(path)) + +def hgcmd(): + """Return the command used to execute current hg + + This is different from hgexecutable() because on Windows we want + to avoid things opening new shell windows like batch files, so we + get either the python call or current executable. + """ + if mainfrozen(): + return [sys.executable] + return gethgcmd() + +def rundetached(args, condfn): + """Execute the argument list in a detached process. + + condfn is a callable which is called repeatedly and should return + True once the child process is known to have started successfully. + At this point, the child process PID is returned. If the child + process fails to start or finishes before condfn() evaluates to + True, return -1. + """ + # Windows case is easier because the child process is either + # successfully starting and validating the condition or exiting + # on failure. We just poll on its PID. On Unix, if the child + # process fails to start, it will be left in a zombie state until + # the parent wait on it, which we cannot do since we expect a long + # running process on success. 
Instead we listen for SIGCHLD telling + # us our child process terminated. + terminated = set() + def handler(signum, frame): + terminated.add(os.wait()) + prevhandler = None + SIGCHLD = getattr(signal, 'SIGCHLD', None) + if SIGCHLD is not None: + prevhandler = signal.signal(SIGCHLD, handler) + try: + pid = spawndetached(args) + while not condfn(): + if ((pid in terminated or not testpid(pid)) + and not condfn()): + return -1 + time.sleep(0.1) + return pid + finally: + if prevhandler is not None: + signal.signal(signal.SIGCHLD, prevhandler) + +try: + any, all = any, all +except NameError: + def any(iterable): + for i in iterable: + if i: + return True + return False + + def all(iterable): + for i in iterable: + if not i: + return False + return True + +def interpolate(prefix, mapping, s, fn=None, escape_prefix=False): + """Return the result of interpolating items in the mapping into string s. + + prefix is a single character string, or a two character string with + a backslash as the first character if the prefix needs to be escaped in + a regular expression. + + fn is an optional function that will be applied to the replacement text + just before replacement. + + escape_prefix is an optional flag that allows using doubled prefix for + its escaping. + """ + fn = fn or (lambda s: s) + patterns = '|'.join(mapping.keys()) + if escape_prefix: + patterns += '|' + prefix + if len(prefix) > 1: + prefix_char = prefix[1:] + else: + prefix_char = prefix + mapping[prefix_char] = prefix_char + r = re.compile(r'%s(%s)' % (prefix, patterns)) + return r.sub(lambda x: fn(mapping[x.group()[1:]]), s) + +def getport(port): + """Return the port for a given network service. + + If port is an integer, it's returned as is. If it's a string, it's + looked up using socket.getservbyname(). If there's no matching + service, util.Abort is raised. 
+ """ + try: + return int(port) + except ValueError: + pass + + try: + return socket.getservbyname(port) + except socket.error: + raise Abort(_("no port number associated with service '%s'") % port) + +_booleans = {'1': True, 'yes': True, 'true': True, 'on': True, 'always': True, + '0': False, 'no': False, 'false': False, 'off': False, + 'never': False} + +def parsebool(s): + """Parse s into a boolean. + + If s is not a valid boolean, returns None. + """ + return _booleans.get(s.lower(), None) + +_hexdig = '0123456789ABCDEFabcdef' +_hextochr = dict((a + b, chr(int(a + b, 16))) + for a in _hexdig for b in _hexdig) + +def _urlunquote(s): + """unquote('abc%20def') -> 'abc def'.""" + res = s.split('%') + # fastpath + if len(res) == 1: + return s + s = res[0] + for item in res[1:]: + try: + s += _hextochr[item[:2]] + item[2:] + except KeyError: + s += '%' + item + except UnicodeDecodeError: + s += unichr(int(item[:2], 16)) + item[2:] + return s + +class url(object): + r"""Reliable URL parser. + + This parses URLs and provides attributes for the following + components: + + ://:@:/?# + + Missing components are set to None. The only exception is + fragment, which is set to '' if present but empty. + + If parsefragment is False, fragment is included in query. If + parsequery is False, query is included in path. If both are + False, both fragment and query are included in path. + + See http://www.ietf.org/rfc/rfc2396.txt for more information. + + Note that for backward compatibility reasons, bundle URLs do not + take host names. That means 'bundle://../' has a path of '../'. 
+ + Examples: + + >>> url('http://www.ietf.org/rfc/rfc2396.txt') + + >>> url('ssh://[::1]:2200//home/joe/repo') + + >>> url('file:///home/joe/repo') + + >>> url('file:///c:/temp/foo/') + + >>> url('bundle:foo') + + >>> url('bundle://../foo') + + >>> url(r'c:\foo\bar') + + >>> url(r'\\blah\blah\blah') + + >>> url(r'\\blah\blah\blah#baz') + + + Authentication credentials: + + >>> url('ssh://joe:xyz@x/repo') + + >>> url('ssh://joe@x/repo') + + + Query strings and fragments: + + >>> url('http://host/a?b#c') + + >>> url('http://host/a?b#c', parsequery=False, parsefragment=False) + + """ + + _safechars = "!~*'()+" + _safepchars = "/!~*'()+" + _matchscheme = re.compile(r'^[a-zA-Z0-9+.\-]+:').match + + def __init__(self, path, parsequery=True, parsefragment=True): + # We slowly chomp away at path until we have only the path left + self.scheme = self.user = self.passwd = self.host = None + self.port = self.path = self.query = self.fragment = None + self._localpath = True + self._hostport = '' + self._origpath = path + + if parsefragment and '#' in path: + path, self.fragment = path.split('#', 1) + if not path: + path = None + + # special case for Windows drive letters and UNC paths + if hasdriveletter(path) or path.startswith(r'\\'): + self.path = path + return + + # For compatibility reasons, we can't handle bundle paths as + # normal URLS + if path.startswith('bundle:'): + self.scheme = 'bundle' + path = path[7:] + if path.startswith('//'): + path = path[2:] + self.path = path + return + + if self._matchscheme(path): + parts = path.split(':', 1) + if parts[0]: + self.scheme, path = parts + self._localpath = False + + if not path: + path = None + if self._localpath: + self.path = '' + return + else: + if self._localpath: + self.path = path + return + + if parsequery and '?' 
in path: + path, self.query = path.split('?', 1) + if not path: + path = None + if not self.query: + self.query = None + + # // is required to specify a host/authority + if path and path.startswith('//'): + parts = path[2:].split('/', 1) + if len(parts) > 1: + self.host, path = parts + path = path + else: + self.host = parts[0] + path = None + if not self.host: + self.host = None + # path of file:///d is /d + # path of file:///d:/ is d:/, not /d:/ + if path and not hasdriveletter(path): + path = '/' + path + + if self.host and '@' in self.host: + self.user, self.host = self.host.rsplit('@', 1) + if ':' in self.user: + self.user, self.passwd = self.user.split(':', 1) + if not self.host: + self.host = None + + # Don't split on colons in IPv6 addresses without ports + if (self.host and ':' in self.host and + not (self.host.startswith('[') and self.host.endswith(']'))): + self._hostport = self.host + self.host, self.port = self.host.rsplit(':', 1) + if not self.host: + self.host = None + + if (self.host and self.scheme == 'file' and + self.host not in ('localhost', '127.0.0.1', '[::1]')): + raise Abort(_('file:// URLs can only refer to localhost')) + + self.path = path + + # leave the query string escaped + for a in ('user', 'passwd', 'host', 'port', + 'path', 'fragment'): + v = getattr(self, a) + if v is not None: + setattr(self, a, _urlunquote(v)) + + def __repr__(self): + attrs = [] + for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path', + 'query', 'fragment'): + v = getattr(self, a) + if v is not None: + attrs.append('%s: %r' % (a, v)) + return '' % ', '.join(attrs) + + def __str__(self): + r"""Join the URL's components back into a URL string. 
+ + Examples: + + >>> str(url('http://user:pw@host:80/?foo#bar')) + 'http://user:pw@host:80/?foo#bar' + >>> str(url('http://user:pw@host:80/?foo=bar&baz=42')) + 'http://user:pw@host:80/?foo=bar&baz=42' + >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz')) + 'http://user:pw@host:80/?foo=bar%3dbaz' + >>> str(url('ssh://user:pw@[::1]:2200//home/joe#')) + 'ssh://user:pw@[::1]:2200//home/joe#' + >>> str(url('http://localhost:80//')) + 'http://localhost:80//' + >>> str(url('http://localhost:80/')) + 'http://localhost:80/' + >>> str(url('http://localhost:80')) + 'http://localhost:80/' + >>> str(url('bundle:foo')) + 'bundle:foo' + >>> str(url('bundle://../foo')) + 'bundle:../foo' + >>> str(url('path')) + 'path' + >>> str(url('file:///tmp/foo/bar')) + 'file:///tmp/foo/bar' + >>> print url(r'bundle:foo\bar') + bundle:foo\bar + """ + if self._localpath: + s = self.path + if self.scheme == 'bundle': + s = 'bundle:' + s + if self.fragment: + s += '#' + self.fragment + return s + + s = self.scheme + ':' + if self.user or self.passwd or self.host: + s += '//' + elif self.scheme and (not self.path or self.path.startswith('/')): + s += '//' + if self.user: + s += urllib.quote(self.user, safe=self._safechars) + if self.passwd: + s += ':' + urllib.quote(self.passwd, safe=self._safechars) + if self.user or self.passwd: + s += '@' + if self.host: + if not (self.host.startswith('[') and self.host.endswith(']')): + s += urllib.quote(self.host) + else: + s += self.host + if self.port: + s += ':' + urllib.quote(self.port) + if self.host: + s += '/' + if self.path: + # TODO: similar to the query string, we should not unescape the + # path when we store it, the path might contain '%2f' = '/', + # which we should *not* escape. + s += urllib.quote(self.path, safe=self._safepchars) + if self.query: + # we store the query in escaped form. + s += '?' 
+ self.query + if self.fragment is not None: + s += '#' + urllib.quote(self.fragment, safe=self._safepchars) + return s + + def authinfo(self): + user, passwd = self.user, self.passwd + try: + self.user, self.passwd = None, None + s = str(self) + finally: + self.user, self.passwd = user, passwd + if not self.user: + return (s, None) + # authinfo[1] is passed to urllib2 password manager, and its + # URIs must not contain credentials. The host is passed in the + # URIs list because Python < 2.4.3 uses only that to search for + # a password. + return (s, (None, (s, self.host), + self.user, self.passwd or '')) + + def isabs(self): + if self.scheme and self.scheme != 'file': + return True # remote URL + if hasdriveletter(self.path): + return True # absolute for our purposes - can't be joined() + if self.path.startswith(r'\\'): + return True # Windows UNC path + if self.path.startswith('/'): + return True # POSIX-style + return False + + def localpath(self): + if self.scheme == 'file' or self.scheme == 'bundle': + path = self.path or '/' + # For Windows, we need to promote hosts containing drive + # letters to paths with drive letters. 
+ if hasdriveletter(self._hostport): + path = self._hostport + '/' + self.path + elif (self.host is not None and self.path + and not hasdriveletter(path)): + path = '/' + path + return path + return self._origpath + +def hasscheme(path): + return bool(url(path).scheme) + +def hasdriveletter(path): + return path[1:2] == ':' and path[0:1].isalpha() + +def urllocalpath(path): + return url(path, parsequery=False, parsefragment=False).localpath() + +def hidepassword(u): + '''hide user credential in a url string''' + u = url(u) + if u.passwd: + u.passwd = '***' + return str(u) + +def removeauth(u): + '''remove all authentication information from a url string''' + u = url(u) + u.user = u.passwd = None + return str(u) + +def isatty(fd): + try: + return fd.isatty() + except AttributeError: + return False diff --git a/websdk/mercurial/verify.py b/websdk/mercurial/verify.py index b0896e0..3e53862 120000..100644 --- a/websdk/mercurial/verify.py +++ b/websdk/mercurial/verify.py @@ -1 +1,303 @@ -/usr/share/pyshared/mercurial/verify.py \ No newline at end of file +# verify.py - repository integrity checking for Mercurial +# +# Copyright 2006, 2007 Matt Mackall +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +from node import nullid, short +from i18n import _ +import os +import revlog, util, error + +def verify(repo): + lock = repo.lock() + try: + return _verify(repo) + finally: + lock.release() + +def _verify(repo): + mflinkrevs = {} + filelinkrevs = {} + filenodes = {} + revisions = 0 + badrevs = set() + errors = [0] + warnings = [0] + ui = repo.ui + cl = repo.changelog + mf = repo.manifest + lrugetctx = util.lrucachefunc(repo.changectx) + + if not repo.cancopy(): + raise util.Abort(_("cannot verify bundle or remote repos")) + + def err(linkrev, msg, filename=None): + if linkrev is not None: + badrevs.add(linkrev) + else: + linkrev = '?' 
+ msg = "%s: %s" % (linkrev, msg) + if filename: + msg = "%s@%s" % (filename, msg) + ui.warn(" " + msg + "\n") + errors[0] += 1 + + def exc(linkrev, msg, inst, filename=None): + if isinstance(inst, KeyboardInterrupt): + ui.warn(_("interrupted")) + raise + if not str(inst): + inst = repr(inst) + err(linkrev, "%s: %s" % (msg, inst), filename) + + def warn(msg): + ui.warn(msg + "\n") + warnings[0] += 1 + + def checklog(obj, name, linkrev): + if not len(obj) and (havecl or havemf): + err(linkrev, _("empty or missing %s") % name) + return + + d = obj.checksize() + if d[0]: + err(None, _("data length off by %d bytes") % d[0], name) + if d[1]: + err(None, _("index contains %d extra bytes") % d[1], name) + + if obj.version != revlog.REVLOGV0: + if not revlogv1: + warn(_("warning: `%s' uses revlog format 1") % name) + elif revlogv1: + warn(_("warning: `%s' uses revlog format 0") % name) + + def checkentry(obj, i, node, seen, linkrevs, f): + lr = obj.linkrev(obj.rev(node)) + if lr < 0 or (havecl and lr not in linkrevs): + if lr < 0 or lr >= len(cl): + msg = _("rev %d points to nonexistent changeset %d") + else: + msg = _("rev %d points to unexpected changeset %d") + err(None, msg % (i, lr), f) + if linkrevs: + if f and len(linkrevs) > 1: + try: + # attempt to filter down to real linkrevs + linkrevs = [l for l in linkrevs + if lrugetctx(l)[f].filenode() == node] + except: + pass + warn(_(" (expected %s)") % " ".join(map(str, linkrevs))) + lr = None # can't be trusted + + try: + p1, p2 = obj.parents(node) + if p1 not in seen and p1 != nullid: + err(lr, _("unknown parent 1 %s of %s") % + (short(p1), short(n)), f) + if p2 not in seen and p2 != nullid: + err(lr, _("unknown parent 2 %s of %s") % + (short(p2), short(p1)), f) + except Exception, inst: + exc(lr, _("checking parents of %s") % short(node), inst, f) + + if node in seen: + err(lr, _("duplicate revision %d (%d)") % (i, seen[n]), f) + seen[n] = i + return lr + + if os.path.exists(repo.sjoin("journal")): + 
ui.warn(_("abandoned transaction found - run hg recover\n")) + + revlogv1 = cl.version != revlog.REVLOGV0 + if ui.verbose or not revlogv1: + ui.status(_("repository uses revlog format %d\n") % + (revlogv1 and 1 or 0)) + + havecl = len(cl) > 0 + havemf = len(mf) > 0 + + ui.status(_("checking changesets\n")) + seen = {} + checklog(cl, "changelog", 0) + total = len(repo) + for i in repo: + ui.progress(_('checking'), i, total=total, unit=_('changesets')) + n = cl.node(i) + checkentry(cl, i, n, seen, [i], "changelog") + + try: + changes = cl.read(n) + mflinkrevs.setdefault(changes[0], []).append(i) + for f in changes[3]: + filelinkrevs.setdefault(f, []).append(i) + except Exception, inst: + exc(i, _("unpacking changeset %s") % short(n), inst) + ui.progress(_('checking'), None) + + ui.status(_("checking manifests\n")) + seen = {} + checklog(mf, "manifest", 0) + total = len(mf) + for i in mf: + ui.progress(_('checking'), i, total=total, unit=_('manifests')) + n = mf.node(i) + lr = checkentry(mf, i, n, seen, mflinkrevs.get(n, []), "manifest") + if n in mflinkrevs: + del mflinkrevs[n] + else: + err(lr, _("%s not in changesets") % short(n), "manifest") + + try: + for f, fn in mf.readdelta(n).iteritems(): + if not f: + err(lr, _("file without name in manifest")) + elif f != "/dev/null": + filenodes.setdefault(f, {}).setdefault(fn, lr) + except Exception, inst: + exc(lr, _("reading manifest delta %s") % short(n), inst) + ui.progress(_('checking'), None) + + ui.status(_("crosschecking files in changesets and manifests\n")) + + total = len(mflinkrevs) + len(filelinkrevs) + len(filenodes) + count = 0 + if havemf: + for c, m in sorted([(c, m) for m in mflinkrevs + for c in mflinkrevs[m]]): + count += 1 + if m == nullid: + continue + ui.progress(_('crosschecking'), count, total=total) + err(c, _("changeset refers to unknown manifest %s") % short(m)) + mflinkrevs = None # del is bad here due to scope issues + + for f in sorted(filelinkrevs): + count += 1 + 
ui.progress(_('crosschecking'), count, total=total) + if f not in filenodes: + lr = filelinkrevs[f][0] + err(lr, _("in changeset but not in manifest"), f) + + if havecl: + for f in sorted(filenodes): + count += 1 + ui.progress(_('crosschecking'), count, total=total) + if f not in filelinkrevs: + try: + fl = repo.file(f) + lr = min([fl.linkrev(fl.rev(n)) for n in filenodes[f]]) + except: + lr = None + err(lr, _("in manifest but not in changeset"), f) + + ui.progress(_('crosschecking'), None) + + ui.status(_("checking files\n")) + + storefiles = set() + for f, f2, size in repo.store.datafiles(): + if not f: + err(None, _("cannot decode filename '%s'") % f2) + elif size > 0 or not revlogv1: + storefiles.add(f) + + files = sorted(set(filenodes) | set(filelinkrevs)) + total = len(files) + for i, f in enumerate(files): + ui.progress(_('checking'), i, item=f, total=total) + try: + linkrevs = filelinkrevs[f] + except KeyError: + # in manifest but not in changelog + linkrevs = [] + + if linkrevs: + lr = linkrevs[0] + else: + lr = None + + try: + fl = repo.file(f) + except error.RevlogError, e: + err(lr, _("broken revlog! 
(%s)") % e, f) + continue + + for ff in fl.files(): + try: + storefiles.remove(ff) + except KeyError: + err(lr, _("missing revlog!"), ff) + + checklog(fl, f, lr) + seen = {} + rp = None + for i in fl: + revisions += 1 + n = fl.node(i) + lr = checkentry(fl, i, n, seen, linkrevs, f) + if f in filenodes: + if havemf and n not in filenodes[f]: + err(lr, _("%s not in manifests") % (short(n)), f) + else: + del filenodes[f][n] + + # verify contents + try: + l = len(fl.read(n)) + rp = fl.renamed(n) + if l != fl.size(i): + if len(fl.revision(n)) != fl.size(i): + err(lr, _("unpacked size is %s, %s expected") % + (l, fl.size(i)), f) + except Exception, inst: + exc(lr, _("unpacking %s") % short(n), inst, f) + + # check renames + try: + if rp: + if lr is not None and ui.verbose: + ctx = lrugetctx(lr) + found = False + for pctx in ctx.parents(): + if rp[0] in pctx: + found = True + break + if not found: + warn(_("warning: copy source of '%s' not" + " in parents of %s") % (f, ctx)) + fl2 = repo.file(rp[0]) + if not len(fl2): + err(lr, _("empty or missing copy source revlog %s:%s") + % (rp[0], short(rp[1])), f) + elif rp[1] == nullid: + ui.note(_("warning: %s@%s: copy source" + " revision is nullid %s:%s\n") + % (f, lr, rp[0], short(rp[1]))) + else: + fl2.rev(rp[1]) + except Exception, inst: + exc(lr, _("checking rename of %s") % short(n), inst, f) + + # cross-check + if f in filenodes: + fns = [(lr, n) for n, lr in filenodes[f].iteritems()] + for lr, node in sorted(fns): + err(lr, _("%s in manifests not found") % short(node), f) + ui.progress(_('checking'), None) + + for f in storefiles: + warn(_("warning: orphan revlog '%s'") % f) + + ui.status(_("%d files, %d changesets, %d total revisions\n") % + (len(files), len(cl), revisions)) + if warnings[0]: + ui.warn(_("%d warnings encountered!\n") % warnings[0]) + if errors[0]: + ui.warn(_("%d integrity errors encountered!\n") % errors[0]) + if badrevs: + ui.warn(_("(first damaged changeset appears to be %d)\n") + % min(badrevs)) + 
return 1 diff --git a/websdk/mercurial/win32.py b/websdk/mercurial/win32.py index f45a5c5..e886caf 120000..100644 --- a/websdk/mercurial/win32.py +++ b/websdk/mercurial/win32.py @@ -1 +1,452 @@ -/usr/share/pyshared/mercurial/win32.py \ No newline at end of file +# win32.py - utility functions that use win32 API +# +# Copyright 2005-2009 Matt Mackall and others +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +import encoding +import ctypes, errno, os, struct, subprocess, random + +_kernel32 = ctypes.windll.kernel32 +_advapi32 = ctypes.windll.advapi32 +_user32 = ctypes.windll.user32 + +_BOOL = ctypes.c_long +_WORD = ctypes.c_ushort +_DWORD = ctypes.c_ulong +_UINT = ctypes.c_uint +_LONG = ctypes.c_long +_LPCSTR = _LPSTR = ctypes.c_char_p +_HANDLE = ctypes.c_void_p +_HWND = _HANDLE + +_INVALID_HANDLE_VALUE = _HANDLE(-1).value + +# GetLastError +_ERROR_SUCCESS = 0 +_ERROR_INVALID_PARAMETER = 87 +_ERROR_INSUFFICIENT_BUFFER = 122 + +# WPARAM is defined as UINT_PTR (unsigned type) +# LPARAM is defined as LONG_PTR (signed type) +if ctypes.sizeof(ctypes.c_long) == ctypes.sizeof(ctypes.c_void_p): + _WPARAM = ctypes.c_ulong + _LPARAM = ctypes.c_long +elif ctypes.sizeof(ctypes.c_longlong) == ctypes.sizeof(ctypes.c_void_p): + _WPARAM = ctypes.c_ulonglong + _LPARAM = ctypes.c_longlong + +class _FILETIME(ctypes.Structure): + _fields_ = [('dwLowDateTime', _DWORD), + ('dwHighDateTime', _DWORD)] + +class _BY_HANDLE_FILE_INFORMATION(ctypes.Structure): + _fields_ = [('dwFileAttributes', _DWORD), + ('ftCreationTime', _FILETIME), + ('ftLastAccessTime', _FILETIME), + ('ftLastWriteTime', _FILETIME), + ('dwVolumeSerialNumber', _DWORD), + ('nFileSizeHigh', _DWORD), + ('nFileSizeLow', _DWORD), + ('nNumberOfLinks', _DWORD), + ('nFileIndexHigh', _DWORD), + ('nFileIndexLow', _DWORD)] + +# CreateFile +_FILE_SHARE_READ = 0x00000001 +_FILE_SHARE_WRITE = 0x00000002 +_FILE_SHARE_DELETE = 0x00000004 + 
+_OPEN_EXISTING = 3 + +# SetFileAttributes +_FILE_ATTRIBUTE_NORMAL = 0x80 +_FILE_ATTRIBUTE_NOT_CONTENT_INDEXED = 0x2000 + +# Process Security and Access Rights +_PROCESS_QUERY_INFORMATION = 0x0400 + +# GetExitCodeProcess +_STILL_ACTIVE = 259 + +# registry +_HKEY_CURRENT_USER = 0x80000001L +_HKEY_LOCAL_MACHINE = 0x80000002L +_KEY_READ = 0x20019 +_REG_SZ = 1 +_REG_DWORD = 4 + +class _STARTUPINFO(ctypes.Structure): + _fields_ = [('cb', _DWORD), + ('lpReserved', _LPSTR), + ('lpDesktop', _LPSTR), + ('lpTitle', _LPSTR), + ('dwX', _DWORD), + ('dwY', _DWORD), + ('dwXSize', _DWORD), + ('dwYSize', _DWORD), + ('dwXCountChars', _DWORD), + ('dwYCountChars', _DWORD), + ('dwFillAttribute', _DWORD), + ('dwFlags', _DWORD), + ('wShowWindow', _WORD), + ('cbReserved2', _WORD), + ('lpReserved2', ctypes.c_char_p), + ('hStdInput', _HANDLE), + ('hStdOutput', _HANDLE), + ('hStdError', _HANDLE)] + +class _PROCESS_INFORMATION(ctypes.Structure): + _fields_ = [('hProcess', _HANDLE), + ('hThread', _HANDLE), + ('dwProcessId', _DWORD), + ('dwThreadId', _DWORD)] + +_DETACHED_PROCESS = 0x00000008 +_STARTF_USESHOWWINDOW = 0x00000001 +_SW_HIDE = 0 + +class _COORD(ctypes.Structure): + _fields_ = [('X', ctypes.c_short), + ('Y', ctypes.c_short)] + +class _SMALL_RECT(ctypes.Structure): + _fields_ = [('Left', ctypes.c_short), + ('Top', ctypes.c_short), + ('Right', ctypes.c_short), + ('Bottom', ctypes.c_short)] + +class _CONSOLE_SCREEN_BUFFER_INFO(ctypes.Structure): + _fields_ = [('dwSize', _COORD), + ('dwCursorPosition', _COORD), + ('wAttributes', _WORD), + ('srWindow', _SMALL_RECT), + ('dwMaximumWindowSize', _COORD)] + +_STD_ERROR_HANDLE = _DWORD(-12).value + +# types of parameters of C functions used (required by pypy) + +_kernel32.CreateFileA.argtypes = [_LPCSTR, _DWORD, _DWORD, ctypes.c_void_p, + _DWORD, _DWORD, _HANDLE] +_kernel32.CreateFileA.restype = _HANDLE + +_kernel32.GetFileInformationByHandle.argtypes = [_HANDLE, ctypes.c_void_p] +_kernel32.GetFileInformationByHandle.restype = _BOOL + 
+_kernel32.CloseHandle.argtypes = [_HANDLE] +_kernel32.CloseHandle.restype = _BOOL + +try: + _kernel32.CreateHardLinkA.argtypes = [_LPCSTR, _LPCSTR, ctypes.c_void_p] + _kernel32.CreateHardLinkA.restype = _BOOL +except AttributeError: + pass + +_kernel32.SetFileAttributesA.argtypes = [_LPCSTR, _DWORD] +_kernel32.SetFileAttributesA.restype = _BOOL + +_kernel32.OpenProcess.argtypes = [_DWORD, _BOOL, _DWORD] +_kernel32.OpenProcess.restype = _HANDLE + +_kernel32.GetExitCodeProcess.argtypes = [_HANDLE, ctypes.c_void_p] +_kernel32.GetExitCodeProcess.restype = _BOOL + +_kernel32.GetLastError.argtypes = [] +_kernel32.GetLastError.restype = _DWORD + +_kernel32.GetModuleFileNameA.argtypes = [_HANDLE, ctypes.c_void_p, _DWORD] +_kernel32.GetModuleFileNameA.restype = _DWORD + +_kernel32.CreateProcessA.argtypes = [_LPCSTR, _LPCSTR, ctypes.c_void_p, + ctypes.c_void_p, _BOOL, _DWORD, ctypes.c_void_p, _LPCSTR, ctypes.c_void_p, + ctypes.c_void_p] +_kernel32.CreateProcessA.restype = _BOOL + +_kernel32.ExitProcess.argtypes = [_UINT] +_kernel32.ExitProcess.restype = None + +_kernel32.GetCurrentProcessId.argtypes = [] +_kernel32.GetCurrentProcessId.restype = _DWORD + +_SIGNAL_HANDLER = ctypes.WINFUNCTYPE(_BOOL, _DWORD) +_kernel32.SetConsoleCtrlHandler.argtypes = [_SIGNAL_HANDLER, _BOOL] +_kernel32.SetConsoleCtrlHandler.restype = _BOOL + +_kernel32.GetStdHandle.argtypes = [_DWORD] +_kernel32.GetStdHandle.restype = _HANDLE + +_kernel32.GetConsoleScreenBufferInfo.argtypes = [_HANDLE, ctypes.c_void_p] +_kernel32.GetConsoleScreenBufferInfo.restype = _BOOL + +_advapi32.RegOpenKeyExA.argtypes = [_HANDLE, _LPCSTR, _DWORD, _DWORD, + ctypes.c_void_p] +_advapi32.RegOpenKeyExA.restype = _LONG + +_advapi32.RegQueryValueExA.argtypes = [_HANDLE, _LPCSTR, ctypes.c_void_p, + ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p] +_advapi32.RegQueryValueExA.restype = _LONG + +_advapi32.RegCloseKey.argtypes = [_HANDLE] +_advapi32.RegCloseKey.restype = _LONG + +_advapi32.GetUserNameA.argtypes = 
[ctypes.c_void_p, ctypes.c_void_p] +_advapi32.GetUserNameA.restype = _BOOL + +_user32.GetWindowThreadProcessId.argtypes = [_HANDLE, ctypes.c_void_p] +_user32.GetWindowThreadProcessId.restype = _DWORD + +_user32.ShowWindow.argtypes = [_HANDLE, ctypes.c_int] +_user32.ShowWindow.restype = _BOOL + +_WNDENUMPROC = ctypes.WINFUNCTYPE(_BOOL, _HWND, _LPARAM) +_user32.EnumWindows.argtypes = [_WNDENUMPROC, _LPARAM] +_user32.EnumWindows.restype = _BOOL + +def _raiseoserror(name): + err = ctypes.WinError() + raise OSError(err.errno, '%s: %s' % (name, err.strerror)) + +def _getfileinfo(name): + fh = _kernel32.CreateFileA(name, 0, + _FILE_SHARE_READ | _FILE_SHARE_WRITE | _FILE_SHARE_DELETE, + None, _OPEN_EXISTING, 0, None) + if fh == _INVALID_HANDLE_VALUE: + _raiseoserror(name) + try: + fi = _BY_HANDLE_FILE_INFORMATION() + if not _kernel32.GetFileInformationByHandle(fh, ctypes.byref(fi)): + _raiseoserror(name) + return fi + finally: + _kernel32.CloseHandle(fh) + +def oslink(src, dst): + try: + if not _kernel32.CreateHardLinkA(dst, src, None): + _raiseoserror(src) + except AttributeError: # Wine doesn't support this function + _raiseoserror(src) + +def nlinks(name): + '''return number of hardlinks for the given file''' + return _getfileinfo(name).nNumberOfLinks + +def samefile(fpath1, fpath2): + '''Returns whether fpath1 and fpath2 refer to the same file. This is only + guaranteed to work for files, not directories.''' + res1 = _getfileinfo(fpath1) + res2 = _getfileinfo(fpath2) + return (res1.dwVolumeSerialNumber == res2.dwVolumeSerialNumber + and res1.nFileIndexHigh == res2.nFileIndexHigh + and res1.nFileIndexLow == res2.nFileIndexLow) + +def samedevice(fpath1, fpath2): + '''Returns whether fpath1 and fpath2 are on the same device. 
This is only + guaranteed to work for files, not directories.''' + res1 = _getfileinfo(fpath1) + res2 = _getfileinfo(fpath2) + return res1.dwVolumeSerialNumber == res2.dwVolumeSerialNumber + +def testpid(pid): + '''return True if pid is still running or unable to + determine, False otherwise''' + h = _kernel32.OpenProcess(_PROCESS_QUERY_INFORMATION, False, pid) + if h: + try: + status = _DWORD() + if _kernel32.GetExitCodeProcess(h, ctypes.byref(status)): + return status.value == _STILL_ACTIVE + finally: + _kernel32.CloseHandle(h) + return _kernel32.GetLastError() != _ERROR_INVALID_PARAMETER + +def lookupreg(key, valname=None, scope=None): + ''' Look up a key/value name in the Windows registry. + + valname: value name. If unspecified, the default value for the key + is used. + scope: optionally specify scope for registry lookup, this can be + a sequence of scopes to look up in order. Default (CURRENT_USER, + LOCAL_MACHINE). + ''' + byref = ctypes.byref + if scope is None: + scope = (_HKEY_CURRENT_USER, _HKEY_LOCAL_MACHINE) + elif not isinstance(scope, (list, tuple)): + scope = (scope,) + for s in scope: + kh = _HANDLE() + res = _advapi32.RegOpenKeyExA(s, key, 0, _KEY_READ, ctypes.byref(kh)) + if res != _ERROR_SUCCESS: + continue + try: + size = _DWORD(600) + type = _DWORD() + buf = ctypes.create_string_buffer(size.value + 1) + res = _advapi32.RegQueryValueExA(kh.value, valname, None, + byref(type), buf, byref(size)) + if res != _ERROR_SUCCESS: + continue + if type.value == _REG_SZ: + # never let a Unicode string escape into the wild + return encoding.tolocal(buf.value.encode('UTF-8')) + elif type.value == _REG_DWORD: + fmt = ' and others +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. 
+ +from i18n import _ +import osutil +import errno, msvcrt, os, re, sys + +import win32 +executablepath = win32.executablepath +getuser = win32.getuser +hidewindow = win32.hidewindow +lookupreg = win32.lookupreg +makedir = win32.makedir +nlinks = win32.nlinks +oslink = win32.oslink +samedevice = win32.samedevice +samefile = win32.samefile +setsignalhandler = win32.setsignalhandler +spawndetached = win32.spawndetached +termwidth = win32.termwidth +testpid = win32.testpid +unlink = win32.unlink + +nulldev = 'NUL:' +umask = 002 + +# wrap osutil.posixfile to provide friendlier exceptions +def posixfile(name, mode='r', buffering=-1): + try: + return osutil.posixfile(name, mode, buffering) + except WindowsError, err: + raise IOError(err.errno, '%s: %s' % (name, err.strerror)) +posixfile.__doc__ = osutil.posixfile.__doc__ + +class winstdout(object): + '''stdout on windows misbehaves if sent through a pipe''' + + def __init__(self, fp): + self.fp = fp + + def __getattr__(self, key): + return getattr(self.fp, key) + + def close(self): + try: + self.fp.close() + except IOError: + pass + + def write(self, s): + try: + # This is workaround for "Not enough space" error on + # writing large size of data to console. 
+ limit = 16000 + l = len(s) + start = 0 + self.softspace = 0 + while start < l: + end = start + limit + self.fp.write(s[start:end]) + start = end + except IOError, inst: + if inst.errno != 0: + raise + self.close() + raise IOError(errno.EPIPE, 'Broken pipe') + + def flush(self): + try: + return self.fp.flush() + except IOError, inst: + if inst.errno != errno.EINVAL: + raise + self.close() + raise IOError(errno.EPIPE, 'Broken pipe') + +sys.__stdout__ = sys.stdout = winstdout(sys.stdout) + +def _is_win_9x(): + '''return true if run on windows 95, 98 or me.''' + try: + return sys.getwindowsversion()[3] == 1 + except AttributeError: + return 'command' in os.environ.get('comspec', '') + +def openhardlinks(): + return not _is_win_9x() + +def parsepatchoutput(output_line): + """parses the output produced by patch and returns the filename""" + pf = output_line[14:] + if pf[0] == '`': + pf = pf[1:-1] # Remove the quotes + return pf + +def sshargs(sshcmd, host, user, port): + '''Build argument list for ssh or Plink''' + pflag = 'plink' in sshcmd.lower() and '-P' or '-p' + args = user and ("%s@%s" % (user, host)) or host + return port and ("%s %s %s" % (args, pflag, port)) or args + +def setflags(f, l, x): + pass + +def copymode(src, dst, mode=None): + pass + +def checkexec(path): + return False + +def checklink(path): + return False + +def setbinary(fd): + # When run without console, pipes may expose invalid + # fileno(), usually set to -1. + fno = getattr(fd, 'fileno', None) + if fno is not None and fno() >= 0: + msvcrt.setmode(fno(), os.O_BINARY) + +def pconvert(path): + return '/'.join(path.split(os.sep)) + +def localpath(path): + return path.replace('/', '\\') + +def normpath(path): + return pconvert(os.path.normpath(path)) + +normcase = os.path.normcase + +def realpath(path): + ''' + Returns the true, canonical file system path equivalent to the given + path. 
+ ''' + # TODO: There may be a more clever way to do this that also handles other, + # less common file systems. + return os.path.normpath(os.path.normcase(os.path.realpath(path))) + +def samestat(s1, s2): + return False + +# A sequence of backslashes is special iff it precedes a double quote: +# - if there's an even number of backslashes, the double quote is not +# quoted (i.e. it ends the quoted region) +# - if there's an odd number of backslashes, the double quote is quoted +# - in both cases, every pair of backslashes is unquoted into a single +# backslash +# (See http://msdn2.microsoft.com/en-us/library/a1y7w461.aspx ) +# So, to quote a string, we must surround it in double quotes, double +# the number of backslashes that preceed double quotes and add another +# backslash before every double quote (being careful with the double +# quote we've appended to the end) +_quotere = None +def shellquote(s): + global _quotere + if _quotere is None: + _quotere = re.compile(r'(\\*)("|\\$)') + return '"%s"' % _quotere.sub(r'\1\1\\\2', s) + +def quotecommand(cmd): + """Build a command string suitable for os.popen* calls.""" + if sys.version_info < (2, 7, 1): + # Python versions since 2.7.1 do this extra quoting themselves + return '"' + cmd + '"' + return cmd + +def popen(command, mode='r'): + # Work around "popen spawned process may not write to stdout + # under windows" + # http://bugs.python.org/issue1366 + command += " 2> %s" % nulldev + return os.popen(quotecommand(command), mode) + +def explainexit(code): + return _("exited with status %d") % code, code + +# if you change this stub into a real check, please try to implement the +# username and groupname functions above, too. +def isowner(st): + return True + +def findexe(command): + '''Find executable for command searching like cmd.exe does. + If command is a basename then PATH is searched for command. + PATH isn't searched if command is an absolute or relative path. 
+ An extension from PATHEXT is found and added if not present. + If command isn't found None is returned.''' + pathext = os.environ.get('PATHEXT', '.COM;.EXE;.BAT;.CMD') + pathexts = [ext for ext in pathext.lower().split(os.pathsep)] + if os.path.splitext(command)[1].lower() in pathexts: + pathexts = [''] + + def findexisting(pathcommand): + 'Will append extension (if needed) and return existing file' + for ext in pathexts: + executable = pathcommand + ext + if os.path.exists(executable): + return executable + return None + + if os.sep in command: + return findexisting(command) + + for path in os.environ.get('PATH', '').split(os.pathsep): + executable = findexisting(os.path.join(path, command)) + if executable is not None: + return executable + return findexisting(os.path.expanduser(os.path.expandvars(command))) + +def statfiles(files): + '''Stat each file in files and yield stat or None if file does not exist. + Cluster and cache stat per directory to minimize number of OS stat calls.''' + ncase = os.path.normcase + dircache = {} # dirname -> filename -> status | None if file does not exist + for nf in files: + nf = ncase(nf) + dir, base = os.path.split(nf) + if not dir: + dir = '.' + cache = dircache.get(dir, None) + if cache is None: + try: + dmap = dict([(ncase(n), s) + for n, k, s in osutil.listdir(dir, True)]) + except OSError, err: + # handle directory not found in Python version prior to 2.5 + # Python <= 2.4 returns native Windows code 3 in errno + # Python >= 2.5 returns ENOENT and adds winerror field + # EINVAL is raised if dir is not a directory. + if err.errno not in (3, errno.ENOENT, errno.EINVAL, + errno.ENOTDIR): + raise + dmap = {} + cache = dircache.setdefault(dir, dmap) + yield cache.get(base, None) + +def username(uid=None): + """Return the name of the user with the given uid. + + If uid is None, return the name of the current user.""" + return None + +def groupname(gid=None): + """Return the name of the group with the given gid. 
+ + If gid is None, return the name of the current group.""" + return None + +def _removedirs(name): + """special version of os.removedirs that does not remove symlinked + directories or junction points if they actually contain files""" + if osutil.listdir(name): + return + os.rmdir(name) + head, tail = os.path.split(name) + if not tail: + head, tail = os.path.split(head) + while head and tail: + try: + if osutil.listdir(head): + return + os.rmdir(head) + except (ValueError, OSError): + break + head, tail = os.path.split(head) + +def unlinkpath(f): + """unlink and remove the directory if it is empty""" + unlink(f) + # try removing directories that might now be empty + try: + _removedirs(os.path.dirname(f)) + except OSError: + pass + +def rename(src, dst): + '''atomically rename file src to dst, replacing dst if it exists''' + try: + os.rename(src, dst) + except OSError, e: + if e.errno != errno.EEXIST: + raise + unlink(dst) + os.rename(src, dst) + +def gethgcmd(): + return [sys.executable] + sys.argv[:1] + +def termwidth(): + # cmd.exe does not handle CR like a unix console, the CR is + # counted in the line length. On 80 columns consoles, if 80 + # characters are written, the following CR won't apply on the + # current line but on the new one. Keep room for it. + return 79 + +def groupmembers(name): + # Don't support groups on Windows for now + raise KeyError() + +def isexec(f): + return False + +class cachestat(object): + def __init__(self, path): + pass + + def cacheable(self): + return False + +expandglobs = True diff --git a/websdk/mercurial/wireproto.py b/websdk/mercurial/wireproto.py new file mode 100644 index 0000000..d189004 --- /dev/null +++ b/websdk/mercurial/wireproto.py @@ -0,0 +1,607 @@ +# wireproto.py - generic wire protocol support functions +# +# Copyright 2005-2010 Matt Mackall +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. 
+ +import urllib, tempfile, os, sys +from i18n import _ +from node import bin, hex +import changegroup as changegroupmod +import repo, error, encoding, util, store + +# abstract batching support + +class future(object): + '''placeholder for a value to be set later''' + def set(self, value): + if util.safehasattr(self, 'value'): + raise error.RepoError("future is already set") + self.value = value + +class batcher(object): + '''base class for batches of commands submittable in a single request + + All methods invoked on instances of this class are simply queued and return a + a future for the result. Once you call submit(), all the queued calls are + performed and the results set in their respective futures. + ''' + def __init__(self): + self.calls = [] + def __getattr__(self, name): + def call(*args, **opts): + resref = future() + self.calls.append((name, args, opts, resref,)) + return resref + return call + def submit(self): + pass + +class localbatch(batcher): + '''performs the queued calls directly''' + def __init__(self, local): + batcher.__init__(self) + self.local = local + def submit(self): + for name, args, opts, resref in self.calls: + resref.set(getattr(self.local, name)(*args, **opts)) + +class remotebatch(batcher): + '''batches the queued calls; uses as few roundtrips as possible''' + def __init__(self, remote): + '''remote must support _submitbatch(encbatch) and _submitone(op, encargs)''' + batcher.__init__(self) + self.remote = remote + def submit(self): + req, rsp = [], [] + for name, args, opts, resref in self.calls: + mtd = getattr(self.remote, name) + batchablefn = getattr(mtd, 'batchable', None) + if batchablefn is not None: + batchable = batchablefn(mtd.im_self, *args, **opts) + encargsorres, encresref = batchable.next() + if encresref: + req.append((name, encargsorres,)) + rsp.append((batchable, encresref, resref,)) + else: + resref.set(encargsorres) + else: + if req: + self._submitreq(req, rsp) + req, rsp = [], [] + resref.set(mtd(*args, 
**opts)) + if req: + self._submitreq(req, rsp) + def _submitreq(self, req, rsp): + encresults = self.remote._submitbatch(req) + for encres, r in zip(encresults, rsp): + batchable, encresref, resref = r + encresref.set(encres) + resref.set(batchable.next()) + +def batchable(f): + '''annotation for batchable methods + + Such methods must implement a coroutine as follows: + + @batchable + def sample(self, one, two=None): + # Handle locally computable results first: + if not one: + yield "a local result", None + # Build list of encoded arguments suitable for your wire protocol: + encargs = [('one', encode(one),), ('two', encode(two),)] + # Create future for injection of encoded result: + encresref = future() + # Return encoded arguments and future: + yield encargs, encresref + # Assuming the future to be filled with the result from the batched request + # now. Decode it: + yield decode(encresref.value) + + The decorator returns a function which wraps this coroutine as a plain method, + but adds the original method as an attribute called "batchable", which is + used by remotebatch to split the call into separate encoding and decoding + phases. 
+ ''' + def plain(*args, **opts): + batchable = f(*args, **opts) + encargsorres, encresref = batchable.next() + if not encresref: + return encargsorres # a local result in this case + self = args[0] + encresref.set(self._submitone(f.func_name, encargsorres)) + return batchable.next() + setattr(plain, 'batchable', f) + return plain + +# list of nodes encoding / decoding + +def decodelist(l, sep=' '): + if l: + return map(bin, l.split(sep)) + return [] + +def encodelist(l, sep=' '): + return sep.join(map(hex, l)) + +# batched call argument encoding + +def escapearg(plain): + return (plain + .replace(':', '::') + .replace(',', ':,') + .replace(';', ':;') + .replace('=', ':=')) + +def unescapearg(escaped): + return (escaped + .replace(':=', '=') + .replace(':;', ';') + .replace(':,', ',') + .replace('::', ':')) + +# client side + +def todict(**args): + return args + +class wirerepository(repo.repository): + + def batch(self): + return remotebatch(self) + def _submitbatch(self, req): + cmds = [] + for op, argsdict in req: + args = ','.join('%s=%s' % p for p in argsdict.iteritems()) + cmds.append('%s %s' % (op, args)) + rsp = self._call("batch", cmds=';'.join(cmds)) + return rsp.split(';') + def _submitone(self, op, args): + return self._call(op, **args) + + @batchable + def lookup(self, key): + self.requirecap('lookup', _('look up remote revision')) + f = future() + yield todict(key=encoding.fromlocal(key)), f + d = f.value + success, data = d[:-1].split(" ", 1) + if int(success): + yield bin(data) + self._abort(error.RepoError(data)) + + @batchable + def heads(self): + f = future() + yield {}, f + d = f.value + try: + yield decodelist(d[:-1]) + except ValueError: + self._abort(error.ResponseError(_("unexpected response:"), d)) + + @batchable + def known(self, nodes): + f = future() + yield todict(nodes=encodelist(nodes)), f + d = f.value + try: + yield [bool(int(f)) for f in d] + except ValueError: + self._abort(error.ResponseError(_("unexpected response:"), d)) + + 
@batchable + def branchmap(self): + f = future() + yield {}, f + d = f.value + try: + branchmap = {} + for branchpart in d.splitlines(): + branchname, branchheads = branchpart.split(' ', 1) + branchname = encoding.tolocal(urllib.unquote(branchname)) + branchheads = decodelist(branchheads) + branchmap[branchname] = branchheads + yield branchmap + except TypeError: + self._abort(error.ResponseError(_("unexpected response:"), d)) + + def branches(self, nodes): + n = encodelist(nodes) + d = self._call("branches", nodes=n) + try: + br = [tuple(decodelist(b)) for b in d.splitlines()] + return br + except ValueError: + self._abort(error.ResponseError(_("unexpected response:"), d)) + + def between(self, pairs): + batch = 8 # avoid giant requests + r = [] + for i in xrange(0, len(pairs), batch): + n = " ".join([encodelist(p, '-') for p in pairs[i:i + batch]]) + d = self._call("between", pairs=n) + try: + r.extend(l and decodelist(l) or [] for l in d.splitlines()) + except ValueError: + self._abort(error.ResponseError(_("unexpected response:"), d)) + return r + + @batchable + def pushkey(self, namespace, key, old, new): + if not self.capable('pushkey'): + yield False, None + f = future() + yield todict(namespace=encoding.fromlocal(namespace), + key=encoding.fromlocal(key), + old=encoding.fromlocal(old), + new=encoding.fromlocal(new)), f + d = f.value + try: + d = bool(int(d)) + except ValueError: + raise error.ResponseError( + _('push failed (unexpected response):'), d) + yield d + + @batchable + def listkeys(self, namespace): + if not self.capable('pushkey'): + yield {}, None + f = future() + yield todict(namespace=encoding.fromlocal(namespace)), f + d = f.value + r = {} + for l in d.splitlines(): + k, v = l.split('\t') + r[encoding.tolocal(k)] = encoding.tolocal(v) + yield r + + def stream_out(self): + return self._callstream('stream_out') + + def changegroup(self, nodes, kind): + n = encodelist(nodes) + f = self._callstream("changegroup", roots=n) + return 
changegroupmod.unbundle10(self._decompress(f), 'UN') + + def changegroupsubset(self, bases, heads, kind): + self.requirecap('changegroupsubset', _('look up remote changes')) + bases = encodelist(bases) + heads = encodelist(heads) + f = self._callstream("changegroupsubset", + bases=bases, heads=heads) + return changegroupmod.unbundle10(self._decompress(f), 'UN') + + def getbundle(self, source, heads=None, common=None): + self.requirecap('getbundle', _('look up remote changes')) + opts = {} + if heads is not None: + opts['heads'] = encodelist(heads) + if common is not None: + opts['common'] = encodelist(common) + f = self._callstream("getbundle", **opts) + return changegroupmod.unbundle10(self._decompress(f), 'UN') + + def unbundle(self, cg, heads, source): + '''Send cg (a readable file-like object representing the + changegroup to push, typically a chunkbuffer object) to the + remote server as a bundle. Return an integer indicating the + result of the push (see localrepository.addchangegroup()).''' + + if heads != ['force'] and self.capable('unbundlehash'): + heads = encodelist(['hashed', + util.sha1(''.join(sorted(heads))).digest()]) + else: + heads = encodelist(heads) + + ret, output = self._callpush("unbundle", cg, heads=heads) + if ret == "": + raise error.ResponseError( + _('push failed:'), output) + try: + ret = int(ret) + except ValueError: + raise error.ResponseError( + _('push failed (unexpected response):'), ret) + + for l in output.splitlines(True): + self.ui.status(_('remote: '), l) + return ret + + def debugwireargs(self, one, two, three=None, four=None, five=None): + # don't pass optional arguments left at their default value + opts = {} + if three is not None: + opts['three'] = three + if four is not None: + opts['four'] = four + return self._call('debugwireargs', one=one, two=two, **opts) + +# server side + +class streamres(object): + def __init__(self, gen): + self.gen = gen + +class pushres(object): + def __init__(self, res): + self.res = res + 
+class pusherr(object): + def __init__(self, res): + self.res = res + +class ooberror(object): + def __init__(self, message): + self.message = message + +def dispatch(repo, proto, command): + func, spec = commands[command] + args = proto.getargs(spec) + return func(repo, proto, *args) + +def options(cmd, keys, others): + opts = {} + for k in keys: + if k in others: + opts[k] = others[k] + del others[k] + if others: + sys.stderr.write("abort: %s got unexpected arguments %s\n" + % (cmd, ",".join(others))) + return opts + +def batch(repo, proto, cmds, others): + res = [] + for pair in cmds.split(';'): + op, args = pair.split(' ', 1) + vals = {} + for a in args.split(','): + if a: + n, v = a.split('=') + vals[n] = unescapearg(v) + func, spec = commands[op] + if spec: + keys = spec.split() + data = {} + for k in keys: + if k == '*': + star = {} + for key in vals.keys(): + if key not in keys: + star[key] = vals[key] + data['*'] = star + else: + data[k] = vals[k] + result = func(repo, proto, *[data[k] for k in keys]) + else: + result = func(repo, proto) + if isinstance(result, ooberror): + return result + res.append(escapearg(result)) + return ';'.join(res) + +def between(repo, proto, pairs): + pairs = [decodelist(p, '-') for p in pairs.split(" ")] + r = [] + for b in repo.between(pairs): + r.append(encodelist(b) + "\n") + return "".join(r) + +def branchmap(repo, proto): + branchmap = repo.branchmap() + heads = [] + for branch, nodes in branchmap.iteritems(): + branchname = urllib.quote(encoding.fromlocal(branch)) + branchnodes = encodelist(nodes) + heads.append('%s %s' % (branchname, branchnodes)) + return '\n'.join(heads) + +def branches(repo, proto, nodes): + nodes = decodelist(nodes) + r = [] + for b in repo.branches(nodes): + r.append(encodelist(b) + "\n") + return "".join(r) + +def capabilities(repo, proto): + caps = ('lookup changegroupsubset branchmap pushkey known getbundle ' + 'unbundlehash batch').split() + if _allowstream(repo.ui): + requiredformats = 
repo.requirements & repo.supportedformats + # if our local revlogs are just revlogv1, add 'stream' cap + if not requiredformats - set(('revlogv1',)): + caps.append('stream') + # otherwise, add 'streamreqs' detailing our local revlog format + else: + caps.append('streamreqs=%s' % ','.join(requiredformats)) + caps.append('unbundle=%s' % ','.join(changegroupmod.bundlepriority)) + caps.append('httpheader=1024') + return ' '.join(caps) + +def changegroup(repo, proto, roots): + nodes = decodelist(roots) + cg = repo.changegroup(nodes, 'serve') + return streamres(proto.groupchunks(cg)) + +def changegroupsubset(repo, proto, bases, heads): + bases = decodelist(bases) + heads = decodelist(heads) + cg = repo.changegroupsubset(bases, heads, 'serve') + return streamres(proto.groupchunks(cg)) + +def debugwireargs(repo, proto, one, two, others): + # only accept optional args from the known set + opts = options('debugwireargs', ['three', 'four'], others) + return repo.debugwireargs(one, two, **opts) + +def getbundle(repo, proto, others): + opts = options('getbundle', ['heads', 'common'], others) + for k, v in opts.iteritems(): + opts[k] = decodelist(v) + cg = repo.getbundle('serve', **opts) + return streamres(proto.groupchunks(cg)) + +def heads(repo, proto): + h = repo.heads() + return encodelist(h) + "\n" + +def hello(repo, proto): + '''the hello command returns a set of lines describing various + interesting things about the server, in an RFC822-like format. 
+ Currently the only one defined is "capabilities", which + consists of a line in the form: + + capabilities: space separated list of tokens + ''' + return "capabilities: %s\n" % (capabilities(repo, proto)) + +def listkeys(repo, proto, namespace): + d = repo.listkeys(encoding.tolocal(namespace)).items() + t = '\n'.join(['%s\t%s' % (encoding.fromlocal(k), encoding.fromlocal(v)) + for k, v in d]) + return t + +def lookup(repo, proto, key): + try: + r = hex(repo.lookup(encoding.tolocal(key))) + success = 1 + except Exception, inst: + r = str(inst) + success = 0 + return "%s %s\n" % (success, r) + +def known(repo, proto, nodes, others): + return ''.join(b and "1" or "0" for b in repo.known(decodelist(nodes))) + +def pushkey(repo, proto, namespace, key, old, new): + # compatibility with pre-1.8 clients which were accidentally + # sending raw binary nodes rather than utf-8-encoded hex + if len(new) == 20 and new.encode('string-escape') != new: + # looks like it could be a binary node + try: + new.decode('utf-8') + new = encoding.tolocal(new) # but cleanly decodes as UTF-8 + except UnicodeDecodeError: + pass # binary, leave unmodified + else: + new = encoding.tolocal(new) # normal path + + r = repo.pushkey(encoding.tolocal(namespace), encoding.tolocal(key), + encoding.tolocal(old), new) + return '%s\n' % int(r) + +def _allowstream(ui): + return ui.configbool('server', 'uncompressed', True, untrusted=True) + +def stream(repo, proto): + '''If the server supports streaming clone, it advertises the "stream" + capability with a value representing the version and flags of the repo + it is serving. Client checks to see if it understands the format. + + The format is simple: the server writes out a line with the amount + of files, then the total amount of bytes to be transfered (separated + by a space). Then, for each file, the server first writes the filename + and filesize (separated by the null character), then the file contents. 
+ ''' + + if not _allowstream(repo.ui): + return '1\n' + + entries = [] + total_bytes = 0 + try: + # get consistent snapshot of repo, lock during scan + lock = repo.lock() + try: + repo.ui.debug('scanning\n') + for name, ename, size in repo.store.walk(): + entries.append((name, size)) + total_bytes += size + finally: + lock.release() + except error.LockError: + return '2\n' # error: 2 + + def streamer(repo, entries, total): + '''stream out all metadata files in repository.''' + yield '0\n' # success + repo.ui.debug('%d files, %d bytes to transfer\n' % + (len(entries), total_bytes)) + yield '%d %d\n' % (len(entries), total_bytes) + for name, size in entries: + repo.ui.debug('sending %s (%d bytes)\n' % (name, size)) + # partially encode name over the wire for backwards compat + yield '%s\0%d\n' % (store.encodedir(name), size) + for chunk in util.filechunkiter(repo.sopener(name), limit=size): + yield chunk + + return streamres(streamer(repo, entries, total_bytes)) + +def unbundle(repo, proto, heads): + their_heads = decodelist(heads) + + def check_heads(): + heads = repo.heads() + heads_hash = util.sha1(''.join(sorted(heads))).digest() + return (their_heads == ['force'] or their_heads == heads or + their_heads == ['hashed', heads_hash]) + + proto.redirect() + + # fail early if possible + if not check_heads(): + return pusherr('unsynced changes') + + # write bundle data to temporary file because it can be big + fd, tempname = tempfile.mkstemp(prefix='hg-unbundle-') + fp = os.fdopen(fd, 'wb+') + r = 0 + try: + proto.getfile(fp) + lock = repo.lock() + try: + if not check_heads(): + # someone else committed/pushed/unbundled while we + # were transferring data + return pusherr('unsynced changes') + + # push can proceed + fp.seek(0) + gen = changegroupmod.readbundle(fp, None) + + try: + r = repo.addchangegroup(gen, 'serve', proto._client(), + lock=lock) + except util.Abort, inst: + sys.stderr.write("abort: %s\n" % inst) + finally: + lock.release() + return pushres(r) + + 
finally: + fp.close() + os.unlink(tempname) + +commands = { + 'batch': (batch, 'cmds *'), + 'between': (between, 'pairs'), + 'branchmap': (branchmap, ''), + 'branches': (branches, 'nodes'), + 'capabilities': (capabilities, ''), + 'changegroup': (changegroup, 'roots'), + 'changegroupsubset': (changegroupsubset, 'bases heads'), + 'debugwireargs': (debugwireargs, 'one two *'), + 'getbundle': (getbundle, '*'), + 'heads': (heads, ''), + 'hello': (hello, ''), + 'known': (known, 'nodes *'), + 'listkeys': (listkeys, 'namespace'), + 'lookup': (lookup, 'key'), + 'pushkey': (pushkey, 'namespace key old new'), + 'stream_out': (stream, ''), + 'unbundle': (unbundle, 'heads'), +} -- cgit v0.9.1