Diffstat (limited to 'eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial')
329 files changed, 40583 insertions, 0 deletions
diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/__init__.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/__init__.py
diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/__init__.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/__init__.pyo
Binary files differ
new file mode 100644
index 0000000..c642cf9
--- /dev/null
+++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/__init__.pyo
diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/__version__.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/__version__.py
new file mode 100644
index 0000000..550dfb6
--- /dev/null
+++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/__version__.py
@@ -0,0 +1,2 @@
+# this file is autogenerated by setup.py
+version = "1.7.3"
diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/__version__.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/__version__.pyo
Binary files differ
new file mode 100644
index 0000000..c55de9b
--- /dev/null
+++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/__version__.pyo
diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/ancestor.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/ancestor.py
new file mode 100644
index 0000000..52f4dc1
--- /dev/null
+++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/ancestor.py
@@ -0,0 +1,88 @@
+# ancestor.py - generic DAG ancestor algorithm for mercurial
+#
+# Copyright 2006 Matt Mackall <mpm@selenic.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+import heapq
+
+def ancestor(a, b, pfunc):
+    """
+    return a minimal-distance ancestor of nodes a and b, or None if there is no
+    such ancestor. Note that there can be several ancestors with the same
+    (minimal) distance, and the one returned is arbitrary.
+
+    pfunc must return a list of parent vertices for a given vertex
+    """
+
+    if a == b:
+        return a
+
+    a, b = sorted([a, b])
+
+    # find depth from root of all ancestors
+    parentcache = {}
+    visit = [a, b]
+    depth = {}
+    while visit:
+        vertex = visit[-1]
+        pl = pfunc(vertex)
+        parentcache[vertex] = pl
+        if not pl:
+            depth[vertex] = 0
+            visit.pop()
+        else:
+            for p in pl:
+                if p == a or p == b: # did we find a or b as a parent?
+ return p # we're done + if p not in depth: + visit.append(p) + if visit[-1] == vertex: + depth[vertex] = min([depth[p] for p in pl]) - 1 + visit.pop() + + # traverse ancestors in order of decreasing distance from root + def ancestors(vertex): + h = [(depth[vertex], vertex)] + seen = set() + while h: + d, n = heapq.heappop(h) + if n not in seen: + seen.add(n) + yield (d, n) + for p in parentcache[n]: + heapq.heappush(h, (depth[p], p)) + + def generations(vertex): + sg, s = None, set() + for g, v in ancestors(vertex): + if g != sg: + if sg: + yield sg, s + sg, s = g, set((v,)) + else: + s.add(v) + yield sg, s + + x = generations(a) + y = generations(b) + gx = x.next() + gy = y.next() + + # increment each ancestor list until it is closer to root than + # the other, or they match + try: + while 1: + if gx[0] == gy[0]: + for v in gx[1]: + if v in gy[1]: + return v + gy = y.next() + gx = x.next() + elif gx[0] > gy[0]: + gy = y.next() + else: + gx = x.next() + except StopIteration: + return None diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/ancestor.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/ancestor.pyo Binary files differnew file mode 100644 index 0000000..6a7d32a --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/ancestor.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/archival.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/archival.py new file mode 100644 index 0000000..a2b0e93 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/archival.py @@ -0,0 +1,274 @@ +# archival.py - revision archival for mercurial +# +# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com> +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +from i18n import _ +from node import hex +import cmdutil +import util +import cStringIO, os, stat, tarfile, time, zipfile +import zlib, gzip + +def tidyprefix(dest, kind, prefix): + '''choose prefix to use for names in archive. make sure prefix is + safe for consumers.''' + + if prefix: + prefix = util.normpath(prefix) + else: + if not isinstance(dest, str): + raise ValueError('dest must be string if no prefix') + prefix = os.path.basename(dest) + lower = prefix.lower() + for sfx in exts.get(kind, []): + if lower.endswith(sfx): + prefix = prefix[:-len(sfx)] + break + lpfx = os.path.normpath(util.localpath(prefix)) + prefix = util.pconvert(lpfx) + if not prefix.endswith('/'): + prefix += '/' + if prefix.startswith('../') or os.path.isabs(lpfx) or '/../' in prefix: + raise util.Abort(_('archive prefix contains illegal components')) + return prefix + +exts = { + 'tar': ['.tar'], + 'tbz2': ['.tbz2', '.tar.bz2'], + 'tgz': ['.tgz', '.tar.gz'], + 'zip': ['.zip'], + } + +def guesskind(dest): + for kind, extensions in exts.iteritems(): + if util.any(dest.endswith(ext) for ext in extensions): + return kind + return None + + +class tarit(object): + '''write archive to tar file or stream. 
can write uncompressed, + or compress with gzip or bzip2.''' + + class GzipFileWithTime(gzip.GzipFile): + + def __init__(self, *args, **kw): + timestamp = None + if 'timestamp' in kw: + timestamp = kw.pop('timestamp') + if timestamp is None: + self.timestamp = time.time() + else: + self.timestamp = timestamp + gzip.GzipFile.__init__(self, *args, **kw) + + def _write_gzip_header(self): + self.fileobj.write('\037\213') # magic header + self.fileobj.write('\010') # compression method + # Python 2.6 deprecates self.filename + fname = getattr(self, 'name', None) or self.filename + if fname and fname.endswith('.gz'): + fname = fname[:-3] + flags = 0 + if fname: + flags = gzip.FNAME + self.fileobj.write(chr(flags)) + gzip.write32u(self.fileobj, long(self.timestamp)) + self.fileobj.write('\002') + self.fileobj.write('\377') + if fname: + self.fileobj.write(fname + '\000') + + def __init__(self, dest, mtime, kind=''): + self.mtime = mtime + + def taropen(name, mode, fileobj=None): + if kind == 'gz': + mode = mode[0] + if not fileobj: + fileobj = open(name, mode + 'b') + gzfileobj = self.GzipFileWithTime(name, mode + 'b', + zlib.Z_BEST_COMPRESSION, + fileobj, timestamp=mtime) + return tarfile.TarFile.taropen(name, mode, gzfileobj) + else: + return tarfile.open(name, mode + kind, fileobj) + + if isinstance(dest, str): + self.z = taropen(dest, mode='w:') + else: + # Python 2.5-2.5.1 have a regression that requires a name arg + self.z = taropen(name='', mode='w|', fileobj=dest) + + def addfile(self, name, mode, islink, data): + i = tarfile.TarInfo(name) + i.mtime = self.mtime + i.size = len(data) + if islink: + i.type = tarfile.SYMTYPE + i.mode = 0777 + i.linkname = data + data = None + i.size = 0 + else: + i.mode = mode + data = cStringIO.StringIO(data) + self.z.addfile(i, data) + + def done(self): + self.z.close() + +class tellable(object): + '''provide tell method for zipfile.ZipFile when writing to http + response file object.''' + + def __init__(self, fp): + self.fp = fp + self.offset = 0 + + def __getattr__(self, key): + return getattr(self.fp, key) + + def write(self, s): + self.fp.write(s) + self.offset += len(s) + + def tell(self): + return self.offset + +class zipit(object): + '''write archive to zip file or stream. can write uncompressed, + or compressed with deflate.''' + + def __init__(self, dest, mtime, compress=True): + if not isinstance(dest, str): + try: + dest.tell() + except (AttributeError, IOError): + dest = tellable(dest) + self.z = zipfile.ZipFile(dest, 'w', + compress and zipfile.ZIP_DEFLATED or + zipfile.ZIP_STORED) + + # Python's zipfile module emits deprecation warnings if we try + # to store files with a date before 1980. + epoch = 315532800 # calendar.timegm((1980, 1, 1, 0, 0, 0, 1, 1, 0)) + if mtime < epoch: + mtime = epoch + + self.date_time = time.gmtime(mtime)[:6] + + def addfile(self, name, mode, islink, data): + i = zipfile.ZipInfo(name, self.date_time) + i.compress_type = self.z.compression + # unzip will not honor unix file modes unless file creator is + # set to unix (id 3). 
+ i.create_system = 3 + ftype = stat.S_IFREG + if islink: + mode = 0777 + ftype = stat.S_IFLNK + i.external_attr = (mode | ftype) << 16L + self.z.writestr(i, data) + + def done(self): + self.z.close() + +class fileit(object): + '''write archive as files in directory.''' + + def __init__(self, name, mtime): + self.basedir = name + self.opener = util.opener(self.basedir) + + def addfile(self, name, mode, islink, data): + if islink: + self.opener.symlink(data, name) + return + f = self.opener(name, "w", atomictemp=True) + f.write(data) + f.rename() + destfile = os.path.join(self.basedir, name) + os.chmod(destfile, mode) + + def done(self): + pass + +archivers = { + 'files': fileit, + 'tar': tarit, + 'tbz2': lambda name, mtime: tarit(name, mtime, 'bz2'), + 'tgz': lambda name, mtime: tarit(name, mtime, 'gz'), + 'uzip': lambda name, mtime: zipit(name, mtime, False), + 'zip': zipit, + } + +def archive(repo, dest, node, kind, decode=True, matchfn=None, + prefix=None, mtime=None, subrepos=False): + '''create archive of repo as it was at node. + + dest can be name of directory, name of archive file, or file + object to write archive to. + + kind is type of archive to create. + + decode tells whether to put files through decode filters from + hgrc. + + matchfn is function to filter names of files to write to archive. + + prefix is name of path to put before every archive member.''' + + if kind == 'files': + if prefix: + raise util.Abort(_('cannot give prefix when archiving to files')) + else: + prefix = tidyprefix(dest, kind, prefix) + + def write(name, mode, islink, getdata): + if matchfn and not matchfn(name): + return + data = getdata() + if decode: + data = repo.wwritedata(name, data) + archiver.addfile(prefix + name, mode, islink, data) + + if kind not in archivers: + raise util.Abort(_("unknown archive type '%s'") % kind) + + ctx = repo[node] + archiver = archivers[kind](dest, mtime or ctx.date()[0]) + + if repo.ui.configbool("ui", "archivemeta", True): + def metadata(): + base = 'repo: %s\nnode: %s\nbranch: %s\n' % ( + repo[0].hex(), hex(node), ctx.branch()) + + tags = ''.join('tag: %s\n' % t for t in ctx.tags() + if repo.tagtype(t) == 'global') + if not tags: + repo.ui.pushbuffer() + opts = {'template': '{latesttag}\n{latesttagdistance}', + 'style': '', 'patch': None, 'git': None} + cmdutil.show_changeset(repo.ui, repo, opts).show(ctx) + ltags, dist = repo.ui.popbuffer().split('\n') + tags = ''.join('latesttag: %s\n' % t for t in ltags.split(':')) + tags += 'latesttagdistance: %s\n' % dist + + return base + tags + + write('.hg_archival.txt', 0644, False, metadata) + + for f in ctx: + ff = ctx.flags(f) + write(f, 'x' in ff and 0755 or 0644, 'l' in ff, ctx[f].data) + + if subrepos: + for subpath in ctx.substate: + sub = ctx.sub(subpath) + sub.archive(archiver, prefix) + + archiver.done() diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/archival.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/archival.pyo Binary files differnew file mode 100644 index 0000000..49099a1 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/archival.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/base85.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/base85.py new file mode 100644 index 0000000..dfdfe4f --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/base85.py @@ -0,0 +1,7 @@ +def __bootstrap__(): + global __bootstrap__, __loader__, __file__ + import sys, pkg_resources, imp + __file__ = 
pkg_resources.resource_filename(__name__,'base85.so') + __loader__ = None; del __bootstrap__, __loader__ + imp.load_dynamic(__name__,__file__) +__bootstrap__() diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/base85.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/base85.pyo Binary files differnew file mode 100644 index 0000000..f58ee56 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/base85.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/base85.so b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/base85.so Binary files differnew file mode 100755 index 0000000..5de5836 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/base85.so diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/bdiff.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/bdiff.py new file mode 100644 index 0000000..7cabf35 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/bdiff.py @@ -0,0 +1,7 @@ +def __bootstrap__(): + global __bootstrap__, __loader__, __file__ + import sys, pkg_resources, imp + __file__ = pkg_resources.resource_filename(__name__,'bdiff.so') + __loader__ = None; del __bootstrap__, __loader__ + imp.load_dynamic(__name__,__file__) +__bootstrap__() diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/bdiff.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/bdiff.pyo Binary files differnew file mode 100644 index 0000000..5074db0 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/bdiff.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/bdiff.so b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/bdiff.so Binary files differnew file mode 100755 index 0000000..45f02c9 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/bdiff.so diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/bundlerepo.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/bundlerepo.py new file mode 100644 index 0000000..db4e0f3 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/bundlerepo.py @@ -0,0 +1,324 @@ +# bundlerepo.py - repository class for viewing uncompressed bundles +# +# Copyright 2006, 2007 Benoit Boissinot <bboissin@gmail.com> +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +"""Repository class for viewing uncompressed bundles. + +This provides a read-only repository interface to bundles as if they +were part of the actual repository. +""" + +from node import nullid +from i18n import _ +import os, struct, tempfile, shutil +import changegroup, util, mdiff, discovery +import localrepo, changelog, manifest, filelog, revlog, error + +class bundlerevlog(revlog.revlog): + def __init__(self, opener, indexfile, bundle, + linkmapper=None): + # How it works: + # to retrieve a revision, we need to know the offset of + # the revision in the bundle (an unbundle object). + # + # We store this offset in the index (start), to differentiate a + # rev in the bundle and from a rev in the revlog, we check + # len(index[r]). 
If the tuple is bigger than 7, it is a bundle + # (it is bigger since we store the node to which the delta is) + # + revlog.revlog.__init__(self, opener, indexfile) + self.bundle = bundle + self.basemap = {} + def chunkpositer(): + while 1: + chunk = bundle.chunk() + if not chunk: + break + pos = bundle.tell() + yield chunk, pos - len(chunk) + n = len(self) + prev = None + for chunk, start in chunkpositer(): + size = len(chunk) + if size < 80: + raise util.Abort(_("invalid changegroup")) + start += 80 + size -= 80 + node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80]) + if node in self.nodemap: + prev = node + continue + for p in (p1, p2): + if not p in self.nodemap: + raise error.LookupError(p, self.indexfile, + _("unknown parent")) + if linkmapper is None: + link = n + else: + link = linkmapper(cs) + + if not prev: + prev = p1 + # start, size, full unc. size, base (unused), link, p1, p2, node + e = (revlog.offset_type(start, 0), size, -1, -1, link, + self.rev(p1), self.rev(p2), node) + self.basemap[n] = prev + self.index.insert(-1, e) + self.nodemap[node] = n + prev = node + n += 1 + + def inbundle(self, rev): + """is rev from the bundle""" + if rev < 0: + return False + return rev in self.basemap + def bundlebase(self, rev): + return self.basemap[rev] + def _chunk(self, rev): + # Warning: in case of bundle, the diff is against bundlebase, + # not against rev - 1 + # XXX: could use some caching + if not self.inbundle(rev): + return revlog.revlog._chunk(self, rev) + self.bundle.seek(self.start(rev)) + return self.bundle.read(self.length(rev)) + + def revdiff(self, rev1, rev2): + """return or calculate a delta between two revisions""" + if self.inbundle(rev1) and self.inbundle(rev2): + # hot path for bundle + revb = self.rev(self.bundlebase(rev2)) + if revb == rev1: + return self._chunk(rev2) + elif not self.inbundle(rev1) and not self.inbundle(rev2): + return revlog.revlog.revdiff(self, rev1, rev2) + + return mdiff.textdiff(self.revision(self.node(rev1)), + self.revision(self.node(rev2))) + + def revision(self, node): + """return an uncompressed revision of a given""" + if node == nullid: + return "" + + text = None + chain = [] + iter_node = node + rev = self.rev(iter_node) + # reconstruct the revision if it is from a changegroup + while self.inbundle(rev): + if self._cache and self._cache[0] == iter_node: + text = self._cache[2] + break + chain.append(rev) + iter_node = self.bundlebase(rev) + rev = self.rev(iter_node) + if text is None: + text = revlog.revlog.revision(self, iter_node) + + while chain: + delta = self._chunk(chain.pop()) + text = mdiff.patches(text, [delta]) + + p1, p2 = self.parents(node) + if node != revlog.hash(text, p1, p2): + raise error.RevlogError(_("integrity check failed on %s:%d") + % (self.datafile, self.rev(node))) + + self._cache = (node, self.rev(node), text) + return text + + def addrevision(self, text, transaction, link, p1=None, p2=None, d=None): + raise NotImplementedError + def addgroup(self, revs, linkmapper, transaction): + raise NotImplementedError + def strip(self, rev, minlink): + raise NotImplementedError + def checksize(self): + raise NotImplementedError + +class bundlechangelog(bundlerevlog, changelog.changelog): + def __init__(self, opener, bundle): + changelog.changelog.__init__(self, opener) + bundlerevlog.__init__(self, opener, self.indexfile, bundle) + +class bundlemanifest(bundlerevlog, manifest.manifest): + def __init__(self, opener, bundle, linkmapper): + manifest.manifest.__init__(self, opener) + bundlerevlog.__init__(self, 
opener, self.indexfile, bundle, + linkmapper) + +class bundlefilelog(bundlerevlog, filelog.filelog): + def __init__(self, opener, path, bundle, linkmapper): + filelog.filelog.__init__(self, opener, path) + bundlerevlog.__init__(self, opener, self.indexfile, bundle, + linkmapper) + +class bundlerepository(localrepo.localrepository): + def __init__(self, ui, path, bundlename): + self._tempparent = None + try: + localrepo.localrepository.__init__(self, ui, path) + except error.RepoError: + self._tempparent = tempfile.mkdtemp() + localrepo.instance(ui, self._tempparent, 1) + localrepo.localrepository.__init__(self, ui, self._tempparent) + + if path: + self._url = 'bundle:' + util.expandpath(path) + '+' + bundlename + else: + self._url = 'bundle:' + bundlename + + self.tempfile = None + f = open(bundlename, "rb") + self.bundle = changegroup.readbundle(f, bundlename) + if self.bundle.compressed(): + fdtemp, temp = tempfile.mkstemp(prefix="hg-bundle-", + suffix=".hg10un", dir=self.path) + self.tempfile = temp + fptemp = os.fdopen(fdtemp, 'wb') + + try: + fptemp.write("HG10UN") + while 1: + chunk = self.bundle.read(2**18) + if not chunk: + break + fptemp.write(chunk) + finally: + fptemp.close() + + f = open(self.tempfile, "rb") + self.bundle = changegroup.readbundle(f, bundlename) + + # dict with the mapping 'filename' -> position in the bundle + self.bundlefilespos = {} + + @util.propertycache + def changelog(self): + c = bundlechangelog(self.sopener, self.bundle) + self.manstart = self.bundle.tell() + return c + + @util.propertycache + def manifest(self): + self.bundle.seek(self.manstart) + m = bundlemanifest(self.sopener, self.bundle, self.changelog.rev) + self.filestart = self.bundle.tell() + return m + + @util.propertycache + def manstart(self): + self.changelog + return self.manstart + + @util.propertycache + def filestart(self): + self.manifest + return self.filestart + + def url(self): + return self._url + + def file(self, f): + if not self.bundlefilespos: + self.bundle.seek(self.filestart) + while 1: + chunk = self.bundle.chunk() + if not chunk: + break + self.bundlefilespos[chunk] = self.bundle.tell() + while 1: + c = self.bundle.chunk() + if not c: + break + + if f[0] == '/': + f = f[1:] + if f in self.bundlefilespos: + self.bundle.seek(self.bundlefilespos[f]) + return bundlefilelog(self.sopener, f, self.bundle, + self.changelog.rev) + else: + return filelog.filelog(self.sopener, f) + + def close(self): + """Close assigned bundle file immediately.""" + self.bundle.close() + if self.tempfile is not None: + os.unlink(self.tempfile) + + def __del__(self): + del self.bundle + if self.tempfile is not None: + os.unlink(self.tempfile) + if self._tempparent: + shutil.rmtree(self._tempparent, True) + + def cancopy(self): + return False + + def getcwd(self): + return os.getcwd() # always outside the repo + +def instance(ui, path, create): + if create: + raise util.Abort(_('cannot create new bundle repository')) + parentpath = ui.config("bundle", "mainreporoot", "") + if parentpath: + # Try to make the full path relative so we get a nice, short URL. + # In particular, we don't want temp dir names in test outputs. 
+ cwd = os.getcwd() + if parentpath == cwd: + parentpath = '' + else: + cwd = os.path.join(cwd,'') + if parentpath.startswith(cwd): + parentpath = parentpath[len(cwd):] + path = util.drop_scheme('file', path) + if path.startswith('bundle:'): + path = util.drop_scheme('bundle', path) + s = path.split("+", 1) + if len(s) == 1: + repopath, bundlename = parentpath, s[0] + else: + repopath, bundlename = s + else: + repopath, bundlename = parentpath, path + return bundlerepository(ui, repopath, bundlename) + +def getremotechanges(ui, repo, other, revs=None, bundlename=None, force=False): + tmp = discovery.findcommonincoming(repo, other, heads=revs, force=force) + common, incoming, rheads = tmp + if not incoming: + try: + os.unlink(bundlename) + except: + pass + return other, None, None + + bundle = None + if bundlename or not other.local(): + # create a bundle (uncompressed if other repo is not local) + + if revs is None and other.capable('changegroupsubset'): + revs = rheads + + if revs is None: + cg = other.changegroup(incoming, "incoming") + else: + cg = other.changegroupsubset(incoming, revs, 'incoming') + bundletype = other.local() and "HG10BZ" or "HG10UN" + fname = bundle = changegroup.writebundle(cg, bundlename, bundletype) + # keep written bundle? + if bundlename: + bundle = None + if not other.local(): + # use the created uncompressed bundlerepo + other = bundlerepository(ui, repo.root, fname) + return (other, incoming, bundle) + diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/bundlerepo.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/bundlerepo.pyo Binary files differnew file mode 100644 index 0000000..a92f9b3 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/bundlerepo.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/byterange.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/byterange.py new file mode 100644 index 0000000..f8fb3f6 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/byterange.py @@ -0,0 +1,466 @@ +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the +# Free Software Foundation, Inc., +# 59 Temple Place, Suite 330, +# Boston, MA 02111-1307 USA + +# This file is part of urlgrabber, a high-level cross-protocol url-grabber +# Copyright 2002-2004 Michael D. Stenner, Ryan Tomayko + +# $Id: byterange.py,v 1.9 2005/02/14 21:55:07 mstenner Exp $ + +import os +import stat +import urllib +import urllib2 +import email.Utils + +class RangeError(IOError): + """Error raised when an unsatisfiable range is requested.""" + pass + +class HTTPRangeHandler(urllib2.BaseHandler): + """Handler that enables HTTP Range headers. + + This was extremely simple. The Range header is a HTTP feature to + begin with so all this class does is tell urllib2 that the + "206 Partial Content" reponse from the HTTP server is what we + expected. 
+ + Example: + import urllib2 + import byterange + + range_handler = range.HTTPRangeHandler() + opener = urllib2.build_opener(range_handler) + + # install it + urllib2.install_opener(opener) + + # create Request and set Range header + req = urllib2.Request('http://www.python.org/') + req.header['Range'] = 'bytes=30-50' + f = urllib2.urlopen(req) + """ + + def http_error_206(self, req, fp, code, msg, hdrs): + # 206 Partial Content Response + r = urllib.addinfourl(fp, hdrs, req.get_full_url()) + r.code = code + r.msg = msg + return r + + def http_error_416(self, req, fp, code, msg, hdrs): + # HTTP's Range Not Satisfiable error + raise RangeError('Requested Range Not Satisfiable') + +class RangeableFileObject: + """File object wrapper to enable raw range handling. + This was implemented primarilary for handling range + specifications for file:// urls. This object effectively makes + a file object look like it consists only of a range of bytes in + the stream. + + Examples: + # expose 10 bytes, starting at byte position 20, from + # /etc/aliases. + >>> fo = RangeableFileObject(file('/etc/passwd', 'r'), (20,30)) + # seek seeks within the range (to position 23 in this case) + >>> fo.seek(3) + # tell tells where your at _within the range_ (position 3 in + # this case) + >>> fo.tell() + # read EOFs if an attempt is made to read past the last + # byte in the range. the following will return only 7 bytes. + >>> fo.read(30) + """ + + def __init__(self, fo, rangetup): + """Create a RangeableFileObject. + fo -- a file like object. only the read() method need be + supported but supporting an optimized seek() is + preferable. + rangetup -- a (firstbyte,lastbyte) tuple specifying the range + to work over. + The file object provided is assumed to be at byte offset 0. + """ + self.fo = fo + (self.firstbyte, self.lastbyte) = range_tuple_normalize(rangetup) + self.realpos = 0 + self._do_seek(self.firstbyte) + + def __getattr__(self, name): + """This effectively allows us to wrap at the instance level. + Any attribute not found in _this_ object will be searched for + in self.fo. This includes methods.""" + if hasattr(self.fo, name): + return getattr(self.fo, name) + raise AttributeError(name) + + def tell(self): + """Return the position within the range. + This is different from fo.seek in that position 0 is the + first byte position of the range tuple. For example, if + this object was created with a range tuple of (500,899), + tell() will return 0 when at byte position 500 of the file. + """ + return (self.realpos - self.firstbyte) + + def seek(self, offset, whence=0): + """Seek within the byte range. + Positioning is identical to that described under tell(). + """ + assert whence in (0, 1, 2) + if whence == 0: # absolute seek + realoffset = self.firstbyte + offset + elif whence == 1: # relative seek + realoffset = self.realpos + offset + elif whence == 2: # absolute from end of file + # XXX: are we raising the right Error here? + raise IOError('seek from end of file not supported.') + + # do not allow seek past lastbyte in range + if self.lastbyte and (realoffset >= self.lastbyte): + realoffset = self.lastbyte + + self._do_seek(realoffset - self.realpos) + + def read(self, size=-1): + """Read within the range. + This method will limit the size read based on the range. + """ + size = self._calc_read_size(size) + rslt = self.fo.read(size) + self.realpos += len(rslt) + return rslt + + def readline(self, size=-1): + """Read lines within the range. + This method will limit the size read based on the range. 
+ """ + size = self._calc_read_size(size) + rslt = self.fo.readline(size) + self.realpos += len(rslt) + return rslt + + def _calc_read_size(self, size): + """Handles calculating the amount of data to read based on + the range. + """ + if self.lastbyte: + if size > -1: + if ((self.realpos + size) >= self.lastbyte): + size = (self.lastbyte - self.realpos) + else: + size = (self.lastbyte - self.realpos) + return size + + def _do_seek(self, offset): + """Seek based on whether wrapped object supports seek(). + offset is relative to the current position (self.realpos). + """ + assert offset >= 0 + if not hasattr(self.fo, 'seek'): + self._poor_mans_seek(offset) + else: + self.fo.seek(self.realpos + offset) + self.realpos += offset + + def _poor_mans_seek(self, offset): + """Seek by calling the wrapped file objects read() method. + This is used for file like objects that do not have native + seek support. The wrapped objects read() method is called + to manually seek to the desired position. + offset -- read this number of bytes from the wrapped + file object. + raise RangeError if we encounter EOF before reaching the + specified offset. + """ + pos = 0 + bufsize = 1024 + while pos < offset: + if (pos + bufsize) > offset: + bufsize = offset - pos + buf = self.fo.read(bufsize) + if len(buf) != bufsize: + raise RangeError('Requested Range Not Satisfiable') + pos += bufsize + +class FileRangeHandler(urllib2.FileHandler): + """FileHandler subclass that adds Range support. + This class handles Range headers exactly like an HTTP + server would. + """ + def open_local_file(self, req): + import mimetypes + import email + host = req.get_host() + file = req.get_selector() + localfile = urllib.url2pathname(file) + stats = os.stat(localfile) + size = stats[stat.ST_SIZE] + modified = email.Utils.formatdate(stats[stat.ST_MTIME]) + mtype = mimetypes.guess_type(file)[0] + if host: + host, port = urllib.splitport(host) + if port or socket.gethostbyname(host) not in self.get_names(): + raise urllib2.URLError('file not on local host') + fo = open(localfile,'rb') + brange = req.headers.get('Range', None) + brange = range_header_to_tuple(brange) + assert brange != () + if brange: + (fb, lb) = brange + if lb == '': + lb = size + if fb < 0 or fb > size or lb > size: + raise RangeError('Requested Range Not Satisfiable') + size = (lb - fb) + fo = RangeableFileObject(fo, (fb, lb)) + headers = email.message_from_string( + 'Content-Type: %s\nContent-Length: %d\nLast-Modified: %s\n' % + (mtype or 'text/plain', size, modified)) + return urllib.addinfourl(fo, headers, 'file:'+file) + + +# FTP Range Support +# Unfortunately, a large amount of base FTP code had to be copied +# from urllib and urllib2 in order to insert the FTP REST command. 
+# Code modifications for range support have been commented as +# follows: +# -- range support modifications start/end here + +from urllib import splitport, splituser, splitpasswd, splitattr, \ + unquote, addclosehook, addinfourl +import ftplib +import socket +import sys +import mimetypes +import email + +class FTPRangeHandler(urllib2.FTPHandler): + def ftp_open(self, req): + host = req.get_host() + if not host: + raise IOError('ftp error', 'no host given') + host, port = splitport(host) + if port is None: + port = ftplib.FTP_PORT + else: + port = int(port) + + # username/password handling + user, host = splituser(host) + if user: + user, passwd = splitpasswd(user) + else: + passwd = None + host = unquote(host) + user = unquote(user or '') + passwd = unquote(passwd or '') + + try: + host = socket.gethostbyname(host) + except socket.error, msg: + raise urllib2.URLError(msg) + path, attrs = splitattr(req.get_selector()) + dirs = path.split('/') + dirs = map(unquote, dirs) + dirs, file = dirs[:-1], dirs[-1] + if dirs and not dirs[0]: + dirs = dirs[1:] + try: + fw = self.connect_ftp(user, passwd, host, port, dirs) + type = file and 'I' or 'D' + for attr in attrs: + attr, value = splitattr(attr) + if attr.lower() == 'type' and \ + value in ('a', 'A', 'i', 'I', 'd', 'D'): + type = value.upper() + + # -- range support modifications start here + rest = None + range_tup = range_header_to_tuple(req.headers.get('Range', None)) + assert range_tup != () + if range_tup: + (fb, lb) = range_tup + if fb > 0: + rest = fb + # -- range support modifications end here + + fp, retrlen = fw.retrfile(file, type, rest) + + # -- range support modifications start here + if range_tup: + (fb, lb) = range_tup + if lb == '': + if retrlen is None or retrlen == 0: + raise RangeError('Requested Range Not Satisfiable due' + ' to unobtainable file length.') + lb = retrlen + retrlen = lb - fb + if retrlen < 0: + # beginning of range is larger than file + raise RangeError('Requested Range Not Satisfiable') + else: + retrlen = lb - fb + fp = RangeableFileObject(fp, (0, retrlen)) + # -- range support modifications end here + + headers = "" + mtype = mimetypes.guess_type(req.get_full_url())[0] + if mtype: + headers += "Content-Type: %s\n" % mtype + if retrlen is not None and retrlen >= 0: + headers += "Content-Length: %d\n" % retrlen + headers = email.message_from_string(headers) + return addinfourl(fp, headers, req.get_full_url()) + except ftplib.all_errors, msg: + raise IOError('ftp error', msg), sys.exc_info()[2] + + def connect_ftp(self, user, passwd, host, port, dirs): + fw = ftpwrapper(user, passwd, host, port, dirs) + return fw + +class ftpwrapper(urllib.ftpwrapper): + # range support note: + # this ftpwrapper code is copied directly from + # urllib. The only enhancement is to add the rest + # argument and pass it on to ftp.ntransfercmd + def retrfile(self, file, type, rest=None): + self.endtransfer() + if type in ('d', 'D'): + cmd = 'TYPE A' + isdir = 1 + else: + cmd = 'TYPE ' + type + isdir = 0 + try: + self.ftp.voidcmd(cmd) + except ftplib.all_errors: + self.init() + self.ftp.voidcmd(cmd) + conn = None + if file and not isdir: + # Use nlst to see if the file exists at all + try: + self.ftp.nlst(file) + except ftplib.error_perm, reason: + raise IOError('ftp error', reason), sys.exc_info()[2] + # Restore the transfer mode! 
+ self.ftp.voidcmd(cmd) + # Try to retrieve as a file + try: + cmd = 'RETR ' + file + conn = self.ftp.ntransfercmd(cmd, rest) + except ftplib.error_perm, reason: + if str(reason).startswith('501'): + # workaround for REST not supported error + fp, retrlen = self.retrfile(file, type) + fp = RangeableFileObject(fp, (rest,'')) + return (fp, retrlen) + elif not str(reason).startswith('550'): + raise IOError('ftp error', reason), sys.exc_info()[2] + if not conn: + # Set transfer mode to ASCII! + self.ftp.voidcmd('TYPE A') + # Try a directory listing + if file: + cmd = 'LIST ' + file + else: + cmd = 'LIST' + conn = self.ftp.ntransfercmd(cmd) + self.busy = 1 + # Pass back both a suitably decorated object and a retrieval length + return (addclosehook(conn[0].makefile('rb'), + self.endtransfer), conn[1]) + + +#################################################################### +# Range Tuple Functions +# XXX: These range tuple functions might go better in a class. + +_rangere = None +def range_header_to_tuple(range_header): + """Get a (firstbyte,lastbyte) tuple from a Range header value. + + Range headers have the form "bytes=<firstbyte>-<lastbyte>". This + function pulls the firstbyte and lastbyte values and returns + a (firstbyte,lastbyte) tuple. If lastbyte is not specified in + the header value, it is returned as an empty string in the + tuple. + + Return None if range_header is None + Return () if range_header does not conform to the range spec + pattern. + + """ + global _rangere + if range_header is None: + return None + if _rangere is None: + import re + _rangere = re.compile(r'^bytes=(\d{1,})-(\d*)') + match = _rangere.match(range_header) + if match: + tup = range_tuple_normalize(match.group(1, 2)) + if tup and tup[1]: + tup = (tup[0], tup[1]+1) + return tup + return () + +def range_tuple_to_header(range_tup): + """Convert a range tuple to a Range header value. + Return a string of the form "bytes=<firstbyte>-<lastbyte>" or None + if no range is needed. + """ + if range_tup is None: + return None + range_tup = range_tuple_normalize(range_tup) + if range_tup: + if range_tup[1]: + range_tup = (range_tup[0], range_tup[1] - 1) + return 'bytes=%s-%s' % range_tup + +def range_tuple_normalize(range_tup): + """Normalize a (first_byte,last_byte) range tuple. + Return a tuple whose first element is guaranteed to be an int + and whose second element will be '' (meaning: the last byte) or + an int. Finally, return None if the normalized tuple == (0,'') + as that is equivelant to retrieving the entire file. 
+ """ + if range_tup is None: + return None + # handle first byte + fb = range_tup[0] + if fb in (None, ''): + fb = 0 + else: + fb = int(fb) + # handle last byte + try: + lb = range_tup[1] + except IndexError: + lb = '' + else: + if lb is None: + lb = '' + elif lb != '': + lb = int(lb) + # check if range is over the entire file + if (fb, lb) == (0, ''): + return None + # check that the range is valid + if lb < fb: + raise RangeError('Invalid byte range: %s-%s' % (fb, lb)) + return (fb, lb) diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/byterange.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/byterange.pyo Binary files differnew file mode 100644 index 0000000..4f887ba --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/byterange.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/changegroup.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/changegroup.py new file mode 100644 index 0000000..c509cf6 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/changegroup.py @@ -0,0 +1,204 @@ +# changegroup.py - Mercurial changegroup manipulation functions +# +# Copyright 2006 Matt Mackall <mpm@selenic.com> +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +from i18n import _ +import util +import struct, os, bz2, zlib, tempfile + +def getchunk(source): + """return the next chunk from changegroup 'source' as a string""" + d = source.read(4) + if not d: + return "" + l = struct.unpack(">l", d)[0] + if l <= 4: + return "" + d = source.read(l - 4) + if len(d) < l - 4: + raise util.Abort(_("premature EOF reading chunk" + " (got %d bytes, expected %d)") + % (len(d), l - 4)) + return d + +def chunkheader(length): + """return a changegroup chunk header (string)""" + return struct.pack(">l", length + 4) + +def closechunk(): + """return a changegroup chunk header (string) for a zero-length chunk""" + return struct.pack(">l", 0) + +class nocompress(object): + def compress(self, x): + return x + def flush(self): + return "" + +bundletypes = { + "": ("", nocompress), + "HG10UN": ("HG10UN", nocompress), + "HG10BZ": ("HG10", lambda: bz2.BZ2Compressor()), + "HG10GZ": ("HG10GZ", lambda: zlib.compressobj()), +} + +def collector(cl, mmfs, files): + # Gather information about changeset nodes going out in a bundle. + # We want to gather manifests needed and filelogs affected. + def collect(node): + c = cl.read(node) + files.update(c[3]) + mmfs.setdefault(c[0], node) + return collect + +# hgweb uses this list to communicate its preferred type +bundlepriority = ['HG10GZ', 'HG10BZ', 'HG10UN'] + +def writebundle(cg, filename, bundletype): + """Write a bundle file and return its filename. + + Existing files will not be overwritten. + If no filename is specified, a temporary file is created. + bz2 compression can be turned off. + The bundle file will be deleted in case of errors. + """ + + fh = None + cleanup = None + try: + if filename: + fh = open(filename, "wb") + else: + fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg") + fh = os.fdopen(fd, "wb") + cleanup = filename + + header, compressor = bundletypes[bundletype] + fh.write(header) + z = compressor() + + # parse the changegroup data, otherwise we will block + # in case of sshrepo because we don't know the end of the stream + + # an empty chunkgroup is the end of the changegroup + # a changegroup has at least 2 chunkgroups (changelog and manifest). 
+ # after that, an empty chunkgroup is the end of the changegroup + empty = False + count = 0 + while not empty or count <= 2: + empty = True + count += 1 + while 1: + chunk = getchunk(cg) + if not chunk: + break + empty = False + fh.write(z.compress(chunkheader(len(chunk)))) + pos = 0 + while pos < len(chunk): + next = pos + 2**20 + fh.write(z.compress(chunk[pos:next])) + pos = next + fh.write(z.compress(closechunk())) + fh.write(z.flush()) + cleanup = None + return filename + finally: + if fh is not None: + fh.close() + if cleanup is not None: + os.unlink(cleanup) + +def decompressor(fh, alg): + if alg == 'UN': + return fh + elif alg == 'GZ': + def generator(f): + zd = zlib.decompressobj() + for chunk in f: + yield zd.decompress(chunk) + elif alg == 'BZ': + def generator(f): + zd = bz2.BZ2Decompressor() + zd.decompress("BZ") + for chunk in util.filechunkiter(f, 4096): + yield zd.decompress(chunk) + else: + raise util.Abort("unknown bundle compression '%s'" % alg) + return util.chunkbuffer(generator(fh)) + +class unbundle10(object): + def __init__(self, fh, alg): + self._stream = decompressor(fh, alg) + self._type = alg + self.callback = None + def compressed(self): + return self._type != 'UN' + def read(self, l): + return self._stream.read(l) + def seek(self, pos): + return self._stream.seek(pos) + def tell(self): + return self._stream.tell() + def close(self): + return self._stream.close() + + def chunklength(self): + d = self.read(4) + if not d: + return 0 + l = max(0, struct.unpack(">l", d)[0] - 4) + if l and self.callback: + self.callback() + return l + + def chunk(self): + """return the next chunk from changegroup 'source' as a string""" + l = self.chunklength() + d = self.read(l) + if len(d) < l: + raise util.Abort(_("premature EOF reading chunk" + " (got %d bytes, expected %d)") + % (len(d), l)) + return d + + def parsechunk(self): + l = self.chunklength() + if not l: + return {} + h = self.read(80) + node, p1, p2, cs = struct.unpack("20s20s20s20s", h) + data = self.read(l - 80) + return dict(node=node, p1=p1, p2=p2, cs=cs, data=data) + +class headerlessfixup(object): + def __init__(self, fh, h): + self._h = h + self._fh = fh + def read(self, n): + if self._h: + d, self._h = self._h[:n], self._h[n:] + if len(d) < n: + d += self._fh.read(n - len(d)) + return d + return self._fh.read(n) + +def readbundle(fh, fname): + header = fh.read(6) + + if not fname: + fname = "stream" + if not header.startswith('HG') and header.startswith('\0'): + fh = headerlessfixup(fh, header) + header = "HG10UN" + + magic, version, alg = header[0:2], header[2:4], header[4:6] + + if magic != 'HG': + raise util.Abort(_('%s: not a Mercurial bundle') % fname) + if version != '10': + raise util.Abort(_('%s: unknown bundle version %s') % (fname, version)) + return unbundle10(fh, alg) diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/changegroup.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/changegroup.pyo Binary files differnew file mode 100644 index 0000000..051fb31 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/changegroup.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/changelog.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/changelog.py new file mode 100644 index 0000000..bfecee0 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/changelog.py @@ -0,0 +1,233 @@ +# changelog.py - changelog class for mercurial +# +# Copyright 2005-2007 Matt Mackall <mpm@selenic.com> +# +# This software may be used 
and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +from node import bin, hex, nullid +from i18n import _ +import util, error, revlog, encoding + +def _string_escape(text): + """ + >>> d = {'nl': chr(10), 'bs': chr(92), 'cr': chr(13), 'nul': chr(0)} + >>> s = "ab%(nl)scd%(bs)s%(bs)sn%(nul)sab%(cr)scd%(bs)s%(nl)s" % d + >>> s + 'ab\\ncd\\\\\\\\n\\x00ab\\rcd\\\\\\n' + >>> res = _string_escape(s) + >>> s == res.decode('string_escape') + True + """ + # subset of the string_escape codec + text = text.replace('\\', '\\\\').replace('\n', '\\n').replace('\r', '\\r') + return text.replace('\0', '\\0') + +def decodeextra(text): + extra = {} + for l in text.split('\0'): + if l: + k, v = l.decode('string_escape').split(':', 1) + extra[k] = v + return extra + +def encodeextra(d): + # keys must be sorted to produce a deterministic changelog entry + items = [_string_escape('%s:%s' % (k, d[k])) for k in sorted(d)] + return "\0".join(items) + +class appender(object): + '''the changelog index must be updated last on disk, so we use this class + to delay writes to it''' + def __init__(self, fp, buf): + self.data = buf + self.fp = fp + self.offset = fp.tell() + self.size = util.fstat(fp).st_size + + def end(self): + return self.size + len("".join(self.data)) + def tell(self): + return self.offset + def flush(self): + pass + def close(self): + self.fp.close() + + def seek(self, offset, whence=0): + '''virtual file offset spans real file and data''' + if whence == 0: + self.offset = offset + elif whence == 1: + self.offset += offset + elif whence == 2: + self.offset = self.end() + offset + if self.offset < self.size: + self.fp.seek(self.offset) + + def read(self, count=-1): + '''only trick here is reads that span real file and data''' + ret = "" + if self.offset < self.size: + s = self.fp.read(count) + ret = s + self.offset += len(s) + if count > 0: + count -= len(s) + if count != 0: + doff = self.offset - self.size + self.data.insert(0, "".join(self.data)) + del self.data[1:] + s = self.data[0][doff:doff + count] + self.offset += len(s) + ret += s + return ret + + def write(self, s): + self.data.append(str(s)) + self.offset += len(s) + +def delayopener(opener, target, divert, buf): + def o(name, mode='r'): + if name != target: + return opener(name, mode) + if divert: + return opener(name + ".a", mode.replace('a', 'w')) + # otherwise, divert to memory + return appender(opener(name, mode), buf) + return o + +class changelog(revlog.revlog): + def __init__(self, opener): + revlog.revlog.__init__(self, opener, "00changelog.i") + self._realopener = opener + self._delayed = False + self._divert = False + + def delayupdate(self): + "delay visibility of index updates to other readers" + self._delayed = True + self._divert = (len(self) == 0) + self._delaybuf = [] + self.opener = delayopener(self._realopener, self.indexfile, + self._divert, self._delaybuf) + + def finalize(self, tr): + "finalize index updates" + self._delayed = False + self.opener = self._realopener + # move redirected index data back into place + if self._divert: + n = self.opener(self.indexfile + ".a").name + util.rename(n, n[:-2]) + elif self._delaybuf: + fp = self.opener(self.indexfile, 'a') + fp.write("".join(self._delaybuf)) + fp.close() + self._delaybuf = [] + # split when we're done + self.checkinlinesize(tr) + + def readpending(self, file): + r = revlog.revlog(self.opener, file) + self.index = r.index + self.nodemap = r.nodemap + self._chunkcache = r._chunkcache + + def 
writepending(self): + "create a file containing the unfinalized state for pretxnchangegroup" + if self._delaybuf: + # make a temporary copy of the index + fp1 = self._realopener(self.indexfile) + fp2 = self._realopener(self.indexfile + ".a", "w") + fp2.write(fp1.read()) + # add pending data + fp2.write("".join(self._delaybuf)) + fp2.close() + # switch modes so finalize can simply rename + self._delaybuf = [] + self._divert = True + + if self._divert: + return True + + return False + + def checkinlinesize(self, tr, fp=None): + if not self._delayed: + revlog.revlog.checkinlinesize(self, tr, fp) + + def read(self, node): + """ + format used: + nodeid\n : manifest node in ascii + user\n : user, no \n or \r allowed + time tz extra\n : date (time is int or float, timezone is int) + : extra is metadatas, encoded and separated by '\0' + : older versions ignore it + files\n\n : files modified by the cset, no \n or \r allowed + (.*) : comment (free text, ideally utf-8) + + changelog v0 doesn't use extra + """ + text = self.revision(node) + if not text: + return (nullid, "", (0, 0), [], "", {'branch': 'default'}) + last = text.index("\n\n") + desc = encoding.tolocal(text[last + 2:]) + l = text[:last].split('\n') + manifest = bin(l[0]) + user = encoding.tolocal(l[1]) + + extra_data = l[2].split(' ', 2) + if len(extra_data) != 3: + time = float(extra_data.pop(0)) + try: + # various tools did silly things with the time zone field. + timezone = int(extra_data[0]) + except: + timezone = 0 + extra = {} + else: + time, timezone, extra = extra_data + time, timezone = float(time), int(timezone) + extra = decodeextra(extra) + if not extra.get('branch'): + extra['branch'] = 'default' + files = l[3:] + return (manifest, user, (time, timezone), files, desc, extra) + + def add(self, manifest, files, desc, transaction, p1, p2, + user, date=None, extra=None): + user = user.strip() + # An empty username or a username with a "\n" will make the + # revision text contain two "\n\n" sequences -> corrupt + # repository since read cannot unpack the revision. 
+ if not user: + raise error.RevlogError(_("empty username")) + if "\n" in user: + raise error.RevlogError(_("username %s contains a newline") + % repr(user)) + + # strip trailing whitespace and leading and trailing empty lines + desc = '\n'.join([l.rstrip() for l in desc.splitlines()]).strip('\n') + + user, desc = encoding.fromlocal(user), encoding.fromlocal(desc) + + if date: + parseddate = "%d %d" % util.parsedate(date) + else: + parseddate = "%d %d" % util.makedate() + if extra: + branch = extra.get("branch") + if branch in ("default", ""): + del extra["branch"] + elif branch in (".", "null", "tip"): + raise error.RevlogError(_('the name \'%s\' is reserved') + % branch) + if extra: + extra = encodeextra(extra) + parseddate = "%s %s" % (parseddate, extra) + l = [hex(manifest), user, parseddate] + sorted(files) + ["", desc] + text = "\n".join(l) + return self.addrevision(text, transaction, len(self), p1, p2) diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/changelog.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/changelog.pyo Binary files differnew file mode 100644 index 0000000..4e0f428 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/changelog.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/cmdutil.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/cmdutil.py new file mode 100644 index 0000000..7d304d6 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/cmdutil.py @@ -0,0 +1,1374 @@ +# cmdutil.py - help for command processing in mercurial +# +# Copyright 2005-2007 Matt Mackall <mpm@selenic.com> +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +from node import hex, nullid, nullrev, short +from i18n import _ +import os, sys, errno, re, glob, tempfile +import util, templater, patch, error, encoding, templatekw +import match as matchmod +import similar, revset, subrepo + +revrangesep = ':' + +def parsealiases(cmd): + return cmd.lstrip("^").split("|") + +def findpossible(cmd, table, strict=False): + """ + Return cmd -> (aliases, command table entry) + for each matching command. + Return debug commands (or their aliases) only if no normal command matches. 
+ """ + choice = {} + debugchoice = {} + for e in table.keys(): + aliases = parsealiases(e) + found = None + if cmd in aliases: + found = cmd + elif not strict: + for a in aliases: + if a.startswith(cmd): + found = a + break + if found is not None: + if aliases[0].startswith("debug") or found.startswith("debug"): + debugchoice[found] = (aliases, table[e]) + else: + choice[found] = (aliases, table[e]) + + if not choice and debugchoice: + choice = debugchoice + + return choice + +def findcmd(cmd, table, strict=True): + """Return (aliases, command table entry) for command string.""" + choice = findpossible(cmd, table, strict) + + if cmd in choice: + return choice[cmd] + + if len(choice) > 1: + clist = choice.keys() + clist.sort() + raise error.AmbiguousCommand(cmd, clist) + + if choice: + return choice.values()[0] + + raise error.UnknownCommand(cmd) + +def findrepo(p): + while not os.path.isdir(os.path.join(p, ".hg")): + oldp, p = p, os.path.dirname(p) + if p == oldp: + return None + + return p + +def bail_if_changed(repo): + if repo.dirstate.parents()[1] != nullid: + raise util.Abort(_('outstanding uncommitted merge')) + modified, added, removed, deleted = repo.status()[:4] + if modified or added or removed or deleted: + raise util.Abort(_("outstanding uncommitted changes")) + +def logmessage(opts): + """ get the log message according to -m and -l option """ + message = opts.get('message') + logfile = opts.get('logfile') + + if message and logfile: + raise util.Abort(_('options --message and --logfile are mutually ' + 'exclusive')) + if not message and logfile: + try: + if logfile == '-': + message = sys.stdin.read() + else: + message = open(logfile).read() + except IOError, inst: + raise util.Abort(_("can't read commit message '%s': %s") % + (logfile, inst.strerror)) + return message + +def loglimit(opts): + """get the log limit according to option -l/--limit""" + limit = opts.get('limit') + if limit: + try: + limit = int(limit) + except ValueError: + raise util.Abort(_('limit must be a positive integer')) + if limit <= 0: + raise util.Abort(_('limit must be positive')) + else: + limit = None + return limit + +def revsingle(repo, revspec, default='.'): + if not revspec: + return repo[default] + + l = revrange(repo, [revspec]) + if len(l) < 1: + raise util.Abort(_('empty revision set')) + return repo[l[-1]] + +def revpair(repo, revs): + if not revs: + return repo.dirstate.parents()[0], None + + l = revrange(repo, revs) + + if len(l) == 0: + return repo.dirstate.parents()[0], None + + if len(l) == 1: + return repo.lookup(l[0]), None + + return repo.lookup(l[0]), repo.lookup(l[-1]) + +def revrange(repo, revs): + """Yield revision as strings from a list of revision specifications.""" + + def revfix(repo, val, defval): + if not val and val != 0 and defval is not None: + return defval + return repo.changelog.rev(repo.lookup(val)) + + seen, l = set(), [] + for spec in revs: + # attempt to parse old-style ranges first to deal with + # things like old-tag which contain query metacharacters + try: + if revrangesep in spec: + start, end = spec.split(revrangesep, 1) + start = revfix(repo, start, 0) + end = revfix(repo, end, len(repo) - 1) + step = start > end and -1 or 1 + for rev in xrange(start, end + step, step): + if rev in seen: + continue + seen.add(rev) + l.append(rev) + continue + elif spec and spec in repo: # single unquoted rev + rev = revfix(repo, spec, None) + if rev in seen: + continue + seen.add(rev) + l.append(rev) + continue + except error.RepoLookupError: + pass + + # fall through to 
new-style queries if old-style fails + m = revset.match(spec) + for r in m(repo, range(len(repo))): + if r not in seen: + l.append(r) + seen.update(l) + + return l + +def make_filename(repo, pat, node, + total=None, seqno=None, revwidth=None, pathname=None): + node_expander = { + 'H': lambda: hex(node), + 'R': lambda: str(repo.changelog.rev(node)), + 'h': lambda: short(node), + } + expander = { + '%': lambda: '%', + 'b': lambda: os.path.basename(repo.root), + } + + try: + if node: + expander.update(node_expander) + if node: + expander['r'] = (lambda: + str(repo.changelog.rev(node)).zfill(revwidth or 0)) + if total is not None: + expander['N'] = lambda: str(total) + if seqno is not None: + expander['n'] = lambda: str(seqno) + if total is not None and seqno is not None: + expander['n'] = lambda: str(seqno).zfill(len(str(total))) + if pathname is not None: + expander['s'] = lambda: os.path.basename(pathname) + expander['d'] = lambda: os.path.dirname(pathname) or '.' + expander['p'] = lambda: pathname + + newname = [] + patlen = len(pat) + i = 0 + while i < patlen: + c = pat[i] + if c == '%': + i += 1 + c = pat[i] + c = expander[c]() + newname.append(c) + i += 1 + return ''.join(newname) + except KeyError, inst: + raise util.Abort(_("invalid format spec '%%%s' in output filename") % + inst.args[0]) + +def make_file(repo, pat, node=None, + total=None, seqno=None, revwidth=None, mode='wb', pathname=None): + + writable = 'w' in mode or 'a' in mode + + if not pat or pat == '-': + return writable and sys.stdout or sys.stdin + if hasattr(pat, 'write') and writable: + return pat + if hasattr(pat, 'read') and 'r' in mode: + return pat + return open(make_filename(repo, pat, node, total, seqno, revwidth, + pathname), + mode) + +def expandpats(pats): + if not util.expandglobs: + return list(pats) + ret = [] + for p in pats: + kind, name = matchmod._patsplit(p, None) + if kind is None: + try: + globbed = glob.glob(name) + except re.error: + globbed = [name] + if globbed: + ret.extend(globbed) + continue + ret.append(p) + return ret + +def match(repo, pats=[], opts={}, globbed=False, default='relpath'): + if not globbed and default == 'relpath': + pats = expandpats(pats or []) + m = matchmod.match(repo.root, repo.getcwd(), pats, + opts.get('include'), opts.get('exclude'), default, + auditor=repo.auditor) + def badfn(f, msg): + repo.ui.warn("%s: %s\n" % (m.rel(f), msg)) + m.bad = badfn + return m + +def matchall(repo): + return matchmod.always(repo.root, repo.getcwd()) + +def matchfiles(repo, files): + return matchmod.exact(repo.root, repo.getcwd(), files) + +def addremove(repo, pats=[], opts={}, dry_run=None, similarity=None): + if dry_run is None: + dry_run = opts.get('dry_run') + if similarity is None: + similarity = float(opts.get('similarity') or 0) + # we'd use status here, except handling of symlinks and ignore is tricky + added, unknown, deleted, removed = [], [], [], [] + audit_path = util.path_auditor(repo.root) + m = match(repo, pats, opts) + for abs in repo.walk(m): + target = repo.wjoin(abs) + good = True + try: + audit_path(abs) + except: + good = False + rel = m.rel(abs) + exact = m.exact(abs) + if good and abs not in repo.dirstate: + unknown.append(abs) + if repo.ui.verbose or not exact: + repo.ui.status(_('adding %s\n') % ((pats and rel) or abs)) + elif repo.dirstate[abs] != 'r' and (not good or not os.path.lexists(target) + or (os.path.isdir(target) and not os.path.islink(target))): + deleted.append(abs) + if repo.ui.verbose or not exact: + repo.ui.status(_('removing %s\n') % ((pats and 
rel) or abs)) + # for finding renames + elif repo.dirstate[abs] == 'r': + removed.append(abs) + elif repo.dirstate[abs] == 'a': + added.append(abs) + copies = {} + if similarity > 0: + for old, new, score in similar.findrenames(repo, + added + unknown, removed + deleted, similarity): + if repo.ui.verbose or not m.exact(old) or not m.exact(new): + repo.ui.status(_('recording removal of %s as rename to %s ' + '(%d%% similar)\n') % + (m.rel(old), m.rel(new), score * 100)) + copies[new] = old + + if not dry_run: + wctx = repo[None] + wlock = repo.wlock() + try: + wctx.remove(deleted) + wctx.add(unknown) + for new, old in copies.iteritems(): + wctx.copy(old, new) + finally: + wlock.release() + +def updatedir(ui, repo, patches, similarity=0): + '''Update dirstate after patch application according to metadata''' + if not patches: + return + copies = [] + removes = set() + cfiles = patches.keys() + cwd = repo.getcwd() + if cwd: + cfiles = [util.pathto(repo.root, cwd, f) for f in patches.keys()] + for f in patches: + gp = patches[f] + if not gp: + continue + if gp.op == 'RENAME': + copies.append((gp.oldpath, gp.path)) + removes.add(gp.oldpath) + elif gp.op == 'COPY': + copies.append((gp.oldpath, gp.path)) + elif gp.op == 'DELETE': + removes.add(gp.path) + + wctx = repo[None] + for src, dst in copies: + dirstatecopy(ui, repo, wctx, src, dst, cwd=cwd) + if (not similarity) and removes: + wctx.remove(sorted(removes), True) + + for f in patches: + gp = patches[f] + if gp and gp.mode: + islink, isexec = gp.mode + dst = repo.wjoin(gp.path) + # patch won't create empty files + if gp.op == 'ADD' and not os.path.lexists(dst): + flags = (isexec and 'x' or '') + (islink and 'l' or '') + repo.wwrite(gp.path, '', flags) + util.set_flags(dst, islink, isexec) + addremove(repo, cfiles, similarity=similarity) + files = patches.keys() + files.extend([r for r in removes if r not in files]) + return sorted(files) + +def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None): + """Update the dirstate to reflect the intent of copying src to dst. For + different reasons it might not end with dst being marked as copied from src. + """ + origsrc = repo.dirstate.copied(src) or src + if dst == origsrc: # copying back a copy? + if repo.dirstate[dst] not in 'mn' and not dryrun: + repo.dirstate.normallookup(dst) + else: + if repo.dirstate[origsrc] == 'a' and origsrc == src: + if not ui.quiet: + ui.warn(_("%s has not been committed yet, so no copy " + "data will be stored for %s.\n") + % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd))) + if repo.dirstate[dst] in '?r' and not dryrun: + wctx.add([dst]) + elif not dryrun: + wctx.copy(origsrc, dst) + +def copy(ui, repo, pats, opts, rename=False): + # called with the repo lock held + # + # hgsep => pathname that uses "/" to separate directories + # ossep => pathname that uses os.sep to separate directories + cwd = repo.getcwd() + targets = {} + after = opts.get("after") + dryrun = opts.get("dry_run") + wctx = repo[None] + + def walkpat(pat): + srcs = [] + badstates = after and '?' 
or '?r' + m = match(repo, [pat], opts, globbed=True) + for abs in repo.walk(m): + state = repo.dirstate[abs] + rel = m.rel(abs) + exact = m.exact(abs) + if state in badstates: + if exact and state == '?': + ui.warn(_('%s: not copying - file is not managed\n') % rel) + if exact and state == 'r': + ui.warn(_('%s: not copying - file has been marked for' + ' remove\n') % rel) + continue + # abs: hgsep + # rel: ossep + srcs.append((abs, rel, exact)) + return srcs + + # abssrc: hgsep + # relsrc: ossep + # otarget: ossep + def copyfile(abssrc, relsrc, otarget, exact): + abstarget = util.canonpath(repo.root, cwd, otarget) + reltarget = repo.pathto(abstarget, cwd) + target = repo.wjoin(abstarget) + src = repo.wjoin(abssrc) + state = repo.dirstate[abstarget] + + # check for collisions + prevsrc = targets.get(abstarget) + if prevsrc is not None: + ui.warn(_('%s: not overwriting - %s collides with %s\n') % + (reltarget, repo.pathto(abssrc, cwd), + repo.pathto(prevsrc, cwd))) + return + + # check for overwrites + exists = os.path.lexists(target) + if not after and exists or after and state in 'mn': + if not opts['force']: + ui.warn(_('%s: not overwriting - file exists\n') % + reltarget) + return + + if after: + if not exists: + if rename: + ui.warn(_('%s: not recording move - %s does not exist\n') % + (relsrc, reltarget)) + else: + ui.warn(_('%s: not recording copy - %s does not exist\n') % + (relsrc, reltarget)) + return + elif not dryrun: + try: + if exists: + os.unlink(target) + targetdir = os.path.dirname(target) or '.' + if not os.path.isdir(targetdir): + os.makedirs(targetdir) + util.copyfile(src, target) + except IOError, inst: + if inst.errno == errno.ENOENT: + ui.warn(_('%s: deleted in working copy\n') % relsrc) + else: + ui.warn(_('%s: cannot copy - %s\n') % + (relsrc, inst.strerror)) + return True # report a failure + + if ui.verbose or not exact: + if rename: + ui.status(_('moving %s to %s\n') % (relsrc, reltarget)) + else: + ui.status(_('copying %s to %s\n') % (relsrc, reltarget)) + + targets[abstarget] = abssrc + + # fix up dirstate + dirstatecopy(ui, repo, wctx, abssrc, abstarget, dryrun=dryrun, cwd=cwd) + if rename and not dryrun: + wctx.remove([abssrc], not after) + + # pat: ossep + # dest ossep + # srcs: list of (hgsep, hgsep, ossep, bool) + # return: function that takes hgsep and returns ossep + def targetpathfn(pat, dest, srcs): + if os.path.isdir(pat): + abspfx = util.canonpath(repo.root, cwd, pat) + abspfx = util.localpath(abspfx) + if destdirexists: + striplen = len(os.path.split(abspfx)[0]) + else: + striplen = len(abspfx) + if striplen: + striplen += len(os.sep) + res = lambda p: os.path.join(dest, util.localpath(p)[striplen:]) + elif destdirexists: + res = lambda p: os.path.join(dest, + os.path.basename(util.localpath(p))) + else: + res = lambda p: dest + return res + + # pat: ossep + # dest ossep + # srcs: list of (hgsep, hgsep, ossep, bool) + # return: function that takes hgsep and returns ossep + def targetpathafterfn(pat, dest, srcs): + if matchmod.patkind(pat): + # a mercurial pattern + res = lambda p: os.path.join(dest, + os.path.basename(util.localpath(p))) + else: + abspfx = util.canonpath(repo.root, cwd, pat) + if len(abspfx) < len(srcs[0][0]): + # A directory. Either the target path contains the last + # component of the source path or it does not. 
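# Illustrative sketch (not from the Mercurial source): the ambiguity the
# evalpath() helper defined just below has to resolve. For a recorded
# "hg cp --after src dst", a file src/a.txt may already sit at dst/a.txt
# (the whole source prefix stripped) or at dst/src/a.txt (only the prefix's
# parent stripped); evalpath() scores both strip lengths and keeps whichever
# layout matches more files that actually exist. The paths here are made up.
import os.path

def candidate_targets(dest, srcfile, prefix):
    stripped = srcfile[len(prefix) + len(os.sep):]   # drop "src/"
    return (os.path.join(dest, stripped),            # dst/a.txt
            os.path.join(dest, srcfile))             # dst/src/a.txt

# On POSIX: candidate_targets('dst', 'src/a.txt', 'src') == ('dst/a.txt', 'dst/src/a.txt')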
+ def evalpath(striplen): + score = 0 + for s in srcs: + t = os.path.join(dest, util.localpath(s[0])[striplen:]) + if os.path.lexists(t): + score += 1 + return score + + abspfx = util.localpath(abspfx) + striplen = len(abspfx) + if striplen: + striplen += len(os.sep) + if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])): + score = evalpath(striplen) + striplen1 = len(os.path.split(abspfx)[0]) + if striplen1: + striplen1 += len(os.sep) + if evalpath(striplen1) > score: + striplen = striplen1 + res = lambda p: os.path.join(dest, + util.localpath(p)[striplen:]) + else: + # a file + if destdirexists: + res = lambda p: os.path.join(dest, + os.path.basename(util.localpath(p))) + else: + res = lambda p: dest + return res + + + pats = expandpats(pats) + if not pats: + raise util.Abort(_('no source or destination specified')) + if len(pats) == 1: + raise util.Abort(_('no destination specified')) + dest = pats.pop() + destdirexists = os.path.isdir(dest) and not os.path.islink(dest) + if not destdirexists: + if len(pats) > 1 or matchmod.patkind(pats[0]): + raise util.Abort(_('with multiple sources, destination must be an ' + 'existing directory')) + if util.endswithsep(dest): + raise util.Abort(_('destination %s is not a directory') % dest) + + tfn = targetpathfn + if after: + tfn = targetpathafterfn + copylist = [] + for pat in pats: + srcs = walkpat(pat) + if not srcs: + continue + copylist.append((tfn(pat, dest, srcs), srcs)) + if not copylist: + raise util.Abort(_('no files to copy')) + + errors = 0 + for targetpath, srcs in copylist: + for abssrc, relsrc, exact in srcs: + if copyfile(abssrc, relsrc, targetpath(abssrc), exact): + errors += 1 + + if errors: + ui.warn(_('(consider using --after)\n')) + + return errors != 0 + +def service(opts, parentfn=None, initfn=None, runfn=None, logfile=None, + runargs=None, appendpid=False): + '''Run a command as a service.''' + + if opts['daemon'] and not opts['daemon_pipefds']: + # Signal child process startup with file removal + lockfd, lockpath = tempfile.mkstemp(prefix='hg-service-') + os.close(lockfd) + try: + if not runargs: + runargs = util.hgcmd() + sys.argv[1:] + runargs.append('--daemon-pipefds=%s' % lockpath) + # Don't pass --cwd to the child process, because we've already + # changed directory. 
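# Illustrative sketch (not from the Mercurial source): the startup handshake
# service() builds above, reduced to plain subprocess calls. The parent makes
# a lock file, launches the detached child (the loop that follows strips any
# --cwd argument from the child's command line first), then waits for the
# child to unlink the lock file as its "ready" signal; util.rundetached()
# does that polling for Mercurial. The --ready-file flag below is made up.
import os, subprocess, tempfile, time

def spawn_and_wait(child_argv, timeout=10.0):
    fd, lockpath = tempfile.mkstemp(prefix='hg-service-')
    os.close(fd)
    child = subprocess.Popen(child_argv + ['--ready-file=%s' % lockpath])
    try:
        deadline = time.time() + timeout
        while os.path.exists(lockpath):      # the child deletes this when ready
            if child.poll() is not None:
                raise RuntimeError('child process failed to start')
            if time.time() > deadline:
                raise RuntimeError('timed out waiting for child startup')
            time.sleep(0.1)
    finally:
        if os.path.exists(lockpath):
            os.unlink(lockpath)
    return child.pid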
+ for i in xrange(1, len(runargs)): + if runargs[i].startswith('--cwd='): + del runargs[i] + break + elif runargs[i].startswith('--cwd'): + del runargs[i:i + 2] + break + def condfn(): + return not os.path.exists(lockpath) + pid = util.rundetached(runargs, condfn) + if pid < 0: + raise util.Abort(_('child process failed to start')) + finally: + try: + os.unlink(lockpath) + except OSError, e: + if e.errno != errno.ENOENT: + raise + if parentfn: + return parentfn(pid) + else: + return + + if initfn: + initfn() + + if opts['pid_file']: + mode = appendpid and 'a' or 'w' + fp = open(opts['pid_file'], mode) + fp.write(str(os.getpid()) + '\n') + fp.close() + + if opts['daemon_pipefds']: + lockpath = opts['daemon_pipefds'] + try: + os.setsid() + except AttributeError: + pass + os.unlink(lockpath) + util.hidewindow() + sys.stdout.flush() + sys.stderr.flush() + + nullfd = os.open(util.nulldev, os.O_RDWR) + logfilefd = nullfd + if logfile: + logfilefd = os.open(logfile, os.O_RDWR | os.O_CREAT | os.O_APPEND) + os.dup2(nullfd, 0) + os.dup2(logfilefd, 1) + os.dup2(logfilefd, 2) + if nullfd not in (0, 1, 2): + os.close(nullfd) + if logfile and logfilefd not in (0, 1, 2): + os.close(logfilefd) + + if runfn: + return runfn() + +def export(repo, revs, template='hg-%h.patch', fp=None, switch_parent=False, + opts=None): + '''export changesets as hg patches.''' + + total = len(revs) + revwidth = max([len(str(rev)) for rev in revs]) + + def single(rev, seqno, fp): + ctx = repo[rev] + node = ctx.node() + parents = [p.node() for p in ctx.parents() if p] + branch = ctx.branch() + if switch_parent: + parents.reverse() + prev = (parents and parents[0]) or nullid + + if not fp: + fp = make_file(repo, template, node, total=total, seqno=seqno, + revwidth=revwidth, mode='ab') + if fp != sys.stdout and hasattr(fp, 'name'): + repo.ui.note("%s\n" % fp.name) + + fp.write("# HG changeset patch\n") + fp.write("# User %s\n" % ctx.user()) + fp.write("# Date %d %d\n" % ctx.date()) + if branch and branch != 'default': + fp.write("# Branch %s\n" % branch) + fp.write("# Node ID %s\n" % hex(node)) + fp.write("# Parent %s\n" % hex(prev)) + if len(parents) > 1: + fp.write("# Parent %s\n" % hex(parents[1])) + fp.write(ctx.description().rstrip()) + fp.write("\n\n") + + for chunk in patch.diff(repo, prev, node, opts=opts): + fp.write(chunk) + + for seqno, rev in enumerate(revs): + single(rev, seqno + 1, fp) + +def diffordiffstat(ui, repo, diffopts, node1, node2, match, + changes=None, stat=False, fp=None, prefix='', + listsubrepos=False): + '''show diff or diffstat.''' + if fp is None: + write = ui.write + else: + def write(s, **kw): + fp.write(s) + + if stat: + diffopts = diffopts.copy(context=0) + width = 80 + if not ui.plain(): + width = ui.termwidth() + chunks = patch.diff(repo, node1, node2, match, changes, diffopts, + prefix=prefix) + for chunk, label in patch.diffstatui(util.iterlines(chunks), + width=width, + git=diffopts.git): + write(chunk, label=label) + else: + for chunk, label in patch.diffui(repo, node1, node2, match, + changes, diffopts, prefix=prefix): + write(chunk, label=label) + + if listsubrepos: + ctx1 = repo[node1] + ctx2 = repo[node2] + for subpath, sub in subrepo.itersubrepos(ctx1, ctx2): + if node2 is not None: + node2 = ctx2.substate[subpath][1] + submatch = matchmod.narrowmatcher(subpath, match) + sub.diff(diffopts, node2, submatch, changes=changes, + stat=stat, fp=fp, prefix=prefix) + +class changeset_printer(object): + '''show changeset information when templating not requested.''' + + def __init__(self, ui, 
repo, patch, diffopts, buffered): + self.ui = ui + self.repo = repo + self.buffered = buffered + self.patch = patch + self.diffopts = diffopts + self.header = {} + self.hunk = {} + self.lastheader = None + self.footer = None + + def flush(self, rev): + if rev in self.header: + h = self.header[rev] + if h != self.lastheader: + self.lastheader = h + self.ui.write(h) + del self.header[rev] + if rev in self.hunk: + self.ui.write(self.hunk[rev]) + del self.hunk[rev] + return 1 + return 0 + + def close(self): + if self.footer: + self.ui.write(self.footer) + + def show(self, ctx, copies=None, matchfn=None, **props): + if self.buffered: + self.ui.pushbuffer() + self._show(ctx, copies, matchfn, props) + self.hunk[ctx.rev()] = self.ui.popbuffer(labeled=True) + else: + self._show(ctx, copies, matchfn, props) + + def _show(self, ctx, copies, matchfn, props): + '''show a single changeset or file revision''' + changenode = ctx.node() + rev = ctx.rev() + + if self.ui.quiet: + self.ui.write("%d:%s\n" % (rev, short(changenode)), + label='log.node') + return + + log = self.repo.changelog + date = util.datestr(ctx.date()) + + hexfunc = self.ui.debugflag and hex or short + + parents = [(p, hexfunc(log.node(p))) + for p in self._meaningful_parentrevs(log, rev)] + + self.ui.write(_("changeset: %d:%s\n") % (rev, hexfunc(changenode)), + label='log.changeset') + + branch = ctx.branch() + # don't show the default branch name + if branch != 'default': + branch = encoding.tolocal(branch) + self.ui.write(_("branch: %s\n") % branch, + label='log.branch') + for tag in self.repo.nodetags(changenode): + self.ui.write(_("tag: %s\n") % tag, + label='log.tag') + for parent in parents: + self.ui.write(_("parent: %d:%s\n") % parent, + label='log.parent') + + if self.ui.debugflag: + mnode = ctx.manifestnode() + self.ui.write(_("manifest: %d:%s\n") % + (self.repo.manifest.rev(mnode), hex(mnode)), + label='ui.debug log.manifest') + self.ui.write(_("user: %s\n") % ctx.user(), + label='log.user') + self.ui.write(_("date: %s\n") % date, + label='log.date') + + if self.ui.debugflag: + files = self.repo.status(log.parents(changenode)[0], changenode)[:3] + for key, value in zip([_("files:"), _("files+:"), _("files-:")], + files): + if value: + self.ui.write("%-12s %s\n" % (key, " ".join(value)), + label='ui.debug log.files') + elif ctx.files() and self.ui.verbose: + self.ui.write(_("files: %s\n") % " ".join(ctx.files()), + label='ui.note log.files') + if copies and self.ui.verbose: + copies = ['%s (%s)' % c for c in copies] + self.ui.write(_("copies: %s\n") % ' '.join(copies), + label='ui.note log.copies') + + extra = ctx.extra() + if extra and self.ui.debugflag: + for key, value in sorted(extra.items()): + self.ui.write(_("extra: %s=%s\n") + % (key, value.encode('string_escape')), + label='ui.debug log.extra') + + description = ctx.description().strip() + if description: + if self.ui.verbose: + self.ui.write(_("description:\n"), + label='ui.note log.description') + self.ui.write(description, + label='ui.note log.description') + self.ui.write("\n\n") + else: + self.ui.write(_("summary: %s\n") % + description.splitlines()[0], + label='log.summary') + self.ui.write("\n") + + self.showpatch(changenode, matchfn) + + def showpatch(self, node, matchfn): + if not matchfn: + matchfn = self.patch + if matchfn: + stat = self.diffopts.get('stat') + diff = self.diffopts.get('patch') + diffopts = patch.diffopts(self.ui, self.diffopts) + prev = self.repo.changelog.parents(node)[0] + if stat: + diffordiffstat(self.ui, self.repo, diffopts, prev, node, 
+ match=matchfn, stat=True) + if diff: + if stat: + self.ui.write("\n") + diffordiffstat(self.ui, self.repo, diffopts, prev, node, + match=matchfn, stat=False) + self.ui.write("\n") + + def _meaningful_parentrevs(self, log, rev): + """Return list of meaningful (or all if debug) parentrevs for rev. + + For merges (two non-nullrev revisions) both parents are meaningful. + Otherwise the first parent revision is considered meaningful if it + is not the preceding revision. + """ + parents = log.parentrevs(rev) + if not self.ui.debugflag and parents[1] == nullrev: + if parents[0] >= rev - 1: + parents = [] + else: + parents = [parents[0]] + return parents + + +class changeset_templater(changeset_printer): + '''format changeset information.''' + + def __init__(self, ui, repo, patch, diffopts, mapfile, buffered): + changeset_printer.__init__(self, ui, repo, patch, diffopts, buffered) + formatnode = ui.debugflag and (lambda x: x) or (lambda x: x[:12]) + defaulttempl = { + 'parent': '{rev}:{node|formatnode} ', + 'manifest': '{rev}:{node|formatnode}', + 'file_copy': '{name} ({source})', + 'extra': '{key}={value|stringescape}' + } + # filecopy is preserved for compatibility reasons + defaulttempl['filecopy'] = defaulttempl['file_copy'] + self.t = templater.templater(mapfile, {'formatnode': formatnode}, + cache=defaulttempl) + self.cache = {} + + def use_template(self, t): + '''set template string to use''' + self.t.cache['changeset'] = t + + def _meaningful_parentrevs(self, ctx): + """Return list of meaningful (or all if debug) parentrevs for rev. + """ + parents = ctx.parents() + if len(parents) > 1: + return parents + if self.ui.debugflag: + return [parents[0], self.repo['null']] + if parents[0].rev() >= ctx.rev() - 1: + return [] + return parents + + def _show(self, ctx, copies, matchfn, props): + '''show a single changeset or file revision''' + + showlist = templatekw.showlist + + # showparents() behaviour depends on ui trace level which + # causes unexpected behaviours at templating level and makes + # it harder to extract it in a standalone function. Its + # behaviour cannot be changed so leave it here for now. 
+ def showparents(**args): + ctx = args['ctx'] + parents = [[('rev', p.rev()), ('node', p.hex())] + for p in self._meaningful_parentrevs(ctx)] + return showlist('parent', parents, **args) + + props = props.copy() + props.update(templatekw.keywords) + props['parents'] = showparents + props['templ'] = self.t + props['ctx'] = ctx + props['repo'] = self.repo + props['revcache'] = {'copies': copies} + props['cache'] = self.cache + + # find correct templates for current mode + + tmplmodes = [ + (True, None), + (self.ui.verbose, 'verbose'), + (self.ui.quiet, 'quiet'), + (self.ui.debugflag, 'debug'), + ] + + types = {'header': '', 'footer':'', 'changeset': 'changeset'} + for mode, postfix in tmplmodes: + for type in types: + cur = postfix and ('%s_%s' % (type, postfix)) or type + if mode and cur in self.t: + types[type] = cur + + try: + + # write header + if types['header']: + h = templater.stringify(self.t(types['header'], **props)) + if self.buffered: + self.header[ctx.rev()] = h + else: + if self.lastheader != h: + self.lastheader = h + self.ui.write(h) + + # write changeset metadata, then patch if requested + key = types['changeset'] + self.ui.write(templater.stringify(self.t(key, **props))) + self.showpatch(ctx.node(), matchfn) + + if types['footer']: + if not self.footer: + self.footer = templater.stringify(self.t(types['footer'], + **props)) + + except KeyError, inst: + msg = _("%s: no key named '%s'") + raise util.Abort(msg % (self.t.mapfile, inst.args[0])) + except SyntaxError, inst: + raise util.Abort('%s: %s' % (self.t.mapfile, inst.args[0])) + +def show_changeset(ui, repo, opts, buffered=False): + """show one changeset using template or regular display. + + Display format will be the first non-empty hit of: + 1. option 'template' + 2. option 'style' + 3. [ui] setting 'logtemplate' + 4. [ui] setting 'style' + If all of these values are either the unset or the empty string, + regular display via changeset_printer() is done. + """ + # options + patch = False + if opts.get('patch') or opts.get('stat'): + patch = matchall(repo) + + tmpl = opts.get('template') + style = None + if tmpl: + tmpl = templater.parsestring(tmpl, quoted=False) + else: + style = opts.get('style') + + # ui settings + if not (tmpl or style): + tmpl = ui.config('ui', 'logtemplate') + if tmpl: + tmpl = templater.parsestring(tmpl) + else: + style = util.expandpath(ui.config('ui', 'style', '')) + + if not (tmpl or style): + return changeset_printer(ui, repo, patch, opts, buffered) + + mapfile = None + if style and not tmpl: + mapfile = style + if not os.path.split(mapfile)[0]: + mapname = (templater.templatepath('map-cmdline.' + mapfile) + or templater.templatepath(mapfile)) + if mapname: + mapfile = mapname + + try: + t = changeset_templater(ui, repo, patch, opts, mapfile, buffered) + except SyntaxError, inst: + raise util.Abort(inst.args[0]) + if tmpl: + t.use_template(tmpl) + return t + +def finddate(ui, repo, date): + """Find the tipmost changeset that matches the given date spec""" + + df = util.matchdate(date) + m = matchall(repo) + results = {} + + def prep(ctx, fns): + d = ctx.date() + if df(d[0]): + results[ctx.rev()] = d + + for ctx in walkchangerevs(repo, m, {'rev': None}, prep): + rev = ctx.rev() + if rev in results: + ui.status(_("Found revision %s from %s\n") % + (rev, util.datestr(results[rev]))) + return str(rev) + + raise util.Abort(_("revision matching date not found")) + +def walkchangerevs(repo, match, opts, prepare): + '''Iterate over files and the revs in which they changed. 
+ + Callers most commonly need to iterate backwards over the history + in which they are interested. Doing so has awful (quadratic-looking) + performance, so we use iterators in a "windowed" way. + + We walk a window of revisions in the desired order. Within the + window, we first walk forwards to gather data, then in the desired + order (usually backwards) to display it. + + This function returns an iterator yielding contexts. Before + yielding each context, the iterator will first call the prepare + function on each context in the window in forward order.''' + + def increasing_windows(start, end, windowsize=8, sizelimit=512): + if start < end: + while start < end: + yield start, min(windowsize, end - start) + start += windowsize + if windowsize < sizelimit: + windowsize *= 2 + else: + while start > end: + yield start, min(windowsize, start - end - 1) + start -= windowsize + if windowsize < sizelimit: + windowsize *= 2 + + follow = opts.get('follow') or opts.get('follow_first') + + if not len(repo): + return [] + + if follow: + defrange = '%s:0' % repo['.'].rev() + else: + defrange = '-1:0' + revs = revrange(repo, opts['rev'] or [defrange]) + if not revs: + return [] + wanted = set() + slowpath = match.anypats() or (match.files() and opts.get('removed')) + fncache = {} + change = util.cachefunc(repo.changectx) + + # First step is to fill wanted, the set of revisions that we want to yield. + # When it does not induce extra cost, we also fill fncache for revisions in + # wanted: a cache of filenames that were changed (ctx.files()) and that + # match the file filtering conditions. + + if not slowpath and not match.files(): + # No files, no patterns. Display all revs. + wanted = set(revs) + copies = [] + + if not slowpath: + # We only have to read through the filelog to find wanted revisions + + minrev, maxrev = min(revs), max(revs) + def filerevgen(filelog, last): + """ + Only files, no patterns. Check the history of each file. + + Examines filelog entries within minrev, maxrev linkrev range + Returns an iterator yielding (linkrev, parentlinkrevs, copied) + tuples in backwards order + """ + cl_count = len(repo) + revs = [] + for j in xrange(0, last + 1): + linkrev = filelog.linkrev(j) + if linkrev < minrev: + continue + # only yield rev for which we have the changelog, it can + # happen while doing "hg log" during a pull or commit + if linkrev >= cl_count: + break + + parentlinkrevs = [] + for p in filelog.parentrevs(j): + if p != nullrev: + parentlinkrevs.append(filelog.linkrev(p)) + n = filelog.node(j) + revs.append((linkrev, parentlinkrevs, + follow and filelog.renamed(n))) + + return reversed(revs) + def iterfiles(): + for filename in match.files(): + yield filename, None + for filename_node in copies: + yield filename_node + for file_, node in iterfiles(): + filelog = repo.file(file_) + if not len(filelog): + if node is None: + # A zero count may be a directory or deleted file, so + # try to find matching entries on the slow path. 
+ if follow: + raise util.Abort( + _('cannot follow nonexistent file: "%s"') % file_) + slowpath = True + break + else: + continue + + if node is None: + last = len(filelog) - 1 + else: + last = filelog.rev(node) + + + # keep track of all ancestors of the file + ancestors = set([filelog.linkrev(last)]) + + # iterate from latest to oldest revision + for rev, flparentlinkrevs, copied in filerevgen(filelog, last): + if not follow: + if rev > maxrev: + continue + else: + # Note that last might not be the first interesting + # rev to us: + # if the file has been changed after maxrev, we'll + # have linkrev(last) > maxrev, and we still need + # to explore the file graph + if rev not in ancestors: + continue + # XXX insert 1327 fix here + if flparentlinkrevs: + ancestors.update(flparentlinkrevs) + + fncache.setdefault(rev, []).append(file_) + wanted.add(rev) + if copied: + copies.append(copied) + if slowpath: + # We have to read the changelog to match filenames against + # changed files + + if follow: + raise util.Abort(_('can only follow copies/renames for explicit ' + 'filenames')) + + # The slow path checks files modified in every changeset. + for i in sorted(revs): + ctx = change(i) + matches = filter(match, ctx.files()) + if matches: + fncache[i] = matches + wanted.add(i) + + class followfilter(object): + def __init__(self, onlyfirst=False): + self.startrev = nullrev + self.roots = set() + self.onlyfirst = onlyfirst + + def match(self, rev): + def realparents(rev): + if self.onlyfirst: + return repo.changelog.parentrevs(rev)[0:1] + else: + return filter(lambda x: x != nullrev, + repo.changelog.parentrevs(rev)) + + if self.startrev == nullrev: + self.startrev = rev + return True + + if rev > self.startrev: + # forward: all descendants + if not self.roots: + self.roots.add(self.startrev) + for parent in realparents(rev): + if parent in self.roots: + self.roots.add(rev) + return True + else: + # backwards: all parents + if not self.roots: + self.roots.update(realparents(self.startrev)) + if rev in self.roots: + self.roots.remove(rev) + self.roots.update(realparents(rev)) + return True + + return False + + # it might be worthwhile to do this in the iterator if the rev range + # is descending and the prune args are all within that range + for rev in opts.get('prune', ()): + rev = repo.changelog.rev(repo.lookup(rev)) + ff = followfilter() + stop = min(revs[0], revs[-1]) + for x in xrange(rev, stop - 1, -1): + if ff.match(x): + wanted.discard(x) + + # Now that wanted is correctly initialized, we can iterate over the + # revision range, yielding only revisions in wanted. 
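# Illustrative sketch (not from the Mercurial source): the batches produced by
# the increasing_windows() helper defined earlier in this function and consumed
# by the iterate() generator below. Only the forward (ascending) branch is
# reproduced here; window sizes double up to the size limit, so a long history
# is walked in progressively larger slices.
def increasing_windows(start, end, windowsize=8, sizelimit=512):
    while start < end:
        yield start, min(windowsize, end - start)
        start += windowsize
        if windowsize < sizelimit:
            windowsize *= 2

# list(increasing_windows(0, 40)) == [(0, 8), (8, 16), (24, 16)]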
+ def iterate(): + if follow and not match.files(): + ff = followfilter(onlyfirst=opts.get('follow_first')) + def want(rev): + return ff.match(rev) and rev in wanted + else: + def want(rev): + return rev in wanted + + for i, window in increasing_windows(0, len(revs)): + nrevs = [rev for rev in revs[i:i + window] if want(rev)] + for rev in sorted(nrevs): + fns = fncache.get(rev) + ctx = change(rev) + if not fns: + def fns_generator(): + for f in ctx.files(): + if match(f): + yield f + fns = fns_generator() + prepare(ctx, fns) + for rev in nrevs: + yield change(rev) + return iterate() + +def add(ui, repo, match, dryrun, listsubrepos, prefix): + join = lambda f: os.path.join(prefix, f) + bad = [] + oldbad = match.bad + match.bad = lambda x, y: bad.append(x) or oldbad(x, y) + names = [] + wctx = repo[None] + for f in repo.walk(match): + exact = match.exact(f) + if exact or f not in repo.dirstate: + names.append(f) + if ui.verbose or not exact: + ui.status(_('adding %s\n') % match.rel(join(f))) + + if listsubrepos: + for subpath in wctx.substate: + sub = wctx.sub(subpath) + try: + submatch = matchmod.narrowmatcher(subpath, match) + bad.extend(sub.add(ui, submatch, dryrun, prefix)) + except error.LookupError: + ui.status(_("skipping missing subrepository: %s\n") + % join(subpath)) + + if not dryrun: + rejected = wctx.add(names, prefix) + bad.extend(f for f in rejected if f in match.files()) + return bad + +def commit(ui, repo, commitfunc, pats, opts): + '''commit the specified files or all outstanding changes''' + date = opts.get('date') + if date: + opts['date'] = util.parsedate(date) + message = logmessage(opts) + + # extract addremove carefully -- this function can be called from a command + # that doesn't support addremove + if opts.get('addremove'): + addremove(repo, pats, opts) + + return commitfunc(ui, repo, message, match(repo, pats, opts), opts) + +def commiteditor(repo, ctx, subs): + if ctx.description(): + return ctx.description() + return commitforceeditor(repo, ctx, subs) + +def commitforceeditor(repo, ctx, subs): + edittext = [] + modified, added, removed = ctx.modified(), ctx.added(), ctx.removed() + if ctx.description(): + edittext.append(ctx.description()) + edittext.append("") + edittext.append("") # Empty line between message and comments. + edittext.append(_("HG: Enter commit message." 
+ " Lines beginning with 'HG:' are removed.")) + edittext.append(_("HG: Leave message empty to abort commit.")) + edittext.append("HG: --") + edittext.append(_("HG: user: %s") % ctx.user()) + if ctx.p2(): + edittext.append(_("HG: branch merge")) + if ctx.branch(): + edittext.append(_("HG: branch '%s'") + % encoding.tolocal(ctx.branch())) + edittext.extend([_("HG: subrepo %s") % s for s in subs]) + edittext.extend([_("HG: added %s") % f for f in added]) + edittext.extend([_("HG: changed %s") % f for f in modified]) + edittext.extend([_("HG: removed %s") % f for f in removed]) + if not added and not modified and not removed: + edittext.append(_("HG: no files changed")) + edittext.append("") + # run editor in the repository root + olddir = os.getcwd() + os.chdir(repo.root) + text = repo.ui.edit("\n".join(edittext), ctx.user()) + text = re.sub("(?m)^HG:.*(\n|$)", "", text) + os.chdir(olddir) + + if not text.strip(): + raise util.Abort(_("empty commit message")) + + return text diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/cmdutil.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/cmdutil.pyo Binary files differnew file mode 100644 index 0000000..b4d8519 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/cmdutil.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/commands.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/commands.py new file mode 100644 index 0000000..ce27a3e --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/commands.py @@ -0,0 +1,4530 @@ +# commands.py - command processing for mercurial +# +# Copyright 2005-2007 Matt Mackall <mpm@selenic.com> +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +from node import hex, nullid, nullrev, short +from lock import release +from i18n import _, gettext +import os, re, sys, difflib, time, tempfile +import hg, util, revlog, extensions, copies, error +import patch, help, mdiff, url, encoding, templatekw, discovery +import archival, changegroup, cmdutil, sshserver, hbisect, hgweb, hgweb.server +import merge as mergemod +import minirst, revset +import dagparser + +# Commands start here, listed alphabetically + +def add(ui, repo, *pats, **opts): + """add the specified files on the next commit + + Schedule files to be version controlled and added to the + repository. + + The files will be added to the repository at the next commit. To + undo an add before that, see :hg:`forget`. + + If no names are given, add all files to the repository. + + .. container:: verbose + + An example showing how new (unknown) files are added + automatically by :hg:`add`:: + + $ ls + foo.c + $ hg status + ? foo.c + $ hg add + adding foo.c + $ hg status + A foo.c + + Returns 0 if all files are successfully added. + """ + + m = cmdutil.match(repo, pats, opts) + rejected = cmdutil.add(ui, repo, m, opts.get('dry_run'), + opts.get('subrepos'), prefix="") + return rejected and 1 or 0 + +def addremove(ui, repo, *pats, **opts): + """add all new files, delete all missing files + + Add all new files and remove all missing files from the + repository. + + New files are ignored if they match any of the patterns in + .hgignore. As with add, these changes take effect at the next + commit. + + Use the -s/--similarity option to detect renamed files. With a + parameter greater than 0, this compares every removed file with + every added file and records those similar enough as renames. 
This + option takes a percentage between 0 (disabled) and 100 (files must + be identical) as its parameter. Detecting renamed files this way + can be expensive. After using this option, :hg:`status -C` can be + used to check which files were identified as moved or renamed. + + Returns 0 if all files are successfully added. + """ + try: + sim = float(opts.get('similarity') or 100) + except ValueError: + raise util.Abort(_('similarity must be a number')) + if sim < 0 or sim > 100: + raise util.Abort(_('similarity must be between 0 and 100')) + return cmdutil.addremove(repo, pats, opts, similarity=sim / 100.0) + +def annotate(ui, repo, *pats, **opts): + """show changeset information by line for each file + + List changes in files, showing the revision id responsible for + each line + + This command is useful for discovering when a change was made and + by whom. + + Without the -a/--text option, annotate will avoid processing files + it detects as binary. With -a, annotate will annotate the file + anyway, although the results will probably be neither useful + nor desirable. + + Returns 0 on success. + """ + if opts.get('follow'): + # --follow is deprecated and now just an alias for -f/--file + # to mimic the behavior of Mercurial before version 1.5 + opts['file'] = 1 + + datefunc = ui.quiet and util.shortdate or util.datestr + getdate = util.cachefunc(lambda x: datefunc(x[0].date())) + + if not pats: + raise util.Abort(_('at least one filename or pattern is required')) + + opmap = [('user', lambda x: ui.shortuser(x[0].user())), + ('number', lambda x: str(x[0].rev())), + ('changeset', lambda x: short(x[0].node())), + ('date', getdate), + ('file', lambda x: x[0].path()), + ] + + if (not opts.get('user') and not opts.get('changeset') + and not opts.get('date') and not opts.get('file')): + opts['number'] = 1 + + linenumber = opts.get('line_number') is not None + if linenumber and (not opts.get('changeset')) and (not opts.get('number')): + raise util.Abort(_('at least one of -n/-c is required for -l')) + + funcmap = [func for op, func in opmap if opts.get(op)] + if linenumber: + lastfunc = funcmap[-1] + funcmap[-1] = lambda x: "%s:%s" % (lastfunc(x), x[1]) + + ctx = repo[opts.get('rev')] + m = cmdutil.match(repo, pats, opts) + follow = not opts.get('no_follow') + for abs in ctx.walk(m): + fctx = ctx[abs] + if not opts.get('text') and util.binary(fctx.data()): + ui.write(_("%s: binary file\n") % ((pats and m.rel(abs)) or abs)) + continue + + lines = fctx.annotate(follow=follow, linenumber=linenumber) + pieces = [] + + for f in funcmap: + l = [f(n) for n, dummy in lines] + if l: + sized = [(x, encoding.colwidth(x)) for x in l] + ml = max([w for x, w in sized]) + pieces.append(["%s%s" % (' ' * (ml - w), x) for x, w in sized]) + + if pieces: + for p, l in zip(zip(*pieces), lines): + ui.write("%s: %s" % (" ".join(p), l[1])) + +def archive(ui, repo, dest, **opts): + '''create an unversioned archive of a repository revision + + By default, the revision used is the parent of the working + directory; use -r/--rev to specify a different revision. + + The archive type is automatically detected based on file + extension (or override using -t/--type). 
+ + Valid types are: + + :``files``: a directory full of files (default) + :``tar``: tar archive, uncompressed + :``tbz2``: tar archive, compressed using bzip2 + :``tgz``: tar archive, compressed using gzip + :``uzip``: zip archive, uncompressed + :``zip``: zip archive, compressed using deflate + + The exact name of the destination archive or directory is given + using a format string; see :hg:`help export` for details. + + Each member added to an archive file has a directory prefix + prepended. Use -p/--prefix to specify a format string for the + prefix. The default is the basename of the archive, with suffixes + removed. + + Returns 0 on success. + ''' + + ctx = repo[opts.get('rev')] + if not ctx: + raise util.Abort(_('no working directory: please specify a revision')) + node = ctx.node() + dest = cmdutil.make_filename(repo, dest, node) + if os.path.realpath(dest) == repo.root: + raise util.Abort(_('repository root cannot be destination')) + + kind = opts.get('type') or archival.guesskind(dest) or 'files' + prefix = opts.get('prefix') + + if dest == '-': + if kind == 'files': + raise util.Abort(_('cannot archive plain files to stdout')) + dest = sys.stdout + if not prefix: + prefix = os.path.basename(repo.root) + '-%h' + + prefix = cmdutil.make_filename(repo, prefix, node) + matchfn = cmdutil.match(repo, [], opts) + archival.archive(repo, dest, node, kind, not opts.get('no_decode'), + matchfn, prefix, subrepos=opts.get('subrepos')) + +def backout(ui, repo, node=None, rev=None, **opts): + '''reverse effect of earlier changeset + + The backout command merges the reverse effect of the reverted + changeset into the working directory. + + With the --merge option, it first commits the reverted changes + as a new changeset. This new changeset is a child of the reverted + changeset. + The --merge option remembers the parent of the working directory + before starting the backout, then merges the new head with that + changeset afterwards. + This will result in an explicit merge in the history. + + If you backout a changeset other than the original parent of the + working directory, the result of this merge is not committed, + as with a normal merge. Otherwise, no merge is needed and the + commit is automatic. + + Note that the default behavior (without --merge) has changed in + version 1.7. To restore the previous default behavior, use + :hg:`backout --merge` and then :hg:`update --clean .` to get rid of + the ongoing merge. + + See :hg:`help dates` for a list of formats valid for -d/--date. + + Returns 0 on success. 
+ ''' + if rev and node: + raise util.Abort(_("please specify just one revision")) + + if not rev: + rev = node + + if not rev: + raise util.Abort(_("please specify a revision to backout")) + + date = opts.get('date') + if date: + opts['date'] = util.parsedate(date) + + cmdutil.bail_if_changed(repo) + node = repo.lookup(rev) + + op1, op2 = repo.dirstate.parents() + a = repo.changelog.ancestor(op1, node) + if a != node: + raise util.Abort(_('cannot backout change on a different branch')) + + p1, p2 = repo.changelog.parents(node) + if p1 == nullid: + raise util.Abort(_('cannot backout a change with no parents')) + if p2 != nullid: + if not opts.get('parent'): + raise util.Abort(_('cannot backout a merge changeset without ' + '--parent')) + p = repo.lookup(opts['parent']) + if p not in (p1, p2): + raise util.Abort(_('%s is not a parent of %s') % + (short(p), short(node))) + parent = p + else: + if opts.get('parent'): + raise util.Abort(_('cannot use --parent on non-merge changeset')) + parent = p1 + + # the backout should appear on the same branch + branch = repo.dirstate.branch() + hg.clean(repo, node, show_stats=False) + repo.dirstate.setbranch(branch) + revert_opts = opts.copy() + revert_opts['date'] = None + revert_opts['all'] = True + revert_opts['rev'] = hex(parent) + revert_opts['no_backup'] = None + revert(ui, repo, **revert_opts) + if not opts.get('merge') and op1 != node: + try: + ui.setconfig('ui', 'forcemerge', opts.get('tool', '')) + return hg.update(repo, op1) + finally: + ui.setconfig('ui', 'forcemerge', '') + + commit_opts = opts.copy() + commit_opts['addremove'] = False + if not commit_opts['message'] and not commit_opts['logfile']: + # we don't translate commit messages + commit_opts['message'] = "Backed out changeset %s" % short(node) + commit_opts['force_editor'] = True + commit(ui, repo, **commit_opts) + def nice(node): + return '%d:%s' % (repo.changelog.rev(node), short(node)) + ui.status(_('changeset %s backs out changeset %s\n') % + (nice(repo.changelog.tip()), nice(node))) + if opts.get('merge') and op1 != node: + hg.clean(repo, op1, show_stats=False) + ui.status(_('merging with changeset %s\n') + % nice(repo.changelog.tip())) + try: + ui.setconfig('ui', 'forcemerge', opts.get('tool', '')) + return hg.merge(repo, hex(repo.changelog.tip())) + finally: + ui.setconfig('ui', 'forcemerge', '') + return 0 + +def bisect(ui, repo, rev=None, extra=None, command=None, + reset=None, good=None, bad=None, skip=None, noupdate=None): + """subdivision search of changesets + + This command helps to find changesets which introduce problems. To + use, mark the earliest changeset you know exhibits the problem as + bad, then mark the latest changeset which is free from the problem + as good. Bisect will update your working directory to a revision + for testing (unless the -U/--noupdate option is specified). Once + you have performed tests, mark the working directory as good or + bad, and bisect will either update to another candidate changeset + or announce that it has found the bad revision. + + As a shortcut, you can also use the revision argument to mark a + revision as good or bad without checking it out first. + + If you supply a command, it will be used for automatic bisection. + Its exit status will be used to mark revisions as good or bad: + status 0 means good, 125 means to skip the revision, 127 + (command not found) will abort the bisection, and any other + non-zero exit status means the revision is bad. + + Returns 0 on success. 
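# Illustrative sketch (not from the Mercurial source): the exit-status protocol
# applied by --command, as implemented in the bisection loop in the body below.
# Roughly log2(N) further runs are needed for N remaining candidate changesets.
def transition_for(status):
    if status == 125:
        return 'skip'        # revision cannot be judged (e.g. build is broken)
    if status == 0:
        return 'good'
    if status == 127:
        raise RuntimeError('test command not found; aborting bisection')
    if status < 0:
        raise RuntimeError('test command was killed; aborting bisection')
    return 'bad'             # any other non-zero exit status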
+ """ + def print_result(nodes, good): + displayer = cmdutil.show_changeset(ui, repo, {}) + if len(nodes) == 1: + # narrowed it down to a single revision + if good: + ui.write(_("The first good revision is:\n")) + else: + ui.write(_("The first bad revision is:\n")) + displayer.show(repo[nodes[0]]) + parents = repo[nodes[0]].parents() + if len(parents) > 1: + side = good and state['bad'] or state['good'] + num = len(set(i.node() for i in parents) & set(side)) + if num == 1: + common = parents[0].ancestor(parents[1]) + ui.write(_('Not all ancestors of this changeset have been' + ' checked.\nTo check the other ancestors, start' + ' from the common ancestor, %s.\n' % common)) + else: + # multiple possible revisions + if good: + ui.write(_("Due to skipped revisions, the first " + "good revision could be any of:\n")) + else: + ui.write(_("Due to skipped revisions, the first " + "bad revision could be any of:\n")) + for n in nodes: + displayer.show(repo[n]) + displayer.close() + + def check_state(state, interactive=True): + if not state['good'] or not state['bad']: + if (good or bad or skip or reset) and interactive: + return + if not state['good']: + raise util.Abort(_('cannot bisect (no known good revisions)')) + else: + raise util.Abort(_('cannot bisect (no known bad revisions)')) + return True + + # backward compatibility + if rev in "good bad reset init".split(): + ui.warn(_("(use of 'hg bisect <cmd>' is deprecated)\n")) + cmd, rev, extra = rev, extra, None + if cmd == "good": + good = True + elif cmd == "bad": + bad = True + else: + reset = True + elif extra or good + bad + skip + reset + bool(command) > 1: + raise util.Abort(_('incompatible arguments')) + + if reset: + p = repo.join("bisect.state") + if os.path.exists(p): + os.unlink(p) + return + + state = hbisect.load_state(repo) + + if command: + changesets = 1 + try: + while changesets: + # update state + status = util.system(command) + if status == 125: + transition = "skip" + elif status == 0: + transition = "good" + # status < 0 means process was killed + elif status == 127: + raise util.Abort(_("failed to execute %s") % command) + elif status < 0: + raise util.Abort(_("%s killed") % command) + else: + transition = "bad" + ctx = repo[rev or '.'] + state[transition].append(ctx.node()) + ui.status(_('Changeset %d:%s: %s\n') % (ctx, ctx, transition)) + check_state(state, interactive=False) + # bisect + nodes, changesets, good = hbisect.bisect(repo.changelog, state) + # update to next check + cmdutil.bail_if_changed(repo) + hg.clean(repo, nodes[0], show_stats=False) + finally: + hbisect.save_state(repo, state) + print_result(nodes, good) + return + + # update state + + if rev: + nodes = [repo.lookup(i) for i in cmdutil.revrange(repo, [rev])] + else: + nodes = [repo.lookup('.')] + + if good or bad or skip: + if good: + state['good'] += nodes + elif bad: + state['bad'] += nodes + elif skip: + state['skip'] += nodes + hbisect.save_state(repo, state) + + if not check_state(state): + return + + # actually bisect + nodes, changesets, good = hbisect.bisect(repo.changelog, state) + if changesets == 0: + print_result(nodes, good) + else: + assert len(nodes) == 1 # only a single node can be tested next + node = nodes[0] + # compute the approximate number of remaining tests + tests, size = 0, 2 + while size <= changesets: + tests, size = tests + 1, size * 2 + rev = repo.changelog.rev(node) + ui.write(_("Testing changeset %d:%s " + "(%d changesets remaining, ~%d tests)\n") + % (rev, short(node), changesets, tests)) + if not noupdate: + 
cmdutil.bail_if_changed(repo) + return hg.clean(repo, node) + +def branch(ui, repo, label=None, **opts): + """set or show the current branch name + + With no argument, show the current branch name. With one argument, + set the working directory branch name (the branch will not exist + in the repository until the next commit). Standard practice + recommends that primary development take place on the 'default' + branch. + + Unless -f/--force is specified, branch will not let you set a + branch name that already exists, even if it's inactive. + + Use -C/--clean to reset the working directory branch to that of + the parent of the working directory, negating a previous branch + change. + + Use the command :hg:`update` to switch to an existing branch. Use + :hg:`commit --close-branch` to mark this branch as closed. + + Returns 0 on success. + """ + + if opts.get('clean'): + label = repo[None].parents()[0].branch() + repo.dirstate.setbranch(label) + ui.status(_('reset working directory to branch %s\n') % label) + elif label: + utflabel = encoding.fromlocal(label) + if not opts.get('force') and utflabel in repo.branchtags(): + if label not in [p.branch() for p in repo.parents()]: + raise util.Abort(_('a branch of the same name already exists' + " (use 'hg update' to switch to it)")) + repo.dirstate.setbranch(utflabel) + ui.status(_('marked working directory as branch %s\n') % label) + else: + ui.write("%s\n" % encoding.tolocal(repo.dirstate.branch())) + +def branches(ui, repo, active=False, closed=False): + """list repository named branches + + List the repository's named branches, indicating which ones are + inactive. If -c/--closed is specified, also list branches which have + been marked closed (see :hg:`commit --close-branch`). + + If -a/--active is specified, only show active branches. A branch + is considered active if it contains repository heads. + + Use the command :hg:`update` to switch to an existing branch. + + Returns 0. + """ + + hexfunc = ui.debugflag and hex or short + activebranches = [repo[n].branch() for n in repo.heads()] + def testactive(tag, node): + realhead = tag in activebranches + open = node in repo.branchheads(tag, closed=False) + return realhead and open + branches = sorted([(testactive(tag, node), repo.changelog.rev(node), tag) + for tag, node in repo.branchtags().items()], + reverse=True) + + for isactive, node, tag in branches: + if (not active) or isactive: + encodedtag = encoding.tolocal(tag) + if ui.quiet: + ui.write("%s\n" % encodedtag) + else: + hn = repo.lookup(node) + if isactive: + label = 'branches.active' + notice = '' + elif hn not in repo.branchheads(tag, closed=False): + if not closed: + continue + label = 'branches.closed' + notice = _(' (closed)') + else: + label = 'branches.inactive' + notice = _(' (inactive)') + if tag == repo.dirstate.branch(): + label = 'branches.current' + rev = str(node).rjust(31 - encoding.colwidth(encodedtag)) + rev = ui.label('%s:%s' % (rev, hexfunc(hn)), 'log.changeset') + encodedtag = ui.label(encodedtag, label) + ui.write("%s %s%s\n" % (encodedtag, rev, notice)) + +def bundle(ui, repo, fname, dest=None, **opts): + """create a changegroup file + + Generate a compressed changegroup file collecting changesets not + known to be in another repository. + + If you omit the destination repository, then hg assumes the + destination will have all the nodes you specify with --base + parameters. To create a bundle containing all changesets, use + -a/--all (or --base null). 
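# Illustrative sketch (not from the Mercurial source): how --base bounds the
# bundle, mirroring the changelog walk in the body of this command. Everything
# reachable from the base revisions is assumed to exist on the far side;
# walking back from the heads, the nodes whose parents all fall inside that
# set become the bases handed to the changegroup generation.
def outgoingroots(parents, heads, base):
    # parents: dict mapping each node to a tuple of its parent nodes
    has, stack = set(base), list(base)
    while stack:                         # everything reachable from --base
        for p in parents.get(stack.pop(), ()):
            if p not in has:
                has.add(p)
                stack.append(p)
    roots, visit, seen = [], list(heads), set()
    while visit:
        n = visit.pop(0)
        if n in seen or n in has:
            continue
        seen.add(n)
        missing = [p for p in parents.get(n, ()) if p not in has]
        if missing:
            visit.extend(missing)        # keep walking toward the base set
        else:
            roots.append(n)              # all parents known remotely: a root
    return roots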
+ + You can change compression method with the -t/--type option. + The available compression methods are: none, bzip2, and + gzip (by default, bundles are compressed using bzip2). + + The bundle file can then be transferred using conventional means + and applied to another repository with the unbundle or pull + command. This is useful when direct push and pull are not + available or when exporting an entire repository is undesirable. + + Applying bundles preserves all changeset contents including + permissions, copy/rename information, and revision history. + + Returns 0 on success, 1 if no changes found. + """ + revs = opts.get('rev') or None + if opts.get('all'): + base = ['null'] + else: + base = opts.get('base') + if base: + if dest: + raise util.Abort(_("--base is incompatible with specifying " + "a destination")) + base = [repo.lookup(rev) for rev in base] + # create the right base + # XXX: nodesbetween / changegroup* should be "fixed" instead + o = [] + has = set((nullid,)) + for n in base: + has.update(repo.changelog.reachable(n)) + if revs: + revs = [repo.lookup(rev) for rev in revs] + visit = revs[:] + has.difference_update(visit) + else: + visit = repo.changelog.heads() + seen = {} + while visit: + n = visit.pop(0) + parents = [p for p in repo.changelog.parents(n) if p not in has] + if len(parents) == 0: + if n not in has: + o.append(n) + else: + for p in parents: + if p not in seen: + seen[p] = 1 + visit.append(p) + else: + dest = ui.expandpath(dest or 'default-push', dest or 'default') + dest, branches = hg.parseurl(dest, opts.get('branch')) + other = hg.repository(hg.remoteui(repo, opts), dest) + revs, checkout = hg.addbranchrevs(repo, other, branches, revs) + if revs: + revs = [repo.lookup(rev) for rev in revs] + o = discovery.findoutgoing(repo, other, force=opts.get('force')) + + if not o: + ui.status(_("no changes found\n")) + return 1 + + if revs: + cg = repo.changegroupsubset(o, revs, 'bundle') + else: + cg = repo.changegroup(o, 'bundle') + + bundletype = opts.get('type', 'bzip2').lower() + btypes = {'none': 'HG10UN', 'bzip2': 'HG10BZ', 'gzip': 'HG10GZ'} + bundletype = btypes.get(bundletype) + if bundletype not in changegroup.bundletypes: + raise util.Abort(_('unknown bundle type specified with --type')) + + changegroup.writebundle(cg, fname, bundletype) + +def cat(ui, repo, file1, *pats, **opts): + """output the current or given revision of files + + Print the specified files as they were at the given revision. If + no revision is given, the parent of the working directory is used, + or tip if no revision is checked out. + + Output may be to a file, in which case the name of the file is + given using a format string. The formatting rules are the same as + for the export command, with the following additions: + + :``%s``: basename of file being printed + :``%d``: dirname of file being printed, or '.' if in repository root + :``%p``: root-relative path name of file being printed + + Returns 0 on success. + """ + ctx = cmdutil.revsingle(repo, opts.get('rev')) + err = 1 + m = cmdutil.match(repo, (file1,) + pats, opts) + for abs in ctx.walk(m): + fp = cmdutil.make_file(repo, opts.get('output'), ctx.node(), pathname=abs) + data = ctx[abs].data() + if opts.get('decode'): + data = repo.wwritedata(abs, data) + fp.write(data) + err = 0 + return err + +def clone(ui, source, dest=None, **opts): + """make a copy of an existing repository + + Create a copy of an existing repository in a new directory. 
+ + If no destination directory name is specified, it defaults to the + basename of the source. + + The location of the source is added to the new repository's + .hg/hgrc file, as the default to be used for future pulls. + + See :hg:`help urls` for valid source format details. + + It is possible to specify an ``ssh://`` URL as the destination, but no + .hg/hgrc and working directory will be created on the remote side. + Please see :hg:`help urls` for important details about ``ssh://`` URLs. + + A set of changesets (tags, or branch names) to pull may be specified + by listing each changeset (tag, or branch name) with -r/--rev. + If -r/--rev is used, the cloned repository will contain only a subset + of the changesets of the source repository. Only the set of changesets + defined by all -r/--rev options (including all their ancestors) + will be pulled into the destination repository. + No subsequent changesets (including subsequent tags) will be present + in the destination. + + Using -r/--rev (or 'clone src#rev dest') implies --pull, even for + local source repositories. + + For efficiency, hardlinks are used for cloning whenever the source + and destination are on the same filesystem (note this applies only + to the repository data, not to the working directory). Some + filesystems, such as AFS, implement hardlinking incorrectly, but + do not report errors. In these cases, use the --pull option to + avoid hardlinking. + + In some cases, you can clone repositories and the working directory + using full hardlinks with :: + + $ cp -al REPO REPOCLONE + + This is the fastest way to clone, but it is not always safe. The + operation is not atomic (making sure REPO is not modified during + the operation is up to you) and you have to make sure your editor + breaks hardlinks (Emacs and most Linux Kernel tools do so). Also, + this is not compatible with certain extensions that place their + metadata under the .hg directory, such as mq. + + Mercurial will update the working directory to the first applicable + revision from this list: + + a) null if -U or the source repository has no changesets + b) if -u . and the source repository is local, the first parent of + the source repository's working directory + c) the changeset specified with -u (if a branch name, this means the + latest head of that branch) + d) the changeset specified with -r + e) the tipmost head specified with -b + f) the tipmost head specified with the url#branch source syntax + g) the tipmost head of the default branch + h) tip + + Returns 0 on success. + """ + if opts.get('noupdate') and opts.get('updaterev'): + raise util.Abort(_("cannot specify both --noupdate and --updaterev")) + + r = hg.clone(hg.remoteui(ui, opts), source, dest, + pull=opts.get('pull'), + stream=opts.get('uncompressed'), + rev=opts.get('rev'), + update=opts.get('updaterev') or not opts.get('noupdate'), + branch=opts.get('branch')) + + return r is None + +def commit(ui, repo, *pats, **opts): + """commit the specified files or all outstanding changes + + Commit changes to the given files into the repository. Unlike a + centralized RCS, this operation is a local operation. See + :hg:`push` for a way to actively distribute your changes. + + If a list of files is omitted, all changes reported by :hg:`status` + will be committed. + + If you are committing the result of a merge, do not provide any + filenames or -I/-X filters. + + If no commit message is specified, Mercurial starts your + configured editor where you can enter a message. 
In case your + commit fails, you will find a backup of your message in + ``.hg/last-message.txt``. + + See :hg:`help dates` for a list of formats valid for -d/--date. + + Returns 0 on success, 1 if nothing changed. + """ + extra = {} + if opts.get('close_branch'): + if repo['.'].node() not in repo.branchheads(): + # The topo heads set is included in the branch heads set of the + # current branch, so it's sufficient to test branchheads + raise util.Abort(_('can only close branch heads')) + extra['close'] = 1 + e = cmdutil.commiteditor + if opts.get('force_editor'): + e = cmdutil.commitforceeditor + + def commitfunc(ui, repo, message, match, opts): + return repo.commit(message, opts.get('user'), opts.get('date'), match, + editor=e, extra=extra) + + branch = repo[None].branch() + bheads = repo.branchheads(branch) + + node = cmdutil.commit(ui, repo, commitfunc, pats, opts) + if not node: + ui.status(_("nothing changed\n")) + return 1 + + ctx = repo[node] + parents = ctx.parents() + + if bheads and not [x for x in parents + if x.node() in bheads and x.branch() == branch]: + ui.status(_('created new head\n')) + # The message is not printed for initial roots. For the other + # changesets, it is printed in the following situations: + # + # Par column: for the 2 parents with ... + # N: null or no parent + # B: parent is on another named branch + # C: parent is a regular non head changeset + # H: parent was a branch head of the current branch + # Msg column: whether we print "created new head" message + # In the following, it is assumed that there already exists some + # initial branch heads of the current branch, otherwise nothing is + # printed anyway. + # + # Par Msg Comment + # NN y additional topo root + # + # BN y additional branch root + # CN y additional topo head + # HN n usual case + # + # BB y weird additional branch root + # CB y branch merge + # HB n merge with named branch + # + # CC y additional head from merge + # CH n merge with a head + # + # HH n head merge: head count decreases + + if not opts.get('close_branch'): + for r in parents: + if r.extra().get('close') and r.branch() == branch: + ui.status(_('reopening closed branch head %d\n') % r) + + if ui.debugflag: + ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx.hex())) + elif ui.verbose: + ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx)) + +def copy(ui, repo, *pats, **opts): + """mark files as copied for the next commit + + Mark dest as having copies of source files. If dest is a + directory, copies are put in that directory. If dest is a file, + the source must be a single file. + + By default, this command copies the contents of files as they + exist in the working directory. If invoked with -A/--after, the + operation is recorded, but no copying is performed. + + This command takes effect with the next commit. To undo a copy + before that, see :hg:`revert`. + + Returns 0 on success, 1 if errors are encountered. 
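# Illustrative sketch (not from the Mercurial source): the per-target overwrite
# check this command applies through cmdutil.copy() (see copyfile() earlier in
# this diff). Without --after an existing target blocks the copy; with --after
# a target already tracked in the dirstate ('m' or 'n') does, unless --force.
def blocked(after, target_exists, dirstate_status, force):
    clash = (not after and target_exists) or (after and dirstate_status in 'mn')
    return clash and not force

# blocked(after=False, target_exists=True, dirstate_status='?', force=False) is True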
+ """ + wlock = repo.wlock(False) + try: + return cmdutil.copy(ui, repo, pats, opts) + finally: + wlock.release() + +def debugancestor(ui, repo, *args): + """find the ancestor revision of two revisions in a given index""" + if len(args) == 3: + index, rev1, rev2 = args + r = revlog.revlog(util.opener(os.getcwd(), audit=False), index) + lookup = r.lookup + elif len(args) == 2: + if not repo: + raise util.Abort(_("there is no Mercurial repository here " + "(.hg not found)")) + rev1, rev2 = args + r = repo.changelog + lookup = repo.lookup + else: + raise util.Abort(_('either two or three arguments required')) + a = r.ancestor(lookup(rev1), lookup(rev2)) + ui.write("%d:%s\n" % (r.rev(a), hex(a))) + +def debugbuilddag(ui, repo, text, + mergeable_file=False, + appended_file=False, + overwritten_file=False, + new_file=False): + """builds a repo with a given dag from scratch in the current empty repo + + Elements: + + - "+n" is a linear run of n nodes based on the current default parent + - "." is a single node based on the current default parent + - "$" resets the default parent to null (implied at the start); + otherwise the default parent is always the last node created + - "<p" sets the default parent to the backref p + - "*p" is a fork at parent p, which is a backref + - "*p1/p2" is a merge of parents p1 and p2, which are backrefs + - "/p2" is a merge of the preceding node and p2 + - ":tag" defines a local tag for the preceding node + - "@branch" sets the named branch for subsequent nodes + - "!command" runs the command using your shell + - "!!my command\\n" is like "!", but to the end of the line + - "#...\\n" is a comment up to the end of the line + + Whitespace between the above elements is ignored. + + A backref is either + + - a number n, which references the node curr-n, where curr is the current + node, or + - the name of a local tag you placed earlier using ":tag", or + - empty to denote the default parent. + + All string valued-elements are either strictly alphanumeric, or must + be enclosed in double quotes ("..."), with "\\" as escape character. + + Note that the --overwritten-file and --appended-file options imply the + use of "HGMERGE=internal:local" during DAG buildup. 
+ """ + + if not (mergeable_file or appended_file or overwritten_file or new_file): + raise util.Abort(_('need at least one of -m, -a, -o, -n')) + + if len(repo.changelog) > 0: + raise util.Abort(_('repository is not empty')) + + if overwritten_file or appended_file: + # we don't want to fail in merges during buildup + os.environ['HGMERGE'] = 'internal:local' + + def writefile(fname, text, fmode="wb"): + f = open(fname, fmode) + try: + f.write(text) + finally: + f.close() + + if mergeable_file: + linesperrev = 2 + # determine number of revs in DAG + n = 0 + for type, data in dagparser.parsedag(text): + if type == 'n': + n += 1 + # make a file with k lines per rev + writefile("mf", "\n".join(str(i) for i in xrange(0, n * linesperrev)) + + "\n") + + at = -1 + atbranch = 'default' + for type, data in dagparser.parsedag(text): + if type == 'n': + ui.status('node %s\n' % str(data)) + id, ps = data + p1 = ps[0] + if p1 != at: + update(ui, repo, node=str(p1), clean=True) + at = p1 + if repo.dirstate.branch() != atbranch: + branch(ui, repo, atbranch, force=True) + if len(ps) > 1: + p2 = ps[1] + merge(ui, repo, node=p2) + + if mergeable_file: + f = open("mf", "rb+") + try: + lines = f.read().split("\n") + lines[id * linesperrev] += " r%i" % id + f.seek(0) + f.write("\n".join(lines)) + finally: + f.close() + + if appended_file: + writefile("af", "r%i\n" % id, "ab") + + if overwritten_file: + writefile("of", "r%i\n" % id) + + if new_file: + writefile("nf%i" % id, "r%i\n" % id) + + commit(ui, repo, addremove=True, message="r%i" % id, date=(id, 0)) + at = id + elif type == 'l': + id, name = data + ui.status('tag %s\n' % name) + tag(ui, repo, name, local=True) + elif type == 'a': + ui.status('branch %s\n' % data) + atbranch = data + elif type in 'cC': + r = util.system(data, cwd=repo.root) + if r: + desc, r = util.explain_exit(r) + raise util.Abort(_('%s command %s') % (data, desc)) + +def debugcommands(ui, cmd='', *args): + """list all available commands and options""" + for cmd, vals in sorted(table.iteritems()): + cmd = cmd.split('|')[0].strip('^') + opts = ', '.join([i[1] for i in vals[1]]) + ui.write('%s: %s\n' % (cmd, opts)) + +def debugcomplete(ui, cmd='', **opts): + """returns the completion list associated with the given command""" + + if opts.get('options'): + options = [] + otables = [globalopts] + if cmd: + aliases, entry = cmdutil.findcmd(cmd, table, False) + otables.append(entry[1]) + for t in otables: + for o in t: + if "(DEPRECATED)" in o[3]: + continue + if o[0]: + options.append('-%s' % o[0]) + options.append('--%s' % o[1]) + ui.write("%s\n" % "\n".join(options)) + return + + cmdlist = cmdutil.findpossible(cmd, table) + if ui.verbose: + cmdlist = [' '.join(c[0]) for c in cmdlist.values()] + ui.write("%s\n" % "\n".join(sorted(cmdlist))) + +def debugfsinfo(ui, path = "."): + """show information detected about current filesystem""" + open('.debugfsinfo', 'w').write('') + ui.write('exec: %s\n' % (util.checkexec(path) and 'yes' or 'no')) + ui.write('symlink: %s\n' % (util.checklink(path) and 'yes' or 'no')) + ui.write('case-sensitive: %s\n' % (util.checkcase('.debugfsinfo') + and 'yes' or 'no')) + os.unlink('.debugfsinfo') + +def debugrebuildstate(ui, repo, rev="tip"): + """rebuild the dirstate as it would look like for the given revision""" + ctx = repo[rev] + wlock = repo.wlock() + try: + repo.dirstate.rebuild(ctx.node(), ctx.manifest()) + finally: + wlock.release() + +def debugcheckstate(ui, repo): + """validate the correctness of the current dirstate""" + parent1, parent2 = 
repo.dirstate.parents() + m1 = repo[parent1].manifest() + m2 = repo[parent2].manifest() + errors = 0 + for f in repo.dirstate: + state = repo.dirstate[f] + if state in "nr" and f not in m1: + ui.warn(_("%s in state %s, but not in manifest1\n") % (f, state)) + errors += 1 + if state in "a" and f in m1: + ui.warn(_("%s in state %s, but also in manifest1\n") % (f, state)) + errors += 1 + if state in "m" and f not in m1 and f not in m2: + ui.warn(_("%s in state %s, but not in either manifest\n") % + (f, state)) + errors += 1 + for f in m1: + state = repo.dirstate[f] + if state not in "nrm": + ui.warn(_("%s in manifest1, but listed as state %s") % (f, state)) + errors += 1 + if errors: + error = _(".hg/dirstate inconsistent with current parent's manifest") + raise util.Abort(error) + +def showconfig(ui, repo, *values, **opts): + """show combined config settings from all hgrc files + + With no arguments, print names and values of all config items. + + With one argument of the form section.name, print just the value + of that config item. + + With multiple arguments, print names and values of all config + items with matching section names. + + With --debug, the source (filename and line number) is printed + for each config item. + + Returns 0 on success. + """ + + for f in util.rcpath(): + ui.debug(_('read config from: %s\n') % f) + untrusted = bool(opts.get('untrusted')) + if values: + sections = [v for v in values if '.' not in v] + items = [v for v in values if '.' in v] + if len(items) > 1 or items and sections: + raise util.Abort(_('only one config item permitted')) + for section, name, value in ui.walkconfig(untrusted=untrusted): + sectname = section + '.' + name + if values: + for v in values: + if v == section: + ui.debug('%s: ' % + ui.configsource(section, name, untrusted)) + ui.write('%s=%s\n' % (sectname, value)) + elif v == sectname: + ui.debug('%s: ' % + ui.configsource(section, name, untrusted)) + ui.write(value, '\n') + else: + ui.debug('%s: ' % + ui.configsource(section, name, untrusted)) + ui.write('%s=%s\n' % (sectname, value)) + +def debugpushkey(ui, repopath, namespace, *keyinfo): + '''access the pushkey key/value protocol + + With two args, list the keys in the given namespace. + + With five args, set a key to new if it currently is set to old. + Reports success or failure. + ''' + + target = hg.repository(ui, repopath) + if keyinfo: + key, old, new = keyinfo + r = target.pushkey(namespace, key, old, new) + ui.status(str(r) + '\n') + return not(r) + else: + for k, v in target.listkeys(namespace).iteritems(): + ui.write("%s\t%s\n" % (k.encode('string-escape'), + v.encode('string-escape'))) + +def debugrevspec(ui, repo, expr): + '''parse and apply a revision specification''' + if ui.verbose: + tree = revset.parse(expr) + ui.note(tree, "\n") + func = revset.match(expr) + for c in func(repo, range(len(repo))): + ui.write("%s\n" % c) + +def debugsetparents(ui, repo, rev1, rev2=None): + """manually set the parents of the current working directory + + This is useful for writing repository conversion tools, but should + be used with care. + + Returns 0 on success. 
+ """ + + if not rev2: + rev2 = hex(nullid) + + wlock = repo.wlock() + try: + repo.dirstate.setparents(repo.lookup(rev1), repo.lookup(rev2)) + finally: + wlock.release() + +def debugstate(ui, repo, nodates=None): + """show the contents of the current dirstate""" + timestr = "" + showdate = not nodates + for file_, ent in sorted(repo.dirstate._map.iteritems()): + if showdate: + if ent[3] == -1: + # Pad or slice to locale representation + locale_len = len(time.strftime("%Y-%m-%d %H:%M:%S ", + time.localtime(0))) + timestr = 'unset' + timestr = (timestr[:locale_len] + + ' ' * (locale_len - len(timestr))) + else: + timestr = time.strftime("%Y-%m-%d %H:%M:%S ", + time.localtime(ent[3])) + if ent[1] & 020000: + mode = 'lnk' + else: + mode = '%3o' % (ent[1] & 0777) + ui.write("%c %s %10d %s%s\n" % (ent[0], mode, ent[2], timestr, file_)) + for f in repo.dirstate.copies(): + ui.write(_("copy: %s -> %s\n") % (repo.dirstate.copied(f), f)) + +def debugsub(ui, repo, rev=None): + if rev == '': + rev = None + for k, v in sorted(repo[rev].substate.items()): + ui.write('path %s\n' % k) + ui.write(' source %s\n' % v[0]) + ui.write(' revision %s\n' % v[1]) + +def debugdag(ui, repo, file_=None, *revs, **opts): + """format the changelog or an index DAG as a concise textual description + + If you pass a revlog index, the revlog's DAG is emitted. If you list + revision numbers, they get labelled in the output as rN. + + Otherwise, the changelog DAG of the current repo is emitted. + """ + spaces = opts.get('spaces') + dots = opts.get('dots') + if file_: + rlog = revlog.revlog(util.opener(os.getcwd(), audit=False), file_) + revs = set((int(r) for r in revs)) + def events(): + for r in rlog: + yield 'n', (r, list(set(p for p in rlog.parentrevs(r) if p != -1))) + if r in revs: + yield 'l', (r, "r%i" % r) + elif repo: + cl = repo.changelog + tags = opts.get('tags') + branches = opts.get('branches') + if tags: + labels = {} + for l, n in repo.tags().items(): + labels.setdefault(cl.rev(n), []).append(l) + def events(): + b = "default" + for r in cl: + if branches: + newb = cl.read(cl.node(r))[5]['branch'] + if newb != b: + yield 'a', newb + b = newb + yield 'n', (r, list(set(p for p in cl.parentrevs(r) if p != -1))) + if tags: + ls = labels.get(r) + if ls: + for l in ls: + yield 'l', (r, l) + else: + raise util.Abort(_('need repo for changelog dag')) + + for line in dagparser.dagtextlines(events(), + addspaces=spaces, + wraplabels=True, + wrapannotations=True, + wrapnonlinear=dots, + usedots=dots, + maxlinewidth=70): + ui.write(line) + ui.write("\n") + +def debugdata(ui, repo, file_, rev): + """dump the contents of a data file revision""" + r = None + if repo: + filelog = repo.file(file_) + if len(filelog): + r = filelog + if not r: + r = revlog.revlog(util.opener(os.getcwd(), audit=False), file_[:-2] + ".i") + try: + ui.write(r.revision(r.lookup(rev))) + except KeyError: + raise util.Abort(_('invalid revision identifier %s') % rev) + +def debugdate(ui, date, range=None, **opts): + """parse and display a date""" + if opts["extended"]: + d = util.parsedate(date, util.extendeddateformats) + else: + d = util.parsedate(date) + ui.write("internal: %s %s\n" % d) + ui.write("standard: %s\n" % util.datestr(d)) + if range: + m = util.matchdate(range) + ui.write("match: %s\n" % m(d[0])) + +def debugindex(ui, repo, file_, **opts): + """dump the contents of an index file""" + r = None + if repo: + filelog = repo.file(file_) + if len(filelog): + r = filelog + + format = opts.get('format', 0) + if format not in (0, 1): + raise 
util.Abort("unknown format %d" % format) + + if not r: + r = revlog.revlog(util.opener(os.getcwd(), audit=False), file_) + + if format == 0: + ui.write(" rev offset length base linkrev" + " nodeid p1 p2\n") + elif format == 1: + ui.write(" rev flag offset length" + " size base link p1 p2 nodeid\n") + + for i in r: + node = r.node(i) + if format == 0: + try: + pp = r.parents(node) + except: + pp = [nullid, nullid] + ui.write("% 6d % 9d % 7d % 6d % 7d %s %s %s\n" % ( + i, r.start(i), r.length(i), r.base(i), r.linkrev(i), + short(node), short(pp[0]), short(pp[1]))) + elif format == 1: + pr = r.parentrevs(i) + ui.write("% 6d %04x % 8d % 8d % 8d % 6d % 6d % 6d % 6d %s\n" % ( + i, r.flags(i), r.start(i), r.length(i), r.rawsize(i), + r.base(i), r.linkrev(i), pr[0], pr[1], short(node))) + +def debugindexdot(ui, repo, file_): + """dump an index DAG as a graphviz dot file""" + r = None + if repo: + filelog = repo.file(file_) + if len(filelog): + r = filelog + if not r: + r = revlog.revlog(util.opener(os.getcwd(), audit=False), file_) + ui.write("digraph G {\n") + for i in r: + node = r.node(i) + pp = r.parents(node) + ui.write("\t%d -> %d\n" % (r.rev(pp[0]), i)) + if pp[1] != nullid: + ui.write("\t%d -> %d\n" % (r.rev(pp[1]), i)) + ui.write("}\n") + +def debuginstall(ui): + '''test Mercurial installation + + Returns 0 on success. + ''' + + def writetemp(contents): + (fd, name) = tempfile.mkstemp(prefix="hg-debuginstall-") + f = os.fdopen(fd, "wb") + f.write(contents) + f.close() + return name + + problems = 0 + + # encoding + ui.status(_("Checking encoding (%s)...\n") % encoding.encoding) + try: + encoding.fromlocal("test") + except util.Abort, inst: + ui.write(" %s\n" % inst) + ui.write(_(" (check that your locale is properly set)\n")) + problems += 1 + + # compiled modules + ui.status(_("Checking installed modules (%s)...\n") + % os.path.dirname(__file__)) + try: + import bdiff, mpatch, base85, osutil + except Exception, inst: + ui.write(" %s\n" % inst) + ui.write(_(" One or more extensions could not be found")) + ui.write(_(" (check that you compiled the extensions)\n")) + problems += 1 + + # templates + ui.status(_("Checking templates...\n")) + try: + import templater + templater.templater(templater.templatepath("map-cmdline.default")) + except Exception, inst: + ui.write(" %s\n" % inst) + ui.write(_(" (templates seem to have been installed incorrectly)\n")) + problems += 1 + + # patch + ui.status(_("Checking patch...\n")) + patchproblems = 0 + a = "1\n2\n3\n4\n" + b = "1\n2\n3\ninsert\n4\n" + fa = writetemp(a) + d = mdiff.unidiff(a, None, b, None, os.path.basename(fa), + os.path.basename(fa)) + fd = writetemp(d) + + files = {} + try: + patch.patch(fd, ui, cwd=os.path.dirname(fa), files=files) + except util.Abort, e: + ui.write(_(" patch call failed:\n")) + ui.write(" " + str(e) + "\n") + patchproblems += 1 + else: + if list(files) != [os.path.basename(fa)]: + ui.write(_(" unexpected patch output!\n")) + patchproblems += 1 + a = open(fa).read() + if a != b: + ui.write(_(" patch test failed!\n")) + patchproblems += 1 + + if patchproblems: + if ui.config('ui', 'patch'): + ui.write(_(" (Current patch tool may be incompatible with patch," + " or misconfigured. 
Please check your configuration" + " file)\n")) + else: + ui.write(_(" Internal patcher failure, please report this error" + " to http://mercurial.selenic.com/wiki/BugTracker\n")) + problems += patchproblems + + os.unlink(fa) + os.unlink(fd) + + # editor + ui.status(_("Checking commit editor...\n")) + editor = ui.geteditor() + cmdpath = util.find_exe(editor) or util.find_exe(editor.split()[0]) + if not cmdpath: + if editor == 'vi': + ui.write(_(" No commit editor set and can't find vi in PATH\n")) + ui.write(_(" (specify a commit editor in your configuration" + " file)\n")) + else: + ui.write(_(" Can't find editor '%s' in PATH\n") % editor) + ui.write(_(" (specify a commit editor in your configuration" + " file)\n")) + problems += 1 + + # check username + ui.status(_("Checking username...\n")) + try: + ui.username() + except util.Abort, e: + ui.write(" %s\n" % e) + ui.write(_(" (specify a username in your configuration file)\n")) + problems += 1 + + if not problems: + ui.status(_("No problems detected\n")) + else: + ui.write(_("%s problems detected," + " please check your install!\n") % problems) + + return problems + +def debugrename(ui, repo, file1, *pats, **opts): + """dump rename information""" + + ctx = repo[opts.get('rev')] + m = cmdutil.match(repo, (file1,) + pats, opts) + for abs in ctx.walk(m): + fctx = ctx[abs] + o = fctx.filelog().renamed(fctx.filenode()) + rel = m.rel(abs) + if o: + ui.write(_("%s renamed from %s:%s\n") % (rel, o[0], hex(o[1]))) + else: + ui.write(_("%s not renamed\n") % rel) + +def debugwalk(ui, repo, *pats, **opts): + """show how files match on given patterns""" + m = cmdutil.match(repo, pats, opts) + items = list(repo.walk(m)) + if not items: + return + fmt = 'f %%-%ds %%-%ds %%s' % ( + max([len(abs) for abs in items]), + max([len(m.rel(abs)) for abs in items])) + for abs in items: + line = fmt % (abs, m.rel(abs), m.exact(abs) and 'exact' or '') + ui.write("%s\n" % line.rstrip()) + +def diff(ui, repo, *pats, **opts): + """diff repository (or selected files) + + Show differences between revisions for the specified files. + + Differences between files are shown using the unified diff format. + + .. note:: + diff may generate unexpected results for merges, as it will + default to comparing against the working directory's first + parent changeset if no revisions are specified. + + When two revision arguments are given, then changes are shown + between those revisions. If only one revision is specified then + that revision is compared to the working directory, and, when no + revisions are specified, the working directory files are compared + to its parent. + + Alternatively you can specify -c/--change with a revision to see + the changes in that changeset relative to its first parent. + + Without the -a/--text option, diff will avoid generating diffs of + files it detects as binary. With -a, diff will generate a diff + anyway, probably with undesirable results. + + Use the -g/--git option to generate diffs in the git extended diff + format. For more information, read :hg:`help diffs`. + + Returns 0 on success. 
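+
+ For example, to compare a file between two revisions, and to show a
+ single changeset in git format (the revision numbers and file name are
+ illustrative)::
+
+   hg diff -r 10 -r 12 src/main.c
+   hg diff -c 12 --git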
+ """ + + revs = opts.get('rev') + change = opts.get('change') + stat = opts.get('stat') + reverse = opts.get('reverse') + + if revs and change: + msg = _('cannot specify --rev and --change at the same time') + raise util.Abort(msg) + elif change: + node2 = repo.lookup(change) + node1 = repo[node2].parents()[0].node() + else: + node1, node2 = cmdutil.revpair(repo, revs) + + if reverse: + node1, node2 = node2, node1 + + diffopts = patch.diffopts(ui, opts) + m = cmdutil.match(repo, pats, opts) + cmdutil.diffordiffstat(ui, repo, diffopts, node1, node2, m, stat=stat, + listsubrepos=opts.get('subrepos')) + +def export(ui, repo, *changesets, **opts): + """dump the header and diffs for one or more changesets + + Print the changeset header and diffs for one or more revisions. + + The information shown in the changeset header is: author, date, + branch name (if non-default), changeset hash, parent(s) and commit + comment. + + .. note:: + export may generate unexpected diff output for merge + changesets, as it will compare the merge changeset against its + first parent only. + + Output may be to a file, in which case the name of the file is + given using a format string. The formatting rules are as follows: + + :``%%``: literal "%" character + :``%H``: changeset hash (40 hexadecimal digits) + :``%N``: number of patches being generated + :``%R``: changeset revision number + :``%b``: basename of the exporting repository + :``%h``: short-form changeset hash (12 hexadecimal digits) + :``%n``: zero-padded sequence number, starting at 1 + :``%r``: zero-padded changeset revision number + + Without the -a/--text option, export will avoid generating diffs + of files it detects as binary. With -a, export will generate a + diff anyway, probably with undesirable results. + + Use the -g/--git option to generate diffs in the git extended diff + format. See :hg:`help diffs` for more information. + + With the --switch-parent option, the diff will be against the + second parent. It can be useful to review a merge. + + Returns 0 on success. + """ + changesets += tuple(opts.get('rev', [])) + if not changesets: + raise util.Abort(_("export requires at least one changeset")) + revs = cmdutil.revrange(repo, changesets) + if len(revs) > 1: + ui.note(_('exporting patches:\n')) + else: + ui.note(_('exporting patch:\n')) + cmdutil.export(repo, revs, template=opts.get('output'), + switch_parent=opts.get('switch_parent'), + opts=patch.diffopts(ui, opts)) + +def forget(ui, repo, *pats, **opts): + """forget the specified files on the next commit + + Mark the specified files so they will no longer be tracked + after the next commit. + + This only removes files from the current branch, not from the + entire project history, and it does not delete them from the + working directory. + + To undo a forget before the next commit, see :hg:`add`. + + Returns 0 on success. 
+ """ + + if not pats: + raise util.Abort(_('no files specified')) + + m = cmdutil.match(repo, pats, opts) + s = repo.status(match=m, clean=True) + forget = sorted(s[0] + s[1] + s[3] + s[6]) + errs = 0 + + for f in m.files(): + if f not in repo.dirstate and not os.path.isdir(m.rel(f)): + ui.warn(_('not removing %s: file is already untracked\n') + % m.rel(f)) + errs = 1 + + for f in forget: + if ui.verbose or not m.exact(f): + ui.status(_('removing %s\n') % m.rel(f)) + + repo[None].remove(forget, unlink=False) + return errs + +def grep(ui, repo, pattern, *pats, **opts): + """search for a pattern in specified files and revisions + + Search revisions of files for a regular expression. + + This command behaves differently than Unix grep. It only accepts + Python/Perl regexps. It searches repository history, not the + working directory. It always prints the revision number in which a + match appears. + + By default, grep only prints output for the first revision of a + file in which it finds a match. To get it to print every revision + that contains a change in match status ("-" for a match that + becomes a non-match, or "+" for a non-match that becomes a match), + use the --all flag. + + Returns 0 if a match is found, 1 otherwise. + """ + reflags = 0 + if opts.get('ignore_case'): + reflags |= re.I + try: + regexp = re.compile(pattern, reflags) + except re.error, inst: + ui.warn(_("grep: invalid match pattern: %s\n") % inst) + return 1 + sep, eol = ':', '\n' + if opts.get('print0'): + sep = eol = '\0' + + getfile = util.lrucachefunc(repo.file) + + def matchlines(body): + begin = 0 + linenum = 0 + while True: + match = regexp.search(body, begin) + if not match: + break + mstart, mend = match.span() + linenum += body.count('\n', begin, mstart) + 1 + lstart = body.rfind('\n', begin, mstart) + 1 or begin + begin = body.find('\n', mend) + 1 or len(body) + lend = begin - 1 + yield linenum, mstart - lstart, mend - lstart, body[lstart:lend] + + class linestate(object): + def __init__(self, line, linenum, colstart, colend): + self.line = line + self.linenum = linenum + self.colstart = colstart + self.colend = colend + + def __hash__(self): + return hash((self.linenum, self.line)) + + def __eq__(self, other): + return self.line == other.line + + matches = {} + copies = {} + def grepbody(fn, rev, body): + matches[rev].setdefault(fn, []) + m = matches[rev][fn] + for lnum, cstart, cend, line in matchlines(body): + s = linestate(line, lnum, cstart, cend) + m.append(s) + + def difflinestates(a, b): + sm = difflib.SequenceMatcher(None, a, b) + for tag, alo, ahi, blo, bhi in sm.get_opcodes(): + if tag == 'insert': + for i in xrange(blo, bhi): + yield ('+', b[i]) + elif tag == 'delete': + for i in xrange(alo, ahi): + yield ('-', a[i]) + elif tag == 'replace': + for i in xrange(alo, ahi): + yield ('-', a[i]) + for i in xrange(blo, bhi): + yield ('+', b[i]) + + def display(fn, ctx, pstates, states): + rev = ctx.rev() + datefunc = ui.quiet and util.shortdate or util.datestr + found = False + filerevmatches = {} + if opts.get('all'): + iter = difflinestates(pstates, states) + else: + iter = [('', l) for l in states] + for change, l in iter: + cols = [fn, str(rev)] + before, match, after = None, None, None + if opts.get('line_number'): + cols.append(str(l.linenum)) + if opts.get('all'): + cols.append(change) + if opts.get('user'): + cols.append(ui.shortuser(ctx.user())) + if opts.get('date'): + cols.append(datefunc(ctx.date())) + if opts.get('files_with_matches'): + c = (fn, rev) + if c in filerevmatches: + continue 
+ filerevmatches[c] = 1 + else: + before = l.line[:l.colstart] + match = l.line[l.colstart:l.colend] + after = l.line[l.colend:] + ui.write(sep.join(cols)) + if before is not None: + ui.write(sep + before) + ui.write(match, label='grep.match') + ui.write(after) + ui.write(eol) + found = True + return found + + skip = {} + revfiles = {} + matchfn = cmdutil.match(repo, pats, opts) + found = False + follow = opts.get('follow') + + def prep(ctx, fns): + rev = ctx.rev() + pctx = ctx.parents()[0] + parent = pctx.rev() + matches.setdefault(rev, {}) + matches.setdefault(parent, {}) + files = revfiles.setdefault(rev, []) + for fn in fns: + flog = getfile(fn) + try: + fnode = ctx.filenode(fn) + except error.LookupError: + continue + + copied = flog.renamed(fnode) + copy = follow and copied and copied[0] + if copy: + copies.setdefault(rev, {})[fn] = copy + if fn in skip: + if copy: + skip[copy] = True + continue + files.append(fn) + + if fn not in matches[rev]: + grepbody(fn, rev, flog.read(fnode)) + + pfn = copy or fn + if pfn not in matches[parent]: + try: + fnode = pctx.filenode(pfn) + grepbody(pfn, parent, flog.read(fnode)) + except error.LookupError: + pass + + for ctx in cmdutil.walkchangerevs(repo, matchfn, opts, prep): + rev = ctx.rev() + parent = ctx.parents()[0].rev() + for fn in sorted(revfiles.get(rev, [])): + states = matches[rev][fn] + copy = copies.get(rev, {}).get(fn) + if fn in skip: + if copy: + skip[copy] = True + continue + pstates = matches.get(parent, {}).get(copy or fn, []) + if pstates or states: + r = display(fn, ctx, pstates, states) + found = found or r + if r and not opts.get('all'): + skip[fn] = True + if copy: + skip[copy] = True + del matches[rev] + del revfiles[rev] + + return not found + +def heads(ui, repo, *branchrevs, **opts): + """show current repository heads or show branch heads + + With no arguments, show all repository branch heads. + + Repository "heads" are changesets with no child changesets. They are + where development generally takes place and are the usual targets + for update and merge operations. Branch heads are changesets that have + no child changeset on the same branch. + + If one or more REVs are given, only branch heads on the branches + associated with the specified changesets are shown. + + If -c/--closed is specified, also show branch heads marked closed + (see :hg:`commit --close-branch`). + + If STARTREV is specified, only those heads that are descendants of + STARTREV will be displayed. + + If -t/--topo is specified, named branch mechanics will be ignored and only + changesets without children will be shown. + + Returns 0 if matching heads are found, 1 if not. 
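+
+ For example, to list topological heads only, or the heads of one
+ branch (the branch name is illustrative)::
+
+   hg heads --topo
+   hg heads stable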
+ """ + + if opts.get('rev'): + start = repo.lookup(opts['rev']) + else: + start = None + + if opts.get('topo'): + heads = [repo[h] for h in repo.heads(start)] + else: + heads = [] + for b, ls in repo.branchmap().iteritems(): + if start is None: + heads += [repo[h] for h in ls] + continue + startrev = repo.changelog.rev(start) + descendants = set(repo.changelog.descendants(startrev)) + descendants.add(startrev) + rev = repo.changelog.rev + heads += [repo[h] for h in ls if rev(h) in descendants] + + if branchrevs: + decode, encode = encoding.fromlocal, encoding.tolocal + branches = set(repo[decode(br)].branch() for br in branchrevs) + heads = [h for h in heads if h.branch() in branches] + + if not opts.get('closed'): + heads = [h for h in heads if not h.extra().get('close')] + + if opts.get('active') and branchrevs: + dagheads = repo.heads(start) + heads = [h for h in heads if h.node() in dagheads] + + if branchrevs: + haveheads = set(h.branch() for h in heads) + if branches - haveheads: + headless = ', '.join(encode(b) for b in branches - haveheads) + msg = _('no open branch heads found on branches %s') + if opts.get('rev'): + msg += _(' (started at %s)' % opts['rev']) + ui.warn((msg + '\n') % headless) + + if not heads: + return 1 + + heads = sorted(heads, key=lambda x: -x.rev()) + displayer = cmdutil.show_changeset(ui, repo, opts) + for ctx in heads: + displayer.show(ctx) + displayer.close() + +def help_(ui, name=None, with_version=False, unknowncmd=False): + """show help for a given topic or a help overview + + With no arguments, print a list of commands with short help messages. + + Given a topic, extension, or command name, print help for that + topic. + + Returns 0 if successful. + """ + option_lists = [] + textwidth = ui.termwidth() - 2 + + def addglobalopts(aliases): + if ui.verbose: + option_lists.append((_("global options:"), globalopts)) + if name == 'shortlist': + option_lists.append((_('use "hg help" for the full list ' + 'of commands'), ())) + else: + if name == 'shortlist': + msg = _('use "hg help" for the full list of commands ' + 'or "hg -v" for details') + elif aliases: + msg = _('use "hg -v help%s" to show aliases and ' + 'global options') % (name and " " + name or "") + else: + msg = _('use "hg -v help %s" to show global options') % name + option_lists.append((msg, ())) + + def helpcmd(name): + if with_version: + version_(ui) + ui.write('\n') + + try: + aliases, entry = cmdutil.findcmd(name, table, strict=unknowncmd) + except error.AmbiguousCommand, inst: + # py3k fix: except vars can't be used outside the scope of the + # except block, nor can be used inside a lambda. 
python issue4617 + prefix = inst.args[0] + select = lambda c: c.lstrip('^').startswith(prefix) + helplist(_('list of commands:\n\n'), select) + return + + # check if it's an invalid alias and display its error if it is + if getattr(entry[0], 'badalias', False): + if not unknowncmd: + entry[0](ui) + return + + # synopsis + if len(entry) > 2: + if entry[2].startswith('hg'): + ui.write("%s\n" % entry[2]) + else: + ui.write('hg %s %s\n' % (aliases[0], entry[2])) + else: + ui.write('hg %s\n' % aliases[0]) + + # aliases + if not ui.quiet and len(aliases) > 1: + ui.write(_("\naliases: %s\n") % ', '.join(aliases[1:])) + + # description + doc = gettext(entry[0].__doc__) + if not doc: + doc = _("(no help text available)") + if hasattr(entry[0], 'definition'): # aliased command + if entry[0].definition.startswith('!'): # shell alias + doc = _('shell alias for::\n\n %s') % entry[0].definition[1:] + else: + doc = _('alias for: hg %s\n\n%s') % (entry[0].definition, doc) + if ui.quiet: + doc = doc.splitlines()[0] + keep = ui.verbose and ['verbose'] or [] + formatted, pruned = minirst.format(doc, textwidth, keep=keep) + ui.write("\n%s\n" % formatted) + if pruned: + ui.write(_('\nuse "hg -v help %s" to show verbose help\n') % name) + + if not ui.quiet: + # options + if entry[1]: + option_lists.append((_("options:\n"), entry[1])) + + addglobalopts(False) + + def helplist(header, select=None): + h = {} + cmds = {} + for c, e in table.iteritems(): + f = c.split("|", 1)[0] + if select and not select(f): + continue + if (not select and name != 'shortlist' and + e[0].__module__ != __name__): + continue + if name == "shortlist" and not f.startswith("^"): + continue + f = f.lstrip("^") + if not ui.debugflag and f.startswith("debug"): + continue + doc = e[0].__doc__ + if doc and 'DEPRECATED' in doc and not ui.verbose: + continue + doc = gettext(doc) + if not doc: + doc = _("(no help text available)") + h[f] = doc.splitlines()[0].rstrip() + cmds[f] = c.lstrip("^") + + if not h: + ui.status(_('no commands defined\n')) + return + + ui.status(header) + fns = sorted(h) + m = max(map(len, fns)) + for f in fns: + if ui.verbose: + commands = cmds[f].replace("|",", ") + ui.write(" %s:\n %s\n"%(commands, h[f])) + else: + ui.write('%s\n' % (util.wrap(h[f], textwidth, + initindent=' %-*s ' % (m, f), + hangindent=' ' * (m + 4)))) + + if not ui.quiet: + addglobalopts(True) + + def helptopic(name): + for names, header, doc in help.helptable: + if name in names: + break + else: + raise error.UnknownCommand(name) + + # description + if not doc: + doc = _("(no help text available)") + if hasattr(doc, '__call__'): + doc = doc() + + ui.write("%s\n\n" % header) + ui.write("%s\n" % minirst.format(doc, textwidth, indent=4)) + + def helpext(name): + try: + mod = extensions.find(name) + doc = gettext(mod.__doc__) or _('no help text available') + except KeyError: + mod = None + doc = extensions.disabledext(name) + if not doc: + raise error.UnknownCommand(name) + + if '\n' not in doc: + head, tail = doc, "" + else: + head, tail = doc.split('\n', 1) + ui.write(_('%s extension - %s\n\n') % (name.split('.')[-1], head)) + if tail: + ui.write(minirst.format(tail, textwidth)) + ui.status('\n\n') + + if mod: + try: + ct = mod.cmdtable + except AttributeError: + ct = {} + modcmds = set([c.split('|', 1)[0] for c in ct]) + helplist(_('list of commands:\n\n'), modcmds.__contains__) + else: + ui.write(_('use "hg help extensions" for information on enabling ' + 'extensions\n')) + + def helpextcmd(name): + cmd, ext, mod = extensions.disabledcmd(name, 
ui.config('ui', 'strict')) + doc = gettext(mod.__doc__).splitlines()[0] + + msg = help.listexts(_("'%s' is provided by the following " + "extension:") % cmd, {ext: doc}, len(ext), + indent=4) + ui.write(minirst.format(msg, textwidth)) + ui.write('\n\n') + ui.write(_('use "hg help extensions" for information on enabling ' + 'extensions\n')) + + help.addtopichook('revsets', revset.makedoc) + + if name and name != 'shortlist': + i = None + if unknowncmd: + queries = (helpextcmd,) + else: + queries = (helptopic, helpcmd, helpext, helpextcmd) + for f in queries: + try: + f(name) + i = None + break + except error.UnknownCommand, inst: + i = inst + if i: + raise i + + else: + # program name + if ui.verbose or with_version: + version_(ui) + else: + ui.status(_("Mercurial Distributed SCM\n")) + ui.status('\n') + + # list of commands + if name == "shortlist": + header = _('basic commands:\n\n') + else: + header = _('list of commands:\n\n') + + helplist(header) + if name != 'shortlist': + exts, maxlength = extensions.enabled() + text = help.listexts(_('enabled extensions:'), exts, maxlength) + if text: + ui.write("\n%s\n" % minirst.format(text, textwidth)) + + # list all option lists + opt_output = [] + multioccur = False + for title, options in option_lists: + opt_output.append(("\n%s" % title, None)) + for option in options: + if len(option) == 5: + shortopt, longopt, default, desc, optlabel = option + else: + shortopt, longopt, default, desc = option + optlabel = _("VALUE") # default label + + if _("DEPRECATED") in desc and not ui.verbose: + continue + if isinstance(default, list): + numqualifier = " %s [+]" % optlabel + multioccur = True + elif (default is not None) and not isinstance(default, bool): + numqualifier = " %s" % optlabel + else: + numqualifier = "" + opt_output.append(("%2s%s" % + (shortopt and "-%s" % shortopt, + longopt and " --%s%s" % + (longopt, numqualifier)), + "%s%s" % (desc, + default + and _(" (default: %s)") % default + or ""))) + if multioccur: + msg = _("\n[+] marked option can be specified multiple times") + if ui.verbose and name != 'shortlist': + opt_output.append((msg, None)) + else: + opt_output.insert(-1, (msg, None)) + + if not name: + ui.write(_("\nadditional help topics:\n\n")) + topics = [] + for names, header, doc in help.helptable: + topics.append((sorted(names, key=len, reverse=True)[0], header)) + topics_len = max([len(s[0]) for s in topics]) + for t, desc in topics: + ui.write(" %-*s %s\n" % (topics_len, t, desc)) + + if opt_output: + colwidth = encoding.colwidth + # normalize: (opt or message, desc or None, width of opt) + entries = [desc and (opt, desc, colwidth(opt)) or (opt, None, 0) + for opt, desc in opt_output] + hanging = max([e[2] for e in entries]) + for opt, desc, width in entries: + if desc: + initindent = ' %s%s ' % (opt, ' ' * (hanging - width)) + hangindent = ' ' * (hanging + 3) + ui.write('%s\n' % (util.wrap(desc, textwidth, + initindent=initindent, + hangindent=hangindent))) + else: + ui.write("%s\n" % opt) + +def identify(ui, repo, source=None, + rev=None, num=None, id=None, branch=None, tags=None): + """identify the working copy or specified revision + + With no revision, print a summary of the current state of the + repository. + + Specifying a path to a repository root or Mercurial bundle will + cause lookup to operate on that repository/bundle. 
+ + This summary identifies the repository state using one or two + parent hash identifiers, followed by a "+" if there are + uncommitted changes in the working directory, a list of tags for + this revision and a branch name for non-default branches. + + Returns 0 if successful. + """ + + if not repo and not source: + raise util.Abort(_("there is no Mercurial repository here " + "(.hg not found)")) + + hexfunc = ui.debugflag and hex or short + default = not (num or id or branch or tags) + output = [] + + revs = [] + if source: + source, branches = hg.parseurl(ui.expandpath(source)) + repo = hg.repository(ui, source) + revs, checkout = hg.addbranchrevs(repo, repo, branches, None) + + if not repo.local(): + if not rev and revs: + rev = revs[0] + if not rev: + rev = "tip" + if num or branch or tags: + raise util.Abort( + "can't query remote revision number, branch, or tags") + output = [hexfunc(repo.lookup(rev))] + elif not rev: + ctx = repo[None] + parents = ctx.parents() + changed = False + if default or id or num: + changed = util.any(repo.status()) + if default or id: + output = ["%s%s" % ('+'.join([hexfunc(p.node()) for p in parents]), + (changed) and "+" or "")] + if num: + output.append("%s%s" % ('+'.join([str(p.rev()) for p in parents]), + (changed) and "+" or "")) + else: + ctx = repo[rev] + if default or id: + output = [hexfunc(ctx.node())] + if num: + output.append(str(ctx.rev())) + + if repo.local() and default and not ui.quiet: + b = encoding.tolocal(ctx.branch()) + if b != 'default': + output.append("(%s)" % b) + + # multiple tags for a single parent separated by '/' + t = "/".join(ctx.tags()) + if t: + output.append(t) + + if branch: + output.append(encoding.tolocal(ctx.branch())) + + if tags: + output.extend(ctx.tags()) + + ui.write("%s\n" % ' '.join(output)) + +def import_(ui, repo, patch1, *patches, **opts): + """import an ordered set of patches + + Import a list of patches and commit them individually (unless + --no-commit is specified). + + If there are outstanding changes in the working directory, import + will abort unless given the -f/--force flag. + + You can import a patch straight from a mail message. Even patches + as attachments work (to use the body part, it must have type + text/plain or text/x-patch). From and Subject headers of email + message are used as default committer and commit message. All + text/plain body parts before first diff are added to commit + message. + + If the imported patch was generated by :hg:`export`, user and + description from patch override values from message headers and + body. Values given on command line with -m/--message and -u/--user + override these. + + If --exact is specified, import will set the working directory to + the parent of each patch before applying it, and will abort if the + resulting changeset has a different ID than the one recorded in + the patch. This may happen due to character set problems or other + deficiencies in the text patch format. + + With -s/--similarity, hg will attempt to discover renames and + copies in the patch in the same way as 'addremove'. + + To read a patch from standard input, use "-" as the patch name. If + a URL is specified, the patch will be downloaded from it. + See :hg:`help dates` for a list of formats valid for -d/--date. + + Returns 0 on success. 
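+
+ For example, to apply a patch from a local file, or to apply one from
+ a URL without committing it (the patch name and URL are illustrative)::
+
+   hg import ../bugfix.patch
+   hg import --no-commit http://example.com/bugfix.patch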
+ """ + patches = (patch1,) + patches + + date = opts.get('date') + if date: + opts['date'] = util.parsedate(date) + + try: + sim = float(opts.get('similarity') or 0) + except ValueError: + raise util.Abort(_('similarity must be a number')) + if sim < 0 or sim > 100: + raise util.Abort(_('similarity must be between 0 and 100')) + + if opts.get('exact') or not opts.get('force'): + cmdutil.bail_if_changed(repo) + + d = opts["base"] + strip = opts["strip"] + wlock = lock = None + + def tryone(ui, hunk): + tmpname, message, user, date, branch, nodeid, p1, p2 = \ + patch.extract(ui, hunk) + + if not tmpname: + return None + commitid = _('to working directory') + + try: + cmdline_message = cmdutil.logmessage(opts) + if cmdline_message: + # pickup the cmdline msg + message = cmdline_message + elif message: + # pickup the patch msg + message = message.strip() + else: + # launch the editor + message = None + ui.debug('message:\n%s\n' % message) + + wp = repo.parents() + if opts.get('exact'): + if not nodeid or not p1: + raise util.Abort(_('not a Mercurial patch')) + p1 = repo.lookup(p1) + p2 = repo.lookup(p2 or hex(nullid)) + + if p1 != wp[0].node(): + hg.clean(repo, p1) + repo.dirstate.setparents(p1, p2) + elif p2: + try: + p1 = repo.lookup(p1) + p2 = repo.lookup(p2) + if p1 == wp[0].node(): + repo.dirstate.setparents(p1, p2) + except error.RepoError: + pass + if opts.get('exact') or opts.get('import_branch'): + repo.dirstate.setbranch(branch or 'default') + + files = {} + try: + patch.patch(tmpname, ui, strip=strip, cwd=repo.root, + files=files, eolmode=None) + finally: + files = cmdutil.updatedir(ui, repo, files, + similarity=sim / 100.0) + if not opts.get('no_commit'): + if opts.get('exact'): + m = None + else: + m = cmdutil.matchfiles(repo, files or []) + n = repo.commit(message, opts.get('user') or user, + opts.get('date') or date, match=m, + editor=cmdutil.commiteditor) + if opts.get('exact'): + if hex(n) != nodeid: + repo.rollback() + raise util.Abort(_('patch is damaged' + ' or loses information')) + # Force a dirstate write so that the next transaction + # backups an up-do-date file. + repo.dirstate.write() + if n: + commitid = short(n) + + return commitid + finally: + os.unlink(tmpname) + + try: + wlock = repo.wlock() + lock = repo.lock() + lastcommit = None + for p in patches: + pf = os.path.join(d, p) + + if pf == '-': + ui.status(_("applying patch from stdin\n")) + pf = sys.stdin + else: + ui.status(_("applying %s\n") % p) + pf = url.open(ui, pf) + + haspatch = False + for hunk in patch.split(pf): + commitid = tryone(ui, hunk) + if commitid: + haspatch = True + if lastcommit: + ui.status(_('applied %s\n') % lastcommit) + lastcommit = commitid + + if not haspatch: + raise util.Abort(_('no diffs found')) + + finally: + release(lock, wlock) + +def incoming(ui, repo, source="default", **opts): + """show new changesets found in source + + Show new changesets found in the specified path/URL or the default + pull location. These are the changesets that would have been pulled + if a pull at the time you issued this command. + + For remote repository, using --bundle avoids downloading the + changesets twice if the incoming is followed by a pull. + + See pull for valid source format details. + + Returns 0 if there are incoming changes, 1 otherwise. 
+ """ + if opts.get('bundle') and opts.get('subrepos'): + raise util.Abort(_('cannot combine --bundle and --subrepos')) + + ret = hg.incoming(ui, repo, source, opts) + return ret + +def init(ui, dest=".", **opts): + """create a new repository in the given directory + + Initialize a new repository in the given directory. If the given + directory does not exist, it will be created. + + If no directory is given, the current directory is used. + + It is possible to specify an ``ssh://`` URL as the destination. + See :hg:`help urls` for more information. + + Returns 0 on success. + """ + hg.repository(hg.remoteui(ui, opts), ui.expandpath(dest), create=1) + +def locate(ui, repo, *pats, **opts): + """locate files matching specific patterns + + Print files under Mercurial control in the working directory whose + names match the given patterns. + + By default, this command searches all directories in the working + directory. To search just the current directory and its + subdirectories, use "--include .". + + If no patterns are given to match, this command prints the names + of all files under Mercurial control in the working directory. + + If you want to feed the output of this command into the "xargs" + command, use the -0 option to both this command and "xargs". This + will avoid the problem of "xargs" treating single filenames that + contain whitespace as multiple filenames. + + Returns 0 if a match is found, 1 otherwise. + """ + end = opts.get('print0') and '\0' or '\n' + rev = opts.get('rev') or None + + ret = 1 + m = cmdutil.match(repo, pats, opts, default='relglob') + m.bad = lambda x, y: False + for abs in repo[rev].walk(m): + if not rev and abs not in repo.dirstate: + continue + if opts.get('fullpath'): + ui.write(repo.wjoin(abs), end) + else: + ui.write(((pats and m.rel(abs)) or abs), end) + ret = 0 + + return ret + +def log(ui, repo, *pats, **opts): + """show revision history of entire repository or files + + Print the revision history of the specified files or the entire + project. + + File history is shown without following rename or copy history of + files. Use -f/--follow with a filename to follow history across + renames and copies. --follow without a filename will only show + ancestors or descendants of the starting revision. --follow-first + only follows the first parent of merge revisions. + + If no revision range is specified, the default is ``tip:0`` unless + --follow is set, in which case the working directory parent is + used as the starting revision. You can specify a revision set for + log, see :hg:`help revsets` for more information. + + See :hg:`help dates` for a list of formats valid for -d/--date. + + By default this command prints revision number and changeset id, + tags, non-trivial parents, user, date and time, and a summary for + each commit. When the -v/--verbose switch is used, the list of + changed files and full commit message are shown. + + .. note:: + log -p/--patch may generate unexpected diff output for merge + changesets, as it will only compare the merge changeset against + its first parent. Also, only files different from BOTH parents + will appear in files:. + + Returns 0 on success. 
+ """ + + matchfn = cmdutil.match(repo, pats, opts) + limit = cmdutil.loglimit(opts) + count = 0 + + endrev = None + if opts.get('copies') and opts.get('rev'): + endrev = max(cmdutil.revrange(repo, opts.get('rev'))) + 1 + + df = False + if opts["date"]: + df = util.matchdate(opts["date"]) + + branches = opts.get('branch', []) + opts.get('only_branch', []) + opts['branch'] = [repo.lookupbranch(b) for b in branches] + + displayer = cmdutil.show_changeset(ui, repo, opts, True) + def prep(ctx, fns): + rev = ctx.rev() + parents = [p for p in repo.changelog.parentrevs(rev) + if p != nullrev] + if opts.get('no_merges') and len(parents) == 2: + return + if opts.get('only_merges') and len(parents) != 2: + return + if opts.get('branch') and ctx.branch() not in opts['branch']: + return + if df and not df(ctx.date()[0]): + return + if opts['user'] and not [k for k in opts['user'] + if k.lower() in ctx.user().lower()]: + return + if opts.get('keyword'): + for k in [kw.lower() for kw in opts['keyword']]: + if (k in ctx.user().lower() or + k in ctx.description().lower() or + k in " ".join(ctx.files()).lower()): + break + else: + return + + copies = None + if opts.get('copies') and rev: + copies = [] + getrenamed = templatekw.getrenamedfn(repo, endrev=endrev) + for fn in ctx.files(): + rename = getrenamed(fn, rev) + if rename: + copies.append((fn, rename[0])) + + revmatchfn = None + if opts.get('patch') or opts.get('stat'): + if opts.get('follow') or opts.get('follow_first'): + # note: this might be wrong when following through merges + revmatchfn = cmdutil.match(repo, fns, default='path') + else: + revmatchfn = matchfn + + displayer.show(ctx, copies=copies, matchfn=revmatchfn) + + for ctx in cmdutil.walkchangerevs(repo, matchfn, opts, prep): + if count == limit: + break + if displayer.flush(ctx.rev()): + count += 1 + displayer.close() + +def manifest(ui, repo, node=None, rev=None): + """output the current or given revision of the project manifest + + Print a list of version controlled files for the given revision. + If no revision is given, the first parent of the working directory + is used, or the null revision if no revision is checked out. + + With -v, print file permissions, symlink and executable bits. + With --debug, print file revision hashes. + + Returns 0 on success. + """ + + if rev and node: + raise util.Abort(_("please specify just one revision")) + + if not node: + node = rev + + decor = {'l':'644 @ ', 'x':'755 * ', '':'644 '} + ctx = repo[node] + for f in ctx: + if ui.debugflag: + ui.write("%40s " % hex(ctx.manifest()[f])) + if ui.verbose: + ui.write(decor[ctx.flags(f)]) + ui.write("%s\n" % f) + +def merge(ui, repo, node=None, **opts): + """merge working directory with another revision + + The current working directory is updated with all changes made in + the requested revision since the last common predecessor revision. + + Files that changed between either parent are marked as changed for + the next commit and a commit must be performed before any further + updates to the repository are allowed. The next commit will have + two parents. + + ``--tool`` can be used to specify the merge tool used for file + merges. It overrides the HGMERGE environment variable and your + configuration files. + + If no revision is specified, the working directory's parent is a + head revision, and the current branch contains exactly one other + head, the other head is merged with by default. Otherwise, an + explicit revision with which to merge with must be provided. 
+ + :hg:`resolve` must be used to resolve unresolved files. + + To undo an uncommitted merge, use :hg:`update --clean .` which + will check out a clean copy of the original merge parent, losing + all changes. + + Returns 0 on success, 1 if there are unresolved files. + """ + + if opts.get('rev') and node: + raise util.Abort(_("please specify just one revision")) + if not node: + node = opts.get('rev') + + if not node: + branch = repo.changectx(None).branch() + bheads = repo.branchheads(branch) + if len(bheads) > 2: + raise util.Abort(_( + 'branch \'%s\' has %d heads - ' + 'please merge with an explicit rev\n' + '(run \'hg heads .\' to see heads)') + % (branch, len(bheads))) + + parent = repo.dirstate.parents()[0] + if len(bheads) == 1: + if len(repo.heads()) > 1: + raise util.Abort(_( + 'branch \'%s\' has one head - ' + 'please merge with an explicit rev\n' + '(run \'hg heads\' to see all heads)') + % branch) + msg = _('there is nothing to merge') + if parent != repo.lookup(repo[None].branch()): + msg = _('%s - use "hg update" instead') % msg + raise util.Abort(msg) + + if parent not in bheads: + raise util.Abort(_('working dir not at a head rev - ' + 'use "hg update" or merge with an explicit rev')) + node = parent == bheads[0] and bheads[-1] or bheads[0] + + if opts.get('preview'): + # find nodes that are ancestors of p2 but not of p1 + p1 = repo.lookup('.') + p2 = repo.lookup(node) + nodes = repo.changelog.findmissing(common=[p1], heads=[p2]) + + displayer = cmdutil.show_changeset(ui, repo, opts) + for node in nodes: + displayer.show(repo[node]) + displayer.close() + return 0 + + try: + # ui.forcemerge is an internal variable, do not document + ui.setconfig('ui', 'forcemerge', opts.get('tool', '')) + return hg.merge(repo, node, force=opts.get('force')) + finally: + ui.setconfig('ui', 'forcemerge', '') + +def outgoing(ui, repo, dest=None, **opts): + """show changesets not found in the destination + + Show changesets not found in the specified destination repository + or the default push location. These are the changesets that would + be pushed if a push was requested. + + See pull for details of valid destination formats. + + Returns 0 if there are outgoing changes, 1 otherwise. + """ + ret = hg.outgoing(ui, repo, dest, opts) + return ret + +def parents(ui, repo, file_=None, **opts): + """show the parents of the working directory or revision + + Print the working directory's parent revisions. If a revision is + given via -r/--rev, the parent of that revision will be printed. + If a file argument is given, the revision in which the file was + last changed (before the working directory revision or the + argument to --rev if given) is printed. + + Returns 0 on success. 
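+
+ For example, to show the revision in which a file was last changed
+ before a given revision (the revision number and file name are
+ illustrative)::
+
+   hg parents -r 50 src/main.c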
+ """ + rev = opts.get('rev') + if rev: + ctx = repo[rev] + else: + ctx = repo[None] + + if file_: + m = cmdutil.match(repo, (file_,), opts) + if m.anypats() or len(m.files()) != 1: + raise util.Abort(_('can only specify an explicit filename')) + file_ = m.files()[0] + filenodes = [] + for cp in ctx.parents(): + if not cp: + continue + try: + filenodes.append(cp.filenode(file_)) + except error.LookupError: + pass + if not filenodes: + raise util.Abort(_("'%s' not found in manifest!") % file_) + fl = repo.file(file_) + p = [repo.lookup(fl.linkrev(fl.rev(fn))) for fn in filenodes] + else: + p = [cp.node() for cp in ctx.parents()] + + displayer = cmdutil.show_changeset(ui, repo, opts) + for n in p: + if n != nullid: + displayer.show(repo[n]) + displayer.close() + +def paths(ui, repo, search=None): + """show aliases for remote repositories + + Show definition of symbolic path name NAME. If no name is given, + show definition of all available names. + + Path names are defined in the [paths] section of your + configuration file and in ``/etc/mercurial/hgrc``. If run inside a + repository, ``.hg/hgrc`` is used, too. + + The path names ``default`` and ``default-push`` have a special + meaning. When performing a push or pull operation, they are used + as fallbacks if no location is specified on the command-line. + When ``default-push`` is set, it will be used for push and + ``default`` will be used for pull; otherwise ``default`` is used + as the fallback for both. When cloning a repository, the clone + source is written as ``default`` in ``.hg/hgrc``. Note that + ``default`` and ``default-push`` apply to all inbound (e.g. + :hg:`incoming`) and outbound (e.g. :hg:`outgoing`, :hg:`email` and + :hg:`bundle`) operations. + + See :hg:`help urls` for more information. + + Returns 0 on success. + """ + if search: + for name, path in ui.configitems("paths"): + if name == search: + ui.write("%s\n" % url.hidepassword(path)) + return + ui.warn(_("not found!\n")) + return 1 + else: + for name, path in ui.configitems("paths"): + ui.write("%s = %s\n" % (name, url.hidepassword(path))) + +def postincoming(ui, repo, modheads, optupdate, checkout): + if modheads == 0: + return + if optupdate: + if (modheads <= 1 or len(repo.branchheads()) == 1) or checkout: + return hg.update(repo, checkout) + else: + ui.status(_("not updating, since new heads added\n")) + if modheads > 1: + ui.status(_("(run 'hg heads' to see heads, 'hg merge' to merge)\n")) + else: + ui.status(_("(run 'hg update' to get a working copy)\n")) + +def pull(ui, repo, source="default", **opts): + """pull changes from the specified source + + Pull changes from a remote repository to a local one. + + This finds all changes from the repository at the specified path + or URL and adds them to a local repository (the current one unless + -R is specified). By default, this does not update the copy of the + project in the working directory. + + Use :hg:`incoming` if you want to see what would have been added + by a pull at the time you issued this command. If you then decide + to add those changes to the repository, you should use :hg:`pull + -r X` where ``X`` is the last changeset listed by :hg:`incoming`. + + If SOURCE is omitted, the 'default' path will be used. + See :hg:`help urls` for more information. + + Returns 0 on success, 1 if an update had unresolved files. 
+ """ + source, branches = hg.parseurl(ui.expandpath(source), opts.get('branch')) + other = hg.repository(hg.remoteui(repo, opts), source) + ui.status(_('pulling from %s\n') % url.hidepassword(source)) + revs, checkout = hg.addbranchrevs(repo, other, branches, opts.get('rev')) + if revs: + try: + revs = [other.lookup(rev) for rev in revs] + except error.CapabilityError: + err = _("other repository doesn't support revision lookup, " + "so a rev cannot be specified.") + raise util.Abort(err) + + modheads = repo.pull(other, heads=revs, force=opts.get('force')) + if checkout: + checkout = str(repo.changelog.rev(other.lookup(checkout))) + repo._subtoppath = source + try: + return postincoming(ui, repo, modheads, opts.get('update'), checkout) + finally: + del repo._subtoppath + +def push(ui, repo, dest=None, **opts): + """push changes to the specified destination + + Push changesets from the local repository to the specified + destination. + + This operation is symmetrical to pull: it is identical to a pull + in the destination repository from the current one. + + By default, push will not allow creation of new heads at the + destination, since multiple heads would make it unclear which head + to use. In this situation, it is recommended to pull and merge + before pushing. + + Use --new-branch if you want to allow push to create a new named + branch that is not present at the destination. This allows you to + only create a new branch without forcing other changes. + + Use -f/--force to override the default behavior and push all + changesets on all branches. + + If -r/--rev is used, the specified revision and all its ancestors + will be pushed to the remote repository. + + Please see :hg:`help urls` for important details about ``ssh://`` + URLs. If DESTINATION is omitted, a default path will be used. + + Returns 0 if push was successful, 1 if nothing to push. + """ + dest = ui.expandpath(dest or 'default-push', dest or 'default') + dest, branches = hg.parseurl(dest, opts.get('branch')) + revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get('rev')) + other = hg.repository(hg.remoteui(repo, opts), dest) + ui.status(_('pushing to %s\n') % url.hidepassword(dest)) + if revs: + revs = [repo.lookup(rev) for rev in revs] + + repo._subtoppath = dest + try: + # push subrepos depth-first for coherent ordering + c = repo[''] + subs = c.substate # only repos that are committed + for s in sorted(subs): + if not c.sub(s).push(opts.get('force')): + return False + finally: + del repo._subtoppath + r = repo.push(other, opts.get('force'), revs=revs, + newbranch=opts.get('new_branch')) + return r == 0 + +def recover(ui, repo): + """roll back an interrupted transaction + + Recover from an interrupted commit or pull. + + This command tries to fix the repository status after an + interrupted operation. It should only be necessary when Mercurial + suggests it. + + Returns 0 if successful, 1 if nothing to recover or verify fails. + """ + if repo.recover(): + return hg.verify(repo) + return 1 + +def remove(ui, repo, *pats, **opts): + """remove the specified files on the next commit + + Schedule the indicated files for removal from the repository. + + This only removes files from the current branch, not from the + entire project history. -A/--after can be used to remove only + files that have already been deleted, -f/--force can be used to + force deletion, and -Af can be used to remove files from the next + revision without deleting them from the working directory. 
+ + The following table details the behavior of remove for different + file states (columns) and option combinations (rows). The file + states are Added [A], Clean [C], Modified [M] and Missing [!] (as + reported by :hg:`status`). The actions are Warn, Remove (from + branch) and Delete (from disk):: + + A C M ! + none W RD W R + -f R RD RD R + -A W W W R + -Af R R R R + + This command schedules the files to be removed at the next commit. + To undo a remove before that, see :hg:`revert`. + + Returns 0 on success, 1 if any warnings encountered. + """ + + ret = 0 + after, force = opts.get('after'), opts.get('force') + if not pats and not after: + raise util.Abort(_('no files specified')) + + m = cmdutil.match(repo, pats, opts) + s = repo.status(match=m, clean=True) + modified, added, deleted, clean = s[0], s[1], s[3], s[6] + + for f in m.files(): + if f not in repo.dirstate and not os.path.isdir(m.rel(f)): + ui.warn(_('not removing %s: file is untracked\n') % m.rel(f)) + ret = 1 + + if force: + remove, forget = modified + deleted + clean, added + elif after: + remove, forget = deleted, [] + for f in modified + added + clean: + ui.warn(_('not removing %s: file still exists (use -f' + ' to force removal)\n') % m.rel(f)) + ret = 1 + else: + remove, forget = deleted + clean, [] + for f in modified: + ui.warn(_('not removing %s: file is modified (use -f' + ' to force removal)\n') % m.rel(f)) + ret = 1 + for f in added: + ui.warn(_('not removing %s: file has been marked for add (use -f' + ' to force removal)\n') % m.rel(f)) + ret = 1 + + for f in sorted(remove + forget): + if ui.verbose or not m.exact(f): + ui.status(_('removing %s\n') % m.rel(f)) + + repo[None].forget(forget) + repo[None].remove(remove, unlink=not after) + return ret + +def rename(ui, repo, *pats, **opts): + """rename files; equivalent of copy + remove + + Mark dest as copies of sources; mark sources for deletion. If dest + is a directory, copies are put in that directory. If dest is a + file, there can only be one source. + + By default, this command copies the contents of files as they + exist in the working directory. If invoked with -A/--after, the + operation is recorded, but no copying is performed. + + This command takes effect at the next commit. To undo a rename + before that, see :hg:`revert`. + + Returns 0 on success, 1 if errors are encountered. + """ + wlock = repo.wlock(False) + try: + return cmdutil.copy(ui, repo, pats, opts, rename=True) + finally: + wlock.release() + +def resolve(ui, repo, *pats, **opts): + """redo merges or set/view the merge status of files + + Merges with unresolved conflicts are often the result of + non-interactive merging using the ``internal:merge`` configuration + setting, or a command-line merge tool like ``diff3``. The resolve + command is used to manage the files involved in a merge, after + :hg:`merge` has been run, and before :hg:`commit` is run (i.e. the + working directory must have two parents). + + The resolve command can be used in the following ways: + + - :hg:`resolve [--tool TOOL] FILE...`: attempt to re-merge the specified + files, discarding any previous merge attempts. Re-merging is not + performed for files already marked as resolved. Use ``--all/-a`` + to selects all unresolved files. ``--tool`` can be used to specify + the merge tool used for the given files. It overrides the HGMERGE + environment variable and your configuration files. + + - :hg:`resolve -m [FILE]`: mark a file as having been resolved + (e.g. after having manually fixed-up the files). 
The default is + to mark all unresolved files. + + - :hg:`resolve -u [FILE]...`: mark a file as unresolved. The + default is to mark all resolved files. + + - :hg:`resolve -l`: list files which had or still have conflicts. + In the printed list, ``U`` = unresolved and ``R`` = resolved. + + Note that Mercurial will not let you commit files with unresolved + merge conflicts. You must use :hg:`resolve -m ...` before you can + commit after a conflicting merge. + + Returns 0 on success, 1 if any files fail a resolve attempt. + """ + + all, mark, unmark, show, nostatus = \ + [opts.get(o) for o in 'all mark unmark list no_status'.split()] + + if (show and (mark or unmark)) or (mark and unmark): + raise util.Abort(_("too many options specified")) + if pats and all: + raise util.Abort(_("can't specify --all and patterns")) + if not (all or pats or show or mark or unmark): + raise util.Abort(_('no files or directories specified; ' + 'use --all to remerge all files')) + + ms = mergemod.mergestate(repo) + m = cmdutil.match(repo, pats, opts) + ret = 0 + + for f in ms: + if m(f): + if show: + if nostatus: + ui.write("%s\n" % f) + else: + ui.write("%s %s\n" % (ms[f].upper(), f), + label='resolve.' + + {'u': 'unresolved', 'r': 'resolved'}[ms[f]]) + elif mark: + ms.mark(f, "r") + elif unmark: + ms.mark(f, "u") + else: + wctx = repo[None] + mctx = wctx.parents()[-1] + + # backup pre-resolve (merge uses .orig for its own purposes) + a = repo.wjoin(f) + util.copyfile(a, a + ".resolve") + + try: + # resolve file + ui.setconfig('ui', 'forcemerge', opts.get('tool', '')) + if ms.resolve(f, wctx, mctx): + ret = 1 + finally: + ui.setconfig('ui', 'forcemerge', '') + + # replace filemerge's .orig file with our resolve file + util.rename(a + ".resolve", a + ".orig") + + ms.commit() + return ret + +def revert(ui, repo, *pats, **opts): + """restore individual files or directories to an earlier state + + .. note:: + This command is most likely not what you are looking for. + Revert will partially overwrite content in the working + directory without changing the working directory parents. Use + :hg:`update -r rev` to check out earlier revisions, or + :hg:`update --clean .` to undo a merge which has added another + parent. + + With no revision specified, revert the named files or directories + to the contents they had in the parent of the working directory. + This restores the contents of the affected files to an unmodified + state and unschedules adds, removes, copies, and renames. If the + working directory has two parents, you must explicitly specify a + revision. + + Using the -r/--rev option, revert the given files or directories + to their contents as of a specific revision. This can be helpful + to "roll back" some or all of an earlier change. See :hg:`help + dates` for a list of formats valid for -d/--date. + + Revert modifies the working directory. It does not commit any + changes, or change the parent of the working directory. If you + revert to a revision other than the parent of the working + directory, the reverted files will thus appear modified + afterwards. + + If a file has been deleted, it is restored. If the executable mode + of a file was changed, it is reset. + + If names are given, all files matching the names are reverted. + If no arguments are given, no files are reverted. + + Modified files are saved with a .orig suffix before reverting. + To disable these backups, use --no-backup. + + Returns 0 on success. 
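The ``disptable`` in the revert implementation below packs its decisions into four terse tuples. The same decisions written out as a hypothetical flattened lookup table (file state plus presence in the target manifest, mapped to the action taken and whether a ``.orig`` backup is made); the real code additionally honours ``--no-backup`` and only backs up files that actually exist:

    REVERT_ACTIONS = {
        # (status, in target manifest): (action, backup)
        ('modified', True):  ('revert',   True),
        ('modified', False): ('remove',   True),
        ('added',    True):  ('revert',   True),
        ('added',    False): ('remove',   False),
        ('removed',  True):  ('undelete', False),
        ('removed',  False): (None,       False),   # nothing to do
        ('deleted',  True):  ('revert',   False),
        ('deleted',  False): ('remove',   False),
    }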
+ """ + + if opts.get("date"): + if opts.get("rev"): + raise util.Abort(_("you can't specify a revision and a date")) + opts["rev"] = cmdutil.finddate(ui, repo, opts["date"]) + + if not pats and not opts.get('all'): + raise util.Abort(_('no files or directories specified; ' + 'use --all to revert the whole repo')) + + parent, p2 = repo.dirstate.parents() + if not opts.get('rev') and p2 != nullid: + raise util.Abort(_('uncommitted merge - please provide a ' + 'specific revision')) + ctx = repo[opts.get('rev')] + node = ctx.node() + mf = ctx.manifest() + if node == parent: + pmf = mf + else: + pmf = None + + # need all matching names in dirstate and manifest of target rev, + # so have to walk both. do not print errors if files exist in one + # but not other. + + names = {} + + wlock = repo.wlock() + try: + # walk dirstate. + + m = cmdutil.match(repo, pats, opts) + m.bad = lambda x, y: False + for abs in repo.walk(m): + names[abs] = m.rel(abs), m.exact(abs) + + # walk target manifest. + + def badfn(path, msg): + if path in names: + return + path_ = path + '/' + for f in names: + if f.startswith(path_): + return + ui.warn("%s: %s\n" % (m.rel(path), msg)) + + m = cmdutil.match(repo, pats, opts) + m.bad = badfn + for abs in repo[node].walk(m): + if abs not in names: + names[abs] = m.rel(abs), m.exact(abs) + + m = cmdutil.matchfiles(repo, names) + changes = repo.status(match=m)[:4] + modified, added, removed, deleted = map(set, changes) + + # if f is a rename, also revert the source + cwd = repo.getcwd() + for f in added: + src = repo.dirstate.copied(f) + if src and src not in names and repo.dirstate[src] == 'r': + removed.add(src) + names[src] = (repo.pathto(src, cwd), True) + + def removeforget(abs): + if repo.dirstate[abs] == 'a': + return _('forgetting %s\n') + return _('removing %s\n') + + revert = ([], _('reverting %s\n')) + add = ([], _('adding %s\n')) + remove = ([], removeforget) + undelete = ([], _('undeleting %s\n')) + + disptable = ( + # dispatch table: + # file state + # action if in target manifest + # action if not in target manifest + # make backup if in target manifest + # make backup if not in target manifest + (modified, revert, remove, True, True), + (added, revert, remove, True, False), + (removed, undelete, None, False, False), + (deleted, revert, remove, False, False), + ) + + for abs, (rel, exact) in sorted(names.items()): + mfentry = mf.get(abs) + target = repo.wjoin(abs) + def handle(xlist, dobackup): + xlist[0].append(abs) + if (dobackup and not opts.get('no_backup') and + os.path.lexists(target)): + bakname = "%s.orig" % rel + ui.note(_('saving current version of %s as %s\n') % + (rel, bakname)) + if not opts.get('dry_run'): + util.rename(target, bakname) + if ui.verbose or not exact: + msg = xlist[1] + if not isinstance(msg, basestring): + msg = msg(abs) + ui.status(msg % rel) + for table, hitlist, misslist, backuphit, backupmiss in disptable: + if abs not in table: + continue + # file has changed in dirstate + if mfentry: + handle(hitlist, backuphit) + elif misslist is not None: + handle(misslist, backupmiss) + break + else: + if abs not in repo.dirstate: + if mfentry: + handle(add, True) + elif exact: + ui.warn(_('file not managed: %s\n') % rel) + continue + # file has not changed in dirstate + if node == parent: + if exact: + ui.warn(_('no changes needed to %s\n') % rel) + continue + if pmf is None: + # only need parent manifest in this unlikely case, + # so do not read by default + pmf = repo[parent].manifest() + if abs in pmf: + if mfentry: + # if version of file 
is same in parent and target + # manifests, do nothing + if (pmf[abs] != mfentry or + pmf.flags(abs) != mf.flags(abs)): + handle(revert, False) + else: + handle(remove, False) + + if not opts.get('dry_run'): + def checkout(f): + fc = ctx[f] + repo.wwrite(f, fc.data(), fc.flags()) + + audit_path = util.path_auditor(repo.root) + for f in remove[0]: + if repo.dirstate[f] == 'a': + repo.dirstate.forget(f) + continue + audit_path(f) + try: + util.unlink(repo.wjoin(f)) + except OSError: + pass + repo.dirstate.remove(f) + + normal = None + if node == parent: + # We're reverting to our parent. If possible, we'd like status + # to report the file as clean. We have to use normallookup for + # merges to avoid losing information about merged/dirty files. + if p2 != nullid: + normal = repo.dirstate.normallookup + else: + normal = repo.dirstate.normal + for f in revert[0]: + checkout(f) + if normal: + normal(f) + + for f in add[0]: + checkout(f) + repo.dirstate.add(f) + + normal = repo.dirstate.normallookup + if node == parent and p2 == nullid: + normal = repo.dirstate.normal + for f in undelete[0]: + checkout(f) + normal(f) + + finally: + wlock.release() + +def rollback(ui, repo, **opts): + """roll back the last transaction (dangerous) + + This command should be used with care. There is only one level of + rollback, and there is no way to undo a rollback. It will also + restore the dirstate at the time of the last transaction, losing + any dirstate changes since that time. This command does not alter + the working directory. + + Transactions are used to encapsulate the effects of all commands + that create new changesets or propagate existing changesets into a + repository. For example, the following commands are transactional, + and their effects can be rolled back: + + - commit + - import + - pull + - push (with this repository as the destination) + - unbundle + + This command is not intended for use on public repositories. Once + changes are visible for pull by other users, rolling a transaction + back locally is ineffective (someone else may already have pulled + the changes). Furthermore, a race is possible with readers of the + repository; for example an in-progress pull from the repository + may fail if a rollback is performed. + + Returns 0 on success, 1 if no rollback data is available. + """ + return repo.rollback(opts.get('dry_run')) + +def root(ui, repo): + """print the root (top) of the current working directory + + Print the root directory of the current repository. + + Returns 0 on success. + """ + ui.write(repo.root + "\n") + +def serve(ui, repo, **opts): + """start stand-alone webserver + + Start a local HTTP repository browser and pull server. You can use + this for ad-hoc sharing and browsing of repositories. It is + recommended to use a real web server to serve a repository for + longer periods of time. + + Please note that the server does not implement access control. + This means that, by default, anybody can read from the server and + nobody can write to it by default. Set the ``web.allow_push`` + option to ``*`` to allow everybody to push to the server. You + should use a real web server if you need to authenticate users. + + By default, the server logs accesses to stdout and errors to + stderr. Use the -A/--accesslog and -E/--errorlog options to log to + files. + + To have the server choose a free port number to listen on, specify + a port number of 0; in this case, the server will print the port + number it uses. + + Returns 0 on success. 
+ """ + + if opts["stdio"]: + if repo is None: + raise error.RepoError(_("There is no Mercurial repository here" + " (.hg not found)")) + s = sshserver.sshserver(ui, repo) + s.serve_forever() + + # this way we can check if something was given in the command-line + if opts.get('port'): + opts['port'] = util.getport(opts.get('port')) + + baseui = repo and repo.baseui or ui + optlist = ("name templates style address port prefix ipv6" + " accesslog errorlog certificate encoding") + for o in optlist.split(): + val = opts.get(o, '') + if val in (None, ''): # should check against default options instead + continue + baseui.setconfig("web", o, val) + if repo and repo.ui != baseui: + repo.ui.setconfig("web", o, val) + + o = opts.get('web_conf') or opts.get('webdir_conf') + if not o: + if not repo: + raise error.RepoError(_("There is no Mercurial repository" + " here (.hg not found)")) + o = repo.root + + app = hgweb.hgweb(o, baseui=ui) + + class service(object): + def init(self): + util.set_signal_handler() + self.httpd = hgweb.server.create_server(ui, app) + + if opts['port'] and not ui.verbose: + return + + if self.httpd.prefix: + prefix = self.httpd.prefix.strip('/') + '/' + else: + prefix = '' + + port = ':%d' % self.httpd.port + if port == ':80': + port = '' + + bindaddr = self.httpd.addr + if bindaddr == '0.0.0.0': + bindaddr = '*' + elif ':' in bindaddr: # IPv6 + bindaddr = '[%s]' % bindaddr + + fqaddr = self.httpd.fqaddr + if ':' in fqaddr: + fqaddr = '[%s]' % fqaddr + if opts['port']: + write = ui.status + else: + write = ui.write + write(_('listening at http://%s%s/%s (bound to %s:%d)\n') % + (fqaddr, port, prefix, bindaddr, self.httpd.port)) + + def run(self): + self.httpd.serve_forever() + + service = service() + + cmdutil.service(opts, initfn=service.init, runfn=service.run) + +def status(ui, repo, *pats, **opts): + """show changed files in the working directory + + Show status of files in the repository. If names are given, only + files that match are shown. Files that are clean or ignored or + the source of a copy/move operation, are not listed unless + -c/--clean, -i/--ignored, -C/--copies or -A/--all are given. + Unless options described with "show only ..." are given, the + options -mardu are used. + + Option -q/--quiet hides untracked (unknown and ignored) files + unless explicitly requested with -u/--unknown or -i/--ignored. + + .. note:: + status may appear to disagree with diff if permissions have + changed or a merge has occurred. The standard diff format does + not report permission changes and diff only reports changes + relative to one merge parent. + + If one revision is given, it is used as the base revision. + If two revisions are given, the differences between them are + shown. The --change option can also be used as a shortcut to list + the changed files of a revision from its first parent. + + The codes used to show the status of files are:: + + M = modified + A = added + R = removed + C = clean + ! = missing (deleted by non-hg command, but still tracked) + ? = not tracked + I = ignored + = origin of the previous file listed as A (added) + + Returns 0 on success. 
+ """ + + revs = opts.get('rev') + change = opts.get('change') + + if revs and change: + msg = _('cannot specify --rev and --change at the same time') + raise util.Abort(msg) + elif change: + node2 = repo.lookup(change) + node1 = repo[node2].parents()[0].node() + else: + node1, node2 = cmdutil.revpair(repo, revs) + + cwd = (pats and repo.getcwd()) or '' + end = opts.get('print0') and '\0' or '\n' + copy = {} + states = 'modified added removed deleted unknown ignored clean'.split() + show = [k for k in states if opts.get(k)] + if opts.get('all'): + show += ui.quiet and (states[:4] + ['clean']) or states + if not show: + show = ui.quiet and states[:4] or states[:5] + + stat = repo.status(node1, node2, cmdutil.match(repo, pats, opts), + 'ignored' in show, 'clean' in show, 'unknown' in show, + opts.get('subrepos')) + changestates = zip(states, 'MAR!?IC', stat) + + if (opts.get('all') or opts.get('copies')) and not opts.get('no_status'): + ctxn = repo[nullid] + ctx1 = repo[node1] + ctx2 = repo[node2] + added = stat[1] + if node2 is None: + added = stat[0] + stat[1] # merged? + + for k, v in copies.copies(repo, ctx1, ctx2, ctxn)[0].iteritems(): + if k in added: + copy[k] = v + elif v in added: + copy[v] = k + + for state, char, files in changestates: + if state in show: + format = "%s %%s%s" % (char, end) + if opts.get('no_status'): + format = "%%s%s" % end + + for f in files: + ui.write(format % repo.pathto(f, cwd), + label='status.' + state) + if f in copy: + ui.write(' %s%s' % (repo.pathto(copy[f], cwd), end), + label='status.copied') + +def summary(ui, repo, **opts): + """summarize working directory state + + This generates a brief summary of the working directory state, + including parents, branch, commit status, and available updates. + + With the --remote option, this will check the default paths for + incoming and outgoing changes. This can be time-consuming. + + Returns 0 on success. 
+ """ + + ctx = repo[None] + parents = ctx.parents() + pnode = parents[0].node() + + for p in parents: + # label with log.changeset (instead of log.parent) since this + # shows a working directory parent *changeset*: + ui.write(_('parent: %d:%s ') % (p.rev(), str(p)), + label='log.changeset') + ui.write(' '.join(p.tags()), label='log.tag') + if p.rev() == -1: + if not len(repo): + ui.write(_(' (empty repository)')) + else: + ui.write(_(' (no revision checked out)')) + ui.write('\n') + if p.description(): + ui.status(' ' + p.description().splitlines()[0].strip() + '\n', + label='log.summary') + + branch = ctx.branch() + bheads = repo.branchheads(branch) + m = _('branch: %s\n') % branch + if branch != 'default': + ui.write(m, label='log.branch') + else: + ui.status(m, label='log.branch') + + st = list(repo.status(unknown=True))[:6] + + c = repo.dirstate.copies() + copied, renamed = [], [] + for d, s in c.iteritems(): + if s in st[2]: + st[2].remove(s) + renamed.append(d) + else: + copied.append(d) + if d in st[1]: + st[1].remove(d) + st.insert(3, renamed) + st.insert(4, copied) + + ms = mergemod.mergestate(repo) + st.append([f for f in ms if ms[f] == 'u']) + + subs = [s for s in ctx.substate if ctx.sub(s).dirty()] + st.append(subs) + + labels = [ui.label(_('%d modified'), 'status.modified'), + ui.label(_('%d added'), 'status.added'), + ui.label(_('%d removed'), 'status.removed'), + ui.label(_('%d renamed'), 'status.copied'), + ui.label(_('%d copied'), 'status.copied'), + ui.label(_('%d deleted'), 'status.deleted'), + ui.label(_('%d unknown'), 'status.unknown'), + ui.label(_('%d ignored'), 'status.ignored'), + ui.label(_('%d unresolved'), 'resolve.unresolved'), + ui.label(_('%d subrepos'), 'status.modified')] + t = [] + for s, l in zip(st, labels): + if s: + t.append(l % len(s)) + + t = ', '.join(t) + cleanworkdir = False + + if len(parents) > 1: + t += _(' (merge)') + elif branch != parents[0].branch(): + t += _(' (new branch)') + elif (parents[0].extra().get('close') and + pnode in repo.branchheads(branch, closed=True)): + t += _(' (head closed)') + elif not (st[0] or st[1] or st[2] or st[3] or st[4] or st[9]): + t += _(' (clean)') + cleanworkdir = True + elif pnode not in bheads: + t += _(' (new branch head)') + + if cleanworkdir: + ui.status(_('commit: %s\n') % t.strip()) + else: + ui.write(_('commit: %s\n') % t.strip()) + + # all ancestors of branch heads - all ancestors of parent = new csets + new = [0] * len(repo) + cl = repo.changelog + for a in [cl.rev(n) for n in bheads]: + new[a] = 1 + for a in cl.ancestors(*[cl.rev(n) for n in bheads]): + new[a] = 1 + for a in [p.rev() for p in parents]: + if a >= 0: + new[a] = 0 + for a in cl.ancestors(*[p.rev() for p in parents]): + new[a] = 0 + new = sum(new) + + if new == 0: + ui.status(_('update: (current)\n')) + elif pnode not in bheads: + ui.write(_('update: %d new changesets (update)\n') % new) + else: + ui.write(_('update: %d new changesets, %d branch heads (merge)\n') % + (new, len(bheads))) + + if opts.get('remote'): + t = [] + source, branches = hg.parseurl(ui.expandpath('default')) + other = hg.repository(hg.remoteui(repo, {}), source) + revs, checkout = hg.addbranchrevs(repo, other, branches, opts.get('rev')) + ui.debug('comparing with %s\n' % url.hidepassword(source)) + repo.ui.pushbuffer() + common, incoming, rheads = discovery.findcommonincoming(repo, other) + repo.ui.popbuffer() + if incoming: + t.append(_('1 or more incoming')) + + dest, branches = hg.parseurl(ui.expandpath('default-push', 'default')) + revs, checkout = 
hg.addbranchrevs(repo, repo, branches, None) + other = hg.repository(hg.remoteui(repo, {}), dest) + ui.debug('comparing with %s\n' % url.hidepassword(dest)) + repo.ui.pushbuffer() + o = discovery.findoutgoing(repo, other) + repo.ui.popbuffer() + o = repo.changelog.nodesbetween(o, None)[0] + if o: + t.append(_('%d outgoing') % len(o)) + + if t: + ui.write(_('remote: %s\n') % (', '.join(t))) + else: + ui.status(_('remote: (synced)\n')) + +def tag(ui, repo, name1, *names, **opts): + """add one or more tags for the current or given revision + + Name a particular revision using <name>. + + Tags are used to name particular revisions of the repository and are + very useful to compare different revisions, to go back to significant + earlier versions or to mark branch points as releases, etc. Changing + an existing tag is normally disallowed; use -f/--force to override. + + If no revision is given, the parent of the working directory is + used, or tip if no revision is checked out. + + To facilitate version control, distribution, and merging of tags, + they are stored as a file named ".hgtags" which is managed similarly + to other project files and can be hand-edited if necessary. This + also means that tagging creates a new commit. The file + ".hg/localtags" is used for local tags (not shared among + repositories). + + Tag commits are usually made at the head of a branch. If the parent + of the working directory is not a branch head, :hg:`tag` aborts; use + -f/--force to force the tag commit to be based on a non-head + changeset. + + See :hg:`help dates` for a list of formats valid for -d/--date. + + Since tag names have priority over branch names during revision + lookup, using an existing branch name as a tag name is discouraged. + + Returns 0 on success. + """ + + rev_ = "." 
+ names = [t.strip() for t in (name1,) + names] + if len(names) != len(set(names)): + raise util.Abort(_('tag names must be unique')) + for n in names: + if n in ['tip', '.', 'null']: + raise util.Abort(_('the name \'%s\' is reserved') % n) + if not n: + raise util.Abort(_('tag names cannot consist entirely of whitespace')) + if opts.get('rev') and opts.get('remove'): + raise util.Abort(_("--rev and --remove are incompatible")) + if opts.get('rev'): + rev_ = opts['rev'] + message = opts.get('message') + if opts.get('remove'): + expectedtype = opts.get('local') and 'local' or 'global' + for n in names: + if not repo.tagtype(n): + raise util.Abort(_('tag \'%s\' does not exist') % n) + if repo.tagtype(n) != expectedtype: + if expectedtype == 'global': + raise util.Abort(_('tag \'%s\' is not a global tag') % n) + else: + raise util.Abort(_('tag \'%s\' is not a local tag') % n) + rev_ = nullid + if not message: + # we don't translate commit messages + message = 'Removed tag %s' % ', '.join(names) + elif not opts.get('force'): + for n in names: + if n in repo.tags(): + raise util.Abort(_('tag \'%s\' already exists ' + '(use -f to force)') % n) + if not opts.get('local'): + p1, p2 = repo.dirstate.parents() + if p2 != nullid: + raise util.Abort(_('uncommitted merge')) + bheads = repo.branchheads() + if not opts.get('force') and bheads and p1 not in bheads: + raise util.Abort(_('not at a branch head (use -f to force)')) + r = repo[rev_].node() + + if not message: + # we don't translate commit messages + message = ('Added tag %s for changeset %s' % + (', '.join(names), short(r))) + + date = opts.get('date') + if date: + date = util.parsedate(date) + + if opts.get('edit'): + message = ui.edit(message, ui.username()) + + repo.tag(names, r, message, opts.get('local'), opts.get('user'), date) + +def tags(ui, repo): + """list repository tags + + This lists both regular and local tags. When the -v/--verbose + switch is used, a third column "local" is printed for local tags. + + Returns 0 on success. + """ + + hexfunc = ui.debugflag and hex or short + tagtype = "" + + for t, n in reversed(repo.tagslist()): + if ui.quiet: + ui.write("%s\n" % t) + continue + + try: + hn = hexfunc(n) + r = "%5d:%s" % (repo.changelog.rev(n), hn) + except error.LookupError: + r = " ?:%s" % hn + else: + spaces = " " * (30 - encoding.colwidth(t)) + if ui.verbose: + if repo.tagtype(t) == 'local': + tagtype = " local" + else: + tagtype = "" + ui.write("%s%s %s%s\n" % (t, spaces, r, tagtype)) + +def tip(ui, repo, **opts): + """show the tip revision + + The tip revision (usually just called the tip) is the changeset + most recently added to the repository (and therefore the most + recently changed head). + + If you have just made a commit, that commit will be the tip. If + you have just pulled changes from another repository, the tip of + that repository becomes the current tip. The "tip" tag is special + and cannot be renamed or assigned to a different changeset. + + Returns 0 on success. + """ + displayer = cmdutil.show_changeset(ui, repo, opts) + displayer.show(repo[len(repo) - 1]) + displayer.close() + +def unbundle(ui, repo, fname1, *fnames, **opts): + """apply one or more changegroup files + + Apply one or more compressed changegroup files generated by the + bundle command. + + Returns 0 on success, 1 if an update has unresolved files. 
+ """ + fnames = (fname1,) + fnames + + lock = repo.lock() + try: + for fname in fnames: + f = url.open(ui, fname) + gen = changegroup.readbundle(f, fname) + modheads = repo.addchangegroup(gen, 'unbundle', 'bundle:' + fname, + lock=lock) + finally: + lock.release() + + return postincoming(ui, repo, modheads, opts.get('update'), None) + +def update(ui, repo, node=None, rev=None, clean=False, date=None, check=False): + """update working directory (or switch revisions) + + Update the repository's working directory to the specified + changeset. If no changeset is specified, update to the tip of the + current named branch. + + If the changeset is not a descendant of the working directory's + parent, the update is aborted. With the -c/--check option, the + working directory is checked for uncommitted changes; if none are + found, the working directory is updated to the specified + changeset. + + The following rules apply when the working directory contains + uncommitted changes: + + 1. If neither -c/--check nor -C/--clean is specified, and if + the requested changeset is an ancestor or descendant of + the working directory's parent, the uncommitted changes + are merged into the requested changeset and the merged + result is left uncommitted. If the requested changeset is + not an ancestor or descendant (that is, it is on another + branch), the update is aborted and the uncommitted changes + are preserved. + + 2. With the -c/--check option, the update is aborted and the + uncommitted changes are preserved. + + 3. With the -C/--clean option, uncommitted changes are discarded and + the working directory is updated to the requested changeset. + + Use null as the changeset to remove the working directory (like + :hg:`clone -U`). + + If you want to update just one file to an older changeset, use + :hg:`revert`. + + See :hg:`help dates` for a list of formats valid for -d/--date. + + Returns 0 on success, 1 if there are unresolved files. + """ + if rev and node: + raise util.Abort(_("please specify just one revision")) + + if not rev: + rev = node + + rev = cmdutil.revsingle(repo, rev, rev).rev() + + if check and clean: + raise util.Abort(_("cannot specify both -c/--check and -C/--clean")) + + if check: + # we could use dirty() but we can ignore merge and branch trivia + c = repo[None] + if c.modified() or c.added() or c.removed(): + raise util.Abort(_("uncommitted local changes")) + + if date: + if rev: + raise util.Abort(_("you can't specify a revision and a date")) + rev = cmdutil.finddate(ui, repo, date) + + if clean or check: + return hg.clean(repo, rev) + else: + return hg.update(repo, rev) + +def verify(ui, repo): + """verify the integrity of the repository + + Verify the integrity of the current repository. + + This will perform an extensive check of the repository's + integrity, validating the hashes and checksums of each entry in + the changelog, manifest, and tracked files, as well as the + integrity of their crosslinks and indices. + + Returns 0 on success, 1 if errors are encountered. + """ + return hg.verify(repo) + +def version_(ui): + """output version and copyright information""" + ui.write(_("Mercurial Distributed SCM (version %s)\n") + % util.version()) + ui.status(_( + "(see http://mercurial.selenic.com for more information)\n" + "\nCopyright (C) 2005-2010 Matt Mackall and others\n" + "This is free software; see the source for copying conditions. 
" + "There is NO\nwarranty; " + "not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n" + )) + +# Command options and aliases are listed here, alphabetically + +globalopts = [ + ('R', 'repository', '', + _('repository root directory or name of overlay bundle file'), + _('REPO')), + ('', 'cwd', '', + _('change working directory'), _('DIR')), + ('y', 'noninteractive', None, + _('do not prompt, assume \'yes\' for any required answers')), + ('q', 'quiet', None, _('suppress output')), + ('v', 'verbose', None, _('enable additional output')), + ('', 'config', [], + _('set/override config option (use \'section.name=value\')'), + _('CONFIG')), + ('', 'debug', None, _('enable debugging output')), + ('', 'debugger', None, _('start debugger')), + ('', 'encoding', encoding.encoding, _('set the charset encoding'), + _('ENCODE')), + ('', 'encodingmode', encoding.encodingmode, + _('set the charset encoding mode'), _('MODE')), + ('', 'traceback', None, _('always print a traceback on exception')), + ('', 'time', None, _('time how long the command takes')), + ('', 'profile', None, _('print command execution profile')), + ('', 'version', None, _('output version information and exit')), + ('h', 'help', None, _('display help and exit')), +] + +dryrunopts = [('n', 'dry-run', None, + _('do not perform actions, just print output'))] + +remoteopts = [ + ('e', 'ssh', '', + _('specify ssh command to use'), _('CMD')), + ('', 'remotecmd', '', + _('specify hg command to run on the remote side'), _('CMD')), +] + +walkopts = [ + ('I', 'include', [], + _('include names matching the given patterns'), _('PATTERN')), + ('X', 'exclude', [], + _('exclude names matching the given patterns'), _('PATTERN')), +] + +commitopts = [ + ('m', 'message', '', + _('use text as commit message'), _('TEXT')), + ('l', 'logfile', '', + _('read commit message from file'), _('FILE')), +] + +commitopts2 = [ + ('d', 'date', '', + _('record datecode as commit date'), _('DATE')), + ('u', 'user', '', + _('record the specified user as committer'), _('USER')), +] + +templateopts = [ + ('', 'style', '', + _('display using template map file'), _('STYLE')), + ('', 'template', '', + _('display with template'), _('TEMPLATE')), +] + +logopts = [ + ('p', 'patch', None, _('show patch')), + ('g', 'git', None, _('use git extended diff format')), + ('l', 'limit', '', + _('limit number of changes displayed'), _('NUM')), + ('M', 'no-merges', None, _('do not show merges')), + ('', 'stat', None, _('output diffstat-style summary of changes')), +] + templateopts + +diffopts = [ + ('a', 'text', None, _('treat all files as text')), + ('g', 'git', None, _('use git extended diff format')), + ('', 'nodates', None, _('omit dates from diff headers')) +] + +diffopts2 = [ + ('p', 'show-function', None, _('show which function each change is in')), + ('', 'reverse', None, _('produce a diff that undoes the changes')), + ('w', 'ignore-all-space', None, + _('ignore white space when comparing lines')), + ('b', 'ignore-space-change', None, + _('ignore changes in the amount of white space')), + ('B', 'ignore-blank-lines', None, + _('ignore changes whose lines are all blank')), + ('U', 'unified', '', + _('number of lines of context to show'), _('NUM')), + ('', 'stat', None, _('output diffstat-style summary of changes')), +] + +similarityopts = [ + ('s', 'similarity', '', + _('guess renamed files by similarity (0<=s<=100)'), _('SIMILARITY')) +] + +subrepoopts = [ + ('S', 'subrepos', None, + _('recurse into subrepositories')) +] + +table = { + "^add": (add, walkopts + 
subrepoopts + dryrunopts, + _('[OPTION]... [FILE]...')), + "addremove": + (addremove, similarityopts + walkopts + dryrunopts, + _('[OPTION]... [FILE]...')), + "^annotate|blame": + (annotate, + [('r', 'rev', '', + _('annotate the specified revision'), _('REV')), + ('', 'follow', None, + _('follow copies/renames and list the filename (DEPRECATED)')), + ('', 'no-follow', None, _("don't follow copies and renames")), + ('a', 'text', None, _('treat all files as text')), + ('u', 'user', None, _('list the author (long with -v)')), + ('f', 'file', None, _('list the filename')), + ('d', 'date', None, _('list the date (short with -q)')), + ('n', 'number', None, _('list the revision number (default)')), + ('c', 'changeset', None, _('list the changeset')), + ('l', 'line-number', None, + _('show line number at the first appearance')) + ] + walkopts, + _('[-r REV] [-f] [-a] [-u] [-d] [-n] [-c] [-l] FILE...')), + "archive": + (archive, + [('', 'no-decode', None, _('do not pass files through decoders')), + ('p', 'prefix', '', + _('directory prefix for files in archive'), _('PREFIX')), + ('r', 'rev', '', + _('revision to distribute'), _('REV')), + ('t', 'type', '', + _('type of distribution to create'), _('TYPE')), + ] + subrepoopts + walkopts, + _('[OPTION]... DEST')), + "backout": + (backout, + [('', 'merge', None, + _('merge with old dirstate parent after backout')), + ('', 'parent', '', + _('parent to choose when backing out merge'), _('REV')), + ('t', 'tool', '', + _('specify merge tool')), + ('r', 'rev', '', + _('revision to backout'), _('REV')), + ] + walkopts + commitopts + commitopts2, + _('[OPTION]... [-r] REV')), + "bisect": + (bisect, + [('r', 'reset', False, _('reset bisect state')), + ('g', 'good', False, _('mark changeset good')), + ('b', 'bad', False, _('mark changeset bad')), + ('s', 'skip', False, _('skip testing changeset')), + ('c', 'command', '', + _('use command to check changeset state'), _('CMD')), + ('U', 'noupdate', False, _('do not update to target'))], + _("[-gbsr] [-U] [-c CMD] [REV]")), + "branch": + (branch, + [('f', 'force', None, + _('set branch name even if it shadows an existing branch')), + ('C', 'clean', None, _('reset branch name to parent branch name'))], + _('[-fC] [NAME]')), + "branches": + (branches, + [('a', 'active', False, + _('show only branches that have unmerged heads')), + ('c', 'closed', False, + _('show normal and closed branches'))], + _('[-ac]')), + "bundle": + (bundle, + [('f', 'force', None, + _('run even when the destination is unrelated')), + ('r', 'rev', [], + _('a changeset intended to be added to the destination'), + _('REV')), + ('b', 'branch', [], + _('a specific branch you would like to bundle'), + _('BRANCH')), + ('', 'base', [], + _('a base changeset assumed to be available at the destination'), + _('REV')), + ('a', 'all', None, _('bundle all changesets in the repository')), + ('t', 'type', 'bzip2', + _('bundle compression type to use'), _('TYPE')), + ] + remoteopts, + _('[-f] [-t TYPE] [-a] [-r REV]... [--base REV]... FILE [DEST]')), + "cat": + (cat, + [('o', 'output', '', + _('print output to file with formatted name'), _('FORMAT')), + ('r', 'rev', '', + _('print the given revision'), _('REV')), + ('', 'decode', None, _('apply any matching decode filter')), + ] + walkopts, + _('[OPTION]... 
FILE...')), + "^clone": + (clone, + [('U', 'noupdate', None, + _('the clone will include an empty working copy (only a repository)')), + ('u', 'updaterev', '', + _('revision, tag or branch to check out'), _('REV')), + ('r', 'rev', [], + _('include the specified changeset'), _('REV')), + ('b', 'branch', [], + _('clone only the specified branch'), _('BRANCH')), + ('', 'pull', None, _('use pull protocol to copy metadata')), + ('', 'uncompressed', None, + _('use uncompressed transfer (fast over LAN)')), + ] + remoteopts, + _('[OPTION]... SOURCE [DEST]')), + "^commit|ci": + (commit, + [('A', 'addremove', None, + _('mark new/missing files as added/removed before committing')), + ('', 'close-branch', None, + _('mark a branch as closed, hiding it from the branch list')), + ] + walkopts + commitopts + commitopts2, + _('[OPTION]... [FILE]...')), + "copy|cp": + (copy, + [('A', 'after', None, _('record a copy that has already occurred')), + ('f', 'force', None, + _('forcibly copy over an existing managed file')), + ] + walkopts + dryrunopts, + _('[OPTION]... [SOURCE]... DEST')), + "debugancestor": (debugancestor, [], _('[INDEX] REV1 REV2')), + "debugbuilddag": + (debugbuilddag, + [('m', 'mergeable-file', None, _('add single file mergeable changes')), + ('a', 'appended-file', None, _('add single file all revs append to')), + ('o', 'overwritten-file', None, _('add single file all revs overwrite')), + ('n', 'new-file', None, _('add new file at each rev')), + ], + _('[OPTION]... TEXT')), + "debugcheckstate": (debugcheckstate, [], ''), + "debugcommands": (debugcommands, [], _('[COMMAND]')), + "debugcomplete": + (debugcomplete, + [('o', 'options', None, _('show the command options'))], + _('[-o] CMD')), + "debugdag": + (debugdag, + [('t', 'tags', None, _('use tags as labels')), + ('b', 'branches', None, _('annotate with branch names')), + ('', 'dots', None, _('use dots for runs')), + ('s', 'spaces', None, _('separate elements by spaces')), + ], + _('[OPTION]... [FILE [REV]...]')), + "debugdate": + (debugdate, + [('e', 'extended', None, _('try extended date formats'))], + _('[-e] DATE [RANGE]')), + "debugdata": (debugdata, [], _('FILE REV')), + "debugfsinfo": (debugfsinfo, [], _('[PATH]')), + "debugindex": (debugindex, + [('f', 'format', 0, _('revlog format'), _('FORMAT'))], + _('FILE')), + "debugindexdot": (debugindexdot, [], _('FILE')), + "debuginstall": (debuginstall, [], ''), + "debugpushkey": (debugpushkey, [], _('REPO NAMESPACE [KEY OLD NEW]')), + "debugrebuildstate": + (debugrebuildstate, + [('r', 'rev', '', + _('revision to rebuild to'), _('REV'))], + _('[-r REV] [REV]')), + "debugrename": + (debugrename, + [('r', 'rev', '', + _('revision to debug'), _('REV'))], + _('[-r REV] FILE')), + "debugrevspec": + (debugrevspec, [], ('REVSPEC')), + "debugsetparents": + (debugsetparents, [], _('REV1 [REV2]')), + "debugstate": + (debugstate, + [('', 'nodates', None, _('do not display the saved mtime'))], + _('[OPTION]...')), + "debugsub": + (debugsub, + [('r', 'rev', '', + _('revision to check'), _('REV'))], + _('[-r REV] [REV]')), + "debugwalk": (debugwalk, walkopts, _('[OPTION]... [FILE]...')), + "^diff": + (diff, + [('r', 'rev', [], + _('revision'), _('REV')), + ('c', 'change', '', + _('change made by revision'), _('REV')) + ] + diffopts + diffopts2 + walkopts + subrepoopts, + _('[OPTION]... 
([-c REV] | [-r REV1 [-r REV2]]) [FILE]...')), + "^export": + (export, + [('o', 'output', '', + _('print output to file with formatted name'), _('FORMAT')), + ('', 'switch-parent', None, _('diff against the second parent')), + ('r', 'rev', [], + _('revisions to export'), _('REV')), + ] + diffopts, + _('[OPTION]... [-o OUTFILESPEC] REV...')), + "^forget": + (forget, + [] + walkopts, + _('[OPTION]... FILE...')), + "grep": + (grep, + [('0', 'print0', None, _('end fields with NUL')), + ('', 'all', None, _('print all revisions that match')), + ('f', 'follow', None, + _('follow changeset history,' + ' or file history across copies and renames')), + ('i', 'ignore-case', None, _('ignore case when matching')), + ('l', 'files-with-matches', None, + _('print only filenames and revisions that match')), + ('n', 'line-number', None, _('print matching line numbers')), + ('r', 'rev', [], + _('only search files changed within revision range'), _('REV')), + ('u', 'user', None, _('list the author (long with -v)')), + ('d', 'date', None, _('list the date (short with -q)')), + ] + walkopts, + _('[OPTION]... PATTERN [FILE]...')), + "heads": + (heads, + [('r', 'rev', '', + _('show only heads which are descendants of STARTREV'), + _('STARTREV')), + ('t', 'topo', False, _('show topological heads only')), + ('a', 'active', False, + _('show active branchheads only (DEPRECATED)')), + ('c', 'closed', False, + _('show normal and closed branch heads')), + ] + templateopts, + _('[-ac] [-r STARTREV] [REV]...')), + "help": (help_, [], _('[TOPIC]')), + "identify|id": + (identify, + [('r', 'rev', '', + _('identify the specified revision'), _('REV')), + ('n', 'num', None, _('show local revision number')), + ('i', 'id', None, _('show global revision id')), + ('b', 'branch', None, _('show branch')), + ('t', 'tags', None, _('show tags'))], + _('[-nibt] [-r REV] [SOURCE]')), + "import|patch": + (import_, + [('p', 'strip', 1, + _('directory strip option for patch. This has the same ' + 'meaning as the corresponding patch option'), + _('NUM')), + ('b', 'base', '', + _('base path'), _('PATH')), + ('f', 'force', None, + _('skip check for outstanding uncommitted changes')), + ('', 'no-commit', None, + _("don't commit, just update the working directory")), + ('', 'exact', None, + _('apply patch to the nodes from which it was generated')), + ('', 'import-branch', None, + _('use any branch information in patch (implied by --exact)'))] + + commitopts + commitopts2 + similarityopts, + _('[OPTION]... PATCH...')), + "incoming|in": + (incoming, + [('f', 'force', None, + _('run even if remote repository is unrelated')), + ('n', 'newest-first', None, _('show newest record first')), + ('', 'bundle', '', + _('file to store the bundles into'), _('FILE')), + ('r', 'rev', [], + _('a remote changeset intended to be added'), _('REV')), + ('b', 'branch', [], + _('a specific branch you would like to pull'), _('BRANCH')), + ] + logopts + remoteopts + subrepoopts, + _('[-p] [-n] [-M] [-f] [-r REV]...' + ' [--bundle FILENAME] [SOURCE]')), + "^init": + (init, + remoteopts, + _('[-e CMD] [--remotecmd CMD] [DEST]')), + "locate": + (locate, + [('r', 'rev', '', + _('search the repository as it is in REV'), _('REV')), + ('0', 'print0', None, + _('end filenames with NUL, for use with xargs')), + ('f', 'fullpath', None, + _('print complete paths from the filesystem root')), + ] + walkopts, + _('[OPTION]... 
[PATTERN]...')), + "^log|history": + (log, + [('f', 'follow', None, + _('follow changeset history,' + ' or file history across copies and renames')), + ('', 'follow-first', None, + _('only follow the first parent of merge changesets')), + ('d', 'date', '', + _('show revisions matching date spec'), _('DATE')), + ('C', 'copies', None, _('show copied files')), + ('k', 'keyword', [], + _('do case-insensitive search for a given text'), _('TEXT')), + ('r', 'rev', [], + _('show the specified revision or range'), _('REV')), + ('', 'removed', None, _('include revisions where files were removed')), + ('m', 'only-merges', None, _('show only merges')), + ('u', 'user', [], + _('revisions committed by user'), _('USER')), + ('', 'only-branch', [], + _('show only changesets within the given named branch (DEPRECATED)'), + _('BRANCH')), + ('b', 'branch', [], + _('show changesets within the given named branch'), _('BRANCH')), + ('P', 'prune', [], + _('do not display revision or any of its ancestors'), _('REV')), + ] + logopts + walkopts, + _('[OPTION]... [FILE]')), + "manifest": + (manifest, + [('r', 'rev', '', + _('revision to display'), _('REV'))], + _('[-r REV]')), + "^merge": + (merge, + [('f', 'force', None, _('force a merge with outstanding changes')), + ('t', 'tool', '', _('specify merge tool')), + ('r', 'rev', '', + _('revision to merge'), _('REV')), + ('P', 'preview', None, + _('review revisions to merge (no merge is performed)'))], + _('[-P] [-f] [[-r] REV]')), + "outgoing|out": + (outgoing, + [('f', 'force', None, + _('run even when the destination is unrelated')), + ('r', 'rev', [], + _('a changeset intended to be included in the destination'), + _('REV')), + ('n', 'newest-first', None, _('show newest record first')), + ('b', 'branch', [], + _('a specific branch you would like to push'), _('BRANCH')), + ] + logopts + remoteopts + subrepoopts, + _('[-M] [-p] [-n] [-f] [-r REV]... [DEST]')), + "parents": + (parents, + [('r', 'rev', '', + _('show parents of the specified revision'), _('REV')), + ] + templateopts, + _('[-r REV] [FILE]')), + "paths": (paths, [], _('[NAME]')), + "^pull": + (pull, + [('u', 'update', None, + _('update to new branch head if changesets were pulled')), + ('f', 'force', None, + _('run even when remote repository is unrelated')), + ('r', 'rev', [], + _('a remote changeset intended to be added'), _('REV')), + ('b', 'branch', [], + _('a specific branch you would like to pull'), _('BRANCH')), + ] + remoteopts, + _('[-u] [-f] [-r REV]... [-e CMD] [--remotecmd CMD] [SOURCE]')), + "^push": + (push, + [('f', 'force', None, _('force push')), + ('r', 'rev', [], + _('a changeset intended to be included in the destination'), + _('REV')), + ('b', 'branch', [], + _('a specific branch you would like to push'), _('BRANCH')), + ('', 'new-branch', False, _('allow pushing a new branch')), + ] + remoteopts, + _('[-f] [-r REV]... [-e CMD] [--remotecmd CMD] [DEST]')), + "recover": (recover, []), + "^remove|rm": + (remove, + [('A', 'after', None, _('record delete for missing files')), + ('f', 'force', None, + _('remove (and delete) file even if added or modified')), + ] + walkopts, + _('[OPTION]... FILE...')), + "rename|move|mv": + (rename, + [('A', 'after', None, _('record a rename that has already occurred')), + ('f', 'force', None, + _('forcibly copy over an existing managed file')), + ] + walkopts + dryrunopts, + _('[OPTION]... SOURCE... 
DEST')), + "resolve": + (resolve, + [('a', 'all', None, _('select all unresolved files')), + ('l', 'list', None, _('list state of files needing merge')), + ('m', 'mark', None, _('mark files as resolved')), + ('u', 'unmark', None, _('mark files as unresolved')), + ('t', 'tool', '', _('specify merge tool')), + ('n', 'no-status', None, _('hide status prefix'))] + + walkopts, + _('[OPTION]... [FILE]...')), + "revert": + (revert, + [('a', 'all', None, _('revert all changes when no arguments given')), + ('d', 'date', '', + _('tipmost revision matching date'), _('DATE')), + ('r', 'rev', '', + _('revert to the specified revision'), _('REV')), + ('', 'no-backup', None, _('do not save backup copies of files')), + ] + walkopts + dryrunopts, + _('[OPTION]... [-r REV] [NAME]...')), + "rollback": (rollback, dryrunopts), + "root": (root, []), + "^serve": + (serve, + [('A', 'accesslog', '', + _('name of access log file to write to'), _('FILE')), + ('d', 'daemon', None, _('run server in background')), + ('', 'daemon-pipefds', '', + _('used internally by daemon mode'), _('NUM')), + ('E', 'errorlog', '', + _('name of error log file to write to'), _('FILE')), + # use string type, then we can check if something was passed + ('p', 'port', '', + _('port to listen on (default: 8000)'), _('PORT')), + ('a', 'address', '', + _('address to listen on (default: all interfaces)'), _('ADDR')), + ('', 'prefix', '', + _('prefix path to serve from (default: server root)'), _('PREFIX')), + ('n', 'name', '', + _('name to show in web pages (default: working directory)'), + _('NAME')), + ('', 'web-conf', '', + _('name of the hgweb config file (see "hg help hgweb")'), + _('FILE')), + ('', 'webdir-conf', '', + _('name of the hgweb config file (DEPRECATED)'), _('FILE')), + ('', 'pid-file', '', + _('name of file to write process ID to'), _('FILE')), + ('', 'stdio', None, _('for remote clients')), + ('t', 'templates', '', + _('web templates to use'), _('TEMPLATE')), + ('', 'style', '', + _('template style to use'), _('STYLE')), + ('6', 'ipv6', None, _('use IPv6 in addition to IPv4')), + ('', 'certificate', '', + _('SSL certificate file'), _('FILE'))], + _('[OPTION]...')), + "showconfig|debugconfig": + (showconfig, + [('u', 'untrusted', None, _('show untrusted configuration options'))], + _('[-u] [NAME]...')), + "^summary|sum": + (summary, + [('', 'remote', None, _('check for push and pull'))], '[--remote]'), + "^status|st": + (status, + [('A', 'all', None, _('show status of all files')), + ('m', 'modified', None, _('show only modified files')), + ('a', 'added', None, _('show only added files')), + ('r', 'removed', None, _('show only removed files')), + ('d', 'deleted', None, _('show only deleted (but tracked) files')), + ('c', 'clean', None, _('show only files without changes')), + ('u', 'unknown', None, _('show only unknown (not tracked) files')), + ('i', 'ignored', None, _('show only ignored files')), + ('n', 'no-status', None, _('hide status prefix')), + ('C', 'copies', None, _('show source of copied files')), + ('0', 'print0', None, + _('end filenames with NUL, for use with xargs')), + ('', 'rev', [], + _('show difference from revision'), _('REV')), + ('', 'change', '', + _('list the changed files of a revision'), _('REV')), + ] + walkopts + subrepoopts, + _('[OPTION]... 
[FILE]...')), + "tag": + (tag, + [('f', 'force', None, _('force tag')), + ('l', 'local', None, _('make the tag local')), + ('r', 'rev', '', + _('revision to tag'), _('REV')), + ('', 'remove', None, _('remove a tag')), + # -l/--local is already there, commitopts cannot be used + ('e', 'edit', None, _('edit commit message')), + ('m', 'message', '', + _('use <text> as commit message'), _('TEXT')), + ] + commitopts2, + _('[-f] [-l] [-m TEXT] [-d DATE] [-u USER] [-r REV] NAME...')), + "tags": (tags, [], ''), + "tip": + (tip, + [('p', 'patch', None, _('show patch')), + ('g', 'git', None, _('use git extended diff format')), + ] + templateopts, + _('[-p] [-g]')), + "unbundle": + (unbundle, + [('u', 'update', None, + _('update to new branch head if changesets were unbundled'))], + _('[-u] FILE...')), + "^update|up|checkout|co": + (update, + [('C', 'clean', None, _('discard uncommitted changes (no backup)')), + ('c', 'check', None, + _('update across branches if no uncommitted changes')), + ('d', 'date', '', + _('tipmost revision matching date'), _('DATE')), + ('r', 'rev', '', + _('revision'), _('REV'))], + _('[-c] [-C] [-d DATE] [[-r] REV]')), + "verify": (verify, []), + "version": (version_, []), +} + +norepo = ("clone init version help debugcommands debugcomplete" + " debugdate debuginstall debugfsinfo debugpushkey") +optionalrepo = ("identify paths serve showconfig debugancestor debugdag" + " debugdata debugindex debugindexdot") diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/commands.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/commands.pyo Binary files differnew file mode 100644 index 0000000..eb9254f --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/commands.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/config.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/config.py new file mode 100644 index 0000000..802f444 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/config.py @@ -0,0 +1,142 @@ +# config.py - configuration parsing for Mercurial +# +# Copyright 2009 Matt Mackall <mpm@selenic.com> and others +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. 
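The parser defined in this module is self-contained enough to exercise directly. A minimal usage sketch; the file name and its contents are made up for illustration:

    from mercurial.config import config

    cfg = config()
    cfg.parse('example.rc',                 # 'src' is only used when reporting sources and errors
              '[paths]\n'
              'default = http://example.com/hg/repo\n'
              '[ui]\n'
              'username = Jane Doe <jane@example.com>\n')
    cfg.get('ui', 'username')               # -> 'Jane Doe <jane@example.com>'
    cfg.source('ui', 'username')            # -> 'example.rc:4'
    cfg.items('paths')                      # insertion order preserved via sortdict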
+ +from i18n import _ +import error, util +import re, os + +class sortdict(dict): + 'a simple sorted dictionary' + def __init__(self, data=None): + self._list = [] + if data: + self.update(data) + def copy(self): + return sortdict(self) + def __setitem__(self, key, val): + if key in self: + self._list.remove(key) + self._list.append(key) + dict.__setitem__(self, key, val) + def __iter__(self): + return self._list.__iter__() + def update(self, src): + for k in src: + self[k] = src[k] + def items(self): + return [(k, self[k]) for k in self._list] + def __delitem__(self, key): + dict.__delitem__(self, key) + self._list.remove(key) + +class config(object): + def __init__(self, data=None): + self._data = {} + self._source = {} + if data: + for k in data._data: + self._data[k] = data[k].copy() + self._source = data._source.copy() + def copy(self): + return config(self) + def __contains__(self, section): + return section in self._data + def __getitem__(self, section): + return self._data.get(section, {}) + def __iter__(self): + for d in self.sections(): + yield d + def update(self, src): + for s in src: + if s not in self: + self._data[s] = sortdict() + self._data[s].update(src._data[s]) + self._source.update(src._source) + def get(self, section, item, default=None): + return self._data.get(section, {}).get(item, default) + def source(self, section, item): + return self._source.get((section, item), "") + def sections(self): + return sorted(self._data.keys()) + def items(self, section): + return self._data.get(section, {}).items() + def set(self, section, item, value, source=""): + if section not in self: + self._data[section] = sortdict() + self._data[section][item] = value + self._source[(section, item)] = source + + def parse(self, src, data, sections=None, remap=None, include=None): + sectionre = re.compile(r'\[([^\[]+)\]') + itemre = re.compile(r'([^=\s][^=]*?)\s*=\s*(.*\S|)') + contre = re.compile(r'\s+(\S|\S.*\S)\s*$') + emptyre = re.compile(r'(;|#|\s*$)') + unsetre = re.compile(r'%unset\s+(\S+)') + includere = re.compile(r'%include\s+(\S|\S.*\S)\s*$') + section = "" + item = None + line = 0 + cont = False + + for l in data.splitlines(True): + line += 1 + if cont: + m = contre.match(l) + if m: + if sections and section not in sections: + continue + v = self.get(section, item) + "\n" + m.group(1) + self.set(section, item, v, "%s:%d" % (src, line)) + continue + item = None + cont = False + m = includere.match(l) + if m: + inc = util.expandpath(m.group(1)) + base = os.path.dirname(src) + inc = os.path.normpath(os.path.join(base, inc)) + if include: + try: + include(inc, remap=remap, sections=sections) + except IOError, inst: + raise error.ParseError(_("cannot include %s (%s)") + % (inc, inst.strerror), + "%s:%s" % (src, line)) + continue + if emptyre.match(l): + continue + m = sectionre.match(l) + if m: + section = m.group(1) + if remap: + section = remap.get(section, section) + if section not in self: + self._data[section] = sortdict() + continue + m = itemre.match(l) + if m: + item = m.group(1) + cont = True + if sections and section not in sections: + continue + self.set(section, item, m.group(2), "%s:%d" % (src, line)) + continue + m = unsetre.match(l) + if m: + name = m.group(1) + if sections and section not in sections: + continue + if self.get(section, name) != None: + del self._data[section][name] + continue + + raise error.ParseError(l.rstrip(), ("%s:%s" % (src, line))) + + def read(self, path, fp=None, sections=None, remap=None): + if not fp: + fp = open(path) + self.parse(path, 
fp.read(), sections, remap, self.read) diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/config.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/config.pyo Binary files differnew file mode 100644 index 0000000..a349147 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/config.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/context.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/context.py new file mode 100644 index 0000000..ff1bfa7 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/context.py @@ -0,0 +1,1098 @@ +# context.py - changeset and file context objects for mercurial +# +# Copyright 2006, 2007 Matt Mackall <mpm@selenic.com> +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +from node import nullid, nullrev, short, hex +from i18n import _ +import ancestor, bdiff, error, util, subrepo, patch +import os, errno, stat + +propertycache = util.propertycache + +class changectx(object): + """A changecontext object makes access to data related to a particular + changeset convenient.""" + def __init__(self, repo, changeid=''): + """changeid is a revision number, node, or tag""" + if changeid == '': + changeid = '.' + self._repo = repo + if isinstance(changeid, (long, int)): + self._rev = changeid + self._node = self._repo.changelog.node(changeid) + else: + self._node = self._repo.lookup(changeid) + self._rev = self._repo.changelog.rev(self._node) + + def __str__(self): + return short(self.node()) + + def __int__(self): + return self.rev() + + def __repr__(self): + return "<changectx %s>" % str(self) + + def __hash__(self): + try: + return hash(self._rev) + except AttributeError: + return id(self) + + def __eq__(self, other): + try: + return self._rev == other._rev + except AttributeError: + return False + + def __ne__(self, other): + return not (self == other) + + def __nonzero__(self): + return self._rev != nullrev + + @propertycache + def _changeset(self): + return self._repo.changelog.read(self.node()) + + @propertycache + def _manifest(self): + return self._repo.manifest.read(self._changeset[0]) + + @propertycache + def _manifestdelta(self): + return self._repo.manifest.readdelta(self._changeset[0]) + + @propertycache + def _parents(self): + p = self._repo.changelog.parentrevs(self._rev) + if p[1] == nullrev: + p = p[:-1] + return [changectx(self._repo, x) for x in p] + + @propertycache + def substate(self): + return subrepo.state(self, self._repo.ui) + + def __contains__(self, key): + return key in self._manifest + + def __getitem__(self, key): + return self.filectx(key) + + def __iter__(self): + for f in sorted(self._manifest): + yield f + + def changeset(self): + return self._changeset + def manifest(self): + return self._manifest + def manifestnode(self): + return self._changeset[0] + + def rev(self): + return self._rev + def node(self): + return self._node + def hex(self): + return hex(self._node) + def user(self): + return self._changeset[1] + def date(self): + return self._changeset[2] + def files(self): + return self._changeset[3] + def description(self): + return self._changeset[4] + def branch(self): + return self._changeset[5].get("branch") + def extra(self): + return self._changeset[5] + def tags(self): + return self._repo.nodetags(self._node) + + def parents(self): + """return contexts for each parent changeset""" + return self._parents + + def p1(self): + return self._parents[0] 
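# --- illustrative sketch (editorial; not from the 1.7.3 sources in this diff) ---
# Typical read-only use of the changectx accessors above; 'repo' is assumed to
# be an existing localrepo instance (e.g. mercurial.hg.repository(ui.ui(), '.')).
#   >>> ctx = repo['tip']            # localrepo.__getitem__ builds a changectx
#   >>> ctx.rev(), ctx.hex()         # integer revision and 40-char hex node
#   >>> ctx.user(), ctx.files()      # fields unpacked from the changelog entry
#   >>> ctx.p1().rev()               # first parent, itself a changectx
#   >>> 'setup.py' in ctx            # manifest membership test
# --- end sketch ---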
+ + def p2(self): + if len(self._parents) == 2: + return self._parents[1] + return changectx(self._repo, -1) + + def children(self): + """return contexts for each child changeset""" + c = self._repo.changelog.children(self._node) + return [changectx(self._repo, x) for x in c] + + def ancestors(self): + for a in self._repo.changelog.ancestors(self._rev): + yield changectx(self._repo, a) + + def descendants(self): + for d in self._repo.changelog.descendants(self._rev): + yield changectx(self._repo, d) + + def _fileinfo(self, path): + if '_manifest' in self.__dict__: + try: + return self._manifest[path], self._manifest.flags(path) + except KeyError: + raise error.LookupError(self._node, path, + _('not found in manifest')) + if '_manifestdelta' in self.__dict__ or path in self.files(): + if path in self._manifestdelta: + return self._manifestdelta[path], self._manifestdelta.flags(path) + node, flag = self._repo.manifest.find(self._changeset[0], path) + if not node: + raise error.LookupError(self._node, path, + _('not found in manifest')) + + return node, flag + + def filenode(self, path): + return self._fileinfo(path)[0] + + def flags(self, path): + try: + return self._fileinfo(path)[1] + except error.LookupError: + return '' + + def filectx(self, path, fileid=None, filelog=None): + """get a file context from this changeset""" + if fileid is None: + fileid = self.filenode(path) + return filectx(self._repo, path, fileid=fileid, + changectx=self, filelog=filelog) + + def ancestor(self, c2): + """ + return the ancestor context of self and c2 + """ + # deal with workingctxs + n2 = c2._node + if n2 == None: + n2 = c2._parents[0]._node + n = self._repo.changelog.ancestor(self._node, n2) + return changectx(self._repo, n) + + def walk(self, match): + fset = set(match.files()) + # for dirstate.walk, files=['.'] means "walk the whole tree". + # follow that here, too + fset.discard('.') + for fn in self: + for ffn in fset: + # match if the file is the exact name or a directory + if ffn == fn or fn.startswith("%s/" % ffn): + fset.remove(ffn) + break + if match(fn): + yield fn + for fn in sorted(fset): + if match.bad(fn, _('no such file in rev %s') % self) and match(fn): + yield fn + + def sub(self, path): + return subrepo.subrepo(self, path) + + def diff(self, ctx2=None, match=None, **opts): + """Returns a diff generator for the given contexts and matcher""" + if ctx2 is None: + ctx2 = self.p1() + if ctx2 is not None and not isinstance(ctx2, changectx): + ctx2 = self._repo[ctx2] + diffopts = patch.diffopts(self._repo.ui, opts) + return patch.diff(self._repo, ctx2.node(), self.node(), + match=match, opts=diffopts) + +class filectx(object): + """A filecontext object makes access to data related to a particular + filerevision convenient.""" + def __init__(self, repo, path, changeid=None, fileid=None, + filelog=None, changectx=None): + """changeid can be a changeset revision, node, or tag. 
+ fileid can be a file revision or node.""" + self._repo = repo + self._path = path + + assert (changeid is not None + or fileid is not None + or changectx is not None), \ + ("bad args: changeid=%r, fileid=%r, changectx=%r" + % (changeid, fileid, changectx)) + + if filelog: + self._filelog = filelog + + if changeid is not None: + self._changeid = changeid + if changectx is not None: + self._changectx = changectx + if fileid is not None: + self._fileid = fileid + + @propertycache + def _changectx(self): + return changectx(self._repo, self._changeid) + + @propertycache + def _filelog(self): + return self._repo.file(self._path) + + @propertycache + def _changeid(self): + if '_changectx' in self.__dict__: + return self._changectx.rev() + else: + return self._filelog.linkrev(self._filerev) + + @propertycache + def _filenode(self): + if '_fileid' in self.__dict__: + return self._filelog.lookup(self._fileid) + else: + return self._changectx.filenode(self._path) + + @propertycache + def _filerev(self): + return self._filelog.rev(self._filenode) + + @propertycache + def _repopath(self): + return self._path + + def __nonzero__(self): + try: + self._filenode + return True + except error.LookupError: + # file is missing + return False + + def __str__(self): + return "%s@%s" % (self.path(), short(self.node())) + + def __repr__(self): + return "<filectx %s>" % str(self) + + def __hash__(self): + try: + return hash((self._path, self._filenode)) + except AttributeError: + return id(self) + + def __eq__(self, other): + try: + return (self._path == other._path + and self._filenode == other._filenode) + except AttributeError: + return False + + def __ne__(self, other): + return not (self == other) + + def filectx(self, fileid): + '''opens an arbitrary revision of the file without + opening a new filelog''' + return filectx(self._repo, self._path, fileid=fileid, + filelog=self._filelog) + + def filerev(self): + return self._filerev + def filenode(self): + return self._filenode + def flags(self): + return self._changectx.flags(self._path) + def filelog(self): + return self._filelog + + def rev(self): + if '_changectx' in self.__dict__: + return self._changectx.rev() + if '_changeid' in self.__dict__: + return self._changectx.rev() + return self._filelog.linkrev(self._filerev) + + def linkrev(self): + return self._filelog.linkrev(self._filerev) + def node(self): + return self._changectx.node() + def hex(self): + return hex(self.node()) + def user(self): + return self._changectx.user() + def date(self): + return self._changectx.date() + def files(self): + return self._changectx.files() + def description(self): + return self._changectx.description() + def branch(self): + return self._changectx.branch() + def extra(self): + return self._changectx.extra() + def manifest(self): + return self._changectx.manifest() + def changectx(self): + return self._changectx + + def data(self): + return self._filelog.read(self._filenode) + def path(self): + return self._path + def size(self): + return self._filelog.size(self._filerev) + + def cmp(self, fctx): + """compare with other file context + + returns True if different than fctx. 
+ """ + if (fctx._filerev is None and self._repo._encodefilterpats + or self.size() == fctx.size()): + return self._filelog.cmp(self._filenode, fctx.data()) + + return True + + def renamed(self): + """check if file was actually renamed in this changeset revision + + If rename logged in file revision, we report copy for changeset only + if file revisions linkrev points back to the changeset in question + or both changeset parents contain different file revisions. + """ + + renamed = self._filelog.renamed(self._filenode) + if not renamed: + return renamed + + if self.rev() == self.linkrev(): + return renamed + + name = self.path() + fnode = self._filenode + for p in self._changectx.parents(): + try: + if fnode == p.filenode(name): + return None + except error.LookupError: + pass + return renamed + + def parents(self): + p = self._path + fl = self._filelog + pl = [(p, n, fl) for n in self._filelog.parents(self._filenode)] + + r = self._filelog.renamed(self._filenode) + if r: + pl[0] = (r[0], r[1], None) + + return [filectx(self._repo, p, fileid=n, filelog=l) + for p, n, l in pl if n != nullid] + + def children(self): + # hard for renames + c = self._filelog.children(self._filenode) + return [filectx(self._repo, self._path, fileid=x, + filelog=self._filelog) for x in c] + + def annotate(self, follow=False, linenumber=None): + '''returns a list of tuples of (ctx, line) for each line + in the file, where ctx is the filectx of the node where + that line was last changed. + This returns tuples of ((ctx, linenumber), line) for each line, + if "linenumber" parameter is NOT "None". + In such tuples, linenumber means one at the first appearance + in the managed file. + To reduce annotation cost, + this returns fixed value(False is used) as linenumber, + if "linenumber" parameter is "False".''' + + def decorate_compat(text, rev): + return ([rev] * len(text.splitlines()), text) + + def without_linenumber(text, rev): + return ([(rev, False)] * len(text.splitlines()), text) + + def with_linenumber(text, rev): + size = len(text.splitlines()) + return ([(rev, i) for i in xrange(1, size + 1)], text) + + decorate = (((linenumber is None) and decorate_compat) or + (linenumber and with_linenumber) or + without_linenumber) + + def pair(parent, child): + for a1, a2, b1, b2 in bdiff.blocks(parent[1], child[1]): + child[0][b1:b2] = parent[0][a1:a2] + return child + + getlog = util.lrucachefunc(lambda x: self._repo.file(x)) + def getctx(path, fileid): + log = path == self._path and self._filelog or getlog(path) + return filectx(self._repo, path, fileid=fileid, filelog=log) + getctx = util.lrucachefunc(getctx) + + def parents(f): + # we want to reuse filectx objects as much as possible + p = f._path + if f._filerev is None: # working dir + pl = [(n.path(), n.filerev()) for n in f.parents()] + else: + pl = [(p, n) for n in f._filelog.parentrevs(f._filerev)] + + if follow: + r = f.renamed() + if r: + pl[0] = (r[0], getlog(r[0]).rev(r[1])) + + return [getctx(p, n) for p, n in pl if n != nullrev] + + # use linkrev to find the first changeset where self appeared + if self.rev() != self.linkrev(): + base = self.filectx(self.filerev()) + else: + base = self + + # find all ancestors + needed = {base: 1} + visit = [base] + files = [base._path] + while visit: + f = visit.pop(0) + for p in parents(f): + if p not in needed: + needed[p] = 1 + visit.append(p) + if p._path not in files: + files.append(p._path) + else: + # count how many times we'll use this + needed[p] += 1 + + # sort by revision (per file) which is a topological 
order + visit = [] + for f in files: + visit.extend(n for n in needed if n._path == f) + + hist = {} + for f in sorted(visit, key=lambda x: x.rev()): + curr = decorate(f.data(), f) + for p in parents(f): + curr = pair(hist[p], curr) + # trim the history of unneeded revs + needed[p] -= 1 + if not needed[p]: + del hist[p] + hist[f] = curr + + return zip(hist[f][0], hist[f][1].splitlines(True)) + + def ancestor(self, fc2, actx=None): + """ + find the common ancestor file context, if any, of self, and fc2 + + If actx is given, it must be the changectx of the common ancestor + of self's and fc2's respective changesets. + """ + + if actx is None: + actx = self.changectx().ancestor(fc2.changectx()) + + # the trivial case: changesets are unrelated, files must be too + if not actx: + return None + + # the easy case: no (relevant) renames + if fc2.path() == self.path() and self.path() in actx: + return actx[self.path()] + acache = {} + + # prime the ancestor cache for the working directory + for c in (self, fc2): + if c._filerev is None: + pl = [(n.path(), n.filenode()) for n in c.parents()] + acache[(c._path, None)] = pl + + flcache = {self._repopath:self._filelog, fc2._repopath:fc2._filelog} + def parents(vertex): + if vertex in acache: + return acache[vertex] + f, n = vertex + if f not in flcache: + flcache[f] = self._repo.file(f) + fl = flcache[f] + pl = [(f, p) for p in fl.parents(n) if p != nullid] + re = fl.renamed(n) + if re: + pl.append(re) + acache[vertex] = pl + return pl + + a, b = (self._path, self._filenode), (fc2._path, fc2._filenode) + v = ancestor.ancestor(a, b, parents) + if v: + f, n = v + return filectx(self._repo, f, fileid=n, filelog=flcache[f]) + + return None + + def ancestors(self): + seen = set(str(self)) + visit = [self] + while visit: + for parent in visit.pop(0).parents(): + s = str(parent) + if s not in seen: + visit.append(parent) + seen.add(s) + yield parent + +class workingctx(changectx): + """A workingctx object makes access to data related to + the current working directory convenient. + date - any valid date string or (unixtime, offset), or None. + user - username string, or None. + extra - a dictionary of extra values, or None. + changes - a list of file lists as returned by localrepo.status() + or None to use the repository status. 
+ """ + def __init__(self, repo, text="", user=None, date=None, extra=None, + changes=None): + self._repo = repo + self._rev = None + self._node = None + self._text = text + if date: + self._date = util.parsedate(date) + if user: + self._user = user + if changes: + self._status = list(changes[:4]) + self._unknown = changes[4] + self._ignored = changes[5] + self._clean = changes[6] + else: + self._unknown = None + self._ignored = None + self._clean = None + + self._extra = {} + if extra: + self._extra = extra.copy() + if 'branch' not in self._extra: + branch = self._repo.dirstate.branch() + try: + branch = branch.decode('UTF-8').encode('UTF-8') + except UnicodeDecodeError: + raise util.Abort(_('branch name not in UTF-8!')) + self._extra['branch'] = branch + if self._extra['branch'] == '': + self._extra['branch'] = 'default' + + def __str__(self): + return str(self._parents[0]) + "+" + + def __nonzero__(self): + return True + + def __contains__(self, key): + return self._repo.dirstate[key] not in "?r" + + @propertycache + def _manifest(self): + """generate a manifest corresponding to the working directory""" + + if self._unknown is None: + self.status(unknown=True) + + man = self._parents[0].manifest().copy() + copied = self._repo.dirstate.copies() + if len(self._parents) > 1: + man2 = self.p2().manifest() + def getman(f): + if f in man: + return man + return man2 + else: + getman = lambda f: man + def cf(f): + f = copied.get(f, f) + return getman(f).flags(f) + ff = self._repo.dirstate.flagfunc(cf) + modified, added, removed, deleted = self._status + unknown = self._unknown + for i, l in (("a", added), ("m", modified), ("u", unknown)): + for f in l: + orig = copied.get(f, f) + man[f] = getman(orig).get(orig, nullid) + i + try: + man.set(f, ff(f)) + except OSError: + pass + + for f in deleted + removed: + if f in man: + del man[f] + + return man + + @propertycache + def _status(self): + return self._repo.status()[:4] + + @propertycache + def _user(self): + return self._repo.ui.username() + + @propertycache + def _date(self): + return util.makedate() + + @propertycache + def _parents(self): + p = self._repo.dirstate.parents() + if p[1] == nullid: + p = p[:-1] + self._parents = [changectx(self._repo, x) for x in p] + return self._parents + + def status(self, ignored=False, clean=False, unknown=False): + """Explicit status query + Unless this method is used to query the working copy status, the + _status property will implicitly read the status using its default + arguments.""" + stat = self._repo.status(ignored=ignored, clean=clean, unknown=unknown) + self._unknown = self._ignored = self._clean = None + if unknown: + self._unknown = stat[4] + if ignored: + self._ignored = stat[5] + if clean: + self._clean = stat[6] + self._status = stat[:4] + return stat + + def manifest(self): + return self._manifest + def user(self): + return self._user or self._repo.ui.username() + def date(self): + return self._date + def description(self): + return self._text + def files(self): + return sorted(self._status[0] + self._status[1] + self._status[2]) + + def modified(self): + return self._status[0] + def added(self): + return self._status[1] + def removed(self): + return self._status[2] + def deleted(self): + return self._status[3] + def unknown(self): + assert self._unknown is not None # must call status first + return self._unknown + def ignored(self): + assert self._ignored is not None # must call status first + return self._ignored + def clean(self): + assert self._clean is not None # must call status first 
+ return self._clean + def branch(self): + return self._extra['branch'] + def extra(self): + return self._extra + + def tags(self): + t = [] + [t.extend(p.tags()) for p in self.parents()] + return t + + def children(self): + return [] + + def flags(self, path): + if '_manifest' in self.__dict__: + try: + return self._manifest.flags(path) + except KeyError: + return '' + + orig = self._repo.dirstate.copies().get(path, path) + + def findflag(ctx): + mnode = ctx.changeset()[0] + node, flag = self._repo.manifest.find(mnode, orig) + ff = self._repo.dirstate.flagfunc(lambda x: flag or '') + try: + return ff(path) + except OSError: + pass + + flag = findflag(self._parents[0]) + if flag is None and len(self.parents()) > 1: + flag = findflag(self._parents[1]) + if flag is None or self._repo.dirstate[path] == 'r': + return '' + return flag + + def filectx(self, path, filelog=None): + """get a file context from the working directory""" + return workingfilectx(self._repo, path, workingctx=self, + filelog=filelog) + + def ancestor(self, c2): + """return the ancestor context of self and c2""" + return self._parents[0].ancestor(c2) # punt on two parents for now + + def walk(self, match): + return sorted(self._repo.dirstate.walk(match, self.substate.keys(), + True, False)) + + def dirty(self, missing=False): + "check whether a working directory is modified" + # check subrepos first + for s in self.substate: + if self.sub(s).dirty(): + return True + # check current working dir + return (self.p2() or self.branch() != self.p1().branch() or + self.modified() or self.added() or self.removed() or + (missing and self.deleted())) + + def add(self, list, prefix=""): + join = lambda f: os.path.join(prefix, f) + wlock = self._repo.wlock() + ui, ds = self._repo.ui, self._repo.dirstate + try: + rejected = [] + for f in list: + p = self._repo.wjoin(f) + try: + st = os.lstat(p) + except: + ui.warn(_("%s does not exist!\n") % join(f)) + rejected.append(f) + continue + if st.st_size > 10000000: + ui.warn(_("%s: up to %d MB of RAM may be required " + "to manage this file\n" + "(use 'hg revert %s' to cancel the " + "pending addition)\n") + % (f, 3 * st.st_size // 1000000, join(f))) + if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)): + ui.warn(_("%s not added: only files and symlinks " + "supported currently\n") % join(f)) + rejected.append(p) + elif ds[f] in 'amn': + ui.warn(_("%s already tracked!\n") % join(f)) + elif ds[f] == 'r': + ds.normallookup(f) + else: + ds.add(f) + return rejected + finally: + wlock.release() + + def forget(self, list): + wlock = self._repo.wlock() + try: + for f in list: + if self._repo.dirstate[f] != 'a': + self._repo.ui.warn(_("%s not added!\n") % f) + else: + self._repo.dirstate.forget(f) + finally: + wlock.release() + + def ancestors(self): + for a in self._repo.changelog.ancestors( + *[p.rev() for p in self._parents]): + yield changectx(self._repo, a) + + def remove(self, list, unlink=False): + if unlink: + for f in list: + try: + util.unlink(self._repo.wjoin(f)) + except OSError, inst: + if inst.errno != errno.ENOENT: + raise + wlock = self._repo.wlock() + try: + for f in list: + if unlink and os.path.lexists(self._repo.wjoin(f)): + self._repo.ui.warn(_("%s still exists!\n") % f) + elif self._repo.dirstate[f] == 'a': + self._repo.dirstate.forget(f) + elif f not in self._repo.dirstate: + self._repo.ui.warn(_("%s not tracked!\n") % f) + else: + self._repo.dirstate.remove(f) + finally: + wlock.release() + + def undelete(self, list): + pctxs = self.parents() + wlock = 
self._repo.wlock() + try: + for f in list: + if self._repo.dirstate[f] != 'r': + self._repo.ui.warn(_("%s not removed!\n") % f) + else: + fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f] + t = fctx.data() + self._repo.wwrite(f, t, fctx.flags()) + self._repo.dirstate.normal(f) + finally: + wlock.release() + + def copy(self, source, dest): + p = self._repo.wjoin(dest) + if not os.path.lexists(p): + self._repo.ui.warn(_("%s does not exist!\n") % dest) + elif not (os.path.isfile(p) or os.path.islink(p)): + self._repo.ui.warn(_("copy failed: %s is not a file or a " + "symbolic link\n") % dest) + else: + wlock = self._repo.wlock() + try: + if self._repo.dirstate[dest] in '?r': + self._repo.dirstate.add(dest) + self._repo.dirstate.copy(source, dest) + finally: + wlock.release() + +class workingfilectx(filectx): + """A workingfilectx object makes access to data related to a particular + file in the working directory convenient.""" + def __init__(self, repo, path, filelog=None, workingctx=None): + """changeid can be a changeset revision, node, or tag. + fileid can be a file revision or node.""" + self._repo = repo + self._path = path + self._changeid = None + self._filerev = self._filenode = None + + if filelog: + self._filelog = filelog + if workingctx: + self._changectx = workingctx + + @propertycache + def _changectx(self): + return workingctx(self._repo) + + def __nonzero__(self): + return True + + def __str__(self): + return "%s@%s" % (self.path(), self._changectx) + + def data(self): + return self._repo.wread(self._path) + def renamed(self): + rp = self._repo.dirstate.copied(self._path) + if not rp: + return None + return rp, self._changectx._parents[0]._manifest.get(rp, nullid) + + def parents(self): + '''return parent filectxs, following copies if necessary''' + def filenode(ctx, path): + return ctx._manifest.get(path, nullid) + + path = self._path + fl = self._filelog + pcl = self._changectx._parents + renamed = self.renamed() + + if renamed: + pl = [renamed + (None,)] + else: + pl = [(path, filenode(pcl[0], path), fl)] + + for pc in pcl[1:]: + pl.append((path, filenode(pc, path), fl)) + + return [filectx(self._repo, p, fileid=n, filelog=l) + for p, n, l in pl if n != nullid] + + def children(self): + return [] + + def size(self): + return os.lstat(self._repo.wjoin(self._path)).st_size + def date(self): + t, tz = self._changectx.date() + try: + return (int(os.lstat(self._repo.wjoin(self._path)).st_mtime), tz) + except OSError, err: + if err.errno != errno.ENOENT: + raise + return (t, tz) + + def cmp(self, fctx): + """compare with other file context + + returns True if different than fctx. + """ + # fctx should be a filectx (not a wfctx) + # invert comparison to reuse the same code path + return fctx.cmp(self) + +class memctx(object): + """Use memctx to perform in-memory commits via localrepo.commitctx(). + + Revision information is supplied at initialization time while + related files data and is made available through a callback + mechanism. 'repo' is the current localrepo, 'parents' is a + sequence of two parent revisions identifiers (pass None for every + missing parent), 'text' is the commit message and 'files' lists + names of files touched by the revision (normalized and relative to + repository root). + + filectxfn(repo, memctx, path) is a callable receiving the + repository, the current memctx object and the normalized path of + requested file, relative to repository root. It is fired by the + commit function for every file in 'files', but calls order is + undefined. 
If the file is available in the revision being + committed (updated or added), filectxfn returns a memfilectx + object. If the file was removed, filectxfn raises an + IOError. Moved files are represented by marking the source file + removed and the new file added with copy information (see + memfilectx). + + user receives the committer name and defaults to current + repository username, date is the commit date in any format + supported by util.parsedate() and defaults to current date, extra + is a dictionary of metadata or is left empty. + """ + def __init__(self, repo, parents, text, files, filectxfn, user=None, + date=None, extra=None): + self._repo = repo + self._rev = None + self._node = None + self._text = text + self._date = date and util.parsedate(date) or util.makedate() + self._user = user + parents = [(p or nullid) for p in parents] + p1, p2 = parents + self._parents = [changectx(self._repo, p) for p in (p1, p2)] + files = sorted(set(files)) + self._status = [files, [], [], [], []] + self._filectxfn = filectxfn + + self._extra = extra and extra.copy() or {} + if 'branch' not in self._extra: + self._extra['branch'] = 'default' + elif self._extra.get('branch') == '': + self._extra['branch'] = 'default' + + def __str__(self): + return str(self._parents[0]) + "+" + + def __int__(self): + return self._rev + + def __nonzero__(self): + return True + + def __getitem__(self, key): + return self.filectx(key) + + def p1(self): + return self._parents[0] + def p2(self): + return self._parents[1] + + def user(self): + return self._user or self._repo.ui.username() + def date(self): + return self._date + def description(self): + return self._text + def files(self): + return self.modified() + def modified(self): + return self._status[0] + def added(self): + return self._status[1] + def removed(self): + return self._status[2] + def deleted(self): + return self._status[3] + def unknown(self): + return self._status[4] + def ignored(self): + return self._status[5] + def clean(self): + return self._status[6] + def branch(self): + return self._extra['branch'] + def extra(self): + return self._extra + def flags(self, f): + return self[f].flags() + + def parents(self): + """return contexts for each parent changeset""" + return self._parents + + def filectx(self, path, filelog=None): + """get a file context from the working directory""" + return self._filectxfn(self._repo, self, path) + + def commit(self): + """commit context to the repo""" + return self._repo.commitctx(self) + +class memfilectx(object): + """memfilectx represents an in-memory file to commit. + + See memctx for more details. + """ + def __init__(self, path, data, islink=False, isexec=False, copied=None): + """ + path is the normalized file path relative to repository root. + data is the file content as a string. + islink is True if the file is a symbolic link. + isexec is True if the file is executable. 
+ copied is the source file path if current file was copied in the + revision being committed, or None.""" + self._path = path + self._data = data + self._flags = (islink and 'l' or '') + (isexec and 'x' or '') + self._copied = None + if copied: + self._copied = (copied, nullid) + + def __nonzero__(self): + return True + def __str__(self): + return "%s@%s" % (self.path(), self._changectx) + def path(self): + return self._path + def data(self): + return self._data + def flags(self): + return self._flags + def isexec(self): + return 'x' in self._flags + def islink(self): + return 'l' in self._flags + def renamed(self): + return self._copied diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/context.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/context.pyo Binary files differnew file mode 100644 index 0000000..f6da42c --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/context.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/copies.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/copies.py new file mode 100644 index 0000000..05342e1 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/copies.py @@ -0,0 +1,267 @@ +# copies.py - copy detection for Mercurial +# +# Copyright 2008 Matt Mackall <mpm@selenic.com> +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +import util +import heapq + +def _nonoverlap(d1, d2, d3): + "Return list of elements in d1 not in d2 or d3" + return sorted([d for d in d1 if d not in d3 and d not in d2]) + +def _dirname(f): + s = f.rfind("/") + if s == -1: + return "" + return f[:s] + +def _dirs(files): + d = set() + for f in files: + f = _dirname(f) + while f not in d: + d.add(f) + f = _dirname(f) + return d + +def _findlimit(repo, a, b): + """Find the earliest revision that's an ancestor of a or b but not both, + None if no such revision exists. 
+ """ + # basic idea: + # - mark a and b with different sides + # - if a parent's children are all on the same side, the parent is + # on that side, otherwise it is on no side + # - walk the graph in topological order with the help of a heap; + # - add unseen parents to side map + # - clear side of any parent that has children on different sides + # - track number of interesting revs that might still be on a side + # - track the lowest interesting rev seen + # - quit when interesting revs is zero + + cl = repo.changelog + working = len(cl) # pseudo rev for the working directory + if a is None: + a = working + if b is None: + b = working + + side = {a: -1, b: 1} + visit = [-a, -b] + heapq.heapify(visit) + interesting = len(visit) + hascommonancestor = False + limit = working + + while interesting: + r = -heapq.heappop(visit) + if r == working: + parents = [cl.rev(p) for p in repo.dirstate.parents()] + else: + parents = cl.parentrevs(r) + for p in parents: + if p < 0: + continue + if p not in side: + # first time we see p; add it to visit + side[p] = side[r] + if side[p]: + interesting += 1 + heapq.heappush(visit, -p) + elif side[p] and side[p] != side[r]: + # p was interesting but now we know better + side[p] = 0 + interesting -= 1 + hascommonancestor = True + if side[r]: + limit = r # lowest rev visited + interesting -= 1 + + if not hascommonancestor: + return None + return limit + +def copies(repo, c1, c2, ca, checkdirs=False): + """ + Find moves and copies between context c1 and c2 + """ + # avoid silly behavior for update from empty dir + if not c1 or not c2 or c1 == c2: + return {}, {} + + # avoid silly behavior for parent -> working dir + if c2.node() is None and c1.node() == repo.dirstate.parents()[0]: + return repo.dirstate.copies(), {} + + limit = _findlimit(repo, c1.rev(), c2.rev()) + if limit is None: + # no common ancestor, no copies + return {}, {} + m1 = c1.manifest() + m2 = c2.manifest() + ma = ca.manifest() + + def makectx(f, n): + if len(n) != 20: # in a working context? + if c1.rev() is None: + return c1.filectx(f) + return c2.filectx(f) + return repo.filectx(f, fileid=n) + + ctx = util.lrucachefunc(makectx) + copy = {} + fullcopy = {} + diverge = {} + + def related(f1, f2, limit): + # Walk back to common ancestor to see if the two files originate + # from the same file. Since workingfilectx's rev() is None it messes + # up the integer comparison logic, hence the pre-step check for + # None (f1 and f2 can only be workingfilectx's initially). 
+ + if f1 == f2: + return f1 # a match + + g1, g2 = f1.ancestors(), f2.ancestors() + try: + f1r, f2r = f1.rev(), f2.rev() + + if f1r is None: + f1 = g1.next() + if f2r is None: + f2 = g2.next() + + while 1: + f1r, f2r = f1.rev(), f2.rev() + if f1r > f2r: + f1 = g1.next() + elif f2r > f1r: + f2 = g2.next() + elif f1 == f2: + return f1 # a match + elif f1r == f2r or f1r < limit or f2r < limit: + return False # copy no longer relevant + except StopIteration: + return False + + def checkcopies(f, m1, m2): + '''check possible copies of f from m1 to m2''' + of = None + seen = set([f]) + for oc in ctx(f, m1[f]).ancestors(): + ocr = oc.rev() + of = oc.path() + if of in seen: + # check limit late - grab last rename before + if ocr < limit: + break + continue + seen.add(of) + + fullcopy[f] = of # remember for dir rename detection + if of not in m2: + continue # no match, keep looking + if m2[of] == ma.get(of): + break # no merge needed, quit early + c2 = ctx(of, m2[of]) + cr = related(oc, c2, ca.rev()) + if cr and (of == f or of == c2.path()): # non-divergent + copy[f] = of + of = None + break + + if of in ma: + diverge.setdefault(of, []).append(f) + + repo.ui.debug(" searching for copies back to rev %d\n" % limit) + + u1 = _nonoverlap(m1, m2, ma) + u2 = _nonoverlap(m2, m1, ma) + + if u1: + repo.ui.debug(" unmatched files in local:\n %s\n" + % "\n ".join(u1)) + if u2: + repo.ui.debug(" unmatched files in other:\n %s\n" + % "\n ".join(u2)) + + for f in u1: + checkcopies(f, m1, m2) + for f in u2: + checkcopies(f, m2, m1) + + diverge2 = set() + for of, fl in diverge.items(): + if len(fl) == 1 or of in c2: + del diverge[of] # not actually divergent, or not a rename + else: + diverge2.update(fl) # reverse map for below + + if fullcopy: + repo.ui.debug(" all copies found (* = to merge, ! = divergent):\n") + for f in fullcopy: + note = "" + if f in copy: + note += "*" + if f in diverge2: + note += "!" 
+ repo.ui.debug(" %s -> %s %s\n" % (f, fullcopy[f], note)) + del diverge2 + + if not fullcopy or not checkdirs: + return copy, diverge + + repo.ui.debug(" checking for directory renames\n") + + # generate a directory move map + d1, d2 = _dirs(m1), _dirs(m2) + invalid = set() + dirmove = {} + + # examine each file copy for a potential directory move, which is + # when all the files in a directory are moved to a new directory + for dst, src in fullcopy.iteritems(): + dsrc, ddst = _dirname(src), _dirname(dst) + if dsrc in invalid: + # already seen to be uninteresting + continue + elif dsrc in d1 and ddst in d1: + # directory wasn't entirely moved locally + invalid.add(dsrc) + elif dsrc in d2 and ddst in d2: + # directory wasn't entirely moved remotely + invalid.add(dsrc) + elif dsrc in dirmove and dirmove[dsrc] != ddst: + # files from the same directory moved to two different places + invalid.add(dsrc) + else: + # looks good so far + dirmove[dsrc + "/"] = ddst + "/" + + for i in invalid: + if i in dirmove: + del dirmove[i] + del d1, d2, invalid + + if not dirmove: + return copy, diverge + + for d in dirmove: + repo.ui.debug(" dir %s -> %s\n" % (d, dirmove[d])) + + # check unaccounted nonoverlapping files against directory moves + for f in u1 + u2: + if f not in fullcopy: + for d in dirmove: + if f.startswith(d): + # new file added in a directory that was moved, move it + df = dirmove[d] + f[len(d):] + if df not in copy: + copy[f] = df + repo.ui.debug(" file %s -> %s\n" % (f, copy[f])) + break + + return copy, diverge diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/copies.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/copies.pyo Binary files differnew file mode 100644 index 0000000..c69481f --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/copies.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/dagparser.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/dagparser.py new file mode 100644 index 0000000..e02faa5 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/dagparser.py @@ -0,0 +1,474 @@ +# dagparser.py - parser and generator for concise description of DAGs +# +# Copyright 2010 Peter Arrenbrecht <peter@arrenbrecht.ch> +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +import re, string +import util +from i18n import _ + +def parsedag(desc): + '''parses a DAG from a concise textual description; generates events + + "+n" is a linear run of n nodes based on the current default parent + "." is a single node based on the current default parent + "$" resets the default parent to -1 (implied at the start); + otherwise the default parent is always the last node created + "<p" sets the default parent to the backref p + "*p" is a fork at parent p, where p is a backref + "*p1/p2/.../pn" is a merge of parents p1..pn, where the pi are backrefs + "/p2/.../pn" is a merge of the preceding node and p2..pn + ":name" defines a label for the preceding node; labels can be redefined + "@text" emits an annotation event for text + "!command" emits an action event for the current node + "!!my command\n" is like "!", but to the end of the line + "#...\n" is a comment up to the end of the line + + Whitespace between the above elements is ignored. 
+ + A backref is either + * a number n, which references the node curr-n, where curr is the current + node, or + * the name of a label you placed earlier using ":name", or + * empty to denote the default parent. + + All string valued-elements are either strictly alphanumeric, or must + be enclosed in double quotes ("..."), with "\" as escape character. + + Generates sequence of + + ('n', (id, [parentids])) for node creation + ('l', (id, labelname)) for labels on nodes + ('a', text) for annotations + ('c', command) for actions (!) + ('C', command) for line actions (!!) + + Examples + -------- + + Example of a complex graph (output not shown for brevity): + + >>> len(list(parsedag(""" + ... + ... +3 # 3 nodes in linear run + ... :forkhere # a label for the last of the 3 nodes from above + ... +5 # 5 more nodes on one branch + ... :mergethis # label again + ... <forkhere # set default parent to labelled fork node + ... +10 # 10 more nodes on a parallel branch + ... @stable # following nodes will be annotated as "stable" + ... +5 # 5 nodes in stable + ... !addfile # custom command; could trigger new file in next node + ... +2 # two more nodes + ... /mergethis # merge last node with labelled node + ... +4 # 4 more nodes descending from merge node + ... + ... """))) + 34 + + Empty list: + + >>> list(parsedag("")) + [] + + A simple linear run: + + >>> list(parsedag("+3")) + [('n', (0, [-1])), ('n', (1, [0])), ('n', (2, [1]))] + + Some non-standard ways to define such runs: + + >>> list(parsedag("+1+2")) + [('n', (0, [-1])), ('n', (1, [0])), ('n', (2, [1]))] + + >>> list(parsedag("+1*1*")) + [('n', (0, [-1])), ('n', (1, [0])), ('n', (2, [1]))] + + >>> list(parsedag("*")) + [('n', (0, [-1]))] + + >>> list(parsedag("...")) + [('n', (0, [-1])), ('n', (1, [0])), ('n', (2, [1]))] + + A fork and a join, using numeric back references: + + >>> list(parsedag("+2*2*/2")) + [('n', (0, [-1])), ('n', (1, [0])), ('n', (2, [0])), ('n', (3, [2, 1]))] + + >>> list(parsedag("+2<2+1/2")) + [('n', (0, [-1])), ('n', (1, [0])), ('n', (2, [0])), ('n', (3, [2, 1]))] + + Placing a label: + + >>> list(parsedag("+1 :mylabel +1")) + [('n', (0, [-1])), ('l', (0, 'mylabel')), ('n', (1, [0]))] + + An empty label (silly, really): + + >>> list(parsedag("+1:+1")) + [('n', (0, [-1])), ('l', (0, '')), ('n', (1, [0]))] + + Fork and join, but with labels instead of numeric back references: + + >>> list(parsedag("+1:f +1:p2 *f */p2")) + [('n', (0, [-1])), ('l', (0, 'f')), ('n', (1, [0])), ('l', (1, 'p2')), + ('n', (2, [0])), ('n', (3, [2, 1]))] + + >>> list(parsedag("+1:f +1:p2 <f +1 /p2")) + [('n', (0, [-1])), ('l', (0, 'f')), ('n', (1, [0])), ('l', (1, 'p2')), + ('n', (2, [0])), ('n', (3, [2, 1]))] + + Restarting from the root: + + >>> list(parsedag("+1 $ +1")) + [('n', (0, [-1])), ('n', (1, [-1]))] + + Annotations, which are meant to introduce sticky state for subsequent nodes: + + >>> list(parsedag("+1 @ann +1")) + [('n', (0, [-1])), ('a', 'ann'), ('n', (1, [0]))] + + >>> list(parsedag('+1 @"my annotation" +1')) + [('n', (0, [-1])), ('a', 'my annotation'), ('n', (1, [0]))] + + Commands, which are meant to operate on the most recently created node: + + >>> list(parsedag("+1 !cmd +1")) + [('n', (0, [-1])), ('c', 'cmd'), ('n', (1, [0]))] + + >>> list(parsedag('+1 !"my command" +1')) + [('n', (0, [-1])), ('c', 'my command'), ('n', (1, [0]))] + + >>> list(parsedag('+1 !!my command line\\n +1')) + [('n', (0, [-1])), ('C', 'my command line'), ('n', (1, [0]))] + + Comments, which extend to the end of the line: + + >>> list(parsedag('+1 # 
comment\\n+1')) + [('n', (0, [-1])), ('n', (1, [0]))] + + Error: + + >>> try: list(parsedag('+1 bad')) + ... except Exception, e: print e + invalid character in dag description: bad... + + ''' + if not desc: + return + + wordchars = string.ascii_letters + string.digits + + labels = {} + p1 = -1 + r = 0 + + def resolve(ref): + if not ref: + return p1 + elif ref[0] in string.digits: + return r - int(ref) + else: + return labels[ref] + + chiter = (c for c in desc) + + def nextch(): + try: + return chiter.next() + except StopIteration: + return '\0' + + def nextrun(c, allow): + s = '' + while c in allow: + s += c + c = nextch() + return c, s + + def nextdelimited(c, limit, escape): + s = '' + while c != limit: + if c == escape: + c = nextch() + s += c + c = nextch() + return nextch(), s + + def nextstring(c): + if c == '"': + return nextdelimited(nextch(), '"', '\\') + else: + return nextrun(c, wordchars) + + c = nextch() + while c != '\0': + while c in string.whitespace: + c = nextch() + if c == '.': + yield 'n', (r, [p1]) + p1 = r + r += 1 + c = nextch() + elif c == '+': + c, digs = nextrun(nextch(), string.digits) + n = int(digs) + for i in xrange(0, n): + yield 'n', (r, [p1]) + p1 = r + r += 1 + elif c in '*/': + if c == '*': + c = nextch() + c, pref = nextstring(c) + prefs = [pref] + while c == '/': + c, pref = nextstring(nextch()) + prefs.append(pref) + ps = [resolve(ref) for ref in prefs] + yield 'n', (r, ps) + p1 = r + r += 1 + elif c == '<': + c, ref = nextstring(nextch()) + p1 = resolve(ref) + elif c == ':': + c, name = nextstring(nextch()) + labels[name] = p1 + yield 'l', (p1, name) + elif c == '@': + c, text = nextstring(nextch()) + yield 'a', text + elif c == '!': + c = nextch() + if c == '!': + cmd = '' + c = nextch() + while c not in '\n\r\0': + cmd += c + c = nextch() + yield 'C', cmd + else: + c, cmd = nextstring(c) + yield 'c', cmd + elif c == '#': + while c not in '\n\r\0': + c = nextch() + elif c == '$': + p1 = -1 + c = nextch() + elif c == '\0': + return # in case it was preceded by whitespace + else: + s = '' + i = 0 + while c != '\0' and i < 10: + s += c + i += 1 + c = nextch() + raise util.Abort(_("invalid character in dag description: %s...") % s) + +def dagtextlines(events, + addspaces=True, + wraplabels=False, + wrapannotations=False, + wrapcommands=False, + wrapnonlinear=False, + usedots=False, + maxlinewidth=70): + '''generates single lines for dagtext()''' + + def wrapstring(text): + if re.match("^[0-9a-z]*$", text): + return text + return '"' + text.replace('\\', '\\\\').replace('"', '\"') + '"' + + def gen(): + labels = {} + run = 0 + wantr = 0 + needroot = False + for kind, data in events: + if kind == 'n': + r, ps = data + + # sanity check + if r != wantr: + raise util.Abort(_("expected id %i, got %i") % (wantr, r)) + if not ps: + ps = [-1] + else: + for p in ps: + if p >= r: + raise util.Abort(_("parent id %i is larger than " + "current id %i") % (p, r)) + wantr += 1 + + # new root? + p1 = r - 1 + if len(ps) == 1 and ps[0] == -1: + if needroot: + if run: + yield '+' + str(run) + run = 0 + if wrapnonlinear: + yield '\n' + yield '$' + p1 = -1 + else: + needroot = True + if len(ps) == 1 and ps[0] == p1: + if usedots: + yield "." 
+ else: + run += 1 + else: + if run: + yield '+' + str(run) + run = 0 + if wrapnonlinear: + yield '\n' + prefs = [] + for p in ps: + if p == p1: + prefs.append('') + elif p in labels: + prefs.append(labels[p]) + else: + prefs.append(str(r - p)) + yield '*' + '/'.join(prefs) + else: + if run: + yield '+' + str(run) + run = 0 + if kind == 'l': + rid, name = data + labels[rid] = name + yield ':' + name + if wraplabels: + yield '\n' + elif kind == 'c': + yield '!' + wrapstring(data) + if wrapcommands: + yield '\n' + elif kind == 'C': + yield '!!' + data + yield '\n' + elif kind == 'a': + if wrapannotations: + yield '\n' + yield '@' + wrapstring(data) + elif kind == '#': + yield '#' + data + yield '\n' + else: + raise util.Abort(_("invalid event type in dag: %s") + % str((type, data))) + if run: + yield '+' + str(run) + + line = '' + for part in gen(): + if part == '\n': + if line: + yield line + line = '' + else: + if len(line) + len(part) >= maxlinewidth: + yield line + line = '' + elif addspaces and line and part != '.': + line += ' ' + line += part + if line: + yield line + +def dagtext(dag, + addspaces=True, + wraplabels=False, + wrapannotations=False, + wrapcommands=False, + wrapnonlinear=False, + usedots=False, + maxlinewidth=70): + '''generates lines of a textual representation for a dag event stream + + events should generate what parsedag() does, so: + + ('n', (id, [parentids])) for node creation + ('l', (id, labelname)) for labels on nodes + ('a', text) for annotations + ('c', text) for commands + ('C', text) for line commands ('!!') + ('#', text) for comment lines + + Parent nodes must come before child nodes. + + Examples + -------- + + Linear run: + + >>> dagtext([('n', (0, [-1])), ('n', (1, [0]))]) + '+2' + + Two roots: + + >>> dagtext([('n', (0, [-1])), ('n', (1, [-1]))]) + '+1 $ +1' + + Fork and join: + + >>> dagtext([('n', (0, [-1])), ('n', (1, [0])), ('n', (2, [0])), + ... ('n', (3, [2, 1]))]) + '+2 *2 */2' + + Fork and join with labels: + + >>> dagtext([('n', (0, [-1])), ('l', (0, 'f')), ('n', (1, [0])), + ... 
('l', (1, 'p2')), ('n', (2, [0])), ('n', (3, [2, 1]))]) + '+1 :f +1 :p2 *f */p2' + + Annotations: + + >>> dagtext([('n', (0, [-1])), ('a', 'ann'), ('n', (1, [0]))]) + '+1 @ann +1' + + >>> dagtext([('n', (0, [-1])), ('a', 'my annotation'), ('n', (1, [0]))]) + '+1 @"my annotation" +1' + + Commands: + + >>> dagtext([('n', (0, [-1])), ('c', 'cmd'), ('n', (1, [0]))]) + '+1 !cmd +1' + + >>> dagtext([('n', (0, [-1])), ('c', 'my command'), ('n', (1, [0]))]) + '+1 !"my command" +1' + + >>> dagtext([('n', (0, [-1])), ('C', 'my command line'), ('n', (1, [0]))]) + '+1 !!my command line\\n+1' + + Comments: + + >>> dagtext([('n', (0, [-1])), ('#', ' comment'), ('n', (1, [0]))]) + '+1 # comment\\n+1' + + >>> dagtext([]) + '' + + Combining parsedag and dagtext: + + >>> dagtext(parsedag('+1 :f +1 :p2 *f */p2')) + '+1 :f +1 :p2 *f */p2' + + ''' + return "\n".join(dagtextlines(dag, + addspaces, + wraplabels, + wrapannotations, + wrapcommands, + wrapnonlinear, + usedots, + maxlinewidth)) diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/dagparser.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/dagparser.pyo Binary files differnew file mode 100644 index 0000000..1beb9cb --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/dagparser.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/demandimport.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/demandimport.py new file mode 100644 index 0000000..cee569b --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/demandimport.py @@ -0,0 +1,146 @@ +# demandimport.py - global demand-loading of modules for Mercurial +# +# Copyright 2006, 2007 Matt Mackall <mpm@selenic.com> +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +''' +demandimport - automatic demandloading of modules + +To enable this module, do: + + import demandimport; demandimport.enable() + +Imports of the following forms will be demand-loaded: + + import a, b.c + import a.b as c + from a import b,c # a will be loaded immediately + +These imports will not be delayed: + + from a import * + b = __import__(a) +''' + +import __builtin__ +_origimport = __import__ + +class _demandmod(object): + """module demand-loader and proxy""" + def __init__(self, name, globals, locals): + if '.' in name: + head, rest = name.split('.', 1) + after = [rest] + else: + head = name + after = [] + object.__setattr__(self, "_data", (head, globals, locals, after)) + object.__setattr__(self, "_module", None) + def _extend(self, name): + """add to the list of submodules to load""" + self._data[3].append(name) + def _load(self): + if not self._module: + head, globals, locals, after = self._data + mod = _origimport(head, globals, locals) + # load submodules + def subload(mod, p): + h, t = p, None + if '.' in p: + h, t = p.split('.', 1) + if not hasattr(mod, h): + setattr(mod, h, _demandmod(p, mod.__dict__, mod.__dict__)) + elif t: + subload(getattr(mod, h), t) + + for x in after: + subload(mod, x) + + # are we in the locals dictionary still? 
+ if locals and locals.get(head) == self: + locals[head] = mod + object.__setattr__(self, "_module", mod) + + def __repr__(self): + if self._module: + return "<proxied module '%s'>" % self._data[0] + return "<unloaded module '%s'>" % self._data[0] + def __call__(self, *args, **kwargs): + raise TypeError("%s object is not callable" % repr(self)) + def __getattribute__(self, attr): + if attr in ('_data', '_extend', '_load', '_module'): + return object.__getattribute__(self, attr) + self._load() + return getattr(self._module, attr) + def __setattr__(self, attr, val): + self._load() + setattr(self._module, attr, val) + +def _demandimport(name, globals=None, locals=None, fromlist=None, level=None): + if not locals or name in ignore or fromlist == ('*',): + # these cases we can't really delay + if level is None: + return _origimport(name, globals, locals, fromlist) + else: + return _origimport(name, globals, locals, fromlist, level) + elif not fromlist: + # import a [as b] + if '.' in name: # a.b + base, rest = name.split('.', 1) + # email.__init__ loading email.mime + if globals and globals.get('__name__', None) == base: + return _origimport(name, globals, locals, fromlist) + # if a is already demand-loaded, add b to its submodule list + if base in locals: + if isinstance(locals[base], _demandmod): + locals[base]._extend(rest) + return locals[base] + return _demandmod(name, globals, locals) + else: + if level is not None: + # from . import b,c,d or from .a import b,c,d + return _origimport(name, globals, locals, fromlist, level) + # from a import b,c,d + mod = _origimport(name, globals, locals) + # recurse down the module chain + for comp in name.split('.')[1:]: + if not hasattr(mod, comp): + setattr(mod, comp, _demandmod(comp, mod.__dict__, mod.__dict__)) + mod = getattr(mod, comp) + for x in fromlist: + # set requested submodules for demand load + if not(hasattr(mod, x)): + setattr(mod, x, _demandmod(x, mod.__dict__, locals)) + return mod + +ignore = [ + '_hashlib', + '_xmlplus', + 'fcntl', + 'win32com.gen_py', + '_winreg', # 2.7 mimetypes needs immediate ImportError + 'pythoncom', + # imported by tarfile, not available under Windows + 'pwd', + 'grp', + # imported by profile, itself imported by hotshot.stats, + # not available under Windows + 'resource', + # this trips up many extension authors + 'gtk', + # setuptools' pkg_resources.py expects "from __main__ import x" to + # raise ImportError if x not defined + '__main__', + '_ssl', # conditional imports in the stdlib, issue1964 + ] + +def enable(): + "enable global demand-loading of modules" + __builtin__.__import__ = _demandimport + +def disable(): + "disable global demand-loading of modules" + __builtin__.__import__ = _origimport + diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/demandimport.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/demandimport.pyo Binary files differnew file mode 100644 index 0000000..eb34c3c --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/demandimport.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/diffhelpers.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/diffhelpers.py new file mode 100644 index 0000000..b92bb45 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/diffhelpers.py @@ -0,0 +1,7 @@ +def __bootstrap__(): + global __bootstrap__, __loader__, __file__ + import sys, pkg_resources, imp + __file__ = pkg_resources.resource_filename(__name__,'diffhelpers.so') + __loader__ = None; del 
__bootstrap__, __loader__ + imp.load_dynamic(__name__,__file__) +__bootstrap__() diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/diffhelpers.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/diffhelpers.pyo Binary files differnew file mode 100644 index 0000000..4e81b43 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/diffhelpers.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/diffhelpers.so b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/diffhelpers.so Binary files differnew file mode 100755 index 0000000..294c32e --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/diffhelpers.so diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/dirstate.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/dirstate.py new file mode 100644 index 0000000..df27f87 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/dirstate.py @@ -0,0 +1,681 @@ +# dirstate.py - working directory tracking for mercurial +# +# Copyright 2005-2007 Matt Mackall <mpm@selenic.com> +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +from node import nullid +from i18n import _ +import util, ignore, osutil, parsers +import struct, os, stat, errno +import cStringIO + +_format = ">cllll" +propertycache = util.propertycache + +def _finddirs(path): + pos = path.rfind('/') + while pos != -1: + yield path[:pos] + pos = path.rfind('/', 0, pos) + +def _incdirs(dirs, path): + for base in _finddirs(path): + if base in dirs: + dirs[base] += 1 + return + dirs[base] = 1 + +def _decdirs(dirs, path): + for base in _finddirs(path): + if dirs[base] > 1: + dirs[base] -= 1 + return + del dirs[base] + +class dirstate(object): + + def __init__(self, opener, ui, root): + '''Create a new dirstate object. + + opener is an open()-like callable that can be used to open the + dirstate file; root is the root of the directory tracked by + the dirstate. 
+ ''' + self._opener = opener + self._root = root + self._rootdir = os.path.join(root, '') + self._dirty = False + self._dirtypl = False + self._ui = ui + + @propertycache + def _map(self): + '''Return the dirstate contents as a map from filename to + (state, mode, size, time).''' + self._read() + return self._map + + @propertycache + def _copymap(self): + self._read() + return self._copymap + + @propertycache + def _foldmap(self): + f = {} + for name in self._map: + f[os.path.normcase(name)] = name + return f + + @propertycache + def _branch(self): + try: + return self._opener("branch").read().strip() or "default" + except IOError: + return "default" + + @propertycache + def _pl(self): + try: + st = self._opener("dirstate").read(40) + l = len(st) + if l == 40: + return st[:20], st[20:40] + elif l > 0 and l < 40: + raise util.Abort(_('working directory state appears damaged!')) + except IOError, err: + if err.errno != errno.ENOENT: + raise + return [nullid, nullid] + + @propertycache + def _dirs(self): + dirs = {} + for f, s in self._map.iteritems(): + if s[0] != 'r': + _incdirs(dirs, f) + return dirs + + @propertycache + def _ignore(self): + files = [self._join('.hgignore')] + for name, path in self._ui.configitems("ui"): + if name == 'ignore' or name.startswith('ignore.'): + files.append(util.expandpath(path)) + return ignore.ignore(self._root, files, self._ui.warn) + + @propertycache + def _slash(self): + return self._ui.configbool('ui', 'slash') and os.sep != '/' + + @propertycache + def _checklink(self): + return util.checklink(self._root) + + @propertycache + def _checkexec(self): + return util.checkexec(self._root) + + @propertycache + def _checkcase(self): + return not util.checkcase(self._join('.hg')) + + def _join(self, f): + # much faster than os.path.join() + # it's safe because f is always a relative path + return self._rootdir + f + + def flagfunc(self, fallback): + if self._checklink: + if self._checkexec: + def f(x): + p = self._join(x) + if os.path.islink(p): + return 'l' + if util.is_exec(p): + return 'x' + return '' + return f + def f(x): + if os.path.islink(self._join(x)): + return 'l' + if 'x' in fallback(x): + return 'x' + return '' + return f + if self._checkexec: + def f(x): + if 'l' in fallback(x): + return 'l' + if util.is_exec(self._join(x)): + return 'x' + return '' + return f + return fallback + + def getcwd(self): + cwd = os.getcwd() + if cwd == self._root: + return '' + # self._root ends with a path separator if self._root is '/' or 'C:\' + rootsep = self._root + if not util.endswithsep(rootsep): + rootsep += os.sep + if cwd.startswith(rootsep): + return cwd[len(rootsep):] + else: + # we're outside the repo. return an absolute path. + return cwd + + def pathto(self, f, cwd=None): + if cwd is None: + cwd = self.getcwd() + path = util.pathto(self._root, cwd, f) + if self._slash: + return util.normpath(path) + return path + + def __getitem__(self, key): + '''Return the current state of key (a filename) in the dirstate. + + States are: + n normal + m needs merging + r marked for removal + a marked for addition + ? 
not tracked + ''' + return self._map.get(key, ("?",))[0] + + def __contains__(self, key): + return key in self._map + + def __iter__(self): + for x in sorted(self._map): + yield x + + def parents(self): + return self._pl + + def branch(self): + return self._branch + + def setparents(self, p1, p2=nullid): + self._dirty = self._dirtypl = True + self._pl = p1, p2 + + def setbranch(self, branch): + if branch in ['tip', '.', 'null']: + raise util.Abort(_('the name \'%s\' is reserved') % branch) + self._branch = branch + self._opener("branch", "w").write(branch + '\n') + + def _read(self): + self._map = {} + self._copymap = {} + try: + st = self._opener("dirstate").read() + except IOError, err: + if err.errno != errno.ENOENT: + raise + return + if not st: + return + + p = parsers.parse_dirstate(self._map, self._copymap, st) + if not self._dirtypl: + self._pl = p + + def invalidate(self): + for a in "_map _copymap _foldmap _branch _pl _dirs _ignore".split(): + if a in self.__dict__: + delattr(self, a) + self._dirty = False + + def copy(self, source, dest): + """Mark dest as a copy of source. Unmark dest if source is None.""" + if source == dest: + return + self._dirty = True + if source is not None: + self._copymap[dest] = source + elif dest in self._copymap: + del self._copymap[dest] + + def copied(self, file): + return self._copymap.get(file, None) + + def copies(self): + return self._copymap + + def _droppath(self, f): + if self[f] not in "?r" and "_dirs" in self.__dict__: + _decdirs(self._dirs, f) + + def _addpath(self, f, check=False): + oldstate = self[f] + if check or oldstate == "r": + if '\r' in f or '\n' in f: + raise util.Abort( + _("'\\n' and '\\r' disallowed in filenames: %r") % f) + if f in self._dirs: + raise util.Abort(_('directory %r already in dirstate') % f) + # shadows + for d in _finddirs(f): + if d in self._dirs: + break + if d in self._map and self[d] != 'r': + raise util.Abort( + _('file %r in dirstate clashes with %r') % (d, f)) + if oldstate in "?r" and "_dirs" in self.__dict__: + _incdirs(self._dirs, f) + + def normal(self, f): + '''Mark a file normal and clean.''' + self._dirty = True + self._addpath(f) + s = os.lstat(self._join(f)) + self._map[f] = ('n', s.st_mode, s.st_size, int(s.st_mtime)) + if f in self._copymap: + del self._copymap[f] + + def normallookup(self, f): + '''Mark a file normal, but possibly dirty.''' + if self._pl[1] != nullid and f in self._map: + # if there is a merge going on and the file was either + # in state 'm' (-1) or coming from other parent (-2) before + # being removed, restore that state. 
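As an aside on the state letters documented in __getitem__ above and the special size markers used by normallookup()/remove(): each tracked path maps to a (state, mode, size, mtime) tuple, with size -1 standing for "merged" and -2 for "came from the other parent". A minimal standalone sketch with made-up values (not Mercurial's API):

# Toy dirstate map: path -> (state, mode, size, mtime).  Values are invented.
toymap = {
    'README':    ('n', 33188, 1024, 1300000000),  # tracked and clean
    'merged.py': ('m', 33188, -1, -1),            # needs merging (size -1)
    'theirs.py': ('n', 0, -2, -1),                # from the other parent (size -2)
    'gone.txt':  ('r', 0, 0, 0),                  # marked for removal
    'new.py':    ('a', 0, -1, -1),                # marked for addition
}

def state(path):
    # same idea as dirstate.__getitem__: unknown paths read back as '?'
    return toymap.get(path, ('?',))[0]

assert state('README') == 'n'
assert state('untracked.txt') == '?'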
+ entry = self._map[f] + if entry[0] == 'r' and entry[2] in (-1, -2): + source = self._copymap.get(f) + if entry[2] == -1: + self.merge(f) + elif entry[2] == -2: + self.otherparent(f) + if source: + self.copy(source, f) + return + if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2: + return + self._dirty = True + self._addpath(f) + self._map[f] = ('n', 0, -1, -1) + if f in self._copymap: + del self._copymap[f] + + def otherparent(self, f): + '''Mark as coming from the other parent, always dirty.''' + if self._pl[1] == nullid: + raise util.Abort(_("setting %r to other parent " + "only allowed in merges") % f) + self._dirty = True + self._addpath(f) + self._map[f] = ('n', 0, -2, -1) + if f in self._copymap: + del self._copymap[f] + + def add(self, f): + '''Mark a file added.''' + self._dirty = True + self._addpath(f, True) + self._map[f] = ('a', 0, -1, -1) + if f in self._copymap: + del self._copymap[f] + + def remove(self, f): + '''Mark a file removed.''' + self._dirty = True + self._droppath(f) + size = 0 + if self._pl[1] != nullid and f in self._map: + # backup the previous state + entry = self._map[f] + if entry[0] == 'm': # merge + size = -1 + elif entry[0] == 'n' and entry[2] == -2: # other parent + size = -2 + self._map[f] = ('r', 0, size, 0) + if size == 0 and f in self._copymap: + del self._copymap[f] + + def merge(self, f): + '''Mark a file merged.''' + self._dirty = True + s = os.lstat(self._join(f)) + self._addpath(f) + self._map[f] = ('m', s.st_mode, s.st_size, int(s.st_mtime)) + if f in self._copymap: + del self._copymap[f] + + def forget(self, f): + '''Forget a file.''' + self._dirty = True + try: + self._droppath(f) + del self._map[f] + except KeyError: + self._ui.warn(_("not in dirstate: %s\n") % f) + + def _normalize(self, path, knownpath): + norm_path = os.path.normcase(path) + fold_path = self._foldmap.get(norm_path, None) + if fold_path is None: + if knownpath or not os.path.lexists(os.path.join(self._root, path)): + fold_path = path + else: + fold_path = self._foldmap.setdefault(norm_path, + util.fspath(path, self._root)) + return fold_path + + def clear(self): + self._map = {} + if "_dirs" in self.__dict__: + delattr(self, "_dirs") + self._copymap = {} + self._pl = [nullid, nullid] + self._dirty = True + + def rebuild(self, parent, files): + self.clear() + for f in files: + if 'x' in files.flags(f): + self._map[f] = ('n', 0777, -1, 0) + else: + self._map[f] = ('n', 0666, -1, 0) + self._pl = (parent, nullid) + self._dirty = True + + def write(self): + if not self._dirty: + return + st = self._opener("dirstate", "w", atomictemp=True) + + # use the modification time of the newly created temporary file as the + # filesystem's notion of 'now' + now = int(util.fstat(st).st_mtime) + + cs = cStringIO.StringIO() + copymap = self._copymap + pack = struct.pack + write = cs.write + write("".join(self._pl)) + for f, e in self._map.iteritems(): + if e[0] == 'n' and e[3] == now: + # The file was last modified "simultaneously" with the current + # write to dirstate (i.e. within the same second for file- + # systems with a granularity of 1 sec). This commonly happens + # for at least a couple of files on 'update'. + # The user could change the file without changing its size + # within the same second. Invalidate the file's stat data in + # dirstate, forcing future 'status' calls to compare the + # contents of the file. This prevents mistakenly treating such + # files as clean. 
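The write() method further down serializes each entry with the module-level _format = ">cllll" header followed by the path (or "path\0copysource" for copies). A simplified round-trip sketch of that record layout; the real reading side lives in the C parsers module, so this is only illustrative:

import struct

_format = ">cllll"   # state, mode, size, mtime, length of the path field

def pack_entry(state, mode, size, mtime, path, copysource=None):
    if copysource:
        path = "%s\0%s" % (path, copysource)
    return struct.pack(_format, state, mode, size, mtime, len(path)) + path

def unpack_entry(record):
    hsize = struct.calcsize(_format)
    state, mode, size, mtime, flen = struct.unpack(_format, record[:hsize])
    fields = record[hsize:hsize + flen].split("\0")
    copysource = fields[1] if len(fields) > 1 else None
    return state, mode, size, mtime, fields[0], copysource

record = pack_entry("n", 0, 11, 1300000000, "a.txt", copysource="b.txt")
assert unpack_entry(record)[4:] == ("a.txt", "b.txt")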
+ e = (e[0], 0, -1, -1) # mark entry as 'unset' + self._map[f] = e + + if f in copymap: + f = "%s\0%s" % (f, copymap[f]) + e = pack(_format, e[0], e[1], e[2], e[3], len(f)) + write(e) + write(f) + st.write(cs.getvalue()) + st.rename() + self._dirty = self._dirtypl = False + + def _dirignore(self, f): + if f == '.': + return False + if self._ignore(f): + return True + for p in _finddirs(f): + if self._ignore(p): + return True + return False + + def walk(self, match, subrepos, unknown, ignored): + ''' + Walk recursively through the directory tree, finding all files + matched by match. + + Return a dict mapping filename to stat-like object (either + mercurial.osutil.stat instance or return value of os.stat()). + ''' + + def fwarn(f, msg): + self._ui.warn('%s: %s\n' % (self.pathto(f), msg)) + return False + + def badtype(mode): + kind = _('unknown') + if stat.S_ISCHR(mode): + kind = _('character device') + elif stat.S_ISBLK(mode): + kind = _('block device') + elif stat.S_ISFIFO(mode): + kind = _('fifo') + elif stat.S_ISSOCK(mode): + kind = _('socket') + elif stat.S_ISDIR(mode): + kind = _('directory') + return _('unsupported file type (type is %s)') % kind + + ignore = self._ignore + dirignore = self._dirignore + if ignored: + ignore = util.never + dirignore = util.never + elif not unknown: + # if unknown and ignored are False, skip step 2 + ignore = util.always + dirignore = util.always + + matchfn = match.matchfn + badfn = match.bad + dmap = self._map + normpath = util.normpath + listdir = osutil.listdir + lstat = os.lstat + getkind = stat.S_IFMT + dirkind = stat.S_IFDIR + regkind = stat.S_IFREG + lnkkind = stat.S_IFLNK + join = self._join + work = [] + wadd = work.append + + exact = skipstep3 = False + if matchfn == match.exact: # match.exact + exact = True + dirignore = util.always # skip step 2 + elif match.files() and not match.anypats(): # match.match, no patterns + skipstep3 = True + + if self._checkcase: + normalize = self._normalize + skipstep3 = False + else: + normalize = lambda x, y: x + + files = sorted(match.files()) + subrepos.sort() + i, j = 0, 0 + while i < len(files) and j < len(subrepos): + subpath = subrepos[j] + "/" + if not files[i].startswith(subpath): + i += 1 + continue + while files and files[i].startswith(subpath): + del files[i] + j += 1 + + if not files or '.' in files: + files = [''] + results = dict.fromkeys(subrepos) + results['.hg'] = None + + # step 1: find all explicit files + for ff in files: + nf = normalize(normpath(ff), False) + if nf in results: + continue + + try: + st = lstat(join(nf)) + kind = getkind(st.st_mode) + if kind == dirkind: + skipstep3 = False + if nf in dmap: + #file deleted on disk but still in dirstate + results[nf] = None + match.dir(nf) + if not dirignore(nf): + wadd(nf) + elif kind == regkind or kind == lnkkind: + results[nf] = st + else: + badfn(ff, badtype(kind)) + if nf in dmap: + results[nf] = None + except OSError, inst: + if nf in dmap: # does it exactly match a file? + results[nf] = None + else: # does it match a directory? 
+ prefix = nf + "/" + for fn in dmap: + if fn.startswith(prefix): + match.dir(nf) + skipstep3 = False + break + else: + badfn(ff, inst.strerror) + + # step 2: visit subdirectories + while work: + nd = work.pop() + skip = None + if nd == '.': + nd = '' + else: + skip = '.hg' + try: + entries = listdir(join(nd), stat=True, skip=skip) + except OSError, inst: + if inst.errno == errno.EACCES: + fwarn(nd, inst.strerror) + continue + raise + for f, kind, st in entries: + nf = normalize(nd and (nd + "/" + f) or f, True) + if nf not in results: + if kind == dirkind: + if not ignore(nf): + match.dir(nf) + wadd(nf) + if nf in dmap and matchfn(nf): + results[nf] = None + elif kind == regkind or kind == lnkkind: + if nf in dmap: + if matchfn(nf): + results[nf] = st + elif matchfn(nf) and not ignore(nf): + results[nf] = st + elif nf in dmap and matchfn(nf): + results[nf] = None + + # step 3: report unseen items in the dmap hash + if not skipstep3 and not exact: + visit = sorted([f for f in dmap if f not in results and matchfn(f)]) + for nf, st in zip(visit, util.statfiles([join(i) for i in visit])): + if not st is None and not getkind(st.st_mode) in (regkind, lnkkind): + st = None + results[nf] = st + for s in subrepos: + del results[s] + del results['.hg'] + return results + + def status(self, match, subrepos, ignored, clean, unknown): + '''Determine the status of the working copy relative to the + dirstate and return a tuple of lists (unsure, modified, added, + removed, deleted, unknown, ignored, clean), where: + + unsure: + files that might have been modified since the dirstate was + written, but need to be read to be sure (size is the same + but mtime differs) + modified: + files that have definitely been modified since the dirstate + was written (different size or mode) + added: + files that have been explicitly added with hg add + removed: + files that have been explicitly removed with hg remove + deleted: + files that have been deleted through other means ("missing") + unknown: + files not in the dirstate that are not ignored + ignored: + files not in the dirstate that are ignored + (by _dirignore()) + clean: + files that have definitely not been modified since the + dirstate was written + ''' + listignored, listclean, listunknown = ignored, clean, unknown + lookup, modified, added, unknown, ignored = [], [], [], [], [] + removed, deleted, clean = [], [], [] + + dmap = self._map + ladd = lookup.append # aka "unsure" + madd = modified.append + aadd = added.append + uadd = unknown.append + iadd = ignored.append + radd = removed.append + dadd = deleted.append + cadd = clean.append + + lnkkind = stat.S_IFLNK + + for fn, st in self.walk(match, subrepos, listunknown, + listignored).iteritems(): + if fn not in dmap: + if (listignored or match.exact(fn)) and self._dirignore(fn): + if listignored: + iadd(fn) + elif listunknown: + uadd(fn) + continue + + state, mode, size, time = dmap[fn] + + if not st and state in "nma": + dadd(fn) + elif state == 'n': + # The "mode & lnkkind != lnkkind or self._checklink" + # lines are an expansion of "islink => checklink" + # where islink means "is this a link?" and checklink + # means "can we check links?". 
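The comment above is worth unpacking: the guard "(mode & lnkkind != lnkkind or self._checklink)" in the status check below is simply the implication "islink => checklink" written as "(not islink) or checklink". A small standalone illustration (hypothetical helper names):

import stat

def implies(a, b):
    return (not a) or b

def mode_is_comparable(mode, checklink):
    # symlink modes are only trustworthy when the platform can check links
    islink = stat.S_IFMT(mode) == stat.S_IFLNK
    return implies(islink, checklink)

assert mode_is_comparable(stat.S_IFLNK, checklink=True)
assert not mode_is_comparable(stat.S_IFLNK, checklink=False)
assert mode_is_comparable(stat.S_IFREG, checklink=False)   # regular files always compare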
+ if (size >= 0 and + (size != st.st_size + or ((mode ^ st.st_mode) & 0100 and self._checkexec)) + and (mode & lnkkind != lnkkind or self._checklink) + or size == -2 # other parent + or fn in self._copymap): + madd(fn) + elif (time != int(st.st_mtime) + and (mode & lnkkind != lnkkind or self._checklink)): + ladd(fn) + elif listclean: + cadd(fn) + elif state == 'm': + madd(fn) + elif state == 'a': + aadd(fn) + elif state == 'r': + radd(fn) + + return (lookup, modified, added, removed, deleted, unknown, ignored, + clean) diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/dirstate.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/dirstate.pyo Binary files differnew file mode 100644 index 0000000..ed809e8 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/dirstate.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/discovery.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/discovery.py new file mode 100644 index 0000000..cd361d6 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/discovery.py @@ -0,0 +1,308 @@ +# discovery.py - protocol changeset discovery functions +# +# Copyright 2010 Matt Mackall <mpm@selenic.com> +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +from node import nullid, short +from i18n import _ +import util, error + +def findcommonincoming(repo, remote, heads=None, force=False): + """Return a tuple (common, missing roots, heads) used to identify + missing nodes from remote. + + If a list of heads is specified, return only nodes which are heads + or ancestors of these heads. + """ + m = repo.changelog.nodemap + search = [] + fetch = set() + seen = set() + seenbranch = set() + base = set() + + if not heads: + heads = remote.heads() + + if repo.changelog.tip() == nullid: + base.add(nullid) + if heads != [nullid]: + return [nullid], [nullid], list(heads) + return [nullid], [], [] + + # assume we're closer to the tip than the root + # and start by examining the heads + repo.ui.status(_("searching for changes\n")) + + unknown = [] + for h in heads: + if h not in m: + unknown.append(h) + else: + base.add(h) + + heads = unknown + if not unknown: + return list(base), [], [] + + req = set(unknown) + reqcnt = 0 + + # search through remote branches + # a 'branch' here is a linear segment of history, with four parts: + # head, root, first parent, second parent + # (a branch always has two parents (or none) by definition) + unknown = remote.branches(unknown) + while unknown: + r = [] + while unknown: + n = unknown.pop(0) + if n[0] in seen: + continue + + repo.ui.debug("examining %s:%s\n" + % (short(n[0]), short(n[1]))) + if n[0] == nullid: # found the end of the branch + pass + elif n in seenbranch: + repo.ui.debug("branch already found\n") + continue + elif n[1] and n[1] in m: # do we know the base? 
+ repo.ui.debug("found incomplete branch %s:%s\n" + % (short(n[0]), short(n[1]))) + search.append(n[0:2]) # schedule branch range for scanning + seenbranch.add(n) + else: + if n[1] not in seen and n[1] not in fetch: + if n[2] in m and n[3] in m: + repo.ui.debug("found new changeset %s\n" % + short(n[1])) + fetch.add(n[1]) # earliest unknown + for p in n[2:4]: + if p in m: + base.add(p) # latest known + + for p in n[2:4]: + if p not in req and p not in m: + r.append(p) + req.add(p) + seen.add(n[0]) + + if r: + reqcnt += 1 + repo.ui.progress(_('searching'), reqcnt, unit=_('queries')) + repo.ui.debug("request %d: %s\n" % + (reqcnt, " ".join(map(short, r)))) + for p in xrange(0, len(r), 10): + for b in remote.branches(r[p:p + 10]): + repo.ui.debug("received %s:%s\n" % + (short(b[0]), short(b[1]))) + unknown.append(b) + + # do binary search on the branches we found + while search: + newsearch = [] + reqcnt += 1 + repo.ui.progress(_('searching'), reqcnt, unit=_('queries')) + for n, l in zip(search, remote.between(search)): + l.append(n[1]) + p = n[0] + f = 1 + for i in l: + repo.ui.debug("narrowing %d:%d %s\n" % (f, len(l), short(i))) + if i in m: + if f <= 2: + repo.ui.debug("found new branch changeset %s\n" % + short(p)) + fetch.add(p) + base.add(i) + else: + repo.ui.debug("narrowed branch search to %s:%s\n" + % (short(p), short(i))) + newsearch.append((p, i)) + break + p, f = i, f * 2 + search = newsearch + + # sanity check our fetch list + for f in fetch: + if f in m: + raise error.RepoError(_("already have changeset ") + + short(f[:4])) + + base = list(base) + if base == [nullid]: + if force: + repo.ui.warn(_("warning: repository is unrelated\n")) + else: + raise util.Abort(_("repository is unrelated")) + + repo.ui.debug("found new changesets starting at " + + " ".join([short(f) for f in fetch]) + "\n") + + repo.ui.progress(_('searching'), None) + repo.ui.debug("%d total queries\n" % reqcnt) + + return base, list(fetch), heads + +def findoutgoing(repo, remote, base=None, remoteheads=None, force=False): + """Return list of nodes that are roots of subsets not in remote + + If base dict is specified, assume that these nodes and their parents + exist on the remote side. + If remotehead is specified, assume it is the list of the heads from + the remote repository. + """ + if base is None: + base = findcommonincoming(repo, remote, heads=remoteheads, + force=force)[0] + else: + base = list(base) + + repo.ui.debug("common changesets up to " + + " ".join(map(short, base)) + "\n") + + remain = set(repo.changelog.nodemap) + + # prune everything remote has from the tree + remain.remove(nullid) + remove = base + while remove: + n = remove.pop(0) + if n in remain: + remain.remove(n) + for p in repo.changelog.parents(n): + remove.append(p) + + # find every node whose parents have been pruned + subset = [] + # find every remote head that will get new children + for n in remain: + p1, p2 = repo.changelog.parents(n) + if p1 not in remain and p2 not in remain: + subset.append(n) + + return subset + +def prepush(repo, remote, force, revs, newbranch): + '''Analyze the local and remote repositories and determine which + changesets need to be pushed to the remote. Return value depends + on circumstances: + + If we are not going to push anything, return a tuple (None, + outgoing) where outgoing is 0 if there are no outgoing + changesets and 1 if there are, but we refuse to push them + (e.g. would create new remote heads). 
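findoutgoing() above prunes everything the remote already has (the common bases and all of their ancestors) and then returns the roots of what remains. A toy sketch of that pruning on a hypothetical four-node linear history, using plain strings instead of Mercurial node objects:

parents = {            # child -> (p1, p2); None stands in for nullid
    'a': (None, None),
    'b': ('a', None),
    'c': ('b', None),
    'd': ('c', None),
}

def outgoing_roots(parents, base):
    remain = set(parents)
    remove = list(base)
    while remove:                      # drop the bases and all their ancestors
        n = remove.pop()
        if n in remain:
            remain.remove(n)
            remove.extend(p for p in parents[n] if p)
    # roots of the outgoing subset: kept nodes whose parents were all pruned
    return [n for n in remain
            if all(p not in remain for p in parents[n] if p)]

assert outgoing_roots(parents, ['b']) == ['c']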
+ + Otherwise, return a tuple (changegroup, remoteheads), where + changegroup is a readable file-like object whose read() returns + successive changegroup chunks ready to be sent over the wire and + remoteheads is the list of remote heads.''' + remoteheads = remote.heads() + common, inc, rheads = findcommonincoming(repo, remote, heads=remoteheads, + force=force) + + cl = repo.changelog + update = findoutgoing(repo, remote, common, remoteheads) + outg, bases, heads = cl.nodesbetween(update, revs) + + if not bases: + repo.ui.status(_("no changes found\n")) + return None, 1 + + if not force and remoteheads != [nullid]: + if remote.capable('branchmap'): + # Check for each named branch if we're creating new remote heads. + # To be a remote head after push, node must be either: + # - unknown locally + # - a local outgoing head descended from update + # - a remote head that's known locally and not + # ancestral to an outgoing head + # + # New named branches cannot be created without --force. + + # 1. Create set of branches involved in the push. + branches = set(repo[n].branch() for n in outg) + + # 2. Check for new branches on the remote. + remotemap = remote.branchmap() + newbranches = branches - set(remotemap) + if newbranches and not newbranch: # new branch requires --new-branch + branchnames = ', '.join(sorted(newbranches)) + raise util.Abort(_("push creates new remote branches: %s!") + % branchnames, + hint=_("use 'hg push --new-branch' to create" + " new remote branches")) + branches.difference_update(newbranches) + + # 3. Construct the initial oldmap and newmap dicts. + # They contain information about the remote heads before and + # after the push, respectively. + # Heads not found locally are not included in either dict, + # since they won't be affected by the push. + # unsynced contains all branches with incoming changesets. + oldmap = {} + newmap = {} + unsynced = set() + for branch in branches: + remotebrheads = remotemap[branch] + prunedbrheads = [h for h in remotebrheads if h in cl.nodemap] + oldmap[branch] = prunedbrheads + newmap[branch] = list(prunedbrheads) + if len(remotebrheads) > len(prunedbrheads): + unsynced.add(branch) + + # 4. Update newmap with outgoing changes. + # This will possibly add new heads and remove existing ones. + ctxgen = (repo[n] for n in outg) + repo._updatebranchcache(newmap, ctxgen) + + else: + # 1-4b. old servers: Check for new topological heads. + # Construct {old,new}map with branch = None (topological branch). + # (code based on _updatebranchcache) + oldheads = set(h for h in remoteheads if h in cl.nodemap) + newheads = oldheads.union(outg) + if len(newheads) > 1: + for latest in reversed(outg): + if latest not in newheads: + continue + minhrev = min(cl.rev(h) for h in newheads) + reachable = cl.reachable(latest, cl.node(minhrev)) + reachable.remove(latest) + newheads.difference_update(reachable) + branches = set([None]) + newmap = {None: newheads} + oldmap = {None: oldheads} + unsynced = inc and branches or set() + + # 5. Check for new heads. + # If there are more heads after the push than before, a suitable + # warning, depending on unsynced status, is displayed. + for branch in branches: + if len(newmap[branch]) > len(oldmap[branch]): + if branch: + msg = _("push creates new remote heads " + "on branch '%s'!") % branch + else: + msg = _("push creates new remote heads!") + + if branch in unsynced: + hint = _("you should pull and merge or use push -f to force") + else: + hint = _("did you forget to merge? 
use push -f to force") + raise util.Abort(msg, hint=hint) + + # 6. Check for unsynced changes on involved branches. + if unsynced: + repo.ui.warn(_("note: unsynced remote changes!\n")) + + if revs is None: + # use the fast path, no race possible on push + nodes = repo.changelog.findmissing(common) + cg = repo._changegroup(nodes, 'push') + else: + cg = repo.changegroupsubset(update, revs, 'push') + return cg, remoteheads diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/discovery.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/discovery.pyo Binary files differnew file mode 100644 index 0000000..02e69c0 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/discovery.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/dispatch.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/dispatch.py new file mode 100644 index 0000000..97f0e3e --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/dispatch.py @@ -0,0 +1,641 @@ +# dispatch.py - command dispatching for mercurial +# +# Copyright 2005-2007 Matt Mackall <mpm@selenic.com> +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +from i18n import _ +import os, sys, atexit, signal, pdb, socket, errno, shlex, time, traceback, re +import util, commands, hg, fancyopts, extensions, hook, error +import cmdutil, encoding +import ui as uimod + +def run(): + "run the command in sys.argv" + sys.exit(dispatch(sys.argv[1:])) + +def dispatch(args): + "run the command specified in args" + try: + u = uimod.ui() + if '--traceback' in args: + u.setconfig('ui', 'traceback', 'on') + except util.Abort, inst: + sys.stderr.write(_("abort: %s\n") % inst) + if inst.hint: + sys.stderr.write(_("(%s)\n") % inst.hint) + return -1 + except error.ParseError, inst: + if len(inst.args) > 1: + sys.stderr.write(_("hg: parse error at %s: %s\n") % + (inst.args[1], inst.args[0])) + else: + sys.stderr.write(_("hg: parse error: %s\n") % inst.args[0]) + return -1 + return _runcatch(u, args) + +def _runcatch(ui, args): + def catchterm(*args): + raise error.SignalInterrupt + + try: + for name in 'SIGBREAK', 'SIGHUP', 'SIGTERM': + num = getattr(signal, name, None) + if num: + signal.signal(num, catchterm) + except ValueError: + pass # happens if called in a thread + + try: + try: + # enter the debugger before command execution + if '--debugger' in args: + ui.warn(_("entering debugger - " + "type c to continue starting hg or h for help\n")) + pdb.set_trace() + try: + return _dispatch(ui, args) + finally: + ui.flush() + except: + # enter the debugger when we hit an exception + if '--debugger' in args: + traceback.print_exc() + pdb.post_mortem(sys.exc_info()[2]) + ui.traceback() + raise + + # Global exception handling, alphabetically + # Mercurial-specific first, followed by built-in and library exceptions + except error.AmbiguousCommand, inst: + ui.warn(_("hg: command '%s' is ambiguous:\n %s\n") % + (inst.args[0], " ".join(inst.args[1]))) + except error.ParseError, inst: + if len(inst.args) > 1: + ui.warn(_("hg: parse error at %s: %s\n") % + (inst.args[1], inst.args[0])) + else: + ui.warn(_("hg: parse error: %s\n") % inst.args[0]) + return -1 + except error.LockHeld, inst: + if inst.errno == errno.ETIMEDOUT: + reason = _('timed out waiting for lock held by %s') % inst.locker + else: + reason = _('lock held by %s') % inst.locker + ui.warn(_("abort: %s: %s\n") % (inst.desc or inst.filename, reason)) + except 
error.LockUnavailable, inst: + ui.warn(_("abort: could not lock %s: %s\n") % + (inst.desc or inst.filename, inst.strerror)) + except error.CommandError, inst: + if inst.args[0]: + ui.warn(_("hg %s: %s\n") % (inst.args[0], inst.args[1])) + commands.help_(ui, inst.args[0]) + else: + ui.warn(_("hg: %s\n") % inst.args[1]) + commands.help_(ui, 'shortlist') + except error.RepoError, inst: + ui.warn(_("abort: %s!\n") % inst) + except error.ResponseError, inst: + ui.warn(_("abort: %s") % inst.args[0]) + if not isinstance(inst.args[1], basestring): + ui.warn(" %r\n" % (inst.args[1],)) + elif not inst.args[1]: + ui.warn(_(" empty string\n")) + else: + ui.warn("\n%r\n" % util.ellipsis(inst.args[1])) + except error.RevlogError, inst: + ui.warn(_("abort: %s!\n") % inst) + except error.SignalInterrupt: + ui.warn(_("killed!\n")) + except error.UnknownCommand, inst: + ui.warn(_("hg: unknown command '%s'\n") % inst.args[0]) + try: + # check if the command is in a disabled extension + # (but don't check for extensions themselves) + commands.help_(ui, inst.args[0], unknowncmd=True) + except error.UnknownCommand: + commands.help_(ui, 'shortlist') + except util.Abort, inst: + ui.warn(_("abort: %s\n") % inst) + if inst.hint: + ui.warn(_("(%s)\n") % inst.hint) + except ImportError, inst: + ui.warn(_("abort: %s!\n") % inst) + m = str(inst).split()[-1] + if m in "mpatch bdiff".split(): + ui.warn(_("(did you forget to compile extensions?)\n")) + elif m in "zlib".split(): + ui.warn(_("(is your Python install correct?)\n")) + except IOError, inst: + if hasattr(inst, "code"): + ui.warn(_("abort: %s\n") % inst) + elif hasattr(inst, "reason"): + try: # usually it is in the form (errno, strerror) + reason = inst.reason.args[1] + except: # it might be anything, for example a string + reason = inst.reason + ui.warn(_("abort: error: %s\n") % reason) + elif hasattr(inst, "args") and inst.args[0] == errno.EPIPE: + if ui.debugflag: + ui.warn(_("broken pipe\n")) + elif getattr(inst, "strerror", None): + if getattr(inst, "filename", None): + ui.warn(_("abort: %s: %s\n") % (inst.strerror, inst.filename)) + else: + ui.warn(_("abort: %s\n") % inst.strerror) + else: + raise + except OSError, inst: + if getattr(inst, "filename", None): + ui.warn(_("abort: %s: %s\n") % (inst.strerror, inst.filename)) + else: + ui.warn(_("abort: %s\n") % inst.strerror) + except KeyboardInterrupt: + try: + ui.warn(_("interrupted!\n")) + except IOError, inst: + if inst.errno == errno.EPIPE: + if ui.debugflag: + ui.warn(_("\nbroken pipe\n")) + else: + raise + except MemoryError: + ui.warn(_("abort: out of memory\n")) + except SystemExit, inst: + # Commands shouldn't sys.exit directly, but give a return code. + # Just in case catch this and and pass exit code to caller. 
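The long except-cascade above implements a simple contract: user-level failures print an "abort:" message (plus optional hint) to stderr and become a -1 exit status instead of a traceback. A stripped-down sketch of that contract, with a stand-in Abort class rather than util.Abort:

import sys

class Abort(Exception):
    """Stand-in for util.Abort: a message plus an optional hint."""
    def __init__(self, msg, hint=None):
        Exception.__init__(self, msg)
        self.hint = hint

def runcatch(func):
    try:
        return func()
    except Abort, inst:
        sys.stderr.write("abort: %s\n" % inst)
        if inst.hint:
            sys.stderr.write("(%s)\n" % inst.hint)
        return -1

def fail():
    raise Abort("no repository found", hint="run 'hg init' first")

assert runcatch(lambda: 0) == 0
assert runcatch(fail) == -1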
+ return inst.code + except socket.error, inst: + ui.warn(_("abort: %s\n") % inst.args[-1]) + except: + ui.warn(_("** unknown exception encountered," + " please report by visiting\n")) + ui.warn(_("** http://mercurial.selenic.com/wiki/BugTracker\n")) + ui.warn(_("** Python %s\n") % sys.version.replace('\n', '')) + ui.warn(_("** Mercurial Distributed SCM (version %s)\n") + % util.version()) + ui.warn(_("** Extensions loaded: %s\n") + % ", ".join([x[0] for x in extensions.extensions()])) + raise + + return -1 + +def aliasargs(fn): + if hasattr(fn, 'args'): + return fn.args + return [] + +class cmdalias(object): + def __init__(self, name, definition, cmdtable): + self.name = self.cmd = name + self.cmdname = '' + self.definition = definition + self.args = [] + self.opts = [] + self.help = '' + self.norepo = True + self.badalias = False + + try: + aliases, entry = cmdutil.findcmd(self.name, cmdtable) + for alias, e in cmdtable.iteritems(): + if e is entry: + self.cmd = alias + break + self.shadows = True + except error.UnknownCommand: + self.shadows = False + + if not self.definition: + def fn(ui, *args): + ui.warn(_("no definition for alias '%s'\n") % self.name) + return 1 + self.fn = fn + self.badalias = True + + return + + if self.definition.startswith('!'): + self.shell = True + def fn(ui, *args): + env = {'HG_ARGS': ' '.join((self.name,) + args)} + def _checkvar(m): + if int(m.groups()[0]) <= len(args): + return m.group() + else: + return '' + cmd = re.sub(r'\$(\d+)', _checkvar, self.definition[1:]) + replace = dict((str(i + 1), arg) for i, arg in enumerate(args)) + replace['0'] = self.name + replace['@'] = ' '.join(args) + cmd = util.interpolate(r'\$', replace, cmd) + return util.system(cmd, environ=env) + self.fn = fn + return + + args = shlex.split(self.definition) + self.cmdname = cmd = args.pop(0) + args = map(util.expandpath, args) + + for invalidarg in ("--cwd", "-R", "--repository", "--repo"): + if _earlygetopt([invalidarg], args): + def fn(ui, *args): + ui.warn(_("error in definition for alias '%s': %s may only " + "be given on the command line\n") + % (self.name, invalidarg)) + return 1 + + self.fn = fn + self.badalias = True + return + + try: + tableentry = cmdutil.findcmd(cmd, cmdtable, False)[1] + if len(tableentry) > 2: + self.fn, self.opts, self.help = tableentry + else: + self.fn, self.opts = tableentry + + self.args = aliasargs(self.fn) + args + if cmd not in commands.norepo.split(' '): + self.norepo = False + if self.help.startswith("hg " + cmd): + # drop prefix in old-style help lines so hg shows the alias + self.help = self.help[4 + len(cmd):] + self.__doc__ = self.fn.__doc__ + + except error.UnknownCommand: + def fn(ui, *args): + ui.warn(_("alias '%s' resolves to unknown command '%s'\n") \ + % (self.name, cmd)) + try: + # check if the command is in a disabled extension + commands.help_(ui, cmd, unknowncmd=True) + except error.UnknownCommand: + pass + return 1 + self.fn = fn + self.badalias = True + except error.AmbiguousCommand: + def fn(ui, *args): + ui.warn(_("alias '%s' resolves to ambiguous command '%s'\n") \ + % (self.name, cmd)) + return 1 + self.fn = fn + self.badalias = True + + def __call__(self, ui, *args, **opts): + if self.shadows: + ui.debug("alias '%s' shadows command '%s'\n" % + (self.name, self.cmdname)) + + if self.definition.startswith('!'): + return self.fn(ui, *args, **opts) + else: + try: + util.checksignature(self.fn)(ui, *args, **opts) + except error.SignatureError: + args = ' '.join([self.cmdname] + self.args) + ui.debug("alias '%s' expands to 
'%s'\n" % (self.name, args)) + raise + +def addaliases(ui, cmdtable): + # aliases are processed after extensions have been loaded, so they + # may use extension commands. Aliases can also use other alias definitions, + # but only if they have been defined prior to the current definition. + for alias, definition in ui.configitems('alias'): + aliasdef = cmdalias(alias, definition, cmdtable) + cmdtable[aliasdef.cmd] = (aliasdef, aliasdef.opts, aliasdef.help) + if aliasdef.norepo: + commands.norepo += ' %s' % alias + +def _parse(ui, args): + options = {} + cmdoptions = {} + + try: + args = fancyopts.fancyopts(args, commands.globalopts, options) + except fancyopts.getopt.GetoptError, inst: + raise error.CommandError(None, inst) + + if args: + cmd, args = args[0], args[1:] + aliases, entry = cmdutil.findcmd(cmd, commands.table, + ui.config("ui", "strict")) + cmd = aliases[0] + args = aliasargs(entry[0]) + args + defaults = ui.config("defaults", cmd) + if defaults: + args = map(util.expandpath, shlex.split(defaults)) + args + c = list(entry[1]) + else: + cmd = None + c = [] + + # combine global options into local + for o in commands.globalopts: + c.append((o[0], o[1], options[o[1]], o[3])) + + try: + args = fancyopts.fancyopts(args, c, cmdoptions, True) + except fancyopts.getopt.GetoptError, inst: + raise error.CommandError(cmd, inst) + + # separate global options back out + for o in commands.globalopts: + n = o[1] + options[n] = cmdoptions[n] + del cmdoptions[n] + + return (cmd, cmd and entry[0] or None, args, options, cmdoptions) + +def _parseconfig(ui, config): + """parse the --config options from the command line""" + for cfg in config: + try: + name, value = cfg.split('=', 1) + section, name = name.split('.', 1) + if not section or not name: + raise IndexError + ui.setconfig(section, name, value) + except (IndexError, ValueError): + raise util.Abort(_('malformed --config option: %r ' + '(use --config section.name=value)') % cfg) + +def _earlygetopt(aliases, args): + """Return list of values for an option (or aliases). + + The values are listed in the order they appear in args. + The options and values are removed from args. + """ + try: + argcount = args.index("--") + except ValueError: + argcount = len(args) + shortopts = [opt for opt in aliases if len(opt) == 2] + values = [] + pos = 0 + while pos < argcount: + if args[pos] in aliases: + if pos + 1 >= argcount: + # ignore and let getopt report an error if there is no value + break + del args[pos] + values.append(args.pop(pos)) + argcount -= 2 + elif args[pos][:2] in shortopts: + # short option can have no following space, e.g. hg log -Rfoo + values.append(args.pop(pos)[2:]) + argcount -= 1 + else: + pos += 1 + return values + +def runcommand(lui, repo, cmd, fullargs, ui, options, d, cmdpats, cmdoptions): + # run pre-hook, and abort if it fails + ret = hook.hook(lui, repo, "pre-%s" % cmd, False, args=" ".join(fullargs), + pats=cmdpats, opts=cmdoptions) + if ret: + return ret + ret = _runcommand(ui, options, cmd, d) + # run post-hook, passing command result + hook.hook(lui, repo, "post-%s" % cmd, False, args=" ".join(fullargs), + result=ret, pats=cmdpats, opts=cmdoptions) + return ret + +def _getlocal(ui, rpath): + """Return (path, local ui object) for the given target path. + + Takes paths in [cwd]/.hg/hgrc into account." 
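_parseconfig() above accepts overrides of the form section.name=value; only the first '=' and the first '.' are significant, so values may themselves contain '='. A tiny standalone sketch of that splitting rule:

def splitconfig(cfg):
    # "section.name=value" -> (section, name, value); reject malformed input
    name, value = cfg.split('=', 1)
    section, name = name.split('.', 1)
    if not section or not name:
        raise ValueError('malformed --config option: %r' % cfg)
    return section, name, value

assert splitconfig('ui.verbose=true') == ('ui', 'verbose', 'true')
assert splitconfig('web.name=my=repo') == ('web', 'name', 'my=repo')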
+ """ + try: + wd = os.getcwd() + except OSError, e: + raise util.Abort(_("error getting current working directory: %s") % + e.strerror) + path = cmdutil.findrepo(wd) or "" + if not path: + lui = ui + else: + lui = ui.copy() + lui.readconfig(os.path.join(path, ".hg", "hgrc"), path) + + if rpath: + path = lui.expandpath(rpath[-1]) + lui = ui.copy() + lui.readconfig(os.path.join(path, ".hg", "hgrc"), path) + + return path, lui + +def _checkshellalias(ui, args): + cwd = os.getcwd() + norepo = commands.norepo + options = {} + + try: + args = fancyopts.fancyopts(args, commands.globalopts, options) + except fancyopts.getopt.GetoptError: + return + + if not args: + return + + _parseconfig(ui, options['config']) + if options['cwd']: + os.chdir(options['cwd']) + + path, lui = _getlocal(ui, [options['repository']]) + + cmdtable = commands.table.copy() + addaliases(lui, cmdtable) + + cmd = args[0] + try: + aliases, entry = cmdutil.findcmd(cmd, cmdtable, lui.config("ui", "strict")) + except (error.AmbiguousCommand, error.UnknownCommand): + commands.norepo = norepo + os.chdir(cwd) + return + + cmd = aliases[0] + fn = entry[0] + + if cmd and hasattr(fn, 'shell'): + d = lambda: fn(ui, *args[1:]) + return lambda: runcommand(lui, None, cmd, args[:1], ui, options, d, [], {}) + + commands.norepo = norepo + os.chdir(cwd) + +_loaded = set() +def _dispatch(ui, args): + shellaliasfn = _checkshellalias(ui, args) + if shellaliasfn: + return shellaliasfn() + + # read --config before doing anything else + # (e.g. to change trust settings for reading .hg/hgrc) + _parseconfig(ui, _earlygetopt(['--config'], args)) + + # check for cwd + cwd = _earlygetopt(['--cwd'], args) + if cwd: + os.chdir(cwd[-1]) + + rpath = _earlygetopt(["-R", "--repository", "--repo"], args) + path, lui = _getlocal(ui, rpath) + + # Configure extensions in phases: uisetup, extsetup, cmdtable, and + # reposetup. Programs like TortoiseHg will call _dispatch several + # times so we keep track of configured extensions in _loaded. + extensions.loadall(lui) + exts = [ext for ext in extensions.extensions() if ext[0] not in _loaded] + # Propagate any changes to lui.__class__ by extensions + ui.__class__ = lui.__class__ + + # (uisetup and extsetup are handled in extensions.loadall) + + for name, module in exts: + cmdtable = getattr(module, 'cmdtable', {}) + overrides = [cmd for cmd in cmdtable if cmd in commands.table] + if overrides: + ui.warn(_("extension '%s' overrides commands: %s\n") + % (name, " ".join(overrides))) + commands.table.update(cmdtable) + _loaded.add(name) + + # (reposetup is handled in hg.repository) + + addaliases(lui, commands.table) + + # check for fallback encoding + fallback = lui.config('ui', 'fallbackencoding') + if fallback: + encoding.fallbackencoding = fallback + + fullargs = args + cmd, func, args, options, cmdoptions = _parse(lui, args) + + if options["config"]: + raise util.Abort(_("option --config may not be abbreviated!")) + if options["cwd"]: + raise util.Abort(_("option --cwd may not be abbreviated!")) + if options["repository"]: + raise util.Abort(_( + "Option -R has to be separated from other options (e.g. 
not -qR) " + "and --repository may only be abbreviated as --repo!")) + + if options["encoding"]: + encoding.encoding = options["encoding"] + if options["encodingmode"]: + encoding.encodingmode = options["encodingmode"] + if options["time"]: + def get_times(): + t = os.times() + if t[4] == 0.0: # Windows leaves this as zero, so use time.clock() + t = (t[0], t[1], t[2], t[3], time.clock()) + return t + s = get_times() + def print_time(): + t = get_times() + ui.warn(_("Time: real %.3f secs (user %.3f+%.3f sys %.3f+%.3f)\n") % + (t[4]-s[4], t[0]-s[0], t[2]-s[2], t[1]-s[1], t[3]-s[3])) + atexit.register(print_time) + + if options['verbose'] or options['debug'] or options['quiet']: + ui.setconfig('ui', 'verbose', str(bool(options['verbose']))) + ui.setconfig('ui', 'debug', str(bool(options['debug']))) + ui.setconfig('ui', 'quiet', str(bool(options['quiet']))) + if options['traceback']: + ui.setconfig('ui', 'traceback', 'on') + if options['noninteractive']: + ui.setconfig('ui', 'interactive', 'off') + + if options['help']: + return commands.help_(ui, cmd, options['version']) + elif options['version']: + return commands.version_(ui) + elif not cmd: + return commands.help_(ui, 'shortlist') + + repo = None + cmdpats = args[:] + if cmd not in commands.norepo.split(): + try: + repo = hg.repository(ui, path=path) + ui = repo.ui + if not repo.local(): + raise util.Abort(_("repository '%s' is not local") % path) + ui.setconfig("bundle", "mainreporoot", repo.root) + except error.RepoError: + if cmd not in commands.optionalrepo.split(): + if args and not path: # try to infer -R from command args + repos = map(cmdutil.findrepo, args) + guess = repos[0] + if guess and repos.count(guess) == len(repos): + return _dispatch(ui, ['--repository', guess] + fullargs) + if not path: + raise error.RepoError(_("There is no Mercurial repository" + " here (.hg not found)")) + raise + args.insert(0, repo) + elif rpath: + ui.warn(_("warning: --repository ignored\n")) + + msg = ' '.join(' ' in a and repr(a) or a for a in fullargs) + ui.log("command", msg + "\n") + d = lambda: util.checksignature(func)(ui, *args, **cmdoptions) + return runcommand(lui, repo, cmd, fullargs, ui, options, d, + cmdpats, cmdoptions) + +def _runcommand(ui, options, cmd, cmdfunc): + def checkargs(): + try: + return cmdfunc() + except error.SignatureError: + raise error.CommandError(cmd, _("invalid arguments")) + + if options['profile']: + format = ui.config('profiling', 'format', default='text') + + if not format in ['text', 'kcachegrind']: + ui.warn(_("unrecognized profiling format '%s'" + " - Ignored\n") % format) + format = 'text' + + output = ui.config('profiling', 'output') + + if output: + path = ui.expandpath(output) + ostream = open(path, 'wb') + else: + ostream = sys.stderr + + try: + from mercurial import lsprof + except ImportError: + raise util.Abort(_( + 'lsprof not available - install from ' + 'http://codespeak.net/svn/user/arigo/hack/misc/lsprof/')) + p = lsprof.Profiler() + p.enable(subcalls=True) + try: + return checkargs() + finally: + p.disable() + + if format == 'kcachegrind': + import lsprofcalltree + calltree = lsprofcalltree.KCacheGrind(p) + calltree.output(ostream) + else: + # format == 'text' + stats = lsprof.Stats(p.getstats()) + stats.sort() + stats.pprint(top=10, file=ostream, climit=5) + + if output: + ostream.close() + else: + return checkargs() diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/dispatch.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/dispatch.pyo Binary files differnew file 
mode 100644 index 0000000..5274dc6 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/dispatch.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/encoding.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/encoding.py new file mode 100644 index 0000000..57270e6 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/encoding.py @@ -0,0 +1,103 @@ +# encoding.py - character transcoding support for Mercurial +# +# Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +import error +import unicodedata, locale, os + +def _getpreferredencoding(): + ''' + On darwin, getpreferredencoding ignores the locale environment and + always returns mac-roman. http://bugs.python.org/issue6202 fixes this + for Python 2.7 and up. This is the same corrected code for earlier + Python versions. + + However, we can't use a version check for this method, as some distributions + patch Python to fix this. Instead, we use it as a 'fixer' for the mac-roman + encoding, as it is unlikely that this encoding is the actually expected. + ''' + try: + locale.CODESET + except AttributeError: + # Fall back to parsing environment variables :-( + return locale.getdefaultlocale()[1] + + oldloc = locale.setlocale(locale.LC_CTYPE) + locale.setlocale(locale.LC_CTYPE, "") + result = locale.nl_langinfo(locale.CODESET) + locale.setlocale(locale.LC_CTYPE, oldloc) + + return result + +_encodingfixers = { + '646': lambda: 'ascii', + 'ANSI_X3.4-1968': lambda: 'ascii', + 'mac-roman': _getpreferredencoding +} + +try: + encoding = os.environ.get("HGENCODING") + if not encoding: + encoding = locale.getpreferredencoding() or 'ascii' + encoding = _encodingfixers.get(encoding, lambda: encoding)() +except locale.Error: + encoding = 'ascii' +encodingmode = os.environ.get("HGENCODINGMODE", "strict") +fallbackencoding = 'ISO-8859-1' + +def tolocal(s): + """ + Convert a string from internal UTF-8 to local encoding + + All internal strings should be UTF-8 but some repos before the + implementation of locale support may contain latin1 or possibly + other character sets. We attempt to decode everything strictly + using UTF-8, then Latin-1, and failing that, we use UTF-8 and + replace unknown characters. + """ + for e in ('UTF-8', fallbackencoding): + try: + u = s.decode(e) # attempt strict decoding + return u.encode(encoding, "replace") + except LookupError, k: + raise error.Abort("%s, please check your locale settings" % k) + except UnicodeDecodeError: + pass + u = s.decode("utf-8", "replace") # last ditch + return u.encode(encoding, "replace") + +def fromlocal(s): + """ + Convert a string from the local character encoding to UTF-8 + + We attempt to decode strings using the encoding mode set by + HGENCODINGMODE, which defaults to 'strict'. In this mode, unknown + characters will cause an error message. Other modes include + 'replace', which replaces unknown characters with a special + Unicode character, and 'ignore', which drops the character. + """ + try: + return s.decode(encoding, encodingmode).encode("utf-8") + except UnicodeDecodeError, inst: + sub = s[max(0, inst.start - 10):inst.start + 10] + raise error.Abort("decoding near '%s': %s!" % (sub, inst)) + except LookupError, k: + raise error.Abort("%s, please check your locale settings" % k) + +# How to treat ambiguous-width characters. Set to 'wide' to treat as wide. 
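colwidth() below counts East Asian wide ('W') and fullwidth ('F') characters as two terminal cells, and counts ambiguous-width ('A') characters as wide only when HGENCODINGAMBIGUOUS=wide. A standalone illustration of that rule using unicodedata directly:

import unicodedata

def cells(u, ambiguous_wide=False):
    wide = ambiguous_wide and "WFA" or "WF"
    return sum([unicodedata.east_asian_width(c) in wide and 2 or 1 for c in u])

assert cells(u'abc') == 3
assert cells(u'\u65e5\u672c') == 4    # two fullwidth CJK characters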
+ambiguous = os.environ.get("HGENCODINGAMBIGUOUS", "narrow") + +def colwidth(s): + "Find the column width of a UTF-8 string for display" + d = s.decode(encoding, 'replace') + if hasattr(unicodedata, 'east_asian_width'): + wide = "WF" + if ambiguous == "wide": + wide = "WFA" + w = unicodedata.east_asian_width + return sum([w(c) in wide and 2 or 1 for c in d]) + return len(d) + diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/encoding.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/encoding.pyo Binary files differnew file mode 100644 index 0000000..8780298 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/encoding.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/error.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/error.py new file mode 100644 index 0000000..b02ece3 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/error.py @@ -0,0 +1,81 @@ +# error.py - Mercurial exceptions +# +# Copyright 2005-2008 Matt Mackall <mpm@selenic.com> +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +"""Mercurial exceptions. + +This allows us to catch exceptions at higher levels without forcing +imports. +""" + +# Do not import anything here, please + +class RevlogError(Exception): + pass + +class LookupError(RevlogError, KeyError): + def __init__(self, name, index, message): + self.name = name + if isinstance(name, str) and len(name) == 20: + from node import short + name = short(name) + RevlogError.__init__(self, '%s@%s: %s' % (index, name, message)) + + def __str__(self): + return RevlogError.__str__(self) + +class CommandError(Exception): + """Exception raised on errors in parsing the command line.""" + +class Abort(Exception): + """Raised if a command needs to print an error and exit.""" + def __init__(self, *args, **kw): + Exception.__init__(self, *args) + self.hint = kw.get('hint') + +class ConfigError(Abort): + 'Exception raised when parsing config files' + +class ParseError(Exception): + 'Exception raised when parsing config files (msg[, pos])' + +class RepoError(Exception): + pass + +class RepoLookupError(RepoError): + pass + +class CapabilityError(RepoError): + pass + +class LockError(IOError): + def __init__(self, errno, strerror, filename, desc): + IOError.__init__(self, errno, strerror, filename) + self.desc = desc + +class LockHeld(LockError): + def __init__(self, errno, filename, desc, locker): + LockError.__init__(self, errno, 'Lock held', filename, desc) + self.locker = locker + +class LockUnavailable(LockError): + pass + +class ResponseError(Exception): + """Raised to print an error with part of output and exit.""" + +class UnknownCommand(Exception): + """Exception raised if command is not in the command table.""" + +class AmbiguousCommand(Exception): + """Exception raised if command shortcut matches more than one command.""" + +# derived from KeyboardInterrupt to simplify some breakout code +class SignalInterrupt(KeyboardInterrupt): + """Exception raised on SIGTERM and SIGHUP.""" + +class SignatureError(Exception): + pass diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/error.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/error.pyo Binary files differnew file mode 100644 index 0000000..d726329 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/error.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/extensions.py 
b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/extensions.py new file mode 100644 index 0000000..c4eecc6 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/extensions.py @@ -0,0 +1,302 @@ +# extensions.py - extension handling for mercurial +# +# Copyright 2005-2007 Matt Mackall <mpm@selenic.com> +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +import imp, os +import util, cmdutil, help, error +from i18n import _, gettext + +_extensions = {} +_order = [] + +def extensions(): + for name in _order: + module = _extensions[name] + if module: + yield name, module + +def find(name): + '''return module with given extension name''' + try: + return _extensions[name] + except KeyError: + for k, v in _extensions.iteritems(): + if k.endswith('.' + name) or k.endswith('/' + name): + return v + raise KeyError(name) + +def loadpath(path, module_name): + module_name = module_name.replace('.', '_') + path = util.expandpath(path) + if os.path.isdir(path): + # module/__init__.py style + d, f = os.path.split(path.rstrip('/')) + fd, fpath, desc = imp.find_module(f, [d]) + return imp.load_module(module_name, fd, fpath, desc) + else: + return imp.load_source(module_name, path) + +def load(ui, name, path): + # unused ui argument kept for backwards compatibility + if name.startswith('hgext.') or name.startswith('hgext/'): + shortname = name[6:] + else: + shortname = name + if shortname in _extensions: + return _extensions[shortname] + _extensions[shortname] = None + if path: + # the module will be loaded in sys.modules + # choose an unique name so that it doesn't + # conflicts with other modules + mod = loadpath(path, 'hgext.%s' % name) + else: + def importh(name): + mod = __import__(name) + components = name.split('.') + for comp in components[1:]: + mod = getattr(mod, comp) + return mod + try: + mod = importh("hgext.%s" % name) + except ImportError: + mod = importh(name) + _extensions[shortname] = mod + _order.append(shortname) + return mod + +def loadall(ui): + result = ui.configitems("extensions") + newindex = len(_order) + for (name, path) in result: + if path: + if path[0] == '!': + continue + try: + load(ui, name, path) + except KeyboardInterrupt: + raise + except Exception, inst: + if path: + ui.warn(_("*** failed to import extension %s from %s: %s\n") + % (name, path, inst)) + else: + ui.warn(_("*** failed to import extension %s: %s\n") + % (name, inst)) + if ui.traceback(): + return 1 + + for name in _order[newindex:]: + uisetup = getattr(_extensions[name], 'uisetup', None) + if uisetup: + uisetup(ui) + + for name in _order[newindex:]: + extsetup = getattr(_extensions[name], 'extsetup', None) + if extsetup: + try: + extsetup(ui) + except TypeError: + if extsetup.func_code.co_argcount != 0: + raise + extsetup() # old extsetup with no ui argument + +def wrapcommand(table, command, wrapper): + '''Wrap the command named `command' in table + + Replace command in the command table with wrapper. The wrapped command will + be inserted into the command table specified by the table argument. + + The wrapper will be called like + + wrapper(orig, *args, **kwargs) + + where orig is the original (wrapped) function, and *args, **kwargs + are the arguments passed to it. 
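Both wrapcommand() and wrapfunction() hand the wrapper the original callable as its first argument. A self-contained sketch of the same pattern applied to a toy class (hypothetical names, not Mercurial objects):

def wrapit(container, funcname, wrapper):
    # same shape as extensions.wrapfunction(): swap in a closure over the
    # original and return the original so it could be restored later
    origfn = getattr(container, funcname)
    def wrap(*args, **kwargs):
        return wrapper(origfn, *args, **kwargs)
    setattr(container, funcname, wrap)
    return origfn

class Greeter(object):
    def greet(self, name):
        return "hello %s" % name

def shouting(orig, self, name):
    return orig(self, name).upper()

wrapit(Greeter, 'greet', shouting)
assert Greeter().greet('world') == 'HELLO WORLD'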
+ ''' + assert hasattr(wrapper, '__call__') + aliases, entry = cmdutil.findcmd(command, table) + for alias, e in table.iteritems(): + if e is entry: + key = alias + break + + origfn = entry[0] + def wrap(*args, **kwargs): + return util.checksignature(wrapper)( + util.checksignature(origfn), *args, **kwargs) + + wrap.__doc__ = getattr(origfn, '__doc__') + wrap.__module__ = getattr(origfn, '__module__') + + newentry = list(entry) + newentry[0] = wrap + table[key] = tuple(newentry) + return entry + +def wrapfunction(container, funcname, wrapper): + '''Wrap the function named funcname in container + + Replace the funcname member in the given container with the specified + wrapper. The container is typically a module, class, or instance. + + The wrapper will be called like + + wrapper(orig, *args, **kwargs) + + where orig is the original (wrapped) function, and *args, **kwargs + are the arguments passed to it. + + Wrapping methods of the repository object is not recommended since + it conflicts with extensions that extend the repository by + subclassing. All extensions that need to extend methods of + localrepository should use this subclassing trick: namely, + reposetup() should look like + + def reposetup(ui, repo): + class myrepo(repo.__class__): + def whatever(self, *args, **kwargs): + [...extension stuff...] + super(myrepo, self).whatever(*args, **kwargs) + [...extension stuff...] + + repo.__class__ = myrepo + + In general, combining wrapfunction() with subclassing does not + work. Since you cannot control what other extensions are loaded by + your end users, you should play nicely with others by using the + subclass trick. + ''' + assert hasattr(wrapper, '__call__') + def wrap(*args, **kwargs): + return wrapper(origfn, *args, **kwargs) + + origfn = getattr(container, funcname) + assert hasattr(origfn, '__call__') + setattr(container, funcname, wrap) + return origfn + +def _disabledpaths(strip_init=False): + '''find paths of disabled extensions. returns a dict of {name: path} + removes /__init__.py from packages if strip_init is True''' + import hgext + extpath = os.path.dirname(os.path.abspath(hgext.__file__)) + try: # might not be a filesystem path + files = os.listdir(extpath) + except OSError: + return {} + + exts = {} + for e in files: + if e.endswith('.py'): + name = e.rsplit('.', 1)[0] + path = os.path.join(extpath, e) + else: + name = e + path = os.path.join(extpath, e, '__init__.py') + if not os.path.exists(path): + continue + if strip_init: + path = os.path.dirname(path) + if name in exts or name in _order or name == '__init__': + continue + exts[name] = path + return exts + +def _disabledhelp(path): + '''retrieve help synopsis of a disabled extension (without importing)''' + try: + file = open(path) + except IOError: + return + else: + doc = help.moduledoc(file) + file.close() + + if doc: # extracting localized synopsis + return gettext(doc).splitlines()[0] + else: + return _('(no help text available)') + +def disabled(): + '''find disabled extensions from hgext + returns a dict of {name: desc}, and the max name length''' + + paths = _disabledpaths() + if not paths: + return None, 0 + + exts = {} + maxlength = 0 + for name, path in paths.iteritems(): + doc = _disabledhelp(path) + if not doc: + continue + + exts[name] = doc + if len(name) > maxlength: + maxlength = len(name) + + return exts, maxlength + +def disabledext(name): + '''find a specific disabled extension from hgext. 
returns desc''' + paths = _disabledpaths() + if name in paths: + return _disabledhelp(paths[name]) + +def disabledcmd(cmd, strict=False): + '''import disabled extensions until cmd is found. + returns (cmdname, extname, doc)''' + + paths = _disabledpaths(strip_init=True) + if not paths: + raise error.UnknownCommand(cmd) + + def findcmd(cmd, name, path): + try: + mod = loadpath(path, 'hgext.%s' % name) + except Exception: + return + try: + aliases, entry = cmdutil.findcmd(cmd, + getattr(mod, 'cmdtable', {}), strict) + except (error.AmbiguousCommand, error.UnknownCommand): + return + for c in aliases: + if c.startswith(cmd): + cmd = c + break + else: + cmd = aliases[0] + return (cmd, name, mod) + + # first, search for an extension with the same name as the command + path = paths.pop(cmd, None) + if path: + ext = findcmd(cmd, cmd, path) + if ext: + return ext + + # otherwise, interrogate each extension until there's a match + for name, path in paths.iteritems(): + ext = findcmd(cmd, name, path) + if ext: + return ext + + raise error.UnknownCommand(cmd) + +def enabled(): + '''return a dict of {name: desc} of extensions, and the max name length''' + exts = {} + maxlength = 0 + for ename, ext in extensions(): + doc = (gettext(ext.__doc__) or _('(no help text available)')) + ename = ename.split('.')[-1] + maxlength = max(len(ename), maxlength) + exts[ename] = doc.splitlines()[0].strip() + + return exts, maxlength diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/extensions.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/extensions.pyo Binary files differnew file mode 100644 index 0000000..a49f044 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/extensions.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/fancyopts.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/fancyopts.py new file mode 100644 index 0000000..7c9e07f --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/fancyopts.py @@ -0,0 +1,117 @@ +# fancyopts.py - better command line parsing +# +# Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +import getopt + +def gnugetopt(args, options, longoptions): + """Parse options mostly like getopt.gnu_getopt. + + This is different from getopt.gnu_getopt in that an argument of - will + become an argument of - instead of vanishing completely. 
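fancyopts switches to GNU-style parsing so options may follow positional arguments (e.g. "hg log somefile -v"), whereas the stock getopt.getopt stops at the first non-option; gnugetopt() above re-creates that behaviour while keeping '--' and the arguments after it intact. Illustrated directly with the standard library:

import getopt

argv = ['log', 'somefile', '-v']

# classic getopt: parsing stops at 'log', so '-v' is never seen as an option
opts, args = getopt.getopt(argv, 'v')
assert (opts, args) == ([], ['log', 'somefile', '-v'])

# GNU-style: options and positional arguments may be interleaved
opts, args = getopt.gnu_getopt(argv, 'v')
assert (opts, args) == ([('-v', '')], ['log', 'somefile'])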
+ """ + extraargs = [] + if '--' in args: + stopindex = args.index('--') + extraargs = args[stopindex + 1:] + args = args[:stopindex] + opts, parseargs = getopt.getopt(args, options, longoptions) + args = [] + while parseargs: + arg = parseargs.pop(0) + if arg and arg[0] == '-' and len(arg) > 1: + parseargs.insert(0, arg) + topts, newparseargs = getopt.getopt(parseargs, options, longoptions) + opts = opts + topts + parseargs = newparseargs + else: + args.append(arg) + args.extend(extraargs) + return opts, args + + +def fancyopts(args, options, state, gnu=False): + """ + read args, parse options, and store options in state + + each option is a tuple of: + + short option or '' + long option + default value + description + option value label(optional) + + option types include: + + boolean or none - option sets variable in state to true + string - parameter string is stored in state + list - parameter string is added to a list + integer - parameter strings is stored as int + function - call function with parameter + + non-option args are returned + """ + namelist = [] + shortlist = '' + argmap = {} + defmap = {} + + for option in options: + if len(option) == 5: + short, name, default, comment, dummy = option + else: + short, name, default, comment = option + # convert opts to getopt format + oname = name + name = name.replace('-', '_') + + argmap['-' + short] = argmap['--' + oname] = name + defmap[name] = default + + # copy defaults to state + if isinstance(default, list): + state[name] = default[:] + elif hasattr(default, '__call__'): + state[name] = None + else: + state[name] = default + + # does it take a parameter? + if not (default is None or default is True or default is False): + if short: + short += ':' + if oname: + oname += '=' + if short: + shortlist += short + if name: + namelist.append(oname) + + # parse arguments + if gnu: + parse = gnugetopt + else: + parse = getopt.getopt + opts, args = parse(args, shortlist, namelist) + + # transfer result to state + for opt, val in opts: + name = argmap[opt] + t = type(defmap[name]) + if t is type(fancyopts): + state[name] = defmap[name](val) + elif t is type(1): + state[name] = int(val) + elif t is type(''): + state[name] = val + elif t is type([]): + state[name].append(val) + elif t is type(None) or t is type(False): + state[name] = True + + # return unparsed args + return args diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/fancyopts.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/fancyopts.pyo Binary files differnew file mode 100644 index 0000000..fb665b2 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/fancyopts.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/filelog.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/filelog.py new file mode 100644 index 0000000..1ae2b17 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/filelog.py @@ -0,0 +1,79 @@ +# filelog.py - file history class for mercurial +# +# Copyright 2005-2007 Matt Mackall <mpm@selenic.com> +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. 
+ +import revlog + +class filelog(revlog.revlog): + def __init__(self, opener, path): + revlog.revlog.__init__(self, opener, + "/".join(("data", path + ".i"))) + + def read(self, node): + t = self.revision(node) + if not t.startswith('\1\n'): + return t + s = t.index('\1\n', 2) + return t[s + 2:] + + def _readmeta(self, node): + t = self.revision(node) + if not t.startswith('\1\n'): + return {} + s = t.index('\1\n', 2) + mt = t[2:s] + m = {} + for l in mt.splitlines(): + k, v = l.split(": ", 1) + m[k] = v + return m + + def add(self, text, meta, transaction, link, p1=None, p2=None): + if meta or text.startswith('\1\n'): + mt = ["%s: %s\n" % (k, v) for k, v in sorted(meta.iteritems())] + text = "\1\n%s\1\n%s" % ("".join(mt), text) + return self.addrevision(text, transaction, link, p1, p2) + + def renamed(self, node): + if self.parents(node)[0] != revlog.nullid: + return False + m = self._readmeta(node) + if m and "copy" in m: + return (m["copy"], revlog.bin(m["copyrev"])) + return False + + def size(self, rev): + """return the size of a given revision""" + + # for revisions with renames, we have to go the slow way + node = self.node(rev) + if self.renamed(node): + return len(self.read(node)) + + # XXX if self.read(node).startswith("\1\n"), this returns (size+4) + return revlog.revlog.size(self, rev) + + def cmp(self, node, text): + """compare text with a given file revision + + returns True if text is different than what is stored. + """ + + t = text + if text.startswith('\1\n'): + t = '\1\n\1\n' + text + + samehashes = not revlog.revlog.cmp(self, node, t) + if samehashes: + return False + + # renaming a file produces a different hash, even if the data + # remains unchanged. Check if it's the case (slow): + if self.renamed(node): + t2 = self.read(node) + return t2 != text + + return True diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/filelog.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/filelog.pyo Binary files differnew file mode 100644 index 0000000..f60f4b3 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/filelog.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/filemerge.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/filemerge.py new file mode 100644 index 0000000..4d9b9a9 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/filemerge.py @@ -0,0 +1,267 @@ +# filemerge.py - file-level merge handling for Mercurial +# +# Copyright 2006, 2007, 2008 Matt Mackall <mpm@selenic.com> +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +from node import short +from i18n import _ +import util, simplemerge, match, error +import os, tempfile, re, filecmp + +def _toolstr(ui, tool, part, default=""): + return ui.config("merge-tools", tool + "." + part, default) + +def _toolbool(ui, tool, part, default=False): + return ui.configbool("merge-tools", tool + "." + part, default) + +def _toollist(ui, tool, part, default=[]): + return ui.configlist("merge-tools", tool + "." 
+ part, default) + +_internal = ['internal:' + s + for s in 'fail local other merge prompt dump'.split()] + +def _findtool(ui, tool): + if tool in _internal: + return tool + k = _toolstr(ui, tool, "regkey") + if k: + p = util.lookup_reg(k, _toolstr(ui, tool, "regname")) + if p: + p = util.find_exe(p + _toolstr(ui, tool, "regappend")) + if p: + return p + return util.find_exe(_toolstr(ui, tool, "executable", tool)) + +def _picktool(repo, ui, path, binary, symlink): + def check(tool, pat, symlink, binary): + tmsg = tool + if pat: + tmsg += " specified for " + pat + if not _findtool(ui, tool): + if pat: # explicitly requested tool deserves a warning + ui.warn(_("couldn't find merge tool %s\n") % tmsg) + else: # configured but non-existing tools are more silent + ui.note(_("couldn't find merge tool %s\n") % tmsg) + elif symlink and not _toolbool(ui, tool, "symlink"): + ui.warn(_("tool %s can't handle symlinks\n") % tmsg) + elif binary and not _toolbool(ui, tool, "binary"): + ui.warn(_("tool %s can't handle binary\n") % tmsg) + elif not util.gui() and _toolbool(ui, tool, "gui"): + ui.warn(_("tool %s requires a GUI\n") % tmsg) + else: + return True + return False + + # forcemerge comes from command line arguments, highest priority + force = ui.config('ui', 'forcemerge') + if force: + toolpath = _findtool(ui, force) + if toolpath: + return (force, '"' + toolpath + '"') + else: + # mimic HGMERGE if given tool not found + return (force, force) + + # HGMERGE takes next precedence + hgmerge = os.environ.get("HGMERGE") + if hgmerge: + return (hgmerge, hgmerge) + + # then patterns + for pat, tool in ui.configitems("merge-patterns"): + mf = match.match(repo.root, '', [pat]) + if mf(path) and check(tool, pat, symlink, False): + toolpath = _findtool(ui, tool) + return (tool, '"' + toolpath + '"') + + # then merge tools + tools = {} + for k, v in ui.configitems("merge-tools"): + t = k.split('.')[0] + if t not in tools: + tools[t] = int(_toolstr(ui, t, "priority", "0")) + names = tools.keys() + tools = sorted([(-p, t) for t, p in tools.items()]) + uimerge = ui.config("ui", "merge") + if uimerge: + if uimerge not in names: + return (uimerge, uimerge) + tools.insert(0, (None, uimerge)) # highest priority + tools.append((None, "hgmerge")) # the old default, if found + for p, t in tools: + if check(t, None, symlink, binary): + toolpath = _findtool(ui, t) + return (t, '"' + toolpath + '"') + # internal merge as last resort + return (not (symlink or binary) and "internal:merge" or None, None) + +def _eoltype(data): + "Guess the EOL type of a file" + if '\0' in data: # binary + return None + if '\r\n' in data: # Windows + return '\r\n' + if '\r' in data: # Old Mac + return '\r' + if '\n' in data: # UNIX + return '\n' + return None # unknown + +def _matcheol(file, origfile): + "Convert EOL markers in a file to match origfile" + tostyle = _eoltype(open(origfile, "rb").read()) + if tostyle: + data = open(file, "rb").read() + style = _eoltype(data) + if style: + newdata = data.replace(style, tostyle) + if newdata != data: + open(file, "wb").write(newdata) + +def filemerge(repo, mynode, orig, fcd, fco, fca): + """perform a 3-way merge in the working directory + + mynode = parent node before merge + orig = original local filename before merge + fco = other file context + fca = ancestor file context + fcd = local file context for current/destination file + """ + + def temp(prefix, ctx): + pre = "%s~%s." 
% (os.path.basename(ctx.path()), prefix) + (fd, name) = tempfile.mkstemp(prefix=pre) + data = repo.wwritedata(ctx.path(), ctx.data()) + f = os.fdopen(fd, "wb") + f.write(data) + f.close() + return name + + def isbin(ctx): + try: + return util.binary(ctx.data()) + except IOError: + return False + + if not fco.cmp(fcd): # files identical? + return None + + ui = repo.ui + fd = fcd.path() + binary = isbin(fcd) or isbin(fco) or isbin(fca) + symlink = 'l' in fcd.flags() + fco.flags() + tool, toolpath = _picktool(repo, ui, fd, binary, symlink) + ui.debug("picked tool '%s' for %s (binary %s symlink %s)\n" % + (tool, fd, binary, symlink)) + + if not tool or tool == 'internal:prompt': + tool = "internal:local" + if ui.promptchoice(_(" no tool found to merge %s\n" + "keep (l)ocal or take (o)ther?") % fd, + (_("&Local"), _("&Other")), 0): + tool = "internal:other" + if tool == "internal:local": + return 0 + if tool == "internal:other": + repo.wwrite(fd, fco.data(), fco.flags()) + return 0 + if tool == "internal:fail": + return 1 + + # do the actual merge + a = repo.wjoin(fd) + b = temp("base", fca) + c = temp("other", fco) + out = "" + back = a + ".orig" + util.copyfile(a, back) + + if orig != fco.path(): + ui.status(_("merging %s and %s to %s\n") % (orig, fco.path(), fd)) + else: + ui.status(_("merging %s\n") % fd) + + ui.debug("my %s other %s ancestor %s\n" % (fcd, fco, fca)) + + # do we attempt to simplemerge first? + try: + premerge = _toolbool(ui, tool, "premerge", not (binary or symlink)) + except error.ConfigError: + premerge = _toolstr(ui, tool, "premerge").lower() + valid = 'keep'.split() + if premerge not in valid: + _valid = ', '.join(["'" + v + "'" for v in valid]) + raise error.ConfigError(_("%s.premerge not valid " + "('%s' is neither boolean nor %s)") % + (tool, premerge, _valid)) + + if premerge: + r = simplemerge.simplemerge(ui, a, b, c, quiet=True) + if not r: + ui.debug(" premerge successful\n") + os.unlink(back) + os.unlink(b) + os.unlink(c) + return 0 + if premerge != 'keep': + util.copyfile(back, a) # restore from backup and try again + + env = dict(HG_FILE=fd, + HG_MY_NODE=short(mynode), + HG_OTHER_NODE=str(fco.changectx()), + HG_BASE_NODE=str(fca.changectx()), + HG_MY_ISLINK='l' in fcd.flags(), + HG_OTHER_ISLINK='l' in fco.flags(), + HG_BASE_ISLINK='l' in fca.flags()) + + if tool == "internal:merge": + r = simplemerge.simplemerge(ui, a, b, c, label=['local', 'other']) + elif tool == 'internal:dump': + a = repo.wjoin(fd) + util.copyfile(a, a + ".local") + repo.wwrite(fd + ".other", fco.data(), fco.flags()) + repo.wwrite(fd + ".base", fca.data(), fca.flags()) + return 1 # unresolved + else: + args = _toolstr(ui, tool, "args", '$local $base $other') + if "$output" in args: + out, a = a, back # read input from backup, write to original + replace = dict(local=a, base=b, other=c, output=out) + args = util.interpolate(r'\$', replace, args, + lambda s: '"%s"' % util.localpath(s)) + r = util.system(toolpath + ' ' + args, cwd=repo.root, environ=env) + + if not r and (_toolbool(ui, tool, "checkconflicts") or + 'conflicts' in _toollist(ui, tool, "check")): + if re.search("^(<<<<<<< .*|=======|>>>>>>> .*)$", fcd.data(), + re.MULTILINE): + r = 1 + + checked = False + if 'prompt' in _toollist(ui, tool, "check"): + checked = True + if ui.promptchoice(_("was merge of '%s' successful (yn)?") % fd, + (_("&Yes"), _("&No")), 1): + r = 1 + + if not r and not checked and (_toolbool(ui, tool, "checkchanged") or + 'changed' in _toollist(ui, tool, "check")): + if filecmp.cmp(repo.wjoin(fd), back): + if 
ui.promptchoice(_(" output file %s appears unchanged\n" + "was merge successful (yn)?") % fd, + (_("&Yes"), _("&No")), 1): + r = 1 + + if _toolbool(ui, tool, "fixeol"): + _matcheol(repo.wjoin(fd), back) + + if r: + ui.warn(_("merging %s failed!\n") % fd) + else: + os.unlink(back) + + os.unlink(b) + os.unlink(c) + return r diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/filemerge.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/filemerge.pyo Binary files differnew file mode 100644 index 0000000..62ea668 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/filemerge.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/graphmod.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/graphmod.py new file mode 100644 index 0000000..71136ea --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/graphmod.py @@ -0,0 +1,122 @@ +# Revision graph generator for Mercurial +# +# Copyright 2008 Dirkjan Ochtman <dirkjan@ochtman.nl> +# Copyright 2007 Joel Rosdahl <joel@rosdahl.net> +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +"""supports walking the history as DAGs suitable for graphical output + +The most basic format we use is that of:: + + (id, type, data, [parentids]) + +The node and parent ids are arbitrary integers which identify a node in the +context of the graph returned. Type is a constant specifying the node type. +Data depends on type. +""" + +from mercurial.node import nullrev + +CHANGESET = 'C' + +def revisions(repo, start, stop): + """cset DAG generator yielding (id, CHANGESET, ctx, [parentids]) tuples + + This generator function walks through the revision history from revision + start to revision stop (which must be less than or equal to start). It + returns a tuple for each node. The node and parent ids are arbitrary + integers which identify a node in the context of the graph returned. + """ + cur = start + while cur >= stop: + ctx = repo[cur] + parents = set([p.rev() for p in ctx.parents() if p.rev() != nullrev]) + yield (cur, CHANGESET, ctx, sorted(parents)) + cur -= 1 + +def filerevs(repo, path, start, stop, limit=None): + """file cset DAG generator yielding (id, CHANGESET, ctx, [parentids]) tuples + + This generator function walks through the revision history of a single + file from revision start down to revision stop. + """ + filerev = len(repo.file(path)) - 1 + rev = stop + 1 + count = 0 + while filerev >= 0 and rev > stop: + fctx = repo.filectx(path, fileid=filerev) + parents = set([f.linkrev() for f in fctx.parents() if f.path() == path]) + rev = fctx.rev() + if rev <= start: + yield (rev, CHANGESET, fctx.changectx(), sorted(parents)) + count += 1 + if count == limit: + break + filerev -= 1 + +def nodes(repo, nodes): + """cset DAG generator yielding (id, CHANGESET, ctx, [parentids]) tuples + + This generator function walks the given nodes. It only returns parents + that are in nodes, too. 
+ """ + include = set(nodes) + for node in nodes: + ctx = repo[node] + parents = set([p.rev() for p in ctx.parents() if p.node() in include]) + yield (ctx.rev(), CHANGESET, ctx, sorted(parents)) + +def colored(dag): + """annotates a DAG with colored edge information + + For each DAG node this function emits tuples:: + + (id, type, data, (col, color), [(col, nextcol, color)]) + + with the following new elements: + + - Tuple (col, color) with column and color index for the current node + - A list of tuples indicating the edges between the current node and its + parents. + """ + seen = [] + colors = {} + newcolor = 1 + for (cur, type, data, parents) in dag: + + # Compute seen and next + if cur not in seen: + seen.append(cur) # new head + colors[cur] = newcolor + newcolor += 1 + + col = seen.index(cur) + color = colors.pop(cur) + next = seen[:] + + # Add parents to next + addparents = [p for p in parents if p not in next] + next[col:col + 1] = addparents + + # Set colors for the parents + for i, p in enumerate(addparents): + if not i: + colors[p] = color + else: + colors[p] = newcolor + newcolor += 1 + + # Add edges to the graph + edges = [] + for ecol, eid in enumerate(seen): + if eid in next: + edges.append((ecol, next.index(eid), colors[eid])) + elif eid == cur: + for p in parents: + edges.append((ecol, next.index(p), color)) + + # Yield and move on + yield (cur, type, data, (col, color), edges) + seen = next diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/graphmod.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/graphmod.pyo Binary files differnew file mode 100644 index 0000000..e77bbef --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/graphmod.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/hbisect.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/hbisect.py new file mode 100644 index 0000000..9a1bf9a --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/hbisect.py @@ -0,0 +1,155 @@ +# changelog bisection for mercurial +# +# Copyright 2007 Matt Mackall +# Copyright 2005, 2006 Benoit Boissinot <benoit.boissinot@ens-lyon.org> +# +# Inspired by git bisect, extension skeleton taken from mq.py. +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +import os +from i18n import _ +from node import short, hex +import util + +def bisect(changelog, state): + """find the next node (if any) for testing during a bisect search. + returns a (nodes, number, good) tuple. + + 'nodes' is the final result of the bisect if 'number' is 0. + Otherwise 'number' indicates the remaining possible candidates for + the search and 'nodes' contains the next bisect target. + 'good' is True if bisect is searching for a first good changeset, False + if searching for a first bad one. 
+ """ + + clparents = changelog.parentrevs + skip = set([changelog.rev(n) for n in state['skip']]) + + def buildancestors(bad, good): + # only the earliest bad revision matters + badrev = min([changelog.rev(n) for n in bad]) + goodrevs = [changelog.rev(n) for n in good] + goodrev = min(goodrevs) + # build visit array + ancestors = [None] * (len(changelog) + 1) # an extra for [-1] + + # set nodes descended from goodrev + ancestors[goodrev] = [] + for rev in xrange(goodrev + 1, len(changelog)): + for prev in clparents(rev): + if ancestors[prev] == []: + ancestors[rev] = [] + + # clear good revs from array + for node in goodrevs: + ancestors[node] = None + for rev in xrange(len(changelog), -1, -1): + if ancestors[rev] is None: + for prev in clparents(rev): + ancestors[prev] = None + + if ancestors[badrev] is None: + return badrev, None + return badrev, ancestors + + good = 0 + badrev, ancestors = buildancestors(state['bad'], state['good']) + if not ancestors: # looking for bad to good transition? + good = 1 + badrev, ancestors = buildancestors(state['good'], state['bad']) + bad = changelog.node(badrev) + if not ancestors: # now we're confused + if len(state['bad']) == 1 and len(state['good']) == 1: + raise util.Abort(_("starting revisions are not directly related")) + raise util.Abort(_("inconsistent state, %s:%s is good and bad") + % (badrev, short(bad))) + + # build children dict + children = {} + visit = [badrev] + candidates = [] + while visit: + rev = visit.pop(0) + if ancestors[rev] == []: + candidates.append(rev) + for prev in clparents(rev): + if prev != -1: + if prev in children: + children[prev].append(rev) + else: + children[prev] = [rev] + visit.append(prev) + + candidates.sort() + # have we narrowed it down to one entry? + # or have all other possible candidates besides 'bad' have been skipped? + tot = len(candidates) + unskipped = [c for c in candidates if (c not in skip) and (c != badrev)] + if tot == 1 or not unskipped: + return ([changelog.node(rev) for rev in candidates], 0, good) + perfect = tot // 2 + + # find the best node to test + best_rev = None + best_len = -1 + poison = set() + for rev in candidates: + if rev in poison: + # poison children + poison.update(children.get(rev, [])) + continue + + a = ancestors[rev] or [rev] + ancestors[rev] = None + + x = len(a) # number of ancestors + y = tot - x # number of non-ancestors + value = min(x, y) # how good is this test? + if value > best_len and rev not in skip: + best_len = value + best_rev = rev + if value == perfect: # found a perfect candidate? quit early + break + + if y < perfect and rev not in skip: # all downhill from here? 
+ # poison children + poison.update(children.get(rev, [])) + continue + + for c in children.get(rev, []): + if ancestors[c]: + ancestors[c] = list(set(ancestors[c] + a)) + else: + ancestors[c] = a + [c] + + assert best_rev is not None + best_node = changelog.node(best_rev) + + return ([best_node], tot, good) + + +def load_state(repo): + state = {'good': [], 'bad': [], 'skip': []} + if os.path.exists(repo.join("bisect.state")): + for l in repo.opener("bisect.state"): + kind, node = l[:-1].split() + node = repo.lookup(node) + if kind not in state: + raise util.Abort(_("unknown bisect kind %s") % kind) + state[kind].append(node) + return state + + +def save_state(repo, state): + f = repo.opener("bisect.state", "w", atomictemp=True) + wlock = repo.wlock() + try: + for kind in state: + for node in state[kind]: + f.write("%s %s\n" % (kind, hex(node))) + f.rename() + finally: + wlock.release() + diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/hbisect.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/hbisect.pyo Binary files differnew file mode 100644 index 0000000..d65d019 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/hbisect.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/help.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/help.py new file mode 100644 index 0000000..a30499d --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/help.py @@ -0,0 +1,117 @@ +# help.py - help data for mercurial +# +# Copyright 2006 Matt Mackall <mpm@selenic.com> +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +from i18n import gettext, _ +import sys, os +import extensions + + +def moduledoc(file): + '''return the top-level python documentation for the given file + + Loosely inspired by pydoc.source_synopsis(), but rewritten to + handle triple quotes and to return the whole text instead of just + the synopsis''' + result = [] + + line = file.readline() + while line[:1] == '#' or not line.strip(): + line = file.readline() + if not line: + break + + start = line[:3] + if start == '"""' or start == "'''": + line = line[3:] + while line: + if line.rstrip().endswith(start): + line = line.split(start)[0] + if line: + result.append(line) + break + elif not line: + return None # unmatched delimiter + result.append(line) + line = file.readline() + else: + return None + + return ''.join(result) + +def listexts(header, exts, maxlength, indent=1): + '''return a text listing of the given extensions''' + if not exts: + return '' + result = '\n%s\n\n' % header + for name, desc in sorted(exts.iteritems()): + result += '%s%-*s %s\n' % (' ' * indent, maxlength + 2, + ':%s:' % name, desc) + return result + +def extshelp(): + doc = loaddoc('extensions')() + + exts, maxlength = extensions.enabled() + doc += listexts(_('enabled extensions:'), exts, maxlength) + + exts, maxlength = extensions.disabled() + doc += listexts(_('disabled extensions:'), exts, maxlength) + + return doc + +def loaddoc(topic): + """Return a delayed loader for help/topic.txt.""" + + def loader(): + if hasattr(sys, 'frozen'): + module = sys.executable + else: + module = __file__ + base = os.path.dirname(module) + + for dir in ('.', '..'): + docdir = os.path.join(base, dir, 'help') + if os.path.isdir(docdir): + break + + path = os.path.join(docdir, topic + ".txt") + doc = gettext(open(path).read()) + for rewriter in helphooks.get(topic, []): + doc = rewriter(topic, 
doc) + return doc + + return loader + +helptable = [ + (["config", "hgrc"], _("Configuration Files"), loaddoc('config')), + (["dates"], _("Date Formats"), loaddoc('dates')), + (["patterns"], _("File Name Patterns"), loaddoc('patterns')), + (['environment', 'env'], _('Environment Variables'), + loaddoc('environment')), + (['revs', 'revisions'], _('Specifying Single Revisions'), + loaddoc('revisions')), + (['mrevs', 'multirevs'], _('Specifying Multiple Revisions'), + loaddoc('multirevs')), + (['revset', 'revsets'], _("Specifying Revision Sets"), loaddoc('revsets')), + (['diffs'], _('Diff Formats'), loaddoc('diffs')), + (['merge-tools'], _('Merge Tools'), loaddoc('merge-tools')), + (['templating', 'templates'], _('Template Usage'), + loaddoc('templates')), + (['urls'], _('URL Paths'), loaddoc('urls')), + (["extensions"], _("Using additional features"), extshelp), + (["subrepo", "subrepos"], _("Subrepositories"), loaddoc('subrepos')), + (["hgweb"], _("Configuring hgweb"), loaddoc('hgweb')), + (["glossary"], _("Glossary"), loaddoc('glossary')), +] + +# Map topics to lists of callable taking the current topic help and +# returning the updated version +helphooks = { +} + +def addtopichook(topic, rewriter): + helphooks.setdefault(topic, []).append(rewriter) diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/help.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/help.pyo Binary files differnew file mode 100644 index 0000000..54fcd75 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/help.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/help/config.txt b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/help/config.txt new file mode 100644 index 0000000..1b8ac5b --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/help/config.txt @@ -0,0 +1,51 @@ +Mercurial reads configuration data from several files, if they exist. +Below we list the most specific file first. + +On Windows, these configuration files are read: + +- ``<repo>\.hg\hgrc`` +- ``%USERPROFILE%\.hgrc`` +- ``%USERPROFILE%\mercurial.ini`` +- ``%HOME%\.hgrc`` +- ``%HOME%\mercurial.ini`` +- ``C:\mercurial\mercurial.ini`` (unless regkey or hgrc.d\ or mercurial.ini found) +- ``HKEY_LOCAL_MACHINE\SOFTWARE\Mercurial`` (unless hgrc.d\ or mercurial.ini found) +- ``<hg.exe-dir>\hgrc.d\*.rc`` (unless mercurial.ini found) +- ``<hg.exe-dir>\mercurial.ini`` + +On Unix, these files are read: + +- ``<repo>/.hg/hgrc`` +- ``$HOME/.hgrc`` +- ``/etc/mercurial/hgrc`` +- ``/etc/mercurial/hgrc.d/*.rc`` +- ``<install-root>/etc/mercurial/hgrc`` +- ``<install-root>/etc/mercurial/hgrc.d/*.rc`` + +If there is a per-repository configuration file which is not owned by +the active user, Mercurial will warn you that the file is skipped:: + + not trusting file <repo>/.hg/hgrc from untrusted user USER, group GROUP + +If this bothers you, the warning can be silenced (the file would still +be ignored) or trust can be established. Use one of the following +settings, the syntax is explained below: + +- ``ui.report_untrusted = False`` +- ``trusted.users = USER`` +- ``trusted.groups = GROUP`` + +The configuration files for Mercurial use a simple ini-file format. A +configuration file consists of sections, led by a ``[section]`` header +and followed by ``name = value`` entries:: + + [ui] + username = Firstname Lastname <firstname.lastname@example.net> + verbose = True + +The above entries will be referred to as ``ui.username`` and +``ui.verbose``, respectively. 
Please see the hgrc man page for a full +description of the possible configuration values: + +- on Unix-like systems: ``man hgrc`` +- online: http://www.selenic.com/mercurial/hgrc.5.html diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/help/dates.txt b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/help/dates.txt new file mode 100644 index 0000000..80ec6f0 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/help/dates.txt @@ -0,0 +1,36 @@ +Some commands allow the user to specify a date, e.g.: + +- backout, commit, import, tag: Specify the commit date. +- log, revert, update: Select revision(s) by date. + +Many date formats are valid. Here are some examples: + +- ``Wed Dec 6 13:18:29 2006`` (local timezone assumed) +- ``Dec 6 13:18 -0600`` (year assumed, time offset provided) +- ``Dec 6 13:18 UTC`` (UTC and GMT are aliases for +0000) +- ``Dec 6`` (midnight) +- ``13:18`` (today assumed) +- ``3:39`` (3:39AM assumed) +- ``3:39pm`` (15:39) +- ``2006-12-06 13:18:29`` (ISO 8601 format) +- ``2006-12-6 13:18`` +- ``2006-12-6`` +- ``12-6`` +- ``12/6`` +- ``12/6/6`` (Dec 6 2006) + +Lastly, there is Mercurial's internal format: + +- ``1165432709 0`` (Wed Dec 6 13:18:29 2006 UTC) + +This is the internal representation format for dates. unixtime is the +number of seconds since the epoch (1970-01-01 00:00 UTC). offset is +the offset of the local timezone, in seconds west of UTC (negative if +the timezone is east of UTC). + +The log command also accepts date ranges: + +- ``<{datetime}`` - at or before a given date/time +- ``>{datetime}`` - on or after a given date/time +- ``{datetime} to {datetime}`` - a date range, inclusive +- ``-{days}`` - within a given number of days of today diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/help/diffs.txt b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/help/diffs.txt new file mode 100644 index 0000000..9ede0a5 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/help/diffs.txt @@ -0,0 +1,29 @@ +Mercurial's default format for showing changes between two versions of +a file is compatible with the unified format of GNU diff, which can be +used by GNU patch and many other standard tools. + +While this standard format is often enough, it does not encode the +following information: + +- executable status and other permission bits +- copy or rename information +- changes in binary files +- creation or deletion of empty files + +Mercurial also supports the extended diff format from the git VCS +which addresses these limitations. The git diff format is not produced +by default because a few widespread tools still do not understand this +format. + +This means that when generating diffs from a Mercurial repository +(e.g. with :hg:`export`), you should be careful about things like file +copies and renames or other things mentioned above, because when +applying a standard diff to a different repository, this extra +information is lost. Mercurial's internal operations (like push and +pull) are not affected by this, because they use an internal binary +format for communicating changes. + +To make Mercurial produce the git extended diff format, use the --git +option available for many commands, or set 'git = True' in the [diff] +section of your configuration file. You do not need to set this option +when importing diffs in this format or using them in the mq extension. 
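Looking back at the internal date representation shown in the date-formats
text above: it is plain seconds since the epoch plus an offset in seconds
west of UTC, so it can be decoded with nothing beyond the standard library.
A hedged sketch for illustration only (the helper name is invented)::

    import datetime

    def parse_hg_date(internal):
        # "unixtime offset"; offset is seconds west of UTC, negative if east
        unixtime, offset = map(int, internal.split())
        # naive wall-clock time at the recorded offset
        return datetime.datetime.utcfromtimestamp(unixtime - offset)

    parse_hg_date("1165432709 0")   # Wed Dec 6 13:18:29 2006, matching above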
diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/help/environment.txt b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/help/environment.txt new file mode 100644 index 0000000..5b3f22c --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/help/environment.txt @@ -0,0 +1,93 @@ +HG + Path to the 'hg' executable, automatically passed when running + hooks, extensions or external tools. If unset or empty, this is + the hg executable's name if it's frozen, or an executable named + 'hg' (with %PATHEXT% [defaulting to COM/EXE/BAT/CMD] extensions on + Windows) is searched. + +HGEDITOR + This is the name of the editor to run when committing. See EDITOR. + + (deprecated, use configuration file) + +HGENCODING + This overrides the default locale setting detected by Mercurial. + This setting is used to convert data including usernames, + changeset descriptions, tag names, and branches. This setting can + be overridden with the --encoding command-line option. + +HGENCODINGMODE + This sets Mercurial's behavior for handling unknown characters + while transcoding user input. The default is "strict", which + causes Mercurial to abort if it can't map a character. Other + settings include "replace", which replaces unknown characters, and + "ignore", which drops them. This setting can be overridden with + the --encodingmode command-line option. + +HGENCODINGAMBIGUOUS + This sets Mercurial's behavior for handling characters with + "ambiguous" widths like accented Latin characters with East Asian + fonts. By default, Mercurial assumes ambiguous characters are + narrow, set this variable to "wide" if such characters cause + formatting problems. + +HGMERGE + An executable to use for resolving merge conflicts. The program + will be executed with three arguments: local file, remote file, + ancestor file. + + (deprecated, use configuration file) + +HGRCPATH + A list of files or directories to search for configuration + files. Item separator is ":" on Unix, ";" on Windows. If HGRCPATH + is not set, platform default search path is used. If empty, only + the .hg/hgrc from the current repository is read. + + For each element in HGRCPATH: + + - if it's a directory, all files ending with .rc are added + - otherwise, the file itself will be added + +HGPLAIN + When set, this disables any configuration settings that might + change Mercurial's default output. This includes encoding, + defaults, verbose mode, debug mode, quiet mode, tracebacks, and + localization. This can be useful when scripting against Mercurial + in the face of existing user configuration. + + Equivalent options set via command line flags or environment + variables are not overridden. + +HGUSER + This is the string used as the author of a commit. If not set, + available values will be considered in this order: + + - HGUSER (deprecated) + - configuration files from the HGRCPATH + - EMAIL + - interactive prompt + - LOGNAME (with ``@hostname`` appended) + + (deprecated, use configuration file) + +EMAIL + May be used as the author of a commit; see HGUSER. + +LOGNAME + May be used as the author of a commit; see HGUSER. + +VISUAL + This is the name of the editor to use when committing. See EDITOR. + +EDITOR + Sometimes Mercurial needs to open a text file in an editor for a + user to modify, for example when writing commit messages. The + editor it uses is determined by looking at the environment + variables HGEDITOR, VISUAL and EDITOR, in that order. The first + non-empty one is chosen. 
If all of them are empty, the editor + defaults to 'vi'. + +PYTHONPATH + This is used by Python to find imported modules and may need to be + set appropriately if this Mercurial is not installed system-wide. diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/help/extensions.txt b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/help/extensions.txt new file mode 100644 index 0000000..f3d2992 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/help/extensions.txt @@ -0,0 +1,33 @@ +Mercurial has the ability to add new features through the use of +extensions. Extensions may add new commands, add options to +existing commands, change the default behavior of commands, or +implement hooks. + +Extensions are not loaded by default for a variety of reasons: +they can increase startup overhead; they may be meant for advanced +usage only; they may provide potentially dangerous abilities (such +as letting you destroy or modify history); they might not be ready +for prime time; or they may alter some usual behaviors of stock +Mercurial. It is thus up to the user to activate extensions as +needed. + +To enable the "foo" extension, either shipped with Mercurial or in the +Python search path, create an entry for it in your configuration file, +like this:: + + [extensions] + foo = + +You may also specify the full path to an extension:: + + [extensions] + myfeature = ~/.hgext/myfeature.py + +To explicitly disable an extension enabled in a configuration file of +broader scope, prepend its path with !:: + + [extensions] + # disabling extension bar residing in /path/to/extension/bar.py + bar = !/path/to/extension/bar.py + # ditto, but no path was supplied for extension baz + baz = ! diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/help/glossary.txt b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/help/glossary.txt new file mode 100644 index 0000000..003fb56 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/help/glossary.txt @@ -0,0 +1,368 @@ +Ancestor + Any changeset that can be reached by an unbroken chain of parent + changesets from a given changeset. More precisely, the ancestors + of a changeset can be defined by two properties: a parent of a + changeset is an ancestor, and a parent of an ancestor is an + ancestor. See also: 'Descendant'. + +Branch + (Noun) A child changeset that has been created from a parent that + is not a head. These are known as topological branches, see + 'Branch, topological'. If a topological branch is named, it becomes + a named branch. If a topological branch is not named, it becomes + an anonymous branch. See 'Branch, anonymous' and 'Branch, named'. + + Branches may be created when changes are pulled from or pushed to + a remote repository, since new heads may be created by these + operations. Note that the term branch can also be used informally + to describe a development process in which certain development is + done independently of other development. This is sometimes done + explicitly with a named branch, but it can also be done locally, + using bookmarks or clones and anonymous branches. + + Example: "The experimental branch". + + (Verb) The action of creating a child changeset which results in + its parent having more than one child. + + Example: "I'm going to branch at X". + +Branch, anonymous + Every time a new child changeset is created from a parent that is not + a head and the name of the branch is not changed, a new anonymous + branch is created. 
+ +Branch, closed + A named branch whose branch heads have all been closed. + +Branch, default + The branch assigned to a changeset when no name has previously been + assigned. + +Branch head + See 'Head, branch'. + +Branch, inactive + If a named branch has no topological heads, it is considered to be + inactive. As an example, a feature branch becomes inactive when it + is merged into the default branch. The :hg:`branches` command + shows inactive branches by default, though they can be hidden with + :hg:`branches --active`. + + NOTE: this concept is deprecated because it is too implicit. + Branches should now be explicitly closed using :hg:`commit + --close-branch` when they are no longer needed. + +Branch, named + A collection of changesets which have the same branch name. By + default, children of a changeset in a named branch belong to the + same named branch. A child can be explicitly assigned to a + different branch. See :hg:`help branch`, :hg:`help branches` and + :hg:`commit --close-branch` for more information on managing + branches. + + Named branches can be thought of as a kind of namespace, dividing + the collection of changesets that comprise the repository into a + collection of disjoint subsets. A named branch is not necessarily + a topological branch. If a new named branch is created from the + head of another named branch, or the default branch, but no + further changesets are added to that previous branch, then that + previous branch will be a branch in name only. + +Branch tip + See 'Tip, branch'. + +Branch, topological + Every time a new child changeset is created from a parent that is + not a head, a new topological branch is created. If a topological + branch is named, it becomes a named branch. If a topological + branch is not named, it becomes an anonymous branch of the + current, possibly default, branch. + +Changelog + A record of the changesets in the order in which they were added + to the repository. This includes details such as changeset id, + author, commit message, date, and list of changed files. + +Changeset + A snapshot of the state of the repository used to record a change. + +Changeset, child + The converse of parent changeset: if P is a parent of C, then C is + a child of P. There is no limit to the number of children that a + changeset may have. + +Changeset id + A SHA-1 hash that uniquely identifies a changeset. It may be + represented as either a "long" 40 hexadecimal digit string, or a + "short" 12 hexadecimal digit string. + +Changeset, merge + A changeset with two parents. This occurs when a merge is + committed. + +Changeset, parent + A revision upon which a child changeset is based. Specifically, a + parent changeset of a changeset C is a changeset whose node + immediately precedes C in the DAG. Changesets have at most two + parents. + +Checkout + (Noun) The working directory being updated to a specific + revision. This use should probably be avoided where possible, as + changeset is much more appropriate than checkout in this context. + + Example: "I'm using checkout X." + + (Verb) Updating the working directory to a specific changeset. See + :hg:`help update`. + + Example: "I'm going to check out changeset X." + +Child changeset + See 'Changeset, child'. + +Close changeset + See 'Changeset, close'. + +Closed branch + See 'Branch, closed'. + +Clone + (Noun) An entire or partial copy of a repository. The partial + clone must be in the form of a revision and its ancestors. + + Example: "Is your clone up to date?". 
+ + (Verb) The process of creating a clone, using :hg:`clone`. + + Example: "I'm going to clone the repository". + +Closed branch head + See 'Head, closed branch'. + +Commit + (Noun) A synonym for changeset. + + Example: "Is the bug fixed in your recent commit?" + + (Verb) The act of recording changes to a repository. When files + are committed in a working directory, Mercurial finds the + differences between the committed files and their parent + changeset, creating a new changeset in the repository. + + Example: "You should commit those changes now." + +Cset + A common abbreviation of the term changeset. + +DAG + The repository of changesets of a distributed version control + system (DVCS) can be described as a directed acyclic graph (DAG), + consisting of nodes and edges, where nodes correspond to + changesets and edges imply a parent -> child relation. This graph + can be visualized by graphical tools such as :hg:`glog` + (graphlog). In Mercurial, the DAG is limited by the requirement + for children to have at most two parents. + +Default branch + See 'Branch, default'. + +Descendant + Any changeset that can be reached by a chain of child changesets + from a given changeset. More precisely, the descendants of a + changeset can be defined by two properties: the child of a + changeset is a descendant, and the child of a descendant is a + descendant. See also: 'Ancestor'. + +Diff + (Noun) The difference between the contents and attributes of files + in two changesets or a changeset and the current working + directory. The difference is usually represented in a standard + form called a "diff" or "patch". The "git diff" format is used + when the changes include copies, renames, or changes to file + attributes, none of which can be represented/handled by classic + "diff" and "patch". + + Example: "Did you see my correction in the diff?" + + (Verb) Diffing two changesets is the action of creating a diff or + patch. + + Example: "If you diff with changeset X, you will see what I mean." + +Directory, working + The working directory represents the state of the files tracked by + Mercurial, that will be recorded in the next commit. The working + directory initially corresponds to the snapshot at an existing + changeset, known as the parent of the working directory. See + 'Parent, working directory'. The state may be modified by changes + to the files introduced manually or by a merge. The repository + metadata exists in the .hg directory inside the working directory. + +Graph + See DAG and :hg:`help graphlog`. + +Head + The term 'head' may be used to refer to both a branch head or a + repository head, depending on the context. See 'Head, branch' and + 'Head, repository' for specific definitions. + + Heads are where development generally takes place and are the + usual targets for update and merge operations. + +Head, branch + A changeset with no descendants on the same named branch. + +Head, closed branch + A changeset that marks a head as no longer interesting. The closed + head is no longer listed by :hg:`heads`. A branch is considered + closed when all its heads are closed and consequently is not + listed by :hg:`branches`. + +Head, repository + A topological head which has not been closed. + +Head, topological + A changeset with no children in the repository. + +History, immutable + Once committed, changesets cannot be altered. Extensions which + appear to change history actually create new changesets that + replace existing ones, and then destroy the old changesets. 
Doing + so in public repositories can result in old changesets being + reintroduced to the repository. + +History, rewriting + The changesets in a repository are immutable. However, extensions + to Mercurial can be used to alter the repository, usually in such + a way as to preserve changeset contents. + +Immutable history + See 'History, immutable'. + +Merge changeset + See 'Changeset, merge'. + +Manifest + Each changeset has a manifest, which is the list of files that are + tracked by the changeset. + +Merge + Used to bring together divergent branches of work. When you update + to a changeset and then merge another changeset, you bring the + history of the latter changeset into your working directory. Once + conflicts are resolved (and marked), this merge may be committed + as a merge changeset, bringing two branches together in the DAG. + +Named branch + See 'Branch, named'. + +Null changeset + The empty changeset. It is the parent state of newly-initialized + repositories and repositories with no checked out revision. It is + thus the parent of root changesets and the effective ancestor when + merging unrelated changesets. Can be specified by the alias 'null' + or by the changeset ID '000000000000'. + +Parent + See 'Changeset, parent'. + +Parent changeset + See 'Changeset, parent'. + +Parent, working directory + The working directory parent reflects a virtual revision which is + the child of the changeset (or two changesets with an uncommitted + merge) shown by :hg:`parents`. This is changed with + :hg:`update`. Other commands to see the working directory parent + are :hg:`summary` and :hg:`id`. Can be specified by the alias ".". + +Patch + (Noun) The product of a diff operation. + + Example: "I've sent you my patch." + + (Verb) The process of using a patch file to transform one + changeset into another. + + Example: "You will need to patch that revision." + +Pull + An operation in which changesets in a remote repository which are + not in the local repository are brought into the local + repository. Note that this operation without special arguments + only updates the repository, it does not update the files in the + working directory. See :hg:`help pull`. + +Push + An operation in which changesets in a local repository which are + not in a remote repository are sent to the remote repository. Note + that this operation only adds changesets which have been committed + locally to the remote repository. Uncommitted changes are not + sent. See :hg:`help push`. + +Repository + The metadata describing all recorded states of a collection of + files. Each recorded state is represented by a changeset. A + repository is usually (but not always) found in the ``.hg`` + subdirectory of a working directory. Any recorded state can be + recreated by "updating" a working directory to a specific + changeset. + +Repository head + See 'Head, repository'. + +Revision + A state of the repository at some point in time. Earlier revisions + can be updated to by using :hg:`update`. See also 'Revision + number'; See also 'Changeset'. + +Revision number + This integer uniquely identifies a changeset in a specific + repository. It represents the order in which changesets were added + to a repository, starting with revision number 0. Note that the + revision number may be different in each clone of a repository. To + identify changesets uniquely between different clones, see + 'Changeset id'. + +Revlog + History storage mechanism used by Mercurial. 
It is a form of delta + encoding, with occasional full revision of data followed by delta + of each successive revision. It includes data and an index + pointing to the data. + +Rewriting history + See 'History, rewriting'. + +Root + A changeset that has only the null changeset as its parent. Most + repositories have only a single root changeset. + +Tip + The changeset with the highest revision number. It is the changeset + most recently added in a repository. + +Tip, branch + The head of a given branch with the highest revision number. When + a branch name is used as a revision identifier, it refers to the + branch tip. See also 'Branch, head'. Note that because revision + numbers may be different in different repository clones, the + branch tip may be different in different cloned repositories. + +Update + (Noun) Another synonym of changeset. + + Example: "I've pushed an update". + + (Verb) This term is usually used to describe updating the state of + the working directory to that of a specific changeset. See + :hg:`help update`. + + Example: "You should update". + +Working directory + See 'Directory, working'. + +Working directory parent + See 'Parent, working directory'. diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/help/hgweb.txt b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/help/hgweb.txt new file mode 100644 index 0000000..e1ff463 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/help/hgweb.txt @@ -0,0 +1,46 @@ +Mercurial's internal web server, hgweb, can serve either a single +repository, or a collection of them. In the latter case, a special +configuration file can be used to specify the repository paths to use +and global web configuration options. + +This file uses the same syntax as hgrc configuration files, but only +the following sections are recognized: + + - web + - paths + - collections + +The ``web`` section can specify all the settings described in the web +section of the hgrc documentation. + +The ``paths`` section provides mappings of physical repository +paths to virtual ones. For instance:: + + [paths] + projects/a = /foo/bar + projects/b = /baz/quux + web/root = /real/root/* + / = /real/root2/* + virtual/root2 = /real/root2/** + +- The first two entries make two repositories in different directories + appear under the same directory in the web interface +- The third entry maps every Mercurial repository found in '/real/root' + into 'web/root'. This format is preferred over the [collections] one, + since using absolute paths as configuration keys is not supported on every + platform (especially on Windows). +- The fourth entry is a special case mapping all repositories in + '/real/root2' in the root of the virtual directory. +- The fifth entry recursively finds all repositories under the real + root, and maps their relative paths under the virtual root. + +The ``collections`` section provides mappings of trees of physical +repositories paths to virtual ones, though the paths syntax is generally +preferred. For instance:: + + [collections] + /foo = /foo + +Here, the left side will be stripped off all repositories found in the +right side. Thus ``/foo/bar`` and ``foo/quux/baz`` will be listed as +``bar`` and ``quux/baz`` respectively. 
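To show how a collection configured this way is typically served, here is a
hedged sketch using the hgwebdir WSGI entry point together with the standard
library's wsgiref server; the config file path and the port are assumptions,
not anything prescribed above::

    # editorial sketch: serve the repositories described by an hgweb config
    from mercurial import demandimport
    demandimport.enable()
    from mercurial.hgweb import hgwebdir
    from wsgiref.simple_server import make_server

    # assumed path; the file holds [web]/[paths] sections like those above
    application = hgwebdir('/etc/hgweb.config')
    make_server('', 8000, application).serve_forever()

In production the same ``application`` object would normally be handed to a
real WSGI host (mod_wsgi, a CGI wrapper, and so on) rather than wsgiref.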
diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/help/merge-tools.txt b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/help/merge-tools.txt new file mode 100644 index 0000000..7324fe4 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/help/merge-tools.txt @@ -0,0 +1,110 @@ +To merge files Mercurial uses merge tools. + +A merge tool combines two different versions of a file into a merged +file. Merge tools are given the two files and the greatest common +ancestor of the two file versions, so they can determine the changes +made on both branches. + +Merge tools are used both for :hg:`resolve`, :hg:`merge`, :hg:`update`, +:hg:`backout` and in several extensions. + +Usually, the merge tool tries to automatically reconcile the files by +combining all non-overlapping changes that occurred separately in +the two different evolutions of the same initial base file. Furthermore, some +interactive merge programs make it easier to manually resolve +conflicting merges, either in a graphical way, or by inserting some +conflict markers. Mercurial does not include any interactive merge +programs but relies on external tools for that. + +Available merge tools +""""""""""""""""""""" + +External merge tools and their properties are configured in the +merge-tools configuration section - see hgrc(5) - but they can often just +be named by their executable. + +A merge tool is generally usable if its executable can be found on the +system and if it can handle the merge. The executable is found if it +is an absolute or relative executable path or the name of an +application in the executable search path. The tool is assumed to be +able to handle the merge if it can handle symlinks if the file is a +symlink, if it can handle binary files if the file is binary, and if a +GUI is available if the tool requires a GUI. + +There are some internal merge tools which can be used. The internal +merge tools are: + +``internal:merge`` + Uses the internal non-interactive simple merge algorithm for merging + files. It will fail if there are any conflicts and leave markers in + the partially merged file. + +``internal:fail`` + Rather than attempting to merge files that were modified on both + branches, it marks them as unresolved. The resolve command must be + used to resolve these conflicts. + +``internal:local`` + Uses the local version of files as the merged version. + +``internal:other`` + Uses the other version of files as the merged version. + +``internal:prompt`` + Asks the user which of the local or the other version to keep as + the merged version. + +``internal:dump`` + Creates three versions of the files to merge, containing the + contents of local, other and base. These files can then be used to + perform a merge manually. If the file to be merged is named + ``a.txt``, these files will accordingly be named ``a.txt.local``, + ``a.txt.other`` and ``a.txt.base`` and they will be placed in the + same directory as ``a.txt``. + +Internal tools are always available and do not require a GUI but will by default +not handle symlinks or binary files. + +Choosing a merge tool +""""""""""""""""""""" + +Mercurial uses these rules when deciding which merge tool to use: + +1. If a tool has been specified with the --tool option to merge or resolve, it + is used. If it is the name of a tool in the merge-tools configuration, its + configuration is used. Otherwise the specified tool must be executable by + the shell. + +2. 
If the ``HGMERGE`` environment variable is present, its value is used and + must be executable by the shell. + +3. If the filename of the file to be merged matches any of the patterns in the + merge-patterns configuration section, the first usable merge tool + corresponding to a matching pattern is used. Here, binary capabilities of the + merge tool are not considered. + +4. If ui.merge is set it will be considered next. If the value is not the name + of a configured tool, the specified value is used and must be executable by + the shell. Otherwise the named tool is used if it is usable. + +5. If any usable merge tools are present in the merge-tools configuration + section, the one with the highest priority is used. + +6. If a program named ``hgmerge`` can be found on the system, it is used - but + it will by default not be used for symlinks and binary files. + +7. If the file to be merged is not binary and is not a symlink, then + ``internal:merge`` is used. + +8. The merge of the file fails and must be resolved before commit. + +.. note:: + After selecting a merge program, Mercurial will by default attempt + to merge the files using a simple merge algorithm first. Only if it doesn't + succeed because of conflicting changes Mercurial will actually execute the + merge program. Whether to use the simple merge algorithm first can be + controlled by the premerge setting of the merge tool. Premerge is enabled by + default unless the file is binary or a symlink. + +See the merge-tools and ui sections of hgrc(5) for details on the +configuration of merge tools. diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/help/multirevs.txt b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/help/multirevs.txt new file mode 100644 index 0000000..c8a2833 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/help/multirevs.txt @@ -0,0 +1,13 @@ +When Mercurial accepts more than one revision, they may be specified +individually, or provided as a topologically continuous range, +separated by the ":" character. + +The syntax of range notation is [BEGIN]:[END], where BEGIN and END are +revision identifiers. Both BEGIN and END are optional. If BEGIN is not +specified, it defaults to revision number 0. If END is not specified, +it defaults to the tip. The range ":" thus means "all revisions". + +If BEGIN is greater than END, revisions are treated in reverse order. + +A range acts as a closed interval. This means that a range of 3:5 +gives 3, 4 and 5. Similarly, a range of 9:6 gives 9, 8, 7, and 6. diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/help/patterns.txt b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/help/patterns.txt new file mode 100644 index 0000000..4140170 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/help/patterns.txt @@ -0,0 +1,41 @@ +Mercurial accepts several notations for identifying one or more files +at a time. + +By default, Mercurial treats filenames as shell-style extended glob +patterns. + +Alternate pattern notations must be specified explicitly. + +To use a plain path name without any pattern matching, start it with +``path:``. These path names must completely match starting at the +current repository root. + +To use an extended glob, start a name with ``glob:``. Globs are rooted +at the current directory; a glob such as ``*.c`` will only match files +in the current directory ending with ``.c``. 
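These pattern kinds are what the match API used earlier in filemerge.py
(``match.match(repo.root, '', [pat])``) evaluates. A hedged sketch with a
made-up repository root and file names::

    from mercurial import match as matchmod

    m = matchmod.match('/repo', '', ['glob:*.c'])
    m('main.c')          # expected True: ends in ".c" directly at the root
    m('src/main.c')      # expected False: a plain glob does not cross "/"

    m = matchmod.match('/repo', '', ['path:src/main.c'])
    m('src/main.c')      # expected True: full path match from the root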
+ +The supported glob syntax extensions are ``**`` to match any string +across path separators and ``{a,b}`` to mean "a or b". + +To use a Perl/Python regular expression, start a name with ``re:``. +Regexp pattern matching is anchored at the root of the repository. + +Plain examples:: + + path:foo/bar a name bar in a directory named foo in the root + of the repository + path:path:name a file or directory named "path:name" + +Glob examples:: + + glob:*.c any name ending in ".c" in the current directory + *.c any name ending in ".c" in the current directory + **.c any name ending in ".c" in any subdirectory of the + current directory including itself. + foo/*.c any name ending in ".c" in the directory foo + foo/**.c any name ending in ".c" in any subdirectory of foo + including itself. + +Regexp examples:: + + re:.*\.c$ any name ending in ".c", anywhere in the repository diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/help/revisions.txt b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/help/revisions.txt new file mode 100644 index 0000000..309f8e2 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/help/revisions.txt @@ -0,0 +1,29 @@ +Mercurial supports several ways to specify individual revisions. + +A plain integer is treated as a revision number. Negative integers are +treated as sequential offsets from the tip, with -1 denoting the tip, +-2 denoting the revision prior to the tip, and so forth. + +A 40-digit hexadecimal string is treated as a unique revision +identifier. + +A hexadecimal string less than 40 characters long is treated as a +unique revision identifier and is referred to as a short-form +identifier. A short-form identifier is only valid if it is the prefix +of exactly one full-length identifier. + +Any other string is treated as a tag or branch name. A tag name is a +symbolic name associated with a revision identifier. A branch name +denotes the tipmost revision of that branch. Tag and branch names must +not contain the ":" character. + +The reserved name "tip" is a special tag that always identifies the +most recent revision. + +The reserved name "null" indicates the null revision. This is the +revision of an empty repository, and the parent of revision 0. + +The reserved name "." indicates the working directory parent. If no +working directory is checked out, it is equivalent to null. If an +uncommitted merge is in progress, "." is the revision of the first +parent. diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/help/revsets.txt b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/help/revsets.txt new file mode 100644 index 0000000..c964aae --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/help/revsets.txt @@ -0,0 +1,87 @@ +Mercurial supports a functional language for selecting a set of +revisions. + +The language supports a number of predicates which are joined by infix +operators. Parenthesis can be used for grouping. + +Identifiers such as branch names must be quoted with single or double +quotes if they contain characters outside of +``[._a-zA-Z0-9\x80-\xff]`` or if they match one of the predefined +predicates. + +Special characters can be used in quoted identifiers by escaping them, +e.g., ``\n`` is interpreted as a newline. To prevent them from being +interpreted, strings can be prefixed with ``r``, e.g. ``r'...'``. + +There is a single prefix operator: + +``not x`` + Changesets not in x. Short form is ``! x``. 
+
+These are the supported infix operators:
+
+``x::y``
+   A DAG range, meaning all changesets that are descendants of x and
+   ancestors of y, including x and y themselves. If the first endpoint
+   is left out, this is equivalent to ``ancestors(y)``; if the second
+   is left out, it is equivalent to ``descendants(x)``.
+
+   An alternative syntax is ``x..y``.
+
+``x:y``
+   All changesets with revision numbers between x and y, both
+   inclusive. Either endpoint can be left out; they default to 0 and
+   tip.
+
+``x and y``
+   The intersection of changesets in x and y. Short form is ``x & y``.
+
+``x or y``
+   The union of changesets in x and y. There are two alternative short
+   forms: ``x | y`` and ``x + y``.
+
+``x - y``
+   Changesets in x but not in y.
+
+The following predicates are supported:
+
+.. predicatesmarker
+
+Command line equivalents for :hg:`log`::
+
+  -f    ->  ::.
+  -d x  ->  date(x)
+  -k x  ->  keyword(x)
+  -m    ->  merge()
+  -u x  ->  user(x)
+  -b x  ->  branch(x)
+  -P x  ->  !::x
+  -l x  ->  limit(expr, x)
+
+Some sample queries:
+
+- Changesets on the default branch::
+
+    hg log -r "branch(default)"
+
+- Changesets on the default branch since tag 1.5 (excluding merges)::
+
+    hg log -r "branch(default) and 1.5:: and not merge()"
+
+- Open branch heads::
+
+    hg log -r "head() and not closed()"
+
+- Changesets between tags 1.3 and 1.5 mentioning "bug" that affect
+  ``hgext/*``::
+
+    hg log -r "1.3::1.5 and keyword(bug) and file('hgext/*')"
+
+- Changesets committed in May 2008, sorted by user::
+
+    hg log -r "sort(date('May 2008'), user)"
+
+- Changesets mentioning "bug" or "issue" that are not in a tagged
+  release::
+
+    hg log -r "(keyword(bug) or keyword(issue)) and not ancestors(tagged())"
diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/help/subrepos.txt b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/help/subrepos.txt
new file mode 100644
index 0000000..bd4e01b
--- /dev/null
+++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/help/subrepos.txt
@@ -0,0 +1,127 @@
+Subrepositories let you nest external repositories or projects into a
+parent Mercurial repository, and make commands operate on them as a
+group. External Mercurial and Subversion projects are currently
+supported.
+
+Subrepositories are made of three components:
+
+1. Nested repository checkouts. They can appear anywhere in the
+   parent working directory, and are Mercurial clones or Subversion
+   checkouts.
+
+2. Nested repository references. They are defined in ``.hgsub`` and
+   tell where the subrepository checkouts come from. Mercurial
+   subrepositories are referenced like:
+
+     path/to/nested = https://example.com/nested/repo/path
+
+   where ``path/to/nested`` is the checkout location relative to the
+   parent Mercurial root, and ``https://example.com/nested/repo/path``
+   is the source repository path. The source can also reference a
+   filesystem path. Subversion repositories are defined with:
+
+     path/to/nested = [svn]https://example.com/nested/trunk/path
+
+   Note that ``.hgsub`` does not exist by default in Mercurial
+   repositories; you have to create it and add it to the parent
+   repository before using subrepositories.
+
+3. Nested repository states. They are defined in ``.hgsubstate`` and
+   capture whatever information is required to restore the
+   subrepositories to the state they were committed in a parent
+   repository changeset. Mercurial automatically records the nested
+   repositories' states when committing in the parent repository.
+
+   .. note::
+      The ``.hgsubstate`` file should not be edited manually.
+
+
+Adding a Subrepository
+----------------------
+
+If ``.hgsub`` does not exist, create it and add it to the parent
+repository. Clone or check out the external projects where you want
+them to live in the parent repository. Edit ``.hgsub`` and add the
+subrepository entry as described above. At this point, the
+subrepository is tracked and the next commit will record its state in
+``.hgsubstate`` and bind it to the committed changeset.
+
+Synchronizing a Subrepository
+-----------------------------
+
+Subrepos do not automatically track the latest changeset of their
+sources. Instead, they are updated to the changeset that corresponds
+with the changeset checked out in the top-level repository. This is so
+developers always get a consistent set of compatible code and
+libraries when they update.
+
+Thus, updating subrepos is a manual process. Simply check out the
+target subrepo at the desired revision, test it in the top-level repo,
+then commit in the parent repository to record the new combination.
+
+Deleting a Subrepository
+------------------------
+
+To remove a subrepository from the parent repository, delete its
+reference from ``.hgsub``, then remove its files.
+
+Interaction with Mercurial Commands
+-----------------------------------
+
+:add: add does not recurse in subrepos unless -S/--subrepos is
+   specified. Subversion subrepositories are currently silently
+   ignored.
+
+:archive: archive does not recurse in subrepositories unless
+   -S/--subrepos is specified.
+
+:commit: commit creates a consistent snapshot of the state of the
+   entire project and its subrepositories. It does this by first
+   attempting to commit all modified subrepositories, then recording
+   their state and finally committing it in the parent repository.
+
+:diff: diff does not recurse in subrepos unless -S/--subrepos is
+   specified. Changes are displayed as usual, on the subrepository
+   elements. Subversion subrepositories are currently silently
+   ignored.
+
+:incoming: incoming does not recurse in subrepos unless -S/--subrepos
+   is specified. Subversion subrepositories are currently silently
+   ignored.
+
+:outgoing: outgoing does not recurse in subrepos unless -S/--subrepos
+   is specified. Subversion subrepositories are currently silently
+   ignored.
+
+:pull: pull is not recursive since it is not clear what to pull prior
+   to running :hg:`update`. Listing and retrieving all subrepository
+   changes referenced by the parent repository's pulled changesets is
+   expensive at best, and impossible in the Subversion case.
+
+:push: Mercurial will automatically push all subrepositories first
+   when the parent repository is being pushed. This ensures new
+   subrepository changes are available when referenced by top-level
+   repositories.
+
+:status: status does not recurse into subrepositories unless
+   -S/--subrepos is specified. Subrepository changes are displayed as
+   regular Mercurial changes on the subrepository elements. Subversion
+   subrepositories are currently silently ignored.
+
+:update: update restores the subrepos to the state in which they were
+   originally committed in the target changeset. If the recorded
+   changeset is not available in the current subrepository, Mercurial
+   will pull it in first before updating. This means that updating
+   can require network access when using subrepositories.
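The recursion described in the list above can also be seen from Mercurial's
Python API. The following is a minimal editorial sketch (not part of this
help file) of the pattern that hg.py, later in this diff, uses for
``incoming``/``outgoing`` with -S/--subrepos; the repository path is a
placeholder::

  from mercurial import ui, hg

  repo = hg.repository(ui.ui(), '/path/to/parent/repo')  # placeholder path
  ctx = repo[None]                      # working-directory context
  for subpath in sorted(ctx.substate):  # entries recorded in .hgsubstate
      sub = ctx.sub(subpath)            # subrepo object commands recurse into
      print subpath, ctx.substate[subpath]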
+ +Remapping Subrepositories Sources +--------------------------------- + +A subrepository source location may change during a project life, +invalidating references stored in the parent repository history. To +fix this, rewriting rules can be defined in parent repository ``hgrc`` +file or in Mercurial configuration. See the ``[subpaths]`` section in +hgrc(5) for more details. + diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/help/templates.txt b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/help/templates.txt new file mode 100644 index 0000000..6ca12e5 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/help/templates.txt @@ -0,0 +1,160 @@ +Mercurial allows you to customize output of commands through +templates. You can either pass in a template from the command +line, via the --template option, or select an existing +template-style (--style). + +You can customize output for any "log-like" command: log, +outgoing, incoming, tip, parents, heads and glog. + +Four styles are packaged with Mercurial: default (the style used +when no explicit preference is passed), compact, changelog, +and xml. +Usage:: + + $ hg log -r1 --style changelog + +A template is a piece of text, with markup to invoke variable +expansion:: + + $ hg log -r1 --template "{node}\n" + b56ce7b07c52de7d5fd79fb89701ea538af65746 + +Strings in curly braces are called keywords. The availability of +keywords depends on the exact context of the templater. These +keywords are usually available for templating a log-like command: + +:author: String. The unmodified author of the changeset. + +:branches: List of strings. The name of the branch on which the + changeset was committed. Will be empty if the branch name was + default. + +:children: List of strings. The children of the changeset. + +:date: Date information. The date when the changeset was committed. + +:desc: String. The text of the changeset description. + +:diffstat: String. Statistics of changes with the following format: + "modified files: +added/-removed lines" + +:files: List of strings. All files modified, added, or removed by this + changeset. + +:file_adds: List of strings. Files added by this changeset. + +:file_copies: List of strings. Files copied in this changeset with + their sources. + +:file_copies_switch: List of strings. Like "file_copies" but displayed + only if the --copied switch is set. + +:file_mods: List of strings. Files modified by this changeset. + +:file_dels: List of strings. Files removed by this changeset. + +:node: String. The changeset identification hash, as a 40 hexadecimal + digit string. + +:parents: List of strings. The parents of the changeset. + +:rev: Integer. The repository-local changeset revision number. + +:tags: List of strings. Any tags associated with the changeset. + +:latesttag: String. Most recent global tag in the ancestors of this + changeset. + +:latesttagdistance: Integer. Longest path to the latest tag. + +The "date" keyword does not produce human-readable output. If you +want to use a date in your output, you can use a filter to process +it. Filters are functions which return a string based on the input +variable. Be sure to use the stringify filter first when you're +applying a string-input filter to a list-like input variable. +You can also use a chain of filters to get the desired output:: + + $ hg tip --template "{date|isodate}\n" + 2008-08-21 18:22 +0000 + +List of filters: + +:addbreaks: Any text. 
Add an XHTML "<br />" tag before the end of + every line except the last. + +:age: Date. Returns a human-readable date/time difference between the + given date/time and the current date/time. + +:basename: Any text. Treats the text as a path, and returns the last + component of the path after splitting by the path separator + (ignoring trailing separators). For example, "foo/bar/baz" becomes + "baz" and "foo/bar//" becomes "bar". + +:stripdir: Treat the text as path and strip a directory level, if + possible. For example, "foo" and "foo/bar" becomes "foo". + +:date: Date. Returns a date in a Unix date format, including the + timezone: "Mon Sep 04 15:13:13 2006 0700". + +:domain: Any text. Finds the first string that looks like an email + address, and extracts just the domain component. Example: ``User + <user@example.com>`` becomes ``example.com``. + +:email: Any text. Extracts the first string that looks like an email + address. Example: ``User <user@example.com>`` becomes + ``user@example.com``. + +:escape: Any text. Replaces the special XML/XHTML characters "&", "<" + and ">" with XML entities. + +:hex: Any text. Convert a binary Mercurial node identifier into + its long hexadecimal representation. + +:fill68: Any text. Wraps the text to fit in 68 columns. + +:fill76: Any text. Wraps the text to fit in 76 columns. + +:firstline: Any text. Returns the first line of text. + +:nonempty: Any text. Returns '(none)' if the string is empty. + +:hgdate: Date. Returns the date as a pair of numbers: "1157407993 + 25200" (Unix timestamp, timezone offset). + +:isodate: Date. Returns the date in ISO 8601 format: "2009-08-18 13:00 + +0200". + +:isodatesec: Date. Returns the date in ISO 8601 format, including + seconds: "2009-08-18 13:00:13 +0200". See also the rfc3339date + filter. + +:localdate: Date. Converts a date to local date. + +:obfuscate: Any text. Returns the input text rendered as a sequence of + XML entities. + +:person: Any text. Returns the text before an email address. + +:rfc822date: Date. Returns a date using the same format used in email + headers: "Tue, 18 Aug 2009 13:00:13 +0200". + +:rfc3339date: Date. Returns a date using the Internet date format + specified in RFC 3339: "2009-08-18T13:00:13+02:00". + +:short: Changeset hash. Returns the short form of a changeset hash, + i.e. a 12 hexadecimal digit string. + +:shortdate: Date. Returns a date like "2006-09-18". + +:stringify: Any type. Turns the value into text by converting values into + text and concatenating them. + +:strip: Any text. Strips all leading and trailing whitespace. + +:tabindent: Any text. Returns the text, with every line except the + first starting with a tab character. + +:urlescape: Any text. Escapes all "special" characters. For example, + "foo bar" becomes "foo%20bar". + +:user: Any text. Returns the user portion of an email address. 
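For instance, filters from the list above can be chained behind any keyword.
The invocations below are an editorial illustration (the revision ``.`` and
the chosen filters are arbitrary)::

  $ hg log -r . --template "{date|age}: {desc|firstline}\n"
  $ hg log -r . --template "{author|person} <{author|email}>\n"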
diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/help/urls.txt b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/help/urls.txt
new file mode 100644
index 0000000..3704090
--- /dev/null
+++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/help/urls.txt
@@ -0,0 +1,66 @@
+Valid URLs are of the form::
+
+  local/filesystem/path[#revision]
+  file://local/filesystem/path[#revision]
+  http://[user[:pass]@]host[:port]/[path][#revision]
+  https://[user[:pass]@]host[:port]/[path][#revision]
+  ssh://[user[:pass]@]host[:port]/[path][#revision]
+
+Paths in the local filesystem can either point to Mercurial
+repositories or to bundle files (as created by :hg:`bundle` or
+:hg:`incoming --bundle`). See also :hg:`help paths`.
+
+An optional identifier after # indicates a particular branch, tag, or
+changeset to use from the remote repository. See also :hg:`help
+revisions`.
+
+Some features, such as pushing to http:// and https:// URLs, are only
+possible if the feature is explicitly enabled on the remote Mercurial
+server.
+
+Note that the security of HTTPS URLs depends on proper configuration of
+web.cacerts.
+
+Some notes about using SSH with Mercurial:
+
+- SSH requires an accessible shell account on the destination machine
+  and a copy of hg in the remote path or specified with the remotecmd
+  setting.
+- path is relative to the remote user's home directory by default. Use
+  an extra slash at the start of a path to specify an absolute path::
+
+    ssh://example.com//tmp/repository
+
+- Mercurial doesn't use its own compression via SSH; the right thing
+  to do is to configure it in your ~/.ssh/config, e.g.::
+
+    Host *.mylocalnetwork.example.com
+      Compression no
+    Host *
+      Compression yes
+
+  Alternatively specify "ssh -C" as your ssh command in your
+  configuration file or with the --ssh command line option.
+
+These URLs can all be stored in your configuration file with path
+aliases under the [paths] section like so::
+
+  [paths]
+  alias1 = URL1
+  alias2 = URL2
+  ...
+
+You can then use the alias for any command that uses a URL (for
+example :hg:`pull alias1` will be treated as :hg:`pull URL1`).
+
+Two path aliases are special because they are used as defaults when
+you do not provide the URL to a command:
+
+default:
+  When you create a repository with hg clone, the clone command saves
+  the location of the source repository as the new repository's
+  'default' path. This is then used when you omit the path from push-
+  and pull-like commands (including incoming and outgoing).
+
+default-push:
+  The push command will look for a path named 'default-push', and
+  prefer it over 'default' if both are defined.
diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/hg.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/hg.py
new file mode 100644
index 0000000..0e58c67
--- /dev/null
+++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/hg.py
@@ -0,0 +1,556 @@
+# hg.py - repository classes for mercurial
+#
+# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
+# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
+#
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
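# Editorial usage sketch -- not part of hg.py. As a standalone script, the
# public helpers defined in this module are typically driven as shown below;
# the source URL and destination directory are placeholders.
from mercurial import ui, hg

u = ui.ui()
# clone() returns (source_repo, destination_repo); see its docstring below.
src_repo, dest_repo = hg.clone(u, 'https://example.com/repo', 'repo-copy')
print dest_repo.root, dest_repo['tip'].branch()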
+ +from i18n import _ +from lock import release +from node import hex, nullid, nullrev, short +import localrepo, bundlerepo, httprepo, sshrepo, statichttprepo +import lock, util, extensions, error, encoding, node +import cmdutil, discovery, url +import merge as mergemod +import verify as verifymod +import errno, os, shutil + +def _local(path): + path = util.expandpath(util.drop_scheme('file', path)) + return (os.path.isfile(path) and bundlerepo or localrepo) + +def addbranchrevs(lrepo, repo, branches, revs): + hashbranch, branches = branches + if not hashbranch and not branches: + return revs or None, revs and revs[0] or None + revs = revs and list(revs) or [] + if not repo.capable('branchmap'): + if branches: + raise util.Abort(_("remote branch lookup not supported")) + revs.append(hashbranch) + return revs, revs[0] + branchmap = repo.branchmap() + + def primary(butf8): + if butf8 == '.': + if not lrepo or not lrepo.local(): + raise util.Abort(_("dirstate branch not accessible")) + butf8 = lrepo.dirstate.branch() + if butf8 in branchmap: + revs.extend(node.hex(r) for r in reversed(branchmap[butf8])) + return True + else: + return False + + for branch in branches: + butf8 = encoding.fromlocal(branch) + if not primary(butf8): + raise error.RepoLookupError(_("unknown branch '%s'") % branch) + if hashbranch: + butf8 = encoding.fromlocal(hashbranch) + if not primary(butf8): + revs.append(hashbranch) + return revs, revs[0] + +def parseurl(url, branches=None): + '''parse url#branch, returning (url, (branch, branches))''' + + if '#' not in url: + return url, (None, branches or []) + url, branch = url.split('#', 1) + return url, (branch, branches or []) + +schemes = { + 'bundle': bundlerepo, + 'file': _local, + 'http': httprepo, + 'https': httprepo, + 'ssh': sshrepo, + 'static-http': statichttprepo, +} + +def _lookup(path): + scheme = 'file' + if path: + c = path.find(':') + if c > 0: + scheme = path[:c] + thing = schemes.get(scheme) or schemes['file'] + try: + return thing(path) + except TypeError: + return thing + +def islocal(repo): + '''return true if repo or path is local''' + if isinstance(repo, str): + try: + return _lookup(repo).islocal(repo) + except AttributeError: + return False + return repo.local() + +def repository(ui, path='', create=False): + """return a repository object for the specified path""" + repo = _lookup(path).instance(ui, path, create) + ui = getattr(repo, "ui", ui) + for name, module in extensions.extensions(): + hook = getattr(module, 'reposetup', None) + if hook: + hook(ui, repo) + return repo + +def defaultdest(source): + '''return default destination of clone if none is given''' + return os.path.basename(os.path.normpath(source)) + +def localpath(path): + if path.startswith('file://localhost/'): + return path[16:] + if path.startswith('file://'): + return path[7:] + if path.startswith('file:'): + return path[5:] + return path + +def share(ui, source, dest=None, update=True): + '''create a shared repository''' + + if not islocal(source): + raise util.Abort(_('can only share local repositories')) + + if not dest: + dest = defaultdest(source) + else: + dest = ui.expandpath(dest) + + if isinstance(source, str): + origsource = ui.expandpath(source) + source, branches = parseurl(origsource) + srcrepo = repository(ui, source) + rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None) + else: + srcrepo = source + origsource = source = srcrepo.url() + checkout = None + + sharedpath = srcrepo.sharedpath # if our source is already sharing + + root = 
os.path.realpath(dest) + roothg = os.path.join(root, '.hg') + + if os.path.exists(roothg): + raise util.Abort(_('destination already exists')) + + if not os.path.isdir(root): + os.mkdir(root) + os.mkdir(roothg) + + requirements = '' + try: + requirements = srcrepo.opener('requires').read() + except IOError, inst: + if inst.errno != errno.ENOENT: + raise + + requirements += 'shared\n' + file(os.path.join(roothg, 'requires'), 'w').write(requirements) + file(os.path.join(roothg, 'sharedpath'), 'w').write(sharedpath) + + default = srcrepo.ui.config('paths', 'default') + if default: + f = file(os.path.join(roothg, 'hgrc'), 'w') + f.write('[paths]\ndefault = %s\n' % default) + f.close() + + r = repository(ui, root) + + if update: + r.ui.status(_("updating working directory\n")) + if update is not True: + checkout = update + for test in (checkout, 'default', 'tip'): + if test is None: + continue + try: + uprev = r.lookup(test) + break + except error.RepoLookupError: + continue + _update(r, uprev) + +def clone(ui, source, dest=None, pull=False, rev=None, update=True, + stream=False, branch=None): + """Make a copy of an existing repository. + + Create a copy of an existing repository in a new directory. The + source and destination are URLs, as passed to the repository + function. Returns a pair of repository objects, the source and + newly created destination. + + The location of the source is added to the new repository's + .hg/hgrc file, as the default to be used for future pulls and + pushes. + + If an exception is raised, the partly cloned/updated destination + repository will be deleted. + + Arguments: + + source: repository object or URL + + dest: URL of destination repository to create (defaults to base + name of source repository) + + pull: always pull from source repository, even in local case + + stream: stream raw data uncompressed from repository (fast over + LAN, slow over WAN) + + rev: revision to clone up to (implies pull=True) + + update: update working directory after clone completes, if + destination is local repository (True means update to default rev, + anything else is treated as a revision) + + branch: branches to clone + """ + + if isinstance(source, str): + origsource = ui.expandpath(source) + source, branch = parseurl(origsource, branch) + src_repo = repository(ui, source) + else: + src_repo = source + branch = (None, branch or []) + origsource = source = src_repo.url() + rev, checkout = addbranchrevs(src_repo, src_repo, branch, rev) + + if dest is None: + dest = defaultdest(source) + ui.status(_("destination directory: %s\n") % dest) + else: + dest = ui.expandpath(dest) + + dest = localpath(dest) + source = localpath(source) + + if os.path.exists(dest): + if not os.path.isdir(dest): + raise util.Abort(_("destination '%s' already exists") % dest) + elif os.listdir(dest): + raise util.Abort(_("destination '%s' is not empty") % dest) + + class DirCleanup(object): + def __init__(self, dir_): + self.rmtree = shutil.rmtree + self.dir_ = dir_ + def close(self): + self.dir_ = None + def cleanup(self): + if self.dir_: + self.rmtree(self.dir_, True) + + src_lock = dest_lock = dir_cleanup = None + try: + if islocal(dest): + dir_cleanup = DirCleanup(dest) + + abspath = origsource + copy = False + if src_repo.cancopy() and islocal(dest): + abspath = os.path.abspath(util.drop_scheme('file', origsource)) + copy = not pull and not rev + + if copy: + try: + # we use a lock here because if we race with commit, we + # can end up with extra data in the cloned revlogs that's + # not pointed 
to by changesets, thus causing verify to + # fail + src_lock = src_repo.lock(wait=False) + except error.LockError: + copy = False + + if copy: + src_repo.hook('preoutgoing', throw=True, source='clone') + hgdir = os.path.realpath(os.path.join(dest, ".hg")) + if not os.path.exists(dest): + os.mkdir(dest) + else: + # only clean up directories we create ourselves + dir_cleanup.dir_ = hgdir + try: + dest_path = hgdir + os.mkdir(dest_path) + except OSError, inst: + if inst.errno == errno.EEXIST: + dir_cleanup.close() + raise util.Abort(_("destination '%s' already exists") + % dest) + raise + + hardlink = None + num = 0 + for f in src_repo.store.copylist(): + src = os.path.join(src_repo.sharedpath, f) + dst = os.path.join(dest_path, f) + dstbase = os.path.dirname(dst) + if dstbase and not os.path.exists(dstbase): + os.mkdir(dstbase) + if os.path.exists(src): + if dst.endswith('data'): + # lock to avoid premature writing to the target + dest_lock = lock.lock(os.path.join(dstbase, "lock")) + hardlink, n = util.copyfiles(src, dst, hardlink) + num += n + if hardlink: + ui.debug("linked %d files\n" % num) + else: + ui.debug("copied %d files\n" % num) + + # we need to re-init the repo after manually copying the data + # into it + dest_repo = repository(ui, dest) + src_repo.hook('outgoing', source='clone', + node=node.hex(node.nullid)) + else: + try: + dest_repo = repository(ui, dest, create=True) + except OSError, inst: + if inst.errno == errno.EEXIST: + dir_cleanup.close() + raise util.Abort(_("destination '%s' already exists") + % dest) + raise + + revs = None + if rev: + if 'lookup' not in src_repo.capabilities: + raise util.Abort(_("src repository does not support " + "revision lookup and so doesn't " + "support clone by revision")) + revs = [src_repo.lookup(r) for r in rev] + checkout = revs[0] + if dest_repo.local(): + dest_repo.clone(src_repo, heads=revs, stream=stream) + elif src_repo.local(): + src_repo.push(dest_repo, revs=revs) + else: + raise util.Abort(_("clone from remote to remote not supported")) + + if dir_cleanup: + dir_cleanup.close() + + if dest_repo.local(): + fp = dest_repo.opener("hgrc", "w", text=True) + fp.write("[paths]\n") + fp.write("default = %s\n" % abspath) + fp.close() + + dest_repo.ui.setconfig('paths', 'default', abspath) + + if update: + if update is not True: + checkout = update + if src_repo.local(): + checkout = src_repo.lookup(update) + for test in (checkout, 'default', 'tip'): + if test is None: + continue + try: + uprev = dest_repo.lookup(test) + break + except error.RepoLookupError: + continue + bn = dest_repo[uprev].branch() + dest_repo.ui.status(_("updating to branch %s\n") + % encoding.tolocal(bn)) + _update(dest_repo, uprev) + + return src_repo, dest_repo + finally: + release(src_lock, dest_lock) + if dir_cleanup is not None: + dir_cleanup.cleanup() + +def _showstats(repo, stats): + repo.ui.status(_("%d files updated, %d files merged, " + "%d files removed, %d files unresolved\n") % stats) + +def update(repo, node): + """update the working directory to node, merging linear changes""" + stats = mergemod.update(repo, node, False, False, None) + _showstats(repo, stats) + if stats[3]: + repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n")) + return stats[3] > 0 + +# naming conflict in clone() +_update = update + +def clean(repo, node, show_stats=True): + """forcibly switch the working directory to node, clobbering changes""" + stats = mergemod.update(repo, node, False, True, None) + if show_stats: + _showstats(repo, stats) + return stats[3] > 
0 + +def merge(repo, node, force=None, remind=True): + """branch merge with node, resolving changes""" + stats = mergemod.update(repo, node, True, force, False) + _showstats(repo, stats) + if stats[3]: + repo.ui.status(_("use 'hg resolve' to retry unresolved file merges " + "or 'hg update -C .' to abandon\n")) + elif remind: + repo.ui.status(_("(branch merge, don't forget to commit)\n")) + return stats[3] > 0 + +def _incoming(displaychlist, subreporecurse, ui, repo, source, + opts, buffered=False): + """ + Helper for incoming / gincoming. + displaychlist gets called with + (remoterepo, incomingchangesetlist, displayer) parameters, + and is supposed to contain only code that can't be unified. + """ + source, branches = parseurl(ui.expandpath(source), opts.get('branch')) + other = repository(remoteui(repo, opts), source) + ui.status(_('comparing with %s\n') % url.hidepassword(source)) + revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev')) + + if revs: + revs = [other.lookup(rev) for rev in revs] + other, incoming, bundle = bundlerepo.getremotechanges(ui, repo, other, revs, + opts["bundle"], opts["force"]) + if incoming is None: + ui.status(_("no changes found\n")) + return subreporecurse() + + try: + chlist = other.changelog.nodesbetween(incoming, revs)[0] + displayer = cmdutil.show_changeset(ui, other, opts, buffered) + + # XXX once graphlog extension makes it into core, + # should be replaced by a if graph/else + displaychlist(other, chlist, displayer) + + displayer.close() + finally: + if hasattr(other, 'close'): + other.close() + if bundle: + os.unlink(bundle) + subreporecurse() + return 0 # exit code is zero since we found incoming changes + +def incoming(ui, repo, source, opts): + def subreporecurse(): + ret = 1 + if opts.get('subrepos'): + ctx = repo[None] + for subpath in sorted(ctx.substate): + sub = ctx.sub(subpath) + ret = min(ret, sub.incoming(ui, source, opts)) + return ret + + def display(other, chlist, displayer): + limit = cmdutil.loglimit(opts) + if opts.get('newest_first'): + chlist.reverse() + count = 0 + for n in chlist: + if limit is not None and count >= limit: + break + parents = [p for p in other.changelog.parents(n) if p != nullid] + if opts.get('no_merges') and len(parents) == 2: + continue + count += 1 + displayer.show(other[n]) + return _incoming(display, subreporecurse, ui, repo, source, opts) + +def _outgoing(ui, repo, dest, opts): + dest = ui.expandpath(dest or 'default-push', dest or 'default') + dest, branches = parseurl(dest, opts.get('branch')) + revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev')) + if revs: + revs = [repo.lookup(rev) for rev in revs] + + other = repository(remoteui(repo, opts), dest) + ui.status(_('comparing with %s\n') % url.hidepassword(dest)) + o = discovery.findoutgoing(repo, other, force=opts.get('force')) + if not o: + ui.status(_("no changes found\n")) + return None + + return repo.changelog.nodesbetween(o, revs)[0] + +def outgoing(ui, repo, dest, opts): + def recurse(): + ret = 1 + if opts.get('subrepos'): + ctx = repo[None] + for subpath in sorted(ctx.substate): + sub = ctx.sub(subpath) + ret = min(ret, sub.outgoing(ui, dest, opts)) + return ret + + limit = cmdutil.loglimit(opts) + o = _outgoing(ui, repo, dest, opts) + if o is None: + return recurse() + + if opts.get('newest_first'): + o.reverse() + displayer = cmdutil.show_changeset(ui, repo, opts) + count = 0 + for n in o: + if limit is not None and count >= limit: + break + parents = [p for p in repo.changelog.parents(n) if p != nullid] + if 
opts.get('no_merges') and len(parents) == 2: + continue + count += 1 + displayer.show(repo[n]) + displayer.close() + recurse() + return 0 # exit code is zero since we found outgoing changes + +def revert(repo, node, choose): + """revert changes to revision in node without updating dirstate""" + return mergemod.update(repo, node, False, True, choose)[3] > 0 + +def verify(repo): + """verify the consistency of a repository""" + return verifymod.verify(repo) + +def remoteui(src, opts): + 'build a remote ui from ui or repo and opts' + if hasattr(src, 'baseui'): # looks like a repository + dst = src.baseui.copy() # drop repo-specific config + src = src.ui # copy target options from repo + else: # assume it's a global ui object + dst = src.copy() # keep all global options + + # copy ssh-specific options + for o in 'ssh', 'remotecmd': + v = opts.get(o) or src.config('ui', o) + if v: + dst.setconfig("ui", o, v) + + # copy bundle-specific options + r = src.config('bundle', 'mainreporoot') + if r: + dst.setconfig('bundle', 'mainreporoot', r) + + # copy selected local settings to the remote ui + for sect in ('auth', 'http_proxy'): + for key, val in src.configitems(sect): + dst.setconfig(sect, key, val) + v = src.config('web', 'cacerts') + if v: + dst.setconfig('web', 'cacerts', v) + + return dst diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/hg.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/hg.pyo Binary files differnew file mode 100644 index 0000000..3e25c2b --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/hg.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/hgweb/__init__.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/hgweb/__init__.py new file mode 100644 index 0000000..dd4d089 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/hgweb/__init__.py @@ -0,0 +1,31 @@ +# hgweb/__init__.py - web interface to a mercurial repository +# +# Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net> +# Copyright 2005 Matt Mackall <mpm@selenic.com> +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. 
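# Editorial usage sketch -- not part of this package. The classic WSGI/CGI
# publishing script is built on the hgweb() and hgwebdir() helpers defined
# below; the repository paths are placeholders.
from mercurial.hgweb import hgweb, hgwebdir

application = hgweb('/srv/hg/myrepo', name='myrepo')   # single-repo view
# For several repositories published under virtual paths:
# application = hgwebdir({'projA': '/srv/hg/projA', 'projB': '/srv/hg/projB'})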
+ +import os +import hgweb_mod, hgwebdir_mod + +def hgweb(config, name=None, baseui=None): + '''create an hgweb wsgi object + + config can be one of: + - repo object (single repo view) + - path to repo (single repo view) + - path to config file (multi-repo view) + - dict of virtual:real pairs (multi-repo view) + - list of virtual:real tuples (multi-repo view) + ''' + + if ((isinstance(config, str) and not os.path.isdir(config)) or + isinstance(config, dict) or isinstance(config, list)): + # create a multi-dir interface + return hgwebdir_mod.hgwebdir(config, baseui=baseui) + return hgweb_mod.hgweb(config, name=name, baseui=baseui) + +def hgwebdir(config, baseui=None): + return hgwebdir_mod.hgwebdir(config, baseui=baseui) + diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/hgweb/__init__.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/hgweb/__init__.pyo Binary files differnew file mode 100644 index 0000000..fe872c0 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/hgweb/__init__.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/hgweb/common.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/hgweb/common.py new file mode 100644 index 0000000..eb7e02a --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/hgweb/common.py @@ -0,0 +1,161 @@ +# hgweb/common.py - Utility functions needed by hgweb_mod and hgwebdir_mod +# +# Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net> +# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com> +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +import errno, mimetypes, os + +HTTP_OK = 200 +HTTP_NOT_MODIFIED = 304 +HTTP_BAD_REQUEST = 400 +HTTP_UNAUTHORIZED = 401 +HTTP_FORBIDDEN = 403 +HTTP_NOT_FOUND = 404 +HTTP_METHOD_NOT_ALLOWED = 405 +HTTP_SERVER_ERROR = 500 + +# Hooks for hgweb permission checks; extensions can add hooks here. Each hook +# is invoked like this: hook(hgweb, request, operation), where operation is +# either read, pull or push. Hooks should either raise an ErrorResponse +# exception, or just return. +# It is possible to do both authentication and authorization through this. +permhooks = [] + +def checkauthz(hgweb, req, op): + '''Check permission for operation based on request data (including + authentication info). 
Return if op allowed, else raise an ErrorResponse + exception.''' + + user = req.env.get('REMOTE_USER') + + deny_read = hgweb.configlist('web', 'deny_read') + if deny_read and (not user or deny_read == ['*'] or user in deny_read): + raise ErrorResponse(HTTP_UNAUTHORIZED, 'read not authorized') + + allow_read = hgweb.configlist('web', 'allow_read') + result = (not allow_read) or (allow_read == ['*']) + if not (result or user in allow_read): + raise ErrorResponse(HTTP_UNAUTHORIZED, 'read not authorized') + + if op == 'pull' and not hgweb.allowpull: + raise ErrorResponse(HTTP_UNAUTHORIZED, 'pull not authorized') + elif op == 'pull' or op is None: # op is None for interface requests + return + + # enforce that you can only push using POST requests + if req.env['REQUEST_METHOD'] != 'POST': + msg = 'push requires POST request' + raise ErrorResponse(HTTP_METHOD_NOT_ALLOWED, msg) + + # require ssl by default for pushing, auth info cannot be sniffed + # and replayed + scheme = req.env.get('wsgi.url_scheme') + if hgweb.configbool('web', 'push_ssl', True) and scheme != 'https': + raise ErrorResponse(HTTP_OK, 'ssl required') + + deny = hgweb.configlist('web', 'deny_push') + if deny and (not user or deny == ['*'] or user in deny): + raise ErrorResponse(HTTP_UNAUTHORIZED, 'push not authorized') + + allow = hgweb.configlist('web', 'allow_push') + result = allow and (allow == ['*'] or user in allow) + if not result: + raise ErrorResponse(HTTP_UNAUTHORIZED, 'push not authorized') + +# Add the default permhook, which provides simple authorization. +permhooks.append(checkauthz) + + +class ErrorResponse(Exception): + def __init__(self, code, message=None, headers=[]): + Exception.__init__(self) + self.code = code + self.headers = headers + if message is not None: + self.message = message + else: + self.message = _statusmessage(code) + +def _statusmessage(code): + from BaseHTTPServer import BaseHTTPRequestHandler + responses = BaseHTTPRequestHandler.responses + return responses.get(code, ('Error', 'Unknown error'))[0] + +def statusmessage(code, message=None): + return '%d %s' % (code, message or _statusmessage(code)) + +def get_mtime(spath): + cl_path = os.path.join(spath, "00changelog.i") + if os.path.exists(cl_path): + return os.stat(cl_path).st_mtime + else: + return os.stat(spath).st_mtime + +def staticfile(directory, fname, req): + """return a file inside directory with guessed Content-Type header + + fname always uses '/' as directory separator and isn't allowed to + contain unusual path components. + Content-Type is guessed using the mimetypes module. + Return an empty string if fname is illegal or file not found. 
+ + """ + parts = fname.split('/') + for part in parts: + if (part in ('', os.curdir, os.pardir) or + os.sep in part or os.altsep is not None and os.altsep in part): + return "" + fpath = os.path.join(*parts) + if isinstance(directory, str): + directory = [directory] + for d in directory: + path = os.path.join(d, fpath) + if os.path.exists(path): + break + try: + os.stat(path) + ct = mimetypes.guess_type(path)[0] or "text/plain" + req.respond(HTTP_OK, ct, length = os.path.getsize(path)) + return open(path, 'rb').read() + except TypeError: + raise ErrorResponse(HTTP_SERVER_ERROR, 'illegal filename') + except OSError, err: + if err.errno == errno.ENOENT: + raise ErrorResponse(HTTP_NOT_FOUND) + else: + raise ErrorResponse(HTTP_SERVER_ERROR, err.strerror) + +def paritygen(stripecount, offset=0): + """count parity of horizontal stripes for easier reading""" + if stripecount and offset: + # account for offset, e.g. due to building the list in reverse + count = (stripecount + offset) % stripecount + parity = (stripecount + offset) / stripecount & 1 + else: + count = 0 + parity = 0 + while True: + yield parity + count += 1 + if stripecount and count >= stripecount: + parity = 1 - parity + count = 0 + +def get_contact(config): + """Return repo contact information or empty string. + + web.contact is the primary source, but if that is not set, try + ui.username or $EMAIL as a fallback to display something useful. + """ + return (config("web", "contact") or + config("ui", "username") or + os.environ.get("EMAIL") or "") + +def caching(web, req): + tag = str(web.mtime) + if req.env.get('HTTP_IF_NONE_MATCH') == tag: + raise ErrorResponse(HTTP_NOT_MODIFIED) + req.headers.append(('ETag', tag)) diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/hgweb/common.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/hgweb/common.pyo Binary files differnew file mode 100644 index 0000000..c743684 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/hgweb/common.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/hgweb/hgweb_mod.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/hgweb/hgweb_mod.py new file mode 100644 index 0000000..f886277 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/hgweb/hgweb_mod.py @@ -0,0 +1,290 @@ +# hgweb/hgweb_mod.py - Web interface for a repository. +# +# Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net> +# Copyright 2005-2007 Matt Mackall <mpm@selenic.com> +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. 
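# Editorial sketch -- not part of hgweb_mod.py. common.py above notes that
# extensions may append their own hooks to ``permhooks``; each hook is called
# as hook(hgweb, req, op) and either returns or raises ErrorResponse. The
# hook below is a hypothetical example of that contract.
from mercurial.hgweb.common import permhooks, ErrorResponse, HTTP_UNAUTHORIZED

def denyanonymouspush(hgweb, req, op):
    # refuse push operations for requests without an authenticated user
    if op == 'push' and not req.env.get('REMOTE_USER'):
        raise ErrorResponse(HTTP_UNAUTHORIZED, 'push requires authentication')

permhooks.append(denyanonymouspush)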
+ +import os +from mercurial import ui, hg, hook, error, encoding, templater +from common import get_mtime, ErrorResponse, permhooks, caching +from common import HTTP_OK, HTTP_NOT_MODIFIED, HTTP_BAD_REQUEST +from common import HTTP_NOT_FOUND, HTTP_SERVER_ERROR +from request import wsgirequest +import webcommands, protocol, webutil + +perms = { + 'changegroup': 'pull', + 'changegroupsubset': 'pull', + 'stream_out': 'pull', + 'listkeys': 'pull', + 'unbundle': 'push', + 'pushkey': 'push', +} + +class hgweb(object): + def __init__(self, repo, name=None, baseui=None): + if isinstance(repo, str): + if baseui: + u = baseui.copy() + else: + u = ui.ui() + self.repo = hg.repository(u, repo) + else: + self.repo = repo + + self.repo.ui.setconfig('ui', 'report_untrusted', 'off') + self.repo.ui.setconfig('ui', 'interactive', 'off') + hook.redirect(True) + self.mtime = -1 + self.reponame = name + self.archives = 'zip', 'gz', 'bz2' + self.stripecount = 1 + # a repo owner may set web.templates in .hg/hgrc to get any file + # readable by the user running the CGI script + self.templatepath = self.config('web', 'templates') + + # The CGI scripts are often run by a user different from the repo owner. + # Trust the settings from the .hg/hgrc files by default. + def config(self, section, name, default=None, untrusted=True): + return self.repo.ui.config(section, name, default, + untrusted=untrusted) + + def configbool(self, section, name, default=False, untrusted=True): + return self.repo.ui.configbool(section, name, default, + untrusted=untrusted) + + def configlist(self, section, name, default=None, untrusted=True): + return self.repo.ui.configlist(section, name, default, + untrusted=untrusted) + + def refresh(self, request=None): + if request: + self.repo.ui.environ = request.env + mtime = get_mtime(self.repo.spath) + if mtime != self.mtime: + self.mtime = mtime + self.repo = hg.repository(self.repo.ui, self.repo.root) + self.maxchanges = int(self.config("web", "maxchanges", 10)) + self.stripecount = int(self.config("web", "stripes", 1)) + self.maxshortchanges = int(self.config("web", "maxshortchanges", 60)) + self.maxfiles = int(self.config("web", "maxfiles", 10)) + self.allowpull = self.configbool("web", "allowpull", True) + encoding.encoding = self.config("web", "encoding", + encoding.encoding) + + def run(self): + if not os.environ.get('GATEWAY_INTERFACE', '').startswith("CGI/1."): + raise RuntimeError("This function is only intended to be " + "called while running as a CGI script.") + import mercurial.hgweb.wsgicgi as wsgicgi + wsgicgi.launch(self) + + def __call__(self, env, respond): + req = wsgirequest(env, respond) + return self.run_wsgi(req) + + def run_wsgi(self, req): + + self.refresh(req) + + # work with CGI variables to create coherent structure + # use SCRIPT_NAME, PATH_INFO and QUERY_STRING as well as our REPO_NAME + + req.url = req.env['SCRIPT_NAME'] + if not req.url.endswith('/'): + req.url += '/' + if 'REPO_NAME' in req.env: + req.url += req.env['REPO_NAME'] + '/' + + if 'PATH_INFO' in req.env: + parts = req.env['PATH_INFO'].strip('/').split('/') + repo_parts = req.env.get('REPO_NAME', '').split('/') + if parts[:len(repo_parts)] == repo_parts: + parts = parts[len(repo_parts):] + query = '/'.join(parts) + else: + query = req.env['QUERY_STRING'].split('&', 1)[0] + query = query.split(';', 1)[0] + + # process this if it's a protocol request + # protocol bits don't need to create any URLs + # and the clients always use the old URL structure + + cmd = req.form.get('cmd', [''])[0] + if 
protocol.iscmd(cmd): + if query: + raise ErrorResponse(HTTP_NOT_FOUND) + if cmd in perms: + try: + self.check_perm(req, perms[cmd]) + except ErrorResponse, inst: + if cmd == 'unbundle': + req.drain() + req.respond(inst, protocol.HGTYPE) + return '0\n%s\n' % inst.message + return protocol.call(self.repo, req, cmd) + + # translate user-visible url structure to internal structure + + args = query.split('/', 2) + if 'cmd' not in req.form and args and args[0]: + + cmd = args.pop(0) + style = cmd.rfind('-') + if style != -1: + req.form['style'] = [cmd[:style]] + cmd = cmd[style + 1:] + + # avoid accepting e.g. style parameter as command + if hasattr(webcommands, cmd): + req.form['cmd'] = [cmd] + else: + cmd = '' + + if cmd == 'static': + req.form['file'] = ['/'.join(args)] + else: + if args and args[0]: + node = args.pop(0) + req.form['node'] = [node] + if args: + req.form['file'] = args + + ua = req.env.get('HTTP_USER_AGENT', '') + if cmd == 'rev' and 'mercurial' in ua: + req.form['style'] = ['raw'] + + if cmd == 'archive': + fn = req.form['node'][0] + for type_, spec in self.archive_specs.iteritems(): + ext = spec[2] + if fn.endswith(ext): + req.form['node'] = [fn[:-len(ext)]] + req.form['type'] = [type_] + + # process the web interface request + + try: + tmpl = self.templater(req) + ctype = tmpl('mimetype', encoding=encoding.encoding) + ctype = templater.stringify(ctype) + + # check read permissions non-static content + if cmd != 'static': + self.check_perm(req, None) + + if cmd == '': + req.form['cmd'] = [tmpl.cache['default']] + cmd = req.form['cmd'][0] + + caching(self, req) # sets ETag header or raises NOT_MODIFIED + if cmd not in webcommands.__all__: + msg = 'no such method: %s' % cmd + raise ErrorResponse(HTTP_BAD_REQUEST, msg) + elif cmd == 'file' and 'raw' in req.form.get('style', []): + self.ctype = ctype + content = webcommands.rawfile(self, req, tmpl) + else: + content = getattr(webcommands, cmd)(self, req, tmpl) + req.respond(HTTP_OK, ctype) + + return content + + except error.LookupError, err: + req.respond(HTTP_NOT_FOUND, ctype) + msg = str(err) + if 'manifest' not in msg: + msg = 'revision not found: %s' % err.name + return tmpl('error', error=msg) + except (error.RepoError, error.RevlogError), inst: + req.respond(HTTP_SERVER_ERROR, ctype) + return tmpl('error', error=str(inst)) + except ErrorResponse, inst: + req.respond(inst, ctype) + if inst.code == HTTP_NOT_MODIFIED: + # Not allowed to return a body on a 304 + return [''] + return tmpl('error', error=inst.message) + + def templater(self, req): + + # determine scheme, port and server name + # this is needed to create absolute urls + + proto = req.env.get('wsgi.url_scheme') + if proto == 'https': + proto = 'https' + default_port = "443" + else: + proto = 'http' + default_port = "80" + + port = req.env["SERVER_PORT"] + port = port != default_port and (":" + port) or "" + urlbase = '%s://%s%s' % (proto, req.env['SERVER_NAME'], port) + staticurl = self.config("web", "staticurl") or req.url + 'static/' + if not staticurl.endswith('/'): + staticurl += '/' + + # some functions for the templater + + def header(**map): + yield tmpl('header', encoding=encoding.encoding, **map) + + def footer(**map): + yield tmpl("footer", **map) + + def motd(**map): + yield self.config("web", "motd", "") + + # figure out which style to use + + vars = {} + styles = ( + req.form.get('style', [None])[0], + self.config('web', 'style'), + 'paper', + ) + style, mapfile = templater.stylemap(styles, self.templatepath) + if style == styles[0]: + vars['style'] 
= style + + start = req.url[-1] == '?' and '&' or '?' + sessionvars = webutil.sessionvars(vars, start) + + if not self.reponame: + self.reponame = (self.config("web", "name") + or req.env.get('REPO_NAME') + or req.url.strip('/') or self.repo.root) + + # create the templater + + tmpl = templater.templater(mapfile, + defaults={"url": req.url, + "staticurl": staticurl, + "urlbase": urlbase, + "repo": self.reponame, + "header": header, + "footer": footer, + "motd": motd, + "sessionvars": sessionvars + }) + return tmpl + + def archivelist(self, nodeid): + allowed = self.configlist("web", "allow_archive") + for i, spec in self.archive_specs.iteritems(): + if i in allowed or self.configbool("web", "allow" + i): + yield {"type" : i, "extension" : spec[2], "node" : nodeid} + + archive_specs = { + 'bz2': ('application/x-bzip2', 'tbz2', '.tar.bz2', None), + 'gz': ('application/x-gzip', 'tgz', '.tar.gz', None), + 'zip': ('application/zip', 'zip', '.zip', None), + } + + def check_perm(self, req, op): + for hook in permhooks: + hook(self, req, op) diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/hgweb/hgweb_mod.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/hgweb/hgweb_mod.pyo Binary files differnew file mode 100644 index 0000000..375c705 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/hgweb/hgweb_mod.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/hgweb/hgwebdir_mod.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/hgweb/hgwebdir_mod.py new file mode 100644 index 0000000..167cf35 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/hgweb/hgwebdir_mod.py @@ -0,0 +1,359 @@ +# hgweb/hgwebdir_mod.py - Web interface for a directory of repositories. +# +# Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net> +# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com> +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +import os, re, time, urlparse +from mercurial.i18n import _ +from mercurial import ui, hg, util, templater +from mercurial import error, encoding +from common import ErrorResponse, get_mtime, staticfile, paritygen, \ + get_contact, HTTP_OK, HTTP_NOT_FOUND, HTTP_SERVER_ERROR +from hgweb_mod import hgweb +from request import wsgirequest +import webutil + +def cleannames(items): + return [(util.pconvert(name).strip('/'), path) for name, path in items] + +def findrepos(paths): + repos = [] + for prefix, root in cleannames(paths): + roothead, roottail = os.path.split(root) + # "foo = /bar/*" makes every subrepo of /bar/ to be + # mounted as foo/subrepo + # and "foo = /bar/**" also recurses into the subdirectories, + # remember to use it without working dir. 
+ try: + recurse = {'*': False, '**': True}[roottail] + except KeyError: + repos.append((prefix, root)) + continue + roothead = os.path.normpath(os.path.abspath(roothead)) + for path in util.walkrepos(roothead, followsym=True, recurse=recurse): + path = os.path.normpath(path) + name = util.pconvert(path[len(roothead):]).strip('/') + if prefix: + name = prefix + '/' + name + repos.append((name, path)) + return repos + +class hgwebdir(object): + refreshinterval = 20 + + def __init__(self, conf, baseui=None): + self.conf = conf + self.baseui = baseui + self.lastrefresh = 0 + self.motd = None + self.refresh() + + def refresh(self): + if self.lastrefresh + self.refreshinterval > time.time(): + return + + if self.baseui: + u = self.baseui.copy() + else: + u = ui.ui() + u.setconfig('ui', 'report_untrusted', 'off') + u.setconfig('ui', 'interactive', 'off') + + if not isinstance(self.conf, (dict, list, tuple)): + map = {'paths': 'hgweb-paths'} + if not os.path.exists(self.conf): + raise util.Abort(_('config file %s not found!') % self.conf) + u.readconfig(self.conf, remap=map, trust=True) + paths = u.configitems('hgweb-paths') + elif isinstance(self.conf, (list, tuple)): + paths = self.conf + elif isinstance(self.conf, dict): + paths = self.conf.items() + + repos = findrepos(paths) + for prefix, root in u.configitems('collections'): + prefix = util.pconvert(prefix) + for path in util.walkrepos(root, followsym=True): + repo = os.path.normpath(path) + name = util.pconvert(repo) + if name.startswith(prefix): + name = name[len(prefix):] + repos.append((name.lstrip('/'), repo)) + + self.repos = repos + self.ui = u + encoding.encoding = self.ui.config('web', 'encoding', + encoding.encoding) + self.style = self.ui.config('web', 'style', 'paper') + self.templatepath = self.ui.config('web', 'templates', None) + self.stripecount = self.ui.config('web', 'stripes', 1) + if self.stripecount: + self.stripecount = int(self.stripecount) + self._baseurl = self.ui.config('web', 'baseurl') + self.lastrefresh = time.time() + + def run(self): + if not os.environ.get('GATEWAY_INTERFACE', '').startswith("CGI/1."): + raise RuntimeError("This function is only intended to be " + "called while running as a CGI script.") + import mercurial.hgweb.wsgicgi as wsgicgi + wsgicgi.launch(self) + + def __call__(self, env, respond): + req = wsgirequest(env, respond) + return self.run_wsgi(req) + + def read_allowed(self, ui, req): + """Check allow_read and deny_read config options of a repo's ui object + to determine user permissions. By default, with neither option set (or + both empty), allow all users to read the repo. There are two ways a + user can be denied read access: (1) deny_read is not empty, and the + user is unauthenticated or deny_read contains user (or *), and (2) + allow_read is not empty and the user is not in allow_read. 
Return True + if user is allowed to read the repo, else return False.""" + + user = req.env.get('REMOTE_USER') + + deny_read = ui.configlist('web', 'deny_read', untrusted=True) + if deny_read and (not user or deny_read == ['*'] or user in deny_read): + return False + + allow_read = ui.configlist('web', 'allow_read', untrusted=True) + # by default, allow reading if no allow_read option has been set + if (not allow_read) or (allow_read == ['*']) or (user in allow_read): + return True + + return False + + def run_wsgi(self, req): + try: + try: + self.refresh() + + virtual = req.env.get("PATH_INFO", "").strip('/') + tmpl = self.templater(req) + ctype = tmpl('mimetype', encoding=encoding.encoding) + ctype = templater.stringify(ctype) + + # a static file + if virtual.startswith('static/') or 'static' in req.form: + if virtual.startswith('static/'): + fname = virtual[7:] + else: + fname = req.form['static'][0] + static = templater.templatepath('static') + return (staticfile(static, fname, req),) + + # top-level index + elif not virtual: + req.respond(HTTP_OK, ctype) + return self.makeindex(req, tmpl) + + # nested indexes and hgwebs + + repos = dict(self.repos) + virtualrepo = virtual + while virtualrepo: + real = repos.get(virtualrepo) + if real: + req.env['REPO_NAME'] = virtualrepo + try: + repo = hg.repository(self.ui, real) + return hgweb(repo).run_wsgi(req) + except IOError, inst: + msg = inst.strerror + raise ErrorResponse(HTTP_SERVER_ERROR, msg) + except error.RepoError, inst: + raise ErrorResponse(HTTP_SERVER_ERROR, str(inst)) + + up = virtualrepo.rfind('/') + if up < 0: + break + virtualrepo = virtualrepo[:up] + + # browse subdirectories + subdir = virtual + '/' + if [r for r in repos if r.startswith(subdir)]: + req.respond(HTTP_OK, ctype) + return self.makeindex(req, tmpl, subdir) + + # prefixes not found + req.respond(HTTP_NOT_FOUND, ctype) + return tmpl("notfound", repo=virtual) + + except ErrorResponse, err: + req.respond(err, ctype) + return tmpl('error', error=err.message or '') + finally: + tmpl = None + + def makeindex(self, req, tmpl, subdir=""): + + def archivelist(ui, nodeid, url): + allowed = ui.configlist("web", "allow_archive", untrusted=True) + for i in [('zip', '.zip'), ('gz', '.tar.gz'), ('bz2', '.tar.bz2')]: + if i[0] in allowed or ui.configbool("web", "allow" + i[0], + untrusted=True): + yield {"type" : i[0], "extension": i[1], + "node": nodeid, "url": url} + + def rawentries(subdir="", **map): + + descend = self.ui.configbool('web', 'descend', True) + for name, path in self.repos: + + if not name.startswith(subdir): + continue + name = name[len(subdir):] + if not descend and '/' in name: + continue + + u = self.ui.copy() + try: + u.readconfig(os.path.join(path, '.hg', 'hgrc')) + except Exception, e: + u.warn(_('error reading %s/.hg/hgrc: %s\n') % (path, e)) + continue + def get(section, name, default=None): + return u.config(section, name, default, untrusted=True) + + if u.configbool("web", "hidden", untrusted=True): + continue + + if not self.read_allowed(u, req): + continue + + parts = [name] + if 'PATH_INFO' in req.env: + parts.insert(0, req.env['PATH_INFO'].rstrip('/')) + if req.env['SCRIPT_NAME']: + parts.insert(0, req.env['SCRIPT_NAME']) + url = re.sub(r'/+', '/', '/'.join(parts) + '/') + + # update time with local timezone + try: + r = hg.repository(self.ui, path) + except error.RepoError: + u.warn(_('error accessing repository at %s\n') % path) + continue + try: + d = (get_mtime(r.spath), util.makedate()[1]) + except OSError: + continue + + contact = 
get_contact(get) + description = get("web", "description", "") + name = get("web", "name", name) + row = dict(contact=contact or "unknown", + contact_sort=contact.upper() or "unknown", + name=name, + name_sort=name, + url=url, + description=description or "unknown", + description_sort=description.upper() or "unknown", + lastchange=d, + lastchange_sort=d[1]-d[0], + archives=archivelist(u, "tip", url)) + yield row + + sortdefault = None, False + def entries(sortcolumn="", descending=False, subdir="", **map): + rows = rawentries(subdir=subdir, **map) + + if sortcolumn and sortdefault != (sortcolumn, descending): + sortkey = '%s_sort' % sortcolumn + rows = sorted(rows, key=lambda x: x[sortkey], + reverse=descending) + for row, parity in zip(rows, paritygen(self.stripecount)): + row['parity'] = parity + yield row + + self.refresh() + sortable = ["name", "description", "contact", "lastchange"] + sortcolumn, descending = sortdefault + if 'sort' in req.form: + sortcolumn = req.form['sort'][0] + descending = sortcolumn.startswith('-') + if descending: + sortcolumn = sortcolumn[1:] + if sortcolumn not in sortable: + sortcolumn = "" + + sort = [("sort_%s" % column, + "%s%s" % ((not descending and column == sortcolumn) + and "-" or "", column)) + for column in sortable] + + self.refresh() + self.updatereqenv(req.env) + + return tmpl("index", entries=entries, subdir=subdir, + sortcolumn=sortcolumn, descending=descending, + **dict(sort)) + + def templater(self, req): + + def header(**map): + yield tmpl('header', encoding=encoding.encoding, **map) + + def footer(**map): + yield tmpl("footer", **map) + + def motd(**map): + if self.motd is not None: + yield self.motd + else: + yield config('web', 'motd', '') + + def config(section, name, default=None, untrusted=True): + return self.ui.config(section, name, default, untrusted) + + self.updatereqenv(req.env) + + url = req.env.get('SCRIPT_NAME', '') + if not url.endswith('/'): + url += '/' + + vars = {} + styles = ( + req.form.get('style', [None])[0], + config('web', 'style'), + 'paper' + ) + style, mapfile = templater.stylemap(styles, self.templatepath) + if style == styles[0]: + vars['style'] = style + + start = url[-1] == '?' and '&' or '?' 
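# Descriptive note (not part of the upstream file): 'start' uses the
# pre-Python-2.5 "cond and a or b" idiom to pick the separator used when
# appending session variables to the URL: '&' if the script URL already
# ends in '?', otherwise '?'. It is handed to webutil.sessionvars on the
# next line, which yields name/value/separator triples for the templates.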
+ sessionvars = webutil.sessionvars(vars, start) + staticurl = config('web', 'staticurl') or url + 'static/' + if not staticurl.endswith('/'): + staticurl += '/' + + tmpl = templater.templater(mapfile, + defaults={"header": header, + "footer": footer, + "motd": motd, + "url": url, + "staticurl": staticurl, + "sessionvars": sessionvars}) + return tmpl + + def updatereqenv(self, env): + def splitnetloc(netloc): + if ':' in netloc: + return netloc.split(':', 1) + else: + return (netloc, None) + + if self._baseurl is not None: + urlcomp = urlparse.urlparse(self._baseurl) + host, port = splitnetloc(urlcomp[1]) + path = urlcomp[2] + env['SERVER_NAME'] = host + if port: + env['SERVER_PORT'] = port + env['SCRIPT_NAME'] = path diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/hgweb/hgwebdir_mod.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/hgweb/hgwebdir_mod.pyo Binary files differnew file mode 100644 index 0000000..fa80ba1 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/hgweb/hgwebdir_mod.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/hgweb/protocol.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/hgweb/protocol.py new file mode 100644 index 0000000..c87805f --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/hgweb/protocol.py @@ -0,0 +1,75 @@ +# +# Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net> +# Copyright 2005-2007 Matt Mackall <mpm@selenic.com> +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +import cStringIO, zlib, sys, urllib +from mercurial import util, wireproto +from common import HTTP_OK + +HGTYPE = 'application/mercurial-0.1' + +class webproto(object): + def __init__(self, req): + self.req = req + self.response = '' + def getargs(self, args): + data = {} + keys = args.split() + for k in keys: + if k == '*': + star = {} + for key in self.req.form.keys(): + if key not in keys: + star[key] = self.req.form[key][0] + data['*'] = star + else: + data[k] = self.req.form[k][0] + return [data[k] for k in keys] + def getfile(self, fp): + length = int(self.req.env['CONTENT_LENGTH']) + for s in util.filechunkiter(self.req, limit=length): + fp.write(s) + def redirect(self): + self.oldio = sys.stdout, sys.stderr + sys.stderr = sys.stdout = cStringIO.StringIO() + def groupchunks(self, cg): + z = zlib.compressobj() + while 1: + chunk = cg.read(4096) + if not chunk: + break + yield z.compress(chunk) + yield z.flush() + def _client(self): + return 'remote:%s:%s:%s' % ( + self.req.env.get('wsgi.url_scheme') or 'http', + urllib.quote(self.req.env.get('REMOTE_HOST', '')), + urllib.quote(self.req.env.get('REMOTE_USER', ''))) + +def iscmd(cmd): + return cmd in wireproto.commands + +def call(repo, req, cmd): + p = webproto(req) + rsp = wireproto.dispatch(repo, p, cmd) + if isinstance(rsp, str): + req.respond(HTTP_OK, HGTYPE, length=len(rsp)) + return [rsp] + elif isinstance(rsp, wireproto.streamres): + req.respond(HTTP_OK, HGTYPE) + return rsp.gen + elif isinstance(rsp, wireproto.pushres): + val = sys.stdout.getvalue() + sys.stdout, sys.stderr = p.oldio + req.respond(HTTP_OK, HGTYPE) + return ['%d\n%s' % (rsp.res, val)] + elif isinstance(rsp, wireproto.pusherr): + # drain the incoming bundle + req.drain() + sys.stdout, sys.stderr = p.oldio + rsp = '0\n%s\n' % rsp.res + req.respond(HTTP_OK, HGTYPE, length=len(rsp)) + return [rsp] diff --git 
a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/hgweb/protocol.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/hgweb/protocol.pyo Binary files differnew file mode 100644 index 0000000..a747da1 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/hgweb/protocol.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/hgweb/request.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/hgweb/request.py new file mode 100644 index 0000000..cb68664 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/hgweb/request.py @@ -0,0 +1,147 @@ +# hgweb/request.py - An http request from either CGI or the standalone server. +# +# Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net> +# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com> +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +import socket, cgi, errno +from mercurial import util +from common import ErrorResponse, statusmessage, HTTP_NOT_MODIFIED + +shortcuts = { + 'cl': [('cmd', ['changelog']), ('rev', None)], + 'sl': [('cmd', ['shortlog']), ('rev', None)], + 'cs': [('cmd', ['changeset']), ('node', None)], + 'f': [('cmd', ['file']), ('filenode', None)], + 'fl': [('cmd', ['filelog']), ('filenode', None)], + 'fd': [('cmd', ['filediff']), ('node', None)], + 'fa': [('cmd', ['annotate']), ('filenode', None)], + 'mf': [('cmd', ['manifest']), ('manifest', None)], + 'ca': [('cmd', ['archive']), ('node', None)], + 'tags': [('cmd', ['tags'])], + 'tip': [('cmd', ['changeset']), ('node', ['tip'])], + 'static': [('cmd', ['static']), ('file', None)] +} + +def normalize(form): + # first expand the shortcuts + for k in shortcuts.iterkeys(): + if k in form: + for name, value in shortcuts[k]: + if value is None: + value = form[k] + form[name] = value + del form[k] + # And strip the values + for k, v in form.iteritems(): + form[k] = [i.strip() for i in v] + return form + +class wsgirequest(object): + def __init__(self, wsgienv, start_response): + version = wsgienv['wsgi.version'] + if (version < (1, 0)) or (version >= (2, 0)): + raise RuntimeError("Unknown and unsupported WSGI version %d.%d" + % version) + self.inp = wsgienv['wsgi.input'] + self.err = wsgienv['wsgi.errors'] + self.threaded = wsgienv['wsgi.multithread'] + self.multiprocess = wsgienv['wsgi.multiprocess'] + self.run_once = wsgienv['wsgi.run_once'] + self.env = wsgienv + self.form = normalize(cgi.parse(self.inp, + self.env, + keep_blank_values=1)) + self._start_response = start_response + self.server_write = None + self.headers = [] + + def __iter__(self): + return iter([]) + + def read(self, count=-1): + return self.inp.read(count) + + def drain(self): + '''need to read all data from request, httplib is half-duplex''' + length = int(self.env.get('CONTENT_LENGTH', 0)) + for s in util.filechunkiter(self.inp, limit=length): + pass + + def respond(self, status, type=None, filename=None, length=0): + if self._start_response is not None: + + self.httphdr(type, filename, length) + if not self.headers: + raise RuntimeError("request.write called before headers sent") + + for k, v in self.headers: + if not isinstance(v, str): + raise TypeError('header value must be string: %r' % v) + + if isinstance(status, ErrorResponse): + self.header(status.headers) + if status.code == HTTP_NOT_MODIFIED: + # RFC 2616 Section 10.3.5: 304 Not Modified has cases where + # it MUST NOT include any headers other than these and no + # body + self.headers 
= [(k, v) for (k, v) in self.headers if + k in ('Date', 'ETag', 'Expires', + 'Cache-Control', 'Vary')] + status = statusmessage(status.code, status.message) + elif status == 200: + status = '200 Script output follows' + elif isinstance(status, int): + status = statusmessage(status) + + self.server_write = self._start_response(status, self.headers) + self._start_response = None + self.headers = [] + + def write(self, thing): + if hasattr(thing, "__iter__"): + for part in thing: + self.write(part) + else: + thing = str(thing) + try: + self.server_write(thing) + except socket.error, inst: + if inst[0] != errno.ECONNRESET: + raise + + def writelines(self, lines): + for line in lines: + self.write(line) + + def flush(self): + return None + + def close(self): + return None + + def header(self, headers=[('Content-Type','text/html')]): + self.headers.extend(headers) + + def httphdr(self, type=None, filename=None, length=0, headers={}): + headers = headers.items() + if type is not None: + headers.append(('Content-Type', type)) + if filename: + filename = (filename.split('/')[-1] + .replace('\\', '\\\\').replace('"', '\\"')) + headers.append(('Content-Disposition', + 'inline; filename="%s"' % filename)) + if length: + headers.append(('Content-Length', str(length))) + self.header(headers) + +def wsgiapplication(app_maker): + '''For compatibility with old CGI scripts. A plain hgweb() or hgwebdir() + can and should now be used as a WSGI application.''' + application = app_maker() + def run_wsgi(env, respond): + return application(env, respond) + return run_wsgi diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/hgweb/request.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/hgweb/request.pyo Binary files differnew file mode 100644 index 0000000..2d9b8b2 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/hgweb/request.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/hgweb/server.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/hgweb/server.py new file mode 100644 index 0000000..fa31afa --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/hgweb/server.py @@ -0,0 +1,309 @@ +# hgweb/server.py - The standalone hg web server. +# +# Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net> +# Copyright 2005-2007 Matt Mackall <mpm@selenic.com> +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +import os, sys, errno, urllib, BaseHTTPServer, socket, SocketServer, traceback +from mercurial import util, error +from mercurial.i18n import _ + +def _splitURI(uri): + """ Return path and query splited from uri + + Just like CGI environment, the path is unquoted, the query is + not. + """ + if '?' 
in uri: + path, query = uri.split('?', 1) + else: + path, query = uri, '' + return urllib.unquote(path), query + +class _error_logger(object): + def __init__(self, handler): + self.handler = handler + def flush(self): + pass + def write(self, str): + self.writelines(str.split('\n')) + def writelines(self, seq): + for msg in seq: + self.handler.log_error("HG error: %s", msg) + +class _httprequesthandler(BaseHTTPServer.BaseHTTPRequestHandler): + + url_scheme = 'http' + + @staticmethod + def preparehttpserver(httpserver, ssl_cert): + """Prepare .socket of new HTTPServer instance""" + pass + + def __init__(self, *args, **kargs): + self.protocol_version = 'HTTP/1.1' + BaseHTTPServer.BaseHTTPRequestHandler.__init__(self, *args, **kargs) + + def _log_any(self, fp, format, *args): + fp.write("%s - - [%s] %s\n" % (self.client_address[0], + self.log_date_time_string(), + format % args)) + fp.flush() + + def log_error(self, format, *args): + self._log_any(self.server.errorlog, format, *args) + + def log_message(self, format, *args): + self._log_any(self.server.accesslog, format, *args) + + def do_write(self): + try: + self.do_hgweb() + except socket.error, inst: + if inst[0] != errno.EPIPE: + raise + + def do_POST(self): + try: + self.do_write() + except StandardError: + self._start_response("500 Internal Server Error", []) + self._write("Internal Server Error") + tb = "".join(traceback.format_exception(*sys.exc_info())) + self.log_error("Exception happened during processing " + "request '%s':\n%s", self.path, tb) + + def do_GET(self): + self.do_POST() + + def do_hgweb(self): + path, query = _splitURI(self.path) + + env = {} + env['GATEWAY_INTERFACE'] = 'CGI/1.1' + env['REQUEST_METHOD'] = self.command + env['SERVER_NAME'] = self.server.server_name + env['SERVER_PORT'] = str(self.server.server_port) + env['REQUEST_URI'] = self.path + env['SCRIPT_NAME'] = self.server.prefix + env['PATH_INFO'] = path[len(self.server.prefix):] + env['REMOTE_HOST'] = self.client_address[0] + env['REMOTE_ADDR'] = self.client_address[0] + if query: + env['QUERY_STRING'] = query + + if self.headers.typeheader is None: + env['CONTENT_TYPE'] = self.headers.type + else: + env['CONTENT_TYPE'] = self.headers.typeheader + length = self.headers.getheader('content-length') + if length: + env['CONTENT_LENGTH'] = length + for header in [h for h in self.headers.keys() + if h not in ('content-type', 'content-length')]: + hkey = 'HTTP_' + header.replace('-', '_').upper() + hval = self.headers.getheader(header) + hval = hval.replace('\n', '').strip() + if hval: + env[hkey] = hval + env['SERVER_PROTOCOL'] = self.request_version + env['wsgi.version'] = (1, 0) + env['wsgi.url_scheme'] = self.url_scheme + env['wsgi.input'] = self.rfile + env['wsgi.errors'] = _error_logger(self) + env['wsgi.multithread'] = isinstance(self.server, + SocketServer.ThreadingMixIn) + env['wsgi.multiprocess'] = isinstance(self.server, + SocketServer.ForkingMixIn) + env['wsgi.run_once'] = 0 + + self.close_connection = True + self.saved_status = None + self.saved_headers = [] + self.sent_headers = False + self.length = None + for chunk in self.server.application(env, self._start_response): + self._write(chunk) + + def send_headers(self): + if not self.saved_status: + raise AssertionError("Sending headers before " + "start_response() called") + saved_status = self.saved_status.split(None, 1) + saved_status[0] = int(saved_status[0]) + self.send_response(*saved_status) + should_close = True + for h in self.saved_headers: + self.send_header(*h) + if h[0].lower() == 
'content-length': + should_close = False + self.length = int(h[1]) + # The value of the Connection header is a list of case-insensitive + # tokens separated by commas and optional whitespace. + if 'close' in [token.strip().lower() for token in + self.headers.get('connection', '').split(',')]: + should_close = True + if should_close: + self.send_header('Connection', 'close') + self.close_connection = should_close + self.end_headers() + self.sent_headers = True + + def _start_response(self, http_status, headers, exc_info=None): + code, msg = http_status.split(None, 1) + code = int(code) + self.saved_status = http_status + bad_headers = ('connection', 'transfer-encoding') + self.saved_headers = [h for h in headers + if h[0].lower() not in bad_headers] + return self._write + + def _write(self, data): + if not self.saved_status: + raise AssertionError("data written before start_response() called") + elif not self.sent_headers: + self.send_headers() + if self.length is not None: + if len(data) > self.length: + raise AssertionError("Content-length header sent, but more " + "bytes than specified are being written.") + self.length = self.length - len(data) + self.wfile.write(data) + self.wfile.flush() + +class _httprequesthandleropenssl(_httprequesthandler): + """HTTPS handler based on pyOpenSSL""" + + url_scheme = 'https' + + @staticmethod + def preparehttpserver(httpserver, ssl_cert): + try: + import OpenSSL + OpenSSL.SSL.Context + except ImportError: + raise util.Abort(_("SSL support is unavailable")) + ctx = OpenSSL.SSL.Context(OpenSSL.SSL.SSLv23_METHOD) + ctx.use_privatekey_file(ssl_cert) + ctx.use_certificate_file(ssl_cert) + sock = socket.socket(httpserver.address_family, httpserver.socket_type) + httpserver.socket = OpenSSL.SSL.Connection(ctx, sock) + httpserver.server_bind() + httpserver.server_activate() + + def setup(self): + self.connection = self.request + self.rfile = socket._fileobject(self.request, "rb", self.rbufsize) + self.wfile = socket._fileobject(self.request, "wb", self.wbufsize) + + def do_write(self): + import OpenSSL + try: + _httprequesthandler.do_write(self) + except OpenSSL.SSL.SysCallError, inst: + if inst.args[0] != errno.EPIPE: + raise + + def handle_one_request(self): + import OpenSSL + try: + _httprequesthandler.handle_one_request(self) + except (OpenSSL.SSL.SysCallError, OpenSSL.SSL.ZeroReturnError): + self.close_connection = True + pass + +class _httprequesthandlerssl(_httprequesthandler): + """HTTPS handler based on Pythons ssl module (introduced in 2.6)""" + + url_scheme = 'https' + + @staticmethod + def preparehttpserver(httpserver, ssl_cert): + try: + import ssl + ssl.wrap_socket + except ImportError: + raise util.Abort(_("SSL support is unavailable")) + httpserver.socket = ssl.wrap_socket(httpserver.socket, server_side=True, + certfile=ssl_cert, ssl_version=ssl.PROTOCOL_SSLv23) + + def setup(self): + self.connection = self.request + self.rfile = socket._fileobject(self.request, "rb", self.rbufsize) + self.wfile = socket._fileobject(self.request, "wb", self.wbufsize) + +try: + from threading import activeCount + _mixin = SocketServer.ThreadingMixIn +except ImportError: + if hasattr(os, "fork"): + _mixin = SocketServer.ForkingMixIn + else: + class _mixin: + pass + +def openlog(opt, default): + if opt and opt != '-': + return open(opt, 'a') + return default + +class MercurialHTTPServer(object, _mixin, BaseHTTPServer.HTTPServer): + + # SO_REUSEADDR has broken semantics on windows + if os.name == 'nt': + allow_reuse_address = 0 + + def __init__(self, ui, app, 
addr, handler, **kwargs): + BaseHTTPServer.HTTPServer.__init__(self, addr, handler, **kwargs) + self.daemon_threads = True + self.application = app + + handler.preparehttpserver(self, ui.config('web', 'certificate')) + + prefix = ui.config('web', 'prefix', '') + if prefix: + prefix = '/' + prefix.strip('/') + self.prefix = prefix + + alog = openlog(ui.config('web', 'accesslog', '-'), sys.stdout) + elog = openlog(ui.config('web', 'errorlog', '-'), sys.stderr) + self.accesslog = alog + self.errorlog = elog + + self.addr, self.port = self.socket.getsockname()[0:2] + self.fqaddr = socket.getfqdn(addr[0]) + +class IPv6HTTPServer(MercurialHTTPServer): + address_family = getattr(socket, 'AF_INET6', None) + def __init__(self, *args, **kwargs): + if self.address_family is None: + raise error.RepoError(_('IPv6 is not available on this system')) + super(IPv6HTTPServer, self).__init__(*args, **kwargs) + +def create_server(ui, app): + + if ui.config('web', 'certificate'): + if sys.version_info >= (2, 6): + handler = _httprequesthandlerssl + else: + handler = _httprequesthandleropenssl + else: + handler = _httprequesthandler + + if ui.configbool('web', 'ipv6'): + cls = IPv6HTTPServer + else: + cls = MercurialHTTPServer + + # ugly hack due to python issue5853 (for threaded use) + import mimetypes; mimetypes.init() + + address = ui.config('web', 'address', '') + port = util.getport(ui.config('web', 'port', 8000)) + try: + return cls(ui, app, (address, port), handler) + except socket.error, inst: + raise util.Abort(_("cannot start server at '%s:%d': %s") + % (address, port, inst.args[1])) diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/hgweb/server.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/hgweb/server.pyo Binary files differnew file mode 100644 index 0000000..c90e5de --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/hgweb/server.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/hgweb/webcommands.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/hgweb/webcommands.py new file mode 100644 index 0000000..e290da6 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/hgweb/webcommands.py @@ -0,0 +1,783 @@ +# +# Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net> +# Copyright 2005-2007 Matt Mackall <mpm@selenic.com> +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +import os, mimetypes, re, cgi, copy +import webutil +from mercurial import error, encoding, archival, templater, templatefilters +from mercurial.node import short, hex +from mercurial.util import binary +from common import paritygen, staticfile, get_contact, ErrorResponse +from common import HTTP_OK, HTTP_FORBIDDEN, HTTP_NOT_FOUND +from mercurial import graphmod +from mercurial import help as helpmod +from mercurial.i18n import _ + +# __all__ is populated with the allowed commands. Be sure to add to it if +# you're adding a new command, or the new command won't work. 
+ +__all__ = [ + 'log', 'rawfile', 'file', 'changelog', 'shortlog', 'changeset', 'rev', + 'manifest', 'tags', 'branches', 'summary', 'filediff', 'diff', 'annotate', + 'filelog', 'archive', 'static', 'graph', 'help', +] + +def log(web, req, tmpl): + if 'file' in req.form and req.form['file'][0]: + return filelog(web, req, tmpl) + else: + return changelog(web, req, tmpl) + +def rawfile(web, req, tmpl): + path = webutil.cleanpath(web.repo, req.form.get('file', [''])[0]) + if not path: + content = manifest(web, req, tmpl) + req.respond(HTTP_OK, web.ctype) + return content + + try: + fctx = webutil.filectx(web.repo, req) + except error.LookupError, inst: + try: + content = manifest(web, req, tmpl) + req.respond(HTTP_OK, web.ctype) + return content + except ErrorResponse: + raise inst + + path = fctx.path() + text = fctx.data() + mt = mimetypes.guess_type(path)[0] + if mt is None: + mt = binary(text) and 'application/octet-stream' or 'text/plain' + if mt.startswith('text/'): + mt += '; charset="%s"' % encoding.encoding + + req.respond(HTTP_OK, mt, path, len(text)) + return [text] + +def _filerevision(web, tmpl, fctx): + f = fctx.path() + text = fctx.data() + parity = paritygen(web.stripecount) + + if binary(text): + mt = mimetypes.guess_type(f)[0] or 'application/octet-stream' + text = '(binary:%s)' % mt + + def lines(): + for lineno, t in enumerate(text.splitlines(True)): + yield {"line": t, + "lineid": "l%d" % (lineno + 1), + "linenumber": "% 6d" % (lineno + 1), + "parity": parity.next()} + + return tmpl("filerevision", + file=f, + path=webutil.up(f), + text=lines(), + rev=fctx.rev(), + node=hex(fctx.node()), + author=fctx.user(), + date=fctx.date(), + desc=fctx.description(), + branch=webutil.nodebranchnodefault(fctx), + parent=webutil.parents(fctx), + child=webutil.children(fctx), + rename=webutil.renamelink(fctx), + permissions=fctx.manifest().flags(f)) + +def file(web, req, tmpl): + path = webutil.cleanpath(web.repo, req.form.get('file', [''])[0]) + if not path: + return manifest(web, req, tmpl) + try: + return _filerevision(web, tmpl, webutil.filectx(web.repo, req)) + except error.LookupError, inst: + try: + return manifest(web, req, tmpl) + except ErrorResponse: + raise inst + +def _search(web, req, tmpl): + + query = req.form['rev'][0] + revcount = web.maxchanges + if 'revcount' in req.form: + revcount = int(req.form.get('revcount', [revcount])[0]) + tmpl.defaults['sessionvars']['revcount'] = revcount + + lessvars = copy.copy(tmpl.defaults['sessionvars']) + lessvars['revcount'] = revcount / 2 + lessvars['rev'] = query + morevars = copy.copy(tmpl.defaults['sessionvars']) + morevars['revcount'] = revcount * 2 + morevars['rev'] = query + + def changelist(**map): + count = 0 + qw = query.lower().split() + + def revgen(): + for i in xrange(len(web.repo) - 1, 0, -100): + l = [] + for j in xrange(max(0, i - 100), i + 1): + ctx = web.repo[j] + l.append(ctx) + l.reverse() + for e in l: + yield e + + for ctx in revgen(): + miss = 0 + for q in qw: + if not (q in ctx.user().lower() or + q in ctx.description().lower() or + q in " ".join(ctx.files()).lower()): + miss = 1 + break + if miss: + continue + + count += 1 + n = ctx.node() + showtags = webutil.showtag(web.repo, tmpl, 'changelogtag', n) + files = webutil.listfilediffs(tmpl, ctx.files(), n, web.maxfiles) + + yield tmpl('searchentry', + parity=parity.next(), + author=ctx.user(), + parent=webutil.parents(ctx), + child=webutil.children(ctx), + changelogtag=showtags, + desc=ctx.description(), + date=ctx.date(), + files=files, + rev=ctx.rev(), + 
node=hex(n), + tags=webutil.nodetagsdict(web.repo, n), + inbranch=webutil.nodeinbranch(web.repo, ctx), + branches=webutil.nodebranchdict(web.repo, ctx)) + + if count >= revcount: + break + + tip = web.repo['tip'] + parity = paritygen(web.stripecount) + + return tmpl('search', query=query, node=tip.hex(), + entries=changelist, archives=web.archivelist("tip"), + morevars=morevars, lessvars=lessvars) + +def changelog(web, req, tmpl, shortlog=False): + + if 'node' in req.form: + ctx = webutil.changectx(web.repo, req) + else: + if 'rev' in req.form: + hi = req.form['rev'][0] + else: + hi = len(web.repo) - 1 + try: + ctx = web.repo[hi] + except error.RepoError: + return _search(web, req, tmpl) # XXX redirect to 404 page? + + def changelist(limit=0, **map): + l = [] # build a list in forward order for efficiency + for i in xrange(start, end): + ctx = web.repo[i] + n = ctx.node() + showtags = webutil.showtag(web.repo, tmpl, 'changelogtag', n) + files = webutil.listfilediffs(tmpl, ctx.files(), n, web.maxfiles) + + l.insert(0, {"parity": parity.next(), + "author": ctx.user(), + "parent": webutil.parents(ctx, i - 1), + "child": webutil.children(ctx, i + 1), + "changelogtag": showtags, + "desc": ctx.description(), + "date": ctx.date(), + "files": files, + "rev": i, + "node": hex(n), + "tags": webutil.nodetagsdict(web.repo, n), + "inbranch": webutil.nodeinbranch(web.repo, ctx), + "branches": webutil.nodebranchdict(web.repo, ctx) + }) + + if limit > 0: + l = l[:limit] + + for e in l: + yield e + + revcount = shortlog and web.maxshortchanges or web.maxchanges + if 'revcount' in req.form: + revcount = int(req.form.get('revcount', [revcount])[0]) + tmpl.defaults['sessionvars']['revcount'] = revcount + + lessvars = copy.copy(tmpl.defaults['sessionvars']) + lessvars['revcount'] = revcount / 2 + morevars = copy.copy(tmpl.defaults['sessionvars']) + morevars['revcount'] = revcount * 2 + + count = len(web.repo) + pos = ctx.rev() + start = max(0, pos - revcount + 1) + end = min(count, start + revcount) + pos = end - 1 + parity = paritygen(web.stripecount, offset=start - end) + + changenav = webutil.revnavgen(pos, revcount, count, web.repo.changectx) + + return tmpl(shortlog and 'shortlog' or 'changelog', changenav=changenav, + node=hex(ctx.node()), rev=pos, changesets=count, + entries=lambda **x: changelist(limit=0,**x), + latestentry=lambda **x: changelist(limit=1,**x), + archives=web.archivelist("tip"), revcount=revcount, + morevars=morevars, lessvars=lessvars) + +def shortlog(web, req, tmpl): + return changelog(web, req, tmpl, shortlog = True) + +def changeset(web, req, tmpl): + ctx = webutil.changectx(web.repo, req) + showtags = webutil.showtag(web.repo, tmpl, 'changesettag', ctx.node()) + showbranch = webutil.nodebranchnodefault(ctx) + + files = [] + parity = paritygen(web.stripecount) + for f in ctx.files(): + template = f in ctx and 'filenodelink' or 'filenolink' + files.append(tmpl(template, + node=ctx.hex(), file=f, + parity=parity.next())) + + parity = paritygen(web.stripecount) + style = web.config('web', 'style', 'paper') + if 'style' in req.form: + style = req.form['style'][0] + + diffs = webutil.diffs(web.repo, tmpl, ctx, None, parity, style) + return tmpl('changeset', + diff=diffs, + rev=ctx.rev(), + node=ctx.hex(), + parent=webutil.parents(ctx), + child=webutil.children(ctx), + changesettag=showtags, + changesetbranch=showbranch, + author=ctx.user(), + desc=ctx.description(), + date=ctx.date(), + files=files, + archives=web.archivelist(ctx.hex()), + tags=webutil.nodetagsdict(web.repo, ctx.node()), 
+ branch=webutil.nodebranchnodefault(ctx), + inbranch=webutil.nodeinbranch(web.repo, ctx), + branches=webutil.nodebranchdict(web.repo, ctx)) + +rev = changeset + +def manifest(web, req, tmpl): + ctx = webutil.changectx(web.repo, req) + path = webutil.cleanpath(web.repo, req.form.get('file', [''])[0]) + mf = ctx.manifest() + node = ctx.node() + + files = {} + dirs = {} + parity = paritygen(web.stripecount) + + if path and path[-1] != "/": + path += "/" + l = len(path) + abspath = "/" + path + + for f, n in mf.iteritems(): + if f[:l] != path: + continue + remain = f[l:] + elements = remain.split('/') + if len(elements) == 1: + files[remain] = f + else: + h = dirs # need to retain ref to dirs (root) + for elem in elements[0:-1]: + if elem not in h: + h[elem] = {} + h = h[elem] + if len(h) > 1: + break + h[None] = None # denotes files present + + if mf and not files and not dirs: + raise ErrorResponse(HTTP_NOT_FOUND, 'path not found: ' + path) + + def filelist(**map): + for f in sorted(files): + full = files[f] + + fctx = ctx.filectx(full) + yield {"file": full, + "parity": parity.next(), + "basename": f, + "date": fctx.date(), + "size": fctx.size(), + "permissions": mf.flags(full)} + + def dirlist(**map): + for d in sorted(dirs): + + emptydirs = [] + h = dirs[d] + while isinstance(h, dict) and len(h) == 1: + k, v = h.items()[0] + if v: + emptydirs.append(k) + h = v + + path = "%s%s" % (abspath, d) + yield {"parity": parity.next(), + "path": path, + "emptydirs": "/".join(emptydirs), + "basename": d} + + return tmpl("manifest", + rev=ctx.rev(), + node=hex(node), + path=abspath, + up=webutil.up(abspath), + upparity=parity.next(), + fentries=filelist, + dentries=dirlist, + archives=web.archivelist(hex(node)), + tags=webutil.nodetagsdict(web.repo, node), + inbranch=webutil.nodeinbranch(web.repo, ctx), + branches=webutil.nodebranchdict(web.repo, ctx)) + +def tags(web, req, tmpl): + i = web.repo.tagslist() + i.reverse() + parity = paritygen(web.stripecount) + + def entries(notip=False, limit=0, **map): + count = 0 + for k, n in i: + if notip and k == "tip": + continue + if limit > 0 and count >= limit: + continue + count = count + 1 + yield {"parity": parity.next(), + "tag": k, + "date": web.repo[n].date(), + "node": hex(n)} + + return tmpl("tags", + node=hex(web.repo.changelog.tip()), + entries=lambda **x: entries(False, 0, **x), + entriesnotip=lambda **x: entries(True, 0, **x), + latestentry=lambda **x: entries(True, 1, **x)) + +def branches(web, req, tmpl): + tips = (web.repo[n] for t, n in web.repo.branchtags().iteritems()) + heads = web.repo.heads() + parity = paritygen(web.stripecount) + sortkey = lambda ctx: ('close' not in ctx.extra(), ctx.rev()) + + def entries(limit, **map): + count = 0 + for ctx in sorted(tips, key=sortkey, reverse=True): + if limit > 0 and count >= limit: + return + count += 1 + if ctx.node() not in heads: + status = 'inactive' + elif not web.repo.branchheads(ctx.branch()): + status = 'closed' + else: + status = 'open' + yield {'parity': parity.next(), + 'branch': ctx.branch(), + 'status': status, + 'node': ctx.hex(), + 'date': ctx.date()} + + return tmpl('branches', node=hex(web.repo.changelog.tip()), + entries=lambda **x: entries(0, **x), + latestentry=lambda **x: entries(1, **x)) + +def summary(web, req, tmpl): + i = web.repo.tagslist() + i.reverse() + + def tagentries(**map): + parity = paritygen(web.stripecount) + count = 0 + for k, n in i: + if k == "tip": # skip tip + continue + + count += 1 + if count > 10: # limit to 10 tags + break + + yield tmpl("tagentry", + 
parity=parity.next(), + tag=k, + node=hex(n), + date=web.repo[n].date()) + + def branches(**map): + parity = paritygen(web.stripecount) + + b = web.repo.branchtags() + l = [(-web.repo.changelog.rev(n), n, t) for t, n in b.iteritems()] + for r, n, t in sorted(l): + yield {'parity': parity.next(), + 'branch': t, + 'node': hex(n), + 'date': web.repo[n].date()} + + def changelist(**map): + parity = paritygen(web.stripecount, offset=start - end) + l = [] # build a list in forward order for efficiency + for i in xrange(start, end): + ctx = web.repo[i] + n = ctx.node() + hn = hex(n) + + l.insert(0, tmpl( + 'shortlogentry', + parity=parity.next(), + author=ctx.user(), + desc=ctx.description(), + date=ctx.date(), + rev=i, + node=hn, + tags=webutil.nodetagsdict(web.repo, n), + inbranch=webutil.nodeinbranch(web.repo, ctx), + branches=webutil.nodebranchdict(web.repo, ctx))) + + yield l + + tip = web.repo['tip'] + count = len(web.repo) + start = max(0, count - web.maxchanges) + end = min(count, start + web.maxchanges) + + return tmpl("summary", + desc=web.config("web", "description", "unknown"), + owner=get_contact(web.config) or "unknown", + lastchange=tip.date(), + tags=tagentries, + branches=branches, + shortlog=changelist, + node=tip.hex(), + archives=web.archivelist("tip")) + +def filediff(web, req, tmpl): + fctx, ctx = None, None + try: + fctx = webutil.filectx(web.repo, req) + except LookupError: + ctx = webutil.changectx(web.repo, req) + path = webutil.cleanpath(web.repo, req.form['file'][0]) + if path not in ctx.files(): + raise + + if fctx is not None: + n = fctx.node() + path = fctx.path() + else: + n = ctx.node() + # path already defined in except clause + + parity = paritygen(web.stripecount) + style = web.config('web', 'style', 'paper') + if 'style' in req.form: + style = req.form['style'][0] + + diffs = webutil.diffs(web.repo, tmpl, fctx or ctx, [path], parity, style) + rename = fctx and webutil.renamelink(fctx) or [] + ctx = fctx and fctx or ctx + return tmpl("filediff", + file=path, + node=hex(n), + rev=ctx.rev(), + date=ctx.date(), + desc=ctx.description(), + author=ctx.user(), + rename=rename, + branch=webutil.nodebranchnodefault(ctx), + parent=webutil.parents(ctx), + child=webutil.children(ctx), + diff=diffs) + +diff = filediff + +def annotate(web, req, tmpl): + fctx = webutil.filectx(web.repo, req) + f = fctx.path() + parity = paritygen(web.stripecount) + + def annotate(**map): + last = None + if binary(fctx.data()): + mt = (mimetypes.guess_type(fctx.path())[0] + or 'application/octet-stream') + lines = enumerate([((fctx.filectx(fctx.filerev()), 1), + '(binary:%s)' % mt)]) + else: + lines = enumerate(fctx.annotate(follow=True, linenumber=True)) + for lineno, ((f, targetline), l) in lines: + fnode = f.filenode() + + if last != fnode: + last = fnode + + yield {"parity": parity.next(), + "node": hex(f.node()), + "rev": f.rev(), + "author": f.user(), + "desc": f.description(), + "file": f.path(), + "targetline": targetline, + "line": l, + "lineid": "l%d" % (lineno + 1), + "linenumber": "% 6d" % (lineno + 1)} + + return tmpl("fileannotate", + file=f, + annotate=annotate, + path=webutil.up(f), + rev=fctx.rev(), + node=hex(fctx.node()), + author=fctx.user(), + date=fctx.date(), + desc=fctx.description(), + rename=webutil.renamelink(fctx), + branch=webutil.nodebranchnodefault(fctx), + parent=webutil.parents(fctx), + child=webutil.children(fctx), + permissions=fctx.manifest().flags(f)) + +def filelog(web, req, tmpl): + + try: + fctx = webutil.filectx(web.repo, req) + f = fctx.path() + fl = 
fctx.filelog() + except error.LookupError: + f = webutil.cleanpath(web.repo, req.form['file'][0]) + fl = web.repo.file(f) + numrevs = len(fl) + if not numrevs: # file doesn't exist at all + raise + rev = webutil.changectx(web.repo, req).rev() + first = fl.linkrev(0) + if rev < first: # current rev is from before file existed + raise + frev = numrevs - 1 + while fl.linkrev(frev) > rev: + frev -= 1 + fctx = web.repo.filectx(f, fl.linkrev(frev)) + + revcount = web.maxshortchanges + if 'revcount' in req.form: + revcount = int(req.form.get('revcount', [revcount])[0]) + tmpl.defaults['sessionvars']['revcount'] = revcount + + lessvars = copy.copy(tmpl.defaults['sessionvars']) + lessvars['revcount'] = revcount / 2 + morevars = copy.copy(tmpl.defaults['sessionvars']) + morevars['revcount'] = revcount * 2 + + count = fctx.filerev() + 1 + start = max(0, fctx.filerev() - revcount + 1) # first rev on this page + end = min(count, start + revcount) # last rev on this page + parity = paritygen(web.stripecount, offset=start - end) + + def entries(limit=0, **map): + l = [] + + repo = web.repo + for i in xrange(start, end): + iterfctx = fctx.filectx(i) + + l.insert(0, {"parity": parity.next(), + "filerev": i, + "file": f, + "node": hex(iterfctx.node()), + "author": iterfctx.user(), + "date": iterfctx.date(), + "rename": webutil.renamelink(iterfctx), + "parent": webutil.parents(iterfctx), + "child": webutil.children(iterfctx), + "desc": iterfctx.description(), + "tags": webutil.nodetagsdict(repo, iterfctx.node()), + "branch": webutil.nodebranchnodefault(iterfctx), + "inbranch": webutil.nodeinbranch(repo, iterfctx), + "branches": webutil.nodebranchdict(repo, iterfctx)}) + + if limit > 0: + l = l[:limit] + + for e in l: + yield e + + nodefunc = lambda x: fctx.filectx(fileid=x) + nav = webutil.revnavgen(end - 1, revcount, count, nodefunc) + return tmpl("filelog", file=f, node=hex(fctx.node()), nav=nav, + entries=lambda **x: entries(limit=0, **x), + latestentry=lambda **x: entries(limit=1, **x), + revcount=revcount, morevars=morevars, lessvars=lessvars) + +def archive(web, req, tmpl): + type_ = req.form.get('type', [None])[0] + allowed = web.configlist("web", "allow_archive") + key = req.form['node'][0] + + if type_ not in web.archives: + msg = 'Unsupported archive type: %s' % type_ + raise ErrorResponse(HTTP_NOT_FOUND, msg) + + if not ((type_ in allowed or + web.configbool("web", "allow" + type_, False))): + msg = 'Archive type not allowed: %s' % type_ + raise ErrorResponse(HTTP_FORBIDDEN, msg) + + reponame = re.sub(r"\W+", "-", os.path.basename(web.reponame)) + cnode = web.repo.lookup(key) + arch_version = key + if cnode == key or key == 'tip': + arch_version = short(cnode) + name = "%s-%s" % (reponame, arch_version) + mimetype, artype, extension, encoding = web.archive_specs[type_] + headers = [ + ('Content-Type', mimetype), + ('Content-Disposition', 'attachment; filename=%s%s' % (name, extension)) + ] + if encoding: + headers.append(('Content-Encoding', encoding)) + req.header(headers) + req.respond(HTTP_OK) + archival.archive(web.repo, req, cnode, artype, prefix=name) + return [] + + +def static(web, req, tmpl): + fname = req.form['file'][0] + # a repo owner may set web.static in .hg/hgrc to get any file + # readable by the user running the CGI script + static = web.config("web", "static", None, untrusted=False) + if not static: + tp = web.templatepath or templater.templatepath() + if isinstance(tp, str): + tp = [tp] + static = [os.path.join(p, 'static') for p in tp] + return [staticfile(static, fname, req)] 
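A quick illustration of the calling convention shared by the functions in this module: each command takes (web, req, tmpl), returns either a template call or an iterable of response chunks, and must also be listed in __all__ (see the comment above that list). The sketch below is hypothetical; the name 'tipnode' and its behaviour are assumptions for illustration and are not part of mercurial 1.7.3. It mirrors the req.respond() and return pattern used by rawfile() above.

def tipnode(web, req, tmpl):
    # hypothetical example command, not shipped with this package
    text = web.repo['tip'].hex() + '\n'
    req.respond(HTTP_OK, 'text/plain', length=len(text))
    return [text]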
+ +def graph(web, req, tmpl): + + rev = webutil.changectx(web.repo, req).rev() + bg_height = 39 + revcount = web.maxshortchanges + if 'revcount' in req.form: + revcount = int(req.form.get('revcount', [revcount])[0]) + tmpl.defaults['sessionvars']['revcount'] = revcount + + lessvars = copy.copy(tmpl.defaults['sessionvars']) + lessvars['revcount'] = revcount / 2 + morevars = copy.copy(tmpl.defaults['sessionvars']) + morevars['revcount'] = revcount * 2 + + max_rev = len(web.repo) - 1 + revcount = min(max_rev, revcount) + revnode = web.repo.changelog.node(rev) + revnode_hex = hex(revnode) + uprev = min(max_rev, rev + revcount) + downrev = max(0, rev - revcount) + count = len(web.repo) + changenav = webutil.revnavgen(rev, revcount, count, web.repo.changectx) + + dag = graphmod.revisions(web.repo, rev, downrev) + tree = list(graphmod.colored(dag)) + canvasheight = (len(tree) + 1) * bg_height - 27 + data = [] + for (id, type, ctx, vtx, edges) in tree: + if type != graphmod.CHANGESET: + continue + node = short(ctx.node()) + age = templatefilters.age(ctx.date()) + desc = templatefilters.firstline(ctx.description()) + desc = cgi.escape(templatefilters.nonempty(desc)) + user = cgi.escape(templatefilters.person(ctx.user())) + branch = ctx.branch() + branch = branch, web.repo.branchtags().get(branch) == ctx.node() + data.append((node, vtx, edges, desc, user, age, branch, ctx.tags())) + + return tmpl('graph', rev=rev, revcount=revcount, uprev=uprev, + lessvars=lessvars, morevars=morevars, downrev=downrev, + canvasheight=canvasheight, jsdata=data, bg_height=bg_height, + node=revnode_hex, changenav=changenav) + +def _getdoc(e): + doc = e[0].__doc__ + if doc: + doc = doc.split('\n')[0] + else: + doc = _('(no help text available)') + return doc + +def help(web, req, tmpl): + from mercurial import commands # avoid cycle + + topicname = req.form.get('node', [None])[0] + if not topicname: + topic = [] + + def topics(**map): + for entries, summary, _ in helpmod.helptable: + entries = sorted(entries, key=len) + yield {'topic': entries[-1], 'summary': summary} + + early, other = [], [] + primary = lambda s: s.split('|')[0] + for c, e in commands.table.iteritems(): + doc = _getdoc(e) + if 'DEPRECATED' in doc or c.startswith('debug'): + continue + cmd = primary(c) + if cmd.startswith('^'): + early.append((cmd[1:], doc)) + else: + other.append((cmd, doc)) + + early.sort() + other.sort() + + def earlycommands(**map): + for c, doc in early: + yield {'topic': c, 'summary': doc} + + def othercommands(**map): + for c, doc in other: + yield {'topic': c, 'summary': doc} + + return tmpl('helptopics', topics=topics, earlycommands=earlycommands, + othercommands=othercommands, title='Index') + + u = webutil.wsgiui() + u.pushbuffer() + try: + commands.help_(u, topicname) + except error.UnknownCommand: + raise ErrorResponse(HTTP_NOT_FOUND) + doc = u.popbuffer() + return tmpl('help', topic=topicname, doc=doc) diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/hgweb/webcommands.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/hgweb/webcommands.pyo Binary files differnew file mode 100644 index 0000000..0ae06ef --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/hgweb/webcommands.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/hgweb/webutil.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/hgweb/webutil.py new file mode 100644 index 0000000..5dbff02 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/hgweb/webutil.py @@ -0,0 +1,226 
@@ +# hgweb/webutil.py - utility library for the web interface. +# +# Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net> +# Copyright 2005-2007 Matt Mackall <mpm@selenic.com> +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +import os, copy +from mercurial import match, patch, util, error, ui +from mercurial.node import hex, nullid + +def up(p): + if p[0] != "/": + p = "/" + p + if p[-1] == "/": + p = p[:-1] + up = os.path.dirname(p) + if up == "/": + return "/" + return up + "/" + +def revnavgen(pos, pagelen, limit, nodefunc): + def seq(factor, limit=None): + if limit: + yield limit + if limit >= 20 and limit <= 40: + yield 50 + else: + yield 1 * factor + yield 3 * factor + for f in seq(factor * 10): + yield f + + navbefore = [] + navafter = [] + + last = 0 + for f in seq(1, pagelen): + if f < pagelen or f <= last: + continue + if f > limit: + break + last = f + if pos + f < limit: + navafter.append(("+%d" % f, hex(nodefunc(pos + f).node()))) + if pos - f >= 0: + navbefore.insert(0, ("-%d" % f, hex(nodefunc(pos - f).node()))) + + navafter.append(("tip", "tip")) + try: + navbefore.insert(0, ("(0)", hex(nodefunc('0').node()))) + except error.RepoError: + pass + + def gen(l): + def f(**map): + for label, node in l: + yield {"label": label, "node": node} + return f + + return (dict(before=gen(navbefore), after=gen(navafter)),) + +def _siblings(siblings=[], hiderev=None): + siblings = [s for s in siblings if s.node() != nullid] + if len(siblings) == 1 and siblings[0].rev() == hiderev: + return + for s in siblings: + d = {'node': hex(s.node()), 'rev': s.rev()} + d['user'] = s.user() + d['date'] = s.date() + d['description'] = s.description() + d['branch'] = s.branch() + if hasattr(s, 'path'): + d['file'] = s.path() + yield d + +def parents(ctx, hide=None): + return _siblings(ctx.parents(), hide) + +def children(ctx, hide=None): + return _siblings(ctx.children(), hide) + +def renamelink(fctx): + r = fctx.renamed() + if r: + return [dict(file=r[0], node=hex(r[1]))] + return [] + +def nodetagsdict(repo, node): + return [{"name": i} for i in repo.nodetags(node)] + +def nodebranchdict(repo, ctx): + branches = [] + branch = ctx.branch() + # If this is an empty repo, ctx.node() == nullid, + # ctx.branch() == 'default', but branchtags() is + # an empty dict. Using dict.get avoids a traceback. 
+ if repo.branchtags().get(branch) == ctx.node(): + branches.append({"name": branch}) + return branches + +def nodeinbranch(repo, ctx): + branches = [] + branch = ctx.branch() + if branch != 'default' and repo.branchtags().get(branch) != ctx.node(): + branches.append({"name": branch}) + return branches + +def nodebranchnodefault(ctx): + branches = [] + branch = ctx.branch() + if branch != 'default': + branches.append({"name": branch}) + return branches + +def showtag(repo, tmpl, t1, node=nullid, **args): + for t in repo.nodetags(node): + yield tmpl(t1, tag=t, **args) + +def cleanpath(repo, path): + path = path.lstrip('/') + return util.canonpath(repo.root, '', path) + +def changectx(repo, req): + changeid = "tip" + if 'node' in req.form: + changeid = req.form['node'][0] + elif 'manifest' in req.form: + changeid = req.form['manifest'][0] + + try: + ctx = repo[changeid] + except error.RepoError: + man = repo.manifest + ctx = repo[man.linkrev(man.rev(man.lookup(changeid)))] + + return ctx + +def filectx(repo, req): + path = cleanpath(repo, req.form['file'][0]) + if 'node' in req.form: + changeid = req.form['node'][0] + else: + changeid = req.form['filenode'][0] + try: + fctx = repo[changeid][path] + except error.RepoError: + fctx = repo.filectx(path, fileid=changeid) + + return fctx + +def listfilediffs(tmpl, files, node, max): + for f in files[:max]: + yield tmpl('filedifflink', node=hex(node), file=f) + if len(files) > max: + yield tmpl('fileellipses') + +def diffs(repo, tmpl, ctx, files, parity, style): + + def countgen(): + start = 1 + while True: + yield start + start += 1 + + blockcount = countgen() + def prettyprintlines(diff): + blockno = blockcount.next() + for lineno, l in enumerate(diff.splitlines(True)): + lineno = "%d.%d" % (blockno, lineno + 1) + if l.startswith('+'): + ltype = "difflineplus" + elif l.startswith('-'): + ltype = "difflineminus" + elif l.startswith('@'): + ltype = "difflineat" + else: + ltype = "diffline" + yield tmpl(ltype, + line=l, + lineid="l%s" % lineno, + linenumber="% 8s" % lineno) + + if files: + m = match.exact(repo.root, repo.getcwd(), files) + else: + m = match.always(repo.root, repo.getcwd()) + + diffopts = patch.diffopts(repo.ui, untrusted=True) + parents = ctx.parents() + node1 = parents and parents[0].node() or nullid + node2 = ctx.node() + + block = [] + for chunk in patch.diff(repo, node1, node2, m, opts=diffopts): + if chunk.startswith('diff') and block: + yield tmpl('diffblock', parity=parity.next(), + lines=prettyprintlines(''.join(block))) + block = [] + if chunk.startswith('diff') and style != 'raw': + chunk = ''.join(chunk.splitlines(True)[1:]) + block.append(chunk) + yield tmpl('diffblock', parity=parity.next(), + lines=prettyprintlines(''.join(block))) + +class sessionvars(object): + def __init__(self, vars, start='?'): + self.start = start + self.vars = vars + def __getitem__(self, key): + return self.vars[key] + def __setitem__(self, key, value): + self.vars[key] = value + def __copy__(self): + return sessionvars(copy.copy(self.vars), self.start) + def __iter__(self): + separator = self.start + for key, value in self.vars.iteritems(): + yield {'name': key, 'value': str(value), 'separator': separator} + separator = '&' + +class wsgiui(ui.ui): + # default termwidth breaks under mod_wsgi + def termwidth(self): + return 80 diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/hgweb/webutil.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/hgweb/webutil.pyo Binary files differnew file mode 100644 index 0000000..5a9a36f 
--- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/hgweb/webutil.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/hgweb/wsgicgi.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/hgweb/wsgicgi.py new file mode 100644 index 0000000..8dc5060 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/hgweb/wsgicgi.py @@ -0,0 +1,77 @@ +# hgweb/wsgicgi.py - CGI->WSGI translator +# +# Copyright 2006 Eric Hopper <hopper@omnifarious.org> +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. +# +# This was originally copied from the public domain code at +# http://www.python.org/dev/peps/pep-0333/#the-server-gateway-side + +import os, sys +from mercurial import util + +def launch(application): + util.set_binary(sys.stdin) + util.set_binary(sys.stdout) + + environ = dict(os.environ.iteritems()) + environ.setdefault('PATH_INFO', '') + if environ.get('SERVER_SOFTWARE', '').startswith('Microsoft-IIS'): + # IIS includes script_name in path_info + scriptname = environ['SCRIPT_NAME'] + if environ['PATH_INFO'].startswith(scriptname): + environ['PATH_INFO'] = environ['PATH_INFO'][len(scriptname):] + + environ['wsgi.input'] = sys.stdin + environ['wsgi.errors'] = sys.stderr + environ['wsgi.version'] = (1, 0) + environ['wsgi.multithread'] = False + environ['wsgi.multiprocess'] = True + environ['wsgi.run_once'] = True + + if environ.get('HTTPS', 'off').lower() in ('on', '1', 'yes'): + environ['wsgi.url_scheme'] = 'https' + else: + environ['wsgi.url_scheme'] = 'http' + + headers_set = [] + headers_sent = [] + out = sys.stdout + + def write(data): + if not headers_set: + raise AssertionError("write() before start_response()") + + elif not headers_sent: + # Before the first output, send the stored headers + status, response_headers = headers_sent[:] = headers_set + out.write('Status: %s\r\n' % status) + for header in response_headers: + out.write('%s: %s\r\n' % header) + out.write('\r\n') + + out.write(data) + out.flush() + + def start_response(status, response_headers, exc_info=None): + if exc_info: + try: + if headers_sent: + # Re-raise original exception if headers sent + raise exc_info[0](exc_info[1], exc_info[2]) + finally: + exc_info = None # avoid dangling circular ref + elif headers_set: + raise AssertionError("Headers already set!") + + headers_set[:] = [status, response_headers] + return write + + content = application(environ, start_response) + try: + for chunk in content: + write(chunk) + finally: + if hasattr(content, 'close'): + content.close() diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/hgweb/wsgicgi.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/hgweb/wsgicgi.pyo Binary files differnew file mode 100644 index 0000000..0b669d9 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/hgweb/wsgicgi.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/hook.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/hook.py new file mode 100644 index 0000000..6f0bcdb --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/hook.py @@ -0,0 +1,153 @@ +# hook.py - hook support for mercurial +# +# Copyright 2007 Matt Mackall <mpm@selenic.com> +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. 
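The docstring of _pythonhook below defines the contract for in-process hooks: the callable is invoked with ui, repo, hooktype and the hook arguments as keyword parameters, a truthy return value (or an uncaught exception) counts as failure, and a falsy return lets the operation proceed. A minimal sketch of such a callable follows; the module name myhooks and the function name checkmsg are invented for illustration and are not part of this package.

# myhooks.py (illustrative sketch only; the names are assumptions)
def checkmsg(ui, repo, hooktype, node=None, **kwargs):
    """Reject changesets whose description is empty.

    Returning a true value marks the hook as failed, per the
    _pythonhook contract below; returning False lets the
    operation proceed."""
    ctx = repo[node]
    if not ctx.description().strip():
        ui.warn('empty commit message not allowed\n')
        return True
    return False

Such a callable would typically be referenced from the [hooks] section of an hgrc, for example pretxncommit.checkmsg = python:myhooks.checkmsg, which is the 'python:' form dispatched by hook() near the bottom of this file.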
+ +from i18n import _ +import os, sys +import extensions, util + +def _pythonhook(ui, repo, name, hname, funcname, args, throw): + '''call python hook. hook is callable object, looked up as + name in python module. if callable returns "true", hook + fails, else passes. if hook raises exception, treated as + hook failure. exception propagates if throw is "true". + + reason for "true" meaning "hook failed" is so that + unmodified commands (e.g. mercurial.commands.update) can + be run as hooks without wrappers to convert return values.''' + + ui.note(_("calling hook %s: %s\n") % (hname, funcname)) + obj = funcname + if not hasattr(obj, '__call__'): + d = funcname.rfind('.') + if d == -1: + raise util.Abort(_('%s hook is invalid ("%s" not in ' + 'a module)') % (hname, funcname)) + modname = funcname[:d] + oldpaths = sys.path + if hasattr(sys, "frozen"): + # binary installs require sys.path manipulation + modpath, modfile = os.path.split(modname) + if modpath and modfile: + sys.path = sys.path[:] + [modpath] + modname = modfile + try: + obj = __import__(modname) + except ImportError: + e1 = sys.exc_type, sys.exc_value, sys.exc_traceback + try: + # extensions are loaded with hgext_ prefix + obj = __import__("hgext_%s" % modname) + except ImportError: + e2 = sys.exc_type, sys.exc_value, sys.exc_traceback + if ui.tracebackflag: + ui.warn(_('exception from first failed import attempt:\n')) + ui.traceback(e1) + if ui.tracebackflag: + ui.warn(_('exception from second failed import attempt:\n')) + ui.traceback(e2) + raise util.Abort(_('%s hook is invalid ' + '(import of "%s" failed)') % + (hname, modname)) + sys.path = oldpaths + try: + for p in funcname.split('.')[1:]: + obj = getattr(obj, p) + except AttributeError: + raise util.Abort(_('%s hook is invalid ' + '("%s" is not defined)') % + (hname, funcname)) + if not hasattr(obj, '__call__'): + raise util.Abort(_('%s hook is invalid ' + '("%s" is not callable)') % + (hname, funcname)) + try: + r = obj(ui=ui, repo=repo, hooktype=name, **args) + except KeyboardInterrupt: + raise + except Exception, exc: + if isinstance(exc, util.Abort): + ui.warn(_('error: %s hook failed: %s\n') % + (hname, exc.args[0])) + else: + ui.warn(_('error: %s hook raised an exception: ' + '%s\n') % (hname, exc)) + if throw: + raise + ui.traceback() + return True + if r: + if throw: + raise util.Abort(_('%s hook failed') % hname) + ui.warn(_('warning: %s hook failed\n') % hname) + return r + +def _exthook(ui, repo, name, cmd, args, throw): + ui.note(_("running hook %s: %s\n") % (name, cmd)) + + env = {} + for k, v in args.iteritems(): + if hasattr(v, '__call__'): + v = v() + env['HG_' + k.upper()] = v + + if repo: + cwd = repo.root + else: + cwd = os.getcwd() + if 'HG_URL' in env and env['HG_URL'].startswith('remote:http'): + r = util.system(cmd, environ=env, cwd=cwd, out=ui) + else: + r = util.system(cmd, environ=env, cwd=cwd) + if r: + desc, r = util.explain_exit(r) + if throw: + raise util.Abort(_('%s hook %s') % (name, desc)) + ui.warn(_('warning: %s hook %s\n') % (name, desc)) + return r + +_redirect = False +def redirect(state): + global _redirect + _redirect = state + +def hook(ui, repo, name, throw=False, **args): + r = False + + oldstdout = -1 + if _redirect: + stdoutno = sys.__stdout__.fileno() + stderrno = sys.__stderr__.fileno() + # temporarily redirect stdout to stderr, if possible + if stdoutno >= 0 and stderrno >= 0: + oldstdout = os.dup(stdoutno) + os.dup2(stderrno, stdoutno) + + try: + for hname, cmd in ui.configitems('hooks'): + if hname.split('.')[0] != name 
or not cmd: + continue + if hasattr(cmd, '__call__'): + r = _pythonhook(ui, repo, name, hname, cmd, args, throw) or r + elif cmd.startswith('python:'): + if cmd.count(':') >= 2: + path, cmd = cmd[7:].rsplit(':', 1) + path = util.expandpath(path) + if repo: + path = os.path.join(repo.root, path) + mod = extensions.loadpath(path, 'hghook.%s' % hname) + hookfn = getattr(mod, cmd) + else: + hookfn = cmd[7:].strip() + r = _pythonhook(ui, repo, name, hname, hookfn, args, throw) or r + else: + r = _exthook(ui, repo, hname, cmd, args, throw) or r + finally: + if _redirect and oldstdout >= 0: + os.dup2(oldstdout, stdoutno) + os.close(oldstdout) + + return r diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/hook.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/hook.pyo Binary files differnew file mode 100644 index 0000000..9c49771 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/hook.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/httprepo.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/httprepo.py new file mode 100644 index 0000000..c19c661 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/httprepo.py @@ -0,0 +1,203 @@ +# httprepo.py - HTTP repository proxy classes for mercurial +# +# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com> +# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com> +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +from node import nullid +from i18n import _ +import changegroup, statichttprepo, error, url, util, wireproto +import os, urllib, urllib2, urlparse, zlib, httplib +import errno, socket + +def zgenerator(f): + zd = zlib.decompressobj() + try: + for chunk in util.filechunkiter(f): + while chunk: + yield zd.decompress(chunk, 2**18) + chunk = zd.unconsumed_tail + except httplib.HTTPException: + raise IOError(None, _('connection ended unexpectedly')) + yield zd.flush() + +class httprepository(wireproto.wirerepository): + def __init__(self, ui, path): + self.path = path + self.caps = None + self.handler = None + scheme, netloc, urlpath, query, frag = urlparse.urlsplit(path) + if query or frag: + raise util.Abort(_('unsupported URL component: "%s"') % + (query or frag)) + + # urllib cannot handle URLs with embedded user or passwd + self._url, authinfo = url.getauthinfo(path) + + self.ui = ui + self.ui.debug('using %s\n' % self._url) + + self.urlopener = url.opener(ui, authinfo) + + def __del__(self): + for h in self.urlopener.handlers: + h.close() + if hasattr(h, "close_all"): + h.close_all() + + def url(self): + return self.path + + # look up capabilities only when needed + + def get_caps(self): + if self.caps is None: + try: + self.caps = set(self._call('capabilities').split()) + except error.RepoError: + self.caps = set() + self.ui.debug('capabilities: %s\n' % + (' '.join(self.caps or ['none']))) + return self.caps + + capabilities = property(get_caps) + + def lock(self): + raise util.Abort(_('operation not supported over http')) + + def _callstream(self, cmd, **args): + if cmd == 'pushkey': + args['data'] = '' + data = args.pop('data', None) + headers = args.pop('headers', {}) + self.ui.debug("sending %s command\n" % cmd) + q = {"cmd": cmd} + q.update(args) + qs = '?%s' % urllib.urlencode(q) + cu = "%s%s" % (self._url, qs) + req = urllib2.Request(cu, data, headers) + if data is not None: + # len(data) is broken if data doesn't fit into Py_ssize_t + # add the header 
ourself to avoid OverflowError + size = data.__len__() + self.ui.debug("sending %s bytes\n" % size) + req.add_unredirected_header('Content-Length', '%d' % size) + try: + resp = self.urlopener.open(req) + except urllib2.HTTPError, inst: + if inst.code == 401: + raise util.Abort(_('authorization failed')) + raise + except httplib.HTTPException, inst: + self.ui.debug('http error while sending %s command\n' % cmd) + self.ui.traceback() + raise IOError(None, inst) + except IndexError: + # this only happens with Python 2.3, later versions raise URLError + raise util.Abort(_('http error, possibly caused by proxy setting')) + # record the url we got redirected to + resp_url = resp.geturl() + if resp_url.endswith(qs): + resp_url = resp_url[:-len(qs)] + if self._url.rstrip('/') != resp_url.rstrip('/'): + self.ui.status(_('real URL is %s\n') % resp_url) + self._url = resp_url + try: + proto = resp.getheader('content-type') + except AttributeError: + proto = resp.headers['content-type'] + + safeurl = url.hidepassword(self._url) + # accept old "text/plain" and "application/hg-changegroup" for now + if not (proto.startswith('application/mercurial-') or + proto.startswith('text/plain') or + proto.startswith('application/hg-changegroup')): + self.ui.debug("requested URL: '%s'\n" % url.hidepassword(cu)) + raise error.RepoError( + _("'%s' does not appear to be an hg repository:\n" + "---%%<--- (%s)\n%s\n---%%<---\n") + % (safeurl, proto, resp.read())) + + if proto.startswith('application/mercurial-'): + try: + version = proto.split('-', 1)[1] + version_info = tuple([int(n) for n in version.split('.')]) + except ValueError: + raise error.RepoError(_("'%s' sent a broken Content-Type " + "header (%s)") % (safeurl, proto)) + if version_info > (0, 1): + raise error.RepoError(_("'%s' uses newer protocol %s") % + (safeurl, version)) + + return resp + + def _call(self, cmd, **args): + fp = self._callstream(cmd, **args) + try: + return fp.read() + finally: + # if using keepalive, allow connection to be reused + fp.close() + + def _callpush(self, cmd, cg, **args): + # have to stream bundle to a temp file because we do not have + # http 1.1 chunked transfer. 
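# A minimal, stand-alone sketch of the spool-then-POST pattern described in the
# comment above (no HTTP/1.1 chunked upload, so the payload is written to a
# temporary file and sent with an explicit Content-Length), shown with plain
# urllib2.  'post_url' and 'chunks' are hypothetical; Mercurial itself goes
# through url.httpsendfile and the opener built in __init__ rather than
# urllib2.urlopen.
import os, tempfile, urllib2

def post_spooled(post_url, chunks):
    fd, tmpname = tempfile.mkstemp(prefix='hg-post-')
    try:
        tmp = os.fdopen(fd, 'wb')
        for chunk in chunks:          # spool the (possibly huge) payload to disk
            tmp.write(chunk)
        tmp.close()
        fp = open(tmpname, 'rb')
        try:
            req = urllib2.Request(post_url, fp,
                                  {'Content-Type': 'application/mercurial-0.1'})
            # len() does not work on a file object, so set the size ourselves
            req.add_unredirected_header('Content-Length',
                                        '%d' % os.path.getsize(tmpname))
            return urllib2.urlopen(req).read()
        finally:
            fp.close()
    finally:
        os.unlink(tmpname)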
+ + type = "" + types = self.capable('unbundle') + # servers older than d1b16a746db6 will send 'unbundle' as a + # boolean capability + try: + types = types.split(',') + except AttributeError: + types = [""] + if types: + for x in types: + if x in changegroup.bundletypes: + type = x + break + + tempname = changegroup.writebundle(cg, None, type) + fp = url.httpsendfile(tempname, "rb") + headers = {'Content-Type': 'application/mercurial-0.1'} + + try: + try: + r = self._call(cmd, data=fp, headers=headers, **args) + return r.split('\n', 1) + except socket.error, err: + if err.args[0] in (errno.ECONNRESET, errno.EPIPE): + raise util.Abort(_('push failed: %s') % err.args[1]) + raise util.Abort(err.args[1]) + finally: + fp.close() + os.unlink(tempname) + + def _abort(self, exception): + raise exception + + def _decompress(self, stream): + return util.chunkbuffer(zgenerator(stream)) + +class httpsrepository(httprepository): + def __init__(self, ui, path): + if not url.has_https: + raise util.Abort(_('Python support for SSL and HTTPS ' + 'is not installed')) + httprepository.__init__(self, ui, path) + +def instance(ui, path, create): + if create: + raise util.Abort(_('cannot create new http repository')) + try: + if path.startswith('https:'): + inst = httpsrepository(ui, path) + else: + inst = httprepository(ui, path) + inst.between([(nullid, nullid)]) + return inst + except error.RepoError: + ui.note('(falling back to static-http)\n') + return statichttprepo.instance(ui, "static-" + path, create) diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/httprepo.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/httprepo.pyo Binary files differnew file mode 100644 index 0000000..1936104 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/httprepo.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/i18n.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/i18n.py new file mode 100644 index 0000000..f35311c --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/i18n.py @@ -0,0 +1,58 @@ +# i18n.py - internationalization support for mercurial +# +# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com> +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +import encoding +import gettext, sys, os + +# modelled after templater.templatepath: +if hasattr(sys, 'frozen'): + module = sys.executable +else: + module = __file__ + +base = os.path.dirname(module) +for dir in ('.', '..'): + localedir = os.path.join(base, dir, 'locale') + if os.path.isdir(localedir): + break + +t = gettext.translation('hg', localedir, fallback=True) + +def gettext(message): + """Translate message. + + The message is looked up in the catalog to get a Unicode string, + which is encoded in the local encoding before being returned. + + Important: message is restricted to characters in the encoding + given by sys.getdefaultencoding() which is most likely 'ascii'. + """ + # If message is None, t.ugettext will return u'None' as the + # translation whereas our callers expect us to return None. + if message is None: + return message + + paragraphs = message.split('\n\n') + # Be careful not to translate the empty string -- it holds the + # meta data of the .po file. + u = u'\n\n'.join([p and t.ugettext(p) or '' for p in paragraphs]) + try: + # encoding.tolocal cannot be used since it will first try to + # decode the Unicode string. 
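# A small stand-alone sketch of the Python 2 pitfall this comment refers to:
# calling .decode() on a unicode object first encodes it with the default
# 'ascii' codec, so non-ASCII text blows up, while .encode(enc, "replace")
# does not.  The sample string and target encoding are made up.
u = u'caf\xe9'                        # a translated message with a non-ASCII char
try:
    u.decode('iso-8859-1')            # implicitly does u.encode('ascii') first
except UnicodeError:
    pass                              # this is the failure described above
s = u.encode('iso-8859-1', "replace") # roughly what gettext() above does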
Calling u.decode(enc) really + # means u.encode(sys.getdefaultencoding()).decode(enc). Since + # the Python encoding defaults to 'ascii', this fails if the + # translated string use non-ASCII characters. + return u.encode(encoding.encoding, "replace") + except LookupError: + # An unknown encoding results in a LookupError. + return message + +if 'HGPLAIN' in os.environ: + _ = lambda message: message +else: + _ = gettext + diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/i18n.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/i18n.pyo Binary files differnew file mode 100644 index 0000000..3cfad64 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/i18n.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/ignore.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/ignore.py new file mode 100644 index 0000000..8cc5058 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/ignore.py @@ -0,0 +1,103 @@ +# ignore.py - ignored file handling for mercurial +# +# Copyright 2007 Matt Mackall <mpm@selenic.com> +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +from i18n import _ +import util, match +import re + +_commentre = None + +def ignorepats(lines): + '''parse lines (iterable) of .hgignore text, returning a tuple of + (patterns, parse errors). These patterns should be given to compile() + to be validated and converted into a match function.''' + syntaxes = {'re': 'relre:', 'regexp': 'relre:', 'glob': 'relglob:'} + syntax = 'relre:' + patterns = [] + warnings = [] + + for line in lines: + if "#" in line: + global _commentre + if not _commentre: + _commentre = re.compile(r'((^|[^\\])(\\\\)*)#.*') + # remove comments prefixed by an even number of escapes + line = _commentre.sub(r'\1', line) + # fixup properly escaped comments that survived the above + line = line.replace("\\#", "#") + line = line.rstrip() + if not line: + continue + + if line.startswith('syntax:'): + s = line[7:].strip() + try: + syntax = syntaxes[s] + except KeyError: + warnings.append(_("ignoring invalid syntax '%s'") % s) + continue + pat = syntax + line + for s, rels in syntaxes.iteritems(): + if line.startswith(rels): + pat = line + break + elif line.startswith(s+':'): + pat = rels + line[len(s)+1:] + break + patterns.append(pat) + + return patterns, warnings + +def ignore(root, files, warn): + '''return matcher covering patterns in 'files'. + + the files parsed for patterns include: + .hgignore in the repository root + any additional files specified in the [ui] section of ~/.hgrc + + trailing white space is dropped. + the escape character is backslash. + comments start with #. + empty lines are skipped. 
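# A stand-alone sketch of the comment handling implemented by ignorepats()
# above: '#' starts a comment unless escaped by a backslash, and the escape is
# dropped afterwards.  The sample line is fabricated; the regex is the same
# one compiled into _commentre.
import re
commentre = re.compile(r'((^|[^\\])(\\\\)*)#.*')
line = r'foo\#bar.txt  # trailing comment'
line = commentre.sub(r'\1', line)     # strip the unescaped comment
line = line.replace('\\#', '#')       # un-escape the surviving '\#'
assert line.rstrip() == 'foo#bar.txt'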
+ + lines can be of the following formats: + + syntax: regexp # defaults following lines to non-rooted regexps + syntax: glob # defaults following lines to non-rooted globs + re:pattern # non-rooted regular expression + glob:pattern # non-rooted glob + pattern # pattern of the current default type''' + + pats = {} + for f in files: + try: + pats[f] = [] + fp = open(f) + pats[f], warnings = ignorepats(fp) + for warning in warnings: + warn("%s: %s\n" % (f, warning)) + except IOError, inst: + if f != files[0]: + warn(_("skipping unreadable ignore file '%s': %s\n") % + (f, inst.strerror)) + + allpats = [] + [allpats.extend(patlist) for patlist in pats.values()] + if not allpats: + return util.never + + try: + ignorefunc = match.match(root, '', [], allpats) + except util.Abort: + # Re-raise an exception where the src is the right file + for f, patlist in pats.iteritems(): + try: + match.match(root, '', [], patlist) + except util.Abort, inst: + raise util.Abort('%s: %s' % (f, inst[0])) + + return ignorefunc diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/ignore.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/ignore.pyo Binary files differnew file mode 100644 index 0000000..0ebb7cb --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/ignore.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/keepalive.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/keepalive.py new file mode 100644 index 0000000..a5a4882 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/keepalive.py @@ -0,0 +1,765 @@ +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the +# Free Software Foundation, Inc., +# 59 Temple Place, Suite 330, +# Boston, MA 02111-1307 USA + +# This file is part of urlgrabber, a high-level cross-protocol url-grabber +# Copyright 2002-2004 Michael D. Stenner, Ryan Tomayko + +# Modified by Benoit Boissinot: +# - fix for digest auth (inspired from urllib2.py @ Python v2.4) +# Modified by Dirkjan Ochtman: +# - import md5 function from a local util module +# Modified by Martin Geisler: +# - moved md5 function from local util module to this module +# Modified by Augie Fackler: +# - add safesend method and use it to prevent broken pipe errors +# on large POST requests + +"""An HTTP handler for urllib2 that supports HTTP 1.1 and keepalive. + +>>> import urllib2 +>>> from keepalive import HTTPHandler +>>> keepalive_handler = HTTPHandler() +>>> opener = urllib2.build_opener(keepalive_handler) +>>> urllib2.install_opener(opener) +>>> +>>> fo = urllib2.urlopen('http://www.python.org') + +If a connection to a given host is requested, and all of the existing +connections are still in use, another connection will be opened. If +the handler tries to use an existing connection but it fails in some +way, it will be closed and removed from the pool. 
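# A stand-alone usage sketch of the pooling behaviour described above: after a
# couple of requests to the same host the handler should report a single open
# connection, which can then be torn down explicitly.  The URL is hypothetical.
import urllib2
from keepalive import HTTPHandler

keepalive_handler = HTTPHandler()
opener = urllib2.build_opener(keepalive_handler)
urllib2.install_opener(opener)
for _ in range(2):
    urllib2.urlopen('http://www.example.org/').read()
print keepalive_handler.open_connections()   # e.g. [('www.example.org', 1)]
keepalive_handler.close_all()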
+ +To remove the handler, simply re-run build_opener with no arguments, and +install that opener. + +You can explicitly close connections by using the close_connection() +method of the returned file-like object (described below) or you can +use the handler methods: + + close_connection(host) + close_all() + open_connections() + +NOTE: using the close_connection and close_all methods of the handler +should be done with care when using multiple threads. + * there is nothing that prevents another thread from creating new + connections immediately after connections are closed + * no checks are done to prevent in-use connections from being closed + +>>> keepalive_handler.close_all() + +EXTRA ATTRIBUTES AND METHODS + + Upon a status of 200, the object returned has a few additional + attributes and methods, which should not be used if you want to + remain consistent with the normal urllib2-returned objects: + + close_connection() - close the connection to the host + readlines() - you know, readlines() + status - the return status (ie 404) + reason - english translation of status (ie 'File not found') + + If you want the best of both worlds, use this inside an + AttributeError-catching try: + + >>> try: status = fo.status + >>> except AttributeError: status = None + + Unfortunately, these are ONLY there if status == 200, so it's not + easy to distinguish between non-200 responses. The reason is that + urllib2 tries to do clever things with error codes 301, 302, 401, + and 407, and it wraps the object upon return. + + For python versions earlier than 2.4, you can avoid this fancy error + handling by setting the module-level global HANDLE_ERRORS to zero. + You see, prior to 2.4, it's the HTTP Handler's job to determine what + to handle specially, and what to just pass up. HANDLE_ERRORS == 0 + means "pass everything up". In python 2.4, however, this job no + longer belongs to the HTTP Handler and is now done by a NEW handler, + HTTPErrorProcessor. Here's the bottom line: + + python version < 2.4 + HANDLE_ERRORS == 1 (default) pass up 200, treat the rest as + errors + HANDLE_ERRORS == 0 pass everything up, error processing is + left to the calling code + python version >= 2.4 + HANDLE_ERRORS == 1 pass up 200, treat the rest as errors + HANDLE_ERRORS == 0 (default) pass everything up, let the + other handlers (specifically, + HTTPErrorProcessor) decide what to do + + In practice, setting the variable either way makes little difference + in python 2.4, so for the most consistent behavior across versions, + you probably just want to use the defaults, which will give you + exceptions on errors. 
+ +""" + +# $Id: keepalive.py,v 1.14 2006/04/04 21:00:32 mstenner Exp $ + +import errno +import httplib +import socket +import thread +import urllib2 + +DEBUG = None + +import sys +if sys.version_info < (2, 4): + HANDLE_ERRORS = 1 +else: HANDLE_ERRORS = 0 + +class ConnectionManager: + """ + The connection manager must be able to: + * keep track of all existing + """ + def __init__(self): + self._lock = thread.allocate_lock() + self._hostmap = {} # map hosts to a list of connections + self._connmap = {} # map connections to host + self._readymap = {} # map connection to ready state + + def add(self, host, connection, ready): + self._lock.acquire() + try: + if not host in self._hostmap: + self._hostmap[host] = [] + self._hostmap[host].append(connection) + self._connmap[connection] = host + self._readymap[connection] = ready + finally: + self._lock.release() + + def remove(self, connection): + self._lock.acquire() + try: + try: + host = self._connmap[connection] + except KeyError: + pass + else: + del self._connmap[connection] + del self._readymap[connection] + self._hostmap[host].remove(connection) + if not self._hostmap[host]: del self._hostmap[host] + finally: + self._lock.release() + + def set_ready(self, connection, ready): + try: + self._readymap[connection] = ready + except KeyError: + pass + + def get_ready_conn(self, host): + conn = None + self._lock.acquire() + try: + if host in self._hostmap: + for c in self._hostmap[host]: + if self._readymap[c]: + self._readymap[c] = 0 + conn = c + break + finally: + self._lock.release() + return conn + + def get_all(self, host=None): + if host: + return list(self._hostmap.get(host, [])) + else: + return dict(self._hostmap) + +class KeepAliveHandler: + def __init__(self): + self._cm = ConnectionManager() + + #### Connection Management + def open_connections(self): + """return a list of connected hosts and the number of connections + to each. [('foo.com:80', 2), ('bar.org', 1)]""" + return [(host, len(li)) for (host, li) in self._cm.get_all().items()] + + def close_connection(self, host): + """close connection(s) to <host> + host is the host:port spec, as in 'www.cnn.com:8080' as passed in. + no error occurs if there is no connection to that host.""" + for h in self._cm.get_all(host): + self._cm.remove(h) + h.close() + + def close_all(self): + """close all open connections""" + for host, conns in self._cm.get_all().iteritems(): + for h in conns: + self._cm.remove(h) + h.close() + + def _request_closed(self, request, host, connection): + """tells us that this request is now closed and the the + connection is ready for another request""" + self._cm.set_ready(connection, 1) + + def _remove_connection(self, host, connection, close=0): + if close: + connection.close() + self._cm.remove(connection) + + #### Transaction Execution + def http_open(self, req): + return self.do_open(HTTPConnection, req) + + def do_open(self, http_class, req): + host = req.get_host() + if not host: + raise urllib2.URLError('no host given') + + try: + h = self._cm.get_ready_conn(host) + while h: + r = self._reuse_connection(h, req, host) + + # if this response is non-None, then it worked and we're + # done. Break out, skipping the else block. + if r: + break + + # connection is bad - possibly closed by server + # discard it and ask for the next free connection + h.close() + self._cm.remove(h) + h = self._cm.get_ready_conn(host) + else: + # no (working) free connections were found. Create a new one. 
+ h = http_class(host) + if DEBUG: + DEBUG.info("creating new connection to %s (%d)", + host, id(h)) + self._cm.add(host, h, 0) + self._start_transaction(h, req) + r = h.getresponse() + except (socket.error, httplib.HTTPException), err: + raise urllib2.URLError(err) + + # if not a persistent connection, don't try to reuse it + if r.will_close: + self._cm.remove(h) + + if DEBUG: + DEBUG.info("STATUS: %s, %s", r.status, r.reason) + r._handler = self + r._host = host + r._url = req.get_full_url() + r._connection = h + r.code = r.status + r.headers = r.msg + r.msg = r.reason + + if r.status == 200 or not HANDLE_ERRORS: + return r + else: + return self.parent.error('http', req, r, + r.status, r.msg, r.headers) + + def _reuse_connection(self, h, req, host): + """start the transaction with a re-used connection + return a response object (r) upon success or None on failure. + This DOES not close or remove bad connections in cases where + it returns. However, if an unexpected exception occurs, it + will close and remove the connection before re-raising. + """ + try: + self._start_transaction(h, req) + r = h.getresponse() + # note: just because we got something back doesn't mean it + # worked. We'll check the version below, too. + except (socket.error, httplib.HTTPException): + r = None + except: + # adding this block just in case we've missed + # something we will still raise the exception, but + # lets try and close the connection and remove it + # first. We previously got into a nasty loop + # where an exception was uncaught, and so the + # connection stayed open. On the next try, the + # same exception was raised, etc. The tradeoff is + # that it's now possible this call will raise + # a DIFFERENT exception + if DEBUG: + DEBUG.error("unexpected exception - closing " + "connection to %s (%d)", host, id(h)) + self._cm.remove(h) + h.close() + raise + + if r is None or r.version == 9: + # httplib falls back to assuming HTTP 0.9 if it gets a + # bad header back. This is most likely to happen if + # the socket has been closed by the server since we + # last used the connection. + if DEBUG: + DEBUG.info("failed to re-use connection to %s (%d)", + host, id(h)) + r = None + else: + if DEBUG: + DEBUG.info("re-using connection to %s (%d)", host, id(h)) + + return r + + def _start_transaction(self, h, req): + # What follows mostly reimplements HTTPConnection.request() + # except it adds self.parent.addheaders in the mix. 
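# For comparison, a stand-alone sketch of the plain httplib call that
# _start_transaction below redoes by hand, so that headers added via
# parent.addheaders (and the skip_host / skip_accept_encoding flags) can be
# folded in.  Host and header are hypothetical.
import httplib
conn = httplib.HTTPConnection('www.example.org')
conn.request('GET', '/', headers={'X-Example': 'yes'})
resp = conn.getresponse()
print resp.status, resp.reason
conn.close()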
+ headers = req.headers.copy() + if sys.version_info >= (2, 4): + headers.update(req.unredirected_hdrs) + headers.update(self.parent.addheaders) + headers = dict((n.lower(), v) for n, v in headers.items()) + skipheaders = {} + for n in ('host', 'accept-encoding'): + if n in headers: + skipheaders['skip_' + n.replace('-', '_')] = 1 + try: + if req.has_data(): + data = req.get_data() + h.putrequest('POST', req.get_selector(), **skipheaders) + if 'content-type' not in headers: + h.putheader('Content-type', + 'application/x-www-form-urlencoded') + if 'content-length' not in headers: + h.putheader('Content-length', '%d' % len(data)) + else: + h.putrequest('GET', req.get_selector(), **skipheaders) + except (socket.error), err: + raise urllib2.URLError(err) + for k, v in headers.items(): + h.putheader(k, v) + h.endheaders() + if req.has_data(): + h.send(data) + +class HTTPHandler(KeepAliveHandler, urllib2.HTTPHandler): + pass + +class HTTPResponse(httplib.HTTPResponse): + # we need to subclass HTTPResponse in order to + # 1) add readline() and readlines() methods + # 2) add close_connection() methods + # 3) add info() and geturl() methods + + # in order to add readline(), read must be modified to deal with a + # buffer. example: readline must read a buffer and then spit back + # one line at a time. The only real alternative is to read one + # BYTE at a time (ick). Once something has been read, it can't be + # put back (ok, maybe it can, but that's even uglier than this), + # so if you THEN do a normal read, you must first take stuff from + # the buffer. + + # the read method wraps the original to accomodate buffering, + # although read() never adds to the buffer. + # Both readline and readlines have been stolen with almost no + # modification from socket.py + + + def __init__(self, sock, debuglevel=0, strict=0, method=None): + if method: # the httplib in python 2.3 uses the method arg + httplib.HTTPResponse.__init__(self, sock, debuglevel, method) + else: # 2.2 doesn't + httplib.HTTPResponse.__init__(self, sock, debuglevel) + self.fileno = sock.fileno + self.code = None + self._rbuf = '' + self._rbufsize = 8096 + self._handler = None # inserted by the handler later + self._host = None # (same) + self._url = None # (same) + self._connection = None # (same) + + _raw_read = httplib.HTTPResponse.read + + def close(self): + if self.fp: + self.fp.close() + self.fp = None + if self._handler: + self._handler._request_closed(self, self._host, + self._connection) + + def close_connection(self): + self._handler._remove_connection(self._host, self._connection, close=1) + self.close() + + def info(self): + return self.headers + + def geturl(self): + return self._url + + def read(self, amt=None): + # the _rbuf test is only in this first if for speed. It's not + # logically necessary + if self._rbuf and not amt is None: + L = len(self._rbuf) + if amt > L: + amt -= L + else: + s = self._rbuf[:amt] + self._rbuf = self._rbuf[amt:] + return s + + s = self._rbuf + self._raw_read(amt) + self._rbuf = '' + return s + + # stolen from Python SVN #68532 to fix issue1088 + def _read_chunked(self, amt): + chunk_left = self.chunk_left + value = '' + + # XXX This accumulates chunks by repeated string concatenation, + # which is not efficient as the number or size of chunks gets big. 
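# A stand-alone illustration of the usual remedy hinted at by the XXX note
# above (collect chunks in a list and join once); shown only as a sketch, the
# method below keeps the original concatenation behaviour.
def join_chunks(read_chunk):
    parts = []                        # avoids quadratic string concatenation
    while True:
        chunk = read_chunk()
        if not chunk:
            break
        parts.append(chunk)
    return ''.join(parts)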
+ while True: + if chunk_left is None: + line = self.fp.readline() + i = line.find(';') + if i >= 0: + line = line[:i] # strip chunk-extensions + try: + chunk_left = int(line, 16) + except ValueError: + # close the connection as protocol synchronisation is + # probably lost + self.close() + raise httplib.IncompleteRead(value) + if chunk_left == 0: + break + if amt is None: + value += self._safe_read(chunk_left) + elif amt < chunk_left: + value += self._safe_read(amt) + self.chunk_left = chunk_left - amt + return value + elif amt == chunk_left: + value += self._safe_read(amt) + self._safe_read(2) # toss the CRLF at the end of the chunk + self.chunk_left = None + return value + else: + value += self._safe_read(chunk_left) + amt -= chunk_left + + # we read the whole chunk, get another + self._safe_read(2) # toss the CRLF at the end of the chunk + chunk_left = None + + # read and discard trailer up to the CRLF terminator + ### note: we shouldn't have any trailers! + while True: + line = self.fp.readline() + if not line: + # a vanishingly small number of sites EOF without + # sending the trailer + break + if line == '\r\n': + break + + # we read everything; close the "file" + self.close() + + return value + + def readline(self, limit=-1): + i = self._rbuf.find('\n') + while i < 0 and not (0 < limit <= len(self._rbuf)): + new = self._raw_read(self._rbufsize) + if not new: + break + i = new.find('\n') + if i >= 0: + i = i + len(self._rbuf) + self._rbuf = self._rbuf + new + if i < 0: + i = len(self._rbuf) + else: + i = i + 1 + if 0 <= limit < len(self._rbuf): + i = limit + data, self._rbuf = self._rbuf[:i], self._rbuf[i:] + return data + + def readlines(self, sizehint = 0): + total = 0 + list = [] + while 1: + line = self.readline() + if not line: + break + list.append(line) + total += len(line) + if sizehint and total >= sizehint: + break + return list + +def safesend(self, str): + """Send `str' to the server. + + Shamelessly ripped off from httplib to patch a bad behavior. + """ + # _broken_pipe_resp is an attribute we set in this function + # if the socket is closed while we're sending data but + # the server sent us a response before hanging up. + # In that case, we want to pretend to send the rest of the + # outgoing data, and then let the user use getresponse() + # (which we wrap) to get this last response before + # opening a new socket. + if getattr(self, '_broken_pipe_resp', None) is not None: + return + + if self.sock is None: + if self.auto_open: + self.connect() + else: + raise httplib.NotConnected() + + # send the data to the server. if we get a broken pipe, then close + # the socket. we want to reconnect when somebody tries to send again. + # + # NOTE: we DO propagate the error, though, because we cannot simply + # ignore the error... the caller will know if they can retry. + if self.debuglevel > 0: + print "send:", repr(str) + try: + blocksize = 8192 + if hasattr(str,'read') : + if self.debuglevel > 0: + print "sendIng a read()able" + data = str.read(blocksize) + while data: + self.sock.sendall(data) + data = str.read(blocksize) + else: + self.sock.sendall(str) + except socket.error, v: + reraise = True + if v[0] == errno.EPIPE: # Broken pipe + if self._HTTPConnection__state == httplib._CS_REQ_SENT: + self._broken_pipe_resp = None + self._broken_pipe_resp = self.getresponse() + reraise = False + self.close() + if reraise: + raise + +def wrapgetresponse(cls): + """Wraps getresponse in cls with a broken-pipe sane version. 
+ """ + def safegetresponse(self): + # In safesend() we might set the _broken_pipe_resp + # attribute, in which case the socket has already + # been closed and we just need to give them the response + # back. Otherwise, we use the normal response path. + r = getattr(self, '_broken_pipe_resp', None) + if r is not None: + return r + return cls.getresponse(self) + safegetresponse.__doc__ = cls.getresponse.__doc__ + return safegetresponse + +class HTTPConnection(httplib.HTTPConnection): + # use the modified response class + response_class = HTTPResponse + send = safesend + getresponse = wrapgetresponse(httplib.HTTPConnection) + + +######################################################################### +##### TEST FUNCTIONS +######################################################################### + +def error_handler(url): + global HANDLE_ERRORS + orig = HANDLE_ERRORS + keepalive_handler = HTTPHandler() + opener = urllib2.build_opener(keepalive_handler) + urllib2.install_opener(opener) + pos = {0: 'off', 1: 'on'} + for i in (0, 1): + print " fancy error handling %s (HANDLE_ERRORS = %i)" % (pos[i], i) + HANDLE_ERRORS = i + try: + fo = urllib2.urlopen(url) + fo.read() + fo.close() + try: + status, reason = fo.status, fo.reason + except AttributeError: + status, reason = None, None + except IOError, e: + print " EXCEPTION: %s" % e + raise + else: + print " status = %s, reason = %s" % (status, reason) + HANDLE_ERRORS = orig + hosts = keepalive_handler.open_connections() + print "open connections:", hosts + keepalive_handler.close_all() + +def md5(s): + try: + from hashlib import md5 as _md5 + except ImportError: + from md5 import md5 as _md5 + global md5 + md5 = _md5 + return _md5(s) + +def continuity(url): + format = '%25s: %s' + + # first fetch the file with the normal http handler + opener = urllib2.build_opener() + urllib2.install_opener(opener) + fo = urllib2.urlopen(url) + foo = fo.read() + fo.close() + m = md5.new(foo) + print format % ('normal urllib', m.hexdigest()) + + # now install the keepalive handler and try again + opener = urllib2.build_opener(HTTPHandler()) + urllib2.install_opener(opener) + + fo = urllib2.urlopen(url) + foo = fo.read() + fo.close() + m = md5.new(foo) + print format % ('keepalive read', m.hexdigest()) + + fo = urllib2.urlopen(url) + foo = '' + while 1: + f = fo.readline() + if f: + foo = foo + f + else: break + fo.close() + m = md5.new(foo) + print format % ('keepalive readline', m.hexdigest()) + +def comp(N, url): + print ' making %i connections to:\n %s' % (N, url) + + sys.stdout.write(' first using the normal urllib handlers') + # first use normal opener + opener = urllib2.build_opener() + urllib2.install_opener(opener) + t1 = fetch(N, url) + print ' TIME: %.3f s' % t1 + + sys.stdout.write(' now using the keepalive handler ') + # now install the keepalive handler and try again + opener = urllib2.build_opener(HTTPHandler()) + urllib2.install_opener(opener) + t2 = fetch(N, url) + print ' TIME: %.3f s' % t2 + print ' improvement factor: %.2f' % (t1 / t2) + +def fetch(N, url, delay=0): + import time + lens = [] + starttime = time.time() + for i in range(N): + if delay and i > 0: + time.sleep(delay) + fo = urllib2.urlopen(url) + foo = fo.read() + fo.close() + lens.append(len(foo)) + diff = time.time() - starttime + + j = 0 + for i in lens[1:]: + j = j + 1 + if not i == lens[0]: + print "WARNING: inconsistent length on read %i: %i" % (j, i) + + return diff + +def test_timeout(url): + global DEBUG + dbbackup = DEBUG + class FakeLogger: + def debug(self, msg, *args): 
+ print msg % args + info = warning = error = debug + DEBUG = FakeLogger() + print " fetching the file to establish a connection" + fo = urllib2.urlopen(url) + data1 = fo.read() + fo.close() + + i = 20 + print " waiting %i seconds for the server to close the connection" % i + while i > 0: + sys.stdout.write('\r %2i' % i) + sys.stdout.flush() + time.sleep(1) + i -= 1 + sys.stderr.write('\r') + + print " fetching the file a second time" + fo = urllib2.urlopen(url) + data2 = fo.read() + fo.close() + + if data1 == data2: + print ' data are identical' + else: + print ' ERROR: DATA DIFFER' + + DEBUG = dbbackup + + +def test(url, N=10): + print "checking error hander (do this on a non-200)" + try: error_handler(url) + except IOError: + print "exiting - exception will prevent further tests" + sys.exit() + print + print "performing continuity test (making sure stuff isn't corrupted)" + continuity(url) + print + print "performing speed comparison" + comp(N, url) + print + print "performing dropped-connection check" + test_timeout(url) + +if __name__ == '__main__': + import time + import sys + try: + N = int(sys.argv[1]) + url = sys.argv[2] + except: + print "%s <integer> <url>" % sys.argv[0] + else: + test(url, N) diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/keepalive.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/keepalive.pyo Binary files differnew file mode 100644 index 0000000..f144af5 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/keepalive.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/localrepo.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/localrepo.py new file mode 100644 index 0000000..23e6ff4 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/localrepo.py @@ -0,0 +1,1906 @@ +# localrepo.py - read/write repository class for mercurial +# +# Copyright 2005-2007 Matt Mackall <mpm@selenic.com> +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. 
+ +from node import bin, hex, nullid, nullrev, short +from i18n import _ +import repo, changegroup, subrepo, discovery, pushkey +import changelog, dirstate, filelog, manifest, context +import lock, transaction, store, encoding +import util, extensions, hook, error +import match as matchmod +import merge as mergemod +import tags as tagsmod +import url as urlmod +from lock import release +import weakref, errno, os, time, inspect +propertycache = util.propertycache + +class localrepository(repo.repository): + capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey')) + supportedformats = set(('revlogv1', 'parentdelta')) + supported = supportedformats | set(('store', 'fncache', 'shared', + 'dotencode')) + + def __init__(self, baseui, path=None, create=0): + repo.repository.__init__(self) + self.root = os.path.realpath(util.expandpath(path)) + self.path = os.path.join(self.root, ".hg") + self.origroot = path + self.auditor = util.path_auditor(self.root, self._checknested) + self.opener = util.opener(self.path) + self.wopener = util.opener(self.root) + self.baseui = baseui + self.ui = baseui.copy() + + try: + self.ui.readconfig(self.join("hgrc"), self.root) + extensions.loadall(self.ui) + except IOError: + pass + + if not os.path.isdir(self.path): + if create: + if not os.path.exists(path): + util.makedirs(path) + os.mkdir(self.path) + requirements = ["revlogv1"] + if self.ui.configbool('format', 'usestore', True): + os.mkdir(os.path.join(self.path, "store")) + requirements.append("store") + if self.ui.configbool('format', 'usefncache', True): + requirements.append("fncache") + if self.ui.configbool('format', 'dotencode', True): + requirements.append('dotencode') + # create an invalid changelog + self.opener("00changelog.i", "a").write( + '\0\0\0\2' # represents revlogv2 + ' dummy changelog to prevent using the old repo layout' + ) + if self.ui.configbool('format', 'parentdelta', False): + requirements.append("parentdelta") + else: + raise error.RepoError(_("repository %s not found") % path) + elif create: + raise error.RepoError(_("repository %s already exists") % path) + else: + # find requirements + requirements = set() + try: + requirements = set(self.opener("requires").read().splitlines()) + except IOError, inst: + if inst.errno != errno.ENOENT: + raise + for r in requirements - self.supported: + raise error.RepoError(_("requirement '%s' not supported") % r) + + self.sharedpath = self.path + try: + s = os.path.realpath(self.opener("sharedpath").read()) + if not os.path.exists(s): + raise error.RepoError( + _('.hg/sharedpath points to nonexistent directory %s') % s) + self.sharedpath = s + except IOError, inst: + if inst.errno != errno.ENOENT: + raise + + self.store = store.store(requirements, self.sharedpath, util.opener) + self.spath = self.store.path + self.sopener = self.store.opener + self.sjoin = self.store.join + self.opener.createmode = self.store.createmode + self._applyrequirements(requirements) + if create: + self._writerequirements() + + # These two define the set of tags for this repository. _tags + # maps tag name to node; _tagtypes maps tag name to 'global' or + # 'local'. (Global tags are defined by .hgtags across all + # heads, and local tags are defined in .hg/localtags.) They + # constitute the in-memory cache of tags. 
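# A stand-alone illustration of the two caches initialised just below; the
# values are fabricated.  _tags maps a tag name to a 20-byte changeset node,
# and _tagtypes records where each tag came from.
example_tags = {'tip': '\x12\x34' * 10,       # binary node id, made up
                'v1.0': '\xab\xcd' * 10}
example_tagtypes = {'v1.0': 'global',         # defined in .hgtags
                    'local-wip': 'local'}     # defined in .hg/localtags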
+ self._tags = None + self._tagtypes = None + + self._branchcache = None # in UTF-8 + self._branchcachetip = None + self.nodetagscache = None + self.filterpats = {} + self._datafilters = {} + self._transref = self._lockref = self._wlockref = None + + def _applyrequirements(self, requirements): + self.requirements = requirements + self.sopener.options = {} + if 'parentdelta' in requirements: + self.sopener.options['parentdelta'] = 1 + + def _writerequirements(self): + reqfile = self.opener("requires", "w") + for r in self.requirements: + reqfile.write("%s\n" % r) + reqfile.close() + + def _checknested(self, path): + """Determine if path is a legal nested repository.""" + if not path.startswith(self.root): + return False + subpath = path[len(self.root) + 1:] + + # XXX: Checking against the current working copy is wrong in + # the sense that it can reject things like + # + # $ hg cat -r 10 sub/x.txt + # + # if sub/ is no longer a subrepository in the working copy + # parent revision. + # + # However, it can of course also allow things that would have + # been rejected before, such as the above cat command if sub/ + # is a subrepository now, but was a normal directory before. + # The old path auditor would have rejected by mistake since it + # panics when it sees sub/.hg/. + # + # All in all, checking against the working copy seems sensible + # since we want to prevent access to nested repositories on + # the filesystem *now*. + ctx = self[None] + parts = util.splitpath(subpath) + while parts: + prefix = os.sep.join(parts) + if prefix in ctx.substate: + if prefix == subpath: + return True + else: + sub = ctx.sub(prefix) + return sub.checknested(subpath[len(prefix) + 1:]) + else: + parts.pop() + return False + + + @propertycache + def changelog(self): + c = changelog.changelog(self.sopener) + if 'HG_PENDING' in os.environ: + p = os.environ['HG_PENDING'] + if p.startswith(self.root): + c.readpending('00changelog.i.a') + self.sopener.options['defversion'] = c.version + return c + + @propertycache + def manifest(self): + return manifest.manifest(self.sopener) + + @propertycache + def dirstate(self): + return dirstate.dirstate(self.opener, self.ui, self.root) + + def __getitem__(self, changeid): + if changeid is None: + return context.workingctx(self) + return context.changectx(self, changeid) + + def __contains__(self, changeid): + try: + return bool(self.lookup(changeid)) + except error.RepoLookupError: + return False + + def __nonzero__(self): + return True + + def __len__(self): + return len(self.changelog) + + def __iter__(self): + for i in xrange(len(self)): + yield i + + def url(self): + return 'file:' + self.root + + def hook(self, name, throw=False, **args): + return hook.hook(self.ui, self, name, throw, **args) + + tag_disallowed = ':\r\n' + + def _tag(self, names, node, message, local, user, date, extra={}): + if isinstance(names, str): + allchars = names + names = (names,) + else: + allchars = ''.join(names) + for c in self.tag_disallowed: + if c in allchars: + raise util.Abort(_('%r cannot be used in a tag name') % c) + + branches = self.branchmap() + for name in names: + self.hook('pretag', throw=True, node=hex(node), tag=name, + local=local) + if name in branches: + self.ui.warn(_("warning: tag %s conflicts with existing" + " branch name\n") % name) + + def writetags(fp, names, munge, prevtags): + fp.seek(0, 2) + if prevtags and prevtags[-1] != '\n': + fp.write('\n') + for name in names: + m = munge and munge(name) or name + if self._tagtypes and name in self._tagtypes: + old = 
self._tags.get(name, nullid) + fp.write('%s %s\n' % (hex(old), m)) + fp.write('%s %s\n' % (hex(node), m)) + fp.close() + + prevtags = '' + if local: + try: + fp = self.opener('localtags', 'r+') + except IOError: + fp = self.opener('localtags', 'a') + else: + prevtags = fp.read() + + # local tags are stored in the current charset + writetags(fp, names, None, prevtags) + for name in names: + self.hook('tag', node=hex(node), tag=name, local=local) + return + + try: + fp = self.wfile('.hgtags', 'rb+') + except IOError: + fp = self.wfile('.hgtags', 'ab') + else: + prevtags = fp.read() + + # committed tags are stored in UTF-8 + writetags(fp, names, encoding.fromlocal, prevtags) + + if '.hgtags' not in self.dirstate: + self[None].add(['.hgtags']) + + m = matchmod.exact(self.root, '', ['.hgtags']) + tagnode = self.commit(message, user, date, extra=extra, match=m) + + for name in names: + self.hook('tag', node=hex(node), tag=name, local=local) + + return tagnode + + def tag(self, names, node, message, local, user, date): + '''tag a revision with one or more symbolic names. + + names is a list of strings or, when adding a single tag, names may be a + string. + + if local is True, the tags are stored in a per-repository file. + otherwise, they are stored in the .hgtags file, and a new + changeset is committed with the change. + + keyword arguments: + + local: whether to store tags in non-version-controlled file + (default False) + + message: commit message to use if committing + + user: name of user to use if committing + + date: date tuple to use if committing''' + + if not local: + for x in self.status()[:5]: + if '.hgtags' in x: + raise util.Abort(_('working copy of .hgtags is changed ' + '(please commit .hgtags manually)')) + + self.tags() # instantiate the cache + self._tag(names, node, message, local, user, date) + + def tags(self): + '''return a mapping of tag to node''' + if self._tags is None: + (self._tags, self._tagtypes) = self._findtags() + + return self._tags + + def _findtags(self): + '''Do the hard work of finding tags. Return a pair of dicts + (tags, tagtypes) where tags maps tag name to node, and tagtypes + maps tag name to a string like \'global\' or \'local\'. + Subclasses or extensions are free to add their own tags, but + should be aware that the returned dicts will be retained for the + duration of the localrepo object.''' + + # XXX what tagtype should subclasses/extensions use? Currently + # mq and bookmarks add tags, but do not set the tagtype at all. + # Should each extension invent its own tag type? Should there + # be one tagtype for all such "virtual" tags? Or is the status + # quo fine? + + alltags = {} # map tag name to (node, hist) + tagtypes = {} + + tagsmod.findglobaltags(self.ui, self, alltags, tagtypes) + tagsmod.readlocaltags(self.ui, self, alltags, tagtypes) + + # Build the return dicts. Have to re-encode tag names because + # the tags module always uses UTF-8 (in order not to lose info + # writing to the cache), but the rest of Mercurial wants them in + # local encoding. + tags = {} + for (name, (node, hist)) in alltags.iteritems(): + if node != nullid: + tags[encoding.tolocal(name)] = node + tags['tip'] = self.changelog.tip() + tagtypes = dict([(encoding.tolocal(name), value) + for (name, value) in tagtypes.iteritems()]) + return (tags, tagtypes) + + def tagtype(self, tagname): + ''' + return the type of the given tag. 
result can be: + + 'local' : a local tag + 'global' : a global tag + None : tag does not exist + ''' + + self.tags() + + return self._tagtypes.get(tagname) + + def tagslist(self): + '''return a list of tags ordered by revision''' + l = [] + for t, n in self.tags().iteritems(): + try: + r = self.changelog.rev(n) + except: + r = -2 # sort to the beginning of the list if unknown + l.append((r, t, n)) + return [(t, n) for r, t, n in sorted(l)] + + def nodetags(self, node): + '''return the tags associated with a node''' + if not self.nodetagscache: + self.nodetagscache = {} + for t, n in self.tags().iteritems(): + self.nodetagscache.setdefault(n, []).append(t) + for tags in self.nodetagscache.itervalues(): + tags.sort() + return self.nodetagscache.get(node, []) + + def _branchtags(self, partial, lrev): + # TODO: rename this function? + tiprev = len(self) - 1 + if lrev != tiprev: + ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1)) + self._updatebranchcache(partial, ctxgen) + self._writebranchcache(partial, self.changelog.tip(), tiprev) + + return partial + + def updatebranchcache(self): + tip = self.changelog.tip() + if self._branchcache is not None and self._branchcachetip == tip: + return self._branchcache + + oldtip = self._branchcachetip + self._branchcachetip = tip + if oldtip is None or oldtip not in self.changelog.nodemap: + partial, last, lrev = self._readbranchcache() + else: + lrev = self.changelog.rev(oldtip) + partial = self._branchcache + + self._branchtags(partial, lrev) + # this private cache holds all heads (not just tips) + self._branchcache = partial + + def branchmap(self): + '''returns a dictionary {branch: [branchheads]}''' + self.updatebranchcache() + return self._branchcache + + def branchtags(self): + '''return a dict where branch names map to the tipmost head of + the branch, open heads come before closed''' + bt = {} + for bn, heads in self.branchmap().iteritems(): + tip = heads[-1] + for h in reversed(heads): + if 'close' not in self.changelog.read(h)[5]: + tip = h + break + bt[bn] = tip + return bt + + + def _readbranchcache(self): + partial = {} + try: + f = self.opener("branchheads.cache") + lines = f.read().split('\n') + f.close() + except (IOError, OSError): + return {}, nullid, nullrev + + try: + last, lrev = lines.pop(0).split(" ", 1) + last, lrev = bin(last), int(lrev) + if lrev >= len(self) or self[lrev].node() != last: + # invalidate the cache + raise ValueError('invalidating branch cache (tip differs)') + for l in lines: + if not l: + continue + node, label = l.split(" ", 1) + partial.setdefault(label.strip(), []).append(bin(node)) + except KeyboardInterrupt: + raise + except Exception, inst: + if self.ui.debugflag: + self.ui.warn(str(inst), '\n') + partial, last, lrev = {}, nullid, nullrev + return partial, last, lrev + + def _writebranchcache(self, branches, tip, tiprev): + try: + f = self.opener("branchheads.cache", "w", atomictemp=True) + f.write("%s %s\n" % (hex(tip), tiprev)) + for label, nodes in branches.iteritems(): + for node in nodes: + f.write("%s %s\n" % (hex(node), label)) + f.rename() + except (IOError, OSError): + pass + + def _updatebranchcache(self, partial, ctxgen): + # collect new branch entries + newbranches = {} + for c in ctxgen: + newbranches.setdefault(c.branch(), []).append(c.node()) + # if older branchheads are reachable from new ones, they aren't + # really branchheads. 
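# A stand-alone sketch of the branchheads.cache layout handled by
# _readbranchcache()/_writebranchcache() above: the first line is
# "<tip-hex> <tip-rev>", every following line is "<node-hex> <branch>".
# The file content below is fabricated for illustration.
sample = ("00aa" * 10 + " 42\n" +             # cache is valid up to rev 42
          "11bb" * 10 + " default\n" +
          "22cc" * 10 + " stable\n")
lines = sample.splitlines()
tiphex, tiprev = lines.pop(0).split(" ", 1)
heads = {}
for l in lines:
    node, label = l.split(" ", 1)
    heads.setdefault(label.strip(), []).append(node)
assert heads['default'] == ['11bb' * 10]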
Note checking parents is insufficient: + # 1 (branch a) -> 2 (branch b) -> 3 (branch a) + for branch, newnodes in newbranches.iteritems(): + bheads = partial.setdefault(branch, []) + bheads.extend(newnodes) + if len(bheads) <= 1: + continue + # starting from tip means fewer passes over reachable + while newnodes: + latest = newnodes.pop() + if latest not in bheads: + continue + minbhrev = self[min([self[bh].rev() for bh in bheads])].node() + reachable = self.changelog.reachable(latest, minbhrev) + reachable.remove(latest) + bheads = [b for b in bheads if b not in reachable] + partial[branch] = bheads + + def lookup(self, key): + if isinstance(key, int): + return self.changelog.node(key) + elif key == '.': + return self.dirstate.parents()[0] + elif key == 'null': + return nullid + elif key == 'tip': + return self.changelog.tip() + n = self.changelog._match(key) + if n: + return n + if key in self.tags(): + return self.tags()[key] + if key in self.branchtags(): + return self.branchtags()[key] + n = self.changelog._partialmatch(key) + if n: + return n + + # can't find key, check if it might have come from damaged dirstate + if key in self.dirstate.parents(): + raise error.Abort(_("working directory has unknown parent '%s'!") + % short(key)) + try: + if len(key) == 20: + key = hex(key) + except: + pass + raise error.RepoLookupError(_("unknown revision '%s'") % key) + + def lookupbranch(self, key, remote=None): + repo = remote or self + if key in repo.branchmap(): + return key + + repo = (remote and remote.local()) and remote or self + return repo[key].branch() + + def local(self): + return True + + def join(self, f): + return os.path.join(self.path, f) + + def wjoin(self, f): + return os.path.join(self.root, f) + + def file(self, f): + if f[0] == '/': + f = f[1:] + return filelog.filelog(self.sopener, f) + + def changectx(self, changeid): + return self[changeid] + + def parents(self, changeid=None): + '''get list of changectxs for parents of changeid''' + return self[changeid].parents() + + def filectx(self, path, changeid=None, fileid=None): + """changeid can be a changeset revision, node, or tag. 
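# A stand-alone usage sketch of the key forms accepted by lookup() above;
# 'repo' is a hypothetical localrepository and each call returns a binary
# node id.
node = repo.lookup('tip')          # symbolic name
node = repo.lookup(0)              # integer revision number
node = repo.lookup('.')            # first parent of the working directory
node = repo.lookup('a1b2c3')       # tag, branch, or (partial) hex node id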
+ fileid can be a file revision or node.""" + return context.filectx(self, path, changeid, fileid) + + def getcwd(self): + return self.dirstate.getcwd() + + def pathto(self, f, cwd=None): + return self.dirstate.pathto(f, cwd) + + def wfile(self, f, mode='r'): + return self.wopener(f, mode) + + def _link(self, f): + return os.path.islink(self.wjoin(f)) + + def _loadfilter(self, filter): + if filter not in self.filterpats: + l = [] + for pat, cmd in self.ui.configitems(filter): + if cmd == '!': + continue + mf = matchmod.match(self.root, '', [pat]) + fn = None + params = cmd + for name, filterfn in self._datafilters.iteritems(): + if cmd.startswith(name): + fn = filterfn + params = cmd[len(name):].lstrip() + break + if not fn: + fn = lambda s, c, **kwargs: util.filter(s, c) + # Wrap old filters not supporting keyword arguments + if not inspect.getargspec(fn)[2]: + oldfn = fn + fn = lambda s, c, **kwargs: oldfn(s, c) + l.append((mf, fn, params)) + self.filterpats[filter] = l + return self.filterpats[filter] + + def _filter(self, filterpats, filename, data): + for mf, fn, cmd in filterpats: + if mf(filename): + self.ui.debug("filtering %s through %s\n" % (filename, cmd)) + data = fn(data, cmd, ui=self.ui, repo=self, filename=filename) + break + + return data + + @propertycache + def _encodefilterpats(self): + return self._loadfilter('encode') + + @propertycache + def _decodefilterpats(self): + return self._loadfilter('decode') + + def adddatafilter(self, name, filter): + self._datafilters[name] = filter + + def wread(self, filename): + if self._link(filename): + data = os.readlink(self.wjoin(filename)) + else: + data = self.wopener(filename, 'r').read() + return self._filter(self._encodefilterpats, filename, data) + + def wwrite(self, filename, data, flags): + data = self._filter(self._decodefilterpats, filename, data) + try: + os.unlink(self.wjoin(filename)) + except OSError: + pass + if 'l' in flags: + self.wopener.symlink(data, filename) + else: + self.wopener(filename, 'w').write(data) + if 'x' in flags: + util.set_flags(self.wjoin(filename), False, True) + + def wwritedata(self, filename, data): + return self._filter(self._decodefilterpats, filename, data) + + def transaction(self, desc): + tr = self._transref and self._transref() or None + if tr and tr.running(): + return tr.nest() + + # abort here if the journal already exists + if os.path.exists(self.sjoin("journal")): + raise error.RepoError( + _("abandoned transaction found - run hg recover")) + + # save dirstate for rollback + try: + ds = self.opener("dirstate").read() + except IOError: + ds = "" + self.opener("journal.dirstate", "w").write(ds) + self.opener("journal.branch", "w").write(self.dirstate.branch()) + self.opener("journal.desc", "w").write("%d\n%s\n" % (len(self), desc)) + + renames = [(self.sjoin("journal"), self.sjoin("undo")), + (self.join("journal.dirstate"), self.join("undo.dirstate")), + (self.join("journal.branch"), self.join("undo.branch")), + (self.join("journal.desc"), self.join("undo.desc"))] + tr = transaction.transaction(self.ui.warn, self.sopener, + self.sjoin("journal"), + aftertrans(renames), + self.store.createmode) + self._transref = weakref.ref(tr) + return tr + + def recover(self): + lock = self.lock() + try: + if os.path.exists(self.sjoin("journal")): + self.ui.status(_("rolling back interrupted transaction\n")) + transaction.rollback(self.sopener, self.sjoin("journal"), + self.ui.warn) + self.invalidate() + return True + else: + self.ui.warn(_("no interrupted transaction available\n")) + return False 
+ finally: + lock.release() + + def rollback(self, dryrun=False): + wlock = lock = None + try: + wlock = self.wlock() + lock = self.lock() + if os.path.exists(self.sjoin("undo")): + try: + args = self.opener("undo.desc", "r").read().splitlines() + if len(args) >= 3 and self.ui.verbose: + desc = _("rolling back to revision %s" + " (undo %s: %s)\n") % ( + int(args[0]) - 1, args[1], args[2]) + elif len(args) >= 2: + desc = _("rolling back to revision %s (undo %s)\n") % ( + int(args[0]) - 1, args[1]) + except IOError: + desc = _("rolling back unknown transaction\n") + self.ui.status(desc) + if dryrun: + return + transaction.rollback(self.sopener, self.sjoin("undo"), + self.ui.warn) + util.rename(self.join("undo.dirstate"), self.join("dirstate")) + try: + branch = self.opener("undo.branch").read() + self.dirstate.setbranch(branch) + except IOError: + self.ui.warn(_("Named branch could not be reset, " + "current branch still is: %s\n") + % encoding.tolocal(self.dirstate.branch())) + self.invalidate() + self.dirstate.invalidate() + self.destroyed() + else: + self.ui.warn(_("no rollback information available\n")) + return 1 + finally: + release(lock, wlock) + + def invalidatecaches(self): + self._tags = None + self._tagtypes = None + self.nodetagscache = None + self._branchcache = None # in UTF-8 + self._branchcachetip = None + + def invalidate(self): + for a in "changelog manifest".split(): + if a in self.__dict__: + delattr(self, a) + self.invalidatecaches() + + def _lock(self, lockname, wait, releasefn, acquirefn, desc): + try: + l = lock.lock(lockname, 0, releasefn, desc=desc) + except error.LockHeld, inst: + if not wait: + raise + self.ui.warn(_("waiting for lock on %s held by %r\n") % + (desc, inst.locker)) + # default to 600 seconds timeout + l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")), + releasefn, desc=desc) + if acquirefn: + acquirefn() + return l + + def lock(self, wait=True): + '''Lock the repository store (.hg/store) and return a weak reference + to the lock. Use this before modifying the store (e.g. committing or + stripping). If you are opening a transaction, get a lock as well.)''' + l = self._lockref and self._lockref() + if l is not None and l.held: + l.lock() + return l + + l = self._lock(self.sjoin("lock"), wait, None, self.invalidate, + _('repository %s') % self.origroot) + self._lockref = weakref.ref(l) + return l + + def wlock(self, wait=True): + '''Lock the non-store parts of the repository (everything under + .hg except .hg/store) and return a weak reference to the lock. + Use this before modifying files in .hg.''' + l = self._wlockref and self._wlockref() + if l is not None and l.held: + l.lock() + return l + + l = self._lock(self.join("wlock"), wait, self.dirstate.write, + self.dirstate.invalidate, _('working directory of %s') % + self.origroot) + self._wlockref = weakref.ref(l) + return l + + def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist): + """ + commit an individual file as part of a larger transaction + """ + + fname = fctx.path() + text = fctx.data() + flog = self.file(fname) + fparent1 = manifest1.get(fname, nullid) + fparent2 = fparent2o = manifest2.get(fname, nullid) + + meta = {} + copy = fctx.renamed() + if copy and copy[0] != fname: + # Mark the new revision of this file as a copy of another + # file. This copy data will effectively act as a parent + # of this new revision. 
If this is a merge, the first + # parent will be the nullid (meaning "look up the copy data") + # and the second one will be the other parent. For example: + # + # 0 --- 1 --- 3 rev1 changes file foo + # \ / rev2 renames foo to bar and changes it + # \- 2 -/ rev3 should have bar with all changes and + # should record that bar descends from + # bar in rev2 and foo in rev1 + # + # this allows this merge to succeed: + # + # 0 --- 1 --- 3 rev4 reverts the content change from rev2 + # \ / merging rev3 and rev4 should use bar@rev2 + # \- 2 --- 4 as the merge base + # + + cfname = copy[0] + crev = manifest1.get(cfname) + newfparent = fparent2 + + if manifest2: # branch merge + if fparent2 == nullid or crev is None: # copied on remote side + if cfname in manifest2: + crev = manifest2[cfname] + newfparent = fparent1 + + # find source in nearest ancestor if we've lost track + if not crev: + self.ui.debug(" %s: searching for copy revision for %s\n" % + (fname, cfname)) + for ancestor in self[None].ancestors(): + if cfname in ancestor: + crev = ancestor[cfname].filenode() + break + + if crev: + self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev))) + meta["copy"] = cfname + meta["copyrev"] = hex(crev) + fparent1, fparent2 = nullid, newfparent + else: + self.ui.warn(_("warning: can't find ancestor for '%s' " + "copied from '%s'!\n") % (fname, cfname)) + + elif fparent2 != nullid: + # is one parent an ancestor of the other? + fparentancestor = flog.ancestor(fparent1, fparent2) + if fparentancestor == fparent1: + fparent1, fparent2 = fparent2, nullid + elif fparentancestor == fparent2: + fparent2 = nullid + + # is the file changed? + if fparent2 != nullid or flog.cmp(fparent1, text) or meta: + changelist.append(fname) + return flog.add(text, meta, tr, linkrev, fparent1, fparent2) + + # are just the flags changed during merge? + if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags(): + changelist.append(fname) + + return fparent1 + + def commit(self, text="", user=None, date=None, match=None, force=False, + editor=False, extra={}): + """Add a new revision to current repository. + + Revision information is gathered from the working directory, + match can be used to filter the committed files. If editor is + supplied, it is called to get a commit message. 
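# A stand-alone usage sketch of the interface documented above; 'repo' is a
# hypothetical localrepository and the file list is made up.  Restricting the
# commit to particular files goes through a match object, mirroring the
# signature below.
import match as matchmod
m = matchmod.exact(repo.root, '', ['README', 'setup.py'])
node = repo.commit(text="update build files",
                   user="Jane Doe <jane@example.org>",
                   match=m)
if node is None:
    print "nothing changed"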
+ """ + + def fail(f, msg): + raise util.Abort('%s: %s' % (f, msg)) + + if not match: + match = matchmod.always(self.root, '') + + if not force: + vdirs = [] + match.dir = vdirs.append + match.bad = fail + + wlock = self.wlock() + try: + wctx = self[None] + merge = len(wctx.parents()) > 1 + + if (not force and merge and match and + (match.files() or match.anypats())): + raise util.Abort(_('cannot partially commit a merge ' + '(do not specify files or patterns)')) + + changes = self.status(match=match, clean=force) + if force: + changes[0].extend(changes[6]) # mq may commit unchanged files + + # check subrepos + subs = [] + removedsubs = set() + for p in wctx.parents(): + removedsubs.update(s for s in p.substate if match(s)) + for s in wctx.substate: + removedsubs.discard(s) + if match(s) and wctx.sub(s).dirty(): + subs.append(s) + if (subs or removedsubs): + if (not match('.hgsub') and + '.hgsub' in (wctx.modified() + wctx.added())): + raise util.Abort(_("can't commit subrepos without .hgsub")) + if '.hgsubstate' not in changes[0]: + changes[0].insert(0, '.hgsubstate') + + # make sure all explicit patterns are matched + if not force and match.files(): + matched = set(changes[0] + changes[1] + changes[2]) + + for f in match.files(): + if f == '.' or f in matched or f in wctx.substate: + continue + if f in changes[3]: # missing + fail(f, _('file not found!')) + if f in vdirs: # visited directory + d = f + '/' + for mf in matched: + if mf.startswith(d): + break + else: + fail(f, _("no match under directory!")) + elif f not in self.dirstate: + fail(f, _("file not tracked!")) + + if (not force and not extra.get("close") and not merge + and not (changes[0] or changes[1] or changes[2]) + and wctx.branch() == wctx.p1().branch()): + return None + + ms = mergemod.mergestate(self) + for f in changes[0]: + if f in ms and ms[f] == 'u': + raise util.Abort(_("unresolved merge conflicts " + "(see hg resolve)")) + + cctx = context.workingctx(self, text, user, date, extra, changes) + if editor: + cctx._text = editor(self, cctx, subs) + edited = (text != cctx._text) + + # commit subs + if subs or removedsubs: + state = wctx.substate.copy() + for s in sorted(subs): + sub = wctx.sub(s) + self.ui.status(_('committing subrepository %s\n') % + subrepo.subrelpath(sub)) + sr = sub.commit(cctx._text, user, date) + state[s] = (state[s][0], sr) + subrepo.writestate(self, state) + + # Save commit message in case this transaction gets rolled back + # (e.g. by a pretxncommit hook). Leave the content alone on + # the assumption that the user will use the same editor again. + msgfile = self.opener('last-message.txt', 'wb') + msgfile.write(cctx._text) + msgfile.close() + + p1, p2 = self.dirstate.parents() + hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '') + try: + self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2) + ret = self.commitctx(cctx, True) + except: + if edited: + msgfn = self.pathto(msgfile.name[len(self.root)+1:]) + self.ui.write( + _('note: commit message saved in %s\n') % msgfn) + raise + + # update dirstate and mergestate + for f in changes[0] + changes[1]: + self.dirstate.normal(f) + for f in changes[2]: + self.dirstate.forget(f) + self.dirstate.setparents(ret) + ms.reset() + finally: + wlock.release() + + self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2) + return ret + + def commitctx(self, ctx, error=False): + """Add a new revision to current repository. + Revision information is passed via the context argument. 
+ """ + + tr = lock = None + removed = list(ctx.removed()) + p1, p2 = ctx.p1(), ctx.p2() + m1 = p1.manifest().copy() + m2 = p2.manifest() + user = ctx.user() + + lock = self.lock() + try: + tr = self.transaction("commit") + trp = weakref.proxy(tr) + + # check in files + new = {} + changed = [] + linkrev = len(self) + for f in sorted(ctx.modified() + ctx.added()): + self.ui.note(f + "\n") + try: + fctx = ctx[f] + new[f] = self._filecommit(fctx, m1, m2, linkrev, trp, + changed) + m1.set(f, fctx.flags()) + except OSError, inst: + self.ui.warn(_("trouble committing %s!\n") % f) + raise + except IOError, inst: + errcode = getattr(inst, 'errno', errno.ENOENT) + if error or errcode and errcode != errno.ENOENT: + self.ui.warn(_("trouble committing %s!\n") % f) + raise + else: + removed.append(f) + + # update manifest + m1.update(new) + removed = [f for f in sorted(removed) if f in m1 or f in m2] + drop = [f for f in removed if f in m1] + for f in drop: + del m1[f] + mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(), + p2.manifestnode(), (new, drop)) + + # update changelog + self.changelog.delayupdate() + n = self.changelog.add(mn, changed + removed, ctx.description(), + trp, p1.node(), p2.node(), + user, ctx.date(), ctx.extra().copy()) + p = lambda: self.changelog.writepending() and self.root or "" + xp1, xp2 = p1.hex(), p2 and p2.hex() or '' + self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1, + parent2=xp2, pending=p) + self.changelog.finalize(trp) + tr.close() + + if self._branchcache: + self.updatebranchcache() + return n + finally: + if tr: + tr.release() + lock.release() + + def destroyed(self): + '''Inform the repository that nodes have been destroyed. + Intended for use by strip and rollback, so there's a common + place for anything that has to be done after destroying history.''' + # XXX it might be nice if we could take the list of destroyed + # nodes, but I don't see an easy way for rollback() to do that + + # Ensure the persistent tag cache is updated. Doing it now + # means that the tag cache only has to worry about destroyed + # heads immediately after a strip/rollback. That in turn + # guarantees that "cachetip == currenttip" (comparing both rev + # and node) always means no nodes have been added or destroyed. + + # XXX this is suboptimal when qrefresh'ing: we strip the current + # head, refresh the tag cache, then immediately add a new head. + # But I think doing it this way is necessary for the "instant + # tag cache retrieval" case to work. + self.invalidatecaches() + + def walk(self, match, node=None): + ''' + walk recursively through the directory tree or a given + changeset, finding all files matched by the match + function + ''' + return self[node].walk(match) + + def status(self, node1='.', node2=None, match=None, + ignored=False, clean=False, unknown=False, + listsubrepos=False): + """return status of files between two nodes or node and working directory + + If node1 is None, use the first dirstate parent instead. + If node2 is None, compare node1 with working directory. 
+ """ + + def mfmatches(ctx): + mf = ctx.manifest().copy() + for fn in mf.keys(): + if not match(fn): + del mf[fn] + return mf + + if isinstance(node1, context.changectx): + ctx1 = node1 + else: + ctx1 = self[node1] + if isinstance(node2, context.changectx): + ctx2 = node2 + else: + ctx2 = self[node2] + + working = ctx2.rev() is None + parentworking = working and ctx1 == self['.'] + match = match or matchmod.always(self.root, self.getcwd()) + listignored, listclean, listunknown = ignored, clean, unknown + + # load earliest manifest first for caching reasons + if not working and ctx2.rev() < ctx1.rev(): + ctx2.manifest() + + if not parentworking: + def bad(f, msg): + if f not in ctx1: + self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg)) + match.bad = bad + + if working: # we need to scan the working dir + subrepos = [] + if '.hgsub' in self.dirstate: + subrepos = ctx1.substate.keys() + s = self.dirstate.status(match, subrepos, listignored, + listclean, listunknown) + cmp, modified, added, removed, deleted, unknown, ignored, clean = s + + # check for any possibly clean files + if parentworking and cmp: + fixup = [] + # do a full compare of any files that might have changed + for f in sorted(cmp): + if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f) + or ctx1[f].cmp(ctx2[f])): + modified.append(f) + else: + fixup.append(f) + + # update dirstate for files that are actually clean + if fixup: + if listclean: + clean += fixup + + try: + # updating the dirstate is optional + # so we don't wait on the lock + wlock = self.wlock(False) + try: + for f in fixup: + self.dirstate.normal(f) + finally: + wlock.release() + except error.LockError: + pass + + if not parentworking: + mf1 = mfmatches(ctx1) + if working: + # we are comparing working dir against non-parent + # generate a pseudo-manifest for the working dir + mf2 = mfmatches(self['.']) + for f in cmp + modified + added: + mf2[f] = None + mf2.set(f, ctx2.flags(f)) + for f in removed: + if f in mf2: + del mf2[f] + else: + # we are comparing two revisions + deleted, unknown, ignored = [], [], [] + mf2 = mfmatches(ctx2) + + modified, added, clean = [], [], [] + for fn in mf2: + if fn in mf1: + if (mf1.flags(fn) != mf2.flags(fn) or + (mf1[fn] != mf2[fn] and + (mf2[fn] or ctx1[fn].cmp(ctx2[fn])))): + modified.append(fn) + elif listclean: + clean.append(fn) + del mf1[fn] + else: + added.append(fn) + removed = mf1.keys() + + r = modified, added, removed, deleted, unknown, ignored, clean + + if listsubrepos: + for subpath, sub in subrepo.itersubrepos(ctx1, ctx2): + if working: + rev2 = None + else: + rev2 = ctx2.substate[subpath][1] + try: + submatch = matchmod.narrowmatcher(subpath, match) + s = sub.status(rev2, match=submatch, ignored=listignored, + clean=listclean, unknown=listunknown, + listsubrepos=True) + for rfiles, sfiles in zip(r, s): + rfiles.extend("%s/%s" % (subpath, f) for f in sfiles) + except error.LookupError: + self.ui.status(_("skipping missing subrepository: %s\n") + % subpath) + + [l.sort() for l in r] + return r + + def heads(self, start=None): + heads = self.changelog.heads(start) + # sort the output in rev descending order + heads = [(-self.changelog.rev(h), h) for h in heads] + return [n for (r, n) in sorted(heads)] + + def branchheads(self, branch=None, start=None, closed=False): + '''return a (possibly filtered) list of heads for the given branch + + Heads are returned in topological order, from newest to oldest. + If branch is None, use the dirstate branch. + If start is not None, return only heads reachable from start. 
+ If closed is True, return heads that are marked as closed as well. + ''' + if branch is None: + branch = self[None].branch() + branches = self.branchmap() + if branch not in branches: + return [] + # the cache returns heads ordered lowest to highest + bheads = list(reversed(branches[branch])) + if start is not None: + # filter out the heads that cannot be reached from startrev + fbheads = set(self.changelog.nodesbetween([start], bheads)[2]) + bheads = [h for h in bheads if h in fbheads] + if not closed: + bheads = [h for h in bheads if + ('close' not in self.changelog.read(h)[5])] + return bheads + + def branches(self, nodes): + if not nodes: + nodes = [self.changelog.tip()] + b = [] + for n in nodes: + t = n + while 1: + p = self.changelog.parents(n) + if p[1] != nullid or p[0] == nullid: + b.append((t, n, p[0], p[1])) + break + n = p[0] + return b + + def between(self, pairs): + r = [] + + for top, bottom in pairs: + n, l, i = top, [], 0 + f = 1 + + while n != bottom and n != nullid: + p = self.changelog.parents(n)[0] + if i == f: + l.append(n) + f = f * 2 + n = p + i += 1 + + r.append(l) + + return r + + def pull(self, remote, heads=None, force=False): + lock = self.lock() + try: + tmp = discovery.findcommonincoming(self, remote, heads=heads, + force=force) + common, fetch, rheads = tmp + if not fetch: + self.ui.status(_("no changes found\n")) + return 0 + + if heads is None and fetch == [nullid]: + self.ui.status(_("requesting all changes\n")) + elif heads is None and remote.capable('changegroupsubset'): + # issue1320, avoid a race if remote changed after discovery + heads = rheads + + if heads is None: + cg = remote.changegroup(fetch, 'pull') + else: + if not remote.capable('changegroupsubset'): + raise util.Abort(_("partial pull cannot be done because " + "other repository doesn't support " + "changegroupsubset.")) + cg = remote.changegroupsubset(fetch, heads, 'pull') + return self.addchangegroup(cg, 'pull', remote.url(), lock=lock) + finally: + lock.release() + + def push(self, remote, force=False, revs=None, newbranch=False): + '''Push outgoing changesets (limited by revs) from the current + repository to remote. Return an integer: + - 0 means HTTP error *or* nothing to push + - 1 means we pushed and remote head count is unchanged *or* + we have outgoing changesets but refused to push + - other values as described by addchangegroup() + ''' + # there are two ways to push to remote repo: + # + # addchangegroup assumes local user can lock remote + # repo (local filesystem, old ssh servers). + # + # unbundle assumes local user cannot lock remote repo (new ssh + # servers, http servers). + + lock = None + unbundle = remote.capable('unbundle') + if not unbundle: + lock = remote.lock() + try: + ret = discovery.prepush(self, remote, force, revs, newbranch) + if ret[0] is None: + # and here we return 0 for "nothing to push" or 1 for + # "something to push but I refuse" + return ret[1] + + cg, remote_heads = ret + if unbundle: + # local repo finds heads on server, finds out what revs it must + # push. once revs transferred, if server finds it has + # different heads (someone else won commit/push race), server + # aborts. 
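To tie the return conventions of pull() and push() above to actual use, a hedged sketch with placeholder paths and URL; hg.repository() is assumed to hand back ssh/http peer objects for remote URLs, as it does in this release:

    from mercurial import ui as uimod, hg

    u = uimod.ui()
    local = hg.repository(u, '/path/to/local')                  # placeholder path
    remote = hg.repository(u, 'ssh://hg@example.com/project')   # placeholder URL
    local.pull(remote)                # returns 0 when no changes are found
    ret = local.push(remote)
    if ret == 0:
        u.warn('push failed or there was nothing to push\n')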
+ if force: + remote_heads = ['force'] + # ssh: return remote's addchangegroup() + # http: return remote's addchangegroup() or 0 for error + return remote.unbundle(cg, remote_heads, 'push') + else: + # we return an integer indicating remote head count change + return remote.addchangegroup(cg, 'push', self.url(), lock=lock) + finally: + if lock is not None: + lock.release() + + def changegroupinfo(self, nodes, source): + if self.ui.verbose or source == 'bundle': + self.ui.status(_("%d changesets found\n") % len(nodes)) + if self.ui.debugflag: + self.ui.debug("list of changesets:\n") + for node in nodes: + self.ui.debug("%s\n" % hex(node)) + + def changegroupsubset(self, bases, heads, source, extranodes=None): + """Compute a changegroup consisting of all the nodes that are + descendents of any of the bases and ancestors of any of the heads. + Return a chunkbuffer object whose read() method will return + successive changegroup chunks. + + It is fairly complex as determining which filenodes and which + manifest nodes need to be included for the changeset to be complete + is non-trivial. + + Another wrinkle is doing the reverse, figuring out which changeset in + the changegroup a particular filenode or manifestnode belongs to. + + The caller can specify some nodes that must be included in the + changegroup using the extranodes argument. It should be a dict + where the keys are the filenames (or 1 for the manifest), and the + values are lists of (node, linknode) tuples, where node is a wanted + node and linknode is the changelog node that should be transmitted as + the linkrev. + """ + + # Set up some initial variables + # Make it easy to refer to self.changelog + cl = self.changelog + # Compute the list of changesets in this changegroup. + # Some bases may turn out to be superfluous, and some heads may be + # too. nodesbetween will return the minimal set of bases and heads + # necessary to re-create the changegroup. + if not bases: + bases = [nullid] + msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads) + + if extranodes is None: + # can we go through the fast path ? + heads.sort() + allheads = self.heads() + allheads.sort() + if heads == allheads: + return self._changegroup(msng_cl_lst, source) + + # slow path + self.hook('preoutgoing', throw=True, source=source) + + self.changegroupinfo(msng_cl_lst, source) + + # We assume that all ancestors of bases are known + commonrevs = set(cl.ancestors(*[cl.rev(n) for n in bases])) + + # Make it easy to refer to self.manifest + mnfst = self.manifest + # We don't know which manifests are missing yet + msng_mnfst_set = {} + # Nor do we know which filenodes are missing. + msng_filenode_set = {} + + junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex + junk = None + + # A changeset always belongs to itself, so the changenode lookup + # function for a changenode is identity. + def identity(x): + return x + + # A function generating function that sets up the initial environment + # the inner function. + def filenode_collector(changedfiles): + # This gathers information from each manifestnode included in the + # changegroup about which filenodes the manifest node references + # so we can include those in the changegroup too. + # + # It also remembers which changenode each filenode belongs to. It + # does this by assuming the a filenode belongs to the changenode + # the first manifest that references it belongs to. 
+ def collect_msng_filenodes(mnfstnode): + r = mnfst.rev(mnfstnode) + if mnfst.deltaparent(r) in mnfst.parentrevs(r): + # If the previous rev is one of the parents, + # we only need to see a diff. + deltamf = mnfst.readdelta(mnfstnode) + # For each line in the delta + for f, fnode in deltamf.iteritems(): + # And if the file is in the list of files we care + # about. + if f in changedfiles: + # Get the changenode this manifest belongs to + clnode = msng_mnfst_set[mnfstnode] + # Create the set of filenodes for the file if + # there isn't one already. + ndset = msng_filenode_set.setdefault(f, {}) + # And set the filenode's changelog node to the + # manifest's if it hasn't been set already. + ndset.setdefault(fnode, clnode) + else: + # Otherwise we need a full manifest. + m = mnfst.read(mnfstnode) + # For every file in we care about. + for f in changedfiles: + fnode = m.get(f, None) + # If it's in the manifest + if fnode is not None: + # See comments above. + clnode = msng_mnfst_set[mnfstnode] + ndset = msng_filenode_set.setdefault(f, {}) + ndset.setdefault(fnode, clnode) + return collect_msng_filenodes + + # If we determine that a particular file or manifest node must be a + # node that the recipient of the changegroup will already have, we can + # also assume the recipient will have all the parents. This function + # prunes them from the set of missing nodes. + def prune(revlog, missingnodes): + hasset = set() + # If a 'missing' filenode thinks it belongs to a changenode we + # assume the recipient must have, then the recipient must have + # that filenode. + for n in missingnodes: + clrev = revlog.linkrev(revlog.rev(n)) + if clrev in commonrevs: + hasset.add(n) + for n in hasset: + missingnodes.pop(n, None) + for r in revlog.ancestors(*[revlog.rev(n) for n in hasset]): + missingnodes.pop(revlog.node(r), None) + + # Add the nodes that were explicitly requested. + def add_extra_nodes(name, nodes): + if not extranodes or name not in extranodes: + return + + for node, linknode in extranodes[name]: + if node not in nodes: + nodes[node] = linknode + + # Now that we have all theses utility functions to help out and + # logically divide up the task, generate the group. + def gengroup(): + # The set of changed files starts empty. + changedfiles = set() + collect = changegroup.collector(cl, msng_mnfst_set, changedfiles) + + # Create a changenode group generator that will call our functions + # back to lookup the owning changenode and collect information. + group = cl.group(msng_cl_lst, identity, collect) + for cnt, chnk in enumerate(group): + yield chnk + self.ui.progress(_('bundling changes'), cnt, unit=_('chunks')) + self.ui.progress(_('bundling changes'), None) + + prune(mnfst, msng_mnfst_set) + add_extra_nodes(1, msng_mnfst_set) + msng_mnfst_lst = msng_mnfst_set.keys() + # Sort the manifestnodes by revision number. + msng_mnfst_lst.sort(key=mnfst.rev) + # Create a generator for the manifestnodes that calls our lookup + # and data collection functions back. + group = mnfst.group(msng_mnfst_lst, + lambda mnode: msng_mnfst_set[mnode], + filenode_collector(changedfiles)) + for cnt, chnk in enumerate(group): + yield chnk + self.ui.progress(_('bundling manifests'), cnt, unit=_('chunks')) + self.ui.progress(_('bundling manifests'), None) + + # These are no longer needed, dereference and toss the memory for + # them. 
+ msng_mnfst_lst = None + msng_mnfst_set.clear() + + if extranodes: + for fname in extranodes: + if isinstance(fname, int): + continue + msng_filenode_set.setdefault(fname, {}) + changedfiles.add(fname) + # Go through all our files in order sorted by name. + cnt = 0 + for fname in sorted(changedfiles): + filerevlog = self.file(fname) + if not len(filerevlog): + raise util.Abort(_("empty or missing revlog for %s") % fname) + # Toss out the filenodes that the recipient isn't really + # missing. + missingfnodes = msng_filenode_set.pop(fname, {}) + prune(filerevlog, missingfnodes) + add_extra_nodes(fname, missingfnodes) + # If any filenodes are left, generate the group for them, + # otherwise don't bother. + if missingfnodes: + yield changegroup.chunkheader(len(fname)) + yield fname + # Sort the filenodes by their revision # (topological order) + nodeiter = list(missingfnodes) + nodeiter.sort(key=filerevlog.rev) + # Create a group generator and only pass in a changenode + # lookup function as we need to collect no information + # from filenodes. + group = filerevlog.group(nodeiter, + lambda fnode: missingfnodes[fnode]) + for chnk in group: + self.ui.progress( + _('bundling files'), cnt, item=fname, unit=_('chunks')) + cnt += 1 + yield chnk + # Signal that no more groups are left. + yield changegroup.closechunk() + self.ui.progress(_('bundling files'), None) + + if msng_cl_lst: + self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source) + + return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN') + + def changegroup(self, basenodes, source): + # to avoid a race we use changegroupsubset() (issue1320) + return self.changegroupsubset(basenodes, self.heads(), source) + + def _changegroup(self, nodes, source): + """Compute the changegroup of all nodes that we have that a recipient + doesn't. Return a chunkbuffer object whose read() method will return + successive changegroup chunks. + + This is much easier than the previous function as we can assume that + the recipient has any changenode we aren't sending them. 
+ + nodes is the set of nodes to send""" + + self.hook('preoutgoing', throw=True, source=source) + + cl = self.changelog + revset = set([cl.rev(n) for n in nodes]) + self.changegroupinfo(nodes, source) + + def identity(x): + return x + + def gennodelst(log): + for r in log: + if log.linkrev(r) in revset: + yield log.node(r) + + def lookuplinkrev_func(revlog): + def lookuplinkrev(n): + return cl.node(revlog.linkrev(revlog.rev(n))) + return lookuplinkrev + + def gengroup(): + '''yield a sequence of changegroup chunks (strings)''' + # construct a list of all changed files + changedfiles = set() + mmfs = {} + collect = changegroup.collector(cl, mmfs, changedfiles) + + for cnt, chnk in enumerate(cl.group(nodes, identity, collect)): + self.ui.progress(_('bundling changes'), cnt, unit=_('chunks')) + yield chnk + self.ui.progress(_('bundling changes'), None) + + mnfst = self.manifest + nodeiter = gennodelst(mnfst) + for cnt, chnk in enumerate(mnfst.group(nodeiter, + lookuplinkrev_func(mnfst))): + self.ui.progress(_('bundling manifests'), cnt, unit=_('chunks')) + yield chnk + self.ui.progress(_('bundling manifests'), None) + + cnt = 0 + for fname in sorted(changedfiles): + filerevlog = self.file(fname) + if not len(filerevlog): + raise util.Abort(_("empty or missing revlog for %s") % fname) + nodeiter = gennodelst(filerevlog) + nodeiter = list(nodeiter) + if nodeiter: + yield changegroup.chunkheader(len(fname)) + yield fname + lookup = lookuplinkrev_func(filerevlog) + for chnk in filerevlog.group(nodeiter, lookup): + self.ui.progress( + _('bundling files'), cnt, item=fname, unit=_('chunks')) + cnt += 1 + yield chnk + self.ui.progress(_('bundling files'), None) + + yield changegroup.closechunk() + + if nodes: + self.hook('outgoing', node=hex(nodes[0]), source=source) + + return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN') + + def addchangegroup(self, source, srctype, url, emptyok=False, lock=None): + """Add the changegroup returned by source.read() to this repo. + srctype is a string like 'push', 'pull', or 'unbundle'. url is + the URL of the repo where this changegroup is coming from. 
+ + Return an integer summarizing the change to this repo: + - nothing changed or no source: 0 + - more heads than before: 1+added heads (2..n) + - fewer heads than before: -1-removed heads (-2..-n) + - number of heads stays the same: 1 + """ + def csmap(x): + self.ui.debug("add changeset %s\n" % short(x)) + return len(cl) + + def revmap(x): + return cl.rev(x) + + if not source: + return 0 + + self.hook('prechangegroup', throw=True, source=srctype, url=url) + + changesets = files = revisions = 0 + efiles = set() + + # write changelog data to temp files so concurrent readers will not see + # inconsistent view + cl = self.changelog + cl.delayupdate() + oldheads = len(cl.heads()) + + tr = self.transaction("\n".join([srctype, urlmod.hidepassword(url)])) + try: + trp = weakref.proxy(tr) + # pull off the changeset group + self.ui.status(_("adding changesets\n")) + clstart = len(cl) + class prog(object): + step = _('changesets') + count = 1 + ui = self.ui + total = None + def __call__(self): + self.ui.progress(self.step, self.count, unit=_('chunks'), + total=self.total) + self.count += 1 + pr = prog() + source.callback = pr + + if (cl.addgroup(source, csmap, trp) is None + and not emptyok): + raise util.Abort(_("received changelog group is empty")) + clend = len(cl) + changesets = clend - clstart + for c in xrange(clstart, clend): + efiles.update(self[c].files()) + efiles = len(efiles) + self.ui.progress(_('changesets'), None) + + # pull off the manifest group + self.ui.status(_("adding manifests\n")) + pr.step = _('manifests') + pr.count = 1 + pr.total = changesets # manifests <= changesets + # no need to check for empty manifest group here: + # if the result of the merge of 1 and 2 is the same in 3 and 4, + # no new manifest will be created and the manifest group will + # be empty during the pull + self.manifest.addgroup(source, revmap, trp) + self.ui.progress(_('manifests'), None) + + needfiles = {} + if self.ui.configbool('server', 'validate', default=False): + # validate incoming csets have their manifests + for cset in xrange(clstart, clend): + mfest = self.changelog.read(self.changelog.node(cset))[0] + mfest = self.manifest.readdelta(mfest) + # store file nodes we must see + for f, n in mfest.iteritems(): + needfiles.setdefault(f, set()).add(n) + + # process the files + self.ui.status(_("adding file changes\n")) + pr.step = 'files' + pr.count = 1 + pr.total = efiles + source.callback = None + + while 1: + f = source.chunk() + if not f: + break + self.ui.debug("adding %s revisions\n" % f) + pr() + fl = self.file(f) + o = len(fl) + if fl.addgroup(source, revmap, trp) is None: + raise util.Abort(_("received file revlog group is empty")) + revisions += len(fl) - o + files += 1 + if f in needfiles: + needs = needfiles[f] + for new in xrange(o, len(fl)): + n = fl.node(new) + if n in needs: + needs.remove(n) + if not needs: + del needfiles[f] + self.ui.progress(_('files'), None) + + for f, needs in needfiles.iteritems(): + fl = self.file(f) + for n in needs: + try: + fl.rev(n) + except error.LookupError: + raise util.Abort( + _('missing file data for %s:%s - run hg verify') % + (f, hex(n))) + + newheads = len(cl.heads()) + heads = "" + if oldheads and newheads != oldheads: + heads = _(" (%+d heads)") % (newheads - oldheads) + + self.ui.status(_("added %d changesets" + " with %d changes to %d files%s\n") + % (changesets, revisions, files, heads)) + + if changesets > 0: + p = lambda: cl.writepending() and self.root or "" + self.hook('pretxnchangegroup', throw=True, + node=hex(cl.node(clstart)), 
source=srctype, + url=url, pending=p) + + # make changelog see real files again + cl.finalize(trp) + + tr.close() + finally: + tr.release() + if lock: + lock.release() + + if changesets > 0: + # forcefully update the on-disk branch cache + self.ui.debug("updating the branch cache\n") + self.updatebranchcache() + self.hook("changegroup", node=hex(cl.node(clstart)), + source=srctype, url=url) + + for i in xrange(clstart, clend): + self.hook("incoming", node=hex(cl.node(i)), + source=srctype, url=url) + + # never return 0 here: + if newheads < oldheads: + return newheads - oldheads - 1 + else: + return newheads - oldheads + 1 + + + def stream_in(self, remote, requirements): + fp = remote.stream_out() + l = fp.readline() + try: + resp = int(l) + except ValueError: + raise error.ResponseError( + _('Unexpected response from remote server:'), l) + if resp == 1: + raise util.Abort(_('operation forbidden by server')) + elif resp == 2: + raise util.Abort(_('locking the remote repository failed')) + elif resp != 0: + raise util.Abort(_('the server sent an unknown error code')) + self.ui.status(_('streaming all changes\n')) + l = fp.readline() + try: + total_files, total_bytes = map(int, l.split(' ', 1)) + except (ValueError, TypeError): + raise error.ResponseError( + _('Unexpected response from remote server:'), l) + self.ui.status(_('%d files to transfer, %s of data\n') % + (total_files, util.bytecount(total_bytes))) + start = time.time() + for i in xrange(total_files): + # XXX doesn't support '\n' or '\r' in filenames + l = fp.readline() + try: + name, size = l.split('\0', 1) + size = int(size) + except (ValueError, TypeError): + raise error.ResponseError( + _('Unexpected response from remote server:'), l) + self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size))) + # for backwards compat, name was partially encoded + ofp = self.sopener(store.decodedir(name), 'w') + for chunk in util.filechunkiter(fp, limit=size): + ofp.write(chunk) + ofp.close() + elapsed = time.time() - start + if elapsed <= 0: + elapsed = 0.001 + self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') % + (util.bytecount(total_bytes), elapsed, + util.bytecount(total_bytes / elapsed))) + + # new requirements = old non-format requirements + new format-related + # requirements from the streamed-in repository + requirements.update(set(self.requirements) - self.supportedformats) + self._applyrequirements(requirements) + self._writerequirements() + + self.invalidate() + return len(self.heads()) + 1 + + def clone(self, remote, heads=[], stream=False): + '''clone remote repository. + + keyword arguments: + heads: list of revs to clone (forces use of pull) + stream: use streaming clone if possible''' + + # now, all clients that can request uncompressed clones can + # read repo formats supported by all servers that can serve + # them. + + # if revlog format changes, client will have to check version + # and format flags on "stream" capability, and use + # uncompressed only if compatible. 
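A hedged sketch of the clone() method defined here: create an empty local repository and populate it from a remote, letting the code fall back from streaming to an ordinary pull() when the formats do not line up. The path and URL are placeholders, and hg.repository(..., create=True) is assumed from hg.py:

    from mercurial import ui as uimod, hg

    u = uimod.ui()
    dest = hg.repository(u, '/path/to/new-clone', create=True)  # placeholder path
    remote = hg.repository(u, 'http://hg.example.com/project')  # placeholder URL
    dest.clone(remote, stream=True)   # streams if 'stream'/'streamreqs' permit, else pull()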
+ + if stream and not heads: + # 'stream' means remote revlog format is revlogv1 only + if remote.capable('stream'): + return self.stream_in(remote, set(('revlogv1',))) + # otherwise, 'streamreqs' contains the remote revlog format + streamreqs = remote.capable('streamreqs') + if streamreqs: + streamreqs = set(streamreqs.split(',')) + # if we support it, stream in and adjust our requirements + if not streamreqs - self.supportedformats: + return self.stream_in(remote, streamreqs) + return self.pull(remote, heads) + + def pushkey(self, namespace, key, old, new): + return pushkey.push(self, namespace, key, old, new) + + def listkeys(self, namespace): + return pushkey.list(self, namespace) + +# used to avoid circular references so destructors work +def aftertrans(files): + renamefiles = [tuple(t) for t in files] + def a(): + for src, dest in renamefiles: + util.rename(src, dest) + return a + +def instance(ui, path, create): + return localrepository(ui, util.drop_scheme('file', path), create) + +def islocal(path): + return True diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/localrepo.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/localrepo.pyo Binary files differnew file mode 100644 index 0000000..c6b363e --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/localrepo.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/lock.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/lock.py new file mode 100644 index 0000000..e1dffcf --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/lock.py @@ -0,0 +1,137 @@ +# lock.py - simple advisory locking scheme for mercurial +# +# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com> +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +import util, error +import errno, os, socket, time +import warnings + +class lock(object): + '''An advisory lock held by one process to control access to a set + of files. Non-cooperating processes or incorrectly written scripts + can ignore Mercurial's locking scheme and stomp all over the + repository, so don't do that. + + Typically used via localrepository.lock() to lock the repository + store (.hg/store/) or localrepository.wlock() to lock everything + else under .hg/.''' + + # lock is symlink on platforms that support it, file on others. + + # symlink is used because create of directory entry and contents + # are atomic even over nfs. 
+ + # old-style lock: symlink to pid + # new-style lock: symlink to hostname:pid + + _host = None + + def __init__(self, file, timeout=-1, releasefn=None, desc=None): + self.f = file + self.held = 0 + self.timeout = timeout + self.releasefn = releasefn + self.desc = desc + self.lock() + + def __del__(self): + if self.held: + warnings.warn("use lock.release instead of del lock", + category=DeprecationWarning, + stacklevel=2) + + # ensure the lock will be removed + # even if recursive locking did occur + self.held = 1 + + self.release() + + def lock(self): + timeout = self.timeout + while 1: + try: + self.trylock() + return 1 + except error.LockHeld, inst: + if timeout != 0: + time.sleep(1) + if timeout > 0: + timeout -= 1 + continue + raise error.LockHeld(errno.ETIMEDOUT, inst.filename, self.desc, + inst.locker) + + def trylock(self): + if self.held: + self.held += 1 + return + if lock._host is None: + lock._host = socket.gethostname() + lockname = '%s:%s' % (lock._host, os.getpid()) + while not self.held: + try: + util.makelock(lockname, self.f) + self.held = 1 + except (OSError, IOError), why: + if why.errno == errno.EEXIST: + locker = self.testlock() + if locker is not None: + raise error.LockHeld(errno.EAGAIN, self.f, self.desc, + locker) + else: + raise error.LockUnavailable(why.errno, why.strerror, + why.filename, self.desc) + + def testlock(self): + """return id of locker if lock is valid, else None. + + If old-style lock, we cannot tell what machine locker is on. + with new-style lock, if locker is on this machine, we can + see if locker is alive. If locker is on this machine but + not alive, we can safely break lock. + + The lock file is only deleted when None is returned. + + """ + locker = util.readlock(self.f) + try: + host, pid = locker.split(":", 1) + except ValueError: + return locker + if host != lock._host: + return locker + try: + pid = int(pid) + except ValueError: + return locker + if util.testpid(pid): + return locker + # if locker dead, break lock. must do this with another lock + # held, or can race and break valid lock. 
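For completeness, the usual acquire/release pattern built on this class, mirroring how rollback() in localrepo.py pairs wlock() and lock(); the repository path is a placeholder and release() is the module-level helper defined at the bottom of this file:

    from mercurial import ui as uimod, hg
    from mercurial.lock import release

    u = uimod.ui()
    repo = hg.repository(u, '/path/to/repo')    # placeholder path
    wlock = lock = None
    try:
        wlock = repo.wlock()    # guards .hg/ (dirstate, branch, ...)
        lock = repo.lock()      # guards .hg/store (changelog, manifest, filelogs)
        # ... modify the repository here ...
    finally:
        release(lock, wlock)    # tolerates entries that are still None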
+ try: + l = lock(self.f + '.break', timeout=0) + os.unlink(self.f) + l.release() + except error.LockError: + return locker + + def release(self): + if self.held > 1: + self.held -= 1 + elif self.held == 1: + self.held = 0 + if self.releasefn: + self.releasefn() + try: + os.unlink(self.f) + except OSError: + pass + +def release(*locks): + for lock in locks: + if lock is not None: + lock.release() + diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/lock.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/lock.pyo Binary files differnew file mode 100644 index 0000000..e52d1fc --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/lock.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/lsprof.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/lsprof.py new file mode 100644 index 0000000..e9b185b --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/lsprof.py @@ -0,0 +1,111 @@ +import sys +from _lsprof import Profiler, profiler_entry + +__all__ = ['profile', 'Stats'] + +def profile(f, *args, **kwds): + """XXX docstring""" + p = Profiler() + p.enable(subcalls=True, builtins=True) + try: + f(*args, **kwds) + finally: + p.disable() + return Stats(p.getstats()) + + +class Stats(object): + """XXX docstring""" + + def __init__(self, data): + self.data = data + + def sort(self, crit="inlinetime"): + """XXX docstring""" + if crit not in profiler_entry.__dict__: + raise ValueError("Can't sort by %s" % crit) + self.data.sort(key=lambda x: getattr(x, crit), reverse=True) + for e in self.data: + if e.calls: + e.calls.sort(key=lambda x: getattr(x, crit), reverse=True) + + def pprint(self, top=None, file=None, limit=None, climit=None): + """XXX docstring""" + if file is None: + file = sys.stdout + d = self.data + if top is not None: + d = d[:top] + cols = "% 12s %12s %11.4f %11.4f %s\n" + hcols = "% 12s %12s %12s %12s %s\n" + file.write(hcols % ("CallCount", "Recursive", "Total(ms)", + "Inline(ms)", "module:lineno(function)")) + count = 0 + for e in d: + file.write(cols % (e.callcount, e.reccallcount, e.totaltime, + e.inlinetime, label(e.code))) + count += 1 + if limit is not None and count == limit: + return + ccount = 0 + if e.calls: + for se in e.calls: + file.write(cols % ("+%s" % se.callcount, se.reccallcount, + se.totaltime, se.inlinetime, + "+%s" % label(se.code))) + count += 1 + ccount += 1 + if limit is not None and count == limit: + return + if climit is not None and ccount == climit: + break + + def freeze(self): + """Replace all references to code objects with string + descriptions; this makes it possible to pickle the instance.""" + + # this code is probably rather ickier than it needs to be! 
+ for i in range(len(self.data)): + e = self.data[i] + if not isinstance(e.code, str): + self.data[i] = type(e)((label(e.code),) + e[1:]) + if e.calls: + for j in range(len(e.calls)): + se = e.calls[j] + if not isinstance(se.code, str): + e.calls[j] = type(se)((label(se.code),) + se[1:]) + +_fn2mod = {} + +def label(code): + if isinstance(code, str): + return code + try: + mname = _fn2mod[code.co_filename] + except KeyError: + for k, v in list(sys.modules.iteritems()): + if v is None: + continue + if not hasattr(v, '__file__'): + continue + if not isinstance(v.__file__, str): + continue + if v.__file__.startswith(code.co_filename): + mname = _fn2mod[code.co_filename] = k + break + else: + mname = _fn2mod[code.co_filename] = '<%s>' % code.co_filename + + return '%s:%d(%s)' % (mname, code.co_firstlineno, code.co_name) + + +if __name__ == '__main__': + import os + sys.argv = sys.argv[1:] + if not sys.argv: + print >> sys.stderr, "usage: lsprof.py <script> <arguments...>" + sys.exit(2) + sys.path.insert(0, os.path.abspath(os.path.dirname(sys.argv[0]))) + stats = profile(execfile, sys.argv[0], globals(), locals()) + stats.sort() + stats.pprint() diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/lsprof.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/lsprof.pyo Binary files differnew file mode 100644 index 0000000..8461f91 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/lsprof.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/lsprofcalltree.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/lsprofcalltree.py new file mode 100644 index 0000000..358b951 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/lsprofcalltree.py @@ -0,0 +1,86 @@ +""" +lsprofcalltree.py - lsprof output which is readable by kcachegrind + +Authors: + * David Allouche <david <at> allouche.net> + * Jp Calderone & Itamar Shtull-Trauring + * Johan Dahlin + +This software may be used and distributed according to the terms +of the GNU General Public License, incorporated herein by reference. 
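A short sketch tying the two profiling helpers together: gather data with the Profiler re-exported by lsprof above, print a plain-text report, then dump a file kcachegrind can open via the KCacheGrind class defined just below. The profiled expression and the output filename are placeholders:

    from mercurial.lsprof import Profiler, Stats
    from mercurial.lsprofcalltree import KCacheGrind

    p = Profiler()
    p.enable(subcalls=True, builtins=True)
    try:
        sum(i * i for i in xrange(100000))    # stand-in for the code being profiled
    finally:
        p.disable()

    Stats(p.getstats()).pprint(top=10)        # plain-text report on stdout

    out = open('callgrind.out.example', 'w')  # placeholder output file
    KCacheGrind(p).output(out)
    out.close()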
+""" + +def label(code): + if isinstance(code, str): + return '~' + code # built-in functions ('~' sorts at the end) + else: + return '%s %s:%d' % (code.co_name, + code.co_filename, + code.co_firstlineno) + +class KCacheGrind(object): + def __init__(self, profiler): + self.data = profiler.getstats() + self.out_file = None + + def output(self, out_file): + self.out_file = out_file + print >> out_file, 'events: Ticks' + self._print_summary() + for entry in self.data: + self._entry(entry) + + def _print_summary(self): + max_cost = 0 + for entry in self.data: + totaltime = int(entry.totaltime * 1000) + max_cost = max(max_cost, totaltime) + print >> self.out_file, 'summary: %d' % (max_cost,) + + def _entry(self, entry): + out_file = self.out_file + + code = entry.code + #print >> out_file, 'ob=%s' % (code.co_filename,) + if isinstance(code, str): + print >> out_file, 'fi=~' + else: + print >> out_file, 'fi=%s' % (code.co_filename,) + print >> out_file, 'fn=%s' % (label(code),) + + inlinetime = int(entry.inlinetime * 1000) + if isinstance(code, str): + print >> out_file, '0 ', inlinetime + else: + print >> out_file, '%d %d' % (code.co_firstlineno, inlinetime) + + # recursive calls are counted in entry.calls + if entry.calls: + calls = entry.calls + else: + calls = [] + + if isinstance(code, str): + lineno = 0 + else: + lineno = code.co_firstlineno + + for subentry in calls: + self._subentry(lineno, subentry) + print >> out_file + + def _subentry(self, lineno, subentry): + out_file = self.out_file + code = subentry.code + #print >> out_file, 'cob=%s' % (code.co_filename,) + print >> out_file, 'cfn=%s' % (label(code),) + if isinstance(code, str): + print >> out_file, 'cfi=~' + print >> out_file, 'calls=%d 0' % (subentry.callcount,) + else: + print >> out_file, 'cfi=%s' % (code.co_filename,) + print >> out_file, 'calls=%d %d' % ( + subentry.callcount, code.co_firstlineno) + + totaltime = int(subentry.totaltime * 1000) + print >> out_file, '%d %d' % (lineno, totaltime) diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/lsprofcalltree.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/lsprofcalltree.pyo Binary files differnew file mode 100644 index 0000000..928ca97 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/lsprofcalltree.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/mail.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/mail.py new file mode 100644 index 0000000..3aa1a55 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/mail.py @@ -0,0 +1,226 @@ +# mail.py - mail sending bits for mercurial +# +# Copyright 2006 Matt Mackall <mpm@selenic.com> +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +from i18n import _ +import util, encoding +import os, smtplib, socket, quopri +import email.Header, email.MIMEText, email.Utils + +_oldheaderinit = email.Header.Header.__init__ +def _unifiedheaderinit(self, *args, **kw): + """ + Python2.7 introduces a backwards incompatible change + (Python issue1974, r70772) in email.Generator.Generator code: + pre-2.7 code passed "continuation_ws='\t'" to the Header + constructor, and 2.7 removed this parameter. 
+ + Default argument is continuation_ws=' ', which means that the + behaviour is different in <2.7 and 2.7 + + We consider the 2.7 behaviour to be preferable, but need + to have an unified behaviour for versions 2.4 to 2.7 + """ + # override continuation_ws + kw['continuation_ws'] = ' ' + _oldheaderinit(self, *args, **kw) + +email.Header.Header.__dict__['__init__'] = _unifiedheaderinit + +def _smtp(ui): + '''build an smtp connection and return a function to send mail''' + local_hostname = ui.config('smtp', 'local_hostname') + s = smtplib.SMTP(local_hostname=local_hostname) + mailhost = ui.config('smtp', 'host') + if not mailhost: + raise util.Abort(_('smtp.host not configured - cannot send mail')) + mailport = util.getport(ui.config('smtp', 'port', 25)) + ui.note(_('sending mail: smtp host %s, port %s\n') % + (mailhost, mailport)) + s.connect(host=mailhost, port=mailport) + if ui.configbool('smtp', 'tls'): + if not hasattr(socket, 'ssl'): + raise util.Abort(_("can't use TLS: Python SSL support " + "not installed")) + ui.note(_('(using tls)\n')) + s.ehlo() + s.starttls() + s.ehlo() + username = ui.config('smtp', 'username') + password = ui.config('smtp', 'password') + if username and not password: + password = ui.getpass() + if username and password: + ui.note(_('(authenticating to mail server as %s)\n') % + (username)) + try: + s.login(username, password) + except smtplib.SMTPException, inst: + raise util.Abort(inst) + + def send(sender, recipients, msg): + try: + return s.sendmail(sender, recipients, msg) + except smtplib.SMTPRecipientsRefused, inst: + recipients = [r[1] for r in inst.recipients.values()] + raise util.Abort('\n' + '\n'.join(recipients)) + except smtplib.SMTPException, inst: + raise util.Abort(inst) + + return send + +def _sendmail(ui, sender, recipients, msg): + '''send mail using sendmail.''' + program = ui.config('email', 'method') + cmdline = '%s -f %s %s' % (program, util.email(sender), + ' '.join(map(util.email, recipients))) + ui.note(_('sending mail: %s\n') % cmdline) + fp = util.popen(cmdline, 'w') + fp.write(msg) + ret = fp.close() + if ret: + raise util.Abort('%s %s' % ( + os.path.basename(program.split(None, 1)[0]), + util.explain_exit(ret)[0])) + +def connect(ui): + '''make a mail connection. return a function to send mail. + call as sendmail(sender, list-of-recipients, msg).''' + if ui.config('email', 'method', 'smtp') == 'smtp': + return _smtp(ui) + return lambda s, r, m: _sendmail(ui, s, r, m) + +def sendmail(ui, sender, recipients, msg): + send = connect(ui) + return send(sender, recipients, msg) + +def validateconfig(ui): + '''determine if we have enough config data to try sending email.''' + method = ui.config('email', 'method', 'smtp') + if method == 'smtp': + if not ui.config('smtp', 'host'): + raise util.Abort(_('smtp specified as email transport, ' + 'but no smtp host configured')) + else: + if not util.find_exe(method): + raise util.Abort(_('%r specified as email transport, ' + 'but not in PATH') % method) + +def mimetextpatch(s, subtype='plain', display=False): + '''If patch in utf-8 transfer-encode it.''' + + enc = None + for line in s.splitlines(): + if len(line) > 950: + s = quopri.encodestring(s) + enc = "quoted-printable" + break + + cs = 'us-ascii' + if not display: + try: + s.decode('us-ascii') + except UnicodeDecodeError: + try: + s.decode('utf-8') + cs = 'utf-8' + except UnicodeDecodeError: + # We'll go with us-ascii as a fallback. 
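Pulling this module's helpers together, a hedged sketch of sending one message; the SMTP host and the addresses are placeholders, and ui.setconfig() is assumed from ui.py (not in this hunk):

    from mercurial import ui as uimod, mail

    u = uimod.ui()
    u.setconfig('email', 'method', 'smtp')
    u.setconfig('smtp', 'host', 'smtp.example.com')         # placeholder server
    mail.validateconfig(u)                                   # abort early if misconfigured

    msg = mail.mimeencode(u, 'plain body, charset-encoded if needed')
    msg['Subject'] = mail.headencode(u, 'test message')
    msg['From'] = 'me@example.com'
    msg['To'] = 'you@example.com'
    mail.sendmail(u, 'me@example.com', ['you@example.com'], msg.as_string())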
+ pass + + msg = email.MIMEText.MIMEText(s, subtype, cs) + if enc: + del msg['Content-Transfer-Encoding'] + msg['Content-Transfer-Encoding'] = enc + return msg + +def _charsets(ui): + '''Obtains charsets to send mail parts not containing patches.''' + charsets = [cs.lower() for cs in ui.configlist('email', 'charsets')] + fallbacks = [encoding.fallbackencoding.lower(), + encoding.encoding.lower(), 'utf-8'] + for cs in fallbacks: # find unique charsets while keeping order + if cs not in charsets: + charsets.append(cs) + return [cs for cs in charsets if not cs.endswith('ascii')] + +def _encode(ui, s, charsets): + '''Returns (converted) string, charset tuple. + Finds out best charset by cycling through sendcharsets in descending + order. Tries both encoding and fallbackencoding for input. Only as + last resort send as is in fake ascii. + Caveat: Do not use for mail parts containing patches!''' + try: + s.decode('ascii') + except UnicodeDecodeError: + sendcharsets = charsets or _charsets(ui) + for ics in (encoding.encoding, encoding.fallbackencoding): + try: + u = s.decode(ics) + except UnicodeDecodeError: + continue + for ocs in sendcharsets: + try: + return u.encode(ocs), ocs + except UnicodeEncodeError: + pass + except LookupError: + ui.warn(_('ignoring invalid sendcharset: %s\n') % ocs) + # if ascii, or all conversion attempts fail, send (broken) ascii + return s, 'us-ascii' + +def headencode(ui, s, charsets=None, display=False): + '''Returns RFC-2047 compliant header from given string.''' + if not display: + # split into words? + s, cs = _encode(ui, s, charsets) + return str(email.Header.Header(s, cs)) + return s + +def _addressencode(ui, name, addr, charsets=None): + name = headencode(ui, name, charsets) + try: + acc, dom = addr.split('@') + acc = acc.encode('ascii') + dom = dom.decode(encoding.encoding).encode('idna') + addr = '%s@%s' % (acc, dom) + except UnicodeDecodeError: + raise util.Abort(_('invalid email address: %s') % addr) + except ValueError: + try: + # too strict? + addr = addr.encode('ascii') + except UnicodeDecodeError: + raise util.Abort(_('invalid local address: %s') % addr) + return email.Utils.formataddr((name, addr)) + +def addressencode(ui, address, charsets=None, display=False): + '''Turns address into RFC-2047 compliant header.''' + if display or not address: + return address or '' + name, addr = email.Utils.parseaddr(address) + return _addressencode(ui, name, addr, charsets) + +def addrlistencode(ui, addrs, charsets=None, display=False): + '''Turns a list of addresses into a list of RFC-2047 compliant headers. 
+ A single element of input list may contain multiple addresses, but output + always has one address per item''' + if display: + return [a.strip() for a in addrs if a.strip()] + + result = [] + for name, addr in email.Utils.getaddresses(addrs): + if name or addr: + result.append(_addressencode(ui, name, addr, charsets)) + return result + +def mimeencode(ui, s, charsets=None, display=False): + '''creates mime text object, encodes it if needed, and sets + charset and transfer-encoding accordingly.''' + cs = 'us-ascii' + if not display: + s, cs = _encode(ui, s, charsets) + return email.MIMEText.MIMEText(s, 'plain', cs) diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/mail.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/mail.pyo Binary files differnew file mode 100644 index 0000000..8111e74 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/mail.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/manifest.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/manifest.py new file mode 100644 index 0000000..0d008e8 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/manifest.py @@ -0,0 +1,196 @@ +# manifest.py - manifest revision class for mercurial +# +# Copyright 2005-2007 Matt Mackall <mpm@selenic.com> +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +from i18n import _ +import mdiff, parsers, error, revlog +import array, struct + +class manifestdict(dict): + def __init__(self, mapping=None, flags=None): + if mapping is None: + mapping = {} + if flags is None: + flags = {} + dict.__init__(self, mapping) + self._flags = flags + def flags(self, f): + return self._flags.get(f, "") + def set(self, f, flags): + self._flags[f] = flags + def copy(self): + return manifestdict(self, dict.copy(self._flags)) + +class manifest(revlog.revlog): + def __init__(self, opener): + self._mancache = None + revlog.revlog.__init__(self, opener, "00manifest.i") + + def parse(self, lines): + mfdict = manifestdict() + parsers.parse_manifest(mfdict, mfdict._flags, lines) + return mfdict + + def readdelta(self, node): + r = self.rev(node) + return self.parse(mdiff.patchtext(self.revdiff(self.deltaparent(r), r))) + + def read(self, node): + if node == revlog.nullid: + return manifestdict() # don't upset local cache + if self._mancache and self._mancache[0] == node: + return self._mancache[1] + text = self.revision(node) + arraytext = array.array('c', text) + mapping = self.parse(text) + self._mancache = (node, mapping, arraytext) + return mapping + + def _search(self, m, s, lo=0, hi=None): + '''return a tuple (start, end) that says where to find s within m. + + If the string is found m[start:end] are the line containing + that string. If start == end the string was not found and + they indicate the proper sorted insertion point. This was + taken from bisect_left, and modified to find line start/end as + it goes along. 
+ + m should be a buffer or a string + s is a string''' + def advance(i, c): + while i < lenm and m[i] != c: + i += 1 + return i + if not s: + return (lo, lo) + lenm = len(m) + if not hi: + hi = lenm + while lo < hi: + mid = (lo + hi) // 2 + start = mid + while start > 0 and m[start - 1] != '\n': + start -= 1 + end = advance(start, '\0') + if m[start:end] < s: + # we know that after the null there are 40 bytes of sha1 + # this translates to the bisect lo = mid + 1 + lo = advance(end + 40, '\n') + 1 + else: + # this translates to the bisect hi = mid + hi = start + end = advance(lo, '\0') + found = m[lo:end] + if s == found: + # we know that after the null there are 40 bytes of sha1 + end = advance(end + 40, '\n') + return (lo, end + 1) + else: + return (lo, lo) + + def find(self, node, f): + '''look up entry for a single file efficiently. + return (node, flags) pair if found, (None, None) if not.''' + if self._mancache and self._mancache[0] == node: + return self._mancache[1].get(f), self._mancache[1].flags(f) + text = self.revision(node) + start, end = self._search(text, f) + if start == end: + return None, None + l = text[start:end] + f, n = l.split('\0') + return revlog.bin(n[:40]), n[40:-1] + + def add(self, map, transaction, link, p1=None, p2=None, + changed=None): + # apply the changes collected during the bisect loop to our addlist + # return a delta suitable for addrevision + def addlistdelta(addlist, x): + # start from the bottom up + # so changes to the offsets don't mess things up. + for start, end, content in reversed(x): + if content: + addlist[start:end] = array.array('c', content) + else: + del addlist[start:end] + return "".join(struct.pack(">lll", start, end, len(content)) + content + for start, end, content in x) + + def checkforbidden(l): + for f in l: + if '\n' in f or '\r' in f: + raise error.RevlogError( + _("'\\n' and '\\r' disallowed in filenames: %r") % f) + + # if we're using the cache, make sure it is valid and + # parented by the same node we're diffing against + if not (changed and self._mancache and p1 and self._mancache[0] == p1): + files = sorted(map) + checkforbidden(files) + + # if this is changed to support newlines in filenames, + # be sure to check the templates/ dir again (especially *-raw.tmpl) + hex, flags = revlog.hex, map.flags + text = ''.join("%s\000%s%s\n" % (f, hex(map[f]), flags(f)) + for f in files) + arraytext = array.array('c', text) + cachedelta = None + else: + added, removed = changed + addlist = self._mancache[2] + + checkforbidden(added) + # combine the changed lists into one list for sorting + work = [(x, False) for x in added] + work.extend((x, True) for x in removed) + # this could use heapq.merge() (from python2.6+) or equivalent + # since the lists are already sorted + work.sort() + + delta = [] + dstart = None + dend = None + dline = [""] + start = 0 + # zero copy representation of addlist as a buffer + addbuf = buffer(addlist) + + # start with a readonly loop that finds the offset of + # each line and creates the deltas + for f, todelete in work: + # bs will either be the index of the item or the insert point + start, end = self._search(addbuf, f, start) + if not todelete: + l = "%s\000%s%s\n" % (f, revlog.hex(map[f]), map.flags(f)) + else: + if start == end: + # item we want to delete was not found, error out + raise AssertionError( + _("failed to remove %s from manifest") % f) + l = "" + if dstart != None and dstart <= start and dend >= start: + if dend < end: + dend = end + if l: + dline.append(l) + else: + if dstart != None: 
+ delta.append([dstart, dend, "".join(dline)]) + dstart = start + dend = end + dline = [l] + + if dstart != None: + delta.append([dstart, dend, "".join(dline)]) + # apply the delta to the addlist, and get a delta for addrevision + cachedelta = (self.rev(p1), addlistdelta(addlist, delta)) + arraytext = addlist + text = buffer(arraytext) + + n = self.addrevision(text, transaction, link, p1, p2, cachedelta) + self._mancache = (n, map, arraytext) + + return n diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/manifest.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/manifest.pyo Binary files differnew file mode 100644 index 0000000..bcabb44 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/manifest.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/match.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/match.py new file mode 100644 index 0000000..33c42b4 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/match.py @@ -0,0 +1,296 @@ +# match.py - filename matching +# +# Copyright 2008, 2009 Matt Mackall <mpm@selenic.com> and others +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +import re +import util +from i18n import _ + +class match(object): + def __init__(self, root, cwd, patterns, include=[], exclude=[], + default='glob', exact=False, auditor=None): + """build an object to match a set of file patterns + + arguments: + root - the canonical root of the tree you're matching against + cwd - the current working directory, if relevant + patterns - patterns to find + include - patterns to include + exclude - patterns to exclude + default - if a pattern in names has no explicit type, assume this one + exact - patterns are actually literals + + a pattern is one of: + 'glob:<glob>' - a glob relative to cwd + 're:<regexp>' - a regular expression + 'path:<path>' - a path relative to canonroot + 'relglob:<glob>' - an unrooted glob (*.c matches C files in all dirs) + 'relpath:<path>' - a path relative to cwd + 'relre:<regexp>' - a regexp that needn't match the start of a name + '<something>' - a pattern of the specified default type + """ + + self._root = root + self._cwd = cwd + self._files = [] + self._anypats = bool(include or exclude) + + if include: + im = _buildmatch(_normalize(include, 'glob', root, cwd, auditor), + '(?:/|$)') + if exclude: + em = _buildmatch(_normalize(exclude, 'glob', root, cwd, auditor), + '(?:/|$)') + if exact: + self._files = patterns + pm = self.exact + elif patterns: + pats = _normalize(patterns, default, root, cwd, auditor) + self._files = _roots(pats) + self._anypats = self._anypats or _anypats(pats) + pm = _buildmatch(pats, '$') + + if patterns or exact: + if include: + if exclude: + m = lambda f: im(f) and not em(f) and pm(f) + else: + m = lambda f: im(f) and pm(f) + else: + if exclude: + m = lambda f: not em(f) and pm(f) + else: + m = pm + else: + if include: + if exclude: + m = lambda f: im(f) and not em(f) + else: + m = im + else: + if exclude: + m = lambda f: not em(f) + else: + m = lambda f: True + + self.matchfn = m + self._fmap = set(self._files) + + def __call__(self, fn): + return self.matchfn(fn) + def __iter__(self): + for f in self._files: + yield f + def bad(self, f, msg): + '''callback for each explicit file that can't be + found/accessed, with an error message + ''' + pass + def dir(self, f): + pass + def missing(self, f): + pass + def exact(self, f): + return 
f in self._fmap + def rel(self, f): + return util.pathto(self._root, self._cwd, f) + def files(self): + return self._files + def anypats(self): + return self._anypats + +class exact(match): + def __init__(self, root, cwd, files): + match.__init__(self, root, cwd, files, exact = True) + +class always(match): + def __init__(self, root, cwd): + match.__init__(self, root, cwd, []) + +class narrowmatcher(match): + """Adapt a matcher to work on a subdirectory only. + + The paths are remapped to remove/insert the path as needed: + + >>> m1 = match('root', '', ['a.txt', 'sub/b.txt']) + >>> m2 = narrowmatcher('sub', m1) + >>> bool(m2('a.txt')) + False + >>> bool(m2('b.txt')) + True + >>> bool(m2.matchfn('a.txt')) + False + >>> bool(m2.matchfn('b.txt')) + True + >>> m2.files() + ['b.txt'] + >>> m2.exact('b.txt') + True + >>> m2.rel('b.txt') + 'b.txt' + >>> def bad(f, msg): + ... print "%s: %s" % (f, msg) + >>> m1.bad = bad + >>> m2.bad('x.txt', 'No such file') + sub/x.txt: No such file + """ + + def __init__(self, path, matcher): + self._root = matcher._root + self._cwd = matcher._cwd + self._path = path + self._matcher = matcher + + self._files = [f[len(path) + 1:] for f in matcher._files + if f.startswith(path + "/")] + self._anypats = matcher._anypats + self.matchfn = lambda fn: matcher.matchfn(self._path + "/" + fn) + self._fmap = set(self._files) + + def bad(self, f, msg): + self._matcher.bad(self._path + "/" + f, msg) + +def patkind(pat): + return _patsplit(pat, None)[0] + +def _patsplit(pat, default): + """Split a string into an optional pattern kind prefix and the + actual pattern.""" + if ':' in pat: + kind, val = pat.split(':', 1) + if kind in ('re', 'glob', 'path', 'relglob', 'relpath', 'relre'): + return kind, val + return default, pat + +def _globre(pat): + "convert a glob pattern into a regexp" + i, n = 0, len(pat) + res = '' + group = 0 + escape = re.escape + def peek(): + return i < n and pat[i] + while i < n: + c = pat[i] + i += 1 + if c not in '*?[{},\\': + res += escape(c) + elif c == '*': + if peek() == '*': + i += 1 + res += '.*' + else: + res += '[^/]*' + elif c == '?': + res += '.' 
+ elif c == '[': + j = i + if j < n and pat[j] in '!]': + j += 1 + while j < n and pat[j] != ']': + j += 1 + if j >= n: + res += '\\[' + else: + stuff = pat[i:j].replace('\\','\\\\') + i = j + 1 + if stuff[0] == '!': + stuff = '^' + stuff[1:] + elif stuff[0] == '^': + stuff = '\\' + stuff + res = '%s[%s]' % (res, stuff) + elif c == '{': + group += 1 + res += '(?:' + elif c == '}' and group: + res += ')' + group -= 1 + elif c == ',' and group: + res += '|' + elif c == '\\': + p = peek() + if p: + i += 1 + res += escape(p) + else: + res += escape(c) + else: + res += escape(c) + return res + +def _regex(kind, name, tail): + '''convert a pattern into a regular expression''' + if not name: + return '' + if kind == 're': + return name + elif kind == 'path': + return '^' + re.escape(name) + '(?:/|$)' + elif kind == 'relglob': + return '(?:|.*/)' + _globre(name) + tail + elif kind == 'relpath': + return re.escape(name) + '(?:/|$)' + elif kind == 'relre': + if name.startswith('^'): + return name + return '.*' + name + return _globre(name) + tail + +def _buildmatch(pats, tail): + """build a matching function from a set of patterns""" + try: + pat = '(?:%s)' % '|'.join([_regex(k, p, tail) for (k, p) in pats]) + if len(pat) > 20000: + raise OverflowError() + return re.compile(pat).match + except OverflowError: + # We're using a Python with a tiny regex engine and we + # made it explode, so we'll divide the pattern list in two + # until it works + l = len(pats) + if l < 2: + raise + a, b = _buildmatch(pats[:l//2], tail), _buildmatch(pats[l//2:], tail) + return lambda s: a(s) or b(s) + except re.error: + for k, p in pats: + try: + re.compile('(?:%s)' % _regex(k, p, tail)) + except re.error: + raise util.Abort(_("invalid pattern (%s): %s") % (k, p)) + raise util.Abort(_("invalid pattern")) + +def _normalize(names, default, root, cwd, auditor): + pats = [] + for kind, name in [_patsplit(p, default) for p in names]: + if kind in ('glob', 'relpath'): + name = util.canonpath(root, cwd, name, auditor) + elif kind in ('relglob', 'path'): + name = util.normpath(name) + + pats.append((kind, name)) + return pats + +def _roots(patterns): + r = [] + for kind, name in patterns: + if kind == 'glob': # find the non-glob prefix + root = [] + for p in name.split('/'): + if '[' in p or '{' in p or '*' in p or '?' in p: + break + root.append(p) + r.append('/'.join(root) or '.') + elif kind in ('relpath', 'path'): + r.append(name or '.') + elif kind == 'relglob': + r.append('.') + return r + +def _anypats(patterns): + for kind, name in patterns: + if kind in ('glob', 're', 'relglob', 'relre'): + return True diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/match.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/match.pyo Binary files differnew file mode 100644 index 0000000..c88ac3f --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/match.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/mdiff.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/mdiff.py new file mode 100644 index 0000000..4d1e760 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/mdiff.py @@ -0,0 +1,277 @@ +# mdiff.py - diff and patch routines for mercurial +# +# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com> +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. 
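# Illustrative sketch of the binary delta layout that patchtext() and
# trivialdiffheader() further down in mdiff.py suggest: a delta is a
# sequence of fragments, each a big-endian ">lll" header (start offset,
# end offset, length of new data) followed by the replacement bytes.
# encode_fragment/decode_fragments are hypothetical helpers written only
# for this sketch; they are not mercurial APIs.
import struct

def encode_fragment(start, end, data):
    # replace the old bytes in [start:end) with `data`
    return struct.pack(">lll", start, end, len(data)) + data

def decode_fragments(delta):
    pos, frags = 0, []
    while pos < len(delta):
        start, end, length = struct.unpack(">lll", delta[pos:pos + 12])
        pos += 12
        frags.append((start, end, delta[pos:pos + length]))
        pos += length
    return frags

# a "trivial" delta, as built by trivialdiffheader(), replaces nothing at
# offset 0 and simply carries the full new text after the header
frag = encode_fragment(0, 0, "new file contents\n")
assert decode_fragments(frag) == [(0, 0, "new file contents\n")]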
+ +from i18n import _ +import bdiff, mpatch, util +import re, struct + +def splitnewlines(text): + '''like str.splitlines, but only split on newlines.''' + lines = [l + '\n' for l in text.split('\n')] + if lines: + if lines[-1] == '\n': + lines.pop() + else: + lines[-1] = lines[-1][:-1] + return lines + +class diffopts(object): + '''context is the number of context lines + text treats all files as text + showfunc enables diff -p output + git enables the git extended patch format + nodates removes dates from diff headers + ignorews ignores all whitespace changes in the diff + ignorewsamount ignores changes in the amount of whitespace + ignoreblanklines ignores changes whose lines are all blank + upgrade generates git diffs to avoid data loss + ''' + + defaults = { + 'context': 3, + 'text': False, + 'showfunc': False, + 'git': False, + 'nodates': False, + 'ignorews': False, + 'ignorewsamount': False, + 'ignoreblanklines': False, + 'upgrade': False, + } + + __slots__ = defaults.keys() + + def __init__(self, **opts): + for k in self.__slots__: + v = opts.get(k) + if v is None: + v = self.defaults[k] + setattr(self, k, v) + + try: + self.context = int(self.context) + except ValueError: + raise util.Abort(_('diff context lines count must be ' + 'an integer, not %r') % self.context) + + def copy(self, **kwargs): + opts = dict((k, getattr(self, k)) for k in self.defaults) + opts.update(kwargs) + return diffopts(**opts) + +defaultopts = diffopts() + +def wsclean(opts, text, blank=True): + if opts.ignorews: + text = re.sub('[ \t\r]+', '', text) + elif opts.ignorewsamount: + text = re.sub('[ \t\r]+', ' ', text) + text = text.replace(' \n', '\n') + if blank and opts.ignoreblanklines: + text = re.sub('\n+', '', text) + return text + +def diffline(revs, a, b, opts): + parts = ['diff'] + if opts.git: + parts.append('--git') + if revs and not opts.git: + parts.append(' '.join(["-r %s" % rev for rev in revs])) + if opts.git: + parts.append('a/%s' % a) + parts.append('b/%s' % b) + else: + parts.append(a) + return ' '.join(parts) + '\n' + +def unidiff(a, ad, b, bd, fn1, fn2, r=None, opts=defaultopts): + def datetag(date, addtab=True): + if not opts.git and not opts.nodates: + return '\t%s\n' % date + if addtab and ' ' in fn1: + return '\t\n' + return '\n' + + if not a and not b: + return "" + epoch = util.datestr((0, 0)) + + if not opts.text and (util.binary(a) or util.binary(b)): + if a and b and len(a) == len(b) and a == b: + return "" + l = ['Binary file %s has changed\n' % fn1] + elif not a: + b = splitnewlines(b) + if a is None: + l1 = '--- /dev/null%s' % datetag(epoch, False) + else: + l1 = "--- %s%s" % ("a/" + fn1, datetag(ad)) + l2 = "+++ %s%s" % ("b/" + fn2, datetag(bd)) + l3 = "@@ -0,0 +1,%d @@\n" % len(b) + l = [l1, l2, l3] + ["+" + e for e in b] + elif not b: + a = splitnewlines(a) + l1 = "--- %s%s" % ("a/" + fn1, datetag(ad)) + if b is None: + l2 = '+++ /dev/null%s' % datetag(epoch, False) + else: + l2 = "+++ %s%s" % ("b/" + fn2, datetag(bd)) + l3 = "@@ -1,%d +0,0 @@\n" % len(a) + l = [l1, l2, l3] + ["-" + e for e in a] + else: + al = splitnewlines(a) + bl = splitnewlines(b) + l = list(_unidiff(a, b, al, bl, opts=opts)) + if not l: + return "" + + l.insert(0, "--- a/%s%s" % (fn1, datetag(ad))) + l.insert(1, "+++ b/%s%s" % (fn2, datetag(bd))) + + for ln in xrange(len(l)): + if l[ln][-1] != '\n': + l[ln] += "\n\ No newline at end of file\n" + + if r: + l.insert(0, diffline(r, fn1, fn2, opts)) + + return "".join(l) + +# creates a headerless unified diff +# t1 and t2 are the text to be diffed +# l1 
and l2 are the text broken up into lines +def _unidiff(t1, t2, l1, l2, opts=defaultopts): + def contextend(l, len): + ret = l + opts.context + if ret > len: + ret = len + return ret + + def contextstart(l): + ret = l - opts.context + if ret < 0: + return 0 + return ret + + def yieldhunk(hunk): + (astart, a2, bstart, b2, delta) = hunk + aend = contextend(a2, len(l1)) + alen = aend - astart + blen = b2 - bstart + aend - a2 + + func = "" + if opts.showfunc: + # walk backwards from the start of the context + # to find a line starting with an alphanumeric char. + for x in xrange(astart - 1, -1, -1): + t = l1[x].rstrip() + if funcre.match(t): + func = ' ' + t[:40] + break + + yield "@@ -%d,%d +%d,%d @@%s\n" % (astart + 1, alen, + bstart + 1, blen, func) + for x in delta: + yield x + for x in xrange(a2, aend): + yield ' ' + l1[x] + + if opts.showfunc: + funcre = re.compile('\w') + + # bdiff.blocks gives us the matching sequences in the files. The loop + # below finds the spaces between those matching sequences and translates + # them into diff output. + # + if opts.ignorews or opts.ignorewsamount: + t1 = wsclean(opts, t1, False) + t2 = wsclean(opts, t2, False) + + diff = bdiff.blocks(t1, t2) + hunk = None + for i, s1 in enumerate(diff): + # The first match is special. + # we've either found a match starting at line 0 or a match later + # in the file. If it starts later, old and new below will both be + # empty and we'll continue to the next match. + if i > 0: + s = diff[i - 1] + else: + s = [0, 0, 0, 0] + delta = [] + a1 = s[1] + a2 = s1[0] + b1 = s[3] + b2 = s1[2] + + old = l1[a1:a2] + new = l2[b1:b2] + + # bdiff sometimes gives huge matches past eof, this check eats them, + # and deals with the special first match case described above + if not old and not new: + continue + + if opts.ignoreblanklines: + if wsclean(opts, "".join(old)) == wsclean(opts, "".join(new)): + continue + + astart = contextstart(a1) + bstart = contextstart(b1) + prev = None + if hunk: + # join with the previous hunk if it falls inside the context + if astart < hunk[1] + opts.context + 1: + prev = hunk + astart = hunk[1] + bstart = hunk[3] + else: + for x in yieldhunk(hunk): + yield x + if prev: + # we've joined the previous hunk, record the new ending points. 
+ hunk[1] = a2 + hunk[3] = b2 + delta = hunk[4] + else: + # create a new hunk + hunk = [astart, a2, bstart, b2, delta] + + delta[len(delta):] = [' ' + x for x in l1[astart:a1]] + delta[len(delta):] = ['-' + x for x in old] + delta[len(delta):] = ['+' + x for x in new] + + if hunk: + for x in yieldhunk(hunk): + yield x + +def patchtext(bin): + pos = 0 + t = [] + while pos < len(bin): + p1, p2, l = struct.unpack(">lll", bin[pos:pos + 12]) + pos += 12 + t.append(bin[pos:pos + l]) + pos += l + return "".join(t) + +def patch(a, bin): + if len(a) == 0: + # skip over trivial delta header + return buffer(bin, 12) + return mpatch.patches(a, [bin]) + +# similar to difflib.SequenceMatcher.get_matching_blocks +def get_matching_blocks(a, b): + return [(d[0], d[2], d[1] - d[0]) for d in bdiff.blocks(a, b)] + +def trivialdiffheader(length): + return struct.pack(">lll", 0, 0, length) + +patches = mpatch.patches +patchedsize = mpatch.patchedsize +textdiff = bdiff.bdiff diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/mdiff.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/mdiff.pyo Binary files differnew file mode 100644 index 0000000..ef8112b --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/mdiff.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/merge.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/merge.py new file mode 100644 index 0000000..ebf9d00 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/merge.py @@ -0,0 +1,544 @@ +# merge.py - directory-level update/merge handling for Mercurial +# +# Copyright 2006, 2007 Matt Mackall <mpm@selenic.com> +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. 
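# Illustrative sketch of the on-disk format handled by mergestate._read()
# and mergestate.commit() below: the merge/state file under the
# repository's .hg directory has the hex of the local parent node on its
# first line, and one "\0"-separated record per unresolved file on every
# following line. read_merge_state is a hypothetical helper written only
# to show that layout; it is not a mercurial API.
def read_merge_state(fp):
    local, state = None, {}
    for i, line in enumerate(fp):
        if i == 0:
            local = line[:-1]              # hex node of the local parent
        else:
            fields = line[:-1].split("\0")
            state[fields[0]] = fields[1:]  # dfile -> [state, hash, ...]
    return local, state

# usage with any iterable of lines, e.g.:
#   local, state = read_merge_state(open(".hg/merge/state"))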
+ +from node import nullid, nullrev, hex, bin +from i18n import _ +import util, filemerge, copies, subrepo +import errno, os, shutil + +class mergestate(object): + '''track 3-way merge state of individual files''' + def __init__(self, repo): + self._repo = repo + self._dirty = False + self._read() + def reset(self, node=None): + self._state = {} + if node: + self._local = node + shutil.rmtree(self._repo.join("merge"), True) + self._dirty = False + def _read(self): + self._state = {} + try: + f = self._repo.opener("merge/state") + for i, l in enumerate(f): + if i == 0: + self._local = bin(l[:-1]) + else: + bits = l[:-1].split("\0") + self._state[bits[0]] = bits[1:] + except IOError, err: + if err.errno != errno.ENOENT: + raise + self._dirty = False + def commit(self): + if self._dirty: + f = self._repo.opener("merge/state", "w") + f.write(hex(self._local) + "\n") + for d, v in self._state.iteritems(): + f.write("\0".join([d] + v) + "\n") + self._dirty = False + def add(self, fcl, fco, fca, fd, flags): + hash = util.sha1(fcl.path()).hexdigest() + self._repo.opener("merge/" + hash, "w").write(fcl.data()) + self._state[fd] = ['u', hash, fcl.path(), fca.path(), + hex(fca.filenode()), fco.path(), flags] + self._dirty = True + def __contains__(self, dfile): + return dfile in self._state + def __getitem__(self, dfile): + return self._state[dfile][0] + def __iter__(self): + l = self._state.keys() + l.sort() + for f in l: + yield f + def mark(self, dfile, state): + self._state[dfile][0] = state + self._dirty = True + def resolve(self, dfile, wctx, octx): + if self[dfile] == 'r': + return 0 + state, hash, lfile, afile, anode, ofile, flags = self._state[dfile] + f = self._repo.opener("merge/" + hash) + self._repo.wwrite(dfile, f.read(), flags) + fcd = wctx[dfile] + fco = octx[ofile] + fca = self._repo.filectx(afile, fileid=anode) + r = filemerge.filemerge(self._repo, self._local, lfile, fcd, fco, fca) + if not r: + self.mark(dfile, 'r') + return r + +def _checkunknown(wctx, mctx): + "check for collisions between unknown files and files in mctx" + for f in wctx.unknown(): + if f in mctx and mctx[f].cmp(wctx[f]): + raise util.Abort(_("untracked file in working directory differs" + " from file in requested revision: '%s'") % f) + +def _checkcollision(mctx): + "check for case folding collisions in the destination context" + folded = {} + for fn in mctx: + fold = fn.lower() + if fold in folded: + raise util.Abort(_("case-folding collision between %s and %s") + % (fn, folded[fold])) + folded[fold] = fn + +def _forgetremoved(wctx, mctx, branchmerge): + """ + Forget removed files + + If we're jumping between revisions (as opposed to merging), and if + neither the working directory nor the target rev has the file, + then we need to remove it from the dirstate, to prevent the + dirstate from listing the file when it is no longer in the + manifest. + + If we're merging, and the other revision has removed a file + that is not present in the working directory, we need to mark it + as removed. 
+ """ + + action = [] + state = branchmerge and 'r' or 'f' + for f in wctx.deleted(): + if f not in mctx: + action.append((f, state)) + + if not branchmerge: + for f in wctx.removed(): + if f not in mctx: + action.append((f, "f")) + + return action + +def manifestmerge(repo, p1, p2, pa, overwrite, partial): + """ + Merge p1 and p2 with ancestor pa and generate merge action list + + overwrite = whether we clobber working files + partial = function to filter file lists + """ + + def fmerge(f, f2, fa): + """merge flags""" + a, m, n = ma.flags(fa), m1.flags(f), m2.flags(f2) + if m == n: # flags agree + return m # unchanged + if m and n and not a: # flags set, don't agree, differ from parent + r = repo.ui.promptchoice( + _(" conflicting flags for %s\n" + "(n)one, e(x)ec or sym(l)ink?") % f, + (_("&None"), _("E&xec"), _("Sym&link")), 0) + if r == 1: + return "x" # Exec + if r == 2: + return "l" # Symlink + return "" + if m and m != a: # changed from a to m + return m + if n and n != a: # changed from a to n + return n + return '' # flag was cleared + + def act(msg, m, f, *args): + repo.ui.debug(" %s: %s -> %s\n" % (f, msg, m)) + action.append((f, m) + args) + + action, copy = [], {} + + if overwrite: + pa = p1 + elif pa == p2: # backwards + pa = p1.p1() + elif pa and repo.ui.configbool("merge", "followcopies", True): + dirs = repo.ui.configbool("merge", "followdirs", True) + copy, diverge = copies.copies(repo, p1, p2, pa, dirs) + for of, fl in diverge.iteritems(): + act("divergent renames", "dr", of, fl) + + repo.ui.note(_("resolving manifests\n")) + repo.ui.debug(" overwrite %s partial %s\n" % (overwrite, bool(partial))) + repo.ui.debug(" ancestor %s local %s remote %s\n" % (pa, p1, p2)) + + m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest() + copied = set(copy.values()) + + if '.hgsubstate' in m1: + # check whether sub state is modified + for s in p1.substate: + if p1.sub(s).dirty(): + m1['.hgsubstate'] += "+" + break + + # Compare manifests + for f, n in m1.iteritems(): + if partial and not partial(f): + continue + if f in m2: + rflags = fmerge(f, f, f) + a = ma.get(f, nullid) + if n == m2[f] or m2[f] == a: # same or local newer + # is file locally modified or flags need changing? 
+ # dirstate flags may need to be made current + if m1.flags(f) != rflags or n[20:]: + act("update permissions", "e", f, rflags) + elif n == a: # remote newer + act("remote is newer", "g", f, rflags) + else: # both changed + act("versions differ", "m", f, f, f, rflags, False) + elif f in copied: # files we'll deal with on m2 side + pass + elif f in copy: + f2 = copy[f] + if f2 not in m2: # directory rename + act("remote renamed directory to " + f2, "d", + f, None, f2, m1.flags(f)) + else: # case 2 A,B/B/B or case 4,21 A/B/B + act("local copied/moved to " + f2, "m", + f, f2, f, fmerge(f, f2, f2), False) + elif f in ma: # clean, a different, no remote + if n != ma[f]: + if repo.ui.promptchoice( + _(" local changed %s which remote deleted\n" + "use (c)hanged version or (d)elete?") % f, + (_("&Changed"), _("&Delete")), 0): + act("prompt delete", "r", f) + else: + act("prompt keep", "a", f) + elif n[20:] == "a": # added, no remote + act("remote deleted", "f", f) + elif n[20:] != "u": + act("other deleted", "r", f) + + for f, n in m2.iteritems(): + if partial and not partial(f): + continue + if f in m1 or f in copied: # files already visited + continue + if f in copy: + f2 = copy[f] + if f2 not in m1: # directory rename + act("local renamed directory to " + f2, "d", + None, f, f2, m2.flags(f)) + elif f2 in m2: # rename case 1, A/A,B/A + act("remote copied to " + f, "m", + f2, f, f, fmerge(f2, f, f2), False) + else: # case 3,20 A/B/A + act("remote moved to " + f, "m", + f2, f, f, fmerge(f2, f, f2), True) + elif f not in ma: + act("remote created", "g", f, m2.flags(f)) + elif n != ma[f]: + if repo.ui.promptchoice( + _("remote changed %s which local deleted\n" + "use (c)hanged version or leave (d)eleted?") % f, + (_("&Changed"), _("&Deleted")), 0) == 0: + act("prompt recreating", "g", f, m2.flags(f)) + + return action + +def actionkey(a): + return a[1] == 'r' and -1 or 0, a + +def applyupdates(repo, action, wctx, mctx, actx): + """apply the merge action list to the working directory + + wctx is the working copy context + mctx is the context to be merged into the working copy + actx is the context of the common ancestor + """ + + updated, merged, removed, unresolved = 0, 0, 0, 0 + ms = mergestate(repo) + ms.reset(wctx.parents()[0].node()) + moves = [] + action.sort(key=actionkey) + substate = wctx.substate # prime + + # prescan for merges + u = repo.ui + for a in action: + f, m = a[:2] + if m == 'm': # merge + f2, fd, flags, move = a[2:] + if f == '.hgsubstate': # merged internally + continue + repo.ui.debug("preserving %s for resolve of %s\n" % (f, fd)) + fcl = wctx[f] + fco = mctx[f2] + if mctx == actx: # backwards, use working dir parent as ancestor + if fcl.parents(): + fca = fcl.parents()[0] + else: + fca = repo.filectx(f, fileid=nullrev) + else: + fca = fcl.ancestor(fco, actx) + if not fca: + fca = repo.filectx(f, fileid=nullrev) + ms.add(fcl, fco, fca, fd, flags) + if f != fd and move: + moves.append(f) + + # remove renamed files after safely stored + for f in moves: + if os.path.lexists(repo.wjoin(f)): + repo.ui.debug("removing %s\n" % f) + os.unlink(repo.wjoin(f)) + + audit_path = util.path_auditor(repo.root) + + numupdates = len(action) + for i, a in enumerate(action): + f, m = a[:2] + u.progress(_('updating'), i + 1, item=f, total=numupdates, + unit=_('files')) + if f and f[0] == "/": + continue + if m == "r": # remove + repo.ui.note(_("removing %s\n") % f) + audit_path(f) + if f == '.hgsubstate': # subrepo states need updating + subrepo.submerge(repo, wctx, mctx, wctx) + try: + 
util.unlink(repo.wjoin(f)) + except OSError, inst: + if inst.errno != errno.ENOENT: + repo.ui.warn(_("update failed to remove %s: %s!\n") % + (f, inst.strerror)) + removed += 1 + elif m == "m": # merge + if f == '.hgsubstate': # subrepo states need updating + subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx)) + continue + f2, fd, flags, move = a[2:] + r = ms.resolve(fd, wctx, mctx) + if r is not None and r > 0: + unresolved += 1 + else: + if r is None: + updated += 1 + else: + merged += 1 + util.set_flags(repo.wjoin(fd), 'l' in flags, 'x' in flags) + if f != fd and move and os.path.lexists(repo.wjoin(f)): + repo.ui.debug("removing %s\n" % f) + os.unlink(repo.wjoin(f)) + elif m == "g": # get + flags = a[2] + repo.ui.note(_("getting %s\n") % f) + t = mctx.filectx(f).data() + repo.wwrite(f, t, flags) + t = None + updated += 1 + if f == '.hgsubstate': # subrepo states need updating + subrepo.submerge(repo, wctx, mctx, wctx) + elif m == "d": # directory rename + f2, fd, flags = a[2:] + if f: + repo.ui.note(_("moving %s to %s\n") % (f, fd)) + t = wctx.filectx(f).data() + repo.wwrite(fd, t, flags) + util.unlink(repo.wjoin(f)) + if f2: + repo.ui.note(_("getting %s to %s\n") % (f2, fd)) + t = mctx.filectx(f2).data() + repo.wwrite(fd, t, flags) + updated += 1 + elif m == "dr": # divergent renames + fl = a[2] + repo.ui.warn(_("note: possible conflict - %s was renamed " + "multiple times to:\n") % f) + for nf in fl: + repo.ui.warn(" %s\n" % nf) + elif m == "e": # exec + flags = a[2] + util.set_flags(repo.wjoin(f), 'l' in flags, 'x' in flags) + ms.commit() + u.progress(_('updating'), None, total=numupdates, unit=_('files')) + + return updated, merged, removed, unresolved + +def recordupdates(repo, action, branchmerge): + "record merge actions to the dirstate" + + for a in action: + f, m = a[:2] + if m == "r": # remove + if branchmerge: + repo.dirstate.remove(f) + else: + repo.dirstate.forget(f) + elif m == "a": # re-add + if not branchmerge: + repo.dirstate.add(f) + elif m == "f": # forget + repo.dirstate.forget(f) + elif m == "e": # exec change + repo.dirstate.normallookup(f) + elif m == "g": # get + if branchmerge: + repo.dirstate.otherparent(f) + else: + repo.dirstate.normal(f) + elif m == "m": # merge + f2, fd, flag, move = a[2:] + if branchmerge: + # We've done a branch merge, mark this file as merged + # so that we properly record the merger later + repo.dirstate.merge(fd) + if f != f2: # copy/rename + if move: + repo.dirstate.remove(f) + if f != fd: + repo.dirstate.copy(f, fd) + else: + repo.dirstate.copy(f2, fd) + else: + # We've update-merged a locally modified file, so + # we set the dirstate to emulate a normal checkout + # of that file some time in the past. Thus our + # merge will appear as a normal local file + # modification. 
+ if f2 == fd: # file not locally copied/moved + repo.dirstate.normallookup(fd) + if move: + repo.dirstate.forget(f) + elif m == "d": # directory rename + f2, fd, flag = a[2:] + if not f2 and f not in repo.dirstate: + # untracked file moved + continue + if branchmerge: + repo.dirstate.add(fd) + if f: + repo.dirstate.remove(f) + repo.dirstate.copy(f, fd) + if f2: + repo.dirstate.copy(f2, fd) + else: + repo.dirstate.normal(fd) + if f: + repo.dirstate.forget(f) + +def update(repo, node, branchmerge, force, partial): + """ + Perform a merge between the working directory and the given node + + node = the node to update to, or None if unspecified + branchmerge = whether to merge between branches + force = whether to force branch merging or file overwriting + partial = a function to filter file lists (dirstate not updated) + + The table below shows all the behaviors of the update command + given the -c and -C or no options, whether the working directory + is dirty, whether a revision is specified, and the relationship of + the parent rev to the target rev (linear, on the same named + branch, or on another named branch). + + This logic is tested by test-update-branches.t. + + -c -C dirty rev | linear same cross + n n n n | ok (1) x + n n n y | ok ok ok + n n y * | merge (2) (2) + n y * * | --- discard --- + y n y * | --- (3) --- + y n n * | --- ok --- + y y * * | --- (4) --- + + x = can't happen + * = don't-care + 1 = abort: crosses branches (use 'hg merge' or 'hg update -c') + 2 = abort: crosses branches (use 'hg merge' to merge or + use 'hg update -C' to discard changes) + 3 = abort: uncommitted local changes + 4 = incompatible options (checked in commands.py) + """ + + onode = node + wlock = repo.wlock() + try: + wc = repo[None] + if node is None: + # tip of current branch + try: + node = repo.branchtags()[wc.branch()] + except KeyError: + if wc.branch() == "default": # no default branch! 
+ node = repo.lookup("tip") # update to tip + else: + raise util.Abort(_("branch %s not found") % wc.branch()) + overwrite = force and not branchmerge + pl = wc.parents() + p1, p2 = pl[0], repo[node] + pa = p1.ancestor(p2) + fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2) + fastforward = False + + ### check phase + if not overwrite and len(pl) > 1: + raise util.Abort(_("outstanding uncommitted merges")) + if branchmerge: + if pa == p2: + raise util.Abort(_("merging with a working directory ancestor" + " has no effect")) + elif pa == p1: + if p1.branch() != p2.branch(): + fastforward = True + else: + raise util.Abort(_("nothing to merge (use 'hg update'" + " or check 'hg heads')")) + if not force and (wc.files() or wc.deleted()): + raise util.Abort(_("outstanding uncommitted changes " + "(use 'hg status' to list changes)")) + elif not overwrite: + if pa == p1 or pa == p2: # linear + pass # all good + elif wc.files() or wc.deleted(): + raise util.Abort(_("crosses branches (merge branches or use" + " --clean to discard changes)")) + elif onode is None: + raise util.Abort(_("crosses branches (merge branches or use" + " --check to force update)")) + else: + # Allow jumping branches if clean and specific rev given + overwrite = True + + ### calculate phase + action = [] + wc.status(unknown=True) # prime cache + if not force: + _checkunknown(wc, p2) + if not util.checkcase(repo.path): + _checkcollision(p2) + action += _forgetremoved(wc, p2, branchmerge) + action += manifestmerge(repo, wc, p2, pa, overwrite, partial) + + ### apply phase + if not branchmerge: # just jump to the new rev + fp1, fp2, xp1, xp2 = fp2, nullid, xp2, '' + if not partial: + repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2) + + stats = applyupdates(repo, action, wc, p2, pa) + + if not partial: + repo.dirstate.setparents(fp1, fp2) + recordupdates(repo, action, branchmerge) + if not branchmerge and not fastforward: + repo.dirstate.setbranch(p2.branch()) + finally: + wlock.release() + + if not partial: + repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3]) + return stats diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/merge.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/merge.pyo Binary files differnew file mode 100644 index 0000000..bc7d942 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/merge.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/minirst.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/minirst.py new file mode 100644 index 0000000..73a0aa4 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/minirst.py @@ -0,0 +1,450 @@ +# minirst.py - minimal reStructuredText parser +# +# Copyright 2009, 2010 Matt Mackall <mpm@selenic.com> and others +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +"""simplified reStructuredText parser. + +This parser knows just enough about reStructuredText to parse the +Mercurial docstrings. + +It cheats in a major way: nested blocks are not really nested. They +are just indented blocks that look like they are nested. This relies +on the user to keep the right indentation for the blocks. 
+ +It only supports a small subset of reStructuredText: + +- sections + +- paragraphs + +- literal blocks + +- definition lists + +- specific admonitions + +- bullet lists (items must start with '-') + +- enumerated lists (no autonumbering) + +- field lists (colons cannot be escaped) + +- option lists (supports only long options without arguments) + +- inline literals (no other inline markup is not recognized) +""" + +import re, sys +import util, encoding +from i18n import _ + + +def replace(text, substs): + utext = text.decode(encoding.encoding) + for f, t in substs: + utext = utext.replace(f, t) + return utext.encode(encoding.encoding) + + +_blockre = re.compile(r"\n(?:\s*\n)+") + +def findblocks(text): + """Find continuous blocks of lines in text. + + Returns a list of dictionaries representing the blocks. Each block + has an 'indent' field and a 'lines' field. + """ + blocks = [] + for b in _blockre.split(text.strip()): + lines = b.splitlines() + indent = min((len(l) - len(l.lstrip())) for l in lines) + lines = [l[indent:] for l in lines] + blocks.append(dict(indent=indent, lines=lines)) + return blocks + + +def findliteralblocks(blocks): + """Finds literal blocks and adds a 'type' field to the blocks. + + Literal blocks are given the type 'literal', all other blocks are + given type the 'paragraph'. + """ + i = 0 + while i < len(blocks): + # Searching for a block that looks like this: + # + # +------------------------------+ + # | paragraph | + # | (ends with "::") | + # +------------------------------+ + # +---------------------------+ + # | indented literal block | + # +---------------------------+ + blocks[i]['type'] = 'paragraph' + if blocks[i]['lines'][-1].endswith('::') and i + 1 < len(blocks): + indent = blocks[i]['indent'] + adjustment = blocks[i + 1]['indent'] - indent + + if blocks[i]['lines'] == ['::']: + # Expanded form: remove block + del blocks[i] + i -= 1 + elif blocks[i]['lines'][-1].endswith(' ::'): + # Partially minimized form: remove space and both + # colons. + blocks[i]['lines'][-1] = blocks[i]['lines'][-1][:-3] + else: + # Fully minimized form: remove just one colon. + blocks[i]['lines'][-1] = blocks[i]['lines'][-1][:-1] + + # List items are formatted with a hanging indent. We must + # correct for this here while we still have the original + # information on the indentation of the subsequent literal + # blocks available. + m = _bulletre.match(blocks[i]['lines'][0]) + if m: + indent += m.end() + adjustment -= m.end() + + # Mark the following indented blocks. + while i + 1 < len(blocks) and blocks[i + 1]['indent'] > indent: + blocks[i + 1]['type'] = 'literal' + blocks[i + 1]['indent'] -= adjustment + i += 1 + i += 1 + return blocks + +_bulletre = re.compile(r'(-|[0-9A-Za-z]+\.|\(?[0-9A-Za-z]+\)|\|) ') +_optionre = re.compile(r'^(--[a-z-]+)((?:[ =][a-zA-Z][\w-]*)? +)(.*)$') +_fieldre = re.compile(r':(?![: ])([^:]*)(?<! ):[ ]+(.*)') +_definitionre = re.compile(r'[^ ]') + +def splitparagraphs(blocks): + """Split paragraphs into lists.""" + # Tuples with (list type, item regexp, single line items?). Order + # matters: definition lists has the least specific regexp and must + # come last. + listtypes = [('bullet', _bulletre, True), + ('option', _optionre, True), + ('field', _fieldre, True), + ('definition', _definitionre, False)] + + def match(lines, i, itemre, singleline): + """Does itemre match an item at line i? + + A list item can be followed by an idented line or another list + item (but only if singleline is True). 
+ """ + line1 = lines[i] + line2 = i + 1 < len(lines) and lines[i + 1] or '' + if not itemre.match(line1): + return False + if singleline: + return line2 == '' or line2[0] == ' ' or itemre.match(line2) + else: + return line2.startswith(' ') + + i = 0 + while i < len(blocks): + if blocks[i]['type'] == 'paragraph': + lines = blocks[i]['lines'] + for type, itemre, singleline in listtypes: + if match(lines, 0, itemre, singleline): + items = [] + for j, line in enumerate(lines): + if match(lines, j, itemre, singleline): + items.append(dict(type=type, lines=[], + indent=blocks[i]['indent'])) + items[-1]['lines'].append(line) + blocks[i:i + 1] = items + break + i += 1 + return blocks + + +_fieldwidth = 12 + +def updatefieldlists(blocks): + """Find key and maximum key width for field lists.""" + i = 0 + while i < len(blocks): + if blocks[i]['type'] != 'field': + i += 1 + continue + + keywidth = 0 + j = i + while j < len(blocks) and blocks[j]['type'] == 'field': + m = _fieldre.match(blocks[j]['lines'][0]) + key, rest = m.groups() + blocks[j]['lines'][0] = rest + blocks[j]['key'] = key + keywidth = max(keywidth, len(key)) + j += 1 + + for block in blocks[i:j]: + block['keywidth'] = keywidth + i = j + 1 + + return blocks + + +def prunecontainers(blocks, keep): + """Prune unwanted containers. + + The blocks must have a 'type' field, i.e., they should have been + run through findliteralblocks first. + """ + pruned = [] + i = 0 + while i + 1 < len(blocks): + # Searching for a block that looks like this: + # + # +-------+---------------------------+ + # | ".. container ::" type | + # +---+ | + # | blocks | + # +-------------------------------+ + if (blocks[i]['type'] == 'paragraph' and + blocks[i]['lines'][0].startswith('.. container::')): + indent = blocks[i]['indent'] + adjustment = blocks[i + 1]['indent'] - indent + containertype = blocks[i]['lines'][0][15:] + prune = containertype not in keep + if prune: + pruned.append(containertype) + + # Always delete "..container:: type" block + del blocks[i] + j = i + while j < len(blocks) and blocks[j]['indent'] > indent: + if prune: + del blocks[j] + i -= 1 # adjust outer index + else: + blocks[j]['indent'] -= adjustment + j += 1 + i += 1 + return blocks, pruned + + +_sectionre = re.compile(r"""^([-=`:.'"~^_*+#])\1+$""") + +def findsections(blocks): + """Finds sections. + + The blocks must have a 'type' field, i.e., they should have been + run through findliteralblocks first. + """ + for block in blocks: + # Searching for a block that looks like this: + # + # +------------------------------+ + # | Section title | + # | ------------- | + # +------------------------------+ + if (block['type'] == 'paragraph' and + len(block['lines']) == 2 and + encoding.colwidth(block['lines'][0]) == len(block['lines'][1]) and + _sectionre.match(block['lines'][1])): + block['underline'] = block['lines'][1][0] + block['type'] = 'section' + del block['lines'][1] + return blocks + + +def inlineliterals(blocks): + substs = [('``', '"')] + for b in blocks: + if b['type'] in ('paragraph', 'section'): + b['lines'] = [replace(l, substs) for l in b['lines']] + return blocks + + +def hgrole(blocks): + substs = [(':hg:`', '"hg '), ('`', '"')] + for b in blocks: + if b['type'] in ('paragraph', 'section'): + # Turn :hg:`command` into "hg command". This also works + # when there is a line break in the command and relies on + # the fact that we have no stray back-quotes in the input + # (run the blocks through inlineliterals first). 
+ b['lines'] = [replace(l, substs) for l in b['lines']] + return blocks + + +def addmargins(blocks): + """Adds empty blocks for vertical spacing. + + This groups bullets, options, and definitions together with no vertical + space between them, and adds an empty block between all other blocks. + """ + i = 1 + while i < len(blocks): + if (blocks[i]['type'] == blocks[i - 1]['type'] and + blocks[i]['type'] in ('bullet', 'option', 'field')): + i += 1 + else: + blocks.insert(i, dict(lines=[''], indent=0, type='margin')) + i += 2 + return blocks + +def prunecomments(blocks): + """Remove comments.""" + i = 0 + while i < len(blocks): + b = blocks[i] + if b['type'] == 'paragraph' and b['lines'][0].startswith('.. '): + del blocks[i] + else: + i += 1 + return blocks + +_admonitionre = re.compile(r"\.\. (admonition|attention|caution|danger|" + r"error|hint|important|note|tip|warning)::", + flags=re.IGNORECASE) + +def findadmonitions(blocks): + """ + Makes the type of the block an admonition block if + the first line is an admonition directive + """ + i = 0 + while i < len(blocks): + m = _admonitionre.match(blocks[i]['lines'][0]) + if m: + blocks[i]['type'] = 'admonition' + admonitiontitle = blocks[i]['lines'][0][3:m.end() - 2].lower() + + firstline = blocks[i]['lines'][0][m.end() + 1:] + if firstline: + blocks[i]['lines'].insert(1, ' ' + firstline) + + blocks[i]['admonitiontitle'] = admonitiontitle + del blocks[i]['lines'][0] + i = i + 1 + return blocks + +_admonitiontitles = {'attention': _('Attention:'), + 'caution': _('Caution:'), + 'danger': _('!Danger!') , + 'error': _('Error:'), + 'hint': _('Hint:'), + 'important': _('Important:'), + 'note': _('Note:'), + 'tip': _('Tip:'), + 'warning': _('Warning!')} + +def formatblock(block, width): + """Format a block according to width.""" + if width <= 0: + width = 78 + indent = ' ' * block['indent'] + if block['type'] == 'admonition': + admonition = _admonitiontitles[block['admonitiontitle']] + hang = len(block['lines'][-1]) - len(block['lines'][-1].lstrip()) + + defindent = indent + hang * ' ' + text = ' '.join(map(str.strip, block['lines'])) + return '%s\n%s' % (indent + admonition, util.wrap(text, width=width, + initindent=defindent, + hangindent=defindent)) + if block['type'] == 'margin': + return '' + if block['type'] == 'literal': + indent += ' ' + return indent + ('\n' + indent).join(block['lines']) + if block['type'] == 'section': + underline = encoding.colwidth(block['lines'][0]) * block['underline'] + return "%s%s\n%s%s" % (indent, block['lines'][0],indent, underline) + if block['type'] == 'definition': + term = indent + block['lines'][0] + hang = len(block['lines'][-1]) - len(block['lines'][-1].lstrip()) + defindent = indent + hang * ' ' + text = ' '.join(map(str.strip, block['lines'][1:])) + return '%s\n%s' % (term, util.wrap(text, width=width, + initindent=defindent, + hangindent=defindent)) + subindent = indent + if block['type'] == 'bullet': + if block['lines'][0].startswith('| '): + # Remove bullet for line blocks and add no extra + # indention. 
+ block['lines'][0] = block['lines'][0][2:] + else: + m = _bulletre.match(block['lines'][0]) + subindent = indent + m.end() * ' ' + elif block['type'] == 'field': + keywidth = block['keywidth'] + key = block['key'] + + subindent = indent + _fieldwidth * ' ' + if len(key) + 2 > _fieldwidth: + # key too large, use full line width + key = key.ljust(width) + elif keywidth + 2 < _fieldwidth: + # all keys are small, add only two spaces + key = key.ljust(keywidth + 2) + subindent = indent + (keywidth + 2) * ' ' + else: + # mixed sizes, use fieldwidth for this one + key = key.ljust(_fieldwidth) + block['lines'][0] = key + block['lines'][0] + elif block['type'] == 'option': + m = _optionre.match(block['lines'][0]) + option, arg, rest = m.groups() + subindent = indent + (len(option) + len(arg)) * ' ' + + text = ' '.join(map(str.strip, block['lines'])) + return util.wrap(text, width=width, + initindent=indent, + hangindent=subindent) + + +def format(text, width, indent=0, keep=None): + """Parse and format the text according to width.""" + blocks = findblocks(text) + for b in blocks: + b['indent'] += indent + blocks = findliteralblocks(blocks) + blocks, pruned = prunecontainers(blocks, keep or []) + blocks = findsections(blocks) + blocks = inlineliterals(blocks) + blocks = hgrole(blocks) + blocks = splitparagraphs(blocks) + blocks = updatefieldlists(blocks) + blocks = prunecomments(blocks) + blocks = addmargins(blocks) + blocks = findadmonitions(blocks) + text = '\n'.join(formatblock(b, width) for b in blocks) + if keep is None: + return text + else: + return text, pruned + + +if __name__ == "__main__": + from pprint import pprint + + def debug(func, *args): + blocks = func(*args) + print "*** after %s:" % func.__name__ + pprint(blocks) + print + return blocks + + text = open(sys.argv[1]).read() + blocks = debug(findblocks, text) + blocks = debug(findliteralblocks, blocks) + blocks, pruned = debug(prunecontainers, blocks, sys.argv[2:]) + blocks = debug(inlineliterals, blocks) + blocks = debug(splitparagraphs, blocks) + blocks = debug(updatefieldlists, blocks) + blocks = debug(findsections, blocks) + blocks = debug(prunecomments, blocks) + blocks = debug(addmargins, blocks) + blocks = debug(findadmonitions, blocks) + print '\n'.join(formatblock(b, 30) for b in blocks) diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/minirst.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/minirst.pyo Binary files differnew file mode 100644 index 0000000..6030fb2 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/minirst.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/mpatch.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/mpatch.py new file mode 100644 index 0000000..def4981 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/mpatch.py @@ -0,0 +1,7 @@ +def __bootstrap__(): + global __bootstrap__, __loader__, __file__ + import sys, pkg_resources, imp + __file__ = pkg_resources.resource_filename(__name__,'mpatch.so') + __loader__ = None; del __bootstrap__, __loader__ + imp.load_dynamic(__name__,__file__) +__bootstrap__() diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/mpatch.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/mpatch.pyo Binary files differnew file mode 100644 index 0000000..1d5614b --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/mpatch.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/mpatch.so 
b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/mpatch.so Binary files differnew file mode 100755 index 0000000..f6d0fe6 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/mpatch.so diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/node.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/node.py new file mode 100644 index 0000000..9debeaf --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/node.py @@ -0,0 +1,18 @@ +# node.py - basic nodeid manipulation for mercurial +# +# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com> +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +import binascii + +nullrev = -1 +nullid = "\0" * 20 + +# This ugly style has a noticeable effect in manifest parsing +hex = binascii.hexlify +bin = binascii.unhexlify + +def short(node): + return hex(node[:6]) diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/node.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/node.pyo Binary files differnew file mode 100644 index 0000000..46f9e2b --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/node.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/osutil.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/osutil.py new file mode 100644 index 0000000..7ff826c --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/osutil.py @@ -0,0 +1,7 @@ +def __bootstrap__(): + global __bootstrap__, __loader__, __file__ + import sys, pkg_resources, imp + __file__ = pkg_resources.resource_filename(__name__,'osutil.so') + __loader__ = None; del __bootstrap__, __loader__ + imp.load_dynamic(__name__,__file__) +__bootstrap__() diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/osutil.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/osutil.pyo Binary files differnew file mode 100644 index 0000000..53db967 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/osutil.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/osutil.so b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/osutil.so Binary files differnew file mode 100755 index 0000000..7921499 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/osutil.so diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/parser.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/parser.py new file mode 100644 index 0000000..5a6a55c --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/parser.py @@ -0,0 +1,91 @@ +# parser.py - simple top-down operator precedence parser for mercurial +# +# Copyright 2010 Matt Mackall <mpm@selenic.com> +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. 
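# Illustrative sketch of how the parser class defined in this file is
# driven, assuming the mercurial package added by this change is
# importable. The tokenizer, elements table and methods below describe a
# tiny calculator and are hypothetical -- they are written only for this
# sketch and are not part of mercurial.
from mercurial.parser import parser

def tokenize(program):
    # yield (type, value, position) triples, ending with an "end" token
    for pos, c in enumerate(program):
        if c.isdigit():                     # single-digit numbers only
            yield ("symbol", c, pos)
        elif c in "+*()":
            yield (c, c, pos)
        elif not c.isspace():
            raise ValueError("syntax error at %d" % pos)
    yield ("end", None, len(program))

elements = {
    # token type: (binding strength, prefix action, infix action)
    "(": (0, ("group", 1, ")"), None),
    ")": (0, None, None),
    "+": (10, None, ("add", 10)),
    "*": (20, None, ("mul", 20)),
    "symbol": (0, ("symbol",), None),
    "end": (0, None, None),
}

methods = {
    "group": lambda x: x,
    "add": lambda x, y: x + y,
    "mul": lambda x, y: x * y,
    "symbol": int,
}

p = parser(tokenize, elements, methods)
# parse() alone returns the labelled tree; __call__ also evaluates it
assert p.parse("1+2*3") == ("add", ("symbol", "1"),
                            ("mul", ("symbol", "2"), ("symbol", "3")))
assert p("(1+2)*3") == 9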
+ +# see http://effbot.org/zone/simple-top-down-parsing.htm and +# http://eli.thegreenplace.net/2010/01/02/top-down-operator-precedence-parsing/ +# for background + +# takes a tokenizer and elements +# tokenizer is an iterator that returns type, value pairs +# elements is a mapping of types to binding strength, prefix and infix actions +# an action is a tree node name, a tree label, and an optional match +# __call__(program) parses program into a labelled tree + +import error + +class parser(object): + def __init__(self, tokenizer, elements, methods=None): + self._tokenizer = tokenizer + self._elements = elements + self._methods = methods + def _advance(self): + 'advance the tokenizer' + t = self.current + try: + self.current = self._iter.next() + except StopIteration: + pass + return t + def _match(self, m, pos): + 'make sure the tokenizer matches an end condition' + if self.current[0] != m: + raise error.ParseError("unexpected token: %s" % self.current[0], + self.current[2]) + self._advance() + def _parse(self, bind=0): + token, value, pos = self._advance() + # handle prefix rules on current token + prefix = self._elements[token][1] + if not prefix: + raise error.ParseError("not a prefix: %s" % token, pos) + if len(prefix) == 1: + expr = (prefix[0], value) + else: + if len(prefix) > 2 and prefix[2] == self.current[0]: + self._match(prefix[2], pos) + expr = (prefix[0], None) + else: + expr = (prefix[0], self._parse(prefix[1])) + if len(prefix) > 2: + self._match(prefix[2], pos) + # gather tokens until we meet a lower binding strength + while bind < self._elements[self.current[0]][0]: + token, value, pos = self._advance() + e = self._elements[token] + # check for suffix - next token isn't a valid prefix + if len(e) == 4 and not self._elements[self.current[0]][1]: + suffix = e[3] + expr = (suffix[0], expr) + else: + # handle infix rules + if len(e) < 3 or not e[2]: + raise error.ParseError("not an infix: %s" % token, pos) + infix = e[2] + if len(infix) == 3 and infix[2] == self.current[0]: + self._match(infix[2], pos) + expr = (infix[0], expr, (None)) + else: + expr = (infix[0], expr, self._parse(infix[1])) + if len(infix) == 3: + self._match(infix[2], pos) + return expr + def parse(self, message): + 'generate a parse tree from a message' + self._iter = self._tokenizer(message) + self.current = self._iter.next() + return self._parse() + def eval(self, tree): + 'recursively evaluate a parse tree using node methods' + if not isinstance(tree, tuple): + return tree + return self._methods[tree[0]](*[self.eval(t) for t in tree[1:]]) + def __call__(self, message): + 'parse a message into a parse tree and evaluate if methods given' + t = self.parse(message) + if self._methods: + return self.eval(t) + return t diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/parser.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/parser.pyo Binary files differnew file mode 100644 index 0000000..5935e9b --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/parser.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/parsers.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/parsers.py new file mode 100644 index 0000000..6780752 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/parsers.py @@ -0,0 +1,7 @@ +def __bootstrap__(): + global __bootstrap__, __loader__, __file__ + import sys, pkg_resources, imp + __file__ = pkg_resources.resource_filename(__name__,'parsers.so') + __loader__ = None; del __bootstrap__, __loader__ + 
imp.load_dynamic(__name__,__file__) +__bootstrap__() diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/parsers.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/parsers.pyo Binary files differnew file mode 100644 index 0000000..eabf6c0 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/parsers.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/parsers.so b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/parsers.so Binary files differnew file mode 100755 index 0000000..b68f418 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/parsers.so diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/patch.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/patch.py new file mode 100644 index 0000000..8e9b13e --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/patch.py @@ -0,0 +1,1630 @@ +# patch.py - patch file parsing routines +# +# Copyright 2006 Brendan Cully <brendan@kublai.com> +# Copyright 2007 Chris Mason <chris.mason@oracle.com> +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +import cStringIO, email.Parser, os, re +import tempfile, zlib + +from i18n import _ +from node import hex, nullid, short +import base85, mdiff, util, diffhelpers, copies, encoding + +gitre = re.compile('diff --git a/(.*) b/(.*)') + +class PatchError(Exception): + pass + +# helper functions + +def copyfile(src, dst, basedir): + abssrc, absdst = [util.canonpath(basedir, basedir, x) for x in [src, dst]] + if os.path.lexists(absdst): + raise util.Abort(_("cannot create %s: destination already exists") % + dst) + + dstdir = os.path.dirname(absdst) + if dstdir and not os.path.isdir(dstdir): + try: + os.makedirs(dstdir) + except IOError: + raise util.Abort( + _("cannot create %s: unable to create destination directory") + % dst) + + util.copyfile(abssrc, absdst) + +# public functions + +def split(stream): + '''return an iterator of individual patches from a stream''' + def isheader(line, inheader): + if inheader and line[0] in (' ', '\t'): + # continuation + return True + if line[0] in (' ', '-', '+'): + # diff line - don't check for header pattern in there + return False + l = line.split(': ', 1) + return len(l) == 2 and ' ' not in l[0] + + def chunk(lines): + return cStringIO.StringIO(''.join(lines)) + + def hgsplit(stream, cur): + inheader = True + + for line in stream: + if not line.strip(): + inheader = False + if not inheader and line.startswith('# HG changeset patch'): + yield chunk(cur) + cur = [] + inheader = True + + cur.append(line) + + if cur: + yield chunk(cur) + + def mboxsplit(stream, cur): + for line in stream: + if line.startswith('From '): + for c in split(chunk(cur[1:])): + yield c + cur = [] + + cur.append(line) + + if cur: + for c in split(chunk(cur[1:])): + yield c + + def mimesplit(stream, cur): + def msgfp(m): + fp = cStringIO.StringIO() + g = email.Generator.Generator(fp, mangle_from_=False) + g.flatten(m) + fp.seek(0) + return fp + + for line in stream: + cur.append(line) + c = chunk(cur) + + m = email.Parser.Parser().parse(c) + if not m.is_multipart(): + yield msgfp(m) + else: + ok_types = ('text/plain', 'text/x-diff', 'text/x-patch') + for part in m.walk(): + ct = part.get_content_type() + if ct not in ok_types: + continue + yield msgfp(part) + + def headersplit(stream, cur): + inheader = False + + for line in stream: + if not inheader and isheader(line, inheader): + 
yield chunk(cur) + cur = [] + inheader = True + if inheader and not isheader(line, inheader): + inheader = False + + cur.append(line) + + if cur: + yield chunk(cur) + + def remainder(cur): + yield chunk(cur) + + class fiter(object): + def __init__(self, fp): + self.fp = fp + + def __iter__(self): + return self + + def next(self): + l = self.fp.readline() + if not l: + raise StopIteration + return l + + inheader = False + cur = [] + + mimeheaders = ['content-type'] + + if not hasattr(stream, 'next'): + # http responses, for example, have readline but not next + stream = fiter(stream) + + for line in stream: + cur.append(line) + if line.startswith('# HG changeset patch'): + return hgsplit(stream, cur) + elif line.startswith('From '): + return mboxsplit(stream, cur) + elif isheader(line, inheader): + inheader = True + if line.split(':', 1)[0].lower() in mimeheaders: + # let email parser handle this + return mimesplit(stream, cur) + elif line.startswith('--- ') and inheader: + # No evil headers seen by diff start, split by hand + return headersplit(stream, cur) + # Not enough info, keep reading + + # if we are here, we have a very plain patch + return remainder(cur) + +def extract(ui, fileobj): + '''extract patch from data read from fileobj. + + patch can be a normal patch or contained in an email message. + + return tuple (filename, message, user, date, branch, node, p1, p2). + Any item in the returned tuple can be None. If filename is None, + fileobj did not contain a patch. Caller must unlink filename when done.''' + + # attempt to detect the start of a patch + # (this heuristic is borrowed from quilt) + diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |' + r'retrieving revision [0-9]+(\.[0-9]+)*$|' + r'---[ \t].*?^\+\+\+[ \t]|' + r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL) + + fd, tmpname = tempfile.mkstemp(prefix='hg-patch-') + tmpfp = os.fdopen(fd, 'w') + try: + msg = email.Parser.Parser().parse(fileobj) + + subject = msg['Subject'] + user = msg['From'] + if not subject and not user: + # Not an email, restore parsed headers if any + subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n' + + gitsendmail = 'git-send-email' in msg.get('X-Mailer', '') + # should try to parse msg['Date'] + date = None + nodeid = None + branch = None + parents = [] + + if subject: + if subject.startswith('[PATCH'): + pend = subject.find(']') + if pend >= 0: + subject = subject[pend + 1:].lstrip() + subject = subject.replace('\n\t', ' ') + ui.debug('Subject: %s\n' % subject) + if user: + ui.debug('From: %s\n' % user) + diffs_seen = 0 + ok_types = ('text/plain', 'text/x-diff', 'text/x-patch') + message = '' + for part in msg.walk(): + content_type = part.get_content_type() + ui.debug('Content-Type: %s\n' % content_type) + if content_type not in ok_types: + continue + payload = part.get_payload(decode=True) + m = diffre.search(payload) + if m: + hgpatch = False + hgpatchheader = False + ignoretext = False + + ui.debug('found patch at byte %d\n' % m.start(0)) + diffs_seen += 1 + cfp = cStringIO.StringIO() + for line in payload[:m.start(0)].splitlines(): + if line.startswith('# HG changeset patch') and not hgpatch: + ui.debug('patch generated by hg export\n') + hgpatch = True + hgpatchheader = True + # drop earlier commit message content + cfp.seek(0) + cfp.truncate() + subject = None + elif hgpatchheader: + if line.startswith('# User '): + user = line[7:] + ui.debug('From: %s\n' % user) + elif line.startswith("# Date "): + date = line[7:] + elif line.startswith("# Branch "): + branch = 
line[9:] + elif line.startswith("# Node ID "): + nodeid = line[10:] + elif line.startswith("# Parent "): + parents.append(line[10:]) + elif not line.startswith("# "): + hgpatchheader = False + elif line == '---' and gitsendmail: + ignoretext = True + if not hgpatchheader and not ignoretext: + cfp.write(line) + cfp.write('\n') + message = cfp.getvalue() + if tmpfp: + tmpfp.write(payload) + if not payload.endswith('\n'): + tmpfp.write('\n') + elif not diffs_seen and message and content_type == 'text/plain': + message += '\n' + payload + except: + tmpfp.close() + os.unlink(tmpname) + raise + + if subject and not message.startswith(subject): + message = '%s\n%s' % (subject, message) + tmpfp.close() + if not diffs_seen: + os.unlink(tmpname) + return None, message, user, date, branch, None, None, None + p1 = parents and parents.pop(0) or None + p2 = parents and parents.pop(0) or None + return tmpname, message, user, date, branch, nodeid, p1, p2 + +class patchmeta(object): + """Patched file metadata + + 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY + or COPY. 'path' is patched file path. 'oldpath' is set to the + origin file when 'op' is either COPY or RENAME, None otherwise. If + file mode is changed, 'mode' is a tuple (islink, isexec) where + 'islink' is True if the file is a symlink and 'isexec' is True if + the file is executable. Otherwise, 'mode' is None. + """ + def __init__(self, path): + self.path = path + self.oldpath = None + self.mode = None + self.op = 'MODIFY' + self.binary = False + + def setmode(self, mode): + islink = mode & 020000 + isexec = mode & 0100 + self.mode = (islink, isexec) + + def __repr__(self): + return "<patchmeta %s %r>" % (self.op, self.path) + +def readgitpatch(lr): + """extract git-style metadata about patches from <patchname>""" + + # Filter patch for git information + gp = None + gitpatches = [] + for line in lr: + line = line.rstrip(' \r\n') + if line.startswith('diff --git'): + m = gitre.match(line) + if m: + if gp: + gitpatches.append(gp) + dst = m.group(2) + gp = patchmeta(dst) + elif gp: + if line.startswith('--- '): + gitpatches.append(gp) + gp = None + continue + if line.startswith('rename from '): + gp.op = 'RENAME' + gp.oldpath = line[12:] + elif line.startswith('rename to '): + gp.path = line[10:] + elif line.startswith('copy from '): + gp.op = 'COPY' + gp.oldpath = line[10:] + elif line.startswith('copy to '): + gp.path = line[8:] + elif line.startswith('deleted file'): + gp.op = 'DELETE' + elif line.startswith('new file mode '): + gp.op = 'ADD' + gp.setmode(int(line[-6:], 8)) + elif line.startswith('new mode '): + gp.setmode(int(line[-6:], 8)) + elif line.startswith('GIT binary patch'): + gp.binary = True + if gp: + gitpatches.append(gp) + + return gitpatches + +class linereader(object): + # simple class to allow pushing lines back into the input stream + def __init__(self, fp, textmode=False): + self.fp = fp + self.buf = [] + self.textmode = textmode + self.eol = None + + def push(self, line): + if line is not None: + self.buf.append(line) + + def readline(self): + if self.buf: + l = self.buf[0] + del self.buf[0] + return l + l = self.fp.readline() + if not self.eol: + if l.endswith('\r\n'): + self.eol = '\r\n' + elif l.endswith('\n'): + self.eol = '\n' + if self.textmode and l.endswith('\r\n'): + l = l[:-2] + '\n' + return l + + def __iter__(self): + while 1: + l = self.readline() + if not l: + break + yield l + +# @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1 +unidesc = re.compile('@@ -(\d+)(,(\d+))? 
\+(\d+)(,(\d+))? @@') +contextdesc = re.compile('(---|\*\*\*) (\d+)(,(\d+))? (---|\*\*\*)') +eolmodes = ['strict', 'crlf', 'lf', 'auto'] + +class patchfile(object): + def __init__(self, ui, fname, opener, missing=False, eolmode='strict'): + self.fname = fname + self.eolmode = eolmode + self.eol = None + self.opener = opener + self.ui = ui + self.lines = [] + self.exists = False + self.missing = missing + if not missing: + try: + self.lines = self.readlines(fname) + self.exists = True + except IOError: + pass + else: + self.ui.warn(_("unable to find '%s' for patching\n") % self.fname) + + self.hash = {} + self.dirty = 0 + self.offset = 0 + self.skew = 0 + self.rej = [] + self.fileprinted = False + self.printfile(False) + self.hunks = 0 + + def readlines(self, fname): + if os.path.islink(fname): + return [os.readlink(fname)] + fp = self.opener(fname, 'r') + try: + lr = linereader(fp, self.eolmode != 'strict') + lines = list(lr) + self.eol = lr.eol + return lines + finally: + fp.close() + + def writelines(self, fname, lines): + # Ensure supplied data ends in fname, being a regular file or + # a symlink. cmdutil.updatedir will -too magically- take care + # of setting it to the proper type afterwards. + islink = os.path.islink(fname) + if islink: + fp = cStringIO.StringIO() + else: + fp = self.opener(fname, 'w') + try: + if self.eolmode == 'auto': + eol = self.eol + elif self.eolmode == 'crlf': + eol = '\r\n' + else: + eol = '\n' + + if self.eolmode != 'strict' and eol and eol != '\n': + for l in lines: + if l and l[-1] == '\n': + l = l[:-1] + eol + fp.write(l) + else: + fp.writelines(lines) + if islink: + self.opener.symlink(fp.getvalue(), fname) + finally: + fp.close() + + def unlink(self, fname): + os.unlink(fname) + + def printfile(self, warn): + if self.fileprinted: + return + if warn or self.ui.verbose: + self.fileprinted = True + s = _("patching file %s\n") % self.fname + if warn: + self.ui.warn(s) + else: + self.ui.note(s) + + + def findlines(self, l, linenum): + # looks through the hash and finds candidate lines. The + # result is a list of line numbers sorted based on distance + # from linenum + + cand = self.hash.get(l, []) + if len(cand) > 1: + # resort our list of potentials forward then back. + cand.sort(key=lambda x: abs(x - linenum)) + return cand + + def hashlines(self): + self.hash = {} + for x, s in enumerate(self.lines): + self.hash.setdefault(s, []).append(x) + + def makerejlines(self, fname): + base = os.path.basename(fname) + yield "--- %s\n+++ %s\n" % (base, base) + for x in self.rej: + for l in x.hunk: + yield l + if l[-1] != '\n': + yield "\n\ No newline at end of file\n" + + def write_rej(self): + # our rejects are a little different from patch(1). This always + # creates rejects in the same form as the original patch. A file + # header is inserted so that you can run the reject through patch again + # without having to type the filename. 
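As the comment above explains, rejected hunks are written with a synthetic ---/+++ header so the .rej file can be fed straight back to patch(1) without retyping the filename. A minimal standalone sketch of that layout (not part of the vendored module; the filename and hunk below are made up):

    import os

    def makerejlines(fname, rejected_hunks):
        # mirror of patchfile.makerejlines above: one synthetic file header,
        # then the rejected hunks verbatim
        base = os.path.basename(fname)
        yield "--- %s\n+++ %s\n" % (base, base)
        for hunklines in rejected_hunks:
            for l in hunklines:
                yield l
                if l[-1] != '\n':
                    yield "\n\\ No newline at end of file\n"

    hunk = ["@@ -1,2 +1,2 @@\n", " context\n", "-old line\n", "+new line\n"]
    print(''.join(makerejlines('src/module.py', [hunk])))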
+ + if not self.rej: + return + + fname = self.fname + ".rej" + self.ui.warn( + _("%d out of %d hunks FAILED -- saving rejects to file %s\n") % + (len(self.rej), self.hunks, fname)) + + fp = self.opener(fname, 'w') + fp.writelines(self.makerejlines(self.fname)) + fp.close() + + def apply(self, h): + if not h.complete(): + raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") % + (h.number, h.desc, len(h.a), h.lena, len(h.b), + h.lenb)) + + self.hunks += 1 + + if self.missing: + self.rej.append(h) + return -1 + + if self.exists and h.createfile(): + self.ui.warn(_("file %s already exists\n") % self.fname) + self.rej.append(h) + return -1 + + if isinstance(h, binhunk): + if h.rmfile(): + self.unlink(self.fname) + else: + self.lines[:] = h.new() + self.offset += len(h.new()) + self.dirty = 1 + return 0 + + horig = h + if (self.eolmode in ('crlf', 'lf') + or self.eolmode == 'auto' and self.eol): + # If new eols are going to be normalized, then normalize + # hunk data before patching. Otherwise, preserve input + # line-endings. + h = h.getnormalized() + + # fast case first, no offsets, no fuzz + old = h.old() + # patch starts counting at 1 unless we are adding the file + if h.starta == 0: + start = 0 + else: + start = h.starta + self.offset - 1 + orig_start = start + # if there's skew we want to emit the "(offset %d lines)" even + # when the hunk cleanly applies at start + skew, so skip the + # fast case code + if self.skew == 0 and diffhelpers.testhunk(old, self.lines, start) == 0: + if h.rmfile(): + self.unlink(self.fname) + else: + self.lines[start : start + h.lena] = h.new() + self.offset += h.lenb - h.lena + self.dirty = 1 + return 0 + + # ok, we couldn't match the hunk. Lets look for offsets and fuzz it + self.hashlines() + if h.hunk[-1][0] != ' ': + # if the hunk tried to put something at the bottom of the file + # override the start line and use eof here + search_start = len(self.lines) + else: + search_start = orig_start + self.skew + + for fuzzlen in xrange(3): + for toponly in [True, False]: + old = h.old(fuzzlen, toponly) + + cand = self.findlines(old[0][1:], search_start) + for l in cand: + if diffhelpers.testhunk(old, self.lines, l) == 0: + newlines = h.new(fuzzlen, toponly) + self.lines[l : l + len(old)] = newlines + self.offset += len(newlines) - len(old) + self.skew = l - orig_start + self.dirty = 1 + offset = l - orig_start - fuzzlen + if fuzzlen: + msg = _("Hunk #%d succeeded at %d " + "with fuzz %d " + "(offset %d lines).\n") + self.printfile(True) + self.ui.warn(msg % + (h.number, l + 1, fuzzlen, offset)) + else: + msg = _("Hunk #%d succeeded at %d " + "(offset %d lines).\n") + self.ui.note(msg % (h.number, l + 1, offset)) + return fuzzlen + self.printfile(True) + self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start)) + self.rej.append(horig) + return -1 + +class hunk(object): + def __init__(self, desc, num, lr, context, create=False, remove=False): + self.number = num + self.desc = desc + self.hunk = [desc] + self.a = [] + self.b = [] + self.starta = self.lena = None + self.startb = self.lenb = None + if lr is not None: + if context: + self.read_context_hunk(lr) + else: + self.read_unified_hunk(lr) + self.create = create + self.remove = remove and not create + + def getnormalized(self): + """Return a copy with line endings normalized to LF.""" + + def normalize(lines): + nlines = [] + for line in lines: + if line.endswith('\r\n'): + line = line[:-2] + '\n' + nlines.append(line) + return nlines + + # Dummy object, it is rebuilt manually + nh = hunk(self.desc, 
self.number, None, None, False, False) + nh.number = self.number + nh.desc = self.desc + nh.hunk = self.hunk + nh.a = normalize(self.a) + nh.b = normalize(self.b) + nh.starta = self.starta + nh.startb = self.startb + nh.lena = self.lena + nh.lenb = self.lenb + nh.create = self.create + nh.remove = self.remove + return nh + + def read_unified_hunk(self, lr): + m = unidesc.match(self.desc) + if not m: + raise PatchError(_("bad hunk #%d") % self.number) + self.starta, foo, self.lena, self.startb, foo2, self.lenb = m.groups() + if self.lena is None: + self.lena = 1 + else: + self.lena = int(self.lena) + if self.lenb is None: + self.lenb = 1 + else: + self.lenb = int(self.lenb) + self.starta = int(self.starta) + self.startb = int(self.startb) + diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a, self.b) + # if we hit eof before finishing out the hunk, the last line will + # be zero length. Lets try to fix it up. + while len(self.hunk[-1]) == 0: + del self.hunk[-1] + del self.a[-1] + del self.b[-1] + self.lena -= 1 + self.lenb -= 1 + + def read_context_hunk(self, lr): + self.desc = lr.readline() + m = contextdesc.match(self.desc) + if not m: + raise PatchError(_("bad hunk #%d") % self.number) + foo, self.starta, foo2, aend, foo3 = m.groups() + self.starta = int(self.starta) + if aend is None: + aend = self.starta + self.lena = int(aend) - self.starta + if self.starta: + self.lena += 1 + for x in xrange(self.lena): + l = lr.readline() + if l.startswith('---'): + # lines addition, old block is empty + lr.push(l) + break + s = l[2:] + if l.startswith('- ') or l.startswith('! '): + u = '-' + s + elif l.startswith(' '): + u = ' ' + s + else: + raise PatchError(_("bad hunk #%d old text line %d") % + (self.number, x)) + self.a.append(u) + self.hunk.append(u) + + l = lr.readline() + if l.startswith('\ '): + s = self.a[-1][:-1] + self.a[-1] = s + self.hunk[-1] = s + l = lr.readline() + m = contextdesc.match(l) + if not m: + raise PatchError(_("bad hunk #%d") % self.number) + foo, self.startb, foo2, bend, foo3 = m.groups() + self.startb = int(self.startb) + if bend is None: + bend = self.startb + self.lenb = int(bend) - self.startb + if self.startb: + self.lenb += 1 + hunki = 1 + for x in xrange(self.lenb): + l = lr.readline() + if l.startswith('\ '): + # XXX: the only way to hit this is with an invalid line range. + # The no-eol marker is not counted in the line range, but I + # guess there are diff(1) out there which behave differently. + s = self.b[-1][:-1] + self.b[-1] = s + self.hunk[hunki - 1] = s + continue + if not l: + # line deletions, new block is empty and we hit EOF + lr.push(l) + break + s = l[2:] + if l.startswith('+ ') or l.startswith('! 
'): + u = '+' + s + elif l.startswith(' '): + u = ' ' + s + elif len(self.b) == 0: + # line deletions, new block is empty + lr.push(l) + break + else: + raise PatchError(_("bad hunk #%d old text line %d") % + (self.number, x)) + self.b.append(s) + while True: + if hunki >= len(self.hunk): + h = "" + else: + h = self.hunk[hunki] + hunki += 1 + if h == u: + break + elif h.startswith('-'): + continue + else: + self.hunk.insert(hunki - 1, u) + break + + if not self.a: + # this happens when lines were only added to the hunk + for x in self.hunk: + if x.startswith('-') or x.startswith(' '): + self.a.append(x) + if not self.b: + # this happens when lines were only deleted from the hunk + for x in self.hunk: + if x.startswith('+') or x.startswith(' '): + self.b.append(x[1:]) + # @@ -start,len +start,len @@ + self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena, + self.startb, self.lenb) + self.hunk[0] = self.desc + + def fix_newline(self): + diffhelpers.fix_newline(self.hunk, self.a, self.b) + + def complete(self): + return len(self.a) == self.lena and len(self.b) == self.lenb + + def createfile(self): + return self.starta == 0 and self.lena == 0 and self.create + + def rmfile(self): + return self.startb == 0 and self.lenb == 0 and self.remove + + def fuzzit(self, l, fuzz, toponly): + # this removes context lines from the top and bottom of list 'l'. It + # checks the hunk to make sure only context lines are removed, and then + # returns a new shortened list of lines. + fuzz = min(fuzz, len(l)-1) + if fuzz: + top = 0 + bot = 0 + hlen = len(self.hunk) + for x in xrange(hlen - 1): + # the hunk starts with the @@ line, so use x+1 + if self.hunk[x + 1][0] == ' ': + top += 1 + else: + break + if not toponly: + for x in xrange(hlen - 1): + if self.hunk[hlen - bot - 1][0] == ' ': + bot += 1 + else: + break + + # top and bot now count context in the hunk + # adjust them if either one is short + context = max(top, bot, 3) + if bot < context: + bot = max(0, fuzz - (context - bot)) + else: + bot = min(fuzz, bot) + if top < context: + top = max(0, fuzz - (context - top)) + else: + top = min(fuzz, top) + + return l[top:len(l)-bot] + return l + + def old(self, fuzz=0, toponly=False): + return self.fuzzit(self.a, fuzz, toponly) + + def new(self, fuzz=0, toponly=False): + return self.fuzzit(self.b, fuzz, toponly) + +class binhunk: + 'A binary patch file. Only understands literals so far.' 
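For orientation, the 'GIT binary patch' literal format handled by binhunk.extract just below prefixes every base85 line with a single length character: 'A'..'Z' stand for 1..26 payload bytes, 'a'..'z' for 27..52. A small standalone check of that mapping, using the same arithmetic and no Mercurial imports:

    def literal_line_len(c):
        # 'A'..'Z' -> 1..26, 'a'..'z' -> 27..52, as in binhunk.extract
        if 'A' <= c <= 'Z':
            return ord(c) - ord('A') + 1
        return ord(c) - ord('a') + 27

    for c in ('A', 'Z', 'a', 'z'):
        print('%s -> %d' % (c, literal_line_len(c)))   # 1, 26, 27, 52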
+ def __init__(self, gitpatch): + self.gitpatch = gitpatch + self.text = None + self.hunk = ['GIT binary patch\n'] + + def createfile(self): + return self.gitpatch.op in ('ADD', 'RENAME', 'COPY') + + def rmfile(self): + return self.gitpatch.op == 'DELETE' + + def complete(self): + return self.text is not None + + def new(self): + return [self.text] + + def extract(self, lr): + line = lr.readline() + self.hunk.append(line) + while line and not line.startswith('literal '): + line = lr.readline() + self.hunk.append(line) + if not line: + raise PatchError(_('could not extract binary patch')) + size = int(line[8:].rstrip()) + dec = [] + line = lr.readline() + self.hunk.append(line) + while len(line) > 1: + l = line[0] + if l <= 'Z' and l >= 'A': + l = ord(l) - ord('A') + 1 + else: + l = ord(l) - ord('a') + 27 + dec.append(base85.b85decode(line[1:-1])[:l]) + line = lr.readline() + self.hunk.append(line) + text = zlib.decompress(''.join(dec)) + if len(text) != size: + raise PatchError(_('binary patch is %d bytes, not %d') % + (len(text), size)) + self.text = text + +def parsefilename(str): + # --- filename \t|space stuff + s = str[4:].rstrip('\r\n') + i = s.find('\t') + if i < 0: + i = s.find(' ') + if i < 0: + return s + return s[:i] + +def pathstrip(path, strip): + pathlen = len(path) + i = 0 + if strip == 0: + return '', path.rstrip() + count = strip + while count > 0: + i = path.find('/', i) + if i == -1: + raise PatchError(_("unable to strip away %d of %d dirs from %s") % + (count, strip, path)) + i += 1 + # consume '//' in the path + while i < pathlen - 1 and path[i] == '/': + i += 1 + count -= 1 + return path[:i].lstrip(), path[i:].rstrip() + +def selectfile(afile_orig, bfile_orig, hunk, strip): + nulla = afile_orig == "/dev/null" + nullb = bfile_orig == "/dev/null" + abase, afile = pathstrip(afile_orig, strip) + gooda = not nulla and os.path.lexists(afile) + bbase, bfile = pathstrip(bfile_orig, strip) + if afile == bfile: + goodb = gooda + else: + goodb = not nullb and os.path.lexists(bfile) + createfunc = hunk.createfile + missing = not goodb and not gooda and not createfunc() + + # some diff programs apparently produce patches where the afile is + # not /dev/null, but afile starts with bfile + abasedir = afile[:afile.rfind('/') + 1] + bbasedir = bfile[:bfile.rfind('/') + 1] + if missing and abasedir == bbasedir and afile.startswith(bfile): + # this isn't very pretty + hunk.create = True + if createfunc(): + missing = False + else: + hunk.create = False + + # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the + # diff is between a file and its backup. In this case, the original + # file should be patched (see original mpatch code). + isbackup = (abase == bbase and bfile.startswith(afile)) + fname = None + if not missing: + if gooda and goodb: + fname = isbackup and afile or bfile + elif gooda: + fname = afile + + if not fname: + if not nullb: + fname = isbackup and afile or bfile + elif not nulla: + fname = afile + else: + raise PatchError(_("undefined source and destination files")) + + return fname, missing + +def scangitpatch(lr, firstline): + """ + Git patches can emit: + - rename a to b + - change b + - copy a to c + - change c + + We cannot apply this sequence as-is, the renamed 'a' could not be + found for it would have been renamed already. And we cannot copy + from 'b' instead because 'b' would have been changed already. So + we scan the git patch for copy and rename commands so we can + perform the copies ahead of time. 
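A toy illustration of why this pre-scan matters (the patch text below is invented): gathering the copy/rename commands first lets the copies happen before any hunk rewrites the source file.

    sample = [
        'diff --git a/a b/b', 'rename from a', 'rename to b',
        '--- a/a', '+++ b/b', '@@ -1 +1 @@', '-old', '+new',
        'diff --git a/a b/c', 'copy from a', 'copy to c',
    ]
    ops, src = [], None
    for line in sample:
        if line.startswith('rename from ') or line.startswith('copy from '):
            src = line.split(' from ', 1)[1]
        elif line.startswith('rename to ') or line.startswith('copy to '):
            ops.append((line.split(' ', 1)[0], src, line.split(' to ', 1)[1]))
    print(ops)   # [('rename', 'a', 'b'), ('copy', 'a', 'c')]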
+ """ + pos = 0 + try: + pos = lr.fp.tell() + fp = lr.fp + except IOError: + fp = cStringIO.StringIO(lr.fp.read()) + gitlr = linereader(fp, lr.textmode) + gitlr.push(firstline) + gitpatches = readgitpatch(gitlr) + fp.seek(pos) + return gitpatches + +def iterhunks(ui, fp, sourcefile=None): + """Read a patch and yield the following events: + - ("file", afile, bfile, firsthunk): select a new target file. + - ("hunk", hunk): a new hunk is ready to be applied, follows a + "file" event. + - ("git", gitchanges): current diff is in git format, gitchanges + maps filenames to gitpatch records. Unique event. + """ + changed = {} + current_hunk = None + afile = "" + bfile = "" + state = None + hunknum = 0 + emitfile = False + git = False + + # our states + BFILE = 1 + context = None + lr = linereader(fp) + # gitworkdone is True if a git operation (copy, rename, ...) was + # performed already for the current file. Useful when the file + # section may have no hunk. + gitworkdone = False + + while True: + newfile = newgitfile = False + x = lr.readline() + if not x: + break + if current_hunk: + if x.startswith('\ '): + current_hunk.fix_newline() + yield 'hunk', current_hunk + current_hunk = None + if ((sourcefile or state == BFILE) and ((not context and x[0] == '@') or + ((context is not False) and x.startswith('***************')))): + if context is None and x.startswith('***************'): + context = True + gpatch = changed.get(bfile) + create = afile == '/dev/null' or gpatch and gpatch.op == 'ADD' + remove = bfile == '/dev/null' or gpatch and gpatch.op == 'DELETE' + current_hunk = hunk(x, hunknum + 1, lr, context, create, remove) + hunknum += 1 + if emitfile: + emitfile = False + yield 'file', (afile, bfile, current_hunk) + elif state == BFILE and x.startswith('GIT binary patch'): + current_hunk = binhunk(changed[bfile]) + hunknum += 1 + if emitfile: + emitfile = False + yield 'file', ('a/' + afile, 'b/' + bfile, current_hunk) + current_hunk.extract(lr) + elif x.startswith('diff --git'): + # check for git diff, scanning the whole patch file if needed + m = gitre.match(x) + gitworkdone = False + if m: + afile, bfile = m.group(1, 2) + if not git: + git = True + gitpatches = scangitpatch(lr, x) + yield 'git', gitpatches + for gp in gitpatches: + changed[gp.path] = gp + # else error? + # copy/rename + modify should modify target, not source + gp = changed.get(bfile) + if gp and (gp.op in ('COPY', 'DELETE', 'RENAME', 'ADD') + or gp.mode): + afile = bfile + gitworkdone = True + newgitfile = True + elif x.startswith('---'): + # check for a unified diff + l2 = lr.readline() + if not l2.startswith('+++'): + lr.push(l2) + continue + newfile = True + context = False + afile = parsefilename(x) + bfile = parsefilename(l2) + elif x.startswith('***'): + # check for a context diff + l2 = lr.readline() + if not l2.startswith('---'): + lr.push(l2) + continue + l3 = lr.readline() + lr.push(l3) + if not l3.startswith("***************"): + lr.push(l2) + continue + newfile = True + context = True + afile = parsefilename(x) + bfile = parsefilename(l2) + + if newfile: + gitworkdone = False + + if newgitfile or newfile: + emitfile = True + state = BFILE + hunknum = 0 + if current_hunk: + if current_hunk.complete(): + yield 'hunk', current_hunk + else: + raise PatchError(_("malformed patch %s %s") % (afile, + current_hunk.desc)) + +def applydiff(ui, fp, changed, strip=1, sourcefile=None, eolmode='strict'): + """Reads a patch from fp and tries to apply it. 
+ + The dict 'changed' is filled in with all of the filenames changed + by the patch. Returns 0 for a clean patch, -1 if any rejects were + found and 1 if there was any fuzz. + + If 'eolmode' is 'strict', the patch content and patched file are + read in binary mode. Otherwise, line endings are ignored when + patching then normalized according to 'eolmode'. + + Callers probably want to call 'cmdutil.updatedir' after this to + apply certain categories of changes not done by this function. + """ + return _applydiff( + ui, fp, patchfile, copyfile, + changed, strip=strip, sourcefile=sourcefile, eolmode=eolmode) + + +def _applydiff(ui, fp, patcher, copyfn, changed, strip=1, + sourcefile=None, eolmode='strict'): + rejects = 0 + err = 0 + current_file = None + cwd = os.getcwd() + opener = util.opener(cwd) + + def closefile(): + if not current_file: + return 0 + if current_file.dirty: + current_file.writelines(current_file.fname, current_file.lines) + current_file.write_rej() + return len(current_file.rej) + + for state, values in iterhunks(ui, fp, sourcefile): + if state == 'hunk': + if not current_file: + continue + ret = current_file.apply(values) + if ret >= 0: + changed.setdefault(current_file.fname, None) + if ret > 0: + err = 1 + elif state == 'file': + rejects += closefile() + afile, bfile, first_hunk = values + try: + if sourcefile: + current_file = patcher(ui, sourcefile, opener, + eolmode=eolmode) + else: + current_file, missing = selectfile(afile, bfile, + first_hunk, strip) + current_file = patcher(ui, current_file, opener, + missing=missing, eolmode=eolmode) + except PatchError, err: + ui.warn(str(err) + '\n') + current_file = None + rejects += 1 + continue + elif state == 'git': + for gp in values: + gp.path = pathstrip(gp.path, strip - 1)[1] + if gp.oldpath: + gp.oldpath = pathstrip(gp.oldpath, strip - 1)[1] + # Binary patches really overwrite target files, copying them + # will just make it fails with "target file exists" + if gp.op in ('COPY', 'RENAME') and not gp.binary: + copyfn(gp.oldpath, gp.path, cwd) + changed[gp.path] = gp + else: + raise util.Abort(_('unsupported parser state: %s') % state) + + rejects += closefile() + + if rejects: + return -1 + return err + +def externalpatch(patcher, patchname, ui, strip, cwd, files): + """use <patcher> to apply <patchname> to the working directory. + returns whether patch was applied with fuzz factor.""" + + fuzz = False + args = [] + if cwd: + args.append('-d %s' % util.shellquote(cwd)) + fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip, + util.shellquote(patchname))) + + for line in fp: + line = line.rstrip() + ui.note(line + '\n') + if line.startswith('patching file '): + pf = util.parse_patch_output(line) + printed_file = False + files.setdefault(pf, None) + elif line.find('with fuzz') >= 0: + fuzz = True + if not printed_file: + ui.warn(pf + '\n') + printed_file = True + ui.warn(line + '\n') + elif line.find('saving rejects to file') >= 0: + ui.warn(line + '\n') + elif line.find('FAILED') >= 0: + if not printed_file: + ui.warn(pf + '\n') + printed_file = True + ui.warn(line + '\n') + code = fp.close() + if code: + raise PatchError(_("patch command failed: %s") % + util.explain_exit(code)[0]) + return fuzz + +def internalpatch(patchobj, ui, strip, cwd, files=None, eolmode='strict'): + """use builtin patch to apply <patchobj> to the working directory. 
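A minimal sketch of the eolmode handling described above, roughly what patchfile.writelines does when eolmode is not 'strict': hunks are matched ignoring line endings, then the output is rewritten with the requested EOL.

    def normalize_eol(lines, eol):
        # assumes the lines were read in text mode and end in '\n'
        out = []
        for l in lines:
            if l and l[-1] == '\n':
                l = l[:-1] + eol
            out.append(l)
        return out

    print(normalize_eol(['a\n', 'b\n'], '\r\n'))   # ['a\r\n', 'b\r\n']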
+ returns whether patch was applied with fuzz factor.""" + + if files is None: + files = {} + if eolmode is None: + eolmode = ui.config('patch', 'eol', 'strict') + if eolmode.lower() not in eolmodes: + raise util.Abort(_('unsupported line endings type: %s') % eolmode) + eolmode = eolmode.lower() + + try: + fp = open(patchobj, 'rb') + except TypeError: + fp = patchobj + if cwd: + curdir = os.getcwd() + os.chdir(cwd) + try: + ret = applydiff(ui, fp, files, strip=strip, eolmode=eolmode) + finally: + if cwd: + os.chdir(curdir) + if fp != patchobj: + fp.close() + if ret < 0: + raise PatchError(_('patch failed to apply')) + return ret > 0 + +def patch(patchname, ui, strip=1, cwd=None, files=None, eolmode='strict'): + """Apply <patchname> to the working directory. + + 'eolmode' specifies how end of lines should be handled. It can be: + - 'strict': inputs are read in binary mode, EOLs are preserved + - 'crlf': EOLs are ignored when patching and reset to CRLF + - 'lf': EOLs are ignored when patching and reset to LF + - None: get it from user settings, default to 'strict' + 'eolmode' is ignored when using an external patcher program. + + Returns whether patch was applied with fuzz factor. + """ + patcher = ui.config('ui', 'patch') + if files is None: + files = {} + try: + if patcher: + return externalpatch(patcher, patchname, ui, strip, cwd, files) + return internalpatch(patchname, ui, strip, cwd, files, eolmode) + except PatchError, err: + raise util.Abort(str(err)) + +def b85diff(to, tn): + '''print base85-encoded binary diff''' + def gitindex(text): + if not text: + return hex(nullid) + l = len(text) + s = util.sha1('blob %d\0' % l) + s.update(text) + return s.hexdigest() + + def fmtline(line): + l = len(line) + if l <= 26: + l = chr(ord('A') + l - 1) + else: + l = chr(l - 26 + ord('a') - 1) + return '%c%s\n' % (l, base85.b85encode(line, True)) + + def chunk(text, csize=52): + l = len(text) + i = 0 + while i < l: + yield text[i:i + csize] + i += csize + + tohash = gitindex(to) + tnhash = gitindex(tn) + if tohash == tnhash: + return "" + + # TODO: deltas + ret = ['index %s..%s\nGIT binary patch\nliteral %s\n' % + (tohash, tnhash, len(tn))] + for l in chunk(zlib.compress(tn)): + ret.append(fmtline(l)) + ret.append('\n') + return ''.join(ret) + +class GitDiffRequired(Exception): + pass + +def diffopts(ui, opts=None, untrusted=False): + def get(key, name=None, getter=ui.configbool): + return ((opts and opts.get(key)) or + getter('diff', name or key, None, untrusted=untrusted)) + return mdiff.diffopts( + text=opts and opts.get('text'), + git=get('git'), + nodates=get('nodates'), + showfunc=get('show_function', 'showfunc'), + ignorews=get('ignore_all_space', 'ignorews'), + ignorewsamount=get('ignore_space_change', 'ignorewsamount'), + ignoreblanklines=get('ignore_blank_lines', 'ignoreblanklines'), + context=get('unified', getter=ui.config)) + +def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None, + losedatafn=None, prefix=''): + '''yields diff of changes to files between two nodes, or node and + working directory. + + if node1 is None, use first dirstate parent instead. + if node2 is None, compare node1 with working directory. + + losedatafn(**kwarg) is a callable run when opts.upgrade=True and + every time some change cannot be represented with the current + patch format. Return False to upgrade to git patch format, True to + accept the loss or raise an exception to abort the diff. It is + called with the name of current file being diffed as 'fn'. 
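The gitindex() helper above computes the standard git blob id: SHA-1 over a 'blob <size>\0' header followed by the contents. A standalone check using only hashlib (the sample data is made up); the printed id matches what git hash-object reports for the same bytes.

    import hashlib

    def gitindex(data):
        # sha1('blob <len>\0' + data)
        return hashlib.sha1(b'blob %d\0' % len(data) + data).hexdigest()

    print(gitindex(b'hello\n'))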
If set + to None, patches will always be upgraded to git format when + necessary. + + prefix is a filename prefix that is prepended to all filenames on + display (used for subrepos). + ''' + + if opts is None: + opts = mdiff.defaultopts + + if not node1 and not node2: + node1 = repo.dirstate.parents()[0] + + def lrugetfilectx(): + cache = {} + order = [] + def getfilectx(f, ctx): + fctx = ctx.filectx(f, filelog=cache.get(f)) + if f not in cache: + if len(cache) > 20: + del cache[order.pop(0)] + cache[f] = fctx.filelog() + else: + order.remove(f) + order.append(f) + return fctx + return getfilectx + getfilectx = lrugetfilectx() + + ctx1 = repo[node1] + ctx2 = repo[node2] + + if not changes: + changes = repo.status(ctx1, ctx2, match=match) + modified, added, removed = changes[:3] + + if not modified and not added and not removed: + return [] + + revs = None + if not repo.ui.quiet: + hexfunc = repo.ui.debugflag and hex or short + revs = [hexfunc(node) for node in [node1, node2] if node] + + copy = {} + if opts.git or opts.upgrade: + copy = copies.copies(repo, ctx1, ctx2, repo[nullid])[0] + + difffn = lambda opts, losedata: trydiff(repo, revs, ctx1, ctx2, + modified, added, removed, copy, getfilectx, opts, losedata, prefix) + if opts.upgrade and not opts.git: + try: + def losedata(fn): + if not losedatafn or not losedatafn(fn=fn): + raise GitDiffRequired() + # Buffer the whole output until we are sure it can be generated + return list(difffn(opts.copy(git=False), losedata)) + except GitDiffRequired: + return difffn(opts.copy(git=True), None) + else: + return difffn(opts, None) + +def difflabel(func, *args, **kw): + '''yields 2-tuples of (output, label) based on the output of func()''' + prefixes = [('diff', 'diff.diffline'), + ('copy', 'diff.extended'), + ('rename', 'diff.extended'), + ('old', 'diff.extended'), + ('new', 'diff.extended'), + ('deleted', 'diff.extended'), + ('---', 'diff.file_a'), + ('+++', 'diff.file_b'), + ('@@', 'diff.hunk'), + ('-', 'diff.deleted'), + ('+', 'diff.inserted')] + + for chunk in func(*args, **kw): + lines = chunk.split('\n') + for i, line in enumerate(lines): + if i != 0: + yield ('\n', '') + stripline = line + if line and line[0] in '+-': + # highlight trailing whitespace, but only in changed lines + stripline = line.rstrip() + for prefix, label in prefixes: + if stripline.startswith(prefix): + yield (stripline, label) + break + else: + yield (line, '') + if line != stripline: + yield (line[len(stripline):], 'diff.trailingwhitespace') + +def diffui(*args, **kw): + '''like diff(), but yields 2-tuples of (output, label) for ui.write()''' + return difflabel(diff, *args, **kw) + + +def _addmodehdr(header, omode, nmode): + if omode != nmode: + header.append('old mode %s\n' % omode) + header.append('new mode %s\n' % nmode) + +def trydiff(repo, revs, ctx1, ctx2, modified, added, removed, + copy, getfilectx, opts, losedatafn, prefix): + + def join(f): + return os.path.join(prefix, f) + + date1 = util.datestr(ctx1.date()) + man1 = ctx1.manifest() + + gone = set() + gitmode = {'l': '120000', 'x': '100755', '': '100644'} + + copyto = dict([(v, k) for k, v in copy.items()]) + + if opts.git: + revs = None + + for f in sorted(modified + added + removed): + to = None + tn = None + dodiff = True + header = [] + if f in man1: + to = getfilectx(f, ctx1).data() + if f not in removed: + tn = getfilectx(f, ctx2).data() + a, b = f, f + if opts.git or losedatafn: + if f in added: + mode = gitmode[ctx2.flags(f)] + if f in copy or f in copyto: + if opts.git: + if f in copy: + a = 
copy[f] + else: + a = copyto[f] + omode = gitmode[man1.flags(a)] + _addmodehdr(header, omode, mode) + if a in removed and a not in gone: + op = 'rename' + gone.add(a) + else: + op = 'copy' + header.append('%s from %s\n' % (op, join(a))) + header.append('%s to %s\n' % (op, join(f))) + to = getfilectx(a, ctx1).data() + else: + losedatafn(f) + else: + if opts.git: + header.append('new file mode %s\n' % mode) + elif ctx2.flags(f): + losedatafn(f) + # In theory, if tn was copied or renamed we should check + # if the source is binary too but the copy record already + # forces git mode. + if util.binary(tn): + if opts.git: + dodiff = 'binary' + else: + losedatafn(f) + if not opts.git and not tn: + # regular diffs cannot represent new empty file + losedatafn(f) + elif f in removed: + if opts.git: + # have we already reported a copy above? + if ((f in copy and copy[f] in added + and copyto[copy[f]] == f) or + (f in copyto and copyto[f] in added + and copy[copyto[f]] == f)): + dodiff = False + else: + header.append('deleted file mode %s\n' % + gitmode[man1.flags(f)]) + elif not to or util.binary(to): + # regular diffs cannot represent empty file deletion + losedatafn(f) + else: + oflag = man1.flags(f) + nflag = ctx2.flags(f) + binary = util.binary(to) or util.binary(tn) + if opts.git: + _addmodehdr(header, gitmode[oflag], gitmode[nflag]) + if binary: + dodiff = 'binary' + elif binary or nflag != oflag: + losedatafn(f) + if opts.git: + header.insert(0, mdiff.diffline(revs, join(a), join(b), opts)) + + if dodiff: + if dodiff == 'binary': + text = b85diff(to, tn) + else: + text = mdiff.unidiff(to, date1, + # ctx2 date may be dynamic + tn, util.datestr(ctx2.date()), + join(a), join(b), revs, opts=opts) + if header and (text or len(header) > 1): + yield ''.join(header) + if text: + yield text + +def diffstatdata(lines): + filename, adds, removes = None, 0, 0 + for line in lines: + if line.startswith('diff'): + if filename: + isbinary = adds == 0 and removes == 0 + yield (filename, adds, removes, isbinary) + # set numbers to 0 anyway when starting new file + adds, removes = 0, 0 + if line.startswith('diff --git'): + filename = gitre.search(line).group(1) + else: + # format: "diff -r ... -r ... filename" + filename = line.split(None, 5)[-1] + elif line.startswith('+') and not line.startswith('+++'): + adds += 1 + elif line.startswith('-') and not line.startswith('---'): + removes += 1 + if filename: + isbinary = adds == 0 and removes == 0 + yield (filename, adds, removes, isbinary) + +def diffstat(lines, width=80, git=False): + output = [] + stats = list(diffstatdata(lines)) + + maxtotal, maxname = 0, 0 + totaladds, totalremoves = 0, 0 + hasbinary = False + + sized = [(filename, adds, removes, isbinary, encoding.colwidth(filename)) + for filename, adds, removes, isbinary in stats] + + for filename, adds, removes, isbinary, namewidth in sized: + totaladds += adds + totalremoves += removes + maxname = max(maxname, namewidth) + maxtotal = max(maxtotal, adds + removes) + if isbinary: + hasbinary = True + + countwidth = len(str(maxtotal)) + if hasbinary and countwidth < 3: + countwidth = 3 + graphwidth = width - countwidth - maxname - 6 + if graphwidth < 10: + graphwidth = 10 + + def scale(i): + if maxtotal <= graphwidth: + return i + # If diffstat runs out of room it doesn't print anything, + # which isn't very useful, so always print at least one + or - + # if there were at least some changes. 
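A quick standalone check of the scaling rule stated in the comment above (the graph width and counts are arbitrary sample values):

    def scale(i, graphwidth, maxtotal):
        if maxtotal <= graphwidth:
            return i
        # squeeze into the graph, but never round a nonzero count down to zero
        return max(i * graphwidth // maxtotal, int(bool(i)))

    print(scale(200, 53, 400))   # 26
    print(scale(1, 53, 400))     # 1, a single '+' still shows up
    print(scale(0, 53, 400))     # 0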
+ return max(i * graphwidth // maxtotal, int(bool(i))) + + for filename, adds, removes, isbinary, namewidth in sized: + if git and isbinary: + count = 'Bin' + else: + count = adds + removes + pluses = '+' * scale(adds) + minuses = '-' * scale(removes) + output.append(' %s%s | %*s %s%s\n' % + (filename, ' ' * (maxname - namewidth), + countwidth, count, + pluses, minuses)) + + if stats: + output.append(_(' %d files changed, %d insertions(+), %d deletions(-)\n') + % (len(stats), totaladds, totalremoves)) + + return ''.join(output) + +def diffstatui(*args, **kw): + '''like diffstat(), but yields 2-tuples of (output, label) for + ui.write() + ''' + + for line in diffstat(*args, **kw).splitlines(): + if line and line[-1] in '+-': + name, graph = line.rsplit(' ', 1) + yield (name + ' ', '') + m = re.search(r'\++', graph) + if m: + yield (m.group(0), 'diffstat.inserted') + m = re.search(r'-+', graph) + if m: + yield (m.group(0), 'diffstat.deleted') + else: + yield (line, '') + yield ('\n', '') diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/patch.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/patch.pyo Binary files differnew file mode 100644 index 0000000..404d3fa --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/patch.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/posix.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/posix.py new file mode 100644 index 0000000..39ea40a --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/posix.py @@ -0,0 +1,297 @@ +# posix.py - Posix utility function implementations for Mercurial +# +# Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. 
+ +from i18n import _ +import osutil +import os, sys, errno, stat, getpass, pwd, grp + +posixfile = open +nulldev = '/dev/null' +normpath = os.path.normpath +samestat = os.path.samestat +rename = os.rename +expandglobs = False + +umask = os.umask(0) +os.umask(umask) + +def openhardlinks(): + '''return true if it is safe to hold open file handles to hardlinks''' + return True + +def rcfiles(path): + rcs = [os.path.join(path, 'hgrc')] + rcdir = os.path.join(path, 'hgrc.d') + try: + rcs.extend([os.path.join(rcdir, f) + for f, kind in osutil.listdir(rcdir) + if f.endswith(".rc")]) + except OSError: + pass + return rcs + +def system_rcpath(): + path = [] + # old mod_python does not set sys.argv + if len(getattr(sys, 'argv', [])) > 0: + path.extend(rcfiles(os.path.dirname(sys.argv[0]) + + '/../etc/mercurial')) + path.extend(rcfiles('/etc/mercurial')) + return path + +def user_rcpath(): + return [os.path.expanduser('~/.hgrc')] + +def parse_patch_output(output_line): + """parses the output produced by patch and returns the filename""" + pf = output_line[14:] + if os.sys.platform == 'OpenVMS': + if pf[0] == '`': + pf = pf[1:-1] # Remove the quotes + else: + if pf.startswith("'") and pf.endswith("'") and " " in pf: + pf = pf[1:-1] # Remove the quotes + return pf + +def sshargs(sshcmd, host, user, port): + '''Build argument list for ssh''' + args = user and ("%s@%s" % (user, host)) or host + return port and ("%s -p %s" % (args, port)) or args + +def is_exec(f): + """check whether a file is executable""" + return (os.lstat(f).st_mode & 0100 != 0) + +def set_flags(f, l, x): + s = os.lstat(f).st_mode + if l: + if not stat.S_ISLNK(s): + # switch file to link + data = open(f).read() + os.unlink(f) + try: + os.symlink(data, f) + except: + # failed to make a link, rewrite file + open(f, "w").write(data) + # no chmod needed at this point + return + if stat.S_ISLNK(s): + # switch link to file + data = os.readlink(f) + os.unlink(f) + open(f, "w").write(data) + s = 0666 & ~umask # avoid restatting for chmod + + sx = s & 0100 + if x and not sx: + # Turn on +x for every +r bit when making a file executable + # and obey umask. + os.chmod(f, s | (s & 0444) >> 2 & ~umask) + elif not x and sx: + # Turn off all +x bits + os.chmod(f, s & 0666) + +def set_binary(fd): + pass + +def pconvert(path): + return path + +def localpath(path): + return path + +def samefile(fpath1, fpath2): + """Returns whether path1 and path2 refer to the same file. This is only + guaranteed to work for files, not directories.""" + return os.path.samefile(fpath1, fpath2) + +def samedevice(fpath1, fpath2): + """Returns whether fpath1 and fpath2 are on the same device. This is only + guaranteed to work for files, not directories.""" + st1 = os.lstat(fpath1) + st2 = os.lstat(fpath2) + return st1.st_dev == st2.st_dev + +if sys.platform == 'darwin': + import fcntl # only needed on darwin, missing on jython + def realpath(path): + ''' + Returns the true, canonical file system path equivalent to the given + path. + + Equivalent means, in this case, resulting in the same, unique + file system link to the path. Every file system entry, whether a file, + directory, hard link or symbolic link or special, will have a single + path preferred by the system, but may allow multiple, differing path + lookups to point to it. + + Most regular UNIX file systems only allow a file system entry to be + looked up by its distinct path. Obviously, this does not apply to case + insensitive file systems, whether case preserving or not. 
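A worked example of the chmod rule used by set_flags above when turning on +x: every +r bit gains the matching +x bit, filtered through the umask (the mode and umask here are arbitrary sample values):

    s = 0o644        # rw-r--r--
    umask = 0o022
    newmode = s | (s & 0o444) >> 2 & ~umask
    print(oct(newmode))   # 0o755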
The most + complex issue to deal with is file systems transparently reencoding the + path, such as the non-standard Unicode normalisation required for HFS+ + and HFSX. + ''' + # Constants copied from /usr/include/sys/fcntl.h + F_GETPATH = 50 + O_SYMLINK = 0x200000 + + try: + fd = os.open(path, O_SYMLINK) + except OSError, err: + if err.errno == errno.ENOENT: + return path + raise + + try: + return fcntl.fcntl(fd, F_GETPATH, '\0' * 1024).rstrip('\0') + finally: + os.close(fd) +else: + # Fallback to the likely inadequate Python builtin function. + realpath = os.path.realpath + +def shellquote(s): + if os.sys.platform == 'OpenVMS': + return '"%s"' % s + else: + return "'%s'" % s.replace("'", "'\\''") + +def quotecommand(cmd): + return cmd + +def popen(command, mode='r'): + return os.popen(command, mode) + +def testpid(pid): + '''return False if pid dead, True if running or not sure''' + if os.sys.platform == 'OpenVMS': + return True + try: + os.kill(pid, 0) + return True + except OSError, inst: + return inst.errno != errno.ESRCH + +def explain_exit(code): + """return a 2-tuple (desc, code) describing a subprocess status + (codes from kill are negative - not os.system/wait encoding)""" + if code >= 0: + return _("exited with status %d") % code, code + return _("killed by signal %d") % -code, -code + +def isowner(st): + """Return True if the stat object st is from the current user.""" + return st.st_uid == os.getuid() + +def find_exe(command): + '''Find executable for command searching like which does. + If command is a basename then PATH is searched for command. + PATH isn't searched if command is an absolute or relative path. + If command isn't found None is returned.''' + if sys.platform == 'OpenVMS': + return command + + def findexisting(executable): + 'Will return executable if existing file' + if os.path.exists(executable): + return executable + return None + + if os.sep in command: + return findexisting(command) + + for path in os.environ.get('PATH', '').split(os.pathsep): + executable = findexisting(os.path.join(path, command)) + if executable is not None: + return executable + return None + +def set_signal_handler(): + pass + +def statfiles(files): + 'Stat each file in files and yield stat or None if file does not exist.' + lstat = os.lstat + for nf in files: + try: + st = lstat(nf) + except OSError, err: + if err.errno not in (errno.ENOENT, errno.ENOTDIR): + raise + st = None + yield st + +def getuser(): + '''return name of current user''' + return getpass.getuser() + +def expand_glob(pats): + '''On Windows, expand the implicit globs in a list of patterns''' + return list(pats) + +def username(uid=None): + """Return the name of the user with the given uid. + + If uid is None, return the name of the current user.""" + + if uid is None: + uid = os.getuid() + try: + return pwd.getpwuid(uid)[0] + except KeyError: + return str(uid) + +def groupname(gid=None): + """Return the name of the group with the given gid. + + If gid is None, return the name of the current group.""" + + if gid is None: + gid = os.getgid() + try: + return grp.getgrgid(gid)[0] + except KeyError: + return str(gid) + +def groupmembers(name): + """Return the list of members of the group with the given + name, KeyError if the group does not exist. 
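A quick check of the POSIX shellquote rule defined above: wrap the argument in single quotes and replace each embedded single quote with the '\'' escape.

    def shellquote(s):
        return "'%s'" % s.replace("'", "'\\''")

    print(shellquote("it's here"))   # 'it'\''s here'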
+ """ + return list(grp.getgrnam(name).gr_mem) + +def spawndetached(args): + return os.spawnvp(os.P_NOWAIT | getattr(os, 'P_DETACH', 0), + args[0], args) + +def gethgcmd(): + return sys.argv[:1] + +def termwidth(): + try: + import termios, array, fcntl + for dev in (sys.stderr, sys.stdout, sys.stdin): + try: + try: + fd = dev.fileno() + except AttributeError: + continue + if not os.isatty(fd): + continue + arri = fcntl.ioctl(fd, termios.TIOCGWINSZ, '\0' * 8) + return array.array('h', arri)[1] + except ValueError: + pass + except IOError, e: + if e[0] == errno.EINVAL: + pass + else: + raise + except ImportError: + pass + return 80 diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/posix.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/posix.pyo Binary files differnew file mode 100644 index 0000000..4aea4bf --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/posix.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/pushkey.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/pushkey.py new file mode 100644 index 0000000..d7868e6 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/pushkey.py @@ -0,0 +1,31 @@ +# pushkey.py - dispatching for pushing and pulling keys +# +# Copyright 2010 Matt Mackall <mpm@selenic.com> +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +def _nslist(repo): + n = {} + for k in _namespaces: + n[k] = "" + return n + +_namespaces = {"namespaces": (lambda *x: False, _nslist)} + +def register(namespace, pushkey, listkeys): + _namespaces[namespace] = (pushkey, listkeys) + +def _get(namespace): + return _namespaces.get(namespace, (lambda *x: False, lambda *x: {})) + +def push(repo, namespace, key, old, new): + '''should succeed iff value was old''' + pk = _get(namespace)[0] + return pk(repo, key, old, new) + +def list(repo, namespace): + '''return a dict''' + lk = _get(namespace)[1] + return lk(repo) + diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/pushkey.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/pushkey.pyo Binary files differnew file mode 100644 index 0000000..9d4d579 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/pushkey.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/py3kcompat.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/py3kcompat.py new file mode 100644 index 0000000..8843e9e --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/py3kcompat.py @@ -0,0 +1,72 @@ +# py3kcompat.py - compatibility definitions for running hg in py3k +# +# Copyright 2010 Renato Cunha <renatoc@gmail.com> +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +import os, builtins + +from numbers import Number + +def bytesformatter(format, args): + '''Custom implementation of a formatter for bytestrings. + + This function currently relias on the string formatter to do the + formatting and always returns bytes objects. + + >>> bytesformatter(20, 10) + 0 + >>> bytesformatter('unicode %s, %s!', ('string', 'foo')) + b'unicode string, foo!' 
+ >>> bytesformatter(b'test %s', 'me') + b'test me' + >>> bytesformatter('test %s', 'me') + b'test me' + >>> bytesformatter(b'test %s', b'me') + b'test me' + >>> bytesformatter('test %s', b'me') + b'test me' + >>> bytesformatter('test %d: %s', (1, b'result')) + b'test 1: result' + ''' + # The current implementation just converts from bytes to unicode, do + # what's needed and then convert the results back to bytes. + # Another alternative is to use the Python C API implementation. + if isinstance(format, Number): + # If the fixer erroneously passes a number remainder operation to + # bytesformatter, we just return the correct operation + return format % args + if isinstance(format, bytes): + format = format.decode('utf-8', 'surrogateescape') + if isinstance(args, bytes): + args = args.decode('utf-8', 'surrogateescape') + if isinstance(args, tuple): + newargs = [] + for arg in args: + if isinstance(arg, bytes): + arg = arg.decode('utf-8', 'surrogateescape') + newargs.append(arg) + args = tuple(newargs) + ret = format % args + return ret.encode('utf-8', 'surrogateescape') +builtins.bytesformatter = bytesformatter + +# Create bytes equivalents for os.environ values +for key in list(os.environ.keys()): + # UTF-8 is fine for us + bkey = key.encode('utf-8', 'surrogateescape') + bvalue = os.environ[key].encode('utf-8', 'surrogateescape') + os.environ[bkey] = bvalue + +origord = builtins.ord +def fakeord(char): + if isinstance(char, int): + return char + return origord(char) +builtins.ord = fakeord + +if __name__ == '__main__': + import doctest + doctest.testmod() + diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/py3kcompat.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/py3kcompat.pyo Binary files differnew file mode 100644 index 0000000..625e824 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/py3kcompat.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/repair.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/repair.py new file mode 100644 index 0000000..2685d34 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/repair.py @@ -0,0 +1,167 @@ +# repair.py - functions for repository repair for mercurial +# +# Copyright 2005, 2006 Chris Mason <mason@suse.com> +# Copyright 2007 Matt Mackall +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. 
+ +import changegroup +from node import nullrev, short +from i18n import _ +import os + +def _bundle(repo, bases, heads, node, suffix, extranodes=None, compress=True): + """create a bundle with the specified revisions as a backup""" + cg = repo.changegroupsubset(bases, heads, 'strip', extranodes) + backupdir = repo.join("strip-backup") + if not os.path.isdir(backupdir): + os.mkdir(backupdir) + name = os.path.join(backupdir, "%s-%s.hg" % (short(node), suffix)) + if compress: + bundletype = "HG10BZ" + else: + bundletype = "HG10UN" + return changegroup.writebundle(cg, name, bundletype) + +def _collectfiles(repo, striprev): + """find out the filelogs affected by the strip""" + files = set() + + for x in xrange(striprev, len(repo)): + files.update(repo[x].files()) + + return sorted(files) + +def _collectextranodes(repo, files, link): + """return the nodes that have to be saved before the strip""" + def collectone(cl, revlog): + extra = [] + startrev = count = len(revlog) + # find the truncation point of the revlog + for i in xrange(count): + lrev = revlog.linkrev(i) + if lrev >= link: + startrev = i + 1 + break + + # see if any revision after that point has a linkrev less than link + # (we have to manually save these guys) + for i in xrange(startrev, count): + node = revlog.node(i) + lrev = revlog.linkrev(i) + if lrev < link: + extra.append((node, cl.node(lrev))) + + return extra + + extranodes = {} + cl = repo.changelog + extra = collectone(cl, repo.manifest) + if extra: + extranodes[1] = extra + for fname in files: + f = repo.file(fname) + extra = collectone(cl, f) + if extra: + extranodes[fname] = extra + + return extranodes + +def strip(ui, repo, node, backup="all"): + cl = repo.changelog + # TODO delete the undo files, and handle undo of merge sets + striprev = cl.rev(node) + + keeppartialbundle = backup == 'strip' + + # Some revisions with rev > striprev may not be descendants of striprev. + # We have to find these revisions and put them in a bundle, so that + # we can restore them after the truncations. + # To create the bundle we use repo.changegroupsubset which requires + # the list of heads and bases of the set of interesting revisions. + # (head = revision in the set that has no descendant in the set; + # base = revision in the set that has no ancestor in the set) + tostrip = set((striprev,)) + saveheads = set() + savebases = [] + for r in xrange(striprev + 1, len(cl)): + parents = cl.parentrevs(r) + if parents[0] in tostrip or parents[1] in tostrip: + # r is a descendant of striprev + tostrip.add(r) + # if this is a merge and one of the parents does not descend + # from striprev, mark that parent as a savehead. 
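A toy illustration of the bookkeeping the comments above describe (revision numbers invented): revisions after striprev that do not descend from it are exactly the ones that must be saved to the temporary bundle and re-added after truncation.

    # rev -> (p1, p2); -1 stands for the null revision
    parents = {3: (2, -1), 4: (1, -1), 5: (3, 4)}
    striprev = 2

    tostrip = set([striprev])
    for r in sorted(parents):
        if parents[r][0] in tostrip or parents[r][1] in tostrip:
            tostrip.add(r)           # r descends from striprev

    saved = sorted(r for r in parents if r not in tostrip)
    print('strip %s, save %s' % (sorted(tostrip), saved))   # strip [2, 3, 5], save [4]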
+ if parents[1] != nullrev: + for p in parents: + if p not in tostrip and p > striprev: + saveheads.add(p) + else: + # if no parents of this revision will be stripped, mark it as + # a savebase + if parents[0] < striprev and parents[1] < striprev: + savebases.append(cl.node(r)) + + saveheads.difference_update(parents) + saveheads.add(r) + + saveheads = [cl.node(r) for r in saveheads] + files = _collectfiles(repo, striprev) + + extranodes = _collectextranodes(repo, files, striprev) + + # create a changegroup for all the branches we need to keep + backupfile = None + if backup == "all": + backupfile = _bundle(repo, [node], cl.heads(), node, 'backup') + repo.ui.status(_("saved backup bundle to %s\n") % backupfile) + if saveheads or extranodes: + # do not compress partial bundle if we remove it from disk later + chgrpfile = _bundle(repo, savebases, saveheads, node, 'temp', + extranodes=extranodes, compress=keeppartialbundle) + + mfst = repo.manifest + + tr = repo.transaction("strip") + offset = len(tr.entries) + + try: + tr.startgroup() + cl.strip(striprev, tr) + mfst.strip(striprev, tr) + for fn in files: + repo.file(fn).strip(striprev, tr) + tr.endgroup() + + try: + for i in xrange(offset, len(tr.entries)): + file, troffset, ignore = tr.entries[i] + repo.sopener(file, 'a').truncate(troffset) + tr.close() + except: + tr.abort() + raise + + if saveheads or extranodes: + ui.note(_("adding branch\n")) + f = open(chgrpfile, "rb") + gen = changegroup.readbundle(f, chgrpfile) + if not repo.ui.verbose: + # silence internal shuffling chatter + repo.ui.pushbuffer() + repo.addchangegroup(gen, 'strip', 'bundle:' + chgrpfile, True) + if not repo.ui.verbose: + repo.ui.popbuffer() + f.close() + if not keeppartialbundle: + os.unlink(chgrpfile) + except: + if backupfile: + ui.warn(_("strip failed, full bundle stored in '%s'\n") + % backupfile) + elif saveheads: + ui.warn(_("strip failed, partial bundle stored in '%s'\n") + % chgrpfile) + raise + + repo.destroyed() diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/repair.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/repair.pyo Binary files differnew file mode 100644 index 0000000..0d61074 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/repair.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/repo.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/repo.py new file mode 100644 index 0000000..ac5510f --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/repo.py @@ -0,0 +1,37 @@ +# repo.py - repository base classes for mercurial +# +# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com> +# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com> +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +from i18n import _ +import error + +class repository(object): + def capable(self, name): + '''tell whether repo supports named capability. + return False if not supported. + if boolean capability, return True. 
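Capability strings come in two forms, bare names for boolean capabilities and 'name=value' for valued ones. A standalone sketch of that lookup over a sample capability set (the names below are only illustrative):

    capabilities = set(['lookup', 'pushkey', 'unbundle=HG10GZ,HG10BZ,HG10UN'])

    def capable(name):
        if name in capabilities:
            return True
        name_eq = name + '='
        for cap in capabilities:
            if cap.startswith(name_eq):
                return cap[len(name_eq):]
        return False

    print(capable('lookup'))      # True
    print(capable('unbundle'))    # HG10GZ,HG10BZ,HG10UN
    print(capable('largefiles'))  # False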
+ if string capability, return string.''' + if name in self.capabilities: + return True + name_eq = name + '=' + for cap in self.capabilities: + if cap.startswith(name_eq): + return cap[len(name_eq):] + return False + + def requirecap(self, name, purpose): + '''raise an exception if the given capability is not present''' + if not self.capable(name): + raise error.CapabilityError( + _('cannot %s; remote repository does not ' + 'support the %r capability') % (purpose, name)) + + def local(self): + return False + + def cancopy(self): + return self.local() diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/repo.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/repo.pyo Binary files differnew file mode 100644 index 0000000..0cdc8cb --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/repo.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/revlog.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/revlog.py new file mode 100644 index 0000000..de94492 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/revlog.py @@ -0,0 +1,1477 @@ +# revlog.py - storage back-end for mercurial +# +# Copyright 2005-2007 Matt Mackall <mpm@selenic.com> +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +"""Storage back-end for Mercurial. + +This provides efficient delta storage with O(1) retrieve and append +and O(changes) merge between branches. +""" + +# import stuff from node for others to import from revlog +from node import bin, hex, nullid, nullrev, short #@UnusedImport +from i18n import _ +import changegroup, ancestor, mdiff, parsers, error, util +import struct, zlib, errno + +_pack = struct.pack +_unpack = struct.unpack +_compress = zlib.compress +_decompress = zlib.decompress +_sha = util.sha1 + +# revlog header flags +REVLOGV0 = 0 +REVLOGNG = 1 +REVLOGNGINLINEDATA = (1 << 16) +REVLOGSHALLOW = (1 << 17) +REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA +REVLOG_DEFAULT_FORMAT = REVLOGNG +REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS +REVLOGNG_FLAGS = REVLOGNGINLINEDATA | REVLOGSHALLOW + +# revlog index flags +REVIDX_PARENTDELTA = 1 +REVIDX_PUNCHED_FLAG = 2 +REVIDX_KNOWN_FLAGS = REVIDX_PUNCHED_FLAG | REVIDX_PARENTDELTA + +# amount of data read unconditionally, should be >= 4 +# when not inline: threshold for using lazy index +_prereadsize = 1048576 +# max size of revlog with inline data +_maxinline = 131072 + +RevlogError = error.RevlogError +LookupError = error.LookupError + +def getoffset(q): + return int(q >> 16) + +def gettype(q): + return int(q & 0xFFFF) + +def offset_type(offset, type): + return long(long(offset) << 16 | type) + +nullhash = _sha(nullid) + +def hash(text, p1, p2): + """generate a hash from the given text and its parent hashes + + This hash combines both the current file contents and its history + in a manner that makes it easy to distinguish nodes with the same + content in the revision graph. 
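A standalone sketch of the node id scheme hash() implements: SHA-1 over the two parent ids in sorted order followed by the text (nullid is twenty zero bytes; the sample revisions below are made up).

    import hashlib, binascii

    nullid = b'\x00' * 20

    def nodehash(text, p1, p2=nullid):
        # equivalent form of hash() above: sha1(min(p1, p2) + max(p1, p2) + text)
        s = hashlib.sha1(min(p1, p2))
        s.update(max(p1, p2))
        s.update(text)
        return s.digest()

    root = nodehash(b'first version\n', nullid)
    child = nodehash(b'second version\n', root)
    print(binascii.hexlify(root))
    print(binascii.hexlify(child))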
+ """ + # As of now, if one of the parent node is null, p2 is null + if p2 == nullid: + # deep copy of a hash is faster than creating one + s = nullhash.copy() + s.update(p1) + else: + # none of the parent nodes are nullid + l = [p1, p2] + l.sort() + s = _sha(l[0]) + s.update(l[1]) + s.update(text) + return s.digest() + +def compress(text): + """ generate a possibly-compressed representation of text """ + if not text: + return ("", text) + l = len(text) + bin = None + if l < 44: + pass + elif l > 1000000: + # zlib makes an internal copy, thus doubling memory usage for + # large files, so lets do this in pieces + z = zlib.compressobj() + p = [] + pos = 0 + while pos < l: + pos2 = pos + 2**20 + p.append(z.compress(text[pos:pos2])) + pos = pos2 + p.append(z.flush()) + if sum(map(len, p)) < l: + bin = "".join(p) + else: + bin = _compress(text) + if bin is None or len(bin) > l: + if text[0] == '\0': + return ("", text) + return ('u', text) + return ("", bin) + +def decompress(bin): + """ decompress the given input """ + if not bin: + return bin + t = bin[0] + if t == '\0': + return bin + if t == 'x': + return _decompress(bin) + if t == 'u': + return bin[1:] + raise RevlogError(_("unknown compression type %r") % t) + +class lazyparser(object): + """ + this class avoids the need to parse the entirety of large indices + """ + + # lazyparser is not safe to use on windows if win32 extensions not + # available. it keeps file handle open, which make it not possible + # to break hardlinks on local cloned repos. + + def __init__(self, dataf): + try: + size = util.fstat(dataf).st_size + except AttributeError: + size = 0 + self.dataf = dataf + self.s = struct.calcsize(indexformatng) + self.datasize = size + self.l = size // self.s + self.index = [None] * self.l + self.map = {nullid: nullrev} + self.allmap = 0 + self.all = 0 + self.mapfind_count = 0 + + def loadmap(self): + """ + during a commit, we need to make sure the rev being added is + not a duplicate. This requires loading the entire index, + which is fairly slow. loadmap can load up just the node map, + which takes much less time. + """ + if self.allmap: + return + end = self.datasize + self.allmap = 1 + cur = 0 + count = 0 + blocksize = self.s * 256 + self.dataf.seek(0) + while cur < end: + data = self.dataf.read(blocksize) + off = 0 + for x in xrange(256): + n = data[off + ngshaoffset:off + ngshaoffset + 20] + self.map[n] = count + count += 1 + if count >= self.l: + break + off += self.s + cur += blocksize + + def loadblock(self, blockstart, blocksize, data=None): + if self.all: + return + if data is None: + self.dataf.seek(blockstart) + if blockstart + blocksize > self.datasize: + # the revlog may have grown since we've started running, + # but we don't have space in self.index for more entries. + # limit blocksize so that we don't get too much data. + blocksize = max(self.datasize - blockstart, 0) + data = self.dataf.read(blocksize) + lend = len(data) // self.s + i = blockstart // self.s + off = 0 + # lazyindex supports __delitem__ + if lend > len(self.index) - i: + lend = len(self.index) - i + for x in xrange(lend): + if self.index[i + x] is None: + b = data[off : off + self.s] + self.index[i + x] = b + n = b[ngshaoffset:ngshaoffset + 20] + self.map[n] = i + x + off += self.s + + def findnode(self, node): + """search backwards through the index file for a specific node""" + if self.allmap: + return None + + # hg log will cause many many searches for the manifest + # nodes. After we get called a few times, just load the whole + # thing. 
+ if self.mapfind_count > 8: + self.loadmap() + if node in self.map: + return node + return None + self.mapfind_count += 1 + last = self.l - 1 + while self.index[last] != None: + if last == 0: + self.all = 1 + self.allmap = 1 + return None + last -= 1 + end = (last + 1) * self.s + blocksize = self.s * 256 + while end >= 0: + start = max(end - blocksize, 0) + self.dataf.seek(start) + data = self.dataf.read(end - start) + findend = end - start + while True: + # we're searching backwards, so we have to make sure + # we don't find a changeset where this node is a parent + off = data.find(node, 0, findend) + findend = off + if off >= 0: + i = off / self.s + off = i * self.s + n = data[off + ngshaoffset:off + ngshaoffset + 20] + if n == node: + self.map[n] = i + start / self.s + return node + else: + break + end -= blocksize + return None + + def loadindex(self, i=None, end=None): + if self.all: + return + all = False + if i is None: + blockstart = 0 + blocksize = (65536 / self.s) * self.s + end = self.datasize + all = True + else: + if end: + blockstart = i * self.s + end = end * self.s + blocksize = end - blockstart + else: + blockstart = (i & ~1023) * self.s + blocksize = self.s * 1024 + end = blockstart + blocksize + while blockstart < end: + self.loadblock(blockstart, blocksize) + blockstart += blocksize + if all: + self.all = True + +class lazyindex(object): + """a lazy version of the index array""" + def __init__(self, parser): + self.p = parser + def __len__(self): + return len(self.p.index) + def load(self, pos): + if pos < 0: + pos += len(self.p.index) + self.p.loadindex(pos) + return self.p.index[pos] + def __getitem__(self, pos): + return _unpack(indexformatng, self.p.index[pos] or self.load(pos)) + def __setitem__(self, pos, item): + self.p.index[pos] = _pack(indexformatng, *item) + def __delitem__(self, pos): + del self.p.index[pos] + def insert(self, pos, e): + self.p.index.insert(pos, _pack(indexformatng, *e)) + def append(self, e): + self.p.index.append(_pack(indexformatng, *e)) + +class lazymap(object): + """a lazy version of the node map""" + def __init__(self, parser): + self.p = parser + def load(self, key): + n = self.p.findnode(key) + if n is None: + raise KeyError(key) + def __contains__(self, key): + if key in self.p.map: + return True + self.p.loadmap() + return key in self.p.map + def __iter__(self): + yield nullid + for i, ret in enumerate(self.p.index): + if not ret: + self.p.loadindex(i) + ret = self.p.index[i] + if isinstance(ret, str): + ret = _unpack(indexformatng, ret) + yield ret[7] + def __getitem__(self, key): + try: + return self.p.map[key] + except KeyError: + try: + self.load(key) + return self.p.map[key] + except KeyError: + raise KeyError("node " + hex(key)) + def __setitem__(self, key, val): + self.p.map[key] = val + def __delitem__(self, key): + del self.p.map[key] + +indexformatv0 = ">4l20s20s20s" +v0shaoffset = 56 + +class revlogoldio(object): + def __init__(self): + self.size = struct.calcsize(indexformatv0) + + def parseindex(self, fp, data, inline): + s = self.size + index = [] + nodemap = {nullid: nullrev} + n = off = 0 + if len(data) == _prereadsize: + data += fp.read() # read the rest + l = len(data) + while off + s <= l: + cur = data[off:off + s] + off += s + e = _unpack(indexformatv0, cur) + # transform to revlogv1 format + e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3], + nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6]) + index.append(e2) + nodemap[e[6]] = n + n += 1 + + return index, nodemap, None + + def packentry(self, 
entry, node, version, rev): + if gettype(entry[0]): + raise RevlogError(_("index entry flags need RevlogNG")) + e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4], + node(entry[5]), node(entry[6]), entry[7]) + return _pack(indexformatv0, *e2) + +# index ng: +# 6 bytes: offset +# 2 bytes: flags +# 4 bytes: compressed length +# 4 bytes: uncompressed length +# 4 bytes: base rev +# 4 bytes: link rev +# 4 bytes: parent 1 rev +# 4 bytes: parent 2 rev +# 32 bytes: nodeid +indexformatng = ">Qiiiiii20s12x" +ngshaoffset = 32 +versionformat = ">I" + +class revlogio(object): + def __init__(self): + self.size = struct.calcsize(indexformatng) + + def parseindex(self, fp, data, inline): + if len(data) == _prereadsize: + if util.openhardlinks() and not inline: + # big index, let's parse it on demand + parser = lazyparser(fp) + index = lazyindex(parser) + nodemap = lazymap(parser) + e = list(index[0]) + type = gettype(e[0]) + e[0] = offset_type(0, type) + index[0] = e + return index, nodemap, None + else: + data += fp.read() + + # call the C implementation to parse the index data + index, nodemap, cache = parsers.parse_index(data, inline) + return index, nodemap, cache + + def packentry(self, entry, node, version, rev): + p = _pack(indexformatng, *entry) + if rev == 0: + p = _pack(versionformat, version) + p[4:] + return p + +class revlog(object): + """ + the underlying revision storage object + + A revlog consists of two parts, an index and the revision data. + + The index is a file with a fixed record size containing + information on each revision, including its nodeid (hash), the + nodeids of its parents, the position and offset of its data within + the data file, and the revision it's based on. Finally, each entry + contains a linkrev entry that can serve as a pointer to external + data. + + The revision data itself is a linear collection of data chunks. + Each chunk represents a revision and is usually represented as a + delta against the previous chunk. To bound lookup time, runs of + deltas are limited to about 2 times the length of the original + version data. This makes retrieval of a version proportional to + its size, or O(1) relative to the number of revisions. + + Both pieces of the revlog are written to in an append-only + fashion, which means we never need to rewrite a file to insert or + remove data, and can use some simple techniques to avoid the need + for locking while reading. + """ + def __init__(self, opener, indexfile, shallowroot=None): + """ + create a revlog object + + opener is a function that abstracts the file opening operation + and can be used to implement COW semantics or the like. 
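        Editor's note (illustrative, not part of the upstream file): the
        RevlogNG index this object reads is a sequence of fixed 64-byte
        records in the layout documented above ("index ng"), which can be
        decoded directly with struct:

            import struct

            indexformatng = ">Qiiiiii20s12x"  # 64 bytes per index entry

            def unpackentry(record):
                # the leading quad packs the 48-bit data offset and 16-bit flags
                (offset_flags, complen, uncomplen, baserev,
                 linkrev, p1rev, p2rev, node) = struct.unpack(indexformatng, record)
                return (offset_flags >> 16, offset_flags & 0xFFFF, complen,
                        uncomplen, baserev, linkrev, p1rev, p2rev, node)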
+ """ + self.indexfile = indexfile + self.datafile = indexfile[:-2] + ".d" + self.opener = opener + self._cache = None + self._chunkcache = (0, '') + self.nodemap = {nullid: nullrev} + self.index = [] + self._shallowroot = shallowroot + self._parentdelta = 0 + + v = REVLOG_DEFAULT_VERSION + if hasattr(opener, 'options') and 'defversion' in opener.options: + v = opener.options['defversion'] + if v & REVLOGNG: + v |= REVLOGNGINLINEDATA + if v & REVLOGNG and 'parentdelta' in opener.options: + self._parentdelta = 1 + + if shallowroot: + v |= REVLOGSHALLOW + + i = '' + try: + f = self.opener(self.indexfile) + if "nonlazy" in getattr(self.opener, 'options', {}): + i = f.read() + else: + i = f.read(_prereadsize) + if len(i) > 0: + v = struct.unpack(versionformat, i[:4])[0] + except IOError, inst: + if inst.errno != errno.ENOENT: + raise + + self.version = v + self._inline = v & REVLOGNGINLINEDATA + self._shallow = v & REVLOGSHALLOW + flags = v & ~0xFFFF + fmt = v & 0xFFFF + if fmt == REVLOGV0 and flags: + raise RevlogError(_("index %s unknown flags %#04x for format v0") + % (self.indexfile, flags >> 16)) + elif fmt == REVLOGNG and flags & ~REVLOGNG_FLAGS: + raise RevlogError(_("index %s unknown flags %#04x for revlogng") + % (self.indexfile, flags >> 16)) + elif fmt > REVLOGNG: + raise RevlogError(_("index %s unknown format %d") + % (self.indexfile, fmt)) + + self._io = revlogio() + if self.version == REVLOGV0: + self._io = revlogoldio() + if i: + try: + d = self._io.parseindex(f, i, self._inline) + except (ValueError, IndexError): + raise RevlogError(_("index %s is corrupted") % (self.indexfile)) + self.index, self.nodemap, self._chunkcache = d + if not self._chunkcache: + self._chunkclear() + + # add the magic null revision at -1 (if it hasn't been done already) + if (self.index == [] or isinstance(self.index, lazyindex) or + self.index[-1][7] != nullid) : + self.index.append((0, 0, 0, -1, -1, -1, -1, nullid)) + + def _loadindex(self, start, end): + """load a block of indexes all at once from the lazy parser""" + if isinstance(self.index, lazyindex): + self.index.p.loadindex(start, end) + + def _loadindexmap(self): + """loads both the map and the index from the lazy parser""" + if isinstance(self.index, lazyindex): + p = self.index.p + p.loadindex() + self.nodemap = p.map + + def _loadmap(self): + """loads the map from the lazy parser""" + if isinstance(self.nodemap, lazymap): + self.nodemap.p.loadmap() + self.nodemap = self.nodemap.p.map + + def tip(self): + return self.node(len(self.index) - 2) + def __len__(self): + return len(self.index) - 1 + def __iter__(self): + for i in xrange(len(self)): + yield i + def rev(self, node): + try: + return self.nodemap[node] + except KeyError: + raise LookupError(node, self.indexfile, _('no node')) + def node(self, rev): + return self.index[rev][7] + def linkrev(self, rev): + return self.index[rev][4] + def parents(self, node): + i = self.index + d = i[self.rev(node)] + return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline + def parentrevs(self, rev): + return self.index[rev][5:7] + def start(self, rev): + return int(self.index[rev][0] >> 16) + def end(self, rev): + return self.start(rev) + self.length(rev) + def length(self, rev): + return self.index[rev][1] + def base(self, rev): + return self.index[rev][3] + def flags(self, rev): + return self.index[rev][0] & 0xFFFF + def rawsize(self, rev): + """return the length of the uncompressed text for a given revision""" + l = self.index[rev][2] + if l >= 0: + return l + + t = 
self.revision(self.node(rev)) + return len(t) + size = rawsize + + def reachable(self, node, stop=None): + """return the set of all nodes ancestral to a given node, including + the node itself, stopping when stop is matched""" + reachable = set((node,)) + visit = [node] + if stop: + stopn = self.rev(stop) + else: + stopn = 0 + while visit: + n = visit.pop(0) + if n == stop: + continue + if n == nullid: + continue + for p in self.parents(n): + if self.rev(p) < stopn: + continue + if p not in reachable: + reachable.add(p) + visit.append(p) + return reachable + + def ancestors(self, *revs): + """Generate the ancestors of 'revs' in reverse topological order. + + Yield a sequence of revision numbers starting with the parents + of each revision in revs, i.e., each revision is *not* considered + an ancestor of itself. Results are in breadth-first order: + parents of each rev in revs, then parents of those, etc. Result + does not include the null revision.""" + visit = list(revs) + seen = set([nullrev]) + while visit: + for parent in self.parentrevs(visit.pop(0)): + if parent not in seen: + visit.append(parent) + seen.add(parent) + yield parent + + def descendants(self, *revs): + """Generate the descendants of 'revs' in revision order. + + Yield a sequence of revision numbers starting with a child of + some rev in revs, i.e., each revision is *not* considered a + descendant of itself. Results are ordered by revision number (a + topological sort).""" + first = min(revs) + if first == nullrev: + for i in self: + yield i + return + + seen = set(revs) + for i in xrange(first + 1, len(self)): + for x in self.parentrevs(i): + if x != nullrev and x in seen: + seen.add(i) + yield i + break + + def findmissing(self, common=None, heads=None): + """Return the ancestors of heads that are not ancestors of common. + + More specifically, return a list of nodes N such that every N + satisfies the following constraints: + + 1. N is an ancestor of some node in 'heads' + 2. N is not an ancestor of any node in 'common' + + The list is sorted by revision number, meaning it is + topologically sorted. + + 'heads' and 'common' are both lists of node IDs. If heads is + not supplied, uses all of the revlog's heads. If common is not + supplied, uses nullid.""" + if common is None: + common = [nullid] + if heads is None: + heads = self.heads() + + common = [self.rev(n) for n in common] + heads = [self.rev(n) for n in heads] + + # we want the ancestors, but inclusive + has = set(self.ancestors(*common)) + has.add(nullrev) + has.update(common) + + # take all ancestors from heads that aren't in has + missing = set() + visit = [r for r in heads if r not in has] + while visit: + r = visit.pop(0) + if r in missing: + continue + else: + missing.add(r) + for p in self.parentrevs(r): + if p not in has: + visit.append(p) + missing = list(missing) + missing.sort() + return [self.node(r) for r in missing] + + def nodesbetween(self, roots=None, heads=None): + """Return a topological path from 'roots' to 'heads'. + + Return a tuple (nodes, outroots, outheads) where 'nodes' is a + topologically sorted list of all nodes N that satisfy both of + these constraints: + + 1. N is a descendant of some node in 'roots' + 2. N is an ancestor of some node in 'heads' + + Every node is considered to be both a descendant and an ancestor + of itself, so every reachable node in 'roots' and 'heads' will be + included in 'nodes'. + + 'outroots' is the list of reachable nodes in 'roots', i.e., the + subset of 'roots' that is returned in 'nodes'. 
Likewise, + 'outheads' is the subset of 'heads' that is also in 'nodes'. + + 'roots' and 'heads' are both lists of node IDs. If 'roots' is + unspecified, uses nullid as the only root. If 'heads' is + unspecified, uses list of all of the revlog's heads.""" + nonodes = ([], [], []) + if roots is not None: + roots = list(roots) + if not roots: + return nonodes + lowestrev = min([self.rev(n) for n in roots]) + else: + roots = [nullid] # Everybody's a descendent of nullid + lowestrev = nullrev + if (lowestrev == nullrev) and (heads is None): + # We want _all_ the nodes! + return ([self.node(r) for r in self], [nullid], list(self.heads())) + if heads is None: + # All nodes are ancestors, so the latest ancestor is the last + # node. + highestrev = len(self) - 1 + # Set ancestors to None to signal that every node is an ancestor. + ancestors = None + # Set heads to an empty dictionary for later discovery of heads + heads = {} + else: + heads = list(heads) + if not heads: + return nonodes + ancestors = set() + # Turn heads into a dictionary so we can remove 'fake' heads. + # Also, later we will be using it to filter out the heads we can't + # find from roots. + heads = dict.fromkeys(heads, 0) + # Start at the top and keep marking parents until we're done. + nodestotag = set(heads) + # Remember where the top was so we can use it as a limit later. + highestrev = max([self.rev(n) for n in nodestotag]) + while nodestotag: + # grab a node to tag + n = nodestotag.pop() + # Never tag nullid + if n == nullid: + continue + # A node's revision number represents its place in a + # topologically sorted list of nodes. + r = self.rev(n) + if r >= lowestrev: + if n not in ancestors: + # If we are possibly a descendent of one of the roots + # and we haven't already been marked as an ancestor + ancestors.add(n) # Mark as ancestor + # Add non-nullid parents to list of nodes to tag. + nodestotag.update([p for p in self.parents(n) if + p != nullid]) + elif n in heads: # We've seen it before, is it a fake head? + # So it is, real heads should not be the ancestors of + # any other heads. + heads.pop(n) + if not ancestors: + return nonodes + # Now that we have our set of ancestors, we want to remove any + # roots that are not ancestors. + + # If one of the roots was nullid, everything is included anyway. + if lowestrev > nullrev: + # But, since we weren't, let's recompute the lowest rev to not + # include roots that aren't ancestors. + + # Filter out roots that aren't ancestors of heads + roots = [n for n in roots if n in ancestors] + # Recompute the lowest revision + if roots: + lowestrev = min([self.rev(n) for n in roots]) + else: + # No more roots? Return empty list + return nonodes + else: + # We are descending from nullid, and don't need to care about + # any other roots. + lowestrev = nullrev + roots = [nullid] + # Transform our roots list into a set. + descendents = set(roots) + # Also, keep the original roots so we can filter out roots that aren't + # 'real' roots (i.e. are descended from other roots). + roots = descendents.copy() + # Our topologically sorted list of output nodes. + orderedout = [] + # Don't start at nullid since we don't want nullid in our output list, + # and if nullid shows up in descedents, empty parents will look like + # they're descendents. 
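# (editor's annotation, not part of upstream Mercurial 1.7.3: the loop below
# makes a single forward pass from lowestrev to highestrev. Because a revlog
# never stores a revision before its parents, parents always have smaller
# revision numbers, so one pass is enough both to propagate the "descendant
# of a root" property and to emit orderedout in topological order.)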
+ for r in xrange(max(lowestrev, 0), highestrev + 1): + n = self.node(r) + isdescendent = False + if lowestrev == nullrev: # Everybody is a descendent of nullid + isdescendent = True + elif n in descendents: + # n is already a descendent + isdescendent = True + # This check only needs to be done here because all the roots + # will start being marked is descendents before the loop. + if n in roots: + # If n was a root, check if it's a 'real' root. + p = tuple(self.parents(n)) + # If any of its parents are descendents, it's not a root. + if (p[0] in descendents) or (p[1] in descendents): + roots.remove(n) + else: + p = tuple(self.parents(n)) + # A node is a descendent if either of its parents are + # descendents. (We seeded the dependents list with the roots + # up there, remember?) + if (p[0] in descendents) or (p[1] in descendents): + descendents.add(n) + isdescendent = True + if isdescendent and ((ancestors is None) or (n in ancestors)): + # Only include nodes that are both descendents and ancestors. + orderedout.append(n) + if (ancestors is not None) and (n in heads): + # We're trying to figure out which heads are reachable + # from roots. + # Mark this head as having been reached + heads[n] = 1 + elif ancestors is None: + # Otherwise, we're trying to discover the heads. + # Assume this is a head because if it isn't, the next step + # will eventually remove it. + heads[n] = 1 + # But, obviously its parents aren't. + for p in self.parents(n): + heads.pop(p, None) + heads = [n for n in heads.iterkeys() if heads[n] != 0] + roots = list(roots) + assert orderedout + assert roots + assert heads + return (orderedout, roots, heads) + + def heads(self, start=None, stop=None): + """return the list of all nodes that have no children + + if start is specified, only heads that are descendants of + start will be returned + if stop is specified, it will consider all the revs from stop + as if they had no children + """ + if start is None and stop is None: + count = len(self) + if not count: + return [nullid] + ishead = [1] * (count + 1) + index = self.index + for r in xrange(count): + e = index[r] + ishead[e[5]] = ishead[e[6]] = 0 + return [self.node(r) for r in xrange(count) if ishead[r]] + + if start is None: + start = nullid + if stop is None: + stop = [] + stoprevs = set([self.rev(n) for n in stop]) + startrev = self.rev(start) + reachable = set((startrev,)) + heads = set((startrev,)) + + parentrevs = self.parentrevs + for r in xrange(startrev + 1, len(self)): + for p in parentrevs(r): + if p in reachable: + if r not in stoprevs: + reachable.add(r) + heads.add(r) + if p in heads and p not in stoprevs: + heads.remove(p) + + return [self.node(r) for r in heads] + + def children(self, node): + """find the children of a given node""" + c = [] + p = self.rev(node) + for r in range(p + 1, len(self)): + prevs = [pr for pr in self.parentrevs(r) if pr != nullrev] + if prevs: + for pr in prevs: + if pr == p: + c.append(self.node(r)) + elif p == nullrev: + c.append(self.node(r)) + return c + + def descendant(self, start, end): + if start == nullrev: + return True + for i in self.descendants(start): + if i == end: + return True + elif i > end: + break + return False + + def ancestor(self, a, b): + """calculate the least common ancestor of nodes a and b""" + + # fast path, check if it is a descendant + a, b = self.rev(a), self.rev(b) + start, end = sorted((a, b)) + if self.descendant(start, end): + return self.node(start) + + def parents(rev): + return [p for p in self.parentrevs(rev) if p != nullrev] + + c = 
ancestor.ancestor(a, b, parents) + if c is None: + return nullid + + return self.node(c) + + def _match(self, id): + if isinstance(id, (long, int)): + # rev + return self.node(id) + if len(id) == 20: + # possibly a binary node + # odds of a binary node being all hex in ASCII are 1 in 10**25 + try: + node = id + self.rev(node) # quick search the index + return node + except LookupError: + pass # may be partial hex id + try: + # str(rev) + rev = int(id) + if str(rev) != id: + raise ValueError + if rev < 0: + rev = len(self) + rev + if rev < 0 or rev >= len(self): + raise ValueError + return self.node(rev) + except (ValueError, OverflowError): + pass + if len(id) == 40: + try: + # a full hex nodeid? + node = bin(id) + self.rev(node) + return node + except (TypeError, LookupError): + pass + + def _partialmatch(self, id): + if len(id) < 40: + try: + # hex(node)[:...] + l = len(id) // 2 # grab an even number of digits + bin_id = bin(id[:l * 2]) + nl = [n for n in self.nodemap if n[:l] == bin_id] + nl = [n for n in nl if hex(n).startswith(id)] + if len(nl) > 0: + if len(nl) == 1: + return nl[0] + raise LookupError(id, self.indexfile, + _('ambiguous identifier')) + return None + except TypeError: + pass + + def lookup(self, id): + """locate a node based on: + - revision number or str(revision number) + - nodeid or subset of hex nodeid + """ + n = self._match(id) + if n is not None: + return n + n = self._partialmatch(id) + if n: + return n + + raise LookupError(id, self.indexfile, _('no match found')) + + def cmp(self, node, text): + """compare text with a given file revision + + returns True if text is different than what is stored. + """ + p1, p2 = self.parents(node) + return hash(text, p1, p2) != node + + def _addchunk(self, offset, data): + o, d = self._chunkcache + # try to add to existing cache + if o + len(d) == offset and len(d) + len(data) < _prereadsize: + self._chunkcache = o, d + data + else: + self._chunkcache = offset, data + + def _loadchunk(self, offset, length): + if self._inline: + df = self.opener(self.indexfile) + else: + df = self.opener(self.datafile) + + readahead = max(65536, length) + df.seek(offset) + d = df.read(readahead) + self._addchunk(offset, d) + if readahead > length: + return d[:length] + return d + + def _getchunk(self, offset, length): + o, d = self._chunkcache + l = len(d) + + # is it in the cache? 
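# (editor's annotation, not part of upstream Mercurial 1.7.3: _chunkcache
# holds a single (offset, data) window of the underlying file. The test
# below serves the request straight from that window whenever the range
# [offset, offset + length) lies inside [o, o + len(d)); otherwise it falls
# back to _loadchunk, which reads ahead at least 64 KB and updates the
# cached window.)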
+ cachestart = offset - o + cacheend = cachestart + length + if cachestart >= 0 and cacheend <= l: + if cachestart == 0 and cacheend == l: + return d # avoid a copy + return d[cachestart:cacheend] + + return self._loadchunk(offset, length) + + def _chunkraw(self, startrev, endrev): + start = self.start(startrev) + length = self.end(endrev) - start + if self._inline: + start += (startrev + 1) * self._io.size + return self._getchunk(start, length) + + def _chunk(self, rev): + return decompress(self._chunkraw(rev, rev)) + + def _chunkclear(self): + self._chunkcache = (0, '') + + def deltaparent(self, rev): + """return previous revision or parentrev according to flags""" + if self.flags(rev) & REVIDX_PARENTDELTA: + return self.parentrevs(rev)[0] + else: + return rev - 1 + + def revdiff(self, rev1, rev2): + """return or calculate a delta between two revisions""" + if self.base(rev2) != rev2 and self.deltaparent(rev2) == rev1: + return self._chunk(rev2) + + return mdiff.textdiff(self.revision(self.node(rev1)), + self.revision(self.node(rev2))) + + def revision(self, node): + """return an uncompressed revision of a given node""" + cachedrev = None + if node == nullid: + return "" + if self._cache: + if self._cache[0] == node: + return self._cache[2] + cachedrev = self._cache[1] + + # look up what we need to read + text = None + rev = self.rev(node) + base = self.base(rev) + + # check rev flags + if self.flags(rev) & ~REVIDX_KNOWN_FLAGS: + raise RevlogError(_('incompatible revision flag %x') % + (self.flags(rev) & ~REVIDX_KNOWN_FLAGS)) + + # build delta chain + self._loadindex(base, rev + 1) + chain = [] + index = self.index # for performance + iterrev = rev + e = index[iterrev] + while iterrev != base and iterrev != cachedrev: + chain.append(iterrev) + if e[0] & REVIDX_PARENTDELTA: + iterrev = e[5] + else: + iterrev -= 1 + e = index[iterrev] + chain.reverse() + base = iterrev + + if iterrev == cachedrev: + # cache hit + text = self._cache[2] + + # drop cache to save memory + self._cache = None + + self._chunkraw(base, rev) + if text is None: + text = self._chunk(base) + + bins = [self._chunk(r) for r in chain] + text = mdiff.patches(text, bins) + p1, p2 = self.parents(node) + if (node != hash(text, p1, p2) and + not (self.flags(rev) & REVIDX_PUNCHED_FLAG)): + raise RevlogError(_("integrity check failed on %s:%d") + % (self.indexfile, rev)) + + self._cache = (node, rev, text) + return text + + def checkinlinesize(self, tr, fp=None): + if not self._inline or (self.start(-2) + self.length(-2)) < _maxinline: + return + + trinfo = tr.find(self.indexfile) + if trinfo is None: + raise RevlogError(_("%s not found in the transaction") + % self.indexfile) + + trindex = trinfo[2] + dataoff = self.start(trindex) + + tr.add(self.datafile, dataoff) + + if fp: + fp.flush() + fp.close() + + df = self.opener(self.datafile, 'w') + try: + for r in self: + df.write(self._chunkraw(r, r)) + finally: + df.close() + + fp = self.opener(self.indexfile, 'w', atomictemp=True) + self.version &= ~(REVLOGNGINLINEDATA) + self._inline = False + for i in self: + e = self._io.packentry(self.index[i], self.node, self.version, i) + fp.write(e) + + # if we don't call rename, the temp file will never replace the + # real index + fp.rename() + + tr.replace(self.indexfile, trindex * self._io.size) + self._chunkclear() + + def addrevision(self, text, transaction, link, p1, p2, cachedelta=None): + """add a revision to the log + + text - the revision data to add + transaction - the transaction object used for rollback + link - the linkrev 
data to add + p1, p2 - the parent nodeids of the revision + cachedelta - an optional precomputed delta + """ + node = hash(text, p1, p2) + if (node in self.nodemap and + (not self.flags(self.rev(node)) & REVIDX_PUNCHED_FLAG)): + return node + + dfh = None + if not self._inline: + dfh = self.opener(self.datafile, "a") + ifh = self.opener(self.indexfile, "a+") + try: + return self._addrevision(node, text, transaction, link, p1, p2, + cachedelta, ifh, dfh) + finally: + if dfh: + dfh.close() + ifh.close() + + def _addrevision(self, node, text, transaction, link, p1, p2, + cachedelta, ifh, dfh): + + btext = [text] + def buildtext(): + if btext[0] is not None: + return btext[0] + # flush any pending writes here so we can read it in revision + if dfh: + dfh.flush() + ifh.flush() + basetext = self.revision(self.node(cachedelta[0])) + btext[0] = mdiff.patch(basetext, cachedelta[1]) + chk = hash(btext[0], p1, p2) + if chk != node: + raise RevlogError(_("consistency error in delta")) + return btext[0] + + def builddelta(rev): + # can we use the cached delta? + if cachedelta and cachedelta[0] == rev: + delta = cachedelta[1] + else: + t = buildtext() + ptext = self.revision(self.node(rev)) + delta = mdiff.textdiff(ptext, t) + data = compress(delta) + l = len(data[1]) + len(data[0]) + base = self.base(rev) + dist = l + offset - self.start(base) + return dist, l, data, base + + curr = len(self) + prev = curr - 1 + base = curr + offset = self.end(prev) + flags = 0 + d = None + p1r, p2r = self.rev(p1), self.rev(p2) + + # should we try to build a delta? + if prev != nullrev: + d = builddelta(prev) + if self._parentdelta and prev != p1r: + d2 = builddelta(p1r) + if d2 < d: + d = d2 + flags = REVIDX_PARENTDELTA + dist, l, data, base = d + + # full versions are inserted when the needed deltas + # become comparable to the uncompressed text + # or the base revision is punched + if text is None: + textlen = mdiff.patchedsize(self.rawsize(cachedelta[0]), + cachedelta[1]) + else: + textlen = len(text) + if (d is None or dist > textlen * 2 or + (self.flags(base) & REVIDX_PUNCHED_FLAG)): + text = buildtext() + data = compress(text) + l = len(data[1]) + len(data[0]) + base = curr + + e = (offset_type(offset, flags), l, textlen, + base, link, p1r, p2r, node) + self.index.insert(-1, e) + self.nodemap[node] = curr + + entry = self._io.packentry(e, self.node, self.version, curr) + if not self._inline: + transaction.add(self.datafile, offset) + transaction.add(self.indexfile, curr * len(entry)) + if data[0]: + dfh.write(data[0]) + dfh.write(data[1]) + dfh.flush() + ifh.write(entry) + else: + offset += curr * self._io.size + transaction.add(self.indexfile, offset, curr) + ifh.write(entry) + ifh.write(data[0]) + ifh.write(data[1]) + self.checkinlinesize(transaction, ifh) + + if type(text) == str: # only accept immutable objects + self._cache = (node, curr, text) + return node + + def group(self, nodelist, lookup, infocollect=None, fullrev=False): + """Calculate a delta group, yielding a sequence of changegroup chunks + (strings). + + Given a list of changeset revs, return a set of deltas and + metadata corresponding to nodes. The first delta is + first parent(nodelist[0]) -> nodelist[0], the receiver is + guaranteed to have this parent as it has all history before + these changesets. In the case firstparent is nullrev the + changegroup starts with a full revision. + fullrev forces the insertion of the full revision, necessary + in the case of shallow clones where the first parent might + not exist at the reciever. 
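    Editor's note (illustrative, not part of the upstream file): each data
    chunk yielded below carries, after its length prefix, an 80-byte header
    followed by the delta, so a receiver can take it apart with slicing:

        def parsedeltachunk(chunk):
            # layout produced by group(): node, p1, p2 and the link ("cs")
            # node, 20 bytes each, then the mdiff delta (or a full text for
            # the first chunk when fullrev is set)
            node, p1, p2, cs = (chunk[0:20], chunk[20:40],
                                chunk[40:60], chunk[60:80])
            return node, p1, p2, cs, chunk[80:]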
+ """ + + revs = [self.rev(n) for n in nodelist] + + # if we don't have any revisions touched by these changesets, bail + if not revs: + yield changegroup.closechunk() + return + + # add the parent of the first rev + p = self.parentrevs(revs[0])[0] + revs.insert(0, p) + if p == nullrev: + fullrev = True + + # build deltas + for d in xrange(len(revs) - 1): + a, b = revs[d], revs[d + 1] + nb = self.node(b) + + if infocollect is not None: + infocollect(nb) + + p = self.parents(nb) + meta = nb + p[0] + p[1] + lookup(nb) + if fullrev: + d = self.revision(nb) + meta += mdiff.trivialdiffheader(len(d)) + fullrev = False + else: + d = self.revdiff(a, b) + yield changegroup.chunkheader(len(meta) + len(d)) + yield meta + yield d + + yield changegroup.closechunk() + + def addgroup(self, bundle, linkmapper, transaction): + """ + add a delta group + + given a set of deltas, add them to the revision log. the + first delta is against its parent, which should be in our + log, the rest are against the previous delta. + """ + + # track the base of the current delta log + node = None + + r = len(self) + end = 0 + if r: + end = self.end(r - 1) + ifh = self.opener(self.indexfile, "a+") + isize = r * self._io.size + if self._inline: + transaction.add(self.indexfile, end + isize, r) + dfh = None + else: + transaction.add(self.indexfile, isize, r) + transaction.add(self.datafile, end) + dfh = self.opener(self.datafile, "a") + + try: + # loop through our set of deltas + chain = None + while 1: + chunkdata = bundle.parsechunk() + if not chunkdata: + break + node = chunkdata['node'] + p1 = chunkdata['p1'] + p2 = chunkdata['p2'] + cs = chunkdata['cs'] + delta = chunkdata['data'] + + link = linkmapper(cs) + if (node in self.nodemap and + (not self.flags(self.rev(node)) & REVIDX_PUNCHED_FLAG)): + # this can happen if two branches make the same change + chain = node + continue + + for p in (p1, p2): + if not p in self.nodemap: + if self._shallow: + # add null entries for missing parents + # XXX FIXME + #if base == nullrev: + # base = len(self) + #e = (offset_type(end, REVIDX_PUNCHED_FLAG), + # 0, 0, base, nullrev, nullrev, nullrev, p) + #self.index.insert(-1, e) + #self.nodemap[p] = r + #entry = self._io.packentry(e, self.node, + # self.version, r) + #ifh.write(entry) + #t, r = r, r + 1 + raise LookupError(p, self.indexfile, + _('unknown parent')) + else: + raise LookupError(p, self.indexfile, + _('unknown parent')) + + if not chain: + # retrieve the parent revision of the delta chain + chain = p1 + if not chain in self.nodemap: + raise LookupError(chain, self.indexfile, _('unknown base')) + + chainrev = self.rev(chain) + chain = self._addrevision(node, None, transaction, link, + p1, p2, (chainrev, delta), ifh, dfh) + if not dfh and not self._inline: + # addrevision switched from inline to conventional + # reopen the index + dfh = self.opener(self.datafile, "a") + ifh = self.opener(self.indexfile, "a") + finally: + if dfh: + dfh.close() + ifh.close() + + return node + + def strip(self, minlink, transaction): + """truncate the revlog on the first revision with a linkrev >= minlink + + This function is called when we're stripping revision minlink and + its descendants from the repository. + + We have to remove all revisions with linkrev >= minlink, because + the equivalent changelog revisions will be renumbered after the + strip. + + So we truncate the revlog on the first of these revisions, and + trust that the caller has saved the revisions that shouldn't be + removed and that it'll readd them after this truncation. 
+ """ + if len(self) == 0: + return + + if isinstance(self.index, lazyindex): + self._loadindexmap() + + for rev in self: + if self.index[rev][4] >= minlink: + break + else: + return + + # first truncate the files on disk + end = self.start(rev) + if not self._inline: + transaction.add(self.datafile, end) + end = rev * self._io.size + else: + end += rev * self._io.size + + transaction.add(self.indexfile, end) + + # then reset internal state in memory to forget those revisions + self._cache = None + self._chunkclear() + for x in xrange(rev, len(self)): + del self.nodemap[self.node(x)] + + del self.index[rev:-1] + + def checksize(self): + expected = 0 + if len(self): + expected = max(0, self.end(len(self) - 1)) + + try: + f = self.opener(self.datafile) + f.seek(0, 2) + actual = f.tell() + dd = actual - expected + except IOError, inst: + if inst.errno != errno.ENOENT: + raise + dd = 0 + + try: + f = self.opener(self.indexfile) + f.seek(0, 2) + actual = f.tell() + s = self._io.size + i = max(0, actual // s) + di = actual - (i * s) + if self._inline: + databytes = 0 + for r in self: + databytes += max(0, self.length(r)) + dd = 0 + di = actual - len(self) * s - databytes + except IOError, inst: + if inst.errno != errno.ENOENT: + raise + di = 0 + + return (dd, di) + + def files(self): + res = [self.indexfile] + if not self._inline: + res.append(self.datafile) + return res diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/revlog.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/revlog.pyo Binary files differnew file mode 100644 index 0000000..4df1c76 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/revlog.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/revset.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/revset.py new file mode 100644 index 0000000..7adc768 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/revset.py @@ -0,0 +1,797 @@ +# revset.py - revision set queries for mercurial +# +# Copyright 2010 Matt Mackall <mpm@selenic.com> +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +import re +import parser, util, error, discovery +import match as matchmod +from i18n import _, gettext + +elements = { + "(": (20, ("group", 1, ")"), ("func", 1, ")")), + "-": (5, ("negate", 19), ("minus", 5)), + "::": (17, ("dagrangepre", 17), ("dagrange", 17), + ("dagrangepost", 17)), + "..": (17, ("dagrangepre", 17), ("dagrange", 17), + ("dagrangepost", 17)), + ":": (15, ("rangepre", 15), ("range", 15), ("rangepost", 15)), + "not": (10, ("not", 10)), + "!": (10, ("not", 10)), + "and": (5, None, ("and", 5)), + "&": (5, None, ("and", 5)), + "or": (4, None, ("or", 4)), + "|": (4, None, ("or", 4)), + "+": (4, None, ("or", 4)), + ",": (2, None, ("list", 2)), + ")": (0, None, None), + "symbol": (0, ("symbol",), None), + "string": (0, ("string",), None), + "end": (0, None, None), +} + +keywords = set(['and', 'or', 'not']) + +def tokenize(program): + pos, l = 0, len(program) + while pos < l: + c = program[pos] + if c.isspace(): # skip inter-token whitespace + pass + elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully + yield ('::', None, pos) + pos += 1 # skip ahead + elif c == '.' 
and program[pos:pos + 2] == '..': # look ahead carefully + yield ('..', None, pos) + pos += 1 # skip ahead + elif c in "():,-|&+!": # handle simple operators + yield (c, None, pos) + elif (c in '"\'' or c == 'r' and + program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings + if c == 'r': + pos += 1 + c = program[pos] + decode = lambda x: x + else: + decode = lambda x: x.decode('string-escape') + pos += 1 + s = pos + while pos < l: # find closing quote + d = program[pos] + if d == '\\': # skip over escaped characters + pos += 2 + continue + if d == c: + yield ('string', decode(program[s:pos]), s) + break + pos += 1 + else: + raise error.ParseError(_("unterminated string"), s) + elif c.isalnum() or c in '._' or ord(c) > 127: # gather up a symbol/keyword + s = pos + pos += 1 + while pos < l: # find end of symbol + d = program[pos] + if not (d.isalnum() or d in "._" or ord(d) > 127): + break + if d == '.' and program[pos - 1] == '.': # special case for .. + pos -= 1 + break + pos += 1 + sym = program[s:pos] + if sym in keywords: # operator keywords + yield (sym, None, s) + else: + yield ('symbol', sym, s) + pos -= 1 + else: + raise error.ParseError(_("syntax error"), pos) + pos += 1 + yield ('end', None, pos) + +# helpers + +def getstring(x, err): + if x and (x[0] == 'string' or x[0] == 'symbol'): + return x[1] + raise error.ParseError(err) + +def getlist(x): + if not x: + return [] + if x[0] == 'list': + return getlist(x[1]) + [x[2]] + return [x] + +def getargs(x, min, max, err): + l = getlist(x) + if len(l) < min or len(l) > max: + raise error.ParseError(err) + return l + +def getset(repo, subset, x): + if not x: + raise error.ParseError(_("missing argument")) + return methods[x[0]](repo, subset, *x[1:]) + +# operator methods + +def stringset(repo, subset, x): + x = repo[x].rev() + if x == -1 and len(subset) == len(repo): + return [-1] + if x in subset: + return [x] + return [] + +def symbolset(repo, subset, x): + if x in symbols: + raise error.ParseError(_("can't use %s here") % x) + return stringset(repo, subset, x) + +def rangeset(repo, subset, x, y): + m = getset(repo, subset, x) + if not m: + m = getset(repo, range(len(repo)), x) + + n = getset(repo, subset, y) + if not n: + n = getset(repo, range(len(repo)), y) + + if not m or not n: + return [] + m, n = m[0], n[-1] + + if m < n: + r = range(m, n + 1) + else: + r = range(m, n - 1, -1) + s = set(subset) + return [x for x in r if x in s] + +def andset(repo, subset, x, y): + return getset(repo, getset(repo, subset, x), y) + +def orset(repo, subset, x, y): + s = set(getset(repo, subset, x)) + s |= set(getset(repo, [r for r in subset if r not in s], y)) + return [r for r in subset if r in s] + +def notset(repo, subset, x): + s = set(getset(repo, subset, x)) + return [r for r in subset if r not in s] + +def listset(repo, subset, a, b): + raise error.ParseError(_("can't use a list in this context")) + +def func(repo, subset, a, b): + if a[0] == 'symbol' and a[1] in symbols: + return symbols[a[1]](repo, subset, b) + raise error.ParseError(_("not a function: %s") % a[1]) + +# functions + +def node(repo, subset, x): + """``id(string)`` + Revision non-ambiguously specified by the given hex string prefix. 
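    Editor's note (refers to the tokenize() function defined earlier in this
    file; illustrative, not part of the upstream file): the parser consumes a
    token stream such as

        list(tokenize("head() and not merge()"))
        # [('symbol', 'head', 0), ('(', None, 4), (')', None, 5),
        #  ('and', None, 7), ('not', None, 11), ('symbol', 'merge', 15),
        #  ('(', None, 20), (')', None, 21), ('end', None, 22)]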
+ """ + # i18n: "id" is a keyword + l = getargs(x, 1, 1, _("id requires one argument")) + # i18n: "id" is a keyword + n = getstring(l[0], _("id requires a string")) + if len(n) == 40: + rn = repo[n].rev() + else: + rn = repo.changelog.rev(repo.changelog._partialmatch(n)) + return [r for r in subset if r == rn] + +def rev(repo, subset, x): + """``rev(number)`` + Revision with the given numeric identifier. + """ + # i18n: "rev" is a keyword + l = getargs(x, 1, 1, _("rev requires one argument")) + try: + # i18n: "rev" is a keyword + l = int(getstring(l[0], _("rev requires a number"))) + except ValueError: + # i18n: "rev" is a keyword + raise error.ParseError(_("rev expects a number")) + return [r for r in subset if r == l] + +def p1(repo, subset, x): + """``p1(set)`` + First parent of changesets in set. + """ + ps = set() + cl = repo.changelog + for r in getset(repo, range(len(repo)), x): + ps.add(cl.parentrevs(r)[0]) + return [r for r in subset if r in ps] + +def p2(repo, subset, x): + """``p2(set)`` + Second parent of changesets in set. + """ + ps = set() + cl = repo.changelog + for r in getset(repo, range(len(repo)), x): + ps.add(cl.parentrevs(r)[1]) + return [r for r in subset if r in ps] + +def parents(repo, subset, x): + """``parents(set)`` + The set of all parents for all changesets in set. + """ + ps = set() + cl = repo.changelog + for r in getset(repo, range(len(repo)), x): + ps.update(cl.parentrevs(r)) + return [r for r in subset if r in ps] + +def maxrev(repo, subset, x): + """``max(set)`` + Changeset with highest revision number in set. + """ + s = getset(repo, subset, x) + if s: + m = max(s) + if m in subset: + return [m] + return [] + +def minrev(repo, subset, x): + """``min(set)`` + Changeset with lowest revision number in set. + """ + s = getset(repo, subset, x) + if s: + m = min(s) + if m in subset: + return [m] + return [] + +def limit(repo, subset, x): + """``limit(set, n)`` + First n members of set. + """ + # i18n: "limit" is a keyword + l = getargs(x, 2, 2, _("limit requires two arguments")) + try: + # i18n: "limit" is a keyword + lim = int(getstring(l[1], _("limit requires a number"))) + except ValueError: + # i18n: "limit" is a keyword + raise error.ParseError(_("limit expects a number")) + return getset(repo, subset, l[0])[:lim] + +def children(repo, subset, x): + """``children(set)`` + Child changesets of changesets in set. + """ + cs = set() + cl = repo.changelog + s = set(getset(repo, range(len(repo)), x)) + for r in xrange(0, len(repo)): + for p in cl.parentrevs(r): + if p in s: + cs.add(r) + return [r for r in subset if r in cs] + +def branch(repo, subset, x): + """``branch(set)`` + All changesets belonging to the branches of changesets in set. + """ + s = getset(repo, range(len(repo)), x) + b = set() + for r in s: + b.add(repo[r].branch()) + s = set(s) + return [r for r in subset if r in s or repo[r].branch() in b] + +def ancestor(repo, subset, x): + """``ancestor(single, single)`` + Greatest common ancestor of the two changesets. + """ + # i18n: "ancestor" is a keyword + l = getargs(x, 2, 2, _("ancestor requires two arguments")) + r = range(len(repo)) + a = getset(repo, r, l[0]) + b = getset(repo, r, l[1]) + if len(a) != 1 or len(b) != 1: + # i18n: "ancestor" is a keyword + raise error.ParseError(_("ancestor arguments must be single revisions")) + an = [repo[a[0]].ancestor(repo[b[0]]).rev()] + + return [r for r in an if r in subset] + +def ancestors(repo, subset, x): + """``ancestors(set)`` + Changesets that are ancestors of a changeset in set. 
+ """ + args = getset(repo, range(len(repo)), x) + if not args: + return [] + s = set(repo.changelog.ancestors(*args)) | set(args) + return [r for r in subset if r in s] + +def descendants(repo, subset, x): + """``descendants(set)`` + Changesets which are descendants of changesets in set. + """ + args = getset(repo, range(len(repo)), x) + if not args: + return [] + s = set(repo.changelog.descendants(*args)) | set(args) + return [r for r in subset if r in s] + +def follow(repo, subset, x): + """``follow()`` + An alias for ``::.`` (ancestors of the working copy's first parent). + """ + # i18n: "follow" is a keyword + getargs(x, 0, 0, _("follow takes no arguments")) + p = repo['.'].rev() + s = set(repo.changelog.ancestors(p)) | set([p]) + return [r for r in subset if r in s] + +def date(repo, subset, x): + """``date(interval)`` + Changesets within the interval, see :hg:`help dates`. + """ + # i18n: "date" is a keyword + ds = getstring(x, _("date requires a string")) + dm = util.matchdate(ds) + return [r for r in subset if dm(repo[r].date()[0])] + +def keyword(repo, subset, x): + """``keyword(string)`` + Search commit message, user name, and names of changed files for + string. + """ + # i18n: "keyword" is a keyword + kw = getstring(x, _("keyword requires a string")).lower() + l = [] + for r in subset: + c = repo[r] + t = " ".join(c.files() + [c.user(), c.description()]) + if kw in t.lower(): + l.append(r) + return l + +def grep(repo, subset, x): + """``grep(regex)`` + Like ``keyword(string)`` but accepts a regex. Use ``grep(r'...')`` + to ensure special escape characters are handled correctly. + """ + try: + # i18n: "grep" is a keyword + gr = re.compile(getstring(x, _("grep requires a string"))) + except re.error, e: + raise error.ParseError(_('invalid match pattern: %s') % e) + l = [] + for r in subset: + c = repo[r] + for e in c.files() + [c.user(), c.description()]: + if gr.search(e): + l.append(r) + continue + return l + +def author(repo, subset, x): + """``author(string)`` + Alias for ``user(string)``. + """ + # i18n: "author" is a keyword + n = getstring(x, _("author requires a string")).lower() + return [r for r in subset if n in repo[r].user().lower()] + +def user(repo, subset, x): + """``user(string)`` + User name is string. + """ + return author(repo, subset, x) + +def hasfile(repo, subset, x): + """``file(pattern)`` + Changesets affecting files matched by pattern. + """ + # i18n: "file" is a keyword + pat = getstring(x, _("file requires a pattern")) + m = matchmod.match(repo.root, repo.getcwd(), [pat]) + s = [] + for r in subset: + for f in repo[r].files(): + if m(f): + s.append(r) + continue + return s + +def contains(repo, subset, x): + """``contains(pattern)`` + Revision contains pattern. 
+ """ + # i18n: "contains" is a keyword + pat = getstring(x, _("contains requires a pattern")) + m = matchmod.match(repo.root, repo.getcwd(), [pat]) + s = [] + if m.files() == [pat]: + for r in subset: + if pat in repo[r]: + s.append(r) + continue + else: + for r in subset: + for f in repo[r].manifest(): + if m(f): + s.append(r) + continue + return s + +def checkstatus(repo, subset, pat, field): + m = matchmod.match(repo.root, repo.getcwd(), [pat]) + s = [] + fast = (m.files() == [pat]) + for r in subset: + c = repo[r] + if fast: + if pat not in c.files(): + continue + else: + for f in c.files(): + if m(f): + break + else: + continue + files = repo.status(c.p1().node(), c.node())[field] + if fast: + if pat in files: + s.append(r) + continue + else: + for f in files: + if m(f): + s.append(r) + continue + return s + +def modifies(repo, subset, x): + """``modifies(pattern)`` + Changesets modifying files matched by pattern. + """ + # i18n: "modifies" is a keyword + pat = getstring(x, _("modifies requires a pattern")) + return checkstatus(repo, subset, pat, 0) + +def adds(repo, subset, x): + """``adds(pattern)`` + Changesets that add a file matching pattern. + """ + # i18n: "adds" is a keyword + pat = getstring(x, _("adds requires a pattern")) + return checkstatus(repo, subset, pat, 1) + +def removes(repo, subset, x): + """``removes(pattern)`` + Changesets which remove files matching pattern. + """ + # i18n: "removes" is a keyword + pat = getstring(x, _("removes requires a pattern")) + return checkstatus(repo, subset, pat, 2) + +def merge(repo, subset, x): + """``merge()`` + Changeset is a merge changeset. + """ + # i18n: "merge" is a keyword + getargs(x, 0, 0, _("merge takes no arguments")) + cl = repo.changelog + return [r for r in subset if cl.parentrevs(r)[1] != -1] + +def closed(repo, subset, x): + """``closed()`` + Changeset is closed. + """ + # i18n: "closed" is a keyword + getargs(x, 0, 0, _("closed takes no arguments")) + return [r for r in subset if repo[r].extra().get('close')] + +def head(repo, subset, x): + """``head()`` + Changeset is a named branch head. + """ + # i18n: "head" is a keyword + getargs(x, 0, 0, _("head takes no arguments")) + hs = set() + for b, ls in repo.branchmap().iteritems(): + hs.update(repo[h].rev() for h in ls) + return [r for r in subset if r in hs] + +def reverse(repo, subset, x): + """``reverse(set)`` + Reverse order of set. + """ + l = getset(repo, subset, x) + l.reverse() + return l + +def present(repo, subset, x): + """``present(set)`` + An empty set, if any revision in set isn't found; otherwise, + all revisions in set. + """ + try: + return getset(repo, subset, x) + except error.RepoLookupError: + return [] + +def sort(repo, subset, x): + """``sort(set[, [-]key...])`` + Sort set by keys. The default sort order is ascending, specify a key + as ``-key`` to sort in descending order. 
+ + The keys can be: + + - ``rev`` for the revision number, + - ``branch`` for the branch name, + - ``desc`` for the commit message (description), + - ``user`` for user name (``author`` can be used as an alias), + - ``date`` for the commit date + """ + # i18n: "sort" is a keyword + l = getargs(x, 1, 2, _("sort requires one or two arguments")) + keys = "rev" + if len(l) == 2: + keys = getstring(l[1], _("sort spec must be a string")) + + s = l[0] + keys = keys.split() + l = [] + def invert(s): + return "".join(chr(255 - ord(c)) for c in s) + for r in getset(repo, subset, s): + c = repo[r] + e = [] + for k in keys: + if k == 'rev': + e.append(r) + elif k == '-rev': + e.append(-r) + elif k == 'branch': + e.append(c.branch()) + elif k == '-branch': + e.append(invert(c.branch())) + elif k == 'desc': + e.append(c.description()) + elif k == '-desc': + e.append(invert(c.description())) + elif k in 'user author': + e.append(c.user()) + elif k in '-user -author': + e.append(invert(c.user())) + elif k == 'date': + e.append(c.date()[0]) + elif k == '-date': + e.append(-c.date()[0]) + else: + raise error.ParseError(_("unknown sort key %r") % k) + e.append(r) + l.append(e) + l.sort() + return [e[-1] for e in l] + +def getall(repo, subset, x): + """``all()`` + All changesets, the same as ``0:tip``. + """ + # i18n: "all" is a keyword + getargs(x, 0, 0, _("all takes no arguments")) + return subset + +def heads(repo, subset, x): + """``heads(set)`` + Members of set with no children in set. + """ + s = getset(repo, subset, x) + ps = set(parents(repo, subset, x)) + return [r for r in s if r not in ps] + +def roots(repo, subset, x): + """``roots(set)`` + Changesets with no parent changeset in set. + """ + s = getset(repo, subset, x) + cs = set(children(repo, subset, x)) + return [r for r in s if r not in cs] + +def outgoing(repo, subset, x): + """``outgoing([path])`` + Changesets not found in the specified destination repository, or the + default push location. + """ + import hg # avoid start-up nasties + # i18n: "outgoing" is a keyword + l = getargs(x, 0, 1, _("outgoing requires a repository path")) + # i18n: "outgoing" is a keyword + dest = l and getstring(l[0], _("outgoing requires a repository path")) or '' + dest = repo.ui.expandpath(dest or 'default-push', dest or 'default') + dest, branches = hg.parseurl(dest) + revs, checkout = hg.addbranchrevs(repo, repo, branches, []) + if revs: + revs = [repo.lookup(rev) for rev in revs] + other = hg.repository(hg.remoteui(repo, {}), dest) + repo.ui.pushbuffer() + o = discovery.findoutgoing(repo, other) + repo.ui.popbuffer() + cl = repo.changelog + o = set([cl.rev(r) for r in repo.changelog.nodesbetween(o, revs)[0]]) + return [r for r in subset if r in o] + +def tag(repo, subset, x): + """``tag(name)`` + The specified tag by name, or all tagged revisions if no name is given. 
+ """ + # i18n: "tag" is a keyword + args = getargs(x, 0, 1, _("tag takes one or no arguments")) + cl = repo.changelog + if args: + tn = getstring(args[0], + # i18n: "tag" is a keyword + _('the argument to tag must be a string')) + s = set([cl.rev(n) for t, n in repo.tagslist() if t == tn]) + else: + s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip']) + return [r for r in subset if r in s] + +def tagged(repo, subset, x): + return tag(repo, subset, x) + +symbols = { + "adds": adds, + "all": getall, + "ancestor": ancestor, + "ancestors": ancestors, + "author": author, + "branch": branch, + "children": children, + "closed": closed, + "contains": contains, + "date": date, + "descendants": descendants, + "file": hasfile, + "follow": follow, + "grep": grep, + "head": head, + "heads": heads, + "keyword": keyword, + "limit": limit, + "max": maxrev, + "min": minrev, + "merge": merge, + "modifies": modifies, + "id": node, + "outgoing": outgoing, + "p1": p1, + "p2": p2, + "parents": parents, + "present": present, + "removes": removes, + "reverse": reverse, + "rev": rev, + "roots": roots, + "sort": sort, + "tag": tag, + "tagged": tagged, + "user": user, +} + +methods = { + "range": rangeset, + "string": stringset, + "symbol": symbolset, + "and": andset, + "or": orset, + "not": notset, + "list": listset, + "func": func, +} + +def optimize(x, small): + if x == None: + return 0, x + + smallbonus = 1 + if small: + smallbonus = .5 + + op = x[0] + if op == 'minus': + return optimize(('and', x[1], ('not', x[2])), small) + elif op == 'dagrange': + return optimize(('and', ('func', ('symbol', 'descendants'), x[1]), + ('func', ('symbol', 'ancestors'), x[2])), small) + elif op == 'dagrangepre': + return optimize(('func', ('symbol', 'ancestors'), x[1]), small) + elif op == 'dagrangepost': + return optimize(('func', ('symbol', 'descendants'), x[1]), small) + elif op == 'rangepre': + return optimize(('range', ('string', '0'), x[1]), small) + elif op == 'rangepost': + return optimize(('range', x[1], ('string', 'tip')), small) + elif op == 'negate': + return optimize(('string', + '-' + getstring(x[1], _("can't negate that"))), small) + elif op in 'string symbol negate': + return smallbonus, x # single revisions are small + elif op == 'and' or op == 'dagrange': + wa, ta = optimize(x[1], True) + wb, tb = optimize(x[2], True) + w = min(wa, wb) + if wa > wb: + return w, (op, tb, ta) + return w, (op, ta, tb) + elif op == 'or': + wa, ta = optimize(x[1], False) + wb, tb = optimize(x[2], False) + if wb < wa: + wb, wa = wa, wb + return max(wa, wb), (op, ta, tb) + elif op == 'not': + o = optimize(x[1], not small) + return o[0], (op, o[1]) + elif op == 'group': + return optimize(x[1], small) + elif op in 'range list': + wa, ta = optimize(x[1], small) + wb, tb = optimize(x[2], small) + return wa + wb, (op, ta, tb) + elif op == 'func': + f = getstring(x[1], _("not a symbol")) + wa, ta = optimize(x[2], small) + if f in "grep date user author keyword branch file outgoing": + w = 10 # slow + elif f in "modifies adds removes": + w = 30 # slower + elif f == "contains": + w = 100 # very slow + elif f == "ancestor": + w = 1 * smallbonus + elif f == "reverse limit": + w = 0 + elif f in "sort": + w = 10 # assume most sorts look at changelog + else: + w = 1 + return w + wa, (op, x[1], ta) + return 1, x + +parse = parser.parser(tokenize, elements).parse + +def match(spec): + if not spec: + raise error.ParseError(_("empty query")) + tree = parse(spec) + weight, tree = optimize(tree, True) + def mfunc(repo, subset): + return 
getset(repo, subset, tree) + return mfunc + +def makedoc(topic, doc): + """Generate and include predicates help in revsets topic.""" + predicates = [] + for name in sorted(symbols): + text = symbols[name].__doc__ + if not text: + continue + text = gettext(text.rstrip()) + lines = text.splitlines() + lines[1:] = [(' ' + l.strip()) for l in lines[1:]] + predicates.append('\n'.join(lines)) + predicates = '\n\n'.join(predicates) + doc = doc.replace('.. predicatesmarker', predicates) + return doc + +# tell hggettext to extract docstrings from these functions: +i18nfunctions = symbols.values() diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/revset.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/revset.pyo Binary files differnew file mode 100644 index 0000000..26ad568 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/revset.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/similar.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/similar.py new file mode 100644 index 0000000..b18795b --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/similar.py @@ -0,0 +1,103 @@ +# similar.py - mechanisms for finding similar files +# +# Copyright 2005-2007 Matt Mackall <mpm@selenic.com> +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +from i18n import _ +import util +import mdiff +import bdiff + +def _findexactmatches(repo, added, removed): + '''find renamed files that have no changes + + Takes a list of new filectxs and a list of removed filectxs, and yields + (before, after) tuples of exact matches. + ''' + numfiles = len(added) + len(removed) + + # Get hashes of removed files. + hashes = {} + for i, fctx in enumerate(removed): + repo.ui.progress(_('searching for exact renames'), i, total=numfiles) + h = util.sha1(fctx.data()).digest() + hashes[h] = fctx + + # For each added file, see if it corresponds to a removed file. + for i, fctx in enumerate(added): + repo.ui.progress(_('searching for exact renames'), i + len(removed), + total=numfiles) + h = util.sha1(fctx.data()).digest() + if h in hashes: + yield (hashes[h], fctx) + + # Done + repo.ui.progress(_('searching for exact renames'), None) + +def _findsimilarmatches(repo, added, removed, threshold): + '''find potentially renamed files based on similar file content + + Takes a list of new filectxs and a list of removed filectxs, and yields + (before, after, score) tuples of partial matches. 
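# Illustrative sketch (not from the diff above): _findexactmatches() in
# similar.py boils down to hashing each removed file once and probing every
# added file against that table.  Plain strings stand in for filectx objects.
import hashlib

def exactmatches(added, removed):
    # added/removed: made-up dicts mapping path -> file contents
    hashes = dict((hashlib.sha1(data).digest(), path)
                  for path, data in removed.items())
    for path, data in added.items():
        h = hashlib.sha1(data).digest()
        if h in hashes:
            yield hashes[h], path            # (old path, new path)

print(list(exactmatches({'new/a.txt': 'hello\n'},
                        {'old/a.txt': 'hello\n'})))
# -> [('old/a.txt', 'new/a.txt')]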
+ ''' + copies = {} + for i, r in enumerate(removed): + repo.ui.progress(_('searching for similar files'), i, total=len(removed)) + + # lazily load text + @util.cachefunc + def data(): + orig = r.data() + return orig, mdiff.splitnewlines(orig) + + def score(text): + orig, lines = data() + # bdiff.blocks() returns blocks of matching lines + # count the number of bytes in each + equal = 0 + matches = bdiff.blocks(text, orig) + for x1, x2, y1, y2 in matches: + for line in lines[y1:y2]: + equal += len(line) + + lengths = len(text) + len(orig) + return equal * 2.0 / lengths + + for a in added: + bestscore = copies.get(a, (None, threshold))[1] + myscore = score(a.data()) + if myscore >= bestscore: + copies[a] = (r, myscore) + repo.ui.progress(_('searching'), None) + + for dest, v in copies.iteritems(): + source, score = v + yield source, dest, score + +def findrenames(repo, added, removed, threshold): + '''find renamed files -- yields (before, after, score) tuples''' + parentctx = repo['.'] + workingctx = repo[None] + + # Zero length files will be frequently unrelated to each other, and + # tracking the deletion/addition of such a file will probably cause more + # harm than good. We strip them out here to avoid matching them later on. + addedfiles = set([workingctx[fp] for fp in added + if workingctx[fp].size() > 0]) + removedfiles = set([parentctx[fp] for fp in removed + if fp in parentctx and parentctx[fp].size() > 0]) + + # Find exact matches. + for (a, b) in _findexactmatches(repo, + sorted(addedfiles), sorted(removedfiles)): + addedfiles.remove(b) + yield (a.path(), b.path(), 1.0) + + # If the user requested similar files to be matched, search for them also. + if threshold < 1.0: + for (a, b, score) in _findsimilarmatches(repo, + sorted(addedfiles), sorted(removedfiles), threshold): + yield (a.path(), b.path(), score) + diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/similar.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/similar.pyo Binary files differnew file mode 100644 index 0000000..dcd7aac --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/similar.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/simplemerge.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/simplemerge.py new file mode 100644 index 0000000..a4e3f57 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/simplemerge.py @@ -0,0 +1,450 @@ +# Copyright (C) 2004, 2005 Canonical Ltd +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + +# mbp: "you know that thing where cvs gives you conflict markers?" +# s: "i hate that." + +from i18n import _ +import util, mdiff +import sys, os + +class CantReprocessAndShowBase(Exception): + pass + +def intersect(ra, rb): + """Given two ranges return the range where they intersect or None. 
+ + >>> intersect((0, 10), (0, 6)) + (0, 6) + >>> intersect((0, 10), (5, 15)) + (5, 10) + >>> intersect((0, 10), (10, 15)) + >>> intersect((0, 9), (10, 15)) + >>> intersect((0, 9), (7, 15)) + (7, 9) + """ + assert ra[0] <= ra[1] + assert rb[0] <= rb[1] + + sa = max(ra[0], rb[0]) + sb = min(ra[1], rb[1]) + if sa < sb: + return sa, sb + else: + return None + +def compare_range(a, astart, aend, b, bstart, bend): + """Compare a[astart:aend] == b[bstart:bend], without slicing. + """ + if (aend - astart) != (bend - bstart): + return False + for ia, ib in zip(xrange(astart, aend), xrange(bstart, bend)): + if a[ia] != b[ib]: + return False + else: + return True + +class Merge3Text(object): + """3-way merge of texts. + + Given strings BASE, OTHER, THIS, tries to produce a combined text + incorporating the changes from both BASE->OTHER and BASE->THIS.""" + def __init__(self, basetext, atext, btext, base=None, a=None, b=None): + self.basetext = basetext + self.atext = atext + self.btext = btext + if base is None: + base = mdiff.splitnewlines(basetext) + if a is None: + a = mdiff.splitnewlines(atext) + if b is None: + b = mdiff.splitnewlines(btext) + self.base = base + self.a = a + self.b = b + + def merge_lines(self, + name_a=None, + name_b=None, + name_base=None, + start_marker='<<<<<<<', + mid_marker='=======', + end_marker='>>>>>>>', + base_marker=None, + reprocess=False): + """Return merge in cvs-like form. + """ + self.conflicts = False + newline = '\n' + if len(self.a) > 0: + if self.a[0].endswith('\r\n'): + newline = '\r\n' + elif self.a[0].endswith('\r'): + newline = '\r' + if base_marker and reprocess: + raise CantReprocessAndShowBase() + if name_a: + start_marker = start_marker + ' ' + name_a + if name_b: + end_marker = end_marker + ' ' + name_b + if name_base and base_marker: + base_marker = base_marker + ' ' + name_base + merge_regions = self.merge_regions() + if reprocess is True: + merge_regions = self.reprocess_merge_regions(merge_regions) + for t in merge_regions: + what = t[0] + if what == 'unchanged': + for i in range(t[1], t[2]): + yield self.base[i] + elif what == 'a' or what == 'same': + for i in range(t[1], t[2]): + yield self.a[i] + elif what == 'b': + for i in range(t[1], t[2]): + yield self.b[i] + elif what == 'conflict': + self.conflicts = True + yield start_marker + newline + for i in range(t[3], t[4]): + yield self.a[i] + if base_marker is not None: + yield base_marker + newline + for i in range(t[1], t[2]): + yield self.base[i] + yield mid_marker + newline + for i in range(t[5], t[6]): + yield self.b[i] + yield end_marker + newline + else: + raise ValueError(what) + + def merge_annotated(self): + """Return merge with conflicts, showing origin of lines. + + Most useful for debugging merge. + """ + for t in self.merge_regions(): + what = t[0] + if what == 'unchanged': + for i in range(t[1], t[2]): + yield 'u | ' + self.base[i] + elif what == 'a' or what == 'same': + for i in range(t[1], t[2]): + yield what[0] + ' | ' + self.a[i] + elif what == 'b': + for i in range(t[1], t[2]): + yield 'b | ' + self.b[i] + elif what == 'conflict': + yield '<<<<\n' + for i in range(t[3], t[4]): + yield 'A | ' + self.a[i] + yield '----\n' + for i in range(t[5], t[6]): + yield 'B | ' + self.b[i] + yield '>>>>\n' + else: + raise ValueError(what) + + def merge_groups(self): + """Yield sequence of line groups. 
Each one is a tuple: + + 'unchanged', lines + Lines unchanged from base + + 'a', lines + Lines taken from a + + 'same', lines + Lines taken from a (and equal to b) + + 'b', lines + Lines taken from b + + 'conflict', base_lines, a_lines, b_lines + Lines from base were changed to either a or b and conflict. + """ + for t in self.merge_regions(): + what = t[0] + if what == 'unchanged': + yield what, self.base[t[1]:t[2]] + elif what == 'a' or what == 'same': + yield what, self.a[t[1]:t[2]] + elif what == 'b': + yield what, self.b[t[1]:t[2]] + elif what == 'conflict': + yield (what, + self.base[t[1]:t[2]], + self.a[t[3]:t[4]], + self.b[t[5]:t[6]]) + else: + raise ValueError(what) + + def merge_regions(self): + """Return sequences of matching and conflicting regions. + + This returns tuples, where the first value says what kind we + have: + + 'unchanged', start, end + Take a region of base[start:end] + + 'same', astart, aend + b and a are different from base but give the same result + + 'a', start, end + Non-clashing insertion from a[start:end] + + Method is as follows: + + The two sequences align only on regions which match the base + and both descendents. These are found by doing a two-way diff + of each one against the base, and then finding the + intersections between those regions. These "sync regions" + are by definition unchanged in both and easily dealt with. + + The regions in between can be in any of three cases: + conflicted, or changed on only one side. + """ + + # section a[0:ia] has been disposed of, etc + iz = ia = ib = 0 + + for zmatch, zend, amatch, aend, bmatch, bend in self.find_sync_regions(): + #print 'match base [%d:%d]' % (zmatch, zend) + + matchlen = zend - zmatch + assert matchlen >= 0 + assert matchlen == (aend - amatch) + assert matchlen == (bend - bmatch) + + len_a = amatch - ia + len_b = bmatch - ib + len_base = zmatch - iz + assert len_a >= 0 + assert len_b >= 0 + assert len_base >= 0 + + #print 'unmatched a=%d, b=%d' % (len_a, len_b) + + if len_a or len_b: + # try to avoid actually slicing the lists + equal_a = compare_range(self.a, ia, amatch, + self.base, iz, zmatch) + equal_b = compare_range(self.b, ib, bmatch, + self.base, iz, zmatch) + same = compare_range(self.a, ia, amatch, + self.b, ib, bmatch) + + if same: + yield 'same', ia, amatch + elif equal_a and not equal_b: + yield 'b', ib, bmatch + elif equal_b and not equal_a: + yield 'a', ia, amatch + elif not equal_a and not equal_b: + yield 'conflict', iz, zmatch, ia, amatch, ib, bmatch + else: + raise AssertionError("can't handle a=b=base but unmatched") + + ia = amatch + ib = bmatch + iz = zmatch + + # if the same part of the base was deleted on both sides + # that's OK, we can just skip it. + + + if matchlen > 0: + assert ia == amatch + assert ib == bmatch + assert iz == zmatch + + yield 'unchanged', zmatch, zend + iz = zend + ia = aend + ib = bend + + def reprocess_merge_regions(self, merge_regions): + """Where there are conflict regions, remove the agreed lines. + + Lines where both A and B have made the same changes are + eliminated. 
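# Hedged usage sketch: assuming this module is importable as
# mercurial.simplemerge (as it is inside this egg), a three-way merge with
# cvs-style markers looks roughly like this; the texts are made up.
from mercurial.simplemerge import Merge3Text

base  = "one\ntwo\nthree\n"
local = "one\nTWO\nthree\n"     # changes line 2
other = "one\ntwo\nTHREE\n"     # changes line 3
m3 = Merge3Text(base, local, other)
print("".join(m3.merge_lines(name_a="local", name_b="other")))
# -> "one\nTWO\nTHREE\n"; both edits apply cleanly, so m3.conflicts is False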
+ """ + for region in merge_regions: + if region[0] != "conflict": + yield region + continue + type, iz, zmatch, ia, amatch, ib, bmatch = region + a_region = self.a[ia:amatch] + b_region = self.b[ib:bmatch] + matches = mdiff.get_matching_blocks(''.join(a_region), + ''.join(b_region)) + next_a = ia + next_b = ib + for region_ia, region_ib, region_len in matches[:-1]: + region_ia += ia + region_ib += ib + reg = self.mismatch_region(next_a, region_ia, next_b, + region_ib) + if reg is not None: + yield reg + yield 'same', region_ia, region_len + region_ia + next_a = region_ia + region_len + next_b = region_ib + region_len + reg = self.mismatch_region(next_a, amatch, next_b, bmatch) + if reg is not None: + yield reg + + def mismatch_region(next_a, region_ia, next_b, region_ib): + if next_a < region_ia or next_b < region_ib: + return 'conflict', None, None, next_a, region_ia, next_b, region_ib + mismatch_region = staticmethod(mismatch_region) + + def find_sync_regions(self): + """Return a list of sync regions, where both descendents match the base. + + Generates a list of (base1, base2, a1, a2, b1, b2). There is + always a zero-length sync region at the end of all the files. + """ + + ia = ib = 0 + amatches = mdiff.get_matching_blocks(self.basetext, self.atext) + bmatches = mdiff.get_matching_blocks(self.basetext, self.btext) + len_a = len(amatches) + len_b = len(bmatches) + + sl = [] + + while ia < len_a and ib < len_b: + abase, amatch, alen = amatches[ia] + bbase, bmatch, blen = bmatches[ib] + + # there is an unconflicted block at i; how long does it + # extend? until whichever one ends earlier. + i = intersect((abase, abase + alen), (bbase, bbase + blen)) + if i: + intbase = i[0] + intend = i[1] + intlen = intend - intbase + + # found a match of base[i[0], i[1]]; this may be less than + # the region that matches in either one + assert intlen <= alen + assert intlen <= blen + assert abase <= intbase + assert bbase <= intbase + + asub = amatch + (intbase - abase) + bsub = bmatch + (intbase - bbase) + aend = asub + intlen + bend = bsub + intlen + + assert self.base[intbase:intend] == self.a[asub:aend], \ + (self.base[intbase:intend], self.a[asub:aend]) + + assert self.base[intbase:intend] == self.b[bsub:bend] + + sl.append((intbase, intend, + asub, aend, + bsub, bend)) + + # advance whichever one ends first in the base text + if (abase + alen) < (bbase + blen): + ia += 1 + else: + ib += 1 + + intbase = len(self.base) + abase = len(self.a) + bbase = len(self.b) + sl.append((intbase, intbase, abase, abase, bbase, bbase)) + + return sl + + def find_unconflicted(self): + """Return a list of ranges in base that are not conflicted.""" + am = mdiff.get_matching_blocks(self.basetext, self.atext) + bm = mdiff.get_matching_blocks(self.basetext, self.btext) + + unc = [] + + while am and bm: + # there is an unconflicted block at i; how long does it + # extend? until whichever one ends earlier. 
+ a1 = am[0][0] + a2 = a1 + am[0][2] + b1 = bm[0][0] + b2 = b1 + bm[0][2] + i = intersect((a1, a2), (b1, b2)) + if i: + unc.append(i) + + if a2 < b2: + del am[0] + else: + del bm[0] + + return unc + +def simplemerge(ui, local, base, other, **opts): + def readfile(filename): + f = open(filename, "rb") + text = f.read() + f.close() + if util.binary(text): + msg = _("%s looks like a binary file.") % filename + if not opts.get('text'): + raise util.Abort(msg) + elif not opts.get('quiet'): + ui.warn(_('warning: %s\n') % msg) + return text + + name_a = local + name_b = other + labels = opts.get('label', []) + if labels: + name_a = labels.pop(0) + if labels: + name_b = labels.pop(0) + if labels: + raise util.Abort(_("can only specify two labels.")) + + localtext = readfile(local) + basetext = readfile(base) + othertext = readfile(other) + + local = os.path.realpath(local) + if not opts.get('print'): + opener = util.opener(os.path.dirname(local)) + out = opener(os.path.basename(local), "w", atomictemp=True) + else: + out = sys.stdout + + reprocess = not opts.get('no_minimal') + + m3 = Merge3Text(basetext, localtext, othertext) + for line in m3.merge_lines(name_a=name_a, name_b=name_b, + reprocess=reprocess): + out.write(line) + + if not opts.get('print'): + out.rename() + + if m3.conflicts: + if not opts.get('quiet'): + ui.warn(_("warning: conflicts during merge.\n")) + return 1 diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/simplemerge.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/simplemerge.pyo Binary files differnew file mode 100644 index 0000000..367afbd --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/simplemerge.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/sshrepo.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/sshrepo.py new file mode 100644 index 0000000..a2b8f7b --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/sshrepo.py @@ -0,0 +1,197 @@ +# sshrepo.py - ssh repository proxy class for mercurial +# +# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com> +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +from i18n import _ +import util, error, wireproto +import re + +class remotelock(object): + def __init__(self, repo): + self.repo = repo + def release(self): + self.repo.unlock() + self.repo = None + def __del__(self): + if self.repo: + self.release() + +class sshrepository(wireproto.wirerepository): + def __init__(self, ui, path, create=0): + self._url = path + self.ui = ui + + m = re.match(r'^ssh://(([^@]+)@)?([^:/]+)(:(\d+))?(/(.*))?$', path) + if not m: + self._abort(error.RepoError(_("couldn't parse location %s") % path)) + + self.user = m.group(2) + self.host = m.group(3) + self.port = m.group(5) + self.path = m.group(7) or "." 
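# Worked example of the ssh URL regex above (the location is made up, not
# from the original source): 'ssh://bob@hg.example.com:2222/repos/project'
# yields user='bob', host='hg.example.com', port='2222',
# path='repos/project'; with no path component, self.path falls back to ".".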
+ + sshcmd = self.ui.config("ui", "ssh", "ssh") + remotecmd = self.ui.config("ui", "remotecmd", "hg") + + args = util.sshargs(sshcmd, self.host, self.user, self.port) + + if create: + cmd = '%s %s "%s init %s"' + cmd = cmd % (sshcmd, args, remotecmd, self.path) + + ui.note(_('running %s\n') % cmd) + res = util.system(cmd) + if res != 0: + self._abort(error.RepoError(_("could not create remote repo"))) + + self.validate_repo(ui, sshcmd, args, remotecmd) + + def url(self): + return self._url + + def validate_repo(self, ui, sshcmd, args, remotecmd): + # cleanup up previous run + self.cleanup() + + cmd = '%s %s "%s -R %s serve --stdio"' + cmd = cmd % (sshcmd, args, remotecmd, self.path) + + cmd = util.quotecommand(cmd) + ui.note(_('running %s\n') % cmd) + self.pipeo, self.pipei, self.pipee = util.popen3(cmd) + + # skip any noise generated by remote shell + self._callstream("hello") + r = self._callstream("between", pairs=("%s-%s" % ("0"*40, "0"*40))) + lines = ["", "dummy"] + max_noise = 500 + while lines[-1] and max_noise: + l = r.readline() + self.readerr() + if lines[-1] == "1\n" and l == "\n": + break + if l: + ui.debug("remote: ", l) + lines.append(l) + max_noise -= 1 + else: + self._abort(error.RepoError(_("no suitable response from remote hg"))) + + self.capabilities = set() + for l in reversed(lines): + if l.startswith("capabilities:"): + self.capabilities.update(l[:-1].split(":")[1].split()) + break + + def readerr(self): + while 1: + size = util.fstat(self.pipee).st_size + if size == 0: + break + l = self.pipee.readline() + if not l: + break + self.ui.status(_("remote: "), l) + + def _abort(self, exception): + self.cleanup() + raise exception + + def cleanup(self): + try: + self.pipeo.close() + self.pipei.close() + # read the error descriptor until EOF + for l in self.pipee: + self.ui.status(_("remote: "), l) + self.pipee.close() + except: + pass + + __del__ = cleanup + + def _callstream(self, cmd, **args): + self.ui.debug("sending %s command\n" % cmd) + self.pipeo.write("%s\n" % cmd) + for k, v in sorted(args.iteritems()): + self.pipeo.write("%s %d\n" % (k, len(v))) + self.pipeo.write(v) + self.pipeo.flush() + + return self.pipei + + def _call(self, cmd, **args): + self._callstream(cmd, **args) + return self._recv() + + def _callpush(self, cmd, fp, **args): + r = self._call(cmd, **args) + if r: + return '', r + while 1: + d = fp.read(4096) + if not d: + break + self._send(d) + self._send("", flush=True) + r = self._recv() + if r: + return '', r + return self._recv(), '' + + def _decompress(self, stream): + return stream + + def _recv(self): + l = self.pipei.readline() + self.readerr() + try: + l = int(l) + except: + self._abort(error.ResponseError(_("unexpected response:"), l)) + return self.pipei.read(l) + + def _send(self, data, flush=False): + self.pipeo.write("%d\n" % len(data)) + if data: + self.pipeo.write(data) + if flush: + self.pipeo.flush() + self.readerr() + + def lock(self): + self._call("lock") + return remotelock(self) + + def unlock(self): + self._call("unlock") + + def addchangegroup(self, cg, source, url): + '''Send a changegroup to the remote server. Return an integer + similar to unbundle(). 
DEPRECATED, since it requires locking the + remote.''' + d = self._call("addchangegroup") + if d: + self._abort(error.RepoError(_("push refused: %s") % d)) + while 1: + d = cg.read(4096) + if not d: + break + self.pipeo.write(d) + self.readerr() + + self.pipeo.flush() + + self.readerr() + r = self._recv() + if not r: + return 1 + try: + return int(r) + except: + self._abort(error.ResponseError(_("unexpected response:"), r)) + +instance = sshrepository diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/sshrepo.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/sshrepo.pyo Binary files differnew file mode 100644 index 0000000..27170f4 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/sshrepo.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/sshserver.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/sshserver.py new file mode 100644 index 0000000..8d2e4af --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/sshserver.py @@ -0,0 +1,144 @@ +# sshserver.py - ssh protocol server support for mercurial +# +# Copyright 2005-2007 Matt Mackall <mpm@selenic.com> +# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com> +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +import util, hook, wireproto, changegroup +import os, sys + +class sshserver(object): + def __init__(self, ui, repo): + self.ui = ui + self.repo = repo + self.lock = None + self.fin = sys.stdin + self.fout = sys.stdout + + hook.redirect(True) + sys.stdout = sys.stderr + + # Prevent insertion/deletion of CRs + util.set_binary(self.fin) + util.set_binary(self.fout) + + def getargs(self, args): + data = {} + keys = args.split() + count = len(keys) + for n in xrange(len(keys)): + argline = self.fin.readline()[:-1] + arg, l = argline.split() + val = self.fin.read(int(l)) + if arg not in keys: + raise util.Abort("unexpected parameter %r" % arg) + if arg == '*': + star = {} + for n in xrange(int(l)): + arg, l = argline.split() + val = self.fin.read(int(l)) + star[arg] = val + data['*'] = star + else: + data[arg] = val + return [data[k] for k in keys] + + def getarg(self, name): + return self.getargs(name)[0] + + def getfile(self, fpout): + self.sendresponse('') + count = int(self.fin.readline()) + while count: + fpout.write(self.fin.read(count)) + count = int(self.fin.readline()) + + def redirect(self): + pass + + def groupchunks(self, changegroup): + while True: + d = changegroup.read(4096) + if not d: + break + yield d + + def sendresponse(self, v): + self.fout.write("%d\n" % len(v)) + self.fout.write(v) + self.fout.flush() + + def sendstream(self, source): + for chunk in source.gen: + self.fout.write(chunk) + self.fout.flush() + + def sendpushresponse(self, rsp): + self.sendresponse('') + self.sendresponse(str(rsp.res)) + + def sendpusherror(self, rsp): + self.sendresponse(rsp.res) + + def serve_forever(self): + try: + while self.serve_one(): + pass + finally: + if self.lock is not None: + self.lock.release() + sys.exit(0) + + handlers = { + str: sendresponse, + wireproto.streamres: sendstream, + wireproto.pushres: sendpushresponse, + wireproto.pusherr: sendpusherror, + } + + def serve_one(self): + cmd = self.fin.readline()[:-1] + if cmd and cmd in wireproto.commands: + rsp = wireproto.dispatch(self.repo, self, cmd) + self.handlers[rsp.__class__](self, rsp) + elif cmd: + impl = getattr(self, 'do_' + cmd, None) + if impl: + r = impl() + if r is not None: 
+ self.sendresponse(r) + else: self.sendresponse("") + return cmd != '' + + def do_lock(self): + '''DEPRECATED - allowing remote client to lock repo is not safe''' + + self.lock = self.repo.lock() + return "" + + def do_unlock(self): + '''DEPRECATED''' + + if self.lock: + self.lock.release() + self.lock = None + return "" + + def do_addchangegroup(self): + '''DEPRECATED''' + + if not self.lock: + self.sendresponse("not locked") + return + + self.sendresponse("") + cg = changegroup.unbundle10(self.fin, "UN") + r = self.repo.addchangegroup(cg, 'serve', self._client(), + lock=self.lock) + return str(r) + + def _client(self): + client = os.environ.get('SSH_CLIENT', '').split(' ', 1)[0] + return 'remote:ssh:' + client diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/sshserver.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/sshserver.pyo Binary files differnew file mode 100644 index 0000000..aa665eb --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/sshserver.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/statichttprepo.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/statichttprepo.py new file mode 100644 index 0000000..792497d --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/statichttprepo.py @@ -0,0 +1,146 @@ +# statichttprepo.py - simple http repository class for mercurial +# +# This provides read-only repo access to repositories exported via static http +# +# Copyright 2005-2007 Matt Mackall <mpm@selenic.com> +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +from i18n import _ +import changelog, byterange, url, error +import localrepo, manifest, util, store +import urllib, urllib2, errno + +class httprangereader(object): + def __init__(self, url, opener): + # we assume opener has HTTPRangeHandler + self.url = url + self.pos = 0 + self.opener = opener + self.name = url + def seek(self, pos): + self.pos = pos + def read(self, bytes=None): + req = urllib2.Request(self.url) + end = '' + if bytes: + end = self.pos + bytes - 1 + req.add_header('Range', 'bytes=%d-%s' % (self.pos, end)) + + try: + f = self.opener.open(req) + data = f.read() + if hasattr(f, 'getcode'): + # python 2.6+ + code = f.getcode() + elif hasattr(f, 'code'): + # undocumented attribute, seems to be set in 2.4 and 2.5 + code = f.code + else: + # Don't know how to check, hope for the best. + code = 206 + except urllib2.HTTPError, inst: + num = inst.code == 404 and errno.ENOENT or None + raise IOError(num, inst) + except urllib2.URLError, inst: + raise IOError(None, inst.reason[1]) + + if code == 200: + # HTTPRangeHandler does nothing if remote does not support + # Range headers and returns the full entity. Let's slice it. 
+ if bytes: + data = data[self.pos:self.pos + bytes] + else: + data = data[self.pos:] + elif bytes: + data = data[:bytes] + self.pos += len(data) + return data + def __iter__(self): + return iter(self.read().splitlines(1)) + def close(self): + pass + +def build_opener(ui, authinfo): + # urllib cannot handle URLs with embedded user or passwd + urlopener = url.opener(ui, authinfo) + urlopener.add_handler(byterange.HTTPRangeHandler()) + + def opener(base): + """return a function that opens files over http""" + p = base + def o(path, mode="r", atomictemp=None): + if 'a' in mode or 'w' in mode: + raise IOError('Permission denied') + f = "/".join((p, urllib.quote(path))) + return httprangereader(f, urlopener) + return o + + opener.options = {'nonlazy': 1} + return opener + +class statichttprepository(localrepo.localrepository): + def __init__(self, ui, path): + self._url = path + self.ui = ui + + self.root = path + self.path, authinfo = url.getauthinfo(path.rstrip('/') + "/.hg") + + opener = build_opener(ui, authinfo) + self.opener = opener(self.path) + + # find requirements + try: + requirements = self.opener("requires").read().splitlines() + except IOError, inst: + if inst.errno != errno.ENOENT: + raise + # check if it is a non-empty old-style repository + try: + self.opener("00changelog.i").read(1) + except IOError, inst: + if inst.errno != errno.ENOENT: + raise + # we do not care about empty old-style repositories here + msg = _("'%s' does not appear to be an hg repository") % path + raise error.RepoError(msg) + requirements = [] + + # check them + for r in requirements: + if r not in self.supported: + raise error.RepoError(_("requirement '%s' not supported") % r) + + # setup store + def pjoin(a, b): + return a + '/' + b + self.store = store.store(requirements, self.path, opener, pjoin) + self.spath = self.store.path + self.sopener = self.store.opener + self.sjoin = self.store.join + + self.manifest = manifest.manifest(self.sopener) + self.changelog = changelog.changelog(self.sopener) + self._tags = None + self.nodetagscache = None + self._branchcache = None + self._branchcachetip = None + self.encodepats = None + self.decodepats = None + self.capabilities.remove("pushkey") + + def url(self): + return self._url + + def local(self): + return False + + def lock(self, wait=True): + raise util.Abort(_('cannot lock static-http repository')) + +def instance(ui, path, create): + if create: + raise util.Abort(_('cannot create new static-http repository')) + return statichttprepository(ui, path[7:]) diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/statichttprepo.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/statichttprepo.pyo Binary files differnew file mode 100644 index 0000000..ea6957a --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/statichttprepo.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/store.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/store.py new file mode 100644 index 0000000..c155fe9 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/store.py @@ -0,0 +1,339 @@ +# store.py - repository store handling for Mercurial +# +# Copyright 2008 Matt Mackall <mpm@selenic.com> +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. 
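# Illustrative sketch (not from the diff above): httprangereader in
# statichttprepo.py works by sending plain HTTP requests with a Range
# header, which is why any dumb web server can serve a static-http repo.
# The host below is made up and this is a sketch, not a test.
import urllib2

req = urllib2.Request('http://hg.example.com/repo/.hg/store/00changelog.i')
req.add_header('Range', 'bytes=0-63')        # ask for the first 64 bytes
f = urllib2.urlopen(req)
data = f.read()
# 206 means the range was honoured; 200 means the server sent the whole
# file and the caller must slice the result itself, as read() above does.
print((f.getcode(), len(data)))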
+ +from i18n import _ +import osutil, util +import os, stat + +_sha = util.sha1 + +# This avoids a collision between a file named foo and a dir named +# foo.i or foo.d +def encodedir(path): + if not path.startswith('data/'): + return path + return (path + .replace(".hg/", ".hg.hg/") + .replace(".i/", ".i.hg/") + .replace(".d/", ".d.hg/")) + +def decodedir(path): + if not path.startswith('data/') or ".hg/" not in path: + return path + return (path + .replace(".d.hg/", ".d/") + .replace(".i.hg/", ".i/") + .replace(".hg.hg/", ".hg/")) + +def _buildencodefun(): + e = '_' + win_reserved = [ord(x) for x in '\\:*?"<>|'] + cmap = dict([(chr(x), chr(x)) for x in xrange(127)]) + for x in (range(32) + range(126, 256) + win_reserved): + cmap[chr(x)] = "~%02x" % x + for x in range(ord("A"), ord("Z")+1) + [ord(e)]: + cmap[chr(x)] = e + chr(x).lower() + dmap = {} + for k, v in cmap.iteritems(): + dmap[v] = k + def decode(s): + i = 0 + while i < len(s): + for l in xrange(1, 4): + try: + yield dmap[s[i:i + l]] + i += l + break + except KeyError: + pass + else: + raise KeyError + return (lambda s: "".join([cmap[c] for c in encodedir(s)]), + lambda s: decodedir("".join(list(decode(s))))) + +encodefilename, decodefilename = _buildencodefun() + +def _build_lower_encodefun(): + win_reserved = [ord(x) for x in '\\:*?"<>|'] + cmap = dict([(chr(x), chr(x)) for x in xrange(127)]) + for x in (range(32) + range(126, 256) + win_reserved): + cmap[chr(x)] = "~%02x" % x + for x in range(ord("A"), ord("Z")+1): + cmap[chr(x)] = chr(x).lower() + return lambda s: "".join([cmap[c] for c in s]) + +lowerencode = _build_lower_encodefun() + +_windows_reserved_filenames = '''con prn aux nul + com1 com2 com3 com4 com5 com6 com7 com8 com9 + lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split() +def _auxencode(path, dotencode): + res = [] + for n in path.split('/'): + if n: + base = n.split('.')[0] + if base and (base in _windows_reserved_filenames): + # encode third letter ('aux' -> 'au~78') + ec = "~%02x" % ord(n[2]) + n = n[0:2] + ec + n[3:] + if n[-1] in '. ': + # encode last period or space ('foo...' -> 'foo..~2e') + n = n[:-1] + "~%02x" % ord(n[-1]) + if dotencode and n[0] in '. ': + n = "~%02x" % ord(n[0]) + n[1:] + res.append(n) + return '/'.join(res) + +MAX_PATH_LEN_IN_HGSTORE = 120 +DIR_PREFIX_LEN = 8 +_MAX_SHORTENED_DIRS_LEN = 8 * (DIR_PREFIX_LEN + 1) - 4 +def _hybridencode(path, auxencode): + '''encodes path with a length limit + + Encodes all paths that begin with 'data/', according to the following. + + Default encoding (reversible): + + Encodes all uppercase letters 'X' as '_x'. All reserved or illegal + characters are encoded as '~xx', where xx is the two digit hex code + of the character (see encodefilename). + Relevant path components consisting of Windows reserved filenames are + masked by encoding the third character ('aux' -> 'au~78', see auxencode). + + Hashed encoding (not reversible): + + If the default-encoded path is longer than MAX_PATH_LEN_IN_HGSTORE, a + non-reversible hybrid hashing of the path is done instead. + This encoding uses up to DIR_PREFIX_LEN characters of all directory + levels of the lowerencoded path, but not more levels than can fit into + _MAX_SHORTENED_DIRS_LEN. + Then follows the filler followed by the sha digest of the full path. + The filler is the beginning of the basename of the lowerencoded path + (the basename is everything after the last path separator). 
The filler + is as long as possible, filling in characters from the basename until + the encoded path has MAX_PATH_LEN_IN_HGSTORE characters (or all chars + of the basename have been taken). + The extension (e.g. '.i' or '.d') is preserved. + + The string 'data/' at the beginning is replaced with 'dh/', if the hashed + encoding was used. + ''' + if not path.startswith('data/'): + return path + # escape directories ending with .i and .d + path = encodedir(path) + ndpath = path[len('data/'):] + res = 'data/' + auxencode(encodefilename(ndpath)) + if len(res) > MAX_PATH_LEN_IN_HGSTORE: + digest = _sha(path).hexdigest() + aep = auxencode(lowerencode(ndpath)) + _root, ext = os.path.splitext(aep) + parts = aep.split('/') + basename = parts[-1] + sdirs = [] + for p in parts[:-1]: + d = p[:DIR_PREFIX_LEN] + if d[-1] in '. ': + # Windows can't access dirs ending in period or space + d = d[:-1] + '_' + t = '/'.join(sdirs) + '/' + d + if len(t) > _MAX_SHORTENED_DIRS_LEN: + break + sdirs.append(d) + dirs = '/'.join(sdirs) + if len(dirs) > 0: + dirs += '/' + res = 'dh/' + dirs + digest + ext + space_left = MAX_PATH_LEN_IN_HGSTORE - len(res) + if space_left > 0: + filler = basename[:space_left] + res = 'dh/' + dirs + filler + digest + ext + return res + +def _calcmode(path): + try: + # files in .hg/ will be created using this mode + mode = os.stat(path).st_mode + # avoid some useless chmods + if (0777 & ~util.umask) == (0777 & mode): + mode = None + except OSError: + mode = None + return mode + +_data = 'data 00manifest.d 00manifest.i 00changelog.d 00changelog.i' + +class basicstore(object): + '''base class for local repository stores''' + def __init__(self, path, opener, pathjoiner): + self.pathjoiner = pathjoiner + self.path = path + self.createmode = _calcmode(path) + op = opener(self.path) + op.createmode = self.createmode + self.opener = lambda f, *args, **kw: op(encodedir(f), *args, **kw) + + def join(self, f): + return self.pathjoiner(self.path, encodedir(f)) + + def _walk(self, relpath, recurse): + '''yields (unencoded, encoded, size)''' + path = self.pathjoiner(self.path, relpath) + striplen = len(self.path) + len(os.sep) + l = [] + if os.path.isdir(path): + visit = [path] + while visit: + p = visit.pop() + for f, kind, st in osutil.listdir(p, stat=True): + fp = self.pathjoiner(p, f) + if kind == stat.S_IFREG and f[-2:] in ('.d', '.i'): + n = util.pconvert(fp[striplen:]) + l.append((decodedir(n), n, st.st_size)) + elif kind == stat.S_IFDIR and recurse: + visit.append(fp) + return sorted(l) + + def datafiles(self): + return self._walk('data', True) + + def walk(self): + '''yields (unencoded, encoded, size)''' + # yield data files first + for x in self.datafiles(): + yield x + # yield manifest before changelog + for x in reversed(self._walk('', False)): + yield x + + def copylist(self): + return ['requires'] + _data.split() + +class encodedstore(basicstore): + def __init__(self, path, opener, pathjoiner): + self.pathjoiner = pathjoiner + self.path = self.pathjoiner(path, 'store') + self.createmode = _calcmode(self.path) + op = opener(self.path) + op.createmode = self.createmode + self.opener = lambda f, *args, **kw: op(encodefilename(f), *args, **kw) + + def datafiles(self): + for a, b, size in self._walk('data', True): + try: + a = decodefilename(a) + except KeyError: + a = None + yield a, b, size + + def join(self, f): + return self.pathjoiner(self.path, encodefilename(f)) + + def copylist(self): + return (['requires', '00changelog.i'] + + [self.pathjoiner('store', f) for f in _data.split()]) + 
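# Hedged example: assuming the module above is importable as
# mercurial.store, the reversible "default" encoding maps uppercase and
# reserved characters like this (the hashed 'dh/' form from _hybridencode
# only kicks in once the encoded path exceeds 120 characters).
from mercurial.store import encodefilename, decodefilename

enc = encodefilename('data/Foo/BAR?.txt.i')
print(enc)                     # -> 'data/_foo/_b_a_r~3f.txt.i'
print(decodefilename(enc))     # -> 'data/Foo/BAR?.txt.i'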
+class fncache(object): + # the filename used to be partially encoded + # hence the encodedir/decodedir dance + def __init__(self, opener): + self.opener = opener + self.entries = None + + def _load(self): + '''fill the entries from the fncache file''' + self.entries = set() + try: + fp = self.opener('fncache', mode='rb') + except IOError: + # skip nonexistent file + return + for n, line in enumerate(fp): + if (len(line) < 2) or (line[-1] != '\n'): + t = _('invalid entry in fncache, line %s') % (n + 1) + raise util.Abort(t) + self.entries.add(decodedir(line[:-1])) + fp.close() + + def rewrite(self, files): + fp = self.opener('fncache', mode='wb') + for p in files: + fp.write(encodedir(p) + '\n') + fp.close() + self.entries = set(files) + + def add(self, fn): + if self.entries is None: + self._load() + if fn not in self.entries: + self.opener('fncache', 'ab').write(encodedir(fn) + '\n') + self.entries.add(fn) + + def __contains__(self, fn): + if self.entries is None: + self._load() + return fn in self.entries + + def __iter__(self): + if self.entries is None: + self._load() + return iter(self.entries) + +class fncachestore(basicstore): + def __init__(self, path, opener, pathjoiner, encode): + self.encode = encode + self.pathjoiner = pathjoiner + self.path = self.pathjoiner(path, 'store') + self.createmode = _calcmode(self.path) + op = opener(self.path) + op.createmode = self.createmode + fnc = fncache(op) + self.fncache = fnc + + def fncacheopener(path, mode='r', *args, **kw): + if mode not in ('r', 'rb') and path.startswith('data/'): + fnc.add(path) + return op(self.encode(path), mode, *args, **kw) + self.opener = fncacheopener + + def join(self, f): + return self.pathjoiner(self.path, self.encode(f)) + + def datafiles(self): + rewrite = False + existing = [] + pjoin = self.pathjoiner + spath = self.path + for f in self.fncache: + ef = self.encode(f) + try: + st = os.stat(pjoin(spath, ef)) + yield f, ef, st.st_size + existing.append(f) + except OSError: + # nonexistent entry + rewrite = True + if rewrite: + # rewrite fncache to remove nonexistent entries + # (may be caused by rollback / strip) + self.fncache.rewrite(existing) + + def copylist(self): + d = ('data dh fncache' + ' 00manifest.d 00manifest.i 00changelog.d 00changelog.i') + return (['requires', '00changelog.i'] + + [self.pathjoiner('store', f) for f in d.split()]) + +def store(requirements, path, opener, pathjoiner=None): + pathjoiner = pathjoiner or os.path.join + if 'store' in requirements: + if 'fncache' in requirements: + auxencode = lambda f: _auxencode(f, 'dotencode' in requirements) + encode = lambda f: _hybridencode(f, auxencode) + return fncachestore(path, opener, pathjoiner, encode) + return encodedstore(path, opener, pathjoiner) + return basicstore(path, opener, pathjoiner) diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/store.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/store.pyo Binary files differnew file mode 100644 index 0000000..62edb9f --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/store.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/strutil.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/strutil.py new file mode 100644 index 0000000..b33fb6b --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/strutil.py @@ -0,0 +1,34 @@ +# strutil.py - string utilities for Mercurial +# +# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com> +# +# This software may be used and distributed according to the terms 
of the +# GNU General Public License version 2 or any later version. + +def findall(haystack, needle, start=0, end=None): + if end is None: + end = len(haystack) + if end < 0: + end += len(haystack) + if start < 0: + start += len(haystack) + while start < end: + c = haystack.find(needle, start, end) + if c == -1: + break + yield c + start = c + 1 + +def rfindall(haystack, needle, start=0, end=None): + if end is None: + end = len(haystack) + if end < 0: + end += len(haystack) + if start < 0: + start += len(haystack) + while end >= 0: + c = haystack.rfind(needle, start, end) + if c == -1: + break + yield c + end = c - 1 diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/strutil.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/strutil.pyo Binary files differnew file mode 100644 index 0000000..0921585 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/strutil.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/subrepo.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/subrepo.py new file mode 100644 index 0000000..4761dca --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/subrepo.py @@ -0,0 +1,610 @@ +# subrepo.py - sub-repository handling for Mercurial +# +# Copyright 2009-2010 Matt Mackall <mpm@selenic.com> +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +import errno, os, re, xml.dom.minidom, shutil, urlparse, posixpath +import stat, subprocess +from i18n import _ +import config, util, node, error, cmdutil +hg = None + +nullstate = ('', '', 'empty') + +def state(ctx, ui): + """return a state dict, mapping subrepo paths configured in .hgsub + to tuple: (source from .hgsub, revision from .hgsubstate, kind + (key in types dict)) + """ + p = config.config() + def read(f, sections=None, remap=None): + if f in ctx: + try: + data = ctx[f].data() + except IOError, err: + if err.errno != errno.ENOENT: + raise + # handle missing subrepo spec files as removed + ui.warn(_("warning: subrepo spec file %s not found\n") % f) + return + p.parse(f, data, sections, remap, read) + else: + raise util.Abort(_("subrepo spec file %s not found") % f) + + if '.hgsub' in ctx: + read('.hgsub') + + for path, src in ui.configitems('subpaths'): + p.set('subpaths', path, src, ui.configsource('subpaths', path)) + + rev = {} + if '.hgsubstate' in ctx: + try: + for l in ctx['.hgsubstate'].data().splitlines(): + revision, path = l.split(" ", 1) + rev[path] = revision + except IOError, err: + if err.errno != errno.ENOENT: + raise + + state = {} + for path, src in p[''].items(): + kind = 'hg' + if src.startswith('['): + if ']' not in src: + raise util.Abort(_('missing ] in subrepo source')) + kind, src = src.split(']', 1) + kind = kind[1:] + + for pattern, repl in p.items('subpaths'): + # Turn r'C:\foo\bar' into r'C:\\foo\\bar' since re.sub + # does a string decode. + repl = repl.encode('string-escape') + # However, we still want to allow back references to go + # through unharmed, so we turn r'\\1' into r'\1'. Again, + # extra escapes are needed because re.sub string decodes. 
+ repl = re.sub(r'\\\\([0-9]+)', r'\\\1', repl) + try: + src = re.sub(pattern, repl, src, 1) + except re.error, e: + raise util.Abort(_("bad subrepository pattern in %s: %s") + % (p.source('subpaths', pattern), e)) + + state[path] = (src.strip(), rev.get(path, ''), kind) + + return state + +def writestate(repo, state): + """rewrite .hgsubstate in (outer) repo with these subrepo states""" + repo.wwrite('.hgsubstate', + ''.join(['%s %s\n' % (state[s][1], s) + for s in sorted(state)]), '') + +def submerge(repo, wctx, mctx, actx): + """delegated from merge.applyupdates: merging of .hgsubstate file + in working context, merging context and ancestor context""" + if mctx == actx: # backwards? + actx = wctx.p1() + s1 = wctx.substate + s2 = mctx.substate + sa = actx.substate + sm = {} + + repo.ui.debug("subrepo merge %s %s %s\n" % (wctx, mctx, actx)) + + def debug(s, msg, r=""): + if r: + r = "%s:%s:%s" % r + repo.ui.debug(" subrepo %s: %s %s\n" % (s, msg, r)) + + for s, l in s1.items(): + a = sa.get(s, nullstate) + ld = l # local state with possible dirty flag for compares + if wctx.sub(s).dirty(): + ld = (l[0], l[1] + "+") + if wctx == actx: # overwrite + a = ld + + if s in s2: + r = s2[s] + if ld == r or r == a: # no change or local is newer + sm[s] = l + continue + elif ld == a: # other side changed + debug(s, "other changed, get", r) + wctx.sub(s).get(r) + sm[s] = r + elif ld[0] != r[0]: # sources differ + if repo.ui.promptchoice( + _(' subrepository sources for %s differ\n' + 'use (l)ocal source (%s) or (r)emote source (%s)?') + % (s, l[0], r[0]), + (_('&Local'), _('&Remote')), 0): + debug(s, "prompt changed, get", r) + wctx.sub(s).get(r) + sm[s] = r + elif ld[1] == a[1]: # local side is unchanged + debug(s, "other side changed, get", r) + wctx.sub(s).get(r) + sm[s] = r + else: + debug(s, "both sides changed, merge with", r) + wctx.sub(s).merge(r) + sm[s] = l + elif ld == a: # remote removed, local unchanged + debug(s, "remote removed, remove") + wctx.sub(s).remove() + else: + if repo.ui.promptchoice( + _(' local changed subrepository %s which remote removed\n' + 'use (c)hanged version or (d)elete?') % s, + (_('&Changed'), _('&Delete')), 0): + debug(s, "prompt remove") + wctx.sub(s).remove() + + for s, r in s2.items(): + if s in s1: + continue + elif s not in sa: + debug(s, "remote added, get", r) + mctx.sub(s).get(r) + sm[s] = r + elif r != sa[s]: + if repo.ui.promptchoice( + _(' remote changed subrepository %s which local removed\n' + 'use (c)hanged version or (d)elete?') % s, + (_('&Changed'), _('&Delete')), 0) == 0: + debug(s, "prompt recreate", r) + wctx.sub(s).get(r) + sm[s] = r + + # record merged .hgsubstate + writestate(repo, sm) + +def reporelpath(repo): + """return path to this (sub)repo as seen from outermost repo""" + parent = repo + while hasattr(parent, '_subparent'): + parent = parent._subparent + return repo.root[len(parent.root)+1:] + +def subrelpath(sub): + """return path to this subrepo as seen from outermost repo""" + if not hasattr(sub, '_repo'): + return sub._path + return reporelpath(sub._repo) + +def _abssource(repo, push=False, abort=True): + """return pull/push path of repo - either based on parent repo .hgsub info + or on the top repo config. 
Abort or return None if no source found.""" + if hasattr(repo, '_subparent'): + source = repo._subsource + if source.startswith('/') or '://' in source: + return source + parent = _abssource(repo._subparent, push, abort=False) + if parent: + if '://' in parent: + if parent[-1] == '/': + parent = parent[:-1] + r = urlparse.urlparse(parent + '/' + source) + r = urlparse.urlunparse((r[0], r[1], + posixpath.normpath(r[2]), + r[3], r[4], r[5])) + return r + else: # plain file system path + return posixpath.normpath(os.path.join(parent, repo._subsource)) + else: # recursion reached top repo + if hasattr(repo, '_subtoppath'): + return repo._subtoppath + if push and repo.ui.config('paths', 'default-push'): + return repo.ui.config('paths', 'default-push') + if repo.ui.config('paths', 'default'): + return repo.ui.config('paths', 'default') + if abort: + raise util.Abort(_("default path for subrepository %s not found") % + reporelpath(repo)) + +def itersubrepos(ctx1, ctx2): + """find subrepos in ctx1 or ctx2""" + # Create a (subpath, ctx) mapping where we prefer subpaths from + # ctx1. The subpaths from ctx2 are important when the .hgsub file + # has been modified (in ctx2) but not yet committed (in ctx1). + subpaths = dict.fromkeys(ctx2.substate, ctx2) + subpaths.update(dict.fromkeys(ctx1.substate, ctx1)) + for subpath, ctx in sorted(subpaths.iteritems()): + yield subpath, ctx.sub(subpath) + +def subrepo(ctx, path): + """return instance of the right subrepo class for subrepo in path""" + # subrepo inherently violates our import layering rules + # because it wants to make repo objects from deep inside the stack + # so we manually delay the circular imports to not break + # scripts that don't use our demand-loading + global hg + import hg as h + hg = h + + util.path_auditor(ctx._repo.root)(path) + state = ctx.substate.get(path, nullstate) + if state[2] not in types: + raise util.Abort(_('unknown subrepo type %s') % state[2]) + return types[state[2]](ctx, path, state[:2]) + +# subrepo classes need to implement the following abstract class: + +class abstractsubrepo(object): + + def dirty(self): + """returns true if the dirstate of the subrepo does not match + current stored state + """ + raise NotImplementedError + + def checknested(self, path): + """check if path is a subrepository within this repository""" + return False + + def commit(self, text, user, date): + """commit the current changes to the subrepo with the given + log message. Use given user and date if possible. Return the + new state of the subrepo. + """ + raise NotImplementedError + + def remove(self): + """remove the subrepo + + (should verify the dirstate is not dirty first) + """ + raise NotImplementedError + + def get(self, state): + """run whatever commands are needed to put the subrepo into + this state + """ + raise NotImplementedError + + def merge(self, state): + """merge currently-saved state with the new state.""" + raise NotImplementedError + + def push(self, force): + """perform whatever action is analogous to 'hg push' + + This may be a no-op on some systems. 
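# Illustrative sketch (made-up contents): state() and writestate() earlier
# in this file work with two small files in the outer repository, .hgsub
# mapping subrepo paths to sources and .hgsubstate pinning each path to a
# revision.  A rough standalone parse of the two formats:
hgsub = ("vendor/lib = http://hg.example.com/lib\n"
         "build/tools = [svn]http://svn.example.com/tools/trunk\n")
hgsubstate = ("a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4e5f6a1b2 vendor/lib\n"
              "42 build/tools\n")

revs = {}
for l in hgsubstate.splitlines():
    rev, path = l.split(" ", 1)
    revs[path] = rev
for line in hgsub.splitlines():
    path, src = [s.strip() for s in line.split("=", 1)]
    kind = "hg"
    if src.startswith("["):                  # e.g. "[svn]http://..."
        kind, src = src[1:].split("]", 1)
    print((path, src, revs.get(path, ""), kind))
# -> ('vendor/lib', 'http://hg.example.com/lib', 'a1b2...a1b2', 'hg')
#    ('build/tools', 'http://svn.example.com/tools/trunk', '42', 'svn')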
+ """ + raise NotImplementedError + + def add(self, ui, match, dryrun, prefix): + return [] + + def status(self, rev2, **opts): + return [], [], [], [], [], [], [] + + def diff(self, diffopts, node2, match, prefix, **opts): + pass + + def outgoing(self, ui, dest, opts): + return 1 + + def incoming(self, ui, source, opts): + return 1 + + def files(self): + """return filename iterator""" + raise NotImplementedError + + def filedata(self, name): + """return file data""" + raise NotImplementedError + + def fileflags(self, name): + """return file flags""" + return '' + + def archive(self, archiver, prefix): + for name in self.files(): + flags = self.fileflags(name) + mode = 'x' in flags and 0755 or 0644 + symlink = 'l' in flags + archiver.addfile(os.path.join(prefix, self._path, name), + mode, symlink, self.filedata(name)) + + +class hgsubrepo(abstractsubrepo): + def __init__(self, ctx, path, state): + self._path = path + self._state = state + r = ctx._repo + root = r.wjoin(path) + create = False + if not os.path.exists(os.path.join(root, '.hg')): + create = True + util.makedirs(root) + self._repo = hg.repository(r.ui, root, create=create) + self._repo._subparent = r + self._repo._subsource = state[0] + + if create: + fp = self._repo.opener("hgrc", "w", text=True) + fp.write('[paths]\n') + + def addpathconfig(key, value): + if value: + fp.write('%s = %s\n' % (key, value)) + self._repo.ui.setconfig('paths', key, value) + + defpath = _abssource(self._repo, abort=False) + defpushpath = _abssource(self._repo, True, abort=False) + addpathconfig('default', defpath) + if defpath != defpushpath: + addpathconfig('default-push', defpushpath) + fp.close() + + def add(self, ui, match, dryrun, prefix): + return cmdutil.add(ui, self._repo, match, dryrun, True, + os.path.join(prefix, self._path)) + + def status(self, rev2, **opts): + try: + rev1 = self._state[1] + ctx1 = self._repo[rev1] + ctx2 = self._repo[rev2] + return self._repo.status(ctx1, ctx2, **opts) + except error.RepoLookupError, inst: + self._repo.ui.warn(_('warning: error "%s" in subrepository "%s"\n') + % (inst, subrelpath(self))) + return [], [], [], [], [], [], [] + + def diff(self, diffopts, node2, match, prefix, **opts): + try: + node1 = node.bin(self._state[1]) + # We currently expect node2 to come from substate and be + # in hex format + if node2 is not None: + node2 = node.bin(node2) + cmdutil.diffordiffstat(self._repo.ui, self._repo, diffopts, + node1, node2, match, + prefix=os.path.join(prefix, self._path), + listsubrepos=True, **opts) + except error.RepoLookupError, inst: + self._repo.ui.warn(_('warning: error "%s" in subrepository "%s"\n') + % (inst, subrelpath(self))) + + def archive(self, archiver, prefix): + abstractsubrepo.archive(self, archiver, prefix) + + rev = self._state[1] + ctx = self._repo[rev] + for subpath in ctx.substate: + s = subrepo(ctx, subpath) + s.archive(archiver, os.path.join(prefix, self._path)) + + def dirty(self): + r = self._state[1] + if r == '': + return True + w = self._repo[None] + if w.p1() != self._repo[r]: # version checked out change + return True + return w.dirty() # working directory changed + + def checknested(self, path): + return self._repo._checknested(self._repo.wjoin(path)) + + def commit(self, text, user, date): + self._repo.ui.debug("committing subrepo %s\n" % subrelpath(self)) + n = self._repo.commit(text, user, date) + if not n: + return self._repo['.'].hex() # different version checked out + return node.hex(n) + + def remove(self): + # we can't fully delete the repository as it may 
contain + # local-only history + self._repo.ui.note(_('removing subrepo %s\n') % subrelpath(self)) + hg.clean(self._repo, node.nullid, False) + + def _get(self, state): + source, revision, kind = state + try: + self._repo.lookup(revision) + except error.RepoError: + self._repo._subsource = source + srcurl = _abssource(self._repo) + self._repo.ui.status(_('pulling subrepo %s from %s\n') + % (subrelpath(self), srcurl)) + other = hg.repository(self._repo.ui, srcurl) + self._repo.pull(other) + + def get(self, state): + self._get(state) + source, revision, kind = state + self._repo.ui.debug("getting subrepo %s\n" % self._path) + hg.clean(self._repo, revision, False) + + def merge(self, state): + self._get(state) + cur = self._repo['.'] + dst = self._repo[state[1]] + anc = dst.ancestor(cur) + if anc == cur: + self._repo.ui.debug("updating subrepo %s\n" % subrelpath(self)) + hg.update(self._repo, state[1]) + elif anc == dst: + self._repo.ui.debug("skipping subrepo %s\n" % subrelpath(self)) + else: + self._repo.ui.debug("merging subrepo %s\n" % subrelpath(self)) + hg.merge(self._repo, state[1], remind=False) + + def push(self, force): + # push subrepos depth-first for coherent ordering + c = self._repo[''] + subs = c.substate # only repos that are committed + for s in sorted(subs): + if not c.sub(s).push(force): + return False + + dsturl = _abssource(self._repo, True) + self._repo.ui.status(_('pushing subrepo %s to %s\n') % + (subrelpath(self), dsturl)) + other = hg.repository(self._repo.ui, dsturl) + return self._repo.push(other, force) + + def outgoing(self, ui, dest, opts): + return hg.outgoing(ui, self._repo, _abssource(self._repo, True), opts) + + def incoming(self, ui, source, opts): + return hg.incoming(ui, self._repo, _abssource(self._repo, False), opts) + + def files(self): + rev = self._state[1] + ctx = self._repo[rev] + return ctx.manifest() + + def filedata(self, name): + rev = self._state[1] + return self._repo[rev][name].data() + + def fileflags(self, name): + rev = self._state[1] + ctx = self._repo[rev] + return ctx.flags(name) + + +class svnsubrepo(abstractsubrepo): + def __init__(self, ctx, path, state): + self._path = path + self._state = state + self._ctx = ctx + self._ui = ctx._repo.ui + + def _svncommand(self, commands, filename=''): + path = os.path.join(self._ctx._repo.origroot, self._path, filename) + cmd = ['svn'] + commands + [path] + cmd = [util.shellquote(arg) for arg in cmd] + cmd = util.quotecommand(' '.join(cmd)) + env = dict(os.environ) + # Avoid localized output, preserve current locale for everything else. + env['LC_MESSAGES'] = 'C' + p = subprocess.Popen(cmd, shell=True, bufsize=-1, + close_fds=util.closefds, + stdout=subprocess.PIPE, stderr=subprocess.PIPE, + universal_newlines=True, env=env) + stdout, stderr = p.communicate() + stderr = stderr.strip() + if stderr: + raise util.Abort(stderr) + return stdout + + def _wcrev(self): + output = self._svncommand(['info', '--xml']) + doc = xml.dom.minidom.parseString(output) + entries = doc.getElementsByTagName('entry') + if not entries: + return '0' + return str(entries[0].getAttribute('revision')) or '0' + + def _wcchanged(self): + """Return (changes, extchanges) where changes is True + if the working directory was changed, and extchanges is + True if any of these changes concern an external entry. 
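# Standalone sketch (the XML below is made up): _wcrev() above pulls the
# working-copy revision out of `svn info --xml` with minidom, roughly:
import xml.dom.minidom

output = ('<?xml version="1.0"?><info>'
          '<entry kind="dir" path="." revision="1432">'
          '<url>http://svn.example.com/tools/trunk</url>'
          '</entry></info>')
doc = xml.dom.minidom.parseString(output)
entries = doc.getElementsByTagName('entry')
print(str(entries[0].getAttribute('revision')) or '0')   # -> '1432'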
+ """ + output = self._svncommand(['status', '--xml']) + externals, changes = [], [] + doc = xml.dom.minidom.parseString(output) + for e in doc.getElementsByTagName('entry'): + s = e.getElementsByTagName('wc-status') + if not s: + continue + item = s[0].getAttribute('item') + props = s[0].getAttribute('props') + path = e.getAttribute('path') + if item == 'external': + externals.append(path) + if (item not in ('', 'normal', 'unversioned', 'external') + or props not in ('', 'none')): + changes.append(path) + for path in changes: + for ext in externals: + if path == ext or path.startswith(ext + os.sep): + return True, True + return bool(changes), False + + def dirty(self): + if self._wcrev() == self._state[1] and not self._wcchanged()[0]: + return False + return True + + def commit(self, text, user, date): + # user and date are out of our hands since svn is centralized + changed, extchanged = self._wcchanged() + if not changed: + return self._wcrev() + if extchanged: + # Do not try to commit externals + raise util.Abort(_('cannot commit svn externals')) + commitinfo = self._svncommand(['commit', '-m', text]) + self._ui.status(commitinfo) + newrev = re.search('Committed revision ([0-9]+).', commitinfo) + if not newrev: + raise util.Abort(commitinfo.splitlines()[-1]) + newrev = newrev.groups()[0] + self._ui.status(self._svncommand(['update', '-r', newrev])) + return newrev + + def remove(self): + if self.dirty(): + self._ui.warn(_('not removing repo %s because ' + 'it has changes.\n' % self._path)) + return + self._ui.note(_('removing subrepo %s\n') % self._path) + + def onerror(function, path, excinfo): + if function is not os.remove: + raise + # read-only files cannot be unlinked under Windows + s = os.stat(path) + if (s.st_mode & stat.S_IWRITE) != 0: + raise + os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE) + os.remove(path) + + path = self._ctx._repo.wjoin(self._path) + shutil.rmtree(path, onerror=onerror) + try: + os.removedirs(os.path.dirname(path)) + except OSError: + pass + + def get(self, state): + status = self._svncommand(['checkout', state[0], '--revision', state[1]]) + if not re.search('Checked out revision [0-9]+.', status): + raise util.Abort(status.splitlines()[-1]) + self._ui.status(status) + + def merge(self, state): + old = int(self._state[1]) + new = int(state[1]) + if new > old: + self.get(state) + + def push(self, force): + # push is a no-op for SVN + return True + + def files(self): + output = self._svncommand(['list']) + # This works because svn forbids \n in filenames. 
+ return output.splitlines() + + def filedata(self, name): + return self._svncommand(['cat'], name) + + +types = { + 'hg': hgsubrepo, + 'svn': svnsubrepo, + } diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/subrepo.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/subrepo.pyo Binary files differnew file mode 100644 index 0000000..92f6ed7 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/subrepo.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/tags.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/tags.py new file mode 100644 index 0000000..6db9613 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/tags.py @@ -0,0 +1,288 @@ +# tags.py - read tag info from local repository +# +# Copyright 2009 Matt Mackall <mpm@selenic.com> +# Copyright 2009 Greg Ward <greg@gerg.ca> +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +# Currently this module only deals with reading and caching tags. +# Eventually, it could take care of updating (adding/removing/moving) +# tags too. + +from node import nullid, bin, hex, short +from i18n import _ +import encoding +import error + +def findglobaltags(ui, repo, alltags, tagtypes): + '''Find global tags in repo by reading .hgtags from every head that + has a distinct version of it, using a cache to avoid excess work. + Updates the dicts alltags, tagtypes in place: alltags maps tag name + to (node, hist) pair (see _readtags() below), and tagtypes maps tag + name to tag type ("global" in this case).''' + # This is so we can be lazy and assume alltags contains only global + # tags when we pass it to _writetagcache(). + assert len(alltags) == len(tagtypes) == 0, \ + "findglobaltags() should be called first" + + (heads, tagfnode, cachetags, shouldwrite) = _readtagcache(ui, repo) + if cachetags is not None: + assert not shouldwrite + # XXX is this really 100% correct? are there oddball special + # cases where a global tag should outrank a local tag but won't, + # because cachetags does not contain rank info? + _updatetags(cachetags, 'global', alltags, tagtypes) + return + + seen = set() # set of fnode + fctx = None + for head in reversed(heads): # oldest to newest + assert head in repo.changelog.nodemap, \ + "tag cache returned bogus head %s" % short(head) + + fnode = tagfnode.get(head) + if fnode and fnode not in seen: + seen.add(fnode) + if not fctx: + fctx = repo.filectx('.hgtags', fileid=fnode) + else: + fctx = fctx.filectx(fnode) + + filetags = _readtags(ui, repo, fctx.data().splitlines(), fctx) + _updatetags(filetags, 'global', alltags, tagtypes) + + # and update the cache (if necessary) + if shouldwrite: + _writetagcache(ui, repo, heads, tagfnode, alltags) + +def readlocaltags(ui, repo, alltags, tagtypes): + '''Read local tags in repo. Update alltags and tagtypes.''' + try: + # localtags is in the local encoding; re-encode to UTF-8 on + # input for consistency with the rest of this module. + data = repo.opener("localtags").read() + filetags = _readtags( + ui, repo, data.splitlines(), "localtags", + recode=encoding.fromlocal) + _updatetags(filetags, "local", alltags, tagtypes) + except IOError: + pass + +def _readtags(ui, repo, lines, fn, recode=None): + '''Read tag definitions from a file (or any source of lines). 
+ Return a mapping from tag name to (node, hist): node is the node id + from the last line read for that name, and hist is the list of node + ids previously associated with it (in file order). All node ids are + binary, not hex.''' + + filetags = {} # map tag name to (node, hist) + count = 0 + + def warn(msg): + ui.warn(_("%s, line %s: %s\n") % (fn, count, msg)) + + for line in lines: + count += 1 + if not line: + continue + try: + (nodehex, name) = line.split(" ", 1) + except ValueError: + warn(_("cannot parse entry")) + continue + name = name.strip() + if recode: + name = recode(name) + try: + nodebin = bin(nodehex) + except TypeError: + warn(_("node '%s' is not well formed") % nodehex) + continue + if nodebin not in repo.changelog.nodemap: + # silently ignore as pull -r might cause this + continue + + # update filetags + hist = [] + if name in filetags: + n, hist = filetags[name] + hist.append(n) + filetags[name] = (nodebin, hist) + return filetags + +def _updatetags(filetags, tagtype, alltags, tagtypes): + '''Incorporate the tag info read from one file into the two + dictionaries, alltags and tagtypes, that contain all tag + info (global across all heads plus local).''' + + for name, nodehist in filetags.iteritems(): + if name not in alltags: + alltags[name] = nodehist + tagtypes[name] = tagtype + continue + + # we prefer alltags[name] if: + # it supercedes us OR + # mutual supercedes and it has a higher rank + # otherwise we win because we're tip-most + anode, ahist = nodehist + bnode, bhist = alltags[name] + if (bnode != anode and anode in bhist and + (bnode not in ahist or len(bhist) > len(ahist))): + anode = bnode + ahist.extend([n for n in bhist if n not in ahist]) + alltags[name] = anode, ahist + tagtypes[name] = tagtype + + +# The tag cache only stores info about heads, not the tag contents +# from each head. I.e. it doesn't try to squeeze out the maximum +# performance, but is simpler has a better chance of actually +# working correctly. And this gives the biggest performance win: it +# avoids looking up .hgtags in the manifest for every head, and it +# can avoid calling heads() at all if there have been no changes to +# the repo. + +def _readtagcache(ui, repo): + '''Read the tag cache and return a tuple (heads, fnodes, cachetags, + shouldwrite). If the cache is completely up-to-date, cachetags is a + dict of the form returned by _readtags(); otherwise, it is None and + heads and fnodes are set. In that case, heads is the list of all + heads currently in the repository (ordered from tip to oldest) and + fnodes is a mapping from head to .hgtags filenode. If those two are + set, caller is responsible for reading tag info from each head.''' + + try: + cachefile = repo.opener('tags.cache', 'r') + # force reading the file for static-http + cachelines = iter(cachefile) + except IOError: + cachefile = None + + # The cache file consists of lines like + # <headrev> <headnode> [<tagnode>] + # where <headrev> and <headnode> redundantly identify a repository + # head from the time the cache was written, and <tagnode> is the + # filenode of .hgtags on that head. Heads with no .hgtags file will + # have no <tagnode>. The cache is ordered from tip to oldest (which + # is part of why <headrev> is there: a quick visual check is all + # that's required to ensure correct order). + # + # This information is enough to let us avoid the most expensive part + # of finding global tags, which is looking up <tagnode> in the + # manifest for each head. 
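# ---- editorial sketch (not part of tags.py): decoding one cache line ----
# The cache format described above can be exercised on its own; the revision
# number and hex digests below are invented, and binascii.unhexlify stands in
# for the node.bin helper used elsewhere in this module.
from binascii import unhexlify

def parse_tagcache_line(line):
    # "<headrev> <headnode> [<tagnode>]" -> (rev, headnode, fnode or None)
    fields = line.split()
    rev = int(fields[0])
    headnode = unhexlify(fields[1])
    fnode = unhexlify(fields[2]) if len(fields) == 3 else None
    return rev, headnode, fnode

# parse_tagcache_line('4242 ' + 'ab' * 20 + ' ' + 'cd' * 20)
# -> (4242, 20-byte head node, 20-byte .hgtags filenode)
# --------------------------------------------------------------------------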
+ cacherevs = [] # list of headrev + cacheheads = [] # list of headnode + cachefnode = {} # map headnode to filenode + if cachefile: + try: + for line in cachelines: + if line == "\n": + break + line = line.rstrip().split() + cacherevs.append(int(line[0])) + headnode = bin(line[1]) + cacheheads.append(headnode) + if len(line) == 3: + fnode = bin(line[2]) + cachefnode[headnode] = fnode + except (ValueError, TypeError): + # corruption of tags.cache, just recompute it + ui.warn(_('.hg/tags.cache is corrupt, rebuilding it\n')) + cacheheads = [] + cacherevs = [] + cachefnode = {} + + tipnode = repo.changelog.tip() + tiprev = len(repo.changelog) - 1 + + # Case 1 (common): tip is the same, so nothing has changed. + # (Unchanged tip trivially means no changesets have been added. + # But, thanks to localrepository.destroyed(), it also means none + # have been destroyed by strip or rollback.) + if cacheheads and cacheheads[0] == tipnode and cacherevs[0] == tiprev: + tags = _readtags(ui, repo, cachelines, cachefile.name) + cachefile.close() + return (None, None, tags, False) + if cachefile: + cachefile.close() # ignore rest of file + + repoheads = repo.heads() + # Case 2 (uncommon): empty repo; get out quickly and don't bother + # writing an empty cache. + if repoheads == [nullid]: + return ([], {}, {}, False) + + # Case 3 (uncommon): cache file missing or empty. + + # Case 4 (uncommon): tip rev decreased. This should only happen + # when we're called from localrepository.destroyed(). Refresh the + # cache so future invocations will not see disappeared heads in the + # cache. + + # Case 5 (common): tip has changed, so we've added/replaced heads. + + # As it happens, the code to handle cases 3, 4, 5 is the same. + + # N.B. in case 4 (nodes destroyed), "new head" really means "newly + # exposed". + newheads = [head + for head in repoheads + if head not in set(cacheheads)] + + # Now we have to lookup the .hgtags filenode for every new head. + # This is the most expensive part of finding tags, so performance + # depends primarily on the size of newheads. Worst case: no cache + # file, so newheads == repoheads. + for head in newheads: + cctx = repo[head] + try: + fnode = cctx.filenode('.hgtags') + cachefnode[head] = fnode + except error.LookupError: + # no .hgtags file on this head + pass + + # Caller has to iterate over all heads, but can use the filenodes in + # cachefnode to get to each .hgtags revision quickly. + return (repoheads, cachefnode, None, True) + +def _writetagcache(ui, repo, heads, tagfnode, cachetags): + + try: + cachefile = repo.opener('tags.cache', 'w', atomictemp=True) + except (OSError, IOError): + return + + realheads = repo.heads() # for sanity checks below + for head in heads: + # temporary sanity checks; these can probably be removed + # once this code has been in crew for a few weeks + assert head in repo.changelog.nodemap, \ + 'trying to write non-existent node %s to tag cache' % short(head) + assert head in realheads, \ + 'trying to write non-head %s to tag cache' % short(head) + assert head != nullid, \ + 'trying to write nullid to tag cache' + + # This can't fail because of the first assert above. When/if we + # remove that assert, we might want to catch LookupError here + # and downgrade it to a warning. 
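# ---- editorial sketch (not part of tags.py): shape of the written cache ----
# The loop just below emits one line per head, and the tag section at the end
# of the function appends "<hexnode> <name>" pairs after a blank separator
# line. A minimal formatter for the head lines, mirroring the two write()
# branches that follow (argument names are invented):
def format_head_line(rev, headnode_hex, fnode_hex=None):
    if fnode_hex:
        return '%d %s %s\n' % (rev, headnode_hex, fnode_hex)
    return '%d %s\n' % (rev, headnode_hex)
# -----------------------------------------------------------------------------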
+ rev = repo.changelog.rev(head) + + fnode = tagfnode.get(head) + if fnode: + cachefile.write('%d %s %s\n' % (rev, hex(head), hex(fnode))) + else: + cachefile.write('%d %s\n' % (rev, hex(head))) + + # Tag names in the cache are in UTF-8 -- which is the whole reason + # we keep them in UTF-8 throughout this module. If we converted + # them local encoding on input, we would lose info writing them to + # the cache. + cachefile.write('\n') + for (name, (node, hist)) in cachetags.iteritems(): + cachefile.write("%s %s\n" % (hex(node), name)) + + cachefile.rename() diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/tags.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/tags.pyo Binary files differnew file mode 100644 index 0000000..c61ce3b --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/tags.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templatefilters.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templatefilters.py new file mode 100644 index 0000000..4701fed --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templatefilters.py @@ -0,0 +1,228 @@ +# template-filters.py - common template expansion filters +# +# Copyright 2005-2008 Matt Mackall <mpm@selenic.com> +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +import cgi, re, os, time, urllib +import encoding, node, util + +def stringify(thing): + '''turn nested template iterator into string.''' + if hasattr(thing, '__iter__') and not isinstance(thing, str): + return "".join([stringify(t) for t in thing if t is not None]) + return str(thing) + +agescales = [("year", 3600 * 24 * 365), + ("month", 3600 * 24 * 30), + ("week", 3600 * 24 * 7), + ("day", 3600 * 24), + ("hour", 3600), + ("minute", 60), + ("second", 1)] + +def age(date): + '''turn a (timestamp, tzoff) tuple into an age string.''' + + def plural(t, c): + if c == 1: + return t + return t + "s" + def fmt(t, c): + return "%d %s" % (c, plural(t, c)) + + now = time.time() + then = date[0] + if then > now: + return 'in the future' + + delta = max(1, int(now - then)) + if delta > agescales[0][1] * 2: + return util.shortdate(date) + + for t, s in agescales: + n = delta // s + if n >= 2 or s == 1: + return '%s ago' % fmt(t, n) + +para_re = None +space_re = None + +def fill(text, width): + '''fill many paragraphs.''' + global para_re, space_re + if para_re is None: + para_re = re.compile('(\n\n|\n\\s*[-*]\\s*)', re.M) + space_re = re.compile(r' +') + + def findparas(): + start = 0 + while True: + m = para_re.search(text, start) + if not m: + uctext = unicode(text[start:], encoding.encoding) + w = len(uctext) + while 0 < w and uctext[w - 1].isspace(): + w -= 1 + yield (uctext[:w].encode(encoding.encoding), + uctext[w:].encode(encoding.encoding)) + break + yield text[start:m.start(0)], m.group(1) + start = m.end(1) + + return "".join([space_re.sub(' ', util.wrap(para, width=width)) + rest + for para, rest in findparas()]) + +def firstline(text): + '''return the first line of text''' + try: + return text.splitlines(True)[0].rstrip('\r\n') + except IndexError: + return '' + +def nl2br(text): + '''replace raw newlines with xhtml line breaks.''' + return text.replace('\n', '<br/>\n') + +def obfuscate(text): + text = unicode(text, encoding.encoding, 'replace') + return ''.join(['&#%d;' % ord(c) for c in text]) + +def domain(author): + '''get domain of author, or empty string if none.''' + f = 
author.find('@')
+    if f == -1:
+        return ''
+    author = author[f + 1:]
+    f = author.find('>')
+    if f >= 0:
+        author = author[:f]
+    return author
+
+def person(author):
+    '''get name of author, or else username.'''
+    if not '@' in author:
+        return author
+    f = author.find('<')
+    if f == -1:
+        return util.shortuser(author)
+    return author[:f].rstrip()
+
+def indent(text, prefix):
+    '''indent each non-empty line of text after first with prefix.'''
+    lines = text.splitlines()
+    num_lines = len(lines)
+    endswithnewline = text[-1:] == '\n'
+    def indenter():
+        for i in xrange(num_lines):
+            l = lines[i]
+            if i and l.strip():
+                yield prefix
+            yield l
+            if i < num_lines - 1 or endswithnewline:
+                yield '\n'
+    return "".join(indenter())
+
+def permissions(flags):
+    if "l" in flags:
+        return "lrwxrwxrwx"
+    if "x" in flags:
+        return "-rwxr-xr-x"
+    return "-rw-r--r--"
+
+def xmlescape(text):
+    text = (text
+            .replace('&', '&amp;')
+            .replace('<', '&lt;')
+            .replace('>', '&gt;')
+            .replace('"', '&quot;')
+            .replace("'", '&#39;')) # &apos; invalid in HTML
+    return re.sub('[\x00-\x08\x0B\x0C\x0E-\x1F]', ' ', text)
+
+def uescape(c):
+    if ord(c) < 0x80:
+        return c
+    else:
+        return '\\u%04x' % ord(c)
+
+_escapes = [
+    ('\\', '\\\\'), ('"', '\\"'), ('\t', '\\t'), ('\n', '\\n'),
+    ('\r', '\\r'), ('\f', '\\f'), ('\b', '\\b'),
+]
+
+def jsonescape(s):
+    for k, v in _escapes:
+        s = s.replace(k, v)
+    return ''.join(uescape(c) for c in s)
+
+def json(obj):
+    if obj is None or obj is False or obj is True:
+        return {None: 'null', False: 'false', True: 'true'}[obj]
+    elif isinstance(obj, int) or isinstance(obj, float):
+        return str(obj)
+    elif isinstance(obj, str):
+        u = unicode(obj, encoding.encoding, 'replace')
+        return '"%s"' % jsonescape(u)
+    elif isinstance(obj, unicode):
+        return '"%s"' % jsonescape(obj)
+    elif hasattr(obj, 'keys'):
+        out = []
+        for k, v in obj.iteritems():
+            s = '%s: %s' % (json(k), json(v))
+            out.append(s)
+        return '{' + ', '.join(out) + '}'
+    elif hasattr(obj, '__iter__'):
+        out = []
+        for i in obj:
+            out.append(json(i))
+        return '[' + ', '.join(out) + ']'
+    else:
+        raise TypeError('cannot encode type %s' % obj.__class__.__name__)
+
+def stripdir(text):
+    '''Treat the text as path and strip a directory level, if possible.'''
+    dir = os.path.dirname(text)
+    if dir == "":
+        return os.path.basename(text)
+    else:
+        return dir
+
+def nonempty(str):
+    return str or "(none)"
+
+filters = {
+    "addbreaks": nl2br,
+    "basename": os.path.basename,
+    "stripdir": stripdir,
+    "age": age,
+    "date": lambda x: util.datestr(x),
+    "domain": domain,
+    "email": util.email,
+    "escape": lambda x: cgi.escape(x, True),
+    "fill68": lambda x: fill(x, width=68),
+    "fill76": lambda x: fill(x, width=76),
+    "firstline": firstline,
+    "tabindent": lambda x: indent(x, '\t'),
+    "hgdate": lambda x: "%d %d" % x,
+    "isodate": lambda x: util.datestr(x, '%Y-%m-%d %H:%M %1%2'),
+    "isodatesec": lambda x: util.datestr(x, '%Y-%m-%d %H:%M:%S %1%2'),
+    "json": json,
+    "jsonescape": jsonescape,
+    "localdate": lambda x: (x[0], util.makedate()[1]),
+    "nonempty": nonempty,
+    "obfuscate": obfuscate,
+    "permissions": permissions,
+    "person": person,
+    "rfc822date": lambda x: util.datestr(x, "%a, %d %b %Y %H:%M:%S %1%2"),
+    "rfc3339date": lambda x: util.datestr(x, "%Y-%m-%dT%H:%M:%S%1:%2"),
+    "hex": node.hex,
+    "short": lambda x: x[:12],
+    "shortdate": util.shortdate,
+    "stringify": stringify,
+    "strip": lambda x: x.strip(),
+    "urlescape": lambda x: urllib.quote(x),
+    "user": lambda x: util.shortuser(x),
+    "stringescape": lambda x: x.encode('string_escape'),
+
"xmlescape": xmlescape, +} diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templatefilters.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templatefilters.pyo Binary files differnew file mode 100644 index 0000000..3263c3f --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templatefilters.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templatekw.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templatekw.py new file mode 100644 index 0000000..905edb8 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templatekw.py @@ -0,0 +1,271 @@ +# templatekw.py - common changeset template keywords +# +# Copyright 2005-2009 Matt Mackall <mpm@selenic.com> +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +from node import hex +import encoding, patch, util, error + +def showlist(name, values, plural=None, **args): + '''expand set of values. + name is name of key in template map. + values is list of strings or dicts. + plural is plural of name, if not simply name + 's'. + + expansion works like this, given name 'foo'. + + if values is empty, expand 'no_foos'. + + if 'foo' not in template map, return values as a string, + joined by space. + + expand 'start_foos'. + + for each value, expand 'foo'. if 'last_foo' in template + map, expand it instead of 'foo' for last key. + + expand 'end_foos'. + ''' + templ = args['templ'] + if plural: + names = plural + else: names = name + 's' + if not values: + noname = 'no_' + names + if noname in templ: + yield templ(noname, **args) + return + if name not in templ: + if isinstance(values[0], str): + yield ' '.join(values) + else: + for v in values: + yield dict(v, **args) + return + startname = 'start_' + names + if startname in templ: + yield templ(startname, **args) + vargs = args.copy() + def one(v, tag=name): + try: + vargs.update(v) + except (AttributeError, ValueError): + try: + for a, b in v: + vargs[a] = b + except ValueError: + vargs[name] = v + return templ(tag, **vargs) + lastname = 'last_' + name + if lastname in templ: + last = values.pop() + else: + last = None + for v in values: + yield one(v) + if last is not None: + yield one(last, tag=lastname) + endname = 'end_' + names + if endname in templ: + yield templ(endname, **args) + +def getfiles(repo, ctx, revcache): + if 'files' not in revcache: + revcache['files'] = repo.status(ctx.parents()[0].node(), + ctx.node())[:3] + return revcache['files'] + +def getlatesttags(repo, ctx, cache): + '''return date, distance and name for the latest tag of rev''' + + if 'latesttags' not in cache: + # Cache mapping from rev to a tuple with tag date, tag + # distance and tag name + cache['latesttags'] = {-1: (0, 0, 'null')} + latesttags = cache['latesttags'] + + rev = ctx.rev() + todo = [rev] + while todo: + rev = todo.pop() + if rev in latesttags: + continue + ctx = repo[rev] + tags = [t for t in ctx.tags() if repo.tagtype(t) == 'global'] + if tags: + latesttags[rev] = ctx.date()[0], 0, ':'.join(sorted(tags)) + continue + try: + # The tuples are laid out so the right one can be found by + # comparison. 
+ pdate, pdist, ptag = max( + latesttags[p.rev()] for p in ctx.parents()) + except KeyError: + # Cache miss - recurse + todo.append(rev) + todo.extend(p.rev() for p in ctx.parents()) + continue + latesttags[rev] = pdate, pdist + 1, ptag + return latesttags[rev] + +def getrenamedfn(repo, endrev=None): + rcache = {} + if endrev is None: + endrev = len(repo) + + def getrenamed(fn, rev): + '''looks up all renames for a file (up to endrev) the first + time the file is given. It indexes on the changerev and only + parses the manifest if linkrev != changerev. + Returns rename info for fn at changerev rev.''' + if fn not in rcache: + rcache[fn] = {} + fl = repo.file(fn) + for i in fl: + lr = fl.linkrev(i) + renamed = fl.renamed(fl.node(i)) + rcache[fn][lr] = renamed + if lr >= endrev: + break + if rev in rcache[fn]: + return rcache[fn][rev] + + # If linkrev != rev (i.e. rev not found in rcache) fallback to + # filectx logic. + try: + return repo[rev][fn].renamed() + except error.LookupError: + return None + + return getrenamed + + +def showauthor(repo, ctx, templ, **args): + return ctx.user() + +def showbranches(**args): + branch = args['ctx'].branch() + if branch != 'default': + branch = encoding.tolocal(branch) + return showlist('branch', [branch], plural='branches', **args) + +def showchildren(**args): + ctx = args['ctx'] + childrevs = ['%d:%s' % (cctx, cctx) for cctx in ctx.children()] + return showlist('children', childrevs, **args) + +def showdate(repo, ctx, templ, **args): + return ctx.date() + +def showdescription(repo, ctx, templ, **args): + return ctx.description().strip() + +def showdiffstat(repo, ctx, templ, **args): + diff = patch.diff(repo, ctx.parents()[0].node(), ctx.node()) + files, adds, removes = 0, 0, 0 + for i in patch.diffstatdata(util.iterlines(diff)): + files += 1 + adds += i[1] + removes += i[2] + return '%s: +%s/-%s' % (files, adds, removes) + +def showextras(**args): + templ = args['templ'] + for key, value in sorted(args['ctx'].extra().items()): + args = args.copy() + args.update(dict(key=key, value=value)) + yield templ('extra', **args) + +def showfileadds(**args): + repo, ctx, revcache = args['repo'], args['ctx'], args['revcache'] + return showlist('file_add', getfiles(repo, ctx, revcache)[1], **args) + +def showfilecopies(**args): + cache, ctx = args['cache'], args['ctx'] + copies = args['revcache'].get('copies') + if copies is None: + if 'getrenamed' not in cache: + cache['getrenamed'] = getrenamedfn(args['repo']) + copies = [] + getrenamed = cache['getrenamed'] + for fn in ctx.files(): + rename = getrenamed(fn, ctx.rev()) + if rename: + copies.append((fn, rename[0])) + + c = [{'name': x[0], 'source': x[1]} for x in copies] + return showlist('file_copy', c, plural='file_copies', **args) + +# showfilecopiesswitch() displays file copies only if copy records are +# provided before calling the templater, usually with a --copies +# command line switch. 
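# ---- editorial sketch (not part of templatekw.py): copy record shape --------
# The file_copy keywords here turn (destination, source) pairs into the dicts
# that showlist() expands; the file names below are invented.
copies = [('doc/readme.txt', 'README')]
c = [{'name': dest, 'source': src} for dest, src in copies]
assert c == [{'name': 'doc/readme.txt', 'source': 'README'}]
# -----------------------------------------------------------------------------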
+def showfilecopiesswitch(**args): + copies = args['revcache'].get('copies') or [] + c = [{'name': x[0], 'source': x[1]} for x in copies] + return showlist('file_copy', c, plural='file_copies', **args) + +def showfiledels(**args): + repo, ctx, revcache = args['repo'], args['ctx'], args['revcache'] + return showlist('file_del', getfiles(repo, ctx, revcache)[2], **args) + +def showfilemods(**args): + repo, ctx, revcache = args['repo'], args['ctx'], args['revcache'] + return showlist('file_mod', getfiles(repo, ctx, revcache)[0], **args) + +def showfiles(**args): + return showlist('file', args['ctx'].files(), **args) + +def showlatesttag(repo, ctx, templ, cache, **args): + return getlatesttags(repo, ctx, cache)[2] + +def showlatesttagdistance(repo, ctx, templ, cache, **args): + return getlatesttags(repo, ctx, cache)[1] + +def showmanifest(**args): + repo, ctx, templ = args['repo'], args['ctx'], args['templ'] + args = args.copy() + args.update(dict(rev=repo.manifest.rev(ctx.changeset()[0]), + node=hex(ctx.changeset()[0]))) + return templ('manifest', **args) + +def shownode(repo, ctx, templ, **args): + return ctx.hex() + +def showrev(repo, ctx, templ, **args): + return ctx.rev() + +def showtags(**args): + return showlist('tag', args['ctx'].tags(), **args) + +# keywords are callables like: +# fn(repo, ctx, templ, cache, revcache, **args) +# with: +# repo - current repository instance +# ctx - the changectx being displayed +# templ - the templater instance +# cache - a cache dictionary for the whole templater run +# revcache - a cache dictionary for the current revision +keywords = { + 'author': showauthor, + 'branches': showbranches, + 'children': showchildren, + 'date': showdate, + 'desc': showdescription, + 'diffstat': showdiffstat, + 'extras': showextras, + 'file_adds': showfileadds, + 'file_copies': showfilecopies, + 'file_copies_switch': showfilecopiesswitch, + 'file_dels': showfiledels, + 'file_mods': showfilemods, + 'files': showfiles, + 'latesttag': showlatesttag, + 'latesttagdistance': showlatesttagdistance, + 'manifest': showmanifest, + 'node': shownode, + 'rev': showrev, + 'tags': showtags, +} + diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templatekw.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templatekw.pyo Binary files differnew file mode 100644 index 0000000..04eb28c --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templatekw.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templater.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templater.py new file mode 100644 index 0000000..5b3a5ad --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templater.py @@ -0,0 +1,289 @@ +# templater.py - template expansion for output +# +# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com> +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. 
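# ---- editorial sketch: adding a template keyword (refers to templatekw above) ----
# The keywords table shown above maps names to callables with the signature
# fn(repo, ctx, templ, **args); 'shortnode' is an invented example keyword,
# not one that ships with Mercurial.
def showshortnode(repo, ctx, templ, **args):
    return ctx.hex()[:12]

# keywords['shortnode'] = showshortnode    # how an extension might register it
# -----------------------------------------------------------------------------------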
+ +from i18n import _ +import sys, os +import util, config, templatefilters + +path = ['templates', '../templates'] +stringify = templatefilters.stringify + +def _flatten(thing): + '''yield a single stream from a possibly nested set of iterators''' + if isinstance(thing, str): + yield thing + elif not hasattr(thing, '__iter__'): + if thing is not None: + yield str(thing) + else: + for i in thing: + if isinstance(i, str): + yield i + elif not hasattr(i, '__iter__'): + if i is not None: + yield str(i) + elif i is not None: + for j in _flatten(i): + yield j + +def parsestring(s, quoted=True): + '''parse a string using simple c-like syntax. + string must be in quotes if quoted is True.''' + if quoted: + if len(s) < 2 or s[0] != s[-1]: + raise SyntaxError(_('unmatched quotes')) + return s[1:-1].decode('string_escape') + + return s.decode('string_escape') + +class engine(object): + '''template expansion engine. + + template expansion works like this. a map file contains key=value + pairs. if value is quoted, it is treated as string. otherwise, it + is treated as name of template file. + + templater is asked to expand a key in map. it looks up key, and + looks for strings like this: {foo}. it expands {foo} by looking up + foo in map, and substituting it. expansion is recursive: it stops + when there is no more {foo} to replace. + + expansion also allows formatting and filtering. + + format uses key to expand each item in list. syntax is + {key%format}. + + filter uses function to transform value. syntax is + {key|filter1|filter2|...}.''' + + def __init__(self, loader, filters={}, defaults={}): + self._loader = loader + self._filters = filters + self._defaults = defaults + self._cache = {} + + def process(self, t, mapping): + '''Perform expansion. t is name of map element to expand. + mapping contains added elements for use during expansion. Is a + generator.''' + return _flatten(self._process(self._load(t), mapping)) + + def _load(self, t): + '''load, parse, and cache a template''' + if t not in self._cache: + self._cache[t] = self._parse(self._loader(t)) + return self._cache[t] + + def _get(self, mapping, key): + v = mapping.get(key) + if v is None: + v = self._defaults.get(key, '') + if hasattr(v, '__call__'): + v = v(**mapping) + return v + + def _filter(self, mapping, parts): + filters, val = parts + x = self._get(mapping, val) + for f in filters: + x = f(x) + return x + + def _format(self, mapping, args): + key, parsed = args + v = self._get(mapping, key) + if not hasattr(v, '__iter__'): + raise SyntaxError(_("error expanding '%s%%%s'") + % (key, parsed)) + lm = mapping.copy() + for i in v: + if isinstance(i, dict): + lm.update(i) + yield self._process(parsed, lm) + else: + # v is not an iterable of dicts, this happen when 'key' + # has been fully expanded already and format is useless. + # If so, return the expanded value. 
+ yield i + + def _parse(self, tmpl): + '''preparse a template''' + parsed = [] + pos, stop = 0, len(tmpl) + while pos < stop: + n = tmpl.find('{', pos) + if n < 0: + parsed.append((None, tmpl[pos:stop])) + break + if n > 0 and tmpl[n - 1] == '\\': + # escaped + parsed.append((None, tmpl[pos:n - 1] + "{")) + pos = n + 1 + continue + if n > pos: + parsed.append((None, tmpl[pos:n])) + + pos = n + n = tmpl.find('}', pos) + if n < 0: + # no closing + parsed.append((None, tmpl[pos:stop])) + break + + expr = tmpl[pos + 1:n] + pos = n + 1 + + if '%' in expr: + # the keyword should be formatted with a template + key, t = expr.split('%') + parsed.append((self._format, (key.strip(), + self._load(t.strip())))) + elif '|' in expr: + # process the keyword value with one or more filters + parts = expr.split('|') + val = parts[0].strip() + try: + filters = [self._filters[f.strip()] for f in parts[1:]] + except KeyError, i: + raise SyntaxError(_("unknown filter '%s'") % i[0]) + parsed.append((self._filter, (filters, val))) + else: + # just get the keyword + parsed.append((self._get, expr.strip())) + + return parsed + + def _process(self, parsed, mapping): + '''Render a template. Returns a generator.''' + for f, e in parsed: + if f: + yield f(mapping, e) + else: + yield e + +engines = {'default': engine} + +class templater(object): + + def __init__(self, mapfile, filters={}, defaults={}, cache={}, + minchunk=1024, maxchunk=65536): + '''set up template engine. + mapfile is name of file to read map definitions from. + filters is dict of functions. each transforms a value into another. + defaults is dict of default map definitions.''' + self.mapfile = mapfile or 'template' + self.cache = cache.copy() + self.map = {} + self.base = (mapfile and os.path.dirname(mapfile)) or '' + self.filters = templatefilters.filters.copy() + self.filters.update(filters) + self.defaults = defaults + self.minchunk, self.maxchunk = minchunk, maxchunk + self.engines = {} + + if not mapfile: + return + if not os.path.exists(mapfile): + raise util.Abort(_('style not found: %s') % mapfile) + + conf = config.config() + conf.read(mapfile) + + for key, val in conf[''].items(): + if val[0] in "'\"": + try: + self.cache[key] = parsestring(val) + except SyntaxError, inst: + raise SyntaxError('%s: %s' % + (conf.source('', key), inst.args[0])) + else: + val = 'default', val + if ':' in val[1]: + val = val[1].split(':', 1) + self.map[key] = val[0], os.path.join(self.base, val[1]) + + def __contains__(self, key): + return key in self.cache or key in self.map + + def load(self, t): + '''Get the template for the given template name. Use a local cache.''' + if not t in self.cache: + try: + self.cache[t] = open(self.map[t][1]).read() + except IOError, inst: + raise IOError(inst.args[0], _('template file %s: %s') % + (self.map[t][1], inst.args[1])) + return self.cache[t] + + def __call__(self, t, **mapping): + ttype = t in self.map and self.map[t][0] or 'default' + proc = self.engines.get(ttype) + if proc is None: + proc = engines[ttype](self.load, self.filters, self.defaults) + self.engines[ttype] = proc + + stream = proc.process(t, mapping) + if self.minchunk: + stream = util.increasingchunks(stream, min=self.minchunk, + max=self.maxchunk) + return stream + +def templatepath(name=None): + '''return location of template file or directory (if no name). 
+ returns None if not found.''' + normpaths = [] + + # executable version (py2exe) doesn't support __file__ + if hasattr(sys, 'frozen'): + module = sys.executable + else: + module = __file__ + for f in path: + if f.startswith('/'): + p = f + else: + fl = f.split('/') + p = os.path.join(os.path.dirname(module), *fl) + if name: + p = os.path.join(p, name) + if name and os.path.exists(p): + return os.path.normpath(p) + elif os.path.isdir(p): + normpaths.append(os.path.normpath(p)) + + return normpaths + +def stylemap(styles, paths=None): + """Return path to mapfile for a given style. + + Searches mapfile in the following locations: + 1. templatepath/style/map + 2. templatepath/map-style + 3. templatepath/map + """ + + if paths is None: + paths = templatepath() + elif isinstance(paths, str): + paths = [paths] + + if isinstance(styles, str): + styles = [styles] + + for style in styles: + if not style: + continue + locations = [os.path.join(style, 'map'), 'map-' + style] + locations.append('map') + + for path in paths: + for location in locations: + mapfile = os.path.join(path, location) + if os.path.isfile(mapfile): + return style, mapfile + + raise RuntimeError("No hgweb templates found in %r" % paths) diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templater.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templater.pyo Binary files differnew file mode 100644 index 0000000..b37c7ca --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templater.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/atom/changelog.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/atom/changelog.tmpl new file mode 100644 index 0000000..29902ab --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/atom/changelog.tmpl @@ -0,0 +1,10 @@ +{header} + <!-- Changelog --> + <id>{urlbase}{url}</id> + <link rel="self" href="{urlbase}{url}atom-log"/> + <link rel="alternate" href="{urlbase}{url}"/> + <title>{repo|escape} Changelog</title> + {latestentry%feedupdated} + +{entries%changelogentry} +</feed> diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/atom/changelogentry.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/atom/changelogentry.tmpl new file mode 100644 index 0000000..068b5ea --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/atom/changelogentry.tmpl @@ -0,0 +1,16 @@ + <entry> + <title>{desc|strip|firstline|strip|escape|nonempty}</title> + <id>{urlbase}{url}#changeset-{node}</id> + <link href="{urlbase}{url}rev/{node|short}"/> + <author> + <name>{author|person|escape}</name> + <email>{author|email|obfuscate}</email> + </author> + <updated>{date|rfc3339date}</updated> + <published>{date|rfc3339date}</published> + <content type="xhtml"> + <div xmlns="http://www.w3.org/1999/xhtml"> + <pre xml:space="preserve">{desc|escape|nonempty}</pre> + </div> + </content> + </entry> diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/atom/error.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/atom/error.tmpl new file mode 100644 index 0000000..5735fba --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/atom/error.tmpl @@ -0,0 +1,17 @@ +{header} + <!-- Error --> + <id>{urlbase}{url}</id> + <link rel="self" href="{urlbase}{url}atom-log"/> + <link rel="alternate" href="{urlbase}{url}"/> + <title>Error</title> + 
<updated>1970-01-01T00:00:00+00:00</updated> + <entry> + <title>Error</title> + <id>http://mercurial.selenic.com/#error</id> + <author> + <name>mercurial</name> + </author> + <updated>1970-01-01T00:00:00+00:00</updated> + <content type="text">{error|escape}</content> + </entry> +</feed> diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/atom/filelog.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/atom/filelog.tmpl new file mode 100644 index 0000000..99d4e9b --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/atom/filelog.tmpl @@ -0,0 +1,8 @@ +{header} + <id>{urlbase}{url}atom-log/tip/{file|escape}</id> + <link rel="self" href="{urlbase}{url}atom-log/tip/{file|urlescape}"/> + <title>{repo|escape}: {file|escape} history</title> + {latestentry%feedupdated} + +{entries%changelogentry} +</feed> diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/atom/header.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/atom/header.tmpl new file mode 100644 index 0000000..90ffceb --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/atom/header.tmpl @@ -0,0 +1,2 @@ +<?xml version="1.0" encoding="{encoding}"?> +<feed xmlns="http://www.w3.org/2005/Atom">
\ No newline at end of file diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/atom/map b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/atom/map new file mode 100644 index 0000000..c016b55 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/atom/map @@ -0,0 +1,11 @@ +default = 'changelog' +feedupdated = '<updated>{date|rfc3339date}</updated>' +mimetype = 'application/atom+xml; charset={encoding}' +header = header.tmpl +changelog = changelog.tmpl +changelogentry = changelogentry.tmpl +filelog = filelog.tmpl +filelogentry = filelogentry.tmpl +tags = tags.tmpl +tagentry = tagentry.tmpl +error = error.tmpl diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/atom/tagentry.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/atom/tagentry.tmpl new file mode 100644 index 0000000..857df12 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/atom/tagentry.tmpl @@ -0,0 +1,8 @@ + <entry> + <title>{tag|escape}</title> + <link rel="alternate" href="{urlbase}{url}rev/{node|short}"/> + <id>{urlbase}{url}#tag-{node}</id> + <updated>{date|rfc3339date}</updated> + <published>{date|rfc3339date}</published> + <content type="text">{tag|strip|escape}</content> + </entry> diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/atom/tags.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/atom/tags.tmpl new file mode 100644 index 0000000..82294ec --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/atom/tags.tmpl @@ -0,0 +1,11 @@ +{header} + <id>{urlbase}{url}</id> + <link rel="self" href="{urlbase}{url}atom-tags"/> + <link rel="alternate" href="{urlbase}{url}tags"/> + <title>{repo|escape}: tags</title> + <summary>{repo|escape} tag history</summary> + <author><name>Mercurial SCM</name></author> + {latestentry%feedupdated} + +{entriesnotip%tagentry} +</feed> diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/coal/header.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/coal/header.tmpl new file mode 100644 index 0000000..ed0f42d --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/coal/header.tmpl @@ -0,0 +1,6 @@ +<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd"> +<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en-US"> +<head> +<link rel="icon" href="{staticurl}hgicon.png" type="image/png" /> +<meta name="robots" content="index, nofollow" /> +<link rel="stylesheet" href="{staticurl}style-coal.css" type="text/css" /> diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/coal/map b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/coal/map new file mode 100644 index 0000000..eaa8c0c --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/coal/map @@ -0,0 +1,201 @@ +default = 'shortlog' + +mimetype = 'text/html; charset={encoding}' +header = header.tmpl +footer = ../paper/footer.tmpl +search = ../paper/search.tmpl + +changelog = ../paper/shortlog.tmpl +shortlog = ../paper/shortlog.tmpl +shortlogentry = ../paper/shortlogentry.tmpl +graph = ../paper/graph.tmpl + +help = ../paper/help.tmpl +helptopics = ../paper/helptopics.tmpl + +helpentry = '<tr><td><a href="{url}help/{topic|escape}{sessionvars%urlparameter}">{topic|escape}</a></td><td>{summary|escape}</td></tr>' + +naventry = '<a 
href="{url}log/{node|short}{sessionvars%urlparameter}">{label|escape}</a> ' +navshortentry = '<a href="{url}shortlog/{node|short}{sessionvars%urlparameter}">{label|escape}</a> ' +navgraphentry = '<a href="{url}graph/{node|short}{sessionvars%urlparameter}">{label|escape}</a> ' +filenaventry = '<a href="{url}log/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{label|escape}</a> ' +filedifflink = '<a href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{file|escape}</a> ' +filenodelink = '<a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{file|escape}</a> ' +filenolink = '{file|escape} ' +fileellipses = '...' +changelogentry = ../paper/shortlogentry.tmpl +searchentry = ../paper/shortlogentry.tmpl +changeset = ../paper/changeset.tmpl +manifest = ../paper/manifest.tmpl + +nav = '{before%naventry} {after%naventry}' +navshort = '{before%navshortentry}{after%navshortentry}' +navgraph = '{before%navgraphentry}{after%navgraphentry}' +filenav = '{before%filenaventry}{after%filenaventry}' + +direntry = ' + <tr class="fileline parity{parity}"> + <td class="name"> + <a href="{url}file/{node|short}{path|urlescape}{sessionvars%urlparameter}"> + <img src="{staticurl}coal-folder.png" alt="dir."/> {basename|escape}/ + </a> + <a href="{url}file/{node|short}{path|urlescape}/{emptydirs|urlescape}{sessionvars%urlparameter}"> + {emptydirs|escape} + </a> + </td> + <td class="size"></td> + <td class="permissions">drwxr-xr-x</td> + </tr>' + +fileentry = ' + <tr class="fileline parity{parity}"> + <td class="filename"> + <a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}"> + <img src="{staticurl}coal-file.png" alt="file"/> {basename|escape} + </a> + </td> + <td class="size">{size}</td> + <td class="permissions">{permissions|permissions}</td> + </tr>' + +filerevision = ../paper/filerevision.tmpl +fileannotate = ../paper/fileannotate.tmpl +filediff = ../paper/filediff.tmpl +filelog = ../paper/filelog.tmpl +fileline = ' + <div class="parity{parity} source"><a href="#{lineid}" id="{lineid}">{linenumber}</a> {line|escape}</div>' +filelogentry = ../paper/filelogentry.tmpl + +annotateline = ' + <tr class="parity{parity}"> + <td class="annotate"> + <a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}#{targetline}" + title="{node|short}: {desc|escape|firstline}">{author|user}@{rev}</a> + </td> + <td class="source"><a href="#{lineid}" id="{lineid}">{linenumber}</a> {line|escape}</td> + </tr>' + +diffblock = '<div class="source bottomline parity{parity}"><pre>{lines}</pre></div>' +difflineplus = '<a href="#{lineid}" id="{lineid}">{linenumber}</a> <span class="plusline">{line|escape}</span>' +difflineminus = '<a href="#{lineid}" id="{lineid}">{linenumber}</a> <span class="minusline">{line|escape}</span>' +difflineat = '<a href="#{lineid}" id="{lineid}">{linenumber}</a> <span class="atline">{line|escape}</span>' +diffline = '<a href="#{lineid}" id="{lineid}">{linenumber}</a> {line|escape}' + +changelogparent = ' + <tr> + <th class="parent">parent {rev}:</th> + <td class="parent"><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a></td> + </tr>' + +changesetparent = '<a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a> ' + +filerevparent = '<a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{rename%filerename}{node|short}</a> ' +filerevchild = '<a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{node|short}</a> ' + +filerename = 
'{file|escape}@' +filelogrename = ' + <tr> + <th>base:</th> + <td> + <a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}"> + {file|escape}@{node|short} + </a> + </td> + </tr>' +fileannotateparent = ' + <tr> + <td class="metatag">parent:</td> + <td> + <a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}"> + {rename%filerename}{node|short} + </a> + </td> + </tr>' +changesetchild = ' <a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a>' +changelogchild = ' + <tr> + <th class="child">child</th> + <td class="child"> + <a href="{url}rev/{node|short}{sessionvars%urlparameter}"> + {node|short} + </a> + </td> + </tr>' +fileannotatechild = ' + <tr> + <td class="metatag">child:</td> + <td> + <a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}"> + {node|short} + </a> + </td> + </tr>' +tags = ../paper/tags.tmpl +tagentry = ' + <tr class="tagEntry parity{parity}"> + <td> + <a href="{url}rev/{node|short}{sessionvars%urlparameter}"> + {tag|escape} + </a> + </td> + <td class="node"> + {node|short} + </td> + </tr>' +branches = ../paper/branches.tmpl +branchentry = ' + <tr class="tagEntry parity{parity}"> + <td> + <a href="{url}shortlog/{node|short}{sessionvars%urlparameter}" class="{status}"> + {branch|escape} + </a> + </td> + <td class="node"> + {node|short} + </td> + </tr>' +changelogtag = '<span class="tag">{name|escape}</span> ' +changesettag = '<span class="tag">{tag|escape}</span> ' +changelogbranchhead = '<span class="branchhead">{name|escape}</span> ' +changelogbranchname = '<span class="branchname">{name|escape}</span> ' + +filediffparent = ' + <tr> + <th class="parent">parent {rev}:</th> + <td class="parent"><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a></td> + </tr>' +filelogparent = ' + <tr> + <th>parent {rev}:</th> + <td><a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{node|short}</a></td> + </tr>' +filediffchild = ' + <tr> + <th class="child">child {rev}:</th> + <td class="child"><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a> + </td> + </tr>' +filelogchild = ' + <tr> + <th>child {rev}:</th> + <td><a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{node|short}</a></td> + </tr>' + +indexentry = ' + <tr class="parity{parity}"> + <td><a href="{url}{sessionvars%urlparameter}">{name|escape}</a></td> + <td>{description}</td> + <td>{contact|obfuscate}</td> + <td class="age">{lastchange|age}</td> + <td class="indexlinks">{archives%indexarchiveentry}</td> + </tr>\n' +indexarchiveentry = '<a href="{url}archive/{node|short}{extension|urlescape}"> ↓{type|escape}</a>' +index = ../paper/index.tmpl +archiveentry = ' + <li> + <a href="{url}archive/{node|short}{extension|urlescape}">{type|escape}</a> + </li>' +notfound = ../paper/notfound.tmpl +error = ../paper/error.tmpl +urlparameter = '{separator}{name}={value|urlescape}' +hiddenformentry = '<input type="hidden" name="{name}" value="{value|escape}" />' diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/gitweb/branches.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/gitweb/branches.tmpl new file mode 100644 index 0000000..3df49f9 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/gitweb/branches.tmpl @@ -0,0 +1,31 @@ +{header} +<title>{repo|escape}: Branches</title> +<link rel="alternate" type="application/atom+xml" + href="{url}atom-tags" title="Atom feed for 
{repo|escape}"/> +<link rel="alternate" type="application/rss+xml" + href="{url}rss-tags" title="RSS feed for {repo|escape}"/> +</head> +<body> + +<div class="page_header"> +<a href="http://mercurial.selenic.com/" title="Mercurial" style="float: right;">Mercurial</a><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / branches +</div> + +<div class="page_nav"> +<a href="{url}summary{sessionvars%urlparameter}">summary</a> | +<a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a> | +<a href="{url}log{sessionvars%urlparameter}">changelog</a> | +<a href="{url}graph{sessionvars%urlparameter}">graph</a> | +<a href="{url}tags{sessionvars%urlparameter}">tags</a> | +branches | +<a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a> | +<a href="{url}help{sessionvars%urlparameter}">help</a> +<br/> +</div> + +<div class="title"> </div> +<table cellspacing="0"> +{entries%branchentry} +</table> + +{footer} diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/gitweb/changelog.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/gitweb/changelog.tmpl new file mode 100644 index 0000000..62c188f --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/gitweb/changelog.tmpl @@ -0,0 +1,40 @@ +{header} +<title>{repo|escape}: Changelog</title> +<link rel="alternate" type="application/atom+xml" + href="{url}atom-log" title="Atom feed for {repo|escape}"/> +<link rel="alternate" type="application/rss+xml" + href="{url}rss-log" title="RSS feed for {repo|escape}"/> +</head> +<body> + +<div class="page_header"> +<a href="http://mercurial.selenic.com/" title="Mercurial" style="float: right;">Mercurial</a><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / changelog +</div> + +<form action="{url}log"> +{sessionvars%hiddenformentry} +<div class="search"> +<input type="text" name="rev" /> +</div> +</form> + +<div class="page_nav"> +<a href="{url}summary{sessionvars%urlparameter}">summary</a> | +<a href="{url}shortlog/{rev}{sessionvars%urlparameter}">shortlog</a> | +changelog | +<a href="{url}graph{sessionvars%urlparameter}">graph</a> | +<a href="{url}tags{sessionvars%urlparameter}">tags</a> | +<a href="{url}branches{sessionvars%urlparameter}">branches</a> | +<a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a>{archives%archiveentry} | +<a href="{url}help{sessionvars%urlparameter}">help</a> +<br/> +{changenav%nav}<br/> +</div> + +{entries%changelogentry} + +<div class="page_nav"> +{changenav%nav}<br/> +</div> + +{footer} diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/gitweb/changelogentry.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/gitweb/changelogentry.tmpl new file mode 100644 index 0000000..bb11704 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/gitweb/changelogentry.tmpl @@ -0,0 +1,14 @@ +<div> +<a class="title" href="{url}rev/{node|short}{sessionvars%urlparameter}"><span class="age">{date|age}</span>{desc|strip|firstline|escape|nonempty}<span class="logtags"> {inbranch%inbranchtag}{branches%branchtag}{tags%tagtag}</span></a> +</div> +<div class="title_text"> +<div class="log_link"> +<a href="{url}rev/{node|short}{sessionvars%urlparameter}">changeset</a><br/> +</div> +<i>{author|obfuscate} [{date|rfc822date}] rev {rev}</i><br/> +</div> +<div class="log_body"> +{desc|strip|escape|addbreaks|nonempty} +<br/> +<br/> +</div> diff --git 
a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/gitweb/changeset.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/gitweb/changeset.tmpl new file mode 100644 index 0000000..13267c2 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/gitweb/changeset.tmpl @@ -0,0 +1,52 @@ +{header} +<title>{repo|escape}: changeset {rev}:{node|short}</title> +<link rel="alternate" type="application/atom+xml" + href="{url}atom-log" title="Atom feed for {repo|escape}"/> +<link rel="alternate" type="application/rss+xml" + href="{url}rss-log" title="RSS feed for {repo|escape}"/> +</head> +<body> + +<div class="page_header"> +<a href="http://mercurial.selenic.com/" title="Mercurial" style="float: right;">Mercurial</a><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / changeset +</div> + +<div class="page_nav"> +<a href="{url}summary{sessionvars%urlparameter}">summary</a> | +<a href="{url}shortlog/{rev}{sessionvars%urlparameter}">shortlog</a> | +<a href="{url}log/{rev}{sessionvars%urlparameter}">changelog</a> | +<a href="{url}graph{sessionvars%urlparameter}">graph</a> | +<a href="{url}tags{sessionvars%urlparameter}">tags</a> | +<a href="{url}branches{sessionvars%urlparameter}">branches</a> | +<a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a> | +changeset | +<a href="{url}raw-rev/{node|short}">raw</a> {archives%archiveentry} | +<a href="{url}help{sessionvars%urlparameter}">help</a> +<br/> +</div> + +<div> +<a class="title" href="{url}raw-rev/{node|short}">{desc|strip|escape|firstline|nonempty} <span class="logtags">{inbranch%inbranchtag}{branches%branchtag}{tags%tagtag}</span></a> +</div> +<div class="title_text"> +<table cellspacing="0"> +<tr><td>author</td><td>{author|obfuscate}</td></tr> +<tr><td></td><td>{date|date} ({date|age})</td></tr> +{branch%changesetbranch} +<tr><td>changeset {rev}</td><td style="font-family:monospace">{node|short}</td></tr> +{parent%changesetparent} +{child%changesetchild} +</table></div> + +<div class="page_body"> +{desc|strip|escape|addbreaks|nonempty} +</div> +<div class="list_head"></div> +<div class="title_text"> +<table cellspacing="0"> +{files} +</table></div> + +<div class="page_body">{diff}</div> + +{footer} diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/gitweb/error.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/gitweb/error.tmpl new file mode 100644 index 0000000..25b71fc --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/gitweb/error.tmpl @@ -0,0 +1,25 @@ +{header} +<title>{repo|escape}: Error</title> +<link rel="alternate" type="application/atom+xml" + href="{url}atom-log" title="Atom feed for {repo|escape}"/> +<link rel="alternate" type="application/rss+xml" + href="{url}rss-log" title="RSS feed for {repo|escape}"/> +</head> +<body> + +<div class="page_header"> +<a href="http://mercurial.selenic.com/" title="Mercurial" style="float: right;">Mercurial</a><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / error +</div> + +<div class="page_nav"> +<a href="{url}summary{sessionvars%urlparameter}">summary</a> | <a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a> | <a href="{url}log{sessionvars%urlparameter}">changelog</a> | <a href="{url}tags{sessionvars%urlparameter}">tags</a> | <a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a><br/> +</div> + +<div class="page_body"> +<br/> +<i>An error occurred while processing your request</i><br/> 
+<br/> +{error|escape} +</div> + +{footer} diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/gitweb/fileannotate.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/gitweb/fileannotate.tmpl new file mode 100644 index 0000000..54b05c6 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/gitweb/fileannotate.tmpl @@ -0,0 +1,64 @@ +{header} +<title>{repo|escape}: {file|escape}@{node|short} (annotated)</title> +<link rel="alternate" type="application/atom+xml" + href="{url}atom-log" title="Atom feed for {repo|escape}"/> +<link rel="alternate" type="application/rss+xml" + href="{url}rss-log" title="RSS feed for {repo|escape}"/> +</head> +<body> + +<div class="page_header"> +<a href="http://mercurial.selenic.com/" title="Mercurial" style="float: right;">Mercurial</a><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / annotate +</div> + +<div class="page_nav"> +<a href="{url}summary{sessionvars%urlparameter}">summary</a> | +<a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a> | +<a href="{url}log{sessionvars%urlparameter}">changelog</a> | +<a href="{url}graph{sessionvars%urlparameter}">graph</a> | +<a href="{url}tags{sessionvars%urlparameter}">tags</a> | +<a href="{url}branches{sessionvars%urlparameter}">branches</a> | +<a href="{url}file/{node|short}{path|urlescape}{sessionvars%urlparameter}">files</a> | +<a href="{url}rev/{node|short}{sessionvars%urlparameter}">changeset</a> | +<a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">file</a> | +<a href="{url}file/tip/{file|urlescape}{sessionvars%urlparameter}">latest</a> | +<a href="{url}log/{node|short}/{file|urlescape}{sessionvars%urlparameter}">revisions</a> | +annotate | +<a href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">diff</a> | +<a href="{url}raw-annotate/{node|short}/{file|urlescape}">raw</a> | +<a href="{url}help{sessionvars%urlparameter}">help</a> +<br/> +</div> + +<div class="title">{file|escape}</div> + +<div class="title_text"> +<table cellspacing="0"> +<tr> + <td>author</td> + <td>{author|obfuscate}</td></tr> +<tr> + <td></td> + <td>{date|date} ({date|age})</td></tr> +{branch%filerevbranch} +<tr> + <td>changeset {rev}</td> + <td style="font-family:monospace"><a class="list" href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a></td></tr> +{parent%fileannotateparent} +{child%fileannotatechild} +<tr> + <td>permissions</td> + <td style="font-family:monospace">{permissions|permissions}</td></tr> +</table> +</div> + +<div class="page_path"> +{desc|strip|escape|addbreaks|nonempty} +</div> +<div class="page_body"> +<table> +{annotate%annotateline} +</table> +</div> + +{footer} diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/gitweb/filediff.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/gitweb/filediff.tmpl new file mode 100644 index 0000000..91343ed --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/gitweb/filediff.tmpl @@ -0,0 +1,49 @@ +{header} +<title>{repo|escape}: diff {file|escape}</title> +<link rel="alternate" type="application/atom+xml" + href="{url}atom-log" title="Atom feed for {repo|escape}"/> +<link rel="alternate" type="application/rss+xml" + href="{url}rss-log" title="RSS feed for {repo|escape}"/> +</head> +<body> + +<div class="page_header"> +<a href="http://mercurial.selenic.com/" title="Mercurial" style="float: right;">Mercurial</a><a 
href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / diff +</div> + +<div class="page_nav"> +<a href="{url}summary{sessionvars%urlparameter}">summary</a> | +<a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a> | +<a href="{url}log{sessionvars%urlparameter}">changelog</a> | +<a href="{url}graph{sessionvars%urlparameter}">graph</a> | +<a href="{url}tags{sessionvars%urlparameter}">tags</a> | +<a href="{url}branches{sessionvars%urlparameter}">branches</a> | +<a href="{url}file/{node|short}{path|urlescape}{sessionvars%urlparameter}">files</a> | +<a href="{url}rev/{node|short}{sessionvars%urlparameter}">changeset</a> | +<a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">file</a> | +<a href="{url}file/tip/{file|urlescape}{sessionvars%urlparameter}">latest</a> | +<a href="{url}log/{node|short}/{file|urlescape}{sessionvars%urlparameter}">revisions</a> | +<a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}">annotate</a> | +diff | +<a href="{url}raw-diff/{node|short}/{file|urlescape}">raw</a><br/> | +<a href="{url}help{sessionvars%urlparameter}">help</a> +</div> + +<div class="title">{file|escape}</div> + +<table> +{branch%filerevbranch} +<tr> + <td>changeset {rev}</td> + <td style="font-family:monospace"><a class="list" href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a></td></tr> +{parent%filediffparent} +{child%filediffchild} +</table> + +<div class="list_head"></div> + +<div class="page_body"> +{diff} +</div> + +{footer} diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/gitweb/filelog.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/gitweb/filelog.tmpl new file mode 100644 index 0000000..d034593 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/gitweb/filelog.tmpl @@ -0,0 +1,41 @@ +{header} +<title>{repo|escape}: File revisions</title> +<link rel="alternate" type="application/atom+xml" + href="{url}atom-log" title="Atom feed for {repo|escape}"/> +<link rel="alternate" type="application/rss+xml" + href="{url}rss-log" title="RSS feed for {repo|escape}"/> +</head> +<body> + +<div class="page_header"> +<a href="http://mercurial.selenic.com/" title="Mercurial" style="float: right;">Mercurial</a><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / file revisions +</div> + +<div class="page_nav"> +<a href="{url}summary{sessionvars%urlparameter}">summary</a> | +<a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a> | +<a href="{url}log{sessionvars%urlparameter}">changelog</a> | +<a href="{url}graph{sessionvars%urlparameter}">graph</a> | +<a href="{url}tags{sessionvars%urlparameter}">tags</a> | +<a href="{url}branches{sessionvars%urlparameter}">branches</a> | +<a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">file</a> | +revisions | +<a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}">annotate</a> | +<a href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">diff</a> | +<a href="{url}rss-log/{node|short}/{file|urlescape}">rss</a> | +<a href="{url}help{sessionvars%urlparameter}">help</a> +<br/> +{nav%filenav} +</div> + +<div class="title" >{file|urlescape}</div> + +<table> +{entries%filelogentry} +</table> + +<div class="page_nav"> +{nav%filenav} +</div> + +{footer} diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/gitweb/filerevision.tmpl 
b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/gitweb/filerevision.tmpl new file mode 100644 index 0000000..065a21b --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/gitweb/filerevision.tmpl @@ -0,0 +1,63 @@ +{header} +<title>{repo|escape}: {file|escape}@{node|short}</title> +<link rel="alternate" type="application/atom+xml" + href="{url}atom-log" title="Atom feed for {repo|escape}"/> +<link rel="alternate" type="application/rss+xml" + href="{url}rss-log" title="RSS feed for {repo|escape}"/> +</head> +<body> + +<div class="page_header"> +<a href="http://mercurial.selenic.com/" title="Mercurial" style="float: right;">Mercurial</a><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / file revision +</div> + +<div class="page_nav"> +<a href="{url}summary{sessionvars%urlparameter}">summary</a> | +<a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a> | +<a href="{url}log{sessionvars%urlparameter}">changelog</a> | +<a href="{url}graph{sessionvars%urlparameter}">graph</a> | +<a href="{url}tags{sessionvars%urlparameter}">tags</a> | +<a href="{url}branches{sessionvars%urlparameter}">branches</a> | +<a href="{url}file/{node|short}{path|urlescape}{sessionvars%urlparameter}">files</a> | +<a href="{url}rev/{node|short}{sessionvars%urlparameter}">changeset</a> | +file | +<a href="{url}file/tip/{file|urlescape}{sessionvars%urlparameter}">latest</a> | +<a href="{url}log/{node|short}/{file|urlescape}{sessionvars%urlparameter}">revisions</a> | +<a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}">annotate</a> | +<a href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">diff</a> | +<a href="{url}raw-file/{node|short}/{file|urlescape}">raw</a> | +<a href="{url}help{sessionvars%urlparameter}">help</a> +<br/> +</div> + +<div class="title">{file|escape}</div> + +<div class="title_text"> +<table cellspacing="0"> +<tr> + <td>author</td> + <td>{author|obfuscate}</td></tr> +<tr> + <td></td> + <td>{date|date} ({date|age})</td></tr> +{branch%filerevbranch} +<tr> + <td>changeset {rev}</td> + <td style="font-family:monospace"><a class="list" href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a></td></tr> +{parent%filerevparent} +{child%filerevchild} +<tr> + <td>permissions</td> + <td style="font-family:monospace">{permissions|permissions}</td></tr> +</table> +</div> + +<div class="page_path"> +{desc|strip|escape|addbreaks|nonempty} +</div> + +<div class="page_body"> +{text%fileline} +</div> + +{footer} diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/gitweb/footer.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/gitweb/footer.tmpl new file mode 100644 index 0000000..a5f74c3 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/gitweb/footer.tmpl @@ -0,0 +1,11 @@ +<div class="page_footer"> +<div class="page_footer_text">{repo|escape}</div> +<div class="rss_logo"> +<a href="{url}rss-log">RSS</a> +<a href="{url}atom-log">Atom</a> +</div> +<br /> +{motd} +</div> +</body> +</html> diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/gitweb/graph.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/gitweb/graph.tmpl new file mode 100644 index 0000000..f329d1c --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/gitweb/graph.tmpl @@ -0,0 +1,122 @@ +{header} +<title>{repo|escape}: Graph</title> +<link rel="alternate" 
type="application/atom+xml" + href="{url}atom-log" title="Atom feed for {repo|escape}"/> +<link rel="alternate" type="application/rss+xml" + href="{url}rss-log" title="RSS feed for {repo|escape}"/> +<!--[if IE]><script type="text/javascript" src="{staticurl}excanvas.js"></script><![endif]--> +</head> +<body> + +<div class="page_header"> +<a href="http://mercurial.selenic.com/" title="Mercurial" style="float: right;">Mercurial</a><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / graph +</div> + +<form action="{url}log"> +{sessionvars%hiddenformentry} +<div class="search"> +<input type="text" name="rev" /> +</div> +</form> +<div class="page_nav"> +<a href="{url}summary{sessionvars%urlparameter}">summary</a> | +<a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a> | +<a href="{url}log/{rev}{sessionvars%urlparameter}">changelog</a> | +graph | +<a href="{url}tags{sessionvars%urlparameter}">tags</a> | +<a href="{url}branches{sessionvars%urlparameter}">branches</a> | +<a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a> | +<a href="{url}help{sessionvars%urlparameter}">help</a> +<br/> +<a href="{url}graph/{rev}{lessvars%urlparameter}">less</a> +<a href="{url}graph/{rev}{morevars%urlparameter}">more</a> +| {changenav%navgraph}<br/> +</div> + +<div class="title"> </div> + +<noscript>The revision graph only works with JavaScript-enabled browsers.</noscript> + +<div id="wrapper"> +<ul id="nodebgs"></ul> +<canvas id="graph" width="224" height="{canvasheight}"></canvas> +<ul id="graphnodes"></ul> +</div> + +<script type="text/javascript" src="{staticurl}graph.js"></script> +<script> +<!-- hide script content + +var data = {jsdata|json}; +var graph = new Graph(); +graph.scale({bg_height}); + +graph.edge = function(x0, y0, x1, y1, color) \{ + + this.setColor(color, 0.0, 0.65); + this.ctx.beginPath(); + this.ctx.moveTo(x0, y0); + this.ctx.lineTo(x1, y1); + this.ctx.stroke(); + +} + +var revlink = '<li style="_STYLE"><span class="desc">'; +revlink += '<a class="list" href="{url}rev/_NODEID{sessionvars%urlparameter}" title="_NODEID"><b>_DESC</b></a>'; +revlink += '</span> _TAGS'; +revlink += '<span class="info">_DATE, by _USER</span></li>'; + +graph.vertex = function(x, y, color, parity, cur) \{ + + this.ctx.beginPath(); + color = this.setColor(color, 0.25, 0.75); + this.ctx.arc(x, y, radius, 0, Math.PI * 2, true); + this.ctx.fill(); + + var bg = '<li class="bg parity' + parity + '"></li>'; + var left = (this.columns + 1) * this.bg_height; + var nstyle = 'padding-left: ' + left + 'px;'; + var item = revlink.replace(/_STYLE/, nstyle); + item = item.replace(/_PARITY/, 'parity' + parity); + item = item.replace(/_NODEID/, cur[0]); + item = item.replace(/_NODEID/, cur[0]); + item = item.replace(/_DESC/, cur[3]); + item = item.replace(/_USER/, cur[4]); + item = item.replace(/_DATE/, cur[5]); + + var tagspan = ''; + if (cur[7].length || (cur[6][0] != 'default' || cur[6][1])) \{ + tagspan = '<span class="logtags">'; + if (cur[6][1]) \{ + tagspan += '<span class="branchtag" title="' + cur[6][0] + '">'; + tagspan += cur[6][0] + '</span> '; + } else if (!cur[6][1] && cur[6][0] != 'default') \{ + tagspan += '<span class="inbranchtag" title="' + cur[6][0] + '">'; + tagspan += cur[6][0] + '</span> '; + } + if (cur[7].length) \{ + for (var t in cur[7]) \{ + var tag = cur[7][t]; + tagspan += '<span class="tagtag">' + tag + '</span> '; + } + } + tagspan += '</span>'; + } + + item = item.replace(/_TAGS/, tagspan); + return [bg, item]; + +} + +graph.render(data); + +// stop hiding 
script --> +</script> + +<div class="page_nav"> +<a href="{url}graph/{rev}{lessvars%urlparameter}">less</a> +<a href="{url}graph/{rev}{morevars%urlparameter}">more</a> +| {changenav%navgraph} +</div> + +{footer} diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/gitweb/header.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/gitweb/header.tmpl new file mode 100644 index 0000000..f3df3d7 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/gitweb/header.tmpl @@ -0,0 +1,8 @@ +<?xml version="1.0" encoding="{encoding}"?> +<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd"> +<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en-US" lang="en-US"> +<head> +<link rel="icon" href="{staticurl}hgicon.png" type="image/png" /> +<meta name="robots" content="index, nofollow"/> +<link rel="stylesheet" href="{staticurl}style-gitweb.css" type="text/css" /> + diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/gitweb/help.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/gitweb/help.tmpl new file mode 100644 index 0000000..42ad8d2 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/gitweb/help.tmpl @@ -0,0 +1,31 @@ +{header} +<title>{repo|escape}: Branches</title> +<link rel="alternate" type="application/atom+xml" + href="{url}atom-tags" title="Atom feed for {repo|escape}"/> +<link rel="alternate" type="application/rss+xml" + href="{url}rss-tags" title="RSS feed for {repo|escape}"/> +</head> +<body> + +<div class="page_header"> +<a href="http://mercurial.selenic.com/" title="Mercurial" style="float: right;">Mercurial</a><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / branches +</div> + +<div class="page_nav"> +<a href="{url}summary{sessionvars%urlparameter}">summary</a> | +<a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a> | +<a href="{url}log{sessionvars%urlparameter}">changelog</a> | +<a href="{url}graph{sessionvars%urlparameter}">graph</a> | +<a href="{url}tags{sessionvars%urlparameter}">tags</a> | +branches | +<a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a> +<br/> +</div> + +<div class="title"> </div> + +<pre> +{doc|escape} +</pre> + +{footer} diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/gitweb/helptopics.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/gitweb/helptopics.tmpl new file mode 100644 index 0000000..1f2a436 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/gitweb/helptopics.tmpl @@ -0,0 +1,38 @@ +{header} +<title>{repo|escape}: Branches</title> +<link rel="alternate" type="application/atom+xml" + href="{url}atom-tags" title="Atom feed for {repo|escape}"/> +<link rel="alternate" type="application/rss+xml" + href="{url}rss-tags" title="RSS feed for {repo|escape}"/> +</head> +<body> + +<div class="page_header"> +<a href="http://mercurial.selenic.com/" title="Mercurial" style="float: right;">Mercurial</a><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / branches +</div> + +<div class="page_nav"> +<a href="{url}summary{sessionvars%urlparameter}">summary</a> | +<a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a> | +<a href="{url}log{sessionvars%urlparameter}">changelog</a> | +<a href="{url}graph{sessionvars%urlparameter}">graph</a> | +<a href="{url}tags{sessionvars%urlparameter}">tags</a> | +<a 
href="{url}branches{sessionvars%urlparameter}">branches</a> | +<a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a> | +help +<br/> +</div> + +<div class="title"> </div> +<table cellspacing="0"> +<tr><td colspan="2"><h2><a name="main" href="#topics">Topics</a></h2></td></tr> +{topics % helpentry} + +<tr><td colspan="2"><h2><a name="main" href="#main">Main Commands</a></h2></td></tr> +{earlycommands % helpentry} + +<tr><td colspan="2"><h2><a name="other" href="#other">Other Commands</a></h2></td></tr> +{othercommands % helpentry} +</table> + +{footer} diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/gitweb/index.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/gitweb/index.tmpl new file mode 100644 index 0000000..b8e66b0 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/gitweb/index.tmpl @@ -0,0 +1,26 @@ +{header} +<title>Mercurial repositories index</title> +</head> +<body> + +<div class="page_header"> + <a href="http://mercurial.selenic.com/" title="Mercurial" style="float: right;">Mercurial</a> + Repositories list +</div> + +<table cellspacing="0"> + <tr> + <td><a href="?sort={sort_name}">Name</a></td> + <td><a href="?sort={sort_description}">Description</a></td> + <td><a href="?sort={sort_contact}">Contact</a></td> + <td><a href="?sort={sort_lastchange}">Last modified</a></td> + <td> </td> + <td> </td> + </tr> + {entries%indexentry} +</table> +<div class="page_footer"> +{motd} +</div> +</body> +</html> diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/gitweb/manifest.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/gitweb/manifest.tmpl new file mode 100644 index 0000000..5123ed6 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/gitweb/manifest.tmpl @@ -0,0 +1,40 @@ +{header} +<title>{repo|escape}: files</title> +<link rel="alternate" type="application/atom+xml" + href="{url}atom-log" title="Atom feed for {repo|escape}"/> +<link rel="alternate" type="application/rss+xml" + href="{url}rss-log" title="RSS feed for {repo|escape}"/> +</head> +<body> + +<div class="page_header"> +<a href="http://mercurial.selenic.com/" title="Mercurial" style="float: right;">Mercurial</a><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / files +</div> + +<div class="page_nav"> +<a href="{url}summary{sessionvars%urlparameter}">summary</a> | +<a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a> | +<a href="{url}log{sessionvars%urlparameter}">changelog</a> | +<a href="{url}graph{sessionvars%urlparameter}">graph</a> | +<a href="{url}tags{sessionvars%urlparameter}">tags</a> | +<a href="{url}branches{sessionvars%urlparameter}">branches</a> | +files | +<a href="{url}rev/{node|short}{sessionvars%urlparameter}">changeset</a> {archives%archiveentry} | +<a href="{url}help{sessionvars%urlparameter}">help</a> +<br/> +</div> + +<div class="title">{path|escape} <span class="logtags">{inbranch%inbranchtag}{branches%branchtag}{tags%tagtag}</span></div> +<table cellspacing="0"> +<tr class="parity{upparity}"> +<td style="font-family:monospace">drwxr-xr-x</td> +<td style="font-family:monospace"></td> +<td style="font-family:monospace"></td> +<td><a href="{url}file/{node|short}{up|urlescape}{sessionvars%urlparameter}">[up]</a></td> +<td class="link"> </td> +</tr> +{dentries%direntry} +{fentries%fileentry} +</table> + +{footer} diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/gitweb/map 
b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/gitweb/map new file mode 100644 index 0000000..ce6a325 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/gitweb/map @@ -0,0 +1,260 @@ +default = 'summary' +mimetype = 'text/html; charset={encoding}' +header = header.tmpl +footer = footer.tmpl +search = search.tmpl +changelog = changelog.tmpl +summary = summary.tmpl +error = error.tmpl +notfound = notfound.tmpl + +help = help.tmpl +helptopics = helptopics.tmpl + +helpentry = '<tr><td><a href="{url}help/{topic|escape}{sessionvars%urlparameter}">{topic|escape}</a></td><td>{summary|escape}</td></tr>' + +naventry = '<a href="{url}log/{node|short}{sessionvars%urlparameter}">{label|escape}</a> ' +navshortentry = '<a href="{url}shortlog/{node|short}{sessionvars%urlparameter}">{label|escape}</a> ' +navgraphentry = '<a href="{url}graph/{node|short}{sessionvars%urlparameter}">{label|escape}</a> ' +filenaventry = '<a href="{url}log/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{label|escape}</a> ' +filedifflink = '<a href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{file|escape}</a> ' +filenodelink = ' + <tr class="parity{parity}"> + <td><a class="list" href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{file|escape}</a></td> + <td></td> + <td class="link"> + <a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">file</a> | + <a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}">annotate</a> | + <a href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">diff</a> | + <a href="{url}log/{node|short}/{file|urlescape}{sessionvars%urlparameter}">revisions</a> + </td> + </tr>' +filenolink = ' + <tr class="parity{parity}"> + <td><a class="list" href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{file|escape}</a></td> + <td></td> + <td class="link"> + file | + annotate | + <a href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">diff</a> | + <a href="{url}log/{node|short}/{file|urlescape}{sessionvars%urlparameter}">revisions</a> + </td> + </tr>' + +nav = '{before%naventry} {after%naventry}' +navshort = '{before%navshortentry}{after%navshortentry}' +navgraph = '{before%navgraphentry}{after%navgraphentry}' +filenav = '{before%filenaventry}{after%filenaventry}' + +fileellipses = '...' 
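[Editor's note: the gitweb map entries above and below follow hgweb's template-map syntax: a bare name on the right-hand side points at a .tmpl file, while a quoted string is an inline template in which {key|filter} pipes a value through filters such as escape, short or age, and {list%entry} renders every item of a list with another map entry. The snippet below is a minimal illustrative sketch of that expansion only, not Mercurial's actual templater (the real engine lives in mercurial/templater.py and supports far more); the filter bodies here are toy stand-ins.]

    import re

    # Toy filters standing in for hgweb's real ones.
    filters = {
        'escape': lambda s: s.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;'),
        'short': lambda s: s[:12],                    # abbreviate a node hash
        'urlescape': lambda s: s.replace(' ', '%20'), # toy URL quoting
    }

    def expand(tmpl, ctx, templates):
        """Expand {key|filter...} and {list%entry} against a context dict."""
        def repl(m):
            expr = m.group(1)
            if '%' in expr:                           # {list%entry}: map items through another entry
                name, entry = expr.split('%', 1)
                return ''.join(expand(templates[entry], item, templates)
                               for item in ctx[name])
            parts = expr.split('|')                   # {key|filter|filter...}
            val = ctx[parts[0]]
            for f in parts[1:]:
                val = filters[f](val)
            return val
        return re.sub(r'\{([^{}]+)\}', repl, tmpl)

    # Hypothetical mini map: one inline entry, analogous to 'tagentry' above.
    templates = {'tagentry': '<li>{tag|escape} {node|short}</li>\n'}
    page = expand('<ul>\n{entries%tagentry}</ul>\n',
                  {'entries': [{'tag': 'v1.0', 'node': 'abcdef0123456789'}]},
                  templates)
    print(page)

[End of editor's note; the gitweb map continues below.]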
+changelogentry = changelogentry.tmpl +searchentry = changelogentry.tmpl +changeset = changeset.tmpl +manifest = manifest.tmpl +direntry = ' + <tr class="parity{parity}"> + <td style="font-family:monospace">drwxr-xr-x</td> + <td style="font-family:monospace"></td> + <td style="font-family:monospace"></td> + <td> + <a href="{url}file/{node|short}{path|urlescape}{sessionvars%urlparameter}">{basename|escape}</a> + <a href="{url}file/{node|short}{path|urlescape}/{emptydirs|urlescape}{sessionvars%urlparameter}">{emptydirs|escape}</a> + </td> + <td class="link"> + <a href="{url}file/{node|short}{path|urlescape}{sessionvars%urlparameter}">files</a> + </td> + </tr>' +fileentry = ' + <tr class="parity{parity}"> + <td style="font-family:monospace">{permissions|permissions}</td> + <td style="font-family:monospace" align=right>{date|isodate}</td> + <td style="font-family:monospace" align=right>{size}</td> + <td class="list"> + <a class="list" href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{basename|escape}</a> + </td> + <td class="link"> + <a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">file</a> | + <a href="{url}log/{node|short}/{file|urlescape}{sessionvars%urlparameter}">revisions</a> | + <a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}">annotate</a> + </td> + </tr>' +filerevision = filerevision.tmpl +fileannotate = fileannotate.tmpl +filediff = filediff.tmpl +filelog = filelog.tmpl +fileline = ' + <div style="font-family:monospace" class="parity{parity}"> + <pre><a class="linenr" href="#{lineid}" id="{lineid}">{linenumber}</a> {line|escape}</pre> + </div>' +annotateline = ' + <tr style="font-family:monospace" class="parity{parity}"> + <td class="linenr" style="text-align: right;"> + <a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}#l{targetline}" + title="{node|short}: {desc|escape|firstline}">{author|user}@{rev}</a> + </td> + <td><pre><a class="linenr" href="#{lineid}" id="{lineid}">{linenumber}</a></pre></td> + <td><pre>{line|escape}</pre></td> + </tr>' +difflineplus = '<span style="color:#008800;"><a class="linenr" href="#{lineid}" id="{lineid}">{linenumber}</a> {line|escape}</span>' +difflineminus = '<span style="color:#cc0000;"><a class="linenr" href="#{lineid}" id="{lineid}">{linenumber}</a> {line|escape}</span>' +difflineat = '<span style="color:#990099;"><a class="linenr" href="#{lineid}" id="{lineid}">{linenumber}</a> {line|escape}</span>' +diffline = '<span><a class="linenr" href="#{lineid}" id="{lineid}">{linenumber}</a> {line|escape}</span>' +changelogparent = ' + <tr> + <th class="parent">parent {rev}:</th> + <td class="parent"> + <a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a> + </td> + </tr>' +changesetbranch = '<tr><td>branch</td><td>{name}</td></tr>' +changesetparent = ' + <tr> + <td>parent {rev}</td> + <td style="font-family:monospace"> + <a class="list" href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a> + </td> + </tr>' +filerevbranch = '<tr><td>branch</td><td>{name}</td></tr>' +filerevparent = ' + <tr> + <td>parent {rev}</td> + <td style="font-family:monospace"> + <a class="list" href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}"> + {rename%filerename}{node|short} + </a> + </td> + </tr>' +filerename = '{file|escape}@' +filelogrename = '| <a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">base</a>' +fileannotateparent = ' + <tr> + <td>parent {rev}</td> + <td 
style="font-family:monospace"> + <a class="list" href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}"> + {rename%filerename}{node|short} + </a> + </td> + </tr>' +changelogchild = ' + <tr> + <th class="child">child {rev}:</th> + <td class="child"><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a></td> + </tr>' +changesetchild = ' + <tr> + <td>child {rev}</td> + <td style="font-family:monospace"> + <a class="list" href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a> + </td> + </tr>' +filerevchild = ' + <tr> + <td>child {rev}</td> + <td style="font-family:monospace"> + <a class="list" href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{node|short}</a></td> + </tr>' +fileannotatechild = ' + <tr> + <td>child {rev}</td> + <td style="font-family:monospace"> + <a class="list" href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{node|short}</a></td> + </tr>' +tags = tags.tmpl +tagentry = ' + <tr class="parity{parity}"> + <td class="age"><i>{date|age}</i></td> + <td><a class="list" href="{url}rev/{node|short}{sessionvars%urlparameter}"><b>{tag|escape}</b></a></td> + <td class="link"> + <a href="{url}rev/{node|short}{sessionvars%urlparameter}">changeset</a> | + <a href="{url}log/{node|short}{sessionvars%urlparameter}">changelog</a> | + <a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a> + </td> + </tr>' +branches = branches.tmpl +branchentry = ' + <tr class="parity{parity}"> + <td class="age"><i>{date|age}</i></td> + <td><a class="list" href="{url}shortlog/{node|short}{sessionvars%urlparameter}"><b>{node|short}</b></a></td> + <td class="{status}">{branch|escape}</td> + <td class="link"> + <a href="{url}changeset/{node|short}{sessionvars%urlparameter}">changeset</a> | + <a href="{url}log/{node|short}{sessionvars%urlparameter}">changelog</a> | + <a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a> + </td> + </tr>' +diffblock = '<pre>{lines}</pre>' +filediffparent = ' + <tr> + <td>parent {rev}</td> + <td style="font-family:monospace"> + <a class="list" href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}"> + {node|short} + </a> + </td> + </tr>' +filelogparent = ' + <tr> + <td align="right">parent {rev}: </td> + <td><a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{node|short}</a></td> + </tr>' +filediffchild = ' + <tr> + <td>child {rev}</td> + <td style="font-family:monospace"> + <a class="list" href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{node|short}</a> + </td> + </tr>' +filelogchild = ' + <tr> + <td align="right">child {rev}: </td> + <td><a href="{url}file{node|short}/{file|urlescape}{sessionvars%urlparameter}">{node|short}</a></td> + </tr>' +shortlog = shortlog.tmpl +graph = graph.tmpl +tagtag = '<span class="tagtag" title="{name}">{name}</span> ' +branchtag = '<span class="branchtag" title="{name}">{name}</span> ' +inbranchtag = '<span class="inbranchtag" title="{name}">{name}</span> ' +shortlogentry = ' + <tr class="parity{parity}"> + <td class="age"><i>{date|age}</i></td> + <td><i>{author|person}</i></td> + <td> + <a class="list" href="{url}rev/{node|short}{sessionvars%urlparameter}"> + <b>{desc|strip|firstline|escape|nonempty}</b> + <span class="logtags">{inbranch%inbranchtag}{branches%branchtag}{tags%tagtag}</span> + </a> + </td> + <td class="link" nowrap> + <a href="{url}rev/{node|short}{sessionvars%urlparameter}">changeset</a> | + <a 
href="{url}file/{node|short}{sessionvars%urlparameter}">files</a> + </td> + </tr>' +filelogentry = ' + <tr class="parity{parity}"> + <td class="age"><i>{date|age}</i></td> + <td> + <a class="list" href="{url}rev/{node|short}{sessionvars%urlparameter}"> + <b>{desc|strip|firstline|escape|nonempty}</b> + </a> + </td> + <td class="link"> + <a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">file</a> | <a href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">diff</a> | <a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}">annotate</a> {rename%filelogrename}</td> + </tr>' +archiveentry = ' | <a href="{url}archive/{node|short}{extension}">{type|escape}</a> ' +indexentry = ' + <tr class="parity{parity}"> + <td> + <a class="list" href="{url}{sessionvars%urlparameter}"> + <b>{name|escape}</b> + </a> + </td> + <td>{description}</td> + <td>{contact|obfuscate}</td> + <td class="age">{lastchange|age}</td> + <td class="indexlinks">{archives%indexarchiveentry}</td> + <td><div class="rss_logo"><a href="{url}rss-log">RSS</a> <a href="{url}atom-log">Atom</a></div></td> + </tr>\n' +indexarchiveentry = ' <a href="{url}archive/{node|short}{extension}">{type|escape}</a> ' +index = index.tmpl +urlparameter = '{separator}{name}={value|urlescape}' +hiddenformentry = '<input type="hidden" name="{name}" value="{value|escape}" />' diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/gitweb/notfound.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/gitweb/notfound.tmpl new file mode 100644 index 0000000..073bb11 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/gitweb/notfound.tmpl @@ -0,0 +1,18 @@ +{header} +<title>Mercurial repository not found</title> +</head> + +<body> + +<div class="page_header"> +<a href="http://mercurial.selenic.com/" title="Mercurial" style="float: right;">Mercurial</a> Not found: {repo|escape} +</div> + +<div class="page_body"> +The specified repository "{repo|escape}" is unknown, sorry. +<br/> +<br/> +Please go back to the <a href="{url}">main repository list page</a>. 
+</div> + +{footer} diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/gitweb/search.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/gitweb/search.tmpl new file mode 100644 index 0000000..6cc51ad --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/gitweb/search.tmpl @@ -0,0 +1,38 @@ +{header} +<title>{repo|escape}: Search</title> +<link rel="alternate" type="application/atom+xml" + href="{url}atom-log" title="Atom feed for {repo|escape}"/> +<link rel="alternate" type="application/rss+xml" + href="{url}rss-log" title="RSS feed for {repo|escape}"/> +</head> +<body> + +<div class="page_header"> +<a href="http://mercurial.selenic.com/" title="Mercurial" style="float: right;">Mercurial</a><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / search + +<form action="{url}log"> +{sessionvars%hiddenformentry} +<div class="search"> +<input type="text" name="rev" value="{query|escape}" /> +</div> +</form> +</div> + +<div class="page_nav"> +<a href="{url}summary{sessionvars%urlparameter}">summary</a> | +<a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a> | +<a href="{url}log{sessionvars%urlparameter}">changelog</a> | +<a href="{url}graph{sessionvars%urlparameter}">graph</a> | +<a href="{url}tags{sessionvars%urlparameter}">tags</a> | +<a href="{url}branches{sessionvars%urlparameter}">branches</a> | +<a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a>{archives%archiveentry} + | + <a href="{url}help{sessionvars%urlparameter}">help</a> +<br/> +</div> + +<div class="title">searching for {query|escape}</div> + +{entries} + +{footer} diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/gitweb/shortlog.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/gitweb/shortlog.tmpl new file mode 100644 index 0000000..91e8e0b --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/gitweb/shortlog.tmpl @@ -0,0 +1,41 @@ +{header} +<title>{repo|escape}: Shortlog</title> +<link rel="alternate" type="application/atom+xml" + href="{url}atom-log" title="Atom feed for {repo|escape}"/> +<link rel="alternate" type="application/rss+xml" + href="{url}rss-log" title="RSS feed for {repo|escape}"/> +</head> +<body> + +<div class="page_header"> +<a href="http://mercurial.selenic.com/" title="Mercurial" style="float: right;">Mercurial</a><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / shortlog +</div> + +<form action="{url}log"> +{sessionvars%hiddenformentry} +<div class="search"> +<input type="text" name="rev" /> +</div> +</form> +<div class="page_nav"> +<a href="{url}summary{sessionvars%urlparameter}">summary</a> | +shortlog | +<a href="{url}log/{rev}{sessionvars%urlparameter}">changelog</a> | +<a href="{url}graph{sessionvars%urlparameter}">graph</a> | +<a href="{url}tags{sessionvars%urlparameter}">tags</a> | +<a href="{url}branches{sessionvars%urlparameter}">branches</a> | +<a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a>{archives%archiveentry} | +<a href="{url}help{sessionvars%urlparameter}">help</a> +<br/>{changenav%navshort}<br/> +</div> + +<div class="title"> </div> +<table cellspacing="0"> +{entries%shortlogentry} +</table> + +<div class="page_nav"> +{changenav%navshort} +</div> + +{footer} diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/gitweb/summary.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/gitweb/summary.tmpl new file mode 100644 
index 0000000..b964721 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/gitweb/summary.tmpl @@ -0,0 +1,59 @@ +{header} +<title>{repo|escape}: Summary</title> +<link rel="alternate" type="application/atom+xml" + href="{url}atom-log" title="Atom feed for {repo|escape}"/> +<link rel="alternate" type="application/rss+xml" + href="{url}rss-log" title="RSS feed for {repo|escape}"/> +</head> +<body> + +<div class="page_header"> +<a href="http://mercurial.selenic.com/" title="Mercurial" style="float: right;">Mercurial</a><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / summary + +<form action="{url}log"> +{sessionvars%hiddenformentry} +<div class="search"> +<input type="text" name="rev" /> +</div> +</form> +</div> + +<div class="page_nav"> +summary | +<a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a> | +<a href="{url}log{sessionvars%urlparameter}">changelog</a> | +<a href="{url}graph{sessionvars%urlparameter}">graph</a> | +<a href="{url}tags{sessionvars%urlparameter}">tags</a> | +<a href="{url}branches{sessionvars%urlparameter}">branches</a> | +<a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a>{archives%archiveentry} | +<a href="{url}help{sessionvars%urlparameter}">help</a> +<br/> +</div> + +<div class="title"> </div> +<table cellspacing="0"> +<tr><td>description</td><td>{desc}</td></tr> +<tr><td>owner</td><td>{owner|obfuscate}</td></tr> +<tr><td>last change</td><td>{lastchange|rfc822date}</td></tr> +</table> + +<div><a class="title" href="{url}shortlog{sessionvars%urlparameter}">changes</a></div> +<table cellspacing="0"> +{shortlog} +<tr class="light"><td colspan="4"><a class="list" href="{url}shortlog{sessionvars%urlparameter}">...</a></td></tr> +</table> + +<div><a class="title" href="{url}tags{sessionvars%urlparameter}">tags</a></div> +<table cellspacing="0"> +{tags} +<tr class="light"><td colspan="3"><a class="list" href="{url}tags{sessionvars%urlparameter}">...</a></td></tr> +</table> + +<div><a class="title" href="#">branches</a></div> +<table cellspacing="0"> +{branches%branchentry} +<tr class="light"> + <td colspan="4"><a class="list" href="#">...</a></td> +</tr> +</table> +{footer} diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/gitweb/tags.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/gitweb/tags.tmpl new file mode 100644 index 0000000..4bdf85c --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/gitweb/tags.tmpl @@ -0,0 +1,31 @@ +{header} +<title>{repo|escape}: Tags</title> +<link rel="alternate" type="application/atom+xml" + href="{url}atom-tags" title="Atom feed for {repo|escape}"/> +<link rel="alternate" type="application/rss+xml" + href="{url}rss-tags" title="RSS feed for {repo|escape}"/> +</head> +<body> + +<div class="page_header"> +<a href="http://mercurial.selenic.com/" title="Mercurial" style="float: right;">Mercurial</a><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / tags +</div> + +<div class="page_nav"> +<a href="{url}summary{sessionvars%urlparameter}">summary</a> | +<a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a> | +<a href="{url}log{sessionvars%urlparameter}">changelog</a> | +<a href="{url}graph{sessionvars%urlparameter}">graph</a> | +tags | +<a href="{url}branches{sessionvars%urlparameter}">branches</a> | +<a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a> | +<a href="{url}help{sessionvars%urlparameter}">help</a> +<br/> +</div> + +<div class="title"> 
</div> +<table cellspacing="0"> +{entries%tagentry} +</table> + +{footer} diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/map-cmdline.changelog b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/map-cmdline.changelog new file mode 100644 index 0000000..f54134a --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/map-cmdline.changelog @@ -0,0 +1,17 @@ +header = '{date|shortdate} {author|person} <{author|email}>\n\n' +header_verbose = '' +changeset = '\t* {files|stringify|fill68|tabindent}{desc|fill68|tabindent|strip}\n\t[{node|short}]{tags}{branches}\n\n' +changeset_quiet = '\t* {desc|firstline|fill68|tabindent|strip}\n\n' +changeset_verbose = '{date|isodate} {author|person} <{author|email}> ({node|short}{tags}{branches})\n\n\t* {file_adds|stringify|fill68|tabindent}{file_dels|stringify|fill68|tabindent}{files|stringify|fill68|tabindent}{desc|fill68|tabindent|strip}\n\n' +start_tags = ' [' +tag = '{tag}, ' +last_tag = '{tag}]' +start_branches = ' <' +branch = '{branch}, ' +last_branch = '{branch}>' +file = '{file}, ' +last_file = '{file}:\n\t' +file_add = '{file_add}, ' +last_file_add = '{file_add}: new file.\n* ' +file_del = '{file_del}, ' +last_file_del = '{file_del}: deleted file.\n* ' diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/map-cmdline.compact b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/map-cmdline.compact new file mode 100644 index 0000000..ee66bff --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/map-cmdline.compact @@ -0,0 +1,9 @@ +changeset = '{rev}{tags}{parents} {node|short} {date|isodate} {author|user}\n {desc|firstline|strip}\n\n' +changeset_quiet = '{rev}:{node|short}\n' +changeset_verbose = '{rev}{tags}{parents} {node|short} {date|isodate} {author}\n {desc|strip}\n\n' +start_tags = '[' +tag = '{tag},' +last_tag = '{tag}]' +start_parents = ':' +parent = '{rev},' +last_parent = '{rev}' diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/map-cmdline.default b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/map-cmdline.default new file mode 100644 index 0000000..badef72 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/map-cmdline.default @@ -0,0 +1,24 @@ +changeset = 'changeset: {rev}:{node|short}\n{branches}{tags}{parents}user: {author}\ndate: {date|date}\nsummary: {desc|firstline}\n\n' +changeset_quiet = '{rev}:{node|short}\n' +changeset_verbose = 'changeset: {rev}:{node|short}\n{branches}{tags}{parents}user: {author}\ndate: {date|date}\n{files}{file_copies_switch}description:\n{desc|strip}\n\n\n' +changeset_debug = 'changeset: {rev}:{node}\n{branches}{tags}{parents}{manifest}user: {author}\ndate: {date|date}\n{file_mods}{file_adds}{file_dels}{file_copies_switch}{extras}description:\n{desc|strip}\n\n\n' +start_files = 'files: ' +file = ' {file}' +end_files = '\n' +start_file_mods = 'files: ' +file_mod = ' {file_mod}' +end_file_mods = '\n' +start_file_adds = 'files+: ' +file_add = ' {file_add}' +end_file_adds = '\n' +start_file_dels = 'files-: ' +file_del = ' {file_del}' +end_file_dels = '\n' +start_file_copies = 'copies: ' +file_copy = ' {name} ({source})' +end_file_copies = '\n' +parent = 'parent: {rev}:{node|formatnode}\n' +manifest = 'manifest: {rev}:{node}\n' +branch = 'branch: {branch}\n' +tag = 'tag: {tag}\n' +extra = 'extra: {key}={value|stringescape}\n' diff --git 
a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/map-cmdline.xml b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/map-cmdline.xml new file mode 100644 index 0000000..f16a3e0 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/map-cmdline.xml @@ -0,0 +1,19 @@ +header = '<?xml version="1.0"?>\n<log>\n' +footer = '</log>\n' + +changeset = '<logentry revision="{rev}" node="{node}">\n{branches}{tags}{parents}<author email="{author|email|xmlescape}">{author|person|xmlescape}</author>\n<date>{date|rfc3339date}</date>\n<msg xml:space="preserve">{desc|xmlescape}</msg>\n</logentry>\n' +changeset_verbose = '<logentry revision="{rev}" node="{node}">\n{branches}{tags}{parents}<author email="{author|email|xmlescape}">{author|person|xmlescape}</author>\n<date>{date|rfc3339date}</date>\n<msg xml:space="preserve">{desc|xmlescape}</msg>\n<paths>\n{file_adds}{file_dels}{file_mods}</paths>\n{file_copies}</logentry>\n' +changeset_debug = '<logentry revision="{rev}" node="{node}">\n{branches}{tags}{parents}<author email="{author|email|xmlescape}">{author|person|xmlescape}</author>\n<date>{date|rfc3339date}</date>\n<msg xml:space="preserve">{desc|xmlescape}</msg>\n<paths>\n{file_adds}{file_dels}{file_mods}</paths>\n{file_copies}{extras}</logentry>\n' + +file_add = '<path action="A">{file_add|xmlescape}</path>\n' +file_mod = '<path action="M">{file_mod|xmlescape}</path>\n' +file_del = '<path action="R">{file_del|xmlescape}</path>\n' + +start_file_copies = '<copies>\n' +file_copy = '<copy source="{source|xmlescape}">{name|xmlescape}</copy>\n' +end_file_copies = '</copies>\n' + +parent = '<parent revision="{rev}" node="{node}" />\n' +branch = '<branch>{branch|xmlescape}</branch>\n' +tag = '<tag>{tag|xmlescape}</tag>\n' +extra = '<extra key="{key|xmlescape}">{value|xmlescape}</extra>\n' diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/monoblue/branches.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/monoblue/branches.tmpl new file mode 100644 index 0000000..999daff --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/monoblue/branches.tmpl @@ -0,0 +1,37 @@ +{header} + <title>{repo|escape}: Branches</title> + <link rel="alternate" type="application/atom+xml" href="{url}atom-log" title="Atom feed for {repo|escape}"/> + <link rel="alternate" type="application/rss+xml" href="{url}rss-log" title="RSS feed for {repo|escape}"/> +</head> + +<body> +<div id="container"> + <div class="page-header"> + <h1><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / Branches</h1> + + <form action="{url}log"> + {sessionvars%hiddenformentry} + <dl class="search"> + <dt><label>Search: </label></dt> + <dd><input type="text" name="rev" /></dd> + </dl> + </form> + + <ul class="page-nav"> + <li><a href="{url}summary{sessionvars%urlparameter}">summary</a></li> + <li><a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a></li> + <li><a href="{url}changelog{sessionvars%urlparameter}">changelog</a></li> + <li><a href="{url}graph/{node|short}{sessionvars%urlparameter}">graph</a></li> + <li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li> + <li class="current">branches</li> + <li><a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a></li> + <li><a href="{url}help{sessionvars%urlparameter}">help</a></li> + </ul> + </div> + + <h2 class="no-link no-border">branches</h2> + <table cellspacing="0"> +{entries%branchentry} + </table> + +{footer} diff --git 
a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/monoblue/changelog.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/monoblue/changelog.tmpl new file mode 100644 index 0000000..a00d846 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/monoblue/changelog.tmpl @@ -0,0 +1,41 @@ +{header} + <title>{repo|escape}: changelog</title> + <link rel="alternate" type="application/atom+xml" href="{url}atom-log" title="Atom feed for {repo|escape}"/> + <link rel="alternate" type="application/rss+xml" href="{url}rss-log" title="RSS feed for {repo|escape}"/> +</head> + +<body> +<div id="container"> + <div class="page-header"> + <h1><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / changelog</h1> + + <form action="{url}log"> + {sessionvars%hiddenformentry} + <dl class="search"> + <dt><label>Search: </label></dt> + <dd><input type="text" name="rev" /></dd> + </dl> + </form> + + <ul class="page-nav"> + <li><a href="{url}summary{sessionvars%urlparameter}">summary</a></li> + <li><a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a></li> + <li class="current">changelog</li> + <li><a href="{url}graph/{node|short}{sessionvars%urlparameter}">graph</a></li> + <li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li> + <li><a href="{url}branches{sessionvars%urlparameter}">branches</a></li> + <li><a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a>{archives%archiveentry}</li> + <li><a href="{url}help{sessionvars%urlparameter}">help</a></li> + </ul> + </div> + + <h2 class="no-link no-border">changelog</h2> + <div> + {entries%changelogentry} + </div> + + <div class="page-path"> +{changenav%nav} + </div> + +{footer} diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/monoblue/changelogentry.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/monoblue/changelogentry.tmpl new file mode 100644 index 0000000..6906629 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/monoblue/changelogentry.tmpl @@ -0,0 +1,6 @@ +<h3 class="changelog"><a class="title" href="{url}rev/{node|short}{sessionvars%urlparameter}">{desc|strip|firstline|escape|nonempty}<span class="logtags"> {inbranch%inbranchtag}{branches%branchtag}{tags%tagtag}</span></a></h3> +<ul class="changelog-entry"> + <li class="age">{date|age}</li> + <li>by <span class="name">{author|obfuscate}</span> <span class="revdate">[{date|rfc822date}] rev {rev}</span></li> + <li class="description">{desc|strip|escape|addbreaks|nonempty}</li> +</ul> diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/monoblue/changeset.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/monoblue/changeset.tmpl new file mode 100644 index 0000000..3816db3 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/monoblue/changeset.tmpl @@ -0,0 +1,64 @@ +{header} +<title>{repo|escape}: changeset {rev}:{node|short}</title> + <link rel="alternate" type="application/atom+xml" href="{url}atom-log" title="Atom feed for {repo|escape}"/> + <link rel="alternate" type="application/rss+xml" href="{url}rss-log" title="RSS feed for {repo|escape}"/> +</head> + +<body> +<div id="container"> + <div class="page-header"> + <h1><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / files</h1> + + <form action="{url}log"> + {sessionvars%hiddenformentry} + <dl class="search"> + <dt><label>Search: </label></dt> + <dd><input type="text" name="rev" 
/></dd> + </dl> + </form> + + <ul class="page-nav"> + <li><a href="{url}summary{sessionvars%urlparameter}">summary</a></li> + <li><a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a></li> + <li><a href="{url}changelog{sessionvars%urlparameter}">changelog</a></li> + <li><a href="{url}graph/{node|short}{sessionvars%urlparameter}">graph</a></li> + <li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li> + <li><a href="{url}branches{sessionvars%urlparameter}">branches</a></li> + <li><a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a></li> + <li><a href="{url}help{sessionvars%urlparameter}">help</a></li> + </ul> + </div> + + <ul class="submenu"> + <li class="current">changeset</li> + <li><a href="{url}raw-rev/{node|short}">raw</a> {archives%archiveentry}</li> + </ul> + + <h2 class="no-link no-border">changeset</h2> + + <h3 class="changeset"><a href="{url}raw-rev/{node|short}">{desc|strip|escape|firstline|nonempty} <span class="logtags">{inbranch%inbranchtag}{branches%branchtag}{tags%tagtag}</span></a></h3> + <p class="changeset-age"><span>{date|age}</span></p> + + <dl class="overview"> + <dt>author</dt> + <dd>{author|obfuscate}</dd> + <dt>date</dt> + <dd>{date|date}</dd> + {branch%changesetbranch} + <dt>changeset {rev}</dt> + <dd>{node|short}</dd> + {parent%changesetparent} + {child%changesetchild} + </dl> + + <p class="description">{desc|strip|escape|addbreaks|nonempty}</p> + + <table> + {files} + </table> + + <div class="diff"> + {diff} + </div> + +{footer} diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/monoblue/error.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/monoblue/error.tmpl new file mode 100644 index 0000000..5ec26b5 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/monoblue/error.tmpl @@ -0,0 +1,35 @@ +{header} + <title>{repo|escape}: Error</title> + <link rel="alternate" type="application/atom+xml" href="{url}atom-log" title="Atom feed for {repo|escape}"/> + <link rel="alternate" type="application/rss+xml" href="{url}rss-log" title="RSS feed for {repo|escape}"/> +</head> + +<body> +<div id="container"> + <div class="page-header"> + <h1><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / Not found: {repo|escape}</h1> + + <form action="{url}log"> + {sessionvars%hiddenformentry} + <dl class="search"> + <dt><label>Search: </label></dt> + <dd><input type="text" name="rev" /></dd> + </dl> + </form> + + <ul class="page-nav"> + <li class="current">summary</li> + <li><a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a></li> + <li><a href="{url}log{sessionvars%urlparameter}">changelog</a></li> + <li><a href="{url}graph/{node|short}{sessionvars%urlparameter}">graph</a></li> + <li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li> + <li><a href="{url}branches{sessionvars%urlparameter}">branches</a></li> + <li><a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a></li> + <li><a href="{url}help{sessionvars%urlparameter}">help</a></li> + </ul> + </div> + + <h2 class="no-link no-border">An error occurred while processing your request</h2> + <p class="normal">{error|escape}</p> + +{footer} diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/monoblue/fileannotate.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/monoblue/fileannotate.tmpl new file mode 100644 index 0000000..cb13781 --- /dev/null +++ 
b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/monoblue/fileannotate.tmpl @@ -0,0 +1,64 @@ +{header} +<title>{repo|escape}: {file|escape}@{node|short} (annotated)</title> + <link rel="alternate" type="application/atom+xml" href="{url}atom-log" title="Atom feed for {repo|escape}"/> + <link rel="alternate" type="application/rss+xml" href="{url}rss-log" title="RSS feed for {repo|escape}"/> +</head> + +<body> +<div id="container"> + <div class="page-header"> + <h1><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / annotate</h1> + + <form action="{url}log"> + {sessionvars%hiddenformentry} + <dl class="search"> + <dt><label>Search: </label></dt> + <dd><input type="text" name="rev" /></dd> + </dl> + </form> + + <ul class="page-nav"> + <li><a href="{url}summary{sessionvars%urlparameter}">summary</a></li> + <li><a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a></li> + <li><a href="{url}log{sessionvars%urlparameter}">changelog</a></li> + <li><a href="{url}graph/{node|short}{sessionvars%urlparameter}">graph</a></li> + <li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li> + <li><a href="{url}branches{sessionvars%urlparameter}">branches</a></li> + <li><a href="{url}file/{node|short}{path|urlescape}{sessionvars%urlparameter}">files</a></li> + <li><a href="{url}help{sessionvars%urlparameter}">help</a></li> + </ul> + </div> + + <ul class="submenu"> + <li><a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">file</a></li> + <li><a href="{url}log/{node|short}/{file|urlescape}{sessionvars%urlparameter}">revisions</a></li> + <li class="current">annotate</li> + <li><a href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">diff</a></li> + <li><a href="{url}raw-annotate/{node|short}/{file|urlescape}">raw</a></li> + </ul> + + <h2 class="no-link no-border">{file|escape}@{node|short} (annotated)</h2> + <h3 class="changeset">{file|escape}</h3> + <p class="changeset-age"><span>{date|age}</span></p> + + <dl class="overview"> + <dt>author</dt> + <dd>{author|obfuscate}</dd> + <dt>date</dt> + <dd>{date|date}</dd> + {branch%filerevbranch} + <dt>changeset {rev}</dt> + <dd><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a></dd> + {parent%fileannotateparent} + {child%fileannotatechild} + <dt>permissions</dt> + <dd>{permissions|permissions}</dd> + </dl> + + <p class="description">{desc|strip|escape|addbreaks|nonempty}</p> + + <table class="annotated"> + {annotate%annotateline} + </table> + +{footer} diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/monoblue/filediff.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/monoblue/filediff.tmpl new file mode 100644 index 0000000..84f1f80 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/monoblue/filediff.tmpl @@ -0,0 +1,55 @@ +{header} +<title>{repo|escape}: diff {file|escape}</title> + <link rel="alternate" type="application/atom+xml" href="{url}atom-log" title="Atom feed for {repo|escape}"/> + <link rel="alternate" type="application/rss+xml" href="{url}rss-log" title="RSS feed for {repo|escape}"/> +</head> + +<body> +<div id="container"> + <div class="page-header"> + <h1><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / file diff</h1> + + <form action="{url}log"> + {sessionvars%hiddenformentry} + <dl class="search"> + <dt><label>Search: </label></dt> + <dd><input type="text" name="rev" /></dd> + </dl> + </form> + + <ul class="page-nav"> + <li><a 
href="{url}summary{sessionvars%urlparameter}">summary</a></li> + <li><a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a></li> + <li><a href="{url}log{sessionvars%urlparameter}">changelog</a></li> + <li><a href="{url}graph/{node|short}{sessionvars%urlparameter}">graph</a></li> + <li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li> + <li><a href="{url}branches{sessionvars%urlparameter}">branches</a></li> + <li><a href="{url}file/{node|short}{path|urlescape}{sessionvars%urlparameter}">files</a></li> + <li><a href="{url}help{sessionvars%urlparameter}">help</a></li> + </ul> + </div> + + <ul class="submenu"> + <li><a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">file</a></li> + <li><a href="{url}log/{node|short}/{file|urlescape}{sessionvars%urlparameter}">revisions</a></li> + <li><a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}">annotate</a></li> + <li class="current">diff</li> + <li><a href="{url}raw-diff/{node|short}/{file|urlescape}">raw</a></li> + </ul> + + <h2 class="no-link no-border">diff: {file|escape}</h2> + <h3 class="changeset">{file|escape}</h3> + + <dl class="overview"> + {branch%filerevbranch} + <dt>changeset {rev}</dt> + <dd><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a></dd> + {parent%filediffparent} + {child%filediffchild} + </dl> + + <div class="diff"> + {diff} + </div> + +{footer} diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/monoblue/filelog.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/monoblue/filelog.tmpl new file mode 100644 index 0000000..8612016 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/monoblue/filelog.tmpl @@ -0,0 +1,50 @@ +{header} +<title>{repo|escape}: File revisions</title> + <link rel="alternate" type="application/atom+xml" href="{url}atom-log" title="Atom feed for {repo|escape}"/> + <link rel="alternate" type="application/rss+xml" href="{url}rss-log" title="RSS feed for {repo|escape}"/> +</head> + +<body> +<div id="container"> + <div class="page-header"> + <h1><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / file revisions</h1> + + <form action="{url}log"> + {sessionvars%hiddenformentry} + <dl class="search"> + <dt><label>Search: </label></dt> + <dd><input type="text" name="rev" /></dd> + </dl> + </form> + + <ul class="page-nav"> + <li><a href="{url}summary{sessionvars%urlparameter}">summary</a></li> + <li><a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a></li> + <li><a href="{url}log{sessionvars%urlparameter}">changelog</a></li> + <li><a href="{url}graph/{node|short}{sessionvars%urlparameter}">graph</a></li> + <li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li> + <li><a href="{url}branches{sessionvars%urlparameter}">branches</a></li> + <li><a href="{url}file/{node|short}{path|urlescape}{sessionvars%urlparameter}">files</a></li> + <li><a href="{url}help{sessionvars%urlparameter}">help</a></li> + </ul> + </div> + + <ul class="submenu"> + <li><a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">file</a></li> + <li class="current">revisions</li> + <li><a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}">annotate</a></li> + <li><a href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">diff</a></li> + <li><a href="{url}rss-log/{node|short}/{file|urlescape}">rss</a></li> + </ul> + + <h2 class="no-link no-border">{file|urlescape}</h2> + + <table> + 
{entries%filelogentry} + </table> + + <div class="page-path"> + {nav%filenav} + </div> + +{footer} diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/monoblue/filerevision.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/monoblue/filerevision.tmpl new file mode 100644 index 0000000..a594b1a --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/monoblue/filerevision.tmpl @@ -0,0 +1,64 @@ +{header} +<title>{repo|escape}: {file|escape}@{node|short}</title> + <link rel="alternate" type="application/atom+xml" href="{url}atom-log" title="Atom feed for {repo|escape}"/> + <link rel="alternate" type="application/rss+xml" href="{url}rss-log" title="RSS feed for {repo|escape}"/> +</head> + +<body> +<div id="container"> + <div class="page-header"> + <h1><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / file revision</h1> + + <form action="{url}log"> + {sessionvars%hiddenformentry} + <dl class="search"> + <dt><label>Search: </label></dt> + <dd><input type="text" name="rev" /></dd> + </dl> + </form> + + <ul class="page-nav"> + <li><a href="{url}summary{sessionvars%urlparameter}">summary</a></li> + <li><a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a></li> + <li><a href="{url}changelog{sessionvars%urlparameter}">changelog</a></li> + <li><a href="{url}graph/{node|short}{sessionvars%urlparameter}">graph</a></li> + <li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li> + <li><a href="{url}branches{sessionvars%urlparameter}">branches</a></li> + <li><a href="{url}file/{node|short}{path|urlescape}{sessionvars%urlparameter}">files</a></li> + <li><a href="{url}help{sessionvars%urlparameter}">help</a></li> + </ul> + </div> + + <ul class="submenu"> + <li class="current">file</li> + <li><a href="{url}log/{node|short}/{file|urlescape}{sessionvars%urlparameter}">revisions</a></li> + <li><a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}">annotate</a></li> + <li><a href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">diff</a></li> + <li><a href="{url}raw-file/{node|short}/{file|urlescape}">raw</a></li> + </ul> + + <h2 class="no-link no-border">{file|escape}@{node|short}</h2> + <h3 class="changeset">{file|escape}</h3> + <p class="changeset-age"><span>{date|age}</span></p> + + <dl class="overview"> + <dt>author</dt> + <dd>{author|obfuscate}</dd> + <dt>date</dt> + <dd>{date|date}</dd> + {branch%filerevbranch} + <dt>changeset {rev}</dt> + <dd><a class="list" href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a></dd> + {parent%filerevparent} + {child%filerevchild} + <dt>permissions</dt> + <dd>{permissions|permissions}</dd> + </dl> + + <p class="description">{desc|strip|escape|addbreaks|nonempty}</p> + + <div class="source"> + {text%fileline} + </div> + +{footer} diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/monoblue/footer.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/monoblue/footer.tmpl new file mode 100644 index 0000000..cddaa92 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/monoblue/footer.tmpl @@ -0,0 +1,22 @@ + <div class="page-footer"> + <p>Mercurial Repository: {repo|escape}</p> + <ul class="rss-logo"> + <li><a href="{url}rss-log">RSS</a></li> + <li><a href="{url}atom-log">Atom</a></li> + </ul> + {motd} + </div> + + <div id="powered-by"> + <p><a href="http://mercurial.selenic.com/" title="Mercurial"><img src="{staticurl}hglogo.png" width=75 
height=90 border=0 alt="mercurial"></a></p> + </div> + + <div id="corner-top-left"></div> + <div id="corner-top-right"></div> + <div id="corner-bottom-left"></div> + <div id="corner-bottom-right"></div> + +</div> + +</body> +</html> diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/monoblue/graph.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/monoblue/graph.tmpl new file mode 100644 index 0000000..dcdc8d7 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/monoblue/graph.tmpl @@ -0,0 +1,119 @@ +{header} + <title>{repo|escape}: graph</title> + <link rel="alternate" type="application/atom+xml" href="{url}atom-log" title="Atom feed for {repo|escape}"/> + <link rel="alternate" type="application/rss+xml" href="{url}rss-log" title="RSS feed for {repo|escape}"/> + <!--[if IE]><script type="text/javascript" src="{staticurl}excanvas.js"></script><![endif]--> +</head> + +<body> +<div id="container"> + <div class="page-header"> + <h1><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / graph</h1> + + <form action="{url}log"> + {sessionvars%hiddenformentry} + <dl class="search"> + <dt><label>Search: </label></dt> + <dd><input type="text" name="rev" /></dd> + </dl> + </form> + + <ul class="page-nav"> + <li><a href="{url}summary{sessionvars%urlparameter}">summary</a></li> + <li><a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a></li> + <li><a href="{url}changelog{sessionvars%urlparameter}">changelog</a></li> + <li class="current">graph</li> + <li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li> + <li><a href="{url}branches{sessionvars%urlparameter}">branches</a></li> + <li><a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a></li> + <li><a href="{url}help{sessionvars%urlparameter}">help</a></li> + </ul> + </div> + + <h2 class="no-link no-border">graph</h2> + + <div id="noscript">The revision graph only works with JavaScript-enabled browsers.</div> + <div id="wrapper"> + <ul id="nodebgs"></ul> + <canvas id="graph" width="224" height="{canvasheight}"></canvas> + <ul id="graphnodes"></ul> + </div> + + <script type="text/javascript" src="{staticurl}graph.js"></script> + <script> + <!-- hide script content + + document.getElementById('noscript').style.display = 'none'; + + var data = {jsdata|json}; + var graph = new Graph(); + graph.scale({bg_height}); + + graph.edge = function(x0, y0, x1, y1, color) \{ + + this.setColor(color, 0.0, 0.65); + this.ctx.beginPath(); + this.ctx.moveTo(x0, y0); + this.ctx.lineTo(x1, y1); + this.ctx.stroke(); + + } + + var revlink = '<li style="_STYLE"><span class="desc">'; + revlink += '<a href="{url}rev/_NODEID{sessionvars%urlparameter}" title="_NODEID">_DESC</a>'; + revlink += '</span>_TAGS<span class="info">_DATE, by _USER</span></li>'; + + graph.vertex = function(x, y, color, parity, cur) \{ + + this.ctx.beginPath(); + color = this.setColor(color, 0.25, 0.75); + this.ctx.arc(x, y, radius, 0, Math.PI * 2, true); + this.ctx.fill(); + + var bg = '<li class="bg parity' + parity + '"></li>'; + var left = (this.columns + 1) * this.bg_height; + var nstyle = 'padding-left: ' + left + 'px;'; + var item = revlink.replace(/_STYLE/, nstyle); + item = item.replace(/_PARITY/, 'parity' + parity); + item = item.replace(/_NODEID/, cur[0]); + item = item.replace(/_NODEID/, cur[0]); + item = item.replace(/_DESC/, cur[3]); + item = item.replace(/_USER/, cur[4]); + item = item.replace(/_DATE/, cur[5]); + + var tagspan = ''; + if (cur[7].length || (cur[6][0] != 
'default' || cur[6][1])) \{ + tagspan = '<span class="logtags">'; + if (cur[6][1]) \{ + tagspan += '<span class="branchtag" title="' + cur[6][0] + '">'; + tagspan += cur[6][0] + '</span> '; + } else if (!cur[6][1] && cur[6][0] != 'default') \{ + tagspan += '<span class="inbranchtag" title="' + cur[6][0] + '">'; + tagspan += cur[6][0] + '</span> '; + } + if (cur[7].length) \{ + for (var t in cur[7]) \{ + var tag = cur[7][t]; + tagspan += '<span class="tagtag">' + tag + '</span> '; + } + } + tagspan += '</span>'; + } + + item = item.replace(/_TAGS/, tagspan); + return [bg, item]; + + } + + graph.render(data); + + // stop hiding script --> + </script> + + <div class="page-path"> + <a href="{url}graph/{rev}{lessvars%urlparameter}">less</a> + <a href="{url}graph/{rev}{morevars%urlparameter}">more</a> + | {changenav%navgraph} + </div> + +{footer} diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/monoblue/header.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/monoblue/header.tmpl new file mode 100644 index 0000000..dd03884 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/monoblue/header.tmpl @@ -0,0 +1,6 @@ +<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd"> +<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en"> +<head> + <link rel="icon" href="{staticurl}hgicon.png" type="image/png" /> + <meta name="robots" content="index, nofollow"/> + <link rel="stylesheet" href="{staticurl}style-monoblue.css" type="text/css" /> diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/monoblue/help.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/monoblue/help.tmpl new file mode 100644 index 0000000..df99fb5 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/monoblue/help.tmpl @@ -0,0 +1,37 @@ +{header} + <title>{repo|escape}: Branches</title> + <link rel="alternate" type="application/atom+xml" href="{url}atom-log" title="Atom feed for {repo|escape}"/> + <link rel="alternate" type="application/rss+xml" href="{url}rss-log" title="RSS feed for {repo|escape}"/> +</head> + +<body> +<div id="container"> + <div class="page-header"> + <h1><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / Branches</h1> + + <form action="{url}log"> + {sessionvars%hiddenformentry} + <dl class="search"> + <dt><label>Search: </label></dt> + <dd><input type="text" name="rev" /></dd> + </dl> + </form> + + <ul class="page-nav"> + <li><a href="{url}summary{sessionvars%urlparameter}">summary</a></li> + <li><a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a></li> + <li><a href="{url}changelog{sessionvars%urlparameter}">changelog</a></li> + <li><a href="{url}graph/{node|short}{sessionvars%urlparameter}">graph</a></li> + <li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li> + <li><a href="{url}branches{sessionvars%urlparameter}">branches</a></li> + <li><a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a></li> + <li class="current">help</li> + </ul> + </div> + + <h2 class="no-link no-border">branches</h2> + <pre> + {doc|escape} + </pre> + +{footer} diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/monoblue/helptopics.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/monoblue/helptopics.tmpl new file mode 100644 index 0000000..f7f2fd6 --- /dev/null +++ 
b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/monoblue/helptopics.tmpl @@ -0,0 +1,44 @@ +{header} + <title>{repo|escape}: Branches</title> + <link rel="alternate" type="application/atom+xml" href="{url}atom-log" title="Atom feed for {repo|escape}"/> + <link rel="alternate" type="application/rss+xml" href="{url}rss-log" title="RSS feed for {repo|escape}"/> +</head> + +<body> +<div id="container"> + <div class="page-header"> + <h1><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / Branches</h1> + + <form action="{url}log"> + {sessionvars%hiddenformentry} + <dl class="search"> + <dt><label>Search: </label></dt> + <dd><input type="text" name="rev" /></dd> + </dl> + </form> + + <ul class="page-nav"> + <li><a href="{url}summary{sessionvars%urlparameter}">summary</a></li> + <li><a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a></li> + <li><a href="{url}changelog{sessionvars%urlparameter}">changelog</a></li> + <li><a href="{url}graph/{node|short}{sessionvars%urlparameter}">graph</a></li> + <li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li> + <li><a href="{url}help{sessionvars%urlparameter}">branches</a></li> + <li><a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a></li> + <li class="current">help</li> + </ul> + </div> + + <h2 class="no-link no-border">branches</h2> + <table cellspacing="0"> + <tr><td colspan="2"><h2><a name="main" href="#topics">Topics</a></h2></td></tr> + {topics % helpentry} + + <tr><td colspan="2"><h2><a name="main" href="#main">Main Commands</a></h2></td></tr> + {earlycommands % helpentry} + + <tr><td colspan="2"><h2><a name="other" href="#other">Other Commands</a></h2></td></tr> + {othercommands % helpentry} + </table> + +{footer} diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/monoblue/index.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/monoblue/index.tmpl new file mode 100644 index 0000000..1159f47 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/monoblue/index.tmpl @@ -0,0 +1,39 @@ +{header} + <title>{repo|escape}: Mercurial repositories index</title> +</head> + +<body> +<div id="container"> + <div class="page-header"> + <h1>Mercurial Repositories</h1> + <ul class="page-nav"> + </ul> + </div> + + <table cellspacing="0"> + <tr> + <td><a href="?sort={sort_name}">Name</a></td> + <td><a href="?sort={sort_description}">Description</a></td> + <td><a href="?sort={sort_contact}">Contact</a></td> + <td><a href="?sort={sort_lastchange}">Last modified</a></td> + <td> </td> + <td> </td> + </tr> + {entries%indexentry} + </table> + <div class="page-footer"> + {motd} + </div> + + <div id="powered-by"> + <p><a href="http://mercurial.selenic.com/" title="Mercurial"><img src="{staticurl}hglogo.png" width=75 height=90 border=0 alt="mercurial"></a></p> + </div> + + <div id="corner-top-left"></div> + <div id="corner-top-right"></div> + <div id="corner-bottom-left"></div> + <div id="corner-bottom-right"></div> + +</div> +</body> +</html> diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/monoblue/manifest.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/monoblue/manifest.tmpl new file mode 100644 index 0000000..fe5ea8a --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/monoblue/manifest.tmpl @@ -0,0 +1,52 @@ +{header} +<title>{repo|escape}: files</title> + <link rel="alternate" type="application/atom+xml" href="{url}atom-log" title="Atom feed for 
{repo|escape}"/> + <link rel="alternate" type="application/rss+xml" href="{url}rss-log" title="RSS feed for {repo|escape}"/> +</head> + +<body> +<div id="container"> + <div class="page-header"> + <h1><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / files</h1> + + <form action="{url}log"> + {sessionvars%hiddenformentry} + <dl class="search"> + <dt><label>Search: </label></dt> + <dd><input type="text" name="rev" /></dd> + </dl> + </form> + + <ul class="page-nav"> + <li><a href="{url}summary{sessionvars%urlparameter}">summary</a></li> + <li><a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a></li> + <li><a href="{url}changelog{sessionvars%urlparameter}">changelog</a></li> + <li><a href="{url}graph/{node|short}{sessionvars%urlparameter}">graph</a></li> + <li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li> + <li><a href="{url}branches{sessionvars%urlparameter}">branches</a></li> + <li class="current">files</li> + <li><a href="{url}help{sessionvars%urlparameter}">help</a></li> + </ul> + </div> + + <ul class="submenu"> + <li><a href="{url}rev/{node|short}{sessionvars%urlparameter}">changeset</a> {archives%archiveentry}</li> + {archives%archiveentry} + </ul> + + <h2 class="no-link no-border">files</h2> + <p class="files">{path|escape} <span class="logtags">{inbranch%inbranchtag}{branches%branchtag}{tags%tagtag}</span></p> + + <table> + <tr class="parity{upparity}"> + <td>drwxr-xr-x</td> + <td></td> + <td></td> + <td><a href="{url}file/{node|short}{up|urlescape}{sessionvars%urlparameter}">[up]</a></td> + <td class="link"> </td> + </tr> + {dentries%direntry} + {fentries%fileentry} + </table> + +{footer} diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/monoblue/map b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/monoblue/map new file mode 100644 index 0000000..1835016 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/monoblue/map @@ -0,0 +1,226 @@ +default = 'summary' +mimetype = 'text/html; charset={encoding}' +header = header.tmpl +footer = footer.tmpl +search = search.tmpl +changelog = changelog.tmpl +summary = summary.tmpl +error = error.tmpl +notfound = notfound.tmpl + +help = help.tmpl +helptopics = helptopics.tmpl + +helpentry = '<tr><td><a href="{url}help/{topic|escape}{sessionvars%urlparameter}">{topic|escape}</a></td><td>{summary|escape}</td></tr>' + +naventry = '<a href="{url}log/{node|short}{sessionvars%urlparameter}">{label|escape}</a> ' +navshortentry = '<a href="{url}shortlog/{node|short}{sessionvars%urlparameter}">{label|escape}</a> ' +navgraphentry = '<a href="{url}graph/{node|short}{sessionvars%urlparameter}">{label|escape}</a> ' +filenaventry = '<a href="{url}log/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{label|escape}</a>' +filedifflink = '<a href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{file|escape}</a> ' +filenodelink = ' + <tr class="parity{parity}"> + <td><a href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{file|escape}</a></td> + <td></td> + <td> + <a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">file</a> | + <a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}">annotate</a> | + <a href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">diff</a> | + <a href="{url}log/{node|short}/{file|urlescape}{sessionvars%urlparameter}">revisions</a> + </td> + </tr>' +filenolink = ' + <tr class="parity{parity}"> + <td> + <a 
href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{file|escape}</a></td><td></td><td>file | + annotate | + <a href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">diff</a> | + <a href="{url}log/{node|short}/{file|urlescape}{sessionvars%urlparameter}">revisions</a> + </td> + </tr>' + +nav = '{before%naventry} {after%naventry}' +navshort = '{before%navshortentry}{after%navshortentry}' +navgraph = '{before%navgraphentry}{after%navgraphentry}' +filenav = '{before%filenaventry}{after%filenaventry}' + +fileellipses = '...' +changelogentry = changelogentry.tmpl +searchentry = changelogentry.tmpl +changeset = changeset.tmpl +manifest = manifest.tmpl +direntry = ' + <tr class="parity{parity}"> + <td>drwxr-xr-x</td> + <td></td> + <td></td> + <td><a href="{url}file/{node|short}{path|urlescape}{sessionvars%urlparameter}">{basename|escape}</a></td> + <td><a href="{url}file/{node|short}{path|urlescape}{sessionvars%urlparameter}">files</a></td> + </tr>' +fileentry = ' + <tr class="parity{parity}"> + <td>{permissions|permissions}</td> + <td>{date|isodate}</td> + <td>{size}</td> + <td><a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{basename|escape}</a></td> + <td> + <a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">file</a> | + <a href="{url}log/{node|short}/{file|urlescape}{sessionvars%urlparameter}">revisions</a> | + <a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}">annotate</a> + </td> + </tr>' +filerevision = filerevision.tmpl +fileannotate = fileannotate.tmpl +filediff = filediff.tmpl +filelog = filelog.tmpl +fileline = ' + <div style="font-family:monospace" class="parity{parity}"> + <pre><a class="linenr" href="#{lineid}" id="{lineid}">{linenumber}</a> {line|escape}</pre> + </div>' +annotateline = ' + <tr class="parity{parity}"> + <td class="linenr"> + <a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}#{targetline}" + title="{node|short}: {desc|escape|firstline}">{author|user}@{rev}</a> + </td> + <td class="lineno"> + <a href="#{lineid}" id="{lineid}">{linenumber}</a> + </td> + <td class="source">{line|escape}</td> + </tr>' +difflineplus = '<span style="color:#008800;"><a class="linenr" href="#{lineid}" id="{lineid}">{linenumber}</a> {line|escape}</span>' +difflineminus = '<span style="color:#cc0000;"><a class="linenr" href="#{lineid}" id="{lineid}">{linenumber}</a> {line|escape}</span>' +difflineat = '<span style="color:#990099;"><a class="linenr" href="#{lineid}" id="{lineid}">{linenumber}</a> {line|escape}</span>' +diffline = '<span><a class="linenr" href="#{lineid}" id="{lineid}">{linenumber}</a> {line|escape}</span>' +changelogparent = ' + <tr> + <th class="parent">parent {rev}:</th> + <td class="parent"> + <a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a> + </td> + </tr>' +changesetbranch = '<dt>branch</dt><dd>{name}</dd>' +changesetparent = ' + <dt>parent {rev}</dt> + <dd><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a></dd>' +filerevbranch = '<dt>branch</dt><dd>{name}</dd>' +filerevparent = ' + <dt>parent {rev}</dt> + <dd> + <a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}"> + {rename%filerename}{node|short} + </a> + </dd>' +filerename = '{file|escape}@' +filelogrename = '| <a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">base</a>' +fileannotateparent = ' + <dt>parent {rev}</dt> + <dd> + <a 
href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}"> + {rename%filerename}{node|short} + </a> + </dd>' +changelogchild = ' + <dt>child {rev}:</dt> + <dd><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a></dd>' +changesetchild = ' + <dt>child {rev}</dt> + <dd><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a></dd>' +filerevchild = ' + <dt>child {rev}</dt> + <dd> + <a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{node|short}</a> + </dd>' +fileannotatechild = ' + <dt>child {rev}</dt> + <dd> + <a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{node|short}</a> + </dd>' +tags = tags.tmpl +tagentry = ' + <tr class="parity{parity}"> + <td class="nowrap">{date|age}</td> + <td><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{tag|escape}</a></td> + <td class="nowrap"> + <a href="{url}rev/{node|short}{sessionvars%urlparameter}">changeset</a> | + <a href="{url}log/{node|short}{sessionvars%urlparameter}">changelog</a> | + <a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a> + </td> + </tr>' +branches = branches.tmpl +branchentry = ' + <tr class="parity{parity}"> + <td class="nowrap">{date|age}</td> + <td><a href="{url}shortlog/{node|short}{sessionvars%urlparameter}">{node|short}</a></td> + <td class="{status}">{branch|escape}</td> + <td class="nowrap"> + <a href="{url}rev/{node|short}{sessionvars%urlparameter}">changeset</a> | + <a href="{url}log/{node|short}{sessionvars%urlparameter}">changelog</a> | + <a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a> + </td> + </tr>' +diffblock = '<pre>{lines}</pre>' +filediffparent = ' + <dt>parent {rev}</dt> + <dd><a href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{node|short}</a></dd>' +filelogparent = ' + <tr> + <td align="right">parent {rev}: </td> + <td><a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{node|short}</a></td> + </tr>' +filediffchild = ' + <dt>child {rev}</dt> + <dd><a href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{node|short}</a></dd>' +filelogchild = ' + <tr> + <td align="right">child {rev}: </td> + <td><a href="{url}file{node|short}/{file|urlescape}{sessionvars%urlparameter}">{node|short}</a></td> + </tr>' +shortlog = shortlog.tmpl +tagtag = '<span class="tagtag" title="{name}">{name}</span> ' +branchtag = '<span class="branchtag" title="{name}">{name}</span> ' +inbranchtag = '<span class="inbranchtag" title="{name}">{name}</span> ' +shortlogentry = ' + <tr class="parity{parity}"> + <td class="nowrap">{date|age}</td> + <td>{author|person}</td> + <td> + <a href="{url}rev/{node|short}{sessionvars%urlparameter}"> + {desc|strip|firstline|escape|nonempty} + <span class="logtags">{inbranch%inbranchtag}{branches%branchtag}{tags%tagtag}</span> + </a> + </td> + <td class="nowrap"> + <a href="{url}rev/{node|short}{sessionvars%urlparameter}">changeset</a> | + <a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a> + </td> + </tr>' +filelogentry = ' + <tr class="parity{parity}"> + <td class="nowrap">{date|age}</td> + <td><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{desc|strip|firstline|escape|nonempty}</a></td> + <td class="nowrap"> + <a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">file</a> | <a href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">diff</a> | <a 
href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}">annotate</a> + {rename%filelogrename} + </td> + </tr>' +archiveentry = '<li><a href="{url}archive/{node|short}{extension}">{type|escape}</a></li>' +indexentry = ' + <tr class="parity{parity}"> + <td><a href="{url}{sessionvars%urlparameter}">{name|escape}</a></td> + <td>{description}</td> + <td>{contact|obfuscate}</td> + <td>{lastchange|age}</td> + <td class="indexlinks">{archives%indexarchiveentry}</td> + <td> + <div class="rss_logo"> + <a href="{url}rss-log">RSS</a> + <a href="{url}atom-log">Atom</a> + </div> + </td> + </tr>\n' +indexarchiveentry = '<a href="{url}archive/{node|short}{extension}">{type|escape}</a> ' +index = index.tmpl +urlparameter = '{separator}{name}={value|urlescape}' +hiddenformentry = '<input type="hidden" name="{name}" value="{value|escape}" />' +graph = graph.tmpl diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/monoblue/notfound.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/monoblue/notfound.tmpl new file mode 100644 index 0000000..4b1ffad --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/monoblue/notfound.tmpl @@ -0,0 +1,36 @@ +{header} + <title>{repo|escape}: Mercurial repository not found</title> + <link rel="alternate" type="application/atom+xml" href="{url}atom-log" title="Atom feed for {repo|escape}"/> + <link rel="alternate" type="application/rss+xml" href="{url}rss-log" title="RSS feed for {repo|escape}"/> +</head> + +<body> +<div id="container"> + <div class="page-header"> + <h1><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / Not found: {repo|escape}</h1> + + <form action="{url}log"> + {sessionvars%hiddenformentry} + <dl class="search"> + <dt><label>Search: </label></dt> + <dd><input type="text" name="rev" /></dd> + </dl> + </form> + + <ul class="page-nav"> + <li class="current">summary</li> + <li><a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a></li> + <li><a href="{url}log{sessionvars%urlparameter}">changelog</a></li> + <li><a href="{url}graph/{node|short}{sessionvars%urlparameter}">graph</a></li> + <li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li> + <li><a href="{url}branches{sessionvars%urlparameter}">branches</a></li> + <li><a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a>{archives%archiveentry}</li> + <li><a href="{url}help{sessionvars%urlparameter}">help</a></li> + </ul> + </div> + + <h2 class="no-link no-border">Not Found</h2> + <p class="normal">The specified repository "{repo|escape}" is unknown, sorry.</p> + <p class="normal">Please go back to the <a href="{url}">main repository list page</a>.</p> + +{footer} diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/monoblue/search.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/monoblue/search.tmpl new file mode 100644 index 0000000..cde54a7 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/monoblue/search.tmpl @@ -0,0 +1,35 @@ +{header} + <title>{repo|escape}: Search</title> + <link rel="alternate" type="application/atom+xml" href="{url}atom-log" title="Atom feed for {repo|escape}"/> + <link rel="alternate" type="application/rss+xml" href="{url}rss-log" title="RSS feed for {repo|escape}"/> +</head> + +<body> +<div id="container"> + <div class="page-header"> + <h1><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / search</h1> + + <form action="{url}log"> + 
{sessionvars%hiddenformentry} + <dl class="search"> + <dt><label>Search: </label></dt> + <dd><input type="text" name="rev" value="{query|escape}" /></dd> + </dl> + </form> + + <ul class="page-nav"> + <li><a href="{url}summary{sessionvars%urlparameter}">summary</a></li> + <li><a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a></li> + <li><a href="{url}log{sessionvars%urlparameter}">changelog</a></li> + <li><a href="{url}graph/{node|short}{sessionvars%urlparameter}">graph</a></li> + <li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li> + <li><a href="{url}branches{sessionvars%urlparameter}">branches</a></li> + <li><a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a>{archives%archiveentry} + <li><a href="{url}help{sessionvars%urlparameter}">help</a></li> + </ul> + </div> + + <h2 class="no-link no-border">searching for {query|escape}</h2> + {entries} + +{footer} diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/monoblue/shortlog.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/monoblue/shortlog.tmpl new file mode 100644 index 0000000..7a88897 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/monoblue/shortlog.tmpl @@ -0,0 +1,43 @@ +{header} + <title>{repo|escape}: shortlog</title> + <link rel="alternate" type="application/atom+xml" href="{url}atom-log" title="Atom feed for {repo|escape}"/> + <link rel="alternate" type="application/rss+xml" href="{url}rss-log" title="RSS feed for {repo|escape}"/> +</head> + +<body> +<div id="container"> + <div class="page-header"> + <h1><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / shortlog</h1> + + <form action="{url}log"> + {sessionvars%hiddenformentry} + <dl class="search"> + <dt><label>Search: </label></dt> + <dd><input type="text" name="rev" /></dd> + </dl> + </form> + + <ul class="page-nav"> + <li><a href="{url}summary{sessionvars%urlparameter}">summary</a></li> + <li class="current">shortlog</li> + <li><a href="{url}log{sessionvars%urlparameter}">changelog</a></li> + <li><a href="{url}graph/{node|short}{sessionvars%urlparameter}">graph</a></li> + <li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li> + <li><a href="{url}branches{sessionvars%urlparameter}">branches</a></li> + <li><a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a></li> + {archives%archiveentry} + <li><a href="{url}help{sessionvars%urlparameter}">help</a></li> + </ul> + </div> + + <h2 class="no-link no-border">shortlog</h2> + + <table> +{entries%shortlogentry} + </table> + + <div class="page-path"> + {changenav%navshort} + </div> + +{footer} diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/monoblue/summary.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/monoblue/summary.tmpl new file mode 100644 index 0000000..9830cea --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/monoblue/summary.tmpl @@ -0,0 +1,67 @@ +{header} + <title>{repo|escape}: Summary</title> + <link rel="alternate" type="application/atom+xml" href="{url}atom-log" title="Atom feed for {repo|escape}"/> + <link rel="alternate" type="application/rss+xml" href="{url}rss-log" title="RSS feed for {repo|escape}"/> +</head> + +<body> +<div id="container"> + <div class="page-header"> + <h1><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / summary</h1> + + <form action="{url}log"> + {sessionvars%hiddenformentry} + <dl class="search"> + <dt><label>Search: </label></dt> + 
<dd><input type="text" name="rev" /></dd> + </dl> + </form> + + <ul class="page-nav"> + <li class="current">summary</li> + <li><a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a></li> + <li><a href="{url}log{sessionvars%urlparameter}">changelog</a></li> + <li><a href="{url}graph/{node|short}{sessionvars%urlparameter}">graph</a></li> + <li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li> + <li><a href="{url}branches{sessionvars%urlparameter}">branches</a></li> + <li><a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a></li> + <li><a href="{url}help{sessionvars%urlparameter}">help</a></li> + </ul> + </div> + + <h2 class="no-link no-border">Mercurial Repository Overview</h2> + <dl class="overview"> + <dt>name</dt> + <dd>{repo|escape}</dd> + <dt>description</dt> + <dd>{desc}</dd> + <dt>owner</dt> + <dd>{owner|obfuscate}</dd> + <dt>last change</dt> + <dd>{lastchange|rfc822date}</dd> + </dl> + + <h2><a href="{url}shortlog{sessionvars%urlparameter}">Changes</a></h2> + <table> +{shortlog} + <tr class="light"> + <td colspan="4"><a class="list" href="{url}shortlog{sessionvars%urlparameter}">...</a></td> + </tr> + </table> + + <h2><a href="{url}tags{sessionvars%urlparameter}">Tags</a></h2> + <table> +{tags} + <tr class="light"> + <td colspan="3"><a class="list" href="{url}tags{sessionvars%urlparameter}">...</a></td> + </tr> + </table> + + <h2 class="no-link">Branches</h2> + <table> + {branches%branchentry} + <tr class="light"> + <td colspan="4"><a class="list" href="#">...</a></td> + </tr> + </table> +{footer} diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/monoblue/tags.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/monoblue/tags.tmpl new file mode 100644 index 0000000..9607a8c --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/monoblue/tags.tmpl @@ -0,0 +1,37 @@ +{header} + <title>{repo|escape}: Tags</title> + <link rel="alternate" type="application/atom+xml" href="{url}atom-log" title="Atom feed for {repo|escape}"/> + <link rel="alternate" type="application/rss+xml" href="{url}rss-log" title="RSS feed for {repo|escape}"/> +</head> + +<body> +<div id="container"> + <div class="page-header"> + <h1><a href="{url}summary{sessionvars%urlparameter}">{repo|escape}</a> / Tags</h1> + + <form action="{url}log"> + {sessionvars%hiddenformentry} + <dl class="search"> + <dt><label>Search: </label></dt> + <dd><input type="text" name="rev" /></dd> + </dl> + </form> + + <ul class="page-nav"> + <li><a href="{url}summary{sessionvars%urlparameter}">summary</a></li> + <li><a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a></li> + <li><a href="{url}changelog{sessionvars%urlparameter}">changelog</a></li> + <li><a href="{url}graph/{node|short}{sessionvars%urlparameter}">graph</a></li> + <li class="current">tags</li> + <li><a href="{url}branches{sessionvars%urlparameter}">branches</a></li> + <li><a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a></li> + <li><a href="{url}help{sessionvars%urlparameter}">help</a></li> + </ul> + </div> + + <h2 class="no-link no-border">tags</h2> + <table cellspacing="0"> +{entries%tagentry} + </table> + +{footer} diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/paper/branches.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/paper/branches.tmpl new file mode 100644 index 0000000..a8a4ad2 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/paper/branches.tmpl @@ 
-0,0 +1,48 @@ +{header} +<title>{repo|escape}: branches</title> +<link rel="alternate" type="application/atom+xml" + href="{url}atom-tags" title="Atom feed for {repo|escape}: branches" /> +<link rel="alternate" type="application/rss+xml" + href="{url}rss-tags" title="RSS feed for {repo|escape}: branches" /> +</head> +<body> + +<div class="container"> +<div class="menu"> +<div class="logo"> +<a href="http://mercurial.selenic.com/"> +<img src="{staticurl}hglogo.png" alt="mercurial" /></a> +</div> +<ul> +<li><a href="{url}shortlog{sessionvars%urlparameter}">log</a></li> +<li><a href="{url}graph{sessionvars%urlparameter}">graph</a></li> +<li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li> +<li class="active">branches</li> +</ul> +<ul> + <li><a href="{url}help{sessionvars%urlparameter}">help</a></li> +</ul> +</div> + +<div class="main"> +<h2><a href="{url}{sessionvars%urlparameter}">{repo|escape}</a></h2> +<h3>branches</h3> + +<form class="search" action="{url}log"> +{sessionvars%hiddenformentry} +<p><input name="rev" id="search1" type="text" size="30" /></p> +<div id="hint">find changesets by author, revision, +files, or words in the commit message</div> +</form> + +<table class="bigtable"> +<tr> + <th>branch</th> + <th>node</th> +</tr> +{entries%branchentry} +</table> +</div> +</div> + +{footer} diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/paper/changeset.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/paper/changeset.tmpl new file mode 100644 index 0000000..883f376 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/paper/changeset.tmpl @@ -0,0 +1,74 @@ +{header} +<title>{repo|escape}: {node|short}</title> +</head> +<body> +<div class="container"> +<div class="menu"> +<div class="logo"> +<a href="http://mercurial.selenic.com/"> +<img src="{staticurl}hglogo.png" alt="mercurial" /></a> +</div> +<ul> + <li><a href="{url}shortlog/{node|short}{sessionvars%urlparameter}">log</a></li> + <li><a href="{url}graph/{node|short}{sessionvars%urlparameter}">graph</a></li> + <li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li> + <li><a href="{url}branches{sessionvars%urlparameter}">branches</a></li> +</ul> +<ul> + <li class="active">changeset</li> + <li><a href="{url}raw-rev/{node|short}{sessionvars%urlparameter}">raw</a></li> + <li><a href="{url}file/{node|short}{sessionvars%urlparameter}">browse</a></li> +</ul> +<ul> + {archives%archiveentry} +</ul> +<ul> + <li><a href="{url}help{sessionvars%urlparameter}">help</a></li> +</ul> +</div> + +<div class="main"> + +<h2><a href="{url}{sessionvars%urlparameter}">{repo|escape}</a></h2> +<h3>changeset {rev}:{node|short} {changesetbranch%changelogbranchname} {changesettag}</h3> + +<form class="search" action="{url}log"> +{sessionvars%hiddenformentry} +<p><input name="rev" id="search1" type="text" size="30" /></p> +<div id="hint">find changesets by author, revision, +files, or words in the commit message</div> +</form> + +<div class="description">{desc|strip|escape|addbreaks|nonempty}</div> + +<table id="changesetEntry"> +<tr> + <th class="author">author</th> + <td class="author">{author|obfuscate}</td> +</tr> +<tr> + <th class="date">date</th> + <td class="date">{date|date} ({date|age})</td></tr> +<tr> + <th class="author">parents</th> + <td class="author">{parent%changesetparent}</td> +</tr> +<tr> + <th class="author">children</th> + <td class="author">{child%changesetchild}</td> +</tr> +<tr> + <th class="files">files</th> + <td class="files">{files}</td> 
+</tr> +</table> + +<div class="overflow"> +<div class="sourcefirst"> line diff</div> + +{diff} +</div> + +</div> +</div> +{footer} diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/paper/error.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/paper/error.tmpl new file mode 100644 index 0000000..c815fd0 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/paper/error.tmpl @@ -0,0 +1,44 @@ +{header} +<title>{repo|escape}: error</title> +</head> +<body> + +<div class="container"> +<div class="menu"> +<div class="logo"> +<a href="http://mercurial.selenic.com/"> +<img src="{staticurl}hglogo.png" width=75 height=90 border=0 alt="mercurial" /></a> +</div> +<ul> +<li><a href="{url}shortlog{sessionvars%urlparameter}">log</a></li> +<li><a href="{url}graph{sessionvars%urlparameter}">graph</a></li> +<li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li> +<li><a href="{url}branches{sessionvars%urlparameter}">branches</a></li> +<li><a href="{url}help{sessionvars%urlparameter}">help</a></li> +</ul> +</div> + +<div class="main"> + +<h2><a href="{url}{sessionvars%urlparameter}">{repo|escape}</a></h2> +<h3>error</h3> + +<form class="search" action="{url}log"> +{sessionvars%hiddenformentry} +<p><input name="rev" id="search1" type="text" size="30"></p> +<div id="hint">find changesets by author, revision, +files, or words in the commit message</div> +</form> + +<div class="description"> +<p> +An error occurred while processing your request: +</p> +<p> +{error|escape} +</p> +</div> +</div> +</div> + +{footer} diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/paper/fileannotate.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/paper/fileannotate.tmpl new file mode 100644 index 0000000..1ba24e5 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/paper/fileannotate.tmpl @@ -0,0 +1,81 @@ +{header} +<title>{repo|escape}: {file|escape} annotate</title> +</head> +<body> + +<div class="container"> +<div class="menu"> +<div class="logo"> +<a href="http://mercurial.selenic.com/"> +<img src="{staticurl}hglogo.png" alt="mercurial" /></a> +</div> +<ul> +<li><a href="{url}shortlog/{node|short}{sessionvars%urlparameter}">log</a></li> +<li><a href="{url}graph/{node|short}{sessionvars%urlparameter}">graph</a></li> +<li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li> +<li><a href="{url}branches{sessionvars%urlparameter}">branches</a></li> +</ul> + +<ul> +<li><a href="{url}rev/{node|short}{sessionvars%urlparameter}">changeset</a></li> +<li><a href="{url}file/{node|short}{path|urlescape}{sessionvars%urlparameter}">browse</a></li> +</ul> +<ul> +<li><a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">file</a></li> +<li><a href="{url}file/tip/{file|urlescape}{sessionvars%urlparameter}">latest</a></li> +<li><a href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">diff</a></li> +<li class="active">annotate</li> +<li><a href="{url}log/{node|short}/{file|urlescape}{sessionvars%urlparameter}">file log</a></li> +<li><a href="{url}raw-annotate/{node|short}/{file|urlescape}">raw</a></li> +</ul> +<ul> +<li><a href="{url}help{sessionvars%urlparameter}">help</a></li> +</ul> +</div> + +<div class="main"> +<h2><a href="{url}{sessionvars%urlparameter}">{repo|escape}</a></h2> +<h3>annotate {file|escape} @ {rev}:{node|short}</h3> + +<form class="search" action="{url}log"> +{sessionvars%hiddenformentry} +<p><input name="rev" 
id="search1" type="text" size="30" /></p> +<div id="hint">find changesets by author, revision, +files, or words in the commit message</div> +</form> + +<div class="description">{desc|strip|escape|addbreaks|nonempty}</div> + +<table id="changesetEntry"> +<tr> + <th class="author">author</th> + <td class="author">{author|obfuscate}</td> +</tr> +<tr> + <th class="date">date</th> + <td class="date">{date|date} ({date|age})</td> +</tr> +<tr> + <th class="author">parents</th> + <td class="author">{parent%filerevparent}</td> +</tr> +<tr> + <th class="author">children</th> + <td class="author">{child%filerevchild}</td> +</tr> +{changesettag} +</table> + +<div class="overflow"> +<table class="bigtable"> +<tr> + <th class="annotate">rev</th> + <th class="line"> line source</th> +</tr> +{annotate%annotateline} +</table> +</div> +</div> +</div> + +{footer} diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/paper/filediff.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/paper/filediff.tmpl new file mode 100644 index 0000000..8fec53b --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/paper/filediff.tmpl @@ -0,0 +1,76 @@ +{header} +<title>{repo|escape}: {file|escape} diff</title> +</head> +<body> + +<div class="container"> +<div class="menu"> +<div class="logo"> +<a href="http://mercurial.selenic.com/"> +<img src="{staticurl}hglogo.png" alt="mercurial" /></a> +</div> +<ul> +<li><a href="{url}shortlog/{node|short}{sessionvars%urlparameter}">log</a></li> +<li><a href="{url}graph/{node|short}{sessionvars%urlparameter}">graph</a></li> +<li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li> +<li><a href="{url}branches{sessionvars%urlparameter}">branches</a></li> +</ul> +<ul> +<li><a href="{url}rev/{node|short}{sessionvars%urlparameter}">changeset</a></li> +<li><a href="{url}file/{node|short}{path|urlescape}{sessionvars%urlparameter}">browse</a></li> +</ul> +<ul> +<li><a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">file</a></li> +<li><a href="{url}file/tip/{file|urlescape}{sessionvars%urlparameter}">latest</a></li> +<li class="active">diff</li> +<li><a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}">annotate</a></li> +<li><a href="{url}log/{node|short}/{file|urlescape}{sessionvars%urlparameter}">file log</a></li> +<li><a href="{url}raw-file/{node|short}/{file|urlescape}">raw</a></li> +</ul> +<ul> +<li><a href="{url}help{sessionvars%urlparameter}">help</a></li> +</ul> +</div> + +<div class="main"> +<h2><a href="{url}{sessionvars%urlparameter}">{repo|escape}</a></h2> +<h3>diff {file|escape} @ {rev}:{node|short}</h3> + +<form class="search" action="{url}log"> +<p>{sessionvars%hiddenformentry}</p> +<p><input name="rev" id="search1" type="text" size="30" /></p> +<div id="hint">find changesets by author, revision, +files, or words in the commit message</div> +</form> + +<div class="description">{desc|strip|escape|addbreaks|nonempty}</div> + +<table id="changesetEntry"> +<tr> + <th>author</th> + <td>{author|obfuscate}</td> +</tr> +<tr> + <th>date</th> + <td>{date|date} ({date|age})</td> +</tr> +<tr> + <th>parents</th> + <td>{parent%filerevparent}</td> +</tr> +<tr> + <th>children</th> + <td>{child%filerevchild}</td> +</tr> +{changesettag} +</table> + +<div class="overflow"> +<div class="sourcefirst"> line diff</div> + +{diff} +</div> +</div> +</div> + +{footer} diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/paper/filelog.tmpl 
b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/paper/filelog.tmpl new file mode 100644 index 0000000..a5fb5cd --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/paper/filelog.tmpl @@ -0,0 +1,72 @@ +{header} +<title>{repo|escape}: {file|escape} history</title> +<link rel="alternate" type="application/atom+xml" + href="{url}atom-log/tip/{file|urlescape}" title="Atom feed for {repo|escape}:{file}" /> +<link rel="alternate" type="application/rss+xml" + href="{url}rss-log/tip/{file|urlescape}" title="RSS feed for {repo|escape}:{file}" /> +</head> +<body> + +<div class="container"> +<div class="menu"> +<div class="logo"> +<a href="http://mercurial.selenic.com/"> +<img src="{staticurl}hglogo.png" alt="mercurial" /></a> +</div> +<ul> +<li><a href="{url}shortlog/{node|short}{sessionvars%urlparameter}">log</a></li> +<li><a href="{url}graph/{node|short}{sessionvars%urlparameter}">graph</a></li> +<li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li> +<li><a href="{url}branches{sessionvars%urlparameter}">branches</a></li> +</ul> +<ul> +<li><a href="{url}rev/{node|short}{sessionvars%urlparameter}">changeset</a></li> +<li><a href="{url}file/{node|short}{path|urlescape}{sessionvars%urlparameter}">browse</a></li> +</ul> +<ul> +<li><a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">file</a></li> +<li><a href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">diff</a></li> +<li><a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}">annotate</a></li> +<li class="active">file log</li> +<li><a href="{url}raw-file/{node|short}/{file|urlescape}">raw</a></li> +</ul> +<ul> +<li><a href="{url}help{sessionvars%urlparameter}">help</a></li> +</ul> +</div> + +<div class="main"> +<h2><a href="{url}{sessionvars%urlparameter}">{repo|escape}</a></h2> +<h3>log {file|escape}</h3> + +<form class="search" action="{url}log"> +{sessionvars%hiddenformentry} +<p><input name="rev" id="search1" type="text" size="30" /></p> +<div id="hint">find changesets by author, revision, +files, or words in the commit message</div> +</form> + +<div class="navigate"> +<a href="{url}log/{node|short}/{file|urlescape}{lessvars%urlparameter}">less</a> +<a href="{url}log/{node|short}/{file|urlescape}{morevars%urlparameter}">more</a> +| {nav%filenav}</div> + +<table class="bigtable"> + <tr> + <th class="age">age</th> + <th class="author">author</th> + <th class="description">description</th> + </tr> +{entries%filelogentry} +</table> + +<div class="navigate"> +<a href="{url}log/{node|short}/{file|urlescape}{lessvars%urlparameter}">less</a> +<a href="{url}log/{node|short}/{file|urlescape}{morevars%urlparameter}">more</a> +| {nav%filenav} +</div> + +</div> +</div> + +{footer} diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/paper/filelogentry.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/paper/filelogentry.tmpl new file mode 100644 index 0000000..43e068c --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/paper/filelogentry.tmpl @@ -0,0 +1,5 @@ + <tr class="parity{parity}"> + <td class="age">{date|age}</td> + <td class="author">{author|person}</td> + <td class="description"><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{desc|strip|firstline|escape|nonempty}</a>{inbranch%changelogbranchname}{branches%changelogbranchhead}{tags%changelogtag}</td> + </tr> diff --git 
a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/paper/filerevision.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/paper/filerevision.tmpl new file mode 100644 index 0000000..2a23461 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/paper/filerevision.tmpl @@ -0,0 +1,76 @@ +{header} +<title>{repo|escape}: {node|short} {file|escape}</title> +</head> +<body> + +<div class="container"> +<div class="menu"> +<div class="logo"> +<a href="http://mercurial.selenic.com/"> +<img src="{staticurl}hglogo.png" alt="mercurial" /></a> +</div> +<ul> +<li><a href="{url}shortlog/{node|short}{sessionvars%urlparameter}">log</a></li> +<li><a href="{url}graph/{node|short}{sessionvars%urlparameter}">graph</a></li> +<li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li> +<li><a href="{url}branches{sessionvars%urlparameter}">branches</a></li> +</ul> +<ul> +<li><a href="{url}rev/{node|short}{sessionvars%urlparameter}">changeset</a></li> +<li><a href="{url}file/{node|short}{path|urlescape}{sessionvars%urlparameter}">browse</a></li> +</ul> +<ul> +<li class="active">file</li> +<li><a href="{url}file/tip/{file|urlescape}{sessionvars%urlparameter}">latest</a></li> +<li><a href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">diff</a></li> +<li><a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}">annotate</a></li> +<li><a href="{url}log/{node|short}/{file|urlescape}{sessionvars%urlparameter}">file log</a></li> +<li><a href="{url}raw-file/{node|short}/{file|urlescape}">raw</a></li> +</ul> +<ul> +<li><a href="{url}help{sessionvars%urlparameter}">help</a></li> +</ul> +</div> + +<div class="main"> +<h2><a href="{url}{sessionvars%urlparameter}">{repo|escape}</a></h2> +<h3>view {file|escape} @ {rev}:{node|short}</h3> + +<form class="search" action="{url}log"> +{sessionvars%hiddenformentry} +<p><input name="rev" id="search1" type="text" size="30" /></p> +<div id="hint">find changesets by author, revision, +files, or words in the commit message</div> +</form> + +<div class="description">{desc|strip|escape|addbreaks|nonempty}</div> + +<table id="changesetEntry"> +<tr> + <th class="author">author</th> + <td class="author">{author|obfuscate}</td> +</tr> +<tr> + <th class="date">date</th> + <td class="date">{date|date} ({date|age})</td> +</tr> +<tr> + <th class="author">parents</th> + <td class="author">{parent%filerevparent}</td> +</tr> +<tr> + <th class="author">children</th> + <td class="author">{child%filerevchild}</td> +</tr> +{changesettag} +</table> + +<div class="overflow"> +<div class="sourcefirst"> line source</div> +{text%fileline} +<div class="sourcelast"></div> +</div> +</div> +</div> + +{footer} diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/paper/footer.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/paper/footer.tmpl new file mode 100644 index 0000000..6231a3c --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/paper/footer.tmpl @@ -0,0 +1,4 @@ +{motd} + +</body> +</html> diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/paper/graph.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/paper/graph.tmpl new file mode 100644 index 0000000..597614a --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/paper/graph.tmpl @@ -0,0 +1,135 @@ +{header} +<title>{repo|escape}: revision graph</title> +<link rel="alternate" 
type="application/atom+xml" + href="{url}atom-log" title="Atom feed for {repo|escape}: log" /> +<link rel="alternate" type="application/rss+xml" + href="{url}rss-log" title="RSS feed for {repo|escape}: log" /> +<!--[if IE]><script type="text/javascript" src="{staticurl}excanvas.js"></script><![endif]--> +</head> +<body> + +<div class="container"> +<div class="menu"> +<div class="logo"> +<a href="http://mercurial.selenic.com/"> +<img src="{staticurl}hglogo.png" alt="mercurial" /></a> +</div> +<ul> +<li><a href="{url}shortlog/{node|short}{sessionvars%urlparameter}">log</a></li> +<li class="active">graph</li> +<li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li> +<li><a href="{url}branches{sessionvars%urlparameter}">branches</a></li> +</ul> +<ul> +<li><a href="{url}rev/{node|short}{sessionvars%urlparameter}">changeset</a></li> +<li><a href="{url}file/{node|short}{path|urlescape}{sessionvars%urlparameter}">browse</a></li> +</ul> +<ul> + <li><a href="{url}help{sessionvars%urlparameter}">help</a></li> +</ul> +</div> + +<div class="main"> +<h2><a href="{url}{sessionvars%urlparameter}">{repo|escape}</a></h2> +<h3>graph</h3> + +<form class="search" action="{url}log"> +{sessionvars%hiddenformentry} +<p><input name="rev" id="search1" type="text" size="30" /></p> +<div id="hint">find changesets by author, revision, +files, or words in the commit message</div> +</form> + +<div class="navigate"> +<a href="{url}graph/{rev}{lessvars%urlparameter}">less</a> +<a href="{url}graph/{rev}{morevars%urlparameter}">more</a> +| rev {rev}: {changenav%navgraph} +</div> + +<noscript><p>The revision graph only works with JavaScript-enabled browsers.</p></noscript> + +<div id="wrapper"> +<ul id="nodebgs"></ul> +<canvas id="graph" width="224" height="{canvasheight}"></canvas> +<ul id="graphnodes"></ul> +</div> + +<script type="text/javascript" src="{staticurl}graph.js"></script> +<script type="text/javascript"> +<!-- hide script content + +var data = {jsdata|json}; +var graph = new Graph(); +graph.scale({bg_height}); + +graph.edge = function(x0, y0, x1, y1, color) \{ + + this.setColor(color, 0.0, 0.65); + this.ctx.beginPath(); + this.ctx.moveTo(x0, y0); + this.ctx.lineTo(x1, y1); + this.ctx.stroke(); + +} + +var revlink = '<li style="_STYLE"><span class="desc">'; +revlink += '<a href="{url}rev/_NODEID{sessionvars%urlparameter}" title="_NODEID">_DESC</a>'; +revlink += '</span>_TAGS<span class="info">_DATE, by _USER</span></li>'; + +graph.vertex = function(x, y, color, parity, cur) \{ + + this.ctx.beginPath(); + color = this.setColor(color, 0.25, 0.75); + this.ctx.arc(x, y, radius, 0, Math.PI * 2, true); + this.ctx.fill(); + + var bg = '<li class="bg parity' + parity + '"></li>'; + var left = (this.columns + 1) * this.bg_height; + var nstyle = 'padding-left: ' + left + 'px;'; + var item = revlink.replace(/_STYLE/, nstyle); + item = item.replace(/_PARITY/, 'parity' + parity); + item = item.replace(/_NODEID/, cur[0]); + item = item.replace(/_NODEID/, cur[0]); + item = item.replace(/_DESC/, cur[3]); + item = item.replace(/_USER/, cur[4]); + item = item.replace(/_DATE/, cur[5]); + + var tagspan = ''; + if (cur[7].length || (cur[6][0] != 'default' || cur[6][1])) \{ + tagspan = '<span class="logtags">'; + if (cur[6][1]) \{ + tagspan += '<span class="branchhead" title="' + cur[6][0] + '">'; + tagspan += cur[6][0] + '</span> '; + } else if (!cur[6][1] && cur[6][0] != 'default') \{ + tagspan += '<span class="branchname" title="' + cur[6][0] + '">'; + tagspan += cur[6][0] + '</span> '; + } + if (cur[7].length) \{ + for 
(var t in cur[7]) \{ + var tag = cur[7][t]; + tagspan += '<span class="tag">' + tag + '</span> '; + } + } + tagspan += '</span>'; + } + + item = item.replace(/_TAGS/, tagspan); + return [bg, item]; + +} + +graph.render(data); + +// stop hiding script --> +</script> + +<div class="navigate"> +<a href="{url}graph/{rev}{lessvars%urlparameter}">less</a> +<a href="{url}graph/{rev}{morevars%urlparameter}">more</a> +| rev {rev}: {changenav%navgraph} +</div> + +</div> +</div> + +{footer} diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/paper/header.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/paper/header.tmpl new file mode 100644 index 0000000..305bc2f --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/paper/header.tmpl @@ -0,0 +1,6 @@ +<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd"> +<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en-US"> +<head> +<link rel="icon" href="{staticurl}hgicon.png" type="image/png" /> +<meta name="robots" content="index, nofollow" /> +<link rel="stylesheet" href="{staticurl}style-paper.css" type="text/css" /> diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/paper/help.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/paper/help.tmpl new file mode 100644 index 0000000..22b16bd --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/paper/help.tmpl @@ -0,0 +1,43 @@ +{header} +<title>Help: {topic}</title> +<link rel="alternate" type="application/atom+xml" + href="{url}atom-tags" title="Atom feed for {repo|escape}" /> +<link rel="alternate" type="application/rss+xml" + href="{url}rss-tags" title="RSS feed for {repo|escape}" /> +</head> +<body> + +<div class="container"> +<div class="menu"> +<div class="logo"> +<a href="http://mercurial.selenic.com/"> +<img src="{staticurl}hglogo.png" alt="mercurial" /></a> +</div> +<ul> +<li><a href="{url}shortlog{sessionvars%urlparameter}">log</a></li> +<li><a href="{url}graph{sessionvars%urlparameter}">graph</a></li> +<li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li> +<li><a href="{url}branches{sessionvars%urlparameter}">branches</a></li> +</ul> +<ul> + <li class="active">help</li> +</ul> +</div> + +<div class="main"> +<h2><a href="{url}{sessionvars%urlparameter}">{repo|escape}</a></h2> +<h3>Help: {topic}</h3> + +<form class="search" action="{url}log"> +{sessionvars%hiddenformentry} +<p><input name="rev" id="search1" type="text" size="30" /></p> +<div id="hint">find changesets by author, revision, +files, or words in the commit message</div> +</form> +<pre> +{doc|escape} +</pre> +</div> +</div> + +{footer} diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/paper/helptopics.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/paper/helptopics.tmpl new file mode 100644 index 0000000..8711160 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/paper/helptopics.tmpl @@ -0,0 +1,48 @@ +{header} +<title>Help: {title}</title> +<link rel="alternate" type="application/atom+xml" + href="{url}atom-tags" title="Atom feed for {repo|escape}" /> +<link rel="alternate" type="application/rss+xml" + href="{url}rss-tags" title="RSS feed for {repo|escape}" /> +</head> +<body> + +<div class="container"> +<div class="menu"> +<div class="logo"> +<a href="http://mercurial.selenic.com/"> +<img src="{staticurl}hglogo.png" alt="mercurial" /></a> +</div> +<ul> 
+<li><a href="{url}shortlog{sessionvars%urlparameter}">log</a></li> +<li><a href="{url}graph{sessionvars%urlparameter}">graph</a></li> +<li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li> +<li><a href="{url}branches{sessionvars%urlparameter}">branches</a></li> +</ul> +<ul> +<li class="active">help</li> +</ul> +</div> + +<div class="main"> +<h2><a href="{url}{sessionvars%urlparameter}">{repo|escape}</a></h2> +<form class="search" action="{url}log"> +{sessionvars%hiddenformentry} +<p><input name="rev" id="search1" type="text" size="30" /></p> +<div id="hint">find changesets by author, revision, +files, or words in the commit message</div> +</form> +<table class="bigtable"> +<tr><td colspan="2"><h2><a name="main" href="#topics">Topics</a></h2></td></tr> +{topics % helpentry} + +<tr><td colspan="2"><h2><a name="main" href="#main">Main Commands</a></h2></td></tr> +{earlycommands % helpentry} + +<tr><td colspan="2"><h2><a name="other" href="#other">Other Commands</a></h2></td></tr> +{othercommands % helpentry} +</table> +</div> +</div> + +{footer} diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/paper/index.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/paper/index.tmpl new file mode 100644 index 0000000..b8adfc9 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/paper/index.tmpl @@ -0,0 +1,26 @@ +{header} +<title>Mercurial repositories index</title> +</head> +<body> + +<div class="container"> +<div class="menu"> +<a href="http://mercurial.selenic.com/"> +<img src="{staticurl}hglogo.png" width=75 height=90 border=0 alt="mercurial" /></a> +</div> +<div class="main"> +<h2>Mercurial Repositories</h2> + +<table class="bigtable"> + <tr> + <th><a href="?sort={sort_name}">Name</a></th> + <th><a href="?sort={sort_description}">Description</a></th> + <th><a href="?sort={sort_contact}">Contact</a></th> + <th><a href="?sort={sort_lastchange}">Last modified</a></th> + <th> </th> + </tr> + {entries%indexentry} +</table> +</div> +</div> +{footer} diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/paper/manifest.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/paper/manifest.tmpl new file mode 100644 index 0000000..ff537bd --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/paper/manifest.tmpl @@ -0,0 +1,57 @@ +{header} +<title>{repo|escape}: {node|short} {path|escape}</title> +</head> +<body> + +<div class="container"> +<div class="menu"> +<div class="logo"> +<a href="http://mercurial.selenic.com/"> +<img src="{staticurl}hglogo.png" alt="mercurial" /></a> +</div> +<ul> +<li><a href="{url}shortlog/{node|short}{sessionvars%urlparameter}">log</a></li> +<li><a href="{url}graph/{node|short}{sessionvars%urlparameter}">graph</a></li> +<li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li> +<li><a href="{url}branches{sessionvars%urlparameter}">branches</a></li> +</ul> +<ul> +<li><a href="{url}rev/{node|short}{sessionvars%urlparameter}">changeset</a></li> +<li class="active">browse</li> +</ul> +<ul> +{archives%archiveentry} +</ul> +<ul> + <li><a href="{url}help{sessionvars%urlparameter}">help</a></li> +</ul> +</div> + +<div class="main"> +<h2><a href="{url}{sessionvars%urlparameter}">{repo|escape}</a></h2> +<h3>directory {path|escape} @ {rev}:{node|short} {tags%changelogtag}</h3> + +<form class="search" action="{url}log"> +{sessionvars%hiddenformentry} +<p><input name="rev" id="search1" type="text" size="30" /></p> +<div id="hint">find 
changesets by author, revision, +files, or words in the commit message</div> +</form> + +<table class="bigtable"> +<tr> + <th class="name">name</th> + <th class="size">size</th> + <th class="permissions">permissions</th> +</tr> +<tr class="fileline parity{upparity}"> + <td class="name"><a href="{url}file/{node|short}{up|urlescape}{sessionvars%urlparameter}">[up]</a></td> + <td class="size"></td> + <td class="permissions">drwxr-xr-x</td> +</tr> +{dentries%direntry} +{fentries%fileentry} +</table> +</div> +</div> +{footer} diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/paper/map b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/paper/map new file mode 100644 index 0000000..caac1c7 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/paper/map @@ -0,0 +1,200 @@ +default = 'shortlog' + +mimetype = 'text/html; charset={encoding}' +header = header.tmpl +footer = footer.tmpl +search = search.tmpl + +changelog = shortlog.tmpl +shortlog = shortlog.tmpl +shortlogentry = shortlogentry.tmpl +graph = graph.tmpl +help = help.tmpl +helptopics = helptopics.tmpl + +helpentry = '<tr><td><a href="{url}help/{topic|escape}{sessionvars%urlparameter}">{topic|escape}</a></td><td>{summary|escape}</td></tr>' + +naventry = '<a href="{url}log/{node|short}{sessionvars%urlparameter}">{label|escape}</a> ' +navshortentry = '<a href="{url}shortlog/{node|short}{sessionvars%urlparameter}">{label|escape}</a> ' +navgraphentry = '<a href="{url}graph/{node|short}{sessionvars%urlparameter}">{label|escape}</a> ' +filenaventry = '<a href="{url}log/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{label|escape}</a> ' +filedifflink = '<a href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{file|escape}</a> ' +filenodelink = '<a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{file|escape}</a> ' +filenolink = '{file|escape} ' +fileellipses = '...' 
+changelogentry = shortlogentry.tmpl +searchentry = shortlogentry.tmpl +changeset = changeset.tmpl +manifest = manifest.tmpl + +nav = '{before%naventry} {after%naventry}' +navshort = '{before%navshortentry}{after%navshortentry}' +navgraph = '{before%navgraphentry}{after%navgraphentry}' +filenav = '{before%filenaventry}{after%filenaventry}' + +direntry = ' + <tr class="fileline parity{parity}"> + <td class="name"> + <a href="{url}file/{node|short}{path|urlescape}{sessionvars%urlparameter}"> + <img src="{staticurl}coal-folder.png" alt="dir."/> {basename|escape}/ + </a> + <a href="{url}file/{node|short}{path|urlescape}/{emptydirs|urlescape}{sessionvars%urlparameter}"> + {emptydirs|escape} + </a> + </td> + <td class="size"></td> + <td class="permissions">drwxr-xr-x</td> + </tr>' + +fileentry = ' + <tr class="fileline parity{parity}"> + <td class="filename"> + <a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}"> + <img src="{staticurl}coal-file.png" alt="file"/> {basename|escape} + </a> + </td> + <td class="size">{size}</td> + <td class="permissions">{permissions|permissions}</td> + </tr>' + +filerevision = filerevision.tmpl +fileannotate = fileannotate.tmpl +filediff = filediff.tmpl +filelog = filelog.tmpl +fileline = ' + <div class="parity{parity} source"><a href="#{lineid}" id="{lineid}">{linenumber}</a> {line|escape}</div>' +filelogentry = filelogentry.tmpl + +annotateline = ' + <tr class="parity{parity}"> + <td class="annotate"> + <a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}#{targetline}" + title="{node|short}: {desc|escape|firstline}">{author|user}@{rev}</a> + </td> + <td class="source"><a href="#{lineid}" id="{lineid}">{linenumber}</a> {line|escape}</td> + </tr>' + +diffblock = '<div class="source bottomline parity{parity}"><pre>{lines}</pre></div>' +difflineplus = '<a href="#{lineid}" id="{lineid}">{linenumber}</a> <span class="plusline">{line|escape}</span>' +difflineminus = '<a href="#{lineid}" id="{lineid}">{linenumber}</a> <span class="minusline">{line|escape}</span>' +difflineat = '<a href="#{lineid}" id="{lineid}">{linenumber}</a> <span class="atline">{line|escape}</span>' +diffline = '<a href="#{lineid}" id="{lineid}">{linenumber}</a> {line|escape}' + +changelogparent = ' + <tr> + <th class="parent">parent {rev}:</th> + <td class="parent"><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a></td> + </tr>' + +changesetparent = '<a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a> ' + +filerevparent = '<a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{rename%filerename}{node|short}</a> ' +filerevchild = '<a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{node|short}</a> ' + +filerename = '{file|escape}@' +filelogrename = ' + <tr> + <th>base:</th> + <td> + <a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}"> + {file|escape}@{node|short} + </a> + </td> + </tr>' +fileannotateparent = ' + <tr> + <td class="metatag">parent:</td> + <td> + <a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}"> + {rename%filerename}{node|short} + </a> + </td> + </tr>' +changesetchild = ' <a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a>' +changelogchild = ' + <tr> + <th class="child">child</th> + <td class="child"> + <a href="{url}rev/{node|short}{sessionvars%urlparameter}"> + {node|short} + </a> + </td> + </tr>' +fileannotatechild = ' + <tr> + <td class="metatag">child:</td> + 
<td> + <a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}"> + {node|short} + </a> + </td> + </tr>' +tags = tags.tmpl +tagentry = ' + <tr class="tagEntry parity{parity}"> + <td> + <a href="{url}rev/{node|short}{sessionvars%urlparameter}"> + {tag|escape} + </a> + </td> + <td class="node"> + {node|short} + </td> + </tr>' +branches = branches.tmpl +branchentry = ' + <tr class="tagEntry parity{parity}"> + <td> + <a href="{url}shortlog/{node|short}{sessionvars%urlparameter}" class="{status}"> + {branch|escape} + </a> + </td> + <td class="node"> + {node|short} + </td> + </tr>' +changelogtag = '<span class="tag">{name|escape}</span> ' +changesettag = '<span class="tag">{tag|escape}</span> ' +changelogbranchhead = '<span class="branchhead">{name|escape}</span> ' +changelogbranchname = '<span class="branchname">{name|escape}</span> ' + +filediffparent = ' + <tr> + <th class="parent">parent {rev}:</th> + <td class="parent"><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a></td> + </tr>' +filelogparent = ' + <tr> + <th>parent {rev}:</th> + <td><a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{node|short}</a></td> + </tr>' +filediffchild = ' + <tr> + <th class="child">child {rev}:</th> + <td class="child"><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a> + </td> + </tr>' +filelogchild = ' + <tr> + <th>child {rev}:</th> + <td><a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{node|short}</a></td> + </tr>' + +indexentry = ' + <tr class="parity{parity}"> + <td><a href="{url}{sessionvars%urlparameter}">{name|escape}</a></td> + <td>{description}</td> + <td>{contact|obfuscate}</td> + <td class="age">{lastchange|age}</td> + <td class="indexlinks">{archives%indexarchiveentry}</td> + </tr>\n' +indexarchiveentry = '<a href="{url}archive/{node|short}{extension|urlescape}"> ↓{type|escape}</a>' +index = index.tmpl +archiveentry = ' + <li> + <a href="{url}archive/{node|short}{extension|urlescape}">{type|escape}</a> + </li>' +notfound = notfound.tmpl +error = error.tmpl +urlparameter = '{separator}{name}={value|urlescape}' +hiddenformentry = '<input type="hidden" name="{name}" value="{value|escape}" />' diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/paper/notfound.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/paper/notfound.tmpl new file mode 100644 index 0000000..e9e6ba4 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/paper/notfound.tmpl @@ -0,0 +1,12 @@ +{header} +<title>Mercurial repository not found</title> +</head> +<body> + +<h2>Mercurial repository not found</h2> + +The specified repository "{repo|escape}" is unknown, sorry. + +Please go back to the <a href="{url}">main repository list page</a>. 
+ +{footer} diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/paper/search.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/paper/search.tmpl new file mode 100644 index 0000000..ef5c6b0 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/paper/search.tmpl @@ -0,0 +1,54 @@ +{header} +<title>{repo|escape}: searching for {query|escape}</title> +</head> +<body> + +<div class="container"> +<div class="menu"> +<div class="logo"> +<a href="http://mercurial.selenic.com/"> +<img src="{staticurl}hglogo.png" width=75 height=90 border=0 alt="mercurial"></a> +</div> +<ul> +<li><a href="{url}shortlog{sessionvars%urlparameter}">log</a></li> +<li><a href="{url}graph{sessionvars%urlparameter}">graph</a></li> +<li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li> +<li><a href="{url}branches{sessionvars%urlparameter}">branches</a></li> +<li><a href="{url}help{sessionvars%urlparameter}">help</a></li> +</ul> +</div> + +<div class="main"> +<h2><a href="{url}{sessionvars%urlparameter}">{repo|escape}</a></h2> +<h3>searching for '{query|escape}'</h3> + +<form class="search" action="{url}log"> +{sessionvars%hiddenformentry} +<p><input name="rev" id="search1" type="text" size="30"></p> +<div id="hint">find changesets by author, revision, +files, or words in the commit message</div> +</form> + +<div class="navigate"> +<a href="{url}search/{lessvars%urlparameter}">less</a> +<a href="{url}search/{morevars%urlparameter}">more</a> +</div> + +<table class="bigtable"> + <tr> + <th class="age">age</th> + <th class="author">author</th> + <th class="description">description</th> + </tr> +{entries} +</table> + +<div class="navigate"> +<a href="{url}search/{lessvars%urlparameter}">less</a> +<a href="{url}search/{morevars%urlparameter}">more</a> +</div> + +</div> +</div> + +{footer} diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/paper/shortlog.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/paper/shortlog.tmpl new file mode 100644 index 0000000..8fed55b --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/paper/shortlog.tmpl @@ -0,0 +1,69 @@ +{header} +<title>{repo|escape}: log</title> +<link rel="alternate" type="application/atom+xml" + href="{url}atom-log" title="Atom feed for {repo|escape}" /> +<link rel="alternate" type="application/rss+xml" + href="{url}rss-log" title="RSS feed for {repo|escape}" /> +</head> +<body> + +<div class="container"> +<div class="menu"> +<div class="logo"> +<a href="http://mercurial.selenic.com/"> +<img src="{staticurl}hglogo.png" alt="mercurial" /></a> +</div> +<ul> +<li class="active">log</li> +<li><a href="{url}graph/{node|short}{sessionvars%urlparameter}">graph</a></li> +<li><a href="{url}tags{sessionvars%urlparameter}">tags</a></li> +<li><a href="{url}branches{sessionvars%urlparameter}">branches</a></li> +</ul> +<ul> +<li><a href="{url}rev/{node|short}{sessionvars%urlparameter}">changeset</a></li> +<li><a href="{url}file/{node|short}{path|urlescape}{sessionvars%urlparameter}">browse</a></li> +</ul> +<ul> +{archives%archiveentry} +</ul> +<ul> + <li><a href="{url}help{sessionvars%urlparameter}">help</a></li> +</ul> +</div> + +<div class="main"> +<h2><a href="{url}{sessionvars%urlparameter}">{repo|escape}</a></h2> +<h3>log</h3> + +<form class="search" action="{url}log"> +{sessionvars%hiddenformentry} +<p><input name="rev" id="search1" type="text" size="30" /></p> +<div id="hint">find changesets by author, revision, +files, 
or words in the commit message</div> +</form> + +<div class="navigate"> +<a href="{url}shortlog/{rev}{lessvars%urlparameter}">less</a> +<a href="{url}shortlog/{rev}{morevars%urlparameter}">more</a> +| rev {rev}: {changenav%navshort} +</div> + +<table class="bigtable"> + <tr> + <th class="age">age</th> + <th class="author">author</th> + <th class="description">description</th> + </tr> +{entries%shortlogentry} +</table> + +<div class="navigate"> +<a href="{url}shortlog/{rev}{lessvars%urlparameter}">less</a> +<a href="{url}shortlog/{rev}{morevars%urlparameter}">more</a> +| rev {rev}: {changenav%navshort} +</div> + +</div> +</div> + +{footer} diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/paper/shortlogentry.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/paper/shortlogentry.tmpl new file mode 100644 index 0000000..43e068c --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/paper/shortlogentry.tmpl @@ -0,0 +1,5 @@ + <tr class="parity{parity}"> + <td class="age">{date|age}</td> + <td class="author">{author|person}</td> + <td class="description"><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{desc|strip|firstline|escape|nonempty}</a>{inbranch%changelogbranchname}{branches%changelogbranchhead}{tags%changelogtag}</td> + </tr> diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/paper/tags.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/paper/tags.tmpl new file mode 100644 index 0000000..3c2461f --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/paper/tags.tmpl @@ -0,0 +1,48 @@ +{header} +<title>{repo|escape}: tags</title> +<link rel="alternate" type="application/atom+xml" + href="{url}atom-tags" title="Atom feed for {repo|escape}: tags" /> +<link rel="alternate" type="application/rss+xml" + href="{url}rss-tags" title="RSS feed for {repo|escape}: tags" /> +</head> +<body> + +<div class="container"> +<div class="menu"> +<div class="logo"> +<a href="http://mercurial.selenic.com/"> +<img src="{staticurl}hglogo.png" alt="mercurial" /></a> +</div> +<ul> +<li><a href="{url}shortlog{sessionvars%urlparameter}">log</a></li> +<li><a href="{url}graph{sessionvars%urlparameter}">graph</a></li> +<li class="active">tags</li> +<li><a href="{url}branches{sessionvars%urlparameter}">branches</a></li> +</ul> +<ul> +<li><a href="{url}help{sessionvars%urlparameter}">help</a></li> +</ul> +</div> + +<div class="main"> +<h2><a href="{url}{sessionvars%urlparameter}">{repo|escape}</a></h2> +<h3>tags</h3> + +<form class="search" action="{url}log"> +{sessionvars%hiddenformentry} +<p><input name="rev" id="search1" type="text" size="30" /></p> +<div id="hint">find changesets by author, revision, +files, or words in the commit message</div> +</form> + +<table class="bigtable"> +<tr> + <th>tag</th> + <th>node</th> +</tr> +{entries%tagentry} +</table> +</div> +</div> + +{footer} diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/raw/changeset.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/raw/changeset.tmpl new file mode 100644 index 0000000..b59d99b --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/raw/changeset.tmpl @@ -0,0 +1,9 @@ +{header} +# HG changeset patch +# User {author} +# Date {date|hgdate} +# Node ID {node} +{parent%changesetparent} +{desc} + +{diff} diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/raw/error.tmpl 
b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/raw/error.tmpl new file mode 100644 index 0000000..9407c13 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/raw/error.tmpl @@ -0,0 +1,2 @@ +{header} +error: {error} diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/raw/fileannotate.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/raw/fileannotate.tmpl new file mode 100644 index 0000000..ad1bed6 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/raw/fileannotate.tmpl @@ -0,0 +1,5 @@ +{header} +{annotate%annotateline} +{footer} + + diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/raw/filediff.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/raw/filediff.tmpl new file mode 100644 index 0000000..c4014bc --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/raw/filediff.tmpl @@ -0,0 +1,5 @@ +{header} +{diff} +{footer} + + diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/raw/index.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/raw/index.tmpl new file mode 100644 index 0000000..29d7c9e --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/raw/index.tmpl @@ -0,0 +1,2 @@ +{header} +{entries%indexentry} diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/raw/manifest.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/raw/manifest.tmpl new file mode 100644 index 0000000..8d4a934 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/raw/manifest.tmpl @@ -0,0 +1,3 @@ +{header} +{dentries%direntry}{fentries%fileentry} +{footer} diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/raw/map b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/raw/map new file mode 100644 index 0000000..980e91d --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/raw/map @@ -0,0 +1,28 @@ +mimetype = 'text/plain; charset={encoding}' +header = '' +footer = '' +changeset = changeset.tmpl +difflineplus = '{line}' +difflineminus = '{line}' +difflineat = '{line}' +diffline = '{line}' +changesetparent = '# Parent {node}' +changesetchild = '# Child {node}' +filenodelink = '' +filenolink = '' +fileline = '{line}' +diffblock = '{lines}' +filediff = filediff.tmpl +fileannotate = fileannotate.tmpl +annotateline = '{author|user}@{rev}: {line}' +manifest = manifest.tmpl +direntry = 'drwxr-xr-x {basename}\n' +fileentry = '{permissions|permissions} {size} {basename}\n' +index = index.tmpl +notfound = notfound.tmpl +error = error.tmpl +indexentry = '{url}\n' +tags = '{entries%tagentry}' +tagentry = '{tag} {node}\n' +branches = '{entries%branchentry}' +branchentry = '{branch} {node} {status}\n' diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/raw/notfound.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/raw/notfound.tmpl new file mode 100644 index 0000000..a7b3251 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/raw/notfound.tmpl @@ -0,0 +1,2 @@ +{header} +error: repository {repo} not found diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/rss/changelog.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/rss/changelog.tmpl new file mode 100644 index 0000000..65b96ad --- /dev/null +++ 
b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/rss/changelog.tmpl @@ -0,0 +1,6 @@ +{header} + <title>{repo|escape} Changelog</title> + <description>{repo|escape} Changelog</description> + {entries%changelogentry} + </channel> +</rss>
\ No newline at end of file diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/rss/changelogentry.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/rss/changelogentry.tmpl new file mode 100644 index 0000000..12fe8e0 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/rss/changelogentry.tmpl @@ -0,0 +1,7 @@ +<item> + <title>{desc|strip|firstline|strip|escape}</title> + <guid isPermaLink="true">{urlbase}{url}rev/{node|short}</guid> + <description><![CDATA[{desc|strip|escape|addbreaks|nonempty}]]></description> + <author>{author|obfuscate}</author> + <pubDate>{date|rfc822date}</pubDate> +</item> diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/rss/error.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/rss/error.tmpl new file mode 100644 index 0000000..87e6009 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/rss/error.tmpl @@ -0,0 +1,10 @@ +{header} + <title>Error</title> + <description>Error</description> + <item> + <title>Error</title> + <description>{error|escape}</description> + <guid>http://mercurial.selenic.com/#error</guid> + </item> + </channel> +</rss> diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/rss/filelog.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/rss/filelog.tmpl new file mode 100644 index 0000000..31f4dc7 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/rss/filelog.tmpl @@ -0,0 +1,6 @@ +{header} + <title>{repo|escape}: {file|escape} history</title> + <description>{file|escape} revision history</description> + {entries%filelogentry} + </channel> +</rss> diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/rss/filelogentry.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/rss/filelogentry.tmpl new file mode 100644 index 0000000..220dc4a --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/rss/filelogentry.tmpl @@ -0,0 +1,7 @@ +<item> + <title>{desc|strip|firstline|strip|escape}</title> + <link>{urlbase}{url}log{{node|short}}/{file|urlescape}</link> + <description><![CDATA[{desc|strip|escape|addbreaks|nonempty}]]></description> + <author>{author|obfuscate}</author> + <pubDate>{date|rfc822date}</pubDate> +</item> diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/rss/header.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/rss/header.tmpl new file mode 100644 index 0000000..ed29196 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/rss/header.tmpl @@ -0,0 +1,5 @@ +<?xml version="1.0" encoding="{encoding}"?> +<rss version="2.0"> + <channel> + <link>{urlbase}{url}</link> + <language>en-us</language> diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/rss/map b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/rss/map new file mode 100644 index 0000000..2f777b7 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/rss/map @@ -0,0 +1,10 @@ +default = 'changelog' +mimetype = 'text/xml; charset={encoding}' +header = header.tmpl +changelog = changelog.tmpl +changelogentry = changelogentry.tmpl +filelog = filelog.tmpl +filelogentry = filelogentry.tmpl +tags = tags.tmpl +tagentry = tagentry.tmpl +error = error.tmpl diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/rss/tagentry.tmpl 
b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/rss/tagentry.tmpl new file mode 100644 index 0000000..42fa038 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/rss/tagentry.tmpl @@ -0,0 +1,6 @@ +<item> + <title>{tag|escape}</title> + <link>{urlbase}{url}rev/{node|short}</link> + <description><![CDATA[{tag|strip|escape|addbreaks}]]></description> + <pubDate>{date|rfc822date}</pubDate> +</item> diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/rss/tags.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/rss/tags.tmpl new file mode 100644 index 0000000..93f1e96 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/rss/tags.tmpl @@ -0,0 +1,6 @@ +{header} + <title>{repo|escape}: tags </title> + <description>{repo|escape} tag history</description> + {entriesnotip%tagentry} + </channel> +</rss> diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/spartan/branches.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/spartan/branches.tmpl new file mode 100644 index 0000000..43e3bdb --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/spartan/branches.tmpl @@ -0,0 +1,27 @@ +{header} +<title>{repo|escape}: branches</title> +<link rel="alternate" type="application/atom+xml" + href="{url}atom-branches" title="Atom feed for {repo|escape}: branches"> +<link rel="alternate" type="application/rss+xml" + href="{url}rss-branches" title="RSS feed for {repo|escape}: branches"> +</head> +<body> + +<div class="buttons"> +<a href="{url}log{sessionvars%urlparameter}">changelog</a> +<a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a> +<a href="{url}graph{sessionvars%urlparameter}">graph</a> +<a href="{url}tags{sessionvars%urlparameter}">tags</a> +<a href="{url}file/{node|short}/{sessionvars%urlparameter}">files</a> +<a href="{url}help{sessionvars%urlparameter}">help</a> +<a type="application/rss+xml" href="{url}rss-branches">rss</a> +<a type="application/atom+xml" href="{url}atom-branches">atom</a> +</div> + +<h2>branches:</h2> + +<ul id="tagEntries"> +{entries%branchentry} +</ul> + +{footer} diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/spartan/changelog.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/spartan/changelog.tmpl new file mode 100644 index 0000000..466e681 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/spartan/changelog.tmpl @@ -0,0 +1,44 @@ +{header} +<title>{repo|escape}: changelog</title> +<link rel="alternate" type="application/atom+xml" + href="{url}atom-log" title="Atom feed for {repo|escape}"> +<link rel="alternate" type="application/rss+xml" + href="{url}rss-log" title="RSS feed for {repo|escape}"> +</head> +<body> + +<div class="buttons"> +<a href="{url}shortlog/{rev}{sessionvars%urlparameter}">shortlog</a> +<a href="{url}graph{sessionvars%urlparameter}">graph</a> +<a href="{url}tags{sessionvars%urlparameter}">tags</a> +<a href="{url}branches{sessionvars%urlparameter}">branches</a> +<a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a> +{archives%archiveentry} +<a href="{url}help{sessionvars%urlparameter}">help</a> +<a type="application/rss+xml" href="{url}rss-log">rss</a> +<a type="application/atom+xml" href="{url}atom-log" title="Atom feed for {repo|escape}">atom</a> +</div> + +<h2>changelog for {repo|escape}</h2> + +<form action="{url}log"> +{sessionvars%hiddenformentry} +<p> 
+<label for="search1">search:</label> +<input name="rev" id="search1" type="text" size="30"> +navigate: <small class="navigate">{changenav%nav}</small> +</p> +</form> + +{entries%changelogentry} + +<form action="{url}log"> +{sessionvars%hiddenformentry} +<p> +<label for="search2">search:</label> +<input name="rev" id="search2" type="text" size="30"> +navigate: <small class="navigate">{changenav%nav}</small> +</p> +</form> + +{footer} diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/spartan/changelogentry.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/spartan/changelogentry.tmpl new file mode 100644 index 0000000..e924722 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/spartan/changelogentry.tmpl @@ -0,0 +1,25 @@ +<table class="logEntry parity{parity}"> + <tr> + <th class="age">{date|age}:</th> + <th class="firstline">{desc|strip|firstline|escape|nonempty}</th> + </tr> + <tr> + <th class="revision">changeset {rev}:</th> + <td class="node"><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a></td> + </tr> + {parent%changelogparent} + {child%changelogchild} + {changelogtag} + <tr> + <th class="author">author:</th> + <td class="author">{author|obfuscate}</td> + </tr> + <tr> + <th class="date">date:</th> + <td class="date">{date|date}</td> + </tr> + <tr> + <th class="files"><a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a>:</th> + <td class="files">{files}</td> + </tr> +</table> diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/spartan/changeset.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/spartan/changeset.tmpl new file mode 100644 index 0000000..c9a72a1 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/spartan/changeset.tmpl @@ -0,0 +1,52 @@ +{header} +<title>{repo|escape}: changeset {node|short}</title> +</head> +<body> + +<div class="buttons"> +<a href="{url}log/{rev}{sessionvars%urlparameter}">changelog</a> +<a href="{url}shortlog/{rev}{sessionvars%urlparameter}">shortlog</a> +<a href="{url}graph{sessionvars%urlparameter}">graph</a> +<a href="{url}tags{sessionvars%urlparameter}">tags</a> +<a href="{url}branches{sessionvars%urlparameter}">branches</a> +<a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a> +<a href="{url}raw-rev/{node|short}">raw</a> +{archives%archiveentry} +<a href="{url}help{sessionvars%urlparameter}">help</a> +</div> + +<h2>changeset: {desc|strip|escape|firstline|nonempty}</h2> + +<table id="changesetEntry"> +<tr> + <th class="changeset">changeset {rev}:</th> + <td class="changeset"><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a></td> +</tr> +{parent%changesetparent} +{child%changesetchild} +{changesettag} +<tr> + <th class="author">author:</th> + <td class="author">{author|obfuscate}</td> +</tr> +<tr> + <th class="date">date:</th> + <td class="date">{date|date} ({date|age})</td> +</tr> +<tr> + <th class="files">files:</th> + <td class="files">{files}</td> +</tr> +<tr> + <th class="description">description:</th> + <td class="description">{desc|strip|escape|addbreaks|nonempty}</td> +</tr> +</table> + +<div id="changesetDiff"> +{diff} +</div> + +{footer} + + diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/spartan/error.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/spartan/error.tmpl new file mode 100644 index 0000000..fc2c788 --- /dev/null +++ 
b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/spartan/error.tmpl @@ -0,0 +1,15 @@ +{header} +<title>Mercurial Error</title> +</head> +<body> + +<h2>Mercurial Error</h2> + +<p> +An error occurred while processing your request: +</p> +<p> +{error|escape} +</p> + +{footer} diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/spartan/fileannotate.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/spartan/fileannotate.tmpl new file mode 100644 index 0000000..465fd7e --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/spartan/fileannotate.tmpl @@ -0,0 +1,49 @@ +{header} +<title>{repo|escape}: {file|escape} annotate</title> +</head> +<body> + +<div class="buttons"> +<a href="{url}log/{rev}{sessionvars%urlparameter}">changelog</a> +<a href="{url}shortlog/{rev}{sessionvars%urlparameter}">shortlog</a> +<a href="{url}graph{sessionvars%urlparameter}">graph</a> +<a href="{url}tags{sessionvars%urlparameter}">tags</a> +<a href="{url}branches{sessionvars%urlparameter}">branches</a> +<a href="{url}rev/{node|short}{sessionvars%urlparameter}">changeset</a> +<a href="{url}file/{node|short}{path|urlescape}{sessionvars%urlparameter}">files</a> +<a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">file</a> +<a href="{url}log/{node|short}/{file|urlescape}{sessionvars%urlparameter}">revisions</a> +<a href="{url}raw-annotate/{node|short}/{file|urlescape}">raw</a> +<a href="{url}help{sessionvars%urlparameter}">help</a> +</div> + +<h2>Annotate {file|escape}</h2> + +<table> +<tr> + <td class="metatag">changeset {rev}:</td> + <td><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a></td></tr> +{parent%fileannotateparent} +{child%fileannotatechild} +<tr> + <td class="metatag">author:</td> + <td>{author|obfuscate}</td></tr> +<tr> + <td class="metatag">date:</td> + <td>{date|date} ({date|age})</td> +</tr> +<tr> + <td class="metatag">permissions:</td> + <td>{permissions|permissions}</td> +</tr> +<tr> + <td class="metatag">description:</td> + <td>{desc|strip|escape|addbreaks|nonempty}</td> +</tr> +</table> + +<table cellspacing="0" cellpadding="0"> +{annotate%annotateline} +</table> + +{footer} diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/spartan/filediff.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/spartan/filediff.tmpl new file mode 100644 index 0000000..7640cba --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/spartan/filediff.tmpl @@ -0,0 +1,37 @@ +{header} +<title>{repo|escape}: {file|escape} diff</title> +</head> +<body> + +<div class="buttons"> +<a href="{url}log/{rev}{sessionvars%urlparameter}">changelog</a> +<a href="{url}shortlog/{rev}{sessionvars%urlparameter}">shortlog</a> +<a href="{url}graph{sessionvars%urlparameter}">graph</a> +<a href="{url}tags{sessionvars%urlparameter}">tags</a> +<a href="{url}branches{sessionvars%urlparameter}">branches</a> +<a href="{url}rev/{node|short}{sessionvars%urlparameter}">changeset</a> +<a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">file</a> +<a href="{url}log/{node|short}/{file|urlescape}{sessionvars%urlparameter}">revisions</a> +<a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}">annotate</a> +<a href="{url}raw-diff/{node|short}/{file|urlescape}">raw</a> +<a href="{url}help{sessionvars%urlparameter}">help</a> +</div> + +<h2>{file|escape}</h2> + +<table id="filediffEntry"> +<tr> + <th 
class="revision">revision {rev}:</th> + <td class="revision"><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a></td> +</tr> +{parent%filediffparent} +{child%filediffchild} +</table> + +<div id="fileDiff"> +{diff} +</div> + +{footer} + + diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/spartan/filelog.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/spartan/filelog.tmpl new file mode 100644 index 0000000..d003f79 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/spartan/filelog.tmpl @@ -0,0 +1,29 @@ +{header} +<title>{repo|escape}: {file|escape} history</title> +<link rel="alternate" type="application/atom+xml" + href="{url}atom-log/tip/{file|urlescape}" title="Atom feed for {repo|escape}:{file}"> +<link rel="alternate" type="application/rss+xml" + href="{url}rss-log/tip/{file|urlescape}" title="RSS feed for {repo|escape}:{file}"> +</head> +<body> + +<div class="buttons"> +<a href="{url}log{sessionvars%urlparameter}">changelog</a> +<a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a> +<a href="{url}graph{sessionvars%urlparameter}">graph</a> +<a href="{url}tags{sessionvars%urlparameter}">tags</a> +<a href="{url}branches{sessionvars%urlparameter}">branches</a> +<a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">file</a> +<a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}">annotate</a> +<a href="{url}help{sessionvars%urlparameter}">help</a> +<a type="application/rss+xml" href="{url}rss-log/tip/{file|urlescape}">rss</a> +<a type="application/atom+xml" href="{url}atom-log/tip/{file|urlescape}" title="Atom feed for {repo|escape}:{file}">atom</a> +</div> + +<h2>{file|escape} revision history</h2> + +<p>navigate: <small class="navigate">{nav%filenav}</small></p> + +{entries%filelogentry} + +{footer} diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/spartan/filelogentry.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/spartan/filelogentry.tmpl new file mode 100644 index 0000000..dcbdca8 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/spartan/filelogentry.tmpl @@ -0,0 +1,25 @@ +<table class="logEntry parity{parity}"> + <tr> + <th class="age">{date|age}:</th> + <th class="firstline"><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{desc|strip|firstline|escape|nonempty}</a></th> + </tr> + <tr> + <th class="revision">revision {filerev}:</td> + <td class="node"> + <a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{node|short}</a> + <a href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">(diff)</a> + <a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}">(annotate)</a> + </td> + </tr> + {rename%filelogrename} + <tr> + <th class="author">author:</th> + <td class="author">{author|obfuscate}</td> + </tr> + <tr> + <th class="date">date:</th> + <td class="date">{date|date}</td> + </tr> +</table> + + diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/spartan/filerevision.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/spartan/filerevision.tmpl new file mode 100644 index 0000000..50cbb7a --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/spartan/filerevision.tmpl @@ -0,0 +1,47 @@ +{header} +<title>{repo|escape}:{file|escape}</title> +</head> +<body> + +<div class="buttons"> +<a 
href="{url}log/{rev}{sessionvars%urlparameter}">changelog</a> +<a href="{url}shortlog/{rev}{sessionvars%urlparameter}">shortlog</a> +<a href="{url}graph{sessionvars%urlparameter}">graph</a> +<a href="{url}tags{sessionvars%urlparameter}">tags</a> +<a href="{url}branches{sessionvars%urlparameter}">branches</a> +<a href="{url}rev/{node|short}{sessionvars%urlparameter}">changeset</a> +<a href="{url}file/{node|short}{path|urlescape}{sessionvars%urlparameter}">files</a> +<a href="{url}log/{node|short}/{file|urlescape}{sessionvars%urlparameter}">revisions</a> +<a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}">annotate</a> +<a href="{url}raw-file/{node|short}/{file|urlescape}">raw</a> +<a href="{url}help{sessionvars%urlparameter}">help</a> +</div> + +<h2>{file|escape}</h2> + +<table> +<tr> + <td class="metatag">changeset {rev}:</td> + <td><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a></td></tr> +{parent%filerevparent} +{child%filerevchild} +<tr> + <td class="metatag">author:</td> + <td>{author|obfuscate}</td></tr> +<tr> + <td class="metatag">date:</td> + <td>{date|date} ({date|age})</td></tr> +<tr> + <td class="metatag">permissions:</td> + <td>{permissions|permissions}</td></tr> +<tr> + <td class="metatag">description:</td> + <td>{desc|strip|escape|addbreaks|nonempty}</td> +</tr> +</table> + +<pre> +{text%fileline} +</pre> + +{footer} diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/spartan/footer.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/spartan/footer.tmpl new file mode 100644 index 0000000..afcb2d0 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/spartan/footer.tmpl @@ -0,0 +1,8 @@ +{motd} +<div class="logo"> +<a href="http://mercurial.selenic.com/"> +<img src="{staticurl}hglogo.png" width=75 height=90 border=0 alt="mercurial"></a> +</div> + +</body> +</html> diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/spartan/graph.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/spartan/graph.tmpl new file mode 100644 index 0000000..efc5adc --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/spartan/graph.tmpl @@ -0,0 +1,97 @@ +{header} +<title>{repo|escape}: graph</title> +<link rel="alternate" type="application/atom+xml" + href="{url}atom-tags" title="Atom feed for {repo|escape}: tags"> +<link rel="alternate" type="application/rss+xml" + href="{url}rss-tags" title="RSS feed for {repo|escape}: tags"> +<!--[if IE]><script type="text/javascript" src="{staticurl}excanvas.js"></script><![endif]--> +</head> +<body> + +<div class="buttons"> +<a href="{url}log{sessionvars%urlparameter}">changelog</a> +<a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a> +<a href="{url}tags{sessionvars%urlparameter}">tags</a> +<a href="{url}branches{sessionvars%urlparameter}">branches</a> +<a href="{url}file/{node|short}/{sessionvars%urlparameter}">files</a> +<a href="{url}help{sessionvars%urlparameter}">help</a> +</div> + +<h2>graph</h2> + +<form action="{url}log"> +{sessionvars%hiddenformentry} +<p> +<label for="search1">search:</label> +<input name="rev" id="search1" type="text" size="30"> +navigate: <small class="navigate">{changenav%navgraph}</small> +</p> +</form> + +<noscript>The revision graph only works with JavaScript-enabled browsers.</noscript> + +<div id="wrapper"> +<ul id="nodebgs"></ul> +<canvas id="graph" width="224" height="{canvasheight}"></canvas> +<ul 
id="graphnodes"></ul> +</div> + +<script type="text/javascript" src="{staticurl}graph.js"></script> +<script type="text/javascript"> +<!-- hide script content + +var data = {jsdata|json}; +var graph = new Graph(); +graph.scale({bg_height}); + +graph.edge = function(x0, y0, x1, y1, color) \{ + + this.setColor(color, 0.0, 0.65); + this.ctx.beginPath(); + this.ctx.moveTo(x0, y0); + this.ctx.lineTo(x1, y1); + this.ctx.stroke(); + +} + +var revlink = '<li style="_STYLE"><span class="desc">'; +revlink += '<a href="{url}rev/_NODEID{sessionvars%urlparameter}" title="_NODEID">_DESC</a>'; +revlink += '</span><span class="info">_DATE, by _USER</span></li>'; + +graph.vertex = function(x, y, color, parity, cur) \{ + + this.ctx.beginPath(); + color = this.setColor(color, 0.25, 0.75); + this.ctx.arc(x, y, radius, 0, Math.PI * 2, true); + this.ctx.fill(); + + var bg = '<li class="bg parity' + parity + '"></li>'; + var left = (this.columns + 1) * this.bg_height; + var nstyle = 'padding-left: ' + left + 'px;'; + var item = revlink.replace(/_STYLE/, nstyle); + item = item.replace(/_PARITY/, 'parity' + parity); + item = item.replace(/_NODEID/, cur[0]); + item = item.replace(/_NODEID/, cur[0]); + item = item.replace(/_DESC/, cur[3]); + item = item.replace(/_USER/, cur[4]); + item = item.replace(/_DATE/, cur[5]); + + return [bg, item]; + +} + +graph.render(data); + +// stop hiding script --> +</script> + +<form action="{url}log"> +{sessionvars%hiddenformentry} +<p> +<label for="search1">search:</label> +<input name="rev" id="search1" type="text" size="30"> +navigate: <small class="navigate">{changenav%navgraph}</small> +</p> +</form> + +{footer} diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/spartan/header.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/spartan/header.tmpl new file mode 100644 index 0000000..646b2fe --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/spartan/header.tmpl @@ -0,0 +1,6 @@ +<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"> +<html> +<head> +<link rel="icon" href="{staticurl}hgicon.png" type="image/png"> +<meta name="robots" content="index, nofollow" /> +<link rel="stylesheet" href="{staticurl}style.css" type="text/css" /> diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/spartan/index.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/spartan/index.tmpl new file mode 100644 index 0000000..f399813 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/spartan/index.tmpl @@ -0,0 +1,19 @@ +{header} +<title>Mercurial repositories index</title> +</head> +<body> + +<h2>Mercurial Repositories</h2> + +<table> + <tr> + <td><a href="?sort={sort_name}">Name</a></td> + <td><a href="?sort={sort_description}">Description</a></td> + <td><a href="?sort={sort_contact}">Contact</a></td> + <td><a href="?sort={sort_lastchange}">Last modified</a></td> + <td> </td> + </tr> + {entries%indexentry} +</table> + +{footer} diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/spartan/manifest.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/spartan/manifest.tmpl new file mode 100644 index 0000000..24a0973 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/spartan/manifest.tmpl @@ -0,0 +1,29 @@ +{header} +<title>{repo|escape}: files for changeset {node|short}</title> +</head> +<body> + +<div class="buttons"> +<a 
href="{url}log/{rev}{sessionvars%urlparameter}">changelog</a> +<a href="{url}shortlog/{rev}{sessionvars%urlparameter}">shortlog</a> +<a href="{url}graph{sessionvars%urlparameter}">graph</a> +<a href="{url}tags{sessionvars%urlparameter}">tags</a> +<a href="{url}branches{sessionvars%urlparameter}">branches</a> +<a href="{url}rev/{node|short}{sessionvars%urlparameter}">changeset</a> +{archives%archiveentry} +<a href="{url}help{sessionvars%urlparameter}">help</a> +</div> + +<h2>files for changeset {node|short}: {path|escape}</h2> + +<table cellpadding="0" cellspacing="0"> +<tr class="parity{upparity}"> + <td><tt>drwxr-xr-x</tt> + <td> + <td> + <td><a href="{url}file/{node|short}{up|urlescape}{sessionvars%urlparameter}">[up]</a> +</tr> +{dentries%direntry} +{fentries%fileentry} +</table> +{footer} diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/spartan/map b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/spartan/map new file mode 100644 index 0000000..33df1ad --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/spartan/map @@ -0,0 +1,183 @@ +default = 'shortlog' +mimetype = 'text/html; charset={encoding}' +header = header.tmpl +footer = footer.tmpl +search = search.tmpl +changelog = changelog.tmpl +shortlog = shortlog.tmpl +shortlogentry = shortlogentry.tmpl +graph = graph.tmpl +naventry = '<a href="{url}log/{node|short}{sessionvars%urlparameter}">{label|escape}</a> ' +navshortentry = '<a href="{url}shortlog/{node|short}{sessionvars%urlparameter}">{label|escape}</a> ' +navgraphentry = '<a href="{url}graph/{node|short}{sessionvars%urlparameter}">{label|escape}</a> ' +filenaventry = '<a href="{url}log/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{label|escape}</a> ' +filedifflink = '<a href="{url}diff/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{file|escape}</a> ' +filenodelink = '<a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{file|escape}</a> ' +filenolink = '{file|escape} ' +fileellipses = '...' +changelogentry = changelogentry.tmpl +searchentry = changelogentry.tmpl +changeset = changeset.tmpl +manifest = manifest.tmpl + +nav = '{before%naventry} {after%naventry}' +navshort = '{before%navshortentry}{after%navshortentry}' +navgraph = '{before%navgraphentry}{after%navgraphentry}' +filenav = '{before%filenaventry}{after%filenaventry}' + +direntry = ' + <tr class="parity{parity}"> + <td><tt>drwxr-xr-x</tt> + <td> + <td> + <td> + <a href="{url}file/{node|short}{path|urlescape}{sessionvars%urlparameter}">{basename|escape}/</a> + <a href="{url}file/{node|short}{path|urlescape}/{emptydirs|urlescape}{sessionvars%urlparameter}"> + {emptydirs|urlescape} + </a>' + +fileentry = ' + <tr class="parity{parity}"> + <td><tt>{permissions|permissions}</tt> + <td align=right><tt class="date">{date|isodate}</tt> + <td align=right><tt>{size}</tt> + <td><a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{basename|escape}</a>' + +filerevision = filerevision.tmpl +fileannotate = fileannotate.tmpl +filediff = filediff.tmpl +filelog = filelog.tmpl +fileline = '<div class="parity{parity}"><a class="lineno" href="#{lineid}" id="{lineid}">{linenumber}</a> {line|escape}</div>' +filelogentry = filelogentry.tmpl + +# The ensures that all table cells have content (even if there +# is an empty line in the annotated file), which in turn ensures that +# all table rows have equal height. 
+annotateline = ' + <tr class="parity{parity}"> + <td class="annotate"> + <a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}#l{targetline}" + title="{node|short}: {desc|escape|firstline}">{author|user}@{rev}</a> + </td> + <td> + <a class="lineno" href="#{lineid}" id="{lineid}">{linenumber}</a> + </td> + <td><pre> {line|escape}</pre></td> + </tr>' +difflineplus = '<span class="plusline"><a class="lineno" href="#{lineid}" id="{lineid}">{linenumber}</a>{line|escape}</span>' +difflineminus = '<span class="minusline"><a class="lineno" href="#{lineid}" id="{lineid}">{linenumber}</a>{line|escape}</span>' +difflineat = '<span class="atline"><a class="lineno" href="#{lineid}" id="{lineid}">{linenumber}</a>{line|escape}</span>' +diffline = '<a class="lineno" href="#{lineid}" id="{lineid}">{linenumber}</a>{line|escape}' +changelogparent = ' + <tr> + <th class="parent">parent {rev}:</th> + <td class="parent"> + <a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a> + </td> + </tr>' +changesetparent = ' + <tr> + <th class="parent">parent {rev}:</th> + <td class="parent"><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a></td> + </tr>' +filerevparent = ' + <tr> + <td class="metatag">parent:</td> + <td> + <a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}"> + {rename%filerename}{node|short} + </a> + </td> + </tr>' +filerename = '{file|escape}@' +filelogrename = ' + <tr> + <th>base:</th> + <td> + <a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}"> + {file|escape}@{node|short} + </a> + </td> + </tr>' +fileannotateparent = ' + <tr> + <td class="metatag">parent:</td> + <td> + <a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}"> + {rename%filerename}{node|short} + </a> + </td> + </tr>' +changesetchild = ' + <tr> + <th class="child">child {rev}:</th> + <td class="child"><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a></td> + </tr>' +changelogchild = ' + <tr> + <th class="child">child {rev}:</th> + <td class="child"><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a></td> + </tr>' +filerevchild = ' + <tr> + <td class="metatag">child:</td> + <td><a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{node|short}</a></td> + </tr>' +fileannotatechild = ' + <tr> + <td class="metatag">child:</td> + <td><a href="{url}annotate/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{node|short}</a></td> + </tr>' +tags = tags.tmpl +tagentry = ' + <li class="tagEntry parity{parity}"> + <tt class="node">{node}</tt> + <a href="{url}rev/{node|short}{sessionvars%urlparameter}">{tag|escape}</a> + </li>' +branches = branches.tmpl +branchentry = ' + <li class="tagEntry parity{parity}"> + <tt class="node">{node}</tt> + <a href="{url}shortlog/{node|short}{sessionvars%urlparameter}" class="{status}">{branch|escape}</a> + </li>' +diffblock = '<pre class="parity{parity}">{lines}</pre>' +changelogtag = '<tr><th class="tag">tag:</th><td class="tag">{tag|escape}</td></tr>' +changesettag = '<tr><th class="tag">tag:</th><td class="tag">{tag|escape}</td></tr>' +filediffparent = ' + <tr> + <th class="parent">parent {rev}:</th> + <td class="parent"><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a></td> + </tr>' +filelogparent = ' + <tr> + <th>parent {rev}:</th> + <td><a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{node|short}</a></td> + </tr>' +filediffchild = ' + <tr> + 
<th class="child">child {rev}:</th> + <td class="child"><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a></td> + </tr>' +filelogchild = ' + <tr> + <th>child {rev}:</th> + <td><a href="{url}file/{node|short}/{file|urlescape}{sessionvars%urlparameter}">{node|short}</a></td> + </tr>' +indexentry = ' + <tr class="parity{parity}"> + <td><a href="{url}{sessionvars%urlparameter}">{name|escape}</a></td> + <td>{description}</td> + <td>{contact|obfuscate}</td> + <td class="age">{lastchange|age}</td> + <td class="indexlinks"> + <a href="{url}rss-log">RSS</a> + <a href="{url}atom-log">Atom</a> + {archives%archiveentry} + </td> + </tr>' +index = index.tmpl +archiveentry = '<a href="{url}archive/{node|short}{extension|urlescape}">{type|escape}</a> ' +notfound = notfound.tmpl +error = error.tmpl +urlparameter = '{separator}{name}={value|urlescape}' +hiddenformentry = '<input type="hidden" name="{name}" value="{value|escape}" />' diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/spartan/notfound.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/spartan/notfound.tmpl new file mode 100644 index 0000000..e9e6ba4 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/spartan/notfound.tmpl @@ -0,0 +1,12 @@ +{header} +<title>Mercurial repository not found</title> +</head> +<body> + +<h2>Mercurial repository not found</h2> + +The specified repository "{repo|escape}" is unknown, sorry. + +Please go back to the <a href="{url}">main repository list page</a>. + +{footer} diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/spartan/search.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/spartan/search.tmpl new file mode 100644 index 0000000..9ace1eb --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/spartan/search.tmpl @@ -0,0 +1,37 @@ +{header} +<title>{repo|escape}: searching for {query|escape}</title> +</head> +<body> + +<div class="buttons"> +<a href="{url}log{sessionvars%urlparameter}">changelog</a> +<a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a> +<a href="{url}graph{sessionvars%urlparameter}">graph</a> +<a href="{url}tags{sessionvars%urlparameter}">tags</a> +<a href="{url}branches{sessionvars%urlparameter}">branches</a> +<a href="{url}file/{node|short}{sessionvars%urlparameter}">files</a> +{archives%archiveentry} +<a href="{url}help{sessionvars%urlparameter}">help</a> +</div> + +<h2>searching for {query|escape}</h2> + +<form> +{sessionvars%hiddenformentry} +<p> +search: +<input name="rev" type="text" width="30" value="{query|escape}"> +</p> +</form> + +{entries} + +<form> +{sessionvars%hiddenformentry} +<p> +search: +<input name="rev" type="text" width="30" value="{query|escape}"> +</p> +</form> + +{footer} diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/spartan/shortlog.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/spartan/shortlog.tmpl new file mode 100644 index 0000000..1f70a53 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/spartan/shortlog.tmpl @@ -0,0 +1,44 @@ +{header} +<title>{repo|escape}: shortlog</title> +<link rel="alternate" type="application/atom+xml" + href="{url}atom-log" title="Atom feed for {repo|escape}"> +<link rel="alternate" type="application/rss+xml" + href="{url}rss-log" title="RSS feed for {repo|escape}"> +</head> +<body> + +<div class="buttons"> +<a 
href="{url}log/{rev}{sessionvars%urlparameter}">changelog</a> +<a href="{url}graph{sessionvars%urlparameter}">graph</a> +<a href="{url}tags{sessionvars%urlparameter}">tags</a> +<a href="{url}branches{sessionvars%urlparameter}">branches</a> +<a href="{url}file/{node|short}/{sessionvars%urlparameter}">files</a> +{archives%archiveentry} +<a href="{url}help{sessionvars%urlparameter}">help</a> +<a type="application/rss+xml" href="{url}rss-log">rss</a> +<a type="application/rss+xml" href="{url}atom-log" title="Atom feed for {repo|escape}">atom</a> +</div> + +<h2>shortlog for {repo|escape}</h2> + +<form action="{url}log"> +{sessionvars%hiddenformentry} +<p> +<label for="search1">search:</label> +<input name="rev" id="search1" type="text" size="30"> +navigate: <small class="navigate">{changenav%navshort}</small> +</p> +</form> + +{entries%shortlogentry} + +<form action="{url}log"> +{sessionvars%hiddenformentry} +<p> +<label for="search2">search:</label> +<input name="rev" id="search2" type="text" size="30"> +navigate: <small class="navigate">{changenav%navshort}</small> +</p> +</form> + +{footer} diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/spartan/shortlogentry.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/spartan/shortlogentry.tmpl new file mode 100644 index 0000000..b6857db --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/spartan/shortlogentry.tmpl @@ -0,0 +1,7 @@ +<table class="slogEntry parity{parity}"> + <tr> + <td class="age">{date|age}</td> + <td class="author">{author|person}</td> + <td class="node"><a href="{url}rev/{node|short}{sessionvars%urlparameter}">{desc|strip|firstline|escape|nonempty}</a></td> + </tr> +</table> diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/spartan/tags.tmpl b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/spartan/tags.tmpl new file mode 100644 index 0000000..4c41551 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/spartan/tags.tmpl @@ -0,0 +1,27 @@ +{header} +<title>{repo|escape}: tags</title> +<link rel="alternate" type="application/atom+xml" + href="{url}atom-tags" title="Atom feed for {repo|escape}: tags"> +<link rel="alternate" type="application/rss+xml" + href="{url}rss-tags" title="RSS feed for {repo|escape}: tags"> +</head> +<body> + +<div class="buttons"> +<a href="{url}log{sessionvars%urlparameter}">changelog</a> +<a href="{url}shortlog{sessionvars%urlparameter}">shortlog</a> +<a href="{url}graph{sessionvars%urlparameter}">graph</a> +<a href="{url}branches{sessionvars%urlparameter}">branches</a> +<a href="{url}file/{node|short}/{sessionvars%urlparameter}">files</a> +<a href="{url}help{sessionvars%urlparameter}">help</a> +<a type="application/rss+xml" href="{url}rss-tags">rss</a> +<a type="application/atom+xml" href="{url}atom-tags">atom</a> +</div> + +<h2>tags:</h2> + +<ul id="tagEntries"> +{entries%tagentry} +</ul> + +{footer} diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/static/background.png b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/static/background.png Binary files differnew file mode 100644 index 0000000..af8a0aa --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/static/background.png diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/static/coal-file.png b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/static/coal-file.png Binary files differnew 
file mode 100644 index 0000000..7ecf463 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/static/coal-file.png diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/static/coal-folder.png b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/static/coal-folder.png Binary files differnew file mode 100644 index 0000000..d1b8ecc --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/static/coal-folder.png diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/static/excanvas.js b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/static/excanvas.js new file mode 100644 index 0000000..9d71658 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/static/excanvas.js @@ -0,0 +1,19 @@ +if(!window.CanvasRenderingContext2D){(function(){var I=Math,i=I.round,L=I.sin,M=I.cos,m=10,A=m/2,Q={init:function(a){var b=a||document;if(/MSIE/.test(navigator.userAgent)&&!window.opera){var c=this;b.attachEvent("onreadystatechange",function(){c.r(b)})}},r:function(a){if(a.readyState=="complete"){if(!a.namespaces["s"]){a.namespaces.add("g_vml_","urn:schemas-microsoft-com:vml")}var b=a.createStyleSheet();b.cssText="canvas{display:inline-block;overflow:hidden;text-align:left;width:300px;height:150px}g_vml_\\:*{behavior:url(#default#VML)}"; +var c=a.getElementsByTagName("canvas");for(var d=0;d<c.length;d++){if(!c[d].getContext){this.initElement(c[d])}}}},q:function(a){var b=a.outerHTML,c=a.ownerDocument.createElement(b);if(b.slice(-2)!="/>"){var d="/"+a.tagName,e;while((e=a.nextSibling)&&e.tagName!=d){e.removeNode()}if(e){e.removeNode()}}a.parentNode.replaceChild(c,a);return c},initElement:function(a){a=this.q(a);a.getContext=function(){if(this.l){return this.l}return this.l=new K(this)};a.attachEvent("onpropertychange",V);a.attachEvent("onresize", +W);var b=a.attributes;if(b.width&&b.width.specified){a.style.width=b.width.nodeValue+"px"}else{a.width=a.clientWidth}if(b.height&&b.height.specified){a.style.height=b.height.nodeValue+"px"}else{a.height=a.clientHeight}return a}};function V(a){var b=a.srcElement;switch(a.propertyName){case "width":b.style.width=b.attributes.width.nodeValue+"px";b.getContext().clearRect();break;case "height":b.style.height=b.attributes.height.nodeValue+"px";b.getContext().clearRect();break}}function W(a){var b=a.srcElement; +if(b.firstChild){b.firstChild.style.width=b.clientWidth+"px";b.firstChild.style.height=b.clientHeight+"px"}}Q.init();var R=[];for(var E=0;E<16;E++){for(var F=0;F<16;F++){R[E*16+F]=E.toString(16)+F.toString(16)}}function J(){return[[1,0,0],[0,1,0],[0,0,1]]}function G(a,b){var c=J();for(var d=0;d<3;d++){for(var e=0;e<3;e++){var g=0;for(var h=0;h<3;h++){g+=a[d][h]*b[h][e]}c[d][e]=g}}return c}function N(a,b){b.fillStyle=a.fillStyle;b.lineCap=a.lineCap;b.lineJoin=a.lineJoin;b.lineWidth=a.lineWidth;b.miterLimit= +a.miterLimit;b.shadowBlur=a.shadowBlur;b.shadowColor=a.shadowColor;b.shadowOffsetX=a.shadowOffsetX;b.shadowOffsetY=a.shadowOffsetY;b.strokeStyle=a.strokeStyle;b.d=a.d;b.e=a.e}function O(a){var b,c=1;a=String(a);if(a.substring(0,3)=="rgb"){var d=a.indexOf("(",3),e=a.indexOf(")",d+1),g=a.substring(d+1,e).split(",");b="#";for(var h=0;h<3;h++){b+=R[Number(g[h])]}if(g.length==4&&a.substr(3,1)=="a"){c=g[3]}}else{b=a}return[b,c]}function S(a){switch(a){case "butt":return"flat";case "round":return"round"; +case "square":default:return"square"}}function 
K(a){this.a=J();this.m=[];this.k=[];this.c=[];this.strokeStyle="#000";this.fillStyle="#000";this.lineWidth=1;this.lineJoin="miter";this.lineCap="butt";this.miterLimit=m*1;this.globalAlpha=1;this.canvas=a;var b=a.ownerDocument.createElement("div");b.style.width=a.clientWidth+"px";b.style.height=a.clientHeight+"px";b.style.overflow="hidden";b.style.position="absolute";a.appendChild(b);this.j=b;this.d=1;this.e=1}var j=K.prototype;j.clearRect=function(){this.j.innerHTML= +"";this.c=[]};j.beginPath=function(){this.c=[]};j.moveTo=function(a,b){this.c.push({type:"moveTo",x:a,y:b});this.f=a;this.g=b};j.lineTo=function(a,b){this.c.push({type:"lineTo",x:a,y:b});this.f=a;this.g=b};j.bezierCurveTo=function(a,b,c,d,e,g){this.c.push({type:"bezierCurveTo",cp1x:a,cp1y:b,cp2x:c,cp2y:d,x:e,y:g});this.f=e;this.g=g};j.quadraticCurveTo=function(a,b,c,d){var e=this.f+0.6666666666666666*(a-this.f),g=this.g+0.6666666666666666*(b-this.g),h=e+(c-this.f)/3,l=g+(d-this.g)/3;this.bezierCurveTo(e, +g,h,l,c,d)};j.arc=function(a,b,c,d,e,g){c*=m;var h=g?"at":"wa",l=a+M(d)*c-A,n=b+L(d)*c-A,o=a+M(e)*c-A,f=b+L(e)*c-A;if(l==o&&!g){l+=0.125}this.c.push({type:h,x:a,y:b,radius:c,xStart:l,yStart:n,xEnd:o,yEnd:f})};j.rect=function(a,b,c,d){this.moveTo(a,b);this.lineTo(a+c,b);this.lineTo(a+c,b+d);this.lineTo(a,b+d);this.closePath()};j.strokeRect=function(a,b,c,d){this.beginPath();this.moveTo(a,b);this.lineTo(a+c,b);this.lineTo(a+c,b+d);this.lineTo(a,b+d);this.closePath();this.stroke()};j.fillRect=function(a, +b,c,d){this.beginPath();this.moveTo(a,b);this.lineTo(a+c,b);this.lineTo(a+c,b+d);this.lineTo(a,b+d);this.closePath();this.fill()};j.createLinearGradient=function(a,b,c,d){var e=new H("gradient");return e};j.createRadialGradient=function(a,b,c,d,e,g){var h=new H("gradientradial");h.n=c;h.o=g;h.i.x=a;h.i.y=b;return h};j.drawImage=function(a,b){var c,d,e,g,h,l,n,o,f=a.runtimeStyle.width,k=a.runtimeStyle.height;a.runtimeStyle.width="auto";a.runtimeStyle.height="auto";var q=a.width,r=a.height;a.runtimeStyle.width= +f;a.runtimeStyle.height=k;if(arguments.length==3){c=arguments[1];d=arguments[2];h=(l=0);n=(e=q);o=(g=r)}else if(arguments.length==5){c=arguments[1];d=arguments[2];e=arguments[3];g=arguments[4];h=(l=0);n=q;o=r}else if(arguments.length==9){h=arguments[1];l=arguments[2];n=arguments[3];o=arguments[4];c=arguments[5];d=arguments[6];e=arguments[7];g=arguments[8]}else{throw"Invalid number of arguments";}var s=this.b(c,d),t=[],v=10,w=10;t.push(" <g_vml_:group",' coordsize="',m*v,",",m*w,'"',' coordorigin="0,0"', +' style="width:',v,";height:",w,";position:absolute;");if(this.a[0][0]!=1||this.a[0][1]){var x=[];x.push("M11='",this.a[0][0],"',","M12='",this.a[1][0],"',","M21='",this.a[0][1],"',","M22='",this.a[1][1],"',","Dx='",i(s.x/m),"',","Dy='",i(s.y/m),"'");var p=s,y=this.b(c+e,d),z=this.b(c,d+g),B=this.b(c+e,d+g);p.x=Math.max(p.x,y.x,z.x,B.x);p.y=Math.max(p.y,y.y,z.y,B.y);t.push("padding:0 ",i(p.x/m),"px ",i(p.y/m),"px 0;filter:progid:DXImageTransform.Microsoft.Matrix(",x.join(""),", sizingmethod='clip');")}else{t.push("top:", +i(s.y/m),"px;left:",i(s.x/m),"px;")}t.push(' ">','<g_vml_:image src="',a.src,'"',' style="width:',m*e,";"," height:",m*g,';"',' cropleft="',h/q,'"',' croptop="',l/r,'"',' cropright="',(q-h-n)/q,'"',' cropbottom="',(r-l-o)/r,'"'," />","</g_vml_:group>");this.j.insertAdjacentHTML("BeforeEnd",t.join(""))};j.stroke=function(a){var b=[],c=O(a?this.fillStyle:this.strokeStyle),d=c[0],e=c[1]*this.globalAlpha,g=10,h=10;b.push("<g_vml_:shape",' fillcolor="',d,'"',' filled="',Boolean(a),'"',' 
style="position:absolute;width:', +g,";height:",h,';"',' coordorigin="0 0" coordsize="',m*g," ",m*h,'"',' stroked="',!a,'"',' strokeweight="',this.lineWidth,'"',' strokecolor="',d,'"',' path="');var l={x:null,y:null},n={x:null,y:null};for(var o=0;o<this.c.length;o++){var f=this.c[o];if(f.type=="moveTo"){b.push(" m ");var k=this.b(f.x,f.y);b.push(i(k.x),",",i(k.y))}else if(f.type=="lineTo"){b.push(" l ");var k=this.b(f.x,f.y);b.push(i(k.x),",",i(k.y))}else if(f.type=="close"){b.push(" x ")}else if(f.type=="bezierCurveTo"){b.push(" c "); +var k=this.b(f.x,f.y),q=this.b(f.cp1x,f.cp1y),r=this.b(f.cp2x,f.cp2y);b.push(i(q.x),",",i(q.y),",",i(r.x),",",i(r.y),",",i(k.x),",",i(k.y))}else if(f.type=="at"||f.type=="wa"){b.push(" ",f.type," ");var k=this.b(f.x,f.y),s=this.b(f.xStart,f.yStart),t=this.b(f.xEnd,f.yEnd);b.push(i(k.x-this.d*f.radius),",",i(k.y-this.e*f.radius)," ",i(k.x+this.d*f.radius),",",i(k.y+this.e*f.radius)," ",i(s.x),",",i(s.y)," ",i(t.x),",",i(t.y))}if(k){if(l.x==null||k.x<l.x){l.x=k.x}if(n.x==null||k.x>n.x){n.x=k.x}if(l.y== +null||k.y<l.y){l.y=k.y}if(n.y==null||k.y>n.y){n.y=k.y}}}b.push(' ">');if(typeof this.fillStyle=="object"){var v={x:"50%",y:"50%"},w=n.x-l.x,x=n.y-l.y,p=w>x?w:x;v.x=i(this.fillStyle.i.x/w*100+50)+"%";v.y=i(this.fillStyle.i.y/x*100+50)+"%";var y=[];if(this.fillStyle.p=="gradientradial"){var z=this.fillStyle.n/p*100,B=this.fillStyle.o/p*100-z}else{var z=0,B=100}var C={offset:null,color:null},D={offset:null,color:null};this.fillStyle.h.sort(function(T,U){return T.offset-U.offset});for(var o=0;o<this.fillStyle.h.length;o++){var u= +this.fillStyle.h[o];y.push(u.offset*B+z,"% ",u.color,",");if(u.offset>C.offset||C.offset==null){C.offset=u.offset;C.color=u.color}if(u.offset<D.offset||D.offset==null){D.offset=u.offset;D.color=u.color}}y.pop();b.push("<g_vml_:fill",' color="',D.color,'"',' color2="',C.color,'"',' type="',this.fillStyle.p,'"',' focusposition="',v.x,", ",v.y,'"',' colors="',y.join(""),'"',' opacity="',e,'" />')}else if(a){b.push('<g_vml_:fill color="',d,'" opacity="',e,'" />')}else{b.push("<g_vml_:stroke",' opacity="', +e,'"',' joinstyle="',this.lineJoin,'"',' miterlimit="',this.miterLimit,'"',' endcap="',S(this.lineCap),'"',' weight="',this.lineWidth,'px"',' color="',d,'" />')}b.push("</g_vml_:shape>");this.j.insertAdjacentHTML("beforeEnd",b.join(""));this.c=[]};j.fill=function(){this.stroke(true)};j.closePath=function(){this.c.push({type:"close"})};j.b=function(a,b){return{x:m*(a*this.a[0][0]+b*this.a[1][0]+this.a[2][0])-A,y:m*(a*this.a[0][1]+b*this.a[1][1]+this.a[2][1])-A}};j.save=function(){var a={};N(this,a); +this.k.push(a);this.m.push(this.a);this.a=G(J(),this.a)};j.restore=function(){N(this.k.pop(),this);this.a=this.m.pop()};j.translate=function(a,b){var c=[[1,0,0],[0,1,0],[a,b,1]];this.a=G(c,this.a)};j.rotate=function(a){var b=M(a),c=L(a),d=[[b,c,0],[-c,b,0],[0,0,1]];this.a=G(d,this.a)};j.scale=function(a,b){this.d*=a;this.e*=b;var c=[[a,0,0],[0,b,0],[0,0,1]];this.a=G(c,this.a)};j.clip=function(){};j.arcTo=function(){};j.createPattern=function(){return new P};function H(a){this.p=a;this.n=0;this.o= +0;this.h=[];this.i={x:0,y:0}}H.prototype.addColorStop=function(a,b){b=O(b);this.h.push({offset:1-a,color:b})};function P(){}G_vmlCanvasManager=Q;CanvasRenderingContext2D=K;CanvasGradient=H;CanvasPattern=P})()}; diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/static/graph.js b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/static/graph.js new file mode 100644 index 0000000..0d4dcdd --- /dev/null +++ 
b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/static/graph.js @@ -0,0 +1,137 @@ +// branch_renderer.js - Rendering of branch DAGs on the client side +// +// Copyright 2008 Dirkjan Ochtman <dirkjan AT ochtman DOT nl> +// Copyright 2006 Alexander Schremmer <alex AT alexanderweb DOT de> +// +// derived from code written by Scott James Remnant <scott@ubuntu.com> +// Copyright 2005 Canonical Ltd. +// +// This software may be used and distributed according to the terms +// of the GNU General Public License, incorporated herein by reference. + +var colors = [ + [ 1.0, 0.0, 0.0 ], + [ 1.0, 1.0, 0.0 ], + [ 0.0, 1.0, 0.0 ], + [ 0.0, 1.0, 1.0 ], + [ 0.0, 0.0, 1.0 ], + [ 1.0, 0.0, 1.0 ] +]; + +function Graph() { + + this.canvas = document.getElementById('graph'); + if (navigator.userAgent.indexOf('MSIE') >= 0) this.canvas = window.G_vmlCanvasManager.initElement(this.canvas); + this.ctx = this.canvas.getContext('2d'); + this.ctx.strokeStyle = 'rgb(0, 0, 0)'; + this.ctx.fillStyle = 'rgb(0, 0, 0)'; + this.cur = [0, 0]; + this.line_width = 3; + this.bg = [0, 4]; + this.cell = [2, 0]; + this.columns = 0; + this.revlink = ''; + + this.scale = function(height) { + this.bg_height = height; + this.box_size = Math.floor(this.bg_height / 1.2); + this.cell_height = this.box_size; + } + + function colorPart(num) { + num *= 255 + num = num < 0 ? 0 : num; + num = num > 255 ? 255 : num; + var digits = Math.round(num).toString(16); + if (num < 16) { + return '0' + digits; + } else { + return digits; + } + } + + this.setColor = function(color, bg, fg) { + + // Set the colour. + // + // Picks a distinct colour based on an internal wheel; the bg + // parameter provides the value that should be assigned to the 'zero' + // colours and the fg parameter provides the multiplier that should be + // applied to the foreground colours. 
+ + color %= colors.length; + var red = (colors[color][0] * fg) || bg; + var green = (colors[color][1] * fg) || bg; + var blue = (colors[color][2] * fg) || bg; + red = Math.round(red * 255); + green = Math.round(green * 255); + blue = Math.round(blue * 255); + var s = 'rgb(' + red + ', ' + green + ', ' + blue + ')'; + this.ctx.strokeStyle = s; + this.ctx.fillStyle = s; + return s; + + } + + this.render = function(data) { + + var backgrounds = ''; + var nodedata = ''; + + for (var i in data) { + + var parity = i % 2; + this.cell[1] += this.bg_height; + this.bg[1] += this.bg_height; + + var cur = data[i]; + var node = cur[1]; + var edges = cur[2]; + var fold = false; + + for (var j in edges) { + + line = edges[j]; + start = line[0]; + end = line[1]; + color = line[2]; + + if (end > this.columns || start > this.columns) { + this.columns += 1; + } + + if (start == this.columns && start > end) { + var fold = true; + } + + x0 = this.cell[0] + this.box_size * start + this.box_size / 2; + y0 = this.bg[1] - this.bg_height / 2; + x1 = this.cell[0] + this.box_size * end + this.box_size / 2; + y1 = this.bg[1] + this.bg_height / 2; + + this.edge(x0, y0, x1, y1, color); + + } + + // Draw the revision node in the right column + + column = node[0] + color = node[1] + + radius = this.box_size / 8; + x = this.cell[0] + this.box_size * column + this.box_size / 2; + y = this.bg[1] - this.bg_height / 2; + var add = this.vertex(x, y, color, parity, cur); + backgrounds += add[0]; + nodedata += add[1]; + + if (fold) this.columns -= 1; + + } + + document.getElementById('nodebgs').innerHTML += backgrounds; + document.getElementById('graphnodes').innerHTML += nodedata; + + } + +} diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/static/hgicon.png b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/static/hgicon.png Binary files differnew file mode 100644 index 0000000..60effbc --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/static/hgicon.png diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/static/hglogo.png b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/static/hglogo.png Binary files differnew file mode 100644 index 0000000..adc6e65 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/static/hglogo.png diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/static/style-coal.css b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/static/style-coal.css new file mode 100644 index 0000000..6f633e1 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/static/style-coal.css @@ -0,0 +1,265 @@ +body { + margin: 0; + padding: 0; + background: black url(background.png) repeat-x; + font-family: sans-serif; +} + +.container { + padding-right: 150px; +} + +.main { + position: relative; + background: white; + padding: 2em; + border-right: 15px solid black; + border-bottom: 15px solid black; +} + +#.main { + width: 98%; +} + +.overflow { + width: 100%; + overflow: auto; +} + +.menu { + background: #999; + padding: 10px; + width: 75px; + margin: 0; + font-size: 80%; + text-align: left; + position: fixed; + top: 27px; + left: auto; + right: 27px; +} + +#.menu { + position: absolute !important; + top:expression(eval(document.body.scrollTop + 27)); +} + +.menu ul { + list-style: none; + padding: 0; + margin: 10px 0 0 0; +} + +.menu li { + margin-bottom: 3px; + padding: 2px 4px; + background: white; + color: black; + 
font-weight: normal; +} + +.menu li.active { + background: black; + color: white; +} + +.menu img { + width: 75px; + height: 90px; + border: 0; +} + +.menu a { color: black; display: block; } + +.search { + position: absolute; + top: .7em; + right: 2em; +} + +form.search div#hint { + display: none; + position: absolute; + top: 40px; + right: 0px; + width: 190px; + padding: 5px; + background: #ffc; + font-size: 70%; + border: 1px solid yellow; + -moz-border-radius: 5px; /* this works only in camino/firefox */ + -webkit-border-radius: 5px; /* this is just for Safari */ +} + +form.search:hover div#hint { display: block; } + +a { text-decoration:none; } +.age { white-space:nowrap; } +.date { white-space:nowrap; } +.indexlinks { white-space:nowrap; } +.parity0 { background-color: #f0f0f0; } +.parity1 { background-color: white; } +.plusline { color: green; } +.minusline { color: #dc143c; } /* crimson */ +.atline { color: purple; } + +.navigate { + text-align: right; + font-size: 60%; + margin: 1em 0; +} + +.tag { + color: #999; + font-size: 70%; + font-weight: normal; + margin-left: .5em; + vertical-align: baseline; +} + +.branchhead { + color: #000; + font-size: 80%; + font-weight: normal; + margin-left: .5em; + vertical-align: baseline; +} + +ul#graphnodes .branchhead { + font-size: 75%; +} + +.branchname { + color: #000; + font-size: 60%; + font-weight: normal; + margin-left: .5em; + vertical-align: baseline; +} + +h3 .branchname { + font-size: 80%; +} + +/* Common */ +pre { margin: 0; } + +h2 { font-size: 120%; border-bottom: 1px solid #999; } +h2 a { color: #000; } +h3 { + margin-top: -.7em; + font-size: 100%; +} + +/* log and tags tables */ +.bigtable { + border-bottom: 1px solid #999; + border-collapse: collapse; + font-size: 90%; + width: 100%; + font-weight: normal; + text-align: left; +} + +.bigtable td { + vertical-align: top; +} + +.bigtable th { + padding: 1px 4px; + border-bottom: 1px solid #999; +} +.bigtable tr { border: none; } +.bigtable .age { width: 6em; } +.bigtable .author { width: 12em; } +.bigtable .description { } +.bigtable .node { width: 5em; font-family: monospace;} +.bigtable .lineno { width: 2em; text-align: right;} +.bigtable .lineno a { color: #999; font-size: smaller; font-family: monospace;} +.bigtable .permissions { width: 8em; text-align: left;} +.bigtable .size { width: 5em; text-align: right; } +.bigtable .annotate { text-align: right; } +.bigtable td.annotate { font-size: smaller; } +.bigtable td.source { font-size: inherit; } + +.source, .sourcefirst, .sourcelast { + font-family: monospace; + white-space: pre; + padding: 1px 4px; + font-size: 90%; +} +.sourcefirst { border-bottom: 1px solid #999; font-weight: bold; } +.sourcelast { border-top: 1px solid #999; } +.source a { color: #999; font-size: smaller; font-family: monospace;} +.bottomline { border-bottom: 1px solid #999; } + +.fileline { font-family: monospace; } +.fileline img { border: 0; } + +.tagEntry .closed { color: #99f; } + +/* Changeset entry */ +#changesetEntry { + border-collapse: collapse; + font-size: 90%; + width: 100%; + margin-bottom: 1em; +} + +#changesetEntry th { + padding: 1px 4px; + width: 4em; + text-align: right; + font-weight: normal; + color: #999; + margin-right: .5em; + vertical-align: top; +} + +div.description { + border-left: 3px solid #999; + margin: 1em 0 1em 0; + padding: .3em; +} + +/* Graph */ +div#wrapper { + position: relative; + border-top: 1px solid black; + border-bottom: 1px solid black; + margin: 0; + padding: 0; +} + +canvas { + position: absolute; + z-index: 
5; + top: -0.7em; + margin: 0; +} + +ul#graphnodes { + position: absolute; + z-index: 10; + top: -1.0em; + list-style: none inside none; + padding: 0; +} + +ul#nodebgs { + list-style: none inside none; + padding: 0; + margin: 0; + top: -0.7em; +} + +ul#graphnodes li, ul#nodebgs li { + height: 39px; +} + +ul#graphnodes li .info { + display: block; + font-size: 70%; + position: relative; + top: -3px; +} diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/static/style-gitweb.css b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/static/style-gitweb.css new file mode 100644 index 0000000..b13c62e --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/static/style-gitweb.css @@ -0,0 +1,124 @@ +body { font-family: sans-serif; font-size: 12px; margin:0px; border:solid #d9d8d1; border-width:1px; margin:10px; } +a { color:#0000cc; } +a:hover, a:visited, a:active { color:#880000; } +div.page_header { height:25px; padding:8px; font-size:18px; font-weight:bold; background-color:#d9d8d1; } +div.page_header a:visited { color:#0000cc; } +div.page_header a:hover { color:#880000; } +div.page_nav { padding:8px; } +div.page_nav a:visited { color:#0000cc; } +div.page_path { padding:8px; border:solid #d9d8d1; border-width:0px 0px 1px} +div.page_footer { padding:4px 8px; background-color: #d9d8d1; } +div.page_footer_text { float:left; color:#555555; font-style:italic; } +div.page_body { padding:8px; } +div.title, a.title { + display:block; padding:6px 8px; + font-weight:bold; background-color:#edece6; text-decoration:none; color:#000000; +} +a.title:hover { background-color: #d9d8d1; } +div.title_text { padding:6px 0px; border: solid #d9d8d1; border-width:0px 0px 1px; } +div.log_body { padding:8px 8px 8px 150px; } +.age { white-space:nowrap; } +span.age { position:relative; float:left; width:142px; font-style:italic; } +div.log_link { + padding:0px 8px; + font-size:10px; font-family:sans-serif; font-style:normal; + position:relative; float:left; width:136px; +} +div.list_head { padding:6px 8px 4px; border:solid #d9d8d1; border-width:1px 0px 0px; font-style:italic; } +a.list { text-decoration:none; color:#000000; } +a.list:hover { text-decoration:underline; color:#880000; } +table { padding:8px 4px; } +th { padding:2px 5px; font-size:12px; text-align:left; } +tr.light:hover, .parity0:hover { background-color:#edece6; } +tr.dark, .parity1 { background-color:#f6f6f0; } +tr.dark:hover, .parity1:hover { background-color:#edece6; } +td { padding:2px 5px; font-size:12px; vertical-align:top; } +td.closed { background-color: #99f; } +td.link { padding:2px 5px; font-family:sans-serif; font-size:10px; } +td.indexlinks { white-space: nowrap; } +td.indexlinks a { + padding: 2px 5px; line-height: 10px; + border: 1px solid; + color: #ffffff; background-color: #7777bb; + border-color: #aaaadd #333366 #333366 #aaaadd; + font-weight: bold; text-align: center; text-decoration: none; + font-size: 10px; +} +td.indexlinks a:hover { background-color: #6666aa; } +div.pre { font-family:monospace; font-size:12px; white-space:pre; } +div.diff_info { font-family:monospace; color:#000099; background-color:#edece6; font-style:italic; } +div.index_include { border:solid #d9d8d1; border-width:0px 0px 1px; padding:12px 8px; } +div.search { margin:4px 8px; position:absolute; top:56px; right:12px } +.linenr { color:#999999; text-decoration:none } +div.rss_logo { float: right; white-space: nowrap; } +div.rss_logo a { + padding:3px 6px; line-height:10px; + border:1px solid; 
border-color:#fcc7a5 #7d3302 #3e1a01 #ff954e; + color:#ffffff; background-color:#ff6600; + font-weight:bold; font-family:sans-serif; font-size:10px; + text-align:center; text-decoration:none; +} +div.rss_logo a:hover { background-color:#ee5500; } +pre { margin: 0; } +span.logtags span { + padding: 0px 4px; + font-size: 10px; + font-weight: normal; + border: 1px solid; + background-color: #ffaaff; + border-color: #ffccff #ff00ee #ff00ee #ffccff; +} +span.logtags span.tagtag { + background-color: #ffffaa; + border-color: #ffffcc #ffee00 #ffee00 #ffffcc; +} +span.logtags span.branchtag { + background-color: #aaffaa; + border-color: #ccffcc #00cc33 #00cc33 #ccffcc; +} +span.logtags span.inbranchtag { + background-color: #d5dde6; + border-color: #e3ecf4 #9398f4 #9398f4 #e3ecf4; +} + +/* Graph */ +div#wrapper { + position: relative; + margin: 0; + padding: 0; + margin-top: 3px; +} + +canvas { + position: absolute; + z-index: 5; + top: -0.9em; + margin: 0; +} + +ul#nodebgs { + list-style: none inside none; + padding: 0; + margin: 0; + top: -0.7em; +} + +ul#graphnodes li, ul#nodebgs li { + height: 39px; +} + +ul#graphnodes { + position: absolute; + z-index: 10; + top: -0.8em; + list-style: none inside none; + padding: 0; +} + +ul#graphnodes li .info { + display: block; + font-size: 100%; + position: relative; + top: -3px; + font-style: italic; +} diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/static/style-monoblue.css b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/static/style-monoblue.css new file mode 100644 index 0000000..9bea29d --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/static/style-monoblue.css @@ -0,0 +1,475 @@ +/*** Initial Settings ***/ +* { + margin: 0; + padding: 0; + font-weight: normal; + font-style: normal; +} + +html { + font-size: 100%; + font-family: sans-serif; +} + +body { + font-size: 77%; + margin: 15px 50px; + background: #4B4B4C; +} + +a { + color:#0000cc; + text-decoration: none; +} +/*** end of Initial Settings ***/ + + +/** common settings **/ +div#container { + background: #FFFFFF; + position: relative; + color: #666; +} + +div.page-header { + padding: 50px 20px 0; + background: #006699 top left repeat-x; + position: relative; +} + div.page-header h1 { + margin: 10px 0 30px; + font-size: 1.8em; + font-weight: bold; + font-family: osaka,'MS P Gothic', Georgia, serif; + letter-spacing: 1px; + color: #DDD; + } + div.page-header h1 a { + font-weight: bold; + color: #FFF; + } + div.page-header a { + text-decoration: none; + } + + div.page-header form { + position: absolute; + margin-bottom: 2px; + bottom: 0; + right: 20px; + } + div.page-header form label { + color: #DDD; + } + div.page-header form input { + padding: 2px; + border: solid 1px #DDD; + } + div.page-header form dl { + overflow: hidden; + } + div.page-header form dl dt { + font-size: 1.2em; + } + div.page-header form dl dt, + div.page-header form dl dd { + margin: 0 0 0 5px; + float: left; + height: 24px; + line-height: 20px; + } + + ul.page-nav { + margin: 10px 0 0 0; + list-style-type: none; + overflow: hidden; + width: 900px; + } + ul.page-nav li { + margin: 0 2px 0 0; + float: left; + width: 80px; + height: 24px; + font-size: 1.1em; + line-height: 24px; + text-align: center; + } + ul.page-nav li.current { + background: #FFF; + } + ul.page-nav li a { + height: 24px; + color: #666; + background: #DDD; + display: block; + text-decoration: none; + } + ul.page-nav li a:hover { + color:#333; + background: #FFF; + } + 
+ul.submenu { + margin: 10px 0 -10px 20px; + list-style-type: none; +} +ul.submenu li { + margin: 0 10px 0 0; + font-size: 1.2em; + display: inline; +} + +h2 { + margin: 20px 0 10px; + height: 30px; + line-height: 30px; + text-indent: 20px; + background: #FFF; + font-size: 1.2em; + border-top: dotted 1px #D5E1E6; + font-weight: bold; +} +h2.no-link { + color:#006699; +} +h2.no-border { + color: #FFF; + background: #006699; + border: 0; +} +h2 a { + font-weight:bold; + color:#006699; +} + +div.page-path { + text-align: right; + padding: 20px 30px 10px 0; + border:solid #d9d8d1; + border-width:0px 0px 1px; + font-size: 1.2em; +} + +div.page-footer { + margin: 50px 0 0; + position: relative; +} + div.page-footer p { + position: relative; + left: 20px; + bottom: 5px; + font-size: 1.2em; + } + + ul.rss-logo { + position: absolute; + top: -10px; + right: 20px; + height: 20px; + list-style-type: none; + } + ul.rss-logo li { + display: inline; + } + ul.rss-logo li a { + padding: 3px 6px; + line-height: 10px; + border:1px solid; + border-color:#fcc7a5 #7d3302 #3e1a01 #ff954e; + color:#ffffff; + background-color:#ff6600; + font-weight:bold; + font-family:sans-serif; + font-size:10px; + text-align:center; + text-decoration:none; + } + div.rss-logo li a:hover { + background-color:#ee5500; + } + +p.normal { + margin: 20px 0 20px 30px; + font-size: 1.2em; +} + +table { + margin: 10px 0 0 20px; + width: 95%; + border-collapse: collapse; +} +table tr td { + font-size: 1.1em; +} +table tr td.nowrap { + white-space: nowrap; +} +table tr td.closed { + background-color: #99f; +} +/* +table tr.parity0:hover, +table tr.parity1:hover { + background: #D5E1E6; +} +*/ +table tr.parity0 { + background: #F1F6F7; +} +table tr.parity1 { + background: #FFFFFF; +} +table tr td { + padding: 5px 5px; +} +table.annotated tr td { + padding: 0px 5px; +} + +span.logtags span { + padding: 2px 6px; + font-weight: normal; + font-size: 11px; + border: 1px solid; + background-color: #ffaaff; + border-color: #ffccff #ff00ee #ff00ee #ffccff; +} +span.logtags span.tagtag { + background-color: #ffffaa; + border-color: #ffffcc #ffee00 #ffee00 #ffffcc; +} +span.logtags span.branchtag { + background-color: #aaffaa; + border-color: #ccffcc #00cc33 #00cc33 #ccffcc; +} +span.logtags span.inbranchtag { + background-color: #d5dde6; + border-color: #e3ecf4 #9398f4 #9398f4 #e3ecf4; +} + +div.diff pre { + margin: 10px 0 0 0; +} +div.diff pre span { + font-family: monospace; + white-space: pre; + font-size: 1.2em; + padding: 3px 0; +} +td.source { + white-space: pre; + font-family: monospace; + margin: 10px 30px 0; + font-size: 1.2em; + font-family: monospace; +} + div.source div.parity0, + div.source div.parity1 { + padding: 1px; + font-size: 1.2em; + } + div.source div.parity0 { + background: #F1F6F7; + } + div.source div.parity1 { + background: #FFFFFF; + } +div.parity0:hover, +div.parity1:hover { + background: #D5E1E6; +} +.linenr { + color: #999; + text-align: right; +} +.lineno { + text-align: right; +} +.lineno a { + color: #999; +} +td.linenr { + width: 60px; +} + +div#powered-by { + position: absolute; + width: 75px; + top: 15px; + right: 20px; + font-size: 1.2em; +} +div#powered-by a { + color: #EEE; + text-decoration: none; +} +div#powered-by a:hover { + text-decoration: underline; +} +/* +div#monoblue-corner-top-left { + position: absolute; + top: 0; + left: 0; + width: 10px; + height: 10px; + background: url(./monoblue-corner.png) top left no-repeat !important; + background: none; +} +div#monoblue-corner-top-right { + position: 
absolute; + top: 0; + right: 0; + width: 10px; + height: 10px; + background: url(./monoblue-corner.png) top right no-repeat !important; + background: none; +} +div#monoblue-corner-bottom-left { + position: absolute; + bottom: 0; + left: 0; + width: 10px; + height: 10px; + background: url(./monoblue-corner.png) bottom left no-repeat !important; + background: none; +} +div#monoblue-corner-bottom-right { + position: absolute; + bottom: 0; + right: 0; + width: 10px; + height: 10px; + background: url(./monoblue-corner.png) bottom right no-repeat !important; + background: none; +} +*/ +/** end of common settings **/ + +/** summary **/ +dl.overview { + margin: 0 0 0 30px; + font-size: 1.1em; + overflow: hidden; +} + dl.overview dt, + dl.overview dd { + margin: 5px 0; + float: left; + } + dl.overview dt { + clear: left; + font-weight: bold; + width: 150px; + } +/** end of summary **/ + +/** chagelog **/ +h3.changelog { + margin: 20px 0 5px 30px; + padding: 0 0 2px; + font-size: 1.4em; + border-bottom: dotted 1px #D5E1E6; +} +ul.changelog-entry { + margin: 0 0 10px 30px; + list-style-type: none; + position: relative; +} +ul.changelog-entry li span.revdate { + font-size: 1.1em; +} +ul.changelog-entry li.age { + position: absolute; + top: -25px; + right: 10px; + font-size: 1.4em; + color: #CCC; + font-weight: bold; + font-style: italic; +} +ul.changelog-entry li span.name { + font-size: 1.2em; + font-weight: bold; +} +ul.changelog-entry li.description { + margin: 10px 0 0; + font-size: 1.1em; +} +/** end of changelog **/ + +/** file **/ +p.files { + margin: 0 0 0 20px; + font-size: 2.0em; + font-weight: bold; +} +/** end of file **/ + +/** changeset **/ +h3.changeset { + margin: 20px 0 5px 20px; + padding: 0 0 2px; + font-size: 1.6em; + border-bottom: dotted 1px #D5E1E6; +} +p.changeset-age { + position: relative; +} +p.changeset-age span { + position: absolute; + top: -25px; + right: 10px; + font-size: 1.4em; + color: #CCC; + font-weight: bold; + font-style: italic; +} +p.description { + margin: 10px 30px 0 30px; + padding: 10px; + border: solid 1px #CCC; + font-size: 1.2em; +} +/** end of changeset **/ + +/** canvas **/ +div#wrapper { + position: relative; + font-size: 1.2em; +} + +canvas { + position: absolute; + z-index: 5; + top: -0.7em; +} + +ul#nodebgs li.parity0 { + background: #F1F6F7; +} + +ul#nodebgs li.parity1 { + background: #FFFFFF; +} + +ul#graphnodes { + position: absolute; + z-index: 10; + top: 7px; + list-style: none inside none; +} + +ul#nodebgs { + list-style: none inside none; +} + +ul#graphnodes li, ul#nodebgs li { + height: 39px; +} + +ul#graphnodes li .info { + display: block; + position: relative; +} +/** end of canvas **/ diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/static/style-paper.css b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/static/style-paper.css new file mode 100644 index 0000000..c353656 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/static/style-paper.css @@ -0,0 +1,254 @@ +body { + margin: 0; + padding: 0; + background: white; + font-family: sans-serif; +} + +.container { + padding-left: 115px; +} + +.main { + position: relative; + background: white; + padding: 2em 2em 2em 0; +} + +#.main { + width: 98%; +} + +.overflow { + width: 100%; + overflow: auto; +} + +.menu { + width: 90px; + margin: 0; + font-size: 80%; + text-align: left; + position: absolute; + top: 20px; + left: 20px; + right: auto; +} + +.menu ul { + list-style: none; + padding: 0; + margin: 10px 0 0 0; + 
border-left: 2px solid #999; +} + +.menu li { + margin-bottom: 3px; + padding: 2px 4px; + background: white; + color: black; + font-weight: normal; +} + +.menu li.active { + font-weight: bold; +} + +.menu img { + width: 75px; + height: 90px; + border: 0; +} + +.menu a { color: black; display: block; } + +.search { + position: absolute; + top: .7em; + right: 2em; +} + +form.search div#hint { + display: none; + position: absolute; + top: 40px; + right: 0px; + width: 190px; + padding: 5px; + background: #ffc; + font-size: 70%; + border: 1px solid yellow; + -moz-border-radius: 5px; /* this works only in camino/firefox */ + -webkit-border-radius: 5px; /* this is just for Safari */ +} + +form.search:hover div#hint { display: block; } + +a { text-decoration:none; } +.age { white-space:nowrap; } +.date { white-space:nowrap; } +.indexlinks { white-space:nowrap; } +.parity0 { background-color: #f0f0f0; } +.parity1 { background-color: white; } +.plusline { color: green; } +.minusline { color: #dc143c; } /* crimson */ +.atline { color: purple; } + +.navigate { + text-align: right; + font-size: 60%; + margin: 1em 0; +} + +.tag { + color: #999; + font-size: 70%; + font-weight: normal; + margin-left: .5em; + vertical-align: baseline; +} + +.branchhead { + color: #000; + font-size: 80%; + font-weight: normal; + margin-left: .5em; + vertical-align: baseline; +} + +ul#graphnodes .branchhead { + font-size: 75%; +} + +.branchname { + color: #000; + font-size: 60%; + font-weight: normal; + margin-left: .5em; + vertical-align: baseline; +} + +h3 .branchname { + font-size: 80%; +} + +/* Common */ +pre { margin: 0; } + +h2 { font-size: 120%; border-bottom: 1px solid #999; } +h2 a { color: #000; } +h3 { + margin-top: -.7em; + font-size: 100%; +} + +/* log and tags tables */ +.bigtable { + border-bottom: 1px solid #999; + border-collapse: collapse; + font-size: 90%; + width: 100%; + font-weight: normal; + text-align: left; +} + +.bigtable td { + vertical-align: top; +} + +.bigtable th { + padding: 1px 4px; + border-bottom: 1px solid #999; +} +.bigtable tr { border: none; } +.bigtable .age { width: 7em; } +.bigtable .author { width: 12em; } +.bigtable .description { } +.bigtable .node { width: 5em; font-family: monospace;} +.bigtable .permissions { width: 8em; text-align: left;} +.bigtable .size { width: 5em; text-align: right; } +.bigtable .annotate { text-align: right; } +.bigtable td.annotate { font-size: smaller; } +.bigtable td.source { font-size: inherit; } + +.source, .sourcefirst, .sourcelast { + font-family: monospace; + white-space: pre; + padding: 1px 4px; + font-size: 90%; +} +.sourcefirst { border-bottom: 1px solid #999; font-weight: bold; } +.sourcelast { border-top: 1px solid #999; } +.source a { color: #999; font-size: smaller; font-family: monospace;} +.bottomline { border-bottom: 1px solid #999; } + +.fileline { font-family: monospace; } +.fileline img { border: 0; } + +.tagEntry .closed { color: #99f; } + +/* Changeset entry */ +#changesetEntry { + border-collapse: collapse; + font-size: 90%; + width: 100%; + margin-bottom: 1em; +} + +#changesetEntry th { + padding: 1px 4px; + width: 4em; + text-align: right; + font-weight: normal; + color: #999; + margin-right: .5em; + vertical-align: top; +} + +div.description { + border-left: 2px solid #999; + margin: 1em 0 1em 0; + padding: .3em; +} + +/* Graph */ +div#wrapper { + position: relative; + border-top: 1px solid black; + border-bottom: 1px solid black; + margin: 0; + padding: 0; +} + +canvas { + position: absolute; + z-index: 5; + top: -0.7em; + 
margin: 0; +} + +ul#graphnodes { + position: absolute; + z-index: 10; + top: -1.0em; + list-style: none inside none; + padding: 0; +} + +ul#nodebgs { + list-style: none inside none; + padding: 0; + margin: 0; + top: -0.7em; +} + +ul#graphnodes li, ul#nodebgs li { + height: 39px; +} + +ul#graphnodes li .info { + display: block; + font-size: 70%; + position: relative; + top: -3px; +} diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/static/style.css b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/static/style.css new file mode 100644 index 0000000..66bd96d --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/static/style.css @@ -0,0 +1,105 @@ +a { text-decoration:none; } +.age { white-space:nowrap; } +.date { white-space:nowrap; } +.indexlinks { white-space:nowrap; } +.parity0 { background-color: #ddd; } +.parity1 { background-color: #eee; } +.lineno { width: 60px; color: #aaa; font-size: smaller; + text-align: right; } +.plusline { color: green; } +.minusline { color: red; } +.atline { color: purple; } +.annotate { font-size: smaller; text-align: right; padding-right: 1em; } +.buttons a { + background-color: #666; + padding: 2pt; + color: white; + font-family: sans; + font-weight: bold; +} +.navigate a { + background-color: #ccc; + padding: 2pt; + font-family: sans; + color: black; +} + +.metatag { + background-color: #888; + color: white; + text-align: right; +} + +/* Common */ +pre { margin: 0; } + +.logo { + float: right; + clear: right; +} + +/* Changelog/Filelog entries */ +.logEntry { width: 100%; } +.logEntry .age { width: 15%; } +.logEntry th { font-weight: normal; text-align: right; vertical-align: top; } +.logEntry th.age, .logEntry th.firstline { font-weight: bold; } +.logEntry th.firstline { text-align: left; width: inherit; } + +/* Shortlog entries */ +.slogEntry { width: 100%; } +.slogEntry .age { width: 8em; } +.slogEntry td { font-weight: normal; text-align: left; vertical-align: top; } +.slogEntry td.author { width: 15em; } + +/* Tag entries */ +#tagEntries { list-style: none; margin: 0; padding: 0; } +#tagEntries .tagEntry { list-style: none; margin: 0; padding: 0; } + +/* Changeset entry */ +#changesetEntry { } +#changesetEntry th { font-weight: normal; background-color: #888; color: #fff; text-align: right; } +#changesetEntry th.files, #changesetEntry th.description { vertical-align: top; } + +/* File diff view */ +#filediffEntry { } +#filediffEntry th { font-weight: normal; background-color: #888; color: #fff; text-align: right; } + +/* Graph */ +div#wrapper { + position: relative; + margin: 0; + padding: 0; +} + +canvas { + position: absolute; + z-index: 5; + top: -0.6em; + margin: 0; +} + +ul#nodebgs { + list-style: none inside none; + padding: 0; + margin: 0; + top: -0.7em; +} + +ul#graphnodes li, ul#nodebgs li { + height: 39px; +} + +ul#graphnodes { + position: absolute; + z-index: 10; + top: -0.85em; + list-style: none inside none; + padding: 0; +} + +ul#graphnodes li .info { + display: block; + font-size: 70%; + position: relative; + top: -1px; +} diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/template-vars.txt b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/template-vars.txt new file mode 100644 index 0000000..28f7bc0 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/templates/template-vars.txt @@ -0,0 +1,41 @@ +repo the name of the repo +rev a changeset.manifest revision +node a changeset node +changesets total 
number of changesets +file a filename +filerev a file revision +filerevs total number of file revisions +up the directory of the relevant file +path a path in the manifest, starting with "/" +basename a short pathname +date a date string +age age in hours, days, etc +line a line of text (escaped) +desc a description (escaped, with breaks) +shortdesc a short description (escaped) +author a name or email addressv(obfuscated) +parent a list of the parent +child a list of the children +tags a list of tag + +header the global page header +footer the global page footer + +files a list of file links +file_copies a list of pairs of name, source filenames +dirs a set of directory links +diff a diff of one or more files +annotate an annotated file +entries the entries relevant to the page + +url base url of hgweb interface +staticurl base url for static resources + + +Templates and commands: + changelog(rev) - a page for browsing changesets + naventry - a link for jumping to a changeset number + filenodelink - jump to file diff + fileellipses - printed after maxfiles + changelogentry - an entry in the log + manifest - browse a manifest as a directory tree diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/transaction.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/transaction.py new file mode 100644 index 0000000..5bc2d1f --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/transaction.py @@ -0,0 +1,176 @@ +# transaction.py - simple journalling scheme for mercurial +# +# This transaction scheme is intended to gracefully handle program +# errors and interruptions. More serious failures like system crashes +# can be recovered with an fsck-like tool. As the whole repository is +# effectively log-structured, this should amount to simply truncating +# anything that isn't referenced in the changelog. +# +# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com> +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. 
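# Illustrative usage sketch (assumed names: `opener` stands for the store's file
# opener and `report` for a warning callback such as ui.warn -- see the class below):
#
#     tr = transaction(report, opener, "journal")
#     tr.add("00changelog.i", offset)   # journal the file's pre-write length
#     # ... append new data to 00changelog.i ...
#     tr.close()                        # commit: the journal file is removed
#
# On error, tr.abort() (or __del__ while the journal is still set) replays the
# journal via _playback(), truncating each file back to its recorded offset.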
+ +from i18n import _ +import os, errno +import error + +def active(func): + def _active(self, *args, **kwds): + if self.count == 0: + raise error.Abort(_( + 'cannot use transaction when it is already committed/aborted')) + return func(self, *args, **kwds) + return _active + +def _playback(journal, report, opener, entries, unlink=True): + for f, o, ignore in entries: + if o or not unlink: + try: + opener(f, 'a').truncate(o) + except IOError: + report(_("failed to truncate %s\n") % f) + raise + else: + try: + fn = opener(f).name + os.unlink(fn) + except (IOError, OSError), inst: + if inst.errno != errno.ENOENT: + raise + os.unlink(journal) + +class transaction(object): + def __init__(self, report, opener, journal, after=None, createmode=None): + self.count = 1 + self.usages = 1 + self.report = report + self.opener = opener + self.after = after + self.entries = [] + self.map = {} + self.journal = journal + self._queue = [] + + self.file = open(self.journal, "w") + if createmode is not None: + os.chmod(self.journal, createmode & 0666) + + def __del__(self): + if self.journal: + self._abort() + + @active + def startgroup(self): + self._queue.append([]) + + @active + def endgroup(self): + q = self._queue.pop() + d = ''.join(['%s\0%d\n' % (x[0], x[1]) for x in q]) + self.entries.extend(q) + self.file.write(d) + self.file.flush() + + @active + def add(self, file, offset, data=None): + if file in self.map: + return + if self._queue: + self._queue[-1].append((file, offset, data)) + return + + self.entries.append((file, offset, data)) + self.map[file] = len(self.entries) - 1 + # add enough data to the journal to do the truncate + self.file.write("%s\0%d\n" % (file, offset)) + self.file.flush() + + @active + def find(self, file): + if file in self.map: + return self.entries[self.map[file]] + return None + + @active + def replace(self, file, offset, data=None): + ''' + replace can only replace already committed entries + that are not pending in the queue + ''' + + if file not in self.map: + raise KeyError(file) + index = self.map[file] + self.entries[index] = (file, offset, data) + self.file.write("%s\0%d\n" % (file, offset)) + self.file.flush() + + @active + def nest(self): + self.count += 1 + self.usages += 1 + return self + + def release(self): + if self.count > 0: + self.usages -= 1 + # if the transaction scopes are left without being closed, fail + if self.count > 0 and self.usages == 0: + self._abort() + + def running(self): + return self.count > 0 + + @active + def close(self): + '''commit the transaction''' + self.count -= 1 + if self.count != 0: + return + self.file.close() + self.entries = [] + if self.after: + self.after() + if os.path.isfile(self.journal): + os.unlink(self.journal) + self.journal = None + + @active + def abort(self): + '''abort the transaction (generally called on error, or when the + transaction is not explicitly committed before going out of + scope)''' + self._abort() + + def _abort(self): + self.count = 0 + self.usages = 0 + self.file.close() + + try: + if not self.entries: + if self.journal: + os.unlink(self.journal) + return + + self.report(_("transaction abort!\n")) + + try: + _playback(self.journal, self.report, self.opener, + self.entries, False) + self.report(_("rollback completed\n")) + except: + self.report(_("rollback failed - please run hg recover\n")) + finally: + self.journal = None + + +def rollback(opener, file, report): + entries = [] + + for l in open(file).readlines(): + f, o = l.split('\0') + entries.append((f, int(o), None)) + + _playback(file, 
report, opener, entries) diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/transaction.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/transaction.pyo Binary files differnew file mode 100644 index 0000000..bdcb0af --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/transaction.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/ui.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/ui.py new file mode 100644 index 0000000..cd43fc7 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/ui.py @@ -0,0 +1,626 @@ +# ui.py - user interface bits for mercurial +# +# Copyright 2005-2007 Matt Mackall <mpm@selenic.com> +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +from i18n import _ +import errno, getpass, os, socket, sys, tempfile, traceback +import config, util, error + +class ui(object): + def __init__(self, src=None): + self._buffers = [] + self.quiet = self.verbose = self.debugflag = self.tracebackflag = False + self._reportuntrusted = True + self._ocfg = config.config() # overlay + self._tcfg = config.config() # trusted + self._ucfg = config.config() # untrusted + self._trustusers = set() + self._trustgroups = set() + + if src: + self._tcfg = src._tcfg.copy() + self._ucfg = src._ucfg.copy() + self._ocfg = src._ocfg.copy() + self._trustusers = src._trustusers.copy() + self._trustgroups = src._trustgroups.copy() + self.environ = src.environ + self.fixconfig() + else: + # shared read-only environment + self.environ = os.environ + # we always trust global config files + for f in util.rcpath(): + self.readconfig(f, trust=True) + + def copy(self): + return self.__class__(self) + + def _is_trusted(self, fp, f): + st = util.fstat(fp) + if util.isowner(st): + return True + + tusers, tgroups = self._trustusers, self._trustgroups + if '*' in tusers or '*' in tgroups: + return True + + user = util.username(st.st_uid) + group = util.groupname(st.st_gid) + if user in tusers or group in tgroups or user == util.username(): + return True + + if self._reportuntrusted: + self.warn(_('Not trusting file %s from untrusted ' + 'user %s, group %s\n') % (f, user, group)) + return False + + def readconfig(self, filename, root=None, trust=False, + sections=None, remap=None): + try: + fp = open(filename) + except IOError: + if not sections: # ignore unless we were looking for something + return + raise + + cfg = config.config() + trusted = sections or trust or self._is_trusted(fp, filename) + + try: + cfg.read(filename, fp, sections=sections, remap=remap) + except error.ConfigError, inst: + if trusted: + raise + self.warn(_("Ignored: %s\n") % str(inst)) + + if self.plain(): + for k in ('debug', 'fallbackencoding', 'quiet', 'slash', + 'logtemplate', 'style', + 'traceback', 'verbose'): + if k in cfg['ui']: + del cfg['ui'][k] + for k, v in cfg.items('alias'): + del cfg['alias'][k] + for k, v in cfg.items('defaults'): + del cfg['defaults'][k] + + if trusted: + self._tcfg.update(cfg) + self._tcfg.update(self._ocfg) + self._ucfg.update(cfg) + self._ucfg.update(self._ocfg) + + if root is None: + root = os.path.expanduser('~') + self.fixconfig(root=root) + + def fixconfig(self, root=None, section=None): + if section in (None, 'paths'): + # expand vars and ~ + # translate paths relative to root (or home) into absolute paths + root = root or os.getcwd() + for c in self._tcfg, self._ucfg, self._ocfg: + for n, p in c.items('paths'): + if not p: 
+ continue + if '%%' in p: + self.warn(_("(deprecated '%%' in path %s=%s from %s)\n") + % (n, p, self.configsource('paths', n))) + p = p.replace('%%', '%') + p = util.expandpath(p) + if '://' not in p and not os.path.isabs(p): + p = os.path.normpath(os.path.join(root, p)) + c.set("paths", n, p) + + if section in (None, 'ui'): + # update ui options + self.debugflag = self.configbool('ui', 'debug') + self.verbose = self.debugflag or self.configbool('ui', 'verbose') + self.quiet = not self.debugflag and self.configbool('ui', 'quiet') + if self.verbose and self.quiet: + self.quiet = self.verbose = False + self._reportuntrusted = self.configbool("ui", "report_untrusted", + True) + self.tracebackflag = self.configbool('ui', 'traceback', False) + + if section in (None, 'trusted'): + # update trust information + self._trustusers.update(self.configlist('trusted', 'users')) + self._trustgroups.update(self.configlist('trusted', 'groups')) + + def setconfig(self, section, name, value, overlay=True): + if overlay: + self._ocfg.set(section, name, value) + self._tcfg.set(section, name, value) + self._ucfg.set(section, name, value) + self.fixconfig(section=section) + + def _data(self, untrusted): + return untrusted and self._ucfg or self._tcfg + + def configsource(self, section, name, untrusted=False): + return self._data(untrusted).source(section, name) or 'none' + + def config(self, section, name, default=None, untrusted=False): + value = self._data(untrusted).get(section, name, default) + if self.debugflag and not untrusted and self._reportuntrusted: + uvalue = self._ucfg.get(section, name) + if uvalue is not None and uvalue != value: + self.debug(_("ignoring untrusted configuration option " + "%s.%s = %s\n") % (section, name, uvalue)) + return value + + def configbool(self, section, name, default=False, untrusted=False): + v = self.config(section, name, None, untrusted) + if v is None: + return default + if isinstance(v, bool): + return v + b = util.parsebool(v) + if b is None: + raise error.ConfigError(_("%s.%s not a boolean ('%s')") + % (section, name, v)) + return b + + def configlist(self, section, name, default=None, untrusted=False): + """Return a list of comma/space separated strings""" + + def _parse_plain(parts, s, offset): + whitespace = False + while offset < len(s) and (s[offset].isspace() or s[offset] == ','): + whitespace = True + offset += 1 + if offset >= len(s): + return None, parts, offset + if whitespace: + parts.append('') + if s[offset] == '"' and not parts[-1]: + return _parse_quote, parts, offset + 1 + elif s[offset] == '"' and parts[-1][-1] == '\\': + parts[-1] = parts[-1][:-1] + s[offset] + return _parse_plain, parts, offset + 1 + parts[-1] += s[offset] + return _parse_plain, parts, offset + 1 + + def _parse_quote(parts, s, offset): + if offset < len(s) and s[offset] == '"': # "" + parts.append('') + offset += 1 + while offset < len(s) and (s[offset].isspace() or + s[offset] == ','): + offset += 1 + return _parse_plain, parts, offset + + while offset < len(s) and s[offset] != '"': + if (s[offset] == '\\' and offset + 1 < len(s) + and s[offset + 1] == '"'): + offset += 1 + parts[-1] += '"' + else: + parts[-1] += s[offset] + offset += 1 + + if offset >= len(s): + real_parts = _configlist(parts[-1]) + if not real_parts: + parts[-1] = '"' + else: + real_parts[0] = '"' + real_parts[0] + parts = parts[:-1] + parts.extend(real_parts) + return None, parts, offset + + offset += 1 + while offset < len(s) and s[offset] in [' ', ',']: + offset += 1 + + if offset < len(s): + if offset + 1 
== len(s) and s[offset] == '"': + parts[-1] += '"' + offset += 1 + else: + parts.append('') + else: + return None, parts, offset + + return _parse_plain, parts, offset + + def _configlist(s): + s = s.rstrip(' ,') + if not s: + return [] + parser, parts, offset = _parse_plain, [''], 0 + while parser: + parser, parts, offset = parser(parts, s, offset) + return parts + + result = self.config(section, name, untrusted=untrusted) + if result is None: + result = default or [] + if isinstance(result, basestring): + result = _configlist(result.lstrip(' ,\n')) + if result is None: + result = default or [] + return result + + def has_section(self, section, untrusted=False): + '''tell whether section exists in config.''' + return section in self._data(untrusted) + + def configitems(self, section, untrusted=False): + items = self._data(untrusted).items(section) + if self.debugflag and not untrusted and self._reportuntrusted: + for k, v in self._ucfg.items(section): + if self._tcfg.get(section, k) != v: + self.debug(_("ignoring untrusted configuration option " + "%s.%s = %s\n") % (section, k, v)) + return items + + def walkconfig(self, untrusted=False): + cfg = self._data(untrusted) + for section in cfg.sections(): + for name, value in self.configitems(section, untrusted): + yield section, name, str(value).replace('\n', '\\n') + + def plain(self): + '''is plain mode active? + + Plain mode means that all configuration variables which affect the + behavior and output of Mercurial should be ignored. Additionally, the + output should be stable, reproducible and suitable for use in scripts or + applications. + + The only way to trigger plain mode is by setting the `HGPLAIN' + environment variable. + ''' + return 'HGPLAIN' in os.environ + + def username(self): + """Return default username to be used in commits. + + Searched in this order: $HGUSER, [ui] section of hgrcs, $EMAIL + and stop searching if one of these is set. + If not found and ui.askusername is True, ask the user, else use + ($LOGNAME or $USER or $LNAME or $USERNAME) + "@full.hostname". + """ + user = os.environ.get("HGUSER") + if user is None: + user = self.config("ui", "username") + if user is not None: + user = os.path.expandvars(user) + if user is None: + user = os.environ.get("EMAIL") + if user is None and self.configbool("ui", "askusername"): + user = self.prompt(_("enter a commit username:"), default=None) + if user is None and not self.interactive(): + try: + user = '%s@%s' % (util.getuser(), socket.getfqdn()) + self.warn(_("No username found, using '%s' instead\n") % user) + except KeyError: + pass + if not user: + raise util.Abort(_('no username supplied (see "hg help config")')) + if "\n" in user: + raise util.Abort(_("username %s contains a newline\n") % repr(user)) + return user + + def shortuser(self, user): + """Return a short representation of a user name or email address.""" + if not self.verbose: + user = util.shortuser(user) + return user + + def expandpath(self, loc, default=None): + """Return repository location relative to cwd or from [paths]""" + if "://" in loc or os.path.isdir(os.path.join(loc, '.hg')): + return loc + + path = self.config('paths', loc) + if not path and default is not None: + path = self.config('paths', default) + return path or loc + + def pushbuffer(self): + self._buffers.append([]) + + def popbuffer(self, labeled=False): + '''pop the last buffer and return the buffered output + + If labeled is True, any labels associated with buffered + output will be handled. 
By default, this has no effect + on the output returned, but extensions and GUI tools may + handle this argument and returned styled output. If output + is being buffered so it can be captured and parsed or + processed, labeled should not be set to True. + ''' + return "".join(self._buffers.pop()) + + def write(self, *args, **opts): + '''write args to output + + By default, this method simply writes to the buffer or stdout, + but extensions or GUI tools may override this method, + write_err(), popbuffer(), and label() to style output from + various parts of hg. + + An optional keyword argument, "label", can be passed in. + This should be a string containing label names separated by + space. Label names take the form of "topic.type". For example, + ui.debug() issues a label of "ui.debug". + + When labeling output for a specific command, a label of + "cmdname.type" is recommended. For example, status issues + a label of "status.modified" for modified files. + ''' + if self._buffers: + self._buffers[-1].extend([str(a) for a in args]) + else: + for a in args: + sys.stdout.write(str(a)) + + def write_err(self, *args, **opts): + try: + if not getattr(sys.stdout, 'closed', False): + sys.stdout.flush() + for a in args: + sys.stderr.write(str(a)) + # stderr may be buffered under win32 when redirected to files, + # including stdout. + if not getattr(sys.stderr, 'closed', False): + sys.stderr.flush() + except IOError, inst: + if inst.errno not in (errno.EPIPE, errno.EIO): + raise + + def flush(self): + try: sys.stdout.flush() + except: pass + try: sys.stderr.flush() + except: pass + + def interactive(self): + '''is interactive input allowed? + + An interactive session is a session where input can be reasonably read + from `sys.stdin'. If this function returns false, any attempt to read + from stdin should fail with an error, unless a sensible default has been + specified. + + Interactiveness is triggered by the value of the `ui.interactive' + configuration variable or - if it is unset - when `sys.stdin' points + to a terminal device. + + This function refers to input only; for output, see `ui.formatted()'. + ''' + i = self.configbool("ui", "interactive", None) + if i is None: + try: + return sys.stdin.isatty() + except AttributeError: + # some environments replace stdin without implementing isatty + # usually those are non-interactive + return False + + return i + + def termwidth(self): + '''how wide is the terminal in columns? + ''' + if 'COLUMNS' in os.environ: + try: + return int(os.environ['COLUMNS']) + except ValueError: + pass + return util.termwidth() + + def formatted(self): + '''should formatted output be used? + + It is often desirable to format the output to suite the output medium. + Examples of this are truncating long lines or colorizing messages. + However, this is not often not desirable when piping output into other + utilities, e.g. `grep'. + + Formatted output is triggered by the value of the `ui.formatted' + configuration variable or - if it is unset - when `sys.stdout' points + to a terminal device. Please note that `ui.formatted' should be + considered an implementation detail; it is not intended for use outside + Mercurial or its extensions. + + This function refers to output only; for input, see `ui.interactive()'. + This function always returns false when in plain mode, see `ui.plain()'. 
+ ''' + if self.plain(): + return False + + i = self.configbool("ui", "formatted", None) + if i is None: + try: + return sys.stdout.isatty() + except AttributeError: + # some environments replace stdout without implementing isatty + # usually those are non-interactive + return False + + return i + + def _readline(self, prompt=''): + if sys.stdin.isatty(): + try: + # magically add command line editing support, where + # available + import readline + # force demandimport to really load the module + readline.read_history_file + # windows sometimes raises something other than ImportError + except Exception: + pass + line = raw_input(prompt) + # When stdin is in binary mode on Windows, it can cause + # raw_input() to emit an extra trailing carriage return + if os.linesep == '\r\n' and line and line[-1] == '\r': + line = line[:-1] + return line + + def prompt(self, msg, default="y"): + """Prompt user with msg, read response. + If ui is not interactive, the default is returned. + """ + if not self.interactive(): + self.write(msg, ' ', default, "\n") + return default + try: + r = self._readline(msg + ' ') + if not r: + return default + return r + except EOFError: + raise util.Abort(_('response expected')) + + def promptchoice(self, msg, choices, default=0): + """Prompt user with msg, read response, and ensure it matches + one of the provided choices. The index of the choice is returned. + choices is a sequence of acceptable responses with the format: + ('&None', 'E&xec', 'Sym&link') Responses are case insensitive. + If ui is not interactive, the default is returned. + """ + resps = [s[s.index('&')+1].lower() for s in choices] + while True: + r = self.prompt(msg, resps[default]) + if r.lower() in resps: + return resps.index(r.lower()) + self.write(_("unrecognized response\n")) + + def getpass(self, prompt=None, default=None): + if not self.interactive(): + return default + try: + return getpass.getpass(prompt or _('password: ')) + except EOFError: + raise util.Abort(_('response expected')) + def status(self, *msg, **opts): + '''write status message to output (if ui.quiet is False) + + This adds an output label of "ui.status". + ''' + if not self.quiet: + opts['label'] = opts.get('label', '') + ' ui.status' + self.write(*msg, **opts) + def warn(self, *msg, **opts): + '''write warning message to output (stderr) + + This adds an output label of "ui.warning". + ''' + opts['label'] = opts.get('label', '') + ' ui.warning' + self.write_err(*msg, **opts) + def note(self, *msg, **opts): + '''write note to output (if ui.verbose is True) + + This adds an output label of "ui.note". + ''' + if self.verbose: + opts['label'] = opts.get('label', '') + ' ui.note' + self.write(*msg, **opts) + def debug(self, *msg, **opts): + '''write debug message to output (if ui.debugflag is True) + + This adds an output label of "ui.debug". + ''' + if self.debugflag: + opts['label'] = opts.get('label', '') + ' ui.debug' + self.write(*msg, **opts) + def edit(self, text, user): + (fd, name) = tempfile.mkstemp(prefix="hg-editor-", suffix=".txt", + text=True) + try: + f = os.fdopen(fd, "w") + f.write(text) + f.close() + + editor = self.geteditor() + + util.system("%s \"%s\"" % (editor, name), + environ={'HGUSER': user}, + onerr=util.Abort, errprefix=_("edit failed")) + + f = open(name) + t = f.read() + f.close() + finally: + os.unlink(name) + + return t + + def traceback(self, exc=None): + '''print exception traceback if traceback printing enabled. + only to call in exception handler. 
returns true if traceback + printed.''' + if self.tracebackflag: + if exc: + traceback.print_exception(exc[0], exc[1], exc[2]) + else: + traceback.print_exc() + return self.tracebackflag + + def geteditor(self): + '''return editor to use''' + return (os.environ.get("HGEDITOR") or + self.config("ui", "editor") or + os.environ.get("VISUAL") or + os.environ.get("EDITOR", "vi")) + + def progress(self, topic, pos, item="", unit="", total=None): + '''show a progress message + + With stock hg, this is simply a debug message that is hidden + by default, but with extensions or GUI tools it may be + visible. 'topic' is the current operation, 'item' is a + non-numeric marker of the current position (ie the currently + in-process file), 'pos' is the current numeric position (ie + revision, bytes, etc.), unit is a corresponding unit label, + and total is the highest expected pos. + + Multiple nested topics may be active at a time. + + All topics should be marked closed by setting pos to None at + termination. + ''' + + if pos == None or not self.debugflag: + return + + if unit: + unit = ' ' + unit + if item: + item = ' ' + item + + if total: + pct = 100.0 * pos / total + self.debug('%s:%s %s/%s%s (%4.2f%%)\n' + % (topic, item, pos, total, unit, pct)) + else: + self.debug('%s:%s %s%s\n' % (topic, item, pos, unit)) + + def log(self, service, message): + '''hook for logging facility extensions + + service should be a readily-identifiable subsystem, which will + allow filtering. + message should be a newline-terminated string to log. + ''' + pass + + def label(self, msg, label): + '''style msg based on supplied label + + Like ui.write(), this just returns msg unchanged, but extensions + and GUI tools can override it to allow styling output without + writing it. + + ui.write(s, 'label') is equivalent to + ui.write(ui.label(s, 'label')). + ''' + return msg diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/ui.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/ui.pyo Binary files differnew file mode 100644 index 0000000..6129008 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/ui.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/url.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/url.py new file mode 100644 index 0000000..cd5f2dc --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/url.py @@ -0,0 +1,701 @@ +# url.py - HTTP handling for mercurial +# +# Copyright 2005, 2006, 2007, 2008 Matt Mackall <mpm@selenic.com> +# Copyright 2006, 2007 Alexis S. L. Carvalho <alexis@cecm.usp.br> +# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com> +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. 
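+#
+# Editor's sketch (illustrative, not part of upstream Mercurial 1.7.3): the
+# URL helpers defined below behave roughly like this:
+#
+#   hidepassword('http://bob:secret@example.com/repo')
+#       -> 'http://bob:***@example.com/repo'
+#   removeauth('http://bob:secret@example.com/repo')
+#       -> 'http://example.com/repo'
+#   netlocsplit('bob:secret@example.com:8080')
+#       -> ('example.com', '8080', 'bob', 'secret')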
+ +import urllib, urllib2, urlparse, httplib, os, re, socket, cStringIO +import __builtin__ +from i18n import _ +import keepalive, util + +def _urlunparse(scheme, netloc, path, params, query, fragment, url): + '''Handle cases where urlunparse(urlparse(x://)) doesn't preserve the "//"''' + result = urlparse.urlunparse((scheme, netloc, path, params, query, fragment)) + if (scheme and + result.startswith(scheme + ':') and + not result.startswith(scheme + '://') and + url.startswith(scheme + '://') + ): + result = scheme + '://' + result[len(scheme + ':'):] + return result + +def hidepassword(url): + '''hide user credential in a url string''' + scheme, netloc, path, params, query, fragment = urlparse.urlparse(url) + netloc = re.sub('([^:]*):([^@]*)@(.*)', r'\1:***@\3', netloc) + return _urlunparse(scheme, netloc, path, params, query, fragment, url) + +def removeauth(url): + '''remove all authentication information from a url string''' + scheme, netloc, path, params, query, fragment = urlparse.urlparse(url) + netloc = netloc[netloc.find('@')+1:] + return _urlunparse(scheme, netloc, path, params, query, fragment, url) + +def netlocsplit(netloc): + '''split [user[:passwd]@]host[:port] into 4-tuple.''' + + a = netloc.find('@') + if a == -1: + user, passwd = None, None + else: + userpass, netloc = netloc[:a], netloc[a + 1:] + c = userpass.find(':') + if c == -1: + user, passwd = urllib.unquote(userpass), None + else: + user = urllib.unquote(userpass[:c]) + passwd = urllib.unquote(userpass[c + 1:]) + c = netloc.find(':') + if c == -1: + host, port = netloc, None + else: + host, port = netloc[:c], netloc[c + 1:] + return host, port, user, passwd + +def netlocunsplit(host, port, user=None, passwd=None): + '''turn host, port, user, passwd into [user[:passwd]@]host[:port].''' + if port: + hostport = host + ':' + port + else: + hostport = host + if user: + quote = lambda s: urllib.quote(s, safe='') + if passwd: + userpass = quote(user) + ':' + quote(passwd) + else: + userpass = quote(user) + return userpass + '@' + hostport + return hostport + +_safe = ('abcdefghijklmnopqrstuvwxyz' + 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' + '0123456789' '_.-/') +_safeset = None +_hex = None +def quotepath(path): + '''quote the path part of a URL + + This is similar to urllib.quote, but it also tries to avoid + quoting things twice (inspired by wget): + + >>> quotepath('abc def') + 'abc%20def' + >>> quotepath('abc%20def') + 'abc%20def' + >>> quotepath('abc%20 def') + 'abc%20%20def' + >>> quotepath('abc def%20') + 'abc%20def%20' + >>> quotepath('abc def%2') + 'abc%20def%252' + >>> quotepath('abc def%') + 'abc%20def%25' + ''' + global _safeset, _hex + if _safeset is None: + _safeset = set(_safe) + _hex = set('abcdefABCDEF0123456789') + l = list(path) + for i in xrange(len(l)): + c = l[i] + if (c == '%' and i + 2 < len(l) and + l[i + 1] in _hex and l[i + 2] in _hex): + pass + elif c not in _safeset: + l[i] = '%%%02X' % ord(c) + return ''.join(l) + +class passwordmgr(urllib2.HTTPPasswordMgrWithDefaultRealm): + def __init__(self, ui): + urllib2.HTTPPasswordMgrWithDefaultRealm.__init__(self) + self.ui = ui + + def find_user_password(self, realm, authuri): + authinfo = urllib2.HTTPPasswordMgrWithDefaultRealm.find_user_password( + self, realm, authuri) + user, passwd = authinfo + if user and passwd: + self._writedebug(user, passwd) + return (user, passwd) + + if not user: + auth = self.readauthtoken(authuri) + if auth: + user, passwd = auth.get('username'), auth.get('password') + if not user or not passwd: + if not self.ui.interactive(): + 
raise util.Abort(_('http authorization required')) + + self.ui.write(_("http authorization required\n")) + self.ui.write(_("realm: %s\n") % realm) + if user: + self.ui.write(_("user: %s\n") % user) + else: + user = self.ui.prompt(_("user:"), default=None) + + if not passwd: + passwd = self.ui.getpass() + + self.add_password(realm, authuri, user, passwd) + self._writedebug(user, passwd) + return (user, passwd) + + def _writedebug(self, user, passwd): + msg = _('http auth: user %s, password %s\n') + self.ui.debug(msg % (user, passwd and '*' * len(passwd) or 'not set')) + + def readauthtoken(self, uri): + # Read configuration + config = dict() + for key, val in self.ui.configitems('auth'): + if '.' not in key: + self.ui.warn(_("ignoring invalid [auth] key '%s'\n") % key) + continue + group, setting = key.split('.', 1) + gdict = config.setdefault(group, dict()) + if setting in ('username', 'cert', 'key'): + val = util.expandpath(val) + gdict[setting] = val + + # Find the best match + scheme, hostpath = uri.split('://', 1) + bestlen = 0 + bestauth = None + for auth in config.itervalues(): + prefix = auth.get('prefix') + if not prefix: + continue + p = prefix.split('://', 1) + if len(p) > 1: + schemes, prefix = [p[0]], p[1] + else: + schemes = (auth.get('schemes') or 'https').split() + if (prefix == '*' or hostpath.startswith(prefix)) and \ + len(prefix) > bestlen and scheme in schemes: + bestlen = len(prefix) + bestauth = auth + return bestauth + +class proxyhandler(urllib2.ProxyHandler): + def __init__(self, ui): + proxyurl = ui.config("http_proxy", "host") or os.getenv('http_proxy') + # XXX proxyauthinfo = None + + if proxyurl: + # proxy can be proper url or host[:port] + if not (proxyurl.startswith('http:') or + proxyurl.startswith('https:')): + proxyurl = 'http://' + proxyurl + '/' + snpqf = urlparse.urlsplit(proxyurl) + proxyscheme, proxynetloc, proxypath, proxyquery, proxyfrag = snpqf + hpup = netlocsplit(proxynetloc) + + proxyhost, proxyport, proxyuser, proxypasswd = hpup + if not proxyuser: + proxyuser = ui.config("http_proxy", "user") + proxypasswd = ui.config("http_proxy", "passwd") + + # see if we should use a proxy for this url + no_list = ["localhost", "127.0.0.1"] + no_list.extend([p.lower() for + p in ui.configlist("http_proxy", "no")]) + no_list.extend([p.strip().lower() for + p in os.getenv("no_proxy", '').split(',') + if p.strip()]) + # "http_proxy.always" config is for running tests on localhost + if ui.configbool("http_proxy", "always"): + self.no_list = [] + else: + self.no_list = no_list + + proxyurl = urlparse.urlunsplit(( + proxyscheme, netlocunsplit(proxyhost, proxyport, + proxyuser, proxypasswd or ''), + proxypath, proxyquery, proxyfrag)) + proxies = {'http': proxyurl, 'https': proxyurl} + ui.debug('proxying through http://%s:%s\n' % + (proxyhost, proxyport)) + else: + proxies = {} + + # urllib2 takes proxy values from the environment and those + # will take precedence if found, so drop them + for env in ["HTTP_PROXY", "http_proxy", "no_proxy"]: + try: + if env in os.environ: + del os.environ[env] + except OSError: + pass + + urllib2.ProxyHandler.__init__(self, proxies) + self.ui = ui + + def proxy_open(self, req, proxy, type_): + host = req.get_host().split(':')[0] + if host in self.no_list: + return None + + # work around a bug in Python < 2.4.2 + # (it leaves a "\n" at the end of Proxy-authorization headers) + baseclass = req.__class__ + class _request(baseclass): + def add_header(self, key, val): + if key.lower() == 'proxy-authorization': + val = val.strip() + 
return baseclass.add_header(self, key, val) + req.__class__ = _request + + return urllib2.ProxyHandler.proxy_open(self, req, proxy, type_) + +class httpsendfile(object): + """This is a wrapper around the objects returned by python's "open". + + Its purpose is to send file-like objects via HTTP and, to do so, it + defines a __len__ attribute to feed the Content-Length header. + """ + + def __init__(self, *args, **kwargs): + # We can't just "self._data = open(*args, **kwargs)" here because there + # is an "open" function defined in this module that shadows the global + # one + self._data = __builtin__.open(*args, **kwargs) + self.read = self._data.read + self.seek = self._data.seek + self.close = self._data.close + self.write = self._data.write + + def __len__(self): + return os.fstat(self._data.fileno()).st_size + +def _gen_sendfile(connection): + def _sendfile(self, data): + # send a file + if isinstance(data, httpsendfile): + # if auth required, some data sent twice, so rewind here + data.seek(0) + for chunk in util.filechunkiter(data): + connection.send(self, chunk) + else: + connection.send(self, data) + return _sendfile + +has_https = hasattr(urllib2, 'HTTPSHandler') +if has_https: + try: + # avoid using deprecated/broken FakeSocket in python 2.6 + import ssl + _ssl_wrap_socket = ssl.wrap_socket + CERT_REQUIRED = ssl.CERT_REQUIRED + except ImportError: + CERT_REQUIRED = 2 + + def _ssl_wrap_socket(sock, key_file, cert_file, + cert_reqs=CERT_REQUIRED, ca_certs=None): + if ca_certs: + raise util.Abort(_( + 'certificate checking requires Python 2.6')) + + ssl = socket.ssl(sock, key_file, cert_file) + return httplib.FakeSocket(sock, ssl) + + try: + _create_connection = socket.create_connection + except AttributeError: + _GLOBAL_DEFAULT_TIMEOUT = object() + + def _create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT, + source_address=None): + # lifted from Python 2.6 + + msg = "getaddrinfo returns an empty list" + host, port = address + for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM): + af, socktype, proto, canonname, sa = res + sock = None + try: + sock = socket.socket(af, socktype, proto) + if timeout is not _GLOBAL_DEFAULT_TIMEOUT: + sock.settimeout(timeout) + if source_address: + sock.bind(source_address) + sock.connect(sa) + return sock + + except socket.error, msg: + if sock is not None: + sock.close() + + raise socket.error, msg + +class httpconnection(keepalive.HTTPConnection): + # must be able to send big bundle as stream. + send = _gen_sendfile(keepalive.HTTPConnection) + + def connect(self): + if has_https and self.realhostport: # use CONNECT proxy + self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + self.sock.connect((self.host, self.port)) + if _generic_proxytunnel(self): + # we do not support client x509 certificates + self.sock = _ssl_wrap_socket(self.sock, None, None) + else: + keepalive.HTTPConnection.connect(self) + + def getresponse(self): + proxyres = getattr(self, 'proxyres', None) + if proxyres: + if proxyres.will_close: + self.close() + self.proxyres = None + return proxyres + return keepalive.HTTPConnection.getresponse(self) + +# general transaction handler to support different ways to handle +# HTTPS proxying before and after Python 2.6.3. 
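+#
+# Editor's sketch (illustrative, not part of upstream Mercurial): the CONNECT
+# tunnel built by _generic_proxytunnel() below amounts to this exchange with
+# the proxy, using the host:port recorded in h.realhostport:
+#
+#   >> CONNECT real.host:443 HTTP/1.0
+#   >> Proxy-Authorization: ...   (any proxy-* headers copied from the request)
+#   >>
+#   << HTTP/1.0 200 ...
+#
+# Only a 200 reply makes the helper return True, at which point the callers
+# wrap the tunnelled socket with SSL.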
+def _generic_start_transaction(handler, h, req): + if hasattr(req, '_tunnel_host') and req._tunnel_host: + tunnel_host = req._tunnel_host + if tunnel_host[:7] not in ['http://', 'https:/']: + tunnel_host = 'https://' + tunnel_host + new_tunnel = True + else: + tunnel_host = req.get_selector() + new_tunnel = False + + if new_tunnel or tunnel_host == req.get_full_url(): # has proxy + urlparts = urlparse.urlparse(tunnel_host) + if new_tunnel or urlparts[0] == 'https': # only use CONNECT for HTTPS + realhostport = urlparts[1] + if realhostport[-1] == ']' or ':' not in realhostport: + realhostport += ':443' + + h.realhostport = realhostport + h.headers = req.headers.copy() + h.headers.update(handler.parent.addheaders) + return + + h.realhostport = None + h.headers = None + +def _generic_proxytunnel(self): + proxyheaders = dict( + [(x, self.headers[x]) for x in self.headers + if x.lower().startswith('proxy-')]) + self._set_hostport(self.host, self.port) + self.send('CONNECT %s HTTP/1.0\r\n' % self.realhostport) + for header in proxyheaders.iteritems(): + self.send('%s: %s\r\n' % header) + self.send('\r\n') + + # majority of the following code is duplicated from + # httplib.HTTPConnection as there are no adequate places to + # override functions to provide the needed functionality + res = self.response_class(self.sock, + strict=self.strict, + method=self._method) + + while True: + version, status, reason = res._read_status() + if status != httplib.CONTINUE: + break + while True: + skip = res.fp.readline().strip() + if not skip: + break + res.status = status + res.reason = reason.strip() + + if res.status == 200: + while True: + line = res.fp.readline() + if line == '\r\n': + break + return True + + if version == 'HTTP/1.0': + res.version = 10 + elif version.startswith('HTTP/1.'): + res.version = 11 + elif version == 'HTTP/0.9': + res.version = 9 + else: + raise httplib.UnknownProtocol(version) + + if res.version == 9: + res.length = None + res.chunked = 0 + res.will_close = 1 + res.msg = httplib.HTTPMessage(cStringIO.StringIO()) + return False + + res.msg = httplib.HTTPMessage(res.fp) + res.msg.fp = None + + # are we using the chunked-style of transfer encoding? + trenc = res.msg.getheader('transfer-encoding') + if trenc and trenc.lower() == "chunked": + res.chunked = 1 + res.chunk_left = None + else: + res.chunked = 0 + + # will the connection close at the end of the response? + res.will_close = res._check_close() + + # do we have a Content-Length? + # NOTE: RFC 2616, S4.4, #3 says we ignore this if tr_enc is "chunked" + length = res.msg.getheader('content-length') + if length and not res.chunked: + try: + res.length = int(length) + except ValueError: + res.length = None + else: + if res.length < 0: # ignore nonsensical negative lengths + res.length = None + else: + res.length = None + + # does the body have a fixed length? (of zero) + if (status == httplib.NO_CONTENT or status == httplib.NOT_MODIFIED or + 100 <= status < 200 or # 1xx codes + res._method == 'HEAD'): + res.length = 0 + + # if the connection remains open, and we aren't using chunked, and + # a content-length was not provided, then assume that the connection + # WILL close. 
+ if (not res.will_close and + not res.chunked and + res.length is None): + res.will_close = 1 + + self.proxyres = res + + return False + +class httphandler(keepalive.HTTPHandler): + def http_open(self, req): + return self.do_open(httpconnection, req) + + def _start_transaction(self, h, req): + _generic_start_transaction(self, h, req) + return keepalive.HTTPHandler._start_transaction(self, h, req) + +def _verifycert(cert, hostname): + '''Verify that cert (in socket.getpeercert() format) matches hostname. + CRLs and subjectAltName are not handled. + + Returns error message if any problems are found and None on success. + ''' + if not cert: + return _('no certificate received') + dnsname = hostname.lower() + for s in cert.get('subject', []): + key, value = s[0] + if key == 'commonName': + certname = value.lower() + if (certname == dnsname or + '.' in dnsname and certname == '*.' + dnsname.split('.', 1)[1]): + return None + return _('certificate is for %s') % certname + return _('no commonName found in certificate') + +if has_https: + class BetterHTTPS(httplib.HTTPSConnection): + send = keepalive.safesend + + def connect(self): + if hasattr(self, 'ui'): + cacerts = self.ui.config('web', 'cacerts') + else: + cacerts = None + + if cacerts: + sock = _create_connection((self.host, self.port)) + self.sock = _ssl_wrap_socket(sock, self.key_file, + self.cert_file, cert_reqs=CERT_REQUIRED, + ca_certs=cacerts) + msg = _verifycert(self.sock.getpeercert(), self.host) + if msg: + raise util.Abort(_('%s certificate error: %s') % + (self.host, msg)) + self.ui.debug('%s certificate successfully verified\n' % + self.host) + else: + self.ui.warn(_("warning: %s certificate not verified " + "(check web.cacerts config setting)\n") % + self.host) + httplib.HTTPSConnection.connect(self) + + class httpsconnection(BetterHTTPS): + response_class = keepalive.HTTPResponse + # must be able to send big bundle as stream. 
+ send = _gen_sendfile(BetterHTTPS) + getresponse = keepalive.wrapgetresponse(httplib.HTTPSConnection) + + def connect(self): + if self.realhostport: # use CONNECT proxy + self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + self.sock.connect((self.host, self.port)) + if _generic_proxytunnel(self): + self.sock = _ssl_wrap_socket(self.sock, self.key_file, + self.cert_file) + else: + BetterHTTPS.connect(self) + + class httpshandler(keepalive.KeepAliveHandler, urllib2.HTTPSHandler): + def __init__(self, ui): + keepalive.KeepAliveHandler.__init__(self) + urllib2.HTTPSHandler.__init__(self) + self.ui = ui + self.pwmgr = passwordmgr(self.ui) + + def _start_transaction(self, h, req): + _generic_start_transaction(self, h, req) + return keepalive.KeepAliveHandler._start_transaction(self, h, req) + + def https_open(self, req): + self.auth = self.pwmgr.readauthtoken(req.get_full_url()) + return self.do_open(self._makeconnection, req) + + def _makeconnection(self, host, port=None, *args, **kwargs): + keyfile = None + certfile = None + + if len(args) >= 1: # key_file + keyfile = args[0] + if len(args) >= 2: # cert_file + certfile = args[1] + args = args[2:] + + # if the user has specified different key/cert files in + # hgrc, we prefer these + if self.auth and 'key' in self.auth and 'cert' in self.auth: + keyfile = self.auth['key'] + certfile = self.auth['cert'] + + conn = httpsconnection(host, port, keyfile, certfile, *args, **kwargs) + conn.ui = self.ui + return conn + +class httpdigestauthhandler(urllib2.HTTPDigestAuthHandler): + def __init__(self, *args, **kwargs): + urllib2.HTTPDigestAuthHandler.__init__(self, *args, **kwargs) + self.retried_req = None + + def reset_retry_count(self): + # Python 2.6.5 will call this on 401 or 407 errors and thus loop + # forever. We disable reset_retry_count completely and reset in + # http_error_auth_reqed instead. + pass + + def http_error_auth_reqed(self, auth_header, host, req, headers): + # Reset the retry counter once for each request. + if req is not self.retried_req: + self.retried_req = req + self.retried = 0 + # In python < 2.5 AbstractDigestAuthHandler raises a ValueError if + # it doesn't know about the auth type requested. This can happen if + # somebody is using BasicAuth and types a bad password. + try: + return urllib2.HTTPDigestAuthHandler.http_error_auth_reqed( + self, auth_header, host, req, headers) + except ValueError, inst: + arg = inst.args[0] + if arg.startswith("AbstractDigestAuthHandler doesn't know "): + return + raise + +class httpbasicauthhandler(urllib2.HTTPBasicAuthHandler): + def __init__(self, *args, **kwargs): + urllib2.HTTPBasicAuthHandler.__init__(self, *args, **kwargs) + self.retried_req = None + + def reset_retry_count(self): + # Python 2.6.5 will call this on 401 or 407 errors and thus loop + # forever. We disable reset_retry_count completely and reset in + # http_error_auth_reqed instead. + pass + + def http_error_auth_reqed(self, auth_header, host, req, headers): + # Reset the retry counter once for each request. + if req is not self.retried_req: + self.retried_req = req + self.retried = 0 + return urllib2.HTTPBasicAuthHandler.http_error_auth_reqed( + self, auth_header, host, req, headers) + +def getauthinfo(path): + scheme, netloc, urlpath, query, frag = urlparse.urlsplit(path) + if not urlpath: + urlpath = '/' + if scheme != 'file': + # XXX: why are we quoting the path again with some smart + # heuristic here? 
Anyway, it cannot be done with file:// + # urls since path encoding is os/fs dependent (see + # urllib.pathname2url() for details). + urlpath = quotepath(urlpath) + host, port, user, passwd = netlocsplit(netloc) + + # urllib cannot handle URLs with embedded user or passwd + url = urlparse.urlunsplit((scheme, netlocunsplit(host, port), + urlpath, query, frag)) + if user: + netloc = host + if port: + netloc += ':' + port + # Python < 2.4.3 uses only the netloc to search for a password + authinfo = (None, (url, netloc), user, passwd or '') + else: + authinfo = None + return url, authinfo + +handlerfuncs = [] + +def opener(ui, authinfo=None): + ''' + construct an opener suitable for urllib2 + authinfo will be added to the password manager + ''' + handlers = [httphandler()] + if has_https: + handlers.append(httpshandler(ui)) + + handlers.append(proxyhandler(ui)) + + passmgr = passwordmgr(ui) + if authinfo is not None: + passmgr.add_password(*authinfo) + user, passwd = authinfo[2:4] + ui.debug('http auth: user %s, password %s\n' % + (user, passwd and '*' * len(passwd) or 'not set')) + + handlers.extend((httpbasicauthhandler(passmgr), + httpdigestauthhandler(passmgr))) + handlers.extend([h(ui, passmgr) for h in handlerfuncs]) + opener = urllib2.build_opener(*handlers) + + # 1.0 here is the _protocol_ version + opener.addheaders = [('User-agent', 'mercurial/proto-1.0')] + opener.addheaders.append(('Accept', 'application/mercurial-0.1')) + return opener + +scheme_re = re.compile(r'^([a-zA-Z0-9+-.]+)://') + +def open(ui, url, data=None): + scheme = None + m = scheme_re.search(url) + if m: + scheme = m.group(1).lower() + if not scheme: + path = util.normpath(os.path.abspath(url)) + url = 'file://' + urllib.pathname2url(path) + authinfo = None + else: + url, authinfo = getauthinfo(url) + return opener(ui, authinfo).open(url, data) diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/url.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/url.pyo Binary files differnew file mode 100644 index 0000000..c6e73dd --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/url.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/util.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/util.py new file mode 100644 index 0000000..d435108 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/util.py @@ -0,0 +1,1538 @@ +# util.py - Mercurial utility functions and platform specfic implementations +# +# Copyright 2005 K. Thananchayan <thananck@yahoo.com> +# Copyright 2005-2007 Matt Mackall <mpm@selenic.com> +# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com> +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +"""Mercurial utility functions and platform specfic implementations. + +This contains helper routines that are independent of the SCM core and +hide platform-specific details from the core. +""" + +from i18n import _ +import error, osutil, encoding +import errno, re, shutil, sys, tempfile, traceback +import os, stat, time, calendar, textwrap, unicodedata, signal +import imp, socket + +# Python compatibility + +def sha1(s): + return _fastsha1(s) + +def _fastsha1(s): + # This function will import sha1 from hashlib or sha (whichever is + # available) and overwrite itself with it on the first call. + # Subsequent calls will go directly to the imported function. 
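+ # Editor's note (illustrative): after the first call both util.sha1 and
+ # util._fastsha1 are rebound to hashlib's sha1 (or sha.sha on Python < 2.5),
+ # so the version check below runs only once per process.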
+ if sys.version_info >= (2, 5): + from hashlib import sha1 as _sha1 + else: + from sha import sha as _sha1 + global _fastsha1, sha1 + _fastsha1 = sha1 = _sha1 + return _sha1(s) + +import __builtin__ + +if sys.version_info[0] < 3: + def fakebuffer(sliceable, offset=0): + return sliceable[offset:] +else: + def fakebuffer(sliceable, offset=0): + return memoryview(sliceable)[offset:] +try: + buffer +except NameError: + __builtin__.buffer = fakebuffer + +import subprocess +closefds = os.name == 'posix' + +def popen2(cmd, env=None, newlines=False): + # Setting bufsize to -1 lets the system decide the buffer size. + # The default for bufsize is 0, meaning unbuffered. This leads to + # poor performance on Mac OS X: http://bugs.python.org/issue4194 + p = subprocess.Popen(cmd, shell=True, bufsize=-1, + close_fds=closefds, + stdin=subprocess.PIPE, stdout=subprocess.PIPE, + universal_newlines=newlines, + env=env) + return p.stdin, p.stdout + +def popen3(cmd, env=None, newlines=False): + p = subprocess.Popen(cmd, shell=True, bufsize=-1, + close_fds=closefds, + stdin=subprocess.PIPE, stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + universal_newlines=newlines, + env=env) + return p.stdin, p.stdout, p.stderr + +def version(): + """Return version information if available.""" + try: + import __version__ + return __version__.version + except ImportError: + return 'unknown' + +# used by parsedate +defaultdateformats = ( + '%Y-%m-%d %H:%M:%S', + '%Y-%m-%d %I:%M:%S%p', + '%Y-%m-%d %H:%M', + '%Y-%m-%d %I:%M%p', + '%Y-%m-%d', + '%m-%d', + '%m/%d', + '%m/%d/%y', + '%m/%d/%Y', + '%a %b %d %H:%M:%S %Y', + '%a %b %d %I:%M:%S%p %Y', + '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822" + '%b %d %H:%M:%S %Y', + '%b %d %I:%M:%S%p %Y', + '%b %d %H:%M:%S', + '%b %d %I:%M:%S%p', + '%b %d %H:%M', + '%b %d %I:%M%p', + '%b %d %Y', + '%b %d', + '%H:%M:%S', + '%I:%M:%S%p', + '%H:%M', + '%I:%M%p', +) + +extendeddateformats = defaultdateformats + ( + "%Y", + "%Y-%m", + "%b", + "%b %Y", + ) + +def cachefunc(func): + '''cache the result of function calls''' + # XXX doesn't handle keywords args + cache = {} + if func.func_code.co_argcount == 1: + # we gain a small amount of time because + # we don't need to pack/unpack the list + def f(arg): + if arg not in cache: + cache[arg] = func(arg) + return cache[arg] + else: + def f(*args): + if args not in cache: + cache[args] = func(*args) + return cache[args] + + return f + +def lrucachefunc(func): + '''cache most recent results of function calls''' + cache = {} + order = [] + if func.func_code.co_argcount == 1: + def f(arg): + if arg not in cache: + if len(cache) > 20: + del cache[order.pop(0)] + cache[arg] = func(arg) + else: + order.remove(arg) + order.append(arg) + return cache[arg] + else: + def f(*args): + if args not in cache: + if len(cache) > 20: + del cache[order.pop(0)] + cache[args] = func(*args) + else: + order.remove(args) + order.append(args) + return cache[args] + + return f + +class propertycache(object): + def __init__(self, func): + self.func = func + self.name = func.__name__ + def __get__(self, obj, type=None): + result = self.func(obj) + setattr(obj, self.name, result) + return result + +def pipefilter(s, cmd): + '''filter string S through command CMD, returning its output''' + p = subprocess.Popen(cmd, shell=True, close_fds=closefds, + stdin=subprocess.PIPE, stdout=subprocess.PIPE) + pout, perr = p.communicate(s) + return pout + +def tempfilter(s, cmd): + '''filter string S through a pair of temporary files with CMD. 
+ CMD is used as a template to create the real command to be run, + with the strings INFILE and OUTFILE replaced by the real names of + the temporary files generated.''' + inname, outname = None, None + try: + infd, inname = tempfile.mkstemp(prefix='hg-filter-in-') + fp = os.fdopen(infd, 'wb') + fp.write(s) + fp.close() + outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-') + os.close(outfd) + cmd = cmd.replace('INFILE', inname) + cmd = cmd.replace('OUTFILE', outname) + code = os.system(cmd) + if sys.platform == 'OpenVMS' and code & 1: + code = 0 + if code: + raise Abort(_("command '%s' failed: %s") % + (cmd, explain_exit(code))) + return open(outname, 'rb').read() + finally: + try: + if inname: + os.unlink(inname) + except: + pass + try: + if outname: + os.unlink(outname) + except: + pass + +filtertable = { + 'tempfile:': tempfilter, + 'pipe:': pipefilter, + } + +def filter(s, cmd): + "filter a string through a command that transforms its input to its output" + for name, fn in filtertable.iteritems(): + if cmd.startswith(name): + return fn(s, cmd[len(name):].lstrip()) + return pipefilter(s, cmd) + +def binary(s): + """return true if a string is binary data""" + return bool(s and '\0' in s) + +def increasingchunks(source, min=1024, max=65536): + '''return no less than min bytes per chunk while data remains, + doubling min after each chunk until it reaches max''' + def log2(x): + if not x: + return 0 + i = 0 + while x: + x >>= 1 + i += 1 + return i - 1 + + buf = [] + blen = 0 + for chunk in source: + buf.append(chunk) + blen += len(chunk) + if blen >= min: + if min < max: + min = min << 1 + nmin = 1 << log2(blen) + if nmin > min: + min = nmin + if min > max: + min = max + yield ''.join(buf) + blen = 0 + buf = [] + if buf: + yield ''.join(buf) + +Abort = error.Abort + +def always(fn): + return True + +def never(fn): + return False + +def pathto(root, n1, n2): + '''return the relative path from one place to another. + root should use os.sep to separate directories + n1 should use os.sep to separate directories + n2 should use "/" to separate directories + returns an os.sep-separated path. + + If n1 is a relative path, it's assumed it's + relative to root. + n2 should always be relative to root. + ''' + if not n1: + return localpath(n2) + if os.path.isabs(n1): + if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]: + return os.path.join(root, localpath(n2)) + n2 = '/'.join((pconvert(root), n2)) + a, b = splitpath(n1), n2.split('/') + a.reverse() + b.reverse() + while a and b and a[-1] == b[-1]: + a.pop() + b.pop() + b.reverse() + return os.sep.join((['..'] * len(a)) + b) or '.' + +def canonpath(root, cwd, myname, auditor=None): + """return the canonical path of myname, given cwd and root""" + if endswithsep(root): + rootsep = root + else: + rootsep = root + os.sep + name = myname + if not os.path.isabs(name): + name = os.path.join(root, cwd, name) + name = os.path.normpath(name) + if auditor is None: + auditor = path_auditor(root) + if name != rootsep and name.startswith(rootsep): + name = name[len(rootsep):] + auditor(name) + return pconvert(name) + elif name == root: + return '' + else: + # Determine whether `name' is in the hierarchy at or beneath `root', + # by iterating name=dirname(name) until that causes no change (can't + # check name == '/', because that doesn't work on windows). For each + # `name', compare dev/inode numbers. If they match, the list `rel' + # holds the reversed list of components making up the relative file + # name we want. 
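+ # Editor's note (illustrative): this branch handles names that do not share
+ # root's string prefix (e.g. when root is reached through a symlink). Each
+ # pass of the loop below strips one trailing component with os.path.split()
+ # and records it in `rel`; samestat() marks the point where the walk reaches
+ # `root` itself, and the collected components are reversed and joined into
+ # the relative name that is returned.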
+ root_st = os.stat(root) + rel = [] + while True: + try: + name_st = os.stat(name) + except OSError: + break + if samestat(name_st, root_st): + if not rel: + # name was actually the same as root (maybe a symlink) + return '' + rel.reverse() + name = os.path.join(*rel) + auditor(name) + return pconvert(name) + dirname, basename = os.path.split(name) + rel.append(basename) + if dirname == name: + break + name = dirname + + raise Abort('%s not under root' % myname) + +_hgexecutable = None + +def main_is_frozen(): + """return True if we are a frozen executable. + + The code supports py2exe (most common, Windows only) and tools/freeze + (portable, not much used). + """ + return (hasattr(sys, "frozen") or # new py2exe + hasattr(sys, "importers") or # old py2exe + imp.is_frozen("__main__")) # tools/freeze + +def hgexecutable(): + """return location of the 'hg' executable. + + Defaults to $HG or 'hg' in the search path. + """ + if _hgexecutable is None: + hg = os.environ.get('HG') + if hg: + set_hgexecutable(hg) + elif main_is_frozen(): + set_hgexecutable(sys.executable) + else: + exe = find_exe('hg') or os.path.basename(sys.argv[0]) + set_hgexecutable(exe) + return _hgexecutable + +def set_hgexecutable(path): + """set location of the 'hg' executable""" + global _hgexecutable + _hgexecutable = path + +def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None, out=None): + '''enhanced shell command execution. + run with environment maybe modified, maybe in different dir. + + if command fails and onerr is None, return status. if ui object, + print error message and return status, else raise onerr object as + exception. + + if out is specified, it is assumed to be a file-like object that has a + write() method. stdout and stderr will be redirected to out.''' + def py2shell(val): + 'convert python object into string that is useful to shell' + if val is None or val is False: + return '0' + if val is True: + return '1' + return str(val) + origcmd = cmd + cmd = quotecommand(cmd) + env = dict(os.environ) + env.update((k, py2shell(v)) for k, v in environ.iteritems()) + env['HG'] = hgexecutable() + if out is None: + rc = subprocess.call(cmd, shell=True, close_fds=closefds, + env=env, cwd=cwd) + else: + proc = subprocess.Popen(cmd, shell=True, close_fds=closefds, + env=env, cwd=cwd, stdout=subprocess.PIPE, + stderr=subprocess.STDOUT) + for line in proc.stdout: + out.write(line) + proc.wait() + rc = proc.returncode + if sys.platform == 'OpenVMS' and rc & 1: + rc = 0 + if rc and onerr: + errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]), + explain_exit(rc)[0]) + if errprefix: + errmsg = '%s: %s' % (errprefix, errmsg) + try: + onerr.warn(errmsg + '\n') + except AttributeError: + raise onerr(errmsg) + return rc + +def checksignature(func): + '''wrap a function with code to check for calling errors''' + def check(*args, **kwargs): + try: + return func(*args, **kwargs) + except TypeError: + if len(traceback.extract_tb(sys.exc_info()[2])) == 1: + raise error.SignatureError + raise + + return check + +def unlink(f): + """unlink and remove the directory if it is empty""" + os.unlink(f) + # try removing directories that might now be empty + try: + os.removedirs(os.path.dirname(f)) + except OSError: + pass + +def copyfile(src, dest): + "copy a file, preserving mode and atime/mtime" + if os.path.islink(src): + try: + os.unlink(dest) + except: + pass + os.symlink(os.readlink(src), dest) + else: + try: + shutil.copyfile(src, dest) + shutil.copystat(src, dest) + except shutil.Error, inst: + raise 
Abort(str(inst)) + +def copyfiles(src, dst, hardlink=None): + """Copy a directory tree using hardlinks if possible""" + + if hardlink is None: + hardlink = (os.stat(src).st_dev == + os.stat(os.path.dirname(dst)).st_dev) + + num = 0 + if os.path.isdir(src): + os.mkdir(dst) + for name, kind in osutil.listdir(src): + srcname = os.path.join(src, name) + dstname = os.path.join(dst, name) + hardlink, n = copyfiles(srcname, dstname, hardlink) + num += n + else: + if hardlink: + try: + os_link(src, dst) + except (IOError, OSError): + hardlink = False + shutil.copy(src, dst) + else: + shutil.copy(src, dst) + num += 1 + + return hardlink, num + +class path_auditor(object): + '''ensure that a filesystem path contains no banned components. + the following properties of a path are checked: + + - under top-level .hg + - starts at the root of a windows drive + - contains ".." + - traverses a symlink (e.g. a/symlink_here/b) + - inside a nested repository (a callback can be used to approve + some nested repositories, e.g., subrepositories) + ''' + + def __init__(self, root, callback=None): + self.audited = set() + self.auditeddir = set() + self.root = root + self.callback = callback + + def __call__(self, path): + if path in self.audited: + return + normpath = os.path.normcase(path) + parts = splitpath(normpath) + if (os.path.splitdrive(path)[0] + or parts[0].lower() in ('.hg', '.hg.', '') + or os.pardir in parts): + raise Abort(_("path contains illegal component: %s") % path) + if '.hg' in path.lower(): + lparts = [p.lower() for p in parts] + for p in '.hg', '.hg.': + if p in lparts[1:]: + pos = lparts.index(p) + base = os.path.join(*parts[:pos]) + raise Abort(_('path %r is inside repo %r') % (path, base)) + def check(prefix): + curpath = os.path.join(self.root, prefix) + try: + st = os.lstat(curpath) + except OSError, err: + # EINVAL can be raised as invalid path syntax under win32. + # They must be ignored for patterns can be checked too. + if err.errno not in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL): + raise + else: + if stat.S_ISLNK(st.st_mode): + raise Abort(_('path %r traverses symbolic link %r') % + (path, prefix)) + elif (stat.S_ISDIR(st.st_mode) and + os.path.isdir(os.path.join(curpath, '.hg'))): + if not self.callback or not self.callback(curpath): + raise Abort(_('path %r is inside repo %r') % + (path, prefix)) + parts.pop() + prefixes = [] + while parts: + prefix = os.sep.join(parts) + if prefix in self.auditeddir: + break + check(prefix) + prefixes.append(prefix) + parts.pop() + + self.audited.add(path) + # only add prefixes to the cache after checking everything: we don't + # want to add "foo/bar/baz" before checking if there's a "foo/.hg" + self.auditeddir.update(prefixes) + +def nlinks(pathname): + """Return number of hardlinks for the given file.""" + return os.lstat(pathname).st_nlink + +if hasattr(os, 'link'): + os_link = os.link +else: + def os_link(src, dst): + raise OSError(0, _("Hardlinks not supported")) + +def lookup_reg(key, name=None, scope=None): + return None + +def hidewindow(): + """Hide current shell window. + + Used to hide the window opened when starting asynchronous + child process under Windows, unneeded on other systems. 
+ """ + pass + +if os.name == 'nt': + from windows import * +else: + from posix import * + +def makelock(info, pathname): + try: + return os.symlink(info, pathname) + except OSError, why: + if why.errno == errno.EEXIST: + raise + except AttributeError: # no symlink in os + pass + + ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL) + os.write(ld, info) + os.close(ld) + +def readlock(pathname): + try: + return os.readlink(pathname) + except OSError, why: + if why.errno not in (errno.EINVAL, errno.ENOSYS): + raise + except AttributeError: # no symlink in os + pass + return posixfile(pathname).read() + +def fstat(fp): + '''stat file object that may not have fileno method.''' + try: + return os.fstat(fp.fileno()) + except AttributeError: + return os.stat(fp.name) + +# File system features + +def checkcase(path): + """ + Check whether the given path is on a case-sensitive filesystem + + Requires a path (like /foo/.hg) ending with a foldable final + directory component. + """ + s1 = os.stat(path) + d, b = os.path.split(path) + p2 = os.path.join(d, b.upper()) + if path == p2: + p2 = os.path.join(d, b.lower()) + try: + s2 = os.stat(p2) + if s2 == s1: + return False + return True + except: + return True + +_fspathcache = {} +def fspath(name, root): + '''Get name in the case stored in the filesystem + + The name is either relative to root, or it is an absolute path starting + with root. Note that this function is unnecessary, and should not be + called, for case-sensitive filesystems (simply because it's expensive). + ''' + # If name is absolute, make it relative + if name.lower().startswith(root.lower()): + l = len(root) + if name[l] == os.sep or name[l] == os.altsep: + l = l + 1 + name = name[l:] + + if not os.path.lexists(os.path.join(root, name)): + return None + + seps = os.sep + if os.altsep: + seps = seps + os.altsep + # Protect backslashes. This gets silly very quickly. + seps.replace('\\','\\\\') + pattern = re.compile(r'([^%s]+)|([%s]+)' % (seps, seps)) + dir = os.path.normcase(os.path.normpath(root)) + result = [] + for part, sep in pattern.findall(name): + if sep: + result.append(sep) + continue + + if dir not in _fspathcache: + _fspathcache[dir] = os.listdir(dir) + contents = _fspathcache[dir] + + lpart = part.lower() + lenp = len(part) + for n in contents: + if lenp == len(n) and n.lower() == lpart: + result.append(n) + break + else: + # Cannot happen, as the file exists! + result.append(part) + dir = os.path.join(dir, lpart) + + return ''.join(result) + +def checkexec(path): + """ + Check whether the given path is on a filesystem with UNIX-like exec flags + + Requires a directory (like /foo/.hg) + """ + + # VFAT on some Linux versions can flip mode but it doesn't persist + # a FS remount. Frequently we can detect it if files are created + # with exec bit on. 
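+ # Editor's note (illustrative): the probe below creates a scratch file,
+ # records whether it is born with an exec bit already set, flips the exec
+ # bits, and reports an exec-capable filesystem only if neither anomaly is
+ # observed.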
+ + try: + EXECFLAGS = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH + fh, fn = tempfile.mkstemp(dir=path, prefix='hg-checkexec-') + try: + os.close(fh) + m = os.stat(fn).st_mode & 0777 + new_file_has_exec = m & EXECFLAGS + os.chmod(fn, m ^ EXECFLAGS) + exec_flags_cannot_flip = ((os.stat(fn).st_mode & 0777) == m) + finally: + os.unlink(fn) + except (IOError, OSError): + # we don't care, the user probably won't be able to commit anyway + return False + return not (new_file_has_exec or exec_flags_cannot_flip) + +def checklink(path): + """check whether the given path is on a symlink-capable filesystem""" + # mktemp is not racy because symlink creation will fail if the + # file already exists + name = tempfile.mktemp(dir=path, prefix='hg-checklink-') + try: + os.symlink(".", name) + os.unlink(name) + return True + except (OSError, AttributeError): + return False + +def checknlink(testfile): + '''check whether hardlink count reporting works properly''' + + # testfile may be open, so we need a separate file for checking to + # work around issue2543 (or testfile may get lost on Samba shares) + f1 = testfile + ".hgtmp1" + if os.path.lexists(f1): + return False + try: + posixfile(f1, 'w').close() + except IOError: + return False + + f2 = testfile + ".hgtmp2" + fd = None + try: + try: + os_link(f1, f2) + except OSError: + return False + + # nlinks() may behave differently for files on Windows shares if + # the file is open. + fd = open(f2) + return nlinks(f2) > 1 + finally: + if fd is not None: + fd.close() + for f in (f1, f2): + try: + os.unlink(f) + except OSError: + pass + + return False + +def endswithsep(path): + '''Check path ends with os.sep or os.altsep.''' + return path.endswith(os.sep) or os.altsep and path.endswith(os.altsep) + +def splitpath(path): + '''Split path by os.sep. + Note that this function does not use os.altsep because this is + an alternative of simple "xxx.split(os.sep)". + It is recommended to use os.path.normpath() before using this + function if need.''' + return path.split(os.sep) + +def gui(): + '''Are we running in a GUI?''' + return os.name == "nt" or os.name == "mac" or os.environ.get("DISPLAY") + +def mktempcopy(name, emptyok=False, createmode=None): + """Create a temporary file with the same contents from name + + The permission bits are copied from the original file. + + If the temporary file is going to be truncated immediately, you + can use emptyok=True as an optimization. + + Returns the name of the temporary file. + """ + d, fn = os.path.split(name) + fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d) + os.close(fd) + # Temporary files are created with mode 0600, which is usually not + # what we want. If the original file already exists, just copy + # its mode. Otherwise, manually obey umask. 
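+ # Editor's note (illustrative): with a typical umask of 022 the fallback
+ # below computes ~022 & 0666 == 0644, i.e. the temporary copy is readable
+ # by everyone but writable only by its owner.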
+ try: + st_mode = os.lstat(name).st_mode & 0777 + except OSError, inst: + if inst.errno != errno.ENOENT: + raise + st_mode = createmode + if st_mode is None: + st_mode = ~umask + st_mode &= 0666 + os.chmod(temp, st_mode) + if emptyok: + return temp + try: + try: + ifp = posixfile(name, "rb") + except IOError, inst: + if inst.errno == errno.ENOENT: + return temp + if not getattr(inst, 'filename', None): + inst.filename = name + raise + ofp = posixfile(temp, "wb") + for chunk in filechunkiter(ifp): + ofp.write(chunk) + ifp.close() + ofp.close() + except: + try: os.unlink(temp) + except: pass + raise + return temp + +class atomictempfile(object): + """file-like object that atomically updates a file + + All writes will be redirected to a temporary copy of the original + file. When rename is called, the copy is renamed to the original + name, making the changes visible. + """ + def __init__(self, name, mode='w+b', createmode=None): + self.__name = name + self._fp = None + self.temp = mktempcopy(name, emptyok=('w' in mode), + createmode=createmode) + self._fp = posixfile(self.temp, mode) + + def __getattr__(self, name): + return getattr(self._fp, name) + + def rename(self): + if not self._fp.closed: + self._fp.close() + rename(self.temp, localpath(self.__name)) + + def __del__(self): + if not self._fp: + return + if not self._fp.closed: + try: + os.unlink(self.temp) + except: pass + self._fp.close() + +def makedirs(name, mode=None): + """recursive directory creation with parent mode inheritance""" + parent = os.path.abspath(os.path.dirname(name)) + try: + os.mkdir(name) + if mode is not None: + os.chmod(name, mode) + return + except OSError, err: + if err.errno == errno.EEXIST: + return + if not name or parent == name or err.errno != errno.ENOENT: + raise + makedirs(parent, mode) + makedirs(name, mode) + +class opener(object): + """Open files relative to a base directory + + This class is used to hide the details of COW semantics and + remote file access from higher level code. + """ + def __init__(self, base, audit=True): + self.base = base + if audit: + self.auditor = path_auditor(base) + else: + self.auditor = always + self.createmode = None + self._trustnlink = None + + @propertycache + def _can_symlink(self): + return checklink(self.base) + + def _fixfilemode(self, name): + if self.createmode is None: + return + os.chmod(name, self.createmode & 0666) + + def __call__(self, path, mode="r", text=False, atomictemp=False): + self.auditor(path) + f = os.path.join(self.base, path) + + if not text and "b" not in mode: + mode += "b" # for that other OS + + nlink = -1 + st_mode = None + dirname, basename = os.path.split(f) + # If basename is empty, then the path is malformed because it points + # to a directory. Let the posixfile() call below raise IOError. + if basename and mode not in ('r', 'rb'): + if atomictemp: + if not os.path.isdir(dirname): + makedirs(dirname, self.createmode) + return atomictempfile(f, mode, self.createmode) + try: + if 'w' in mode: + st_mode = os.lstat(f).st_mode & 0777 + os.unlink(f) + nlink = 0 + else: + # nlinks() may behave differently for files on Windows + # shares if the file is open. 
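# A minimal usage sketch for the opener/atomictempfile pair defined in this
# module (the base directory and file name are hypothetical): atomictemp=True
# routes the write through a temporary copy, and rename() publishes it
# atomically under the requested name.
from mercurial import util
op = util.opener('/path/to/repo/.hg')
fp = op('store/example', 'w', atomictemp=True)
fp.write('some data\n')
fp.rename()   # atomically replaces .hg/store/example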
+ fd = open(f) + nlink = nlinks(f) + fd.close() + except (OSError, IOError): + nlink = 0 + if not os.path.isdir(dirname): + makedirs(dirname, self.createmode) + if nlink > 0: + if self._trustnlink is None: + self._trustnlink = nlink > 1 or checknlink(f) + if nlink > 1 or not self._trustnlink: + rename(mktempcopy(f), f) + fp = posixfile(f, mode) + if nlink == 0: + if st_mode is None: + self._fixfilemode(f) + else: + os.chmod(f, st_mode) + return fp + + def symlink(self, src, dst): + self.auditor(dst) + linkname = os.path.join(self.base, dst) + try: + os.unlink(linkname) + except OSError: + pass + + dirname = os.path.dirname(linkname) + if not os.path.exists(dirname): + makedirs(dirname, self.createmode) + + if self._can_symlink: + try: + os.symlink(src, linkname) + except OSError, err: + raise OSError(err.errno, _('could not symlink to %r: %s') % + (src, err.strerror), linkname) + else: + f = self(dst, "w") + f.write(src) + f.close() + self._fixfilemode(dst) + +class chunkbuffer(object): + """Allow arbitrary sized chunks of data to be efficiently read from an + iterator over chunks of arbitrary size.""" + + def __init__(self, in_iter): + """in_iter is the iterator that's iterating over the input chunks. + targetsize is how big a buffer to try to maintain.""" + def splitbig(chunks): + for chunk in chunks: + if len(chunk) > 2**20: + pos = 0 + while pos < len(chunk): + end = pos + 2 ** 18 + yield chunk[pos:end] + pos = end + else: + yield chunk + self.iter = splitbig(in_iter) + self._queue = [] + + def read(self, l): + """Read L bytes of data from the iterator of chunks of data. + Returns less than L bytes if the iterator runs dry.""" + left = l + buf = '' + queue = self._queue + while left > 0: + # refill the queue + if not queue: + target = 2**18 + for chunk in self.iter: + queue.append(chunk) + target -= len(chunk) + if target <= 0: + break + if not queue: + break + + chunk = queue.pop(0) + left -= len(chunk) + if left < 0: + queue.insert(0, chunk[left:]) + buf += chunk[:left] + else: + buf += chunk + + return buf + +def filechunkiter(f, size=65536, limit=None): + """Create a generator that produces the data in the file size + (default 65536) bytes at a time, up to optional limit (default is + to read all data). Chunks may be less than size bytes if the + chunk is the last chunk in the file, or the file is a socket or + some other type of file that sometimes reads less data than is + requested.""" + assert size >= 0 + assert limit is None or limit >= 0 + while True: + if limit is None: + nbytes = size + else: + nbytes = min(limit, size) + s = nbytes and f.read(nbytes) + if not s: + break + if limit: + limit -= len(s) + yield s + +def makedate(): + lt = time.localtime() + if lt[8] == 1 and time.daylight: + tz = time.altzone + else: + tz = time.timezone + t = time.mktime(lt) + if t < 0: + hint = _("check your clock") + raise Abort(_("negative timestamp: %d") % t, hint=hint) + return t, tz + +def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'): + """represent a (unixtime, offset) tuple as a localized time. + unixtime is seconds since the epoch, and offset is the time zone's + number of seconds away from UTC. 
if timezone is false, do not + append time zone to string.""" + t, tz = date or makedate() + if t < 0: + t = 0 # time.gmtime(lt) fails on Windows for lt < -43200 + tz = 0 + if "%1" in format or "%2" in format: + sign = (tz > 0) and "-" or "+" + minutes = abs(tz) // 60 + format = format.replace("%1", "%c%02d" % (sign, minutes // 60)) + format = format.replace("%2", "%02d" % (minutes % 60)) + s = time.strftime(format, time.gmtime(float(t) - tz)) + return s + +def shortdate(date=None): + """turn (timestamp, tzoff) tuple into iso 8631 date.""" + return datestr(date, format='%Y-%m-%d') + +def strdate(string, format, defaults=[]): + """parse a localized time string and return a (unixtime, offset) tuple. + if the string cannot be parsed, ValueError is raised.""" + def timezone(string): + tz = string.split()[-1] + if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit(): + sign = (tz[0] == "+") and 1 or -1 + hours = int(tz[1:3]) + minutes = int(tz[3:5]) + return -sign * (hours * 60 + minutes) * 60 + if tz == "GMT" or tz == "UTC": + return 0 + return None + + # NOTE: unixtime = localunixtime + offset + offset, date = timezone(string), string + if offset != None: + date = " ".join(string.split()[:-1]) + + # add missing elements from defaults + usenow = False # default to using biased defaults + for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity + found = [True for p in part if ("%"+p) in format] + if not found: + date += "@" + defaults[part][usenow] + format += "@%" + part[0] + else: + # We've found a specific time element, less specific time + # elements are relative to today + usenow = True + + timetuple = time.strptime(date, format) + localunixtime = int(calendar.timegm(timetuple)) + if offset is None: + # local timezone + unixtime = int(time.mktime(timetuple)) + offset = unixtime - localunixtime + else: + unixtime = localunixtime + offset + return unixtime, offset + +def parsedate(date, formats=None, bias={}): + """parse a localized date/time and return a (unixtime, offset) tuple. + + The date may be a "unixtime offset" string or in one of the specified + formats. If the date already is a (unixtime, offset) tuple, it is returned. + """ + if not date: + return 0, 0 + if isinstance(date, tuple) and len(date) == 2: + return date + if not formats: + formats = defaultdateformats + date = date.strip() + try: + when, offset = map(int, date.split(' ')) + except ValueError: + # fill out defaults + now = makedate() + defaults = {} + nowmap = {} + for part in "d mb yY HI M S".split(): + # this piece is for rounding the specific end of unknowns + b = bias.get(part) + if b is None: + if part[0] in "HMS": + b = "00" + else: + b = "0" + + # this piece is for matching the generic end to today's date + n = datestr(now, "%" + part[0]) + + defaults[part] = (b, n) + + for format in formats: + try: + when, offset = strdate(date, format, defaults) + except (ValueError, OverflowError): + pass + else: + break + else: + raise Abort(_('invalid date: %r') % date) + # validate explicit (probably user-specified) date and + # time zone offset. values must fit in signed 32 bits for + # current 32-bit linux runtimes. 
timezones go from UTC-12 + # to UTC+14 + if abs(when) > 0x7fffffff: + raise Abort(_('date exceeds 32 bits: %d') % when) + if when < 0: + raise Abort(_('negative date value: %d') % when) + if offset < -50400 or offset > 43200: + raise Abort(_('impossible time zone offset: %d') % offset) + return when, offset + +def matchdate(date): + """Return a function that matches a given date match specifier + + Formats include: + + '{date}' match a given date to the accuracy provided + + '<{date}' on or before a given date + + '>{date}' on or after a given date + + >>> p1 = parsedate("10:29:59") + >>> p2 = parsedate("10:30:00") + >>> p3 = parsedate("10:30:59") + >>> p4 = parsedate("10:31:00") + >>> p5 = parsedate("Sep 15 10:30:00 1999") + >>> f = matchdate("10:30") + >>> f(p1[0]) + False + >>> f(p2[0]) + True + >>> f(p3[0]) + True + >>> f(p4[0]) + False + >>> f(p5[0]) + False + """ + + def lower(date): + d = dict(mb="1", d="1") + return parsedate(date, extendeddateformats, d)[0] + + def upper(date): + d = dict(mb="12", HI="23", M="59", S="59") + for days in "31 30 29".split(): + try: + d["d"] = days + return parsedate(date, extendeddateformats, d)[0] + except: + pass + d["d"] = "28" + return parsedate(date, extendeddateformats, d)[0] + + date = date.strip() + if date[0] == "<": + when = upper(date[1:]) + return lambda x: x <= when + elif date[0] == ">": + when = lower(date[1:]) + return lambda x: x >= when + elif date[0] == "-": + try: + days = int(date[1:]) + except ValueError: + raise Abort(_("invalid day spec: %s") % date[1:]) + when = makedate()[0] - days * 3600 * 24 + return lambda x: x >= when + elif " to " in date: + a, b = date.split(" to ") + start, stop = lower(a), upper(b) + return lambda x: x >= start and x <= stop + else: + start, stop = lower(date), upper(date) + return lambda x: x >= start and x <= stop + +def shortuser(user): + """Return a short representation of a user name or email address.""" + f = user.find('@') + if f >= 0: + user = user[:f] + f = user.find('<') + if f >= 0: + user = user[f + 1:] + f = user.find(' ') + if f >= 0: + user = user[:f] + f = user.find('.') + if f >= 0: + user = user[:f] + return user + +def email(author): + '''get email of author.''' + r = author.find('>') + if r == -1: + r = None + return author[author.find('<') + 1:r] + +def _ellipsis(text, maxlength): + if len(text) <= maxlength: + return text, False + else: + return "%s..." 
% (text[:maxlength - 3]), True + +def ellipsis(text, maxlength=400): + """Trim string to at most maxlength (default: 400) characters.""" + try: + # use unicode not to split at intermediate multi-byte sequence + utext, truncated = _ellipsis(text.decode(encoding.encoding), + maxlength) + if not truncated: + return text + return utext.encode(encoding.encoding) + except (UnicodeDecodeError, UnicodeEncodeError): + return _ellipsis(text, maxlength)[0] + +def walkrepos(path, followsym=False, seen_dirs=None, recurse=False): + '''yield every hg repository under path, recursively.''' + def errhandler(err): + if err.filename == path: + raise err + if followsym and hasattr(os.path, 'samestat'): + def _add_dir_if_not_there(dirlst, dirname): + match = False + samestat = os.path.samestat + dirstat = os.stat(dirname) + for lstdirstat in dirlst: + if samestat(dirstat, lstdirstat): + match = True + break + if not match: + dirlst.append(dirstat) + return not match + else: + followsym = False + + if (seen_dirs is None) and followsym: + seen_dirs = [] + _add_dir_if_not_there(seen_dirs, path) + for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler): + dirs.sort() + if '.hg' in dirs: + yield root # found a repository + qroot = os.path.join(root, '.hg', 'patches') + if os.path.isdir(os.path.join(qroot, '.hg')): + yield qroot # we have a patch queue repo here + if recurse: + # avoid recursing inside the .hg directory + dirs.remove('.hg') + else: + dirs[:] = [] # don't descend further + elif followsym: + newdirs = [] + for d in dirs: + fname = os.path.join(root, d) + if _add_dir_if_not_there(seen_dirs, fname): + if os.path.islink(fname): + for hgname in walkrepos(fname, True, seen_dirs): + yield hgname + else: + newdirs.append(d) + dirs[:] = newdirs + +_rcpath = None + +def os_rcpath(): + '''return default os-specific hgrc search path''' + path = system_rcpath() + path.extend(user_rcpath()) + path = [os.path.normpath(f) for f in path] + return path + +def rcpath(): + '''return hgrc search path. if env var HGRCPATH is set, use it. + for each item in path, if directory, use files ending in .rc, + else use item. + make HGRCPATH empty to only look in .hg/hgrc of current repo. + if no HGRCPATH, use default os-specific path.''' + global _rcpath + if _rcpath is None: + if 'HGRCPATH' in os.environ: + _rcpath = [] + for p in os.environ['HGRCPATH'].split(os.pathsep): + if not p: + continue + p = expandpath(p) + if os.path.isdir(p): + for f, kind in osutil.listdir(p): + if f.endswith('.rc'): + _rcpath.append(os.path.join(p, f)) + else: + _rcpath.append(p) + else: + _rcpath = os_rcpath() + return _rcpath + +def bytecount(nbytes): + '''return byte count formatted as readable string, with units''' + + units = ( + (100, 1 << 30, _('%.0f GB')), + (10, 1 << 30, _('%.1f GB')), + (1, 1 << 30, _('%.2f GB')), + (100, 1 << 20, _('%.0f MB')), + (10, 1 << 20, _('%.1f MB')), + (1, 1 << 20, _('%.2f MB')), + (100, 1 << 10, _('%.0f KB')), + (10, 1 << 10, _('%.1f KB')), + (1, 1 << 10, _('%.2f KB')), + (1, 1, _('%.0f bytes')), + ) + + for multiplier, divisor, format in units: + if nbytes >= divisor * multiplier: + return format % (nbytes / float(divisor)) + return units[-1][2] % nbytes + +def drop_scheme(scheme, path): + sc = scheme + ':' + if path.startswith(sc): + path = path[len(sc):] + if path.startswith('//'): + if scheme == 'file': + i = path.find('/', 2) + if i == -1: + return '' + # On Windows, absolute paths are rooted at the current drive + # root. On POSIX they are rooted at the file system root. 
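# A minimal sketch of drop_scheme() on POSIX (on Windows the leading '//' of
# a file URL is re-rooted at the current drive, as described above); the
# URLs below are hypothetical.
from mercurial import util
print util.drop_scheme('file', 'file:///tmp/repo')         # '/tmp/repo'
print util.drop_scheme('http', 'http://example.com/repo')  # 'example.com/repo'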
+ if os.name == 'nt': + droot = os.path.splitdrive(os.getcwd())[0] + '/' + path = os.path.join(droot, path[i + 1:]) + else: + path = path[i:] + else: + path = path[2:] + return path + +def uirepr(s): + # Avoid double backslash in Windows path repr() + return repr(s).replace('\\\\', '\\') + +#### naming convention of below implementation follows 'textwrap' module + +class MBTextWrapper(textwrap.TextWrapper): + def __init__(self, **kwargs): + textwrap.TextWrapper.__init__(self, **kwargs) + + def _cutdown(self, str, space_left): + l = 0 + ucstr = unicode(str, encoding.encoding) + w = unicodedata.east_asian_width + for i in xrange(len(ucstr)): + l += w(ucstr[i]) in 'WFA' and 2 or 1 + if space_left < l: + return (ucstr[:i].encode(encoding.encoding), + ucstr[i:].encode(encoding.encoding)) + return str, '' + + # ---------------------------------------- + # overriding of base class + + def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width): + space_left = max(width - cur_len, 1) + + if self.break_long_words: + cut, res = self._cutdown(reversed_chunks[-1], space_left) + cur_line.append(cut) + reversed_chunks[-1] = res + elif not cur_line: + cur_line.append(reversed_chunks.pop()) + +#### naming convention of above implementation follows 'textwrap' module + +def wrap(line, width, initindent='', hangindent=''): + maxindent = max(len(hangindent), len(initindent)) + if width <= maxindent: + # adjust for weird terminal size + width = max(78, maxindent + 1) + wrapper = MBTextWrapper(width=width, + initial_indent=initindent, + subsequent_indent=hangindent) + return wrapper.fill(line) + +def iterlines(iterator): + for chunk in iterator: + for line in chunk.splitlines(): + yield line + +def expandpath(path): + return os.path.expanduser(os.path.expandvars(path)) + +def hgcmd(): + """Return the command used to execute current hg + + This is different from hgexecutable() because on Windows we want + to avoid things opening new shell windows like batch files, so we + get either the python call or current executable. + """ + if main_is_frozen(): + return [sys.executable] + return gethgcmd() + +def rundetached(args, condfn): + """Execute the argument list in a detached process. + + condfn is a callable which is called repeatedly and should return + True once the child process is known to have started successfully. + At this point, the child process PID is returned. If the child + process fails to start or finishes before condfn() evaluates to + True, return -1. + """ + # Windows case is easier because the child process is either + # successfully starting and validating the condition or exiting + # on failure. We just poll on its PID. On Unix, if the child + # process fails to start, it will be left in a zombie state until + # the parent wait on it, which we cannot do since we expect a long + # running process on success. Instead we listen for SIGCHLD telling + # us our child process terminated. 
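# A minimal sketch of rundetached(); the command line and readiness check are
# hypothetical: condfn is polled until the daemon signals readiness, here by
# writing its pid file.
import os
from mercurial import util
pid = util.rundetached(['hg', 'serve', '--port', '8000', '--pid-file', 'hg.pid'],
                       lambda: os.path.exists('hg.pid'))
if pid < 0:
    print 'daemon failed to start'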
+ terminated = set() + def handler(signum, frame): + terminated.add(os.wait()) + prevhandler = None + if hasattr(signal, 'SIGCHLD'): + prevhandler = signal.signal(signal.SIGCHLD, handler) + try: + pid = spawndetached(args) + while not condfn(): + if ((pid in terminated or not testpid(pid)) + and not condfn()): + return -1 + time.sleep(0.1) + return pid + finally: + if prevhandler is not None: + signal.signal(signal.SIGCHLD, prevhandler) + +try: + any, all = any, all +except NameError: + def any(iterable): + for i in iterable: + if i: + return True + return False + + def all(iterable): + for i in iterable: + if not i: + return False + return True + +def interpolate(prefix, mapping, s, fn=None): + """Return the result of interpolating items in the mapping into string s. + + prefix is a single character string, or a two character string with + a backslash as the first character if the prefix needs to be escaped in + a regular expression. + + fn is an optional function that will be applied to the replacement text + just before replacement. + """ + fn = fn or (lambda s: s) + r = re.compile(r'%s(%s)' % (prefix, '|'.join(mapping.keys()))) + return r.sub(lambda x: fn(mapping[x.group()[1:]]), s) + +def getport(port): + """Return the port for a given network service. + + If port is an integer, it's returned as is. If it's a string, it's + looked up using socket.getservbyname(). If there's no matching + service, util.Abort is raised. + """ + try: + return int(port) + except ValueError: + pass + + try: + return socket.getservbyname(port) + except socket.error: + raise Abort(_("no port number associated with service '%s'") % port) + +_booleans = {'1': True, 'yes': True, 'true': True, 'on': True, 'always': True, + '0': False, 'no': False, 'false': False, 'off': False, + 'never': False} + +def parsebool(s): + """Parse s into a boolean. + + If s is not a valid boolean, returns None. + """ + return _booleans.get(s.lower(), None) diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/util.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/util.pyo Binary files differnew file mode 100644 index 0000000..829410c --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/util.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/verify.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/verify.py new file mode 100644 index 0000000..bb1d6c2 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/verify.py @@ -0,0 +1,301 @@ +# verify.py - repository integrity checking for Mercurial +# +# Copyright 2006, 2007 Matt Mackall <mpm@selenic.com> +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +from node import nullid, short +from i18n import _ +import os +import revlog, util, error + +def verify(repo): + lock = repo.lock() + try: + return _verify(repo) + finally: + lock.release() + +def _verify(repo): + mflinkrevs = {} + filelinkrevs = {} + filenodes = {} + revisions = 0 + badrevs = set() + errors = [0] + warnings = [0] + ui = repo.ui + cl = repo.changelog + mf = repo.manifest + lrugetctx = util.lrucachefunc(repo.changectx) + + if not repo.cancopy(): + raise util.Abort(_("cannot verify bundle or remote repos")) + + def err(linkrev, msg, filename=None): + if linkrev != None: + badrevs.add(linkrev) + else: + linkrev = '?' 
+ msg = "%s: %s" % (linkrev, msg) + if filename: + msg = "%s@%s" % (filename, msg) + ui.warn(" " + msg + "\n") + errors[0] += 1 + + def exc(linkrev, msg, inst, filename=None): + if isinstance(inst, KeyboardInterrupt): + ui.warn(_("interrupted")) + raise + if not str(inst): + inst = repr(inst) + err(linkrev, "%s: %s" % (msg, inst), filename) + + def warn(msg): + ui.warn(msg + "\n") + warnings[0] += 1 + + def checklog(obj, name, linkrev): + if not len(obj) and (havecl or havemf): + err(linkrev, _("empty or missing %s") % name) + return + + d = obj.checksize() + if d[0]: + err(None, _("data length off by %d bytes") % d[0], name) + if d[1]: + err(None, _("index contains %d extra bytes") % d[1], name) + + if obj.version != revlog.REVLOGV0: + if not revlogv1: + warn(_("warning: `%s' uses revlog format 1") % name) + elif revlogv1: + warn(_("warning: `%s' uses revlog format 0") % name) + + def checkentry(obj, i, node, seen, linkrevs, f): + lr = obj.linkrev(obj.rev(node)) + if lr < 0 or (havecl and lr not in linkrevs): + if lr < 0 or lr >= len(cl): + msg = _("rev %d points to nonexistent changeset %d") + else: + msg = _("rev %d points to unexpected changeset %d") + err(None, msg % (i, lr), f) + if linkrevs: + if f and len(linkrevs) > 1: + try: + # attempt to filter down to real linkrevs + linkrevs = [l for l in linkrevs + if lrugetctx(l)[f].filenode() == node] + except: + pass + warn(_(" (expected %s)") % " ".join(map(str, linkrevs))) + lr = None # can't be trusted + + try: + p1, p2 = obj.parents(node) + if p1 not in seen and p1 != nullid: + err(lr, _("unknown parent 1 %s of %s") % + (short(p1), short(n)), f) + if p2 not in seen and p2 != nullid: + err(lr, _("unknown parent 2 %s of %s") % + (short(p2), short(p1)), f) + except Exception, inst: + exc(lr, _("checking parents of %s") % short(node), inst, f) + + if node in seen: + err(lr, _("duplicate revision %d (%d)") % (i, seen[n]), f) + seen[n] = i + return lr + + if os.path.exists(repo.sjoin("journal")): + ui.warn(_("abandoned transaction found - run hg recover\n")) + + revlogv1 = cl.version != revlog.REVLOGV0 + if ui.verbose or not revlogv1: + ui.status(_("repository uses revlog format %d\n") % + (revlogv1 and 1 or 0)) + + havecl = len(cl) > 0 + havemf = len(mf) > 0 + + ui.status(_("checking changesets\n")) + seen = {} + checklog(cl, "changelog", 0) + total = len(repo) + for i in repo: + ui.progress(_('checking'), i, total=total, unit=_('changesets')) + n = cl.node(i) + checkentry(cl, i, n, seen, [i], "changelog") + + try: + changes = cl.read(n) + mflinkrevs.setdefault(changes[0], []).append(i) + for f in changes[3]: + filelinkrevs.setdefault(f, []).append(i) + except Exception, inst: + exc(i, _("unpacking changeset %s") % short(n), inst) + ui.progress(_('checking'), None) + + ui.status(_("checking manifests\n")) + seen = {} + checklog(mf, "manifest", 0) + total = len(mf) + for i in mf: + ui.progress(_('checking'), i, total=total, unit=_('manifests')) + n = mf.node(i) + lr = checkentry(mf, i, n, seen, mflinkrevs.get(n, []), "manifest") + if n in mflinkrevs: + del mflinkrevs[n] + else: + err(lr, _("%s not in changesets") % short(n), "manifest") + + try: + for f, fn in mf.readdelta(n).iteritems(): + if not f: + err(lr, _("file without name in manifest")) + elif f != "/dev/null": + filenodes.setdefault(f, {}).setdefault(fn, lr) + except Exception, inst: + exc(lr, _("reading manifest delta %s") % short(n), inst) + ui.progress(_('checking'), None) + + ui.status(_("crosschecking files in changesets and manifests\n")) + + total = len(mflinkrevs) + 
len(filelinkrevs) + len(filenodes) + count = 0 + if havemf: + for c, m in sorted([(c, m) for m in mflinkrevs + for c in mflinkrevs[m]]): + count += 1 + ui.progress(_('crosschecking'), count, total=total) + err(c, _("changeset refers to unknown manifest %s") % short(m)) + mflinkrevs = None # del is bad here due to scope issues + + for f in sorted(filelinkrevs): + count += 1 + ui.progress(_('crosschecking'), count, total=total) + if f not in filenodes: + lr = filelinkrevs[f][0] + err(lr, _("in changeset but not in manifest"), f) + + if havecl: + for f in sorted(filenodes): + count += 1 + ui.progress(_('crosschecking'), count, total=total) + if f not in filelinkrevs: + try: + fl = repo.file(f) + lr = min([fl.linkrev(fl.rev(n)) for n in filenodes[f]]) + except: + lr = None + err(lr, _("in manifest but not in changeset"), f) + + ui.progress(_('crosschecking'), None) + + ui.status(_("checking files\n")) + + storefiles = set() + for f, f2, size in repo.store.datafiles(): + if not f: + err(None, _("cannot decode filename '%s'") % f2) + elif size > 0 or not revlogv1: + storefiles.add(f) + + files = sorted(set(filenodes) | set(filelinkrevs)) + total = len(files) + for i, f in enumerate(files): + ui.progress(_('checking'), i, item=f, total=total) + try: + linkrevs = filelinkrevs[f] + except KeyError: + # in manifest but not in changelog + linkrevs = [] + + if linkrevs: + lr = linkrevs[0] + else: + lr = None + + try: + fl = repo.file(f) + except error.RevlogError, e: + err(lr, _("broken revlog! (%s)") % e, f) + continue + + for ff in fl.files(): + try: + storefiles.remove(ff) + except KeyError: + err(lr, _("missing revlog!"), ff) + + checklog(fl, f, lr) + seen = {} + rp = None + for i in fl: + revisions += 1 + n = fl.node(i) + lr = checkentry(fl, i, n, seen, linkrevs, f) + if f in filenodes: + if havemf and n not in filenodes[f]: + err(lr, _("%s not in manifests") % (short(n)), f) + else: + del filenodes[f][n] + + # verify contents + try: + l = len(fl.read(n)) + rp = fl.renamed(n) + if l != fl.size(i): + if len(fl.revision(n)) != fl.size(i): + err(lr, _("unpacked size is %s, %s expected") % + (l, fl.size(i)), f) + except Exception, inst: + exc(lr, _("unpacking %s") % short(n), inst, f) + + # check renames + try: + if rp: + if lr is not None and ui.verbose: + ctx = lrugetctx(lr) + found = False + for pctx in ctx.parents(): + if rp[0] in pctx: + found = True + break + if not found: + warn(_("warning: copy source of '%s' not" + " in parents of %s") % (f, ctx)) + fl2 = repo.file(rp[0]) + if not len(fl2): + err(lr, _("empty or missing copy source revlog %s:%s") + % (rp[0], short(rp[1])), f) + elif rp[1] == nullid: + ui.note(_("warning: %s@%s: copy source" + " revision is nullid %s:%s\n") + % (f, lr, rp[0], short(rp[1]))) + else: + fl2.rev(rp[1]) + except Exception, inst: + exc(lr, _("checking rename of %s") % short(n), inst, f) + + # cross-check + if f in filenodes: + fns = [(lr, n) for n, lr in filenodes[f].iteritems()] + for lr, node in sorted(fns): + err(lr, _("%s in manifests not found") % short(node), f) + ui.progress(_('checking'), None) + + for f in storefiles: + warn(_("warning: orphan revlog '%s'") % f) + + ui.status(_("%d files, %d changesets, %d total revisions\n") % + (len(files), len(cl), revisions)) + if warnings[0]: + ui.warn(_("%d warnings encountered!\n") % warnings[0]) + if errors[0]: + ui.warn(_("%d integrity errors encountered!\n") % errors[0]) + if badrevs: + ui.warn(_("(first damaged changeset appears to be %d)\n") + % min(badrevs)) + return 1 diff --git 
a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/verify.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/verify.pyo Binary files differnew file mode 100644 index 0000000..ffe597d --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/verify.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/win32.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/win32.py new file mode 100644 index 0000000..71179a1 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/win32.py @@ -0,0 +1,180 @@ +# win32.py - utility functions that use win32 API +# +# Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +"""Utility functions that use win32 API. + +Mark Hammond's win32all package allows better functionality on +Windows. This module overrides definitions in util.py. If not +available, import of this module will fail, and generic code will be +used. +""" + +import win32api + +import errno, os, sys, pywintypes, win32con, win32file, win32process +import winerror, win32gui, win32console +import osutil, encoding +from win32com.shell import shell, shellcon + +def os_link(src, dst): + try: + win32file.CreateHardLink(dst, src) + except pywintypes.error: + raise OSError(errno.EINVAL, 'target implements hardlinks improperly') + except NotImplementedError: # Another fake error win Win98 + raise OSError(errno.EINVAL, 'Hardlinking not supported') + +def _getfileinfo(pathname): + """Return number of hardlinks for the given file.""" + try: + fh = win32file.CreateFile(pathname, + win32file.GENERIC_READ, win32file.FILE_SHARE_READ, + None, win32file.OPEN_EXISTING, 0, None) + except pywintypes.error: + raise OSError(errno.ENOENT, 'The system cannot find the file specified') + try: + return win32file.GetFileInformationByHandle(fh) + finally: + fh.Close() + +def nlinks(pathname): + """Return number of hardlinks for the given file.""" + return _getfileinfo(pathname)[7] + +def samefile(fpath1, fpath2): + """Returns whether fpath1 and fpath2 refer to the same file. This is only + guaranteed to work for files, not directories.""" + res1 = _getfileinfo(fpath1) + res2 = _getfileinfo(fpath2) + # Index 4 is the volume serial number, and 8 and 9 contain the file ID + return res1[4] == res2[4] and res1[8] == res2[8] and res1[9] == res2[9] + +def samedevice(fpath1, fpath2): + """Returns whether fpath1 and fpath2 are on the same device. This is only + guaranteed to work for files, not directories.""" + res1 = _getfileinfo(fpath1) + res2 = _getfileinfo(fpath2) + return res1[4] == res2[4] + +def testpid(pid): + '''return True if pid is still running or unable to + determine, False otherwise''' + try: + handle = win32api.OpenProcess( + win32con.PROCESS_QUERY_INFORMATION, False, pid) + if handle: + status = win32process.GetExitCodeProcess(handle) + return status == win32con.STILL_ACTIVE + except pywintypes.error, details: + return details[0] != winerror.ERROR_INVALID_PARAMETER + return True + +def lookup_reg(key, valname=None, scope=None): + ''' Look up a key/value name in the Windows registry. + + valname: value name. If unspecified, the default value for the key + is used. + scope: optionally specify scope for registry lookup, this can be + a sequence of scopes to look up in order. Default (CURRENT_USER, + LOCAL_MACHINE). 
+ ''' + try: + from _winreg import HKEY_CURRENT_USER, HKEY_LOCAL_MACHINE, \ + QueryValueEx, OpenKey + except ImportError: + return None + + if scope is None: + scope = (HKEY_CURRENT_USER, HKEY_LOCAL_MACHINE) + elif not isinstance(scope, (list, tuple)): + scope = (scope,) + for s in scope: + try: + val = QueryValueEx(OpenKey(s, key), valname)[0] + # never let a Unicode string escape into the wild + return encoding.tolocal(val.encode('UTF-8')) + except EnvironmentError: + pass + +def system_rcpath_win32(): + '''return default os-specific hgrc search path''' + filename = win32api.GetModuleFileName(0) + # Use mercurial.ini found in directory with hg.exe + progrc = os.path.join(os.path.dirname(filename), 'mercurial.ini') + if os.path.isfile(progrc): + return [progrc] + # Use hgrc.d found in directory with hg.exe + progrcd = os.path.join(os.path.dirname(filename), 'hgrc.d') + if os.path.isdir(progrcd): + rcpath = [] + for f, kind in osutil.listdir(progrcd): + if f.endswith('.rc'): + rcpath.append(os.path.join(progrcd, f)) + return rcpath + # else look for a system rcpath in the registry + try: + value = win32api.RegQueryValue( + win32con.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Mercurial') + rcpath = [] + for p in value.split(os.pathsep): + if p.lower().endswith('mercurial.ini'): + rcpath.append(p) + elif os.path.isdir(p): + for f, kind in osutil.listdir(p): + if f.endswith('.rc'): + rcpath.append(os.path.join(p, f)) + return rcpath + except pywintypes.error: + return [] + +def user_rcpath_win32(): + '''return os-specific hgrc search path to the user dir''' + userdir = os.path.expanduser('~') + if sys.getwindowsversion()[3] != 2 and userdir == '~': + # We are on win < nt: fetch the APPDATA directory location and use + # the parent directory as the user home dir. + appdir = shell.SHGetPathFromIDList( + shell.SHGetSpecialFolderLocation(0, shellcon.CSIDL_APPDATA)) + userdir = os.path.dirname(appdir) + return [os.path.join(userdir, 'mercurial.ini'), + os.path.join(userdir, '.hgrc')] + +def getuser(): + '''return name of current user''' + return win32api.GetUserName() + +def set_signal_handler_win32(): + """Register a termination handler for console events including + CTRL+C. python signal handlers do not work well with socket + operations. 
+ """ + def handler(event): + win32process.ExitProcess(1) + win32api.SetConsoleCtrlHandler(handler) + +def hidewindow(): + def callback(*args, **kwargs): + hwnd, pid = args + wpid = win32process.GetWindowThreadProcessId(hwnd)[1] + if pid == wpid: + win32gui.ShowWindow(hwnd, win32con.SW_HIDE) + + pid = win32process.GetCurrentProcessId() + win32gui.EnumWindows(callback, pid) + +def termwidth(): + try: + # Query stderr to avoid problems with redirections + screenbuf = win32console.GetStdHandle(win32console.STD_ERROR_HANDLE) + try: + window = screenbuf.GetConsoleScreenBufferInfo()['Window'] + width = window.Right - window.Left + return width + finally: + screenbuf.Detach() + except pywintypes.error: + return 79 diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/win32.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/win32.pyo Binary files differnew file mode 100644 index 0000000..1438ea2 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/win32.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/windows.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/windows.py new file mode 100644 index 0000000..a4c5ff4 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/windows.py @@ -0,0 +1,375 @@ +# windows.py - Windows utility function implementations for Mercurial +# +# Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +from i18n import _ +import osutil, error +import errno, msvcrt, os, re, sys, random, subprocess + +nulldev = 'NUL:' +umask = 002 + +# wrap osutil.posixfile to provide friendlier exceptions +def posixfile(name, mode='r', buffering=-1): + try: + return osutil.posixfile(name, mode, buffering) + except WindowsError, err: + raise IOError(err.errno, '%s: %s' % (name, err.strerror)) +posixfile.__doc__ = osutil.posixfile.__doc__ + +class winstdout(object): + '''stdout on windows misbehaves if sent through a pipe''' + + def __init__(self, fp): + self.fp = fp + + def __getattr__(self, key): + return getattr(self.fp, key) + + def close(self): + try: + self.fp.close() + except: pass + + def write(self, s): + try: + # This is workaround for "Not enough space" error on + # writing large size of data to console. 
+ limit = 16000 + l = len(s) + start = 0 + self.softspace = 0 + while start < l: + end = start + limit + self.fp.write(s[start:end]) + start = end + except IOError, inst: + if inst.errno != 0: + raise + self.close() + raise IOError(errno.EPIPE, 'Broken pipe') + + def flush(self): + try: + return self.fp.flush() + except IOError, inst: + if inst.errno != errno.EINVAL: + raise + self.close() + raise IOError(errno.EPIPE, 'Broken pipe') + +sys.stdout = winstdout(sys.stdout) + +def _is_win_9x(): + '''return true if run on windows 95, 98 or me.''' + try: + return sys.getwindowsversion()[3] == 1 + except AttributeError: + return 'command' in os.environ.get('comspec', '') + +def openhardlinks(): + return not _is_win_9x() and "win32api" in globals() + +def system_rcpath(): + try: + return system_rcpath_win32() + except: + return [r'c:\mercurial\mercurial.ini'] + +def user_rcpath(): + '''return os-specific hgrc search path to the user dir''' + try: + path = user_rcpath_win32() + except: + home = os.path.expanduser('~') + path = [os.path.join(home, 'mercurial.ini'), + os.path.join(home, '.hgrc')] + userprofile = os.environ.get('USERPROFILE') + if userprofile: + path.append(os.path.join(userprofile, 'mercurial.ini')) + path.append(os.path.join(userprofile, '.hgrc')) + return path + +def parse_patch_output(output_line): + """parses the output produced by patch and returns the filename""" + pf = output_line[14:] + if pf[0] == '`': + pf = pf[1:-1] # Remove the quotes + return pf + +def sshargs(sshcmd, host, user, port): + '''Build argument list for ssh or Plink''' + pflag = 'plink' in sshcmd.lower() and '-P' or '-p' + args = user and ("%s@%s" % (user, host)) or host + return port and ("%s %s %s" % (args, pflag, port)) or args + +def testpid(pid): + '''return False if pid dead, True if running or not known''' + return True + +def set_flags(f, l, x): + pass + +def set_binary(fd): + # When run without console, pipes may expose invalid + # fileno(), usually set to -1. + if hasattr(fd, 'fileno') and fd.fileno() >= 0: + msvcrt.setmode(fd.fileno(), os.O_BINARY) + +def pconvert(path): + return '/'.join(path.split(os.sep)) + +def localpath(path): + return path.replace('/', '\\') + +def normpath(path): + return pconvert(os.path.normpath(path)) + +def realpath(path): + ''' + Returns the true, canonical file system path equivalent to the given + path. + ''' + # TODO: There may be a more clever way to do this that also handles other, + # less common file systems. + return os.path.normpath(os.path.normcase(os.path.realpath(path))) + +def samestat(s1, s2): + return False + +# A sequence of backslashes is special iff it precedes a double quote: +# - if there's an even number of backslashes, the double quote is not +# quoted (i.e. 
it ends the quoted region) +# - if there's an odd number of backslashes, the double quote is quoted +# - in both cases, every pair of backslashes is unquoted into a single +# backslash +# (See http://msdn2.microsoft.com/en-us/library/a1y7w461.aspx ) +# So, to quote a string, we must surround it in double quotes, double +# the number of backslashes that preceed double quotes and add another +# backslash before every double quote (being careful with the double +# quote we've appended to the end) +_quotere = None +def shellquote(s): + global _quotere + if _quotere is None: + _quotere = re.compile(r'(\\*)("|\\$)') + return '"%s"' % _quotere.sub(r'\1\1\\\2', s) + +def quotecommand(cmd): + """Build a command string suitable for os.popen* calls.""" + if sys.version_info < (2, 7, 1): + # Python versions since 2.7.1 do this extra quoting themselves + return '"' + cmd + '"' + return cmd + +def popen(command, mode='r'): + # Work around "popen spawned process may not write to stdout + # under windows" + # http://bugs.python.org/issue1366 + command += " 2> %s" % nulldev + return os.popen(quotecommand(command), mode) + +def explain_exit(code): + return _("exited with status %d") % code, code + +# if you change this stub into a real check, please try to implement the +# username and groupname functions above, too. +def isowner(st): + return True + +def find_exe(command): + '''Find executable for command searching like cmd.exe does. + If command is a basename then PATH is searched for command. + PATH isn't searched if command is an absolute or relative path. + An extension from PATHEXT is found and added if not present. + If command isn't found None is returned.''' + pathext = os.environ.get('PATHEXT', '.COM;.EXE;.BAT;.CMD') + pathexts = [ext for ext in pathext.lower().split(os.pathsep)] + if os.path.splitext(command)[1].lower() in pathexts: + pathexts = [''] + + def findexisting(pathcommand): + 'Will append extension (if needed) and return existing file' + for ext in pathexts: + executable = pathcommand + ext + if os.path.exists(executable): + return executable + return None + + if os.sep in command: + return findexisting(command) + + for path in os.environ.get('PATH', '').split(os.pathsep): + executable = findexisting(os.path.join(path, command)) + if executable is not None: + return executable + return findexisting(os.path.expanduser(os.path.expandvars(command))) + +def set_signal_handler(): + try: + set_signal_handler_win32() + except NameError: + pass + +def statfiles(files): + '''Stat each file in files and yield stat or None if file does not exist. + Cluster and cache stat per directory to minimize number of OS stat calls.''' + ncase = os.path.normcase + dircache = {} # dirname -> filename -> status | None if file does not exist + for nf in files: + nf = ncase(nf) + dir, base = os.path.split(nf) + if not dir: + dir = '.' + cache = dircache.get(dir, None) + if cache is None: + try: + dmap = dict([(ncase(n), s) + for n, k, s in osutil.listdir(dir, True)]) + except OSError, err: + # handle directory not found in Python version prior to 2.5 + # Python <= 2.4 returns native Windows code 3 in errno + # Python >= 2.5 returns ENOENT and adds winerror field + # EINVAL is raised if dir is not a directory. 
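# A minimal sketch of shellquote(), defined earlier in this module (Windows
# only; the sample strings are hypothetical). Backslashes are doubled only
# when they precede a double quote, following the rules described above.
from mercurial.windows import shellquote
print shellquote('C:\\Program Files\\Mercurial')   # "C:\Program Files\Mercurial"
print shellquote('she said "hi"')                  # "she said \"hi\""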
+ if err.errno not in (3, errno.ENOENT, errno.EINVAL, + errno.ENOTDIR): + raise + dmap = {} + cache = dircache.setdefault(dir, dmap) + yield cache.get(base, None) + +def getuser(): + '''return name of current user''' + raise error.Abort(_('user name not available - set USERNAME ' + 'environment variable')) + +def username(uid=None): + """Return the name of the user with the given uid. + + If uid is None, return the name of the current user.""" + return None + +def groupname(gid=None): + """Return the name of the group with the given gid. + + If gid is None, return the name of the current group.""" + return None + +def _removedirs(name): + """special version of os.removedirs that does not remove symlinked + directories or junction points if they actually contain files""" + if osutil.listdir(name): + return + os.rmdir(name) + head, tail = os.path.split(name) + if not tail: + head, tail = os.path.split(head) + while head and tail: + try: + if osutil.listdir(head): + return + os.rmdir(head) + except: + break + head, tail = os.path.split(head) + +def unlink(f): + """unlink and remove the directory if it is empty""" + os.unlink(f) + # try removing directories that might now be empty + try: + _removedirs(os.path.dirname(f)) + except OSError: + pass + +def rename(src, dst): + '''atomically rename file src to dst, replacing dst if it exists''' + try: + os.rename(src, dst) + except OSError: # FIXME: check err (EEXIST ?) + + # On windows, rename to existing file is not allowed, so we + # must delete destination first. But if a file is open, unlink + # schedules it for delete but does not delete it. Rename + # happens immediately even for open files, so we rename + # destination to a temporary name, then delete that. Then + # rename is safe to do. + # The temporary name is chosen at random to avoid the situation + # where a file is left lying around from a previous aborted run. + + for tries in xrange(10): + temp = '%s-%08x' % (dst, random.randint(0, 0xffffffff)) + try: + os.rename(dst, temp) # raises OSError EEXIST if temp exists + break + except OSError, e: + if e.errno != errno.EEXIST: + raise + else: + raise IOError, (errno.EEXIST, "No usable temporary filename found") + + try: + os.unlink(temp) + except: + # Some rude AV-scanners on Windows may cause the unlink to + # fail. Not aborting here just leaks the temp file, whereas + # aborting at this point may leave serious inconsistencies. + # Ideally, we would notify the user here. + pass + os.rename(src, dst) + +def spawndetached(args): + # No standard library function really spawns a fully detached + # process under win32 because they allocate pipes or other objects + # to handle standard streams communications. Passing these objects + # to the child process requires handle inheritance to be enabled + # which makes really detached processes impossible. + class STARTUPINFO: + dwFlags = subprocess.STARTF_USESHOWWINDOW + hStdInput = None + hStdOutput = None + hStdError = None + wShowWindow = subprocess.SW_HIDE + + args = subprocess.list2cmdline(args) + # Not running the command in shell mode makes python26 hang when + # writing to hgweb output socket. 
+ comspec = os.environ.get("COMSPEC", "cmd.exe") + args = comspec + " /c " + args + hp, ht, pid, tid = subprocess.CreateProcess( + None, args, + # no special security + None, None, + # Do not inherit handles + 0, + # DETACHED_PROCESS + 0x00000008, + os.environ, + os.getcwd(), + STARTUPINFO()) + return pid + +def gethgcmd(): + return [sys.executable] + sys.argv[:1] + +def termwidth(): + # cmd.exe does not handle CR like a unix console, the CR is + # counted in the line length. On 80 columns consoles, if 80 + # characters are written, the following CR won't apply on the + # current line but on the new one. Keep room for it. + return 79 + +def groupmembers(name): + # Don't support groups on Windows for now + raise KeyError() + +try: + # override functions with win32 versions if possible + from win32 import * +except ImportError: + pass + +expandglobs = True diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/windows.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/windows.pyo Binary files differnew file mode 100644 index 0000000..aa5e567 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/windows.pyo diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/wireproto.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/wireproto.py new file mode 100644 index 0000000..372e842 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/wireproto.py @@ -0,0 +1,338 @@ +# wireproto.py - generic wire protocol support functions +# +# Copyright 2005-2010 Matt Mackall <mpm@selenic.com> +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +import urllib, tempfile, os, sys +from i18n import _ +from node import bin, hex +import changegroup as changegroupmod +import repo, error, encoding, util, store +import pushkey as pushkeymod + +# list of nodes encoding / decoding + +def decodelist(l, sep=' '): + return map(bin, l.split(sep)) + +def encodelist(l, sep=' '): + return sep.join(map(hex, l)) + +# client side + +class wirerepository(repo.repository): + def lookup(self, key): + self.requirecap('lookup', _('look up remote revision')) + d = self._call("lookup", key=key) + success, data = d[:-1].split(" ", 1) + if int(success): + return bin(data) + self._abort(error.RepoError(data)) + + def heads(self): + d = self._call("heads") + try: + return decodelist(d[:-1]) + except: + self._abort(error.ResponseError(_("unexpected response:"), d)) + + def branchmap(self): + d = self._call("branchmap") + try: + branchmap = {} + for branchpart in d.splitlines(): + branchname, branchheads = branchpart.split(' ', 1) + branchname = urllib.unquote(branchname) + # Earlier servers (1.3.x) send branch names in (their) local + # charset. The best we can do is assume it's identical to our + # own local charset, in case it's not utf-8. 
+ try: + branchname.decode('utf-8') + except UnicodeDecodeError: + branchname = encoding.fromlocal(branchname) + branchheads = decodelist(branchheads) + branchmap[branchname] = branchheads + return branchmap + except TypeError: + self._abort(error.ResponseError(_("unexpected response:"), d)) + + def branches(self, nodes): + n = encodelist(nodes) + d = self._call("branches", nodes=n) + try: + br = [tuple(decodelist(b)) for b in d.splitlines()] + return br + except: + self._abort(error.ResponseError(_("unexpected response:"), d)) + + def between(self, pairs): + batch = 8 # avoid giant requests + r = [] + for i in xrange(0, len(pairs), batch): + n = " ".join([encodelist(p, '-') for p in pairs[i:i + batch]]) + d = self._call("between", pairs=n) + try: + r.extend(l and decodelist(l) or [] for l in d.splitlines()) + except: + self._abort(error.ResponseError(_("unexpected response:"), d)) + return r + + def pushkey(self, namespace, key, old, new): + if not self.capable('pushkey'): + return False + d = self._call("pushkey", + namespace=namespace, key=key, old=old, new=new) + return bool(int(d)) + + def listkeys(self, namespace): + if not self.capable('pushkey'): + return {} + d = self._call("listkeys", namespace=namespace) + r = {} + for l in d.splitlines(): + k, v = l.split('\t') + r[k.decode('string-escape')] = v.decode('string-escape') + return r + + def stream_out(self): + return self._callstream('stream_out') + + def changegroup(self, nodes, kind): + n = encodelist(nodes) + f = self._callstream("changegroup", roots=n) + return changegroupmod.unbundle10(self._decompress(f), 'UN') + + def changegroupsubset(self, bases, heads, kind): + self.requirecap('changegroupsubset', _('look up remote changes')) + bases = encodelist(bases) + heads = encodelist(heads) + f = self._callstream("changegroupsubset", + bases=bases, heads=heads) + return changegroupmod.unbundle10(self._decompress(f), 'UN') + + def unbundle(self, cg, heads, source): + '''Send cg (a readable file-like object representing the + changegroup to push, typically a chunkbuffer object) to the + remote server as a bundle. 
Return an integer indicating the + result of the push (see localrepository.addchangegroup()).''' + + ret, output = self._callpush("unbundle", cg, heads=encodelist(heads)) + if ret == "": + raise error.ResponseError( + _('push failed:'), output) + try: + ret = int(ret) + except ValueError: + raise error.ResponseError( + _('push failed (unexpected response):'), ret) + + for l in output.splitlines(True): + self.ui.status(_('remote: '), l) + return ret + +# server side + +class streamres(object): + def __init__(self, gen): + self.gen = gen + +class pushres(object): + def __init__(self, res): + self.res = res + +class pusherr(object): + def __init__(self, res): + self.res = res + +def dispatch(repo, proto, command): + func, spec = commands[command] + args = proto.getargs(spec) + return func(repo, proto, *args) + +def between(repo, proto, pairs): + pairs = [decodelist(p, '-') for p in pairs.split(" ")] + r = [] + for b in repo.between(pairs): + r.append(encodelist(b) + "\n") + return "".join(r) + +def branchmap(repo, proto): + branchmap = repo.branchmap() + heads = [] + for branch, nodes in branchmap.iteritems(): + branchname = urllib.quote(branch) + branchnodes = encodelist(nodes) + heads.append('%s %s' % (branchname, branchnodes)) + return '\n'.join(heads) + +def branches(repo, proto, nodes): + nodes = decodelist(nodes) + r = [] + for b in repo.branches(nodes): + r.append(encodelist(b) + "\n") + return "".join(r) + +def capabilities(repo, proto): + caps = 'lookup changegroupsubset branchmap pushkey'.split() + if _allowstream(repo.ui): + requiredformats = repo.requirements & repo.supportedformats + # if our local revlogs are just revlogv1, add 'stream' cap + if not requiredformats - set(('revlogv1',)): + caps.append('stream') + # otherwise, add 'streamreqs' detailing our local revlog format + else: + caps.append('streamreqs=%s' % ','.join(requiredformats)) + caps.append('unbundle=%s' % ','.join(changegroupmod.bundlepriority)) + return ' '.join(caps) + +def changegroup(repo, proto, roots): + nodes = decodelist(roots) + cg = repo.changegroup(nodes, 'serve') + return streamres(proto.groupchunks(cg)) + +def changegroupsubset(repo, proto, bases, heads): + bases = decodelist(bases) + heads = decodelist(heads) + cg = repo.changegroupsubset(bases, heads, 'serve') + return streamres(proto.groupchunks(cg)) + +def heads(repo, proto): + h = repo.heads() + return encodelist(h) + "\n" + +def hello(repo, proto): + '''the hello command returns a set of lines describing various + interesting things about the server, in an RFC822-like format. 
+ Currently the only one defined is "capabilities", which + consists of a line in the form: + + capabilities: space separated list of tokens + ''' + return "capabilities: %s\n" % (capabilities(repo, proto)) + +def listkeys(repo, proto, namespace): + d = pushkeymod.list(repo, namespace).items() + t = '\n'.join(['%s\t%s' % (k.encode('string-escape'), + v.encode('string-escape')) for k, v in d]) + return t + +def lookup(repo, proto, key): + try: + r = hex(repo.lookup(key)) + success = 1 + except Exception, inst: + r = str(inst) + success = 0 + return "%s %s\n" % (success, r) + +def pushkey(repo, proto, namespace, key, old, new): + r = pushkeymod.push(repo, namespace, key, old, new) + return '%s\n' % int(r) + +def _allowstream(ui): + return ui.configbool('server', 'uncompressed', True, untrusted=True) + +def stream(repo, proto): + '''If the server supports streaming clone, it advertises the "stream" + capability with a value representing the version and flags of the repo + it is serving. Client checks to see if it understands the format. + + The format is simple: the server writes out a line with the amount + of files, then the total amount of bytes to be transfered (separated + by a space). Then, for each file, the server first writes the filename + and filesize (separated by the null character), then the file contents. + ''' + + if not _allowstream(repo.ui): + return '1\n' + + entries = [] + total_bytes = 0 + try: + # get consistent snapshot of repo, lock during scan + lock = repo.lock() + try: + repo.ui.debug('scanning\n') + for name, ename, size in repo.store.walk(): + entries.append((name, size)) + total_bytes += size + finally: + lock.release() + except error.LockError: + return '2\n' # error: 2 + + def streamer(repo, entries, total): + '''stream out all metadata files in repository.''' + yield '0\n' # success + repo.ui.debug('%d files, %d bytes to transfer\n' % + (len(entries), total_bytes)) + yield '%d %d\n' % (len(entries), total_bytes) + for name, size in entries: + repo.ui.debug('sending %s (%d bytes)\n' % (name, size)) + # partially encode name over the wire for backwards compat + yield '%s\0%d\n' % (store.encodedir(name), size) + for chunk in util.filechunkiter(repo.sopener(name), limit=size): + yield chunk + + return streamres(streamer(repo, entries, total_bytes)) + +def unbundle(repo, proto, heads): + their_heads = decodelist(heads) + + def check_heads(): + heads = repo.heads() + return their_heads == ['force'] or their_heads == heads + + proto.redirect() + + # fail early if possible + if not check_heads(): + return pusherr('unsynced changes') + + # write bundle data to temporary file because it can be big + fd, tempname = tempfile.mkstemp(prefix='hg-unbundle-') + fp = os.fdopen(fd, 'wb+') + r = 0 + try: + proto.getfile(fp) + lock = repo.lock() + try: + if not check_heads(): + # someone else committed/pushed/unbundled while we + # were transferring data + return pusherr('unsynced changes') + + # push can proceed + fp.seek(0) + gen = changegroupmod.readbundle(fp, None) + + try: + r = repo.addchangegroup(gen, 'serve', proto._client(), + lock=lock) + except util.Abort, inst: + sys.stderr.write("abort: %s\n" % inst) + finally: + lock.release() + return pushres(r) + + finally: + fp.close() + os.unlink(tempname) + +commands = { + 'between': (between, 'pairs'), + 'branchmap': (branchmap, ''), + 'branches': (branches, 'nodes'), + 'capabilities': (capabilities, ''), + 'changegroup': (changegroup, 'roots'), + 'changegroupsubset': (changegroupsubset, 'bases heads'), + 'heads': (heads, ''), 
+ 'hello': (hello, ''), + 'listkeys': (listkeys, 'namespace'), + 'lookup': (lookup, 'key'), + 'pushkey': (pushkey, 'namespace key old new'), + 'stream_out': (stream, ''), + 'unbundle': (unbundle, 'heads'), +} diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/wireproto.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/wireproto.pyo Binary files differnew file mode 100644 index 0000000..981a393 --- /dev/null +++ b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/mercurial/wireproto.pyo
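The node-list helpers at the top of wireproto.py can be exercised on their own; the sketch below (the node ids are made up) shows the hex round trip used by commands such as heads and branches.

from mercurial import wireproto

nodes = ['\x00' * 20, '\x11' * 20]            # two hypothetical 20-byte node ids
wire = wireproto.encodelist(nodes)            # hex-encode and join with spaces
assert wireproto.decodelist(wire) == nodes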