Diffstat (limited to 'eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert')
-rw-r--r--  eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/__init__.py  321
-rw-r--r--  eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/__init__.pyo  bin  14008 -> 0 bytes
-rw-r--r--  eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/bzr.py  260
-rw-r--r--  eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/bzr.pyo  bin  9581 -> 0 bytes
-rw-r--r--  eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/common.py  389
-rw-r--r--  eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/common.pyo  bin  20100 -> 0 bytes
-rw-r--r--  eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/convcmd.py  434
-rw-r--r--  eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/convcmd.pyo  bin  16933 -> 0 bytes
-rw-r--r--  eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/cvs.py  271
-rw-r--r--  eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/cvs.pyo  bin  9466 -> 0 bytes
-rw-r--r--  eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/cvsps.py  847
-rw-r--r--  eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/cvsps.pyo  bin  21195 -> 0 bytes
-rw-r--r--  eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/darcs.py  200
-rw-r--r--  eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/darcs.pyo  bin  8694 -> 0 bytes
-rw-r--r--  eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/filemap.py  365
-rw-r--r--  eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/filemap.pyo  bin  11449 -> 0 bytes
-rw-r--r--  eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/git.py  170
-rw-r--r--  eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/git.pyo  bin  7015 -> 0 bytes
-rw-r--r--  eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/gnuarch.py  338
-rw-r--r--  eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/gnuarch.pyo  bin  12415 -> 0 bytes
-rw-r--r--  eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/hg.py  376
-rw-r--r--  eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/hg.pyo  bin  16042 -> 0 bytes
-rw-r--r--  eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/monotone.py  227
-rw-r--r--  eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/monotone.pyo  bin  7808 -> 0 bytes
-rw-r--r--  eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/p4.py  202
-rw-r--r--  eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/p4.pyo  bin  7333 -> 0 bytes
-rw-r--r--  eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/subversion.py  1168
-rw-r--r--  eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/subversion.pyo  bin  39371 -> 0 bytes
-rw-r--r--  eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/transport.py  128
-rw-r--r--  eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/transport.pyo  bin  5744 -> 0 bytes
30 files changed, 0 insertions, 5696 deletions
diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/__init__.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/__init__.py
deleted file mode 100644
index be7aca5..0000000
--- a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/__init__.py
+++ /dev/null
@@ -1,321 +0,0 @@
-# convert.py Foreign SCM converter
-#
-# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2 or any later version.
-
-'''import revisions from foreign VCS repositories into Mercurial'''
-
-import convcmd
-import cvsps
-import subversion
-from mercurial import commands
-from mercurial.i18n import _
-
-# Commands definition was moved elsewhere to ease demandload job.
-
-def convert(ui, src, dest=None, revmapfile=None, **opts):
- """convert a foreign SCM repository to a Mercurial one.
-
- Accepted source formats [identifiers]:
-
- - Mercurial [hg]
- - CVS [cvs]
- - Darcs [darcs]
- - git [git]
- - Subversion [svn]
- - Monotone [mtn]
- - GNU Arch [gnuarch]
- - Bazaar [bzr]
- - Perforce [p4]
-
- Accepted destination formats [identifiers]:
-
- - Mercurial [hg]
- - Subversion [svn] (history on branches is not preserved)
-
- If no revision is given, all revisions will be converted.
- Otherwise, convert will only import up to the named revision
- (given in a format understood by the source).
-
- If no destination directory name is specified, it defaults to the
- basename of the source with ``-hg`` appended. If the destination
- repository doesn't exist, it will be created.
-
- By default, all sources except Mercurial will use --branchsort.
- Mercurial uses --sourcesort to preserve original revision numbers
- order. Sort modes have the following effects:
-
- --branchsort convert from parent to child revision when possible,
- which means branches are usually converted one after
- the other. It generates more compact repositories.
-
- --datesort sort revisions by date. Converted repositories have
- good-looking changelogs but are often an order of
- magnitude larger than the same ones generated by
- --branchsort.
-
- --sourcesort try to preserve source revisions order, only
- supported by Mercurial sources.
-
- If <REVMAP> isn't given, it will be put in a default location
- (<dest>/.hg/shamap by default). The <REVMAP> is a simple text file
- that maps each source commit ID to the destination ID for that
- revision, like so::
-
- <source ID> <destination ID>
-
- If the file doesn't exist, it's automatically created. It's
- updated on each commit copied, so :hg:`convert` can be interrupted
- and can be run repeatedly to copy new commits.
-
- The authormap is a simple text file that maps each source commit
- author to a destination commit author. It is handy for source SCMs
- that use unix logins to identify authors (eg: CVS). One line per
- author mapping and the line format is::
-
- source author = destination author
-
- Empty lines and lines starting with a ``#`` are ignored.
-
- The filemap is a file that allows filtering and remapping of files
- and directories. Each line can contain one of the following
- directives::
-
- include path/to/file-or-dir
-
- exclude path/to/file-or-dir
-
- rename path/to/source path/to/destination
-
- Comment lines start with ``#``. A specified path matches if it
- equals the full relative name of a file or one of its parent
- directories. The ``include`` or ``exclude`` directive with the
- longest matching path applies, so line order does not matter.
-
- The ``include`` directive causes a file, or all files under a
- directory, to be included in the destination repository, and the
- exclusion of all other files and directories not explicitly
- included. The ``exclude`` directive causes files or directories to
- be omitted. The ``rename`` directive renames a file or directory if
- it is converted. To rename from a subdirectory into the root of
- the repository, use ``.`` as the path to rename to.
-
- The splicemap is a file that allows insertion of synthetic
- history, letting you specify the parents of a revision. This is
- useful if you want to e.g. give a Subversion merge two parents, or
- graft two disconnected series of history together. Each entry
- contains a key, followed by a space, followed by one or two
- comma-separated values::
-
- key parent1, parent2
-
- The key is the revision ID in the source
- revision control system whose parents should be modified (same
- format as a key in .hg/shamap). The values are the revision IDs
- (in either the source or destination revision control system) that
- should be used as the new parents for that node. For example, if
- you have merged "release-1.0" into "trunk", then you should
- specify the revision on "trunk" as the first parent and the one on
- the "release-1.0" branch as the second.
-
- The branchmap is a file that allows you to rename a branch when it is
- being brought in from whatever external repository. When used in
- conjunction with a splicemap, it allows for a powerful combination
- to help fix even the most badly mismanaged repositories and turn them
- into nicely structured Mercurial repositories. The branchmap contains
- lines of the form::
-
- original_branch_name new_branch_name
-
- where "original_branch_name" is the name of the branch in the
- source repository, and "new_branch_name" is the name of the branch
- in the destination repository. No whitespace is allowed in the
- branch names. This can be used to (for instance) move code in one
- repository from "default" to a named branch.
-
- Mercurial Source
- ''''''''''''''''
-
- --config convert.hg.ignoreerrors=False (boolean)
- ignore integrity errors when reading. Use it to fix Mercurial
- repositories with missing revlogs, by converting from and to
- Mercurial.
- --config convert.hg.saverev=False (boolean)
- store original revision ID in changeset (forces target IDs to
- change)
- --config convert.hg.startrev=0 (hg revision identifier)
- convert start revision and its descendants
-
- CVS Source
- ''''''''''
-
- CVS source will use a sandbox (i.e. a checked-out copy) from CVS
- to indicate the starting point of what will be converted. Direct
- access to the repository files is not needed, unless of course the
- repository is :local:. The conversion uses the top level directory
- in the sandbox to find the CVS repository, and then uses CVS rlog
- commands to find files to convert. This means that unless a
- filemap is given, all files under the starting directory will be
- converted, and that any directory reorganization in the CVS
- sandbox is ignored.
-
- The options shown are the defaults.
-
- --config convert.cvsps.cache=True (boolean)
- Set to False to disable remote log caching, for testing and
- debugging purposes.
- --config convert.cvsps.fuzz=60 (integer)
- Specify the maximum time (in seconds) that is allowed between
- commits with identical user and log message in a single
- changeset. When very large files were checked in as part of a
- changeset then the default may not be long enough.
- --config convert.cvsps.mergeto='{{mergetobranch ([-\\w]+)}}'
- Specify a regular expression to which commit log messages are
- matched. If a match occurs, then the conversion process will
- insert a dummy revision merging the branch on which this log
- message occurs to the branch indicated in the regex.
- --config convert.cvsps.mergefrom='{{mergefrombranch ([-\\w]+)}}'
- Specify a regular expression to which commit log messages are
- matched. If a match occurs, then the conversion process will
- add the most recent revision on the branch indicated in the
- regex as the second parent of the changeset.
- --config hook.cvslog
- Specify a Python function to be called at the end of gathering
- the CVS log. The function is passed a list with the log entries,
- and can modify the entries in-place, or add or delete them.
- --config hook.cvschangesets
- Specify a Python function to be called after the changesets
- are calculated from the CVS log. The function is passed
- a list with the changeset entries, and can modify the changesets
- in-place, or add or delete them.
-
- An additional "debugcvsps" Mercurial command allows the builtin
- changeset merging code to be run without doing a conversion. Its
- parameters and output are similar to that of cvsps 2.1. Please see
- the command help for more details.
-
- Subversion Source
- '''''''''''''''''
-
- Subversion source detects classical trunk/branches/tags layouts.
- By default, the supplied "svn://repo/path/" source URL is
- converted as a single branch. If "svn://repo/path/trunk" exists it
- replaces the default branch. If "svn://repo/path/branches" exists,
- its subdirectories are listed as possible branches. If
- "svn://repo/path/tags" exists, it is looked for tags referencing
- converted branches. Default "trunk", "branches" and "tags" values
- can be overridden with following options. Set them to paths
- relative to the source URL, or leave them blank to disable auto
- detection.
-
- --config convert.svn.branches=branches (directory name)
- specify the directory containing branches
- --config convert.svn.tags=tags (directory name)
- specify the directory containing tags
- --config convert.svn.trunk=trunk (directory name)
- specify the name of the trunk branch
-
- Source history can be retrieved starting at a specific revision,
- instead of being integrally converted. Only single branch
- conversions are supported.
-
- --config convert.svn.startrev=0 (svn revision number)
- specify start Subversion revision.
-
- Perforce Source
- '''''''''''''''
-
- The Perforce (P4) importer can be given a p4 depot path or a
- client specification as source. It will convert all files in the
- source to a flat Mercurial repository, ignoring labels, branches
- and integrations. Note that when a depot path is given you then
- usually should specify a target directory, because otherwise the
- target may be named ...-hg.
-
- It is possible to limit the amount of source history to be
- converted by specifying an initial Perforce revision.
-
- --config convert.p4.startrev=0 (perforce changelist number)
- specify initial Perforce revision.
-
- Mercurial Destination
- '''''''''''''''''''''
-
- --config convert.hg.clonebranches=False (boolean)
- dispatch source branches in separate clones.
- --config convert.hg.tagsbranch=default (branch name)
- tag revisions branch name
- --config convert.hg.usebranchnames=True (boolean)
- preserve branch names
-
- """
- return convcmd.convert(ui, src, dest, revmapfile, **opts)
-
-def debugsvnlog(ui, **opts):
- return subversion.debugsvnlog(ui, **opts)
-
-def debugcvsps(ui, *args, **opts):
- '''create changeset information from CVS
-
- This command is intended as a debugging tool for the CVS to
- Mercurial converter, and can be used as a direct replacement for
- cvsps.
-
- Hg debugcvsps reads the CVS rlog for current directory (or any
- named directory) in the CVS repository, and converts the log to a
- series of changesets based on matching commit log entries and
- dates.'''
- return cvsps.debugcvsps(ui, *args, **opts)
-
-commands.norepo += " convert debugsvnlog debugcvsps"
-
-cmdtable = {
- "convert":
- (convert,
- [('', 'authors', '',
- _('username mapping filename (DEPRECATED, use --authormap instead)'),
- _('FILE')),
- ('s', 'source-type', '',
- _('source repository type'), _('TYPE')),
- ('d', 'dest-type', '',
- _('destination repository type'), _('TYPE')),
- ('r', 'rev', '',
- _('import up to target revision REV'), _('REV')),
- ('A', 'authormap', '',
- _('remap usernames using this file'), _('FILE')),
- ('', 'filemap', '',
- _('remap file names using contents of file'), _('FILE')),
- ('', 'splicemap', '',
- _('splice synthesized history into place'), _('FILE')),
- ('', 'branchmap', '',
- _('change branch names while converting'), _('FILE')),
- ('', 'branchsort', None, _('try to sort changesets by branches')),
- ('', 'datesort', None, _('try to sort changesets by date')),
- ('', 'sourcesort', None, _('preserve source changesets order'))],
- _('hg convert [OPTION]... SOURCE [DEST [REVMAP]]')),
- "debugsvnlog":
- (debugsvnlog,
- [],
- 'hg debugsvnlog'),
- "debugcvsps":
- (debugcvsps,
- [
- # Main options shared with cvsps-2.1
- ('b', 'branches', [], _('only return changes on specified branches')),
- ('p', 'prefix', '', _('prefix to remove from file names')),
- ('r', 'revisions', [],
- _('only return changes after or between specified tags')),
- ('u', 'update-cache', None, _("update cvs log cache")),
- ('x', 'new-cache', None, _("create new cvs log cache")),
- ('z', 'fuzz', 60, _('set commit time fuzz in seconds')),
- ('', 'root', '', _('specify cvsroot')),
- # Options specific to builtin cvsps
- ('', 'parents', '', _('show parent changesets')),
- ('', 'ancestors', '', _('show current changeset in ancestor branches')),
- # Options that are ignored for compatibility with cvsps-2.1
- ('A', 'cvs-direct', None, _('ignored for compatibility')),
- ],
- _('hg debugcvsps [OPTION]... [PATH]...')),
-}
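
The help text in __init__.py above documents several plain-text mapping files (authormap, splicemap, branchmap, and the revmap/shamap). As a rough illustration of the documented line formats only (a hypothetical sketch in Python, not the extension's own parsing code), standalone parsers could look like this:

    # Hypothetical sketches of the map-file line formats documented in the
    # help text above; they are not part of the convert extension itself.

    def parse_authormap(lines):
        """Lines of 'source author = destination author'; blank lines and
        lines starting with '#' are ignored, as described above."""
        mapping = {}
        for line in lines:
            line = line.strip()
            if not line or line.startswith('#'):
                continue
            src, sep, dst = line.partition('=')
            if sep:
                mapping[src.strip()] = dst.strip()
        return mapping

    def parse_splicemap(lines):
        """Lines of 'key parent1[, parent2]' -> {key: [parent revisions]}."""
        mapping = {}
        for line in lines:
            line = line.strip()
            if not line or line.startswith('#'):
                continue
            key, sep, rest = line.partition(' ')
            if sep:
                mapping[key] = [p.strip() for p in rest.split(',') if p.strip()]
        return mapping

    # Example with made-up identifiers:
    #   parse_authormap(['jdoe = John Doe <jdoe@example.com>'])
    #   parse_splicemap(['deadbeef cafebabe, 0123abcd'])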
diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/__init__.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/__init__.pyo
deleted file mode 100644
index 892b438..0000000
--- a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/__init__.pyo
+++ /dev/null
Binary files differ
diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/bzr.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/bzr.py
deleted file mode 100644
index cc16258..0000000
--- a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/bzr.py
+++ /dev/null
@@ -1,260 +0,0 @@
-# bzr.py - bzr support for the convert extension
-#
-# Copyright 2008, 2009 Marek Kubica <marek@xivilization.net> and others
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2 or any later version.
-
- # This module is for handling 'bzr', which was formerly known as Bazaar-NG;
- # it cannot access 'baz' repositories, but they were never used very much
-
-import os
-from mercurial import demandimport
-# these do not work with demandimport, blacklist
-demandimport.ignore.extend([
- 'bzrlib.transactions',
- 'bzrlib.urlutils',
- 'ElementPath',
- ])
-
-from mercurial.i18n import _
-from mercurial import util
-from common import NoRepo, commit, converter_source
-
-try:
- # bazaar imports
- from bzrlib import branch, revision, errors
- from bzrlib.revisionspec import RevisionSpec
-except ImportError:
- pass
-
-supportedkinds = ('file', 'symlink')
-
-class bzr_source(converter_source):
- """Reads Bazaar repositories by using the Bazaar Python libraries"""
-
- def __init__(self, ui, path, rev=None):
- super(bzr_source, self).__init__(ui, path, rev=rev)
-
- if not os.path.exists(os.path.join(path, '.bzr')):
- raise NoRepo(_('%s does not look like a Bazaar repository')
- % path)
-
- try:
- # access bzrlib stuff
- branch
- except NameError:
- raise NoRepo(_('Bazaar modules could not be loaded'))
-
- path = os.path.abspath(path)
- self._checkrepotype(path)
- self.branch = branch.Branch.open(path)
- self.sourcerepo = self.branch.repository
- self._parentids = {}
-
- def _checkrepotype(self, path):
- # Lightweight checkouts detection is informational but probably
- # fragile at API level. It should not terminate the conversion.
- try:
- from bzrlib import bzrdir
- dir = bzrdir.BzrDir.open_containing(path)[0]
- try:
- tree = dir.open_workingtree(recommend_upgrade=False)
- branch = tree.branch
- except (errors.NoWorkingTree, errors.NotLocalUrl):
- tree = None
- branch = dir.open_branch()
- if (tree is not None and tree.bzrdir.root_transport.base !=
- branch.bzrdir.root_transport.base):
- self.ui.warn(_('warning: lightweight checkouts may cause '
- 'conversion failures, try with a regular '
- 'branch instead.\n'))
- except:
- self.ui.note(_('bzr source type could not be determined\n'))
-
- def before(self):
- """Before the conversion begins, acquire a read lock
- for all the operations that might need it. Fortunately
- read locks don't block other reads or writes to the
- repository, so this shouldn't have any impact on the usage of
- the source repository.
-
- The alternative would be locking on every operation that
- needs locks (there are currently two: getting the file and
- getting the parent map) and releasing immediately after,
- but this approach can take up to 40% longer."""
- self.sourcerepo.lock_read()
-
- def after(self):
- self.sourcerepo.unlock()
-
- def getheads(self):
- if not self.rev:
- return [self.branch.last_revision()]
- try:
- r = RevisionSpec.from_string(self.rev)
- info = r.in_history(self.branch)
- except errors.BzrError:
- raise util.Abort(_('%s is not a valid revision in current branch')
- % self.rev)
- return [info.rev_id]
-
- def getfile(self, name, rev):
- revtree = self.sourcerepo.revision_tree(rev)
- fileid = revtree.path2id(name.decode(self.encoding or 'utf-8'))
- kind = None
- if fileid is not None:
- kind = revtree.kind(fileid)
- if kind not in supportedkinds:
- # the file is not available anymore - was deleted
- raise IOError(_('%s is not available in %s anymore') %
- (name, rev))
- mode = self._modecache[(name, rev)]
- if kind == 'symlink':
- target = revtree.get_symlink_target(fileid)
- if target is None:
- raise util.Abort(_('%s.%s symlink has no target')
- % (name, rev))
- return target, mode
- else:
- sio = revtree.get_file(fileid)
- return sio.read(), mode
-
- def getchanges(self, version):
- # set up caches: modecache and revtree
- self._modecache = {}
- self._revtree = self.sourcerepo.revision_tree(version)
- # get the parentids from the cache
- parentids = self._parentids.pop(version)
- # only diff against first parent id
- prevtree = self.sourcerepo.revision_tree(parentids[0])
- return self._gettreechanges(self._revtree, prevtree)
-
- def getcommit(self, version):
- rev = self.sourcerepo.get_revision(version)
- # populate parent id cache
- if not rev.parent_ids:
- parents = []
- self._parentids[version] = (revision.NULL_REVISION,)
- else:
- parents = self._filterghosts(rev.parent_ids)
- self._parentids[version] = parents
-
- return commit(parents=parents,
- date='%d %d' % (rev.timestamp, -rev.timezone),
- author=self.recode(rev.committer),
- # bzr returns bytestrings or unicode, depending on the content
- desc=self.recode(rev.message),
- rev=version)
-
- def gettags(self):
- if not self.branch.supports_tags():
- return {}
- tagdict = self.branch.tags.get_tag_dict()
- bytetags = {}
- for name, rev in tagdict.iteritems():
- bytetags[self.recode(name)] = rev
- return bytetags
-
- def getchangedfiles(self, rev, i):
- self._modecache = {}
- curtree = self.sourcerepo.revision_tree(rev)
- if i is not None:
- parentid = self._parentids[rev][i]
- else:
- # no parent id, get the empty revision
- parentid = revision.NULL_REVISION
-
- prevtree = self.sourcerepo.revision_tree(parentid)
- changes = [e[0] for e in self._gettreechanges(curtree, prevtree)[0]]
- return changes
-
- def _gettreechanges(self, current, origin):
- revid = current._revision_id
- changes = []
- renames = {}
- for (fileid, paths, changed_content, versioned, parent, name,
- kind, executable) in current.iter_changes(origin):
-
- if paths[0] == u'' or paths[1] == u'':
- # ignore changes to tree root
- continue
-
- # bazaar tracks directories, mercurial does not, so
- # we have to rename the directory contents
- if kind[1] == 'directory':
- if kind[0] not in (None, 'directory'):
- # Replacing 'something' with a directory, record it
- # so it can be removed.
- changes.append((self.recode(paths[0]), revid))
-
- if None not in paths and paths[0] != paths[1]:
- # neither an add nor a delete - a move
- # rename all directory contents manually
- subdir = origin.inventory.path2id(paths[0])
- # get all child-entries of the directory
- for name, entry in origin.inventory.iter_entries(subdir):
- # hg does not track directory renames
- if entry.kind == 'directory':
- continue
- frompath = self.recode(paths[0] + '/' + name)
- topath = self.recode(paths[1] + '/' + name)
- # register the files as changed
- changes.append((frompath, revid))
- changes.append((topath, revid))
- # add to mode cache
- mode = ((entry.executable and 'x')
- or (entry.kind == 'symlink' and 'l')
- or '')
- self._modecache[(topath, revid)] = mode
- # register the change as move
- renames[topath] = frompath
-
- # no further changes, go to the next change
- continue
-
- # we got unicode paths, need to convert them
- path, topath = [self.recode(part) for part in paths]
-
- if topath is None:
- # file deleted
- changes.append((path, revid))
- continue
-
- # renamed
- if path and path != topath:
- renames[topath] = path
- changes.append((path, revid))
-
- # populate the mode cache
- kind, executable = [e[1] for e in (kind, executable)]
- mode = ((executable and 'x') or (kind == 'symlink' and 'l')
- or '')
- self._modecache[(topath, revid)] = mode
- changes.append((topath, revid))
-
- return changes, renames
-
- def _filterghosts(self, ids):
- """Filters out ghost revisions which hg does not support, see
- <http://bazaar-vcs.org/GhostRevision>
- """
- parentmap = self.sourcerepo.get_parent_map(ids)
- parents = tuple([parent for parent in ids if parent in parentmap])
- return parents
-
- def recode(self, s, encoding=None):
- """This version of recode tries to encode unicode to bytecode,
- and preferably using the UTF-8 codec.
- Other types than Unicode are silently returned, this is by
- intention, e.g. the None-type is not going to be encoded but instead
- just passed through
- """
- if not encoding:
- encoding = self.encoding or 'utf-8'
-
- if isinstance(s, unicode):
- return s.encode(encoding)
- else:
- # leave it alone
- return s
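
getfile() in bzr_source above reports file type through the single-character mode convention shared by all converter sources: '' for a regular file, 'x' for an executable and 'l' for a symlink. A minimal sketch of that mapping, using a hypothetical helper name rather than the inline expressions in _gettreechanges():

    # Hypothetical helper mirroring the mode convention used by getfile():
    # '' = regular file, 'x' = executable, 'l' = symlink.
    def entry_mode(kind, executable):
        if kind == 'symlink':
            return 'l'
        return 'x' if executable else ''

    # entry_mode('file', True) == 'x'; entry_mode('symlink', False) == 'l'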
diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/bzr.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/bzr.pyo
deleted file mode 100644
index ab47e99..0000000
--- a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/bzr.pyo
+++ /dev/null
Binary files differ
diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/common.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/common.py
deleted file mode 100644
index fb3865f..0000000
--- a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/common.py
+++ /dev/null
@@ -1,389 +0,0 @@
-# common.py - common code for the convert extension
-#
-# Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2 or any later version.
-
-import base64, errno
-import os
-import cPickle as pickle
-from mercurial import util
-from mercurial.i18n import _
-
-def encodeargs(args):
- def encodearg(s):
- lines = base64.encodestring(s)
- lines = [l.splitlines()[0] for l in lines]
- return ''.join(lines)
-
- s = pickle.dumps(args)
- return encodearg(s)
-
-def decodeargs(s):
- s = base64.decodestring(s)
- return pickle.loads(s)
-
-class MissingTool(Exception):
- pass
-
-def checktool(exe, name=None, abort=True):
- name = name or exe
- if not util.find_exe(exe):
- exc = abort and util.Abort or MissingTool
- raise exc(_('cannot find required "%s" tool') % name)
-
-class NoRepo(Exception):
- pass
-
-SKIPREV = 'SKIP'
-
-class commit(object):
- def __init__(self, author, date, desc, parents, branch=None, rev=None,
- extra={}, sortkey=None):
- self.author = author or 'unknown'
- self.date = date or '0 0'
- self.desc = desc
- self.parents = parents
- self.branch = branch
- self.rev = rev
- self.extra = extra
- self.sortkey = sortkey
-
-class converter_source(object):
- """Conversion source interface"""
-
- def __init__(self, ui, path=None, rev=None):
- """Initialize conversion source (or raise NoRepo("message")
- exception if path is not a valid repository)"""
- self.ui = ui
- self.path = path
- self.rev = rev
-
- self.encoding = 'utf-8'
-
- def before(self):
- pass
-
- def after(self):
- pass
-
- def setrevmap(self, revmap):
- """set the map of already-converted revisions"""
- pass
-
- def getheads(self):
- """Return a list of this repository's heads"""
- raise NotImplementedError()
-
- def getfile(self, name, rev):
- """Return a pair (data, mode) where data is the file content
- as a string and mode one of '', 'x' or 'l'. rev is the
- identifier returned by a previous call to getchanges(). Raise
- IOError to indicate that name was deleted in rev.
- """
- raise NotImplementedError()
-
- def getchanges(self, version):
- """Returns a tuple of (files, copies).
-
- files is a sorted list of (filename, id) tuples for all files
- changed between version and its first parent returned by
- getcommit(). id is the source revision id of the file.
-
- copies is a dictionary of dest: source
- """
- raise NotImplementedError()
-
- def getcommit(self, version):
- """Return the commit object for version"""
- raise NotImplementedError()
-
- def gettags(self):
- """Return the tags as a dictionary of name: revision
-
- Tag names must be UTF-8 strings.
- """
- raise NotImplementedError()
-
- def recode(self, s, encoding=None):
- if not encoding:
- encoding = self.encoding or 'utf-8'
-
- if isinstance(s, unicode):
- return s.encode("utf-8")
- try:
- return s.decode(encoding).encode("utf-8")
- except:
- try:
- return s.decode("latin-1").encode("utf-8")
- except:
- return s.decode(encoding, "replace").encode("utf-8")
-
- def getchangedfiles(self, rev, i):
- """Return the files changed by rev compared to parent[i].
-
- i is an index selecting one of the parents of rev. The return
- value should be the list of files that are different in rev and
- this parent.
-
- If rev has no parents, i is None.
-
- This function is only needed to support --filemap
- """
- raise NotImplementedError()
-
- def converted(self, rev, sinkrev):
- '''Notify the source that a revision has been converted.'''
- pass
-
- def hasnativeorder(self):
- """Return true if this source has a meaningful, native revision
- order. For instance, Mercurial revisions are stored sequentially
- while there is no such global ordering with Darcs.
- """
- return False
-
- def lookuprev(self, rev):
- """If rev is a meaningful revision reference in source, return
- the referenced identifier in the same format used by getcommit().
- return None otherwise.
- """
- return None
-
-class converter_sink(object):
- """Conversion sink (target) interface"""
-
- def __init__(self, ui, path):
- """Initialize conversion sink (or raise NoRepo("message")
- exception if path is not a valid repository)
-
- created is a list of paths to remove if a fatal error occurs
- later"""
- self.ui = ui
- self.path = path
- self.created = []
-
- def getheads(self):
- """Return a list of this repository's heads"""
- raise NotImplementedError()
-
- def revmapfile(self):
- """Path to a file that will contain lines
- source_rev_id sink_rev_id
- mapping equivalent revision identifiers for each system."""
- raise NotImplementedError()
-
- def authorfile(self):
- """Path to a file that will contain lines
- srcauthor=dstauthor
- mapping equivalent author identifiers for each system."""
- return None
-
- def putcommit(self, files, copies, parents, commit, source, revmap):
- """Create a revision with all changed files listed in 'files'
- and having listed parents. 'commit' is a commit object
- containing at a minimum the author, date, and message for this
- changeset. 'files' is a list of (path, version) tuples,
- 'copies' is a dictionary mapping destinations to sources,
- 'source' is the source repository, and 'revmap' is a mapfile
- of source revisions to converted revisions. Only getfile() and
- lookuprev() should be called on 'source'.
-
- Note that the sink repository is not told to update itself to
- a particular revision (or even what that revision would be)
- before it receives the file data.
- """
- raise NotImplementedError()
-
- def puttags(self, tags):
- """Put tags into sink.
-
- tags: {tagname: sink_rev_id, ...} where tagname is a UTF-8 string.
- Return a pair (tag_revision, tag_parent_revision), or (None, None)
- if nothing was changed.
- """
- raise NotImplementedError()
-
- def setbranch(self, branch, pbranches):
- """Set the current branch name. Called before the first putcommit
- on the branch.
- branch: branch name for subsequent commits
- pbranches: (converted parent revision, parent branch) tuples"""
- pass
-
- def setfilemapmode(self, active):
- """Tell the destination that we're using a filemap
-
- Some converter_sources (svn in particular) can claim that a file
- was changed in a revision, even if there was no change. This method
- tells the destination that we're using a filemap and that it should
- filter empty revisions.
- """
- pass
-
- def before(self):
- pass
-
- def after(self):
- pass
-
-
-class commandline(object):
- def __init__(self, ui, command):
- self.ui = ui
- self.command = command
-
- def prerun(self):
- pass
-
- def postrun(self):
- pass
-
- def _cmdline(self, cmd, *args, **kwargs):
- cmdline = [self.command, cmd] + list(args)
- for k, v in kwargs.iteritems():
- if len(k) == 1:
- cmdline.append('-' + k)
- else:
- cmdline.append('--' + k.replace('_', '-'))
- try:
- if len(k) == 1:
- cmdline.append('' + v)
- else:
- cmdline[-1] += '=' + v
- except TypeError:
- pass
- cmdline = [util.shellquote(arg) for arg in cmdline]
- if not self.ui.debugflag:
- cmdline += ['2>', util.nulldev]
- cmdline += ['<', util.nulldev]
- cmdline = ' '.join(cmdline)
- return cmdline
-
- def _run(self, cmd, *args, **kwargs):
- cmdline = self._cmdline(cmd, *args, **kwargs)
- self.ui.debug('running: %s\n' % (cmdline,))
- self.prerun()
- try:
- return util.popen(cmdline)
- finally:
- self.postrun()
-
- def run(self, cmd, *args, **kwargs):
- fp = self._run(cmd, *args, **kwargs)
- output = fp.read()
- self.ui.debug(output)
- return output, fp.close()
-
- def runlines(self, cmd, *args, **kwargs):
- fp = self._run(cmd, *args, **kwargs)
- output = fp.readlines()
- self.ui.debug(''.join(output))
- return output, fp.close()
-
- def checkexit(self, status, output=''):
- if status:
- if output:
- self.ui.warn(_('%s error:\n') % self.command)
- self.ui.warn(output)
- msg = util.explain_exit(status)[0]
- raise util.Abort('%s %s' % (self.command, msg))
-
- def run0(self, cmd, *args, **kwargs):
- output, status = self.run(cmd, *args, **kwargs)
- self.checkexit(status, output)
- return output
-
- def runlines0(self, cmd, *args, **kwargs):
- output, status = self.runlines(cmd, *args, **kwargs)
- self.checkexit(status, ''.join(output))
- return output
-
- def getargmax(self):
- if '_argmax' in self.__dict__:
- return self._argmax
-
- # POSIX requires at least 4096 bytes for ARG_MAX
- self._argmax = 4096
- try:
- self._argmax = os.sysconf("SC_ARG_MAX")
- except:
- pass
-
- # Windows shells impose their own limits on command line length,
- # down to 2047 bytes for cmd.exe under Windows NT/2k and 2500 bytes
- # for older 4nt.exe. See http://support.microsoft.com/kb/830473 for
- # details about cmd.exe limitations.
-
- # Since ARG_MAX is for command line _and_ environment, lower our limit
- # (and make happy Windows shells while doing this).
-
- self._argmax = self._argmax / 2 - 1
- return self._argmax
-
- def limit_arglist(self, arglist, cmd, *args, **kwargs):
- limit = self.getargmax() - len(self._cmdline(cmd, *args, **kwargs))
- bytes = 0
- fl = []
- for fn in arglist:
- b = len(fn) + 3
- if bytes + b < limit or len(fl) == 0:
- fl.append(fn)
- bytes += b
- else:
- yield fl
- fl = [fn]
- bytes = b
- if fl:
- yield fl
-
- def xargs(self, arglist, cmd, *args, **kwargs):
- for l in self.limit_arglist(arglist, cmd, *args, **kwargs):
- self.run0(cmd, *(list(args) + l), **kwargs)
-
-class mapfile(dict):
- def __init__(self, ui, path):
- super(mapfile, self).__init__()
- self.ui = ui
- self.path = path
- self.fp = None
- self.order = []
- self._read()
-
- def _read(self):
- if not self.path:
- return
- try:
- fp = open(self.path, 'r')
- except IOError, err:
- if err.errno != errno.ENOENT:
- raise
- return
- for i, line in enumerate(fp):
- try:
- key, value = line.splitlines()[0].rsplit(' ', 1)
- except ValueError:
- raise util.Abort(
- _('syntax error in %s(%d): key/value pair expected')
- % (self.path, i + 1))
- if key not in self:
- self.order.append(key)
- super(mapfile, self).__setitem__(key, value)
- fp.close()
-
- def __setitem__(self, key, value):
- if self.fp is None:
- try:
- self.fp = open(self.path, 'a')
- except IOError, err:
- raise util.Abort(_('could not open map file %r: %s') %
- (self.path, err.strerror))
- self.fp.write('%s %s\n' % (key, value))
- self.fp.flush()
- super(mapfile, self).__setitem__(key, value)
-
- def close(self):
- if self.fp:
- self.fp.close()
- self.fp = None
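
The mapfile class above behaves like a dict whose assignments are also appended to a text file of 'key value' lines; re-reading that file on startup is what makes interrupted conversions resumable. A stripped-down standalone sketch of the same idea, without the Mercurial ui or error reporting:

    # Simplified, hypothetical stand-in for mapfile's append-through behaviour.
    class appendmap(dict):
        def __init__(self, path):
            super(appendmap, self).__init__()
            self.path = path
            try:
                fp = open(path)
            except IOError:
                return                      # no file yet: start empty
            for line in fp:
                key, sep, value = line.rstrip('\n').rpartition(' ')
                if sep:                     # skip malformed lines (mapfile aborts instead)
                    super(appendmap, self).__setitem__(key, value)
            fp.close()

        def __setitem__(self, key, value):
            with open(self.path, 'a') as fp:
                fp.write('%s %s\n' % (key, value))   # write through, then cache
            super(appendmap, self).__setitem__(key, value)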
diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/common.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/common.pyo
deleted file mode 100644
index de20000..0000000
--- a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/common.pyo
+++ /dev/null
Binary files differ
diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/convcmd.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/convcmd.py
deleted file mode 100644
index ac91b41..0000000
--- a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/convcmd.py
+++ /dev/null
@@ -1,434 +0,0 @@
-# convcmd - convert extension commands definition
-#
-# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2 or any later version.
-
-from common import NoRepo, MissingTool, SKIPREV, mapfile
-from cvs import convert_cvs
-from darcs import darcs_source
-from git import convert_git
-from hg import mercurial_source, mercurial_sink
-from subversion import svn_source, svn_sink
-from monotone import monotone_source
-from gnuarch import gnuarch_source
-from bzr import bzr_source
-from p4 import p4_source
-import filemap
-
-import os, shutil
-from mercurial import hg, util, encoding
-from mercurial.i18n import _
-
-orig_encoding = 'ascii'
-
-def recode(s):
- if isinstance(s, unicode):
- return s.encode(orig_encoding, 'replace')
- else:
- return s.decode('utf-8').encode(orig_encoding, 'replace')
-
-source_converters = [
- ('cvs', convert_cvs, 'branchsort'),
- ('git', convert_git, 'branchsort'),
- ('svn', svn_source, 'branchsort'),
- ('hg', mercurial_source, 'sourcesort'),
- ('darcs', darcs_source, 'branchsort'),
- ('mtn', monotone_source, 'branchsort'),
- ('gnuarch', gnuarch_source, 'branchsort'),
- ('bzr', bzr_source, 'branchsort'),
- ('p4', p4_source, 'branchsort'),
- ]
-
-sink_converters = [
- ('hg', mercurial_sink),
- ('svn', svn_sink),
- ]
-
-def convertsource(ui, path, type, rev):
- exceptions = []
- if type and type not in [s[0] for s in source_converters]:
- raise util.Abort(_('%s: invalid source repository type') % type)
- for name, source, sortmode in source_converters:
- try:
- if not type or name == type:
- return source(ui, path, rev), sortmode
- except (NoRepo, MissingTool), inst:
- exceptions.append(inst)
- if not ui.quiet:
- for inst in exceptions:
- ui.write("%s\n" % inst)
- raise util.Abort(_('%s: missing or unsupported repository') % path)
-
-def convertsink(ui, path, type):
- if type and type not in [s[0] for s in sink_converters]:
- raise util.Abort(_('%s: invalid destination repository type') % type)
- for name, sink in sink_converters:
- try:
- if not type or name == type:
- return sink(ui, path)
- except NoRepo, inst:
- ui.note(_("convert: %s\n") % inst)
- raise util.Abort(_('%s: unknown repository type') % path)
-
-class progresssource(object):
- def __init__(self, ui, source, filecount):
- self.ui = ui
- self.source = source
- self.filecount = filecount
- self.retrieved = 0
-
- def getfile(self, file, rev):
- self.retrieved += 1
- self.ui.progress(_('getting files'), self.retrieved,
- item=file, total=self.filecount)
- return self.source.getfile(file, rev)
-
- def lookuprev(self, rev):
- return self.source.lookuprev(rev)
-
- def close(self):
- self.ui.progress(_('getting files'), None)
-
-class converter(object):
- def __init__(self, ui, source, dest, revmapfile, opts):
-
- self.source = source
- self.dest = dest
- self.ui = ui
- self.opts = opts
- self.commitcache = {}
- self.authors = {}
- self.authorfile = None
-
- # Record converted revisions persistently: maps source revision
- # ID to target revision ID (both strings). (This is how
- # incremental conversions work.)
- self.map = mapfile(ui, revmapfile)
-
- # Read first the dst author map if any
- authorfile = self.dest.authorfile()
- if authorfile and os.path.exists(authorfile):
- self.readauthormap(authorfile)
- # Extend/Override with new author map if necessary
- if opts.get('authormap'):
- self.readauthormap(opts.get('authormap'))
- self.authorfile = self.dest.authorfile()
-
- self.splicemap = mapfile(ui, opts.get('splicemap'))
- self.branchmap = mapfile(ui, opts.get('branchmap'))
-
- def walktree(self, heads):
- '''Return a mapping that identifies the uncommitted parents of every
- uncommitted changeset.'''
- visit = heads
- known = set()
- parents = {}
- while visit:
- n = visit.pop(0)
- if n in known or n in self.map:
- continue
- known.add(n)
- self.ui.progress(_('scanning'), len(known), unit=_('revisions'))
- commit = self.cachecommit(n)
- parents[n] = []
- for p in commit.parents:
- parents[n].append(p)
- visit.append(p)
- self.ui.progress(_('scanning'), None)
-
- return parents
-
- def toposort(self, parents, sortmode):
- '''Return an ordering such that every uncommitted changeset is
- preceded by all its uncommitted ancestors.'''
-
- def mapchildren(parents):
- """Return a (children, roots) tuple where 'children' maps parent
- revision identifiers to children ones, and 'roots' is the list of
- revisions without parents. 'parents' must be a mapping of revision
- identifier to its parents ones.
- """
- visit = parents.keys()
- seen = set()
- children = {}
- roots = []
-
- while visit:
- n = visit.pop(0)
- if n in seen:
- continue
- seen.add(n)
- # Ensure that nodes without parents are present in the
- # 'children' mapping.
- children.setdefault(n, [])
- hasparent = False
- for p in parents[n]:
- if not p in self.map:
- visit.append(p)
- hasparent = True
- children.setdefault(p, []).append(n)
- if not hasparent:
- roots.append(n)
-
- return children, roots
-
- # Sort functions are supposed to take a list of revisions which
- # can be converted immediately and pick one
-
- def makebranchsorter():
- """If the previously converted revision has a child in the
- eligible revisions list, pick it. Return the list head
- otherwise. Branch sort attempts to minimize branch
- switching, which is harmful for Mercurial backend
- compression.
- """
- prev = [None]
- def picknext(nodes):
- next = nodes[0]
- for n in nodes:
- if prev[0] in parents[n]:
- next = n
- break
- prev[0] = next
- return next
- return picknext
-
- def makesourcesorter():
- """Source specific sort."""
- keyfn = lambda n: self.commitcache[n].sortkey
- def picknext(nodes):
- return sorted(nodes, key=keyfn)[0]
- return picknext
-
- def makedatesorter():
- """Sort revisions by date."""
- dates = {}
- def getdate(n):
- if n not in dates:
- dates[n] = util.parsedate(self.commitcache[n].date)
- return dates[n]
-
- def picknext(nodes):
- return min([(getdate(n), n) for n in nodes])[1]
-
- return picknext
-
- if sortmode == 'branchsort':
- picknext = makebranchsorter()
- elif sortmode == 'datesort':
- picknext = makedatesorter()
- elif sortmode == 'sourcesort':
- picknext = makesourcesorter()
- else:
- raise util.Abort(_('unknown sort mode: %s') % sortmode)
-
- children, actives = mapchildren(parents)
-
- s = []
- pendings = {}
- while actives:
- n = picknext(actives)
- actives.remove(n)
- s.append(n)
-
- # Update dependents list
- for c in children.get(n, []):
- if c not in pendings:
- pendings[c] = [p for p in parents[c] if p not in self.map]
- try:
- pendings[c].remove(n)
- except ValueError:
- raise util.Abort(_('cycle detected between %s and %s')
- % (recode(c), recode(n)))
- if not pendings[c]:
- # Parents are converted, node is eligible
- actives.insert(0, c)
- pendings[c] = None
-
- if len(s) != len(parents):
- raise util.Abort(_("not all revisions were sorted"))
-
- return s
-
- def writeauthormap(self):
- authorfile = self.authorfile
- if authorfile:
- self.ui.status(_('Writing author map file %s\n') % authorfile)
- ofile = open(authorfile, 'w+')
- for author in self.authors:
- ofile.write("%s=%s\n" % (author, self.authors[author]))
- ofile.close()
-
- def readauthormap(self, authorfile):
- afile = open(authorfile, 'r')
- for line in afile:
-
- line = line.strip()
- if not line or line.startswith('#'):
- continue
-
- try:
- srcauthor, dstauthor = line.split('=', 1)
- except ValueError:
- msg = _('Ignoring bad line in author map file %s: %s\n')
- self.ui.warn(msg % (authorfile, line.rstrip()))
- continue
-
- srcauthor = srcauthor.strip()
- dstauthor = dstauthor.strip()
- if self.authors.get(srcauthor) in (None, dstauthor):
- msg = _('mapping author %s to %s\n')
- self.ui.debug(msg % (srcauthor, dstauthor))
- self.authors[srcauthor] = dstauthor
- continue
-
- m = _('overriding mapping for author %s, was %s, will be %s\n')
- self.ui.status(m % (srcauthor, self.authors[srcauthor], dstauthor))
-
- afile.close()
-
- def cachecommit(self, rev):
- commit = self.source.getcommit(rev)
- commit.author = self.authors.get(commit.author, commit.author)
- commit.branch = self.branchmap.get(commit.branch, commit.branch)
- self.commitcache[rev] = commit
- return commit
-
- def copy(self, rev):
- commit = self.commitcache[rev]
-
- changes = self.source.getchanges(rev)
- if isinstance(changes, basestring):
- if changes == SKIPREV:
- dest = SKIPREV
- else:
- dest = self.map[changes]
- self.map[rev] = dest
- return
- files, copies = changes
- pbranches = []
- if commit.parents:
- for prev in commit.parents:
- if prev not in self.commitcache:
- self.cachecommit(prev)
- pbranches.append((self.map[prev],
- self.commitcache[prev].branch))
- self.dest.setbranch(commit.branch, pbranches)
- try:
- parents = self.splicemap[rev].replace(',', ' ').split()
- self.ui.status(_('spliced in %s as parents of %s\n') %
- (parents, rev))
- parents = [self.map.get(p, p) for p in parents]
- except KeyError:
- parents = [b[0] for b in pbranches]
- source = progresssource(self.ui, self.source, len(files))
- newnode = self.dest.putcommit(files, copies, parents, commit,
- source, self.map)
- source.close()
- self.source.converted(rev, newnode)
- self.map[rev] = newnode
-
- def convert(self, sortmode):
- try:
- self.source.before()
- self.dest.before()
- self.source.setrevmap(self.map)
- self.ui.status(_("scanning source...\n"))
- heads = self.source.getheads()
- parents = self.walktree(heads)
- self.ui.status(_("sorting...\n"))
- t = self.toposort(parents, sortmode)
- num = len(t)
- c = None
-
- self.ui.status(_("converting...\n"))
- for i, c in enumerate(t):
- num -= 1
- desc = self.commitcache[c].desc
- if "\n" in desc:
- desc = desc.splitlines()[0]
- # convert log message to local encoding without using
- # tolocal() because the encoding.encoding convert()
- # uses is 'utf-8'
- self.ui.status("%d %s\n" % (num, recode(desc)))
- self.ui.note(_("source: %s\n") % recode(c))
- self.ui.progress(_('converting'), i, unit=_('revisions'),
- total=len(t))
- self.copy(c)
- self.ui.progress(_('converting'), None)
-
- tags = self.source.gettags()
- ctags = {}
- for k in tags:
- v = tags[k]
- if self.map.get(v, SKIPREV) != SKIPREV:
- ctags[k] = self.map[v]
-
- if c and ctags:
- nrev, tagsparent = self.dest.puttags(ctags)
- if nrev and tagsparent:
- # write another hash correspondence to override the previous
- # one so we don't end up with extra tag heads
- tagsparents = [e for e in self.map.iteritems()
- if e[1] == tagsparent]
- if tagsparents:
- self.map[tagsparents[0][0]] = nrev
-
- self.writeauthormap()
- finally:
- self.cleanup()
-
- def cleanup(self):
- try:
- self.dest.after()
- finally:
- self.source.after()
- self.map.close()
-
-def convert(ui, src, dest=None, revmapfile=None, **opts):
- global orig_encoding
- orig_encoding = encoding.encoding
- encoding.encoding = 'UTF-8'
-
- # support --authors as an alias for --authormap
- if not opts.get('authormap'):
- opts['authormap'] = opts.get('authors')
-
- if not dest:
- dest = hg.defaultdest(src) + "-hg"
- ui.status(_("assuming destination %s\n") % dest)
-
- destc = convertsink(ui, dest, opts.get('dest_type'))
-
- try:
- srcc, defaultsort = convertsource(ui, src, opts.get('source_type'),
- opts.get('rev'))
- except Exception:
- for path in destc.created:
- shutil.rmtree(path, True)
- raise
-
- sortmodes = ('branchsort', 'datesort', 'sourcesort')
- sortmode = [m for m in sortmodes if opts.get(m)]
- if len(sortmode) > 1:
- raise util.Abort(_('more than one sort mode specified'))
- sortmode = sortmode and sortmode[0] or defaultsort
- if sortmode == 'sourcesort' and not srcc.hasnativeorder():
- raise util.Abort(_('--sourcesort is not supported by this data source'))
-
- fmap = opts.get('filemap')
- if fmap:
- srcc = filemap.filemap_source(ui, srcc, fmap)
- destc.setfilemapmode(True)
-
- if not revmapfile:
- try:
- revmapfile = destc.revmapfile()
- except:
- revmapfile = os.path.join(destc, "map")
-
- c = converter(ui, srcc, destc, revmapfile, opts)
- c.convert(sortmode)
-
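toposort() above combines a topological walk with a pluggable picknext() heuristic; the --branchsort heuristic prefers a child of the most recently converted revision so that branches are emitted contiguously. A standalone sketch of that idea, assuming a cycle-free graph (the real code aborts on cycles) and using hypothetical names:

    # Simplified stand-in for toposort(parents, 'branchsort').
    def branchsort(parents):
        """parents: {rev: [parent revs]} covering every revision to convert."""
        children, roots = {}, []
        for rev, ps in parents.items():
            children.setdefault(rev, [])
            if not ps:
                roots.append(rev)
            for p in ps:
                children.setdefault(p, []).append(rev)
        pending = dict((rev, len(ps)) for rev, ps in parents.items())
        ready, order, prev = list(roots), [], None
        while ready:
            # prefer a child of the previously emitted revision
            pick = next((r for r in ready if prev in parents[r]), ready[0])
            ready.remove(pick)
            order.append(pick)
            prev = pick
            for child in children[pick]:
                pending[child] -= 1
                if pending[child] == 0:     # all parents emitted, now eligible
                    ready.append(child)
        return order

    # branchsort({'a': [], 'b': ['a'], 'd': ['b'], 'c': ['a']})
    # keeps the a-b-d branch together before emitting 'c'.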
diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/convcmd.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/convcmd.pyo
deleted file mode 100644
index 15f040a..0000000
--- a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/convcmd.pyo
+++ /dev/null
Binary files differ
diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/cvs.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/cvs.py
deleted file mode 100644
index 501fae2..0000000
--- a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/cvs.py
+++ /dev/null
@@ -1,271 +0,0 @@
-# cvs.py: CVS conversion code inspired by hg-cvs-import and git-cvsimport
-#
-# Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2 or any later version.
-
-import os, re, socket, errno
-from cStringIO import StringIO
-from mercurial import encoding, util
-from mercurial.i18n import _
-
-from common import NoRepo, commit, converter_source, checktool
-import cvsps
-
-class convert_cvs(converter_source):
- def __init__(self, ui, path, rev=None):
- super(convert_cvs, self).__init__(ui, path, rev=rev)
-
- cvs = os.path.join(path, "CVS")
- if not os.path.exists(cvs):
- raise NoRepo(_("%s does not look like a CVS checkout") % path)
-
- checktool('cvs')
-
- self.changeset = None
- self.files = {}
- self.tags = {}
- self.lastbranch = {}
- self.socket = None
- self.cvsroot = open(os.path.join(cvs, "Root")).read()[:-1]
- self.cvsrepo = open(os.path.join(cvs, "Repository")).read()[:-1]
- self.encoding = encoding.encoding
-
- self._connect()
-
- def _parse(self):
- if self.changeset is not None:
- return
- self.changeset = {}
-
- maxrev = 0
- if self.rev:
- # TODO: handle tags
- try:
- # patchset number?
- maxrev = int(self.rev)
- except ValueError:
- raise util.Abort(_('revision %s is not a patchset number')
- % self.rev)
-
- d = os.getcwd()
- try:
- os.chdir(self.path)
- id = None
-
- cache = 'update'
- if not self.ui.configbool('convert', 'cvsps.cache', True):
- cache = None
- db = cvsps.createlog(self.ui, cache=cache)
- db = cvsps.createchangeset(self.ui, db,
- fuzz=int(self.ui.config('convert', 'cvsps.fuzz', 60)),
- mergeto=self.ui.config('convert', 'cvsps.mergeto', None),
- mergefrom=self.ui.config('convert', 'cvsps.mergefrom', None))
-
- for cs in db:
- if maxrev and cs.id > maxrev:
- break
- id = str(cs.id)
- cs.author = self.recode(cs.author)
- self.lastbranch[cs.branch] = id
- cs.comment = self.recode(cs.comment)
- date = util.datestr(cs.date)
- self.tags.update(dict.fromkeys(cs.tags, id))
-
- files = {}
- for f in cs.entries:
- files[f.file] = "%s%s" % ('.'.join([str(x)
- for x in f.revision]),
- ['', '(DEAD)'][f.dead])
-
- # add current commit to set
- c = commit(author=cs.author, date=date,
- parents=[str(p.id) for p in cs.parents],
- desc=cs.comment, branch=cs.branch or '')
- self.changeset[id] = c
- self.files[id] = files
-
- self.heads = self.lastbranch.values()
- finally:
- os.chdir(d)
-
- def _connect(self):
- root = self.cvsroot
- conntype = None
- user, host = None, None
- cmd = ['cvs', 'server']
-
- self.ui.status(_("connecting to %s\n") % root)
-
- if root.startswith(":pserver:"):
- root = root[9:]
- m = re.match(r'(?:(.*?)(?::(.*?))?@)?([^:\/]*)(?::(\d*))?(.*)',
- root)
- if m:
- conntype = "pserver"
- user, passw, serv, port, root = m.groups()
- if not user:
- user = "anonymous"
- if not port:
- port = 2401
- else:
- port = int(port)
- format0 = ":pserver:%s@%s:%s" % (user, serv, root)
- format1 = ":pserver:%s@%s:%d%s" % (user, serv, port, root)
-
- if not passw:
- passw = "A"
- cvspass = os.path.expanduser("~/.cvspass")
- try:
- pf = open(cvspass)
- for line in pf.read().splitlines():
- part1, part2 = line.split(' ', 1)
- if part1 == '/1':
- # /1 :pserver:user@example.com:2401/cvsroot/foo Ah<Z
- part1, part2 = part2.split(' ', 1)
- format = format1
- else:
- # :pserver:user@example.com:/cvsroot/foo Ah<Z
- format = format0
- if part1 == format:
- passw = part2
- break
- pf.close()
- except IOError, inst:
- if inst.errno != errno.ENOENT:
- if not getattr(inst, 'filename', None):
- inst.filename = cvspass
- raise
-
- sck = socket.socket()
- sck.connect((serv, port))
- sck.send("\n".join(["BEGIN AUTH REQUEST", root, user, passw,
- "END AUTH REQUEST", ""]))
- if sck.recv(128) != "I LOVE YOU\n":
- raise util.Abort(_("CVS pserver authentication failed"))
-
- self.writep = self.readp = sck.makefile('r+')
-
- if not conntype and root.startswith(":local:"):
- conntype = "local"
- root = root[7:]
-
- if not conntype:
- # :ext:user@host/home/user/path/to/cvsroot
- if root.startswith(":ext:"):
- root = root[5:]
- m = re.match(r'(?:([^@:/]+)@)?([^:/]+):?(.*)', root)
- # Do not take Windows path "c:\foo\bar" for a connection string
- if os.path.isdir(root) or not m:
- conntype = "local"
- else:
- conntype = "rsh"
- user, host, root = m.group(1), m.group(2), m.group(3)
-
- if conntype != "pserver":
- if conntype == "rsh":
- rsh = os.environ.get("CVS_RSH") or "ssh"
- if user:
- cmd = [rsh, '-l', user, host] + cmd
- else:
- cmd = [rsh, host] + cmd
-
- # popen2 does not support argument lists under Windows
- cmd = [util.shellquote(arg) for arg in cmd]
- cmd = util.quotecommand(' '.join(cmd))
- self.writep, self.readp = util.popen2(cmd)
-
- self.realroot = root
-
- self.writep.write("Root %s\n" % root)
- self.writep.write("Valid-responses ok error Valid-requests Mode"
- " M Mbinary E Checked-in Created Updated"
- " Merged Removed\n")
- self.writep.write("valid-requests\n")
- self.writep.flush()
- r = self.readp.readline()
- if not r.startswith("Valid-requests"):
- raise util.Abort(_('unexpected response from CVS server '
- '(expected "Valid-requests", but got %r)')
- % r)
- if "UseUnchanged" in r:
- self.writep.write("UseUnchanged\n")
- self.writep.flush()
- r = self.readp.readline()
-
- def getheads(self):
- self._parse()
- return self.heads
-
- def getfile(self, name, rev):
-
- def chunkedread(fp, count):
- # file objects returned by socket.makefile() do not handle
- # large read() requests very well.
- chunksize = 65536
- output = StringIO()
- while count > 0:
- data = fp.read(min(count, chunksize))
- if not data:
- raise util.Abort(_("%d bytes missing from remote file")
- % count)
- count -= len(data)
- output.write(data)
- return output.getvalue()
-
- self._parse()
- if rev.endswith("(DEAD)"):
- raise IOError
-
- args = ("-N -P -kk -r %s --" % rev).split()
- args.append(self.cvsrepo + '/' + name)
- for x in args:
- self.writep.write("Argument %s\n" % x)
- self.writep.write("Directory .\n%s\nco\n" % self.realroot)
- self.writep.flush()
-
- data = ""
- mode = None
- while 1:
- line = self.readp.readline()
- if line.startswith("Created ") or line.startswith("Updated "):
- self.readp.readline() # path
- self.readp.readline() # entries
- mode = self.readp.readline()[:-1]
- count = int(self.readp.readline()[:-1])
- data = chunkedread(self.readp, count)
- elif line.startswith(" "):
- data += line[1:]
- elif line.startswith("M "):
- pass
- elif line.startswith("Mbinary "):
- count = int(self.readp.readline()[:-1])
- data = chunkedread(self.readp, count)
- else:
- if line == "ok\n":
- if mode is None:
- raise util.Abort(_('malformed response from CVS'))
- return (data, "x" in mode and "x" or "")
- elif line.startswith("E "):
- self.ui.warn(_("cvs server: %s\n") % line[2:])
- elif line.startswith("Remove"):
- self.readp.readline()
- else:
- raise util.Abort(_("unknown CVS response: %s") % line)
-
- def getchanges(self, rev):
- self._parse()
- return sorted(self.files[rev].iteritems()), {}
-
- def getcommit(self, rev):
- self._parse()
- return self.changeset[rev]
-
- def gettags(self):
- self._parse()
- return self.tags
-
- def getchangedfiles(self, rev, i):
- self._parse()
- return sorted(self.files[rev])
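
getfile() above uses chunkedread() because file objects returned by socket.makefile() cope poorly with a single very large read(). The same pattern in isolation, sketched with io.BytesIO standing in for the server stream:

    import io

    def chunkedread(fp, count, chunksize=65536):
        """Read exactly 'count' bytes from a file-like object in bounded chunks."""
        output = io.BytesIO()
        while count > 0:
            data = fp.read(min(count, chunksize))
            if not data:
                raise IOError('%d bytes missing from remote file' % count)
            count -= len(data)
            output.write(data)
        return output.getvalue()

    # chunkedread(io.BytesIO(b'a' * 200000), 200000) == b'a' * 200000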
diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/cvs.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/cvs.pyo
deleted file mode 100644
index d73fe3f..0000000
--- a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/cvs.pyo
+++ /dev/null
Binary files differ
diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/cvsps.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/cvsps.py
deleted file mode 100644
index 1519d41..0000000
--- a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/cvsps.py
+++ /dev/null
@@ -1,847 +0,0 @@
-# Mercurial built-in replacement for cvsps.
-#
-# Copyright 2008, Frank Kingswood <frank@kingswood-consulting.co.uk>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2 or any later version.
-
-import os
-import re
-import cPickle as pickle
-from mercurial import util
-from mercurial.i18n import _
-from mercurial import hook
-
-class logentry(object):
- '''Class logentry has the following attributes:
- .author - author name as CVS knows it
- .branch - name of branch this revision is on
- .branches - revision tuple of branches starting at this revision
- .comment - commit message
- .date - the commit date as a (time, tz) tuple
- .dead - true if file revision is dead
- .file - Name of file
- .lines - a tuple (+lines, -lines) or None
- .parent - Previous revision of this entry
- .rcs - name of file as returned from CVS
- .revision - revision number as tuple
- .tags - list of tags on the file
- .synthetic - is this a synthetic "file ... added on ..." revision?
- .mergepoint- the branch that has been merged from
- (if present in rlog output)
- .branchpoints- the branches that start at the current entry
- '''
- def __init__(self, **entries):
- self.synthetic = False
- self.__dict__.update(entries)
-
- def __repr__(self):
- return "<%s at 0x%x: %s %s>" % (self.__class__.__name__,
- id(self),
- self.file,
- ".".join(map(str, self.revision)))
-
-class logerror(Exception):
- pass
-
-def getrepopath(cvspath):
- """Return the repository path from a CVS path.
-
- >>> getrepopath('/foo/bar')
- '/foo/bar'
- >>> getrepopath('c:/foo/bar')
- 'c:/foo/bar'
- >>> getrepopath(':pserver:10/foo/bar')
- '/foo/bar'
- >>> getrepopath(':pserver:10c:/foo/bar')
- '/foo/bar'
- >>> getrepopath(':pserver:/foo/bar')
- '/foo/bar'
- >>> getrepopath(':pserver:c:/foo/bar')
- 'c:/foo/bar'
- >>> getrepopath(':pserver:truc@foo.bar:/foo/bar')
- '/foo/bar'
- >>> getrepopath(':pserver:truc@foo.bar:c:/foo/bar')
- 'c:/foo/bar'
- """
- # According to CVS manual, CVS paths are expressed like:
- # [:method:][[user][:password]@]hostname[:[port]]/path/to/repository
- #
- # Unfortunately, Windows absolute paths start with a drive letter
- # like 'c:' making it harder to parse. Here we assume that drive
- # letters are only one character long and any CVS component before
- # the repository path is at least 2 characters long, and use this
- # to disambiguate.
- parts = cvspath.split(':')
- if len(parts) == 1:
- return parts[0]
- # Here there is an ambiguous case if we have a port number
-    # immediately followed by a Windows drive letter. We assume this
-    # never happens and decide it must be a CVS path component,
- # therefore ignoring it.
- if len(parts[-2]) > 1:
- return parts[-1].lstrip('0123456789')
- return parts[-2] + ':' + parts[-1]
-
-def createlog(ui, directory=None, root="", rlog=True, cache=None):
- '''Collect the CVS rlog'''
-
- # Because we store many duplicate commit log messages, reusing strings
- # saves a lot of memory and pickle storage space.
- _scache = {}
- def scache(s):
- "return a shared version of a string"
- return _scache.setdefault(s, s)
-
- ui.status(_('collecting CVS rlog\n'))
-
- log = [] # list of logentry objects containing the CVS state
-
- # patterns to match in CVS (r)log output, by state of use
- re_00 = re.compile('RCS file: (.+)$')
- re_01 = re.compile('cvs \\[r?log aborted\\]: (.+)$')
- re_02 = re.compile('cvs (r?log|server): (.+)\n$')
- re_03 = re.compile("(Cannot access.+CVSROOT)|"
- "(can't create temporary directory.+)$")
- re_10 = re.compile('Working file: (.+)$')
- re_20 = re.compile('symbolic names:')
- re_30 = re.compile('\t(.+): ([\\d.]+)$')
- re_31 = re.compile('----------------------------$')
- re_32 = re.compile('======================================='
- '======================================$')
- re_50 = re.compile('revision ([\\d.]+)(\s+locked by:\s+.+;)?$')
- re_60 = re.compile(r'date:\s+(.+);\s+author:\s+(.+);\s+state:\s+(.+?);'
- r'(\s+lines:\s+(\+\d+)?\s+(-\d+)?;)?'
- r'(.*mergepoint:\s+([^;]+);)?')
- re_70 = re.compile('branches: (.+);$')
-
- file_added_re = re.compile(r'file [^/]+ was (initially )?added on branch')
-
-    prefix = ''   # leading path to strip from what we get from CVS
-
- if directory is None:
- # Current working directory
-
- # Get the real directory in the repository
- try:
- prefix = open(os.path.join('CVS','Repository')).read().strip()
- directory = prefix
- if prefix == ".":
- prefix = ""
- except IOError:
- raise logerror(_('not a CVS sandbox'))
-
- if prefix and not prefix.endswith(os.sep):
- prefix += os.sep
-
- # Use the Root file in the sandbox, if it exists
- try:
- root = open(os.path.join('CVS','Root')).read().strip()
- except IOError:
- pass
-
- if not root:
- root = os.environ.get('CVSROOT', '')
-
- # read log cache if one exists
- oldlog = []
- date = None
-
- if cache:
- cachedir = os.path.expanduser('~/.hg.cvsps')
- if not os.path.exists(cachedir):
- os.mkdir(cachedir)
-
- # The cvsps cache pickle needs a uniquified name, based on the
-        # repository location. The address may have all sorts of nasties
- # in it, slashes, colons and such. So here we take just the
- # alphanumerics, concatenated in a way that does not mix up the
- # various components, so that
- # :pserver:user@server:/path
- # and
- # /pserver/user/server/path
- # are mapped to different cache file names.
- cachefile = root.split(":") + [directory, "cache"]
- cachefile = ['-'.join(re.findall(r'\w+', s)) for s in cachefile if s]
- cachefile = os.path.join(cachedir,
- '.'.join([s for s in cachefile if s]))
-
- if cache == 'update':
- try:
- ui.note(_('reading cvs log cache %s\n') % cachefile)
- oldlog = pickle.load(open(cachefile))
- ui.note(_('cache has %d log entries\n') % len(oldlog))
- except Exception, e:
- ui.note(_('error reading cache: %r\n') % e)
-
- if oldlog:
- date = oldlog[-1].date # last commit date as a (time,tz) tuple
- date = util.datestr(date, '%Y/%m/%d %H:%M:%S %1%2')
-
- # build the CVS commandline
- cmd = ['cvs', '-q']
- if root:
- cmd.append('-d%s' % root)
- p = util.normpath(getrepopath(root))
- if not p.endswith('/'):
- p += '/'
- if prefix:
- # looks like normpath replaces "" by "."
- prefix = p + util.normpath(prefix)
- else:
- prefix = p
- cmd.append(['log', 'rlog'][rlog])
- if date:
- # no space between option and date string
- cmd.append('-d>%s' % date)
- cmd.append(directory)
-
- # state machine begins here
- tags = {} # dictionary of revisions on current file with their tags
- branchmap = {} # mapping between branch names and revision numbers
- state = 0
- store = False # set when a new record can be appended
-
- cmd = [util.shellquote(arg) for arg in cmd]
- ui.note(_("running %s\n") % (' '.join(cmd)))
- ui.debug("prefix=%r directory=%r root=%r\n" % (prefix, directory, root))
-
- pfp = util.popen(' '.join(cmd))
- peek = pfp.readline()
- while True:
- line = peek
- if line == '':
- break
- peek = pfp.readline()
- if line.endswith('\n'):
- line = line[:-1]
- #ui.debug('state=%d line=%r\n' % (state, line))
-
- if state == 0:
- # initial state, consume input until we see 'RCS file'
- match = re_00.match(line)
- if match:
- rcs = match.group(1)
- tags = {}
- if rlog:
- filename = util.normpath(rcs[:-2])
- if filename.startswith(prefix):
- filename = filename[len(prefix):]
- if filename.startswith('/'):
- filename = filename[1:]
- if filename.startswith('Attic/'):
- filename = filename[6:]
- else:
- filename = filename.replace('/Attic/', '/')
- state = 2
- continue
- state = 1
- continue
- match = re_01.match(line)
- if match:
- raise logerror(match.group(1))
- match = re_02.match(line)
- if match:
- raise logerror(match.group(2))
- if re_03.match(line):
- raise logerror(line)
-
- elif state == 1:
- # expect 'Working file' (only when using log instead of rlog)
- match = re_10.match(line)
- assert match, _('RCS file must be followed by working file')
- filename = util.normpath(match.group(1))
- state = 2
-
- elif state == 2:
- # expect 'symbolic names'
- if re_20.match(line):
- branchmap = {}
- state = 3
-
- elif state == 3:
- # read the symbolic names and store as tags
- match = re_30.match(line)
- if match:
- rev = [int(x) for x in match.group(2).split('.')]
-
- # Convert magic branch number to an odd-numbered one
- revn = len(rev)
- if revn > 3 and (revn % 2) == 0 and rev[-2] == 0:
- rev = rev[:-2] + rev[-1:]
- rev = tuple(rev)
-
- if rev not in tags:
- tags[rev] = []
- tags[rev].append(match.group(1))
- branchmap[match.group(1)] = match.group(2)
-
- elif re_31.match(line):
- state = 5
- elif re_32.match(line):
- state = 0
-
- elif state == 4:
- # expecting '------' separator before first revision
- if re_31.match(line):
- state = 5
- else:
- assert not re_32.match(line), _('must have at least '
- 'some revisions')
-
- elif state == 5:
- # expecting revision number and possibly (ignored) lock indication
- # we create the logentry here from values stored in states 0 to 4,
- # as this state is re-entered for subsequent revisions of a file.
- match = re_50.match(line)
- assert match, _('expected revision number')
- e = logentry(rcs=scache(rcs), file=scache(filename),
- revision=tuple([int(x) for x in match.group(1).split('.')]),
- branches=[], parent=None)
- state = 6
-
- elif state == 6:
- # expecting date, author, state, lines changed
- match = re_60.match(line)
- assert match, _('revision must be followed by date line')
- d = match.group(1)
- if d[2] == '/':
- # Y2K
- d = '19' + d
-
- if len(d.split()) != 3:
- # cvs log dates always in GMT
- d = d + ' UTC'
- e.date = util.parsedate(d, ['%y/%m/%d %H:%M:%S',
- '%Y/%m/%d %H:%M:%S',
- '%Y-%m-%d %H:%M:%S'])
- e.author = scache(match.group(2))
- e.dead = match.group(3).lower() == 'dead'
-
- if match.group(5):
- if match.group(6):
- e.lines = (int(match.group(5)), int(match.group(6)))
- else:
- e.lines = (int(match.group(5)), 0)
- elif match.group(6):
- e.lines = (0, int(match.group(6)))
- else:
- e.lines = None
-
- if match.group(7): # cvsnt mergepoint
- myrev = match.group(8).split('.')
- if len(myrev) == 2: # head
- e.mergepoint = 'HEAD'
- else:
- myrev = '.'.join(myrev[:-2] + ['0', myrev[-2]])
- branches = [b for b in branchmap if branchmap[b] == myrev]
-                    assert len(branches) == 1, 'unknown branch: %s' % match.group(8)
- e.mergepoint = branches[0]
- else:
- e.mergepoint = None
- e.comment = []
- state = 7
-
- elif state == 7:
- # read the revision numbers of branches that start at this revision
- # or store the commit log message otherwise
- m = re_70.match(line)
- if m:
- e.branches = [tuple([int(y) for y in x.strip().split('.')])
- for x in m.group(1).split(';')]
- state = 8
- elif re_31.match(line) and re_50.match(peek):
- state = 5
- store = True
- elif re_32.match(line):
- state = 0
- store = True
- else:
- e.comment.append(line)
-
- elif state == 8:
- # store commit log message
- if re_31.match(line):
- state = 5
- store = True
- elif re_32.match(line):
- state = 0
- store = True
- else:
- e.comment.append(line)
-
- # When a file is added on a branch B1, CVS creates a synthetic
- # dead trunk revision 1.1 so that the branch has a root.
- # Likewise, if you merge such a file to a later branch B2 (one
- # that already existed when the file was added on B1), CVS
- # creates a synthetic dead revision 1.1.x.1 on B2. Don't drop
- # these revisions now, but mark them synthetic so
- # createchangeset() can take care of them.
- if (store and
- e.dead and
- e.revision[-1] == 1 and # 1.1 or 1.1.x.1
- len(e.comment) == 1 and
- file_added_re.match(e.comment[0])):
- ui.debug('found synthetic revision in %s: %r\n'
- % (e.rcs, e.comment[0]))
- e.synthetic = True
-
- if store:
- # clean up the results and save in the log.
- store = False
- e.tags = sorted([scache(x) for x in tags.get(e.revision, [])])
- e.comment = scache('\n'.join(e.comment))
-
- revn = len(e.revision)
- if revn > 3 and (revn % 2) == 0:
- e.branch = tags.get(e.revision[:-1], [None])[0]
- else:
- e.branch = None
-
- # find the branches starting from this revision
- branchpoints = set()
- for branch, revision in branchmap.iteritems():
- revparts = tuple([int(i) for i in revision.split('.')])
- if len(revparts) < 2: # bad tags
- continue
- if revparts[-2] == 0 and revparts[-1] % 2 == 0:
- # normal branch
- if revparts[:-2] == e.revision:
- branchpoints.add(branch)
- elif revparts == (1, 1, 1): # vendor branch
- if revparts in e.branches:
- branchpoints.add(branch)
- e.branchpoints = branchpoints
-
- log.append(e)
-
- if len(log) % 100 == 0:
- ui.status(util.ellipsis('%d %s' % (len(log), e.file), 80)+'\n')
-
- log.sort(key=lambda x: (x.rcs, x.revision))
-
- # find parent revisions of individual files
- versions = {}
- for e in log:
- branch = e.revision[:-1]
- p = versions.get((e.rcs, branch), None)
- if p is None:
- p = e.revision[:-2]
- e.parent = p
- versions[(e.rcs, branch)] = e.revision
-
- # update the log cache
- if cache:
- if log:
- # join up the old and new logs
- log.sort(key=lambda x: x.date)
-
- if oldlog and oldlog[-1].date >= log[0].date:
- raise logerror(_('log cache overlaps with new log entries,'
- ' re-run without cache.'))
-
- log = oldlog + log
-
- # write the new cachefile
- ui.note(_('writing cvs log cache %s\n') % cachefile)
- pickle.dump(log, open(cachefile, 'w'))
- else:
- log = oldlog
-
- ui.status(_('%d log entries\n') % len(log))
-
- hook.hook(ui, None, "cvslog", True, log=log)
-
- return log
-
-
-class changeset(object):
- '''Class changeset has the following attributes:
- .id - integer identifying this changeset (list index)
- .author - author name as CVS knows it
- .branch - name of branch this changeset is on, or None
- .comment - commit message
- .date - the commit date as a (time,tz) tuple
- .entries - list of logentry objects in this changeset
- .parents - list of one or two parent changesets
- .tags - list of tags on this changeset
- .synthetic - from synthetic revision "file ... added on branch ..."
- .mergepoint- the branch that has been merged from
- (if present in rlog output)
- .branchpoints- the branches that start at the current entry
- '''
- def __init__(self, **entries):
- self.synthetic = False
- self.__dict__.update(entries)
-
- def __repr__(self):
- return "<%s at 0x%x: %s>" % (self.__class__.__name__,
- id(self),
- getattr(self, 'id', "(no id)"))
-
-def createchangeset(ui, log, fuzz=60, mergefrom=None, mergeto=None):
- '''Convert log into changesets.'''
-
- ui.status(_('creating changesets\n'))
-
- # Merge changesets
-
- log.sort(key=lambda x: (x.comment, x.author, x.branch, x.date))
-
- changesets = []
- files = set()
- c = None
- for i, e in enumerate(log):
-
- # Check if log entry belongs to the current changeset or not.
-
- # Since CVS is file centric, two different file revisions with
- # different branchpoints should be treated as belonging to two
- # different changesets (and the ordering is important and not
- # honoured by cvsps at this point).
- #
- # Consider the following case:
- # foo 1.1 branchpoints: [MYBRANCH]
- # bar 1.1 branchpoints: [MYBRANCH, MYBRANCH2]
- #
-        # Here foo is only part of MYBRANCH, not MYBRANCH2; e.g. a
-        # later version of foo may be in MYBRANCH2, so foo should be the
-        # first changeset and bar the next, and MYBRANCH and MYBRANCH2
-        # should both start off the bar changeset. No provisions are
-        # made to ensure that this is, in fact, what happens.
- if not (c and
- e.comment == c.comment and
- e.author == c.author and
- e.branch == c.branch and
- (not hasattr(e, 'branchpoints') or
- not hasattr (c, 'branchpoints') or
- e.branchpoints == c.branchpoints) and
- ((c.date[0] + c.date[1]) <=
- (e.date[0] + e.date[1]) <=
- (c.date[0] + c.date[1]) + fuzz) and
- e.file not in files):
- c = changeset(comment=e.comment, author=e.author,
- branch=e.branch, date=e.date, entries=[],
- mergepoint=getattr(e, 'mergepoint', None),
- branchpoints=getattr(e, 'branchpoints', set()))
- changesets.append(c)
- files = set()
- if len(changesets) % 100 == 0:
- t = '%d %s' % (len(changesets), repr(e.comment)[1:-1])
- ui.status(util.ellipsis(t, 80) + '\n')
-
- c.entries.append(e)
- files.add(e.file)
- c.date = e.date # changeset date is date of latest commit in it
-
- # Mark synthetic changesets
-
- for c in changesets:
- # Synthetic revisions always get their own changeset, because
- # the log message includes the filename. E.g. if you add file3
- # and file4 on a branch, you get four log entries and three
- # changesets:
- # "File file3 was added on branch ..." (synthetic, 1 entry)
- # "File file4 was added on branch ..." (synthetic, 1 entry)
- # "Add file3 and file4 to fix ..." (real, 2 entries)
- # Hence the check for 1 entry here.
- c.synthetic = len(c.entries) == 1 and c.entries[0].synthetic
-
- # Sort files in each changeset
-
- for c in changesets:
- def pathcompare(l, r):
- 'Mimic cvsps sorting order'
- l = l.split('/')
- r = r.split('/')
- nl = len(l)
- nr = len(r)
- n = min(nl, nr)
- for i in range(n):
- if i + 1 == nl and nl < nr:
- return -1
- elif i + 1 == nr and nl > nr:
- return +1
- elif l[i] < r[i]:
- return -1
- elif l[i] > r[i]:
- return +1
- return 0
- def entitycompare(l, r):
- return pathcompare(l.file, r.file)
-
- c.entries.sort(entitycompare)
-
- # Sort changesets by date
-
- def cscmp(l, r):
- d = sum(l.date) - sum(r.date)
- if d:
- return d
-
- # detect vendor branches and initial commits on a branch
- le = {}
- for e in l.entries:
- le[e.rcs] = e.revision
- re = {}
- for e in r.entries:
- re[e.rcs] = e.revision
-
- d = 0
- for e in l.entries:
- if re.get(e.rcs, None) == e.parent:
- assert not d
- d = 1
- break
-
- for e in r.entries:
- if le.get(e.rcs, None) == e.parent:
- assert not d
- d = -1
- break
-
- return d
-
- changesets.sort(cscmp)
-
- # Collect tags
-
- globaltags = {}
- for c in changesets:
- for e in c.entries:
- for tag in e.tags:
- # remember which is the latest changeset to have this tag
- globaltags[tag] = c
-
- for c in changesets:
- tags = set()
- for e in c.entries:
- tags.update(e.tags)
- # remember tags only if this is the latest changeset to have it
- c.tags = sorted(tag for tag in tags if globaltags[tag] is c)
-
- # Find parent changesets, handle {{mergetobranch BRANCHNAME}}
- # by inserting dummy changesets with two parents, and handle
- # {{mergefrombranch BRANCHNAME}} by setting two parents.
-
- if mergeto is None:
- mergeto = r'{{mergetobranch ([-\w]+)}}'
- if mergeto:
- mergeto = re.compile(mergeto)
-
- if mergefrom is None:
- mergefrom = r'{{mergefrombranch ([-\w]+)}}'
- if mergefrom:
- mergefrom = re.compile(mergefrom)
-
- versions = {} # changeset index where we saw any particular file version
- branches = {} # changeset index where we saw a branch
- n = len(changesets)
- i = 0
- while i < n:
- c = changesets[i]
-
- for f in c.entries:
- versions[(f.rcs, f.revision)] = i
-
- p = None
- if c.branch in branches:
- p = branches[c.branch]
- else:
- # first changeset on a new branch
- # the parent is a changeset with the branch in its
- # branchpoints such that it is the latest possible
- # commit without any intervening, unrelated commits.
-
- for candidate in xrange(i):
- if c.branch not in changesets[candidate].branchpoints:
- if p is not None:
- break
- continue
- p = candidate
-
- c.parents = []
- if p is not None:
- p = changesets[p]
-
- # Ensure no changeset has a synthetic changeset as a parent.
- while p.synthetic:
- assert len(p.parents) <= 1, \
- _('synthetic changeset cannot have multiple parents')
- if p.parents:
- p = p.parents[0]
- else:
- p = None
- break
-
- if p is not None:
- c.parents.append(p)
-
- if c.mergepoint:
- if c.mergepoint == 'HEAD':
- c.mergepoint = None
- c.parents.append(changesets[branches[c.mergepoint]])
-
- if mergefrom:
- m = mergefrom.search(c.comment)
- if m:
- m = m.group(1)
- if m == 'HEAD':
- m = None
- try:
- candidate = changesets[branches[m]]
- except KeyError:
- ui.warn(_("warning: CVS commit message references "
- "non-existent branch %r:\n%s\n")
- % (m, c.comment))
- if m in branches and c.branch != m and not candidate.synthetic:
- c.parents.append(candidate)
-
- if mergeto:
- m = mergeto.search(c.comment)
- if m:
- try:
- m = m.group(1)
- if m == 'HEAD':
- m = None
- except:
- m = None # if no group found then merge to HEAD
- if m in branches and c.branch != m:
- # insert empty changeset for merge
- cc = changeset(
- author=c.author, branch=m, date=c.date,
- comment='convert-repo: CVS merge from branch %s'
- % c.branch,
- entries=[], tags=[],
- parents=[changesets[branches[m]], c])
- changesets.insert(i + 1, cc)
- branches[m] = i + 1
-
- # adjust our loop counters now we have inserted a new entry
- n += 1
- i += 2
- continue
-
- branches[c.branch] = i
- i += 1
-
- # Drop synthetic changesets (safe now that we have ensured no other
- # changesets can have them as parents).
- i = 0
- while i < len(changesets):
- if changesets[i].synthetic:
- del changesets[i]
- else:
- i += 1
-
- # Number changesets
-
- for i, c in enumerate(changesets):
- c.id = i + 1
-
- ui.status(_('%d changeset entries\n') % len(changesets))
-
- hook.hook(ui, None, "cvschangesets", True, changesets=changesets)
-
- return changesets
-
-
-def debugcvsps(ui, *args, **opts):
- '''Read CVS rlog for current directory or named path in
- repository, and convert the log to changesets based on matching
- commit log entries and dates.
- '''
- if opts["new_cache"]:
- cache = "write"
- elif opts["update_cache"]:
- cache = "update"
- else:
- cache = None
-
- revisions = opts["revisions"]
-
- try:
- if args:
- log = []
- for d in args:
- log += createlog(ui, d, root=opts["root"], cache=cache)
- else:
- log = createlog(ui, root=opts["root"], cache=cache)
- except logerror, e:
- ui.write("%r\n"%e)
- return
-
- changesets = createchangeset(ui, log, opts["fuzz"])
- del log
-
- # Print changesets (optionally filtered)
-
- off = len(revisions)
- branches = {} # latest version number in each branch
- ancestors = {} # parent branch
- for cs in changesets:
-
- if opts["ancestors"]:
- if cs.branch not in branches and cs.parents and cs.parents[0].id:
- ancestors[cs.branch] = (changesets[cs.parents[0].id - 1].branch,
- cs.parents[0].id)
- branches[cs.branch] = cs.id
-
- # limit by branches
- if opts["branches"] and (cs.branch or 'HEAD') not in opts["branches"]:
- continue
-
- if not off:
- # Note: trailing spaces on several lines here are needed to have
- # bug-for-bug compatibility with cvsps.
- ui.write('---------------------\n')
- ui.write('PatchSet %d \n' % cs.id)
- ui.write('Date: %s\n' % util.datestr(cs.date,
- '%Y/%m/%d %H:%M:%S %1%2'))
- ui.write('Author: %s\n' % cs.author)
- ui.write('Branch: %s\n' % (cs.branch or 'HEAD'))
- ui.write('Tag%s: %s \n' % (['', 's'][len(cs.tags) > 1],
- ','.join(cs.tags) or '(none)'))
- branchpoints = getattr(cs, 'branchpoints', None)
- if branchpoints:
- ui.write('Branchpoints: %s \n' % ', '.join(branchpoints))
- if opts["parents"] and cs.parents:
- if len(cs.parents) > 1:
- ui.write('Parents: %s\n' %
- (','.join([str(p.id) for p in cs.parents])))
- else:
- ui.write('Parent: %d\n' % cs.parents[0].id)
-
- if opts["ancestors"]:
- b = cs.branch
- r = []
- while b:
- b, c = ancestors[b]
- r.append('%s:%d:%d' % (b or "HEAD", c, branches[b]))
- if r:
- ui.write('Ancestors: %s\n' % (','.join(r)))
-
- ui.write('Log:\n')
- ui.write('%s\n\n' % cs.comment)
- ui.write('Members: \n')
- for f in cs.entries:
- fn = f.file
- if fn.startswith(opts["prefix"]):
- fn = fn[len(opts["prefix"]):]
- ui.write('\t%s:%s->%s%s \n' % (
- fn, '.'.join([str(x) for x in f.parent]) or 'INITIAL',
- '.'.join([str(x) for x in f.revision]),
- ['', '(DEAD)'][f.dead]))
- ui.write('\n')
-
- # have we seen the start tag?
- if revisions and off:
- if revisions[0] == str(cs.id) or \
- revisions[0] in cs.tags:
- off = False
-
- # see if we reached the end tag
- if len(revisions) > 1 and not off:
- if revisions[1] == str(cs.id) or \
- revisions[1] in cs.tags:
- break
diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/cvsps.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/cvsps.pyo
deleted file mode 100644
index fdf6d44..0000000
--- a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/cvsps.pyo
+++ /dev/null
Binary files differ
diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/darcs.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/darcs.py
deleted file mode 100644
index 9863eb8..0000000
--- a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/darcs.py
+++ /dev/null
@@ -1,200 +0,0 @@
-# darcs.py - darcs support for the convert extension
-#
-# Copyright 2007-2009 Matt Mackall <mpm@selenic.com> and others
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2 or any later version.
-
-from common import NoRepo, checktool, commandline, commit, converter_source
-from mercurial.i18n import _
-from mercurial import encoding, util
-import os, shutil, tempfile, re
-
-# The naming drift of ElementTree is fun!
-
-try:
- from xml.etree.cElementTree import ElementTree, XMLParser
-except ImportError:
- try:
- from xml.etree.ElementTree import ElementTree, XMLParser
- except ImportError:
- try:
- from elementtree.cElementTree import ElementTree, XMLParser
- except ImportError:
- try:
- from elementtree.ElementTree import ElementTree, XMLParser
- except ImportError:
- ElementTree = None
-
-class darcs_source(converter_source, commandline):
- def __init__(self, ui, path, rev=None):
- converter_source.__init__(self, ui, path, rev=rev)
- commandline.__init__(self, ui, 'darcs')
-
- # check for _darcs, ElementTree so that we can easily skip
- # test-convert-darcs if ElementTree is not around
- if not os.path.exists(os.path.join(path, '_darcs')):
- raise NoRepo(_("%s does not look like a darcs repository") % path)
-
- checktool('darcs')
- version = self.run0('--version').splitlines()[0].strip()
- if version < '2.1':
- raise util.Abort(_('darcs version 2.1 or newer needed (found %r)') %
- version)
-
- if ElementTree is None:
- raise util.Abort(_("Python ElementTree module is not available"))
-
- self.path = os.path.realpath(path)
-
- self.lastrev = None
- self.changes = {}
- self.parents = {}
- self.tags = {}
-
- # Check darcs repository format
- format = self.format()
- if format:
- if format in ('darcs-1.0', 'hashed'):
- raise NoRepo(_("%s repository format is unsupported, "
- "please upgrade") % format)
- else:
- self.ui.warn(_('failed to detect repository format!'))
-
- def before(self):
- self.tmppath = tempfile.mkdtemp(
- prefix='convert-' + os.path.basename(self.path) + '-')
- output, status = self.run('init', repodir=self.tmppath)
- self.checkexit(status)
-
- tree = self.xml('changes', xml_output=True, summary=True,
- repodir=self.path)
- tagname = None
- child = None
- for elt in tree.findall('patch'):
- node = elt.get('hash')
- name = elt.findtext('name', '')
- if name.startswith('TAG '):
- tagname = name[4:].strip()
- elif tagname is not None:
- self.tags[tagname] = node
- tagname = None
- self.changes[node] = elt
- self.parents[child] = [node]
- child = node
- self.parents[child] = []
-
- def after(self):
- self.ui.debug('cleaning up %s\n' % self.tmppath)
- shutil.rmtree(self.tmppath, ignore_errors=True)
-
- def recode(self, s, encoding=None):
- if isinstance(s, unicode):
- # XMLParser returns unicode objects for anything it can't
- # encode into ASCII. We convert them back to str to get
- # recode's normal conversion behavior.
- s = s.encode('latin-1')
- return super(darcs_source, self).recode(s, encoding)
-
- def xml(self, cmd, **kwargs):
- # NOTE: darcs is currently encoding agnostic and will print
- # patch metadata byte-for-byte, even in the XML changelog.
- etree = ElementTree()
- # While we are decoding the XML as latin-1 to be as liberal as
- # possible, etree will still raise an exception if any
- # non-printable characters are in the XML changelog.
- parser = XMLParser(encoding='latin-1')
- fp = self._run(cmd, **kwargs)
- etree.parse(fp, parser=parser)
- self.checkexit(fp.close())
- return etree.getroot()
-
- def format(self):
- output, status = self.run('show', 'repo', no_files=True,
- repodir=self.path)
- self.checkexit(status)
- m = re.search(r'^\s*Format:\s*(.*)$', output, re.MULTILINE)
- if not m:
- return None
- return ','.join(sorted(f.strip() for f in m.group(1).split(',')))
-
- def manifest(self):
- man = []
- output, status = self.run('show', 'files', no_directories=True,
- repodir=self.tmppath)
- self.checkexit(status)
- for line in output.split('\n'):
- path = line[2:]
- if path:
- man.append(path)
- return man
-
- def getheads(self):
- return self.parents[None]
-
- def getcommit(self, rev):
- elt = self.changes[rev]
- date = util.strdate(elt.get('local_date'), '%a %b %d %H:%M:%S %Z %Y')
- desc = elt.findtext('name') + '\n' + elt.findtext('comment', '')
- # etree can return unicode objects for name, comment, and author,
- # so recode() is used to ensure str objects are emitted.
- return commit(author=self.recode(elt.get('author')),
- date=util.datestr(date),
- desc=self.recode(desc).strip(),
- parents=self.parents[rev])
-
- def pull(self, rev):
- output, status = self.run('pull', self.path, all=True,
- match='hash %s' % rev,
- no_test=True, no_posthook=True,
- external_merge='/bin/false',
- repodir=self.tmppath)
- if status:
- if output.find('We have conflicts in') == -1:
- self.checkexit(status, output)
- output, status = self.run('revert', all=True, repodir=self.tmppath)
- self.checkexit(status, output)
-
- def getchanges(self, rev):
- copies = {}
- changes = []
- man = None
- for elt in self.changes[rev].find('summary').getchildren():
- if elt.tag in ('add_directory', 'remove_directory'):
- continue
- if elt.tag == 'move':
- if man is None:
- man = self.manifest()
- source, dest = elt.get('from'), elt.get('to')
- if source in man:
- # File move
- changes.append((source, rev))
- changes.append((dest, rev))
- copies[dest] = source
- else:
- # Directory move, deduce file moves from manifest
- source = source + '/'
- for f in man:
- if not f.startswith(source):
- continue
- fdest = dest + '/' + f[len(source):]
- changes.append((f, rev))
- changes.append((fdest, rev))
- copies[fdest] = f
- else:
- changes.append((elt.text.strip(), rev))
- self.pull(rev)
- self.lastrev = rev
- return sorted(changes), copies
-
- def getfile(self, name, rev):
- if rev != self.lastrev:
- raise util.Abort(_('internal calling inconsistency'))
- path = os.path.join(self.tmppath, name)
- data = open(path, 'rb').read()
- mode = os.lstat(path).st_mode
- mode = (mode & 0111) and 'x' or ''
- return data, mode
-
- def gettags(self):
- return self.tags
diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/darcs.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/darcs.pyo
deleted file mode 100644
index 78b7568..0000000
--- a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/darcs.pyo
+++ /dev/null
Binary files differ
diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/filemap.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/filemap.py
deleted file mode 100644
index 1064642..0000000
--- a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/filemap.py
+++ /dev/null
@@ -1,365 +0,0 @@
-# Copyright 2007 Bryan O'Sullivan <bos@serpentine.com>
-# Copyright 2007 Alexis S. L. Carvalho <alexis@cecm.usp.br>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2 or any later version.
-
-import shlex
-from mercurial.i18n import _
-from mercurial import util
-from common import SKIPREV, converter_source
-
-def rpairs(name):
- e = len(name)
- while e != -1:
- yield name[:e], name[e + 1:]
- e = name.rfind('/', 0, e)
- yield '.', name
-
-class filemapper(object):
- '''Map and filter filenames when importing.
- A name can be mapped to itself, a new name, or None (omit from new
- repository).'''
-
- def __init__(self, ui, path=None):
- self.ui = ui
- self.include = {}
- self.exclude = {}
- self.rename = {}
- if path:
- if self.parse(path):
- raise util.Abort(_('errors in filemap'))
-
- def parse(self, path):
- errs = 0
- def check(name, mapping, listname):
- if not name:
- self.ui.warn(_('%s:%d: path to %s is missing\n') %
- (lex.infile, lex.lineno, listname))
- return 1
- if name in mapping:
- self.ui.warn(_('%s:%d: %r already in %s list\n') %
- (lex.infile, lex.lineno, name, listname))
- return 1
- if (name.startswith('/') or
- name.endswith('/') or
- '//' in name):
- self.ui.warn(_('%s:%d: superfluous / in %s %r\n') %
- (lex.infile, lex.lineno, listname, name))
- return 1
- return 0
- lex = shlex.shlex(open(path), path, True)
- lex.wordchars += '!@#$%^&*()-=+[]{}|;:,./<>?'
- cmd = lex.get_token()
- while cmd:
- if cmd == 'include':
- name = lex.get_token()
- errs += check(name, self.exclude, 'exclude')
- self.include[name] = name
- elif cmd == 'exclude':
- name = lex.get_token()
- errs += check(name, self.include, 'include')
- errs += check(name, self.rename, 'rename')
- self.exclude[name] = name
- elif cmd == 'rename':
- src = lex.get_token()
- dest = lex.get_token()
- errs += check(src, self.exclude, 'exclude')
- self.rename[src] = dest
- elif cmd == 'source':
- errs += self.parse(lex.get_token())
- else:
- self.ui.warn(_('%s:%d: unknown directive %r\n') %
- (lex.infile, lex.lineno, cmd))
- errs += 1
- cmd = lex.get_token()
- return errs
-
- def lookup(self, name, mapping):
- for pre, suf in rpairs(name):
- try:
- return mapping[pre], pre, suf
- except KeyError:
- pass
- return '', name, ''
-
- def __call__(self, name):
- if self.include:
- inc = self.lookup(name, self.include)[0]
- else:
- inc = name
- if self.exclude:
- exc = self.lookup(name, self.exclude)[0]
- else:
- exc = ''
- if (not self.include and exc) or (len(inc) <= len(exc)):
- return None
- newpre, pre, suf = self.lookup(name, self.rename)
- if newpre:
- if newpre == '.':
- return suf
- if suf:
- return newpre + '/' + suf
- return newpre
- return name
-
- def active(self):
- return bool(self.include or self.exclude or self.rename)
-
-# This class does two additional things compared to a regular source:
-#
-# - Filter and rename files. This is mostly wrapped by the filemapper
-# class above. We hide the original filename in the revision that is
-# returned by getchanges to be able to find things later in getfile.
-#
-# - Return only revisions that matter for the files we're interested in.
-# This involves rewriting the parents of the original revision to
-# create a graph that is restricted to those revisions.
-#
-# This set of revisions includes not only revisions that directly
-# touch files we're interested in, but also merges that merge two
-# or more interesting revisions.
-
-class filemap_source(converter_source):
- def __init__(self, ui, baseconverter, filemap):
- super(filemap_source, self).__init__(ui)
- self.base = baseconverter
- self.filemapper = filemapper(ui, filemap)
- self.commits = {}
- # if a revision rev has parent p in the original revision graph, then
- # rev will have parent self.parentmap[p] in the restricted graph.
- self.parentmap = {}
- # self.wantedancestors[rev] is the set of all ancestors of rev that
- # are in the restricted graph.
- self.wantedancestors = {}
- self.convertedorder = None
- self._rebuilt = False
- self.origparents = {}
- self.children = {}
- self.seenchildren = {}
-
- def before(self):
- self.base.before()
-
- def after(self):
- self.base.after()
-
- def setrevmap(self, revmap):
- # rebuild our state to make things restartable
- #
- # To avoid calling getcommit for every revision that has already
- # been converted, we rebuild only the parentmap, delaying the
- # rebuild of wantedancestors until we need it (i.e. until a
- # merge).
- #
- # We assume the order argument lists the revisions in
- # topological order, so that we can infer which revisions were
- # wanted by previous runs.
- self._rebuilt = not revmap
- seen = {SKIPREV: SKIPREV}
- dummyset = set()
- converted = []
- for rev in revmap.order:
- mapped = revmap[rev]
- wanted = mapped not in seen
- if wanted:
- seen[mapped] = rev
- self.parentmap[rev] = rev
- else:
- self.parentmap[rev] = seen[mapped]
- self.wantedancestors[rev] = dummyset
- arg = seen[mapped]
- if arg == SKIPREV:
- arg = None
- converted.append((rev, wanted, arg))
- self.convertedorder = converted
- return self.base.setrevmap(revmap)
-
- def rebuild(self):
- if self._rebuilt:
- return True
- self._rebuilt = True
- self.parentmap.clear()
- self.wantedancestors.clear()
- self.seenchildren.clear()
- for rev, wanted, arg in self.convertedorder:
- if rev not in self.origparents:
- self.origparents[rev] = self.getcommit(rev).parents
- if arg is not None:
- self.children[arg] = self.children.get(arg, 0) + 1
-
- for rev, wanted, arg in self.convertedorder:
- parents = self.origparents[rev]
- if wanted:
- self.mark_wanted(rev, parents)
- else:
- self.mark_not_wanted(rev, arg)
- self._discard(arg, *parents)
-
- return True
-
- def getheads(self):
- return self.base.getheads()
-
- def getcommit(self, rev):
- # We want to save a reference to the commit objects to be able
- # to rewrite their parents later on.
- c = self.commits[rev] = self.base.getcommit(rev)
- for p in c.parents:
- self.children[p] = self.children.get(p, 0) + 1
- return c
-
- def _discard(self, *revs):
- for r in revs:
- if r is None:
- continue
- self.seenchildren[r] = self.seenchildren.get(r, 0) + 1
- if self.seenchildren[r] == self.children[r]:
- del self.wantedancestors[r]
- del self.parentmap[r]
- del self.seenchildren[r]
- if self._rebuilt:
- del self.children[r]
-
- def wanted(self, rev, i):
- # Return True if we're directly interested in rev.
- #
- # i is an index selecting one of the parents of rev (if rev
- # has no parents, i is None). getchangedfiles will give us
- # the list of files that are different in rev and in the parent
- # indicated by i. If we're interested in any of these files,
- # we're interested in rev.
- try:
- files = self.base.getchangedfiles(rev, i)
- except NotImplementedError:
- raise util.Abort(_("source repository doesn't support --filemap"))
- for f in files:
- if self.filemapper(f):
- return True
- return False
-
- def mark_not_wanted(self, rev, p):
- # Mark rev as not interesting and update data structures.
-
- if p is None:
- # A root revision. Use SKIPREV to indicate that it doesn't
- # map to any revision in the restricted graph. Put SKIPREV
- # in the set of wanted ancestors to simplify code elsewhere
- self.parentmap[rev] = SKIPREV
- self.wantedancestors[rev] = set((SKIPREV,))
- return
-
- # Reuse the data from our parent.
- self.parentmap[rev] = self.parentmap[p]
- self.wantedancestors[rev] = self.wantedancestors[p]
-
- def mark_wanted(self, rev, parents):
-        # Mark rev as wanted and update data structures.
-
- # rev will be in the restricted graph, so children of rev in
- # the original graph should still have rev as a parent in the
- # restricted graph.
- self.parentmap[rev] = rev
-
- # The set of wanted ancestors of rev is the union of the sets
- # of wanted ancestors of its parents. Plus rev itself.
- wrev = set()
- for p in parents:
- wrev.update(self.wantedancestors[p])
- wrev.add(rev)
- self.wantedancestors[rev] = wrev
-
- def getchanges(self, rev):
- parents = self.commits[rev].parents
- if len(parents) > 1:
- self.rebuild()
-
- # To decide whether we're interested in rev we:
- #
- # - calculate what parents rev will have if it turns out we're
- # interested in it. If it's going to have more than 1 parent,
- # we're interested in it.
- #
- # - otherwise, we'll compare it with the single parent we found.
-        #   If any of the files we're interested in is different in
-        #   the two revisions, we're interested in rev.
-
- # A parent p is interesting if its mapped version (self.parentmap[p]):
- # - is not SKIPREV
- # - is still not in the list of parents (we don't want duplicates)
- # - is not an ancestor of the mapped versions of the other parents
- mparents = []
- wp = None
- for i, p1 in enumerate(parents):
- mp1 = self.parentmap[p1]
- if mp1 == SKIPREV or mp1 in mparents:
- continue
- for p2 in parents:
- if p1 == p2 or mp1 == self.parentmap[p2]:
- continue
- if mp1 in self.wantedancestors[p2]:
- break
- else:
- mparents.append(mp1)
- wp = i
-
- if wp is None and parents:
- wp = 0
-
- self.origparents[rev] = parents
-
- closed = 'close' in self.commits[rev].extra
-
- if len(mparents) < 2 and not closed and not self.wanted(rev, wp):
- # We don't want this revision.
- # Update our state and tell the convert process to map this
-            # revision to the same revision its parent is mapped to.
- p = None
- if parents:
- p = parents[wp]
- self.mark_not_wanted(rev, p)
- self.convertedorder.append((rev, False, p))
- self._discard(*parents)
- return self.parentmap[rev]
-
- # We want this revision.
- # Rewrite the parents of the commit object
- self.commits[rev].parents = mparents
- self.mark_wanted(rev, parents)
- self.convertedorder.append((rev, True, None))
- self._discard(*parents)
-
- # Get the real changes and do the filtering/mapping. To be
- # able to get the files later on in getfile, we hide the
- # original filename in the rev part of the return value.
- changes, copies = self.base.getchanges(rev)
- newnames = {}
- files = []
- for f, r in changes:
- newf = self.filemapper(f)
- if newf:
- files.append((newf, (f, r)))
- newnames[f] = newf
-
- ncopies = {}
- for c in copies:
- newc = self.filemapper(c)
- if newc:
- newsource = self.filemapper(copies[c])
- if newsource:
- ncopies[newc] = newsource
-
- return files, ncopies
-
- def getfile(self, name, rev):
- realname, realrev = rev
- return self.base.getfile(realname, realrev)
-
- def gettags(self):
- return self.base.gettags()
-
- def hasnativeorder(self):
- return self.base.hasnativeorder()
-
- def lookuprev(self, rev):
- return self.base.lookuprev(rev)
diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/filemap.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/filemap.pyo
deleted file mode 100644
index 2ece523..0000000
--- a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/filemap.pyo
+++ /dev/null
Binary files differ
diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/git.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/git.py
deleted file mode 100644
index e973031..0000000
--- a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/git.py
+++ /dev/null
@@ -1,170 +0,0 @@
-# git.py - git support for the convert extension
-#
-# Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2 or any later version.
-
-import os
-from mercurial import util
-from mercurial.node import hex, nullid
-from mercurial.i18n import _
-
-from common import NoRepo, commit, converter_source, checktool
-
-class convert_git(converter_source):
-    # Windows does not support the GIT_DIR= construct, while other systems
-    # cannot remove an environment variable. Just assume none have
-    # both issues.
- if hasattr(os, 'unsetenv'):
- def gitopen(self, s):
- prevgitdir = os.environ.get('GIT_DIR')
- os.environ['GIT_DIR'] = self.path
- try:
- return util.popen(s, 'rb')
- finally:
- if prevgitdir is None:
- del os.environ['GIT_DIR']
- else:
- os.environ['GIT_DIR'] = prevgitdir
- else:
- def gitopen(self, s):
- return util.popen('GIT_DIR=%s %s' % (self.path, s), 'rb')
-
- def gitread(self, s):
- fh = self.gitopen(s)
- data = fh.read()
- return data, fh.close()
-
- def __init__(self, ui, path, rev=None):
- super(convert_git, self).__init__(ui, path, rev=rev)
-
- if os.path.isdir(path + "/.git"):
- path += "/.git"
- if not os.path.exists(path + "/objects"):
- raise NoRepo(_("%s does not look like a Git repository") % path)
-
- checktool('git', 'git')
-
- self.path = path
-
- def getheads(self):
- if not self.rev:
- heads, ret = self.gitread('git rev-parse --branches --remotes')
- heads = heads.splitlines()
- else:
- heads, ret = self.gitread("git rev-parse --verify %s" % self.rev)
- heads = [heads[:-1]]
- if ret:
- raise util.Abort(_('cannot retrieve git heads'))
- return heads
-
- def catfile(self, rev, type):
- if rev == hex(nullid):
- raise IOError()
- data, ret = self.gitread("git cat-file %s %s" % (type, rev))
- if ret:
- raise util.Abort(_('cannot read %r object at %s') % (type, rev))
- return data
-
- def getfile(self, name, rev):
- data = self.catfile(rev, "blob")
- mode = self.modecache[(name, rev)]
- return data, mode
-
- def getchanges(self, version):
- self.modecache = {}
- fh = self.gitopen("git diff-tree -z --root -m -r %s" % version)
- changes = []
- seen = set()
- entry = None
- for l in fh.read().split('\x00'):
- if not entry:
- if not l.startswith(':'):
- continue
- entry = l
- continue
- f = l
- if f not in seen:
- seen.add(f)
- entry = entry.split()
- h = entry[3]
- p = (entry[1] == "100755")
- s = (entry[1] == "120000")
- self.modecache[(f, h)] = (p and "x") or (s and "l") or ""
- changes.append((f, h))
- entry = None
- if fh.close():
- raise util.Abort(_('cannot read changes in %s') % version)
- return (changes, {})
-
- def getcommit(self, version):
- c = self.catfile(version, "commit") # read the commit hash
- end = c.find("\n\n")
- message = c[end + 2:]
- message = self.recode(message)
- l = c[:end].splitlines()
- parents = []
- author = committer = None
- for e in l[1:]:
- n, v = e.split(" ", 1)
- if n == "author":
- p = v.split()
- tm, tz = p[-2:]
- author = " ".join(p[:-2])
- if author[0] == "<": author = author[1:-1]
- author = self.recode(author)
- if n == "committer":
- p = v.split()
- tm, tz = p[-2:]
- committer = " ".join(p[:-2])
- if committer[0] == "<": committer = committer[1:-1]
- committer = self.recode(committer)
- if n == "parent":
- parents.append(v)
-
- if committer and committer != author:
- message += "\ncommitter: %s\n" % committer
- tzs, tzh, tzm = tz[-5:-4] + "1", tz[-4:-2], tz[-2:]
- tz = -int(tzs) * (int(tzh) * 3600 + int(tzm))
- date = tm + " " + str(tz)
-
- c = commit(parents=parents, date=date, author=author, desc=message,
- rev=version)
- return c
-
- def gettags(self):
- tags = {}
- fh = self.gitopen('git ls-remote --tags "%s"' % self.path)
- prefix = 'refs/tags/'
- for line in fh:
- line = line.strip()
- if not line.endswith("^{}"):
- continue
- node, tag = line.split(None, 1)
- if not tag.startswith(prefix):
- continue
- tag = tag[len(prefix):-3]
- tags[tag] = node
- if fh.close():
- raise util.Abort(_('cannot read tags from %s') % self.path)
-
- return tags
-
- def getchangedfiles(self, version, i):
- changes = []
- if i is None:
- fh = self.gitopen("git diff-tree --root -m -r %s" % version)
- for l in fh:
- if "\t" not in l:
- continue
- m, f = l[:-1].split("\t")
- changes.append(f)
- else:
- fh = self.gitopen('git diff-tree --name-only --root -r %s "%s^%s" --'
- % (version, version, i + 1))
- changes = [f.rstrip('\n') for f in fh]
- if fh.close():
- raise util.Abort(_('cannot read changes in %s') % version)
-
- return changes
diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/git.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/git.pyo
deleted file mode 100644
index 3166318..0000000
--- a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/git.pyo
+++ /dev/null
Binary files differ
diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/gnuarch.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/gnuarch.py
deleted file mode 100644
index 60cfede..0000000
--- a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/gnuarch.py
+++ /dev/null
@@ -1,338 +0,0 @@
-# gnuarch.py - GNU Arch support for the convert extension
-#
-# Copyright 2008, 2009 Aleix Conchillo Flaque <aleix@member.fsf.org>
-# and others
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2 or any later version.
-
-from common import NoRepo, commandline, commit, converter_source
-from mercurial.i18n import _
-from mercurial import encoding, util
-import os, shutil, tempfile, stat
-from email.Parser import Parser
-
-class gnuarch_source(converter_source, commandline):
-
- class gnuarch_rev(object):
- def __init__(self, rev):
- self.rev = rev
- self.summary = ''
- self.date = None
- self.author = ''
- self.continuationof = None
- self.add_files = []
- self.mod_files = []
- self.del_files = []
- self.ren_files = {}
- self.ren_dirs = {}
-
- def __init__(self, ui, path, rev=None):
- super(gnuarch_source, self).__init__(ui, path, rev=rev)
-
- if not os.path.exists(os.path.join(path, '{arch}')):
- raise NoRepo(_("%s does not look like a GNU Arch repository")
- % path)
-
- # Could use checktool, but we want to check for baz or tla.
- self.execmd = None
- if util.find_exe('baz'):
- self.execmd = 'baz'
- else:
- if util.find_exe('tla'):
- self.execmd = 'tla'
- else:
- raise util.Abort(_('cannot find a GNU Arch tool'))
-
- commandline.__init__(self, ui, self.execmd)
-
- self.path = os.path.realpath(path)
- self.tmppath = None
-
- self.treeversion = None
- self.lastrev = None
- self.changes = {}
- self.parents = {}
- self.tags = {}
- self.catlogparser = Parser()
- self.encoding = encoding.encoding
- self.archives = []
-
- def before(self):
- # Get registered archives
- self.archives = [i.rstrip('\n')
- for i in self.runlines0('archives', '-n')]
-
- if self.execmd == 'tla':
- output = self.run0('tree-version', self.path)
- else:
- output = self.run0('tree-version', '-d', self.path)
- self.treeversion = output.strip()
-
- # Get name of temporary directory
- version = self.treeversion.split('/')
- self.tmppath = os.path.join(tempfile.gettempdir(),
- 'hg-%s' % version[1])
-
- # Generate parents dictionary
- self.parents[None] = []
- treeversion = self.treeversion
- child = None
- while treeversion:
- self.ui.status(_('analyzing tree version %s...\n') % treeversion)
-
- archive = treeversion.split('/')[0]
- if archive not in self.archives:
- self.ui.status(_('tree analysis stopped because it points to '
- 'an unregistered archive %s...\n') % archive)
- break
-
- # Get the complete list of revisions for that tree version
- output, status = self.runlines('revisions', '-r', '-f', treeversion)
-            self.checkexit(status, 'failed retrieving revisions for %s'
- % treeversion)
-
- # No new iteration unless a revision has a continuation-of header
- treeversion = None
-
- for l in output:
- rev = l.strip()
- self.changes[rev] = self.gnuarch_rev(rev)
- self.parents[rev] = []
-
- # Read author, date and summary
- catlog, status = self.run('cat-log', '-d', self.path, rev)
- if status:
- catlog = self.run0('cat-archive-log', rev)
- self._parsecatlog(catlog, rev)
-
- # Populate the parents map
- self.parents[child].append(rev)
-
- # Keep track of the current revision as the child of the next
- # revision scanned
- child = rev
-
- # Check if we have to follow the usual incremental history
- # or if we have to 'jump' to a different treeversion given
- # by the continuation-of header.
- if self.changes[rev].continuationof:
- treeversion = '--'.join(
- self.changes[rev].continuationof.split('--')[:-1])
- break
-
- # If we reached a base-0 revision w/o any continuation-of
- # header, it means the tree history ends here.
- if rev[-6:] == 'base-0':
- break
-
- def after(self):
- self.ui.debug('cleaning up %s\n' % self.tmppath)
- shutil.rmtree(self.tmppath, ignore_errors=True)
-
- def getheads(self):
- return self.parents[None]
-
- def getfile(self, name, rev):
- if rev != self.lastrev:
- raise util.Abort(_('internal calling inconsistency'))
-
- # Raise IOError if necessary (i.e. deleted files).
- if not os.path.lexists(os.path.join(self.tmppath, name)):
- raise IOError
-
- return self._getfile(name, rev)
-
- def getchanges(self, rev):
- self._update(rev)
- changes = []
- copies = {}
-
- for f in self.changes[rev].add_files:
- changes.append((f, rev))
-
- for f in self.changes[rev].mod_files:
- changes.append((f, rev))
-
- for f in self.changes[rev].del_files:
- changes.append((f, rev))
-
- for src in self.changes[rev].ren_files:
- to = self.changes[rev].ren_files[src]
- changes.append((src, rev))
- changes.append((to, rev))
- copies[to] = src
-
- for src in self.changes[rev].ren_dirs:
- to = self.changes[rev].ren_dirs[src]
- chgs, cps = self._rendirchanges(src, to)
- changes += [(f, rev) for f in chgs]
- copies.update(cps)
-
- self.lastrev = rev
- return sorted(set(changes)), copies
-
- def getcommit(self, rev):
- changes = self.changes[rev]
- return commit(author=changes.author, date=changes.date,
- desc=changes.summary, parents=self.parents[rev], rev=rev)
-
- def gettags(self):
- return self.tags
-
- def _execute(self, cmd, *args, **kwargs):
- cmdline = [self.execmd, cmd]
- cmdline += args
- cmdline = [util.shellquote(arg) for arg in cmdline]
- cmdline += ['>', util.nulldev, '2>', util.nulldev]
- cmdline = util.quotecommand(' '.join(cmdline))
- self.ui.debug(cmdline, '\n')
- return os.system(cmdline)
-
- def _update(self, rev):
- self.ui.debug('applying revision %s...\n' % rev)
- changeset, status = self.runlines('replay', '-d', self.tmppath,
- rev)
- if status:
- # Something went wrong while merging (baz or tla
- # issue?), get latest revision and try from there
- shutil.rmtree(self.tmppath, ignore_errors=True)
- self._obtainrevision(rev)
- else:
- old_rev = self.parents[rev][0]
- self.ui.debug('computing changeset between %s and %s...\n'
- % (old_rev, rev))
- self._parsechangeset(changeset, rev)
-
- def _getfile(self, name, rev):
- mode = os.lstat(os.path.join(self.tmppath, name)).st_mode
- if stat.S_ISLNK(mode):
- data = os.readlink(os.path.join(self.tmppath, name))
- mode = mode and 'l' or ''
- else:
- data = open(os.path.join(self.tmppath, name), 'rb').read()
- mode = (mode & 0111) and 'x' or ''
- return data, mode
-
- def _exclude(self, name):
- exclude = ['{arch}', '.arch-ids', '.arch-inventory']
- for exc in exclude:
- if name.find(exc) != -1:
- return True
- return False
-
- def _readcontents(self, path):
- files = []
- contents = os.listdir(path)
- while len(contents) > 0:
- c = contents.pop()
- p = os.path.join(path, c)
- # os.walk could be used, but here we avoid internal GNU
-            # Arch files and directories, thus saving a lot of time.
- if not self._exclude(p):
- if os.path.isdir(p):
- contents += [os.path.join(c, f) for f in os.listdir(p)]
- else:
- files.append(c)
- return files
-
- def _rendirchanges(self, src, dest):
- changes = []
- copies = {}
- files = self._readcontents(os.path.join(self.tmppath, dest))
- for f in files:
- s = os.path.join(src, f)
- d = os.path.join(dest, f)
- changes.append(s)
- changes.append(d)
- copies[d] = s
- return changes, copies
-
- def _obtainrevision(self, rev):
- self.ui.debug('obtaining revision %s...\n' % rev)
- output = self._execute('get', rev, self.tmppath)
- self.checkexit(output)
- self.ui.debug('analyzing revision %s...\n' % rev)
- files = self._readcontents(self.tmppath)
- self.changes[rev].add_files += files
-
- def _stripbasepath(self, path):
- if path.startswith('./'):
- return path[2:]
- return path
-
- def _parsecatlog(self, data, rev):
- try:
- catlog = self.catlogparser.parsestr(data)
-
- # Commit date
- self.changes[rev].date = util.datestr(
- util.strdate(catlog['Standard-date'],
- '%Y-%m-%d %H:%M:%S'))
-
- # Commit author
- self.changes[rev].author = self.recode(catlog['Creator'])
-
- # Commit description
- self.changes[rev].summary = '\n\n'.join((catlog['Summary'],
- catlog.get_payload()))
- self.changes[rev].summary = self.recode(self.changes[rev].summary)
-
- # Commit revision origin when dealing with a branch or tag
- if 'Continuation-of' in catlog:
- self.changes[rev].continuationof = self.recode(
- catlog['Continuation-of'])
- except Exception:
- raise util.Abort(_('could not parse cat-log of %s') % rev)
-
- def _parsechangeset(self, data, rev):
- for l in data:
- l = l.strip()
- # Added file (ignore added directory)
- if l.startswith('A') and not l.startswith('A/'):
- file = self._stripbasepath(l[1:].strip())
- if not self._exclude(file):
- self.changes[rev].add_files.append(file)
- # Deleted file (ignore deleted directory)
- elif l.startswith('D') and not l.startswith('D/'):
- file = self._stripbasepath(l[1:].strip())
- if not self._exclude(file):
- self.changes[rev].del_files.append(file)
- # Modified binary file
- elif l.startswith('Mb'):
- file = self._stripbasepath(l[2:].strip())
- if not self._exclude(file):
- self.changes[rev].mod_files.append(file)
- # Modified link
- elif l.startswith('M->'):
- file = self._stripbasepath(l[3:].strip())
- if not self._exclude(file):
- self.changes[rev].mod_files.append(file)
- # Modified file
- elif l.startswith('M'):
- file = self._stripbasepath(l[1:].strip())
- if not self._exclude(file):
- self.changes[rev].mod_files.append(file)
- # Renamed file (or link)
- elif l.startswith('=>'):
- files = l[2:].strip().split(' ')
- if len(files) == 1:
- files = l[2:].strip().split('\t')
- src = self._stripbasepath(files[0])
- dst = self._stripbasepath(files[1])
- if not self._exclude(src) and not self._exclude(dst):
- self.changes[rev].ren_files[src] = dst
- # Conversion from file to link or from link to file (modified)
- elif l.startswith('ch'):
- file = self._stripbasepath(l[2:].strip())
- if not self._exclude(file):
- self.changes[rev].mod_files.append(file)
- # Renamed directory
- elif l.startswith('/>'):
- dirs = l[2:].strip().split(' ')
- if len(dirs) == 1:
- dirs = l[2:].strip().split('\t')
- src = self._stripbasepath(dirs[0])
- dst = self._stripbasepath(dirs[1])
- if not self._exclude(src) and not self._exclude(dst):
- self.changes[rev].ren_dirs[src] = dst
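
As a rough aside, the dispatch in _parsechangeset above keys on GNU Arch's changeset line prefixes (A/D for added and deleted files, Mb/M->/M for modifications, => and /> for renames; the ch case is omitted here). A simplified standalone sketch of that classification, using invented sample lines and ignoring the exclusion and base-path handling:

def classify_arch_line(line):
    # Simplified mirror of the prefix dispatch above; order matters because
    # 'Mb' and 'M->' would otherwise be swallowed by the plain 'M' case.
    line = line.strip()
    if line.startswith('A') and not line.startswith('A/'):
        return 'added', line[1:].strip()
    if line.startswith('D') and not line.startswith('D/'):
        return 'deleted', line[1:].strip()
    if line.startswith('=>'):
        return 'renamed file', line[2:].strip()
    if line.startswith('/>'):
        return 'renamed dir', line[2:].strip()
    if line.startswith('Mb'):
        return 'modified (binary)', line[2:].strip()
    if line.startswith('M->'):
        return 'modified (link)', line[3:].strip()
    if line.startswith('M'):
        return 'modified', line[1:].strip()
    return 'other', line

for sample in ('A  ./new.txt', 'D  ./old.txt', 'Mb ./image.png',
               '=> ./a.txt ./b.txt', '/> ./dir1 ./dir2'):
    print(classify_arch_line(sample))
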
diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/gnuarch.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/gnuarch.pyo
deleted file mode 100644
index 0e564ba..0000000
--- a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/gnuarch.pyo
+++ /dev/null
Binary files differ
diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/hg.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/hg.py
deleted file mode 100644
index 183377d..0000000
--- a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/hg.py
+++ /dev/null
@@ -1,376 +0,0 @@
-# hg.py - hg backend for convert extension
-#
-# Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2 or any later version.
-
-# Notes for hg->hg conversion:
-#
-# * Old versions of Mercurial didn't trim the whitespace from the ends
-# of commit messages, but new versions do. Changesets created by
-# those older versions, then converted, may thus have different
-# hashes for changesets that are otherwise identical.
-#
-# * Using "--config convert.hg.saverev=true" will cause the source
-#   identifier to be stored in the converted revision. This will give
-#   the converted revision a different identity from the
-#   source.
-
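
For illustration, the saverev behaviour described above is enabled on the command line when running a conversion; the repository paths below are placeholders:

    hg convert --config convert.hg.saverev=true /path/to/source-repo /path/to/converted-repo
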
-
-import os, time, cStringIO
-from mercurial.i18n import _
-from mercurial.node import bin, hex, nullid
-from mercurial import hg, util, context, error
-
-from common import NoRepo, commit, converter_source, converter_sink
-
-class mercurial_sink(converter_sink):
- def __init__(self, ui, path):
- converter_sink.__init__(self, ui, path)
- self.branchnames = ui.configbool('convert', 'hg.usebranchnames', True)
- self.clonebranches = ui.configbool('convert', 'hg.clonebranches', False)
- self.tagsbranch = ui.config('convert', 'hg.tagsbranch', 'default')
- self.lastbranch = None
- if os.path.isdir(path) and len(os.listdir(path)) > 0:
- try:
- self.repo = hg.repository(self.ui, path)
- if not self.repo.local():
- raise NoRepo(_('%s is not a local Mercurial repository')
- % path)
- except error.RepoError, err:
- ui.traceback()
- raise NoRepo(err.args[0])
- else:
- try:
- ui.status(_('initializing destination %s repository\n') % path)
- self.repo = hg.repository(self.ui, path, create=True)
- if not self.repo.local():
- raise NoRepo(_('%s is not a local Mercurial repository')
- % path)
- self.created.append(path)
- except error.RepoError:
- ui.traceback()
- raise NoRepo(_("could not create hg repository %s as sink")
- % path)
- self.lock = None
- self.wlock = None
- self.filemapmode = False
-
- def before(self):
- self.ui.debug('run hg sink pre-conversion action\n')
- self.wlock = self.repo.wlock()
- self.lock = self.repo.lock()
-
- def after(self):
- self.ui.debug('run hg sink post-conversion action\n')
- if self.lock:
- self.lock.release()
- if self.wlock:
- self.wlock.release()
-
- def revmapfile(self):
- return os.path.join(self.path, ".hg", "shamap")
-
- def authorfile(self):
- return os.path.join(self.path, ".hg", "authormap")
-
- def getheads(self):
- h = self.repo.changelog.heads()
- return [hex(x) for x in h]
-
- def setbranch(self, branch, pbranches):
- if not self.clonebranches:
- return
-
- setbranch = (branch != self.lastbranch)
- self.lastbranch = branch
- if not branch:
- branch = 'default'
- pbranches = [(b[0], b[1] and b[1] or 'default') for b in pbranches]
- pbranch = pbranches and pbranches[0][1] or 'default'
-
- branchpath = os.path.join(self.path, branch)
- if setbranch:
- self.after()
- try:
- self.repo = hg.repository(self.ui, branchpath)
- except:
- self.repo = hg.repository(self.ui, branchpath, create=True)
- self.before()
-
- # pbranches may bring revisions from other branches (merge parents)
- # Make sure we have them, or pull them.
- missings = {}
- for b in pbranches:
- try:
- self.repo.lookup(b[0])
- except:
- missings.setdefault(b[1], []).append(b[0])
-
- if missings:
- self.after()
- for pbranch, heads in missings.iteritems():
- pbranchpath = os.path.join(self.path, pbranch)
- prepo = hg.repository(self.ui, pbranchpath)
- self.ui.note(_('pulling from %s into %s\n') % (pbranch, branch))
- self.repo.pull(prepo, [prepo.lookup(h) for h in heads])
- self.before()
-
- def _rewritetags(self, source, revmap, data):
- fp = cStringIO.StringIO()
- for line in data.splitlines():
- s = line.split(' ', 1)
- if len(s) != 2:
- continue
- revid = revmap.get(source.lookuprev(s[0]))
- if not revid:
- continue
- fp.write('%s %s\n' % (revid, s[1]))
- return fp.getvalue()
-
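
To make the .hgtags handling above concrete: each line of a source .hgtags file is '<node> <tagname>', and _rewritetags swaps the source node for its converted counterpart via the revmap, dropping tags whose revision was not converted. A minimal standalone sketch of that mapping (hashes are invented, and the source.lookuprev normalisation step is omitted):

revmap = {'aaa111': 'bbb222'}              # source node -> converted node (made up)
source_hgtags = 'aaa111 v1.0\nzzz999 unconverted-tag\n'

rewritten = []
for line in source_hgtags.splitlines():
    parts = line.split(' ', 1)
    if len(parts) != 2:
        continue
    newnode = revmap.get(parts[0])
    if newnode:                            # drop tags whose node was not converted
        rewritten.append('%s %s\n' % (newnode, parts[1]))

print(''.join(rewritten))                  # -> 'bbb222 v1.0\n'
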
- def putcommit(self, files, copies, parents, commit, source, revmap):
-
- files = dict(files)
- def getfilectx(repo, memctx, f):
- v = files[f]
- data, mode = source.getfile(f, v)
- if f == '.hgtags':
- data = self._rewritetags(source, revmap, data)
- return context.memfilectx(f, data, 'l' in mode, 'x' in mode,
- copies.get(f))
-
- pl = []
- for p in parents:
- if p not in pl:
- pl.append(p)
- parents = pl
- nparents = len(parents)
- if self.filemapmode and nparents == 1:
- m1node = self.repo.changelog.read(bin(parents[0]))[0]
- parent = parents[0]
-
- if len(parents) < 2:
- parents.append(nullid)
- if len(parents) < 2:
- parents.append(nullid)
- p2 = parents.pop(0)
-
- text = commit.desc
- extra = commit.extra.copy()
- if self.branchnames and commit.branch:
- extra['branch'] = commit.branch
- if commit.rev:
- extra['convert_revision'] = commit.rev
-
- while parents:
- p1 = p2
- p2 = parents.pop(0)
- ctx = context.memctx(self.repo, (p1, p2), text, files.keys(),
- getfilectx, commit.author, commit.date, extra)
- self.repo.commitctx(ctx)
- text = "(octopus merge fixup)\n"
- p2 = hex(self.repo.changelog.tip())
-
- if self.filemapmode and nparents == 1:
- man = self.repo.manifest
- mnode = self.repo.changelog.read(bin(p2))[0]
- closed = 'close' in commit.extra
- if not closed and not man.cmp(m1node, man.revision(mnode)):
- self.ui.status(_("filtering out empty revision\n"))
- self.repo.rollback()
- return parent
- return p2
-
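
The while loop above folds a source merge with more than two parents into a chain of two-parent Mercurial commits, stringing the extra parents through synthetic "(octopus merge fixup)" commits. A schematic of the pairing it produces, with placeholder strings standing in for real nodes:

def merge_pairs(parents):
    # Return the (p1, p2) pairs the loop above would commit, in order.
    parents = list(parents)
    pairs = []
    p2 = parents.pop(0)
    while parents:
        p1, p2 = p2, parents.pop(0)
        pairs.append((p1, p2))
        p2 = '<tip after %s+%s>' % (p1, p2)   # stands in for the freshly committed tip
    return pairs

print(merge_pairs(['p1', 'p2', 'p3']))
# [('p1', 'p2'), ('<tip after p1+p2>', 'p3')]
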
- def puttags(self, tags):
- try:
- parentctx = self.repo[self.tagsbranch]
- tagparent = parentctx.node()
- except error.RepoError:
- parentctx = None
- tagparent = nullid
-
- try:
- oldlines = sorted(parentctx['.hgtags'].data().splitlines(True))
- except:
- oldlines = []
-
- newlines = sorted([("%s %s\n" % (tags[tag], tag)) for tag in tags])
- if newlines == oldlines:
- return None, None
- data = "".join(newlines)
- def getfilectx(repo, memctx, f):
- return context.memfilectx(f, data, False, False, None)
-
- self.ui.status(_("updating tags\n"))
- date = "%s 0" % int(time.mktime(time.gmtime()))
- extra = {'branch': self.tagsbranch}
- ctx = context.memctx(self.repo, (tagparent, None), "update tags",
- [".hgtags"], getfilectx, "convert-repo", date,
- extra)
- self.repo.commitctx(ctx)
- return hex(self.repo.changelog.tip()), hex(tagparent)
-
- def setfilemapmode(self, active):
- self.filemapmode = active
-
-class mercurial_source(converter_source):
- def __init__(self, ui, path, rev=None):
- converter_source.__init__(self, ui, path, rev)
- self.ignoreerrors = ui.configbool('convert', 'hg.ignoreerrors', False)
- self.ignored = set()
- self.saverev = ui.configbool('convert', 'hg.saverev', False)
- try:
- self.repo = hg.repository(self.ui, path)
- # try to provoke an exception if this isn't really a hg
- # repo, but some other bogus compatible-looking url
- if not self.repo.local():
- raise error.RepoError()
- except error.RepoError:
- ui.traceback()
- raise NoRepo(_("%s is not a local Mercurial repository") % path)
- self.lastrev = None
- self.lastctx = None
- self._changescache = None
- self.convertfp = None
- # Restrict converted revisions to startrev descendants
- startnode = ui.config('convert', 'hg.startrev')
- if startnode is not None:
- try:
- startnode = self.repo.lookup(startnode)
- except error.RepoError:
- raise util.Abort(_('%s is not a valid start revision')
- % startnode)
- startrev = self.repo.changelog.rev(startnode)
- children = {startnode: 1}
- for rev in self.repo.changelog.descendants(startrev):
- children[self.repo.changelog.node(rev)] = 1
- self.keep = children.__contains__
- else:
- self.keep = util.always
-
- def changectx(self, rev):
- if self.lastrev != rev:
- self.lastctx = self.repo[rev]
- self.lastrev = rev
- return self.lastctx
-
- def parents(self, ctx):
- return [p for p in ctx.parents() if p and self.keep(p.node())]
-
- def getheads(self):
- if self.rev:
- heads = [self.repo[self.rev].node()]
- else:
- heads = self.repo.heads()
- return [hex(h) for h in heads if self.keep(h)]
-
- def getfile(self, name, rev):
- try:
- fctx = self.changectx(rev)[name]
- return fctx.data(), fctx.flags()
- except error.LookupError, err:
- raise IOError(err)
-
- def getchanges(self, rev):
- ctx = self.changectx(rev)
- parents = self.parents(ctx)
- if not parents:
- files = sorted(ctx.manifest())
- if self.ignoreerrors:
- # calling getcopies() is a simple way to detect missing
- # revlogs and populate self.ignored
- self.getcopies(ctx, parents, files)
- return [(f, rev) for f in files if f not in self.ignored], {}
- if self._changescache and self._changescache[0] == rev:
- m, a, r = self._changescache[1]
- else:
- m, a, r = self.repo.status(parents[0].node(), ctx.node())[:3]
- # getcopies() detects missing revlogs early, run it before
- # filtering the changes.
- copies = self.getcopies(ctx, parents, m + a)
- changes = [(name, rev) for name in m + a + r
- if name not in self.ignored]
- return sorted(changes), copies
-
- def getcopies(self, ctx, parents, files):
- copies = {}
- for name in files:
- if name in self.ignored:
- continue
- try:
- copysource, copynode = ctx.filectx(name).renamed()
- if copysource in self.ignored or not self.keep(copynode):
- continue
- # Ignore copy sources not in parent revisions
- found = False
- for p in parents:
- if copysource in p:
- found = True
- break
- if not found:
- continue
- copies[name] = copysource
- except TypeError:
- pass
- except error.LookupError, e:
- if not self.ignoreerrors:
- raise
- self.ignored.add(name)
- self.ui.warn(_('ignoring: %s\n') % e)
- return copies
-
- def getcommit(self, rev):
- ctx = self.changectx(rev)
- parents = [p.hex() for p in self.parents(ctx)]
- if self.saverev:
- crev = rev
- else:
- crev = None
- return commit(author=ctx.user(), date=util.datestr(ctx.date()),
- desc=ctx.description(), rev=crev, parents=parents,
- branch=ctx.branch(), extra=ctx.extra(),
- sortkey=ctx.rev())
-
- def gettags(self):
- tags = [t for t in self.repo.tagslist() if t[0] != 'tip']
- return dict([(name, hex(node)) for name, node in tags
- if self.keep(node)])
-
- def getchangedfiles(self, rev, i):
- ctx = self.changectx(rev)
- parents = self.parents(ctx)
- if not parents and i is None:
- i = 0
- changes = [], ctx.manifest().keys(), []
- else:
- i = i or 0
- changes = self.repo.status(parents[i].node(), ctx.node())[:3]
- changes = [[f for f in l if f not in self.ignored] for l in changes]
-
- if i == 0:
- self._changescache = (rev, changes)
-
- return changes[0] + changes[1] + changes[2]
-
- def converted(self, rev, destrev):
- if self.convertfp is None:
- self.convertfp = open(os.path.join(self.path, '.hg', 'shamap'),
- 'a')
- self.convertfp.write('%s %s\n' % (destrev, rev))
- self.convertfp.flush()
-
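
converted() above appends one '<destination node> <source revision>' pair per line to the repository's .hg/shamap. A small sketch of reading such a map back into a source-to-destination dict; the file path is hypothetical:

def readshamap(path):
    # Parse a convert shamap: one 'destnode sourcerev' pair per line.
    mapping = {}
    for line in open(path):
        parts = line.split()
        if len(parts) == 2:
            destnode, sourcerev = parts
            mapping[sourcerev] = destnode
    return mapping

# e.g. readshamap('/path/to/converted-repo/.hg/shamap')
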
- def before(self):
- self.ui.debug('run hg source pre-conversion action\n')
-
- def after(self):
- self.ui.debug('run hg source post-conversion action\n')
-
- def hasnativeorder(self):
- return True
-
- def lookuprev(self, rev):
- try:
- return hex(self.repo.lookup(rev))
- except error.RepoError:
- return None
diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/hg.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/hg.pyo
deleted file mode 100644
index 633af5e..0000000
--- a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/hg.pyo
+++ /dev/null
Binary files differ
diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/monotone.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/monotone.py
deleted file mode 100644
index 151ddc5..0000000
--- a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/monotone.py
+++ /dev/null
@@ -1,227 +0,0 @@
-# monotone.py - monotone support for the convert extension
-#
-# Copyright 2008, 2009 Mikkel Fahnoe Jorgensen <mikkel@dvide.com> and
-# others
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2 or any later version.
-
-import os, re
-from mercurial import util
-from common import NoRepo, commit, converter_source, checktool
-from common import commandline
-from mercurial.i18n import _
-
-class monotone_source(converter_source, commandline):
- def __init__(self, ui, path=None, rev=None):
- converter_source.__init__(self, ui, path, rev)
- commandline.__init__(self, ui, 'mtn')
-
- self.ui = ui
- self.path = path
-
- norepo = NoRepo(_("%s does not look like a monotone repository")
- % path)
- if not os.path.exists(os.path.join(path, '_MTN')):
- # Could be a monotone repository (SQLite db file)
- try:
- header = file(path, 'rb').read(16)
- except:
- header = ''
- if header != 'SQLite format 3\x00':
- raise norepo
-
- # regular expressions for parsing monotone output
- space = r'\s*'
- name = r'\s+"((?:\\"|[^"])*)"\s*'
- value = name
- revision = r'\s+\[(\w+)\]\s*'
- lines = r'(?:.|\n)+'
-
- self.dir_re = re.compile(space + "dir" + name)
- self.file_re = re.compile(space + "file" + name +
- "content" + revision)
- self.add_file_re = re.compile(space + "add_file" + name +
- "content" + revision)
- self.patch_re = re.compile(space + "patch" + name +
- "from" + revision + "to" + revision)
- self.rename_re = re.compile(space + "rename" + name + "to" + name)
- self.delete_re = re.compile(space + "delete" + name)
- self.tag_re = re.compile(space + "tag" + name + "revision" +
- revision)
- self.cert_re = re.compile(lines + space + "name" + name +
- "value" + value)
-
- attr = space + "file" + lines + space + "attr" + space
- self.attr_execute_re = re.compile(attr + '"mtn:execute"' +
- space + '"true"')
-
- # cached data
- self.manifest_rev = None
- self.manifest = None
- self.files = None
- self.dirs = None
-
- checktool('mtn', abort=False)
-
- # test if there are any revisions
- self.rev = None
- try:
- self.getheads()
- except:
- raise norepo
- self.rev = rev
-
- def mtnrun(self, *args, **kwargs):
- kwargs['d'] = self.path
- return self.run0('automate', *args, **kwargs)
-
- def mtnloadmanifest(self, rev):
- if self.manifest_rev == rev:
- return
- self.manifest = self.mtnrun("get_manifest_of", rev).split("\n\n")
- self.manifest_rev = rev
- self.files = {}
- self.dirs = {}
-
- for e in self.manifest:
- m = self.file_re.match(e)
- if m:
- attr = ""
- name = m.group(1)
- node = m.group(2)
- if self.attr_execute_re.match(e):
- attr += "x"
- self.files[name] = (node, attr)
- m = self.dir_re.match(e)
- if m:
- self.dirs[m.group(1)] = True
-
- def mtnisfile(self, name, rev):
- # a non-file could be a directory or a deleted or renamed file
- self.mtnloadmanifest(rev)
- return name in self.files
-
- def mtnisdir(self, name, rev):
- self.mtnloadmanifest(rev)
- return name in self.dirs
-
- def mtngetcerts(self, rev):
- certs = {"author":"<missing>", "date":"<missing>",
- "changelog":"<missing>", "branch":"<missing>"}
- certlist = self.mtnrun("certs", rev)
- # mtn < 0.45:
- # key "test@selenic.com"
- # mtn >= 0.45:
- # key [ff58a7ffb771907c4ff68995eada1c4da068d328]
- certlist = re.split('\n\n key ["\[]', certlist)
- for e in certlist:
- m = self.cert_re.match(e)
- if m:
- name, value = m.groups()
- value = value.replace(r'\"', '"')
- value = value.replace(r'\\', '\\')
- certs[name] = value
- # Monotone may have subsecond dates: 2005-02-05T09:39:12.364306
- # and all times are stored in UTC
- certs["date"] = certs["date"].split('.')[0] + " UTC"
- return certs
-
- # implement the converter_source interface:
-
- def getheads(self):
- if not self.rev:
- return self.mtnrun("leaves").splitlines()
- else:
- return [self.rev]
-
- def getchanges(self, rev):
- #revision = self.mtncmd("get_revision %s" % rev).split("\n\n")
- revision = self.mtnrun("get_revision", rev).split("\n\n")
- files = {}
- ignoremove = {}
- renameddirs = []
- copies = {}
- for e in revision:
- m = self.add_file_re.match(e)
- if m:
- files[m.group(1)] = rev
- ignoremove[m.group(1)] = rev
- m = self.patch_re.match(e)
- if m:
- files[m.group(1)] = rev
- # Delete/rename is handled later when the convert engine
- # discovers an IOError exception from getfile,
- # but only if we add the "from" file to the list of changes.
- m = self.delete_re.match(e)
- if m:
- files[m.group(1)] = rev
- m = self.rename_re.match(e)
- if m:
- toname = m.group(2)
- fromname = m.group(1)
- if self.mtnisfile(toname, rev):
- ignoremove[toname] = 1
- copies[toname] = fromname
- files[toname] = rev
- files[fromname] = rev
- elif self.mtnisdir(toname, rev):
- renameddirs.append((fromname, toname))
-
- # Directory renames can be handled only once we have recorded
- # all new files
- for fromdir, todir in renameddirs:
- renamed = {}
- for tofile in self.files:
- if tofile in ignoremove:
- continue
- if tofile.startswith(todir + '/'):
- renamed[tofile] = fromdir + tofile[len(todir):]
- # Avoid chained moves like:
- # d1(/a) => d3/d1(/a)
- # d2 => d3
- ignoremove[tofile] = 1
- for tofile, fromfile in renamed.items():
- self.ui.debug (_("copying file in renamed directory "
- "from '%s' to '%s'")
- % (fromfile, tofile), '\n')
- files[tofile] = rev
- copies[tofile] = fromfile
- for fromfile in renamed.values():
- files[fromfile] = rev
-
- return (files.items(), copies)
-
- def getfile(self, name, rev):
- if not self.mtnisfile(name, rev):
- raise IOError() # file was deleted or renamed
- try:
- data = self.mtnrun("get_file_of", name, r=rev)
- except:
- raise IOError() # file was deleted or renamed
- self.mtnloadmanifest(rev)
- node, attr = self.files.get(name, (None, ""))
- return data, attr
-
- def getcommit(self, rev):
- certs = self.mtngetcerts(rev)
- return commit(
- author=certs["author"],
- date=util.datestr(util.strdate(certs["date"], "%Y-%m-%dT%H:%M:%S")),
- desc=certs["changelog"],
- rev=rev,
- parents=self.mtnrun("parents", rev).splitlines(),
- branch=certs["branch"])
-
- def gettags(self):
- tags = {}
- for e in self.mtnrun("tags").split("\n\n"):
- m = self.tag_re.match(e)
- if m:
- tags[m.group(1)] = m.group(2)
- return tags
-
- def getchangedfiles(self, rev, i):
- # This function is only needed to support --filemap
- # ... and we don't support that
- raise NotImplementedError()
diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/monotone.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/monotone.pyo
deleted file mode 100644
index b51d266..0000000
--- a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/monotone.pyo
+++ /dev/null
Binary files differ
diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/p4.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/p4.py
deleted file mode 100644
index 5d640ad..0000000
--- a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/p4.py
+++ /dev/null
@@ -1,202 +0,0 @@
-# Perforce source for convert extension.
-#
-# Copyright 2009, Frank Kingswood <frank@kingswood-consulting.co.uk>
-#
-# This software may be used and distributed according to the terms of the
-# GNU General Public License version 2 or any later version.
-
-from mercurial import util
-from mercurial.i18n import _
-
-from common import commit, converter_source, checktool, NoRepo
-import marshal
-import re
-
-def loaditer(f):
- "Yield the dictionary objects generated by p4"
- try:
- while True:
- d = marshal.load(f)
- if not d:
- break
- yield d
- except EOFError:
- pass
-
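
loaditer above consumes exactly what 'p4 -G' emits: a bare concatenation of marshal-encoded dictionaries, terminated by end of file. A self-contained round trip through a temporary file, with invented dictionaries standing in for real p4 output:

import marshal, os, tempfile

fd, tmppath = tempfile.mkstemp()
out = os.fdopen(fd, 'wb')
marshal.dump({'code': 'stat', 'change': '42'}, out)     # made-up records
marshal.dump({'code': 'text', 'data': 'hello'}, out)
out.close()

stream = open(tmppath, 'rb')
for record in loaditer(stream):                          # loaditer as defined above
    print(record['code'])
stream.close()
os.unlink(tmppath)
# prints: stat, text
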
-class p4_source(converter_source):
- def __init__(self, ui, path, rev=None):
- super(p4_source, self).__init__(ui, path, rev=rev)
-
- if "/" in path and not path.startswith('//'):
- raise NoRepo(_('%s does not look like a P4 repository') % path)
-
- checktool('p4', abort=False)
-
- self.p4changes = {}
- self.heads = {}
- self.changeset = {}
- self.files = {}
- self.tags = {}
- self.lastbranch = {}
- self.parent = {}
- self.encoding = "latin_1"
- self.depotname = {} # mapping from local name to depot name
- self.re_type = re.compile(
- "([a-z]+)?(text|binary|symlink|apple|resource|unicode|utf\d+)"
- "(\+\w+)?$")
- self.re_keywords = re.compile(
- r"\$(Id|Header|Date|DateTime|Change|File|Revision|Author)"
- r":[^$\n]*\$")
- self.re_keywords_old = re.compile("\$(Id|Header):[^$\n]*\$")
-
- self._parse(ui, path)
-
- def _parse_view(self, path):
- "Read changes affecting the path"
- cmd = 'p4 -G changes -s submitted %s' % util.shellquote(path)
- stdout = util.popen(cmd, mode='rb')
- for d in loaditer(stdout):
- c = d.get("change", None)
- if c:
- self.p4changes[c] = True
-
- def _parse(self, ui, path):
- "Prepare list of P4 filenames and revisions to import"
- ui.status(_('reading p4 views\n'))
-
- # read client spec or view
- if "/" in path:
- self._parse_view(path)
- if path.startswith("//") and path.endswith("/..."):
- views = {path[:-3]:""}
- else:
- views = {"//": ""}
- else:
- cmd = 'p4 -G client -o %s' % util.shellquote(path)
- clientspec = marshal.load(util.popen(cmd, mode='rb'))
-
- views = {}
- for client in clientspec:
- if client.startswith("View"):
- sview, cview = clientspec[client].split()
- self._parse_view(sview)
- if sview.endswith("...") and cview.endswith("..."):
- sview = sview[:-3]
- cview = cview[:-3]
- cview = cview[2:]
- cview = cview[cview.find("/") + 1:]
- views[sview] = cview
-
- # list of changes that affect our source files
- self.p4changes = self.p4changes.keys()
- self.p4changes.sort(key=int)
-
- # list with depot pathnames, longest first
- vieworder = views.keys()
- vieworder.sort(key=len, reverse=True)
-
- # handle revision limiting
- startrev = self.ui.config('convert', 'p4.startrev', default=0)
- self.p4changes = [x for x in self.p4changes
- if ((not startrev or int(x) >= int(startrev)) and
- (not self.rev or int(x) <= int(self.rev)))]
-
- # now read the full changelists to get the list of file revisions
- ui.status(_('collecting p4 changelists\n'))
- lastid = None
- for change in self.p4changes:
- cmd = "p4 -G describe -s %s" % change
- stdout = util.popen(cmd, mode='rb')
- d = marshal.load(stdout)
- desc = self.recode(d["desc"])
- shortdesc = desc.split("\n", 1)[0]
- t = '%s %s' % (d["change"], repr(shortdesc)[1:-1])
- ui.status(util.ellipsis(t, 80) + '\n')
-
- if lastid:
- parents = [lastid]
- else:
- parents = []
-
- date = (int(d["time"]), 0) # timezone not set
- c = commit(author=self.recode(d["user"]), date=util.datestr(date),
- parents=parents, desc=desc, branch='',
- extra={"p4": change})
-
- files = []
- i = 0
- while ("depotFile%d" % i) in d and ("rev%d" % i) in d:
- oldname = d["depotFile%d" % i]
- filename = None
- for v in vieworder:
- if oldname.startswith(v):
- filename = views[v] + oldname[len(v):]
- break
- if filename:
- files.append((filename, d["rev%d" % i]))
- self.depotname[filename] = oldname
- i += 1
- self.changeset[change] = c
- self.files[change] = files
- lastid = change
-
- if lastid:
- self.heads = [lastid]
-
- def getheads(self):
- return self.heads
-
- def getfile(self, name, rev):
- cmd = 'p4 -G print %s' \
- % util.shellquote("%s#%s" % (self.depotname[name], rev))
- stdout = util.popen(cmd, mode='rb')
-
- mode = None
- contents = ""
- keywords = None
-
- for d in loaditer(stdout):
- code = d["code"]
- data = d.get("data")
-
- if code == "error":
- raise IOError(d["generic"], data)
-
- elif code == "stat":
- p4type = self.re_type.match(d["type"])
- if p4type:
- mode = ""
- flags = (p4type.group(1) or "") + (p4type.group(3) or "")
- if "x" in flags:
- mode = "x"
- if p4type.group(2) == "symlink":
- mode = "l"
- if "ko" in flags:
- keywords = self.re_keywords_old
- elif "k" in flags:
- keywords = self.re_keywords
-
- elif code == "text" or code == "binary":
- contents += data
-
- if mode is None:
- raise IOError(0, "bad stat")
-
- if keywords:
- contents = keywords.sub("$\\1$", contents)
- if mode == "l" and contents.endswith("\n"):
- contents = contents[:-1]
-
- return contents, mode
-
- def getchanges(self, rev):
- return self.files[rev], {}
-
- def getcommit(self, rev):
- return self.changeset[rev]
-
- def gettags(self):
- return self.tags
-
- def getchangedfiles(self, rev, i):
- return sorted([x[0] for x in self.files[rev]])
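
getfile above turns the Perforce file type reported by 'p4 print' into a Mercurial mode flag: a '+x'-style modifier yields 'x' and a base type of 'symlink' yields 'l'. A standalone illustration of that mapping, reusing the re_type pattern from the constructor on a few sample type strings:

import re

re_type = re.compile(
    "([a-z]+)?(text|binary|symlink|apple|resource|unicode|utf\d+)"
    "(\+\w+)?$")

def p4mode(typestr):
    m = re_type.match(typestr)
    if not m:
        return None
    flags = (m.group(1) or "") + (m.group(3) or "")
    if m.group(2) == "symlink":
        return "l"
    return "x" if "x" in flags else ""

for t in ("text", "text+x", "ktext", "symlink"):
    print("%-8s -> %r" % (t, p4mode(t)))
# text     -> ''
# text+x   -> 'x'
# ktext    -> ''
# symlink  -> 'l'
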
diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/p4.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/p4.pyo
deleted file mode 100644
index 45f1e2a..0000000
--- a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/p4.pyo
+++ /dev/null
Binary files differ
diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/subversion.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/subversion.py
deleted file mode 100644
index f2d26ad..0000000
--- a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/subversion.py
+++ /dev/null
@@ -1,1168 +0,0 @@
-# Subversion 1.4/1.5 Python API backend
-#
-# Copyright(C) 2007 Daniel Holth et al
-
-import os
-import re
-import sys
-import cPickle as pickle
-import tempfile
-import urllib
-import urllib2
-
-from mercurial import strutil, util, encoding
-from mercurial.i18n import _
-
-# Subversion stuff. Works best with very recent Python SVN bindings
-# e.g. SVN 1.5 or backports. Thanks to the bzr folks for enhancing
-# these bindings.
-
-from cStringIO import StringIO
-
-from common import NoRepo, MissingTool, commit, encodeargs, decodeargs
-from common import commandline, converter_source, converter_sink, mapfile
-
-try:
- from svn.core import SubversionException, Pool
- import svn
- import svn.client
- import svn.core
- import svn.ra
- import svn.delta
- import transport
- import warnings
- warnings.filterwarnings('ignore',
- module='svn.core',
- category=DeprecationWarning)
-
-except ImportError:
- pass
-
-class SvnPathNotFound(Exception):
- pass
-
-def geturl(path):
- try:
- return svn.client.url_from_path(svn.core.svn_path_canonicalize(path))
- except SubversionException:
- pass
- if os.path.isdir(path):
- path = os.path.normpath(os.path.abspath(path))
- if os.name == 'nt':
- path = '/' + util.normpath(path)
- # Module URL is later compared with the repository URL returned
- # by svn API, which is UTF-8.
- path = encoding.tolocal(path)
- return 'file://%s' % urllib.quote(path)
- return path
-
-def optrev(number):
- optrev = svn.core.svn_opt_revision_t()
- optrev.kind = svn.core.svn_opt_revision_number
- optrev.value.number = number
- return optrev
-
-class changedpath(object):
- def __init__(self, p):
- self.copyfrom_path = p.copyfrom_path
- self.copyfrom_rev = p.copyfrom_rev
- self.action = p.action
-
-def get_log_child(fp, url, paths, start, end, limit=0, discover_changed_paths=True,
- strict_node_history=False):
- protocol = -1
- def receiver(orig_paths, revnum, author, date, message, pool):
- if orig_paths is not None:
- for k, v in orig_paths.iteritems():
- orig_paths[k] = changedpath(v)
- pickle.dump((orig_paths, revnum, author, date, message),
- fp, protocol)
-
- try:
- # Use an ra of our own so that our parent can consume
- # our results without confusing the server.
- t = transport.SvnRaTransport(url=url)
- svn.ra.get_log(t.ra, paths, start, end, limit,
- discover_changed_paths,
- strict_node_history,
- receiver)
- except SubversionException, (inst, num):
- pickle.dump(num, fp, protocol)
- except IOError:
- # Caller may interrupt the iteration
- pickle.dump(None, fp, protocol)
- else:
- pickle.dump(None, fp, protocol)
- fp.close()
-    # With a large history, the cleanup process goes crazy and suddenly
-    # consumes a *huge* amount of memory. The output file being closed,
-    # there is no need for clean termination.
- os._exit(0)
-
-def debugsvnlog(ui, **opts):
-    """Fetch the SVN log in a subprocess and channel it back to the parent to
-    avoid memory collection issues.
- """
- util.set_binary(sys.stdin)
- util.set_binary(sys.stdout)
- args = decodeargs(sys.stdin.read())
- get_log_child(sys.stdout, *args)
-
-class logstream(object):
- """Interruptible revision log iterator."""
- def __init__(self, stdout):
- self._stdout = stdout
-
- def __iter__(self):
- while True:
- try:
- entry = pickle.load(self._stdout)
- except EOFError:
- raise util.Abort(_('Mercurial failed to run itself, check'
- ' hg executable is in PATH'))
- try:
- orig_paths, revnum, author, date, message = entry
- except:
- if entry is None:
- break
- raise SubversionException("child raised exception", entry)
- yield entry
-
- def close(self):
- if self._stdout:
- self._stdout.close()
- self._stdout = None
-
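
get_log_child and logstream above form a producer/consumer pair: the child pickles one (paths, revnum, author, date, message) tuple per log entry onto its stdout and finishes with a pickled None (or an error number), and the parent unpickles entries lazily. A toy reproduction of that framing over an in-memory buffer, with invented log entries:

import cPickle as pickle
from cStringIO import StringIO

buf = StringIO()
for entry in (({'/trunk/a.txt': None}, 1, 'alice', '2007-01-04T17:35:00Z', 'first'),
              ({'/trunk/b.txt': None}, 2, 'bob', '2007-01-05T09:00:00Z', 'second')):
    pickle.dump(entry, buf)
pickle.dump(None, buf)              # sentinel marking the end of the stream
buf.seek(0)

while True:
    entry = pickle.load(buf)
    if entry is None:
        break
    paths, revnum, author, date, message = entry
    print('%d %s: %s' % (revnum, author, message))
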
-
-# Check to see if the given path is a local Subversion repo. Verify this by
-# looking for several svn-specific files and directories in the given
-# directory.
-def filecheck(ui, path, proto):
- for x in ('locks', 'hooks', 'format', 'db'):
- if not os.path.exists(os.path.join(path, x)):
- return False
- return True
-
-# Check to see if a given path is the root of an svn repo over http. We verify
-# this by requesting a version-controlled URL we know can't exist and looking
-# for the svn-specific "not found" XML.
-def httpcheck(ui, path, proto):
- try:
- opener = urllib2.build_opener()
- rsp = opener.open('%s://%s/!svn/ver/0/.svn' % (proto, path))
- data = rsp.read()
- except urllib2.HTTPError, inst:
- if inst.code != 404:
- # Except for 404 we cannot know for sure this is not an svn repo
- ui.warn(_('svn: cannot probe remote repository, assume it could '
- 'be a subversion repository. Use --source-type if you '
- 'know better.\n'))
- return True
- data = inst.fp.read()
- except:
- # Could be urllib2.URLError if the URL is invalid or anything else.
- return False
- return '<m:human-readable errcode="160013">' in data
-
-protomap = {'http': httpcheck,
- 'https': httpcheck,
- 'file': filecheck,
- }
-def issvnurl(ui, url):
- try:
- proto, path = url.split('://', 1)
- if proto == 'file':
- path = urllib.url2pathname(path)
- except ValueError:
- proto = 'file'
- path = os.path.abspath(url)
- if proto == 'file':
- path = path.replace(os.sep, '/')
- check = protomap.get(proto, lambda *args: False)
- while '/' in path:
- if check(ui, path, proto):
- return True
- path = path.rsplit('/', 1)[0]
- return False
-
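
issvnurl above probes successively shorter prefixes of the candidate URL, handing each one to the protocol-specific checker until something answers or the path runs out of components. A trace of the prefixes it would try for a hypothetical http URL:

path = 'svn.example.org/repos/project/trunk'
while '/' in path:
    print(path)                      # each prefix handed to httpcheck in turn
    path = path.rsplit('/', 1)[0]
# svn.example.org/repos/project/trunk
# svn.example.org/repos/project
# svn.example.org/repos
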
-# SVN conversion code stolen from bzr-svn and tailor
-#
-# Subversion looks like a versioned filesystem, branches structures
-# are defined by conventions and not enforced by the tool. First,
-# we define the potential branches (modules) as "trunk" and "branches"
-# children directories. Revisions are then identified by their
-# module and revision number (and a repository identifier).
-#
-# The revision graph is really a tree (or a forest). By default, a
-# revision parent is the previous revision in the same module. If the
-# module directory is copied/moved from another module then the
-# revision is the module root and its parent the source revision in
-# the parent module. A revision has at most one parent.
-#
-class svn_source(converter_source):
- def __init__(self, ui, url, rev=None):
- super(svn_source, self).__init__(ui, url, rev=rev)
-
- if not (url.startswith('svn://') or url.startswith('svn+ssh://') or
- (os.path.exists(url) and
- os.path.exists(os.path.join(url, '.svn'))) or
- issvnurl(ui, url)):
- raise NoRepo(_("%s does not look like a Subversion repository")
- % url)
-
- try:
- SubversionException
- except NameError:
- raise MissingTool(_('Subversion python bindings could not be loaded'))
-
- try:
- version = svn.core.SVN_VER_MAJOR, svn.core.SVN_VER_MINOR
- if version < (1, 4):
- raise MissingTool(_('Subversion python bindings %d.%d found, '
- '1.4 or later required') % version)
- except AttributeError:
- raise MissingTool(_('Subversion python bindings are too old, 1.4 '
- 'or later required'))
-
- self.lastrevs = {}
-
- latest = None
- try:
- # Support file://path@rev syntax. Useful e.g. to convert
- # deleted branches.
- at = url.rfind('@')
- if at >= 0:
- latest = int(url[at + 1:])
- url = url[:at]
- except ValueError:
- pass
- self.url = geturl(url)
- self.encoding = 'UTF-8' # Subversion is always nominal UTF-8
- try:
- self.transport = transport.SvnRaTransport(url=self.url)
- self.ra = self.transport.ra
- self.ctx = self.transport.client
- self.baseurl = svn.ra.get_repos_root(self.ra)
- # Module is either empty or a repository path starting with
- # a slash and not ending with a slash.
- self.module = urllib.unquote(self.url[len(self.baseurl):])
- self.prevmodule = None
- self.rootmodule = self.module
- self.commits = {}
- self.paths = {}
- self.uuid = svn.ra.get_uuid(self.ra)
- except SubversionException:
- ui.traceback()
- raise NoRepo(_("%s does not look like a Subversion repository")
- % self.url)
-
- if rev:
- try:
- latest = int(rev)
- except ValueError:
- raise util.Abort(_('svn: revision %s is not an integer') % rev)
-
- self.startrev = self.ui.config('convert', 'svn.startrev', default=0)
- try:
- self.startrev = int(self.startrev)
- if self.startrev < 0:
- self.startrev = 0
- except ValueError:
- raise util.Abort(_('svn: start revision %s is not an integer')
- % self.startrev)
-
- self.head = self.latest(self.module, latest)
- if not self.head:
- raise util.Abort(_('no revision found in module %s')
- % self.module)
- self.last_changed = self.revnum(self.head)
-
- self._changescache = None
-
- if os.path.exists(os.path.join(url, '.svn/entries')):
- self.wc = url
- else:
- self.wc = None
- self.convertfp = None
-
- def setrevmap(self, revmap):
- lastrevs = {}
- for revid in revmap.iterkeys():
- uuid, module, revnum = self.revsplit(revid)
- lastrevnum = lastrevs.setdefault(module, revnum)
- if revnum > lastrevnum:
- lastrevs[module] = revnum
- self.lastrevs = lastrevs
-
- def exists(self, path, optrev):
- try:
- svn.client.ls(self.url.rstrip('/') + '/' + urllib.quote(path),
- optrev, False, self.ctx)
- return True
- except SubversionException:
- return False
-
- def getheads(self):
-
- def isdir(path, revnum):
- kind = self._checkpath(path, revnum)
- return kind == svn.core.svn_node_dir
-
- def getcfgpath(name, rev):
- cfgpath = self.ui.config('convert', 'svn.' + name)
- if cfgpath is not None and cfgpath.strip() == '':
- return None
- path = (cfgpath or name).strip('/')
- if not self.exists(path, rev):
- if cfgpath:
- raise util.Abort(_('expected %s to be at %r, but not found')
- % (name, path))
- return None
- self.ui.note(_('found %s at %r\n') % (name, path))
- return path
-
- rev = optrev(self.last_changed)
- oldmodule = ''
- trunk = getcfgpath('trunk', rev)
- self.tags = getcfgpath('tags', rev)
- branches = getcfgpath('branches', rev)
-
- # If the project has a trunk or branches, we will extract heads
- # from them. We keep the project root otherwise.
- if trunk:
- oldmodule = self.module or ''
- self.module += '/' + trunk
- self.head = self.latest(self.module, self.last_changed)
- if not self.head:
- raise util.Abort(_('no revision found in module %s')
- % self.module)
-
- # First head in the list is the module's head
- self.heads = [self.head]
- if self.tags is not None:
- self.tags = '%s/%s' % (oldmodule , (self.tags or 'tags'))
-
- # Check if branches bring a few more heads to the list
- if branches:
- rpath = self.url.strip('/')
- branchnames = svn.client.ls(rpath + '/' + urllib.quote(branches),
- rev, False, self.ctx)
- for branch in branchnames.keys():
- module = '%s/%s/%s' % (oldmodule, branches, branch)
- if not isdir(module, self.last_changed):
- continue
- brevid = self.latest(module, self.last_changed)
- if not brevid:
- self.ui.note(_('ignoring empty branch %s\n') % branch)
- continue
- self.ui.note(_('found branch %s at %d\n') %
- (branch, self.revnum(brevid)))
- self.heads.append(brevid)
-
- if self.startrev and self.heads:
- if len(self.heads) > 1:
- raise util.Abort(_('svn: start revision is not supported '
- 'with more than one branch'))
- revnum = self.revnum(self.heads[0])
- if revnum < self.startrev:
- raise util.Abort(
- _('svn: no revision found after start revision %d')
- % self.startrev)
-
- return self.heads
-
- def getchanges(self, rev):
- if self._changescache and self._changescache[0] == rev:
- return self._changescache[1]
- self._changescache = None
- (paths, parents) = self.paths[rev]
- if parents:
- files, self.removed, copies = self.expandpaths(rev, paths, parents)
- else:
- # Perform a full checkout on roots
- uuid, module, revnum = self.revsplit(rev)
- entries = svn.client.ls(self.baseurl + urllib.quote(module),
- optrev(revnum), True, self.ctx)
- files = [n for n, e in entries.iteritems()
- if e.kind == svn.core.svn_node_file]
- copies = {}
- self.removed = set()
-
- files.sort()
- files = zip(files, [rev] * len(files))
-
- # caller caches the result, so free it here to release memory
- del self.paths[rev]
- return (files, copies)
-
- def getchangedfiles(self, rev, i):
- changes = self.getchanges(rev)
- self._changescache = (rev, changes)
- return [f[0] for f in changes[0]]
-
- def getcommit(self, rev):
- if rev not in self.commits:
- uuid, module, revnum = self.revsplit(rev)
- self.module = module
- self.reparent(module)
- # We assume that:
- # - requests for revisions after "stop" come from the
-            #   revision graph backward traversal. Cache all of them
-            #   down to stop; they will be used eventually.
-            # - requests for revisions before "stop" come to get
-            #   isolated branches' parents. Just fetch what is needed.
- stop = self.lastrevs.get(module, 0)
- if revnum < stop:
- stop = revnum + 1
- self._fetch_revisions(revnum, stop)
- commit = self.commits[rev]
- # caller caches the result, so free it here to release memory
- del self.commits[rev]
- return commit
-
- def gettags(self):
- tags = {}
- if self.tags is None:
- return tags
-
- # svn tags are just a convention, project branches left in a
- # 'tags' directory. There is no other relationship than
- # ancestry, which is expensive to discover and makes them hard
- # to update incrementally. Worse, past revisions may be
- # referenced by tags far away in the future, requiring a deep
- # history traversal on every calculation. Current code
- # performs a single backward traversal, tracking moves within
- # the tags directory (tag renaming) and recording a new tag
-        # every time a project is copied from outside the tags
-        # directory. It also lists deleted tags; this behaviour may
-        # change in the future.
- pendings = []
- tagspath = self.tags
- start = svn.ra.get_latest_revnum(self.ra)
- stream = self._getlog([self.tags], start, self.startrev)
- try:
- for entry in stream:
- origpaths, revnum, author, date, message = entry
- copies = [(e.copyfrom_path, e.copyfrom_rev, p) for p, e
- in origpaths.iteritems() if e.copyfrom_path]
- # Apply moves/copies from more specific to general
- copies.sort(reverse=True)
-
- srctagspath = tagspath
- if copies and copies[-1][2] == tagspath:
- # Track tags directory moves
- srctagspath = copies.pop()[0]
-
- for source, sourcerev, dest in copies:
- if not dest.startswith(tagspath + '/'):
- continue
- for tag in pendings:
- if tag[0].startswith(dest):
- tagpath = source + tag[0][len(dest):]
- tag[:2] = [tagpath, sourcerev]
- break
- else:
- pendings.append([source, sourcerev, dest])
-
- # Filter out tags with children coming from different
- # parts of the repository like:
- # /tags/tag.1 (from /trunk:10)
- # /tags/tag.1/foo (from /branches/foo:12)
-                # Here /tags/tag.1 is discarded, as well as its children.
- # It happens with tools like cvs2svn. Such tags cannot
- # be represented in mercurial.
- addeds = dict((p, e.copyfrom_path) for p, e
- in origpaths.iteritems()
- if e.action == 'A' and e.copyfrom_path)
- badroots = set()
- for destroot in addeds:
- for source, sourcerev, dest in pendings:
- if (not dest.startswith(destroot + '/')
- or source.startswith(addeds[destroot] + '/')):
- continue
- badroots.add(destroot)
- break
-
- for badroot in badroots:
- pendings = [p for p in pendings if p[2] != badroot
- and not p[2].startswith(badroot + '/')]
-
- # Tell tag renamings from tag creations
- remainings = []
- for source, sourcerev, dest in pendings:
- tagname = dest.split('/')[-1]
- if source.startswith(srctagspath):
- remainings.append([source, sourcerev, tagname])
- continue
- if tagname in tags:
- # Keep the latest tag value
- continue
- # From revision may be fake, get one with changes
- try:
- tagid = self.latest(source, sourcerev)
- if tagid and tagname not in tags:
- tags[tagname] = tagid
- except SvnPathNotFound:
- # It happens when we are following directories
- # we assumed were copied with their parents
- # but were really created in the tag
- # directory.
- pass
- pendings = remainings
- tagspath = srctagspath
- finally:
- stream.close()
- return tags
-
- def converted(self, rev, destrev):
- if not self.wc:
- return
- if self.convertfp is None:
- self.convertfp = open(os.path.join(self.wc, '.svn', 'hg-shamap'),
- 'a')
- self.convertfp.write('%s %d\n' % (destrev, self.revnum(rev)))
- self.convertfp.flush()
-
- def revid(self, revnum, module=None):
- return 'svn:%s%s@%s' % (self.uuid, module or self.module, revnum)
-
- def revnum(self, rev):
- return int(rev.split('@')[-1])
-
- def revsplit(self, rev):
- url, revnum = rev.rsplit('@', 1)
- revnum = int(revnum)
- parts = url.split('/', 1)
- uuid = parts.pop(0)[4:]
- mod = ''
- if parts:
- mod = '/' + parts[0]
- return uuid, mod, revnum
-
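
revid, revnum and revsplit above encode and decode the converter's internal Subversion revision identifiers, which have the form 'svn:<uuid><module>@<revnum>' with the module either empty or starting with a slash. A round trip on a made-up identifier:

def revsplit(rev):
    # Same decomposition as the method above, shown standalone.
    url, revnum = rev.rsplit('@', 1)
    parts = url.split('/', 1)
    uuid = parts.pop(0)[4:]          # strip the leading 'svn:'
    mod = '/' + parts[0] if parts else ''
    return uuid, mod, int(revnum)

rev = 'svn:612f8ebc-c883-4be0-9ee0-a4e9ef946e3a/trunk@42'   # invented uuid
print(revsplit(rev))
# ('612f8ebc-c883-4be0-9ee0-a4e9ef946e3a', '/trunk', 42)
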
- def latest(self, path, stop=0):
- """Find the latest revid affecting path, up to stop. It may return
- a revision in a different module, since a branch may be moved without
-        a change being reported. Return None if the computed module does not
-        belong to the rootmodule subtree.
- """
- if not path.startswith(self.rootmodule):
- # Requests on foreign branches may be forbidden at server level
- self.ui.debug('ignoring foreign branch %r\n' % path)
- return None
-
- if not stop:
- stop = svn.ra.get_latest_revnum(self.ra)
- try:
- prevmodule = self.reparent('')
- dirent = svn.ra.stat(self.ra, path.strip('/'), stop)
- self.reparent(prevmodule)
- except SubversionException:
- dirent = None
- if not dirent:
- raise SvnPathNotFound(_('%s not found up to revision %d')
- % (path, stop))
-
- # stat() gives us the previous revision on this line of
- # development, but it might be in *another module*. Fetch the
- # log and detect renames down to the latest revision.
- stream = self._getlog([path], stop, dirent.created_rev)
- try:
- for entry in stream:
- paths, revnum, author, date, message = entry
- if revnum <= dirent.created_rev:
- break
-
- for p in paths:
- if not path.startswith(p) or not paths[p].copyfrom_path:
- continue
- newpath = paths[p].copyfrom_path + path[len(p):]
- self.ui.debug("branch renamed from %s to %s at %d\n" %
- (path, newpath, revnum))
- path = newpath
- break
- finally:
- stream.close()
-
- if not path.startswith(self.rootmodule):
- self.ui.debug('ignoring foreign branch %r\n' % path)
- return None
- return self.revid(dirent.created_rev, path)
-
- def reparent(self, module):
- """Reparent the svn transport and return the previous parent."""
- if self.prevmodule == module:
- return module
- svnurl = self.baseurl + urllib.quote(module)
- prevmodule = self.prevmodule
- if prevmodule is None:
- prevmodule = ''
- self.ui.debug("reparent to %s\n" % svnurl)
- svn.ra.reparent(self.ra, svnurl)
- self.prevmodule = module
- return prevmodule
-
- def expandpaths(self, rev, paths, parents):
- changed, removed = set(), set()
- copies = {}
-
- new_module, revnum = self.revsplit(rev)[1:]
- if new_module != self.module:
- self.module = new_module
- self.reparent(self.module)
-
- for i, (path, ent) in enumerate(paths):
- self.ui.progress(_('scanning paths'), i, item=path,
- total=len(paths))
- entrypath = self.getrelpath(path)
-
- kind = self._checkpath(entrypath, revnum)
- if kind == svn.core.svn_node_file:
- changed.add(self.recode(entrypath))
- if not ent.copyfrom_path or not parents:
- continue
- # Copy sources not in parent revisions cannot be
- # represented, ignore their origin for now
- pmodule, prevnum = self.revsplit(parents[0])[1:]
- if ent.copyfrom_rev < prevnum:
- continue
- copyfrom_path = self.getrelpath(ent.copyfrom_path, pmodule)
- if not copyfrom_path:
- continue
- self.ui.debug("copied to %s from %s@%s\n" %
- (entrypath, copyfrom_path, ent.copyfrom_rev))
- copies[self.recode(entrypath)] = self.recode(copyfrom_path)
- elif kind == 0: # gone, but had better be a deleted *file*
- self.ui.debug("gone from %s\n" % ent.copyfrom_rev)
- pmodule, prevnum = self.revsplit(parents[0])[1:]
- parentpath = pmodule + "/" + entrypath
- fromkind = self._checkpath(entrypath, prevnum, pmodule)
-
- if fromkind == svn.core.svn_node_file:
- removed.add(self.recode(entrypath))
- elif fromkind == svn.core.svn_node_dir:
- oroot = parentpath.strip('/')
- nroot = path.strip('/')
- children = self._iterfiles(oroot, prevnum)
- for childpath in children:
- childpath = childpath.replace(oroot, nroot)
- childpath = self.getrelpath("/" + childpath, pmodule)
- if childpath:
- removed.add(self.recode(childpath))
- else:
- self.ui.debug('unknown path in revision %d: %s\n' % \
- (revnum, path))
- elif kind == svn.core.svn_node_dir:
- if ent.action == 'M':
- # If the directory just had a prop change,
- # then we shouldn't need to look for its children.
- continue
- if ent.action == 'R' and parents:
- # If a directory is replacing a file, mark the previous
- # file as deleted
- pmodule, prevnum = self.revsplit(parents[0])[1:]
- pkind = self._checkpath(entrypath, prevnum, pmodule)
- if pkind == svn.core.svn_node_file:
- removed.add(self.recode(entrypath))
- elif pkind == svn.core.svn_node_dir:
- # We do not know what files were kept or removed,
- # mark them all as changed.
- for childpath in self._iterfiles(pmodule, prevnum):
- childpath = self.getrelpath("/" + childpath)
- if childpath:
- changed.add(self.recode(childpath))
-
- for childpath in self._iterfiles(path, revnum):
- childpath = self.getrelpath("/" + childpath)
- if childpath:
- changed.add(self.recode(childpath))
-
- # Handle directory copies
- if not ent.copyfrom_path or not parents:
- continue
- # Copy sources not in parent revisions cannot be
- # represented, ignore their origin for now
- pmodule, prevnum = self.revsplit(parents[0])[1:]
- if ent.copyfrom_rev < prevnum:
- continue
- copyfrompath = self.getrelpath(ent.copyfrom_path, pmodule)
- if not copyfrompath:
- continue
- self.ui.debug("mark %s came from %s:%d\n"
- % (path, copyfrompath, ent.copyfrom_rev))
- children = self._iterfiles(ent.copyfrom_path, ent.copyfrom_rev)
- for childpath in children:
- childpath = self.getrelpath("/" + childpath, pmodule)
- if not childpath:
- continue
- copytopath = path + childpath[len(copyfrompath):]
- copytopath = self.getrelpath(copytopath)
- copies[self.recode(copytopath)] = self.recode(childpath)
-
- self.ui.progress(_('scanning paths'), None)
- changed.update(removed)
- return (list(changed), removed, copies)
-
- def _fetch_revisions(self, from_revnum, to_revnum):
- if from_revnum < to_revnum:
- from_revnum, to_revnum = to_revnum, from_revnum
-
- self.child_cset = None
-
- def parselogentry(orig_paths, revnum, author, date, message):
- """Return the parsed commit object or None, and True if
- the revision is a branch root.
- """
- self.ui.debug("parsing revision %d (%d changes)\n" %
- (revnum, len(orig_paths)))
-
- branched = False
- rev = self.revid(revnum)
- # branch log might return entries for a parent we already have
-
- if rev in self.commits or revnum < to_revnum:
- return None, branched
-
- parents = []
- # check whether this revision is the start of a branch or part
- # of a branch renaming
- orig_paths = sorted(orig_paths.iteritems())
- root_paths = [(p, e) for p, e in orig_paths
- if self.module.startswith(p)]
- if root_paths:
- path, ent = root_paths[-1]
- if ent.copyfrom_path:
- branched = True
- newpath = ent.copyfrom_path + self.module[len(path):]
- # ent.copyfrom_rev may not be the actual last revision
- previd = self.latest(newpath, ent.copyfrom_rev)
- if previd is not None:
- prevmodule, prevnum = self.revsplit(previd)[1:]
- if prevnum >= self.startrev:
- parents = [previd]
- self.ui.note(
- _('found parent of branch %s at %d: %s\n') %
- (self.module, prevnum, prevmodule))
- else:
- self.ui.debug("no copyfrom path, don't know what to do.\n")
-
- paths = []
- # filter out unrelated paths
- for path, ent in orig_paths:
- if self.getrelpath(path) is None:
- continue
- paths.append((path, ent))
-
- # Example SVN datetime. Includes microseconds.
- # ISO-8601 conformant
- # '2007-01-04T17:35:00.902377Z'
- date = util.parsedate(date[:19] + " UTC", ["%Y-%m-%dT%H:%M:%S"])
-
- log = message and self.recode(message) or ''
- author = author and self.recode(author) or ''
- try:
- branch = self.module.split("/")[-1]
- if branch == 'trunk':
- branch = ''
- except IndexError:
- branch = None
-
- cset = commit(author=author,
- date=util.datestr(date),
- desc=log,
- parents=parents,
- branch=branch,
- rev=rev)
-
- self.commits[rev] = cset
- # The parents list is *shared* among self.paths and the
- # commit object. Both will be updated below.
- self.paths[rev] = (paths, cset.parents)
- if self.child_cset and not self.child_cset.parents:
- self.child_cset.parents[:] = [rev]
- self.child_cset = cset
- return cset, branched
-
- self.ui.note(_('fetching revision log for "%s" from %d to %d\n') %
- (self.module, from_revnum, to_revnum))
-
- try:
- firstcset = None
- lastonbranch = False
- stream = self._getlog([self.module], from_revnum, to_revnum)
- try:
- for entry in stream:
- paths, revnum, author, date, message = entry
- if revnum < self.startrev:
- lastonbranch = True
- break
- if not paths:
- self.ui.debug('revision %d has no entries\n' % revnum)
- # If we ever leave the loop on an empty
- # revision, do not try to get a parent branch
- lastonbranch = lastonbranch or revnum == 0
- continue
- cset, lastonbranch = parselogentry(paths, revnum, author,
- date, message)
- if cset:
- firstcset = cset
- if lastonbranch:
- break
- finally:
- stream.close()
-
- if not lastonbranch and firstcset and not firstcset.parents:
- # The first revision of the sequence (the last fetched one)
- # has invalid parents if not a branch root. Find the parent
- # revision now, if any.
- try:
- firstrevnum = self.revnum(firstcset.rev)
- if firstrevnum > 1:
- latest = self.latest(self.module, firstrevnum - 1)
- if latest:
- firstcset.parents.append(latest)
- except SvnPathNotFound:
- pass
- except SubversionException, (inst, num):
- if num == svn.core.SVN_ERR_FS_NO_SUCH_REVISION:
- raise util.Abort(_('svn: branch has no revision %s') % to_revnum)
- raise
-
- def getfile(self, file, rev):
- # TODO: ra.get_file transmits the whole file instead of diffs.
- if file in self.removed:
- raise IOError()
- mode = ''
- try:
- new_module, revnum = self.revsplit(rev)[1:]
- if self.module != new_module:
- self.module = new_module
- self.reparent(self.module)
- io = StringIO()
- info = svn.ra.get_file(self.ra, file, revnum, io)
- data = io.getvalue()
-            # ra.get_file() seems to keep a reference to the input buffer,
-            # preventing collection. Release it explicitly.
- io.close()
- if isinstance(info, list):
- info = info[-1]
- mode = ("svn:executable" in info) and 'x' or ''
- mode = ("svn:special" in info) and 'l' or mode
- except SubversionException, e:
- notfound = (svn.core.SVN_ERR_FS_NOT_FOUND,
- svn.core.SVN_ERR_RA_DAV_PATH_NOT_FOUND)
- if e.apr_err in notfound: # File not found
- raise IOError()
- raise
- if mode == 'l':
- link_prefix = "link "
- if data.startswith(link_prefix):
- data = data[len(link_prefix):]
- return data, mode
-
- def _iterfiles(self, path, revnum):
- """Enumerate all files in path at revnum, recursively."""
- path = path.strip('/')
- pool = Pool()
- rpath = '/'.join([self.baseurl, urllib.quote(path)]).strip('/')
- entries = svn.client.ls(rpath, optrev(revnum), True, self.ctx, pool)
- return ((path + '/' + p) for p, e in entries.iteritems()
- if e.kind == svn.core.svn_node_file)
-
- def getrelpath(self, path, module=None):
- if module is None:
- module = self.module
- # Given the repository url of this wc, say
- # "http://server/plone/CMFPlone/branches/Plone-2_0-branch"
- # extract the "entry" portion (a relative path) from what
- # svn log --xml says, ie
- # "/CMFPlone/branches/Plone-2_0-branch/tests/PloneTestCase.py"
- # that is to say "tests/PloneTestCase.py"
- if path.startswith(module):
- relative = path.rstrip('/')[len(module):]
- if relative.startswith('/'):
- return relative[1:]
- elif relative == '':
- return relative
-
- # The path is outside our tracked tree...
- self.ui.debug('%r is not under %r, ignoring\n' % (path, module))
- return None
-
- def _checkpath(self, path, revnum, module=None):
- if module is not None:
- prevmodule = self.reparent('')
- path = module + '/' + path
- try:
-            # ra.check_path does not like leading slashes very much; they lead
-            # to PROPFIND subversion errors
- return svn.ra.check_path(self.ra, path.strip('/'), revnum)
- finally:
- if module is not None:
- self.reparent(prevmodule)
-
- def _getlog(self, paths, start, end, limit=0, discover_changed_paths=True,
- strict_node_history=False):
-        # Normalize path names; svn >= 1.5 only wants paths relative to
-        # the supplied URL
- relpaths = []
- for p in paths:
- if not p.startswith('/'):
- p = self.module + '/' + p
- relpaths.append(p.strip('/'))
- args = [self.baseurl, relpaths, start, end, limit, discover_changed_paths,
- strict_node_history]
- arg = encodeargs(args)
- hgexe = util.hgexecutable()
- cmd = '%s debugsvnlog' % util.shellquote(hgexe)
- stdin, stdout = util.popen2(cmd)
- stdin.write(arg)
- try:
- stdin.close()
- except IOError:
- raise util.Abort(_('Mercurial failed to run itself, check'
- ' hg executable is in PATH'))
- return logstream(stdout)
-
-pre_revprop_change = '''#!/bin/sh
-
-REPOS="$1"
-REV="$2"
-USER="$3"
-PROPNAME="$4"
-ACTION="$5"
-
-if [ "$ACTION" = "M" -a "$PROPNAME" = "svn:log" ]; then exit 0; fi
-if [ "$ACTION" = "A" -a "$PROPNAME" = "hg:convert-branch" ]; then exit 0; fi
-if [ "$ACTION" = "A" -a "$PROPNAME" = "hg:convert-rev" ]; then exit 0; fi
-
-echo "Changing prohibited revision property" >&2
-exit 1
-'''
-
-class svn_sink(converter_sink, commandline):
- commit_re = re.compile(r'Committed revision (\d+).', re.M)
-
- def prerun(self):
- if self.wc:
- os.chdir(self.wc)
-
- def postrun(self):
- if self.wc:
- os.chdir(self.cwd)
-
- def join(self, name):
- return os.path.join(self.wc, '.svn', name)
-
- def revmapfile(self):
- return self.join('hg-shamap')
-
- def authorfile(self):
- return self.join('hg-authormap')
-
- def __init__(self, ui, path):
- converter_sink.__init__(self, ui, path)
- commandline.__init__(self, ui, 'svn')
- self.delete = []
- self.setexec = []
- self.delexec = []
- self.copies = []
- self.wc = None
- self.cwd = os.getcwd()
-
- path = os.path.realpath(path)
-
- created = False
- if os.path.isfile(os.path.join(path, '.svn', 'entries')):
- self.wc = path
- self.run0('update')
- else:
- wcpath = os.path.join(os.getcwd(), os.path.basename(path) + '-wc')
-
- if os.path.isdir(os.path.dirname(path)):
- if not os.path.exists(os.path.join(path, 'db', 'fs-type')):
- ui.status(_('initializing svn repository %r\n') %
- os.path.basename(path))
- commandline(ui, 'svnadmin').run0('create', path)
- created = path
- path = util.normpath(path)
- if not path.startswith('/'):
- path = '/' + path
- path = 'file://' + path
-
- ui.status(_('initializing svn working copy %r\n')
- % os.path.basename(wcpath))
- self.run0('checkout', path, wcpath)
-
- self.wc = wcpath
- self.opener = util.opener(self.wc)
- self.wopener = util.opener(self.wc)
- self.childmap = mapfile(ui, self.join('hg-childmap'))
- self.is_exec = util.checkexec(self.wc) and util.is_exec or None
-
- if created:
- hook = os.path.join(created, 'hooks', 'pre-revprop-change')
- fp = open(hook, 'w')
- fp.write(pre_revprop_change)
- fp.close()
- util.set_flags(hook, False, True)
-
- xport = transport.SvnRaTransport(url=geturl(path))
- self.uuid = svn.ra.get_uuid(xport.ra)
-
- def wjoin(self, *names):
- return os.path.join(self.wc, *names)
-
- def putfile(self, filename, flags, data):
- if 'l' in flags:
- self.wopener.symlink(data, filename)
- else:
- try:
- if os.path.islink(self.wjoin(filename)):
- os.unlink(filename)
- except OSError:
- pass
- self.wopener(filename, 'w').write(data)
-
- if self.is_exec:
- was_exec = self.is_exec(self.wjoin(filename))
- else:
- # On filesystems without execute-bit support, the only way to
- # know whether the bit is set is to ask Subversion. Setting it
- # unconditionally costs about the same and is much simpler.
- was_exec = 'x' not in flags
-
- util.set_flags(self.wjoin(filename), False, 'x' in flags)
- if was_exec:
- if 'x' not in flags:
- self.delexec.append(filename)
- else:
- if 'x' in flags:
- self.setexec.append(filename)
-
- def _copyfile(self, source, dest):
- # SVN's copy command pukes if the destination file exists, but
- # our copyfile method expects to record a copy that has
- # already occurred. Cross the semantic gap.
- wdest = self.wjoin(dest)
- exists = os.path.lexists(wdest)
- if exists:
- fd, tempname = tempfile.mkstemp(
- prefix='hg-copy-', dir=os.path.dirname(wdest))
- os.close(fd)
- os.unlink(tempname)
- os.rename(wdest, tempname)
- try:
- self.run0('copy', source, dest)
- finally:
- if exists:
- try:
- os.unlink(wdest)
- except OSError:
- pass
- os.rename(tempname, wdest)
-
- def dirs_of(self, files):
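- # Return every directory containing one of the given files: all
- # ancestor directories, plus files that are themselves directories.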
- dirs = set()
- for f in files:
- if os.path.isdir(self.wjoin(f)):
- dirs.add(f)
- for i in strutil.rfindall(f, '/'):
- dirs.add(f[:i])
- return dirs
-
- def add_dirs(self, files):
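- # Schedule still-unversioned ancestor directories for addition
- # (non-recursively) so the files inside them can be added.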
- add_dirs = [d for d in sorted(self.dirs_of(files))
- if not os.path.exists(self.wjoin(d, '.svn', 'entries'))]
- if add_dirs:
- self.xargs(add_dirs, 'add', non_recursive=True, quiet=True)
- return add_dirs
-
- def add_files(self, files):
- if files:
- self.xargs(files, 'add', quiet=True)
- return files
-
- def tidy_dirs(self, names):
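- # Delete directories that are left with nothing but their .svn
- # administrative area.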
- deleted = []
- for d in sorted(self.dirs_of(names), reverse=True):
- wd = self.wjoin(d)
- if os.listdir(wd) == ['.svn']:
- self.run0('delete', d)
- deleted.append(d)
- return deleted
-
- def addchild(self, parent, child):
- self.childmap[parent] = child
-
- def revid(self, rev):
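- # A converted revision is identified by the repository UUID plus
- # its Subversion revision number.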
- return u"svn:%s@%s" % (self.uuid, rev)
-
- def putcommit(self, files, copies, parents, commit, source, revmap):
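- # Stage the changeset's file modifications, copies and deletions
- # in the working copy, run 'svn commit', and record the resulting
- # revision number for each parent.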
- # Apply changes to working copy
- for f, v in files:
- try:
- data, mode = source.getfile(f, v)
- except IOError:
- self.delete.append(f)
- else:
- self.putfile(f, mode, data)
- if f in copies:
- self.copies.append([copies[f], f])
- files = [f[0] for f in files]
-
- for parent in parents:
- try:
- return self.revid(self.childmap[parent])
- except KeyError:
- pass
- entries = set(self.delete)
- files = frozenset(files)
- entries.update(self.add_dirs(files.difference(entries)))
- if self.copies:
- for s, d in self.copies:
- self._copyfile(s, d)
- self.copies = []
- if self.delete:
- self.xargs(self.delete, 'delete')
- self.delete = []
- entries.update(self.add_files(files.difference(entries)))
- entries.update(self.tidy_dirs(entries))
- if self.delexec:
- self.xargs(self.delexec, 'propdel', 'svn:executable')
- self.delexec = []
- if self.setexec:
- self.xargs(self.setexec, 'propset', 'svn:executable', '*')
- self.setexec = []
-
- fd, messagefile = tempfile.mkstemp(prefix='hg-convert-')
- fp = os.fdopen(fd, 'w')
- fp.write(commit.desc)
- fp.close()
- try:
- output = self.run0('commit',
- username=util.shortuser(commit.author),
- file=messagefile,
- encoding='utf-8')
- try:
- rev = self.commit_re.search(output).group(1)
- except AttributeError:
- if not files:
- return parents[0]
- self.ui.warn(_('unexpected svn output:\n'))
- self.ui.warn(output)
- raise util.Abort(_('unable to cope with svn output'))
- if commit.rev:
- self.run('propset', 'hg:convert-rev', commit.rev,
- revprop=True, revision=rev)
- if commit.branch and commit.branch != 'default':
- self.run('propset', 'hg:convert-branch', commit.branch,
- revprop=True, revision=rev)
- for parent in parents:
- self.addchild(parent, rev)
- return self.revid(rev)
- finally:
- os.unlink(messagefile)
-
- def puttags(self, tags):
- self.ui.warn(_('writing Subversion tags is not yet implemented\n'))
- return None, None
diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/subversion.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/subversion.pyo
deleted file mode 100644
index df675f8..0000000
--- a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/subversion.pyo
+++ /dev/null
Binary files differ
diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/transport.py b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/transport.py
deleted file mode 100644
index db68ede..0000000
--- a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/transport.py
+++ /dev/null
@@ -1,128 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright (C) 2007 Daniel Holth <dholth@fastmail.fm>
-# This is a stripped-down version of the original bzr-svn transport.py,
-# Copyright (C) 2006 Jelmer Vernooij <jelmer@samba.org>
-
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-from svn.core import SubversionException, Pool
-import svn.ra
-import svn.client
-import svn.core
-
-# Some older versions of the Python bindings need to be
-# explicitly initialized. But what we want to do probably
-# won't work worth a darn against those libraries anyway!
-svn.ra.initialize()
-
-svn_config = svn.core.svn_config_get_config(None)
-
-
-def _create_auth_baton(pool):
- """Create a Subversion authentication baton. """
- import svn.client
- # Give the client context baton a suite of authentication
- # providers.
- providers = [
- svn.client.get_simple_provider(pool),
- svn.client.get_username_provider(pool),
- svn.client.get_ssl_client_cert_file_provider(pool),
- svn.client.get_ssl_client_cert_pw_file_provider(pool),
- svn.client.get_ssl_server_trust_file_provider(pool),
- ]
- # Platform-dependent authentication methods
- getprovider = getattr(svn.core, 'svn_auth_get_platform_specific_provider',
- None)
- if getprovider:
- # Available in svn >= 1.6
- for name in ('gnome_keyring', 'keychain', 'kwallet', 'windows'):
- for type in ('simple', 'ssl_client_cert_pw', 'ssl_server_trust'):
- p = getprovider(name, type, pool)
- if p:
- providers.append(p)
- else:
- if hasattr(svn.client, 'get_windows_simple_provider'):
- providers.append(svn.client.get_windows_simple_provider(pool))
-
- return svn.core.svn_auth_open(providers, pool)
-
-class NotBranchError(SubversionException):
- pass
-
-class SvnRaTransport(object):
- """
- Open an ra connection to a Subversion repository.
- """
- def __init__(self, url="", ra=None):
- self.pool = Pool()
- self.svn_url = url
- self.username = ''
- self.password = ''
-
- # Only Subversion 1.4 and newer has reparent()
- if ra is None or not hasattr(svn.ra, 'reparent'):
- self.client = svn.client.create_context(self.pool)
- ab = _create_auth_baton(self.pool)
- if False:
- svn.core.svn_auth_set_parameter(
- ab, svn.core.SVN_AUTH_PARAM_DEFAULT_USERNAME, self.username)
- svn.core.svn_auth_set_parameter(
- ab, svn.core.SVN_AUTH_PARAM_DEFAULT_PASSWORD, self.password)
- self.client.auth_baton = ab
- self.client.config = svn_config
- try:
- self.ra = svn.client.open_ra_session(
- self.svn_url.encode('utf8'),
- self.client, self.pool)
- except SubversionException, (inst, num):
- if num in (svn.core.SVN_ERR_RA_ILLEGAL_URL,
- svn.core.SVN_ERR_RA_LOCAL_REPOS_OPEN_FAILED,
- svn.core.SVN_ERR_BAD_URL):
- raise NotBranchError(url)
- raise
- else:
- self.ra = ra
- svn.ra.reparent(self.ra, self.svn_url.encode('utf8'))
-
- class Reporter(object):
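- # Thin wrapper exposing the reporter2 callback pair returned by
- # svn.ra.do_update() as a simple object.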
- def __init__(self, reporter_data):
- self._reporter, self._baton = reporter_data
-
- def set_path(self, path, revnum, start_empty, lock_token, pool=None):
- svn.ra.reporter2_invoke_set_path(self._reporter, self._baton,
- path, revnum, start_empty, lock_token, pool)
-
- def delete_path(self, path, pool=None):
- svn.ra.reporter2_invoke_delete_path(self._reporter, self._baton,
- path, pool)
-
- def link_path(self, path, url, revision, start_empty, lock_token,
- pool=None):
- svn.ra.reporter2_invoke_link_path(self._reporter, self._baton,
- path, url, revision, start_empty, lock_token,
- pool)
-
- def finish_report(self, pool=None):
- svn.ra.reporter2_invoke_finish_report(self._reporter,
- self._baton, pool)
-
- def abort_report(self, pool=None):
- svn.ra.reporter2_invoke_abort_report(self._reporter,
- self._baton, pool)
-
- def do_update(self, revnum, path, *args, **kwargs):
- return self.Reporter(svn.ra.do_update(self.ra, revnum, path,
- *args, **kwargs))
diff --git a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/transport.pyo b/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/transport.pyo
deleted file mode 100644
index ee1d3d1..0000000
--- a/eggs/mercurial-1.7.3-py2.6-linux-x86_64.egg/hgext/convert/transport.pyo
+++ /dev/null
Binary files differ