summaryrefslogtreecommitdiff
path: root/lib/python2.7/Tools/scripts
diff options
context:
space:
mode:
Diffstat (limited to 'lib/python2.7/Tools/scripts')
-rw-r--r--lib/python2.7/Tools/scripts/README69
-rw-r--r--lib/python2.7/Tools/scripts/analyze_dxp.py130
-rw-r--r--lib/python2.7/Tools/scripts/byext.py130
-rw-r--r--lib/python2.7/Tools/scripts/byteyears.py61
-rw-r--r--lib/python2.7/Tools/scripts/checkappend.py167
-rw-r--r--lib/python2.7/Tools/scripts/checkpip.py31
-rw-r--r--lib/python2.7/Tools/scripts/checkpyc.py66
-rw-r--r--lib/python2.7/Tools/scripts/classfix.py190
-rw-r--r--lib/python2.7/Tools/scripts/cleanfuture.py276
-rw-r--r--lib/python2.7/Tools/scripts/combinerefs.py127
-rw-r--r--lib/python2.7/Tools/scripts/copytime.py26
-rw-r--r--lib/python2.7/Tools/scripts/crlf.py23
-rw-r--r--lib/python2.7/Tools/scripts/cvsfiles.py72
-rw-r--r--lib/python2.7/Tools/scripts/db2pickle.py135
-rw-r--r--lib/python2.7/Tools/scripts/diff.py50
-rw-r--r--lib/python2.7/Tools/scripts/dutree.py60
-rw-r--r--lib/python2.7/Tools/scripts/eptags.py56
-rw-r--r--lib/python2.7/Tools/scripts/find_recursionlimit.py117
-rw-r--r--lib/python2.7/Tools/scripts/finddiv.py89
-rw-r--r--lib/python2.7/Tools/scripts/findlinksto.py43
-rw-r--r--lib/python2.7/Tools/scripts/findnocoding.py106
-rw-r--r--lib/python2.7/Tools/scripts/fixcid.py316
-rw-r--r--lib/python2.7/Tools/scripts/fixdiv.py378
-rw-r--r--lib/python2.7/Tools/scripts/fixheader.py49
-rw-r--r--lib/python2.7/Tools/scripts/fixnotice.py113
-rw-r--r--lib/python2.7/Tools/scripts/fixps.py33
-rw-r--r--lib/python2.7/Tools/scripts/google.py23
-rw-r--r--lib/python2.7/Tools/scripts/gprof2html.py79
-rw-r--r--lib/python2.7/Tools/scripts/h2py.py181
-rw-r--r--lib/python2.7/Tools/scripts/hotshotmain.py55
-rw-r--r--lib/python2.7/Tools/scripts/ifdef.py112
-rw-r--r--lib/python2.7/Tools/scripts/lfcr.py24
-rw-r--r--lib/python2.7/Tools/scripts/linktree.py80
-rw-r--r--lib/python2.7/Tools/scripts/lll.py28
-rw-r--r--lib/python2.7/Tools/scripts/logmerge.py185
-rw-r--r--lib/python2.7/Tools/scripts/mailerdaemon.py238
-rw-r--r--lib/python2.7/Tools/scripts/md5sum.py90
-rw-r--r--lib/python2.7/Tools/scripts/methfix.py171
-rw-r--r--lib/python2.7/Tools/scripts/mkreal.py66
-rw-r--r--lib/python2.7/Tools/scripts/ndiff.py133
-rw-r--r--lib/python2.7/Tools/scripts/nm2def.py103
-rw-r--r--lib/python2.7/Tools/scripts/objgraph.py215
-rw-r--r--lib/python2.7/Tools/scripts/parseentities.py64
-rw-r--r--lib/python2.7/Tools/scripts/patchcheck.py185
-rw-r--r--lib/python2.7/Tools/scripts/pathfix.py149
-rw-r--r--lib/python2.7/Tools/scripts/pdeps.py167
-rw-r--r--lib/python2.7/Tools/scripts/pickle2db.py147
-rw-r--r--lib/python2.7/Tools/scripts/pindent.py508
-rw-r--r--lib/python2.7/Tools/scripts/ptags.py53
-rw-r--r--lib/python2.7/Tools/scripts/pysource.py130
-rw-r--r--lib/python2.7/Tools/scripts/redemo.py172
-rw-r--r--lib/python2.7/Tools/scripts/reindent-rst.py14
-rw-r--r--lib/python2.7/Tools/scripts/reindent.py315
-rw-r--r--lib/python2.7/Tools/scripts/rgrep.py64
-rw-r--r--lib/python2.7/Tools/scripts/serve.py35
-rw-r--r--lib/python2.7/Tools/scripts/setup.py20
-rw-r--r--lib/python2.7/Tools/scripts/suff.py30
-rw-r--r--lib/python2.7/Tools/scripts/svneol.py91
-rw-r--r--lib/python2.7/Tools/scripts/texcheck.py233
-rw-r--r--lib/python2.7/Tools/scripts/texi2html.py2078
-rw-r--r--lib/python2.7/Tools/scripts/treesync.py205
-rw-r--r--lib/python2.7/Tools/scripts/untabify.py52
-rw-r--r--lib/python2.7/Tools/scripts/which.py60
-rw-r--r--lib/python2.7/Tools/scripts/win_add2path.py57
-rw-r--r--lib/python2.7/Tools/scripts/xxci.py116
65 files changed, 9641 insertions, 0 deletions
diff --git a/lib/python2.7/Tools/scripts/README b/lib/python2.7/Tools/scripts/README
new file mode 100644
index 0000000..eaf9aee
--- /dev/null
+++ b/lib/python2.7/Tools/scripts/README
@@ -0,0 +1,69 @@
+This directory contains a collection of executable Python scripts that
+are useful while building, extending or managing Python. Some (e.g.,
+dutree or lll) are also generally useful UNIX tools.
+
+See also the Demo/scripts directory!
+
+analyze_dxp.py Analyzes the result of sys.getdxp()
+byext.py Print lines/words/chars stats of files by extension
+byteyears.py Print product of a file's size and age
+checkappend.py Search for multi-argument .append() calls
+checkpyc.py Check presence and validity of ".pyc" files
+classfix.py Convert old class syntax to new
+cleanfuture.py Fix redundant Python __future__ statements
+combinerefs.py A helper for analyzing PYTHONDUMPREFS output.
+copytime.py Copy one file's atime and mtime to another
+crlf.py Change CRLF line endings to LF (Windows to Unix)
+cvsfiles.py Print a list of files that are under CVS
+db2pickle.py Dump a database file to a pickle
+diff.py Print file diffs in context, unified, or ndiff formats
+dutree.py Format du(1) output as a tree sorted by size
+eptags.py Create Emacs TAGS file for Python modules
+find_recursionlimit.py Find the maximum recursion limit on this machine
+finddiv.py A grep-like tool that looks for division operators
+findlinksto.py Recursively find symbolic links to a given path prefix
+findnocoding.py Find source files which need an encoding declaration
+fixcid.py Massive identifier substitution on C source files
+fixdiv.py Tool to fix division operators.
+fixheader.py Add some cpp magic to a C include file
+fixnotice.py Fix the copyright notice in source files
+fixps.py Fix Python scripts' first line (if #!)
+ftpmirror.py FTP mirror script
+google.py Open a webbrowser with Google
+gprof2html.py Transform gprof(1) output into useful HTML
+h2py.py Translate #define's into Python assignments
+hotshotmain.py Main program to run script under control of hotshot
+idle Main program to start IDLE
+ifdef.py Remove #if(n)def groups from C sources
+lfcr.py Change LF line endings to CRLF (Unix to Windows)
+linktree.py Make a copy of a tree with links to original files
+lll.py Find and list symbolic links in current directory
+logmerge.py Consolidate CVS/RCS logs read from stdin
+mailerdaemon.py parse error messages from mailer daemons (Sjoerd&Jack)
+md5sum.py Print MD5 checksums of argument files.
+methfix.py Fix old method syntax def f(self, (a1, ..., aN)):
+mkreal.py Turn a symbolic link into a real file or directory
+ndiff.py Intelligent diff between text files (Tim Peters)
+nm2def.py Create a template for PC/python_nt.def (Marc Lemburg)
+objgraph.py Print object graph from nm output on a library
+parseentities.py Utility for parsing HTML entity definitions
+pathfix.py Change #!/usr/local/bin/python into something else
+pdeps.py Print dependencies between Python modules
+pickle2db.py Load a pickle generated by db2pickle.py to a database
+pindent.py Indent Python code, giving block-closing comments
+ptags.py Create vi tags file for Python modules
+pydoc Python documentation browser.
+pysource.py Find Python source files
+redemo.py Basic regular expression demonstration facility
+reindent.py Change .py files to use 4-space indents.
+rgrep.py Reverse grep through a file (useful for big logfiles)
+serve.py Small wsgiref-based web server, used in make serve in Doc
+setup.py Install all scripts listed here
+suff.py Sort a list of files by suffix
+svneol.py Sets svn:eol-style on all files in directory
+texcheck.py Validate Python LaTeX formatting (Raymond Hettinger)
+texi2html.py Convert GNU texinfo files into HTML
+treesync.py Synchronize source trees (very idiosyncratic)
+untabify.py Replace tabs with spaces in argument files
+which.py Find a program in $PATH
+xxci.py Wrapper for rcsdiff and ci
diff --git a/lib/python2.7/Tools/scripts/analyze_dxp.py b/lib/python2.7/Tools/scripts/analyze_dxp.py
new file mode 100644
index 0000000..387b61a
--- /dev/null
+++ b/lib/python2.7/Tools/scripts/analyze_dxp.py
@@ -0,0 +1,130 @@
+#!/usr/bin/env python2
+"""
+Some helper functions to analyze the output of sys.getdxp() (which is
+only available if Python was built with -DDYNAMIC_EXECUTION_PROFILE).
+These will tell you which opcodes have been executed most frequently
+in the current process, and, if Python was also built with -DDXPAIRS,
+will tell you which instruction _pairs_ were executed most frequently,
+which may help in choosing new instructions.
+
+If Python was built without -DDYNAMIC_EXECUTION_PROFILE, importing
+this module will raise a RuntimeError.
+
+If you're running a script you want to profile, a simple way to get
+the common pairs is:
+
+$ PYTHONPATH=$PYTHONPATH:<python_srcdir>/Tools/scripts \
+./python -i -O the_script.py --args
+...
+> from analyze_dxp import *
+> s = render_common_pairs()
+> open('/tmp/some_file', 'w').write(s)
+"""
+
+import copy
+import opcode
+import operator
+import sys
+import threading
+
+if not hasattr(sys, "getdxp"):
+ raise RuntimeError("Can't import analyze_dxp: Python built without"
+ " -DDYNAMIC_EXECUTION_PROFILE.")
+
+
+_profile_lock = threading.RLock()
+_cumulative_profile = sys.getdxp()
+
+# If Python was built with -DDXPAIRS, sys.getdxp() returns a list of
+# lists of ints. Otherwise it returns just a list of ints.
+def has_pairs(profile):
+ """Returns True if the Python that produced the argument profile
+ was built with -DDXPAIRS."""
+
+ return len(profile) > 0 and isinstance(profile[0], list)
+
+
+def reset_profile():
+ """Forgets any execution profile that has been gathered so far."""
+ with _profile_lock:
+ sys.getdxp() # Resets the internal profile
+ global _cumulative_profile
+ _cumulative_profile = sys.getdxp() # 0s out our copy.
+
+
+def merge_profile():
+ """Reads sys.getdxp() and merges it into this module's cached copy.
+
+ We need this because sys.getdxp() 0s itself every time it's called."""
+
+ with _profile_lock:
+ new_profile = sys.getdxp()
+ if has_pairs(new_profile):
+ for first_inst in range(len(_cumulative_profile)):
+ for second_inst in range(len(_cumulative_profile[first_inst])):
+ _cumulative_profile[first_inst][second_inst] += (
+ new_profile[first_inst][second_inst])
+ else:
+ for inst in range(len(_cumulative_profile)):
+ _cumulative_profile[inst] += new_profile[inst]
+
+
+def snapshot_profile():
+ """Returns the cumulative execution profile until this call."""
+ with _profile_lock:
+ merge_profile()
+ return copy.deepcopy(_cumulative_profile)
+
+
+def common_instructions(profile):
+ """Returns the most common opcodes in order of descending frequency.
+
+ The result is a list of tuples of the form
+ (opcode, opname, # of occurrences)
+
+ """
+ if has_pairs(profile) and profile:
+ inst_list = profile[-1]
+ else:
+ inst_list = profile
+ result = [(op, opcode.opname[op], count)
+ for op, count in enumerate(inst_list)
+ if count > 0]
+ result.sort(key=operator.itemgetter(2), reverse=True)
+ return result
+
+
+def common_pairs(profile):
+ """Returns the most common opcode pairs in order of descending frequency.
+
+ The result is a list of tuples of the form
+ ((1st opcode, 2nd opcode),
+ (1st opname, 2nd opname),
+ # of occurrences of the pair)
+
+ """
+ if not has_pairs(profile):
+ return []
+ result = [((op1, op2), (opcode.opname[op1], opcode.opname[op2]), count)
+ # Drop the row of single-op profiles with [:-1]
+ for op1, op1profile in enumerate(profile[:-1])
+ for op2, count in enumerate(op1profile)
+ if count > 0]
+ result.sort(key=operator.itemgetter(2), reverse=True)
+ return result
+
+
+def render_common_pairs(profile=None):
+ """Renders the most common opcode pairs to a string in order of
+ descending frequency.
+
+ The result is a series of lines of the form:
+ # of occurrences: ('1st opname', '2nd opname')
+
+ """
+ if profile is None:
+ profile = snapshot_profile()
+ def seq():
+ for _, ops, count in common_pairs(profile):
+ yield "%s: %s\n" % (count, ops)
+ return ''.join(seq())
diff --git a/lib/python2.7/Tools/scripts/byext.py b/lib/python2.7/Tools/scripts/byext.py
new file mode 100644
index 0000000..eb7c395
--- /dev/null
+++ b/lib/python2.7/Tools/scripts/byext.py
@@ -0,0 +1,130 @@
+#!/usr/bin/env python2
+
+"""Show file statistics by extension."""
+
+from __future__ import print_function
+
+import os
+import sys
+
+class Stats:
+
+ def __init__(self):
+ self.stats = {}
+
+ def statargs(self, args):
+ for arg in args:
+ if os.path.isdir(arg):
+ self.statdir(arg)
+ elif os.path.isfile(arg):
+ self.statfile(arg)
+ else:
+ sys.stderr.write("Can't find %s\n" % arg)
+ self.addstats("<???>", "unknown", 1)
+
+ def statdir(self, dir):
+ self.addstats("<dir>", "dirs", 1)
+ try:
+ names = sorted(os.listdir(dir))
+ except os.error as err:
+ sys.stderr.write("Can't list %s: %s\n" % (dir, err))
+ self.addstats("<dir>", "unlistable", 1)
+ return
+ for name in names:
+ if name.startswith(".#"):
+ continue # Skip CVS temp files
+ if name.endswith("~"):
+ continue# Skip Emacs backup files
+ full = os.path.join(dir, name)
+ if os.path.islink(full):
+ self.addstats("<lnk>", "links", 1)
+ elif os.path.isdir(full):
+ self.statdir(full)
+ else:
+ self.statfile(full)
+
+ def statfile(self, filename):
+ head, ext = os.path.splitext(filename)
+ head, base = os.path.split(filename)
+ if ext == base:
+ ext = "" # E.g. .cvsignore is deemed not to have an extension
+ ext = os.path.normcase(ext)
+ if not ext:
+ ext = "<none>"
+ self.addstats(ext, "files", 1)
+ try:
+ f = open(filename, "rb")
+ except IOError as err:
+ sys.stderr.write("Can't open %s: %s\n" % (filename, err))
+ self.addstats(ext, "unopenable", 1)
+ return
+ data = f.read()
+ f.close()
+ self.addstats(ext, "bytes", len(data))
+ if b'\0' in data:
+ self.addstats(ext, "binary", 1)
+ return
+ if not data:
+ self.addstats(ext, "empty", 1)
+ #self.addstats(ext, "chars", len(data))
+ lines = data.splitlines()
+ self.addstats(ext, "lines", len(lines))
+ del lines
+ words = data.split()
+ self.addstats(ext, "words", len(words))
+
+ def addstats(self, ext, key, n):
+ d = self.stats.setdefault(ext, {})
+ d[key] = d.get(key, 0) + n
+
+ def report(self):
+ exts = sorted(self.stats.keys())
+ # Get the column keys
+ columns = {}
+ for ext in exts:
+ columns.update(self.stats[ext])
+ cols = sorted(columns.keys())
+ colwidth = {}
+ colwidth["ext"] = max([len(ext) for ext in exts])
+ minwidth = 6
+ self.stats["TOTAL"] = {}
+ for col in cols:
+ total = 0
+ cw = max(minwidth, len(col))
+ for ext in exts:
+ value = self.stats[ext].get(col)
+ if value is None:
+ w = 0
+ else:
+ w = len("%d" % value)
+ total += value
+ cw = max(cw, w)
+ cw = max(cw, len(str(total)))
+ colwidth[col] = cw
+ self.stats["TOTAL"][col] = total
+ exts.append("TOTAL")
+ for ext in exts:
+ self.stats[ext]["ext"] = ext
+ cols.insert(0, "ext")
+ def printheader():
+ for col in cols:
+ print("%*s" % (colwidth[col], col), end=" ")
+ print()
+ printheader()
+ for ext in exts:
+ for col in cols:
+ value = self.stats[ext].get(col, "")
+ print("%*s" % (colwidth[col], value), end=" ")
+ print()
+ printheader() # Another header at the bottom
+
+def main():
+ args = sys.argv[1:]
+ if not args:
+ args = [os.curdir]
+ s = Stats()
+ s.statargs(args)
+ s.report()
+
+if __name__ == "__main__":
+ main()
diff --git a/lib/python2.7/Tools/scripts/byteyears.py b/lib/python2.7/Tools/scripts/byteyears.py
new file mode 100644
index 0000000..e06171d
--- /dev/null
+++ b/lib/python2.7/Tools/scripts/byteyears.py
@@ -0,0 +1,61 @@
+#!/usr/bin/env python2
+
+# Print the product of age and size of each file, in suitable units.
+#
+# Usage: byteyears [ -a | -m | -c ] file ...
+#
+# Options -[amc] select atime, mtime (default) or ctime as age.
+
+import sys, os, time
+from stat import *
+
+def main():
+
+ # Use lstat() to stat files if it exists, else stat()
+ try:
+ statfunc = os.lstat
+ except AttributeError:
+ statfunc = os.stat
+
+ # Parse options
+ if sys.argv[1] == '-m':
+ itime = ST_MTIME
+ del sys.argv[1]
+ elif sys.argv[1] == '-c':
+ itime = ST_CTIME
+ del sys.argv[1]
+ elif sys.argv[1] == '-a':
+ itime = ST_CTIME
+ del sys.argv[1]
+ else:
+ itime = ST_MTIME
+
+ secs_per_year = 365.0 * 24.0 * 3600.0 # Scale factor
+ now = time.time() # Current time, for age computations
+ status = 0 # Exit status, set to 1 on errors
+
+ # Compute max file name length
+ maxlen = 1
+ for filename in sys.argv[1:]:
+ maxlen = max(maxlen, len(filename))
+
+ # Process each argument in turn
+ for filename in sys.argv[1:]:
+ try:
+ st = statfunc(filename)
+ except os.error, msg:
+ sys.stderr.write("can't stat %r: %r\n" % (filename, msg))
+ status = 1
+ st = ()
+ if st:
+ anytime = st[itime]
+ size = st[ST_SIZE]
+ age = now - anytime
+ byteyears = float(size) * float(age) / secs_per_year
+ print filename.ljust(maxlen),
+ print repr(int(byteyears)).rjust(8)
+
+ sys.exit(status)
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/python2.7/Tools/scripts/checkappend.py b/lib/python2.7/Tools/scripts/checkappend.py
new file mode 100644
index 0000000..daf55d3
--- /dev/null
+++ b/lib/python2.7/Tools/scripts/checkappend.py
@@ -0,0 +1,167 @@
+#!/usr/bin/env python2
+
+# Released to the public domain, by Tim Peters, 28 February 2000.
+
+"""checkappend.py -- search for multi-argument .append() calls.
+
+Usage: specify one or more file or directory paths:
+ checkappend [-v] file_or_dir [file_or_dir] ...
+
+Each file_or_dir is checked for multi-argument .append() calls. When
+a directory, all .py files in the directory, and recursively in its
+subdirectories, are checked.
+
+Use -v for status msgs. Use -vv for more status msgs.
+
+In the absence of -v, the only output is pairs of the form
+
+ filename(linenumber):
+ line containing the suspicious append
+
+Note that this finds multi-argument append calls regardless of whether
+they're attached to list objects. If a module defines a class with an
+append method that takes more than one argument, calls to that method
+will be listed.
+
+Note that this will not find multi-argument list.append calls made via a
+bound method object. For example, this is not caught:
+
+ somelist = []
+ push = somelist.append
+ push(1, 2, 3)
+"""
+
+__version__ = 1, 0, 0
+
+import os
+import sys
+import getopt
+import tokenize
+
+verbose = 0
+
+def errprint(*args):
+ msg = ' '.join(args)
+ sys.stderr.write(msg)
+ sys.stderr.write("\n")
+
+def main():
+ args = sys.argv[1:]
+ global verbose
+ try:
+ opts, args = getopt.getopt(sys.argv[1:], "v")
+ except getopt.error, msg:
+ errprint(str(msg) + "\n\n" + __doc__)
+ return
+ for opt, optarg in opts:
+ if opt == '-v':
+ verbose = verbose + 1
+ if not args:
+ errprint(__doc__)
+ return
+ for arg in args:
+ check(arg)
+
+def check(file):
+ if os.path.isdir(file) and not os.path.islink(file):
+ if verbose:
+ print "%r: listing directory" % (file,)
+ names = os.listdir(file)
+ for name in names:
+ fullname = os.path.join(file, name)
+ if ((os.path.isdir(fullname) and
+ not os.path.islink(fullname))
+ or os.path.normcase(name[-3:]) == ".py"):
+ check(fullname)
+ return
+
+ try:
+ f = open(file)
+ except IOError, msg:
+ errprint("%r: I/O Error: %s" % (file, msg))
+ return
+
+ if verbose > 1:
+ print "checking %r ..." % (file,)
+
+ ok = AppendChecker(file, f).run()
+ if verbose and ok:
+ print "%r: Clean bill of health." % (file,)
+
+[FIND_DOT,
+ FIND_APPEND,
+ FIND_LPAREN,
+ FIND_COMMA,
+ FIND_STMT] = range(5)
+
+class AppendChecker:
+ def __init__(self, fname, file):
+ self.fname = fname
+ self.file = file
+ self.state = FIND_DOT
+ self.nerrors = 0
+
+ def run(self):
+ try:
+ tokenize.tokenize(self.file.readline, self.tokeneater)
+ except tokenize.TokenError, msg:
+ errprint("%r: Token Error: %s" % (self.fname, msg))
+ self.nerrors = self.nerrors + 1
+ return self.nerrors == 0
+
+ def tokeneater(self, type, token, start, end, line,
+ NEWLINE=tokenize.NEWLINE,
+ JUNK=(tokenize.COMMENT, tokenize.NL),
+ OP=tokenize.OP,
+ NAME=tokenize.NAME):
+
+ state = self.state
+
+ if type in JUNK:
+ pass
+
+ elif state is FIND_DOT:
+ if type is OP and token == ".":
+ state = FIND_APPEND
+
+ elif state is FIND_APPEND:
+ if type is NAME and token == "append":
+ self.line = line
+ self.lineno = start[0]
+ state = FIND_LPAREN
+ else:
+ state = FIND_DOT
+
+ elif state is FIND_LPAREN:
+ if type is OP and token == "(":
+ self.level = 1
+ state = FIND_COMMA
+ else:
+ state = FIND_DOT
+
+ elif state is FIND_COMMA:
+ if type is OP:
+ if token in ("(", "{", "["):
+ self.level = self.level + 1
+ elif token in (")", "}", "]"):
+ self.level = self.level - 1
+ if self.level == 0:
+ state = FIND_DOT
+ elif token == "," and self.level == 1:
+ self.nerrors = self.nerrors + 1
+ print "%s(%d):\n%s" % (self.fname, self.lineno,
+ self.line)
+ # don't gripe about this stmt again
+ state = FIND_STMT
+
+ elif state is FIND_STMT:
+ if type is NEWLINE:
+ state = FIND_DOT
+
+ else:
+ raise SystemError("unknown internal state '%r'" % (state,))
+
+ self.state = state
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/python2.7/Tools/scripts/checkpip.py b/lib/python2.7/Tools/scripts/checkpip.py
new file mode 100644
index 0000000..1b6049d
--- /dev/null
+++ b/lib/python2.7/Tools/scripts/checkpip.py
@@ -0,0 +1,31 @@
+#!/usr/bin/env python2
+"""
+Checks that the version of the projects bundled in ensurepip are the latest
+versions available.
+"""
+import ensurepip
+import json
+import urllib2
+import sys
+
+
+def main():
+ outofdate = False
+
+ for project, version in ensurepip._PROJECTS:
+ data = json.loads(urllib2.urlopen(
+ "https://pypi.python.org/pypi/{}/json".format(project),
+ ).read().decode("utf8"))
+ upstream_version = data["info"]["version"]
+
+ if version != upstream_version:
+ outofdate = True
+ print("The latest version of {} on PyPI is {}, but ensurepip "
+ "has {}".format(project, upstream_version, version))
+
+ if outofdate:
+ sys.exit(1)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/lib/python2.7/Tools/scripts/checkpyc.py b/lib/python2.7/Tools/scripts/checkpyc.py
new file mode 100644
index 0000000..309a75c
--- /dev/null
+++ b/lib/python2.7/Tools/scripts/checkpyc.py
@@ -0,0 +1,66 @@
+#!/usr/bin/env python2
+# Check that all ".pyc" files exist and are up-to-date
+# Uses module 'os'
+
+import sys
+import os
+from stat import ST_MTIME
+import imp
+
+def main():
+ silent = 0
+ verbose = 0
+ if sys.argv[1:]:
+ if sys.argv[1] == '-v':
+ verbose = 1
+ elif sys.argv[1] == '-s':
+ silent = 1
+ MAGIC = imp.get_magic()
+ if not silent:
+ print 'Using MAGIC word', repr(MAGIC)
+ for dirname in sys.path:
+ try:
+ names = os.listdir(dirname)
+ except os.error:
+ print 'Cannot list directory', repr(dirname)
+ continue
+ if not silent:
+ print 'Checking ', repr(dirname), '...'
+ names.sort()
+ for name in names:
+ if name[-3:] == '.py':
+ name = os.path.join(dirname, name)
+ try:
+ st = os.stat(name)
+ except os.error:
+ print 'Cannot stat', repr(name)
+ continue
+ if verbose:
+ print 'Check', repr(name), '...'
+ name_c = name + 'c'
+ try:
+ f = open(name_c, 'r')
+ except IOError:
+ print 'Cannot open', repr(name_c)
+ continue
+ magic_str = f.read(4)
+ mtime_str = f.read(4)
+ f.close()
+ if magic_str <> MAGIC:
+ print 'Bad MAGIC word in ".pyc" file',
+ print repr(name_c)
+ continue
+ mtime = get_long(mtime_str)
+ if mtime == 0 or mtime == -1:
+ print 'Bad ".pyc" file', repr(name_c)
+ elif mtime <> st[ST_MTIME]:
+ print 'Out-of-date ".pyc" file',
+ print repr(name_c)
+
+def get_long(s):
+ if len(s) <> 4:
+ return -1
+ return ord(s[0]) + (ord(s[1])<<8) + (ord(s[2])<<16) + (ord(s[3])<<24)
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/python2.7/Tools/scripts/classfix.py b/lib/python2.7/Tools/scripts/classfix.py
new file mode 100644
index 0000000..c9008e9
--- /dev/null
+++ b/lib/python2.7/Tools/scripts/classfix.py
@@ -0,0 +1,190 @@
+#!/usr/bin/env python2
+
+# This script is obsolete -- it is kept for historical purposes only.
+#
+# Fix Python source files to use the new class definition syntax, i.e.,
+# the syntax used in Python versions before 0.9.8:
+# class C() = base(), base(), ...: ...
+# is changed to the current syntax:
+# class C(base, base, ...): ...
+#
+# The script uses heuristics to find class definitions that usually
+# work but occasionally can fail; carefully check the output!
+#
+# Command line arguments are files or directories to be processed.
+# Directories are searched recursively for files whose name looks
+# like a python module.
+# Symbolic links are always ignored (except as explicit directory
+# arguments). Of course, the original file is kept as a back-up
+# (with a "~" attached to its name).
+#
+# Changes made are reported to stdout in a diff-like format.
+#
+# Undoubtedly you can do this using find and sed or perl, but this is
+# a nice example of Python code that recurses down a directory tree
+# and uses regular expressions. Also note several subtleties like
+# preserving the file's mode and avoiding to even write a temp file
+# when no changes are needed for a file.
+#
+# NB: by changing only the function fixline() you can turn this
+# into a program for a different change to Python programs...
+
+import sys
+import re
+import os
+from stat import *
+
+err = sys.stderr.write
+dbg = err
+rep = sys.stdout.write
+
+def main():
+ bad = 0
+ if not sys.argv[1:]: # No arguments
+ err('usage: ' + sys.argv[0] + ' file-or-directory ...\n')
+ sys.exit(2)
+ for arg in sys.argv[1:]:
+ if os.path.isdir(arg):
+ if recursedown(arg): bad = 1
+ elif os.path.islink(arg):
+ err(arg + ': will not process symbolic links\n')
+ bad = 1
+ else:
+ if fix(arg): bad = 1
+ sys.exit(bad)
+
+ispythonprog = re.compile('^[a-zA-Z0-9_]+\.py$')
+def ispython(name):
+ return ispythonprog.match(name) >= 0
+
+def recursedown(dirname):
+ dbg('recursedown(%r)\n' % (dirname,))
+ bad = 0
+ try:
+ names = os.listdir(dirname)
+ except os.error, msg:
+ err('%s: cannot list directory: %r\n' % (dirname, msg))
+ return 1
+ names.sort()
+ subdirs = []
+ for name in names:
+ if name in (os.curdir, os.pardir): continue
+ fullname = os.path.join(dirname, name)
+ if os.path.islink(fullname): pass
+ elif os.path.isdir(fullname):
+ subdirs.append(fullname)
+ elif ispython(name):
+ if fix(fullname): bad = 1
+ for fullname in subdirs:
+ if recursedown(fullname): bad = 1
+ return bad
+
+def fix(filename):
+## dbg('fix(%r)\n' % (filename,))
+ try:
+ f = open(filename, 'r')
+ except IOError, msg:
+ err('%s: cannot open: %r\n' % (filename, msg))
+ return 1
+ head, tail = os.path.split(filename)
+ tempname = os.path.join(head, '@' + tail)
+ g = None
+ # If we find a match, we rewind the file and start over but
+ # now copy everything to a temp file.
+ lineno = 0
+ while 1:
+ line = f.readline()
+ if not line: break
+ lineno = lineno + 1
+ while line[-2:] == '\\\n':
+ nextline = f.readline()
+ if not nextline: break
+ line = line + nextline
+ lineno = lineno + 1
+ newline = fixline(line)
+ if newline != line:
+ if g is None:
+ try:
+ g = open(tempname, 'w')
+ except IOError, msg:
+ f.close()
+ err('%s: cannot create: %r\n' % (tempname, msg))
+ return 1
+ f.seek(0)
+ lineno = 0
+ rep(filename + ':\n')
+ continue # restart from the beginning
+ rep(repr(lineno) + '\n')
+ rep('< ' + line)
+ rep('> ' + newline)
+ if g is not None:
+ g.write(newline)
+
+ # End of file
+ f.close()
+ if not g: return 0 # No changes
+
+ # Finishing touch -- move files
+
+ # First copy the file's mode to the temp file
+ try:
+ statbuf = os.stat(filename)
+ os.chmod(tempname, statbuf[ST_MODE] & 07777)
+ except os.error, msg:
+ err('%s: warning: chmod failed (%r)\n' % (tempname, msg))
+ # Then make a backup of the original file as filename~
+ try:
+ os.rename(filename, filename + '~')
+ except os.error, msg:
+ err('%s: warning: backup failed (%r)\n' % (filename, msg))
+ # Now move the temp file to the original file
+ try:
+ os.rename(tempname, filename)
+ except os.error, msg:
+ err('%s: rename failed (%r)\n' % (filename, msg))
+ return 1
+    # Return success
+ return 0
+
+# This expression doesn't catch *all* class definition headers,
+# but it's pretty darn close.
+classexpr = '^([ \t]*class +[a-zA-Z0-9_]+) *( *) *((=.*)?):'
+classprog = re.compile(classexpr)
+
+# Expressions for finding base class expressions.
+baseexpr = '^ *(.*) *( *) *$'
+baseprog = re.compile(baseexpr)
+
+def fixline(line):
+ if classprog.match(line) < 0: # No 'class' keyword -- no change
+ return line
+
+ (a0, b0), (a1, b1), (a2, b2) = classprog.regs[:3]
+ # a0, b0 = Whole match (up to ':')
+ # a1, b1 = First subexpression (up to classname)
+ # a2, b2 = Second subexpression (=.*)
+ head = line[:b1]
+ tail = line[b0:] # Unmatched rest of line
+
+ if a2 == b2: # No base classes -- easy case
+ return head + ':' + tail
+
+ # Get rid of leading '='
+ basepart = line[a2+1:b2]
+
+ # Extract list of base expressions
+ bases = basepart.split(',')
+
+ # Strip trailing '()' from each base expression
+ for i in range(len(bases)):
+ if baseprog.match(bases[i]) >= 0:
+ x1, y1 = baseprog.regs[1]
+ bases[i] = bases[i][x1:y1]
+
+ # Join the bases back again and build the new line
+ basepart = ', '.join(bases)
+
+ return head + '(' + basepart + '):' + tail
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/python2.7/Tools/scripts/cleanfuture.py b/lib/python2.7/Tools/scripts/cleanfuture.py
new file mode 100644
index 0000000..e7814b9
--- /dev/null
+++ b/lib/python2.7/Tools/scripts/cleanfuture.py
@@ -0,0 +1,276 @@
+#!/usr/bin/env python2
+
+"""cleanfuture [-d][-r][-v] path ...
+
+-d Dry run. Analyze, but don't make any changes to, files.
+-r Recurse. Search for all .py files in subdirectories too.
+-v Verbose. Print informative msgs.
+
+Search Python (.py) files for future statements, and remove the features
+from such statements that are already mandatory in the version of Python
+you're using.
+
+Pass one or more file and/or directory paths. When a directory path, all
+.py files within the directory will be examined, and, if the -r option is
+given, likewise recursively for subdirectories.
+
+Overwrites files in place, renaming the originals with a .bak extension. If
+cleanfuture finds nothing to change, the file is left alone. If cleanfuture
+does change a file, the changed file is a fixed-point (i.e., running
+cleanfuture on the resulting .py file won't change it again, at least not
+until you try it again with a later Python release).
+
+Limitations: You can do these things, but this tool won't help you then:
+
++ A future statement cannot be mixed with any other statement on the same
+ physical line (separated by semicolon).
+
++ A future statement cannot contain an "as" clause.
+
+Example: Assuming you're using Python 2.2, if a file containing
+
+from __future__ import nested_scopes, generators
+
+is analyzed by cleanfuture, the line is rewritten to
+
+from __future__ import generators
+
+because nested_scopes is no longer optional in 2.2 but generators is.
+"""
+
+import __future__
+import tokenize
+import os
+import sys
+
+dryrun = 0
+recurse = 0
+verbose = 0
+
+def errprint(*args):
+ strings = map(str, args)
+ msg = ' '.join(strings)
+ if msg[-1:] != '\n':
+ msg += '\n'
+ sys.stderr.write(msg)
+
+def main():
+ import getopt
+ global verbose, recurse, dryrun
+ try:
+ opts, args = getopt.getopt(sys.argv[1:], "drv")
+ except getopt.error, msg:
+ errprint(msg)
+ return
+ for o, a in opts:
+ if o == '-d':
+ dryrun += 1
+ elif o == '-r':
+ recurse += 1
+ elif o == '-v':
+ verbose += 1
+ if not args:
+ errprint("Usage:", __doc__)
+ return
+ for arg in args:
+ check(arg)
+
+def check(file):
+ if os.path.isdir(file) and not os.path.islink(file):
+ if verbose:
+ print "listing directory", file
+ names = os.listdir(file)
+ for name in names:
+ fullname = os.path.join(file, name)
+ if ((recurse and os.path.isdir(fullname) and
+ not os.path.islink(fullname))
+ or name.lower().endswith(".py")):
+ check(fullname)
+ return
+
+ if verbose:
+ print "checking", file, "...",
+ try:
+ f = open(file)
+ except IOError, msg:
+ errprint("%r: I/O Error: %s" % (file, str(msg)))
+ return
+
+ ff = FutureFinder(f, file)
+ changed = ff.run()
+ if changed:
+ ff.gettherest()
+ f.close()
+ if changed:
+ if verbose:
+ print "changed."
+ if dryrun:
+ print "But this is a dry run, so leaving it alone."
+ for s, e, line in changed:
+ print "%r lines %d-%d" % (file, s+1, e+1)
+ for i in range(s, e+1):
+ print ff.lines[i],
+ if line is None:
+ print "-- deleted"
+ else:
+ print "-- change to:"
+ print line,
+ if not dryrun:
+ bak = file + ".bak"
+ if os.path.exists(bak):
+ os.remove(bak)
+ os.rename(file, bak)
+ if verbose:
+ print "renamed", file, "to", bak
+ g = open(file, "w")
+ ff.write(g)
+ g.close()
+ if verbose:
+ print "wrote new", file
+ else:
+ if verbose:
+ print "unchanged."
+
+class FutureFinder:
+
+ def __init__(self, f, fname):
+ self.f = f
+ self.fname = fname
+ self.ateof = 0
+ self.lines = [] # raw file lines
+
+ # List of (start_index, end_index, new_line) triples.
+ self.changed = []
+
+ # Line-getter for tokenize.
+ def getline(self):
+ if self.ateof:
+ return ""
+ line = self.f.readline()
+ if line == "":
+ self.ateof = 1
+ else:
+ self.lines.append(line)
+ return line
+
+ def run(self):
+ STRING = tokenize.STRING
+ NL = tokenize.NL
+ NEWLINE = tokenize.NEWLINE
+ COMMENT = tokenize.COMMENT
+ NAME = tokenize.NAME
+ OP = tokenize.OP
+
+ changed = self.changed
+ get = tokenize.generate_tokens(self.getline).next
+ type, token, (srow, scol), (erow, ecol), line = get()
+
+ # Chew up initial comments and blank lines (if any).
+ while type in (COMMENT, NL, NEWLINE):
+ type, token, (srow, scol), (erow, ecol), line = get()
+
+ # Chew up docstring (if any -- and it may be implicitly catenated!).
+ while type is STRING:
+ type, token, (srow, scol), (erow, ecol), line = get()
+
+ # Analyze the future stmts.
+ while 1:
+ # Chew up comments and blank lines (if any).
+ while type in (COMMENT, NL, NEWLINE):
+ type, token, (srow, scol), (erow, ecol), line = get()
+
+ if not (type is NAME and token == "from"):
+ break
+ startline = srow - 1 # tokenize is one-based
+ type, token, (srow, scol), (erow, ecol), line = get()
+
+ if not (type is NAME and token == "__future__"):
+ break
+ type, token, (srow, scol), (erow, ecol), line = get()
+
+ if not (type is NAME and token == "import"):
+ break
+ type, token, (srow, scol), (erow, ecol), line = get()
+
+ # Get the list of features.
+ features = []
+ while type is NAME:
+ features.append(token)
+ type, token, (srow, scol), (erow, ecol), line = get()
+
+ if not (type is OP and token == ','):
+ break
+ type, token, (srow, scol), (erow, ecol), line = get()
+
+ # A trailing comment?
+ comment = None
+ if type is COMMENT:
+ comment = token
+ type, token, (srow, scol), (erow, ecol), line = get()
+
+ if type is not NEWLINE:
+ errprint("Skipping file %r; can't parse line %d:\n%s" %
+ (self.fname, srow, line))
+ return []
+
+ endline = srow - 1
+
+ # Check for obsolete features.
+ okfeatures = []
+ for f in features:
+ object = getattr(__future__, f, None)
+ if object is None:
+ # A feature we don't know about yet -- leave it in.
+ # They'll get a compile-time error when they compile
+ # this program, but that's not our job to sort out.
+ okfeatures.append(f)
+ else:
+ released = object.getMandatoryRelease()
+ if released is None or released <= sys.version_info:
+ # Withdrawn or obsolete.
+ pass
+ else:
+ okfeatures.append(f)
+
+ # Rewrite the line if at least one future-feature is obsolete.
+ if len(okfeatures) < len(features):
+ if len(okfeatures) == 0:
+ line = None
+ else:
+ line = "from __future__ import "
+ line += ', '.join(okfeatures)
+ if comment is not None:
+ line += ' ' + comment
+ line += '\n'
+ changed.append((startline, endline, line))
+
+ # Loop back for more future statements.
+
+ return changed
+
+ def gettherest(self):
+ if self.ateof:
+ self.therest = ''
+ else:
+ self.therest = self.f.read()
+
+ def write(self, f):
+ changed = self.changed
+ assert changed
+ # Prevent calling this again.
+ self.changed = []
+ # Apply changes in reverse order.
+ changed.reverse()
+ for s, e, line in changed:
+ if line is None:
+ # pure deletion
+ del self.lines[s:e+1]
+ else:
+ self.lines[s:e+1] = [line]
+ f.writelines(self.lines)
+ # Copy over the remainder of the file.
+ if self.therest:
+ f.write(self.therest)
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/python2.7/Tools/scripts/combinerefs.py b/lib/python2.7/Tools/scripts/combinerefs.py
new file mode 100644
index 0000000..a2b6ef8
--- /dev/null
+++ b/lib/python2.7/Tools/scripts/combinerefs.py
@@ -0,0 +1,127 @@
+#!/usr/bin/env python2
+
+"""
+combinerefs path
+
+A helper for analyzing PYTHONDUMPREFS output.
+
+When the PYTHONDUMPREFS envar is set in a debug build, at Python shutdown
+time Py_Finalize() prints the list of all live objects twice: first it
+prints the repr() of each object while the interpreter is still fully intact.
+After cleaning up everything it can, it prints all remaining live objects
+again, but the second time just prints their addresses, refcounts, and type
+names (because the interpreter has been torn down, calling repr methods at
+this point can get into infinite loops or blow up).
+
+Save all this output into a file, then run this script passing the path to
+that file. The script finds both output chunks, combines them, then prints
+a line of output for each object still alive at the end:
+
+ address refcnt typename repr
+
+address is the address of the object, in whatever format the platform C
+produces for a %p format code.
+
+refcnt is of the form
+
+ "[" ref "]"
+
+when the object's refcount is the same in both PYTHONDUMPREFS output blocks,
+or
+
+ "[" ref_before "->" ref_after "]"
+
+if the refcount changed.
+
+typename is object->ob_type->tp_name, extracted from the second PYTHONDUMPREFS
+output block.
+
+repr is repr(object), extracted from the first PYTHONDUMPREFS output block.
+CAUTION: If object is a container type, it may not actually contain all the
+objects shown in the repr: the repr was captured from the first output block,
+and some of the containees may have been released since then. For example,
+it's common for the line showing the dict of interned strings to display
+strings that no longer exist at the end of Py_Finalize; this can be recognized
+(albeit painfully) because such containees don't have a line of their own.
+
+The objects are listed in allocation order, with most-recently allocated
+printed first, and the first object allocated printed last.
+
+
+Simple examples:
+
+ 00857060 [14] str '__len__'
+
+The str object '__len__' is alive at shutdown time, and both PYTHONDUMPREFS
+output blocks said there were 14 references to it. This is probably due to
+C modules that intern the string "__len__" and keep a reference to it in a
+file static.
+
+ 00857038 [46->5] tuple ()
+
+46-5 = 41 references to the empty tuple were removed by the cleanup actions
+between the times PYTHONDUMPREFS produced output.
+
+ 00858028 [1025->1456] str '<dummy key>'
+
+The string '<dummy key>', which is used in dictobject.c to overwrite a real
+key that gets deleted, grew several hundred references during cleanup. It
+suggests that stuff did get removed from dicts by cleanup, but that the dicts
+themselves are staying alive for some reason. """
+
+import re
+import sys
+
+# Generate lines from fileiter. If whilematch is true, continue reading
+# while the regexp object pat matches line. If whilematch is false, lines
+# are read so long as pat doesn't match them. In any case, the first line
+# that doesn't match pat (when whilematch is true), or that does match pat
+# (when whilematch is false), is lost, and fileiter will resume at the line
+# following it.
+def read(fileiter, pat, whilematch):
+ for line in fileiter:
+ if bool(pat.match(line)) == whilematch:
+ yield line
+ else:
+ break
+
+def combine(fname):
+ f = file(fname)
+ fi = iter(f)
+
+ for line in read(fi, re.compile(r'^Remaining objects:$'), False):
+ pass
+
+ crack = re.compile(r'([a-zA-Z\d]+) \[(\d+)\] (.*)')
+ addr2rc = {}
+ addr2guts = {}
+ before = 0
+ for line in read(fi, re.compile(r'^Remaining object addresses:$'), False):
+ m = crack.match(line)
+ if m:
+ addr, addr2rc[addr], addr2guts[addr] = m.groups()
+ before += 1
+ else:
+ print '??? skipped:', line
+
+ after = 0
+ for line in read(fi, crack, True):
+ after += 1
+ m = crack.match(line)
+ assert m
+ addr, rc, guts = m.groups() # guts is type name here
+ if addr not in addr2rc:
+ print '??? new object created while tearing down:', line.rstrip()
+ continue
+ print addr,
+ if rc == addr2rc[addr]:
+ print '[%s]' % rc,
+ else:
+ print '[%s->%s]' % (addr2rc[addr], rc),
+ print guts, addr2guts[addr]
+
+ f.close()
+ print "%d objects before, %d after" % (before, after)
+
+if __name__ == '__main__':
+ combine(sys.argv[1])
diff --git a/lib/python2.7/Tools/scripts/copytime.py b/lib/python2.7/Tools/scripts/copytime.py
new file mode 100644
index 0000000..ad88cb8
--- /dev/null
+++ b/lib/python2.7/Tools/scripts/copytime.py
@@ -0,0 +1,26 @@
+#!/usr/bin/env python2
+
+# Copy one file's atime and mtime to another
+
+import sys
+import os
+from stat import ST_ATIME, ST_MTIME # Really constants 7 and 8
+
+def main():
+ if len(sys.argv) <> 3:
+ sys.stderr.write('usage: copytime source destination\n')
+ sys.exit(2)
+ file1, file2 = sys.argv[1], sys.argv[2]
+ try:
+ stat1 = os.stat(file1)
+ except os.error:
+ sys.stderr.write(file1 + ': cannot stat\n')
+ sys.exit(1)
+ try:
+ os.utime(file2, (stat1[ST_ATIME], stat1[ST_MTIME]))
+ except os.error:
+ sys.stderr.write(file2 + ': cannot change time\n')
+ sys.exit(2)
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/python2.7/Tools/scripts/crlf.py b/lib/python2.7/Tools/scripts/crlf.py
new file mode 100644
index 0000000..69e2fc3
--- /dev/null
+++ b/lib/python2.7/Tools/scripts/crlf.py
@@ -0,0 +1,23 @@
+#!/usr/bin/env python2
+"Replace CRLF with LF in argument files. Print names of changed files."
+
+import sys, os
+
+def main():
+ for filename in sys.argv[1:]:
+ if os.path.isdir(filename):
+ print filename, "Directory!"
+ continue
+ data = open(filename, "rb").read()
+ if '\0' in data:
+ print filename, "Binary!"
+ continue
+ newdata = data.replace("\r\n", "\n")
+ if newdata != data:
+ print filename
+ f = open(filename, "wb")
+ f.write(newdata)
+ f.close()
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/python2.7/Tools/scripts/cvsfiles.py b/lib/python2.7/Tools/scripts/cvsfiles.py
new file mode 100644
index 0000000..eca1723
--- /dev/null
+++ b/lib/python2.7/Tools/scripts/cvsfiles.py
@@ -0,0 +1,72 @@
+#!/usr/bin/env python2
+
+"""Print a list of files that are mentioned in CVS directories.
+
+Usage: cvsfiles.py [-n file] [directory] ...
+
+If the '-n file' option is given, only files under CVS that are newer
+than the given file are printed; by default, all files under CVS are
+printed. As a special case, if a file does not exist, it is always
+printed.
+"""
+
+import os
+import sys
+import stat
+import getopt
+
+cutofftime = 0
+
+def main():
+ try:
+ opts, args = getopt.getopt(sys.argv[1:], "n:")
+ except getopt.error, msg:
+ print msg
+ print __doc__,
+ return 1
+ global cutofftime
+ newerfile = None
+ for o, a in opts:
+ if o == '-n':
+ cutofftime = getmtime(a)
+ if args:
+ for arg in args:
+ process(arg)
+ else:
+ process(".")
+
+def process(dir):
+ cvsdir = 0
+ subdirs = []
+ names = os.listdir(dir)
+ for name in names:
+ fullname = os.path.join(dir, name)
+ if name == "CVS":
+ cvsdir = fullname
+ else:
+ if os.path.isdir(fullname):
+ if not os.path.islink(fullname):
+ subdirs.append(fullname)
+ if cvsdir:
+ entries = os.path.join(cvsdir, "Entries")
+ for e in open(entries).readlines():
+ words = e.split('/')
+ if words[0] == '' and words[1:]:
+ name = words[1]
+ fullname = os.path.join(dir, name)
+ if cutofftime and getmtime(fullname) <= cutofftime:
+ pass
+ else:
+ print fullname
+ for sub in subdirs:
+ process(sub)
+
+def getmtime(filename):
+ try:
+ st = os.stat(filename)
+ except os.error:
+ return 0
+ return st[stat.ST_MTIME]
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/python2.7/Tools/scripts/db2pickle.py b/lib/python2.7/Tools/scripts/db2pickle.py
new file mode 100644
index 0000000..9f746b3
--- /dev/null
+++ b/lib/python2.7/Tools/scripts/db2pickle.py
@@ -0,0 +1,135 @@
+#!/usr/bin/env python2
+
+"""
+Synopsis: %(prog)s [-h|-g|-b|-r|-a] dbfile [ picklefile ]
+
+Convert the database file given on the command line to a pickle
+representation. The optional flags indicate the type of the database:
+
+ -a - open using anydbm
+ -b - open as bsddb btree file
+ -d - open as dbm file
+ -g - open as gdbm file
+ -h - open as bsddb hash file
+ -r - open as bsddb recno file
+
+The default is hash. If a pickle file is named it is opened for write
+access (deleting any existing data). If no pickle file is named, the pickle
+output is written to standard output.
+
+"""
+
+import getopt
+try:
+ import bsddb
+except ImportError:
+ bsddb = None
+try:
+ import dbm
+except ImportError:
+ dbm = None
+try:
+ import gdbm
+except ImportError:
+ gdbm = None
+try:
+ import anydbm
+except ImportError:
+ anydbm = None
+import sys
+try:
+ import cPickle as pickle
+except ImportError:
+ import pickle
+
+prog = sys.argv[0]
+
+def usage():
+ sys.stderr.write(__doc__ % globals())
+
+def main(args):
+ try:
+ opts, args = getopt.getopt(args, "hbrdag",
+ ["hash", "btree", "recno", "dbm",
+ "gdbm", "anydbm"])
+ except getopt.error:
+ usage()
+ return 1
+
+ if len(args) == 0 or len(args) > 2:
+ usage()
+ return 1
+ elif len(args) == 1:
+ dbfile = args[0]
+ pfile = sys.stdout
+ else:
+ dbfile = args[0]
+ try:
+ pfile = open(args[1], 'wb')
+ except IOError:
+ sys.stderr.write("Unable to open %s\n" % args[1])
+ return 1
+
+ dbopen = None
+ for opt, arg in opts:
+ if opt in ("-h", "--hash"):
+ try:
+ dbopen = bsddb.hashopen
+ except AttributeError:
+ sys.stderr.write("bsddb module unavailable.\n")
+ return 1
+ elif opt in ("-b", "--btree"):
+ try:
+ dbopen = bsddb.btopen
+ except AttributeError:
+ sys.stderr.write("bsddb module unavailable.\n")
+ return 1
+ elif opt in ("-r", "--recno"):
+ try:
+ dbopen = bsddb.rnopen
+ except AttributeError:
+ sys.stderr.write("bsddb module unavailable.\n")
+ return 1
+ elif opt in ("-a", "--anydbm"):
+ try:
+ dbopen = anydbm.open
+ except AttributeError:
+ sys.stderr.write("anydbm module unavailable.\n")
+ return 1
+ elif opt in ("-g", "--gdbm"):
+ try:
+ dbopen = gdbm.open
+ except AttributeError:
+ sys.stderr.write("gdbm module unavailable.\n")
+ return 1
+ elif opt in ("-d", "--dbm"):
+ try:
+ dbopen = dbm.open
+ except AttributeError:
+ sys.stderr.write("dbm module unavailable.\n")
+ return 1
+ if dbopen is None:
+ if bsddb is None:
+ sys.stderr.write("bsddb module unavailable - ")
+ sys.stderr.write("must specify dbtype.\n")
+ return 1
+ else:
+ dbopen = bsddb.hashopen
+
+ try:
+ db = dbopen(dbfile, 'r')
+ except bsddb.error:
+ sys.stderr.write("Unable to open %s. " % dbfile)
+ sys.stderr.write("Check for format or version mismatch.\n")
+ return 1
+
+ for k in db.keys():
+ pickle.dump((k, db[k]), pfile, 1==1)
+
+ db.close()
+ pfile.close()
+
+ return 0
+
+if __name__ == "__main__":
+ sys.exit(main(sys.argv[1:]))
diff --git a/lib/python2.7/Tools/scripts/diff.py b/lib/python2.7/Tools/scripts/diff.py
new file mode 100644
index 0000000..068cea6
--- /dev/null
+++ b/lib/python2.7/Tools/scripts/diff.py
@@ -0,0 +1,50 @@
+#!/usr/bin/env python2
+""" Command line interface to difflib.py providing diffs in four formats:
+
+* ndiff: lists every line and highlights interline changes.
+* context: highlights clusters of changes in a before/after format.
+* unified: highlights clusters of changes in an inline format.
+* html: generates side by side comparison with change highlights.
+
+"""
+
+import sys, os, time, difflib, optparse
+
+def main():
+
+ usage = "usage: %prog [options] fromfile tofile"
+ parser = optparse.OptionParser(usage)
+ parser.add_option("-c", action="store_true", default=False, help='Produce a context format diff (default)')
+ parser.add_option("-u", action="store_true", default=False, help='Produce a unified format diff')
+ parser.add_option("-m", action="store_true", default=False, help='Produce HTML side by side diff (can use -c and -l in conjunction)')
+ parser.add_option("-n", action="store_true", default=False, help='Produce a ndiff format diff')
+ parser.add_option("-l", "--lines", type="int", default=3, help='Set number of context lines (default 3)')
+ (options, args) = parser.parse_args()
+
+ if len(args) == 0:
+ parser.print_help()
+ sys.exit(1)
+ if len(args) != 2:
+ parser.error("need to specify both a fromfile and tofile")
+
+ n = options.lines
+ fromfile, tofile = args
+
+ fromdate = time.ctime(os.stat(fromfile).st_mtime)
+ todate = time.ctime(os.stat(tofile).st_mtime)
+ fromlines = open(fromfile, 'U').readlines()
+ tolines = open(tofile, 'U').readlines()
+
+ if options.u:
+ diff = difflib.unified_diff(fromlines, tolines, fromfile, tofile, fromdate, todate, n=n)
+ elif options.n:
+ diff = difflib.ndiff(fromlines, tolines)
+ elif options.m:
+ diff = difflib.HtmlDiff().make_file(fromlines,tolines,fromfile,tofile,context=options.c,numlines=n)
+ else:
+ diff = difflib.context_diff(fromlines, tolines, fromfile, tofile, fromdate, todate, n=n)
+
+ sys.stdout.writelines(diff)
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/python2.7/Tools/scripts/dutree.py b/lib/python2.7/Tools/scripts/dutree.py
new file mode 100644
index 0000000..f0edea3
--- /dev/null
+++ b/lib/python2.7/Tools/scripts/dutree.py
@@ -0,0 +1,60 @@
+#!/usr/bin/env python2
+# Format du output in a tree shape
+
+import os, sys, errno
+
+def main():
+ p = os.popen('du ' + ' '.join(sys.argv[1:]), 'r')
+ total, d = None, {}
+ for line in p.readlines():
+ i = 0
+ while line[i] in '0123456789': i = i+1
+ size = eval(line[:i])
+ while line[i] in ' \t': i = i+1
+ filename = line[i:-1]
+ comps = filename.split('/')
+ if comps[0] == '': comps[0] = '/'
+ if comps[len(comps)-1] == '': del comps[len(comps)-1]
+ total, d = store(size, comps, total, d)
+ try:
+ display(total, d)
+ except IOError, e:
+ if e.errno != errno.EPIPE:
+ raise
+
+def store(size, comps, total, d):
+ if comps == []:
+ return size, d
+ if not d.has_key(comps[0]):
+ d[comps[0]] = None, {}
+ t1, d1 = d[comps[0]]
+ d[comps[0]] = store(size, comps[1:], t1, d1)
+ return total, d
+
+def display(total, d):
+ show(total, d, '')
+
+def show(total, d, prefix):
+ if not d: return
+ list = []
+ sum = 0
+ for key in d.keys():
+ tsub, dsub = d[key]
+ list.append((tsub, key))
+ if tsub is not None: sum = sum + tsub
+## if sum < total:
+## list.append((total - sum, os.curdir))
+ list.sort()
+ list.reverse()
+ width = len(repr(list[0][0]))
+ for tsub, key in list:
+ if tsub is None:
+ psub = prefix
+ else:
+ print prefix + repr(tsub).rjust(width) + ' ' + key
+ psub = prefix + ' '*(width-1) + '|' + ' '*(len(key)+1)
+ if d.has_key(key):
+ show(tsub, d[key][1], psub)
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/python2.7/Tools/scripts/eptags.py b/lib/python2.7/Tools/scripts/eptags.py
new file mode 100644
index 0000000..38154f7
--- /dev/null
+++ b/lib/python2.7/Tools/scripts/eptags.py
@@ -0,0 +1,56 @@
+#!/usr/bin/env python2
+"""Create a TAGS file for Python programs, usable with GNU Emacs.
+
+usage: eptags pyfiles...
+
+The output TAGS file is usable with Emacs version 18, 19, 20.
+Tagged are:
+ - functions (even inside other defs or classes)
+ - classes
+
+eptags warns about files it cannot open.
+eptags will not give warnings about duplicate tags.
+
+BUGS:
+ Because of tag duplication (methods with the same name in different
+ classes), TAGS files are not very useful for most object-oriented
+ python projects.
+"""
+import sys,re
+
+expr = r'^[ \t]*(def|class)[ \t]+([a-zA-Z_][a-zA-Z0-9_]*)[ \t]*[:\(]'
+matcher = re.compile(expr)
+
+def treat_file(filename, outfp):
+ """Append tags found in file named 'filename' to the open file 'outfp'"""
+ try:
+ fp = open(filename, 'r')
+ except:
+ sys.stderr.write('Cannot open %s\n'%filename)
+ return
+ charno = 0
+ lineno = 0
+ tags = []
+ size = 0
+ while 1:
+ line = fp.readline()
+ if not line:
+ break
+ lineno = lineno + 1
+ m = matcher.search(line)
+ if m:
+ tag = m.group(0) + '\177%d,%d\n' % (lineno, charno)
+ tags.append(tag)
+ size = size + len(tag)
+ charno = charno + len(line)
+ outfp.write('\f\n%s,%d\n' % (filename,size))
+ for tag in tags:
+ outfp.write(tag)
+
+def main():
+ outfp = open('TAGS', 'w')
+ for filename in sys.argv[1:]:
+ treat_file(filename, outfp)
+
+if __name__=="__main__":
+ main()
diff --git a/lib/python2.7/Tools/scripts/find_recursionlimit.py b/lib/python2.7/Tools/scripts/find_recursionlimit.py
new file mode 100644
index 0000000..add591c
--- /dev/null
+++ b/lib/python2.7/Tools/scripts/find_recursionlimit.py
@@ -0,0 +1,117 @@
+#!/usr/bin/env python2
+"""Find the maximum recursion limit that prevents interpreter termination.
+
+This script finds the maximum safe recursion limit on a particular
+platform. If you need to change the recursion limit on your system,
+this script will tell you a safe upper bound. To use the new limit,
+call sys.setrecursionlimit().
+
+This module implements several ways to create infinite recursion in
+Python. Different implementations end up pushing different numbers of
+C stack frames, depending on how many calls through Python's abstract
+C API occur.
+
+After each round of tests, it prints a message:
+"Limit of NNNN is fine".
+
+The highest printed value of "NNNN" is therefore the highest potentially
+safe limit for your system (which depends on the OS, architecture, but also
+the compilation flags). Please note that it is practically impossible to
+test all possible recursion paths in the interpreter, so the results of
+this test should not be trusted blindly -- although they give a good hint
+of which values are reasonable.
+
+NOTE: When the C stack space allocated by your system is exceeded due
+to excessive recursion, exact behaviour depends on the platform, although
+the interpreter will always fail in a likely brutal way: either a
+segmentation fault, a MemoryError, or just a silent abort.
+
+NB: A program that does not use __methods__ can set a higher limit.
+"""
+
+import sys
+import itertools
+
+class RecursiveBlowup1:
+ def __init__(self):
+ self.__init__()
+
+def test_init():
+ return RecursiveBlowup1()
+
+class RecursiveBlowup2:
+ def __repr__(self):
+ return repr(self)
+
+def test_repr():
+ return repr(RecursiveBlowup2())
+
+class RecursiveBlowup4:
+ def __add__(self, x):
+ return x + self
+
+def test_add():
+ return RecursiveBlowup4() + RecursiveBlowup4()
+
+class RecursiveBlowup5:
+ def __getattr__(self, attr):
+ return getattr(self, attr)
+
+def test_getattr():
+ return RecursiveBlowup5().attr
+
+class RecursiveBlowup6:
+ def __getitem__(self, item):
+ return self[item - 2] + self[item - 1]
+
+def test_getitem():
+ return RecursiveBlowup6()[5]
+
+def test_recurse():
+ return test_recurse()
+
+def test_cpickle(_cache={}):
+ try:
+ import cPickle
+ except ImportError:
+ print "cannot import cPickle, skipped!"
+ return
+ l = None
+ for n in itertools.count():
+ try:
+ l = _cache[n]
+ continue # Already tried and it works, let's save some time
+ except KeyError:
+ for i in range(100):
+ l = [l]
+ cPickle.dumps(l, protocol=-1)
+ _cache[n] = l
+
+def check_limit(n, test_func_name):
+ sys.setrecursionlimit(n)
+ if test_func_name.startswith("test_"):
+ print test_func_name[5:]
+ else:
+ print test_func_name
+ test_func = globals()[test_func_name]
+ try:
+ test_func()
+ # AttributeError can be raised because of the way e.g. PyDict_GetItem()
+ # silences all exceptions and returns NULL, which is usually interpreted
+ # as "missing attribute".
+ except (RuntimeError, AttributeError):
+ pass
+ else:
+ print "Yikes!"
+
+limit = 1000
+while 1:
+ check_limit(limit, "test_recurse")
+ check_limit(limit, "test_add")
+ check_limit(limit, "test_repr")
+ check_limit(limit, "test_init")
+ check_limit(limit, "test_getattr")
+ check_limit(limit, "test_getitem")
+ check_limit(limit, "test_cpickle")
+ print "Limit of %d is fine" % limit
+ limit = limit + 100
diff --git a/lib/python2.7/Tools/scripts/finddiv.py b/lib/python2.7/Tools/scripts/finddiv.py
new file mode 100644
index 0000000..d597744
--- /dev/null
+++ b/lib/python2.7/Tools/scripts/finddiv.py
@@ -0,0 +1,89 @@
+#!/usr/bin/env python2
+
+"""finddiv - a grep-like tool that looks for division operators.
+
+Usage: finddiv [-l] file_or_directory ...
+
+For directory arguments, all files in the directory whose name ends in
+.py are processed, and subdirectories are processed recursively.
+
+This actually tokenizes the files to avoid false hits in comments or
+string literals.
+
+By default, this prints all lines containing a / or /= operator, in
+grep -n style. With the -l option specified, it prints the filename
+of files that contain at least one / or /= operator.
+"""
+
+import os
+import sys
+import getopt
+import tokenize
+
+def main():
+ try:
+ opts, args = getopt.getopt(sys.argv[1:], "lh")
+ except getopt.error, msg:
+ usage(msg)
+ return 2
+ if not args:
+ usage("at least one file argument is required")
+ return 2
+ listnames = 0
+ for o, a in opts:
+ if o == "-h":
+ print __doc__
+ return
+ if o == "-l":
+ listnames = 1
+ exit = None
+ for filename in args:
+ x = process(filename, listnames)
+ exit = exit or x
+ return exit
+
+def usage(msg):
+ sys.stderr.write("%s: %s\n" % (sys.argv[0], msg))
+ sys.stderr.write("Usage: %s [-l] file ...\n" % sys.argv[0])
+ sys.stderr.write("Try `%s -h' for more information.\n" % sys.argv[0])
+
+def process(filename, listnames):
+ if os.path.isdir(filename):
+ return processdir(filename, listnames)
+ try:
+ fp = open(filename)
+ except IOError, msg:
+ sys.stderr.write("Can't open: %s\n" % msg)
+ return 1
+ g = tokenize.generate_tokens(fp.readline)
+ lastrow = None
+ for type, token, (row, col), end, line in g:
+ if token in ("/", "/="):
+ if listnames:
+ print filename
+ break
+ if row != lastrow:
+ lastrow = row
+ print "%s:%d:%s" % (filename, row, line),
+ fp.close()
+
+def processdir(dir, listnames):
+ try:
+ names = os.listdir(dir)
+ except os.error, msg:
+ sys.stderr.write("Can't list directory: %s\n" % dir)
+ return 1
+ files = []
+ for name in names:
+ fn = os.path.join(dir, name)
+ if os.path.normcase(fn).endswith(".py") or os.path.isdir(fn):
+ files.append(fn)
+ files.sort(lambda a, b: cmp(os.path.normcase(a), os.path.normcase(b)))
+ exit = None
+ for fn in files:
+ x = process(fn, listnames)
+ exit = exit or x
+ return exit
+
+if __name__ == "__main__":
+ sys.exit(main())
diff --git a/lib/python2.7/Tools/scripts/findlinksto.py b/lib/python2.7/Tools/scripts/findlinksto.py
new file mode 100644
index 0000000..0641b15
--- /dev/null
+++ b/lib/python2.7/Tools/scripts/findlinksto.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python2
+
+# findlinksto
+#
+# find symbolic links to a path matching a regular expression
+
+import os
+import sys
+import re
+import getopt
+
+def main():
+ try:
+ opts, args = getopt.getopt(sys.argv[1:], '')
+ if len(args) < 2:
+ raise getopt.GetoptError('not enough arguments', None)
+ except getopt.GetoptError, msg:
+ sys.stdout = sys.stderr
+ print msg
+ print 'usage: findlinksto pattern directory ...'
+ sys.exit(2)
+ pat, dirs = args[0], args[1:]
+ prog = re.compile(pat)
+ for dirname in dirs:
+ os.path.walk(dirname, visit, prog)
+
+def visit(prog, dirname, names):
+ if os.path.islink(dirname):
+ names[:] = []
+ return
+ if os.path.ismount(dirname):
+ print 'descend into', dirname
+ for name in names:
+ name = os.path.join(dirname, name)
+ try:
+ linkto = os.readlink(name)
+ if prog.search(linkto) is not None:
+ print name, '->', linkto
+ except os.error:
+ pass
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/python2.7/Tools/scripts/findnocoding.py b/lib/python2.7/Tools/scripts/findnocoding.py
new file mode 100644
index 0000000..62f65b8
--- /dev/null
+++ b/lib/python2.7/Tools/scripts/findnocoding.py
@@ -0,0 +1,106 @@
+#!/usr/bin/env python2
+
+"""List all those Python files that require a coding directive
+
+Usage: nocoding.py dir1 [dir2...]
+"""
+
+__author__ = "Oleg Broytmann, Georg Brandl"
+
+import sys, os, re, getopt
+
+# our pysource module finds Python source files
+try:
+ import pysource
+except ImportError:
+ # emulate the module with a simple os.walk
+ class pysource:
+ has_python_ext = looks_like_python = can_be_compiled = None
+ def walk_python_files(self, paths, *args, **kwargs):
+ for path in paths:
+ if os.path.isfile(path):
+ yield path.endswith(".py")
+ elif os.path.isdir(path):
+ for root, dirs, files in os.walk(path):
+ for filename in files:
+ if filename.endswith(".py"):
+ yield os.path.join(root, filename)
+ pysource = pysource()
+
+
+ print >>sys.stderr, ("The pysource module is not available; "
+ "no sophisticated Python source file search will be done.")
+
+
+decl_re = re.compile(r'^[ \t\f]*#.*?coding[:=][ \t]*([-\w.]+)')
+blank_re = re.compile(r'^[ \t\f]*(?:[#\r\n]|$)')
+
+def get_declaration(line):
+ match = decl_re.match(line)
+ if match:
+ return match.group(1)
+ return b''
+
+def has_correct_encoding(text, codec):
+ try:
+ unicode(text, codec)
+ except UnicodeDecodeError:
+ return False
+ else:
+ return True
+
+def needs_declaration(fullpath):
+ try:
+ infile = open(fullpath, 'rU')
+ except IOError: # Oops, the file was removed - ignore it
+ return None
+
+ line1 = infile.readline()
+ line2 = infile.readline()
+
+ if (get_declaration(line1) or
+ blank_re.match(line1) and get_declaration(line2)):
+ # the file does have an encoding declaration, so trust it
+ infile.close()
+ return False
+
+ # check the whole file for non-ASCII characters
+ rest = infile.read()
+ infile.close()
+
+ if has_correct_encoding(line1+line2+rest, "ascii"):
+ return False
+
+ return True
+
+
+usage = """Usage: %s [-cd] paths...
+ -c: recognize Python source files trying to compile them
+ -d: debug output""" % sys.argv[0]
+
+try:
+ opts, args = getopt.getopt(sys.argv[1:], 'cd')
+except getopt.error, msg:
+ print >>sys.stderr, msg
+ print >>sys.stderr, usage
+ sys.exit(1)
+
+is_python = pysource.looks_like_python
+debug = False
+
+for o, a in opts:
+ if o == '-c':
+ is_python = pysource.can_be_compiled
+ elif o == '-d':
+ debug = True
+
+if not args:
+ print >>sys.stderr, usage
+ sys.exit(1)
+
+for fullpath in pysource.walk_python_files(args, is_python):
+ if debug:
+ print "Testing for coding: %s" % fullpath
+ result = needs_declaration(fullpath)
+ if result:
+ print fullpath
diff --git a/lib/python2.7/Tools/scripts/fixcid.py b/lib/python2.7/Tools/scripts/fixcid.py
new file mode 100644
index 0000000..3395793
--- /dev/null
+++ b/lib/python2.7/Tools/scripts/fixcid.py
@@ -0,0 +1,316 @@
+#!/usr/bin/env python2
+
+# Perform massive identifier substitution on C source files.
+# This actually tokenizes the files (to some extent) so it can
+# avoid making substitutions inside strings or comments.
+# Inside strings, substitutions are never made; inside comments,
+# it is a user option (off by default).
+#
+# The substitutions are read from one or more files whose lines,
+# when not empty, after stripping comments starting with #,
+# must contain exactly two words separated by whitespace: the
+# old identifier and its replacement.
+#
+# The option -r reverses the sense of the substitutions (this may be
+# useful to undo a particular substitution).
+#
+# If the old identifier is prefixed with a '*' (with no intervening
+# whitespace), then it will not be substituted inside comments.
+#
+# Command line arguments are files or directories to be processed.
+# Directories are searched recursively for files whose name looks
+# like a C file (ends in .h or .c). The special filename '-' means
+# operate in filter mode: read stdin, write stdout.
+#
+# Symbolic links are always ignored (except as explicit directory
+# arguments).
+#
+# The original files are kept as back-up with a "~" suffix.
+#
+# Changes made are reported to stdout in a diff-like format.
+#
+# NB: by changing only the function fixline() you can turn this
+# into a program for different changes to C source files; by
+# changing the function wanted() you can make a different selection of
+# files.
+
+import sys
+import re
+import os
+from stat import *
+import getopt
+
+err = sys.stderr.write
+dbg = err
+rep = sys.stdout.write
+
def usage():
    """Write fixcid's usage and option help to stderr."""
    err('Usage: ' + sys.argv[0] +
        ' [-c] [-r] [-s file] ... file-or-directory ...\n')
    err('\n'
        '-c : substitute inside comments\n'
        '-r : reverse direction for following -s options\n'
        '-s substfile : add a file of substitutions\n'
        '\n'
        'Each non-empty non-comment line in a substitution file must\n'
        'contain exactly two words: an identifier and its replacement.\n'
        'Comments start with a # character and end at end of line.\n'
        'If an identifier is preceded with a *, it is not substituted\n'
        'inside a comment even when -c is specified.\n')
+
+def main():
+ try:
+ opts, args = getopt.getopt(sys.argv[1:], 'crs:')
+ except getopt.error, msg:
+ err('Options error: ' + str(msg) + '\n')
+ usage()
+ sys.exit(2)
+ bad = 0
+ if not args: # No arguments
+ usage()
+ sys.exit(2)
+ for opt, arg in opts:
+ if opt == '-c':
+ setdocomments()
+ if opt == '-r':
+ setreverse()
+ if opt == '-s':
+ addsubst(arg)
+ for arg in args:
+ if os.path.isdir(arg):
+ if recursedown(arg): bad = 1
+ elif os.path.islink(arg):
+ err(arg + ': will not process symbolic links\n')
+ bad = 1
+ else:
+ if fix(arg): bad = 1
+ sys.exit(bad)
+
# Change this regular expression to select a different set of files
Wanted = r'^[a-zA-Z0-9_]+\.[ch]$'
def wanted(name):
    # Return a match object if *name* looks like a C source/header file.
    return re.match(Wanted, name)
+
+def recursedown(dirname):
+ dbg('recursedown(%r)\n' % (dirname,))
+ bad = 0
+ try:
+ names = os.listdir(dirname)
+ except os.error, msg:
+ err(dirname + ': cannot list directory: ' + str(msg) + '\n')
+ return 1
+ names.sort()
+ subdirs = []
+ for name in names:
+ if name in (os.curdir, os.pardir): continue
+ fullname = os.path.join(dirname, name)
+ if os.path.islink(fullname): pass
+ elif os.path.isdir(fullname):
+ subdirs.append(fullname)
+ elif wanted(name):
+ if fix(fullname): bad = 1
+ for fullname in subdirs:
+ if recursedown(fullname): bad = 1
+ return bad
+
def fix(filename):
    """Apply the substitutions to one file.

    '-' means filter mode (stdin to stdout).  Otherwise the file is
    rewritten in place via a temp file ('@' + name) and the original is
    kept as a backup with a '~' suffix.  Returns 1 on error, 0 on success.
    """
## dbg('fix(%r)\n' % (filename,))
    if filename == '-':
        # Filter mode
        f = sys.stdin
        g = sys.stdout
    else:
        # File replacement mode
        try:
            f = open(filename, 'r')
        except IOError, msg:
            err(filename + ': cannot open: ' + str(msg) + '\n')
            return 1
        head, tail = os.path.split(filename)
        tempname = os.path.join(head, '@' + tail)
        g = None
    # If we find a match, we rewind the file and start over but
    # now copy everything to a temp file.
    lineno = 0
    initfixline()
    while 1:
        line = f.readline()
        if not line: break
        lineno = lineno + 1
        # Glue backslash-continued lines together so a substitution
        # cannot be split across a continuation.
        while line[-2:] == '\\\n':
            nextline = f.readline()
            if not nextline: break
            line = line + nextline
            lineno = lineno + 1
        newline = fixline(line)
        if newline != line:
            if g is None:
                # First change found: open the temp file, rewind the
                # input, and restart, copying everything this time.
                try:
                    g = open(tempname, 'w')
                except IOError, msg:
                    f.close()
                    err(tempname+': cannot create: '+
                        str(msg)+'\n')
                    return 1
                f.seek(0)
                lineno = 0
                initfixline()
                rep(filename + ':\n')
                continue # restart from the beginning
            rep(repr(lineno) + '\n')
            rep('< ' + line)
            rep('> ' + newline)
        if g is not None:
            g.write(newline)

    # End of file
    if filename == '-': return 0 # Done in filter mode
    f.close()
    if not g: return 0 # No changes
    g.close()

    # Finishing touch -- move files

    # First copy the file's mode to the temp file
    try:
        statbuf = os.stat(filename)
        os.chmod(tempname, statbuf[ST_MODE] & 07777)
    except os.error, msg:
        err(tempname + ': warning: chmod failed (' + str(msg) + ')\n')
    # Then make a backup of the original file as filename~
    try:
        os.rename(filename, filename + '~')
    except os.error, msg:
        err(filename + ': warning: backup failed (' + str(msg) + ')\n')
    # Now move the temp file to the original file
    try:
        os.rename(tempname, filename)
    except os.error, msg:
        err(filename + ': rename failed (' + str(msg) + ')\n')
        return 1
    # Return success
    return 0
+
# Tokenizing ANSI C (partly)

# NOTE(review): the trailing '+' means identifiers must be at least two
# characters long, so single-letter identifiers (e.g. 'i') are never
# matched or substituted -- confirm whether that is intended.
Identifier = '(struct )?[a-zA-Z_][a-zA-Z0-9_]+'
String = r'"([^\n\\"]|\\.)*"'
Char = r"'([^\n\\']|\\.)*'"
CommentStart = r'/\*'
CommentEnd = r'\*/'

Hexnumber = '0[xX][0-9a-fA-F]*[uUlL]*'
Octnumber = '0[0-7]*[uUlL]*'
Decnumber = '[1-9][0-9]*[uUlL]*'
Intnumber = Hexnumber + '|' + Octnumber + '|' + Decnumber
Exponent = '[eE][-+]?[0-9]+'
Pointfloat = r'([0-9]+\.[0-9]*|\.[0-9]+)(' + Exponent + r')?'
Expfloat = '[0-9]+' + Exponent
Floatnumber = Pointfloat + '|' + Expfloat
Number = Floatnumber + '|' + Intnumber

# Anything else is an operator -- don't list this explicitly because of '/*'

# Outside comments: identifiers, numbers, strings, char literals, or the
# opening of a comment are the tokens of interest.
OutsideComment = (Identifier, Number, String, Char, CommentStart)
OutsideCommentPattern = '(' + '|'.join(OutsideComment) + ')'
OutsideCommentProgram = re.compile(OutsideCommentPattern)

# Inside comments: only identifiers, numbers, or the comment terminator.
InsideComment = (Identifier, Number, CommentEnd)
InsideCommentPattern = '(' + '|'.join(InsideComment) + ')'
InsideCommentProgram = re.compile(InsideCommentPattern)
+
def initfixline():
    # Reset the tokenizer state: each file starts outside any comment.
    global Program
    Program = OutsideCommentProgram
+
def fixline(line):
    """Return *line* with all applicable substitutions performed.

    Comment state is carried across calls in the module-global Program,
    which flips between the outside-/inside-comment tokenizers whenever
    '/*' or '*/' is seen.  Substitutions inside strings never happen
    (strings are matched as whole tokens); substitutions inside comments
    depend on the Docomments flag and the NotInComment set.
    """
    global Program
## print '-->', repr(line)
    i = 0
    while i < len(line):
        match = Program.search(line, i)
        if match is None: break
        i = match.start()
        found = match.group(0)
## if Program is InsideCommentProgram: print '...',
## else: print ' ',
## print found
        if len(found) == 2:
            # The only two-character tokens of interest are the comment
            # delimiters; they switch the tokenizer state.
            if found == '/*':
                Program = InsideCommentProgram
            elif found == '*/':
                Program = OutsideCommentProgram
        n = len(found)
        if found in Dict:
            subst = Dict[found]
            if Program is InsideCommentProgram:
                if not Docomments:
                    # -c not given: report but do not touch comments.
                    print 'Found in comment:', found
                    i = i + n
                    continue
                if found in NotInComment:
## print 'Ignored in comment:',
## print found, '-->', subst
## print 'Line:', line,
                    # '*'-prefixed identifiers are exempt in comments.
                    subst = found
## else:
## print 'Substituting in comment:',
## print found, '-->', subst
## print 'Line:', line,
            line = line[:i] + subst + line[i+n:]
            n = len(subst)
        i = i + n
    return line
+
# Flag set by the -c option: also substitute inside comments.
Docomments = 0
def setdocomments():
    global Docomments
    Docomments = 1
+
# Flag toggled by the -r option: swap old/new in following -s files.
Reverse = 0
def setreverse():
    global Reverse
    Reverse = (not Reverse)
+
+Dict = {}
+NotInComment = {}
+def addsubst(substfile):
+ try:
+ fp = open(substfile, 'r')
+ except IOError, msg:
+ err(substfile + ': cannot read substfile: ' + str(msg) + '\n')
+ sys.exit(1)
+ lineno = 0
+ while 1:
+ line = fp.readline()
+ if not line: break
+ lineno = lineno + 1
+ try:
+ i = line.index('#')
+ except ValueError:
+ i = -1 # Happens to delete trailing \n
+ words = line[:i].split()
+ if not words: continue
+ if len(words) == 3 and words[0] == 'struct':
+ words[:2] = [words[0] + ' ' + words[1]]
+ elif len(words) != 2:
+ err(substfile + '%s:%r: warning: bad line: %r' % (substfile, lineno, line))
+ continue
+ if Reverse:
+ [value, key] = words
+ else:
+ [key, value] = words
+ if value[0] == '*':
+ value = value[1:]
+ if key[0] == '*':
+ key = key[1:]
+ NotInComment[key] = value
+ if key in Dict:
+ err('%s:%r: warning: overriding: %r %r\n' % (substfile, lineno, key, value))
+ err('%s:%r: warning: previous: %r\n' % (substfile, lineno, Dict[key]))
+ Dict[key] = value
+ fp.close()
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/python2.7/Tools/scripts/fixdiv.py b/lib/python2.7/Tools/scripts/fixdiv.py
new file mode 100644
index 0000000..cdd1914
--- /dev/null
+++ b/lib/python2.7/Tools/scripts/fixdiv.py
@@ -0,0 +1,378 @@
+#!/usr/bin/env python2
+
+"""fixdiv - tool to fix division operators.
+
+To use this tool, first run `python -Qwarnall yourscript.py 2>warnings'.
+This runs the script `yourscript.py' while writing warning messages
+about all uses of the classic division operator to the file
+`warnings'. The warnings look like this:
+
+ <file>:<line>: DeprecationWarning: classic <type> division
+
+The warnings are written to stderr, so you must use `2>' for the I/O
+redirect. I know of no way to redirect stderr on Windows in a DOS
+box, so you will have to modify the script to set sys.stderr to some
+kind of log file if you want to do this on Windows.
+
+The warnings are not limited to the script; modules imported by the
+script may also trigger warnings. In fact a useful technique is to
+write a test script specifically intended to exercise all code in a
+particular module or set of modules.
+
+Then run `python fixdiv.py warnings'. This first reads the warnings,
+looking for classic division warnings, and sorts them by file name and
+line number. Then, for each file that received at least one warning,
+it parses the file and tries to match the warnings up to the division
+operators found in the source code. If it is successful, it writes
+its findings to stdout, preceded by a line of dashes and a line of the
+form:
+
+ Index: <file>
+
+If the only findings found are suggestions to change a / operator into
+a // operator, the output is acceptable input for the Unix 'patch'
+program.
+
+Here are the possible messages on stdout (N stands for a line number):
+
+- A plain-diff-style change ('NcN', a line marked by '<', a line
+ containing '---', and a line marked by '>'):
+
+ A / operator was found that should be changed to //. This is the
+ recommendation when only int and/or long arguments were seen.
+
+- 'True division / operator at line N' and a line marked by '=':
+
+ A / operator was found that can remain unchanged. This is the
+ recommendation when only float and/or complex arguments were seen.
+
+- 'Ambiguous / operator (..., ...) at line N', line marked by '?':
+
+ A / operator was found for which int or long as well as float or
+ complex arguments were seen. This is highly unlikely; if it occurs,
+ you may have to restructure the code to keep the classic semantics,
+ or maybe you don't care about the classic semantics.
+
+- 'No conclusive evidence on line N', line marked by '*':
+
+ A / operator was found for which no warnings were seen. This could
+ be code that was never executed, or code that was only executed
+ with user-defined objects as arguments. You will have to
+ investigate further. Note that // can be overloaded separately from
+ /, using __floordiv__. True division can also be separately
+ overloaded, using __truediv__. Classic division should be the same
+ as either of those. (XXX should I add a warning for division on
+ user-defined objects, to disambiguate this case from code that was
+ never executed?)
+
+- 'Phantom ... warnings for line N', line marked by '*':
+
+ A warning was seen for a line not containing a / operator. The most
+ likely cause is a warning about code executed by 'exec' or eval()
+ (see note below), or an indirect invocation of the / operator, for
+ example via the div() function in the operator module. It could
+ also be caused by a change to the file between the time the test
+ script was run to collect warnings and the time fixdiv was run.
+
+- 'More than one / operator in line N'; or
+ 'More than one / operator per statement in lines N-N':
+
+ The scanner found more than one / operator on a single line, or in a
+ statement split across multiple lines. Because the warnings
+ framework doesn't (and can't) show the offset within the line, and
+ the code generator doesn't always give the correct line number for
+ operations in a multi-line statement, we can't be sure whether all
+ operators in the statement were executed. To be on the safe side,
+ by default a warning is issued about this case. In practice, these
+ cases are usually safe, and the -m option suppresses these warning.
+
+- 'Can't find the / operator in line N', line marked by '*':
+
+ This really shouldn't happen. It means that the tokenize module
+ reported a '/' operator but the line it returns didn't contain a '/'
+ character at the indicated position.
+
+- 'Bad warning for line N: XYZ', line marked by '*':
+
+ This really shouldn't happen. It means that a 'classic XYZ
+ division' warning was read with XYZ being something other than
+ 'int', 'long', 'float', or 'complex'.
+
+Notes:
+
+- The augmented assignment operator /= is handled the same way as the
+ / operator.
+
+- This tool never looks at the // operator; no warnings are ever
+ generated for use of this operator.
+
+- This tool never looks at the / operator when a future division
+ statement is in effect; no warnings are generated in this case, and
+ because the tool only looks at files for which at least one classic
+ division warning was seen, it will never look at files containing a
+ future division statement.
+
+- Warnings may be issued for code not read from a file, but executed
+ using an exec statement or the eval() function. These may have
+ <string> in the filename position, in which case the fixdiv script
+ will attempt and fail to open a file named '<string>' and issue a
+ warning about this failure; or these may be reported as 'Phantom'
+ warnings (see above). You're on your own to deal with these. You
+ could make all recommended changes and add a future division
+ statement to all affected files, and then re-run the test script; it
+ should not issue any warnings. If there are any, and you have a
+ hard time tracking down where they are generated, you can use the
+ -Werror option to force an error instead of a first warning,
+ generating a traceback.
+
+- The tool should be run from the same directory as that from which
+ the original script was run, otherwise it won't be able to open
+ files given by relative pathnames.
+"""
+
+import sys
+import getopt
+import re
+import tokenize
+
+multi_ok = 0
+
+def main():
+ try:
+ opts, args = getopt.getopt(sys.argv[1:], "hm")
+ except getopt.error, msg:
+ usage(msg)
+ return 2
+ for o, a in opts:
+ if o == "-h":
+ print __doc__
+ return
+ if o == "-m":
+ global multi_ok
+ multi_ok = 1
+ if not args:
+ usage("at least one file argument is required")
+ return 2
+ if args[1:]:
+ sys.stderr.write("%s: extra file arguments ignored\n", sys.argv[0])
+ warnings = readwarnings(args[0])
+ if warnings is None:
+ return 1
+ files = warnings.keys()
+ if not files:
+ print "No classic division warnings read from", args[0]
+ return
+ files.sort()
+ exit = None
+ for filename in files:
+ x = process(filename, warnings[filename])
+ exit = exit or x
+ return exit
+
def usage(msg):
    """Write *msg* plus brief usage information to stderr."""
    prog = sys.argv[0]
    for text in ("%s: %s\n" % (prog, msg),
                 "Usage: %s [-m] warnings\n" % prog,
                 "Try `%s -h' for more information.\n" % prog):
        sys.stderr.write(text)
+
# Matches e.g. "foo.py:42: DeprecationWarning: classic int division";
# groups are (filename, lineno, type).
PATTERN = ("^(.+?):(\d+): DeprecationWarning: "
           "classic (int|long|float|complex) division$")
+
+def readwarnings(warningsfile):
+ prog = re.compile(PATTERN)
+ try:
+ f = open(warningsfile)
+ except IOError, msg:
+ sys.stderr.write("can't open: %s\n" % msg)
+ return
+ warnings = {}
+ while 1:
+ line = f.readline()
+ if not line:
+ break
+ m = prog.match(line)
+ if not m:
+ if line.find("division") >= 0:
+ sys.stderr.write("Warning: ignored input " + line)
+ continue
+ filename, lineno, what = m.groups()
+ list = warnings.get(filename)
+ if list is None:
+ warnings[filename] = list = []
+ list.append((int(lineno), intern(what)))
+ f.close()
+ return warnings
+
def process(filename, list):
    """Match the division warnings for one file against its source.

    *list* holds (lineno, what) pairs from readwarnings().  Scans the
    file statement by statement, pairs each statement's warnings with
    the / operators it contains, and prints findings to stdout in the
    quasi-diff format described in the module docstring.  Returns 1 if
    the file cannot be opened, None otherwise.
    """
    print "-"*70
    assert list # if this fails, readwarnings() is broken
    try:
        fp = open(filename)
    except IOError, msg:
        sys.stderr.write("can't open: %s\n" % msg)
        return 1
    print "Index:", filename
    f = FileContext(fp)
    list.sort()
    index = 0 # list[:index] has been processed, list[index:] is still to do
    g = tokenize.generate_tokens(f.readline)
    while 1:
        startlineno, endlineno, slashes = lineinfo = scanline(g)
        if startlineno is None:
            break
        assert startlineno <= endlineno is not None
        # Warnings that precede this statement can't match any slash in
        # it -- report them as phantoms.
        orphans = []
        while index < len(list) and list[index][0] < startlineno:
            orphans.append(list[index])
            index += 1
        if orphans:
            reportphantomwarnings(orphans, f)
        # Collect the warnings that fall within this statement.
        warnings = []
        while index < len(list) and list[index][0] <= endlineno:
            warnings.append(list[index])
            index += 1
        if not slashes and not warnings:
            pass
        elif slashes and not warnings:
            report(slashes, "No conclusive evidence")
        elif warnings and not slashes:
            reportphantomwarnings(warnings, f)
        else:
            if len(slashes) > 1:
                if not multi_ok:
                    # Can't attribute warnings to individual operators.
                    rows = []
                    lastrow = None
                    for (row, col), line in slashes:
                        if row == lastrow:
                            continue
                        rows.append(row)
                        lastrow = row
                    assert rows
                    if len(rows) == 1:
                        print "*** More than one / operator in line", rows[0]
                    else:
                        print "*** More than one / operator per statement",
                        print "in lines %d-%d" % (rows[0], rows[-1])
            # Classify the operand types seen for this statement.
            intlong = []
            floatcomplex = []
            bad = []
            for lineno, what in warnings:
                if what in ("int", "long"):
                    intlong.append(what)
                elif what in ("float", "complex"):
                    floatcomplex.append(what)
                else:
                    bad.append(what)
            lastrow = None
            for (row, col), line in slashes:
                if row == lastrow:
                    continue
                lastrow = row
                line = chop(line)
                if line[col:col+1] != "/":
                    print "*** Can't find the / operator in line %d:" % row
                    print "*", line
                    continue
                if bad:
                    print "*** Bad warning for line %d:" % row, bad
                    print "*", line
                elif intlong and not floatcomplex:
                    # Only int/long seen: recommend // via diff output.
                    print "%dc%d" % (row, row)
                    print "<", line
                    print "---"
                    print ">", line[:col] + "/" + line[col:]
                elif floatcomplex and not intlong:
                    print "True division / operator at line %d:" % row
                    print "=", line
                elif intlong and floatcomplex:
                    print "*** Ambiguous / operator (%s, %s) at line %d:" % (
                        "|".join(intlong), "|".join(floatcomplex), row)
                    print "?", line
    fp.close()
+
+def reportphantomwarnings(warnings, f):
+ blocks = []
+ lastrow = None
+ lastblock = None
+ for row, what in warnings:
+ if row != lastrow:
+ lastblock = [row]
+ blocks.append(lastblock)
+ lastblock.append(what)
+ for block in blocks:
+ row = block[0]
+ whats = "/".join(block[1:])
+ print "*** Phantom %s warnings for line %d:" % (whats, row)
+ f.report(row, mark="*")
+
+def report(slashes, message):
+ lastrow = None
+ for (row, col), line in slashes:
+ if row != lastrow:
+ print "*** %s on line %d:" % (message, row)
+ print "*", chop(line)
+ lastrow = row
+
+class FileContext:
+ def __init__(self, fp, window=5, lineno=1):
+ self.fp = fp
+ self.window = 5
+ self.lineno = 1
+ self.eoflookahead = 0
+ self.lookahead = []
+ self.buffer = []
+ def fill(self):
+ while len(self.lookahead) < self.window and not self.eoflookahead:
+ line = self.fp.readline()
+ if not line:
+ self.eoflookahead = 1
+ break
+ self.lookahead.append(line)
+ def readline(self):
+ self.fill()
+ if not self.lookahead:
+ return ""
+ line = self.lookahead.pop(0)
+ self.buffer.append(line)
+ self.lineno += 1
+ return line
+ def __getitem__(self, index):
+ self.fill()
+ bufstart = self.lineno - len(self.buffer)
+ lookend = self.lineno + len(self.lookahead)
+ if bufstart <= index < self.lineno:
+ return self.buffer[index - bufstart]
+ if self.lineno <= index < lookend:
+ return self.lookahead[index - self.lineno]
+ raise KeyError
+ def report(self, first, last=None, mark="*"):
+ if last is None:
+ last = first
+ for i in range(first, last+1):
+ try:
+ line = self[first]
+ except KeyError:
+ line = "<missing line>"
+ print mark, chop(line)
+
def scanline(g):
    """Consume one logical line of tokens from generator *g*.

    Returns (startlineno, endlineno, slashes) where slashes is a list of
    ((row, col), line) pairs for each '/' or '/=' token seen.  At end of
    input, startlineno is None.
    """
    slashes = []
    startlineno = endlineno = None
    for toktype, token, start, end, line in g:
        endlineno = end[0]
        if startlineno is None:
            startlineno = endlineno
        if token in ("/", "/="):
            slashes.append((start, line))
        if toktype == tokenize.NEWLINE:
            break
    return startlineno, endlineno, slashes
+
def chop(line):
    """Return *line* with one trailing newline removed, if present."""
    return line[:-1] if line.endswith("\n") else line
+
+if __name__ == "__main__":
+ sys.exit(main())
diff --git a/lib/python2.7/Tools/scripts/fixheader.py b/lib/python2.7/Tools/scripts/fixheader.py
new file mode 100644
index 0000000..de04be6
--- /dev/null
+++ b/lib/python2.7/Tools/scripts/fixheader.py
@@ -0,0 +1,49 @@
+#!/usr/bin/env python2
+
+# Add some standard cpp magic to a header file
+
+import sys
+
def main():
    """Apply the header-guard transformation to each command-line file."""
    for filename in sys.argv[1:]:
        process(filename)
+
+def process(filename):
+ try:
+ f = open(filename, 'r')
+ except IOError, msg:
+ sys.stderr.write('%s: can\'t open: %s\n' % (filename, str(msg)))
+ return
+ data = f.read()
+ f.close()
+ if data[:2] <> '/*':
+ sys.stderr.write('%s does not begin with C comment\n' % filename)
+ return
+ try:
+ f = open(filename, 'w')
+ except IOError, msg:
+ sys.stderr.write('%s: can\'t write: %s\n' % (filename, str(msg)))
+ return
+ sys.stderr.write('Processing %s ...\n' % filename)
+ magic = 'Py_'
+ for c in filename:
+ if ord(c)<=0x80 and c.isalnum():
+ magic = magic + c.upper()
+ else: magic = magic + '_'
+ sys.stdout = f
+ print '#ifndef', magic
+ print '#define', magic
+ print '#ifdef __cplusplus'
+ print 'extern "C" {'
+ print '#endif'
+ print
+ f.write(data)
+ print
+ print '#ifdef __cplusplus'
+ print '}'
+ print '#endif'
+ print '#endif /*', '!'+magic, '*/'
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/python2.7/Tools/scripts/fixnotice.py b/lib/python2.7/Tools/scripts/fixnotice.py
new file mode 100644
index 0000000..1cd9712
--- /dev/null
+++ b/lib/python2.7/Tools/scripts/fixnotice.py
@@ -0,0 +1,113 @@
+#!/usr/bin/env python2
+
+"""(Ostensibly) fix copyright notices in files.
+
+Actually, this script will simply replace a block of text in a file from one
+string to another. It will only do this once though, i.e. not globally
+throughout the file. It writes a backup file and then does an os.rename()
+dance for atomicity.
+
+Usage: fixnotices.py [options] [filenames]
+Options:
+ -h / --help
+ Print this message and exit
+
+ --oldnotice=file
+ Use the notice in the file as the old (to be replaced) string, instead
+ of the hard coded value in the script.
+
+ --newnotice=file
+ Use the notice in the file as the new (replacement) string, instead of
+ the hard coded value in the script.
+
+ --dry-run
+ Don't actually make the changes, but print out the list of files that
+ would change. When used with -v, a status will be printed for every
+ file.
+
+ -v / --verbose
+ Print a message for every file looked at, indicating whether the file
+ is changed or not.
+"""
+
+OLD_NOTICE = """/***********************************************************
+Copyright (c) 2000, BeOpen.com.
+Copyright (c) 1995-2000, Corporation for National Research Initiatives.
+Copyright (c) 1990-1995, Stichting Mathematisch Centrum.
+All rights reserved.
+
+See the file "Misc/COPYRIGHT" for information on usage and
+redistribution of this file, and for a DISCLAIMER OF ALL WARRANTIES.
+******************************************************************/
+"""
+import os
+import sys
+import getopt
+
+NEW_NOTICE = ""
+DRYRUN = 0
+VERBOSE = 0
+
+
def usage(code, msg=''):
    # Print the module docstring (and an optional message), then exit
    # with the given status code.
    print __doc__ % globals()
    if msg:
        print msg
    sys.exit(code)
+
+
+def main():
+ global DRYRUN, OLD_NOTICE, NEW_NOTICE, VERBOSE
+ try:
+ opts, args = getopt.getopt(sys.argv[1:], 'hv',
+ ['help', 'oldnotice=', 'newnotice=',
+ 'dry-run', 'verbose'])
+ except getopt.error, msg:
+ usage(1, msg)
+
+ for opt, arg in opts:
+ if opt in ('-h', '--help'):
+ usage(0)
+ elif opt in ('-v', '--verbose'):
+ VERBOSE = 1
+ elif opt == '--dry-run':
+ DRYRUN = 1
+ elif opt == '--oldnotice':
+ fp = open(arg)
+ OLD_NOTICE = fp.read()
+ fp.close()
+ elif opt == '--newnotice':
+ fp = open(arg)
+ NEW_NOTICE = fp.read()
+ fp.close()
+
+ for arg in args:
+ process(arg)
+
+
+def process(file):
+ f = open(file)
+ data = f.read()
+ f.close()
+ i = data.find(OLD_NOTICE)
+ if i < 0:
+ if VERBOSE:
+ print 'no change:', file
+ return
+ elif DRYRUN or VERBOSE:
+ print ' change:', file
+ if DRYRUN:
+ # Don't actually change the file
+ return
+ data = data[:i] + NEW_NOTICE + data[i+len(OLD_NOTICE):]
+ new = file + ".new"
+ backup = file + ".bak"
+ f = open(new, "w")
+ f.write(data)
+ f.close()
+ os.rename(file, backup)
+ os.rename(new, file)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/python2.7/Tools/scripts/fixps.py b/lib/python2.7/Tools/scripts/fixps.py
new file mode 100644
index 0000000..df7ae31
--- /dev/null
+++ b/lib/python2.7/Tools/scripts/fixps.py
@@ -0,0 +1,33 @@
+#!/usr/bin/env python2
+
+# Fix Python script(s) to reference the interpreter via /usr/bin/env python.
+# Warning: this overwrites the file without making a backup.
+
+import sys
+import re
+
+
+def main():
+ for filename in sys.argv[1:]:
+ try:
+ f = open(filename, 'r')
+ except IOError, msg:
+ print filename, ': can\'t open :', msg
+ continue
+ line = f.readline()
+ if not re.match('^#! */usr/local/bin/python', line):
+ print filename, ': not a /usr/local/bin/python script'
+ f.close()
+ continue
+ rest = f.read()
+ f.close()
+ line = re.sub('/usr/local/bin/python',
+ '/usr/bin/env python', line)
+ print filename, ':', repr(line)
+ f = open(filename, "w")
+ f.write(line)
+ f.write(rest)
+ f.close()
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/python2.7/Tools/scripts/google.py b/lib/python2.7/Tools/scripts/google.py
new file mode 100644
index 0000000..1cd08d4
--- /dev/null
+++ b/lib/python2.7/Tools/scripts/google.py
@@ -0,0 +1,23 @@
+#!/usr/bin/env python2
+
+import sys, webbrowser
+
+def main():
+ args = sys.argv[1:]
+ if not args:
+ print "Usage: %s querystring" % sys.argv[0]
+ return
+ list = []
+ for arg in args:
+ if '+' in arg:
+ arg = arg.replace('+', '%2B')
+ if ' ' in arg:
+ arg = '"%s"' % arg
+ arg = arg.replace(' ', '+')
+ list.append(arg)
+ s = '+'.join(list)
+ url = "http://www.google.com/search?q=%s" % s
+ webbrowser.open(url)
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/python2.7/Tools/scripts/gprof2html.py b/lib/python2.7/Tools/scripts/gprof2html.py
new file mode 100644
index 0000000..4aa5642
--- /dev/null
+++ b/lib/python2.7/Tools/scripts/gprof2html.py
@@ -0,0 +1,79 @@
+#!/usr/bin/env python2
+
+"""Transform gprof(1) output into useful HTML."""
+
+import re, os, sys, cgi, webbrowser
+
+header = """\
+<html>
+<head>
+ <title>gprof output (%s)</title>
+</head>
+<body>
+<pre>
+"""
+
+trailer = """\
+</pre>
+</body>
+</html>
+"""
+
def add_escapes(input):
    """Yield each line of *input* with HTML special characters escaped."""
    return (cgi.escape(line) for line in input)
+
def main():
    """Convert gprof output ('gprof.out' or argv[1]) to annotated HTML.

    The flat profile and the call graph are cross-linked: each function
    name anchors in one section and links to the other.  All four loops
    below share one iterator over the input, so each loop resumes where
    the previous one stopped.  Opens the result in a web browser.
    """
    filename = "gprof.out"
    if sys.argv[1:]:
        filename = sys.argv[1]
    outputfilename = filename + ".html"
    input = add_escapes(file(filename))
    output = file(outputfilename, "w")
    output.write(header % filename)
    # Copy the preamble up to the flat profile's column headers.
    for line in input:
        output.write(line)
        if line.startswith(" time"):
            break
    # Flat profile: the last word of each row is a function name.
    labels = {}
    for line in input:
        m = re.match(r"(.* )(\w+)\n", line)
        if not m:
            output.write(line)
            break
        stuff, fname = m.group(1, 2)
        labels[fname] = fname
        output.write('%s<a name="flat:%s" href="#call:%s">%s</a>\n' %
                     (stuff, fname, fname, fname))
    # Copy up to the call graph's column headers.
    for line in input:
        output.write(line)
        if line.startswith("index % time"):
            break
    # Call graph: link names collected above back to the flat profile.
    for line in input:
        m = re.match(r"(.* )(\w+)(( &lt;cycle.*&gt;)? \[\d+\])\n", line)
        if not m:
            output.write(line)
            if line.startswith("Index by function name"):
                break
            continue
        prefix, fname, suffix = m.group(1, 2, 3)
        if fname not in labels:
            output.write(line)
            continue
        if line.startswith("["):
            output.write('%s<a name="call:%s" href="#flat:%s">%s</a>%s\n' %
                         (prefix, fname, fname, fname, suffix))
        else:
            output.write('%s<a href="#call:%s">%s</a>%s\n' %
                         (prefix, fname, fname, suffix))
    # Index section: link every known name wherever it appears.
    for line in input:
        for part in re.findall(r"(\w+(?:\.c)?|\W+)", line):
            if part in labels:
                part = '<a href="#call:%s">%s</a>' % (part, part)
            output.write(part)
    output.write(trailer)
    output.close()
    webbrowser.open("file:" + os.path.abspath(outputfilename))
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/python2.7/Tools/scripts/h2py.py b/lib/python2.7/Tools/scripts/h2py.py
new file mode 100644
index 0000000..0d6aab1
--- /dev/null
+++ b/lib/python2.7/Tools/scripts/h2py.py
@@ -0,0 +1,181 @@
+#!/usr/bin/env python2
+
+# Read #define's and translate to Python code.
+# Handle #include statements.
+# Handle #define macros with one argument.
+# Anything that isn't recognized or doesn't translate into valid
+# Python is ignored.
+
+# Without filename arguments, acts as a filter.
+# If one or more filenames are given, output is written to corresponding
+# filenames in the local directory, translated to all uppercase, with
+# the extension replaced by ".py".
+
+# By passing one or more options of the form "-i regular_expression"
+# you can specify additional strings to be ignored. This is useful
+# e.g. to ignore casts to u_long: simply specify "-i '(u_long)'".
+
+# XXX To do:
+# - turn trailing C comments into Python comments
+# - turn C Boolean operators "&& || !" into Python "and or not"
+# - what to do about #if(def)?
+# - what to do about macros with multiple parameters?
+
+import sys, re, getopt, os
+
+p_define = re.compile('^[\t ]*#[\t ]*define[\t ]+([a-zA-Z0-9_]+)[\t ]+')
+
+p_macro = re.compile(
+ '^[\t ]*#[\t ]*define[\t ]+'
+ '([a-zA-Z0-9_]+)\(([_a-zA-Z][_a-zA-Z0-9]*)\)[\t ]+')
+
+p_include = re.compile('^[\t ]*#[\t ]*include[\t ]+<([^>\n]+)>')
+
+p_comment = re.compile(r'/\*([^*]+|\*+[^/])*(\*+/)?')
+p_cpp_comment = re.compile('//.*')
+
+ignores = [p_comment, p_cpp_comment]
+
+p_char = re.compile(r"'(\\.[^\\]*|[^\\])'")
+
+p_hex = re.compile(r"0x([0-9a-fA-F]+)L?")
+
+filedict = {}
+importable = {}
+
+try:
+ searchdirs=os.environ['include'].split(';')
+except KeyError:
+ try:
+ searchdirs=os.environ['INCLUDE'].split(';')
+ except KeyError:
+ try:
+ if sys.platform.find("beos") == 0:
+ searchdirs=os.environ['BEINCLUDES'].split(';')
+ elif sys.platform.startswith("atheos"):
+ searchdirs=os.environ['C_INCLUDE_PATH'].split(':')
+ else:
+ raise KeyError
+ except KeyError:
+ searchdirs=['/usr/include']
+ try:
+ searchdirs.insert(0, os.path.join('/usr/include',
+ os.environ['MULTIARCH']))
+ except KeyError:
+ pass
+
+
def main():
    """Translate each named header (or stdin with no args) to Python.

    For a file FOO.h the output goes to FOO.py in the current directory.
    If the input lies under one of the search directories, it is also
    registered in *importable* so later #include lines can become
    'from FOO import *'.
    """
    global filedict
    opts, args = getopt.getopt(sys.argv[1:], 'i:')
    for o, a in opts:
        if o == '-i':
            ignores.append(re.compile(a))
    if not args:
        args = ['-']
    for filename in args:
        if filename == '-':
            sys.stdout.write('# Generated by h2py from stdin\n')
            process(sys.stdin, sys.stdout)
        else:
            fp = open(filename, 'r')
            outfile = os.path.basename(filename)
            i = outfile.rfind('.')
            if i > 0: outfile = outfile[:i]
            modname = outfile.upper()
            outfile = modname + '.py'
            outfp = open(outfile, 'w')
            outfp.write('# Generated by h2py from %s\n' % filename)
            filedict = {}
            for dir in searchdirs:
                if filename[:len(dir)] == dir:
                    filedict[filename[len(dir)+1:]] = None  # no '/' trailing
                    importable[filename[len(dir)+1:]] = modname
                    break
            process(fp, outfp)
            outfp.close()
            fp.close()
+
def pytify(body):
    """Translate the body of a C #define into (roughly) Python text."""
    # replace ignored patterns by spaces
    for p in ignores:
        body = p.sub(' ', body)
    # replace char literals by ord(...)
    body = p_char.sub("ord('\\1')", body)
    # Compute negative hexadecimal constants
    # (hex literals above sys.maxint are C unsigned values; rewrite them
    # as the equivalent negative Python integer).
    start = 0
    UMAX = 2*(sys.maxint+1)
    while 1:
        m = p_hex.search(body, start)
        if not m: break
        s,e = m.span()
        val = long(body[slice(*m.span(1))], 16)
        if val > sys.maxint:
            val -= UMAX
            body = body[:s] + "(" + str(val) + ")" + body[e:]
        start = s + 1
    return body
+
def process(fp, outfp, env = {}):
    """Translate the header read from *fp*, writing Python to *outfp*.

    Handles simple #define constants, one-argument #define macros, and
    #include <...> lines (recursing into found headers).  Each generated
    statement is exec'd in *env* first; statements that fail to execute
    are skipped with a message on stderr.  NOTE: the mutable default
    *env* is shared across top-level calls by design -- it accumulates
    the definitions seen so far so later defines can refer to earlier
    ones.
    """
    lineno = 0
    while 1:
        line = fp.readline()
        if not line: break
        lineno = lineno + 1
        match = p_define.match(line)
        if match:
            # gobble up continuation lines
            while line[-2:] == '\\\n':
                nextline = fp.readline()
                if not nextline: break
                lineno = lineno + 1
                line = line + nextline
            name = match.group(1)
            body = line[match.end():]
            body = pytify(body)
            ok = 0
            stmt = '%s = %s\n' % (name, body.strip())
            try:
                exec stmt in env
            except:
                sys.stderr.write('Skipping: %s' % stmt)
            else:
                outfp.write(stmt)
        match = p_macro.match(line)
        if match:
            macro, arg = match.group(1, 2)
            body = line[match.end():]
            body = pytify(body)
            stmt = 'def %s(%s): return %s\n' % (macro, arg, body)
            try:
                exec stmt in env
            except:
                sys.stderr.write('Skipping: %s' % stmt)
            else:
                outfp.write(stmt)
        match = p_include.match(line)
        if match:
            regs = match.regs
            a, b = regs[1]
            filename = line[a:b]
            if importable.has_key(filename):
                # Header was translated earlier: import its module.
                outfp.write('from %s import *\n' % importable[filename])
            elif not filedict.has_key(filename):
                filedict[filename] = None
                inclfp = None
                for dir in searchdirs:
                    try:
                        inclfp = open(dir + '/' + filename)
                        break
                    except IOError:
                        pass
                if inclfp:
                    outfp.write(
                            '\n# Included from %s\n' % filename)
                    process(inclfp, outfp, env)
                else:
                    sys.stderr.write('Warning - could not find file %s\n' %
                                     filename)
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/python2.7/Tools/scripts/hotshotmain.py b/lib/python2.7/Tools/scripts/hotshotmain.py
new file mode 100644
index 0000000..10ced94
--- /dev/null
+++ b/lib/python2.7/Tools/scripts/hotshotmain.py
@@ -0,0 +1,55 @@
+#!/usr/bin/env python2
+# -*- coding: iso-8859-1 -*-
+
+"""
+Run a Python script under hotshot's control.
+
+Adapted from a posting on python-dev by Walter Dörwald
+
+usage %prog [ %prog args ] filename [ filename args ]
+
+Any arguments after the filename are used as sys.argv for the filename.
+"""
+
+import sys
+import optparse
+import os
+import hotshot
+import hotshot.stats
+
+PROFILE = "hotshot.prof"    # default profile data file (-p overrides)
+
+def run_hotshot(filename, profile, args):
+    """Run the script `filename` under hotshot, saving data to `profile`.
+
+    args becomes the script's sys.argv[1:]; sorted stats are printed to
+    stderr.  Returns 0.
+    """
+    prof = hotshot.Profile(profile)
+    sys.path.insert(0, os.path.dirname(filename))
+    sys.argv = [filename] + args
+    prof.run("execfile(%r)" % filename)
+    prof.close()
+    stats = hotshot.stats.load(profile)
+    stats.sort_stats("time", "calls")
+
+    # print_stats uses unadorned print statements, so the only way
+    # to force output to stderr is to reassign sys.stdout temporarily
+    save_stdout = sys.stdout
+    sys.stdout = sys.stderr
+    stats.print_stats()
+    sys.stdout = save_stdout
+
+    return 0
+
+def main(args):
+    """Parse options, then profile the named script; return exit status."""
+    parser = optparse.OptionParser(__doc__)
+    parser.disable_interspersed_args()
+    parser.add_option("-p", "--profile", action="store", default=PROFILE,
+                      dest="profile", help='Specify profile file to use')
+    (options, args) = parser.parse_args(args)
+
+    if len(args) == 0:
+        # NOTE(review): print_help() takes an output *file*, not a message;
+        # parser.error("missing script to execute") looks like the intent.
+        parser.print_help("missing script to execute")
+        return 1
+
+    filename = args[0]
+    return run_hotshot(filename, options.profile, args[1:])
+
+if __name__ == "__main__":
+    sys.exit(main(sys.argv[1:]))
diff --git a/lib/python2.7/Tools/scripts/ifdef.py b/lib/python2.7/Tools/scripts/ifdef.py
new file mode 100644
index 0000000..60a5107
--- /dev/null
+++ b/lib/python2.7/Tools/scripts/ifdef.py
@@ -0,0 +1,112 @@
+#!/usr/bin/env python2
+
+# Selectively preprocess #ifdef / #ifndef statements.
+# Usage:
+# ifdef [-Dname] ... [-Uname] ... [file] ...
+#
+# This scans the file(s), looking for #ifdef and #ifndef preprocessor
+# commands that test for one of the names mentioned in the -D and -U
+# options. On standard output it writes a copy of the input file(s)
+# minus those code sections that are suppressed by the selected
+# combination of defined/undefined symbols. The #if(n)def/#else/#endif
+# lines themselves (if the #if(n)def tests for one of the mentioned
+# names) are removed as well.
+
+# Features: Arbitrary nesting of recognized and unrecognized
+# preprocessor statements works correctly. Unrecognized #if* commands
+# are left in place, so it will never remove too much, only too
+# little. It does accept whitespace around the '#' character.
+
+# Restrictions: There should be no comments or other symbols on the
+# #if(n)def lines. The effect of #define/#undef commands in the input
+# file or in included files is not taken into account. Tests using
+# #if and the defined() pseudo function are not recognized. The #elif
+# command is not recognized. Improper nesting is not detected.
+# Lines that look like preprocessor commands but which are actually
+# part of comments or string literals will be mistaken for
+# preprocessor commands.
+
+import sys
+import getopt
+
+defs = []      # names to treat as #defined (-D options)
+undefs = []    # names to treat as undefined (-U options)
+
+def main():
+    """Collect -D/-U options, then filter each named file (or stdin) to stdout."""
+    opts, args = getopt.getopt(sys.argv[1:], 'D:U:')
+    for o, a in opts:
+        if o == '-D':
+            defs.append(a)
+        if o == '-U':
+            undefs.append(a)
+    if not args:
+        args = ['-']
+    for filename in args:
+        if filename == '-':
+            process(sys.stdin, sys.stdout)
+        else:
+            f = open(filename, 'r')
+            process(f, sys.stdout)
+            f.close()
+
+def process(fpi, fpo):
+    """Copy fpi to fpo, suppressing sections controlled by known symbols.
+
+    ok is true while lines should be copied.  stack holds one entry per
+    open conditional: (saved ok, branch flag, symbol); a flag of -1 marks
+    an unrecognized conditional whose lines are passed through verbatim.
+    """
+    keywords = ('if', 'ifdef', 'ifndef', 'else', 'endif')
+    ok = 1
+    stack = []
+    while 1:
+        line = fpi.readline()
+        if not line: break
+        # join backslash-continued lines before inspecting them
+        while line[-2:] == '\\\n':
+            nextline = fpi.readline()
+            if not nextline: break
+            line = line + nextline
+        tmp = line.strip()
+        if tmp[:1] != '#':
+            if ok: fpo.write(line)
+            continue
+        tmp = tmp[1:].strip()
+        words = tmp.split()
+        keyword = words[0]
+        if keyword not in keywords:
+            if ok: fpo.write(line)
+            continue
+        if keyword in ('ifdef', 'ifndef') and len(words) == 2:
+            if keyword == 'ifdef':
+                ko = 1
+            else:
+                ko = 0
+            word = words[1]
+            if word in defs:
+                stack.append((ok, ko, word))
+                if not ko: ok = 0
+            elif word in undefs:
+                stack.append((ok, not ko, word))
+                if ko: ok = 0
+            else:
+                # symbol not mentioned on the command line: keep the line
+                stack.append((ok, -1, word))
+                if ok: fpo.write(line)
+        elif keyword == 'if':
+            # plain #if is never interpreted -- passed through
+            stack.append((ok, -1, ''))
+            if ok: fpo.write(line)
+        elif keyword == 'else' and stack:
+            s_ok, s_ko, s_word = stack[-1]
+            if s_ko < 0:
+                if ok: fpo.write(line)
+            else:
+                # flip which branch is live
+                s_ko = not s_ko
+                ok = s_ok
+                if not s_ko: ok = 0
+                stack[-1] = s_ok, s_ko, s_word
+        elif keyword == 'endif' and stack:
+            s_ok, s_ko, s_word = stack[-1]
+            if s_ko < 0:
+                if ok: fpo.write(line)
+            del stack[-1]
+            ok = s_ok
+        else:
+            sys.stderr.write('Unknown keyword %s\n' % keyword)
+    if stack:
+        # unmatched conditionals at EOF
+        sys.stderr.write('stack: %s\n' % stack)
+
+if __name__ == '__main__':
+    main()
diff --git a/lib/python2.7/Tools/scripts/lfcr.py b/lib/python2.7/Tools/scripts/lfcr.py
new file mode 100644
index 0000000..1f19da6
--- /dev/null
+++ b/lib/python2.7/Tools/scripts/lfcr.py
@@ -0,0 +1,24 @@
+#!/usr/bin/env python2
+
+"Replace LF with CRLF in argument files. Print names of changed files."
+
+import sys, re, os
+
+def main():
+    """Rewrite each argument file with CRLF line endings; print changed names."""
+    for filename in sys.argv[1:]:
+        if os.path.isdir(filename):
+            print filename, "Directory!"
+            continue
+        data = open(filename, "rb").read()
+        if '\0' in data:
+            # NUL byte -> almost certainly a binary file; leave it alone
+            print filename, "Binary!"
+            continue
+        newdata = re.sub("\r?\n", "\r\n", data)
+        if newdata != data:
+            print filename
+            f = open(filename, "wb")
+            f.write(newdata)
+            f.close()
+
+if __name__ == '__main__':
+    main()
diff --git a/lib/python2.7/Tools/scripts/linktree.py b/lib/python2.7/Tools/scripts/linktree.py
new file mode 100644
index 0000000..2dbf18e
--- /dev/null
+++ b/lib/python2.7/Tools/scripts/linktree.py
@@ -0,0 +1,80 @@
+#!/usr/bin/env python2
+
+# linktree
+#
+# Make a copy of a directory tree with symbolic links to all files in the
+# original tree.
+# All symbolic links go to a special symbolic link at the top, so you
+# can easily fix things if the original source tree moves.
+# See also "mkreal".
+#
+# usage: linktree oldtree newtree [linkto]
+
+import sys, os
+
+LINK = '.LINK' # Name of special symlink at the top.
+
+debug = 0      # set >0 for trace output in linknames()
+
+def main():
+    """Shadow oldtree into newtree via symlinks; return exit status."""
+    if not 3 <= len(sys.argv) <= 4:
+        print 'usage:', sys.argv[0], 'oldtree newtree [linkto]'
+        return 2
+    oldtree, newtree = sys.argv[1], sys.argv[2]
+    if len(sys.argv) > 3:
+        link = sys.argv[3]
+        link_may_fail = 1
+    else:
+        link = LINK
+        link_may_fail = 0
+    if not os.path.isdir(oldtree):
+        print oldtree + ': not a directory'
+        return 1
+    try:
+        os.mkdir(newtree, 0777)
+    except os.error, msg:
+        print newtree + ': cannot mkdir:', msg
+        return 1
+    # create the single anchor symlink all file links go through
+    linkname = os.path.join(newtree, link)
+    try:
+        os.symlink(os.path.join(os.pardir, oldtree), linkname)
+    except os.error, msg:
+        if not link_may_fail:
+            print linkname + ': cannot symlink:', msg
+            return 1
+        else:
+            print linkname + ': warning: cannot symlink:', msg
+    linknames(oldtree, newtree, link)
+    return 0
+
+def linknames(old, new, link):
+    """Recursively mirror directory old into new, symlinking files via link."""
+    if debug: print 'linknames', (old, new, link)
+    try:
+        names = os.listdir(old)
+    except os.error, msg:
+        print old + ': warning: cannot listdir:', msg
+        return
+    for name in names:
+        if name not in (os.curdir, os.pardir):
+            oldname = os.path.join(old, name)
+            linkname = os.path.join(link, name)
+            newname = os.path.join(new, name)
+            if debug > 1: print oldname, newname, linkname
+            if os.path.isdir(oldname) and \
+               not os.path.islink(oldname):
+                try:
+                    os.mkdir(newname, 0777)
+                    ok = 1
+                except:
+                    # NOTE(review): 'msg' is not bound by this bare except;
+                    # it refers to whatever an earlier handler left behind
+                    # (latent NameError if no earlier failure occurred).
+                    print newname + \
+                          ': warning: cannot mkdir:', msg
+                    ok = 0
+                if ok:
+                    linkname = os.path.join(os.pardir,
+                                            linkname)
+                    linknames(oldname, newname, linkname)
+            else:
+                os.symlink(linkname, newname)
+
+if __name__ == '__main__':
+    sys.exit(main())
diff --git a/lib/python2.7/Tools/scripts/lll.py b/lib/python2.7/Tools/scripts/lll.py
new file mode 100644
index 0000000..32994e4
--- /dev/null
+++ b/lib/python2.7/Tools/scripts/lll.py
@@ -0,0 +1,28 @@
+#!/usr/bin/env python2
+
+# Find symbolic links and show where they point to.
+# Arguments are directories to search; default is current directory.
+# No recursion.
+# (This is a totally different program from "findsymlinks.py"!)
+
+import sys, os
+
+def lll(dirname):
+    # Print each symlink directly inside dirname with its target.
+    for name in os.listdir(dirname):
+        if name not in (os.curdir, os.pardir):
+            full = os.path.join(dirname, name)
+            if os.path.islink(full):
+                print name, '->', os.readlink(full)
+
+def main():
+    """List symlinks in each argument directory (default: cwd); no recursion."""
+    args = sys.argv[1:]
+    if not args: args = [os.curdir]
+    first = 1
+    for arg in args:
+        if len(args) > 1:
+            # label and blank-line-separate per-directory sections
+            if not first: print
+            first = 0
+            print arg + ':'
+        lll(arg)
+
+if __name__ == '__main__':
+    main()
diff --git a/lib/python2.7/Tools/scripts/logmerge.py b/lib/python2.7/Tools/scripts/logmerge.py
new file mode 100644
index 0000000..6cc55fa
--- /dev/null
+++ b/lib/python2.7/Tools/scripts/logmerge.py
@@ -0,0 +1,185 @@
+#!/usr/bin/env python2
+
+"""Consolidate a bunch of CVS or RCS logs read from stdin.
+
+Input should be the output of a CVS or RCS logging command, e.g.
+
+ cvs log -rrelease14:
+
+which dumps all log messages from release1.4 upwards (assuming that
+release 1.4 was tagged with tag 'release14'). Note the trailing
+colon!
+
+This collects all the revision records and outputs them sorted by date
+rather than by file, collapsing duplicate revision record, i.e.,
+records with the same message for different files.
+
+The -t option causes it to truncate (discard) the last revision log
+entry; this is useful when using something like the above cvs log
+command, which shows the revisions including the given tag, while you
+probably want everything *since* that tag.
+
+The -r option reverses the output (oldest first; the default is oldest
+last).
+
+The -b tag option restricts the output to *only* checkin messages
+belonging to the given branch tag. The form -b HEAD restricts the
+output to checkin messages belonging to the CVS head (trunk). (It
+produces some output if tag is a non-branch tag, but this output is
+not very useful.)
+
+-h prints this message and exits.
+
+XXX This code was created by reverse engineering CVS 1.9 and RCS 5.7
+from their output.
+"""
+
+import sys, errno, getopt, re
+
+sep1 = '='*77 + '\n'                    # file separator
+sep2 = '-'*28 + '\n'                    # revision separator
+
+def main():
+    """Main program: read log chunks from stdin, sort records, print them."""
+    truncate_last = 0
+    reverse = 0
+    branch = None
+    opts, args = getopt.getopt(sys.argv[1:], "trb:h")
+    for o, a in opts:
+        if o == '-t':
+            truncate_last = 1
+        elif o == '-r':
+            reverse = 1
+        elif o == '-b':
+            branch = a
+        elif o == '-h':
+            print __doc__
+            sys.exit(0)
+    database = []
+    while 1:
+        chunk = read_chunk(sys.stdin)
+        if not chunk:
+            break
+        records = digest_chunk(chunk, branch)
+        if truncate_last:
+            # drop the revision belonging to the base tag itself
+            del records[-1]
+        database[len(database):] = records
+    database.sort()
+    if not reverse:
+        database.reverse()      # default output order: newest first
+    format_output(database)
+
+def read_chunk(fp):
+    """Read a chunk -- data for one file, ending with sep1.
+
+    Split the chunk in parts separated by sep2.
+
+    """
+    chunk = []
+    lines = []
+    while 1:
+        line = fp.readline()
+        if not line:
+            break
+        if line == sep1:
+            # end of this file's log
+            if lines:
+                chunk.append(lines)
+            break
+        if line == sep2:
+            # end of one revision record
+            if lines:
+                chunk.append(lines)
+                lines = []
+        else:
+            lines.append(line)
+    return chunk
+
+def digest_chunk(chunk, branch=None):
+    """Digest a chunk -- extract working file name and revisions.
+
+    Returns a list of (date, working_file, rev, author, text) tuples.
+    If branch is given, it is replaced by a compiled regexp matching
+    revision numbers on that branch, and non-matching records are dropped.
+    """
+    lines = chunk[0]
+    key = 'Working file:'
+    keylen = len(key)
+    for line in lines:
+        if line[:keylen] == key:
+            working_file = line[keylen:].strip()
+            break
+    else:
+        working_file = None
+    if branch is None:
+        pass
+    elif branch == "HEAD":
+        # trunk revisions look like "N.N"
+        branch = re.compile(r"^\d+\.\d+$")
+    else:
+        # map the symbolic tag to its revision via the 'symbolic names:' block
+        revisions = {}
+        key = 'symbolic names:\n'
+        found = 0
+        for line in lines:
+            if line == key:
+                found = 1
+            elif found:
+                if line[0] in '\t ':
+                    tag, rev = line.split()
+                    if tag[-1] == ':':
+                        tag = tag[:-1]
+                    revisions[tag] = rev
+                else:
+                    found = 0
+        rev = revisions.get(branch)
+        branch = re.compile(r"^<>$") # <> to force a mismatch by default
+        if rev:
+            if rev.find('.0.') >= 0:
+                rev = rev.replace('.0.', '.')
+            branch = re.compile(r"^" + re.escape(rev) + r"\.\d+$")
+    records = []
+    for lines in chunk[1:]:
+        revline = lines[0]
+        dateline = lines[1]
+        text = lines[2:]
+        words = dateline.split()
+        author = None
+        if len(words) >= 3 and words[0] == 'date:':
+            dateword = words[1]
+            timeword = words[2]
+            if timeword[-1:] == ';':
+                timeword = timeword[:-1]
+            date = dateword + ' ' + timeword
+            if len(words) >= 5 and words[3] == 'author:':
+                author = words[4]
+                if author[-1:] == ';':
+                    author = author[:-1]
+        else:
+            date = None
+            text.insert(0, revline)
+        words = revline.split()
+        if len(words) >= 2 and words[0] == 'revision':
+            rev = words[1]
+        else:
+            # No 'revision' line -- weird...
+            rev = None
+            text.insert(0, revline)
+        if branch:
+            if rev is None or not branch.match(rev):
+                continue
+        records.append((date, working_file, rev, author, text))
+    return records
+
+def format_output(database):
+    """Print records, collapsing consecutive records with identical text."""
+    prevtext = None
+    prev = []
+    database.append((None, None, None, None, None)) # Sentinel
+    for (date, working_file, rev, author, text) in database:
+        if text != prevtext:
+            # text changed: flush the group of records sharing prevtext
+            if prev:
+                print sep2,
+                for (p_date, p_working_file, p_rev, p_author) in prev:
+                    print p_date, p_author, p_working_file, p_rev
+                sys.stdout.writelines(prevtext)
+            prev = []
+        prev.append((date, working_file, rev, author))
+        prevtext = text
+
+if __name__ == '__main__':
+    try:
+        main()
+    except IOError, e:
+        # broken pipe (e.g. piping into head) is not an error
+        if e.errno != errno.EPIPE:
+            raise
diff --git a/lib/python2.7/Tools/scripts/mailerdaemon.py b/lib/python2.7/Tools/scripts/mailerdaemon.py
new file mode 100644
index 0000000..1a5b664
--- /dev/null
+++ b/lib/python2.7/Tools/scripts/mailerdaemon.py
@@ -0,0 +1,238 @@
+#!/usr/bin/env python2
+"""mailerdaemon - classes to parse mailer-daemon messages"""
+
+import rfc822
+import calendar
+import re
+import os
+import sys
+
+Unparseable = 'mailerdaemon.Unparseable'    # string exception (pre-class style)
+
+class ErrorMessage(rfc822.Message):
+    """An rfc822 message that can mine itself for bounce error strings."""
+
+    def __init__(self, fp):
+        rfc822.Message.__init__(self, fp)
+        self.sub = ''    # lower-cased Subject, set by is_warning()
+
+    def is_warning(self):
+        # Return 1 if the Subject marks a warning-only (non-fatal) bounce.
+        sub = self.getheader('Subject')
+        if not sub:
+            return 0
+        sub = sub.lower()
+        if sub.startswith('waiting mail'): return 1
+        if 'warning' in sub: return 1
+        self.sub = sub
+        return 0
+
+    def get_errors(self):
+        # Try each registered parser in turn; first success wins.
+        for p in EMPARSERS:
+            self.rewindbody()
+            try:
+                return p(self.fp, self.sub)
+            except Unparseable:
+                pass
+        raise Unparseable
+
+# List of re's or tuples of re's.
+# If a re, it should contain at least a group (?P<email>...) which
+# should refer to the email address. The re can also contain a group
+# (?P<reason>...) which should refer to the reason (error message).
+# If no reason is present, the emparse_list_reason list is used to
+# find a reason.
+# If a tuple, the tuple should contain 2 re's. The first re finds a
+# location, the second re is repeated one or more times to find
+# multiple email addresses. The second re is matched (not searched)
+# where the previous match ended.
+# The re's are compiled using the re module.
+emparse_list_list = [
+    'error: (?P<reason>unresolvable): (?P<email>.+)',
+    ('----- The following addresses had permanent fatal errors -----\n',
+     '(?P<email>[^ \n].*)\n( .*\n)?'),
+    'remote execution.*\n.*rmail (?P<email>.+)',
+    ('The following recipients did not receive your message:\n\n',
+     ' +(?P<email>.*)\n(The following recipients did not receive your message:\n\n)?'),
+    '------- Failure Reasons --------\n\n(?P<reason>.*)\n(?P<email>.*)',
+    '^<(?P<email>.*)>:\n(?P<reason>.*)',
+    '^(?P<reason>User mailbox exceeds allowed size): (?P<email>.+)',
+    '^5\\d{2} <(?P<email>[^\n>]+)>\\.\\.\\. (?P<reason>.+)',
+    '^Original-Recipient: rfc822;(?P<email>.*)',
+    '^did not reach the following recipient\\(s\\):\n\n(?P<email>.*) on .*\n +(?P<reason>.*)',
+    '^ <(?P<email>[^\n>]+)> \\.\\.\\. (?P<reason>.*)',
+    '^Report on your message to: (?P<email>.*)\nReason: (?P<reason>.*)',
+    '^Your message was not delivered to +(?P<email>.*)\n +for the following reason:\n +(?P<reason>.*)',
+    '^ was not +(?P<email>[^ \n].*?) *\n.*\n.*\n.*\n because:.*\n +(?P<reason>[^ \n].*?) *\n',
+    ]
+# compile the re's in the list and store them in-place.
+for i in range(len(emparse_list_list)):
+    x = emparse_list_list[i]
+    if type(x) is type(''):
+        x = re.compile(x, re.MULTILINE)
+    else:
+        # tuple entry: compile each element, keep it a tuple
+        xl = []
+        for x in x:
+            xl.append(re.compile(x, re.MULTILINE))
+        x = tuple(xl)
+        del xl
+    emparse_list_list[i] = x
+    del x
+del i
+
+# list of re's used to find reasons (error messages).
+# if a string, "<>" is replaced by a copy of the email address.
+# The expressions are searched for in order. After the first match,
+# no more expressions are searched for. So, order is important.
+emparse_list_reason = [
+    r'^5\d{2} <>\.\.\. (?P<reason>.*)',
+    '<>\.\.\. (?P<reason>.*)',
+    re.compile(r'^<<< 5\d{2} (?P<reason>.*)', re.MULTILINE),
+    re.compile('===== stderr was =====\nrmail: (?P<reason>.*)'),
+    re.compile('^Diagnostic-Code: (?P<reason>.*)', re.MULTILINE),
+    ]
+# First "From:" header marks the start of the quoted original message;
+# emparse_list limits its searches to the text before it.
+emparse_list_from = re.compile('^From:', re.IGNORECASE|re.MULTILINE)
+def emparse_list(fp, sub):
+    """Extract 'email: reason' error strings from a bounce message body.
+
+    sub is the lower-cased Subject, used as the fallback reason.
+    Raises Unparseable when no recipient addresses can be found.
+    """
+    data = fp.read()
+    # only search the part before the quoted original message (From: header)
+    res = emparse_list_from.search(data)
+    if res is None:
+        from_index = len(data)
+    else:
+        from_index = res.start(0)
+    errors = []
+    emails = []
+    reason = None
+    for regexp in emparse_list_list:
+        if type(regexp) is type(()):
+            # (anchor, repeated-address) pair of patterns
+            res = regexp[0].search(data, 0, from_index)
+            if res is not None:
+                try:
+                    reason = res.group('reason')
+                except IndexError:
+                    pass
+                while 1:
+                    res = regexp[1].match(data, res.end(0), from_index)
+                    if res is None:
+                        break
+                    emails.append(res.group('email'))
+                break
+        else:
+            res = regexp.search(data, 0, from_index)
+            if res is not None:
+                emails.append(res.group('email'))
+                try:
+                    reason = res.group('reason')
+                except IndexError:
+                    pass
+                break
+    if not emails:
+        raise Unparseable
+    if not reason:
+        reason = sub
+        if reason[:15] == 'returned mail: ':
+            reason = reason[15:]
+        for regexp in emparse_list_reason:
+            if type(regexp) is type(''):
+                # per-address pattern: substitute each email for "<>"
+                for i in range(len(emails)-1,-1,-1):
+                    email = emails[i]
+                    exp = re.compile(re.escape(email).join(regexp.split('<>')), re.MULTILINE)
+                    res = exp.search(data)
+                    if res is not None:
+                        errors.append(' '.join((email.strip()+': '+res.group('reason')).split()))
+                        del emails[i]
+                continue
+            res = regexp.search(data)
+            if res is not None:
+                reason = res.group('reason')
+                break
+    for email in emails:
+        errors.append(' '.join((email.strip()+': '+reason).split()))
+    return errors
+
+EMPARSERS = [emparse_list, ]    # registered bounce-body parsers
+
+def sort_numeric(a, b):
+    # cmp-style comparison of numeric filename strings
+    a = int(a)
+    b = int(b)
+    if a < b: return -1
+    elif a > b: return 1
+    else: return 0
+
+def parsedir(dir, modify):
+    """Parse every numeric-named message file in dir and summarize errors.
+
+    If modify is true, handled files are renamed with a ',' prefix.
+    Prints per-file status and a final frequency summary per error string.
+    """
+    os.chdir(dir)
+    pat = re.compile('^[0-9]*$')
+    errordict = {}     # error string -> occurrence count
+    errorfirst = {}    # error string -> "file (date)" of first sighting
+    errorlast = {}     # error string -> "file (date)" of last sighting
+    nok = nwarn = nbad = 0
+
+    # find all numeric file names and sort them
+    files = filter(lambda fn, pat=pat: pat.match(fn) is not None, os.listdir('.'))
+    files.sort(sort_numeric)
+
+    for fn in files:
+        # Lets try to parse the file.
+        fp = open(fn)
+        m = ErrorMessage(fp)
+        sender = m.getaddr('From')
+        print '%s\t%-40s\t'%(fn, sender[1]),
+
+        if m.is_warning():
+            fp.close()
+            print 'warning only'
+            nwarn = nwarn + 1
+            if modify:
+                os.rename(fn, ','+fn)
+##                os.unlink(fn)
+            continue
+
+        try:
+            errors = m.get_errors()
+        except Unparseable:
+            print '** Not parseable'
+            nbad = nbad + 1
+            fp.close()
+            continue
+        print len(errors), 'errors'
+
+        # Remember them
+        for e in errors:
+            try:
+                mm, dd = m.getdate('date')[1:1+2]
+                date = '%s %02d' % (calendar.month_abbr[mm], dd)
+            except:
+                # missing/malformed Date header
+                date = '??????'
+            if not errordict.has_key(e):
+                errordict[e] = 1
+                errorfirst[e] = '%s (%s)' % (fn, date)
+            else:
+                errordict[e] = errordict[e] + 1
+            errorlast[e] = '%s (%s)' % (fn, date)
+
+        fp.close()
+        nok = nok + 1
+        if modify:
+            os.rename(fn, ','+fn)
+##            os.unlink(fn)
+
+    print '--------------'
+    print nok, 'files parsed,',nwarn,'files warning-only,',
+    print nbad,'files unparseable'
+    print '--------------'
+    list = []
+    for e in errordict.keys():
+        list.append((errordict[e], errorfirst[e], errorlast[e], e))
+    list.sort()
+    for num, first, last, e in list:
+        print '%d %s - %s\t%s' % (num, first, last, e)
+
+def main():
+    """Parse the mail folders named on the command line.
+
+    -d renames processed messages (',' prefix) instead of leaving them.
+    """
+    modify = 0
+    if len(sys.argv) > 1 and sys.argv[1] == '-d':
+        modify = 1
+        del sys.argv[1]
+    if len(sys.argv) > 1:
+        for folder in sys.argv[1:]:
+            parsedir(folder, modify)
+    else:
+        # historical default folder of the original author
+        parsedir('/ufs/jack/Mail/errorsinbox', modify)
+
+if __name__ == '__main__' or sys.argv[0] == __name__:
+    main()
diff --git a/lib/python2.7/Tools/scripts/md5sum.py b/lib/python2.7/Tools/scripts/md5sum.py
new file mode 100644
index 0000000..a93697e
--- /dev/null
+++ b/lib/python2.7/Tools/scripts/md5sum.py
@@ -0,0 +1,90 @@
+#!/usr/bin/env python2
+
+"""Python utility to print MD5 checksums of argument files.
+"""
+
+
+bufsize = 8096      # read buffer size; overridable with -s
+fnfilter = None     # optional filename transform for output (-l -> basename)
+rmode = 'rb'        # file open mode (-b binary / -t text)
+
+usage = """
+usage: sum5 [-b] [-t] [-l] [-s bufsize] [file ...]
+-b        : read files in binary mode (default)
+-t        : read files in text mode (you almost certainly don't want this!)
+-l        : print last pathname component only
+-s bufsize: read buffer size (default %d)
+file ...  : files to sum; '-' or no files means stdin
+""" % bufsize
+
+import sys
+import os
+import getopt
+import md5
+
+def sum(*files):
+    """Print an MD5 sum per file; return nonzero status on any error.
+
+    The last positional argument may be an open file object to write the
+    output to (default sys.stdout); a single non-string argument is
+    treated as a sequence of filenames.  '-' means stdin.
+    """
+    sts = 0
+    if files and isinstance(files[-1], file):
+        out, files = files[-1], files[:-1]
+    else:
+        out = sys.stdout
+    if len(files) == 1 and not isinstance(files[0], str):
+        files = files[0]
+    for f in files:
+        if isinstance(f, str):
+            if f == '-':
+                sts = printsumfp(sys.stdin, '<stdin>', out) or sts
+            else:
+                sts = printsum(f, out) or sts
+        else:
+            # nested sequence: recurse
+            sts = sum(f, out) or sts
+    return sts
+
+def printsum(filename, out=sys.stdout):
+    # Open filename (per rmode) and print its sum; 1 on error, 0 on success.
+    try:
+        fp = open(filename, rmode)
+    except IOError, msg:
+        sys.stderr.write('%s: Can\'t open: %s\n' % (filename, msg))
+        return 1
+    if fnfilter:
+        # e.g. strip to the basename when -l was given
+        filename = fnfilter(filename)
+    sts = printsumfp(fp, filename, out)
+    fp.close()
+    return sts
+
+def printsumfp(fp, filename, out=sys.stdout):
+    # Digest fp in bufsize chunks; write '<hexdigest> <filename>' to out.
+    m = md5.new()
+    try:
+        while 1:
+            data = fp.read(bufsize)
+            if not data:
+                break
+            m.update(data)
+    except IOError, msg:
+        sys.stderr.write('%s: I/O error: %s\n' % (filename, msg))
+        return 1
+    out.write('%s %s\n' % (m.hexdigest(), filename))
+    return 0
+
+def main(args = sys.argv[1:], out=sys.stdout):
+    """Parse options, then sum the named files (or stdin); return status."""
+    global fnfilter, rmode, bufsize
+    try:
+        opts, args = getopt.getopt(args, 'blts:')
+    except getopt.error, msg:
+        sys.stderr.write('%s: %s\n%s' % (sys.argv[0], msg, usage))
+        return 2
+    for o, a in opts:
+        if o == '-l':
+            fnfilter = os.path.basename
+        elif o == '-b':
+            rmode = 'rb'
+        elif o == '-t':
+            rmode = 'r'
+        elif o == '-s':
+            bufsize = int(a)
+    if not args:
+        args = ['-']
+    return sum(args, out)
+
+if __name__ == '__main__' or __name__ == sys.argv[0]:
+    sys.exit(main(sys.argv[1:], sys.stdout))
diff --git a/lib/python2.7/Tools/scripts/methfix.py b/lib/python2.7/Tools/scripts/methfix.py
new file mode 100644
index 0000000..ba464f2
--- /dev/null
+++ b/lib/python2.7/Tools/scripts/methfix.py
@@ -0,0 +1,171 @@
+#!/usr/bin/env python2
+
+# Fix Python source files to avoid using
+# def method(self, (arg1, ..., argn)):
+# instead of the more rational
+# def method(self, arg1, ..., argn):
+#
+# Command line arguments are files or directories to be processed.
+# Directories are searched recursively for files whose name looks
+# like a python module.
+# Symbolic links are always ignored (except as explicit directory
+# arguments). Of course, the original file is kept as a back-up
+# (with a "~" attached to its name).
+# It complains about binaries (files containing null bytes)
+# and about files that are ostensibly not Python files: if the first
+# line starts with '#!' and does not contain the string 'python'.
+#
+# Changes made are reported to stdout in a diff-like format.
+#
+# Undoubtedly you can do this using find and sed or perl, but this is
+# a nice example of Python code that recurses down a directory tree
+# and uses regular expressions. Also note several subtleties like
+# preserving the file's mode and avoiding to even write a temp file
+# when no changes are needed for a file.
+#
+# NB: by changing only the function fixline() you can turn this
+# into a program for a different change to Python programs...
+
+import sys
+import re
+import os
+from stat import *
+
+err = sys.stderr.write      # error output
+dbg = err                   # debug output (also stderr)
+rep = sys.stdout.write      # change reports
+
+def main():
+    """Fix each file/directory argument; exit 2 on usage error, 1 on failure."""
+    bad = 0
+    if not sys.argv[1:]: # No arguments
+        err('usage: ' + sys.argv[0] + ' file-or-directory ...\n')
+        sys.exit(2)
+    for arg in sys.argv[1:]:
+        if os.path.isdir(arg):
+            if recursedown(arg): bad = 1
+        elif os.path.islink(arg):
+            err(arg + ': will not process symbolic links\n')
+            bad = 1
+        else:
+            if fix(arg): bad = 1
+    sys.exit(bad)
+
+ispythonprog = re.compile('^[a-zA-Z0-9_]+\.py$')
+def ispython(name):
+    # NOTE(review): leftover from the old 'regex' module.  With 're',
+    # match() returns a match object or None, and 'obj >= 0' is always
+    # true under Python 2 mixed-type comparison, so this accepts every
+    # name; a plain truth test on the match is what's intended.
+    return ispythonprog.match(name) >= 0
+
+def recursedown(dirname):
+    """Fix all Python files below dirname; return 1 if any file failed."""
+    dbg('recursedown(%r)\n' % (dirname,))
+    bad = 0
+    try:
+        names = os.listdir(dirname)
+    except os.error, msg:
+        err('%s: cannot list directory: %r\n' % (dirname, msg))
+        return 1
+    names.sort()
+    subdirs = []
+    for name in names:
+        if name in (os.curdir, os.pardir): continue
+        fullname = os.path.join(dirname, name)
+        if os.path.islink(fullname): pass    # symlinks are skipped
+        elif os.path.isdir(fullname):
+            subdirs.append(fullname)
+        elif ispython(name):
+            if fix(fullname): bad = 1
+    for fullname in subdirs:
+        if recursedown(fullname): bad = 1
+    return bad
+
+def fix(filename):
+    """Rewrite filename via fixline(), keeping a '~' backup.
+
+    Returns 1 on failure, 0 on success (including "no changes needed").
+    A temp file is only created once the first changed line is found.
+    """
+##    dbg('fix(%r)\n' % (filename,))
+    try:
+        f = open(filename, 'r')
+    except IOError, msg:
+        err('%s: cannot open: %r\n' % (filename, msg))
+        return 1
+    head, tail = os.path.split(filename)
+    tempname = os.path.join(head, '@' + tail)
+    g = None
+    # If we find a match, we rewind the file and start over but
+    # now copy everything to a temp file.
+    lineno = 0
+    while 1:
+        line = f.readline()
+        if not line: break
+        lineno = lineno + 1
+        if g is None and '\0' in line:
+            # Check for binary files
+            err(filename + ': contains null bytes; not fixed\n')
+            f.close()
+            return 1
+        if lineno == 1 and g is None and line[:2] == '#!':
+            # Check for non-Python scripts
+            words = line[2:].split()
+            if words and re.search('[pP]ython', words[0]) < 0:
+                msg = filename + ': ' + words[0]
+                msg = msg + ' script; not fixed\n'
+                err(msg)
+                f.close()
+                return 1
+        # join backslash-continued lines before matching
+        while line[-2:] == '\\\n':
+            nextline = f.readline()
+            if not nextline: break
+            line = line + nextline
+            lineno = lineno + 1
+        newline = fixline(line)
+        if newline != line:
+            if g is None:
+                try:
+                    g = open(tempname, 'w')
+                except IOError, msg:
+                    f.close()
+                    err('%s: cannot create: %r\n' % (tempname, msg))
+                    return 1
+                f.seek(0)
+                lineno = 0
+                rep(filename + ':\n')
+                continue # restart from the beginning
+            rep(repr(lineno) + '\n')
+            rep('< ' + line)
+            rep('> ' + newline)
+        if g is not None:
+            g.write(newline)
+
+    # End of file
+    f.close()
+    if not g: return 0 # No changes
+
+    # Finishing touch -- move files
+
+    # First copy the file's mode to the temp file
+    try:
+        statbuf = os.stat(filename)
+        os.chmod(tempname, statbuf[ST_MODE] & 07777)
+    except os.error, msg:
+        err('%s: warning: chmod failed (%r)\n' % (tempname, msg))
+    # Then make a backup of the original file as filename~
+    try:
+        os.rename(filename, filename + '~')
+    except os.error, msg:
+        err('%s: warning: backup failed (%r)\n' % (filename, msg))
+    # Now move the temp file to the original file
+    try:
+        os.rename(tempname, filename)
+    except os.error, msg:
+        err('%s: rename failed (%r)\n' % (filename, msg))
+        return 1
+    # Return success
+    return 0
+
+
+# Pattern for "def method(self, (args)):" -- the form being removed.
+fixpat = '^[ \t]+def +[a-zA-Z0-9_]+ *( *self *, *(( *(.*) *)) *) *:'
+fixprog = re.compile(fixpat)
+
+def fixline(line):
+    # NOTE(review): also written for the old 'regex' module.  With 're',
+    # match() returns a match object or None (never an int), and pattern
+    # objects have no .regs attribute -- this would raise on a match.
+    if fixprog.match(line) >= 0:
+        (a, b), (c, d) = fixprog.regs[1:3]
+        line = line[:a] + line[c:d] + line[b:]
+    return line
+
+if __name__ == '__main__':
+    main()
diff --git a/lib/python2.7/Tools/scripts/mkreal.py b/lib/python2.7/Tools/scripts/mkreal.py
new file mode 100644
index 0000000..fab3299
--- /dev/null
+++ b/lib/python2.7/Tools/scripts/mkreal.py
@@ -0,0 +1,66 @@
+#!/usr/bin/env python2
+
+# mkreal
+#
+# turn a symlink to a directory into a real directory
+
+import sys
+import os
+from stat import *
+
+join = os.path.join
+
+error = 'mkreal error'    # string exception (not raised in visible code)
+
+BUFSIZE = 32*1024         # copy chunk size
+
+def mkrealfile(name):
+    """Replace the symlink `name` with a real copy of the file it points to."""
+    st = os.stat(name)                  # Get the mode
+    mode = S_IMODE(st[ST_MODE])
+    linkto = os.readlink(name)          # Make sure again it's a symlink
+    f_in = open(name, 'r')              # This ensures it's a file
+    os.unlink(name)
+    f_out = open(name, 'w')
+    while 1:
+        buf = f_in.read(BUFSIZE)
+        if not buf: break
+        f_out.write(buf)
+    del f_out                   # Flush data to disk before changing mode
+    os.chmod(name, mode)
+
+def mkrealdir(name):
+    """Replace the symlink `name` with a real directory of symlinks.
+
+    Each entry of the target directory becomes a symlink (via os.pardir)
+    back into the original location.
+    """
+    st = os.stat(name)                  # Get the mode
+    mode = S_IMODE(st[ST_MODE])
+    linkto = os.readlink(name)
+    files = os.listdir(name)
+    os.unlink(name)
+    os.mkdir(name, mode)
+    os.chmod(name, mode)
+    linkto = join(os.pardir, linkto)
+    #
+    for filename in files:
+        if filename not in (os.curdir, os.pardir):
+            os.symlink(join(linkto, filename), join(name, filename))
+
+def main():
+    """Turn each symlink argument into a real file or directory."""
+    sys.stdout = sys.stderr    # all output is diagnostic
+    progname = os.path.basename(sys.argv[0])
+    if progname == '-c': progname = 'mkreal'
+    args = sys.argv[1:]
+    if not args:
+        print 'usage:', progname, 'path ...'
+        sys.exit(2)
+    status = 0
+    for name in args:
+        if not os.path.islink(name):
+            print progname+':', name+':', 'not a symlink'
+            status = 1
+        else:
+            if os.path.isdir(name):
+                mkrealdir(name)
+            else:
+                mkrealfile(name)
+    sys.exit(status)
+
+if __name__ == '__main__':
+    main()
diff --git a/lib/python2.7/Tools/scripts/ndiff.py b/lib/python2.7/Tools/scripts/ndiff.py
new file mode 100644
index 0000000..d858f73
--- /dev/null
+++ b/lib/python2.7/Tools/scripts/ndiff.py
@@ -0,0 +1,133 @@
+#!/usr/bin/env python2
+
+# Module ndiff version 1.7.0
+# Released to the public domain 08-Dec-2000,
+# by Tim Peters (tim.one@home.com).
+
+# Provided as-is; use at your own risk; no warranty; no promises; enjoy!
+
+# ndiff.py is now simply a front-end to the difflib.ndiff() function.
+# Originally, it contained the difflib.SequenceMatcher class as well.
+# This completes the raiding of reusable code from this formerly
+# self-contained script.
+
+"""ndiff [-q] file1 file2
+ or
+ndiff (-r1 | -r2) < ndiff_output > file1_or_file2
+
+Print a human-friendly file difference report to stdout. Both inter-
+and intra-line differences are noted. In the second form, recreate file1
+(-r1) or file2 (-r2) on stdout, from an ndiff report on stdin.
+
+In the first form, if -q ("quiet") is not specified, the first two lines
+of output are
+
+-: file1
++: file2
+
+Each remaining line begins with a two-letter code:
+
+ "- " line unique to file1
+ "+ " line unique to file2
+ " " line common to both files
+ "? " line not present in either input file
+
+Lines beginning with "? " attempt to guide the eye to intraline
+differences, and were not present in either input file. These lines can be
+confusing if the source files contain tab characters.
+
+The first file can be recovered by retaining only lines that begin with
+" " or "- ", and deleting those 2-character prefixes; use ndiff with -r1.
+
+The second file can be recovered similarly, but by retaining only " " and
+"+ " lines; use ndiff with -r2; or, on Unix, the second file can be
+recovered by piping the output through
+
+ sed -n '/^[+ ] /s/^..//p'
+"""
+
+__version__ = 1, 7, 0
+
+import difflib, sys
+
+def fail(msg):
+    # Write msg plus the usage text to stderr; returns 0 (falsy) so
+    # callers can simply 'return fail(...)'.
+    out = sys.stderr.write
+    out(msg + "\n\n")
+    out(__doc__)
+    return 0
+
+# open a file & return the file object; gripe and return 0 if it
+# couldn't be opened
+def fopen(fname):
+    try:
+        return open(fname, 'U')
+    except IOError, detail:
+        return fail("couldn't open " + fname + ": " + str(detail))
+
+# open two files & spray the diff to stdout; return false iff a problem
+def fcompare(f1name, f2name):
+    f1 = fopen(f1name)
+    f2 = fopen(f2name)
+    if not f1 or not f2:
+        return 0
+
+    a = f1.readlines(); f1.close()
+    b = f2.readlines(); f2.close()
+    # difflib lines keep their newlines -- trailing comma avoids doubling
+    for line in difflib.ndiff(a, b):
+        print line,
+
+    return 1
+
+# crack args (sys.argv[1:] is normal) & compare;
+# return false iff a problem
+
+def main(args):
+    """Handle -q/-r options; diff two files or restore one from stdin."""
+    import getopt
+    try:
+        opts, args = getopt.getopt(args, "qr:")
+    except getopt.error, detail:
+        return fail(str(detail))
+    noisy = 1
+    qseen = rseen = 0
+    for opt, val in opts:
+        if opt == "-q":
+            qseen = 1
+            noisy = 0
+        elif opt == "-r":
+            rseen = 1
+            whichfile = val
+    if qseen and rseen:
+        return fail("can't specify both -q and -r")
+    if rseen:
+        if args:
+            return fail("no args allowed with -r option")
+        if whichfile in ("1", "2"):
+            restore(whichfile)
+            return 1
+        return fail("-r value must be 1 or 2")
+    if len(args) != 2:
+        return fail("need 2 filename args")
+    f1name, f2name = args
+    if noisy:
+        print '-:', f1name
+        print '+:', f2name
+    return fcompare(f1name, f2name)
+
+# read ndiff output from stdin, and print file1 (which=='1') or
+# file2 (which=='2') to stdout
+
+def restore(which):
+    restored = difflib.restore(sys.stdin.readlines(), which)
+    sys.stdout.writelines(restored)
+
+if __name__ == '__main__':
+    args = sys.argv[1:]
+    if "-profile" in args:
+        # hidden option: profile the run and dump stats by time
+        import profile, pstats
+        args.remove("-profile")
+        statf = "ndiff.pro"
+        profile.run("main(args)", statf)
+        stats = pstats.Stats(statf)
+        stats.strip_dirs().sort_stats('time').print_stats()
+    else:
+        main(args)
diff --git a/lib/python2.7/Tools/scripts/nm2def.py b/lib/python2.7/Tools/scripts/nm2def.py
new file mode 100644
index 0000000..612e81a
--- /dev/null
+++ b/lib/python2.7/Tools/scripts/nm2def.py
@@ -0,0 +1,103 @@
+#!/usr/bin/env python2
+"""nm2def.py
+
+Helpers to extract symbols from Unix libs and auto-generate
+Windows definition files from them. Depends on nm(1). Tested
+on Linux and Solaris only (-p option to nm is for Solaris only).
+
+By Marc-Andre Lemburg, Aug 1998.
+
+Additional notes: the output of nm is supposed to look like this:
+
+acceler.o:
+000001fd T PyGrammar_AddAccelerators
+ U PyGrammar_FindDFA
+00000237 T PyGrammar_RemoveAccelerators
+ U _IO_stderr_
+ U exit
+ U fprintf
+ U free
+ U malloc
+ U printf
+
+grammar1.o:
+00000000 T PyGrammar_FindDFA
+00000034 T PyGrammar_LabelRepr
+ U _PyParser_TokenNames
+ U abort
+ U printf
+ U sprintf
+
+...
+
+Even if this isn't the default output of your nm, there is generally an
+option to produce this format (since it is the original v7 Unix format).
+
+"""
import os, sys

# Static library to read symbols from, e.g. "libpython2.7.a".
PYTHONLIB = 'libpython'+sys.version[:3]+'.a'
# Matching Windows DLL name, e.g. "Python27.dll".
PC_PYTHONLIB = 'Python'+sys.version[0]+sys.version[2]+'.dll'
# Command used to list global symbols; %s receives the library path.
NM = 'nm -p -g %s'                      # For Linux, use "nm -g %s"
+
def symbols(lib=PYTHONLIB, types=('T', 'C', 'D')):
    """Run nm over *lib* and map each exported name to (address, type).

    Only symbols whose one-letter nm type code is in *types* are kept;
    object-file header lines ("foo.o:") and lines that do not split
    into exactly address/type/name fields are skipped.
    """
    table = {}
    for raw in os.popen(NM % lib).readlines():
        entry = raw.strip()
        if not entry or ':' in entry:
            continue
        fields = entry.split()
        if len(fields) != 3:
            continue
        addr, code, name = fields
        if code in types:
            table[name] = (addr, code)
    return table
+
def export_list(symbols):
    """Format a symbol table (as built by symbols()) for a .def file.

    Data symbols (nm types C and D) come first, each tagged with the
    DATA keyword; code symbols follow, one per line, all sorted.
    """
    data, code = [], []
    for name, (addr, kind) in symbols.items():
        bucket = data if kind in ('C', 'D') else code
        bucket.append('\t' + name)
    data.sort()
    data.append('')
    code.sort()
    return ' DATA\n'.join(data) + '\n' + '\n'.join(code)
+
# Template for the generated .def file; %s receives the export list
# produced by export_list().
DEF_TEMPLATE = """\
EXPORTS
%s
"""
+
# Special symbols that have to be included even though they don't
# pass the filter
SPECIALS = (
    )

def filter_Python(symbols, specials=SPECIALS):
    """Prune *symbols* in place, keeping only the Python API.

    A name survives if it starts with 'Py' or '_Py', or if it is
    listed in *specials*; every other entry is deleted from the dict.
    """
    # Snapshot the keys before deleting: iterating symbols.keys()
    # directly while mutating the dict only worked by accident of
    # Python 2 returning a fresh list.
    for name in list(symbols.keys()):
        if name.startswith('Py') or name.startswith('_Py'):
            continue
        if name not in specials:
            del symbols[name]
+
def main():
    """Write a Windows .def export file for the Python library to stdout."""
    s = symbols(PYTHONLIB)
    filter_Python(s)
    exports = export_list(s)
    f = sys.stdout # open('PC/python_nt.def','w')
    f.write(DEF_TEMPLATE % (exports))
    # Only close the stream if we opened it ourselves: the old
    # unconditional f.close() closed sys.stdout, so any later write to
    # stdout would have failed.
    if f is not sys.stdout:
        f.close()

if __name__ == '__main__':
    main()
diff --git a/lib/python2.7/Tools/scripts/objgraph.py b/lib/python2.7/Tools/scripts/objgraph.py
new file mode 100644
index 0000000..dd58f15
--- /dev/null
+++ b/lib/python2.7/Tools/scripts/objgraph.py
@@ -0,0 +1,215 @@
+#!/usr/bin/env python2
+
+# objgraph
+#
+# Read "nm -o" input (on IRIX: "nm -Bo") of a set of libraries or modules
+# and print various interesting listings, such as:
+#
+# - which names are used but not defined in the set (and used where),
+# - which names are defined in the set (and where),
+# - which modules use which other modules,
+# - which modules are used by which other modules.
+#
+# Usage: objgraph [-cdu] [file] ...
+# -c: print callers per objectfile
+# -d: print callees per objectfile
+# -u: print usage of undefined symbols
+# If none of -cdu is specified, all are assumed.
+# Use "nm -o" to generate the input (on IRIX: "nm -Bo"),
+# e.g.: nm -o /lib/libc.a | objgraph
+
+
+import sys
+import os
+import getopt
+import re
+
# Types of symbols.
#
# One-letter nm type codes: a code in 'definitions' means the symbol is
# defined in the object file, a code in 'externals' means it is an
# unresolved reference, and codes in 'ignore' (mostly local symbols)
# are skipped silently.
definitions = 'TRGDSBAEC'
externals = 'UV'
ignore = 'Nntrgdsbavuc'

# Regular expression to parse "nm -o" output.
#
# Groups: 1 = object file name, 2 = one-letter type code, 3 = symbol name.
matcher = re.compile('(.*):\t?........ (.) (.*)$')
+
+# Store "item" in "dict" under "key".
+# The dictionary maps keys to lists of items.
+# If there is no list for the key yet, it is created.
+#
+def store(dict, key, item):
+ if dict.has_key(key):
+ dict[key].append(item)
+ else:
+ dict[key] = [item]
+
# Return a flattened version of a list of strings: the concatenation
# of its elements with intervening spaces.
#
def flat(list):
    return ' '.join(list)
+
# Global variables mapping defined/undefined names to files and back.
#
file2undef = {}   # object file -> names it references but does not define
def2file = {}     # defined name -> object files defining it
file2def = {}     # object file -> names it defines
undef2file = {}   # referenced name -> object files referencing it
+
+# Read one input file and merge the data into the tables.
+# Argument is an open file.
+#
+def readinput(fp):
+ while 1:
+ s = fp.readline()
+ if not s:
+ break
+ # If you get any output from this line,
+ # it is probably caused by an unexpected input line:
+ if matcher.search(s) < 0: s; continue # Shouldn't happen
+ (ra, rb), (r1a, r1b), (r2a, r2b), (r3a, r3b) = matcher.regs[:4]
+ fn, name, type = s[r1a:r1b], s[r3a:r3b], s[r2a:r2b]
+ if type in definitions:
+ store(def2file, name, fn)
+ store(file2def, fn, name)
+ elif type in externals:
+ store(file2undef, fn, name)
+ store(undef2file, name, fn)
+ elif not type in ignore:
+ print fn + ':' + name + ': unknown type ' + type
+
# Print all names that were undefined in some module and where they are
# defined.
#
def printcallee():
    flist = file2undef.keys()
    flist.sort()
    for filename in flist:
        print filename + ':'
        # NOTE: sorts the shared list stored in the table in place.
        elist = file2undef[filename]
        elist.sort()
        for ext in elist:
            # Pad short names with an extra tab so the right-hand
            # column lines up (8 characters is one tab stop).
            if len(ext) >= 8:
                tabs = '\t'
            else:
                tabs = '\t\t'
            if not def2file.has_key(ext):
                print '\t' + ext + tabs + ' *undefined'
            else:
                print '\t' + ext + tabs + flat(def2file[ext])
+
# Print for each module the names of the other modules that use it.
#
def printcaller():
    files = file2def.keys()
    files.sort()
    for filename in files:
        callers = []
        for label in file2def[filename]:
            if undef2file.has_key(label):
                callers = callers + undef2file[label]
        if callers:
            callers.sort()
            print filename + ':'
            lastfn = ''
            # callers is sorted, so duplicates are adjacent; printing
            # only on change de-duplicates the listing.
            for fn in callers:
                if fn <> lastfn:
                    print '\t' + fn
                lastfn = fn
        else:
            print filename + ': unused'
+
# Print undefined names and where they are used.
#
def printundef():
    # Collect names that are referenced somewhere but defined nowhere.
    undefs = {}
    for filename in file2undef.keys():
        for ext in file2undef[filename]:
            if not def2file.has_key(ext):
                store(undefs, ext, filename)
    elist = undefs.keys()
    elist.sort()
    for ext in elist:
        print ext + ':'
        flist = undefs[ext]
        flist.sort()
        for filename in flist:
            print '\t' + filename
+
# Print warning messages about names defined in more than one file.
#
def warndups():
    # Temporarily route print output to stderr so the warnings don't
    # mix with the report listings on stdout.
    savestdout = sys.stdout
    sys.stdout = sys.stderr
    names = def2file.keys()
    names.sort()
    for name in names:
        if len(def2file[name]) > 1:
            print 'warning:', name, 'multiply defined:',
            print flat(def2file[name])
    sys.stdout = savestdout
+
# Main program
#
def main():
    """Parse options, read all input, print the selected reports.

    Returns the process exit status (0 on success, 1 on usage error).
    """
    try:
        optlist, args = getopt.getopt(sys.argv[1:], 'cdu')
    except getopt.error:
        sys.stdout = sys.stderr
        print 'Usage:', os.path.basename(sys.argv[0]),
        print '[-cdu] [file] ...'
        print '-c: print callers per objectfile'
        print '-d: print callees per objectfile'
        print '-u: print usage of undefined symbols'
        print 'If none of -cdu is specified, all are assumed.'
        print 'Use "nm -o" to generate the input (on IRIX: "nm -Bo"),'
        print 'e.g.: nm -o /lib/libc.a | objgraph'
        return 1
    optu = optc = optd = 0
    for opt, void in optlist:
        if opt == '-u':
            optu = 1
        elif opt == '-c':
            optc = 1
        elif opt == '-d':
            optd = 1
    # No selection means every report.
    if optu == optc == optd == 0:
        optu = optc = optd = 1
    # "-" (the default) means read from stdin.
    if not args:
        args = ['-']
    for filename in args:
        if filename == '-':
            readinput(sys.stdin)
        else:
            readinput(open(filename, 'r'))
    #
    warndups()
    #
    # Section banners are printed only when more than one report runs.
    more = (optu + optc + optd > 1)
    if optd:
        if more:
            print '---------------All callees------------------'
        printcallee()
    if optu:
        if more:
            print '---------------Undefined callees------------'
        printundef()
    if optc:
        if more:
            print '---------------All Callers------------------'
        printcaller()
    return 0
+
# Call the main program.
# Use its return value as exit status.
# Catch interrupts to avoid stack trace.
#
if __name__ == '__main__':
    try:
        sys.exit(main())
    except KeyboardInterrupt:
        # Exit quietly (status 1) instead of dumping a traceback.
        sys.exit(1)
diff --git a/lib/python2.7/Tools/scripts/parseentities.py b/lib/python2.7/Tools/scripts/parseentities.py
new file mode 100644
index 0000000..45a98ee
--- /dev/null
+++ b/lib/python2.7/Tools/scripts/parseentities.py
@@ -0,0 +1,64 @@
+#!/usr/bin/env python2
+""" Utility for parsing HTML entity definitions available from:
+
+ http://www.w3.org/ as e.g.
+ http://www.w3.org/TR/REC-html40/HTMLlat1.ent
+
+ Input is read from stdin, output is written to stdout in form of a
+ Python snippet defining a dictionary "entitydefs" mapping literal
+ entity name to character or numeric entity.
+
+ Marc-Andre Lemburg, mal@lemburg.com, 1999.
+ Use as you like. NO WARRANTIES.
+
+"""
+import re,sys
+import TextTools
+
# Groups: 1 = entity name, 2 = replacement text, 3 = trailing comment.
entityRE = re.compile('<!ENTITY +(\w+) +CDATA +"([^"]+)" +-- +((?:.|\n)+?) *-->')

def parse(text, pos=0, endpos=None):
    """Scan *text* for <!ENTITY ...> definitions.

    Returns a dict mapping entity name -> (replacement text, comment).
    Scanning starts at *pos* and stops at *endpos* (defaulting to the
    end of the text).
    """
    # NOTE: the old code reset pos to 0 here, silently ignoring the
    # caller's start offset; the parameter is honored now.
    if endpos is None:
        endpos = len(text)
    d = {}
    while 1:
        m = entityRE.search(text, pos, endpos)
        if not m:
            break
        name, charcode, comment = m.groups()
        d[name] = charcode, comment
        pos = m.end()
    return d
+
def writefile(f,defs):
    """Write *defs* (as returned by parse()) to the open file *f* as a
    Python snippet defining an "entitydefs" dictionary."""
    f.write("entitydefs = {\n")
    items = defs.items()
    items.sort()
    for name,(charcode,comment) in items:
        if charcode[:2] == '&#':
            code = int(charcode[2:-1])
            if code < 256:
                # Latin-1 character: emit it as an octal string escape.
                charcode = "'\%o'" % code
            else:
                charcode = repr(charcode)
        else:
            charcode = repr(charcode)
        # TextTools.collapse normalizes whitespace runs in the comment.
        comment = TextTools.collapse(comment)
        f.write("    '%s':\t%s, \t# %s\n" % (name,charcode,comment))
    f.write('\n}\n')
+
if __name__ == '__main__':
    # Input defaults to stdin, output to stdout; optional positional
    # arguments name an input and an output file instead.
    if len(sys.argv) > 1:
        infile = open(sys.argv[1])
    else:
        infile = sys.stdin
    if len(sys.argv) > 2:
        outfile = open(sys.argv[2],'w')
    else:
        outfile = sys.stdout
    text = infile.read()
    defs = parse(text)
    writefile(outfile,defs)
diff --git a/lib/python2.7/Tools/scripts/patchcheck.py b/lib/python2.7/Tools/scripts/patchcheck.py
new file mode 100644
index 0000000..55f8b4c
--- /dev/null
+++ b/lib/python2.7/Tools/scripts/patchcheck.py
@@ -0,0 +1,185 @@
+#!/usr/bin/env python2
+import re
+import sys
+import shutil
+import os.path
+import subprocess
+import sysconfig
+
+import reindent
+import untabify
+
+
+SRCDIR = sysconfig.get_config_var('srcdir')
+
+
def n_files_str(count):
    """Return 'N file(s)' with the proper plurality on 'file'."""
    suffix = "" if count == 1 else "s"
    return "{} file{}".format(count, suffix)
+
+
def status(message, modal=False, info=None):
    """Decorator to output status info to stdout.

    message -- text printed (without newline) before the wrapped call.
    modal   -- if true and no *info* is given, print "yes"/"NO"
               according to the truth of the result.
    info    -- optional callable mapping the result to the text printed
               after the call; takes precedence over *modal*.
    """
    def decorated_fxn(fxn):
        def call_fxn(*args, **kwargs):
            sys.stdout.write(message + ' ... ')
            sys.stdout.flush()
            result = fxn(*args, **kwargs)
            if not modal and not info:
                print "done"
            elif info:
                print info(result)
            else:
                print "yes" if result else "NO"
            return result
        return call_fxn
    return decorated_fxn
+
+
def mq_patches_applied():
    """Check if there are any applied MQ patches."""
    cmd = 'hg qapplied'
    st = subprocess.Popen(cmd.split(),
                          stdout=subprocess.PIPE,
                          stderr=subprocess.PIPE)
    try:
        # Non-empty output with a zero exit status means patches are
        # applied; any failure (e.g. MQ not enabled) counts as "no".
        bstdout, _ = st.communicate()
        return st.returncode == 0 and bstdout
    finally:
        st.stdout.close()
        st.stderr.close()
+
+
+@status("Getting the list of files that have been added/changed",
+ info=lambda x: n_files_str(len(x)))
+def changed_files():
+ """Get the list of changed or added files from the VCS."""
+ if os.path.isdir(os.path.join(SRCDIR, '.hg')):
+ vcs = 'hg'
+ cmd = 'hg status --added --modified --no-status'
+ if mq_patches_applied():
+ cmd += ' --rev qparent'
+ elif os.path.isdir('.svn'):
+ vcs = 'svn'
+ cmd = 'svn status --quiet --non-interactive --ignore-externals'
+ else:
+ sys.exit('need a checkout to get modified files')
+
+ st = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)
+ try:
+ st.wait()
+ if vcs == 'hg':
+ return [x.decode().rstrip() for x in st.stdout]
+ else:
+ output = (x.decode().rstrip().rsplit(None, 1)[-1]
+ for x in st.stdout if x[0] in 'AM')
+ return set(path for path in output if os.path.isfile(path))
+ finally:
+ st.stdout.close()
+
+
def report_modified_files(file_paths):
    """Render a count of *file_paths* plus one indented line per path."""
    count = len(file_paths)
    if not file_paths:
        return n_files_str(count)
    lines = ["{}:".format(n_files_str(count))]
    lines.extend("  {}".format(path) for path in file_paths)
    return "\n".join(lines)
+
+
+@status("Fixing whitespace", info=report_modified_files)
+def normalize_whitespace(file_paths):
+ """Make sure that the whitespace for .py files have been normalized."""
+ reindent.makebackup = False # No need to create backups.
+ fixed = []
+ for path in (x for x in file_paths if x.endswith('.py')):
+ if reindent.check(os.path.join(SRCDIR, path)):
+ fixed.append(path)
+ return fixed
+
+
+@status("Fixing C file whitespace", info=report_modified_files)
+def normalize_c_whitespace(file_paths):
+ """Report if any C files """
+ fixed = []
+ for path in file_paths:
+ abspath = os.path.join(SRCDIR, path)
+ with open(abspath, 'r') as f:
+ if '\t' not in f.read():
+ continue
+ untabify.process(abspath, 8, verbose=False)
+ fixed.append(path)
+ return fixed
+
+
# Trailing whitespace before a (possibly CRLF) line ending.
ws_re = re.compile(br'\s+(\r?\n)$')

@status("Fixing docs whitespace", info=report_modified_files)
def normalize_docs_whitespace(file_paths):
    """Strip trailing whitespace from doc files, keeping a .bak backup."""
    fixed = []
    for path in file_paths:
        abspath = os.path.join(SRCDIR, path)
        try:
            with open(abspath, 'rb') as f:
                lines = f.readlines()
            new_lines = [ws_re.sub(br'\1', line) for line in lines]
            if new_lines != lines:
                shutil.copyfile(abspath, abspath + '.bak')
                with open(abspath, 'wb') as f:
                    f.writelines(new_lines)
                fixed.append(path)
        except Exception as err:
            # Best effort: report and continue with the next file.
            print 'Cannot fix %s: %s' % (path, err)
    return fixed
+
+
+@status("Docs modified", modal=True)
+def docs_modified(file_paths):
+ """Report if any file in the Doc directory has been changed."""
+ return bool(file_paths)
+
+
+@status("Misc/ACKS updated", modal=True)
+def credit_given(file_paths):
+ """Check if Misc/ACKS has been changed."""
+ return os.path.join('Misc', 'ACKS') in file_paths
+
+
+@status("Misc/NEWS updated", modal=True)
+def reported_news(file_paths):
+ """Check if Misc/NEWS has been changed."""
+ return os.path.join('Misc', 'NEWS') in file_paths
+
+
def main():
    """Run every whitespace fixer and print the review reminders."""
    file_paths = changed_files()
    python_files = [fn for fn in file_paths if fn.endswith('.py')]
    c_files = [fn for fn in file_paths if fn.endswith(('.c', '.h'))]
    doc_files = [fn for fn in file_paths if fn.startswith('Doc') and
                 fn.endswith(('.rst', '.inc'))]
    misc_files = {os.path.join('Misc', 'ACKS'), os.path.join('Misc', 'NEWS')}\
            & set(file_paths)
    # PEP 8 whitespace rules enforcement.
    normalize_whitespace(python_files)
    # C rules enforcement.
    normalize_c_whitespace(c_files)
    # Doc whitespace enforcement.
    normalize_docs_whitespace(doc_files)
    # Docs updated.
    docs_modified(doc_files)
    # Misc/ACKS changed.
    credit_given(misc_files)
    # Misc/NEWS changed.
    reported_news(misc_files)

    # Test suite run and passed.
    if python_files or c_files:
        end = " and check for refleaks?" if c_files else "?"
        print
        print "Did you run the test suite" + end


if __name__ == '__main__':
    main()
diff --git a/lib/python2.7/Tools/scripts/pathfix.py b/lib/python2.7/Tools/scripts/pathfix.py
new file mode 100644
index 0000000..7a6af5d
--- /dev/null
+++ b/lib/python2.7/Tools/scripts/pathfix.py
@@ -0,0 +1,149 @@
+#!/usr/bin/env python2
+
+# Change the #! line occurring in Python scripts. The new interpreter
+# pathname must be given with a -i option.
+#
+# Command line arguments are files or directories to be processed.
+# Directories are searched recursively for files whose name looks
+# like a python module.
+# Symbolic links are always ignored (except as explicit directory
+# arguments). Of course, the original file is kept as a back-up
+# (with a "~" attached to its name).
+#
+# Undoubtedly you can do this using find and sed or perl, but this is
+# a nice example of Python code that recurses down a directory tree
+# and uses regular expressions. Also note several subtleties like
+# preserving the file's mode and avoiding to even write a temp file
+# when no changes are needed for a file.
+#
+# NB: by changing only the function fixfile() you can turn this
+# into a program for a different change to Python programs...
+
+import sys
+import re
+import os
+from stat import *
+import getopt
+
# Shorthands for the three output channels used throughout.
err = sys.stderr.write
dbg = err                    # debug output also goes to stderr
rep = sys.stdout.write       # progress reports

# Interpreter path to substitute; set from the -i option in main().
new_interpreter = None
+
def main():
    """Parse -i and the file/directory arguments, then fix everything.

    Exits 2 on usage errors, 1 if any file failed, 0 on success.
    """
    global new_interpreter
    usage = ('usage: %s -i /interpreter file-or-directory ...\n' %
             sys.argv[0])
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'i:')
    except getopt.error, msg:
        err(msg + '\n')
        err(usage)
        sys.exit(2)
    for o, a in opts:
        if o == '-i':
            new_interpreter = a
    # The replacement interpreter must be an absolute path.
    if not new_interpreter or new_interpreter[0] != '/' or not args:
        err('-i option or file-or-directory missing\n')
        err(usage)
        sys.exit(2)
    bad = 0
    for arg in args:
        if os.path.isdir(arg):
            if recursedown(arg): bad = 1
        elif os.path.islink(arg):
            err(arg + ': will not process symbolic links\n')
            bad = 1
        else:
            if fix(arg): bad = 1
    sys.exit(bad)
+
# A Python module filename: identifier characters followed by ".py".
ispythonprog = re.compile('^[a-zA-Z0-9_]+\.py$')
def ispython(name):
    """Return True if *name* looks like a Python module file (foo.py)."""
    # The old "match(name) >= 0" relied on Python 2's arbitrary ordering
    # between mismatched types (a match object or None vs. an int);
    # test the match result explicitly instead.
    return ispythonprog.match(name) is not None
+
def recursedown(dirname):
    """Fix all Python files under *dirname*; return true on any error."""
    dbg('recursedown(%r)\n' % (dirname,))
    bad = 0
    try:
        names = os.listdir(dirname)
    except os.error, msg:
        err('%s: cannot list directory: %r\n' % (dirname, msg))
        return 1
    names.sort()
    subdirs = []
    for name in names:
        if name in (os.curdir, os.pardir): continue
        fullname = os.path.join(dirname, name)
        # Symbolic links are skipped entirely during the recursion.
        if os.path.islink(fullname): pass
        elif os.path.isdir(fullname):
            subdirs.append(fullname)
        elif ispython(name):
            if fix(fullname): bad = 1
    # Recurse into subdirectories after the files at this level.
    for fullname in subdirs:
        if recursedown(fullname): bad = 1
    return bad
+
def fix(filename):
    """Rewrite *filename*'s '#!' line; return 1 on error, falsy otherwise.

    The original file is kept as filename~ and its permission bits are
    copied to the replacement.  Nothing is written at all when the
    first line needs no change.
    """
##  dbg('fix(%r)\n' % (filename,))
    try:
        f = open(filename, 'r')
    except IOError, msg:
        err('%s: cannot open: %r\n' % (filename, msg))
        return 1
    line = f.readline()
    fixed = fixline(line)
    if line == fixed:
        rep(filename+': no change\n')
        f.close()
        return
    # Write the fixed first line plus the remainder of the file to a
    # temp file in the same directory (so os.rename stays on one
    # filesystem).
    head, tail = os.path.split(filename)
    tempname = os.path.join(head, '@' + tail)
    try:
        g = open(tempname, 'w')
    except IOError, msg:
        f.close()
        err('%s: cannot create: %r\n' % (tempname, msg))
        return 1
    rep(filename + ': updating\n')
    g.write(fixed)
    BUFSIZE = 8*1024
    while 1:
        buf = f.read(BUFSIZE)
        if not buf: break
        g.write(buf)
    g.close()
    f.close()

    # Finishing touch -- move files

    # First copy the file's mode to the temp file
    try:
        statbuf = os.stat(filename)
        os.chmod(tempname, statbuf[ST_MODE] & 07777)
    except os.error, msg:
        err('%s: warning: chmod failed (%r)\n' % (tempname, msg))
    # Then make a backup of the original file as filename~
    try:
        os.rename(filename, filename + '~')
    except os.error, msg:
        err('%s: warning: backup failed (%r)\n' % (filename, msg))
    # Now move the temp file to the original file
    try:
        os.rename(tempname, filename)
    except os.error, msg:
        err('%s: rename failed (%r)\n' % (filename, msg))
        return 1
    # Return success
    return 0
+
def fixline(line):
    """Return *line* with its interpreter rewritten when it is a '#!'
    line mentioning python; any other line passes through untouched."""
    if line.startswith('#!') and "python" in line:
        return '#! %s\n' % new_interpreter
    return line

if __name__ == '__main__':
    main()
diff --git a/lib/python2.7/Tools/scripts/pdeps.py b/lib/python2.7/Tools/scripts/pdeps.py
new file mode 100644
index 0000000..e838bcf
--- /dev/null
+++ b/lib/python2.7/Tools/scripts/pdeps.py
@@ -0,0 +1,167 @@
+#!/usr/bin/env python2
+
+# pdeps
+#
+# Find dependencies between a bunch of Python modules.
+#
+# Usage:
+# pdeps file1.py file2.py ...
+#
+# Output:
+# Four tables separated by lines like '--- Closure ---':
+# 1) Direct dependencies, listing which module imports which other modules
+# 2) The inverse of (1)
+# 3) Indirect dependencies, or the closure of the above
+# 4) The inverse of (3)
+#
+# To do:
+# - command line options to select output type
+# - option to automatically scan the Python library for referenced modules
+# - option to limit output to particular modules
+
+
+import sys
+import re
+import os
+
+
# Main program
#
def main():
    """Print the four dependency tables for the files named on the
    command line; returns the process exit status."""
    args = sys.argv[1:]
    if not args:
        print 'usage: pdeps file.py file.py ...'
        return 2
    #
    table = {}
    for arg in args:
        process(arg, table)
    #
    print '--- Uses ---'
    printresults(table)
    #
    print '--- Used By ---'
    inv = inverse(table)
    printresults(inv)
    #
    print '--- Closure of Uses ---'
    reach = closure(table)
    printresults(reach)
    #
    print '--- Closure of Used By ---'
    invreach = inverse(reach)
    printresults(invreach)
    #
    return 0
+
+
# Compiled regular expressions to search for import statements.
# NOTE: the names are historically swapped -- m_import matches
# "from X import ..." lines while m_from matches "import X" lines --
# but they are used consistently below, so behavior is unaffected.
m_import = re.compile('^[ \t]*from[ \t]+([^ \t]+)[ \t]+')
m_from = re.compile('^[ \t]*import[ \t]+([^#]+)')


# Collect data from one file
#
def process(filename, table):
    """Record the modules imported by *filename* in table[modname]."""
    fp = open(filename, 'r')
    try:
        mod = os.path.basename(filename)
        if mod[-3:] == '.py':
            mod = mod[:-3]
        deps = table[mod] = []
        while 1:
            line = fp.readline()
            if not line:
                break
            # Glue together backslash-continued lines.
            while line[-1:] == '\\':
                nextline = fp.readline()
                if not nextline:
                    break
                line = line[:-1] + nextline
            # The old code compared match objects against 0 and read the
            # patterns' (nonexistent) .regs attribute -- leftovers from
            # the pre-"re" regex module; use the match object instead.
            m = m_import.match(line) or m_from.match(line)
            if not m:
                continue
            for word in m.group(1).split(','):
                word = word.strip()
                if word not in deps:
                    deps.append(word)
    finally:
        # The old code leaked the file handle.
        fp.close()
+
+
# Compute closure (this is in fact totally general)
#
def closure(table):
    """Return the transitive closure of *table* (name -> list of names)."""
    modules = list(table.keys())
    # Start from a copy of each dependency list so the input table is
    # left untouched.
    reach = {}
    for mod in modules:
        reach[mod] = table[mod][:]
    # Propagate until a full pass adds nothing new.
    changed = 1
    while changed:
        changed = 0
        for mod in modules:
            for mo in reach[mod]:
                if mo in modules:
                    for m in reach[mo]:
                        if m not in reach[mod]:
                            reach[mod].append(m)
                            changed = 1
    return reach
+
+
# Invert a table (this is again totally general).
# All keys of the original table are made keys of the inverse,
# so there may be empty lists in the inverse.
#
def inverse(table):
    """Return the inverted mapping; every original key appears as a key."""
    inv = {}
    for key in table.keys():
        # Seed with an empty list so unused keys still show up.
        # (setdefault replaces the deprecated has_key() check.)
        inv.setdefault(key, [])
        for item in table[key]:
            store(inv, item, key)
    return inv


# Store "item" in "dict" under "key".
# The dictionary maps keys to lists of items.
# If there is no list for the key yet, it is created.
#
def store(dict, key, item):
    # setdefault replaces the deprecated has_key() lookup-then-create.
    dict.setdefault(key, []).append(item)
+
+
# Tabulate results neatly
#
def printresults(table):
    """Print each module with its sorted dependency list; a module that
    depends on itself is flagged with (*)."""
    modules = table.keys()
    # Find the widest module name so the columns line up.
    maxlen = 0
    for mod in modules: maxlen = max(maxlen, len(mod))
    modules.sort()
    for mod in modules:
        # NOTE: sorts the list stored in the table in place.
        list = table[mod]
        list.sort()
        print mod.ljust(maxlen), ':',
        if mod in list:
            print '(*)',
        for ref in list:
            print ref,
        print
+
+
# Call main and honor exit status
if __name__ == '__main__':
    try:
        sys.exit(main())
    except KeyboardInterrupt:
        # Exit quietly (status 1) instead of dumping a traceback.
        sys.exit(1)
diff --git a/lib/python2.7/Tools/scripts/pickle2db.py b/lib/python2.7/Tools/scripts/pickle2db.py
new file mode 100644
index 0000000..2913ea8
--- /dev/null
+++ b/lib/python2.7/Tools/scripts/pickle2db.py
@@ -0,0 +1,147 @@
+#!/usr/bin/env python2
+
+"""
+Synopsis: %(prog)s [-h|-b|-g|-r|-a|-d] [ picklefile ] dbfile
+
+Read the given picklefile as a series of key/value pairs and write to a new
+database. If the database already exists, any contents are deleted. The
+optional flags indicate the type of the output database:
+
+ -a - open using anydbm
+ -b - open as bsddb btree file
+ -d - open as dbm file
+ -g - open as gdbm file
+ -h - open as bsddb hash file
+ -r - open as bsddb recno file
+
+The default is hash. If a pickle file is named it is opened for read
+access. If no pickle file is named, the pickle input is read from standard
+input.
+
+Note that recno databases can only contain integer keys, so you can't dump a
+hash or btree database using db2pickle.py and reconstitute it to a recno
+database with %(prog)s unless your keys are integers.
+
+"""
+
+import getopt
+try:
+ import bsddb
+except ImportError:
+ bsddb = None
+try:
+ import dbm
+except ImportError:
+ dbm = None
+try:
+ import gdbm
+except ImportError:
+ gdbm = None
+try:
+ import anydbm
+except ImportError:
+ anydbm = None
+import sys
+try:
+ import cPickle as pickle
+except ImportError:
+ import pickle
+
+prog = sys.argv[0]
+
def usage():
    # The module docstring doubles as the usage message; %(prog)s is
    # interpolated from the module globals.
    sys.stderr.write(__doc__ % globals())
+
def main(args):
    """Convert a pickle stream of (key, value) pairs into a database.

    args -- command-line arguments after the program name (options plus
    an optional pickle file and the database file name).
    Returns the process exit status: 0 on success, 1 on any error.
    """
    try:
        opts, args = getopt.getopt(args, "hbrdag",
                                   ["hash", "btree", "recno", "dbm", "anydbm",
                                    "gdbm"])
    except getopt.error:
        usage()
        return 1

    if len(args) == 0 or len(args) > 2:
        usage()
        return 1
    elif len(args) == 1:
        # Only the database was named; the pickles come from stdin.
        pfile = sys.stdin
        dbfile = args[0]
    else:
        try:
            pfile = open(args[0], 'rb')
        except IOError:
            sys.stderr.write("Unable to open %s\n" % args[0])
            return 1
        dbfile = args[1]

    # Map the chosen option to a database-opening function.  Any of the
    # database modules may have failed to import above (leaving None),
    # in which case the attribute access raises AttributeError.
    dbopen = None
    for opt, arg in opts:
        if opt in ("-h", "--hash"):
            try:
                dbopen = bsddb.hashopen
            except AttributeError:
                sys.stderr.write("bsddb module unavailable.\n")
                return 1
        elif opt in ("-b", "--btree"):
            try:
                dbopen = bsddb.btopen
            except AttributeError:
                sys.stderr.write("bsddb module unavailable.\n")
                return 1
        elif opt in ("-r", "--recno"):
            try:
                dbopen = bsddb.rnopen
            except AttributeError:
                sys.stderr.write("bsddb module unavailable.\n")
                return 1
        elif opt in ("-a", "--anydbm"):
            try:
                dbopen = anydbm.open
            except AttributeError:
                sys.stderr.write("anydbm module unavailable.\n")
                return 1
        elif opt in ("-g", "--gdbm"):
            try:
                dbopen = gdbm.open
            except AttributeError:
                sys.stderr.write("gdbm module unavailable.\n")
                return 1
        elif opt in ("-d", "--dbm"):
            try:
                dbopen = dbm.open
            except AttributeError:
                sys.stderr.write("dbm module unavailable.\n")
                return 1
    # Default database type is a bsddb hash file.
    if dbopen is None:
        if bsddb is None:
            sys.stderr.write("bsddb module unavailable - ")
            sys.stderr.write("must specify dbtype.\n")
            return 1
        else:
            dbopen = bsddb.hashopen

    try:
        db = dbopen(dbfile, 'c')
    except bsddb.error:
        sys.stderr.write("Unable to open %s. " % dbfile)
        sys.stderr.write("Check for format or version mismatch.\n")
        return 1
    else:
        # An existing database is emptied before loading.
        for k in db.keys():
            del db[k]

    # Each pickle.load() yields one (key, value) pair until EOF.
    while 1:
        try:
            (key, val) = pickle.load(pfile)
        except EOFError:
            break
        db[key] = val

    db.close()
    pfile.close()

    return 0

if __name__ == "__main__":
    sys.exit(main(sys.argv[1:]))
diff --git a/lib/python2.7/Tools/scripts/pindent.py b/lib/python2.7/Tools/scripts/pindent.py
new file mode 100644
index 0000000..e61a532
--- /dev/null
+++ b/lib/python2.7/Tools/scripts/pindent.py
@@ -0,0 +1,508 @@
+#!/usr/bin/env python2
+
+# This file contains a class and a main program that perform three
+# related (though complementary) formatting operations on Python
+# programs. When called as "pindent -c", it takes a valid Python
+# program as input and outputs a version augmented with block-closing
+# comments. When called as "pindent -d", it assumes its input is a
+# Python program with block-closing comments and outputs a commentless
+# version. When called as "pindent -r" it assumes its input is a
+# Python program with block-closing comments but with its indentation
+# messed up, and outputs a properly indented version.
+
+# A "block-closing comment" is a comment of the form '# end <keyword>'
+# where <keyword> is the keyword that opened the block. If the
+# opening keyword is 'def' or 'class', the function or class name may
+# be repeated in the block-closing comment as well. Here is an
+# example of a program fully augmented with block-closing comments:
+
+# def foobar(a, b):
+# if a == b:
+# a = a+1
+# elif a < b:
+# b = b-1
+# if b > a: a = a-1
+# # end if
+# else:
+# print 'oops!'
+# # end if
+# # end def foobar
+
+# Note that only the last part of an if...elif...else... block needs a
+# block-closing comment; the same is true for other compound
+# statements (e.g. try...except). Also note that "short-form" blocks
+# like the second 'if' in the example must be closed as well;
+# otherwise the 'else' in the example would be ambiguous (remember
+# that indentation is not significant when interpreting block-closing
+# comments).
+
+# The operations are idempotent (i.e. applied to their own output
+# they yield an identical result). Running first "pindent -c" and
+# then "pindent -r" on a valid Python program produces a program that
+# is semantically identical to the input (though its indentation may
+# be different). Running "pindent -e" on that output produces a
+# program that only differs from the original in indentation.
+
+# Other options:
+# -s stepsize: set the indentation step size (default 8)
+# -t tabsize : set the number of spaces a tab character is worth (default 8)
+# -e : expand TABs into spaces
+# file ... : input file(s) (default standard input)
+# The results always go to standard output
+
+# Caveats:
+# - comments ending in a backslash will be mistaken for continued lines
+# - continuations using backslash are always left unchanged
+# - continuations inside parentheses are not extra indented by -r
+# but must be indented for -c to work correctly (this breaks
+# idempotency!)
+# - continued lines inside triple-quoted strings are totally garbled
+
+# Secret feature:
+# - On input, a block may also be closed with an "end statement" --
+# this is a block-closing comment without the '#' sign.
+
+# Possible improvements:
+# - check syntax based on transitions in 'next' table
+# - better error reporting
+# - better error recovery
+# - check identifier after class/def
+
+# The following wishes need a more complete tokenization of the source:
+# - Don't get fooled by comments ending in backslash
+# - reindent continuation lines indicated by backslash
+# - handle continuation lines inside parentheses/braces/brackets
+# - handle triple quoted strings spanning lines
+# - realign comments
+# - optionally do much more thorough reformatting, a la C indent
+
+from __future__ import print_function
+
+# Defaults
+STEPSIZE = 8
+TABSIZE = 8
+EXPANDTABS = False
+
+import io
+import re
+import sys
+
# Transition table: maps each block-opening or block-continuing keyword to
# the keywords that may legally follow it at the same indentation level
# ('end' closes the block).  NOTE(review): 'next' shadows the builtin of the
# same name; it is referenced throughout this module, so it must keep this name.
next = {}
next['if'] = next['elif'] = 'elif', 'else', 'end'
next['while'] = next['for'] = 'else', 'end'
next['try'] = 'except', 'finally'
next['except'] = 'except', 'else', 'finally', 'end'
next['else'] = next['finally'] = next['with'] = \
    next['def'] = next['class'] = 'end'
next['end'] = ()
# Keywords that open a new block (and therefore need a closing comment).
start = 'if', 'while', 'for', 'try', 'with', 'def', 'class'
+
class PythonIndenter:
    """Reads a Python program from fpi and writes a transformed copy to fpo.

    The three public operations are complete() (insert '# end <kw>'
    block-closing comments), delete() (strip them) and reformat()
    (reindent the program according to its block-closing comments).
    """

    def __init__(self, fpi = sys.stdin, fpo = sys.stdout,
            indentsize = STEPSIZE, tabsize = TABSIZE, expandtabs = EXPANDTABS):
        self.fpi = fpi
        self.fpo = fpo
        self.indentsize = indentsize
        self.tabsize = tabsize
        self.lineno = 0
        self.expandtabs = expandtabs
        self._write = fpo.write
        # Matches a leading lowercase keyword, optionally followed by an
        # identifier (the def/class name); backslash-newline counts as space.
        self.kwprog = re.compile(
            r'^(?:\s|\\\n)*(?P<kw>[a-z]+)'
            r'((?:\s|\\\n)+(?P<id>[a-zA-Z_]\w*))?'
            r'[^\w]')
        # Matches a block-closing comment '# end <kw> [<id>]'; the '#' is
        # optional (the "end statement" secret feature noted in the header).
        self.endprog = re.compile(
            r'^(?:\s|\\\n)*#?\s*end\s+(?P<kw>[a-z]+)'
            r'(\s+(?P<id>[a-zA-Z_]\w*))?'
            r'[^\w]')
        self.wsprog = re.compile(r'^[ \t]*')
    # end def __init__

    def write(self, line):
        """Write line to the output, expanding tabs if requested."""
        if self.expandtabs:
            self._write(line.expandtabs(self.tabsize))
        else:
            self._write(line)
        # end if
    # end def write

    def readline(self):
        """Read one raw input line, keeping self.lineno up to date."""
        line = self.fpi.readline()
        if line: self.lineno += 1
        # end if
        return line
    # end def readline

    def error(self, fmt, *args):
        """Report a problem on stderr and echo a marker into the output."""
        if args: fmt = fmt % args
        # end if
        sys.stderr.write('Error at line %d: %s\n' % (self.lineno, fmt))
        self.write('### %s ###\n' % fmt)
    # end def error

    def getline(self):
        """Read one logical line, gluing backslash-continued lines together."""
        line = self.readline()
        while line[-2:] == '\\\n':
            line2 = self.readline()
            if not line2: break
            # end if
            line += line2
        # end while
        return line
    # end def getline

    def putline(self, line, indent):
        """Write line reindented to `indent` block levels (tabs then spaces)."""
        tabs, spaces = divmod(indent*self.indentsize, self.tabsize)
        i = self.wsprog.match(line).end()
        line = line[i:]
        if line[:1] not in ('\n', '\r', ''):
            line = '\t'*tabs + ' '*spaces + line
        # end if
        self.write(line)
    # end def putline

    def reformat(self):
        """Reindent the input according to its block-closing comments."""
        stack = []
        while True:
            line = self.getline()
            if not line: break # EOF
            # end if
            m = self.endprog.match(line)
            if m:
                kw = 'end'
                kw2 = m.group('kw')
                if not stack:
                    self.error('unexpected end')
                elif stack.pop()[0] != kw2:
                    self.error('unmatched end')
                # end if
                self.putline(line, len(stack))
                continue
            # end if
            m = self.kwprog.match(line)
            if m:
                kw = m.group('kw')
                if kw in start:
                    self.putline(line, len(stack))
                    stack.append((kw, kw))
                    continue
                # end if
                if next.has_key(kw) and stack:
                    # Continuation keyword (elif/else/except/...): dedent one
                    # level and remember it as the block's latest keyword.
                    # (dict.has_key is Python 2 only.)
                    self.putline(line, len(stack)-1)
                    kwa, kwb = stack[-1]
                    stack[-1] = kwa, kw
                    continue
                # end if
            # end if
            self.putline(line, len(stack))
        # end while
        if stack:
            self.error('unterminated keywords')
            for kwa, kwb in stack:
                self.write('\t%s\n' % kwa)
            # end for
        # end if
    # end def reformat

    def delete(self):
        """Copy input to output, dropping all block-closing comments."""
        begin_counter = 0
        end_counter = 0
        while True:
            line = self.getline()
            if not line: break # EOF
            # end if
            m = self.endprog.match(line)
            if m:
                end_counter += 1
                continue
            # end if
            m = self.kwprog.match(line)
            if m:
                kw = m.group('kw')
                if kw in start:
                    begin_counter += 1
                # end if
            # end if
            self.write(line)
        # end while
        if begin_counter - end_counter < 0:
            sys.stderr.write('Warning: input contained more end tags than expected\n')
        elif begin_counter - end_counter > 0:
            sys.stderr.write('Warning: input contained less end tags than expected\n')
        # end if
    # end def delete

    def complete(self):
        """Insert block-closing comments into a correctly indented program.

        Maintains a stack of (indent-whitespace, first keyword, last keyword,
        def/class name) for the open blocks; whenever the indentation drops
        back to an opener's level, emits '# end <kw> [<id>]'.
        """
        stack = []
        todo = []   # blank/comment lines buffered until their block is decided
        currentws = thisid = firstkw = lastkw = topid = ''
        while True:
            line = self.getline()
            i = self.wsprog.match(line).end()
            m = self.endprog.match(line)
            if m:
                thiskw = 'end'
                endkw = m.group('kw')
                thisid = m.group('id')
            else:
                m = self.kwprog.match(line)
                if m:
                    thiskw = m.group('kw')
                    if not next.has_key(thiskw):
                        thiskw = ''
                    # end if
                    if thiskw in ('def', 'class'):
                        thisid = m.group('id')
                    else:
                        thisid = ''
                    # end if
                elif line[i:i+1] in ('\n', '#'):
                    todo.append(line)
                    continue
                else:
                    thiskw = ''
                # end if
            # end if
            indentws = line[:i]
            indent = len(indentws.expandtabs(self.tabsize))
            current = len(currentws.expandtabs(self.tabsize))
            while indent < current:
                # Dedent: close every block being left.
                if firstkw:
                    if topid:
                        s = '# end %s %s\n' % (
                                firstkw, topid)
                    else:
                        s = '# end %s\n' % firstkw
                    # end if
                    self.write(currentws + s)
                    firstkw = lastkw = ''
                # end if
                currentws, firstkw, lastkw, topid = stack.pop()
                current = len(currentws.expandtabs(self.tabsize))
            # end while
            if indent == current and firstkw:
                if thiskw == 'end':
                    if endkw != firstkw:
                        self.error('mismatched end')
                    # end if
                    firstkw = lastkw = ''
                elif not thiskw or thiskw in start:
                    if topid:
                        s = '# end %s %s\n' % (
                                firstkw, topid)
                    else:
                        s = '# end %s\n' % firstkw
                    # end if
                    self.write(currentws + s)
                    firstkw = lastkw = topid = ''
                # end if
            # end if
            if indent > current:
                stack.append((currentws, firstkw, lastkw, topid))
                if thiskw and thiskw not in start:
                    # error
                    thiskw = ''
                # end if
                currentws, firstkw, lastkw, topid = \
                        indentws, thiskw, thiskw, thisid
            # end if
            if thiskw:
                if thiskw in start:
                    firstkw = lastkw = thiskw
                    topid = thisid
                else:
                    lastkw = thiskw
                # end if
            # end if
            for l in todo: self.write(l)
            # end for
            todo = []
            if not line: break
            # end if
            self.write(line)
        # end while
    # end def complete
# end class PythonIndenter
+
+# Simplified user interface
+# - xxx_filter(input, output): read and write file objects
+# - xxx_string(s): take and return string object
+# - xxx_file(filename): process file in place, return true iff changed
+
def complete_filter(input = sys.stdin, output = sys.stdout,
        stepsize = STEPSIZE, tabsize = TABSIZE, expandtabs = EXPANDTABS):
    """Add block-closing comments to the program read from input, writing to output."""
    PythonIndenter(input, output, stepsize, tabsize, expandtabs).complete()
+
def delete_filter(input= sys.stdin, output = sys.stdout,
        stepsize = STEPSIZE, tabsize = TABSIZE, expandtabs = EXPANDTABS):
    """Strip block-closing comments from the program read from input, writing to output."""
    PythonIndenter(input, output, stepsize, tabsize, expandtabs).delete()
+
def reformat_filter(input = sys.stdin, output = sys.stdout,
        stepsize = STEPSIZE, tabsize = TABSIZE, expandtabs = EXPANDTABS):
    """Reindent the program read from input using its block-closing comments."""
    PythonIndenter(input, output, stepsize, tabsize, expandtabs).reformat()
+
def complete_string(source, stepsize = STEPSIZE, tabsize = TABSIZE, expandtabs = EXPANDTABS):
    """Return source with block-closing comments added."""
    buf_in = io.BytesIO(source)
    buf_out = io.BytesIO()
    indenter = PythonIndenter(buf_in, buf_out, stepsize, tabsize, expandtabs)
    indenter.complete()
    return buf_out.getvalue()
+
def delete_string(source, stepsize = STEPSIZE, tabsize = TABSIZE, expandtabs = EXPANDTABS):
    """Return source with block-closing comments removed."""
    buf_in = io.BytesIO(source)
    buf_out = io.BytesIO()
    indenter = PythonIndenter(buf_in, buf_out, stepsize, tabsize, expandtabs)
    indenter.delete()
    return buf_out.getvalue()
+
def reformat_string(source, stepsize = STEPSIZE, tabsize = TABSIZE, expandtabs = EXPANDTABS):
    """Return source reindented according to its block-closing comments."""
    buf_in = io.BytesIO(source)
    buf_out = io.BytesIO()
    indenter = PythonIndenter(buf_in, buf_out, stepsize, tabsize, expandtabs)
    indenter.reformat()
    return buf_out.getvalue()
+
def make_backup(filename):
    """Move filename aside to filename~, replacing any previous backup.

    Failures are reported to stderr but never raised; the *_file helpers
    treat backups as best-effort.
    """
    import os, os.path
    backup = filename + '~'
    if os.path.lexists(backup):
        try:
            os.remove(backup)
        except os.error:
            print("Can't remove backup %r" % (backup,), file=sys.stderr)
    try:
        os.rename(filename, backup)
    except os.error:
        print("Can't rename %r to %r" % (filename, backup), file=sys.stderr)
+
def complete_file(filename, stepsize = STEPSIZE, tabsize = TABSIZE, expandtabs = EXPANDTABS):
    """Add block-closing comments to filename in place.

    Returns 1 if the file changed (original kept as filename~), else 0.
    """
    with open(filename, 'r') as f:
        source = f.read()
    result = complete_string(source, stepsize, tabsize, expandtabs)
    if result == source:
        return 0
    make_backup(filename)
    with open(filename, 'w') as f:
        f.write(result)
    return 1
+
def delete_file(filename, stepsize = STEPSIZE, tabsize = TABSIZE, expandtabs = EXPANDTABS):
    """Strip block-closing comments from filename in place.

    Returns 1 if the file changed (original kept as filename~), else 0.
    """
    with open(filename, 'r') as f:
        source = f.read()
    result = delete_string(source, stepsize, tabsize, expandtabs)
    if result == source:
        return 0
    make_backup(filename)
    with open(filename, 'w') as f:
        f.write(result)
    return 1
+
def reformat_file(filename, stepsize = STEPSIZE, tabsize = TABSIZE, expandtabs = EXPANDTABS):
    """Reindent filename in place using its block-closing comments.

    Returns 1 if the file changed (original kept as filename~), else 0.
    """
    with open(filename, 'r') as f:
        source = f.read()
    result = reformat_string(source, stepsize, tabsize, expandtabs)
    if result == source:
        return 0
    make_backup(filename)
    with open(filename, 'w') as f:
        f.write(result)
    return 1
+
+# Test program when called as a script
+
# Command-line help text; the %(STEPSIZE)d / %(TABSIZE)d placeholders are
# filled in from the module-level defaults via vars().
usage = """
usage: pindent (-c|-d|-r) [-s stepsize] [-t tabsize] [-e] [file] ...
-c : complete a correctly indented program (add #end directives)
-d : delete #end directives
-r : reformat a completed program (use #end directives)
-s stepsize: indentation step (default %(STEPSIZE)d)
-t tabsize : the worth in spaces of a tab (default %(TABSIZE)d)
-e : expand TABs into spaces (default OFF)
[file] ... : files are changed in place, with backups in file~
If no files are specified or a single - is given,
the program acts as a filter (reads stdin, writes stdout).
""" % vars()
+
def error_both(op1, op2):
    """Report that two mutually exclusive options were given and exit(2)."""
    sys.stderr.write('Error: You can not specify both '+op1+' and -'+op2[0]+' at the same time\n')
    sys.stderr.write(usage)
    sys.exit(2)
+
def test():
    """Command-line entry point: parse options, pick an action, and run it
    either as a stdin/stdout filter or on each file argument in place."""
    import getopt
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'cdrs:t:e')
    except getopt.error as msg:  # 'as' works on Python 2.6+ and 3.x
        sys.stderr.write('Error: %s\n' % msg)
        sys.stderr.write(usage)
        sys.exit(2)
    action = None
    stepsize = STEPSIZE
    tabsize = TABSIZE
    expandtabs = EXPANDTABS
    for o, a in opts:
        if o == '-c':
            if action: error_both(o, action)
            action = 'complete'
        elif o == '-d':
            if action: error_both(o, action)
            action = 'delete'
        elif o == '-r':
            if action: error_both(o, action)
            action = 'reformat'
        elif o == '-s':
            stepsize = int(a)
        elif o == '-t':
            tabsize = int(a)
        elif o == '-e':
            expandtabs = True
    if not action:
        sys.stderr.write(
            'You must specify -c(omplete), -d(elete) or -r(eformat)\n')
        sys.stderr.write(usage)
        sys.exit(2)
    # Dispatch through explicit tables instead of eval() on a constructed
    # function name -- same targets, no dynamic code evaluation.
    filters = {'complete': complete_filter, 'delete': delete_filter,
               'reformat': reformat_filter}
    files = {'complete': complete_file, 'delete': delete_file,
             'reformat': reformat_file}
    if not args or args == ['-']:
        filters[action](sys.stdin, sys.stdout, stepsize, tabsize, expandtabs)
    else:
        for filename in args:
            files[action](filename, stepsize, tabsize, expandtabs)
+
+if __name__ == '__main__':
+ test()
+# end if
diff --git a/lib/python2.7/Tools/scripts/ptags.py b/lib/python2.7/Tools/scripts/ptags.py
new file mode 100644
index 0000000..11b6558
--- /dev/null
+++ b/lib/python2.7/Tools/scripts/ptags.py
@@ -0,0 +1,53 @@
+#!/usr/bin/env python2
+
+# ptags
+#
+# Create a tags file for Python programs, usable with vi.
+# Tagged are:
+# - functions (even inside other defs or classes)
+# - classes
+# - filenames
+# Warns about files it cannot open.
+# No warnings about duplicate tags.
+
+import sys, re, os
+
+tags = [] # Modified global variable!
+
def main():
    """Build a vi-style 'tags' file for the Python sources named on the
    command line.  Writes nothing if no tags were collected."""
    args = sys.argv[1:]
    for filename in args:
        treat_file(filename)
    if tags:
        tags.sort()
        # 'with' guarantees the tags file is closed (it was leaked before).
        with open('tags', 'w') as fp:
            fp.writelines(tags)
+
+
# Matches a 'def' or 'class' statement at any indentation; group 1 is the
# keyword, group 2 the identifier being defined.
expr = '^[ \t]*(def|class)[ \t]+([a-zA-Z0-9_]+)[ \t]*[:\(]'
matcher = re.compile(expr)
+
def treat_file(filename):
    """Scan one source file and append its tag lines to the global 'tags'.

    Emits a warning (and returns) if the file cannot be opened.
    """
    try:
        fp = open(filename, 'r')
    except IOError:  # was a bare except; only open() failures should be skipped
        sys.stderr.write('Cannot open %s\n' % filename)
        return
    base = os.path.basename(filename)
    if base[-3:] == '.py':
        base = base[:-3]
    # File-level tag pointing at line 1.
    s = base + '\t' + filename + '\t' + '1\n'
    tags.append(s)
    # 'with' closes fp even on errors (it was never closed before).
    with fp:
        for line in fp:
            m = matcher.match(line)
            if m:
                content = m.group(0)
                name = m.group(2)
                # vi search-pattern tag for the def/class line.
                s = name + '\t' + filename + '\t/^' + content + '/\n'
                tags.append(s)
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/python2.7/Tools/scripts/pysource.py b/lib/python2.7/Tools/scripts/pysource.py
new file mode 100644
index 0000000..062cfbb
--- /dev/null
+++ b/lib/python2.7/Tools/scripts/pysource.py
@@ -0,0 +1,130 @@
+#!/usr/bin/env python2
+
+"""\
+List python source files.
+
+There are three functions to check whether a file is a Python source, listed
+here with increasing complexity:
+
+- has_python_ext() checks whether a file name ends in '.py[w]'.
+- look_like_python() checks whether the file is not binary and either has
+ the '.py[w]' extension or the first line contains the word 'python'.
+- can_be_compiled() checks whether the file can be compiled by compile().
+
+The file also must be of appropriate size - not bigger than a megabyte.
+
+walk_python_files() recursively lists all Python files under the given directories.
+"""
+__author__ = "Oleg Broytmann, Georg Brandl"
+
+__all__ = ["has_python_ext", "looks_like_python", "can_be_compiled", "walk_python_files"]
+
+
+import os, re
+
+binary_re = re.compile('[\x00-\x08\x0E-\x1F\x7F]')
+
+debug = False
+
def print_debug(msg):
    # Emit msg only when the module-level 'debug' flag is set.
    if debug: print msg
+
+
def _open(fullpath):
    """Open fullpath for text reading, or return None when it is unreadable
    or larger than one megabyte (callers simply skip such files)."""
    try:
        size = os.stat(fullpath).st_size
    except OSError as exc:  # Permission denied - ignore the file
        print_debug("%s: permission denied: %s" % (fullpath, exc))
        return None
    if size > 1024*1024:  # too big
        print_debug("%s: the file is too big: %d bytes" % (fullpath, size))
        return None
    try:
        return open(fullpath, 'rU')
    except IOError as exc:  # Access denied, or a special file - ignore it
        print_debug("%s: access denied: %s" % (fullpath, exc))
        return None
+
def has_python_ext(fullpath):
    """Return True if fullpath has a .py or .pyw extension."""
    return fullpath.endswith((".py", ".pyw"))
+
def looks_like_python(fullpath):
    """Heuristically decide whether fullpath is Python source: not binary,
    and either a .py/.pyw extension or 'python' in the first line."""
    infile = _open(fullpath)
    if infile is None:
        return False

    first_line = infile.readline()
    infile.close()

    if binary_re.search(first_line):
        print_debug("%s: appears to be binary" % fullpath)
        return False

    # .py/.pyw extension, or a disguised Python script (e.g. CGI) whose
    # first line mentions python.
    return fullpath.endswith((".py", ".pyw")) or "python" in first_line
+
def can_be_compiled(fullpath):
    """Return True if the file's contents compile cleanly as Python."""
    infile = _open(fullpath)
    if infile is None:
        return False

    source = infile.read()
    infile.close()

    try:
        compile(source, fullpath, "exec")
    except Exception as exc:
        print_debug("%s: cannot compile: %s" % (fullpath, exc))
        return False
    return True
+
+
def walk_python_files(paths, is_python=looks_like_python, exclude_dirs=None):
    """Recursively yield all Python source files below the given paths.

    paths: list of files and/or directories to check.
    is_python: predicate deciding whether a file name is Python source.
    exclude_dirs: directory base names to prune from the search.
    """
    if exclude_dirs is None:
        exclude_dirs = []

    for path in paths:
        print_debug("testing: %s" % path)
        if os.path.isfile(path):
            if is_python(path):
                yield path
        elif os.path.isdir(path):
            print_debug(" it is a directory")
            for dirpath, dirnames, filenames in os.walk(path):
                # Prune excluded directories in place so os.walk skips them.
                dirnames[:] = [d for d in dirnames if d not in exclude_dirs]
                for filename in filenames:
                    fullpath = os.path.join(dirpath, filename)
                    print_debug("testing: %s" % fullpath)
                    if is_python(fullpath):
                        yield fullpath
        else:
            print_debug(" unknown type")
+
+
+if __name__ == "__main__":
+ # Two simple examples/tests
+ for fullpath in walk_python_files(['.']):
+ print fullpath
+ print "----------"
+ for fullpath in walk_python_files(['.'], is_python=can_be_compiled):
+ print fullpath
diff --git a/lib/python2.7/Tools/scripts/redemo.py b/lib/python2.7/Tools/scripts/redemo.py
new file mode 100644
index 0000000..39f3670
--- /dev/null
+++ b/lib/python2.7/Tools/scripts/redemo.py
@@ -0,0 +1,172 @@
+#!/usr/bin/env python2
+"""Basic regular expression demonstration facility (Perl style syntax)."""
+
+from Tkinter import *
+import re
+
class ReDemo:
    """Interactive Tkinter demo: compiles the typed regex on every keystroke
    and highlights its matches (and lists its groups) in a sample string."""

    def __init__(self, master):
        """Build the widget tree under master and wire up the callbacks."""
        self.master = master

        self.promptdisplay = Label(self.master, anchor=W,
                text="Enter a Perl-style regular expression:")
        self.promptdisplay.pack(side=TOP, fill=X)

        self.regexdisplay = Entry(self.master)
        self.regexdisplay.pack(fill=X)
        self.regexdisplay.focus_set()

        self.addoptions()

        self.statusdisplay = Label(self.master, text="", anchor=W)
        self.statusdisplay.pack(side=TOP, fill=X)

        self.labeldisplay = Label(self.master, anchor=W,
                text="Enter a string to search:")
        self.labeldisplay.pack(fill=X)
        # NOTE(review): duplicate pack() call below -- looks redundant;
        # confirm before removing.
        self.labeldisplay.pack(fill=X)

        self.showframe = Frame(master)
        self.showframe.pack(fill=X, anchor=W)

        self.showvar = StringVar(master)
        self.showvar.set("first")

        self.showfirstradio = Radiobutton(self.showframe,
                text="Highlight first match",
                variable=self.showvar,
                value="first",
                command=self.recompile)
        self.showfirstradio.pack(side=LEFT)

        self.showallradio = Radiobutton(self.showframe,
                text="Highlight all matches",
                variable=self.showvar,
                value="all",
                command=self.recompile)
        self.showallradio.pack(side=LEFT)

        self.stringdisplay = Text(self.master, width=60, height=4)
        self.stringdisplay.pack(fill=BOTH, expand=1)
        self.stringdisplay.tag_configure("hit", background="yellow")

        self.grouplabel = Label(self.master, text="Groups:", anchor=W)
        self.grouplabel.pack(fill=X)

        self.grouplist = Listbox(self.master)
        self.grouplist.pack(expand=1, fill=BOTH)

        # Recompile / re-search on every keystroke in either widget.
        self.regexdisplay.bind('<Key>', self.recompile)
        self.stringdisplay.bind('<Key>', self.reevaluate)

        self.compiled = None
        self.recompile()

        # Rotate the bindtags so the widget-level <Key> handler runs after
        # the class binding has already updated the widget's contents.
        btags = self.regexdisplay.bindtags()
        self.regexdisplay.bindtags(btags[1:] + btags[:1])

        btags = self.stringdisplay.bindtags()
        self.stringdisplay.bindtags(btags[1:] + btags[:1])

    def addoptions(self):
        """Create one checkbutton per re flag, three per row; toggling any
        of them triggers recompilation."""
        self.frames = []
        self.boxes = []
        self.vars = []
        for name in ('IGNORECASE',
                     'LOCALE',
                     'MULTILINE',
                     'DOTALL',
                     'VERBOSE'):
            if len(self.boxes) % 3 == 0:
                frame = Frame(self.master)
                frame.pack(fill=X)
                self.frames.append(frame)
            val = getattr(re, name)
            var = IntVar()
            box = Checkbutton(frame,
                    variable=var, text=name,
                    offvalue=0, onvalue=val,
                    command=self.recompile)
            box.pack(side=LEFT)
            self.boxes.append(box)
            self.vars.append(var)

    def getflags(self):
        """Return the OR of all checked flag values (onvalue is the re flag)."""
        flags = 0
        for var in self.vars:
            flags = flags | var.get()
        flags = flags  # NOTE(review): no-op assignment; harmless leftover
        return flags

    def recompile(self, event=None):
        """Compile the current regex; on error show it in red in the status bar."""
        try:
            self.compiled = re.compile(self.regexdisplay.get(),
                                       self.getflags())
            bg = self.promptdisplay['background']
            self.statusdisplay.config(text="", background=bg)
        except re.error, msg:
            self.compiled = None
            self.statusdisplay.config(
                    text="re.error: %s" % str(msg),
                    background="red")
        self.reevaluate()

    def reevaluate(self, event=None):
        """Re-run the compiled regex over the sample text, refreshing the
        highlight tags and the group listing."""
        try:
            self.stringdisplay.tag_remove("hit", "1.0", END)
        except TclError:
            pass
        try:
            self.stringdisplay.tag_remove("hit0", "1.0", END)
        except TclError:
            pass
        self.grouplist.delete(0, END)
        if not self.compiled:
            return
        self.stringdisplay.tag_configure("hit", background="yellow")
        self.stringdisplay.tag_configure("hit0", background="orange")
        text = self.stringdisplay.get("1.0", END)
        last = 0
        nmatches = 0
        while last <= len(text):
            m = self.compiled.search(text, last)
            if m is None:
                break
            first, last = m.span()
            if last == first:
                # Zero-width match: advance one char and mark it specially.
                last = first+1
                tag = "hit0"
            else:
                tag = "hit"
            pfirst = "1.0 + %d chars" % first
            plast = "1.0 + %d chars" % last
            self.stringdisplay.tag_add(tag, pfirst, plast)
            if nmatches == 0:
                self.stringdisplay.yview_pickplace(pfirst)
            groups = list(m.groups())
            groups.insert(0, m.group())
            for i in range(len(groups)):
                g = "%2d: %r" % (i, groups[i])
                self.grouplist.insert(END, g)
            nmatches = nmatches + 1
            if self.showvar.get() == "first":
                break

        if nmatches == 0:
            self.statusdisplay.config(text="(no match)",
                                      background="yellow")
        else:
            self.statusdisplay.config(text="")
+
+
+# Main function, run when invoked as a stand-alone Python program.
+
def main():
    """Entry point: build the Tk UI and run the event loop until quit."""
    root = Tk()
    ReDemo(root)  # the instance stays alive through its Tk callbacks
    root.protocol('WM_DELETE_WINDOW', root.quit)
    root.mainloop()
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/python2.7/Tools/scripts/reindent-rst.py b/lib/python2.7/Tools/scripts/reindent-rst.py
new file mode 100644
index 0000000..88cf409
--- /dev/null
+++ b/lib/python2.7/Tools/scripts/reindent-rst.py
@@ -0,0 +1,14 @@
+#!/usr/bin/env python2
+
+# Make a reST file compliant to our pre-commit hook.
+# Currently just remove trailing whitespace.
+
+import sys
+
+import patchcheck
+
def main(argv=sys.argv):
    """Run patchcheck's docs-whitespace normalizer over the file arguments."""
    paths = argv[1:]
    patchcheck.normalize_docs_whitespace(paths)
+
+if __name__ == '__main__':
+ sys.exit(main())
diff --git a/lib/python2.7/Tools/scripts/reindent.py b/lib/python2.7/Tools/scripts/reindent.py
new file mode 100644
index 0000000..b2d81c8
--- /dev/null
+++ b/lib/python2.7/Tools/scripts/reindent.py
@@ -0,0 +1,315 @@
+#!/usr/bin/env python2
+
+# Released to the public domain, by Tim Peters, 03 October 2000.
+
+"""reindent [-d][-r][-v] [ path ... ]
+
+-d (--dryrun) Dry run. Analyze, but don't make any changes to, files.
+-r (--recurse) Recurse. Search for all .py files in subdirectories too.
+-n (--nobackup) No backup. Does not make a ".bak" file before reindenting.
+-v (--verbose) Verbose. Print informative msgs; else no output.
+-h (--help) Help. Print this usage information and exit.
+
+Change Python (.py) files to use 4-space indents and no hard tab characters.
+Also trim excess spaces and tabs from ends of lines, and remove empty lines
+at the end of files. Also ensure the last line ends with a newline.
+
+If no paths are given on the command line, reindent operates as a filter,
+reading a single source file from standard input and writing the transformed
+source to standard output. In this case, the -d, -r and -v flags are
+ignored.
+
+You can pass one or more file and/or directory paths. When a directory
+path, all .py files within the directory will be examined, and, if the -r
+option is given, likewise recursively for subdirectories.
+
+If output is not to standard output, reindent overwrites files in place,
+renaming the originals with a .bak extension. If it finds nothing to
+change, the file is left alone. If reindent does change a file, the changed
+file is a fixed-point for future runs (i.e., running reindent on the
+resulting .py file won't change it again).
+
+The hard part of reindenting is figuring out what to do with comment
+lines. So long as the input files get a clean bill of health from
+tabnanny.py, reindent should do a good job.
+
+The backup file is a copy of the one that is being reindented. The ".bak"
+file is generated with shutil.copy(), but some corner cases regarding
+user/group and permissions could leave the backup file more readable than
+you'd prefer. You can always use the --nobackup option to prevent this.
+"""
+
+__version__ = "1"
+
+import tokenize
+import os, shutil
+import sys
+import io
+
+verbose = 0
+recurse = 0
+dryrun = 0
+makebackup = True
+
def usage(msg=None):
    # Print an optional error message, then the module docstring, to stderr.
    if msg is not None:
        print >> sys.stderr, msg
    print >> sys.stderr, __doc__
+
def errprint(*args):
    """Write args to stderr, space-separated, followed by a newline."""
    sys.stderr.write(" ".join(str(arg) for arg in args))
    sys.stderr.write("\n")
+
def main():
    """Parse options, then reindent each path argument; with no arguments,
    act as a stdin-to-stdout filter (flags are ignored in that mode)."""
    import getopt
    global verbose, recurse, dryrun, makebackup
    try:
        opts, args = getopt.getopt(sys.argv[1:], "drnvh",
                        ["dryrun", "recurse", "nobackup", "verbose", "help"])
    except getopt.error, msg:
        usage(msg)
        return
    for o, a in opts:
        if o in ('-d', '--dryrun'):
            dryrun += 1
        elif o in ('-r', '--recurse'):
            recurse += 1
        elif o in ('-n', '--nobackup'):
            makebackup = False
        elif o in ('-v', '--verbose'):
            verbose += 1
        elif o in ('-h', '--help'):
            usage()
            return
    if not args:
        # Filter mode: read stdin, write transformed source to stdout.
        r = Reindenter(sys.stdin)
        r.run()
        r.write(sys.stdout)
        return
    for arg in args:
        check(arg)
+
def check(file):
    """Reindent one file in place, or walk a directory for .py files.

    Returns True if the file was changed, False if unchanged, and None when
    a directory was processed or an I/O error occurred.
    """
    if os.path.isdir(file) and not os.path.islink(file):
        if verbose:
            print "listing directory", file
        names = os.listdir(file)
        for name in names:
            fullname = os.path.join(file, name)
            # Recurse into non-hidden subdirectories only with -r; always
            # process .py files directly inside this directory.
            if ((recurse and os.path.isdir(fullname) and
                 not os.path.islink(fullname) and
                 not os.path.split(fullname)[1].startswith("."))
                or name.lower().endswith(".py")):
                check(fullname)
        return

    if verbose:
        print "checking", file, "...",
    try:
        f = io.open(file)
    except IOError, msg:
        errprint("%s: I/O Error: %s" % (file, str(msg)))
        return

    r = Reindenter(f)
    f.close()

    # io.open records the newline convention; a tuple means the file mixes
    # line endings, which cannot be rewritten faithfully.
    newline = r.newlines
    if isinstance(newline, tuple):
        errprint("%s: mixed newlines detected; cannot process file" % file)
        return

    if r.run():
        if verbose:
            print "changed."
            if dryrun:
                print "But this is a dry run, so leaving it alone."
        if not dryrun:
            bak = file + ".bak"
            if makebackup:
                shutil.copyfile(file, bak)
                if verbose:
                    print "backed up", file, "to", bak
            # Rewrite with the file's original newline convention.
            f = io.open(file, "w", newline=newline)
            r.write(f)
            f.close()
            if verbose:
                print "wrote new", file
        return True
    else:
        if verbose:
            print "unchanged."
        return False
+
+def _rstrip(line, JUNK='\n \t'):
+ """Return line stripped of trailing spaces, tabs, newlines.
+
+ Note that line.rstrip() instead also strips sundry control characters,
+ but at least one known Emacs user expects to keep junk like that, not
+ mentioning Barry by name or anything <wink>.
+ """
+
+ i = len(line)
+ while i > 0 and line[i-1] in JUNK:
+ i -= 1
+ return line[:i]
+
class Reindenter:
    """Reindents the file-like object f to 4-space indentation levels.

    run() drives tokenize over the buffered lines and builds the new text
    in self.after; write() emits it.  Comment lines, which tokenize cannot
    place, are re-aligned heuristically.
    """

    def __init__(self, f):
        self.find_stmt = 1 # next token begins a fresh stmt?
        self.level = 0 # current indent level

        # Raw file lines.
        self.raw = f.readlines()

        # File lines, rstripped & tab-expanded. Dummy at start is so
        # that we can use tokenize's 1-based line numbering easily.
        # Note that a line is all-blank iff it's "\n".
        self.lines = [_rstrip(line).expandtabs() + "\n"
                      for line in self.raw]
        self.lines.insert(0, None)
        self.index = 1 # index into self.lines of next line

        # List of (lineno, indentlevel) pairs, one for each stmt and
        # comment line. indentlevel is -1 for comment lines, as a
        # signal that tokenize doesn't know what to do about them;
        # indeed, they're our headache!
        self.stats = []

        # Save the newlines found in the file so they can be used to
        # create output without mutating the newlines.
        self.newlines = f.newlines

    def run(self):
        """Tokenize the input and compute self.after; return True iff the
        reindented text differs from the original."""
        tokenize.tokenize(self.getline, self.tokeneater)
        # Remove trailing empty lines.
        lines = self.lines
        while lines and lines[-1] == "\n":
            lines.pop()
        # Sentinel.
        stats = self.stats
        stats.append((len(lines), 0))
        # Map count of leading spaces to # we want.
        have2want = {}
        # Program after transformation.
        after = self.after = []
        # Copy over initial empty lines -- there's nothing to do until
        # we see a line with *something* on it.
        i = stats[0][0]
        after.extend(lines[1:i])
        for i in range(len(stats)-1):
            thisstmt, thislevel = stats[i]
            nextstmt = stats[i+1][0]
            have = getlspace(lines[thisstmt])
            want = thislevel * 4
            if want < 0:
                # A comment line.
                if have:
                    # An indented comment line. If we saw the same
                    # indentation before, reuse what it most recently
                    # mapped to.
                    want = have2want.get(have, -1)
                    if want < 0:
                        # Then it probably belongs to the next real stmt.
                        for j in xrange(i+1, len(stats)-1):
                            jline, jlevel = stats[j]
                            if jlevel >= 0:
                                if have == getlspace(lines[jline]):
                                    want = jlevel * 4
                                break
                    if want < 0:
                        # Maybe it's a hanging comment like this one,
                        # in which case we should shift it like its base
                        # line got shifted.
                        for j in xrange(i-1, -1, -1):
                            jline, jlevel = stats[j]
                            if jlevel >= 0:
                                want = have + getlspace(after[jline-1]) - \
                                       getlspace(lines[jline])
                                break
                    if want < 0:
                        # Still no luck -- leave it alone.
                        want = have
                else:
                    want = 0
            assert want >= 0
            have2want[have] = want
            diff = want - have
            if diff == 0 or have == 0:
                after.extend(lines[thisstmt:nextstmt])
            else:
                for line in lines[thisstmt:nextstmt]:
                    if diff > 0:
                        if line == "\n":
                            after.append(line)
                        else:
                            after.append(" " * diff + line)
                    else:
                        remove = min(getlspace(line), -diff)
                        after.append(line[remove:])
        return self.raw != self.after

    def write(self, f):
        """Write the transformed program to file-like object f."""
        f.writelines(self.after)

    # Line-getter for tokenize.
    def getline(self):
        """Return the next buffered line, or '' at end of input."""
        if self.index >= len(self.lines):
            line = ""
        else:
            line = self.lines[self.index]
            self.index += 1
        return line

    # Line-eater for tokenize: records (line, level) stats for statements
    # and comments.  (The tuple parameter '(sline, scol)' in the signature
    # is Python 2 only syntax.)
    def tokeneater(self, type, token, (sline, scol), end, line,
                   INDENT=tokenize.INDENT,
                   DEDENT=tokenize.DEDENT,
                   NEWLINE=tokenize.NEWLINE,
                   COMMENT=tokenize.COMMENT,
                   NL=tokenize.NL):

        if type == NEWLINE:
            # A program statement, or ENDMARKER, will eventually follow,
            # after some (possibly empty) run of tokens of the form
            # (NL | COMMENT)* (INDENT | DEDENT+)?
            self.find_stmt = 1

        elif type == INDENT:
            self.find_stmt = 1
            self.level += 1

        elif type == DEDENT:
            self.find_stmt = 1
            self.level -= 1

        elif type == COMMENT:
            if self.find_stmt:
                self.stats.append((sline, -1))
                # but we're still looking for a new stmt, so leave
                # find_stmt alone

        elif type == NL:
            pass

        elif self.find_stmt:
            # This is the first "real token" following a NEWLINE, so it
            # must be the first token of the next program statement, or an
            # ENDMARKER.
            self.find_stmt = 0
            if line: # not endmarker
                self.stats.append((sline, self.level))
+
+# Count number of leading blanks.
def getlspace(line):
    """Return the number of leading space characters in line."""
    return len(line) - len(line.lstrip(" "))
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/python2.7/Tools/scripts/rgrep.py b/lib/python2.7/Tools/scripts/rgrep.py
new file mode 100644
index 0000000..a40bc3f
--- /dev/null
+++ b/lib/python2.7/Tools/scripts/rgrep.py
@@ -0,0 +1,64 @@
+#!/usr/bin/env python2
+
+"""Reverse grep.
+
+Usage: rgrep [-i] pattern file
+"""
+
+import sys
+import re
+import getopt
+
def main():
    """Print the lines of `file` that match `pattern`, last match first.

    The file is read backwards in fixed-size blocks so that very large
    files never have to be held in memory all at once.
    """
    bufsize = 64*1024
    reflags = 0
    opts, args = getopt.getopt(sys.argv[1:], "i")
    for o, a in opts:
        if o == '-i':
            reflags = reflags | re.IGNORECASE
    if len(args) < 2:
        usage("not enough arguments")
    if len(args) > 2:
        usage("exactly one file argument required")
    pattern, filename = args
    try:
        prog = re.compile(pattern, reflags)
    except re.error, msg:
        usage("error in regular expression: %s" % str(msg))
    try:
        f = open(filename)
    except IOError, msg:
        usage("can't open %s: %s" % (repr(filename), str(msg)), 1)
    # Start at end-of-file and walk backwards one block at a time.
    f.seek(0, 2)
    pos = f.tell()
    leftover = None
    while pos > 0:
        size = min(pos, bufsize)
        pos = pos - size
        f.seek(pos)
        buffer = f.read(size)
        lines = buffer.split("\n")
        del buffer
        if leftover is None:
            # Last block of the file: a trailing newline leaves an empty
            # final element, which is not a real line.
            if not lines[-1]:
                del lines[-1]
        else:
            # Rejoin the partial line that straddled the block boundary.
            lines[-1] = lines[-1] + leftover
        if pos > 0:
            # The first element may be an incomplete line; defer it to
            # the next (earlier) block.
            leftover = lines[0]
            del lines[0]
        else:
            leftover = None
        lines.reverse()
        for line in lines:
            if prog.search(line):
                print line
+
def usage(msg, code=2):
    """Print msg and the module docstring to stderr, then exit.

    The default exit code 2 signals a command-line usage error."""
    sys.stdout = sys.stderr
    print msg
    print __doc__
    sys.exit(code)

if __name__ == '__main__':
    main()
diff --git a/lib/python2.7/Tools/scripts/serve.py b/lib/python2.7/Tools/scripts/serve.py
new file mode 100644
index 0000000..369aeec
--- /dev/null
+++ b/lib/python2.7/Tools/scripts/serve.py
@@ -0,0 +1,35 @@
+#!/usr/bin/env python2
+'''
+Small wsgiref based web server. Takes a path to serve from and an
+optional port number (defaults to 8000), then tries to serve files.
+Mime types are guessed from the file names, 404 errors are raised
+if the file is not found. Used for the make serve target in Doc.
+'''
+import sys
+import os
+import mimetypes
+from wsgiref import simple_server, util
+
def app(environ, respond):
    """WSGI application: serve files below the module-global `path`.

    Directory-style request paths (no '.' in the last component) are
    mapped onto an index.html inside that directory.
    """
    fn = os.path.join(path, environ['PATH_INFO'][1:])
    if '.' not in fn.split(os.path.sep)[-1]:
        fn = os.path.join(fn, 'index.html')
    mime = mimetypes.guess_type(fn)[0]

    if not os.path.exists(fn):
        respond('404 Not Found', [('Content-Type', 'text/plain')])
        return ['not found']
    respond('200 OK', [('Content-Type', mime)])
    return util.FileWrapper(open(fn))
+
if __name__ == '__main__':
    # Usage: serve.py <directory> [port]; serves until interrupted.
    path = sys.argv[1]
    port = int(sys.argv[2]) if len(sys.argv) > 2 else 8000
    httpd = simple_server.make_server('', port, app)
    print "Serving %s on port %s, control-C to stop" % (path, port)
    try:
        httpd.serve_forever()
    except KeyboardInterrupt:
        print "\b\bShutting down."
diff --git a/lib/python2.7/Tools/scripts/setup.py b/lib/python2.7/Tools/scripts/setup.py
new file mode 100644
index 0000000..7a50368
--- /dev/null
+++ b/lib/python2.7/Tools/scripts/setup.py
@@ -0,0 +1,20 @@
+from distutils.core import setup
+
if __name__ == '__main__':
    # Install a selection of the Tools/scripts utilities (plus a few
    # scripts that live elsewhere in the tree) as command-line scripts.
    setup(
        scripts=[
            'byteyears.py',
            'checkpyc.py',
            'copytime.py',
            'crlf.py',
            'dutree.py',
            'ftpmirror.py',
            'h2py.py',
            'lfcr.py',
            '../i18n/pygettext.py',
            'logmerge.py',
            '../../Lib/tabnanny.py',
            '../../Lib/timeit.py',
            'untabify.py',
        ],
    )
diff --git a/lib/python2.7/Tools/scripts/suff.py b/lib/python2.7/Tools/scripts/suff.py
new file mode 100644
index 0000000..1b8cd8b
--- /dev/null
+++ b/lib/python2.7/Tools/scripts/suff.py
@@ -0,0 +1,30 @@
+#!/usr/bin/env python2
+
+# suff
+#
+# show different suffixes amongst arguments
+
+import sys
+
def main():
    """Group the command-line filenames by suffix and print, for each
    distinct suffix in sorted order, the suffix and its file count."""
    files = sys.argv[1:]
    suffixes = {}
    for filename in files:
        suff = getsuffix(filename)
        if not suffixes.has_key(suff):
            suffixes[suff] = []
        suffixes[suff].append(filename)
    keys = suffixes.keys()
    keys.sort()
    for suff in keys:
        print repr(suff), len(suffixes[suff])
+
def getsuffix(filename):
    """Return filename's suffix: everything from the last '.' onward
    (dot included), or '' when there is no dot.

    Note: unlike os.path.splitext(), a leading dot counts as a suffix
    ('.bashrc' -> '.bashrc'), matching the original scanning behavior.
    """
    # str.rfind locates the last '.' in one pass instead of scanning
    # every character and re-slicing on each dot found.
    i = filename.rfind('.')
    return filename[i:] if i >= 0 else ''
+
# Script entry point: summarize suffixes of the command-line arguments.
if __name__ == '__main__':
    main()
diff --git a/lib/python2.7/Tools/scripts/svneol.py b/lib/python2.7/Tools/scripts/svneol.py
new file mode 100644
index 0000000..325828d
--- /dev/null
+++ b/lib/python2.7/Tools/scripts/svneol.py
@@ -0,0 +1,91 @@
+#!/usr/bin/env python2
+
+"""
+SVN helper script.
+
+Try to set the svn:eol-style property to "native" on every .py, .txt, .c and
+.h file in the directory tree rooted at the current directory.
+
+Files with the svn:eol-style property already set (to anything) are skipped.
+
+svn will itself refuse to set this property on a file that's not under SVN
+control, or that has a binary mime-type property set. This script inherits
+that behavior, and passes on whatever warning message the failing "svn
+propset" command produces.
+
+In the Python project, it's safe to invoke this script from the root of
+a checkout.
+
+No output is produced for files that are ignored. For a file that gets
+svn:eol-style set, output looks like:
+
+ property 'svn:eol-style' set on 'Lib\ctypes\__init__.py'
+
+For a file not under version control:
+
+ svn: warning: 'patch-finalizer.txt' is not under version control
+
+and for a file with a binary mime-type property:
+
+ svn: File 'Lib\test\test_pep263.py' has binary mime type property
+"""
+
+import re
+import os
+
def propfiles(root, fn):
    """Return the list of SVN property-file paths for file fn in directory root.

    Returns [] when root/.svn/format cannot be read (e.g. the directory
    is not under version control); raises ValueError for a working-copy
    format other than 8 or 9.
    """
    try:
        format = int(open(os.path.join(root, ".svn", "format")).read().strip())
    except IOError:
        return []
    if format in (8, 9):
        # In version 8 and 9, committed props are stored in prop-base, local
        # modifications in props
        return [os.path.join(root, ".svn", "prop-base", fn+".svn-base"),
                os.path.join(root, ".svn", "props", fn+".svn-work")]
    # (The original computed an unused `default` path here; removed.)
    raise ValueError("Unknown repository format")
+
def proplist(root, fn):
    "Return a list of property names for file fn in directory root"
    result = []
    for path in propfiles(root, fn):
        try:
            f = open(path)
        except IOError:
            # no properties file: not under version control,
            # or no properties set
            continue
        while 1:
            # key-value pairs, of the form
            # K <length>
            # <keyname>NL
            # V length
            # <value>NL
            # END
            line = f.readline()
            if line.startswith("END"):
                break
            assert line.startswith("K ")
            L = int(line.split()[1])
            key = f.read(L)
            result.append(key)
            f.readline()              # swallow the newline after the key
            line = f.readline()
            assert line.startswith("V ")
            L = int(line.split()[1])
            value = f.read(L)         # value is skipped, not used
            f.readline()              # swallow the newline after the value
        f.close()
    return result
+
# Matcher for extensions that suggest a textual file which should carry
# the svn:eol-style property.
possible_text_file = re.compile(r"\.([hc]|py|txt|sln|vcproj)$").search

for root, dirs, files in os.walk('.'):
    if '.svn' in dirs:
        dirs.remove('.svn')   # don't descend into SVN administrative dirs
    for fn in files:
        if possible_text_file(fn):
            if 'svn:eol-style' not in proplist(root, fn):
                # Let svn itself refuse unversioned/binary files; its
                # warning output is passed through unchanged.
                path = os.path.join(root, fn)
                os.system('svn propset svn:eol-style native "%s"' % path)
diff --git a/lib/python2.7/Tools/scripts/texcheck.py b/lib/python2.7/Tools/scripts/texcheck.py
new file mode 100644
index 0000000..4a56635
--- /dev/null
+++ b/lib/python2.7/Tools/scripts/texcheck.py
@@ -0,0 +1,233 @@
+""" TeXcheck.py -- rough syntax checking on Python style LaTeX documents.
+
+ Written by Raymond D. Hettinger <python at rcn.com>
+ Copyright (c) 2003 Python Software Foundation. All rights reserved.
+
+Designed to catch common markup errors including:
+* Unbalanced or mismatched parenthesis, brackets, and braces.
+* Unbalanced or mismatched \\begin and \\end blocks.
+* Misspelled or invalid LaTeX commands.
+* Use of forward slashes instead of backslashes for commands.
+* Table line size mismatches.
+
+Sample command line usage:
+ python texcheck.py -k chapterheading -m lib/librandomtex *.tex
+
+Options:
+ -m Munge parenthesis and brackets. [0,n) would normally mismatch.
+ -k keyword: Keyword is a valid LaTeX command. Do not include the backslash.
+ -d: Delimiter check only (useful for non-LaTeX files).
+ -h: Help
+ -s lineno: Start at lineno (useful for skipping complex sections).
+ -v: Verbose. Trace the matching of //begin and //end blocks.
+"""
+
+import re
+import sys
+import getopt
+from itertools import izip, count, islice
+import glob
+
# Whitespace-separated inventory of LaTeX commands considered valid in
# the Python documentation sources; checkit() turns this into a set via
# cmdstr.split(), so only the words matter, not the layout.
cmdstr = r"""
    \section \module \declaremodule \modulesynopsis \moduleauthor
    \sectionauthor \versionadded \code \class \method \begin
    \optional \var \ref \end \subsection \lineiii \hline \label
    \indexii \textrm \ldots \keyword \stindex \index \item \note
    \withsubitem \ttindex \footnote \citetitle \samp \opindex
    \noindent \exception \strong \dfn \ctype \obindex \character
    \indexiii \function \bifuncindex \refmodule \refbimodindex
    \subsubsection \nodename \member \chapter \emph \ASCII \UNIX
    \regexp \program \production \token \productioncont \term
    \grammartoken \lineii \seemodule \file \EOF \documentclass
    \usepackage \title \input \maketitle \ifhtml \fi \url \Cpp
    \tableofcontents \kbd \programopt \envvar \refstmodindex
    \cfunction \constant \NULL \moreargs \cfuncline \cdata
    \textasciicircum \n \ABC \setindexsubitem \versionchanged
    \deprecated \seetext \newcommand \POSIX \pep \warning \rfc
    \verbatiminput \methodline \textgreater \seetitle \lineiv
    \funclineni \ulink \manpage \funcline \dataline \unspecified
    \textbackslash \mimetype \mailheader \seepep \textunderscore
    \longprogramopt \infinity \plusminus \shortversion \version
    \refmodindex \seerfc \makeindex \makemodindex \renewcommand
    \indexname \appendix \protect \indexiv \mbox \textasciitilde
    \platform \seeurl \leftmargin \labelwidth \localmoduletable
    \LaTeX \copyright \memberline \backslash \pi \centerline
    \caption \vspace \textwidth \menuselection \textless
    \makevar \csimplemacro \menuselection \bfcode \sub \release
    \email \kwindex \refexmodindex \filenq \e \menuselection
    \exindex \linev \newsgroup \verbatim \setshortversion
    \author \authoraddress \paragraph \subparagraph \cmemberline
    \textbar \C \seelink
"""
+
def matchclose(c_lineno, c_symbol, openers, pairmap):
    "Verify that closing delimiter matches most recent opening delimiter"
    try:
        o_lineno, o_symbol = openers.pop()
    except IndexError:
        print "\nDelimiter mismatch. On line %d, encountered closing '%s' without corresponding open" % (c_lineno, c_symbol)
        return
    # pairmap lists the acceptable openers per closer; a begin/end name
    # (not in pairmap) must match itself exactly.
    if o_symbol in pairmap.get(c_symbol, [c_symbol]): return
    print "\nOpener '%s' on line %d was not closed before encountering '%s' on line %d" % (o_symbol, o_lineno, c_symbol, c_lineno)
    return
+
def checkit(source, opts, morecmds=[]):
    """Check the LaTeX formatting in a sequence of lines.

    Opts is a mapping of options to option values if any:
        -m munge parenthesis and brackets
        -d delimiters only checking
        -v verbose trace of delimiter matching
        -s lineno: linenumber to start scan (default is 1).

    Morecmds is a sequence of LaTeX commands (without backslashes) that
    are to be considered valid in the scan.
    """

    texcmd = re.compile(r'\\[A-Za-z]+')
    falsetexcmd = re.compile(r'\/([A-Za-z]+)') # Mismarked with forward slash

    validcmds = set(cmdstr.split())
    for cmd in morecmds:
        validcmds.add('\\' + cmd)

    if '-m' in opts:
        pairmap = {']':'[(', ')':'(['}      # Munged openers
    else:
        pairmap = {']':'[', ')':'('}        # Normal opener for a given closer
    openpunct = set('([')                   # Set of valid openers

    delimiters = re.compile(r'\\(begin|end){([_a-zA-Z]+)}|([()\[\]])')
    braces = re.compile(r'({)|(})')
    # NOTE(review): the range A-z below also matches '[', '\\', ']',
    # '^', '_' and '`'; presumably [A-Za-z] was intended -- confirm.
    doubledwords = re.compile(r'(\b[A-za-z]+\b) \b\1\b')
    spacingmarkup = re.compile(r'\\(ABC|ASCII|C|Cpp|EOF|infinity|NULL|plusminus|POSIX|UNIX)\s')

    openers = []                            # Stack of pending open delimiters
    bracestack = []                         # Stack of pending open braces

    tablestart = re.compile(r'\\begin{(?:long)?table([iv]+)}')
    tableline = re.compile(r'\\line([iv]+){')
    tableend = re.compile(r'\\end{(?:long)?table([iv]+)}')
    tablelevel = ''
    tablestartline = 0

    startline = int(opts.get('-s', '1'))
    lineno = 0

    for lineno, line in izip(count(startline), islice(source, startline-1, None)):
        line = line.rstrip()

        # Check balancing of open/close parenthesis, brackets, and begin/end blocks
        for begend, name, punct in delimiters.findall(line):
            if '-v' in opts:
                print lineno, '|', begend, name, punct,
            if begend == 'begin' and '-d' not in opts:
                openers.append((lineno, name))
            elif punct in openpunct:
                openers.append((lineno, punct))
            elif begend == 'end' and '-d' not in opts:
                matchclose(lineno, name, openers, pairmap)
            elif punct in pairmap:
                matchclose(lineno, punct, openers, pairmap)
            if '-v' in opts:
                print '   -->  ', openers

        # Balance opening and closing braces
        for open, close in braces.findall(line):
            if open == '{':
                bracestack.append(lineno)
            if close == '}':
                try:
                    bracestack.pop()
                except IndexError:
                    print r'Warning, unmatched } on line %s.' % (lineno,)

        # Optionally, skip LaTeX specific checks
        if '-d' in opts:
            continue

        # Warn whenever forward slashes encountered with a LaTeX command
        for cmd in falsetexcmd.findall(line):
            if '822' in line or '.html' in line:
                continue    # Ignore false positives for urls and for /rfc822
            if '\\' + cmd in validcmds:
                print 'Warning, forward slash used on line %d with cmd: /%s' % (lineno, cmd)

        # Check for markup requiring {} for correct spacing
        for cmd in spacingmarkup.findall(line):
            print r'Warning, \%s should be written as \%s{} on line %d' % (cmd, cmd, lineno)

        # Validate commands
        nc = line.find(r'\newcommand')
        if nc != -1:
            # A \newcommand defines a new valid command for this document.
            start = line.find('{', nc)
            end = line.find('}', start)
            validcmds.add(line[start+1:end])
        for cmd in texcmd.findall(line):
            if cmd not in validcmds:
                print r'Warning, unknown tex cmd on line %d: \%s' % (lineno, cmd)

        # Check table levels (make sure lineii only inside tableii)
        m = tablestart.search(line)
        if m:
            tablelevel = m.group(1)
            tablestartline = lineno
        m = tableline.search(line)
        if m and m.group(1) != tablelevel:
            print r'Warning, \line%s on line %d does not match \table%s on line %d' % (m.group(1), lineno, tablelevel, tablestartline)
        if tableend.search(line):
            tablelevel = ''

        # Style guide warnings
        if 'e.g.' in line or 'i.e.' in line:
            print r'Style warning, avoid use of i.e or e.g. on line %d' % (lineno,)

        for dw in doubledwords.findall(line):
            print r'Doubled word warning.  "%s" on line %d' % (dw, lineno)

    lastline = lineno
    # Report any delimiters still open at end of file.
    for lineno, symbol in openers:
        print "Unmatched open delimiter '%s' on line %d" % (symbol, lineno)
    for lineno in bracestack:
        print "Unmatched { on line %d" % (lineno,)
    print 'Done checking %d lines.' % (lastline,)
    return 0
+
+def main(args=None):
+ if args is None:
+ args = sys.argv[1:]
+ optitems, arglist = getopt.getopt(args, "k:mdhs:v")
+ opts = dict(optitems)
+ if '-h' in opts or args==[]:
+ print __doc__
+ return 0
+
+ if len(arglist) < 1:
+ print 'Please specify a file to be checked'
+ return 1
+
+ for i, filespec in enumerate(arglist):
+ if '*' in filespec or '?' in filespec:
+ arglist[i:i+1] = glob.glob(filespec)
+
+ morecmds = [v for k,v in optitems if k=='-k']
+ err = []
+
+ for filename in arglist:
+ print '=' * 30
+ print "Checking", filename
+ try:
+ f = open(filename)
+ except IOError:
+ print 'Cannot open file %s.' % arglist[0]
+ return 2
+
+ try:
+ err.append(checkit(f, opts, morecmds))
+ finally:
+ f.close()
+
+ return max(err)
+
# Exit with the status code returned by main().
if __name__ == '__main__':
    sys.exit(main())
diff --git a/lib/python2.7/Tools/scripts/texi2html.py b/lib/python2.7/Tools/scripts/texi2html.py
new file mode 100644
index 0000000..174c788
--- /dev/null
+++ b/lib/python2.7/Tools/scripts/texi2html.py
@@ -0,0 +1,2078 @@
+#!/usr/bin/env python2
+
+# Convert GNU texinfo files into HTML, one file per node.
+# Based on Texinfo 2.14.
+# Usage: texi2html [-d] [-d] [-c] inputfile outputdirectory
+# The input file must be a complete texinfo file, e.g. emacs.texi.
+# This creates many files (one per info node) in the output directory,
+# overwriting existing files of the same name. All files created have
+# ".html" as their extension.
+
+
+# XXX To do:
+# - handle @comment*** correctly
+# - handle @xref {some words} correctly
+# - handle @ftable correctly (items aren't indexed?)
+# - handle @itemx properly
+# - handle @exdent properly
+# - add links directly to the proper line from indices
+# - check against the definitive list of @-cmds; we still miss (among others):
+# - @defindex (hard)
+# - @c(omment) in the middle of a line (rarely used)
+# - @this* (not really needed, only used in headers anyway)
+# - @today{} (ever used outside title page?)
+
+# More consistent handling of chapters/sections/etc.
+# Lots of documentation
+# Many more options:
+# -top designate top node
+# -links customize which types of links are included
+# -split split at chapters or sections instead of nodes
+# -name Allow different types of filename handling. Non unix systems
+# will have problems with long node names
+# ...
+# Support the most recent texinfo version and take a good look at HTML 3.0
+# More debugging output (customizable) and more flexible error handling
+# How about icons ?
+
+# rpyron 2002-05-07
+# Robert Pyron <rpyron@alum.mit.edu>
+# 1. BUGFIX: In function makefile(), strip blanks from the nodename.
+# This is necessary to match the behavior of parser.makeref() and
+# parser.do_node().
+# 2. BUGFIX fixed KeyError in end_ifset (well, I may have just made
+# it go away, rather than fix it)
+# 3. BUGFIX allow @menu and menu items inside @ifset or @ifclear
+# 4. Support added for:
+# @uref URL reference
+# @image image file reference (see note below)
+# @multitable output an HTML table
+# @vtable
+# 5. Partial support for accents, to match MAKEINFO output
+# 6. I added a new command-line option, '-H basename', to specify
+# HTML Help output. This will cause three files to be created
+# in the current directory:
+# `basename`.hhp HTML Help Workshop project file
+# `basename`.hhc Contents file for the project
+# `basename`.hhk Index file for the project
+# When fed into HTML Help Workshop, the resulting file will be
+# named `basename`.chm.
+# 7. A new class, HTMLHelp, to accomplish item 6.
+# 8. Various calls to HTMLHelp functions.
+# A NOTE ON IMAGES: Just as 'outputdirectory' must exist before
+# running this program, all referenced images must already exist
+# in outputdirectory.
+
+import os
+import sys
+import string
+import re
+
# A texinfo file must begin with this line (possibly after blank/% lines).
MAGIC = '\\input texinfo'

cmprog = re.compile('^@([a-z]+)([ \t]|$)')   # Command (line-oriented)
blprog = re.compile('^[ \t]*$')              # Blank line
kwprog = re.compile('@[a-z]+')               # Keyword (embedded, usually
                                             # with {} args)
spprog = re.compile('[\n@{}&<>]')            # Special characters in
                                             # running text
                                             #
                                             # menu item (Yuck!)
miprog = re.compile('^\* ([^:]*):(:|[ \t]*([^\t,\n.]+)([^ \t\n]*))[ \t\n]*')
#                    0    1     1 2        3         34         42        0
#                         -----            ----------  ---------
#                                 -|-----------------------------
#                     -----------------------------------------------------
+
+
+
+
class HTMLNode:
    """Some of the parser's functionality is separated into this class.

    A Node accumulates its contents, takes care of links to other Nodes
    and saves itself when it is finished and all links are resolved.
    """

    DOCTYPE = '<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML 2.0//EN">'

    type = 0                      # overridden per-node by the parser
    cont = ''                     # "continue" link target, if any
    epilogue = '</BODY></HTML>\n'

    def __init__(self, dir, name, topname, title, next, prev, up):
        # A node with no topname is the top node: it is its own top.
        self.dirname = dir
        self.name = name
        if topname:
            self.topname = topname
        else:
            self.topname = name
        self.title = title
        self.next = next
        self.prev = prev
        self.up = up
        self.lines = []

    def write(self, *lines):
        # Buffer the output; it is assembled and saved by finalize/flush.
        map(self.lines.append, lines)

    def flush(self):
        # Write prologue + body + epilogue to this node's own HTML file.
        fp = open(self.dirname + '/' + makefile(self.name), 'w')
        fp.write(self.prologue)
        fp.write(self.text)
        fp.write(self.epilogue)
        fp.close()

    def link(self, label, nodename, rel=None, rev=None):
        # Emit one navigation anchor; '(dir)' points at the parent index.
        if nodename:
            if nodename.lower() == '(dir)':
                addr = '../dir.html'
                title = ''
            else:
                addr = makefile(nodename)
                title = ' TITLE="%s"' % nodename
            self.write(label, ': <A HREF="', addr, '"', \
                       rel and (' REL=' + rel) or "", \
                       rev and (' REV=' + rev) or "", \
                       title, '>', nodename, '</A>  \n')

    def finalize(self):
        # Assemble the navigation links and the HTML prologue; a long
        # node (> 20 lines) repeats the links in the epilogue.
        length = len(self.lines)
        self.text = ''.join(self.lines)
        self.lines = []
        self.open_links()
        self.output_links()
        self.close_links()
        links = ''.join(self.lines)
        self.lines = []
        self.prologue = (
            self.DOCTYPE +
            '\n<HTML><HEAD>\n'
            '  <!-- Converted with texi2html and Python -->\n'
            '  <TITLE>' + self.title + '</TITLE>\n'
            '  <LINK REL=Next HREF="'
                + makefile(self.next) + '" TITLE="' + self.next + '">\n'
            '  <LINK REL=Previous HREF="'
                + makefile(self.prev) + '" TITLE="' + self.prev  + '">\n'
            '  <LINK REL=Up HREF="'
                + makefile(self.up) + '" TITLE="' + self.up  + '">\n'
            '</HEAD><BODY>\n' +
            links)
        if length > 20:
            self.epilogue = '<P>\n%s</BODY></HTML>\n' % links

    def open_links(self):
        self.write('<HR>\n')

    def close_links(self):
        self.write('<HR>\n')

    def output_links(self):
        if self.cont != self.next:
            self.link('  Cont', self.cont)
        self.link('  Next', self.next, rel='Next')
        self.link('  Prev', self.prev, rel='Previous')
        self.link('  Up', self.up, rel='Up')
        if self.name <> self.topname:
            self.link('  Top', self.topname)
+
+
class HTML3Node(HTMLNode):
    """HTMLNode variant emitting an HTML 3.0 doctype and a navigation DIV."""

    DOCTYPE = '<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML Level 3//EN//3.0">'

    def open_links(self):
        self.write('<DIV CLASS=Navigation>\n <HR>\n')

    def close_links(self):
        self.write(' <HR>\n</DIV>\n')
+
+
class TexinfoParser:
    """Translate a texinfo file into one HTML file per node.

    The output flavor is selected via the `Node` class attribute
    (HTMLNode for HTML 2.0, HTML3Node for HTML 3.0).
    """

    COPYRIGHT_SYMBOL = "&copy;"
    # %-format templates for footnote cross-references; keys: id, text.
    FN_ID_PATTERN = "(%(id)s)"
    FN_SOURCE_PATTERN = '<A NAME=footnoteref%(id)s' \
                        ' HREF="#footnotetext%(id)s">' \
                        + FN_ID_PATTERN + '</A>'
    FN_TARGET_PATTERN = '<A NAME=footnotetext%(id)s' \
                        ' HREF="#footnoteref%(id)s">' \
                        + FN_ID_PATTERN + '</A>\n%(text)s<P>\n'
    FN_HEADER = '\n<P>\n<HR NOSHADE SIZE=1 WIDTH=200>\n' \
                '<STRONG><EM>Footnotes</EM></STRONG>\n<P>'


    Node = HTMLNode
+
    # Initialize an instance
    def __init__(self):
        self.unknown = {}       # statistics about unknown @-commands
        self.filenames = {}     # Check for identical filenames
        self.debugging = 0      # larger values produce more output
        self.print_headers = 0  # always print headers?
        self.nodefp = None      # open file we're writing to
        self.nodelineno = 0     # Linenumber relative to node
        self.links = None       # Links from current node
        self.savetext = None    # If not None, save text head instead
        self.savestack = []     # If not None, save text head instead
        self.htmlhelp = None    # html help data
        self.dirname = 'tmp'    # directory where files are created
        self.includedir = '.'   # directory to search @include files
        self.nodename = ''      # name of current node
        self.topname = ''       # name of top node (first node seen)
        self.title = ''         # title of this whole Texinfo tree
        self.resetindex()       # Reset all indices
        self.contents = []      # Reset table of contents
        self.numbering = []     # Reset section numbering counters
        self.nofill = 0         # Normal operation: fill paragraphs
        self.values={'html': 1} # Names that should be parsed in ifset
        self.stackinfo={}       # Keep track of state in the stack
        # XXX The following should be reset per node?!
        self.footnotes = []     # Reset list of footnotes
        self.itemarg = None     # Reset command used by @item
        self.itemnumber = None  # Reset number for @item in @enumerate
        self.itemindex = None   # Reset item index name
        self.node = None        # current Node being accumulated
        self.nodestack = []     # Nodes awaiting final link resolution
        self.cont = 0           # pending "continue" link target
        self.includedepth = 0   # nesting depth of @include processing
+
    # Set htmlhelp helper class
    def sethtmlhelp(self, htmlhelp):
        self.htmlhelp = htmlhelp

    # Set (output) directory name
    def setdirname(self, dirname):
        self.dirname = dirname

    # Set include directory name
    def setincludedir(self, includedir):
        self.includedir = includedir
+
    # Parse the contents of an entire file
    def parse(self, fp):
        """Skip leading blank/% lines, demand the MAGIC header, then
        hand the rest of the file to parserest()."""
        line = fp.readline()
        lineno = 1
        while line and (line[0] == '%' or blprog.match(line)):
            line = fp.readline()
            lineno = lineno + 1
        if line[:len(MAGIC)] <> MAGIC:
            raise SyntaxError, 'file does not begin with %r' % (MAGIC,)
        self.parserest(fp, lineno)
+
    # Parse the contents of a file, not expecting a MAGIC header
    def parserest(self, fp, initial_lineno):
        """Main line loop: accumulate running text in `accu`, dispatch
        @-command lines, and emit paragraph breaks on blank lines."""
        lineno = initial_lineno
        self.done = 0
        self.skip = 0
        self.stack = []
        accu = []
        while not self.done:
            line = fp.readline()
            self.nodelineno = self.nodelineno + 1
            if not line:
                if accu:
                    if not self.skip: self.process(accu)
                    accu = []
                # initial_lineno == 0 means an @include file, where a
                # missing @bye is normal.
                if initial_lineno > 0:
                    print '*** EOF before @bye'
                break
            lineno = lineno + 1
            mo = cmprog.match(line)
            if mo:
                a, b = mo.span(1)
                cmd = line[a:b]
                if cmd in ('noindent', 'refill'):
                    accu.append(line)
                else:
                    if accu:
                        if not self.skip:
                            self.process(accu)
                        accu = []
                    self.command(line, mo)
            elif blprog.match(line) and \
                 'format' not in self.stack and \
                 'example' not in self.stack:
                if accu:
                    if not self.skip:
                        self.process(accu)
                        if self.nofill:
                            self.write('\n')
                        else:
                            self.write('<P>\n')
                    accu = []
            else:
                # Append the line including trailing \n!
                accu.append(line)
        #
        if self.skip:
            print '*** Still skipping at the end'
        if self.stack:
            print '*** Stack not empty at the end'
            print '***', self.stack
        if self.includedepth == 0:
            # Flush all nodes that were deferred for link resolution.
            while self.nodestack:
                self.nodestack[-1].finalize()
                self.nodestack[-1].flush()
                del self.nodestack[-1]
+
    # Start saving text in a buffer instead of writing it to a file
    def startsaving(self):
        if self.savetext <> None:
            # Nested save: push the current buffer.
            self.savestack.append(self.savetext)
            # print '*** Recursively saving text, expect trouble'
        self.savetext = ''
+
+ # Return the text saved so far and start writing to file again
+ def collectsavings(self):
+ savetext = self.savetext
+ if len(self.savestack) > 0:
+ self.savetext = self.savestack[-1]
+ del self.savestack[-1]
+ else:
+ self.savetext = None
+ return savetext or ''
+
    # Write text to file, or save it in a buffer, or ignore it
    def write(self, *args):
        try:
            text = ''.join(args)
        except:
            print args
            raise TypeError
        # Priority: active save buffer, then open node file, then the
        # current Node object; otherwise the text is dropped.
        if self.savetext <> None:
            self.savetext = self.savetext + text
        elif self.nodefp:
            self.nodefp.write(text)
        elif self.node:
            self.node.write(text)
+
    # Complete the current node -- write footnotes and close file
    def endnode(self):
        if self.savetext <> None:
            print '*** Still saving text at end of node'
            dummy = self.collectsavings()
        if self.footnotes:
            self.writefootnotes()
        if self.nodefp:
            # Legacy direct-file mode: long nodes get a trailing link bar.
            if self.nodelineno > 20:
                self.write('<HR>\n')
                [name, next, prev, up] = self.nodelinks[:4]
                self.link('Next', next)
                self.link('Prev', prev)
                self.link('Up', up)
                if self.nodename <> self.topname:
                    self.link('Top', self.topname)
                self.write('<HR>\n')
            self.write('</BODY>\n')
            self.nodefp.close()
            self.nodefp = None
        elif self.node:
            # Node-object mode: flush now if all links are known,
            # otherwise defer until parserest() drains the stack.
            if not self.cont and \
               (not self.node.type or \
                (self.node.next and self.node.prev and self.node.up)):
                self.node.finalize()
                self.node.flush()
            else:
                self.nodestack.append(self.node)
            self.node = None
        self.nodename = ''
+
    # Process a list of lines, expanding embedded @-commands
    # This mostly distinguishes between menus and normal text
    def process(self, accu):
        if self.debugging > 1:
            print '!'*self.debugging, 'process:', self.skip, self.stack,
            if accu: print accu[0][:30],
            if accu[0][30:] or accu[1:]: print '...',
            print
        if self.inmenu():
            # XXX should be done differently
            for line in accu:
                mo = miprog.match(line)
                if not mo:
                    line = line.strip() + '\n'
                    self.expand(line)
                    continue
                bgn, end = mo.span(0)
                a, b = mo.span(1)
                c, d = mo.span(2)
                e, f = mo.span(3)
                g, h = mo.span(4)
                label = line[a:b]
                nodename = line[c:d]
                # '* label::' means label and node name coincide.
                if nodename[0] == ':': nodename = label
                else: nodename = line[e:f]
                punct = line[g:h]
                self.write('  <LI><A HREF="',
                           makefile(nodename),
                           '">', nodename,
                           '</A>', punct, '\n')
                self.htmlhelp.menuitem(nodename)
                self.expand(line[end:])
        else:
            text = ''.join(accu)
            self.expand(text)
+
+ # find 'menu' (we might be inside 'ifset' or 'ifclear')
+ def inmenu(self):
+ #if 'menu' in self.stack:
+ # print 'inmenu :', self.skip, self.stack, self.stackinfo
+ stack = self.stack
+ while stack and stack[-1] in ('ifset','ifclear'):
+ try:
+ if self.stackinfo[len(stack)]:
+ return 0
+ except KeyError:
+ pass
+ stack = stack[:-1]
+ return (stack and stack[-1] == 'menu')
+
    # Write a string, expanding embedded @-commands
    def expand(self, text):
        """Copy text to the output, interpreting @-commands, the {}
        argument grouping, and HTML-special characters."""
        stack = []
        i = 0
        n = len(text)
        while i < n:
            start = i
            mo = spprog.search(text, i)
            if mo:
                i = mo.start()
            else:
                # No more special characters: flush the tail and stop.
                self.write(text[start:])
                break
            self.write(text[start:i])
            c = text[i]
            i = i+1
            if c == '\n':
                self.write('\n')
                continue
            if c == '<':
                self.write('&lt;')
                continue
            if c == '>':
                self.write('&gt;')
                continue
            if c == '&':
                self.write('&amp;')
                continue
            if c == '{':
                # Bare '{' opens an anonymous group.
                stack.append('')
                continue
            if c == '}':
                if not stack:
                    print '*** Unmatched }'
                    self.write('}')
                    continue
                # Close the innermost @cmd{...} via close_<cmd>.
                cmd = stack[-1]
                del stack[-1]
                try:
                    method = getattr(self, 'close_' + cmd)
                except AttributeError:
                    self.unknown_close(cmd)
                    continue
                method()
                continue
            if c <> '@':
                # Cannot happen unless spprog is changed
                raise RuntimeError, 'unexpected funny %r' % c
            start = i
            while i < n and text[i] in string.ascii_letters: i = i+1
            if i == start:
                # @ plus non-letter: literal next character
                i = i+1
                c = text[start:i]
                if c == ':':
                    # `@:' means no extra space after
                    # preceding `.', `?', `!' or `:'
                    pass
                else:
                    # `@.' means a sentence-ending period;
                    # `@@', `@{', `@}' quote `@', `{', `}'
                    self.write(c)
                continue
            cmd = text[start:i]
            if i < n and text[i] == '{':
                # @cmd{...}: dispatch open_<cmd> and remember cmd for
                # the matching '}'.
                i = i+1
                stack.append(cmd)
                try:
                    method = getattr(self, 'open_' + cmd)
                except AttributeError:
                    self.unknown_open(cmd)
                    continue
                method()
                continue
            # Argument-less @cmd: dispatch handle_<cmd>.
            try:
                method = getattr(self, 'handle_' + cmd)
            except AttributeError:
                self.unknown_handle(cmd)
                continue
            method()
        if stack:
            print '*** Stack not empty at para:', stack
+
    # --- Handle unknown embedded @-commands ---
    # Each unknown command is echoed verbatim and counted in
    # self.unknown for the statistics report.

    def unknown_open(self, cmd):
        print '*** No open func for @' + cmd + '{...}'
        cmd = cmd + '{'
        self.write('@', cmd)
        if not self.unknown.has_key(cmd):
            self.unknown[cmd] = 1
        else:
            self.unknown[cmd] = self.unknown[cmd] + 1

    def unknown_close(self, cmd):
        print '*** No close func for @' + cmd + '{...}'
        cmd = '}' + cmd
        self.write('}')
        if not self.unknown.has_key(cmd):
            self.unknown[cmd] = 1
        else:
            self.unknown[cmd] = self.unknown[cmd] + 1

    def unknown_handle(self, cmd):
        print '*** No handler for @' + cmd
        self.write('@', cmd)
        if not self.unknown.has_key(cmd):
            self.unknown[cmd] = 1
        else:
            self.unknown[cmd] = self.unknown[cmd] + 1
+
    # XXX The following sections should be ordered as the texinfo docs

    # --- Embedded @-commands without {} argument list --

    # No-ops: HTML rendering makes these hints irrelevant here.
    def handle_noindent(self): pass

    def handle_refill(self): pass
+
    # --- Include file handling ---

    def do_include(self, args):
        """Parse an @include file relative to the include directory,
        preserving the surrounding parse state."""
        file = args
        file = os.path.join(self.includedir, file)
        try:
            fp = open(file, 'r')
        except IOError, msg:
            print '*** Can\'t open include file', repr(file)
            return
        print '!'*self.debugging, '--> file', repr(file)
        save_done = self.done
        save_skip = self.skip
        save_stack = self.stack
        self.includedepth = self.includedepth + 1
        # initial_lineno 0 suppresses the EOF-before-@bye warning.
        self.parserest(fp, 0)
        self.includedepth = self.includedepth - 1
        fp.close()
        self.done = save_done
        self.skip = save_skip
        self.stack = save_stack
        print '!'*self.debugging, '<-- file', repr(file)
+
+ # --- Special Insertions ---
+
+ def open_dmn(self): pass
+ def close_dmn(self): pass
+
+ def open_dots(self): self.write('...')
+ def close_dots(self): pass
+
+ def open_bullet(self): pass
+ def close_bullet(self): pass
+
+ def open_TeX(self): self.write('TeX')
+ def close_TeX(self): pass
+
+ def handle_copyright(self): self.write(self.COPYRIGHT_SYMBOL)
+ def open_copyright(self): self.write(self.COPYRIGHT_SYMBOL)
+ def close_copyright(self): pass
+
+ def open_minus(self): self.write('-')
+ def close_minus(self): pass
+
+ # --- Accents ---
+
+ # rpyron 2002-05-07
+ # I would like to do at least as well as makeinfo when
+ # it is producing HTML output:
+ #
+ # input output
+ # @"o @"o umlaut accent
+ # @'o 'o acute accent
+ # @,{c} @,{c} cedilla accent
+ # @=o @=o macron/overbar accent
+ # @^o @^o circumflex accent
+ # @`o `o grave accent
+ # @~o @~o tilde accent
+ # @dotaccent{o} @dotaccent{o} overdot accent
+ # @H{o} @H{o} long Hungarian umlaut
+ # @ringaccent{o} @ringaccent{o} ring accent
+ # @tieaccent{oo} @tieaccent{oo} tie-after accent
+ # @u{o} @u{o} breve accent
+ # @ubaraccent{o} @ubaraccent{o} underbar accent
+ # @udotaccent{o} @udotaccent{o} underdot accent
+ # @v{o} @v{o} hacek or check accent
+ # @exclamdown{} &#161; upside-down !
+ # @questiondown{} &#191; upside-down ?
+ # @aa{},@AA{} &#229;,&#197; a,A with circle
+ # @ae{},@AE{} &#230;,&#198; ae,AE ligatures
+ # @dotless{i} @dotless{i} dotless i
+ # @dotless{j} @dotless{j} dotless j
+ # @l{},@L{} l/,L/ suppressed-L,l
+ # @o{},@O{} &#248;,&#216; O,o with slash
+ # @oe{},@OE{} oe,OE oe,OE ligatures
+ # @ss{} &#223; es-zet or sharp S
+ #
+ # The following character codes and approximations have been
+ # copied from makeinfo's HTML output.
+
    # Accent and ligature commands, emitted as HTML character references
    # (codes copied from makeinfo's HTML output; see table above).
    def open_exclamdown(self): self.write('&#161;')   # upside-down !
    def close_exclamdown(self): pass
    def open_questiondown(self): self.write('&#191;') # upside-down ?
    def close_questiondown(self): pass
    def open_aa(self): self.write('&#229;')           # a with circle
    def close_aa(self): pass
    def open_AA(self): self.write('&#197;')           # A with circle
    def close_AA(self): pass
    def open_ae(self): self.write('&#230;')           # ae ligatures
    def close_ae(self): pass
    def open_AE(self): self.write('&#198;')           # AE ligatures
    def close_AE(self): pass
    def open_o(self): self.write('&#248;')            # o with slash
    def close_o(self): pass
    def open_O(self): self.write('&#216;')            # O with slash
    def close_O(self): pass
    def open_ss(self): self.write('&#223;')           # es-zet or sharp S
    def close_ss(self): pass
    def open_oe(self): self.write('oe')               # oe ligatures
    def close_oe(self): pass
    def open_OE(self): self.write('OE')               # OE ligatures
    def close_OE(self): pass
    def open_l(self): self.write('l/')                # suppressed-l
    def close_l(self): pass
    def open_L(self): self.write('L/')                # suppressed-L
    def close_L(self): pass
+
+ # --- Special Glyphs for Examples ---
+
    # Glyphs used inside @example blocks; '>' is HTML-escaped by hand
    # because these strings are written verbatim into the output.
    def open_result(self): self.write('=&gt;')        # @result{} arrow
    def close_result(self): pass

    def open_expansion(self): self.write('==&gt;')    # macro expansion
    def close_expansion(self): pass

    def open_print(self): self.write('-|')            # printed output
    def close_print(self): pass

    def open_error(self): self.write('error--&gt;')   # error message
    def close_error(self): pass

    def open_equiv(self): self.write('==')            # equivalence
    def close_equiv(self): pass

    def open_point(self): self.write('-!-')           # point (cursor)
    def close_point(self): pass
+
+ # --- Cross References ---
+
    # Cross references: the brace contents are buffered via startsaving()
    # and turned into a hyperlink by makeref() when the brace closes.
    def open_pxref(self):
        self.write('see ')
        self.startsaving()
    def close_pxref(self):
        self.makeref()

    def open_xref(self):
        self.write('See ')
        self.startsaving()
    def close_xref(self):
        self.makeref()

    def open_ref(self):
        self.startsaving()
    def close_ref(self):
        self.makeref()

    def open_inforef(self):
        self.write('See info file ')
        self.startsaving()
    def close_inforef(self):
        # @inforef{node, label, info-file}: emitted as plain text, not a
        # hyperlink, since the target is an Info file.
        text = self.collectsavings()
        args = [s.strip() for s in text.split(',')]
        while len(args) < 3: args.append('')
        node = args[0]
        file = args[2]
        self.write('`', file, '\', node `', node, '\'')
+
+ def makeref(self):
+ text = self.collectsavings()
+ args = [s.strip() for s in text.split(',')]
+ while len(args) < 5: args.append('')
+ nodename = label = args[0]
+ if args[2]: label = args[2]
+ file = args[3]
+ title = args[4]
+ href = makefile(nodename)
+ if file:
+ href = '../' + file + '/' + href
+ self.write('<A HREF="', href, '">', label, '</A>')
+
+ # rpyron 2002-05-07 uref support
+ def open_uref(self):
+ self.startsaving()
+ def close_uref(self):
+ text = self.collectsavings()
+ args = [s.strip() for s in text.split(',')]
+ while len(args) < 2: args.append('')
+ href = args[0]
+ label = args[1]
+ if not label: label = href
+ self.write('<A HREF="', href, '">', label, '</A>')
+
+ # rpyron 2002-05-07 image support
+ # GNU makeinfo producing HTML output tries `filename.png'; if
+ # that does not exist, it tries `filename.jpg'. If that does
+ # not exist either, it complains. GNU makeinfo does not handle
+ # GIF files; however, I include GIF support here because
+ # MySQL documentation uses GIF files.
+
    def open_image(self):
        self.startsaving()
    def close_image(self):
        self.makeimage()
    def makeimage(self):
        """Emit an <IMG> for a buffered @image{file, width, height, alt, ext}
        argument list, probing for .png/.jpg/.gif next to the source."""
        text = self.collectsavings()
        args = [s.strip() for s in text.split(',')]
        while len(args) < 5: args.append('')
        filename = args[0]
        width = args[1]
        height = args[2]
        alt = args[3]
        ext = args[4]

        # The HTML output will have a reference to the image
        # that is relative to the HTML output directory,
        # which is what 'filename' gives us. However, we need
        # to find it relative to our own current directory,
        # so we construct 'imagename'.
        imagelocation = self.dirname + '/' + filename

        if os.path.exists(imagelocation+'.png'):
            filename += '.png'
        elif os.path.exists(imagelocation+'.jpg'):
            filename += '.jpg'
        elif os.path.exists(imagelocation+'.gif'):   # MySQL uses GIF files
            filename += '.gif'
        else:
            # No extension matched: complain but still emit the bare name.
            print "*** Cannot find image " + imagelocation
        #TODO: what is 'ext'?
        self.write('<IMG SRC="', filename, '"',                     \
                    width  and (' WIDTH="'  + width  + '"') or "",  \
                    height and (' HEIGHT="' + height + '"') or "",  \
                    alt    and (' ALT="'    + alt    + '"') or "",  \
                    '/>' )
        self.htmlhelp.addimage(imagelocation)
+
+
+ # --- Marking Words and Phrases ---
+
+ # --- Other @xxx{...} commands ---
+
    def open_(self): pass    # Used by {text enclosed in braces}
    def close_(self): pass

    # @asis passes its argument through unchanged.
    open_asis = open_
    close_asis = close_

    # Font/markup commands map directly onto HTML inline elements.
    def open_cite(self): self.write('<CITE>')
    def close_cite(self): self.write('</CITE>')

    def open_code(self): self.write('<CODE>')
    def close_code(self): self.write('</CODE>')

    def open_t(self): self.write('<TT>')
    def close_t(self): self.write('</TT>')

    def open_dfn(self): self.write('<DFN>')
    def close_dfn(self): self.write('</DFN>')

    def open_emph(self): self.write('<EM>')
    def close_emph(self): self.write('</EM>')

    def open_i(self): self.write('<I>')
    def close_i(self): self.write('</I>')
+
    def open_footnote(self):
        # if self.savetext <> None:
        #     print '*** Recursive footnote -- expect weirdness'
        # Footnote ids are 1-based list positions; open_ and close_ both
        # derive the same id from len(self.footnotes) because nothing is
        # appended until close_footnote runs (assumes no nesting).
        id = len(self.footnotes) + 1
        self.write(self.FN_SOURCE_PATTERN % {'id': repr(id)})
        self.startsaving()

    def close_footnote(self):
        id = len(self.footnotes) + 1
        self.footnotes.append((id, self.collectsavings()))

    def writefootnotes(self):
        """Emit the accumulated footnotes block and reset the list."""
        self.write(self.FN_HEADER)
        for id, text in self.footnotes:
            self.write(self.FN_TARGET_PATTERN
                       % {'id': repr(id), 'text': text})
        self.footnotes = []
+
    def open_file(self): self.write('<CODE>')
    def close_file(self): self.write('</CODE>')

    def open_kbd(self): self.write('<KBD>')
    def close_kbd(self): self.write('</KBD>')

    # NOTE(review): <KEY>, <R>, <SMALLCAPS> and <NOBREAK> are not real
    # HTML elements; browsers will ignore the tags.
    def open_key(self): self.write('<KEY>')
    def close_key(self): self.write('</KEY>')

    def open_r(self): self.write('<R>')
    def close_r(self): self.write('</R>')

    def open_samp(self): self.write('`<SAMP>')
    def close_samp(self): self.write('</SAMP>\'')

    def open_sc(self): self.write('<SMALLCAPS>')
    def close_sc(self): self.write('</SMALLCAPS>')

    def open_strong(self): self.write('<STRONG>')
    def close_strong(self): self.write('</STRONG>')

    def open_b(self): self.write('<B>')
    def close_b(self): self.write('</B>')

    def open_var(self): self.write('<VAR>')
    def close_var(self): self.write('</VAR>')

    def open_w(self): self.write('<NOBREAK>')
    def close_w(self): self.write('</NOBREAK>')

    # @url and @email buffer their argument and emit a self-labelled link.
    def open_url(self): self.startsaving()
    def close_url(self):
        text = self.collectsavings()
        self.write('<A HREF="', text, '">', text, '</A>')

    def open_email(self): self.startsaving()
    def close_email(self):
        text = self.collectsavings()
        self.write('<A HREF="mailto:', text, '">', text, '</A>')

    open_titlefont = open_
    close_titlefont = close_

    def open_small(self): pass
    def close_small(self): pass
+
    def command(self, line, mo):
        """Dispatch one @-command found by the parser's regexp.

        do_CMD handles one-shot commands; bgn_CMD opens an environment
        (pushed on self.stack until the matching @end); anything else is
        reported via unknown_cmd.
        """
        a, b = mo.span(1)
        cmd = line[a:b]
        args = line[b:].strip()
        if self.debugging > 1:
            print '!'*self.debugging, 'command:', self.skip, self.stack, \
                  '@' + cmd, args
        try:
            func = getattr(self, 'do_' + cmd)
        except AttributeError:
            try:
                func = getattr(self, 'bgn_' + cmd)
            except AttributeError:
                # don't complain if we are skipping anyway
                if not self.skip:
                    self.unknown_cmd(cmd, args)
                return
            self.stack.append(cmd)
            func(args)
            return
        # @end must run even while skipping so the skip counter can be
        # decremented by the matching end_ handler.
        if not self.skip or cmd == 'end':
            func(args)
+
+ def unknown_cmd(self, cmd, args):
+ print '*** unknown', '@' + cmd, args
+ if not self.unknown.has_key(cmd):
+ self.unknown[cmd] = 1
+ else:
+ self.unknown[cmd] = self.unknown[cmd] + 1
+
    def do_end(self, args):
        """Handle @end CMD: pop the environment stack and call end_CMD."""
        words = args.split()
        if not words:
            print '*** @end w/o args'
        else:
            cmd = words[0]
            # Complain on mismatch, but only pop when the top matches;
            # the end_ handler is invoked either way.
            if not self.stack or self.stack[-1] <> cmd:
                print '*** @end', cmd, 'unexpected'
            else:
                del self.stack[-1]
            try:
                func = getattr(self, 'end_' + cmd)
            except AttributeError:
                self.unknown_end(cmd)
                return
            func()
+
+ def unknown_end(self, cmd):
+ cmd = 'end ' + cmd
+ print '*** unknown', '@' + cmd
+ if not self.unknown.has_key(cmd):
+ self.unknown[cmd] = 1
+ else:
+ self.unknown[cmd] = self.unknown[cmd] + 1
+
+ # --- Comments ---
+
    # @comment/@c produce no output.
    def do_comment(self, args): pass
    do_c = do_comment

    # --- Conditional processing ---
    # Environments meant for other output formats bump self.skip; the
    # matching end_ handler decrements it. @ifinfo content is kept.

    def bgn_ifinfo(self, args): pass
    def end_ifinfo(self): pass

    def bgn_iftex(self, args): self.skip = self.skip + 1
    def end_iftex(self): self.skip = self.skip - 1

    def bgn_ignore(self, args): self.skip = self.skip + 1
    def end_ignore(self): self.skip = self.skip - 1

    def bgn_tex(self, args): self.skip = self.skip + 1
    def end_tex(self): self.skip = self.skip - 1
+
+ def do_set(self, args):
+ fields = args.split(' ')
+ key = fields[0]
+ if len(fields) == 1:
+ value = 1
+ else:
+ value = ' '.join(fields[1:])
+ self.values[key] = value
+
+ def do_clear(self, args):
+ self.values[args] = None
+
    def bgn_ifset(self, args):
        """@ifset NAME: skip the body unless NAME was @set."""
        if args not in self.values.keys() \
           or self.values[args] is None:
            self.skip = self.skip + 1
            # Record at the current stack depth that skip was bumped, so
            # the matching @end ifset knows whether to undo it.
            self.stackinfo[len(self.stack)] = 1
        else:
            self.stackinfo[len(self.stack)] = 0
    def end_ifset(self):
        try:
            # do_end has already popped the stack, hence +1 to recover
            # the depth at which bgn_ifset stored its flag.
            if self.stackinfo[len(self.stack) + 1]:
                self.skip = self.skip - 1
            del self.stackinfo[len(self.stack) + 1]
        except KeyError:
            print '*** end_ifset: KeyError :', len(self.stack) + 1

    def bgn_ifclear(self, args):
        """@ifclear NAME: skip the body when NAME *is* set (inverse of
        @ifset); same stackinfo bookkeeping."""
        if args in self.values.keys() \
           and self.values[args] is not None:
            self.skip = self.skip + 1
            self.stackinfo[len(self.stack)] = 1
        else:
            self.stackinfo[len(self.stack)] = 0
    def end_ifclear(self):
        try:
            if self.stackinfo[len(self.stack) + 1]:
                self.skip = self.skip - 1
            del self.stackinfo[len(self.stack) + 1]
        except KeyError:
            print '*** end_ifclear: KeyError :', len(self.stack) + 1
+
+ def open_value(self):
+ self.startsaving()
+
+ def close_value(self):
+ key = self.collectsavings()
+ if key in self.values.keys():
+ self.write(self.values[key])
+ else:
+ print '*** Undefined value: ', key
+
+ # --- Beginning a file ---
+
    # Output-control commands with no HTML meaning are ignored.
    do_finalout = do_comment
    do_setchapternewpage = do_comment
    do_setfilename = do_comment

    def do_settitle(self, args):
        # Expand the argument through the save buffer so any embedded
        # @-commands are processed before the title is stored.
        self.startsaving()
        self.expand(args)
        self.title = self.collectsavings()
    def do_parskip(self, args): pass
+
+ # --- Ending a file ---
+
    def do_bye(self, args):
        """@bye: flush the current node and tell the parse loop to stop."""
        self.endnode()
        self.done = 1
+
+ # --- Title page ---
+
+ def bgn_titlepage(self, args): self.skip = self.skip + 1
+ def end_titlepage(self): self.skip = self.skip - 1
+ def do_shorttitlepage(self, args): pass
+
+ def do_center(self, args):
+ # Actually not used outside title page...
+ self.write('<H1>')
+ self.expand(args)
+ self.write('</H1>\n')
+ do_title = do_center
+ do_subtitle = do_center
+ do_author = do_center
+
+ do_vskip = do_comment
+ do_vfill = do_comment
+ do_smallbook = do_comment
+
+ do_paragraphindent = do_comment
+ do_setchapternewpage = do_comment
+ do_headings = do_comment
+ do_footnotestyle = do_comment
+
+ do_evenheading = do_comment
+ do_evenfooting = do_comment
+ do_oddheading = do_comment
+ do_oddfooting = do_comment
+ do_everyheading = do_comment
+ do_everyfooting = do_comment
+
+ # --- Nodes ---
+
    def do_node(self, args):
        """@node NAME, NEXT, PREV, UP: finish the current node and start
        a new one, recording its navigation links."""
        self.endnode()
        self.nodelineno = 0
        parts = [s.strip() for s in args.split(',')]
        while len(parts) < 4: parts.append('')
        self.nodelinks = parts
        [name, next, prev, up] = parts[:4]
        file = self.dirname + '/' + makefile(name)
        # Two node names can map to the same sanitized filename; warn
        # rather than silently overwrite.
        if self.filenames.has_key(file):
            print '*** Filename already in use: ', file
        else:
            if self.debugging: print '!'*self.debugging, '--- writing', file
        self.filenames[file] = 1
        # self.nodefp = open(file, 'w')
        self.nodename = name
        # In continuous mode, tell the previous node what follows it.
        if self.cont and self.nodestack:
            self.nodestack[-1].cont = self.nodename
        # First node seen becomes the "top" used in navigation bars.
        if not self.topname: self.topname = name
        title = name
        if self.title: title = title + ' -- ' + self.title
        self.node = self.Node(self.dirname, self.nodename, self.topname,
                              title, next, prev, up)
        self.htmlhelp.addnode(self.nodename,next,prev,up,file)
+
+ def link(self, label, nodename):
+ if nodename:
+ if nodename.lower() == '(dir)':
+ addr = '../dir.html'
+ else:
+ addr = makefile(nodename)
+ self.write(label, ': <A HREF="', addr, '" TYPE="',
+ label, '">', nodename, '</A> \n')
+
+ # --- Sectioning commands ---
+
    def popstack(self, type):
        """Unwind self.nodestack down to the given sectioning level.

        Deeper nodes are finalized and flushed; a same-level node also
        donates next/prev links to/from the current node; the first
        shallower node becomes the current node's 'up' link.
        """
        if (self.node):
            self.node.type = type
            while self.nodestack:
                if self.nodestack[-1].type > type:
                    # Deeper section: close it out entirely.
                    self.nodestack[-1].finalize()
                    self.nodestack[-1].flush()
                    del self.nodestack[-1]
                elif self.nodestack[-1].type == type:
                    # Sibling section: wire up next/prev, then close it.
                    if not self.nodestack[-1].next:
                        self.nodestack[-1].next = self.node.name
                    if not self.node.prev:
                        self.node.prev = self.nodestack[-1].name
                    self.nodestack[-1].finalize()
                    self.nodestack[-1].flush()
                    del self.nodestack[-1]
                else:
                    # Shallower section: it is our parent; stop here.
                    if type > 1 and not self.node.up:
                        self.node.up = self.nodestack[-1].name
                    break
+
    # Sectioning commands: heading() emits the <Hn> element (level >= 0
    # also numbers it); popstack() then unwinds the node stack to the
    # command's structural depth. Level -1 means "unnumbered".
    def do_chapter(self, args):
        self.heading('H1', args, 0)
        self.popstack(1)

    def do_unnumbered(self, args):
        self.heading('H1', args, -1)
        self.popstack(1)
    def do_appendix(self, args):
        self.heading('H1', args, -1)
        self.popstack(1)
    def do_top(self, args):
        self.heading('H1', args, -1)
    def do_chapheading(self, args):
        self.heading('H1', args, -1)
    def do_majorheading(self, args):
        self.heading('H1', args, -1)

    def do_section(self, args):
        self.heading('H1', args, 1)
        self.popstack(2)

    def do_unnumberedsec(self, args):
        self.heading('H1', args, -1)
        self.popstack(2)
    def do_appendixsec(self, args):
        self.heading('H1', args, -1)
        self.popstack(2)
    do_appendixsection = do_appendixsec
    def do_heading(self, args):
        self.heading('H1', args, -1)

    def do_subsection(self, args):
        self.heading('H2', args, 2)
        self.popstack(3)
    def do_unnumberedsubsec(self, args):
        self.heading('H2', args, -1)
        self.popstack(3)
    def do_appendixsubsec(self, args):
        self.heading('H2', args, -1)
        self.popstack(3)
    def do_subheading(self, args):
        self.heading('H2', args, -1)

    def do_subsubsection(self, args):
        self.heading('H3', args, 3)
        self.popstack(4)
    def do_unnumberedsubsubsec(self, args):
        self.heading('H3', args, -1)
        self.popstack(4)
    def do_appendixsubsubsec(self, args):
        self.heading('H3', args, -1)
        self.popstack(4)
    def do_subsubheading(self, args):
        self.heading('H3', args, -1)
+
    def heading(self, type, args, level):
        """Emit a heading element of the given HTML type.

        For level >= 0 the section counters in self.numbering are
        advanced and a dotted number (e.g. '2.3.1.') is prefixed to the
        title; the heading is also recorded for the table of contents.
        """
        if level >= 0:
            while len(self.numbering) <= level:
                self.numbering.append(0)
            # Reset any deeper counters and bump this level's counter.
            del self.numbering[level+1:]
            self.numbering[level] = self.numbering[level] + 1
            x = ''
            for i in self.numbering:
                x = x + repr(i) + '.'
            args = x + ' ' + args
            self.contents.append((level, args, self.nodename))
        self.write('<', type, '>')
        self.expand(args)
        self.write('</', type, '>\n')
        if self.debugging or self.print_headers:
            print '---', args
+
    def do_contents(self, args):
        # pass
        self.listcontents('Table of Contents', 999)

    def do_shortcontents(self, args):
        pass
        # self.listcontents('Short Contents', 0)
    do_summarycontents = do_shortcontents

    def listcontents(self, title, maxlevel):
        """Emit the collected headings up to maxlevel as nested <UL>s."""
        self.write('<H1>', title, '</H1>\n<UL COMPACT PLAIN>\n')
        # prevlevels tracks the nesting of currently-open <UL> elements.
        prevlevels = [0]
        for level, title, node in self.contents:
            if level > maxlevel:
                continue
            if level > prevlevels[-1]:
                # can only advance one level at a time
                self.write('  '*prevlevels[-1], '<UL PLAIN>\n')
                prevlevels.append(level)
            elif level < prevlevels[-1]:
                # might drop back multiple levels
                while level < prevlevels[-1]:
                    del prevlevels[-1]
                    self.write('  '*prevlevels[-1],
                               '</UL>\n')
            self.write('  '*level, '<LI> <A HREF="',
                       makefile(node), '">')
            self.expand(title)
            self.write('</A>\n')
        self.write('</UL>\n' * len(prevlevels))
+
+ # --- Page lay-out ---
+
+ # These commands are only meaningful in printed text
+
    # Page-layout commands only matter for printed output.
    def do_page(self, args): pass

    def do_need(self, args): pass

    def bgn_group(self, args): pass
    def end_group(self): pass

    # --- Line lay-out ---

    def do_sp(self, args):
        # Inside <PRE> (nofill mode) a literal newline suffices;
        # otherwise emit a paragraph break.
        if self.nofill:
            self.write('\n')
        else:
            self.write('<P>\n')

    def do_hline(self, args):
        self.write('<HR>')
+
+ # --- Function and variable definitions ---
+
    # Definition commands: each environment opens a <DL>; the do_*x
    # variants add an extra <DT>/<DD> entry within the open list and
    # record the defined name in the appropriate index.
    def bgn_deffn(self, args):
        self.write('<DL>')
        self.do_deffnx(args)

    def end_deffn(self):
        self.write('</DL>\n')

    def do_deffnx(self, args):
        """@deffnx CATEGORY NAME ARGS...: one function-style entry."""
        self.write('<DT>')
        words = splitwords(args, 2)
        [category, name], rest = words[:2], words[2:]
        self.expand('@b{%s}' % name)
        # Remaining words are the argument list, set as metavariables.
        for word in rest: self.expand(' ' + makevar(word))
        #self.expand(' -- ' + category)
        self.write('\n<DD>')
        self.index('fn', name)

    def bgn_defun(self, args): self.bgn_deffn('Function ' + args)
    end_defun = end_deffn
    def do_defunx(self, args): self.do_deffnx('Function ' + args)

    def bgn_defmac(self, args): self.bgn_deffn('Macro ' + args)
    end_defmac = end_deffn
    def do_defmacx(self, args): self.do_deffnx('Macro ' + args)

    def bgn_defspec(self, args): self.bgn_deffn('{Special Form} ' + args)
    end_defspec = end_deffn
    def do_defspecx(self, args): self.do_deffnx('{Special Form} ' + args)

    def bgn_defvr(self, args):
        self.write('<DL>')
        self.do_defvrx(args)

    end_defvr = end_deffn

    def do_defvrx(self, args):
        """@defvrx CATEGORY NAME: one variable-style entry."""
        self.write('<DT>')
        words = splitwords(args, 2)
        [category, name], rest = words[:2], words[2:]
        self.expand('@code{%s}' % name)
        # If there are too many arguments, show them
        for word in rest: self.expand(' ' + word)
        #self.expand(' -- ' + category)
        self.write('\n<DD>')
        self.index('vr', name)

    def bgn_defvar(self, args): self.bgn_defvr('Variable ' + args)
    end_defvar = end_defvr
    def do_defvarx(self, args): self.do_defvrx('Variable ' + args)

    def bgn_defopt(self, args): self.bgn_defvr('{User Option} ' + args)
    end_defopt = end_defvr
    def do_defoptx(self, args): self.do_defvrx('{User Option} ' + args)

    # --- Ditto for typed languages ---

    def bgn_deftypefn(self, args):
        self.write('<DL>')
        self.do_deftypefnx(args)

    end_deftypefn = end_deffn

    def do_deftypefnx(self, args):
        """@deftypefnx CATEGORY TYPE NAME ARGS...: typed function entry."""
        self.write('<DT>')
        words = splitwords(args, 3)
        [category, datatype, name], rest = words[:3], words[3:]
        self.expand('@code{%s} @b{%s}' % (datatype, name))
        for word in rest: self.expand(' ' + makevar(word))
        #self.expand(' -- ' + category)
        self.write('\n<DD>')
        self.index('fn', name)


    def bgn_deftypefun(self, args): self.bgn_deftypefn('Function ' + args)
    end_deftypefun = end_deftypefn
    def do_deftypefunx(self, args): self.do_deftypefnx('Function ' + args)

    def bgn_deftypevr(self, args):
        self.write('<DL>')
        self.do_deftypevrx(args)

    end_deftypevr = end_deftypefn

    def do_deftypevrx(self, args):
        """@deftypevrx CATEGORY TYPE NAME: typed variable entry."""
        self.write('<DT>')
        words = splitwords(args, 3)
        [category, datatype, name], rest = words[:3], words[3:]
        self.expand('@code{%s} @b{%s}' % (datatype, name))
        # If there are too many arguments, show them
        for word in rest: self.expand(' ' + word)
        #self.expand(' -- ' + category)
        self.write('\n<DD>')
        self.index('fn', name)

    def bgn_deftypevar(self, args):
        self.bgn_deftypevr('Variable ' + args)
    end_deftypevar = end_deftypevr
    def do_deftypevarx(self, args):
        self.do_deftypevrx('Variable ' + args)

    # --- Ditto for object-oriented languages ---

    def bgn_defcv(self, args):
        self.write('<DL>')
        self.do_defcvx(args)

    end_defcv = end_deftypevr

    def do_defcvx(self, args):
        """@defcvx CATEGORY CLASS NAME: class-variable entry."""
        self.write('<DT>')
        words = splitwords(args, 3)
        [category, classname, name], rest = words[:3], words[3:]
        self.expand('@b{%s}' % name)
        # If there are too many arguments, show them
        for word in rest: self.expand(' ' + word)
        #self.expand(' -- %s of @code{%s}' % (category, classname))
        self.write('\n<DD>')
        self.index('vr', '%s @r{on %s}' % (name, classname))

    def bgn_defivar(self, args):
        self.bgn_defcv('{Instance Variable} ' + args)
    end_defivar = end_defcv
    def do_defivarx(self, args):
        self.do_defcvx('{Instance Variable} ' + args)

    def bgn_defop(self, args):
        self.write('<DL>')
        self.do_defopx(args)

    end_defop = end_defcv

    def do_defopx(self, args):
        """@defopx CATEGORY CLASS NAME ARGS...: operation/method entry."""
        self.write('<DT>')
        words = splitwords(args, 3)
        [category, classname, name], rest = words[:3], words[3:]
        self.expand('@b{%s}' % name)
        for word in rest: self.expand(' ' + makevar(word))
        #self.expand(' -- %s of @code{%s}' % (category, classname))
        self.write('\n<DD>')
        self.index('fn', '%s @r{on %s}' % (name, classname))

    def bgn_defmethod(self, args):
        self.bgn_defop('Method ' + args)
    end_defmethod = end_defop
    def do_defmethodx(self, args):
        self.do_defopx('Method ' + args)

    # --- Ditto for data types ---

    def bgn_deftp(self, args):
        self.write('<DL>')
        self.do_deftpx(args)

    end_deftp = end_defcv

    def do_deftpx(self, args):
        """@deftpx CATEGORY NAME: data-type entry."""
        self.write('<DT>')
        words = splitwords(args, 2)
        [category, name], rest = words[:2], words[2:]
        self.expand('@b{%s}' % name)
        for word in rest: self.expand(' ' + word)
        #self.expand(' -- ' + category)
        self.write('\n<DD>')
        self.index('tp', name)
+
+ # --- Making Lists and Tables
+
    def bgn_enumerate(self, args):
        # Bare @enumerate maps to <OL>; with an argument (start letter or
        # number) we fall back to <UL> and number items by hand via
        # self.itemnumber in do_item.
        if not args:
            self.write('<OL>\n')
            self.stackinfo[len(self.stack)] = '</OL>\n'
        else:
            self.itemnumber = args
            self.write('<UL>\n')
            self.stackinfo[len(self.stack)] = '</UL>\n'
    def end_enumerate(self):
        # +1: do_end already popped the stack (see end_ifset).
        self.itemnumber = None
        self.write(self.stackinfo[len(self.stack) + 1])
        del self.stackinfo[len(self.stack) + 1]

    def bgn_itemize(self, args):
        # The argument (e.g. @bullet) is applied to each @item later.
        self.itemarg = args
        self.write('<UL>\n')
    def end_itemize(self):
        self.itemarg = None
        self.write('</UL>\n')

    def bgn_table(self, args):
        self.itemarg = args
        self.write('<DL>\n')
    def end_table(self):
        self.itemarg = None
        self.write('</DL>\n')

    # @ftable/@vtable are @table variants whose items are also recorded
    # in the function/variable index respectively.
    def bgn_ftable(self, args):
        self.itemindex = 'fn'
        self.bgn_table(args)
    def end_ftable(self):
        self.itemindex = None
        self.end_table()

    def bgn_vtable(self, args):
        self.itemindex = 'vr'
        self.bgn_table(args)
    def end_vtable(self):
        self.itemindex = None
        self.end_table()

    def do_item(self, args):
        """Render @item according to the innermost list environment."""
        if self.itemindex: self.index(self.itemindex, args)
        if self.itemarg:
            # An @-command argument wraps the item text; anything else
            # (e.g. '-') is simply prefixed.
            if self.itemarg[0] == '@' and self.itemarg[1] and \
               self.itemarg[1] in string.ascii_letters:
                args = self.itemarg + '{' + args + '}'
            else:
                # some other character, e.g. '-'
                args = self.itemarg + ' ' + args
        if self.itemnumber <> None:
            # Hand-numbered @enumerate with a start argument.
            args = self.itemnumber + '. ' + args
            self.itemnumber = increment(self.itemnumber)
        if self.stack and self.stack[-1] == 'table':
            self.write('<DT>')
            self.expand(args)
            self.write('\n<DD>')
        elif self.stack and self.stack[-1] == 'multitable':
            self.write('<TR><TD>')
            self.expand(args)
            self.write('</TD>\n</TR>\n')
        else:
            self.write('<LI>')
            self.expand(args)
            self.write('  ')
    do_itemx = do_item      # XXX Should suppress leading blank line

    # rpyron 2002-05-07 multitable support
    def bgn_multitable(self, args):
        self.itemarg = None     # should be handled by columnfractions
        self.write('<TABLE BORDER="">\n')
    def end_multitable(self):
        self.itemarg = None
        self.write('</TABLE>\n<BR>\n')
    def handle_columnfractions(self):
        # It would be better to handle this, but for now it's in the way...
        self.itemarg = None
    def handle_tab(self):
        self.write('</TD>\n    <TD>')
+
+ # --- Enumerations, displays, quotations ---
+ # XXX Most of these should increase the indentation somehow
+
    def bgn_quotation(self, args): self.write('<BLOCKQUOTE>')
    def end_quotation(self): self.write('</BLOCKQUOTE>\n')

    def bgn_example(self, args):
        # nofill is a counter, not a flag: environments can nest.
        self.nofill = self.nofill + 1
        self.write('<PRE>')
    def end_example(self):
        self.write('</PRE>\n')
        self.nofill = self.nofill - 1

    bgn_lisp = bgn_example  # Synonym when contents are executable lisp code
    end_lisp = end_example

    bgn_smallexample = bgn_example  # XXX Should use smaller font
    end_smallexample = end_example

    bgn_smalllisp = bgn_lisp        # Ditto
    end_smalllisp = end_lisp

    bgn_display = bgn_example
    end_display = end_example

    bgn_format = bgn_display
    end_format = end_display

    def do_exdent(self, args): self.expand(args + '\n')
    # XXX Should really mess with indentation

    def bgn_flushleft(self, args):
        self.nofill = self.nofill + 1
        self.write('<PRE>\n')
    def end_flushleft(self):
        self.write('</PRE>\n')
        self.nofill = self.nofill - 1

    def bgn_flushright(self, args):
        self.nofill = self.nofill + 1
        self.write('<ADDRESS COMPACT>\n')
    def end_flushright(self):
        self.write('</ADDRESS>\n')
        self.nofill = self.nofill - 1

    def bgn_menu(self, args):
        self.write('<DIR>\n')
        self.write('  <STRONG><EM>Menu</EM></STRONG><P>\n')
        # Menu entries are also fed to the HTML Help contents file.
        self.htmlhelp.beginmenu()
    def end_menu(self):
        self.write('</DIR>\n')
        self.htmlhelp.endmenu()

    def bgn_cartouche(self, args): pass
    def end_cartouche(self): pass
+
+ # --- Indices ---
+
+ def resetindex(self):
+ self.noncodeindices = ['cp']
+ self.indextitle = {}
+ self.indextitle['cp'] = 'Concept'
+ self.indextitle['fn'] = 'Function'
+ self.indextitle['ky'] = 'Keyword'
+ self.indextitle['pg'] = 'Program'
+ self.indextitle['tp'] = 'Type'
+ self.indextitle['vr'] = 'Variable'
+ #
+ self.whichindex = {}
+ for name in self.indextitle.keys():
+ self.whichindex[name] = []
+
    def user_index(self, name, args):
        """Add an entry to a user-defined index created by @defindex."""
        if self.whichindex.has_key(name):
            self.index(name, args)
        else:
            print '*** No index named', repr(name)

    # The six standard @Xindex entry commands.
    def do_cindex(self, args): self.index('cp', args)
    def do_findex(self, args): self.index('fn', args)
    def do_kindex(self, args): self.index('ky', args)
    def do_pindex(self, args): self.index('pg', args)
    def do_tindex(self, args): self.index('tp', args)
    def do_vindex(self, args): self.index('vr', args)

    def index(self, name, args):
        # Record the entry together with the node it occurred in, and
        # mirror it into the HTML Help index.
        self.whichindex[name].append((args, self.nodename))
        self.htmlhelp.index(args, self.nodename)

    def do_synindex(self, args):
        """@synindex OLD NEW: merge index OLD into NEW; both names end up
        sharing one entry list so later entries land in the merged index."""
        words = args.split()
        if len(words) <> 2:
            print '*** bad @synindex', args
            return
        [old, new] = words
        if not self.whichindex.has_key(old) or \
           not self.whichindex.has_key(new):
            print '*** bad key(s) in @synindex', args
            return
        if old <> new and \
           self.whichindex[old] is not self.whichindex[new]:
            inew = self.whichindex[new]
            inew[len(inew):] = self.whichindex[old]
            self.whichindex[old] = inew
    do_syncodeindex = do_synindex # XXX Should use code font

    def do_printindex(self, args):
        """@printindex TAG...: render each named index in place."""
        words = args.split()
        for name in words:
            if self.whichindex.has_key(name):
                self.prindex(name)
            else:
                print '*** No index named', repr(name)
+
    def prindex(self, name):
        """Render one index as a <DL>: sorted entries linking to the
        nodes that defined them; consumes (empties) the entry list."""
        iscodeindex = (name not in self.noncodeindices)
        index = self.whichindex[name]
        if not index: return
        if self.debugging:
            print '!'*self.debugging, '--- Generating', \
                  self.indextitle[name], 'index'
        # The node already provides a title
        index1 = []
        junkprog = re.compile('^(@[a-z]+)?{')
        for key, node in index:
            sortkey = key.lower()
            # Remove leading `@cmd{' from sort key
            # -- don't bother about the matching `}'
            oldsortkey = sortkey
            while 1:
                mo = junkprog.match(sortkey)
                if not mo:
                    break
                i = mo.end()
                sortkey = sortkey[i:]
            index1.append((sortkey, key, node))
        del index[:]
        index1.sort()
        self.write('<DL COMPACT>\n')
        # Suppress exact duplicates; repeat the key text only when it
        # differs from the previous entry's key.
        prevkey = prevnode = None
        for sortkey, key, node in index1:
            if (key, node) == (prevkey, prevnode):
                continue
            if self.debugging > 1: print '!'*self.debugging, key, ':', node
            self.write('<DT>')
            if iscodeindex: key = '@code{' + key + '}'
            if key != prevkey:
                self.expand(key)
            self.write('\n<DD><A HREF="%s">%s</A>\n' % (makefile(node), node))
            prevkey, prevnode = key, node
        self.write('</DL>\n')
+
+ # --- Final error reports ---
+
+ def report(self):
+ if self.unknown:
+ print '--- Unrecognized commands ---'
+ cmds = self.unknown.keys()
+ cmds.sort()
+ for cmd in cmds:
+ print cmd.ljust(20), self.unknown[cmd]
+
+
class TexinfoParserHTML3(TexinfoParser):
    """Variant of TexinfoParser that emits HTML 3.2-era markup:
    entity-based copyright symbol, <FN>-style footnotes, and CLASS
    attributes on block elements."""

    COPYRIGHT_SYMBOL = "&copy;"
    FN_ID_PATTERN = "[%(id)s]"
    FN_SOURCE_PATTERN = '<A ID=footnoteref%(id)s ' \
        'HREF="#footnotetext%(id)s">' + FN_ID_PATTERN + '</A>'
    FN_TARGET_PATTERN = '<FN ID=footnotetext%(id)s>\n' \
        '<P><A HREF="#footnoteref%(id)s">' + FN_ID_PATTERN \
        + '</A>\n%(text)s</P></FN>\n'
    FN_HEADER = '<DIV CLASS=footnotes>\n  <HR NOSHADE WIDTH=200>\n' \
        '  <STRONG><EM>Footnotes</EM></STRONG>\n  <P>\n'

    # Node class used when writing per-node output files.
    Node = HTML3Node

    def bgn_quotation(self, args): self.write('<BQ>')
    def end_quotation(self): self.write('</BQ>\n')

    def bgn_example(self, args):
        # this use of <CODE> would not be legal in HTML 2.0,
        # but is in more recent DTDs.
        self.nofill = self.nofill + 1
        self.write('<PRE CLASS=example><CODE>')
    def end_example(self):
        self.write("</CODE></PRE>\n")
        self.nofill = self.nofill - 1

    def bgn_flushleft(self, args):
        # Inherits end_flushleft from the base class.
        self.nofill = self.nofill + 1
        self.write('<PRE CLASS=flushleft>\n')

    def bgn_flushright(self, args):
        self.nofill = self.nofill + 1
        self.write('<DIV ALIGN=right CLASS=flushright><ADDRESS COMPACT>\n')
    def end_flushright(self):
        self.write('</ADDRESS></DIV>\n')
        self.nofill = self.nofill - 1

    def bgn_menu(self, args):
        self.write('<UL PLAIN CLASS=menu>\n')
        self.write('  <LH>Menu</LH>\n')
    def end_menu(self):
        self.write('</UL>\n')
+
+
# rpyron 2002-05-07
class HTMLHelp:
    """
    This class encapsulates support for HTML Help. Node names,
    file names, menu items, index items, and image file names are
    accumulated until a call to finalize(). At that time, three
    output files are created in the current directory:

        `helpbase`.hhp is a HTML Help Workshop project file.
                       It contains various information, some of
                       which I do not understand; I just copied
                       the default project info from a fresh
                       installation.
        `helpbase`.hhc is the Contents file for the project.
        `helpbase`.hhk is the Index file for the project.

    When these files are used as input to HTML Help Workshop,
    the resulting file will be named:

        `helpbase`.chm

    If none of the defaults in `helpbase`.hhp are changed,
    the .CHM file will have Contents, Index, Search, and
    Favorites tabs.
    """

    # Matches @code{...} so index keys can be shown without the markup.
    codeprog = re.compile('@code{(.*?)}')

    def __init__(self,helpbase,dirname):
        # `helpbase` is the stem for the .hhp/.hhc/.hhk files; a false
        # value disables all output (see finalize()).
        self.helpbase = helpbase
        self.dirname = dirname
        self.projectfile = None
        self.contentfile = None
        self.indexfile = None
        self.nodelist = []
        self.nodenames = {} # nodename : index
        self.nodeindex = {}
        self.filenames = {} # filename : filename
        self.indexlist = [] # (args,nodename) == (key,location)
        self.current = ''   # name of the node most recently added
        self.menudict = {}  # nodename : list of menu item node names
        self.dumped = {}    # nodenames already written by dumpnodes()


    def addnode(self,name,next,prev,up,filename):
        """Record one Texinfo node and the HTML file it lives in."""
        node = (name,next,prev,up,filename)
        # add this file to dict
        # retrieve list with self.filenames.values()
        self.filenames[filename] = filename
        # add this node to nodelist
        self.nodeindex[name] = len(self.nodelist)
        self.nodelist.append(node)
        # set 'current' for menu items
        self.current = name
        self.menudict[self.current] = []

    def menuitem(self,nodename):
        """Append a menu entry under the node most recently added."""
        menu = self.menudict[self.current]
        menu.append(nodename)


    def addimage(self,imagename):
        """Remember an image file so it appears in the [FILES] section."""
        self.filenames[imagename] = imagename

    def index(self, args, nodename):
        """Record an index entry: `args` is the key, `nodename` its location."""
        self.indexlist.append((args,nodename))

    def beginmenu(self):
        pass

    def endmenu(self):
        pass

    def finalize(self):
        """Write the .hhp, .hhc and .hhk files; no-op without a helpbase."""
        if not self.helpbase:
            return

        # generate interesting filenames
        resultfile = self.helpbase + '.chm'
        projectfile = self.helpbase + '.hhp'
        contentfile = self.helpbase + '.hhc'
        indexfile = self.helpbase + '.hhk'

        # generate a reasonable title
        title = self.helpbase

        # get the default topic file
        (topname,topnext,topprev,topup,topfile) = self.nodelist[0]
        defaulttopic = topfile

        # PROJECT FILE
        try:
            fp = open(projectfile,'w')
            print>>fp, '[OPTIONS]'
            print>>fp, 'Auto Index=Yes'
            print>>fp, 'Binary TOC=No'
            print>>fp, 'Binary Index=Yes'
            print>>fp, 'Compatibility=1.1'
            print>>fp, 'Compiled file=' + resultfile + ''
            print>>fp, 'Contents file=' + contentfile + ''
            print>>fp, 'Default topic=' + defaulttopic + ''
            print>>fp, 'Error log file=ErrorLog.log'
            print>>fp, 'Index file=' + indexfile + ''
            print>>fp, 'Title=' + title + ''
            print>>fp, 'Display compile progress=Yes'
            print>>fp, 'Full-text search=Yes'
            print>>fp, 'Default window=main'
            print>>fp, ''
            print>>fp, '[WINDOWS]'
            print>>fp, ('main=,"' + contentfile + '","' + indexfile
                        + '","","",,,,,0x23520,222,0x1046,[10,10,780,560],'
                        '0xB0000,,,,,,0')
            print>>fp, ''
            print>>fp, '[FILES]'
            print>>fp, ''
            self.dumpfiles(fp)
            fp.close()
        except IOError, msg:
            print projectfile, ':', msg
            sys.exit(1)

        # CONTENT FILE
        try:
            fp = open(contentfile,'w')
            print>>fp, '<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML//EN">'
            print>>fp, '<!-- This file defines the table of contents -->'
            print>>fp, '<HTML>'
            print>>fp, '<HEAD>'
            print>>fp, ('<meta name="GENERATOR"'
                        'content="Microsoft&reg; HTML Help Workshop 4.1">')
            print>>fp, '<!-- Sitemap 1.0 -->'
            print>>fp, '</HEAD>'
            print>>fp, '<BODY>'
            print>>fp, ' <OBJECT type="text/site properties">'
            print>>fp, ' <param name="Window Styles" value="0x800025">'
            print>>fp, ' <param name="comment" value="title:">'
            print>>fp, ' <param name="comment" value="base:">'
            print>>fp, ' </OBJECT>'
            self.dumpnodes(fp)
            print>>fp, '</BODY>'
            print>>fp, '</HTML>'
            fp.close()
        except IOError, msg:
            print contentfile, ':', msg
            sys.exit(1)

        # INDEX FILE
        try:
            fp = open(indexfile ,'w')
            print>>fp, '<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML//EN">'
            print>>fp, '<!-- This file defines the index -->'
            print>>fp, '<HTML>'
            print>>fp, '<HEAD>'
            print>>fp, ('<meta name="GENERATOR"'
                        'content="Microsoft&reg; HTML Help Workshop 4.1">')
            print>>fp, '<!-- Sitemap 1.0 -->'
            print>>fp, '</HEAD>'
            print>>fp, '<BODY>'
            print>>fp, '<OBJECT type="text/site properties">'
            print>>fp, '</OBJECT>'
            self.dumpindex(fp)
            print>>fp, '</BODY>'
            print>>fp, '</HTML>'
            fp.close()
        except IOError, msg:
            print indexfile , ':', msg
            sys.exit(1)

    def dumpfiles(self, outfile=sys.stdout):
        """Write the sorted list of referenced files ([FILES] section)."""
        filelist = self.filenames.values()
        filelist.sort()
        for filename in filelist:
            print>>outfile, filename

    def dumpnodes(self, outfile=sys.stdout):
        """Write the sitemap <UL> for all nodes (Contents file body)."""
        self.dumped = {}
        if self.nodelist:
            nodename, dummy, dummy, dummy, dummy = self.nodelist[0]
            self.topnode = nodename

        print>>outfile, '<UL>'
        for node in self.nodelist:
            self.dumpnode(node,0,outfile)
        print>>outfile, '</UL>'

    def dumpnode(self, node, indent=0, outfile=sys.stdout):
        """Write one node's sitemap entry, then recurse into its menu."""
        if node:
            # Retrieve info for this node
            (nodename,next,prev,up,filename) = node
            self.current = nodename

            # Have we been dumped already?
            if self.dumped.has_key(nodename):
                return
            self.dumped[nodename] = 1

            # Print info for this node
            print>>outfile, ' '*indent,
            print>>outfile, '<LI><OBJECT type="text/sitemap">',
            print>>outfile, '<param name="Name" value="' + nodename +'">',
            print>>outfile, '<param name="Local" value="'+ filename +'">',
            print>>outfile, '</OBJECT>'

            # Does this node have menu items?
            try:
                menu = self.menudict[nodename]
                self.dumpmenu(menu,indent+2,outfile)
            except KeyError:
                pass

    def dumpmenu(self, menu, indent=0, outfile=sys.stdout):
        """Write a node's menu entries; non-top menus are nested in <UL>."""
        if menu:
            currentnode = self.current
            if currentnode != self.topnode: # XXX this is a hack
                print>>outfile, ' '*indent + '<UL>'
                indent += 2
            for item in menu:
                menunode = self.getnode(item)
                self.dumpnode(menunode,indent,outfile)
            if currentnode != self.topnode: # XXX this is a hack
                print>>outfile, ' '*indent + '</UL>'
                indent -= 2

    def getnode(self, nodename):
        """Return the node tuple registered for `nodename`, else None."""
        try:
            index = self.nodeindex[nodename]
            return self.nodelist[index]
        except KeyError:
            return None
        except IndexError:
            return None

    # (args,nodename) == (key,location)
    def dumpindex(self, outfile=sys.stdout):
        """Write the .hhk body: one sitemap entry per index item."""
        print>>outfile, '<UL>'
        for (key,location) in self.indexlist:
            key = self.codeexpand(key)
            location = makefile(location)
            location = self.dirname + '/' + location
            print>>outfile, '<LI><OBJECT type="text/sitemap">',
            print>>outfile, '<param name="Name" value="' + key + '">',
            print>>outfile, '<param name="Local" value="' + location + '">',
            print>>outfile, '</OBJECT>'
        print>>outfile, '</UL>'

    def codeexpand(self, line):
        """Strip a leading @code{...} wrapper from an index key, if any."""
        co = self.codeprog.match(line)
        if not co:
            return line
        bgn, end = co.span(0)
        a, b = co.span(1)
        line = line[:bgn] + line[a:b] + line[end:]
        return line
+
+
# Put @var{} around alphabetic substrings
def makevar(str):
    """Wrap a string in Texinfo @var{...} markup."""
    return ''.join(['@var{', str, '}'])
+
+
# Split a string in "words" according to findwordend
def splitwords(str, minlength):
    """Split `str` into words (boundaries decided by findwordend),
    padding the result with empty strings until it has at least
    `minlength` entries."""
    words = []
    pos = 0
    limit = len(str)
    while pos < limit:
        # Skip leading whitespace before the next word.
        while pos < limit and str[pos] in ' \t\n':
            pos += 1
        if pos >= limit:
            break
        begin = pos
        pos = findwordend(str, pos, limit)
        words.append(str[begin:pos])
    # Pad to the requested minimum length (no-op when already long enough).
    words.extend([''] * (minlength - len(words)))
    return words
+
+
# Find the end of a "word", matching braces and interpreting @@ @{ @}
fwprog = re.compile('[@{} ]')

def findwordend(str, i, n):
    """Return the index just past the word starting at position `i`.

    Braces must balance before a space may terminate the word, and '@'
    escapes the character that follows it. Returns `n` when no
    terminating space is found.
    """
    depth = 0
    while i < n:
        match = fwprog.search(str, i)
        if match is None:
            break
        i = match.start()
        ch = str[i]
        i += 1
        if ch == '@':
            # '@x' escapes x -- skip the escaped character.
            i += 1
        elif ch == '{':
            depth += 1
        elif ch == '}':
            depth -= 1
        elif ch == ' ' and depth <= 0:
            return i - 1
    return n
+
+
# Convert a node name into a file name
def makefile(nodename):
    # Strip surrounding whitespace, sanitize unsafe characters, and
    # append the .html suffix.
    nodename = nodename.strip()
    return fixfunnychars(nodename) + '.html'
+
+
# Characters that are perfectly safe in filenames and hyperlinks
goodchars = string.ascii_letters + string.digits + '!@-=+.'

# Replace characters that aren't perfectly safe by dashes
# Underscores are bad since Cern HTTPD treats them as delimiters for
# encoding times, so you get mismatches if you compress your files:
# a.html.gz will map to a_b.html.gz
def fixfunnychars(addr):
    """Return `addr` with every character outside `goodchars` replaced
    by a dash."""
    return ''.join([ch if ch in goodchars else '-' for ch in addr])
+
+
# Increment a string used as an enumeration
def increment(s):
    """Return the "next" enumeration label after `s`.

    The last character advances within whichever sequence it belongs to
    (digits, lowercase or uppercase letters); a wrap-around carries into
    the preceding characters, growing the string when needed.  An empty
    string yields '1'; a string ending in none of the sequences is
    returned unchanged.
    """
    if not s:
        return '1'
    for sequence in string.digits, string.lowercase, string.uppercase:
        lastc = s[-1]
        if lastc in sequence:
            i = sequence.index(lastc) + 1
            if i >= len(sequence):
                # Wrapped past the end of the sequence: carry.
                if len(s) == 1:
                    s = sequence[0]*2
                    if s == '00':
                        # Numeric labels start at 10, not 00.
                        s = '10'
                else:
                    s = increment(s[:-1]) + sequence[0]
            else:
                s = s[:-1] + sequence[i]
            return s
    return s # Don't increment
+
+
+def test():
+ import sys
+ debugging = 0
+ print_headers = 0
+ cont = 0
+ html3 = 0
+ htmlhelp = ''
+
+ while sys.argv[1] == ['-d']:
+ debugging = debugging + 1
+ del sys.argv[1]
+ if sys.argv[1] == '-p':
+ print_headers = 1
+ del sys.argv[1]
+ if sys.argv[1] == '-c':
+ cont = 1
+ del sys.argv[1]
+ if sys.argv[1] == '-3':
+ html3 = 1
+ del sys.argv[1]
+ if sys.argv[1] == '-H':
+ helpbase = sys.argv[2]
+ del sys.argv[1:3]
+ if len(sys.argv) <> 3:
+ print 'usage: texi2hh [-d [-d]] [-p] [-c] [-3] [-H htmlhelp]', \
+ 'inputfile outputdirectory'
+ sys.exit(2)
+
+ if html3:
+ parser = TexinfoParserHTML3()
+ else:
+ parser = TexinfoParser()
+ parser.cont = cont
+ parser.debugging = debugging
+ parser.print_headers = print_headers
+
+ file = sys.argv[1]
+ dirname = sys.argv[2]
+ parser.setdirname(dirname)
+ parser.setincludedir(os.path.dirname(file))
+
+ htmlhelp = HTMLHelp(helpbase, dirname)
+ parser.sethtmlhelp(htmlhelp)
+
+ try:
+ fp = open(file, 'r')
+ except IOError, msg:
+ print file, ':', msg
+ sys.exit(1)
+
+ parser.parse(fp)
+ fp.close()
+ parser.report()
+
+ htmlhelp.finalize()
+
+
+if __name__ == "__main__":
+ test()
diff --git a/lib/python2.7/Tools/scripts/treesync.py b/lib/python2.7/Tools/scripts/treesync.py
new file mode 100644
index 0000000..322a5e3
--- /dev/null
+++ b/lib/python2.7/Tools/scripts/treesync.py
@@ -0,0 +1,205 @@
+#!/usr/bin/env python2
+
+"""Script to synchronize two source trees.
+
+Invoke with two arguments:
+
+python treesync.py slave master
+
+The assumption is that "master" contains CVS administration while
+slave doesn't. All files in the slave tree that have a CVS/Entries
+entry in the master tree are synchronized. This means:
+
+ If the files differ:
+ if the slave file is newer:
+ normalize the slave file
+ if the files still differ:
+ copy the slave to the master
+ else (the master is newer):
+ copy the master to the slave
+
+ normalizing the slave means replacing CRLF with LF when the master
+ doesn't use CRLF
+
+"""
+
+import os, sys, stat, getopt
+
# Interactivity options: each value is "yes", "no", or "ask" (prompt).
default_answer = "ask"      # fallback when the user just hits return
create_files = "yes"        # create slave files missing from the slave tree
create_directories = "no"   # create missing slave directories
write_slave = "ask"         # overwrite slave with a newer master
write_master = "ask"        # overwrite master with a newer slave
+
+def main():
+ global always_no, always_yes
+ global create_directories, write_master, write_slave
+ opts, args = getopt.getopt(sys.argv[1:], "nym:s:d:f:a:")
+ for o, a in opts:
+ if o == '-y':
+ default_answer = "yes"
+ if o == '-n':
+ default_answer = "no"
+ if o == '-s':
+ write_slave = a
+ if o == '-m':
+ write_master = a
+ if o == '-d':
+ create_directories = a
+ if o == '-f':
+ create_files = a
+ if o == '-a':
+ create_files = create_directories = write_slave = write_master = a
+ try:
+ [slave, master] = args
+ except ValueError:
+ print "usage: python", sys.argv[0] or "treesync.py",
+ print "[-n] [-y] [-m y|n|a] [-s y|n|a] [-d y|n|a] [-f n|y|a]",
+ print "slavedir masterdir"
+ return
+ process(slave, master)
+
def process(slave, master):
    """Synchronize one directory pair, then recurse into subdirectories.

    Only files listed in the master's CVS/Entries are compared; a master
    directory without CVS administration is skipped entirely.
    """
    cvsdir = os.path.join(master, "CVS")
    if not os.path.isdir(cvsdir):
        print "skipping master subdirectory", master
        print "-- not under CVS"
        return
    print "-"*40
    print "slave ", slave
    print "master", master
    if not os.path.isdir(slave):
        # Slave side is missing -- optionally create it.
        if not okay("create slave directory %s?" % slave,
                    answer=create_directories):
            print "skipping master subdirectory", master
            print "-- no corresponding slave", slave
            return
        print "creating slave directory", slave
        try:
            os.mkdir(slave)
        except os.error, msg:
            print "can't make slave directory", slave, ":", msg
            return
        else:
            print "made slave directory", slave
    cvsdir = None
    subdirs = []
    names = os.listdir(master)
    for name in names:
        mastername = os.path.join(master, name)
        slavename = os.path.join(slave, name)
        if name == "CVS":
            cvsdir = mastername
        else:
            # Remember real subdirectories (not symlinks) for recursion.
            if os.path.isdir(mastername) and not os.path.islink(mastername):
                subdirs.append((slavename, mastername))
    if cvsdir:
        entries = os.path.join(cvsdir, "Entries")
        # CVS/Entries lines look like "/name/rev/date/..."; files have an
        # empty first field.
        for e in open(entries).readlines():
            words = e.split('/')
            if words[0] == '' and words[1:]:
                name = words[1]
                s = os.path.join(slave, name)
                m = os.path.join(master, name)
                compare(s, m)
    for (s, m) in subdirs:
        process(s, m)
+
def compare(slave, master):
    """Compare one slave/master file pair and copy in the right direction.

    Missing slaves may be created; missing masters are never created.
    When the slave is newer, CR/NUL content in the master decides whether
    the copy back is done in binary or text mode (see module docstring).
    """
    try:
        # NOTE(review): slave is opened in text mode, master in binary --
        # presumably so CRLF differences show up in identical()/mtime
        # handling; confirm before changing.
        sf = open(slave, 'r')
    except IOError:
        sf = None
    try:
        mf = open(master, 'rb')
    except IOError:
        mf = None
    if not sf:
        if not mf:
            print "Neither master nor slave exists", master
            return
        print "Creating missing slave", slave
        copy(master, slave, answer=create_files)
        return
    if not mf:
        print "Not updating missing master", master
        return
    if sf and mf:
        if identical(sf, mf):
            return
    sft = mtime(sf)
    mft = mtime(mf)
    if mft > sft:
        # Master is newer -- copy master to slave
        sf.close()
        mf.close()
        print "Master ", master
        print "is newer than slave", slave
        copy(master, slave, answer=write_slave)
        return
    # Slave is newer -- copy slave to master
    print "Slave is", sft-mft, "seconds newer than master"
    # But first check what to do about CRLF
    mf.seek(0)
    fun = funnychars(mf)
    mf.close()
    sf.close()
    if fun:
        print "***UPDATING MASTER (BINARY COPY)***"
        copy(slave, master, "rb", answer=write_master)
    else:
        print "***UPDATING MASTER***"
        copy(slave, master, "r", answer=write_master)
+
BUFSIZE = 16*1024

def identical(sf, mf):
    """Return 1 if the two open files have identical contents, else 0."""
    while True:
        chunk_a = sf.read(BUFSIZE)
        chunk_b = mf.read(BUFSIZE)
        if chunk_a != chunk_b:
            return 0
        if not chunk_a:
            # Both streams exhausted at the same point: equal.
            return 1
+
def mtime(f):
    """Modification time of an open file, read via its descriptor."""
    return os.fstat(f.fileno())[stat.ST_MTIME]
+
def funnychars(f):
    """Return 1 if the open file contains CR or NUL characters, else 0."""
    while True:
        block = f.read(BUFSIZE)
        if not block:
            return 0
        if '\r' in block or '\0' in block:
            return 1
+
def copy(src, dst, rmode="rb", wmode="wb", answer='ask'):
    """Copy src to dst in BUFSIZE chunks, after asking permission.

    `answer` is forwarded to okay(); `rmode` may be "r" for text-mode
    reads (CRLF normalization) while writes stay binary by default.
    """
    print "copying", src
    print " to", dst
    if not okay("okay to copy? ", answer):
        return
    f = open(src, rmode)
    g = open(dst, wmode)
    while 1:
        buf = f.read(BUFSIZE)
        if not buf: break
        g.write(buf)
    f.close()
    g.close()
+
def okay(prompt, answer='ask'):
    """Return 1 (yes) or 0 (no) for a yes/no decision.

    `answer` may pre-answer the question ("yes"/"no"); anything else
    prompts the user, substituting `default_answer` for empty input and
    re-prompting until a y/n reply arrives.
    """
    answer = answer.strip().lower()
    if not answer or answer[0] not in 'ny':
        answer = raw_input(prompt)
        answer = answer.strip().lower()
        if not answer:
            answer = default_answer
    if answer[:1] == 'y':
        return 1
    if answer[:1] == 'n':
        return 0
    print "Yes or No please -- try again:"
    return okay(prompt)

if __name__ == '__main__':
    main()
diff --git a/lib/python2.7/Tools/scripts/untabify.py b/lib/python2.7/Tools/scripts/untabify.py
new file mode 100644
index 0000000..600026f
--- /dev/null
+++ b/lib/python2.7/Tools/scripts/untabify.py
@@ -0,0 +1,52 @@
+#!/usr/bin/env python2
+
+"Replace tabs with spaces in argument files. Print names of changed files."
+
+import os
+import sys
+import getopt
+
def main():
    """Parse -t <tabwidth> (default 8) and untabify each file argument."""
    tabsize = 8
    try:
        opts, args = getopt.getopt(sys.argv[1:], "t:")
        if not args:
            raise getopt.error, "At least one file argument required"
    except getopt.error, msg:
        print msg
        print "usage:", sys.argv[0], "[-t tabwidth] file ..."
        return
    for optname, optvalue in opts:
        if optname == '-t':
            tabsize = int(optvalue)

    for filename in args:
        process(filename, tabsize)
+
def process(filename, tabsize, verbose=True):
    """Expand tabs in `filename` in place.

    A "~" backup of the original is kept, and the name of each file
    actually changed is printed when `verbose` is true.
    """
    try:
        f = open(filename)
        text = f.read()
        f.close()
    except IOError, msg:
        print "%r: I/O error: %s" % (filename, msg)
        return
    newtext = text.expandtabs(tabsize)
    if newtext == text:
        # No tabs -- leave the file (and any old backup) untouched.
        return
    backup = filename + "~"
    try:
        os.unlink(backup)
    except os.error:
        pass
    try:
        os.rename(filename, backup)
    except os.error:
        # Rename failed (e.g. cross-device); the rewrite below still runs.
        pass
    with open(filename, "w") as f:
        f.write(newtext)
    if verbose:
        print filename

if __name__ == '__main__':
    main()
diff --git a/lib/python2.7/Tools/scripts/which.py b/lib/python2.7/Tools/scripts/which.py
new file mode 100644
index 0000000..b76919c
--- /dev/null
+++ b/lib/python2.7/Tools/scripts/which.py
@@ -0,0 +1,60 @@
+#!/usr/bin/env python2
+
+# Variant of "which".
+# On stderr, near and total misses are reported.
+# '-l<flags>' argument adds ls -l<flags> of each file found.
+
import sys
# Drop the script's own directory from the module search path so a local
# file cannot shadow a standard module.
if sys.path[0] in (".", ""): del sys.path[0]

import sys, os
from stat import *
+
def msg(str):
    """Write one line of diagnostics to standard error."""
    sys.stderr.write('%s\n' % str)
+
def main():
    """Search $PATH for each program argument, reporting duplicates,
    non-files and non-executables on stderr; exit 1 if any program was
    not found."""
    pathlist = os.environ['PATH'].split(os.pathsep)

    sts = 0
    longlist = ''

    # Optional leading '-l<flags>' argument: run "ls <flags>" on each hit.
    if sys.argv[1:] and sys.argv[1][:2] == '-l':
        longlist = sys.argv[1]
        del sys.argv[1]

    for prog in sys.argv[1:]:
        # (st_mode, st_ino, st_dev) of the first executable found; used
        # to tell hard links/duplicates ("same as") from distinct files.
        ident = ()
        for dir in pathlist:
            filename = os.path.join(dir, prog)
            try:
                st = os.stat(filename)
            except os.error:
                continue
            if not S_ISREG(st[ST_MODE]):
                msg(filename + ': not a disk file')
            else:
                mode = S_IMODE(st[ST_MODE])
                if mode & 0111:
                    if not ident:
                        # First hit: this is the answer, print to stdout.
                        print filename
                        ident = st[:3]
                    else:
                        if st[:3] == ident:
                            s = 'same as: '
                        else:
                            s = 'also: '
                        msg(s + filename)
                else:
                    msg(filename + ': not executable')
            if longlist:
                sts = os.system('ls ' + longlist + ' ' + filename)
                if sts: msg('"ls -l" exit status: ' + repr(sts))
        if not ident:
            msg(prog + ': not found')
            sts = 1

    sys.exit(sts)
diff --git a/lib/python2.7/Tools/scripts/win_add2path.py b/lib/python2.7/Tools/scripts/win_add2path.py
new file mode 100644
index 0000000..876bfb2
--- /dev/null
+++ b/lib/python2.7/Tools/scripts/win_add2path.py
@@ -0,0 +1,57 @@
+"""Add Python to the search path on Windows
+
+This is a simple script to add Python to the Windows search path. It
+modifies the current user (HKCU) tree of the registry.
+
+Copyright (c) 2008 by Christian Heimes <christian@cheimes.de>
+Licensed to PSF under a Contributor Agreement.
+"""
+
+import sys
+import site
+import os
+import _winreg
+
+HKCU = _winreg.HKEY_CURRENT_USER
+ENV = "Environment"
+PATH = "PATH"
+DEFAULT = u"%PATH%"
+
def modify():
    """Append the interpreter, Scripts and per-user Scripts directories
    to the HKCU PATH registry value.

    Returns (paths, envpath): the list whose first element is the old
    PATH value followed by the directories added, and the new PATH
    string that was written.
    """
    pythonpath = os.path.dirname(os.path.normpath(sys.executable))
    scripts = os.path.join(pythonpath, "Scripts")
    appdata = os.environ["APPDATA"]
    if hasattr(site, "USER_SITE"):
        # Hide the roaming-profile prefix behind %APPDATA% so the stored
        # value survives profile relocation.
        userpath = site.USER_SITE.replace(appdata, "%APPDATA%")
        userscripts = os.path.join(userpath, "Scripts")
    else:
        userscripts = None

    with _winreg.CreateKey(HKCU, ENV) as key:
        try:
            envpath = _winreg.QueryValueEx(key, PATH)[0]
        except WindowsError:
            # No PATH value yet -- start from the expandable default.
            envpath = DEFAULT

        paths = [envpath]
        for path in (pythonpath, scripts, userscripts):
            # Only add existing directories not already mentioned.
            if path and path not in envpath and os.path.isdir(path):
                paths.append(path)

        envpath = os.pathsep.join(paths)
        _winreg.SetValueEx(key, PATH, 0, _winreg.REG_EXPAND_SZ, envpath)
    return paths, envpath
+
def main():
    """Run modify() and report what was added and the resulting PATH."""
    paths, envpath = modify()
    if len(paths) > 1:
        print "Path(s) added:"
        print '\n'.join(paths[1:])
    else:
        print "No path was added"
    print "\nPATH is now:\n%s\n" % envpath
    print "Expanded:"
    print _winreg.ExpandEnvironmentStrings(envpath)

if __name__ == '__main__':
    main()
diff --git a/lib/python2.7/Tools/scripts/xxci.py b/lib/python2.7/Tools/scripts/xxci.py
new file mode 100644
index 0000000..6cf0a81
--- /dev/null
+++ b/lib/python2.7/Tools/scripts/xxci.py
@@ -0,0 +1,116 @@
+#!/usr/bin/env python2
+
+# xxci
+#
+# check in files for which rcsdiff returns nonzero exit status
+
+import sys
+import os
+from stat import *
+import fnmatch
+
# Magic number found at the start of (old SGI) executables; files
# beginning with it are skipped.
EXECMAGIC = '\001\140\000\010'

MAXSIZE = 200*1024 # Files this big must be binaries and are skipped.
+
+def getargs():
+ args = sys.argv[1:]
+ if args:
+ return args
+ print 'No arguments, checking almost *, in "ls -t" order'
+ list = []
+ for file in os.listdir(os.curdir):
+ if not skipfile(file):
+ list.append((getmtime(file), file))
+ list.sort()
+ if not list:
+ print 'Nothing to do -- exit 1'
+ sys.exit(1)
+ list.sort()
+ list.reverse()
+ for mtime, file in list: args.append(file)
+ return args
+
def getmtime(file):
    """Modification time of `file`, or -1 if it cannot be stat'ed."""
    try:
        return os.stat(file)[ST_MTIME]
    except os.error:
        return -1
+
# Names never worth checking in.
badnames = ['tags', 'TAGS', 'xyzzy', 'nohup.out', 'core']
# Prefixes marking editor/backup/temporary files.
badprefixes = ['.', ',', '@', '#', 'o.']
# Suffixes marking derived, backup or version-control files.
badsuffixes = \
        ['~', '.a', '.o', '.old', '.bak', '.orig', '.new', '.prev', '.not', \
         '.pyc', '.fdc', '.rgb', '.elc', ',v']
ignore = []

def setup():
    """Build the `ignore` fnmatch-pattern list from the tables above,
    plus any extra patterns listed in ./.xxcign (whitespace-separated)."""
    ignore[:] = badnames
    for p in badprefixes:
        ignore.append(p + '*')
    for p in badsuffixes:
        ignore.append('*' + p)
    try:
        f = open('.xxcign', 'r')
    except IOError:
        # No local ignore file -- built-in patterns only.
        return
    ignore[:] = ignore + f.read().split()
+
def skipfile(file):
    """Return 1 if `file` should not be considered for check-in."""
    for p in ignore:
        if fnmatch.fnmatch(file, p): return 1
    try:
        st = os.lstat(file)
    except os.error:
        return 1 # Doesn't exist -- skip it
    # Skip non-plain files.
    if not S_ISREG(st[ST_MODE]): return 1
    # Skip huge files -- probably binaries.
    if st[ST_SIZE] >= MAXSIZE: return 1
    # Skip executables
    try:
        data = open(file, 'r').read(len(EXECMAGIC))
        if data == EXECMAGIC: return 1
    except:
        # Unreadable file: treat as non-executable and keep it.
        pass
    return 0
+
def badprefix(file):
    """Return 1 if `file` starts with any of the bad prefixes."""
    for pattern in badprefixes:
        if file.startswith(pattern):
            return 1
    return 0
+
def badsuffix(file):
    """Return 1 if `file` ends with any of the bad suffixes."""
    for pattern in badsuffixes:
        if file.endswith(pattern):
            return 1
    return 0
+
def go(args):
    """For each file differing from its RCS head, show the diffs and,
    on confirmation, check it in (keeping the lock with -l)."""
    for file in args:
        print file + ':'
        if differing(file):
            showdiffs(file)
            if askyesno('Check in ' + file + ' ? '):
                sts = os.system('rcs -l ' + file) # ignored
                sts = os.system('ci -l ' + file)
+
def differing(file):
    """Return true if `file` differs from its checked-in RCS revision
    (compared via co -p | cmp)."""
    cmd = 'co -p ' + file + ' 2>/dev/null | cmp -s - ' + file
    sts = os.system(cmd)
    return sts != 0
+
def showdiffs(file):
    """Page the rcsdiff output for `file` through $PAGER (default more)."""
    cmd = 'rcsdiff ' + file + ' 2>&1 | ${PAGER-more}'
    sts = os.system(cmd)
+
def askyesno(prompt):
    """Prompt the user; true only for an exact 'y' or 'yes' reply."""
    reply = raw_input(prompt)
    return reply in ('y', 'yes')
+
if __name__ == '__main__':
    try:
        setup()
        go(getargs())
    except KeyboardInterrupt:
        # Allow a clean Ctrl-C abort without a traceback.
        print '[Intr]'