Diffstat (limited to 'lib/python2.7/lib2to3')
-rw-r--r--  lib/python2.7/lib2to3/Grammar.txt  170
-rw-r--r--  lib/python2.7/lib2to3/Grammar2.7.13.final.0.pickle  bin 0 -> 40486 bytes
-rw-r--r--  lib/python2.7/lib2to3/PatternGrammar.txt  28
-rw-r--r--  lib/python2.7/lib2to3/PatternGrammar2.7.13.final.0.pickle  bin 0 -> 2799 bytes
-rw-r--r--  lib/python2.7/lib2to3/__init__.py  1
-rw-r--r--  lib/python2.7/lib2to3/__main__.py  4
-rw-r--r--  lib/python2.7/lib2to3/btm_matcher.py  168
-rw-r--r--  lib/python2.7/lib2to3/btm_utils.py  283
-rw-r--r--  lib/python2.7/lib2to3/fixer_base.py  189
-rw-r--r--  lib/python2.7/lib2to3/fixer_util.py  432
-rw-r--r--  lib/python2.7/lib2to3/fixes/__init__.py  1
-rw-r--r--  lib/python2.7/lib2to3/fixes/fix_apply.py  70
-rw-r--r--  lib/python2.7/lib2to3/fixes/fix_asserts.py  34
-rw-r--r--  lib/python2.7/lib2to3/fixes/fix_basestring.py  14
-rw-r--r--  lib/python2.7/lib2to3/fixes/fix_buffer.py  22
-rw-r--r--  lib/python2.7/lib2to3/fixes/fix_dict.py  107
-rw-r--r--  lib/python2.7/lib2to3/fixes/fix_except.py  93
-rw-r--r--  lib/python2.7/lib2to3/fixes/fix_exec.py  40
-rw-r--r--  lib/python2.7/lib2to3/fixes/fix_execfile.py  52
-rw-r--r--  lib/python2.7/lib2to3/fixes/fix_exitfunc.py  72
-rw-r--r--  lib/python2.7/lib2to3/fixes/fix_filter.py  76
-rw-r--r--  lib/python2.7/lib2to3/fixes/fix_funcattrs.py  21
-rw-r--r--  lib/python2.7/lib2to3/fixes/fix_future.py  22
-rw-r--r--  lib/python2.7/lib2to3/fixes/fix_getcwdu.py  19
-rw-r--r--  lib/python2.7/lib2to3/fixes/fix_has_key.py  110
-rw-r--r--  lib/python2.7/lib2to3/fixes/fix_idioms.py  152
-rw-r--r--  lib/python2.7/lib2to3/fixes/fix_import.py  99
-rw-r--r--  lib/python2.7/lib2to3/fixes/fix_imports.py  145
-rw-r--r--  lib/python2.7/lib2to3/fixes/fix_imports2.py  16
-rw-r--r--  lib/python2.7/lib2to3/fixes/fix_input.py  26
-rw-r--r--  lib/python2.7/lib2to3/fixes/fix_intern.py  56
-rw-r--r--  lib/python2.7/lib2to3/fixes/fix_isinstance.py  52
-rw-r--r--  lib/python2.7/lib2to3/fixes/fix_itertools.py  43
-rw-r--r--  lib/python2.7/lib2to3/fixes/fix_itertools_imports.py  57
-rw-r--r--  lib/python2.7/lib2to3/fixes/fix_long.py  19
-rw-r--r--  lib/python2.7/lib2to3/fixes/fix_map.py  91
-rw-r--r--  lib/python2.7/lib2to3/fixes/fix_metaclass.py  228
-rw-r--r--  lib/python2.7/lib2to3/fixes/fix_methodattrs.py  24
-rw-r--r--  lib/python2.7/lib2to3/fixes/fix_ne.py  23
-rw-r--r--  lib/python2.7/lib2to3/fixes/fix_next.py  103
-rw-r--r--  lib/python2.7/lib2to3/fixes/fix_nonzero.py  21
-rw-r--r--  lib/python2.7/lib2to3/fixes/fix_numliterals.py  28
-rw-r--r--  lib/python2.7/lib2to3/fixes/fix_operator.py  96
-rw-r--r--  lib/python2.7/lib2to3/fixes/fix_paren.py  44
-rw-r--r--  lib/python2.7/lib2to3/fixes/fix_print.py  87
-rw-r--r--  lib/python2.7/lib2to3/fixes/fix_raise.py  90
-rw-r--r--  lib/python2.7/lib2to3/fixes/fix_raw_input.py  17
-rw-r--r--  lib/python2.7/lib2to3/fixes/fix_reduce.py  35
-rw-r--r--  lib/python2.7/lib2to3/fixes/fix_renames.py  70
-rw-r--r--  lib/python2.7/lib2to3/fixes/fix_repr.py  23
-rw-r--r--  lib/python2.7/lib2to3/fixes/fix_set_literal.py  53
-rw-r--r--  lib/python2.7/lib2to3/fixes/fix_standarderror.py  18
-rw-r--r--  lib/python2.7/lib2to3/fixes/fix_sys_exc.py  30
-rw-r--r--  lib/python2.7/lib2to3/fixes/fix_throw.py  56
-rw-r--r--  lib/python2.7/lib2to3/fixes/fix_tuple_params.py  175
-rw-r--r--  lib/python2.7/lib2to3/fixes/fix_types.py  62
-rw-r--r--  lib/python2.7/lib2to3/fixes/fix_unicode.py  42
-rw-r--r--  lib/python2.7/lib2to3/fixes/fix_urllib.py  197
-rw-r--r--  lib/python2.7/lib2to3/fixes/fix_ws_comma.py  39
-rw-r--r--  lib/python2.7/lib2to3/fixes/fix_xrange.py  73
-rw-r--r--  lib/python2.7/lib2to3/fixes/fix_xreadlines.py  25
-rw-r--r--  lib/python2.7/lib2to3/fixes/fix_zip.py  35
-rw-r--r--  lib/python2.7/lib2to3/main.py  269
-rw-r--r--  lib/python2.7/lib2to3/patcomp.py  205
-rw-r--r--  lib/python2.7/lib2to3/pgen2/__init__.py  4
-rw-r--r--  lib/python2.7/lib2to3/pgen2/conv.py  257
-rw-r--r--  lib/python2.7/lib2to3/pgen2/driver.py  160
-rw-r--r--  lib/python2.7/lib2to3/pgen2/grammar.py  208
-rw-r--r--  lib/python2.7/lib2to3/pgen2/literals.py  60
-rw-r--r--  lib/python2.7/lib2to3/pgen2/parse.py  201
-rw-r--r--  lib/python2.7/lib2to3/pgen2/pgen.py  386
-rw-r--r--  lib/python2.7/lib2to3/pgen2/token.py  83
-rw-r--r--  lib/python2.7/lib2to3/pgen2/tokenize.py  502
-rw-r--r--  lib/python2.7/lib2to3/pygram.py  40
-rw-r--r--  lib/python2.7/lib2to3/pytree.py  887
-rw-r--r--  lib/python2.7/lib2to3/refactor.py  747
-rw-r--r--  lib/python2.7/lib2to3/tests/__init__.py  24
-rw-r--r--  lib/python2.7/lib2to3/tests/data/README  6
-rw-r--r--  lib/python2.7/lib2to3/tests/data/bom.py  2
-rw-r--r--  lib/python2.7/lib2to3/tests/data/crlf.py  3
-rw-r--r--  lib/python2.7/lib2to3/tests/data/different_encoding.py  6
-rw-r--r--  lib/python2.7/lib2to3/tests/data/false_encoding.py  2
-rw-r--r--  lib/python2.7/lib2to3/tests/data/fixers/bad_order.py  5
-rw-r--r--  lib/python2.7/lib2to3/tests/data/fixers/myfixes/__init__.py  0
-rw-r--r--  lib/python2.7/lib2to3/tests/data/fixers/myfixes/fix_explicit.py  6
-rw-r--r--  lib/python2.7/lib2to3/tests/data/fixers/myfixes/fix_first.py  6
-rw-r--r--  lib/python2.7/lib2to3/tests/data/fixers/myfixes/fix_last.py  7
-rw-r--r--  lib/python2.7/lib2to3/tests/data/fixers/myfixes/fix_parrot.py  13
-rw-r--r--  lib/python2.7/lib2to3/tests/data/fixers/myfixes/fix_preorder.py  6
-rw-r--r--  lib/python2.7/lib2to3/tests/data/fixers/no_fixer_cls.py  1
-rw-r--r--  lib/python2.7/lib2to3/tests/data/fixers/parrot_example.py  2
-rw-r--r--  lib/python2.7/lib2to3/tests/data/infinite_recursion.py  2669
-rw-r--r--  lib/python2.7/lib2to3/tests/data/py2_test_grammar.py  974
-rw-r--r--  lib/python2.7/lib2to3/tests/data/py3_test_grammar.py  923
-rw-r--r--  lib/python2.7/lib2to3/tests/pytree_idempotency.py  92
-rw-r--r--  lib/python2.7/lib2to3/tests/support.py  54
-rw-r--r--  lib/python2.7/lib2to3/tests/test_all_fixers.py  23
-rw-r--r--  lib/python2.7/lib2to3/tests/test_fixers.py  4544
-rw-r--r--  lib/python2.7/lib2to3/tests/test_main.py  149
-rw-r--r--  lib/python2.7/lib2to3/tests/test_parser.py  343
-rw-r--r--  lib/python2.7/lib2to3/tests/test_pytree.py  494
-rw-r--r--  lib/python2.7/lib2to3/tests/test_refactor.py  322
-rw-r--r--  lib/python2.7/lib2to3/tests/test_util.py  594
103 files changed, 19757 insertions(+), 0 deletions(-)
diff --git a/lib/python2.7/lib2to3/Grammar.txt b/lib/python2.7/lib2to3/Grammar.txt
new file mode 100644
index 0000000..9be7c9f
--- /dev/null
+++ b/lib/python2.7/lib2to3/Grammar.txt
@@ -0,0 +1,170 @@
+# Grammar for 2to3. This grammar supports Python 2.x and 3.x.
+
+# Note: Changing the grammar specified in this file will most likely
+# require corresponding changes in the parser module
+# (../Modules/parsermodule.c). If you can't make the changes to
+# that module yourself, please co-ordinate the required changes
+# with someone who can; ask around on python-dev for help. Fred
+# Drake <fdrake@acm.org> will probably be listening there.
+
+# NOTE WELL: You should also follow all the steps listed in PEP 306,
+# "How to Change Python's Grammar"
+
+# Commands for Kees Blom's railroad program
+#diagram:token NAME
+#diagram:token NUMBER
+#diagram:token STRING
+#diagram:token NEWLINE
+#diagram:token ENDMARKER
+#diagram:token INDENT
+#diagram:output\input python.bla
+#diagram:token DEDENT
+#diagram:output\textwidth 20.04cm\oddsidemargin 0.0cm\evensidemargin 0.0cm
+#diagram:rules
+
+# Start symbols for the grammar:
+# file_input is a module or sequence of commands read from an input file;
+# single_input is a single interactive statement;
+# eval_input is the input for the eval() and input() functions.
+# NB: compound_stmt in single_input is followed by extra NEWLINE!
+file_input: (NEWLINE | stmt)* ENDMARKER
+single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE
+eval_input: testlist NEWLINE* ENDMARKER
+
+decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE
+decorators: decorator+
+decorated: decorators (classdef | funcdef)
+funcdef: 'def' NAME parameters ['->' test] ':' suite
+parameters: '(' [typedargslist] ')'
+typedargslist: ((tfpdef ['=' test] ',')*
+ ('*' [tname] (',' tname ['=' test])* [',' '**' tname] | '**' tname)
+ | tfpdef ['=' test] (',' tfpdef ['=' test])* [','])
+tname: NAME [':' test]
+tfpdef: tname | '(' tfplist ')'
+tfplist: tfpdef (',' tfpdef)* [',']
+varargslist: ((vfpdef ['=' test] ',')*
+ ('*' [vname] (',' vname ['=' test])* [',' '**' vname] | '**' vname)
+ | vfpdef ['=' test] (',' vfpdef ['=' test])* [','])
+vname: NAME
+vfpdef: vname | '(' vfplist ')'
+vfplist: vfpdef (',' vfpdef)* [',']
+
+stmt: simple_stmt | compound_stmt
+simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE
+small_stmt: (expr_stmt | print_stmt | del_stmt | pass_stmt | flow_stmt |
+ import_stmt | global_stmt | exec_stmt | assert_stmt)
+expr_stmt: testlist_star_expr (augassign (yield_expr|testlist) |
+ ('=' (yield_expr|testlist_star_expr))*)
+testlist_star_expr: (test|star_expr) (',' (test|star_expr))* [',']
+augassign: ('+=' | '-=' | '*=' | '@=' | '/=' | '%=' | '&=' | '|=' | '^=' |
+ '<<=' | '>>=' | '**=' | '//=')
+# For normal assignments, additional restrictions enforced by the interpreter
+print_stmt: 'print' ( [ test (',' test)* [','] ] |
+ '>>' test [ (',' test)+ [','] ] )
+del_stmt: 'del' exprlist
+pass_stmt: 'pass'
+flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt | yield_stmt
+break_stmt: 'break'
+continue_stmt: 'continue'
+return_stmt: 'return' [testlist]
+yield_stmt: yield_expr
+raise_stmt: 'raise' [test ['from' test | ',' test [',' test]]]
+import_stmt: import_name | import_from
+import_name: 'import' dotted_as_names
+import_from: ('from' ('.'* dotted_name | '.'+)
+ 'import' ('*' | '(' import_as_names ')' | import_as_names))
+import_as_name: NAME ['as' NAME]
+dotted_as_name: dotted_name ['as' NAME]
+import_as_names: import_as_name (',' import_as_name)* [',']
+dotted_as_names: dotted_as_name (',' dotted_as_name)*
+dotted_name: NAME ('.' NAME)*
+global_stmt: ('global' | 'nonlocal') NAME (',' NAME)*
+exec_stmt: 'exec' expr ['in' test [',' test]]
+assert_stmt: 'assert' test [',' test]
+
+compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | with_stmt | funcdef | classdef | decorated
+if_stmt: 'if' test ':' suite ('elif' test ':' suite)* ['else' ':' suite]
+while_stmt: 'while' test ':' suite ['else' ':' suite]
+for_stmt: 'for' exprlist 'in' testlist ':' suite ['else' ':' suite]
+try_stmt: ('try' ':' suite
+ ((except_clause ':' suite)+
+ ['else' ':' suite]
+ ['finally' ':' suite] |
+ 'finally' ':' suite))
+with_stmt: 'with' with_item (',' with_item)* ':' suite
+with_item: test ['as' expr]
+with_var: 'as' expr
+# NB compile.c makes sure that the default except clause is last
+except_clause: 'except' [test [(',' | 'as') test]]
+suite: simple_stmt | NEWLINE INDENT stmt+ DEDENT
+
+# Backward compatibility cruft to support:
+# [ x for x in lambda: True, lambda: False if x() ]
+# even while also allowing:
+# lambda x: 5 if x else 2
+# (But not a mix of the two)
+testlist_safe: old_test [(',' old_test)+ [',']]
+old_test: or_test | old_lambdef
+old_lambdef: 'lambda' [varargslist] ':' old_test
+
+test: or_test ['if' or_test 'else' test] | lambdef
+or_test: and_test ('or' and_test)*
+and_test: not_test ('and' not_test)*
+not_test: 'not' not_test | comparison
+comparison: expr (comp_op expr)*
+comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not'
+star_expr: '*' expr
+expr: xor_expr ('|' xor_expr)*
+xor_expr: and_expr ('^' and_expr)*
+and_expr: shift_expr ('&' shift_expr)*
+shift_expr: arith_expr (('<<'|'>>') arith_expr)*
+arith_expr: term (('+'|'-') term)*
+term: factor (('*'|'@'|'/'|'%'|'//') factor)*
+factor: ('+'|'-'|'~') factor | power
+power: atom trailer* ['**' factor]
+atom: ('(' [yield_expr|testlist_gexp] ')' |
+ '[' [listmaker] ']' |
+ '{' [dictsetmaker] '}' |
+ '`' testlist1 '`' |
+ NAME | NUMBER | STRING+ | '.' '.' '.')
+listmaker: (test|star_expr) ( comp_for | (',' (test|star_expr))* [','] )
+testlist_gexp: (test|star_expr) ( comp_for | (',' (test|star_expr))* [','] )
+lambdef: 'lambda' [varargslist] ':' test
+trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME
+subscriptlist: subscript (',' subscript)* [',']
+subscript: test | [test] ':' [test] [sliceop]
+sliceop: ':' [test]
+exprlist: (expr|star_expr) (',' (expr|star_expr))* [',']
+testlist: test (',' test)* [',']
+dictsetmaker: ( ((test ':' test | '**' expr)
+ (comp_for | (',' (test ':' test | '**' expr))* [','])) |
+ ((test | star_expr)
+ (comp_for | (',' (test | star_expr))* [','])) )
+
+classdef: 'class' NAME ['(' [arglist] ')'] ':' suite
+
+arglist: argument (',' argument)* [',']
+
+# "test '=' test" is really "keyword '=' test", but we have no such token.
+# These need to be in a single rule to avoid grammar that is ambiguous
+# to our LL(1) parser. Even though 'test' includes '*expr' in star_expr,
+# we explicitly match '*' here, too, to give it proper precedence.
+# Illegal combinations and orderings are blocked in ast.c:
+# multiple (test comp_for) arguments are blocked; keyword unpackings
+# that precede iterable unpackings are blocked; etc.
+argument: ( test [comp_for] |
+ test '=' test |
+ '**' expr |
+ star_expr )
+
+comp_iter: comp_for | comp_if
+comp_for: 'for' exprlist 'in' testlist_safe [comp_iter]
+comp_if: 'if' old_test [comp_iter]
+
+testlist1: test (',' test)*
+
+# not used in grammar, but may appear in "node" passed from Parser to Compiler
+encoding_decl: NAME
+
+yield_expr: 'yield' [yield_arg]
+yield_arg: 'from' test | testlist
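
For orientation, this grammar file is what drives lib2to3's parser. A minimal
sketch of parsing a source string against it through the public pgen2 driver
API (the snippet is illustrative and not part of the patch itself):

    from lib2to3 import pygram, pytree
    from lib2to3.pgen2 import driver

    # Build a parser driver from the 2to3 grammar above.
    d = driver.Driver(pygram.python_grammar, convert=pytree.convert)
    # parse_string expects the source to end with a newline.
    tree = d.parse_string("print 'hello'\n")
    print(str(tree))  # the tree round-trips to the original source
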
diff --git a/lib/python2.7/lib2to3/Grammar2.7.13.final.0.pickle b/lib/python2.7/lib2to3/Grammar2.7.13.final.0.pickle
new file mode 100644
index 0000000..705bcfd
--- /dev/null
+++ b/lib/python2.7/lib2to3/Grammar2.7.13.final.0.pickle
Binary files differ
diff --git a/lib/python2.7/lib2to3/PatternGrammar.txt b/lib/python2.7/lib2to3/PatternGrammar.txt
new file mode 100644
index 0000000..36bf814
--- /dev/null
+++ b/lib/python2.7/lib2to3/PatternGrammar.txt
@@ -0,0 +1,28 @@
+# Copyright 2006 Google, Inc. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+# A grammar to describe tree matching patterns.
+# Not shown here:
+# - 'TOKEN' stands for any token (leaf node)
+# - 'any' stands for any node (leaf or interior)
+# With 'any' we can still specify the sub-structure.
+
+# The start symbol is 'Matcher'.
+
+Matcher: Alternatives ENDMARKER
+
+Alternatives: Alternative ('|' Alternative)*
+
+Alternative: (Unit | NegatedUnit)+
+
+Unit: [NAME '='] ( STRING [Repeater]
+ | NAME [Details] [Repeater]
+ | '(' Alternatives ')' [Repeater]
+ | '[' Alternatives ']'
+ )
+
+NegatedUnit: 'not' (STRING | NAME [Details] | '(' Alternatives ')')
+
+Repeater: '*' | '+' | '{' NUMBER [',' NUMBER] '}'
+
+Details: '<' Alternatives '>'
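
Patterns written in this grammar are compiled by patcomp.PatternCompiler,
added later in this patch. A hedged sketch, where node stands for a
parse-tree node obtained elsewhere:

    from lib2to3.patcomp import PatternCompiler

    pc = PatternCompiler()
    # Matches calls like len(x); 'args' captures the argument node.
    pattern = pc.compile_pattern("power< 'len' trailer< '(' args=any ')' > >")
    results = {}
    if pattern.match(node, results):  # node: a pytree node, assumed given
        print(results["args"])
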
diff --git a/lib/python2.7/lib2to3/PatternGrammar2.7.13.final.0.pickle b/lib/python2.7/lib2to3/PatternGrammar2.7.13.final.0.pickle
new file mode 100644
index 0000000..b42689c
--- /dev/null
+++ b/lib/python2.7/lib2to3/PatternGrammar2.7.13.final.0.pickle
Binary files differ
diff --git a/lib/python2.7/lib2to3/__init__.py b/lib/python2.7/lib2to3/__init__.py
new file mode 100644
index 0000000..ea30561
--- /dev/null
+++ b/lib/python2.7/lib2to3/__init__.py
@@ -0,0 +1 @@
+#empty
diff --git a/lib/python2.7/lib2to3/__main__.py b/lib/python2.7/lib2to3/__main__.py
new file mode 100644
index 0000000..80688ba
--- /dev/null
+++ b/lib/python2.7/lib2to3/__main__.py
@@ -0,0 +1,4 @@
+import sys
+from .main import main
+
+sys.exit(main("lib2to3.fixes"))
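
This entry point makes the package runnable as a script; for example (the -w
flag, handled by main.py later in this patch, writes the changes back):

    python -m lib2to3 -w example.py
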
diff --git a/lib/python2.7/lib2to3/btm_matcher.py b/lib/python2.7/lib2to3/btm_matcher.py
new file mode 100644
index 0000000..736ba2b
--- /dev/null
+++ b/lib/python2.7/lib2to3/btm_matcher.py
@@ -0,0 +1,168 @@
+"""A bottom-up tree matching algorithm implementation meant to speed
+up 2to3's matching process. After the tree patterns are reduced to
+their rarest linear path, a linear Aho-Corasick automaton is
+created. The linear automaton traverses the linear paths from the
+leaves to the root of the AST and returns a set of nodes for further
+matching. This significantly reduces the number of candidate nodes."""
+
+__author__ = "George Boutsioukis <gboutsioukis@gmail.com>"
+
+import logging
+import itertools
+from collections import defaultdict
+
+from . import pytree
+from .btm_utils import reduce_tree
+
+class BMNode(object):
+ """Class for a node of the Aho-Corasick automaton used in matching"""
+ count = itertools.count()
+ def __init__(self):
+ self.transition_table = {}
+ self.fixers = []
+ self.id = next(BMNode.count)
+ self.content = ''
+
+class BottomMatcher(object):
+ """The main matcher class. After instantiating the patterns should
+ be added using the add_fixer method"""
+
+ def __init__(self):
+ self.match = set()
+ self.root = BMNode()
+ self.nodes = [self.root]
+ self.fixers = []
+ self.logger = logging.getLogger("RefactoringTool")
+
+ def add_fixer(self, fixer):
+ """Reduces a fixer's pattern tree to a linear path and adds it
+ to the matcher(a common Aho-Corasick automaton). The fixer is
+ appended on the matching states and called when they are
+ reached"""
+ self.fixers.append(fixer)
+ tree = reduce_tree(fixer.pattern_tree)
+ linear = tree.get_linear_subpattern()
+ match_nodes = self.add(linear, start=self.root)
+ for match_node in match_nodes:
+ match_node.fixers.append(fixer)
+
+ def add(self, pattern, start):
+ "Recursively adds a linear pattern to the AC automaton"
+ #print("adding pattern", pattern, "to", start)
+ if not pattern:
+ #print("empty pattern")
+ return [start]
+ if isinstance(pattern[0], tuple):
+ #alternatives
+ #print("alternatives")
+ match_nodes = []
+ for alternative in pattern[0]:
+ #add all alternatives, and add the rest of the pattern
+ #to each end node
+ end_nodes = self.add(alternative, start=start)
+ for end in end_nodes:
+ match_nodes.extend(self.add(pattern[1:], end))
+ return match_nodes
+ else:
+ #single token
+ #not last
+ if pattern[0] not in start.transition_table:
+ #transition did not exist, create new
+ next_node = BMNode()
+ start.transition_table[pattern[0]] = next_node
+ else:
+ #transition exists already, follow
+ next_node = start.transition_table[pattern[0]]
+
+ if pattern[1:]:
+ end_nodes = self.add(pattern[1:], start=next_node)
+ else:
+ end_nodes = [next_node]
+ return end_nodes
+
+ def run(self, leaves):
+ """The main interface with the bottom matcher. The tree is
+ traversed from the bottom using the constructed
+ automaton. Nodes are only checked once as the tree is
+        retraversed. When the automaton fails, we give it one more
+        shot (in case the tree above matches as a whole with the
+        rejected leaf), then we break and move on to the next leaf.
+        There is the special case of multiple arguments (see code
+        comments) where we recheck the nodes.
+
+ Args:
+ The leaves of the AST tree to be matched
+
+ Returns:
+ A dictionary of node matches with fixers as the keys
+ """
+ current_ac_node = self.root
+ results = defaultdict(list)
+ for leaf in leaves:
+ current_ast_node = leaf
+ while current_ast_node:
+ current_ast_node.was_checked = True
+ for child in current_ast_node.children:
+ # multiple statements, recheck
+ if isinstance(child, pytree.Leaf) and child.value == u";":
+ current_ast_node.was_checked = False
+ break
+ if current_ast_node.type == 1:
+ #name
+ node_token = current_ast_node.value
+ else:
+ node_token = current_ast_node.type
+
+ if node_token in current_ac_node.transition_table:
+ #token matches
+ current_ac_node = current_ac_node.transition_table[node_token]
+ for fixer in current_ac_node.fixers:
+ if not fixer in results:
+ results[fixer] = []
+ results[fixer].append(current_ast_node)
+
+ else:
+ #matching failed, reset automaton
+ current_ac_node = self.root
+ if (current_ast_node.parent is not None
+ and current_ast_node.parent.was_checked):
+ #the rest of the tree upwards has been checked, next leaf
+ break
+
+ #recheck the rejected node once from the root
+ if node_token in current_ac_node.transition_table:
+ #token matches
+ current_ac_node = current_ac_node.transition_table[node_token]
+ for fixer in current_ac_node.fixers:
+ if not fixer in results.keys():
+ results[fixer] = []
+ results[fixer].append(current_ast_node)
+
+ current_ast_node = current_ast_node.parent
+ return results
+
+ def print_ac(self):
+ "Prints a graphviz diagram of the BM automaton(for debugging)"
+ print("digraph g{")
+ def print_node(node):
+ for subnode_key in node.transition_table.keys():
+ subnode = node.transition_table[subnode_key]
+ print("%d -> %d [label=%s] //%s" %
+ (node.id, subnode.id, type_repr(subnode_key), str(subnode.fixers)))
+ if subnode_key == 1:
+ print(subnode.content)
+ print_node(subnode)
+ print_node(self.root)
+ print("}")
+
+# taken from pytree.py for debugging; only used by print_ac
+_type_reprs = {}
+def type_repr(type_num):
+ global _type_reprs
+ if not _type_reprs:
+ from .pygram import python_symbols
+ # printing tokens is possible but not as useful
+ # from .pgen2 import token // token.__dict__.items():
+ for name, val in python_symbols.__dict__.items():
+ if type(val) == int: _type_reprs[val] = name
+ return _type_reprs.setdefault(type_num, type_num)
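
Typical use of the matcher, as wired up by refactor.py elsewhere in this
patch (the fixer list and the parse tree are assumed to come from there):

    from lib2to3.btm_matcher import BottomMatcher

    matcher = BottomMatcher()
    for fixer in fixers:  # BM_compatible fixers, assumed given
        matcher.add_fixer(fixer)
    # Maps each fixer to its candidate nodes, starting from the leaves.
    matches = matcher.run(tree.leaves())
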
diff --git a/lib/python2.7/lib2to3/btm_utils.py b/lib/python2.7/lib2to3/btm_utils.py
new file mode 100644
index 0000000..501f834
--- /dev/null
+++ b/lib/python2.7/lib2to3/btm_utils.py
@@ -0,0 +1,283 @@
+"Utility functions used by the btm_matcher module"
+
+from . import pytree
+from .pgen2 import grammar, token
+from .pygram import pattern_symbols, python_symbols
+
+syms = pattern_symbols
+pysyms = python_symbols
+tokens = grammar.opmap
+token_labels = token
+
+TYPE_ANY = -1
+TYPE_ALTERNATIVES = -2
+TYPE_GROUP = -3
+
+class MinNode(object):
+ """This class serves as an intermediate representation of the
+ pattern tree during the conversion to sets of leaf-to-root
+ subpatterns"""
+
+ def __init__(self, type=None, name=None):
+ self.type = type
+ self.name = name
+ self.children = []
+ self.leaf = False
+ self.parent = None
+ self.alternatives = []
+ self.group = []
+
+ def __repr__(self):
+ return str(self.type) + ' ' + str(self.name)
+
+ def leaf_to_root(self):
+ """Internal method. Returns a characteristic path of the
+ pattern tree. This method must be run for all leaves until the
+        linear subpatterns are merged into a single one."""
+ node = self
+ subp = []
+ while node:
+ if node.type == TYPE_ALTERNATIVES:
+ node.alternatives.append(subp)
+ if len(node.alternatives) == len(node.children):
+ #last alternative
+ subp = [tuple(node.alternatives)]
+ node.alternatives = []
+ node = node.parent
+ continue
+ else:
+ node = node.parent
+ subp = None
+ break
+
+ if node.type == TYPE_GROUP:
+ node.group.append(subp)
+ #probably should check the number of leaves
+ if len(node.group) == len(node.children):
+ subp = get_characteristic_subpattern(node.group)
+ node.group = []
+ node = node.parent
+ continue
+ else:
+ node = node.parent
+ subp = None
+ break
+
+ if node.type == token_labels.NAME and node.name:
+ #in case of type=name, use the name instead
+ subp.append(node.name)
+ else:
+ subp.append(node.type)
+
+ node = node.parent
+ return subp
+
+ def get_linear_subpattern(self):
+ """Drives the leaf_to_root method. The reason that
+ leaf_to_root must be run multiple times is because we need to
+ reject 'group' matches; for example the alternative form
+ (a | b c) creates a group [b c] that needs to be matched. Since
+        matching multiple linear patterns exceeds the automaton's
+        capabilities, leaf_to_root merges each group into a single
+        choice based on how 'characteristic' it is,
+
+        i.e. (a|b c) -> (a|b) if b is more characteristic than c
+
+ Returns: The most 'characteristic'(as defined by
+ get_characteristic_subpattern) path for the compiled pattern
+ tree.
+ """
+
+ for l in self.leaves():
+ subp = l.leaf_to_root()
+ if subp:
+ return subp
+
+ def leaves(self):
+ "Generator that returns the leaves of the tree"
+ for child in self.children:
+ for x in child.leaves():
+ yield x
+ if not self.children:
+ yield self
+
+def reduce_tree(node, parent=None):
+ """
+ Internal function. Reduces a compiled pattern tree to an
+ intermediate representation suitable for feeding the
+    automaton. This also trims off any optional pattern elements (like
+ [a], a*).
+ """
+
+ new_node = None
+ #switch on the node type
+ if node.type == syms.Matcher:
+ #skip
+ node = node.children[0]
+
+ if node.type == syms.Alternatives :
+ #2 cases
+ if len(node.children) <= 2:
+ #just a single 'Alternative', skip this node
+ new_node = reduce_tree(node.children[0], parent)
+ else:
+ #real alternatives
+ new_node = MinNode(type=TYPE_ALTERNATIVES)
+ #skip odd children('|' tokens)
+ for child in node.children:
+ if node.children.index(child)%2:
+ continue
+ reduced = reduce_tree(child, new_node)
+ if reduced is not None:
+ new_node.children.append(reduced)
+ elif node.type == syms.Alternative:
+ if len(node.children) > 1:
+
+ new_node = MinNode(type=TYPE_GROUP)
+ for child in node.children:
+ reduced = reduce_tree(child, new_node)
+ if reduced:
+ new_node.children.append(reduced)
+ if not new_node.children:
+ # delete the group if all of the children were reduced to None
+ new_node = None
+
+ else:
+ new_node = reduce_tree(node.children[0], parent)
+
+ elif node.type == syms.Unit:
+ if (isinstance(node.children[0], pytree.Leaf) and
+ node.children[0].value == '('):
+ #skip parentheses
+ return reduce_tree(node.children[1], parent)
+ if ((isinstance(node.children[0], pytree.Leaf) and
+ node.children[0].value == '[')
+ or
+ (len(node.children)>1 and
+ hasattr(node.children[1], "value") and
+ node.children[1].value == '[')):
+            #skip the whole unit if it's optional
+ return None
+
+ leaf = True
+ details_node = None
+ alternatives_node = None
+ has_repeater = False
+ repeater_node = None
+ has_variable_name = False
+
+ for child in node.children:
+ if child.type == syms.Details:
+ leaf = False
+ details_node = child
+ elif child.type == syms.Repeater:
+ has_repeater = True
+ repeater_node = child
+ elif child.type == syms.Alternatives:
+ alternatives_node = child
+ if hasattr(child, 'value') and child.value == '=': # variable name
+ has_variable_name = True
+
+ #skip variable name
+ if has_variable_name:
+ #skip variable name, '='
+ name_leaf = node.children[2]
+ if hasattr(name_leaf, 'value') and name_leaf.value == '(':
+ # skip parenthesis
+ name_leaf = node.children[3]
+ else:
+ name_leaf = node.children[0]
+
+ #set node type
+ if name_leaf.type == token_labels.NAME:
+ #(python) non-name or wildcard
+ if name_leaf.value == 'any':
+ new_node = MinNode(type=TYPE_ANY)
+ else:
+ if hasattr(token_labels, name_leaf.value):
+ new_node = MinNode(type=getattr(token_labels, name_leaf.value))
+ else:
+ new_node = MinNode(type=getattr(pysyms, name_leaf.value))
+
+ elif name_leaf.type == token_labels.STRING:
+ #(python) name or character; remove the apostrophes from
+ #the string value
+ name = name_leaf.value.strip("'")
+ if name in tokens:
+ new_node = MinNode(type=tokens[name])
+ else:
+ new_node = MinNode(type=token_labels.NAME, name=name)
+ elif name_leaf.type == syms.Alternatives:
+ new_node = reduce_tree(alternatives_node, parent)
+
+ #handle repeaters
+ if has_repeater:
+ if repeater_node.children[0].value == '*':
+ #reduce to None
+ new_node = None
+ elif repeater_node.children[0].value == '+':
+ #reduce to a single occurrence i.e. do nothing
+ pass
+ else:
+ #TODO: handle {min, max} repeaters
+ raise NotImplementedError
+ pass
+
+ #add children
+ if details_node and new_node is not None:
+ for child in details_node.children[1:-1]:
+ #skip '<', '>' markers
+ reduced = reduce_tree(child, new_node)
+ if reduced is not None:
+ new_node.children.append(reduced)
+ if new_node:
+ new_node.parent = parent
+ return new_node
+
+
+def get_characteristic_subpattern(subpatterns):
+ """Picks the most characteristic from a list of linear patterns
+ Current order used is:
+ names > common_names > common_chars
+ """
+ if not isinstance(subpatterns, list):
+ return subpatterns
+ if len(subpatterns)==1:
+ return subpatterns[0]
+
+ # first pick out the ones containing variable names
+ subpatterns_with_names = []
+ subpatterns_with_common_names = []
+ common_names = ['in', 'for', 'if' , 'not', 'None']
+ subpatterns_with_common_chars = []
+ common_chars = "[]().,:"
+ for subpattern in subpatterns:
+ if any(rec_test(subpattern, lambda x: type(x) is str)):
+ if any(rec_test(subpattern,
+ lambda x: isinstance(x, str) and x in common_chars)):
+ subpatterns_with_common_chars.append(subpattern)
+ elif any(rec_test(subpattern,
+ lambda x: isinstance(x, str) and x in common_names)):
+ subpatterns_with_common_names.append(subpattern)
+
+ else:
+ subpatterns_with_names.append(subpattern)
+
+ if subpatterns_with_names:
+ subpatterns = subpatterns_with_names
+ elif subpatterns_with_common_names:
+ subpatterns = subpatterns_with_common_names
+ elif subpatterns_with_common_chars:
+ subpatterns = subpatterns_with_common_chars
+ # of the remaining subpatterns pick out the longest one
+ return max(subpatterns, key=len)
+
+def rec_test(sequence, test_func):
+ """Tests test_func on all items of sequence and items of included
+ sub-iterables"""
+ for x in sequence:
+ if isinstance(x, (list, tuple)):
+ for y in rec_test(x, test_func):
+ yield y
+ else:
+ yield test_func(x)
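
Roughly, the pipeline these helpers implement (a sketch; fixer is assumed to
be a fixer whose pattern was compiled with with_tree=True, as
BaseFix.compile_pattern does below):

    from lib2to3.btm_utils import reduce_tree

    mintree = reduce_tree(fixer.pattern_tree)
    # A linear leaf-to-root path of token types and names;
    # tuples mark alternatives.
    linear = mintree.get_linear_subpattern()
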
diff --git a/lib/python2.7/lib2to3/fixer_base.py b/lib/python2.7/lib2to3/fixer_base.py
new file mode 100644
index 0000000..d437b96
--- /dev/null
+++ b/lib/python2.7/lib2to3/fixer_base.py
@@ -0,0 +1,189 @@
+# Copyright 2006 Google, Inc. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+"""Base class for fixers (optional, but recommended)."""
+
+# Python imports
+import logging
+import itertools
+
+# Local imports
+from .patcomp import PatternCompiler
+from . import pygram
+from .fixer_util import does_tree_import
+
+class BaseFix(object):
+
+ """Optional base class for fixers.
+
+ The subclass name must be FixFooBar where FooBar is the result of
+ removing underscores and capitalizing the words of the fix name.
+ For example, the class name for a fixer named 'has_key' should be
+ FixHasKey.
+ """
+
+ PATTERN = None # Most subclasses should override with a string literal
+ pattern = None # Compiled pattern, set by compile_pattern()
+ pattern_tree = None # Tree representation of the pattern
+ options = None # Options object passed to initializer
+ filename = None # The filename (set by set_filename)
+ logger = None # A logger (set by set_filename)
+ numbers = itertools.count(1) # For new_name()
+ used_names = set() # A set of all used NAMEs
+ order = "post" # Does the fixer prefer pre- or post-order traversal
+ explicit = False # Is this ignored by refactor.py -f all?
+ run_order = 5 # Fixers will be sorted by run order before execution
+ # Lower numbers will be run first.
+ _accept_type = None # [Advanced and not public] This tells RefactoringTool
+ # which node type to accept when there's not a pattern.
+
+ keep_line_order = False # For the bottom matcher: match with the
+ # original line order
+ BM_compatible = False # Compatibility with the bottom matching
+ # module; every fixer should set this
+ # manually
+
+ # Shortcut for access to Python grammar symbols
+ syms = pygram.python_symbols
+
+ def __init__(self, options, log):
+ """Initializer. Subclass may override.
+
+ Args:
+ options: a dict containing the options passed to RefactoringTool
+ that could be used to customize the fixer through the command line.
+ log: a list to append warnings and other messages to.
+ """
+ self.options = options
+ self.log = log
+ self.compile_pattern()
+
+ def compile_pattern(self):
+ """Compiles self.PATTERN into self.pattern.
+
+ Subclass may override if it doesn't want to use
+ self.{pattern,PATTERN} in .match().
+ """
+ if self.PATTERN is not None:
+ PC = PatternCompiler()
+ self.pattern, self.pattern_tree = PC.compile_pattern(self.PATTERN,
+ with_tree=True)
+
+ def set_filename(self, filename):
+ """Set the filename, and a logger derived from it.
+
+ The main refactoring tool should call this.
+ """
+ self.filename = filename
+ self.logger = logging.getLogger(filename)
+
+ def match(self, node):
+ """Returns match for a given parse tree node.
+
+ Should return a true or false object (not necessarily a bool).
+ It may return a non-empty dict of matching sub-nodes as
+ returned by a matching pattern.
+
+ Subclass may override.
+ """
+ results = {"node": node}
+ return self.pattern.match(node, results) and results
+
+ def transform(self, node, results):
+ """Returns the transformation for a given parse tree node.
+
+ Args:
+ node: the root of the parse tree that matched the fixer.
+ results: a dict mapping symbolic names to part of the match.
+
+ Returns:
+ None, or a node that is a modified copy of the
+ argument node. The node argument may also be modified in-place to
+ effect the same change.
+
+ Subclass *must* override.
+ """
+ raise NotImplementedError()
+
+ def new_name(self, template=u"xxx_todo_changeme"):
+ """Return a string suitable for use as an identifier
+
+ The new name is guaranteed not to conflict with other identifiers.
+ """
+ name = template
+ while name in self.used_names:
+ name = template + unicode(self.numbers.next())
+ self.used_names.add(name)
+ return name
+
+ def log_message(self, message):
+ if self.first_log:
+ self.first_log = False
+ self.log.append("### In file %s ###" % self.filename)
+ self.log.append(message)
+
+ def cannot_convert(self, node, reason=None):
+ """Warn the user that a given chunk of code is not valid Python 3,
+ but that it cannot be converted automatically.
+
+ First argument is the top-level node for the code in question.
+ Optional second argument is why it can't be converted.
+ """
+ lineno = node.get_lineno()
+ for_output = node.clone()
+ for_output.prefix = u""
+ msg = "Line %d: could not convert: %s"
+ self.log_message(msg % (lineno, for_output))
+ if reason:
+ self.log_message(reason)
+
+ def warning(self, node, reason):
+ """Used for warning the user about possible uncertainty in the
+ translation.
+
+ First argument is the top-level node for the code in question.
+        Second argument is the reason for the warning.
+ """
+ lineno = node.get_lineno()
+ self.log_message("Line %d: %s" % (lineno, reason))
+
+ def start_tree(self, tree, filename):
+ """Some fixers need to maintain tree-wide state.
+ This method is called once, at the start of tree fix-up.
+
+ tree - the root node of the tree to be processed.
+ filename - the name of the file the tree came from.
+ """
+ self.used_names = tree.used_names
+ self.set_filename(filename)
+ self.numbers = itertools.count(1)
+ self.first_log = True
+
+ def finish_tree(self, tree, filename):
+ """Some fixers need to maintain tree-wide state.
+ This method is called once, at the conclusion of tree fix-up.
+
+ tree - the root node of the tree to be processed.
+ filename - the name of the file the tree came from.
+ """
+ pass
+
+
+class ConditionalFix(BaseFix):
+ """ Base class for fixers which not execute if an import is found. """
+
+ # This is the name of the import which, if found, will cause the test to be skipped
+ skip_on = None
+
+ def start_tree(self, *args):
+ super(ConditionalFix, self).start_tree(*args)
+ self._should_skip = None
+
+ def should_skip(self, node):
+ if self._should_skip is not None:
+ return self._should_skip
+ pkg = self.skip_on.split(".")
+ name = pkg[-1]
+ pkg = ".".join(pkg[:-1])
+ self._should_skip = does_tree_import(pkg, name, node)
+ return self._should_skip
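
To illustrate the contract BaseFix defines, a minimal hypothetical fixer
could look like this (RefactoringTool instantiates it and calls match() and
transform()):

    from lib2to3 import fixer_base
    from lib2to3.fixer_util import Name

    class FixSpam(fixer_base.BaseFix):
        # Match any NAME leaf whose value is 'spam'.
        PATTERN = "'spam'"

        def transform(self, node, results):
            # Keep the original whitespace by copying the prefix.
            return Name(u"eggs", prefix=node.prefix)
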
diff --git a/lib/python2.7/lib2to3/fixer_util.py b/lib/python2.7/lib2to3/fixer_util.py
new file mode 100644
index 0000000..78fdf26
--- /dev/null
+++ b/lib/python2.7/lib2to3/fixer_util.py
@@ -0,0 +1,432 @@
+"""Utility functions, node construction macros, etc."""
+# Author: Collin Winter
+
+from itertools import islice
+
+# Local imports
+from .pgen2 import token
+from .pytree import Leaf, Node
+from .pygram import python_symbols as syms
+from . import patcomp
+
+
+###########################################################
+### Common node-construction "macros"
+###########################################################
+
+def KeywordArg(keyword, value):
+ return Node(syms.argument,
+ [keyword, Leaf(token.EQUAL, u"="), value])
+
+def LParen():
+ return Leaf(token.LPAR, u"(")
+
+def RParen():
+ return Leaf(token.RPAR, u")")
+
+def Assign(target, source):
+ """Build an assignment statement"""
+ if not isinstance(target, list):
+ target = [target]
+ if not isinstance(source, list):
+ source.prefix = u" "
+ source = [source]
+
+ return Node(syms.atom,
+ target + [Leaf(token.EQUAL, u"=", prefix=u" ")] + source)
+
+def Name(name, prefix=None):
+ """Return a NAME leaf"""
+ return Leaf(token.NAME, name, prefix=prefix)
+
+def Attr(obj, attr):
+ """A node tuple for obj.attr"""
+ return [obj, Node(syms.trailer, [Dot(), attr])]
+
+def Comma():
+ """A comma leaf"""
+ return Leaf(token.COMMA, u",")
+
+def Dot():
+ """A period (.) leaf"""
+ return Leaf(token.DOT, u".")
+
+def ArgList(args, lparen=LParen(), rparen=RParen()):
+ """A parenthesised argument list, used by Call()"""
+ node = Node(syms.trailer, [lparen.clone(), rparen.clone()])
+ if args:
+ node.insert_child(1, Node(syms.arglist, args))
+ return node
+
+def Call(func_name, args=None, prefix=None):
+ """A function call"""
+ node = Node(syms.power, [func_name, ArgList(args)])
+ if prefix is not None:
+ node.prefix = prefix
+ return node
+
+def Newline():
+ """A newline literal"""
+ return Leaf(token.NEWLINE, u"\n")
+
+def BlankLine():
+ """A blank line"""
+ return Leaf(token.NEWLINE, u"")
+
+def Number(n, prefix=None):
+ return Leaf(token.NUMBER, n, prefix=prefix)
+
+def Subscript(index_node):
+ """A numeric or string subscript"""
+ return Node(syms.trailer, [Leaf(token.LBRACE, u"["),
+ index_node,
+ Leaf(token.RBRACE, u"]")])
+
+def String(string, prefix=None):
+ """A string leaf"""
+ return Leaf(token.STRING, string, prefix=prefix)
+
+def ListComp(xp, fp, it, test=None):
+ """A list comprehension of the form [xp for fp in it if test].
+
+ If test is None, the "if test" part is omitted.
+ """
+ xp.prefix = u""
+ fp.prefix = u" "
+ it.prefix = u" "
+ for_leaf = Leaf(token.NAME, u"for")
+ for_leaf.prefix = u" "
+ in_leaf = Leaf(token.NAME, u"in")
+ in_leaf.prefix = u" "
+ inner_args = [for_leaf, fp, in_leaf, it]
+ if test:
+ test.prefix = u" "
+ if_leaf = Leaf(token.NAME, u"if")
+ if_leaf.prefix = u" "
+ inner_args.append(Node(syms.comp_if, [if_leaf, test]))
+ inner = Node(syms.listmaker, [xp, Node(syms.comp_for, inner_args)])
+ return Node(syms.atom,
+ [Leaf(token.LBRACE, u"["),
+ inner,
+ Leaf(token.RBRACE, u"]")])
+
+def FromImport(package_name, name_leafs):
+ """ Return an import statement in the form:
+ from package import name_leafs"""
+ # XXX: May not handle dotted imports properly (eg, package_name='foo.bar')
+ #assert package_name == '.' or '.' not in package_name, "FromImport has "\
+ # "not been tested with dotted package names -- use at your own "\
+ # "peril!"
+
+ for leaf in name_leafs:
+ # Pull the leaves out of their old tree
+ leaf.remove()
+
+ children = [Leaf(token.NAME, u"from"),
+ Leaf(token.NAME, package_name, prefix=u" "),
+ Leaf(token.NAME, u"import", prefix=u" "),
+ Node(syms.import_as_names, name_leafs)]
+ imp = Node(syms.import_from, children)
+ return imp
+
+
+###########################################################
+### Determine whether a node represents a given literal
+###########################################################
+
+def is_tuple(node):
+ """Does the node represent a tuple literal?"""
+ if isinstance(node, Node) and node.children == [LParen(), RParen()]:
+ return True
+ return (isinstance(node, Node)
+ and len(node.children) == 3
+ and isinstance(node.children[0], Leaf)
+ and isinstance(node.children[1], Node)
+ and isinstance(node.children[2], Leaf)
+ and node.children[0].value == u"("
+ and node.children[2].value == u")")
+
+def is_list(node):
+ """Does the node represent a list literal?"""
+ return (isinstance(node, Node)
+ and len(node.children) > 1
+ and isinstance(node.children[0], Leaf)
+ and isinstance(node.children[-1], Leaf)
+ and node.children[0].value == u"["
+ and node.children[-1].value == u"]")
+
+
+###########################################################
+### Misc
+###########################################################
+
+def parenthesize(node):
+ return Node(syms.atom, [LParen(), node, RParen()])
+
+
+consuming_calls = set(["sorted", "list", "set", "any", "all", "tuple", "sum",
+ "min", "max", "enumerate"])
+
+def attr_chain(obj, attr):
+ """Follow an attribute chain.
+
+    If you have a chain of objects where a.foo -> b, b.foo -> c, etc.,
+    use this to iterate over all objects in the chain. Iteration
+    terminates when getattr(x, attr) is None.
+
+ Args:
+ obj: the starting object
+ attr: the name of the chaining attribute
+
+ Yields:
+ Each successive object in the chain.
+ """
+ next = getattr(obj, attr)
+ while next:
+ yield next
+ next = getattr(next, attr)
+
+p0 = """for_stmt< 'for' any 'in' node=any ':' any* >
+ | comp_for< 'for' any 'in' node=any any* >
+ """
+p1 = """
+power<
+ ( 'iter' | 'list' | 'tuple' | 'sorted' | 'set' | 'sum' |
+ 'any' | 'all' | 'enumerate' | (any* trailer< '.' 'join' >) )
+ trailer< '(' node=any ')' >
+ any*
+>
+"""
+p2 = """
+power<
+ ( 'sorted' | 'enumerate' )
+ trailer< '(' arglist<node=any any*> ')' >
+ any*
+>
+"""
+pats_built = False
+def in_special_context(node):
+ """ Returns true if node is in an environment where all that is required
+        of it is being iterable (i.e., it doesn't matter if it returns a list
+ or an iterator).
+ See test_map_nochange in test_fixers.py for some examples and tests.
+ """
+ global p0, p1, p2, pats_built
+ if not pats_built:
+ p0 = patcomp.compile_pattern(p0)
+ p1 = patcomp.compile_pattern(p1)
+ p2 = patcomp.compile_pattern(p2)
+ pats_built = True
+ patterns = [p0, p1, p2]
+ for pattern, parent in zip(patterns, attr_chain(node, "parent")):
+ results = {}
+ if pattern.match(parent, results) and results["node"] is node:
+ return True
+ return False
+
+def is_probably_builtin(node):
+ """
+ Check that something isn't an attribute or function name etc.
+ """
+ prev = node.prev_sibling
+ if prev is not None and prev.type == token.DOT:
+ # Attribute lookup.
+ return False
+ parent = node.parent
+ if parent.type in (syms.funcdef, syms.classdef):
+ return False
+ if parent.type == syms.expr_stmt and parent.children[0] is node:
+ # Assignment.
+ return False
+ if parent.type == syms.parameters or \
+ (parent.type == syms.typedargslist and (
+ (prev is not None and prev.type == token.COMMA) or
+ parent.children[0] is node
+ )):
+ # The name of an argument.
+ return False
+ return True
+
+def find_indentation(node):
+ """Find the indentation of *node*."""
+ while node is not None:
+ if node.type == syms.suite and len(node.children) > 2:
+ indent = node.children[1]
+ if indent.type == token.INDENT:
+ return indent.value
+ node = node.parent
+ return u""
+
+###########################################################
+### The following functions are to find bindings in a suite
+###########################################################
+
+def make_suite(node):
+ if node.type == syms.suite:
+ return node
+ node = node.clone()
+ parent, node.parent = node.parent, None
+ suite = Node(syms.suite, [node])
+ suite.parent = parent
+ return suite
+
+def find_root(node):
+ """Find the top level namespace."""
+ # Scamper up to the top level namespace
+ while node.type != syms.file_input:
+ node = node.parent
+ if not node:
+ raise ValueError("root found before file_input node was found.")
+ return node
+
+def does_tree_import(package, name, node):
+ """ Returns true if name is imported from package at the
+ top level of the tree which node belongs to.
+ To cover the case of an import like 'import foo', use
+ None for the package and 'foo' for the name. """
+ binding = find_binding(name, find_root(node), package)
+ return bool(binding)
+
+def is_import(node):
+ """Returns true if the node is an import statement."""
+ return node.type in (syms.import_name, syms.import_from)
+
+def touch_import(package, name, node):
+ """ Works like `does_tree_import` but adds an import statement
+ if it was not imported. """
+ def is_import_stmt(node):
+ return (node.type == syms.simple_stmt and node.children and
+ is_import(node.children[0]))
+
+ root = find_root(node)
+
+ if does_tree_import(package, name, root):
+ return
+
+ # figure out where to insert the new import. First try to find
+ # the first import and then skip to the last one.
+ insert_pos = offset = 0
+ for idx, node in enumerate(root.children):
+ if not is_import_stmt(node):
+ continue
+ for offset, node2 in enumerate(root.children[idx:]):
+ if not is_import_stmt(node2):
+ break
+ insert_pos = idx + offset
+ break
+
+ # if there are no imports where we can insert, find the docstring.
+ # if that also fails, we stick to the beginning of the file
+ if insert_pos == 0:
+ for idx, node in enumerate(root.children):
+ if (node.type == syms.simple_stmt and node.children and
+ node.children[0].type == token.STRING):
+ insert_pos = idx + 1
+ break
+
+ if package is None:
+ import_ = Node(syms.import_name, [
+ Leaf(token.NAME, u"import"),
+ Leaf(token.NAME, name, prefix=u" ")
+ ])
+ else:
+ import_ = FromImport(package, [Leaf(token.NAME, name, prefix=u" ")])
+
+ children = [import_, Newline()]
+ root.insert_child(insert_pos, Node(syms.simple_stmt, children))
+
+
+_def_syms = set([syms.classdef, syms.funcdef])
+def find_binding(name, node, package=None):
+ """ Returns the node which binds variable name, otherwise None.
+ If optional argument package is supplied, only imports will
+ be returned.
+ See test cases for examples."""
+ for child in node.children:
+ ret = None
+ if child.type == syms.for_stmt:
+ if _find(name, child.children[1]):
+ return child
+ n = find_binding(name, make_suite(child.children[-1]), package)
+ if n: ret = n
+ elif child.type in (syms.if_stmt, syms.while_stmt):
+ n = find_binding(name, make_suite(child.children[-1]), package)
+ if n: ret = n
+ elif child.type == syms.try_stmt:
+ n = find_binding(name, make_suite(child.children[2]), package)
+ if n:
+ ret = n
+ else:
+ for i, kid in enumerate(child.children[3:]):
+ if kid.type == token.COLON and kid.value == ":":
+ # i+3 is the colon, i+4 is the suite
+ n = find_binding(name, make_suite(child.children[i+4]), package)
+ if n: ret = n
+ elif child.type in _def_syms and child.children[1].value == name:
+ ret = child
+ elif _is_import_binding(child, name, package):
+ ret = child
+ elif child.type == syms.simple_stmt:
+ ret = find_binding(name, child, package)
+ elif child.type == syms.expr_stmt:
+ if _find(name, child.children[0]):
+ ret = child
+
+ if ret:
+ if not package:
+ return ret
+ if is_import(ret):
+ return ret
+ return None
+
+_block_syms = set([syms.funcdef, syms.classdef, syms.trailer])
+def _find(name, node):
+ nodes = [node]
+ while nodes:
+ node = nodes.pop()
+ if node.type > 256 and node.type not in _block_syms:
+ nodes.extend(node.children)
+ elif node.type == token.NAME and node.value == name:
+ return node
+ return None
+
+def _is_import_binding(node, name, package=None):
+ """ Will reuturn node if node will import name, or node
+ will import * from package. None is returned otherwise.
+ See test cases for examples. """
+
+ if node.type == syms.import_name and not package:
+ imp = node.children[1]
+ if imp.type == syms.dotted_as_names:
+ for child in imp.children:
+ if child.type == syms.dotted_as_name:
+ if child.children[2].value == name:
+ return node
+ elif child.type == token.NAME and child.value == name:
+ return node
+ elif imp.type == syms.dotted_as_name:
+ last = imp.children[-1]
+ if last.type == token.NAME and last.value == name:
+ return node
+ elif imp.type == token.NAME and imp.value == name:
+ return node
+ elif node.type == syms.import_from:
+ # unicode(...) is used to make life easier here, because
+ # from a.b import parses to ['import', ['a', '.', 'b'], ...]
+ if package and unicode(node.children[1]).strip() != package:
+ return None
+ n = node.children[3]
+ if package and _find(u"as", n):
+ # See test_from_import_as for explanation
+ return None
+ elif n.type == syms.import_as_names and _find(name, n):
+ return node
+ elif n.type == syms.import_as_name:
+ child = n.children[2]
+ if child.type == token.NAME and child.value == name:
+ return node
+ elif n.type == token.NAME and n.value == name:
+ return node
+ elif package and n.type == token.STAR:
+ return node
+ return None
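
A small sketch of the construction macros above in action (names are
illustrative):

    from lib2to3.fixer_util import Call, Name

    new = Call(Name(u"list"), [Name(u"d")], prefix=u" ")
    str(new)  # -> " list(d)"
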
diff --git a/lib/python2.7/lib2to3/fixes/__init__.py b/lib/python2.7/lib2to3/fixes/__init__.py
new file mode 100644
index 0000000..b93054b
--- /dev/null
+++ b/lib/python2.7/lib2to3/fixes/__init__.py
@@ -0,0 +1 @@
+# Dummy file to make this directory a package.
diff --git a/lib/python2.7/lib2to3/fixes/fix_apply.py b/lib/python2.7/lib2to3/fixes/fix_apply.py
new file mode 100644
index 0000000..1a465c2
--- /dev/null
+++ b/lib/python2.7/lib2to3/fixes/fix_apply.py
@@ -0,0 +1,70 @@
+# Copyright 2006 Google, Inc. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+"""Fixer for apply().
+
+This converts apply(func, v, k) into (func)(*v, **k)."""
+
+# Local imports
+from .. import pytree
+from ..pgen2 import token
+from .. import fixer_base
+from ..fixer_util import Call, Comma, parenthesize
+
+class FixApply(fixer_base.BaseFix):
+ BM_compatible = True
+
+ PATTERN = """
+ power< 'apply'
+ trailer<
+ '('
+ arglist<
+ (not argument<NAME '=' any>) func=any ','
+ (not argument<NAME '=' any>) args=any [','
+ (not argument<NAME '=' any>) kwds=any] [',']
+ >
+ ')'
+ >
+ >
+ """
+
+ def transform(self, node, results):
+ syms = self.syms
+ assert results
+ func = results["func"]
+ args = results["args"]
+ kwds = results.get("kwds")
+ # I feel like we should be able to express this logic in the
+ # PATTERN above but I don't know how to do it so...
+ if args:
+ if args.type == self.syms.star_expr:
+ return # Make no change.
+ if (args.type == self.syms.argument and
+ args.children[0].value == '**'):
+ return # Make no change.
+ if kwds and (kwds.type == self.syms.argument and
+ kwds.children[0].value == '**'):
+ return # Make no change.
+ prefix = node.prefix
+ func = func.clone()
+ if (func.type not in (token.NAME, syms.atom) and
+ (func.type != syms.power or
+ func.children[-2].type == token.DOUBLESTAR)):
+ # Need to parenthesize
+ func = parenthesize(func)
+ func.prefix = ""
+ args = args.clone()
+ args.prefix = ""
+ if kwds is not None:
+ kwds = kwds.clone()
+ kwds.prefix = ""
+ l_newargs = [pytree.Leaf(token.STAR, u"*"), args]
+ if kwds is not None:
+ l_newargs.extend([Comma(),
+ pytree.Leaf(token.DOUBLESTAR, u"**"),
+ kwds])
+ l_newargs[-2].prefix = u" " # that's the ** token
+ # XXX Sometimes we could be cleverer, e.g. apply(f, (x, y) + t)
+ # can be translated into f(x, y, *t) instead of f(*(x, y) + t)
+ #new = pytree.Node(syms.power, (func, ArgList(l_newargs)))
+ return Call(func, l_newargs, prefix=prefix)
diff --git a/lib/python2.7/lib2to3/fixes/fix_asserts.py b/lib/python2.7/lib2to3/fixes/fix_asserts.py
new file mode 100644
index 0000000..5bcec88
--- /dev/null
+++ b/lib/python2.7/lib2to3/fixes/fix_asserts.py
@@ -0,0 +1,34 @@
+"""Fixer that replaces deprecated unittest method names."""
+
+# Author: Ezio Melotti
+
+from ..fixer_base import BaseFix
+from ..fixer_util import Name
+
+NAMES = dict(
+ assert_="assertTrue",
+ assertEquals="assertEqual",
+ assertNotEquals="assertNotEqual",
+ assertAlmostEquals="assertAlmostEqual",
+ assertNotAlmostEquals="assertNotAlmostEqual",
+ assertRegexpMatches="assertRegex",
+ assertRaisesRegexp="assertRaisesRegex",
+ failUnlessEqual="assertEqual",
+ failIfEqual="assertNotEqual",
+ failUnlessAlmostEqual="assertAlmostEqual",
+ failIfAlmostEqual="assertNotAlmostEqual",
+ failUnless="assertTrue",
+ failUnlessRaises="assertRaises",
+ failIf="assertFalse",
+)
+
+
+class FixAsserts(BaseFix):
+
+ PATTERN = """
+ power< any+ trailer< '.' meth=(%s)> any* >
+ """ % '|'.join(map(repr, NAMES))
+
+ def transform(self, node, results):
+ name = results["meth"][0]
+ name.replace(Name(NAMES[str(name)], prefix=name.prefix))
diff --git a/lib/python2.7/lib2to3/fixes/fix_basestring.py b/lib/python2.7/lib2to3/fixes/fix_basestring.py
new file mode 100644
index 0000000..a3c9a43
--- /dev/null
+++ b/lib/python2.7/lib2to3/fixes/fix_basestring.py
@@ -0,0 +1,14 @@
+"""Fixer for basestring -> str."""
+# Author: Christian Heimes
+
+# Local imports
+from .. import fixer_base
+from ..fixer_util import Name
+
+class FixBasestring(fixer_base.BaseFix):
+ BM_compatible = True
+
+ PATTERN = "'basestring'"
+
+ def transform(self, node, results):
+ return Name(u"str", prefix=node.prefix)
diff --git a/lib/python2.7/lib2to3/fixes/fix_buffer.py b/lib/python2.7/lib2to3/fixes/fix_buffer.py
new file mode 100644
index 0000000..c6b0928
--- /dev/null
+++ b/lib/python2.7/lib2to3/fixes/fix_buffer.py
@@ -0,0 +1,22 @@
+# Copyright 2007 Google, Inc. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+"""Fixer that changes buffer(...) into memoryview(...)."""
+
+# Local imports
+from .. import fixer_base
+from ..fixer_util import Name
+
+
+class FixBuffer(fixer_base.BaseFix):
+ BM_compatible = True
+
+ explicit = True # The user must ask for this fixer
+
+ PATTERN = """
+ power< name='buffer' trailer< '(' [any] ')' > any* >
+ """
+
+ def transform(self, node, results):
+ name = results["name"]
+ name.replace(Name(u"memoryview", prefix=name.prefix))
diff --git a/lib/python2.7/lib2to3/fixes/fix_dict.py b/lib/python2.7/lib2to3/fixes/fix_dict.py
new file mode 100644
index 0000000..f681e4d
--- /dev/null
+++ b/lib/python2.7/lib2to3/fixes/fix_dict.py
@@ -0,0 +1,107 @@
+# Copyright 2007 Google, Inc. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+"""Fixer for dict methods.
+
+d.keys() -> list(d.keys())
+d.items() -> list(d.items())
+d.values() -> list(d.values())
+
+d.iterkeys() -> iter(d.keys())
+d.iteritems() -> iter(d.items())
+d.itervalues() -> iter(d.values())
+
+d.viewkeys() -> d.keys()
+d.viewitems() -> d.items()
+d.viewvalues() -> d.values()
+
+Except in certain very specific contexts: the iter() can be dropped
+when the context is list(), sorted(), iter() or for...in; the list()
+can be dropped when the context is list() or sorted() (but not iter()
+or for...in!). Special contexts that apply to both: list(), sorted(), tuple(),
+set(), any(), all(), sum().
+
+Note: iter(d.keys()) could be written as iter(d) but since the
+original d.iterkeys() was also redundant we don't fix this. And there
+are (rare) contexts where it makes a difference (e.g. when passing it
+as an argument to a function that introspects the argument).
+"""
+
+# Local imports
+from .. import pytree
+from .. import patcomp
+from ..pgen2 import token
+from .. import fixer_base
+from ..fixer_util import Name, Call, LParen, RParen, ArgList, Dot
+from .. import fixer_util
+
+
+iter_exempt = fixer_util.consuming_calls | set(["iter"])
+
+
+class FixDict(fixer_base.BaseFix):
+ BM_compatible = True
+
+ PATTERN = """
+ power< head=any+
+ trailer< '.' method=('keys'|'items'|'values'|
+ 'iterkeys'|'iteritems'|'itervalues'|
+ 'viewkeys'|'viewitems'|'viewvalues') >
+ parens=trailer< '(' ')' >
+ tail=any*
+ >
+ """
+
+ def transform(self, node, results):
+ head = results["head"]
+ method = results["method"][0] # Extract node for method name
+ tail = results["tail"]
+ syms = self.syms
+ method_name = method.value
+ isiter = method_name.startswith(u"iter")
+ isview = method_name.startswith(u"view")
+ if isiter or isview:
+ method_name = method_name[4:]
+ assert method_name in (u"keys", u"items", u"values"), repr(method)
+ head = [n.clone() for n in head]
+ tail = [n.clone() for n in tail]
+ special = not tail and self.in_special_context(node, isiter)
+ args = head + [pytree.Node(syms.trailer,
+ [Dot(),
+ Name(method_name,
+ prefix=method.prefix)]),
+ results["parens"].clone()]
+ new = pytree.Node(syms.power, args)
+ if not (special or isview):
+ new.prefix = u""
+ new = Call(Name(u"iter" if isiter else u"list"), [new])
+ if tail:
+ new = pytree.Node(syms.power, [new] + tail)
+ new.prefix = node.prefix
+ return new
+
+ P1 = "power< func=NAME trailer< '(' node=any ')' > any* >"
+ p1 = patcomp.compile_pattern(P1)
+
+ P2 = """for_stmt< 'for' any 'in' node=any ':' any* >
+ | comp_for< 'for' any 'in' node=any any* >
+ """
+ p2 = patcomp.compile_pattern(P2)
+
+ def in_special_context(self, node, isiter):
+ if node.parent is None:
+ return False
+ results = {}
+ if (node.parent.parent is not None and
+ self.p1.match(node.parent.parent, results) and
+ results["node"] is node):
+ if isiter:
+ # iter(d.iterkeys()) -> iter(d.keys()), etc.
+ return results["func"].value in iter_exempt
+ else:
+ # list(d.keys()) -> list(d.keys()), etc.
+ return results["func"].value in fixer_util.consuming_calls
+ if not isiter:
+ return False
+ # for ... in d.iterkeys() -> for ... in d.keys(), etc.
+ return self.p2.match(node.parent, results) and results["node"] is node
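
The rules in the docstring can be observed end to end through the refactoring
API (a sketch; the fixer module path is the one added by this patch):

    from lib2to3.refactor import RefactoringTool

    rt = RefactoringTool(["lib2to3.fixes.fix_dict"])
    # refactor_string expects the code to end with a newline.
    print(rt.refactor_string(u"d.iterkeys()\n", "<example>"))
    # -> iter(d.keys())
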
diff --git a/lib/python2.7/lib2to3/fixes/fix_except.py b/lib/python2.7/lib2to3/fixes/fix_except.py
new file mode 100644
index 0000000..e324718
--- /dev/null
+++ b/lib/python2.7/lib2to3/fixes/fix_except.py
@@ -0,0 +1,93 @@
+"""Fixer for except statements with named exceptions.
+
+The following cases will be converted:
+
+- "except E, T:" where T is a name:
+
+ except E as T:
+
+- "except E, T:" where T is not a name, tuple or list:
+
+ except E as t:
+ T = t
+
+ This is done because the target of an "except" clause must be a
+ name.
+
+- "except E, T:" where T is a tuple or list literal:
+
+ except E as t:
+ T = t.args
+"""
+# Author: Collin Winter
+
+# Local imports
+from .. import pytree
+from ..pgen2 import token
+from .. import fixer_base
+from ..fixer_util import Assign, Attr, Name, is_tuple, is_list, syms
+
+def find_excepts(nodes):
+ for i, n in enumerate(nodes):
+ if n.type == syms.except_clause:
+ if n.children[0].value == u'except':
+ yield (n, nodes[i+2])
+
+class FixExcept(fixer_base.BaseFix):
+ BM_compatible = True
+
+ PATTERN = """
+ try_stmt< 'try' ':' (simple_stmt | suite)
+ cleanup=(except_clause ':' (simple_stmt | suite))+
+ tail=(['except' ':' (simple_stmt | suite)]
+ ['else' ':' (simple_stmt | suite)]
+ ['finally' ':' (simple_stmt | suite)]) >
+ """
+
+ def transform(self, node, results):
+ syms = self.syms
+
+ tail = [n.clone() for n in results["tail"]]
+
+ try_cleanup = [ch.clone() for ch in results["cleanup"]]
+ for except_clause, e_suite in find_excepts(try_cleanup):
+ if len(except_clause.children) == 4:
+ (E, comma, N) = except_clause.children[1:4]
+ comma.replace(Name(u"as", prefix=u" "))
+
+ if N.type != token.NAME:
+ # Generate a new N for the except clause
+ new_N = Name(self.new_name(), prefix=u" ")
+ target = N.clone()
+ target.prefix = u""
+ N.replace(new_N)
+ new_N = new_N.clone()
+
+ # Insert "old_N = new_N" as the first statement in
+ # the except body. This loop skips leading whitespace
+ # and indents
+ #TODO(cwinter) suite-cleanup
+ suite_stmts = e_suite.children
+ for i, stmt in enumerate(suite_stmts):
+ if isinstance(stmt, pytree.Node):
+ break
+
+ # The assignment is different if old_N is a tuple or list
+ # In that case, the assignment is old_N = new_N.args
+ if is_tuple(N) or is_list(N):
+ assign = Assign(target, Attr(new_N, Name(u'args')))
+ else:
+ assign = Assign(target, new_N)
+
+ #TODO(cwinter) stopgap until children becomes a smart list
+ for child in reversed(suite_stmts[:i]):
+ e_suite.insert_child(0, child)
+ e_suite.insert_child(i, assign)
+ elif N.prefix == u"":
+ # No space after a comma is legal; no space after "as",
+ # not so much.
+ N.prefix = u" "
+
+ #TODO(cwinter) fix this when children becomes a smart list
+ children = [c.clone() for c in node.children[:3]] + try_cleanup + tail
+ return pytree.Node(node.type, children)
diff --git a/lib/python2.7/lib2to3/fixes/fix_exec.py b/lib/python2.7/lib2to3/fixes/fix_exec.py
new file mode 100644
index 0000000..50e1854
--- /dev/null
+++ b/lib/python2.7/lib2to3/fixes/fix_exec.py
@@ -0,0 +1,40 @@
+# Copyright 2006 Google, Inc. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+"""Fixer for exec.
+
+This converts usages of the exec statement into calls to a built-in
+exec() function.
+
+exec code in ns1, ns2 -> exec(code, ns1, ns2)
+"""
+
+# Local imports
+from .. import pytree
+from .. import fixer_base
+from ..fixer_util import Comma, Name, Call
+
+
+class FixExec(fixer_base.BaseFix):
+ BM_compatible = True
+
+ PATTERN = """
+ exec_stmt< 'exec' a=any 'in' b=any [',' c=any] >
+ |
+ exec_stmt< 'exec' (not atom<'(' [any] ')'>) a=any >
+ """
+
+ def transform(self, node, results):
+ assert results
+ syms = self.syms
+ a = results["a"]
+ b = results.get("b")
+ c = results.get("c")
+ args = [a.clone()]
+ args[0].prefix = ""
+ if b is not None:
+ args.extend([Comma(), b.clone()])
+ if c is not None:
+ args.extend([Comma(), c.clone()])
+
+ return Call(Name(u"exec"), args, prefix=node.prefix)
diff --git a/lib/python2.7/lib2to3/fixes/fix_execfile.py b/lib/python2.7/lib2to3/fixes/fix_execfile.py
new file mode 100644
index 0000000..2f29d3b
--- /dev/null
+++ b/lib/python2.7/lib2to3/fixes/fix_execfile.py
@@ -0,0 +1,52 @@
+# Copyright 2006 Google, Inc. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+"""Fixer for execfile.
+
+This converts usages of the execfile function into calls to the built-in
+exec() function.
+"""
+
+from .. import fixer_base
+from ..fixer_util import (Comma, Name, Call, LParen, RParen, Dot, Node,
+ ArgList, String, syms)
+
+
+class FixExecfile(fixer_base.BaseFix):
+ BM_compatible = True
+
+ PATTERN = """
+ power< 'execfile' trailer< '(' arglist< filename=any [',' globals=any [',' locals=any ] ] > ')' > >
+ |
+ power< 'execfile' trailer< '(' filename=any ')' > >
+ """
+
+ def transform(self, node, results):
+ assert results
+ filename = results["filename"]
+ globals = results.get("globals")
+ locals = results.get("locals")
+
+ # Copy over the prefix from the right parentheses end of the execfile
+ # call.
+ execfile_paren = node.children[-1].children[-1].clone()
+ # Construct open().read().
+ open_args = ArgList([filename.clone()], rparen=execfile_paren)
+ open_call = Node(syms.power, [Name(u"open"), open_args])
+ read = [Node(syms.trailer, [Dot(), Name(u'read')]),
+ Node(syms.trailer, [LParen(), RParen()])]
+ open_expr = [open_call] + read
+ # Wrap the open call in a compile call. This is so the filename will be
+ # preserved in the execed code.
+ filename_arg = filename.clone()
+ filename_arg.prefix = u" "
+ exec_str = String(u"'exec'", u" ")
+ compile_args = open_expr + [Comma(), filename_arg, Comma(), exec_str]
+ compile_call = Call(Name(u"compile"), compile_args, u"")
+ # Finally, replace the execfile call with an exec call.
+ args = [compile_call]
+ if globals is not None:
+ args.extend([Comma(), globals.clone()])
+ if locals is not None:
+ args.extend([Comma(), locals.clone()])
+ return Call(Name(u"exec"), args, prefix=node.prefix)
diff --git a/lib/python2.7/lib2to3/fixes/fix_exitfunc.py b/lib/python2.7/lib2to3/fixes/fix_exitfunc.py
new file mode 100644
index 0000000..3f3fbbf
--- /dev/null
+++ b/lib/python2.7/lib2to3/fixes/fix_exitfunc.py
@@ -0,0 +1,72 @@
+"""
+Convert use of sys.exitfunc to use the atexit module.
+"""
+
+# Author: Benjamin Peterson
+
+from lib2to3 import pytree, fixer_base
+from lib2to3.fixer_util import Name, Attr, Call, Comma, Newline, syms
+
+
+class FixExitfunc(fixer_base.BaseFix):
+ keep_line_order = True
+ BM_compatible = True
+
+ PATTERN = """
+ (
+ sys_import=import_name<'import'
+ ('sys'
+ |
+ dotted_as_names< (any ',')* 'sys' (',' any)* >
+ )
+ >
+ |
+ expr_stmt<
+ power< 'sys' trailer< '.' 'exitfunc' > >
+ '=' func=any >
+ )
+ """
+
+ def __init__(self, *args):
+ super(FixExitfunc, self).__init__(*args)
+
+ def start_tree(self, tree, filename):
+ super(FixExitfunc, self).start_tree(tree, filename)
+ self.sys_import = None
+
+ def transform(self, node, results):
+ # First, find the sys import. We'll just hope it's global scope.
+ if "sys_import" in results:
+ if self.sys_import is None:
+ self.sys_import = results["sys_import"]
+ return
+
+ func = results["func"].clone()
+ func.prefix = u""
+ register = pytree.Node(syms.power,
+ Attr(Name(u"atexit"), Name(u"register"))
+ )
+ call = Call(register, [func], node.prefix)
+ node.replace(call)
+
+ if self.sys_import is None:
+ # That's interesting.
+ self.warning(node, "Can't find sys import; Please add an atexit "
+ "import at the top of your file.")
+ return
+
+ # Now add an atexit import after the sys import.
+ names = self.sys_import.children[1]
+ if names.type == syms.dotted_as_names:
+ names.append_child(Comma())
+ names.append_child(Name(u"atexit", u" "))
+ else:
+ containing_stmt = self.sys_import.parent
+ position = containing_stmt.children.index(self.sys_import)
+ stmt_container = containing_stmt.parent
+ new_import = pytree.Node(syms.import_name,
+ [Name(u"import"), Name(u"atexit", u" ")]
+ )
+ new = pytree.Node(syms.simple_stmt, [new_import])
+ containing_stmt.insert_child(position + 1, Newline())
+ containing_stmt.insert_child(position + 2, new)
diff --git a/lib/python2.7/lib2to3/fixes/fix_filter.py b/lib/python2.7/lib2to3/fixes/fix_filter.py
new file mode 100644
index 0000000..18ee2ff
--- /dev/null
+++ b/lib/python2.7/lib2to3/fixes/fix_filter.py
@@ -0,0 +1,76 @@
+# Copyright 2007 Google, Inc. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+"""Fixer that changes filter(F, X) into list(filter(F, X)).
+
+We avoid the transformation if the filter() call is directly contained
+in iter(<>), list(<>), tuple(<>), sorted(<>), ...join(<>), or
+for V in <>:.
+
+NOTE: This is still not correct if the original code was depending on
+filter(F, X) to return a string if X is a string and a tuple if X is a
+tuple. That would require type inference, which we don't do. Let
+Python 2.6 figure it out.
+"""
+
+# Local imports
+from ..pgen2 import token
+from .. import fixer_base
+from ..fixer_util import Name, Call, ListComp, in_special_context
+
+class FixFilter(fixer_base.ConditionalFix):
+ BM_compatible = True
+
+ PATTERN = """
+ filter_lambda=power<
+ 'filter'
+ trailer<
+ '('
+ arglist<
+ lambdef< 'lambda'
+ (fp=NAME | vfpdef< '(' fp=NAME ')'> ) ':' xp=any
+ >
+ ','
+ it=any
+ >
+ ')'
+ >
+ >
+ |
+ power<
+ 'filter'
+ trailer< '(' arglist< none='None' ',' seq=any > ')' >
+ >
+ |
+ power<
+ 'filter'
+ args=trailer< '(' [any] ')' >
+ >
+ """
+
+ skip_on = "future_builtins.filter"
+
+ def transform(self, node, results):
+ if self.should_skip(node):
+ return
+
+ if "filter_lambda" in results:
+ new = ListComp(results.get("fp").clone(),
+ results.get("fp").clone(),
+ results.get("it").clone(),
+ results.get("xp").clone())
+
+ elif "none" in results:
+ new = ListComp(Name(u"_f"),
+ Name(u"_f"),
+ results["seq"].clone(),
+ Name(u"_f"))
+
+ else:
+ if in_special_context(node):
+ return None
+ new = node.clone()
+ new.prefix = u""
+ new = Call(Name(u"list"), [new])
+ new.prefix = node.prefix
+ return new
diff --git a/lib/python2.7/lib2to3/fixes/fix_funcattrs.py b/lib/python2.7/lib2to3/fixes/fix_funcattrs.py
new file mode 100644
index 0000000..9e45c02
--- /dev/null
+++ b/lib/python2.7/lib2to3/fixes/fix_funcattrs.py
@@ -0,0 +1,21 @@
+"""Fix function attribute names (f.func_x -> f.__x__)."""
+# Author: Collin Winter
+
+# Local imports
+from .. import fixer_base
+from ..fixer_util import Name
+
+
+class FixFuncattrs(fixer_base.BaseFix):
+ BM_compatible = True
+
+ PATTERN = """
+ power< any+ trailer< '.' attr=('func_closure' | 'func_doc' | 'func_globals'
+ | 'func_name' | 'func_defaults' | 'func_code'
+ | 'func_dict') > any* >
+ """
+
+ def transform(self, node, results):
+ attr = results["attr"][0]
+ attr.replace(Name((u"__%s__" % attr.value[5:]),
+ prefix=attr.prefix))
diff --git a/lib/python2.7/lib2to3/fixes/fix_future.py b/lib/python2.7/lib2to3/fixes/fix_future.py
new file mode 100644
index 0000000..fbcb86a
--- /dev/null
+++ b/lib/python2.7/lib2to3/fixes/fix_future.py
@@ -0,0 +1,22 @@
+"""Remove __future__ imports
+
+from __future__ import foo is replaced with an empty line.
+"""
+# Author: Christian Heimes
+
+# Local imports
+from .. import fixer_base
+from ..fixer_util import BlankLine
+
+class FixFuture(fixer_base.BaseFix):
+ BM_compatible = True
+
+ PATTERN = """import_from< 'from' module_name="__future__" 'import' any >"""
+
+ # This should be run last -- some things check for the import
+ run_order = 10
+
+ def transform(self, node, results):
+ new = BlankLine()
+ new.prefix = node.prefix
+ return new
diff --git a/lib/python2.7/lib2to3/fixes/fix_getcwdu.py b/lib/python2.7/lib2to3/fixes/fix_getcwdu.py
new file mode 100644
index 0000000..82233c8
--- /dev/null
+++ b/lib/python2.7/lib2to3/fixes/fix_getcwdu.py
@@ -0,0 +1,19 @@
+"""
+Fixer that changes os.getcwdu() to os.getcwd().
+"""
+# Author: Victor Stinner
+
+# Local imports
+from .. import fixer_base
+from ..fixer_util import Name
+
+class FixGetcwdu(fixer_base.BaseFix):
+ BM_compatible = True
+
+ PATTERN = """
+ power< 'os' trailer< dot='.' name='getcwdu' > any* >
+ """
+
+ def transform(self, node, results):
+ name = results["name"]
+ name.replace(Name(u"getcwd", prefix=name.prefix))
diff --git a/lib/python2.7/lib2to3/fixes/fix_has_key.py b/lib/python2.7/lib2to3/fixes/fix_has_key.py
new file mode 100644
index 0000000..bead4cb
--- /dev/null
+++ b/lib/python2.7/lib2to3/fixes/fix_has_key.py
@@ -0,0 +1,110 @@
+# Copyright 2006 Google, Inc. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+"""Fixer for has_key().
+
+Calls to .has_key() methods are expressed in terms of the 'in'
+operator:
+
+ d.has_key(k) -> k in d
+
+CAVEATS:
+1) While the primary target of this fixer is dict.has_key(), the
+ fixer will change any has_key() method call, regardless of its
+ class.
+
+2) Cases like this will not be converted:
+
+ m = d.has_key
+ if m(k):
+ ...
+
+ Only *calls* to has_key() are converted. While it is possible to
+ convert the above to something like
+
+ m = d.__contains__
+ if m(k):
+ ...
+
+ this is currently not done.
+"""
+
+# Local imports
+from .. import pytree
+from ..pgen2 import token
+from .. import fixer_base
+from ..fixer_util import Name, parenthesize
+
+
+class FixHasKey(fixer_base.BaseFix):
+ BM_compatible = True
+
+ PATTERN = """
+ anchor=power<
+ before=any+
+ trailer< '.' 'has_key' >
+ trailer<
+ '('
+ ( not(arglist | argument<any '=' any>) arg=any
+ | arglist<(not argument<any '=' any>) arg=any ','>
+ )
+ ')'
+ >
+ after=any*
+ >
+ |
+ negation=not_test<
+ 'not'
+ anchor=power<
+ before=any+
+ trailer< '.' 'has_key' >
+ trailer<
+ '('
+ ( not(arglist | argument<any '=' any>) arg=any
+ | arglist<(not argument<any '=' any>) arg=any ','>
+ )
+ ')'
+ >
+ >
+ >
+ """
+
+ def transform(self, node, results):
+ assert results
+ syms = self.syms
+ if (node.parent.type == syms.not_test and
+ self.pattern.match(node.parent)):
+ # Don't transform a node matching the first alternative of the
+ # pattern when its parent matches the second alternative
+ return None
+ negation = results.get("negation")
+ anchor = results["anchor"]
+ prefix = node.prefix
+ before = [n.clone() for n in results["before"]]
+ arg = results["arg"].clone()
+ after = results.get("after")
+ if after:
+ after = [n.clone() for n in after]
+ if arg.type in (syms.comparison, syms.not_test, syms.and_test,
+ syms.or_test, syms.test, syms.lambdef, syms.argument):
+ arg = parenthesize(arg)
+ if len(before) == 1:
+ before = before[0]
+ else:
+ before = pytree.Node(syms.power, before)
+ before.prefix = u" "
+ n_op = Name(u"in", prefix=u" ")
+ if negation:
+ n_not = Name(u"not", prefix=u" ")
+ n_op = pytree.Node(syms.comp_op, (n_not, n_op))
+ new = pytree.Node(syms.comparison, (arg, n_op, before))
+ if after:
+ new = parenthesize(new)
+ new = pytree.Node(syms.power, (new,) + tuple(after))
+ if node.parent.type in (syms.comparison, syms.expr, syms.xor_expr,
+ syms.and_expr, syms.shift_expr,
+ syms.arith_expr, syms.term,
+ syms.factor, syms.power):
+ new = parenthesize(new)
+ new.prefix = prefix
+ return new
diff --git a/lib/python2.7/lib2to3/fixes/fix_idioms.py b/lib/python2.7/lib2to3/fixes/fix_idioms.py
new file mode 100644
index 0000000..37b6eef
--- /dev/null
+++ b/lib/python2.7/lib2to3/fixes/fix_idioms.py
@@ -0,0 +1,152 @@
+"""Adjust some old Python 2 idioms to their modern counterparts.
+
+* Change some type comparisons to isinstance() calls:
+ type(x) == T -> isinstance(x, T)
+ type(x) is T -> isinstance(x, T)
+ type(x) != T -> not isinstance(x, T)
+ type(x) is not T -> not isinstance(x, T)
+
+* Change "while 1:" into "while True:".
+
+* Change both
+
+ v = list(EXPR)
+ v.sort()
+ foo(v)
+
+and the more general
+
+ v = EXPR
+ v.sort()
+ foo(v)
+
+into
+
+ v = sorted(EXPR)
+ foo(v)
+"""
+# Author: Jacques Frechet, Collin Winter
+
+# Local imports
+from .. import fixer_base
+from ..fixer_util import Call, Comma, Name, Node, BlankLine, syms
+
+CMP = "(n='!=' | '==' | 'is' | n=comp_op< 'is' 'not' >)"
+TYPE = "power< 'type' trailer< '(' x=any ')' > >"
+
+class FixIdioms(fixer_base.BaseFix):
+ explicit = True # The user must ask for this fixer
+
+ PATTERN = r"""
+ isinstance=comparison< %s %s T=any >
+ |
+ isinstance=comparison< T=any %s %s >
+ |
+ while_stmt< 'while' while='1' ':' any+ >
+ |
+ sorted=any<
+ any*
+ simple_stmt<
+ expr_stmt< id1=any '='
+ power< list='list' trailer< '(' (not arglist<any+>) any ')' > >
+ >
+ '\n'
+ >
+ sort=
+ simple_stmt<
+ power< id2=any
+ trailer< '.' 'sort' > trailer< '(' ')' >
+ >
+ '\n'
+ >
+ next=any*
+ >
+ |
+ sorted=any<
+ any*
+ simple_stmt< expr_stmt< id1=any '=' expr=any > '\n' >
+ sort=
+ simple_stmt<
+ power< id2=any
+ trailer< '.' 'sort' > trailer< '(' ')' >
+ >
+ '\n'
+ >
+ next=any*
+ >
+ """ % (TYPE, CMP, CMP, TYPE)
+
+ def match(self, node):
+ r = super(FixIdioms, self).match(node)
+ # If we've matched one of the sort/sorted subpatterns above, we
+ # want to reject matches where the initial assignment and the
+ # subsequent .sort() call involve different identifiers.
+ if r and "sorted" in r:
+ if r["id1"] == r["id2"]:
+ return r
+ return None
+ return r
+
+ def transform(self, node, results):
+ if "isinstance" in results:
+ return self.transform_isinstance(node, results)
+ elif "while" in results:
+ return self.transform_while(node, results)
+ elif "sorted" in results:
+ return self.transform_sort(node, results)
+ else:
+ raise RuntimeError("Invalid match")
+
+ def transform_isinstance(self, node, results):
+ x = results["x"].clone() # The thing inside of type()
+ T = results["T"].clone() # The type being compared against
+ x.prefix = u""
+ T.prefix = u" "
+ test = Call(Name(u"isinstance"), [x, Comma(), T])
+ if "n" in results:
+ test.prefix = u" "
+ test = Node(syms.not_test, [Name(u"not"), test])
+ test.prefix = node.prefix
+ return test
+
+ def transform_while(self, node, results):
+ one = results["while"]
+ one.replace(Name(u"True", prefix=one.prefix))
+
+ def transform_sort(self, node, results):
+ sort_stmt = results["sort"]
+ next_stmt = results["next"]
+ list_call = results.get("list")
+ simple_expr = results.get("expr")
+
+ if list_call:
+ list_call.replace(Name(u"sorted", prefix=list_call.prefix))
+ elif simple_expr:
+ new = simple_expr.clone()
+ new.prefix = u""
+ simple_expr.replace(Call(Name(u"sorted"), [new],
+ prefix=simple_expr.prefix))
+ else:
+ raise RuntimeError("should not have reached here")
+ sort_stmt.remove()
+
+ btwn = sort_stmt.prefix
+ # Keep any prefix lines between the sort_stmt and the list_call and
+ # shove them right after the sorted() call.
+ if u"\n" in btwn:
+ if next_stmt:
+ # The new prefix should be everything from the sort_stmt's
+ # prefix up to the last newline, then the old prefix after a new
+ # line.
+ prefix_lines = (btwn.rpartition(u"\n")[0], next_stmt[0].prefix)
+ next_stmt[0].prefix = u"\n".join(prefix_lines)
+ else:
+ assert list_call.parent
+ assert list_call.next_sibling is None
+ # Put a blank line after list_call and set its prefix.
+ end_line = BlankLine()
+ list_call.parent.append_child(end_line)
+ assert list_call.next_sibling is end_line
+ # The new prefix should be everything up to the first new line
+ # of sort_stmt's prefix.
+ end_line.prefix = btwn.rpartition(u"\n")[0]
diff --git a/lib/python2.7/lib2to3/fixes/fix_import.py b/lib/python2.7/lib2to3/fixes/fix_import.py
new file mode 100644
index 0000000..88e9d10
--- /dev/null
+++ b/lib/python2.7/lib2to3/fixes/fix_import.py
@@ -0,0 +1,99 @@
+"""Fixer for import statements.
+If spam is being imported from the local directory, this import:
+ from spam import eggs
+Becomes:
+ from .spam import eggs
+
+And this import:
+ import spam
+Becomes:
+ from . import spam
+"""
+
+# Local imports
+from .. import fixer_base
+from os.path import dirname, join, exists, sep
+from ..fixer_util import FromImport, syms, token
+
+
+def traverse_imports(names):
+ """
+ Walks over all the names imported in a dotted_as_names node.
+ """
+ pending = [names]
+ while pending:
+ node = pending.pop()
+ if node.type == token.NAME:
+ yield node.value
+ elif node.type == syms.dotted_name:
+ yield "".join([ch.value for ch in node.children])
+ elif node.type == syms.dotted_as_name:
+ pending.append(node.children[0])
+ elif node.type == syms.dotted_as_names:
+ pending.extend(node.children[::-2])
+ else:
+ raise AssertionError("unknown node type")
+
+
+class FixImport(fixer_base.BaseFix):
+ BM_compatible = True
+
+ PATTERN = """
+ import_from< 'from' imp=any 'import' ['('] any [')'] >
+ |
+ import_name< 'import' imp=any >
+ """
+
+ def start_tree(self, tree, name):
+ super(FixImport, self).start_tree(tree, name)
+ self.skip = "absolute_import" in tree.future_features
+
+ def transform(self, node, results):
+ if self.skip:
+ return
+ imp = results['imp']
+
+ if node.type == syms.import_from:
+ # Some imps are plain (eg: 'import ham'),
+ # some are dotted (eg: 'import ham.eggs'),
+ # and some are aliased (eg: 'import ham.eggs as spam').
+ # Hence, the loop
+ while not hasattr(imp, 'value'):
+ imp = imp.children[0]
+ if self.probably_a_local_import(imp.value):
+ imp.value = u"." + imp.value
+ imp.changed()
+ else:
+ have_local = False
+ have_absolute = False
+ for mod_name in traverse_imports(imp):
+ if self.probably_a_local_import(mod_name):
+ have_local = True
+ else:
+ have_absolute = True
+ if have_absolute:
+ if have_local:
+ # We won't handle both sibling and absolute imports in the
+ # same statement at the moment.
+ self.warning(node, "absolute and local imports together")
+ return
+
+ new = FromImport(u".", [imp])
+ new.prefix = node.prefix
+ return new
+
+ def probably_a_local_import(self, imp_name):
+ if imp_name.startswith(u"."):
+ # Relative imports are certainly not local imports.
+ return False
+ imp_name = imp_name.split(u".", 1)[0]
+ base_path = dirname(self.filename)
+ base_path = join(base_path, imp_name)
+ # If there is no __init__.py next to the file, it's not in a package,
+ # so it can't be a relative import.
+ if not exists(join(dirname(base_path), "__init__.py")):
+ return False
+ for ext in [".py", sep, ".pyc", ".so", ".sl", ".pyd"]:
+ if exists(base_path + ext):
+ return True
+ return False
diff --git a/lib/python2.7/lib2to3/fixes/fix_imports.py b/lib/python2.7/lib2to3/fixes/fix_imports.py
new file mode 100644
index 0000000..93c9e67
--- /dev/null
+++ b/lib/python2.7/lib2to3/fixes/fix_imports.py
@@ -0,0 +1,145 @@
+"""Fix incompatible imports and module references."""
+# Authors: Collin Winter, Nick Edds
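+#
+# For example (illustrative sketches):
+#     import cPickle                  ->  import pickle
+#     from cStringIO import StringIO  ->  from io import StringIO
+#     cPickle.dumps(obj)              ->  pickle.dumps(obj)
+# (the last one only after a plain 'import cPickle' has been seen)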
+
+# Local imports
+from .. import fixer_base
+from ..fixer_util import Name, attr_chain
+
+MAPPING = {'StringIO': 'io',
+ 'cStringIO': 'io',
+ 'cPickle': 'pickle',
+ '__builtin__' : 'builtins',
+ 'copy_reg': 'copyreg',
+ 'Queue': 'queue',
+ 'SocketServer': 'socketserver',
+ 'ConfigParser': 'configparser',
+ 'repr': 'reprlib',
+ 'FileDialog': 'tkinter.filedialog',
+ 'tkFileDialog': 'tkinter.filedialog',
+ 'SimpleDialog': 'tkinter.simpledialog',
+ 'tkSimpleDialog': 'tkinter.simpledialog',
+ 'tkColorChooser': 'tkinter.colorchooser',
+ 'tkCommonDialog': 'tkinter.commondialog',
+ 'Dialog': 'tkinter.dialog',
+ 'Tkdnd': 'tkinter.dnd',
+ 'tkFont': 'tkinter.font',
+ 'tkMessageBox': 'tkinter.messagebox',
+ 'ScrolledText': 'tkinter.scrolledtext',
+ 'Tkconstants': 'tkinter.constants',
+ 'Tix': 'tkinter.tix',
+ 'ttk': 'tkinter.ttk',
+ 'Tkinter': 'tkinter',
+ 'markupbase': '_markupbase',
+ '_winreg': 'winreg',
+ 'thread': '_thread',
+ 'dummy_thread': '_dummy_thread',
+ # anydbm and whichdb are handled by fix_imports2
+ 'dbhash': 'dbm.bsd',
+ 'dumbdbm': 'dbm.dumb',
+ 'dbm': 'dbm.ndbm',
+ 'gdbm': 'dbm.gnu',
+ 'xmlrpclib': 'xmlrpc.client',
+ 'DocXMLRPCServer': 'xmlrpc.server',
+ 'SimpleXMLRPCServer': 'xmlrpc.server',
+ 'httplib': 'http.client',
+ 'htmlentitydefs' : 'html.entities',
+ 'HTMLParser' : 'html.parser',
+ 'Cookie': 'http.cookies',
+ 'cookielib': 'http.cookiejar',
+ 'BaseHTTPServer': 'http.server',
+ 'SimpleHTTPServer': 'http.server',
+ 'CGIHTTPServer': 'http.server',
+ #'test.test_support': 'test.support',
+ 'commands': 'subprocess',
+ 'UserString' : 'collections',
+ 'UserList' : 'collections',
+ 'urlparse' : 'urllib.parse',
+ 'robotparser' : 'urllib.robotparser',
+}
+
+
+def alternates(members):
+ return "(" + "|".join(map(repr, members)) + ")"
+
+
+def build_pattern(mapping=MAPPING):
+ mod_list = ' | '.join(["module_name='%s'" % key for key in mapping])
+ bare_names = alternates(mapping.keys())
+
+ yield """name_import=import_name< 'import' ((%s) |
+ multiple_imports=dotted_as_names< any* (%s) any* >) >
+ """ % (mod_list, mod_list)
+ yield """import_from< 'from' (%s) 'import' ['(']
+ ( any | import_as_name< any 'as' any > |
+ import_as_names< any* >) [')'] >
+ """ % mod_list
+ yield """import_name< 'import' (dotted_as_name< (%s) 'as' any > |
+ multiple_imports=dotted_as_names<
+ any* dotted_as_name< (%s) 'as' any > any* >) >
+ """ % (mod_list, mod_list)
+
+ # Find usages of module members in code e.g. thread.foo(bar)
+ yield "power< bare_with_attr=(%s) trailer<'.' any > any* >" % bare_names
+
+
+class FixImports(fixer_base.BaseFix):
+
+ BM_compatible = True
+ keep_line_order = True
+ # This is overridden in fix_imports2.
+ mapping = MAPPING
+
+ # We want to run this fixer late, so fix_import doesn't try to make stdlib
+ # renames into relative imports.
+ run_order = 6
+
+ def build_pattern(self):
+ return "|".join(build_pattern(self.mapping))
+
+ def compile_pattern(self):
+ # We override this, so MAPPING can be programmatically altered and the
+ # changes will be reflected in PATTERN.
+ self.PATTERN = self.build_pattern()
+ super(FixImports, self).compile_pattern()
+
+ # Don't match the node if it's within another match.
+ def match(self, node):
+ match = super(FixImports, self).match
+ results = match(node)
+ if results:
+ # Module usage could be in the trailer of an attribute lookup, so we
+ # might have nested matches when "bare_with_attr" is present.
+ if "bare_with_attr" not in results and \
+ any(match(obj) for obj in attr_chain(node, "parent")):
+ return False
+ return results
+ return False
+
+ def start_tree(self, tree, filename):
+ super(FixImports, self).start_tree(tree, filename)
+ self.replace = {}
+
+ def transform(self, node, results):
+ import_mod = results.get("module_name")
+ if import_mod:
+ mod_name = import_mod.value
+ new_name = unicode(self.mapping[mod_name])
+ import_mod.replace(Name(new_name, prefix=import_mod.prefix))
+ if "name_import" in results:
+ # If it's not a "from x import x, y" or "import x as y" import,
+ # mark its usage to be replaced.
+ self.replace[mod_name] = new_name
+ if "multiple_imports" in results:
+ # This is a nasty hack to fix multiple imports on a line (e.g.,
+ # "import StringIO, urlparse"). The problem is that I can't
+ # figure out an easy way to make a pattern recognize the keys of
+ # MAPPING randomly sprinkled in an import statement.
+ results = self.match(node)
+ if results:
+ self.transform(node, results)
+ else:
+ # Replace usage of the module.
+ bare_name = results["bare_with_attr"][0]
+ new_name = self.replace.get(bare_name.value)
+ if new_name:
+ bare_name.replace(Name(new_name, prefix=bare_name.prefix))
diff --git a/lib/python2.7/lib2to3/fixes/fix_imports2.py b/lib/python2.7/lib2to3/fixes/fix_imports2.py
new file mode 100644
index 0000000..9a33c67
--- /dev/null
+++ b/lib/python2.7/lib2to3/fixes/fix_imports2.py
@@ -0,0 +1,16 @@
+"""Fix incompatible imports and module references that must be fixed after
+fix_imports."""
+from . import fix_imports
+
+
+MAPPING = {
+ 'whichdb': 'dbm',
+ 'anydbm': 'dbm',
+ }
+
+
+class FixImports2(fix_imports.FixImports):
+
+ run_order = 7
+
+ mapping = MAPPING
diff --git a/lib/python2.7/lib2to3/fixes/fix_input.py b/lib/python2.7/lib2to3/fixes/fix_input.py
new file mode 100644
index 0000000..728636b
--- /dev/null
+++ b/lib/python2.7/lib2to3/fixes/fix_input.py
@@ -0,0 +1,26 @@
+"""Fixer that changes input(...) into eval(input(...))."""
+# Author: Andre Roberge
+
+# Local imports
+from .. import fixer_base
+from ..fixer_util import Call, Name
+from .. import patcomp
+
+
+context = patcomp.compile_pattern("power< 'eval' trailer< '(' any ')' > >")
+
+
+class FixInput(fixer_base.BaseFix):
+ BM_compatible = True
+ PATTERN = """
+ power< 'input' args=trailer< '(' [any] ')' > >
+ """
+
+ def transform(self, node, results):
+ # If we're already wrapped in an eval() call, we're done.
+ if context.match(node.parent.parent):
+ return
+
+ new = node.clone()
+ new.prefix = u""
+ return Call(Name(u"eval"), [new], prefix=node.prefix)
diff --git a/lib/python2.7/lib2to3/fixes/fix_intern.py b/lib/python2.7/lib2to3/fixes/fix_intern.py
new file mode 100644
index 0000000..285c126
--- /dev/null
+++ b/lib/python2.7/lib2to3/fixes/fix_intern.py
@@ -0,0 +1,56 @@
+# Copyright 2006 Georg Brandl.
+# Licensed to PSF under a Contributor Agreement.
+
+"""Fixer for intern().
+
+intern(s) -> sys.intern(s)"""
+
+# Local imports
+from .. import pytree
+from .. import fixer_base
+from ..fixer_util import Name, Attr, touch_import
+
+
+class FixIntern(fixer_base.BaseFix):
+ BM_compatible = True
+ order = "pre"
+
+ PATTERN = """
+ power< 'intern'
+ trailer< lpar='('
+ ( not(arglist | argument<any '=' any>) obj=any
+ | obj=arglist<(not argument<any '=' any>) any ','> )
+ rpar=')' >
+ after=any*
+ >
+ """
+
+ def transform(self, node, results):
+ if results:
+ # I feel like we should be able to express this logic in the
+ # PATTERN above but I don't know how to do it so...
+ obj = results['obj']
+ if obj:
+ if obj.type == self.syms.star_expr:
+ return # Make no change.
+ if (obj.type == self.syms.argument and
+ obj.children[0].value == '**'):
+ return # Make no change.
+ syms = self.syms
+ obj = results["obj"].clone()
+ if obj.type == syms.arglist:
+ newarglist = obj.clone()
+ else:
+ newarglist = pytree.Node(syms.arglist, [obj.clone()])
+ after = results["after"]
+ if after:
+ after = [n.clone() for n in after]
+ new = pytree.Node(syms.power,
+ Attr(Name(u"sys"), Name(u"intern")) +
+ [pytree.Node(syms.trailer,
+ [results["lpar"].clone(),
+ newarglist,
+ results["rpar"].clone()])] + after)
+ new.prefix = node.prefix
+ touch_import(None, u'sys', node)
+ return new
diff --git a/lib/python2.7/lib2to3/fixes/fix_isinstance.py b/lib/python2.7/lib2to3/fixes/fix_isinstance.py
new file mode 100644
index 0000000..4b04c8f
--- /dev/null
+++ b/lib/python2.7/lib2to3/fixes/fix_isinstance.py
@@ -0,0 +1,52 @@
+# Copyright 2008 Armin Ronacher.
+# Licensed to PSF under a Contributor Agreement.
+
+"""Fixer that cleans up a tuple argument to isinstance after the tokens
+in it were fixed. This is mainly used to remove double occurrences of
+tokens as a leftover of the long -> int / unicode -> str conversion.
+
+eg. isinstance(x, (int, long)) -> isinstance(x, (int, int))
+ -> isinstance(x, int)
+"""
+
+from .. import fixer_base
+from ..fixer_util import token
+
+
+class FixIsinstance(fixer_base.BaseFix):
+ BM_compatible = True
+ PATTERN = """
+ power<
+ 'isinstance'
+ trailer< '(' arglist< any ',' atom< '('
+ args=testlist_gexp< any+ >
+ ')' > > ')' >
+ >
+ """
+
+ run_order = 6
+
+ def transform(self, node, results):
+ names_inserted = set()
+ testlist = results["args"]
+ args = testlist.children
+ new_args = []
+ iterator = enumerate(args)
+ for idx, arg in iterator:
+ if arg.type == token.NAME and arg.value in names_inserted:
+ if idx < len(args) - 1 and args[idx + 1].type == token.COMMA:
+ iterator.next()
+ continue
+ else:
+ new_args.append(arg)
+ if arg.type == token.NAME:
+ names_inserted.add(arg.value)
+ if new_args and new_args[-1].type == token.COMMA:
+ del new_args[-1]
+ if len(new_args) == 1:
+ atom = testlist.parent
+ new_args[0].prefix = atom.prefix
+ atom.replace(new_args[0])
+ else:
+ args[:] = new_args
+ node.changed()
diff --git a/lib/python2.7/lib2to3/fixes/fix_itertools.py b/lib/python2.7/lib2to3/fixes/fix_itertools.py
new file mode 100644
index 0000000..067641b
--- /dev/null
+++ b/lib/python2.7/lib2to3/fixes/fix_itertools.py
@@ -0,0 +1,43 @@
+""" Fixer for itertools.(imap|ifilter|izip) --> (map|filter|zip) and
+ itertools.ifilterfalse --> itertools.filterfalse (bugs 2360-2363)
+
+ imports from itertools are fixed in fix_itertools_imports.py
+
+ If itertools is imported as something else (e.g. import itertools as it;
+ it.izip(spam, eggs)), the method calls will not get fixed.
+ """
+
+# Local imports
+from .. import fixer_base
+from ..fixer_util import Name
+
+class FixItertools(fixer_base.BaseFix):
+ BM_compatible = True
+ it_funcs = "('imap'|'ifilter'|'izip'|'izip_longest'|'ifilterfalse')"
+ PATTERN = """
+ power< it='itertools'
+ trailer<
+ dot='.' func=%(it_funcs)s > trailer< '(' [any] ')' > >
+ |
+ power< func=%(it_funcs)s trailer< '(' [any] ')' > >
+ """ %(locals())
+
+ # Needs to be run after fix_(map|zip|filter)
+ run_order = 6
+
+ def transform(self, node, results):
+ prefix = None
+ func = results['func'][0]
+ if ('it' in results and
+ func.value not in (u'ifilterfalse', u'izip_longest')):
+ dot, it = (results['dot'], results['it'])
+ # Remove the 'itertools'
+ prefix = it.prefix
+ it.remove()
+ # Replace the node which contains ('.', 'function') with the
+ # function (to be consistent with the second part of the pattern)
+ dot.remove()
+ func.parent.replace(func)
+
+ prefix = prefix or func.prefix
+ func.replace(Name(func.value[1:], prefix=prefix))
diff --git a/lib/python2.7/lib2to3/fixes/fix_itertools_imports.py b/lib/python2.7/lib2to3/fixes/fix_itertools_imports.py
new file mode 100644
index 0000000..28610cf
--- /dev/null
+++ b/lib/python2.7/lib2to3/fixes/fix_itertools_imports.py
@@ -0,0 +1,57 @@
+""" Fixer for imports of itertools.(imap|ifilter|izip|ifilterfalse) """
+
+# Local imports
+from lib2to3 import fixer_base
+from lib2to3.fixer_util import BlankLine, syms, token
+
+
+class FixItertoolsImports(fixer_base.BaseFix):
+ BM_compatible = True
+ PATTERN = """
+ import_from< 'from' 'itertools' 'import' imports=any >
+ """ %(locals())
+
+ def transform(self, node, results):
+ imports = results['imports']
+ if imports.type == syms.import_as_name or not imports.children:
+ children = [imports]
+ else:
+ children = imports.children
+ for child in children[::2]:
+ if child.type == token.NAME:
+ member = child.value
+ name_node = child
+ elif child.type == token.STAR:
+ # Just leave the import as is.
+ return
+ else:
+ assert child.type == syms.import_as_name
+ name_node = child.children[0]
+ member_name = name_node.value
+ if member_name in (u'imap', u'izip', u'ifilter'):
+ child.value = None
+ child.remove()
+ elif member_name in (u'ifilterfalse', u'izip_longest'):
+ node.changed()
+ name_node.value = (u'filterfalse' if member_name[1] == u'f'
+ else u'zip_longest')
+
+ # Make sure the import statement is still sane
+ children = imports.children[:] or [imports]
+ remove_comma = True
+ for child in children:
+ if remove_comma and child.type == token.COMMA:
+ child.remove()
+ else:
+ remove_comma ^= True
+
+ while children and children[-1].type == token.COMMA:
+ children.pop().remove()
+
+ # If there are no imports left, just get rid of the entire statement
+ if (not (imports.children or getattr(imports, 'value', None)) or
+ imports.parent is None):
+ p = node.prefix
+ node = BlankLine()
+ node.prefix = p
+ return node
diff --git a/lib/python2.7/lib2to3/fixes/fix_long.py b/lib/python2.7/lib2to3/fixes/fix_long.py
new file mode 100644
index 0000000..5dddde0
--- /dev/null
+++ b/lib/python2.7/lib2to3/fixes/fix_long.py
@@ -0,0 +1,19 @@
+# Copyright 2006 Google, Inc. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+"""Fixer that turns 'long' into 'int' everywhere.
+"""
+
+# Local imports
+from lib2to3 import fixer_base
+from lib2to3.fixer_util import is_probably_builtin
+
+
+class FixLong(fixer_base.BaseFix):
+ BM_compatible = True
+ PATTERN = "'long'"
+
+ def transform(self, node, results):
+ if is_probably_builtin(node):
+ node.value = u"int"
+ node.changed()
diff --git a/lib/python2.7/lib2to3/fixes/fix_map.py b/lib/python2.7/lib2to3/fixes/fix_map.py
new file mode 100644
index 0000000..7a7d0db
--- /dev/null
+++ b/lib/python2.7/lib2to3/fixes/fix_map.py
@@ -0,0 +1,91 @@
+# Copyright 2007 Google, Inc. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+"""Fixer that changes map(F, ...) into list(map(F, ...)) unless there
+exists a 'from future_builtins import map' statement in the top-level
+namespace.
+
+As a special case, map(None, X) is changed into list(X). (This is
+necessary because the semantics are changed in this case: in Python 3,
+map(None, X) is an error, since None is not callable.)
+
+We avoid the transformation (except for the special case mentioned
+above) if the map() call is directly contained in iter(<>), list(<>),
+tuple(<>), sorted(<>), ...join(<>), or for V in <>:.
+
+NOTE: This is still not correct if the original code was depending on
+map(F, X, Y, ...) to go on until the longest argument is exhausted,
+substituting None for missing values -- like zip(), it now stops as
+soon as the shortest argument is exhausted.
+"""
+
+# Local imports
+from ..pgen2 import token
+from .. import fixer_base
+from ..fixer_util import Name, Call, ListComp, in_special_context
+from ..pygram import python_symbols as syms
+
+class FixMap(fixer_base.ConditionalFix):
+ BM_compatible = True
+
+ PATTERN = """
+ map_none=power<
+ 'map'
+ trailer< '(' arglist< 'None' ',' arg=any [','] > ')' >
+ >
+ |
+ map_lambda=power<
+ 'map'
+ trailer<
+ '('
+ arglist<
+ lambdef< 'lambda'
+ (fp=NAME | vfpdef< '(' fp=NAME ')'> ) ':' xp=any
+ >
+ ','
+ it=any
+ >
+ ')'
+ >
+ >
+ |
+ power<
+ 'map' trailer< '(' [arglist=any] ')' >
+ >
+ """
+
+ skip_on = 'future_builtins.map'
+
+ def transform(self, node, results):
+ if self.should_skip(node):
+ return
+
+ if node.parent.type == syms.simple_stmt:
+ self.warning(node, "You should use a for loop here")
+ new = node.clone()
+ new.prefix = u""
+ new = Call(Name(u"list"), [new])
+ elif "map_lambda" in results:
+ new = ListComp(results["xp"].clone(),
+ results["fp"].clone(),
+ results["it"].clone())
+ else:
+ if "map_none" in results:
+ new = results["arg"].clone()
+ else:
+ if "arglist" in results:
+ args = results["arglist"]
+ if args.type == syms.arglist and \
+ args.children[0].type == token.NAME and \
+ args.children[0].value == "None":
+ self.warning(node, "cannot convert map(None, ...) "
+ "with multiple arguments because map() "
+ "now truncates to the shortest sequence")
+ return
+ if in_special_context(node):
+ return None
+ new = node.clone()
+ new.prefix = u""
+ new = Call(Name(u"list"), [new])
+ new.prefix = node.prefix
+ return new
diff --git a/lib/python2.7/lib2to3/fixes/fix_metaclass.py b/lib/python2.7/lib2to3/fixes/fix_metaclass.py
new file mode 100644
index 0000000..45f9937
--- /dev/null
+++ b/lib/python2.7/lib2to3/fixes/fix_metaclass.py
@@ -0,0 +1,228 @@
+"""Fixer for __metaclass__ = X -> (metaclass=X) methods.
+
+ The various forms of classdef (inherits nothing, inherits once, inherits
+ many) don't parse the same in the CST, so we look at ALL classes for
+ a __metaclass__ and, if we find one, normalize the inherits to all be
+ an arglist.
+
+ For one-liner classes ('class X: pass') there is no indent/dedent so
+ we normalize those into having a suite.
+
+ Moving the __metaclass__ into the classdef can also cause the class
+ body to be empty so there is some special casing for that as well.
+
+ This fixer also tries very hard to keep original indenting and spacing
+ in all those corner cases.
+
+"""
+# Author: Jack Diederich
+
+# Local imports
+from .. import fixer_base
+from ..pygram import token
+from ..fixer_util import Name, syms, Node, Leaf
+
+
+def has_metaclass(parent):
+ """ we have to check the cls_node without changing it.
+ There are two possibilities:
+ 1) clsdef => suite => simple_stmt => expr_stmt => Leaf('__meta')
+ 2) clsdef => simple_stmt => expr_stmt => Leaf('__meta')
+ """
+ for node in parent.children:
+ if node.type == syms.suite:
+ return has_metaclass(node)
+ elif node.type == syms.simple_stmt and node.children:
+ expr_node = node.children[0]
+ if expr_node.type == syms.expr_stmt and expr_node.children:
+ left_side = expr_node.children[0]
+ if isinstance(left_side, Leaf) and \
+ left_side.value == '__metaclass__':
+ return True
+ return False
+
+
+def fixup_parse_tree(cls_node):
+ """ one-line classes don't get a suite in the parse tree so we add
+ one to normalize the tree
+ """
+ for node in cls_node.children:
+ if node.type == syms.suite:
+ # already in the preferred format, do nothing
+ return
+
+ # !%@#! one-liners have no suite node, so we have to fake one up
+ for i, node in enumerate(cls_node.children):
+ if node.type == token.COLON:
+ break
+ else:
+ raise ValueError("No class suite and no ':'!")
+
+ # move everything into a suite node
+ suite = Node(syms.suite, [])
+ while cls_node.children[i+1:]:
+ move_node = cls_node.children[i+1]
+ suite.append_child(move_node.clone())
+ move_node.remove()
+ cls_node.append_child(suite)
+ node = suite
+
+
+def fixup_simple_stmt(parent, i, stmt_node):
+ """ if there is a semi-colon all the parts count as part of the same
+ simple_stmt. We just want the __metaclass__ part so we move
+ everything after the semi-colon into its own simple_stmt node
+ """
+ for semi_ind, node in enumerate(stmt_node.children):
+ if node.type == token.SEMI: # *sigh*
+ break
+ else:
+ return
+
+ node.remove() # kill the semicolon
+ new_expr = Node(syms.expr_stmt, [])
+ new_stmt = Node(syms.simple_stmt, [new_expr])
+ while stmt_node.children[semi_ind:]:
+ move_node = stmt_node.children[semi_ind]
+ new_expr.append_child(move_node.clone())
+ move_node.remove()
+ parent.insert_child(i, new_stmt)
+ new_leaf1 = new_stmt.children[0].children[0]
+ old_leaf1 = stmt_node.children[0].children[0]
+ new_leaf1.prefix = old_leaf1.prefix
+
+
+def remove_trailing_newline(node):
+ if node.children and node.children[-1].type == token.NEWLINE:
+ node.children[-1].remove()
+
+
+def find_metas(cls_node):
+ # find the suite node (Mmm, sweet nodes)
+ for node in cls_node.children:
+ if node.type == syms.suite:
+ break
+ else:
+ raise ValueError("No class suite!")
+
+ # look for simple_stmt[ expr_stmt[ Leaf('__metaclass__') ] ]
+ for i, simple_node in list(enumerate(node.children)):
+ if simple_node.type == syms.simple_stmt and simple_node.children:
+ expr_node = simple_node.children[0]
+ if expr_node.type == syms.expr_stmt and expr_node.children:
+ # Check if the expr_node is a simple assignment.
+ left_node = expr_node.children[0]
+ if isinstance(left_node, Leaf) and \
+ left_node.value == u'__metaclass__':
+ # We found an assignment to __metaclass__.
+ fixup_simple_stmt(node, i, simple_node)
+ remove_trailing_newline(simple_node)
+ yield (node, i, simple_node)
+
+
+def fixup_indent(suite):
+ """ If an INDENT is followed by a thing with a prefix then nuke the prefix
+ Otherwise we get in trouble when removing __metaclass__ at suite start
+ """
+ kids = suite.children[::-1]
+ # find the first indent
+ while kids:
+ node = kids.pop()
+ if node.type == token.INDENT:
+ break
+
+ # find the first Leaf
+ while kids:
+ node = kids.pop()
+ if isinstance(node, Leaf) and node.type != token.DEDENT:
+ if node.prefix:
+ node.prefix = u''
+ return
+ else:
+ kids.extend(node.children[::-1])
+
+
+class FixMetaclass(fixer_base.BaseFix):
+ BM_compatible = True
+
+ PATTERN = """
+ classdef<any*>
+ """
+
+ def transform(self, node, results):
+ if not has_metaclass(node):
+ return
+
+ fixup_parse_tree(node)
+
+ # find metaclasses, keep the last one
+ last_metaclass = None
+ for suite, i, stmt in find_metas(node):
+ last_metaclass = stmt
+ stmt.remove()
+
+ text_type = node.children[0].type # always Leaf(nnn, 'class')
+
+ # figure out what kind of classdef we have
+ if len(node.children) == 7:
+ # Node(classdef, ['class', 'name', '(', arglist, ')', ':', suite])
+ # 0 1 2 3 4 5 6
+ if node.children[3].type == syms.arglist:
+ arglist = node.children[3]
+ # Node(classdef, ['class', 'name', '(', 'Parent', ')', ':', suite])
+ else:
+ parent = node.children[3].clone()
+ arglist = Node(syms.arglist, [parent])
+ node.set_child(3, arglist)
+ elif len(node.children) == 6:
+ # Node(classdef, ['class', 'name', '(', ')', ':', suite])
+ # 0 1 2 3 4 5
+ arglist = Node(syms.arglist, [])
+ node.insert_child(3, arglist)
+ elif len(node.children) == 4:
+ # Node(classdef, ['class', 'name', ':', suite])
+ # 0 1 2 3
+ arglist = Node(syms.arglist, [])
+ node.insert_child(2, Leaf(token.RPAR, u')'))
+ node.insert_child(2, arglist)
+ node.insert_child(2, Leaf(token.LPAR, u'('))
+ else:
+ raise ValueError("Unexpected class definition")
+
+ # now stick the metaclass in the arglist
+ meta_txt = last_metaclass.children[0].children[0]
+ meta_txt.value = 'metaclass'
+ orig_meta_prefix = meta_txt.prefix
+
+ if arglist.children:
+ arglist.append_child(Leaf(token.COMMA, u','))
+ meta_txt.prefix = u' '
+ else:
+ meta_txt.prefix = u''
+
+ # compact the expression "metaclass = Meta" -> "metaclass=Meta"
+ expr_stmt = last_metaclass.children[0]
+ assert expr_stmt.type == syms.expr_stmt
+ expr_stmt.children[1].prefix = u''
+ expr_stmt.children[2].prefix = u''
+
+ arglist.append_child(last_metaclass)
+
+ fixup_indent(suite)
+
+ # check for empty suite
+ if not suite.children:
+ # one-liner that was just __metaclass__
+ suite.remove()
+ pass_leaf = Leaf(text_type, u'pass')
+ pass_leaf.prefix = orig_meta_prefix
+ node.append_child(pass_leaf)
+ node.append_child(Leaf(token.NEWLINE, u'\n'))
+
+ elif len(suite.children) > 1 and \
+ (suite.children[-2].type == token.INDENT and
+ suite.children[-1].type == token.DEDENT):
+ # there was only one line in the class body and it was __metaclass__
+ pass_leaf = Leaf(text_type, u'pass')
+ suite.insert_child(-1, pass_leaf)
+ suite.insert_child(-1, Leaf(token.NEWLINE, u'\n'))
diff --git a/lib/python2.7/lib2to3/fixes/fix_methodattrs.py b/lib/python2.7/lib2to3/fixes/fix_methodattrs.py
new file mode 100644
index 0000000..f3c1ecf
--- /dev/null
+++ b/lib/python2.7/lib2to3/fixes/fix_methodattrs.py
@@ -0,0 +1,24 @@
+"""Fix bound method attributes (method.im_? -> method.__?__).
+"""
+# Author: Christian Heimes
+
+# Local imports
+from .. import fixer_base
+from ..fixer_util import Name
+
+MAP = {
+ "im_func" : "__func__",
+ "im_self" : "__self__",
+ "im_class" : "__self__.__class__"
+ }
+
+class FixMethodattrs(fixer_base.BaseFix):
+ BM_compatible = True
+ PATTERN = """
+ power< any+ trailer< '.' attr=('im_func' | 'im_self' | 'im_class') > any* >
+ """
+
+ def transform(self, node, results):
+ attr = results["attr"][0]
+ new = unicode(MAP[attr.value])
+ attr.replace(Name(new, prefix=attr.prefix))
diff --git a/lib/python2.7/lib2to3/fixes/fix_ne.py b/lib/python2.7/lib2to3/fixes/fix_ne.py
new file mode 100644
index 0000000..7025980
--- /dev/null
+++ b/lib/python2.7/lib2to3/fixes/fix_ne.py
@@ -0,0 +1,23 @@
+# Copyright 2006 Google, Inc. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+"""Fixer that turns <> into !=."""
+
+# Local imports
+from .. import pytree
+from ..pgen2 import token
+from .. import fixer_base
+
+
+class FixNe(fixer_base.BaseFix):
+ # This is so simple that we don't need the pattern compiler.
+
+ _accept_type = token.NOTEQUAL
+
+ def match(self, node):
+ # Override
+ return node.value == u"<>"
+
+ def transform(self, node, results):
+ new = pytree.Leaf(token.NOTEQUAL, u"!=", prefix=node.prefix)
+ return new
diff --git a/lib/python2.7/lib2to3/fixes/fix_next.py b/lib/python2.7/lib2to3/fixes/fix_next.py
new file mode 100644
index 0000000..f021a9b
--- /dev/null
+++ b/lib/python2.7/lib2to3/fixes/fix_next.py
@@ -0,0 +1,103 @@
+"""Fixer for it.next() -> next(it), per PEP 3114."""
+# Author: Collin Winter
+
+# Things that currently aren't covered:
+# - listcomp "next" names aren't warned
+# - "with" statement targets aren't checked
+
+# Local imports
+from ..pgen2 import token
+from ..pygram import python_symbols as syms
+from .. import fixer_base
+from ..fixer_util import Name, Call, find_binding
+
+bind_warning = "Calls to builtin next() possibly shadowed by global binding"
+
+
+class FixNext(fixer_base.BaseFix):
+ BM_compatible = True
+ PATTERN = """
+ power< base=any+ trailer< '.' attr='next' > trailer< '(' ')' > >
+ |
+ power< head=any+ trailer< '.' attr='next' > not trailer< '(' ')' > >
+ |
+ classdef< 'class' any+ ':'
+ suite< any*
+ funcdef< 'def'
+ name='next'
+ parameters< '(' NAME ')' > any+ >
+ any* > >
+ |
+ global=global_stmt< 'global' any* 'next' any* >
+ """
+
+ order = "pre" # Pre-order tree traversal
+
+ def start_tree(self, tree, filename):
+ super(FixNext, self).start_tree(tree, filename)
+
+ n = find_binding(u'next', tree)
+ if n:
+ self.warning(n, bind_warning)
+ self.shadowed_next = True
+ else:
+ self.shadowed_next = False
+
+ def transform(self, node, results):
+ assert results
+
+ base = results.get("base")
+ attr = results.get("attr")
+ name = results.get("name")
+
+ if base:
+ if self.shadowed_next:
+ attr.replace(Name(u"__next__", prefix=attr.prefix))
+ else:
+ base = [n.clone() for n in base]
+ base[0].prefix = u""
+ node.replace(Call(Name(u"next", prefix=node.prefix), base))
+ elif name:
+ n = Name(u"__next__", prefix=name.prefix)
+ name.replace(n)
+ elif attr:
+ # We don't do this transformation if we're assigning to "x.next".
+ # Unfortunately, it doesn't seem possible to do this in PATTERN,
+ # so it's being done here.
+ if is_assign_target(node):
+ head = results["head"]
+ if "".join([str(n) for n in head]).strip() == u'__builtin__':
+ self.warning(node, bind_warning)
+ return
+ attr.replace(Name(u"__next__"))
+ elif "global" in results:
+ self.warning(node, bind_warning)
+ self.shadowed_next = True
+
+
+### The following functions help test if node is part of an assignment
+### target.
+
+def is_assign_target(node):
+ assign = find_assign(node)
+ if assign is None:
+ return False
+
+ for child in assign.children:
+ if child.type == token.EQUAL:
+ return False
+ elif is_subtree(child, node):
+ return True
+ return False
+
+def find_assign(node):
+ if node.type == syms.expr_stmt:
+ return node
+ if node.type == syms.simple_stmt or node.parent is None:
+ return None
+ return find_assign(node.parent)
+
+def is_subtree(root, node):
+ if root == node:
+ return True
+ return any(is_subtree(c, node) for c in root.children)
diff --git a/lib/python2.7/lib2to3/fixes/fix_nonzero.py b/lib/python2.7/lib2to3/fixes/fix_nonzero.py
new file mode 100644
index 0000000..ba83478
--- /dev/null
+++ b/lib/python2.7/lib2to3/fixes/fix_nonzero.py
@@ -0,0 +1,21 @@
+"""Fixer for __nonzero__ -> __bool__ methods."""
+# Author: Collin Winter
+
+# Local imports
+from .. import fixer_base
+from ..fixer_util import Name, syms
+
+class FixNonzero(fixer_base.BaseFix):
+ BM_compatible = True
+ PATTERN = """
+ classdef< 'class' any+ ':'
+ suite< any*
+ funcdef< 'def' name='__nonzero__'
+ parameters< '(' NAME ')' > any+ >
+ any* > >
+ """
+
+ def transform(self, node, results):
+ name = results["name"]
+ new = Name(u"__bool__", prefix=name.prefix)
+ name.replace(new)
diff --git a/lib/python2.7/lib2to3/fixes/fix_numliterals.py b/lib/python2.7/lib2to3/fixes/fix_numliterals.py
new file mode 100644
index 0000000..b0c23f8
--- /dev/null
+++ b/lib/python2.7/lib2to3/fixes/fix_numliterals.py
@@ -0,0 +1,28 @@
+"""Fixer that turns 1L into 1, 0755 into 0o755.
+"""
+# Copyright 2007 Georg Brandl.
+# Licensed to PSF under a Contributor Agreement.
+
+# Local imports
+from ..pgen2 import token
+from .. import fixer_base
+from ..fixer_util import Number
+
+
+class FixNumliterals(fixer_base.BaseFix):
+ # This is so simple that we don't need the pattern compiler.
+
+ _accept_type = token.NUMBER
+
+ def match(self, node):
+ # Override
+ return (node.value.startswith(u"0") or node.value[-1] in u"Ll")
+
+ def transform(self, node, results):
+ val = node.value
+ if val[-1] in u'Ll':
+ val = val[:-1]
+ elif val.startswith(u'0') and val.isdigit() and len(set(val)) > 1:
+ val = u"0o" + val[1:]
+
+ return Number(val, prefix=node.prefix)
diff --git a/lib/python2.7/lib2to3/fixes/fix_operator.py b/lib/python2.7/lib2to3/fixes/fix_operator.py
new file mode 100644
index 0000000..7bf2c0d
--- /dev/null
+++ b/lib/python2.7/lib2to3/fixes/fix_operator.py
@@ -0,0 +1,96 @@
+"""Fixer for operator functions.
+
+operator.isCallable(obj) -> hasattr(obj, '__call__')
+operator.sequenceIncludes(obj) -> operator.contains(obj)
+operator.isSequenceType(obj) -> isinstance(obj, collections.Sequence)
+operator.isMappingType(obj) -> isinstance(obj, collections.Mapping)
+operator.isNumberType(obj) -> isinstance(obj, numbers.Number)
+operator.repeat(obj, n) -> operator.mul(obj, n)
+operator.irepeat(obj, n) -> operator.imul(obj, n)
+"""
+
+# Local imports
+from lib2to3 import fixer_base
+from lib2to3.fixer_util import Call, Name, String, touch_import
+
+
+def invocation(s):
+ def dec(f):
+ f.invocation = s
+ return f
+ return dec
+
+
+class FixOperator(fixer_base.BaseFix):
+ BM_compatible = True
+ order = "pre"
+
+ methods = """
+ method=('isCallable'|'sequenceIncludes'
+ |'isSequenceType'|'isMappingType'|'isNumberType'
+ |'repeat'|'irepeat')
+ """
+ obj = "'(' obj=any ')'"
+ PATTERN = """
+ power< module='operator'
+ trailer< '.' %(methods)s > trailer< %(obj)s > >
+ |
+ power< %(methods)s trailer< %(obj)s > >
+ """ % dict(methods=methods, obj=obj)
+
+ def transform(self, node, results):
+ method = self._check_method(node, results)
+ if method is not None:
+ return method(node, results)
+
+ @invocation("operator.contains(%s)")
+ def _sequenceIncludes(self, node, results):
+ return self._handle_rename(node, results, u"contains")
+
+ @invocation("hasattr(%s, '__call__')")
+ def _isCallable(self, node, results):
+ obj = results["obj"]
+ args = [obj.clone(), String(u", "), String(u"'__call__'")]
+ return Call(Name(u"hasattr"), args, prefix=node.prefix)
+
+ @invocation("operator.mul(%s)")
+ def _repeat(self, node, results):
+ return self._handle_rename(node, results, u"mul")
+
+ @invocation("operator.imul(%s)")
+ def _irepeat(self, node, results):
+ return self._handle_rename(node, results, u"imul")
+
+ @invocation("isinstance(%s, collections.Sequence)")
+ def _isSequenceType(self, node, results):
+ return self._handle_type2abc(node, results, u"collections", u"Sequence")
+
+ @invocation("isinstance(%s, collections.Mapping)")
+ def _isMappingType(self, node, results):
+ return self._handle_type2abc(node, results, u"collections", u"Mapping")
+
+ @invocation("isinstance(%s, numbers.Number)")
+ def _isNumberType(self, node, results):
+ return self._handle_type2abc(node, results, u"numbers", u"Number")
+
+ def _handle_rename(self, node, results, name):
+ method = results["method"][0]
+ method.value = name
+ method.changed()
+
+ def _handle_type2abc(self, node, results, module, abc):
+ touch_import(None, module, node)
+ obj = results["obj"]
+ args = [obj.clone(), String(u", " + u".".join([module, abc]))]
+ return Call(Name(u"isinstance"), args, prefix=node.prefix)
+
+ def _check_method(self, node, results):
+ method = getattr(self, "_" + results["method"][0].value.encode("ascii"))
+ if callable(method):
+ if "module" in results:
+ return method
+ else:
+ sub = (unicode(results["obj"]),)
+ invocation_str = unicode(method.invocation) % sub
+ self.warning(node, u"You should use '%s' here." % invocation_str)
+ return None
diff --git a/lib/python2.7/lib2to3/fixes/fix_paren.py b/lib/python2.7/lib2to3/fixes/fix_paren.py
new file mode 100644
index 0000000..8650cd9
--- /dev/null
+++ b/lib/python2.7/lib2to3/fixes/fix_paren.py
@@ -0,0 +1,44 @@
+"""Fixer that addes parentheses where they are required
+
+This converts ``[x for x in 1, 2]`` to ``[x for x in (1, 2)]``."""
+
+# By Taek Joo Kim and Benjamin Peterson
+
+# Local imports
+from .. import fixer_base
+from ..fixer_util import LParen, RParen
+
+# XXX This doesn't support nested for loops like [x for x in 1, 2 for x in 1, 2]
+class FixParen(fixer_base.BaseFix):
+ BM_compatible = True
+
+ PATTERN = """
+ atom< ('[' | '(')
+ (listmaker< any
+ comp_for<
+ 'for' NAME 'in'
+ target=testlist_safe< any (',' any)+ [',']
+ >
+ [any]
+ >
+ >
+ |
+ testlist_gexp< any
+ comp_for<
+ 'for' NAME 'in'
+ target=testlist_safe< any (',' any)+ [',']
+ >
+ [any]
+ >
+ >)
+ (']' | ')') >
+ """
+
+ def transform(self, node, results):
+ target = results["target"]
+
+ lparen = LParen()
+ lparen.prefix = target.prefix
+ target.prefix = u"" # Make it hug the parentheses
+ target.insert_child(0, lparen)
+ target.append_child(RParen())
diff --git a/lib/python2.7/lib2to3/fixes/fix_print.py b/lib/python2.7/lib2to3/fixes/fix_print.py
new file mode 100644
index 0000000..98786b3
--- /dev/null
+++ b/lib/python2.7/lib2to3/fixes/fix_print.py
@@ -0,0 +1,87 @@
+# Copyright 2006 Google, Inc. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+"""Fixer for print.
+
+Change:
+ 'print' into 'print()'
+ 'print ...' into 'print(...)'
+ 'print ... ,' into 'print(..., end=" ")'
+ 'print >>x, ...' into 'print(..., file=x)'
+
+No changes are applied if print_function is imported from __future__
+
+"""
+
+# Local imports
+from .. import patcomp
+from .. import pytree
+from ..pgen2 import token
+from .. import fixer_base
+from ..fixer_util import Name, Call, Comma, String, is_tuple
+
+
+parend_expr = patcomp.compile_pattern(
+ """atom< '(' [atom|STRING|NAME] ')' >"""
+ )
+
+
+class FixPrint(fixer_base.BaseFix):
+
+ BM_compatible = True
+
+ PATTERN = """
+ simple_stmt< any* bare='print' any* > | print_stmt
+ """
+
+ def transform(self, node, results):
+ assert results
+
+ bare_print = results.get("bare")
+
+ if bare_print:
+ # Special-case print all by itself
+ bare_print.replace(Call(Name(u"print"), [],
+ prefix=bare_print.prefix))
+ return
+ assert node.children[0] == Name(u"print")
+ args = node.children[1:]
+ if len(args) == 1 and parend_expr.match(args[0]):
+ # We don't want to keep sticking parens around an
+ # already-parenthesised expression.
+ return
+
+ sep = end = file = None
+ if args and args[-1] == Comma():
+ args = args[:-1]
+ end = " "
+ if args and args[0] == pytree.Leaf(token.RIGHTSHIFT, u">>"):
+ assert len(args) >= 2
+ file = args[1].clone()
+ args = args[3:] # Strip a possible comma after the file expression
+ # Now synthesize a print(args, sep=..., end=..., file=...) node.
+ l_args = [arg.clone() for arg in args]
+ if l_args:
+ l_args[0].prefix = u""
+ if sep is not None or end is not None or file is not None:
+ if sep is not None:
+ self.add_kwarg(l_args, u"sep", String(repr(sep)))
+ if end is not None:
+ self.add_kwarg(l_args, u"end", String(repr(end)))
+ if file is not None:
+ self.add_kwarg(l_args, u"file", file)
+ n_stmt = Call(Name(u"print"), l_args)
+ n_stmt.prefix = node.prefix
+ return n_stmt
+
+ def add_kwarg(self, l_nodes, s_kwd, n_expr):
+ # XXX All this prefix-setting may lose comments (though rarely)
+ n_expr.prefix = u""
+ n_argument = pytree.Node(self.syms.argument,
+ (Name(s_kwd),
+ pytree.Leaf(token.EQUAL, u"="),
+ n_expr))
+ if l_nodes:
+ l_nodes.append(Comma())
+ n_argument.prefix = u" "
+ l_nodes.append(n_argument)
diff --git a/lib/python2.7/lib2to3/fixes/fix_raise.py b/lib/python2.7/lib2to3/fixes/fix_raise.py
new file mode 100644
index 0000000..b958ba0
--- /dev/null
+++ b/lib/python2.7/lib2to3/fixes/fix_raise.py
@@ -0,0 +1,90 @@
+"""Fixer for 'raise E, V, T'
+
+raise -> raise
+raise E -> raise E
+raise E, V -> raise E(V)
+raise E, V, T -> raise E(V).with_traceback(T)
+raise E, None, T -> raise E.with_traceback(T)
+
+raise (((E, E'), E''), E'''), V -> raise E(V)
+raise "foo", V, T -> warns about string exceptions
+
+
+CAVEATS:
+1) "raise E, V" will be incorrectly translated if V is an exception
+ instance. The correct Python 3 idiom is
+
+ raise E from V
+
+ but since we can't detect instance-hood by syntax alone and since
+ any client code would have to be changed as well, we don't automate
+ this.
+"""
+# Author: Collin Winter
+
+# Local imports
+from .. import pytree
+from ..pgen2 import token
+from .. import fixer_base
+from ..fixer_util import Name, Call, Attr, ArgList, is_tuple
+
+class FixRaise(fixer_base.BaseFix):
+
+ BM_compatible = True
+ PATTERN = """
+ raise_stmt< 'raise' exc=any [',' val=any [',' tb=any]] >
+ """
+
+ def transform(self, node, results):
+ syms = self.syms
+
+ exc = results["exc"].clone()
+ if exc.type == token.STRING:
+ msg = "Python 3 does not support string exceptions"
+ self.cannot_convert(node, msg)
+ return
+
+ # Python 2 supports
+ # raise ((((E1, E2), E3), E4), E5), V
+ # as a synonym for
+ # raise E1, V
+ # Since Python 3 will not support this, we recurse down any tuple
+ # literals, always taking the first element.
+ if is_tuple(exc):
+ while is_tuple(exc):
+ # exc.children[1:-1] is the unparenthesized tuple
+ # exc.children[1].children[0] is the first element of the tuple
+ exc = exc.children[1].children[0].clone()
+ exc.prefix = u" "
+
+ if "val" not in results:
+ # One-argument raise
+ new = pytree.Node(syms.raise_stmt, [Name(u"raise"), exc])
+ new.prefix = node.prefix
+ return new
+
+ val = results["val"].clone()
+ if is_tuple(val):
+ args = [c.clone() for c in val.children[1:-1]]
+ else:
+ val.prefix = u""
+ args = [val]
+
+ if "tb" in results:
+ tb = results["tb"].clone()
+ tb.prefix = u""
+
+ e = exc
+ # If there's a traceback and None is passed as the value, then don't
+ # add a call, since the user probably just wants to add a
+ # traceback. See issue #9661.
+ if val.type != token.NAME or val.value != u"None":
+ e = Call(exc, args)
+ with_tb = Attr(e, Name(u'with_traceback')) + [ArgList([tb])]
+ new = pytree.Node(syms.simple_stmt, [Name(u"raise")] + with_tb)
+ new.prefix = node.prefix
+ return new
+ else:
+ return pytree.Node(syms.raise_stmt,
+ [Name(u"raise"), Call(exc, args)],
+ prefix=node.prefix)
diff --git a/lib/python2.7/lib2to3/fixes/fix_raw_input.py b/lib/python2.7/lib2to3/fixes/fix_raw_input.py
new file mode 100644
index 0000000..3a73b81
--- /dev/null
+++ b/lib/python2.7/lib2to3/fixes/fix_raw_input.py
@@ -0,0 +1,17 @@
+"""Fixer that changes raw_input(...) into input(...)."""
+# Author: Andre Roberge
+
+# Local imports
+from .. import fixer_base
+from ..fixer_util import Name
+
+class FixRawInput(fixer_base.BaseFix):
+
+ BM_compatible = True
+ PATTERN = """
+ power< name='raw_input' trailer< '(' [any] ')' > any* >
+ """
+
+ def transform(self, node, results):
+ name = results["name"]
+ name.replace(Name(u"input", prefix=name.prefix))
diff --git a/lib/python2.7/lib2to3/fixes/fix_reduce.py b/lib/python2.7/lib2to3/fixes/fix_reduce.py
new file mode 100644
index 0000000..6bd785c
--- /dev/null
+++ b/lib/python2.7/lib2to3/fixes/fix_reduce.py
@@ -0,0 +1,35 @@
+# Copyright 2008 Armin Ronacher.
+# Licensed to PSF under a Contributor Agreement.
+
+"""Fixer for reduce().
+
+Makes sure reduce() is imported from the functools module if reduce() is
+used in the module being fixed.
+"""
+
+from lib2to3 import fixer_base
+from lib2to3.fixer_util import touch_import
+
+
+
+class FixReduce(fixer_base.BaseFix):
+
+ BM_compatible = True
+ order = "pre"
+
+ PATTERN = """
+ power< 'reduce'
+ trailer< '('
+ arglist< (
+ (not(argument<any '=' any>) any ','
+ not(argument<any '=' any>) any) |
+ (not(argument<any '=' any>) any ','
+ not(argument<any '=' any>) any ','
+ not(argument<any '=' any>) any)
+ ) >
+ ')' >
+ >
+ """
+
+ def transform(self, node, results):
+ touch_import(u'functools', u'reduce', node)
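+
+# Illustrative sketch (not part of the module): given a call such as
+#
+#   total = reduce(lambda a, b: a + b, items)
+#
+# the call itself is left alone; touch_import() only ensures that
+#
+#   from functools import reduce
+#
+# is present near the top of the module being fixed.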
diff --git a/lib/python2.7/lib2to3/fixes/fix_renames.py b/lib/python2.7/lib2to3/fixes/fix_renames.py
new file mode 100644
index 0000000..4bcce8c
--- /dev/null
+++ b/lib/python2.7/lib2to3/fixes/fix_renames.py
@@ -0,0 +1,70 @@
+"""Fix incompatible renames
+
+Fixes:
+ * sys.maxint -> sys.maxsize
+"""
+# Author: Christian Heimes
+# based on Collin Winter's fix_import
+
+# Local imports
+from .. import fixer_base
+from ..fixer_util import Name, attr_chain
+
+MAPPING = {"sys": {"maxint" : "maxsize"},
+ }
+LOOKUP = {}
+
+def alternates(members):
+ return "(" + "|".join(map(repr, members)) + ")"
+
+
+def build_pattern():
+ #bare = set()
+ for module, replace in MAPPING.items():
+ for old_attr, new_attr in replace.items():
+ LOOKUP[(module, old_attr)] = new_attr
+ #bare.add(module)
+ #bare.add(old_attr)
+ #yield """
+ # import_name< 'import' (module=%r
+ # | dotted_as_names< any* module=%r any* >) >
+ # """ % (module, module)
+ yield """
+ import_from< 'from' module_name=%r 'import'
+ ( attr_name=%r | import_as_name< attr_name=%r 'as' any >) >
+ """ % (module, old_attr, old_attr)
+ yield """
+ power< module_name=%r trailer< '.' attr_name=%r > any* >
+ """ % (module, old_attr)
+ #yield """bare_name=%s""" % alternates(bare)
+
+
+class FixRenames(fixer_base.BaseFix):
+ BM_compatible = True
+ PATTERN = "|".join(build_pattern())
+
+ order = "pre" # Pre-order tree traversal
+
+ # Don't match the node if it's within another match
+ def match(self, node):
+ match = super(FixRenames, self).match
+ results = match(node)
+ if results:
+ if any(match(obj) for obj in attr_chain(node, "parent")):
+ return False
+ return results
+ return False
+
+ #def start_tree(self, tree, filename):
+ # super(FixRenames, self).start_tree(tree, filename)
+ # self.replace = {}
+
+ def transform(self, node, results):
+ mod_name = results.get("module_name")
+ attr_name = results.get("attr_name")
+ #bare_name = results.get("bare_name")
+ #import_mod = results.get("module")
+
+ if mod_name and attr_name:
+ new_attr = unicode(LOOKUP[(mod_name.value, attr_name.value)])
+ attr_name.replace(Name(new_attr, prefix=attr_name.prefix))
diff --git a/lib/python2.7/lib2to3/fixes/fix_repr.py b/lib/python2.7/lib2to3/fixes/fix_repr.py
new file mode 100644
index 0000000..f343656
--- /dev/null
+++ b/lib/python2.7/lib2to3/fixes/fix_repr.py
@@ -0,0 +1,23 @@
+# Copyright 2006 Google, Inc. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+"""Fixer that transforms `xyzzy` into repr(xyzzy)."""
+
+# Local imports
+from .. import fixer_base
+from ..fixer_util import Call, Name, parenthesize
+
+
+class FixRepr(fixer_base.BaseFix):
+
+ BM_compatible = True
+ PATTERN = """
+ atom < '`' expr=any '`' >
+ """
+
+ def transform(self, node, results):
+ expr = results["expr"].clone()
+
+ if expr.type == self.syms.testlist1:
+ expr = parenthesize(expr)
+ return Call(Name(u"repr"), [expr], prefix=node.prefix)
diff --git a/lib/python2.7/lib2to3/fixes/fix_set_literal.py b/lib/python2.7/lib2to3/fixes/fix_set_literal.py
new file mode 100644
index 0000000..d3d38ec
--- /dev/null
+++ b/lib/python2.7/lib2to3/fixes/fix_set_literal.py
@@ -0,0 +1,53 @@
+"""
+Optional fixer to transform set() calls to set literals.
+"""
+
+# Author: Benjamin Peterson
+
+from lib2to3 import fixer_base, pytree
+from lib2to3.fixer_util import token, syms
+
+
+
+class FixSetLiteral(fixer_base.BaseFix):
+
+ BM_compatible = True
+ explicit = True
+
+ PATTERN = """power< 'set' trailer< '('
+ (atom=atom< '[' (items=listmaker< any ((',' any)* [',']) >
+ |
+ single=any) ']' >
+ |
+ atom< '(' items=testlist_gexp< any ((',' any)* [',']) > ')' >
+ )
+ ')' > >
+ """
+
+ def transform(self, node, results):
+ single = results.get("single")
+ if single:
+ # Make a fake listmaker
+ fake = pytree.Node(syms.listmaker, [single.clone()])
+ single.replace(fake)
+ items = fake
+ else:
+ items = results["items"]
+
+ # Build the contents of the literal
+ literal = [pytree.Leaf(token.LBRACE, u"{")]
+ literal.extend(n.clone() for n in items.children)
+ literal.append(pytree.Leaf(token.RBRACE, u"}"))
+ # Set the prefix of the right brace to that of the ')' or ']'
+ literal[-1].prefix = items.next_sibling.prefix
+ maker = pytree.Node(syms.dictsetmaker, literal)
+ maker.prefix = node.prefix
+
+        # If the original was a one-element tuple, we need to remove the extra comma.
+ if len(maker.children) == 4:
+ n = maker.children[2]
+ n.remove()
+ maker.children[-1].prefix = n.prefix
+
+ # Finally, replace the set call with our shiny new literal.
+ return maker
diff --git a/lib/python2.7/lib2to3/fixes/fix_standarderror.py b/lib/python2.7/lib2to3/fixes/fix_standarderror.py
new file mode 100644
index 0000000..6cad511
--- /dev/null
+++ b/lib/python2.7/lib2to3/fixes/fix_standarderror.py
@@ -0,0 +1,18 @@
+# Copyright 2007 Google, Inc. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+"""Fixer for StandardError -> Exception."""
+
+# Local imports
+from .. import fixer_base
+from ..fixer_util import Name
+
+
+class FixStandarderror(fixer_base.BaseFix):
+ BM_compatible = True
+ PATTERN = """
+ 'StandardError'
+ """
+
+ def transform(self, node, results):
+ return Name(u"Exception", prefix=node.prefix)
diff --git a/lib/python2.7/lib2to3/fixes/fix_sys_exc.py b/lib/python2.7/lib2to3/fixes/fix_sys_exc.py
new file mode 100644
index 0000000..2ecca2b
--- /dev/null
+++ b/lib/python2.7/lib2to3/fixes/fix_sys_exc.py
@@ -0,0 +1,30 @@
+"""Fixer for sys.exc_{type, value, traceback}
+
+sys.exc_type -> sys.exc_info()[0]
+sys.exc_value -> sys.exc_info()[1]
+sys.exc_traceback -> sys.exc_info()[2]
+"""
+
+# By Jeff Balogh and Benjamin Peterson
+
+# Local imports
+from .. import fixer_base
+from ..fixer_util import Attr, Call, Name, Number, Subscript, Node, syms
+
+class FixSysExc(fixer_base.BaseFix):
+ # This order matches the ordering of sys.exc_info().
+ exc_info = [u"exc_type", u"exc_value", u"exc_traceback"]
+ BM_compatible = True
+ PATTERN = """
+ power< 'sys' trailer< dot='.' attribute=(%s) > >
+ """ % '|'.join("'%s'" % e for e in exc_info)
+
+ def transform(self, node, results):
+ sys_attr = results["attribute"][0]
+ index = Number(self.exc_info.index(sys_attr.value))
+
+ call = Call(Name(u"exc_info"), prefix=sys_attr.prefix)
+ attr = Attr(Name(u"sys"), call)
+ attr[1].children[0].prefix = results["dot"].prefix
+ attr.append(Subscript(index))
+ return Node(syms.power, attr, prefix=node.prefix)
diff --git a/lib/python2.7/lib2to3/fixes/fix_throw.py b/lib/python2.7/lib2to3/fixes/fix_throw.py
new file mode 100644
index 0000000..1468d89
--- /dev/null
+++ b/lib/python2.7/lib2to3/fixes/fix_throw.py
@@ -0,0 +1,56 @@
+"""Fixer for generator.throw(E, V, T).
+
+g.throw(E) -> g.throw(E)
+g.throw(E, V) -> g.throw(E(V))
+g.throw(E, V, T) -> g.throw(E(V).with_traceback(T))
+
+g.throw("foo"[, V[, T]]) will warn about string exceptions."""
+# Author: Collin Winter
+
+# Local imports
+from .. import pytree
+from ..pgen2 import token
+from .. import fixer_base
+from ..fixer_util import Name, Call, ArgList, Attr, is_tuple
+
+class FixThrow(fixer_base.BaseFix):
+ BM_compatible = True
+ PATTERN = """
+ power< any trailer< '.' 'throw' >
+ trailer< '(' args=arglist< exc=any ',' val=any [',' tb=any] > ')' >
+ >
+ |
+ power< any trailer< '.' 'throw' > trailer< '(' exc=any ')' > >
+ """
+
+ def transform(self, node, results):
+ syms = self.syms
+
+ exc = results["exc"].clone()
+ if exc.type is token.STRING:
+ self.cannot_convert(node, "Python 3 does not support string exceptions")
+ return
+
+ # Leave "g.throw(E)" alone
+ val = results.get(u"val")
+ if val is None:
+ return
+
+ val = val.clone()
+ if is_tuple(val):
+ args = [c.clone() for c in val.children[1:-1]]
+ else:
+ val.prefix = u""
+ args = [val]
+
+ throw_args = results["args"]
+
+ if "tb" in results:
+ tb = results["tb"].clone()
+ tb.prefix = u""
+
+ e = Call(exc, args)
+ with_tb = Attr(e, Name(u'with_traceback')) + [ArgList([tb])]
+ throw_args.replace(pytree.Node(syms.power, with_tb))
+ else:
+ throw_args.replace(Call(exc, args))
diff --git a/lib/python2.7/lib2to3/fixes/fix_tuple_params.py b/lib/python2.7/lib2to3/fixes/fix_tuple_params.py
new file mode 100644
index 0000000..6361717
--- /dev/null
+++ b/lib/python2.7/lib2to3/fixes/fix_tuple_params.py
@@ -0,0 +1,175 @@
+"""Fixer for function definitions with tuple parameters.
+
+def func(((a, b), c), d):
+ ...
+
+ ->
+
+def func(x, d):
+ ((a, b), c) = x
+ ...
+
+It will also support lambdas:
+
+ lambda (x, y): x + y -> lambda t: t[0] + t[1]
+
+ # The parens are a syntax error in Python 3
+ lambda (x): x + y -> lambda x: x + y
+"""
+# Author: Collin Winter
+
+# Local imports
+from .. import pytree
+from ..pgen2 import token
+from .. import fixer_base
+from ..fixer_util import Assign, Name, Newline, Number, Subscript, syms
+
+def is_docstring(stmt):
+ return isinstance(stmt, pytree.Node) and \
+ stmt.children[0].type == token.STRING
+
+class FixTupleParams(fixer_base.BaseFix):
+ run_order = 4 #use a lower order since lambda is part of other
+ #patterns
+ BM_compatible = True
+
+ PATTERN = """
+ funcdef< 'def' any parameters< '(' args=any ')' >
+ ['->' any] ':' suite=any+ >
+ |
+ lambda=
+ lambdef< 'lambda' args=vfpdef< '(' inner=any ')' >
+ ':' body=any
+ >
+ """
+
+ def transform(self, node, results):
+ if "lambda" in results:
+ return self.transform_lambda(node, results)
+
+ new_lines = []
+ suite = results["suite"]
+ args = results["args"]
+ # This crap is so "def foo(...): x = 5; y = 7" is handled correctly.
+ # TODO(cwinter): suite-cleanup
+ if suite[0].children[1].type == token.INDENT:
+ start = 2
+ indent = suite[0].children[1].value
+ end = Newline()
+ else:
+ start = 0
+ indent = u"; "
+ end = pytree.Leaf(token.INDENT, u"")
+
+ # We need access to self for new_name(), and making this a method
+ # doesn't feel right. Closing over self and new_lines makes the
+ # code below cleaner.
+ def handle_tuple(tuple_arg, add_prefix=False):
+ n = Name(self.new_name())
+ arg = tuple_arg.clone()
+ arg.prefix = u""
+ stmt = Assign(arg, n.clone())
+ if add_prefix:
+ n.prefix = u" "
+ tuple_arg.replace(n)
+ new_lines.append(pytree.Node(syms.simple_stmt,
+ [stmt, end.clone()]))
+
+ if args.type == syms.tfpdef:
+ handle_tuple(args)
+ elif args.type == syms.typedargslist:
+ for i, arg in enumerate(args.children):
+ if arg.type == syms.tfpdef:
+ # Without add_prefix, the emitted code is correct,
+ # just ugly.
+ handle_tuple(arg, add_prefix=(i > 0))
+
+ if not new_lines:
+ return
+
+ # This isn't strictly necessary, but it plays nicely with other fixers.
+ # TODO(cwinter) get rid of this when children becomes a smart list
+ for line in new_lines:
+ line.parent = suite[0]
+
+ # TODO(cwinter) suite-cleanup
+ after = start
+ if start == 0:
+ new_lines[0].prefix = u" "
+ elif is_docstring(suite[0].children[start]):
+ new_lines[0].prefix = indent
+ after = start + 1
+
+ for line in new_lines:
+ line.parent = suite[0]
+ suite[0].children[after:after] = new_lines
+ for i in range(after+1, after+len(new_lines)+1):
+ suite[0].children[i].prefix = indent
+ suite[0].changed()
+
+ def transform_lambda(self, node, results):
+ args = results["args"]
+ body = results["body"]
+ inner = simplify_args(results["inner"])
+
+ # Replace lambda ((((x)))): x with lambda x: x
+ if inner.type == token.NAME:
+ inner = inner.clone()
+ inner.prefix = u" "
+ args.replace(inner)
+ return
+
+ params = find_params(args)
+ to_index = map_to_index(params)
+ tup_name = self.new_name(tuple_name(params))
+
+ new_param = Name(tup_name, prefix=u" ")
+ args.replace(new_param.clone())
+ for n in body.post_order():
+ if n.type == token.NAME and n.value in to_index:
+ subscripts = [c.clone() for c in to_index[n.value]]
+ new = pytree.Node(syms.power,
+ [new_param.clone()] + subscripts)
+ new.prefix = n.prefix
+ n.replace(new)
+
+
+### Helper functions for transform_lambda()
+
+def simplify_args(node):
+ if node.type in (syms.vfplist, token.NAME):
+ return node
+ elif node.type == syms.vfpdef:
+ # These look like vfpdef< '(' x ')' > where x is NAME
+ # or another vfpdef instance (leading to recursion).
+ while node.type == syms.vfpdef:
+ node = node.children[1]
+ return node
+ raise RuntimeError("Received unexpected node %s" % node)
+
+def find_params(node):
+ if node.type == syms.vfpdef:
+ return find_params(node.children[1])
+ elif node.type == token.NAME:
+ return node.value
+ return [find_params(c) for c in node.children if c.type != token.COMMA]
+
+def map_to_index(param_list, prefix=[], d=None):
+ if d is None:
+ d = {}
+ for i, obj in enumerate(param_list):
+ trailer = [Subscript(Number(unicode(i)))]
+ if isinstance(obj, list):
+ map_to_index(obj, trailer, d=d)
+ else:
+ d[obj] = prefix + trailer
+ return d
+
+def tuple_name(param_list):
+ l = []
+ for obj in param_list:
+ if isinstance(obj, list):
+ l.append(tuple_name(obj))
+ else:
+ l.append(obj)
+ return u"_".join(l)
diff --git a/lib/python2.7/lib2to3/fixes/fix_types.py b/lib/python2.7/lib2to3/fixes/fix_types.py
new file mode 100644
index 0000000..baaeabd
--- /dev/null
+++ b/lib/python2.7/lib2to3/fixes/fix_types.py
@@ -0,0 +1,62 @@
+# Copyright 2007 Google, Inc. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+"""Fixer for removing uses of the types module.
+
+These work only for the known names in the types module. The names may be
+written with or without the ``types.`` prefix; i.e., it is assumed the
+module is imported either as:
+
+ import types
+ from types import ... # either * or specific types
+
+The import statements are not modified.
+
+There should be another fixer that handles at least the following constants:
+
+ type([]) -> list
+ type(()) -> tuple
+ type('') -> str
+
+"""
+
+# Local imports
+from ..pgen2 import token
+from .. import fixer_base
+from ..fixer_util import Name
+
+_TYPE_MAPPING = {
+ 'BooleanType' : 'bool',
+ 'BufferType' : 'memoryview',
+ 'ClassType' : 'type',
+ 'ComplexType' : 'complex',
+ 'DictType': 'dict',
+ 'DictionaryType' : 'dict',
+ 'EllipsisType' : 'type(Ellipsis)',
+ #'FileType' : 'io.IOBase',
+ 'FloatType': 'float',
+ 'IntType': 'int',
+ 'ListType': 'list',
+ 'LongType': 'int',
+ 'ObjectType' : 'object',
+ 'NoneType': 'type(None)',
+ 'NotImplementedType' : 'type(NotImplemented)',
+ 'SliceType' : 'slice',
+ 'StringType': 'bytes', # XXX ?
+ 'StringTypes' : '(str,)', # XXX ?
+ 'TupleType': 'tuple',
+ 'TypeType' : 'type',
+ 'UnicodeType': 'str',
+ 'XRangeType' : 'range',
+ }
+
+_pats = ["power< 'types' trailer< '.' name='%s' > >" % t for t in _TYPE_MAPPING]
+
+class FixTypes(fixer_base.BaseFix):
+ BM_compatible = True
+ PATTERN = '|'.join(_pats)
+
+ def transform(self, node, results):
+ new_value = unicode(_TYPE_MAPPING.get(results["name"].value))
+ if new_value:
+ return Name(new_value, prefix=node.prefix)
+ return None
diff --git a/lib/python2.7/lib2to3/fixes/fix_unicode.py b/lib/python2.7/lib2to3/fixes/fix_unicode.py
new file mode 100644
index 0000000..2d776f6
--- /dev/null
+++ b/lib/python2.7/lib2to3/fixes/fix_unicode.py
@@ -0,0 +1,42 @@
+r"""Fixer for unicode.
+
+* Changes unicode to str and unichr to chr.
+
+* If "...\u..." is not unicode literal change it into "...\\u...".
+
+* Change u"..." into "...".
+
+"""
+
+from ..pgen2 import token
+from .. import fixer_base
+
+_mapping = {u"unichr" : u"chr", u"unicode" : u"str"}
+
+class FixUnicode(fixer_base.BaseFix):
+ BM_compatible = True
+ PATTERN = "STRING | 'unicode' | 'unichr'"
+
+ def start_tree(self, tree, filename):
+ super(FixUnicode, self).start_tree(tree, filename)
+ self.unicode_literals = 'unicode_literals' in tree.future_features
+
+ def transform(self, node, results):
+ if node.type == token.NAME:
+ new = node.clone()
+ new.value = _mapping[node.value]
+ return new
+ elif node.type == token.STRING:
+ val = node.value
+ if not self.unicode_literals and val[0] in u'\'"' and u'\\' in val:
+ val = ur'\\'.join([
+ v.replace(u'\\u', ur'\\u').replace(u'\\U', ur'\\U')
+ for v in val.split(ur'\\')
+ ])
+ if val[0] in u'uU':
+ val = val[1:]
+ if val == node.value:
+ return node
+ new = node.clone()
+ new.value = val
+ return new
diff --git a/lib/python2.7/lib2to3/fixes/fix_urllib.py b/lib/python2.7/lib2to3/fixes/fix_urllib.py
new file mode 100644
index 0000000..34e1b27
--- /dev/null
+++ b/lib/python2.7/lib2to3/fixes/fix_urllib.py
@@ -0,0 +1,197 @@
+"""Fix changes imports of urllib which are now incompatible.
+ This is rather similar to fix_imports, but because of the more
+ complex nature of the fixing for urllib, it has its own fixer.
+"""
+# Author: Nick Edds
+
+# Local imports
+from lib2to3.fixes.fix_imports import alternates, FixImports
+from lib2to3 import fixer_base
+from lib2to3.fixer_util import (Name, Comma, FromImport, Newline,
+ find_indentation, Node, syms)
+
+MAPPING = {"urllib": [
+ ("urllib.request",
+ ["URLopener", "FancyURLopener", "urlretrieve",
+ "_urlopener", "urlopen", "urlcleanup",
+ "pathname2url", "url2pathname"]),
+ ("urllib.parse",
+ ["quote", "quote_plus", "unquote", "unquote_plus",
+ "urlencode", "splitattr", "splithost", "splitnport",
+ "splitpasswd", "splitport", "splitquery", "splittag",
+ "splittype", "splituser", "splitvalue", ]),
+ ("urllib.error",
+ ["ContentTooShortError"])],
+ "urllib2" : [
+ ("urllib.request",
+ ["urlopen", "install_opener", "build_opener",
+ "Request", "OpenerDirector", "BaseHandler",
+ "HTTPDefaultErrorHandler", "HTTPRedirectHandler",
+ "HTTPCookieProcessor", "ProxyHandler",
+ "HTTPPasswordMgr",
+ "HTTPPasswordMgrWithDefaultRealm",
+ "AbstractBasicAuthHandler",
+ "HTTPBasicAuthHandler", "ProxyBasicAuthHandler",
+ "AbstractDigestAuthHandler",
+ "HTTPDigestAuthHandler", "ProxyDigestAuthHandler",
+ "HTTPHandler", "HTTPSHandler", "FileHandler",
+ "FTPHandler", "CacheFTPHandler",
+ "UnknownHandler"]),
+ ("urllib.error",
+ ["URLError", "HTTPError"]),
+ ]
+}
+
+# Duplicate the url parsing functions for urllib2.
+MAPPING["urllib2"].append(MAPPING["urllib"][1])
+
+
+def build_pattern():
+ bare = set()
+ for old_module, changes in MAPPING.items():
+ for change in changes:
+ new_module, members = change
+ members = alternates(members)
+ yield """import_name< 'import' (module=%r
+ | dotted_as_names< any* module=%r any* >) >
+ """ % (old_module, old_module)
+ yield """import_from< 'from' mod_member=%r 'import'
+ ( member=%s | import_as_name< member=%s 'as' any > |
+ import_as_names< members=any* >) >
+ """ % (old_module, members, members)
+ yield """import_from< 'from' module_star=%r 'import' star='*' >
+ """ % old_module
+ yield """import_name< 'import'
+ dotted_as_name< module_as=%r 'as' any > >
+ """ % old_module
+ # bare_with_attr has a special significance for FixImports.match().
+ yield """power< bare_with_attr=%r trailer< '.' member=%s > any* >
+ """ % (old_module, members)
+
+
+class FixUrllib(FixImports):
+
+ def build_pattern(self):
+ return "|".join(build_pattern())
+
+ def transform_import(self, node, results):
+ """Transform for the basic import case. Replaces the old
+ import name with a comma separated list of its
+ replacements.
+ """
+ import_mod = results.get("module")
+ pref = import_mod.prefix
+
+ names = []
+
+ # create a Node list of the replacement modules
+ for name in MAPPING[import_mod.value][:-1]:
+ names.extend([Name(name[0], prefix=pref), Comma()])
+ names.append(Name(MAPPING[import_mod.value][-1][0], prefix=pref))
+ import_mod.replace(names)
+
+ def transform_member(self, node, results):
+ """Transform for imports of specific module elements. Replaces
+ the module to be imported from with the appropriate new
+ module.
+ """
+ mod_member = results.get("mod_member")
+ pref = mod_member.prefix
+ member = results.get("member")
+
+ # Simple case with only a single member being imported
+ if member:
+ # this may be a list of length one, or just a node
+ if isinstance(member, list):
+ member = member[0]
+ new_name = None
+ for change in MAPPING[mod_member.value]:
+ if member.value in change[1]:
+ new_name = change[0]
+ break
+ if new_name:
+ mod_member.replace(Name(new_name, prefix=pref))
+ else:
+ self.cannot_convert(node, "This is an invalid module element")
+
+ # Multiple members being imported
+ else:
+ # a dictionary for replacements, order matters
+ modules = []
+ mod_dict = {}
+ members = results["members"]
+ for member in members:
+ # we only care about the actual members
+ if member.type == syms.import_as_name:
+ as_name = member.children[2].value
+ member_name = member.children[0].value
+ else:
+ member_name = member.value
+ as_name = None
+ if member_name != u",":
+ for change in MAPPING[mod_member.value]:
+ if member_name in change[1]:
+ if change[0] not in mod_dict:
+ modules.append(change[0])
+ mod_dict.setdefault(change[0], []).append(member)
+
+ new_nodes = []
+ indentation = find_indentation(node)
+ first = True
+ def handle_name(name, prefix):
+ if name.type == syms.import_as_name:
+ kids = [Name(name.children[0].value, prefix=prefix),
+ name.children[1].clone(),
+ name.children[2].clone()]
+ return [Node(syms.import_as_name, kids)]
+ return [Name(name.value, prefix=prefix)]
+ for module in modules:
+ elts = mod_dict[module]
+ names = []
+ for elt in elts[:-1]:
+ names.extend(handle_name(elt, pref))
+ names.append(Comma())
+ names.extend(handle_name(elts[-1], pref))
+ new = FromImport(module, names)
+ if not first or node.parent.prefix.endswith(indentation):
+ new.prefix = indentation
+ new_nodes.append(new)
+ first = False
+ if new_nodes:
+ nodes = []
+ for new_node in new_nodes[:-1]:
+ nodes.extend([new_node, Newline()])
+ nodes.append(new_nodes[-1])
+ node.replace(nodes)
+ else:
+ self.cannot_convert(node, "All module elements are invalid")
+
+ def transform_dot(self, node, results):
+ """Transform for calls to module members in code."""
+ module_dot = results.get("bare_with_attr")
+ member = results.get("member")
+ new_name = None
+ if isinstance(member, list):
+ member = member[0]
+ for change in MAPPING[module_dot.value]:
+ if member.value in change[1]:
+ new_name = change[0]
+ break
+ if new_name:
+ module_dot.replace(Name(new_name,
+ prefix=module_dot.prefix))
+ else:
+ self.cannot_convert(node, "This is an invalid module element")
+
+ def transform(self, node, results):
+ if results.get("module"):
+ self.transform_import(node, results)
+ elif results.get("mod_member"):
+ self.transform_member(node, results)
+ elif results.get("bare_with_attr"):
+ self.transform_dot(node, results)
+ # Renaming and star imports are not supported for these modules.
+ elif results.get("module_star"):
+ self.cannot_convert(node, "Cannot handle star imports.")
+ elif results.get("module_as"):
+ self.cannot_convert(node, "This module is now multiple modules")
diff --git a/lib/python2.7/lib2to3/fixes/fix_ws_comma.py b/lib/python2.7/lib2to3/fixes/fix_ws_comma.py
new file mode 100644
index 0000000..37ff624
--- /dev/null
+++ b/lib/python2.7/lib2to3/fixes/fix_ws_comma.py
@@ -0,0 +1,39 @@
+"""Fixer that changes 'a ,b' into 'a, b'.
+
+This also changes '{a :b}' into '{a: b}', but does not touch other
+uses of colons. It does not touch other uses of whitespace.
+
+"""
+
+from .. import pytree
+from ..pgen2 import token
+from .. import fixer_base
+
+class FixWsComma(fixer_base.BaseFix):
+
+    explicit = True # The user must ask for this fixer
+
+ PATTERN = """
+ any<(not(',') any)+ ',' ((not(',') any)+ ',')* [not(',') any]>
+ """
+
+ COMMA = pytree.Leaf(token.COMMA, u",")
+ COLON = pytree.Leaf(token.COLON, u":")
+ SEPS = (COMMA, COLON)
+
+ def transform(self, node, results):
+ new = node.clone()
+ comma = False
+ for child in new.children:
+ if child in self.SEPS:
+ prefix = child.prefix
+ if prefix.isspace() and u"\n" not in prefix:
+ child.prefix = u""
+ comma = True
+ else:
+ if comma:
+ prefix = child.prefix
+ if not prefix:
+ child.prefix = u" "
+ comma = False
+ return new
diff --git a/lib/python2.7/lib2to3/fixes/fix_xrange.py b/lib/python2.7/lib2to3/fixes/fix_xrange.py
new file mode 100644
index 0000000..f143672
--- /dev/null
+++ b/lib/python2.7/lib2to3/fixes/fix_xrange.py
@@ -0,0 +1,73 @@
+# Copyright 2007 Google, Inc. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+"""Fixer that changes xrange(...) into range(...)."""
+
+# Local imports
+from .. import fixer_base
+from ..fixer_util import Name, Call, consuming_calls
+from .. import patcomp
+
+
+class FixXrange(fixer_base.BaseFix):
+ BM_compatible = True
+ PATTERN = """
+ power<
+ (name='range'|name='xrange') trailer< '(' args=any ')' >
+ rest=any* >
+ """
+
+ def start_tree(self, tree, filename):
+ super(FixXrange, self).start_tree(tree, filename)
+ self.transformed_xranges = set()
+
+ def finish_tree(self, tree, filename):
+ self.transformed_xranges = None
+
+ def transform(self, node, results):
+ name = results["name"]
+ if name.value == u"xrange":
+ return self.transform_xrange(node, results)
+ elif name.value == u"range":
+ return self.transform_range(node, results)
+ else:
+ raise ValueError(repr(name))
+
+ def transform_xrange(self, node, results):
+ name = results["name"]
+ name.replace(Name(u"range", prefix=name.prefix))
+ # This prevents the new range call from being wrapped in a list later.
+ self.transformed_xranges.add(id(node))
+
+ def transform_range(self, node, results):
+ if (id(node) not in self.transformed_xranges and
+ not self.in_special_context(node)):
+ range_call = Call(Name(u"range"), [results["args"].clone()])
+ # Encase the range call in list().
+ list_call = Call(Name(u"list"), [range_call],
+ prefix=node.prefix)
+ # Put things that were after the range() call after the list call.
+ for n in results["rest"]:
+ list_call.append_child(n)
+ return list_call
+
+ P1 = "power< func=NAME trailer< '(' node=any ')' > any* >"
+ p1 = patcomp.compile_pattern(P1)
+
+ P2 = """for_stmt< 'for' any 'in' node=any ':' any* >
+ | comp_for< 'for' any 'in' node=any any* >
+ | comparison< any 'in' node=any any*>
+ """
+ p2 = patcomp.compile_pattern(P2)
+
+ def in_special_context(self, node):
+ if node.parent is None:
+ return False
+ results = {}
+ if (node.parent.parent is not None and
+ self.p1.match(node.parent.parent, results) and
+ results["node"] is node):
+ # list(d.keys()) -> list(d.keys()), etc.
+ return results["func"].value in consuming_calls
+ # for ... in d.iterkeys() -> for ... in d.keys(), etc.
+ return self.p2.match(node.parent, results) and results["node"] is node
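+
+# Illustrative sketch (not part of the module):
+#
+#   xrange(10)           -> range(10)
+#   x = range(10)        -> x = list(range(10))
+#   for i in range(10):  -> unchanged (special context)
+#   sorted(range(10))    -> unchanged (consuming call)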
diff --git a/lib/python2.7/lib2to3/fixes/fix_xreadlines.py b/lib/python2.7/lib2to3/fixes/fix_xreadlines.py
new file mode 100644
index 0000000..f50b9a2
--- /dev/null
+++ b/lib/python2.7/lib2to3/fixes/fix_xreadlines.py
@@ -0,0 +1,25 @@
+"""Fix "for x in f.xreadlines()" -> "for x in f".
+
+This fixer will also convert g(f.xreadlines) into g(f.__iter__)."""
+# Author: Collin Winter
+
+# Local imports
+from .. import fixer_base
+from ..fixer_util import Name
+
+
+class FixXreadlines(fixer_base.BaseFix):
+ BM_compatible = True
+ PATTERN = """
+ power< call=any+ trailer< '.' 'xreadlines' > trailer< '(' ')' > >
+ |
+ power< any+ trailer< '.' no_call='xreadlines' > >
+ """
+
+ def transform(self, node, results):
+ no_call = results.get("no_call")
+
+ if no_call:
+ no_call.replace(Name(u"__iter__", prefix=no_call.prefix))
+ else:
+ node.replace([x.clone() for x in results["call"]])
diff --git a/lib/python2.7/lib2to3/fixes/fix_zip.py b/lib/python2.7/lib2to3/fixes/fix_zip.py
new file mode 100644
index 0000000..c5d7b66
--- /dev/null
+++ b/lib/python2.7/lib2to3/fixes/fix_zip.py
@@ -0,0 +1,35 @@
+"""
+Fixer that changes zip(seq0, seq1, ...) into list(zip(seq0, seq1, ...))
+unless there exists a 'from future_builtins import zip' statement in the
+top-level namespace.
+
+We avoid the transformation if the zip() call is directly contained in
+iter(<>), list(<>), tuple(<>), sorted(<>), ...join(<>), or for V in <>:.
+"""
+
+# Local imports
+from .. import fixer_base
+from ..fixer_util import Name, Call, in_special_context
+
+class FixZip(fixer_base.ConditionalFix):
+
+ BM_compatible = True
+ PATTERN = """
+ power< 'zip' args=trailer< '(' [any] ')' >
+ >
+ """
+
+ skip_on = "future_builtins.zip"
+
+ def transform(self, node, results):
+ if self.should_skip(node):
+ return
+
+ if in_special_context(node):
+ return None
+
+ new = node.clone()
+ new.prefix = u""
+ new = Call(Name(u"list"), [new])
+ new.prefix = node.prefix
+ return new
diff --git a/lib/python2.7/lib2to3/main.py b/lib/python2.7/lib2to3/main.py
new file mode 100644
index 0000000..ad0625e
--- /dev/null
+++ b/lib/python2.7/lib2to3/main.py
@@ -0,0 +1,269 @@
+"""
+Main program for 2to3.
+"""
+
+from __future__ import with_statement
+
+import sys
+import os
+import difflib
+import logging
+import shutil
+import optparse
+
+from . import refactor
+
+
+def diff_texts(a, b, filename):
+ """Return a unified diff of two strings."""
+ a = a.splitlines()
+ b = b.splitlines()
+ return difflib.unified_diff(a, b, filename, filename,
+ "(original)", "(refactored)",
+ lineterm="")
+
+
+class StdoutRefactoringTool(refactor.MultiprocessRefactoringTool):
+ """
+ A refactoring tool that can avoid overwriting its input files.
+ Prints output to stdout.
+
+    Output files can optionally be written to a different directory and/or
+ have an extra file suffix appended to their name for use in situations
+ where you do not want to replace the input files.
+ """
+
+ def __init__(self, fixers, options, explicit, nobackups, show_diffs,
+ input_base_dir='', output_dir='', append_suffix=''):
+ """
+ Args:
+ fixers: A list of fixers to import.
+ options: A dict with RefactoringTool configuration.
+ explicit: A list of fixers to run even if they are explicit.
+ nobackups: If true no backup '.bak' files will be created for those
+ files that are being refactored.
+ show_diffs: Should diffs of the refactoring be printed to stdout?
+ input_base_dir: The base directory for all input files. This class
+ will strip this path prefix off of filenames before substituting
+ it with output_dir. Only meaningful if output_dir is supplied.
+ All files processed by refactor() must start with this path.
+ output_dir: If supplied, all converted files will be written into
+ this directory tree instead of input_base_dir.
+ append_suffix: If supplied, all files output by this tool will have
+ this appended to their filename. Useful for changing .py to
+ .py3 for example by passing append_suffix='3'.
+ """
+ self.nobackups = nobackups
+ self.show_diffs = show_diffs
+ if input_base_dir and not input_base_dir.endswith(os.sep):
+ input_base_dir += os.sep
+ self._input_base_dir = input_base_dir
+ self._output_dir = output_dir
+ self._append_suffix = append_suffix
+ super(StdoutRefactoringTool, self).__init__(fixers, options, explicit)
+
+ def log_error(self, msg, *args, **kwargs):
+ self.errors.append((msg, args, kwargs))
+ self.logger.error(msg, *args, **kwargs)
+
+ def write_file(self, new_text, filename, old_text, encoding):
+ orig_filename = filename
+ if self._output_dir:
+ if filename.startswith(self._input_base_dir):
+ filename = os.path.join(self._output_dir,
+ filename[len(self._input_base_dir):])
+ else:
+ raise ValueError('filename %s does not start with the '
+ 'input_base_dir %s' % (
+ filename, self._input_base_dir))
+ if self._append_suffix:
+ filename += self._append_suffix
+ if orig_filename != filename:
+ output_dir = os.path.dirname(filename)
+ if not os.path.isdir(output_dir):
+ os.makedirs(output_dir)
+ self.log_message('Writing converted %s to %s.', orig_filename,
+ filename)
+ if not self.nobackups:
+ # Make backup
+ backup = filename + ".bak"
+ if os.path.lexists(backup):
+ try:
+ os.remove(backup)
+ except os.error, err:
+ self.log_message("Can't remove backup %s", backup)
+ try:
+ os.rename(filename, backup)
+ except os.error, err:
+ self.log_message("Can't rename %s to %s", filename, backup)
+ # Actually write the new file
+ write = super(StdoutRefactoringTool, self).write_file
+ write(new_text, filename, old_text, encoding)
+ if not self.nobackups:
+ shutil.copymode(backup, filename)
+ if orig_filename != filename:
+ # Preserve the file mode in the new output directory.
+ shutil.copymode(orig_filename, filename)
+
+ def print_output(self, old, new, filename, equal):
+ if equal:
+ self.log_message("No changes to %s", filename)
+ else:
+ self.log_message("Refactored %s", filename)
+ if self.show_diffs:
+ diff_lines = diff_texts(old, new, filename)
+ try:
+ if self.output_lock is not None:
+ with self.output_lock:
+ for line in diff_lines:
+ print line
+ sys.stdout.flush()
+ else:
+ for line in diff_lines:
+ print line
+ except UnicodeEncodeError:
+ warn("couldn't encode %s's diff for your terminal" %
+ (filename,))
+ return
+
+
+def warn(msg):
+ print >> sys.stderr, "WARNING: %s" % (msg,)
+
+
+def main(fixer_pkg, args=None):
+ """Main program.
+
+ Args:
+ fixer_pkg: the name of a package where the fixers are located.
+ args: optional; a list of command line arguments. If omitted,
+ sys.argv[1:] is used.
+
+ Returns a suggested exit status (0, 1, 2).
+ """
+ # Set up option parser
+ parser = optparse.OptionParser(usage="2to3 [options] file|dir ...")
+ parser.add_option("-d", "--doctests_only", action="store_true",
+ help="Fix up doctests only")
+ parser.add_option("-f", "--fix", action="append", default=[],
+ help="Each FIX specifies a transformation; default: all")
+ parser.add_option("-j", "--processes", action="store", default=1,
+ type="int", help="Run 2to3 concurrently")
+ parser.add_option("-x", "--nofix", action="append", default=[],
+ help="Prevent a transformation from being run")
+ parser.add_option("-l", "--list-fixes", action="store_true",
+ help="List available transformations")
+ parser.add_option("-p", "--print-function", action="store_true",
+ help="Modify the grammar so that print() is a function")
+ parser.add_option("-v", "--verbose", action="store_true",
+ help="More verbose logging")
+ parser.add_option("--no-diffs", action="store_true",
+ help="Don't show diffs of the refactoring")
+ parser.add_option("-w", "--write", action="store_true",
+ help="Write back modified files")
+ parser.add_option("-n", "--nobackups", action="store_true", default=False,
+ help="Don't write backups for modified files")
+ parser.add_option("-o", "--output-dir", action="store", type="str",
+ default="", help="Put output files in this directory "
+ "instead of overwriting the input files. Requires -n.")
+ parser.add_option("-W", "--write-unchanged-files", action="store_true",
+ help="Also write files even if no changes were required"
+ " (useful with --output-dir); implies -w.")
+ parser.add_option("--add-suffix", action="store", type="str", default="",
+ help="Append this string to all output filenames."
+ " Requires -n if non-empty. "
+ "ex: --add-suffix='3' will generate .py3 files.")
+
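+    # Illustrative invocations (a sketch; paths are hypothetical):
+    #   2to3 -l                                  # list the available fixers
+    #   2to3 -w src/                             # refactor in place, with .bak backups
+    #   2to3 -w -n -o out3/ --add-suffix=3 src/  # write foo.py3 files under out3/
+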
+ # Parse command line arguments
+ refactor_stdin = False
+ flags = {}
+ options, args = parser.parse_args(args)
+ if options.write_unchanged_files:
+ flags["write_unchanged_files"] = True
+ if not options.write:
+ warn("--write-unchanged-files/-W implies -w.")
+ options.write = True
+ # If we allowed these, the original files would be renamed to backup names
+ # but not replaced.
+ if options.output_dir and not options.nobackups:
+ parser.error("Can't use --output-dir/-o without -n.")
+ if options.add_suffix and not options.nobackups:
+ parser.error("Can't use --add-suffix without -n.")
+
+ if not options.write and options.no_diffs:
+ warn("not writing files and not printing diffs; that's not very useful")
+ if not options.write and options.nobackups:
+ parser.error("Can't use -n without -w")
+ if options.list_fixes:
+ print "Available transformations for the -f/--fix option:"
+ for fixname in refactor.get_all_fix_names(fixer_pkg):
+ print fixname
+ if not args:
+ return 0
+ if not args:
+ print >> sys.stderr, "At least one file or directory argument required."
+ print >> sys.stderr, "Use --help to show usage."
+ return 2
+ if "-" in args:
+ refactor_stdin = True
+ if options.write:
+ print >> sys.stderr, "Can't write to stdin."
+ return 2
+ if options.print_function:
+ flags["print_function"] = True
+
+ # Set up logging handler
+ level = logging.DEBUG if options.verbose else logging.INFO
+ logging.basicConfig(format='%(name)s: %(message)s', level=level)
+ logger = logging.getLogger('lib2to3.main')
+
+ # Initialize the refactoring tool
+ avail_fixes = set(refactor.get_fixers_from_package(fixer_pkg))
+ unwanted_fixes = set(fixer_pkg + ".fix_" + fix for fix in options.nofix)
+ explicit = set()
+ if options.fix:
+ all_present = False
+ for fix in options.fix:
+ if fix == "all":
+ all_present = True
+ else:
+ explicit.add(fixer_pkg + ".fix_" + fix)
+ requested = avail_fixes.union(explicit) if all_present else explicit
+ else:
+ requested = avail_fixes.union(explicit)
+ fixer_names = requested.difference(unwanted_fixes)
+ input_base_dir = os.path.commonprefix(args)
+ if (input_base_dir and not input_base_dir.endswith(os.sep)
+ and not os.path.isdir(input_base_dir)):
+        # One or more similar names were passed; their directory is the base.
+        # os.path.commonprefix() is ignorant of path elements; this corrects
+        # for that weird API.
+ input_base_dir = os.path.dirname(input_base_dir)
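+        # E.g. (hypothetical paths): commonprefix of 'pkg/bar.py' and
+        # 'pkg/baz.py' is 'pkg/ba', which is not a directory, so dirname()
+        # trims it back to 'pkg'.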
+ if options.output_dir:
+ input_base_dir = input_base_dir.rstrip(os.sep)
+ logger.info('Output in %r will mirror the input directory %r layout.',
+ options.output_dir, input_base_dir)
+ rt = StdoutRefactoringTool(
+ sorted(fixer_names), flags, sorted(explicit),
+ options.nobackups, not options.no_diffs,
+ input_base_dir=input_base_dir,
+ output_dir=options.output_dir,
+ append_suffix=options.add_suffix)
+
+ # Refactor all files and directories passed as arguments
+ if not rt.errors:
+ if refactor_stdin:
+ rt.refactor_stdin()
+ else:
+ try:
+ rt.refactor(args, options.write, options.doctests_only,
+ options.processes)
+ except refactor.MultiprocessingUnsupported:
+ assert options.processes > 1
+ print >> sys.stderr, "Sorry, -j isn't " \
+ "supported on this platform."
+ return 1
+ rt.summarize()
+
+ # Return error status (0 if rt.errors is zero)
+ return int(bool(rt.errors))
diff --git a/lib/python2.7/lib2to3/patcomp.py b/lib/python2.7/lib2to3/patcomp.py
new file mode 100644
index 0000000..d31a9da
--- /dev/null
+++ b/lib/python2.7/lib2to3/patcomp.py
@@ -0,0 +1,205 @@
+# Copyright 2006 Google, Inc. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+"""Pattern compiler.
+
+The grammar is taken from PatternGrammar.txt.
+
+The compiler compiles a pattern to a pytree.*Pattern instance.
+"""
+
+__author__ = "Guido van Rossum <guido@python.org>"
+
+# Python imports
+import os
+import StringIO
+
+# Fairly local imports
+from .pgen2 import driver, literals, token, tokenize, parse, grammar
+
+# Really local imports
+from . import pytree
+from . import pygram
+
+# The pattern grammar file
+_PATTERN_GRAMMAR_FILE = os.path.join(os.path.dirname(__file__),
+ "PatternGrammar.txt")
+
+
+class PatternSyntaxError(Exception):
+ pass
+
+
+def tokenize_wrapper(input):
+ """Tokenizes a string suppressing significant whitespace."""
+ skip = set((token.NEWLINE, token.INDENT, token.DEDENT))
+ tokens = tokenize.generate_tokens(StringIO.StringIO(input).readline)
+ for quintuple in tokens:
+ type, value, start, end, line_text = quintuple
+ if type not in skip:
+ yield quintuple
+
+
+class PatternCompiler(object):
+
+ def __init__(self, grammar_file=_PATTERN_GRAMMAR_FILE):
+ """Initializer.
+
+ Takes an optional alternative filename for the pattern grammar.
+ """
+ self.grammar = driver.load_grammar(grammar_file)
+ self.syms = pygram.Symbols(self.grammar)
+ self.pygrammar = pygram.python_grammar
+ self.pysyms = pygram.python_symbols
+ self.driver = driver.Driver(self.grammar, convert=pattern_convert)
+
+ def compile_pattern(self, input, debug=False, with_tree=False):
+ """Compiles a pattern string to a nested pytree.*Pattern object."""
+ tokens = tokenize_wrapper(input)
+ try:
+ root = self.driver.parse_tokens(tokens, debug=debug)
+ except parse.ParseError as e:
+ raise PatternSyntaxError(str(e))
+ if with_tree:
+ return self.compile_node(root), root
+ else:
+ return self.compile_node(root)
+
+ def compile_node(self, node):
+ """Compiles a node, recursively.
+
+ This is one big switch on the node type.
+ """
+ # XXX Optimize certain Wildcard-containing-Wildcard patterns
+ # that can be merged
+ if node.type == self.syms.Matcher:
+ node = node.children[0] # Avoid unneeded recursion
+
+ if node.type == self.syms.Alternatives:
+ # Skip the odd children since they are just '|' tokens
+ alts = [self.compile_node(ch) for ch in node.children[::2]]
+ if len(alts) == 1:
+ return alts[0]
+ p = pytree.WildcardPattern([[a] for a in alts], min=1, max=1)
+ return p.optimize()
+
+ if node.type == self.syms.Alternative:
+ units = [self.compile_node(ch) for ch in node.children]
+ if len(units) == 1:
+ return units[0]
+ p = pytree.WildcardPattern([units], min=1, max=1)
+ return p.optimize()
+
+ if node.type == self.syms.NegatedUnit:
+ pattern = self.compile_basic(node.children[1:])
+ p = pytree.NegatedPattern(pattern)
+ return p.optimize()
+
+ assert node.type == self.syms.Unit
+
+ name = None
+ nodes = node.children
+ if len(nodes) >= 3 and nodes[1].type == token.EQUAL:
+ name = nodes[0].value
+ nodes = nodes[2:]
+ repeat = None
+ if len(nodes) >= 2 and nodes[-1].type == self.syms.Repeater:
+ repeat = nodes[-1]
+ nodes = nodes[:-1]
+
+ # Now we've reduced it to: STRING | NAME [Details] | (...) | [...]
+ pattern = self.compile_basic(nodes, repeat)
+
+ if repeat is not None:
+ assert repeat.type == self.syms.Repeater
+ children = repeat.children
+ child = children[0]
+ if child.type == token.STAR:
+ min = 0
+ max = pytree.HUGE
+ elif child.type == token.PLUS:
+ min = 1
+ max = pytree.HUGE
+ elif child.type == token.LBRACE:
+ assert children[-1].type == token.RBRACE
+ assert len(children) in (3, 5)
+ min = max = self.get_int(children[1])
+ if len(children) == 5:
+ max = self.get_int(children[3])
+ else:
+ assert False
+ if min != 1 or max != 1:
+ pattern = pattern.optimize()
+ pattern = pytree.WildcardPattern([[pattern]], min=min, max=max)
+
+ if name is not None:
+ pattern.name = name
+ return pattern.optimize()
+
+ def compile_basic(self, nodes, repeat=None):
+ # Compile STRING | NAME [Details] | (...) | [...]
+ assert len(nodes) >= 1
+ node = nodes[0]
+ if node.type == token.STRING:
+ value = unicode(literals.evalString(node.value))
+ return pytree.LeafPattern(_type_of_literal(value), value)
+ elif node.type == token.NAME:
+ value = node.value
+ if value.isupper():
+ if value not in TOKEN_MAP:
+ raise PatternSyntaxError("Invalid token: %r" % value)
+ if nodes[1:]:
+ raise PatternSyntaxError("Can't have details for token")
+ return pytree.LeafPattern(TOKEN_MAP[value])
+ else:
+ if value == "any":
+ type = None
+ elif not value.startswith("_"):
+ type = getattr(self.pysyms, value, None)
+ if type is None:
+ raise PatternSyntaxError("Invalid symbol: %r" % value)
+ if nodes[1:]: # Details present
+ content = [self.compile_node(nodes[1].children[1])]
+ else:
+ content = None
+ return pytree.NodePattern(type, content)
+ elif node.value == "(":
+ return self.compile_node(nodes[1])
+ elif node.value == "[":
+ assert repeat is None
+ subpattern = self.compile_node(nodes[1])
+ return pytree.WildcardPattern([[subpattern]], min=0, max=1)
+ assert False, node
+
+ def get_int(self, node):
+ assert node.type == token.NUMBER
+ return int(node.value)
+
+
+# Map named tokens to the type value for a LeafPattern
+TOKEN_MAP = {"NAME": token.NAME,
+ "STRING": token.STRING,
+ "NUMBER": token.NUMBER,
+ "TOKEN": None}
+
+
+def _type_of_literal(value):
+ if value[0].isalpha():
+ return token.NAME
+ elif value in grammar.opmap:
+ return grammar.opmap[value]
+ else:
+ return None
+
+
+def pattern_convert(grammar, raw_node_info):
+ """Converts raw node information to a Node or Leaf instance."""
+ type, value, context, children = raw_node_info
+ if children or type in grammar.number2symbol:
+ return pytree.Node(type, children, context=context)
+ else:
+ return pytree.Leaf(type, value, context=context)
+
+
+def compile_pattern(pattern):
+ return PatternCompiler().compile_pattern(pattern)
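+
+# Illustrative usage (a rough sketch, not part of the module). Fixers compile
+# a pattern once, then match it against pytree nodes; `node` below is a
+# hypothetical node from a parsed source tree.
+#
+#   pat = compile_pattern("power< 'xrange' trailer< '(' args=any ')' > >")
+#   results = {}
+#   if pat.match(node, results):
+#       args = results["args"]  # the subtree bound to the name 'args'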
diff --git a/lib/python2.7/lib2to3/pgen2/__init__.py b/lib/python2.7/lib2to3/pgen2/__init__.py
new file mode 100644
index 0000000..af39048
--- /dev/null
+++ b/lib/python2.7/lib2to3/pgen2/__init__.py
@@ -0,0 +1,4 @@
+# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+"""The pgen2 package."""
diff --git a/lib/python2.7/lib2to3/pgen2/conv.py b/lib/python2.7/lib2to3/pgen2/conv.py
new file mode 100644
index 0000000..28fbb0b
--- /dev/null
+++ b/lib/python2.7/lib2to3/pgen2/conv.py
@@ -0,0 +1,257 @@
+# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+"""Convert graminit.[ch] spit out by pgen to Python code.
+
+Pgen is the Python parser generator. It is useful to quickly create a
+parser from a grammar file in Python's grammar notation. But I don't
+want my parsers to be written in C (yet), so I'm translating the
+parsing tables to Python data structures and writing a Python parse
+engine.
+
+Note that the token numbers are constants determined by the standard
+Python tokenizer. The standard token module defines these numbers and
+their names (the names are not used much). The token numbers are
+hardcoded into the Python tokenizer and into pgen. A Python
+implementation of the Python tokenizer is also available, in the
+standard tokenize module.
+
+On the other hand, symbol numbers (representing the grammar's
+non-terminals) are assigned by pgen based on the actual grammar
+input.
+
+Note: this module is pretty much obsolete; the pgen module generates
+equivalent grammar tables directly from the Grammar.txt input file
+without having to invoke the Python pgen C program.
+
+"""
+
+# Python imports
+import re
+
+# Local imports
+from pgen2 import grammar, token
+
+
+class Converter(grammar.Grammar):
+ """Grammar subclass that reads classic pgen output files.
+
+ The run() method reads the tables as produced by the pgen parser
+ generator, typically contained in two C files, graminit.h and
+ graminit.c. The other methods are for internal use only.
+
+ See the base class for more documentation.
+
+ """
+
+ def run(self, graminit_h, graminit_c):
+ """Load the grammar tables from the text files written by pgen."""
+ self.parse_graminit_h(graminit_h)
+ self.parse_graminit_c(graminit_c)
+ self.finish_off()
+
+ def parse_graminit_h(self, filename):
+ """Parse the .h file written by pgen. (Internal)
+
+ This file is a sequence of #define statements defining the
+ nonterminals of the grammar as numbers. We build two tables
+ mapping the numbers to names and back.
+
+ """
+ try:
+ f = open(filename)
+ except IOError, err:
+ print "Can't open %s: %s" % (filename, err)
+ return False
+ self.symbol2number = {}
+ self.number2symbol = {}
+ lineno = 0
+ for line in f:
+ lineno += 1
+ mo = re.match(r"^#define\s+(\w+)\s+(\d+)$", line)
+ if not mo and line.strip():
+ print "%s(%s): can't parse %s" % (filename, lineno,
+ line.strip())
+ else:
+ symbol, number = mo.groups()
+ number = int(number)
+ assert symbol not in self.symbol2number
+ assert number not in self.number2symbol
+ self.symbol2number[symbol] = number
+ self.number2symbol[number] = symbol
+ return True
+
+ def parse_graminit_c(self, filename):
+ """Parse the .c file written by pgen. (Internal)
+
+ The file looks as follows. The first two lines are always this:
+
+ #include "pgenheaders.h"
+ #include "grammar.h"
+
+ After that come four blocks:
+
+ 1) one or more state definitions
+ 2) a table defining dfas
+ 3) a table defining labels
+ 4) a struct defining the grammar
+
+ A state definition has the following form:
+ - one or more arc arrays, each of the form:
+ static arc arcs_<n>_<m>[<k>] = {
+ {<i>, <j>},
+ ...
+ };
+ - followed by a state array, of the form:
+ static state states_<s>[<t>] = {
+ {<k>, arcs_<n>_<m>},
+ ...
+ };
+
+ """
+ try:
+ f = open(filename)
+ except IOError, err:
+ print "Can't open %s: %s" % (filename, err)
+ return False
+ # The code below essentially uses f's iterator-ness!
+ lineno = 0
+
+ # Expect the two #include lines
+ lineno, line = lineno+1, f.next()
+ assert line == '#include "pgenheaders.h"\n', (lineno, line)
+ lineno, line = lineno+1, f.next()
+ assert line == '#include "grammar.h"\n', (lineno, line)
+
+ # Parse the state definitions
+ lineno, line = lineno+1, f.next()
+ allarcs = {}
+ states = []
+ while line.startswith("static arc "):
+ while line.startswith("static arc "):
+ mo = re.match(r"static arc arcs_(\d+)_(\d+)\[(\d+)\] = {$",
+ line)
+ assert mo, (lineno, line)
+ n, m, k = map(int, mo.groups())
+ arcs = []
+ for _ in range(k):
+ lineno, line = lineno+1, f.next()
+ mo = re.match(r"\s+{(\d+), (\d+)},$", line)
+ assert mo, (lineno, line)
+ i, j = map(int, mo.groups())
+ arcs.append((i, j))
+ lineno, line = lineno+1, f.next()
+ assert line == "};\n", (lineno, line)
+ allarcs[(n, m)] = arcs
+ lineno, line = lineno+1, f.next()
+ mo = re.match(r"static state states_(\d+)\[(\d+)\] = {$", line)
+ assert mo, (lineno, line)
+ s, t = map(int, mo.groups())
+ assert s == len(states), (lineno, line)
+ state = []
+ for _ in range(t):
+ lineno, line = lineno+1, f.next()
+ mo = re.match(r"\s+{(\d+), arcs_(\d+)_(\d+)},$", line)
+ assert mo, (lineno, line)
+ k, n, m = map(int, mo.groups())
+ arcs = allarcs[n, m]
+ assert k == len(arcs), (lineno, line)
+ state.append(arcs)
+ states.append(state)
+ lineno, line = lineno+1, f.next()
+ assert line == "};\n", (lineno, line)
+ lineno, line = lineno+1, f.next()
+ self.states = states
+
+ # Parse the dfas
+ dfas = {}
+ mo = re.match(r"static dfa dfas\[(\d+)\] = {$", line)
+ assert mo, (lineno, line)
+ ndfas = int(mo.group(1))
+ for i in range(ndfas):
+ lineno, line = lineno+1, f.next()
+ mo = re.match(r'\s+{(\d+), "(\w+)", (\d+), (\d+), states_(\d+),$',
+ line)
+ assert mo, (lineno, line)
+ symbol = mo.group(2)
+ number, x, y, z = map(int, mo.group(1, 3, 4, 5))
+ assert self.symbol2number[symbol] == number, (lineno, line)
+ assert self.number2symbol[number] == symbol, (lineno, line)
+ assert x == 0, (lineno, line)
+ state = states[z]
+ assert y == len(state), (lineno, line)
+ lineno, line = lineno+1, f.next()
+ mo = re.match(r'\s+("(?:\\\d\d\d)*")},$', line)
+ assert mo, (lineno, line)
+ first = {}
+ rawbitset = eval(mo.group(1))
+ for i, c in enumerate(rawbitset):
+ byte = ord(c)
+ for j in range(8):
+ if byte & (1<<j):
+ first[i*8 + j] = 1
+ dfas[number] = (state, first)
+ lineno, line = lineno+1, f.next()
+ assert line == "};\n", (lineno, line)
+ self.dfas = dfas
+
+ # Parse the labels
+ labels = []
+ lineno, line = lineno+1, f.next()
+ mo = re.match(r"static label labels\[(\d+)\] = {$", line)
+ assert mo, (lineno, line)
+ nlabels = int(mo.group(1))
+ for i in range(nlabels):
+ lineno, line = lineno+1, f.next()
+ mo = re.match(r'\s+{(\d+), (0|"\w+")},$', line)
+ assert mo, (lineno, line)
+ x, y = mo.groups()
+ x = int(x)
+ if y == "0":
+ y = None
+ else:
+ y = eval(y)
+ labels.append((x, y))
+ lineno, line = lineno+1, f.next()
+ assert line == "};\n", (lineno, line)
+ self.labels = labels
+
+ # Parse the grammar struct
+ lineno, line = lineno+1, f.next()
+ assert line == "grammar _PyParser_Grammar = {\n", (lineno, line)
+ lineno, line = lineno+1, f.next()
+ mo = re.match(r"\s+(\d+),$", line)
+ assert mo, (lineno, line)
+ ndfas = int(mo.group(1))
+ assert ndfas == len(self.dfas)
+ lineno, line = lineno+1, f.next()
+ assert line == "\tdfas,\n", (lineno, line)
+ lineno, line = lineno+1, f.next()
+ mo = re.match(r"\s+{(\d+), labels},$", line)
+ assert mo, (lineno, line)
+ nlabels = int(mo.group(1))
+ assert nlabels == len(self.labels), (lineno, line)
+ lineno, line = lineno+1, f.next()
+ mo = re.match(r"\s+(\d+)$", line)
+ assert mo, (lineno, line)
+ start = int(mo.group(1))
+ assert start in self.number2symbol, (lineno, line)
+ self.start = start
+ lineno, line = lineno+1, f.next()
+ assert line == "};\n", (lineno, line)
+ try:
+ lineno, line = lineno+1, f.next()
+ except StopIteration:
+ pass
+ else:
+ assert 0, (lineno, line)
+
+ def finish_off(self):
+ """Create additional useful structures. (Internal)."""
+ self.keywords = {} # map from keyword strings to arc labels
+ self.tokens = {} # map from numeric token values to arc labels
+ for ilabel, (type, value) in enumerate(self.labels):
+ if type == token.NAME and value is not None:
+ self.keywords[value] = ilabel
+ elif value is None:
+ self.tokens[type] = ilabel
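+
+# After finish_off(), for example, self.keywords["if"] and
+# self.tokens[token.NUMBER] hold the arc labels that the parser engine's
+# classify() method looks up when mapping an incoming token to a label.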
diff --git a/lib/python2.7/lib2to3/pgen2/driver.py b/lib/python2.7/lib2to3/pgen2/driver.py
new file mode 100644
index 0000000..ce601bb
--- /dev/null
+++ b/lib/python2.7/lib2to3/pgen2/driver.py
@@ -0,0 +1,160 @@
+# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+# Modifications:
+# Copyright 2006 Google, Inc. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+"""Parser driver.
+
+This provides a high-level interface to parse a file into a syntax tree.
+
+"""
+
+__author__ = "Guido van Rossum <guido@python.org>"
+
+__all__ = ["Driver", "load_grammar"]
+
+# Python imports
+import codecs
+import os
+import logging
+import StringIO
+import sys
+
+# Pgen imports
+from . import grammar, parse, token, tokenize, pgen
+
+
+class Driver(object):
+
+ def __init__(self, grammar, convert=None, logger=None):
+ self.grammar = grammar
+ if logger is None:
+ logger = logging.getLogger()
+ self.logger = logger
+ self.convert = convert
+
+ def parse_tokens(self, tokens, debug=False):
+ """Parse a series of tokens and return the syntax tree."""
+ # XXX Move the prefix computation into a wrapper around tokenize.
+ p = parse.Parser(self.grammar, self.convert)
+ p.setup()
+ lineno = 1
+ column = 0
+ type = value = start = end = line_text = None
+ prefix = u""
+ for quintuple in tokens:
+ type, value, start, end, line_text = quintuple
+ if start != (lineno, column):
+ assert (lineno, column) <= start, ((lineno, column), start)
+ s_lineno, s_column = start
+ if lineno < s_lineno:
+ prefix += "\n" * (s_lineno - lineno)
+ lineno = s_lineno
+ column = 0
+ if column < s_column:
+ prefix += line_text[column:s_column]
+ column = s_column
+ if type in (tokenize.COMMENT, tokenize.NL):
+ prefix += value
+ lineno, column = end
+ if value.endswith("\n"):
+ lineno += 1
+ column = 0
+ continue
+ if type == token.OP:
+ type = grammar.opmap[value]
+ if debug:
+ self.logger.debug("%s %r (prefix=%r)",
+ token.tok_name[type], value, prefix)
+ if p.addtoken(type, value, (prefix, start)):
+ if debug:
+ self.logger.debug("Stop.")
+ break
+ prefix = ""
+ lineno, column = end
+ if value.endswith("\n"):
+ lineno += 1
+ column = 0
+ else:
+ # We never broke out -- EOF is too soon (how can this happen???)
+ raise parse.ParseError("incomplete input",
+ type, value, (prefix, start))
+ return p.rootnode
+
+ def parse_stream_raw(self, stream, debug=False):
+ """Parse a stream and return the syntax tree."""
+ tokens = tokenize.generate_tokens(stream.readline)
+ return self.parse_tokens(tokens, debug)
+
+ def parse_stream(self, stream, debug=False):
+ """Parse a stream and return the syntax tree."""
+ return self.parse_stream_raw(stream, debug)
+
+ def parse_file(self, filename, encoding=None, debug=False):
+ """Parse a file and return the syntax tree."""
+ stream = codecs.open(filename, "r", encoding)
+ try:
+ return self.parse_stream(stream, debug)
+ finally:
+ stream.close()
+
+ def parse_string(self, text, debug=False):
+ """Parse a string and return the syntax tree."""
+ tokens = tokenize.generate_tokens(StringIO.StringIO(text).readline)
+ return self.parse_tokens(tokens, debug)
+
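+# A minimal usage sketch (assumes a lib2to3-style Grammar.txt is available):
+#
+#   d = Driver(load_grammar("Grammar.txt"))
+#   tree = d.parse_string(u"x = 1\n")
+#   print unicode(tree)   # reproduces the input source exactly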
+
+def _generate_pickle_name(gt):
+ head, tail = os.path.splitext(gt)
+ if tail == ".txt":
+ tail = ""
+ return head + tail + ".".join(map(str, sys.version_info)) + ".pickle"
+
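+# For instance, under Python 2.7.13 this maps "Grammar.txt" to
+# "Grammar2.7.13.final.0.pickle"; the suffix tracks sys.version_info of the
+# running interpreter.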
+
+def load_grammar(gt="Grammar.txt", gp=None,
+ save=True, force=False, logger=None):
+ """Load the grammar (maybe from a pickle)."""
+ if logger is None:
+ logger = logging.getLogger()
+ gp = _generate_pickle_name(gt) if gp is None else gp
+ if force or not _newer(gp, gt):
+ logger.info("Generating grammar tables from %s", gt)
+ g = pgen.generate_grammar(gt)
+ if save:
+ logger.info("Writing grammar tables to %s", gp)
+ try:
+ g.dump(gp)
+ except IOError as e:
+ logger.info("Writing failed: %s", e)
+ else:
+ g = grammar.Grammar()
+ g.load(gp)
+ return g
+
+
+def _newer(a, b):
+ """Inquire whether file a was written since file b."""
+ if not os.path.exists(a):
+ return False
+ if not os.path.exists(b):
+ return True
+ return os.path.getmtime(a) >= os.path.getmtime(b)
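+# In load_grammar() above, "not _newer(gp, gt)" therefore means the pickle is
+# missing or older than the grammar text, so the tables are regenerated.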
+
+
+def main(*args):
+ """Main program, when run as a script: produce grammar pickle files.
+
+ Calls load_grammar for each argument, a path to a grammar text file.
+ """
+ if not args:
+ args = sys.argv[1:]
+ logging.basicConfig(level=logging.INFO, stream=sys.stdout,
+ format='%(message)s')
+ for gt in args:
+ load_grammar(gt, save=True, force=True)
+ return True
+
+if __name__ == "__main__":
+ sys.exit(int(not main()))
diff --git a/lib/python2.7/lib2to3/pgen2/grammar.py b/lib/python2.7/lib2to3/pgen2/grammar.py
new file mode 100644
index 0000000..75255e9
--- /dev/null
+++ b/lib/python2.7/lib2to3/pgen2/grammar.py
@@ -0,0 +1,208 @@
+# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+"""This module defines the data structures used to represent a grammar.
+
+These are a bit arcane because they are derived from the data
+structures used by Python's 'pgen' parser generator.
+
+There's also a table here mapping operators to their names in the
+token module; the Python tokenize module reports all operators as the
+fallback token code OP, but the parser needs the actual token code.
+
+"""
+
+# Python imports
+import collections
+import pickle
+
+# Local imports
+from . import token, tokenize
+
+
+class Grammar(object):
+ """Pgen parsing tables conversion class.
+
+ Once initialized, this class supplies the grammar tables for the
+ parsing engine implemented by parse.py. The parsing engine
+ accesses the instance variables directly. The class here does not
+ provide initialization of the tables; several subclasses exist to
+ do this (see the conv and pgen modules).
+
+ The load() method reads the tables from a pickle file, which is
+ much faster than the other ways offered by subclasses. The pickle
+ file is written by calling dump() (after loading the grammar
+ tables using a subclass). The report() method prints a readable
+ representation of the tables to stdout, for debugging.
+
+ The instance variables are as follows:
+
+ symbol2number -- a dict mapping symbol names to numbers. Symbol
+ numbers are always 256 or higher, to distinguish
+ them from token numbers, which are between 0 and
+ 255 (inclusive).
+
+ number2symbol -- a dict mapping numbers to symbol names;
+ these two are each other's inverse.
+
+ states -- a list of DFAs, where each DFA is a list of
+ states, each state is a list of arcs, and each
+ arc is a (i, j) pair where i is a label and j is
+ a state number. The DFA number is the index into
+ this list. (This name is slightly confusing.)
+ Final states are represented by a special arc of
+ the form (0, j) where j is its own state number.
+
+ dfas -- a dict mapping symbol numbers to (DFA, first)
+ pairs, where DFA is an item from the states list
+ above, and first is a set of tokens that can
+ begin this grammar rule (represented by a dict
+ whose values are always 1).
+
+ labels -- a list of (x, y) pairs where x is either a token
+ number or a symbol number, and y is either None
+ or a string; the strings are keywords. The label
+ number is the index in this list; label numbers
+ are used to mark state transitions (arcs) in the
+ DFAs.
+
+ start -- the number of the grammar's start symbol.
+
+ keywords -- a dict mapping keyword strings to arc labels.
+
+ tokens -- a dict mapping token numbers to arc labels.
+
+ """
+
+ def __init__(self):
+ self.symbol2number = {}
+ self.number2symbol = {}
+ self.states = []
+ self.dfas = {}
+ self.labels = [(0, "EMPTY")]
+ self.keywords = {}
+ self.tokens = {}
+ self.symbol2label = {}
+ self.start = 256
+
+ def dump(self, filename):
+ """Dump the grammar tables to a pickle file.
+
+ dump() recursively changes all dict to OrderedDict, so the pickled file
+ is not exactly the same as what was passed in to dump(). load() uses the
+ pickled file to create the tables, but only changes OrderedDict to dict
+ at the top level; it does not recursively change OrderedDict to dict.
+ So, the loaded tables are different from the original tables that were
+        passed to dump() in that some of the OrderedDict (from the pickled file)
+ are not changed back to dict. For parsing, this has no effect on
+ performance because OrderedDict uses dict's __getitem__ with nothing in
+ between.
+ """
+ with open(filename, "wb") as f:
+ d = _make_deterministic(self.__dict__)
+ pickle.dump(d, f, 2)
+
+    def load(self, filename):
+        """Load the grammar tables from a pickle file."""
+        with open(filename, "rb") as f:
+            d = pickle.load(f)
+        self.__dict__.update(d)
+
+ def copy(self):
+ """
+ Copy the grammar.
+ """
+ new = self.__class__()
+ for dict_attr in ("symbol2number", "number2symbol", "dfas", "keywords",
+ "tokens", "symbol2label"):
+ setattr(new, dict_attr, getattr(self, dict_attr).copy())
+ new.labels = self.labels[:]
+ new.states = self.states[:]
+ new.start = self.start
+ return new
+
+ def report(self):
+ """Dump the grammar tables to standard output, for debugging."""
+ from pprint import pprint
+ print "s2n"
+ pprint(self.symbol2number)
+ print "n2s"
+ pprint(self.number2symbol)
+ print "states"
+ pprint(self.states)
+ print "dfas"
+ pprint(self.dfas)
+ print "labels"
+ pprint(self.labels)
+ print "start", self.start
+
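+# Shape sketch of the tables above, with illustrative (not real) values for a
+# one-rule grammar:
+#
+#   symbol2number = {"rule": 256}; number2symbol = {256: "rule"}
+#   states = [[[(1, 1)], [(0, 1)]]]    # one DFA: arc on label 1, then final
+#   dfas = {256: (states[0], {1: 1})}  # label 1 is in the rule's first set
+#   labels = [(0, "EMPTY"), (token.NAME, None)]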
+
+def _make_deterministic(top):
+ if isinstance(top, dict):
+ return collections.OrderedDict(
+ sorted(((k, _make_deterministic(v)) for k, v in top.iteritems())))
+ if isinstance(top, list):
+ return [_make_deterministic(e) for e in top]
+ if isinstance(top, tuple):
+ return tuple(_make_deterministic(e) for e in top)
+ return top
+
+
+# Map from operator to number (since tokenize doesn't do this)
+
+opmap_raw = """
+( LPAR
+) RPAR
+[ LSQB
+] RSQB
+: COLON
+, COMMA
+; SEMI
++ PLUS
+- MINUS
+* STAR
+/ SLASH
+| VBAR
+& AMPER
+< LESS
+> GREATER
+= EQUAL
+. DOT
+% PERCENT
+` BACKQUOTE
+{ LBRACE
+} RBRACE
+@ AT
+@= ATEQUAL
+== EQEQUAL
+!= NOTEQUAL
+<> NOTEQUAL
+<= LESSEQUAL
+>= GREATEREQUAL
+~ TILDE
+^ CIRCUMFLEX
+<< LEFTSHIFT
+>> RIGHTSHIFT
+** DOUBLESTAR
++= PLUSEQUAL
+-= MINEQUAL
+*= STAREQUAL
+/= SLASHEQUAL
+%= PERCENTEQUAL
+&= AMPEREQUAL
+|= VBAREQUAL
+^= CIRCUMFLEXEQUAL
+<<= LEFTSHIFTEQUAL
+>>= RIGHTSHIFTEQUAL
+**= DOUBLESTAREQUAL
+// DOUBLESLASH
+//= DOUBLESLASHEQUAL
+-> RARROW
+"""
+
+opmap = {}
+for line in opmap_raw.splitlines():
+ if line:
+ op, name = line.split()
+ opmap[op] = getattr(token, name)
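+
+# For example, opmap["=="] is token.EQEQUAL: the tokenizer reports every
+# operator as the generic OP token, and the parser uses this table to recover
+# the specific code.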
diff --git a/lib/python2.7/lib2to3/pgen2/literals.py b/lib/python2.7/lib2to3/pgen2/literals.py
new file mode 100644
index 0000000..0b3948a
--- /dev/null
+++ b/lib/python2.7/lib2to3/pgen2/literals.py
@@ -0,0 +1,60 @@
+# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+"""Safely evaluate Python string literals without using eval()."""
+
+import re
+
+simple_escapes = {"a": "\a",
+ "b": "\b",
+ "f": "\f",
+ "n": "\n",
+ "r": "\r",
+ "t": "\t",
+ "v": "\v",
+ "'": "'",
+ '"': '"',
+ "\\": "\\"}
+
+def escape(m):
+ all, tail = m.group(0, 1)
+ assert all.startswith("\\")
+ esc = simple_escapes.get(tail)
+ if esc is not None:
+ return esc
+ if tail.startswith("x"):
+ hexes = tail[1:]
+ if len(hexes) < 2:
+ raise ValueError("invalid hex string escape ('\\%s')" % tail)
+ try:
+ i = int(hexes, 16)
+ except ValueError:
+ raise ValueError("invalid hex string escape ('\\%s')" % tail)
+ else:
+ try:
+ i = int(tail, 8)
+ except ValueError:
+ raise ValueError("invalid octal string escape ('\\%s')" % tail)
+ return chr(i)
+
+def evalString(s):
+ assert s.startswith("'") or s.startswith('"'), repr(s[:1])
+ q = s[0]
+ if s[:3] == q*3:
+ q = q*3
+ assert s.endswith(q), repr(s[-len(q):])
+ assert len(s) >= 2*len(q)
+ s = s[len(q):-len(q)]
+ return re.sub(r"\\(\'|\"|\\|[abfnrtv]|x.{0,2}|[0-7]{1,3})", escape, s)
+
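+# For example, evalString("'a\\x41\\101\\n'") returns 'aAA\n': hex, octal and
+# simple escapes are all decoded by escape() without calling eval().
+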
+def test():
+ for i in range(256):
+ c = chr(i)
+ s = repr(c)
+ e = evalString(s)
+ if e != c:
+ print i, c, s, e
+
+
+if __name__ == "__main__":
+ test()
diff --git a/lib/python2.7/lib2to3/pgen2/parse.py b/lib/python2.7/lib2to3/pgen2/parse.py
new file mode 100644
index 0000000..6bebdbb
--- /dev/null
+++ b/lib/python2.7/lib2to3/pgen2/parse.py
@@ -0,0 +1,201 @@
+# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+"""Parser engine for the grammar tables generated by pgen.
+
+The grammar table must be loaded first.
+
+See Parser/parser.c in the Python distribution for additional info on
+how this parsing engine works.
+
+"""
+
+# Local imports
+from . import token
+
+class ParseError(Exception):
+ """Exception to signal the parser is stuck."""
+
+ def __init__(self, msg, type, value, context):
+ Exception.__init__(self, "%s: type=%r, value=%r, context=%r" %
+ (msg, type, value, context))
+ self.msg = msg
+ self.type = type
+ self.value = value
+ self.context = context
+
+class Parser(object):
+ """Parser engine.
+
+ The proper usage sequence is:
+
+ p = Parser(grammar, [converter]) # create instance
+ p.setup([start]) # prepare for parsing
+ <for each input token>:
+ if p.addtoken(...): # parse a token; may raise ParseError
+ break
+ root = p.rootnode # root of abstract syntax tree
+
+ A Parser instance may be reused by calling setup() repeatedly.
+
+ A Parser instance contains state pertaining to the current token
+ sequence, and should not be used concurrently by different threads
+ to parse separate token sequences.
+
+ See driver.py for how to get input tokens by tokenizing a file or
+ string.
+
+ Parsing is complete when addtoken() returns True; the root of the
+ abstract syntax tree can then be retrieved from the rootnode
+ instance variable. When a syntax error occurs, addtoken() raises
+ the ParseError exception. There is no error recovery; the parser
+ cannot be used after a syntax error was reported (but it can be
+ reinitialized by calling setup()).
+
+ """
+
+ def __init__(self, grammar, convert=None):
+ """Constructor.
+
+ The grammar argument is a grammar.Grammar instance; see the
+ grammar module for more information.
+
+ The parser is not ready yet for parsing; you must call the
+ setup() method to get it started.
+
+ The optional convert argument is a function mapping concrete
+ syntax tree nodes to abstract syntax tree nodes. If not
+ given, no conversion is done and the syntax tree produced is
+ the concrete syntax tree. If given, it must be a function of
+ two arguments, the first being the grammar (a grammar.Grammar
+ instance), and the second being the concrete syntax tree node
+ to be converted. The syntax tree is converted from the bottom
+ up.
+
+ A concrete syntax tree node is a (type, value, context, nodes)
+ tuple, where type is the node type (a token or symbol number),
+ value is None for symbols and a string for tokens, context is
+ None or an opaque value used for error reporting (typically a
+ (lineno, offset) pair), and nodes is a list of children for
+ symbols, and None for tokens.
+
+ An abstract syntax tree node may be anything; this is entirely
+ up to the converter function.
+
+ """
+ self.grammar = grammar
+ self.convert = convert or (lambda grammar, node: node)
+
+ def setup(self, start=None):
+ """Prepare for parsing.
+
+ This *must* be called before starting to parse.
+
+ The optional argument is an alternative start symbol; it
+ defaults to the grammar's start symbol.
+
+ You can use a Parser instance to parse any number of programs;
+ each time you call setup() the parser is reset to an initial
+ state determined by the (implicit or explicit) start symbol.
+
+ """
+ if start is None:
+ start = self.grammar.start
+ # Each stack entry is a tuple: (dfa, state, node).
+ # A node is a tuple: (type, value, context, children),
+ # where children is a list of nodes or None, and context may be None.
+ newnode = (start, None, None, [])
+ stackentry = (self.grammar.dfas[start], 0, newnode)
+ self.stack = [stackentry]
+ self.rootnode = None
+ self.used_names = set() # Aliased to self.rootnode.used_names in pop()
+
+ def addtoken(self, type, value, context):
+ """Add a token; return True iff this is the end of the program."""
+ # Map from token to label
+ ilabel = self.classify(type, value, context)
+ # Loop until the token is shifted; may raise exceptions
+ while True:
+ dfa, state, node = self.stack[-1]
+ states, first = dfa
+ arcs = states[state]
+ # Look for a state with this label
+ for i, newstate in arcs:
+ t, v = self.grammar.labels[i]
+ if ilabel == i:
+ # Look it up in the list of labels
+ assert t < 256
+ # Shift a token; we're done with it
+ self.shift(type, value, newstate, context)
+ # Pop while we are in an accept-only state
+ state = newstate
+ while states[state] == [(0, state)]:
+ self.pop()
+ if not self.stack:
+ # Done parsing!
+ return True
+ dfa, state, node = self.stack[-1]
+ states, first = dfa
+ # Done with this token
+ return False
+ elif t >= 256:
+ # See if it's a symbol and if we're in its first set
+ itsdfa = self.grammar.dfas[t]
+ itsstates, itsfirst = itsdfa
+ if ilabel in itsfirst:
+ # Push a symbol
+ self.push(t, self.grammar.dfas[t], newstate, context)
+ break # To continue the outer while loop
+ else:
+ if (0, state) in arcs:
+ # An accepting state, pop it and try something else
+ self.pop()
+ if not self.stack:
+ # Done parsing, but another token is input
+ raise ParseError("too much input",
+ type, value, context)
+ else:
+ # No success finding a transition
+ raise ParseError("bad input", type, value, context)
+
+ def classify(self, type, value, context):
+ """Turn a token into a label. (Internal)"""
+ if type == token.NAME:
+ # Keep a listing of all used names
+ self.used_names.add(value)
+ # Check for reserved words
+ ilabel = self.grammar.keywords.get(value)
+ if ilabel is not None:
+ return ilabel
+ ilabel = self.grammar.tokens.get(type)
+ if ilabel is None:
+ raise ParseError("bad token", type, value, context)
+ return ilabel
+
+ def shift(self, type, value, newstate, context):
+ """Shift a token. (Internal)"""
+ dfa, state, node = self.stack[-1]
+ newnode = (type, value, context, None)
+ newnode = self.convert(self.grammar, newnode)
+ if newnode is not None:
+ node[-1].append(newnode)
+ self.stack[-1] = (dfa, newstate, node)
+
+ def push(self, type, newdfa, newstate, context):
+ """Push a nonterminal. (Internal)"""
+ dfa, state, node = self.stack[-1]
+ newnode = (type, None, context, [])
+ self.stack[-1] = (dfa, newstate, node)
+ self.stack.append((newdfa, 0, newnode))
+
+ def pop(self):
+ """Pop a nonterminal. (Internal)"""
+ popdfa, popstate, popnode = self.stack.pop()
+ newnode = self.convert(self.grammar, popnode)
+ if newnode is not None:
+ if self.stack:
+ dfa, state, node = self.stack[-1]
+ node[-1].append(newnode)
+ else:
+ self.rootnode = newnode
+ self.rootnode.used_names = self.used_names
diff --git a/lib/python2.7/lib2to3/pgen2/pgen.py b/lib/python2.7/lib2to3/pgen2/pgen.py
new file mode 100644
index 0000000..ed16992
--- /dev/null
+++ b/lib/python2.7/lib2to3/pgen2/pgen.py
@@ -0,0 +1,386 @@
+# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+# Pgen imports
+from . import grammar, token, tokenize
+
+class PgenGrammar(grammar.Grammar):
+ pass
+
+class ParserGenerator(object):
+
+ def __init__(self, filename, stream=None):
+ close_stream = None
+ if stream is None:
+ stream = open(filename)
+ close_stream = stream.close
+ self.filename = filename
+ self.stream = stream
+ self.generator = tokenize.generate_tokens(stream.readline)
+ self.gettoken() # Initialize lookahead
+ self.dfas, self.startsymbol = self.parse()
+ if close_stream is not None:
+ close_stream()
+ self.first = {} # map from symbol name to set of tokens
+ self.addfirstsets()
+
+ def make_grammar(self):
+ c = PgenGrammar()
+ names = self.dfas.keys()
+ names.sort()
+ names.remove(self.startsymbol)
+ names.insert(0, self.startsymbol)
+ for name in names:
+ i = 256 + len(c.symbol2number)
+ c.symbol2number[name] = i
+ c.number2symbol[i] = name
+ for name in names:
+ dfa = self.dfas[name]
+ states = []
+ for state in dfa:
+ arcs = []
+ for label, next in sorted(state.arcs.iteritems()):
+ arcs.append((self.make_label(c, label), dfa.index(next)))
+ if state.isfinal:
+ arcs.append((0, dfa.index(state)))
+ states.append(arcs)
+ c.states.append(states)
+ c.dfas[c.symbol2number[name]] = (states, self.make_first(c, name))
+ c.start = c.symbol2number[self.startsymbol]
+ return c
+
+ def make_first(self, c, name):
+ rawfirst = self.first[name]
+ first = {}
+ for label in sorted(rawfirst):
+ ilabel = self.make_label(c, label)
+ ##assert ilabel not in first # XXX failed on <> ... !=
+ first[ilabel] = 1
+ return first
+
+ def make_label(self, c, label):
+ # XXX Maybe this should be a method on a subclass of converter?
+ ilabel = len(c.labels)
+ if label[0].isalpha():
+ # Either a symbol name or a named token
+ if label in c.symbol2number:
+ # A symbol name (a non-terminal)
+ if label in c.symbol2label:
+ return c.symbol2label[label]
+ else:
+ c.labels.append((c.symbol2number[label], None))
+ c.symbol2label[label] = ilabel
+ return ilabel
+ else:
+ # A named token (NAME, NUMBER, STRING)
+ itoken = getattr(token, label, None)
+ assert isinstance(itoken, int), label
+ assert itoken in token.tok_name, label
+ if itoken in c.tokens:
+ return c.tokens[itoken]
+ else:
+ c.labels.append((itoken, None))
+ c.tokens[itoken] = ilabel
+ return ilabel
+ else:
+ # Either a keyword or an operator
+ assert label[0] in ('"', "'"), label
+ value = eval(label)
+ if value[0].isalpha():
+ # A keyword
+ if value in c.keywords:
+ return c.keywords[value]
+ else:
+ c.labels.append((token.NAME, value))
+ c.keywords[value] = ilabel
+ return ilabel
+ else:
+ # An operator (any non-numeric token)
+ itoken = grammar.opmap[value] # Fails if unknown token
+ if itoken in c.tokens:
+ return c.tokens[itoken]
+ else:
+ c.labels.append((itoken, None))
+ c.tokens[itoken] = ilabel
+ return ilabel
+
+ def addfirstsets(self):
+ names = self.dfas.keys()
+ names.sort()
+ for name in names:
+ if name not in self.first:
+ self.calcfirst(name)
+ #print name, self.first[name].keys()
+
+ def calcfirst(self, name):
+ dfa = self.dfas[name]
+ self.first[name] = None # dummy to detect left recursion
+ state = dfa[0]
+ totalset = {}
+ overlapcheck = {}
+ for label, next in state.arcs.iteritems():
+ if label in self.dfas:
+ if label in self.first:
+ fset = self.first[label]
+ if fset is None:
+ raise ValueError("recursion for rule %r" % name)
+ else:
+ self.calcfirst(label)
+ fset = self.first[label]
+ totalset.update(fset)
+ overlapcheck[label] = fset
+ else:
+ totalset[label] = 1
+ overlapcheck[label] = {label: 1}
+ inverse = {}
+ for label, itsfirst in overlapcheck.iteritems():
+ for symbol in itsfirst:
+ if symbol in inverse:
+ raise ValueError("rule %s is ambiguous; %s is in the"
+ " first sets of %s as well as %s" %
+ (name, symbol, label, inverse[symbol]))
+ inverse[symbol] = label
+ self.first[name] = totalset
+
+ def parse(self):
+ dfas = {}
+ startsymbol = None
+ # MSTART: (NEWLINE | RULE)* ENDMARKER
+ while self.type != token.ENDMARKER:
+ while self.type == token.NEWLINE:
+ self.gettoken()
+ # RULE: NAME ':' RHS NEWLINE
+ name = self.expect(token.NAME)
+ self.expect(token.OP, ":")
+ a, z = self.parse_rhs()
+ self.expect(token.NEWLINE)
+ #self.dump_nfa(name, a, z)
+ dfa = self.make_dfa(a, z)
+ #self.dump_dfa(name, dfa)
+ oldlen = len(dfa)
+ self.simplify_dfa(dfa)
+ newlen = len(dfa)
+ dfas[name] = dfa
+ #print name, oldlen, newlen
+ if startsymbol is None:
+ startsymbol = name
+ return dfas, startsymbol
+
+ def make_dfa(self, start, finish):
+ # To turn an NFA into a DFA, we define the states of the DFA
+ # to correspond to *sets* of states of the NFA. Then do some
+ # state reduction. Let's represent sets as dicts with 1 for
+ # values.
+ assert isinstance(start, NFAState)
+ assert isinstance(finish, NFAState)
+ def closure(state):
+ base = {}
+ addclosure(state, base)
+ return base
+ def addclosure(state, base):
+ assert isinstance(state, NFAState)
+ if state in base:
+ return
+ base[state] = 1
+ for label, next in state.arcs:
+ if label is None:
+ addclosure(next, base)
+ states = [DFAState(closure(start), finish)]
+ for state in states: # NB states grows while we're iterating
+ arcs = {}
+ for nfastate in state.nfaset:
+ for label, next in nfastate.arcs:
+ if label is not None:
+ addclosure(next, arcs.setdefault(label, {}))
+ for label, nfaset in sorted(arcs.iteritems()):
+ for st in states:
+ if st.nfaset == nfaset:
+ break
+ else:
+ st = DFAState(nfaset, finish)
+ states.append(st)
+ state.addarc(st, label)
+ return states # List of DFAState instances; first one is start
+
+ def dump_nfa(self, name, start, finish):
+ print "Dump of NFA for", name
+ todo = [start]
+ for i, state in enumerate(todo):
+ print " State", i, state is finish and "(final)" or ""
+ for label, next in state.arcs:
+ if next in todo:
+ j = todo.index(next)
+ else:
+ j = len(todo)
+ todo.append(next)
+ if label is None:
+ print " -> %d" % j
+ else:
+ print " %s -> %d" % (label, j)
+
+ def dump_dfa(self, name, dfa):
+ print "Dump of DFA for", name
+ for i, state in enumerate(dfa):
+ print " State", i, state.isfinal and "(final)" or ""
+ for label, next in sorted(state.arcs.iteritems()):
+ print " %s -> %d" % (label, dfa.index(next))
+
+ def simplify_dfa(self, dfa):
+ # This is not theoretically optimal, but works well enough.
+ # Algorithm: repeatedly look for two states that have the same
+ # set of arcs (same labels pointing to the same nodes) and
+ # unify them, until things stop changing.
+
+ # dfa is a list of DFAState instances
+ changes = True
+ while changes:
+ changes = False
+ for i, state_i in enumerate(dfa):
+ for j in range(i+1, len(dfa)):
+ state_j = dfa[j]
+ if state_i == state_j:
+ #print " unify", i, j
+ del dfa[j]
+ for state in dfa:
+ state.unifystate(state_j, state_i)
+ changes = True
+ break
+
+ def parse_rhs(self):
+ # RHS: ALT ('|' ALT)*
+ a, z = self.parse_alt()
+ if self.value != "|":
+ return a, z
+ else:
+ aa = NFAState()
+ zz = NFAState()
+ aa.addarc(a)
+ z.addarc(zz)
+ while self.value == "|":
+ self.gettoken()
+ a, z = self.parse_alt()
+ aa.addarc(a)
+ z.addarc(zz)
+ return aa, zz
+
+ def parse_alt(self):
+ # ALT: ITEM+
+ a, b = self.parse_item()
+ while (self.value in ("(", "[") or
+ self.type in (token.NAME, token.STRING)):
+ c, d = self.parse_item()
+ b.addarc(c)
+ b = d
+ return a, b
+
+ def parse_item(self):
+ # ITEM: '[' RHS ']' | ATOM ['+' | '*']
+ if self.value == "[":
+ self.gettoken()
+ a, z = self.parse_rhs()
+ self.expect(token.OP, "]")
+ a.addarc(z)
+ return a, z
+ else:
+ a, z = self.parse_atom()
+ value = self.value
+ if value not in ("+", "*"):
+ return a, z
+ self.gettoken()
+ z.addarc(a)
+ if value == "+":
+ return a, z
+ else:
+ return a, a
+
+ def parse_atom(self):
+ # ATOM: '(' RHS ')' | NAME | STRING
+ if self.value == "(":
+ self.gettoken()
+ a, z = self.parse_rhs()
+ self.expect(token.OP, ")")
+ return a, z
+ elif self.type in (token.NAME, token.STRING):
+ a = NFAState()
+ z = NFAState()
+ a.addarc(z, self.value)
+ self.gettoken()
+ return a, z
+ else:
+ self.raise_error("expected (...) or NAME or STRING, got %s/%s",
+ self.type, self.value)
+
+ def expect(self, type, value=None):
+ if self.type != type or (value is not None and self.value != value):
+ self.raise_error("expected %s/%s, got %s/%s",
+ type, value, self.type, self.value)
+ value = self.value
+ self.gettoken()
+ return value
+
+ def gettoken(self):
+ tup = self.generator.next()
+ while tup[0] in (tokenize.COMMENT, tokenize.NL):
+ tup = self.generator.next()
+ self.type, self.value, self.begin, self.end, self.line = tup
+ #print token.tok_name[self.type], repr(self.value)
+
+ def raise_error(self, msg, *args):
+ if args:
+ try:
+ msg = msg % args
+ except:
+ msg = " ".join([msg] + map(str, args))
+ raise SyntaxError(msg, (self.filename, self.end[0],
+ self.end[1], self.line))
+
+class NFAState(object):
+
+ def __init__(self):
+ self.arcs = [] # list of (label, NFAState) pairs
+
+ def addarc(self, next, label=None):
+ assert label is None or isinstance(label, str)
+ assert isinstance(next, NFAState)
+ self.arcs.append((label, next))
+
+class DFAState(object):
+
+ def __init__(self, nfaset, final):
+ assert isinstance(nfaset, dict)
+ assert isinstance(iter(nfaset).next(), NFAState)
+ assert isinstance(final, NFAState)
+ self.nfaset = nfaset
+ self.isfinal = final in nfaset
+ self.arcs = {} # map from label to DFAState
+
+ def addarc(self, next, label):
+ assert isinstance(label, str)
+ assert label not in self.arcs
+ assert isinstance(next, DFAState)
+ self.arcs[label] = next
+
+ def unifystate(self, old, new):
+ for label, next in self.arcs.iteritems():
+ if next is old:
+ self.arcs[label] = new
+
+ def __eq__(self, other):
+ # Equality test -- ignore the nfaset instance variable
+ assert isinstance(other, DFAState)
+ if self.isfinal != other.isfinal:
+ return False
+ # Can't just return self.arcs == other.arcs, because that
+ # would invoke this method recursively, with cycles...
+ if len(self.arcs) != len(other.arcs):
+ return False
+ for label, next in self.arcs.iteritems():
+ if next is not other.arcs.get(label):
+ return False
+ return True
+
+ __hash__ = None # For Py3 compatibility.
+
+def generate_grammar(filename="Grammar.txt"):
+ p = ParserGenerator(filename)
+ return p.make_grammar()
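+
+# generate_grammar() is what driver.load_grammar() calls when no fresh pickle
+# exists: each rule's right-hand side is parsed into an NFA, converted to a
+# DFA by make_dfa(), simplified, and emitted via make_grammar() in the table
+# layout documented in grammar.Grammar.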
diff --git a/lib/python2.7/lib2to3/pgen2/token.py b/lib/python2.7/lib2to3/pgen2/token.py
new file mode 100644
index 0000000..9b35b1f
--- /dev/null
+++ b/lib/python2.7/lib2to3/pgen2/token.py
@@ -0,0 +1,83 @@
+#!/usr/bin/env python2
+
+"""Token constants (from "token.h")."""
+
+# Taken from Python (r53757) and modified to include some tokens
+# originally monkeypatched in by pgen2.tokenize
+
+#--start constants--
+ENDMARKER = 0
+NAME = 1
+NUMBER = 2
+STRING = 3
+NEWLINE = 4
+INDENT = 5
+DEDENT = 6
+LPAR = 7
+RPAR = 8
+LSQB = 9
+RSQB = 10
+COLON = 11
+COMMA = 12
+SEMI = 13
+PLUS = 14
+MINUS = 15
+STAR = 16
+SLASH = 17
+VBAR = 18
+AMPER = 19
+LESS = 20
+GREATER = 21
+EQUAL = 22
+DOT = 23
+PERCENT = 24
+BACKQUOTE = 25
+LBRACE = 26
+RBRACE = 27
+EQEQUAL = 28
+NOTEQUAL = 29
+LESSEQUAL = 30
+GREATEREQUAL = 31
+TILDE = 32
+CIRCUMFLEX = 33
+LEFTSHIFT = 34
+RIGHTSHIFT = 35
+DOUBLESTAR = 36
+PLUSEQUAL = 37
+MINEQUAL = 38
+STAREQUAL = 39
+SLASHEQUAL = 40
+PERCENTEQUAL = 41
+AMPEREQUAL = 42
+VBAREQUAL = 43
+CIRCUMFLEXEQUAL = 44
+LEFTSHIFTEQUAL = 45
+RIGHTSHIFTEQUAL = 46
+DOUBLESTAREQUAL = 47
+DOUBLESLASH = 48
+DOUBLESLASHEQUAL = 49
+AT = 50
+ATEQUAL = 51
+OP = 52
+COMMENT = 53
+NL = 54
+RARROW = 55
+ERRORTOKEN = 56
+N_TOKENS = 57
+NT_OFFSET = 256
+#--end constants--
+
+tok_name = {}
+for _name, _value in globals().items():
+ if type(_value) is type(0):
+ tok_name[_value] = _name
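+# e.g. tok_name[NAME] == 'NAME'; the loop above inverts the constants for
+# debugging output.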
+
+
+def ISTERMINAL(x):
+ return x < NT_OFFSET
+
+def ISNONTERMINAL(x):
+ return x >= NT_OFFSET
+
+def ISEOF(x):
+ return x == ENDMARKER
diff --git a/lib/python2.7/lib2to3/pgen2/tokenize.py b/lib/python2.7/lib2to3/pgen2/tokenize.py
new file mode 100644
index 0000000..8cae873
--- /dev/null
+++ b/lib/python2.7/lib2to3/pgen2/tokenize.py
@@ -0,0 +1,502 @@
+# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006 Python Software Foundation.
+# All rights reserved.
+
+"""Tokenization help for Python programs.
+
+generate_tokens(readline) is a generator that breaks a stream of
+text into Python tokens. It accepts a readline-like method which is called
+repeatedly to get the next line of input (or "" for EOF). It generates
+5-tuples with these members:
+
+ the token type (see token.py)
+ the token (a string)
+ the starting (row, column) indices of the token (a 2-tuple of ints)
+ the ending (row, column) indices of the token (a 2-tuple of ints)
+ the original line (string)
+
+It is designed to match the working of the Python tokenizer exactly, except
+that it produces COMMENT tokens for comments and gives type OP for all
+operators.
+
+Older entry points
+ tokenize_loop(readline, tokeneater)
+ tokenize(readline, tokeneater=printtoken)
+are the same, except instead of generating tokens, tokeneater is a callback
+function to which the 5 fields described above are passed as 5 arguments,
+each time a new token is found."""
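+
+# As a sketch, tokenizing the single line "x = 1\n" yields 5-tuples like:
+#   (NAME, 'x', (1, 0), (1, 1), 'x = 1\n')
+#   (OP, '=', (1, 2), (1, 3), 'x = 1\n')
+#   (NUMBER, '1', (1, 4), (1, 5), 'x = 1\n')
+#   (NEWLINE, '\n', (1, 5), (1, 6), 'x = 1\n')
+#   (ENDMARKER, '', (2, 0), (2, 0), '')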
+
+__author__ = 'Ka-Ping Yee <ping@lfw.org>'
+__credits__ = \
+ 'GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, Skip Montanaro'
+
+import string, re
+from codecs import BOM_UTF8, lookup
+from lib2to3.pgen2.token import *
+
+from . import token
+__all__ = [x for x in dir(token) if x[0] != '_'] + ["tokenize",
+ "generate_tokens", "untokenize"]
+del token
+
+try:
+ bytes
+except NameError:
+ # Support bytes type in Python <= 2.5, so 2to3 turns itself into
+ # valid Python 3 code.
+ bytes = str
+
+def group(*choices): return '(' + '|'.join(choices) + ')'
+def any(*choices): return group(*choices) + '*'
+def maybe(*choices): return group(*choices) + '?'
+
+Whitespace = r'[ \f\t]*'
+Comment = r'#[^\r\n]*'
+Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
+Name = r'[a-zA-Z_]\w*'
+
+Binnumber = r'0[bB][01]*'
+Hexnumber = r'0[xX][\da-fA-F]*[lL]?'
+Octnumber = r'0[oO]?[0-7]*[lL]?'
+Decnumber = r'[1-9]\d*[lL]?'
+Intnumber = group(Binnumber, Hexnumber, Octnumber, Decnumber)
+Exponent = r'[eE][-+]?\d+'
+Pointfloat = group(r'\d+\.\d*', r'\.\d+') + maybe(Exponent)
+Expfloat = r'\d+' + Exponent
+Floatnumber = group(Pointfloat, Expfloat)
+Imagnumber = group(r'\d+[jJ]', Floatnumber + r'[jJ]')
+Number = group(Imagnumber, Floatnumber, Intnumber)
+
+# Tail end of ' string.
+Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
+# Tail end of " string.
+Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
+# Tail end of ''' string.
+Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
+# Tail end of """ string.
+Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
+Triple = group("[ubUB]?[rR]?'''", '[ubUB]?[rR]?"""')
+# Single-line ' or " string.
+String = group(r"[uU]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
+ r'[uU]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*"')
+
+# Because of leftmost-then-longest match semantics, be sure to put the
+# longest operators first (e.g., if = came before ==, == would get
+# recognized as two instances of =).
+Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"<>", r"!=",
+ r"//=?", r"->",
+ r"[+\-*/%&@|^=<>]=?",
+ r"~")
+
+Bracket = '[][(){}]'
+Special = group(r'\r?\n', r'[:;.,`@]')
+Funny = group(Operator, Bracket, Special)
+
+PlainToken = group(Number, Funny, String, Name)
+Token = Ignore + PlainToken
+
+# First (or only) line of ' or " string.
+ContStr = group(r"[uUbB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
+ group("'", r'\\\r?\n'),
+ r'[uUbB]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
+ group('"', r'\\\r?\n'))
+PseudoExtras = group(r'\\\r?\n', Comment, Triple)
+PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
+
+tokenprog, pseudoprog, single3prog, double3prog = map(
+ re.compile, (Token, PseudoToken, Single3, Double3))
+endprogs = {"'": re.compile(Single), '"': re.compile(Double),
+ "'''": single3prog, '"""': double3prog,
+ "r'''": single3prog, 'r"""': double3prog,
+ "u'''": single3prog, 'u"""': double3prog,
+ "b'''": single3prog, 'b"""': double3prog,
+ "ur'''": single3prog, 'ur"""': double3prog,
+ "br'''": single3prog, 'br"""': double3prog,
+ "R'''": single3prog, 'R"""': double3prog,
+ "U'''": single3prog, 'U"""': double3prog,
+ "B'''": single3prog, 'B"""': double3prog,
+ "uR'''": single3prog, 'uR"""': double3prog,
+ "Ur'''": single3prog, 'Ur"""': double3prog,
+ "UR'''": single3prog, 'UR"""': double3prog,
+ "bR'''": single3prog, 'bR"""': double3prog,
+ "Br'''": single3prog, 'Br"""': double3prog,
+ "BR'''": single3prog, 'BR"""': double3prog,
+ 'r': None, 'R': None,
+ 'u': None, 'U': None,
+ 'b': None, 'B': None}
+
+triple_quoted = {}
+for t in ("'''", '"""',
+ "r'''", 'r"""', "R'''", 'R"""',
+ "u'''", 'u"""', "U'''", 'U"""',
+ "b'''", 'b"""', "B'''", 'B"""',
+ "ur'''", 'ur"""', "Ur'''", 'Ur"""',
+ "uR'''", 'uR"""', "UR'''", 'UR"""',
+ "br'''", 'br"""', "Br'''", 'Br"""',
+ "bR'''", 'bR"""', "BR'''", 'BR"""',):
+ triple_quoted[t] = t
+single_quoted = {}
+for t in ("'", '"',
+ "r'", 'r"', "R'", 'R"',
+ "u'", 'u"', "U'", 'U"',
+ "b'", 'b"', "B'", 'B"',
+ "ur'", 'ur"', "Ur'", 'Ur"',
+ "uR'", 'uR"', "UR'", 'UR"',
+ "br'", 'br"', "Br'", 'Br"',
+ "bR'", 'bR"', "BR'", 'BR"', ):
+ single_quoted[t] = t
+
+tabsize = 8
+
+class TokenError(Exception): pass
+
+class StopTokenizing(Exception): pass
+
+def printtoken(type, token, start, end, line): # for testing
+ (srow, scol) = start
+ (erow, ecol) = end
+ print "%d,%d-%d,%d:\t%s\t%s" % \
+ (srow, scol, erow, ecol, tok_name[type], repr(token))
+
+def tokenize(readline, tokeneater=printtoken):
+ """
+ The tokenize() function accepts two parameters: one representing the
+ input stream, and one providing an output mechanism for tokenize().
+
+ The first parameter, readline, must be a callable object which provides
+ the same interface as the readline() method of built-in file objects.
+ Each call to the function should return one line of input as a string.
+
+ The second parameter, tokeneater, must also be a callable object. It is
+ called once for each token, with five arguments, corresponding to the
+ tuples generated by generate_tokens().
+ """
+ try:
+ tokenize_loop(readline, tokeneater)
+ except StopTokenizing:
+ pass
+
+# backwards compatible interface
+def tokenize_loop(readline, tokeneater):
+ for token_info in generate_tokens(readline):
+ tokeneater(*token_info)
+
+class Untokenizer:
+
+ def __init__(self):
+ self.tokens = []
+ self.prev_row = 1
+ self.prev_col = 0
+
+ def add_whitespace(self, start):
+ row, col = start
+ assert row <= self.prev_row
+ col_offset = col - self.prev_col
+ if col_offset:
+ self.tokens.append(" " * col_offset)
+
+ def untokenize(self, iterable):
+ for t in iterable:
+ if len(t) == 2:
+ self.compat(t, iterable)
+ break
+ tok_type, token, start, end, line = t
+ self.add_whitespace(start)
+ self.tokens.append(token)
+ self.prev_row, self.prev_col = end
+ if tok_type in (NEWLINE, NL):
+ self.prev_row += 1
+ self.prev_col = 0
+ return "".join(self.tokens)
+
+ def compat(self, token, iterable):
+ startline = False
+ indents = []
+ toks_append = self.tokens.append
+ toknum, tokval = token
+ if toknum in (NAME, NUMBER):
+ tokval += ' '
+ if toknum in (NEWLINE, NL):
+ startline = True
+ for tok in iterable:
+ toknum, tokval = tok[:2]
+
+ if toknum in (NAME, NUMBER):
+ tokval += ' '
+
+ if toknum == INDENT:
+ indents.append(tokval)
+ continue
+ elif toknum == DEDENT:
+ indents.pop()
+ continue
+ elif toknum in (NEWLINE, NL):
+ startline = True
+ elif startline and indents:
+ toks_append(indents[-1])
+ startline = False
+ toks_append(tokval)
+
+cookie_re = re.compile(r'^[ \t\f]*#.*?coding[:=][ \t]*([-\w.]+)')
+blank_re = re.compile(r'^[ \t\f]*(?:[#\r\n]|$)')
+
+def _get_normal_name(orig_enc):
+ """Imitates get_normal_name in tokenizer.c."""
+ # Only care about the first 12 characters.
+ enc = orig_enc[:12].lower().replace("_", "-")
+ if enc == "utf-8" or enc.startswith("utf-8-"):
+ return "utf-8"
+ if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
+ enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
+ return "iso-8859-1"
+ return orig_enc
+
+def detect_encoding(readline):
+ """
+ The detect_encoding() function is used to detect the encoding that should
+ be used to decode a Python source file. It requires one argument, readline,
+ in the same way as the tokenize() generator.
+
+ It will call readline a maximum of twice, and return the encoding used
+ (as a string) and a list of any lines (left as bytes) it has read
+ in.
+
+    It detects the encoding from the presence of a UTF-8 BOM or an encoding
+    cookie as specified in PEP 263. If both a BOM and a cookie are present but
+    disagree, a SyntaxError is raised. Likewise, an invalid charset in the
+    encoding cookie raises a SyntaxError. Note that if a UTF-8 BOM is found,
+    'utf-8-sig' is returned.
+
+ If no encoding is specified, then the default of 'utf-8' will be returned.
+ """
+ bom_found = False
+ encoding = None
+ default = 'utf-8'
+ def read_or_stop():
+ try:
+ return readline()
+ except StopIteration:
+ return bytes()
+
+ def find_cookie(line):
+ try:
+ line_string = line.decode('ascii')
+ except UnicodeDecodeError:
+ return None
+ match = cookie_re.match(line_string)
+ if not match:
+ return None
+ encoding = _get_normal_name(match.group(1))
+ try:
+ codec = lookup(encoding)
+ except LookupError:
+ # This behaviour mimics the Python interpreter
+ raise SyntaxError("unknown encoding: " + encoding)
+
+ if bom_found:
+ if codec.name != 'utf-8':
+ # This behaviour mimics the Python interpreter
+ raise SyntaxError('encoding problem: utf-8')
+ encoding += '-sig'
+ return encoding
+
+ first = read_or_stop()
+ if first.startswith(BOM_UTF8):
+ bom_found = True
+ first = first[3:]
+ default = 'utf-8-sig'
+ if not first:
+ return default, []
+
+ encoding = find_cookie(first)
+ if encoding:
+ return encoding, [first]
+ if not blank_re.match(first):
+ return default, [first]
+
+ second = read_or_stop()
+ if not second:
+ return default, [first]
+
+ encoding = find_cookie(second)
+ if encoding:
+ return encoding, [first, second]
+
+ return default, [first, second]
+
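+# Sketch: if the first source line is "# -*- coding: latin-1 -*-\n",
+# detect_encoding() returns ('iso-8859-1', [that line]), since
+# _get_normal_name() folds the latin-1 aliases together.
+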
+def untokenize(iterable):
+ """Transform tokens back into Python source code.
+
+ Each element returned by the iterable must be a token sequence
+ with at least two elements, a token number and token value. If
+ only two tokens are passed, the resulting output is poor.
+
+ Round-trip invariant for full input:
+ Untokenized source will match input source exactly
+
+    Round-trip invariant for limited input:
+        # Output text will tokenize back to the input
+ t1 = [tok[:2] for tok in generate_tokens(f.readline)]
+ newcode = untokenize(t1)
+ readline = iter(newcode.splitlines(1)).next
+        t2 = [tok[:2] for tok in generate_tokens(readline)]
+ assert t1 == t2
+ """
+ ut = Untokenizer()
+ return ut.untokenize(iterable)
+
+def generate_tokens(readline):
+ """
+ The generate_tokens() generator requires one argument, readline, which
+ must be a callable object which provides the same interface as the
+ readline() method of built-in file objects. Each call to the function
+ should return one line of input as a string. Alternately, readline
+ can be a callable function terminating with StopIteration:
+ readline = open(myfile).next # Example of alternate readline
+
+ The generator produces 5-tuples with these members: the token type; the
+ token string; a 2-tuple (srow, scol) of ints specifying the row and
+ column where the token begins in the source; a 2-tuple (erow, ecol) of
+ ints specifying the row and column where the token ends in the source;
+ and the line on which the token was found. The line passed is the
+ logical line; continuation lines are included.
+ """
+ lnum = parenlev = continued = 0
+ namechars, numchars = string.ascii_letters + '_', '0123456789'
+ contstr, needcont = '', 0
+ contline = None
+ indents = [0]
+
+ while 1: # loop over lines in stream
+ try:
+ line = readline()
+ except StopIteration:
+ line = ''
+ lnum = lnum + 1
+ pos, max = 0, len(line)
+
+ if contstr: # continued string
+ if not line:
+ raise TokenError, ("EOF in multi-line string", strstart)
+ endmatch = endprog.match(line)
+ if endmatch:
+ pos = end = endmatch.end(0)
+ yield (STRING, contstr + line[:end],
+ strstart, (lnum, end), contline + line)
+ contstr, needcont = '', 0
+ contline = None
+ elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
+ yield (ERRORTOKEN, contstr + line,
+ strstart, (lnum, len(line)), contline)
+ contstr = ''
+ contline = None
+ continue
+ else:
+ contstr = contstr + line
+ contline = contline + line
+ continue
+
+ elif parenlev == 0 and not continued: # new statement
+ if not line: break
+ column = 0
+ while pos < max: # measure leading whitespace
+ if line[pos] == ' ': column = column + 1
+ elif line[pos] == '\t': column = (column//tabsize + 1)*tabsize
+ elif line[pos] == '\f': column = 0
+ else: break
+ pos = pos + 1
+ if pos == max: break
+
+ if line[pos] in '#\r\n': # skip comments or blank lines
+ if line[pos] == '#':
+ comment_token = line[pos:].rstrip('\r\n')
+ nl_pos = pos + len(comment_token)
+ yield (COMMENT, comment_token,
+ (lnum, pos), (lnum, pos + len(comment_token)), line)
+ yield (NL, line[nl_pos:],
+ (lnum, nl_pos), (lnum, len(line)), line)
+ else:
+ yield ((NL, COMMENT)[line[pos] == '#'], line[pos:],
+ (lnum, pos), (lnum, len(line)), line)
+ continue
+
+ if column > indents[-1]: # count indents or dedents
+ indents.append(column)
+ yield (INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
+ while column < indents[-1]:
+ if column not in indents:
+ raise IndentationError(
+ "unindent does not match any outer indentation level",
+ ("<tokenize>", lnum, pos, line))
+ indents = indents[:-1]
+ yield (DEDENT, '', (lnum, pos), (lnum, pos), line)
+
+ else: # continued statement
+ if not line:
+ raise TokenError, ("EOF in multi-line statement", (lnum, 0))
+ continued = 0
+
+ while pos < max:
+ pseudomatch = pseudoprog.match(line, pos)
+ if pseudomatch: # scan for tokens
+ start, end = pseudomatch.span(1)
+ spos, epos, pos = (lnum, start), (lnum, end), end
+ token, initial = line[start:end], line[start]
+
+ if initial in numchars or \
+ (initial == '.' and token != '.'): # ordinary number
+ yield (NUMBER, token, spos, epos, line)
+ elif initial in '\r\n':
+ newline = NEWLINE
+ if parenlev > 0:
+ newline = NL
+ yield (newline, token, spos, epos, line)
+ elif initial == '#':
+ assert not token.endswith("\n")
+ yield (COMMENT, token, spos, epos, line)
+ elif token in triple_quoted:
+ endprog = endprogs[token]
+ endmatch = endprog.match(line, pos)
+ if endmatch: # all on one line
+ pos = endmatch.end(0)
+ token = line[start:pos]
+ yield (STRING, token, spos, (lnum, pos), line)
+ else:
+ strstart = (lnum, start) # multiple lines
+ contstr = line[start:]
+ contline = line
+ break
+ elif initial in single_quoted or \
+ token[:2] in single_quoted or \
+ token[:3] in single_quoted:
+ if token[-1] == '\n': # continued string
+ strstart = (lnum, start)
+ endprog = (endprogs[initial] or endprogs[token[1]] or
+ endprogs[token[2]])
+ contstr, needcont = line[start:], 1
+ contline = line
+ break
+ else: # ordinary string
+ yield (STRING, token, spos, epos, line)
+ elif initial in namechars: # ordinary name
+ yield (NAME, token, spos, epos, line)
+ elif initial == '\\': # continued stmt
+ # This yield is new; needed for better idempotency:
+ yield (NL, token, spos, (lnum, pos), line)
+ continued = 1
+ else:
+ if initial in '([{': parenlev = parenlev + 1
+ elif initial in ')]}': parenlev = parenlev - 1
+ yield (OP, token, spos, epos, line)
+ else:
+ yield (ERRORTOKEN, line[pos],
+ (lnum, pos), (lnum, pos+1), line)
+ pos = pos + 1
+
+ for indent in indents[1:]: # pop remaining indent levels
+ yield (DEDENT, '', (lnum, 0), (lnum, 0), '')
+ yield (ENDMARKER, '', (lnum, 0), (lnum, 0), '')
+
+if __name__ == '__main__': # testing
+ import sys
+ if len(sys.argv) > 1: tokenize(open(sys.argv[1]).readline)
+ else: tokenize(sys.stdin.readline)
diff --git a/lib/python2.7/lib2to3/pygram.py b/lib/python2.7/lib2to3/pygram.py
new file mode 100644
index 0000000..621ff24
--- /dev/null
+++ b/lib/python2.7/lib2to3/pygram.py
@@ -0,0 +1,40 @@
+# Copyright 2006 Google, Inc. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+"""Export the Python grammar and symbols."""
+
+# Python imports
+import os
+
+# Local imports
+from .pgen2 import token
+from .pgen2 import driver
+from . import pytree
+
+# The grammar file
+_GRAMMAR_FILE = os.path.join(os.path.dirname(__file__), "Grammar.txt")
+_PATTERN_GRAMMAR_FILE = os.path.join(os.path.dirname(__file__),
+ "PatternGrammar.txt")
+
+
+class Symbols(object):
+
+ def __init__(self, grammar):
+ """Initializer.
+
+ Creates an attribute for each grammar symbol (nonterminal),
+ whose value is the symbol's type (an int >= 256).
+ """
+ for name, symbol in grammar.symbol2number.iteritems():
+ setattr(self, name, symbol)
+
+
+python_grammar = driver.load_grammar(_GRAMMAR_FILE)
+
+python_symbols = Symbols(python_grammar)
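+# e.g. python_symbols.file_input is the symbol number (an int >= 256) of the
+# grammar's top-level rule; attribute names mirror the rule names in
+# Grammar.txt.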
+
+python_grammar_no_print_statement = python_grammar.copy()
+del python_grammar_no_print_statement.keywords["print"]
+
+pattern_grammar = driver.load_grammar(_PATTERN_GRAMMAR_FILE)
+pattern_symbols = Symbols(pattern_grammar)
diff --git a/lib/python2.7/lib2to3/pytree.py b/lib/python2.7/lib2to3/pytree.py
new file mode 100644
index 0000000..179caca
--- /dev/null
+++ b/lib/python2.7/lib2to3/pytree.py
@@ -0,0 +1,887 @@
+# Copyright 2006 Google, Inc. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+"""
+Python parse tree definitions.
+
+This is a very concrete parse tree; we need to keep every token and
+even the comments and whitespace between tokens.
+
+There's also a pattern matching implementation here.
+"""
+
+__author__ = "Guido van Rossum <guido@python.org>"
+
+import sys
+import warnings
+from StringIO import StringIO
+
+HUGE = 0x7FFFFFFF # maximum repeat count, default max
+
+_type_reprs = {}
+def type_repr(type_num):
+ global _type_reprs
+ if not _type_reprs:
+ from .pygram import python_symbols
+        # Printing token names is possible but not as useful; that would
+        # iterate over pgen2.token.__dict__.items() instead.
+ for name, val in python_symbols.__dict__.items():
+ if type(val) == int: _type_reprs[val] = name
+ return _type_reprs.setdefault(type_num, type_num)
+
+class Base(object):
+
+ """
+ Abstract base class for Node and Leaf.
+
+ This provides some default functionality and boilerplate using the
+ template pattern.
+
+ A node may be a subnode of at most one parent.
+ """
+
+ # Default values for instance variables
+ type = None # int: token number (< 256) or symbol number (>= 256)
+ parent = None # Parent node pointer, or None
+ children = () # Tuple of subnodes
+ was_changed = False
+ was_checked = False
+
+ def __new__(cls, *args, **kwds):
+ """Constructor that prevents Base from being instantiated."""
+ assert cls is not Base, "Cannot instantiate Base"
+ return object.__new__(cls)
+
+ def __eq__(self, other):
+ """
+ Compare two nodes for equality.
+
+ This calls the method _eq().
+ """
+ if self.__class__ is not other.__class__:
+ return NotImplemented
+ return self._eq(other)
+
+ __hash__ = None # For Py3 compatibility.
+
+ def __ne__(self, other):
+ """
+ Compare two nodes for inequality.
+
+ This calls the method _eq().
+ """
+ if self.__class__ is not other.__class__:
+ return NotImplemented
+ return not self._eq(other)
+
+ def _eq(self, other):
+ """
+ Compare two nodes for equality.
+
+ This is called by __eq__ and __ne__. It is only called if the two nodes
+ have the same type. This must be implemented by the concrete subclass.
+ Nodes should be considered equal if they have the same structure,
+ ignoring the prefix string and other context information.
+ """
+ raise NotImplementedError
+
+ def clone(self):
+ """
+ Return a cloned (deep) copy of self.
+
+ This must be implemented by the concrete subclass.
+ """
+ raise NotImplementedError
+
+ def post_order(self):
+ """
+ Return a post-order iterator for the tree.
+
+ This must be implemented by the concrete subclass.
+ """
+ raise NotImplementedError
+
+ def pre_order(self):
+ """
+ Return a pre-order iterator for the tree.
+
+ This must be implemented by the concrete subclass.
+ """
+ raise NotImplementedError
+
+ def set_prefix(self, prefix):
+ """
+ Set the prefix for the node (see Leaf class).
+
+ DEPRECATED; use the prefix property directly.
+ """
+ warnings.warn("set_prefix() is deprecated; use the prefix property",
+ DeprecationWarning, stacklevel=2)
+ self.prefix = prefix
+
+ def get_prefix(self):
+ """
+ Return the prefix for the node (see Leaf class).
+
+ DEPRECATED; use the prefix property directly.
+ """
+ warnings.warn("get_prefix() is deprecated; use the prefix property",
+ DeprecationWarning, stacklevel=2)
+ return self.prefix
+
+ def replace(self, new):
+ """Replace this node with a new one in the parent."""
+ assert self.parent is not None, str(self)
+ assert new is not None
+ if not isinstance(new, list):
+ new = [new]
+ l_children = []
+ found = False
+ for ch in self.parent.children:
+ if ch is self:
+ assert not found, (self.parent.children, self, new)
+ if new is not None:
+ l_children.extend(new)
+ found = True
+ else:
+ l_children.append(ch)
+ assert found, (self.children, self, new)
+ self.parent.changed()
+ self.parent.children = l_children
+ for x in new:
+ x.parent = self.parent
+ self.parent = None
+
+ def get_lineno(self):
+ """Return the line number which generated the invocant node."""
+ node = self
+ while not isinstance(node, Leaf):
+ if not node.children:
+ return
+ node = node.children[0]
+ return node.lineno
+
+ def changed(self):
+ if self.parent:
+ self.parent.changed()
+ self.was_changed = True
+
+ def remove(self):
+ """
+ Remove the node from the tree. Returns the position of the node in its
+ parent's children before it was removed.
+ """
+ if self.parent:
+ for i, node in enumerate(self.parent.children):
+ if node is self:
+ self.parent.changed()
+ del self.parent.children[i]
+ self.parent = None
+ return i
+
+ @property
+ def next_sibling(self):
+ """
+        The node immediately following the invocant in its parent's children
+        list. If the invocant does not have a next sibling, it is None.
+ """
+ if self.parent is None:
+ return None
+
+ # Can't use index(); we need to test by identity
+ for i, child in enumerate(self.parent.children):
+ if child is self:
+ try:
+ return self.parent.children[i+1]
+ except IndexError:
+ return None
+
+ @property
+ def prev_sibling(self):
+ """
+        The node immediately preceding the invocant in its parent's children
+ list. If the invocant does not have a previous sibling, it is None.
+ """
+ if self.parent is None:
+ return None
+
+ # Can't use index(); we need to test by identity
+ for i, child in enumerate(self.parent.children):
+ if child is self:
+ if i == 0:
+ return None
+ return self.parent.children[i-1]
+
+ def leaves(self):
+ for child in self.children:
+ for x in child.leaves():
+ yield x
+
+ def depth(self):
+ if self.parent is None:
+ return 0
+ return 1 + self.parent.depth()
+
+ def get_suffix(self):
+ """
+ Return the string immediately following the invocant node. This is
+ effectively equivalent to node.next_sibling.prefix
+ """
+ next_sib = self.next_sibling
+ if next_sib is None:
+ return u""
+ return next_sib.prefix
+
+ if sys.version_info < (3, 0):
+ def __str__(self):
+ return unicode(self).encode("ascii")
+
+class Node(Base):
+
+ """Concrete implementation for interior nodes."""
+
+    def __init__(self, type, children,
+ context=None,
+ prefix=None,
+ fixers_applied=None):
+ """
+ Initializer.
+
+ Takes a type constant (a symbol number >= 256), a sequence of
+ child nodes, and an optional context keyword argument.
+
+ As a side effect, the parent pointers of the children are updated.
+ """
+ assert type >= 256, type
+ self.type = type
+ self.children = list(children)
+ for ch in self.children:
+ assert ch.parent is None, repr(ch)
+ ch.parent = self
+ if prefix is not None:
+ self.prefix = prefix
+ if fixers_applied:
+ self.fixers_applied = fixers_applied[:]
+ else:
+ self.fixers_applied = None
+
+ def __repr__(self):
+ """Return a canonical string representation."""
+ return "%s(%s, %r)" % (self.__class__.__name__,
+ type_repr(self.type),
+ self.children)
+
+ def __unicode__(self):
+ """
+ Return a pretty string representation.
+
+ This reproduces the input source exactly.
+ """
+ return u"".join(map(unicode, self.children))
+
+ if sys.version_info > (3, 0):
+ __str__ = __unicode__
+
+ def _eq(self, other):
+ """Compare two nodes for equality."""
+ return (self.type, self.children) == (other.type, other.children)
+
+ def clone(self):
+ """Return a cloned (deep) copy of self."""
+ return Node(self.type, [ch.clone() for ch in self.children],
+ fixers_applied=self.fixers_applied)
+
+ def post_order(self):
+ """Return a post-order iterator for the tree."""
+ for child in self.children:
+ for node in child.post_order():
+ yield node
+ yield self
+
+ def pre_order(self):
+ """Return a pre-order iterator for the tree."""
+ yield self
+ for child in self.children:
+ for node in child.pre_order():
+ yield node
+
+ def _prefix_getter(self):
+ """
+ The whitespace and comments preceding this node in the input.
+ """
+ if not self.children:
+ return ""
+ return self.children[0].prefix
+
+ def _prefix_setter(self, prefix):
+ if self.children:
+ self.children[0].prefix = prefix
+
+ prefix = property(_prefix_getter, _prefix_setter)
+
+ def set_child(self, i, child):
+ """
+ Equivalent to 'node.children[i] = child'. This method also sets the
+ child's parent attribute appropriately.
+ """
+ child.parent = self
+ self.children[i].parent = None
+ self.children[i] = child
+ self.changed()
+
+ def insert_child(self, i, child):
+ """
+ Equivalent to 'node.children.insert(i, child)'. This method also sets
+ the child's parent attribute appropriately.
+ """
+ child.parent = self
+ self.children.insert(i, child)
+ self.changed()
+
+ def append_child(self, child):
+ """
+ Equivalent to 'node.children.append(child)'. This method also sets the
+ child's parent attribute appropriately.
+ """
+ child.parent = self
+ self.children.append(child)
+ self.changed()
+
+
+class Leaf(Base):
+
+ """Concrete implementation for leaf nodes."""
+
+ # Default values for instance variables
+ _prefix = "" # Whitespace and comments preceding this token in the input
+ lineno = 0 # Line where this token starts in the input
+ column = 0 # Column where this token starts in the input
+
+ def __init__(self, type, value,
+ context=None,
+ prefix=None,
+ fixers_applied=[]):
+ """
+ Initializer.
+
+ Takes a type constant (a token number < 256), a string value, and an
+ optional context keyword argument.
+ """
+ assert 0 <= type < 256, type
+ if context is not None:
+ self._prefix, (self.lineno, self.column) = context
+ self.type = type
+ self.value = value
+ if prefix is not None:
+ self._prefix = prefix
+ self.fixers_applied = fixers_applied[:]
+
+ def __repr__(self):
+ """Return a canonical string representation."""
+ return "%s(%r, %r)" % (self.__class__.__name__,
+ self.type,
+ self.value)
+
+ def __unicode__(self):
+ """
+ Return a pretty string representation.
+
+ This reproduces the input source exactly.
+ """
+ return self.prefix + unicode(self.value)
+
+ if sys.version_info > (3, 0):
+ __str__ = __unicode__
+
+ def _eq(self, other):
+ """Compare two nodes for equality."""
+ return (self.type, self.value) == (other.type, other.value)
+
+ def clone(self):
+ """Return a cloned (deep) copy of self."""
+ return Leaf(self.type, self.value,
+ (self.prefix, (self.lineno, self.column)),
+ fixers_applied=self.fixers_applied)
+
+ def leaves(self):
+ yield self
+
+ def post_order(self):
+ """Return a post-order iterator for the tree."""
+ yield self
+
+ def pre_order(self):
+ """Return a pre-order iterator for the tree."""
+ yield self
+
+ def _prefix_getter(self):
+ """
+ The whitespace and comments preceding this token in the input.
+ """
+ return self._prefix
+
+ def _prefix_setter(self, prefix):
+ self.changed()
+ self._prefix = prefix
+
+ prefix = property(_prefix_getter, _prefix_setter)
+
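+# Editor's note: a minimal sketch (not part of the original module) showing
+# how Node and Leaf compose. Token numbers follow the stdlib token module
+# (NAME == 1, NUMBER == 2, EQUAL == 22); 300 is an arbitrary symbol number.
+# >>> name = Leaf(1, "x")
+# >>> eq = Leaf(22, "=", prefix=" ")
+# >>> val = Leaf(2, "1", prefix=" ")
+# >>> stmt = Node(300, [name, eq, val])
+# >>> unicode(stmt)
+# u'x = 1'
+# >>> val.prev_sibling is eq
+# True
+# >>> val.replace(Leaf(2, "2", prefix=" "))
+# >>> unicode(stmt)
+# u'x = 2'
+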
+def convert(gr, raw_node):
+ """
+ Convert raw node information to a Node or Leaf instance.
+
+ This is passed to the parser driver which calls it whenever a reduction of a
+ grammar rule produces a new complete node, so that the tree is built
+ strictly bottom-up.
+ """
+ type, value, context, children = raw_node
+ if children or type in gr.number2symbol:
+ # If there's exactly one child, return that child instead of
+ # creating a new node.
+ if len(children) == 1:
+ return children[0]
+ return Node(type, children, context=context)
+ else:
+ return Leaf(type, value, context=context)
+
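+# Editor's note: a hedged sketch of convert() collapsing single-child
+# reductions. Assume `gr` is a loaded pgen2 grammar (e.g.
+# pygram.python_grammar), which maps the symbol name "atom" to a number
+# >= 256.
+# >>> leaf = Leaf(1, "x")
+# >>> convert(gr, (gr.symbol2number["atom"], None, None, [leaf])) is leaf
+# True
+# >>> convert(gr, (1, "x", None, []))
+# Leaf(1, 'x')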
+
+class BasePattern(object):
+
+ """
+ A pattern is a tree matching pattern.
+
+ It looks for a specific node type (token or symbol), and
+ optionally for a specific content.
+
+ This is an abstract base class. There are three concrete
+ subclasses:
+
+ - LeafPattern matches a single leaf node;
+ - NodePattern matches a single node (usually non-leaf);
+ - WildcardPattern matches a sequence of nodes of variable length.
+ """
+
+ # Defaults for instance variables
+ type = None # Node type (token if < 256, symbol if >= 256)
+ content = None # Optional content matching pattern
+ name = None # Optional name used to store match in results dict
+
+ def __new__(cls, *args, **kwds):
+ """Constructor that prevents BasePattern from being instantiated."""
+ assert cls is not BasePattern, "Cannot instantiate BasePattern"
+ return object.__new__(cls)
+
+ def __repr__(self):
+ args = [type_repr(self.type), self.content, self.name]
+ while args and args[-1] is None:
+ del args[-1]
+ return "%s(%s)" % (self.__class__.__name__, ", ".join(map(repr, args)))
+
+ def optimize(self):
+ """
+ A subclass can define this as a hook for optimizations.
+
+ Returns either self or another node with the same effect.
+ """
+ return self
+
+ def match(self, node, results=None):
+ """
+ Does this pattern exactly match a node?
+
+ Returns True if it matches, False if not.
+
+ If results is not None, it must be a dict which will be
+ updated with the nodes matching named subpatterns.
+
+ Default implementation for non-wildcard patterns.
+ """
+ if self.type is not None and node.type != self.type:
+ return False
+ if self.content is not None:
+ r = None
+ if results is not None:
+ r = {}
+ if not self._submatch(node, r):
+ return False
+ if r:
+ results.update(r)
+ if results is not None and self.name:
+ results[self.name] = node
+ return True
+
+ def match_seq(self, nodes, results=None):
+ """
+ Does this pattern exactly match a sequence of nodes?
+
+ Default implementation for non-wildcard patterns.
+ """
+ if len(nodes) != 1:
+ return False
+ return self.match(nodes[0], results)
+
+ def generate_matches(self, nodes):
+ """
+ Generator yielding all matches for this pattern.
+
+ Default implementation for non-wildcard patterns.
+ """
+ r = {}
+ if nodes and self.match(nodes[0], r):
+ yield 1, r
+
+
+class LeafPattern(BasePattern):
+
+ def __init__(self, type=None, content=None, name=None):
+ """
+ Initializer. Takes optional type, content, and name.
+
+ The type, if given must be a token type (< 256). If not given,
+ this matches any *leaf* node; the content may still be required.
+
+ The content, if given, must be a string.
+
+ If a name is given, the matching node is stored in the results
+ dict under that key.
+ """
+ if type is not None:
+ assert 0 <= type < 256, type
+ if content is not None:
+ assert isinstance(content, basestring), repr(content)
+ self.type = type
+ self.content = content
+ self.name = name
+
+ def match(self, node, results=None):
+ """Override match() to insist on a leaf node."""
+ if not isinstance(node, Leaf):
+ return False
+ return BasePattern.match(self, node, results)
+
+ def _submatch(self, node, results=None):
+ """
+ Match the pattern's content to the node's children.
+
+ This assumes the node type matches and self.content is not None.
+
+ Returns True if it matches, False if not.
+
+ If results is not None, it must be a dict which will be
+ updated with the nodes matching named subpatterns.
+
+ When returning False, the results dict may still be updated.
+ """
+ return self.content == node.value
+
+
+class NodePattern(BasePattern):
+
+ wildcards = False
+
+ def __init__(self, type=None, content=None, name=None):
+ """
+ Initializer. Takes optional type, content, and name.
+
+ The type, if given, must be a symbol type (>= 256). If the
+ type is None this matches *any* single node (leaf or not),
+ except if content is not None, in which case it only matches
+ non-leaf nodes that also match the content pattern.
+
+ The content, if not None, must be a sequence of Patterns that
+ must match the node's children exactly. If the content is
+ given, the type must not be None.
+
+ If a name is given, the matching node is stored in the results
+ dict under that key.
+ """
+ if type is not None:
+ assert type >= 256, type
+ if content is not None:
+ assert not isinstance(content, basestring), repr(content)
+ content = list(content)
+ for i, item in enumerate(content):
+ assert isinstance(item, BasePattern), (i, item)
+ if isinstance(item, WildcardPattern):
+ self.wildcards = True
+ self.type = type
+ self.content = content
+ self.name = name
+
+ def _submatch(self, node, results=None):
+ """
+ Match the pattern's content to the node's children.
+
+ This assumes the node type matches and self.content is not None.
+
+ Returns True if it matches, False if not.
+
+ If results is not None, it must be a dict which will be
+ updated with the nodes matching named subpatterns.
+
+ When returning False, the results dict may still be updated.
+ """
+ if self.wildcards:
+ for c, r in generate_matches(self.content, node.children):
+ if c == len(node.children):
+ if results is not None:
+ results.update(r)
+ return True
+ return False
+ if len(self.content) != len(node.children):
+ return False
+ for subpattern, child in zip(self.content, node.children):
+ if not subpattern.match(child, results):
+ return False
+ return True
+
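+# Editor's note: an illustrative sketch (not from the original source) of
+# leaf matching with named captures; token.NAME == 1.
+# >>> pat = LeafPattern(1, "print", name="kw")
+# >>> results = {}
+# >>> pat.match(Leaf(1, "print"), results)
+# True
+# >>> results["kw"].value
+# 'print'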
+
+class WildcardPattern(BasePattern):
+
+ """
+ A wildcard pattern can match zero or more nodes.
+
+ This has all the flexibility needed to implement patterns like:
+
+ .* .+ .? .{m,n}
+ (a b c | d e | f)
+ (...)* (...)+ (...)? (...){m,n}
+
+ except it always uses non-greedy matching.
+ """
+
+ def __init__(self, content=None, min=0, max=HUGE, name=None):
+ """
+ Initializer.
+
+ Args:
+ content: optional sequence of subsequences of patterns;
+ if absent, matches one node;
+ if present, each subsequence is an alternative [*]
+ min: optional minimum number of times to match, default 0
+ max: optional maximum number of times to match, default HUGE
+ name: optional name assigned to this match
+
+ [*] Thus, if content is [[a, b, c], [d, e], [f, g, h]] this is
+ equivalent to (a b c | d e | f g h); if content is None,
+ this is equivalent to '.' in regular expression terms.
+ The min and max parameters work as follows:
+ min=0, max=maxint: .*
+ min=1, max=maxint: .+
+ min=0, max=1: .?
+ min=1, max=1: .
+ If content is not None, replace the dot with the parenthesized
+ list of alternatives, e.g. (a b c | d e | f g h)*
+ """
+ assert 0 <= min <= max <= HUGE, (min, max)
+ if content is not None:
+ content = tuple(map(tuple, content)) # Protect against alterations
+ # Check sanity of alternatives
+ assert len(content), repr(content) # Can't have zero alternatives
+ for alt in content:
+ assert len(alt), repr(alt) # Can't have empty alternatives
+ self.content = content
+ self.min = min
+ self.max = max
+ self.name = name
+
+ def optimize(self):
+ """Optimize certain stacked wildcard patterns."""
+ subpattern = None
+ if (self.content is not None and
+ len(self.content) == 1 and len(self.content[0]) == 1):
+ subpattern = self.content[0][0]
+ if self.min == 1 and self.max == 1:
+ if self.content is None:
+ return NodePattern(name=self.name)
+ if subpattern is not None and self.name == subpattern.name:
+ return subpattern.optimize()
+ if (self.min <= 1 and isinstance(subpattern, WildcardPattern) and
+ subpattern.min <= 1 and self.name == subpattern.name):
+ return WildcardPattern(subpattern.content,
+ self.min*subpattern.min,
+ self.max*subpattern.max,
+ subpattern.name)
+ return self
+
+ def match(self, node, results=None):
+ """Does this pattern exactly match a node?"""
+ return self.match_seq([node], results)
+
+ def match_seq(self, nodes, results=None):
+ """Does this pattern exactly match a sequence of nodes?"""
+ for c, r in self.generate_matches(nodes):
+ if c == len(nodes):
+ if results is not None:
+ results.update(r)
+ if self.name:
+ results[self.name] = list(nodes)
+ return True
+ return False
+
+ def generate_matches(self, nodes):
+ """
+ Generator yielding matches for a sequence of nodes.
+
+ Args:
+ nodes: sequence of nodes
+
+ Yields:
+ (count, results) tuples where:
+ count: the match comprises nodes[:count];
+ results: dict containing named submatches.
+ """
+ if self.content is None:
+ # Shortcut for special case (see __init__.__doc__)
+ for count in xrange(self.min, 1 + min(len(nodes), self.max)):
+ r = {}
+ if self.name:
+ r[self.name] = nodes[:count]
+ yield count, r
+ elif self.name == "bare_name":
+ yield self._bare_name_matches(nodes)
+ else:
+ # Hitting the recursion limit usually results in some ugly
+ # messages about how RuntimeErrors are being ignored, so we
+ # temporarily silence stderr. We don't do this on non-CPython
+ # implementations because they don't have this problem.
+ if hasattr(sys, "getrefcount"):
+ save_stderr = sys.stderr
+ sys.stderr = StringIO()
+ try:
+ for count, r in self._recursive_matches(nodes, 0):
+ if self.name:
+ r[self.name] = nodes[:count]
+ yield count, r
+ except RuntimeError:
+ # We fall back to the iterative pattern matching scheme if the recursive
+ # scheme hits the recursion limit.
+ for count, r in self._iterative_matches(nodes):
+ if self.name:
+ r[self.name] = nodes[:count]
+ yield count, r
+ finally:
+ if hasattr(sys, "getrefcount"):
+ sys.stderr = save_stderr
+
+ def _iterative_matches(self, nodes):
+ """Helper to iteratively yield the matches."""
+ nodelen = len(nodes)
+ if 0 >= self.min:
+ yield 0, {}
+
+ results = []
+ # generate matches that use just one alt from self.content
+ for alt in self.content:
+ for c, r in generate_matches(alt, nodes):
+ yield c, r
+ results.append((c, r))
+
+ # for each match, iterate down the nodes
+ while results:
+ new_results = []
+ for c0, r0 in results:
+ # stop if the entire set of nodes has been matched
+ if c0 < nodelen and c0 <= self.max:
+ for alt in self.content:
+ for c1, r1 in generate_matches(alt, nodes[c0:]):
+ if c1 > 0:
+ r = {}
+ r.update(r0)
+ r.update(r1)
+ yield c0 + c1, r
+ new_results.append((c0 + c1, r))
+ results = new_results
+
+ def _bare_name_matches(self, nodes):
+ """Special optimized matcher for bare_name."""
+ count = 0
+ r = {}
+ done = False
+ max = len(nodes)
+ while not done and count < max:
+ done = True
+ for leaf in self.content:
+ if leaf[0].match(nodes[count], r):
+ count += 1
+ done = False
+ break
+ r[self.name] = nodes[:count]
+ return count, r
+
+ def _recursive_matches(self, nodes, count):
+ """Helper to recursively yield the matches."""
+ assert self.content is not None
+ if count >= self.min:
+ yield 0, {}
+ if count < self.max:
+ for alt in self.content:
+ for c0, r0 in generate_matches(alt, nodes):
+ for c1, r1 in self._recursive_matches(nodes[c0:], count+1):
+ r = {}
+ r.update(r0)
+ r.update(r1)
+ yield c0 + c1, r
+
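+# Editor's note: a sketch of the content=None shortcut; with the defaults
+# (min=0, max=HUGE) the pattern behaves like '.*' and yields every prefix
+# length, shortest first.
+# >>> wp = WildcardPattern(name="tail")
+# >>> leaves = [Leaf(1, "a"), Leaf(1, "b")]
+# >>> [c for c, r in wp.generate_matches(leaves)]
+# [0, 1, 2]
+# >>> wp.match_seq(leaves, {})
+# True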
+
+class NegatedPattern(BasePattern):
+
+ def __init__(self, content=None):
+ """
+ Initializer.
+
+ The argument is either a pattern or None. If it is None, this
+ only matches an empty sequence (effectively '$' in regex
+ lingo). If it is not None, this matches whenever the argument
+ pattern doesn't have any matches.
+ """
+ if content is not None:
+ assert isinstance(content, BasePattern), repr(content)
+ self.content = content
+
+ def match(self, node):
+ # We never match a node in its entirety
+ return False
+
+ def match_seq(self, nodes):
+ # We only match an empty sequence of nodes in its entirety
+ return len(nodes) == 0
+
+ def generate_matches(self, nodes):
+ if self.content is None:
+ # Return a match if there is an empty sequence
+ if len(nodes) == 0:
+ yield 0, {}
+ else:
+ # Return a match if the argument pattern has no matches
+ for c, r in self.content.generate_matches(nodes):
+ return
+ yield 0, {}
+
+
+def generate_matches(patterns, nodes):
+ """
+ Generator yielding matches for a sequence of patterns and nodes.
+
+ Args:
+ patterns: a sequence of patterns
+ nodes: a sequence of nodes
+
+ Yields:
+ (count, results) tuples where:
+ count: the entire sequence of patterns matches nodes[:count];
+ results: dict containing named submatches.
+ """
+ if not patterns:
+ yield 0, {}
+ else:
+ p, rest = patterns[0], patterns[1:]
+ for c0, r0 in p.generate_matches(nodes):
+ if not rest:
+ yield c0, r0
+ else:
+ for c1, r1 in generate_matches(rest, nodes[c0:]):
+ r = {}
+ r.update(r0)
+ r.update(r1)
+ yield c0 + c1, r
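+
+# Editor's note: a sketch of threading a pattern sequence over nodes; two
+# literal LeafPatterns over matching leaves yield one full-length match.
+# >>> pats = [LeafPattern(1, "a"), LeafPattern(1, "b")]
+# >>> nodes = [Leaf(1, "a"), Leaf(1, "b")]
+# >>> list(generate_matches(pats, nodes))
+# [(2, {})]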
diff --git a/lib/python2.7/lib2to3/refactor.py b/lib/python2.7/lib2to3/refactor.py
new file mode 100644
index 0000000..98386c5
--- /dev/null
+++ b/lib/python2.7/lib2to3/refactor.py
@@ -0,0 +1,747 @@
+# Copyright 2006 Google, Inc. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+"""Refactoring framework.
+
+Used as a main program, this can refactor any number of files and/or
+recursively descend down directories. Imported as a module, this
+provides infrastructure to write your own refactoring tool.
+"""
+
+from __future__ import with_statement
+
+__author__ = "Guido van Rossum <guido@python.org>"
+
+
+# Python imports
+import os
+import sys
+import logging
+import operator
+import collections
+import StringIO
+from itertools import chain
+
+# Local imports
+from .pgen2 import driver, tokenize, token
+from .fixer_util import find_root
+from . import pytree, pygram
+from . import btm_utils as bu
+from . import btm_matcher as bm
+
+
+def get_all_fix_names(fixer_pkg, remove_prefix=True):
+ """Return a sorted list of all available fix names in the given package."""
+ pkg = __import__(fixer_pkg, [], [], ["*"])
+ fixer_dir = os.path.dirname(pkg.__file__)
+ fix_names = []
+ for name in sorted(os.listdir(fixer_dir)):
+ if name.startswith("fix_") and name.endswith(".py"):
+ if remove_prefix:
+ name = name[4:]
+ fix_names.append(name[:-3])
+ return fix_names
+
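+# Editor's note: a sketch of listing the stock fixer package that ships with
+# lib2to3 (result lists abbreviated here).
+# >>> get_all_fix_names("lib2to3.fixes")
+# ['apply', 'asserts', 'basestring', ...]
+# >>> get_all_fix_names("lib2to3.fixes", False)
+# ['fix_apply', 'fix_asserts', 'fix_basestring', ...]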
+
+class _EveryNode(Exception):
+ pass
+
+
+def _get_head_types(pat):
+ """ Accepts a pytree Pattern Node and returns a set
+ of the pattern types which will match first. """
+
+ if isinstance(pat, (pytree.NodePattern, pytree.LeafPattern)):
+ # NodePatterns must either have no type and no content
+ # or a type and content -- so they don't get any farther.
+ # Always return leaf types.
+ if pat.type is None:
+ raise _EveryNode
+ return set([pat.type])
+
+ if isinstance(pat, pytree.NegatedPattern):
+ if pat.content:
+ return _get_head_types(pat.content)
+ raise _EveryNode # Negated Patterns don't have a type
+
+ if isinstance(pat, pytree.WildcardPattern):
+ # Recurse on each node in content
+ r = set()
+ for p in pat.content:
+ for x in p:
+ r.update(_get_head_types(x))
+ return r
+
+ raise Exception("Oh no! I don't understand pattern %s" % pat)
+
+
+def _get_headnode_dict(fixer_list):
+ """ Accepts a list of fixers and returns a dictionary
+ of head node type --> fixer list. """
+ head_nodes = collections.defaultdict(list)
+ every = []
+ for fixer in fixer_list:
+ if fixer.pattern:
+ try:
+ heads = _get_head_types(fixer.pattern)
+ except _EveryNode:
+ every.append(fixer)
+ else:
+ for node_type in heads:
+ head_nodes[node_type].append(fixer)
+ else:
+ if fixer._accept_type is not None:
+ head_nodes[fixer._accept_type].append(fixer)
+ else:
+ every.append(fixer)
+ for node_type in chain(pygram.python_grammar.symbol2number.itervalues(),
+ pygram.python_grammar.tokens):
+ head_nodes[node_type].extend(every)
+ return dict(head_nodes)
+
+
+def get_fixers_from_package(pkg_name):
+ """
+ Return the fully qualified names for fixers in the package pkg_name.
+ """
+ return [pkg_name + "." + fix_name
+ for fix_name in get_all_fix_names(pkg_name, False)]
+
+def _identity(obj):
+ return obj
+
+if sys.version_info < (3, 0):
+ import codecs
+ _open_with_encoding = codecs.open
+ # Unfortunately, codecs.open doesn't translate newlines.
+ def _from_system_newlines(input):
+ return input.replace(u"\r\n", u"\n")
+ def _to_system_newlines(input):
+ if os.linesep != "\n":
+ return input.replace(u"\n", os.linesep)
+ else:
+ return input
+else:
+ _open_with_encoding = open
+ _from_system_newlines = _identity
+ _to_system_newlines = _identity
+
+
+def _detect_future_features(source):
+ have_docstring = False
+ gen = tokenize.generate_tokens(StringIO.StringIO(source).readline)
+ def advance():
+ tok = gen.next()
+ return tok[0], tok[1]
+ ignore = frozenset((token.NEWLINE, tokenize.NL, token.COMMENT))
+ features = set()
+ try:
+ while True:
+ tp, value = advance()
+ if tp in ignore:
+ continue
+ elif tp == token.STRING:
+ if have_docstring:
+ break
+ have_docstring = True
+ elif tp == token.NAME and value == u"from":
+ tp, value = advance()
+ if tp != token.NAME or value != u"__future__":
+ break
+ tp, value = advance()
+ if tp != token.NAME or value != u"import":
+ break
+ tp, value = advance()
+ if tp == token.OP and value == u"(":
+ tp, value = advance()
+ while tp == token.NAME:
+ features.add(value)
+ tp, value = advance()
+ if tp != token.OP or value != u",":
+ break
+ tp, value = advance()
+ else:
+ break
+ except StopIteration:
+ pass
+ return frozenset(features)
+
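+# Editor's note: a sketch of the scanner's behaviour; only a leading block of
+# __future__ imports is collected, and scanning stops at the first other
+# statement.
+# >>> _detect_future_features(u"from __future__ import print_function\n")
+# frozenset([u'print_function'])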
+
+class FixerError(Exception):
+ """A fixer could not be loaded."""
+
+
+class RefactoringTool(object):
+
+ _default_options = {"print_function" : False,
+ "write_unchanged_files" : False}
+
+ CLASS_PREFIX = "Fix" # The prefix for fixer classes
+ FILE_PREFIX = "fix_" # The prefix for modules with a fixer within
+
+ def __init__(self, fixer_names, options=None, explicit=None):
+ """Initializer.
+
+ Args:
+ fixer_names: a list of fixers to import
+ options: a dict with configuration.
+ explicit: a list of fixers to run even if they are explicit.
+ """
+ self.fixers = fixer_names
+ self.explicit = explicit or []
+ self.options = self._default_options.copy()
+ if options is not None:
+ self.options.update(options)
+ if self.options["print_function"]:
+ self.grammar = pygram.python_grammar_no_print_statement
+ else:
+ self.grammar = pygram.python_grammar
+ # When this is True, the refactor*() methods will call write_file()
+ # even for files that were not changed during refactoring, provided
+ # the refactor method's write parameter was True.
+ self.write_unchanged_files = self.options.get("write_unchanged_files")
+ self.errors = []
+ self.logger = logging.getLogger("RefactoringTool")
+ self.fixer_log = []
+ self.wrote = False
+ self.driver = driver.Driver(self.grammar,
+ convert=pytree.convert,
+ logger=self.logger)
+ self.pre_order, self.post_order = self.get_fixers()
+
+
+ self.files = [] # List of files that were or should be modified
+
+ self.BM = bm.BottomMatcher()
+ self.bmi_pre_order = [] # Bottom Matcher incompatible fixers
+ self.bmi_post_order = []
+
+ for fixer in chain(self.post_order, self.pre_order):
+ if fixer.BM_compatible:
+ self.BM.add_fixer(fixer)
+ # remove fixers that will be handled by the bottom-up
+ # matcher
+ elif fixer in self.pre_order:
+ self.bmi_pre_order.append(fixer)
+ elif fixer in self.post_order:
+ self.bmi_post_order.append(fixer)
+
+ self.bmi_pre_order_heads = _get_headnode_dict(self.bmi_pre_order)
+ self.bmi_post_order_heads = _get_headnode_dict(self.bmi_post_order)
+
+
+
+ def get_fixers(self):
+ """Inspects the options to load the requested patterns and handlers.
+
+ Returns:
+ (pre_order, post_order), where pre_order is the list of fixers that
+ want a pre-order AST traversal, and post_order is the list that want
+ post-order traversal.
+ """
+ pre_order_fixers = []
+ post_order_fixers = []
+ for fix_mod_path in self.fixers:
+ mod = __import__(fix_mod_path, {}, {}, ["*"])
+ fix_name = fix_mod_path.rsplit(".", 1)[-1]
+ if fix_name.startswith(self.FILE_PREFIX):
+ fix_name = fix_name[len(self.FILE_PREFIX):]
+ parts = fix_name.split("_")
+ class_name = self.CLASS_PREFIX + "".join([p.title() for p in parts])
+ try:
+ fix_class = getattr(mod, class_name)
+ except AttributeError:
+ raise FixerError("Can't find %s.%s" % (fix_name, class_name))
+ fixer = fix_class(self.options, self.fixer_log)
+ if fixer.explicit and self.explicit is not True and \
+ fix_mod_path not in self.explicit:
+ self.log_message("Skipping optional fixer: %s", fix_name)
+ continue
+
+ self.log_debug("Adding transformation: %s", fix_name)
+ if fixer.order == "pre":
+ pre_order_fixers.append(fixer)
+ elif fixer.order == "post":
+ post_order_fixers.append(fixer)
+ else:
+ raise FixerError("Illegal fixer order: %r" % fixer.order)
+
+ key_func = operator.attrgetter("run_order")
+ pre_order_fixers.sort(key=key_func)
+ post_order_fixers.sort(key=key_func)
+ return (pre_order_fixers, post_order_fixers)
+
+ def log_error(self, msg, *args, **kwds):
+ """Called when an error occurs."""
+ raise
+
+ def log_message(self, msg, *args):
+ """Hook to log a message."""
+ if args:
+ msg = msg % args
+ self.logger.info(msg)
+
+ def log_debug(self, msg, *args):
+ if args:
+ msg = msg % args
+ self.logger.debug(msg)
+
+ def print_output(self, old_text, new_text, filename, equal):
+ """Called with the old version, new version, and filename of a
+ refactored file."""
+ pass
+
+ def refactor(self, items, write=False, doctests_only=False):
+ """Refactor a list of files and directories."""
+
+ for dir_or_file in items:
+ if os.path.isdir(dir_or_file):
+ self.refactor_dir(dir_or_file, write, doctests_only)
+ else:
+ self.refactor_file(dir_or_file, write, doctests_only)
+
+ def refactor_dir(self, dir_name, write=False, doctests_only=False):
+ """Descends down a directory and refactor every Python file found.
+
+ Python files are assumed to have a .py extension.
+
+ Files and subdirectories starting with '.' are skipped.
+ """
+ py_ext = os.extsep + "py"
+ for dirpath, dirnames, filenames in os.walk(dir_name):
+ self.log_debug("Descending into %s", dirpath)
+ dirnames.sort()
+ filenames.sort()
+ for name in filenames:
+ if (not name.startswith(".") and
+ os.path.splitext(name)[1] == py_ext):
+ fullname = os.path.join(dirpath, name)
+ self.refactor_file(fullname, write, doctests_only)
+ # Modify dirnames in-place to remove subdirs with leading dots
+ dirnames[:] = [dn for dn in dirnames if not dn.startswith(".")]
+
+ def _read_python_source(self, filename):
+ """
+ Do our best to decode a Python source file correctly.
+ """
+ try:
+ f = open(filename, "rb")
+ except IOError as err:
+ self.log_error("Can't open %s: %s", filename, err)
+ return None, None
+ try:
+ encoding = tokenize.detect_encoding(f.readline)[0]
+ finally:
+ f.close()
+ with _open_with_encoding(filename, "r", encoding=encoding) as f:
+ return _from_system_newlines(f.read()), encoding
+
+ def refactor_file(self, filename, write=False, doctests_only=False):
+ """Refactors a file."""
+ input, encoding = self._read_python_source(filename)
+ if input is None:
+ # Reading the file failed.
+ return
+ input += u"\n" # Silence certain parse errors
+ if doctests_only:
+ self.log_debug("Refactoring doctests in %s", filename)
+ output = self.refactor_docstring(input, filename)
+ if self.write_unchanged_files or output != input:
+ self.processed_file(output, filename, input, write, encoding)
+ else:
+ self.log_debug("No doctest changes in %s", filename)
+ else:
+ tree = self.refactor_string(input, filename)
+ if self.write_unchanged_files or (tree and tree.was_changed):
+ # The [:-1] is to take off the \n we added earlier
+ self.processed_file(unicode(tree)[:-1], filename,
+ write=write, encoding=encoding)
+ else:
+ self.log_debug("No changes in %s", filename)
+
+ def refactor_string(self, data, name):
+ """Refactor a given input string.
+
+ Args:
+ data: a string holding the code to be refactored.
+ name: a human-readable name for use in error/log messages.
+
+ Returns:
+ An AST corresponding to the refactored input stream; None if
+ there were errors during the parse.
+ """
+ features = _detect_future_features(data)
+ if "print_function" in features:
+ self.driver.grammar = pygram.python_grammar_no_print_statement
+ try:
+ tree = self.driver.parse_string(data)
+ except Exception as err:
+ self.log_error("Can't parse %s: %s: %s",
+ name, err.__class__.__name__, err)
+ return
+ finally:
+ self.driver.grammar = self.grammar
+ tree.future_features = features
+ self.log_debug("Refactoring %s", name)
+ self.refactor_tree(tree, name)
+ return tree
+
+ def refactor_stdin(self, doctests_only=False):
+ input = sys.stdin.read()
+ if doctests_only:
+ self.log_debug("Refactoring doctests in stdin")
+ output = self.refactor_docstring(input, "<stdin>")
+ if self.write_unchanged_files or output != input:
+ self.processed_file(output, "<stdin>", input)
+ else:
+ self.log_debug("No doctest changes in stdin")
+ else:
+ tree = self.refactor_string(input, "<stdin>")
+ if self.write_unchanged_files or (tree and tree.was_changed):
+ self.processed_file(unicode(tree), "<stdin>", input)
+ else:
+ self.log_debug("No changes in stdin")
+
+ def refactor_tree(self, tree, name):
+ """Refactors a parse tree (modifying the tree in place).
+
+ For compatible patterns the bottom matcher module is
+ used. Otherwise the tree is traversed node-to-node for
+ matches.
+
+ Args:
+ tree: a pytree.Node instance representing the root of the tree
+ to be refactored.
+ name: a human-readable name for this tree.
+
+ Returns:
+ True if the tree was modified, False otherwise.
+ """
+
+ for fixer in chain(self.pre_order, self.post_order):
+ fixer.start_tree(tree, name)
+
+ # Use traditional matching for the incompatible fixers.
+ self.traverse_by(self.bmi_pre_order_heads, tree.pre_order())
+ self.traverse_by(self.bmi_post_order_heads, tree.post_order())
+
+ # obtain a set of candidate nodes
+ match_set = self.BM.run(tree.leaves())
+
+ while any(match_set.values()):
+ for fixer in self.BM.fixers:
+ if fixer in match_set and match_set[fixer]:
+ # Sort by depth; apply fixers from the bottom of the AST to the top.
+ match_set[fixer].sort(key=pytree.Base.depth, reverse=True)
+
+ if fixer.keep_line_order:
+ # Some fixers (e.g. fix_imports) must be applied
+ # following the original file's line order.
+ match_set[fixer].sort(key=pytree.Base.get_lineno)
+
+ for node in list(match_set[fixer]):
+ if node in match_set[fixer]:
+ match_set[fixer].remove(node)
+
+ try:
+ find_root(node)
+ except ValueError:
+ # this node has been cut off from a
+ # previous transformation; skip it
+ continue
+
+ if node.fixers_applied and fixer in node.fixers_applied:
+ # do not apply the same fixer again
+ continue
+
+ results = fixer.match(node)
+
+ if results:
+ new = fixer.transform(node, results)
+ if new is not None:
+ node.replace(new)
+ #new.fixers_applied.append(fixer)
+ for node in new.post_order():
+ # do not apply the fixer again to
+ # this or any subnode
+ if not node.fixers_applied:
+ node.fixers_applied = []
+ node.fixers_applied.append(fixer)
+
+ # update the original match set for
+ # the added code
+ new_matches = self.BM.run(new.leaves())
+ for fxr in new_matches:
+ if fxr not in match_set:
+ match_set[fxr] = []
+
+ match_set[fxr].extend(new_matches[fxr])
+
+ for fixer in chain(self.pre_order, self.post_order):
+ fixer.finish_tree(tree, name)
+ return tree.was_changed
+
+ def traverse_by(self, fixers, traversal):
+ """Traverse an AST, applying a set of fixers to each node.
+
+ This is a helper method for refactor_tree().
+
+ Args:
+ fixers: a list of fixer instances.
+ traversal: a generator that yields AST nodes.
+
+ Returns:
+ None
+ """
+ if not fixers:
+ return
+ for node in traversal:
+ for fixer in fixers[node.type]:
+ results = fixer.match(node)
+ if results:
+ new = fixer.transform(node, results)
+ if new is not None:
+ node.replace(new)
+ node = new
+
+ def processed_file(self, new_text, filename, old_text=None, write=False,
+ encoding=None):
+ """
+ Called when a file has been refactored and there may be changes.
+ """
+ self.files.append(filename)
+ if old_text is None:
+ old_text = self._read_python_source(filename)[0]
+ if old_text is None:
+ return
+ equal = old_text == new_text
+ self.print_output(old_text, new_text, filename, equal)
+ if equal:
+ self.log_debug("No changes to %s", filename)
+ if not self.write_unchanged_files:
+ return
+ if write:
+ self.write_file(new_text, filename, old_text, encoding)
+ else:
+ self.log_debug("Not writing changes to %s", filename)
+
+ def write_file(self, new_text, filename, old_text, encoding=None):
+ """Writes a string to a file.
+
+ It first shows a unified diff between the old text and the new text, and
+ then rewrites the file; the latter is only done if the write option is
+ set.
+ """
+ try:
+ f = _open_with_encoding(filename, "w", encoding=encoding)
+ except os.error as err:
+ self.log_error("Can't create %s: %s", filename, err)
+ return
+ try:
+ f.write(_to_system_newlines(new_text))
+ except os.error as err:
+ self.log_error("Can't write %s: %s", filename, err)
+ finally:
+ f.close()
+ self.log_debug("Wrote changes to %s", filename)
+ self.wrote = True
+
+ PS1 = ">>> "
+ PS2 = "... "
+
+ def refactor_docstring(self, input, filename):
+ """Refactors a docstring, looking for doctests.
+
+ This returns a modified version of the input string. It looks
+ for doctests, which start with a ">>>" prompt, and may be
+ continued with "..." prompts, as long as the "..." is indented
+ the same as the ">>>".
+
+ (Unfortunately we can't use the doctest module's parser,
+ since, like most parsers, it is not geared towards preserving
+ the original source.)
+ """
+ result = []
+ block = None
+ block_lineno = None
+ indent = None
+ lineno = 0
+ for line in input.splitlines(True):
+ lineno += 1
+ if line.lstrip().startswith(self.PS1):
+ if block is not None:
+ result.extend(self.refactor_doctest(block, block_lineno,
+ indent, filename))
+ block_lineno = lineno
+ block = [line]
+ i = line.find(self.PS1)
+ indent = line[:i]
+ elif (indent is not None and
+ (line.startswith(indent + self.PS2) or
+ line == indent + self.PS2.rstrip() + u"\n")):
+ block.append(line)
+ else:
+ if block is not None:
+ result.extend(self.refactor_doctest(block, block_lineno,
+ indent, filename))
+ block = None
+ indent = None
+ result.append(line)
+ if block is not None:
+ result.extend(self.refactor_doctest(block, block_lineno,
+ indent, filename))
+ return u"".join(result)
+
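+ # Editor's note: a sketch (assuming `rt` is a RefactoringTool loaded with
+ # the stock "lib2to3.fixes" package) of how only the doctest lines are
+ # rewritten:
+ # >>> rt.refactor_docstring(u">>> print 'x'\n", "<doc>")
+ # u">>> print('x')\n"
+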
+ def refactor_doctest(self, block, lineno, indent, filename):
+ """Refactors one doctest.
+
+ A doctest is given as a block of lines, the first of which starts
+ with ">>>" (possibly indented), while the remaining lines start
+ with "..." (identically indented).
+
+ """
+ try:
+ tree = self.parse_block(block, lineno, indent)
+ except Exception as err:
+ if self.logger.isEnabledFor(logging.DEBUG):
+ for line in block:
+ self.log_debug("Source: %s", line.rstrip(u"\n"))
+ self.log_error("Can't parse docstring in %s line %s: %s: %s",
+ filename, lineno, err.__class__.__name__, err)
+ return block
+ if self.refactor_tree(tree, filename):
+ new = unicode(tree).splitlines(True)
+ # Undo the adjustment of the line numbers in wrap_toks() below.
+ clipped, new = new[:lineno-1], new[lineno-1:]
+ assert clipped == [u"\n"] * (lineno-1), clipped
+ if not new[-1].endswith(u"\n"):
+ new[-1] += u"\n"
+ block = [indent + self.PS1 + new.pop(0)]
+ if new:
+ block += [indent + self.PS2 + line for line in new]
+ return block
+
+ def summarize(self):
+ if self.wrote:
+ were = "were"
+ else:
+ were = "need to be"
+ if not self.files:
+ self.log_message("No files %s modified.", were)
+ else:
+ self.log_message("Files that %s modified:", were)
+ for file in self.files:
+ self.log_message(file)
+ if self.fixer_log:
+ self.log_message("Warnings/messages while refactoring:")
+ for message in self.fixer_log:
+ self.log_message(message)
+ if self.errors:
+ if len(self.errors) == 1:
+ self.log_message("There was 1 error:")
+ else:
+ self.log_message("There were %d errors:", len(self.errors))
+ for msg, args, kwds in self.errors:
+ self.log_message(msg, *args, **kwds)
+
+ def parse_block(self, block, lineno, indent):
+ """Parses a block into a tree.
+
+ This is necessary to get correct line number / offset information
+ in the parser diagnostics and embedded into the parse tree.
+ """
+ tree = self.driver.parse_tokens(self.wrap_toks(block, lineno, indent))
+ tree.future_features = frozenset()
+ return tree
+
+ def wrap_toks(self, block, lineno, indent):
+ """Wraps a tokenize stream to systematically modify start/end."""
+ tokens = tokenize.generate_tokens(self.gen_lines(block, indent).next)
+ for type, value, (line0, col0), (line1, col1), line_text in tokens:
+ line0 += lineno - 1
+ line1 += lineno - 1
+ # Don't bother updating the columns; this is too complicated
+ # since line_text would also have to be updated and it would
+ # still break for tokens spanning lines. Let the user guess
+ # that the column numbers for doctests are relative to the
+ # end of the prompt string (PS1 or PS2).
+ yield type, value, (line0, col0), (line1, col1), line_text
+
+
+ def gen_lines(self, block, indent):
+ """Generates lines as expected by tokenize from a list of lines.
+
+ This strips the first len(indent + self.PS1) characters off each line.
+ """
+ prefix1 = indent + self.PS1
+ prefix2 = indent + self.PS2
+ prefix = prefix1
+ for line in block:
+ if line.startswith(prefix):
+ yield line[len(prefix):]
+ elif line == prefix.rstrip() + u"\n":
+ yield u"\n"
+ else:
+ raise AssertionError("line=%r, prefix=%r" % (line, prefix))
+ prefix = prefix2
+ while True:
+ yield ""
+
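+# Editor's note: a sketch of typical in-memory use of RefactoringTool with
+# the stock fixer package; fix_print rewrites the print statement.
+# >>> rt = RefactoringTool(get_fixers_from_package("lib2to3.fixes"))
+# >>> tree = rt.refactor_string(u"print 'hi'\n", "<example>")
+# >>> unicode(tree)
+# u"print('hi')\n"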
+
+class MultiprocessingUnsupported(Exception):
+ pass
+
+
+class MultiprocessRefactoringTool(RefactoringTool):
+
+ def __init__(self, *args, **kwargs):
+ super(MultiprocessRefactoringTool, self).__init__(*args, **kwargs)
+ self.queue = None
+ self.output_lock = None
+
+ def refactor(self, items, write=False, doctests_only=False,
+ num_processes=1):
+ if num_processes == 1:
+ return super(MultiprocessRefactoringTool, self).refactor(
+ items, write, doctests_only)
+ try:
+ import multiprocessing
+ except ImportError:
+ raise MultiprocessingUnsupported
+ if self.queue is not None:
+ raise RuntimeError("already doing multiple processes")
+ self.queue = multiprocessing.JoinableQueue()
+ self.output_lock = multiprocessing.Lock()
+ processes = [multiprocessing.Process(target=self._child)
+ for i in xrange(num_processes)]
+ try:
+ for p in processes:
+ p.start()
+ super(MultiprocessRefactoringTool, self).refactor(items, write,
+ doctests_only)
+ finally:
+ self.queue.join()
+ for i in xrange(num_processes):
+ self.queue.put(None)
+ for p in processes:
+ if p.is_alive():
+ p.join()
+ self.queue = None
+
+ def _child(self):
+ task = self.queue.get()
+ while task is not None:
+ args, kwargs = task
+ try:
+ super(MultiprocessRefactoringTool, self).refactor_file(
+ *args, **kwargs)
+ finally:
+ self.queue.task_done()
+ task = self.queue.get()
+
+ def refactor_file(self, *args, **kwargs):
+ if self.queue is not None:
+ self.queue.put((args, kwargs))
+ else:
+ return super(MultiprocessRefactoringTool, self).refactor_file(
+ *args, **kwargs)
diff --git a/lib/python2.7/lib2to3/tests/__init__.py b/lib/python2.7/lib2to3/tests/__init__.py
new file mode 100644
index 0000000..cfaea0d
--- /dev/null
+++ b/lib/python2.7/lib2to3/tests/__init__.py
@@ -0,0 +1,24 @@
+"""Make tests/ into a package. This allows us to "import tests" and
+have tests.all_tests be a TestSuite representing all test cases
+from all test_*.py files in tests/."""
+# Author: Collin Winter
+
+import os
+import os.path
+import unittest
+import types
+
+from . import support
+
+all_tests = unittest.TestSuite()
+
+tests_dir = os.path.join(os.path.dirname(__file__), '..', 'tests')
+tests = [t[0:-3] for t in os.listdir(tests_dir)
+ if t.startswith('test_') and t.endswith('.py')]
+
+loader = unittest.TestLoader()
+
+for t in tests:
+ __import__("",globals(),locals(),[t],level=1)
+ mod = globals()[t]
+ all_tests.addTests(loader.loadTestsFromModule(mod))
diff --git a/lib/python2.7/lib2to3/tests/data/README b/lib/python2.7/lib2to3/tests/data/README
new file mode 100644
index 0000000..7aa47e4
--- /dev/null
+++ b/lib/python2.7/lib2to3/tests/data/README
@@ -0,0 +1,6 @@
+In this directory:
+- py2_test_grammar.py -- test file that exercises most/all of Python 2.x's grammar.
+- py3_test_grammar.py -- test file that exercises most/all of Python 3.x's grammar.
+- infinite_recursion.py -- test file that causes lib2to3's faster recursive pattern matching
+ scheme to fail, but passes when lib2to3 falls back to iterative pattern matching.
+- fixes/ -- for use by test_refactor.py
diff --git a/lib/python2.7/lib2to3/tests/data/bom.py b/lib/python2.7/lib2to3/tests/data/bom.py
new file mode 100644
index 0000000..9bc3975
--- /dev/null
+++ b/lib/python2.7/lib2to3/tests/data/bom.py
@@ -0,0 +1,2 @@
+# coding: utf-8
+print "BOM BOOM!"
diff --git a/lib/python2.7/lib2to3/tests/data/crlf.py b/lib/python2.7/lib2to3/tests/data/crlf.py
new file mode 100644
index 0000000..dbe2d7b
--- /dev/null
+++ b/lib/python2.7/lib2to3/tests/data/crlf.py
@@ -0,0 +1,3 @@
+print "hi"
+
+print "Like bad Windows newlines?"
diff --git a/lib/python2.7/lib2to3/tests/data/different_encoding.py b/lib/python2.7/lib2to3/tests/data/different_encoding.py
new file mode 100644
index 0000000..4dc1d7b
--- /dev/null
+++ b/lib/python2.7/lib2to3/tests/data/different_encoding.py
@@ -0,0 +1,6 @@
+#!/usr/bin/env python2
+# -*- coding: utf-8 -*-
+print u'ßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝÞ'
+
+def f(x):
+ print '%s\t-> α(%2i):%s β(%s)'
diff --git a/lib/python2.7/lib2to3/tests/data/false_encoding.py b/lib/python2.7/lib2to3/tests/data/false_encoding.py
new file mode 100644
index 0000000..6223b39
--- /dev/null
+++ b/lib/python2.7/lib2to3/tests/data/false_encoding.py
@@ -0,0 +1,2 @@
+#!/usr/bin/env python2
+print '#coding=0'
diff --git a/lib/python2.7/lib2to3/tests/data/fixers/bad_order.py b/lib/python2.7/lib2to3/tests/data/fixers/bad_order.py
new file mode 100644
index 0000000..061bbf2
--- /dev/null
+++ b/lib/python2.7/lib2to3/tests/data/fixers/bad_order.py
@@ -0,0 +1,5 @@
+from lib2to3.fixer_base import BaseFix
+
+class FixBadOrder(BaseFix):
+
+ order = "crazy"
diff --git a/lib/python2.7/lib2to3/tests/data/fixers/myfixes/__init__.py b/lib/python2.7/lib2to3/tests/data/fixers/myfixes/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/lib/python2.7/lib2to3/tests/data/fixers/myfixes/__init__.py
diff --git a/lib/python2.7/lib2to3/tests/data/fixers/myfixes/fix_explicit.py b/lib/python2.7/lib2to3/tests/data/fixers/myfixes/fix_explicit.py
new file mode 100644
index 0000000..cbe16f6
--- /dev/null
+++ b/lib/python2.7/lib2to3/tests/data/fixers/myfixes/fix_explicit.py
@@ -0,0 +1,6 @@
+from lib2to3.fixer_base import BaseFix
+
+class FixExplicit(BaseFix):
+ explicit = True
+
+ def match(self): return False
diff --git a/lib/python2.7/lib2to3/tests/data/fixers/myfixes/fix_first.py b/lib/python2.7/lib2to3/tests/data/fixers/myfixes/fix_first.py
new file mode 100644
index 0000000..a88821f
--- /dev/null
+++ b/lib/python2.7/lib2to3/tests/data/fixers/myfixes/fix_first.py
@@ -0,0 +1,6 @@
+from lib2to3.fixer_base import BaseFix
+
+class FixFirst(BaseFix):
+ run_order = 1
+
+ def match(self, node): return False
diff --git a/lib/python2.7/lib2to3/tests/data/fixers/myfixes/fix_last.py b/lib/python2.7/lib2to3/tests/data/fixers/myfixes/fix_last.py
new file mode 100644
index 0000000..9a077d4
--- /dev/null
+++ b/lib/python2.7/lib2to3/tests/data/fixers/myfixes/fix_last.py
@@ -0,0 +1,7 @@
+from lib2to3.fixer_base import BaseFix
+
+class FixLast(BaseFix):
+
+ run_order = 10
+
+ def match(self, node): return False
diff --git a/lib/python2.7/lib2to3/tests/data/fixers/myfixes/fix_parrot.py b/lib/python2.7/lib2to3/tests/data/fixers/myfixes/fix_parrot.py
new file mode 100644
index 0000000..6db79ad
--- /dev/null
+++ b/lib/python2.7/lib2to3/tests/data/fixers/myfixes/fix_parrot.py
@@ -0,0 +1,13 @@
+from lib2to3.fixer_base import BaseFix
+from lib2to3.fixer_util import Name
+
+class FixParrot(BaseFix):
+ """
+ Change functions named 'parrot' to 'cheese'.
+ """
+
+ PATTERN = """funcdef < 'def' name='parrot' any* >"""
+
+ def transform(self, node, results):
+ name = results["name"]
+ name.replace(Name("cheese", name.prefix))
diff --git a/lib/python2.7/lib2to3/tests/data/fixers/myfixes/fix_preorder.py b/lib/python2.7/lib2to3/tests/data/fixers/myfixes/fix_preorder.py
new file mode 100644
index 0000000..b9bfbba
--- /dev/null
+++ b/lib/python2.7/lib2to3/tests/data/fixers/myfixes/fix_preorder.py
@@ -0,0 +1,6 @@
+from lib2to3.fixer_base import BaseFix
+
+class FixPreorder(BaseFix):
+ order = "pre"
+
+ def match(self, node): return False
diff --git a/lib/python2.7/lib2to3/tests/data/fixers/no_fixer_cls.py b/lib/python2.7/lib2to3/tests/data/fixers/no_fixer_cls.py
new file mode 100644
index 0000000..506f794
--- /dev/null
+++ b/lib/python2.7/lib2to3/tests/data/fixers/no_fixer_cls.py
@@ -0,0 +1 @@
+# This is empty so trying to fetch the fixer class gives an AttributeError
diff --git a/lib/python2.7/lib2to3/tests/data/fixers/parrot_example.py b/lib/python2.7/lib2to3/tests/data/fixers/parrot_example.py
new file mode 100644
index 0000000..0852928
--- /dev/null
+++ b/lib/python2.7/lib2to3/tests/data/fixers/parrot_example.py
@@ -0,0 +1,2 @@
+def parrot():
+ pass
diff --git a/lib/python2.7/lib2to3/tests/data/infinite_recursion.py b/lib/python2.7/lib2to3/tests/data/infinite_recursion.py
new file mode 100644
index 0000000..71715ef
--- /dev/null
+++ b/lib/python2.7/lib2to3/tests/data/infinite_recursion.py
@@ -0,0 +1,2669 @@
+# This file is used to verify that 2to3 falls back to a slower, iterative pattern matching
+# scheme in the event that the faster recursive system fails due to infinite recursion.
+from ctypes import *
+STRING = c_char_p
+
+
+OSUnknownByteOrder = 0
+UIT_PROMPT = 1
+P_PGID = 2
+P_PID = 1
+UIT_ERROR = 5
+UIT_INFO = 4
+UIT_NONE = 0
+P_ALL = 0
+UIT_VERIFY = 2
+OSBigEndian = 2
+UIT_BOOLEAN = 3
+OSLittleEndian = 1
+__darwin_nl_item = c_int
+__darwin_wctrans_t = c_int
+__darwin_wctype_t = c_ulong
+__int8_t = c_byte
+__uint8_t = c_ubyte
+__int16_t = c_short
+__uint16_t = c_ushort
+__int32_t = c_int
+__uint32_t = c_uint
+__int64_t = c_longlong
+__uint64_t = c_ulonglong
+__darwin_intptr_t = c_long
+__darwin_natural_t = c_uint
+__darwin_ct_rune_t = c_int
+class __mbstate_t(Union):
+ pass
+__mbstate_t._pack_ = 4
+__mbstate_t._fields_ = [
+ ('__mbstate8', c_char * 128),
+ ('_mbstateL', c_longlong),
+]
+assert sizeof(__mbstate_t) == 128, sizeof(__mbstate_t)
+assert alignment(__mbstate_t) == 4, alignment(__mbstate_t)
+__darwin_mbstate_t = __mbstate_t
+__darwin_ptrdiff_t = c_int
+__darwin_size_t = c_ulong
+__darwin_va_list = STRING
+__darwin_wchar_t = c_int
+__darwin_rune_t = __darwin_wchar_t
+__darwin_wint_t = c_int
+__darwin_clock_t = c_ulong
+__darwin_socklen_t = __uint32_t
+__darwin_ssize_t = c_long
+__darwin_time_t = c_long
+sig_atomic_t = c_int
+class sigcontext(Structure):
+ pass
+sigcontext._fields_ = [
+ ('sc_onstack', c_int),
+ ('sc_mask', c_int),
+ ('sc_eax', c_uint),
+ ('sc_ebx', c_uint),
+ ('sc_ecx', c_uint),
+ ('sc_edx', c_uint),
+ ('sc_edi', c_uint),
+ ('sc_esi', c_uint),
+ ('sc_ebp', c_uint),
+ ('sc_esp', c_uint),
+ ('sc_ss', c_uint),
+ ('sc_eflags', c_uint),
+ ('sc_eip', c_uint),
+ ('sc_cs', c_uint),
+ ('sc_ds', c_uint),
+ ('sc_es', c_uint),
+ ('sc_fs', c_uint),
+ ('sc_gs', c_uint),
+]
+assert sizeof(sigcontext) == 72, sizeof(sigcontext)
+assert alignment(sigcontext) == 4, alignment(sigcontext)
+u_int8_t = c_ubyte
+u_int16_t = c_ushort
+u_int32_t = c_uint
+u_int64_t = c_ulonglong
+int32_t = c_int
+register_t = int32_t
+user_addr_t = u_int64_t
+user_size_t = u_int64_t
+int64_t = c_longlong
+user_ssize_t = int64_t
+user_long_t = int64_t
+user_ulong_t = u_int64_t
+user_time_t = int64_t
+syscall_arg_t = u_int64_t
+
+# values for unnamed enumeration
+class aes_key_st(Structure):
+ pass
+aes_key_st._fields_ = [
+ ('rd_key', c_ulong * 60),
+ ('rounds', c_int),
+]
+assert sizeof(aes_key_st) == 244, sizeof(aes_key_st)
+assert alignment(aes_key_st) == 4, alignment(aes_key_st)
+AES_KEY = aes_key_st
+class asn1_ctx_st(Structure):
+ pass
+asn1_ctx_st._fields_ = [
+ ('p', POINTER(c_ubyte)),
+ ('eos', c_int),
+ ('error', c_int),
+ ('inf', c_int),
+ ('tag', c_int),
+ ('xclass', c_int),
+ ('slen', c_long),
+ ('max', POINTER(c_ubyte)),
+ ('q', POINTER(c_ubyte)),
+ ('pp', POINTER(POINTER(c_ubyte))),
+ ('line', c_int),
+]
+assert sizeof(asn1_ctx_st) == 44, sizeof(asn1_ctx_st)
+assert alignment(asn1_ctx_st) == 4, alignment(asn1_ctx_st)
+ASN1_CTX = asn1_ctx_st
+class asn1_object_st(Structure):
+ pass
+asn1_object_st._fields_ = [
+ ('sn', STRING),
+ ('ln', STRING),
+ ('nid', c_int),
+ ('length', c_int),
+ ('data', POINTER(c_ubyte)),
+ ('flags', c_int),
+]
+assert sizeof(asn1_object_st) == 24, sizeof(asn1_object_st)
+assert alignment(asn1_object_st) == 4, alignment(asn1_object_st)
+ASN1_OBJECT = asn1_object_st
+class asn1_string_st(Structure):
+ pass
+asn1_string_st._fields_ = [
+ ('length', c_int),
+ ('type', c_int),
+ ('data', POINTER(c_ubyte)),
+ ('flags', c_long),
+]
+assert sizeof(asn1_string_st) == 16, sizeof(asn1_string_st)
+assert alignment(asn1_string_st) == 4, alignment(asn1_string_st)
+ASN1_STRING = asn1_string_st
+class ASN1_ENCODING_st(Structure):
+ pass
+ASN1_ENCODING_st._fields_ = [
+ ('enc', POINTER(c_ubyte)),
+ ('len', c_long),
+ ('modified', c_int),
+]
+assert sizeof(ASN1_ENCODING_st) == 12, sizeof(ASN1_ENCODING_st)
+assert alignment(ASN1_ENCODING_st) == 4, alignment(ASN1_ENCODING_st)
+ASN1_ENCODING = ASN1_ENCODING_st
+class asn1_string_table_st(Structure):
+ pass
+asn1_string_table_st._fields_ = [
+ ('nid', c_int),
+ ('minsize', c_long),
+ ('maxsize', c_long),
+ ('mask', c_ulong),
+ ('flags', c_ulong),
+]
+assert sizeof(asn1_string_table_st) == 20, sizeof(asn1_string_table_st)
+assert alignment(asn1_string_table_st) == 4, alignment(asn1_string_table_st)
+ASN1_STRING_TABLE = asn1_string_table_st
+class ASN1_TEMPLATE_st(Structure):
+ pass
+ASN1_TEMPLATE_st._fields_ = [
+]
+ASN1_TEMPLATE = ASN1_TEMPLATE_st
+class ASN1_ITEM_st(Structure):
+ pass
+ASN1_ITEM = ASN1_ITEM_st
+ASN1_ITEM_st._fields_ = [
+]
+class ASN1_TLC_st(Structure):
+ pass
+ASN1_TLC = ASN1_TLC_st
+ASN1_TLC_st._fields_ = [
+]
+class ASN1_VALUE_st(Structure):
+ pass
+ASN1_VALUE_st._fields_ = [
+]
+ASN1_VALUE = ASN1_VALUE_st
+ASN1_ITEM_EXP = ASN1_ITEM
+class asn1_type_st(Structure):
+ pass
+class N12asn1_type_st4DOLLAR_11E(Union):
+ pass
+ASN1_BOOLEAN = c_int
+ASN1_INTEGER = asn1_string_st
+ASN1_ENUMERATED = asn1_string_st
+ASN1_BIT_STRING = asn1_string_st
+ASN1_OCTET_STRING = asn1_string_st
+ASN1_PRINTABLESTRING = asn1_string_st
+ASN1_T61STRING = asn1_string_st
+ASN1_IA5STRING = asn1_string_st
+ASN1_GENERALSTRING = asn1_string_st
+ASN1_BMPSTRING = asn1_string_st
+ASN1_UNIVERSALSTRING = asn1_string_st
+ASN1_UTCTIME = asn1_string_st
+ASN1_GENERALIZEDTIME = asn1_string_st
+ASN1_VISIBLESTRING = asn1_string_st
+ASN1_UTF8STRING = asn1_string_st
+N12asn1_type_st4DOLLAR_11E._fields_ = [
+ ('ptr', STRING),
+ ('boolean', ASN1_BOOLEAN),
+ ('asn1_string', POINTER(ASN1_STRING)),
+ ('object', POINTER(ASN1_OBJECT)),
+ ('integer', POINTER(ASN1_INTEGER)),
+ ('enumerated', POINTER(ASN1_ENUMERATED)),
+ ('bit_string', POINTER(ASN1_BIT_STRING)),
+ ('octet_string', POINTER(ASN1_OCTET_STRING)),
+ ('printablestring', POINTER(ASN1_PRINTABLESTRING)),
+ ('t61string', POINTER(ASN1_T61STRING)),
+ ('ia5string', POINTER(ASN1_IA5STRING)),
+ ('generalstring', POINTER(ASN1_GENERALSTRING)),
+ ('bmpstring', POINTER(ASN1_BMPSTRING)),
+ ('universalstring', POINTER(ASN1_UNIVERSALSTRING)),
+ ('utctime', POINTER(ASN1_UTCTIME)),
+ ('generalizedtime', POINTER(ASN1_GENERALIZEDTIME)),
+ ('visiblestring', POINTER(ASN1_VISIBLESTRING)),
+ ('utf8string', POINTER(ASN1_UTF8STRING)),
+ ('set', POINTER(ASN1_STRING)),
+ ('sequence', POINTER(ASN1_STRING)),
+]
+assert sizeof(N12asn1_type_st4DOLLAR_11E) == 4, sizeof(N12asn1_type_st4DOLLAR_11E)
+assert alignment(N12asn1_type_st4DOLLAR_11E) == 4, alignment(N12asn1_type_st4DOLLAR_11E)
+asn1_type_st._fields_ = [
+ ('type', c_int),
+ ('value', N12asn1_type_st4DOLLAR_11E),
+]
+assert sizeof(asn1_type_st) == 8, sizeof(asn1_type_st)
+assert alignment(asn1_type_st) == 4, alignment(asn1_type_st)
+ASN1_TYPE = asn1_type_st
+class asn1_method_st(Structure):
+ pass
+asn1_method_st._fields_ = [
+ ('i2d', CFUNCTYPE(c_int)),
+ ('d2i', CFUNCTYPE(STRING)),
+ ('create', CFUNCTYPE(STRING)),
+ ('destroy', CFUNCTYPE(None)),
+]
+assert sizeof(asn1_method_st) == 16, sizeof(asn1_method_st)
+assert alignment(asn1_method_st) == 4, alignment(asn1_method_st)
+ASN1_METHOD = asn1_method_st
+class asn1_header_st(Structure):
+ pass
+asn1_header_st._fields_ = [
+ ('header', POINTER(ASN1_OCTET_STRING)),
+ ('data', STRING),
+ ('meth', POINTER(ASN1_METHOD)),
+]
+assert sizeof(asn1_header_st) == 12, sizeof(asn1_header_st)
+assert alignment(asn1_header_st) == 4, alignment(asn1_header_st)
+ASN1_HEADER = asn1_header_st
+class BIT_STRING_BITNAME_st(Structure):
+ pass
+BIT_STRING_BITNAME_st._fields_ = [
+ ('bitnum', c_int),
+ ('lname', STRING),
+ ('sname', STRING),
+]
+assert sizeof(BIT_STRING_BITNAME_st) == 12, sizeof(BIT_STRING_BITNAME_st)
+assert alignment(BIT_STRING_BITNAME_st) == 4, alignment(BIT_STRING_BITNAME_st)
+BIT_STRING_BITNAME = BIT_STRING_BITNAME_st
+class bio_st(Structure):
+ pass
+BIO = bio_st
+bio_info_cb = CFUNCTYPE(None, POINTER(bio_st), c_int, STRING, c_int, c_long, c_long)
+class bio_method_st(Structure):
+ pass
+bio_method_st._fields_ = [
+ ('type', c_int),
+ ('name', STRING),
+ ('bwrite', CFUNCTYPE(c_int, POINTER(BIO), STRING, c_int)),
+ ('bread', CFUNCTYPE(c_int, POINTER(BIO), STRING, c_int)),
+ ('bputs', CFUNCTYPE(c_int, POINTER(BIO), STRING)),
+ ('bgets', CFUNCTYPE(c_int, POINTER(BIO), STRING, c_int)),
+ ('ctrl', CFUNCTYPE(c_long, POINTER(BIO), c_int, c_long, c_void_p)),
+ ('create', CFUNCTYPE(c_int, POINTER(BIO))),
+ ('destroy', CFUNCTYPE(c_int, POINTER(BIO))),
+ ('callback_ctrl', CFUNCTYPE(c_long, POINTER(BIO), c_int, POINTER(bio_info_cb))),
+]
+assert sizeof(bio_method_st) == 40, sizeof(bio_method_st)
+assert alignment(bio_method_st) == 4, alignment(bio_method_st)
+BIO_METHOD = bio_method_st
+class crypto_ex_data_st(Structure):
+ pass
+class stack_st(Structure):
+ pass
+STACK = stack_st
+crypto_ex_data_st._fields_ = [
+ ('sk', POINTER(STACK)),
+ ('dummy', c_int),
+]
+assert sizeof(crypto_ex_data_st) == 8, sizeof(crypto_ex_data_st)
+assert alignment(crypto_ex_data_st) == 4, alignment(crypto_ex_data_st)
+CRYPTO_EX_DATA = crypto_ex_data_st
+bio_st._fields_ = [
+ ('method', POINTER(BIO_METHOD)),
+ ('callback', CFUNCTYPE(c_long, POINTER(bio_st), c_int, STRING, c_int, c_long, c_long)),
+ ('cb_arg', STRING),
+ ('init', c_int),
+ ('shutdown', c_int),
+ ('flags', c_int),
+ ('retry_reason', c_int),
+ ('num', c_int),
+ ('ptr', c_void_p),
+ ('next_bio', POINTER(bio_st)),
+ ('prev_bio', POINTER(bio_st)),
+ ('references', c_int),
+ ('num_read', c_ulong),
+ ('num_write', c_ulong),
+ ('ex_data', CRYPTO_EX_DATA),
+]
+assert sizeof(bio_st) == 64, sizeof(bio_st)
+assert alignment(bio_st) == 4, alignment(bio_st)
+class bio_f_buffer_ctx_struct(Structure):
+ pass
+bio_f_buffer_ctx_struct._fields_ = [
+ ('ibuf_size', c_int),
+ ('obuf_size', c_int),
+ ('ibuf', STRING),
+ ('ibuf_len', c_int),
+ ('ibuf_off', c_int),
+ ('obuf', STRING),
+ ('obuf_len', c_int),
+ ('obuf_off', c_int),
+]
+assert sizeof(bio_f_buffer_ctx_struct) == 32, sizeof(bio_f_buffer_ctx_struct)
+assert alignment(bio_f_buffer_ctx_struct) == 4, alignment(bio_f_buffer_ctx_struct)
+BIO_F_BUFFER_CTX = bio_f_buffer_ctx_struct
+class hostent(Structure):
+ pass
+hostent._fields_ = [
+]
+class bf_key_st(Structure):
+ pass
+bf_key_st._fields_ = [
+ ('P', c_uint * 18),
+ ('S', c_uint * 1024),
+]
+assert sizeof(bf_key_st) == 4168, sizeof(bf_key_st)
+assert alignment(bf_key_st) == 4, alignment(bf_key_st)
+BF_KEY = bf_key_st
+class bignum_st(Structure):
+ pass
+bignum_st._fields_ = [
+ ('d', POINTER(c_ulong)),
+ ('top', c_int),
+ ('dmax', c_int),
+ ('neg', c_int),
+ ('flags', c_int),
+]
+assert sizeof(bignum_st) == 20, sizeof(bignum_st)
+assert alignment(bignum_st) == 4, alignment(bignum_st)
+BIGNUM = bignum_st
+class bignum_ctx(Structure):
+ pass
+bignum_ctx._fields_ = [
+]
+BN_CTX = bignum_ctx
+class bn_blinding_st(Structure):
+ pass
+bn_blinding_st._fields_ = [
+ ('init', c_int),
+ ('A', POINTER(BIGNUM)),
+ ('Ai', POINTER(BIGNUM)),
+ ('mod', POINTER(BIGNUM)),
+ ('thread_id', c_ulong),
+]
+assert sizeof(bn_blinding_st) == 20, sizeof(bn_blinding_st)
+assert alignment(bn_blinding_st) == 4, alignment(bn_blinding_st)
+BN_BLINDING = bn_blinding_st
+class bn_mont_ctx_st(Structure):
+ pass
+bn_mont_ctx_st._fields_ = [
+ ('ri', c_int),
+ ('RR', BIGNUM),
+ ('N', BIGNUM),
+ ('Ni', BIGNUM),
+ ('n0', c_ulong),
+ ('flags', c_int),
+]
+assert sizeof(bn_mont_ctx_st) == 72, sizeof(bn_mont_ctx_st)
+assert alignment(bn_mont_ctx_st) == 4, alignment(bn_mont_ctx_st)
+BN_MONT_CTX = bn_mont_ctx_st
+class bn_recp_ctx_st(Structure):
+ pass
+bn_recp_ctx_st._fields_ = [
+ ('N', BIGNUM),
+ ('Nr', BIGNUM),
+ ('num_bits', c_int),
+ ('shift', c_int),
+ ('flags', c_int),
+]
+assert sizeof(bn_recp_ctx_st) == 52, sizeof(bn_recp_ctx_st)
+assert alignment(bn_recp_ctx_st) == 4, alignment(bn_recp_ctx_st)
+BN_RECP_CTX = bn_recp_ctx_st
+class buf_mem_st(Structure):
+ pass
+buf_mem_st._fields_ = [
+ ('length', c_int),
+ ('data', STRING),
+ ('max', c_int),
+]
+assert sizeof(buf_mem_st) == 12, sizeof(buf_mem_st)
+assert alignment(buf_mem_st) == 4, alignment(buf_mem_st)
+BUF_MEM = buf_mem_st
+class cast_key_st(Structure):
+ pass
+cast_key_st._fields_ = [
+ ('data', c_ulong * 32),
+ ('short_key', c_int),
+]
+assert sizeof(cast_key_st) == 132, sizeof(cast_key_st)
+assert alignment(cast_key_st) == 4, alignment(cast_key_st)
+CAST_KEY = cast_key_st
+class comp_method_st(Structure):
+ pass
+comp_method_st._fields_ = [
+ ('type', c_int),
+ ('name', STRING),
+ ('init', CFUNCTYPE(c_int)),
+ ('finish', CFUNCTYPE(None)),
+ ('compress', CFUNCTYPE(c_int)),
+ ('expand', CFUNCTYPE(c_int)),
+ ('ctrl', CFUNCTYPE(c_long)),
+ ('callback_ctrl', CFUNCTYPE(c_long)),
+]
+assert sizeof(comp_method_st) == 32, sizeof(comp_method_st)
+assert alignment(comp_method_st) == 4, alignment(comp_method_st)
+COMP_METHOD = comp_method_st
+class comp_ctx_st(Structure):
+ pass
+comp_ctx_st._fields_ = [
+ ('meth', POINTER(COMP_METHOD)),
+ ('compress_in', c_ulong),
+ ('compress_out', c_ulong),
+ ('expand_in', c_ulong),
+ ('expand_out', c_ulong),
+ ('ex_data', CRYPTO_EX_DATA),
+]
+assert sizeof(comp_ctx_st) == 28, sizeof(comp_ctx_st)
+assert alignment(comp_ctx_st) == 4, alignment(comp_ctx_st)
+COMP_CTX = comp_ctx_st
+class CRYPTO_dynlock_value(Structure):
+ pass
+CRYPTO_dynlock_value._fields_ = [
+]
+class CRYPTO_dynlock(Structure):
+ pass
+CRYPTO_dynlock._fields_ = [
+ ('references', c_int),
+ ('data', POINTER(CRYPTO_dynlock_value)),
+]
+assert sizeof(CRYPTO_dynlock) == 8, sizeof(CRYPTO_dynlock)
+assert alignment(CRYPTO_dynlock) == 4, alignment(CRYPTO_dynlock)
+BIO_dummy = bio_st
+CRYPTO_EX_new = CFUNCTYPE(c_int, c_void_p, c_void_p, POINTER(CRYPTO_EX_DATA), c_int, c_long, c_void_p)
+CRYPTO_EX_free = CFUNCTYPE(None, c_void_p, c_void_p, POINTER(CRYPTO_EX_DATA), c_int, c_long, c_void_p)
+CRYPTO_EX_dup = CFUNCTYPE(c_int, POINTER(CRYPTO_EX_DATA), POINTER(CRYPTO_EX_DATA), c_void_p, c_int, c_long, c_void_p)
+class crypto_ex_data_func_st(Structure):
+ pass
+crypto_ex_data_func_st._fields_ = [
+ ('argl', c_long),
+ ('argp', c_void_p),
+ ('new_func', POINTER(CRYPTO_EX_new)),
+ ('free_func', POINTER(CRYPTO_EX_free)),
+ ('dup_func', POINTER(CRYPTO_EX_dup)),
+]
+assert sizeof(crypto_ex_data_func_st) == 20, sizeof(crypto_ex_data_func_st)
+assert alignment(crypto_ex_data_func_st) == 4, alignment(crypto_ex_data_func_st)
+CRYPTO_EX_DATA_FUNCS = crypto_ex_data_func_st
+class st_CRYPTO_EX_DATA_IMPL(Structure):
+ pass
+CRYPTO_EX_DATA_IMPL = st_CRYPTO_EX_DATA_IMPL
+st_CRYPTO_EX_DATA_IMPL._fields_ = [
+]
+CRYPTO_MEM_LEAK_CB = CFUNCTYPE(c_void_p, c_ulong, STRING, c_int, c_int, c_void_p)
+DES_cblock = c_ubyte * 8
+const_DES_cblock = c_ubyte * 8
+class DES_ks(Structure):
+ pass
+class N6DES_ks3DOLLAR_9E(Union):
+ pass
+N6DES_ks3DOLLAR_9E._fields_ = [
+ ('cblock', DES_cblock),
+ ('deslong', c_ulong * 2),
+]
+assert sizeof(N6DES_ks3DOLLAR_9E) == 8, sizeof(N6DES_ks3DOLLAR_9E)
+assert alignment(N6DES_ks3DOLLAR_9E) == 4, alignment(N6DES_ks3DOLLAR_9E)
+DES_ks._fields_ = [
+ ('ks', N6DES_ks3DOLLAR_9E * 16),
+]
+assert sizeof(DES_ks) == 128, sizeof(DES_ks)
+assert alignment(DES_ks) == 4, alignment(DES_ks)
+DES_key_schedule = DES_ks
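+# DES_ks holds the full key schedule: 16 rounds, each an 8-byte subkey
+# viewable as either a DES_cblock or two unsigned longs, hence 128 bytes.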
+_ossl_old_des_cblock = c_ubyte * 8
+class _ossl_old_des_ks_struct(Structure):
+ pass
+class N23_ossl_old_des_ks_struct4DOLLAR_10E(Union):
+ pass
+N23_ossl_old_des_ks_struct4DOLLAR_10E._fields_ = [
+ ('_', _ossl_old_des_cblock),
+ ('pad', c_ulong * 2),
+]
+assert sizeof(N23_ossl_old_des_ks_struct4DOLLAR_10E) == 8, sizeof(N23_ossl_old_des_ks_struct4DOLLAR_10E)
+assert alignment(N23_ossl_old_des_ks_struct4DOLLAR_10E) == 4, alignment(N23_ossl_old_des_ks_struct4DOLLAR_10E)
+_ossl_old_des_ks_struct._fields_ = [
+ ('ks', N23_ossl_old_des_ks_struct4DOLLAR_10E),
+]
+assert sizeof(_ossl_old_des_ks_struct) == 8, sizeof(_ossl_old_des_ks_struct)
+assert alignment(_ossl_old_des_ks_struct) == 4, alignment(_ossl_old_des_ks_struct)
+_ossl_old_des_key_schedule = _ossl_old_des_ks_struct * 16
+class dh_st(Structure):
+ pass
+DH = dh_st
+class dh_method(Structure):
+ pass
+dh_method._fields_ = [
+ ('name', STRING),
+ ('generate_key', CFUNCTYPE(c_int, POINTER(DH))),
+ ('compute_key', CFUNCTYPE(c_int, POINTER(c_ubyte), POINTER(BIGNUM), POINTER(DH))),
+ ('bn_mod_exp', CFUNCTYPE(c_int, POINTER(DH), POINTER(BIGNUM), POINTER(BIGNUM), POINTER(BIGNUM), POINTER(BIGNUM), POINTER(BN_CTX), POINTER(BN_MONT_CTX))),
+ ('init', CFUNCTYPE(c_int, POINTER(DH))),
+ ('finish', CFUNCTYPE(c_int, POINTER(DH))),
+ ('flags', c_int),
+ ('app_data', STRING),
+]
+assert sizeof(dh_method) == 32, sizeof(dh_method)
+assert alignment(dh_method) == 4, alignment(dh_method)
+DH_METHOD = dh_method
+class engine_st(Structure):
+ pass
+ENGINE = engine_st
+dh_st._fields_ = [
+ ('pad', c_int),
+ ('version', c_int),
+ ('p', POINTER(BIGNUM)),
+ ('g', POINTER(BIGNUM)),
+ ('length', c_long),
+ ('pub_key', POINTER(BIGNUM)),
+ ('priv_key', POINTER(BIGNUM)),
+ ('flags', c_int),
+ ('method_mont_p', STRING),
+ ('q', POINTER(BIGNUM)),
+ ('j', POINTER(BIGNUM)),
+ ('seed', POINTER(c_ubyte)),
+ ('seedlen', c_int),
+ ('counter', POINTER(BIGNUM)),
+ ('references', c_int),
+ ('ex_data', CRYPTO_EX_DATA),
+ ('meth', POINTER(DH_METHOD)),
+ ('engine', POINTER(ENGINE)),
+]
+assert sizeof(dh_st) == 76, sizeof(dh_st)
+assert alignment(dh_st) == 4, alignment(dh_st)
+class dsa_st(Structure):
+ pass
+DSA = dsa_st
+class DSA_SIG_st(Structure):
+ pass
+DSA_SIG_st._fields_ = [
+ ('r', POINTER(BIGNUM)),
+ ('s', POINTER(BIGNUM)),
+]
+assert sizeof(DSA_SIG_st) == 8, sizeof(DSA_SIG_st)
+assert alignment(DSA_SIG_st) == 4, alignment(DSA_SIG_st)
+DSA_SIG = DSA_SIG_st
+class dsa_method(Structure):
+ pass
+dsa_method._fields_ = [
+ ('name', STRING),
+ ('dsa_do_sign', CFUNCTYPE(POINTER(DSA_SIG), POINTER(c_ubyte), c_int, POINTER(DSA))),
+ ('dsa_sign_setup', CFUNCTYPE(c_int, POINTER(DSA), POINTER(BN_CTX), POINTER(POINTER(BIGNUM)), POINTER(POINTER(BIGNUM)))),
+ ('dsa_do_verify', CFUNCTYPE(c_int, POINTER(c_ubyte), c_int, POINTER(DSA_SIG), POINTER(DSA))),
+ ('dsa_mod_exp', CFUNCTYPE(c_int, POINTER(DSA), POINTER(BIGNUM), POINTER(BIGNUM), POINTER(BIGNUM), POINTER(BIGNUM), POINTER(BIGNUM), POINTER(BIGNUM), POINTER(BN_CTX), POINTER(BN_MONT_CTX))),
+ ('bn_mod_exp', CFUNCTYPE(c_int, POINTER(DSA), POINTER(BIGNUM), POINTER(BIGNUM), POINTER(BIGNUM), POINTER(BIGNUM), POINTER(BN_CTX), POINTER(BN_MONT_CTX))),
+ ('init', CFUNCTYPE(c_int, POINTER(DSA))),
+ ('finish', CFUNCTYPE(c_int, POINTER(DSA))),
+ ('flags', c_int),
+ ('app_data', STRING),
+]
+assert sizeof(dsa_method) == 40, sizeof(dsa_method)
+assert alignment(dsa_method) == 4, alignment(dsa_method)
+DSA_METHOD = dsa_method
+dsa_st._fields_ = [
+ ('pad', c_int),
+ ('version', c_long),
+ ('write_params', c_int),
+ ('p', POINTER(BIGNUM)),
+ ('q', POINTER(BIGNUM)),
+ ('g', POINTER(BIGNUM)),
+ ('pub_key', POINTER(BIGNUM)),
+ ('priv_key', POINTER(BIGNUM)),
+ ('kinv', POINTER(BIGNUM)),
+ ('r', POINTER(BIGNUM)),
+ ('flags', c_int),
+ ('method_mont_p', STRING),
+ ('references', c_int),
+ ('ex_data', CRYPTO_EX_DATA),
+ ('meth', POINTER(DSA_METHOD)),
+ ('engine', POINTER(ENGINE)),
+]
+assert sizeof(dsa_st) == 68, sizeof(dsa_st)
+assert alignment(dsa_st) == 4, alignment(dsa_st)
+class evp_pkey_st(Structure):
+ pass
+class N11evp_pkey_st4DOLLAR_12E(Union):
+ pass
+class rsa_st(Structure):
+ pass
+N11evp_pkey_st4DOLLAR_12E._fields_ = [
+ ('ptr', STRING),
+ ('rsa', POINTER(rsa_st)),
+ ('dsa', POINTER(dsa_st)),
+ ('dh', POINTER(dh_st)),
+]
+assert sizeof(N11evp_pkey_st4DOLLAR_12E) == 4, sizeof(N11evp_pkey_st4DOLLAR_12E)
+assert alignment(N11evp_pkey_st4DOLLAR_12E) == 4, alignment(N11evp_pkey_st4DOLLAR_12E)
+evp_pkey_st._fields_ = [
+ ('type', c_int),
+ ('save_type', c_int),
+ ('references', c_int),
+ ('pkey', N11evp_pkey_st4DOLLAR_12E),
+ ('save_parameters', c_int),
+ ('attributes', POINTER(STACK)),
+]
+assert sizeof(evp_pkey_st) == 24, sizeof(evp_pkey_st)
+assert alignment(evp_pkey_st) == 4, alignment(evp_pkey_st)
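+# evp_pkey_st is a tagged union: the type field selects which member of
+# the pkey union (raw ptr, RSA, DSA or DH key) is live, and references is
+# the usual OpenSSL reference count.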
+class env_md_st(Structure):
+ pass
+class env_md_ctx_st(Structure):
+ pass
+EVP_MD_CTX = env_md_ctx_st
+env_md_st._fields_ = [
+ ('type', c_int),
+ ('pkey_type', c_int),
+ ('md_size', c_int),
+ ('flags', c_ulong),
+ ('init', CFUNCTYPE(c_int, POINTER(EVP_MD_CTX))),
+ ('update', CFUNCTYPE(c_int, POINTER(EVP_MD_CTX), c_void_p, c_ulong)),
+ ('final', CFUNCTYPE(c_int, POINTER(EVP_MD_CTX), POINTER(c_ubyte))),
+ ('copy', CFUNCTYPE(c_int, POINTER(EVP_MD_CTX), POINTER(EVP_MD_CTX))),
+ ('cleanup', CFUNCTYPE(c_int, POINTER(EVP_MD_CTX))),
+ ('sign', CFUNCTYPE(c_int)),
+ ('verify', CFUNCTYPE(c_int)),
+ ('required_pkey_type', c_int * 5),
+ ('block_size', c_int),
+ ('ctx_size', c_int),
+]
+assert sizeof(env_md_st) == 72, sizeof(env_md_st)
+assert alignment(env_md_st) == 4, alignment(env_md_st)
+EVP_MD = env_md_st
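+# EVP_MD is the digest vtable: init/update/final implement the hash,
+# md_size is the digest length in bytes and block_size the input block
+# size; ctx_size tells EVP how much md_data to allocate per context.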
+env_md_ctx_st._fields_ = [
+ ('digest', POINTER(EVP_MD)),
+ ('engine', POINTER(ENGINE)),
+ ('flags', c_ulong),
+ ('md_data', c_void_p),
+]
+assert sizeof(env_md_ctx_st) == 16, sizeof(env_md_ctx_st)
+assert alignment(env_md_ctx_st) == 4, alignment(env_md_ctx_st)
+class evp_cipher_st(Structure):
+ pass
+class evp_cipher_ctx_st(Structure):
+ pass
+EVP_CIPHER_CTX = evp_cipher_ctx_st
+evp_cipher_st._fields_ = [
+ ('nid', c_int),
+ ('block_size', c_int),
+ ('key_len', c_int),
+ ('iv_len', c_int),
+ ('flags', c_ulong),
+ ('init', CFUNCTYPE(c_int, POINTER(EVP_CIPHER_CTX), POINTER(c_ubyte), POINTER(c_ubyte), c_int)),
+ ('do_cipher', CFUNCTYPE(c_int, POINTER(EVP_CIPHER_CTX), POINTER(c_ubyte), POINTER(c_ubyte), c_uint)),
+ ('cleanup', CFUNCTYPE(c_int, POINTER(EVP_CIPHER_CTX))),
+ ('ctx_size', c_int),
+ ('set_asn1_parameters', CFUNCTYPE(c_int, POINTER(EVP_CIPHER_CTX), POINTER(ASN1_TYPE))),
+ ('get_asn1_parameters', CFUNCTYPE(c_int, POINTER(EVP_CIPHER_CTX), POINTER(ASN1_TYPE))),
+ ('ctrl', CFUNCTYPE(c_int, POINTER(EVP_CIPHER_CTX), c_int, c_int, c_void_p)),
+ ('app_data', c_void_p),
+]
+assert sizeof(evp_cipher_st) == 52, sizeof(evp_cipher_st)
+assert alignment(evp_cipher_st) == 4, alignment(evp_cipher_st)
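+# evp_cipher_st is the cipher vtable: init sets up a key/IV, do_cipher
+# processes data, and the set/get_asn1_parameters hooks (de)serialize
+# algorithm parameters such as IVs into ASN1_TYPE values.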
+class evp_cipher_info_st(Structure):
+ pass
+EVP_CIPHER = evp_cipher_st
+evp_cipher_info_st._fields_ = [
+ ('cipher', POINTER(EVP_CIPHER)),
+ ('iv', c_ubyte * 16),
+]
+assert sizeof(evp_cipher_info_st) == 20, sizeof(evp_cipher_info_st)
+assert alignment(evp_cipher_info_st) == 4, alignment(evp_cipher_info_st)
+EVP_CIPHER_INFO = evp_cipher_info_st
+evp_cipher_ctx_st._fields_ = [
+ ('cipher', POINTER(EVP_CIPHER)),
+ ('engine', POINTER(ENGINE)),
+ ('encrypt', c_int),
+ ('buf_len', c_int),
+ ('oiv', c_ubyte * 16),
+ ('iv', c_ubyte * 16),
+ ('buf', c_ubyte * 32),
+ ('num', c_int),
+ ('app_data', c_void_p),
+ ('key_len', c_int),
+ ('flags', c_ulong),
+ ('cipher_data', c_void_p),
+ ('final_used', c_int),
+ ('block_mask', c_int),
+ ('final', c_ubyte * 32),
+]
+assert sizeof(evp_cipher_ctx_st) == 140, sizeof(evp_cipher_ctx_st)
+assert alignment(evp_cipher_ctx_st) == 4, alignment(evp_cipher_ctx_st)
+class evp_Encode_Ctx_st(Structure):
+ pass
+evp_Encode_Ctx_st._fields_ = [
+ ('num', c_int),
+ ('length', c_int),
+ ('enc_data', c_ubyte * 80),
+ ('line_num', c_int),
+ ('expect_nl', c_int),
+]
+assert sizeof(evp_Encode_Ctx_st) == 96, sizeof(evp_Encode_Ctx_st)
+assert alignment(evp_Encode_Ctx_st) == 4, alignment(evp_Encode_Ctx_st)
+EVP_ENCODE_CTX = evp_Encode_Ctx_st
+EVP_PBE_KEYGEN = CFUNCTYPE(c_int, POINTER(EVP_CIPHER_CTX), STRING, c_int, POINTER(ASN1_TYPE), POINTER(EVP_CIPHER), POINTER(EVP_MD), c_int)
+class lhash_node_st(Structure):
+ pass
+lhash_node_st._fields_ = [
+ ('data', c_void_p),
+ ('next', POINTER(lhash_node_st)),
+ ('hash', c_ulong),
+]
+assert sizeof(lhash_node_st) == 12, sizeof(lhash_node_st)
+assert alignment(lhash_node_st) == 4, alignment(lhash_node_st)
+LHASH_NODE = lhash_node_st
+LHASH_COMP_FN_TYPE = CFUNCTYPE(c_int, c_void_p, c_void_p)
+LHASH_HASH_FN_TYPE = CFUNCTYPE(c_ulong, c_void_p)
+LHASH_DOALL_FN_TYPE = CFUNCTYPE(None, c_void_p)
+LHASH_DOALL_ARG_FN_TYPE = CFUNCTYPE(None, c_void_p, c_void_p)
+class lhash_st(Structure):
+ pass
+lhash_st._fields_ = [
+ ('b', POINTER(POINTER(LHASH_NODE))),
+ ('comp', LHASH_COMP_FN_TYPE),
+ ('hash', LHASH_HASH_FN_TYPE),
+ ('num_nodes', c_uint),
+ ('num_alloc_nodes', c_uint),
+ ('p', c_uint),
+ ('pmax', c_uint),
+ ('up_load', c_ulong),
+ ('down_load', c_ulong),
+ ('num_items', c_ulong),
+ ('num_expands', c_ulong),
+ ('num_expand_reallocs', c_ulong),
+ ('num_contracts', c_ulong),
+ ('num_contract_reallocs', c_ulong),
+ ('num_hash_calls', c_ulong),
+ ('num_comp_calls', c_ulong),
+ ('num_insert', c_ulong),
+ ('num_replace', c_ulong),
+ ('num_delete', c_ulong),
+ ('num_no_delete', c_ulong),
+ ('num_retrieve', c_ulong),
+ ('num_retrieve_miss', c_ulong),
+ ('num_hash_comps', c_ulong),
+ ('error', c_int),
+]
+assert sizeof(lhash_st) == 96, sizeof(lhash_st)
+assert alignment(lhash_st) == 4, alignment(lhash_st)
+LHASH = lhash_st
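+# lhash_st is OpenSSL's chained hash table; beyond the comp/hash callbacks
+# and the bucket array b, most fields are usage statistics (num_insert,
+# num_retrieve_miss, ...) plus the up_load/down_load resize thresholds.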
+class MD2state_st(Structure):
+ pass
+MD2state_st._fields_ = [
+ ('num', c_int),
+ ('data', c_ubyte * 16),
+ ('cksm', c_uint * 16),
+ ('state', c_uint * 16),
+]
+assert sizeof(MD2state_st) == 148, sizeof(MD2state_st)
+assert alignment(MD2state_st) == 4, alignment(MD2state_st)
+MD2_CTX = MD2state_st
+class MD4state_st(Structure):
+ pass
+MD4state_st._fields_ = [
+ ('A', c_uint),
+ ('B', c_uint),
+ ('C', c_uint),
+ ('D', c_uint),
+ ('Nl', c_uint),
+ ('Nh', c_uint),
+ ('data', c_uint * 16),
+ ('num', c_int),
+]
+assert sizeof(MD4state_st) == 92, sizeof(MD4state_st)
+assert alignment(MD4state_st) == 4, alignment(MD4state_st)
+MD4_CTX = MD4state_st
+class MD5state_st(Structure):
+ pass
+MD5state_st._fields_ = [
+ ('A', c_uint),
+ ('B', c_uint),
+ ('C', c_uint),
+ ('D', c_uint),
+ ('Nl', c_uint),
+ ('Nh', c_uint),
+ ('data', c_uint * 16),
+ ('num', c_int),
+]
+assert sizeof(MD5state_st) == 92, sizeof(MD5state_st)
+assert alignment(MD5state_st) == 4, alignment(MD5state_st)
+MD5_CTX = MD5state_st
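+# MD4state_st and MD5state_st share one layout: A-D are the four 32-bit
+# chaining words, Nl/Nh the 64-bit message bit count, and data the
+# 16-word block buffer with num bytes currently pending.
+# A minimal usage sketch (assumptions: libcrypto is loadable at this
+# hypothetical path and exports the classic MD5_Init/Update/Final):
+#   from ctypes import CDLL, byref, create_string_buffer
+#   libcrypto = CDLL('/usr/lib/libcrypto.dylib')
+#   ctx = MD5_CTX()
+#   libcrypto.MD5_Init(byref(ctx))
+#   libcrypto.MD5_Update(byref(ctx), 'abc', 3)
+#   digest = create_string_buffer(16)
+#   libcrypto.MD5_Final(digest, byref(ctx))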
+class mdc2_ctx_st(Structure):
+ pass
+mdc2_ctx_st._fields_ = [
+ ('num', c_int),
+ ('data', c_ubyte * 8),
+ ('h', DES_cblock),
+ ('hh', DES_cblock),
+ ('pad_type', c_int),
+]
+assert sizeof(mdc2_ctx_st) == 32, sizeof(mdc2_ctx_st)
+assert alignment(mdc2_ctx_st) == 4, alignment(mdc2_ctx_st)
+MDC2_CTX = mdc2_ctx_st
+class obj_name_st(Structure):
+ pass
+obj_name_st._fields_ = [
+ ('type', c_int),
+ ('alias', c_int),
+ ('name', STRING),
+ ('data', STRING),
+]
+assert sizeof(obj_name_st) == 16, sizeof(obj_name_st)
+assert alignment(obj_name_st) == 4, alignment(obj_name_st)
+OBJ_NAME = obj_name_st
+ASN1_TIME = asn1_string_st
+ASN1_NULL = c_int
+EVP_PKEY = evp_pkey_st
+class x509_st(Structure):
+ pass
+X509 = x509_st
+class X509_algor_st(Structure):
+ pass
+X509_ALGOR = X509_algor_st
+class X509_crl_st(Structure):
+ pass
+X509_CRL = X509_crl_st
+class X509_name_st(Structure):
+ pass
+X509_NAME = X509_name_st
+class x509_store_st(Structure):
+ pass
+X509_STORE = x509_store_st
+class x509_store_ctx_st(Structure):
+ pass
+X509_STORE_CTX = x509_store_ctx_st
+engine_st._fields_ = [
+]
+class PEM_Encode_Seal_st(Structure):
+ pass
+PEM_Encode_Seal_st._fields_ = [
+ ('encode', EVP_ENCODE_CTX),
+ ('md', EVP_MD_CTX),
+ ('cipher', EVP_CIPHER_CTX),
+]
+assert sizeof(PEM_Encode_Seal_st) == 252, sizeof(PEM_Encode_Seal_st)
+assert alignment(PEM_Encode_Seal_st) == 4, alignment(PEM_Encode_Seal_st)
+PEM_ENCODE_SEAL_CTX = PEM_Encode_Seal_st
+class pem_recip_st(Structure):
+ pass
+pem_recip_st._fields_ = [
+ ('name', STRING),
+ ('dn', POINTER(X509_NAME)),
+ ('cipher', c_int),
+ ('key_enc', c_int),
+]
+assert sizeof(pem_recip_st) == 16, sizeof(pem_recip_st)
+assert alignment(pem_recip_st) == 4, alignment(pem_recip_st)
+PEM_USER = pem_recip_st
+class pem_ctx_st(Structure):
+ pass
+class N10pem_ctx_st4DOLLAR_16E(Structure):
+ pass
+N10pem_ctx_st4DOLLAR_16E._fields_ = [
+ ('version', c_int),
+ ('mode', c_int),
+]
+assert sizeof(N10pem_ctx_st4DOLLAR_16E) == 8, sizeof(N10pem_ctx_st4DOLLAR_16E)
+assert alignment(N10pem_ctx_st4DOLLAR_16E) == 4, alignment(N10pem_ctx_st4DOLLAR_16E)
+class N10pem_ctx_st4DOLLAR_17E(Structure):
+ pass
+N10pem_ctx_st4DOLLAR_17E._fields_ = [
+ ('cipher', c_int),
+]
+assert sizeof(N10pem_ctx_st4DOLLAR_17E) == 4, sizeof(N10pem_ctx_st4DOLLAR_17E)
+assert alignment(N10pem_ctx_st4DOLLAR_17E) == 4, alignment(N10pem_ctx_st4DOLLAR_17E)
+pem_ctx_st._fields_ = [
+ ('type', c_int),
+ ('proc_type', N10pem_ctx_st4DOLLAR_16E),
+ ('domain', STRING),
+ ('DEK_info', N10pem_ctx_st4DOLLAR_17E),
+ ('originator', POINTER(PEM_USER)),
+ ('num_recipient', c_int),
+ ('recipient', POINTER(POINTER(PEM_USER))),
+ ('x509_chain', POINTER(STACK)),
+ ('md', POINTER(EVP_MD)),
+ ('md_enc', c_int),
+ ('md_len', c_int),
+ ('md_data', STRING),
+ ('dec', POINTER(EVP_CIPHER)),
+ ('key_len', c_int),
+ ('key', POINTER(c_ubyte)),
+ ('data_enc', c_int),
+ ('data_len', c_int),
+ ('data', POINTER(c_ubyte)),
+]
+assert sizeof(pem_ctx_st) == 76, sizeof(pem_ctx_st)
+assert alignment(pem_ctx_st) == 4, alignment(pem_ctx_st)
+PEM_CTX = pem_ctx_st
+pem_password_cb = CFUNCTYPE(c_int, STRING, c_int, c_int, c_void_p)
+class pkcs7_issuer_and_serial_st(Structure):
+ pass
+pkcs7_issuer_and_serial_st._fields_ = [
+ ('issuer', POINTER(X509_NAME)),
+ ('serial', POINTER(ASN1_INTEGER)),
+]
+assert sizeof(pkcs7_issuer_and_serial_st) == 8, sizeof(pkcs7_issuer_and_serial_st)
+assert alignment(pkcs7_issuer_and_serial_st) == 4, alignment(pkcs7_issuer_and_serial_st)
+PKCS7_ISSUER_AND_SERIAL = pkcs7_issuer_and_serial_st
+class pkcs7_signer_info_st(Structure):
+ pass
+pkcs7_signer_info_st._fields_ = [
+ ('version', POINTER(ASN1_INTEGER)),
+ ('issuer_and_serial', POINTER(PKCS7_ISSUER_AND_SERIAL)),
+ ('digest_alg', POINTER(X509_ALGOR)),
+ ('auth_attr', POINTER(STACK)),
+ ('digest_enc_alg', POINTER(X509_ALGOR)),
+ ('enc_digest', POINTER(ASN1_OCTET_STRING)),
+ ('unauth_attr', POINTER(STACK)),
+ ('pkey', POINTER(EVP_PKEY)),
+]
+assert sizeof(pkcs7_signer_info_st) == 32, sizeof(pkcs7_signer_info_st)
+assert alignment(pkcs7_signer_info_st) == 4, alignment(pkcs7_signer_info_st)
+PKCS7_SIGNER_INFO = pkcs7_signer_info_st
+class pkcs7_recip_info_st(Structure):
+ pass
+pkcs7_recip_info_st._fields_ = [
+ ('version', POINTER(ASN1_INTEGER)),
+ ('issuer_and_serial', POINTER(PKCS7_ISSUER_AND_SERIAL)),
+ ('key_enc_algor', POINTER(X509_ALGOR)),
+ ('enc_key', POINTER(ASN1_OCTET_STRING)),
+ ('cert', POINTER(X509)),
+]
+assert sizeof(pkcs7_recip_info_st) == 20, sizeof(pkcs7_recip_info_st)
+assert alignment(pkcs7_recip_info_st) == 4, alignment(pkcs7_recip_info_st)
+PKCS7_RECIP_INFO = pkcs7_recip_info_st
+class pkcs7_signed_st(Structure):
+ pass
+class pkcs7_st(Structure):
+ pass
+pkcs7_signed_st._fields_ = [
+ ('version', POINTER(ASN1_INTEGER)),
+ ('md_algs', POINTER(STACK)),
+ ('cert', POINTER(STACK)),
+ ('crl', POINTER(STACK)),
+ ('signer_info', POINTER(STACK)),
+ ('contents', POINTER(pkcs7_st)),
+]
+assert sizeof(pkcs7_signed_st) == 24, sizeof(pkcs7_signed_st)
+assert alignment(pkcs7_signed_st) == 4, alignment(pkcs7_signed_st)
+PKCS7_SIGNED = pkcs7_signed_st
+class pkcs7_enc_content_st(Structure):
+ pass
+pkcs7_enc_content_st._fields_ = [
+ ('content_type', POINTER(ASN1_OBJECT)),
+ ('algorithm', POINTER(X509_ALGOR)),
+ ('enc_data', POINTER(ASN1_OCTET_STRING)),
+ ('cipher', POINTER(EVP_CIPHER)),
+]
+assert sizeof(pkcs7_enc_content_st) == 16, sizeof(pkcs7_enc_content_st)
+assert alignment(pkcs7_enc_content_st) == 4, alignment(pkcs7_enc_content_st)
+PKCS7_ENC_CONTENT = pkcs7_enc_content_st
+class pkcs7_enveloped_st(Structure):
+ pass
+pkcs7_enveloped_st._fields_ = [
+ ('version', POINTER(ASN1_INTEGER)),
+ ('recipientinfo', POINTER(STACK)),
+ ('enc_data', POINTER(PKCS7_ENC_CONTENT)),
+]
+assert sizeof(pkcs7_enveloped_st) == 12, sizeof(pkcs7_enveloped_st)
+assert alignment(pkcs7_enveloped_st) == 4, alignment(pkcs7_enveloped_st)
+PKCS7_ENVELOPE = pkcs7_enveloped_st
+class pkcs7_signedandenveloped_st(Structure):
+ pass
+pkcs7_signedandenveloped_st._fields_ = [
+ ('version', POINTER(ASN1_INTEGER)),
+ ('md_algs', POINTER(STACK)),
+ ('cert', POINTER(STACK)),
+ ('crl', POINTER(STACK)),
+ ('signer_info', POINTER(STACK)),
+ ('enc_data', POINTER(PKCS7_ENC_CONTENT)),
+ ('recipientinfo', POINTER(STACK)),
+]
+assert sizeof(pkcs7_signedandenveloped_st) == 28, sizeof(pkcs7_signedandenveloped_st)
+assert alignment(pkcs7_signedandenveloped_st) == 4, alignment(pkcs7_signedandenveloped_st)
+PKCS7_SIGN_ENVELOPE = pkcs7_signedandenveloped_st
+class pkcs7_digest_st(Structure):
+ pass
+pkcs7_digest_st._fields_ = [
+ ('version', POINTER(ASN1_INTEGER)),
+ ('md', POINTER(X509_ALGOR)),
+ ('contents', POINTER(pkcs7_st)),
+ ('digest', POINTER(ASN1_OCTET_STRING)),
+]
+assert sizeof(pkcs7_digest_st) == 16, sizeof(pkcs7_digest_st)
+assert alignment(pkcs7_digest_st) == 4, alignment(pkcs7_digest_st)
+PKCS7_DIGEST = pkcs7_digest_st
+class pkcs7_encrypted_st(Structure):
+ pass
+pkcs7_encrypted_st._fields_ = [
+ ('version', POINTER(ASN1_INTEGER)),
+ ('enc_data', POINTER(PKCS7_ENC_CONTENT)),
+]
+assert sizeof(pkcs7_encrypted_st) == 8, sizeof(pkcs7_encrypted_st)
+assert alignment(pkcs7_encrypted_st) == 4, alignment(pkcs7_encrypted_st)
+PKCS7_ENCRYPT = pkcs7_encrypted_st
+class N8pkcs7_st4DOLLAR_15E(Union):
+ pass
+N8pkcs7_st4DOLLAR_15E._fields_ = [
+ ('ptr', STRING),
+ ('data', POINTER(ASN1_OCTET_STRING)),
+ ('sign', POINTER(PKCS7_SIGNED)),
+ ('enveloped', POINTER(PKCS7_ENVELOPE)),
+ ('signed_and_enveloped', POINTER(PKCS7_SIGN_ENVELOPE)),
+ ('digest', POINTER(PKCS7_DIGEST)),
+ ('encrypted', POINTER(PKCS7_ENCRYPT)),
+ ('other', POINTER(ASN1_TYPE)),
+]
+assert sizeof(N8pkcs7_st4DOLLAR_15E) == 4, sizeof(N8pkcs7_st4DOLLAR_15E)
+assert alignment(N8pkcs7_st4DOLLAR_15E) == 4, alignment(N8pkcs7_st4DOLLAR_15E)
+pkcs7_st._fields_ = [
+ ('asn1', POINTER(c_ubyte)),
+ ('length', c_long),
+ ('state', c_int),
+ ('detached', c_int),
+ ('type', POINTER(ASN1_OBJECT)),
+ ('d', N8pkcs7_st4DOLLAR_15E),
+]
+assert sizeof(pkcs7_st) == 24, sizeof(pkcs7_st)
+assert alignment(pkcs7_st) == 4, alignment(pkcs7_st)
+PKCS7 = pkcs7_st
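+# pkcs7_st is another tagged union: the ASN1_OBJECT in type identifies the
+# content type, and d holds the matching pointer (signed, enveloped,
+# digest, encrypted, ...); detached marks detached-signature mode.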
+class rc2_key_st(Structure):
+ pass
+rc2_key_st._fields_ = [
+ ('data', c_uint * 64),
+]
+assert sizeof(rc2_key_st) == 256, sizeof(rc2_key_st)
+assert alignment(rc2_key_st) == 4, alignment(rc2_key_st)
+RC2_KEY = rc2_key_st
+class rc4_key_st(Structure):
+ pass
+rc4_key_st._fields_ = [
+ ('x', c_ubyte),
+ ('y', c_ubyte),
+ ('data', c_ubyte * 256),
+]
+assert sizeof(rc4_key_st) == 258, sizeof(rc4_key_st)
+assert alignment(rc4_key_st) == 1, alignment(rc4_key_st)
+RC4_KEY = rc4_key_st
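+# rc4_key_st is the only byte-aligned struct here (alignment 1): two index
+# bytes x and y plus the 256-byte permutation, 258 bytes with no padding.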
+class rc5_key_st(Structure):
+ pass
+rc5_key_st._fields_ = [
+ ('rounds', c_int),
+ ('data', c_ulong * 34),
+]
+assert sizeof(rc5_key_st) == 140, sizeof(rc5_key_st)
+assert alignment(rc5_key_st) == 4, alignment(rc5_key_st)
+RC5_32_KEY = rc5_key_st
+class RIPEMD160state_st(Structure):
+ pass
+RIPEMD160state_st._fields_ = [
+ ('A', c_uint),
+ ('B', c_uint),
+ ('C', c_uint),
+ ('D', c_uint),
+ ('E', c_uint),
+ ('Nl', c_uint),
+ ('Nh', c_uint),
+ ('data', c_uint * 16),
+ ('num', c_int),
+]
+assert sizeof(RIPEMD160state_st) == 96, sizeof(RIPEMD160state_st)
+assert alignment(RIPEMD160state_st) == 4, alignment(RIPEMD160state_st)
+RIPEMD160_CTX = RIPEMD160state_st
+RSA = rsa_st
+class rsa_meth_st(Structure):
+ pass
+rsa_meth_st._fields_ = [
+ ('name', STRING),
+ ('rsa_pub_enc', CFUNCTYPE(c_int, c_int, POINTER(c_ubyte), POINTER(c_ubyte), POINTER(RSA), c_int)),
+ ('rsa_pub_dec', CFUNCTYPE(c_int, c_int, POINTER(c_ubyte), POINTER(c_ubyte), POINTER(RSA), c_int)),
+ ('rsa_priv_enc', CFUNCTYPE(c_int, c_int, POINTER(c_ubyte), POINTER(c_ubyte), POINTER(RSA), c_int)),
+ ('rsa_priv_dec', CFUNCTYPE(c_int, c_int, POINTER(c_ubyte), POINTER(c_ubyte), POINTER(RSA), c_int)),
+ ('rsa_mod_exp', CFUNCTYPE(c_int, POINTER(BIGNUM), POINTER(BIGNUM), POINTER(RSA))),
+ ('bn_mod_exp', CFUNCTYPE(c_int, POINTER(BIGNUM), POINTER(BIGNUM), POINTER(BIGNUM), POINTER(BIGNUM), POINTER(BN_CTX), POINTER(BN_MONT_CTX))),
+ ('init', CFUNCTYPE(c_int, POINTER(RSA))),
+ ('finish', CFUNCTYPE(c_int, POINTER(RSA))),
+ ('flags', c_int),
+ ('app_data', STRING),
+ ('rsa_sign', CFUNCTYPE(c_int, c_int, POINTER(c_ubyte), c_uint, POINTER(c_ubyte), POINTER(c_uint), POINTER(RSA))),
+ ('rsa_verify', CFUNCTYPE(c_int, c_int, POINTER(c_ubyte), c_uint, POINTER(c_ubyte), c_uint, POINTER(RSA))),
+]
+assert sizeof(rsa_meth_st) == 52, sizeof(rsa_meth_st)
+assert alignment(rsa_meth_st) == 4, alignment(rsa_meth_st)
+RSA_METHOD = rsa_meth_st
+rsa_st._fields_ = [
+ ('pad', c_int),
+ ('version', c_long),
+ ('meth', POINTER(RSA_METHOD)),
+ ('engine', POINTER(ENGINE)),
+ ('n', POINTER(BIGNUM)),
+ ('e', POINTER(BIGNUM)),
+ ('d', POINTER(BIGNUM)),
+ ('p', POINTER(BIGNUM)),
+ ('q', POINTER(BIGNUM)),
+ ('dmp1', POINTER(BIGNUM)),
+ ('dmq1', POINTER(BIGNUM)),
+ ('iqmp', POINTER(BIGNUM)),
+ ('ex_data', CRYPTO_EX_DATA),
+ ('references', c_int),
+ ('flags', c_int),
+ ('_method_mod_n', POINTER(BN_MONT_CTX)),
+ ('_method_mod_p', POINTER(BN_MONT_CTX)),
+ ('_method_mod_q', POINTER(BN_MONT_CTX)),
+ ('bignum_data', STRING),
+ ('blinding', POINTER(BN_BLINDING)),
+]
+assert sizeof(rsa_st) == 84, sizeof(rsa_st)
+assert alignment(rsa_st) == 4, alignment(rsa_st)
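+# rsa_st carries the key material (n, e, d, and the CRT values p, q, dmp1,
+# dmq1, iqmp), the method/engine indirection, cached Montgomery contexts
+# for n/p/q, and blinding state against timing attacks.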
+openssl_fptr = CFUNCTYPE(None)
+class SHAstate_st(Structure):
+ pass
+SHAstate_st._fields_ = [
+ ('h0', c_uint),
+ ('h1', c_uint),
+ ('h2', c_uint),
+ ('h3', c_uint),
+ ('h4', c_uint),
+ ('Nl', c_uint),
+ ('Nh', c_uint),
+ ('data', c_uint * 16),
+ ('num', c_int),
+]
+assert sizeof(SHAstate_st) == 96, sizeof(SHAstate_st)
+assert alignment(SHAstate_st) == 4, alignment(SHAstate_st)
+SHA_CTX = SHAstate_st
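+# SHAstate_st: h0-h4 are the five 32-bit words of the 160-bit SHA-1 state;
+# the trailing fields mirror the MD4/MD5 buffering scheme above.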
+class ssl_st(Structure):
+ pass
+ssl_crock_st = POINTER(ssl_st)
+class ssl_cipher_st(Structure):
+ pass
+ssl_cipher_st._fields_ = [
+ ('valid', c_int),
+ ('name', STRING),
+ ('id', c_ulong),
+ ('algorithms', c_ulong),
+ ('algo_strength', c_ulong),
+ ('algorithm2', c_ulong),
+ ('strength_bits', c_int),
+ ('alg_bits', c_int),
+ ('mask', c_ulong),
+ ('mask_strength', c_ulong),
+]
+assert sizeof(ssl_cipher_st) == 40, sizeof(ssl_cipher_st)
+assert alignment(ssl_cipher_st) == 4, alignment(ssl_cipher_st)
+SSL_CIPHER = ssl_cipher_st
+SSL = ssl_st
+class ssl_ctx_st(Structure):
+ pass
+SSL_CTX = ssl_ctx_st
+class ssl_method_st(Structure):
+ pass
+class ssl3_enc_method(Structure):
+ pass
+ssl_method_st._fields_ = [
+ ('version', c_int),
+ ('ssl_new', CFUNCTYPE(c_int, POINTER(SSL))),
+ ('ssl_clear', CFUNCTYPE(None, POINTER(SSL))),
+ ('ssl_free', CFUNCTYPE(None, POINTER(SSL))),
+ ('ssl_accept', CFUNCTYPE(c_int, POINTER(SSL))),
+ ('ssl_connect', CFUNCTYPE(c_int, POINTER(SSL))),
+ ('ssl_read', CFUNCTYPE(c_int, POINTER(SSL), c_void_p, c_int)),
+ ('ssl_peek', CFUNCTYPE(c_int, POINTER(SSL), c_void_p, c_int)),
+ ('ssl_write', CFUNCTYPE(c_int, POINTER(SSL), c_void_p, c_int)),
+ ('ssl_shutdown', CFUNCTYPE(c_int, POINTER(SSL))),
+ ('ssl_renegotiate', CFUNCTYPE(c_int, POINTER(SSL))),
+ ('ssl_renegotiate_check', CFUNCTYPE(c_int, POINTER(SSL))),
+ ('ssl_ctrl', CFUNCTYPE(c_long, POINTER(SSL), c_int, c_long, c_void_p)),
+ ('ssl_ctx_ctrl', CFUNCTYPE(c_long, POINTER(SSL_CTX), c_int, c_long, c_void_p)),
+ ('get_cipher_by_char', CFUNCTYPE(POINTER(SSL_CIPHER), POINTER(c_ubyte))),
+ ('put_cipher_by_char', CFUNCTYPE(c_int, POINTER(SSL_CIPHER), POINTER(c_ubyte))),
+ ('ssl_pending', CFUNCTYPE(c_int, POINTER(SSL))),
+ ('num_ciphers', CFUNCTYPE(c_int)),
+ ('get_cipher', CFUNCTYPE(POINTER(SSL_CIPHER), c_uint)),
+ ('get_ssl_method', CFUNCTYPE(POINTER(ssl_method_st), c_int)),
+ ('get_timeout', CFUNCTYPE(c_long)),
+ ('ssl3_enc', POINTER(ssl3_enc_method)),
+ ('ssl_version', CFUNCTYPE(c_int)),
+ ('ssl_callback_ctrl', CFUNCTYPE(c_long, POINTER(SSL), c_int, CFUNCTYPE(None))),
+ ('ssl_ctx_callback_ctrl', CFUNCTYPE(c_long, POINTER(SSL_CTX), c_int, CFUNCTYPE(None))),
+]
+assert sizeof(ssl_method_st) == 100, sizeof(ssl_method_st)
+assert alignment(ssl_method_st) == 4, alignment(ssl_method_st)
+ssl3_enc_method._fields_ = [
+]
+SSL_METHOD = ssl_method_st
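+# ssl_method_st is the per-protocol-version vtable (SSLv2/SSLv3/TLSv1):
+# connect/accept/read/write entry points, ctrl hooks, cipher lookup, and
+# ssl3_enc pointing at the version-specific record-layer methods.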
+class ssl_session_st(Structure):
+ pass
+class sess_cert_st(Structure):
+ pass
+ssl_session_st._fields_ = [
+ ('ssl_version', c_int),
+ ('key_arg_length', c_uint),
+ ('key_arg', c_ubyte * 8),
+ ('master_key_length', c_int),
+ ('master_key', c_ubyte * 48),
+ ('session_id_length', c_uint),
+ ('session_id', c_ubyte * 32),
+ ('sid_ctx_length', c_uint),
+ ('sid_ctx', c_ubyte * 32),
+ ('not_resumable', c_int),
+ ('sess_cert', POINTER(sess_cert_st)),
+ ('peer', POINTER(X509)),
+ ('verify_result', c_long),
+ ('references', c_int),
+ ('timeout', c_long),
+ ('time', c_long),
+ ('compress_meth', c_int),
+ ('cipher', POINTER(SSL_CIPHER)),
+ ('cipher_id', c_ulong),
+ ('ciphers', POINTER(STACK)),
+ ('ex_data', CRYPTO_EX_DATA),
+ ('prev', POINTER(ssl_session_st)),
+ ('next', POINTER(ssl_session_st)),
+]
+assert sizeof(ssl_session_st) == 200, sizeof(ssl_session_st)
+assert alignment(ssl_session_st) == 4, alignment(ssl_session_st)
+sess_cert_st._fields_ = [
+]
+SSL_SESSION = ssl_session_st
+GEN_SESSION_CB = CFUNCTYPE(c_int, POINTER(SSL), POINTER(c_ubyte), POINTER(c_uint))
+class ssl_comp_st(Structure):
+ pass
+ssl_comp_st._fields_ = [
+ ('id', c_int),
+ ('name', STRING),
+ ('method', POINTER(COMP_METHOD)),
+]
+assert sizeof(ssl_comp_st) == 12, sizeof(ssl_comp_st)
+assert alignment(ssl_comp_st) == 4, alignment(ssl_comp_st)
+SSL_COMP = ssl_comp_st
+class N10ssl_ctx_st4DOLLAR_18E(Structure):
+ pass
+N10ssl_ctx_st4DOLLAR_18E._fields_ = [
+ ('sess_connect', c_int),
+ ('sess_connect_renegotiate', c_int),
+ ('sess_connect_good', c_int),
+ ('sess_accept', c_int),
+ ('sess_accept_renegotiate', c_int),
+ ('sess_accept_good', c_int),
+ ('sess_miss', c_int),
+ ('sess_timeout', c_int),
+ ('sess_cache_full', c_int),
+ ('sess_hit', c_int),
+ ('sess_cb_hit', c_int),
+]
+assert sizeof(N10ssl_ctx_st4DOLLAR_18E) == 44, sizeof(N10ssl_ctx_st4DOLLAR_18E)
+assert alignment(N10ssl_ctx_st4DOLLAR_18E) == 4, alignment(N10ssl_ctx_st4DOLLAR_18E)
+class cert_st(Structure):
+ pass
+ssl_ctx_st._fields_ = [
+ ('method', POINTER(SSL_METHOD)),
+ ('cipher_list', POINTER(STACK)),
+ ('cipher_list_by_id', POINTER(STACK)),
+ ('cert_store', POINTER(x509_store_st)),
+ ('sessions', POINTER(lhash_st)),
+ ('session_cache_size', c_ulong),
+ ('session_cache_head', POINTER(ssl_session_st)),
+ ('session_cache_tail', POINTER(ssl_session_st)),
+ ('session_cache_mode', c_int),
+ ('session_timeout', c_long),
+ ('new_session_cb', CFUNCTYPE(c_int, POINTER(ssl_st), POINTER(SSL_SESSION))),
+ ('remove_session_cb', CFUNCTYPE(None, POINTER(ssl_ctx_st), POINTER(SSL_SESSION))),
+ ('get_session_cb', CFUNCTYPE(POINTER(SSL_SESSION), POINTER(ssl_st), POINTER(c_ubyte), c_int, POINTER(c_int))),
+ ('stats', N10ssl_ctx_st4DOLLAR_18E),
+ ('references', c_int),
+ ('app_verify_callback', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX), c_void_p)),
+ ('app_verify_arg', c_void_p),
+ ('default_passwd_callback', POINTER(pem_password_cb)),
+ ('default_passwd_callback_userdata', c_void_p),
+ ('client_cert_cb', CFUNCTYPE(c_int, POINTER(SSL), POINTER(POINTER(X509)), POINTER(POINTER(EVP_PKEY)))),
+ ('ex_data', CRYPTO_EX_DATA),
+ ('rsa_md5', POINTER(EVP_MD)),
+ ('md5', POINTER(EVP_MD)),
+ ('sha1', POINTER(EVP_MD)),
+ ('extra_certs', POINTER(STACK)),
+ ('comp_methods', POINTER(STACK)),
+ ('info_callback', CFUNCTYPE(None, POINTER(SSL), c_int, c_int)),
+ ('client_CA', POINTER(STACK)),
+ ('options', c_ulong),
+ ('mode', c_ulong),
+ ('max_cert_list', c_long),
+ ('cert', POINTER(cert_st)),
+ ('read_ahead', c_int),
+ ('msg_callback', CFUNCTYPE(None, c_int, c_int, c_int, c_void_p, c_ulong, POINTER(SSL), c_void_p)),
+ ('msg_callback_arg', c_void_p),
+ ('verify_mode', c_int),
+ ('verify_depth', c_int),
+ ('sid_ctx_length', c_uint),
+ ('sid_ctx', c_ubyte * 32),
+ ('default_verify_callback', CFUNCTYPE(c_int, c_int, POINTER(X509_STORE_CTX))),
+ ('generate_session_id', GEN_SESSION_CB),
+ ('purpose', c_int),
+ ('trust', c_int),
+ ('quiet_shutdown', c_int),
+]
+assert sizeof(ssl_ctx_st) == 248, sizeof(ssl_ctx_st)
+assert alignment(ssl_ctx_st) == 4, alignment(ssl_ctx_st)
+cert_st._fields_ = [
+]
+class ssl2_state_st(Structure):
+ pass
+class ssl3_state_st(Structure):
+ pass
+ssl_st._fields_ = [
+ ('version', c_int),
+ ('type', c_int),
+ ('method', POINTER(SSL_METHOD)),
+ ('rbio', POINTER(BIO)),
+ ('wbio', POINTER(BIO)),
+ ('bbio', POINTER(BIO)),
+ ('rwstate', c_int),
+ ('in_handshake', c_int),
+ ('handshake_func', CFUNCTYPE(c_int)),
+ ('server', c_int),
+ ('new_session', c_int),
+ ('quiet_shutdown', c_int),
+ ('shutdown', c_int),
+ ('state', c_int),
+ ('rstate', c_int),
+ ('init_buf', POINTER(BUF_MEM)),
+ ('init_msg', c_void_p),
+ ('init_num', c_int),
+ ('init_off', c_int),
+ ('packet', POINTER(c_ubyte)),
+ ('packet_length', c_uint),
+ ('s2', POINTER(ssl2_state_st)),
+ ('s3', POINTER(ssl3_state_st)),
+ ('read_ahead', c_int),
+ ('msg_callback', CFUNCTYPE(None, c_int, c_int, c_int, c_void_p, c_ulong, POINTER(SSL), c_void_p)),
+ ('msg_callback_arg', c_void_p),
+ ('hit', c_int),
+ ('purpose', c_int),
+ ('trust', c_int),
+ ('cipher_list', POINTER(STACK)),
+ ('cipher_list_by_id', POINTER(STACK)),
+ ('enc_read_ctx', POINTER(EVP_CIPHER_CTX)),
+ ('read_hash', POINTER(EVP_MD)),
+ ('expand', POINTER(COMP_CTX)),
+ ('enc_write_ctx', POINTER(EVP_CIPHER_CTX)),
+ ('write_hash', POINTER(EVP_MD)),
+ ('compress', POINTER(COMP_CTX)),
+ ('cert', POINTER(cert_st)),
+ ('sid_ctx_length', c_uint),
+ ('sid_ctx', c_ubyte * 32),
+ ('session', POINTER(SSL_SESSION)),
+ ('generate_session_id', GEN_SESSION_CB),
+ ('verify_mode', c_int),
+ ('verify_depth', c_int),
+ ('verify_callback', CFUNCTYPE(c_int, c_int, POINTER(X509_STORE_CTX))),
+ ('info_callback', CFUNCTYPE(None, POINTER(SSL), c_int, c_int)),
+ ('error', c_int),
+ ('error_code', c_int),
+ ('ctx', POINTER(SSL_CTX)),
+ ('debug', c_int),
+ ('verify_result', c_long),
+ ('ex_data', CRYPTO_EX_DATA),
+ ('client_CA', POINTER(STACK)),
+ ('references', c_int),
+ ('options', c_ulong),
+ ('mode', c_ulong),
+ ('max_cert_list', c_long),
+ ('first_packet', c_int),
+ ('client_version', c_int),
+]
+assert sizeof(ssl_st) == 268, sizeof(ssl_st)
+assert alignment(ssl_st) == 4, alignment(ssl_st)
+class N13ssl2_state_st4DOLLAR_19E(Structure):
+ pass
+N13ssl2_state_st4DOLLAR_19E._fields_ = [
+ ('conn_id_length', c_uint),
+ ('cert_type', c_uint),
+ ('cert_length', c_uint),
+ ('csl', c_uint),
+ ('clear', c_uint),
+ ('enc', c_uint),
+ ('ccl', c_ubyte * 32),
+ ('cipher_spec_length', c_uint),
+ ('session_id_length', c_uint),
+ ('clen', c_uint),
+ ('rlen', c_uint),
+]
+assert sizeof(N13ssl2_state_st4DOLLAR_19E) == 72, sizeof(N13ssl2_state_st4DOLLAR_19E)
+assert alignment(N13ssl2_state_st4DOLLAR_19E) == 4, alignment(N13ssl2_state_st4DOLLAR_19E)
+ssl2_state_st._fields_ = [
+ ('three_byte_header', c_int),
+ ('clear_text', c_int),
+ ('escape', c_int),
+ ('ssl2_rollback', c_int),
+ ('wnum', c_uint),
+ ('wpend_tot', c_int),
+ ('wpend_buf', POINTER(c_ubyte)),
+ ('wpend_off', c_int),
+ ('wpend_len', c_int),
+ ('wpend_ret', c_int),
+ ('rbuf_left', c_int),
+ ('rbuf_offs', c_int),
+ ('rbuf', POINTER(c_ubyte)),
+ ('wbuf', POINTER(c_ubyte)),
+ ('write_ptr', POINTER(c_ubyte)),
+ ('padding', c_uint),
+ ('rlength', c_uint),
+ ('ract_data_length', c_int),
+ ('wlength', c_uint),
+ ('wact_data_length', c_int),
+ ('ract_data', POINTER(c_ubyte)),
+ ('wact_data', POINTER(c_ubyte)),
+ ('mac_data', POINTER(c_ubyte)),
+ ('read_key', POINTER(c_ubyte)),
+ ('write_key', POINTER(c_ubyte)),
+ ('challenge_length', c_uint),
+ ('challenge', c_ubyte * 32),
+ ('conn_id_length', c_uint),
+ ('conn_id', c_ubyte * 16),
+ ('key_material_length', c_uint),
+ ('key_material', c_ubyte * 48),
+ ('read_sequence', c_ulong),
+ ('write_sequence', c_ulong),
+ ('tmp', N13ssl2_state_st4DOLLAR_19E),
+]
+assert sizeof(ssl2_state_st) == 288, sizeof(ssl2_state_st)
+assert alignment(ssl2_state_st) == 4, alignment(ssl2_state_st)
+SSL2_STATE = ssl2_state_st
+class ssl3_record_st(Structure):
+ pass
+ssl3_record_st._fields_ = [
+ ('type', c_int),
+ ('length', c_uint),
+ ('off', c_uint),
+ ('data', POINTER(c_ubyte)),
+ ('input', POINTER(c_ubyte)),
+ ('comp', POINTER(c_ubyte)),
+]
+assert sizeof(ssl3_record_st) == 24, sizeof(ssl3_record_st)
+assert alignment(ssl3_record_st) == 4, alignment(ssl3_record_st)
+SSL3_RECORD = ssl3_record_st
+class ssl3_buffer_st(Structure):
+ pass
+size_t = __darwin_size_t
+ssl3_buffer_st._fields_ = [
+ ('buf', POINTER(c_ubyte)),
+ ('len', size_t),
+ ('offset', c_int),
+ ('left', c_int),
+]
+assert sizeof(ssl3_buffer_st) == 16, sizeof(ssl3_buffer_st)
+assert alignment(ssl3_buffer_st) == 4, alignment(ssl3_buffer_st)
+SSL3_BUFFER = ssl3_buffer_st
+class N13ssl3_state_st4DOLLAR_20E(Structure):
+ pass
+N13ssl3_state_st4DOLLAR_20E._fields_ = [
+ ('cert_verify_md', c_ubyte * 72),
+ ('finish_md', c_ubyte * 72),
+ ('finish_md_len', c_int),
+ ('peer_finish_md', c_ubyte * 72),
+ ('peer_finish_md_len', c_int),
+ ('message_size', c_ulong),
+ ('message_type', c_int),
+ ('new_cipher', POINTER(SSL_CIPHER)),
+ ('dh', POINTER(DH)),
+ ('next_state', c_int),
+ ('reuse_message', c_int),
+ ('cert_req', c_int),
+ ('ctype_num', c_int),
+ ('ctype', c_char * 7),
+ ('ca_names', POINTER(STACK)),
+ ('use_rsa_tmp', c_int),
+ ('key_block_length', c_int),
+ ('key_block', POINTER(c_ubyte)),
+ ('new_sym_enc', POINTER(EVP_CIPHER)),
+ ('new_hash', POINTER(EVP_MD)),
+ ('new_compression', POINTER(SSL_COMP)),
+ ('cert_request', c_int),
+]
+assert sizeof(N13ssl3_state_st4DOLLAR_20E) == 296, sizeof(N13ssl3_state_st4DOLLAR_20E)
+assert alignment(N13ssl3_state_st4DOLLAR_20E) == 4, alignment(N13ssl3_state_st4DOLLAR_20E)
+ssl3_state_st._fields_ = [
+ ('flags', c_long),
+ ('delay_buf_pop_ret', c_int),
+ ('read_sequence', c_ubyte * 8),
+ ('read_mac_secret', c_ubyte * 36),
+ ('write_sequence', c_ubyte * 8),
+ ('write_mac_secret', c_ubyte * 36),
+ ('server_random', c_ubyte * 32),
+ ('client_random', c_ubyte * 32),
+ ('need_empty_fragments', c_int),
+ ('empty_fragment_done', c_int),
+ ('rbuf', SSL3_BUFFER),
+ ('wbuf', SSL3_BUFFER),
+ ('rrec', SSL3_RECORD),
+ ('wrec', SSL3_RECORD),
+ ('alert_fragment', c_ubyte * 2),
+ ('alert_fragment_len', c_uint),
+ ('handshake_fragment', c_ubyte * 4),
+ ('handshake_fragment_len', c_uint),
+ ('wnum', c_uint),
+ ('wpend_tot', c_int),
+ ('wpend_type', c_int),
+ ('wpend_ret', c_int),
+ ('wpend_buf', POINTER(c_ubyte)),
+ ('finish_dgst1', EVP_MD_CTX),
+ ('finish_dgst2', EVP_MD_CTX),
+ ('change_cipher_spec', c_int),
+ ('warn_alert', c_int),
+ ('fatal_alert', c_int),
+ ('alert_dispatch', c_int),
+ ('send_alert', c_ubyte * 2),
+ ('renegotiate', c_int),
+ ('total_renegotiations', c_int),
+ ('num_renegotiations', c_int),
+ ('in_read_app_data', c_int),
+ ('tmp', N13ssl3_state_st4DOLLAR_20E),
+]
+assert sizeof(ssl3_state_st) == 648, sizeof(ssl3_state_st)
+assert alignment(ssl3_state_st) == 4, alignment(ssl3_state_st)
+SSL3_STATE = ssl3_state_st
+stack_st._fields_ = [
+ ('num', c_int),
+ ('data', POINTER(STRING)),
+ ('sorted', c_int),
+ ('num_alloc', c_int),
+ ('comp', CFUNCTYPE(c_int, POINTER(STRING), POINTER(STRING))),
+]
+assert sizeof(stack_st) == 20, sizeof(stack_st)
+assert alignment(stack_st) == 4, alignment(stack_st)
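+# This completes the stack_st forward-declared earlier: a generic growable
+# array of char* with an optional comparator, which is why certificate and
+# cipher lists above are all typed as POINTER(STACK).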
+class ui_st(Structure):
+ pass
+ui_st._fields_ = [
+]
+UI = ui_st
+class ui_method_st(Structure):
+ pass
+ui_method_st._fields_ = [
+]
+UI_METHOD = ui_method_st
+class ui_string_st(Structure):
+ pass
+ui_string_st._fields_ = [
+]
+UI_STRING = ui_string_st
+
+# values for enumeration 'UI_string_types'
+UI_string_types = c_int # enum
+class X509_objects_st(Structure):
+ pass
+X509_objects_st._fields_ = [
+ ('nid', c_int),
+ ('a2i', CFUNCTYPE(c_int)),
+ ('i2a', CFUNCTYPE(c_int)),
+]
+assert sizeof(X509_objects_st) == 12, sizeof(X509_objects_st)
+assert alignment(X509_objects_st) == 4, alignment(X509_objects_st)
+X509_OBJECTS = X509_objects_st
+X509_algor_st._fields_ = [
+ ('algorithm', POINTER(ASN1_OBJECT)),
+ ('parameter', POINTER(ASN1_TYPE)),
+]
+assert sizeof(X509_algor_st) == 8, sizeof(X509_algor_st)
+assert alignment(X509_algor_st) == 4, alignment(X509_algor_st)
+class X509_val_st(Structure):
+ pass
+X509_val_st._fields_ = [
+ ('notBefore', POINTER(ASN1_TIME)),
+ ('notAfter', POINTER(ASN1_TIME)),
+]
+assert sizeof(X509_val_st) == 8, sizeof(X509_val_st)
+assert alignment(X509_val_st) == 4, alignment(X509_val_st)
+X509_VAL = X509_val_st
+class X509_pubkey_st(Structure):
+ pass
+X509_pubkey_st._fields_ = [
+ ('algor', POINTER(X509_ALGOR)),
+ ('public_key', POINTER(ASN1_BIT_STRING)),
+ ('pkey', POINTER(EVP_PKEY)),
+]
+assert sizeof(X509_pubkey_st) == 12, sizeof(X509_pubkey_st)
+assert alignment(X509_pubkey_st) == 4, alignment(X509_pubkey_st)
+X509_PUBKEY = X509_pubkey_st
+class X509_sig_st(Structure):
+ pass
+X509_sig_st._fields_ = [
+ ('algor', POINTER(X509_ALGOR)),
+ ('digest', POINTER(ASN1_OCTET_STRING)),
+]
+assert sizeof(X509_sig_st) == 8, sizeof(X509_sig_st)
+assert alignment(X509_sig_st) == 4, alignment(X509_sig_st)
+X509_SIG = X509_sig_st
+class X509_name_entry_st(Structure):
+ pass
+X509_name_entry_st._fields_ = [
+ ('object', POINTER(ASN1_OBJECT)),
+ ('value', POINTER(ASN1_STRING)),
+ ('set', c_int),
+ ('size', c_int),
+]
+assert sizeof(X509_name_entry_st) == 16, sizeof(X509_name_entry_st)
+assert alignment(X509_name_entry_st) == 4, alignment(X509_name_entry_st)
+X509_NAME_ENTRY = X509_name_entry_st
+X509_name_st._fields_ = [
+ ('entries', POINTER(STACK)),
+ ('modified', c_int),
+ ('bytes', POINTER(BUF_MEM)),
+ ('hash', c_ulong),
+]
+assert sizeof(X509_name_st) == 16, sizeof(X509_name_st)
+assert alignment(X509_name_st) == 4, alignment(X509_name_st)
+class X509_extension_st(Structure):
+ pass
+X509_extension_st._fields_ = [
+ ('object', POINTER(ASN1_OBJECT)),
+ ('critical', ASN1_BOOLEAN),
+ ('value', POINTER(ASN1_OCTET_STRING)),
+]
+assert sizeof(X509_extension_st) == 12, sizeof(X509_extension_st)
+assert alignment(X509_extension_st) == 4, alignment(X509_extension_st)
+X509_EXTENSION = X509_extension_st
+class x509_attributes_st(Structure):
+ pass
+class N18x509_attributes_st4DOLLAR_13E(Union):
+ pass
+N18x509_attributes_st4DOLLAR_13E._fields_ = [
+ ('ptr', STRING),
+ ('set', POINTER(STACK)),
+ ('single', POINTER(ASN1_TYPE)),
+]
+assert sizeof(N18x509_attributes_st4DOLLAR_13E) == 4, sizeof(N18x509_attributes_st4DOLLAR_13E)
+assert alignment(N18x509_attributes_st4DOLLAR_13E) == 4, alignment(N18x509_attributes_st4DOLLAR_13E)
+x509_attributes_st._fields_ = [
+ ('object', POINTER(ASN1_OBJECT)),
+ ('single', c_int),
+ ('value', N18x509_attributes_st4DOLLAR_13E),
+]
+assert sizeof(x509_attributes_st) == 12, sizeof(x509_attributes_st)
+assert alignment(x509_attributes_st) == 4, alignment(x509_attributes_st)
+X509_ATTRIBUTE = x509_attributes_st
+class X509_req_info_st(Structure):
+ pass
+X509_req_info_st._fields_ = [
+ ('enc', ASN1_ENCODING),
+ ('version', POINTER(ASN1_INTEGER)),
+ ('subject', POINTER(X509_NAME)),
+ ('pubkey', POINTER(X509_PUBKEY)),
+ ('attributes', POINTER(STACK)),
+]
+assert sizeof(X509_req_info_st) == 28, sizeof(X509_req_info_st)
+assert alignment(X509_req_info_st) == 4, alignment(X509_req_info_st)
+X509_REQ_INFO = X509_req_info_st
+class X509_req_st(Structure):
+ pass
+X509_req_st._fields_ = [
+ ('req_info', POINTER(X509_REQ_INFO)),
+ ('sig_alg', POINTER(X509_ALGOR)),
+ ('signature', POINTER(ASN1_BIT_STRING)),
+ ('references', c_int),
+]
+assert sizeof(X509_req_st) == 16, sizeof(X509_req_st)
+assert alignment(X509_req_st) == 4, alignment(X509_req_st)
+X509_REQ = X509_req_st
+class x509_cinf_st(Structure):
+ pass
+x509_cinf_st._fields_ = [
+ ('version', POINTER(ASN1_INTEGER)),
+ ('serialNumber', POINTER(ASN1_INTEGER)),
+ ('signature', POINTER(X509_ALGOR)),
+ ('issuer', POINTER(X509_NAME)),
+ ('validity', POINTER(X509_VAL)),
+ ('subject', POINTER(X509_NAME)),
+ ('key', POINTER(X509_PUBKEY)),
+ ('issuerUID', POINTER(ASN1_BIT_STRING)),
+ ('subjectUID', POINTER(ASN1_BIT_STRING)),
+ ('extensions', POINTER(STACK)),
+]
+assert sizeof(x509_cinf_st) == 40, sizeof(x509_cinf_st)
+assert alignment(x509_cinf_st) == 4, alignment(x509_cinf_st)
+X509_CINF = x509_cinf_st
+class x509_cert_aux_st(Structure):
+ pass
+x509_cert_aux_st._fields_ = [
+ ('trust', POINTER(STACK)),
+ ('reject', POINTER(STACK)),
+ ('alias', POINTER(ASN1_UTF8STRING)),
+ ('keyid', POINTER(ASN1_OCTET_STRING)),
+ ('other', POINTER(STACK)),
+]
+assert sizeof(x509_cert_aux_st) == 20, sizeof(x509_cert_aux_st)
+assert alignment(x509_cert_aux_st) == 4, alignment(x509_cert_aux_st)
+X509_CERT_AUX = x509_cert_aux_st
+class AUTHORITY_KEYID_st(Structure):
+ pass
+x509_st._fields_ = [
+ ('cert_info', POINTER(X509_CINF)),
+ ('sig_alg', POINTER(X509_ALGOR)),
+ ('signature', POINTER(ASN1_BIT_STRING)),
+ ('valid', c_int),
+ ('references', c_int),
+ ('name', STRING),
+ ('ex_data', CRYPTO_EX_DATA),
+ ('ex_pathlen', c_long),
+ ('ex_flags', c_ulong),
+ ('ex_kusage', c_ulong),
+ ('ex_xkusage', c_ulong),
+ ('ex_nscert', c_ulong),
+ ('skid', POINTER(ASN1_OCTET_STRING)),
+ ('akid', POINTER(AUTHORITY_KEYID_st)),
+ ('sha1_hash', c_ubyte * 20),
+ ('aux', POINTER(X509_CERT_AUX)),
+]
+assert sizeof(x509_st) == 84, sizeof(x509_st)
+assert alignment(x509_st) == 4, alignment(x509_st)
+AUTHORITY_KEYID_st._fields_ = [
+]
+class x509_trust_st(Structure):
+ pass
+x509_trust_st._fields_ = [
+ ('trust', c_int),
+ ('flags', c_int),
+ ('check_trust', CFUNCTYPE(c_int, POINTER(x509_trust_st), POINTER(X509), c_int)),
+ ('name', STRING),
+ ('arg1', c_int),
+ ('arg2', c_void_p),
+]
+assert sizeof(x509_trust_st) == 24, sizeof(x509_trust_st)
+assert alignment(x509_trust_st) == 4, alignment(x509_trust_st)
+X509_TRUST = x509_trust_st
+class X509_revoked_st(Structure):
+ pass
+X509_revoked_st._fields_ = [
+ ('serialNumber', POINTER(ASN1_INTEGER)),
+ ('revocationDate', POINTER(ASN1_TIME)),
+ ('extensions', POINTER(STACK)),
+ ('sequence', c_int),
+]
+assert sizeof(X509_revoked_st) == 16, sizeof(X509_revoked_st)
+assert alignment(X509_revoked_st) == 4, alignment(X509_revoked_st)
+X509_REVOKED = X509_revoked_st
+class X509_crl_info_st(Structure):
+ pass
+X509_crl_info_st._fields_ = [
+ ('version', POINTER(ASN1_INTEGER)),
+ ('sig_alg', POINTER(X509_ALGOR)),
+ ('issuer', POINTER(X509_NAME)),
+ ('lastUpdate', POINTER(ASN1_TIME)),
+ ('nextUpdate', POINTER(ASN1_TIME)),
+ ('revoked', POINTER(STACK)),
+ ('extensions', POINTER(STACK)),
+ ('enc', ASN1_ENCODING),
+]
+assert sizeof(X509_crl_info_st) == 40, sizeof(X509_crl_info_st)
+assert alignment(X509_crl_info_st) == 4, alignment(X509_crl_info_st)
+X509_CRL_INFO = X509_crl_info_st
+X509_crl_st._fields_ = [
+ ('crl', POINTER(X509_CRL_INFO)),
+ ('sig_alg', POINTER(X509_ALGOR)),
+ ('signature', POINTER(ASN1_BIT_STRING)),
+ ('references', c_int),
+]
+assert sizeof(X509_crl_st) == 16, sizeof(X509_crl_st)
+assert alignment(X509_crl_st) == 4, alignment(X509_crl_st)
+class private_key_st(Structure):
+ pass
+private_key_st._fields_ = [
+ ('version', c_int),
+ ('enc_algor', POINTER(X509_ALGOR)),
+ ('enc_pkey', POINTER(ASN1_OCTET_STRING)),
+ ('dec_pkey', POINTER(EVP_PKEY)),
+ ('key_length', c_int),
+ ('key_data', STRING),
+ ('key_free', c_int),
+ ('cipher', EVP_CIPHER_INFO),
+ ('references', c_int),
+]
+assert sizeof(private_key_st) == 52, sizeof(private_key_st)
+assert alignment(private_key_st) == 4, alignment(private_key_st)
+X509_PKEY = private_key_st
+class X509_info_st(Structure):
+ pass
+X509_info_st._fields_ = [
+ ('x509', POINTER(X509)),
+ ('crl', POINTER(X509_CRL)),
+ ('x_pkey', POINTER(X509_PKEY)),
+ ('enc_cipher', EVP_CIPHER_INFO),
+ ('enc_len', c_int),
+ ('enc_data', STRING),
+ ('references', c_int),
+]
+assert sizeof(X509_info_st) == 44, sizeof(X509_info_st)
+assert alignment(X509_info_st) == 4, alignment(X509_info_st)
+X509_INFO = X509_info_st
+class Netscape_spkac_st(Structure):
+ pass
+Netscape_spkac_st._fields_ = [
+ ('pubkey', POINTER(X509_PUBKEY)),
+ ('challenge', POINTER(ASN1_IA5STRING)),
+]
+assert sizeof(Netscape_spkac_st) == 8, sizeof(Netscape_spkac_st)
+assert alignment(Netscape_spkac_st) == 4, alignment(Netscape_spkac_st)
+NETSCAPE_SPKAC = Netscape_spkac_st
+class Netscape_spki_st(Structure):
+ pass
+Netscape_spki_st._fields_ = [
+ ('spkac', POINTER(NETSCAPE_SPKAC)),
+ ('sig_algor', POINTER(X509_ALGOR)),
+ ('signature', POINTER(ASN1_BIT_STRING)),
+]
+assert sizeof(Netscape_spki_st) == 12, sizeof(Netscape_spki_st)
+assert alignment(Netscape_spki_st) == 4, alignment(Netscape_spki_st)
+NETSCAPE_SPKI = Netscape_spki_st
+class Netscape_certificate_sequence(Structure):
+ pass
+Netscape_certificate_sequence._fields_ = [
+ ('type', POINTER(ASN1_OBJECT)),
+ ('certs', POINTER(STACK)),
+]
+assert sizeof(Netscape_certificate_sequence) == 8, sizeof(Netscape_certificate_sequence)
+assert alignment(Netscape_certificate_sequence) == 4, alignment(Netscape_certificate_sequence)
+NETSCAPE_CERT_SEQUENCE = Netscape_certificate_sequence
+class PBEPARAM_st(Structure):
+ pass
+PBEPARAM_st._fields_ = [
+ ('salt', POINTER(ASN1_OCTET_STRING)),
+ ('iter', POINTER(ASN1_INTEGER)),
+]
+assert sizeof(PBEPARAM_st) == 8, sizeof(PBEPARAM_st)
+assert alignment(PBEPARAM_st) == 4, alignment(PBEPARAM_st)
+PBEPARAM = PBEPARAM_st
+class PBE2PARAM_st(Structure):
+ pass
+PBE2PARAM_st._fields_ = [
+ ('keyfunc', POINTER(X509_ALGOR)),
+ ('encryption', POINTER(X509_ALGOR)),
+]
+assert sizeof(PBE2PARAM_st) == 8, sizeof(PBE2PARAM_st)
+assert alignment(PBE2PARAM_st) == 4, alignment(PBE2PARAM_st)
+PBE2PARAM = PBE2PARAM_st
+class PBKDF2PARAM_st(Structure):
+ pass
+PBKDF2PARAM_st._fields_ = [
+ ('salt', POINTER(ASN1_TYPE)),
+ ('iter', POINTER(ASN1_INTEGER)),
+ ('keylength', POINTER(ASN1_INTEGER)),
+ ('prf', POINTER(X509_ALGOR)),
+]
+assert sizeof(PBKDF2PARAM_st) == 16, sizeof(PBKDF2PARAM_st)
+assert alignment(PBKDF2PARAM_st) == 4, alignment(PBKDF2PARAM_st)
+PBKDF2PARAM = PBKDF2PARAM_st
+class pkcs8_priv_key_info_st(Structure):
+ pass
+pkcs8_priv_key_info_st._fields_ = [
+ ('broken', c_int),
+ ('version', POINTER(ASN1_INTEGER)),
+ ('pkeyalg', POINTER(X509_ALGOR)),
+ ('pkey', POINTER(ASN1_TYPE)),
+ ('attributes', POINTER(STACK)),
+]
+assert sizeof(pkcs8_priv_key_info_st) == 20, sizeof(pkcs8_priv_key_info_st)
+assert alignment(pkcs8_priv_key_info_st) == 4, alignment(pkcs8_priv_key_info_st)
+PKCS8_PRIV_KEY_INFO = pkcs8_priv_key_info_st
+class x509_hash_dir_st(Structure):
+ pass
+x509_hash_dir_st._fields_ = [
+ ('num_dirs', c_int),
+ ('dirs', POINTER(STRING)),
+ ('dirs_type', POINTER(c_int)),
+ ('num_dirs_alloced', c_int),
+]
+assert sizeof(x509_hash_dir_st) == 16, sizeof(x509_hash_dir_st)
+assert alignment(x509_hash_dir_st) == 4, alignment(x509_hash_dir_st)
+X509_HASH_DIR_CTX = x509_hash_dir_st
+class x509_file_st(Structure):
+ pass
+x509_file_st._fields_ = [
+ ('num_paths', c_int),
+ ('num_alloced', c_int),
+ ('paths', POINTER(STRING)),
+ ('path_type', POINTER(c_int)),
+]
+assert sizeof(x509_file_st) == 16, sizeof(x509_file_st)
+assert alignment(x509_file_st) == 4, alignment(x509_file_st)
+X509_CERT_FILE_CTX = x509_file_st
+class x509_object_st(Structure):
+ pass
+class N14x509_object_st4DOLLAR_14E(Union):
+ pass
+N14x509_object_st4DOLLAR_14E._fields_ = [
+ ('ptr', STRING),
+ ('x509', POINTER(X509)),
+ ('crl', POINTER(X509_CRL)),
+ ('pkey', POINTER(EVP_PKEY)),
+]
+assert sizeof(N14x509_object_st4DOLLAR_14E) == 4, sizeof(N14x509_object_st4DOLLAR_14E)
+assert alignment(N14x509_object_st4DOLLAR_14E) == 4, alignment(N14x509_object_st4DOLLAR_14E)
+x509_object_st._fields_ = [
+ ('type', c_int),
+ ('data', N14x509_object_st4DOLLAR_14E),
+]
+assert sizeof(x509_object_st) == 8, sizeof(x509_object_st)
+assert alignment(x509_object_st) == 4, alignment(x509_object_st)
+X509_OBJECT = x509_object_st
+class x509_lookup_st(Structure):
+ pass
+X509_LOOKUP = x509_lookup_st
+class x509_lookup_method_st(Structure):
+ pass
+x509_lookup_method_st._fields_ = [
+ ('name', STRING),
+ ('new_item', CFUNCTYPE(c_int, POINTER(X509_LOOKUP))),
+ ('free', CFUNCTYPE(None, POINTER(X509_LOOKUP))),
+ ('init', CFUNCTYPE(c_int, POINTER(X509_LOOKUP))),
+ ('shutdown', CFUNCTYPE(c_int, POINTER(X509_LOOKUP))),
+ ('ctrl', CFUNCTYPE(c_int, POINTER(X509_LOOKUP), c_int, STRING, c_long, POINTER(STRING))),
+ ('get_by_subject', CFUNCTYPE(c_int, POINTER(X509_LOOKUP), c_int, POINTER(X509_NAME), POINTER(X509_OBJECT))),
+ ('get_by_issuer_serial', CFUNCTYPE(c_int, POINTER(X509_LOOKUP), c_int, POINTER(X509_NAME), POINTER(ASN1_INTEGER), POINTER(X509_OBJECT))),
+ ('get_by_fingerprint', CFUNCTYPE(c_int, POINTER(X509_LOOKUP), c_int, POINTER(c_ubyte), c_int, POINTER(X509_OBJECT))),
+ ('get_by_alias', CFUNCTYPE(c_int, POINTER(X509_LOOKUP), c_int, STRING, c_int, POINTER(X509_OBJECT))),
+]
+assert sizeof(x509_lookup_method_st) == 40, sizeof(x509_lookup_method_st)
+assert alignment(x509_lookup_method_st) == 4, alignment(x509_lookup_method_st)
+X509_LOOKUP_METHOD = x509_lookup_method_st
+x509_store_st._fields_ = [
+ ('cache', c_int),
+ ('objs', POINTER(STACK)),
+ ('get_cert_methods', POINTER(STACK)),
+ ('flags', c_ulong),
+ ('purpose', c_int),
+ ('trust', c_int),
+ ('verify', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX))),
+ ('verify_cb', CFUNCTYPE(c_int, c_int, POINTER(X509_STORE_CTX))),
+ ('get_issuer', CFUNCTYPE(c_int, POINTER(POINTER(X509)), POINTER(X509_STORE_CTX), POINTER(X509))),
+ ('check_issued', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX), POINTER(X509), POINTER(X509))),
+ ('check_revocation', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX))),
+ ('get_crl', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX), POINTER(POINTER(X509_CRL)), POINTER(X509))),
+ ('check_crl', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX), POINTER(X509_CRL))),
+ ('cert_crl', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX), POINTER(X509_CRL), POINTER(X509))),
+ ('cleanup', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX))),
+ ('ex_data', CRYPTO_EX_DATA),
+ ('references', c_int),
+ ('depth', c_int),
+]
+assert sizeof(x509_store_st) == 76, sizeof(x509_store_st)
+assert alignment(x509_store_st) == 4, alignment(x509_store_st)
+x509_lookup_st._fields_ = [
+ ('init', c_int),
+ ('skip', c_int),
+ ('method', POINTER(X509_LOOKUP_METHOD)),
+ ('method_data', STRING),
+ ('store_ctx', POINTER(X509_STORE)),
+]
+assert sizeof(x509_lookup_st) == 20, sizeof(x509_lookup_st)
+assert alignment(x509_lookup_st) == 4, alignment(x509_lookup_st)
+time_t = __darwin_time_t
+x509_store_ctx_st._fields_ = [
+ ('ctx', POINTER(X509_STORE)),
+ ('current_method', c_int),
+ ('cert', POINTER(X509)),
+ ('untrusted', POINTER(STACK)),
+ ('purpose', c_int),
+ ('trust', c_int),
+ ('check_time', time_t),
+ ('flags', c_ulong),
+ ('other_ctx', c_void_p),
+ ('verify', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX))),
+ ('verify_cb', CFUNCTYPE(c_int, c_int, POINTER(X509_STORE_CTX))),
+ ('get_issuer', CFUNCTYPE(c_int, POINTER(POINTER(X509)), POINTER(X509_STORE_CTX), POINTER(X509))),
+ ('check_issued', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX), POINTER(X509), POINTER(X509))),
+ ('check_revocation', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX))),
+ ('get_crl', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX), POINTER(POINTER(X509_CRL)), POINTER(X509))),
+ ('check_crl', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX), POINTER(X509_CRL))),
+ ('cert_crl', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX), POINTER(X509_CRL), POINTER(X509))),
+ ('cleanup', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX))),
+ ('depth', c_int),
+ ('valid', c_int),
+ ('last_untrusted', c_int),
+ ('chain', POINTER(STACK)),
+ ('error_depth', c_int),
+ ('error', c_int),
+ ('current_cert', POINTER(X509)),
+ ('current_issuer', POINTER(X509)),
+ ('current_crl', POINTER(X509_CRL)),
+ ('ex_data', CRYPTO_EX_DATA),
+]
+assert sizeof(x509_store_ctx_st) == 116, sizeof(x509_store_ctx_st)
+assert alignment(x509_store_ctx_st) == 4, alignment(x509_store_ctx_st)
+va_list = __darwin_va_list
+__darwin_off_t = __int64_t
+fpos_t = __darwin_off_t
+class __sbuf(Structure):
+ pass
+__sbuf._fields_ = [
+ ('_base', POINTER(c_ubyte)),
+ ('_size', c_int),
+]
+assert sizeof(__sbuf) == 8, sizeof(__sbuf)
+assert alignment(__sbuf) == 4, alignment(__sbuf)
+class __sFILEX(Structure):
+ pass
+__sFILEX._fields_ = [
+]
+class __sFILE(Structure):
+ pass
+__sFILE._pack_ = 4
+__sFILE._fields_ = [
+ ('_p', POINTER(c_ubyte)),
+ ('_r', c_int),
+ ('_w', c_int),
+ ('_flags', c_short),
+ ('_file', c_short),
+ ('_bf', __sbuf),
+ ('_lbfsize', c_int),
+ ('_cookie', c_void_p),
+ ('_close', CFUNCTYPE(c_int, c_void_p)),
+ ('_read', CFUNCTYPE(c_int, c_void_p, STRING, c_int)),
+ ('_seek', CFUNCTYPE(fpos_t, c_void_p, c_longlong, c_int)),
+ ('_write', CFUNCTYPE(c_int, c_void_p, STRING, c_int)),
+ ('_ub', __sbuf),
+ ('_extra', POINTER(__sFILEX)),
+ ('_ur', c_int),
+ ('_ubuf', c_ubyte * 3),
+ ('_nbuf', c_ubyte * 1),
+ ('_lb', __sbuf),
+ ('_blksize', c_int),
+ ('_offset', fpos_t),
+]
+assert sizeof(__sFILE) == 88, sizeof(__sFILE)
+assert alignment(__sFILE) == 4, alignment(__sFILE)
+FILE = __sFILE
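+# __sFILE is Darwin's stdio FILE. _pack_ = 4 matches the i386 Darwin ABI,
+# where 8-byte members such as the fpos_t _offset are only 4-byte aligned,
+# giving the asserted 88-byte size.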
+ct_rune_t = __darwin_ct_rune_t
+rune_t = __darwin_rune_t
+class div_t(Structure):
+ pass
+div_t._fields_ = [
+ ('quot', c_int),
+ ('rem', c_int),
+]
+assert sizeof(div_t) == 8, sizeof(div_t)
+assert alignment(div_t) == 4, alignment(div_t)
+class ldiv_t(Structure):
+ pass
+ldiv_t._fields_ = [
+ ('quot', c_long),
+ ('rem', c_long),
+]
+assert sizeof(ldiv_t) == 8, sizeof(ldiv_t)
+assert alignment(ldiv_t) == 4, alignment(ldiv_t)
+class lldiv_t(Structure):
+ pass
+lldiv_t._pack_ = 4
+lldiv_t._fields_ = [
+ ('quot', c_longlong),
+ ('rem', c_longlong),
+]
+assert sizeof(lldiv_t) == 16, sizeof(lldiv_t)
+assert alignment(lldiv_t) == 4, alignment(lldiv_t)
+__darwin_dev_t = __int32_t
+dev_t = __darwin_dev_t
+__darwin_mode_t = __uint16_t
+mode_t = __darwin_mode_t
+class mcontext(Structure):
+ pass
+mcontext._fields_ = [
+]
+class mcontext64(Structure):
+ pass
+mcontext64._fields_ = [
+]
+class __darwin_pthread_handler_rec(Structure):
+ pass
+__darwin_pthread_handler_rec._fields_ = [
+ ('__routine', CFUNCTYPE(None, c_void_p)),
+ ('__arg', c_void_p),
+ ('__next', POINTER(__darwin_pthread_handler_rec)),
+]
+assert sizeof(__darwin_pthread_handler_rec) == 12, sizeof(__darwin_pthread_handler_rec)
+assert alignment(__darwin_pthread_handler_rec) == 4, alignment(__darwin_pthread_handler_rec)
+class _opaque_pthread_attr_t(Structure):
+ pass
+_opaque_pthread_attr_t._fields_ = [
+ ('__sig', c_long),
+ ('__opaque', c_char * 36),
+]
+assert sizeof(_opaque_pthread_attr_t) == 40, sizeof(_opaque_pthread_attr_t)
+assert alignment(_opaque_pthread_attr_t) == 4, alignment(_opaque_pthread_attr_t)
+class _opaque_pthread_cond_t(Structure):
+ pass
+_opaque_pthread_cond_t._fields_ = [
+ ('__sig', c_long),
+ ('__opaque', c_char * 24),
+]
+assert sizeof(_opaque_pthread_cond_t) == 28, sizeof(_opaque_pthread_cond_t)
+assert alignment(_opaque_pthread_cond_t) == 4, alignment(_opaque_pthread_cond_t)
+class _opaque_pthread_condattr_t(Structure):
+ pass
+_opaque_pthread_condattr_t._fields_ = [
+ ('__sig', c_long),
+ ('__opaque', c_char * 4),
+]
+assert sizeof(_opaque_pthread_condattr_t) == 8, sizeof(_opaque_pthread_condattr_t)
+assert alignment(_opaque_pthread_condattr_t) == 4, alignment(_opaque_pthread_condattr_t)
+class _opaque_pthread_mutex_t(Structure):
+ pass
+_opaque_pthread_mutex_t._fields_ = [
+ ('__sig', c_long),
+ ('__opaque', c_char * 40),
+]
+assert sizeof(_opaque_pthread_mutex_t) == 44, sizeof(_opaque_pthread_mutex_t)
+assert alignment(_opaque_pthread_mutex_t) == 4, alignment(_opaque_pthread_mutex_t)
+class _opaque_pthread_mutexattr_t(Structure):
+ pass
+_opaque_pthread_mutexattr_t._fields_ = [
+ ('__sig', c_long),
+ ('__opaque', c_char * 8),
+]
+assert sizeof(_opaque_pthread_mutexattr_t) == 12, sizeof(_opaque_pthread_mutexattr_t)
+assert alignment(_opaque_pthread_mutexattr_t) == 4, alignment(_opaque_pthread_mutexattr_t)
+class _opaque_pthread_once_t(Structure):
+ pass
+_opaque_pthread_once_t._fields_ = [
+ ('__sig', c_long),
+ ('__opaque', c_char * 4),
+]
+assert sizeof(_opaque_pthread_once_t) == 8, sizeof(_opaque_pthread_once_t)
+assert alignment(_opaque_pthread_once_t) == 4, alignment(_opaque_pthread_once_t)
+class _opaque_pthread_rwlock_t(Structure):
+ pass
+_opaque_pthread_rwlock_t._fields_ = [
+ ('__sig', c_long),
+ ('__opaque', c_char * 124),
+]
+assert sizeof(_opaque_pthread_rwlock_t) == 128, sizeof(_opaque_pthread_rwlock_t)
+assert alignment(_opaque_pthread_rwlock_t) == 4, alignment(_opaque_pthread_rwlock_t)
+class _opaque_pthread_rwlockattr_t(Structure):
+ pass
+_opaque_pthread_rwlockattr_t._fields_ = [
+ ('__sig', c_long),
+ ('__opaque', c_char * 12),
+]
+assert sizeof(_opaque_pthread_rwlockattr_t) == 16, sizeof(_opaque_pthread_rwlockattr_t)
+assert alignment(_opaque_pthread_rwlockattr_t) == 4, alignment(_opaque_pthread_rwlockattr_t)
+class _opaque_pthread_t(Structure):
+ pass
+_opaque_pthread_t._fields_ = [
+ ('__sig', c_long),
+ ('__cleanup_stack', POINTER(__darwin_pthread_handler_rec)),
+ ('__opaque', c_char * 596),
+]
+assert sizeof(_opaque_pthread_t) == 604, sizeof(_opaque_pthread_t)
+assert alignment(_opaque_pthread_t) == 4, alignment(_opaque_pthread_t)
+__darwin_blkcnt_t = __int64_t
+__darwin_blksize_t = __int32_t
+__darwin_fsblkcnt_t = c_uint
+__darwin_fsfilcnt_t = c_uint
+__darwin_gid_t = __uint32_t
+__darwin_id_t = __uint32_t
+__darwin_ino_t = __uint32_t
+__darwin_mach_port_name_t = __darwin_natural_t
+__darwin_mach_port_t = __darwin_mach_port_name_t
+__darwin_mcontext_t = POINTER(mcontext)
+__darwin_mcontext64_t = POINTER(mcontext64)
+__darwin_pid_t = __int32_t
+__darwin_pthread_attr_t = _opaque_pthread_attr_t
+__darwin_pthread_cond_t = _opaque_pthread_cond_t
+__darwin_pthread_condattr_t = _opaque_pthread_condattr_t
+__darwin_pthread_key_t = c_ulong
+__darwin_pthread_mutex_t = _opaque_pthread_mutex_t
+__darwin_pthread_mutexattr_t = _opaque_pthread_mutexattr_t
+__darwin_pthread_once_t = _opaque_pthread_once_t
+__darwin_pthread_rwlock_t = _opaque_pthread_rwlock_t
+__darwin_pthread_rwlockattr_t = _opaque_pthread_rwlockattr_t
+__darwin_pthread_t = POINTER(_opaque_pthread_t)
+__darwin_sigset_t = __uint32_t
+__darwin_suseconds_t = __int32_t
+__darwin_uid_t = __uint32_t
+__darwin_useconds_t = __uint32_t
+__darwin_uuid_t = c_ubyte * 16
+class sigaltstack(Structure):
+ pass
+sigaltstack._fields_ = [
+ ('ss_sp', c_void_p),
+ ('ss_size', __darwin_size_t),
+ ('ss_flags', c_int),
+]
+assert sizeof(sigaltstack) == 12, sizeof(sigaltstack)
+assert alignment(sigaltstack) == 4, alignment(sigaltstack)
+__darwin_stack_t = sigaltstack
+class ucontext(Structure):
+ pass
+ucontext._fields_ = [
+ ('uc_onstack', c_int),
+ ('uc_sigmask', __darwin_sigset_t),
+ ('uc_stack', __darwin_stack_t),
+ ('uc_link', POINTER(ucontext)),
+ ('uc_mcsize', __darwin_size_t),
+ ('uc_mcontext', __darwin_mcontext_t),
+]
+assert sizeof(ucontext) == 32, sizeof(ucontext)
+assert alignment(ucontext) == 4, alignment(ucontext)
+__darwin_ucontext_t = ucontext
+class ucontext64(Structure):
+ pass
+ucontext64._fields_ = [
+ ('uc_onstack', c_int),
+ ('uc_sigmask', __darwin_sigset_t),
+ ('uc_stack', __darwin_stack_t),
+ ('uc_link', POINTER(ucontext64)),
+ ('uc_mcsize', __darwin_size_t),
+ ('uc_mcontext64', __darwin_mcontext64_t),
+]
+assert sizeof(ucontext64) == 32, sizeof(ucontext64)
+assert alignment(ucontext64) == 4, alignment(ucontext64)
+__darwin_ucontext64_t = ucontext64
+class timeval(Structure):
+ pass
+timeval._fields_ = [
+ ('tv_sec', __darwin_time_t),
+ ('tv_usec', __darwin_suseconds_t),
+]
+assert sizeof(timeval) == 8, sizeof(timeval)
+assert alignment(timeval) == 4, alignment(timeval)
+rlim_t = __int64_t
+class rusage(Structure):
+ pass
+rusage._fields_ = [
+ ('ru_utime', timeval),
+ ('ru_stime', timeval),
+ ('ru_maxrss', c_long),
+ ('ru_ixrss', c_long),
+ ('ru_idrss', c_long),
+ ('ru_isrss', c_long),
+ ('ru_minflt', c_long),
+ ('ru_majflt', c_long),
+ ('ru_nswap', c_long),
+ ('ru_inblock', c_long),
+ ('ru_oublock', c_long),
+ ('ru_msgsnd', c_long),
+ ('ru_msgrcv', c_long),
+ ('ru_nsignals', c_long),
+ ('ru_nvcsw', c_long),
+ ('ru_nivcsw', c_long),
+]
+assert sizeof(rusage) == 72, sizeof(rusage)
+assert alignment(rusage) == 4, alignment(rusage)
+class rlimit(Structure):
+ pass
+rlimit._pack_ = 4
+rlimit._fields_ = [
+ ('rlim_cur', rlim_t),
+ ('rlim_max', rlim_t),
+]
+assert sizeof(rlimit) == 16, sizeof(rlimit)
+assert alignment(rlimit) == 4, alignment(rlimit)
+mcontext_t = __darwin_mcontext_t
+mcontext64_t = __darwin_mcontext64_t
+pthread_attr_t = __darwin_pthread_attr_t
+sigset_t = __darwin_sigset_t
+ucontext_t = __darwin_ucontext_t
+ucontext64_t = __darwin_ucontext64_t
+uid_t = __darwin_uid_t
+class sigval(Union):
+ pass
+sigval._fields_ = [
+ ('sival_int', c_int),
+ ('sival_ptr', c_void_p),
+]
+assert sizeof(sigval) == 4, sizeof(sigval)
+assert alignment(sigval) == 4, alignment(sigval)
+class sigevent(Structure):
+ pass
+sigevent._fields_ = [
+ ('sigev_notify', c_int),
+ ('sigev_signo', c_int),
+ ('sigev_value', sigval),
+ ('sigev_notify_function', CFUNCTYPE(None, sigval)),
+ ('sigev_notify_attributes', POINTER(pthread_attr_t)),
+]
+assert sizeof(sigevent) == 20, sizeof(sigevent)
+assert alignment(sigevent) == 4, alignment(sigevent)
+class __siginfo(Structure):
+ pass
+pid_t = __darwin_pid_t
+__siginfo._fields_ = [
+ ('si_signo', c_int),
+ ('si_errno', c_int),
+ ('si_code', c_int),
+ ('si_pid', pid_t),
+ ('si_uid', uid_t),
+ ('si_status', c_int),
+ ('si_addr', c_void_p),
+ ('si_value', sigval),
+ ('si_band', c_long),
+ ('pad', c_ulong * 7),
+]
+assert sizeof(__siginfo) == 64, sizeof(__siginfo)
+assert alignment(__siginfo) == 4, alignment(__siginfo)
+siginfo_t = __siginfo
+class __sigaction_u(Union):
+ pass
+__sigaction_u._fields_ = [
+ ('__sa_handler', CFUNCTYPE(None, c_int)),
+ ('__sa_sigaction', CFUNCTYPE(None, c_int, POINTER(__siginfo), c_void_p)),
+]
+assert sizeof(__sigaction_u) == 4, sizeof(__sigaction_u)
+assert alignment(__sigaction_u) == 4, alignment(__sigaction_u)
+class __sigaction(Structure):
+ pass
+__sigaction._fields_ = [
+ ('__sigaction_u', __sigaction_u),
+ ('sa_tramp', CFUNCTYPE(None, c_void_p, c_int, c_int, POINTER(siginfo_t), c_void_p)),
+ ('sa_mask', sigset_t),
+ ('sa_flags', c_int),
+]
+assert sizeof(__sigaction) == 16, sizeof(__sigaction)
+assert alignment(__sigaction) == 4, alignment(__sigaction)
+class sigaction(Structure):
+ pass
+sigaction._fields_ = [
+ ('__sigaction_u', __sigaction_u),
+ ('sa_mask', sigset_t),
+ ('sa_flags', c_int),
+]
+assert sizeof(sigaction) == 12, sizeof(sigaction)
+assert alignment(sigaction) == 4, alignment(sigaction)
+sig_t = CFUNCTYPE(None, c_int)
+stack_t = __darwin_stack_t
+class sigvec(Structure):
+ pass
+sigvec._fields_ = [
+ ('sv_handler', CFUNCTYPE(None, c_int)),
+ ('sv_mask', c_int),
+ ('sv_flags', c_int),
+]
+assert sizeof(sigvec) == 12, sizeof(sigvec)
+assert alignment(sigvec) == 4, alignment(sigvec)
+class sigstack(Structure):
+ pass
+sigstack._fields_ = [
+ ('ss_sp', STRING),
+ ('ss_onstack', c_int),
+]
+assert sizeof(sigstack) == 8, sizeof(sigstack)
+assert alignment(sigstack) == 4, alignment(sigstack)
+u_char = c_ubyte
+u_short = c_ushort
+u_int = c_uint
+u_long = c_ulong
+ushort = c_ushort
+uint = c_uint
+u_quad_t = u_int64_t
+quad_t = int64_t
+qaddr_t = POINTER(quad_t)
+caddr_t = STRING
+daddr_t = int32_t
+fixpt_t = u_int32_t
+blkcnt_t = __darwin_blkcnt_t
+blksize_t = __darwin_blksize_t
+gid_t = __darwin_gid_t
+in_addr_t = __uint32_t
+in_port_t = __uint16_t
+ino_t = __darwin_ino_t
+key_t = __int32_t
+nlink_t = __uint16_t
+off_t = __darwin_off_t
+segsz_t = int32_t
+swblk_t = int32_t
+clock_t = __darwin_clock_t
+ssize_t = __darwin_ssize_t
+useconds_t = __darwin_useconds_t
+suseconds_t = __darwin_suseconds_t
+fd_mask = __int32_t
+class fd_set(Structure):
+ pass
+fd_set._fields_ = [
+ ('fds_bits', __int32_t * 32),
+]
+assert sizeof(fd_set) == 128, sizeof(fd_set)
+assert alignment(fd_set) == 4, alignment(fd_set)
+pthread_cond_t = __darwin_pthread_cond_t
+pthread_condattr_t = __darwin_pthread_condattr_t
+pthread_mutex_t = __darwin_pthread_mutex_t
+pthread_mutexattr_t = __darwin_pthread_mutexattr_t
+pthread_once_t = __darwin_pthread_once_t
+pthread_rwlock_t = __darwin_pthread_rwlock_t
+pthread_rwlockattr_t = __darwin_pthread_rwlockattr_t
+pthread_t = __darwin_pthread_t
+pthread_key_t = __darwin_pthread_key_t
+fsblkcnt_t = __darwin_fsblkcnt_t
+fsfilcnt_t = __darwin_fsfilcnt_t
+
+# values for enumeration 'idtype_t'
+idtype_t = c_int # enum
+id_t = __darwin_id_t
+class wait(Union):
+ pass
+class N4wait3DOLLAR_3E(Structure):
+ pass
+N4wait3DOLLAR_3E._fields_ = [
+ ('w_Termsig', c_uint, 7),
+ ('w_Coredump', c_uint, 1),
+ ('w_Retcode', c_uint, 8),
+ ('w_Filler', c_uint, 16),
+]
+assert sizeof(N4wait3DOLLAR_3E) == 4, sizeof(N4wait3DOLLAR_3E)
+assert alignment(N4wait3DOLLAR_3E) == 4, alignment(N4wait3DOLLAR_3E)
+class N4wait3DOLLAR_4E(Structure):
+ pass
+N4wait3DOLLAR_4E._fields_ = [
+ ('w_Stopval', c_uint, 8),
+ ('w_Stopsig', c_uint, 8),
+ ('w_Filler', c_uint, 16),
+]
+assert sizeof(N4wait3DOLLAR_4E) == 4, sizeof(N4wait3DOLLAR_4E)
+assert alignment(N4wait3DOLLAR_4E) == 4, alignment(N4wait3DOLLAR_4E)
+wait._fields_ = [
+ ('w_status', c_int),
+ ('w_T', N4wait3DOLLAR_3E),
+ ('w_S', N4wait3DOLLAR_4E),
+]
+assert sizeof(wait) == 4, sizeof(wait)
+assert alignment(wait) == 4, alignment(wait)
+class timespec(Structure):
+ pass
+timespec._fields_ = [
+ ('tv_sec', time_t),
+ ('tv_nsec', c_long),
+]
+assert sizeof(timespec) == 8, sizeof(timespec)
+assert alignment(timespec) == 4, alignment(timespec)
+class tm(Structure):
+ pass
+tm._fields_ = [
+ ('tm_sec', c_int),
+ ('tm_min', c_int),
+ ('tm_hour', c_int),
+ ('tm_mday', c_int),
+ ('tm_mon', c_int),
+ ('tm_year', c_int),
+ ('tm_wday', c_int),
+ ('tm_yday', c_int),
+ ('tm_isdst', c_int),
+ ('tm_gmtoff', c_long),
+ ('tm_zone', STRING),
+]
+assert sizeof(tm) == 44, sizeof(tm)
+assert alignment(tm) == 4, alignment(tm)
+__gnuc_va_list = STRING
+ptrdiff_t = c_int
+int8_t = c_byte
+int16_t = c_short
+uint8_t = c_ubyte
+uint16_t = c_ushort
+uint32_t = c_uint
+uint64_t = c_ulonglong
+int_least8_t = int8_t
+int_least16_t = int16_t
+int_least32_t = int32_t
+int_least64_t = int64_t
+uint_least8_t = uint8_t
+uint_least16_t = uint16_t
+uint_least32_t = uint32_t
+uint_least64_t = uint64_t
+int_fast8_t = int8_t
+int_fast16_t = int16_t
+int_fast32_t = int32_t
+int_fast64_t = int64_t
+uint_fast8_t = uint8_t
+uint_fast16_t = uint16_t
+uint_fast32_t = uint32_t
+uint_fast64_t = uint64_t
+intptr_t = c_long
+uintptr_t = c_ulong
+intmax_t = c_longlong
+uintmax_t = c_ulonglong
+__all__ = ['ENGINE', 'pkcs7_enc_content_st', '__int16_t',
+ 'X509_REVOKED', 'SSL_CTX', 'UIT_BOOLEAN',
+ '__darwin_time_t', 'ucontext64_t', 'int_fast32_t',
+ 'pem_ctx_st', 'uint8_t', 'fpos_t', 'X509', 'COMP_CTX',
+ 'tm', 'N10pem_ctx_st4DOLLAR_17E', 'swblk_t',
+ 'ASN1_TEMPLATE', '__darwin_pthread_t', 'fixpt_t',
+ 'BIO_METHOD', 'ASN1_PRINTABLESTRING', 'EVP_ENCODE_CTX',
+ 'dh_method', 'bio_f_buffer_ctx_struct', 'in_port_t',
+ 'X509_SIG', '__darwin_ssize_t', '__darwin_sigset_t',
+ 'wait', 'uint_fast16_t', 'N12asn1_type_st4DOLLAR_11E',
+ 'uint_least8_t', 'pthread_rwlock_t', 'ASN1_IA5STRING',
+ 'fsfilcnt_t', 'ucontext', '__uint64_t', 'timespec',
+ 'x509_cinf_st', 'COMP_METHOD', 'MD5_CTX', 'buf_mem_st',
+ 'ASN1_ENCODING_st', 'PBEPARAM', 'X509_NAME_ENTRY',
+ '__darwin_va_list', 'ucontext_t', 'lhash_st',
+ 'N4wait3DOLLAR_4E', '__darwin_uuid_t',
+ '_ossl_old_des_ks_struct', 'id_t', 'ASN1_BIT_STRING',
+ 'va_list', '__darwin_wchar_t', 'pthread_key_t',
+ 'pkcs7_signer_info_st', 'ASN1_METHOD', 'DSA_SIG', 'DSA',
+ 'UIT_NONE', 'pthread_t', '__darwin_useconds_t',
+ 'uint_fast8_t', 'UI_STRING', 'DES_cblock',
+ '__darwin_mcontext64_t', 'rlim_t', 'PEM_Encode_Seal_st',
+ 'SHAstate_st', 'u_quad_t', 'openssl_fptr',
+ '_opaque_pthread_rwlockattr_t',
+ 'N18x509_attributes_st4DOLLAR_13E',
+ '__darwin_pthread_rwlock_t', 'daddr_t', 'ui_string_st',
+ 'x509_file_st', 'X509_req_info_st', 'int_least64_t',
+ 'evp_Encode_Ctx_st', 'X509_OBJECTS', 'CRYPTO_EX_DATA',
+ '__int8_t', 'AUTHORITY_KEYID_st', '_opaque_pthread_attr_t',
+ 'sigstack', 'EVP_CIPHER_CTX', 'X509_extension_st', 'pid_t',
+ 'RSA_METHOD', 'PEM_USER', 'pem_recip_st', 'env_md_ctx_st',
+ 'rc5_key_st', 'ui_st', 'X509_PUBKEY', 'u_int8_t',
+ 'ASN1_ITEM_st', 'pkcs7_recip_info_st', 'ssl2_state_st',
+ 'off_t', 'N10ssl_ctx_st4DOLLAR_18E', 'crypto_ex_data_st',
+ 'ui_method_st', '__darwin_pthread_rwlockattr_t',
+ 'CRYPTO_EX_dup', '__darwin_ino_t', '__sFILE',
+ 'OSUnknownByteOrder', 'BN_MONT_CTX', 'ASN1_NULL', 'time_t',
+ 'CRYPTO_EX_new', 'asn1_type_st', 'CRYPTO_EX_DATA_FUNCS',
+ 'user_time_t', 'BIGNUM', 'pthread_rwlockattr_t',
+ 'ASN1_VALUE_st', 'DH_METHOD', '__darwin_off_t',
+ '_opaque_pthread_t', 'bn_blinding_st', 'RSA', 'ssize_t',
+ 'mcontext64_t', 'user_long_t', 'fsblkcnt_t', 'cert_st',
+ '__darwin_pthread_condattr_t', 'X509_PKEY',
+ '__darwin_id_t', '__darwin_nl_item', 'SSL2_STATE', 'FILE',
+ 'pthread_mutexattr_t', 'size_t',
+ '_ossl_old_des_key_schedule', 'pkcs7_issuer_and_serial_st',
+ 'sigval', 'CRYPTO_MEM_LEAK_CB', 'X509_NAME', 'blkcnt_t',
+ 'uint_least16_t', '__darwin_dev_t', 'evp_cipher_info_st',
+ 'BN_BLINDING', 'ssl3_state_st', 'uint_least64_t',
+ 'user_addr_t', 'DES_key_schedule', 'RIPEMD160_CTX',
+ 'u_char', 'X509_algor_st', 'uid_t', 'sess_cert_st',
+ 'u_int64_t', 'u_int16_t', 'sigset_t', '__darwin_ptrdiff_t',
+ 'ASN1_CTX', 'STACK', '__int32_t', 'UI_METHOD',
+ 'NETSCAPE_SPKI', 'UIT_PROMPT', 'st_CRYPTO_EX_DATA_IMPL',
+ 'cast_key_st', 'X509_HASH_DIR_CTX', 'sigevent',
+ 'user_ssize_t', 'clock_t', 'aes_key_st',
+ '__darwin_socklen_t', '__darwin_intptr_t', 'int_fast64_t',
+ 'asn1_string_table_st', 'uint_fast32_t',
+ 'ASN1_VISIBLESTRING', 'DSA_SIG_st', 'obj_name_st',
+ 'X509_LOOKUP_METHOD', 'u_int32_t', 'EVP_CIPHER_INFO',
+ '__gnuc_va_list', 'AES_KEY', 'PKCS7_ISSUER_AND_SERIAL',
+ 'BN_CTX', '__darwin_blkcnt_t', 'key_t', 'SHA_CTX',
+ 'pkcs7_signed_st', 'SSL', 'N10pem_ctx_st4DOLLAR_16E',
+ 'pthread_attr_t', 'EVP_MD', 'uint', 'ASN1_BOOLEAN',
+ 'ino_t', '__darwin_clock_t', 'ASN1_OCTET_STRING',
+ 'asn1_ctx_st', 'BIO_F_BUFFER_CTX', 'bn_mont_ctx_st',
+ 'X509_REQ_INFO', 'PEM_CTX', 'sigvec',
+ '__darwin_pthread_mutexattr_t', 'x509_attributes_st',
+ 'stack_t', '__darwin_mode_t', '__mbstate_t',
+ 'asn1_object_st', 'ASN1_ENCODING', '__uint8_t',
+ 'LHASH_NODE', 'PKCS7_SIGNER_INFO', 'asn1_method_st',
+ 'stack_st', 'bio_info_cb', 'div_t', 'UIT_VERIFY',
+ 'PBEPARAM_st', 'N4wait3DOLLAR_3E', 'quad_t', '__siginfo',
+ '__darwin_mbstate_t', 'rsa_st', 'ASN1_UNIVERSALSTRING',
+ 'uint64_t', 'ssl_comp_st', 'X509_OBJECT', 'pthread_cond_t',
+ 'DH', '__darwin_wctype_t', 'PKCS7_ENVELOPE', 'ASN1_TLC_st',
+ 'sig_atomic_t', 'BIO', 'nlink_t', 'BUF_MEM', 'SSL3_RECORD',
+ 'bio_method_st', 'timeval', 'UI_string_types', 'BIO_dummy',
+ 'ssl_ctx_st', 'NETSCAPE_CERT_SEQUENCE',
+ 'BIT_STRING_BITNAME_st', '__darwin_pthread_attr_t',
+ 'int8_t', '__darwin_wint_t', 'OBJ_NAME',
+ 'PKCS8_PRIV_KEY_INFO', 'PBE2PARAM_st',
+ 'LHASH_DOALL_FN_TYPE', 'x509_st', 'X509_VAL', 'dev_t',
+ 'ASN1_TEMPLATE_st', 'MD5state_st', '__uint16_t',
+ 'LHASH_DOALL_ARG_FN_TYPE', 'mdc2_ctx_st', 'SSL3_STATE',
+ 'ssl3_buffer_st', 'ASN1_ITEM_EXP',
+ '_opaque_pthread_condattr_t', 'mode_t', 'ASN1_VALUE',
+ 'qaddr_t', '__darwin_gid_t', 'EVP_PKEY', 'CRYPTO_EX_free',
+ '_ossl_old_des_cblock', 'X509_INFO', 'asn1_string_st',
+ 'intptr_t', 'UIT_INFO', 'int_fast8_t', 'sigaltstack',
+ 'env_md_st', 'LHASH', '__darwin_ucontext_t',
+ 'PKCS7_SIGN_ENVELOPE', '__darwin_mcontext_t', 'ct_rune_t',
+ 'MD2_CTX', 'pthread_once_t', 'SSL3_BUFFER', 'fd_mask',
+ 'ASN1_TYPE', 'PKCS7_SIGNED', 'ssl3_record_st', 'BF_KEY',
+ 'MD4state_st', 'MD4_CTX', 'int16_t', 'SSL_CIPHER',
+ 'rune_t', 'X509_TRUST', 'siginfo_t', 'X509_STORE',
+ '__sbuf', 'X509_STORE_CTX', '__darwin_blksize_t', 'ldiv_t',
+ 'ASN1_TIME', 'SSL_METHOD', 'X509_LOOKUP',
+ 'Netscape_spki_st', 'P_PID', 'sigaction', 'sig_t',
+ 'hostent', 'x509_cert_aux_st', '_opaque_pthread_cond_t',
+ 'segsz_t', 'ushort', '__darwin_ct_rune_t', 'fd_set',
+ 'BN_RECP_CTX', 'x509_lookup_st', 'uint16_t', 'pkcs7_st',
+ 'asn1_header_st', '__darwin_pthread_key_t',
+ 'x509_trust_st', '__darwin_pthread_handler_rec', 'int32_t',
+ 'X509_CRL_INFO', 'N11evp_pkey_st4DOLLAR_12E', 'MDC2_CTX',
+ 'N23_ossl_old_des_ks_struct4DOLLAR_10E', 'ASN1_HEADER',
+ 'X509_crl_info_st', 'LHASH_HASH_FN_TYPE',
+ '_opaque_pthread_mutexattr_t', 'ssl_st',
+ 'N8pkcs7_st4DOLLAR_15E', 'evp_pkey_st',
+ 'pkcs7_signedandenveloped_st', '__darwin_mach_port_t',
+ 'EVP_PBE_KEYGEN', '_opaque_pthread_mutex_t',
+ 'ASN1_UTCTIME', 'mcontext', 'crypto_ex_data_func_st',
+ 'u_long', 'PBKDF2PARAM_st', 'rc4_key_st', 'DSA_METHOD',
+ 'EVP_CIPHER', 'BIT_STRING_BITNAME', 'PKCS7_RECIP_INFO',
+ 'ssl3_enc_method', 'X509_CERT_AUX', 'uintmax_t',
+ 'int_fast16_t', 'RC5_32_KEY', 'ucontext64', 'ASN1_INTEGER',
+ 'u_short', 'N14x509_object_st4DOLLAR_14E', 'mcontext64',
+ 'X509_sig_st', 'ASN1_GENERALSTRING', 'PKCS7', '__sFILEX',
+ 'X509_name_entry_st', 'ssl_session_st', 'caddr_t',
+ 'bignum_st', 'X509_CINF', '__darwin_pthread_cond_t',
+ 'ASN1_TLC', 'PKCS7_ENCRYPT', 'NETSCAPE_SPKAC',
+ 'Netscape_spkac_st', 'idtype_t', 'UIT_ERROR',
+ 'uint_fast64_t', 'in_addr_t', 'pthread_mutex_t',
+ '__int64_t', 'ASN1_BMPSTRING', 'uint32_t',
+ 'PEM_ENCODE_SEAL_CTX', 'suseconds_t', 'ASN1_OBJECT',
+ 'X509_val_st', 'private_key_st', 'CRYPTO_dynlock',
+ 'X509_objects_st', 'CRYPTO_EX_DATA_IMPL',
+ 'pthread_condattr_t', 'PKCS7_DIGEST', 'uint_least32_t',
+ 'ASN1_STRING', '__uint32_t', 'P_PGID', 'rsa_meth_st',
+ 'X509_crl_st', 'RC2_KEY', '__darwin_fsfilcnt_t',
+ 'X509_revoked_st', 'PBE2PARAM', 'blksize_t',
+ 'Netscape_certificate_sequence', 'ssl_cipher_st',
+ 'bignum_ctx', 'register_t', 'ASN1_UTF8STRING',
+ 'pkcs7_encrypted_st', 'RC4_KEY', '__darwin_ucontext64_t',
+ 'N13ssl2_state_st4DOLLAR_19E', 'bn_recp_ctx_st',
+ 'CAST_KEY', 'X509_ATTRIBUTE', '__darwin_suseconds_t',
+ '__sigaction', 'user_ulong_t', 'syscall_arg_t',
+ 'evp_cipher_ctx_st', 'X509_ALGOR', 'mcontext_t',
+ 'const_DES_cblock', '__darwin_fsblkcnt_t', 'dsa_st',
+ 'int_least8_t', 'MD2state_st', 'X509_EXTENSION',
+ 'GEN_SESSION_CB', 'int_least16_t', '__darwin_wctrans_t',
+ 'PBKDF2PARAM', 'x509_lookup_method_st', 'pem_password_cb',
+ 'X509_info_st', 'x509_store_st', '__darwin_natural_t',
+ 'X509_pubkey_st', 'pkcs7_digest_st', '__darwin_size_t',
+ 'ASN1_STRING_TABLE', 'OSLittleEndian', 'RIPEMD160state_st',
+ 'pkcs7_enveloped_st', 'UI', 'ptrdiff_t', 'X509_REQ',
+ 'CRYPTO_dynlock_value', 'X509_req_st', 'x509_store_ctx_st',
+ 'N13ssl3_state_st4DOLLAR_20E', 'lhash_node_st',
+ '__darwin_pthread_mutex_t', 'LHASH_COMP_FN_TYPE',
+ '__darwin_rune_t', 'rlimit', '__darwin_pthread_once_t',
+ 'OSBigEndian', 'uintptr_t', '__darwin_uid_t', 'u_int',
+ 'ASN1_T61STRING', 'gid_t', 'ssl_method_st', 'ASN1_ITEM',
+ 'ASN1_ENUMERATED', '_opaque_pthread_rwlock_t',
+ 'pkcs8_priv_key_info_st', 'intmax_t', 'sigcontext',
+ 'X509_CRL', 'rc2_key_st', 'engine_st', 'x509_object_st',
+ '_opaque_pthread_once_t', 'DES_ks', 'SSL_COMP',
+ 'dsa_method', 'int64_t', 'bio_st', 'bf_key_st',
+ 'ASN1_GENERALIZEDTIME', 'PKCS7_ENC_CONTENT',
+ '__darwin_pid_t', 'lldiv_t', 'comp_method_st',
+ 'EVP_MD_CTX', 'evp_cipher_st', 'X509_name_st',
+ 'x509_hash_dir_st', '__darwin_mach_port_name_t',
+ 'useconds_t', 'user_size_t', 'SSL_SESSION', 'rusage',
+ 'ssl_crock_st', 'int_least32_t', '__sigaction_u', 'dh_st',
+ 'P_ALL', '__darwin_stack_t', 'N6DES_ks3DOLLAR_9E',
+ 'comp_ctx_st', 'X509_CERT_FILE_CTX']
diff --git a/lib/python2.7/lib2to3/tests/data/py2_test_grammar.py b/lib/python2.7/lib2to3/tests/data/py2_test_grammar.py
new file mode 100644
index 0000000..b5a4137
--- /dev/null
+++ b/lib/python2.7/lib2to3/tests/data/py2_test_grammar.py
@@ -0,0 +1,974 @@
+# Python test set -- part 1, grammar.
+# This just tests whether the parser accepts them all.
+
+# NOTE: When you run this test as a script from the command line, you
+# get warnings about certain hex/oct constants. Since those are
+# issued by the parser, you can't suppress them by adding a
+# filterwarnings() call to this module. Therefore, to shut up the
+# regression test, the filterwarnings() call has been added to
+# regrtest.py.
+
+from test.test_support import run_unittest, check_syntax_error
+import unittest
+import sys
+# testing import *
+from sys import *
+
+class TokenTests(unittest.TestCase):
+
+ def testBackslash(self):
+ # Backslash means line continuation:
+ x = 1 \
+ + 1
+ self.assertEquals(x, 2, 'backslash for line continuation')
+
+ # Backslash does not mean continuation in comments :\
+ x = 0
+ self.assertEquals(x, 0, 'backslash ending comment')
+
+ def testPlainIntegers(self):
+ self.assertEquals(0xff, 255)
+ self.assertEquals(0377, 255)
+ self.assertEquals(2147483647, 017777777777)
+ # "0x" is not a valid literal
+ self.assertRaises(SyntaxError, eval, "0x")
+ from sys import maxint
+ if maxint == 2147483647:
+ self.assertEquals(-2147483647-1, -020000000000)
+ # XXX -2147483648
+ self.assert_(037777777777 > 0)
+ self.assert_(0xffffffff > 0)
+ for s in '2147483648', '040000000000', '0x100000000':
+ try:
+ x = eval(s)
+ except OverflowError:
+ self.fail("OverflowError on huge integer literal %r" % s)
+ elif maxint == 9223372036854775807:
+ self.assertEquals(-9223372036854775807-1, -01000000000000000000000)
+ self.assert_(01777777777777777777777 > 0)
+ self.assert_(0xffffffffffffffff > 0)
+ for s in '9223372036854775808', '02000000000000000000000', \
+ '0x10000000000000000':
+ try:
+ x = eval(s)
+ except OverflowError:
+ self.fail("OverflowError on huge integer literal %r" % s)
+ else:
+ self.fail('Weird maxint value %r' % maxint)
+
+ def testLongIntegers(self):
+ x = 0L
+ x = 0l
+ x = 0xffffffffffffffffL
+ x = 0xffffffffffffffffl
+ x = 077777777777777777L
+ x = 077777777777777777l
+ x = 123456789012345678901234567890L
+ x = 123456789012345678901234567890l
+
+ def testFloats(self):
+ x = 3.14
+ x = 314.
+ x = 0.314
+ # XXX x = 000.314
+ x = .314
+ x = 3e14
+ x = 3E14
+ x = 3e-14
+ x = 3e+14
+ x = 3.e14
+ x = .3e14
+ x = 3.1e4
+
+ def testStringLiterals(self):
+ x = ''; y = ""; self.assert_(len(x) == 0 and x == y)
+ x = '\''; y = "'"; self.assert_(len(x) == 1 and x == y and ord(x) == 39)
+ x = '"'; y = "\""; self.assert_(len(x) == 1 and x == y and ord(x) == 34)
+ x = "doesn't \"shrink\" does it"
+ y = 'doesn\'t "shrink" does it'
+ self.assert_(len(x) == 24 and x == y)
+ x = "does \"shrink\" doesn't it"
+ y = 'does "shrink" doesn\'t it'
+ self.assert_(len(x) == 24 and x == y)
+ x = """
+The "quick"
+brown fox
+jumps over
+the 'lazy' dog.
+"""
+ y = '\nThe "quick"\nbrown fox\njumps over\nthe \'lazy\' dog.\n'
+ self.assertEquals(x, y)
+ y = '''
+The "quick"
+brown fox
+jumps over
+the 'lazy' dog.
+'''
+ self.assertEquals(x, y)
+ y = "\n\
+The \"quick\"\n\
+brown fox\n\
+jumps over\n\
+the 'lazy' dog.\n\
+"
+ self.assertEquals(x, y)
+ y = '\n\
+The \"quick\"\n\
+brown fox\n\
+jumps over\n\
+the \'lazy\' dog.\n\
+'
+ self.assertEquals(x, y)
+
+
+class GrammarTests(unittest.TestCase):
+
+ # single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE
+ # XXX can't test in a script -- this rule is only used when interactive
+
+ # file_input: (NEWLINE | stmt)* ENDMARKER
+ # Being tested at this very moment by this very module
+
+ # expr_input: testlist NEWLINE
+ # XXX Hard to test -- used only in calls to input()
+
+ def testEvalInput(self):
+ # testlist ENDMARKER
+ x = eval('1, 0 or 1')
+
+ def testFuncdef(self):
+ ### 'def' NAME parameters ':' suite
+ ### parameters: '(' [varargslist] ')'
+ ### varargslist: (fpdef ['=' test] ',')* ('*' NAME [',' ('**'|'*' '*') NAME]
+ ### | ('**'|'*' '*') NAME)
+ ### | fpdef ['=' test] (',' fpdef ['=' test])* [',']
+ ### fpdef: NAME | '(' fplist ')'
+ ### fplist: fpdef (',' fpdef)* [',']
+ ### arglist: (argument ',')* (argument | '*' test [',' '**' test] | '**' test)
+ ### argument: [test '='] test # Really [keyword '='] test
+ def f1(): pass
+ f1()
+ f1(*())
+ f1(*(), **{})
+ def f2(one_argument): pass
+ def f3(two, arguments): pass
+ def f4(two, (compound, (argument, list))): pass
+ def f5((compound, first), two): pass
+ self.assertEquals(f2.func_code.co_varnames, ('one_argument',))
+ self.assertEquals(f3.func_code.co_varnames, ('two', 'arguments'))
+ if sys.platform.startswith('java'):
+ self.assertEquals(f4.func_code.co_varnames,
+ ('two', '(compound, (argument, list))', 'compound', 'argument',
+ 'list',))
+ self.assertEquals(f5.func_code.co_varnames,
+ ('(compound, first)', 'two', 'compound', 'first'))
+ else:
+ self.assertEquals(f4.func_code.co_varnames,
+ ('two', '.1', 'compound', 'argument', 'list'))
+ self.assertEquals(f5.func_code.co_varnames,
+ ('.0', 'two', 'compound', 'first'))
+ def a1(one_arg,): pass
+ def a2(two, args,): pass
+ def v0(*rest): pass
+ def v1(a, *rest): pass
+ def v2(a, b, *rest): pass
+ def v3(a, (b, c), *rest): return a, b, c, rest
+
+ f1()
+ f2(1)
+ f2(1,)
+ f3(1, 2)
+ f3(1, 2,)
+ f4(1, (2, (3, 4)))
+ v0()
+ v0(1)
+ v0(1,)
+ v0(1,2)
+ v0(1,2,3,4,5,6,7,8,9,0)
+ v1(1)
+ v1(1,)
+ v1(1,2)
+ v1(1,2,3)
+ v1(1,2,3,4,5,6,7,8,9,0)
+ v2(1,2)
+ v2(1,2,3)
+ v2(1,2,3,4)
+ v2(1,2,3,4,5,6,7,8,9,0)
+ v3(1,(2,3))
+ v3(1,(2,3),4)
+ v3(1,(2,3),4,5,6,7,8,9,0)
+
+ # ceval unpacks the formal arguments into the first argcount names;
+ # thus, the names nested inside tuples must appear after these names.
+ if sys.platform.startswith('java'):
+ self.assertEquals(v3.func_code.co_varnames, ('a', '(b, c)', 'rest', 'b', 'c'))
+ else:
+ self.assertEquals(v3.func_code.co_varnames, ('a', '.1', 'rest', 'b', 'c'))
+ self.assertEquals(v3(1, (2, 3), 4), (1, 2, 3, (4,)))
+ def d01(a=1): pass
+ d01()
+ d01(1)
+ d01(*(1,))
+ d01(**{'a':2})
+ def d11(a, b=1): pass
+ d11(1)
+ d11(1, 2)
+ d11(1, **{'b':2})
+ def d21(a, b, c=1): pass
+ d21(1, 2)
+ d21(1, 2, 3)
+ d21(*(1, 2, 3))
+ d21(1, *(2, 3))
+ d21(1, 2, *(3,))
+ d21(1, 2, **{'c':3})
+ def d02(a=1, b=2): pass
+ d02()
+ d02(1)
+ d02(1, 2)
+ d02(*(1, 2))
+ d02(1, *(2,))
+ d02(1, **{'b':2})
+ d02(**{'a': 1, 'b': 2})
+ def d12(a, b=1, c=2): pass
+ d12(1)
+ d12(1, 2)
+ d12(1, 2, 3)
+ def d22(a, b, c=1, d=2): pass
+ d22(1, 2)
+ d22(1, 2, 3)
+ d22(1, 2, 3, 4)
+ def d01v(a=1, *rest): pass
+ d01v()
+ d01v(1)
+ d01v(1, 2)
+ d01v(*(1, 2, 3, 4))
+ d01v(*(1,))
+ d01v(**{'a':2})
+ def d11v(a, b=1, *rest): pass
+ d11v(1)
+ d11v(1, 2)
+ d11v(1, 2, 3)
+ def d21v(a, b, c=1, *rest): pass
+ d21v(1, 2)
+ d21v(1, 2, 3)
+ d21v(1, 2, 3, 4)
+ d21v(*(1, 2, 3, 4))
+ d21v(1, 2, **{'c': 3})
+ def d02v(a=1, b=2, *rest): pass
+ d02v()
+ d02v(1)
+ d02v(1, 2)
+ d02v(1, 2, 3)
+ d02v(1, *(2, 3, 4))
+ d02v(**{'a': 1, 'b': 2})
+ def d12v(a, b=1, c=2, *rest): pass
+ d12v(1)
+ d12v(1, 2)
+ d12v(1, 2, 3)
+ d12v(1, 2, 3, 4)
+ d12v(*(1, 2, 3, 4))
+ d12v(1, 2, *(3, 4, 5))
+ d12v(1, *(2,), **{'c': 3})
+ def d22v(a, b, c=1, d=2, *rest): pass
+ d22v(1, 2)
+ d22v(1, 2, 3)
+ d22v(1, 2, 3, 4)
+ d22v(1, 2, 3, 4, 5)
+ d22v(*(1, 2, 3, 4))
+ d22v(1, 2, *(3, 4, 5))
+ d22v(1, *(2, 3), **{'d': 4})
+ def d31v((x)): pass
+ d31v(1)
+ def d32v((x,)): pass
+ d32v((1,))
+
+ # keyword arguments after *arglist
+ def f(*args, **kwargs):
+ return args, kwargs
+ self.assertEquals(f(1, x=2, *[3, 4], y=5), ((1, 3, 4),
+ {'x':2, 'y':5}))
+ self.assertRaises(SyntaxError, eval, "f(1, *(2,3), 4)")
+ self.assertRaises(SyntaxError, eval, "f(1, x=2, *(3,4), x=5)")
+
+ # Check ast errors in *args and *kwargs
+ check_syntax_error(self, "f(*g(1=2))")
+ check_syntax_error(self, "f(**g(1=2))")
+
+ def testLambdef(self):
+ ### lambdef: 'lambda' [varargslist] ':' test
+ l1 = lambda : 0
+ self.assertEquals(l1(), 0)
+ l2 = lambda : a[d] # XXX just testing the expression
+ l3 = lambda : [2 < x for x in [-1, 3, 0L]]
+ self.assertEquals(l3(), [0, 1, 0])
+ l4 = lambda x = lambda y = lambda z=1 : z : y() : x()
+ self.assertEquals(l4(), 1)
+ l5 = lambda x, y, z=2: x + y + z
+ self.assertEquals(l5(1, 2), 5)
+ self.assertEquals(l5(1, 2, 3), 6)
+ check_syntax_error(self, "lambda x: x = 2")
+ check_syntax_error(self, "lambda (None,): None")
+
+ ### stmt: simple_stmt | compound_stmt
+ # Tested below
+
+ def testSimpleStmt(self):
+ ### simple_stmt: small_stmt (';' small_stmt)* [';']
+ x = 1; pass; del x
+ def foo():
+ # verify statements that end with semi-colons
+ x = 1; pass; del x;
+ foo()
+
+ ### small_stmt: expr_stmt | print_stmt | pass_stmt | del_stmt | flow_stmt | import_stmt | global_stmt | access_stmt | exec_stmt
+ # Tested below
+
+ def testExprStmt(self):
+ # (exprlist '=')* exprlist
+ 1
+ 1, 2, 3
+ x = 1
+ x = 1, 2, 3
+ x = y = z = 1, 2, 3
+ x, y, z = 1, 2, 3
+ abc = a, b, c = x, y, z = xyz = 1, 2, (3, 4)
+
+ check_syntax_error(self, "x + 1 = 1")
+ check_syntax_error(self, "a + 1 = b + 2")
+
+ def testPrintStmt(self):
+ # 'print' (test ',')* [test]
+ import StringIO
+
+ # Can't test printing to real stdout without comparing output
+ # which is not available in unittest.
+ save_stdout = sys.stdout
+ sys.stdout = StringIO.StringIO()
+
+ print 1, 2, 3
+ print 1, 2, 3,
+ print
+ print 0 or 1, 0 or 1,
+ print 0 or 1
+
+ # 'print' '>>' test ','
+ print >> sys.stdout, 1, 2, 3
+ print >> sys.stdout, 1, 2, 3,
+ print >> sys.stdout
+ print >> sys.stdout, 0 or 1, 0 or 1,
+ print >> sys.stdout, 0 or 1
+
+ # test printing to an instance
+ class Gulp:
+ def write(self, msg): pass
+
+ gulp = Gulp()
+ print >> gulp, 1, 2, 3
+ print >> gulp, 1, 2, 3,
+ print >> gulp
+ print >> gulp, 0 or 1, 0 or 1,
+ print >> gulp, 0 or 1
+
+ # test print >> None
+ def driver():
+ oldstdout = sys.stdout
+ sys.stdout = Gulp()
+ try:
+ tellme(Gulp())
+ tellme()
+ finally:
+ sys.stdout = oldstdout
+
+ # we should see this once
+ def tellme(file=sys.stdout):
+ print >> file, 'hello world'
+
+ driver()
+
+ # we should not see this at all
+ def tellme(file=None):
+ print >> file, 'goodbye universe'
+
+ driver()
+
+ self.assertEqual(sys.stdout.getvalue(), '''\
+1 2 3
+1 2 3
+1 1 1
+1 2 3
+1 2 3
+1 1 1
+hello world
+''')
+ sys.stdout = save_stdout
+
+ # syntax errors
+ check_syntax_error(self, 'print ,')
+ check_syntax_error(self, 'print >> x,')
+
+ def testDelStmt(self):
+ # 'del' exprlist
+ abc = [1,2,3]
+ x, y, z = abc
+ xyz = x, y, z
+
+ del abc
+ del x, y, (z, xyz)
+
+ def testPassStmt(self):
+ # 'pass'
+ pass
+
+ # flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt
+ # Tested below
+
+ def testBreakStmt(self):
+ # 'break'
+ while 1: break
+
+ def testContinueStmt(self):
+ # 'continue'
+ i = 1
+ while i: i = 0; continue
+
+ msg = ""
+ while not msg:
+ msg = "ok"
+ try:
+ continue
+ msg = "continue failed to continue inside try"
+ except:
+ msg = "continue inside try called except block"
+ if msg != "ok":
+ self.fail(msg)
+
+ msg = ""
+ while not msg:
+ msg = "finally block not called"
+ try:
+ continue
+ finally:
+ msg = "ok"
+ if msg != "ok":
+ self.fail(msg)
+
+ def test_break_continue_loop(self):
+ # This test warrants an explanation. It is a test specifically for SF bugs
+ # #463359 and #462937. The bug is that a 'break' statement executed or
+ # exception raised inside a try/except inside a loop, *after* a continue
+ # statement has been executed in that loop, will cause the wrong number of
+ # arguments to be popped off the stack and the instruction pointer reset to
+ # a very small number (usually 0). Because of this, the following test
+ # *must* be written as a function, and the tracking vars *must* be function
+ # arguments with default values. Otherwise, the test will loop and loop.
+
+ def test_inner(extra_burning_oil = 1, count=0):
+ big_hippo = 2
+ while big_hippo:
+ count += 1
+ try:
+ if extra_burning_oil and big_hippo == 1:
+ extra_burning_oil -= 1
+ break
+ big_hippo -= 1
+ continue
+ except:
+ raise
+ if count > 2 or big_hippo <> 1:
+ self.fail("continue then break in try/except in loop broken!")
+ test_inner()
+
+ def testReturn(self):
+ # 'return' [testlist]
+ def g1(): return
+ def g2(): return 1
+ g1()
+ x = g2()
+ check_syntax_error(self, "class foo:return 1")
+
+ def testYield(self):
+ check_syntax_error(self, "class foo:yield 1")
+
+ def testRaise(self):
+ # 'raise' test [',' test]
+ try: raise RuntimeError, 'just testing'
+ except RuntimeError: pass
+ try: raise KeyboardInterrupt
+ except KeyboardInterrupt: pass
+
+ def testImport(self):
+ # 'import' dotted_as_names
+ import sys
+ import time, sys
+ # 'from' dotted_name 'import' ('*' | '(' import_as_names ')' | import_as_names)
+ from time import time
+ from time import (time)
+ # not testable inside a function, but already done at top of the module
+ # from sys import *
+ from sys import path, argv
+ from sys import (path, argv)
+ from sys import (path, argv,)
+
+ def testGlobal(self):
+ # 'global' NAME (',' NAME)*
+ global a
+ global a, b
+ global one, two, three, four, five, six, seven, eight, nine, ten
+
+ def testExec(self):
+ # 'exec' expr ['in' expr [',' expr]]
+ z = None
+ del z
+ exec 'z=1+1\n'
+ if z != 2: self.fail('exec \'z=1+1\'\\n')
+ del z
+ exec 'z=1+1'
+ if z != 2: self.fail('exec \'z=1+1\'')
+ z = None
+ del z
+ import types
+ if hasattr(types, "UnicodeType"):
+ exec r"""if 1:
+ exec u'z=1+1\n'
+ if z != 2: self.fail('exec u\'z=1+1\'\\n')
+ del z
+ exec u'z=1+1'
+ if z != 2: self.fail('exec u\'z=1+1\'')"""
+ g = {}
+ exec 'z = 1' in g
+ if g.has_key('__builtins__'): del g['__builtins__']
+ if g != {'z': 1}: self.fail('exec \'z = 1\' in g')
+ g = {}
+ l = {}
+
+ import warnings
+ warnings.filterwarnings("ignore", "global statement", module="<string>")
+ exec 'global a; a = 1; b = 2' in g, l
+ if g.has_key('__builtins__'): del g['__builtins__']
+ if l.has_key('__builtins__'): del l['__builtins__']
+ if (g, l) != ({'a':1}, {'b':2}):
+ self.fail('exec ... in g (%s), l (%s)' %(g,l))
+
+ def testAssert(self):
+ # assert_stmt: 'assert' test [',' test]
+ assert 1
+ assert 1, 1
+ assert lambda x:x
+ assert 1, lambda x:x+1
+ try:
+ assert 0, "msg"
+ except AssertionError, e:
+ self.assertEquals(e.args[0], "msg")
+ else:
+ if __debug__:
+ self.fail("AssertionError not raised by assert 0")
+
+ ### compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | funcdef | classdef
+ # Tested below
+
+ def testIf(self):
+ # 'if' test ':' suite ('elif' test ':' suite)* ['else' ':' suite]
+ if 1: pass
+ if 1: pass
+ else: pass
+ if 0: pass
+ elif 0: pass
+ if 0: pass
+ elif 0: pass
+ elif 0: pass
+ elif 0: pass
+ else: pass
+
+ def testWhile(self):
+ # 'while' test ':' suite ['else' ':' suite]
+ while 0: pass
+ while 0: pass
+ else: pass
+
+ # Issue1920: "while 0" is optimized away,
+ # ensure that the "else" clause is still present.
+ x = 0
+ while 0:
+ x = 1
+ else:
+ x = 2
+ self.assertEquals(x, 2)
+
+ def testFor(self):
+ # 'for' exprlist 'in' exprlist ':' suite ['else' ':' suite]
+ for i in 1, 2, 3: pass
+ for i, j, k in (): pass
+ else: pass
+ class Squares:
+ def __init__(self, max):
+ self.max = max
+ self.sofar = []
+ def __len__(self): return len(self.sofar)
+ def __getitem__(self, i):
+ if not 0 <= i < self.max: raise IndexError
+ n = len(self.sofar)
+ while n <= i:
+ self.sofar.append(n*n)
+ n = n+1
+ return self.sofar[i]
+ n = 0
+ for x in Squares(10): n = n+x
+ if n != 285:
+ self.fail('for over growing sequence')
+
+ result = []
+ for x, in [(1,), (2,), (3,)]:
+ result.append(x)
+ self.assertEqual(result, [1, 2, 3])
+
+ def testTry(self):
+ ### try_stmt: 'try' ':' suite (except_clause ':' suite)+ ['else' ':' suite]
+ ### | 'try' ':' suite 'finally' ':' suite
+ ### except_clause: 'except' [expr [('as' | ',') expr]]
+ try:
+ 1/0
+ except ZeroDivisionError:
+ pass
+ else:
+ pass
+ try: 1/0
+ except EOFError: pass
+ except TypeError as msg: pass
+ except RuntimeError, msg: pass
+ except: pass
+ else: pass
+ try: 1/0
+ except (EOFError, TypeError, ZeroDivisionError): pass
+ try: 1/0
+ except (EOFError, TypeError, ZeroDivisionError), msg: pass
+ try: pass
+ finally: pass
+
+ def testSuite(self):
+ # simple_stmt | NEWLINE INDENT NEWLINE* (stmt NEWLINE*)+ DEDENT
+ if 1: pass
+ if 1:
+ pass
+ if 1:
+ #
+ #
+ #
+ pass
+ pass
+ #
+ pass
+ #
+
+ def testTest(self):
+ ### and_test ('or' and_test)*
+ ### and_test: not_test ('and' not_test)*
+ ### not_test: 'not' not_test | comparison
+ if not 1: pass
+ if 1 and 1: pass
+ if 1 or 1: pass
+ if not not not 1: pass
+ if not 1 and 1 and 1: pass
+ if 1 and 1 or 1 and 1 and 1 or not 1 and 1: pass
+
+ def testComparison(self):
+ ### comparison: expr (comp_op expr)*
+ ### comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not'
+ if 1: pass
+ x = (1 == 1)
+ if 1 == 1: pass
+ if 1 != 1: pass
+ if 1 <> 1: pass
+ if 1 < 1: pass
+ if 1 > 1: pass
+ if 1 <= 1: pass
+ if 1 >= 1: pass
+ if 1 is 1: pass
+ if 1 is not 1: pass
+ if 1 in (): pass
+ if 1 not in (): pass
+ if 1 < 1 > 1 == 1 >= 1 <= 1 <> 1 != 1 in 1 not in 1 is 1 is not 1: pass
+
+ def testBinaryMaskOps(self):
+ x = 1 & 1
+ x = 1 ^ 1
+ x = 1 | 1
+
+ def testShiftOps(self):
+ x = 1 << 1
+ x = 1 >> 1
+ x = 1 << 1 >> 1
+
+ def testAdditiveOps(self):
+ x = 1
+ x = 1 + 1
+ x = 1 - 1 - 1
+ x = 1 - 1 + 1 - 1 + 1
+
+ def testMultiplicativeOps(self):
+ x = 1 * 1
+ x = 1 / 1
+ x = 1 % 1
+ x = 1 / 1 * 1 % 1
+
+ def testUnaryOps(self):
+ x = +1
+ x = -1
+ x = ~1
+ x = ~1 ^ 1 & 1 | 1 & 1 ^ -1
+ x = -1*1/1 + 1*1 - ---1*1
+
+ def testSelectors(self):
+ ### trailer: '(' [testlist] ')' | '[' subscript ']' | '.' NAME
+ ### subscript: expr | [expr] ':' [expr]
+
+ import sys, time
+ c = sys.path[0]
+ x = time.time()
+ x = sys.modules['time'].time()
+ a = '01234'
+ c = a[0]
+ c = a[-1]
+ s = a[0:5]
+ s = a[:5]
+ s = a[0:]
+ s = a[:]
+ s = a[-5:]
+ s = a[:-1]
+ s = a[-4:-3]
+ # A rough test of SF bug 1333982. http://python.org/sf/1333982
+ # The testing here is fairly incomplete.
+ # Test cases should include: commas with 1 and 2 colons
+ d = {}
+ d[1] = 1
+ d[1,] = 2
+ d[1,2] = 3
+ d[1,2,3] = 4
+ L = list(d)
+ L.sort()
+ self.assertEquals(str(L), '[1, (1,), (1, 2), (1, 2, 3)]')
+
+ def testAtoms(self):
+ ### atom: '(' [testlist] ')' | '[' [testlist] ']' | '{' [dictmaker] '}' | '`' testlist '`' | NAME | NUMBER | STRING
+ ### dictmaker: test ':' test (',' test ':' test)* [',']
+
+ x = (1)
+ x = (1 or 2 or 3)
+ x = (1 or 2 or 3, 2, 3)
+
+ x = []
+ x = [1]
+ x = [1 or 2 or 3]
+ x = [1 or 2 or 3, 2, 3]
+ x = []
+
+ x = {}
+ x = {'one': 1}
+ x = {'one': 1,}
+ x = {'one' or 'two': 1 or 2}
+ x = {'one': 1, 'two': 2}
+ x = {'one': 1, 'two': 2,}
+ x = {'one': 1, 'two': 2, 'three': 3, 'four': 4, 'five': 5, 'six': 6}
+
+ x = `x`
+ x = `1 or 2 or 3`
+ self.assertEqual(`1,2`, '(1, 2)')
+
+ x = x
+ x = 'x'
+ x = 123
+
+ ### exprlist: expr (',' expr)* [',']
+ ### testlist: test (',' test)* [',']
+ # These have been exercised enough above
+
+ def testClassdef(self):
+ # 'class' NAME ['(' [testlist] ')'] ':' suite
+ class B: pass
+ class B2(): pass
+ class C1(B): pass
+ class C2(B): pass
+ class D(C1, C2, B): pass
+ class C:
+ def meth1(self): pass
+ def meth2(self, arg): pass
+ def meth3(self, a1, a2): pass
+ # decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE
+ # decorators: decorator+
+ # decorated: decorators (classdef | funcdef)
+ def class_decorator(x):
+ x.decorated = True
+ return x
+ @class_decorator
+ class G:
+ pass
+ self.assertEqual(G.decorated, True)
+
+ def testListcomps(self):
+ # list comprehension tests
+ nums = [1, 2, 3, 4, 5]
+ strs = ["Apple", "Banana", "Coconut"]
+ spcs = [" Apple", " Banana ", "Coco nut "]
+
+ self.assertEqual([s.strip() for s in spcs], ['Apple', 'Banana', 'Coco nut'])
+ self.assertEqual([3 * x for x in nums], [3, 6, 9, 12, 15])
+ self.assertEqual([x for x in nums if x > 2], [3, 4, 5])
+ self.assertEqual([(i, s) for i in nums for s in strs],
+ [(1, 'Apple'), (1, 'Banana'), (1, 'Coconut'),
+ (2, 'Apple'), (2, 'Banana'), (2, 'Coconut'),
+ (3, 'Apple'), (3, 'Banana'), (3, 'Coconut'),
+ (4, 'Apple'), (4, 'Banana'), (4, 'Coconut'),
+ (5, 'Apple'), (5, 'Banana'), (5, 'Coconut')])
+ self.assertEqual([(i, s) for i in nums for s in [f for f in strs if "n" in f]],
+ [(1, 'Banana'), (1, 'Coconut'), (2, 'Banana'), (2, 'Coconut'),
+ (3, 'Banana'), (3, 'Coconut'), (4, 'Banana'), (4, 'Coconut'),
+ (5, 'Banana'), (5, 'Coconut')])
+ self.assertEqual([(lambda a:[a**i for i in range(a+1)])(j) for j in range(5)],
+ [[1], [1, 1], [1, 2, 4], [1, 3, 9, 27], [1, 4, 16, 64, 256]])
+
+ def test_in_func(l):
+ return [None < x < 3 for x in l if x > 2]
+
+ self.assertEqual(test_in_func(nums), [False, False, False])
+
+ def test_nested_front():
+ self.assertEqual([[y for y in [x, x + 1]] for x in [1,3,5]],
+ [[1, 2], [3, 4], [5, 6]])
+
+ test_nested_front()
+
+ check_syntax_error(self, "[i, s for i in nums for s in strs]")
+ check_syntax_error(self, "[x if y]")
+
+ suppliers = [
+ (1, "Boeing"),
+ (2, "Ford"),
+ (3, "Macdonalds")
+ ]
+
+ parts = [
+ (10, "Airliner"),
+ (20, "Engine"),
+ (30, "Cheeseburger")
+ ]
+
+ suppart = [
+ (1, 10), (1, 20), (2, 20), (3, 30)
+ ]
+
+ x = [
+ (sname, pname)
+ for (sno, sname) in suppliers
+ for (pno, pname) in parts
+ for (sp_sno, sp_pno) in suppart
+ if sno == sp_sno and pno == sp_pno
+ ]
+
+ self.assertEqual(x, [('Boeing', 'Airliner'), ('Boeing', 'Engine'), ('Ford', 'Engine'),
+ ('Macdonalds', 'Cheeseburger')])
+
+ def testGenexps(self):
+ # generator expression tests
+ g = ([x for x in range(10)] for x in range(1))
+ self.assertEqual(g.next(), [x for x in range(10)])
+ try:
+ g.next()
+ self.fail('should produce StopIteration exception')
+ except StopIteration:
+ pass
+
+ a = 1
+ try:
+ g = (a for d in a)
+ g.next()
+ self.fail('should produce TypeError')
+ except TypeError:
+ pass
+
+ self.assertEqual(list((x, y) for x in 'abcd' for y in 'abcd'), [(x, y) for x in 'abcd' for y in 'abcd'])
+ self.assertEqual(list((x, y) for x in 'ab' for y in 'xy'), [(x, y) for x in 'ab' for y in 'xy'])
+
+ a = [x for x in range(10)]
+ b = (x for x in (y for y in a))
+ self.assertEqual(sum(b), sum([x for x in range(10)]))
+
+ self.assertEqual(sum(x**2 for x in range(10)), sum([x**2 for x in range(10)]))
+ self.assertEqual(sum(x*x for x in range(10) if x%2), sum([x*x for x in range(10) if x%2]))
+ self.assertEqual(sum(x for x in (y for y in range(10))), sum([x for x in range(10)]))
+ self.assertEqual(sum(x for x in (y for y in (z for z in range(10)))), sum([x for x in range(10)]))
+ self.assertEqual(sum(x for x in [y for y in (z for z in range(10))]), sum([x for x in range(10)]))
+ self.assertEqual(sum(x for x in (y for y in (z for z in range(10) if True)) if True), sum([x for x in range(10)]))
+ self.assertEqual(sum(x for x in (y for y in (z for z in range(10) if True) if False) if True), 0)
+ check_syntax_error(self, "foo(x for x in range(10), 100)")
+ check_syntax_error(self, "foo(100, x for x in range(10))")
+
+ def testComprehensionSpecials(self):
+ # test for outermost iterable precomputation
+ x = 10; g = (i for i in range(x)); x = 5
+ self.assertEqual(len(list(g)), 10)
+
+ # This should hold, since we're only precomputing the outermost iterable.
+ x = 10; t = False; g = ((i,j) for i in range(x) if t for j in range(x))
+ x = 5; t = True;
+ self.assertEqual([(i,j) for i in range(10) for j in range(5)], list(g))
+
+ # Grammar allows multiple adjacent 'if's in listcomps and genexps,
+ # even though it's silly. Make sure it works (ifelse broke this.)
+ self.assertEqual([ x for x in range(10) if x % 2 if x % 3 ], [1, 5, 7])
+ self.assertEqual(list(x for x in range(10) if x % 2 if x % 3), [1, 5, 7])
+
+ # verify unpacking single element tuples in listcomp/genexp.
+ self.assertEqual([x for x, in [(4,), (5,), (6,)]], [4, 5, 6])
+ self.assertEqual(list(x for x, in [(7,), (8,), (9,)]), [7, 8, 9])
+
+ def test_with_statement(self):
+ class manager(object):
+ def __enter__(self):
+ return (1, 2)
+ def __exit__(self, *args):
+ pass
+
+ with manager():
+ pass
+ with manager() as x:
+ pass
+ with manager() as (x, y):
+ pass
+ with manager(), manager():
+ pass
+ with manager() as x, manager() as y:
+ pass
+ with manager() as x, manager():
+ pass
+
+ def testIfElseExpr(self):
+ # Test ifelse expressions in various cases
+ def _checkeval(msg, ret):
+ "helper to check that evaluation of expressions is done correctly"
+ print x
+ return ret
+
+ self.assertEqual([ x() for x in lambda: True, lambda: False if x() ], [True])
+ self.assertEqual([ x() for x in (lambda: True, lambda: False) if x() ], [True])
+ self.assertEqual([ x(False) for x in (lambda x: False if x else True, lambda x: True if x else False) if x(False) ], [True])
+ self.assertEqual((5 if 1 else _checkeval("check 1", 0)), 5)
+ self.assertEqual((_checkeval("check 2", 0) if 0 else 5), 5)
+ self.assertEqual((5 and 6 if 0 else 1), 1)
+ self.assertEqual(((5 and 6) if 0 else 1), 1)
+ self.assertEqual((5 and (6 if 1 else 1)), 6)
+ self.assertEqual((0 or _checkeval("check 3", 2) if 0 else 3), 3)
+ self.assertEqual((1 or _checkeval("check 4", 2) if 1 else _checkeval("check 5", 3)), 1)
+ self.assertEqual((0 or 5 if 1 else _checkeval("check 6", 3)), 5)
+ self.assertEqual((not 5 if 1 else 1), False)
+ self.assertEqual((not 5 if 0 else 1), 1)
+ self.assertEqual((6 + 1 if 1 else 2), 7)
+ self.assertEqual((6 - 1 if 1 else 2), 5)
+ self.assertEqual((6 * 2 if 1 else 4), 12)
+ self.assertEqual((6 / 2 if 1 else 3), 3)
+ self.assertEqual((6 < 4 if 0 else 2), 2)
+
+
+def test_main():
+ run_unittest(TokenTests, GrammarTests)
+
+if __name__ == '__main__':
+ test_main()
diff --git a/lib/python2.7/lib2to3/tests/data/py3_test_grammar.py b/lib/python2.7/lib2to3/tests/data/py3_test_grammar.py
new file mode 100644
index 0000000..c0bf7f2
--- /dev/null
+++ b/lib/python2.7/lib2to3/tests/data/py3_test_grammar.py
@@ -0,0 +1,923 @@
+# Python test set -- part 1, grammar.
+# This just tests whether the parser accepts them all.
+
+# NOTE: When you run this test as a script from the command line, you
+# get warnings about certain hex/oct constants. Since those are
+# issued by the parser, you can't suppress them by adding a
+# filterwarnings() call to this module. Therefore, to shut up the
+# regression test, the filterwarnings() call has been added to
+# regrtest.py.
+
+from test.support import run_unittest, check_syntax_error
+import unittest
+import sys
+# testing import *
+from sys import *
+
+class TokenTests(unittest.TestCase):
+
+ def testBackslash(self):
+ # Backslash means line continuation:
+ x = 1 \
+ + 1
+ self.assertEquals(x, 2, 'backslash for line continuation')
+
+ # Backslash does not mean continuation in comments :\
+ x = 0
+ self.assertEquals(x, 0, 'backslash ending comment')
+
+ def testPlainIntegers(self):
+ self.assertEquals(type(000), type(0))
+ self.assertEquals(0xff, 255)
+ self.assertEquals(0o377, 255)
+ self.assertEquals(2147483647, 0o17777777777)
+ self.assertEquals(0b1001, 9)
+ # "0x" is not a valid literal
+ self.assertRaises(SyntaxError, eval, "0x")
+ from sys import maxsize
+ if maxsize == 2147483647:
+ self.assertEquals(-2147483647-1, -0o20000000000)
+ # XXX -2147483648
+ self.assert_(0o37777777777 > 0)
+ self.assert_(0xffffffff > 0)
+ self.assert_(0b1111111111111111111111111111111 > 0)
+ for s in ('2147483648', '0o40000000000', '0x100000000',
+ '0b10000000000000000000000000000000'):
+ try:
+ x = eval(s)
+ except OverflowError:
+ self.fail("OverflowError on huge integer literal %r" % s)
+ elif maxsize == 9223372036854775807:
+ self.assertEquals(-9223372036854775807-1, -0o1000000000000000000000)
+ self.assert_(0o1777777777777777777777 > 0)
+ self.assert_(0xffffffffffffffff > 0)
+ self.assert_(0b11111111111111111111111111111111111111111111111111111111111111 > 0)
+ for s in '9223372036854775808', '0o2000000000000000000000', \
+ '0x10000000000000000', \
+ '0b100000000000000000000000000000000000000000000000000000000000000':
+ try:
+ x = eval(s)
+ except OverflowError:
+ self.fail("OverflowError on huge integer literal %r" % s)
+ else:
+ self.fail('Weird maxsize value %r' % maxsize)
+
+ def testLongIntegers(self):
+ x = 0
+ x = 0xffffffffffffffff
+ x = 0Xffffffffffffffff
+ x = 0o77777777777777777
+ x = 0O77777777777777777
+ x = 123456789012345678901234567890
+ x = 0b100000000000000000000000000000000000000000000000000000000000000000000
+ x = 0B111111111111111111111111111111111111111111111111111111111111111111111
+
+ def testFloats(self):
+ x = 3.14
+ x = 314.
+ x = 0.314
+ # XXX x = 000.314
+ x = .314
+ x = 3e14
+ x = 3E14
+ x = 3e-14
+ x = 3e+14
+ x = 3.e14
+ x = .3e14
+ x = 3.1e4
+
+ def testStringLiterals(self):
+ x = ''; y = ""; self.assert_(len(x) == 0 and x == y)
+ x = '\''; y = "'"; self.assert_(len(x) == 1 and x == y and ord(x) == 39)
+ x = '"'; y = "\""; self.assert_(len(x) == 1 and x == y and ord(x) == 34)
+ x = "doesn't \"shrink\" does it"
+ y = 'doesn\'t "shrink" does it'
+ self.assert_(len(x) == 24 and x == y)
+ x = "does \"shrink\" doesn't it"
+ y = 'does "shrink" doesn\'t it'
+ self.assert_(len(x) == 24 and x == y)
+ x = """
+The "quick"
+brown fox
+jumps over
+the 'lazy' dog.
+"""
+ y = '\nThe "quick"\nbrown fox\njumps over\nthe \'lazy\' dog.\n'
+ self.assertEquals(x, y)
+ y = '''
+The "quick"
+brown fox
+jumps over
+the 'lazy' dog.
+'''
+ self.assertEquals(x, y)
+ y = "\n\
+The \"quick\"\n\
+brown fox\n\
+jumps over\n\
+the 'lazy' dog.\n\
+"
+ self.assertEquals(x, y)
+ y = '\n\
+The \"quick\"\n\
+brown fox\n\
+jumps over\n\
+the \'lazy\' dog.\n\
+'
+ self.assertEquals(x, y)
+
+ def testEllipsis(self):
+ x = ...
+ self.assert_(x is Ellipsis)
+ self.assertRaises(SyntaxError, eval, ".. .")
+
+class GrammarTests(unittest.TestCase):
+
+ # single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE
+ # XXX can't test in a script -- this rule is only used when interactive
+
+ # file_input: (NEWLINE | stmt)* ENDMARKER
+ # Being tested at this very moment by this very module
+
+ # expr_input: testlist NEWLINE
+ # XXX Hard to test -- used only in calls to input()
+
+ def testEvalInput(self):
+ # testlist ENDMARKER
+ x = eval('1, 0 or 1')
+
+ def testFuncdef(self):
+ ### [decorators] 'def' NAME parameters ['->' test] ':' suite
+ ### decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE
+ ### decorators: decorator+
+ ### parameters: '(' [typedargslist] ')'
+ ### typedargslist: ((tfpdef ['=' test] ',')*
+ ### ('*' [tfpdef] (',' tfpdef ['=' test])* [',' '**' tfpdef] | '**' tfpdef)
+ ### | tfpdef ['=' test] (',' tfpdef ['=' test])* [','])
+ ### tfpdef: NAME [':' test]
+ ### varargslist: ((vfpdef ['=' test] ',')*
+ ### ('*' [vfpdef] (',' vfpdef ['=' test])* [',' '**' vfpdef] | '**' vfpdef)
+ ### | vfpdef ['=' test] (',' vfpdef ['=' test])* [','])
+ ### vfpdef: NAME
+ def f1(): pass
+ f1()
+ f1(*())
+ f1(*(), **{})
+ def f2(one_argument): pass
+ def f3(two, arguments): pass
+ self.assertEquals(f2.__code__.co_varnames, ('one_argument',))
+ self.assertEquals(f3.__code__.co_varnames, ('two', 'arguments'))
+ def a1(one_arg,): pass
+ def a2(two, args,): pass
+ def v0(*rest): pass
+ def v1(a, *rest): pass
+ def v2(a, b, *rest): pass
+
+ f1()
+ f2(1)
+ f2(1,)
+ f3(1, 2)
+ f3(1, 2,)
+ v0()
+ v0(1)
+ v0(1,)
+ v0(1,2)
+ v0(1,2,3,4,5,6,7,8,9,0)
+ v1(1)
+ v1(1,)
+ v1(1,2)
+ v1(1,2,3)
+ v1(1,2,3,4,5,6,7,8,9,0)
+ v2(1,2)
+ v2(1,2,3)
+ v2(1,2,3,4)
+ v2(1,2,3,4,5,6,7,8,9,0)
+
+ def d01(a=1): pass
+ d01()
+ d01(1)
+ d01(*(1,))
+ d01(**{'a':2})
+ def d11(a, b=1): pass
+ d11(1)
+ d11(1, 2)
+ d11(1, **{'b':2})
+ def d21(a, b, c=1): pass
+ d21(1, 2)
+ d21(1, 2, 3)
+ d21(*(1, 2, 3))
+ d21(1, *(2, 3))
+ d21(1, 2, *(3,))
+ d21(1, 2, **{'c':3})
+ def d02(a=1, b=2): pass
+ d02()
+ d02(1)
+ d02(1, 2)
+ d02(*(1, 2))
+ d02(1, *(2,))
+ d02(1, **{'b':2})
+ d02(**{'a': 1, 'b': 2})
+ def d12(a, b=1, c=2): pass
+ d12(1)
+ d12(1, 2)
+ d12(1, 2, 3)
+ def d22(a, b, c=1, d=2): pass
+ d22(1, 2)
+ d22(1, 2, 3)
+ d22(1, 2, 3, 4)
+ def d01v(a=1, *rest): pass
+ d01v()
+ d01v(1)
+ d01v(1, 2)
+ d01v(*(1, 2, 3, 4))
+ d01v(*(1,))
+ d01v(**{'a':2})
+ def d11v(a, b=1, *rest): pass
+ d11v(1)
+ d11v(1, 2)
+ d11v(1, 2, 3)
+ def d21v(a, b, c=1, *rest): pass
+ d21v(1, 2)
+ d21v(1, 2, 3)
+ d21v(1, 2, 3, 4)
+ d21v(*(1, 2, 3, 4))
+ d21v(1, 2, **{'c': 3})
+ def d02v(a=1, b=2, *rest): pass
+ d02v()
+ d02v(1)
+ d02v(1, 2)
+ d02v(1, 2, 3)
+ d02v(1, *(2, 3, 4))
+ d02v(**{'a': 1, 'b': 2})
+ def d12v(a, b=1, c=2, *rest): pass
+ d12v(1)
+ d12v(1, 2)
+ d12v(1, 2, 3)
+ d12v(1, 2, 3, 4)
+ d12v(*(1, 2, 3, 4))
+ d12v(1, 2, *(3, 4, 5))
+ d12v(1, *(2,), **{'c': 3})
+ def d22v(a, b, c=1, d=2, *rest): pass
+ d22v(1, 2)
+ d22v(1, 2, 3)
+ d22v(1, 2, 3, 4)
+ d22v(1, 2, 3, 4, 5)
+ d22v(*(1, 2, 3, 4))
+ d22v(1, 2, *(3, 4, 5))
+ d22v(1, *(2, 3), **{'d': 4})
+
+ # keyword argument type tests
+ try:
+ str('x', **{b'foo':1 })
+ except TypeError:
+ pass
+ else:
+ self.fail('Bytes should not work as keyword argument names')
+ # keyword only argument tests
+ def pos0key1(*, key): return key
+ pos0key1(key=100)
+ def pos2key2(p1, p2, *, k1, k2=100): return p1,p2,k1,k2
+ pos2key2(1, 2, k1=100)
+ pos2key2(1, 2, k1=100, k2=200)
+ pos2key2(1, 2, k2=100, k1=200)
+ def pos2key2dict(p1, p2, *, k1=100, k2, **kwarg): return p1,p2,k1,k2,kwarg
+ pos2key2dict(1,2,k2=100,tokwarg1=100,tokwarg2=200)
+ pos2key2dict(1,2,tokwarg1=100,tokwarg2=200, k2=100)
+
+ # keyword arguments after *arglist
+ def f(*args, **kwargs):
+ return args, kwargs
+ self.assertEquals(f(1, x=2, *[3, 4], y=5), ((1, 3, 4),
+ {'x':2, 'y':5}))
+ self.assertRaises(SyntaxError, eval, "f(1, *(2,3), 4)")
+ self.assertRaises(SyntaxError, eval, "f(1, x=2, *(3,4), x=5)")
+
+ # argument annotation tests
+ def f(x) -> list: pass
+ self.assertEquals(f.__annotations__, {'return': list})
+ def f(x:int): pass
+ self.assertEquals(f.__annotations__, {'x': int})
+ def f(*x:str): pass
+ self.assertEquals(f.__annotations__, {'x': str})
+ def f(**x:float): pass
+ self.assertEquals(f.__annotations__, {'x': float})
+ def f(x, y:1+2): pass
+ self.assertEquals(f.__annotations__, {'y': 3})
+ def f(a, b:1, c:2, d): pass
+ self.assertEquals(f.__annotations__, {'b': 1, 'c': 2})
+ def f(a, b:1, c:2, d, e:3=4, f=5, *g:6): pass
+ self.assertEquals(f.__annotations__,
+ {'b': 1, 'c': 2, 'e': 3, 'g': 6})
+ def f(a, b:1, c:2, d, e:3=4, f=5, *g:6, h:7, i=8, j:9=10,
+ **k:11) -> 12: pass
+ self.assertEquals(f.__annotations__,
+ {'b': 1, 'c': 2, 'e': 3, 'g': 6, 'h': 7, 'j': 9,
+ 'k': 11, 'return': 12})
+ # Check for SF Bug #1697248 - mixing decorators and a return annotation
+ def null(x): return x
+ @null
+ def f(x) -> list: pass
+ self.assertEquals(f.__annotations__, {'return': list})
+
+ # test MAKE_CLOSURE with a variety of opargs
+ closure = 1
+ def f(): return closure
+ def f(x=1): return closure
+ def f(*, k=1): return closure
+ def f() -> int: return closure
+
+ # Check ast errors in *args and *kwargs
+ check_syntax_error(self, "f(*g(1=2))")
+ check_syntax_error(self, "f(**g(1=2))")
+
+ def testLambdef(self):
+ ### lambdef: 'lambda' [varargslist] ':' test
+ l1 = lambda : 0
+ self.assertEquals(l1(), 0)
+ l2 = lambda : a[d] # XXX just testing the expression
+ l3 = lambda : [2 < x for x in [-1, 3, 0]]
+ self.assertEquals(l3(), [0, 1, 0])
+ l4 = lambda x = lambda y = lambda z=1 : z : y() : x()
+ self.assertEquals(l4(), 1)
+ l5 = lambda x, y, z=2: x + y + z
+ self.assertEquals(l5(1, 2), 5)
+ self.assertEquals(l5(1, 2, 3), 6)
+ check_syntax_error(self, "lambda x: x = 2")
+ check_syntax_error(self, "lambda (None,): None")
+ l6 = lambda x, y, *, k=20: x+y+k
+ self.assertEquals(l6(1,2), 1+2+20)
+ self.assertEquals(l6(1,2,k=10), 1+2+10)
+
+
+ ### stmt: simple_stmt | compound_stmt
+ # Tested below
+
+ def testSimpleStmt(self):
+ ### simple_stmt: small_stmt (';' small_stmt)* [';']
+ x = 1; pass; del x
+ def foo():
+ # verify statements that end with semicolons
+ x = 1; pass; del x;
+ foo()
+
+ ### small_stmt: expr_stmt | pass_stmt | del_stmt | flow_stmt | import_stmt | global_stmt | access_stmt
+ # Tested below
+
+ def testExprStmt(self):
+ # (exprlist '=')* exprlist
+ 1
+ 1, 2, 3
+ x = 1
+ x = 1, 2, 3
+ x = y = z = 1, 2, 3
+ x, y, z = 1, 2, 3
+ abc = a, b, c = x, y, z = xyz = 1, 2, (3, 4)
+
+ check_syntax_error(self, "x + 1 = 1")
+ check_syntax_error(self, "a + 1 = b + 2")
+
+ def testDelStmt(self):
+ # 'del' exprlist
+ abc = [1,2,3]
+ x, y, z = abc
+ xyz = x, y, z
+
+ del abc
+ del x, y, (z, xyz)
+
+ def testPassStmt(self):
+ # 'pass'
+ pass
+
+ # flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt
+ # Tested below
+
+ def testBreakStmt(self):
+ # 'break'
+ while 1: break
+
+ def testContinueStmt(self):
+ # 'continue'
+ i = 1
+ while i: i = 0; continue
+
+ msg = ""
+ while not msg:
+ msg = "ok"
+ try:
+ continue
+ msg = "continue failed to continue inside try"
+ except:
+ msg = "continue inside try called except block"
+ if msg != "ok":
+ self.fail(msg)
+
+ msg = ""
+ while not msg:
+ msg = "finally block not called"
+ try:
+ continue
+ finally:
+ msg = "ok"
+ if msg != "ok":
+ self.fail(msg)
+
+ def test_break_continue_loop(self):
+ # This test warrants an explanation. It is a test specifically for SF bugs
+ # #463359 and #462937. The bug is that a 'break' statement executed or
+ # exception raised inside a try/except inside a loop, *after* a continue
+ # statement has been executed in that loop, will cause the wrong number of
+ # arguments to be popped off the stack and the instruction pointer reset to
+ # a very small number (usually 0). Because of this, the following test
+ # *must* be written as a function, and the tracking vars *must* be function
+ # arguments with default values. Otherwise, the test will loop forever.
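+ # The function below reproduces the pattern: 'continue' runs on the
+ # first pass through the try block, then 'break' fires inside the same
+ # try/except on a later pass.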
+
+ def test_inner(extra_burning_oil = 1, count=0):
+ big_hippo = 2
+ while big_hippo:
+ count += 1
+ try:
+ if extra_burning_oil and big_hippo == 1:
+ extra_burning_oil -= 1
+ break
+ big_hippo -= 1
+ continue
+ except:
+ raise
+ if count > 2 or big_hippo != 1:
+ self.fail("continue then break in try/except in loop broken!")
+ test_inner()
+
+ def testReturn(self):
+ # 'return' [testlist]
+ def g1(): return
+ def g2(): return 1
+ g1()
+ x = g2()
+ check_syntax_error(self, "class foo:return 1")
+
+ def testYield(self):
+ check_syntax_error(self, "class foo:yield 1")
+
+ def testRaise(self):
+ # 'raise' test [',' test]
+ try: raise RuntimeError('just testing')
+ except RuntimeError: pass
+ try: raise KeyboardInterrupt
+ except KeyboardInterrupt: pass
+
+ def testImport(self):
+ # 'import' dotted_as_names
+ import sys
+ import time, sys
+ # 'from' dotted_name 'import' ('*' | '(' import_as_names ')' | import_as_names)
+ from time import time
+ from time import (time)
+ # not testable inside a function, but already done at top of the module
+ # from sys import *
+ from sys import path, argv
+ from sys import (path, argv)
+ from sys import (path, argv,)
+
+ def testGlobal(self):
+ # 'global' NAME (',' NAME)*
+ global a
+ global a, b
+ global one, two, three, four, five, six, seven, eight, nine, ten
+
+ def testNonlocal(self):
+ # 'nonlocal' NAME (',' NAME)*
+ x = 0
+ y = 0
+ def f():
+ nonlocal x
+ nonlocal x, y
+
+ def testAssert(self):
+ # assert_stmt: 'assert' test [',' test]
+ assert 1
+ assert 1, 1
+ assert lambda x:x
+ assert 1, lambda x:x+1
+ try:
+ assert 0, "msg"
+ except AssertionError as e:
+ self.assertEquals(e.args[0], "msg")
+ else:
+ if __debug__:
+ self.fail("AssertionError not raised by assert 0")
+
+ ### compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | funcdef | classdef
+ # Tested below
+
+ def testIf(self):
+ # 'if' test ':' suite ('elif' test ':' suite)* ['else' ':' suite]
+ if 1: pass
+ if 1: pass
+ else: pass
+ if 0: pass
+ elif 0: pass
+ if 0: pass
+ elif 0: pass
+ elif 0: pass
+ elif 0: pass
+ else: pass
+
+ def testWhile(self):
+ # 'while' test ':' suite ['else' ':' suite]
+ while 0: pass
+ while 0: pass
+ else: pass
+
+ # Issue1920: "while 0" is optimized away,
+ # ensure that the "else" clause is still present.
+ x = 0
+ while 0:
+ x = 1
+ else:
+ x = 2
+ self.assertEquals(x, 2)
+
+ def testFor(self):
+ # 'for' exprlist 'in' exprlist ':' suite ['else' ':' suite]
+ for i in 1, 2, 3: pass
+ for i, j, k in (): pass
+ else: pass
+ class Squares:
+ def __init__(self, max):
+ self.max = max
+ self.sofar = []
+ def __len__(self): return len(self.sofar)
+ def __getitem__(self, i):
+ if not 0 <= i < self.max: raise IndexError
+ n = len(self.sofar)
+ while n <= i:
+ self.sofar.append(n*n)
+ n = n+1
+ return self.sofar[i]
+ n = 0
+ for x in Squares(10): n = n+x
+ if n != 285:
+ self.fail('for over growing sequence')
+
+ result = []
+ for x, in [(1,), (2,), (3,)]:
+ result.append(x)
+ self.assertEqual(result, [1, 2, 3])
+
+ def testTry(self):
+ ### try_stmt: 'try' ':' suite (except_clause ':' suite)+ ['else' ':' suite]
+ ### | 'try' ':' suite 'finally' ':' suite
+ ### except_clause: 'except' [expr ['as' expr]]
+ try:
+ 1/0
+ except ZeroDivisionError:
+ pass
+ else:
+ pass
+ try: 1/0
+ except EOFError: pass
+ except TypeError as msg: pass
+ except RuntimeError as msg: pass
+ except: pass
+ else: pass
+ try: 1/0
+ except (EOFError, TypeError, ZeroDivisionError): pass
+ try: 1/0
+ except (EOFError, TypeError, ZeroDivisionError) as msg: pass
+ try: pass
+ finally: pass
+
+ def testSuite(self):
+ # simple_stmt | NEWLINE INDENT NEWLINE* (stmt NEWLINE*)+ DEDENT
+ if 1: pass
+ if 1:
+ pass
+ if 1:
+ #
+ #
+ #
+ pass
+ pass
+ #
+ pass
+ #
+
+ def testTest(self):
+ ### and_test ('or' and_test)*
+ ### and_test: not_test ('and' not_test)*
+ ### not_test: 'not' not_test | comparison
+ if not 1: pass
+ if 1 and 1: pass
+ if 1 or 1: pass
+ if not not not 1: pass
+ if not 1 and 1 and 1: pass
+ if 1 and 1 or 1 and 1 and 1 or not 1 and 1: pass
+
+ def testComparison(self):
+ ### comparison: expr (comp_op expr)*
+ ### comp_op: '<'|'>'|'=='|'>='|'<='|'!='|'in'|'not' 'in'|'is'|'is' 'not'
+ if 1: pass
+ x = (1 == 1)
+ if 1 == 1: pass
+ if 1 != 1: pass
+ if 1 < 1: pass
+ if 1 > 1: pass
+ if 1 <= 1: pass
+ if 1 >= 1: pass
+ if 1 is 1: pass
+ if 1 is not 1: pass
+ if 1 in (): pass
+ if 1 not in (): pass
+ if 1 < 1 > 1 == 1 >= 1 <= 1 != 1 in 1 not in 1 is 1 is not 1: pass
+
+ def testBinaryMaskOps(self):
+ x = 1 & 1
+ x = 1 ^ 1
+ x = 1 | 1
+
+ def testShiftOps(self):
+ x = 1 << 1
+ x = 1 >> 1
+ x = 1 << 1 >> 1
+
+ def testAdditiveOps(self):
+ x = 1
+ x = 1 + 1
+ x = 1 - 1 - 1
+ x = 1 - 1 + 1 - 1 + 1
+
+ def testMultiplicativeOps(self):
+ x = 1 * 1
+ x = 1 / 1
+ x = 1 % 1
+ x = 1 / 1 * 1 % 1
+
+ def testUnaryOps(self):
+ x = +1
+ x = -1
+ x = ~1
+ x = ~1 ^ 1 & 1 | 1 & 1 ^ -1
+ x = -1*1/1 + 1*1 - ---1*1
+
+ def testSelectors(self):
+ ### trailer: '(' [testlist] ')' | '[' subscript ']' | '.' NAME
+ ### subscript: expr | [expr] ':' [expr]
+
+ import sys, time
+ c = sys.path[0]
+ x = time.time()
+ x = sys.modules['time'].time()
+ a = '01234'
+ c = a[0]
+ c = a[-1]
+ s = a[0:5]
+ s = a[:5]
+ s = a[0:]
+ s = a[:]
+ s = a[-5:]
+ s = a[:-1]
+ s = a[-4:-3]
+ # A rough test of SF bug 1333982. http://python.org/sf/1333982
+ # The testing here is fairly incomplete.
+ # Test cases should include: commas with 1 and 2 colons
+ d = {}
+ d[1] = 1
+ d[1,] = 2
+ d[1,2] = 3
+ d[1,2,3] = 4
+ L = list(d)
+ L.sort(key=lambda x: x if isinstance(x, tuple) else ())
+ self.assertEquals(str(L), '[1, (1,), (1, 2), (1, 2, 3)]')
+
+ def testAtoms(self):
+ ### atom: '(' [testlist] ')' | '[' [testlist] ']' | '{' [dictsetmaker] '}' | NAME | NUMBER | STRING
+ ### dictsetmaker: (test ':' test (',' test ':' test)* [',']) | (test (',' test)* [','])
+
+ x = (1)
+ x = (1 or 2 or 3)
+ x = (1 or 2 or 3, 2, 3)
+
+ x = []
+ x = [1]
+ x = [1 or 2 or 3]
+ x = [1 or 2 or 3, 2, 3]
+ x = []
+
+ x = {}
+ x = {'one': 1}
+ x = {'one': 1,}
+ x = {'one' or 'two': 1 or 2}
+ x = {'one': 1, 'two': 2}
+ x = {'one': 1, 'two': 2,}
+ x = {'one': 1, 'two': 2, 'three': 3, 'four': 4, 'five': 5, 'six': 6}
+
+ x = {'one'}
+ x = {'one', 1,}
+ x = {'one', 'two', 'three'}
+ x = {2, 3, 4,}
+
+ x = x
+ x = 'x'
+ x = 123
+
+ ### exprlist: expr (',' expr)* [',']
+ ### testlist: test (',' test)* [',']
+ # These have been exercised enough above
+
+ def testClassdef(self):
+ # 'class' NAME ['(' [testlist] ')'] ':' suite
+ class B: pass
+ class B2(): pass
+ class C1(B): pass
+ class C2(B): pass
+ class D(C1, C2, B): pass
+ class C:
+ def meth1(self): pass
+ def meth2(self, arg): pass
+ def meth3(self, a1, a2): pass
+
+ # decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE
+ # decorators: decorator+
+ # decorated: decorators (classdef | funcdef)
+ def class_decorator(x): return x
+ @class_decorator
+ class G: pass
+
+ def testDictcomps(self):
+ # dictorsetmaker: ( (test ':' test (comp_for |
+ # (',' test ':' test)* [','])) |
+ # (test (comp_for | (',' test)* [','])) )
+ nums = [1, 2, 3]
+ self.assertEqual({i:i+1 for i in nums}, {1: 2, 2: 3, 3: 4})
+
+ def testListcomps(self):
+ # list comprehension tests
+ nums = [1, 2, 3, 4, 5]
+ strs = ["Apple", "Banana", "Coconut"]
+ spcs = [" Apple", " Banana ", "Coco nut "]
+
+ self.assertEqual([s.strip() for s in spcs], ['Apple', 'Banana', 'Coco nut'])
+ self.assertEqual([3 * x for x in nums], [3, 6, 9, 12, 15])
+ self.assertEqual([x for x in nums if x > 2], [3, 4, 5])
+ self.assertEqual([(i, s) for i in nums for s in strs],
+ [(1, 'Apple'), (1, 'Banana'), (1, 'Coconut'),
+ (2, 'Apple'), (2, 'Banana'), (2, 'Coconut'),
+ (3, 'Apple'), (3, 'Banana'), (3, 'Coconut'),
+ (4, 'Apple'), (4, 'Banana'), (4, 'Coconut'),
+ (5, 'Apple'), (5, 'Banana'), (5, 'Coconut')])
+ self.assertEqual([(i, s) for i in nums for s in [f for f in strs if "n" in f]],
+ [(1, 'Banana'), (1, 'Coconut'), (2, 'Banana'), (2, 'Coconut'),
+ (3, 'Banana'), (3, 'Coconut'), (4, 'Banana'), (4, 'Coconut'),
+ (5, 'Banana'), (5, 'Coconut')])
+ self.assertEqual([(lambda a:[a**i for i in range(a+1)])(j) for j in range(5)],
+ [[1], [1, 1], [1, 2, 4], [1, 3, 9, 27], [1, 4, 16, 64, 256]])
+
+ def test_in_func(l):
+ return [0 < x < 3 for x in l if x > 2]
+
+ self.assertEqual(test_in_func(nums), [False, False, False])
+
+ def test_nested_front():
+ self.assertEqual([[y for y in [x, x + 1]] for x in [1,3,5]],
+ [[1, 2], [3, 4], [5, 6]])
+
+ test_nested_front()
+
+ check_syntax_error(self, "[i, s for i in nums for s in strs]")
+ check_syntax_error(self, "[x if y]")
+
+ suppliers = [
+ (1, "Boeing"),
+ (2, "Ford"),
+ (3, "Macdonalds")
+ ]
+
+ parts = [
+ (10, "Airliner"),
+ (20, "Engine"),
+ (30, "Cheeseburger")
+ ]
+
+ suppart = [
+ (1, 10), (1, 20), (2, 20), (3, 30)
+ ]
+
+ x = [
+ (sname, pname)
+ for (sno, sname) in suppliers
+ for (pno, pname) in parts
+ for (sp_sno, sp_pno) in suppart
+ if sno == sp_sno and pno == sp_pno
+ ]
+
+ self.assertEqual(x, [('Boeing', 'Airliner'), ('Boeing', 'Engine'), ('Ford', 'Engine'),
+ ('Macdonalds', 'Cheeseburger')])
+
+ def testGenexps(self):
+ # generator expression tests
+ g = ([x for x in range(10)] for x in range(1))
+ self.assertEqual(next(g), [x for x in range(10)])
+ try:
+ next(g)
+ self.fail('should produce StopIteration exception')
+ except StopIteration:
+ pass
+
+ a = 1
+ try:
+ g = (a for d in a)
+ next(g)
+ self.fail('should produce TypeError')
+ except TypeError:
+ pass
+
+ self.assertEqual(list((x, y) for x in 'abcd' for y in 'abcd'), [(x, y) for x in 'abcd' for y in 'abcd'])
+ self.assertEqual(list((x, y) for x in 'ab' for y in 'xy'), [(x, y) for x in 'ab' for y in 'xy'])
+
+ a = [x for x in range(10)]
+ b = (x for x in (y for y in a))
+ self.assertEqual(sum(b), sum([x for x in range(10)]))
+
+ self.assertEqual(sum(x**2 for x in range(10)), sum([x**2 for x in range(10)]))
+ self.assertEqual(sum(x*x for x in range(10) if x%2), sum([x*x for x in range(10) if x%2]))
+ self.assertEqual(sum(x for x in (y for y in range(10))), sum([x for x in range(10)]))
+ self.assertEqual(sum(x for x in (y for y in (z for z in range(10)))), sum([x for x in range(10)]))
+ self.assertEqual(sum(x for x in [y for y in (z for z in range(10))]), sum([x for x in range(10)]))
+ self.assertEqual(sum(x for x in (y for y in (z for z in range(10) if True)) if True), sum([x for x in range(10)]))
+ self.assertEqual(sum(x for x in (y for y in (z for z in range(10) if True) if False) if True), 0)
+ check_syntax_error(self, "foo(x for x in range(10), 100)")
+ check_syntax_error(self, "foo(100, x for x in range(10))")
+
+ def testComprehensionSpecials(self):
+ # test for outermost iterable precomputation
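+ # (the iterable of the first 'for' clause is evaluated as soon as the
+ # generator expression is created, so rebinding x afterwards is ignored)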
+ x = 10; g = (i for i in range(x)); x = 5
+ self.assertEqual(len(list(g)), 10)
+
+ # This should hold, since only the outermost iterable is precomputed.
+ x = 10; t = False; g = ((i,j) for i in range(x) if t for j in range(x))
+ x = 5; t = True;
+ self.assertEqual([(i,j) for i in range(10) for j in range(5)], list(g))
+
+ # Grammar allows multiple adjacent 'if's in listcomps and genexps,
+ # even though it's silly. Make sure it works (the if-else expression
+ # patch broke this at one point).
+ self.assertEqual([ x for x in range(10) if x % 2 if x % 3 ], [1, 5, 7])
+ self.assertEqual(list(x for x in range(10) if x % 2 if x % 3), [1, 5, 7])
+
+ # verify unpacking single element tuples in listcomp/genexp.
+ self.assertEqual([x for x, in [(4,), (5,), (6,)]], [4, 5, 6])
+ self.assertEqual(list(x for x, in [(7,), (8,), (9,)]), [7, 8, 9])
+
+ def test_with_statement(self):
+ class manager(object):
+ def __enter__(self):
+ return (1, 2)
+ def __exit__(self, *args):
+ pass
+
+ with manager():
+ pass
+ with manager() as x:
+ pass
+ with manager() as (x, y):
+ pass
+ with manager(), manager():
+ pass
+ with manager() as x, manager() as y:
+ pass
+ with manager() as x, manager():
+ pass
+
+ def testIfElseExpr(self):
+ # Test ifelse expressions in various cases
+ def _checkeval(msg, ret):
+ "helper to check that evaluation of expressions is done correctly"
+ print(x)
+ return ret
+
+ # the next line is not allowed anymore
+ #self.assertEqual([ x() for x in lambda: True, lambda: False if x() ], [True])
+ self.assertEqual([ x() for x in (lambda: True, lambda: False) if x() ], [True])
+ self.assertEqual([ x(False) for x in (lambda x: False if x else True, lambda x: True if x else False) if x(False) ], [True])
+ self.assertEqual((5 if 1 else _checkeval("check 1", 0)), 5)
+ self.assertEqual((_checkeval("check 2", 0) if 0 else 5), 5)
+ self.assertEqual((5 and 6 if 0 else 1), 1)
+ self.assertEqual(((5 and 6) if 0 else 1), 1)
+ self.assertEqual((5 and (6 if 1 else 1)), 6)
+ self.assertEqual((0 or _checkeval("check 3", 2) if 0 else 3), 3)
+ self.assertEqual((1 or _checkeval("check 4", 2) if 1 else _checkeval("check 5", 3)), 1)
+ self.assertEqual((0 or 5 if 1 else _checkeval("check 6", 3)), 5)
+ self.assertEqual((not 5 if 1 else 1), False)
+ self.assertEqual((not 5 if 0 else 1), 1)
+ self.assertEqual((6 + 1 if 1 else 2), 7)
+ self.assertEqual((6 - 1 if 1 else 2), 5)
+ self.assertEqual((6 * 2 if 1 else 4), 12)
+ self.assertEqual((6 / 2 if 1 else 3), 3)
+ self.assertEqual((6 < 4 if 0 else 2), 2)
+
+
+def test_main():
+ run_unittest(TokenTests, GrammarTests)
+
+if __name__ == '__main__':
+ test_main()
diff --git a/lib/python2.7/lib2to3/tests/pytree_idempotency.py b/lib/python2.7/lib2to3/tests/pytree_idempotency.py
new file mode 100644
index 0000000..333b622
--- /dev/null
+++ b/lib/python2.7/lib2to3/tests/pytree_idempotency.py
@@ -0,0 +1,92 @@
+#!/usr/bin/env python2
+# Copyright 2006 Google, Inc. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+"""Main program for testing the infrastructure."""
+
+__author__ = "Guido van Rossum <guido@python.org>"
+
+# Support imports (need to be imported first)
+from . import support
+
+# Python imports
+import os
+import sys
+import logging
+
+# Local imports
+from .. import pytree
+from .. import pgen2
+from ..pgen2 import driver
+
+logging.basicConfig()
+
+def main():
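+ # Round-trip check: parse a file into a pytree with the pgen2 driver,
+ # render the tree back to text, and diff the result against the
+ # original source. Any diff means str(tree) did not reproduce the input.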
+ gr = driver.load_grammar("Grammar.txt")
+ dr = driver.Driver(gr, convert=pytree.convert)
+
+ fn = "example.py"
+ tree = dr.parse_file(fn, debug=True)
+ if not diff(fn, tree):
+ print "No diffs."
+ if not sys.argv[1:]:
+ return # Pass a dummy argument to run the complete test suite below
+
+ problems = []
+
+ # Process every imported module
+ for name in sys.modules:
+ mod = sys.modules[name]
+ if mod is None or not hasattr(mod, "__file__"):
+ continue
+ fn = mod.__file__
+ if fn.endswith(".pyc"):
+ fn = fn[:-1]
+ if not fn.endswith(".py"):
+ continue
+ print >>sys.stderr, "Parsing", fn
+ tree = dr.parse_file(fn, debug=True)
+ if diff(fn, tree):
+ problems.append(fn)
+
+ # Process every single module on sys.path (but not in packages)
+ for dir in sys.path:
+ try:
+ names = os.listdir(dir)
+ except os.error:
+ continue
+ print >>sys.stderr, "Scanning", dir, "..."
+ for name in names:
+ if not name.endswith(".py"):
+ continue
+ print >>sys.stderr, "Parsing", name
+ fn = os.path.join(dir, name)
+ try:
+ tree = dr.parse_file(fn, debug=True)
+ except pgen2.parse.ParseError, err:
+ print "ParseError:", err
+ else:
+ if diff(fn, tree):
+ problems.append(fn)
+
+ # Show summary of problem files
+ if not problems:
+ print "No problems. Congratulations!"
+ else:
+ print "Problems in following files:"
+ for fn in problems:
+ print "***", fn
+
+def diff(fn, tree):
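+ # Dump the rendered tree to a scratch file literally named "@" and
+ # shell out to "diff -u"; a non-zero exit status from os.system
+ # signals that the round trip changed something.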
+ f = open("@", "w")
+ try:
+ f.write(str(tree))
+ finally:
+ f.close()
+ try:
+ return os.system("diff -u %s @" % fn)
+ finally:
+ os.remove("@")
+
+if __name__ == "__main__":
+ main()
diff --git a/lib/python2.7/lib2to3/tests/support.py b/lib/python2.7/lib2to3/tests/support.py
new file mode 100644
index 0000000..8f12de9
--- /dev/null
+++ b/lib/python2.7/lib2to3/tests/support.py
@@ -0,0 +1,54 @@
+"""Support code for test_*.py files"""
+# Author: Collin Winter
+
+# Python imports
+import unittest
+import sys
+import os
+import os.path
+import re
+from textwrap import dedent
+
+# Local imports
+from lib2to3 import pytree, refactor
+from lib2to3.pgen2 import driver as pgen2_driver
+
+test_dir = os.path.dirname(__file__)
+proj_dir = os.path.normpath(os.path.join(test_dir, ".."))
+grammar_path = os.path.join(test_dir, "..", "Grammar.txt")
+grammar = pgen2_driver.load_grammar(grammar_path)
+driver = pgen2_driver.Driver(grammar, convert=pytree.convert)
+
+def parse_string(string):
+ return driver.parse_string(reformat(string), debug=True)
+
+def run_all_tests(test_mod=None, tests=None):
+ if tests is None:
+ tests = unittest.TestLoader().loadTestsFromModule(test_mod)
+ unittest.TextTestRunner(verbosity=2).run(tests)
+
+def reformat(string):
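+ # Test snippets arrive as indented triple-quoted strings; dedent
+ # normalizes the indentation, and the trailing newlines guard against
+ # the parser rejecting input that lacks a final newline.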
+ return dedent(string) + u"\n\n"
+
+def get_refactorer(fixer_pkg="lib2to3", fixers=None, options=None):
+ """
+ A convenience function for creating a RefactoringTool for tests.
+
+ fixers is a list of fixers for the RefactoringTool to use. By default
+ "lib2to3.fixes.*" is used. options is an optional dictionary of options to
+ be passed to the RefactoringTool.
+ """
+ if fixers is not None:
+ fixers = [fixer_pkg + ".fixes.fix_" + fix for fix in fixers]
+ else:
+ fixers = refactor.get_fixers_from_package(fixer_pkg + ".fixes")
+ options = options or {}
+ return refactor.RefactoringTool(fixers, options, explicit=True)
+
+def all_project_files():
+ for dirpath, dirnames, filenames in os.walk(proj_dir):
+ for filename in filenames:
+ if filename.endswith(".py"):
+ yield os.path.join(dirpath, filename)
+
+TestCase = unittest.TestCase
diff --git a/lib/python2.7/lib2to3/tests/test_all_fixers.py b/lib/python2.7/lib2to3/tests/test_all_fixers.py
new file mode 100644
index 0000000..f64b3d9
--- /dev/null
+++ b/lib/python2.7/lib2to3/tests/test_all_fixers.py
@@ -0,0 +1,23 @@
+"""Tests that run all fixer modules over an input stream.
+
+This has been broken out into its own test module because of its
+running time.
+"""
+# Author: Collin Winter
+
+# Python imports
+import unittest
+
+# Local imports
+from lib2to3 import refactor
+from . import support
+
+
+class Test_all(support.TestCase):
+
+ def setUp(self):
+ self.refactor = support.get_refactorer()
+
+ def test_all_project_files(self):
+ for filepath in support.all_project_files():
+ self.refactor.refactor_file(filepath)
diff --git a/lib/python2.7/lib2to3/tests/test_fixers.py b/lib/python2.7/lib2to3/tests/test_fixers.py
new file mode 100644
index 0000000..b0e60fe
--- /dev/null
+++ b/lib/python2.7/lib2to3/tests/test_fixers.py
@@ -0,0 +1,4544 @@
+""" Test suite for the fixer modules """
+
+# Python imports
+import os
+import unittest
+from itertools import chain
+from operator import itemgetter
+
+# Local imports
+from lib2to3 import pygram, pytree, refactor, fixer_util
+from lib2to3.tests import support
+
+
+class FixerTestCase(support.TestCase):
+
+ # Other test cases can subclass this class and replace "fixer_pkg" with
+ # their own.
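+ # Subclasses set a class-level "fixer" attribute naming the fixer under
+ # test (e.g. fixer = "ne"); setUp then builds a RefactoringTool running
+ # just that fixer and collects its warnings in self.fixer_log.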
+ def setUp(self, fix_list=None, fixer_pkg="lib2to3", options=None):
+ if fix_list is None:
+ fix_list = [self.fixer]
+ self.refactor = support.get_refactorer(fixer_pkg, fix_list, options)
+ self.fixer_log = []
+ self.filename = u"<string>"
+
+ for fixer in chain(self.refactor.pre_order,
+ self.refactor.post_order):
+ fixer.log = self.fixer_log
+
+ def _check(self, before, after):
+ before = support.reformat(before)
+ after = support.reformat(after)
+ tree = self.refactor.refactor_string(before, self.filename)
+ self.assertEqual(after, unicode(tree))
+ return tree
+
+ def check(self, before, after, ignore_warnings=False):
+ tree = self._check(before, after)
+ self.assertTrue(tree.was_changed)
+ if not ignore_warnings:
+ self.assertEqual(self.fixer_log, [])
+
+ def warns(self, before, after, message, unchanged=False):
+ tree = self._check(before, after)
+ self.assertIn(message, "".join(self.fixer_log))
+ if not unchanged:
+ self.assertTrue(tree.was_changed)
+
+ def warns_unchanged(self, before, message):
+ self.warns(before, before, message, unchanged=True)
+
+ def unchanged(self, before, ignore_warnings=False):
+ self._check(before, before)
+ if not ignore_warnings:
+ self.assertEqual(self.fixer_log, [])
+
+ def assert_runs_after(self, *names):
+ fixes = [self.fixer]
+ fixes.extend(names)
+ r = support.get_refactorer("lib2to3", fixes)
+ (pre, post) = r.get_fixers()
+ n = "fix_" + self.fixer
+ if post and post[-1].__class__.__module__.endswith(n):
+ # We're the last fixer to run
+ return
+ if pre and pre[-1].__class__.__module__.endswith(n) and not post:
+ # We're the last in pre and post is empty
+ return
+ self.fail("Fixer run order (%s) is incorrect; %s should be last."\
+ %(", ".join([x.__class__.__module__ for x in (pre+post)]), n))
+
+class Test_ne(FixerTestCase):
+ fixer = "ne"
+
+ def test_basic(self):
+ b = """if x <> y:
+ pass"""
+
+ a = """if x != y:
+ pass"""
+ self.check(b, a)
+
+ def test_no_spaces(self):
+ b = """if x<>y:
+ pass"""
+
+ a = """if x!=y:
+ pass"""
+ self.check(b, a)
+
+ def test_chained(self):
+ b = """if x<>y<>z:
+ pass"""
+
+ a = """if x!=y!=z:
+ pass"""
+ self.check(b, a)
+
+class Test_has_key(FixerTestCase):
+ fixer = "has_key"
+
+ def test_1(self):
+ b = """x = d.has_key("x") or d.has_key("y")"""
+ a = """x = "x" in d or "y" in d"""
+ self.check(b, a)
+
+ def test_2(self):
+ b = """x = a.b.c.d.has_key("x") ** 3"""
+ a = """x = ("x" in a.b.c.d) ** 3"""
+ self.check(b, a)
+
+ def test_3(self):
+ b = """x = a.b.has_key(1 + 2).__repr__()"""
+ a = """x = (1 + 2 in a.b).__repr__()"""
+ self.check(b, a)
+
+ def test_4(self):
+ b = """x = a.b.has_key(1 + 2).__repr__() ** -3 ** 4"""
+ a = """x = (1 + 2 in a.b).__repr__() ** -3 ** 4"""
+ self.check(b, a)
+
+ def test_5(self):
+ b = """x = a.has_key(f or g)"""
+ a = """x = (f or g) in a"""
+ self.check(b, a)
+
+ def test_6(self):
+ b = """x = a + b.has_key(c)"""
+ a = """x = a + (c in b)"""
+ self.check(b, a)
+
+ def test_7(self):
+ b = """x = a.has_key(lambda: 12)"""
+ a = """x = (lambda: 12) in a"""
+ self.check(b, a)
+
+ def test_8(self):
+ b = """x = a.has_key(a for a in b)"""
+ a = """x = (a for a in b) in a"""
+ self.check(b, a)
+
+ def test_9(self):
+ b = """if not a.has_key(b): pass"""
+ a = """if b not in a: pass"""
+ self.check(b, a)
+
+ def test_10(self):
+ b = """if not a.has_key(b).__repr__(): pass"""
+ a = """if not (b in a).__repr__(): pass"""
+ self.check(b, a)
+
+ def test_11(self):
+ b = """if not a.has_key(b) ** 2: pass"""
+ a = """if not (b in a) ** 2: pass"""
+ self.check(b, a)
+
+class Test_apply(FixerTestCase):
+ fixer = "apply"
+
+ def test_1(self):
+ b = """x = apply(f, g + h)"""
+ a = """x = f(*g + h)"""
+ self.check(b, a)
+
+ def test_2(self):
+ b = """y = apply(f, g, h)"""
+ a = """y = f(*g, **h)"""
+ self.check(b, a)
+
+ def test_3(self):
+ b = """z = apply(fs[0], g or h, h or g)"""
+ a = """z = fs[0](*g or h, **h or g)"""
+ self.check(b, a)
+
+ def test_4(self):
+ b = """apply(f, (x, y) + t)"""
+ a = """f(*(x, y) + t)"""
+ self.check(b, a)
+
+ def test_5(self):
+ b = """apply(f, args,)"""
+ a = """f(*args)"""
+ self.check(b, a)
+
+ def test_6(self):
+ b = """apply(f, args, kwds,)"""
+ a = """f(*args, **kwds)"""
+ self.check(b, a)
+
+ # Test that complex functions are parenthesized
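+ # (an arithmetic expression such as f+g binds more loosely than a call,
+ # so the fixer must parenthesize it before appending the argument list)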
+
+ def test_complex_1(self):
+ b = """x = apply(f+g, args)"""
+ a = """x = (f+g)(*args)"""
+ self.check(b, a)
+
+ def test_complex_2(self):
+ b = """x = apply(f*g, args)"""
+ a = """x = (f*g)(*args)"""
+ self.check(b, a)
+
+ def test_complex_3(self):
+ b = """x = apply(f**g, args)"""
+ a = """x = (f**g)(*args)"""
+ self.check(b, a)
+
+ # But dotted names, subscripts, and calls are not
+
+ def test_dotted_name(self):
+ b = """x = apply(f.g, args)"""
+ a = """x = f.g(*args)"""
+ self.check(b, a)
+
+ def test_subscript(self):
+ b = """x = apply(f[x], args)"""
+ a = """x = f[x](*args)"""
+ self.check(b, a)
+
+ def test_call(self):
+ b = """x = apply(f(), args)"""
+ a = """x = f()(*args)"""
+ self.check(b, a)
+
+ # Extreme case
+ def test_extreme(self):
+ b = """x = apply(a.b.c.d.e.f, args, kwds)"""
+ a = """x = a.b.c.d.e.f(*args, **kwds)"""
+ self.check(b, a)
+
+ # XXX Comments in weird places still get lost
+ def test_weird_comments(self):
+ b = """apply( # foo
+ f, # bar
+ args)"""
+ a = """f(*args)"""
+ self.check(b, a)
+
+ # These should *not* be touched
+
+ def test_unchanged_1(self):
+ s = """apply()"""
+ self.unchanged(s)
+
+ def test_unchanged_2(self):
+ s = """apply(f)"""
+ self.unchanged(s)
+
+ def test_unchanged_3(self):
+ s = """apply(f,)"""
+ self.unchanged(s)
+
+ def test_unchanged_4(self):
+ s = """apply(f, args, kwds, extras)"""
+ self.unchanged(s)
+
+ def test_unchanged_5(self):
+ s = """apply(f, *args, **kwds)"""
+ self.unchanged(s)
+
+ def test_unchanged_6(self):
+ s = """apply(f, *args)"""
+ self.unchanged(s)
+
+ def test_unchanged_6b(self):
+ s = """apply(f, **kwds)"""
+ self.unchanged(s)
+
+ def test_unchanged_7(self):
+ s = """apply(func=f, args=args, kwds=kwds)"""
+ self.unchanged(s)
+
+ def test_unchanged_8(self):
+ s = """apply(f, args=args, kwds=kwds)"""
+ self.unchanged(s)
+
+ def test_unchanged_9(self):
+ s = """apply(f, args, kwds=kwds)"""
+ self.unchanged(s)
+
+ def test_space_1(self):
+ b = """apply( f, args, kwds)"""
+ a = """f(*args, **kwds)"""
+ self.check(b, a)
+
+ def test_space_2(self):
+ b = """apply( f ,args,kwds )"""
+ a = """f(*args, **kwds)"""
+ self.check(b, a)
+
+class Test_intern(FixerTestCase):
+ fixer = "intern"
+
+ def test_prefix_preservation(self):
+ b = """x = intern( a )"""
+ a = """import sys\nx = sys.intern( a )"""
+ self.check(b, a)
+
+ b = """y = intern("b" # test
+ )"""
+ a = """import sys\ny = sys.intern("b" # test
+ )"""
+ self.check(b, a)
+
+ b = """z = intern(a+b+c.d, )"""
+ a = """import sys\nz = sys.intern(a+b+c.d, )"""
+ self.check(b, a)
+
+ def test(self):
+ b = """x = intern(a)"""
+ a = """import sys\nx = sys.intern(a)"""
+ self.check(b, a)
+
+ b = """z = intern(a+b+c.d,)"""
+ a = """import sys\nz = sys.intern(a+b+c.d,)"""
+ self.check(b, a)
+
+ b = """intern("y%s" % 5).replace("y", "")"""
+ a = """import sys\nsys.intern("y%s" % 5).replace("y", "")"""
+ self.check(b, a)
+
+ # These should not be refactored
+
+ def test_unchanged(self):
+ s = """intern(a=1)"""
+ self.unchanged(s)
+
+ s = """intern(f, g)"""
+ self.unchanged(s)
+
+ s = """intern(*h)"""
+ self.unchanged(s)
+
+ s = """intern(**i)"""
+ self.unchanged(s)
+
+ s = """intern()"""
+ self.unchanged(s)
+
+class Test_reduce(FixerTestCase):
+ fixer = "reduce"
+
+ def test_simple_call(self):
+ b = "reduce(a, b, c)"
+ a = "from functools import reduce\nreduce(a, b, c)"
+ self.check(b, a)
+
+ def test_bug_7253(self):
+ # fix_tuple_params was being bad and orphaning nodes in the tree.
+ b = "def x(arg): reduce(sum, [])"
+ a = "from functools import reduce\ndef x(arg): reduce(sum, [])"
+ self.check(b, a)
+
+ def test_call_with_lambda(self):
+ b = "reduce(lambda x, y: x + y, seq)"
+ a = "from functools import reduce\nreduce(lambda x, y: x + y, seq)"
+ self.check(b, a)
+
+ def test_unchanged(self):
+ s = "reduce(a)"
+ self.unchanged(s)
+
+ s = "reduce(a, b=42)"
+ self.unchanged(s)
+
+ s = "reduce(a, b, c, d)"
+ self.unchanged(s)
+
+ s = "reduce(**c)"
+ self.unchanged(s)
+
+ s = "reduce()"
+ self.unchanged(s)
+
+class Test_print(FixerTestCase):
+ fixer = "print"
+
+ def test_prefix_preservation(self):
+ b = """print 1, 1+1, 1+1+1"""
+ a = """print(1, 1+1, 1+1+1)"""
+ self.check(b, a)
+
+ def test_idempotency(self):
+ s = """print()"""
+ self.unchanged(s)
+
+ s = """print('')"""
+ self.unchanged(s)
+
+ def test_idempotency_print_as_function(self):
+ self.refactor.driver.grammar = pygram.python_grammar_no_print_statement
+ s = """print(1, 1+1, 1+1+1)"""
+ self.unchanged(s)
+
+ s = """print()"""
+ self.unchanged(s)
+
+ s = """print('')"""
+ self.unchanged(s)
+
+ def test_1(self):
+ b = """print 1, 1+1, 1+1+1"""
+ a = """print(1, 1+1, 1+1+1)"""
+ self.check(b, a)
+
+ def test_2(self):
+ b = """print 1, 2"""
+ a = """print(1, 2)"""
+ self.check(b, a)
+
+ def test_3(self):
+ b = """print"""
+ a = """print()"""
+ self.check(b, a)
+
+ def test_4(self):
+ # from bug 3000
+ b = """print whatever; print"""
+ a = """print(whatever); print()"""
+ self.check(b, a)
+
+ def test_5(self):
+ b = """print; print whatever;"""
+ a = """print(); print(whatever);"""
+ self.check(b, a)
+
+ def test_tuple(self):
+ b = """print (a, b, c)"""
+ a = """print((a, b, c))"""
+ self.check(b, a)
+
+ # trailing commas
+
+ def test_trailing_comma_1(self):
+ b = """print 1, 2, 3,"""
+ a = """print(1, 2, 3, end=' ')"""
+ self.check(b, a)
+
+ def test_trailing_comma_2(self):
+ b = """print 1, 2,"""
+ a = """print(1, 2, end=' ')"""
+ self.check(b, a)
+
+ def test_trailing_comma_3(self):
+ b = """print 1,"""
+ a = """print(1, end=' ')"""
+ self.check(b, a)
+
+ # >> stuff
+
+ def test_vargs_without_trailing_comma(self):
+ b = """print >>sys.stderr, 1, 2, 3"""
+ a = """print(1, 2, 3, file=sys.stderr)"""
+ self.check(b, a)
+
+ def test_with_trailing_comma(self):
+ b = """print >>sys.stderr, 1, 2,"""
+ a = """print(1, 2, end=' ', file=sys.stderr)"""
+ self.check(b, a)
+
+ def test_no_trailing_comma(self):
+ b = """print >>sys.stderr, 1+1"""
+ a = """print(1+1, file=sys.stderr)"""
+ self.check(b, a)
+
+ def test_spaces_before_file(self):
+ b = """print >> sys.stderr"""
+ a = """print(file=sys.stderr)"""
+ self.check(b, a)
+
+ def test_with_future_print_function(self):
+ s = "from __future__ import print_function\n" \
+ "print('Hai!', end=' ')"
+ self.unchanged(s)
+
+ b = "print 'Hello, world!'"
+ a = "print('Hello, world!')"
+ self.check(b, a)
+
+
+class Test_exec(FixerTestCase):
+ fixer = "exec"
+
+ def test_prefix_preservation(self):
+ b = """ exec code in ns1, ns2"""
+ a = """ exec(code, ns1, ns2)"""
+ self.check(b, a)
+
+ def test_basic(self):
+ b = """exec code"""
+ a = """exec(code)"""
+ self.check(b, a)
+
+ def test_with_globals(self):
+ b = """exec code in ns"""
+ a = """exec(code, ns)"""
+ self.check(b, a)
+
+ def test_with_globals_locals(self):
+ b = """exec code in ns1, ns2"""
+ a = """exec(code, ns1, ns2)"""
+ self.check(b, a)
+
+ def test_complex_1(self):
+ b = """exec (a.b()) in ns"""
+ a = """exec((a.b()), ns)"""
+ self.check(b, a)
+
+ def test_complex_2(self):
+ b = """exec a.b() + c in ns"""
+ a = """exec(a.b() + c, ns)"""
+ self.check(b, a)
+
+ # These should not be touched
+
+ def test_unchanged_1(self):
+ s = """exec(code)"""
+ self.unchanged(s)
+
+ def test_unchanged_2(self):
+ s = """exec (code)"""
+ self.unchanged(s)
+
+ def test_unchanged_3(self):
+ s = """exec(code, ns)"""
+ self.unchanged(s)
+
+ def test_unchanged_4(self):
+ s = """exec(code, ns1, ns2)"""
+ self.unchanged(s)
+
+class Test_repr(FixerTestCase):
+ fixer = "repr"
+
+ def test_prefix_preservation(self):
+ b = """x = `1 + 2`"""
+ a = """x = repr(1 + 2)"""
+ self.check(b, a)
+
+ def test_simple_1(self):
+ b = """x = `1 + 2`"""
+ a = """x = repr(1 + 2)"""
+ self.check(b, a)
+
+ def test_simple_2(self):
+ b = """y = `x`"""
+ a = """y = repr(x)"""
+ self.check(b, a)
+
+ def test_complex(self):
+ b = """z = `y`.__repr__()"""
+ a = """z = repr(y).__repr__()"""
+ self.check(b, a)
+
+ def test_tuple(self):
+ b = """x = `1, 2, 3`"""
+ a = """x = repr((1, 2, 3))"""
+ self.check(b, a)
+
+ def test_nested(self):
+ b = """x = `1 + `2``"""
+ a = """x = repr(1 + repr(2))"""
+ self.check(b, a)
+
+ def test_nested_tuples(self):
+ b = """x = `1, 2 + `3, 4``"""
+ a = """x = repr((1, 2 + repr((3, 4))))"""
+ self.check(b, a)
+
+class Test_except(FixerTestCase):
+ fixer = "except"
+
+ def test_prefix_preservation(self):
+ b = """
+ try:
+ pass
+ except (RuntimeError, ImportError), e:
+ pass"""
+ a = """
+ try:
+ pass
+ except (RuntimeError, ImportError) as e:
+ pass"""
+ self.check(b, a)
+
+ def test_simple(self):
+ b = """
+ try:
+ pass
+ except Foo, e:
+ pass"""
+ a = """
+ try:
+ pass
+ except Foo as e:
+ pass"""
+ self.check(b, a)
+
+ def test_simple_no_space_before_target(self):
+ b = """
+ try:
+ pass
+ except Foo,e:
+ pass"""
+ a = """
+ try:
+ pass
+ except Foo as e:
+ pass"""
+ self.check(b, a)
+
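+ # When the old comma target is not a simple name, there is no valid
+ # 'as' target, so the fixer binds the exception to the placeholder name
+ # xxx_todo_changeme and assigns it to the real target inside the suite.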
+ def test_tuple_unpack(self):
+ b = """
+ def foo():
+ try:
+ pass
+ except Exception, (f, e):
+ pass
+ except ImportError, e:
+ pass"""
+
+ a = """
+ def foo():
+ try:
+ pass
+ except Exception as xxx_todo_changeme:
+ (f, e) = xxx_todo_changeme.args
+ pass
+ except ImportError as e:
+ pass"""
+ self.check(b, a)
+
+ def test_multi_class(self):
+ b = """
+ try:
+ pass
+ except (RuntimeError, ImportError), e:
+ pass"""
+
+ a = """
+ try:
+ pass
+ except (RuntimeError, ImportError) as e:
+ pass"""
+ self.check(b, a)
+
+ def test_list_unpack(self):
+ b = """
+ try:
+ pass
+ except Exception, [a, b]:
+ pass"""
+
+ a = """
+ try:
+ pass
+ except Exception as xxx_todo_changeme:
+ [a, b] = xxx_todo_changeme.args
+ pass"""
+ self.check(b, a)
+
+ def test_weird_target_1(self):
+ b = """
+ try:
+ pass
+ except Exception, d[5]:
+ pass"""
+
+ a = """
+ try:
+ pass
+ except Exception as xxx_todo_changeme:
+ d[5] = xxx_todo_changeme
+ pass"""
+ self.check(b, a)
+
+ def test_weird_target_2(self):
+ b = """
+ try:
+ pass
+ except Exception, a.foo:
+ pass"""
+
+ a = """
+ try:
+ pass
+ except Exception as xxx_todo_changeme:
+ a.foo = xxx_todo_changeme
+ pass"""
+ self.check(b, a)
+
+ def test_weird_target_3(self):
+ b = """
+ try:
+ pass
+ except Exception, a().foo:
+ pass"""
+
+ a = """
+ try:
+ pass
+ except Exception as xxx_todo_changeme:
+ a().foo = xxx_todo_changeme
+ pass"""
+ self.check(b, a)
+
+ def test_bare_except(self):
+ b = """
+ try:
+ pass
+ except Exception, a:
+ pass
+ except:
+ pass"""
+
+ a = """
+ try:
+ pass
+ except Exception as a:
+ pass
+ except:
+ pass"""
+ self.check(b, a)
+
+ def test_bare_except_and_else_finally(self):
+ b = """
+ try:
+ pass
+ except Exception, a:
+ pass
+ except:
+ pass
+ else:
+ pass
+ finally:
+ pass"""
+
+ a = """
+ try:
+ pass
+ except Exception as a:
+ pass
+ except:
+ pass
+ else:
+ pass
+ finally:
+ pass"""
+ self.check(b, a)
+
+ def test_multi_fixed_excepts_before_bare_except(self):
+ b = """
+ try:
+ pass
+ except TypeError, b:
+ pass
+ except Exception, a:
+ pass
+ except:
+ pass"""
+
+ a = """
+ try:
+ pass
+ except TypeError as b:
+ pass
+ except Exception as a:
+ pass
+ except:
+ pass"""
+ self.check(b, a)
+
+ def test_one_line_suites(self):
+ b = """
+ try: raise TypeError
+ except TypeError, e:
+ pass
+ """
+ a = """
+ try: raise TypeError
+ except TypeError as e:
+ pass
+ """
+ self.check(b, a)
+ b = """
+ try:
+ raise TypeError
+ except TypeError, e: pass
+ """
+ a = """
+ try:
+ raise TypeError
+ except TypeError as e: pass
+ """
+ self.check(b, a)
+ b = """
+ try: raise TypeError
+ except TypeError, e: pass
+ """
+ a = """
+ try: raise TypeError
+ except TypeError as e: pass
+ """
+ self.check(b, a)
+ b = """
+ try: raise TypeError
+ except TypeError, e: pass
+ else: function()
+ finally: done()
+ """
+ a = """
+ try: raise TypeError
+ except TypeError as e: pass
+ else: function()
+ finally: done()
+ """
+ self.check(b, a)
+
+ # These should not be touched:
+
+ def test_unchanged_1(self):
+ s = """
+ try:
+ pass
+ except:
+ pass"""
+ self.unchanged(s)
+
+ def test_unchanged_2(self):
+ s = """
+ try:
+ pass
+ except Exception:
+ pass"""
+ self.unchanged(s)
+
+ def test_unchanged_3(self):
+ s = """
+ try:
+ pass
+ except (Exception, SystemExit):
+ pass"""
+ self.unchanged(s)
+
+class Test_raise(FixerTestCase):
+ fixer = "raise"
+
+ def test_basic(self):
+ b = """raise Exception, 5"""
+ a = """raise Exception(5)"""
+ self.check(b, a)
+
+ def test_prefix_preservation(self):
+ b = """raise Exception,5"""
+ a = """raise Exception(5)"""
+ self.check(b, a)
+
+ b = """raise Exception, 5"""
+ a = """raise Exception(5)"""
+ self.check(b, a)
+
+ def test_with_comments(self):
+ b = """raise Exception, 5 # foo"""
+ a = """raise Exception(5) # foo"""
+ self.check(b, a)
+
+ b = """raise E, (5, 6) % (a, b) # foo"""
+ a = """raise E((5, 6) % (a, b)) # foo"""
+ self.check(b, a)
+
+ b = """def foo():
+ raise Exception, 5, 6 # foo"""
+ a = """def foo():
+ raise Exception(5).with_traceback(6) # foo"""
+ self.check(b, a)
+
+ def test_None_value(self):
+ b = """raise Exception(5), None, tb"""
+ a = """raise Exception(5).with_traceback(tb)"""
+ self.check(b, a)
+
+ def test_tuple_value(self):
+ b = """raise Exception, (5, 6, 7)"""
+ a = """raise Exception(5, 6, 7)"""
+ self.check(b, a)
+
+ def test_tuple_detection(self):
+ b = """raise E, (5, 6) % (a, b)"""
+ a = """raise E((5, 6) % (a, b))"""
+ self.check(b, a)
+
+ def test_tuple_exc_1(self):
+ b = """raise (((E1, E2), E3), E4), V"""
+ a = """raise E1(V)"""
+ self.check(b, a)
+
+ def test_tuple_exc_2(self):
+ b = """raise (E1, (E2, E3), E4), V"""
+ a = """raise E1(V)"""
+ self.check(b, a)
+
+ # These should produce a warning
+
+ def test_string_exc(self):
+ s = """raise 'foo'"""
+ self.warns_unchanged(s, "Python 3 does not support string exceptions")
+
+ def test_string_exc_val(self):
+ s = """raise "foo", 5"""
+ self.warns_unchanged(s, "Python 3 does not support string exceptions")
+
+ def test_string_exc_val_tb(self):
+ s = """raise "foo", 5, 6"""
+ self.warns_unchanged(s, "Python 3 does not support string exceptions")
+
+ # These should result in traceback-assignment
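+ # ("raise E, V, T" carries an explicit traceback, so the fixer emits
+ # the Python 3 idiom "raise E(V).with_traceback(T)")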
+
+ def test_tb_1(self):
+ b = """def foo():
+ raise Exception, 5, 6"""
+ a = """def foo():
+ raise Exception(5).with_traceback(6)"""
+ self.check(b, a)
+
+ def test_tb_2(self):
+ b = """def foo():
+ a = 5
+ raise Exception, 5, 6
+ b = 6"""
+ a = """def foo():
+ a = 5
+ raise Exception(5).with_traceback(6)
+ b = 6"""
+ self.check(b, a)
+
+ def test_tb_3(self):
+ b = """def foo():
+ raise Exception,5,6"""
+ a = """def foo():
+ raise Exception(5).with_traceback(6)"""
+ self.check(b, a)
+
+ def test_tb_4(self):
+ b = """def foo():
+ a = 5
+ raise Exception,5,6
+ b = 6"""
+ a = """def foo():
+ a = 5
+ raise Exception(5).with_traceback(6)
+ b = 6"""
+ self.check(b, a)
+
+ def test_tb_5(self):
+ b = """def foo():
+ raise Exception, (5, 6, 7), 6"""
+ a = """def foo():
+ raise Exception(5, 6, 7).with_traceback(6)"""
+ self.check(b, a)
+
+ def test_tb_6(self):
+ b = """def foo():
+ a = 5
+ raise Exception, (5, 6, 7), 6
+ b = 6"""
+ a = """def foo():
+ a = 5
+ raise Exception(5, 6, 7).with_traceback(6)
+ b = 6"""
+ self.check(b, a)
+
+class Test_throw(FixerTestCase):
+ fixer = "throw"
+
+ def test_1(self):
+ b = """g.throw(Exception, 5)"""
+ a = """g.throw(Exception(5))"""
+ self.check(b, a)
+
+ def test_2(self):
+ b = """g.throw(Exception,5)"""
+ a = """g.throw(Exception(5))"""
+ self.check(b, a)
+
+ def test_3(self):
+ b = """g.throw(Exception, (5, 6, 7))"""
+ a = """g.throw(Exception(5, 6, 7))"""
+ self.check(b, a)
+
+ def test_4(self):
+ b = """5 + g.throw(Exception, 5)"""
+ a = """5 + g.throw(Exception(5))"""
+ self.check(b, a)
+
+ # These should produce warnings
+
+ def test_warn_1(self):
+ s = """g.throw("foo")"""
+ self.warns_unchanged(s, "Python 3 does not support string exceptions")
+
+ def test_warn_2(self):
+ s = """g.throw("foo", 5)"""
+ self.warns_unchanged(s, "Python 3 does not support string exceptions")
+
+ def test_warn_3(self):
+ s = """g.throw("foo", 5, 6)"""
+ self.warns_unchanged(s, "Python 3 does not support string exceptions")
+
+ # These should not be touched
+
+ def test_untouched_1(self):
+ s = """g.throw(Exception)"""
+ self.unchanged(s)
+
+ def test_untouched_2(self):
+ s = """g.throw(Exception(5, 6))"""
+ self.unchanged(s)
+
+ def test_untouched_3(self):
+ s = """5 + g.throw(Exception(5, 6))"""
+ self.unchanged(s)
+
+ # These should result in traceback-assignment
+
+ def test_tb_1(self):
+ b = """def foo():
+ g.throw(Exception, 5, 6)"""
+ a = """def foo():
+ g.throw(Exception(5).with_traceback(6))"""
+ self.check(b, a)
+
+ def test_tb_2(self):
+ b = """def foo():
+ a = 5
+ g.throw(Exception, 5, 6)
+ b = 6"""
+ a = """def foo():
+ a = 5
+ g.throw(Exception(5).with_traceback(6))
+ b = 6"""
+ self.check(b, a)
+
+ def test_tb_3(self):
+ b = """def foo():
+ g.throw(Exception,5,6)"""
+ a = """def foo():
+ g.throw(Exception(5).with_traceback(6))"""
+ self.check(b, a)
+
+ def test_tb_4(self):
+ b = """def foo():
+ a = 5
+ g.throw(Exception,5,6)
+ b = 6"""
+ a = """def foo():
+ a = 5
+ g.throw(Exception(5).with_traceback(6))
+ b = 6"""
+ self.check(b, a)
+
+ def test_tb_5(self):
+ b = """def foo():
+ g.throw(Exception, (5, 6, 7), 6)"""
+ a = """def foo():
+ g.throw(Exception(5, 6, 7).with_traceback(6))"""
+ self.check(b, a)
+
+ def test_tb_6(self):
+ b = """def foo():
+ a = 5
+ g.throw(Exception, (5, 6, 7), 6)
+ b = 6"""
+ a = """def foo():
+ a = 5
+ g.throw(Exception(5, 6, 7).with_traceback(6))
+ b = 6"""
+ self.check(b, a)
+
+ def test_tb_7(self):
+ b = """def foo():
+ a + g.throw(Exception, 5, 6)"""
+ a = """def foo():
+ a + g.throw(Exception(5).with_traceback(6))"""
+ self.check(b, a)
+
+ def test_tb_8(self):
+ b = """def foo():
+ a = 5
+ a + g.throw(Exception, 5, 6)
+ b = 6"""
+ a = """def foo():
+ a = 5
+ a + g.throw(Exception(5).with_traceback(6))
+ b = 6"""
+ self.check(b, a)
+
+class Test_long(FixerTestCase):
+ fixer = "long"
+
+ def test_1(self):
+ b = """x = long(x)"""
+ a = """x = int(x)"""
+ self.check(b, a)
+
+ def test_2(self):
+ b = """y = isinstance(x, long)"""
+ a = """y = isinstance(x, int)"""
+ self.check(b, a)
+
+ def test_3(self):
+ b = """z = type(x) in (int, long)"""
+ a = """z = type(x) in (int, int)"""
+ self.check(b, a)
+
+ def test_unchanged(self):
+ s = """long = True"""
+ self.unchanged(s)
+
+ s = """s.long = True"""
+ self.unchanged(s)
+
+ s = """def long(): pass"""
+ self.unchanged(s)
+
+ s = """class long(): pass"""
+ self.unchanged(s)
+
+ s = """def f(long): pass"""
+ self.unchanged(s)
+
+ s = """def f(g, long): pass"""
+ self.unchanged(s)
+
+ s = """def f(x, long=True): pass"""
+ self.unchanged(s)
+
+ def test_prefix_preservation(self):
+ b = """x = long( x )"""
+ a = """x = int( x )"""
+ self.check(b, a)
+
+
+class Test_execfile(FixerTestCase):
+ fixer = "execfile"
+
+ def test_conversion(self):
+ b = """execfile("fn")"""
+ a = """exec(compile(open("fn").read(), "fn", 'exec'))"""
+ self.check(b, a)
+
+ b = """execfile("fn", glob)"""
+ a = """exec(compile(open("fn").read(), "fn", 'exec'), glob)"""
+ self.check(b, a)
+
+ b = """execfile("fn", glob, loc)"""
+ a = """exec(compile(open("fn").read(), "fn", 'exec'), glob, loc)"""
+ self.check(b, a)
+
+ b = """execfile("fn", globals=glob)"""
+ a = """exec(compile(open("fn").read(), "fn", 'exec'), globals=glob)"""
+ self.check(b, a)
+
+ b = """execfile("fn", locals=loc)"""
+ a = """exec(compile(open("fn").read(), "fn", 'exec'), locals=loc)"""
+ self.check(b, a)
+
+ b = """execfile("fn", globals=glob, locals=loc)"""
+ a = """exec(compile(open("fn").read(), "fn", 'exec'), globals=glob, locals=loc)"""
+ self.check(b, a)
+
+ def test_spacing(self):
+ b = """execfile( "fn" )"""
+ a = """exec(compile(open( "fn" ).read(), "fn", 'exec'))"""
+ self.check(b, a)
+
+ b = """execfile("fn", globals = glob)"""
+ a = """exec(compile(open("fn").read(), "fn", 'exec'), globals = glob)"""
+ self.check(b, a)
+
+
+class Test_isinstance(FixerTestCase):
+ fixer = "isinstance"
+
+ def test_remove_multiple_items(self):
+ b = """isinstance(x, (int, int, int))"""
+ a = """isinstance(x, int)"""
+ self.check(b, a)
+
+ b = """isinstance(x, (int, float, int, int, float))"""
+ a = """isinstance(x, (int, float))"""
+ self.check(b, a)
+
+ b = """isinstance(x, (int, float, int, int, float, str))"""
+ a = """isinstance(x, (int, float, str))"""
+ self.check(b, a)
+
+ b = """isinstance(foo() + bar(), (x(), y(), x(), int, int))"""
+ a = """isinstance(foo() + bar(), (x(), y(), x(), int))"""
+ self.check(b, a)
+
+ def test_prefix_preservation(self):
+ b = """if isinstance( foo(), ( bar, bar, baz )) : pass"""
+ a = """if isinstance( foo(), ( bar, baz )) : pass"""
+ self.check(b, a)
+
+ def test_unchanged(self):
+ self.unchanged("isinstance(x, (str, int))")
+
+class Test_dict(FixerTestCase):
+ fixer = "dict"
+
+ def test_prefix_preservation(self):
+ b = "if d. keys ( ) : pass"
+ a = "if list(d. keys ( )) : pass"
+ self.check(b, a)
+
+ b = "if d. items ( ) : pass"
+ a = "if list(d. items ( )) : pass"
+ self.check(b, a)
+
+ b = "if d. iterkeys ( ) : pass"
+ a = "if iter(d. keys ( )) : pass"
+ self.check(b, a)
+
+ b = "[i for i in d. iterkeys( ) ]"
+ a = "[i for i in d. keys( ) ]"
+ self.check(b, a)
+
+ b = "if d. viewkeys ( ) : pass"
+ a = "if d. keys ( ) : pass"
+ self.check(b, a)
+
+ b = "[i for i in d. viewkeys( ) ]"
+ a = "[i for i in d. keys( ) ]"
+ self.check(b, a)
+
+ def test_trailing_comment(self):
+ b = "d.keys() # foo"
+ a = "list(d.keys()) # foo"
+ self.check(b, a)
+
+ b = "d.items() # foo"
+ a = "list(d.items()) # foo"
+ self.check(b, a)
+
+ b = "d.iterkeys() # foo"
+ a = "iter(d.keys()) # foo"
+ self.check(b, a)
+
+ b = """[i for i in d.iterkeys() # foo
+ ]"""
+ a = """[i for i in d.keys() # foo
+ ]"""
+ self.check(b, a)
+
+ b = "d.viewitems() # foo"
+ a = "d.items() # foo"
+ self.check(b, a)
+
+ def test_unchanged(self):
+ for wrapper in fixer_util.consuming_calls:
+ s = "s = %s(d.keys())" % wrapper
+ self.unchanged(s)
+
+ s = "s = %s(d.values())" % wrapper
+ self.unchanged(s)
+
+ s = "s = %s(d.items())" % wrapper
+ self.unchanged(s)
+
+ def test_01(self):
+ b = "d.keys()"
+ a = "list(d.keys())"
+ self.check(b, a)
+
+ b = "a[0].foo().keys()"
+ a = "list(a[0].foo().keys())"
+ self.check(b, a)
+
+ def test_02(self):
+ b = "d.items()"
+ a = "list(d.items())"
+ self.check(b, a)
+
+ def test_03(self):
+ b = "d.values()"
+ a = "list(d.values())"
+ self.check(b, a)
+
+ def test_04(self):
+ b = "d.iterkeys()"
+ a = "iter(d.keys())"
+ self.check(b, a)
+
+ def test_05(self):
+ b = "d.iteritems()"
+ a = "iter(d.items())"
+ self.check(b, a)
+
+ def test_06(self):
+ b = "d.itervalues()"
+ a = "iter(d.values())"
+ self.check(b, a)
+
+ def test_07(self):
+ s = "list(d.keys())"
+ self.unchanged(s)
+
+ def test_08(self):
+ s = "sorted(d.keys())"
+ self.unchanged(s)
+
+ def test_09(self):
+ b = "iter(d.keys())"
+ a = "iter(list(d.keys()))"
+ self.check(b, a)
+
+ def test_10(self):
+ b = "foo(d.keys())"
+ a = "foo(list(d.keys()))"
+ self.check(b, a)
+
+ def test_11(self):
+ b = "for i in d.keys(): print i"
+ a = "for i in list(d.keys()): print i"
+ self.check(b, a)
+
+ def test_12(self):
+ b = "for i in d.iterkeys(): print i"
+ a = "for i in d.keys(): print i"
+ self.check(b, a)
+
+ def test_13(self):
+ b = "[i for i in d.keys()]"
+ a = "[i for i in list(d.keys())]"
+ self.check(b, a)
+
+ def test_14(self):
+ b = "[i for i in d.iterkeys()]"
+ a = "[i for i in d.keys()]"
+ self.check(b, a)
+
+ def test_15(self):
+ b = "(i for i in d.keys())"
+ a = "(i for i in list(d.keys()))"
+ self.check(b, a)
+
+ def test_16(self):
+ b = "(i for i in d.iterkeys())"
+ a = "(i for i in d.keys())"
+ self.check(b, a)
+
+ def test_17(self):
+ b = "iter(d.iterkeys())"
+ a = "iter(d.keys())"
+ self.check(b, a)
+
+ def test_18(self):
+ b = "list(d.iterkeys())"
+ a = "list(d.keys())"
+ self.check(b, a)
+
+ def test_19(self):
+ b = "sorted(d.iterkeys())"
+ a = "sorted(d.keys())"
+ self.check(b, a)
+
+ def test_20(self):
+ b = "foo(d.iterkeys())"
+ a = "foo(iter(d.keys()))"
+ self.check(b, a)
+
+ def test_21(self):
+ b = "print h.iterkeys().next()"
+ a = "print iter(h.keys()).next()"
+ self.check(b, a)
+
+ def test_22(self):
+ b = "print h.keys()[0]"
+ a = "print list(h.keys())[0]"
+ self.check(b, a)
+
+ def test_23(self):
+ b = "print list(h.iterkeys().next())"
+ a = "print list(iter(h.keys()).next())"
+ self.check(b, a)
+
+ def test_24(self):
+ b = "for x in h.keys()[0]: print x"
+ a = "for x in list(h.keys())[0]: print x"
+ self.check(b, a)
+
+ def test_25(self):
+ b = "d.viewkeys()"
+ a = "d.keys()"
+ self.check(b, a)
+
+ def test_26(self):
+ b = "d.viewitems()"
+ a = "d.items()"
+ self.check(b, a)
+
+ def test_27(self):
+ b = "d.viewvalues()"
+ a = "d.values()"
+ self.check(b, a)
+
+ def test_28(self):
+ b = "[i for i in d.viewkeys()]"
+ a = "[i for i in d.keys()]"
+ self.check(b, a)
+
+ def test_29(self):
+ b = "(i for i in d.viewkeys())"
+ a = "(i for i in d.keys())"
+ self.check(b, a)
+
+ def test_30(self):
+ b = "iter(d.viewkeys())"
+ a = "iter(d.keys())"
+ self.check(b, a)
+
+ def test_31(self):
+ b = "list(d.viewkeys())"
+ a = "list(d.keys())"
+ self.check(b, a)
+
+ def test_32(self):
+ b = "sorted(d.viewkeys())"
+ a = "sorted(d.keys())"
+ self.check(b, a)
+
+class Test_xrange(FixerTestCase):
+ fixer = "xrange"
+
+ def test_prefix_preservation(self):
+ b = """x = xrange( 10 )"""
+ a = """x = range( 10 )"""
+ self.check(b, a)
+
+ b = """x = xrange( 1 , 10 )"""
+ a = """x = range( 1 , 10 )"""
+ self.check(b, a)
+
+ b = """x = xrange( 0 , 10 , 2 )"""
+ a = """x = range( 0 , 10 , 2 )"""
+ self.check(b, a)
+
+ def test_single_arg(self):
+ b = """x = xrange(10)"""
+ a = """x = range(10)"""
+ self.check(b, a)
+
+ def test_two_args(self):
+ b = """x = xrange(1, 10)"""
+ a = """x = range(1, 10)"""
+ self.check(b, a)
+
+ def test_three_args(self):
+ b = """x = xrange(0, 10, 2)"""
+ a = """x = range(0, 10, 2)"""
+ self.check(b, a)
+
+ def test_wrap_in_list(self):
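+ # Python 2 range() returned a list, so bare range() calls used where
+ # a list is expected are wrapped in list() as well.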
+ b = """x = range(10, 3, 9)"""
+ a = """x = list(range(10, 3, 9))"""
+ self.check(b, a)
+
+ b = """x = foo(range(10, 3, 9))"""
+ a = """x = foo(list(range(10, 3, 9)))"""
+ self.check(b, a)
+
+ b = """x = range(10, 3, 9) + [4]"""
+ a = """x = list(range(10, 3, 9)) + [4]"""
+ self.check(b, a)
+
+ b = """x = range(10)[::-1]"""
+ a = """x = list(range(10))[::-1]"""
+ self.check(b, a)
+
+ b = """x = range(10) [3]"""
+ a = """x = list(range(10)) [3]"""
+ self.check(b, a)
+
+ def test_xrange_in_for(self):
+ b = """for i in xrange(10):\n j=i"""
+ a = """for i in range(10):\n j=i"""
+ self.check(b, a)
+
+ b = """[i for i in xrange(10)]"""
+ a = """[i for i in range(10)]"""
+ self.check(b, a)
+
+ def test_range_in_for(self):
+ self.unchanged("for i in range(10): pass")
+ self.unchanged("[i for i in range(10)]")
+
+ def test_in_contains_test(self):
+ self.unchanged("x in range(10, 3, 9)")
+
+ def test_in_consuming_context(self):
+ for call in fixer_util.consuming_calls:
+ self.unchanged("a = %s(range(10))" % call)
+
+class Test_xrange_with_reduce(FixerTestCase):
+
+ def setUp(self):
+ super(Test_xrange_with_reduce, self).setUp(["xrange", "reduce"])
+
+ def test_double_transform(self):
+ b = """reduce(x, xrange(5))"""
+ a = """from functools import reduce
+reduce(x, range(5))"""
+ self.check(b, a)
+
+class Test_raw_input(FixerTestCase):
+ fixer = "raw_input"
+
+ def test_prefix_preservation(self):
+ b = """x = raw_input( )"""
+ a = """x = input( )"""
+ self.check(b, a)
+
+ b = """x = raw_input( '' )"""
+ a = """x = input( '' )"""
+ self.check(b, a)
+
+ def test_1(self):
+ b = """x = raw_input()"""
+ a = """x = input()"""
+ self.check(b, a)
+
+ def test_2(self):
+ b = """x = raw_input('')"""
+ a = """x = input('')"""
+ self.check(b, a)
+
+ def test_3(self):
+ b = """x = raw_input('prompt')"""
+ a = """x = input('prompt')"""
+ self.check(b, a)
+
+ def test_4(self):
+ b = """x = raw_input(foo(a) + 6)"""
+ a = """x = input(foo(a) + 6)"""
+ self.check(b, a)
+
+ def test_5(self):
+ b = """x = raw_input(invite).split()"""
+ a = """x = input(invite).split()"""
+ self.check(b, a)
+
+ def test_6(self):
+ b = """x = raw_input(invite) . split ()"""
+ a = """x = input(invite) . split ()"""
+ self.check(b, a)
+
+ def test_8(self):
+ b = "x = int(raw_input())"
+ a = "x = int(input())"
+ self.check(b, a)
+
+class Test_funcattrs(FixerTestCase):
+ fixer = "funcattrs"
+
+ attrs = ["closure", "doc", "name", "defaults", "code", "globals", "dict"]
+
+ def test(self):
+ for attr in self.attrs:
+ b = "a.func_%s" % attr
+ a = "a.__%s__" % attr
+ self.check(b, a)
+
+ b = "self.foo.func_%s.foo_bar" % attr
+ a = "self.foo.__%s__.foo_bar" % attr
+ self.check(b, a)
+
+ def test_unchanged(self):
+ for attr in self.attrs:
+ s = "foo(func_%s + 5)" % attr
+ self.unchanged(s)
+
+ s = "f(foo.__%s__)" % attr
+ self.unchanged(s)
+
+ s = "f(foo.__%s__.foo)" % attr
+ self.unchanged(s)
+
+class Test_xreadlines(FixerTestCase):
+ fixer = "xreadlines"
+
+ def test_call(self):
+ b = "for x in f.xreadlines(): pass"
+ a = "for x in f: pass"
+ self.check(b, a)
+
+ b = "for x in foo().xreadlines(): pass"
+ a = "for x in foo(): pass"
+ self.check(b, a)
+
+ b = "for x in (5 + foo()).xreadlines(): pass"
+ a = "for x in (5 + foo()): pass"
+ self.check(b, a)
+
+ def test_attr_ref(self):
+ b = "foo(f.xreadlines + 5)"
+ a = "foo(f.__iter__ + 5)"
+ self.check(b, a)
+
+ b = "foo(f().xreadlines + 5)"
+ a = "foo(f().__iter__ + 5)"
+ self.check(b, a)
+
+ b = "foo((5 + f()).xreadlines + 5)"
+ a = "foo((5 + f()).__iter__ + 5)"
+ self.check(b, a)
+
+ def test_unchanged(self):
+ s = "for x in f.xreadlines(5): pass"
+ self.unchanged(s)
+
+ s = "for x in f.xreadlines(k=5): pass"
+ self.unchanged(s)
+
+ s = "for x in f.xreadlines(*k, **v): pass"
+ self.unchanged(s)
+
+ s = "foo(xreadlines)"
+ self.unchanged(s)
+
+
+class ImportsFixerTests:
+
+ def test_import_module(self):
+ for old, new in self.modules.items():
+ b = "import %s" % old
+ a = "import %s" % new
+ self.check(b, a)
+
+ b = "import foo, %s, bar" % old
+ a = "import foo, %s, bar" % new
+ self.check(b, a)
+
+ def test_import_from(self):
+ for old, new in self.modules.items():
+ b = "from %s import foo" % old
+ a = "from %s import foo" % new
+ self.check(b, a)
+
+ b = "from %s import foo, bar" % old
+ a = "from %s import foo, bar" % new
+ self.check(b, a)
+
+ b = "from %s import (yes, no)" % old
+ a = "from %s import (yes, no)" % new
+ self.check(b, a)
+
+ def test_import_module_as(self):
+ for old, new in self.modules.items():
+ b = "import %s as foo_bar" % old
+ a = "import %s as foo_bar" % new
+ self.check(b, a)
+
+ def test_import_from_as(self):
+ for old, new in self.modules.items():
+ b = "from %s import foo as bar" % old
+ a = "from %s import foo as bar" % new
+ self.check(b, a)
+
+ def test_star(self):
+ for old, new in self.modules.items():
+ b = "from %s import *" % old
+ a = "from %s import *" % new
+ self.check(b, a)
+
+ def test_import_module_usage(self):
+ for old, new in self.modules.items():
+ b = """
+ import %s
+ foo(%s.bar)
+ """ % (old, old)
+ a = """
+ import %s
+ foo(%s.bar)
+ """ % (new, new)
+ self.check(b, a)
+
+ b = """
+ from %s import x
+ %s = 23
+ """ % (old, old)
+ a = """
+ from %s import x
+ %s = 23
+ """ % (new, old)
+ self.check(b, a)
+
+ s = """
+ def f():
+ %s.method()
+ """ % (old,)
+ self.unchanged(s)
+
+ # test nested usage
+ b = """
+ import %s
+ %s.bar(%s.foo)
+ """ % (old, old, old)
+ a = """
+ import %s
+ %s.bar(%s.foo)
+ """ % (new, new, new)
+ self.check(b, a)
+
+ b = """
+ import %s
+ x.%s
+ """ % (old, old)
+ a = """
+ import %s
+ x.%s
+ """ % (new, old)
+ self.check(b, a)
+
+
+class Test_imports(FixerTestCase, ImportsFixerTests):
+ fixer = "imports"
+ from ..fixes.fix_imports import MAPPING as modules
+
+ def test_multiple_imports(self):
+ b = """import urlparse, cStringIO"""
+ a = """import urllib.parse, io"""
+ self.check(b, a)
+
+ def test_multiple_imports_as(self):
+ b = """
+ import copy_reg as bar, HTMLParser as foo, urlparse
+ s = urlparse.spam(bar.foo())
+ """
+ a = """
+ import copyreg as bar, html.parser as foo, urllib.parse
+ s = urllib.parse.spam(bar.foo())
+ """
+ self.check(b, a)
+
+
+class Test_imports2(FixerTestCase, ImportsFixerTests):
+ fixer = "imports2"
+ from ..fixes.fix_imports2 import MAPPING as modules
+
+
+class Test_imports_fixer_order(FixerTestCase, ImportsFixerTests):
+
+ def setUp(self):
+ super(Test_imports_fixer_order, self).setUp(['imports', 'imports2'])
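+ # Run the imports and imports2 fixers together; the expected renames
+ # mix imports2's mapping with the dbm-family entries from fix_imports.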
+ from ..fixes.fix_imports2 import MAPPING as mapping2
+ self.modules = mapping2.copy()
+ from ..fixes.fix_imports import MAPPING as mapping1
+ for key in ('dbhash', 'dumbdbm', 'dbm', 'gdbm'):
+ self.modules[key] = mapping1[key]
+
+ def test_after_local_imports_refactoring(self):
+ for fix in ("imports", "imports2"):
+ self.fixer = fix
+ self.assert_runs_after("import")
+
+
+class Test_urllib(FixerTestCase):
+ fixer = "urllib"
+ from ..fixes.fix_urllib import MAPPING as modules
+
+ def test_import_module(self):
+ for old, changes in self.modules.items():
+ b = "import %s" % old
+ a = "import %s" % ", ".join(map(itemgetter(0), changes))
+ self.check(b, a)
+
+ def test_import_from(self):
+ for old, changes in self.modules.items():
+ all_members = []
+ for new, members in changes:
+ for member in members:
+ all_members.append(member)
+ b = "from %s import %s" % (old, member)
+ a = "from %s import %s" % (new, member)
+ self.check(b, a)
+
+ s = "from foo import %s" % member
+ self.unchanged(s)
+
+ b = "from %s import %s" % (old, ", ".join(members))
+ a = "from %s import %s" % (new, ", ".join(members))
+ self.check(b, a)
+
+ s = "from foo import %s" % ", ".join(members)
+ self.unchanged(s)
+
+ # test the breaking of a module into multiple replacements
+ b = "from %s import %s" % (old, ", ".join(all_members))
+ a = "\n".join(["from %s import %s" % (new, ", ".join(members))
+ for (new, members) in changes])
+ self.check(b, a)
+
+ def test_import_module_as(self):
+ for old in self.modules:
+ s = "import %s as foo" % old
+ self.warns_unchanged(s, "This module is now multiple modules")
+
+ def test_import_from_as(self):
+ for old, changes in self.modules.items():
+ for new, members in changes:
+ for member in members:
+ b = "from %s import %s as foo_bar" % (old, member)
+ a = "from %s import %s as foo_bar" % (new, member)
+ self.check(b, a)
+ b = "from %s import %s as blah, %s" % (old, member, member)
+ a = "from %s import %s as blah, %s" % (new, member, member)
+ self.check(b, a)
+
+ def test_star(self):
+ for old in self.modules:
+ s = "from %s import *" % old
+ self.warns_unchanged(s, "Cannot handle star imports")
+
+ def test_indented(self):
+ b = """
+def foo():
+ from urllib import urlencode, urlopen
+"""
+ a = """
+def foo():
+ from urllib.parse import urlencode
+ from urllib.request import urlopen
+"""
+ self.check(b, a)
+
+ b = """
+def foo():
+ other()
+ from urllib import urlencode, urlopen
+"""
+ a = """
+def foo():
+ other()
+ from urllib.parse import urlencode
+ from urllib.request import urlopen
+"""
+ self.check(b, a)
+
+
+
+ def test_import_module_usage(self):
+ for old, changes in self.modules.items():
+ for new, members in changes:
+ for member in members:
+ new_import = ", ".join([n for (n, mems)
+ in self.modules[old]])
+ b = """
+ import %s
+ foo(%s.%s)
+ """ % (old, old, member)
+ a = """
+ import %s
+ foo(%s.%s)
+ """ % (new_import, new, member)
+ self.check(b, a)
+ b = """
+ import %s
+ %s.%s(%s.%s)
+ """ % (old, old, member, old, member)
+ a = """
+ import %s
+ %s.%s(%s.%s)
+ """ % (new_import, new, member, new, member)
+ self.check(b, a)
+
+
+class Test_input(FixerTestCase):
+ fixer = "input"
+
+ def test_prefix_preservation(self):
+ b = """x = input( )"""
+ a = """x = eval(input( ))"""
+ self.check(b, a)
+
+ b = """x = input( '' )"""
+ a = """x = eval(input( '' ))"""
+ self.check(b, a)
+
+ def test_trailing_comment(self):
+ b = """x = input() # foo"""
+ a = """x = eval(input()) # foo"""
+ self.check(b, a)
+
+ def test_idempotency(self):
+ s = """x = eval(input())"""
+ self.unchanged(s)
+
+ s = """x = eval(input(''))"""
+ self.unchanged(s)
+
+ s = """x = eval(input(foo(5) + 9))"""
+ self.unchanged(s)
+
+ def test_1(self):
+ b = """x = input()"""
+ a = """x = eval(input())"""
+ self.check(b, a)
+
+ def test_2(self):
+ b = """x = input('')"""
+ a = """x = eval(input(''))"""
+ self.check(b, a)
+
+ def test_3(self):
+ b = """x = input('prompt')"""
+ a = """x = eval(input('prompt'))"""
+ self.check(b, a)
+
+ def test_4(self):
+ b = """x = input(foo(5) + 9)"""
+ a = """x = eval(input(foo(5) + 9))"""
+ self.check(b, a)
+
+class Test_tuple_params(FixerTestCase):
+ fixer = "tuple_params"
+
+ def test_unchanged_1(self):
+ s = """def foo(): pass"""
+ self.unchanged(s)
+
+ def test_unchanged_2(self):
+ s = """def foo(a, b, c): pass"""
+ self.unchanged(s)
+
+ def test_unchanged_3(self):
+ s = """def foo(a=3, b=4, c=5): pass"""
+ self.unchanged(s)
+
+ def test_1(self):
+ b = """
+ def foo(((a, b), c)):
+ x = 5"""
+
+ a = """
+ def foo(xxx_todo_changeme):
+ ((a, b), c) = xxx_todo_changeme
+ x = 5"""
+ self.check(b, a)
+
+ def test_2(self):
+ b = """
+ def foo(((a, b), c), d):
+ x = 5"""
+
+ a = """
+ def foo(xxx_todo_changeme, d):
+ ((a, b), c) = xxx_todo_changeme
+ x = 5"""
+ self.check(b, a)
+
+ def test_3(self):
+ b = """
+ def foo(((a, b), c), d) -> e:
+ x = 5"""
+
+ a = """
+ def foo(xxx_todo_changeme, d) -> e:
+ ((a, b), c) = xxx_todo_changeme
+ x = 5"""
+ self.check(b, a)
+
+ def test_semicolon(self):
+ b = """
+ def foo(((a, b), c)): x = 5; y = 7"""
+
+ a = """
+ def foo(xxx_todo_changeme): ((a, b), c) = xxx_todo_changeme; x = 5; y = 7"""
+ self.check(b, a)
+
+ def test_keywords(self):
+ b = """
+ def foo(((a, b), c), d, e=5) -> z:
+ x = 5"""
+
+ a = """
+ def foo(xxx_todo_changeme, d, e=5) -> z:
+ ((a, b), c) = xxx_todo_changeme
+ x = 5"""
+ self.check(b, a)
+
+ def test_varargs(self):
+ b = """
+ def foo(((a, b), c), d, *vargs, **kwargs) -> z:
+ x = 5"""
+
+ a = """
+ def foo(xxx_todo_changeme, d, *vargs, **kwargs) -> z:
+ ((a, b), c) = xxx_todo_changeme
+ x = 5"""
+ self.check(b, a)
+
+ def test_multi_1(self):
+ b = """
+ def foo(((a, b), c), (d, e, f)) -> z:
+ x = 5"""
+
+ a = """
+ def foo(xxx_todo_changeme, xxx_todo_changeme1) -> z:
+ ((a, b), c) = xxx_todo_changeme
+ (d, e, f) = xxx_todo_changeme1
+ x = 5"""
+ self.check(b, a)
+
+ def test_multi_2(self):
+ b = """
+ def foo(x, ((a, b), c), d, (e, f, g), y) -> z:
+ x = 5"""
+
+ a = """
+ def foo(x, xxx_todo_changeme, d, xxx_todo_changeme1, y) -> z:
+ ((a, b), c) = xxx_todo_changeme
+ (e, f, g) = xxx_todo_changeme1
+ x = 5"""
+ self.check(b, a)
+
+ def test_docstring(self):
+ b = """
+ def foo(((a, b), c), (d, e, f)) -> z:
+ "foo foo foo foo"
+ x = 5"""
+
+ a = """
+ def foo(xxx_todo_changeme, xxx_todo_changeme1) -> z:
+ "foo foo foo foo"
+ ((a, b), c) = xxx_todo_changeme
+ (d, e, f) = xxx_todo_changeme1
+ x = 5"""
+ self.check(b, a)
+
+ def test_lambda_no_change(self):
+ s = """lambda x: x + 5"""
+ self.unchanged(s)
+
+ def test_lambda_parens_single_arg(self):
+ b = """lambda (x): x + 5"""
+ a = """lambda x: x + 5"""
+ self.check(b, a)
+
+ b = """lambda(x): x + 5"""
+ a = """lambda x: x + 5"""
+ self.check(b, a)
+
+ b = """lambda ((((x)))): x + 5"""
+ a = """lambda x: x + 5"""
+ self.check(b, a)
+
+ b = """lambda((((x)))): x + 5"""
+ a = """lambda x: x + 5"""
+ self.check(b, a)
+
+ def test_lambda_simple(self):
+ b = """lambda (x, y): x + f(y)"""
+ a = """lambda x_y: x_y[0] + f(x_y[1])"""
+ self.check(b, a)
+
+ b = """lambda(x, y): x + f(y)"""
+ a = """lambda x_y: x_y[0] + f(x_y[1])"""
+ self.check(b, a)
+
+ b = """lambda (((x, y))): x + f(y)"""
+ a = """lambda x_y: x_y[0] + f(x_y[1])"""
+ self.check(b, a)
+
+ b = """lambda(((x, y))): x + f(y)"""
+ a = """lambda x_y: x_y[0] + f(x_y[1])"""
+ self.check(b, a)
+
+ def test_lambda_one_tuple(self):
+ b = """lambda (x,): x + f(x)"""
+ a = """lambda x1: x1[0] + f(x1[0])"""
+ self.check(b, a)
+
+ b = """lambda (((x,))): x + f(x)"""
+ a = """lambda x1: x1[0] + f(x1[0])"""
+ self.check(b, a)
+
+ def test_lambda_simple_multi_use(self):
+ b = """lambda (x, y): x + x + f(x) + x"""
+ a = """lambda x_y: x_y[0] + x_y[0] + f(x_y[0]) + x_y[0]"""
+ self.check(b, a)
+
+ def test_lambda_simple_reverse(self):
+ b = """lambda (x, y): y + x"""
+ a = """lambda x_y: x_y[1] + x_y[0]"""
+ self.check(b, a)
+
+ def test_lambda_nested(self):
+ b = """lambda (x, (y, z)): x + y + z"""
+ a = """lambda x_y_z: x_y_z[0] + x_y_z[1][0] + x_y_z[1][1]"""
+ self.check(b, a)
+
+ b = """lambda (((x, (y, z)))): x + y + z"""
+ a = """lambda x_y_z: x_y_z[0] + x_y_z[1][0] + x_y_z[1][1]"""
+ self.check(b, a)
+
+ def test_lambda_nested_multi_use(self):
+ b = """lambda (x, (y, z)): x + y + f(y)"""
+ a = """lambda x_y_z: x_y_z[0] + x_y_z[1][0] + f(x_y_z[1][0])"""
+ self.check(b, a)
+
+class Test_methodattrs(FixerTestCase):
+ fixer = "methodattrs"
+
+ attrs = ["func", "self", "class"]
+
+ def test(self):
+ for attr in self.attrs:
+ b = "a.im_%s" % attr
+ if attr == "class":
+ a = "a.__self__.__class__"
+ else:
+ a = "a.__%s__" % attr
+ self.check(b, a)
+
+ b = "self.foo.im_%s.foo_bar" % attr
+ if attr == "class":
+ a = "self.foo.__self__.__class__.foo_bar"
+ else:
+ a = "self.foo.__%s__.foo_bar" % attr
+ self.check(b, a)
+
+ def test_unchanged(self):
+ for attr in self.attrs:
+ s = "foo(im_%s + 5)" % attr
+ self.unchanged(s)
+
+ s = "f(foo.__%s__)" % attr
+ self.unchanged(s)
+
+ s = "f(foo.__%s__.foo)" % attr
+ self.unchanged(s)
+
+class Test_next(FixerTestCase):
+ fixer = "next"
+
+ def test_1(self):
+ b = """it.next()"""
+ a = """next(it)"""
+ self.check(b, a)
+
+ def test_2(self):
+ b = """a.b.c.d.next()"""
+ a = """next(a.b.c.d)"""
+ self.check(b, a)
+
+ def test_3(self):
+ b = """(a + b).next()"""
+ a = """next((a + b))"""
+ self.check(b, a)
+
+ def test_4(self):
+ b = """a().next()"""
+ a = """next(a())"""
+ self.check(b, a)
+
+ def test_5(self):
+ b = """a().next() + b"""
+ a = """next(a()) + b"""
+ self.check(b, a)
+
+ def test_6(self):
+ b = """c( a().next() + b)"""
+ a = """c( next(a()) + b)"""
+ self.check(b, a)
+
+ def test_prefix_preservation_1(self):
+ b = """
+ for a in b:
+ foo(a)
+ a.next()
+ """
+ a = """
+ for a in b:
+ foo(a)
+ next(a)
+ """
+ self.check(b, a)
+
+ def test_prefix_preservation_2(self):
+ b = """
+ for a in b:
+ foo(a) # abc
+ # def
+ a.next()
+ """
+ a = """
+ for a in b:
+ foo(a) # abc
+ # def
+ next(a)
+ """
+ self.check(b, a)
+
+ def test_prefix_preservation_3(self):
+ b = """
+ next = 5
+ for a in b:
+ foo(a)
+ a.next()
+ """
+ a = """
+ next = 5
+ for a in b:
+ foo(a)
+ a.__next__()
+ """
+ self.check(b, a, ignore_warnings=True)
+
+ def test_prefix_preservation_4(self):
+ b = """
+ next = 5
+ for a in b:
+ foo(a) # abc
+ # def
+ a.next()
+ """
+ a = """
+ next = 5
+ for a in b:
+ foo(a) # abc
+ # def
+ a.__next__()
+ """
+ self.check(b, a, ignore_warnings=True)
+
+ def test_prefix_preservation_5(self):
+ b = """
+ next = 5
+ for a in b:
+ foo(foo(a), # abc
+ a.next())
+ """
+ a = """
+ next = 5
+ for a in b:
+ foo(foo(a), # abc
+ a.__next__())
+ """
+ self.check(b, a, ignore_warnings=True)
+
+ def test_prefix_preservation_6(self):
+ b = """
+ for a in b:
+ foo(foo(a), # abc
+ a.next())
+ """
+ a = """
+ for a in b:
+ foo(foo(a), # abc
+ next(a))
+ """
+ self.check(b, a)
+
+ def test_method_1(self):
+ b = """
+ class A:
+ def next(self):
+ pass
+ """
+ a = """
+ class A:
+ def __next__(self):
+ pass
+ """
+ self.check(b, a)
+
+ def test_method_2(self):
+ b = """
+ class A(object):
+ def next(self):
+ pass
+ """
+ a = """
+ class A(object):
+ def __next__(self):
+ pass
+ """
+ self.check(b, a)
+
+ def test_method_3(self):
+ b = """
+ class A:
+ def next(x):
+ pass
+ """
+ a = """
+ class A:
+ def __next__(x):
+ pass
+ """
+ self.check(b, a)
+
+ def test_method_4(self):
+ b = """
+ class A:
+ def __init__(self, foo):
+ self.foo = foo
+
+ def next(self):
+ pass
+
+ def __iter__(self):
+ return self
+ """
+ a = """
+ class A:
+ def __init__(self, foo):
+ self.foo = foo
+
+ def __next__(self):
+ pass
+
+ def __iter__(self):
+ return self
+ """
+ self.check(b, a)
+
+ def test_method_unchanged(self):
+ s = """
+ class A:
+ def next(self, a, b):
+ pass
+ """
+ self.unchanged(s)
+
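+ # Rebinding the name ``next`` (assignment, import, def, global, or a
+ # for-loop target) makes the fixer warn instead of rewriting blindly.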
+ def test_shadowing_assign_simple(self):
+ s = """
+ next = foo
+
+ class A:
+ def next(self, a, b):
+ pass
+ """
+ self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
+
+ def test_shadowing_assign_tuple_1(self):
+ s = """
+ (next, a) = foo
+
+ class A:
+ def next(self, a, b):
+ pass
+ """
+ self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
+
+ def test_shadowing_assign_tuple_2(self):
+ s = """
+ (a, (b, (next, c)), a) = foo
+
+ class A:
+ def next(self, a, b):
+ pass
+ """
+ self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
+
+ def test_shadowing_assign_list_1(self):
+ s = """
+ [next, a] = foo
+
+ class A:
+ def next(self, a, b):
+ pass
+ """
+ self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
+
+ def test_shadowing_assign_list_2(self):
+ s = """
+ [a, [b, [next, c]], a] = foo
+
+ class A:
+ def next(self, a, b):
+ pass
+ """
+ self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
+
+ def test_builtin_assign(self):
+ s = """
+ def foo():
+ __builtin__.next = foo
+
+ class A:
+ def next(self, a, b):
+ pass
+ """
+ self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
+
+ def test_builtin_assign_in_tuple(self):
+ s = """
+ def foo():
+ (a, __builtin__.next) = foo
+
+ class A:
+ def next(self, a, b):
+ pass
+ """
+ self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
+
+ def test_builtin_assign_in_list(self):
+ s = """
+ def foo():
+ [a, __builtin__.next] = foo
+
+ class A:
+ def next(self, a, b):
+ pass
+ """
+ self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
+
+ def test_assign_to_next(self):
+ s = """
+ def foo():
+ A.next = foo
+
+ class A:
+ def next(self, a, b):
+ pass
+ """
+ self.unchanged(s)
+
+ def test_assign_to_next_in_tuple(self):
+ s = """
+ def foo():
+ (a, A.next) = foo
+
+ class A:
+ def next(self, a, b):
+ pass
+ """
+ self.unchanged(s)
+
+ def test_assign_to_next_in_list(self):
+ s = """
+ def foo():
+ [a, A.next] = foo
+
+ class A:
+ def next(self, a, b):
+ pass
+ """
+ self.unchanged(s)
+
+ def test_shadowing_import_1(self):
+ s = """
+ import foo.bar as next
+
+ class A:
+ def next(self, a, b):
+ pass
+ """
+ self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
+
+ def test_shadowing_import_2(self):
+ s = """
+ import bar, bar.foo as next
+
+ class A:
+ def next(self, a, b):
+ pass
+ """
+ self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
+
+ def test_shadowing_import_3(self):
+ s = """
+ import bar, bar.foo as next, baz
+
+ class A:
+ def next(self, a, b):
+ pass
+ """
+ self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
+
+ def test_shadowing_import_from_1(self):
+ s = """
+ from x import next
+
+ class A:
+ def next(self, a, b):
+ pass
+ """
+ self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
+
+ def test_shadowing_import_from_2(self):
+ s = """
+ from x.a import next
+
+ class A:
+ def next(self, a, b):
+ pass
+ """
+ self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
+
+ def test_shadowing_import_from_3(self):
+ s = """
+ from x import a, next, b
+
+ class A:
+ def next(self, a, b):
+ pass
+ """
+ self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
+
+ def test_shadowing_import_from_4(self):
+ s = """
+ from x.a import a, next, b
+
+ class A:
+ def next(self, a, b):
+ pass
+ """
+ self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
+
+ def test_shadowing_funcdef_1(self):
+ s = """
+ def next(a):
+ pass
+
+ class A:
+ def next(self, a, b):
+ pass
+ """
+ self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
+
+ def test_shadowing_funcdef_2(self):
+ b = """
+ def next(a):
+ pass
+
+ class A:
+ def next(self):
+ pass
+
+ it.next()
+ """
+ a = """
+ def next(a):
+ pass
+
+ class A:
+ def __next__(self):
+ pass
+
+ it.__next__()
+ """
+ self.warns(b, a, "Calls to builtin next() possibly shadowed")
+
+ def test_shadowing_global_1(self):
+ s = """
+ def f():
+ global next
+ next = 5
+ """
+ self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
+
+ def test_shadowing_global_2(self):
+ s = """
+ def f():
+ global a, next, b
+ next = 5
+ """
+ self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
+
+ def test_shadowing_for_simple(self):
+ s = """
+ for next in it():
+ pass
+
+ b = 5
+ c = 6
+ """
+ self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
+
+ def test_shadowing_for_tuple_1(self):
+ s = """
+ for next, b in it():
+ pass
+
+ b = 5
+ c = 6
+ """
+ self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
+
+ def test_shadowing_for_tuple_2(self):
+ s = """
+ for a, (next, c), b in it():
+ pass
+
+ b = 5
+ c = 6
+ """
+ self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
+
+ def test_noncall_access_1(self):
+ b = """gnext = g.next"""
+ a = """gnext = g.__next__"""
+ self.check(b, a)
+
+ def test_noncall_access_2(self):
+ b = """f(g.next + 5)"""
+ a = """f(g.__next__ + 5)"""
+ self.check(b, a)
+
+ def test_noncall_access_3(self):
+ b = """f(g().next + 5)"""
+ a = """f(g().__next__ + 5)"""
+ self.check(b, a)
+
+class Test_nonzero(FixerTestCase):
+ fixer = "nonzero"
+
+ def test_1(self):
+ b = """
+ class A:
+ def __nonzero__(self):
+ pass
+ """
+ a = """
+ class A:
+ def __bool__(self):
+ pass
+ """
+ self.check(b, a)
+
+ def test_2(self):
+ b = """
+ class A(object):
+ def __nonzero__(self):
+ pass
+ """
+ a = """
+ class A(object):
+ def __bool__(self):
+ pass
+ """
+ self.check(b, a)
+
+ def test_unchanged_1(self):
+ s = """
+ class A(object):
+ def __bool__(self):
+ pass
+ """
+ self.unchanged(s)
+
+ def test_unchanged_2(self):
+ s = """
+ class A(object):
+ def __nonzero__(self, a):
+ pass
+ """
+ self.unchanged(s)
+
+ def test_unchanged_func(self):
+ s = """
+ def __nonzero__(self):
+ pass
+ """
+ self.unchanged(s)
+
+class Test_numliterals(FixerTestCase):
+ fixer = "numliterals"
+
+ def test_octal_1(self):
+ b = """0755"""
+ a = """0o755"""
+ self.check(b, a)
+
+ def test_long_int_1(self):
+ b = """a = 12L"""
+ a = """a = 12"""
+ self.check(b, a)
+
+ def test_long_int_2(self):
+ b = """a = 12l"""
+ a = """a = 12"""
+ self.check(b, a)
+
+ def test_long_hex(self):
+ b = """b = 0x12l"""
+ a = """b = 0x12"""
+ self.check(b, a)
+
+ def test_comments_and_spacing(self):
+ b = """b = 0x12L"""
+ a = """b = 0x12"""
+ self.check(b, a)
+
+ b = """b = 0755 # spam"""
+ a = """b = 0o755 # spam"""
+ self.check(b, a)
+
+ def test_unchanged_int(self):
+ s = """5"""
+ self.unchanged(s)
+
+ def test_unchanged_float(self):
+ s = """5.0"""
+ self.unchanged(s)
+
+ def test_unchanged_octal(self):
+ s = """0o755"""
+ self.unchanged(s)
+
+ def test_unchanged_hex(self):
+ s = """0xABC"""
+ self.unchanged(s)
+
+ def test_unchanged_exp(self):
+ s = """5.0e10"""
+ self.unchanged(s)
+
+ def test_unchanged_complex_int(self):
+ s = """5 + 4j"""
+ self.unchanged(s)
+
+ def test_unchanged_complex_float(self):
+ s = """5.4 + 4.9j"""
+ self.unchanged(s)
+
+ def test_unchanged_complex_bare(self):
+ s = """4j"""
+ self.unchanged(s)
+ s = """4.4j"""
+ self.unchanged(s)
+
+class Test_renames(FixerTestCase):
+ fixer = "renames"
+
+ modules = {"sys": ("maxint", "maxsize"),
+ }
+
+ def test_import_from(self):
+ for mod, (old, new) in self.modules.items():
+ b = "from %s import %s" % (mod, old)
+ a = "from %s import %s" % (mod, new)
+ self.check(b, a)
+
+ s = "from foo import %s" % old
+ self.unchanged(s)
+
+ def test_import_from_as(self):
+ for mod, (old, new) in self.modules.items():
+ b = "from %s import %s as foo_bar" % (mod, old)
+ a = "from %s import %s as foo_bar" % (mod, new)
+ self.check(b, a)
+
+ def test_import_module_usage(self):
+ for mod, (old, new) in self.modules.items():
+ b = """
+ import %s
+ foo(%s, %s.%s)
+ """ % (mod, mod, mod, old)
+ a = """
+ import %s
+ foo(%s, %s.%s)
+ """ % (mod, mod, mod, new)
+ self.check(b, a)
+
+ def XXX_test_from_import_usage(self):
+ # not implemented yet
+ for mod, (old, new) in self.modules.items():
+ b = """
+ from %s import %s
+ foo(%s, %s)
+ """ % (mod, old, mod, old)
+ a = """
+ from %s import %s
+ foo(%s, %s)
+ """ % (mod, new, mod, new)
+ self.check(b, a)
+
+class Test_unicode(FixerTestCase):
+ fixer = "unicode"
+
+ def test_whitespace(self):
+ b = """unicode( x)"""
+ a = """str( x)"""
+ self.check(b, a)
+
+ b = """ unicode(x )"""
+ a = """ str(x )"""
+ self.check(b, a)
+
+ b = """ u'h'"""
+ a = """ 'h'"""
+ self.check(b, a)
+
+ def test_unicode_call(self):
+ b = """unicode(x, y, z)"""
+ a = """str(x, y, z)"""
+ self.check(b, a)
+
+ def test_unichr(self):
+ b = """unichr(u'h')"""
+ a = """chr('h')"""
+ self.check(b, a)
+
+ def test_unicode_literal_1(self):
+ b = '''u"x"'''
+ a = '''"x"'''
+ self.check(b, a)
+
+ def test_unicode_literal_2(self):
+ b = """ur'x'"""
+ a = """r'x'"""
+ self.check(b, a)
+
+ def test_unicode_literal_3(self):
+ b = """UR'''x''' """
+ a = """R'''x''' """
+ self.check(b, a)
+
+ def test_native_literal_escape_u(self):
+ b = """'\\\\\\u20ac\\U0001d121\\\\u20ac'"""
+ a = """'\\\\\\\\u20ac\\\\U0001d121\\\\u20ac'"""
+ self.check(b, a)
+
+ b = """r'\\\\\\u20ac\\U0001d121\\\\u20ac'"""
+ a = """r'\\\\\\u20ac\\U0001d121\\\\u20ac'"""
+ self.check(b, a)
+
+ def test_bytes_literal_escape_u(self):
+ b = """b'\\\\\\u20ac\\U0001d121\\\\u20ac'"""
+ a = """b'\\\\\\u20ac\\U0001d121\\\\u20ac'"""
+ self.check(b, a)
+
+ b = """br'\\\\\\u20ac\\U0001d121\\\\u20ac'"""
+ a = """br'\\\\\\u20ac\\U0001d121\\\\u20ac'"""
+ self.check(b, a)
+
+ def test_unicode_literal_escape_u(self):
+ b = """u'\\\\\\u20ac\\U0001d121\\\\u20ac'"""
+ a = """'\\\\\\u20ac\\U0001d121\\\\u20ac'"""
+ self.check(b, a)
+
+ b = """ur'\\\\\\u20ac\\U0001d121\\\\u20ac'"""
+ a = """r'\\\\\\u20ac\\U0001d121\\\\u20ac'"""
+ self.check(b, a)
+
+ def test_native_unicode_literal_escape_u(self):
+ f = 'from __future__ import unicode_literals\n'
+ b = f + """'\\\\\\u20ac\\U0001d121\\\\u20ac'"""
+ a = f + """'\\\\\\u20ac\\U0001d121\\\\u20ac'"""
+ self.check(b, a)
+
+ b = f + """r'\\\\\\u20ac\\U0001d121\\\\u20ac'"""
+ a = f + """r'\\\\\\u20ac\\U0001d121\\\\u20ac'"""
+ self.check(b, a)
+
+
+class Test_filter(FixerTestCase):
+ fixer = "filter"
+
+ def test_prefix_preservation(self):
+ b = """x = filter( foo, 'abc' )"""
+ a = """x = list(filter( foo, 'abc' ))"""
+ self.check(b, a)
+
+ b = """x = filter( None , 'abc' )"""
+ a = """x = [_f for _f in 'abc' if _f]"""
+ self.check(b, a)
+
+ def test_filter_basic(self):
+ b = """x = filter(None, 'abc')"""
+ a = """x = [_f for _f in 'abc' if _f]"""
+ self.check(b, a)
+
+ b = """x = len(filter(f, 'abc'))"""
+ a = """x = len(list(filter(f, 'abc')))"""
+ self.check(b, a)
+
+ b = """x = filter(lambda x: x%2 == 0, range(10))"""
+ a = """x = [x for x in range(10) if x%2 == 0]"""
+ self.check(b, a)
+
+ # Note the parens around x
+ b = """x = filter(lambda (x): x%2 == 0, range(10))"""
+ a = """x = [x for x in range(10) if x%2 == 0]"""
+ self.check(b, a)
+
+ # XXX This (rare) case is not supported
+## b = """x = filter(f, 'abc')[0]"""
+## a = """x = list(filter(f, 'abc'))[0]"""
+## self.check(b, a)
+
+ def test_filter_nochange(self):
+ a = """b.join(filter(f, 'abc'))"""
+ self.unchanged(a)
+ a = """(a + foo(5)).join(filter(f, 'abc'))"""
+ self.unchanged(a)
+ a = """iter(filter(f, 'abc'))"""
+ self.unchanged(a)
+ a = """list(filter(f, 'abc'))"""
+ self.unchanged(a)
+ a = """list(filter(f, 'abc'))[0]"""
+ self.unchanged(a)
+ a = """set(filter(f, 'abc'))"""
+ self.unchanged(a)
+ a = """set(filter(f, 'abc')).pop()"""
+ self.unchanged(a)
+ a = """tuple(filter(f, 'abc'))"""
+ self.unchanged(a)
+ a = """any(filter(f, 'abc'))"""
+ self.unchanged(a)
+ a = """all(filter(f, 'abc'))"""
+ self.unchanged(a)
+ a = """sum(filter(f, 'abc'))"""
+ self.unchanged(a)
+ a = """sorted(filter(f, 'abc'))"""
+ self.unchanged(a)
+ a = """sorted(filter(f, 'abc'), key=blah)"""
+ self.unchanged(a)
+ a = """sorted(filter(f, 'abc'), key=blah)[0]"""
+ self.unchanged(a)
+ a = """enumerate(filter(f, 'abc'))"""
+ self.unchanged(a)
+ a = """enumerate(filter(f, 'abc'), start=1)"""
+ self.unchanged(a)
+ a = """for i in filter(f, 'abc'): pass"""
+ self.unchanged(a)
+ a = """[x for x in filter(f, 'abc')]"""
+ self.unchanged(a)
+ a = """(x for x in filter(f, 'abc'))"""
+ self.unchanged(a)
+
+ def test_future_builtins(self):
+ a = "from future_builtins import spam, filter; filter(f, 'ham')"
+ self.unchanged(a)
+
+ b = """from future_builtins import spam; x = filter(f, 'abc')"""
+ a = """from future_builtins import spam; x = list(filter(f, 'abc'))"""
+ self.check(b, a)
+
+ a = "from future_builtins import *; filter(f, 'ham')"
+ self.unchanged(a)
+
+class Test_map(FixerTestCase):
+ fixer = "map"
+
+ def check(self, b, a):
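+ # When map has been imported from future_builtins the fixer must not
+ # touch the code, so every positive check is also run in that form.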
+ self.unchanged("from future_builtins import map; " + b, a)
+ super(Test_map, self).check(b, a)
+
+ def test_prefix_preservation(self):
+ b = """x = map( f, 'abc' )"""
+ a = """x = list(map( f, 'abc' ))"""
+ self.check(b, a)
+
+ def test_trailing_comment(self):
+ b = """x = map(f, 'abc') # foo"""
+ a = """x = list(map(f, 'abc')) # foo"""
+ self.check(b, a)
+
+ def test_None_with_multiple_arguments(self):
+ s = """x = map(None, a, b, c)"""
+ self.warns_unchanged(s, "cannot convert map(None, ...) with "
+ "multiple arguments")
+
+ def test_map_basic(self):
+ b = """x = map(f, 'abc')"""
+ a = """x = list(map(f, 'abc'))"""
+ self.check(b, a)
+
+ b = """x = len(map(f, 'abc', 'def'))"""
+ a = """x = len(list(map(f, 'abc', 'def')))"""
+ self.check(b, a)
+
+ b = """x = map(None, 'abc')"""
+ a = """x = list('abc')"""
+ self.check(b, a)
+
+ b = """x = map(lambda x: x+1, range(4))"""
+ a = """x = [x+1 for x in range(4)]"""
+ self.check(b, a)
+
+ # Note the parens around x
+ b = """x = map(lambda (x): x+1, range(4))"""
+ a = """x = [x+1 for x in range(4)]"""
+ self.check(b, a)
+
+ b = """
+ foo()
+ # foo
+ map(f, x)
+ """
+ a = """
+ foo()
+ # foo
+ list(map(f, x))
+ """
+ self.warns(b, a, "You should use a for loop here")
+
+ # XXX This (rare) case is not supported
+## b = """x = map(f, 'abc')[0]"""
+## a = """x = list(map(f, 'abc'))[0]"""
+## self.check(b, a)
+
+ def test_map_nochange(self):
+ a = """b.join(map(f, 'abc'))"""
+ self.unchanged(a)
+ a = """(a + foo(5)).join(map(f, 'abc'))"""
+ self.unchanged(a)
+ a = """iter(map(f, 'abc'))"""
+ self.unchanged(a)
+ a = """list(map(f, 'abc'))"""
+ self.unchanged(a)
+ a = """list(map(f, 'abc'))[0]"""
+ self.unchanged(a)
+ a = """set(map(f, 'abc'))"""
+ self.unchanged(a)
+ a = """set(map(f, 'abc')).pop()"""
+ self.unchanged(a)
+ a = """tuple(map(f, 'abc'))"""
+ self.unchanged(a)
+ a = """any(map(f, 'abc'))"""
+ self.unchanged(a)
+ a = """all(map(f, 'abc'))"""
+ self.unchanged(a)
+ a = """sum(map(f, 'abc'))"""
+ self.unchanged(a)
+ a = """sorted(map(f, 'abc'))"""
+ self.unchanged(a)
+ a = """sorted(map(f, 'abc'), key=blah)"""
+ self.unchanged(a)
+ a = """sorted(map(f, 'abc'), key=blah)[0]"""
+ self.unchanged(a)
+ a = """enumerate(map(f, 'abc'))"""
+ self.unchanged(a)
+ a = """enumerate(map(f, 'abc'), start=1)"""
+ self.unchanged(a)
+ a = """for i in map(f, 'abc'): pass"""
+ self.unchanged(a)
+ a = """[x for x in map(f, 'abc')]"""
+ self.unchanged(a)
+ a = """(x for x in map(f, 'abc'))"""
+ self.unchanged(a)
+
+ def test_future_builtins(self):
+ a = "from future_builtins import spam, map, eggs; map(f, 'ham')"
+ self.unchanged(a)
+
+ b = """from future_builtins import spam, eggs; x = map(f, 'abc')"""
+ a = """from future_builtins import spam, eggs; x = list(map(f, 'abc'))"""
+ self.check(b, a)
+
+ a = "from future_builtins import *; map(f, 'ham')"
+ self.unchanged(a)
+
+class Test_zip(FixerTestCase):
+ fixer = "zip"
+
+ def check(self, b, a):
+ self.unchanged("from future_builtins import zip; " + b, a)
+ super(Test_zip, self).check(b, a)
+
+ def test_zip_basic(self):
+ b = """x = zip(a, b, c)"""
+ a = """x = list(zip(a, b, c))"""
+ self.check(b, a)
+
+ b = """x = len(zip(a, b))"""
+ a = """x = len(list(zip(a, b)))"""
+ self.check(b, a)
+
+ def test_zip_nochange(self):
+ a = """b.join(zip(a, b))"""
+ self.unchanged(a)
+ a = """(a + foo(5)).join(zip(a, b))"""
+ self.unchanged(a)
+ a = """iter(zip(a, b))"""
+ self.unchanged(a)
+ a = """list(zip(a, b))"""
+ self.unchanged(a)
+ a = """list(zip(a, b))[0]"""
+ self.unchanged(a)
+ a = """set(zip(a, b))"""
+ self.unchanged(a)
+ a = """set(zip(a, b)).pop()"""
+ self.unchanged(a)
+ a = """tuple(zip(a, b))"""
+ self.unchanged(a)
+ a = """any(zip(a, b))"""
+ self.unchanged(a)
+ a = """all(zip(a, b))"""
+ self.unchanged(a)
+ a = """sum(zip(a, b))"""
+ self.unchanged(a)
+ a = """sorted(zip(a, b))"""
+ self.unchanged(a)
+ a = """sorted(zip(a, b), key=blah)"""
+ self.unchanged(a)
+ a = """sorted(zip(a, b), key=blah)[0]"""
+ self.unchanged(a)
+ a = """enumerate(zip(a, b))"""
+ self.unchanged(a)
+ a = """enumerate(zip(a, b), start=1)"""
+ self.unchanged(a)
+ a = """for i in zip(a, b): pass"""
+ self.unchanged(a)
+ a = """[x for x in zip(a, b)]"""
+ self.unchanged(a)
+ a = """(x for x in zip(a, b))"""
+ self.unchanged(a)
+
+ def test_future_builtins(self):
+ a = "from future_builtins import spam, zip, eggs; zip(a, b)"
+ self.unchanged(a)
+
+ b = """from future_builtins import spam, eggs; x = zip(a, b)"""
+ a = """from future_builtins import spam, eggs; x = list(zip(a, b))"""
+ self.check(b, a)
+
+ a = "from future_builtins import *; zip(a, b)"
+ self.unchanged(a)
+
+class Test_standarderror(FixerTestCase):
+ fixer = "standarderror"
+
+ def test(self):
+ b = """x = StandardError()"""
+ a = """x = Exception()"""
+ self.check(b, a)
+
+ b = """x = StandardError(a, b, c)"""
+ a = """x = Exception(a, b, c)"""
+ self.check(b, a)
+
+ b = """f(2 + StandardError(a, b, c))"""
+ a = """f(2 + Exception(a, b, c))"""
+ self.check(b, a)
+
+class Test_types(FixerTestCase):
+ fixer = "types"
+
+ def test_basic_types_convert(self):
+ b = """types.StringType"""
+ a = """bytes"""
+ self.check(b, a)
+
+ b = """types.DictType"""
+ a = """dict"""
+ self.check(b, a)
+
+ b = """types . IntType"""
+ a = """int"""
+ self.check(b, a)
+
+ b = """types.ListType"""
+ a = """list"""
+ self.check(b, a)
+
+ b = """types.LongType"""
+ a = """int"""
+ self.check(b, a)
+
+ b = """types.NoneType"""
+ a = """type(None)"""
+ self.check(b, a)
+
+ b = "types.StringTypes"
+ a = "(str,)"
+ self.check(b, a)
+
+class Test_idioms(FixerTestCase):
+ fixer = "idioms"
+
+ def test_while(self):
+ b = """while 1: foo()"""
+ a = """while True: foo()"""
+ self.check(b, a)
+
+ b = """
+ while 1:
+ foo()
+ """
+ a = """
+ while True:
+ foo()
+ """
+ self.check(b, a)
+
+ def test_while_unchanged(self):
+ s = """while 11: foo()"""
+ self.unchanged(s)
+
+ s = """while 0: foo()"""
+ self.unchanged(s)
+
+ s = """while foo(): foo()"""
+ self.unchanged(s)
+
+ s = """while []: foo()"""
+ self.unchanged(s)
+
+ def test_eq_simple(self):
+ b = """type(x) == T"""
+ a = """isinstance(x, T)"""
+ self.check(b, a)
+
+ b = """if type(x) == T: pass"""
+ a = """if isinstance(x, T): pass"""
+ self.check(b, a)
+
+ def test_eq_reverse(self):
+ b = """T == type(x)"""
+ a = """isinstance(x, T)"""
+ self.check(b, a)
+
+ b = """if T == type(x): pass"""
+ a = """if isinstance(x, T): pass"""
+ self.check(b, a)
+
+ def test_eq_expression(self):
+ b = """type(x+y) == d.get('T')"""
+ a = """isinstance(x+y, d.get('T'))"""
+ self.check(b, a)
+
+ b = """type( x + y) == d.get('T')"""
+ a = """isinstance(x + y, d.get('T'))"""
+ self.check(b, a)
+
+ def test_is_simple(self):
+ b = """type(x) is T"""
+ a = """isinstance(x, T)"""
+ self.check(b, a)
+
+ b = """if type(x) is T: pass"""
+ a = """if isinstance(x, T): pass"""
+ self.check(b, a)
+
+ def test_is_reverse(self):
+ b = """T is type(x)"""
+ a = """isinstance(x, T)"""
+ self.check(b, a)
+
+ b = """if T is type(x): pass"""
+ a = """if isinstance(x, T): pass"""
+ self.check(b, a)
+
+ def test_is_expression(self):
+ b = """type(x+y) is d.get('T')"""
+ a = """isinstance(x+y, d.get('T'))"""
+ self.check(b, a)
+
+ b = """type( x + y) is d.get('T')"""
+ a = """isinstance(x + y, d.get('T'))"""
+ self.check(b, a)
+
+ def test_is_not_simple(self):
+ b = """type(x) is not T"""
+ a = """not isinstance(x, T)"""
+ self.check(b, a)
+
+ b = """if type(x) is not T: pass"""
+ a = """if not isinstance(x, T): pass"""
+ self.check(b, a)
+
+ def test_is_not_reverse(self):
+ b = """T is not type(x)"""
+ a = """not isinstance(x, T)"""
+ self.check(b, a)
+
+ b = """if T is not type(x): pass"""
+ a = """if not isinstance(x, T): pass"""
+ self.check(b, a)
+
+ def test_is_not_expression(self):
+ b = """type(x+y) is not d.get('T')"""
+ a = """not isinstance(x+y, d.get('T'))"""
+ self.check(b, a)
+
+ b = """type( x + y) is not d.get('T')"""
+ a = """not isinstance(x + y, d.get('T'))"""
+ self.check(b, a)
+
+ def test_ne_simple(self):
+ b = """type(x) != T"""
+ a = """not isinstance(x, T)"""
+ self.check(b, a)
+
+ b = """if type(x) != T: pass"""
+ a = """if not isinstance(x, T): pass"""
+ self.check(b, a)
+
+ def test_ne_reverse(self):
+ b = """T != type(x)"""
+ a = """not isinstance(x, T)"""
+ self.check(b, a)
+
+ b = """if T != type(x): pass"""
+ a = """if not isinstance(x, T): pass"""
+ self.check(b, a)
+
+ def test_ne_expression(self):
+ b = """type(x+y) != d.get('T')"""
+ a = """not isinstance(x+y, d.get('T'))"""
+ self.check(b, a)
+
+ b = """type( x + y) != d.get('T')"""
+ a = """not isinstance(x + y, d.get('T'))"""
+ self.check(b, a)
+
+ def test_type_unchanged(self):
+ a = """type(x).__name__"""
+ self.unchanged(a)
+
+ def test_sort_list_call(self):
+ b = """
+ v = list(t)
+ v.sort()
+ foo(v)
+ """
+ a = """
+ v = sorted(t)
+ foo(v)
+ """
+ self.check(b, a)
+
+ b = """
+ v = list(foo(b) + d)
+ v.sort()
+ foo(v)
+ """
+ a = """
+ v = sorted(foo(b) + d)
+ foo(v)
+ """
+ self.check(b, a)
+
+ b = """
+ while x:
+ v = list(t)
+ v.sort()
+ foo(v)
+ """
+ a = """
+ while x:
+ v = sorted(t)
+ foo(v)
+ """
+ self.check(b, a)
+
+ b = """
+ v = list(t)
+ # foo
+ v.sort()
+ foo(v)
+ """
+ a = """
+ v = sorted(t)
+ # foo
+ foo(v)
+ """
+ self.check(b, a)
+
+ b = r"""
+ v = list( t)
+ v.sort()
+ foo(v)
+ """
+ a = r"""
+ v = sorted( t)
+ foo(v)
+ """
+ self.check(b, a)
+
+ b = r"""
+ try:
+ m = list(s)
+ m.sort()
+ except: pass
+ """
+
+ a = r"""
+ try:
+ m = sorted(s)
+ except: pass
+ """
+ self.check(b, a)
+
+ b = r"""
+ try:
+ m = list(s)
+ # foo
+ m.sort()
+ except: pass
+ """
+
+ a = r"""
+ try:
+ m = sorted(s)
+ # foo
+ except: pass
+ """
+ self.check(b, a)
+
+ b = r"""
+ m = list(s)
+ # more comments
+ m.sort()"""
+
+ a = r"""
+ m = sorted(s)
+ # more comments"""
+ self.check(b, a)
+
+ def test_sort_simple_expr(self):
+ b = """
+ v = t
+ v.sort()
+ foo(v)
+ """
+ a = """
+ v = sorted(t)
+ foo(v)
+ """
+ self.check(b, a)
+
+ b = """
+ v = foo(b)
+ v.sort()
+ foo(v)
+ """
+ a = """
+ v = sorted(foo(b))
+ foo(v)
+ """
+ self.check(b, a)
+
+ b = """
+ v = b.keys()
+ v.sort()
+ foo(v)
+ """
+ a = """
+ v = sorted(b.keys())
+ foo(v)
+ """
+ self.check(b, a)
+
+ b = """
+ v = foo(b) + d
+ v.sort()
+ foo(v)
+ """
+ a = """
+ v = sorted(foo(b) + d)
+ foo(v)
+ """
+ self.check(b, a)
+
+ b = """
+ while x:
+ v = t
+ v.sort()
+ foo(v)
+ """
+ a = """
+ while x:
+ v = sorted(t)
+ foo(v)
+ """
+ self.check(b, a)
+
+ b = """
+ v = t
+ # foo
+ v.sort()
+ foo(v)
+ """
+ a = """
+ v = sorted(t)
+ # foo
+ foo(v)
+ """
+ self.check(b, a)
+
+ b = r"""
+ v = t
+ v.sort()
+ foo(v)
+ """
+ a = r"""
+ v = sorted(t)
+ foo(v)
+ """
+ self.check(b, a)
+
+ def test_sort_unchanged(self):
+ s = """
+ v = list(t)
+ w.sort()
+ foo(w)
+ """
+ self.unchanged(s)
+
+ s = """
+ v = list(t)
+ v.sort(u)
+ foo(v)
+ """
+ self.unchanged(s)
+
+class Test_basestring(FixerTestCase):
+ fixer = "basestring"
+
+ def test_basestring(self):
+ b = """isinstance(x, basestring)"""
+ a = """isinstance(x, str)"""
+ self.check(b, a)
+
+class Test_buffer(FixerTestCase):
+ fixer = "buffer"
+
+ def test_buffer(self):
+ b = """x = buffer(y)"""
+ a = """x = memoryview(y)"""
+ self.check(b, a)
+
+ def test_slicing(self):
+ b = """buffer(y)[4:5]"""
+ a = """memoryview(y)[4:5]"""
+ self.check(b, a)
+
+class Test_future(FixerTestCase):
+ fixer = "future"
+
+ def test_future(self):
+ b = """from __future__ import braces"""
+ a = """"""
+ self.check(b, a)
+
+ b = """# comment\nfrom __future__ import braces"""
+ a = """# comment\n"""
+ self.check(b, a)
+
+ b = """from __future__ import braces\n# comment"""
+ a = """\n# comment"""
+ self.check(b, a)
+
+ def test_run_order(self):
+ self.assert_runs_after('print')
+
+class Test_itertools(FixerTestCase):
+ fixer = "itertools"
+
+ def checkall(self, before, after):
+ # Because we need to check with and without the itertools prefix
+ # and on each of the three functions, these loops make the tests
+ # much easier to write.
+ for i in ('itertools.', ''):
+ for f in ('map', 'filter', 'zip'):
+ b = before % (i + 'i' + f)
+ a = after % (f,)
+ self.check(b, a)
+
+ def test_0(self):
+ # A simple example -- test_1 covers exactly the same thing,
+ # but it's not quite as clear.
+ b = "itertools.izip(a, b)"
+ a = "zip(a, b)"
+ self.check(b, a)
+
+ def test_1(self):
+ b = """%s(f, a)"""
+ a = """%s(f, a)"""
+ self.checkall(b, a)
+
+ def test_qualified(self):
+ b = """itertools.ifilterfalse(a, b)"""
+ a = """itertools.filterfalse(a, b)"""
+ self.check(b, a)
+
+ b = """itertools.izip_longest(a, b)"""
+ a = """itertools.zip_longest(a, b)"""
+ self.check(b, a)
+
+ def test_2(self):
+ b = """ifilterfalse(a, b)"""
+ a = """filterfalse(a, b)"""
+ self.check(b, a)
+
+ b = """izip_longest(a, b)"""
+ a = """zip_longest(a, b)"""
+ self.check(b, a)
+
+ def test_space_1(self):
+ b = """ %s(f, a)"""
+ a = """ %s(f, a)"""
+ self.checkall(b, a)
+
+ def test_space_2(self):
+ b = """ itertools.ifilterfalse(a, b)"""
+ a = """ itertools.filterfalse(a, b)"""
+ self.check(b, a)
+
+ b = """ itertools.izip_longest(a, b)"""
+ a = """ itertools.zip_longest(a, b)"""
+ self.check(b, a)
+
+ def test_run_order(self):
+ self.assert_runs_after('map', 'zip', 'filter')
+
+
+class Test_itertools_imports(FixerTestCase):
+ fixer = 'itertools_imports'
+
+ def test_reduced(self):
+ b = "from itertools import imap, izip, foo"
+ a = "from itertools import foo"
+ self.check(b, a)
+
+ b = "from itertools import bar, imap, izip, foo"
+ a = "from itertools import bar, foo"
+ self.check(b, a)
+
+ b = "from itertools import chain, imap, izip"
+ a = "from itertools import chain"
+ self.check(b, a)
+
+ def test_comments(self):
+ b = "#foo\nfrom itertools import imap, izip"
+ a = "#foo\n"
+ self.check(b, a)
+
+ def test_none(self):
+ b = "from itertools import imap, izip"
+ a = ""
+ self.check(b, a)
+
+ b = "from itertools import izip"
+ a = ""
+ self.check(b, a)
+
+ def test_import_as(self):
+ b = "from itertools import izip, bar as bang, imap"
+ a = "from itertools import bar as bang"
+ self.check(b, a)
+
+ b = "from itertools import izip as _zip, imap, bar"
+ a = "from itertools import bar"
+ self.check(b, a)
+
+ b = "from itertools import imap as _map"
+ a = ""
+ self.check(b, a)
+
+ b = "from itertools import imap as _map, izip as _zip"
+ a = ""
+ self.check(b, a)
+
+ s = "from itertools import bar as bang"
+ self.unchanged(s)
+
+ def test_ifilter_and_zip_longest(self):
+ for name in "filterfalse", "zip_longest":
+ b = "from itertools import i%s" % (name,)
+ a = "from itertools import %s" % (name,)
+ self.check(b, a)
+
+ b = "from itertools import imap, i%s, foo" % (name,)
+ a = "from itertools import %s, foo" % (name,)
+ self.check(b, a)
+
+ b = "from itertools import bar, i%s, foo" % (name,)
+ a = "from itertools import bar, %s, foo" % (name,)
+ self.check(b, a)
+
+ def test_import_star(self):
+ s = "from itertools import *"
+ self.unchanged(s)
+
+
+ def test_unchanged(self):
+ s = "from itertools import foo"
+ self.unchanged(s)
+
+
+class Test_import(FixerTestCase):
+ fixer = "import"
+
+ def setUp(self):
+ super(Test_import, self).setUp()
+ # Need to replace fix_import's exists method
+ # so we can check that it's doing the right thing
+ self.files_checked = []
+ self.present_files = set()
+ self.always_exists = True
+ def fake_exists(name):
+ self.files_checked.append(name)
+ return self.always_exists or (name in self.present_files)
+
+ from lib2to3.fixes import fix_import
+ fix_import.exists = fake_exists
+
+ def tearDown(self):
+ from lib2to3.fixes import fix_import
+ fix_import.exists = os.path.exists
+
+ def check_both(self, b, a):
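+ # The rewrite must happen when the sibling module exists and must be
+ # skipped when it does not.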
+ self.always_exists = True
+ super(Test_import, self).check(b, a)
+ self.always_exists = False
+ super(Test_import, self).unchanged(b)
+
+ def test_files_checked(self):
+ def p(path):
+ # Takes a unix path and returns a path with correct separators
+ return os.path.sep.join(path.split("/"))
+
+ self.always_exists = False
+ self.present_files = set(['__init__.py'])
+ expected_extensions = ('.py', os.path.sep, '.pyc', '.so', '.sl', '.pyd')
+ names_to_test = (p("/spam/eggs.py"), "ni.py", p("../../shrubbery.py"))
+
+ for name in names_to_test:
+ self.files_checked = []
+ self.filename = name
+ self.unchanged("import jam")
+
+ if os.path.dirname(name):
+ name = os.path.dirname(name) + '/jam'
+ else:
+ name = 'jam'
+ expected_checks = set(name + ext for ext in expected_extensions)
+ expected_checks.add("__init__.py")
+
+ self.assertEqual(set(self.files_checked), expected_checks)
+
+ def test_not_in_package(self):
+ s = "import bar"
+ self.always_exists = False
+ self.present_files = set(["bar.py"])
+ self.unchanged(s)
+
+ def test_with_absolute_import_enabled(self):
+ s = "from __future__ import absolute_import\nimport bar"
+ self.always_exists = False
+ self.present_files = set(["__init__.py", "bar.py"])
+ self.unchanged(s)
+
+ def test_in_package(self):
+ b = "import bar"
+ a = "from . import bar"
+ self.always_exists = False
+ self.present_files = set(["__init__.py", "bar.py"])
+ self.check(b, a)
+
+ def test_import_from_package(self):
+ b = "import bar"
+ a = "from . import bar"
+ self.always_exists = False
+ self.present_files = set(["__init__.py", "bar" + os.path.sep])
+ self.check(b, a)
+
+ def test_already_relative_import(self):
+ s = "from . import bar"
+ self.unchanged(s)
+
+ def test_comments_and_indent(self):
+ b = "import bar # Foo"
+ a = "from . import bar # Foo"
+ self.check(b, a)
+
+ def test_from(self):
+ b = "from foo import bar, baz"
+ a = "from .foo import bar, baz"
+ self.check_both(b, a)
+
+ b = "from foo import bar"
+ a = "from .foo import bar"
+ self.check_both(b, a)
+
+ b = "from foo import (bar, baz)"
+ a = "from .foo import (bar, baz)"
+ self.check_both(b, a)
+
+ def test_dotted_from(self):
+ b = "from green.eggs import ham"
+ a = "from .green.eggs import ham"
+ self.check_both(b, a)
+
+ def test_from_as(self):
+ b = "from green.eggs import ham as spam"
+ a = "from .green.eggs import ham as spam"
+ self.check_both(b, a)
+
+ def test_import(self):
+ b = "import foo"
+ a = "from . import foo"
+ self.check_both(b, a)
+
+ b = "import foo, bar"
+ a = "from . import foo, bar"
+ self.check_both(b, a)
+
+ b = "import foo, bar, x"
+ a = "from . import foo, bar, x"
+ self.check_both(b, a)
+
+ b = "import x, y, z"
+ a = "from . import x, y, z"
+ self.check_both(b, a)
+
+ def test_import_as(self):
+ b = "import foo as x"
+ a = "from . import foo as x"
+ self.check_both(b, a)
+
+ b = "import a as b, b as c, c as d"
+ a = "from . import a as b, b as c, c as d"
+ self.check_both(b, a)
+
+ def test_local_and_absolute(self):
+ self.always_exists = False
+ self.present_files = set(["foo.py", "__init__.py"])
+
+ s = "import foo, bar"
+ self.warns_unchanged(s, "absolute and local imports together")
+
+ def test_dotted_import(self):
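+ # Note that "from . import foo.bar" is not itself valid syntax; the
+ # test pins down what the fixer currently emits.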
+ b = "import foo.bar"
+ a = "from . import foo.bar"
+ self.check_both(b, a)
+
+ def test_dotted_import_as(self):
+ b = "import foo.bar as bang"
+ a = "from . import foo.bar as bang"
+ self.check_both(b, a)
+
+ def test_prefix(self):
+ b = """
+ # prefix
+ import foo.bar
+
+ from . import foo.bar
+ """
+ self.check_both(b, a)
+
+
+class Test_set_literal(FixerTestCase):
+
+ fixer = "set_literal"
+
+ def test_basic(self):
+ b = """set([1, 2, 3])"""
+ a = """{1, 2, 3}"""
+ self.check(b, a)
+
+ b = """set((1, 2, 3))"""
+ a = """{1, 2, 3}"""
+ self.check(b, a)
+
+ b = """set((1,))"""
+ a = """{1}"""
+ self.check(b, a)
+
+ b = """set([1])"""
+ self.check(b, a)
+
+ b = """set((a, b))"""
+ a = """{a, b}"""
+ self.check(b, a)
+
+ b = """set([a, b])"""
+ self.check(b, a)
+
+ b = """set((a*234, f(args=23)))"""
+ a = """{a*234, f(args=23)}"""
+ self.check(b, a)
+
+ b = """set([a*23, f(23)])"""
+ a = """{a*23, f(23)}"""
+ self.check(b, a)
+
+ b = """set([a-234**23])"""
+ a = """{a-234**23}"""
+ self.check(b, a)
+
+ def test_listcomps(self):
+ b = """set([x for x in y])"""
+ a = """{x for x in y}"""
+ self.check(b, a)
+
+ b = """set([x for x in y if x == m])"""
+ a = """{x for x in y if x == m}"""
+ self.check(b, a)
+
+ b = """set([x for x in y for a in b])"""
+ a = """{x for x in y for a in b}"""
+ self.check(b, a)
+
+ b = """set([f(x) - 23 for x in y])"""
+ a = """{f(x) - 23 for x in y}"""
+ self.check(b, a)
+
+ def test_whitespace(self):
+ b = """set( [1, 2])"""
+ a = """{1, 2}"""
+ self.check(b, a)
+
+ b = """set([1 , 2])"""
+ a = """{1 , 2}"""
+ self.check(b, a)
+
+ b = """set([ 1 ])"""
+ a = """{ 1 }"""
+ self.check(b, a)
+
+ b = """set( [1] )"""
+ a = """{1}"""
+ self.check(b, a)
+
+ b = """set([ 1, 2 ])"""
+ a = """{ 1, 2 }"""
+ self.check(b, a)
+
+ b = """set([x for x in y ])"""
+ a = """{x for x in y }"""
+ self.check(b, a)
+
+ b = """set(
+ [1, 2]
+ )
+ """
+ a = """{1, 2}\n"""
+ self.check(b, a)
+
+ def test_comments(self):
+ b = """set((1, 2)) # Hi"""
+ a = """{1, 2} # Hi"""
+ self.check(b, a)
+
+ # This isn't optimal behavior, but the fixer is optional.
+ b = """
+ # Foo
+ set( # Bar
+ (1, 2)
+ )
+ """
+ a = """
+ # Foo
+ {1, 2}
+ """
+ self.check(b, a)
+
+ def test_unchanged(self):
+ s = """set()"""
+ self.unchanged(s)
+
+ s = """set(a)"""
+ self.unchanged(s)
+
+ s = """set(a, b, c)"""
+ self.unchanged(s)
+
+ # Don't transform generators because they might have to be lazy.
+ s = """set(x for x in y)"""
+ self.unchanged(s)
+
+ s = """set(x for x in y if z)"""
+ self.unchanged(s)
+
+ s = """set(a*823-23**2 + f(23))"""
+ self.unchanged(s)
+
+
+class Test_sys_exc(FixerTestCase):
+ fixer = "sys_exc"
+
+ def test_0(self):
+ b = "sys.exc_type"
+ a = "sys.exc_info()[0]"
+ self.check(b, a)
+
+ def test_1(self):
+ b = "sys.exc_value"
+ a = "sys.exc_info()[1]"
+ self.check(b, a)
+
+ def test_2(self):
+ b = "sys.exc_traceback"
+ a = "sys.exc_info()[2]"
+ self.check(b, a)
+
+ def test_3(self):
+ b = "sys.exc_type # Foo"
+ a = "sys.exc_info()[0] # Foo"
+ self.check(b, a)
+
+ def test_4(self):
+ b = "sys. exc_type"
+ a = "sys. exc_info()[0]"
+ self.check(b, a)
+
+ def test_5(self):
+ b = "sys .exc_type"
+ a = "sys .exc_info()[0]"
+ self.check(b, a)
+
+
+class Test_paren(FixerTestCase):
+ fixer = "paren"
+
+ def test_0(self):
+ b = """[i for i in 1, 2 ]"""
+ a = """[i for i in (1, 2) ]"""
+ self.check(b, a)
+
+ def test_1(self):
+ b = """[i for i in 1, 2, ]"""
+ a = """[i for i in (1, 2,) ]"""
+ self.check(b, a)
+
+ def test_2(self):
+ b = """[i for i in 1, 2 ]"""
+ a = """[i for i in (1, 2) ]"""
+ self.check(b, a)
+
+ def test_3(self):
+ b = """[i for i in 1, 2 if i]"""
+ a = """[i for i in (1, 2) if i]"""
+ self.check(b, a)
+
+ def test_4(self):
+ b = """[i for i in 1, 2 ]"""
+ a = """[i for i in (1, 2) ]"""
+ self.check(b, a)
+
+ def test_5(self):
+ b = """(i for i in 1, 2)"""
+ a = """(i for i in (1, 2))"""
+ self.check(b, a)
+
+ def test_6(self):
+ b = """(i for i in 1 ,2 if i)"""
+ a = """(i for i in (1 ,2) if i)"""
+ self.check(b, a)
+
+ def test_unchanged_0(self):
+ s = """[i for i in (1, 2)]"""
+ self.unchanged(s)
+
+ def test_unchanged_1(self):
+ s = """[i for i in foo()]"""
+ self.unchanged(s)
+
+ def test_unchanged_2(self):
+ s = """[i for i in (1, 2) if nothing]"""
+ self.unchanged(s)
+
+ def test_unchanged_3(self):
+ s = """(i for i in (1, 2))"""
+ self.unchanged(s)
+
+ def test_unchanged_4(self):
+ s = """[i for i in m]"""
+ self.unchanged(s)
+
+class Test_metaclass(FixerTestCase):
+
+ fixer = 'metaclass'
+
+ def test_unchanged(self):
+ self.unchanged("class X(): pass")
+ self.unchanged("class X(object): pass")
+ self.unchanged("class X(object1, object2): pass")
+ self.unchanged("class X(object1, object2, object3): pass")
+ self.unchanged("class X(metaclass=Meta): pass")
+ self.unchanged("class X(b, arg=23, metclass=Meta): pass")
+ self.unchanged("class X(b, arg=23, metaclass=Meta, other=42): pass")
+
+ s = """
+ class X:
+ def __metaclass__(self): pass
+ """
+ self.unchanged(s)
+
+ s = """
+ class X:
+ a[23] = 74
+ """
+ self.unchanged(s)
+
+ def test_comments(self):
+ b = """
+ class X:
+ # hi
+ __metaclass__ = AppleMeta
+ """
+ a = """
+ class X(metaclass=AppleMeta):
+ # hi
+ pass
+ """
+ self.check(b, a)
+
+ b = """
+ class X:
+ __metaclass__ = Meta
+ # Bedtime!
+ """
+ a = """
+ class X(metaclass=Meta):
+ pass
+ # Bedtime!
+ """
+ self.check(b, a)
+
+ def test_meta(self):
+ # no-parent class, odd body
+ b = """
+ class X():
+ __metaclass__ = Q
+ pass
+ """
+ a = """
+ class X(metaclass=Q):
+ pass
+ """
+ self.check(b, a)
+
+ # one parent class, no body
+ b = """class X(object): __metaclass__ = Q"""
+ a = """class X(object, metaclass=Q): pass"""
+ self.check(b, a)
+
+
+ # one parent, simple body
+ b = """
+ class X(object):
+ __metaclass__ = Meta
+ bar = 7
+ """
+ a = """
+ class X(object, metaclass=Meta):
+ bar = 7
+ """
+ self.check(b, a)
+
+ b = """
+ class X:
+ __metaclass__ = Meta; x = 4; g = 23
+ """
+ a = """
+ class X(metaclass=Meta):
+ x = 4; g = 23
+ """
+ self.check(b, a)
+
+ # one parent, simple body, __metaclass__ last
+ b = """
+ class X(object):
+ bar = 7
+ __metaclass__ = Meta
+ """
+ a = """
+ class X(object, metaclass=Meta):
+ bar = 7
+ """
+ self.check(b, a)
+
+ # redefining __metaclass__
+ b = """
+ class X():
+ __metaclass__ = A
+ __metaclass__ = B
+ bar = 7
+ """
+ a = """
+ class X(metaclass=B):
+ bar = 7
+ """
+ self.check(b, a)
+
+ # multiple inheritance, simple body
+ b = """
+ class X(clsA, clsB):
+ __metaclass__ = Meta
+ bar = 7
+ """
+ a = """
+ class X(clsA, clsB, metaclass=Meta):
+ bar = 7
+ """
+ self.check(b, a)
+
+ # keywords in the class statement
+ b = """class m(a, arg=23): __metaclass__ = Meta"""
+ a = """class m(a, arg=23, metaclass=Meta): pass"""
+ self.check(b, a)
+
+ b = """
+ class X(expression(2 + 4)):
+ __metaclass__ = Meta
+ """
+ a = """
+ class X(expression(2 + 4), metaclass=Meta):
+ pass
+ """
+ self.check(b, a)
+
+ b = """
+ class X(expression(2 + 4), x**4):
+ __metaclass__ = Meta
+ """
+ a = """
+ class X(expression(2 + 4), x**4, metaclass=Meta):
+ pass
+ """
+ self.check(b, a)
+
+ b = """
+ class X:
+ __metaclass__ = Meta
+ save.py = 23
+ """
+ a = """
+ class X(metaclass=Meta):
+ save.py = 23
+ """
+ self.check(b, a)
+
+
+class Test_getcwdu(FixerTestCase):
+
+ fixer = 'getcwdu'
+
+ def test_basic(self):
+ b = """os.getcwdu"""
+ a = """os.getcwd"""
+ self.check(b, a)
+
+ b = """os.getcwdu()"""
+ a = """os.getcwd()"""
+ self.check(b, a)
+
+ b = """meth = os.getcwdu"""
+ a = """meth = os.getcwd"""
+ self.check(b, a)
+
+ b = """os.getcwdu(args)"""
+ a = """os.getcwd(args)"""
+ self.check(b, a)
+
+ def test_comment(self):
+ b = """os.getcwdu() # Foo"""
+ a = """os.getcwd() # Foo"""
+ self.check(b, a)
+
+ def test_unchanged(self):
+ s = """os.getcwd()"""
+ self.unchanged(s)
+
+ s = """getcwdu()"""
+ self.unchanged(s)
+
+ s = """os.getcwdb()"""
+ self.unchanged(s)
+
+ def test_indentation(self):
+ b = """
+ if 1:
+ os.getcwdu()
+ """
+ a = """
+ if 1:
+ os.getcwd()
+ """
+ self.check(b, a)
+
+ def test_mutilation(self):
+ b = """os .getcwdu()"""
+ a = """os .getcwd()"""
+ self.check(b, a)
+
+ b = """os. getcwdu"""
+ a = """os. getcwd"""
+ self.check(b, a)
+
+ b = """os.getcwdu ( )"""
+ a = """os.getcwd ( )"""
+ self.check(b, a)
+
+
+class Test_operator(FixerTestCase):
+
+ fixer = "operator"
+
+ def test_operator_isCallable(self):
+ b = "operator.isCallable(x)"
+ a = "hasattr(x, '__call__')"
+ self.check(b, a)
+
+ def test_operator_sequenceIncludes(self):
+ b = "operator.sequenceIncludes(x, y)"
+ a = "operator.contains(x, y)"
+ self.check(b, a)
+
+ b = "operator .sequenceIncludes(x, y)"
+ a = "operator .contains(x, y)"
+ self.check(b, a)
+
+ b = "operator. sequenceIncludes(x, y)"
+ a = "operator. contains(x, y)"
+ self.check(b, a)
+
+ def test_operator_isSequenceType(self):
+ b = "operator.isSequenceType(x)"
+ a = "import collections\nisinstance(x, collections.Sequence)"
+ self.check(b, a)
+
+ def test_operator_isMappingType(self):
+ b = "operator.isMappingType(x)"
+ a = "import collections\nisinstance(x, collections.Mapping)"
+ self.check(b, a)
+
+ def test_operator_isNumberType(self):
+ b = "operator.isNumberType(x)"
+ a = "import numbers\nisinstance(x, numbers.Number)"
+ self.check(b, a)
+
+ def test_operator_repeat(self):
+ b = "operator.repeat(x, n)"
+ a = "operator.mul(x, n)"
+ self.check(b, a)
+
+ b = "operator .repeat(x, n)"
+ a = "operator .mul(x, n)"
+ self.check(b, a)
+
+ b = "operator. repeat(x, n)"
+ a = "operator. mul(x, n)"
+ self.check(b, a)
+
+ def test_operator_irepeat(self):
+ b = "operator.irepeat(x, n)"
+ a = "operator.imul(x, n)"
+ self.check(b, a)
+
+ b = "operator .irepeat(x, n)"
+ a = "operator .imul(x, n)"
+ self.check(b, a)
+
+ b = "operator. irepeat(x, n)"
+ a = "operator. imul(x, n)"
+ self.check(b, a)
+
+ def test_bare_isCallable(self):
+ s = "isCallable(x)"
+ t = "You should use 'hasattr(x, '__call__')' here."
+ self.warns_unchanged(s, t)
+
+ def test_bare_sequenceIncludes(self):
+ s = "sequenceIncludes(x, y)"
+ t = "You should use 'operator.contains(x, y)' here."
+ self.warns_unchanged(s, t)
+
+ def test_bare_operator_isSequenceType(self):
+ s = "isSequenceType(z)"
+ t = "You should use 'isinstance(z, collections.Sequence)' here."
+ self.warns_unchanged(s, t)
+
+ def test_bare_operator_isMappingType(self):
+ s = "isMappingType(x)"
+ t = "You should use 'isinstance(x, collections.Mapping)' here."
+ self.warns_unchanged(s, t)
+
+ def test_bare_operator_isNumberType(self):
+ s = "isNumberType(y)"
+ t = "You should use 'isinstance(y, numbers.Number)' here."
+ self.warns_unchanged(s, t)
+
+ def test_bare_operator_repeat(self):
+ s = "repeat(x, n)"
+ t = "You should use 'operator.mul(x, n)' here."
+ self.warns_unchanged(s, t)
+
+ def test_bare_operator_irepeat(self):
+ s = "irepeat(y, 187)"
+ t = "You should use 'operator.imul(y, 187)' here."
+ self.warns_unchanged(s, t)
+
+
+class Test_exitfunc(FixerTestCase):
+
+ fixer = "exitfunc"
+
+ def test_simple(self):
+ b = """
+ import sys
+ sys.exitfunc = my_atexit
+ """
+ a = """
+ import sys
+ import atexit
+ atexit.register(my_atexit)
+ """
+ self.check(b, a)
+
+ def test_names_import(self):
+ b = """
+ import sys, crumbs
+ sys.exitfunc = my_func
+ """
+ a = """
+ import sys, crumbs, atexit
+ atexit.register(my_func)
+ """
+ self.check(b, a)
+
+ def test_complex_expression(self):
+ b = """
+ import sys
+ sys.exitfunc = do(d)/a()+complex(f=23, g=23)*expression
+ """
+ a = """
+ import sys
+ import atexit
+ atexit.register(do(d)/a()+complex(f=23, g=23)*expression)
+ """
+ self.check(b, a)
+
+ def test_comments(self):
+ b = """
+ import sys # Foo
+ sys.exitfunc = f # Blah
+ """
+ a = """
+ import sys
+ import atexit # Foo
+ atexit.register(f) # Blah
+ """
+ self.check(b, a)
+
+ b = """
+ import apples, sys, crumbs, larry # Pleasant comments
+ sys.exitfunc = func
+ """
+ a = """
+ import apples, sys, crumbs, larry, atexit # Pleasant comments
+ atexit.register(func)
+ """
+ self.check(b, a)
+
+ def test_in_a_function(self):
+ b = """
+ import sys
+ def f():
+ sys.exitfunc = func
+ """
+ a = """
+ import sys
+ import atexit
+ def f():
+ atexit.register(func)
+ """
+ self.check(b, a)
+
+ def test_no_sys_import(self):
+ b = """sys.exitfunc = f"""
+ a = """atexit.register(f)"""
+ msg = ("Can't find sys import; Please add an atexit import at the "
+ "top of your file.")
+ self.warns(b, a, msg)
+
+
+ def test_unchanged(self):
+ s = """f(sys.exitfunc)"""
+ self.unchanged(s)
+
+
+class Test_asserts(FixerTestCase):
+
+ fixer = "asserts"
+
+ def test_deprecated_names(self):
+ tests = [
+ ('self.assert_(True)', 'self.assertTrue(True)'),
+ ('self.assertEquals(2, 2)', 'self.assertEqual(2, 2)'),
+ ('self.assertNotEquals(2, 3)', 'self.assertNotEqual(2, 3)'),
+ ('self.assertAlmostEquals(2, 3)', 'self.assertAlmostEqual(2, 3)'),
+ ('self.assertNotAlmostEquals(2, 8)', 'self.assertNotAlmostEqual(2, 8)'),
+ ('self.failUnlessEqual(2, 2)', 'self.assertEqual(2, 2)'),
+ ('self.failIfEqual(2, 3)', 'self.assertNotEqual(2, 3)'),
+ ('self.failUnlessAlmostEqual(2, 3)', 'self.assertAlmostEqual(2, 3)'),
+ ('self.failIfAlmostEqual(2, 8)', 'self.assertNotAlmostEqual(2, 8)'),
+ ('self.failUnless(True)', 'self.assertTrue(True)'),
+ ('self.failUnlessRaises(foo)', 'self.assertRaises(foo)'),
+ ('self.failIf(False)', 'self.assertFalse(False)'),
+ ]
+ for b, a in tests:
+ self.check(b, a)
+
+ def test_variants(self):
+ b = 'eq = self.assertEquals'
+ a = 'eq = self.assertEqual'
+ self.check(b, a)
+ b = 'self.assertEquals(2, 3, msg="fail")'
+ a = 'self.assertEqual(2, 3, msg="fail")'
+ self.check(b, a)
+ b = 'self.assertEquals(2, 3, msg="fail") # foo'
+ a = 'self.assertEqual(2, 3, msg="fail") # foo'
+ self.check(b, a)
+ b = 'self.assertEquals (2, 3)'
+ a = 'self.assertEqual (2, 3)'
+ self.check(b, a)
+ b = ' self.assertEquals (2, 3)'
+ a = ' self.assertEqual (2, 3)'
+ self.check(b, a)
+ b = 'with self.failUnlessRaises(Explosion): explode()'
+ a = 'with self.assertRaises(Explosion): explode()'
+ self.check(b, a)
+ b = 'with self.failUnlessRaises(Explosion) as cm: explode()'
+ a = 'with self.assertRaises(Explosion) as cm: explode()'
+ self.check(b, a)
+
+ def test_unchanged(self):
+ self.unchanged('self.assertEqualsOnSaturday')
+ self.unchanged('self.assertEqualsOnSaturday(3, 5)')
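
[Editor's note] The check(b, a) and warns_unchanged(s, t) helpers used above come
from the FixerTestCase base class defined earlier in this test module (not shown
in this excerpt); check() runs the named fixer over b and asserts the output
equals a. Roughly the same round trip can be driven by hand through
RefactoringTool -- a minimal sketch, assuming a Python 2 interpreter with
lib2to3 importable:

    # Run only the "asserts" fixer over a one-line snippet.
    from lib2to3.refactor import RefactoringTool

    rt = RefactoringTool(["lib2to3.fixes.fix_asserts"])
    src = u"self.assertEquals(2, 2)\n"   # refactor_string expects a trailing newline
    tree = rt.refactor_string(src, "<example>")
    print str(tree)                      # -> self.assertEqual(2, 2)
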
diff --git a/lib/python2.7/lib2to3/tests/test_main.py b/lib/python2.7/lib2to3/tests/test_main.py
new file mode 100644
index 0000000..04131cf
--- /dev/null
+++ b/lib/python2.7/lib2to3/tests/test_main.py
@@ -0,0 +1,149 @@
+# -*- coding: utf-8 -*-
+import codecs
+import logging
+import os
+import re
+import shutil
+import StringIO
+import sys
+import tempfile
+import unittest
+
+from lib2to3 import main
+
+
+TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), "data")
+PY2_TEST_MODULE = os.path.join(TEST_DATA_DIR, "py2_test_grammar.py")
+
+
+class TestMain(unittest.TestCase):
+
+ if not hasattr(unittest.TestCase, 'assertNotRegex'):
+        # unittest.TestCase.assertNotRegex was only introduced in Python
+        # 3.2; provide a minimal fallback (re is imported at module level).
+        def assertNotRegex(self, text, regexp, msg=None):
+ if not hasattr(regexp, 'search'):
+ regexp = re.compile(regexp)
+ if regexp.search(text):
+ self.fail("regexp %s MATCHED text %r" % (regexp.pattern, text))
+
+ def setUp(self):
+ self.temp_dir = None # tearDown() will rmtree this directory if set.
+
+ def tearDown(self):
+        # Clean up the logging configuration set up by main.
+ del logging.root.handlers[:]
+ if self.temp_dir:
+ shutil.rmtree(self.temp_dir)
+
+ def run_2to3_capture(self, args, in_capture, out_capture, err_capture):
+ save_stdin = sys.stdin
+ save_stdout = sys.stdout
+ save_stderr = sys.stderr
+ sys.stdin = in_capture
+ sys.stdout = out_capture
+ sys.stderr = err_capture
+ try:
+ return main.main("lib2to3.fixes", args)
+ finally:
+ sys.stdin = save_stdin
+ sys.stdout = save_stdout
+ sys.stderr = save_stderr
+
+ def test_unencodable_diff(self):
+ input_stream = StringIO.StringIO(u"print 'nothing'\nprint u'über'\n")
+ out = StringIO.StringIO()
+ out_enc = codecs.getwriter("ascii")(out)
+ err = StringIO.StringIO()
+ ret = self.run_2to3_capture(["-"], input_stream, out_enc, err)
+ self.assertEqual(ret, 0)
+ output = out.getvalue()
+ self.assertIn("-print 'nothing'", output)
+ self.assertIn("WARNING: couldn't encode <stdin>'s diff for "
+ "your terminal", err.getvalue())
+
+ def setup_test_source_trees(self):
+ """Setup a test source tree and output destination tree."""
+ self.temp_dir = tempfile.mkdtemp() # tearDown() cleans this up.
+ self.py2_src_dir = os.path.join(self.temp_dir, "python2_project")
+ self.py3_dest_dir = os.path.join(self.temp_dir, "python3_project")
+ os.mkdir(self.py2_src_dir)
+ os.mkdir(self.py3_dest_dir)
+ # Turn it into a package with a few files.
+ self.setup_files = []
+ open(os.path.join(self.py2_src_dir, "__init__.py"), "w").close()
+ self.setup_files.append("__init__.py")
+ shutil.copy(PY2_TEST_MODULE, self.py2_src_dir)
+ self.setup_files.append(os.path.basename(PY2_TEST_MODULE))
+ self.trivial_py2_file = os.path.join(self.py2_src_dir, "trivial.py")
+ self.init_py2_file = os.path.join(self.py2_src_dir, "__init__.py")
+ with open(self.trivial_py2_file, "w") as trivial:
+ trivial.write("print 'I need a simple conversion.'")
+ self.setup_files.append("trivial.py")
+
+ def test_filename_changing_on_output_single_dir(self):
+ """2to3 a single directory with a new output dir and suffix."""
+ self.setup_test_source_trees()
+ out = StringIO.StringIO()
+ err = StringIO.StringIO()
+ suffix = "TEST"
+ ret = self.run_2to3_capture(
+ ["-n", "--add-suffix", suffix, "--write-unchanged-files",
+ "--no-diffs", "--output-dir",
+ self.py3_dest_dir, self.py2_src_dir],
+ StringIO.StringIO(""), out, err)
+ self.assertEqual(ret, 0)
+ stderr = err.getvalue()
+ self.assertIn(" implies -w.", stderr)
+ self.assertIn(
+ "Output in %r will mirror the input directory %r layout" % (
+ self.py3_dest_dir, self.py2_src_dir), stderr)
+ self.assertEqual(set(name+suffix for name in self.setup_files),
+ set(os.listdir(self.py3_dest_dir)))
+ for name in self.setup_files:
+ self.assertIn("Writing converted %s to %s" % (
+ os.path.join(self.py2_src_dir, name),
+ os.path.join(self.py3_dest_dir, name+suffix)), stderr)
+ sep = re.escape(os.sep)
+ self.assertRegexpMatches(
+ stderr, r"No changes to .*/__init__\.py".replace("/", sep))
+ self.assertNotRegex(
+ stderr, r"No changes to .*/trivial\.py".replace("/", sep))
+
+ def test_filename_changing_on_output_two_files(self):
+ """2to3 two files in one directory with a new output dir."""
+ self.setup_test_source_trees()
+ err = StringIO.StringIO()
+ py2_files = [self.trivial_py2_file, self.init_py2_file]
+ expected_files = set(os.path.basename(name) for name in py2_files)
+ ret = self.run_2to3_capture(
+ ["-n", "-w", "--write-unchanged-files",
+ "--no-diffs", "--output-dir", self.py3_dest_dir] + py2_files,
+ StringIO.StringIO(""), StringIO.StringIO(), err)
+ self.assertEqual(ret, 0)
+ stderr = err.getvalue()
+ self.assertIn(
+ "Output in %r will mirror the input directory %r layout" % (
+ self.py3_dest_dir, self.py2_src_dir), stderr)
+ self.assertEqual(expected_files, set(os.listdir(self.py3_dest_dir)))
+
+ def test_filename_changing_on_output_single_file(self):
+ """2to3 a single file with a new output dir."""
+ self.setup_test_source_trees()
+ err = StringIO.StringIO()
+ ret = self.run_2to3_capture(
+ ["-n", "-w", "--no-diffs", "--output-dir", self.py3_dest_dir,
+ self.trivial_py2_file],
+ StringIO.StringIO(""), StringIO.StringIO(), err)
+ self.assertEqual(ret, 0)
+ stderr = err.getvalue()
+ self.assertIn(
+ "Output in %r will mirror the input directory %r layout" % (
+ self.py3_dest_dir, self.py2_src_dir), stderr)
+ self.assertEqual(set([os.path.basename(self.trivial_py2_file)]),
+ set(os.listdir(self.py3_dest_dir)))
+
+
+if __name__ == '__main__':
+ unittest.main()
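
[Editor's note] run_2to3_capture above is a thin wrapper around the real entry
point, so the flag combinations under test can be reproduced programmatically.
A sketch with illustrative directory names:

    # Mirror test_filename_changing_on_output_single_dir by hand.
    from lib2to3 import main

    ret = main.main("lib2to3.fixes",
                    ["-n", "--add-suffix", "TEST", "--write-unchanged-files",
                     "--no-diffs", "--output-dir", "python3_project",
                     "python2_project"])
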
diff --git a/lib/python2.7/lib2to3/tests/test_parser.py b/lib/python2.7/lib2to3/tests/test_parser.py
new file mode 100644
index 0000000..ebf8441
--- /dev/null
+++ b/lib/python2.7/lib2to3/tests/test_parser.py
@@ -0,0 +1,343 @@
+"""Test suite for 2to3's parser and grammar files.
+
+This is the place to add tests for changes to 2to3's grammar, such as those
+merging the grammars for Python 2 and 3. In addition to specific tests for
+parts of the grammar we've changed, we also make sure we can parse the
+test_grammar.py files from both Python 2 and Python 3.
+"""
+
+# Testing imports
+from . import support
+from .support import driver, test_dir
+
+# Python imports
+import os
+import shutil
+import subprocess
+import sys
+import tempfile
+import unittest
+
+# Local imports
+from lib2to3.pgen2 import driver as pgen2_driver
+from lib2to3.pgen2 import tokenize
+from ..pgen2.parse import ParseError
+from lib2to3.pygram import python_symbols as syms
+
+
+class TestDriver(support.TestCase):
+
+ def test_formfeed(self):
+ s = """print 1\n\x0Cprint 2\n"""
+ t = driver.parse_string(s)
+ self.assertEqual(t.children[0].children[0].type, syms.print_stmt)
+ self.assertEqual(t.children[1].children[0].type, syms.print_stmt)
+
+
+class TestPgen2Caching(support.TestCase):
+ def test_load_grammar_from_txt_file(self):
+ pgen2_driver.load_grammar(support.grammar_path, save=False, force=True)
+
+ def test_load_grammar_from_pickle(self):
+ # Make a copy of the grammar file in a temp directory we are
+ # guaranteed to be able to write to.
+ tmpdir = tempfile.mkdtemp()
+ try:
+ grammar_copy = os.path.join(
+ tmpdir, os.path.basename(support.grammar_path))
+ shutil.copy(support.grammar_path, grammar_copy)
+ pickle_name = pgen2_driver._generate_pickle_name(grammar_copy)
+
+ pgen2_driver.load_grammar(grammar_copy, save=True, force=True)
+ self.assertTrue(os.path.exists(pickle_name))
+
+ os.unlink(grammar_copy) # Only the pickle remains...
+ pgen2_driver.load_grammar(grammar_copy, save=False, force=False)
+ finally:
+ shutil.rmtree(tmpdir)
+
+ @unittest.skipIf(sys.executable is None, 'sys.executable required')
+ def test_load_grammar_from_subprocess(self):
+ tmpdir = tempfile.mkdtemp()
+ tmpsubdir = os.path.join(tmpdir, 'subdir')
+ try:
+ os.mkdir(tmpsubdir)
+ grammar_base = os.path.basename(support.grammar_path)
+ grammar_copy = os.path.join(tmpdir, grammar_base)
+ grammar_sub_copy = os.path.join(tmpsubdir, grammar_base)
+ shutil.copy(support.grammar_path, grammar_copy)
+ shutil.copy(support.grammar_path, grammar_sub_copy)
+ pickle_name = pgen2_driver._generate_pickle_name(grammar_copy)
+ pickle_sub_name = pgen2_driver._generate_pickle_name(
+ grammar_sub_copy)
+ self.assertNotEqual(pickle_name, pickle_sub_name)
+
+ # Generate a pickle file from this process.
+ pgen2_driver.load_grammar(grammar_copy, save=True, force=True)
+ self.assertTrue(os.path.exists(pickle_name))
+
+            # Generate a new pickle file in a subprocess, which will most
+            # likely use a different hash randomization seed.
+ sub_env = dict(os.environ)
+ sub_env['PYTHONHASHSEED'] = 'random'
+ subprocess.check_call(
+ [sys.executable, '-c', """
+from lib2to3.pgen2 import driver as pgen2_driver
+pgen2_driver.load_grammar(%r, save=True, force=True)
+ """ % (grammar_sub_copy,)],
+ env=sub_env)
+ self.assertTrue(os.path.exists(pickle_sub_name))
+
+ with open(pickle_name, 'rb') as pickle_f_1, \
+ open(pickle_sub_name, 'rb') as pickle_f_2:
+ self.assertEqual(
+ pickle_f_1.read(), pickle_f_2.read(),
+ msg='Grammar caches generated using different hash seeds'
+ ' were not identical.')
+ finally:
+ shutil.rmtree(tmpdir)
+
+
+class GrammarTest(support.TestCase):
+ def validate(self, code):
+ support.parse_string(code)
+
+ def invalid_syntax(self, code):
+ try:
+ self.validate(code)
+ except ParseError:
+ pass
+ else:
+ raise AssertionError("Syntax shouldn't have been valid")
+
+
+class TestMatrixMultiplication(GrammarTest):
+ def test_matrix_multiplication_operator(self):
+ self.validate("a @ b")
+ self.validate("a @= b")
+
+
+class TestYieldFrom(GrammarTest):
+    def test_yield_from(self):
+ self.validate("yield from x")
+ self.validate("(yield from x) + y")
+ self.invalid_syntax("yield from")
+
+
+class TestRaiseChanges(GrammarTest):
+ def test_2x_style_1(self):
+ self.validate("raise")
+
+ def test_2x_style_2(self):
+ self.validate("raise E, V")
+
+ def test_2x_style_3(self):
+ self.validate("raise E, V, T")
+
+ def test_2x_style_invalid_1(self):
+ self.invalid_syntax("raise E, V, T, Z")
+
+ def test_3x_style(self):
+ self.validate("raise E1 from E2")
+
+ def test_3x_style_invalid_1(self):
+ self.invalid_syntax("raise E, V from E1")
+
+ def test_3x_style_invalid_2(self):
+ self.invalid_syntax("raise E from E1, E2")
+
+ def test_3x_style_invalid_3(self):
+ self.invalid_syntax("raise from E1, E2")
+
+ def test_3x_style_invalid_4(self):
+ self.invalid_syntax("raise E from")
+
+
+# Modelled after Lib/test/test_grammar.py:TokenTests.test_funcdef issue2292
+# and Lib/test/test_parser.py test_list_displays, test_set_displays,
+# test_dict_displays, test_argument_unpacking, ... changes.
+class TestUnpackingGeneralizations(GrammarTest):
+ def test_mid_positional_star(self):
+ self.validate("""func(1, *(2, 3), 4)""")
+
+ def test_double_star_dict_literal(self):
+ self.validate("""func(**{'eggs':'scrambled', 'spam':'fried'})""")
+
+ def test_double_star_dict_literal_after_keywords(self):
+ self.validate("""func(spam='fried', **{'eggs':'scrambled'})""")
+
+ def test_list_display(self):
+ self.validate("""[*{2}, 3, *[4]]""")
+
+ def test_set_display(self):
+ self.validate("""{*{2}, 3, *[4]}""")
+
+ def test_dict_display_1(self):
+ self.validate("""{**{}}""")
+
+ def test_dict_display_2(self):
+ self.validate("""{**{}, 3:4, **{5:6, 7:8}}""")
+
+ def test_argument_unpacking_1(self):
+ self.validate("""f(a, *b, *c, d)""")
+
+ def test_argument_unpacking_2(self):
+ self.validate("""f(**a, **b)""")
+
+ def test_argument_unpacking_3(self):
+ self.validate("""f(2, *a, *b, **b, **c, **d)""")
+
+
+# Adapted from Python 3's Lib/test/test_grammar.py:GrammarTests.testFuncdef
+class TestFunctionAnnotations(GrammarTest):
+ def test_1(self):
+ self.validate("""def f(x) -> list: pass""")
+
+ def test_2(self):
+ self.validate("""def f(x:int): pass""")
+
+ def test_3(self):
+ self.validate("""def f(*x:str): pass""")
+
+ def test_4(self):
+ self.validate("""def f(**x:float): pass""")
+
+ def test_5(self):
+ self.validate("""def f(x, y:1+2): pass""")
+
+ def test_6(self):
+ self.validate("""def f(a, (b:1, c:2, d)): pass""")
+
+ def test_7(self):
+ self.validate("""def f(a, (b:1, c:2, d), e:3=4, f=5, *g:6): pass""")
+
+ def test_8(self):
+ s = """def f(a, (b:1, c:2, d), e:3=4, f=5,
+ *g:6, h:7, i=8, j:9=10, **k:11) -> 12: pass"""
+ self.validate(s)
+
+
+class TestExcept(GrammarTest):
+ def test_new(self):
+ s = """
+ try:
+ x
+ except E as N:
+ y"""
+ self.validate(s)
+
+ def test_old(self):
+ s = """
+ try:
+ x
+ except E, N:
+ y"""
+ self.validate(s)
+
+
+# Adapted from Python 3's Lib/test/test_grammar.py:GrammarTests.testAtoms
+class TestSetLiteral(GrammarTest):
+ def test_1(self):
+ self.validate("""x = {'one'}""")
+
+ def test_2(self):
+ self.validate("""x = {'one', 1,}""")
+
+ def test_3(self):
+ self.validate("""x = {'one', 'two', 'three'}""")
+
+ def test_4(self):
+ self.validate("""x = {2, 3, 4,}""")
+
+
+class TestNumericLiterals(GrammarTest):
+ def test_new_octal_notation(self):
+ self.validate("""0o7777777777777""")
+ self.invalid_syntax("""0o7324528887""")
+
+ def test_new_binary_notation(self):
+ self.validate("""0b101010""")
+ self.invalid_syntax("""0b0101021""")
+
+
+class TestClassDef(GrammarTest):
+ def test_new_syntax(self):
+ self.validate("class B(t=7): pass")
+ self.validate("class B(t, *args): pass")
+ self.validate("class B(t, **kwargs): pass")
+ self.validate("class B(t, *args, **kwargs): pass")
+ self.validate("class B(t, y=9, *args, **kwargs): pass")
+
+
+class TestParserIdempotency(support.TestCase):
+
+ """A cut-down version of pytree_idempotency.py."""
+
+ def test_all_project_files(self):
+ if sys.platform.startswith("win"):
+ # XXX something with newlines goes wrong on Windows.
+ return
+ for filepath in support.all_project_files():
+ with open(filepath, "rb") as fp:
+ encoding = tokenize.detect_encoding(fp.readline)[0]
+ self.assertIsNotNone(encoding,
+ "can't detect encoding for %s" % filepath)
+ with open(filepath, "r") as fp:
+ source = fp.read()
+ source = source.decode(encoding)
+ tree = driver.parse_string(source)
+ new = unicode(tree)
+ if diff(filepath, new, encoding):
+ self.fail("Idempotency failed: %s" % filepath)
+
+ def test_extended_unpacking(self):
+ driver.parse_string("a, *b, c = x\n")
+ driver.parse_string("[*a, b] = x\n")
+ driver.parse_string("(z, *y, w) = m\n")
+ driver.parse_string("for *z, m in d: pass\n")
+
+
+class TestLiterals(GrammarTest):
+
+ def validate(self, s):
+ driver.parse_string(support.dedent(s) + "\n\n")
+
+ def test_multiline_bytes_literals(self):
+ s = """
+ md5test(b"\xaa" * 80,
+ (b"Test Using Larger Than Block-Size Key "
+ b"and Larger Than One Block-Size Data"),
+ "6f630fad67cda0ee1fb1f562db3aa53e")
+ """
+ self.validate(s)
+
+ def test_multiline_bytes_tripquote_literals(self):
+ s = '''
+ b"""
+ <?xml version="1.0" encoding="UTF-8"?>
+ <!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN">
+ """
+ '''
+ self.validate(s)
+
+ def test_multiline_str_literals(self):
+ s = """
+ md5test("\xaa" * 80,
+ ("Test Using Larger Than Block-Size Key "
+ "and Larger Than One Block-Size Data"),
+ "6f630fad67cda0ee1fb1f562db3aa53e")
+ """
+ self.validate(s)
+
+
+def diff(fn, result, encoding):
+    # Write the re-serialized source to a scratch file literally named "@",
+    # shell out to diff(1) against the original file, and return the exit
+    # status (nonzero means the parse/serialize round trip was not idempotent).
+    f = open("@", "w")
+    try:
+        f.write(result.encode(encoding))
+    finally:
+        f.close()
+    try:
+        fn = fn.replace('"', '\\"')
+        return os.system('diff -u "%s" @' % fn)
+    finally:
+        os.remove("@")
diff --git a/lib/python2.7/lib2to3/tests/test_pytree.py b/lib/python2.7/lib2to3/tests/test_pytree.py
new file mode 100644
index 0000000..ccddce6
--- /dev/null
+++ b/lib/python2.7/lib2to3/tests/test_pytree.py
@@ -0,0 +1,494 @@
+# Copyright 2006 Google, Inc. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+"""Unit tests for pytree.py.
+
+NOTE: Please *don't* add doc strings to individual test methods!
+In verbose mode, printing of the module, class and method name is much
+more helpful than printing of (the first line of) the docstring,
+especially when debugging a test.
+"""
+
+from __future__ import with_statement
+
+import sys
+import warnings
+
+# Testing imports
+from . import support
+
+from lib2to3 import pytree
+
+try:
+ sorted
+except NameError:
+ def sorted(lst):
+ l = list(lst)
+ l.sort()
+ return l
+
+
+class TestNodes(support.TestCase):
+
+ """Unit tests for nodes (Base, Leaf, Node)."""
+
+ if sys.version_info >= (2,6):
+ # warnings.catch_warnings is new in 2.6.
+ def test_deprecated_prefix_methods(self):
+ l = pytree.Leaf(100, "foo")
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter("always", DeprecationWarning)
+ self.assertEqual(l.get_prefix(), "")
+ l.set_prefix("hi")
+ self.assertEqual(l.prefix, "hi")
+ self.assertEqual(len(w), 2)
+ for warning in w:
+ self.assertTrue(warning.category is DeprecationWarning)
+ self.assertEqual(str(w[0].message), "get_prefix() is deprecated; " \
+ "use the prefix property")
+ self.assertEqual(str(w[1].message), "set_prefix() is deprecated; " \
+ "use the prefix property")
+
+ def test_instantiate_base(self):
+ if __debug__:
+ # Test that instantiating Base() raises an AssertionError
+ self.assertRaises(AssertionError, pytree.Base)
+
+ def test_leaf(self):
+ l1 = pytree.Leaf(100, "foo")
+ self.assertEqual(l1.type, 100)
+ self.assertEqual(l1.value, "foo")
+
+ def test_leaf_repr(self):
+ l1 = pytree.Leaf(100, "foo")
+ self.assertEqual(repr(l1), "Leaf(100, 'foo')")
+
+ def test_leaf_str(self):
+ l1 = pytree.Leaf(100, "foo")
+ self.assertEqual(str(l1), "foo")
+ l2 = pytree.Leaf(100, "foo", context=(" ", (10, 1)))
+ self.assertEqual(str(l2), " foo")
+
+ def test_leaf_str_numeric_value(self):
+ # Make sure that the Leaf's value is stringified. Failing to
+ # do this can cause a TypeError in certain situations.
+ l1 = pytree.Leaf(2, 5)
+ l1.prefix = "foo_"
+ self.assertEqual(str(l1), "foo_5")
+
+ def test_leaf_equality(self):
+ l1 = pytree.Leaf(100, "foo")
+ l2 = pytree.Leaf(100, "foo", context=(" ", (1, 0)))
+ self.assertEqual(l1, l2)
+ l3 = pytree.Leaf(101, "foo")
+ l4 = pytree.Leaf(100, "bar")
+ self.assertNotEqual(l1, l3)
+ self.assertNotEqual(l1, l4)
+
+ def test_leaf_prefix(self):
+ l1 = pytree.Leaf(100, "foo")
+ self.assertEqual(l1.prefix, "")
+ self.assertFalse(l1.was_changed)
+ l1.prefix = " ##\n\n"
+ self.assertEqual(l1.prefix, " ##\n\n")
+ self.assertTrue(l1.was_changed)
+
+ def test_node(self):
+ l1 = pytree.Leaf(100, "foo")
+ l2 = pytree.Leaf(200, "bar")
+ n1 = pytree.Node(1000, [l1, l2])
+ self.assertEqual(n1.type, 1000)
+ self.assertEqual(n1.children, [l1, l2])
+
+ def test_node_repr(self):
+ l1 = pytree.Leaf(100, "foo")
+ l2 = pytree.Leaf(100, "bar", context=(" ", (1, 0)))
+ n1 = pytree.Node(1000, [l1, l2])
+ self.assertEqual(repr(n1),
+ "Node(1000, [%s, %s])" % (repr(l1), repr(l2)))
+
+ def test_node_str(self):
+ l1 = pytree.Leaf(100, "foo")
+ l2 = pytree.Leaf(100, "bar", context=(" ", (1, 0)))
+ n1 = pytree.Node(1000, [l1, l2])
+ self.assertEqual(str(n1), "foo bar")
+
+ def test_node_prefix(self):
+ l1 = pytree.Leaf(100, "foo")
+ self.assertEqual(l1.prefix, "")
+ n1 = pytree.Node(1000, [l1])
+ self.assertEqual(n1.prefix, "")
+ n1.prefix = " "
+ self.assertEqual(n1.prefix, " ")
+ self.assertEqual(l1.prefix, " ")
+
+ def test_get_suffix(self):
+ l1 = pytree.Leaf(100, "foo", prefix="a")
+ l2 = pytree.Leaf(100, "bar", prefix="b")
+ n1 = pytree.Node(1000, [l1, l2])
+
+ self.assertEqual(l1.get_suffix(), l2.prefix)
+ self.assertEqual(l2.get_suffix(), "")
+ self.assertEqual(n1.get_suffix(), "")
+
+ l3 = pytree.Leaf(100, "bar", prefix="c")
+ n2 = pytree.Node(1000, [n1, l3])
+
+ self.assertEqual(n1.get_suffix(), l3.prefix)
+ self.assertEqual(l3.get_suffix(), "")
+ self.assertEqual(n2.get_suffix(), "")
+
+ def test_node_equality(self):
+ n1 = pytree.Node(1000, ())
+ n2 = pytree.Node(1000, [], context=(" ", (1, 0)))
+ self.assertEqual(n1, n2)
+ n3 = pytree.Node(1001, ())
+ self.assertNotEqual(n1, n3)
+
+ def test_node_recursive_equality(self):
+ l1 = pytree.Leaf(100, "foo")
+ l2 = pytree.Leaf(100, "foo")
+ n1 = pytree.Node(1000, [l1])
+ n2 = pytree.Node(1000, [l2])
+ self.assertEqual(n1, n2)
+ l3 = pytree.Leaf(100, "bar")
+ n3 = pytree.Node(1000, [l3])
+ self.assertNotEqual(n1, n3)
+
+ def test_replace(self):
+ l1 = pytree.Leaf(100, "foo")
+ l2 = pytree.Leaf(100, "+")
+ l3 = pytree.Leaf(100, "bar")
+ n1 = pytree.Node(1000, [l1, l2, l3])
+ self.assertEqual(n1.children, [l1, l2, l3])
+ self.assertIsInstance(n1.children, list)
+ self.assertFalse(n1.was_changed)
+ l2new = pytree.Leaf(100, "-")
+ l2.replace(l2new)
+ self.assertEqual(n1.children, [l1, l2new, l3])
+ self.assertIsInstance(n1.children, list)
+ self.assertTrue(n1.was_changed)
+
+ def test_replace_with_list(self):
+ l1 = pytree.Leaf(100, "foo")
+ l2 = pytree.Leaf(100, "+")
+ l3 = pytree.Leaf(100, "bar")
+ n1 = pytree.Node(1000, [l1, l2, l3])
+
+ l2.replace([pytree.Leaf(100, "*"), pytree.Leaf(100, "*")])
+ self.assertEqual(str(n1), "foo**bar")
+ self.assertIsInstance(n1.children, list)
+
+ def test_leaves(self):
+ l1 = pytree.Leaf(100, "foo")
+ l2 = pytree.Leaf(100, "bar")
+ l3 = pytree.Leaf(100, "fooey")
+ n2 = pytree.Node(1000, [l1, l2])
+ n3 = pytree.Node(1000, [l3])
+ n1 = pytree.Node(1000, [n2, n3])
+
+ self.assertEqual(list(n1.leaves()), [l1, l2, l3])
+
+ def test_depth(self):
+ l1 = pytree.Leaf(100, "foo")
+ l2 = pytree.Leaf(100, "bar")
+ n2 = pytree.Node(1000, [l1, l2])
+ n3 = pytree.Node(1000, [])
+ n1 = pytree.Node(1000, [n2, n3])
+
+ self.assertEqual(l1.depth(), 2)
+ self.assertEqual(n3.depth(), 1)
+ self.assertEqual(n1.depth(), 0)
+
+ def test_post_order(self):
+ l1 = pytree.Leaf(100, "foo")
+ l2 = pytree.Leaf(100, "bar")
+ l3 = pytree.Leaf(100, "fooey")
+ c1 = pytree.Node(1000, [l1, l2])
+ n1 = pytree.Node(1000, [c1, l3])
+ self.assertEqual(list(n1.post_order()), [l1, l2, c1, l3, n1])
+
+ def test_pre_order(self):
+ l1 = pytree.Leaf(100, "foo")
+ l2 = pytree.Leaf(100, "bar")
+ l3 = pytree.Leaf(100, "fooey")
+ c1 = pytree.Node(1000, [l1, l2])
+ n1 = pytree.Node(1000, [c1, l3])
+ self.assertEqual(list(n1.pre_order()), [n1, c1, l1, l2, l3])
+
+ def test_changed(self):
+ l1 = pytree.Leaf(100, "f")
+ self.assertFalse(l1.was_changed)
+ l1.changed()
+ self.assertTrue(l1.was_changed)
+
+ l1 = pytree.Leaf(100, "f")
+ n1 = pytree.Node(1000, [l1])
+ self.assertFalse(n1.was_changed)
+ n1.changed()
+ self.assertTrue(n1.was_changed)
+
+ l1 = pytree.Leaf(100, "foo")
+ l2 = pytree.Leaf(100, "+")
+ l3 = pytree.Leaf(100, "bar")
+ n1 = pytree.Node(1000, [l1, l2, l3])
+ n2 = pytree.Node(1000, [n1])
+ self.assertFalse(l1.was_changed)
+ self.assertFalse(n1.was_changed)
+ self.assertFalse(n2.was_changed)
+
+ n1.changed()
+ self.assertTrue(n1.was_changed)
+ self.assertTrue(n2.was_changed)
+ self.assertFalse(l1.was_changed)
+
+ def test_leaf_constructor_prefix(self):
+ for prefix in ("xyz_", ""):
+ l1 = pytree.Leaf(100, "self", prefix=prefix)
+            self.assertEqual(str(l1), prefix + "self")
+ self.assertEqual(l1.prefix, prefix)
+
+ def test_node_constructor_prefix(self):
+ for prefix in ("xyz_", ""):
+ l1 = pytree.Leaf(100, "self")
+ l2 = pytree.Leaf(100, "foo", prefix="_")
+ n1 = pytree.Node(1000, [l1, l2], prefix=prefix)
+            self.assertEqual(str(n1), prefix + "self_foo")
+ self.assertEqual(n1.prefix, prefix)
+ self.assertEqual(l1.prefix, prefix)
+ self.assertEqual(l2.prefix, "_")
+
+ def test_remove(self):
+ l1 = pytree.Leaf(100, "foo")
+ l2 = pytree.Leaf(100, "foo")
+ n1 = pytree.Node(1000, [l1, l2])
+ n2 = pytree.Node(1000, [n1])
+
+ self.assertEqual(n1.remove(), 0)
+ self.assertEqual(n2.children, [])
+ self.assertEqual(l1.parent, n1)
+ self.assertEqual(n1.parent, None)
+ self.assertEqual(n2.parent, None)
+ self.assertFalse(n1.was_changed)
+ self.assertTrue(n2.was_changed)
+
+ self.assertEqual(l2.remove(), 1)
+ self.assertEqual(l1.remove(), 0)
+ self.assertEqual(n1.children, [])
+ self.assertEqual(l1.parent, None)
+ self.assertEqual(n1.parent, None)
+ self.assertEqual(n2.parent, None)
+ self.assertTrue(n1.was_changed)
+ self.assertTrue(n2.was_changed)
+
+ def test_remove_parentless(self):
+ n1 = pytree.Node(1000, [])
+ n1.remove()
+ self.assertEqual(n1.parent, None)
+
+ l1 = pytree.Leaf(100, "foo")
+ l1.remove()
+ self.assertEqual(l1.parent, None)
+
+ def test_node_set_child(self):
+ l1 = pytree.Leaf(100, "foo")
+ n1 = pytree.Node(1000, [l1])
+
+ l2 = pytree.Leaf(100, "bar")
+ n1.set_child(0, l2)
+ self.assertEqual(l1.parent, None)
+ self.assertEqual(l2.parent, n1)
+ self.assertEqual(n1.children, [l2])
+
+ n2 = pytree.Node(1000, [l1])
+ n2.set_child(0, n1)
+ self.assertEqual(l1.parent, None)
+ self.assertEqual(n1.parent, n2)
+ self.assertEqual(n2.parent, None)
+ self.assertEqual(n2.children, [n1])
+
+ self.assertRaises(IndexError, n1.set_child, 4, l2)
+ # I don't care what it raises, so long as it's an exception
+ self.assertRaises(Exception, n1.set_child, 0, list)
+
+ def test_node_insert_child(self):
+ l1 = pytree.Leaf(100, "foo")
+ n1 = pytree.Node(1000, [l1])
+
+ l2 = pytree.Leaf(100, "bar")
+ n1.insert_child(0, l2)
+ self.assertEqual(l2.parent, n1)
+ self.assertEqual(n1.children, [l2, l1])
+
+ l3 = pytree.Leaf(100, "abc")
+ n1.insert_child(2, l3)
+ self.assertEqual(n1.children, [l2, l1, l3])
+
+ # I don't care what it raises, so long as it's an exception
+ self.assertRaises(Exception, n1.insert_child, 0, list)
+
+ def test_node_append_child(self):
+ n1 = pytree.Node(1000, [])
+
+ l1 = pytree.Leaf(100, "foo")
+ n1.append_child(l1)
+ self.assertEqual(l1.parent, n1)
+ self.assertEqual(n1.children, [l1])
+
+ l2 = pytree.Leaf(100, "bar")
+ n1.append_child(l2)
+ self.assertEqual(l2.parent, n1)
+ self.assertEqual(n1.children, [l1, l2])
+
+ # I don't care what it raises, so long as it's an exception
+ self.assertRaises(Exception, n1.append_child, list)
+
+ def test_node_next_sibling(self):
+ n1 = pytree.Node(1000, [])
+ n2 = pytree.Node(1000, [])
+ p1 = pytree.Node(1000, [n1, n2])
+
+ self.assertIs(n1.next_sibling, n2)
+ self.assertEqual(n2.next_sibling, None)
+ self.assertEqual(p1.next_sibling, None)
+
+ def test_leaf_next_sibling(self):
+ l1 = pytree.Leaf(100, "a")
+ l2 = pytree.Leaf(100, "b")
+ p1 = pytree.Node(1000, [l1, l2])
+
+ self.assertIs(l1.next_sibling, l2)
+ self.assertEqual(l2.next_sibling, None)
+ self.assertEqual(p1.next_sibling, None)
+
+ def test_node_prev_sibling(self):
+ n1 = pytree.Node(1000, [])
+ n2 = pytree.Node(1000, [])
+ p1 = pytree.Node(1000, [n1, n2])
+
+ self.assertIs(n2.prev_sibling, n1)
+ self.assertEqual(n1.prev_sibling, None)
+ self.assertEqual(p1.prev_sibling, None)
+
+ def test_leaf_prev_sibling(self):
+ l1 = pytree.Leaf(100, "a")
+ l2 = pytree.Leaf(100, "b")
+ p1 = pytree.Node(1000, [l1, l2])
+
+ self.assertIs(l2.prev_sibling, l1)
+ self.assertEqual(l1.prev_sibling, None)
+ self.assertEqual(p1.prev_sibling, None)
+
+
+class TestPatterns(support.TestCase):
+
+ """Unit tests for tree matching patterns."""
+
+ def test_basic_patterns(self):
+ # Build a tree
+ l1 = pytree.Leaf(100, "foo")
+ l2 = pytree.Leaf(100, "bar")
+ l3 = pytree.Leaf(100, "foo")
+ n1 = pytree.Node(1000, [l1, l2])
+ n2 = pytree.Node(1000, [l3])
+ root = pytree.Node(1000, [n1, n2])
+ # Build a pattern matching a leaf
+ pl = pytree.LeafPattern(100, "foo", name="pl")
+ r = {}
+ self.assertFalse(pl.match(root, results=r))
+ self.assertEqual(r, {})
+ self.assertFalse(pl.match(n1, results=r))
+ self.assertEqual(r, {})
+ self.assertFalse(pl.match(n2, results=r))
+ self.assertEqual(r, {})
+ self.assertTrue(pl.match(l1, results=r))
+ self.assertEqual(r, {"pl": l1})
+ r = {}
+ self.assertFalse(pl.match(l2, results=r))
+ self.assertEqual(r, {})
+ # Build a pattern matching a node
+ pn = pytree.NodePattern(1000, [pl], name="pn")
+ self.assertFalse(pn.match(root, results=r))
+ self.assertEqual(r, {})
+ self.assertFalse(pn.match(n1, results=r))
+ self.assertEqual(r, {})
+ self.assertTrue(pn.match(n2, results=r))
+ self.assertEqual(r, {"pn": n2, "pl": l3})
+ r = {}
+ self.assertFalse(pn.match(l1, results=r))
+ self.assertEqual(r, {})
+ self.assertFalse(pn.match(l2, results=r))
+ self.assertEqual(r, {})
+
+ def test_wildcard(self):
+ # Build a tree for testing
+ l1 = pytree.Leaf(100, "foo")
+ l2 = pytree.Leaf(100, "bar")
+ l3 = pytree.Leaf(100, "foo")
+ n1 = pytree.Node(1000, [l1, l2])
+ n2 = pytree.Node(1000, [l3])
+ root = pytree.Node(1000, [n1, n2])
+ # Build a pattern
+ pl = pytree.LeafPattern(100, "foo", name="pl")
+ pn = pytree.NodePattern(1000, [pl], name="pn")
+ pw = pytree.WildcardPattern([[pn], [pl, pl]], name="pw")
+ r = {}
+ self.assertFalse(pw.match_seq([root], r))
+ self.assertEqual(r, {})
+ self.assertFalse(pw.match_seq([n1], r))
+ self.assertEqual(r, {})
+ self.assertTrue(pw.match_seq([n2], r))
+ # These are easier to debug
+ self.assertEqual(sorted(r.keys()), ["pl", "pn", "pw"])
+ self.assertEqual(r["pl"], l1)
+ self.assertEqual(r["pn"], n2)
+ self.assertEqual(r["pw"], [n2])
+ # But this is equivalent
+ self.assertEqual(r, {"pl": l1, "pn": n2, "pw": [n2]})
+ r = {}
+ self.assertTrue(pw.match_seq([l1, l3], r))
+ self.assertEqual(r, {"pl": l3, "pw": [l1, l3]})
+ self.assertIs(r["pl"], l3)
+
+ def test_generate_matches(self):
+ la = pytree.Leaf(1, "a")
+ lb = pytree.Leaf(1, "b")
+ lc = pytree.Leaf(1, "c")
+ ld = pytree.Leaf(1, "d")
+ le = pytree.Leaf(1, "e")
+ lf = pytree.Leaf(1, "f")
+ leaves = [la, lb, lc, ld, le, lf]
+ root = pytree.Node(1000, leaves)
+ pa = pytree.LeafPattern(1, "a", "pa")
+ pb = pytree.LeafPattern(1, "b", "pb")
+ pc = pytree.LeafPattern(1, "c", "pc")
+ pd = pytree.LeafPattern(1, "d", "pd")
+ pe = pytree.LeafPattern(1, "e", "pe")
+ pf = pytree.LeafPattern(1, "f", "pf")
+ pw = pytree.WildcardPattern([[pa, pb, pc], [pd, pe],
+ [pa, pb], [pc, pd], [pe, pf]],
+ min=1, max=4, name="pw")
+ self.assertEqual([x[0] for x in pw.generate_matches(leaves)],
+ [3, 5, 2, 4, 6])
+ pr = pytree.NodePattern(type=1000, content=[pw], name="pr")
+ matches = list(pytree.generate_matches([pr], [root]))
+ self.assertEqual(len(matches), 1)
+ c, r = matches[0]
+ self.assertEqual(c, 1)
+ self.assertEqual(str(r["pr"]), "abcdef")
+ self.assertEqual(r["pw"], [la, lb, lc, ld, le, lf])
+ for c in "abcdef":
+ self.assertEqual(r["p" + c], pytree.Leaf(1, c))
+
+ def test_has_key_example(self):
+ pattern = pytree.NodePattern(331,
+ (pytree.LeafPattern(7),
+ pytree.WildcardPattern(name="args"),
+ pytree.LeafPattern(8)))
+ l1 = pytree.Leaf(7, "(")
+ l2 = pytree.Leaf(3, "x")
+ l3 = pytree.Leaf(8, ")")
+ node = pytree.Node(331, [l1, l2, l3])
+ r = {}
+ self.assertTrue(pattern.match(node, r))
+ self.assertEqual(r["args"], [l2])
diff --git a/lib/python2.7/lib2to3/tests/test_refactor.py b/lib/python2.7/lib2to3/tests/test_refactor.py
new file mode 100644
index 0000000..c737aa5
--- /dev/null
+++ b/lib/python2.7/lib2to3/tests/test_refactor.py
@@ -0,0 +1,322 @@
+"""
+Unit tests for refactor.py.
+"""
+
+from __future__ import with_statement
+
+import sys
+import os
+import codecs
+import operator
+import re
+import StringIO
+import tempfile
+import shutil
+import unittest
+import warnings
+
+from lib2to3 import refactor, pygram, fixer_base
+from lib2to3.pgen2 import token
+
+from . import support
+
+
+TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), "data")
+FIXER_DIR = os.path.join(TEST_DATA_DIR, "fixers")
+
+sys.path.append(FIXER_DIR)
+try:
+ _DEFAULT_FIXERS = refactor.get_fixers_from_package("myfixes")
+finally:
+ sys.path.pop()
+
+_2TO3_FIXERS = refactor.get_fixers_from_package("lib2to3.fixes")
+
+
+class TestRefactoringTool(unittest.TestCase):
+
+ def setUp(self):
+ sys.path.append(FIXER_DIR)
+
+ def tearDown(self):
+ sys.path.pop()
+
+ def check_instances(self, instances, classes):
+ for inst, cls in zip(instances, classes):
+ if not isinstance(inst, cls):
+                self.fail("%s are not instances of %s" % (instances, classes))
+
+ def rt(self, options=None, fixers=_DEFAULT_FIXERS, explicit=None):
+ return refactor.RefactoringTool(fixers, options, explicit)
+
+ def test_print_function_option(self):
+ rt = self.rt({"print_function" : True})
+ self.assertIs(rt.grammar, pygram.python_grammar_no_print_statement)
+ self.assertIs(rt.driver.grammar,
+ pygram.python_grammar_no_print_statement)
+
+ def test_write_unchanged_files_option(self):
+ rt = self.rt()
+ self.assertFalse(rt.write_unchanged_files)
+ rt = self.rt({"write_unchanged_files" : True})
+ self.assertTrue(rt.write_unchanged_files)
+
+ def test_fixer_loading_helpers(self):
+ contents = ["explicit", "first", "last", "parrot", "preorder"]
+ non_prefixed = refactor.get_all_fix_names("myfixes")
+ prefixed = refactor.get_all_fix_names("myfixes", False)
+ full_names = refactor.get_fixers_from_package("myfixes")
+ self.assertEqual(prefixed, ["fix_" + name for name in contents])
+ self.assertEqual(non_prefixed, contents)
+ self.assertEqual(full_names,
+ ["myfixes.fix_" + name for name in contents])
+
+ def test_detect_future_features(self):
+ run = refactor._detect_future_features
+ fs = frozenset
+ empty = fs()
+ self.assertEqual(run(""), empty)
+ self.assertEqual(run("from __future__ import print_function"),
+ fs(("print_function",)))
+ self.assertEqual(run("from __future__ import generators"),
+ fs(("generators",)))
+ self.assertEqual(run("from __future__ import generators, feature"),
+ fs(("generators", "feature")))
+ inp = "from __future__ import generators, print_function"
+ self.assertEqual(run(inp), fs(("generators", "print_function")))
+ inp ="from __future__ import print_function, generators"
+ self.assertEqual(run(inp), fs(("print_function", "generators")))
+ inp = "from __future__ import (print_function,)"
+ self.assertEqual(run(inp), fs(("print_function",)))
+ inp = "from __future__ import (generators, print_function)"
+ self.assertEqual(run(inp), fs(("generators", "print_function")))
+ inp = "from __future__ import (generators, nested_scopes)"
+ self.assertEqual(run(inp), fs(("generators", "nested_scopes")))
+ inp = """from __future__ import generators
+from __future__ import print_function"""
+ self.assertEqual(run(inp), fs(("generators", "print_function")))
+ invalid = ("from",
+ "from 4",
+ "from x",
+ "from x 5",
+ "from x im",
+ "from x import",
+ "from x import 4",
+ )
+ for inp in invalid:
+ self.assertEqual(run(inp), empty)
+ inp = "'docstring'\nfrom __future__ import print_function"
+ self.assertEqual(run(inp), fs(("print_function",)))
+ inp = "'docstring'\n'somng'\nfrom __future__ import print_function"
+ self.assertEqual(run(inp), empty)
+ inp = "# comment\nfrom __future__ import print_function"
+ self.assertEqual(run(inp), fs(("print_function",)))
+ inp = "# comment\n'doc'\nfrom __future__ import print_function"
+ self.assertEqual(run(inp), fs(("print_function",)))
+ inp = "class x: pass\nfrom __future__ import print_function"
+ self.assertEqual(run(inp), empty)
+
+ def test_get_headnode_dict(self):
+ class NoneFix(fixer_base.BaseFix):
+ pass
+
+ class FileInputFix(fixer_base.BaseFix):
+ PATTERN = "file_input< any * >"
+
+ class SimpleFix(fixer_base.BaseFix):
+ PATTERN = "'name'"
+
+ no_head = NoneFix({}, [])
+ with_head = FileInputFix({}, [])
+ simple = SimpleFix({}, [])
+ d = refactor._get_headnode_dict([no_head, with_head, simple])
+ top_fixes = d.pop(pygram.python_symbols.file_input)
+ self.assertEqual(top_fixes, [with_head, no_head])
+ name_fixes = d.pop(token.NAME)
+ self.assertEqual(name_fixes, [simple, no_head])
+ for fixes in d.itervalues():
+ self.assertEqual(fixes, [no_head])
+
+ def test_fixer_loading(self):
+ from myfixes.fix_first import FixFirst
+ from myfixes.fix_last import FixLast
+ from myfixes.fix_parrot import FixParrot
+ from myfixes.fix_preorder import FixPreorder
+
+ rt = self.rt()
+ pre, post = rt.get_fixers()
+
+ self.check_instances(pre, [FixPreorder])
+ self.check_instances(post, [FixFirst, FixParrot, FixLast])
+
+ def test_naughty_fixers(self):
+ self.assertRaises(ImportError, self.rt, fixers=["not_here"])
+ self.assertRaises(refactor.FixerError, self.rt, fixers=["no_fixer_cls"])
+ self.assertRaises(refactor.FixerError, self.rt, fixers=["bad_order"])
+
+ def test_refactor_string(self):
+ rt = self.rt()
+ input = "def parrot(): pass\n\n"
+ tree = rt.refactor_string(input, "<test>")
+ self.assertNotEqual(str(tree), input)
+
+ input = "def f(): pass\n\n"
+ tree = rt.refactor_string(input, "<test>")
+ self.assertEqual(str(tree), input)
+
+ def test_refactor_stdin(self):
+
+ class MyRT(refactor.RefactoringTool):
+
+ def print_output(self, old_text, new_text, filename, equal):
+ results.extend([old_text, new_text, filename, equal])
+
+ results = []
+ rt = MyRT(_DEFAULT_FIXERS)
+ save = sys.stdin
+ sys.stdin = StringIO.StringIO("def parrot(): pass\n\n")
+ try:
+ rt.refactor_stdin()
+ finally:
+ sys.stdin = save
+ expected = ["def parrot(): pass\n\n",
+ "def cheese(): pass\n\n",
+ "<stdin>", False]
+ self.assertEqual(results, expected)
+
+ def check_file_refactoring(self, test_file, fixers=_2TO3_FIXERS,
+ options=None, mock_log_debug=None,
+ actually_write=True):
+ tmpdir = tempfile.mkdtemp(prefix="2to3-test_refactor")
+ self.addCleanup(shutil.rmtree, tmpdir)
+ # make a copy of the tested file that we can write to
+ shutil.copy(test_file, tmpdir)
+ test_file = os.path.join(tmpdir, os.path.basename(test_file))
+ os.chmod(test_file, 0o644)
+
+ def read_file():
+ with open(test_file, "rb") as fp:
+ return fp.read()
+
+ old_contents = read_file()
+ rt = self.rt(fixers=fixers, options=options)
+ if mock_log_debug:
+ rt.log_debug = mock_log_debug
+
+ rt.refactor_file(test_file)
+ self.assertEqual(old_contents, read_file())
+
+ if not actually_write:
+ return
+ rt.refactor_file(test_file, True)
+ new_contents = read_file()
+ self.assertNotEqual(old_contents, new_contents)
+ return new_contents
+
+ def test_refactor_file(self):
+ test_file = os.path.join(FIXER_DIR, "parrot_example.py")
+ self.check_file_refactoring(test_file, _DEFAULT_FIXERS)
+
+ def test_refactor_file_write_unchanged_file(self):
+ test_file = os.path.join(FIXER_DIR, "parrot_example.py")
+ debug_messages = []
+ def recording_log_debug(msg, *args):
+ debug_messages.append(msg % args)
+ self.check_file_refactoring(test_file, fixers=(),
+ options={"write_unchanged_files": True},
+ mock_log_debug=recording_log_debug,
+ actually_write=False)
+ # Testing that it logged this message when write=False was passed is
+ # sufficient to see that it did not bail early after "No changes".
+ message_regex = r"Not writing changes to .*%s" % \
+ re.escape(os.sep + os.path.basename(test_file))
+ for message in debug_messages:
+ if "Not writing changes" in message:
+ self.assertRegexpMatches(message, message_regex)
+ break
+ else:
+ self.fail("%r not matched in %r" % (message_regex, debug_messages))
+
+ def test_refactor_dir(self):
+ def check(structure, expected):
+ def mock_refactor_file(self, f, *args):
+ got.append(f)
+ save_func = refactor.RefactoringTool.refactor_file
+ refactor.RefactoringTool.refactor_file = mock_refactor_file
+ rt = self.rt()
+ got = []
+ dir = tempfile.mkdtemp(prefix="2to3-test_refactor")
+ try:
+ os.mkdir(os.path.join(dir, "a_dir"))
+ for fn in structure:
+ open(os.path.join(dir, fn), "wb").close()
+ rt.refactor_dir(dir)
+ finally:
+ refactor.RefactoringTool.refactor_file = save_func
+ shutil.rmtree(dir)
+ self.assertEqual(got,
+ [os.path.join(dir, path) for path in expected])
+ check([], [])
+ tree = ["nothing",
+ "hi.py",
+ ".dumb",
+ ".after.py",
+ "notpy.npy",
+ "sappy"]
+ expected = ["hi.py"]
+ check(tree, expected)
+ tree = ["hi.py",
+ os.path.join("a_dir", "stuff.py")]
+ check(tree, tree)
+
+ def test_file_encoding(self):
+ fn = os.path.join(TEST_DATA_DIR, "different_encoding.py")
+ self.check_file_refactoring(fn)
+
+ def test_false_file_encoding(self):
+ fn = os.path.join(TEST_DATA_DIR, "false_encoding.py")
+ data = self.check_file_refactoring(fn)
+
+ def test_bom(self):
+ fn = os.path.join(TEST_DATA_DIR, "bom.py")
+ data = self.check_file_refactoring(fn)
+ self.assertTrue(data.startswith(codecs.BOM_UTF8))
+
+ def test_crlf_newlines(self):
+ old_sep = os.linesep
+ os.linesep = "\r\n"
+ try:
+ fn = os.path.join(TEST_DATA_DIR, "crlf.py")
+ fixes = refactor.get_fixers_from_package("lib2to3.fixes")
+ self.check_file_refactoring(fn, fixes)
+ finally:
+ os.linesep = old_sep
+
+ def test_refactor_docstring(self):
+ rt = self.rt()
+
+ doc = """
+>>> example()
+42
+"""
+ out = rt.refactor_docstring(doc, "<test>")
+ self.assertEqual(out, doc)
+
+ doc = """
+>>> def parrot():
+... return 43
+"""
+ out = rt.refactor_docstring(doc, "<test>")
+ self.assertNotEqual(out, doc)
+
+ def test_explicit(self):
+ from myfixes.fix_explicit import FixExplicit
+
+ rt = self.rt(fixers=["myfixes.fix_explicit"])
+ self.assertEqual(len(rt.post_order), 0)
+
+ rt = self.rt(explicit=["myfixes.fix_explicit"])
+ for fix in rt.post_order:
+ if isinstance(fix, FixExplicit):
+ break
+ else:
+ self.fail("explicit fixer not loaded")
diff --git a/lib/python2.7/lib2to3/tests/test_util.py b/lib/python2.7/lib2to3/tests/test_util.py
new file mode 100644
index 0000000..2fab8b9
--- /dev/null
+++ b/lib/python2.7/lib2to3/tests/test_util.py
@@ -0,0 +1,594 @@
+""" Test suite for the code in fixer_util """
+
+# Testing imports
+from . import support
+
+# Python imports
+import os.path
+
+# Local imports
+from lib2to3.pytree import Node, Leaf
+from lib2to3 import fixer_util
+from lib2to3.fixer_util import Attr, Name, Call, Comma
+from lib2to3.pgen2 import token
+
+def parse(code, strip_levels=0):
+ # The topmost node is file_input, which we don't care about.
+    # The next-topmost node is a *_stmt node, which we also don't care about.
+ tree = support.parse_string(code)
+ for i in range(strip_levels):
+ tree = tree.children[0]
+ tree.parent = None
+ return tree
+
+
+class MacroTestCase(support.TestCase):
+ def assertStr(self, node, string):
+ if isinstance(node, (tuple, list)):
+ node = Node(fixer_util.syms.simple_stmt, node)
+ self.assertEqual(str(node), string)
+
+
+class Test_is_tuple(support.TestCase):
+ def is_tuple(self, string):
+ return fixer_util.is_tuple(parse(string, strip_levels=2))
+
+ def test_valid(self):
+ self.assertTrue(self.is_tuple("(a, b)"))
+ self.assertTrue(self.is_tuple("(a, (b, c))"))
+ self.assertTrue(self.is_tuple("((a, (b, c)),)"))
+ self.assertTrue(self.is_tuple("(a,)"))
+ self.assertTrue(self.is_tuple("()"))
+
+ def test_invalid(self):
+ self.assertFalse(self.is_tuple("(a)"))
+ self.assertFalse(self.is_tuple("('foo') % (b, c)"))
+
+
+class Test_is_list(support.TestCase):
+ def is_list(self, string):
+ return fixer_util.is_list(parse(string, strip_levels=2))
+
+ def test_valid(self):
+ self.assertTrue(self.is_list("[]"))
+ self.assertTrue(self.is_list("[a]"))
+ self.assertTrue(self.is_list("[a, b]"))
+ self.assertTrue(self.is_list("[a, [b, c]]"))
+ self.assertTrue(self.is_list("[[a, [b, c]],]"))
+
+ def test_invalid(self):
+ self.assertFalse(self.is_list("[]+[]"))
+
+
+class Test_Attr(MacroTestCase):
+ def test(self):
+ call = parse("foo()", strip_levels=2)
+
+ self.assertStr(Attr(Name("a"), Name("b")), "a.b")
+ self.assertStr(Attr(call, Name("b")), "foo().b")
+
+ def test_returns(self):
+ attr = Attr(Name("a"), Name("b"))
+ self.assertEqual(type(attr), list)
+
+
+class Test_Name(MacroTestCase):
+ def test(self):
+ self.assertStr(Name("a"), "a")
+ self.assertStr(Name("foo.foo().bar"), "foo.foo().bar")
+ self.assertStr(Name("a", prefix="b"), "ba")
+
+
+class Test_Call(MacroTestCase):
+ def _Call(self, name, args=None, prefix=None):
+ """Help the next test"""
+ children = []
+ if isinstance(args, list):
+ for arg in args:
+ children.append(arg)
+ children.append(Comma())
+ children.pop()
+ return Call(Name(name), children, prefix)
+
+ def test(self):
+ kids = [None,
+ [Leaf(token.NUMBER, 1), Leaf(token.NUMBER, 2),
+ Leaf(token.NUMBER, 3)],
+ [Leaf(token.NUMBER, 1), Leaf(token.NUMBER, 3),
+ Leaf(token.NUMBER, 2), Leaf(token.NUMBER, 4)],
+ [Leaf(token.STRING, "b"), Leaf(token.STRING, "j", prefix=" ")]
+ ]
+ self.assertStr(self._Call("A"), "A()")
+ self.assertStr(self._Call("b", kids[1]), "b(1,2,3)")
+ self.assertStr(self._Call("a.b().c", kids[2]), "a.b().c(1,3,2,4)")
+ self.assertStr(self._Call("d", kids[3], prefix=" "), " d(b, j)")
+
+
+class Test_does_tree_import(support.TestCase):
+ def _find_bind_rec(self, name, node):
+ # Search a tree for a binding -- used to find the starting
+ # point for these tests.
+ c = fixer_util.find_binding(name, node)
+ if c: return c
+ for child in node.children:
+ c = self._find_bind_rec(name, child)
+ if c: return c
+
+ def does_tree_import(self, package, name, string):
+ node = parse(string)
+ # Find the binding of start -- that's what we'll go from
+ node = self._find_bind_rec('start', node)
+ return fixer_util.does_tree_import(package, name, node)
+
+ def try_with(self, string):
+ failing_tests = (("a", "a", "from a import b"),
+ ("a.d", "a", "from a.d import b"),
+ ("d.a", "a", "from d.a import b"),
+ (None, "a", "import b"),
+ (None, "a", "import b, c, d"))
+ for package, name, import_ in failing_tests:
+ n = self.does_tree_import(package, name, import_ + "\n" + string)
+ self.assertFalse(n)
+ n = self.does_tree_import(package, name, string + "\n" + import_)
+ self.assertFalse(n)
+
+ passing_tests = (("a", "a", "from a import a"),
+ ("x", "a", "from x import a"),
+ ("x", "a", "from x import b, c, a, d"),
+ ("x.b", "a", "from x.b import a"),
+ ("x.b", "a", "from x.b import b, c, a, d"),
+ (None, "a", "import a"),
+ (None, "a", "import b, c, a, d"))
+ for package, name, import_ in passing_tests:
+ n = self.does_tree_import(package, name, import_ + "\n" + string)
+ self.assertTrue(n)
+ n = self.does_tree_import(package, name, string + "\n" + import_)
+ self.assertTrue(n)
+
+ def test_in_function(self):
+ self.try_with("def foo():\n\tbar.baz()\n\tstart=3")
+
+
+class Test_find_binding(support.TestCase):
+ def find_binding(self, name, string, package=None):
+ return fixer_util.find_binding(name, parse(string), package)
+
+ def test_simple_assignment(self):
+ self.assertTrue(self.find_binding("a", "a = b"))
+ self.assertTrue(self.find_binding("a", "a = [b, c, d]"))
+ self.assertTrue(self.find_binding("a", "a = foo()"))
+ self.assertTrue(self.find_binding("a", "a = foo().foo.foo[6][foo]"))
+ self.assertFalse(self.find_binding("a", "foo = a"))
+ self.assertFalse(self.find_binding("a", "foo = (a, b, c)"))
+
+ def test_tuple_assignment(self):
+ self.assertTrue(self.find_binding("a", "(a,) = b"))
+ self.assertTrue(self.find_binding("a", "(a, b, c) = [b, c, d]"))
+ self.assertTrue(self.find_binding("a", "(c, (d, a), b) = foo()"))
+ self.assertTrue(self.find_binding("a", "(a, b) = foo().foo[6][foo]"))
+ self.assertFalse(self.find_binding("a", "(foo, b) = (b, a)"))
+ self.assertFalse(self.find_binding("a", "(foo, (b, c)) = (a, b, c)"))
+
+ def test_list_assignment(self):
+ self.assertTrue(self.find_binding("a", "[a] = b"))
+ self.assertTrue(self.find_binding("a", "[a, b, c] = [b, c, d]"))
+ self.assertTrue(self.find_binding("a", "[c, [d, a], b] = foo()"))
+ self.assertTrue(self.find_binding("a", "[a, b] = foo().foo[a][foo]"))
+ self.assertFalse(self.find_binding("a", "[foo, b] = (b, a)"))
+ self.assertFalse(self.find_binding("a", "[foo, [b, c]] = (a, b, c)"))
+
+ def test_invalid_assignments(self):
+ self.assertFalse(self.find_binding("a", "foo.a = 5"))
+ self.assertFalse(self.find_binding("a", "foo[a] = 5"))
+ self.assertFalse(self.find_binding("a", "foo(a) = 5"))
+ self.assertFalse(self.find_binding("a", "foo(a, b) = 5"))
+
+ def test_simple_import(self):
+ self.assertTrue(self.find_binding("a", "import a"))
+ self.assertTrue(self.find_binding("a", "import b, c, a, d"))
+ self.assertFalse(self.find_binding("a", "import b"))
+ self.assertFalse(self.find_binding("a", "import b, c, d"))
+
+ def test_from_import(self):
+ self.assertTrue(self.find_binding("a", "from x import a"))
+ self.assertTrue(self.find_binding("a", "from a import a"))
+ self.assertTrue(self.find_binding("a", "from x import b, c, a, d"))
+ self.assertTrue(self.find_binding("a", "from x.b import a"))
+ self.assertTrue(self.find_binding("a", "from x.b import b, c, a, d"))
+ self.assertFalse(self.find_binding("a", "from a import b"))
+ self.assertFalse(self.find_binding("a", "from a.d import b"))
+ self.assertFalse(self.find_binding("a", "from d.a import b"))
+
+ def test_import_as(self):
+ self.assertTrue(self.find_binding("a", "import b as a"))
+ self.assertTrue(self.find_binding("a", "import b as a, c, a as f, d"))
+ self.assertFalse(self.find_binding("a", "import a as f"))
+ self.assertFalse(self.find_binding("a", "import b, c as f, d as e"))
+
+ def test_from_import_as(self):
+ self.assertTrue(self.find_binding("a", "from x import b as a"))
+ self.assertTrue(self.find_binding("a", "from x import g as a, d as b"))
+ self.assertTrue(self.find_binding("a", "from x.b import t as a"))
+ self.assertTrue(self.find_binding("a", "from x.b import g as a, d"))
+ self.assertFalse(self.find_binding("a", "from a import b as t"))
+ self.assertFalse(self.find_binding("a", "from a.d import b as t"))
+ self.assertFalse(self.find_binding("a", "from d.a import b as t"))
+
+ def test_simple_import_with_package(self):
+ self.assertTrue(self.find_binding("b", "import b"))
+ self.assertTrue(self.find_binding("b", "import b, c, d"))
+ self.assertFalse(self.find_binding("b", "import b", "b"))
+ self.assertFalse(self.find_binding("b", "import b, c, d", "c"))
+
+ def test_from_import_with_package(self):
+ self.assertTrue(self.find_binding("a", "from x import a", "x"))
+ self.assertTrue(self.find_binding("a", "from a import a", "a"))
+ self.assertTrue(self.find_binding("a", "from x import *", "x"))
+ self.assertTrue(self.find_binding("a", "from x import b, c, a, d", "x"))
+ self.assertTrue(self.find_binding("a", "from x.b import a", "x.b"))
+ self.assertTrue(self.find_binding("a", "from x.b import *", "x.b"))
+ self.assertTrue(self.find_binding("a", "from x.b import b, c, a, d", "x.b"))
+ self.assertFalse(self.find_binding("a", "from a import b", "a"))
+ self.assertFalse(self.find_binding("a", "from a.d import b", "a.d"))
+ self.assertFalse(self.find_binding("a", "from d.a import b", "a.d"))
+ self.assertFalse(self.find_binding("a", "from x.y import *", "a.b"))
+
+ def test_import_as_with_package(self):
+ self.assertFalse(self.find_binding("a", "import b.c as a", "b.c"))
+ self.assertFalse(self.find_binding("a", "import a as f", "f"))
+ self.assertFalse(self.find_binding("a", "import a as f", "a"))
+
+ def test_from_import_as_with_package(self):
+        # Because it would take a lot of special-case code in the fixers
+        # to deal with "from foo import bar as baz", we simply always
+        # fail if there is a "from ... import ... as ..." present.
+ self.assertFalse(self.find_binding("a", "from x import b as a", "x"))
+ self.assertFalse(self.find_binding("a", "from x import g as a, d as b", "x"))
+ self.assertFalse(self.find_binding("a", "from x.b import t as a", "x.b"))
+ self.assertFalse(self.find_binding("a", "from x.b import g as a, d", "x.b"))
+ self.assertFalse(self.find_binding("a", "from a import b as t", "a"))
+ self.assertFalse(self.find_binding("a", "from a import b as t", "b"))
+ self.assertFalse(self.find_binding("a", "from a import b as t", "t"))
+
+ def test_function_def(self):
+ self.assertTrue(self.find_binding("a", "def a(): pass"))
+ self.assertTrue(self.find_binding("a", "def a(b, c, d): pass"))
+ self.assertTrue(self.find_binding("a", "def a(): b = 7"))
+ self.assertFalse(self.find_binding("a", "def d(b, (c, a), e): pass"))
+ self.assertFalse(self.find_binding("a", "def d(a=7): pass"))
+ self.assertFalse(self.find_binding("a", "def d(a): pass"))
+ self.assertFalse(self.find_binding("a", "def d(): a = 7"))
+
+ s = """
+ def d():
+ def a():
+ pass"""
+ self.assertFalse(self.find_binding("a", s))
+
+ def test_class_def(self):
+ self.assertTrue(self.find_binding("a", "class a: pass"))
+ self.assertTrue(self.find_binding("a", "class a(): pass"))
+ self.assertTrue(self.find_binding("a", "class a(b): pass"))
+ self.assertTrue(self.find_binding("a", "class a(b, c=8): pass"))
+ self.assertFalse(self.find_binding("a", "class d: pass"))
+ self.assertFalse(self.find_binding("a", "class d(a): pass"))
+ self.assertFalse(self.find_binding("a", "class d(b, a=7): pass"))
+ self.assertFalse(self.find_binding("a", "class d(b, *a): pass"))
+ self.assertFalse(self.find_binding("a", "class d(b, **a): pass"))
+ self.assertFalse(self.find_binding("a", "class d: a = 7"))
+
+ s = """
+ class d():
+ class a():
+ pass"""
+ self.assertFalse(self.find_binding("a", s))
+
+ def test_for(self):
+ self.assertTrue(self.find_binding("a", "for a in r: pass"))
+ self.assertTrue(self.find_binding("a", "for a, b in r: pass"))
+ self.assertTrue(self.find_binding("a", "for (a, b) in r: pass"))
+ self.assertTrue(self.find_binding("a", "for c, (a,) in r: pass"))
+ self.assertTrue(self.find_binding("a", "for c, (a, b) in r: pass"))
+ self.assertTrue(self.find_binding("a", "for c in r: a = c"))
+ self.assertFalse(self.find_binding("a", "for c in a: pass"))
+
+ def test_for_nested(self):
+ s = """
+ for b in r:
+ for a in b:
+ pass"""
+ self.assertTrue(self.find_binding("a", s))
+
+ s = """
+ for b in r:
+ for a, c in b:
+ pass"""
+ self.assertTrue(self.find_binding("a", s))
+
+ s = """
+ for b in r:
+ for (a, c) in b:
+ pass"""
+ self.assertTrue(self.find_binding("a", s))
+
+ s = """
+ for b in r:
+ for (a,) in b:
+ pass"""
+ self.assertTrue(self.find_binding("a", s))
+
+ s = """
+ for b in r:
+ for c, (a, d) in b:
+ pass"""
+ self.assertTrue(self.find_binding("a", s))
+
+ s = """
+ for b in r:
+ for c in b:
+ a = 7"""
+ self.assertTrue(self.find_binding("a", s))
+
+ s = """
+ for b in r:
+ for c in b:
+ d = a"""
+ self.assertFalse(self.find_binding("a", s))
+
+ s = """
+ for b in r:
+ for c in a:
+ d = 7"""
+ self.assertFalse(self.find_binding("a", s))
+
+ def test_if(self):
+ self.assertTrue(self.find_binding("a", "if b in r: a = c"))
+ self.assertFalse(self.find_binding("a", "if a in r: d = e"))
+
+ def test_if_nested(self):
+ s = """
+ if b in r:
+ if c in d:
+ a = c"""
+ self.assertTrue(self.find_binding("a", s))
+
+ s = """
+ if b in r:
+ if c in d:
+ c = a"""
+ self.assertFalse(self.find_binding("a", s))
+
+ def test_while(self):
+ self.assertTrue(self.find_binding("a", "while b in r: a = c"))
+ self.assertFalse(self.find_binding("a", "while a in r: d = e"))
+
+ def test_while_nested(self):
+ s = """
+ while b in r:
+ while c in d:
+ a = c"""
+ self.assertTrue(self.find_binding("a", s))
+
+ s = """
+ while b in r:
+ while c in d:
+ c = a"""
+ self.assertFalse(self.find_binding("a", s))
+
+ def test_try_except(self):
+ s = """
+ try:
+ a = 6
+ except:
+ b = 8"""
+ self.assertTrue(self.find_binding("a", s))
+
+ s = """
+ try:
+ b = 8
+ except:
+ a = 6"""
+ self.assertTrue(self.find_binding("a", s))
+
+ s = """
+ try:
+ b = 8
+ except KeyError:
+ pass
+ except:
+ a = 6"""
+ self.assertTrue(self.find_binding("a", s))
+
+ s = """
+ try:
+ b = 8
+ except:
+ b = 6"""
+ self.assertFalse(self.find_binding("a", s))
+
+ def test_try_except_nested(self):
+ s = """
+ try:
+ try:
+ a = 6
+ except:
+ pass
+ except:
+ b = 8"""
+ self.assertTrue(self.find_binding("a", s))
+
+ s = """
+ try:
+ b = 8
+ except:
+ try:
+ a = 6
+ except:
+ pass"""
+ self.assertTrue(self.find_binding("a", s))
+
+ s = """
+ try:
+ b = 8
+ except:
+ try:
+ pass
+ except:
+ a = 6"""
+ self.assertTrue(self.find_binding("a", s))
+
+ s = """
+ try:
+ try:
+ b = 8
+ except KeyError:
+ pass
+ except:
+ a = 6
+ except:
+ pass"""
+ self.assertTrue(self.find_binding("a", s))
+
+ s = """
+ try:
+ pass
+ except:
+ try:
+ b = 8
+ except KeyError:
+ pass
+ except:
+ a = 6"""
+ self.assertTrue(self.find_binding("a", s))
+
+ s = """
+ try:
+ b = 8
+ except:
+ b = 6"""
+ self.assertFalse(self.find_binding("a", s))
+
+ s = """
+ try:
+ try:
+ b = 8
+ except:
+ c = d
+ except:
+ try:
+ b = 6
+ except:
+ t = 8
+ except:
+ o = y"""
+ self.assertFalse(self.find_binding("a", s))
+
+ def test_try_except_finally(self):
+ s = """
+ try:
+ c = 6
+ except:
+ b = 8
+ finally:
+ a = 9"""
+ self.assertTrue(self.find_binding("a", s))
+
+ s = """
+ try:
+ b = 8
+ finally:
+ a = 6"""
+ self.assertTrue(self.find_binding("a", s))
+
+ s = """
+ try:
+ b = 8
+ finally:
+ b = 6"""
+ self.assertFalse(self.find_binding("a", s))
+
+ s = """
+ try:
+ b = 8
+ except:
+ b = 9
+ finally:
+ b = 6"""
+ self.assertFalse(self.find_binding("a", s))
+
+ def test_try_except_finally_nested(self):
+ s = """
+ try:
+ c = 6
+ except:
+ b = 8
+ finally:
+ try:
+ a = 9
+ except:
+ b = 9
+ finally:
+ c = 9"""
+ self.assertTrue(self.find_binding("a", s))
+
+ s = """
+ try:
+ b = 8
+ finally:
+ try:
+ pass
+ finally:
+ a = 6"""
+ self.assertTrue(self.find_binding("a", s))
+
+ s = """
+ try:
+ b = 8
+ finally:
+ try:
+ b = 6
+ finally:
+ b = 7"""
+ self.assertFalse(self.find_binding("a", s))
+
+
+class Test_touch_import(support.TestCase):
+
+ def test_after_docstring(self):
+ node = parse('"""foo"""\nbar()')
+ fixer_util.touch_import(None, "foo", node)
+ self.assertEqual(str(node), '"""foo"""\nimport foo\nbar()\n\n')
+
+ def test_after_imports(self):
+ node = parse('"""foo"""\nimport bar\nbar()')
+ fixer_util.touch_import(None, "foo", node)
+ self.assertEqual(str(node), '"""foo"""\nimport bar\nimport foo\nbar()\n\n')
+
+ def test_beginning(self):
+ node = parse('bar()')
+ fixer_util.touch_import(None, "foo", node)
+ self.assertEqual(str(node), 'import foo\nbar()\n\n')
+
+ def test_from_import(self):
+ node = parse('bar()')
+ fixer_util.touch_import("html", "escape", node)
+ self.assertEqual(str(node), 'from html import escape\nbar()\n\n')
+
+ def test_name_import(self):
+ node = parse('bar()')
+ fixer_util.touch_import(None, "cgi", node)
+ self.assertEqual(str(node), 'import cgi\nbar()\n\n')
+
+
+class Test_find_indentation(support.TestCase):
+
+ def test_nothing(self):
+ fi = fixer_util.find_indentation
+ node = parse("node()")
+ self.assertEqual(fi(node), u"")
+ node = parse("")
+ self.assertEqual(fi(node), u"")
+
+ def test_simple(self):
+ fi = fixer_util.find_indentation
+ node = parse("def f():\n x()")
+ self.assertEqual(fi(node), u"")
+ self.assertEqual(fi(node.children[0].children[4].children[2]), u" ")
+ node = parse("def f():\n x()\n y()")
+ self.assertEqual(fi(node.children[0].children[4].children[4]), u" ")
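
[Editor's note] The helpers exercised in this file are the building blocks
fixers use to edit trees in place. A minimal sketch of touch_import outside the
test harness, assuming the stock grammar:

    # Insert "import foo" after the module docstring, as
    # Test_touch_import.test_after_docstring expects.
    from lib2to3 import fixer_util, pygram, pytree
    from lib2to3.pgen2 import driver as pgen2_driver

    d = pgen2_driver.Driver(pygram.python_grammar, convert=pytree.convert)
    node = d.parse_string('"""doc"""\nbar()\n')
    fixer_util.touch_import(None, "foo", node)
    print str(node)   # the docstring, then "import foo", then bar()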