Mirror of https://github.com/rocky/python-uncompyle6.git (synced 2024-11-26 22:50:40 +00:00)

Commit 9a14db567b: Merge branch 'master' of github.com:rocky/python-uncompyle6
@@ -1,7 +1,5 @@
 language: python
 
-sudo: false
-
 python:
  - '3.5'
  - '2.7'
@@ -9,6 +7,11 @@ python:
  - '3.4'
  - '3.6'
 
+matrix:
+  include:
+    - python: '3.7'
+      dist: xenial  # required for Python >= 3.7 (travis-ci/travis-ci#9069)
+
 install:
  - pip install -e .
  - pip install -r requirements-dev.txt
@@ -115,7 +115,7 @@ if PYTHON_VERSION > 2.6:
         return "f{}'{}'".format('r' if is_raw else '', content)
 
 
-    @pytest.mark.skipif(PYTHON_VERSION < 3.6, reason='need at least python 3.6')
+    @pytest.mark.skipif(PYTHON_VERSION != 3.6, reason='need Python 3.6')
     @hypothesis.given(format_specifiers())
     def test_format_specifiers(format_specifier):
         """Verify that format_specifiers generates valid specifiers"""
@@ -137,14 +137,14 @@ if PYTHON_VERSION > 2.6:
         assert 'dis(' + deparsed.text.strip('\n') + ')' == 'dis(' + expr.strip('\n') + ')'
 
 
-    @pytest.mark.skipif(PYTHON_VERSION < 3.6, reason='need at least python 3.6')
+    @pytest.mark.skipif(PYTHON_VERSION != 3.6, reason='need Python 3.6')
    @hypothesis.given(fstrings())
     def test_uncompyle_fstring(fstring):
         """Verify uncompyling fstring bytecode"""
         run_test(fstring)
 
 
-    @pytest.mark.skipif(PYTHON_VERSION < 3.6, reason='need at least python 3.6')
+    @pytest.mark.skipif(PYTHON_VERSION < 3.6, reason='need Python 3.6+')
     @pytest.mark.parametrize('fstring', [
         "f'{abc}{abc!s}'",
         "f'{abc}0'",
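The skipif guards above are tightened from "at least 3.6" to "exactly 3.6" (or "3.6+" for the parametrized case), presumably because the f-string bytecode being round-tripped is tied to a specific opcode set. As a rough illustration, not part of this commit, the standard dis module shows the FORMAT_VALUE opcode that CPython 3.6 emits for an f-string:

# Illustration only -- not from the uncompyle6 test suite.
import dis
import sys

if sys.version_info[:2] >= (3, 6):
    code = compile("f'{abc}'", "<fstring>", "eval")
    dis.dis(code)  # on 3.6 this shows LOAD_NAME (abc), FORMAT_VALUE, RETURN_VALUE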
@@ -7,7 +7,7 @@ def test_grammar():
 
 def check_tokens(tokens, opcode_set):
     remain_tokens = set(tokens) - opcode_set
-    remain_tokens = set([re.sub('_\d+$','', t) for t in remain_tokens])
+    remain_tokens = set([re.sub(r'_\d+$','', t) for t in remain_tokens])
     remain_tokens = set([re.sub('_CONT$','', t) for t in remain_tokens])
     remain_tokens = set(remain_tokens) - opcode_set
     assert remain_tokens == set([]), \
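The only change in this hunk (and in the similar scanner hunks further down) is the r prefix, so that \d is treated as a regex escape rather than a Python string escape, which newer CPython releases warn about as an invalid escape sequence. A small standalone sketch of what the substitution does, using a made-up token name:

# Illustration only; 'CALL_FUNCTION_3' is just an example token name.
import re

print(re.sub(r'_\d+$', '', 'CALL_FUNCTION_3'))  # -> CALL_FUNCTION (numeric suffix stripped)
print(re.sub(r'_\d+$', '', 'LOAD_CONST'))       # -> LOAD_CONST (no suffix, unchanged)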
@@ -91,8 +91,9 @@ check-bytecode-2:
 #: Check deparsing bytecode 3.x only
 check-bytecode-3:
 	$(PYTHON) test_pythonlib.py --bytecode-3.0 \
-	--bytecode-3.1 --bytecode-3.2 --bytecode-3.3 \
-	--bytecode-3.4 --bytecode-3.5 --bytecode-3.6 --bytecode-pypy3.2
+	--bytecode-3.1 --bytecode-3.2 --bytecode-3.3 \
+	--bytecode-3.4 --bytecode-3.5 --bytecode-3.6 --bytecode-3.7 \
+	--bytecode-pypy3.2
 
 #: Check deparsing on selected bytecode 3.x
 check-bytecode-3-short:
test/simple_source/bug24/03_iftrue.py (new file, 10 lines)
@@ -0,0 +1,10 @@
+# Python 2.4 (and before?) bug in handling unconditional "else if true"
+# Doesn't occur in Python > 2.4
+# From Issue #187
+def unconditional_if_true_24(foo):
+    if not foo:
+        pass
+    elif 1:
+        pass
+    else:
+        return None
test/simple_source/bug27+/06_raise.py (new file, 22 lines)
@@ -0,0 +1,22 @@
+# Bug in Python 2.7 is code creating a (useless) JUMP_ABSOLUTE to the instruction right after
+# the "raise" which causes the
+
+# RUNNABLE!
+def testit(a, b):
+    if a:
+        if not b:
+            raise AssertionError("test JUMP_ABSOLUTE to next instruction")
+
+def testit2(a, b):
+    if a:
+        if not b:
+            raise AssertionError("test with dead code after raise")
+            x = 10
+
+testit(False, True)
+testit(False, False)
+testit(True, True)
+
+testit2(False, True)
+testit2(False, False)
+testit2(True, True)
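The comment in this new test file trails off, but its gist is that CPython 2.7 emits a useless JUMP_ABSOLUTE targeting the instruction right after the raise, which trips up the decompiler. A rough way to see the instruction sequence the comment refers to (illustration only, run under CPython 2.7) is to disassemble the compiled function:

# Illustration only -- not part of the commit. Run under CPython 2.7 to see the
# jump described in the test-file comment.
import dis

def testit(a, b):
    if a:
        if not b:
            raise AssertionError("test JUMP_ABSOLUTE to next instruction")

dis.dis(testit)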
@@ -4,6 +4,7 @@
 # try. In 3.x we not distinguising this jump out of the loop with a jump to the
 # end of the "try".
 
+# RUNNABLE!
 def testit(stmts):
 
 # Bug was confusing When the except jumps back to the beginning of the block
test/simple_source/bug36/03_if_try.py (new file, 8 lines)
@@ -0,0 +1,8 @@
+# The bug in python 3.6+ was in parsing that we
+# add END_IF_THEN and using that inside "return results"
+def whcms_license_info(md5hash, datahash, results):
+    if md5hash == datahash:
+        try:
+            return md5hash
+        except:
+            return results
@@ -315,10 +315,19 @@ def main(in_base, out_base, files, codes, outfile=None,
             sys.stdout.write("%s\r" %
                              status_msg(do_verify, tot_files, okay_files, failed_files,
                                         verify_failed_files, do_verify))
-            sys.stdout.flush()
+            try:
+                # FIXME: Something is weird with Pypy here
+                sys.stdout.flush()
+            except:
+                pass
     if current_outfile:
         sys.stdout.write("\n")
-        sys.stdout.flush()
+        try:
+            # FIXME: Something is weird with Pypy here
+            sys.stdout.flush()
+        except:
+            pass
+        pass
     return (tot_files, okay_files, failed_files, verify_failed_files)
 
 
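Both replacements above wrap sys.stdout.flush() in a try/except because, per the FIXME, flushing can misbehave under PyPy. Factored out on its own, the change amounts to a tolerant flush; a minimal sketch, not the project's actual code:

# Minimal sketch of the pattern added above; not part of uncompyle6 itself.
import sys

def tolerant_flush(stream=sys.stdout):
    """Flush a stream, ignoring any error (the diff itself uses a bare except)."""
    try:
        stream.flush()
    except Exception:
        pass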
@@ -533,14 +533,11 @@ class Python2Parser(PythonParser):
         # Dead code testing...
         # if lhs == 'while1elsestmt':
         #     from trepan.api import debug; debug()
 
         if lhs in ('aug_assign1', 'aug_assign2') and ast[0] and ast[0][0] in ('and', 'or'):
             return True
         elif lhs in ('raise_stmt1',):
-            # We will assme 'LOAD_ASSERT' will be handled by an assert grammar rule
-            return (tokens[first] == 'LOAD_ASSERT' and
-                    (last >= len(tokens) or tokens[last] not in
-                     ('COME_FROM', 'JUMP_BACK','JUMP_FORWARD')))
+            # We will assume 'LOAD_ASSERT' will be handled by an assert grammar rule
+            return (tokens[first] == 'LOAD_ASSERT' and (last >= len(tokens)))
         elif rule == ('or', ('expr', 'jmp_true', 'expr', '\\e_come_from_opt')):
             expr2 = ast[2]
             return expr2 == 'expr' and expr2[0] == 'LOAD_ASSERT'
@@ -1,9 +1,10 @@
-# Copyright (c) 2016-2018 Rocky Bernstein
+# Copyright (c) 2016-2019 Rocky Bernstein
 # Copyright (c) 2005 by Dan Pascu <dan@windowmaker.org>
 # Copyright (c) 2000-2002 by hartmut Goebel <hartmut@goebel.noris.de>
 
 from spark_parser import DEFAULT_DEBUG as PARSER_DEFAULT_DEBUG
-from uncompyle6.parser import PythonParserSingle
+from xdis import next_offset
+from uncompyle6.parser import PythonParserSingle, nop_func
 from uncompyle6.parsers.parse2 import Python2Parser
 
 class Python27Parser(Python2Parser):
@@ -196,6 +197,13 @@ class Python27Parser(Python2Parser):
                        POP_BLOCK LOAD_CONST COME_FROM suite_stmts_opt
                        END_FINALLY
                        """)
+        if 'PyPy' in customize:
+            # PyPy-specific customizations
+            self.addRule("""
+                         return_if_stmt ::= ret_expr RETURN_END_IF come_froms
+                         """, nop_func)
+
+
         super(Python27Parser, self).customize_grammar_rules(tokens, customize)
         self.check_reduce['and'] = 'AST'
         # self.check_reduce['or'] = 'AST'
@@ -220,6 +228,12 @@ class Python27Parser(Python2Parser):
                     tokens[last].pattr == jmp_false.pattr)
         elif rule[0] == ('raise_stmt1'):
             return ast[0] == 'expr' and ast[0][0] == 'or'
+        elif rule[0] in ('assert', 'assert2'):
+            jump_inst = ast[1][0]
+            jump_target = jump_inst.attr
+            return not (last >= len(tokens)
+                        or jump_target == tokens[last].offset
+                        or jump_target == next_offset(ast[-1].op, ast[-1].opc, ast[-1].offset))
         elif rule == ('list_if_not', ('expr', 'jmp_true', 'list_iter')):
             jump_inst = ast[1][0]
             jump_offset = jump_inst.attr
@@ -84,7 +84,7 @@ if __name__ == '__main__':
     """.split()))
     remain_tokens = set(tokens) - opcode_set
     import re
-    remain_tokens = set([re.sub('_\d+$', '', t) for t in remain_tokens])
+    remain_tokens = set([re.sub(r'_\d+$', '', t) for t in remain_tokens])
     remain_tokens = set([re.sub('_CONT$', '', t) for t in remain_tokens])
     remain_tokens = set(remain_tokens) - opcode_set
     print(remain_tokens)
@@ -267,7 +267,7 @@ if __name__ == '__main__':
    """.split()))
     remain_tokens = set(tokens) - opcode_set
     import re
-    remain_tokens = set([re.sub('_\d+$', '', t) for t in remain_tokens])
+    remain_tokens = set([re.sub(r'_\d+$', '', t) for t in remain_tokens])
     remain_tokens = set([re.sub('_CONT$', '', t) for t in remain_tokens])
     remain_tokens = set(remain_tokens) - opcode_set
     print(remain_tokens)
@@ -366,7 +366,7 @@ if __name__ == '__main__':
     """.split()))
     remain_tokens = set(tokens) - opcode_set
     import re
-    remain_tokens = set([re.sub('_\d+$', '', t) for t in remain_tokens])
+    remain_tokens = set([re.sub(r'_\d+$', '', t) for t in remain_tokens])
     remain_tokens = set([re.sub('_CONT$', '', t) for t in remain_tokens])
     remain_tokens = set(remain_tokens) - opcode_set
     print(remain_tokens)
@@ -115,7 +115,7 @@ if __name__ == '__main__':
     """.split()))
     remain_tokens = set(tokens) - opcode_set
     import re
-    remain_tokens = set([re.sub('_\d+$', '', t) for t in remain_tokens])
+    remain_tokens = set([re.sub(r'_\d+$', '', t) for t in remain_tokens])
     remain_tokens = set([re.sub('_CONT$', '', t) for t in remain_tokens])
     remain_tokens = set(remain_tokens) - opcode_set
     print(remain_tokens)
@@ -107,7 +107,7 @@ class Token():
             pattr = self.opc.cmp_op[self.attr]
             # And so on. See xdis/bytecode.py get_instructions_bytes
             pass
-        elif re.search('_\d+$', self.kind):
+        elif re.search(r'_\d+$', self.kind):
             return "%s%s%s" % (prefix, offset_opname, argstr)
         else:
             pattr = ''