More cleanup

This commit is contained in:
Liam Newman 2018-07-26 14:46:48 -07:00
parent ebc310f470
commit d75357aa31
8 changed files with 1726 additions and 4104 deletions

View File

@@ -26,8 +26,8 @@
SOFTWARE.
*/
function InputScanner(input) {
var _input = input || '';
function InputScanner(input_string) {
var _input = input_string || '';
var _input_length = _input.length;
var _position = 0;

View File

@@ -55,9 +55,9 @@ function OutputLine(parent) {
}
};
this.push = function(input) {
this._items.push(input);
this._character_count += input.length;
this.push = function(item) {
this._items.push(item);
this._character_count += item.length;
_empty = false;
};

5741
package-lock.json generated

File diff suppressed because it is too large Load Diff

View File

@@ -49,7 +49,7 @@
"editorconfig": "^0.15.0",
"mkdirp": "~0.5.0",
"nopt": "~4.0.1",
"npm": "^6.1.0"
"npm": "^6.2.0"
},
"devDependencies": {
"benchmark": "^2.1.4",
@@ -58,7 +58,7 @@
"mustache": "~2.3.0",
"node-static": "^0.7.10",
"requirejs": "^2.3.3",
"webpack": "^4.15.1",
"webpack": "^4.16.2",
"webpack-command": "^0.4.1"
}
}

View File

@@ -24,10 +24,10 @@
class InputScanner:
def __init__(self, input):
if input is None:
input = ''
self.__input = input
def __init__(self, input_string):
if input_string is None:
input_string = ''
self.__input = input_string
self.__input_length = len(self.__input)
self.__position = 0

View File

@@ -35,7 +35,7 @@ def mergeOpts(options, childFieldName):
finalOpts = copy.copy(options)
local = getattr(finalOpts, childFieldName, None)
if (local):
if local:
delattr(finalOpts, childFieldName)
for key in local:
setattr(finalOpts, key, local[key])

View File

@@ -51,12 +51,12 @@ class OutputLine:
def last(self):
if not self.is_empty():
return self.__items[-1]
else:
return None
def push(self, input):
self.__items.append(input)
self.__character_count += len(input)
return None
def push(self, item):
self.__items.append(item)
self.__character_count += len(item)
self.__empty = False
def pop(self):

View File

@@ -29,20 +29,20 @@ from ..core.token import Token
class TokenTypes:
START_EXPR = 'TK_START_EXPR'
END_EXPR = 'TK_END_EXPR',
START_BLOCK = 'TK_START_BLOCK',
END_BLOCK = 'TK_END_BLOCK',
WORD = 'TK_WORD',
RESERVED = 'TK_RESERVED',
SEMICOLON = 'TK_SEMICOLON',
STRING = 'TK_STRING',
EQUALS = 'TK_EQUALS',
OPERATOR = 'TK_OPERATOR',
COMMA = 'TK_COMMA',
BLOCK_COMMENT = 'TK_BLOCK_COMMENT',
COMMENT = 'TK_COMMENT',
DOT = 'TK_DOT',
UNKNOWN = 'TK_UNKNOWN',
END_EXPR = 'TK_END_EXPR'
START_BLOCK = 'TK_START_BLOCK'
END_BLOCK = 'TK_END_BLOCK'
WORD = 'TK_WORD'
RESERVED = 'TK_RESERVED'
SEMICOLON = 'TK_SEMICOLON'
STRING = 'TK_STRING'
EQUALS = 'TK_EQUALS'
OPERATOR = 'TK_OPERATOR'
COMMA = 'TK_COMMA'
BLOCK_COMMENT = 'TK_BLOCK_COMMENT'
COMMENT = 'TK_COMMENT'
DOT = 'TK_DOT'
UNKNOWN = 'TK_UNKNOWN'
EOF = 'TK_EOF'
def __init__(self):
@@ -156,10 +156,10 @@ class Tokenizer:
open_stack.append(open_token)
open_token = next
elif (next.type == TOKEN.END_BLOCK or next.type == TOKEN.END_EXPR) and \
(open_token is not None and (
(next.text == ']' and open_token.text == '[') or
(next.text == ')' and open_token.text == '(') or
(next.text == '}' and open_token.text == '{'))):
(open_token is not None and (
(next.text == ']' and open_token.text == '[') or
(next.text == ')' and open_token.text == '(') or
(next.text == '}' and open_token.text == '{'))):
next.parent = open_token.parent
next.opened = open_token
open_token = open_stack.pop()
@@ -194,7 +194,7 @@ class Tokenizer:
last_token = Token(TOKEN.START_BLOCK, '{')
resulting_string = self.input.readWhile(self.whitespacePattern)
if not resulting_string == '':
if resulting_string != '':
if resulting_string == ' ':
self.whitespace_before_token = resulting_string
else:
@@ -206,13 +206,12 @@ class Tokenizer:
break
resulting_string = self.input.readWhile(self.acorn.identifier)
if not resulting_string == '':
if not (
last_token.type == TOKEN.DOT or (
if resulting_string != '':
if not (last_token.type == TOKEN.DOT or (
last_token.type == TOKEN.RESERVED and last_token.text in [
'set',
'get'])) and resulting_string in self.reserved_words:
if resulting_string == 'in' or resulting_string == 'of': # in and of are operators, need to hack
if resulting_string in ['in', 'of']: # in and of are operators, need to hack
return resulting_string, TOKEN.OPERATOR
return resulting_string, TOKEN.RESERVED
@@ -220,7 +219,7 @@ class Tokenizer:
return resulting_string, TOKEN.WORD
resulting_string = self.input.readWhile(self.number_pattern)
if not resulting_string == '':
if resulting_string != '':
return resulting_string, TOKEN.WORD
c = self.input.next()
@@ -294,11 +293,9 @@ class Tokenizer:
esc = False
while self.input.hasNext():
current_char = self.input.peek()
if not (
esc or (
current_char != delimiter and (
allow_unescaped_newlines or not bool(
self.acorn.newline.match(current_char))))):
if not (esc or (current_char != delimiter and (
allow_unescaped_newlines or not bool(
self.acorn.newline.match(current_char))))):
break
# Handle \r\n linebreaks after escapes or in template