Change js and py jsbeautifier to tokenize the record opener '#{' as a single START_BLOCK token

Antonius Anggito Arissaputro 2022-11-25 02:54:44 +07:00
parent 5cedf34305
commit 7c05020785
4 changed files with 29 additions and 9 deletions

View File

@@ -720,9 +720,6 @@ Beautifier.prototype.handle_start_block = function(current_token) {
     }
   }
 
-  // Issue #2062 check to see if defining a new Record type - #{}
-  // Conditional on line 774 uses this variable
-  var is_record = this._flags.last_word === '#';
   var empty_braces = !next_token.comments_before && next_token.text === '}';
   var empty_anonymous_function = empty_braces && this._flags.last_word === 'function' &&
     this._flags.last_token.type === TOKEN.END_EXPR;
@@ -771,7 +768,7 @@ Beautifier.prototype.handle_start_block = function(current_token) {
       if (in_array(this._flags.last_token.type, [TOKEN.START_BLOCK, TOKEN.SEMICOLON]) && !this._flags.inline_frame) {
         this.print_newline();
       } else {
-        this._output.space_before_token = !is_record;
+        this._output.space_before_token = true;
       }
     }
   }
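
With '#{' arriving from the tokenizer as a single START_BLOCK token (see the tokenizer hunks below), the beautifier no longer needs the is_record flag to suppress the space it used to print between a '#' word and the following '{', so the plain space_before_token = true path applies. A minimal usage sketch of the intended end-to-end effect, assuming a local js-beautify build that includes this commit; the record literal comes from the TC39 Records & Tuples proposal, and the output in the comments is the expected shape, not a result taken from this commit's tests:

// Hypothetical input/output sketch, not a verified test case.
var beautify = require('js-beautify').js;

var input = "const point=#{x:1,y:2};";
console.log(beautify(input, { indent_size: 2 }));
// Expected along the lines of:
// const point = #{
//   x: 1,
//   y: 2
// };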

View File

@@ -167,6 +167,7 @@ Tokenizer.prototype._get_next_token = function(previous_token, open_token) { //
   token = token || this._read_non_javascript(c);
   token = token || this._read_string(c);
+  token = token || this._read_pair(c, this._input.peek(1)); // Issue #2062 hack for record type '#{'
   token = token || this._read_word(previous_token);
   token = token || this._read_singles(c);
   token = token || this._read_comment(c);
@@ -225,6 +226,19 @@ Tokenizer.prototype._read_singles = function(c) {
   return token;
 };
 
+Tokenizer.prototype._read_pair = function(c, d) {
+  var token = null;
+  if (c === '#' && d === '{') {
+    token = this._create_token(TOKEN.START_BLOCK, c+d);
+  }
+
+  if (token) {
+    this._input.next();
+    this._input.next();
+  }
+  return token;
+};
+
 Tokenizer.prototype._read_punctuation = function() {
   var resulting_string = this.__patterns.punct.read();
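
The new _read_pair runs ahead of _read_word and _read_singles in the reader chain above, builds the token first, and only consumes input (the two next() calls) once both characters of the pair have matched, so everything other than '#{' falls through to the existing readers untouched. A standalone sketch of that lookahead-pair pattern on a toy stream; TinyStream and readPair below are illustrative stand-ins, not the library's InputScanner API:

// Toy character stream with the same peek(offset)/next() shape used above.
function TinyStream(text) {
  this._text = text;
  this._pos = 0;
}
TinyStream.prototype.peek = function(offset) {
  var i = this._pos + (offset || 0);
  return i < this._text.length ? this._text.charAt(i) : null;
};
TinyStream.prototype.next = function() {
  var c = this.peek();
  if (c !== null) { this._pos += 1; }
  return c;
};

// Mirrors the shape of _read_pair: decide on the token first, consume only on a match.
function readPair(stream, c, d) {
  var token = null;
  if (c === '#' && d === '{') {
    token = { type: 'START_BLOCK', text: c + d };
  }
  if (token) {
    stream.next();
    stream.next();
  }
  return token;
}

var s = new TinyStream("#{x: 1}");
console.log(readPair(s, s.peek(), s.peek(1))); // { type: 'START_BLOCK', text: '#{' }
console.log(s.peek());                         // 'x', since both '#' and '{' were consumed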

View File

@@ -766,10 +766,6 @@ class Beautifier:
         if reserved_array(self._flags.last_token.previous, ["class", "extends"]):
             self._flags.class_start_block = True
 
-        # Issue #2062 check to see if defining a new Record type - #{}
-        # Conditional on line 843 uses this variable
-        is_record = self._flags.last_token.text == "#"
-
         empty_braces = (
             (next_token is not None)
             and next_token.comments_before is None
@@ -840,7 +836,7 @@
             ):
                 self.print_newline()
             else:
-                self._output.space_before_token = not is_record
+                self._output.space_before_token = True
 
         self.print_token(current_token)
         self.indent()

View File

@@ -220,6 +220,7 @@ class Tokenizer(BaseTokenizer):
         token = token or self._read_non_javascript(c)
         token = token or self._read_string(c)
+        token = token or self._read_pair(c, self._input.peek(1))  # Issue #2062 hack for record type '#{'
         token = token or self._read_word(previous_token)
         token = token or self._read_singles(c)
         token = token or self._read_comment(c)
@@ -257,6 +258,18 @@ class Tokenizer(BaseTokenizer):
         return token
 
+    def _read_pair(self, c, d):
+        token = None
+        if c == "#" and d == "{":
+            token = self._create_token(TOKEN.START_BLOCK, c+d)
+
+        if token is not None:
+            self._input.next()
+            self._input.next()
+
+        return token
+
     def _read_word(self, previous_token):
         resulting_string = self._patterns.identifier.read()