mirror of https://github.com/mozilla/gecko-dev.git
Bug 1887638 - [devtools] Add InspectorCSSParser. r=emilio,devtools-reviewers,frontend-codestyle-reviewers,bomsy.
This new InspectorCSSParser makes use of the cssparser crate, so DevTools ends up using the same code as the CSS engine.

At the moment, we can't get the token start and end offsets from the crate, so we create a JS wrapper class that computes them in JS. This wrapper might be removed if we get a way to retrieve UTF-16 positions from the cssparser.

The existing lexer xpcshell test is modified so it can run against both the js-based and rust-based lexers.

Differential Revision: https://phabricator.services.mozilla.com/D202909
This commit is contained in: parent d238ca721d, commit f6bb5f8a6c
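To make the shape of the change concrete before the diff: a minimal usage sketch, assuming a chrome or xpcshell context where the InspectorCSSParser global is available (the input string is an arbitrary example, and info() is xpcshell-style logging).

const { getCSSLexer } = require("resource://devtools/shared/css/lexer.js");

// Opt into the rust-based lexer; the default (false) keeps the js Scanner.
const lexer = getCSSLexer("a { color: red; }", true);

let token;
while ((token = lexer.nextToken())) {
  // startOffset/endOffset are computed in JS by the wrapper below, by
  // accumulating token.text lengths, so consecutive tokens tile the input.
  info(token.tokenType + " [" + token.startOffset + ", " + token.endOffset + ")");
}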
@@ -1513,10 +1513,41 @@ Scanner.prototype = {
  * Create and return a new CSS lexer.
  *
  * @param {String} input the CSS text to lex
+ * @param {Boolean} useInspectorCSSParser Set to true to use InspectorCSSParser.
  * @return {CSSLexer} the new lexer
  */
-function getCSSLexer(input) {
+function getCSSLexer(input, useInspectorCSSParser = false) {
+  if (useInspectorCSSParser) {
+    return new InspectorCSSParserWrapper(input);
+  }
   return new Scanner(input);
 }

 exports.getCSSLexer = getCSSLexer;

+/**
+ * Wrapper around InspectorCSSParser.
+ * Once/if https://github.com/servo/rust-cssparser/pull/374 lands, we can remove this class.
+ */
+class InspectorCSSParserWrapper {
+  #offset = 0;
+
+  constructor(input) {
+    this.parser = new InspectorCSSParser(input);
+  }
+
+  nextToken() {
+    const token = this.parser.nextToken();
+    if (!token) {
+      return token;
+    }
+
+    // At the moment, InspectorCSSParser doesn't expose offsets, so we need to compute
+    // them manually here.
+    // We can do that because we are retrieving every token in the input string, and so the
+    // end offset of the last token is the start offset of the new token.
+    token.startOffset = this.#offset;
+    this.#offset += token.text.length;
+    token.endOffset = this.#offset;
+    return token;
+  }
+}
@@ -123,6 +123,7 @@ function Sandbox(options) {
   "FileReader",
   "FormData",
   "Headers",
+  "InspectorCSSParser",
   "InspectorUtils",
   "MIDIInputMap",
   "MIDIOutputMap",
@@ -7,197 +7,270 @@
 const jsLexer = require("resource://devtools/shared/css/lexer.js");

-function test_lexer(cssText, tokenTypes) {
-  const lexer = jsLexer.getCSSLexer(cssText);
-  let reconstructed = "";
-  let lastTokenEnd = 0;
-  let i = 0;
-  while (true) {
-    const token = lexer.nextToken();
-    if (!token) {
-      break;
-    }
-    let combined = token.tokenType;
-    if (token.text) {
-      combined += ":" + token.text;
-    }
-    equal(combined, tokenTypes[i]);
-    Assert.greater(token.endOffset, token.startOffset);
-    equal(token.startOffset, lastTokenEnd);
-    lastTokenEnd = token.endOffset;
-    reconstructed += cssText.substring(token.startOffset, token.endOffset);
-    ++i;
-  }
-  // Ensure that we saw the correct number of tokens.
-  equal(i, tokenTypes.length);
-  // Ensure that the reported offsets cover all the text.
-  equal(reconstructed, cssText);
-}
-
-var LEX_TESTS = [
-  ["simple", ["ident:simple"]],
-  [
-    "simple: { hi; }",
-    [
-      "ident:simple",
-      "symbol::",
-      "whitespace",
-      "symbol:{",
-      "whitespace",
-      "ident:hi",
-      "symbol:;",
-      "whitespace",
-      "symbol:}",
-    ],
-  ],
-  ["/* whatever */", ["comment"]],
-  ["'string'", ["string:string"]],
-  ['"string"', ["string:string"]],
-  [
-    "rgb(1,2,3)",
-    [
-      "function:rgb",
-      "number",
-      "symbol:,",
-      "number",
-      "symbol:,",
-      "number",
-      "symbol:)",
-    ],
-  ],
-  ["@media", ["at:media"]],
-  ["#hibob", ["id:hibob"]],
-  ["#123", ["hash:123"]],
-  ["23px", ["dimension:px"]],
-  ["23%", ["percentage"]],
-  ["url(http://example.com)", ["url:http://example.com"]],
-  ["url('http://example.com')", ["url:http://example.com"]],
-  ["url( 'http://example.com' )", ["url:http://example.com"]],
-  // In CSS Level 3, this is an ordinary URL, not a BAD_URL.
-  ["url(http://example.com", ["url:http://example.com"]],
-  ["url(http://example.com @", ["bad_url:http://example.com"]],
-  ["quo\\ting", ["ident:quoting"]],
-  ["'bad string\n", ["bad_string:bad string", "whitespace"]],
-  ["~=", ["includes"]],
-  ["|=", ["dashmatch"]],
-  ["^=", ["beginsmatch"]],
-  ["$=", ["endsmatch"]],
-  ["*=", ["containsmatch"]],
-
-  // URANGE may be on the way out, and it isn't used by devutils, so
-  // let's skip it.
-
-  [
-    "<!-- html comment -->",
-    [
-      "htmlcomment",
-      "whitespace",
-      "ident:html",
-      "whitespace",
-      "ident:comment",
-      "whitespace",
-      "htmlcomment",
-    ],
-  ],
-
-  // earlier versions of CSS had "bad comment" tokens, but in level 3,
-  // unterminated comments are just comments.
-  ["/* bad comment", ["comment"]],
-];
-
-function test_lexer_linecol(cssText, locations) {
-  const lexer = jsLexer.getCSSLexer(cssText);
-  let i = 0;
-  while (true) {
-    const token = lexer.nextToken();
-    const startLine = lexer.lineNumber;
-    const startColumn = lexer.columnNumber;
-
-    // We do this in a bit of a funny way so that we can also test the
-    // location of the EOF.
-    let combined = ":" + startLine + ":" + startColumn;
-    if (token) {
-      combined = token.tokenType + combined;
-    }
-
-    equal(combined, locations[i]);
-    ++i;
-
-    if (!token) {
-      break;
-    }
-  }
-  // Ensure that we saw the correct number of tokens.
-  equal(i, locations.length);
-}
-
-function test_lexer_eofchar(
-  cssText,
-  argText,
-  expectedAppend,
-  expectedNoAppend
-) {
-  const lexer = jsLexer.getCSSLexer(cssText);
-  while (lexer.nextToken()) {
-    // Nothing.
-  }
-
-  info("EOF char test, input = " + cssText);
-
-  let result = lexer.performEOFFixup(argText, true);
-  equal(result, expectedAppend);
-
-  result = lexer.performEOFFixup(argText, false);
-  equal(result, expectedNoAppend);
-}
-
-var LINECOL_TESTS = [
-  ["simple", ["ident:0:0", ":0:6"]],
-  ["\n stuff", ["whitespace:0:0", "ident:1:4", ":1:9"]],
-  [
-    '"string with \\\nnewline" \r\n',
-    ["string:0:0", "whitespace:1:8", ":2:0"],
-  ],
-];
-
-var EOFCHAR_TESTS = [
-  ["hello", "hello"],
-  ["hello \\", "hello \\\\", "hello \\\uFFFD"],
-  ["'hello", "'hello'"],
-  ['"hello', '"hello"'],
-  ["'hello\\", "'hello\\\\'", "'hello'"],
-  ['"hello\\', '"hello\\\\"', '"hello"'],
-  ["/*hello", "/*hello*/"],
-  ["/*hello*", "/*hello*/"],
-  ["/*hello\\", "/*hello\\*/"],
-  ["url(hello", "url(hello)"],
-  ["url('hello", "url('hello')"],
-  ['url("hello', 'url("hello")'],
-  ["url(hello\\", "url(hello\\\\)", "url(hello\\\uFFFD)"],
-  ["url('hello\\", "url('hello\\\\')", "url('hello')"],
-  ['url("hello\\', 'url("hello\\\\")', 'url("hello")'],
-];
-
-function run_test() {
-  let text, result;
-  for ([text, result] of LEX_TESTS) {
-    test_lexer(text, result);
-  }
-
-  for ([text, result] of LINECOL_TESTS) {
-    test_lexer_linecol(text, result);
-  }
-
-  let expectedAppend, expectedNoAppend;
-  for ([text, expectedAppend, expectedNoAppend] of EOFCHAR_TESTS) {
-    if (!expectedNoAppend) {
-      expectedNoAppend = expectedAppend;
-    }
-    test_lexer_eofchar(text, text, expectedAppend, expectedNoAppend);
-  }
-
-  // Ensure that passing a different inputString to performEOFFixup
-  // doesn't cause an assertion trying to strip a backslash from the
-  // end of an empty string.
-  test_lexer_eofchar("'\\", "", "\\'", "'");
-}
+add_task(function test_lexer() {
+  const LEX_TESTS = [
+    ["simple", ["ident:simple"], ["Ident:simple"]],
+    [
+      "simple: { hi; }",
+      [
+        "ident:simple",
+        "symbol::",
+        "whitespace",
+        "symbol:{",
+        "whitespace",
+        "ident:hi",
+        "symbol:;",
+        "whitespace",
+        "symbol:}",
+      ],
+      [
+        "Ident:simple",
+        "Colon::",
+        "WhiteSpace: ",
+        "CurlyBracketBlock:{",
+        "WhiteSpace: ",
+        "Ident:hi",
+        "Semicolon:;",
+        "WhiteSpace: ",
+        "CloseCurlyBracket:}",
+      ],
+    ],
+    ["/* whatever */", ["comment"], ["Comment:/* whatever */"]],
+    ["'string'", ["string:string"], ["QuotedString:'string'"]],
+    ['"string"', ["string:string"], [`QuotedString:"string"`]],
+    [
+      "rgb(1,2,3)",
+      [
+        "function:rgb",
+        "number",
+        "symbol:,",
+        "number",
+        "symbol:,",
+        "number",
+        "symbol:)",
+      ],
+      [
+        "Function:rgb(",
+        "Number:1",
+        "Comma:,",
+        "Number:2",
+        "Comma:,",
+        "Number:3",
+        "CloseParenthesis:)",
+      ],
+    ],
+    ["@media", ["at:media"], ["AtKeyword:@media"]],
+    ["#hibob", ["id:hibob"], ["IDHash:#hibob"]],
+    ["#123", ["hash:123"], ["Hash:#123"]],
+    ["23px", ["dimension:px"], ["Dimension:23px"]],
+    ["23%", ["percentage"], ["Percentage:23%"]],
+    [
+      "url(http://example.com)",
+      ["url:http://example.com"],
+      ["UnquotedUrl:url(http://example.com)"],
+    ],
+    [
+      "url('http://example.com')",
+      ["url:http://example.com"],
+      [
+        "Function:url(",
+        "QuotedString:'http://example.com'",
+        "CloseParenthesis:)",
+      ],
+    ],
+    [
+      "url( 'http://example.com' )",
+      ["url:http://example.com"],
+      [
+        "Function:url(",
+        "WhiteSpace: ",
+        "QuotedString:'http://example.com'",
+        "WhiteSpace: ",
+        "CloseParenthesis:)",
+      ],
+    ],
+    // In CSS Level 3, this is an ordinary URL, not a BAD_URL.
+    [
+      "url(http://example.com",
+      ["url:http://example.com"],
+      ["UnquotedUrl:url(http://example.com"],
+    ],
+    [
+      "url(http://example.com @",
+      ["bad_url:http://example.com"],
+      ["BadUrl:url(http://example.com @"],
+    ],
+    ["quo\\ting", ["ident:quoting"], ["Ident:quo\\ting"]],
+    [
+      "'bad string\n",
+      ["bad_string:bad string", "whitespace"],
+      ["BadString:'bad string", "WhiteSpace:\n"],
+    ],
+    ["~=", ["includes"], ["IncludeMatch:~="]],
+    ["|=", ["dashmatch"], ["DashMatch:|="]],
+    ["^=", ["beginsmatch"], ["PrefixMatch:^="]],
+    ["$=", ["endsmatch"], ["SuffixMatch:$="]],
+    ["*=", ["containsmatch"], ["SubstringMatch:*="]],
+
+    // URANGE may be on the way out, and it isn't used by devutils, so
+    // let's skip it.
+
+    [
+      "<!-- html comment -->",
+      [
+        "htmlcomment",
+        "whitespace",
+        "ident:html",
+        "whitespace",
+        "ident:comment",
+        "whitespace",
+        "htmlcomment",
+      ],
+      [
+        "CDO:<!--",
+        "WhiteSpace: ",
+        "Ident:html",
+        "WhiteSpace: ",
+        "Ident:comment",
+        "WhiteSpace: ",
+        "CDC:-->",
+      ],
+    ],
+
+    // earlier versions of CSS had "bad comment" tokens, but in level 3,
+    // unterminated comments are just comments.
+    ["/* bad comment", ["comment"], ["Comment:/* bad comment"]],
+  ];
+
+  const test = (cssText, useInspectorCSSParser, tokenTypes) => {
+    const lexer = jsLexer.getCSSLexer(cssText, useInspectorCSSParser);
+    let reconstructed = "";
+    let lastTokenEnd = 0;
+    let i = 0;
+    let token;
+    while ((token = lexer.nextToken())) {
+      let combined = token.tokenType;
+      if (token.text) {
+        combined += ":" + token.text;
+      }
+      equal(combined, tokenTypes[i]);
+      Assert.greater(token.endOffset, token.startOffset);
+      equal(token.startOffset, lastTokenEnd);
+      lastTokenEnd = token.endOffset;
+      reconstructed += cssText.substring(token.startOffset, token.endOffset);
+      ++i;
+    }
+    // Ensure that we saw the correct number of tokens.
+    equal(i, tokenTypes.length);
+    // Ensure that the reported offsets cover all the text.
+    equal(reconstructed, cssText);
+  };
+
+  for (const [cssText, jsTokenTypes, rustTokenTypes] of LEX_TESTS) {
+    info(`Test "${cssText}" with js-based lexer`);
+    test(cssText, false, jsTokenTypes);
+
+    info(`Test "${cssText}" with rust-based lexer`);
+    test(cssText, true, rustTokenTypes);
+  }
+});
+
+add_task(function test_lexer_linecol() {
+  const LINECOL_TESTS = [
+    ["simple", ["ident:0:0", ":0:6"], ["Ident:0:0", ":0:6"]],
+    [
+      "\n stuff",
+      ["whitespace:0:0", "ident:1:4", ":1:9"],
+      ["WhiteSpace:0:0", "Ident:1:4", ":1:9"],
+    ],
+    [
+      '"string with \\\nnewline" \r\n',
+      ["string:0:0", "whitespace:1:8", ":2:0"],
+      ["QuotedString:0:0", "WhiteSpace:1:8", ":2:0"],
+    ],
+  ];
+
+  const test = (cssText, useInspectorCSSParser, locations) => {
+    const lexer = jsLexer.getCSSLexer(cssText, useInspectorCSSParser);
+    let i = 0;
+    let token;
+    const testLocation = () => {
+      const startLine = useInspectorCSSParser
+        ? lexer.parser.lineNumber
+        : lexer.lineNumber;
+      const startColumn = useInspectorCSSParser
+        ? lexer.parser.columnNumber
+        : lexer.columnNumber;
+
+      // We do this in a bit of a funny way so that we can also test the
+      // location of the EOF.
+      let combined = ":" + startLine + ":" + startColumn;
+      if (token) {
+        combined = token.tokenType + combined;
+      }
+
+      equal(combined, locations[i]);
+      ++i;
+    };
+    while ((token = lexer.nextToken())) {
+      testLocation();
+    }
+    // Collect location after we consumed all the tokens
+    testLocation();
+    // Ensure that we saw the correct number of tokens.
+    equal(i, locations.length);
+  };
+
+  for (const [cssText, jsLocations, rustLocations] of LINECOL_TESTS) {
+    info(`Test "${cssText}" with js-based lexer`);
+    test(cssText, false, jsLocations);
+
+    info(`Test "${cssText}" with rust-based lexer`);
+    test(cssText, true, rustLocations);
+  }
+});
+
+add_task(function test_lexer_eofchar() {
+  const EOFCHAR_TESTS = [
+    ["hello", "hello"],
+    ["hello \\", "hello \\\\", "hello \\\uFFFD"],
+    ["'hello", "'hello'"],
+    ['"hello', '"hello"'],
+    ["'hello\\", "'hello\\\\'", "'hello'"],
+    ['"hello\\', '"hello\\\\"', '"hello"'],
+    ["/*hello", "/*hello*/"],
+    ["/*hello*", "/*hello*/"],
+    ["/*hello\\", "/*hello\\*/"],
+    ["url(hello", "url(hello)"],
+    ["url('hello", "url('hello')"],
+    ['url("hello', 'url("hello")'],
+    ["url(hello\\", "url(hello\\\\)", "url(hello\\\uFFFD)"],
+    ["url('hello\\", "url('hello\\\\')", "url('hello')"],
+    ['url("hello\\', 'url("hello\\\\")', 'url("hello")'],
+    // Ensure that passing a different inputString to performEOFFixup
+    // doesn't cause an assertion trying to strip a backslash from the
+    // end of an empty string.
+    ["'\\", "\\'", "'", ""],
+  ];
+
+  for (let [
+    cssText,
+    expectedAppend,
+    expectedNoAppend,
+    argText = cssText,
+  ] of EOFCHAR_TESTS) {
+    if (!expectedNoAppend) {
+      expectedNoAppend = expectedAppend;
+    }
+
+    const lexer = jsLexer.getCSSLexer(cssText);
+    while (lexer.nextToken()) {
+      // We don't need to do anything with the tokens. We only want to consume the iterator
+      // so we can safely call performEOFFixup.
+    }
+
+    info("EOF char test, input = " + cssText);
+
+    let result = lexer.performEOFFixup(argText, true);
+    equal(result, expectedAppend);
+
+    result = lexer.performEOFFixup(argText, false);
+    equal(result, expectedNoAppend);
+  }
+});
@@ -415,6 +415,10 @@ DOMInterfaces = {
     'wrapperCache': False,
 },

+'InspectorCSSParser': {
+    'wrapperCache': False,
+},
+
 'IntersectionObserver': {
     'nativeType': 'mozilla::dom::DOMIntersectionObserver',
 },
@@ -227,3 +227,44 @@ interface InspectorFontFace {
   readonly attribute DOMString format; // as per http://www.w3.org/TR/css3-webfonts/#referencing
   readonly attribute DOMString metadata; // XML metadata from WOFF file (if any)
 };
+
+dictionary InspectorCSSToken {
+  // The token type.
+  required UTF8String tokenType;
+
+  // Text associated with the token.
+  required UTF8String text;
+
+  // Unit for Dimension tokens
+  required UTF8String? unit;
+
+  // Float value for Dimension, Number and Percentage tokens
+  double? number = null;
+};
+
+/**
+ * InspectorCSSParser is an interface to the CSS lexer. It tokenizes an
+ * input stream and returns CSS tokens.
+ */
+[Func="nsContentUtils::IsCallerChromeOrFuzzingEnabled",
+ Exposed=Window]
+interface InspectorCSSParser {
+  constructor(UTF8String text);
+
+  /**
+   * The line number of the most recently returned token. Line
+   * numbers are 0-based.
+   */
+  readonly attribute unsigned long lineNumber;
+
+  /**
+   * The column number of the most recently returned token. Column
+   * numbers are 0-based.
+   */
+  readonly attribute unsigned long columnNumber;
+
+  /**
+   * Return the next token, or null at EOF.
+   */
+  InspectorCSSToken? nextToken();
+};
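Taken together, the dictionary and interface above are consumed like this; a minimal sketch for chrome-privileged JS (the input string "23.5px" is an arbitrary example, info() is xpcshell-style logging):

const parser = new InspectorCSSParser("23.5px");
let token;
while ((token = parser.nextToken())) {
  // For the single Dimension token here: tokenType "Dimension",
  // text "23.5px", unit "px", number 23.5.
  info(`${token.tokenType} "${token.text}" unit=${token.unit} number=${token.number}`);
  // Start position (line/column) of the most recently returned token.
  info(`at ${parser.lineNumber}:${parser.columnNumber}`);
}
// nextToken() returned null: EOF reached.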
@@ -211,10 +211,10 @@ interface nsIXPCComponents_Utils : nsISupports
    * object that you want to make available as a global to code running in
    * the sandbox. Possible values: Blob, ChromeUtils, CSS, CSSRule,
    * Directory, DOMParser, Element, Event, File, FileReader, FormData,
-   * InspectorUtils, MessageChannel, Node, NodeFilter, PromiseDebugging,
-   * TextDecoder, TextEncoder, URL, URLSearchParams, XMLHttpRequest,
-   * XMLSerializer, atob, btoa, caches, crypto, fetch, indexedDB,
-   * rtcIdentityProvider
+   * InspectorCSSParser, InspectorUtils, MessageChannel, Node, NodeFilter,
+   * PromiseDebugging, TextDecoder, TextEncoder, URL, URLSearchParams,
+   * XMLHttpRequest, XMLSerializer, atob, btoa, caches, crypto, fetch,
+   * indexedDB, rtcIdentityProvider
    * - wantXrays: {Boolean} Whether the sandbox wants Xray vision with
    *   respect to same-origin objects outside the sandbox.
    *   Note that wantXrays is essentially deprecated. The preferred method
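As a hedged sketch of the sandbox plumbing this documents (the principal choice and the CSS text are placeholder values, not from the patch):

const principal = Services.scriptSecurityManager.getSystemPrincipal();
const sandbox = Cu.Sandbox(principal, {
  wantGlobalProperties: ["InspectorCSSParser"],
});
const tokenType = Cu.evalInSandbox(
  `new InspectorCSSParser("div {}").nextToken().tokenType`,
  sandbox
);
// tokenType === "Ident" for the leading "div".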
@@ -928,6 +928,8 @@ bool xpc::GlobalProperties::Parse(JSContext* cx, JS::HandleObject obj) {
     Headers = true;
   } else if (JS_LinearStringEqualsLiteral(nameStr, "IOUtils")) {
     IOUtils = true;
+  } else if (JS_LinearStringEqualsLiteral(nameStr, "InspectorCSSParser")) {
+    InspectorCSSParser = true;
   } else if (JS_LinearStringEqualsLiteral(nameStr, "InspectorUtils")) {
     InspectorUtils = true;
   } else if (JS_LinearStringEqualsLiteral(nameStr, "MessageChannel")) {
@@ -1075,6 +1077,11 @@ bool xpc::GlobalProperties::Define(JSContext* cx, JS::HandleObject obj) {
     return false;
   }

+  if (InspectorCSSParser &&
+      !dom::InspectorCSSParser_Binding::GetConstructorObject(cx)) {
+    return false;
+  }
+
   if (InspectorUtils && !dom::InspectorUtils_Binding::GetConstructorObject(cx))
     return false;

@@ -2204,6 +2204,7 @@ struct GlobalProperties {
   bool FormData : 1;
   bool Headers : 1;
   bool IOUtils : 1;
+  bool InspectorCSSParser : 1;
   bool InspectorUtils : 1;
   bool MessageChannel : 1;
   bool MIDIInputMap : 1;
layout/inspector/InspectorCSSParser.cpp (new file, 65 lines)
@@ -0,0 +1,65 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "mozilla/dom/InspectorCSSParser.h"

#include "mozilla/ServoBindings.h"
#include "mozilla/ServoStyleConsts.h"
#include "mozilla/UniquePtr.h"

namespace mozilla::dom {

InspectorCSSParser::InspectorCSSParser(const nsACString& aText)
    : mInput(aText) {
  mParserState = Servo_CSSParser_create(&mInput);
}

UniquePtr<InspectorCSSParser> InspectorCSSParser::Constructor(
    const GlobalObject& aGlobal, const nsACString& aText) {
  return MakeUnique<InspectorCSSParser>(aText);
}

InspectorCSSParser::~InspectorCSSParser() {
  Servo_CSSParser_destroy(mParserState);
  mParserState = nullptr;
}

uint32_t InspectorCSSParser::LineNumber() const { return mLineNumber; }

uint32_t InspectorCSSParser::ColumnNumber() const {
  // mColumnNumber is 1-based, but consumers expect 0-based.
  return mColumnNumber - 1;
}

void InspectorCSSParser::NextToken(Nullable<InspectorCSSToken>& aResult) {
  StyleCSSToken cssToken;
  if (!Servo_CSSParser_NextToken(&mInput, mParserState, &cssToken)) {
    aResult.SetNull();

    mLineNumber = Servo_CSSParser_GetCurrentLine(mParserState);
    mColumnNumber = Servo_CSSParser_GetCurrentColumn(mParserState);

    return;
  }

  InspectorCSSToken& inspectorCssToken = aResult.SetValue();
  inspectorCssToken.mText.Append(cssToken.text);
  inspectorCssToken.mTokenType.Append(cssToken.token_type);
  if (cssToken.has_unit) {
    inspectorCssToken.mUnit.Append(cssToken.unit);
  } else {
    inspectorCssToken.mUnit.SetIsVoid(true);
  }
  if (cssToken.has_number) {
    // Reduce precision to avoid floating point imprecision
    inspectorCssToken.mNumber = round(cssToken.number * 100) / 100.0;
  }

  mLineNumber = cssToken.line;
  mColumnNumber = cssToken.column;
}

}  // namespace mozilla::dom
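The 1-based to 0-based column shift above is observable from JS; a small sketch mirroring the "simple" linecol expectations in the xpcshell test:

const parser = new InspectorCSSParser("simple");
parser.nextToken(); // Ident "simple"
equal(parser.lineNumber, 0); // first line
equal(parser.columnNumber, 0); // cssparser reported column 1; shifted to 0
parser.nextToken(); // null: EOF
equal(parser.columnNumber, 6); // position after "simple", still 0-based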
layout/inspector/InspectorCSSParser.h (new file, 47 lines)
@@ -0,0 +1,47 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef InspectorCSSParser_h___
#define InspectorCSSParser_h___

#include "mozilla/dom/InspectorUtilsBinding.h"
#include "mozilla/dom/NonRefcountedDOMObject.h"

namespace mozilla {

class StyleParserState;

namespace dom {

class InspectorCSSParser final : public NonRefcountedDOMObject {
 public:
  explicit InspectorCSSParser(const nsACString&);
  // The WebIDL constructor.
  static UniquePtr<InspectorCSSParser> Constructor(const GlobalObject& aGlobal,
                                                   const nsACString& aText);

  ~InspectorCSSParser();

  bool WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto,
                  JS::MutableHandle<JSObject*> aReflector) {
    return InspectorCSSParser_Binding::Wrap(aCx, this, aGivenProto, aReflector);
  }

  uint32_t LineNumber() const;
  uint32_t ColumnNumber() const;
  void NextToken(Nullable<InspectorCSSToken>& aResult);

 private:
  const nsCString mInput;
  StyleParserState* mParserState;
  uint32_t mLineNumber = 0;
  uint32_t mColumnNumber = 0;
};

}  // namespace dom
}  // namespace mozilla

#endif /* InspectorCSSParser_h___ */
@@ -19,6 +19,7 @@ EXPORTS.mozilla += [
 ]

 EXPORTS.mozilla.dom += [
+    "InspectorCSSParser.h",
     "InspectorFontFace.h",
     "InspectorUtils.h",
 ]
@@ -26,6 +27,7 @@ EXPORTS.mozilla.dom += [
 UNIFIED_SOURCES += [
     "inDeepTreeWalker.cpp",
     "inLayoutUtils.cpp",
+    "InspectorCSSParser.cpp",
     "InspectorFontFace.cpp",
     "InspectorUtils.cpp",
     "ServoStyleRuleMap.cpp",
@@ -403,6 +403,7 @@ cbindgen-types = [
     { gecko = "StyleOffsetRotate", servo = "crate::values::computed::motion::OffsetRotate" },
     { gecko = "StylePathCommand", servo = "crate::values::specified::svg_path::PathCommand" },
     { gecko = "StyleRayFunction", servo = "crate::values::computed::motion::RayFunction" },
+    { gecko = "StyleParserState", servo = "cssparser::ParserState" },
     { gecko = "StyleUnicodeRange", servo = "cssparser::UnicodeRange" },
     { gecko = "StyleOverflowWrap", servo = "crate::values::computed::OverflowWrap" },
     { gecko = "StyleWordBreak", servo = "crate::values::computed::WordBreak" },
@@ -89,6 +89,7 @@ class SharedFontList;
 class StyleSheet;
 class WritingMode;
 class ServoElementSnapshotTable;
+class StyleParserState;

 template <typename T>
 struct StyleForgottenArcSlicePtr;
@@ -6,7 +6,7 @@ use super::error_reporter::ErrorReporter;
 use super::stylesheet_loader::{AsyncStylesheetParser, StylesheetLoader};
 use bincode::{deserialize, serialize};
 use cssparser::ToCss as ParserToCss;
-use cssparser::{BasicParseError, ParseError as CssParseError, Parser, ParserInput, SourceLocation, UnicodeRange, Token};
+use cssparser::{BasicParseError, ParseError as CssParseError, Parser, ParserInput, ParserState, SourceLocation, UnicodeRange, Token};
 use dom::{DocumentState, ElementState};
 use malloc_size_of::MallocSizeOfOps;
 use nsstring::{nsCString, nsString};
@@ -9277,3 +9277,161 @@ fn get_byte_index_from_line_and_column(

     None
 }
+
+#[repr(C)]
+pub struct CSSToken {
+    pub text: nsCString,
+    pub token_type: nsCString,
+    pub has_unit: bool,
+    pub unit: nsCString,
+    pub has_number: bool,
+    pub number: f32,
+    // line and column at which the token starts
+    pub line: u32,
+    pub column: u32,
+}
+
+#[no_mangle]
+pub unsafe extern "C" fn Servo_CSSParser_create(
+    text: &nsACString,
+) -> *mut ParserState {
+    let css_text = unsafe { text.as_str_unchecked() };
+    let mut parser_input = ParserInput::new(&css_text);
+    let input = Parser::new(&mut parser_input);
+    Box::into_raw(Box::new(input.state()))
+}
+
+#[no_mangle]
+pub unsafe extern "C" fn Servo_CSSParser_destroy(
+    state: *mut ParserState,
+) {
+    drop(Box::from_raw(state));
+}
+
+#[no_mangle]
+pub unsafe extern "C" fn Servo_CSSParser_GetCurrentLine(
+    state: &ParserState,
+) -> u32 {
+    return state.source_location().line;
+}
+
+#[no_mangle]
+pub unsafe extern "C" fn Servo_CSSParser_GetCurrentColumn(
+    state: &ParserState,
+) -> u32 {
+    return state.source_location().column;
+}
+
+#[no_mangle]
+pub unsafe extern "C" fn Servo_CSSParser_NextToken(
+    text: &nsACString,
+    state: &mut ParserState,
+    css_token: &mut CSSToken,
+) -> bool {
+    let css_text = unsafe { text.as_str_unchecked() };
+    let mut parser_input = ParserInput::new(&css_text);
+    let mut input = Parser::new(&mut parser_input);
+    input.reset(state);
+
+    let token_start = input.position();
+    let location_start = state.source_location();
+    let Ok(token) = &input.next_including_whitespace_and_comments() else {
+        return false;
+    };
+
+    let token_type = match *token {
+        Token::Ident(_) => "Ident",
+        Token::AtKeyword(_) => "AtKeyword",
+        Token::Hash(_) => "Hash",
+        Token::IDHash(_) => "IDHash",
+        Token::QuotedString(_) => "QuotedString",
+        Token::UnquotedUrl(_) => "UnquotedUrl",
+        Token::Delim(_) => "Delim",
+        Token::Number{..} => "Number",
+        Token::Percentage{..} => "Percentage",
+        Token::Dimension{..} => "Dimension",
+        Token::WhiteSpace(_) => "WhiteSpace",
+        Token::Comment(_) => "Comment",
+        Token::Colon => "Colon",
+        Token::Semicolon => "Semicolon",
+        Token::Comma => "Comma",
+        Token::IncludeMatch => "IncludeMatch",
+        Token::DashMatch => "DashMatch",
+        Token::PrefixMatch => "PrefixMatch",
+        Token::SuffixMatch => "SuffixMatch",
+        Token::SubstringMatch => "SubstringMatch",
+        Token::CDO => "CDO",
+        Token::CDC => "CDC",
+        Token::Function(_) => "Function",
+        Token::ParenthesisBlock => "ParenthesisBlock",
+        Token::SquareBracketBlock => "SquareBracketBlock",
+        Token::CurlyBracketBlock => "CurlyBracketBlock",
+        Token::BadUrl(_) => "BadUrl",
+        Token::BadString(_) => "BadString",
+        Token::CloseParenthesis => "CloseParenthesis",
+        Token::CloseSquareBracket => "CloseSquareBracket",
+        Token::CloseCurlyBracket => "CloseCurlyBracket",
+    };
+
+    let token_unit = match *token {
+        Token::Dimension{
+            ref unit, ..
+        } => {
+            let mut unit_text = nsCString::new();
+            unit_text.assign(unit.as_bytes());
+            Some(unit_text)
+        },
+        _ => None
+    };
+
+    let token_number = match *token {
+        Token::Dimension {
+            ref value, ..
+        } => Some(value),
+        Token::Number{
+            ref value, ..
+        } => Some(value),
+        Token::Percentage{
+            ref unit_value, ..
+        } => Some(unit_value),
+        _ => None
+    };
+    css_token.has_number = token_number.is_some();
+    if css_token.has_number {
+        css_token.number = *token_number.unwrap();
+    }
+
+    let need_to_parse_nested_block = match *token {
+        Token::Function(_) |
+        Token::ParenthesisBlock |
+        Token::CurlyBracketBlock |
+        Token::SquareBracketBlock => true,
+        _ => false,
+    };
+
+    let mut text = nsCString::new();
+    text.assign(&input.slice_from(token_start));
+
+    css_token.text = text;
+    css_token.token_type = token_type.into();
+    css_token.has_unit = token_unit.is_some();
+    if css_token.has_unit {
+        css_token.unit = token_unit.unwrap();
+    }
+
+    css_token.line = location_start.line;
+    css_token.column = location_start.column;
+
+    if need_to_parse_nested_block {
+        let _ = input.parse_nested_block(
+            |i| -> Result<(), CssParseError<'_, BasicParseError>> {
+                *state = i.state();
+                Ok(())
+            },
+        );
+    } else {
+        *state = input.state();
+    }
+
+    return true;
+}
@@ -317,6 +317,7 @@ module.exports = {
     ImageData: false,
     ImageDocument: false,
     InputEvent: false,
+    InspectorCSSParser: false,
     InspectorFontFace: false,
     InspectorUtils: false,
     InstallTriggerImpl: false,
@@ -35,6 +35,7 @@ module.exports = {
     assert: false,
     Assert: false,
     BrowsingContext: false,
+    InspectorCSSParser: false,
     InspectorUtils: false,
     ok: false,
     is: false,