Bug 1720941, part 2: tests(ci): now do the actual vendoring r=webgpu-reviewers,taskgraph-reviewers,jmaher,jimb

After implementing vendoring in part 1, it's time to run it! This patch was broken out from part 1 to isolate a very tedious portion of the review. Most of this patch is composed of:

1. Generated CTS test files from part 1.
2. A massive wall of test expectation management acknowledging current passes and failures. Currently, Linux and Windows are expected to pass with these noted failures. Many, but not all, current failures on macOS are recorded.

Differential Revision: https://phabricator.services.mozilla.com/D169953
This commit is contained in:
Erich Gubler 2023-03-15 20:13:45 +00:00
parent af02a85436
commit 07d5552a55
1226 changed files with 347043 additions and 82 deletions

View File

@ -0,0 +1,23 @@
const path = require('path');
const resolve = require('resolve')
// Implements the following resolver spec:
// https://github.com/benmosher/eslint-plugin-import/blob/master/resolvers/README.md
exports.interfaceVersion = 2
exports.resolve = function (source, file, config) {
if (resolve.isCore(source)) return { found: true, path: null }
source = source.replace(/\.js$/, '.ts');
try {
return {
found: true, path: resolve.sync(source, {
extensions: [],
basedir: path.dirname(path.resolve(file)),
...config,
})
}
} catch (err) {
return { found: false }
}
}

View File

@ -0,0 +1 @@
/src/external/*

View File

@ -0,0 +1,127 @@
{
"root": true,
"parser": "@typescript-eslint/parser",
"parserOptions": { "project": "./tsconfig.json" },
"extends": [
"./node_modules/gts",
"plugin:import/errors",
"plugin:import/warnings",
"plugin:import/typescript"
],
"env": {
"browser": true,
"node": true
},
"plugins": ["node", "ban", "import", "deprecation"],
"rules": {
// Core rules
"linebreak-style": ["warn", "unix"],
"no-console": "warn",
"no-undef": "off",
"no-useless-rename": "warn",
"object-shorthand": "warn",
"quotes": ["warn", "single", { "avoidEscape": true, "allowTemplateLiterals": true }],
// All test TODOs must be tracked inside file/test descriptions or READMEs.
// Comments relating to TODOs in descriptions can be marked with references like "[1]".
// TODOs not relating to test coverage can be marked MAINTENANCE_TODO or similar.
"no-warning-comments": ["warn", { "terms": ["todo", "fixme", "xxx"], "location": "anywhere" }],
// Plugin: @typescript-eslint
"@typescript-eslint/no-inferrable-types": "off",
"@typescript-eslint/consistent-type-assertions": "warn",
// Recommended lints
// https://github.com/typescript-eslint/typescript-eslint/blob/main/packages/eslint-plugin/docs/rules/README.md
"@typescript-eslint/adjacent-overload-signatures": "warn",
"@typescript-eslint/await-thenable": "warn",
"@typescript-eslint/ban-ts-comment": "warn",
"@typescript-eslint/no-empty-interface": "warn",
"@typescript-eslint/no-explicit-any": "warn",
"@typescript-eslint/no-extra-non-null-assertion": "warn",
"@typescript-eslint/no-floating-promises": "warn",
"@typescript-eslint/no-for-in-array": "warn",
"@typescript-eslint/no-misused-new": "warn",
"@typescript-eslint/no-namespace": "warn",
"@typescript-eslint/no-non-null-asserted-optional-chain": "warn",
"@typescript-eslint/no-this-alias": "warn",
"@typescript-eslint/no-unnecessary-type-assertion": "warn",
"@typescript-eslint/no-unnecessary-type-constraint": "warn",
"@typescript-eslint/no-unused-vars": ["warn", { "vars": "all", "args": "none" }],
"@typescript-eslint/prefer-as-const": "warn",
"@typescript-eslint/prefer-for-of": "warn",
"@typescript-eslint/prefer-namespace-keyword": "warn",
"@typescript-eslint/restrict-plus-operands": "warn",
"@typescript-eslint/triple-slash-reference": "warn",
"@typescript-eslint/unbound-method": "warn",
// MAINTENANCE_TODO: Try to clean up and enable these recommended lints?
//"@typescript-eslint/no-unsafe-argument": "warn",
//"@typescript-eslint/no-unsafe-assignment": "warn",
//"@typescript-eslint/no-unsafe-call": "warn",
//"@typescript-eslint/no-unsafe-member-access": "warn",
//"@typescript-eslint/no-unsafe-return": "warn",
// Note: These recommended lints are probably not practical to enable.
//"@typescript-eslint/no-misused-promises": "warn",
//"@typescript-eslint/no-non-null-assertion": "warn",
//"@typescript-eslint/no-var-requires": "warn",
//"@typescript-eslint/restrict-template-expressions": "warn",
// Plugin: ban
"ban/ban": [
"warn",
{
"name": "setTimeout",
"message": "WPT disallows setTimeout; use `common/util/timeout.js`."
}
],
// Plugin: deprecation
//"deprecation/deprecation": "warn",
// Plugin: import
"import/order": [
"warn",
{
"groups": ["builtin", "external", "internal", "parent", "sibling", "index"],
"newlines-between": "always",
"alphabetize": { "order": "asc", "caseInsensitive": false }
}
],
"import/newline-after-import": ["warn", { "count": 1 }],
"import/no-duplicates": "warn",
"import/no-restricted-paths": [
"error",
{
"zones": [
{
"target": "./src/webgpu",
"from": "./src/common",
"except": ["./framework", "./util"],
"message": "Non-framework common/ code imported from webgpu/ suite"
},
{
"target": "./src/unittests",
"from": "./src/common",
"except": ["./framework", "./util", "./internal"],
"message": "Non-framework common/ code imported from unittests/ suite"
},
{
"target": "./src/webgpu",
"from": "./src/unittests",
"message": "unittests/ suite imported from webgpu/ suite"
},
{
"target": "./src/common",
"from": "./src",
"except": ["./common", "./external"],
"message": "Non common/ code imported from common/"
}
]
}
]
},
"settings": {
"import/resolver": {
"./.eslint-resolver": {}
}
}
}

View File

@ -0,0 +1 @@
* text=auto eol=lf

View File

@ -0,0 +1,21 @@
Issue: #<!-- Fill in the issue number here. See docs/intro/life_of.md -->
<hr>
**Requirements for PR author:**
- [ ] All missing test coverage is tracked with "TODO" or `.unimplemented()`.
- [ ] New helpers are `/** documented */` and new helper files are found in `helper_index.txt`.
- [ ] Test behaves as expected in a WebGPU implementation. (If not passing, explain above.)
**Requirements for [reviewer sign-off](https://github.com/gpuweb/cts/blob/main/docs/reviews.md):**
- [ ] Tests are properly located in the test tree.
- [ ] [Test descriptions](https://github.com/gpuweb/cts/blob/main/docs/intro/plans.md) allow a reader to "read only the test plans and evaluate coverage completeness", and accurately reflect the test code.
- [ ] Tests provide complete coverage (including validation control cases). **Missing coverage MUST be covered by TODOs.**
- [ ] Helpers and types promote readability and maintainability.
When landing this PR, be sure to make any necessary issue status updates.

View File

@ -0,0 +1,28 @@
name: Pull Request CI
on:
pull_request:
branches: [main]
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2.3.1
with:
persist-credentials: false
- run: |
git fetch origin ${{ github.event.pull_request.head.sha }}
git checkout ${{ github.event.pull_request.head.sha }}
- uses: actions/setup-node@v2-beta
with:
node-version: "15.x"
- run: npm ci
- run: npm test
- run: |
mkdir deploy-build/
cp -r README.md src standalone out docs deploy-build/
- uses: actions/upload-artifact@v2
with:
name: pr-artifact
path: deploy-build/

View File

@ -0,0 +1,26 @@
name: Push CI
on:
push:
branches: [main]
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2.3.1
with:
persist-credentials: false
- uses: actions/setup-node@v2-beta
with:
node-version: "15.x"
- run: npm ci
- run: |
npm test
mkdir deploy-build/
cp -r README.md src standalone out out-wpt docs tools deploy-build/
- uses: JamesIves/github-pages-deploy-action@4.1.4
with:
BRANCH: gh-pages
FOLDER: deploy-build
CLEAN: true

View File

@ -0,0 +1,80 @@
name: Workflow CI
on:
workflow_run:
workflows:
- "Pull Request CI"
types:
- completed
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2.3.1
with:
persist-credentials: false
- run: |
PR=$(curl https://api.github.com/search/issues?q=${{ github.event.workflow_run.head_sha }} |
grep -Po "(?<=${{ github.event.workflow_run.repository.full_name }}\/pulls\/)\d*" | head -1)
echo "PR=$PR" >> $GITHUB_ENV
- uses: actions/github-script@v3
id: pr-artifact
with:
github-token: ${{secrets.GITHUB_TOKEN}}
result-encoding: string
script: |
const artifacts_url = context.payload.workflow_run.artifacts_url
const artifacts_req = await github.request(artifacts_url)
const artifact = artifacts_req.data.artifacts[0]
const download = await github.actions.downloadArtifact({
owner: context.repo.owner,
repo: context.repo.repo,
artifact_id: artifact.id,
archive_format: "zip"
})
return download.url
- run: |
rm -rf *
curl -L -o "pr-artifact.zip" "${{ steps.pr-artifact.outputs.result }}"
unzip -o pr-artifact.zip
rm pr-artifact.zip
- run: |
cat << EOF >> firebase.json
{
"hosting": {
"public": ".",
"ignore": [
"firebase.json",
"**/.*",
"**/node_modules/**"
]
}
}
EOF
cat << EOF >> .firebaserc
{
"projects": {
"default": "gpuweb-cts"
}
}
EOF
- id: deployment
continue-on-error: true
uses: FirebaseExtended/action-hosting-deploy@v0
with:
firebaseServiceAccount: ${{ secrets.FIREBASE_SERVICE_ACCOUNT_CTS }}
expires: 10d
channelId: cts-prs-${{ env.PR }}-${{ github.event.workflow_run.head_sha }}
- uses: peter-evans/create-or-update-comment@v1
continue-on-error: true
if: ${{ steps.deployment.outcome == 'success' }}
with:
issue-number: ${{ env.PR }}
body: |
Previews, as seen when this [build job](https://github.com/${{github.repository}}/actions/runs/${{github.run_id}}) started (${{ github.event.workflow_run.head_sha }}):
[**Run tests**](${{ steps.deployment.outputs.details_url }}/standalone/) | [**View tsdoc**](${{ steps.deployment.outputs.details_url }}/docs/tsdoc/)
<!--
pr;head;sha
${{ env.PR }};${{ github.event.workflow_run.head_repository.full_name }};${{ github.event.workflow_run.head_sha }}
-->

196
dom/webgpu/tests/cts/checkout/.gitignore vendored Normal file
View File

@ -0,0 +1,196 @@
# VSCode - see .vscode/README.md
.vscode/
# Build files
/out/
/out-wpt/
/out-node/
/out-wpt-reftest-screenshots/
.tscache/
*.tmp.txt
/docs/tsdoc/
# Cache files
/standalone/data
# Created by https://www.gitignore.io/api/linux,macos,windows,node
# Edit at https://www.gitignore.io/?templates=linux,macos,windows,node
### Linux ###
*~
# temporary files which can be created if a process still has a handle open of a deleted file
.fuse_hidden*
# KDE directory preferences
.directory
# Linux trash folder which might appear on any partition or disk
.Trash-*
# .nfs files are created when an open file is removed but is still being accessed
.nfs*
### macOS ###
# General
.DS_Store
.AppleDouble
.LSOverride
# Icon must end with two \r
Icon
# Thumbnails
._*
# Files that might appear in the root of a volume
.DocumentRevisions-V100
.fseventsd
.Spotlight-V100
.TemporaryItems
.Trashes
.VolumeIcon.icns
.com.apple.timemachine.donotpresent
# Directories potentially created on remote AFP share
.AppleDB
.AppleDesktop
Network Trash Folder
Temporary Items
.apdisk
### Node ###
# Logs
logs
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*
lerna-debug.log*
# Diagnostic reports (https://nodejs.org/api/report.html)
report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json
# Runtime data
pids
*.pid
*.seed
*.pid.lock
# Directory for instrumented libs generated by jscoverage/JSCover
lib-cov
# Coverage directory used by tools like istanbul
coverage
*.lcov
# nyc test coverage
.nyc_output
# Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files)
.grunt
# Bower dependency directory (https://bower.io/)
bower_components
# node-waf configuration
.lock-wscript
# Compiled binary addons (https://nodejs.org/api/addons.html)
build/Release
# Dependency directories
node_modules/
jspm_packages/
# TypeScript v1 declaration files
typings/
# TypeScript cache
*.tsbuildinfo
# Optional npm cache directory
.npm
# Optional eslint cache
.eslintcache
# Optional REPL history
.node_repl_history
# Output of 'npm pack'
*.tgz
# Yarn Integrity file
.yarn-integrity
# dotenv environment variables file
.env
.env.test
# parcel-bundler cache (https://parceljs.org/)
.cache
# next.js build output
.next
# nuxt.js build output
.nuxt
# rollup.js default build output
dist/
# Uncomment the public line if your project uses Gatsby
# https://nextjs.org/blog/next-9-1#public-directory-support
# https://create-react-app.dev/docs/using-the-public-folder/#docsNav
# public
# Storybook build outputs
.out
.storybook-out
# vuepress build output
.vuepress/dist
# Serverless directories
.serverless/
# FuseBox cache
.fusebox/
# DynamoDB Local files
.dynamodb/
# Temporary folders
tmp/
temp/
### Windows ###
# Windows thumbnail cache files
Thumbs.db
Thumbs.db:encryptable
ehthumbs.db
ehthumbs_vista.db
# Dump file
*.stackdump
# Folder config file
[Dd]esktop.ini
# Recycle Bin used on file shares
$RECYCLE.BIN/
# Windows Installer files
*.cab
*.msi
*.msix
*.msm
*.msp
# Windows shortcuts
*.lnk
trace/
# End of https://www.gitignore.io/api/linux,macos,windows,node

View File

@ -0,0 +1,31 @@
# GPU for the Web
This repository is being used for work in the [W3C GPU for the Web Community
Group](https://www.w3.org/community/gpu/), governed by the [W3C Community
License Agreement (CLA)](http://www.w3.org/community/about/agreements/cla/). To
make substantive contributions, you must join the CG.
Contributions to the source code repository are subject to the terms of the
[3-Clause BSD License](./LICENSE.txt).
**Contributions will also be exported to
[web-platform-tests](https://github.com/web-platform-tests/wpt)
under the same license, and under the terms of its
[CONTRIBUTING.md](https://github.com/web-platform-tests/wpt/blob/master/CONTRIBUTING.md).**
If you are not the sole contributor to a contribution (pull request), please identify all
contributors in the pull request comment.
To add a contributor (other than yourself, that's automatic), mark them one per line as follows:
```
+@github_username
```
If you added a contributor by mistake, you can remove them in a comment with:
```
-@github_username
```
If you are making a pull request on behalf of someone else but you had no part in designing the
feature, you can remove yourself with the above syntax.

View File

@ -0,0 +1,229 @@
/* eslint-disable node/no-unpublished-require */
/* eslint-disable prettier/prettier */
/* eslint-disable no-console */

// Gruntfile for the WebGPU CTS. Defines the build, lint, typecheck, unittest,
// and serve tasks; most work is delegated to `node` subprocesses via grunt-run.
module.exports = function (grunt) {
  // Project configuration.
  grunt.initConfig({
    pkg: grunt.file.readJSON('package.json'),

    // Delete all build output directories.
    clean: {
      out: ['out/', 'out-wpt/', 'out-node/'],
    },

    // Subprocess commands run through grunt-run.
    run: {
      // Generate the version file from the current git revision.
      'generate-version': {
        cmd: 'node',
        args: ['tools/gen_version'],
      },
      // Generate test listings for each suite into out/.
      'generate-listings': {
        cmd: 'node',
        args: ['tools/gen_listings', 'out/', 'src/webgpu', 'src/stress', 'src/manual', 'src/unittests', 'src/demo'],
      },
      // Generate the WPT harness entry point HTML.
      'generate-wpt-cts-html': {
        cmd: 'node',
        args: ['tools/gen_wpt_cts_html', 'out-wpt/cts.https.html', 'src/common/templates/cts.https.html'],
      },
      // Pre-generate cached data files for the webgpu suite.
      'generate-cache': {
        cmd: 'node',
        args: ['tools/gen_cache', 'out/data', 'src/webgpu'],
      },
      // Run the framework's own unit tests under Node.
      unittest: {
        cmd: 'node',
        args: ['tools/run_node', 'unittests:*'],
      },
      // Transpile all sources into out/ with Babel (standalone build).
      'build-out': {
        cmd: 'node',
        args: [
          'node_modules/@babel/cli/bin/babel',
          '--extensions=.ts,.js',
          '--source-maps=true',
          '--out-dir=out/',
          'src/',
        ],
      },
      // Transpile only the subset needed for WPT export into out-wpt/.
      'build-out-wpt': {
        cmd: 'node',
        args: [
          'node_modules/@babel/cli/bin/babel',
          '--extensions=.ts,.js',
          '--source-maps=false',
          '--delete-dir-on-start',
          '--out-dir=out-wpt/',
          'src/',
          '--only=src/common/framework/',
          '--only=src/common/runtime/helper/',
          '--only=src/common/runtime/wpt.ts',
          '--only=src/webgpu/',
          // These files will be generated, instead of compiled from TypeScript.
          '--ignore=src/common/internal/version.ts',
          '--ignore=src/webgpu/listing.ts',
        ],
      },
      // Compile the Node-targeted build with tsc into out-node/.
      'build-out-node': {
        cmd: 'node',
        args: [
          'node_modules/typescript/lib/tsc.js',
          '--project', 'node.tsconfig.json',
          '--outDir', 'out-node/',
        ],
      },
      // Copy static resource files into out/ (Babel in copy-only mode).
      'copy-assets': {
        cmd: 'node',
        args: [
          'node_modules/@babel/cli/bin/babel',
          'src/resources/',
          '--out-dir=out/resources/',
          '--copy-files'
        ],
      },
      // Copy static resource files into out-wpt/.
      'copy-assets-wpt': {
        cmd: 'node',
        args: [
          'node_modules/@babel/cli/bin/babel',
          'src/resources/',
          '--out-dir=out-wpt/resources/',
          '--copy-files'
        ],
      },
      // Lint all TypeScript sources; any warning fails the task.
      lint: {
        cmd: 'node',
        args: ['node_modules/eslint/bin/eslint', 'src/**/*.ts', '--max-warnings=0'],
      },
      // Run repository presubmit checks.
      presubmit: {
        cmd: 'node',
        args: ['tools/presubmit'],
      },
      // Auto-fix lint and formatting issues in place.
      fix: {
        cmd: 'node',
        args: ['node_modules/eslint/bin/eslint', 'src/**/*.ts', '--fix'],
      },
      // Reformat the generated WPT output with Prettier.
      'autoformat-out-wpt': {
        cmd: 'node',
        args: ['node_modules/prettier/bin-prettier', '--loglevel=warn', '--write', 'out-wpt/**/*.js'],
      },
      // Build API documentation with TypeDoc.
      tsdoc: {
        cmd: 'node',
        args: ['node_modules/typedoc/bin/typedoc'],
      },
      // Same as tsdoc, but fail on any documentation warning (used in presubmit).
      'tsdoc-treatWarningsAsErrors': {
        cmd: 'node',
        args: ['node_modules/typedoc/bin/typedoc', '--treatWarningsAsErrors'],
      },
      // Serve the repository root on 127.0.0.1:8080 with caching disabled.
      serve: {
        cmd: 'node',
        args: ['node_modules/http-server/bin/http-server', '-p8080', '-a127.0.0.1', '-c-1']
      }
    },

    // File copies done with grunt-contrib-copy.
    copy: {
      // Copy generated (non-compiled) files from out/ into out-wpt/.
      'out-wpt-generated': {
        files: [
          { expand: true, cwd: 'out', src: 'common/internal/version.js', dest: 'out-wpt/' },
          { expand: true, cwd: 'out', src: 'webgpu/listing.js', dest: 'out-wpt/' },
        ],
      },
      // Copy HTML test cases verbatim into out-wpt/.
      'out-wpt-htmlfiles': {
        files: [
          { expand: true, cwd: 'src', src: 'webgpu/**/*.html', dest: 'out-wpt/' },
        ],
      },
    },

    // Typecheck-only pass using grunt-ts with the project tsconfig.
    ts: {
      check: {
        tsconfig: {
          tsconfig: 'tsconfig.json',
          passThrough: true,
        },
      },
    },
  });

  grunt.loadNpmTasks('grunt-contrib-clean');
  grunt.loadNpmTasks('grunt-contrib-copy');
  grunt.loadNpmTasks('grunt-run');
  grunt.loadNpmTasks('grunt-ts');

  // Tasks recorded here are printed by the `default` task's help listing.
  const helpMessageTasks = [];
  // Register a composite task and include it in the help listing.
  function registerTaskAndAddToHelp(name, desc, deps) {
    grunt.registerTask(name, deps);
    addExistingTaskToHelp(name, desc);
  }
  // Include an already-registered task in the help listing.
  function addExistingTaskToHelp(name, desc) {
    helpMessageTasks.push({ name, desc });
  }

  // Suppress most grunt output for the remainder of the run.
  grunt.registerTask('set-quiet-mode', () => {
    grunt.log.write('Running tasks');
    require('quiet-grunt');
  });

  grunt.registerTask('build-standalone', 'Build out/ (no checks, no WPT)', [
    'run:build-out',
    'run:copy-assets',
    'run:generate-version',
    'run:generate-listings',
  ]);
  grunt.registerTask('build-wpt', 'Build out/ (no checks)', [
    'run:build-out-wpt',
    'run:copy-assets-wpt',
    'run:autoformat-out-wpt',
    'run:generate-version',
    'run:generate-listings',
    'copy:out-wpt-generated',
    'copy:out-wpt-htmlfiles',
    'run:generate-wpt-cts-html',
  ]);
  grunt.registerTask('build-done-message', () => {
    process.stderr.write('\nBuild completed! Running checks/tests');
  });

  registerTaskAndAddToHelp('pre', 'Run all presubmit checks: standalone+wpt+typecheck+unittest+lint', [
    'set-quiet-mode',
    'clean',
    'build-standalone',
    'build-wpt',
    'run:build-out-node',
    'build-done-message',
    'ts:check',
    'run:presubmit',
    'run:unittest',
    'run:lint',
    'run:tsdoc-treatWarningsAsErrors',
  ]);
  registerTaskAndAddToHelp('standalone', 'Build standalone and typecheck', [
    'set-quiet-mode',
    'build-standalone',
    'build-done-message',
    'ts:check',
  ]);
  registerTaskAndAddToHelp('wpt', 'Build for WPT and typecheck', [
    'set-quiet-mode',
    'build-wpt',
    'build-done-message',
    'ts:check',
  ]);
  registerTaskAndAddToHelp('unittest', 'Build standalone, typecheck, and unittest', [
    'standalone',
    'run:unittest',
  ]);
  registerTaskAndAddToHelp('check', 'Just typecheck', [
    'set-quiet-mode',
    'ts:check',
  ]);
  registerTaskAndAddToHelp('serve', 'Serve out/ on 127.0.0.1:8080 (does NOT compile source)', ['run:serve']);
  registerTaskAndAddToHelp('fix', 'Fix lint and formatting', ['run:fix']);
  addExistingTaskToHelp('clean', 'Clean out/ and out-wpt/');

  // Default task: print a short menu of the tasks registered above.
  grunt.registerTask('default', '', () => {
    console.error('\nAvailable tasks (see grunt --help for info):');
    for (const { name, desc } of helpMessageTasks) {
      console.error(`$ grunt ${name}`);
      console.error(`  ${desc}`);
    }
  });
};

View File

@ -0,0 +1,26 @@
Copyright 2019 WebGPU CTS Contributors
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@ -0,0 +1,22 @@
# WebGPU Conformance Test Suite
This is the conformance test suite for WebGPU.
It tests the behaviors defined by the [WebGPU specification](https://gpuweb.github.io/gpuweb/).
The contents of this test suite are considered **normative**; implementations must pass
them to be WebGPU-conformant. Mismatches between the specification and tests are bugs.
This test suite can be embedded inside [WPT](https://github.com/web-platform-tests/wpt) or run in standalone.
## [Launch the standalone CTS runner / test plan viewer](https://gpuweb.github.io/cts/standalone/)
## Contributing
Please read the [introductory guidelines](docs/intro/README.md) before contributing.
Other documentation may be found in [`docs/`](docs/) and in the [helper index](https://gpuweb.github.io/cts/docs/tsdoc/) ([source](docs/helper_index.txt)).
Read [CONTRIBUTING.md](CONTRIBUTING.md) on licensing.
For realtime communication about WebGPU spec and test, join the
[#WebGPU:matrix.org room](https://app.element.io/#/room/#WebGPU:matrix.org)
on Matrix.

View File

@ -0,0 +1,21 @@
module.exports = function (api) {
api.cache(true);
return {
presets: ['@babel/preset-typescript'],
plugins: [
'const-enum',
[
'add-header-comment',
{
header: ['AUTO-GENERATED - DO NOT EDIT. Source: https://github.com/gpuweb/cts'],
},
],
],
compact: false,
// Keeps comments from getting hoisted to the end of the previous line of code.
// (Also keeps lines close to their original line numbers - but for WPT we
// reformat with prettier anyway.)
retainLines: true,
shouldPrintComment: val => !/eslint|prettier-ignore/.test(val),
};
};

View File

@ -0,0 +1,110 @@
// Note: VS Code's setting precedence is `.vscode/` > `cts.code-workspace` > global user settings.
{
"folders": [
{
"name": "cts",
"path": "."
},
{
"name": "webgpu",
"path": "src/webgpu"
}
],
"settings": {
"editor.defaultFormatter": "esbenp.prettier-vscode",
"editor.detectIndentation": false,
"editor.rulers": [100],
"editor.tabSize": 2,
"files.insertFinalNewline": true,
"files.trimFinalNewlines": true,
"files.trimTrailingWhitespace": true,
"files.exclude": {
"*.tmp.txt": true,
".gitignore": true,
".travis.yml": true,
".tscache": true,
"deploy_key.enc": true,
"node_modules": true,
"out": true,
"out-node": true,
"out-wpt": true,
"docs/tsdoc": true,
"package-lock.json": true
},
// Configure VSCode to use the right style when automatically adding imports on autocomplete.
"typescript.preferences.importModuleSpecifier": "relative",
"typescript.preferences.importModuleSpecifierEnding": "js",
"typescript.preferences.quoteStyle": "single"
},
"tasks": {
"version": "2.0.0",
"tasks": [
// Only supports "shell" and "process" tasks.
// https://code.visualstudio.com/docs/editor/multi-root-workspaces#_workspace-task-configuration
{
// Use "group": "build" instead of "test" so it's easy to access from cmd-shift-B.
"group": "build",
"label": "npm: test",
"detail": "Run all presubmit checks",
"type": "shell",
"command": "npm run test",
"problemMatcher": []
},
{
"group": "build",
"label": "npm: check",
"detail": "Just typecheck",
"type": "shell",
"command": "npm run check",
"problemMatcher": ["$tsc"]
},
{
"group": "build",
"label": "npm: standalone",
"detail": "Build standalone and typecheck",
"type": "shell",
"command": "npm run standalone",
"problemMatcher": []
},
{
"group": "build",
"label": "npm: wpt",
"detail": "Build for WPT and typecheck",
"type": "shell",
"command": "npm run wpt",
"problemMatcher": []
},
{
"group": "build",
"label": "npm: unittest",
"detail": "Build standalone, typecheck, and unittest",
"type": "shell",
"command": "npm run unittest",
"problemMatcher": []
},
{
"group": "build",
"label": "npm: tsdoc",
"detail": "Build docs/tsdoc/",
"type": "shell",
"command": "npm run tsdoc",
"problemMatcher": []
},
{
"group": "build",
"label": "grunt: run:lint",
"detail": "Run eslint",
"type": "shell",
"command": "npx grunt run:lint",
"problemMatcher": ["$eslint-stylish"]
},
]
}
}

View File

@ -0,0 +1,43 @@
# Building
Building the project is not usually needed for local development.
However, for exports to WPT, or deployment (https://gpuweb.github.io/cts/),
files can be pre-generated.
The project builds into two directories:
- `out/`: Built framework and test files, needed to run standalone or command line.
- `out-wpt/`: Build directory for export into WPT. Contains:
- An adapter for running WebGPU CTS tests under WPT
- A copy of the needed files from `out/`
- A copy of any `.html` test cases from `src/`
To build and run all pre-submit checks (including type and lint checks and
unittests), use:
```sh
npm test
```
For checks only:
```sh
npm run check
```
For a quicker iterative build:
```sh
npm run standalone
```
## Run
To serve the built files (rather than using the dev server), run `npx grunt serve`.
## Export to WPT
Run `npm run wpt`.
Copy (or symlink) the `out-wpt/` directory as the `webgpu/` directory in your
WPT checkout or your browser's "internal" WPT test directory.

View File

@ -0,0 +1,24 @@
# Running the CTS on Deno
Since version 1.8, Deno experimentally implements the WebGPU API out of the box.
You can use the `./tools/run_deno` script to run the CTS in Deno. To do this you
will first need to install Deno: [stable](https://deno.land#installation), or
build the main branch from source
(`cargo install --git https://github.com/denoland/deno --bin deno`).
On macOS and recent Linux, you can just run `./tools/run_deno` as is. On Windows and
older Linux releases you will need to run
`deno run --unstable --allow-read --allow-write --allow-env ./tools/run_deno`.
## Usage
```
Usage:
tools/run_deno [OPTIONS...] QUERIES...
tools/run_deno 'unittests:*' 'webgpu:buffers,*'
Options:
--verbose Print result/log of every test as it runs.
--debug Include debug messages in logging.
--print-json Print the complete result JSON in the output.
--expectations Path to expectations file.
```

View File

@ -0,0 +1,516 @@
# Floating Point Primer
This document is meant to be a primer of the concepts related to floating point
numbers that are needed to be understood when working on tests in WebGPU's CTS.
WebGPU's CTS is responsible for testing if implementations of WebGPU are
conformant to the spec, and thus interoperable with each other.
Floating point math makes up a significant portion of the WGSL spec, and has
many subtle corner cases to get correct.
Additionally, floating point math, unlike integer math, is broadly not exact, so
how inaccurate a calculation is allowed to be is required to be stated in the
spec and tested in the CTS, as opposed to testing for a singular correct
response.
Thus, the WebGPU CTS has a significant amount of machinery around how to
correctly test floating point expectations in a fluent manner.
## Floating Point Numbers
For the context of this discussion floating point numbers, fp for short, are
single precision IEEE floating point numbers, f32 for short.
Details of how this format works are discussed as needed below, but for a more
involved discussion, please see the references in the Resources sections.
Additionally, in the Appendix there is a table of interesting/common values that
are often referenced in tests or this document.
*In the future support for f16 and abstract floats will be added to the CTS, and
this document will need to be updated.*
Floating point numbers are effectively lossy compression of the infinite number
of possible values over their range down to 32-bits of distinct points.
This means that not all numbers in the range can be exactly represented as a f32.
For example, the integer `1` is exactly represented as `0x3f800000`, but the next
nearest number `0x3f800001` is `1.00000011920928955`.
So any number between `1` and `1.00000011920928955` is not exactly represented
as a f32 and instead is approximated as either `1` or `1.00000011920928955`.
When a number X is not exactly represented by a f32 value, there are normally
two neighbouring numbers that could reasonably represent X: the nearest f32
value above X, and the nearest f32 value below X. Which of these values gets
used is dictated by the rounding mode being used, which may be something like
always round towards 0 or go to the nearest neighbour, or something else
entirely.
The process of converting numbers between precisions, like non-f32 to f32, is
called quantization. WGSL does not prescribe a specific rounding mode when
quantizing, so either of the neighbouring values is considered valid
when converting a non-exactly representable value to f32. This has significant
implications on the CTS that are discussed later.
From here on, we assume you are familiar with the internal structure of a f32
value: a sign bit, a biased exponent, and a mantissa. For reference, see
[float32 on Wikipedia](https://en.wikipedia.org/wiki/Single-precision_floating-point_format)
In the f32 format as described above, there are two possible zero values, one
with all bits being 0, called positive zero, and one all the same except with
the sign bit being 1, called negative zero.
For WGSL, and thus the CTS's purposes, these values are considered equivalent.
TypeScript, which the CTS is written in, treats all zeros as positive zeros,
unless you explicitly escape hatch to differentiate between them, so most of the
time there being two zeros doesn't materially affect code.
### Normals
Normal numbers are floating point numbers whose biased exponent is not all 0s or
all 1s. For WGSL these numbers behave as you expect for floating point values
with no interesting caveats.
### Subnormals
Subnormal numbers are numbers whose biased exponent is all 0s, also called
denorms.
These are the closest numbers to zero, both positive and negative, and fill in
the gap between the normal numbers with smallest magnitude, and 0.
Some devices, for performance reasons, do not handle operations on the
subnormal numbers, and instead treat them as being zero, this is called *flush
to zero* or FTZ behaviour.
This means in the CTS that when a subnormal number is consumed or produced by an
operation, an implementation may choose to replace it with zero.
Like the rounding mode for quantization, this adds significant complexity to the
CTS, which will be discussed later.
### Inf & NaNs
Floating point numbers include positive and negative infinity to represent
values that are out of the bounds supported by the current precision.
Implementations may assume that infinities are not present. When an evaluation
would produce an infinity, an undefined value is produced instead.
Additionally, when a calculation would produce a finite value outside the
bounds of the current precision, the implementation may convert that value to
either an infinity with same sign, or the min/max representable value as
appropriate.
The CTS encodes the least restrictive interpretation of the rules in the spec,
i.e. assuming someone has made a slightly adversarial implementation that always
chooses the thing with the least accuracy.
This means that the above rules about infinities combine to say that any time an
out of bounds value is seen, any finite value is acceptable afterwards.
This is because the out of bounds value may be converted to an infinity and then
an undefined value can be used instead of the infinity.
This is actually a significant boon for the CTS implementation, because it short
circuits a bunch of complexity about clamping to edge values and handling
infinities.
Signaling NaNs are treated as quiet NaNs in the WGSL spec. And quiet NaNs have
the same "may-convert-to-undefined-value" behaviour that infinities have, so for
the purpose of the CTS they are handled by the infinite/out of bounds logic
normally.
## Notation/Terminology
When discussing floating point values in the CTS, there are a few terms used
with precise meanings, which will be elaborated here.
Additionally, any specific notation used will be specified here to avoid
confusion.
### Operations
The CTS tests for the proper execution of f32 builtins, i.e. sin, sqrt, abs,
etc, and expressions, i.e. *, /, <, etc. These collectively can be referred to
as f32 operations.
Operations, which can be thought of as mathematical functions, are mappings from
a set of inputs to a set of outputs.
Denoted `f(x, y) = X`, where f is a placeholder or the name of the operation,
lower case variables are the inputs to the function, and uppercase variables are
the outputs of the function.
Operations have one or more inputs and an output. Being a f32 operation means
that the primary space for input and output values is f32, but there is some
flexibility in this definition. For example operations with values being
restricted to a subset of integers that are representable as f32 are often
referred to as being f32 based.
Values are generally floats, integers, booleans, vector, and matrices. Consult
the WGSL spec for the exact list of types and their definitions.
For composite outputs where there are multiple values being returned, there is a
single result value made of structured data. Whereas inputs handle this by
having multiple input parameters.
Some examples of different types of operations:
`multiplication(x, y) = X`, which represents the WGSL expression `x * y`, takes
in f32 values, `x` and `y`, and produces a f32 value `X`.
`lessThan(x, y) = X`, which represents the WGSL expression `x < y`, again takes
in f32 values, but in this case returns a boolean value.
`ldexp(x, y) = X`, which builds a f32 value, takes in a f32 value `x` and a
restricted integer `y`.
### Domain, Range, and Intervals
For an operation `f(x) = X`, the interval of valid values for the input, `x`, is
called the *domain*, and the interval for valid results, `X`, is called the
*range*.
An interval, `[a, b]`, is a set of real numbers that contains `a`, `b`, and all
the real numbers between them.
Open-ended intervals, i.e. ones that don't include `a` and/or `b`, are avoided,
and are called out explicitly when they occur.
The convention in this doc and the CTS code is that `a <= b`, so `a` can be
referred to as the beginning of the interval and `b` as the end of the interval.
When talking about intervals, this doc and the code endeavours to avoid using
the term **range** to refer to the span of values that an interval covers,
instead using the term bounds to avoid confusion of terminology around output of
operations.
## Accuracy
As mentioned above floating point numbers are not able to represent all the
possible values over their bounds, but instead represent discrete values in that
interval, and approximate the remainder.
Additionally, floating point numbers are not evenly distributed over the real
number line, but instead are clustered closer together near zero, and further
apart as their magnitudes grow.
When discussing operations on floating point numbers, there is often reference
to a true value. This is the value that given no performance constraints and
infinite precision you would get, e.g. `acos(-1) = π`, where π has infinite
digits of precision.
For the CTS it is often sufficient to calculate the true value using TypeScript,
since its native number format is higher precision (double-precision/f64), and
all f32 values can be represented in it.
The true value is sometimes representable exactly as a f32 value, but often is
not.
Additionally, many operations are implemented using approximations from
numerical analysis, where there is a tradeoff between the precision of the
result and the cost.
Thus, the spec specifies what the accuracy constraints for specific operations
is, how close to truth an implementation is required to be, to be
considered conformant.
There are 5 different ways that accuracy requirements are defined in the spec:
1. *Exact*
This is the situation where the true value for an operation is always
expected to be exactly representable. This doesn't happen for any
of the operations that return floating point values, but does occur for
logical operations that return boolean values.
2. *Correctly Rounded*
For the case that the true value is exactly representable as a f32, this is
the equivalent of exactly from above. In the event that the true value is not
exact, then the acceptable answer for most numbers is either the nearest f32
above or the nearest f32 below the true value.
For values near the subnormal range, e.g. close to zero, this becomes more
complex, since an implementation may FTZ at any point. So if the exact
solution is subnormal or either of the neighbours of the true value are
subnormal, zero becomes a possible result, thus the acceptance interval is
wider than naively expected.
3. *Absolute Error*
This type of accuracy specifies an error value, ε, and the calculated result
is expected to be within that distance from the true value, i.e.
`[ X - ε, X + ε ]`.
The main drawback with this manner of specifying accuracy is that it doesn't
scale with the level of precision in floating point numbers themselves at a
specific value. Thus, it tends to be only used for specifying accuracy over
specific limited intervals, i.e. [-π, π].
4. *Units of Least Precision (ULP)*
The solution to the issue of not scaling with precision of floating point is
to use units of least precision.
ULP(X) is min (b-a) over all pairs (a,b) of representable floating point
numbers such that (a <= X <= b and a =/= b). For a more formal discussion of
ULP see
[On the definition of ulp(x)](https://hal.inria.fr/inria-00070503/document).
n * ULP or nULP means `[X - n * ULP @ X, X + n * ULP @ X]`.
5. *Inherited*
When an operation's accuracy is defined in terms of other operations, then
its accuracy is said to be inherited. Handling of inherited accuracies is
one of the main driving factors in the design of testing framework, so will
need to be discussed in detail.
## Acceptance Intervals
The first four accuracy types; Exact, Correctly Rounded, Absolute Error, and
ULP, sometimes called simple accuracies, can be defined in isolation from each
other, and by association can be implemented using relatively independent
implementations.
The original implementation of the floating point framework did this as it was
being built out, but ran into difficulties when defining the inherited
accuracies.
For examples, `tan(x) inherits from sin(x)/cos(x)`, one can take the defined
rules and manually build up a bespoke solution for checking the results, but
this is tedious, error-prone, and doesn't allow for code re-use.
Instead, it would be better if there was a single conceptual framework that one
can express all the 'simple' accuracy requirements in, and then have a mechanism
for composing them to define inherited accuracies.
In the WebGPU CTS this is done via the concept of acceptance intervals, which is
derived from a similar concept in the Vulkan CTS, though implemented
significantly differently.
The core of this idea is that each of different accuracy types can be integrated
into the definition of the operation, so that instead of transforming an input
from the domain to a point in the range, the operation is producing an interval
in the range, that is the acceptable values an implementation may emit.
The simple accuracies can be defined as follows:
1. *Exact*
`f(x) => [X, X]`
2. *Correctly Rounded*
If `X` is precisely defined as a f32
`f(x) => [X, X]`
otherwise,
`[a, b]` where `a` is the largest representable number with `a <= X`, and `b`
is the smallest representable number with `X <= b`
3. *Absolute Error*
`f(x) => [ X - ε, X + ε ]`, where ε is the absolute error value
4. **ULP Error**
`f(x) = X => [X - n*ULP(X), X + n*ULP(X)]`
As defined, these definitions handle mapping from a point in the domain into an
interval in the range.
This is insufficient for implementing inherited accuracies, since inheritance
sometimes involve mapping domain intervals to range intervals.
Here we use the convention for naturally extending a function on real numbers
into a function on intervals of real numbers, i.e. `f([a, b]) = [A, B]`.
Given that floating point numbers have a finite number of precise values for any
given interval, one could implement just running the accuracy computation for
every point in the interval and then spanning together the resultant intervals.
That would be very inefficient though and make your reviewer sad to read.
For mapping intervals to intervals the key insight is that we only need to be
concerned with the extrema of the operation in the interval, since the
acceptance interval is the bounds of the possible outputs.
In more precise terms:
```
f(x) => X, x = [a, b] and X = [A, B]
X = [min(f(x)), max(f(x))]
X = [min(f([a, b])), max(f([a, b]))]
X = [f(m), f(M)]
```
where m and M are in `[a, b]`, `m <= M`, and produce the min and max results
for `f` on the interval, respectively.
So how do we find the minima and maxima for our operation in the domain?
The common general solution for this requires using calculus to calculate the
derivative of `f`, `f'`, and then find the zeroes `f'` to find inflection
points of `f`.
This solution wouldn't be sufficient for all builtins, i.e. `step` which is not
differentiable at 'edge' values.
Thankfully we do not need a general solution for the CTS, since all the builtin
operations are defined in the spec, so `f` is from a known set of options.
These operations can be divided into two broad categories: monotonic, and
non-monotonic, with respect to an interval.
The monotonic operations are ones that preserve the order of inputs in their
outputs (or reverse it). Their graph only ever decreases or increases,
never changing from one or the other, though it can have flat sections.
The non-monotonic operations are ones whose graph would have both regions of
increase and decrease.
The monotonic operations, when mapping an interval to an interval, are simple to
handle, since the extrema are guaranteed to be the ends of the domain, `a` and `b`.
So `f([a, b])` = `[f(a), f(b)]` or `[f(b), f(a)]`. We could figure out if `f` is
increasing or decreasing beforehand to determine if it should be `[f(a), f(b)]`
or `[f(b), f(a)]`.
It is simpler to just use min & max to have an implementation that is agnostic
to the details of `f`.
```
A = f(a), B = f(b)
X = [min(A, B), max(A, B)]
```
The non-monotonic functions that we need to handle for interval-to-interval
mappings are more complex. Thankfully, they are a small number of the overall
operations that need to be handled, since they are only the operations that are
used in an inherited accuracy and take in the output of another operation as
part of that inherited accuracy.
So in the CTS we just have bespoke implementations for each of them.
Part of the operation definition in the CTS is a function that takes in the
domain interval, and returns a sub-interval such that the subject function is
monotonic over that sub-interval, and hence the function's minima and maxima are
at the ends.
This adjusted domain interval can then be fed through the same machinery as the
monotonic functions.
### Inherited Accuracy
So with all of that background out of the way, we can now define an inherited
accuracy in terms of acceptance intervals.
The crux of this is the insight that the range of one operation can become the
domain of another operation to compose them together.
And since we have defined how to do this interval to interval mapping above,
transforming things becomes mechanical and thus implementable in reusable code.
When talking about inherited accuracies `f(x) => g(x)` is used to denote that
`f`'s accuracy is a defined as `g`.
An example to illustrate inherited accuracies:
```
tan(x) => sin(x)/cos(x)
sin(x) => [sin(x) - 2^-11, sin(x) + 2^-11]
cos(x) => [cos(x) - 2^-11, cos(x) + 2^-11]
x/y => [x/y - 2.5 * ULP(x/y), x/y + 2.5 * ULP(x/y)]
```
`sin(x)` and `cos(x)` are non-monotonic, so calculating out a closed generic
form over an interval is a pain, since the min and max vary depending on the
value of x. Let's isolate this to a single point, so you don't have to read
literally pages of expanded intervals.
```
x = π/2
sin(π/2) => [sin(π/2) - 2^-11, sin(π/2) + 2^-11]
         => [0 - 2^-11, 0 + 2^-11]
         => [-0.000488.., 0.000488...]
cos(π/2) => [cos(π/2) - 2^-11, cos(π/2) + 2^-11]
=> [-0.500488, -0.499511...]
tan(π/2) => sin(π/2)/cos(π/2)
=> [-0.000488.., 0.000488...]/[-0.500488..., -0.499511...]
=> [min({-0.000488.../-0.500488..., -0.000488.../-0.499511..., ...}),
    max({-0.000488.../-0.500488..., -0.000488.../-0.499511..., ...})]
=> [0.000488.../-0.499511..., -0.000488.../-0.499511...]
=> [-0.0009775171, 0.0009775171]
```
For clarity this has omitted a bunch of complexity around FTZ behaviours, and
that these operations are only defined for specific domains, but the high-level
concepts hold.
For each of the inherited operations we could implement a manually written out
closed form solution, but that would be quite error-prone and not be
re-using code between builtins.
Instead, the CTS takes advantage of the fact in addition to testing
implementations of `tan(x)` we are going to be testing implementations of
`sin(x)`, `cos(x)` and `x/y`, so there should be functions to generate
acceptance intervals for those operations.
The `tan(x)` acceptance interval can be constructed by generating the acceptance
intervals for `sin(x)`, `cos(x)` and `x/y` via function calls and composing the
results.
This algorithmically looks something like this:
```
tan(x):
Calculate sin(x) interval
Calculate cos(x) interval
Calculate sin(x) result divided by cos(x) result
Return division result
```
# Appendix
### Significant f32 Values
| Name | Decimal (~) | Hex | Sign Bit | Exponent Bits | Significand Bits |
| ---------------------- | --------------: | ----------: | -------: | ------------: | ---------------------------: |
| Negative Infinity | -∞ | 0xff80 0000 | 1 | 1111 1111 | 0000 0000 0000 0000 0000 000 |
| Min Negative Normal | -3.40282346E38 | 0xff7f ffff | 1 | 1111 1110 | 1111 1111 1111 1111 1111 111 |
| Max Negative Normal | -1.1754943E-38 | 0x8080 0000 | 1 | 0000 0001 | 0000 0000 0000 0000 0000 000 |
| Min Negative Subnormal | -1.1754942E-38 | 0x807f ffff | 1 | 0000 0000 | 1111 1111 1111 1111 1111 111 |
| Max Negative Subnormal | -1.4012984E-45 | 0x8000 0001 | 1 | 0000 0000 | 0000 0000 0000 0000 0000 001 |
| Negative Zero | -0 | 0x8000 0000 | 1 | 0000 0000 | 0000 0000 0000 0000 0000 000 |
| Positive Zero | 0 | 0x0000 0000 | 0 | 0000 0000 | 0000 0000 0000 0000 0000 000 |
| Min Positive Subnormal | 1.4012984E-45 | 0x0000 0001 | 0 | 0000 0000 | 0000 0000 0000 0000 0000 001 |
| Max Positive Subnormal | 1.1754942E-38 | 0x007f ffff | 0 | 0000 0000 | 1111 1111 1111 1111 1111 111 |
| Min Positive Normal | 1.1754943E-38 | 0x0080 0000 | 0 | 0000 0001 | 0000 0000 0000 0000 0000 000 |
| Max Positive Normal | 3.40282346E38 | 0x7f7f ffff | 0 | 1111 1110 | 1111 1111 1111 1111 1111 111 |
| Positive Infinity | ∞ | 0x7f80 0000 | 0 | 1111 1111 | 0000 0000 0000 0000 0000 000 |
# Resources
- [WebGPU Spec](https://www.w3.org/TR/webgpu/)
- [WGSL Spec](https://www.w3.org/TR/WGSL/)
- [float32 on Wikipedia](https://en.wikipedia.org/wiki/Single-precision_floating-point_format)
- [IEEE-754 Floating Point Converter](https://www.h-schmidt.net/FloatConverter/IEEE754.html)
- [IEEE 754 Calculator](http://weitz.de/ieee/)
- [Keisan High Precision Calculator](https://keisan.casio.com/calculator)
- [On the definition of ulp(x)](https://hal.inria.fr/inria-00070503/document)

View File

@ -0,0 +1,92 @@
<!--
View this file in Typedoc!
- At https://gpuweb.github.io/cts/docs/tsdoc/
- Or locally:
- npm run tsdoc
- npm start
- http://localhost:8080/docs/tsdoc/
This file is parsed as a tsdoc.
-->
## Index of Test Helpers
This index is a quick-reference of helper functions in the test suite.
Use it to determine whether you can reuse a helper, instead of writing new code,
to improve readability and reviewability.
Whenever a new generally-useful helper is added, it should be indexed here.
**See linked documentation for full helper listings.**
- {@link common/framework/params_builder!CaseParamsBuilder} and {@link common/framework/params_builder!SubcaseParamsBuilder}:
Combinatorial generation of test parameters. They are iterated by the test framework at runtime.
See `examples.spec.ts` for basic examples of how this behaves.
- {@link common/framework/params_builder!CaseParamsBuilder}:
`ParamsBuilder` for adding "cases" to a test.
- {@link common/framework/params_builder!CaseParamsBuilder#beginSubcases}:
"Finalizes" the `CaseParamsBuilder`, returning a `SubcaseParamsBuilder`.
- {@link common/framework/params_builder!SubcaseParamsBuilder}:
`ParamsBuilder` for adding "subcases" to a test.
### Fixtures
(Uncheck the "Inherited" box to hide inherited methods from documentation pages.)
- {@link common/framework/fixture!Fixture}: Base fixture for all tests.
- {@link webgpu/gpu_test!GPUTest}: Base fixture for WebGPU tests.
- {@link webgpu/api/validation/validation_test!ValidationTest}: Base fixture for WebGPU validation tests.
- {@link webgpu/shader/validation/shader_validation_test!ShaderValidationTest}: Base fixture for WGSL shader validation tests.
- {@link webgpu/idl/idl_test!IDLTest}:
Base fixture for testing the exposed interface is correct (without actually using WebGPU).
### WebGPU Helpers
- {@link webgpu/capability_info}: Structured information about texture formats, binding types, etc.
- {@link webgpu/constants}:
Constant values (needed anytime a WebGPU constant is needed outside of a test function).
- {@link webgpu/util/buffer}: Helpers for GPUBuffers.
- {@link webgpu/util/texture}: Helpers for GPUTextures.
- {@link webgpu/util/unions}: Helpers for various union typedefs in the WebGPU spec.
- {@link webgpu/util/math}: Helpers for common math operations.
- {@link webgpu/util/check_contents}: Check the contents of TypedArrays, with nice messages.
Also can be composed with {@link webgpu/gpu_test!GPUTest#expectGPUBufferValuesPassCheck}, used to implement
GPUBuffer checking helpers in GPUTest.
- {@link webgpu/util/conversion}: Numeric encoding/decoding for float/unorm/snorm values, etc.
- {@link webgpu/util/copy_to_texture}:
Helper class for copyToTexture test suites for execution copy and check results.
- {@link webgpu/util/color_space_conversion}:
Helper functions to do color space conversion. The algorithm is the same as defined in
CSS Color Module Level 4.
- {@link webgpu/util/create_elements}:
Helpers for creating web elements like HTMLCanvasElement, OffscreenCanvas, etc.
- {@link webgpu/util/shader}: Helpers for creating fragment shader based on intended output values, plainType, and componentCount.
- {@link webgpu/util/texture/base}: General texture-related helpers.
- {@link webgpu/util/texture/data_generation}: Helper for generating dummy texture data.
- {@link webgpu/util/texture/layout}: Helpers for working with linear image data
(like in copyBufferToTexture, copyTextureToBuffer, writeTexture).
- {@link webgpu/util/texture/subresource}: Helpers for working with texture subresource ranges.
- {@link webgpu/util/texture/texel_data}: Helpers encoding/decoding texel formats.
- {@link webgpu/util/texture/texel_view}: Helper class to create and view texture data through various representations.
- {@link webgpu/util/texture/texture_ok}: Helpers for checking texture contents.
- {@link webgpu/shader/types}: Helpers for WGSL data types.
- {@link webgpu/shader/execution/expression/expression}: Helpers for WGSL expression execution tests.
- {@link webgpu/web_platform/util}: Helpers for web platform features (e.g. video elements).
### General Helpers
- {@link common/framework/resources}: Provides the path to the `resources/` directory.
- {@link common/util/navigator_gpu}: Finds and returns the `navigator.gpu` object or equivalent.
- {@link common/util/util}: Miscellaneous utilities.
- {@link common/util/util!assert}: Assert a condition, otherwise throw an exception.
- {@link common/util/util!unreachable}: Assert unreachable code.
- {@link common/util/util!assertReject}, {@link common/util/util!resolveOnTimeout},
{@link common/util/util!rejectOnTimeout},
{@link common/util/util!raceWithRejectOnTimeout}, and more.
- {@link common/util/collect_garbage}:
Attempt to trigger garbage collection, for testing that garbage collection is not observable.
- {@link common/util/preprocessor}: A simple template-based, non-line-based preprocessor,
implementing if/elif/else/endif. Possibly useful for WGSL shader generation.
- {@link common/util/timeout}: Use this instead of `setTimeout`.
- {@link common/util/types}: Type metaprogramming helpers.

View File

@ -0,0 +1,97 @@
# Test Implementation
Concepts important to understand when writing tests. See existing tests for examples to copy from.
## Test fixtures
Most tests can use one of the several common test fixtures:
- `Fixture`: Base fixture, provides core functions like `expect()`, `skip()`.
- `GPUTest`: Wraps every test in error scopes. Provides helpers like `expectContents()`.
- `ValidationTest`: Extends `GPUTest`, provides helpers like `expectValidationError()`, `getErrorTextureView()`.
- Or create your own. (Often not necessary - helper functions can be used instead.)
Test fixtures or helper functions may be defined in `.spec.ts` files, but if used by multiple
test files, should be defined in separate `.ts` files (without `.spec`) alongside the files that
use them.
### GPUDevices in tests
`GPUDevice`s are largely stateless (except for `lost`-ness, error scope stack, and `label`).
This allows the CTS to reuse one device across multiple test cases using the `DevicePool`,
which provides `GPUDevice` objects to tests.
Currently, there is one `GPUDevice` with the default descriptor, and
a cache of several more, for devices with additional capabilities.
Devices in the `DevicePool` are automatically removed when certain things go wrong.
Later, there may be multiple `GPUDevice`s to allow multiple test cases to run concurrently.
## Test parameterization
The CTS provides helpers (`.params()` and friends) for creating large cartesian products of test parameters.
These generate "test cases" further subdivided into "test subcases".
See `basic,*` in `examples.spec.ts` for examples, and the [helper index](./helper_index.txt)
for a list of capabilities.
Test parameterization should be applied liberally to ensure the maximum coverage
possible within reasonable time. You can skip some with `.filter()`. And remember: computers are
pretty fast - thousands of test cases can be reasonable.
Use existing lists of parameters values (such as
[`kTextureFormats`](https://github.com/gpuweb/cts/blob/0f38b85/src/suites/cts/capability_info.ts#L61),
to parameterize tests), instead of making your own list. Use the info tables (such as
`kTextureFormatInfo`) to define and retrieve information about the parameters.
## Asynchrony in tests
Since there are no synchronous operations in WebGPU, almost every test is asynchronous in some
way. For example:
- Checking the result of a readback.
- Capturing the result of a `popErrorScope()`.
That said, test functions don't always need to be `async`; see below.
### Checking asynchronous errors/results
Validation is inherently asynchronous (`popErrorScope()` returns a promise). However, the error
scope stack itself is synchronous - operations immediately after a `popErrorScope()` are outside
that error scope.
As a result, tests can assert things like validation errors/successes without having an `async`
test body.
**Example:**
```typescript
t.expectValidationError(() => {
device.createThing();
});
```
does:
- `pushErrorScope('validation')`
- `popErrorScope()` and "eventually" check whether it returned an error.
**Example:**
```typescript
t.expectGPUBufferValuesEqual(srcBuffer, expectedData);
```
does:
- copy `srcBuffer` into a new mappable buffer `dst`
- `dst.mapReadAsync()`, and "eventually" check what data it returned.
Internally, this is accomplished via an "eventual expectation": `eventualAsyncExpectation()`
takes an async function, calls it immediately, and stores off the resulting `Promise` to
automatically await at the end before determining the pass/fail state.
### Asynchronous parallelism
A side effect of test asynchrony is that it's possible for multiple tests to be in flight at
once. We do not currently do this, but it will eventually be an option to run `N` tests in
"parallel", for faster local test runs.

View File

@ -0,0 +1,99 @@
# Introduction
These documents contains guidelines for contributors to the WebGPU CTS (Conformance Test Suite)
on how to write effective tests, and on the testing philosophy to adopt.
The WebGPU CTS is arguably more important than the WebGPU specification itself, because
it is what forces implementations to be interoperable by checking that they conform to the specification.
However, writing a CTS is hard and requires a lot of effort to reach good coverage.
More than a collection of tests like regular end2end and unit tests for software artifacts, a CTS
needs to be exhaustive. Contrast for example the WebGL2 CTS with the ANGLE end2end tests: they
cover the same functionality (WebGL 2 / OpenGL ES 3) but are structured very differently:
- ANGLE's test suite has one or two tests per functionality to check it works correctly, plus
regression tests and special tests to cover implementation details.
- WebGL2's CTS can have thousands of tests per API aspect to cover every combination of
parameters (and global state) used by an operation.
Below are guidelines based on our collective experience with graphics API CTSes like WebGL's.
They are expected to evolve over time and have exceptions, but should give a general idea of what
to do.
## Contributing
Testing tasks are tracked in the [CTS project tracker](https://github.com/orgs/gpuweb/projects/3).
Go here if you're looking for tasks, or if you have a test idea that isn't already covered.
If contributing conformance tests, the directory you'll work in is [`src/webgpu/`](../src/webgpu/).
This directory is organized according to the goal of the test (API validation behavior vs
actual results) and its target (API entry points and spec areas, e.g. texture sampling).
The contents of a test file (`src/webgpu/**/*.spec.ts`) are twofold:
- Documentation ("test plans") on what tests do, how they do it, and what cases they cover.
Some test plans are fully or partially unimplemented:
they either contain "TODO" in a description or are `.unimplemented()`.
- Actual tests.
**Please read the following short documents before contributing.**
### 0. [Developing](developing.md)
- Reviewers should also read [Review Requirements](../reviews.md).
### 1. [Life of a Test Change](life_of.md)
### 2. [Adding or Editing Test Plans](plans.md)
### 3. [Implementing Tests](tests.md)
## [Additional Documentation](../)
## Examples
### Operation testing of vertex input id generation
This section provides an example of the planning process for a test.
It has not been refined into a set of final test plan descriptions.
(Note: this predates the actual implementation of these tests, so doesn't match the actual tests.)
Somewhere under the `api/operation` node are tests checking that running `GPURenderPipelines` on
the device using the `GPURenderEncoderBase.draw` family of functions works correctly. Render
pipelines are composed of several stages that are mostly independent so they can be split in
several parts such as `vertex_input`, `rasterization`, `blending`.
Vertex input itself has several parts that are mostly separate in hardware:
- generation of the vertex and instance indices to run for this draw
- fetching of vertex data from vertex buffers based on these indices
- conversion from the vertex attribute `GPUVertexFormat` to the datatype for the input variable
in the shader
Each of these are tested separately and have cases for each combination of the variables that may
affect them. This means that `api/operation/render/vertex_input/id_generation` checks that the
correct operation is performed for the cartesian product of all the following dimensions:
- for encoding in a `GPURenderPassEncoder` or a `GPURenderBundleEncoder`
- whether the draw is direct or indirect
- whether the draw is indexed or not
- for various values of the `firstInstance` argument
- for various values of the `instanceCount` argument
- if the draw is not indexed:
- for various values of the `firstVertex` argument
- for various values of the `vertexCount` argument
- if the draw is indexed:
- for each `GPUIndexFormat`
- for various values of the indices in the index buffer including the primitive restart values
- for various values for the `offset` argument to `setIndexBuffer`
- for various values of the `firstIndex` argument
- for various values of the `indexCount` argument
- for various values of the `baseVertex` argument
"Various values" above mean several small values, including `0` and the second smallest valid
value to check for corner cases, as well as some large value.
An instance of the test sets up a `draw*` call based on the parameters, using point rendering and
a fragment shader that outputs to a storage buffer. After the draw the test checks the content of
the storage buffer to make sure all expected vertex shader invocations, and only these ones, have
been generated.

Binary file not shown.

After

Width:  |  Height:  |  Size: 2.0 KiB

View File

@ -0,0 +1,134 @@
# Developing
The WebGPU CTS is written in TypeScript.
## Setup
After checking out the repository and installing node/npm, run:
```sh
npm ci
```
Before uploading, you can run pre-submit checks (`npm test`) to make sure it will pass CI.
Use `npm run fix` to fix linting issues.
`npm run` will show available npm scripts.
Some more scripts can be listed using `npx grunt`.
## Dev Server
To start the development server, use:
```sh
npm start
```
Then, browse to the standalone test runner at the printed URL.
The server will generate and compile code on the fly, so no build step is necessary.
Only a reload is needed to see saved changes.
(TODO: except, currently, `README.txt` and file `description` changes won't be reflected in
the standalone runner.)
Note: The first load of a test suite may take some time as generating the test suite listing can
take a few seconds.
## Standalone Test Runner / Test Plan Viewer
**The standalone test runner also serves as a test plan viewer.**
(This can be done in a browser without WebGPU support.)
You can use this to preview how your test plan will appear.
You can view different suites (webgpu, unittests, stress, etc.) or different subtrees of
the test suite.
- `http://localhost:8080/standalone/` (defaults to `?runnow=0&worker=0&debug=0&q=webgpu:*`)
- `http://localhost:8080/standalone/?q=unittests:*`
- `http://localhost:8080/standalone/?q=unittests:basic:*`
The following url parameters change how the harness runs:
- `runnow=1` runs all matching tests on page load.
- `debug=1` enables verbose debug logging from tests.
- `worker=1` runs the tests on a Web Worker instead of the main thread.
- `power_preference=low-power` runs most tests passing `powerPreference: low-power` to `requestAdapter`
- `power_preference=high-performance` runs most tests passing `powerPreference: high-performance` to `requestAdapter`
### Web Platform Tests (wpt) - Ref Tests
You can inspect the actual and reference pages for web platform reftests in the standalone
runner by navigating to them. For example, by loading:
- `http://localhost:8080/out/webgpu/web_platform/reftests/canvas_clear.https.html`
- `http://localhost:8080/out/webgpu/web_platform/reftests/ref/canvas_clear-ref.html`
You can also run a minimal ref test runner.
- open 2 terminals / command lines.
- in one, `npm start`
- in the other, `node tools/run_wpt_ref_tests <path-to-browser-executable> [name-of-test]`
Without `[name-of-test]` all ref tests will be run. `[name-of-test]` is just a simple check for
substring so passing in `rgba` will run every test with `rgba` in its filename.
Examples:
MacOS
```
# Chrome
node tools/run_wpt_ref_tests /Applications/Google\ Chrome\ Canary.app/Contents/MacOS/Google\ Chrome\ Canary
```
Windows
```
# Chrome
node .\tools\run_wpt_ref_tests "C:\Users\your-user-name\AppData\Local\Google\Chrome SxS\Application\chrome.exe"
```
## Editor
Since this project is written in TypeScript, it integrates best with
[Visual Studio Code](https://code.visualstudio.com/).
This is optional, but highly recommended: it automatically adds `import` lines and
provides robust completions, cross-references, renames, error highlighting,
deprecation highlighting, and type/JSDoc popups.
Open the `cts.code-workspace` workspace file to load settings convenient for this project.
You can make local configuration changes in `.vscode/`, which is untracked by Git.
## Pull Requests
When opening a pull request, fill out the PR checklist and attach the issue number.
If an issue hasn't been opened, find the draft issue on the
[project tracker](https://github.com/orgs/gpuweb/projects/3) and choose "Convert to issue":
![convert to issue button screenshot](convert_to_issue.png)
Opening a pull request will automatically notify reviewers.
To make the review process smoother, once a reviewer has started looking at your change:
- Avoid major additions or changes that would be best done in a follow-up PR.
- Avoid rebases (`git rebase`) and force pushes (`git push -f`). These can make
it difficult for reviewers to review incremental changes as GitHub often cannot
view a useful diff across a rebase. If it's necessary to resolve conflicts
with upstream changes, use a merge commit (`git merge`) and don't include any
consequential changes in the merge, so a reviewer can skip over merge commits
when working through the individual commits in the PR.
- When you address a review comment, mark the thread as "Resolved".
Pull requests will (usually) be landed with the "Squash and merge" option.
### TODOs
The word "TODO" refers to missing test coverage. It may only appear inside file/test descriptions
and README files (enforced by linting).
To use comments to refer to TODOs inside the description, use a backreference, e.g., in the
description, `TODO: Also test the FROBNICATE usage flag [1]`, and somewhere in the code, `[1]:
Need to add FROBNICATE to this list.`.
Use `MAINTENANCE_TODO` for TODOs which don't impact test coverage.

View File

@ -0,0 +1,46 @@
# Life of a Test Change
A "test change" could be a new test, an expansion of an existing test, a test bug fix, or a
modification to existing tests to make them match new spec changes.
**CTS contributors should contribute to the tracker and strive to keep it up to date, especially
relating to their own changes.**
Filing new draft issues in the CTS project tracker is very lightweight.
Anyone with access should do this eagerly, to ensure no testing ideas are forgotten.
(And if you don't have access, just file a regular issue.)
1. Enter a [draft issue](https://github.com/orgs/gpuweb/projects/3), with the Status
set to "New (not in repo)", and any available info included in the issue description
(notes/plans to ensure full test coverage of the change). The source of this may be:
- Anything in the spec/API that is found not to be covered by the CTS yet.
- Any test found to be outdated or otherwise buggy.
- A spec change from the "Needs CTS Issue" column in the
[spec project tracker](https://github.com/orgs/gpuweb/projects/1).
Once information on the required test changes is entered into the CTS project tracker,
the spec issue moves to "Specification Done".
Note: at some point, someone may make a PR to flush "New (not in repo)" issues into `TODO`s in
CTS file/test description text, changing their "Status" to "Open".
These may be done in bulk without linking back to the issue.
1. As necessary:
- Convert the draft issue to a full, numbered issue for linking from later PRs.
![convert to issue button screenshot](convert_to_issue.png)
- Update the "Assignees" of the issue when an issue is assigned or unassigned
(you can assign yourself).
- Change the "Status" of the issue to "Started" once you start the task.
1. Open one or more PRs, **each linking to the associated issue**.
Each PR is reviewed and landed, and may leave further TODOs for parts it doesn't complete.
1. Tests are "planned" in test descriptions. (For complex tests, open a separate PR with the
tests `.unimplemented()` so a reviewer can evaluate the plan before you implement tests.)
1. Tests are implemented.
1. When **no TODOs remain** for an issue, close it and change its status to "Complete".
(Enter a new, more specific draft issue into the tracker if you need to track related TODOs.)

View File

@ -0,0 +1,82 @@
# Adding or Editing Test Plans
## 1. Write a test plan
For new tests, if some notes exist already, incorporate them into your plan.
A detailed test plan should be written and reviewed before substantial test code is written.
This allows reviewers a chance to identify additional tests and cases, opportunities for
generalizations that would improve the strength of tests, similar existing tests or test plans,
and potentially useful [helpers](../helper_index.txt).
**A test plan must serve two functions:**
- Describes the test, succinctly, but in enough detail that a reader can read *only* the test
plans and evaluate coverage completeness of a file/directory.
- Describes the test precisely enough that, when code is added, the reviewer can ensure that the
test really covers what the test plan says.
There should be one test plan for each test. It should describe what it tests, how, and describe
important cases that need to be covered. Here's an example:
```ts
g.test('x,some_detail')
.desc(
`
Tests [some detail] about x. Tests calling x in various 'mode's { mode1, mode2 },
with various values of 'arg', and checks correctness of the result.
Tries to trigger [some conditional path].
- Valid values (control case) // <- (to make sure the test function works well)
- Unaligned values (should fail) // <- (only validation tests need to intentionally hit invalid cases)
- Extreme values`
)
.params(u =>
u //
.combine('mode', ['mode1', 'mode2'])
.beginSubcases()
.combine('arg', [
// Valid // <- Comment params as you see fit.
4,
8,
100,
// Invalid
2,
6,
1e30,
])
)
.unimplemented();
```
"Cases" each appear as individual items in the `/standalone/` runner.
"Subcases" run inside each case, like a for-loop wrapping the `.fn(`test function`)`.
Documentation on the parameter builder can be found in the [helper index](../helper_index.txt).
It's often impossible to predict the exact case/subcase structure before implementing tests, so they
can be added during implementation, instead of planning.
For any notes which are not specific to a single test, or for preliminary notes for tests that
haven't been planned in full detail, put them in the test file's `description` variable at
the top. Or, if they aren't associated with a test file, put them in a `README.txt` file.
**Any notes about missing test coverage must be marked with the word `TODO` inside a
description or README.** This makes them appear on the `/standalone/` page.
## 2. Open a pull request
Open a PR, and work with the reviewer(s) to revise the test plan.
Usually (probably), plans will be landed in separate PRs before test implementations.
## Conventions used in test plans
- `Iff`: If and only if
- `x=`: "cartesian-cross equals", like `+=` for cartesian product.
Used for combinatorial test coverage.
- Sometimes this will result in too many test cases; simplify/reduce as needed
during planning *or* implementation.
- `{x,y,z}`: list of cases to test
- e.g. `x= texture format {r8unorm, r8snorm}`
- *Control case*: a case included to make sure that the rest of the cases aren't
missing their target by testing some other error case.

View File

@ -0,0 +1,25 @@
# Implementing Tests
Once a test plan is done, you can start writing tests.
To add new tests, imitate the pattern in neighboring tests or neighboring files.
New test files must be named ending in `.spec.ts`.
For an example test file, see [`src/webgpu/examples.spec.ts`](../../src/webgpu/examples.spec.ts).
For a more complex, well-structured reference test file, see
[`src/webgpu/api/validation/vertex_state.spec.ts`](../../src/webgpu/api/validation/vertex_state.spec.ts).
Implement some tests and open a pull request. You can open a PR any time you're ready for a review.
(If two tests are non-trivial but independent, consider separate pull requests.)
Before uploading, you can run pre-submit checks (`npm test`) to make sure it will pass CI.
Use `npm run fix` to fix linting issues.
## Test Helpers
It's best to be familiar with helpers available in the test suite for simplifying
test implementations.
New test helpers can be added at any time to either of those files, or to new `.ts` files anywhere
near the `.spec.ts` file where they're used.
Documentation on existing helpers can be found in the [helper index](../helper_index.txt).

View File

@ -0,0 +1,166 @@
# Test Organization
## `src/webgpu/`
Because of the glorious amount of tests needed, the WebGPU CTS is organized as a tree of arbitrary
depth (a filesystem with multiple tests per file).
Each directory may have a `README.txt` describing its contents.
Tests are grouped in large families (each of which has a `README.txt`);
the root and first few levels looks like the following (some nodes omitted for simplicity):
- **`api`** with tests for full coverage of the Javascript API surface of WebGPU.
- **`validation`** with positive and negative tests for all the validation rules of the API.
- **`operation`** with tests that check the result of performing valid WebGPU operations,
taking advantage of parametrization to exercise interactions between parts of the API.
- **`regression`** for one-off tests that reproduce bugs found in implementations to prevent
the bugs from appearing again.
- **`shader`** with tests for full coverage of the shaders that can be passed to WebGPU.
- **`validation`**.
- **`execution`** similar to `api/operation`.
- **`regression`**.
- **`idl`** with tests to check that the WebGPU IDL is correctly implemented, for example that
objects expose exactly the correct members, and that methods throw when passed incomplete
dictionaries.
- **`web-platform`** with tests for Web platform-specific interactions like `GPUSwapChain` and
`<canvas>`, WebXR and `GPUQueue.copyExternalImageToTexture`.
At the same time test hierarchies can be used to split the testing of a single sub-object into
several files for maintainability. For example `GPURenderPipeline` has a large descriptor and some
parts could be tested independently like `vertex_input` vs. `primitive_topology` vs. `blending`
but all live under the `render_pipeline` directory.
In addition to the test tree, each test can be parameterized. For coverage it is important to
test all enum values, for example for `GPUTextureFormat`. Instead of having a loop to iterate
over all the `GPUTextureFormat`, it is better to parameterize the test over them. Each format
will have a different entry in the test list which will help WebGPU implementers debug the test,
or suppress the failure without losing test coverage while they fix the bug.
Extra capabilities (limits and features) are often tested in the same files as the rest of the API.
For example, a compressed texture format capability would simply add a `GPUTextureFormat` to the
parametrization lists of many tests, while a capability adding significant new functionality
like ray-tracing could have a separate subtree.
Operation tests for optional features should be skipped using `t.selectDeviceOrSkipTestCase()` or
`t.skip()`. Validation tests should be written that test the behavior with and without the
capability enabled via `t.selectDeviceOrSkipTestCase()`, to ensure the functionality is valid
only with the capability enabled.
### Validation tests
Validation tests check the validation rules that are (or will be) set by the
WebGPU spec. Validation tests try to carefully trigger the individual validation
rules in the spec, without simultaneously triggering other rules.
Validation errors *generally* generate WebGPU errors, not exceptions.
But check the spec on a case-by-case basis.
Like all `GPUTest`s, `ValidationTest`s are wrapped in both types of error scope. These
"catch-all" error scopes look for any errors during the test, and report them as test failures.
Since error scopes can be nested, validation tests can nest an error scope to expect that there
*are* errors from specific operations.
#### Parameterization
Test parameterization can help write many validation tests more succinctly,
while making it easier for both authors and reviewers to be confident that
an aspect of the API is tested fully. Examples:
- [`webgpu:api,validation,render_pass,resolve:resolve_attachment:*`](https://github.com/gpuweb/cts/blob/ded3b7c8a4680a1a01621a8ac859facefadf32d0/src/webgpu/api/validation/render_pass/resolve.spec.ts#L35)
- [`webgpu:api,validation,createBindGroupLayout:bindingTypeSpecific_optional_members:*`](https://github.com/gpuweb/cts/blob/ded3b7c8a4680a1a01621a8ac859facefadf32d0/src/webgpu/api/validation/createBindGroupLayout.spec.ts#L68)
Use your own discretion when deciding the balance between heavily parameterizing
a test and writing multiple separate tests.
#### Guidelines
There are many aspects that should be tested in all validation tests:
- each individual argument to a method call (including `this`) or member of a descriptor
dictionary should be tested including:
- what happens when an error object is passed.
- what happens when an optional feature enum or method is used.
- what happens for numeric values when they are at 0, too large, too small, etc.
- each validation rule in the specification should be checked both with a control success case,
and error cases.
- each set of arguments or state that interact for validation.
When testing numeric values, it is important to check on both sides of the boundary: if the error
happens for value N and not N - 1, both should be tested. Alignment of integer values should also
be tested but boundary testing of alignment should be between a value aligned to 2^N and a value
aligned to 2^(N-1).
Finally, this is probably also where we would test that extensions follow the rule that: if the
browser supports a feature but it is not enabled on the device, then calling methods from that
feature throws `TypeError`.
- Test providing unknown properties *that are definitely not part of any feature* are
valid/ignored. (Unfortunately, due to the rules of IDL, adding a member to a dictionary is
always a breaking change. So this is how we have to test this unless we can get a "strict"
dictionary type in IDL. We can't test adding members from non-enabled extensions.)
### Operation tests
Operation tests test the actual results of using the API. They execute
(sometimes significant) code and check that the result is within the expected
set of behaviors (which can be quite complex to compute).
Note that operation tests need to test a lot of interactions between different
parts of the API, and so can become quite complex. Try to reduce the complexity by
utilizing combinatorics and [helpers](./helper_index.txt), and splitting/merging test files as needed.
#### Errors
Operation tests are usually `GPUTest`s. As a result, they automatically fail on any validation
errors that occur during the test.
When it's easier to write an operation test with invalid cases, use
`ParamsBuilder.filter`/`.unless` to avoid invalid cases, or detect and
`expect` validation errors in some cases.
#### Implementation
Use helpers like `expectContents` (and more to come) to check the values of data on the GPU.
(These are "eventual expectations" - the harness will wait for them to finish at the end).
When testing something inside a shader, it's not always necessary to output the result to a
render output. In fragment shaders, you can output to a storage buffer. In vertex shaders, you
can't - but you can render with points (simplest), send the result to the fragment shader, and
output it from there. (Someday, we may end up wanting a helper for this.)
#### Testing Default Values
Default value tests (for arguments and dictionary members) should usually be operation tests -
all you have to do is include `undefined` in parameterizations of other tests to make sure the
behavior with `undefined` has the same expected result that you have when the default value is
specified explicitly.
### IDL tests
TODO: figure out how to implement these. https://github.com/gpuweb/cts/issues/332
These tests test only rules that come directly from WebIDL. For example:
- Values out of range for `[EnforceRange]` cause exceptions.
- Required function arguments and dictionary members cause exceptions if omitted.
- Arguments and dictionary members cause exceptions if passed the wrong type.
They may also test positive cases like the following, but the behavior of these should be tested in
operation tests.
- OK to omit optional arguments/members.
- OK to pass the correct argument/member type (or of any type in a union type).
Every overload of every method should be tested.
## `src/stress/`, `src/manual/`
Stress tests and manual tests for WebGPU that are not intended to be run in an automated way.
## `src/unittests/`
Unit tests for the test framework (`src/common/framework/`).
## `src/demo/`
A demo of test hierarchies for the purpose of testing the `standalone` test runner page.

View File

@ -0,0 +1,70 @@
# Review Requirements
A review should have several items checked off before it is landed.
Checkboxes are pre-filled into the pull request summary when it's created.
The uploader may pre-check-off boxes if they are not applicable
(e.g. TypeScript readability on a plan PR).
## Readability
A reviewer has "readability" for a topic if they have enough expertise in that topic to ensure
good practices are followed in pull requests, or know when to loop in other reviewers.
Perfection is not required!
**It is up to reviewers' own discretion** whether they are qualified to check off a
"readability" checkbox on any given pull request.
- WebGPU Readability: Familiarity with the API to ensure:
- WebGPU is being used correctly; expected results seem reasonable.
- WebGPU is being tested completely; tests have control cases.
- Test code has a clear correspondence with the test description.
- [Test helpers](./helper_index.txt) are used or created appropriately
(where the reviewer is familiar with the helpers).
- TypeScript Readability: Make sure TypeScript is utilized in a way that:
- Ensures test code is reasonably type-safe.
Reviewers may recommend changes to make type-safety either weaker (`as`, etc.) or stronger.
- Is understandable and has appropriate verbosity and dynamicity
(e.g. type inference and `as const` are used to reduce unnecessary boilerplate).
## Plan Reviews
**Changes *must* have an author or reviewer with the following readability:** WebGPU
Reviewers must carefully ensure the following:
- The test plan name accurately describes the area being tested.
- The test plan covers the area described by the file/test name and file/test description
as fully as possible (or adds TODOs for incomplete areas).
- Validation tests have control cases (where no validation error should occur).
- Each validation rule is tested in isolation, in at least one case which does not validate any
other validation rules.
See also: [Adding or Editing Test Plans](intro/plans.md).
## Implementation Reviews
**Changes *must* have an author or reviewer with the following readability:** WebGPU, TypeScript
Reviewers must carefully ensure the following:
- The coverage of the test implementation precisely matches the test description.
- Everything required for test plan reviews above.
Reviewers should ensure the following:
- New test helpers are documented in [helper index](./helper_index.txt).
- Framework and test helpers are used where they would make test code clearer.
See also: [Implementing Tests](intro/tests.md).
## Framework
**Changes *must* have an author or reviewer with the following readability:** TypeScript
Reviewers should ensure the following:
- Changes are reasonably type-safe, and covered by unit tests where appropriate.

View File

@ -0,0 +1,270 @@
# Terminology
Each test suite is organized as a tree, both in the filesystem and further within each file.
- _Suites_, e.g. `src/webgpu/`.
- _READMEs_, e.g. `src/webgpu/README.txt`.
- _Test Spec Files_, e.g. `src/webgpu/examples.spec.ts`.
Identified by their file path.
Each test spec file provides a description and a _Test Group_.
A _Test Group_ defines a test fixture, and contains multiple:
- _Tests_.
Identified by a comma-separated list of parts (e.g. `basic,async`)
which define a path through a filesystem-like tree (analogy: `basic/async.txt`).
Defines a _test function_ and contains multiple:
- _Test Cases_.
Identified by a list of _Public Parameters_ (e.g. `x` = `1`, `y` = `2`).
Each Test Case has the same test function but different Public Parameters.
## Test Tree
A _Test Tree_ is a tree whose leaves are individual Test Cases.
A Test Tree can be thought of as follows:
- Suite, which is the root of a tree with "leaves" which are:
- Test Spec Files, each of which is a tree with "leaves" which are:
- Tests, each of which is a tree with leaves which are:
- Test Cases.
(In the implementation, this conceptual tree of trees is decomposed into one big tree
whose leaves are Test Cases.)
**Type:** `TestTree`
## Suite
A suite of tests.
A single suite has a directory structure, and many _test spec files_
(`.spec.ts` files containing tests) and _READMEs_.
Each member of a suite is identified by its path within the suite.
**Example:** `src/webgpu/`
### README
**Example:** `src/webgpu/README.txt`
Describes (in prose) the contents of a subdirectory in a suite.
READMEs are only processed at build time, when generating the _Listing_ for a suite.
**Type:** `TestSuiteListingEntryReadme`
## Queries
A _Query_ is a structured object which specifies a subset of cases in exactly one Suite.
A Query can be represented uniquely as a string.
Queries are used to:
- Identify a subtree of a suite (by identifying the root node of that subtree).
- Identify individual cases.
- Represent the list of tests that a test runner (standalone, wpt, or cmdline) should run.
- Identify subtrees which should not be "collapsed" during WPT `cts.https.html` generation,
so that the cts.https.html "variants" can have individual test expectations
(i.e. marked as "expected to fail", "skip", etc.).
There are four types of `TestQuery`:
- `TestQueryMultiFile` represents any subtree of the file hierarchy:
- `suite:*`
- `suite:path,to,*`
- `suite:path,to,file,*`
- `TestQueryMultiTest` represents any subtree of the test hierarchy:
- `suite:path,to,file:*`
- `suite:path,to,file:path,to,*`
- `suite:path,to,file:path,to,test,*`
- `TestQueryMultiCase` represents any subtree of the case hierarchy:
- `suite:path,to,file:path,to,test:*`
- `suite:path,to,file:path,to,test:my=0;*`
- `suite:path,to,file:path,to,test:my=0;params="here";*`
- `TestQuerySingleCase` represents a single case:
- `suite:path,to,file:path,to,test:my=0;params="here"`
Test Queries are a **weakly ordered set**: any query is
_Unordered_, _Equal_, _StrictSuperset_, or _StrictSubset_ relative to any other.
This property is used to construct the complete tree of test cases.
In the examples above, every example query is a StrictSubset of the previous one
(note: even `:*` is a subset of `,*`).
In the WPT and standalone harnesses, the query is stored in the URL, e.g.
`index.html?q=q:u,e:r,y:*`.
Queries are selectively URL-encoded for readability and compatibility with browsers
(see `encodeURIComponentSelectively`).
**Type:** `TestQuery`
## Listing
A listing of the **test spec files** in a suite.
This can be generated only in Node, which has filesystem access (see `src/tools/crawl.ts`).
As part of the build step, a _listing file_ is generated (see `src/tools/gen.ts`) so that the
Test Spec Files can be discovered by the web runner (since it does not have filesystem access).
**Type:** `TestSuiteListing`
### Listing File
Each Suite has one Listing File (`suite/listing.[tj]s`), containing a list of the files
in the suite.
In `src/suite/listing.ts`, this is computed dynamically.
In `out/suite/listing.js`, the listing has been pre-baked (by `tools/gen_listings`).
**Type:** Once `import`ed, `ListingFile`
**Example:** `out/webgpu/listing.js`
## Test Spec File
A Test Spec File has a `description` and a Test Group (under which tests and cases are defined).
**Type:** Once `import`ed, `SpecFile`
**Example:** `src/webgpu/**/*.spec.ts`
## Test Group
A subtree of tests. There is one Test Group per Test Spec File.
The Test Fixture used for tests is defined at TestGroup creation.
**Type:** `TestGroup`
## Test
One test. It has a single _test function_.
It may represent multiple _test cases_, each of which runs the same Test Function with different
Parameters.
A test is named using `TestGroup.test()`, which returns a `TestBuilder`.
`TestBuilder.params()`/`.paramsSimple()`/`.paramsSubcasesOnly()`
can optionally be used to parametrically generate instances (cases and subcases) of the test.
Finally, `TestBuilder.fn()` provides the Test Function
(or, a test can be marked unimplemented with `TestBuilder.unimplemented()`).
### Test Function
When a test subcase is run, the Test Function receives an instance of the
Test Fixture provided to the Test Group, producing test results.
**Type:** `TestFn`
## Test Case / Case
A single case of a test. It is identified by a `TestCaseID`: a test name, and its parameters.
Each case appears as an individual item (tree leaf) in `/standalone/`,
and as an individual "step" in WPT.
If `TestBuilder.params()`/`.paramsSimple()`/`.paramsSubcasesOnly()` are not used,
there is exactly one case with one subcase, with parameters `{}`.
**Type:** During test run time, a case is encapsulated as a `RunCase`.
## Test Subcase / Subcase
A single "subcase" of a test. It can also be identified by a `TestCaseID`, though
not all contexts allow subdividing cases into subcases.
All of the subcases of a case will run _inside_ the case, essentially as a for-loop wrapping the
test function. They do _not_ appear individually in `/standalone/` or WPT.
If `CaseParamsBuilder.beginSubcases()` is not used, there is exactly one subcase per case.
## Test Parameters / Params
Each Test Subcase has a (possibly empty) set of Test Parameters.
The parameters are passed to the Test Function `f(t)` via `t.params`.
A set of Public Parameters identifies a Test Case or Test Subcase within a Test.
There are also Private Parameters: any parameter name beginning with an underscore (`_`).
These parameters are not part of the Test Case identification, but are still passed into
the Test Function. They can be used, e.g., to manually specify expected results.
**Type:** `TestParams`
## Test Fixture / Fixture
_Test Fixtures_ provide helpers for tests to use.
A new instance of the fixture is created for every run of every test case.
There is always one fixture class for a whole test group (though this may change).
The fixture is also how a test gets access to the _case recorder_,
which allows it to produce test results.
They are also how tests produce results: `.skip()`, `.fail()`, etc.
**Type:** `Fixture`
### `UnitTest` Fixture
Provides basic fixture utilities most useful in the `unittests` suite.
### `GPUTest` Fixture
Provides utilities useful in WebGPU CTS tests.
# Test Results
## Logger
A logger logs the results of a whole test run.
It saves an empty `LiveTestSpecResult` into its results map, then creates a
_test spec recorder_, which records the results for a group into the `LiveTestSpecResult`.
**Type:** `Logger`
### Test Case Recorder
Refers to a `LiveTestCaseResult` created by the logger.
Records the results of running a test case (its pass-status, run time, and logs) into it.
**Types:** `TestCaseRecorder`, `LiveTestCaseResult`
#### Test Case Status
The `status` of a `LiveTestCaseResult` can be one of:
- `'running'` (only while still running)
- `'pass'`
- `'skip'`
- `'warn'`
- `'fail'`
The "worst" result from running a case is always reported (fail > warn > skip > pass).
Note this means a test can still fail even if it's "skipped", if it failed before
`.skip()` was called.
**Type:** `Status`
## Results Format
The results are returned in JSON format.
They are designed to be easily merged in JavaScript:
the `"results"` can be passed into the constructor of `Map` and merged from there.
(TODO: Write a merge tool, if needed.)
```js
{
"version": "bf472c5698138cdf801006cd400f587e9b1910a5-dirty",
"results": [
[
"unittests:async_mutex:basic:",
{ "status": "pass", "timems": 0.286, "logs": [] }
],
[
"unittests:async_mutex:serial:",
{ "status": "pass", "timems": 0.415, "logs": [] }
]
]
}
```

View File

@ -0,0 +1,20 @@
// Typescript configuration for compile sources and
// dependent files for usage directly with Node.js. This
// is useful for running scripts in tools/ directly with Node
// without including extra dependencies.
{
"extends": "./tsconfig.json",
"compilerOptions": {
"module": "commonjs",
"incremental": false,
"noEmit": false,
"declaration": false,
},
"exclude": [
"src/common/runtime/wpt.ts",
"src/common/runtime/standalone.ts",
"src/common/runtime/helper/test_worker.ts",
"src/webgpu/web_platform/worker/worker_launcher.ts"
]
}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,77 @@
{
"name": "@webgpu/cts",
"version": "0.1.0",
"description": "WebGPU Conformance Test Suite",
"scripts": {
"test": "grunt pre",
"check": "grunt check",
"standalone": "grunt standalone",
"wpt": "grunt wpt",
"fix": "grunt fix",
"unittest": "grunt unittest",
"gen_wpt_cts_html": "node tools/gen_wpt_cts_html",
"gen_cache": "node tools/gen_cache",
"tsdoc": "grunt run:tsdoc",
"start": "node tools/dev_server",
"dev": "node tools/dev_server"
},
"engines": {
"node": "^12.22.0 || ^14.17.0 || >=16.0.0"
},
"repository": {
"type": "git",
"url": "git+https://github.com/gpuweb/cts.git"
},
"author": "WebGPU CTS Contributors",
"private": true,
"license": "BSD-3-Clause",
"bugs": {
"url": "https://github.com/gpuweb/cts/issues"
},
"homepage": "https://github.com/gpuweb/cts#readme",
"devDependencies": {
"@babel/cli": "^7.19.3",
"@babel/core": "^7.20.5",
"@babel/preset-typescript": "^7.18.6",
"@types/babel__core": "^7.1.20",
"@types/dom-mediacapture-transform": "^0.1.4",
"@types/dom-webcodecs": "^0.1.5",
"@types/express": "^4.17.14",
"@types/jquery": "^3.5.14",
"@types/morgan": "^1.9.3",
"@types/node": "^14.18.12",
"@types/offscreencanvas": "^2019.7.0",
"@types/pngjs": "^6.0.1",
"@types/serve-index": "^1.9.1",
"@typescript-eslint/parser": "^4.33.0",
"@webgpu/types": "0.1.25",
"ansi-colors": "4.1.1",
"babel-plugin-add-header-comment": "^1.0.3",
"babel-plugin-const-enum": "^1.2.0",
"chokidar": "^3.5.3",
"eslint": "^7.11.0",
"eslint-plugin-ban": "^1.6.0",
"eslint-plugin-deprecation": "^1.3.3",
"eslint-plugin-import": "^2.26.0",
"express": "^4.18.2",
"grunt": "^1.5.3",
"grunt-cli": "^1.4.3",
"grunt-contrib-clean": "^2.0.1",
"grunt-contrib-copy": "^1.0.0",
"grunt-run": "^0.8.1",
"grunt-ts": "^6.0.0-beta.22",
"gts": "^3.1.1",
"http-server": "^14.1.1",
"morgan": "^1.10.0",
"playwright-core": "^1.29.2",
"pngjs": "^6.0.0",
"portfinder": "^1.0.32",
"prettier": "~2.1.2",
"quiet-grunt": "^0.2.3",
"screenshot-ftw": "^1.0.5",
"serve-index": "^1.9.1",
"ts-node": "^9.0.0",
"typedoc": "^0.23.21",
"typescript": "~4.7.4"
}
}

View File

@ -0,0 +1,8 @@
// Prettier configuration (see https://prettier.io/docs/en/options.html).
module.exports = {
  printWidth: 100, // wider than Prettier's default of 80
  arrowParens: 'avoid', // omit parentheses around sole arrow-function parameters
  bracketSpacing: true,
  singleQuote: true,
  trailingComma: 'es5', // trailing commas where valid in ES5 (objects, arrays, etc.)
};

View File

@ -0,0 +1,120 @@
/**
* Utilities to improve the performance of the CTS, by caching data that is
* expensive to build using a two-level cache (in-memory, pre-computed file).
*/
/** DataStore is the interface to a persistent backing store for pre-computed cache files. */
interface DataStore {
  /** load() resolves to the serialized data stored at `path`, rejecting if it can't be loaded. */
  load(path: string): Promise<string>;
}
/** Logger is a basic debug logger function */
export type Logger = (s: string) => void;
/**
 * DataCache is an interface to a data store used to hold cached data: a two-level
 * cache backed by an in-memory map and an optional persistent DataStore.
 */
export class DataCache {
  /** setStore() sets the backing data store used by the data cache */
  public setStore(dataStore: DataStore) {
    this.dataStore = dataStore;
  }
  /** setDebugLogger() sets the verbose logger */
  public setDebugLogger(logger: Logger) {
    this.debugLogger = logger;
  }
  /**
   * fetch() retrieves cacheable data from the data cache, first checking the
   * in-memory cache, then the data store (if specified), then resorting to
   * building the data and storing it in the cache.
   */
  public async fetch<Data>(cacheable: Cacheable<Data>): Promise<Data> {
    // First check the in-memory cache
    let data = this.cache.get(cacheable.path);
    if (data !== undefined) {
      this.log('in-memory cache hit');
      return Promise.resolve(data as Data);
    }
    this.log('in-memory cache miss');
    // In-memory cache miss.
    // Next, try the data store (skipping paths that previously failed to load).
    if (this.dataStore !== null && !this.unavailableFiles.has(cacheable.path)) {
      let serialized: string | undefined;
      try {
        serialized = await this.dataStore.load(cacheable.path);
        this.log('loaded serialized');
      } catch (err) {
        // not found in data store; remember that so we don't retry on every fetch
        this.log(`failed to load (${cacheable.path}): ${err}`);
        this.unavailableFiles.add(cacheable.path);
      }
      if (serialized !== undefined) {
        this.log(`deserializing`);
        data = cacheable.deserialize(serialized);
        this.cache.set(cacheable.path, data);
        return data as Data;
      }
    }
    // Not found anywhere. Build the data, and cache for future lookup.
    this.log(`cache: building (${cacheable.path})`);
    data = await cacheable.build();
    this.cache.set(cacheable.path, data);
    return data as Data;
  }
  // Emits a debug message, if a debug logger has been set.
  private log(msg: string) {
    if (this.debugLogger !== null) {
      this.debugLogger(`DataCache: ${msg}`);
    }
  }
  // In-memory cache: Cacheable.path -> built/deserialized data.
  private cache = new Map<string, unknown>();
  // Paths which previously failed to load from the data store (avoids repeated lookups).
  private unavailableFiles = new Set<string>();
  private dataStore: DataStore | null = null;
  private debugLogger: Logger | null = null;
}
/** The data cache (module-level singleton shared by all users of this module) */
export const dataCache = new DataCache();
/** true if the current process is building the cache */
let isBuildingDataCache = false;
/** @returns true if the data cache is currently being built */
export function getIsBuildingDataCache() {
  return isBuildingDataCache;
}
/** Sets whether the data cache is currently being built (defaults to true) */
export function setIsBuildingDataCache(value = true) {
  isBuildingDataCache = value;
}
/**
 * Cacheable is the interface to something that can be stored into the
 * DataCache.
 * The 'npm run gen_cache' tool will look for module-scope variables of this
 * interface, with the name `d`.
 */
export interface Cacheable<Data> {
  /** the globally unique path for the cacheable data */
  readonly path: string;
  /**
   * build() builds the cacheable data.
   * This is assumed to be an expensive operation and will only happen if the
   * cache does not already contain the built data.
   */
  build(): Promise<Data>;
  /**
   * serialize() transforms `data` to a string (usually JSON encoded) so that it
   * can be stored in a text cache file.
   */
  serialize(data: Data): string;
  /**
   * deserialize() is the inverse of serialize(), transforming the string back
   * to the Data object. Implementations should ensure `deserialize(serialize(d))`
   * is equivalent to `d`.
   */
  deserialize(serialized: string): Data;
}

View File

@ -0,0 +1,328 @@
import { TestCaseRecorder } from '../internal/logging/test_case_recorder.js';
import { JSONWithUndefined } from '../internal/params_utils.js';
import { assert, unreachable } from '../util/util.js';
/** Thrown (via `Fixture.skip()`) to mark the current test case/subcase as skipped. */
export class SkipTestCase extends Error {}
/** Error type signaling an unexpected pass. NOTE(review): raised by harness code outside this file. */
export class UnexpectedPassError extends Error {}
export { TestCaseRecorder } from '../internal/logging/test_case_recorder.js';
/** The fully-general type for params passed to a test function invocation. */
export type TestParams = {
  readonly [k: string]: JSONWithUndefined;
};
// Union of the object shapes Fixture.finalize() knows how to clean up:
// things with destroy(), things with close(), and WebGL contexts (which are
// released via the WEBGL_lose_context extension).
type DestroyableObject =
  | { destroy(): void }
  | { close(): void }
  | { getExtension(extensionName: 'WEBGL_lose_context'): WEBGL_lose_context };
/**
 * State for a batch of subcases: shared between subcases within the same
 * test case (unlike Fixture, a new instance of which is made per subcase).
 */
export class SubcaseBatchState {
  constructor(private readonly _params: TestParams) {}
  /**
   * Returns the case parameters for this test fixture shared state. Subcase params
   * are not included.
   */
  get params(): TestParams {
    return this._params;
  }
  /**
   * Runs before the `.before()` function.
   * @internal MAINTENANCE_TODO: Make this not visible to test code?
   */
  async init() {}
  /**
   * Runs between the `.before()` function and the subcases.
   * @internal MAINTENANCE_TODO: Make this not visible to test code?
   */
  async postInit() {}
  /**
   * Runs after all subcases finish.
   * @internal MAINTENANCE_TODO: Make this not visible to test code?
   */
  async finalize() {}
}
/**
 * A Fixture is a class used to instantiate each test sub/case at run time.
 * A new instance of the Fixture is created for every single test subcase
 * (i.e. every time the test function is run).
 */
export class Fixture<S extends SubcaseBatchState = SubcaseBatchState> {
  private _params: unknown;
  private _sharedState: S;
  /**
   * Interface for recording logs and test status.
   *
   * @internal
   */
  protected rec: TestCaseRecorder;
  // Promises registered via eventualAsyncExpectation(); awaited in finalize().
  private eventualExpectations: Array<Promise<unknown>> = [];
  // Count of in-flight immediateAsyncExpectation() calls; must be 0 at finalize().
  private numOutstandingAsyncExpectations = 0;
  private objectsToCleanUp: DestroyableObject[] = [];
  public static MakeSharedState(params: TestParams): SubcaseBatchState {
    return new SubcaseBatchState(params);
  }
  /** @internal */
  constructor(sharedState: S, rec: TestCaseRecorder, params: TestParams) {
    this._sharedState = sharedState;
    this.rec = rec;
    this._params = params;
  }
  /**
   * Returns the (case+subcase) parameters for this test function invocation.
   */
  get params(): unknown {
    return this._params;
  }
  /**
   * Gets the test fixture's shared state. This object is shared between subcases
   * within the same testcase.
   */
  get sharedState(): S {
    return this._sharedState;
  }
  /**
   * Override this to do additional pre-test-function work in a derived fixture.
   * This has to be a member function instead of an async `createFixture` function, because
   * we need to be able to ergonomically override it in subclasses.
   *
   * @internal MAINTENANCE_TODO: Make this not visible to test code?
   */
  async init(): Promise<void> {}
  /**
   * Override this to do additional post-test-function work in a derived fixture.
   *
   * Called even if init was unsuccessful.
   *
   * @internal MAINTENANCE_TODO: Make this not visible to test code?
   */
  async finalize(): Promise<void> {
    assert(
      this.numOutstandingAsyncExpectations === 0,
      'there were outstanding immediateAsyncExpectations (e.g. expectUncapturedError) at the end of the test'
    );
    // Loop to exhaust the eventualExpectations in case they chain off each other.
    while (this.eventualExpectations.length) {
      const p = this.eventualExpectations.shift()!;
      try {
        await p;
      } catch (ex) {
        this.rec.threw(ex);
      }
    }
    // And clean up any objects now that they're done being used.
    for (const o of this.objectsToCleanUp) {
      if ('getExtension' in o) {
        const WEBGL_lose_context = o.getExtension('WEBGL_lose_context');
        if (WEBGL_lose_context) WEBGL_lose_context.loseContext();
      } else if ('destroy' in o) {
        o.destroy();
      } else {
        o.close();
      }
    }
  }
  /**
   * Tracks an object to be cleaned up after the test finishes.
   *
   * MAINTENANCE_TODO: Use this in more places. (Will be easier once .destroy() is allowed on
   * invalid objects.)
   */
  trackForCleanup<T extends DestroyableObject>(o: T): T {
    this.objectsToCleanUp.push(o);
    return o;
  }
  /** Tracks an object, if it's destroyable, to be cleaned up after the test finishes. */
  tryTrackForCleanup<T>(o: T): T {
    if (typeof o === 'object' && o !== null) {
      if (
        'destroy' in o ||
        'close' in o ||
        o instanceof WebGLRenderingContext ||
        o instanceof WebGL2RenderingContext
      ) {
        this.objectsToCleanUp.push((o as unknown) as DestroyableObject);
      }
    }
    return o;
  }
  /** Log a debug message. */
  debug(msg: string): void {
    this.rec.debug(new Error(msg));
  }
  /** Throws an exception marking the subcase as skipped. */
  skip(msg: string): never {
    throw new SkipTestCase(msg);
  }
  /** Log a warning and increase the result status to "Warn". */
  warn(msg?: string): void {
    this.rec.warn(new Error(msg));
  }
  /** Log an error and increase the result status to "ExpectFailed". */
  fail(msg?: string): void {
    this.rec.expectationFailed(new Error(msg));
  }
  /**
   * Wraps an async function. Tracks its status to fail if the test tries to report a test status
   * before the async work has finished.
   */
  protected async immediateAsyncExpectation<T>(fn: () => Promise<T>): Promise<T> {
    this.numOutstandingAsyncExpectations++;
    try {
      return await fn();
    } finally {
      // Decrement even if fn() rejects, so a failed expectation doesn't *also*
      // trip the outstanding-expectations assertion in finalize().
      this.numOutstandingAsyncExpectations--;
    }
  }
  /**
   * Wraps an async function, passing it an `Error` object recording the original stack trace.
   * The async work will be implicitly waited upon before reporting a test status.
   */
  protected eventualAsyncExpectation<T>(fn: (niceStack: Error) => Promise<T>): void {
    const promise = fn(new Error());
    this.eventualExpectations.push(promise);
  }
  // Checks a caught value against an expected error name (`true` = any Error),
  // recording a failure or a debug message on `this.rec` accordingly.
  private expectErrorValue(expectedError: string | true, ex: unknown, niceStack: Error): void {
    if (!(ex instanceof Error)) {
      niceStack.message = `THREW non-error value, of type ${typeof ex}: ${ex}`;
      this.rec.expectationFailed(niceStack);
      return;
    }
    const actualName = ex.name;
    if (expectedError !== true && actualName !== expectedError) {
      niceStack.message = `THREW ${actualName}, instead of ${expectedError}: ${ex}`;
      this.rec.expectationFailed(niceStack);
    } else {
      niceStack.message = `OK: threw ${actualName}: ${ex.message}`;
      this.rec.debug(niceStack);
    }
  }
  /** Expect that the provided promise resolves (fulfills). */
  shouldResolve(p: Promise<unknown>, msg?: string): void {
    this.eventualAsyncExpectation(async niceStack => {
      const m = msg ? ': ' + msg : '';
      try {
        await p;
        niceStack.message = 'resolved as expected' + m;
      } catch (ex) {
        niceStack.message = `REJECTED${m}`;
        if (ex instanceof Error) {
          niceStack.message += '\n' + ex.message;
        }
        this.rec.expectationFailed(niceStack);
      }
    });
  }
  /** Expect that the provided promise rejects, with the provided exception name. */
  shouldReject(expectedName: string, p: Promise<unknown>, msg?: string): void {
    this.eventualAsyncExpectation(async niceStack => {
      const m = msg ? ': ' + msg : '';
      try {
        await p;
        niceStack.message = 'DID NOT REJECT' + m;
        this.rec.expectationFailed(niceStack);
      } catch (ex) {
        niceStack.message = 'rejected as expected' + m;
        this.expectErrorValue(expectedName, ex, niceStack);
      }
    });
  }
  /**
   * Expect that the provided function throws.
   * If an `expectedName` is provided, expect that the throw exception has that name.
   */
  shouldThrow(expectedError: string | boolean, fn: () => void, msg?: string): void {
    const m = msg ? ': ' + msg : '';
    try {
      fn();
      if (expectedError === false) {
        this.rec.debug(new Error('did not throw, as expected' + m));
      } else {
        this.rec.expectationFailed(new Error('unexpectedly did not throw' + m));
      }
    } catch (ex) {
      if (expectedError === false) {
        this.rec.expectationFailed(new Error('threw unexpectedly' + m));
      } else {
        this.expectErrorValue(expectedError, ex, new Error(m));
      }
    }
  }
  /** Expect that a condition is true. */
  expect(cond: boolean, msg?: string): boolean {
    if (cond) {
      const m = msg ? ': ' + msg : '';
      this.rec.debug(new Error('expect OK' + m));
    } else {
      this.rec.expectationFailed(new Error(msg));
    }
    return cond;
  }
  /**
   * If the argument is an `Error`, fail (or warn). If it's `undefined`, no-op.
   * If the argument is an array, apply the above behavior on each of elements.
   */
  expectOK(
    error: Error | undefined | (Error | undefined)[],
    { mode = 'fail', niceStack }: { mode?: 'fail' | 'warn'; niceStack?: Error } = {}
  ): void {
    const handleError = (error: Error | undefined) => {
      if (error instanceof Error) {
        if (niceStack) {
          // Replace the error's stack with the caller-captured one for better reporting.
          error.stack = niceStack.stack;
        }
        if (mode === 'fail') {
          this.rec.expectationFailed(error);
        } else if (mode === 'warn') {
          this.rec.warn(error);
        } else {
          unreachable();
        }
      }
    };
    if (Array.isArray(error)) {
      for (const e of error) {
        handleError(e);
      }
    } else {
      handleError(error);
    }
  }
  /** Asynchronous version of expectOK(): awaits `error`, then applies expectOK() to the result. */
  eventualExpectOK(
    error: Promise<Error | undefined | (Error | undefined)[]>,
    { mode = 'fail' }: { mode?: 'fail' | 'warn' } = {}
  ) {
    this.eventualAsyncExpectation(async niceStack => {
      this.expectOK(await error, { mode, niceStack });
    });
  }
}

View File

@ -0,0 +1,337 @@
import { Merged, mergeParams } from '../internal/params_utils.js';
import { stringifyPublicParams } from '../internal/query/stringify_params.js';
import { assert, mapLazy } from '../util/util.js';
// ================================================================
// "Public" ParamsBuilder API / Documentation
// ================================================================
/**
 * Provides doc comments for the methods of CaseParamsBuilder and SubcaseParamsBuilder.
 * (Also enforces rough interface match between them.)
 */
export interface ParamsBuilder {
  /**
   * Expands each item in `this` into zero or more items.
   * Each item has its parameters expanded with those returned by the `expander`.
   *
   * **Note:** When only a single key is being added, use the simpler `expand` for readability.
   *
   * ```text
   * this = [ a , b , c ]
   * this.map(expander) = [ f(a) f(b) f(c) ]
   * = [[a1, a2, a3] , [ b1 ] , [] ]
   * merge and flatten = [ merge(a, a1), merge(a, a2), merge(a, a3), merge(b, b1) ]
   * ```
   */
  /* eslint-disable-next-line @typescript-eslint/no-explicit-any */
  expandWithParams(expander: (_: any) => any): any;
  /**
   * Expands each item in `this` into zero or more items. Each item has its parameters expanded
   * with one new key, `key`, and the values returned by `expander`.
   */
  /* eslint-disable-next-line @typescript-eslint/no-explicit-any */
  expand(key: string, expander: (_: any) => any): any;
  /**
   * Expands each item in `this` to multiple items, one for each item in `newParams`.
   *
   * In other words, takes the cartesian product of [ the items in `this` ] and `newParams`.
   *
   * **Note:** When only a single key is being added, use the simpler `combine` for readability.
   *
   * ```text
   * this = [ {a:1}, {b:2} ]
   * newParams = [ {x:1}, {y:2} ]
   * this.combineWithParams(newParams) = [ {a:1,x:1}, {a:1,y:2}, {b:2,x:1}, {b:2,y:2} ]
   * ```
   */
  /* eslint-disable-next-line @typescript-eslint/no-explicit-any */
  combineWithParams(newParams: Iterable<any>): any;
  /**
   * Expands each item in `this` to multiple items with `{ [key]: value }` for each value.
   *
   * In other words, takes the cartesian product of [ the items in `this` ]
   * and `[ {[key]: value} for each value in newParams ]`
   */
  /* eslint-disable-next-line @typescript-eslint/no-explicit-any */
  combine(key: string, newParams: Iterable<any>): any;
  /**
   * Filters `this` to only items for which `pred` returns true.
   */
  /* eslint-disable-next-line @typescript-eslint/no-explicit-any */
  filter(pred: (_: any) => boolean): any;
  /**
   * Filters `this` to only items for which `pred` returns false.
   */
  /* eslint-disable-next-line @typescript-eslint/no-explicit-any */
  unless(pred: (_: any) => boolean): any;
}
/**
 * Determines the resulting parameter object type which would be generated by an object of
 * the given ParamsBuilder type. (For subcase builders, subcase params are merged over
 * case params via `Merged`.)
 */
export type ParamTypeOf<
  /* eslint-disable-next-line @typescript-eslint/no-explicit-any */
  T extends ParamsBuilder
> = T extends SubcaseParamsBuilder<infer CaseP, infer SubcaseP>
  ? Merged<CaseP, SubcaseP>
  : T extends CaseParamsBuilder<infer CaseP>
  ? CaseP
  : never;
// ================================================================
// Implementation
// ================================================================
/**
 * Iterable over pairs of either:
 * - `[case params, Iterable<subcase params>]` if there are subcases.
 * - `[case params, undefined]` if not.
 *
 * This is the common return type of `iterateCasesWithSubcases()` on both builder kinds.
 */
export type CaseSubcaseIterable<CaseP, SubcaseP> = Iterable<
  readonly [CaseP, Iterable<SubcaseP> | undefined]
>;
/**
 * Base class for `CaseParamsBuilder` and `SubcaseParamsBuilder`.
 */
export abstract class ParamsBuilderBase<CaseP extends {}, SubcaseP extends {}> {
  // A factory producing a fresh generator of case params on each call.
  // (A factory is stored, not a generator, since generators are not reusable.)
  protected readonly cases: () => Generator<CaseP>;
  constructor(cases: () => Generator<CaseP>) {
    this.cases = cases;
  }
  /**
   * Hidden from test files. Use `builderIterateCasesWithSubcases` to access this.
   */
  protected abstract iterateCasesWithSubcases(): CaseSubcaseIterable<CaseP, SubcaseP>;
}
/**
 * Calls the (normally hidden) `iterateCasesWithSubcases()` method.
 */
export function builderIterateCasesWithSubcases(builder: ParamsBuilderBase<{}, {}>) {
  // Widen through `unknown` to reach the protected method without exposing it
  // on the public ParamsBuilderBase type.
  type CaseIterable = {
    iterateCasesWithSubcases(): CaseSubcaseIterable<{}, {}>;
  };
  const accessible = (builder as unknown) as CaseIterable;
  return accessible.iterateCasesWithSubcases();
}
/**
 * Builder for combinatorial test **case** parameters.
 *
 * CaseParamsBuilder is immutable. Each method call returns a new, immutable object,
 * modifying the list of cases according to the method called.
 *
 * This means, for example, that the `unit` passed into `TestBuilder.params()` can be reused.
 */
export class CaseParamsBuilder<CaseP extends {}>
  extends ParamsBuilderBase<CaseP, {}>
  implements Iterable<CaseP>, ParamsBuilder {
  // Case-only builder: each case is yielded with `undefined` in place of subcases.
  *iterateCasesWithSubcases(): CaseSubcaseIterable<CaseP, {}> {
    for (const a of this.cases()) {
      yield [a, undefined];
    }
  }
  [Symbol.iterator](): Iterator<CaseP> {
    return this.cases();
  }
  /** @inheritDoc */
  expandWithParams<NewP extends {}>(
    expander: (_: Merged<{}, CaseP>) => Iterable<NewP>
  ): CaseParamsBuilder<Merged<CaseP, NewP>> {
    const newGenerator = expanderGenerator(this.cases, expander);
    return new CaseParamsBuilder(() => newGenerator({}));
  }
  /** @inheritDoc */
  expand<NewPKey extends string, NewPValue>(
    key: NewPKey,
    expander: (_: Merged<{}, CaseP>) => Iterable<NewPValue>
  ): CaseParamsBuilder<Merged<CaseP, { [name in NewPKey]: NewPValue }>> {
    return this.expandWithParams(function* (p) {
      for (const value of expander(p)) {
        yield { [key]: value } as { readonly [name in NewPKey]: NewPValue };
      }
    });
  }
  /** @inheritDoc */
  combineWithParams<NewP extends {}>(
    newParams: Iterable<NewP>
  ): CaseParamsBuilder<Merged<CaseP, NewP>> {
    assertNotGenerator(newParams);
    // Check for duplicate entries up front, which would otherwise produce
    // multiple cases with identical public params.
    const seenValues = new Set<string>();
    for (const params of newParams) {
      const paramsStr = stringifyPublicParams(params);
      assert(!seenValues.has(paramsStr), `Duplicate entry in combine[WithParams]: ${paramsStr}`);
      seenValues.add(paramsStr);
    }
    return this.expandWithParams(() => newParams);
  }
  /** @inheritDoc */
  combine<NewPKey extends string, NewPValue>(
    key: NewPKey,
    values: Iterable<NewPValue>
  ): CaseParamsBuilder<Merged<CaseP, { [name in NewPKey]: NewPValue }>> {
    assertNotGenerator(values);
    const mapped = mapLazy(values, v => ({ [key]: v } as { [name in NewPKey]: NewPValue }));
    return this.combineWithParams(mapped);
  }
  /** @inheritDoc */
  filter(pred: (_: Merged<{}, CaseP>) => boolean): CaseParamsBuilder<CaseP> {
    const newGenerator = filterGenerator(this.cases, pred);
    return new CaseParamsBuilder(() => newGenerator({}));
  }
  /** @inheritDoc */
  unless(pred: (_: Merged<{}, CaseP>) => boolean): CaseParamsBuilder<CaseP> {
    return this.filter(x => !pred(x));
  }
  /**
   * "Finalize" the list of cases and begin defining subcases.
   * Returns a new SubcaseParamsBuilder. Methods called on SubcaseParamsBuilder
   * generate new subcases instead of new cases.
   */
  beginSubcases(): SubcaseParamsBuilder<CaseP, {}> {
    return new SubcaseParamsBuilder(
      () => this.cases(),
      function* () {
        yield {};
      }
    );
  }
}
/**
 * The unit CaseParamsBuilder, representing a single case with no params: `[ {} ]`.
 *
 * `punit` is passed to every `.params()`/`.paramsSubcasesOnly()` call, so `kUnitCaseParamsBuilder`
 * is only explicitly needed if constructing a ParamsBuilder outside of a test builder.
 */
export const kUnitCaseParamsBuilder = new CaseParamsBuilder(function* () {
  yield {};
});
/**
 * Builder for combinatorial test _subcase_ parameters.
 *
 * SubcaseParamsBuilder is immutable. Each method call returns a new, immutable object,
 * modifying the list of subcases according to the method called.
 */
export class SubcaseParamsBuilder<CaseP extends {}, SubcaseP extends {}>
  extends ParamsBuilderBase<CaseP, SubcaseP>
  implements ParamsBuilder {
  // Factory producing the subcase params generator for a given case's params.
  protected readonly subcases: (_: CaseP) => Generator<SubcaseP>;
  constructor(cases: () => Generator<CaseP>, generator: (_: CaseP) => Generator<SubcaseP>) {
    super(cases);
    this.subcases = generator;
  }
  *iterateCasesWithSubcases(): CaseSubcaseIterable<CaseP, SubcaseP> {
    for (const caseP of this.cases()) {
      const subcases = Array.from(this.subcases(caseP));
      // Cases whose subcase list is empty are omitted entirely.
      if (subcases.length) {
        yield [caseP, subcases];
      }
    }
  }
  /** @inheritDoc */
  expandWithParams<NewP extends {}>(
    expander: (_: Merged<CaseP, SubcaseP>) => Iterable<NewP>
  ): SubcaseParamsBuilder<CaseP, Merged<SubcaseP, NewP>> {
    return new SubcaseParamsBuilder(this.cases, expanderGenerator(this.subcases, expander));
  }
  /** @inheritDoc */
  expand<NewPKey extends string, NewPValue>(
    key: NewPKey,
    expander: (_: Merged<CaseP, SubcaseP>) => Iterable<NewPValue>
  ): SubcaseParamsBuilder<CaseP, Merged<SubcaseP, { [name in NewPKey]: NewPValue }>> {
    return this.expandWithParams(function* (p) {
      for (const value of expander(p)) {
        // TypeScript doesn't know here that NewPKey is always a single literal string type.
        yield { [key]: value } as { [name in NewPKey]: NewPValue };
      }
    });
  }
  /** @inheritDoc */
  combineWithParams<NewP extends {}>(
    newParams: Iterable<NewP>
  ): SubcaseParamsBuilder<CaseP, Merged<SubcaseP, NewP>> {
    assertNotGenerator(newParams);
    return this.expandWithParams(() => newParams);
  }
  /** @inheritDoc */
  combine<NewPKey extends string, NewPValue>(
    key: NewPKey,
    values: Iterable<NewPValue>
  ): SubcaseParamsBuilder<CaseP, Merged<SubcaseP, { [name in NewPKey]: NewPValue }>> {
    assertNotGenerator(values);
    return this.expand(key, () => values);
  }
  /** @inheritDoc */
  filter(pred: (_: Merged<CaseP, SubcaseP>) => boolean): SubcaseParamsBuilder<CaseP, SubcaseP> {
    return new SubcaseParamsBuilder(this.cases, filterGenerator(this.subcases, pred));
  }
  /** @inheritDoc */
  unless(pred: (_: Merged<CaseP, SubcaseP>) => boolean): SubcaseParamsBuilder<CaseP, SubcaseP> {
    return this.filter(x => !pred(x));
  }
}
/** Wraps a generator so each yielded value is expanded into zero or more merged values. */
function expanderGenerator<Base, A, B>(
  baseGenerator: (_: Base) => Generator<A>,
  expander: (_: Merged<Base, A>) => Iterable<B>
): (_: Base) => Generator<Merged<A, B>> {
  return function* (base: Base) {
    for (const item of baseGenerator(base)) {
      const merged = mergeParams(base, item);
      for (const expansion of expander(merged)) {
        yield mergeParams(item, expansion);
      }
    }
  };
}
/** Wraps a generator so only values passing the predicate (on the merged params) are yielded. */
function filterGenerator<Base, A>(
  baseGenerator: (_: Base) => Generator<A>,
  pred: (_: Merged<Base, A>) => boolean
): (_: Base) => Generator<A> {
  return function* (base: Base) {
    for (const item of baseGenerator(base)) {
      const keep = pred(mergeParams(base, item));
      if (keep) yield item;
    }
  };
}
/** Assert an object is not a Generator (a thing returned from a generator function). */
function assertNotGenerator(x: object) {
  // Objects with a null prototype have no 'constructor'; they cannot be generators.
  if (!('constructor' in x)) {
    return;
  }
  const generatorCtor = (function* () {})().constructor;
  assert(
    x.constructor !== generatorCtor,
    'Argument must not be a generator, as generators are not reusable'
  );
}

View File

@ -0,0 +1,110 @@
/**
 * Base path for resources. The default value is correct for non-worker WPT, but standalone and
 * workers must access resources using a different base path, so this is overridden in
 * `test_worker-worker.ts` and `standalone.ts`.
 */
let baseResourcePath = './resources';
/** Cross-origin host to load resources from; empty string means none has been configured. */
let crossOriginHost = '';
/** Resolves `path` to an absolute path, relative to the current page's directory. */
function getAbsoluteBaseResourcePath(path: string) {
  // Already absolute; return unchanged.
  if (path.startsWith('/')) {
    return path;
  }
  // Start from the current page's directory (drop the final path component).
  const resolved = window.location.pathname.split('/');
  resolved.pop();
  // Apply each segment of the relative path.
  for (const segment of path.split('/')) {
    if (segment === '' || segment === '.') {
      continue;
    }
    if (segment === '..') {
      resolved.pop();
    } else {
      resolved.push(segment);
    }
  }
  return resolved.join('/');
}
/** True if the page is being served from a localhost address (IPv4, IPv6, or by name). */
function runningOnLocalHost(): boolean {
  const { hostname } = window.location;
  return ['localhost', '127.0.0.1', '::1'].includes(hostname);
}
/**
 * Get a path to a resource in the `resources` directory relative to the current execution context
 * (html file or worker .js file), for `fetch()`, `<img>`, `<video>`, etc., but from a cross-origin
 * host. Provide `onlineUrl` if the case is running online.
 * @internal MAINTENANCE_TODO: Cases may run in a LAN environment (not localhost but no internet
 * access). We temporarily use `crossOriginHost` to configure the cross-origin host name in that
 * situation, but remain open to an auto-detection mechanism or other solutions.
 */
export function getCrossOriginResourcePath(pathRelativeToResourcesDir: string, onlineUrl = '') {
  // An explicit cross-origin host has been configured; use it to load the resource.
  if (crossOriginHost !== '') {
    return (
      crossOriginHost +
      getAbsoluteBaseResourcePath(baseResourcePath) +
      '/' +
      pathRelativeToResourcesDir
    );
  }
  // Use the 'localhost'/'127.0.0.1' trick to load a cross-origin resource: if the page is served
  // from 'localhost', use '127.0.0.1' as the cross-origin host, and vice versa.
  if (runningOnLocalHost()) {
    const crossOriginHostName =
      location.hostname === 'localhost' ? 'http://127.0.0.1' : 'http://localhost';
    return (
      crossOriginHostName +
      ':' +
      location.port +
      getAbsoluteBaseResourcePath(baseResourcePath) +
      '/' +
      pathRelativeToResourcesDir
    );
  }
  return onlineUrl;
}
/**
 * Get a path to a resource in the `resources` directory, relative to the current execution context
 * (html file or worker .js file), for `fetch()`, `<img>`, `<video>`, etc. Pass the cross-origin
 * host name if the resource should be loaded from a cross-origin host.
 */
export function getResourcePath(pathRelativeToResourcesDir: string) {
  return `${baseResourcePath}/${pathRelativeToResourcesDir}`;
}
/**
 * Set the base resource path (path to the `resources` directory relative to the current
 * execution context). Called by runtimes (e.g. standalone/worker) that serve resources
 * from a different location than the default.
 */
export function setBaseResourcePath(path: string) {
  baseResourcePath = path;
}
/**
 * Set the cross-origin host. Cases that use cross-origin resources
 * will load them from the given host.
 */
export function setCrossOriginHost(host: string) {
  crossOriginHost = host;
}

View File

@ -0,0 +1,20 @@
/** Runtime-configurable options for the test framework. */
export type TestConfig = {
  maxSubcasesInFlight: number;
  testHeartbeatCallback: () => void;
  noRaceWithRejectOnTimeout: boolean;
  /**
   * Controls the emission of loops in constant-evaluation shaders under
   * 'webgpu:shader,execution,expression,*'
   * FXC is extremely slow to compile shaders with loops unrolled, whereas the
   * MSL compiler is extremely slow to compile with loops rolled.
   */
  unrollConstEvalLoops: boolean;
};
/** Global singleton holding the active TestConfig, initialized with default values. */
export const globalTestConfig: TestConfig = {
  maxSubcasesInFlight: 500,
  testHeartbeatCallback: () => {},
  noRaceWithRejectOnTimeout: false,
  unrollConstEvalLoops: false,
};

View File

@ -0,0 +1 @@
/** Public entry point for defining test groups; re-exported from the internal module. */
export { makeTestGroup } from '../internal/test_group.js';

View File

@ -0,0 +1,95 @@
import { IterableTestGroup } from '../internal/test_group.js';
import { assert } from '../util/util.js';
import { parseQuery } from './query/parseQuery.js';
import { TestQuery } from './query/query.js';
import { TestSuiteListing } from './test_suite_listing.js';
import { loadTreeForQuery, TestTree, TestTreeLeaf } from './tree.js';
// A listing file, e.g. either of:
// - `src/webgpu/listing.ts` (which is dynamically computed, has a Promise<TestSuiteListing>)
// - `out/webgpu/listing.js` (which is pre-baked, has a TestSuiteListing)
interface ListingFile {
  listing: Promise<TestSuiteListing> | TestSuiteListing;
}
// A .spec.ts file, as imported.
export interface SpecFile {
  readonly description: string;
  readonly g: IterableTestGroup;
}
/** Data attached to 'import' events: the URL of the spec file being imported. */
export interface ImportInfo {
  url: string;
}
/** Events dispatched by TestFileLoader: one 'import' per spec file, 'finish' when done. */
interface TestFileLoaderEventMap {
  import: MessageEvent<ImportInfo>;
  finish: MessageEvent<void>;
}
// Strongly-typed addEventListener/removeEventListener overloads for TestFileLoader.
// (Declaration-merged with the abstract class of the same name.)
export interface TestFileLoader extends EventTarget {
  addEventListener<K extends keyof TestFileLoaderEventMap>(
    type: K,
    listener: (this: TestFileLoader, ev: TestFileLoaderEventMap[K]) => void,
    options?: boolean | AddEventListenerOptions
  ): void;
  addEventListener(
    type: string,
    listener: EventListenerOrEventListenerObject,
    options?: boolean | AddEventListenerOptions
  ): void;
  removeEventListener<K extends keyof TestFileLoaderEventMap>(
    type: K,
    listener: (this: TestFileLoader, ev: TestFileLoaderEventMap[K]) => void,
    options?: boolean | EventListenerOptions
  ): void;
  removeEventListener(
    type: string,
    listener: EventListenerOrEventListenerObject,
    options?: boolean | EventListenerOptions
  ): void;
}
// Base class for DefaultTestFileLoader and FakeTestFileLoader.
export abstract class TestFileLoader extends EventTarget {
  abstract listing(suite: string): Promise<TestSuiteListing>;
  protected abstract import(path: string): Promise<SpecFile>;
  /** Imports one spec file, dispatching an 'import' event with its URL. */
  importSpecFile(suite: string, path: string[]): Promise<SpecFile> {
    const url = `${suite}/${path.join('/')}.spec.js`;
    const importEvent = new MessageEvent<ImportInfo>('import', { data: { url } });
    this.dispatchEvent(importEvent);
    return this.import(url);
  }
  /** Loads the test tree for a query, dispatching 'finish' when loading completes. */
  async loadTree(query: TestQuery, subqueriesToExpand: string[] = []): Promise<TestTree> {
    const expansions = subqueriesToExpand.map(s => {
      const q = parseQuery(s);
      assert(q.level >= 2, () => `subqueriesToExpand entries should not be multi-file:\n ${q}`);
      return q;
    });
    const tree = await loadTreeForQuery(this, query, expansions);
    this.dispatchEvent(new MessageEvent<void>('finish'));
    return tree;
  }
  /** Loads the test tree for a query and returns an iterator over its leaf cases. */
  async loadCases(query: TestQuery): Promise<IterableIterator<TestTreeLeaf>> {
    const tree = await this.loadTree(query);
    return tree.iterateLeaves();
  }
}
/** TestFileLoader that dynamically `import()`s listings and spec files. */
export class DefaultTestFileLoader extends TestFileLoader {
  // NOTE(review): the template-literal import() arguments presumably must keep this exact
  // shape for the build tooling to resolve them — confirm before restructuring.
  async listing(suite: string): Promise<TestSuiteListing> {
    return ((await import(`../../${suite}/listing.js`)) as ListingFile).listing;
  }
  import(path: string): Promise<SpecFile> {
    return import(`../../${path}`);
  }
}

View File

@ -0,0 +1,44 @@
import { ErrorWithExtra } from '../../util/util.js';
import { extractImportantStackTrace } from '../stack.js';
/** An Error-like log entry that preserves the original exception's stack and `extra` data. */
export class LogMessageWithStack extends Error {
  readonly extra: unknown;
  // When set, the stack is replaced in toJSON() by this message ('' elides it silently).
  private stackHiddenMessage: string | undefined = undefined;
  constructor(name: string, ex: Error | ErrorWithExtra) {
    super(ex.message);
    this.name = name;
    this.stack = ex.stack;
    if ('extra' in ex) {
      this.extra = ex.extra;
    }
  }
  /** Set a flag so the stack is not printed in toJSON(). Only the first call takes effect. */
  setStackHidden(stackHiddenMessage: string) {
    if (this.stackHiddenMessage === undefined) {
      this.stackHiddenMessage = stackHiddenMessage;
    }
  }
  toJSON(): string {
    let out = this.name;
    if (this.message) {
      out += ': ' + this.message;
    }
    if (this.stack) {
      if (this.stackHiddenMessage === undefined) {
        out += '\n' + extractImportantStackTrace(this);
      } else if (this.stackHiddenMessage !== '') {
        out += `\n at (elided: ${this.stackHiddenMessage})`;
      }
    }
    return out;
  }
}
/**
 * Returns a string, nicely indented, for debug logs.
 * This is used in the cmdline and wpt runtimes. In WPT, it shows up in the `*-actual.txt` file.
 */
export function prettyPrintLog(log: LogMessageWithStack): string {
  const indented = log.toJSON().split('\n').join('\n ');
  return ' - ' + indented;
}

View File

@ -0,0 +1,30 @@
import { version } from '../version.js';
import { LiveTestCaseResult } from './result.js';
import { TestCaseRecorder } from './test_case_recorder.js';
/** Map from test case name to its live result. */
export type LogResults = Map<string, LiveTestCaseResult>;
/** Collects LiveTestCaseResults for test cases, and serializes them to JSON. */
export class Logger {
  static globalDebugMode: boolean = false;
  // Per-Logger override of globalDebugMode; undefined means "use the global setting".
  readonly overriddenDebugMode: boolean | undefined;
  readonly results: LogResults = new Map();
  constructor({ overrideDebugMode }: { overrideDebugMode?: boolean } = {}) {
    this.overriddenDebugMode = overrideDebugMode;
  }
  /** Registers a new case by name and returns a recorder plus its (live) result object. */
  record(name: string): [TestCaseRecorder, LiveTestCaseResult] {
    const result: LiveTestCaseResult = { status: 'running', timems: -1 };
    this.results.set(name, result);
    const debugMode = this.overriddenDebugMode ?? Logger.globalDebugMode;
    return [new TestCaseRecorder(result, debugMode), result];
  }
  /** Serializes all results (with the framework version) to JSON. */
  asJSON(space?: number): string {
    const payload = { version, results: Array.from(this.results) };
    return JSON.stringify(payload, undefined, space);
  }
}

View File

@ -0,0 +1,21 @@
import { LogMessageWithStack } from './log_message.js';
// MAINTENANCE_TODO: Add warn expectations
/** Expected outcome of a test case. */
export type Expectation = 'pass' | 'skip' | 'fail';
/** Runtime status of a test case: an Expectation, or 'running'/'warn'. */
export type Status = 'running' | 'warn' | Expectation;
export interface TestCaseResult {
  status: Status;
  timems: number;
}
/** Result owned by a live Logger; logs keep their LogMessageWithStack prototype. */
export interface LiveTestCaseResult extends TestCaseResult {
  logs?: LogMessageWithStack[];
}
export interface TransferredTestCaseResult extends TestCaseResult {
  // When transferred from a worker, a LogMessageWithStack turns into a generic Error
  // (its prototype gets lost and replaced with Error).
  logs?: Error[];
}

View File

@ -0,0 +1,158 @@
import { SkipTestCase, UnexpectedPassError } from '../../framework/fixture.js';
import { globalTestConfig } from '../../framework/test_config.js';
import { now, assert } from '../../util/util.js';
import { LogMessageWithStack } from './log_message.js';
import { Expectation, LiveTestCaseResult } from './result.js';
/** Severity of a log entry. Higher values are "worse" and win when statuses are aggregated. */
enum LogSeverity {
  Pass = 0,
  Skip = 1,
  Warn = 2,
  ExpectFailed = 3,
  ValidationFailed = 4,
  ThrewException = 5,
}
/** Maximum number of full stack traces kept at the highest severity seen so far. */
const kMaxLogStacks = 2;
/** Log entries below this severity never get a printed stack. */
const kMinSeverityForStack = LogSeverity.Warn;
/** Holds onto a LiveTestCaseResult owned by the Logger, and writes the results into it. */
export class TestCaseRecorder {
  // Result object owned by the Logger; finish() writes status/time/logs into it.
  private result: LiveTestCaseResult;
  // True while between beginSubCase() and endSubCase().
  private inSubCase: boolean = false;
  // Worst severity seen in the current subcase.
  private subCaseStatus = LogSeverity.Pass;
  // Worst severity seen across the whole case (including finished subcases).
  private finalCaseStatus = LogSeverity.Pass;
  // Stacks for entries below this severity are elided; rises as worse entries arrive.
  private hideStacksBelowSeverity = kMinSeverityForStack;
  // -1 until start() is called; also guards against recorder reuse.
  private startTime = -1;
  private logs: LogMessageWithStack[] = [];
  // Count of entries logged at the current highest severity (caps stack printing).
  private logLinesAtCurrentSeverity = 0;
  private debugging = false;
  /** Used to dedup log messages which have identical stacks. */
  private messagesForPreviouslySeenStacks = new Map<string, LogMessageWithStack>();
  constructor(result: LiveTestCaseResult, debugging: boolean) {
    this.result = result;
    this.debugging = debugging;
  }
  /** Starts timing the case. Must be called exactly once per recorder. */
  start(): void {
    assert(this.startTime < 0, 'TestCaseRecorder cannot be reused');
    this.startTime = now();
  }
  /** Stops timing and writes the final status, time, and logs into the result. */
  finish(): void {
    assert(this.startTime >= 0, 'finish() before start()');
    const timeMilliseconds = now() - this.startTime;
    // Round to next microsecond to avoid storing useless .xxxx00000000000002 in results.
    this.result.timems = Math.ceil(timeMilliseconds * 1000) / 1000;
    // Convert numeric enum back to string (but expose 'exception' as 'fail')
    this.result.status =
      this.finalCaseStatus === LogSeverity.Pass
        ? 'pass'
        : this.finalCaseStatus === LogSeverity.Skip
        ? 'skip'
        : this.finalCaseStatus === LogSeverity.Warn
        ? 'warn'
        : 'fail'; // Everything else is an error
    this.result.logs = this.logs;
  }
  /** Resets per-subcase state before running a subcase. */
  beginSubCase() {
    this.subCaseStatus = LogSeverity.Pass;
    this.inSubCase = false;
    this.inSubCase = true;
  }
  /**
   * Ends the current subcase, reconciling its status against the expectation
   * (an expected-fail that passed becomes an UnexpectedPassError), then folds the
   * subcase status into the case status.
   */
  endSubCase(expectedStatus: Expectation) {
    try {
      if (expectedStatus === 'fail') {
        if (this.subCaseStatus <= LogSeverity.Warn) {
          throw new UnexpectedPassError();
        } else {
          this.subCaseStatus = LogSeverity.Pass;
        }
      }
    } finally {
      this.inSubCase = false;
      if (this.subCaseStatus > this.finalCaseStatus) {
        this.finalCaseStatus = this.subCaseStatus;
      }
    }
  }
  /** Overwrites this recorder's result with one computed elsewhere (e.g. in a worker). */
  injectResult(injectedResult: LiveTestCaseResult): void {
    Object.assign(this.result, injectedResult);
  }
  /** Logs at Pass severity; dropped entirely unless debugging is enabled. */
  debug(ex: Error): void {
    if (!this.debugging) return;
    this.logImpl(LogSeverity.Pass, 'DEBUG', ex);
  }
  info(ex: Error): void {
    this.logImpl(LogSeverity.Pass, 'INFO', ex);
  }
  skipped(ex: SkipTestCase): void {
    this.logImpl(LogSeverity.Skip, 'SKIP', ex);
  }
  warn(ex: Error): void {
    this.logImpl(LogSeverity.Warn, 'WARN', ex);
  }
  expectationFailed(ex: Error): void {
    this.logImpl(LogSeverity.ExpectFailed, 'EXPECTATION FAILED', ex);
  }
  validationFailed(ex: Error): void {
    this.logImpl(LogSeverity.ValidationFailed, 'VALIDATION FAILED', ex);
  }
  /** Logs a thrown value; SkipTestCase is treated as a skip rather than an exception. */
  threw(ex: unknown): void {
    if (ex instanceof SkipTestCase) {
      this.skipped(ex);
      return;
    }
    this.logImpl(LogSeverity.ThrewException, 'EXCEPTION', ex);
  }
  private logImpl(level: LogSeverity, name: string, baseException: unknown): void {
    assert(baseException instanceof Error, 'test threw a non-Error object');
    globalTestConfig.testHeartbeatCallback();
    const logMessage = new LogMessageWithStack(name, baseException);
    // Final case status should be the "worst" of all log entries.
    if (this.inSubCase) {
      if (level > this.subCaseStatus) this.subCaseStatus = level;
    } else {
      if (level > this.finalCaseStatus) this.finalCaseStatus = level;
    }
    // setFirstLineOnly for all logs except `kMaxLogStacks` stacks at the highest severity
    if (level > this.hideStacksBelowSeverity) {
      this.logLinesAtCurrentSeverity = 0;
      this.hideStacksBelowSeverity = level;
      // Go back and setFirstLineOnly for everything of a lower log level
      for (const log of this.logs) {
        log.setStackHidden('below max severity');
      }
    }
    if (level === this.hideStacksBelowSeverity) {
      this.logLinesAtCurrentSeverity++;
    } else if (level < kMinSeverityForStack) {
      logMessage.setStackHidden('');
    } else if (level < this.hideStacksBelowSeverity) {
      logMessage.setStackHidden('below max severity');
    }
    if (this.logLinesAtCurrentSeverity > kMaxLogStacks) {
      logMessage.setStackHidden(`only ${kMaxLogStacks} shown`);
    }
    this.logs.push(logMessage);
  }
}

View File

@ -0,0 +1,124 @@
import { TestParams } from '../framework/fixture.js';
import { ResolveType, UnionToIntersection } from '../util/types.js';
import { assert } from '../util/util.js';
import { comparePublicParamsPaths, Ordering } from './query/compare.js';
import { kWildcard, kParamSeparator, kParamKVSeparator } from './query/separators.js';
/** JSON-compatible values, extended with `undefined`. */
export type JSONWithUndefined =
  | undefined
  | null
  | number
  | string
  | boolean
  | readonly JSONWithUndefined[]
  // Ideally this would recurse into JSONWithUndefined, but it breaks code.
  | { readonly [k: string]: unknown };
/** Mutable (read-write) counterpart of TestParams. */
export interface TestParamsRW {
  [k: string]: JSONWithUndefined;
}
export type TestParamsIterable = Iterable<TestParams>;
/** A param key is public unless it is "private" (prefixed with an underscore). */
export function paramKeyIsPublic(key: string): boolean {
  const isPrivate = key.startsWith('_');
  return !isPrivate;
}
/** Returns a copy of `params` with all private (underscore-prefixed) keys removed. */
export function extractPublicParams(params: TestParams): TestParams {
  const publicEntries = Object.entries(params).filter(([k]) => paramKeyIsPublic(k));
  return Object.fromEntries(publicEntries);
}
// Matches characters forbidden in stringified param values because they would
// collide with query syntax (the k=v separator, param separator, or wildcard).
export const badParamValueChars = new RegExp(
  '[' + kParamKVSeparator + kParamSeparator + kWildcard + ']'
);
/** True if the public params of `x` and `y` compare as exactly Equal. */
export function publicParamsEquals(x: TestParams, y: TestParams): boolean {
  return comparePublicParamsPaths(x, y) === Ordering.Equal;
}
/** `keyof T`, except `never` maps to `never` instead of producing keys. */
export type KeyOfNeverable<T> = T extends never ? never : keyof T;
/** All keys appearing in any component of the union `T`. */
export type AllKeysFromUnion<T> = keyof T | KeyOfNeverable<UnionToIntersection<T>>;
/** `T[K]` if `K` is a key of `T`; otherwise `Default`. */
export type KeyOfOr<T, K, Default> = K extends keyof T ? T[K] : Default;
/**
 * Flatten a union of interfaces into a single interface encoding the same type.
 *
 * Flattens a union in such a way that:
 * `{ a: number, b?: undefined } | { b: string, a?: undefined }`
 * (which is the value type of `[{ a: 1 }, { b: 1 }]`)
 * becomes `{ a: number | undefined, b: string | undefined }`.
 *
 * And also works for `{ a: number } | { b: string }` which maps to the same.
 */
export type FlattenUnionOfInterfaces<T> = {
  [K in AllKeysFromUnion<T>]: KeyOfOr<
    T,
    // If T always has K, just take T[K] (union of C[K] for each component C of T):
    K,
    // Otherwise, take the union of C[K] for each component C of T, PLUS undefined:
    undefined | KeyOfOr<UnionToIntersection<T>, K, void>
  >;
};
/* eslint-disable-next-line @typescript-eslint/no-unused-vars */
function typeAssert<T extends 'pass'>() {}
// Compile-time tests for FlattenUnionOfInterfaces (this block emits no runtime code).
{
  type Test<T, U> = [T] extends [U]
    ? [U] extends [T]
      ? 'pass'
      : { actual: ResolveType<T>; expected: U }
    : { actual: ResolveType<T>; expected: U };
  type T01 = { a: number } | { b: string };
  type T02 = { a: number } | { b?: string };
  type T03 = { a: number } | { a?: number };
  type T04 = { a: number } | { a: string };
  type T05 = { a: number } | { a?: string };
  type T11 = { a: number; b?: undefined } | { a?: undefined; b: string };
  type T21 = { a: number; b?: undefined } | { b: string };
  type T22 = { a: number; b?: undefined } | { b?: string };
  type T23 = { a: number; b?: undefined } | { a?: number };
  type T24 = { a: number; b?: undefined } | { a: string };
  type T25 = { a: number; b?: undefined } | { a?: string };
  type T26 = { a: number; b?: undefined } | { a: undefined };
  type T27 = { a: number; b?: undefined } | { a: undefined; b: undefined };
  /* prettier-ignore */ {
    typeAssert<Test<FlattenUnionOfInterfaces<T01>, { a: number | undefined; b: string | undefined }>>();
    typeAssert<Test<FlattenUnionOfInterfaces<T02>, { a: number | undefined; b: string | undefined }>>();
    typeAssert<Test<FlattenUnionOfInterfaces<T03>, { a: number | undefined }>>();
    typeAssert<Test<FlattenUnionOfInterfaces<T04>, { a: number | string }>>();
    typeAssert<Test<FlattenUnionOfInterfaces<T05>, { a: number | string | undefined }>>();
    typeAssert<Test<FlattenUnionOfInterfaces<T11>, { a: number | undefined; b: string | undefined }>>();
    typeAssert<Test<FlattenUnionOfInterfaces<T22>, { a: number | undefined; b: string | undefined }>>();
    typeAssert<Test<FlattenUnionOfInterfaces<T23>, { a: number | undefined; b: undefined }>>();
    typeAssert<Test<FlattenUnionOfInterfaces<T24>, { a: number | string; b: undefined }>>();
    typeAssert<Test<FlattenUnionOfInterfaces<T25>, { a: number | string | undefined; b: undefined }>>();
    typeAssert<Test<FlattenUnionOfInterfaces<T27>, { a: number | undefined; b: undefined }>>();
    // Unexpected test results - hopefully okay to ignore these
    typeAssert<Test<FlattenUnionOfInterfaces<T21>, { b: string | undefined }>>();
    typeAssert<Test<FlattenUnionOfInterfaces<T26>, { a: number | undefined }>>();
  }
}
/** Merge of `A` and flattened `B`; keys of `B` win on conflict. */
export type Merged<A, B> = MergedFromFlat<A, FlattenUnionOfInterfaces<B>>;
export type MergedFromFlat<A, B> = {
  [K in keyof A | keyof B]: K extends keyof B ? B[K] : K extends keyof A ? A[K] : never;
};
/** Merges two param objects; asserts that their key sets are disjoint. */
export function mergeParams<A extends {}, B extends {}>(a: A, b: B): Merged<A, B> {
  Object.keys(a).forEach(key => {
    assert(!(key in b), 'Duplicate key: ' + key);
  });
  return { ...a, ...b } as Merged<A, B>;
}

View File

@ -0,0 +1,94 @@
import { TestParams } from '../../framework/fixture.js';
import { assert, objectEquals } from '../../util/util.js';
import { paramKeyIsPublic } from '../params_utils.js';
import { TestQuery } from './query.js';
/** Result of comparing two queries; used to order nodes when building the test tree. */
export const enum Ordering {
  Unordered,
  StrictSuperset,
  Equal,
  StrictSubset,
}
/**
 * Compares two queries for their ordering (which is used to build the tree).
 *
 * Compares level by level: suite, then file path, then test path, then params.
 * See src/unittests/query_compare.spec.ts for examples.
 */
export function compareQueries(a: TestQuery, b: TestQuery): Ordering {
  // Queries from different suites are never comparable.
  if (a.suite !== b.suite) {
    return Ordering.Unordered;
  }
  const filePathOrdering = comparePaths(a.filePathParts, b.filePathParts);
  if (filePathOrdering !== Ordering.Equal || a.isMultiFile || b.isMultiFile) {
    return compareOneLevel(filePathOrdering, a.isMultiFile, b.isMultiFile);
  }
  // Both queries are at least test-level with identical file paths.
  assert('testPathParts' in a && 'testPathParts' in b);
  const testPathOrdering = comparePaths(a.testPathParts, b.testPathParts);
  if (testPathOrdering !== Ordering.Equal || a.isMultiTest || b.isMultiTest) {
    return compareOneLevel(testPathOrdering, a.isMultiTest, b.isMultiTest);
  }
  // Both queries are case-level with identical test paths.
  assert('params' in a && 'params' in b);
  const paramsPathOrdering = comparePublicParamsPaths(a.params, b.params);
  if (paramsPathOrdering !== Ordering.Equal || a.isMultiCase || b.isMultiCase) {
    return compareOneLevel(paramsPathOrdering, a.isMultiCase, b.isMultiCase);
  }
  return Ordering.Equal;
}
/**
 * Compares a single level of a query.
 *
 * "IsBig" means the query is big relative to the level, e.g. for test-level:
 * - Anything >= `suite:a,*` is big
 * - Anything <= `suite:a:*` is small
 */
function compareOneLevel(ordering: Ordering, aIsBig: boolean, bIsBig: boolean): Ordering {
  assert(ordering !== Ordering.Equal || aIsBig || bIsBig);
  if (ordering === Ordering.Unordered) {
    return Ordering.Unordered;
  }
  if (aIsBig && bIsBig) {
    return ordering;
  }
  if (!aIsBig && !bIsBig) {
    // The truly-Equal case was already handled by the caller.
    return Ordering.Unordered;
  }
  // Exactly one of (a, b) is big.
  if (aIsBig) {
    return ordering !== Ordering.StrictSubset ? Ordering.StrictSuperset : Ordering.Unordered;
  }
  return ordering !== Ordering.StrictSuperset ? Ordering.StrictSubset : Ordering.Unordered;
}
/** Compares two path-part arrays by prefix relationship. */
function comparePaths(a: readonly string[], b: readonly string[]): Ordering {
  const commonLength = Math.min(a.length, b.length);
  for (let idx = 0; idx < commonLength; ++idx) {
    if (a[idx] !== b[idx]) {
      return Ordering.Unordered;
    }
  }
  if (a.length === b.length) {
    return Ordering.Equal;
  }
  // The shorter path is the (strict) superset.
  return a.length < b.length ? Ordering.StrictSuperset : Ordering.StrictSubset;
}
/** Compares the public params of two queries by subset relationship on their key sets. */
export function comparePublicParamsPaths(a: TestParams, b: TestParams): Ordering {
  const aKeys = Object.keys(a).filter(k => paramKeyIsPublic(k));
  const commonKeys = new Set(aKeys.filter(k => k in b));
  // Any disagreement on a shared key makes the queries incomparable.
  for (const k of commonKeys) {
    if (!objectEquals(a[k], b[k])) {
      return Ordering.Unordered;
    }
  }
  const bKeys = Object.keys(b).filter(k => paramKeyIsPublic(k));
  const aExtraKeys = aKeys.length - commonKeys.size;
  const bExtraKeys = bKeys.length - commonKeys.size;
  if (aExtraKeys === 0 && bExtraKeys === 0) return Ordering.Equal;
  // Fewer params = broader query = superset.
  if (aExtraKeys === 0) return Ordering.StrictSuperset;
  if (bExtraKeys === 0) return Ordering.StrictSubset;
  return Ordering.Unordered;
}

View File

@ -0,0 +1,23 @@
/**
 * Encodes a stringified TestQuery so that it can be placed in a `?q=` parameter in a URL.
 *
 * `encodeURIComponent` encodes in accordance with `application/x-www-form-urlencoded`,
 * but URLs don't actually have to be as strict as HTML form encoding
 * (we interpret this purely from JavaScript).
 * So we encode the component, then selectively convert some %-encoded escape codes
 * back to their original form for readability/copyability.
 */
export function encodeURIComponentSelectively(s: string): string {
  // Escape codes to convert back to their literal characters, and why each is safe.
  const decodeForReadability: ReadonlyArray<[RegExp, string]> = [
    [/%22/g, '"'], // for JSON strings
    [/%2C/g, ','], // for path separator, and JSON arrays
    [/%3A/g, ':'], // for big separator
    [/%3B/g, ';'], // for param separator
    [/%3D/g, '='], // for params (k=v)
    [/%5B/g, '['], // for JSON arrays
    [/%5D/g, ']'], // for JSON arrays
    [/%7B/g, '{'], // for JSON objects
    [/%7D/g, '}'], // for JSON objects
    [/%E2%9C%97/g, '✗'], // for jsUndefinedMagicValue
  ];
  let encoded = encodeURIComponent(s);
  for (const [escapeCode, literal] of decodeForReadability) {
    encoded = encoded.replace(escapeCode, literal);
  }
  return encoded;
}

View File

@ -0,0 +1,83 @@
import { assert, sortObjectByKey } from '../../util/util.js';
import { JSONWithUndefined } from '../params_utils.js';
// JSON can't represent various values and by default stores them as `null`.
// Instead, storing them as a magic string values in JSON.
const jsUndefinedMagicValue = '_undef_';
const jsNaNMagicValue = '_nan_';
const jsPositiveInfinityMagicValue = '_posinfinity_';
const jsNegativeInfinityMagicValue = '_neginfinity_';
// -0 needs to be handled separately, because -0 === +0 returns true. Not
// special casing +0/0, since it behaves intuitively. Assuming that if -0 is
// being used, the differentiation from +0 is desired.
const jsNegativeZeroMagicValue = '_negzero_';
// Lookup table from special value to its magic string, used when stringifying.
const toStringMagicValue = new Map<unknown, string>([
  [undefined, jsUndefinedMagicValue],
  [NaN, jsNaNMagicValue],
  [Number.POSITIVE_INFINITY, jsPositiveInfinityMagicValue],
  [Number.NEGATIVE_INFINITY, jsNegativeInfinityMagicValue],
  // No -0 handling because it is special cased.
]);
// Inverse lookup table, used when parsing magic strings back to values.
const fromStringMagicValue = new Map<string, unknown>([
  [jsUndefinedMagicValue, undefined],
  [jsNaNMagicValue, NaN],
  [jsPositiveInfinityMagicValue, Number.POSITIVE_INFINITY],
  [jsNegativeInfinityMagicValue, Number.NEGATIVE_INFINITY],
  // -0 is handled in this direction because there is no comparison issue.
  [jsNegativeZeroMagicValue, -0],
]);
/** JSON.stringify replacer that maps special values to their magic strings. */
function stringifyFilter(k: string, v: unknown): unknown {
  // Make sure no one actually uses a magic value as a parameter.
  if (typeof v === 'string') {
    const collidesWithMagic = fromStringMagicValue.has(v) || v === jsNegativeZeroMagicValue;
    assert(!collidesWithMagic, `${v} is a magic value for stringification, so cannot be used`);
  }
  if (Object.is(v, -0)) {
    return jsNegativeZeroMagicValue;
  }
  if (toStringMagicValue.has(v)) {
    return toStringMagicValue.get(v);
  }
  return v;
}
/** Stringify a param value for use in a query string, encoding special values as magic strings. */
export function stringifyParamValue(value: JSONWithUndefined): string {
  return JSON.stringify(value, stringifyFilter);
}
/**
 * Like stringifyParamValue but sorts dictionaries by key, for hashing.
 */
export function stringifyParamValueUniquely(value: JSONWithUndefined): string {
  return JSON.stringify(value, (k, v) => {
    if (typeof v === 'object' && v !== null) {
      return sortObjectByKey(v);
    }
    return stringifyFilter(k, v);
  });
}
// 'any' is part of the JSON.parse reviver interface, so cannot be avoided.
// eslint-disable-next-line @typescript-eslint/no-explicit-any
function parseParamValueReviver(k: string, v: any): any {
  // Decode magic strings back into the special values they represent.
  if (fromStringMagicValue.has(v)) {
    return fromStringMagicValue.get(v);
  }
  return v;
}
/** Parse a param value from a query string, decoding magic strings back to special values. */
export function parseParamValue(s: string): JSONWithUndefined {
  return JSON.parse(s, parseParamValueReviver);
}

View File

@ -0,0 +1,155 @@
import { assert } from '../../util/util.js';
import {
TestParamsRW,
JSONWithUndefined,
badParamValueChars,
paramKeyIsPublic,
} from '../params_utils.js';
import { parseParamValue } from './json_param_value.js';
import {
TestQuery,
TestQueryMultiFile,
TestQueryMultiTest,
TestQueryMultiCase,
TestQuerySingleCase,
} from './query.js';
import { kBigSeparator, kWildcard, kPathSeparator, kParamSeparator } from './separators.js';
import { validQueryPart } from './validQueryPart.js';
/** Parses a query string into a TestQuery, annotating any parse error with the input. */
export function parseQuery(s: string): TestQuery {
  try {
    return parseQueryImpl(s);
  } catch (ex) {
    // Append the offending query string to the error message for easier debugging.
    if (ex instanceof Error) {
      ex.message += '\n on: ' + s;
    }
    throw ex;
  }
}
/** Parses a decoded query string into the appropriate TestQuery level. */
function parseQueryImpl(s: string): TestQuery {
  // Undo encodeURIComponentSelectively
  s = decodeURIComponent(s);
  // bigParts are: suite, file, test, params (note kBigSeparator could appear in params)
  let suite: string;
  let fileString: string | undefined;
  let testString: string | undefined;
  let paramsString: string | undefined;
  {
    // Split on at most the first three big separators; params may contain more.
    const i1 = s.indexOf(kBigSeparator);
    assert(i1 !== -1, `query string must have at least one ${kBigSeparator}`);
    suite = s.substring(0, i1);
    const i2 = s.indexOf(kBigSeparator, i1 + 1);
    if (i2 === -1) {
      fileString = s.substring(i1 + 1);
    } else {
      fileString = s.substring(i1 + 1, i2);
      const i3 = s.indexOf(kBigSeparator, i2 + 1);
      if (i3 === -1) {
        testString = s.substring(i2 + 1);
      } else {
        testString = s.substring(i2 + 1, i3);
        paramsString = s.substring(i3 + 1);
      }
    }
  }
  const { parts: file, wildcard: filePathHasWildcard } = parseBigPart(fileString, kPathSeparator);
  if (testString === undefined) {
    // Query is file-level
    assert(
      filePathHasWildcard,
      `File-level query without wildcard ${kWildcard}. Did you want a file-level query \
(append ${kPathSeparator}${kWildcard}) or test-level query (append ${kBigSeparator}${kWildcard})?`
    );
    return new TestQueryMultiFile(suite, file);
  }
  assert(!filePathHasWildcard, `Wildcard ${kWildcard} must be at the end of the query string`);
  const { parts: test, wildcard: testPathHasWildcard } = parseBigPart(testString, kPathSeparator);
  if (paramsString === undefined) {
    // Query is test-level
    assert(
      testPathHasWildcard,
      `Test-level query without wildcard ${kWildcard}; did you want a test-level query \
(append ${kPathSeparator}${kWildcard}) or case-level query (append ${kBigSeparator}${kWildcard})?`
    );
    assert(file.length > 0, 'File part of test-level query was empty (::)');
    return new TestQueryMultiTest(suite, file, test);
  }
  // Query is case-level
  assert(!testPathHasWildcard, `Wildcard ${kWildcard} must be at the end of the query string`);
  const { parts: paramsParts, wildcard: paramsHasWildcard } = parseBigPart(
    paramsString,
    kParamSeparator
  );
  assert(test.length > 0, 'Test part of case-level query was empty (::)');
  const params: TestParamsRW = {};
  for (const paramPart of paramsParts) {
    const [k, v] = parseSingleParam(paramPart);
    assert(validQueryPart.test(k), `param key names must match ${validQueryPart}`);
    params[k] = v;
  }
  // A trailing wildcard in the params means "all cases matching these params".
  if (paramsHasWildcard) {
    return new TestQueryMultiCase(suite, file, test, params);
  } else {
    return new TestQuerySingleCase(suite, file, test, params);
  }
}
// Example well-formed queries, interpolated into error messages below.
// webgpu:a,b,* or webgpu:a,b,c:*
const kExampleQueries = `\
webgpu${kBigSeparator}a${kPathSeparator}b${kPathSeparator}${kWildcard} or \
webgpu${kBigSeparator}a${kPathSeparator}b${kPathSeparator}c${kBigSeparator}${kWildcard}`;
/**
 * Splits one "big part" of a query (file path, test path, or params) on `separator`,
 * validating that any wildcard appears only as the complete final element.
 * Returns the parts (with any trailing wildcard removed) and whether one was present.
 */
function parseBigPart(
  s: string,
  separator: typeof kParamSeparator | typeof kPathSeparator
): { parts: string[]; wildcard: boolean } {
  if (s === '') {
    return { parts: [], wildcard: false };
  }
  const parts = s.split(separator);
  const endsWithWildcard = parts[parts.length - 1] === kWildcard;
  for (const [i, part] of parts.entries()) {
    const isLast = i === parts.length - 1;
    assert(
      part.indexOf(kWildcard) === -1 || (isLast && endsWithWildcard),
      `Wildcard ${kWildcard} must be complete last part of a path (e.g. ${kExampleQueries})`
    );
  }
  if (endsWithWildcard) {
    // Remove the trailing wildcard element itself.
    parts.pop();
  }
  return { parts, wildcard: endsWithWildcard };
}
/** Parses one `key=value` param substring into a [key, parsed value] pair. */
function parseSingleParam(paramSubstring: string): [string, JSONWithUndefined] {
  assert(paramSubstring !== '', 'Param in a query must not be blank (is there a trailing comma?)');
  const eq = paramSubstring.indexOf('=');
  assert(eq !== -1, 'Param in a query must be of form key=value');
  const key = paramSubstring.substring(0, eq);
  assert(paramKeyIsPublic(key), 'Param in a query must not be private (start with _)');
  const valueString = paramSubstring.substring(eq + 1);
  return [key, parseSingleParamValue(valueString)];
}
/** Parses a param value string, after asserting it contains no forbidden characters. */
function parseSingleParamValue(s: string): JSONWithUndefined {
  const hasBadChars = badParamValueChars.test(s);
  assert(!hasBadChars, `param value must not match ${badParamValueChars} - was ${s}`);
  return parseParamValue(s);
}

View File

@ -0,0 +1,262 @@
import { TestParams } from '../../framework/fixture.js';
import { optionEnabled } from '../../runtime/helper/options.js';
import { assert, unreachable } from '../../util/util.js';
import { Expectation } from '../logging/result.js';
import { compareQueries, Ordering } from './compare.js';
import { encodeURIComponentSelectively } from './encode_selectively.js';
import { parseQuery } from './parseQuery.js';
import { kBigSeparator, kPathSeparator, kWildcard } from './separators.js';
import { stringifyPublicParams } from './stringify_params.js';
/**
 * Represents a test query of some level (file, test, case, or single case).
 *
 * TestQuery types are immutable.
 */
export type TestQuery =
  | TestQuerySingleCase
  | TestQueryMultiCase
  | TestQueryMultiTest
  | TestQueryMultiFile;
/**
 * The level of a TestQuery, from coarsest (1) to finest (4):
 * - 1 = MultiFile.
 * - 2 = MultiTest.
 * - 3 = MultiCase.
 * - 4 = SingleCase.
 */
export type TestQueryLevel = 1 | 2 | 3 | 4;
/** Pairs a test query with the result expectation ('pass'/'fail'/'skip') applied to it. */
export interface TestQueryWithExpectation {
  query: TestQuery;
  expectation: Expectation;
}
/**
 * A multi-file test query, like `s:*` or `s:a,b,*`.
 *
 * Immutable (makes copies of constructor args).
 */
export class TestQueryMultiFile {
  readonly level: TestQueryLevel = 1;
  readonly isMultiFile: boolean = true;
  readonly suite: string;
  readonly filePathParts: readonly string[];

  constructor(suite: string, file: readonly string[]) {
    this.suite = suite;
    // Defensive copy so later mutation of the caller's array cannot leak in.
    this.filePathParts = Array.from(file);
  }

  /** How deep this query is within its level (number of file-path parts). */
  get depthInLevel() {
    return this.filePathParts.length;
  }

  toString(): string {
    const joined = this.toStringHelper().join(kBigSeparator);
    return encodeURIComponentSelectively(joined);
  }

  /** Returns the big parts of the query string, to be joined with kBigSeparator. */
  protected toStringHelper(): string[] {
    const fileWithWildcard = [...this.filePathParts, kWildcard].join(kPathSeparator);
    return [this.suite, fileWithWildcard];
  }
}
/**
 * A multi-test test query, like `s:f:*` or `s:f:a,b,*`.
 *
 * Immutable (makes copies of constructor args).
 */
export class TestQueryMultiTest extends TestQueryMultiFile {
  readonly level: TestQueryLevel = 2;
  readonly isMultiFile: false = false;
  readonly isMultiTest: boolean = true;
  readonly testPathParts: readonly string[];

  constructor(suite: string, file: readonly string[], test: readonly string[]) {
    super(suite, file);
    // A query this fine must identify at least one file-path part.
    assert(file.length > 0, 'multi-test (or finer) query must have file-path');
    // Defensive copy of the test path.
    this.testPathParts = Array.from(test);
  }

  /** How deep this query is within its level (number of test-path parts). */
  get depthInLevel() {
    return this.testPathParts.length;
  }

  protected toStringHelper(): string[] {
    const filePart = this.filePathParts.join(kPathSeparator);
    const testPart = [...this.testPathParts, kWildcard].join(kPathSeparator);
    return [this.suite, filePart, testPart];
  }
}
/**
 * A multi-case test query, like `s:f:t:*` or `s:f:t:a,b,*`.
 *
 * Immutable (makes copies of constructor args), except for param values
 * (which aren't normally supposed to change; they're marked readonly in TestParams).
 */
export class TestQueryMultiCase extends TestQueryMultiTest {
  readonly level: TestQueryLevel = 3;
  readonly isMultiTest: false = false;
  readonly isMultiCase: boolean = true;
  readonly params: TestParams;

  constructor(suite: string, file: readonly string[], test: readonly string[], params: TestParams) {
    super(suite, file, test);
    // A query this fine must identify at least one test-path part.
    assert(test.length > 0, 'multi-case (or finer) query must have test-path');
    // Shallow copy; the param values themselves are treated as readonly.
    this.params = Object.assign({}, params);
  }

  /** How deep this query is within its level (number of params). */
  get depthInLevel() {
    return Object.keys(this.params).length;
  }

  protected toStringHelper(): string[] {
    const filePart = this.filePathParts.join(kPathSeparator);
    const testPart = this.testPathParts.join(kPathSeparator);
    return [this.suite, filePart, testPart, stringifyPublicParams(this.params, true)];
  }
}
/**
 * A single-case test query, like `s:f:t:` or `s:f:t:a=1,b=1`.
 * (No trailing wildcard: it matches exactly one case.)
 *
 * Immutable (makes copies of constructor args).
 */
export class TestQuerySingleCase extends TestQueryMultiCase {
  readonly level: TestQueryLevel = 4;
  readonly isMultiCase: false = false;
  // A single case has no further subdivisions, so its depth within the level is always 0.
  get depthInLevel() {
    return 0;
  }
  protected toStringHelper(): string[] {
    return [
      this.suite,
      this.filePathParts.join(kPathSeparator),
      this.testPathParts.join(kPathSeparator),
      stringifyPublicParams(this.params),
    ];
  }
}
/**
 * Parse raw expectations input into TestQueryWithExpectation[], keeping only
 * the expectations that are relevant for the provided query and wptURL.
 *
 * `rawExpectations` should be @type {{ query: string, expectation: Expectation }[]}
 *
 * The `rawExpectations` are parsed and validated to be in the correct format.
 * If `wptURL` is passed, the query string should be of the full path format such
 * as `path/to/cts.https.html?worker=0&q=suite:test_path:test_name:foo=1;bar=2;*`.
 * If `wptURL` is `undefined`, the query string should be only the query
 * `suite:test_path:test_name:foo=1;bar=2;*`.
 *
 * Throws (via assert/unreachable) on malformed input or an unknown expectation value.
 */
export function parseExpectationsForTestQuery(
  rawExpectations:
    | unknown
    | {
        query: string;
        expectation: Expectation;
      }[],
  query: TestQuery,
  wptURL?: URL
) {
  if (!Array.isArray(rawExpectations)) {
    unreachable('Expectations should be an array');
  }
  const expectations: TestQueryWithExpectation[] = [];
  for (const entry of rawExpectations) {
    assert(typeof entry === 'object');
    const rawExpectation = entry as { query?: string; expectation?: string };
    assert(rawExpectation.query !== undefined, 'Expectation missing query string');
    assert(rawExpectation.expectation !== undefined, 'Expectation missing expectation string');
    let expectationQuery: TestQuery;
    if (wptURL !== undefined) {
      const expectationURL = new URL(`${wptURL.origin}/${entry.query}`);
      // Expectations for a different WPT test page don't apply to this run.
      // (A dead assert that re-checked pathname equality right after this
      // `continue` was removed: it could never fire.)
      if (expectationURL.pathname !== wptURL.pathname) {
        continue;
      }
      const params = expectationURL.searchParams;
      // Worker-mode expectations only apply to worker-mode runs (and vice versa).
      if (optionEnabled('worker', params) !== optionEnabled('worker', wptURL.searchParams)) {
        continue;
      }
      const qs = params.getAll('q');
      assert(qs.length === 1, 'currently, there must be exactly one ?q= in the expectation string');
      expectationQuery = parseQuery(qs[0]);
    } else {
      expectationQuery = parseQuery(entry.query);
    }
    // Strip params from multicase expectations so that an expectation of foo=2;*
    // is stored if the test query is bar=3;*
    const queryForFilter =
      expectationQuery instanceof TestQueryMultiCase
        ? new TestQueryMultiCase(
            expectationQuery.suite,
            expectationQuery.filePathParts,
            expectationQuery.testPathParts,
            {}
          )
        : expectationQuery;
    // Skip expectations that don't overlap the query being run at all.
    if (compareQueries(query, queryForFilter) === Ordering.Unordered) {
      continue;
    }
    // Validate the expectation value before recording it.
    switch (entry.expectation) {
      case 'pass':
      case 'skip':
      case 'fail':
        break;
      default:
        unreachable(`Invalid expectation ${entry.expectation}`);
    }
    expectations.push({
      query: expectationQuery,
      expectation: entry.expectation,
    });
  }
  return expectations;
}
/**
 * For display purposes only, produces a "relative" query string from parent to child.
 * Used in the wpt runtime to reduce the verbosity of logs.
 *
 * The parent must be equal to, or a strict superset of, the child; anything
 * else is a programming error and hits unreachable().
 */
export function relativeQueryString(parent: TestQuery, child: TestQuery): string {
  const ordering = compareQueries(parent, child);
  if (ordering === Ordering.Equal) {
    return '';
  }
  if (ordering === Ordering.StrictSuperset) {
    const parentString = parent.toString();
    assert(parentString.endsWith(kWildcard));
    // The shared prefix is the parent minus its trailing wildcard and the
    // separator immediately before it (2 characters total).
    const commonPrefix = parentString.substring(0, parentString.length - 2);
    const childString = child.toString();
    assert(
      childString.startsWith(commonPrefix),
      'impossible?: childString does not start with parentString[:-2]'
    );
    return childString.substring(commonPrefix.length);
  }
  unreachable(
    `relativeQueryString arguments have invalid ordering ${ordering}:\n${parent}\n${child}`
  );
}

View File

@ -0,0 +1,14 @@
/** Separator between big parts: `suite:file:test:case`. */
export const kBigSeparator = ':';
/** Separator within a big part: `path,to,file` or `path,to,test`. */
export const kPathSeparator = ',';
/** Separator between params: `k=v;k=v`. */
export const kParamSeparator = ';';
/** Separator between a param key and its value: `k=v`. */
export const kParamKVSeparator = '=';
/** Final wildcard, present at the end of every query that is not single-case. */
export const kWildcard = '*';

View File

@ -0,0 +1,44 @@
import { TestParams } from '../../framework/fixture.js';
import { assert } from '../../util/util.js';
import { JSONWithUndefined, badParamValueChars, paramKeyIsPublic } from '../params_utils.js';
import { stringifyParamValue, stringifyParamValueUniquely } from './json_param_value.js';
import { kParamKVSeparator, kParamSeparator, kWildcard } from './separators.js';
/**
 * Stringifies the public params of `p` as `k=v;k=v`, in object key order.
 * If `addWildcard` is set, appends the wildcard as a final part.
 */
export function stringifyPublicParams(p: TestParams, addWildcard = false): string {
  const parts: string[] = [];
  for (const k of Object.keys(p)) {
    if (paramKeyIsPublic(k)) {
      parts.push(stringifySingleParam(k, p[k]));
    }
  }
  if (addWildcard) {
    parts.push(kWildcard);
  }
  return parts.join(kParamSeparator);
}
/**
 * An _approximately_ unique string representing a CaseParams value.
 * Keys are sorted first so the result is independent of object key order.
 */
export function stringifyPublicParamsUniquely(p: TestParams): string {
  const publicKeys = Object.keys(p)
    .sort()
    .filter(k => paramKeyIsPublic(k));
  return publicKeys.map(k => stringifySingleParamUniquely(k, p[k])).join(kParamSeparator);
}
/** Stringifies one `key=value` param pair, checking the value for illegal characters. */
export function stringifySingleParam(k: string, v: JSONWithUndefined) {
  return [k, stringifySingleParamValue(v)].join(kParamKVSeparator);
}
/** Stringifies one `key=value` pair using the "unique" value representation. */
function stringifySingleParamUniquely(k: string, v: JSONWithUndefined) {
  return [k, stringifyParamValueUniquely(v)].join(kParamKVSeparator);
}
/** Stringifies a param value, asserting the result contains no forbidden characters. */
function stringifySingleParamValue(v: JSONWithUndefined): string {
  const stringified = stringifyParamValue(v);
  assert(
    !badParamValueChars.test(stringified),
    `JSON.stringified param value must not match ${badParamValueChars} - was ${stringified}`
  );
  return stringified;
}

View File

@ -0,0 +1,2 @@
/** Applies to group parts, test parts, params keys: ASCII alphanumerics and underscore only. */
export const validQueryPart = /^[a-zA-Z0-9_]+$/;

View File

@ -0,0 +1,82 @@
/**
 * Returns the stack trace of an Error, trimmed for logging: the redundant
 * "Error: <message>" header (which some engines repeat in .stack) is removed,
 * and everything below the deepest frame that mentions a `.spec.` file
 * (e.g. RunCaseSpecific, processTicksAndRejections) is dropped.
 * Falls back to the full stack if no `.spec.` frame is found, and to '' if
 * the error has no stack at all.
 */
export function extractImportantStackTrace(e: Error): string {
  let stack = e.stack;
  if (!stack) {
    return '';
  }
  // Strip the leading "Error: <message>\n" if present.
  const redundantMessage = 'Error: ' + e.message + '\n';
  if (stack.startsWith(redundantMessage)) {
    stack = stack.substring(redundantMessage.length);
  }
  // Scan from the bottom for the last frame inside a .spec. file and keep
  // everything up to and including it.
  const lines = stack.split('\n');
  for (let i = lines.length - 1; i >= 0; --i) {
    if (lines[i].indexOf('.spec.') !== -1) {
      return lines.slice(0, i + 1).join('\n');
    }
  }
  return stack;
}
// *** Examples ***
//
// Node fail()
// > Error:
// > at CaseRecorder.fail (/Users/kainino/src/cts/src/common/framework/logger.ts:99:30)
// > at RunCaseSpecific.exports.g.test.t [as fn] (/Users/kainino/src/cts/src/unittests/logger.spec.ts:80:7)
// x at RunCaseSpecific.run (/Users/kainino/src/cts/src/common/framework/test_group.ts:121:18)
// x at processTicksAndRejections (internal/process/task_queues.js:86:5)
//
// Node throw
// > Error: hello
// > at RunCaseSpecific.g.test.t [as fn] (/Users/kainino/src/cts/src/unittests/test_group.spec.ts:51:11)
// x at RunCaseSpecific.run (/Users/kainino/src/cts/src/common/framework/test_group.ts:121:18)
// x at processTicksAndRejections (internal/process/task_queues.js:86:5)
//
// Firefox fail()
// > fail@http://localhost:8080/out/framework/logger.js:104:30
// > expect@http://localhost:8080/out/framework/default_fixture.js:59:16
// > @http://localhost:8080/out/unittests/util.spec.js:35:5
// x run@http://localhost:8080/out/framework/test_group.js:119:18
//
// Firefox throw
// > @http://localhost:8080/out/unittests/test_group.spec.js:48:11
// x run@http://localhost:8080/out/framework/test_group.js:119:18
//
// Safari fail()
// > fail@http://localhost:8080/out/framework/logger.js:104:39
// > expect@http://localhost:8080/out/framework/default_fixture.js:59:20
// > http://localhost:8080/out/unittests/util.spec.js:35:11
// x http://localhost:8080/out/framework/test_group.js:119:20
// x asyncFunctionResume@[native code]
// x [native code]
// x promiseReactionJob@[native code]
//
// Safari throw
// > http://localhost:8080/out/unittests/test_group.spec.js:48:20
// x http://localhost:8080/out/framework/test_group.js:119:20
// x asyncFunctionResume@[native code]
// x [native code]
// x promiseReactionJob@[native code]
//
// Chrome fail()
// x Error
// x at CaseRecorder.fail (http://localhost:8080/out/framework/logger.js:104:30)
// x at DefaultFixture.expect (http://localhost:8080/out/framework/default_fixture.js:59:16)
// > at RunCaseSpecific.fn (http://localhost:8080/out/unittests/util.spec.js:35:5)
// x at RunCaseSpecific.run (http://localhost:8080/out/framework/test_group.js:119:18)
// x at async runCase (http://localhost:8080/out/runtime/standalone.js:37:17)
// x at async http://localhost:8080/out/runtime/standalone.js:102:7
//
// Chrome throw
// x Error: hello
// > at RunCaseSpecific.fn (http://localhost:8080/out/unittests/test_group.spec.js:48:11)
// x at RunCaseSpecific.run (http://localhost:8080/out/framework/test_group.js:119:18)"
// x at async Promise.all (index 0)
// x at async TestGroupTest.run (http://localhost:8080/out/unittests/test_group_test.js:6:5)
// x at async RunCaseSpecific.fn (http://localhost:8080/out/unittests/test_group.spec.js:53:15)
// x at async RunCaseSpecific.run (http://localhost:8080/out/framework/test_group.js:119:7)
// x at async runCase (http://localhost:8080/out/runtime/standalone.js:37:17)
// x at async http://localhost:8080/out/runtime/standalone.js:102:7

View File

@ -0,0 +1,646 @@
import {
Fixture,
SubcaseBatchState,
SkipTestCase,
TestParams,
UnexpectedPassError,
} from '../framework/fixture.js';
import {
CaseParamsBuilder,
builderIterateCasesWithSubcases,
kUnitCaseParamsBuilder,
ParamsBuilderBase,
SubcaseParamsBuilder,
} from '../framework/params_builder.js';
import { globalTestConfig } from '../framework/test_config.js';
import { Expectation } from '../internal/logging/result.js';
import { TestCaseRecorder } from '../internal/logging/test_case_recorder.js';
import { extractPublicParams, Merged, mergeParams } from '../internal/params_utils.js';
import { compareQueries, Ordering } from '../internal/query/compare.js';
import { TestQuerySingleCase, TestQueryWithExpectation } from '../internal/query/query.js';
import { kPathSeparator } from '../internal/query/separators.js';
import {
stringifyPublicParams,
stringifyPublicParamsUniquely,
} from '../internal/query/stringify_params.js';
import { validQueryPart } from '../internal/query/validQueryPart.js';
import { assert, unreachable } from '../util/util.js';
/** A function that runs a test case, logging into `rec` and honoring `expectations`. */
export type RunFn = (
  rec: TestCaseRecorder,
  expectations?: TestQueryWithExpectation[]
) => Promise<void>;
/** Identifies one test case: its test path plus its (public) params. */
export interface TestCaseID {
  readonly test: readonly string[];
  readonly params: TestParams;
}
/** A runnable test case, as produced by iterating a TestBuilder. */
export interface RunCase {
  readonly id: TestCaseID;
  readonly isUnimplemented: boolean;
  run(
    rec: TestCaseRecorder,
    selfQuery: TestQuerySingleCase,
    expectations: TestQueryWithExpectation[]
  ): Promise<void>;
}
// Interface for defining tests
export interface TestGroupBuilder<S extends SubcaseBatchState, F extends Fixture<S>> {
  test(name: string): TestBuilderWithName<S, F>;
}
/** Creates the test-group builder for a spec file, using the given fixture class. */
export function makeTestGroup<S extends SubcaseBatchState, F extends Fixture<S>>(
  fixture: FixtureClass<S, F>
): TestGroupBuilder<S, F> {
  return new TestGroup((fixture as unknown) as FixtureClass);
}
// Interfaces for running tests
export interface IterableTestGroup {
  iterate(): Iterable<IterableTest>;
  validate(): void;
}
export interface IterableTest {
  testPath: string[];
  description: string | undefined;
  readonly testCreationStack: Error;
  iterate(): Iterable<RunCase>;
}
/** Like makeTestGroup, but returns the concrete TestGroup type (for unit tests of the framework). */
export function makeTestGroupForUnitTesting<F extends Fixture>(
  fixture: FixtureClass<SubcaseBatchState, F>
): TestGroup<SubcaseBatchState, F> {
  return new TestGroup(fixture);
}
/** The constructor and static members of a Fixture subclass. */
export type FixtureClass<
  S extends SubcaseBatchState = SubcaseBatchState,
  F extends Fixture<S> = Fixture<S>
> = {
  new (sharedState: S, log: TestCaseRecorder, params: TestParams): F;
  MakeSharedState(params: TestParams): S;
};
/** The test body function, called with the fixture instance (case+subcase params merged in). */
type TestFn<F extends Fixture, P extends {}> = (t: F & { params: P }) => Promise<void> | void;
/** Hook run on the shared subcase-batch state before each batch of subcases. */
type BeforeAllSubcasesFn<S extends SubcaseBatchState, P extends {}> = (
  s: S & { params: P }
) => Promise<void> | void;
/** Collects the tests defined in one spec file and validates their names and params. */
export class TestGroup<S extends SubcaseBatchState, F extends Fixture<S>>
  implements TestGroupBuilder<S, F> {
  private fixture: FixtureClass;
  private seen: Set<string> = new Set();
  private tests: Array<TestBuilder<S, F>> = [];

  constructor(fixture: FixtureClass) {
    this.fixture = fixture;
  }

  iterate(): Iterable<IterableTest> {
    return this.tests;
  }

  /** Asserts the name is decodeURIComponent-idempotent and not a duplicate, then records it. */
  private checkName(name: string): void {
    const decoded = decodeURIComponent(name);
    assert(
      // Shouldn't happen due to the rule above. Just makes sure that treating
      // unencoded strings as encoded strings is OK.
      name === decoded,
      `Not decodeURIComponent-idempotent: ${name} !== ${decoded}`
    );
    assert(!this.seen.has(name), `Duplicate test name: ${name}`);
    this.seen.add(name);
  }

  /** Defines a new test; `name` may contain kPathSeparator-separated path parts. */
  test(name: string): TestBuilderWithName<S, F> {
    // Capture the creation stack up front so later errors can point at the definition site.
    const testCreationStack = new Error(`Test created: ${name}`);
    this.checkName(name);
    const parts = name.split(kPathSeparator);
    for (const part of parts) {
      assert(
        validQueryPart.test(part),
        `Invalid test name part ${part}; must match ${validQueryPart}`
      );
    }
    const builder = new TestBuilder(parts, this.fixture, testCreationStack);
    this.tests.push(builder);
    return (builder as unknown) as TestBuilderWithName<S, F>;
  }

  validate(): void {
    for (const test of this.tests) {
      test.validate();
    }
  }
}
/** The builder interface returned by TestGroup.test(), before parameterization. */
interface TestBuilderWithName<S extends SubcaseBatchState, F extends Fixture<S>>
  extends TestBuilderWithParams<S, F, {}, {}> {
  /** Sets the test description (trimmed). */
  desc(description: string): this;
  /**
   * A noop function to associate a test with the relevant part of the specification.
   *
   * @param url a link to the spec where test is extracted from.
   */
  specURL(url: string): this;
  /**
   * Parameterize the test, generating multiple cases, each possibly having subcases.
   *
   * The `unit` value passed to the `cases` callback is an immutable constant
   * `CaseParamsBuilder<{}>` representing the "unit" builder `[ {} ]`,
   * provided for convenience. The non-callback overload can be used if `unit` is not needed.
   */
  params<CaseP extends {}, SubcaseP extends {}>(
    cases: (unit: CaseParamsBuilder<{}>) => ParamsBuilderBase<CaseP, SubcaseP>
  ): TestBuilderWithParams<S, F, CaseP, SubcaseP>;
  /**
   * Parameterize the test, generating multiple cases, each possibly having subcases.
   *
   * Use the callback overload of this method if a "unit" builder is needed.
   */
  params<CaseP extends {}, SubcaseP extends {}>(
    cases: ParamsBuilderBase<CaseP, SubcaseP>
  ): TestBuilderWithParams<S, F, CaseP, SubcaseP>;
  /**
   * Parameterize the test, generating multiple cases, without subcases.
   */
  paramsSimple<P extends {}>(cases: Iterable<P>): TestBuilderWithParams<S, F, P, {}>;
  /**
   * Parameterize the test, generating one case with multiple subcases.
   */
  paramsSubcasesOnly<P extends {}>(subcases: Iterable<P>): TestBuilderWithParams<S, F, {}, P>;
  /**
   * Parameterize the test, generating one case with multiple subcases.
   *
   * The `unit` value passed to the `subcases` callback is an immutable constant
   * `SubcaseParamsBuilder<{}>`, with one empty case `{}` and one empty subcase `{}`.
   */
  paramsSubcasesOnly<P extends {}>(
    subcases: (unit: SubcaseParamsBuilder<{}, {}>) => SubcaseParamsBuilder<{}, P>
  ): TestBuilderWithParams<S, F, {}, P>;
}
/** The builder interface available after a test has been parameterized. */
interface TestBuilderWithParams<
  S extends SubcaseBatchState,
  F extends Fixture<S>,
  CaseP extends {},
  SubcaseP extends {}
> {
  /**
   * Limit subcases to a maximum number of per testcase.
   * @param b the maximum number of subcases per testcase.
   *
   * If the number of subcases exceeds `b`, add an internal
   * numeric, incrementing `batch__` param to split subcases
   * into groups of at most `b` subcases.
   */
  batch(b: number): this;
  /**
   * Run a function on shared subcase batch state before each
   * batch of subcases.
   * @param fn the function to run. It is called with the test
   * fixture's shared subcase batch state.
   *
   * Generally, this function should be careful to avoid mutating
   * any state on the shared subcase batch state which could result
   * in unexpected order-dependent test behavior.
   */
  beforeAllSubcases(fn: BeforeAllSubcasesFn<S, CaseP>): this;
  /**
   * Set the test function.
   * @param fn the test function.
   */
  fn(fn: TestFn<F, Merged<CaseP, SubcaseP>>): void;
  /**
   * Mark the test as unimplemented.
   */
  unimplemented(): void;
}
/**
 * Builder for a single test: accumulates the description, params builder,
 * hooks, and test function, then yields RunCase objects via `iterate()`.
 * Implements both TestBuilderWithName and TestBuilderWithParams (via casts).
 */
class TestBuilder<S extends SubcaseBatchState, F extends Fixture> {
  readonly testPath: string[];
  isUnimplemented: boolean;
  description: string | undefined;
  readonly testCreationStack: Error;
  private readonly fixture: FixtureClass;
  // The test body; undefined until .fn() or .unimplemented() is called.
  private testFn: TestFn<Fixture, {}> | undefined;
  // Optional hook run on shared state before each batch of subcases.
  private beforeFn: BeforeAllSubcasesFn<SubcaseBatchState, {}> | undefined;
  // The params builder; undefined until one of the params* methods is called.
  private testCases?: ParamsBuilderBase<{}, {}> = undefined;
  // Maximum subcases per generated case; 0 means "do not batch".
  private batchSize: number = 0;
  constructor(testPath: string[], fixture: FixtureClass, testCreationStack: Error) {
    this.testPath = testPath;
    this.isUnimplemented = false;
    this.fixture = fixture;
    this.testCreationStack = testCreationStack;
  }
  /** Sets the (trimmed) test description. */
  desc(description: string): this {
    this.description = description.trim();
    return this;
  }
  /** Noop: associates the test with a spec URL for documentation purposes only. */
  specURL(url: string): this {
    return this;
  }
  /** Sets the before-all-subcases hook; may only be called once. */
  beforeAllSubcases(fn: BeforeAllSubcasesFn<SubcaseBatchState, {}>): this {
    assert(this.beforeFn === undefined);
    this.beforeFn = fn;
    return this;
  }
  /** Sets the test function; may only be called once. */
  fn(fn: TestFn<Fixture, {}>): void {
    // eslint-disable-next-line no-warning-comments
    // MAINTENANCE_TODO: add "TODO" if there's no description? (and make sure it only ends up on
    // actual tests, not on test parents in the tree, which is what happens if you do it here, not
    // sure why)
    assert(this.testFn === undefined);
    this.testFn = fn;
  }
  /** Sets the maximum number of subcases per generated case (see TestBuilderWithParams.batch). */
  batch(b: number): this {
    this.batchSize = b;
    return this;
  }
  /** Marks the test unimplemented: appends a TODO to the description, body skips. */
  unimplemented(): void {
    assert(this.testFn === undefined);
    this.description =
      (this.description ? this.description + '\n\n' : '') + 'TODO: .unimplemented()';
    this.isUnimplemented = true;
    this.testFn = () => {
      throw new SkipTestCase('test unimplemented');
    };
  }
  /**
   * Checks the test has a body and that no two cases have the same public
   * params (using the "unique" stringification to catch near-collisions).
   */
  validate(): void {
    const testPathString = this.testPath.join(kPathSeparator);
    assert(this.testFn !== undefined, () => {
      let s = `Test is missing .fn(): ${testPathString}`;
      if (this.testCreationStack.stack) {
        s += `\n-> test created at:\n${this.testCreationStack.stack}`;
      }
      return s;
    });
    if (this.testCases === undefined) {
      return;
    }
    const seen = new Set<string>();
    for (const [caseParams, subcases] of builderIterateCasesWithSubcases(this.testCases)) {
      for (const subcaseParams of subcases ?? [{}]) {
        const params = mergeParams(caseParams, subcaseParams);
        // batch__ is reserved for the internal batching mechanism below.
        assert(this.batchSize === 0 || !('batch__' in params));
        // stringifyPublicParams also checks for invalid params values
        const testcaseString = stringifyPublicParams(params);
        // A (hopefully) unique representation of a params value.
        const testcaseStringUnique = stringifyPublicParamsUniquely(params);
        assert(
          !seen.has(testcaseStringUnique),
          `Duplicate public test case params for test ${testPathString}: ${testcaseString}`
        );
        seen.add(testcaseStringUnique);
      }
    }
  }
  /** Sets the params builder (callback or direct form); may only be called once. */
  params(
    cases: ((unit: CaseParamsBuilder<{}>) => ParamsBuilderBase<{}, {}>) | ParamsBuilderBase<{}, {}>
  ): TestBuilder<S, F> {
    assert(this.testCases === undefined, 'test case is already parameterized');
    if (cases instanceof Function) {
      this.testCases = cases(kUnitCaseParamsBuilder);
    } else {
      this.testCases = cases;
    }
    return this;
  }
  /** Parameterizes with a flat iterable of case params (no subcases). */
  paramsSimple(cases: Iterable<{}>): TestBuilder<S, F> {
    assert(this.testCases === undefined, 'test case is already parameterized');
    this.testCases = kUnitCaseParamsBuilder.combineWithParams(cases);
    return this;
  }
  /** Parameterizes with one case having the given subcases (callback or iterable form). */
  paramsSubcasesOnly(
    subcases: Iterable<{}> | ((unit: SubcaseParamsBuilder<{}, {}>) => SubcaseParamsBuilder<{}, {}>)
  ): TestBuilder<S, F> {
    if (subcases instanceof Function) {
      return this.params(subcases(kUnitCaseParamsBuilder.beginSubcases()));
    } else {
      return this.params(kUnitCaseParamsBuilder.beginSubcases().combineWithParams(subcases));
    }
  }
  /**
   * Yields a RunCase per case. When batching is enabled and a case has more
   * than batchSize subcases, the case is split into several RunCases
   * distinguished by an added `batch__` param.
   */
  *iterate(): IterableIterator<RunCase> {
    assert(this.testFn !== undefined, 'No test function (.fn()) for test');
    // An unparameterized test gets the unit builder (a single empty case).
    this.testCases ??= kUnitCaseParamsBuilder;
    for (const [caseParams, subcases] of builderIterateCasesWithSubcases(this.testCases)) {
      if (this.batchSize === 0 || subcases === undefined) {
        yield new RunCaseSpecific(
          this.testPath,
          caseParams,
          this.isUnimplemented,
          subcases,
          this.fixture,
          this.testFn,
          this.beforeFn,
          this.testCreationStack
        );
      } else {
        const subcaseArray = Array.from(subcases);
        if (subcaseArray.length <= this.batchSize) {
          // Everything fits in one batch; no batch__ param needed.
          yield new RunCaseSpecific(
            this.testPath,
            caseParams,
            this.isUnimplemented,
            subcaseArray,
            this.fixture,
            this.testFn,
            this.beforeFn,
            this.testCreationStack
          );
        } else {
          // Split into chunks of batchSize, numbering them via batch__.
          for (let i = 0; i < subcaseArray.length; i = i + this.batchSize) {
            yield new RunCaseSpecific(
              this.testPath,
              { ...caseParams, batch__: i / this.batchSize },
              this.isUnimplemented,
              subcaseArray.slice(i, Math.min(subcaseArray.length, i + this.batchSize)),
              this.fixture,
              this.testFn,
              this.beforeFn,
              this.testCreationStack
            );
          }
        }
      }
    }
  }
}
/**
 * A concrete runnable case: runs the fixture lifecycle for one case and all of
 * its subcases, managing expectations, subcase concurrency, and log ordering.
 */
class RunCaseSpecific implements RunCase {
  readonly id: TestCaseID;
  readonly isUnimplemented: boolean;
  // Full (public + private) case params; id.params holds only the public ones.
  private readonly params: {};
  private readonly subcases: Iterable<{}> | undefined;
  private readonly fixture: FixtureClass;
  private readonly fn: TestFn<Fixture, {}>;
  private readonly beforeFn?: BeforeAllSubcasesFn<SubcaseBatchState, {}>;
  private readonly testCreationStack: Error;
  constructor(
    testPath: string[],
    params: {},
    isUnimplemented: boolean,
    subcases: Iterable<{}> | undefined,
    fixture: FixtureClass,
    fn: TestFn<Fixture, {}>,
    beforeFn: BeforeAllSubcasesFn<SubcaseBatchState, {}> | undefined,
    testCreationStack: Error
  ) {
    this.id = { test: testPath, params: extractPublicParams(params) };
    this.isUnimplemented = isUnimplemented;
    this.params = params;
    this.subcases = subcases;
    this.fixture = fixture;
    this.fn = fn;
    this.beforeFn = beforeFn;
    this.testCreationStack = testCreationStack;
  }
  /**
   * Runs the test body once (for one case or one subcase): constructs the
   * fixture, runs init/fn/finalize, and records the result against
   * `expectedStatus`. If `throwSkip` is set, SkipTestCase propagates to the
   * caller (used by subcases) instead of being recorded here.
   */
  async runTest(
    rec: TestCaseRecorder,
    sharedState: SubcaseBatchState,
    params: TestParams,
    throwSkip: boolean,
    expectedStatus: Expectation
  ): Promise<void> {
    try {
      rec.beginSubCase();
      if (expectedStatus === 'skip') {
        throw new SkipTestCase('Skipped by expectations');
      }
      const inst = new this.fixture(sharedState, rec, params);
      try {
        await inst.init();
        await this.fn(inst as Fixture & { params: {} });
      } finally {
        // Runs as long as constructor succeeded, even if initialization or the test failed.
        await inst.finalize();
      }
    } catch (ex) {
      // There was an exception from constructor, init, test, or finalize.
      // An error from init or test may have been a SkipTestCase.
      // An error from finalize may have been an eventualAsyncExpectation failure
      // or unexpected validation/OOM error from the GPUDevice.
      if (throwSkip && ex instanceof SkipTestCase) {
        throw ex;
      }
      rec.threw(ex);
    } finally {
      try {
        rec.endSubCase(expectedStatus);
      } catch (ex) {
        // endSubCase throws UnexpectedPassError when a 'fail'-expected case passed;
        // downgrade it to a warning pointing at the test definition site.
        assert(ex instanceof UnexpectedPassError);
        ex.message = `Testcase passed unexpectedly.`;
        ex.stack = this.testCreationStack.stack;
        rec.warn(ex);
      }
    }
  }
  /**
   * Runs the whole case: shared-state setup, each subcase (bounded by
   * maxSubcasesInFlight, with log ordering preserved), and teardown.
   */
  async run(
    rec: TestCaseRecorder,
    selfQuery: TestQuerySingleCase,
    expectations: TestQueryWithExpectation[]
  ): Promise<void> {
    // Resolves the expected status for a (sub)case query: 'skip' wins over 'fail',
    // which wins over the default 'pass'. Non-matching expectations are ignored.
    const getExpectedStatus = (selfQueryWithSubParams: TestQuerySingleCase) => {
      let didSeeFail = false;
      for (const exp of expectations) {
        const ordering = compareQueries(exp.query, selfQueryWithSubParams);
        if (ordering === Ordering.Unordered || ordering === Ordering.StrictSubset) {
          continue;
        }
        switch (exp.expectation) {
          // Skip takes precedence. If there is any expectation indicating a skip,
          // signal it immediately.
          case 'skip':
            return 'skip';
          case 'fail':
            // Otherwise, indicate that we might expect a failure.
            didSeeFail = true;
            break;
          default:
            unreachable();
        }
      }
      return didSeeFail ? 'fail' : 'pass';
    };
    const { testHeartbeatCallback, maxSubcasesInFlight } = globalTestConfig;
    try {
      rec.start();
      const sharedState = this.fixture.MakeSharedState(this.params);
      try {
        await sharedState.init();
        if (this.beforeFn) {
          await this.beforeFn(sharedState);
        }
        await sharedState.postInit();
        testHeartbeatCallback();
        let allPreviousSubcasesFinalizedPromise: Promise<void> = Promise.resolve();
        if (this.subcases) {
          let totalCount = 0;
          let skipCount = 0;
          // If there are too many subcases in flight, starting the next subcase will register
          // `resolvePromiseBlockingSubcase` and wait until `subcaseFinishedCallback` is called.
          let subcasesInFlight = 0;
          let resolvePromiseBlockingSubcase: (() => void) | undefined = undefined;
          const subcaseFinishedCallback = () => {
            subcasesInFlight -= 1;
            // If there is any subcase waiting on a previous subcase to finish,
            // unblock it now, and clear the resolve callback.
            if (resolvePromiseBlockingSubcase) {
              resolvePromiseBlockingSubcase();
              resolvePromiseBlockingSubcase = undefined;
            }
          };
          for (const subParams of this.subcases) {
            // Make a recorder that will defer all calls until `allPreviousSubcasesFinalizedPromise`
            // resolves. Waiting on `allPreviousSubcasesFinalizedPromise` ensures that
            // logs from all the previous subcases have been flushed before flushing new logs.
            const subcasePrefix = 'subcase: ' + stringifyPublicParams(subParams);
            const subRec = new Proxy(rec, {
              get: (target, k: keyof TestCaseRecorder) => {
                const prop = TestCaseRecorder.prototype[k];
                if (typeof prop === 'function') {
                  testHeartbeatCallback();
                  return function (...args: Parameters<typeof prop>) {
                    void allPreviousSubcasesFinalizedPromise.then(() => {
                      // Prepend the subcase name to all error messages.
                      for (const arg of args) {
                        if (arg instanceof Error) {
                          try {
                            arg.message = subcasePrefix + '\n' + arg.message;
                          } catch {
                            // If that fails (e.g. on DOMException), try to put it in the stack:
                            let stack = subcasePrefix;
                            if (arg.stack) stack += '\n' + arg.stack;
                            try {
                              arg.stack = stack;
                            } catch {
                              // If that fails too, just silence it.
                            }
                          }
                        }
                      }
                      // eslint-disable-next-line @typescript-eslint/no-explicit-any
                      const rv = (prop as any).apply(target, args);
                      // Because this proxy executes functions in a deferred manner,
                      // it should never be used for functions that need to return a value.
                      assert(rv === undefined);
                    });
                  };
                }
                return prop;
              },
            });
            const params = mergeParams(this.params, subParams);
            const subcaseQuery = new TestQuerySingleCase(
              selfQuery.suite,
              selfQuery.filePathParts,
              selfQuery.testPathParts,
              params
            );
            // Limit the maximum number of subcases in flight.
            if (subcasesInFlight >= maxSubcasesInFlight) {
              await new Promise<void>(resolve => {
                // There should only be one subcase waiting at a time.
                assert(resolvePromiseBlockingSubcase === undefined);
                resolvePromiseBlockingSubcase = resolve;
              });
            }
            subcasesInFlight += 1;
            // Runs async without waiting so that subsequent subcases can start.
            // All finalization steps will be waited on at the end of the testcase.
            const finalizePromise = this.runTest(
              subRec,
              sharedState,
              params,
              /* throwSkip */ true,
              getExpectedStatus(subcaseQuery)
            )
              .then(() => {
                subRec.info(new Error('OK'));
              })
              .catch(ex => {
                if (ex instanceof SkipTestCase) {
                  // Convert SkipTestCase to info messages
                  ex.message = 'subcase skipped: ' + ex.message;
                  subRec.info(ex);
                  ++skipCount;
                } else {
                  // Since we are catching all error inside runTest(), this should never happen
                  subRec.threw(ex);
                }
              })
              .finally(subcaseFinishedCallback);
            allPreviousSubcasesFinalizedPromise = allPreviousSubcasesFinalizedPromise.then(
              () => finalizePromise
            );
            ++totalCount;
          }
          // Wait for all subcases to finalize and report their results.
          await allPreviousSubcasesFinalizedPromise;
          if (skipCount === totalCount) {
            rec.skipped(new SkipTestCase('all subcases were skipped'));
          }
        } else {
          await this.runTest(
            rec,
            sharedState,
            this.params,
            /* throwSkip */ false,
            getExpectedStatus(selfQuery)
          );
        }
      } finally {
        testHeartbeatCallback();
        // Runs as long as the shared state constructor succeeded, even if initialization or a test failed.
        await sharedState.finalize();
        testHeartbeatCallback();
      }
    } catch (ex) {
      // There was an exception from sharedState/fixture constructor, init, beforeFn, or test.
      // An error from beforeFn may have been SkipTestCase.
      // An error from finalize may have been an eventualAsyncExpectation failure
      // or unexpected validation/OOM error from the GPUDevice.
      rec.threw(ex);
    } finally {
      rec.finish();
    }
  }
}

View File

@ -0,0 +1,15 @@
// A listing of all specs within a single suite. This is the (awaited) type of
// `groups` in '{cts,unittests}/listing.ts' and `listing` in the auto-generated
// 'out/{cts,unittests}/listing.js' files (see tools/gen_listings).
export type TestSuiteListing = TestSuiteListingEntry[];
// One listing entry: either a spec file or a README.
export type TestSuiteListingEntry = TestSuiteListingEntrySpec | TestSuiteListingEntryReadme;
// A spec file, identified by its path parts.
interface TestSuiteListingEntrySpec {
  readonly file: string[];
}
// A README, carrying its description text.
interface TestSuiteListingEntryReadme {
  readonly file: string[];
  readonly readme: string;
}

View File

@ -0,0 +1,575 @@
import { RunCase, RunFn } from '../internal/test_group.js';
import { assert } from '../util/util.js';
import { TestFileLoader } from './file_loader.js';
import { TestParamsRW } from './params_utils.js';
import { compareQueries, Ordering } from './query/compare.js';
import {
TestQuery,
TestQueryMultiCase,
TestQuerySingleCase,
TestQueryMultiFile,
TestQueryMultiTest,
} from './query/query.js';
import { kBigSeparator, kWildcard, kPathSeparator, kParamSeparator } from './query/separators.js';
import { stringifySingleParam } from './query/stringify_params.js';
import { StacklessError } from './util.js';
// `loadTreeForQuery()` loads a TestTree for a given queryToLoad.
// The resulting tree is a linked-list all the way from `suite:*` to queryToLoad,
// and under queryToLoad is a tree containing every case matched by queryToLoad.
//
// `subqueriesToExpand` influences the `collapsible` flag on nodes in the resulting tree.
// A node is considered "collapsible" if none of the subqueriesToExpand is a StrictSubset
// of that node.
//
// In WebKit/Blink-style web_tests, an expectation file marks individual cts.https.html "variants
// as "Failure", "Crash", etc. By passing in the list of expectations as the subqueriesToExpand,
// we can programmatically subdivide the cts.https.html "variants" list to be able to implement
// arbitrarily-fine suppressions (instead of having to suppress entire test files, which would
// lose a lot of coverage).
//
// `iterateCollapsedNodes()` produces the list of queries for the variants list.
//
// Though somewhat complicated, this system has important benefits:
// - Avoids having to suppress entire test files, which would cause large test coverage loss.
// - Minimizes the number of page loads needed for fine-grained suppressions.
// (In the naive case, we could do one page load per test case - but the test suite would
// take impossibly long to run.)
// - Enables developers to put any number of tests in one file as appropriate, without worrying
// about expectation granularity.
/** Fields common to every node (subtree or leaf) in a TestTree. */
interface TestTreeNodeBase<T extends TestQuery> {
  readonly query: T;
  /**
   * Readable "relative" name for display in standalone runner.
   * Not always the exact relative name, because sometimes there isn't
   * one (e.g. s:f:* relative to s:f,*), but something that is readable.
   */
  readonly readableRelativeName: string;
  // Aggregated counts for the subtree rooted here; filled in by TestTree.propagateCounts.
  subtreeCounts?: { tests: number; nodesWithTODO: number };
}

/** Interior node of a TestTree; children are keyed by their relative name part. */
export interface TestSubtree<T extends TestQuery = TestQuery> extends TestTreeNodeBase<T> {
  readonly children: Map<string, TestTreeNode>;
  // Whether this subtree may be hidden when iterating collapsed nodes.
  readonly collapsible: boolean;
  description?: string;
  readonly testCreationStack?: Error;
}

/** Leaf node of a TestTree: one runnable test case. */
export interface TestTreeLeaf extends TestTreeNodeBase<TestQuerySingleCase> {
  readonly run: RunFn;
  readonly isUnimplemented?: boolean;
  subtreeCounts?: undefined;
}

export type TestTreeNode = TestSubtree | TestTreeLeaf;

/**
 * When iterating through "collapsed" tree nodes, indicates how many "query levels" to traverse
 * through before starting to collapse nodes.
 *
 * Corresponds with TestQueryLevel, but excludes 4 (SingleCase):
 * - 1 = MultiFile. Expands so every file is in the collapsed tree.
 * - 2 = MultiTest. Expands so every test is in the collapsed tree.
 * - 3 = MultiCase. Expands so every case is in the collapsed tree (i.e. collapsing disabled).
 */
export type ExpandThroughLevel = 1 | 2 | 3;
/** A tree of all the queries/cases matched by a loaded query (see loadTreeForQuery). */
export class TestTree {
  /**
   * The `queryToLoad` that this test tree was created for.
   * Test trees are always rooted at `suite:*`, but they only contain nodes that fit
   * within `forQuery`.
   *
   * This is used for `iterateCollapsedNodes` which only starts collapsing at the next
   * `TestQueryLevel` after `forQuery`.
   */
  readonly forQuery: TestQuery;
  readonly root: TestSubtree;

  constructor(forQuery: TestQuery, root: TestSubtree) {
    this.forQuery = forQuery;
    // Fill in subtreeCounts on every interior node before the tree is used.
    TestTree.propagateCounts(root);
    this.root = root;
    assert(
      root.query.level === 1 && root.query.depthInLevel === 0,
      'TestTree root must be the root (suite:*)'
    );
  }

  /**
   * Iterate through the leaves of a version of the tree which has been pruned to exclude
   * subtrees which:
   * - are at a deeper `TestQueryLevel` than `this.forQuery`, and
   * - were not a `Ordering.StrictSubset` of any of the `subqueriesToExpand` during tree creation.
   */
  iterateCollapsedNodes({
    includeIntermediateNodes = false,
    includeEmptySubtrees = false,
    alwaysExpandThroughLevel,
  }: {
    /** Whether to include intermediate tree nodes or only collapsed-leaves. */
    includeIntermediateNodes?: boolean;
    /** Whether to include collapsed-leaves with no children. */
    includeEmptySubtrees?: boolean;
    /** Never collapse nodes up through this level. */
    alwaysExpandThroughLevel: ExpandThroughLevel;
  }): IterableIterator<Readonly<TestTreeNode>> {
    // Never collapse more shallowly than the level of the query this tree was built for.
    const expandThroughLevel = Math.max(this.forQuery.level, alwaysExpandThroughLevel);
    return TestTree.iterateSubtreeNodes(this.root, {
      includeIntermediateNodes,
      includeEmptySubtrees,
      expandThroughLevel,
    });
  }

  /** Iterate every runnable case in the tree (collapsing is ignored). */
  iterateLeaves(): IterableIterator<Readonly<TestTreeLeaf>> {
    return TestTree.iterateSubtreeLeaves(this.root);
  }

  /**
   * Dissolve nodes which have only one child, e.g.:
   *   a,* { a,b,* { a,b:* { ... } } }
   * collapses down into:
   *   a,* { a,b:* { ... } }
   * which is less needlessly verbose when displaying the tree in the standalone runner.
   */
  dissolveSingleChildTrees(): void {
    const newRoot = dissolveSingleChildTrees(this.root);
    // The root has depthInLevel 0, so it is never dissolved itself.
    assert(newRoot === this.root);
  }

  toString(): string {
    return TestTree.subtreeToString('(root)', this.root, '');
  }

  static *iterateSubtreeNodes(
    subtree: TestSubtree,
    opts: {
      includeIntermediateNodes: boolean;
      includeEmptySubtrees: boolean;
      expandThroughLevel: number;
    }
  ): IterableIterator<TestTreeNode> {
    if (opts.includeIntermediateNodes) {
      yield subtree;
    }

    for (const [, child] of subtree.children) {
      if ('children' in child) {
        // Is a subtree
        const collapsible = child.collapsible && child.query.level > opts.expandThroughLevel;
        if (child.children.size > 0 && !collapsible) {
          yield* TestTree.iterateSubtreeNodes(child, opts);
        } else if (child.children.size > 0 || opts.includeEmptySubtrees) {
          // Don't yield empty subtrees (e.g. files with no tests) unless includeEmptySubtrees
          yield child;
        }
      } else {
        // Is a leaf
        yield child;
      }
    }
  }

  static *iterateSubtreeLeaves(subtree: TestSubtree): IterableIterator<TestTreeLeaf> {
    for (const [, child] of subtree.children) {
      if ('children' in child) {
        yield* TestTree.iterateSubtreeLeaves(child);
      } else {
        yield child;
      }
    }
  }

  /** Propagate the subtreeTODOs/subtreeTests state upward from leaves to parent nodes. */
  static propagateCounts(subtree: TestSubtree): { tests: number; nodesWithTODO: number } {
    subtree.subtreeCounts ??= { tests: 0, nodesWithTODO: 0 };
    for (const [, child] of subtree.children) {
      if ('children' in child) {
        const counts = TestTree.propagateCounts(child);
        subtree.subtreeCounts.tests += counts.tests;
        subtree.subtreeCounts.nodesWithTODO += counts.nodesWithTODO;
      }
    }
    return subtree.subtreeCounts;
  }

  /** Displays counts in the format `(Nodes with TODOs) / (Total test count)`. */
  static countsToString(tree: TestTreeNode): string {
    if (tree.subtreeCounts) {
      return `${tree.subtreeCounts.nodesWithTODO} / ${tree.subtreeCounts.tests}`;
    } else {
      return '';
    }
  }

  static subtreeToString(name: string, tree: TestTreeNode, indent: string): string {
    // '>' marks a leaf; '+' a collapsible subtree; '-' a non-collapsible subtree.
    const collapsible = 'run' in tree ? '>' : tree.collapsible ? '+' : '-';
    let s =
      indent +
      `${collapsible} ${TestTree.countsToString(tree)} ${JSON.stringify(name)} => ${tree.query}`;
    if ('children' in tree) {
      if (tree.description !== undefined) {
        s += `\n${indent} | ${JSON.stringify(tree.description)}`;
      }
      for (const [name, child] of tree.children) {
        s += '\n' + TestTree.subtreeToString(name, child, indent + ' ');
      }
    }
    return s;
  }
}
// MAINTENANCE_TODO: Consider having subqueriesToExpand actually impact the depth-order of params
// in the tree.
/**
 * Load a TestTree for `queryToLoad` (see the file-level comment above for the overall design).
 * Walks the suite listing, imports matching spec files, and builds one subtree level per
 * query level (file -> test -> case), marking nodes collapsible per `subqueriesToExpand`.
 * Throws (StacklessError) if a spec has no tests and no TODO, if an expansion subquery
 * matched nothing, or (assert) if no case at all matched.
 */
export async function loadTreeForQuery(
  loader: TestFileLoader,
  queryToLoad: TestQuery,
  subqueriesToExpand: TestQuery[]
): Promise<TestTree> {
  const suite = queryToLoad.suite;
  const specs = await loader.listing(suite);

  const subqueriesToExpandEntries = Array.from(subqueriesToExpand.entries());
  // Tracks which subqueriesToExpand actually matched something, for the validation at the end.
  const seenSubqueriesToExpand: boolean[] = new Array(subqueriesToExpand.length);
  seenSubqueriesToExpand.fill(false);

  // A node is collapsible iff no expansion subquery is a strict subset of it.
  const isCollapsible = (subquery: TestQuery) =>
    subqueriesToExpandEntries.every(([i, toExpand]) => {
      const ordering = compareQueries(toExpand, subquery);
      // If toExpand == subquery, no expansion is needed (but it's still "seen").
      if (ordering === Ordering.Equal) seenSubqueriesToExpand[i] = true;
      return ordering !== Ordering.StrictSubset;
    });

  // L0 = suite-level, e.g. suite:*
  // L1 = file-level, e.g. suite:a,b:*
  // L2 = test-level, e.g. suite:a,b:c,d:*
  // L3 = case-level, e.g. suite:a,b:c,d:
  let foundCase = false;
  // L0 is suite:*
  const subtreeL0 = makeTreeForSuite(suite, isCollapsible);
  for (const entry of specs) {
    if (entry.file.length === 0 && 'readme' in entry) {
      // Suite-level readme.
      setSubtreeDescriptionAndCountTODOs(subtreeL0, entry.readme);
      continue;
    }

    {
      const queryL1 = new TestQueryMultiFile(suite, entry.file);
      const orderingL1 = compareQueries(queryL1, queryToLoad);
      if (orderingL1 === Ordering.Unordered) {
        // File path is not matched by this query.
        continue;
      }
    }

    if ('readme' in entry) {
      // Entry is a README that is an ancestor or descendant of the query.
      // (It's included for display in the standalone runner.)

      // readmeSubtree is suite:a,b,*
      // (This is always going to dedup with a file path, if there are any test spec files under
      // the directory that has the README).
      const readmeSubtree: TestSubtree<TestQueryMultiFile> = addSubtreeForDirPath(
        subtreeL0,
        entry.file,
        isCollapsible
      );
      setSubtreeDescriptionAndCountTODOs(readmeSubtree, entry.readme);
      continue;
    }
    // Entry is a spec file.

    const spec = await loader.importSpecFile(queryToLoad.suite, entry.file);
    // subtreeL1 is suite:a,b:*
    const subtreeL1: TestSubtree<TestQueryMultiTest> = addSubtreeForFilePath(
      subtreeL0,
      entry.file,
      isCollapsible
    );
    setSubtreeDescriptionAndCountTODOs(subtreeL1, spec.description);

    let groupHasTests = false;
    for (const t of spec.g.iterate()) {
      groupHasTests = true;
      {
        const queryL2 = new TestQueryMultiCase(suite, entry.file, t.testPath, {});
        const orderingL2 = compareQueries(queryL2, queryToLoad);
        if (orderingL2 === Ordering.Unordered) {
          // Test path is not matched by this query.
          continue;
        }
      }

      // subtreeL2 is suite:a,b:c,d:*
      const subtreeL2: TestSubtree<TestQueryMultiCase> = addSubtreeForTestPath(
        subtreeL1,
        t.testPath,
        t.testCreationStack,
        isCollapsible
      );

      // This is 1 test. Set tests=1 then count TODOs.
      subtreeL2.subtreeCounts ??= { tests: 1, nodesWithTODO: 0 };
      if (t.description) setSubtreeDescriptionAndCountTODOs(subtreeL2, t.description);

      // MAINTENANCE_TODO: If tree generation gets too slow, avoid actually iterating the cases in a
      // file if there's no need to (based on the subqueriesToExpand).
      for (const c of t.iterate()) {
        {
          const queryL3 = new TestQuerySingleCase(suite, entry.file, c.id.test, c.id.params);
          const orderingL3 = compareQueries(queryL3, queryToLoad);
          if (orderingL3 === Ordering.Unordered || orderingL3 === Ordering.StrictSuperset) {
            // Case is not matched by this query.
            continue;
          }
        }

        // Leaf for case is suite:a,b:c,d:x=1;y=2
        addLeafForCase(subtreeL2, c, isCollapsible);
        foundCase = true;
      }
    }
    if (!groupHasTests && !subtreeL1.subtreeCounts) {
      throw new StacklessError(
        `${subtreeL1.query} has no tests - it must have "TODO" in its description`
      );
    }
  }

  // Every expansion subquery must have matched something, or the expectation list is stale.
  for (const [i, sq] of subqueriesToExpandEntries) {
    const subquerySeen = seenSubqueriesToExpand[i];
    if (!subquerySeen) {
      throw new StacklessError(
        `subqueriesToExpand entry did not match anything \
(could be wrong, or could be redundant with a previous subquery):\n ${sq.toString()}`
      );
    }
  }
  assert(foundCase, `Query \`${queryToLoad.toString()}\` does not match any cases`);

  return new TestTree(queryToLoad, subtreeL0);
}
/**
 * Attach `description` (trimmed) to `subtree`, and count it toward the subtree's
 * nodesWithTODO total if the text mentions "TODO".
 */
function setSubtreeDescriptionAndCountTODOs(
  subtree: TestSubtree<TestQueryMultiFile>,
  description: string
) {
  // A subtree's description may only be set once.
  assert(subtree.description === undefined);
  const trimmed = description.trim();
  subtree.description = trimmed;
  if (subtree.subtreeCounts === undefined) {
    subtree.subtreeCounts = { tests: 0, nodesWithTODO: 0 };
  }
  if (trimmed.includes('TODO')) {
    subtree.subtreeCounts.nodesWithTODO++;
  }
}
function makeTreeForSuite(
suite: string,
isCollapsible: (sq: TestQuery) => boolean
): TestSubtree<TestQueryMultiFile> {
const query = new TestQueryMultiFile(suite, []);
return {
readableRelativeName: suite + kBigSeparator,
query,
children: new Map(),
collapsible: isCollapsible(query),
};
}
/**
 * Walk (creating nodes as needed) the directory-level subtrees for `file` under `tree`
 * (suite:*), returning the deepest one (suite:a,b,*).
 */
function addSubtreeForDirPath(
  tree: TestSubtree<TestQueryMultiFile>,
  file: string[],
  isCollapsible: (sq: TestQuery) => boolean
): TestSubtree<TestQueryMultiFile> {
  const subqueryFile: string[] = [];
  // To start, tree is suite:*
  // This loop goes from that -> suite:a,* -> suite:a,b,*
  for (const part of file) {
    subqueryFile.push(part);
    tree = getOrInsertSubtree(part, tree, () => {
      // NOTE(review): `subqueryFile` is mutated by later iterations; this assumes
      // TestQueryMultiFile copies (or never retains) the array it is given — confirm.
      const query = new TestQueryMultiFile(tree.query.suite, subqueryFile);
      return {
        readableRelativeName: part + kPathSeparator + kWildcard,
        query,
        collapsible: isCollapsible(query),
      };
    });
  }
  return tree;
}
/**
 * Insert (or fetch) the file-level subtree (`suite:a,b:*`) for `file` under `tree`
 * (`suite:*`), creating its directory-level ancestors as needed.
 */
function addSubtreeForFilePath(
  tree: TestSubtree<TestQueryMultiFile>,
  file: string[],
  isCollapsible: (sq: TestQuery) => boolean
): TestSubtree<TestQueryMultiTest> {
  // First walk/create suite:a,* -> suite:a,b,* ...
  const dirTree = addSubtreeForDirPath(tree, file, isCollapsible);
  // ... then hang the file-level node suite:a,b:* beneath it (always keyed by '').
  return getOrInsertSubtree('', dirTree, () => {
    const query = new TestQueryMultiTest(dirTree.query.suite, dirTree.query.filePathParts, []);
    assert(file.length > 0, 'file path is empty');
    const readableRelativeName = file[file.length - 1] + kBigSeparator + kWildcard;
    return { readableRelativeName, query, collapsible: isCollapsible(query) };
  });
}
/**
 * Insert (or fetch) the test-level subtree (`suite:a,b:c,d:*`) for `test` under `tree`
 * (`suite:a,b:*`), creating one intermediate node per test-path part along the way.
 *
 * Fix: the final object literal contained a stray shorthand property `kWildcard,` —
 * an extraneous field that is not part of `Omit<TestSubtree<T>, 'children'>` (and fails
 * TypeScript's excess-property check on the contextually-typed return). The sibling
 * builders (addSubtreeForFilePath, addLeafForCase) have no such property. Removed.
 */
function addSubtreeForTestPath(
  tree: TestSubtree<TestQueryMultiTest>,
  test: readonly string[],
  testCreationStack: Error,
  isCollapsible: (sq: TestQuery) => boolean
): TestSubtree<TestQueryMultiCase> {
  const subqueryTest: string[] = [];
  // To start, tree is suite:a,b:*
  // This loop goes from that -> suite:a,b:c,* -> suite:a,b:c,d,*
  for (const part of test) {
    subqueryTest.push(part);
    tree = getOrInsertSubtree(part, tree, () => {
      const query = new TestQueryMultiTest(
        tree.query.suite,
        tree.query.filePathParts,
        subqueryTest
      );
      return {
        readableRelativeName: part + kPathSeparator + kWildcard,
        query,
        collapsible: isCollapsible(query),
      };
    });
  }
  // This goes from that -> suite:a,b:c,d:*
  return getOrInsertSubtree('', tree, () => {
    const query = new TestQueryMultiCase(
      tree.query.suite,
      tree.query.filePathParts,
      subqueryTest,
      {}
    );
    assert(subqueryTest.length > 0, 'subqueryTest is empty');
    return {
      readableRelativeName: subqueryTest[subqueryTest.length - 1] + kBigSeparator + kWildcard,
      query,
      testCreationStack,
      collapsible: isCollapsible(query),
    };
  });
}
/**
 * Insert the leaf for case `t` under `tree` (a `suite:a,b:c,d:*` subtree), creating one
 * intermediate param-level subtree per param (`suite:a,b:c,d:x=1;*`, ...) along the way.
 */
function addLeafForCase(
  tree: TestSubtree<TestQueryMultiTest>,
  t: RunCase,
  checkCollapsible: (sq: TestQuery) => boolean
): void {
  const query = tree.query;
  let name: string = '';
  // Accumulates params one at a time as the loop descends through the param levels.
  const subqueryParams: TestParamsRW = {};

  // To start, tree is suite:a,b:c,d:*
  // This loop goes from that -> suite:a,b:c,d:x=1;* -> suite:a,b:c,d:x=1;y=2;*
  for (const [k, v] of Object.entries(t.id.params)) {
    name = stringifySingleParam(k, v);
    subqueryParams[k] = v;

    tree = getOrInsertSubtree(name, tree, () => {
      const subquery = new TestQueryMultiCase(
        query.suite,
        query.filePathParts,
        query.testPathParts,
        subqueryParams
      );
      return {
        readableRelativeName: name + kParamSeparator + kWildcard,
        query: subquery,
        collapsible: checkCollapsible(subquery),
      };
    });
  }

  // This goes from that -> suite:a,b:c,d:x=1;y=2
  const subquery = new TestQuerySingleCase(
    query.suite,
    query.filePathParts,
    query.testPathParts,
    subqueryParams
  );
  checkCollapsible(subquery); // mark seenSubqueriesToExpand
  insertLeaf(tree, subquery, t);
}
/**
 * Return `parent`'s child subtree named `key`, creating it (via `createSubtree`, plus an
 * empty `children` map) and inserting it if it does not exist yet.
 */
function getOrInsertSubtree<T extends TestQuery>(
  key: string,
  parent: TestSubtree,
  createSubtree: () => Omit<TestSubtree<T>, 'children'>
): TestSubtree<T> {
  const existing = parent.children.get(key);
  if (existing !== undefined) {
    // Make sure the cached node is not actually a leaf.
    assert('children' in existing);
    return existing as TestSubtree<T>;
  }
  const created: TestSubtree<T> = { ...createSubtree(), children: new Map() };
  parent.children.set(key, created);
  return created;
}
function insertLeaf(parent: TestSubtree, query: TestQuerySingleCase, t: RunCase) {
const leaf: TestTreeLeaf = {
readableRelativeName: readableNameForCase(query),
query,
run: (rec, expectations) => t.run(rec, query, expectations || []),
isUnimplemented: t.isUnimplemented,
};
// This is a leaf (e.g. s:f:t:x=1;* -> s:f:t:x=1). The key is always ''.
const key = '';
assert(!parent.children.has(key), `Duplicate testcase: ${query}`);
parent.children.set(key, leaf);
}
/**
 * Recursively replace any subtree having exactly one child (and no description, and not the
 * first node of its query level) with that child. Returns the node that should stand in for
 * `tree`; children of surviving nodes are rewritten in place.
 */
function dissolveSingleChildTrees(tree: TestTreeNode): TestTreeNode {
  if (!('children' in tree)) {
    // Leaves are returned unchanged.
    return tree;
  }
  const dissolvable =
    tree.children.size === 1 && tree.query.depthInLevel !== 0 && tree.description === undefined;
  if (dissolvable) {
    // Exactly one entry: dissolve this node by recursing directly into its only child.
    const [onlyChild] = tree.children.values();
    return dissolveSingleChildTrees(onlyChild);
  }
  for (const [key, child] of tree.children) {
    const replacement = dissolveSingleChildTrees(child);
    if (replacement !== child) {
      tree.children.set(key, replacement);
    }
  }
  return tree;
}
/** Generate a readable relative name for a case (used in standalone). */
function readableNameForCase(query: TestQuerySingleCase): string {
  const keys = Object.keys(query.params);
  if (keys.length > 0) {
    // Show only the last param, e.g. `y=2`.
    const lastKey = keys[keys.length - 1];
    return stringifySingleParam(lastKey, query.params[lastKey]);
  }
  // No params: show the last test-path part followed by the query separator.
  return query.testPathParts[query.testPathParts.length - 1] + kBigSeparator;
}

View File

@ -0,0 +1,10 @@
/**
 * Error without a stack, which can be used to fatally exit from `tool/` scripts with a
 * user-friendly message (and no confusing stack).
 */
export class StacklessError extends Error {
  constructor(message: string) {
    super(message);
    // Discard the stack captured by Error so only the message is reported.
    this.stack = undefined;
  }
}

View File

@ -0,0 +1 @@
// Suite version string. NOTE(review): presumably overwritten with a real revision id at
// build/vendoring time, with 'unknown' as the placeholder — confirm against the generator.
export const version = 'unknown';

View File

@ -0,0 +1,278 @@
/* eslint no-console: "off" */
import * as fs from 'fs';
import { dataCache } from '../framework/data_cache.js';
import { globalTestConfig } from '../framework/test_config.js';
import { DefaultTestFileLoader } from '../internal/file_loader.js';
import { prettyPrintLog } from '../internal/logging/log_message.js';
import { Logger } from '../internal/logging/logger.js';
import { LiveTestCaseResult } from '../internal/logging/result.js';
import { parseQuery } from '../internal/query/parseQuery.js';
import { parseExpectationsForTestQuery } from '../internal/query/query.js';
import { Colors } from '../util/colors.js';
import { setGPUProvider } from '../util/navigator_gpu.js';
import { assert, unreachable } from '../util/util.js';
import sys from './helper/sys.js';
/**
 * Print command-line usage and exit with code `rc`.
 * NOTE(review): `--data` and `--list-unimplemented` are parsed by the flag loop below but are
 * missing from this help text — confirm and document them.
 */
function usage(rc: number): never {
  console.log(`Usage:
tools/run_${sys.type} [OPTIONS...] QUERIES...
tools/run_${sys.type} 'unittests:*' 'webgpu:buffers,*'
Options:
--colors Enable ANSI colors in output.
--coverage Emit coverage data.
--verbose Print result/log of every test as it runs.
--list Print all testcase names that match the given query and exit.
--debug Include debug messages in logging.
--print-json Print the complete result JSON in the output.
--expectations Path to expectations file.
--gpu-provider Path to node module that provides the GPU implementation.
--gpu-provider-flag Flag to set on the gpu-provider as <flag>=<value>
--unroll-const-eval-loops Unrolls loops in constant-evaluation shader execution tests
--quiet Suppress summary information in output
`);
  return sys.exit(rc);
}
// The interface that exposes creation of the GPU, and optional interface to code coverage.
interface GPUProviderModule {
  // @returns a GPU with the given flags
  create(flags: string[]): GPU;
  // An optional interface to a CodeCoverageProvider
  coverage?: CodeCoverageProvider;
}

// Opaque code-coverage collection interface, implemented by the GPU provider module.
interface CodeCoverageProvider {
  // Starts collecting code coverage
  begin(): void;
  // Ends collecting of code coverage, returning the coverage data.
  // This data is opaque (implementation defined).
  end(): string;
}
// What to print instead of running tests: nothing special, all case names, or
// only unimplemented case names.
type listModes = 'none' | 'cases' | 'unimplemented';

// Mutable option state, filled in by the command-line parsing loop below.
Colors.enabled = false;
let verbose = false;
let emitCoverage = false;
let listMode: listModes = 'none';
let debug = false;
let printJSON = false;
let quiet = false;
let loadWebGPUExpectations: Promise<unknown> | undefined = undefined;
let gpuProviderModule: GPUProviderModule | undefined = undefined;
let dataPath: string | undefined = undefined;

const queries: string[] = [];
const gpuProviderFlags: string[] = [];
// Parse command-line flags; any non-flag argument is collected as a test query.
for (let i = 0; i < sys.args.length; ++i) {
  const a = sys.args[i];
  if (a.startsWith('-')) {
    if (a === '--colors') {
      Colors.enabled = true;
    } else if (a === '--coverage') {
      emitCoverage = true;
    } else if (a === '--verbose') {
      verbose = true;
    } else if (a === '--list') {
      listMode = 'cases';
    } else if (a === '--list-unimplemented') {
      listMode = 'unimplemented';
    } else if (a === '--debug') {
      debug = true;
    } else if (a === '--data') {
      dataPath = sys.args[++i];
    } else if (a === '--print-json') {
      printJSON = true;
    } else if (a === '--expectations') {
      // Kick off loading the expectations module eagerly; the promise is awaited later.
      const expectationsFile = new URL(sys.args[++i], `file://${sys.cwd()}`).pathname;
      loadWebGPUExpectations = import(expectationsFile).then(m => m.expectations);
    } else if (a === '--gpu-provider') {
      const modulePath = sys.args[++i];
      gpuProviderModule = require(modulePath);
    } else if (a === '--gpu-provider-flag') {
      gpuProviderFlags.push(sys.args[++i]);
    } else if (a === '--quiet') {
      quiet = true;
    } else if (a === '--unroll-const-eval-loops') {
      globalTestConfig.unrollConstEvalLoops = true;
    } else {
      console.log('unrecognized flag: ', a);
      usage(1);
    }
  } else {
    queries.push(a);
  }
}
let codeCoverage: CodeCoverageProvider | undefined = undefined;

// Install the GPU provider (if any), and its coverage interface when --coverage was given.
if (gpuProviderModule) {
  setGPUProvider(() => gpuProviderModule!.create(gpuProviderFlags));

  if (emitCoverage) {
    codeCoverage = gpuProviderModule.coverage;
    if (codeCoverage === undefined) {
      console.error(
        `--coverage specified, but the GPUProviderModule does not support code coverage.
Did you remember to build with code coverage instrumentation enabled?`
      );
      sys.exit(1);
    }
  }
}

// Back the data cache with files read from --data's directory.
if (dataPath !== undefined) {
  dataCache.setStore({
    load: (path: string) => {
      return new Promise<string>((resolve, reject) => {
        fs.readFile(`${dataPath}/${path}`, 'utf8', (err, data) => {
          if (err !== null) {
            reject(err.message);
          } else {
            resolve(data);
          }
        });
      });
    },
  });
}
if (verbose) {
  dataCache.setDebugLogger(console.log);
}

if (queries.length === 0) {
  console.log('no queries specified');
  usage(0);
}
// Main entry point: load, run, and report on all cases matching the single query.
(async () => {
  const loader = new DefaultTestFileLoader();
  assert(queries.length === 1, 'currently, there must be exactly one query on the cmd line');
  const filterQuery = parseQuery(queries[0]);
  const testcases = await loader.loadCases(filterQuery);
  const expectations = parseExpectationsForTestQuery(
    await (loadWebGPUExpectations ?? []),
    filterQuery
  );

  Logger.globalDebugMode = debug;
  const log = new Logger();

  const failed: Array<[string, LiveTestCaseResult]> = [];
  const warned: Array<[string, LiveTestCaseResult]> = [];
  const skipped: Array<[string, LiveTestCaseResult]> = [];

  let total = 0;

  if (codeCoverage !== undefined) {
    codeCoverage.begin();
  }

  for (const testcase of testcases) {
    const name = testcase.query.toString();
    // In a --list mode, only print names; don't run anything.
    switch (listMode) {
      case 'cases':
        console.log(name);
        continue;
      case 'unimplemented':
        if (testcase.isUnimplemented) {
          console.log(name);
        }
        continue;
      default:
        break;
    }

    const [rec, res] = log.record(name);
    await testcase.run(rec, expectations);

    if (verbose) {
      printResults([[name, res]]);
    }

    total++;
    // Bucket the result for the summary below.
    switch (res.status) {
      case 'pass':
        break;
      case 'fail':
        failed.push([name, res]);
        break;
      case 'warn':
        warned.push([name, res]);
        break;
      case 'skip':
        skipped.push([name, res]);
        break;
      default:
        unreachable('unrecognized status');
    }
  }

  if (codeCoverage !== undefined) {
    const coverage = codeCoverage.end();
    console.log(`Code-coverage: [[${coverage}]]`);
  }

  if (listMode !== 'none') {
    return;
  }

  assert(total > 0, 'found no tests!');

  // MAINTENANCE_TODO: write results out somewhere (a file?)
  if (printJSON) {
    console.log(log.asJSON(2));
  }

  if (!quiet) {
    if (skipped.length) {
      console.log('');
      console.log('** Skipped **');
      printResults(skipped);
    }
    if (warned.length) {
      console.log('');
      console.log('** Warnings **');
      printResults(warned);
    }
    if (failed.length) {
      console.log('');
      console.log('** Failures **');
      printResults(failed);
    }

    const passed = total - warned.length - failed.length - skipped.length;
    const pct = (x: number) => ((100 * x) / total).toFixed(2);
    const rpt = (x: number) => {
      const xs = x.toString().padStart(1 + Math.log10(total), ' ');
      return `${xs} / ${total} = ${pct(x).padStart(6, ' ')}%`;
    };
    console.log('');
    console.log(`** Summary **
Passed w/o warnings = ${rpt(passed)}
Passed with warnings = ${rpt(warned.length)}
Skipped = ${rpt(skipped.length)}
Failed = ${rpt(failed.length)}`);
  }

  // Any failure or warning makes the process exit nonzero (skips do not).
  if (failed.length || warned.length) {
    sys.exit(1);
  }
})().catch(ex => {
  console.log(ex.stack ?? ex.toString());
  sys.exit(1);
});
/** Pretty-print a list of [testcase name, result] pairs along with their logs. */
function printResults(results: Array<[string, LiveTestCaseResult]>): void {
  for (const [name, r] of results) {
    console.log(`[${r.status}] ${name} (${r.timems}ms). Log:`);
    for (const l of r.logs ?? []) {
      console.log(prettyPrintLog(l));
    }
  }
}

View File

@ -0,0 +1,22 @@
// Cache of the page URL, created on first use.
let windowURL: URL | undefined = undefined;

/** Lazily create (and cache) a URL for the current window location. */
function getWindowURL() {
  windowURL ??= new URL(window.location.toString());
  return windowURL;
}
/**
 * Check whether a boolean-style option is set in the given search params (defaulting to the
 * current page's). Any present value counts as enabled except the literal '0'.
 */
export function optionEnabled(
  opt: string,
  searchParams: URLSearchParams = getWindowURL().searchParams
): boolean {
  const value = searchParams.get(opt);
  if (value === null) {
    return false;
  }
  return value !== '0';
}
/**
 * Get a string-valued option from the given search params (defaulting to the current
 * page's), returning '' when the option is absent.
 */
export function optionString(
  opt: string,
  searchParams: URLSearchParams = getWindowURL().searchParams
): string {
  return searchParams.get(opt) ?? '';
}

View File

@ -0,0 +1,46 @@
/* eslint no-process-exit: "off" */
/* eslint @typescript-eslint/no-namespace: "off" */
/** Build the Node.js runtime shim: args, cwd, exit, and fs.existsSync. */
function node() {
  const { existsSync } = require('fs');

  return {
    type: 'node',
    existsSync,
    // Drop `node` and the script path; keep only user arguments.
    args: process.argv.slice(2),
    cwd: () => process.cwd(),
    exit: (code?: number | undefined) => process.exit(code),
  };
}
// Minimal ambient declarations for the Deno globals used below, so this file type-checks
// without pulling in the full Deno type definitions.
declare global {
  namespace Deno {
    function readFileSync(path: string): Uint8Array;
    const args: string[];
    const cwd: () => string;
    function exit(code?: number): never;
  }
}
/** Build the Deno runtime shim, matching the shape returned by the Node shim. */
function deno() {
  // Deno lacks an existsSync here; emulate it by attempting to read the file.
  const existsSync = (path: string) => {
    try {
      Deno.readFileSync(path);
      return true;
    } catch {
      return false;
    }
  };
  return {
    type: 'deno',
    existsSync,
    args: Deno.args,
    cwd: Deno.cwd,
    exit: Deno.exit,
  };
}
// Pick the shim for the current runtime: Node when a global `process` exists, else Deno.
const sys = typeof globalThis.process !== 'undefined' ? node() : deno();

export default sys;

View File

@ -0,0 +1,32 @@
import { setBaseResourcePath } from '../../framework/resources.js';
import { DefaultTestFileLoader } from '../../internal/file_loader.js';
import { Logger } from '../../internal/logging/logger.js';
import { parseQuery } from '../../internal/query/parseQuery.js';
import { TestQueryWithExpectation } from '../../internal/query/query.js';
import { assert } from '../../util/util.js';
// Should be DedicatedWorkerGlobalScope, but importing lib "webworker" conflicts with lib "dom".
/* eslint-disable-next-line @typescript-eslint/no-explicit-any */
declare const self: any;

const loader = new DefaultTestFileLoader();

// NOTE(review): path is presumably relative to the built worker's location — confirm.
setBaseResourcePath('../../../resources');

// Run exactly one test case per incoming message, then post its result back to the
// creating TestWorker (which matches it up by query string).
self.onmessage = async (ev: MessageEvent) => {
  const query: string = ev.data.query;
  const expectations: TestQueryWithExpectation[] = ev.data.expectations;
  const debug: boolean = ev.data.debug;

  Logger.globalDebugMode = debug;
  const log = new Logger();

  const testcases = Array.from(await loader.loadCases(parseQuery(query)));
  assert(testcases.length === 1, 'worker query resulted in != 1 cases');

  const testcase = testcases[0];
  const [rec, result] = log.record(testcase.query.toString());
  await testcase.run(rec, expectations);

  self.postMessage({ query, result });
};

View File

@ -0,0 +1,44 @@
import { LogMessageWithStack } from '../../internal/logging/log_message.js';
import { TransferredTestCaseResult, LiveTestCaseResult } from '../../internal/logging/result.js';
import { TestCaseRecorder } from '../../internal/logging/test_case_recorder.js';
import { TestQueryWithExpectation } from '../../internal/query/query.js';
/** Runs single test cases in a dedicated Web Worker (test_worker-worker.js). */
export class TestWorker {
  private readonly debug: boolean;
  private readonly worker: Worker;
  // Maps a pending query string to the resolver of the promise awaiting its result.
  private readonly resolvers = new Map<string, (result: LiveTestCaseResult) => void>();

  constructor(debug: boolean) {
    this.debug = debug;

    // Locate the worker script next to this module.
    const selfPath = import.meta.url;
    const selfPathDir = selfPath.substring(0, selfPath.lastIndexOf('/'));
    const workerPath = selfPathDir + '/test_worker-worker.js';
    this.worker = new Worker(workerPath, { type: 'module' });
    this.worker.onmessage = ev => {
      const query: string = ev.data.query;
      const result: TransferredTestCaseResult = ev.data.result;
      if (result.logs) {
        for (const l of result.logs) {
          // Logs were structured-cloned across the worker boundary; restore their prototype.
          Object.setPrototypeOf(l, LogMessageWithStack.prototype);
        }
      }
      // NOTE(review): resolvers are keyed by query string, so two concurrent runs of the
      // same query would collide — confirm callers serialize per-query runs.
      this.resolvers.get(query)!(result as LiveTestCaseResult);

      // MAINTENANCE_TODO(kainino0x): update the Logger with this result (or don't have a logger and
      // update the entire results JSON somehow at some point).
    };
  }

  async run(
    rec: TestCaseRecorder,
    query: string,
    expectations: TestQueryWithExpectation[] = []
  ): Promise<void> {
    // Post the request, then wait for the onmessage handler above to resolve it.
    this.worker.postMessage({ query, expectations, debug: this.debug });
    const workerResult = await new Promise<LiveTestCaseResult>(resolve => {
      this.resolvers.set(query, resolve);
    });
    rec.injectResult(workerResult);
  }
}

View File

@ -0,0 +1,227 @@
/* eslint no-console: "off" */
import * as fs from 'fs';
import * as http from 'http';
import { AddressInfo } from 'net';
import { dataCache } from '../framework/data_cache.js';
import { globalTestConfig } from '../framework/test_config.js';
import { DefaultTestFileLoader } from '../internal/file_loader.js';
import { prettyPrintLog } from '../internal/logging/log_message.js';
import { Logger } from '../internal/logging/logger.js';
import { LiveTestCaseResult, Status } from '../internal/logging/result.js';
import { parseQuery } from '../internal/query/parseQuery.js';
import { TestQueryWithExpectation } from '../internal/query/query.js';
import { TestTreeLeaf } from '../internal/tree.js';
import { Colors } from '../util/colors.js';
import { setGPUProvider } from '../util/navigator_gpu.js';
import sys from './helper/sys.js';
/**
 * Print server usage and exit with code `rc`.
 * NOTE(review): the `--u` entry below duplicates --gpu-provider-flag's description and looks
 * like copy/paste residue — confirm against the flags actually parsed in the loop below.
 */
function usage(rc: number): never {
  console.log(`Usage:
tools/run_${sys.type} [OPTIONS...]
Options:
--colors Enable ANSI colors in output.
--coverage Add coverage data to each result.
--data Path to the data cache directory.
--verbose Print result/log of every test as it runs.
--gpu-provider Path to node module that provides the GPU implementation.
--gpu-provider-flag Flag to set on the gpu-provider as <flag>=<value>
--unroll-const-eval-loops Unrolls loops in constant-evaluation shader execution tests
--u Flag to set on the gpu-provider as <flag>=<value>
Provides an HTTP server used for running tests via an HTTP RPC interface
To run a test, perform an HTTP GET or POST at the URL:
http://localhost:port/run?<test-name>
To shutdown the server perform an HTTP GET or POST at the URL:
http://localhost:port/terminate
`);
  return sys.exit(rc);
}
// The JSON-serializable payload returned by the server for a /run request.
interface RunResult {
  // The result of the test
  status: Status;
  // Any additional messages printed
  message: string;
  // Code coverage data, if the server was started with `--coverage`
  // This data is opaque (implementation defined).
  coverageData?: string;
}

// The interface that exposes creation of the GPU, and optional interface to code coverage.
interface GPUProviderModule {
  // @returns a GPU with the given flags
  create(flags: string[]): GPU;
  // An optional interface to a CodeCoverageProvider
  coverage?: CodeCoverageProvider;
}

interface CodeCoverageProvider {
  // Starts collecting code coverage
  begin(): void;
  // Ends collecting of code coverage, returning the coverage data.
  // This data is opaque (implementation defined).
  end(): string;
}
// Guard: the relative paths used below assume the CWD is the repository root,
// so bail out early (with usage) if a known source file is not where expected.
if (!sys.existsSync('src/common/runtime/cmdline.ts')) {
  console.log('Must be run from repository root');
  usage(1);
}

// Mutable configuration, populated by the command-line parsing loop below.
Colors.enabled = false;

let emitCoverage = false;
let verbose = false;
let gpuProviderModule: GPUProviderModule | undefined = undefined;
let dataPath: string | undefined = undefined;
const gpuProviderFlags: string[] = [];
// Parse command-line flags. Flags that take a value consume the next argv
// entry (via ++i). Non-flag arguments are silently ignored, and unrecognized
// flags are reported but deliberately non-fatal.
for (let i = 0; i < sys.args.length; ++i) {
  const a = sys.args[i];
  if (a.startsWith('-')) {
    if (a === '--colors') {
      Colors.enabled = true;
    } else if (a === '--coverage') {
      emitCoverage = true;
    } else if (a === '--data') {
      dataPath = sys.args[++i];
    } else if (a === '--gpu-provider') {
      const modulePath = sys.args[++i];
      gpuProviderModule = require(modulePath);
    } else if (a === '--gpu-provider-flag') {
      gpuProviderFlags.push(sys.args[++i]);
    } else if (a === '--unroll-const-eval-loops') {
      globalTestConfig.unrollConstEvalLoops = true;
    } else if (a === '--help') {
      // Requested help is not an error: exit with status 0 (was usage(1)).
      usage(0);
    } else if (a === '--verbose') {
      verbose = true;
    } else {
      console.log(`unrecognised flag: ${a}`);
    }
  }
}
// If a custom GPU provider module was loaded, install it as the GPU factory.
// Code coverage collection is only available through a provider module that
// exposes the optional `coverage` interface.
let codeCoverage: CodeCoverageProvider | undefined = undefined;
if (gpuProviderModule) {
  // Non-null assertion is safe here: gpuProviderModule was just checked and
  // is never cleared afterwards.
  setGPUProvider(() => gpuProviderModule!.create(gpuProviderFlags));
  if (emitCoverage) {
    codeCoverage = gpuProviderModule.coverage;
    if (codeCoverage === undefined) {
      console.error(
        `--coverage specified, but the GPUProviderModule does not support code coverage.
Did you remember to build with code coverage instrumentation enabled?`
      );
      sys.exit(1);
    }
  }
}
// Wire the data cache to the local filesystem when --data was provided.
// (Note: `fs` must be imported at the top of the file; the previous revision
// used fs.readFile without importing it.)
if (dataPath !== undefined) {
  dataCache.setStore({
    load: async (path: string) => {
      try {
        // fs.promises avoids hand-rolling a new Promise around the callback API.
        return await fs.promises.readFile(`${dataPath}/${path}`, 'utf8');
      } catch (err) {
        // Preserve the previous contract of rejecting with the message string,
        // not the Error object.
        throw (err as Error).message;
      }
    },
  });
}

// In verbose mode, also trace data-cache activity to stdout.
if (verbose) {
  dataCache.setDebugLogger(console.log);
}
// Main entry point: starts an HTTP server exposing a small RPC interface for
// driving CTS test cases: /load?<query>, /run?<test-name>, /terminate.
(async () => {
  Logger.globalDebugMode = verbose;
  const log = new Logger();

  // Cases registered via /load?, keyed by their fully-qualified query string,
  // so /run? can look them up by name.
  const testcases = new Map<string, TestTreeLeaf>();

  // Runs a single case, recording into the shared Logger; returns its result.
  async function runTestcase(
    testcase: TestTreeLeaf,
    expectations: TestQueryWithExpectation[] = []
  ): Promise<LiveTestCaseResult> {
    const name = testcase.query.toString();
    const [rec, res] = log.record(name);
    await testcase.run(rec, expectations);
    return res;
  }

  const server = http.createServer(
    async (request: http.IncomingMessage, response: http.ServerResponse) => {
      if (request.url === undefined) {
        response.end('invalid url');
        return;
      }

      const loadCasesPrefix = '/load?';
      const runPrefix = '/run?';
      const terminatePrefix = '/terminate';

      if (request.url.startsWith(loadCasesPrefix)) {
        // Load and cache every case matching the given query.
        const query = request.url.substr(loadCasesPrefix.length);
        try {
          const webgpuQuery = parseQuery(query);
          const loader = new DefaultTestFileLoader();
          for (const testcase of await loader.loadCases(webgpuQuery)) {
            testcases.set(testcase.query.toString(), testcase);
          }
          response.statusCode = 200;
          response.end();
        } catch (err) {
          response.statusCode = 500;
          response.end(`load failed with error: ${err}\n${(err as Error).stack}`);
        }
      } else if (request.url.startsWith(runPrefix)) {
        // Run one previously-loaded case by name, optionally wrapping the run
        // in begin()/end() of the coverage provider.
        const name = request.url.substr(runPrefix.length);
        try {
          const testcase = testcases.get(name);
          if (testcase) {
            if (codeCoverage !== undefined) {
              codeCoverage.begin();
            }
            const result = await runTestcase(testcase);
            const coverageData = codeCoverage !== undefined ? codeCoverage.end() : undefined;
            let message = '';
            if (result.logs !== undefined) {
              message = result.logs.map(log => prettyPrintLog(log)).join('\n');
            }
            const status = result.status;
            const res: RunResult = { status, message, coverageData };
            response.statusCode = 200;
            response.end(JSON.stringify(res));
          } else {
            response.statusCode = 404;
            response.end(`test case '${name}' not found`);
          }
        } catch (err) {
          response.statusCode = 500;
          response.end(`run failed with error: ${err}`);
        }
      } else if (request.url.startsWith(terminatePrefix)) {
        // NOTE(review): exits with code 1 and never writes a response body —
        // clients presumably treat the dropped connection as a successful
        // shutdown. Confirm before changing.
        server.close();
        sys.exit(1);
      } else {
        response.statusCode = 404;
        response.end('unhandled url request');
      }
    }
  );

  // Listen on an ephemeral port, and advertise it on stdout in a form the
  // driving harness can parse: "Server listening at [[<port>]]".
  server.listen(0, () => {
    const address = server.address() as AddressInfo;
    console.log(`Server listening at [[${address.port}]]`);
  });
})().catch(ex => {
  console.error(ex.stack ?? ex.toString());
  sys.exit(1);
});

View File

@ -0,0 +1,625 @@
// Implements the standalone test runner (see also: /standalone/index.html).
import { dataCache } from '../framework/data_cache.js';
import { setBaseResourcePath } from '../framework/resources.js';
import { globalTestConfig } from '../framework/test_config.js';
import { DefaultTestFileLoader } from '../internal/file_loader.js';
import { Logger } from '../internal/logging/logger.js';
import { LiveTestCaseResult } from '../internal/logging/result.js';
import { parseQuery } from '../internal/query/parseQuery.js';
import { TestQueryLevel } from '../internal/query/query.js';
import { TestTreeNode, TestSubtree, TestTreeLeaf, TestTree } from '../internal/tree.js';
import { setDefaultRequestAdapterOptions } from '../util/navigator_gpu.js';
import { assert, ErrorWithExtra, unreachable } from '../util/util.js';
import { optionEnabled, optionString } from './helper/options.js';
import { TestWorker } from './helper/test_worker.js';
window.onbeforeunload = () => {
  // Prompt user before reloading if there are any results.
  // (Returning any non-undefined value — here `false` — causes the browser to
  // show its leave-page confirmation; `undefined` suppresses it.)
  return haveSomeResults ? false : undefined;
};

// Set once any test case has produced a result; gates the unload prompt above.
let haveSomeResults = false;
// The possible options for the tests.
interface StandaloneOptions {
  runnow: boolean;
  worker: boolean;
  debug: boolean;
  unrollConstEvalLoops: boolean;
  powerPreference: string;
}

// Extra per option info.
interface StandaloneOptionInfo {
  description: string;
  // Parses the option's value out of the URL; defaults to optionEnabled.
  parser?: (key: string) => boolean | string;
  // For string-valued options: the choices offered in the <select> dropdown.
  selectValueDescriptions?: { value: string; description: string }[];
}

// Type for info for every option. This definition means adding an option
// will generate a compile time error if not extra info is provided.
type StandaloneOptionsInfos = Record<keyof StandaloneOptions, StandaloneOptionInfo>;

const optionsInfo: StandaloneOptionsInfos = {
  runnow: { description: 'run immediately on load' },
  worker: { description: 'run in a worker' },
  debug: { description: 'show more info' },
  unrollConstEvalLoops: { description: 'unroll const eval loops in WGSL' },
  powerPreference: {
    description: 'set default powerPreference for some tests',
    parser: optionString,
    selectValueDescriptions: [
      { value: '', description: 'default' },
      { value: 'low-power', description: 'low-power' },
      { value: 'high-performance', description: 'high-performance' },
    ],
  },
};
/**
 * Converts camel case to snake case.
 * Examples:
 *   fooBar -> foo_bar
 *   parseHTMLFile -> parse_html_file
 */
function camelCaseToSnakeCase(id: string) {
  // First split off trailing capitalized words (handles acronym boundaries
  // like "HTMLFile" -> "HTML_File"), then split remaining lower/upper seams.
  const acronymSplit = id.replace(/(.)([A-Z][a-z]+)/g, '$1_$2');
  const fullySplit = acronymSplit.replace(/([a-z0-9])([A-Z])/g, '$1_$2');
  return fullySplit.toLowerCase();
}
/**
* Creates a StandaloneOptions from the current URL search parameters.
*/
function getOptionsInfoFromSearchParameters(
optionsInfos: StandaloneOptionsInfos
): StandaloneOptions {
const optionValues: Record<string, boolean | string> = {};
for (const [optionName, info] of Object.entries(optionsInfos)) {
const parser = info.parser || optionEnabled;
optionValues[optionName] = parser(camelCaseToSnakeCase(optionName));
}
return (optionValues as unknown) as StandaloneOptions;
}
// This is just a cast in one place.
function optionsToRecord(options: StandaloneOptions) {
  const record = (options as unknown) as Record<string, boolean | string>;
  return record;
}
// Parse options from the URL and apply the globally-scoped ones immediately.
const options = getOptionsInfoFromSearchParameters(optionsInfo);
const { runnow, debug, unrollConstEvalLoops, powerPreference } = options;
globalTestConfig.unrollConstEvalLoops = unrollConstEvalLoops;

Logger.globalDebugMode = debug;
const logger = new Logger();

setBaseResourcePath('../out/resources');

// Optional worker for running cases off the main thread (?worker=1).
const worker = options.worker ? new TestWorker(debug) : undefined;

// Static page elements (defined in standalone/index.html).
const autoCloseOnPass = document.getElementById('autoCloseOnPass') as HTMLInputElement;
const resultsVis = document.getElementById('resultsVis')!;
const progressElem = document.getElementById('progress')!;
const progressTestNameElem = progressElem.querySelector('.progress-test-name')!;
const stopButtonElem = progressElem.querySelector('button')!;

// Tracks nesting of subtree runs; stopRequested is reset at depth 0.
let runDepth = 0;
let stopRequested = false;

stopButtonElem.addEventListener('click', () => {
  stopRequested = true;
});

if (powerPreference) {
  setDefaultRequestAdapterOptions({ powerPreference: powerPreference as GPUPowerPreference });
}

// The browser build fetches cached data files relative to the page (data/).
dataCache.setStore({
  load: async (path: string) => {
    const response = await fetch(`data/${path}`);
    if (!response.ok) {
      return Promise.reject(response.statusText);
    }
    return await response.text();
  },
});
// Aggregated pass/fail/warn/skip counts and total wall time for a subtree run.
interface SubtreeResult {
  pass: number;
  fail: number;
  warn: number;
  skip: number;
  total: number;
  timems: number;
}
/** Returns a fresh, all-zero accumulator for per-subtree result counts. */
function emptySubtreeResult() {
  const zeroed = { pass: 0, fail: 0, warn: 0, skip: 0, total: 0, timems: 0 };
  return zeroed;
}
/** Sums any number of SubtreeResults field-by-field into a new object. */
function mergeSubtreeResults(...results: SubtreeResult[]) {
  const merged = { pass: 0, fail: 0, warn: 0, skip: 0, total: 0, timems: 0 };
  for (const r of results) {
    merged.pass += r.pass;
    merged.fail += r.fail;
    merged.warn += r.warn;
    merged.skip += r.skip;
    merged.total += r.total;
    merged.timems += r.timems;
  }
  return merged;
}
// Expands a node's rendered subtree recursively (generating DOM lazily).
type SetCheckedRecursively = () => void;
// Generates a node's DOM under `parent`; returns its recursive expander.
type GenerateSubtreeHTML = (parent: HTMLElement) => SetCheckedRecursively;
// Runs a node's tests and returns the aggregated result counts.
type RunSubtree = () => Promise<SubtreeResult>;

// The two operations available on every visualized tree node.
interface VisualizedSubtree {
  generateSubtreeHTML: GenerateSubtreeHTML;
  runSubtree: RunSubtree;
}

// DOM generation
/**
 * Wraps a zero-argument function so it is invoked at most once; later calls
 * return the cached result.
 *
 * Uses an explicit `computed` flag rather than testing `value === undefined`,
 * so a function that legitimately returns `undefined` is still only called
 * once (the previous implementation re-invoked it on every call).
 */
function memoize<T>(fn: () => T): () => T {
  let computed = false;
  let value: T | undefined;
  return () => {
    if (!computed) {
      value = fn();
      computed = true;
    }
    return value as T;
  };
}
// Builds the runner + DOM generator for any tree node, dispatching on whether
// it is an internal subtree (has 'children') or a leaf case.
function makeTreeNodeHTML(tree: TestTreeNode, parentLevel: TestQueryLevel): VisualizedSubtree {
  let subtree: VisualizedSubtree;
  if ('children' in tree) {
    subtree = makeSubtreeHTML(tree, parentLevel);
  } else {
    subtree = makeCaseHTML(tree);
  }

  // Wrap the node's own HTML in a fresh <div> under the parent element.
  const generateMyHTML = (parentElement: HTMLElement) => {
    const div = $('<div>').appendTo(parentElement)[0];
    return subtree.generateSubtreeHTML(div);
  };
  return { runSubtree: subtree.runSubtree, generateSubtreeHTML: generateMyHTML };
}
// Builds the runner + DOM generator for a single leaf test case.
function makeCaseHTML(t: TestTreeLeaf): VisualizedSubtree {
  // Becomes set once the case has been run once.
  let caseResult: LiveTestCaseResult | undefined;
  // Becomes set once the DOM for this case exists.
  let clearRenderedResult: (() => void) | undefined;
  let updateRenderedResult: (() => void) | undefined;

  const name = t.query.toString();

  // Runs this one case (in a worker if configured); returns a 1-case summary.
  const runSubtree = async () => {
    if (clearRenderedResult) clearRenderedResult();

    const result: SubtreeResult = emptySubtreeResult();
    progressTestNameElem.textContent = name;
    haveSomeResults = true;

    const [rec, res] = logger.record(name);
    caseResult = res;
    if (worker) {
      await worker.run(rec, name);
    } else {
      await t.run(rec);
    }

    result.total++;
    result.timems += caseResult.timems;
    switch (caseResult.status) {
      case 'pass':
        result.pass++;
        break;
      case 'fail':
        result.fail++;
        break;
      case 'skip':
        result.skip++;
        break;
      case 'warn':
        result.warn++;
        break;
      default:
        unreachable();
    }

    if (updateRenderedResult) updateRenderedResult();
    return result;
  };

  // Generates this case's DOM into `div`; returns a setter that expands it.
  const generateSubtreeHTML = (div: HTMLElement) => {
    div.classList.add('testcase');

    const caselogs = $('<div>').addClass('testcaselogs').hide();
    // Toggling the case header shows/hides its logs.
    const [casehead, setChecked] = makeTreeNodeHeaderHTML(t, runSubtree, 2, checked => {
      checked ? caselogs.show() : caselogs.hide();
    });
    const casetime = $('<div>').addClass('testcasetime').html('ms').appendTo(casehead);
    div.appendChild(casehead);
    div.appendChild(caselogs[0]);

    clearRenderedResult = () => {
      div.removeAttribute('data-status');
      casetime.text('ms');
      caselogs.empty();
    };

    updateRenderedResult = () => {
      if (caseResult) {
        div.setAttribute('data-status', caseResult.status);
        casetime.text(caseResult.timems.toFixed(4) + ' ms');
        if (caseResult.logs) {
          caselogs.empty();
          // One row per log entry, with a button that dumps its stack to the
          // devtools console for interactive inspection.
          for (const l of caseResult.logs) {
            const caselog = $('<div>').addClass('testcaselog').appendTo(caselogs);
            $('<button>')
              .addClass('testcaselogbtn')
              .attr('alt', 'Log stack to console')
              .attr('title', 'Log stack to console')
              .appendTo(caselog)
              .on('click', () => {
                consoleLogError(l);
              });
            $('<pre>').addClass('testcaselogtext').appendTo(caselog).text(l.toJSON());
          }
        }
      }
    };

    updateRenderedResult();
    return setChecked;
  };

  return { runSubtree, generateSubtreeHTML };
}
// Builds the runner + DOM generator for an internal (non-leaf) tree node.
function makeSubtreeHTML(n: TestSubtree, parentLevel: TestQueryLevel): VisualizedSubtree {
  let subtreeResult: SubtreeResult = emptySubtreeResult();
  // Becomes set once the DOM for this case exists.
  let clearRenderedResult: (() => void) | undefined;
  let updateRenderedResult: (() => void) | undefined;

  const { runSubtree, generateSubtreeHTML } = makeSubtreeChildrenHTML(
    n.children.values(),
    n.query.level
  );

  const runMySubtree = async () => {
    // At the top of a run (depth 0), clear any stale stop request and show
    // the progress indicator.
    if (runDepth === 0) {
      stopRequested = false;
      progressElem.style.display = '';
    }
    // A pending stop request makes this subtree report as one skipped item.
    if (stopRequested) {
      const result = emptySubtreeResult();
      result.skip = 1;
      result.total = 1;
      return result;
    }

    ++runDepth;
    if (clearRenderedResult) clearRenderedResult();
    subtreeResult = await runSubtree();
    if (updateRenderedResult) updateRenderedResult();
    --runDepth;

    if (runDepth === 0) {
      progressElem.style.display = 'none';
    }
    return subtreeResult;
  };

  const generateMyHTML = (div: HTMLElement) => {
    const subtreeHTML = $('<div>').addClass('subtreechildren');
    // Child DOM is generated lazily, the first time this node is expanded.
    const generateSubtree = memoize(() => generateSubtreeHTML(subtreeHTML[0]));

    // Hide subtree - it's not generated yet.
    subtreeHTML.hide();
    const [header, setChecked] = makeTreeNodeHeaderHTML(n, runMySubtree, parentLevel, checked => {
      if (checked) {
        // Make sure the subtree is generated and then show it.
        generateSubtree();
        subtreeHTML.show();
      } else {
        subtreeHTML.hide();
      }
    });

    div.classList.add('subtree');
    div.classList.add(['', 'multifile', 'multitest', 'multicase'][n.query.level]);
    div.appendChild(header);
    div.appendChild(subtreeHTML[0]);

    clearRenderedResult = () => {
      div.removeAttribute('data-status');
    };
    updateRenderedResult = () => {
      // data-status becomes '', 'pass', 'fail', or 'passfail' (mixed).
      let status = '';
      if (subtreeResult.pass > 0) {
        status += 'pass';
      }
      if (subtreeResult.fail > 0) {
        status += 'fail';
      }
      div.setAttribute('data-status', status);
      if (autoCloseOnPass.checked && status === 'pass') {
        div.firstElementChild!.removeAttribute('open');
      }
    };

    updateRenderedResult();
    return () => {
      setChecked();
      const setChildrenChecked = generateSubtree();
      setChildrenChecked();
    };
  };

  return { runSubtree: runMySubtree, generateSubtreeHTML: generateMyHTML };
}
/**
 * Builds the runner and DOM generator for a list of sibling tree nodes.
 * Children are run strictly one-after-another, in order.
 */
function makeSubtreeChildrenHTML(
  children: Iterable<TestTreeNode>,
  parentLevel: TestQueryLevel
): VisualizedSubtree {
  const childSubtrees = Array.from(children, child => makeTreeNodeHTML(child, parentLevel));

  const runMySubtree = async () => {
    const childResults: SubtreeResult[] = [];
    // Deliberately sequential: test cases must not run concurrently.
    for (const child of childSubtrees) {
      childResults.push(await child.runSubtree());
    }
    return mergeSubtreeResults(...childResults);
  };

  const generateMyHTML = (div: HTMLElement) => {
    const childCheckSetters = childSubtrees.map(child => child.generateSubtreeHTML(div));
    return () => {
      childCheckSetters.forEach(setChildChecked => setChildChecked());
    };
  };

  return { runSubtree: runMySubtree, generateSubtreeHTML: generateMyHTML };
}
// Logs an error (and its `extra` payload, if present) to the devtools console,
// stashing it on globalThis._stack so it can be inspected interactively.
function consoleLogError(e: Error | ErrorWithExtra | undefined) {
  if (e === undefined) return;
  /* eslint-disable-next-line @typescript-eslint/no-explicit-any */
  (globalThis as any)._stack = e;
  /* eslint-disable-next-line no-console */
  console.log('_stack =', e);
  if ('extra' in e && e.extra !== undefined) {
    /* eslint-disable-next-line no-console */
    console.log('_stack.extra =', e.extra);
  }
}
// Builds the <details>/<summary> header for a tree node: run button, open
// link, optional creation-stack button, query text, counts, and description.
// Returns the element plus a callback that expands (opens) the node.
function makeTreeNodeHeaderHTML(
  n: TestTreeNode,
  runSubtree: RunSubtree,
  parentLevel: TestQueryLevel,
  onChange: (checked: boolean) => void
): [HTMLElement, SetCheckedRecursively] {
  const isLeaf = 'run' in n;
  const div = $('<details>').addClass('nodeheader');
  const header = $('<summary>').appendTo(div);

  const setChecked = () => {
    div.prop('open', true); // (does not fire onChange)
    onChange(true);
  };

  // Link that reopens the standalone page filtered to just this node,
  // preserving the worker/debug options.
  const href = `?${worker ? 'worker&' : ''}${debug ? 'debug&' : ''}q=${n.query.toString()}`;
  if (onChange) {
    div.on('toggle', function (this) {
      onChange((this as HTMLDetailsElement).open);
    });

    // Expand the shallower parts of the tree at load.
    // Also expand completely within subtrees that are at the same query level
    // (e.g. s:f:t,* and s:f:t,t,*).
    if (n.query.level <= lastQueryLevelToExpand || n.query.level === parentLevel) {
      setChecked();
    }
  }
  const runtext = isLeaf ? 'Run case' : 'Run subtree';

  $('<button>')
    .addClass(isLeaf ? 'leafrun' : 'subtreerun')
    .attr('alt', runtext)
    .attr('title', runtext)
    .on('click', () => void runSubtree())
    .appendTo(header);
  $('<a>')
    .addClass('nodelink')
    .attr('href', href)
    .attr('alt', 'Open')
    .attr('title', 'Open')
    .appendTo(header);
  if ('testCreationStack' in n && n.testCreationStack) {
    $('<button>')
      .addClass('testcaselogbtn')
      .attr('alt', 'Log test creation stack to console')
      .attr('title', 'Log test creation stack to console')
      .appendTo(header)
      .on('click', () => {
        consoleLogError(n.testCreationStack);
      });
  }
  const nodetitle = $('<div>').addClass('nodetitle').appendTo(header);
  const nodecolumns = $('<span>').addClass('nodecolumns').appendTo(nodetitle);
  {
    // Read-only query text, plus (TODO nodes)/(total tests) counts if known.
    $('<input>')
      .attr('type', 'text')
      .prop('readonly', true)
      .addClass('nodequery')
      .val(n.query.toString())
      .appendTo(nodecolumns);
    if (n.subtreeCounts) {
      $('<span>')
        .attr('title', '(Nodes with TODOs) / (Total test count)')
        .text(TestTree.countsToString(n))
        .appendTo(nodecolumns);
    }
  }
  if ('description' in n && n.description) {
    nodetitle.append('&nbsp;');
    $('<pre>') //
      .addClass('nodedescription')
      .text(n.description)
      .appendTo(header);
  }
  return [div[0], setChecked];
}
// Collapse s:f:t:* or s:f:t:c by default.
// (May be raised later to match the depth of the requested root query.)
let lastQueryLevelToExpand: TestQueryLevel = 2;

// A URL search-parameter value as handled by prepareParams/keyValueToPairs.
type ParamValue = string | undefined | null | boolean | string[];
/**
 * Takes an array of string, ParamValue and returns an array of pairs
 * of [key, value] where value is a string. Converts boolean to '0' or '1'.
 * The key is converted to snake_case; array values produce one pair each.
 */
function keyValueToPairs([k, v]: [string, ParamValue]): [string, string][] {
  const key = camelCaseToSnakeCase(k);
  if (typeof v === 'boolean') {
    return [[key, v ? '1' : '0']];
  } else if (Array.isArray(v)) {
    return v.map(v => [key, v]);
  } else {
    // Non-null assertion: callers filter out falsy values first (prepareParams).
    return [[key, v!.toString()]];
  }
}
/**
 * Converts key value pairs to a search string.
 * Keys will appear in order in the search string.
 * Values can be undefined, null, boolean, string, or string[]
 * If the value is falsy the key will not appear in the search string.
 * If the value is an array the key will appear multiple times.
 *
 * @param params Some object with key value pairs.
 * @returns a search string.
 */
function prepareParams(params: Record<string, ParamValue>): string {
  const truthyEntries = Object.entries(params).filter(([, value]) => !!value);
  const pairs = truthyEntries.flatMap(keyValueToPairs);
  return new URLSearchParams(pairs).toString();
}
// Entry point: builds the test tree for the ?q= query, renders the options
// table and tree UI, and (with ?runnow=1) immediately runs everything.
void (async () => {
  const loader = new DefaultTestFileLoader();

  // MAINTENANCE_TODO: start populating page before waiting for everything to load?
  const qs = new URLSearchParams(window.location.search).getAll('q');
  if (qs.length === 0) {
    qs.push('webgpu:*');
  }

  // Update the URL bar to match the exact current options.
  const updateURLWithCurrentOptions = () => {
    const search = prepareParams(optionsToRecord(options));
    let url = `${window.location.origin}${window.location.pathname}`;
    // Add in q separately to avoid escaping punctuation marks.
    url += `?${search}${search ? '&' : ''}${qs.map(q => 'q=' + q).join('&')}`;
    window.history.replaceState(null, '', url.toString());
  };
  updateURLWithCurrentOptions();

  // Renders one row per option into the #options table: a checkbox for
  // boolean options, a <select> for string options with enumerated values.
  const addOptionsToPage = (options: StandaloneOptions, optionsInfos: StandaloneOptionsInfos) => {
    const optionsElem = $('table#options>tbody')[0];
    const optionValues = optionsToRecord(options);

    const createCheckbox = (optionName: string) => {
      return $(`<input>`)
        .attr('type', 'checkbox')
        .prop('checked', optionValues[optionName] as boolean)
        .on('change', function () {
          optionValues[optionName] = (this as HTMLInputElement).checked;
          updateURLWithCurrentOptions();
        });
    };

    const createSelect = (optionName: string, info: StandaloneOptionInfo) => {
      const select = $('<select>').on('change', function () {
        optionValues[optionName] = (this as HTMLInputElement).value;
        updateURLWithCurrentOptions();
      });
      const currentValue = optionValues[optionName];
      for (const { value, description } of info.selectValueDescriptions!) {
        $('<option>')
          .text(description)
          .val(value)
          .prop('selected', value === currentValue)
          .appendTo(select);
      }
      return select;
    };

    for (const [optionName, info] of Object.entries(optionsInfos)) {
      const input =
        typeof optionValues[optionName] === 'boolean'
          ? createCheckbox(optionName)
          : createSelect(optionName, info);
      $('<tr>')
        .append($('<td>').append(input))
        .append($('<td>').text(camelCaseToSnakeCase(optionName)))
        .append($('<td>').text(info.description))
        .appendTo(optionsElem);
    }
  };
  addOptionsToPage(options, optionsInfo);

  assert(qs.length === 1, 'currently, there must be exactly one ?q=');
  const rootQuery = parseQuery(qs[0]);
  // Expand the rendered tree at least as deep as the requested query.
  if (rootQuery.level > lastQueryLevelToExpand) {
    lastQueryLevelToExpand = rootQuery.level;
  }

  // Show per-file loading progress in the #info element.
  loader.addEventListener('import', ev => {
    $('#info')[0].textContent = `loading: ${ev.data.url}`;
  });
  loader.addEventListener('finish', () => {
    $('#info')[0].textContent = '';
  });

  const tree = await loader.loadTree(rootQuery);
  tree.dissolveSingleChildTrees();

  const { runSubtree, generateSubtreeHTML } = makeSubtreeHTML(tree.root, 1);
  const setTreeCheckedRecursively = generateSubtreeHTML(resultsVis);

  document.getElementById('expandall')!.addEventListener('click', () => {
    setTreeCheckedRecursively();
  });
  document.getElementById('copyResultsJSON')!.addEventListener('click', () => {
    void navigator.clipboard.writeText(logger.asJSON(2));
  });

  if (runnow) {
    void runSubtree();
  }
})();

View File

@ -0,0 +1,83 @@
// Implements the wpt-embedded test runner (see also: wpt/cts.https.html).
import { globalTestConfig } from '../framework/test_config.js';
import { DefaultTestFileLoader } from '../internal/file_loader.js';
import { prettyPrintLog } from '../internal/logging/log_message.js';
import { Logger } from '../internal/logging/logger.js';
import { parseQuery } from '../internal/query/parseQuery.js';
import { parseExpectationsForTestQuery, relativeQueryString } from '../internal/query/query.js';
import { assert } from '../util/util.js';
import { optionEnabled } from './helper/options.js';
import { TestWorker } from './helper/test_worker.js';
// testharness.js API (https://web-platform-tests.org/writing-tests/testharness-api.html)
declare interface WptTestObject {
  step(f: () => void): void;
  done(): void;
}

// Globals provided by the WPT harness (testharness.js).
declare function setup(properties: { explicit_done?: boolean }): void;
declare function promise_test(f: (t: WptTestObject) => Promise<void>, name: string): void;
declare function done(): void;
declare function assert_unreached(description: string): void;

// Optionally injected by the embedding page (see wpt/cts.https.html): a test
// expectations list, and a flag that promotes 'warn' results to failures.
declare const loadWebGPUExpectations: Promise<unknown> | undefined;
declare const shouldWebGPUCTSFailOnWarnings: Promise<boolean> | undefined;

setup({
  // It's convenient for us to asynchronously add tests to the page. Prevent done() from being
  // called implicitly when the page is finished loading.
  explicit_done: true,
});
// Entry point: loads the cases for the ?q= query and registers one WPT
// promise_test per case, then signals completion to the harness.
void (async () => {
  const workerEnabled = optionEnabled('worker');
  const worker = workerEnabled ? new TestWorker(false) : undefined;

  globalTestConfig.unrollConstEvalLoops = optionEnabled('unroll_const_eval_loops');

  // Whether 'warn' results should be reported as harness failures.
  const failOnWarnings =
    typeof shouldWebGPUCTSFailOnWarnings !== 'undefined' && (await shouldWebGPUCTSFailOnWarnings);

  const loader = new DefaultTestFileLoader();
  const qs = new URLSearchParams(window.location.search).getAll('q');
  assert(qs.length === 1, 'currently, there must be exactly one ?q=');
  const filterQuery = parseQuery(qs[0]);
  const testcases = await loader.loadCases(filterQuery);

  // Expectations may be injected by the embedding page.
  const expectations =
    typeof loadWebGPUExpectations !== 'undefined'
      ? parseExpectationsForTestQuery(
          await loadWebGPUExpectations,
          filterQuery,
          new URL(window.location.href)
        )
      : [];

  const log = new Logger();

  for (const testcase of testcases) {
    const name = testcase.query.toString();
    // For brevity, display the case name "relative" to the ?q= path.
    const shortName = relativeQueryString(filterQuery, testcase.query) || '(case)';

    const wpt_fn = async () => {
      const [rec, res] = log.record(name);
      if (worker) {
        await worker.run(rec, name, expectations);
      } else {
        await testcase.run(rec, expectations);
      }

      // Unfortunately, it seems not possible to surface any logs for warn/skip.
      if (res.status === 'fail' || (res.status === 'warn' && failOnWarnings)) {
        const logs = (res.logs ?? []).map(prettyPrintLog);
        assert_unreached('\n' + logs.join('\n') + '\n');
      }
    };

    promise_test(wpt_fn, shortName);
  }

  // All tests registered; let the harness finish (explicit_done was set).
  done();
})();

View File

@ -0,0 +1,32 @@
<!--
This test suite is built from the TypeScript sources at:
https://github.com/gpuweb/cts
If you are debugging WebGPU conformance tests, it's highly recommended that
you use the standalone interactive runner in that repository, which
provides tools for easier debugging and editing (source maps, debug
logging, warn/skip functionality, etc.)
NOTE:
The WPT version of this file is generated with *one variant per test spec
file*. If your harness needs more fine-grained suppressions, you'll need to
generate your own variants list from your suppression list.
See `tools/gen_wpt_cts_html` to do this.
When run under browser CI, the original cts.https.html should be skipped, and
this alternate version should be run instead, under a non-exported WPT test
directory (e.g. Chromium's wpt_internal).
-->
<!doctype html>
<title>WebGPU CTS</title>
<meta charset=utf-8>
<link rel=help href='https://gpuweb.github.io/gpuweb/'>
<script src=/resources/testharness.js></script>
<script src=/resources/testharnessreport.js></script>
<script>
const loadWebGPUExpectations = undefined;
const shouldWebGPUCTSFailOnWarnings = undefined;
</script>
<script type=module src=/webgpu/common/runtime/wpt.js></script>

View File

@ -0,0 +1,9 @@
{
"rules": {
"no-console": "off",
"no-process-exit": "off",
"node/no-unpublished-import": "off",
"node/no-unpublished-require": "off",
"@typescript-eslint/no-var-requires": "off"
}
}

View File

@ -0,0 +1,138 @@
import * as fs from 'fs';
import * as process from 'process';
import { DefaultTestFileLoader } from '../internal/file_loader.js';
import { Ordering, compareQueries } from '../internal/query/compare.js';
import { parseQuery } from '../internal/query/parseQuery.js';
import { TestQuery, TestQueryMultiFile } from '../internal/query/query.js';
import { loadTreeForQuery, TestTree } from '../internal/tree.js';
import { StacklessError } from '../internal/util.js';
import { assert } from '../util/util.js';
/** Prints command-line usage for the checklist tool and exits with `rc`. */
function usage(rc: number): void {
  const lines = ['Usage:', '  tools/checklist FILE', '  tools/checklist my/list.txt'];
  for (const line of lines) {
    console.error(line);
  }
  process.exit(rc);
}
// No FILE argument: print usage and succeed. Any other arg count is an error.
if (process.argv.length === 2) usage(0);
if (process.argv.length !== 3) usage(1);

// One checklist line: the parsed query and whether it is marked DONE.
type QueryInSuite = { readonly query: TestQuery; readonly done: boolean };
type QueriesInSuite = QueryInSuite[];
// Checklist entries grouped by suite name.
type QueriesBySuite = Map<string, QueriesInSuite>;
/**
 * Reads a checklist file — one "DONE|TODO <query>" entry per line — and
 * groups the parsed entries by suite name.
 */
async function loadQueryListFromTextFile(filename: string): Promise<QueriesBySuite> {
  const contents = await fs.promises.readFile(filename, 'utf8');
  const allQueries = contents
    .split(/\r?\n/)
    .filter(line => line)
    .map(line => {
      const [doneStr, q] = line.split(/\s+/);
      assert(doneStr === 'DONE' || doneStr === 'TODO', 'first column must be DONE or TODO');
      return { query: parseQuery(q), done: doneStr === 'DONE' } as const;
    });

  const queriesBySuite: QueriesBySuite = new Map();
  for (const entry of allQueries) {
    let suiteQueries = queriesBySuite.get(entry.query.suite);
    if (suiteQueries === undefined) {
      suiteQueries = [];
      queriesBySuite.set(entry.query.suite, suiteQueries);
    }
    suiteQueries.push(entry);
  }
  return queriesBySuite;
}
/**
 * Warns (on stdout) about any pair of checklist queries whose matched test
 * sets overlap. Pairwise O(n^2) comparison — fine for checklist-sized input.
 */
function checkForOverlappingQueries(queries: QueriesInSuite): void {
  queries.forEach(({ query: q1 }, i1) => {
    for (const { query: q2 } of queries.slice(i1 + 1)) {
      if (compareQueries(q1, q2) !== Ordering.Unordered) {
        console.log(` FYI, the following checklist items overlap:\n ${q1}\n ${q2}`);
      }
    }
  });
}
// Walks the collapsed test tree, verifying that every subtree is matched by
// some checklist entry, warning about entries broader than one file, and
// checking DONE/TODO markings against the tree's TODO counts.
// Returns the number of subtrees visited; throws on mismatches.
function checkForUnmatchedSubtreesAndDoneness(
  tree: TestTree,
  matchQueries: QueriesInSuite
): number {
  let subtreeCount = 0;
  const unmatchedSubtrees: TestQuery[] = [];
  const overbroadMatches: [TestQuery, TestQuery][] = [];
  const donenessMismatches: QueryInSuite[] = [];
  const alwaysExpandThroughLevel = 1; // expand to, at minimum, every file.
  for (const subtree of tree.iterateCollapsedNodes({
    includeIntermediateNodes: true,
    includeEmptySubtrees: true,
    alwaysExpandThroughLevel,
  })) {
    subtreeCount++;
    // A subtree is "done" iff it contains no nodes marked TODO.
    const subtreeDone = !subtree.subtreeCounts?.nodesWithTODO;

    let subtreeMatched = false;
    for (const q of matchQueries) {
      const comparison = compareQueries(q.query, subtree.query);
      if (comparison !== Ordering.Unordered) subtreeMatched = true;
      if (comparison === Ordering.StrictSubset) continue;
      if (comparison === Ordering.StrictSuperset) overbroadMatches.push([q.query, subtree.query]);
      if (comparison === Ordering.Equal && q.done !== subtreeDone) donenessMismatches.push(q);
    }
    if (!subtreeMatched) unmatchedSubtrees.push(subtree.query);
  }

  if (overbroadMatches.length) {
    // (note, this doesn't show ALL multi-test queries - just ones that actually match any .spec.ts)
    console.log(` FYI, the following checklist items were broader than one file:`);
    for (const [q, collapsedSubtree] of overbroadMatches) {
      console.log(` ${q} > ${collapsedSubtree}`);
    }
  }
  if (unmatchedSubtrees.length) {
    throw new StacklessError(`Found unmatched tests:\n ${unmatchedSubtrees.join('\n ')}`);
  }
  if (donenessMismatches.length) {
    throw new StacklessError(
      'Found done/todo mismatches:\n ' +
        donenessMismatches
          .map(q => `marked ${q.done ? 'DONE, but is TODO' : 'TODO, but is DONE'}: ${q.query}`)
          .join('\n ')
    );
  }
  return subtreeCount;
}
// Entry point: loads the checklist file, then validates it suite-by-suite.
(async () => {
  console.log('Loading queries...');
  const queriesBySuite = await loadQueryListFromTextFile(process.argv[2]);
  console.log(' Found suites: ' + Array.from(queriesBySuite.keys()).join(' '));

  const loader = new DefaultTestFileLoader();
  for (const [suite, queriesInSuite] of queriesBySuite.entries()) {
    console.log(`Suite "${suite}":`);
    console.log(` Checking overlaps between ${queriesInSuite.length} checklist items...`);
    checkForOverlappingQueries(queriesInSuite);
    const suiteQuery = new TestQueryMultiFile(suite, []);
    console.log(` Loading tree ${suiteQuery}...`);
    // Passing the checklist queries as subqueries also validates each of them.
    const tree = await loadTreeForQuery(
      loader,
      suiteQuery,
      queriesInSuite.map(q => q.query)
    );
    console.log(' Found no invalid queries in the checklist. Checking for unmatched tests...');
    const subtreeCount = checkForUnmatchedSubtreesAndDoneness(tree, queriesInSuite);
    console.log(` No unmatched tests or done/todo mismatches among ${subtreeCount} subtrees!`);
  }
  console.log(`Checklist looks good!`);
})().catch(ex => {
  console.log(ex.stack ?? ex.toString());
  process.exit(1);
});

View File

@ -0,0 +1,102 @@
// Node can look at the filesystem, but JS in the browser can't.
// This crawls the file tree under src/suites/${suite} to generate a (non-hierarchical) static
// listing file that can then be used in the browser to load the modules containing the tests.
import * as fs from 'fs';
import * as path from 'path';

import { SpecFile } from '../internal/file_loader.js';
import { validQueryPart } from '../internal/query/validQueryPart.js';
import { TestSuiteListingEntry, TestSuiteListing } from '../internal/test_suite_listing.js';
import { assert, unreachable } from '../util/util.js';

// When running from TypeScript sources, spec files end in .spec.ts; when
// running transpiled output, they end in .spec.js.
const specFileSuffix = __filename.endsWith('.ts') ? '.spec.ts' : '.spec.js';
/**
 * Recursively lists files under `dir`, keeping test spec files (those ending
 * in `specFileSuffix`) and README.txt files.
 *
 * @param dir - directory to crawl.
 * @returns paths (relative to the crawl root's parent) of matching files.
 */
async function crawlFilesRecursively(dir: string): Promise<string[]> {
  const subpathInfo = await Promise.all(
    (await fs.promises.readdir(dir)).map(async d => {
      const p = path.join(dir, d);
      const stats = await fs.promises.stat(p);
      return {
        path: p,
        isDirectory: stats.isDirectory(),
        isFile: stats.isFile(),
      };
    })
  );

  const files = subpathInfo
    .filter(
      i =>
        i.isFile &&
        (i.path.endsWith(specFileSuffix) ||
          i.path.endsWith(`${path.sep}README.txt`) ||
          // path.join('.', 'README.txt') has no separator prefix.
          i.path === 'README.txt')
    )
    .map(i => i.path);

  // Recurse into subdirectories in parallel (the previous version awaited
  // them one at a time through a reduce chain). Promise.all preserves the
  // input order, so the resulting file list is unchanged.
  const subdirFiles = await Promise.all(
    subpathInfo.filter(i => i.isDirectory).map(i => crawlFilesRecursively(i.path))
  );
  return files.concat(...subdirFiles);
}
/**
 * Crawls `suiteDir` and produces one listing entry per spec file and per
 * README.txt, with paths expressed as POSIX-style path segments relative to
 * the suite directory.
 *
 * When `validate` is true, each spec module is dynamically imported and
 * checked for a `description` export and a valid TestGroup `g` export.
 * Exits the process with code 1 if `suiteDir` does not exist.
 */
export async function crawl(
  suiteDir: string,
  validate: boolean = true
): Promise<TestSuiteListingEntry[]> {
  if (!fs.existsSync(suiteDir)) {
    console.error(`Could not find ${suiteDir}`);
    process.exit(1);
  }
  // Crawl files and convert paths to be POSIX-style, relative to suiteDir.
  const filesToEnumerate = (await crawlFilesRecursively(suiteDir))
    .map(f => path.relative(suiteDir, f).replace(/\\/g, '/'))
    .sort();
  const entries: TestSuiteListingEntry[] = [];
  for (const file of filesToEnumerate) {
    // |file| is the suite-relative file path.
    if (file.endsWith(specFileSuffix)) {
      const filepathWithoutExtension = file.substring(0, file.length - specFileSuffix.length);
      const suite = path.basename(suiteDir);
      if (validate) {
        // Import the built .spec.js module relative to this (built) file.
        const filename = `../../${suite}/${filepathWithoutExtension}.spec.js`;
        // The dev server must never validate: its dynamic imports would be
        // cached by Node and go stale after edits.
        assert(!process.env.STANDALONE_DEV_SERVER);
        const mod = (await import(filename)) as SpecFile;
        assert(mod.description !== undefined, 'Test spec file missing description: ' + filename);
        assert(mod.g !== undefined, 'Test spec file missing TestGroup definition: ' + filename);
        mod.g.validate();
      }
      // Every path segment must be usable as a test-query part.
      const pathSegments = filepathWithoutExtension.split('/');
      for (const p of pathSegments) {
        assert(validQueryPart.test(p), `Invalid directory name ${p}; must match ${validQueryPart}`);
      }
      entries.push({ file: pathSegments });
    } else if (path.basename(file) === 'README.txt') {
      const dirname = path.dirname(file);
      const readme = fs.readFileSync(path.join(suiteDir, file), 'utf8').trim();
      // A README at the suite root ('.') gets an empty segment list.
      const pathSegments = dirname !== '.' ? dirname.split('/') : [];
      entries.push({ file: pathSegments, readme });
    } else {
      unreachable(`Matched an unrecognized filename ${file}`);
    }
  }
  return entries;
}
/**
 * Builds a TestSuiteListing for the suite whose `listing.ts` path is
 * `filename` (the suite directory is the file's containing directory).
 */
export function makeListing(filename: string): Promise<TestSuiteListing> {
  // Don't validate. This path is only used for the dev server and running tests with Node.
  // Validation is done for listing generation and presubmit.
  return crawl(path.dirname(filename), false);
}

View File

@ -0,0 +1,189 @@
import * as fs from 'fs';
import * as os from 'os';
import * as path from 'path';
import * as babel from '@babel/core';
import * as chokidar from 'chokidar';
import * as express from 'express';
import * as morgan from 'morgan';
import * as portfinder from 'portfinder';
import * as serveIndex from 'serve-index';
import { makeListing } from './crawl.js';
// Make sure that makeListing doesn't cache imported spec files. See crawl().
process.env.STANDALONE_DEV_SERVER = '1';
// Root of the source tree: two directory levels up from this tool.
const srcDir = path.resolve(__dirname, '../../');
// Import the project's babel.config.js. We'll use the same config for the runtime compiler.
const babelConfig = {
  ...require(path.resolve(srcDir, '../babel.config.js'))({
    // babel.config.js is called with an api object; only cache() is provided here.
    cache: () => {
      /* not used */
    },
  }),
  // Inline source maps let the browser map served JS back to the TS source.
  sourceMaps: 'inline',
};
// Caches for the generated listing file and compiled TS sources to speed up reloads.
// Keyed by suite name
const listingCache = new Map<string, string>();
// Keyed by the path to the .ts file, without src/
const compileCache = new Map<string, string>();
console.log('Watching changes in', srcDir);
// Watch the whole source tree; cache-dirtying handlers are attached below.
const watcher = chokidar.watch(srcDir, {
  persistent: true,
});
/**
* Handler to dirty the compile cache for changed .ts files.
*/
/**
 * Handler to dirty the compile cache for changed .ts files.
 * `stats` is undefined when the file was deleted (deletions can't be stat'ed),
 * in which case the entry is dirtied as well.
 */
function dirtyCompileCache(absPath: string, stats?: fs.Stats) {
  const tsUrl = path.relative(srcDir, absPath);
  if (!tsUrl.endsWith('.ts')) {
    return;
  }
  // A defined stats that isn't a regular file (e.g. a directory) is ignored.
  if (stats !== undefined && !stats.isFile()) {
    return;
  }
  if (compileCache.has(tsUrl)) {
    console.debug('Dirtying compile cache', tsUrl);
  }
  compileCache.delete(tsUrl);
}
/**
* Handler to dirty the listing cache for:
* - Directory changes
* - .spec.ts changes
* - README.txt changes
* Also dirties the compile cache for changed files.
*/
/**
 * Handler to dirty the listing cache for:
 * - Directory changes
 * - .spec.ts changes
 * - README.txt changes
 * Also dirties the compile cache for changed files.
 */
function dirtyListingAndCompileCache(absPath: string, stats?: fs.Stats) {
  const relPath = path.relative(srcDir, absPath);
  const segments = relPath.split(path.sep);
  // The listing changes if the directories change, or if a .spec.ts file is added/removed.
  const listingChange =
    // A directory or a file with no extension that we can't stat.
    // (stat doesn't work for deletions)
    ((path.extname(relPath) === '' && (stats === undefined || !stats.isFile())) ||
      // A spec file
      relPath.endsWith('.spec.ts') ||
      // A README.txt
      // Bug fix: path.basename's suffix argument must include the leading dot.
      // basename('README.txt', 'txt') yields 'README.' (trailing dot kept), so
      // the previous comparison against 'README' never matched and README
      // changes never dirtied the listing cache.
      path.basename(relPath, '.txt') === 'README') &&
    segments.length > 0;
  if (listingChange) {
    // The first path segment under src/ is the suite name.
    const suite = segments[0];
    if (listingCache.has(suite)) {
      console.debug('Dirtying listing cache', suite);
    }
    listingCache.delete(suite);
  }
  dirtyCompileCache(absPath, stats);
}
// Additions/removals can change the listing; plain content changes only the
// compiled output, so 'change' dirties just the compile cache.
watcher.on('add', dirtyListingAndCompileCache);
watcher.on('unlink', dirtyListingAndCompileCache);
watcher.on('addDir', dirtyListingAndCompileCache);
watcher.on('unlinkDir', dirtyListingAndCompileCache);
watcher.on('change', dirtyCompileCache);
const app = express();
// Send Chrome Origin Trial tokens
app.use((req, res, next) => {
  res.header('Origin-Trial', [
    // Token for http://localhost:8080
    'AvyDIV+RJoYs8fn3W6kIrBhWw0te0klraoz04mw/nPb8VTus3w5HCdy+vXqsSzomIH745CT6B5j1naHgWqt/tw8AAABJeyJvcmlnaW4iOiJodHRwOi8vbG9jYWxob3N0OjgwODAiLCJmZWF0dXJlIjoiV2ViR1BVIiwiZXhwaXJ5IjoxNjYzNzE4Mzk5fQ==',
  ]);
  next();
});
// Set up logging
app.use(morgan('dev'));
// Serve the standalone runner directory
app.use('/standalone', express.static(path.resolve(srcDir, '../standalone')));
// Add out-wpt/ build dir for convenience
app.use('/out-wpt', express.static(path.resolve(srcDir, '../out-wpt')));
app.use('/docs/tsdoc', express.static(path.resolve(srcDir, '../docs/tsdoc')));
// Serve a suite's listing.js file by crawling the filesystem for all tests.
// Serve a suite's listing.js file by crawling the filesystem for all tests.
// Results are cached per suite; the filesystem watcher invalidates the cache.
app.get('/out/:suite/listing.js', async (req, res, next) => {
  const suite = req.params['suite'];
  if (listingCache.has(suite)) {
    res.setHeader('Content-Type', 'application/javascript');
    res.send(listingCache.get(suite));
    return;
  }
  try {
    // makeListing crawls the directory containing listing.ts (the suite dir).
    const listing = await makeListing(path.resolve(srcDir, suite, 'listing.ts'));
    const result = `export const listing = ${JSON.stringify(listing, undefined, 2)}`;
    listingCache.set(suite, result);
    res.setHeader('Content-Type', 'application/javascript');
    res.send(result);
  } catch (err) {
    // Forward to Express's error handler.
    next(err);
  }
});
// Serve all other .js files by fetching the source .ts file and compiling it.
// Serve all other .js files by fetching the source .ts file and compiling it.
app.get('/out/**/*.js', async (req, res, next) => {
  // Strip the '/out' prefix to get the repo-relative requested path.
  const jsUrl = path.relative('/out', req.url);
  const tsUrl = jsUrl.replace(/\.js$/, '.ts');
  if (compileCache.has(tsUrl)) {
    res.setHeader('Content-Type', 'application/javascript');
    res.send(compileCache.get(tsUrl));
    return;
  }
  let absPath = path.join(srcDir, tsUrl);
  if (!fs.existsSync(absPath)) {
    // The .ts file doesn't exist. Try .js file in case this is a .js/.d.ts pair.
    absPath = path.join(srcDir, jsUrl);
  }
  try {
    // Compile on demand with the project's babel config (inline source maps).
    const result = await babel.transformFileAsync(absPath, babelConfig);
    if (result && result.code) {
      // Cache under the .ts key so the watcher's dirtying logic matches.
      compileCache.set(tsUrl, result.code);
      res.setHeader('Content-Type', 'application/javascript');
      res.send(result.code);
    } else {
      throw new Error(`Failed compile ${tsUrl}.`);
    }
  } catch (err) {
    next(err);
  }
});
const host = '0.0.0.0';
const port = 8080;
// Find an available port, starting at 8080.
portfinder.getPort({ host, port }, (err, port) => {
  if (err) {
    throw err;
  }
  // Wait for the initial filesystem scan to finish before accepting requests.
  watcher.on('ready', () => {
    // Listen on the available port.
    app.listen(port, host, () => {
      console.log('Standalone test runner running at:');
      // Print a clickable URL for every local IPv4 interface.
      for (const iface of Object.values(os.networkInterfaces())) {
        for (const details of iface || []) {
          if (details.family === 'IPv4') {
            console.log(`  http://${details.address}:${port}/standalone/`);
          }
        }
      }
    });
  });
});
// Serve everything else (not .js) as static, and directories as directory listings.
// Registered last so the dynamic /out routes above take precedence.
app.use('/out', serveIndex(path.resolve(srcDir, '../src')));
app.use('/out', express.static(path.resolve(srcDir, '../src')));
View File

@ -0,0 +1,144 @@
import * as fs from 'fs';
import * as path from 'path';
import * as process from 'process';
import { Cacheable, dataCache, setIsBuildingDataCache } from '../framework/data_cache.js';
/** Prints this tool's usage message to stderr and exits with `exitCode`. */
function usage(exitCode: number): void {
  const message = `Usage: tools/gen_cache [options] [OUT_DIR] [SUITE_DIRS...]
For each suite in SUITE_DIRS, pre-compute data that is expensive to generate
at runtime and store it under OUT_DIR. If the data file is found then the
DataCache will load this instead of building the expensive data at CTS runtime.
Options:
  --help Print this message and exit.
  --list Print the list of output files without writing them.
`;
  console.error(message);
  process.exit(exitCode);
}
// 'emit' writes cache files; 'list' only prints the paths that would be written.
let mode: 'emit' | 'list' = 'emit';
// Non-flag argv entries; note this includes argv[0] (node) and argv[1] (script).
const nonFlagsArgs: string[] = [];
for (const a of process.argv) {
  if (a.startsWith('-')) {
    if (a === '--list') {
      mode = 'list';
    } else if (a === '--help') {
      usage(0);
    } else {
      console.log('unrecognized flag: ', a);
      usage(1);
    }
  } else {
    nonFlagsArgs.push(a);
  }
}
// Need at least node, script, OUT_DIR, and one suite dir.
// NOTE(review): exits 0 on a bad invocation — confirm that's intended.
if (nonFlagsArgs.length < 4) {
  usage(0);
}
const outRootDir = nonFlagsArgs[2];
dataCache.setStore({
  load: (path: string) => {
    return new Promise<string>((resolve, reject) => {
      // Resolved relative to the current working directory.
      fs.readFile(`data/${path}`, 'utf8', (err, data) => {
        if (err !== null) {
          // NOTE(review): rejects with a string (err.message), not an Error.
          reject(err.message);
        } else {
          resolve(data);
        }
      });
    });
  },
});
setIsBuildingDataCache();
// Build each suite sequentially; the promise is intentionally not awaited
// at top level (void), so failures surface as unhandled rejections.
void (async () => {
  for (const suiteDir of nonFlagsArgs.slice(3)) {
    await build(suiteDir);
  }
})();
const specFileSuffix = __filename.endsWith('.ts') ? '.spec.ts' : '.spec.js';
/**
 * Recursively collects the paths of all spec files (ending in
 * `specFileSuffix`) under `dir`, files before subdirectory contents.
 */
async function crawlFilesRecursively(dir: string): Promise<string[]> {
  const children = await Promise.all(
    (await fs.promises.readdir(dir)).map(async name => {
      const childPath = path.join(dir, name);
      const stats = await fs.promises.stat(childPath);
      return {
        path: childPath,
        isDirectory: stats.isDirectory(),
        isFile: stats.isFile(),
      };
    })
  );

  const specFiles: string[] = [];
  for (const child of children) {
    if (child.isFile && child.path.endsWith(specFileSuffix)) {
      specFiles.push(child.path);
    }
  }
  // Append each subdirectory's results in readdir order.
  for (const child of children) {
    if (child.isDirectory) {
      specFiles.push(...(await crawlFilesRecursively(child.path)));
    }
  }
  return specFiles;
}
/**
 * Imports every spec file under `suiteDir` and, for each one exporting a
 * serializable Cacheable (`d`), either writes the serialized data under
 * `${outRootDir}/data/` ('emit' mode) or prints the would-be output path
 * ('list' mode). Exits with code 1 on a missing suite dir or if two spec
 * files claim the same cacheable path.
 */
async function build(suiteDir: string) {
  if (!fs.existsSync(suiteDir)) {
    console.error(`Could not find ${suiteDir}`);
    process.exit(1);
  }
  // Crawl files and convert paths to be POSIX-style, relative to suiteDir.
  const filesToEnumerate = (await crawlFilesRecursively(suiteDir)).sort();
  // Maps cacheable.path -> the spec file that emits it, to detect collisions.
  const cacheablePathToTS = new Map<string, string>();
  for (const file of filesToEnumerate) {
    if (file.endsWith(specFileSuffix)) {
      const pathWithoutExtension = file.substring(0, file.length - specFileSuffix.length);
      // Import the built .spec.js module corresponding to this source file.
      const mod = await import(`../../../${pathWithoutExtension}.spec.js`);
      if (mod.d?.serialize !== undefined) {
        const cacheable = mod.d as Cacheable<unknown>;
        {
          // Check for collisions
          const existing = cacheablePathToTS.get(cacheable.path);
          if (existing !== undefined) {
            console.error(
              `error: Cacheable '${cacheable.path}' is emitted by both:
    '${existing}'
and
    '${file}'`
            );
            process.exit(1);
          }
          cacheablePathToTS.set(cacheable.path, file);
        }
        const outPath = `${outRootDir}/data/${cacheable.path}`;
        switch (mode) {
          case 'emit': {
            // Build the (expensive) data and persist its serialized form.
            const data = await cacheable.build();
            const serialized = cacheable.serialize(data);
            fs.mkdirSync(path.dirname(outPath), { recursive: true });
            fs.writeFileSync(outPath, serialized);
            break;
          }
          case 'list': {
            console.log(outPath);
            break;
          }
        }
      }
    }
  }
}

View File

@ -0,0 +1,64 @@
import * as fs from 'fs';
import * as path from 'path';
import * as process from 'process';
import { crawl } from './crawl.js';
/** Prints gen_listings usage to stderr and exits with `exitCode`. */
function usage(exitCode: number): void {
  const message = `Usage: tools/gen_listings [options] [OUT_DIR] [SUITE_DIRS...]
For each suite in SUITE_DIRS, generate listings and write each listing.js
into OUT_DIR/{suite}/listing.js. Example:
  tools/gen_listings out/ src/unittests/ src/webgpu/
Options:
  --help Print this message and exit.
  --no-validate Whether to validate test modules while crawling.
`;
  console.error(message);
  process.exit(exitCode);
}
const argv = process.argv;
if (argv.indexOf('--help') !== -1) {
  usage(0);
}
let validate = true;
{
  // Consume the --no-validate flag (mutates argv so positional indices hold).
  const i = argv.indexOf('--no-validate');
  if (i !== -1) {
    validate = false;
    argv.splice(i, 1);
  }
}
// argv[0]=node, argv[1]=script, argv[2]=OUT_DIR, argv[3..]=suite dirs.
if (argv.length < 4) {
  usage(0);
}
const myself = 'src/common/tools/gen_listings.ts';
const outDir = argv[2];
// Not awaited at top level; failures surface as unhandled rejections.
void (async () => {
  for (const suiteDir of argv.slice(3)) {
    const listing = await crawl(suiteDir, validate);
    const suite = path.basename(suiteDir);
    const outFile = path.normalize(path.join(outDir, `${suite}/listing.js`));
    fs.mkdirSync(path.join(outDir, suite), { recursive: true });
    fs.writeFileSync(
      outFile,
      `\
// AUTO-GENERATED - DO NOT EDIT. See ${myself}.
export const listing = ${JSON.stringify(listing, undefined, 2)};
`
    );
    // Remove any stale source map left over from a previous build of this file.
    try {
      fs.unlinkSync(outFile + '.map');
    } catch (ex) {
      // ignore if file didn't exist
    }
  }
})();

View File

@ -0,0 +1,122 @@
import { promises as fs } from 'fs';
import { DefaultTestFileLoader } from '../internal/file_loader.js';
import { TestQueryMultiFile } from '../internal/query/query.js';
import { assert } from '../util/util.js';
/** Prints gen_wpt_cts_html usage (with examples) to stderr and exits with `rc`. */
function printUsageAndExit(rc: number): void {
  console.error(`\
Usage:
  tools/gen_wpt_cts_html OUTPUT_FILE TEMPLATE_FILE [ARGUMENTS_PREFIXES_FILE EXPECTATIONS_FILE EXPECTATIONS_PREFIX [SUITE]]
  tools/gen_wpt_cts_html out-wpt/cts.https.html templates/cts.https.html
  tools/gen_wpt_cts_html my/path/to/cts.https.html templates/cts.https.html arguments.txt myexpectations.txt 'path/to/cts.https.html' cts
where arguments.txt is a file containing a list of arguments prefixes to both generate and expect
in the expectations. The entire variant list generation runs *once per prefix*, so this
multiplies the size of the variant list.
  ?worker=0&q=
  ?worker=1&q=
and myexpectations.txt is a file containing a list of WPT paths to suppress, e.g.:
  path/to/cts.https.html?worker=0&q=webgpu:a/foo:bar={"x":1}
  path/to/cts.https.html?worker=1&q=webgpu:a/foo:bar={"x":1}
  path/to/cts.https.html?worker=1&q=webgpu:a/foo:bar={"x":3}
`);
  process.exit(rc);
}
// Valid invocations take 2, 5, or 6 user arguments (argv also contains the
// node binary and script path, hence 4/7/8).
// NOTE(review): exits 0 on a bad argument count — confirm that's intended.
if (process.argv.length !== 4 && process.argv.length !== 7 && process.argv.length !== 8) {
  printUsageAndExit(0);
}
// Skip argv[0] (node) and argv[1] (script); SUITE defaults to 'webgpu'.
const [
  ,
  ,
  outFile,
  templateFile,
  argsPrefixesFile,
  expectationsFile,
  expectationsPrefix,
  suite = 'webgpu',
] = process.argv;
(async () => {
  // With the short 2-argument form there is a single empty prefix and no
  // expectations; otherwise both are read from the provided files.
  let argsPrefixes = [''];
  let expectationLines = new Set<string>();
  if (process.argv.length >= 7) {
    // Prefixes sorted from longest to shortest
    const argsPrefixesFromFile = (await fs.readFile(argsPrefixesFile, 'utf8'))
      .split(/\r?\n/)
      .filter(a => a.length)
      .sort((a, b) => b.length - a.length);
    if (argsPrefixesFromFile.length) argsPrefixes = argsPrefixesFromFile;
    expectationLines = new Set(
      (await fs.readFile(expectationsFile, 'utf8')).split(/\r?\n/).filter(l => l.length)
    );
  }
  // Bucket of expectation query strings per args prefix.
  const expectations: Map<string, string[]> = new Map();
  for (const prefix of argsPrefixes) {
    expectations.set(prefix, []);
  }
  expLoop: for (const exp of expectationLines) {
    // Take each expectation for the longest prefix it matches.
    // (argsPrefixes is sorted longest-first above, so first match wins.)
    for (const argsPrefix of argsPrefixes) {
      const prefix = expectationsPrefix + argsPrefix;
      if (exp.startsWith(prefix)) {
        expectations.get(argsPrefix)!.push(exp.substring(prefix.length));
        continue expLoop;
      }
    }
    console.log('note: ignored expectation: ' + exp);
  }
  const loader = new DefaultTestFileLoader();
  // undefined entries become blank lines in the generated variant list.
  const lines: Array<string | undefined> = [];
  for (const prefix of argsPrefixes) {
    const rootQuery = new TestQueryMultiFile(suite, []);
    // Expectations force the tree to stay expanded around suppressed variants.
    const tree = await loader.loadTree(rootQuery, expectations.get(prefix));
    lines.push(undefined); // output blank line between prefixes
    const alwaysExpandThroughLevel = 2; // expand to, at minimum, every test.
    for (const { query } of tree.iterateCollapsedNodes({ alwaysExpandThroughLevel })) {
      const urlQueryString = prefix + query.toString(); // "?worker=0&q=..."
      // Check for a safe-ish path length limit. Filename must be <= 255, and on Windows the whole
      // path must be <= 259. Leave room for e.g.:
      // 'c:\b\s\w\xxxxxxxx\layout-test-results\external\wpt\webgpu\cts_worker=0_q=...-actual.txt'
      assert(
        urlQueryString.length < 185,
        'Generated test variant would produce too-long -actual.txt filename. \
Try broadening suppressions to avoid long test variant names. ' +
          urlQueryString
      );
      lines.push(urlQueryString);
    }
  }
  await generateFile(lines);
})().catch(ex => {
  console.log(ex.stack ?? ex.toString());
  process.exit(1);
});
async function generateFile(lines: Array<string | undefined>): Promise<void> {
let result = '';
result += '<!-- AUTO-GENERATED - DO NOT EDIT. See WebGPU CTS: tools/gen_wpt_cts_html. -->\n';
result += await fs.readFile(templateFile, 'utf8');
for (const line of lines) {
if (line === undefined) {
result += '\n';
} else {
result += `<meta name=variant content='${line}'>\n`;
}
}
await fs.writeFile(outFile, result);
}

View File

@ -0,0 +1,58 @@
import * as fs from 'fs';
import { Page } from 'playwright-core';
import { PNG } from 'pngjs';
import { screenshot, WindowInfo } from 'screenshot-ftw';
// eslint-disable-next-line ban/ban
const waitMS = (ms: number) => new Promise(resolve => setTimeout(resolve, ms));
/** Reads the file at `filename` and decodes it as a PNG. */
export function readPng(filename: string) {
  const bytes = fs.readFileSync(filename);
  const decoded = PNG.sync.read(bytes);
  return decoded;
}
/** Encodes `data` (RGBA bytes) as a width x height PNG and writes it to `filename`. */
export function writePng(filename: string, width: number, height: number, data: Buffer) {
  const png = new PNG({ colorType: 6, width, height });
  // Buffer.copy stops at the end of either buffer, matching the original
  // byte-by-byte copy (extra source bytes are dropped; extra target bytes
  // are left untouched).
  data.copy(png.data);
  const encoded = PNG.sync.write(png);
  fs.writeFileSync(filename, encoded);
}
/**
 * Captures OS-level screenshots of the window hosting a Playwright page.
 * The page is located by temporarily giving it a unique random title, then
 * matching that title against the OS window list (screenshot-ftw), so the
 * capture reflects actual on-screen pixels rather than a DOM render.
 */
export class ScreenshotManager {
  // The OS window hosting the page; set by init().
  window?: WindowInfo;
  async init(page: Page) {
    // set the title to some random number so we can find the window by title
    const title: string = await page.evaluate(() => {
      const title = `t-${Math.random()}`;
      document.title = title;
      return title;
    });
    // wait for the window to show up
    // Polls up to 100 times at 50ms intervals (~5s total).
    let window;
    for (let i = 0; !window && i < 100; ++i) {
      await waitMS(50);
      const windows = await screenshot.getWindows();
      window = windows.find(window => window.title.includes(title));
    }
    if (!window) {
      throw Error(`could not find window: ${title}`);
    }
    this.window = window;
  }
  async takeScreenshot(page: Page, screenshotName: string) {
    // await page.screenshot({ path: screenshotName });
    // we need to set the url and title since the screenshot will include the chrome
    await page.evaluate(async () => {
      document.title = 'screenshot';
      window.history.replaceState({}, '', '/screenshot');
    });
    // Requires init() to have run; this.window is assumed set here.
    await screenshot.captureWindowById(screenshotName, this.window!.id);
  }
}

View File

@ -0,0 +1,19 @@
import { DefaultTestFileLoader } from '../internal/file_loader.js';
import { parseQuery } from '../internal/query/parseQuery.js';
import { assert } from '../util/util.js';
// Validates that no generated testcase name in either suite exceeds the
// maximum length. On failure, print the error and exit nonzero explicitly
// (matching the other tools in this directory) instead of relying on an
// unhandled promise rejection from the previously void'ed promise.
(async () => {
  for (const suite of ['unittests', 'webgpu']) {
    const loader = new DefaultTestFileLoader();
    const filterQuery = parseQuery(`${suite}:*`);
    const testcases = await loader.loadCases(filterQuery);
    for (const testcase of testcases) {
      const name = testcase.query.toString();
      // Limit chosen to keep result filenames within filesystem limits.
      const maxLength = 375;
      assert(
        name.length <= maxLength,
        `Testcase ${name} is too long. Max length is ${maxLength} characters. Please shorten names or reduce parameters.`
      );
    }
  }
})().catch(ex => {
  console.log(ex.stack ?? ex.toString());
  process.exit(1);
});

View File

@ -0,0 +1,446 @@
import * as fs from 'fs';
import * as path from 'path';
import { chromium, firefox, webkit, Page, Browser } from 'playwright-core';
import { ScreenshotManager, readPng, writePng } from './image_utils.js';
declare function wptRefTestPageReady(): boolean;
declare function wptRefTestGetTimeout(): boolean;
const verbose = !!process.env.VERBOSE;
const kRefTestsBaseURL = 'http://localhost:8080/out/webgpu/web_platform/reftests';
const kRefTestsPath = 'src/webgpu/web_platform/reftests';
const kScreenshotPath = 'out-wpt-reftest-screenshots';
// note: technically we should use an HTML parser to find this to deal with whitespace
// attribute order, quotes, entities, etc but since we control the test source we can just
// make sure they match
const kRefLinkRE = /<link\s+rel="match"\s+href="(.*?)"/;
const kRefWaitClassRE = /class="reftest-wait"/;
const kFuzzy = /<meta\s+name="?fuzzy"?\s+content="(.*?)">/;
/** Prints command-line usage for run_wpt_ref_tests. */
function printUsage() {
  const usageText = `
run_wpt_ref_tests path-to-browser-executable [ref-test-name]
where ref-test-name is just a simple check for the test including the given string.
If not passed all ref tests are run
MacOS Chrome Example:
node tools/run_wpt_ref_tests /Applications/Google\\ Chrome\\ Canary.app/Contents/MacOS/Google\\ Chrome\\ Canary
`;
  console.log(usageText);
}
// Get all of filenames that end with '.html'
/** Returns the names (not full paths) of all '.html' files directly inside `refTestPath`. */
function getRefTestNames(refTestPath: string) {
  const entries = fs.readdirSync(refTestPath);
  return entries.filter(name => name.endsWith('.html'));
}
// Given a regex with one capture, return it or the empty string if no match.
/** Returns the first capture group of `re` matched against `content`, or '' on no match. */
function getRegexMatchCapture(re: RegExp, content: string) {
  const found = re.exec(content);
  if (!found) {
    return '';
  }
  return found[1];
}
type FileInfo = {
content: string;
refLink: string;
refWait: boolean;
fuzzy: string;
};
function readHTMLFile(filename: string): FileInfo {
const content = fs.readFileSync(filename, { encoding: 'utf8' });
return {
content,
refLink: getRegexMatchCapture(kRefLinkRE, content),
refWait: kRefWaitClassRE.test(content),
fuzzy: getRegexMatchCapture(kFuzzy, content),
};
}
/**
* This is workaround for a bug in Chrome. The bug is when in emulation mode
* Chrome lets you set a devicePixelRatio but Chrome still renders in the
* actual devicePixelRatio, at least on MacOS.
* So, we compute the ratio and then use that.
*/
/**
 * This is workaround for a bug in Chrome. The bug is when in emulation mode
 * Chrome lets you set a devicePixelRatio but Chrome still renders in the
 * actual devicePixelRatio, at least on MacOS.
 * So, we compute the ratio and then use that.
 *
 * Measures the real ratio by observing the device-pixel size of the
 * document element in a throwaway page.
 */
async function getComputedDevicePixelRatio(browser: Browser): Promise<number> {
  const context = await browser.newContext();
  const page = await context.newPage();
  await page.goto('data:text/html,<html></html>');
  await page.waitForLoadState('networkidle');
  const devicePixelRatio = await page.evaluate(() => {
    // The promise's resolve is captured so the ResizeObserver callback
    // (which fires once on observe) can settle it.
    let resolve: (v: number) => void;
    const promise = new Promise(_resolve => (resolve = _resolve));
    const observer = new ResizeObserver(entries => {
      // device pixels / CSS pixels = effective devicePixelRatio.
      const devicePixelWidth = entries[0].devicePixelContentBoxSize[0].inlineSize;
      const clientWidth = entries[0].target.clientWidth;
      const devicePixelRatio = devicePixelWidth / clientWidth;
      resolve(devicePixelRatio);
    });
    observer.observe(document.documentElement);
    return promise;
  });
  await page.close();
  await context.close();
  return devicePixelRatio as number;
}
// Note: If possible, rather then start adding command line options to this tool,
// see if you can just make it work based off the path.
// Note: If possible, rather then start adding command line options to this tool,
// see if you can just make it work based off the path.
/**
 * Guesses the browser family from the executable path and launches it
 * (headed) with an appropriate Playwright driver, returning the browser
 * and a fresh context. Throws if the family cannot be guessed.
 */
async function getBrowserInterface(executablePath: string) {
  const lc = executablePath.toLowerCase();
  if (lc.includes('chrom')) {
    const browser = await chromium.launch({
      executablePath,
      headless: false,
      args: ['--enable-unsafe-webgpu'],
    });
    // Work around Chrome's emulated-DPR rendering bug; see
    // getComputedDevicePixelRatio.
    const devicePixelRatio = await getComputedDevicePixelRatio(browser);
    const context = await browser.newContext({
      deviceScaleFactor: devicePixelRatio,
    });
    return { browser, context };
  } else if (lc.includes('firefox')) {
    const browser = await firefox.launch({
      executablePath,
      headless: false,
    });
    const context = await browser.newContext();
    return { browser, context };
  } else if (lc.includes('safari') || lc.includes('webkit')) {
    const browser = await webkit.launch({
      executablePath,
      headless: false,
    });
    const context = await browser.newContext();
    return { browser, context };
  } else {
    throw new Error(`could not guess browser from executable path: ${executablePath}`);
  }
}
// Parses a fuzzy spec as defined here
// https://web-platform-tests.org/writing-tests/reftests.html#fuzzy-matching
// Note: This is not robust but the tests will eventually be run in the real wpt.
// Parses a fuzzy spec as defined here
// https://web-platform-tests.org/writing-tests/reftests.html#fuzzy-matching
// Note: This is not robust but the tests will eventually be run in the real wpt.
// Accepts "maxDifference=a-b;totalPixels=c-d" (key names/'=' optional); a
// single value V is treated as the range [0, V]. An empty spec means exact
// matching: { maxDifference: [0, 0], totalPixels: [0, 0] }.
function parseFuzzy(fuzzy: string) {
  if (!fuzzy) {
    return { maxDifference: [0, 0], totalPixels: [0, 0] };
  }
  const parts = fuzzy.split(';');
  if (parts.length !== 2) {
    throw Error(`unhandled fuzzy format: ${fuzzy}`);
  }
  const [maxDifference, totalPixels] = parts.map(part => {
    const bounds = part
      .replace(/[a-zA-Z=]/g, '')
      .split('-')
      .map(v => parseInt(v));
    return bounds.length === 1 ? [0, bounds[0]] : bounds;
  });
  return { maxDifference, totalPixels };
}
// Compares two images using the algorithm described in the web platform tests
// https://web-platform-tests.org/writing-tests/reftests.html#fuzzy-matching
// If they are different will write out a diff mask.
// Compares two images using the algorithm described in the web platform tests
// https://web-platform-tests.org/writing-tests/reftests.html#fuzzy-matching
// If they are different will write out a diff mask.
//
// Returns true if the images match within the fuzzy tolerance; false (after
// writing the diff mask and logging) otherwise. `startingRow` lets callers
// skip the top rows of both images.
async function compareImages(
  filename1: string,
  filename2: string,
  fuzzy: string,
  diffName: string,
  startingRow: number = 0
) {
  const img1 = readPng(filename1);
  const img2 = readPng(filename2);
  const { width, height } = img1;
  if (img2.width !== width || img2.height !== height) {
    console.error('images are not the same size:', filename1, filename2);
    // Bug fix: previously fell through returning undefined; report an explicit failure.
    return false;
  }
  const { maxDifference, totalPixels } = parseFuzzy(fuzzy);
  const diffData = Buffer.alloc(width * height * 4);
  const diffPixels = new Uint32Array(diffData.buffer);
  const kRed = 0xff0000ff; // out-of-range difference
  const kWhite = 0xffffffff; // identical
  const kYellow = 0xff00ffff; // in-range difference
  let numPixelsDifferent = 0;
  let anyPixelsOutOfRange = false;
  for (let y = startingRow; y < height; ++y) {
    for (let x = 0; x < width; ++x) {
      const offset = y * width + x;
      let isDifferent = false;
      let outOfRange = false;
      for (let c = 0; c < 4 && !outOfRange; ++c) {
        const off = offset * 4 + c;
        const v0 = img1.data[off];
        const v1 = img2.data[off];
        const channelDiff = Math.abs(v0 - v1);
        // Bug fix: per the WPT fuzzy-matching algorithm, only channels that
        // actually differ are checked against the maxDifference range.
        // Previously a diff of 0 was compared against the lower bound too, so
        // any nonzero lower bound (e.g. "maxDifference=1-3") marked identical
        // channels — and therefore nearly every pixel — as out of range.
        if (channelDiff > 0) {
          isDifferent = true;
          outOfRange ||= channelDiff < maxDifference[0] || channelDiff > maxDifference[1];
        }
      }
      numPixelsDifferent += isDifferent ? 1 : 0;
      anyPixelsOutOfRange ||= outOfRange;
      diffPixels[offset] = outOfRange ? kRed : isDifferent ? kYellow : kWhite;
    }
  }
  // Pass iff every differing channel was within range AND the count of
  // differing pixels falls inside the expected totalPixels range.
  const pass =
    !anyPixelsOutOfRange &&
    numPixelsDifferent >= totalPixels[0] &&
    numPixelsDifferent <= totalPixels[1];
  if (!pass) {
    writePng(diffName, width, height, diffData);
    console.error(
      `FAIL: too many differences in: ${filename1} vs ${filename2}
${numPixelsDifferent} differences, expected: ${totalPixels[0]}-${totalPixels[1]} with range: ${maxDifference[0]}-${maxDifference[1]}
wrote difference to: ${diffName};
`
    );
  } else {
    console.log(`PASS`);
  }
  return pass;
}
/** Returns true if `filename` exists and is accessible, false otherwise. */
function exists(filename: string) {
  try {
    fs.accessSync(filename);
  } catch (e) {
    return false;
  }
  return true;
}
/** Resolves after the page produces one more animation frame (i.e. has rendered). */
async function waitForPageRender(page: Page) {
  await page.evaluate(() => {
    return new Promise(resolve => requestAnimationFrame(resolve));
  });
}
// returns true if the page timed out.
async function runPage(page: Page, url: string, refWait: boolean) {
console.log(' loading:', url);
// we need to load about:blank to force the browser to re-render
// else the previous page may still be visible if the page we are loading fails
await page.goto('about:blank');
await page.waitForLoadState('domcontentloaded');
await waitForPageRender(page);
await page.goto(url);
await page.waitForLoadState('domcontentloaded');
await waitForPageRender(page);
if (refWait) {
await page.waitForFunction(() => wptRefTestPageReady());
const timeout = await page.evaluate(() => wptRefTestGetTimeout());
if (timeout) {
return true;
}
}
return false;
}
/**
 * Entry point: runs each selected ref test and its reference page in the
 * given browser, captures OS-level screenshots of both, fuzzy-compares them,
 * and writes an index.html report (with per-failure actual/ref/diff images)
 * into the screenshot directory.
 */
async function main() {
  const args = process.argv.slice(2);
  if (args.length < 1 || args.length > 2) {
    printUsage();
    return;
  }
  const [executablePath, refTestName] = args;
  if (!exists(executablePath)) {
    console.error(executablePath, 'does not exist');
    return;
  }
  // Optional second argument filters tests by substring.
  const testNames = getRefTestNames(kRefTestsPath).filter(name =>
    refTestName ? name.includes(refTestName) : true
  );
  if (!exists(kScreenshotPath)) {
    fs.mkdirSync(kScreenshotPath, { recursive: true });
  }
  if (testNames.length === 0) {
    console.error(`no tests include "${refTestName}"`);
    return;
  }
  const { browser, context } = await getBrowserInterface(executablePath);
  const page = await context.newPage();
  const screenshotManager = new ScreenshotManager();
  await screenshotManager.init(page);
  if (verbose) {
    // Mirror the page's console output to this process's stdout.
    page.on('console', async msg => {
      const { url, lineNumber, columnNumber } = msg.location();
      const values = await Promise.all(msg.args().map(a => a.jsonValue()));
      console.log(`${url}:${lineNumber}:${columnNumber}:`, ...values);
    });
  }
  // Inject the readiness/timeout helpers used by runPage: a page is "ready"
  // when it drops the reftest-wait class, or after a 5-second timeout.
  await page.addInitScript({
    content: `
    (() => {
      let timeout = false;
      setTimeout(() => timeout = true, 5000);
      window.wptRefTestPageReady = function() {
        return timeout || !document.documentElement.classList.contains('reftest-wait');
      };
      window.wptRefTestGetTimeout = function() {
        return timeout;
      };
    })();
    `,
  });
  type Result = {
    status: string;
    testName: string;
    refName: string;
    testScreenshotName: string;
    refScreenshotName: string;
    diffName: string;
  };
  const results: Result[] = [];
  const addResult = (
    status: string,
    testName: string,
    refName: string,
    testScreenshotName: string = '',
    refScreenshotName: string = '',
    diffName: string = ''
  ) => {
    results.push({ status, testName, refName, testScreenshotName, refScreenshotName, diffName });
  };
  for (const testName of testNames) {
    console.log('processing:', testName);
    const { refLink, refWait, fuzzy } = readHTMLFile(path.join(kRefTestsPath, testName));
    if (!refLink) {
      throw new Error(`could not find ref link in: ${testName}`);
    }
    const testURL = `${kRefTestsBaseURL}/${testName}`;
    const refURL = `${kRefTestsBaseURL}/${refLink}`;
    // Technically this is not correct but it fits the existing tests.
    // It assumes refLink is relative to the refTestsPath but it's actually
    // supposed to be relative to the test. It might also be an absolute
    // path. Neither of those cases exist at the time of writing this.
    const refFileInfo = readHTMLFile(path.join(kRefTestsPath, refLink));
    const testScreenshotName = path.join(kScreenshotPath, `${testName}-actual.png`);
    const refScreenshotName = path.join(kScreenshotPath, `${testName}-expected.png`);
    const diffName = path.join(kScreenshotPath, `${testName}-diff.png`);
    const timeoutTest = await runPage(page, testURL, refWait);
    if (timeoutTest) {
      addResult('TIMEOUT', testName, refLink);
      continue;
    }
    await screenshotManager.takeScreenshot(page, testScreenshotName);
    const timeoutRef = await runPage(page, refURL, refFileInfo.refWait);
    if (timeoutRef) {
      addResult('TIMEOUT', testName, refLink);
      continue;
    }
    await screenshotManager.takeScreenshot(page, refScreenshotName);
    const pass = await compareImages(testScreenshotName, refScreenshotName, fuzzy, diffName);
    addResult(
      pass ? 'PASS' : 'FAILURE',
      testName,
      refLink,
      testScreenshotName,
      refScreenshotName,
      diffName
    );
  }
  console.log(
    `----results----\n${results
      .map(({ status, testName }) => `[ ${status.padEnd(7)} ] ${testName}`)
      .join('\n')}`
  );
  // Renders one clickable thumbnail cell in the HTML report.
  const imgLink = (filename: string, title: string) => {
    const name = path.basename(filename);
    return `
    <div class="screenshot">
      ${title}
      <a href="${name}" title="${name}">
        <img src="${name}" width="256"/>
      </a>
    </div>`;
  };
  const indexName = path.join(kScreenshotPath, 'index.html');
  // NOTE(review): the bare '.screenshot' selector before </style> below has no
  // rule body — looks like an unfinished CSS rule; confirm against upstream.
  fs.writeFileSync(
    indexName,
    `<!DOCTYPE html>
<html>
  <head>
    <style>
    .screenshot {
      display: inline-block;
      background: #CCC;
      margin-right: 5px;
      padding: 5px;
    }
    .screenshot a {
      display: block;
    }
    .screenshot
    </style>
  </head>
  <body>
  ${results
    .map(({ status, testName, refName, testScreenshotName, refScreenshotName, diffName }) => {
      return `
      <div>
        <div>[ ${status} ]: ${testName} ref: ${refName}</div>
        ${
          status === 'FAILURE'
            ? `${imgLink(testScreenshotName, 'actual')}
          ${imgLink(refScreenshotName, 'ref')}
          ${imgLink(diffName, 'diff')}`
            : ``
        }
      </div>
      <hr>
      `;
    })
    .join('\n')}
  </body>
</html>
`
  );
  // the file:// with an absolute path makes it clickable in some terminals
  console.log(`\nsee: file://${path.resolve(indexName)}\n`);
  await page.close();
  await context.close();
  // I have no idea why it's taking ~30 seconds for playwright to close.
  console.log('-- [ done: waiting for browser to close ] --');
  await browser.close();
}
main().catch(e => {
throw e;
});

View File

@ -0,0 +1,51 @@
const path = require('path');
// Automatically transpile .ts imports
require('ts-node').register({
// Specify the project file so ts-node doesn't try to find it itself based on the CWD.
project: path.resolve(__dirname, '../../../tsconfig.json'),
compilerOptions: {
module: 'commonjs',
},
transpileOnly: true,
});
const Module = require('module');
// Redirect imports of .js files to .ts files
const resolveFilename = Module._resolveFilename;
/**
 * Override of Module._resolveFilename that redirects relative `./foo.js`
 * imports made from .ts files to the corresponding `./foo.ts` source, while
 * enforcing the project's import conventions (no `index.js`, relative
 * imports must end in `.js`; `.node` addons pass through).
 *
 * Bug fix: this must be a regular function, not an arrow. Node invokes it as
 * `Module._resolveFilename(...)`, so a regular function forwards the real
 * `this` to the original resolver (an arrow captured the module scope's
 * `this` instead). Rest args are also forwarded so the optional 4th
 * `options` argument (used by require.resolve(request, { paths })) is no
 * longer silently dropped.
 */
Module._resolveFilename = function (request, parentModule, isMain, ...rest) {
  do {
    if (request.startsWith('.') && parentModule.filename.endsWith('.ts')) {
      // Required for browser (because it needs the actual correct file path and
      // can't do any kind of file resolution).
      if (request.endsWith('/index.js')) {
        throw new Error(
          "Avoid the name `index.js`; we don't have Node-style path resolution: " + request
        );
      }
      // Import of Node addon modules are valid and should pass through.
      if (request.endsWith('.node')) {
        break;
      }
      if (!request.endsWith('.js')) {
        throw new Error('All relative imports must end in .js: ' + request);
      }
      try {
        const tsRequest = request.substring(0, request.length - '.js'.length) + '.ts';
        return resolveFilename.call(this, tsRequest, parentModule, isMain, ...rest);
      } catch (ex) {
        // If the .ts file doesn't exist, try .js instead.
        break;
      }
    }
  } while (0);
  return resolveFilename.call(this, request, parentModule, isMain, ...rest);
};
// Surface unhandled promise rejections as uncaught exceptions so they fail
// the tool run loudly instead of being logged as warnings.
process.on('unhandledRejection', ex => {
  throw ex;
});

View File

@ -0,0 +1,4 @@
// Version string for the current checkout, computed once at module-load time
// by shelling out to `git describe` (requires a git work tree; `--dirty`
// appends a suffix when there are uncommitted changes).
export const version = require('child_process')
  .execSync('git describe --always --abbrev=0 --dirty')
  .toString()
  .trim();

View File

@ -0,0 +1,58 @@
import { resolveOnTimeout } from './util.js';
/* eslint-disable-next-line @typescript-eslint/no-explicit-any */
declare const Components: any;

/**
 * Attempts to trigger JavaScript garbage collection, either using explicit methods if exposed
 * (may be available in testing environments with special browser runtime flags set), or using
 * some weird tricks to incur GC pressure. Adopted from the WebGL CTS.
 *
 * Explicit engine-specific collection hooks are tried in order; only if none
 * is available does it fall back to allocating garbage to create GC pressure.
 */
export async function attemptGarbageCollection(): Promise<void> {
  /* eslint-disable-next-line @typescript-eslint/no-explicit-any */
  const w: any = globalThis;
  if (w.GCController) {
    w.GCController.collect();
    return;
  }
  if (w.opera && w.opera.collect) {
    w.opera.collect();
    return;
  }
  try {
    // Privileged XPCOM API (`Components`); throws in unprivileged contexts.
    w.QueryInterface(Components.interfaces.nsIInterfaceRequestor)
      .getInterface(Components.interfaces.nsIDOMWindowUtils)
      .garbageCollect();
    return;
  } catch (e) {
    // ignore any failure
  }
  if (w.gc) {
    w.gc();
    return;
  }
  if (w.CollectGarbage) {
    w.CollectGarbage();
    return;
  }
  // Fallback: allocate many short-lived objects/strings (at several recursion
  // depths) to pressure the collector into running.
  let i: number;
  function gcRec(n: number): void {
    if (n < 1) return;
    /* eslint-disable @typescript-eslint/restrict-plus-operands */
    let temp: object | string = { i: 'ab' + i + i / 100000 };
    /* eslint-disable @typescript-eslint/restrict-plus-operands */
    temp = temp + 'foo';
    temp; // dummy use of unused variable
    gcRec(n - 1);
  }
  for (i = 0; i < 1000; i++) {
    gcRec(10);
  }
  return resolveOnTimeout(35); // Let the event loop run a few frames in case it helps.
}

View File

@ -0,0 +1,127 @@
/**
 * The interface used for formatting strings to contain color metadata.
 *
 * Use the interface properties to construct a style, then use the
 * `(s: string): string` function to format the provided string with the given
 * style.
 *
 * Each style property is itself a `Colors`, so styles can be chained,
 * e.g. `Colors.red.bold('text')`.
 */
export interface Colors {
  // Are colors enabled?
  enabled: boolean;
  // Returns the string formatted to contain the specified color or style.
  (s: string): string;

  // modifiers
  reset: Colors;
  bold: Colors;
  dim: Colors;
  italic: Colors;
  underline: Colors;
  inverse: Colors;
  hidden: Colors;
  strikethrough: Colors;

  // colors
  black: Colors;
  red: Colors;
  green: Colors;
  yellow: Colors;
  blue: Colors;
  magenta: Colors;
  cyan: Colors;
  white: Colors;
  gray: Colors;
  grey: Colors;

  // bright colors
  blackBright: Colors;
  redBright: Colors;
  greenBright: Colors;
  yellowBright: Colors;
  blueBright: Colors;
  magentaBright: Colors;
  cyanBright: Colors;
  whiteBright: Colors;

  // background colors
  bgBlack: Colors;
  bgRed: Colors;
  bgGreen: Colors;
  bgYellow: Colors;
  bgBlue: Colors;
  bgMagenta: Colors;
  bgCyan: Colors;
  bgWhite: Colors;

  // bright background colors
  bgBlackBright: Colors;
  bgRedBright: Colors;
  bgGreenBright: Colors;
  bgYellowBright: Colors;
  bgBlueBright: Colors;
  bgMagentaBright: Colors;
  bgCyanBright: Colors;
  bgWhiteBright: Colors;
}
/**
 * The interface used for formatting strings with color metadata.
 *
 * Currently Colors will use the 'ansi-colors' module if it can be loaded.
 * If it cannot be loaded, then the Colors implementation is a straight pass-through.
 *
 * Colors may also be a no-op if the current environment does not support colors.
 */
export let Colors: Colors;
try {
  /* eslint-disable-next-line node/no-unpublished-require */
  Colors = require('ansi-colors') as Colors;
} catch {
  // 'ansi-colors' could not be loaded: fall back to a pass-through formatter
  // where every style/color property points back at the same identity function.
  const passthrough = ((s: string) => s) as Colors;
  passthrough.enabled = false;
  // All style/color property names of the Colors interface (everything except
  // `enabled` and the call signature). Kept as one list instead of 42 separate
  // assignment statements; must stay in sync with the interface above.
  const styleProperties = [
    'reset', 'bold', 'dim', 'italic', 'underline', 'inverse', 'hidden', 'strikethrough',
    'black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white', 'gray', 'grey',
    'blackBright', 'redBright', 'greenBright', 'yellowBright',
    'blueBright', 'magentaBright', 'cyanBright', 'whiteBright',
    'bgBlack', 'bgRed', 'bgGreen', 'bgYellow', 'bgBlue', 'bgMagenta', 'bgCyan', 'bgWhite',
    'bgBlackBright', 'bgRedBright', 'bgGreenBright', 'bgYellowBright',
    'bgBlueBright', 'bgMagentaBright', 'bgCyanBright', 'bgWhiteBright',
  ] as const;
  for (const prop of styleProperties) {
    /* eslint-disable-next-line @typescript-eslint/no-explicit-any */
    (passthrough as any)[prop] = passthrough;
  }
  Colors = passthrough;
}

View File

@ -0,0 +1,39 @@
import { ResolveType, ZipKeysWithValues } from './types.js';
export type valueof<K> = K[keyof K];

/** Returns the keys of `obj`, typed as a readonly array of the key union `T`. */
export function keysOf<T extends string>(obj: { [k in T]: unknown }): readonly T[] {
  const keys = Object.keys(obj);
  return keys as readonly string[] as readonly T[];
}
/** Returns the keys of `obj` converted to numbers, typed as a readonly array of `T`. */
export function numericKeysOf<T>(obj: object): readonly T[] {
  const result: unknown[] = [];
  for (const key of Object.keys(obj)) {
    result.push(Number(key));
  }
  return result as T[];
}
/**
 * Creates an info lookup object from a more nicely-formatted table. See below for examples.
 *
 * Note: Using `as const` on the arguments to this function is necessary to infer the correct type.
 */
export function makeTable<
  Members extends readonly string[],
  Defaults extends readonly unknown[],
  Table extends { readonly [k: string]: readonly unknown[] }
>(
  members: Members,
  defaults: Defaults,
  table: Table
): {
  readonly [k in keyof Table]: ResolveType<ZipKeysWithValues<Members, Table[k], Defaults>>;
} {
  const result: { [k: string]: { [m: string]: unknown } } = {};
  for (const [rowName, rowValues] of Object.entries<readonly unknown[]>(table)) {
    const row: { [m: string]: unknown } = {};
    members.forEach((member, columnIndex) => {
      // Missing (or explicitly undefined) cells fall back to the column default.
      row[member] = rowValues[columnIndex] ?? defaults[columnIndex];
    });
    result[rowName] = row;
  }
  /* eslint-disable-next-line @typescript-eslint/no-explicit-any */
  return result as any;
}

View File

@ -0,0 +1,74 @@
/// <reference types="@webgpu/types" />
import { assert } from './util.js';
/**
 * Finds and returns the `navigator.gpu` object (or equivalent, for non-browser implementations).
 * Throws an exception if not found.
 */
function defaultGPUProvider(): GPU {
  const gpu = typeof navigator === 'undefined' ? undefined : navigator.gpu;
  assert(gpu !== undefined, 'No WebGPU implementation found');
  return gpu;
}
/**
 * GPUProvider is a function that creates and returns a new GPU instance.
 * May throw an exception if a GPU cannot be created.
 */
export type GPUProvider = () => GPU;

// Provider used by getGPU(); replaceable via setGPUProvider() before first use.
let gpuProvider: GPUProvider = defaultGPUProvider;

/**
 * Sets the function to create and return a new GPU instance.
 * Must be called before the first getGPU() call (asserts the cached instance
 * has not been created yet).
 */
export function setGPUProvider(provider: GPUProvider) {
  assert(impl === undefined, 'setGPUProvider() should not be after getGPU()');
  gpuProvider = provider;
}

// Cached GPU instance, created lazily by getGPU().
let impl: GPU | undefined = undefined;

// Options merged into every requestAdapter() call made on the cached GPU.
let defaultRequestAdapterOptions: GPURequestAdapterOptions | undefined;

/**
 * Sets adapter options to be mixed into (and overridable by) the options of
 * every later requestAdapter() call. Must be called before the first getGPU().
 */
export function setDefaultRequestAdapterOptions(options: GPURequestAdapterOptions) {
  if (impl) {
    throw new Error('must call setDefaultRequestAdapterOptions before getGPU');
  }
  defaultRequestAdapterOptions = { ...options };
}
/**
 * Finds and returns the `navigator.gpu` object (or equivalent, for non-browser implementations).
 * Throws an exception if not found.
 *
 * The result is created once and cached. If default adapter options were set,
 * `requestAdapter` is wrapped so that every call merges them in (explicitly
 * passed options win), and the chosen adapter's info is logged.
 */
export function getGPU(): GPU {
  if (impl) {
    return impl;
  }
  impl = gpuProvider();
  if (defaultRequestAdapterOptions) {
    // eslint-disable-next-line @typescript-eslint/unbound-method
    const oldFn = impl.requestAdapter;
    impl.requestAdapter = function (
      options?: GPURequestAdapterOptions
    ): Promise<GPUAdapter | null> {
      // Explicit `options` spread last so they override the defaults.
      const promise = oldFn.call(this, { ...defaultRequestAdapterOptions, ...(options || {}) });
      // Fire-and-forget: log adapter info without delaying the caller.
      void promise.then(async adapter => {
        if (adapter) {
          const info = await adapter.requestAdapterInfo();
          // eslint-disable-next-line no-console
          console.log(info);
        }
      });
      return promise;
    };
  }
  return impl;
}

View File

@ -0,0 +1,149 @@
import { assert } from './util.js';
// The state of the preprocessor is a stack of States.
type StateStack = { allowsFollowingElse: boolean; state: State }[];
const enum State {
  Seeking, // Still looking for a passing condition
  Passing, // Currently inside a passing condition (the root is always in this state)
  Skipping, // Have already seen a passing condition; now skipping the rest
}
// The transitions in the state space are the following preprocessor directives:
// - Sibling elif
// - Sibling else
// - Sibling endif
// - Child if
/**
 * Base class for preprocessor directives. Each directive records the nesting
 * `depth` it was written for and mutates the state stack when applied.
 */
abstract class Directive {
  private readonly depth: number;
  constructor(depth: number) {
    this.depth = depth;
  }
  // Asserts that the stack's current nesting matches this directive's depth.
  protected checkDepth(stack: StateStack): void {
    assert(
      stack.length === this.depth,
      `Number of "$"s must match nesting depth, currently ${stack.length} (e.g. $if $$if $$endif $endif)`
    );
  }
  // Applies this directive's state transition to the stack.
  abstract applyTo(stack: StateStack): void;
}
class If extends Directive {
private readonly predicate: boolean;
constructor(depth: number, predicate: boolean) {
super(depth);
this.predicate = predicate;
}
applyTo(stack: StateStack) {
this.checkDepth(stack);
const parentState = stack[stack.length - 1].state;
stack.push({
allowsFollowingElse: true,
state:
parentState !== State.Passing
? State.Skipping
: this.predicate
? State.Passing
: State.Seeking,
});
}
}
class ElseIf extends If {
  applyTo(stack: StateStack) {
    assert(stack.length >= 1);
    // Pop the preceding sibling branch's state; depth is checked against the
    // remaining (enclosing) stack.
    const { allowsFollowingElse, state: siblingState } = stack.pop()!;
    this.checkDepth(stack);
    assert(allowsFollowingElse, 'pp.elif after pp.else');
    if (siblingState !== State.Seeking) {
      // A previous branch already passed (or the parent is skipping):
      // skip this branch regardless of its predicate.
      stack.push({ allowsFollowingElse: true, state: State.Skipping });
    } else {
      // Still seeking a passing branch: behave exactly like a fresh `if`.
      super.applyTo(stack);
    }
  }
}
class Else extends Directive {
  applyTo(stack: StateStack) {
    assert(stack.length >= 1);
    const { allowsFollowingElse, state: siblingState } = stack.pop()!;
    this.checkDepth(stack);
    assert(allowsFollowingElse, 'pp.else after pp.else');
    stack.push({
      // No elif/else may follow an else at the same depth.
      allowsFollowingElse: false,
      state: siblingState === State.Seeking ? State.Passing : State.Skipping,
    });
  }
}
class EndIf extends Directive {
  applyTo(stack: StateStack) {
    // Pop first so checkDepth() validates against the enclosing scope's depth.
    stack.pop();
    this.checkDepth(stack);
  }
}
/**
 * A simple template-based, non-line-based preprocessor implementing if/elif/else/endif.
 *
 * @example
 * ```
 * const shader = pp`
 * ${pp._if(expr)}
 * const x: ${type} = ${value};
 * ${pp._elif(expr)}
 * ${pp.__if(expr)}
 * ...
 * ${pp.__else}
 * ...
 * ${pp.__endif}
 * ${pp._endif}`;
 * ```
 *
 * @param strings - The array of constant string chunks of the template string.
 * @param ...values - The array of interpolated `${}` values within the template string.
 */
export function pp(
  strings: TemplateStringsArray,
  ...values: ReadonlyArray<Directive | string | number>
): string {
  // The root scope is always Passing and never allows an else.
  const stateStack: StateStack = [{ allowsFollowingElse: false, state: State.Passing }];
  const pieces: string[] = [];
  values.forEach((value, i) => {
    const passing = stateStack[stateStack.length - 1].state === State.Passing;
    // Emit the constant chunk preceding this interpolation only when passing.
    if (passing) {
      pieces.push(strings[i]);
    }
    if (value instanceof Directive) {
      value.applyTo(stateStack);
    } else if (passing) {
      pieces.push(`${value}`);
    }
  });
  assert(stateStack.length === 1, 'Unterminated preprocessor condition at end of file');
  pieces.push(strings[values.length]);
  return pieces.join('');
}
// Directive constructors, one set per nesting depth: `_` is depth 1, `__` is
// depth 2, `___` is depth 3. `_if`/`_elif` take the branch predicate;
// `_else`/`_endif` are shared singleton instances.
pp._if = (predicate: boolean) => new If(1, predicate);
pp._elif = (predicate: boolean) => new ElseIf(1, predicate);
pp._else = new Else(1);
pp._endif = new EndIf(1);
pp.__if = (predicate: boolean) => new If(2, predicate);
pp.__elif = (predicate: boolean) => new ElseIf(2, predicate);
pp.__else = new Else(2);
pp.__endif = new EndIf(2);
pp.___if = (predicate: boolean) => new If(3, predicate);
pp.___elif = (predicate: boolean) => new ElseIf(3, predicate);
pp.___else = new Else(3);
pp.___endif = new EndIf(3);
// Add more if needed.

View File

@ -0,0 +1,7 @@
/** Defined by WPT. Like `setTimeout`, but applies a timeout multiplier for slow test systems. */
declare const step_timeout: undefined | typeof setTimeout;

/**
 * Equivalent of `setTimeout`, but redirects to WPT's `step_timeout` when it is defined.
 * The choice is made once, at module-load time.
 */
export const timeout = typeof step_timeout !== 'undefined' ? step_timeout : setTimeout;

View File

@ -0,0 +1,59 @@
/** Forces a type to resolve its type definitions, to make it readable/debuggable. */
export type ResolveType<T> = T extends object
  ? T extends infer O
    ? { [K in keyof O]: ResolveType<O[K]> }
    : never
  : T;

/** Returns the type `true` iff X and Y are exactly equal */
export type TypeEqual<X, Y> = (<T>() => T extends X ? 1 : 2) extends <T>() => T extends Y ? 1 : 2
  ? true
  : false;

// Compile-time-only assertion: instantiating it with a non-`true` type errors.
/* eslint-disable-next-line @typescript-eslint/no-unused-vars */
export function assertTypeTrue<T extends true>() {}

/**
 * Computes the intersection of a set of types, given the union of those types.
 *
 * From: https://stackoverflow.com/a/56375136
 */
export type UnionToIntersection<U> =
  /* eslint-disable-next-line @typescript-eslint/no-explicit-any */
  (U extends any ? (k: U) => void : never) extends (k: infer I) => void ? I : never;

/** "Type asserts" that `X` is a subtype of `Y`. */
type EnsureSubtype<X, Y> = X extends Y ? X : never;

// First element of tuple `T`, or `Default` if `T` is empty/not a tuple.
type TupleHeadOr<T, Default> = T extends readonly [infer H, ...(readonly unknown[])] ? H : Default;
// All-but-first elements of tuple `T`, or `Default` if `T` is empty/not a tuple.
type TupleTailOr<T, Default> = T extends readonly [unknown, ...infer Tail] ? Tail : Default;
// `T` itself, or `Default` when `T` is `undefined`.
type TypeOr<T, Default> = T extends undefined ? Default : T;

/**
 * Zips a key tuple type and a value tuple type together into an object.
 *
 * @template Keys Keys of the resulting object.
 * @template Values Values of the resulting object. If a key corresponds to a `Values` member that
 *   is undefined or past the end, it defaults to the corresponding `Defaults` member.
 * @template Defaults Default values. If a key corresponds to a `Defaults` member that is past the
 *   end, the default falls back to `undefined`.
 */
export type ZipKeysWithValues<
  Keys extends readonly string[],
  Values extends readonly unknown[],
  Defaults extends readonly unknown[]
> =
  //
  Keys extends readonly [infer KHead, ...infer KTail]
    ? {
        readonly [k in EnsureSubtype<KHead, string>]: TypeOr<
          TupleHeadOr<Values, undefined>,
          TupleHeadOr<Defaults, undefined>
        >;
      } &
        ZipKeysWithValues<
          EnsureSubtype<KTail, readonly string[]>,
          TupleTailOr<Values, []>,
          TupleTailOr<Defaults, []>
        >
    : {}; // K exhausted

View File

@ -0,0 +1,303 @@
import { Float16Array } from '../../external/petamoriken/float16/float16.js';
import { globalTestConfig } from '../framework/test_config.js';
import { Logger } from '../internal/logging/logger.js';
import { keysOf } from './data_tables.js';
import { timeout } from './timeout.js';
/**
 * Error with arbitrary `extra` data attached, for debugging.
 * The extra data is omitted if not running the test in debug mode (`?debug=1`).
 */
export class ErrorWithExtra extends Error {
  // Debug payload; a `{ omitted: ... }` placeholder outside of debug mode.
  readonly extra: { [k: string]: unknown };

  /**
   * `extra` function is only called if in debug mode.
   * If an `ErrorWithExtra` is passed, its message is used and its extras are passed through.
   */
  constructor(message: string, extra: () => {});
  constructor(base: ErrorWithExtra, newExtra: () => {});
  constructor(baseOrMessage: string | ErrorWithExtra, newExtra: () => {}) {
    const message = typeof baseOrMessage === 'string' ? baseOrMessage : baseOrMessage.message;
    super(message);
    // Merge extras from a base ErrorWithExtra with the new ones (new keys win).
    const oldExtras = baseOrMessage instanceof ErrorWithExtra ? baseOrMessage.extra : {};
    this.extra = Logger.globalDebugMode
      ? { ...oldExtras, ...newExtra() }
      : { omitted: 'pass ?debug=1' };
  }
}
/**
 * Asserts `condition` is true. Otherwise, throws an `Error` with the provided message.
 * `msg` may be a thunk, which is only evaluated on failure.
 */
export function assert(condition: boolean, msg?: string | (() => string)): asserts condition {
  if (condition) {
    return;
  }
  let message: string | undefined;
  if (typeof msg === 'function') {
    message = msg();
  } else {
    message = msg;
  }
  throw new Error(message);
}
/** If the argument is an Error, throw it. Otherwise, pass it back. */
export function assertOK<T>(value: Error | T): T {
  if (!(value instanceof Error)) {
    return value;
  }
  throw value;
}
/**
 * Resolves if the provided promise rejects; fails (via `unreachable`) if it resolves.
 */
export async function assertReject(p: Promise<unknown>, msg?: string): Promise<void> {
  // Track settlement with a flag and call `unreachable` OUTSIDE the try block.
  // (Calling it inside the try would let its own Error be swallowed by the
  // catch, so the assertion could never actually fail.)
  let rejected = false;
  try {
    await p;
  } catch (ex) {
    // Assertion OK: the promise rejected as expected.
    rejected = true;
  }
  if (!rejected) {
    unreachable(msg);
  }
}
/**
 * Assert this code is unreachable. Unconditionally throws an `Error`.
 */
export function unreachable(msg?: string): never {
  const error = new Error(msg);
  throw error;
}
/**
 * The `performance` interface.
 * It is available in all browsers, but it is not in scope by default in Node.
 * (The `require` fallback is only evaluated when the global is absent.)
 */
const perf = typeof performance !== 'undefined' ? performance : require('perf_hooks').performance;

/**
 * Calls the appropriate `performance.now()` depending on whether running in a browser or Node.
 * Returns a monotonic, high-resolution timestamp in milliseconds.
 */
export function now(): number {
  return perf.now();
}
/**
 * Returns a promise which resolves after the specified time.
 */
export function resolveOnTimeout(ms: number): Promise<void> {
  return new Promise(resolve => {
    timeout(() => resolve(), ms);
  });
}
/** Error type used to signal that a promise timed out. */
export class PromiseTimeoutError extends Error {}

/**
 * Returns a promise which rejects (with a `PromiseTimeoutError`) after the specified time.
 */
export function rejectOnTimeout(ms: number, msg: string): Promise<never> {
  return new Promise((_resolve, reject) => {
    const fail = () => {
      reject(new PromiseTimeoutError(msg));
    };
    timeout(fail, ms);
  });
}
/**
 * Takes a promise `p`, and returns a new one which rejects if `p` takes too long,
 * and otherwise passes the result through.
 */
export function raceWithRejectOnTimeout<T>(p: Promise<T>, ms: number, msg: string): Promise<T> {
  if (globalTestConfig.noRaceWithRejectOnTimeout) {
    return p;
  }
  // Setup a promise that will reject after `ms` milliseconds. We cancel this timeout when
  // `p` is finalized, so the JavaScript VM doesn't hang around waiting for the timer to
  // complete, once the test runner has finished executing the tests.
  const timeoutPromise = new Promise((_resolve, reject) => {
    const handle = timeout(() => {
      reject(new PromiseTimeoutError(msg));
    }, ms);
    // Note: the Promise executor runs synchronously, so this reassignment
    // happens before Promise.race() below reads `p`.
    p = p.finally(() => clearTimeout(handle));
  });
  return Promise.race([p, timeoutPromise]) as Promise<T>;
}
/**
 * Takes a promise `p` and returns a new one which rejects if `p` resolves or rejects,
 * and otherwise resolves after the specified time.
 */
export function assertNotSettledWithinTime(
  p: Promise<unknown>,
  ms: number,
  msg: string
): Promise<undefined> {
  // Rejects regardless of whether p resolves or rejects.
  const rejectWhenSettled = p.then(() => Promise.reject(new Error(msg)));
  // Resolves after `ms` milliseconds.
  const timeoutPromise = new Promise<undefined>(resolve => {
    const handle = timeout(() => {
      resolve(undefined);
    }, ms);
    // Cancel the timer once `p` settles. The result of this chain is unused,
    // so swallow its rejection — otherwise a rejecting `p` would surface here
    // as an unhandled promise rejection. (`p`'s rejection is still observed
    // through `rejectWhenSettled` above.)
    p.finally(() => clearTimeout(handle)).catch(() => {});
  });
  return Promise.race([rejectWhenSettled, timeoutPromise]);
}
/**
 * Returns a `Promise.reject()`, but also registers a dummy `.catch()` handler so it doesn't count
 * as an uncaught promise rejection in the runtime.
 */
export function rejectWithoutUncaught<T>(err: unknown): Promise<T> {
  const rejection: Promise<T> = Promise.reject(err);
  // Attach a no-op handler so the runtime never reports this as unhandled.
  rejection.catch(() => {});
  return rejection;
}
/**
 * Makes a copy of a JS `object`, with the keys reordered into sorted order.
 * (Relies on JS engines preserving string-key insertion order.)
 */
export function sortObjectByKey(v: { [k: string]: unknown }): { [k: string]: unknown } {
  const sortedKeys = Object.keys(v).sort();
  const copy: { [k: string]: unknown } = {};
  for (const key of sortedKeys) {
    copy[key] = v[key];
  }
  return copy;
}
/**
 * Determines whether two JS values are equal, recursing into objects and arrays.
 * NaN is treated specially, such that `objectEquals(NaN, NaN)`.
 */
export function objectEquals(x: unknown, y: unknown): boolean {
  // Primitives (and functions): strict equality, except NaN === NaN.
  if (typeof x !== 'object' || typeof y !== 'object') {
    const bothNaN =
      typeof x === 'number' && typeof y === 'number' && Number.isNaN(x) && Number.isNaN(y);
    return bothNaN || x === y;
  }
  if (x === null || y === null) return x === y;
  if (x.constructor !== y.constructor) return false;
  if (x instanceof Function || x instanceof RegExp) return x === y;
  if (x === y || x.valueOf() === y.valueOf()) return true;
  if (Array.isArray(x) && Array.isArray(y) && x.length !== y.length) return false;
  if (x instanceof Date) return false;
  if (!(x instanceof Object)) return false;
  if (!(y instanceof Object)) return false;
  const xObj = x as { [k: string]: unknown };
  const yObj = y as { [k: string]: unknown };
  const xKeys = Object.keys(x);
  // Same key set, and every value recursively equal.
  return (
    Object.keys(y).every(k => xKeys.includes(k)) &&
    xKeys.every(k => objectEquals(xObj[k], yObj[k]))
  );
}
/**
 * Generates a range of values `fn(0)..fn(n-1)`.
 */
export function range<T>(n: number, fn: (i: number) => T): T[] {
  const out: T[] = [];
  for (let i = 0; i < n; ++i) {
    out.push(fn(i));
  }
  return out;
}
/**
 * Generates a range of values `fn(0)..fn(n-1)`, lazily.
 */
export function* iterRange<T>(n: number, fn: (i: number) => T): Iterable<T> {
  let i = 0;
  while (i < n) {
    yield fn(i);
    ++i;
  }
}
/** Creates a (reusable) iterable object that maps `f` over `xs`, lazily. */
export function mapLazy<T, R>(xs: Iterable<T>, f: (x: T) => R): Iterable<R> {
  return {
    // Each iteration re-walks `xs`, so the result is reusable (not one-shot).
    [Symbol.iterator]: function* () {
      for (const item of xs) {
        yield f(item);
      }
    },
  };
}
// One instance of each supported TypedArray flavor (including the vendored
// Float16Array), used both to derive the TypedArrayBufferView union type and
// to build the constructor lookup table below.
const TypedArrayBufferViewInstances = [
  new Uint8Array(),
  new Uint8ClampedArray(),
  new Uint16Array(),
  new Uint32Array(),
  new Int8Array(),
  new Int16Array(),
  new Int32Array(),
  new Float16Array(),
  new Float32Array(),
  new Float64Array(),
] as const;

/** Union of all supported TypedArray instance types. */
export type TypedArrayBufferView = typeof TypedArrayBufferViewInstances[number];

/** Generic constructor type covering all supported TypedArray classes. */
export type TypedArrayBufferViewConstructor<
  A extends TypedArrayBufferView = TypedArrayBufferView
> = {
  // Interface copied from Uint8Array, and made generic.
  readonly prototype: A;
  readonly BYTES_PER_ELEMENT: number;

  new (): A;
  new (elements: Iterable<number>): A;
  new (array: ArrayLike<number> | ArrayBufferLike): A;
  new (buffer: ArrayBufferLike, byteOffset?: number, length?: number): A;
  new (length: number): A;

  from(arrayLike: ArrayLike<number>): A;
  /* eslint-disable-next-line @typescript-eslint/no-explicit-any */
  from(arrayLike: Iterable<number>, mapfn?: (v: number, k: number) => number, thisArg?: any): A;
  /* eslint-disable-next-line @typescript-eslint/no-explicit-any */
  from<T>(arrayLike: ArrayLike<T>, mapfn: (v: T, k: number) => number, thisArg?: any): A;
  of(...items: number[]): A;
};
/** Lookup table from TypedArray constructor name (e.g. 'Uint8Array') to its constructor. */
export const kTypedArrayBufferViews: {
  readonly [k: string]: TypedArrayBufferViewConstructor;
} = {
  ...(() => {
    /* eslint-disable-next-line @typescript-eslint/no-explicit-any */
    const result: { [k: string]: any } = {};
    for (const v of TypedArrayBufferViewInstances) {
      result[v.constructor.name] = v.constructor;
    }
    return result;
  })(),
};
/** The keys of `kTypedArrayBufferViews` (the constructor names). */
export const kTypedArrayBufferViewKeys = keysOf(kTypedArrayBufferViews);
/** The values of `kTypedArrayBufferViews` (the constructors themselves). */
export const kTypedArrayBufferViewConstructors = Object.values(kTypedArrayBufferViews);
/**
 * Views a sub-range of `buf` as a Uint8Array (or Uint8ClampedArray).
 *
 * `start`/`length` are in elements of `buf`'s own type — so in bytes when
 * `buf` is an ArrayBuffer (BYTES_PER_ELEMENT effectively 1).
 */
function subarrayAsU8(
  buf: ArrayBuffer | TypedArrayBufferView,
  { start = 0, length }: { start?: number; length?: number }
): Uint8Array | Uint8ClampedArray {
  if (buf instanceof ArrayBuffer) {
    // `length === undefined` makes the view extend to the end of the buffer.
    return new Uint8Array(buf, start, length);
  } else if (buf instanceof Uint8Array || buf instanceof Uint8ClampedArray) {
    // Don't wrap in new views if we don't need to.
    if (start === 0 && (length === undefined || length === buf.byteLength)) {
      return buf;
    }
  }
  // Convert element offset/count into byte offset/count relative to the
  // underlying ArrayBuffer (accounting for the view's own byteOffset).
  const byteOffset = buf.byteOffset + start * buf.BYTES_PER_ELEMENT;
  const byteLength =
    length !== undefined
      ? length * buf.BYTES_PER_ELEMENT
      : buf.byteLength - (byteOffset - buf.byteOffset);
  return new Uint8Array(buf.buffer, byteOffset, byteLength);
}
/**
 * Copy a range of bytes from one ArrayBuffer or TypedArray to another.
 *
 * `start`/`length` are in elements (or in bytes, if ArrayBuffer).
 * The source range must fit within the destination (TypedArray.set semantics).
 */
export function memcpy(
  src: { src: ArrayBuffer | TypedArrayBufferView; start?: number; length?: number },
  dst: { dst: ArrayBuffer | TypedArrayBufferView; start?: number }
): void {
  subarrayAsU8(dst.dst, dst).set(subarrayAsU8(src.src, src));
}

View File

@ -0,0 +1,24 @@
import { timeout } from './timeout.js';
// Copied from https://github.com/web-platform-tests/wpt/blob/master/common/reftest-wait.js

/**
 * Remove the `reftest-wait` class on the document element.
 * The reftest runner will wait with taking a screenshot while
 * this class is present.
 *
 * See https://web-platform-tests.org/writing-tests/reftests.html#controlling-when-comparison-occurs
 */
export function takeScreenshot() {
  document.documentElement.classList.remove('reftest-wait');
}

/**
 * Call `takeScreenshot()` after a delay of at least `ms` milliseconds.
 * Uses the WPT-aware `timeout` wrapper rather than raw `setTimeout`.
 * @param {number} ms - milliseconds
 */
export function takeScreenshotDelayed(ms: number) {
  timeout(() => {
    takeScreenshot();
  }, ms);
}

View File

@ -0,0 +1 @@
Demo test suite for manually testing test runners.

View File

@ -0,0 +1,8 @@
export const description = 'Description for a.spec.ts';

import { makeTestGroup } from '../common/framework/test_group.js';
import { UnitTest } from '../unittests/unit_test.js';

export const g = makeTestGroup(UnitTest);

// Single stub test, exercising the runner's "unimplemented" status.
g.test('not_implemented_yet').unimplemented();

View File

@ -0,0 +1 @@
README for a/

View File

@ -0,0 +1,6 @@
export const description = 'Description for b.spec.ts';

import { makeTestGroup } from '../../common/framework/test_group.js';
import { UnitTest } from '../../unittests/unit_test.js';

// Defines a test group containing no tests (an empty spec file).
export const g = makeTestGroup(UnitTest);

View File

@ -0,0 +1 @@
README for a/b/

View File

@ -0,0 +1,80 @@
export const description = 'Description for c.spec.ts';

import { makeTestGroup } from '../../../common/framework/test_group.js';
import { unreachable } from '../../../common/util/util.js';
import { UnitTest } from '../../../unittests/unit_test.js';

export const g = makeTestGroup(UnitTest);

// Tests with nested names (f / f,g / f,g,h) and varied case parameterization,
// exercising test-tree construction in the runner.
g.test('f')
  .desc(
    `Test plan for f
- Test stuff
- Test some more stuff`
  )
  .fn(() => {});

g.test('f,g').fn(() => {});

g.test('f,g,h')
  .paramsSimple([{}, { x: 0 }, { x: 0, y: 0 }])
  .fn(() => {});

g.test('case_depth_2_in_single_child_test')
  .paramsSimple([{ x: 0, y: 0 }])
  .fn(() => {});

g.test('deep_case_tree')
  .params(u =>
    u //
      .combine('x', [1, 2])
      .combine('y', [1, 2])
      .combine('z', [1, 2])
  )
  .fn(() => {});

// One test per reportable status.
g.test('statuses,debug').fn(t => {
  t.debug('debug');
});

g.test('statuses,skip').fn(t => {
  t.skip('skip');
});

g.test('statuses,warn').fn(t => {
  t.warn('warn');
});

g.test('statuses,fail').fn(t => {
  t.fail('fail');
});

g.test('statuses,throw').fn(() => {
  unreachable('unreachable');
});

// Exercises deduplication of repeated failures in log output.
g.test('multiple_same_stack').fn(t => {
  for (let i = 0; i < 3; ++i) {
    t.fail(
      i === 2
        ? 'this should appear after deduplicated line'
        : 'this should be "seen 2 times with identical stack"'
    );
  }
});

g.test('multiple_same_level').fn(t => {
  t.fail('this should print a stack');
  t.fail('this should print a stack');
  t.fail('this should not print a stack');
});

// Exercises stack suppression for log entries below the worst level.
g.test('lower_levels_hidden,before').fn(t => {
  t.warn('warn - this should not print a stack');
  t.fail('fail');
});

g.test('lower_levels_hidden,after').fn(t => {
  t.fail('fail');
  t.warn('warn - this should not print a stack');
});

View File

@ -0,0 +1,8 @@
export const description = 'Description for d.spec.ts';

import { makeTestGroup } from '../../../common/framework/test_group.js';
import { UnitTest } from '../../../unittests/unit_test.js';

export const g = makeTestGroup(UnitTest);

// Single test with a depth-2 (comma-separated) name in an only-child file.
g.test('test_depth_2,in_single_child_file').fn(() => {});

View File

@ -0,0 +1,6 @@
export const description = 'Description for r.spec.ts';

import { makeTestGroup } from '../../../common/framework/test_group.js';
import { UnitTest } from '../../../unittests/unit_test.js';

// Defines a test group containing no tests (an empty spec file).
export const g = makeTestGroup(UnitTest);

View File

@ -0,0 +1,10 @@
export const description = 'Description for a.spec.ts';

import { makeTestGroup } from '../common/framework/test_group.js';
import { UnitTest } from '../unittests/unit_test.js';

export const g = makeTestGroup(UnitTest);

// Exercises object-valued (JSON-serializable) test parameters.
g.test('json')
  .paramsSimple([{ p: { x: 1, y: 'two' } }])
  .fn(() => {});

View File

@ -0,0 +1,38 @@
export const description = 'Tests with subcases';

import { makeTestGroup } from '../common/framework/test_group.js';
import { UnitTest } from '../unittests/unit_test.js';

export const g = makeTestGroup(UnitTest);

// 3 cases (x), each with 3 subcases (y); the outcome depends on x+y so each
// case mixes pass/warn/fail subcase results.
g.test('pass_warn_fail')
  .params(u =>
    u
      .combine('x', [1, 2, 3]) //
      .beginSubcases()
      .combine('y', [1, 2, 3])
  )
  .fn(t => {
    const { x, y } = t.params;
    if (x + y > 5) {
      t.fail();
    } else if (x + y > 4) {
      t.warn();
    }
  });

// DOMException propagation, at case granularity.
g.test('DOMException,cases')
  .params(u => u.combine('fail', [false, true]))
  .fn(t => {
    if (t.params.fail) {
      throw new DOMException('Message!', 'Name!');
    }
  });

// DOMException propagation, at subcase granularity.
g.test('DOMException,subcases')
  .paramsSubcasesOnly(u => u.combine('fail', [false, true]))
  .fn(t => {
    if (t.params.fail) {
      throw new DOMException('Message!', 'Name!');
    }
  });

View File

@ -0,0 +1,31 @@
# External Modules
This directory contains external modules that are used by the WebGPU
CTS. These are included in the repo, as opposed to being fetched via a
package manager or CDN, so that there is a single canonical source of
truth for the CTS tests and the CTS tests can be run as a standalone
suite without needing to pull from a CDN or similar process.
## Adding modules
Each module that is added should be done consciously with a clear
reasoning on what the module is providing, since the bar for adding
new modules should be relatively high.
The module must be made available under a license compatible with the
BSD-3-Clause and W3C licenses that currently cover the CTS.
It is preferred to use a single source build of the module if possible.
In addition to the module's source, a LICENSE file should be
included in the directory, clearly identifying the owner of the module
and the license it is covered by.
Details of the specific module, including version, origin and purpose
should be listed below.
## Current Modules
| **Name** | **Origin** | **License** | **Version** | **Purpose** |
|----------------------|--------------------------------------------------|-------------|-------------|------------------------------------------------|
| petamoriken/float16 | [github](https://github.com/petamoriken/float16) | MIT | 3.6.6 | Fluent support for f16 numbers via TypedArrays |

Some files were not shown because too many files have changed in this diff Show More