Bug 1610639 [wpt PR 21323] - Integration of WMAS test runner into WPT, a=testonly

Automatic update from web-platform-tests
Integration of WMAS test runner into WPT (#21323)

Adding [WMAS (Web Media API Snapshot) test runner](https://github.com/cta-wave/WMAS/) for running tests on embedded devices (TVs, set-top boxes, streaming devices) in a single window/tab with server-side monitoring and execution. The [WMAS Specification](https://github.com/w3c/webmediaapi/) targets devices that are not able to run the WPT test runner due to performance limitations and missing support for multiple windows/tabs (TV apps usually run in a single window and are not able to use window.open(), which is used by the WPT test runner).

This [PR](https://github.com/web-platform-tests/rfcs/pull/23) in the [web-platform-tests RFCs](https://github.com/web-platform-tests/rfcs) provides more info about the discussion of the integration of the WMAS Test Runner into WPT.

Co-authored-by: Fritz Heiden <fritz.heiden@fokus.fraunhofer.de>
Co-authored-by: Stephen McGruer <smcgruer@chromium.org>

--

wpt-commits: 333521dce3954312a06d24b0484a0501b46948fa
wpt-pr: 21323
This commit is contained in:
Louay Bassbouss 2020-06-10 11:36:46 +00:00 committed by moz-wptsync-bot
parent ae993e0040
commit 0efcbfb76e
136 changed files with 42678 additions and 51 deletions

View File

@ -69,6 +69,8 @@ commands are:
* `wpt manifest` - For updating or generating a `MANIFEST.json` test manifest
* `wpt install` - For installing the latest release of a browser or
webdriver server on the local machine.
* `wpt serve-wave` - For starting the wpt http server and the WAVE test runner.
For more details on how to use the WAVE test runner see the [documentation](./tools/wave/docs/usage/usage.md).
<span id="windows-notes">Windows Notes</span>
=============================================

View File

@ -10,3 +10,8 @@ coverage.xml
*~
\#*
runner/MANIFEST.json
# WAVE
!wave/www/lib
!wave/export/lib
!wave/export/css

View File

@ -8,8 +8,16 @@ cd $WPT_ROOT
main() {
    # Convert the shallow CI checkout into a full clone with all remote refs,
    # so tooling that inspects history works.
    git fetch --quiet --unshallow https://github.com/web-platform-tests/wpt.git +refs/heads/*:refs/remotes/origin/*
    pip install --user -U tox codecov
    # wpt commands integration tests
    cd tools/wpt
    tox
    cd $WPT_ROOT
    # WMAS test runner integration tests
    cd tools/wave
    tox
    cd $WPT_ROOT
}
main

View File

@ -1,5 +1,5 @@
[pytest]
norecursedirs = .* {arch} *.egg html5lib third_party pywebsocket six wpt wptrunner
norecursedirs = .* {arch} *.egg html5lib third_party pywebsocket six wave wpt wptrunner
xfail_strict = true
addopts = --strict-markers
markers =

View File

@ -1,2 +1,18 @@
{"serve": {"path": "serve.py", "script": "run", "parser": "get_parser", "help": "Run wptserve server",
"virtualenv": false}}
{
"serve": {
"path": "serve.py",
"script": "run",
"parser": "get_parser",
"help": "Run wptserve server",
"virtualenv": false
},
"serve-wave": {
"path": "wave.py",
"script": "run",
"parser": "get_parser",
"help": "Run wptserve server for WAVE",
"virtualenv": true,
"install": ["ua-parser"],
"requirements": ["../wave/requirements.txt"]
}
}

View File

@ -328,7 +328,8 @@ class RoutesBuilder(object):
self.forbidden = [("*", "/_certs/*", handlers.ErrorHandler(404)),
("*", "/tools/*", handlers.ErrorHandler(404)),
("*", "{spec}/tools/*", handlers.ErrorHandler(404))]
("*", "{spec}/tools/*", handlers.ErrorHandler(404)),
("*", "/results/", handlers.ErrorHandler(404))]
self.extra = []
@ -384,7 +385,7 @@ class RoutesBuilder(object):
self.mountpoint_routes[file_url] = [("GET", file_url, handlers.FileHandler(base_path=base_path, url_base=url_base))]
def build_routes(aliases):
def get_route_builder(aliases, config=None):
builder = RoutesBuilder()
for alias in aliases:
url = alias["url-path"]
@ -396,7 +397,7 @@ def build_routes(aliases):
builder.add_mount_point(url, directory)
else:
builder.add_file_mount_point(url, directory)
return builder.get_routes()
return builder
class ServerProc(object):
@ -450,17 +451,16 @@ class ServerProc(object):
return self.proc.is_alive()
def check_subdomains(config):
def check_subdomains(config, routes):
paths = config.paths
bind_address = config.bind_address
aliases = config.aliases
host = config.server_host
port = get_port()
logger.debug("Going to use port %d to check subdomains" % port)
wrapper = ServerProc()
wrapper.start(start_http_server, host, port, paths, build_routes(aliases),
wrapper.start(start_http_server, host, port, paths, routes,
bind_address, config)
url = "http://{}:{}/".format(host, port)
@ -781,45 +781,6 @@ def iter_procs(servers):
yield server.proc
def build_config(override_path=None, **kwargs):
rv = ConfigBuilder()
enable_http2 = kwargs.get("h2")
if enable_http2 is None:
enable_http2 = True
if enable_http2:
rv._default["ports"]["h2"] = [9000]
if kwargs.get("quic_transport"):
rv._default["ports"]["quic-transport"] = [10000]
if override_path and os.path.exists(override_path):
with open(override_path) as f:
override_obj = json.load(f)
rv.update(override_obj)
if kwargs.get("config_path"):
other_path = os.path.abspath(os.path.expanduser(kwargs.get("config_path")))
if os.path.exists(other_path):
with open(other_path) as f:
override_obj = json.load(f)
rv.update(override_obj)
else:
raise ValueError("Config path %s does not exist" % other_path)
overriding_path_args = [("doc_root", "Document root"),
("ws_doc_root", "WebSockets document root")]
for key, title in overriding_path_args:
value = kwargs.get(key)
if value is None:
continue
value = os.path.abspath(os.path.expanduser(value))
if not os.path.exists(value):
raise ValueError("%s path %s does not exist" % (title, value))
setattr(rv, key, value)
return rv
def _make_subdomains_product(s, depth=2):
return {u".".join(x) for x in chain(*(product(s, repeat=i) for i in range(1, depth+1)))}
@ -926,6 +887,43 @@ class ConfigBuilder(config.ConfigBuilder):
return rv
def build_config(override_path=None, config_cls=ConfigBuilder, **kwargs):
    """Build the server configuration object.

    :param override_path: optional JSON file whose contents override defaults.
    :param config_cls: ConfigBuilder (sub)class to instantiate, so callers
        such as the WAVE runner can supply an extended builder.
    :param kwargs: command-line options; ``h2``, ``quic_transport``,
        ``config_path``, ``doc_root`` and ``ws_doc_root`` are honored here.
    :raises ValueError: if a user-supplied config or doc-root path is missing.
    """
    rv = config_cls()

    # HTTP/2 is enabled unless explicitly disabled (--no-h2 sets h2=False;
    # the default of None means "on").
    enable_http2 = kwargs.get("h2")
    if enable_http2 is None:
        enable_http2 = True
    if enable_http2:
        rv._default["ports"]["h2"] = [9000]

    # get_parser() still accepts --quic-transport; this port assignment was
    # present in the pre-refactor build_config and is restored here so the
    # flag keeps taking effect.
    if kwargs.get("quic_transport"):
        rv._default["ports"]["quic-transport"] = [10000]

    if override_path and os.path.exists(override_path):
        with open(override_path) as f:
            override_obj = json.load(f)
        rv.update(override_obj)

    if kwargs.get("config_path"):
        other_path = os.path.abspath(os.path.expanduser(kwargs.get("config_path")))
        if os.path.exists(other_path):
            with open(other_path) as f:
                override_obj = json.load(f)
            rv.update(override_obj)
        else:
            raise ValueError("Config path %s does not exist" % other_path)

    # Command-line --doc_root / --ws_doc_root override the configured paths,
    # after validating that the directories actually exist.
    overriding_path_args = [("doc_root", "Document root"),
                            ("ws_doc_root", "WebSockets document root")]
    for key, title in overriding_path_args:
        value = kwargs.get(key)
        if value is None:
            continue
        value = os.path.abspath(os.path.expanduser(value))
        if not os.path.exists(value):
            raise ValueError("%s path %s does not exist" % (title, value))
        setattr(rv, key, value)

    return rv
def get_parser():
parser = argparse.ArgumentParser()
parser.add_argument("--latency", type=int,
@ -943,13 +941,16 @@ def get_parser():
parser.add_argument("--no-h2", action="store_false", dest="h2", default=None,
help="Disable the HTTP/2.0 server")
parser.add_argument("--quic-transport", action="store_true", help="Enable QUIC server for WebTransport")
parser.set_defaults(report=False)
parser.set_defaults(is_wave=False)
return parser
def run(**kwargs):
def run(config_cls=ConfigBuilder, route_builder=None, **kwargs):
received_signal = threading.Event()
with build_config(os.path.join(repo_root, "config.json"),
config_cls=config_cls,
**kwargs) as config:
global logger
logger = config.logger
@ -972,8 +973,12 @@ def run(**kwargs):
'local-dir': doc_root,
})
if route_builder is None:
route_builder = get_route_builder
routes = route_builder(config.aliases, config).get_routes()
if config["check_subdomains"]:
check_subdomains(config)
check_subdomains(config, routes)
stash_address = None
if bind_address:
@ -981,7 +986,7 @@ def run(**kwargs):
logger.debug("Going to use port %d for stash" % stash_address[1])
with stash.StashServer(stash_address, authkey=str(uuid.uuid4())):
servers = start(config, build_routes(config["aliases"]), **kwargs)
servers = start(config, routes, **kwargs)
signal.signal(signal.SIGTERM, handle_signal)
signal.signal(signal.SIGINT, handle_signal)

View File

@ -0,0 +1,124 @@
# -*- coding: utf-8 -*-
import subprocess
from manifest import manifest
import localpaths
import logging
import os
try:
from serve import serve
except ImportError:
import serve
from tools.wpt import wpt
# Module-level logger for the WAVE test runner integration. The original
# code prefixed this with a `global logger` statement, which is a no-op at
# module scope and has been removed.
logger = logging.getLogger("wave")
def get_route_builder_func(report):
    """Create a ``route_builder`` callable for serve.run() that layers the
    WAVE test-runner routes on top of the standard wptserve routes.

    :param report: passed through to WaveServer.initialize as
        ``reports_enabled``.
    :return: a function with the signature ``(aliases, config=None)``
        returning a RoutesBuilder.
    """
    def get_route_builder(aliases, config=None):
        # Optional "wave" section of the server configuration.
        wave_cfg = None
        if config is not None and "wave" in config:
            wave_cfg = config["wave"]
        # Start from the default wptserve routes and extend them below.
        builder = serve.get_route_builder(aliases)
        logger.debug("Loading manifest ...")
        data = load_manifest()
        # Imported here rather than at module top — presumably to defer the
        # wave_server dependency until routes are actually built; confirm.
        from ..wave.wave_server import WaveServer
        wave_server = WaveServer()
        wave_server.initialize(
            configuration_file_path=os.path.abspath("./config.json"),
            reports_enabled=report,
            tests=data["items"])

        class WaveHandler(object):
            # Minimal wptserve handler that forwards every request to the
            # WaveServer instance created above.
            def __call__(self, request, response):
                wave_server.handle_request(request, response)

        # Mount the WAVE handler under the configured web root (default
        # "wave"), normalized to start with "/".
        web_root = "wave"
        if wave_cfg is not None and "web_root" in wave_cfg:
            web_root = wave_cfg["web_root"]
        if not web_root.startswith("/"):
            web_root = "/" + web_root
        wave_handler = WaveHandler()
        builder.add_handler("*", web_root + "*", wave_handler)
        # Serve the WAVE-specific testharnessreport.js in place of the stock one.
        file_path = os.path.join(wpt.localpaths.repo_root, "tools/wave/resources/testharnessreport.js")
        builder.add_static(
            file_path,
            {},
            "text/javascript;charset=utf8",
            "/resources/testharnessreport.js")
        return builder
    return get_route_builder
class ConfigBuilder(serve.ConfigBuilder):
    """serve.ConfigBuilder extended with WAVE-specific defaults under "wave"."""

    # Copy the parent's defaults before updating: the original code bound
    # _default to serve.ConfigBuilder._default and then called .update() on
    # it, mutating the base class's shared dict for every other user of
    # serve.ConfigBuilder. A shallow copy is sufficient because only a new
    # top-level "wave" key is added.
    _default = serve.ConfigBuilder._default.copy()
    _default.update({
        "wave": {  # wave specific configuration parameters
            "results": "./results",
            "timeouts": {
                "automatic": 60000,
                "manual": 300000
            },
            "enable_results_import": False,
            "web_root": "/_wave",
            "persisting_interval": 20,
            "api_titles": []
        }
    })
def get_parser():
    """Return serve's argument parser extended with WAVE-specific options."""
    parser = serve.get_parser()
    # Added wave specific arguments
    parser.add_argument("--report", action="store_true", dest="report",
                        help="Flag for enabling the WPTReporting server")
    return parser
def run(venv=None, **kwargs):
    """Entry point for ``wpt serve-wave``: start serve.run with the WAVE
    ConfigBuilder and route builder.

    :param venv: virtualenv wrapper supplied by the wpt command machinery;
        required because the command declares ``"virtualenv": true``.
    :param kwargs: parsed command-line options; expects a ``report`` key,
        which get_parser() guarantees via its --report default.
    :raises Exception: if no virtualenv is provided, or if --report was
        given but the external wptreport tool is not installed.
    """
    if venv is not None:
        venv.start()
    else:
        raise Exception("Missing virtualenv for serve-wave.")

    if kwargs['report'] is True:
        if not is_wptreport_installed():
            raise Exception("wptreport is not installed. Please install it from https://github.com/w3c/wptreport")

    serve.run(config_cls=ConfigBuilder,
              route_builder=get_route_builder_func(kwargs["report"]), **kwargs)
def is_wptreport_installed():
    """Return True when the external ``wptreport`` tool can be executed.

    Probes the tool by invoking ``wptreport --help``; any failure (missing
    binary, non-zero exit status) is treated as "not installed".
    """
    try:
        subprocess.check_output(["wptreport", "--help"])
    except Exception:
        return False
    return True
def load_manifest():
    """Load (updating if stale) the repo's MANIFEST.json and return test
    entries grouped by type.

    :return: dict of the form ``{"items": {type: {path: [test_data, ...]}},
        "url_base": "/"}`` where each test_data is ``[url_without_leading_slash,
        extras_dict]`` (extras carries "timeout" for non-manual tests).
    """
    root = localpaths.repo_root
    path = os.path.join(root, "MANIFEST.json")
    manifest_file = manifest.load_and_update(root, path, "/", parallel=False)

    supported_types = ["testharness", "manual"]
    data = {"items": {},
            "url_base": "/"}
    for item_type in supported_types:
        data["items"][item_type] = {}
    for item_type, path, tests in manifest_file.itertypes(*supported_types):
        tests_data = []
        for item in tests:
            # Strip the leading "/" from the manifest URL.
            test_data = [item.url[1:]]
            # NOTE(review): only "testharness" and "manual" are iterated
            # above, so this reftest branch looks unreachable here — confirm
            # before relying on it.
            if item_type == "reftest":
                test_data.append(item.references)
            test_data.append({})
            if item_type != "manual":
                test_data[-1]["timeout"] = item.timeout
            tests_data.append(test_data)
        # Each manifest path must appear at most once per type.
        assert path not in data["items"][item_type]
        data["items"][item_type][path] = tests_data

    return data

View File

@ -0,0 +1,45 @@
{
"browser_host": "web-platform.test",
"alternate_hosts": {
"alt": "not-web-platform.test"
},
"doc_root": ".",
"ws_doc_root": "./websockets/handlers",
"server_host": null,
"ports": {
"http": [8000, "auto"],
"https": [8443],
"ws": ["auto"],
"wss": ["auto"]
},
"check_subdomains": true,
"log_level": "debug",
"bind_address": true,
"ssl": {
"type": "pregenerated",
"encrypt_after_connect": false,
"openssl": {
"openssl_binary": "openssl",
"base_path": "_certs",
"force_regenerate": false,
"base_conf_path": null
},
"pregenerated": {
"host_key_path": "./tools/certs/web-platform.test.key",
"host_cert_path": "./tools/certs/web-platform.test.pem"
},
"none": {}
},
"aliases": [],
"wave": {
"results": "./results",
"timeouts": {
"automatic": 60000,
"manual": 300000
},
"enable_results_import": false,
"web_root": "/_wave",
"persisting_interval": 20,
"api_titles": []
}
}

View File

@ -0,0 +1,81 @@
from __future__ import absolute_import
from __future__ import unicode_literals
import json
import os
from io import open
from tools.wpt import wpt
DEFAULT_CONFIGURATION_FILE_PATH = os.path.join(wpt.localpaths.repo_root, "./tools/wave/config.default.json")
def load(configuration_file_path):
    """Load the WAVE configuration, filling gaps from the default config file.

    Reads the user's configuration file (if a path is given) and flattens the
    values the WAVE server needs onto the returned dict, falling back to
    ``config.default.json`` for anything not present.

    :param configuration_file_path: path to the user's config.json, or a
        falsy value to use only the defaults.
    :return: the (mutated) user configuration dict with derived keys added
        (wpt_port, wpt_ssl_port, web_root, timeouts, hostname, ...).
    """
    configuration = {}
    if configuration_file_path:
        configuration = load_configuration_file(configuration_file_path)
    default_configuration = load_configuration_file(
        DEFAULT_CONFIGURATION_FILE_PATH)

    def lookup(*keys):
        # Walk `keys` through the user configuration, falling back to the
        # corresponding subtree of the default configuration at each level.
        # This mirrors exactly the chained dict.get(...) lookups that were
        # previously duplicated for every setting.
        value = configuration
        fallback = default_configuration
        for key in keys:
            fallback = fallback[key]
            value = value.get(key, fallback)
        return value

    configuration["wpt_port"] = lookup("ports", "http")[0]
    configuration["wpt_ssl_port"] = lookup("ports", "https")[0]

    # Normalize the web root to "/name/" form.
    web_root = lookup("wave", "web_root")
    if not web_root.startswith("/"):
        web_root = "/" + web_root
    if not web_root.endswith("/"):
        web_root += "/"
    configuration["web_root"] = web_root

    configuration["results_directory_path"] = lookup("wave", "results")

    configuration["timeouts"] = {
        "automatic": lookup("wave", "timeouts", "automatic"),
        "manual": lookup("wave", "timeouts", "manual"),
    }

    configuration["hostname"] = lookup("browser_host")
    configuration["import_enabled"] = lookup("wave", "enable_results_import")
    configuration["persisting_interval"] = lookup("wave", "persisting_interval")
    configuration["tests_directory_path"] = os.getcwd()
    configuration["manifest_file_path"] = os.path.join(
        os.getcwd(), "MANIFEST.json")
    configuration["api_titles"] = lookup("wave", "api_titles")
    return configuration
def load_configuration_file(path):
    """Parse the JSON file at ``path``; return an empty dict if no such
    regular file exists."""
    if not os.path.isfile(path):
        return {}
    with open(path, "r") as configuration_file:
        return json.loads(configuration_file.read())

View File

@ -0,0 +1,6 @@
class Client(object):
    """Base class for a client attached to a test session.

    Concrete subclasses implement a transport (e.g. HTTP polling) by
    overriding send_message.
    """

    def __init__(self, session_token):
        # Token of the session this client belongs to.
        self.session_token = session_token

    def send_message(self, message):
        """Deliver *message* to the client; transports must override this."""
        # NotImplementedError is the idiomatic exception for an abstract
        # method; it is still an Exception subclass, so any existing
        # `except Exception` handlers keep working.
        raise NotImplementedError("Client.send_message(message) not implemented!")

View File

@ -0,0 +1,2 @@
class DuplicateException(Exception):
    """Error signaling a duplicate; exact semantics are defined by raisers."""

View File

@ -0,0 +1,2 @@
class InvalidDataException(Exception):
    """Error signaling invalid data; exact semantics are defined by raisers."""

View File

@ -0,0 +1,2 @@
class NotFoundException(Exception):
    """Error signaling a missing resource; semantics defined by raisers."""

View File

@ -0,0 +1,2 @@
class PermissionDeniedException(Exception):
    """Error signaling denied access; exact semantics defined by raisers."""

View File

@ -0,0 +1,11 @@
from .client import Client
class HttpPollingClient(Client):
    """Session client whose messages are delivered via HTTP long polling.

    A pending poll request blocks on ``event``; ``send_message`` records the
    message on the instance and then sets the event to release that request.
    """

    def __init__(self, session_token, event):
        # Delegate session-token bookkeeping to the base Client.
        Client.__init__(self, session_token)
        # Event object the pending poll request waits on.
        self.event = event

    def send_message(self, message):
        # Store first, then signal: the waiter reads self.message on wake-up.
        self.message = message
        self.event.set()

View File

@ -0,0 +1,86 @@
from __future__ import absolute_import
from __future__ import unicode_literals
from ..testing.test_loader import MANUAL, AUTOMATIC
# Lifecycle states stored in Session.status.
PAUSED = "paused"
RUNNING = "running"
COMPLETED = "completed"
ABORTED = "aborted"
PENDING = "pending"
# Fallback status for sessions whose state has not been determined.
UNKNOWN = "unknown"
class Session(object):
    """Record of a single WAVE test session's state and configuration.

    Every constructor argument defaults to None; each None is replaced by a
    fresh per-instance value below, so no mutable state is ever shared
    between Session objects.
    """

    def __init__(
        self,
        token=None,
        types=None,
        user_agent=None,
        labels=None,
        tests=None,
        pending_tests=None,
        running_tests=None,
        timeouts=None,
        status=None,
        test_state=None,
        last_completed_test=None,
        recent_completed_count=None,
        date_started=None,
        date_finished=None,
        is_public=None,
        reference_tokens=None,
        browser=None,
        webhook_urls=None,
        expiration_date=None,
        malfunctioning_tests=None
    ):
        self.token = token if token is not None else ""
        self.types = types if types is not None else [AUTOMATIC, MANUAL]
        self.user_agent = user_agent if user_agent is not None else ""
        self.labels = labels if labels is not None else []
        self.tests = tests if tests is not None else {}
        self.pending_tests = pending_tests if pending_tests is not None else {}
        self.running_tests = running_tests if running_tests is not None else {}
        self.timeouts = timeouts if timeouts is not None else {}
        self.status = status if status is not None else UNKNOWN
        self.test_state = test_state if test_state is not None else {}
        # Plain pass-throughs: None is a meaningful "not yet" value here.
        self.last_completed_test = last_completed_test
        self.recent_completed_count = (
            recent_completed_count if recent_completed_count is not None else 0)
        self.date_started = date_started
        self.date_finished = date_finished
        self.is_public = is_public if is_public is not None else False
        self.reference_tokens = (
            reference_tokens if reference_tokens is not None else [])
        self.browser = browser
        self.webhook_urls = webhook_urls if webhook_urls is not None else []
        self.expiration_date = expiration_date
        self.malfunctioning_tests = (
            malfunctioning_tests if malfunctioning_tests is not None else [])

View File

@ -0,0 +1,4 @@
# WAVE Test Suite Documentation
- [REST API](./rest-api/README.md)
- [Usage Guide](./usage/usage.md)

Binary file not shown.

After

Width:  |  Height:  |  Size: 124 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 137 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 136 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 126 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 118 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 106 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 88 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 103 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 126 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 55 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 124 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 126 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 96 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 85 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 87 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 76 KiB

View File

@ -0,0 +1,54 @@
# REST API - [WAVE Test Suite](../README.md)
The REST API allows the WAVE server to be integrated into other systems. Every
call must be preceded with a namespace or web root, which is omitted in this
documentation. The default web root is `/_wave`, which can be changed in the
config.json using the keyword `web_root`.
## Sessions API <a name="sessions-api"></a>
| Name | Description |
| ---------------------------------------------- | ---------------------------------------------------- |
| [`create`](./sessions-api/create.md) | Creates a new test session. |
| [`read`](./sessions-api/read.md) | Reads a sessions configuration. |
| [`read public`](./sessions-api/read-public.md) | Reads all public sessions tokens. |
| [`update`](./sessions-api/update.md) | Updates a session configuration. |
| [`delete`](./sessions-api/delete.md) | Deletes a test session. |
| [`status`](./sessions-api/status.md) | Reads the status and progress of a session. |
| [`start`](./sessions-api/control.md#start) | Starts a test session. |
| [`stop`](./sessions-api/control.md#stop) | Stops a test session. |
| [`pause`](./sessions-api/control.md#pause) | Pauses a test session. |
| [`find`](./sessions-api/find.md) | Finds a session token by providing a token fragment. |
| [`labels`](./sessions-api/labels.md) | Attach labels to sessions for organization purposes. |
| [`events`](./sessions-api/events.md) | Register for sessions specific events. |
## Tests API <a name="tests-api"></a>
| Name | Description |
| --------------------------------------------------------------- | ------------------------------------------------------ |
| [`read all`](./tests-api/read-all.md) | Reads all tests available. |
| [`read session`](./tests-api/read-session.md) | Reads all tests that are part of a session. |
| [`read next`](./tests-api/read-next.md) | Reads the next test to run in a session. |
| [`read last completed`](./tests-api/read-last-completed.md) | Reads the last completed tests of a session. |
| [`read malfunctioning`](./tests-api/read-malfunctioning.md) | Reads the list of malfunctioning tests of a session. |
| [`update malfunctioning`](./tests-api/update-malfunctioning.md) | Updates the list of malfunctioning tests of a session. |
| [`read available apis`](./tests-api/read-available-apis.md) | Reads all available APIs names and paths. |
## Results API <a name="results-api"></a>
| Name | Description |
| ---------------------------------------------------------------------------- | ------------------------------------------------------------------------------- |
| [`create`](./results-api/create.md) | Create a new test result for a test in a session. |
| [`read`](./results-api/read.md) | Read all test results of a session. |
| [`read compact`](./results-api/read-compact.md) | Read the number of passed, failed, timed out and not run tests of a session. |
| [`config`](./results-api/config.md) | Read what features of the results API are enabled. |
| [`import`](./results-api/import.md) | Import session results. |
| [`import enabled`](./results-api/import.md#2-import-enabled) | Check whether or not the import feature is enabled. |
| [`download`](./results-api/download.md#1-download) | Download all session results to import into other WMATS instance. |
| [`download api`](./results-api/download.md#2-download-api) | Download all results of an API. |
| [`download all apis`](./results-api/download.md#3-download-all-apis) | Download all results of all APIs. |
| [`view report`](./results-api/download.md#4-download-report) | View the WPT report of an API of a session. |
| [`view multi report`](./results-api/download.md#5-download-multi-report) | View the WPT report of an API of multiple sessions. |
| [`download overview`](./results-api/download.md#6-download-overview) | Download an overview of results of all APIs of a session. |
| [`view report`](./results-api/view.md#1-view-report)                         | Read a URL to a hosted version of a WPT report for an API of a session.         |
| [`view multi report`](./results-api/view.md#2-view-multi-report)             | Read a URL to a hosted version of a WPT report for an API of multiple sessions. |

View File

@ -0,0 +1,34 @@
# `config` - [Results API](../README.md#results-api)
The `config` method is used to determine what features of the results API are
enabled. Features that can be enabled or disabled are the
[`import`](./import.md) method and the generation of reports and therefore
[`download and view`](./download.md) methods.
## HTTP Request
`GET /api/results/config`
## Response
```json
{
"import_enabled": "Boolean",
"reports_enabled": "Boolean"
}
```
## Example
**Request:**
`GET /api/results/config`
**Response:**
```json
{
"import_enabled": false,
"reports_enabled": true
}
```

View File

@ -0,0 +1,65 @@
# `create` - [Results API](../README.md#results-api)
The `create` method of the results API creates a test result for a given test of a test session.
## HTTP Request
`POST /api/results/<session_token>`
## Request Payload
```json
{
"test": "String",
"status": "Enum['OK', 'ERROR', 'TIMEOUT', 'NOT_RUN']",
"message": "String",
"subtests": [
{
"name": "String",
"status": "Enum['PASS', 'FAIL', 'TIMEOUT', 'NOT_RUN']",
"message": "String"
}
]
}
```
- **test** specifies the test to create the result for.
- **status** specifies the overall status of the test. It does not represent a result, but rather if the contained tests were executed as intended or if something went wrong running the test.
- **OK**: All tests were executed without problems.
- **ERROR**: There was an error running one or multiple tests.
- **TIMEOUT**: It took too long for the tests to execute.
- **NOT_RUN**: This test was skipped.
- **message** contains the reason for the overall status. If the status is `OK` the message should be `null`.
- **subtests** contains the actual results of the tests executed in this file.
- **name**: The name of the test.
- **status**: The status of the result:
- **PASS**: The test was executed successfully.
- **FAIL**: The test did not meet at least one assertion.
- **TIMEOUT**: It took too long for this test to execute.
- **NOT_RUN**: This test was skipped.
- **message** contains the reason for the tests failure.
## Example
**Request:**
`POST /api/results/d89bcc00-c35b-11e9-8bb7-9e3d7595d40c`
```json
{
"test": "/apiOne/test/one.html",
"status": "OK",
"message": null,
"subtests": [
{
"name": "Value should be X",
"status": "FAIL",
"message": "Expected value to be X but got Y"
}
]
}
```
**Response:**
`200 OK`

View File

@ -0,0 +1,127 @@
# Downloading and viewing results and reports - [Results API](../README.md#results-api)
There are multiple methods to download or view reports generated by the WPT
Report tool or just the plain json results with the structure as described in
the [`create`](./create.md) method of the results API.
## 1. `download`
Downloads all results of a session as ZIP, which other instances of the WMAS
Test Suite can import.
### HTTP Request
`GET /api/results/<session_token>/export`
### Example
`GET /api/results/f63700a0-c35f-11e9-af33-9e0d4c1f1370/export`
## 2. `download api`
Downloads all results of a single API in one json file.
### HTTP Request
`GET /api/results/<session_token>/<api_name>/json`
### File Structure
```json
{
"results": [
{
"test": "String",
"status": "Enum['OK', 'ERROR', 'TIMEOUT', 'NOT_RUN']",
"message": "String",
"subtests": [
{
"name": "String",
"status": "Enum['PASS', 'FAIL', 'TIMEOUT', 'NOT_RUN']",
"message": "String"
}
]
}
]
}
```
Results are structured as explained in the [`create`](./create.md) method of the results API.
### Example
`GET /api/results/f63700a0-c35f-11e9-af33-9e0d4c1f1370/apiOne/json`
## 3. `download all apis`
Downloads all results of all APIs of a session as zip file containing one json file per API.
### HTTP Request
`GET /api/results/<session_token>/json`
### File Structure
There is one json file per API, each structured as described in the [`download api`](#download-api) method.
### Example
`GET /api/results/f63700a0-c35f-11e9-af33-9e0d4c1f1370/json`
## 4. `view report`
Returns a URL to a report of an API of a session, generated by the WPT Report tool, which is a static HTML page.
### HTTP Request
`GET /api/results/<session_token>/<api_name>/reporturl`
### Example
`GET /api/results/f63700a0-c35f-11e9-af33-9e0d4c1f1370/apiOne/reporturl`
**Response**
```json
{
"uri": "/results/8f7f2fdc-62eb-11ea-8615-b8ca3a7b18ad/2dcontext/all.html"
}
```
## 5. `view multi report`
Returns a URL to a report of an API of multiple sessions, generated by the WPT Report tool, which is a static HTML page.
### HTTP Request
`GET /api/results/<api_name>/reporturl`
### Query Parameters
| Parameter | Description | Default | Example |
| --------- | ------------------------------------------------------------ | ------- | -------------------------------- |
| `tokens` | Comma separated list of tokens to create a multi report for. | none | `tokens=token_a,token_b,token_c` |
### Example
`GET /api/results/apiOne/reporturl?tokens=8f7f2fdc-62eb-11ea-8615-b8ca3a7b18ad,990b4734-62eb-11ea-a9a5-b8ca3a7b18ad`
**Response**
```json
{
"uri": "/results/comparison-8f7f2fdc-990b473401488e04/reporturl/all.html"
}
```
## 6. `download overview`
Downloads a zip file containing an overview for all APIs results of a session as a static HTML page.
### HTTP Request
`GET /api/results/<session_token>/overview`
### Example
`GET /api/results/f63700a0-c35f-11e9-af33-9e0d4c1f1370/overview`

View File

@ -0,0 +1,45 @@
# Import results - [Results API](../README.md#results-api)
If enabled, the WMAS Test Suite can import results exported by any arbitrary other instance.
## 1. `import`
Import a session's results from a ZIP file.
### HTTP Request
`POST /api/results/import`
### HTTP Response
If successful, the server responds with the token of the imported session:
```json
{
"token": "String"
}
```
However, if an error occurred, the server responds with the error message:
```json
{
"error": "String"
}
```
## 2. `import enabled`
To check whether or not the import feature is enabled, the `import enabled` method returns the state as JSON.
### HTTP Request
`GET /api/results/import`
### Response
```json
{
"enabled": "Boolean"
}
```

View File

@ -0,0 +1,59 @@
# `read compact` - [Results API](../README.md#results-api)
The `read compact` method of the results API returns the number of passed, failed, timed out and not run tests per API of a session.
## HTTP Request
`GET /api/results/<session_token>/compact`
## Response Payload
```json
{
"<api_name>": {
"pass": "Integer",
"fail": "Integer",
"timeout": "Integer",
"not_run": "Integer",
"total": "Integer",
"complete": "Integer"
}
}
```
## Example
**Request:**
`GET /api/results/620bbf70-c35e-11e9-bf9c-742c02629054/compact`
**Response:**
```json
{
"apiOne": {
"pass": 311,
"fail": 59,
"timeout": 23,
"not_run": 20,
"total": 481,
"complete": 413
},
"apiTwo": {
"pass": 548,
"fail": 129,
"timeout": 53,
"not_run": 36,
"total": 766,
"complete": 766
},
"apiThree": {
"pass": 349,
"fail": 45,
"timeout": 14,
"not_run": 9,
"total": 523,
"complete": 417
}
}
```

View File

@ -0,0 +1,63 @@
# `read` - [Results API](../README.md#results-api)
The `read` method of the results API returns all available results of a session, grouped by API. It is possible to filter the results to return by test directory or file.
## HTTP Request
`GET /api/results/<session_token>`
## Query Parameters
| Parameter | Description | Default | Example |
| --------- | ------------------------------ | ------- | --------------------------- |
| `path` | Path of test directory or file | `/` | `path=/apiOne/test/sub/dir` |
## Response Payload
```json
{
"<api_name>": [
{
"test": "String",
"status": "Enum['OK', 'ERROR', 'TIMEOUT', 'NOT_RUN']",
"message": "String",
"subtests": [
{
"name": "String",
"status": "Enum['PASS', 'FAIL', 'TIMEOUT', 'NOT_RUN']",
"message": "String"
}
]
}
]
}
```
Arrays of results grouped by their respective APIs. Structure of results is the same as described in the [`create`](./create.md) method of the results API.
## Example
**Request:**
`GET /api/results/974c84e0-c35d-11e9-8f8d-47bb5bb0037d?path=/apiOne/test/one.html`
**Response:**
```json
{
"apiOne": [
{
"test": "/apiOne/test/one.html",
"status": "OK",
"message": null,
"subtests": [
{
"name": "Value should be X",
"status": "FAIL",
"message": "Expected value to be X but got Y"
}
]
}
]
}
```

View File

@ -0,0 +1,61 @@
# Viewing Reports - [Results API](../README.md#results-api)
It is possible to view the reports generated by the WPT Report tool directly in the browser using a version of the report that is hosted by the WAVE server. The methods listed here return urls to those hosted reports.
## 1. `view report`
Returns a URL to a report for an API of a single session, generated by the WPT Report tool.
### HTTP Request
`GET /api/results/<session_token>/<api_name>/reporturl`
### Response Payload
```json
{
"uri": "String"
}
```
### Example
**Request:**
`GET /api/results/d9caaae0-c362-11e9-943f-eedb305f22f6/apiOne/reporturl`
**Response:**
```json
{
"uri": "/results/d9caaae0-c362-11e9-943f-eedb305f22f6/apiOne/all.html"
}
```
## 2. `view multi report`
Returns a URL to a report for an API of multiple session, generated by the WPT Report tool.
### HTTP Request
`GET /api/results/<api_name>/reporturl`
### Query Parameters
| Parameter | Description | Default | Example |
| --------- | ------------------------------------------------------------ | ------- | -------------------------------- |
| `tokens` | Comma separated list of tokens to create a multi report for. | none | `tokens=token_a,token_b,token_c` |
### Example
**Request:**
`GET /api/results/apiOne/reporturl?tokens=ce2dc080-c283-11e9-b4d6-e046513784c2,cd922410-c344-11e9-858f-9063f6dd878f`
**Response:**
```json
{
"uri": "/results/comparison-cd922410-ce2dc080-1709d631/apiOne/all.html"
}
```

View File

@ -0,0 +1,25 @@
# Controlling Sessions - [Sessions API](../README.md#sessions-api)
It is possible to control the execution of tests on the device under test using the session APIs control methods. They change the status of a session and trigger the device under test to fetch a new url to change location to. Depending on the current status of the session this can be a test or a static page showing information about the current status.
## `start`
The `start` method changes the status of a session from either `PENDING` or `PAUSED` to `RUNNING` and triggers the device under test to execute tests when resuming a paused session.
### HTTP Request
`POST /api/sessions/<session_token>/start`
## `pause`
The `pause` method changes the status of a session from `RUNNING` to `PAUSED` and pauses the execution of tests on the device under test.
### HTTP Request
`POST /api/sessions/<session_token>/pause`
## `stop`
The `stop` method finishes a session early by skipping all pending tests, causing a change of the status to `ABORTED`. It is not possible to undo this action and can only be performed on sessions that are not `ABORTED` or `COMPLETED`.
### HTTP Request
`POST /api/sessions/<session_token>/stop`

View File

@ -0,0 +1,101 @@
# `create` - [Sessions API](../README.md#sessions-api)
The `create` method of the sessions API creates a new session. If provided with a configuration it creates a session accordingly. If no configuration is provided it uses default values. It returns the session token of the newly created session, which is the unique identifier of sessions. While a session has the status `PENDING` it is possible to modify the configuration using the [`update`](./update.md) method of the sessions API. As it is required to create the session from the device under test, this is really helpful, since it allows configuring the session using a second device.
## HTTP Request
`POST /api/sessions`
## Request Payload
```json
{
"tests": {
"include": "Array<String>",
"exclude": "Array<String>"
},
"types": "Enum['automatic', 'manual']",
"timeouts": {
"automatic": "Integer",
"manual": "Integer",
"<test_path>": "Integer"
},
"reference_tokens": "Array<String>",
"labels": "Array<String>"
}
```
- **tests** specifies the tests of the session:
- **include** specifies what tests should be selected from all available tests. Can be a path to a test file or directory.
- **exclude** specifies what tests should be removed from the included tests. Can be a path to a test file or directory.
- **types** what types of tests should be included. Possible values:
- **automatic** tests are tests that execute without user interaction.
- **manual** tests are tests that require user interaction.
- **timeouts** specifies the time to wait for a test to finish in milliseconds.
- **automatic**: Sets the default timeout for all automatic tests.
- **manual**: Sets the default timeout for all manual tests.
- **custom test paths**: Set the timeout for a test file or directory by putting the path with all dots removed as the key.
- **reference_tokens** specifies a set of completed sessions that is used to filter out all tests that have not passed in all those sessions from the session that is going to be created.
- **labels** specifies the initial set of labels for the session.
### Default
```json
{
"tests": {
"include": ["/"],
"exclude": []
},
"types": ["automatic", "manual"],
"timeouts": {
"automatic": 60000,
"manual": 300000
},
"reference_tokens": [],
"labels": []
}
```
## Response Payload
If successful, the token of the new session is returned.
```json
{
"token": "String"
}
```
## Example
**Request:**
`POST /api/sessions`
```json
{
"tests": {
"include": ["/apiOne", "/apiTwo/sub"],
"exclude": ["/apiOne/specials"]
},
"types": ["automatic"],
"timeouts": {
"automatic": 70000,
"/apiOne/example/dir": 30000,
"/apiOne/example/filehtml": 45000
},
"reference_tokens": [
"ce2dc080-c283-11e9-b4d6-e046513784c2",
"430f47d0-c283-11e9-8776-fcbc36b81035"
],
"labels": ["label1", "label2", "label3"]
}
```
**Response:**
```json
{
"token": "6fdbd1a0-c339-11e9-b775-6d49dd567772"
}
```

View File

@ -0,0 +1,25 @@
# `delete` - [Sessions API](../README.md#sessions-api)
The `delete` method of the sessions API is used to delete a session and single results associated with it. However artifacts like generated reports or JSON files containing results of a whole API remain, therefore urls to those resources are still working.
## HTTP Request
`DELETE /api/sessions/<session_token>`
## Example
**Request:**
`DELETE /api/sessions/1592b880-c339-11e9-b414-61af09c491b1`
**Response:**
`200 OK`
**Request:**
`GET /api/sessions/1592b880-c339-11e9-b414-61af09c491b1`
**Response:**
`404 NOT FOUND`

View File

@ -0,0 +1,34 @@
# `events` - [Sessions API](../README.md#sessions-api)
Listen for session specific events by registering on the `events` endpoint using HTTP long polling.
## HTTP Request
`GET /api/sessions/<token>/events`
## Response Payload
```json
{
"type": "String",
"data": "String"
}
```
- **type**: the type of event that occurred.
- **data**: the actual payload of the event
## Example
**Request**
`GET /api/sessions/6fdbd1a0-c339-11e9-b775-6d49dd567772/events`
**Response**
```json
{
"type": "status",
"data": "paused"
}
```

View File

@ -0,0 +1,29 @@
# `find` - [Sessions API](../README.md#sessions-api)
The `find` method of the sessions API searches for a session token using a provided token fragment, which is the beginning of a session token with at least 8 characters. Due to data protection, it is not possible to find multiple tokens using one fragment. If the server finds more than one session token, it returns none. In this case more characters need to be added to the fragment, until it matches only one session token.
## HTTP Request
`GET /api/sessions/<token_fragment>`
## Response Payload
```json
{
"token": "String"
}
```
### Example
**Request:**
`GET /api/sessions/afd4ecb0`
**Response:**
```json
{
"token": "afd4ecb0-c339-11e9-b66c-eca76c2bea9c"
}
```

View File

@ -0,0 +1,75 @@
# `labels` - [Sessions API](../README.md#sessions-api)
The `labels` methods of the sessions API allow for better organization of sessions.
## Read labels
Reads all labels of a session.
### HTTP Request
`GET /api/sessions/<token>/labels`
### Response Payload
```json
"Array<String>"
```
#### Example
**Request:**
`GET /api/sessions/afd4ecb0-c339-11e9-b66c-eca76c2bea9c/labels`
**Response:**
```json
["label1", "label2", "label3"]
```
## Update labels
Update all labels of a session.
### HTTP Request
`PUT /api/sessions/<token>/labels`
### Request Payload
```json
"Array<String>"
```
The array of labels provided in the request payload will replace all existing labels of the session.
#### Example
**Request:**
`GET /api/sessions/afd4ecb0-c339-11e9-b66c-eca76c2bea9c/labels`
**Response:**
```json
["label1", "label2", "label3"]
```
**Request:**
`PUT /api/sessions/afd4ecb0-c339-11e9-b66c-eca76c2bea9c/labels`
```json
["label4", "label5"]
```
**Request:**
`GET /api/sessions/afd4ecb0-c339-11e9-b66c-eca76c2bea9c/labels`
**Response:**
```json
["label4", "label5"]
```

View File

@ -0,0 +1,30 @@
# `read public` - [Sessions API](../README.md#sessions-api)
The `read public` method of the sessions API fetches a list of all sessions that are publicly available. It is not possible to delete those sessions using the user interface or the REST API. Currently there is no way to change the is-public state of a session using the API.
## HTTP Request
`GET /api/sessions/public`
## Response Payload
```json
"Array<String>"
```
## Example
**Request:**
`GET /api/sessions/public`
**Response:**
```json
[
"bb7aafa0-6a92-11e9-8ec2-04f58dad2e4f",
"caf823e0-6a92-11e9-b732-3188d0065ebc",
"a50c6db0-6a94-11e9-8d1b-e23fc4555885",
"b2924d20-6a93-11e9-98b4-a11fb92a6d1c"
]
```

View File

@ -0,0 +1,84 @@
# `read` - [Sessions API](../README.md#sessions-api)
The `read` method of the sessions API fetches the configuration of a session, including values that can not be set by the user, but are created by the server upon creation.
## HTTP Request
`GET /api/sessions/<session_token>`
## Response Payload
```json
{
"token": "String",
"tests": {
"include": "Array<String>",
"exclude": "Array<String>"
},
"types": "Enum['automatic', 'manual']",
"timeouts": {
"automatic": "Integer",
"manual": "Integer",
"<test_path>": "Integer"
},
"reference_tokens": "Array<String>",
"user_agent": "String",
"browser": {
"name": "String",
"version": "String"
},
"is_public": "Boolean"
}
```
- **token** is the unique identifier of the session.
- **tests** specifies the tests of the session:
- **include** specifies what tests should be selected from all available tests. Can be a path to a test file or directory.
- **exclude** specifies what tests should be removed from the included tests. Can be a path to a test file or directory.
- **types** what types of tests should be included. Possible values:
- **automatic** tests are tests that execute without user interaction.
- **manual** tests are tests that require user interaction.
- **timeouts** specifies the time to wait for a test to finish in milliseconds.
- **automatic**: Sets the default timeout for all automatic tests.
- **manual**: Sets the default timeout for all manual tests.
- **custom test paths**: Set the timeout for a test file or directory by putting the path with all dots removed as the key.
- **reference_tokens** specifies a set of completed sessions that is used to filter out all tests that have not passed in all those sessions from the session that is going to be created.
- **user_agent** is the user agent string of the request that created the session. The request to create the session should be performed by the device under test.
- **browser** holds information about the browser, parsed from the user agent.
- **name**: The name of the browser.
- **version**: The version numbers of the browser.
- **is_public** defines whether or not the session is listed when fetching the list of public session using [`read public`](./read-public.md).
## Example
**Request:**
`GET /api/sessions/47a6fa50-c331-11e9-8709-a8eaa0ecfd0e`
**Response:**
```json
{
"token": "47a6fa50-c331-11e9-8709-a8eaa0ecfd0e",
"tests": {
"include": ["/apiOne", "/apiTwo/sub"],
"exclude": ["/apiOne/specials"]
},
"types": ["automatic"],
"timeouts": {
"automatic": 70000,
"/apiOne/example/dir": 30000,
"/apiOne/example/filehtml": 45000
},
"reference_tokens": [
"ce2dc080-c283-11e9-b4d6-e046513784c2",
"430f47d0-c283-11e9-8776-fcbc36b81035"
],
"user_agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/76.0.3809.100 Chrome/76.0.3809.100 Safari/537.36",
"browser": {
"name": "Chromium",
"version": "76"
},
"is_public": "false"
}
```

View File

@ -0,0 +1,48 @@
# `status` - [Sessions API](../README.md#sessions-api)
The `status` method of the sessions API returns information about a session's current status and progress.
## HTTP Request
`GET /api/sessions/<session_token>/status`
## Response Payload
```json
{
"token": "String",
"status": "Enum['pending', 'running', 'paused', 'completed', 'aborted']",
"date_started": "String",
"date_finished": "String",
"expiration_date": "String"
}
```
- **token** contains the token of the session corresponding to this status.
- **status** specifies the current status of the session:
- **pending**: The session was created, can receive updates, however cannot execute tests.
- **running**: The session currently executes tests.
- **paused**: The execution of tests in this session is currently paused.
- **completed**: All test files included in this session were executed and have a result.
- **aborted**: The session was finished before all tests were executed.
- **date_started** contains the time the status changed from `PENDING` to `RUNNING` in unix epoch time milliseconds.
- **date_finished** contains the time the status changed to either `COMPLETED` or `ABORTED` in unix epoch time milliseconds.
- **expiration_date** contains the time at which the sessions will be deleted
## Example
**Request:**
`GET /api/sessions/d9caaae0-c362-11e9-943f-eedb305f22f6/status`
**Response:**
```json
{
"token": "d9caaae0-c362-11e9-943f-eedb305f22f6",
"status": "running",
"date_started": "1567606879230",
"date_finished": null,
"expiration_date": "1567607179230"
}
```

View File

@ -0,0 +1,102 @@
# `update` - [Sessions API](../README.md#sessions-api)
The `update` method of the sessions API makes it possible to modify a sessions configuration while its status is `PENDING`. This can be used to configure the session on a second device, rather than on the device under test.
## HTTP Request
`PUT /api/sessions/<session_token>`
## Request Payload
The request payload is the same as in the [`create`](./create.md) method of the sessions API. Only keys that are an inherent part of the configuration will stay the same if not specified in the `update` payload. All others will be deleted if not included.
## Example
**Request:**
`GET /api/sessions/47a6fa50-c331-11e9-8709-a8eaa0ecfd0e`
**Response:**
```json
{
"token": "47a6fa50-c331-11e9-8709-a8eaa0ecfd0e",
"tests": {
"include": ["/apiOne", "/apiTwo/sub"],
"exclude": ["/apiOne/specials"]
},
"types": ["automatic"],
"timeouts": {
"automatic": 70000,
"/apiOne/example/dir": 30000,
"/apiOne/example/filehtml": 45000
},
"reference_tokens": [
"ce2dc080-c283-11e9-b4d6-e046513784c2",
"430f47d0-c283-11e9-8776-fcbc36b81035"
],
"user_agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/76.0.3809.100 Chrome/76.0.3809.100 Safari/537.36",
"browser": {
"name": "Chromium",
"version": "76"
},
"is_public": "false",
"labels": []
}
```
**Request:**
`PUT /api/sessions/47a6fa50-c331-11e9-8709-a8eaa0ecfd0e`
```json
{
"tests": {
"include": ["/apiOne", "/apiThree"]
},
"timeouts": {
"automatic": 60000
},
"reference_tokens": [
"bb7aafa0-6a92-11e9-8ec2-04f58dad2e4f",
"a50c6db0-6a94-11e9-8d1b-e23fc4555885"
],
"labels": ["label1", "label2"]
}
```
**Response:**
`200 OK`
**Request:**
`GET /api/sessions/47a6fa50-c331-11e9-8709-a8eaa0ecfd0e`
**Response:**
```json
{
"token": "47a6fa50-c331-11e9-8709-a8eaa0ecfd0e",
"tests": {
"include": ["/apiOne", "/apiThree"],
"exclude": ["/apiOne/specials"]
},
"types": ["automatic"],
"timeouts": {
"automatic": 60000,
"manual": 360000
},
"reference_tokens": [
"bb7aafa0-6a92-11e9-8ec2-04f58dad2e4f",
"a50c6db0-6a94-11e9-8d1b-e23fc4555885"
],
"user_agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/76.0.3809.100 Chrome/76.0.3809.100 Safari/537.36",
"browser": {
"name": "Chromium",
"version": "76"
},
"is_public": "false",
"labels": ["label1", "label2"]
}
```

View File

@ -0,0 +1,43 @@
# `read all` - [Tests API](../README.md#tests-api)
The `read all` method of the tests API fetches all tests available to include into a test session.
## HTTP Request
`GET /api/tests`
## Response Payload
```json
{
"<api_name>": "Array<String>"
}
```
## Example
**Request:**
`GET /api/tests`
**Response:**
```json
{
"apiOne": [
"/apiOne/test/one.html",
"/apiOne/test/two.html",
"/apiOne/test/three.html"
],
"apiTwo": [
"/apiTwo/test/one.html",
"/apiTwo/test/two.html",
    "/apiTwo/test/three.html"
],
"apiThree": [
"/apiThree/test/one.html",
"/apiThree/test/two.html",
"/apiThree/test/three.html"
]
}
```

View File

@ -0,0 +1,43 @@
# `read available apis` - [Tests API](../README.md#tests-api)
The `read available apis` method returns a list of all web APIs that the DUT
can be tested for. It returns the human readable API name, as well as the
directory name under which all corresponding tests reside.
## HTTP Request
`GET /api/tests/apis`
## Response Payload
```json
[
{
"path": "String",
"name": "String"
},
...
]
```
## Example
**Request:**
`GET /api/tests/apis`
**Response:**
```json
[
{
"path": "/2dcontext",
"name": "2D Context"
},
{
"path": "/media-source",
"name": "Media Source"
},
...
]
```

View File

@ -0,0 +1,47 @@
# `read last completed` - [Tests API](../README.md#tests-api)
The `read last completed` method of the tests API returns a list of test files, which most recently finished and have a result. The files are grouped by the status their respective result had.
## HTTP Request
`GET /api/tests/<session_token>/last_completed`
## Query Parameters
| Parameter | Description | Default | Example |
| --------- | ------------------------------------------------------------------------------------------------------------------------- | ------- | --------------------- |
| `count` | Number of files per status to return | 5 | `count=5` |
| `status` | The status the files results must have. Comma separated list. Possible values: `all`, `pass`, `fail` and `timeout` | `all` | `status=timeout,pass` |
## Response Payload
```json
{
"pass": "Array<String>",
"fail": "Array<String>",
"timeout": "Array<String>"
}
```
## Example
**Request:**
`GET /api/tests/7dafeec0-c351-11e9-84c5-3d1ede2e7d2e/last_completed?count=3&status=fail,timeout`
**Response:**
```json
{
"fail": [
"/apiTwo/test/four.html",
"/apiOne/test/twentyfour.html",
"/apiOne/test/nineteen.html"
],
"timeout": [
"/apiFive/test/eight.html",
"/apiThree/test/five.html",
"/apiThree/test/two.html"
]
}
```

View File

@ -0,0 +1,30 @@
# `read malfunctioning` - [Tests API](../README.md#tests-api)
The `read malfunctioning` method of the tests API returns a list of test files, which were flagged as not working properly in a specific session. This is useful to [add them to the exclude list](../../usage/excluding-tests.md) of further test sessions.
## HTTP Request
`GET /api/tests/<session_token>/malfunctioning`
## Response Payload
```json
"Array<String>"
```
## Example
**Request:**
`GET /api/tests/7dafeec0-c351-11e9-84c5-3d1ede2e7d2e/malfunctioning`
**Response:**
```json
[
"/apiOne/test/one.html",
"/apiOne/test/five.html",
"/apiThree/test/two.html",
"/apiThree/test/twenty.html"
]
```

View File

@ -0,0 +1,29 @@
# `read next` - [Tests API](../README.md#tests-api)
The `read next` method of the tests API returns the next test of a test session, that is due to be executed. If the session's status is not `RUNNING` it returns a static page containing information about the session and its current status.
## HTTP Request
`GET /api/tests/<session_token>/next`
## Response Payload
```json
{
"next_test": "String"
}
```
## Example
**Request:**
`GET /api/tests/d6667670-c350-11e9-b504-4ac471cdd99d/next`
**Response:**
```json
{
"next_test": "http://web-platform.test:8000/apiOne/test/one.html?&token=d6667670-c350-11e9-b504-4ac471cdd99d&timeout=60000"
}
```

View File

@ -0,0 +1,61 @@
# `read session` - [Tests API](../README.md#tests-api)
The `read session` method of the tests API fetches all tests contained in a test session grouped by their status.
## HTTP Request
`GET /api/tests/<session_token>`
## Response Payload
```json
{
"token": "String",
"pending_tests": {
"<api_name>": "Array<String>"
},
"running_tests": {
"<api_name>": "Array<String>"
},
"completed_tests": {
"<api_name>": "Array<String>"
}
}
```
- **pending_tests** are tests that have yet to be executed.
- **running_tests** are tests that are currently executed by the device under test. Although only one test at a time is executed, tests that time out or fail to send a result may still wait for the timeout to occur. In this case there are multiple tests in this list.
- **completed_tests** are tests that are finished and have a result.
## Example
**Request:**
`GET /api/tests/cd922410-c344-11e9-858f-9063f6dd878f`
**Response:**
```json
{
"token": "cd922410-c344-11e9-858f-9063f6dd878f",
"pending_tests": {
"apiTwo": ["/apiTwo/test/three.html"],
"apiThree": [
"/apiThree/test/one.html",
"/apiThree/test/two.html",
"/apiThree/test/three.html"
]
},
"running_tests": {
"apiTwo": ["/apiTwo/test/two.html"]
},
"completed_tests": {
"apiOne": [
"/apiOne/test/one.html",
"/apiOne/test/two.html",
"/apiOne/test/three.html"
],
"apiTwo": ["/apiTwo/test/one.html"]
}
}
```

View File

@ -0,0 +1,56 @@
# `update malfunctioning` - [Tests API](../README.md#tests-api)
The `update malfunctioning` method of the tests API sets the list of test files, that are flagged as not working properly in a specific session. It replaces the existing list with the new provided list.
## HTTP Request
`PUT /api/tests/<session_token>/malfunctioning`
## Request Payload
```json
"Array<String>"
```
## Example
**Request:**
`GET /api/tests/7dafeec0-c351-11e9-84c5-3d1ede2e7d2e/malfunctioning`
**Response:**
```json
[
"/apiOne/test/one.html",
"/apiOne/test/five.html",
"/apiThree/test/two.html",
"/apiThree/test/twenty.html"
]
```
**Request:**
`PUT /api/tests/7dafeec0-c351-11e9-84c5-3d1ede2e7d2e/malfunctioning`
```json
[
"/apiOne/test/three.html",
"/apiOne/test/eight.html",
"/apiThree/test/one.html"
]
```
**Request:**
`GET /api/tests/7dafeec0-c351-11e9-84c5-3d1ede2e7d2e/malfunctioning`
**Response:**
```json
[
"/apiOne/test/three.html",
"/apiOne/test/eight.html",
"/apiThree/test/one.html"
]
```

View File

@ -0,0 +1,224 @@
# Usage Guide - [WAVE Test Suite](../README.md)
## Contents
1. [Creating test sessions](#1-creating-test-sessions)
1. [The landing page](#11-the-landing-page)
2. [Configuring a new session](#12-configuring-a-new-session)
3. [Exclude tests](#13-exclude-tests)
1. [Manually specify tests to exclude](#131-manually-specify-tests-to-exclude)
2. [Use a session's malfunctioning list to add tests to exclude](#132-use-a-sessions-malfunctioning-list-to-add-tests-to-exclude)
3. [Use a previous session's exclude list to add tests to exclude](#133-use-a-previous-sessions-exclude-list-to-add-tests-to-exclude)
2. [Resuming test sessions](#2-resuming-test-sessions)
1. [Using the webinterface](#21-using-the-webinterface)
2. [Using a URL](#22-using-a-url)
3. [Monitoring test sessions](#3-monitoring-test-sessions)
4. [Managing test sessions](#4-managing-test-sessions)
# 1. Creating test sessions
Test sessions hold information about one test run on a particular device, like the current status.
Each session is identified using a UUIDv1 token string to gather this information or perform actions on it.
Each new session is configured using several parameters before the run starts.
## 1.1 The landing page
Every new session is created from the landing page.
It is recommended to create a new session from the device that is tested, as the user agent is part of the displayed information, as well as the browser and version, which gets parsed from it.
However, this does not influence the execution of tests or the creation of test results.
To create a new session, open the landing page on the URI path `/`.
![landing_page]
The landing page is divided into two sections, one to create a new session and one to resume a session.
As soon as the landing page is opened, a new test session is created.
Its token is displayed next to the QR-Code on the right, along with the expiration date.
As the session was created automatically, it gets removed automatically once it expires.
However, if you start the session, the expiration date gets removed and the session is available until you delete it.
## 1.2 Configuring a new session
To configure and start the session, either click on "Configure Session" or scan the QR-Code.
In most cases it is recommended to scan the QR-Code, as it does not require any interaction with the landing page on the DUT.
![configuration_page]
In the configuration screen you can set parameters for the new session and start it.
At the top the session's token and expiration date is displayed. Next there is the "Labels" option, which allows adding any number of labels to the session, helping to better organize sessions and allowing to apply filters while searching.
Labels can be added and modified at any point in the future.
Next there is the API selection, which allows defining the set of APIs to test in the new session. To exclude specific test or subdirectories of those selected APIs, there is the "Excluded Tests" option right below it. Here you can specify what tests to exclude in three distinct ways. (More details in [1.3 Exclude tests](#13-exclude-tests))
![configuration_page_bottom]
With the "Test Types" option you specify what types of test should be included into the session: in contrast to automatic tests, manual tests require user interaction to execute properly.
The "Reference Browsers" option lets you select browsers that are used to further filter the set of tests included in the session.
Only tests that have passed the reference test session in all selected browsers are included.
The reference browsers represent the status of implementation of all WAVE APIs in modern desktop browsers, at about the time the WAVE specification was published.
To start the session press "Start Session"; note that the landing page has to stay open, as the tests are going to be executed in the same window.
[To the top](#usage-guide---wave-test-suite)
## 1.3 Exclude tests
To have a fine control over what test cases are executed when configuring a session, it is possible to provide a list of test cases, that are omitted in the run.
### 1.3.1 Manually specify tests to exclude
To add tests to exclude by providing a plain text list, click on "Add Raw" in the "Excluded Tests" setting.
This opens an input field, where you can enter multiple full paths to test files or directories.
![Exclude List Add Raw][configuration_page_add_raw]
Each line will be interpreted as a path to exclude a single or a group of tests.
All tests that have a path starting with one of the provided paths will be excluded from the session.
Lines starting with a # symbol will be ignored, in case you want to organize test paths in a text file using comments.
Click "Add" and you will see the paths listed in the table below.
### 1.3.2 Use a session's malfunctioning list to add tests to exclude
When flagging tests in a running session as malfunctioning, e.g. when crashing the device, it is possible to add these tests to the exclude list of the new session.
To do this, click on "Add Malfunctioning" in the "Excluded Tests" section.
![Exclude List Add Malfunctioning][configuration_page_add_malfunctioning]
Enter the first eight characters or more into the text field labelled "Session Token" to import all tests from the session's malfunctioning list into the new session's exclude list.
Click "Add" to confirm.
The tests should now appear in the list below.
### 1.3.3 Use a previous session's exclude list to add tests to exclude
If you have already specified a suitable exclude list or want to expand an existing one, you can apply the exclude list of a previous session.
Click on "Add Previous Excluded" in the "Excluded Tests" section to open the corresponding controls.
![Exclude List Add Previously Excluded][configuration_page_add_prev_excluded]
Enter the first eight characters or more into the text field labelled "Session Token" to import all tests from the previous session's exclude list into the new session's exclude list.
Click "Add" to confirm.
The tests should now appear in the list below.
[To the top](#usage-guide---wave-test-suite)
# 2. Resuming test sessions
Certain test cases may cause some devices to crash, which makes the test suite unable to automatically run the next test.
In this case, external interaction is necessary.
To alleviate the process of resuming the test session, there are two mechanisms integrated into the web interface that reduce interaction with the device to a minimum.
There is also a mechanism that can be useful if a test framework with access to the tested browser is utilized.
## 2.1 Using the webinterface
In any case, it is necessary to open the landing page on the device, in order to resume the session.
![Landing Page][landing_page]
On the landing page, in the section "Resume running session", you can see the token of the last session this device has run.
To resume this particular session, click on the "Resume" button next to it, or simply press enter or space.
If the presented token is not the one of the session you want to resume, you can change it from the configuration screen.
To get there, press the "Configure Session" button or scan the QR-Code.
![Configuration Page][configuration_page]
At the very bottom of the configuration page, there is a section called "Resume session", where you can see the token that was previously displayed on the landing page in a text box.
Here you can change the token of the session to resume, just enter the first eight characters or more of the token.
When you're done, press the "Resume" button.
Note that it is necessary to keep the landing page open in order to automatically run the next test, as it is loaded in the same window.
## 2.2 Using a URL
If you have access to the DUTs browser programmatically, you may want to resume a crashed test session automatically.
To load the next test of a specific session, simply open the following URL:
`/next.html?token=<session_token>`
For example:
`/next.html?token=24fcd360-ef4d-11e9-a95f-d6e1ad4c5fdb`
[To the top](#usage-guide---wave-test-suite)
# 3. Monitoring test sessions
While running test sessions, the results page for second screen devices provides a convenient summary of the session's current state, as well as controls to manipulate the test execution.
Additionally, you can flag tests in case they interrupt the test execution by, e.g. crashing the test, to exclude them in future sessions and download test results and reports.
![results_page_top]
On the top right-hand side, there are controls to stop, pause or delete the session.
Stopping, as well as deleting the session is irreversible.
Below you find the session's details, including the token, user agent, test paths, excluded test paths, total test file count, status, the different test timeouts, the date and time the session has been started, the date and time the session has finished, the duration and labels.
![results_page_last_completed]
Right below, tests that have recently completed with result status TIMEOUT are listed to add them to the list of malfunctioning tests by clicking the button with the + symbol.
Now that test appears in the list of malfunctioning tests at the very bottom of the result page.
This list can be used to exclude tests when creating a new session. (more details in [1.3.2 Use a session's malfunctioning list to add tests to exclude](#132-use-a-sessions-malfunctioning-list-to-add-tests-to-exclude))
![results_page_api_results]
In the section "API Results" you can see the progress of each individual API selected for the session.
As each test file can contain multiple subtests, the count of passed, failed, timed out and not run tests does not correlate to the count of test files run, which indicates the overall progress.
Keep in mind that only test files that received a result will count as run, so even if all tests finished executing on the device, some may have failed to send the result, in which case the internal timeout has to run out to create it.
![results_page_api_results_export]
Once all test files of an API have received a result, it is possible to download the result data or view a report for that API, by clicking the corresponding button in the far right column of the table.
![results_page_bottom]
Below the table of API results, there are more options to download the results of the session.
The first option downloads the results the same way they are persisted on the server side, along with some metadata.
This form is especially useful if you want to import the session details with the results into other instances of the WAVE Test Suite.
Furthermore, there is the option to download the raw result in JSON format of all finished APIs.
This is the same JSON you get by clicking the "JSON" button in the API results column, but for all finished APIs in a ZIP file.
Lastly, you can download a static HTML page, similar to the results view.
Finally, at the bottom of the page you can find the list of malfunctioning tests that have been added from the list of last timed-out test files.
Remove tests by clicking their corresponding button with the trashcan icon.
[To the top](#usage-guide---wave-test-suite)
# 4. Managing test sessions
The overview page provides features that help to manage and organize multiple sessions. You can access it from the URL `/overview.html`.
![overview_page]
In the "Manage Sessions" section you can add more sessions to the list below by entering the first eight or more characters of the token.
Clicking on "Add Session" will add the session to the list if it was the only one that could be associated with the provided token.
If there are multiple sessions that match the provided input, none will be added.
Additionally, you can compare multiple sessions, given that they are completed, used the same reference sessions and share tested APIs.
Simply select the desired session from the list below and click "Compare Selected".
You can also import sessions in the "Import Sessions" section, however, this feature has to be enabled in the server configuration.
Below the "Manage Sessions" section, there is the list of reference and recent sessions.
![overview_page_sessions]
In the sessions list, sessions are organized in three lists: Reference Browsers, which are test results everyone can see, containing the results of the reference browsers for the corresponding WAVE specification; recent sessions, which have recently been viewed or executed on the device; and pinned sessions, which are sessions pinned by the user from the list of recent sessions.
Add label filters to show only matching sessions.
![overview_page_sessions_pinned_recent]
You can pin a session by clicking the button with the tag on a session in the recent sessions list and unpin them the same way from the pinned sessions list.
Click the trashcan icon to remove a session from its list, this will not delete the session results.
Sort the list of sessions by clicking on the column to sort them by.
![overview_page_sessions_filtered]
Add one or more tags to the filter to conveniently find the sessions you are looking for. Add labels to session when creating them or in their corresponding results page.
[To the top](#usage-guide---wave-test-suite)
[landing_page]: ../res/landing_page.jpg "Landing Page"
[configuration_page]: ../res/configuration_page_top.jpg "Configuration Page"
[configuration_page_bottom]: ../res/configuration_page_bottom.jpg "Configuration Page"
[configuration_page_add_raw]: ../res/configuration_page_exclude_add_raw.jpg "Exclude Tests - Add Raw"
[configuration_page_add_malfunctioning]: ../res/configuration_page_exclude_add_malfunctioning.jpg "Exclude Tests - Add Malfunctioning"
[configuration_page_add_prev_excluded]: ../res/configuration_page_exclude_add_prev_excluded.jpg "Exclude Tests - Add Previously Excluded"
[results_page_top]: ../res/results_page_top.jpg "Results Page"
[results_page_last_completed]: ../res/results_page_last_timed_out.jpg "Results Page"
[results_page_api_results]: ../res/results_page_api_results.jpg "Results Page"
[results_page_api_results_export]: ../res/results_page_api_results_export.jpg "Results Page"
[results_page_bottom]: ../res/results_page_bottom.jpg "Results Page"
[overview_page]: ../res/overview_page_top.jpg "Overview Page"
[overview_page_sessions]: ../res/overview_page_sessions.jpg "Overview Page Sessions"
[overview_page_sessions_pinned_recent]: ../res/overview_page_sessions_pinned_recent.jpg "Overview Page Sessions"
[overview_page_sessions_filtered]: ../res/overview_page_sessions_filtered.jpg "Overview Page Filter"

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1,75 @@
body {
  margin: 0;
  padding: 0;
  display: flex;
  justify-content: center;
  /* Fixed: a comma is required between the named family and the generic
     fallback; without it the whole declaration is invalid and ignored. */
  font-family: "Noto Sans", sans-serif;
  background-color: white;
  color: #000;
}
.header {
  display: flex;
  margin: 50px 0 30px 0;
}
.header :first-child {
  flex: 1;
}
.site-logo {
  max-width: 300px;
  margin-left: -15px;
}
.content {
  width: 1000px;
}
#test-path,
#token {
  font-family: monospace;
  font-size: 12pt;
}
/* Result-status colors, matching the live results view. */
.pass {
  color: green;
}
.fail {
  color: red;
}
.timeout {
  color: rgb(224, 127, 0);
}
.not-run {
  color: blue;
}
.api-result-timeoutfiles {
  display: none; /* don't display for now */
  flex-basis: 100%;
}
#header {
  display: flex;
  align-items: center;
}
#header > :first-child {
  flex: 1;
}
#controls-wrapper {
  display: flex;
}
.no-border-radius {
  border-radius: 0;
}
#results-table .button {
  margin: 0 2px;
}

View File

@ -0,0 +1,375 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<title>Results - Web Platform Test</title>
<link rel="stylesheet" href="css/bulma.min.css" />
<link rel="stylesheet" href="css/result.css" />
<script src="lib/utils.js"></script>
<script src="lib/ui.js"></script>
<script src="results.json.js"></script>
<script src="details.json.js"></script>
</head>
<body>
<script>
// Bootstrap the page: build the static skeleton first, then populate it
// with the exported session data.
window.onload = function () {
  resultUi.render();
  resultUi.loadData();
};
// Renders the static, exported session-results page. Session data is not
// fetched: it is injected as the globals `details` (details.json.js) and
// `results` (results.json.js) loaded in the page head.
const resultUi = {
  // Page state: raw session details and per-API result counters.
  state: { details: null, results: null },
  loadData: () => {
    resultUi.loadSessionDetails();
    resultUi.loadSessionResults();
  },
  // Copy the injected `details` global into state and render it.
  loadSessionDetails(callback = () => {}) {
    resultUi.state.details = details;
    resultUi.renderSessionDetails();
    callback(details);
  },
  // Copy the injected `results` global into state, deriving per-API
  // completion counters from the session details, then render.
  loadSessionResults(callback = () => {}) {
    const { details } = resultUi.state;
    // Ensure every API listed in the details has a (possibly empty)
    // results entry so it shows up in the table.
    Object.keys(details.test_files_count).forEach(api =>
      !results[api] ? (results[api] = {}) : null
    );
    for (let api in results) {
      let { pass, fail, timeout, not_run } = results[api];
      let complete = 0;
      if (pass) complete += pass;
      if (fail) complete += fail;
      if (timeout) complete += timeout;
      if (not_run) complete += not_run;
      results[api].complete = complete;
      const { test_files_count, test_files_completed } = details;
      results[api].isDone =
        test_files_count[api] === test_files_completed[api];
      results[api].testFilesCount = test_files_count[api];
      results[api].testFilesCompleted = test_files_completed[api];
    }
    resultUi.state.results = results;
    resultUi.renderApiResults();
    callback(results);
  },
  // Build the page skeleton (logo, heading, empty sections) and then fill
  // whatever data is already present in state.
  render() {
    const resultView = UI.createElement({
      className: "content",
      style: "margin-bottom: 40px;",
      children: [
        {
          className: "header",
          children: [
            {
              children: [
                {
                  element: "img",
                  src: "res/wavelogo_2016.jpg",
                  className: "site-logo"
                }
              ]
            }
          ]
        },
        {
          id: "header",
          children: [
            { className: "title", text: "Result" },
            { id: "controls" }
          ]
        },
        { id: "session-details" },
        { id: "api-results" },
        { id: "timeout-files" },
        { id: "export" }
      ]
    });
    const root = UI.getRoot();
    root.innerHTML = "";
    root.appendChild(resultView);
    resultUi.renderSessionDetails();
    resultUi.renderApiResults();
  },
  // Render the "Session details" table (token, user agent, path, counts,
  // timeout and timing information).
  renderSessionDetails() {
    const { state } = resultUi;
    const { details } = state;
    if (!details) return;
    const sessionDetailsView = UI.createElement({
      style: "margin-bottom: 20px"
    });
    const heading = UI.createElement({
      text: "Session details",
      className: "title is-4"
    });
    sessionDetailsView.appendChild(heading);
    // NOTE(review): getTagStyle is never referenced on this page —
    // presumably carried over from the live results view; candidate for
    // removal.
    const getTagStyle = status => {
      switch (status) {
        case "completed":
          return "is-success";
        case "running":
          return "is-info";
        case "aborted":
          return "is-danger";
        case "paused":
          return "is-warning";
      }
    };
    const { test_files_count, token } = details;
    const detailsTable = UI.createElement({
      element: "table",
      children: {
        element: "tbody",
        children: [
          {
            element: "tr",
            children: [
              { element: "td", text: "Token:", style: "width: 140px;" },
              {
                element: "td",
                text: token,
                className: "is-family-monospace"
              }
            ]
          },
          {
            element: "tr",
            children: [
              { element: "td", text: "User Agent:" },
              { element: "td", text: details.user_agent || "" }
            ]
          },
          {
            element: "tr",
            children: [
              { element: "td", text: "Test Path:" },
              { element: "td", text: details.path || "" }
            ]
          },
          {
            element: "tr",
            children: [
              { element: "td", text: "Total Test Files:" },
              {
                element: "td",
                // Sum of per-API test file counts.
                text: Object.keys(test_files_count).reduce(
                  (sum, api) => (sum += test_files_count[api]),
                  0
                )
              }
            ]
          },
          {
            element: "tr",
            children: [
              { element: "td", text: "Test Timeout:" },
              { element: "td", text: details.test_timeout || "" }
            ]
          },
          {
            element: "tr",
            children: [
              { element: "td", text: "Started:" },
              {
                element: "td",
                text: new Date(details.date_started).toLocaleString()
              }
            ]
          },
          // "Finished" and "Duration" rows only exist for finished
          // sessions (null children are skipped by UI.createElement).
          details.date_finished
            ? {
                element: "tr",
                children: [
                  { element: "td", text: "Finished:" },
                  {
                    element: "td",
                    text: new Date(details.date_finished).toLocaleString()
                  }
                ]
              }
            : null,
          details.date_finished
            ? {
                element: "tr",
                children: [
                  { element: "td", text: "Duration:" },
                  {
                    element: "td",
                    id: "duration",
                    text: utils.millisToTimeString(
                      details.date_finished
                        ? parseInt(details.date_finished) -
                            parseInt(details.date_started)
                        : Date.now() - parseInt(details.date_started)
                    )
                  }
                ]
              }
            : null
        ]
      }
    });
    sessionDetailsView.appendChild(detailsTable);
    const sessionDetails = UI.getElement("session-details");
    sessionDetails.innerHTML = "";
    sessionDetails.appendChild(sessionDetailsView);
  },
  // Render the per-API results table with pass/fail/timeout/not-run
  // percentages and a footer row with the totals.
  renderApiResults() {
    const { results } = resultUi.state;
    if (!results) return;
    const apiResultsView = UI.createElement({
      style: "margin-bottom: 20px"
    });
    const heading = UI.createElement({
      text: "API Results",
      className: "title is-4"
    });
    apiResultsView.appendChild(heading);
    const header = UI.createElement({
      element: "thead",
      children: [
        {
          element: "tr",
          children: [
            { element: "th", text: "API" },
            { element: "th", text: "Pass" },
            { element: "th", text: "Fail" },
            { element: "th", text: "Timeout" },
            { element: "th", text: "Not Run" },
            { element: "th", text: "Test Files Run" }
          ]
        }
      ]
    });
    // Case-insensitive alphabetical order of API names.
    const apis = Object.keys(results).sort((apiA, apiB) =>
      apiA.toLowerCase() > apiB.toLowerCase() ? 1 : -1
    );
    const rows = apis.map(api => {
      const {
        complete = 0,
        pass = 0,
        fail = 0,
        timeout = 0,
        timeoutfiles = [],
        not_run: notRun = 0,
        isDone = false,
        testFilesCount,
        testFilesCompleted = 0
      } = results[api];
      return UI.createElement({
        element: "tr",
        children: [
          { element: "td", text: api },
          {
            element: "td",
            style: "color: hsl(141, 71%, 38%)",
            text: `${pass} (${utils.percent(pass, complete)}%)`
          },
          {
            element: "td",
            className: "has-text-danger",
            text: `${fail} (${utils.percent(fail, complete)}%)`
          },
          {
            element: "td",
            style: "color: hsl(48, 100%, 40%)",
            text: `${timeout} (${utils.percent(timeout, complete)}%)`
          },
          {
            element: "td",
            className: "has-text-info",
            text: `${notRun} (${utils.percent(notRun, complete)}%)`
          },
          {
            element: "td",
            text: `${testFilesCompleted}/${testFilesCount} (${utils.percent(
              testFilesCompleted,
              testFilesCount
            )}%)`
          }
        ]
      });
    });
    // Accumulate the totals across all APIs for the footer row.
    const { pass, fail, timeout, not_run, complete } = apis.reduce(
      (sum, api) => {
        Object.keys(sum).forEach(
          key => (sum[key] += results[api][key] ? results[api][key] : 0)
        );
        return sum;
      },
      { complete: 0, pass: 0, fail: 0, timeout: 0, not_run: 0 }
    );
    const testFilesCount = Object.keys(results).reduce(
      (sum, api) => (sum += results[api].testFilesCount),
      0
    );
    const testFilesCompleted = Object.keys(results).reduce(
      (sum, api) => (sum += results[api].testFilesCompleted || 0),
      0
    );
    const footer = UI.createElement({
      element: "tfoot",
      children: [
        {
          element: "tr",
          children: [
            { element: "th", text: "Total" },
            {
              element: "th",
              style: "color: hsl(141, 71%, 38%)",
              text: `${pass} (${utils.percent(pass, complete)}%)`
            },
            {
              element: "th",
              className: "has-text-danger",
              text: `${fail} (${utils.percent(fail, complete)}%)`
            },
            {
              element: "th",
              style: "color: hsl(48, 100%, 40%)",
              text: `${timeout} (${utils.percent(timeout, complete)}%)`
            },
            {
              element: "th",
              className: "has-text-info",
              text: `${not_run} (${utils.percent(not_run, complete)}%)`
            },
            {
              element: "th",
              text: `${testFilesCompleted}/${testFilesCount} (${utils.percent(
                testFilesCompleted,
                testFilesCount
              )}%)`
            }
          ]
        }
      ]
    });
    const resultsTable = UI.createElement({
      element: "table",
      className: "table",
      id: "results-table",
      style: "border-radius: 3px; border: 2px solid hsl(0, 0%, 86%);",
      children: [header, { element: "tbody", children: rows }, footer]
    });
    apiResultsView.appendChild(resultsTable);
    const apiResults = UI.getElement("api-results");
    apiResults.innerHTML = "";
    apiResults.appendChild(apiResultsView);
  }
};
</script>
</body>
</html>

View File

@ -0,0 +1,64 @@
// Minimal declarative DOM-builder used by the exported result pages.
const UI = {
  // Create a DOM element from a config object. Recognized keys:
  // element (tag name, default "div"), id/src/style/placeholder/title
  // (plain attributes), className, text, html, onclick/onchange/onkeydown
  // (bound handlers), type (inputs only), children (config object,
  // Element, or array of either), disabled. Unknown keys are ignored.
  createElement: config => {
    if (!config) return document.createElement("div");
    const elementType = config.element || "div";
    const element = document.createElement(elementType);
    Object.keys(config).forEach(property => {
      const value = config[property];
      // Property names are matched case-insensitively.
      switch (property.toLowerCase()) {
        case "id":
        case "src":
        case "style":
        case "placeholder":
        case "title":
          element.setAttribute(property, value);
          return;
        case "classname":
          element.setAttribute("class", value);
          return;
        case "text":
          element.innerText = value;
          return;
        case "html":
          element.innerHTML = value;
          return;
        case "onclick":
          // Handlers are bound so `this` refers to the created element.
          element.onclick = value.bind(element);
          return;
        case "onchange":
          element.onchange = value.bind(element);
          return;
        case "onkeydown":
          element.onkeydown = value.bind(element);
          return;
        case "type":
          // "type" is only meaningful for <input> elements.
          if (elementType === "input") element.setAttribute("type", value);
          return;
        case "children":
          // Children may be ready-made Elements or nested config objects,
          // either singly or in an array.
          if (value instanceof Array) {
            value.forEach(child =>
              element.appendChild(
                child instanceof Element ? child : UI.createElement(child)
              )
            );
          } else {
            element.appendChild(
              value instanceof Element ? value : UI.createElement(value)
            );
          }
          return;
        case "disabled":
          if (value) element.setAttribute("disabled", true);
          return;
      }
    });
    return element;
  },
  // Shorthand for document.getElementById.
  getElement: id => {
    return document.getElementById(id);
  },
  // Root container all views are attached to (the <body> element).
  getRoot: () => {
    return document.getElementsByTagName("body")[0];
  }
};

View File

@ -0,0 +1,40 @@
// Small helper collection shared by the exported result pages.
const utils = {
  // Parse the query part of a URL into an object. Keys without a value
  // (or with an empty value) map to null. Values containing "=" are kept
  // in full — the previous split("=") dropped everything after the first
  // "=" in a value.
  parseQuery: queryString => {
    if (queryString.indexOf("?") === -1) return {};
    queryString = queryString.split("?")[1];
    const query = {};
    for (let part of queryString.split("&")) {
      const separatorIndex = part.indexOf("=");
      if (separatorIndex === -1) {
        query[part] = null;
        continue;
      }
      const key = part.slice(0, separatorIndex);
      const value = part.slice(separatorIndex + 1);
      query[key] = value ? value : null;
    }
    return query;
  },
  // Percentage of count in total, floored to two decimal places.
  // Returns 0 when the result is 0 or NaN (e.g. total === 0).
  percent: (count, total) => {
    const percent = Math.floor((count / total) * 10000) / 100;
    if (!percent) {
      return 0;
    }
    return percent;
  },
  // Trigger a browser download of the blob under the given file name via
  // a temporary, invisible anchor element.
  saveBlobAsFile: (blob, filename) => {
    const url = URL.createObjectURL(blob);
    const a = document.createElement("a");
    a.style.display = "none";
    document.body.appendChild(a);
    a.href = url;
    a.download = filename;
    a.click();
    document.body.removeChild(a);
  },
  // Format a millisecond duration as "HH:MM:SS". Milliseconds are not
  // part of the output (a dead `milliseconds` computation was removed).
  millisToTimeString(totalMilliseconds) {
    let seconds = (Math.floor(totalMilliseconds / 1000) % 60) + "";
    seconds = seconds.padStart(2, "0");
    let minutes = (Math.floor(totalMilliseconds / 60000) % 60) + "";
    minutes = minutes.padStart(2, "0");
    let hours = Math.floor(totalMilliseconds / 3600000) + "";
    hours = hours.padStart(2, "0");
    return `${hours}:${minutes}:${seconds}`;
  }
};

Binary file not shown.

After

Width:  |  Height:  |  Size: 15 KiB

View File

@ -0,0 +1,58 @@
from __future__ import absolute_import
from __future__ import unicode_literals
import json
import sys
import traceback
import logging
try:
from urllib.parse import parse_qsl
except ImportError:
from urlparse import parse_qsl
# Module-level logger shared by all WAVE API handlers. The previous
# `global logger` statement was a no-op at module scope and was removed.
logger = logging.getLogger("wave-api-handler")
class ApiHandler(object):
    """Base class for WAVE REST API handlers.

    Provides shared helpers for parsing request URIs and query strings and
    for writing JSON, file-attachment and ZIP responses.
    """

    def __init__(self, web_root):
        # Path prefix the server is mounted under; stripped from request
        # paths before routing (may be None).
        self._web_root = web_root

    def set_headers(self, response, headers):
        """Append (name, value) header tuples to the response."""
        if not isinstance(response.headers, list):
            response.headers = []
        for header in headers:
            response.headers.append(header)

    def send_json(self, data, response, status=None):
        """Serialize ``data`` as JSON into the response (default 200)."""
        if status is None:
            status = 200
        json_string = json.dumps(data, indent=4)
        response.content = json_string
        self.set_headers(response, [("Content-Type", "application/json")])
        response.status = status

    def send_file(self, blob, file_name, response):
        """Send a binary blob as a file-download attachment."""
        self.set_headers(response,
                         [("Content-Disposition",
                           "attachment;filename=" + file_name)])
        response.content = blob

    def send_zip(self, data, file_name, response):
        """Send a binary blob as a downloadable ZIP archive."""
        response.headers = [("Content-Type", "application/x-compressed")]
        self.send_file(data, file_name, response)

    def parse_uri(self, request):
        """Split the request path (minus the web root) into segments."""
        path = request.url_parts.path
        if self._web_root is not None:
            path = path[len(self._web_root):]
        uri_parts = list(filter(None, path.split("/")))
        return uri_parts

    def parse_query_parameters(self, request):
        """Return the query string as a dict (last value wins per key)."""
        return dict(parse_qsl(request.url_parts.query))

    def handle_exception(self, message):
        """Log the currently handled exception with a context message."""
        info = sys.exc_info()
        traceback.print_tb(info[2])
        # str(info[1]) is safe for exceptions constructed without
        # arguments; the previous info[1].args[0] raised IndexError for
        # them and masked the original error inside except blocks.
        logger.error("{}: {}: {}".format(message, info[0].__name__, str(info[1])))

View File

@ -0,0 +1,225 @@
from __future__ import absolute_import
from __future__ import unicode_literals
import json
from .api_handler import ApiHandler
from ...data.exceptions.duplicate_exception import DuplicateException
from ...data.exceptions.invalid_data_exception import InvalidDataException
class ResultsApiHandler(ApiHandler):
    """REST API handler for the /api/results endpoints.

    Persists incoming test results and serves result exports (raw JSON,
    ZIP bundles, a static HTML overview and WPT report URLs) by
    delegating to the results manager.
    """

    def __init__(self, results_manager, web_root):
        super(ResultsApiHandler, self).__init__(web_root)
        self._results_manager = results_manager

    def create_result(self, request, response):
        """POST /api/results/<token>: store one test result (JSON body)."""
        try:
            uri_parts = self.parse_uri(request)
            token = uri_parts[2]
            data = None
            body = request.body.decode("utf-8")
            if body != "":
                data = json.loads(body)
            self._results_manager.create_result(token, data)
        except Exception:
            self.handle_exception("Failed to create result")
            response.status = 500

    def read_results(self, request, response):
        """GET /api/results/<token>: return all results of a session."""
        try:
            uri_parts = self.parse_uri(request)
            token = uri_parts[2]
            results = self._results_manager.read_results(token)
            self.send_json(response=response, data=results)
        except Exception:
            self.handle_exception("Failed to read results")
            response.status = 500

    def read_results_compact(self, request, response):
        """GET /api/results/<token>/compact: per-API pass/fail counts."""
        try:
            uri_parts = self.parse_uri(request)
            token = uri_parts[2]
            results = self._results_manager.read_flattened_results(token)
            self.send_json(response=response, data=results)
        except Exception:
            self.handle_exception("Failed to read compact results")
            response.status = 500

    def read_results_config(self, request, response):
        """GET /api/results/config: import/report feature flags."""
        try:
            import_enabled = self._results_manager.is_import_enabled()
            reports_enabled = self._results_manager.are_reports_enabled()
            self.send_json({
                "import_enabled": import_enabled,
                "reports_enabled": reports_enabled
            }, response)
        except Exception:
            self.handle_exception("Failed to read results configuration")
            response.status = 500

    def read_results_api_wpt_report_url(self, request, response):
        """GET /api/results/<token>/<api>/reporturl: WPT report URI."""
        try:
            uri_parts = self.parse_uri(request)
            token = uri_parts[2]
            api = uri_parts[3]
            uri = self._results_manager.read_results_wpt_report_uri(token, api)
            self.send_json({"uri": uri}, response)
        except Exception:
            self.handle_exception("Failed to read results report url")
            response.status = 500

    def read_results_api_wpt_multi_report_uri(self, request, response):
        """GET .../reporturl?tokens=a,b: comparison report URI for an API."""
        try:
            uri_parts = self.parse_uri(request)
            api = uri_parts[3]
            query = self.parse_query_parameters(request)
            tokens = query["tokens"].split(",")
            uri = self._results_manager.read_results_wpt_multi_report_uri(
                tokens,
                api
            )
            self.send_json({"uri": uri}, response)
        except Exception:
            self.handle_exception("Failed to read results multi report url")
            response.status = 500

    def download_results_api_json(self, request, response):
        """GET /api/results/<token>/<api>/json: ZIP of one API's JSON."""
        try:
            uri_parts = self.parse_uri(request)
            token = uri_parts[2]
            api = uri_parts[3]
            blob = self._results_manager.export_results_api_json(token, api)
            if blob is None:
                response.status = 404
                return
            file_path = self._results_manager.get_json_path(token, api)
            # File name: <token prefix>-<api>-<original file name>.
            file_name = "{}-{}-{}".format(
                token.split("-")[0],
                api,
                file_path.split("/")[-1]
            )
            self.send_zip(blob, file_name, response)
        except Exception:
            self.handle_exception("Failed to download api json")
            response.status = 500

    def download_results_all_api_jsons(self, request, response):
        """GET /api/results/<token>/json: ZIP of all finished APIs' JSON."""
        try:
            uri_parts = self.parse_uri(request)
            token = uri_parts[2]
            blob = self._results_manager.export_results_all_api_jsons(token)
            file_name = token.split("-")[0] + "_results_json.zip"
            self.send_zip(blob, file_name, response)
        except Exception:
            self.handle_exception("Failed to download all api jsons")
            response.status = 500

    def download_results(self, request, response):
        """GET /api/results/<token>/export: full session export ZIP."""
        try:
            uri_parts = self.parse_uri(request)
            token = uri_parts[2]
            blob = self._results_manager.export_results(token)
            if blob is None:
                response.status = 404
                return
            file_name = token + ".zip"
            self.send_zip(blob, file_name, response)
        except Exception:
            self.handle_exception("Failed to download results")
            response.status = 500

    def download_results_overview(self, request, response):
        """GET /api/results/<token>/overview: static HTML results page."""
        try:
            uri_parts = self.parse_uri(request)
            token = uri_parts[2]
            blob = self._results_manager.export_results_overview(token)
            if blob is None:
                response.status = 404
                return
            file_name = token.split("-")[0] + "_results_html.zip"
            self.send_zip(blob, file_name, response)
        except Exception:
            self.handle_exception("Failed to download results overview")
            response.status = 500

    def import_results(self, request, response):
        """POST /api/results/import: import a previously exported session."""
        try:
            blob = request.body
            token = self._results_manager.import_results(blob)
            self.send_json({"token": token}, response)
        except DuplicateException:
            self.handle_exception("Failed to import results")
            self.send_json({"error": "Session already exists!"}, response, 400)
            return
        except InvalidDataException:
            self.handle_exception("Failed to import results")
            self.send_json({"error": "Invalid input data!"}, response, 400)
            return
        except Exception:
            self.handle_exception("Failed to import results")
            response.status = 500

    def handle_request(self, request, response):
        """Route /api/results requests by path length, segment and method."""
        method = request.method
        uri_parts = self.parse_uri(request)
        # /api/results/<token> ("import" and "config" are special segments)
        if len(uri_parts) == 3:
            if method == "POST":
                if uri_parts[2] == "import":
                    self.import_results(request, response)
                    return
                self.create_result(request, response)
                return
            if method == "GET":
                if uri_parts[2] == "config":
                    self.read_results_config(request, response)
                    return
                else:
                    self.read_results(request, response)
                    return
        # /api/results/<token>/<function>
        if len(uri_parts) == 4:
            function = uri_parts[3]
            if method == "GET":
                if function == "compact":
                    self.read_results_compact(request, response)
                    return
                if function == "reporturl":
                    return self.read_results_api_wpt_multi_report_uri(request,
                                                                      response)
                if function == "json":
                    self.download_results_all_api_jsons(request, response)
                    return
                if function == "export":
                    self.download_results(request, response)
                    return
                if function == "overview":
                    self.download_results_overview(request, response)
                    return
        # /api/results/<token>/<api>/<function>
        if len(uri_parts) == 5:
            function = uri_parts[4]
            if method == "GET":
                if function == "reporturl":
                    self.read_results_api_wpt_report_url(request, response)
                    return
                if function == "json":
                    self.download_results_api_json(request, response)
                    return
        # No route matched.
        response.status = 404

View File

@ -0,0 +1,342 @@
from __future__ import absolute_import
from __future__ import unicode_literals
import json
import threading
from .api_handler import ApiHandler
from ...utils.serializer import serialize_session
from ...data.exceptions.not_found_exception import NotFoundException
from ...data.exceptions.invalid_data_exception import InvalidDataException
from ...data.http_polling_client import HttpPollingClient
TOKEN_LENGTH = 36
class SessionsApiHandler(ApiHandler):
    """REST API handler for the /api/sessions endpoints.

    Creates, reads, updates and controls (start/pause/stop/resume) test
    sessions, delegating to the sessions and results managers, and serves
    session events via HTTP long polling.
    """

    def __init__(self, sessions_manager, results_manager, event_dispatcher, web_root):
        super(SessionsApiHandler, self).__init__(web_root)
        self._sessions_manager = sessions_manager
        self._results_manager = results_manager
        self._event_dispatcher = event_dispatcher

    def create_session(self, request, response):
        """POST /api/sessions: create a session from a JSON config body.

        All config keys are optional; missing keys fall back to empty
        defaults. Responds with the new session token.
        """
        try:
            config = {}
            body = request.body.decode("utf-8")
            if body != "":
                config = json.loads(body)
            tests = {}
            if "tests" in config:
                tests = config["tests"]
            types = None
            if "types" in config:
                types = config["types"]
            timeouts = {}
            if "timeouts" in config:
                timeouts = config["timeouts"]
            reference_tokens = []
            if "reference_tokens" in config:
                reference_tokens = config["reference_tokens"]
            webhook_urls = []
            if "webhook_urls" in config:
                webhook_urls = config["webhook_urls"]
            user_agent = request.headers[b"user-agent"].decode("utf-8")
            labels = []
            if "labels" in config:
                labels = config["labels"]
            expiration_date = None
            if "expiration_date" in config:
                expiration_date = config["expiration_date"]
            session = self._sessions_manager.create_session(
                tests,
                types,
                timeouts,
                reference_tokens,
                webhook_urls,
                user_agent,
                labels,
                expiration_date
            )
            self.send_json({"token": session.token}, response)
        except InvalidDataException:
            self.handle_exception("Failed to create session")
            self.send_json({"error": "Invalid input data!"}, response, 400)
        except Exception:
            self.handle_exception("Failed to create session")
            response.status = 500

    def read_session(self, request, response):
        """GET /api/sessions/<token>: static session configuration.

        Volatile state (test progress, dates, status) is stripped; use
        read_session_status for that.
        """
        try:
            uri_parts = self.parse_uri(request)
            token = uri_parts[2]
            session = self._sessions_manager.read_session(token)
            if session is None:
                response.status = 404
                return
            data = serialize_session(session)
            del data["pending_tests"]
            del data["running_tests"]
            del data["malfunctioning_tests"]
            del data["test_state"]
            del data["date_started"]
            del data["date_finished"]
            del data["status"]
            self.send_json(data, response)
        except Exception:
            self.handle_exception("Failed to read session")
            response.status = 500

    def read_session_status(self, request, response):
        """GET /api/sessions/<token>/status: volatile session state.

        Static configuration fields are stripped; use read_session for
        those.
        """
        try:
            uri_parts = self.parse_uri(request)
            token = uri_parts[2]
            session = self._sessions_manager.read_session_status(token)
            if session is None:
                response.status = 404
                return
            data = serialize_session(session)
            del data["tests"]
            del data["pending_tests"]
            del data["running_tests"]
            del data["malfunctioning_tests"]
            del data["types"]
            del data["test_state"]
            del data["last_completed_test"]
            del data["user_agent"]
            del data["timeouts"]
            del data["browser"]
            del data["is_public"]
            del data["reference_tokens"]
            del data["webhook_urls"]
            self.send_json(data, response)
        except Exception:
            self.handle_exception("Failed to read session status")
            response.status = 500

    def read_public_sessions(self, request, response):
        """GET /api/sessions/public: tokens of all public sessions."""
        try:
            session_tokens = self._sessions_manager.read_public_sessions()
            self.send_json(session_tokens, response)
        except Exception:
            self.handle_exception("Failed to read public sessions")
            response.status = 500

    def update_session_configuration(self, request, response):
        """PUT /api/sessions/<token>: update a session's configuration."""
        try:
            uri_parts = self.parse_uri(request)
            token = uri_parts[2]
            config = {}
            body = request.body.decode("utf-8")
            if body != "":
                config = json.loads(body)
            tests = {}
            if "tests" in config:
                tests = config["tests"]
            types = None
            if "types" in config:
                types = config["types"]
            timeouts = {}
            if "timeouts" in config:
                timeouts = config["timeouts"]
            reference_tokens = []
            if "reference_tokens" in config:
                reference_tokens = config["reference_tokens"]
            webhook_urls = []
            if "webhook_urls" in config:
                webhook_urls = config["webhook_urls"]
            self._sessions_manager.update_session_configuration(
                token,
                tests,
                types,
                timeouts,
                reference_tokens,
                webhook_urls
            )
        except NotFoundException:
            self.handle_exception("Failed to update session configuration")
            response.status = 404
        except Exception:
            self.handle_exception("Failed to update session configuration")
            response.status = 500

    def update_labels(self, request, response):
        """PUT /api/sessions/<token>/labels: replace the session labels.

        Accepts either a plain JSON array or {"labels": [...]}.
        """
        try:
            uri_parts = self.parse_uri(request)
            token = uri_parts[2]
            body = request.body.decode("utf-8")
            labels = None
            if body != "":
                labels = json.loads(body)
                if "labels" in labels:
                    labels = labels["labels"]
            self._sessions_manager.update_labels(token=token, labels=labels)
        except Exception:
            self.handle_exception("Failed to update labels")
            response.status = 500

    def delete_session(self, request, response):
        """DELETE /api/sessions/<token>: remove session and its results."""
        try:
            uri_parts = self.parse_uri(request)
            token = uri_parts[2]
            session = self._sessions_manager.read_session(token)
            if session is None:
                response.status = 404
                return
            self._sessions_manager.delete_session(token)
            self._results_manager.delete_results(token)
        except Exception:
            self.handle_exception("Failed to delete session")
            response.status = 500

    def start_session(self, request, response):
        """POST /api/sessions/<token>/start: begin test execution."""
        try:
            uri_parts = self.parse_uri(request)
            token = uri_parts[2]
            self._sessions_manager.start_session(token)
        except Exception:
            self.handle_exception("Failed to start session")
            response.status = 500

    def pause_session(self, request, response):
        """POST /api/sessions/<token>/pause: pause test execution."""
        try:
            uri_parts = self.parse_uri(request)
            token = uri_parts[2]
            self._sessions_manager.pause_session(token)
        except Exception:
            self.handle_exception("Failed to pause session")
            response.status = 500

    def stop_session(self, request, response):
        """POST /api/sessions/<token>/stop: abort the session (final)."""
        try:
            uri_parts = self.parse_uri(request)
            token = uri_parts[2]
            self._sessions_manager.stop_session(token)
        except Exception:
            self.handle_exception("Failed to stop session")
            response.status = 500

    def resume_session(self, request, response):
        """POST /api/sessions/<token>/resume: continue another session.

        The JSON body may carry a "resume_token" naming the session to
        resume in place of this one.
        """
        try:
            uri_parts = self.parse_uri(request)
            token = uri_parts[2]
            resume_token = None
            body = request.body.decode("utf-8")
            if body != "":
                resume_token = json.loads(body)["resume_token"]
            self._sessions_manager.resume_session(token, resume_token)
        except Exception:
            self.handle_exception("Failed to resume session")
            response.status = 500

    def find_session(self, request, response):
        """GET /api/sessions/<fragment>: resolve a token prefix.

        Responds 404 unless exactly one session matches the fragment.
        """
        try:
            uri_parts = self.parse_uri(request)
            fragment = uri_parts[2]
            token = self._sessions_manager.find_token(fragment)
            if token is None:
                response.status = 404
                return
            self.send_json({"token": token}, response)
        except Exception:
            self.handle_exception("Failed to find session")
            response.status = 500

    def register_event_listener(self, request, response):
        """GET /api/sessions/<token>/events: long-poll for one event.

        Blocks the handler thread until the dispatcher delivers the next
        event for the session, then returns it as JSON.
        """
        # NOTE(review): event.wait() has no timeout — a session that never
        # emits an event holds this connection open indefinitely; confirm
        # the server's request timeout covers this.
        try:
            uri_parts = self.parse_uri(request)
            token = uri_parts[2]
            event = threading.Event()
            http_polling_client = HttpPollingClient(token, event)
            self._event_dispatcher.add_session_client(http_polling_client)
            event.wait()
            message = http_polling_client.message
            self.send_json(data=message, response=response)
        except Exception:
            self.handle_exception("Failed to register event listener")
            response.status = 500

    def handle_request(self, request, response):
        """Route /api/sessions requests by path length, segment, method."""
        method = request.method
        uri_parts = self.parse_uri(request)
        # /api/sessions
        if len(uri_parts) == 2:
            if method == "POST":
                self.create_session(request, response)
                return
        # /api/sessions/<token> ("public" and token fragments are special)
        if len(uri_parts) == 3:
            function = uri_parts[2]
            if method == "GET":
                if function == "public":
                    self.read_public_sessions(request, response)
                    return
                # Anything shorter/longer than a full token is treated as
                # a fragment lookup.
                if len(function) != TOKEN_LENGTH:
                    self.find_session(request, response)
                    return
                self.read_session(request, response)
                return
            if method == "PUT":
                self.update_session_configuration(request, response)
                return
            if method == "DELETE":
                self.delete_session(request, response)
                return
        # /api/sessions/<token>/<function>
        if len(uri_parts) == 4:
            function = uri_parts[3]
            if method == "GET":
                if function == "status":
                    self.read_session_status(request, response)
                    return
                if function == "events":
                    self.register_event_listener(request, response)
                    return
            if method == "POST":
                if function == "start":
                    self.start_session(request, response)
                    return
                if function == "pause":
                    self.pause_session(request, response)
                    return
                if function == "stop":
                    self.stop_session(request, response)
                    return
                if function == "resume":
                    self.resume_session(request, response)
                    return
            if method == "PUT":
                if function == "labels":
                    self.update_labels(request, response)
                    return
        # No route matched.
        response.status = 404

View File

@ -0,0 +1,287 @@
from __future__ import absolute_import
from __future__ import unicode_literals
import json
try:
from urllib.parse import urlunsplit
except ImportError:
from urlparse import urlunsplit
from .api_handler import ApiHandler
from ...utils.serializer import serialize_session
from ...data.session import PAUSED, COMPLETED, ABORTED, PENDING, RUNNING
# Fallback number of completed tests returned when the "count" query
# parameter is omitted.
DEFAULT_LAST_COMPLETED_TESTS_COUNT = 5
# Fallback status filter when the "status" query parameter is omitted;
# "ALL" expands to pass, fail and timeout in read_last_completed.
DEFAULT_LAST_COMPLETED_TESTS_STATUS = ["ALL"]
class TestsApiHandler(ApiHandler):
    """Handles /api/tests endpoints: test lists, per-session test state
    and next-test navigation for the device under test."""

    def __init__(
        self,
        wpt_port,
        wpt_ssl_port,
        tests_manager,
        sessions_manager,
        hostname,
        web_root,
        test_loader
    ):
        super(TestsApiHandler, self).__init__(web_root)
        self._tests_manager = tests_manager
        self._sessions_manager = sessions_manager
        self._wpt_port = wpt_port
        self._wpt_ssl_port = wpt_ssl_port
        self._hostname = hostname
        self._web_root = web_root
        self._test_loader = test_loader

    def read_tests(self, response):
        """Write the complete list of available tests as JSON."""
        tests = self._tests_manager.read_tests()
        self.send_json(tests, response)

    def read_session_tests(self, request, response):
        """Write a session's pending and running tests; 404 if unknown."""
        uri_parts = self.parse_uri(request)
        token = uri_parts[2]
        session = self._sessions_manager.read_session(token)
        if session is None:
            response.status = 404
            return
        data = serialize_session(session)
        tests = {
            "token": token,
            "pending_tests": data["pending_tests"],
            "running_tests": data["running_tests"]
        }
        self.send_json(tests, response)

    def read_next_test(self, request, response):
        """Write the URL the device under test should navigate to next.

        Depending on the session state this is a WAVE UI page (pause,
        finish, newsession) or the next test's URL.
        """
        try:
            uri_parts = self.parse_uri(request)
            token = uri_parts[2]
            hostname = self._hostname

            session = self._sessions_manager.read_session(token)
            if session is None:
                response.status = 404
                return

            if session.status == PAUSED:
                url = self._generate_wave_url(
                    hostname=hostname,
                    uri="pause.html",
                    token=token
                )
                self.send_json({"next_test": url}, response)
                return
            if session.status == COMPLETED or session.status == ABORTED:
                url = self._generate_wave_url(
                    hostname=hostname,
                    uri="finish.html",
                    token=token
                )
                self.send_json({"next_test": url}, response)
                return
            if session.status == PENDING:
                url = self._generate_wave_url(
                    hostname=hostname,
                    uri="newsession.html",
                    token=token
                )
                self.send_json({"next_test": url}, response)
                return

            test = self._tests_manager.next_test(session)

            # No test left: show the finish page and mark the session
            # complete.
            if test is None:
                if session.status != RUNNING:
                    return
                url = self._generate_wave_url(
                    hostname=hostname,
                    uri="finish.html",
                    token=token
                )
                self.send_json({"next_test": url}, response)
                self._sessions_manager.complete_session(token)
                return

            test_timeout = self._tests_manager.get_test_timeout(
                test=test, session=session)

            url = self._generate_test_url(
                test=test,
                token=token,
                test_timeout=test_timeout,
                hostname=hostname)

            self.send_json({
                "next_test": url
            }, response)
        except Exception:
            self.handle_exception("Failed to read next test")
            response.status = 500

    def read_last_completed(self, request, response):
        """Write the most recently completed tests, filtered by status.

        Query parameters: "count" (default 5) and "status" (a
        comma-separated combination of pass/fail/timeout/all).
        """
        try:
            uri_parts = self.parse_uri(request)
            token = uri_parts[2]
            query = self.parse_query_parameters(request)
            count = None
            if "count" in query:
                count = query["count"]
            else:
                count = DEFAULT_LAST_COMPLETED_TESTS_COUNT
            status = None
            if "status" in query:
                status = query["status"].split(",")
            else:
                status = DEFAULT_LAST_COMPLETED_TESTS_STATUS

            completed_tests = self._tests_manager.read_last_completed_tests(
                token, count)
            tests = {}
            for one_status in status:
                one_status = one_status.lower()
                if one_status == "pass":
                    tests["pass"] = completed_tests["pass"]
                    continue
                if one_status == "fail":
                    tests["fail"] = completed_tests["fail"]
                    continue
                if one_status == "timeout":
                    tests["timeout"] = completed_tests["timeout"]
                    continue
                # "all" supersedes the individual filters.
                if one_status == "all":
                    tests["pass"] = completed_tests["pass"]
                    tests["fail"] = completed_tests["fail"]
                    tests["timeout"] = completed_tests["timeout"]
                    break
            self.send_json(data=tests, response=response)
        except Exception:
            self.handle_exception("Failed to read last completed tests")
            response.status = 500

    def read_malfunctioning(self, request, response):
        """Write the session's list of tests flagged as malfunctioning."""
        try:
            uri_parts = self.parse_uri(request)
            token = uri_parts[2]
            tm = self._tests_manager
            malfunctioning_tests = tm.read_malfunctioning_tests(token)
            self.send_json(data=malfunctioning_tests, response=response)
        except Exception:
            self.handle_exception("Failed to read malfunctioning tests")
            response.status = 500

    def update_malfunctioning(self, request, response):
        """Replace the session's malfunctioning-test list from the JSON body."""
        try:
            uri_parts = self.parse_uri(request)
            token = uri_parts[2]
            data = None
            body = request.body.decode("utf-8")
            if body != "":
                data = json.loads(body)
            self._tests_manager.update_malfunctioning_tests(token, data)
        except Exception:
            self.handle_exception("Failed to update malfunctioning tests")
            response.status = 500

    def read_available_apis(self, request, response):
        """Write the list of testable API categories."""
        try:
            apis = self._test_loader.get_apis()
            self.send_json(apis, response)
        except Exception:
            self.handle_exception("Failed to read available APIs")
            response.status = 500

    def handle_request(self, request, response):
        """Route /api/tests requests; unknown paths yield a 404."""
        method = request.method
        uri_parts = self.parse_uri(request)

        # /api/tests
        if len(uri_parts) == 2:
            if method == "GET":
                self.read_tests(response)
                return

        # /api/tests/<token>  (or /api/tests/apis)
        if len(uri_parts) == 3:
            if method == "GET":
                if uri_parts[2] == "apis":
                    self.read_available_apis(request, response)
                    return
                self.read_session_tests(request, response)
                return

        # /api/tests/<token>/<function>
        if len(uri_parts) == 4:
            function = uri_parts[3]
            if method == "GET":
                if function == "next":
                    self.read_next_test(request, response)
                    return
                if function == "last_completed":
                    self.read_last_completed(request, response)
                    return
                if function == "malfunctioning":
                    self.read_malfunctioning(request, response)
                    return
            if method == "PUT":
                if function == "malfunctioning":
                    self.update_malfunctioning(request, response)
                    return
        response.status = 404

    def _generate_wave_url(self, hostname, uri, token):
        """Build a URL to a WAVE UI page, carrying the session token."""
        if self._web_root is not None:
            uri = self._web_root + uri
        return self._generate_url(
            hostname=hostname,
            uri=uri,
            port=self._wpt_port,
            query="token=" + token
        )

    def _generate_test_url(self, hostname, test, token, test_timeout):
        """Build a test's URL with token/timeout/https_port/web_root params."""
        protocol = "http"
        port = self._wpt_port

        # NOTE(review): substring check — any test path containing
        # "https" anywhere is served over TLS; confirm this matches the
        # test naming convention.
        if "https" in test:
            protocol = "https"
            port = self._wpt_ssl_port

        query = "token={}&timeout={}&https_port={}&web_root={}".format(
            token,
            test_timeout,
            self._wpt_ssl_port,
            self._web_root
        )

        return self._generate_url(
            protocol=protocol,
            hostname=hostname,
            port=port,
            uri=test,
            query=query
        )

    def _generate_url(self,
                      hostname,
                      port=None,
                      uri=None,
                      query=None,
                      protocol=None):
        """Assemble a URL from its parts, defaulting to http://<host>:80/."""
        if port is None:
            port = 80
        if uri is None:
            uri = "/"
        if query is None:
            query = ""
        if protocol is None:
            protocol = "http"
        return urlunsplit([protocol, "{}:{}".format(hostname, port), uri, query, ''])

View File

@ -0,0 +1,102 @@
from __future__ import unicode_literals
try:
import http.client as httplib
except ImportError:
import httplib
import sys
import traceback
class HttpHandler(object):
    """Top-level WAVE dispatcher.

    Routes incoming requests either to one of the API handlers, to the
    static file handler, or — for API calls arriving over HTTPS — proxies
    them to the plain-HTTP WAVE port on localhost.
    """

    def __init__(
        self,
        static_handler=None,
        sessions_api_handler=None,
        tests_api_handler=None,
        results_api_handler=None,
        http_port=None,
        web_root=None
    ):
        self.static_handler = static_handler
        self.sessions_api_handler = sessions_api_handler
        self.tests_api_handler = tests_api_handler
        self.results_api_handler = results_api_handler
        self._http_port = http_port
        self._web_root = web_root

    def handle_request(self, request, response):
        """Dispatch a request to the API or static handler (CORS enabled)."""
        response.headers = [
            ("Access-Control-Allow-Origin", "*"),
            ("Access-Control-Allow-Headers", "*"),
            ("Access-Control-Allow-Methods", "*")
        ]

        # CORS preflight: the headers above are the whole answer.
        if request.method == "OPTIONS":
            return

        path = self._remove_web_root(request.request_path)

        # Look for an "api" segment within the first three path parts.
        is_api_call = False
        for index, part in enumerate(path.split("/")):
            if index > 2:
                break
            if part != "api":
                continue
            is_api_call = True

        if (is_api_call):
            # The WAVE API itself is only served over HTTP; forward
            # HTTPS calls to the HTTP port.
            if request.url_parts.scheme == "https":
                self._proxy(request, response)
                return
            self.handle_api(request, response)
        else:
            self.handle_static_file(request, response)

    def handle_api(self, request, response):
        """Route an API request to the matching sub-handler by name.

        NOTE(review): split("/")[1] only yields the API name when the
        web-root prefix (with trailing slash) has been stripped so the
        path has no leading slash — confirm web_root is always set.
        """
        path = self._remove_web_root(request.request_path)
        api_name = path.split("/")[1]

        if api_name is None:
            return

        if api_name == "sessions":
            self.sessions_api_handler.handle_request(request, response)
            return
        if api_name == "tests":
            self.tests_api_handler.handle_request(request, response)
            return
        if api_name == "results":
            self.results_api_handler.handle_request(request, response)
            return

    def handle_static_file(self, request, response):
        """Delegate to the static file handler."""
        self.static_handler.handle_request(request, response)

    def _remove_web_root(self, path):
        """Strip the configured web-root prefix from a request path."""
        if self._web_root is not None:
            path = path[len(self._web_root):]
        return path

    def _proxy(self, request, response):
        """Forward the request to the WAVE HTTP port and mirror the reply.

        Used for API calls that arrive over HTTPS.
        """
        host = 'localhost'
        port = str(self._http_port)
        uri = request.url_parts.path
        uri = uri + "?" + request.url_parts.query
        # NOTE(review): the Content-Length header value is passed to
        # read() as-is; confirm the server's header API returns an int.
        data = request.raw_input.read(request.headers.get('Content-Length'))
        method = request.method

        try:
            proxy_connection = httplib.HTTPConnection(host, port)
            proxy_connection.request(method, uri, data, request.headers)
            proxy_response = proxy_connection.getresponse()
            response.content = proxy_response.read()
            response.headers = proxy_response.getheaders()
            response.status = proxy_response.status
        except IOError:
            info = sys.exc_info()
            traceback.print_tb(info[2])
            print("Failed to perform proxy request: " +
                  info[0].__name__ + ": " + str(info[1].args[0]))
            response.status = 500

View File

@ -0,0 +1,55 @@
from __future__ import with_statement
from __future__ import absolute_import
from __future__ import unicode_literals
import os
from io import open
class StaticHandler(object):
    """Serves the WAVE web UI's static files from tools/wave/www.

    For wave-service.js the configured web root and ports are substituted
    into the file content before it is served.
    """

    def __init__(self, web_root, http_port, https_port):
        # NOTE(review): assumes the process was started from the WPT
        # checkout root.
        self.static_dir = os.path.join(
            os.getcwd(), "tools/wave/www")
        self._web_root = web_root
        self._http_port = http_port
        self._https_port = https_port

    def handle_request(self, request, response):
        """Read the requested file and write its content and type headers."""
        file_path = request.request_path
        if self._web_root is not None:
            file_path = file_path[len(self._web_root):]
        if file_path == "." or file_path == "./" or file_path == "":
            file_path = "index.html"
        file_path = file_path.split("?")[0]
        file_path = os.path.join(self.static_dir, file_path)

        headers = []
        content_types = {
            "html": "text/html",
            "js": "text/javascript",
            "css": "text/css",
            "jpg": "image/jpeg",
            "jpeg": "image/jpeg",
            "ttf": "font/ttf",
            "woff": "font/woff",
            "woff2": "font/woff2"
        }
        # Bug fix: unknown extensions used to raise a KeyError (HTTP 500);
        # fall back to a generic binary content type instead.
        extension = file_path.split(".")[-1]
        headers.append(
            ("Content-Type",
             content_types.get(extension, "application/octet-stream")))

        data = None
        with open(file_path, "rb") as file:
            data = file.read()

        # Inject instance configuration into the client-side service script.
        if file_path.split("/")[-1] == "wave-service.js":
            data = data.decode("UTF-8")
            data = data.replace("{{WEB_ROOT}}", str(self._web_root))
            data = data.replace("{{HTTP_PORT}}", str(self._http_port))
            data = data.replace("{{HTTPS_PORT}}", str(self._https_port))

        response.content = data
        response.headers = headers

View File

@ -0,0 +1 @@
ua-parser==0.8.0

View File

@ -0,0 +1,277 @@
/* global add_completion_callback */
/* global setup */
/*
* This file is intended for vendors to implement code needed to integrate
* testharness.js tests with their own test systems.
*
* Typically test system integration will attach callbacks when each test has
* run, using add_result_callback(callback(test)), or when the whole test file
* has completed, using
* add_completion_callback(callback(tests, harness_status)).
*
* For more documentation about the callback functions and the
* parameters they are called with see testharness.js
*/
/*
 * If the "token" query parameter is present, the test was loaded by the
 * WAVE test runner and its results must be reported back to the server,
 * using the token to identify the associated session.
 */
if (location.search && location.search.indexOf("token=") != -1) {
var __WAVE__HOSTNAME = location.hostname;
var __WAVE__PORT = location.port;
var __WAVE__PROTOCOL = location.protocol.replace(/:/, "");
var __WAVE__QUERY = location.search;
if (!__WAVE__QUERY) __WAVE__QUERY = "?";
var match = __WAVE__QUERY.match(/https_port=(\d+)/);
var __HTTPS_PORT = parseInt(match && match[1] ? match[1] : 443);
match = __WAVE__QUERY.match(/timeout=(\d+)/);
var __WAVE__TIMEOUT = parseInt(match && match[1] ? match[1] : 65000);
match = __WAVE__QUERY.match(/web_root=(.+)/);
var __WAVE__WEB_ROOT = match && match[1] ? match[1] : "/wave/";
console.log("\n\n\n\n\n")
console.log(match)
console.log(__WAVE__WEB_ROOT)
match = __WAVE__QUERY.match(/token=([^&]+)/);
var __WAVE__TOKEN = match ? match[1] : null;
var __WAVE__TEST = location.pathname;
var nextUrl = null;
var resultSent = false;
var screenConsole;
try {
var documentRoot = document.body ? document.body : document.documentElement;
documentRoot.style["background-color"] = "#FFF";
window.open = function () {
logToConsole(
"window.open() is overridden in testharnessreport.js and has not effect"
);
var dummyWin = {
close: function () {
logToConsole(
"dummyWindow.close() in testharnessreport.js and has not effect"
);
}
};
return dummyWin;
};
window.close = function () {
logToConsole(
"window.close() is overridden in testharnessreport.js and has not effect"
);
};
} catch (err) {}
setTimeout(function () {
loadNext();
}, __WAVE__TIMEOUT);
function logToConsole() {
var text = "";
for (var i = 0; i < arguments.length; i++) {
text += arguments[i] + " ";
}
if (console && console.log) {
console.log(text);
}
if (screenConsole) {
try {
text = text.replace(/ /gm, "&nbsp;");
text = text.replace(/\n/gm, "<br/>");
screenConsole.innerHTML += "<br/>" + text;
} catch (error) {
screenConsole.innerText += "\n" + text;
}
}
}
function dump_and_report_test_results(tests, status) {
var results_element = document.createElement("script");
results_element.type = "text/json";
results_element.id = "__testharness__results__";
var test_results = tests.map(function (x) {
return {
name: x.name,
status: x.status,
message: x.message,
stack: x.stack
};
});
var data = {
test: window.location.href,
tests: test_results,
status: status.status,
message: status.message,
stack: status.stack
};
results_element.textContent = JSON.stringify(data);
// To avoid a HierarchyRequestError with XML documents, ensure that 'results_element'
// is inserted at a location that results in a valid document.
var parent = document.body ?
document.body // <body> is required in XHTML documents
:
document.documentElement; // fallback for optional <body> in HTML5, SVG, etc.
parent.appendChild(results_element);
screenConsole = document.createElement("div");
screenConsole.setAttribute("id", "console");
screenConsole.setAttribute("style", "font-family: monospace; padding: 5px");
parent.appendChild(screenConsole);
window.onerror = logToConsole;
finishWptTest(data);
}
function finishWptTest(data) {
logToConsole("Creating result ...");
data.test = __WAVE__TEST;
createResult(
__WAVE__TOKEN,
data,
function () {
logToConsole("Result created.");
loadNext();
},
function () {
logToConsole("Failed to create result.");
logToConsole("Trying alternative method ...");
createResultAlt(__WAVE__TOKEN, data);
}
);
}
function loadNext() {
logToConsole("Loading next test ...");
readNextTest(
__WAVE__TOKEN,
function (url) {
logToConsole("Redirecting to " + url);
location.href = url;
},
function () {
logToConsole("Could not load next test.");
logToConsole("Trying alternative method ...");
readNextAlt(__WAVE__TOKEN);
}
);
}
function readNextTest(token, onSuccess, onError) {
sendRequest(
"GET",
"api/tests/" + token + "/next",
null,
null,
function (response) {
var jsonObject = JSON.parse(response);
onSuccess(jsonObject.next_test);
},
onError
);
}
function readNextAlt(token) {
location.href = getWaveUrl("next.html?token=" + token);
}
function createResult(token, result, onSuccess, onError) {
sendRequest(
"POST",
"api/results/" + token, {
"Content-Type": "application/json"
},
JSON.stringify(result),
function () {
onSuccess();
},
onError
);
}
function createResultAlt(token, result) {
location.href = __WAVE__WEB_ROOT + "submitresult.html" +
"?token=" + token +
"&result=" + encodeURIComponent(JSON.stringify(result));
}
function sendRequest(method, uri, headers, data, onSuccess, onError) {
var url = getWaveUrl(uri);
var xhr = new XMLHttpRequest();
xhr.addEventListener("load", function () {
onSuccess(xhr.response);
});
xhr.addEventListener("error", function () {
if (onError) onError();
});
logToConsole("Sending", method, 'request to "' + url + '"');
xhr.open(method, url, true);
if (headers) {
for (var header in headers) {
xhr.setRequestHeader(header, headers[header]);
}
}
xhr.send(data);
}
function getWaveUrl(uri) {
var url = __WAVE__WEB_ROOT + uri;
console.log(url)
return url;
}
add_completion_callback(dump_and_report_test_results);
} else {
function dump_test_results(tests, status) {
var results_element = document.createElement("script");
results_element.type = "text/json";
results_element.id = "__testharness__results__";
var test_results = tests.map(function (x) {
return {
name: x.name,
status: x.status,
message: x.message,
stack: x.stack
}
});
var data = {
test: window.location.href,
tests: test_results,
status: status.status,
message: status.message,
stack: status.stack
};
results_element.textContent = JSON.stringify(data);
// To avoid a HierarchyRequestError with XML documents, ensure that 'results_element'
// is inserted at a location that results in a valid document.
var parent = document.body
? document.body // <body> is required in XHTML documents
: document.documentElement; // fallback for optional <body> in HTML5, SVG, etc.
parent.appendChild(results_element);
}
add_completion_callback(dump_test_results);
/* If the parent window has a testharness_properties object,
* we use this to provide the test settings. This is used by the
* default in-browser runner to configure the timeout and the
* rendering of results
*/
try {
if (window.opener && "testharness_properties" in window.opener) {
/* If we pass the testharness_properties object as-is here without
* JSON stringifying and reparsing it, IE fails & emits the message
* "Could not complete the operation due to error 80700019".
*/
setup(JSON.parse(JSON.stringify(window.opener.testharness_properties)));
}
} catch (e) {}
// vim: set expandtab shiftwidth=4 tabstop=4:
}

View File

@ -0,0 +1,41 @@
from __future__ import unicode_literals
# Event types dispatched to registered session clients.
STATUS_EVENT = "status"
RESUME_EVENT = "resume"
TEST_COMPLETED_EVENT = "test_completed"
class EventDispatcher(object):
    """Fans out session events to all registered clients.

    Clients are grouped by session token; each client must expose a
    "session_token" attribute and a "send_message(event)" method.
    """

    def __init__(self):
        # Maps session token -> list of registered clients.
        self._clients = {}

    def add_session_client(self, client):
        """Register a client to receive its session's events."""
        token = client.session_token
        if token not in self._clients:
            self._clients[token] = []
        self._clients[token].append(client)

    def remove_session_client(self, client_to_delete):
        """Unregister a client; drop the token entry once it is empty."""
        if client_to_delete is None:
            return
        token = client_to_delete.session_token
        if token not in self._clients:
            return
        for client in self._clients[token]:
            if client.session_token == client_to_delete.session_token:
                # Bug fix: remove from the token's client *list*;
                # self._clients is a dict and has no remove() method.
                self._clients[token].remove(client)
                break
        if len(self._clients[token]) == 0:
            del self._clients[token]

    def dispatch_event(self, token, event_type, data):
        """Send an event of event_type with payload data to token's clients."""
        if token not in self._clients:
            return
        event = {
            "type": event_type,
            "data": data
        }
        for client in self._clients[token]:
            client.send_message(event)

View File

@ -0,0 +1,628 @@
from __future__ import absolute_import
from __future__ import unicode_literals
import os
import shutil
import re
import json
import hashlib
import zipfile
import time
from ..utils.user_agent_parser import parse_user_agent, abbreviate_browser_name
from ..utils.serializer import serialize_session
from ..utils.deserializer import deserialize_session
from ..data.exceptions.invalid_data_exception import InvalidDataException
from ..data.exceptions.duplicate_exception import DuplicateException
from ..data.exceptions.not_found_exception import NotFoundException
from ..data.exceptions.permission_denied_exception import PermissionDeniedException
from .wpt_report import generate_report, generate_multi_report
from ..data.session import COMPLETED
WAVE_SRC_DIR = "./tools/wave"
class ResultsManager(object):
def initialize(
    self,
    results_directory_path,
    sessions_manager,
    tests_manager,
    import_enabled,
    reports_enabled,
    persisting_interval
):
    """Late initializer wiring in managers and configuration.

    persisting_interval is the number of completed tests after which
    cached results are flushed to disk.
    """
    self._results_directory_path = results_directory_path
    self._sessions_manager = sessions_manager
    self._tests_manager = tests_manager
    self._import_enabled = import_enabled
    self._reports_enabled = reports_enabled
    # In-memory result cache: token -> api -> list of results.
    self._results = {}
    self._persisting_interval = persisting_interval
def create_result(self, token, data):
    """Record a finished test's result for a session.

    Ignores results for unknown sessions and for tests that are not part
    of / currently running in the session. Persists cached results every
    persisting_interval tests or when an API finishes, generates the API
    report, and completes the session once all APIs are done.
    """
    result = self.prepare_result(data)
    test = result["test"]
    session = self._sessions_manager.read_session(token)
    if session is None:
        return
    if not self._sessions_manager.test_in_session(test, session):
        return
    if not self._sessions_manager.is_test_running(test, session):
        return
    self._tests_manager.complete_test(test, session)
    self._push_to_cache(token, result)
    self._update_test_state(result, session)

    session.last_completed_test = test
    session.recent_completed_count += 1
    self._sessions_manager.update_session(session)

    # The API name is the first non-empty path segment of the test.
    api = next((p for p in test.split("/") if p != ""), None)
    if session.recent_completed_count >= self._persisting_interval \
            or self._sessions_manager.is_api_complete(api, session):
        self.persist_session(session)

    if not self._sessions_manager.is_api_complete(api, session):
        return
    self.generate_report(token, api)

    # Complete the session once every API in the test state is done.
    test_state = session.test_state
    apis = list(test_state.keys())
    all_apis_complete = True
    for api in apis:
        if not self._sessions_manager.is_api_complete(api, session):
            all_apis_complete = False
    if not all_apis_complete:
        return
    self._sessions_manager.complete_session(token)
    self.create_info_file(session)
def read_results(self, token, filter_path=None):
    """Return a session's results as api -> list, optionally filtered.

    filter_path restricts results to one API (its first path segment)
    and to tests whose path starts with filter_path (dots ignored).
    Combines the in-memory cache with results persisted on disk.
    """
    filter_api = None
    if filter_path is not None:
        # Bug fix: the first non-EMPTY segment is the API. The previous
        # predicate ("p is not None") never skipped the empty segment
        # produced by a leading slash, so "/apiA" filtered everything
        # out; "p != ''" matches the idiom used elsewhere in this class.
        filter_api = next((p for p in filter_path.split("/")
                           if p != ""), None)
    cached_results = self._read_from_cache(token)
    persisted_results = self.load_results(token)
    results = self._combine_results_by_api(cached_results,
                                           persisted_results)
    filtered_results = {}
    for api in results:
        if filter_api is not None and api.lower() != filter_api.lower():
            continue
        for result in results[api]:
            if filter_path is not None:
                # Compare with dots stripped so extensions don't matter.
                pattern = re.compile("^" + filter_path.replace(".", ""))
                if pattern.match(result["test"].replace(".", "")) \
                        is None:
                    continue
            if api not in filtered_results:
                filtered_results[api] = []
            filtered_results[api].append(result)
    return filtered_results
def read_flattened_results(self, token):
    """Return the aggregated per-API pass/fail counters of a session."""
    return self._sessions_manager.read_session(token).test_state
def _update_test_state(self, result, session):
api = next((p for p in result["test"].split("/") if p != ""), None)
if "subtests" not in result:
if result["status"] == "OK":
session.test_state[api]["pass"] += 1
elif result["status"] == "ERROR":
session.test_state[api]["fail"] += 1
elif result["status"] == "TIMEOUT":
session.test_state[api]["timeout"] += 1
elif result["status"] == "NOTRUN":
session.test_state[api]["not_run"] += 1
else:
for test in result["subtests"]:
if test["status"] == "PASS":
session.test_state[api]["pass"] += 1
elif test["status"] == "FAIL":
session.test_state[api]["fail"] += 1
elif test["status"] == "TIMEOUT":
session.test_state[api]["timeout"] += 1
elif test["status"] == "NOTRUN":
session.test_state[api]["not_run"] += 1
session.test_state[api]["complete"] += 1
self._sessions_manager.update_session(session)
def parse_test_state(self, results):
    """Compute per-API counters from an api -> result-list mapping."""
    harness_buckets = {"OK": "pass", "ERROR": "fail",
                       "TIMEOUT": "timeout", "NOTRUN": "not_run"}
    subtest_buckets = {"PASS": "pass", "FAIL": "fail",
                       "TIMEOUT": "timeout", "NOTRUN": "not_run"}
    test_state = {}
    for api, api_results in results.items():
        counters = {
            "pass": 0,
            "fail": 0,
            "timeout": 0,
            "not_run": 0,
            "total": len(api_results),
            "complete": 0,
        }
        for result in api_results:
            if "subtests" not in result:
                bucket = harness_buckets.get(result["status"])
                if bucket is not None:
                    counters[bucket] += 1
            else:
                for subtest in result["subtests"]:
                    bucket = subtest_buckets.get(subtest["status"])
                    if bucket is not None:
                        counters[bucket] += 1
            counters["complete"] += 1
        test_state[api] = counters
    return test_state
def read_common_passed_tests(self, tokens=None):
    """Return, per API, the tests that passed in every given session.

    A test counts as passed only if all of its subtests have status
    "PASS"; a failure in any session excludes the test permanently.
    Returns None when no tokens are given.
    """
    if tokens is None or len(tokens) == 0:
        return None
    session_results = []
    for token in tokens:
        session_result = self.read_results(token)
        session_results.append(session_result)

    passed_tests = {}
    failed_tests = {}
    for result in session_results:
        for api in result:
            if api not in passed_tests:
                passed_tests[api] = []
            if api not in failed_tests:
                failed_tests[api] = []
            for api_result in result[api]:
                passed = True
                for subtest in api_result["subtests"]:
                    if subtest["status"] == "PASS":
                        continue
                    passed = False
                    break
                test = api_result["test"]
                if passed:
                    if test in failed_tests[api]:
                        continue
                    if test in passed_tests[api]:
                        continue
                    passed_tests[api].append(test)
                else:
                    # A later failure retracts an earlier pass.
                    if test in passed_tests[api]:
                        passed_tests[api].remove(test)
                    if test in failed_tests[api]:
                        continue
                    failed_tests[api].append(test)
    # Bug fix: the computed mapping was built but never returned.
    return passed_tests
def read_results_wpt_report_uri(self, token, api):
    """Return the URI of a session's generated WPT report for one API.

    Returns None when no report directory exists for the session/API.
    """
    api_directory = os.path.join(self._results_directory_path, token, api)
    if os.path.isdir(api_directory):
        return "/results/{}/{}/all.html".format(token, api)
    return None
def read_results_wpt_multi_report_uri(self, tokens, api):
    """URI of the cross-session comparison report for one API.

    Generates the report first if its directory does not exist yet.
    """
    comparison_directory_name = self.get_comparison_identifier(tokens)
    relative_api_directory_path = os.path.join(comparison_directory_name,
                                               api)
    api_directory_path = os.path.join(
        self._results_directory_path,
        relative_api_directory_path
    )
    if not os.path.isdir(api_directory_path):
        self.generate_multi_report(tokens, api)
    return "/results/{}/all.html".format(relative_api_directory_path)
def delete_results(self, token):
    """Remove all persisted results of the given session, if any exist."""
    results_directory = os.path.join(self._results_directory_path, token)
    if os.path.isdir(results_directory):
        shutil.rmtree(results_directory)
def persist_session(self, session):
    """Flush a session's cached results to disk, one API at a time."""
    token = session.token
    if token not in self._results:
        return
    # Copy the key list since _clear_cache_api mutates the dict.
    for api in list(self._results[token].keys())[:]:
        self.save_api_results(token, api)
        # NOTE(review): info.json is rewritten on every iteration,
        # presumably to stay current if persisting is interrupted —
        # could likely be hoisted out of the loop.
        self.create_info_file(session)
        self._clear_cache_api(token, api)
    session.recent_completed_count = 0
    self._sessions_manager.update_session(session)
def load_results(self, token):
    """Load a session's persisted results from disk as api -> results.

    For each API subdirectory, reads the first file matching the
    result-file pattern (two letters plus 1-3 digits, e.g. "ch66.json":
    browser abbreviation + version).
    """
    results_directory = os.path.join(self._results_directory_path, token)
    if not os.path.isdir(results_directory):
        return {}
    results = {}
    apis = os.listdir(results_directory)
    for api in apis:
        api_directory = os.path.join(results_directory, api)
        if not os.path.isdir(api_directory):
            continue
        files = os.listdir(api_directory)
        for file_name in files:
            if re.match(r"\w\w\d{1,3}\.json", file_name) is None:
                continue
            file_path = os.path.join(api_directory, file_name)
            data = None
            with open(file_path, "r") as file:
                data = file.read()
            result = json.loads(data)
            results[api] = result["results"]
            # Only one result file per API is expected.
            break
    return results
def _push_to_cache(self, token, result):
if token is None:
return
if token not in self._results:
self._results[token] = {}
test = result["test"]
api = next((p for p in test.split("/") if p != ""), None)
if api not in self._results[token]:
self._results[token][api] = []
self._results[token][api].append(result)
def _read_from_cache(self, token):
if token is None:
return []
if token not in self._results:
return []
return self._results[token]
def _clear_cache_api(self, token, api):
if token is None:
return
if token not in self._results:
return
if api not in self._results[token]:
return
del self._results[token][api]
def _combine_results_by_api(self, result_a, result_b):
combined_result = {}
for api in result_a:
if api in result_b:
combined_result[api] = result_a[api] + result_b[api]
else:
combined_result[api] = result_a[api]
for api in result_b:
if api in combined_result:
continue
combined_result[api] = result_b[api]
return combined_result
def prepare_result(self, result):
    """Normalize a raw harness result in place and return it.

    Maps numeric status codes to their string names, renames the
    "tests" key to "subtests" and strips stack traces.
    """
    harness_status_map = {
        0: "OK",
        1: "ERROR",
        2: "TIMEOUT",
        3: "NOTRUN",
        "OK": "OK",
        "ERROR": "ERROR",
        "TIMEOUT": "TIMEOUT",
        "NOTRUN": "NOTRUN"
    }
    subtest_status_map = {
        0: "PASS",
        1: "FAIL",
        2: "TIMEOUT",
        3: "NOTRUN",
        "PASS": "PASS",
        "FAIL": "FAIL",
        "TIMEOUT": "TIMEOUT",
        "NOTRUN": "NOTRUN"
    }
    if "tests" in result:
        subtests = result.pop("tests")
        for subtest in subtests:
            subtest["status"] = subtest_status_map[subtest["status"]]
            subtest.pop("stack", None)
        result["subtests"] = subtests
    result.pop("stack", None)
    result["status"] = harness_status_map[result["status"]]
    return result
def get_json_path(self, token, api):
    """Path of the persisted result file for one API of a session.

    The file name encodes the session's browser, e.g. "ch66.json"
    (two-letter abbreviation plus zero-padded major version).
    """
    session = self._sessions_manager.read_session(token)
    api_directory = os.path.join(self._results_directory_path, token, api)

    browser = parse_user_agent(session.user_agent)
    abbreviation = abbreviate_browser_name(browser["name"])
    version = browser["version"]
    # Keep only the major version, padded to two digits.
    if "." in version:
        version = version.split(".")[0]
    version = version.zfill(2)
    file_name = abbreviation + version + ".json"

    return os.path.join(api_directory, file_name)
def save_api_results(self, token, api):
    """Append one API's cached results to the session's JSON file on disk.

    Creates the file (and its directory) if necessary; otherwise loads
    the existing file, appends the cached results and rewrites it.
    """
    results = self._read_from_cache(token)
    if api not in results:
        return
    results = results[api]
    session = self._sessions_manager.read_session(token)
    self._ensure_results_directory_existence(api, token, session)
    file_path = self.get_json_path(token, api)
    file_exists = os.path.isfile(file_path)
    with open(file_path, "r+" if file_exists else "w") as file:
        api_results = None
        if file_exists:
            data = file.read()
            api_results = json.loads(data)
        else:
            api_results = {"results": []}
        api_results["results"] = api_results["results"] + results
        # Rewrite the whole file in place.
        file.seek(0)
        file.truncate()
        file.write(json.dumps(api_results, indent=4, separators=(',', ': ')))
def _ensure_results_directory_existence(self, api, token, session):
directory = os.path.join(self._results_directory_path, token, api)
if not os.path.exists(directory):
os.makedirs(directory)
def generate_report(self, token, api):
    """Generate the HTML report for one API of a session.

    The bare call below resolves to the module-level generate_report
    imported from wpt_report (this method shadows the name, but only
    via self.).
    """
    file_path = self.get_json_path(token, api)
    dir_path = os.path.dirname(file_path)
    generate_report(
        input_json_directory_path=dir_path,
        output_html_directory_path=dir_path,
        spec_name=api
    )
def generate_multi_report(self, tokens, api):
    """Generate a cross-session comparison report for one API.

    Returns None without generating when the comparison directory
    already exists or a session has no result file for the API.
    The bare generate_multi_report call resolves to the module-level
    function imported from wpt_report.
    """
    comparison_directory_name = self.get_comparison_identifier(tokens)
    api_directory_path = os.path.join(
        self._results_directory_path,
        comparison_directory_name,
        api
    )
    if os.path.isdir(api_directory_path):
        return None
    # NOTE(review): the directory is created before checking that all
    # result files exist; if one is missing, later calls will see the
    # directory and skip generation entirely — confirm this is intended.
    os.makedirs(api_directory_path)
    result_json_files = []
    for token in tokens:
        result_json_files.append({
            "token": token,
            "path": self.get_json_path(token, api)
        })
    for file in result_json_files:
        if not os.path.isfile(file["path"]):
            return None
    generate_multi_report(
        output_html_directory_path=api_directory_path,
        spec_name=api,
        result_json_files=result_json_files
    )
def get_comparison_identifier(self, tokens, ref_tokens=None):
    """Deterministic directory name for a comparison of sessions.

    Builds "comparison-<short-token>..." from the first segment of each
    sorted token, suffixed with the first 8 hex chars of a SHA-1 over
    ref_tokens and tokens. Note: sorts both lists in place, as before.
    """
    if ref_tokens is None:
        ref_tokens = []
    comparison_directory = "comparison"
    tokens.sort()
    for token in tokens:
        short_token = token.split("-")[0]
        comparison_directory += "-" + short_token
    digest = hashlib.sha1()
    ref_tokens.sort()
    # Bug fix: hashlib's update() requires bytes; passing str raised
    # TypeError on Python 3 (this file targets 2 and 3).
    for token in ref_tokens:
        digest.update(token.encode("utf-8"))
    for token in tokens:
        digest.update(token.encode("utf-8"))
    comparison_directory += digest.hexdigest()[0:8]
    return comparison_directory
def create_info_file(self, session):
    """Write the session's metadata to info.json in its results directory."""
    token = session.token
    info_file_path = os.path.join(
        self._results_directory_path,
        token,
        "info.json"
    )
    info = serialize_session(session)
    # The (potentially huge) test lists are reconstructible and are not
    # persisted.
    del info["running_tests"]
    del info["pending_tests"]
    file_content = json.dumps(info, indent=2)
    with open(info_file_path, "w+") as file:
        file.write(file_content)
def export_results_api_json(self, token, api):
    """Return one API's results for a session as a JSON string.

    Prefers the live results (cache + disk via read_results); falls back
    to the raw persisted file. Returns None when neither exists.
    """
    results = self.read_results(token)
    if api in results:
        return json.dumps({"results": results[api]}, indent=4)
    file_path = self.get_json_path(token, api)
    if not os.path.isfile(file_path):
        return None
    with open(file_path, "r") as result_file:
        return result_file.read()
def export_results_all_api_jsons(self, token):
    """Bundle every API's JSON results of a session into a zip archive.

    Combines live results (read_results) with API result files already
    persisted on disk and returns the archive's bytes.
    """
    self._sessions_manager.read_session(token)
    results = self.read_results(token)
    zip_file_name = str(time.time()) + ".zip"
    # NOTE(review): the temporary archive is created in the current
    # working directory.
    zip_file = zipfile.ZipFile(zip_file_name, "w")
    # Bug fix: .items() works on Python 2 and 3; .iteritems() is
    # Python-2 only and crashed on Python 3.
    for api, result in results.items():
        zip_file.writestr(
            api + ".json",
            json.dumps({"results": result}, indent=4),
            zipfile.ZIP_DEFLATED
        )
    results_directory = os.path.join(self._results_directory_path, token)
    if os.path.isdir(results_directory):
        persisted_apis = os.listdir(results_directory)
        for api in persisted_apis:
            if api in results:
                continue
            blob = self.export_results_api_json(token, api)
            if blob is None:
                continue
            zip_file.writestr(api + ".json", blob, zipfile.ZIP_DEFLATED)
    zip_file.close()
    # Bug fix: read the archive back in binary mode; text mode breaks
    # on Python 3 (zip bytes are not valid text).
    with open(zip_file_name, "rb") as file:
        blob = file.read()
    os.remove(zip_file_name)
    return blob
def export_results(self, token):
    """Create a ZIP archive of a session's persisted results directory.

    Only COMPLETED sessions can be exported. Returns the archive as a
    binary blob, or None when the token is missing, the session is not
    completed, or no results directory exists on disk.
    """
    if token is None:
        return None
    session = self._sessions_manager.read_session(token)
    if session.status != COMPLETED:
        return None
    session_results_directory = os.path.join(self._results_directory_path,
                                             token)
    if not os.path.isdir(session_results_directory):
        return None
    zip_file_name = str(time.time()) + ".zip"
    try:
        with zipfile.ZipFile(zip_file_name, "w") as zip_file:
            for root, _, files in os.walk(session_results_directory):
                for file_name in files:
                    # Store paths relative to the session directory so the
                    # archive does not leak the server's directory layout.
                    archive_name = os.path.join(root.split(token)[1],
                                                file_name)
                    zip_file.write(os.path.join(root, file_name),
                                   archive_name, zipfile.ZIP_DEFLATED)
        # Binary mode: the ZIP content must be returned byte for byte.
        with open(zip_file_name, "rb") as zip_blob_file:
            blob = zip_blob_file.read()
    finally:
        if os.path.isfile(zip_file_name):
            os.remove(zip_file_name)
    return blob
def export_results_overview(self, token):
    """Create a self-contained HTML report ZIP for a session.

    Bundles the flattened results and session details as JS files
    together with the static report viewer from the "export" directory.

    Raises:
        NotFoundException: if no session exists for the token.
    """
    session = self._sessions_manager.read_session(token)
    if session is None:
        raise NotFoundException("Could not find session {}".format(token))
    tmp_file_name = str(time.time()) + ".zip"
    try:
        with zipfile.ZipFile(tmp_file_name, "w") as zip_file:
            flattened_results = self.read_flattened_results(token)
            # Wrapped in a "const" assignment so the static viewer can
            # load the data with a plain <script> tag.
            results_script = "const results = " + json.dumps(
                flattened_results, indent=4)
            zip_file.writestr("results.json.js", results_script)
            session_dict = serialize_session(session)
            # Test-progress state is transient; drop it from the report.
            del session_dict["running_tests"]
            del session_dict["pending_tests"]
            details_script = "const details = " + json.dumps(session_dict,
                                                             indent=4)
            zip_file.writestr("details.json.js", details_script)
            export_root = os.path.join(WAVE_SRC_DIR, "export")
            for root, _, files in os.walk(export_root):
                for file_name in files:
                    archive_name = os.path.join(root.split("export")[1],
                                                file_name)
                    zip_file.write(os.path.join(root, file_name),
                                   archive_name, zipfile.ZIP_DEFLATED)
        # Binary mode: the archive content must not be decoded as text.
        with open(tmp_file_name, "rb") as zip_blob_file:
            blob = zip_blob_file.read()
    finally:
        self.remove_tmp_files()
    return blob
def is_import_enabled(self):
    """Return the configuration flag controlling session result imports."""
    return self._import_enabled
def are_reports_enabled(self):
    """Return the configuration flag controlling report generation."""
    return self._reports_enabled
def load_session_from_info_file(self, info_file_path):
    """Deserialize a session from an info.json file.

    Returns None when the file does not exist.
    """
    if not os.path.isfile(info_file_path):
        return None
    # The "with" block closes the file; no explicit close() needed.
    with open(info_file_path, "r") as info_file:
        data = info_file.read()
    info = json.loads(data)
    return deserialize_session(info)
def import_results(self, blob):
    """Import a session results archive previously exported as a ZIP.

    Args:
        blob: binary content of the ZIP; must contain an info.json.
    Returns:
        The token of the imported session.
    Raises:
        PermissionDeniedException: if importing is disabled.
        InvalidDataException: if the archive has no info.json.
        DuplicateException: if a session with that token already exists.
    """
    # BUG FIX: the original tested the bound method object
    # ("self.is_import_enabled"), which is always truthy, so the
    # permission check never triggered. Call the method instead.
    if not self.is_import_enabled():
        raise PermissionDeniedException()
    tmp_file_name = "{}.zip".format(str(time.time()))
    try:
        # Binary mode: the blob is ZIP data, not text.
        with open(tmp_file_name, "wb") as tmp_file:
            tmp_file.write(blob)
        with zipfile.ZipFile(tmp_file_name) as zip_file:
            if "info.json" not in zip_file.namelist():
                raise InvalidDataException("Invalid session ZIP!")
            info = zip_file.read("info.json")
            parsed_info = json.loads(info)
            token = parsed_info["token"]
            session = self._sessions_manager.read_session(token)
            if session is not None:
                raise DuplicateException("Session already exists!")
            destination_path = os.path.join(self._results_directory_path,
                                            token)
            os.makedirs(destination_path)
            zip_file.extractall(destination_path)
    finally:
        # Clean up the temporary archive even when validation fails.
        self.remove_tmp_files()
    self.load_results()
    return token
def remove_tmp_files(self):
    """Delete leftover temporary ZIP files from the working directory.

    Temp files are named "<time.time()>.zip". The number of fractional
    digits produced by str(time.time()) varies (and differs between
    Python 2 and 3), so match any "<digits>.<digits>.zip" name instead
    of the original fixed-width pattern, and anchor the end so names
    that merely start with the pattern are left alone.
    """
    for file_name in os.listdir("."):
        if re.match(r"^\d+\.\d+\.zip$", file_name) is None:
            continue
        os.remove(file_name)

View File

@ -0,0 +1,442 @@
from __future__ import division
from __future__ import absolute_import
from __future__ import unicode_literals
import uuid
import time
import os
import json
from threading import Timer
from .test_loader import AUTOMATIC, MANUAL
from ..data.session import Session, PENDING, PAUSED, RUNNING, ABORTED, COMPLETED
from ..utils.user_agent_parser import parse_user_agent
from .event_dispatcher import STATUS_EVENT, RESUME_EVENT
from ..data.exceptions.not_found_exception import NotFoundException
from ..data.exceptions.invalid_data_exception import InvalidDataException
from ..utils.deserializer import deserialize_session
DEFAULT_TEST_TYPES = [AUTOMATIC, MANUAL]
DEFAULT_TEST_PATHS = ["/"]
DEFAULT_TEST_AUTOMATIC_TIMEOUT = 60000
DEFAULT_TEST_MANUAL_TIMEOUT = 300000
class SessionsManager(object):
def initialize(self,
test_loader,
event_dispatcher,
tests_manager,
results_directory,
results_manager):
self._test_loader = test_loader
self._sessions = {}
self._expiration_timeout = None
self._event_dispatcher = event_dispatcher
self._tests_manager = tests_manager
self._results_directory = results_directory
self._results_manager = results_manager
def create_session(
self,
tests=None,
types=None,
timeouts=None,
reference_tokens=None,
webhook_urls=None,
user_agent=None,
labels=None,
expiration_date=None
):
if tests is None:
tests = {}
if timeouts is None:
timeouts = {}
if reference_tokens is None:
reference_tokens = []
if webhook_urls is None:
webhook_urls = []
if user_agent is None:
user_agent = ""
if labels is None:
labels = []
if "include" not in tests:
tests["include"] = DEFAULT_TEST_PATHS
if "exclude" not in tests:
tests["exclude"] = []
if "automatic" not in timeouts:
timeouts["automatic"] = DEFAULT_TEST_AUTOMATIC_TIMEOUT
if "manual" not in timeouts:
timeouts["manual"] = DEFAULT_TEST_MANUAL_TIMEOUT
if types is None:
types = DEFAULT_TEST_TYPES
for type in types:
if type != "automatic" and type != "manual":
raise InvalidDataException("Unknown type '{}'".format(type))
token = str(uuid.uuid1())
pending_tests = self._test_loader.get_tests(
types,
include_list=tests["include"],
exclude_list=tests["exclude"],
reference_tokens=reference_tokens)
browser = parse_user_agent(user_agent)
test_files_count = self._tests_manager.calculate_test_files_count(
pending_tests
)
test_state = {}
for api in test_files_count:
test_state[api] = {
"pass": 0,
"fail": 0,
"timeout": 0,
"not_run": 0,
"total": test_files_count[api],
"complete": 0}
session = Session(
token=token,
tests=tests,
user_agent=user_agent,
browser=browser,
types=types,
timeouts=timeouts,
pending_tests=pending_tests,
running_tests={},
test_state=test_state,
status=PENDING,
reference_tokens=reference_tokens,
webhook_urls=webhook_urls,
labels=labels,
expiration_date=expiration_date
)
self._push_to_cache(session)
if expiration_date is not None:
self._set_expiration_timer()
return session
def read_session(self, token):
if token is None:
return None
session = self._read_from_cache(token)
if session is None or session.test_state is None:
session = self.load_session(token)
if session is not None:
self._push_to_cache(session)
return session
def read_session_status(self, token):
if token is None:
return None
session = self._read_from_cache(token)
if session is None:
session = self.load_session_info(token)
if session is None:
return None
if session.test_state is None:
session = self.load_session(token)
if session is not None:
self._push_to_cache(session)
return session
def read_public_sessions(self):
self.load_all_sessions_info()
session_tokens = []
for token in self._sessions:
session = self._sessions[token]
if not session.is_public:
continue
session_tokens.append(token)
return session_tokens
def update_session(self, session):
self._push_to_cache(session)
def update_session_configuration(
self, token, tests, types, timeouts, reference_tokens, webhook_urls
):
session = self.read_session(token)
if session is None:
raise NotFoundException("Could not find session")
if session.status != PENDING:
return
if tests is not None:
if "include" not in tests:
tests["include"] = session.tests["include"]
if "exclude" not in tests:
tests["exclude"] = session.tests["exclude"]
if reference_tokens is None:
reference_tokens = session.reference_tokens
if types is None:
types = session.types
pending_tests = self._test_loader.get_tests(
include_list=tests["include"],
exclude_list=tests["exclude"],
reference_tokens=reference_tokens,
types=types
)
session.pending_tests = pending_tests
session.tests = tests
test_files_count = self._tests_manager.calculate_test_files_count(
pending_tests)
test_state = {}
for api in test_files_count:
test_state[api] = {
"pass": 0,
"fail": 0,
"timeout": 0,
"not_run": 0,
"total": test_files_count[api],
"complete": 0,
}
session.test_state = test_state
if types is not None:
session.types = types
if timeouts is not None:
if AUTOMATIC not in timeouts:
timeouts[AUTOMATIC] = session.timeouts[AUTOMATIC]
if MANUAL not in timeouts:
timeouts[MANUAL] = session.timeouts[MANUAL]
session.timeouts = timeouts
if reference_tokens is not None:
session.reference_tokens = reference_tokens
if webhook_urls is not None:
session.webhook_urls = webhook_urls
self._push_to_cache(session)
return session
def update_labels(self, token, labels):
if token is None or labels is None:
return
session = self.read_session(token)
if session is None:
return
if session.is_public:
return
session.labels = labels
self._push_to_cache(session)
def delete_session(self, token):
session = self.read_session(token)
if session is None:
return
if session.is_public is True:
return
del self._sessions[token]
def add_session(self, session):
if session is None:
return
self._push_to_cache(session)
def load_all_sessions(self):
if not os.path.isdir(self._results_directory):
return
tokens = os.listdir(self._results_directory)
for token in tokens:
self.load_session(token)
def load_all_sessions_info(self):
if not os.path.isdir(self._results_directory):
return
tokens = os.listdir(self._results_directory)
for token in tokens:
if token in self._sessions:
continue
self.load_session_info(token)
def load_session(self, token):
session = self.load_session_info(token)
if session is None:
return None
if session.test_state is None:
results = self._results_manager.load_results(token)
test_state = self._results_manager.parse_test_state(results)
session.test_state = test_state
self._results_manager.create_info_file(session)
self._push_to_cache(session)
return session
def load_session_info(self, token):
result_directory = os.path.join(self._results_directory, token)
if not os.path.isdir(result_directory):
return None
info_file = os.path.join(result_directory, "info.json")
if not os.path.isfile(info_file):
return None
info_data = None
with open(info_file, "r") as file:
info_data = file.read()
parsed_info_data = json.loads(info_data)
session = deserialize_session(parsed_info_data)
self._push_to_cache(session)
return session
def _push_to_cache(self, session):
self._sessions[session.token] = session
def _read_from_cache(self, token):
if token not in self._sessions:
return None
return self._sessions[token]
def _set_expiration_timer(self):
expiring_sessions = self._read_expiring_sessions()
if len(expiring_sessions) == 0:
return
next_session = expiring_sessions[0]
for session in expiring_sessions:
if next_session.expiration_date > session.expiration_date:
next_session = session
if self._expiration_timeout is not None:
self._expiration_timeout.cancel()
timeout = next_session.expiration_date / 1000.0 - int(time.time())
if timeout < 0:
timeout = 0
def handle_timeout(self):
self._delete_expired_sessions()
self._set_expiration_timer()
self._expiration_timeout = Timer(timeout, handle_timeout, [self])
self._expiration_timeout.start()
def _delete_expired_sessions(self):
expiring_sessions = self._read_expiring_sessions()
now = int(time.time())
for session in expiring_sessions:
if session.expiration_date / 1000.0 < now:
self.delete_session(session.token)
def _read_expiring_sessions(self):
expiring_sessions = []
for token in self._sessions:
session = self._sessions[token]
if session.expiration_date is None:
continue
expiring_sessions.append(session)
return expiring_sessions
def start_session(self, token):
session = self.read_session(token)
if session is None:
return
if session.status != PENDING and session.status != PAUSED:
return
if session.status == PENDING:
session.date_started = int(time.time()) * 1000
session.expiration_date = None
session.status = RUNNING
self.update_session(session)
self._event_dispatcher.dispatch_event(
token,
event_type=STATUS_EVENT,
data=session.status
)
def pause_session(self, token):
    """Pause a RUNNING session and persist its current state.

    Dispatches a STATUS_EVENT so connected clients learn about the
    change. No-op for unknown tokens or non-running sessions.
    """
    session = self.read_session(token)
    # Guard against unknown tokens (consistent with start_session);
    # read_session returns None for a token without a session.
    if session is None:
        return
    if session.status != RUNNING:
        return
    session.status = PAUSED
    self.update_session(session)
    self._event_dispatcher.dispatch_event(
        token,
        event_type=STATUS_EVENT,
        data=session.status
    )
    self._results_manager.persist_session(session)
def stop_session(self, token):
    """Abort a session that is not already finished.

    Sets status to ABORTED, records the finish time (ms since epoch)
    and notifies listeners. No-op for unknown tokens or sessions that
    are already COMPLETED or ABORTED.
    """
    session = self.read_session(token)
    # Guard against unknown tokens; read_session may return None.
    if session is None:
        return
    if session.status == ABORTED or session.status == COMPLETED:
        return
    session.status = ABORTED
    session.date_finished = time.time() * 1000
    self.update_session(session)
    self._event_dispatcher.dispatch_event(
        token,
        event_type=STATUS_EVENT,
        data=session.status
    )
def resume_session(self, token, resume_token):
    """Resume a previous session inside a pending session's window.

    Emits a RESUME_EVENT carrying the token to resume, then discards
    the still-pending session. No-op unless the session is PENDING.
    """
    session = self.read_session(token)
    # Guard against unknown tokens; read_session may return None.
    if session is None:
        return
    if session.status != PENDING:
        return
    self._event_dispatcher.dispatch_event(
        token,
        event_type=RESUME_EVENT,
        data=resume_token
    )
    self.delete_session(token)
def complete_session(self, token):
    """Mark a session as COMPLETED and notify listeners.

    Records the finish time in milliseconds since epoch. No-op for
    unknown tokens or sessions already COMPLETED or ABORTED.
    """
    session = self.read_session(token)
    # Guard against unknown tokens; read_session may return None.
    if session is None:
        return
    if session.status == COMPLETED or session.status == ABORTED:
        return
    session.status = COMPLETED
    session.date_finished = time.time() * 1000
    self.update_session(session)
    self._event_dispatcher.dispatch_event(
        token,
        event_type=STATUS_EVENT,
        data=session.status
    )
def test_in_session(self, test, session):
return self._test_list_contains_test(test, session.pending_tests) \
or self._test_list_contains_test(test, session.running_tests)
def is_test_complete(self, test, session):
return not self._test_list_contains_test(test, session.pending_tests) \
and not self._test_list_contains_test(test, session.running_tests)
def is_test_running(self, test, session):
return self._test_list_contains_test(test, session.running_tests)
def _test_list_contains_test(self, test, test_list):
for api in list(test_list.keys()):
if test in test_list[api]:
return True
return False
def is_api_complete(self, api, session):
return api not in session.pending_tests \
and api not in session.running_tests
def find_token(self, fragment):
    """Resolve a token prefix to a full session token.

    Returns the unique cached token starting with the fragment, or
    None when the fragment is shorter than 8 characters or matches
    zero or several tokens.
    """
    if len(fragment) < 8:
        return None
    matches = [known_token for known_token in self._sessions
               if known_token.startswith(fragment)]
    if len(matches) != 1:
        return None
    return matches[0]

View File

@ -0,0 +1,197 @@
from __future__ import absolute_import
from __future__ import unicode_literals
import os
import re
AUTOMATIC = "automatic"
MANUAL = "manual"
TEST_TYPES = [AUTOMATIC, MANUAL]
class TestLoader(object):
def initialize(
self,
exclude_list_file_path,
include_list_file_path,
results_manager,
api_titles
):
self._exclude_list_file_path = exclude_list_file_path
self._include_list_file_path = include_list_file_path
self._results_manager = results_manager
self._tests = {}
self._tests[AUTOMATIC] = {}
self._tests[MANUAL] = {}
self._api_titles = api_titles
def load_tests(self, tests):
include_list = self._load_test_list(self._include_list_file_path)
exclude_list = self._load_test_list(self._exclude_list_file_path)
if "testharness" in tests:
self._tests[AUTOMATIC] = self._load_tests(
tests=tests["testharness"],
exclude_list=exclude_list
)
if "manual" in tests:
self._tests[MANUAL] = self._load_tests(
tests=tests["manual"],
include_list=include_list
)
for api in self._tests[AUTOMATIC]:
for test_path in self._tests[AUTOMATIC][api][:]:
if "manual" not in test_path:
continue
self._tests[AUTOMATIC][api].remove(test_path)
if not self._is_valid_test(test_path,
include_list=include_list):
continue
if api not in self._tests[MANUAL]:
self._tests[MANUAL][api] = []
self._tests[MANUAL][api].append(test_path)
def _load_tests(self, tests, exclude_list=None, include_list=None):
loaded_tests = {}
def get_next_part(tests):
paths = []
for test in tests:
if isinstance(tests[test], dict):
subs = get_next_part(tests[test])
for sub in subs:
if sub is None:
continue
paths.append(test + "/" + sub)
continue
if test.endswith(".html"):
paths.append(test)
continue
if test.endswith(".js"):
for element in tests[test][1:]:
paths.append(element[0])
continue
return paths
test_paths = get_next_part(tests)
for test_path in test_paths:
if not test_path.startswith("/"):
test_path = "/" + test_path
if self._is_valid_test(test_path, exclude_list, include_list):
api_name = self._parse_api_name(test_path)
if api_name not in loaded_tests:
loaded_tests[api_name] = []
loaded_tests[api_name].append(test_path)
return loaded_tests
def _parse_api_name(self, test_path):
for part in test_path.split("/"):
if part == "":
continue
return part
def _is_valid_test(self, test_path, exclude_list=None, include_list=None):
is_valid = True
if include_list is not None and len(include_list) > 0:
is_valid = False
for include_test in include_list:
pattern = re.compile("^" + include_test)
if pattern.match(test_path) is not None:
is_valid = True
break
if not is_valid:
return is_valid
if exclude_list is not None and len(exclude_list) > 0:
is_valid = True
for exclude_test in exclude_list:
pattern = re.compile("^" + exclude_test)
if pattern.match(test_path) is not None:
is_valid = False
break
return is_valid
def _load_test_list(self, file_path):
    """Read a list of test patterns from a text file.

    Returns an empty list when the file does not exist.

    NOTE(review): file_content.split() splits on ANY whitespace, so
    entries cannot contain spaces, and a line such as "#foo" only has
    its leading "#" stripped and is then kept as the pattern "foo" —
    commented-out entries are NOT skipped. Confirm this is intended.
    """
    tests = []
    if not os.path.isfile(file_path):
        return tests
    file_content = None
    with open(file_path) as file_handle:
        file_content = file_handle.read()
    for line in file_content.split():
        line = line.replace(" ", "")
        # Only a bare "#" becomes empty and is skipped below.
        line = re.sub(r"^#", "", line)
        if line == "":
            continue
        tests.append(line)
    return tests
def get_tests(
self,
types=None,
include_list=None,
exclude_list=None,
reference_tokens=None
):
if types is None:
types = [AUTOMATIC, MANUAL]
if include_list is None:
include_list = []
if exclude_list is None:
exclude_list = []
if reference_tokens is None:
reference_tokens = []
loaded_tests = {}
reference_results = self._results_manager.read_common_passed_tests(
reference_tokens)
for test_type in types:
if test_type not in TEST_TYPES:
continue
for api in self._tests[test_type]:
for test_path in self._tests[test_type][api]:
if not self._is_valid_test(test_path, exclude_list,
include_list):
continue
if reference_results is not None and \
test_path not in reference_results[api]:
continue
if api not in loaded_tests:
loaded_tests[api] = []
loaded_tests[api].append(test_path)
return loaded_tests
def get_apis(self):
apis = []
for test_type in TEST_TYPES:
for api in self._tests[test_type]:
in_list = False
for item in apis:
if item["path"] == "/" + api:
in_list = True
break
if in_list:
continue
title = None
for item in self._api_titles:
if item["path"] == "/" + api:
title = item["title"]
break
if title is None:
apis.append({"title": api, "path": "/" + api})
else:
apis.append({"title": title, "path": "/" + api})
return apis

View File

@ -0,0 +1,369 @@
from __future__ import division
from __future__ import absolute_import
from __future__ import unicode_literals

import re
from functools import cmp_to_key
from threading import Timer

from .event_dispatcher import TEST_COMPLETED_EVENT
from ..data.exceptions.not_found_exception import NotFoundException
from ..data.session import COMPLETED, ABORTED
class TestsManager(object):
def initialize(
self,
test_loader,
sessions_manager,
results_manager,
event_dispatcher
):
self._test_loader = test_loader
self._sessions_manager = sessions_manager
self._results_manager = results_manager
self._event_dispatcher = event_dispatcher
self._timeouts = []
def next_test(self, session):
if session.status == COMPLETED or session.status == ABORTED:
return None
pending_tests = session.pending_tests
running_tests = session.running_tests
token = session.token
if pending_tests is None:
pending_tests = self.load_tests(session)
session.pending_tests = pending_tests
self._sessions_manager.update_session(session)
if running_tests is None:
running_tests = {}
test = self._get_next_test_from_list(pending_tests)
if test is None:
return None
pending_tests = self.remove_test_from_list(pending_tests, test)
running_tests = self.add_test_to_list(running_tests, test)
test_timeout = self.get_test_timeout(test, session) / 1000.0
def handler(self, token, test):
self._on_test_timeout(token, test)
timer = Timer(test_timeout, handler, [self, token, test])
self._timeouts.append({
"test": test,
"timeout": timer
})
session.pending_tests = pending_tests
session.running_tests = running_tests
self._sessions_manager.update_session(session)
timer.start()
return test
def read_last_completed_tests(self, token, count):
results = self._results_manager.read_results(token)
results_tests = {}
for api in list(results.keys()):
results_tests[api] = []
for result in results[api]:
results_tests[api].append(result["test"])
sorted_results_tests = self._sort_tests_by_execution(results_tests)
sorted_results_tests.reverse()
tests = {"pass": [], "fail": [], "timeout": []}
for test in sorted_results_tests:
api = None
for part in test.split("/"):
if part != "":
api = part
break
result = None
for potential_result in results[api]:
if potential_result["test"] == test:
result = potential_result
break
if result["status"] == "ERROR":
if len(tests["fail"]) < count:
tests["fail"].append(result["test"])
elif result["status"] == "TIMEOUT":
if len(tests["timeout"]) < count:
tests["timeout"].append(result["test"])
passes = True
for test in result["subtests"]:
if test["status"] != "PASS":
passes = False
break
if passes and len(tests["pass"]) < count:
tests["pass"].append(result["test"])
if not passes and len(tests["fail"]) < count:
tests["fail"].append(result["test"])
if len(tests["pass"]) == count and len(tests["fail"]) == count \
and len(tests["timeout"]) == count:
return tests
return tests
def _sort_tests_by_execution(self, tests):
sorted_tests = []
for api in list(tests.keys()):
for test in tests[api]:
sorted_tests.append(test)
def compare(tests_manager, test_a, test_b):
micro_test_list = {}
api_a = ""
for part in test_a.split("/"):
if part != "":
api_a = part
break
api_b = ""
for part in test_b.split("/"):
if part != "":
api_b = part
break
if api_a == api_b:
micro_test_list[api_a] = [test_a, test_b]
else:
micro_test_list[api_a] = [test_a]
micro_test_list[api_b] = [test_b]
next_test = tests_manager._get_next_test_from_list(micro_test_list)
if next_test == test_a:
return -1
return 1
sorted_tests.sort(cmp=lambda test_a,
test_b: compare(self, test_a, test_b))
return sorted_tests
def _get_next_test_from_list(self, tests):
test = None
api = None
has_http = True
has_manual = True
current_api = 0
current_test = 0
apis = list(tests.keys())
apis.sort(key=lambda api: api.lower())
for api in apis:
tests[api].sort(key=lambda api: api.replace("/", "").lower())
while test is None:
if len(apis) <= current_api:
return None
api = apis[current_api]
if len(tests[api]) <= current_test:
current_api = current_api + 1
current_test = 0
if current_api == len(apis):
if has_http:
has_http = False
current_api = 0
test = None
continue
if has_manual:
has_manual = False
current_api = 0
test = None
has_http = True
continue
return None
test = None
continue
test = tests[api][current_test]
if "manual" in test and "https" not in test:
return test
if "manual" in test and "https" in test:
if not has_http:
return test
if "manual" not in test and "https" not in test:
if not has_manual:
return test
if "manual" not in test and "https" in test:
if not has_manual and not has_http:
return test
current_test = current_test + 1
test = None
return test
def skip_to(self, test_list, test):
sorted_tests = self._sort_tests_by_execution(test_list)
if test not in sorted_tests:
return test_list
index = sorted_tests.index(test)
remaining_tests = sorted_tests[index + 1:]
remaining_tests_by_api = {}
current_api = "___"
for test in remaining_tests:
if not test.startswith("/" + current_api) and \
not test.startswith(current_api):
current_api = next((p for p in test.split("/") if p != ""),
None)
if current_api not in remaining_tests_by_api:
remaining_tests_by_api[current_api] = []
remaining_tests_by_api[current_api].append(test)
return remaining_tests_by_api
def remove_test_from_list(self, test_list, test):
    """Remove a test path from a per-API test dict (in place).

    The API key is the first non-empty path segment. API entries that
    become empty are dropped entirely. Returns the (mutated) dict.
    """
    api = next((part for part in test.split("/") if part), None)
    if api in test_list and test in test_list[api]:
        test_list[api].remove(test)
        if not test_list[api]:
            del test_list[api]
    return test_list
def add_test_to_list(self, test_list, test):
    """Add a test path to a per-API test dict (in place), deduplicated.

    The API key is the first non-empty path segment. Returns the dict.
    """
    api = next((part for part in test.split("/") if part), None)
    if api in test_list and test in test_list[api]:
        return test_list
    test_list.setdefault(api, []).append(test)
    return test_list
def get_test_timeout(self, test, session):
    """Resolve the timeout (in ms) applying to a test for a session.

    A per-path timeout wins when its dot-stripped pattern matches the
    start of the dot-stripped test path; otherwise the generic
    "manual"/"automatic" timeout applies, chosen by whether "manual"
    occurs in the test path.
    """
    timeouts = session.timeouts
    stripped_test = test.replace(".", "")
    for path in list(timeouts.keys()):
        # Dots are stripped on both sides before the prefix match.
        if re.compile("^" + path.replace(".", "")).match(stripped_test):
            return timeouts[path]
    return timeouts["manual"] if "manual" in test else timeouts["automatic"]
def _on_test_timeout(self, token, test):
data = {
"test": test,
"status": "TIMEOUT",
"message": None,
"subtests": [
{
"status": "TIMEOUT",
"xstatus": "SERVERTIMEOUT"
}
]
}
self._results_manager.create_result(token, data)
def read_tests(self):
return self._test_loader.get_tests()
def complete_test(self, test, session):
    """Mark a running test as finished and notify listeners.

    Cancels the server-side timeout timer for the test (if one is
    still registered) and removes the test from the session's running
    list, then dispatches a TEST_COMPLETED_EVENT.
    """
    running_tests = self.remove_test_from_list(session.running_tests, test)
    session.running_tests = running_tests
    timeout = next((t for t in self._timeouts if t["test"] == test), None)
    # Guard: the timer entry may be absent (e.g. already consumed),
    # in which case the original code crashed on None.
    if timeout is not None:
        timeout["timeout"].cancel()
        self._timeouts.remove(timeout)
    self.update_tests(
        running_tests=running_tests,
        session=session
    )
    self._event_dispatcher.dispatch_event(
        token=session.token,
        event_type=TEST_COMPLETED_EVENT,
        data=test
    )
def update_tests(
self,
pending_tests=None,
running_tests=None,
session=None
):
if pending_tests is not None:
session.pending_tests = pending_tests
if running_tests is not None:
session.running_tests = running_tests
self._sessions_manager.update_session(session)
def calculate_test_files_count(self, tests):
    """Return a mapping of API name to number of test files."""
    return {api: len(test_paths) for api, test_paths in tests.items()}
def read_malfunctioning_tests(self, token):
session = self._sessions_manager.read_session(token)
return session.malfunctioning_tests
def update_malfunctioning_tests(self, token, tests):
if token is None:
return
if tests is None:
return
session = self._sessions_manager.read_session(token)
if session is None:
raise NotFoundException("Could not find session using token: " + token)
if session.is_public:
return
session.malfunctioning_tests = tests
self._sessions_manager.update_session(session)
def load_tests(self, session):
pending_tests = self._test_loader.get_tests(
session.types,
include_list=session.tests["include"],
exclude_list=session.tests["exclude"],
reference_tokens=session.reference_tokens
)
last_completed_test = session.last_completed_test
if last_completed_test is not None:
pending_tests = self.skip_to(pending_tests, last_completed_test)
return pending_tests

View File

@ -0,0 +1,56 @@
from __future__ import unicode_literals
import subprocess
import os
import ntpath
import sys
from shutil import copyfile
def generate_report(
        input_json_directory_path=None,
        output_html_directory_path=None,
        spec_name=None,
        is_multi=None,
        reference_dir=None):
    """Run the external "wptreport" tool to render an HTML report.

    Args:
        input_json_directory_path: directory containing result JSONs.
        output_html_directory_path: directory to write the report into.
        spec_name: specification name passed to wptreport.
        is_multi: whether result files carry session-token prefixes.
        reference_dir: optional directory with reference results.
    Raises:
        Exception: when wptreport exits with a non-zero status.
    """
    if is_multi is None:
        is_multi = False
    command = [
        "wptreport",
        "--input", input_json_directory_path,
        "--output", output_html_directory_path,
        "--spec", spec_name,
        "--sort", "true",
        "--failures", "true",
        "--tokenFileName", "true" if is_multi else "false",
        "--pass", "100",
        "--ref", reference_dir if reference_dir is not None else ""]
    try:
        # BUG FIX: subprocess.call never raises CalledProcessError, so
        # the original except clause was dead code and failures passed
        # silently. check_call raises on a non-zero exit status.
        subprocess.check_call(command, shell=False)
    except subprocess.CalledProcessError as e:
        info = sys.exc_info()
        raise Exception("Failed to execute wptreport: " +
                        str(info[0].__name__) + ": " + str(e))
def generate_multi_report(
        output_html_directory_path=None,
        spec_name=None,
        result_json_files=None,
        reference_dir=None):
    """Combine several sessions' result files into one HTML report.

    Each existing result file is copied into the output directory with
    its session token prefixed to the file name, then a multi-session
    report is generated over that directory.
    """
    for result_file in result_json_files:
        source_path = result_file["path"]
        if not os.path.isfile(source_path):
            continue
        prefixed_name = result_file["token"] + "-" + ntpath.basename(source_path)
        copyfile(source_path,
                 os.path.join(output_html_directory_path, prefixed_name))
    generate_report(
        input_json_directory_path=output_html_directory_path,
        output_html_directory_path=output_html_directory_path,
        spec_name=spec_name,
        is_multi=True,
        reference_dir=reference_dir)

View File

@ -0,0 +1,34 @@
{
"id": "37be8ec4-7855-4554-867e-7a5d2a4f99e6",
"name": "WAVE Local",
"values": [
{
"key": "host",
"value": "web-platform.test",
"enabled": true
},
{
"key": "port",
"value": "8000",
"enabled": true
},
{
"key": "protocol",
"value": "http",
"enabled": true
},
{
"key": "web_root",
"value": "_wave",
"enabled": true
},
{
"key": "device_timeout",
"value": "60000",
"enabled": true
}
],
"_postman_variable_scope": "environment",
"_postman_exported_at": "2020-05-25T12:12:37.098Z",
"_postman_exported_using": "Postman/7.25.0"
}

View File

@ -0,0 +1,54 @@
import errno
import os
import socket
import subprocess
import time
try:
from urllib.request import urlopen
from urllib.error import URLError
except ImportError:
from urllib2 import urlopen, URLError
import pytest
from tools.wpt import wpt
def is_port_8000_in_use():
    """Return True when TCP port 8000 is already bound on localhost."""
    probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        probe.bind(("127.0.0.1", 8000))
    except socket.error as e:
        # Any bind failure other than "address in use" is unexpected.
        if e.errno != errno.EADDRINUSE:
            raise
        return True
    finally:
        probe.close()
    return False
def test_serve():
    """End-to-end smoke test: start "wpt serve-wave" and poll its API."""
    # Skip rather than fail when the port is already taken: the server
    # under test needs port 8000 for itself.
    if is_port_8000_in_use():
        pytest.skip("WAVE Test Runner failed: Port 8000 already in use.")
    # Start in its own session/process group so the whole process tree
    # can be terminated via killpg in the finally block.
    p = subprocess.Popen([os.path.join(wpt.localpaths.repo_root, "wpt"), "serve-wave"],
                         preexec_fn=os.setsid)
    start = time.time()
    try:
        while True:
            # poll() returning a code means the server process died.
            if p.poll() is not None:
                assert False, "WAVE Test Runner failed: Server not running."
            if time.time() - start > 6 * 60:
                assert False, "WAVE Test Runner failed: Server did not start responding within 6m."
            try:
                resp = urlopen("http://web-platform.test:8000/_wave/api/sessions/public")
                print(resp)
            except URLError:
                # Not up yet; retry every 10 seconds until the timeout.
                print("Server not responding, waiting another 10s.")
                time.sleep(10)
            else:
                assert resp.code == 200
                break
    finally:
        # SIGTERM (15) the entire process group started above.
        os.killpg(p.pid, 15)

View File

@ -0,0 +1,20 @@
[tox]
envlist = py27,py35,py36,py37,py38
skipsdist=True
skip_missing_interpreters = False
[testenv]
deps =
pytest
pytest-cov
hypothesis
mock
-r{toxinidir}/../wptrunner/requirements.txt
-r{toxinidir}/../wptrunner/requirements_chrome.txt
-r{toxinidir}/../wptrunner/requirements_firefox.txt
commands =
pytest {posargs}
passenv =
TASKCLUSTER_ROOT_URL

View File

@ -0,0 +1,96 @@
from __future__ import absolute_import
from __future__ import unicode_literals
from ..data.session import Session, UNKNOWN
def deserialize_sessions(session_dicts):
    """Deserialize a list of session dictionaries into Session objects."""
    return [deserialize_session(entry) for entry in session_dicts]
def deserialize_session(session_dict):
    """Build a Session object from its dictionary representation.

    Missing keys fall back to neutral defaults so partially populated
    dictionaries (e.g. older info.json files) still deserialize.
    """
    tests = session_dict.get("tests", {"include": [], "exclude": []})
    # Legacy field: "path" carried a comma-separated include list.
    if "path" in session_dict:
        test_paths = session_dict["path"].split(", ")
        tests["include"] = tests["include"] + test_paths
    return Session(
        token=session_dict.get("token", ""),
        tests=tests,
        types=session_dict.get("types", []),
        user_agent=session_dict.get("user_agent", ""),
        labels=session_dict.get("labels", []),
        timeouts=session_dict.get("timeouts", {}),
        pending_tests=session_dict.get("pending_tests"),
        running_tests=session_dict.get("running_tests"),
        status=session_dict.get("status", UNKNOWN),
        test_state=session_dict.get("test_state"),
        last_completed_test=session_dict.get("last_completed_test"),
        date_started=session_dict.get("date_started"),
        date_finished=session_dict.get("date_finished"),
        is_public=session_dict.get("is_public", False),
        reference_tokens=session_dict.get("reference_tokens", []),
        browser=session_dict.get("browser"),
        webhook_urls=session_dict.get("webhook_urls", []),
        expiration_date=session_dict.get("expiration_date"),
        malfunctioning_tests=session_dict.get("malfunctioning_tests", [])
    )

View File

@ -0,0 +1,24 @@
from __future__ import unicode_literals
def serialize_session(session):
    """Convert a Session object into a plain dict suitable for JSON
    serialization or persistence.

    Every key in the returned dict mirrors a Session attribute of the
    same name; values are passed through unchanged.
    """
    # Key order is preserved from the original hand-written mapping so
    # that serialized output (e.g. json.dumps) stays byte-compatible.
    keys = (
        "token",
        "types",
        "user_agent",
        "labels",
        "timeouts",
        "test_state",
        "last_completed_test",
        "tests",
        "pending_tests",
        "running_tests",
        "status",
        "browser",
        "date_started",
        "date_finished",
        "is_public",
        "reference_tokens",
        "webhook_urls",
        "expiration_date",
        "malfunctioning_tests",
    )
    return {key: getattr(session, key) for key in keys}

View File

@ -0,0 +1,43 @@
from __future__ import absolute_import
from __future__ import unicode_literals
from ua_parser import user_agent_parser
def parse_user_agent(user_agent_string):
    """Extract the browser name and a dotted version string from a raw
    User-Agent header using the ua_parser library.

    Returns a dict with "name" and "version" keys.  The version falls
    back to "0" when no major version component could be parsed; minor
    and patch components are each appended independently whenever the
    parser reports them.
    """
    parsed = user_agent_parser.ParseUserAgent(user_agent_string)
    version = parsed["major"] if parsed["major"] is not None else "0"
    for component in ("minor", "patch"):
        if parsed[component] is not None:
            version = version + "." + parsed[component]
    return {
        "name": parsed["family"],
        "version": version
    }
def abbreviate_browser_name(name):
    """Map a full browser family name to a two-letter abbreviation.

    Names not present in the mapping yield the placeholder "Xx".
    """
    return {
        "Chrome": "Ch",
        "Chrome Mobile WebView": "Ch",
        "Chromium": "Cm",
        "WebKit": "Wk",
        "Safari": "Sf",
        "Firefox": "FF",
        "IE": "IE",
        "Edge": "Ed",
        "Opera": "Op",
    }.get(name, "Xx")

View File

@ -0,0 +1,114 @@
from __future__ import unicode_literals
import os
import logging
from . import configuration_loader
from .network.http_handler import HttpHandler
from .network.api.sessions_api_handler import SessionsApiHandler
from .network.api.tests_api_handler import TestsApiHandler
from .network.api.results_api_handler import ResultsApiHandler
from .network.static_handler import StaticHandler
from .testing.sessions_manager import SessionsManager
from .testing.results_manager import ResultsManager
from .testing.tests_manager import TestsManager
from .testing.test_loader import TestLoader
from .testing.event_dispatcher import EventDispatcher
class WaveServer(object):
    """Facade wiring together the server-side components of the WAVE
    test runner: configuration, the session/result/test managers, the
    test loader and the HTTP API handlers.

    After initialize() completes, ``handle_request`` is the single
    entry point used to dispatch incoming WAVE HTTP requests.
    """

    def initialize(self,
                   tests,
                   configuration_file_path=None,
                   application_directory_path=None,
                   reports_enabled=None):
        """Set up all managers, load tests and build the HTTP handler chain.

        tests -- structure of available tests, forwarded to
            TestLoader.load_tests().
        configuration_file_path -- optional path to a WAVE configuration
            file; defaults to "" (configuration_loader then supplies
            defaults).
        application_directory_path -- normalized here but otherwise
            unused in this method; presumably kept for interface
            compatibility — TODO confirm with callers.
        reports_enabled -- whether report generation is enabled;
            defaults to False.
        """
        # Normalize optional arguments to concrete defaults.
        if configuration_file_path is None:
            configuration_file_path = ""
        if application_directory_path is None:
            application_directory_path = ""
        if reports_enabled is None:
            reports_enabled = False

        logger = logging.getLogger("wave-server")

        logger.debug("Loading configuration ...")
        configuration = configuration_loader.load(configuration_file_path)

        # Initialize Managers.
        # The managers reference each other, so all are constructed
        # first and then cross-wired via their initialize() methods.
        event_dispatcher = EventDispatcher()
        sessions_manager = SessionsManager()
        results_manager = ResultsManager()
        tests_manager = TestsManager()
        test_loader = TestLoader()

        sessions_manager.initialize(
            test_loader=test_loader,
            event_dispatcher=event_dispatcher,
            tests_manager=tests_manager,
            results_directory=configuration["results_directory_path"],
            results_manager=results_manager
        )

        results_manager.initialize(
            results_directory_path=configuration["results_directory_path"],
            sessions_manager=sessions_manager,
            tests_manager=tests_manager,
            import_enabled=configuration["import_enabled"],
            reports_enabled=reports_enabled,
            persisting_interval=configuration["persisting_interval"]
        )

        tests_manager.initialize(
            test_loader,
            results_manager=results_manager,
            sessions_manager=sessions_manager,
            event_dispatcher=event_dispatcher
        )

        # NOTE(review): these paths are resolved relative to the current
        # working directory, not to this module — confirm the server is
        # always started from the expected directory.
        exclude_list_file_path = os.path.abspath("./excluded.json")
        include_list_file_path = os.path.abspath("./included.json")
        test_loader.initialize(
            exclude_list_file_path,
            include_list_file_path,
            results_manager=results_manager,
            api_titles=configuration["api_titles"]
        )

        test_loader.load_tests(tests)

        # Initialize HTTP handlers
        static_handler = StaticHandler(
            web_root=configuration["web_root"],
            http_port=configuration["wpt_port"],
            https_port=configuration["wpt_ssl_port"]
        )
        sessions_api_handler = SessionsApiHandler(
            sessions_manager=sessions_manager,
            results_manager=results_manager,
            event_dispatcher=event_dispatcher,
            web_root=configuration["web_root"]
        )
        tests_api_handler = TestsApiHandler(
            tests_manager=tests_manager,
            sessions_manager=sessions_manager,
            wpt_port=configuration["wpt_port"],
            wpt_ssl_port=configuration["wpt_ssl_port"],
            hostname=configuration["hostname"],
            web_root=configuration["web_root"],
            test_loader=test_loader
        )
        results_api_handler = ResultsApiHandler(
            results_manager,
            web_root=configuration["web_root"])

        # Initialize HTTP server
        http_handler = HttpHandler(
            static_handler=static_handler,
            sessions_api_handler=sessions_api_handler,
            tests_api_handler=tests_api_handler,
            results_api_handler=results_api_handler,
            http_port=configuration["wpt_port"],
            web_root=configuration["web_root"]
        )
        # Expose the HTTP handler's dispatch function as this server's
        # request entry point.
        self.handle_request = http_handler.handle_request

View File

@ -0,0 +1,444 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<title>Compare Results - Web Platform Test</title>
<link rel="stylesheet" href="css/bulma-0.7.5/bulma.min.css" />
<link rel="stylesheet" href="css/fontawesome-5.7.2.min.css" />
<script src="lib/utils.js"></script>
<script src="lib/wave-service.js"></script>
<script src="lib/ui.js"></script>
<style>
body {
margin: 0;
padding: 0;
width: 100vw;
height: 100vh;
display: flex;
justify-content: center;
font-family: "Noto Sans", sans-serif;
overflow-y: auto;
overflow-x: hidden;
background-color: white;
color: #000;
}
.site-logo {
max-width: 300px;
margin-left: -15px;
}
.content {
width: 1000px;
}
.header {
display: flex;
margin: 50px 0 30px 0;
}
.header :first-child {
flex: 1;
}
.session-header {
display: flex;
justify-content: center;
align-items: center;
cursor: pointer;
}
.session-header div {
padding: 5px;
font-weight: bold;
}
.session-header:hover div {
text-decoration: underline;
}
</style>
</head>
<body>
<script>
// Page entry point: read the compared session tokens ("tokens") and
// explicit reference tokens ("reftokens") from the query string,
// register them as recently used sessions, then render and load data.
window.onload = () => {
  let { tokens, reftokens } = utils.parseQuery(location.search);
  tokens = tokens ? tokens.split(",") : [];
  const refTokens = reftokens ? reftokens.split(",") : [];
  // NOTE(review): at this point "tokens" is always an array, so this
  // condition is always truthy even when no tokens were supplied;
  // presumably "tokens.length" was intended — confirm before changing,
  // as fixing it would skip rendering for an empty query.
  if (tokens) {
    // reportsEnabled arrives asynchronously; re-render once known.
    WaveService.readResultsConfig(function(config) {
      comparisonUi.state.reportsEnabled = config.reportsEnabled;
      comparisonUi.render();
    });
    WaveService.addRecentSessions(tokens);
    WaveService.addRecentSessions(refTokens);
    comparisonUi.state.tokens = tokens;
    comparisonUi.state.refTokens = refTokens;
    comparisonUi.render();
    comparisonUi.refreshData();
  }
};
// View-model singleton for the session comparison page.  Holds the
// page state, fetches session details and aggregated results from the
// WAVE server, and renders the comparison table.
const comparisonUi = {
  // tokens:    sessions being compared (one table column each)
  // refTokens: reference session tokens (explicit plus those declared
  //            by the compared sessions themselves)
  // sessions:  token -> session details, filled by refreshData()
  state: {
    tokens: [],
    refTokens: [],
    sessions: {}
  },
  // Fetch session details for all involved tokens, the aggregated
  // result comparison, and any additional reference sessions declared
  // by the compared sessions; re-render the affected sections as each
  // response arrives.
  refreshData: () => {
    const { tokens, refTokens } = comparisonUi.state;
    // Union of compared and reference tokens, without duplicates.
    const allTokens = tokens.slice();
    refTokens
      .filter(token => allTokens.indexOf(token) === -1)
      .forEach(token => allTokens.push(token));
    WaveService.readMultipleSessions(allTokens, configurations => {
      const sessions = {};
      configurations.forEach(
        details => (sessions[details.token] = details)
      );
      comparisonUi.state.sessions = sessions;
      WaveService.readResultComparison(tokens, results => {
        comparisonUi.state.results = results;
        comparisonUi.renderApiResults();
      });
      // Reference tokens declared by the sessions themselves that are
      // not already known; fetch their details and show them as well.
      const sessionsReferenceTokens = [];
      configurations.forEach(({ referenceTokens }) =>
        referenceTokens
          .filter(token => refTokens.indexOf(token) === -1)
          .filter(token => sessionsReferenceTokens.indexOf(token) === -1)
          .forEach(token => sessionsReferenceTokens.push(token))
      );
      sessionsReferenceTokens.forEach(token =>
        comparisonUi.state.refTokens.push(token)
      );
      WaveService.readMultipleSessions(
        sessionsReferenceTokens,
        configurations => {
          const { sessions } = comparisonUi.state;
          configurations.forEach(
            details => (sessions[details.token] = details)
          );
          comparisonUi.renderDetails();
        }
      );
    });
  },
  // Navigate back to the results overview page.
  openResultsOverview() {
    location.href = WEB_ROOT + "overview.html";
  },
  // Build the static page skeleton (logo, back button, placeholders);
  // the dynamic sections are filled in by renderDetails() and
  // renderApiResults().
  render() {
    const comparisonView = UI.createElement({
      className: "content",
      style: "margin-bottom: 40px;",
      children: [
        {
          className: "header",
          children: [
            {
              children: [
                {
                  element: "img",
                  src: "res/wavelogo_2016.jpg",
                  className: "site-logo"
                }
              ]
            },
            {
              className: "button is-dark is-outlined",
              onClick: comparisonUi.openResultsOverview,
              children: [
                {
                  element: "span",
                  className: "icon",
                  children: [
                    {
                      element: "i",
                      className: "fas fa-arrow-left"
                    }
                  ]
                },
                {
                  text: "Results Overview",
                  element: "span"
                }
              ]
            }
          ]
        },
        {
          id: "header",
          children: [
            { className: "title", text: "Comparison" },
            { id: "controls" }
          ]
        },
        { id: "details" },
        { id: "api-results" }
      ]
    });
    const root = UI.getRoot();
    root.innerHTML = "";
    root.appendChild(comparisonView);
    comparisonUi.renderDetails();
    comparisonUi.renderApiResults();
  },
  // Render the details section skeleton (currently only the
  // reference-sessions row) and populate it.
  renderDetails() {
    const detailsView = UI.createElement({
      style: "margin-bottom: 20px"
    });
    // NOTE(review): refTokens is destructured here but never used.
    const { refTokens } = comparisonUi.state;
    const detailsTable = UI.createElement({
      element: "table",
      children: {
        element: "tbody",
        children: [
          {
            element: "tr",
            id: "reference-sessions"
          }
        ]
      }
    });
    detailsView.appendChild(detailsTable);
    const details = UI.getElement("details");
    details.innerHTML = "";
    details.appendChild(detailsView);
    comparisonUi.renderReferenceSessions();
  },
  // Render one clickable chip per reference session (browser icon plus
  // the first segment of the session token).
  renderReferenceSessions() {
    const { sessions, refTokens } = comparisonUi.state;
    // Nothing to show until both the token list and the session
    // details have been loaded.
    if (!refTokens || refTokens.length === 0) return;
    if (!Object.keys(sessions) || Object.keys(sessions).length === 0)
      return;
    const referenceSessions = refTokens.map(token => sessions[token]);
    const referenceSessionsTarget = UI.getElement("reference-sessions");
    referenceSessionsTarget.innerHTML = "";
    const referenceSessionsLabel = UI.createElement({
      element: "td",
      text: "Reference Sessions:",
      style: "width: 175px"
    });
    referenceSessionsTarget.appendChild(referenceSessionsLabel);
    const referenceSessionsList = UI.createElement({ element: "td" });
    referenceSessions.forEach(session => {
      const { token, browser } = session;
      const referenceSessionItem = UI.createElement({
        style: "margin-right: 10px",
        className: "button is-dark is-small is-rounded is-outlined",
        onClick: () => WaveService.openSession(token),
        children: [
          {
            element: "span",
            className: "icon",
            children: {
              element: "i",
              className: utils.getBrowserIcon(browser.name)
            }
          },
          {
            element: "span",
            className: "is-family-monospace",
            text: token.split("-").shift()
          }
        ]
      });
      referenceSessionsList.appendChild(referenceSessionItem);
    });
    referenceSessionsTarget.appendChild(referenceSessionsList);
  },
  // Render the per-API results table: one row per API, one column per
  // compared session, plus an optional per-API report link column.
  // Shows a spinner while the comparison data is still loading.
  renderApiResults() {
    const apiResultsView = UI.createElement({
      style: "margin-bottom: 20px"
    });
    const heading = UI.createElement({
      className: "title is-4",
      text: "Results"
    });
    apiResultsView.appendChild(heading);
    const { results, tokens, sessions } = comparisonUi.state;
    if (!results) {
      // Comparison not loaded yet: show a loading indicator instead of
      // the table and bail out.
      const loadingIndicator = UI.createElement({
        className: "level",
        children: {
          element: "span",
          className: "level-item icon",
          children: [
            {
              element: "i",
              className: "fas fa-spinner fa-pulse"
            },
            {
              style: "margin-left: 0.4em;",
              text: "Loading comparison ..."
            }
          ]
        }
      });
      apiResultsView.appendChild(loadingIndicator);
      const apiResults = UI.getElement("api-results");
      apiResults.innerHTML = "";
      apiResults.appendChild(apiResultsView);
      return;
    }
    const resultsTable = UI.createElement({
      element: "table"
    });
    apiResultsView.appendChild(resultsTable);
    // Map a pass percentage to a CSS color, interpolating linearly
    // from red-ish (0%) through amber (50%) to green (100%).
    const getColor = percent => {
      // "t" = top color at 100%
      const tRed = 28;
      const tGreen = 166;
      const tBlue = 76;
      // "m" = midpoint color at 50%
      const mRed = 204;
      const mGreen = 163;
      const mBlue = 0;
      // "b" = bottom color at 0%
      const bRed = 255;
      const bGreen = 56;
      const bBlue = 96;
      if (percent > 50) {
        const red = mRed + ((percent - 50) / 50) * (tRed - mRed);
        const green = mGreen + ((percent - 50) / 50) * (tGreen - mGreen);
        const blue = mBlue + ((percent - 50) / 50) * (tBlue - mBlue);
        return `rgb(${red}, ${green}, ${blue})`;
      } else {
        const red = bRed + (percent / 50) * (mRed - bRed);
        const green = bGreen + (percent / 50) * (mGreen - bGreen);
        const blue = bBlue + (percent / 50) * (mBlue - bBlue);
        return `rgb(${red}, ${green}, ${blue})`;
      }
    };
    // Header row: "API" label, one clickable cell per compared session
    // (icon, short token, browser name/version), plus a spacer column
    // for the report links.
    const resultsTableHeader = UI.createElement({
      element: "thead",
      children: {
        element: "tr",
        children: [
          {
            element: "td",
            text: "API",
            style: "vertical-align: bottom; width: 200px"
          }
        ]
          .concat(
            tokens.map(token => ({
              element: "td",
              children: {
                onClick: () => WaveService.openSession(token),
                className: "session-header",
                children: [
                  {
                    element: "i",
                    style: "font-size: 1.5em; margin-right: 0.1em",
                    className: utils.getBrowserIcon(
                      sessions[token].browser.name
                    )
                  },
                  {
                    children: [
                      {
                        style: "margin: 0; padding: 0;",
                        className: "is-family-monospace",
                        text: `${token.split("-").shift()}`
                      },
                      {
                        style: "margin: 0; padding: 0;",
                        text: `${sessions[token].browser.name} ${
                          sessions[token].browser.version
                        }`
                      }
                    ]
                  }
                ]
              }
            }))
          )
          .concat([{ element: "td", style: "width: 80px" }])
      }
    });
    resultsTable.appendChild(resultsTableHeader);
    // Collect the union of all APIs across the compared sessions,
    // sorted case-insensitively.
    let apis = [];
    tokens.forEach(token =>
      Object.keys(results[token])
        .filter(api => apis.indexOf(api) === -1)
        .forEach(api => apis.push(api))
    );
    apis = apis.sort((apiA, apiB) =>
      apiA.toLowerCase() > apiB.toLowerCase() ? 1 : -1
    );
    // Body: per API, the pass percentage (colored by getColor) for
    // each session — "results.total" holds the per-API totals — or
    // "Not Tested" when the session has no results for that API.
    const resultsTableBody = UI.createElement({
      element: "tbody",
      children: apis.map(api => ({
        element: "tr",
        children: [{ element: "td", text: api }]
          .concat(
            tokens.map(token =>
              results[token][api]
                ? {
                    element: "td",
                    style:
                      "text-align: center; font-weight: bold;" +
                      `color: ${getColor(
                        utils.percent(
                          results[token][api],
                          results["total"][api]
                        )
                      )}`,
                    text: `${utils.percent(
                      results[token][api],
                      results["total"][api]
                    )}%`
                  }
                : {
                    element: "td",
                    text: "Not Tested",
                    style: "text-align: center;"
                  }
            )
          )
          .concat([
            // Optional trailing cell linking to the multi-session HTML
            // report for this API; null when reports are disabled —
            // presumably UI.createElement ignores null children
            // (confirm against lib/ui.js).
            comparisonUi.state.reportsEnabled ?
              {
                element: "td",
                children: {
                  className:
                    "html-report button is-dark is-outlined is-small",
                  // NOTE(review): lowercase "onclick" here, while every
                  // other handler in this file uses "onClick" — confirm
                  // lib/ui.js treats both the same.
                  onclick: () =>
                    WaveService.readMultiReportUri(
                      comparisonUi.state.tokens,
                      api,
                      function(uri) {
                        window.open(uri, "_blank");
                      }
                    ),
                  text: "report"
                }
              } : null
          ])
      }))
    });
    resultsTable.appendChild(resultsTableBody);
    const apiResults = UI.getElement("api-results");
    apiResults.innerHTML = "";
    apiResults.appendChild(apiResultsView);
  }
};
</script>
</body>
</html>

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

Some files were not shown because too many files have changed in this diff Show More