From 20834f4fb9cbd5675a060d1fce1c676787cec687 Mon Sep 17 00:00:00 2001
From: Andre Natal
Date: Mon, 21 Oct 2019 20:58:57 +0000
Subject: [PATCH] Bug 1248897 - Introducing an online speech recognition service for Web Speech API r=smaug,pehrsons,padenot

This patch introduces a Speech Recognition Service that interfaces with
Mozilla's remote STT endpoint, which is currently used by multiple services.

Differential Revision: https://phabricator.services.mozilla.com/D26047

--HG--
extra : moz-landing-system : lando
---
 .../OnlineSpeechRecognitionService.cpp        | 473 ++++++++++++++++++
 .../OnlineSpeechRecognitionService.h          | 133 +++++
 .../webspeech/recognition/SpeechGrammar.h     |   5 +
 .../webspeech/recognition/SpeechGrammarList.h |   5 +
 .../recognition/SpeechRecognition.cpp         | 369 +++++++++-----
 .../webspeech/recognition/SpeechRecognition.h |  37 +-
 dom/media/webspeech/recognition/moz.build     |  13 +
 .../test/FakeSpeechRecognitionService.cpp     |  13 +-
 .../test/FakeSpeechRecognitionService.h       |   2 +-
 dom/media/webspeech/recognition/test/head.js  |  11 +-
 .../recognition/test/http_requesthandler.sjs  |  77 +++
 .../webspeech/recognition/test/mochitest.ini  |   9 +
 .../recognition/test/sinoid+hello.ogg         | Bin 0 -> 29514 bytes
 .../test/sinoid+hello.ogg^headers^            |   1 +
 .../recognition/test/test_abort.html          |   4 +-
 .../test/test_audio_capture_error.html        |   4 +-
 .../test_call_start_from_end_handler.html     |   4 +-
 .../test/test_nested_eventloop.html           |   3 +-
 .../test/test_online_400_response.html        |  47 ++
 .../test_online_empty_result_handling.html    |  48 ++
 .../recognition/test/test_online_hangup.html  |  47 ++
 .../recognition/test/test_online_http.html    |  89 ++++
 .../test/test_online_http_webkit.html         |  90 ++++
 ...test_online_malformed_result_handling.html |  48 ++
 .../test/test_recognition_service_error.html  |   4 +-
 ...t_success_without_recognition_service.html |   4 +-
 .../recognition/test/test_timeout.html        |   4 +-
 dom/webidl/SpeechGrammar.webidl               |   1 +
 dom/webidl/SpeechGrammarList.webidl           |   1 +
 dom/webidl/SpeechRecognition.webidl           |   1 +
 layout/build/components.conf                  |   6 +
 layout/build/nsLayoutModule.cpp               |   1 +
 32 files changed, 1412 insertions(+), 142 deletions(-)
 create mode 100644 dom/media/webspeech/recognition/OnlineSpeechRecognitionService.cpp
 create mode 100644 dom/media/webspeech/recognition/OnlineSpeechRecognitionService.h
 create mode 100644 dom/media/webspeech/recognition/test/http_requesthandler.sjs
 create mode 100644 dom/media/webspeech/recognition/test/sinoid+hello.ogg
 create mode 100644 dom/media/webspeech/recognition/test/sinoid+hello.ogg^headers^
 create mode 100644 dom/media/webspeech/recognition/test/test_online_400_response.html
 create mode 100644 dom/media/webspeech/recognition/test/test_online_empty_result_handling.html
 create mode 100644 dom/media/webspeech/recognition/test/test_online_hangup.html
 create mode 100644 dom/media/webspeech/recognition/test/test_online_http.html
 create mode 100644 dom/media/webspeech/recognition/test/test_online_http_webkit.html
 create mode 100644 dom/media/webspeech/recognition/test/test_online_malformed_result_handling.html

diff --git a/dom/media/webspeech/recognition/OnlineSpeechRecognitionService.cpp b/dom/media/webspeech/recognition/OnlineSpeechRecognitionService.cpp
new file mode 100644
index 000000000000..1d9581d04010
--- /dev/null
+++ b/dom/media/webspeech/recognition/OnlineSpeechRecognitionService.cpp
@@ -0,0 +1,473 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsThreadUtils.h"
+#include "nsXPCOMCIDInternal.h"
+#include "OnlineSpeechRecognitionService.h"
+#include "nsIFile.h"
+#include "SpeechGrammar.h"
+#include "SpeechRecognition.h"
+#include "SpeechRecognitionAlternative.h"
+#include "SpeechRecognitionResult.h"
+#include "SpeechRecognitionResultList.h"
+#include "nsIObserverService.h"
+#include "mozilla/StaticPrefs_media.h"
+#include "mozilla/Services.h"
+#include "nsDirectoryServiceDefs.h"
+#include "nsDirectoryServiceUtils.h"
+#include "nsMemory.h"
+#include "nsNetUtil.h"
+#include "nsContentUtils.h"
+#include "nsIPrincipal.h"
+#include "nsIStreamListener.h"
+#include "nsIUploadChannel2.h"
+#include "mozilla/dom/ClientIPCTypes.h"
+#include "nsStringStream.h"
+#include "nsIOutputStream.h"
+#include "nsStreamUtils.h"
+#include "OpusTrackEncoder.h"
+#include "OggWriter.h"
+#include "nsIClassOfService.h"
+#include <json/json.h>
+
+namespace mozilla {
+
+using namespace dom;
+using namespace std;
+
+#define PREFERENCE_DEFAULT_RECOGNITION_ENDPOINT \
+  "media.webspeech.service.endpoint"
+#define DEFAULT_RECOGNITION_ENDPOINT "https://speaktome-2.services.mozilla.com/"
+#define MAX_LISTENING_TIME_MS 10000
+
+NS_IMPL_ISUPPORTS(OnlineSpeechRecognitionService, nsISpeechRecognitionService,
+                  nsIStreamListener)
+
+NS_IMETHODIMP
+OnlineSpeechRecognitionService::OnStartRequest(nsIRequest* aRequest) {
+  MOZ_ASSERT(NS_IsMainThread());
+  return NS_OK;
+}
+
+static nsresult AssignResponseToBuffer(nsIInputStream* aIn, void* aClosure,
+                                       const char* aFromRawSegment,
+                                       uint32_t aToOffset, uint32_t aCount,
+                                       uint32_t* aWriteCount) {
+  nsCString* buf = static_cast<nsCString*>(aClosure);
+  buf->Append(aFromRawSegment, aCount);
+  *aWriteCount = aCount;
+  return NS_OK;
+}
+
+NS_IMETHODIMP
+OnlineSpeechRecognitionService::OnDataAvailable(nsIRequest* aRequest,
+                                                nsIInputStream* aInputStream,
+                                                uint64_t aOffset,
+                                                uint32_t aCount) {
+  MOZ_ASSERT(NS_IsMainThread());
+  nsresult rv;
+  uint32_t readCount;
+  rv = aInputStream->ReadSegments(AssignResponseToBuffer, &mBuf, aCount,
+                                  &readCount);
+  NS_ENSURE_SUCCESS(rv, rv);
+  return NS_OK;
+}
+
+NS_IMETHODIMP
+OnlineSpeechRecognitionService::OnStopRequest(nsIRequest* aRequest,
+                                              nsresult aStatusCode) {
+  MOZ_ASSERT(NS_IsMainThread());
+
+  auto clearBuf = MakeScopeExit([&] { mBuf.Truncate(); });
+
+  if (mAborted) {
+    return NS_OK;
+  }
+
+  bool success;
+  float confidence = 0;
+  Json::Value root;
+  Json::CharReaderBuilder builder;
+  bool parsingSuccessful;
+  nsAutoCString result;
+  nsAutoCString hypoValue;
+  nsAutoString errorMsg;
+  SpeechRecognitionErrorCode errorCode;
+
+  SR_LOG("STT Result: %s", mBuf.get());
+
+  if (NS_FAILED(aStatusCode)) {
+    success = false;
+    errorMsg.Assign(NS_LITERAL_STRING("Error connecting to the service."));
+    errorCode = SpeechRecognitionErrorCode::Network;
+  } else {
+    success = true;
+    UniquePtr<Json::CharReader> const reader(builder.newCharReader());
+    parsingSuccessful =
+        reader->parse(mBuf.BeginReading(), mBuf.EndReading(), &root, nullptr);
+    if (!parsingSuccessful) {
+      // There's an internal server error.
+      success = false;
+      errorMsg.Assign(NS_LITERAL_STRING("Internal server error"));
+      errorCode = SpeechRecognitionErrorCode::Network;
+    } else {
+      result.Assign(root.get("status", "error").asString().c_str());
+      if (result.EqualsLiteral("ok")) {
+        // Ok, we have a result.
+        if (!root["data"].empty()) {
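+          // The endpoint replies with JSON shaped like
+          // {"status":"ok","data":[{"confidence":0.9,"text":"hello"}]}
+          // (see test/http_requesthandler.sjs), so take the first (best)
+          // hypothesis and its confidence.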
+          hypoValue.Assign(root["data"][0].get("text", "").asString().c_str());
+          confidence = root["data"][0].get("confidence", "0").asFloat();
+        } else {
+          success = false;
+          errorMsg.Assign(NS_LITERAL_STRING("Error reading result data."));
+          errorCode = SpeechRecognitionErrorCode::Network;
+        }
+      } else {
+        success = false;
+        NS_ConvertUTF8toUTF16 error(root.get("message", "").asString().c_str());
+        errorMsg.Assign(error);
+        errorCode = SpeechRecognitionErrorCode::No_speech;
+      }
+    }
+  }
+
+  if (!success) {
+    mRecognition->DispatchError(
+        SpeechRecognition::EVENT_RECOGNITIONSERVICE_ERROR, errorCode, errorMsg);
+  } else {
+    // Declare javascript result events
+    RefPtr<SpeechEvent> event = new SpeechEvent(
+        mRecognition, SpeechRecognition::EVENT_RECOGNITIONSERVICE_FINAL_RESULT);
+    SpeechRecognitionResultList* resultList =
+        new SpeechRecognitionResultList(mRecognition);
+    SpeechRecognitionResult* result = new SpeechRecognitionResult(mRecognition);
+
+    if (mRecognition->MaxAlternatives() > 0) {
+      SpeechRecognitionAlternative* alternative =
+          new SpeechRecognitionAlternative(mRecognition);
+
+      alternative->mTranscript = NS_ConvertUTF8toUTF16(hypoValue);
+      alternative->mConfidence = confidence;
+
+      result->mItems.AppendElement(alternative);
+    }
+    resultList->mItems.AppendElement(result);
+
+    event->mRecognitionResultList = resultList;
+    NS_DispatchToMainThread(event);
+  }
+
+  return NS_OK;
+}
+
+OnlineSpeechRecognitionService::OnlineSpeechRecognitionService() = default;
+OnlineSpeechRecognitionService::~OnlineSpeechRecognitionService() = default;
+
+NS_IMETHODIMP
+OnlineSpeechRecognitionService::Initialize(
+    WeakPtr<SpeechRecognition> aSpeechRecognition) {
+  MOZ_ASSERT(NS_IsMainThread());
+  mWriter = MakeUnique<OggWriter>();
+  mRecognition = new nsMainThreadPtrHolder<SpeechRecognition>(
+      "OnlineSpeechRecognitionService::mRecognition", aSpeechRecognition);
+  mEncodeTaskQueue = mRecognition->GetTaskQueueForEncoding();
+  MOZ_ASSERT(mEncodeTaskQueue);
+  return NS_OK;
+}
+
+void OnlineSpeechRecognitionService::EncoderDataAvailable() {
+  MOZ_ASSERT(!NS_IsMainThread());
+  nsresult rv;
+  AutoTArray<nsTArray<uint8_t>, 4> container;
+  rv = mAudioEncoder->GetEncodedTrack(container);
+  if (NS_WARN_IF(NS_FAILED(rv))) {
+    MOZ_ASSERT_UNREACHABLE();
+  }
+
+  rv = mWriter->WriteEncodedTrack(
+      container,
+      mAudioEncoder->IsEncodingComplete() ? ContainerWriter::END_OF_STREAM : 0);
+  if (NS_WARN_IF(NS_FAILED(rv))) {
+    MOZ_ASSERT_UNREACHABLE();
+  }
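+
+  // Pull the muxed Ogg pages out of the writer; once the encoder reports
+  // completion, ask it to flush so the stream is finalized before upload.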
+  mWriter->GetContainerData(&mEncodedData, mAudioEncoder->IsEncodingComplete()
+                                               ? ContainerWriter::FLUSH_NEEDED
+                                               : 0);
+
+  if (mAudioEncoder->IsEncodingComplete()) {
+    NS_DispatchToMainThread(
+        NewRunnableMethod("OnlineSpeechRecognitionService::DoSTT", this,
+                          &OnlineSpeechRecognitionService::DoSTT));
+  }
+}
+
+void OnlineSpeechRecognitionService::EncoderInitialized() {
+  MOZ_ASSERT(!NS_IsMainThread());
+  AutoTArray<RefPtr<TrackMetadataBase>, 1> metadata;
+  metadata.AppendElement(mAudioEncoder->GetMetadata());
+  if (metadata[0]->GetKind() != TrackMetadataBase::METADATA_OPUS) {
+    SR_LOG("wrong meta data type!");
+    MOZ_ASSERT_UNREACHABLE();
+  }
+
+  nsresult rv = mWriter->SetMetadata(metadata);
+  MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv));
+
+  rv = mWriter->GetContainerData(&mEncodedData, ContainerWriter::GET_HEADER);
+  MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv));
+}
+
+void OnlineSpeechRecognitionService::EncoderError() {
+  MOZ_ASSERT(!NS_IsMainThread());
+  SR_LOG("Error encoding frames.");
+  mEncodedData.Clear();
+  NS_DispatchToMainThread(NS_NewRunnableFunction(
+      "SpeechRecognition::DispatchError",
+      [this, self = RefPtr<OnlineSpeechRecognitionService>(this)]() {
+        if (!mRecognition) {
+          return;
+        }
+        mRecognition->DispatchError(
+            SpeechRecognition::EVENT_RECOGNITIONSERVICE_ERROR,
+            SpeechRecognitionErrorCode::Audio_capture,
+            NS_LITERAL_STRING("Encoder error"));
+      }));
+}
+
+NS_IMETHODIMP
+OnlineSpeechRecognitionService::ProcessAudioSegment(AudioSegment* aAudioSegment,
+                                                    int32_t aSampleRate) {
+  MOZ_ASSERT(!NS_IsMainThread());
+  int64_t duration = aAudioSegment->GetDuration();
+  if (duration <= 0) {
+    return NS_OK;
+  }
+
+  if (!mAudioEncoder) {
+    mSpeechEncoderListener = new SpeechEncoderListener(this);
+    mAudioEncoder = MakeAndAddRef<OpusTrackEncoder>(aSampleRate);
+    RefPtr<AbstractThread> mEncoderThread = AbstractThread::GetCurrent();
+    mAudioEncoder->SetWorkerThread(mEncoderThread);
+    mAudioEncoder->RegisterListener(mSpeechEncoderListener);
+  }
+
+  mAudioEncoder->AppendAudioSegment(std::move(*aAudioSegment));
+
+  TimeStamp now = TimeStamp::Now();
+  if (mFirstIteration.IsNull()) {
+    mFirstIteration = now;
+  }
+
+  if ((now - mFirstIteration).ToMilliseconds() >= MAX_LISTENING_TIME_MS) {
+    NS_DispatchToMainThread(NS_NewRunnableFunction(
+        "SpeechRecognition::Stop",
+        [this, self = RefPtr<OnlineSpeechRecognitionService>(this)]() {
+          if (!mRecognition) {
+            return;
+          }
+          mRecognition->Stop();
+        }));
+
+    return NS_OK;
+  }
+
+  return NS_OK;
+}
+
+void OnlineSpeechRecognitionService::DoSTT() {
+  MOZ_ASSERT(NS_IsMainThread());
+
+  if (mAborted) {
+    return;
+  }
+
+  nsresult rv;
+  nsCOMPtr<nsIChannel> chan;
+  nsCOMPtr<nsIURI> uri;
+  nsAutoCString speechRecognitionEndpoint;
+  nsAutoCString prefEndpoint;
+  nsAutoString language;
+
+  Preferences::GetCString(PREFERENCE_DEFAULT_RECOGNITION_ENDPOINT,
+                          prefEndpoint);
+
+  if (!prefEndpoint.IsEmpty()) {
+    speechRecognitionEndpoint = prefEndpoint;
+  } else {
+    speechRecognitionEndpoint = DEFAULT_RECOGNITION_ENDPOINT;
+  }
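+
+  // The endpoint is overridable via the media.webspeech.service.endpoint
+  // pref; the mochitests use this to point at test/http_requesthandler.sjs.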
+  rv = NS_NewURI(getter_AddRefs(uri), speechRecognitionEndpoint, nullptr,
+                 nullptr);
+  if (NS_WARN_IF(NS_FAILED(rv))) {
+    mRecognition->DispatchError(
+        SpeechRecognition::EVENT_RECOGNITIONSERVICE_ERROR,
+        SpeechRecognitionErrorCode::Network, NS_LITERAL_STRING("Unknown URI"));
+    return;
+  }
+
+  nsSecurityFlags secFlags = nsILoadInfo::SEC_REQUIRE_CORS_DATA_INHERITS;
+  nsLoadFlags loadFlags =
+      nsIRequest::LOAD_NORMAL | nsIChannel::LOAD_BYPASS_SERVICE_WORKER;
+  nsContentPolicyType contentPolicy =
+      nsContentUtils::InternalContentPolicyTypeToExternal(
+          nsIContentPolicy::TYPE_OTHER);
+
+  nsPIDOMWindowInner* window = mRecognition->GetOwner();
+  if (NS_WARN_IF(!window)) {
+    mRecognition->DispatchError(
+        SpeechRecognition::EVENT_RECOGNITIONSERVICE_ERROR,
+        SpeechRecognitionErrorCode::Aborted, NS_LITERAL_STRING("No window"));
+    return;
+  }
+
+  Document* doc = window->GetExtantDoc();
+  if (NS_WARN_IF(!doc)) {
+    mRecognition->DispatchError(
+        SpeechRecognition::EVENT_RECOGNITIONSERVICE_ERROR,
+        SpeechRecognitionErrorCode::Aborted, NS_LITERAL_STRING("No document"));
+    return;
+  }
+  rv = NS_NewChannel(getter_AddRefs(chan), uri, doc->NodePrincipal(), secFlags,
+                     contentPolicy, nullptr, nullptr, nullptr, nullptr,
+                     loadFlags);
+  if (NS_WARN_IF(NS_FAILED(rv))) {
+    mRecognition->DispatchError(
+        SpeechRecognition::EVENT_RECOGNITIONSERVICE_ERROR,
+        SpeechRecognitionErrorCode::Network,
+        NS_LITERAL_STRING("Failed to open channel"));
+    return;
+  }
+
+  nsCOMPtr<nsIHttpChannel> httpChan = do_QueryInterface(chan);
+  if (httpChan) {
+    rv = httpChan->SetRequestMethod(NS_LITERAL_CSTRING("POST"));
+    MOZ_RELEASE_ASSERT(NS_SUCCEEDED(rv));
+  }
+
+  if (httpChan) {
+    mRecognition->GetLang(language);
+    // Accept-Language-STT is a custom header of our backend server used to
+    // set the language of the speech sample being submitted by the client.
+    rv = httpChan->SetRequestHeader(NS_LITERAL_CSTRING("Accept-Language-STT"),
+                                    NS_ConvertUTF16toUTF8(language), false);
+    MOZ_RELEASE_ASSERT(NS_SUCCEEDED(rv));
+    // Tell the server to not store the transcription by default.
+    rv = httpChan->SetRequestHeader(NS_LITERAL_CSTRING("Store-Transcription"),
+                                    NS_LITERAL_CSTRING("0"), false);
+    MOZ_RELEASE_ASSERT(NS_SUCCEEDED(rv));
+    // Tell the server to not store the sample by default.
+    rv = httpChan->SetRequestHeader(NS_LITERAL_CSTRING("Store-Sample"),
+                                    NS_LITERAL_CSTRING("0"), false);
+    MOZ_RELEASE_ASSERT(NS_SUCCEEDED(rv));
+    // Set the product tag as the Web Speech API.
+    rv = httpChan->SetRequestHeader(NS_LITERAL_CSTRING("Product-Tag"),
+                                    NS_LITERAL_CSTRING("wsa"), false);
+    MOZ_RELEASE_ASSERT(NS_SUCCEEDED(rv));
+  }
+
+  nsCOMPtr<nsIClassOfService> cos(do_QueryInterface(chan));
+  if (cos) {
+    cos->AddClassFlags(nsIClassOfService::UrgentStart);
+  }
+
+  nsCOMPtr<nsIUploadChannel2> uploadChan = do_QueryInterface(chan);
+  if (uploadChan) {
+    nsCOMPtr<nsIInputStream> bodyStream;
+    uint32_t length = 0;
+    for (const nsTArray<uint8_t>& chunk : mEncodedData) {
+      length += chunk.Length();
+    }
+
+    nsTArray<uint8_t> audio;
+    if (!audio.SetCapacity(length, fallible)) {
+      mRecognition->DispatchError(
+          SpeechRecognition::EVENT_RECOGNITIONSERVICE_ERROR,
+          SpeechRecognitionErrorCode::Audio_capture,
+          NS_LITERAL_STRING("Allocation error"));
+      return;
+    }
+
+    for (const nsTArray<uint8_t>& chunk : mEncodedData) {
+      audio.AppendElements(chunk);
+    }
+
+    mEncodedData.Clear();
+
+    rv = NS_NewByteInputStream(getter_AddRefs(bodyStream), std::move(audio));
+    if (NS_WARN_IF(NS_FAILED(rv))) {
+      mRecognition->DispatchError(
+          SpeechRecognition::EVENT_RECOGNITIONSERVICE_ERROR,
+          SpeechRecognitionErrorCode::Network,
+          NS_LITERAL_STRING("Failed to open stream"));
+      return;
+    }
+    if (bodyStream) {
+      rv = uploadChan->ExplicitSetUploadStream(
+          bodyStream, NS_LITERAL_CSTRING("audio/ogg"), length,
+          NS_LITERAL_CSTRING("POST"), false);
+      MOZ_RELEASE_ASSERT(NS_SUCCEEDED(rv));
+    }
+  }
+
+  rv = chan->AsyncOpen(this);
+  if (NS_WARN_IF(NS_FAILED(rv))) {
+    mRecognition->DispatchError(
+        SpeechRecognition::EVENT_RECOGNITIONSERVICE_ERROR,
+        SpeechRecognitionErrorCode::Network,
+        NS_LITERAL_STRING("Internal server error"));
+  }
+}
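+
+// SoundEnd() runs on the main thread when audio capture stops. It finalizes
+// the encoder on the encoder task queue; the final encoded data then causes
+// EncoderDataAvailable() to dispatch DoSTT() back to the main thread.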
+NS_IMETHODIMP
+OnlineSpeechRecognitionService::SoundEnd() {
+  MOZ_ASSERT(NS_IsMainThread());
+
+  if (!mEncodeTaskQueue) {
+    // Not initialized
+    return NS_OK;
+  }
+
+  nsresult rv = mEncodeTaskQueue->Dispatch(NS_NewRunnableFunction(
+      "OnlineSpeechRecognitionService::SoundEnd",
+      [this, self = RefPtr<OnlineSpeechRecognitionService>(this)]() {
+        if (mAudioEncoder) {
+          mAudioEncoder->NotifyEndOfStream();
+          mAudioEncoder->UnregisterListener(mSpeechEncoderListener);
+          mSpeechEncoderListener = nullptr;
+          mAudioEncoder = nullptr;
+        }
+      }));
+  MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv));
+  Unused << rv;
+
+  mEncodeTaskQueue = nullptr;
+
+  return NS_OK;
+}
+
+NS_IMETHODIMP
+OnlineSpeechRecognitionService::ValidateAndSetGrammarList(
+    SpeechGrammar* aSpeechGrammar,
+    nsISpeechGrammarCompilationCallback* aCallback) {
+  // This is an online LVCSR (STT) service,
+  // so we don't need to set a grammar.
+  return NS_OK;
+}
+
+NS_IMETHODIMP
+OnlineSpeechRecognitionService::Abort() {
+  MOZ_ASSERT(NS_IsMainThread());
+  if (mAborted) {
+    return NS_OK;
+  }
+  mAborted = true;
+  return SoundEnd();
+}
+}  // namespace mozilla
diff --git a/dom/media/webspeech/recognition/OnlineSpeechRecognitionService.h b/dom/media/webspeech/recognition/OnlineSpeechRecognitionService.h
new file mode 100644
index 000000000000..60bd28dc161c
--- /dev/null
+++ b/dom/media/webspeech/recognition/OnlineSpeechRecognitionService.h
@@ -0,0 +1,133 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_dom_OnlineRecognitionService_h
+#define mozilla_dom_OnlineRecognitionService_h
+
+#include "nsCOMPtr.h"
+#include "nsTArray.h"
+#include "nsISpeechRecognitionService.h"
+#include "speex/speex_resampler.h"
+#include "nsIStreamListener.h"
+#include "OpusTrackEncoder.h"
+#include "ContainerWriter.h"
+
+#define NS_ONLINE_SPEECH_RECOGNITION_SERVICE_CID \
+  {0x0ff5ce56,                                   \
+   0x5b09,                                       \
+   0x4db8,                                       \
+   {0xad, 0xc6, 0x82, 0x66, 0xaf, 0x95, 0xf8, 0x64}};
+
+namespace mozilla {
+
+namespace ipc {
+class PrincipalInfo;
+}  // namespace ipc
+
+/**
+ * Online implementation of the nsISpeechRecognitionService interface
+ */
+class OnlineSpeechRecognitionService : public nsISpeechRecognitionService,
+                                       public nsIStreamListener {
+ public:
+  // Add XPCOM glue code
+  NS_DECL_THREADSAFE_ISUPPORTS
+  NS_DECL_NSISPEECHRECOGNITIONSERVICE
+  NS_DECL_NSIREQUESTOBSERVER
+  NS_DECL_NSISTREAMLISTENER
+
+  /**
+   * Listener responsible for handling the events raised by the TrackEncoder
+   */
+  class SpeechEncoderListener : public TrackEncoderListener {
+   public:
+    explicit SpeechEncoderListener(OnlineSpeechRecognitionService* aService)
+        : mService(aService), mOwningThread(AbstractThread::GetCurrent()) {}
+
+    void Initialized(TrackEncoder* aEncoder) override {
+      MOZ_ASSERT(mOwningThread->IsCurrentThreadIn());
+      mService->EncoderInitialized();
+    }
+
+    void DataAvailable(TrackEncoder* aEncoder) override {
+      MOZ_ASSERT(mOwningThread->IsCurrentThreadIn());
+      mService->EncoderDataAvailable();
+    }
+
+    void Error(TrackEncoder* aEncoder) override {
+      MOZ_ASSERT(mOwningThread->IsCurrentThreadIn());
+      mService->EncoderError();
+    }
+
+   private:
+    const RefPtr<OnlineSpeechRecognitionService> mService;
+    const RefPtr<AbstractThread> mOwningThread;
+  };
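+
+  // The listener is constructed on the encoder task queue (see
+  // ProcessAudioSegment), so the assertions above pin every callback to that
+  // same queue.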
+
+  /**
+   * Default constructs a OnlineSpeechRecognitionService
+   */
+  OnlineSpeechRecognitionService();
+
+  /**
+   * Called by SpeechEncoderListener when the AudioTrackEncoder has been
+   * initialized.
+   */
+  void EncoderInitialized();
+
+  /**
+   * Called by SpeechEncoderListener when the AudioTrackEncoder has encoded
+   * some data for us to pass along.
+   */
+  void EncoderDataAvailable();
+
+  /**
+   * Called by SpeechEncoderListener when the AudioTrackEncoder has
+   * encountered an error.
+   */
+  void EncoderError();
+
+ private:
+  /**
+   * Private destructor to prevent bypassing of reference counting
+   */
+  virtual ~OnlineSpeechRecognitionService();
+
+  /** The associated SpeechRecognition */
+  nsMainThreadPtrHandle<dom::SpeechRecognition> mRecognition;
+
+  /**
+   * Builds a mock SpeechRecognitionResultList
+   */
+  dom::SpeechRecognitionResultList* BuildMockResultList();
+
+  /**
+   * Method responsible for uploading the audio to the remote endpoint
+   */
+  void DoSTT();
+
+  // Encoded and packaged ogg audio data
+  nsTArray<nsTArray<uint8_t>> mEncodedData;
+  // Member responsible for holding a reference to the TrackEncoderListener
+  RefPtr<SpeechEncoderListener> mSpeechEncoderListener;
+  // Encoder responsible for encoding the frames from pcm to opus, which is
+  // the format supported by our backend
+  RefPtr<OpusTrackEncoder> mAudioEncoder;
+  // Object responsible for wrapping the opus frames into an ogg container
+  UniquePtr<ContainerWriter> mWriter;
+  // Member responsible for storing the json string returned by the endpoint
+  nsCString mBuf;
+  // Used to calculate a ceiling on the time spent listening.
+  TimeStamp mFirstIteration;
+  // Whether the user chose to abort the session.
+  bool mAborted = false;
+  // Reference to the audio encoder task queue.
+  RefPtr<TaskQueue> mEncodeTaskQueue;
+};
+
+}  // namespace mozilla
+
+#endif
diff --git a/dom/media/webspeech/recognition/SpeechGrammar.h b/dom/media/webspeech/recognition/SpeechGrammar.h
index 12cd31e631e8..6cd47eeabf53 100644
--- a/dom/media/webspeech/recognition/SpeechGrammar.h
+++ b/dom/media/webspeech/recognition/SpeechGrammar.h
@@ -36,6 +36,11 @@ class SpeechGrammar final : public nsISupports, public nsWrapperCache {
   static already_AddRefed<SpeechGrammar> Constructor(
       const GlobalObject& aGlobal);
 
+  static already_AddRefed<SpeechGrammar> WebkitSpeechGrammar(
+      const GlobalObject& aGlobal, ErrorResult& aRv) {
+    return Constructor(aGlobal);
+  }
+
   void GetSrc(nsString& aRetVal, ErrorResult& aRv) const;
 
   void SetSrc(const nsAString& aArg, ErrorResult& aRv);
diff --git a/dom/media/webspeech/recognition/SpeechGrammarList.h b/dom/media/webspeech/recognition/SpeechGrammarList.h
index d07d92475c56..6782e72e1dad 100644
--- a/dom/media/webspeech/recognition/SpeechGrammarList.h
+++ b/dom/media/webspeech/recognition/SpeechGrammarList.h
@@ -35,6 +35,11 @@ class SpeechGrammarList final : public nsISupports, public nsWrapperCache {
   static already_AddRefed<SpeechGrammarList> Constructor(
       const GlobalObject& aGlobal);
 
+  static already_AddRefed<SpeechGrammarList> WebkitSpeechGrammarList(
+      const GlobalObject& aGlobal, ErrorResult& aRv) {
+    return Constructor(aGlobal);
+  }
+
   nsISupports* GetParentObject() const;
 
   JSObject* WrapObject(JSContext* aCx,
diff --git a/dom/media/webspeech/recognition/SpeechRecognition.cpp b/dom/media/webspeech/recognition/SpeechRecognition.cpp
index 2eecc1bfb15c..80f4f9246562 100644
--- a/dom/media/webspeech/recognition/SpeechRecognition.cpp
+++ b/dom/media/webspeech/recognition/SpeechRecognition.cpp
@@ -19,7 +19,8 @@
 #include "mozilla/Preferences.h"
 #include "mozilla/Services.h"
 #include "mozilla/StaticPrefs_media.h"
-
+#include "mozilla/AbstractThread.h"
+#include "VideoUtils.h"
 #include "AudioSegment.h"
 #include "MediaEnginePrefs.h"
 #include "endpointer.h"
@@ -46,17 +47,17 @@ namespace mozilla {
 namespace dom {
 
 #define PREFERENCE_DEFAULT_RECOGNITION_SERVICE "media.webspeech.service.default"
-#define DEFAULT_RECOGNITION_SERVICE_PREFIX "pocketsphinx-"
-#define DEFAULT_RECOGNITION_SERVICE "pocketsphinx-en-US"
+#define DEFAULT_RECOGNITION_SERVICE "online"
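+// The service name is expanded into a "@mozilla.org/webspeech/service;1?name="
+// contract ID; this patch registers OnlineSpeechRecognitionService for the
+// "online" name in layout/build/components.conf.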
 #define PREFERENCE_ENDPOINTER_SILENCE_LENGTH "media.webspeech.silence_length"
 #define PREFERENCE_ENDPOINTER_LONG_SILENCE_LENGTH \
   "media.webspeech.long_silence_length"
 #define PREFERENCE_ENDPOINTER_LONG_SPEECH_LENGTH \
   "media.webspeech.long_speech_length"
+#define PREFERENCE_SPEECH_DETECTION_TIMEOUT_MS \
+  "media.webspeech.recognition.timeout"
 
 static const uint32_t kSAMPLE_RATE = 16000;
-static const uint32_t kSPEECH_DETECTION_TIMEOUT_MS = 10000;
 
 // number of frames corresponding to 300ms of audio to send to endpointer while
 // it's in environment estimation mode
@@ -70,19 +71,39 @@ LogModule* GetSpeechRecognitionLog() {
 #define SR_LOG(...) \
   MOZ_LOG(GetSpeechRecognitionLog(), mozilla::LogLevel::Debug, (__VA_ARGS__))
 
-already_AddRefed<nsISpeechRecognitionService> GetSpeechRecognitionService(
-    const nsAString& aLang) {
+namespace {
+class SpeechRecognitionShutdownBlocker : public media::ShutdownBlocker {
+ public:
+  SpeechRecognitionShutdownBlocker(SpeechRecognition* aRecognition,
+                                   const nsString& aName)
+      : media::ShutdownBlocker(aName), mRecognition(aRecognition) {}
+
+  NS_IMETHOD BlockShutdown(nsIAsyncShutdownClient*) override {
+    MOZ_ASSERT(NS_IsMainThread());
+    // AbortSilently will eventually clear the blocker.
+    mRecognition->Abort();
+    return NS_OK;
+  }
+
+ private:
+  const RefPtr<SpeechRecognition> mRecognition;
+};
+
+enum class ServiceCreationError {
+  ServiceNotFound,
+};
+
+Result<nsCOMPtr<nsISpeechRecognitionService>, ServiceCreationError>
+CreateSpeechRecognitionService(nsPIDOMWindowInner* aWindow,
+                               SpeechRecognition* aRecognition,
+                               const nsAString& aLang) {
   nsAutoCString speechRecognitionServiceCID;
 
   nsAutoCString prefValue;
   Preferences::GetCString(PREFERENCE_DEFAULT_RECOGNITION_SERVICE, prefValue);
   nsAutoCString speechRecognitionService;
 
-  if (!aLang.IsEmpty()) {
-    speechRecognitionService =
-        NS_LITERAL_CSTRING(DEFAULT_RECOGNITION_SERVICE_PREFIX) +
-        NS_ConvertUTF16toUTF8(aLang);
-  } else if (!prefValue.IsEmpty()) {
+  if (!prefValue.IsEmpty()) {
     speechRecognitionService = prefValue;
   } else {
     speechRecognitionService = DEFAULT_RECOGNITION_SERVICE;
   }
@@ -99,27 +120,15 @@ already_AddRefed<nsISpeechRecognitionService> GetSpeechRecognitionService(
 
   nsresult rv;
   nsCOMPtr<nsISpeechRecognitionService> recognitionService;
-  recognitionService = do_GetService(speechRecognitionServiceCID.get(), &rv);
-  return recognitionService.forget();
-}
-
-class SpeechRecognitionShutdownBlocker : public media::ShutdownBlocker {
- public:
-  explicit SpeechRecognitionShutdownBlocker(SpeechRecognition* aRecognition)
-      : media::ShutdownBlocker(NS_LITERAL_STRING("SpeechRecognition shutdown")),
-        mRecognition(aRecognition) {}
-
-  NS_IMETHOD BlockShutdown(nsIAsyncShutdownClient*) override {
-    MOZ_ASSERT(NS_IsMainThread());
-
-    // AbortSilently will eventually clear the blocker.
-    mRecognition->Abort();
-    return NS_OK;
+  recognitionService =
+      do_CreateInstance(speechRecognitionServiceCID.get(), &rv);
+  if (!recognitionService) {
+    return Err(ServiceCreationError::ServiceNotFound);
   }
 
- private:
-  const RefPtr<SpeechRecognition> mRecognition;
-};
+  return recognitionService;
+}
+}  // namespace
 
 NS_IMPL_CYCLE_COLLECTION_INHERITED(SpeechRecognition, DOMEventTargetHelper,
                                    mStream, mTrack, mRecognitionService,
@@ -137,7 +146,8 @@ SpeechRecognition::SpeechRecognition(nsPIDOMWindowInner* aOwnerWindow)
       mEndpointer(kSAMPLE_RATE),
       mAudioSamplesPerChunk(mEndpointer.FrameSize()),
       mSpeechDetectionTimer(NS_NewTimer()),
-      mSpeechGrammarList(new SpeechGrammarList(GetParentObject())),
+      mSpeechGrammarList(new SpeechGrammarList(GetOwner())),
+      mContinuous(false),
       mInterimResults(false),
       mMaxAlternatives(1) {
   SR_LOG("created SpeechRecognition");
@@ -154,6 +164,10 @@ SpeechRecognition::SpeechRecognition(nsPIDOMWindowInner* aOwnerWindow)
       Preferences::GetInt(PREFERENCE_ENDPOINTER_LONG_SILENCE_LENGTH, 2500000));
   mEndpointer.set_long_speech_length(
       Preferences::GetInt(PREFERENCE_ENDPOINTER_SILENCE_LENGTH, 3 * 1000000));
+
+  mSpeechDetectionTimeoutMs =
+      Preferences::GetInt(PREFERENCE_SPEECH_DETECTION_TIMEOUT_MS, 10000);
+
   Reset();
 }
 
@@ -211,8 +225,6 @@ already_AddRefed<SpeechRecognition> SpeechRecognition::Constructor(
   return object.forget();
 }
 
-nsISupports* SpeechRecognition::GetParentObject() const { return GetOwner(); }
-
 void SpeechRecognition::ProcessEvent(SpeechEvent* aEvent) {
   SR_LOG("Processing %s, current state is %s", GetName(aEvent),
          GetName(mCurrentState));
@@ -245,8 +257,8 @@ void SpeechRecognition::Transition(SpeechEvent* aEvent) {
         case EVENT_RECOGNITIONSERVICE_ERROR:
           AbortError(aEvent);
          break;
-        case EVENT_COUNT:
-          MOZ_CRASH("Invalid event EVENT_COUNT");
+        default:
+          MOZ_CRASH("Invalid event");
       }
       break;
     case STATE_STARTING:
@@ -262,7 +274,7 @@ void SpeechRecognition::Transition(SpeechEvent* aEvent) {
           AbortSilently(aEvent);
           break;
         case EVENT_STOP:
-          Reset();
+          ResetAndEnd();
           break;
         case EVENT_RECOGNITIONSERVICE_INTERMEDIATE_RESULT:
         case EVENT_RECOGNITIONSERVICE_FINAL_RESULT:
@@ -271,8 +283,8 @@ void SpeechRecognition::Transition(SpeechEvent* aEvent) {
         case EVENT_START:
          SR_LOG("STATE_STARTING: Unhandled event %s", GetName(aEvent));
          MOZ_CRASH();
-        case EVENT_COUNT:
-          MOZ_CRASH("Invalid event EVENT_COUNT");
+        default:
+          MOZ_CRASH("Invalid event");
       }
       break;
     case STATE_ESTIMATING:
@@ -297,8 +309,8 @@ void SpeechRecognition::Transition(SpeechEvent* aEvent) {
        case EVENT_START:
          SR_LOG("STATE_ESTIMATING: Unhandled event %d", aEvent->mType);
          MOZ_CRASH();
-        case EVENT_COUNT:
-          MOZ_CRASH("Invalid event EVENT_COUNT");
+        default:
+          MOZ_CRASH("Invalid event");
       }
       break;
     case STATE_WAITING_FOR_SPEECH:
@@ -323,8 +335,8 @@ void SpeechRecognition::Transition(SpeechEvent* aEvent) {
        case EVENT_START:
          SR_LOG("STATE_STARTING: Unhandled event %s", GetName(aEvent));
          MOZ_CRASH();
-        case EVENT_COUNT:
-          MOZ_CRASH("Invalid event EVENT_COUNT");
+        default:
+          MOZ_CRASH("Invalid event");
       }
       break;
     case STATE_RECOGNIZING:
@@ -349,8 +361,8 @@ void SpeechRecognition::Transition(SpeechEvent* aEvent) {
        case EVENT_START:
          SR_LOG("STATE_RECOGNIZING: Unhandled aEvent %s", GetName(aEvent));
          MOZ_CRASH();
-        case EVENT_COUNT:
-          MOZ_CRASH("Invalid event EVENT_COUNT");
+        default:
+          MOZ_CRASH("Invalid event");
       }
       break;
    case STATE_WAITING_FOR_RESULT:
@@ -376,12 +388,30 @@ void SpeechRecognition::Transition(SpeechEvent* aEvent) {
          SR_LOG("STATE_WAITING_FOR_RESULT: Unhandled aEvent %s",
                 GetName(aEvent));
          MOZ_CRASH();
EVENT_COUNT"); + default: + MOZ_CRASH("Invalid event"); } break; - case STATE_COUNT: - MOZ_CRASH("Invalid state STATE_COUNT"); + case STATE_ABORTING: + switch (aEvent->mType) { + case EVENT_STOP: + case EVENT_ABORT: + case EVENT_AUDIO_DATA: + case EVENT_AUDIO_ERROR: + case EVENT_RECOGNITIONSERVICE_INTERMEDIATE_RESULT: + case EVENT_RECOGNITIONSERVICE_FINAL_RESULT: + case EVENT_RECOGNITIONSERVICE_ERROR: + DoNothing(aEvent); + break; + case EVENT_START: + SR_LOG("STATE_ABORTING: Unhandled aEvent %s", GetName(aEvent)); + MOZ_CRASH(); + default: + MOZ_CRASH("Invalid event"); + } + break; + default: + MOZ_CRASH("Invalid state"); } } @@ -400,7 +430,17 @@ uint32_t SpeechRecognition::ProcessAudioSegment(AudioSegment* aSegment, iterator.Next(); } - mRecognitionService->ProcessAudioSegment(aSegment, aTrackRate); + // we need to call the nsISpeechRecognitionService::ProcessAudioSegment + // in a separate thread so that any eventual encoding or pre-processing + // of the audio does not block the main thread + nsresult rv = mEncodeTaskQueue->Dispatch( + NewRunnableMethod, TrackRate>( + "nsISpeechRecognitionService::ProcessAudioSegment", + mRecognitionService, + &nsISpeechRecognitionService::ProcessAudioSegment, + std::move(*aSegment), aTrackRate)); + MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv)); + Unused << rv; return samples; } @@ -421,7 +461,19 @@ uint32_t SpeechRecognition::ProcessAudioSegment(AudioSegment* aSegment, void SpeechRecognition::Reset() { SetState(STATE_IDLE); + + // This breaks potential ref-cycles. mRecognitionService = nullptr; + + ++mStreamGeneration; + if (mStream) { + mStream->UnregisterTrackListener(this); + mStream = nullptr; + } + mTrack = nullptr; + mTrackIsOwned = false; + mStopRecordingPromise = nullptr; + mEncodeTaskQueue = nullptr; mEstimationSamples = 0; mBufferedSamples = 0; mSpeechDetectionTimer->Cancel(); @@ -454,7 +506,12 @@ void SpeechRecognition::StopRecordingAndRecognize(SpeechEvent* aEvent) { SetState(STATE_WAITING_FOR_RESULT); MOZ_ASSERT(mRecognitionService, "Service deleted before recording done"); - mRecognitionService->SoundEnd(); + + // This will run SoundEnd on the service just before StopRecording begins + // shutting the encode thread down. + mSpeechListener->mRemovedPromise->Then( + GetCurrentThreadSerialEventTarget(), __func__, + [service = mRecognitionService] { service->SoundEnd(); }); StopRecording(); } @@ -518,14 +575,23 @@ void SpeechRecognition::DoNothing(SpeechEvent* aEvent) {} void SpeechRecognition::AbortSilently(SpeechEvent* aEvent) { if (mRecognitionService) { - mRecognitionService->Abort(); + if (mTrack) { + // This will run Abort on the service just before StopRecording begins + // shutting the encode thread down. + mSpeechListener->mRemovedPromise->Then( + GetCurrentThreadSerialEventTarget(), __func__, + [service = mRecognitionService] { service->Abort(); }); + } else { + // Recording hasn't started yet. We can just call Abort(). 
+  StopRecording()->Then(
+      GetCurrentThreadSerialEventTarget(), __func__,
+      [self = RefPtr<SpeechRecognition>(this), this] { ResetAndEnd(); });
 
-  ResetAndEnd();
+  SetState(STATE_ABORTING);
 }
 
 void SpeechRecognition::AbortError(SpeechEvent* aEvent) {
@@ -544,54 +610,83 @@ void SpeechRecognition::NotifyError(SpeechEvent* aEvent) {
  **************************************/
 NS_IMETHODIMP
 SpeechRecognition::StartRecording(RefPtr<AudioStreamTrack>& aTrack) {
-  // hold a reference so that the underlying track
-  // doesn't get Destroy()'ed
+  // hold a reference so that the underlying track doesn't get collected.
   mTrack = aTrack;
+  MOZ_ASSERT(!mTrack->Ended());
 
-  if (NS_WARN_IF(mTrack->Ended())) {
-    return NS_ERROR_UNEXPECTED;
-  }
   mSpeechListener = new SpeechTrackListener(this);
   mTrack->AddListener(mSpeechListener);
 
-  mShutdownBlocker = MakeAndAddRef<SpeechRecognitionShutdownBlocker>(this);
+  nsString blockerName;
+  blockerName.AppendPrintf("SpeechRecognition %p shutdown", this);
+  mShutdownBlocker =
+      MakeAndAddRef<SpeechRecognitionShutdownBlocker>(this, blockerName);
   RefPtr<nsIAsyncShutdownClient> shutdown = media::GetShutdownBarrier();
   shutdown->AddBlocker(mShutdownBlocker, NS_LITERAL_STRING(__FILE__), __LINE__,
                        NS_LITERAL_STRING("SpeechRecognition shutdown"));
 
   mEndpointer.StartSession();
 
-  return mSpeechDetectionTimer->Init(this, kSPEECH_DETECTION_TIMEOUT_MS,
+  return mSpeechDetectionTimer->Init(this, mSpeechDetectionTimeoutMs,
                                      nsITimer::TYPE_ONE_SHOT);
 }
 
-NS_IMETHODIMP
-SpeechRecognition::StopRecording() {
-  if (mShutdownBlocker) {
-    // Block shutdown until the speech track listener has been removed from the
-    // MTG, as it holds a reference to us, and we reference the world, which we
-    // don't want to leak.
-    mSpeechListener->mRemovedPromise->Then(
-        GetCurrentThreadSerialEventTarget(), __func__,
-        [blocker = std::move(mShutdownBlocker)] {
-          RefPtr<nsIAsyncShutdownClient> shutdown = media::GetShutdownBarrier();
-          nsresult rv = shutdown->RemoveBlocker(blocker);
-          MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv));
-          Unused << rv;
-        });
+RefPtr<GenericNonExclusivePromise> SpeechRecognition::StopRecording() {
+  if (!mTrack) {
+    // Recording wasn't started, or has already been stopped.
+    if (mStream) {
+      // Ensure we don't start recording because a track became available
+      // before we get reset.
+      mStream->UnregisterTrackListener(this);
+    }
+    return GenericNonExclusivePromise::CreateAndResolve(true, __func__);
+  }
+
+  if (mStopRecordingPromise) {
+    return mStopRecordingPromise;
   }
-  MOZ_ASSERT(!mShutdownBlocker);
 
-  mStream->UnregisterTrackListener(this);
   mTrack->RemoveListener(mSpeechListener);
-  mStream = nullptr;
-  mSpeechListener = nullptr;
-  mTrack = nullptr;
+  if (mTrackIsOwned) {
+    mTrack->Stop();
+  }
 
   mEndpointer.EndSession();
   DispatchTrustedEvent(NS_LITERAL_STRING("audioend"));
 
-  return NS_OK;
+  // Block shutdown until the speech track listener has been removed from the
+  // MSG, as it holds a reference to us, and we reference the world, which we
+  // don't want to leak.
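+  // Teardown happens in two steps: shut down the encoder task queue first,
+  // then drop the shutdown blocker back on the main thread.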
+  mStopRecordingPromise =
+      mSpeechListener->mRemovedPromise
+          ->Then(
+              GetCurrentThreadSerialEventTarget(), __func__,
+              [self = RefPtr<SpeechRecognition>(this), this] {
+                SR_LOG("Shutting down encoding thread");
+                return mEncodeTaskQueue->BeginShutdown();
+              },
+              [] {
+                MOZ_CRASH("Unexpected rejection");
+                return ShutdownPromise::CreateAndResolve(false, __func__);
+              })
+          ->Then(
+              GetCurrentThreadSerialEventTarget(), __func__,
+              [self = RefPtr<SpeechRecognition>(this), this] {
+                RefPtr<nsIAsyncShutdownClient> shutdown =
+                    media::GetShutdownBarrier();
+                shutdown->RemoveBlocker(mShutdownBlocker);
+                mShutdownBlocker = nullptr;
+
+                MOZ_DIAGNOSTIC_ASSERT(mCurrentState != STATE_IDLE);
+                return GenericNonExclusivePromise::CreateAndResolve(true,
+                                                                    __func__);
              },
              [] {
                MOZ_CRASH("Unexpected rejection");
                return GenericNonExclusivePromise::CreateAndResolve(false,
                                                                    __func__);
              });
+  return mStopRecordingPromise;
 }
 
 NS_IMETHODIMP
@@ -648,12 +743,11 @@ void SpeechRecognition::GetLang(nsString& aRetVal) const { aRetVal = mLang; }
 void SpeechRecognition::SetLang(const nsAString& aArg) { mLang = aArg; }
 
 bool SpeechRecognition::GetContinuous(ErrorResult& aRv) const {
-  aRv.Throw(NS_ERROR_NOT_IMPLEMENTED);
-  return false;
+  return mContinuous;
 }
 
 void SpeechRecognition::SetContinuous(bool aArg, ErrorResult& aRv) {
-  aRv.Throw(NS_ERROR_NOT_IMPLEMENTED);
+  mContinuous = aArg;
 }
 
 bool SpeechRecognition::InterimResults() const { return mInterimResults; }
@@ -690,6 +784,10 @@ void SpeechRecognition::Start(const Optional<NonNull<DOMMediaStream>>& aStream,
     return;
   }
 
+  mEncodeTaskQueue = MakeAndAddRef<TaskQueue>(
+      GetMediaThreadPool(MediaThreadType::WEBRTC_DECODER),
+      "WebSpeechEncoderThread");
+
   nsresult rv;
   rv = mRecognitionService->Initialize(this);
   if (NS_WARN_IF(NS_FAILED(rv))) {
@@ -701,6 +799,7 @@ void SpeechRecognition::Start(const Optional<NonNull<DOMMediaStream>>& aStream,
 
   if (aStream.WasPassed()) {
     mStream = &aStream.Value();
+    mTrackIsOwned = false;
     mStream->RegisterTrackListener(this);
     nsTArray<RefPtr<AudioStreamTrack>> tracks;
     mStream->GetAudioTracks(tracks);
@@ -711,24 +810,40 @@ void SpeechRecognition::Start(const Optional<NonNull<DOMMediaStream>>& aStream,
       }
     }
   } else {
+    mTrackIsOwned = true;
    AutoNoJSAPI nojsapi;
    RefPtr<SpeechRecognition> self(this);
    MediaManager::Get()
        ->GetUserMedia(GetOwner(), constraints, aCallerType)
        ->Then(
            GetCurrentThreadSerialEventTarget(), __func__,
-           [this, self](RefPtr<DOMMediaStream>&& aStream) {
+           [this, self,
+            generation = mStreamGeneration](RefPtr<DOMMediaStream>&& aStream) {
+             nsTArray<RefPtr<AudioStreamTrack>> tracks;
+             aStream->GetAudioTracks(tracks);
+             if (mAborted || mCurrentState != STATE_STARTING ||
+                 mStreamGeneration != generation) {
+               // We were probably aborted. Exit early.
+               for (const RefPtr<AudioStreamTrack>& track : tracks) {
+                 track->Stop();
+               }
+               return;
+             }
             mStream = std::move(aStream);
             mStream->RegisterTrackListener(this);
-            nsTArray<RefPtr<AudioStreamTrack>> tracks;
-            mStream->GetAudioTracks(tracks);
             for (const RefPtr<AudioStreamTrack>& track : tracks) {
               if (!track->Ended()) {
                 NotifyTrackAdded(track);
               }
             }
           },
-          [this, self](RefPtr<MediaMgrError>&& error) {
+          [this, self,
+           generation = mStreamGeneration](RefPtr<MediaMgrError>&& error) {
+            if (mAborted || mCurrentState != STATE_STARTING ||
+                mStreamGeneration != generation) {
+              // We were probably aborted. Exit early.
+              return;
+            }
            SpeechRecognitionErrorCode errorCode;
 
            if (error->mName == MediaMgrError::Name::NotAllowedError) {
@@ -746,44 +861,47 @@ void SpeechRecognition::Start(const Optional<NonNull<DOMMediaStream>>& aStream,
 }
 
 bool SpeechRecognition::SetRecognitionService(ErrorResult& aRv) {
+  if (!GetOwner()) {
+    aRv.Throw(NS_ERROR_DOM_INVALID_STATE_ERR);
+    return false;
+  }
+
   // See:
   // https://dvcs.w3.org/hg/speech-api/raw-file/tip/webspeechapi.html#dfn-lang
+  nsAutoString lang;
   if (!mLang.IsEmpty()) {
-    mRecognitionService = GetSpeechRecognitionService(mLang);
-
-    if (!mRecognitionService) {
+    lang = mLang;
+  } else {
+    nsCOMPtr<Document> document = GetOwner()->GetExtantDoc();
+    if (!document) {
+      aRv.Throw(NS_ERROR_DOM_INVALID_STATE_ERR);
+      return false;
+    }
+    nsCOMPtr<Element> element = document->GetRootElement();
+    if (!element) {
       aRv.Throw(NS_ERROR_DOM_INVALID_STATE_ERR);
       return false;
     }
-    return true;
+    element->GetLang(lang);
   }
 
-  nsCOMPtr<nsPIDOMWindowInner> window = GetOwner();
-  if (!window) {
-    aRv.Throw(NS_ERROR_DOM_INVALID_STATE_ERR);
-    return false;
-  }
-  nsCOMPtr<Document> document = window->GetExtantDoc();
-  if (!document) {
-    aRv.Throw(NS_ERROR_DOM_INVALID_STATE_ERR);
-    return false;
-  }
-  nsCOMPtr<Element> element = document->GetRootElement();
-  if (!element) {
-    aRv.Throw(NS_ERROR_DOM_INVALID_STATE_ERR);
-    return false;
-  }
-
-  nsAutoString lang;
-  element->GetLang(lang);
-  mRecognitionService = GetSpeechRecognitionService(lang);
-
-  if (!mRecognitionService) {
-    aRv.Throw(NS_ERROR_DOM_INVALID_STATE_ERR);
+  auto result = CreateSpeechRecognitionService(GetOwner(), this, lang);
+
+  if (result.isErr()) {
+    switch (result.unwrapErr()) {
+      case ServiceCreationError::ServiceNotFound:
+        aRv.Throw(NS_ERROR_DOM_INVALID_STATE_ERR);
+        break;
+      default:
+        MOZ_CRASH("Unknown error");
+    }
     return false;
   }
 
+  mRecognitionService = result.unwrap();
+  MOZ_DIAGNOSTIC_ASSERT(mRecognitionService);
   return true;
 }
 
@@ -794,11 +912,6 @@ bool SpeechRecognition::ValidateAndSetGrammarList(ErrorResult& aRv) {
   }
 
   uint32_t grammarListLength = mSpeechGrammarList->Length();
-  if (0 == grammarListLength) {
-    aRv.Throw(NS_ERROR_DOM_INVALID_STATE_ERR);
-    return false;
-  }
-
   for (uint32_t count = 0; count < grammarListLength; ++count) {
     RefPtr<SpeechGrammar> speechGrammar = mSpeechGrammarList->Item(count, aRv);
     if (aRv.Failed()) {
@@ -825,6 +938,7 @@ void SpeechRecognition::Abort() {
   }
 
   mAborted = true;
+
   RefPtr<SpeechEvent> event = new SpeechEvent(this, EVENT_ABORT);
   NS_DispatchToMainThread(event);
 }
@@ -874,14 +988,13 @@ void SpeechRecognition::DispatchError(EventType aErrorType,
 uint32_t SpeechRecognition::FillSamplesBuffer(const int16_t* aSamples,
                                               uint32_t aSampleCount) {
   MOZ_ASSERT(mBufferedSamples < mAudioSamplesPerChunk);
-  MOZ_ASSERT(mAudioSamplesBuffer.get());
+  MOZ_ASSERT(mAudioSamplesBuffer);
 
   int16_t* samplesBuffer = static_cast<int16_t*>(mAudioSamplesBuffer->Data());
   size_t samplesToCopy =
       std::min(aSampleCount, mAudioSamplesPerChunk - mBufferedSamples);
 
-  memcpy(samplesBuffer + mBufferedSamples, aSamples,
-         samplesToCopy * sizeof(int16_t));
+  PodCopy(samplesBuffer + mBufferedSamples, aSamples, samplesToCopy);
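+  // Note: PodCopy takes an element count rather than a byte count, so the
+  // sizeof(int16_t) scaling the old memcpy needed is no longer required.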
 
   mBufferedSamples += samplesToCopy;
   return samplesToCopy;
@@ -903,8 +1016,8 @@ uint32_t SpeechRecognition::SplitSamplesBuffer(
     RefPtr<SharedBuffer> chunk =
        SharedBuffer::Create(mAudioSamplesPerChunk * sizeof(int16_t));
 
-    memcpy(chunk->Data(), aSamplesBuffer + chunkStart,
-           mAudioSamplesPerChunk * sizeof(int16_t));
+    PodCopy(static_cast<int16_t*>(chunk->Data()), aSamplesBuffer + chunkStart,
+            mAudioSamplesPerChunk);
 
     aResult.AppendElement(chunk.forget());
     chunkStart += mAudioSamplesPerChunk;
@@ -987,6 +1100,7 @@ const char* SpeechRecognition::GetName(FSMState aId) {
       "STATE_IDLE",           "STATE_STARTING",    "STATE_ESTIMATING",
       "STATE_WAITING_FOR_SPEECH", "STATE_RECOGNIZING", "STATE_WAITING_FOR_RESULT",
+      "STATE_ABORTING",
   };
 
   MOZ_ASSERT(aId < STATE_COUNT);
@@ -1009,6 +1123,11 @@ const char* SpeechRecognition::GetName(SpeechEvent* aEvent) {
   return names[aEvent->mType];
 }
 
+TaskQueue* SpeechRecognition::GetTaskQueueForEncoding() const {
+  MOZ_ASSERT(NS_IsMainThread());
+  return mEncodeTaskQueue;
+}
+
 SpeechEvent::SpeechEvent(SpeechRecognition* aRecognition,
                          SpeechRecognition::EventType aType)
     : Runnable("dom::SpeechEvent"),
diff --git a/dom/media/webspeech/recognition/SpeechRecognition.h b/dom/media/webspeech/recognition/SpeechRecognition.h
index d80b160ec6de..026e2c1f05cb 100644
--- a/dom/media/webspeech/recognition/SpeechRecognition.h
+++ b/dom/media/webspeech/recognition/SpeechRecognition.h
@@ -32,6 +32,10 @@
 
 namespace mozilla {
 
+namespace media {
+class ShutdownBlocker;
+}
+
 namespace dom {
 
 #define SPEECH_RECOGNITION_TEST_EVENT_REQUEST_TOPIC \
@@ -40,7 +44,6 @@ namespace dom {
 
 class GlobalObject;
 class AudioStreamTrack;
-class SpeechRecognitionShutdownBlocker;
 class SpeechEvent;
 class SpeechTrackListener;
 
@@ -62,8 +65,6 @@ class SpeechRecognition final : public DOMEventTargetHelper,
 
   NS_DECL_NSIOBSERVER
 
-  nsISupports* GetParentObject() const;
-
   JSObject* WrapObject(JSContext* aCx,
                        JS::Handle<JSObject*> aGivenProto) override;
 
@@ -72,6 +73,11 @@ class SpeechRecognition final : public DOMEventTargetHelper,
   static already_AddRefed<SpeechRecognition> Constructor(
       const GlobalObject& aGlobal, ErrorResult& aRv);
 
+  static already_AddRefed<SpeechRecognition> WebkitSpeechRecognition(
+      const GlobalObject& aGlobal, ErrorResult& aRv) {
+    return Constructor(aGlobal, aRv);
+  }
+
   already_AddRefed<SpeechGrammarList> Grammars() const;
 
   void SetGrammars(mozilla::dom::SpeechGrammarList& aArg);
@@ -90,6 +96,8 @@ class SpeechRecognition final : public DOMEventTargetHelper,
 
   uint32_t MaxAlternatives() const;
 
+  TaskQueue* GetTaskQueueForEncoding() const;
+
   void SetMaxAlternatives(uint32_t aArg);
 
   void GetServiceURI(nsString& aRetVal, ErrorResult& aRv) const;
@@ -153,6 +161,7 @@ class SpeechRecognition final : public DOMEventTargetHelper,
     STATE_WAITING_FOR_SPEECH,
     STATE_RECOGNIZING,
     STATE_WAITING_FOR_RESULT,
+    STATE_ABORTING,
     STATE_COUNT
   };
 
@@ -163,7 +172,7 @@ class SpeechRecognition final : public DOMEventTargetHelper,
   bool ValidateAndSetGrammarList(ErrorResult& aRv);
 
   NS_IMETHOD StartRecording(RefPtr<AudioStreamTrack>& aDOMStream);
-  NS_IMETHOD StopRecording();
+  RefPtr<GenericNonExclusivePromise> StopRecording();
 
   uint32_t ProcessAudioSegment(AudioSegment* aSegment, TrackRate aTrackRate);
   void NotifyError(SpeechEvent* aEvent);
@@ -186,9 +195,19 @@ class SpeechRecognition final : public DOMEventTargetHelper,
 
   RefPtr<DOMMediaStream> mStream;
   RefPtr<AudioStreamTrack> mTrack;
+  bool mTrackIsOwned = false;
+  RefPtr<GenericNonExclusivePromise> mStopRecordingPromise;
   RefPtr<SpeechTrackListener> mSpeechListener;
-  RefPtr<SpeechRecognitionShutdownBlocker> mShutdownBlocker;
   nsCOMPtr<nsISpeechRecognitionService> mRecognitionService;
+  RefPtr<media::ShutdownBlocker> mShutdownBlocker;
+  // TaskQueue responsible for pre-processing the samples by the service;
+  // it runs on a separate thread from the main thread.
+  RefPtr<TaskQueue> mEncodeTaskQueue;
+
+  // A generation ID of the MediaStream a started session is for, so that
+  // a gUM request that resolves after the session has stopped, and a new
+  // one has started, can exit early. Main thread only. Can wrap.
+  uint8_t mStreamGeneration = 0;
 
   FSMState mCurrentState;
 
@@ -197,6 +216,10 @@ class SpeechRecognition final : public DOMEventTargetHelper,
 
   uint32_t mAudioSamplesPerChunk;
 
+  // Maximum number of milliseconds the engine will wait for voice before
+  // returning a 'no speech detected' error.
+  uint32_t mSpeechDetectionTimeoutMs;
+
   // buffer holds one chunk of mAudioSamplesPerChunk
   // samples before feeding it to mEndpointer
   RefPtr<SharedBuffer> mAudioSamplesBuffer;
@@ -209,6 +232,10 @@ class SpeechRecognition final : public DOMEventTargetHelper,
 
   RefPtr<SpeechGrammarList> mSpeechGrammarList;
 
+  // Whether the page called the API's setContinuous() method.
+  bool mContinuous;
+
   // WebSpeechAPI (http://bit.ly/1gIl7DC) states:
   //
   // 1. Default value MUST be false
diff --git a/dom/media/webspeech/recognition/moz.build b/dom/media/webspeech/recognition/moz.build
index d6be47151b05..23dae6699765 100644
--- a/dom/media/webspeech/recognition/moz.build
+++ b/dom/media/webspeech/recognition/moz.build
@@ -12,6 +12,7 @@ XPIDL_SOURCES = [
 ]
 
 EXPORTS.mozilla.dom += [
+    'OnlineSpeechRecognitionService.h',
     'SpeechGrammar.h',
     'SpeechGrammarList.h',
     'SpeechRecognition.h',
@@ -21,6 +22,12 @@ EXPORTS.mozilla.dom += [
     'SpeechTrackListener.h',
 ]
 
+EXPORTS += [
+    'endpointer.h',
+    'energy_endpointer.h',
+    'energy_endpointer_params.h',
+]
+
 if CONFIG['MOZ_WEBSPEECH_TEST_BACKEND']:
     EXPORTS.mozilla.dom += [
         'test/FakeSpeechRecognitionService.h',
@@ -30,6 +37,7 @@ UNIFIED_SOURCES += [
     'endpointer.cc',
     'energy_endpointer.cc',
     'energy_endpointer_params.cc',
+    'OnlineSpeechRecognitionService.cpp',
     'SpeechGrammar.cpp',
     'SpeechGrammarList.cpp',
     'SpeechRecognition.cpp',
@@ -44,8 +52,13 @@ if CONFIG['MOZ_WEBSPEECH_TEST_BACKEND']:
         'test/FakeSpeechRecognitionService.cpp',
     ]
 
+USE_LIBS += [
+    'jsoncpp',
+]
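+# jsoncpp (toolkit/components/jsoncpp) parses the JSON responses from the STT
+# endpoint; see OnlineSpeechRecognitionService::OnStopRequest.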
+
 LOCAL_INCLUDES += [
     '/dom/base',
+    '/toolkit/components/jsoncpp/include',
 ]
 
 include('/ipc/chromium/chromium-config.mozbuild')
diff --git a/dom/media/webspeech/recognition/test/FakeSpeechRecognitionService.cpp b/dom/media/webspeech/recognition/test/FakeSpeechRecognitionService.cpp
index c8c95aed5d85..5e7b0193c29a 100644
--- a/dom/media/webspeech/recognition/test/FakeSpeechRecognitionService.cpp
+++ b/dom/media/webspeech/recognition/test/FakeSpeechRecognitionService.cpp
@@ -30,6 +30,7 @@ FakeSpeechRecognitionService::~FakeSpeechRecognitionService() = default;
 NS_IMETHODIMP
 FakeSpeechRecognitionService::Initialize(
     WeakPtr<SpeechRecognition> aSpeechRecognition) {
+  MOZ_ASSERT(NS_IsMainThread());
   mRecognition = aSpeechRecognition;
   nsCOMPtr<nsIObserverService> obs = services::GetObserverService();
   obs->AddObserver(this, SPEECH_RECOGNITION_TEST_EVENT_REQUEST_TOPIC, false);
@@ -40,11 +41,15 @@ FakeSpeechRecognitionService::Initialize(
 NS_IMETHODIMP
 FakeSpeechRecognitionService::ProcessAudioSegment(AudioSegment* aAudioSegment,
                                                   int32_t aSampleRate) {
+  MOZ_ASSERT(!NS_IsMainThread());
   return NS_OK;
 }
 
 NS_IMETHODIMP
-FakeSpeechRecognitionService::SoundEnd() { return NS_OK; }
+FakeSpeechRecognitionService::SoundEnd() {
+  MOZ_ASSERT(NS_IsMainThread());
+  return NS_OK;
+}
 
 NS_IMETHODIMP
 FakeSpeechRecognitionService::ValidateAndSetGrammarList(
@@ -53,7 +58,10 @@ FakeSpeechRecognitionService::ValidateAndSetGrammarList(
 }
 
 NS_IMETHODIMP
-FakeSpeechRecognitionService::Abort() { return NS_OK; }
+FakeSpeechRecognitionService::Abort() {
+  MOZ_ASSERT(NS_IsMainThread());
+  return NS_OK;
+}
 
 NS_IMETHODIMP
 FakeSpeechRecognitionService::Observe(nsISupports* aSubject, const char* aTopic,
@@ -85,7 +93,6 @@ FakeSpeechRecognitionService::Observe(nsISupports* aSubject, const char* aTopic,
     event->mRecognitionResultList = BuildMockResultList();
     NS_DispatchToMainThread(event);
   }
-
   return NS_OK;
 }
diff --git a/dom/media/webspeech/recognition/test/FakeSpeechRecognitionService.h b/dom/media/webspeech/recognition/test/FakeSpeechRecognitionService.h
index a1e96ce9e3f4..69e2786b76d1 100644
--- a/dom/media/webspeech/recognition/test/FakeSpeechRecognitionService.h
+++ b/dom/media/webspeech/recognition/test/FakeSpeechRecognitionService.h
@@ -22,7 +22,7 @@ namespace mozilla {
 class FakeSpeechRecognitionService : public nsISpeechRecognitionService,
                                      public nsIObserver {
  public:
-  NS_DECL_ISUPPORTS
+  NS_DECL_THREADSAFE_ISUPPORTS
   NS_DECL_NSISPEECHRECOGNITIONSERVICE
   NS_DECL_NSIOBSERVER
 
diff --git a/dom/media/webspeech/recognition/test/head.js b/dom/media/webspeech/recognition/test/head.js
index 398c508125cd..958611b46776 100644
--- a/dom/media/webspeech/recognition/test/head.js
+++ b/dom/media/webspeech/recognition/test/head.js
@@ -163,7 +163,16 @@ function performTest(options) {
   );
 
   SpecialPowers.pushPrefEnv({ set: prefs }, function() {
-    var sr = new SpeechRecognition();
+    var sr;
+    if (!options.webkit) {
+      sr = new SpeechRecognition();
+    } else {
+      sr = new webkitSpeechRecognition();
+      var grammar = new webkitSpeechGrammar();
+      var speechrecognitionlist = new webkitSpeechGrammarList();
+      speechrecognitionlist.addFromString("", 1);
+      sr.grammars = speechrecognitionlist;
+    }
     var em = new EventManager(sr);
 
     for (var eventName in options.expectedEvents) {
diff --git a/dom/media/webspeech/recognition/test/http_requesthandler.sjs b/dom/media/webspeech/recognition/test/http_requesthandler.sjs
new file mode 100644
index 000000000000..13095ff1210f
--- /dev/null
+++ b/dom/media/webspeech/recognition/test/http_requesthandler.sjs
@@ -0,0 +1,77 @@
+const CC = Components.Constructor;
+
+// Context structure - we need to set this up properly to pass to
+// setObjectState.
+const ctx = {
+  QueryInterface: function(iid) {
+    if (iid.equals(Components.interfaces.nsISupports)) {
+      return this;
+    }
+    throw Components.results.NS_ERROR_NO_INTERFACE;
+  }
+};
+
+// Key under which setRequest()/getRequest() stash a request.
+const key = "request";
+
+function setRequest(request) {
+  setObjectState(key, request);
+}
+function getRequest() {
+  let request;
+  getObjectState(key, v => { request = v; });
+  return request;
+}
+
+function handleRequest(request, response) {
+  response.processAsync();
+  if (request.queryString == "save") {
+    // Get the context structure and finish the old request.
+    getObjectState("context", function(obj) {
+      let savedCtx = obj.wrappedJSObject;
+      request = savedCtx.request;
+
+      response.setHeader("Content-Type", "application/octet-stream", false);
+      response.setHeader("Access-Control-Allow-Origin", "*", false);
+      response.setHeader("Cache-Control", "no-cache", false);
+      response.setStatusLine(request.httpVersion, 200, "OK");
+
+      const input = request.bodyInputStream;
+      const output = response.bodyOutputStream;
+      let bodyAvail;
+      while ((bodyAvail = input.available()) > 0) {
+        output.writeFrom(input, bodyAvail);
+      }
+      response.finish();
+    });
+    return;
+  } else if (request.queryString == "malformedresult=1" ||
+             request.queryString == "emptyresult=1") {
+    jsonOK = request.queryString == "malformedresult=1" ?
'{"status":"ok","dat' : '{"status":"ok","data":[]}' + response.setHeader("Content-Length", String(jsonOK.length), false); + response.setHeader("Content-Type", "application/json", false); + response.setHeader("Access-Control-Allow-Origin", "*", false); + response.setHeader("Cache-Control", "no-cache", false); + response.setStatusLine(request.httpVersion, 200, "OK"); + response.write(jsonOK, jsonOK.length); + response.finish(); + } else if (request.queryString == "hangup=1") { + response.finish(); + } else if (request.queryString == "return400=1") { + jsonOK = "{'message':'Bad header:accept-language-stt'}"; + response.setHeader("Content-Length", String(jsonOK.length), false); + response.setHeader("Content-Type", "application/json", false); + response.setHeader("Access-Control-Allow-Origin", "*", false); + response.setHeader("Cache-Control", "no-cache", false); + response.setStatusLine(request.httpVersion, 400, "Bad Request"); + response.write(jsonOK, jsonOK.length); + response.finish(); + } + else { + ctx.wrappedJSObject = ctx; + ctx.request = request; + setObjectState("context", ctx); + jsonOK = '{"status":"ok","data":[{"confidence":0.9085610,"text":"hello"}]}'; + response.setHeader("Content-Length", String(jsonOK.length), false); + response.setHeader("Content-Type", "application/json", false); + response.setHeader("Access-Control-Allow-Origin", "*", false); + response.setHeader("Cache-Control", "no-cache", false); + response.setStatusLine(request.httpVersion, 200, "OK"); + response.write(jsonOK, jsonOK.length); + response.finish(); + } +} diff --git a/dom/media/webspeech/recognition/test/mochitest.ini b/dom/media/webspeech/recognition/test/mochitest.ini index 49389ed47dd6..a355bb36e00e 100644 --- a/dom/media/webspeech/recognition/test/mochitest.ini +++ b/dom/media/webspeech/recognition/test/mochitest.ini @@ -5,6 +5,9 @@ support-files = head.js hello.ogg hello.ogg^headers^ + http_requesthandler.sjs + sinoid+hello.ogg + sinoid+hello.ogg^headers^ silence.ogg silence.ogg^headers^ [test_abort.html] @@ -16,6 +19,12 @@ tags=capturestream skip-if = (os == "win" && processor == "aarch64") # aarch64 due to 1538363 [test_nested_eventloop.html] skip-if = toolkit == 'android' +[test_online_400_response.html] +[test_online_hangup.html] +[test_online_http.html] +[test_online_http_webkit.html] +[test_online_malformed_result_handling.html] +[test_online_empty_result_handling.html] [test_preference_enable.html] [test_recognition_service_error.html] skip-if = (os == "win" && processor == "aarch64") # aarch64 due to 1538360 diff --git a/dom/media/webspeech/recognition/test/sinoid+hello.ogg b/dom/media/webspeech/recognition/test/sinoid+hello.ogg new file mode 100644 index 0000000000000000000000000000000000000000..7092e82f30a2e48633af4f3761eefdadbaf41040 GIT binary patch literal 29514 zcmeFY1yq!4)b~vZNC*h1Al+TkB^?6NB?!_b-QA6Jmz1O+NDU>OB3%+9-Q6L4LyTgb zbJp`d-?!Gc)_WsooH^rOzw7_sdtWj4Nb2dyfk|5f1scM4ophFjGY z_(f%0RF!K5;tYe5gc$I8a{2p2{NfRKiHnJg1AnjncJb`{5AY>GN&bUl!*|XRF?cn}>3KxE!MITWWcZ9&YpPTQE&AozO@OKg^?eQ~}7R zUxD^dhtSwji$=+!AgsWA;K@lK&WywbwNgUad3)=up)d6DyL->HN)7K>HcnY>s{a;B zbQ~k#d;u8B#uRLQB*T;JPR|#sdyI#l?1m^E?yoyfM@of#MRd$G=V{77T4|1sU3>O7 zD$&tcO8&V^D&AfsMb%&l9Os0OG#&z|e;n{a(cS(+k3E>_pe~ zIl2Dx!E_6Wykj4sWL#EhSqlUz)j{atjM~>ptVdekt|Q@Js1Ah6{Wqw^{98aM<7=n~ z6@P>Jd<=grl>iu#$2hwk(M*JySN!MfmQ??ja?~{Tbt9=7jCL(-xKH_rZRt55Y*K2gXrDMUstd+lmqSsjGQ@wl(Z0 z1SaU`kGAaQV*oR^8(Af`?eIO<|0ET!ePlX0e2)Hl)QN_BFEFcsQhh-2#(&P-MU+j?;%91rd{HG7?fmqI5RYcc|~@A!=6CJYfGT6#?TBsoNjE<*k2FrwS| 
zpaT4JTL{N-F)dMPhfu`I05iI-^1i3PHBt6Q#Mcn5su0c;f6M>tg2M<4QD z<0{@_FiVxhQ4NDzuTizDL;js|Kor_P+=;ar}}c0EYU$uRQe7YGFGchG~}#Fw)d1) zV}sFPr;S+2_aeM#?a_NEZQBor(wRXHv&I~2;^!t|K3Lb#KkmLgh>D3k}IMb$i=aow`ajdl^YzPbxWpI`pHjR zhVMagT~0Z#=C+cezNs+`Elxk6{FJOqpD#5z3hNdB81w12R|CRE?z_RZ+4s5Z5<%!< z*!sIaVJ#Hha1njjl1Xsk>0q^9$+qUWF%~scTfV-~Od~oB-Sd_jKWgIl2l7#7bp`bc#I8 zdXS4{$jxYBg=M^=YprCVVUCN*P+vVz<03>c>O=y$(VpSUAE+@c6`$;6Pf?ZHVy^&0 zJ>$A5mi2N`-c=z^pMGnsz>{dEK6;9#J*Es_7&;>F<*RYPP-+o&hkW~;(h9=Ce%{JZ zrVNx~6S690xo$Rr#!VQGzDJinFLRlc+3q_vJodno+}%C8cz*i&~Z%`8}46Zv8)3BI6d{r0gs+J@O%$QA;&v=2L~; zEy{t^B2Xn-k9uEMqUCba-z)LL#A>Hkq_1@ydV#ao?%fqx$S&C!>eeL zUr-D(RMW~r$D8f-TtsS;D+!k zsOZKgz07Rkue%~cMXrVary=r!@?q7VsEEu4LXr&jxeTWg<8DS}x&=1^PLJDMf17|Q z+HIPenL9PP;ctuNaQP#`I^wY?)M1L16!YuL zax4RI_zwhx%&Ra6QZLitM#3-rJslu2Y0(*|PDU~?J_>ta7u8cA7ut&_rM`8jSZZ07 zj07g>fqdJIB>nh(a{VKf!!erOoqU%eiP2RvF_*W%Mx$?pQ{Z4;g*r#@8`bx@^*0AS znd%5)mmq31zCK@sq^sh(pc4B|6-@gp)k@f39n1s!AlQ?wkFPjmSWUN-^qGor78oj8 z&4aDROF}4)luErC=3;;#2U|RTI2g73>;bir#P@<`s*56C>Ngv|b(hEqtg@K8L)2OF zS+|(dz4*aZ$zWP4_1d4bns?8=>dGdo{)Cc5>p$gH-U_6Cpt~BnVfH(+phZR@ei2BhHS!qmOu7iQvg|NDA1(V!D9|K5 zFMUmA8kg((WyiQ1jyBmw3|=uMW`Vp$=#!mm{)iRIVgVd;g{cQLN}V0ut1B!mp&*XS z5#DGH^Sq#@4AGr=zEi`#=OLM!eG=e5Bp(DwS3B3+*DGn1JroHmVsU<<5o1h!FOE4H zYC?aXheE$mOwiA7Zeq#NyUt)b9 zBL6kkw2|@|vT+X2t)$^k=?R8hqdtO~-v#aLLbVXVGg zZ&dZo(K}yW7?SFAQo=}TC2LK%(@o*47#rkD6szFe!&f53x3(_D`mt!UF@n6FDu9{w zOUKFsJMi>DbDnj_cyilPds=DHSoZ*v$w6kSk6NW0`lsl09a;-hPk^tJL}Y z;$LC`NBG^sXH-lCvomjpK?Ij~UDp{A=%xdG*3V z32H(Ltg{%>cGNg9@~kwN&mD75QS60gWT@($ezDMc1rlW8@3TEnth~E;yO(0w{3O<& zsX(*6$!Z5n;qqwD)VN+6vmjW&IUhVR3to~fnex6T4`4)_4wg(D7FUmAkaG@-^NV?e z;Z&6`{LRuaGlIUha7BQ~YG)idqgFL$b#2Q;a4=>fG4=hdw>dj*V>db;6sLVIia} zXc6jl4AGWqHqGPmaKQBEhj4yPDa(0Hm(%+d&%yh)^OqM6JjKDqC)w$kEs#QEeRMZl zyp3roXm#|5y=I7HW#tAxze;PCbQUs~kr}DI`|erWVpeE6Cd@?SSRZ}v?2i?$?S>C1 zxBj^BF87MG?qI+DRknUndqC-K*X@Hy^f6IUSm{?_CF2iw$RKBbEZS_08n07+<+t+! zA(pA`>{%Tfq))xej_wiCFT_|$zep(>6BFSJF+%j4nRcdR-ITt&806yrIhH91Yx*~} z#)OHxj{#aoef)Y^vQZdJWTw9M9{QAJ;o->XnBWEgmU$`JixFP$2W&T5Kx$pNh{l@c zqX4(llL&FgnX{hN1TjUlFbH3*B#5y+yO1~$b%X4uSTGkuysl~+qvC55e^-n0(>clM z30s@;Snsk)5#->NTFzm;lK3WImujWUe?Qk6?6mDJy^&lQ+jrjIz#^!2F+S^kar=2v zBHH2!MAh5ddVW=?(;UMlRsP*Djx)CBFP^}Dkz*h3P=tbi-(p!M@cRtzP|@m+-w+kj;dnx*woC8D%5!C!B109&m&N3 z!M%njeR|JL&RPD5keCX}wK`fx*}XMncD6_1R9Ctd@*7YrTOcUr^~;NMRo{`RpkrOdLx#N$gOL! zfIEv{v6yO>M7heJAh=-`G+Lf_{Yxm@R?v)SvVlLUBHhZ{;}?^+FWgHOfqkF8fQMtP zML$y9K4g9j?$~tRX+z9bB-t;NTcZ^5jkd7N=q1(lLd3Qc)Z~|;5o(2O?$Kw{^3YA8 zrpV`i+n%zI%YYg-S&h4Wo7kY&YRn$3lzbb73kA;vO947ZWHLTKBn-G`y*_+36PEoh zR^k&*JyD4|3tIJ{vqSW;J+ne#SOav62Pn<;A14Wu-{!HPeBSxG!`C$5l|&+@z8eiwqbQo~qvV%RCQtJ3)oOAs+5G zcIH>Cw(lO2D)}S4ypu?<7D}=V+r{T%^k#@QqAhBa>j{U<`H(Jlg({?ncG0-! 
z&3(Bzsp@|OYvcc(6Du!2jf&PcOix3vpiGT&?oi&}f83TZt|plehFVot(mY5+qj*bg z>eFl+-z+m`;O1lK)4?ZJr<&%%FuvXtC(HA!L8dTy$y|@E%eYavED5MpoA+v)|?Hm<+7g?K5dJ;^F zaPE^M&#Z@H-?yJiV0%r3p198ua82fO4=5SOv=%yuY#Ir=i!nvY`I=0fy;;Rpb`6W< z%pY6XdaL0CrB6e;Hlu@w$z-Rlq1Sq&ar`ovrv7)brsljWGSkFFarB+kr%JB!l6wPI z7BAeoO`3IV1($|eKD}R9ycNPGE-dOTbQVDlHdxgS2`@NYi)1;nPTs#YB2|rp^FU=| zVmGF#G2jYq+Rq0+zlG=m#T8L?^6a6NF!RPqi(CM+$k2v(;m1gfq3-4^6Gfljx&nKjY0gwZPPYk8* z8s4f2jfFc3I_p`aQHRofCkp@ZqCdK{c(xUnQ62Yoz9=3cEUwq#76KLmJPumkMqccq zxFBdj0PSBk`>i4{eSbC#A!hMdI(@vUT$8eVA&?ue+?_Wi4JiqTsp-26M@oi}=t(%jaH|v8RRxGsHP+Vo5X%S{efvbi2Sm@Egxw)^% z?5(EjDYBpJGWNLkJ%jYaQFD=(vQqvPl$@&TvUVH%Gl0mzUciDw4 zXBq~t8_NGS-)zkmhUnCuqQU5f>toZ@{3D}s2BXv<%y{tZ&@(~4M*;tZ&vvGA(Dh0` zP~nDhBMuQlyp?+`QOc(rBb9F>Oiipe@p#IuX2tX4=|ogfz5Nhyz~w=g1BM{)Mq4^; z>ZulX0k&gc>56z?_dMW=3ufgsq1_)Ly*ZBW1ZFEBt?=nSHEHJB{!=eik7eq>xG6aC zi6SOyS6yJdg5im~3W3h z7h;yYfVl}s$_o$)9|xGeZLIw7~>xCkxs!7Q-q z3cy9RMJpdXe4^L!)O|_f8;rP|#C+d z0PT>7d-{>U97MjFA%PRe>MhWG>)yE#HaehUUhH@H4Lp#)A^FSjt*OHKcdUTbmV9IQ z+j-DMrL@EvvZ{MSiv-!dhO(ks=HbwnBi_lyw+lWWx%5~XGWVAnX7WxA^%V~9#ncVz z$%3c8J>_60`k=p5%Y?0zT`^vuN0G}q=azKZKxR$#VLRk?m3$xZ!pCONT4mTVLYdm4 zpAw7!3Vg*scsvcMaE=SMJ%ts!jGVVKd8&uBL{F?;^rp}hYkKPmGeoB>{Rog*Gli*k zoSby!lT}OPowstY>jaz`$|VNOPd3H%dCtQp(TA$!OpN!tSTSw9eXChrn@qg>^})Z1 z-%CXEME74~v~<(#;K1Qol`_r5n3rtT;M>AJ)G)2J?UNDN|2tyG7QC5_KM`HmJxhq( zHj|#fS3b!(^$L9Z8JKMCb_?qX&uV>uh&!Y|Awph@X!<8aB1h;rJw7OTS@Y7v!cNTu zDRmjNB*&NT^Ng0^Tip}PrLG8nK%}@%#NSo?A0mP>@v0kI+8nE(tS0eUB!($UGTq+^w5gzDAt^2Nrc;)qtKEkyL+ z2K5lq{j&;~!9|D5+KB!^#WrZT>we?mp4VcVe(8=ri@;dfA1nAf>(YqWT(LM@rWILaVmeHBPscNDNW1b?|>a=GHYZa52V zu$0%i_$L+Pqh~EO{mlm2^q-p7qjGLMw%WKQL5LvHGBOqMJOfh>3Y^vAMek3DQsSl8 zYv>s!*R6&z<+;^J5@%7cq0)gpySxP*R$ZtVo~SRn( zCBm2!t{(JTMB#nPYZ?y>6(7(h--Uk}@yY!abn<(Akp>-8pMy??imy%1hGJ&3_31XR zgB!f3InFPOT3fgj1SxIjNR9o{*K}_{tBeGne22@jW^Mm4DZ&klH8(%kzd+UoYJ$r= zB@9jol@bU^sk*7jT}ll2RY>X1efU2L`9Fu05iY%G-`5{>um9NP-vaLPpWIKLV~~;k ze#yyx|B<_S$;->hUHp*;{#RaJ9=Kp0`^# zhuqp#D&x64WbX?fLhdYTp?N zN3rf8uQuh`+YCI)dEWNTr@R9fpvF$c5PVIEcjvj*Uj}|jDgKX+|2ySBYRM2y9*%9K z=XJm~oS5ZlH=aU3U|^sD18C>yIr4dBWal~bCoO*;xMb^h$A50S+UqXl(R3+6-{jC5 zNeHpZblsnIP zZluvp=GtRyZ}|GDvxbE)8yQU@E9m@}8{V1!7c9?jO;XI&NEVI%fKrbXDlt*RspYOc z+99*z9%T7Y+nz5N2)2%KdB1 z|Hb+NRrw9iSE8V2EkwPoEqi-etPS%3xzbK~bXBD`#k%q6S-JnJX57DFZo@?h?%~v$ zX+qE89A1@qpWn@zyeJ^4A7%b$Oe1xt^3nVZO?g`LVZJn~?CuN9 z%A9n0*{h%lcZ6AgF+D|jLH-}q9HCp^hD!pA#;NCEXZPY8%*qX#>Z5)re>37AFhO%! 
zSyD4{E6nrJy;<1 zL`i6TLReg9{FCXgmGl(<0ds&Sb1i<8ch6$(lRt5Eqv%est$SB~gO0pXD~^`R*Cx%* zIjKppo$r(Qb<+fyeq&wmQwhn&TGTx>z_lgBu1WShok^NTYbquO+zh%C{Zk30ge-t^ z^F}{H)YDZXr{1qQt|N7U z`sblbt}2{gp75HM9UD@m8}sYL%;V@<%@`lXHo>-*;GFeTEXZ}DTHju)NEb}@T(I8> zxa8Cd$t?s8{#VXez29AT3c4E^^k8e8HsQ|0;lM4lCTdE7F3j>;H$GDIu$J|J!W~q&N(MoNqw;)@zgmN}u7-mBYGrbIx_u~tv zxguH0lMNrYxq`jcw7fgQ?EOFId~LAaweKcp!uDjL=DI06%id^s)3I`$t`2twT57kJ zNd$0<4-ll%_~EF1!X>HQr;Z+f(c!U}MEpz4*CDIdzAs;%e@#OmeADOWc=1{ksSH;> zsgHJ6?dgFD`Sw$|n#bo3mz)k(b8+gEPK+>A7kVmS1zTtRTt~Gb3n1U2@9xirrzd?i z1oK83lv+MnBy79O{t$VT5vR~NiB>pFb6^Y+Z?2Gw-QxUT^o)IZ-Io3Xr^|ol{A(St zzdmkkC-0KNy#J~oG7)}f7%cx%&m;U*_-{lo)SDvkT>tmfm?$8)5hr1b1V1fS`?U?Tn5rPr}A z*UnE}ZKIuq;g6tN(iI1KyKN9i3y(T#9_0p$E|l>}6D^y%#`9hxw9R$auy!2l7P%b;{|bikl_e-HX&8S*Dk zytrMOask94NNlC7%-51YP*?bRd%9Sz6;>c9U{VCP^z7H%Rg%%MWUfXmP^mQo0we`RQ(J?Zyrl9RE1Qyizt8ioFMlWH zm%s&*((72k^oGLCZADO>6k9{^|l*1n- z?b#PM5mf^w5<_QnVi7@=(LJns0{d4#&75hm-+M^ehm}glcUFt4cE#%`dl^0+o7fyX zj|?EW&`X9K`4e@H(bxbS@dL?apI?6&Ge$~{Vr?<_h3#S$g zF>D5{x?KZt z_1=itPAMW@|1&*t>F?{CYtUGPKhGW@P-_zuLtNdlwhK^alNTV+{gF#hOy10rKZ1g$ zj~@rce*t|T@Sj1^{@9hTK*_&03}8FqkEpb}KAQ5};C%l`db&S^ElO$9NUv88(=XN2bniRqn{0j%uT!pQZeg2l8H78&GX|y^ouQ-ZRH-nG$NJfN zx}OO?`OdA#z43)(yEzKpOe!`Ho}Gu6HZ|E^NtzxL^ym2lL|O;umWA0bhDBgToG4-U z+3J&UpxEDn)=ympt&N94zuJ0DLzvpcz6W#ELfPFNm}YM}uL~ocfFx8o!ME}K$~38? zgM9Ru3&Wa@B~UEMJHQwz{mUW%MEc9{)1#3=q;5jGJ|W9WCWDeDD9aRm?Pa&09cdZZ zma*V}#ST%ODRUOs`-WjXJy7>C=~XI~;y(pV`M!4eVSdy@wceWantrdR#s(ptE~A!i zeFE8-zR1JrG?~y_`Yaq2%9>Sa$6T%C%0g_hmQe1AoGF=`C;vbgU}S6o;GPqByba%%MEIo z4EJgISTbQHcQ(k}u^(~9njb6&;9xO?LUx)Q(&4avAHS- zU(TqJGK|gEHpP2{N!4Wm(*3h_YMtT^oR8wN^NZ;$y)tcZh1p7t*I|4SGQh4A6_iu2 z6Tdg95H$RG9^Nm-!9qkXZ1<316Q61konkac^CcKBD5{=8*()x?EY#EM63a)SLD4up_ zt)um;Xc!uaDQ3ic>u)~3%KAyg1$94Z6Y-bnE;F>K zrNB)~g~tust|Dv?t)?ta>avOZ8_Tu9(>PAja1`Ds2~ySu%tkndMUY}xOm8~bR=7Fg z@n!|_HE|UgTV2h3piY&Q4D-*Zi!{}RNa;^$2L|%jg1atJlibU+N2=k27c?LcX-f)S zRHBVieo<8o^1A!~-c&z7QL4)1sh~JBI#RifcRIG0u;5qr5<5KYbb1qS0jUwq&$VbB zCORYx(F)GtV(SD#O-~B?kpOt2usJt!^&$bV@ClaOiwK_pbTOrC>!w}o6JWG!*~;*~ zRwKxmo$`MRG2<+8@-s@2{V4$?)nF9AtE$0-Vhd5yaZ^>RTwKwzg_c&CESj9Yj9GQg z)d*%VIlg~Ey{YP-Q1R51r-}&WanaskoU1MaQEw#xRRycNMCBFRxNq_&)O$B30NcB7 zFbQSs!9Fk%48)ktT3ox&)Ghlgcb?z;O(58|1``|UgW1weCO-{#@aahcar%*gWLlehR0O*DXE zK{G5B&cMLM0mA?`vBLfYtMcfJ%N1+G23YeSSV7Gt>$TR6W#Cr+F}{&AF7Mf<#gu{& zcYP1+)9Hxk*81ZFe5s1YY+WL4kh<#D^C&o8X%K_Fc} z7mac|QZW{lNhkwIctbsFTPA``=YlGs;W6_G%_0+HwH#t#Tg04-UFy%3LNE)t(IOtP zoekx02{O=L>k49RjX#&aYFW#~INvrHl#RM8Lf-Pa!l>i)_4+D(qj$@A>siiPHA+=w zftDHD7F567Hl&#^dS0gGd>`MSOKI28all4U_vd^Kk@mc1H5nqr9={KU7$cC|M69u~ z$&Z`&e1L#>>il5p@pup-xUBW28!pjr-2?1J@|+6qzjM7zEX5x5pS zZX0TA=~Pht=JOc=zmawToIIF{yr>~vdP_?nB6@3Z^dM^12uZ;TJLqRt&I{HXD=f)p zkf0p!Ps%S`45cZM*^8Nb>B9LI%Ll5hNd%V>KHczae%!^h+o}9117&L-q0%A;#xJY} z9(FgQhA8`ia^O#_WV1`@|AF;V*Bh+D&mgb+z~Wp&F<1!uA)ULqJJlxtF^iK-+)+Dx zCWFlV@dc^569=1fAIB&Aren{FY}jOB+;P$LU$GJmjyw&yvw7VJzb95WBnY)|ez=F< zqA-c?cn{Lwfi{iDFVvM#>{YLM4UOa4ER66|GU&{q9U~hpaE+cdrNJ6ln$HFwbPEct z81jjK5x_fQ2DBOk%KFfInO-0!S(b|R!#X=JsX7D7~uz64E$j?aEwcc7KoS1`cC&f7D&wgQ$;!)V%WBKW$?3^S>nkeB8yUWoSCCVAE-NP|Bd;WhFs&0cx*Wa?!= z!02I0KRj{mY>&|j)`8TTQ9QDf{-e=#y0<}`rl@Psk5qDo@hRkelPH)t099F@A50p3 z){Nl6Ip^}H@^I>glAqWuLii>Ixf}uf&(iGqlw>1`9XS+dzU=eY5+<8U1iMSmABL&3s<%i0p*LITEZmM}k!1HydFpWmz)bsfdcaxUyX6G~#`{=n zBw^2H9w=h3F_WBeWmkRWE^OW5mA~yfCb)%6H(L7eHfoMY8(4}+pIkp&c20;Q_Ina# zW9TOmZG3$mD&tE4{9Gh4fb9cx{8}+Vne5@$lpe;OT(oJMoarGVh1J@Gvi$+#I^nmG zrDdfGkW7pEMx!P^+dU|bX|FDz%yq|s9FvR%nO%(1)9G%55&a*#$=FmVByU*Sw|AD1r zH6ejWqd>8K@T}b-q!P8r-is@20A+1HOSE_{vC*il7+_<~#hIXuWqX#7CG@7vbqHo} 
z!*00iBTOR>x5|h%*q39K&*64xUxi5k&C(RKz3&JUUQurIWY*Cv;NXup8CtC#nY_R{ z*d2-4JY@hg{C51oWfowaplpzC!kLgD*TP3DK4L2bYqrhe{M4wRXVs01oKY~;jFpA! z9D^^NYq z&zpBSvS!^?vBY6Bwr6HtC=^~~GYPMH`v8dU5f414p*WoM9EV4Eb?JO_S6- z7YT(LiqkIMf4DUs4)=HjcYbrDW-Gj7qj|CcRasX5EoZ3L@Vc)Cukg&+rbf7zNe#La z2`ivvSV4&qTs#~`f4+MYjqZ4oK3$7JjSNsOF&NWcu(-h{Vp!$x;c1(~?Z+R!Dd~8^ zEG?k}qYkm_%^&j-=T!fottHtv=`EJ5_O7yp%Drw1g=Qpx2mb!aoXVMstZz0|k&(@< z&W91CmE~F=37$2$VACg4e!v+f&FFN@x=Svxye1e%5egAxIgv%%O}^1_n?zA2go8?B zt;d$AuDgkqZ=1_K^do92Z~UB;r21Xtf$AVW zp^y|PI$Z`VEDw{(PnZo7hm8Q6HkQYhk=T&qh=&l2nasGoT2;@|Q00d^)QWYQ>^cO@ z21|;WIp{tk-euOhB|GZuLl);lR?DF`S`f(NO)_@)t^#Ka=Ce&bM@j&=2>#jQE8FoOBmxM5DrVrADVkHtU8ew zD3D0!+#2XN7HOsF5VWUNY+$-~n6HUxbcQRy;eJ;VMYdQU^*O4pzXgRsa^cQl;QR}J zK%NjKP0d3ii?dk=jyLMm5V5D!L?h!`qwq0Z(JM^&@)%B!Kr7~9G^&P>_*8myI zLV?2T!UZ(qx%7GGM9ymaHQhr6xzfCy@dise4I->~W00X7l-3}Ng9DSmsmEMJ~NN<@||E09mX#iis^4_P;!5bw+c$Kp#U zeo}b6!d~&HRHGcuKu%#dzmbuD3CuZP+KHQC{2U|8hqS=MgFBMSdIa|{1^|ox$nZX9 z8YEtJmQYUfs*iu{Nn>SB&EN}l<<3Tab(KwPR{z5@GIiDF7!ymuQiQ~1FfZe_4Mm@Q z2mA*7*M%FR^&j}GM}rg}m`|tyR&{1yleVYeZU8uhiLGxrZ`Ex>p2-NtRI6_7<$!d0Reny z-@32{RO()S3xJ@`{v%eE~I9EyXMf( zy*kJ2bBP8eii=IJCbl2M)rh6(y~x=ZALObZmjqa+i?o{Ol+Nvu)}GxJgodNW@tD4o z%+4}I3{I;f?{bWXDHBLc(doxajTsf2uW|A~ZI9O{`jL2r=E&{wa4w1D2tDYgtgN~< z#cCqyNR|0ER(JsQ8XK-e5i&`?#Zwj;>#_0=?w!FFC<*eTN2QDj)EO};vOT`B=|m5M z=bwyfPI;$>^?Vq(Z~rY+j_kwdIz_hAThOj@%u2nJdq(k&{?97uaq}k#tk)-V*(wQ1 z-zY?Qi$lnevM2)_A5sboFl;v0p9wcAWf`W@pztwsXyQJ@_5knNfPCKIJESNmOW6`q zz)*~k=H=Mq-uW#H;ql>HXcCP^W!=fJK#U!TRkyu?oN6W0J>rry;QSs@0Fm$#J2P!@QHQOS?XuYUuFs_ahbeC^;#(NL z4%3;y@3%H+BJ{och_!8J&ok5bF)~vZ2Od?P#t)HieNV0VRZXZ8Ye}zHN6aXGck7En z;~tV{kPA87$?~a*8QAqbn;Yo+CX}o5OMoFo`T7um5%kF!GOD}OTUKGxPFM5Cu$}}3 zMv`qI@N^zN2?d5}$;87=Gv&;oQ{lvU7G6?2dyZH=C%^-SJVXQLq1t*oDmX#t+xAVB zN&5}=aUvdPq-=SK?@zZmMNgTElTG&>S8eL70X{>J%-*4{u0SB|GOavo_dwVhZJ!8^ z14|9TlxE;VESWZ)B7|} zk2ZO|o()jVp4R(V6+?bg5#6!$T~mbKrD)i3#_Sckr^{vcCaBO#LtXl@%w!DLnAsiW zFt2*kY1f=oO)m5e=_pdg9Q&1VOfN#r{5xgH&?Xe^va51yfOW~tSeq%qJ7$sMOnx)T z9+eg1uH!;))&n=FOHkiFA0&OXr4Zf-uDwH%ZFp9x-b`+KsxGXx%L^TSqBG`^9Djba zn#naB6xQs@7kQvLiO}j!h631~od}9tLm}f{d3zji275UNMYJ z00+e?17Y0T@490Y3W*V9r4>3GrgFBR^k%j8U%n-jwPke5*2b!W@c*K0p<^&zU_{IE z3Xo1s(5J}vtst`b9*+T2=p z^u?B{7J+jUY-$(o8^mNO7H+zXIsjV!zRA~m@NO`S=TZxdmR}{$8>-bb6!?c{hrb}&+$u*}3c%je%LTHts#WZbhwL*A3KZq!;d+U;dZdec~4X!DsFP32+L5*5z zW79Y--|%AcbX1{uh4l<4+RmVShg;OPn}yYG+z4;>>lWK)x4~$f)k}4Nf2(l=qdJF8 zmChH2hb?bB%Ny)qDmg0C^{by(_F|avc_Tljle(iiH`3-%FgI2+p4eyP7l%SEdSsO< z#)Hh02%xjk!7t@JddYR)!qF%4MIXjH;Ev4bL5#Y1kl=7SFNHo_U6G(Z9k`J98o!{& z=LiuBS{;91s3I?ZUen_tXpcMK1B{FvQAPS*?a|^RnGJ%oyNQVrqi6bQ1% zx9riI4Rza+8Fi%s_I});sNEb=DS)qgBKT&o5}BR-<5Qkp_@aJg9PLwB79+(lGr%gq zW^oN&;!;mAwEH#Yj3yi+Qo2TnX_iNRIf5ZZaq@1 z^%v{)?W|I$5*gYWO{3o{Q`n?-G(=%@4SpkX_xU40z@zSxrykC1 z0iLzU$SML)XXF(XWG`<_0p6qn{4R|mu!a;%S9NhnW3tfP`HR?$Q|2n`4eSpq1+zNz zpcv_cJNj#$+l*igTTuC^*^S3K>()Tdw4l;PjW@iefIB3QG9>{}-I2fdVYQWj^>x3q zdpR+yB*1@Vl1Bct)B~pbJz;^B>~oEv)j{6$TBkJt_y4#Z6J z#twTL!Gh&ZIcUhx!3;g;4Pr3}rZ#Ig>Vq5j{PJJZEo`Bb%zFW&F4P9zIdlK~1>3nc zI_IkHa;@qq!4K(dT6`kFm0}F7PS&rQ=q^* z;g+P_+onKbdtb=dhwFZ^=4Wq7fP0OfSBs(soHpH&A({!-tIONaj^Iq7PjK2(y7~pSxk5c38-zwcYVr~moiK-pT18* zBrDhCR15hwTft#cCxv5VKFc#Zv^`ATopb4`f5gN&x%U6ao zcTHQ;$A)(&7Sco&tkH!@Jq9@3T3ansboqY*j0}=Oj4hm*OAukm=9RLD2CN@9+GN)|^KQa` znferT0qIw8oFJeJrbv%YC&>-_n?&(kS;W}Pb4Uf_HaaSMsH4HCl~3w{Mcj^>R50`i z;Ge{2@l~01M8YuhEkFGrH+i^s6%kh(YfAa0-fBYi#;+MT`fw{ZG}6t5RLKeh2orWq zl2QvxDHIG&SZ1#IeHt%1Ct@k-gmcDBdfu0hl4Um=`6#=s4KxeS0!}I_&jG7Wzx=Uz% zAI=b&-S+%7L@GW}#H=LzDHx99qKIkjBf+Ol`tcV?hF1IQBQatB2<%fg1 z6d)V8$lWvQJR~@7+zX6vFk7>?nbgxQpt60KMQp7+@K$aT@Y|cI-J4{ZgnU&I-ZxT2 
zRF1iWSfK_A<(NM-M%lP^pvu0b#0gNKs7XgIM_N{Y%hFA>+^z_}oQ>qM9N&U~&I9Pz z!_u|-Dg0GB{Cyf&{+B=2BPMLP7;vvw0zM&lSJ5>f#2Dm1bVUnnmcgQE4>q`o;CIog zw+|J0^s7vlE{?L6*LGAzxNS?vwe4p{gKsFfpLmms2@i-I#f7KRKD6DrHe#Id;ZLy@ zA&7Sf*BSm2j=4-x9&sq7b+R52%1xRf5;hV;xEL~uhmU}@4G3B*V1i8IurO2I{tDUx zeg%20Mu=8iT)BR=w4&=Bvc{-+vi3I?Q8{Bc95KI-x$`NTK*P=~_0{jxvBg9ZGDgFd z3avM9zV>91xO#vmD-KnuzCOjlu;Dc6IFMXi`RWDNiQ4_hY z0Wqb5rqZz0j8DBc8Jn-Z<_Ev%Zfd;A)(?G zchnruxDG0DV^(RIU5wE4UUew=bdgRHObPR}?aX7%>&er$Z?~#GJRADsf4T~Fqajg; z{(x?kRJuuvgZbbksgrJ1QsG2y!yvjrMw{BQ5>Qu-rrxk9xCNYPo;jWgrC(2m+xjAz z%repqY3uUwG2ZMcLH1VjvvnWo_dBLHifU-m;=%!%yJkTEqHK2qIJrZm>W?XYMy*1z z`Hn7Hctpo%%lnk26+iog<}vtLry zSm0EdwU6K5dJF5WEj*qpyT8-?6Q(1%EYypdP<_KZI^s*5m!nCatIJN>6SyLyr#wl` z=t51FR((gnV!kQraTDbRlpY0X{xiZjT3MvUFE$a{RmnSV=f!Xl88%SD``Z&PHm+Kr zhjT4czF(d~EY9@BkZ0s5V*UsgVd_`)>>v&9xCT@kZNtNSR}NiWz=N1=lOiwvJHra9 z@(@~J2-^+C$V)ZoM&f1`?xiiP5NJQ_KoC9nDS`|tUkk6WuC2OD!koiL-D=EZxO#ZGDQb%cfEv9xw(?2$Y?40}ysCE-dBG5DAs^epjGr`Y7l|ZHD^VTR zkrFc7h|cft_oHa|!SsgN&(6Mg&ypzZ|DG_Ew3T_0%anJZ+%+TQxORz4OKQ|>?YY+Z zDk*+P0`h5ut!OWH-DhnyBe2Zd>azv?jp-LU2HUfC5$@0FlqAx~79hjFE=c$0z;$@> z?kD9R;-gI}tS;!3s~*%zN+SKZiQrz??xVqFh{3m(Why|jwmOnPR&%Wiav#qD;I4fy zl=pt|HLQ?#;0y%q$fh;%D31{%{%u-y{nzl^EQ@GS5 zf9<{K!lhxe2z{D;e)Q?H%6E!0bYO4kO6~SRmo9;m>ytZIxnXnSYM#HexYmbw9C3yR z&tMjO`b^KOB5Con_Qz&5U$}xNP*qB!_dUa!hqI5myeOZur$2P!wQW6|H?*fBr*FUX zz07%bL)%=Gt&RQP=GqdhK(H#dOjdFS`WL)@VTrM3TRuSYr97@NW6J^L82n?ixRsLA zD4VZf4+&+coOJ!iYwYOtls9kSuJ-vXK+?ZUL2@oXW%y(Q`5Epd$zQ3XXH{|d#C#oN z(_VfW)rHolKcvB*b>Wo*E}S;xg`r_5S2KTBtGJ3Tq@y~E=ABnW8K4OAleb+U8EyXX1$(h~7Y{lfGJj!BC8F|K+SqtinOzb)CaHaZv)rxh6=KTZn@8o+i)83l zLAhp2RQr^TfX}#s@8GqaNmhj~?DkI_6fL#QZMY1zlpyB^x*3(Sw;)r1`fkedZPd-f3Eon` zHy^S`jY}skOa2nDZpf>`{I#{@gUe3OxQW&xvD)2avoIoXnf0pF@)k#j75Jvff$s>0 z!C;L)mg>s+cRP5+-KW5njU`B=b69TuvS4Al`U`Cl^D*@n?oirDc8CW+l9`E@Y;rv@ z@neyaKe%?AHeu3`(aOae)#kp?Y04x3>;EJf(MnL{S8ky!ioCvl<6}0BeggLn#bdfC zHBg8Q7h37$;Tb648UzSl5103_lF84WFN+kE1y|y%@Oj_!tyj2-;5Mosz^j6uw4WuA z%fTj7N@(lW7ivC692ZPPBrtka32wke6p>Ge$VJj3sm$0*0~xswDJg!gf|Yw%0t#b) zxM#oRXCy3DA}p!I)(u5da?iLLbrt-=SD)&<pJ?dBJ&OSz5+{W-nHQlr+0xjS5>mc+G*nb4!H7Ta0Ay9e7^=E;9;d$^1@?$i|5I> z4lHC&G92?1Iu6Jlzqr=ypu$G3NY)-@)agd-WwMgjR9lR-$df|o>Umw-wG4T~2mF807 zg9ZSuV+tpw-w-c0X=8Kpjavel++d0=5IHPJu<)Jh#s?j$rqqsP+Hszsh7 zJ+Zi}Xp&_meT%qulP_c=o2r!+2;T@VMj%dC4I;Km?E`lUKi0dHWf!|B)O{l-me~dU z4EjLQZhU)uIcZ5Y1&^7bA(lc`)MVV_FImb6b{HNXO_zba7|w9vbhwG<3248wyvfsO zP2E~xz(_+7#f8iO>9Tx|q?z;^@@-6sI&SZE3x{xSmqZm$uuS}Bykx1R@+|>4 bqDo=V*x1Aq?lSJ$#bV$#(st7c02blTf5*+~ literal 0 HcmV?d00001 diff --git a/dom/media/webspeech/recognition/test/sinoid+hello.ogg^headers^ b/dom/media/webspeech/recognition/test/sinoid+hello.ogg^headers^ new file mode 100644 index 000000000000..4030ea1d3ddb --- /dev/null +++ b/dom/media/webspeech/recognition/test/sinoid+hello.ogg^headers^ @@ -0,0 +1 @@ +Cache-Control: no-store diff --git a/dom/media/webspeech/recognition/test/test_abort.html b/dom/media/webspeech/recognition/test/test_abort.html index 81c2f6141ad7..0f22770cc7c8 100644 --- a/dom/media/webspeech/recognition/test/test_abort.html +++ b/dom/media/webspeech/recognition/test/test_abort.html @@ -60,7 +60,9 @@ https://bugzilla.mozilla.org/show_bug.cgi?id=650295 eventsToRequest: [], expectedEvents, doneFunc: (nextEventIdx < eventsToAbortOn.length) ? 
doNextTest : SimpleTest.finish, - prefs: [["media.webspeech.test.fake_fsm_events", true], ["media.webspeech.test.fake_recognition_service", true]] + prefs: [["media.webspeech.test.fake_fsm_events", true], + ["media.webspeech.test.fake_recognition_service", true], + ["media.webspeech.recognition.timeout", 100000]] }); } diff --git a/dom/media/webspeech/recognition/test/test_audio_capture_error.html b/dom/media/webspeech/recognition/test/test_audio_capture_error.html index f18a3101079e..0c054dbf0b53 100644 --- a/dom/media/webspeech/recognition/test/test_audio_capture_error.html +++ b/dom/media/webspeech/recognition/test/test_audio_capture_error.html @@ -32,7 +32,9 @@ https://bugzilla.mozilla.org/show_bug.cgi?id=650295 'end': null }, doneFunc: SimpleTest.finish, - prefs: [["media.webspeech.test.fake_fsm_events", true], ["media.webspeech.test.fake_recognition_service", true]] + prefs: [["media.webspeech.test.fake_fsm_events", true], + ["media.webspeech.test.fake_recognition_service", true], + ["media.webspeech.recognition.timeout", 100000]] }); diff --git a/dom/media/webspeech/recognition/test/test_call_start_from_end_handler.html b/dom/media/webspeech/recognition/test/test_call_start_from_end_handler.html index 593d5757c7ae..895648ad9e2e 100644 --- a/dom/media/webspeech/recognition/test/test_call_start_from_end_handler.html +++ b/dom/media/webspeech/recognition/test/test_call_start_from_end_handler.html @@ -91,7 +91,9 @@ https://bugzilla.mozilla.org/show_bug.cgi?id=650295 'result': buildResultCallback("Mock final result"), 'end': endHandler, }, - prefs: [["media.webspeech.test.fake_fsm_events", true], ["media.webspeech.test.fake_recognition_service", true]] + prefs: [["media.webspeech.test.fake_fsm_events", true], + ["media.webspeech.test.fake_recognition_service", true], + ["media.webspeech.recognition.timeout", 100000]] }); diff --git a/dom/media/webspeech/recognition/test/test_nested_eventloop.html b/dom/media/webspeech/recognition/test/test_nested_eventloop.html index d5d5c33e387e..4924766b447d 100644 --- a/dom/media/webspeech/recognition/test/test_nested_eventloop.html +++ b/dom/media/webspeech/recognition/test/test_nested_eventloop.html @@ -72,7 +72,8 @@ https://bugzilla.mozilla.org/show_bug.cgi?id=650295 }, doneFunc, prefs: [["media.webspeech.test.fake_fsm_events", true], - ["media.webspeech.test.fake_recognition_service", true]] + ["media.webspeech.test.fake_recognition_service", true], + ["media.webspeech.recognition.timeout", 100000]] }); diff --git a/dom/media/webspeech/recognition/test/test_online_400_response.html b/dom/media/webspeech/recognition/test/test_online_400_response.html new file mode 100644 index 000000000000..1a7d0ed45242 --- /dev/null +++ b/dom/media/webspeech/recognition/test/test_online_400_response.html @@ -0,0 +1,47 @@ + + + + + + Test for Bug 1248897 -- Online speech service + + + + + +Mozilla Bug 1248897 +

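The script body of this test is not shown above. As an illustrative, non-authoritative sketch only: the 400-response case can be driven by pointing the endpoint preference at the mock handler's return400=1 branch and expecting a network error. The performTest()/buildErrorCallback()/errorCodes helpers are assumed from the head.js used by the sibling tests in this directory, and the mochi.test URL is an assumed path to the .sjs handler shipped with this patch. The hangup, empty-result, and malformed-result variants that follow differ only in the query string and served payload; see the payload sketch after the malformed-result test below.

// Hypothetical sketch, not the original test body.
SimpleTest.waitForExplicitFinish();
performTest({
  eventsToRequest: [],
  expectedEvents: {
    "audiostart": null,
    "audioend": null,
    // Assumption: a 400 from the service surfaces as a network error.
    "error": buildErrorCallback(errorCodes.NETWORK),
    "end": null,
  },
  doneFunc: SimpleTest.finish,
  audioSampleFile: "sinoid+hello.ogg",
  prefs: [
    ["media.webspeech.test.fake_fsm_events", true],
    ["media.webspeech.test.fake_recognition_service", false],
    ["media.webspeech.service.endpoint",
     "http://mochi.test:8888/tests/dom/media/webspeech/recognition/test/http_requesthandler.sjs?return400=1"],
  ],
});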
+ + diff --git a/dom/media/webspeech/recognition/test/test_online_empty_result_handling.html b/dom/media/webspeech/recognition/test/test_online_empty_result_handling.html new file mode 100644 index 000000000000..46f1e7e0cba2 --- /dev/null +++ b/dom/media/webspeech/recognition/test/test_online_empty_result_handling.html @@ -0,0 +1,48 @@ + + + + + + Test for Bug 1248897 -- Online speech service + + + + + +Mozilla Bug 1248897 +

+ + diff --git a/dom/media/webspeech/recognition/test/test_online_hangup.html b/dom/media/webspeech/recognition/test/test_online_hangup.html new file mode 100644 index 000000000000..4a46f80f8ffb --- /dev/null +++ b/dom/media/webspeech/recognition/test/test_online_hangup.html @@ -0,0 +1,47 @@ + + + + + + Test for Bug 1248897 -- Online speech service + + + + + +Mozilla Bug 1248897 +

+ + diff --git a/dom/media/webspeech/recognition/test/test_online_http.html b/dom/media/webspeech/recognition/test/test_online_http.html new file mode 100644 index 000000000000..43be7a656af0 --- /dev/null +++ b/dom/media/webspeech/recognition/test/test_online_http.html @@ -0,0 +1,89 @@ + + + + + + Test for Bug 1248897 -- Online speech service + + + + + +Mozilla Bug 1248897 +

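Sketch of the success path (again illustrative, not the original script): with the endpoint preference pointed at the mock handler's default branch, the service should deliver the canned "hello" hypothesis with confidence 0.9085610 through a result event. Helper names are assumed from head.js:

// Hypothetical sketch, not the original test body.
SimpleTest.waitForExplicitFinish();
performTest({
  eventsToRequest: [],
  expectedEvents: {
    "speechstart": null,
    "speechend": null,
    "result": buildResultCallback("hello"),  // canned hypothesis from the .sjs
    "end": null,
  },
  doneFunc: SimpleTest.finish,
  audioSampleFile: "sinoid+hello.ogg",
  prefs: [
    ["media.webspeech.test.fake_fsm_events", true],
    ["media.webspeech.test.fake_recognition_service", false],
    ["media.webspeech.service.endpoint",
     "http://mochi.test:8888/tests/dom/media/webspeech/recognition/test/http_requesthandler.sjs"],
  ],
});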
+ + diff --git a/dom/media/webspeech/recognition/test/test_online_http_webkit.html b/dom/media/webspeech/recognition/test/test_online_http_webkit.html new file mode 100644 index 000000000000..7f6c7e6d7d1e --- /dev/null +++ b/dom/media/webspeech/recognition/test/test_online_http_webkit.html @@ -0,0 +1,90 @@ + + + + + + Test for Bug 1248897 -- Online speech service + + + + + +Mozilla Bug 1248897 +

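The webkit-prefixed variant exercises the NamedConstructor aliases this patch adds to the WebIDL (see the SpeechRecognition/SpeechGrammar/SpeechGrammarList hunks below). A minimal page-level sketch of what it verifies, assuming SimpleTest's ok()/is() asserts:

// Illustrative: the prefixed constructors alias the unprefixed interfaces.
const rec = new webkitSpeechRecognition();
rec.grammars = new webkitSpeechGrammarList();
ok(rec instanceof SpeechRecognition,
   "webkitSpeechRecognition constructs a SpeechRecognition");
rec.onresult = (event) => {
  is(event.results[0][0].transcript, "hello", "transcript matches the mock");
};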
+ + diff --git a/dom/media/webspeech/recognition/test/test_online_malformed_result_handling.html b/dom/media/webspeech/recognition/test/test_online_malformed_result_handling.html new file mode 100644 index 000000000000..b071a46ea390 --- /dev/null +++ b/dom/media/webspeech/recognition/test/test_online_malformed_result_handling.html @@ -0,0 +1,48 @@ + + + + + + Test for Bug 1248897 -- Online speech service + + + + + +Mozilla Bug 1248897 +

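For reference, the three payload shapes the mock handler above can serve, and a defensive parse of the kind the service must perform (illustrative JavaScript only; the real parsing happens in C++ with JsonCpp):

// Payload shapes taken verbatim from http_requesthandler.sjs.
const okBody        = '{"status":"ok","data":[{"confidence":0.9085610,"text":"hello"}]}';
const emptyBody     = '{"status":"ok","data":[]}';
const malformedBody = '{"status":"ok","dat';  // deliberately truncated JSON

function parseSttResponse(body) {
  let root;
  try {
    root = JSON.parse(body);
  } catch (e) {
    // Unparseable body: reported as an error rather than a result.
    return { error: "malformed response" };
  }
  if (root.status !== "ok" || !Array.isArray(root.data) || root.data.length === 0) {
    return { error: "no recognition result" };
  }
  return { transcript: root.data[0].text, confidence: root.data[0].confidence };
}

[okBody, emptyBody, malformedBody].forEach(b => console.log(parseSttResponse(b)));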
+ + diff --git a/dom/media/webspeech/recognition/test/test_recognition_service_error.html b/dom/media/webspeech/recognition/test/test_recognition_service_error.html index 1d60fc397e2d..e8e59e2afc53 100644 --- a/dom/media/webspeech/recognition/test/test_recognition_service_error.html +++ b/dom/media/webspeech/recognition/test/test_recognition_service_error.html @@ -34,7 +34,9 @@ https://bugzilla.mozilla.org/show_bug.cgi?id=650295 'end': null }, doneFunc: SimpleTest.finish, - prefs: [["media.webspeech.test.fake_fsm_events", true], ["media.webspeech.test.fake_recognition_service", true]] + prefs: [["media.webspeech.test.fake_fsm_events", true], + ["media.webspeech.test.fake_recognition_service", true], + ["media.webspeech.recognition.timeout", 100000]] }); diff --git a/dom/media/webspeech/recognition/test/test_success_without_recognition_service.html b/dom/media/webspeech/recognition/test/test_success_without_recognition_service.html index 4100f25b2b1a..38748ed5cbc5 100644 --- a/dom/media/webspeech/recognition/test/test_success_without_recognition_service.html +++ b/dom/media/webspeech/recognition/test/test_success_without_recognition_service.html @@ -34,7 +34,9 @@ https://bugzilla.mozilla.org/show_bug.cgi?id=650295 'end': null }, doneFunc:SimpleTest.finish, - prefs: [["media.webspeech.test.fake_fsm_events", true], ["media.webspeech.test.fake_recognition_service", true]] + prefs: [["media.webspeech.test.fake_fsm_events", true], + ["media.webspeech.test.fake_recognition_service", true], + ["media.webspeech.recognition.timeout", 100000]] }); diff --git a/dom/media/webspeech/recognition/test/test_timeout.html b/dom/media/webspeech/recognition/test/test_timeout.html index 85fbc0082be2..8334c9e7799a 100644 --- a/dom/media/webspeech/recognition/test/test_timeout.html +++ b/dom/media/webspeech/recognition/test/test_timeout.html @@ -31,7 +31,9 @@ https://bugzilla.mozilla.org/show_bug.cgi?id=650295 }, doneFunc: SimpleTest.finish, audioSampleFile: "silence.ogg", - prefs: [["media.webspeech.test.fake_fsm_events", true], ["media.webspeech.test.fake_recognition_service", true]] + prefs: [["media.webspeech.test.fake_fsm_events", true], + ["media.webspeech.test.fake_recognition_service", true], + ["media.webspeech.recognition.timeout", 1000]] }); diff --git a/dom/webidl/SpeechGrammar.webidl b/dom/webidl/SpeechGrammar.webidl index d64d6b40e41b..1d57bf3cf86e 100644 --- a/dom/webidl/SpeechGrammar.webidl +++ b/dom/webidl/SpeechGrammar.webidl @@ -11,6 +11,7 @@ */ [Pref="media.webspeech.recognition.enable", + NamedConstructor=webkitSpeechGrammar, Func="SpeechRecognition::IsAuthorized", Exposed=Window] interface SpeechGrammar { diff --git a/dom/webidl/SpeechGrammarList.webidl b/dom/webidl/SpeechGrammarList.webidl index a4848fb5883d..ee4885928428 100644 --- a/dom/webidl/SpeechGrammarList.webidl +++ b/dom/webidl/SpeechGrammarList.webidl @@ -11,6 +11,7 @@ */ [Pref="media.webspeech.recognition.enable", + NamedConstructor=webkitSpeechGrammarList, Func="SpeechRecognition::IsAuthorized", Exposed=Window] interface SpeechGrammarList { diff --git a/dom/webidl/SpeechRecognition.webidl b/dom/webidl/SpeechRecognition.webidl index daec8c2e83ff..dcd153c1b676 100644 --- a/dom/webidl/SpeechRecognition.webidl +++ b/dom/webidl/SpeechRecognition.webidl @@ -11,6 +11,7 @@ */ [Pref="media.webspeech.recognition.enable", + NamedConstructor=webkitSpeechRecognition, Func="SpeechRecognition::IsAuthorized", Exposed=Window] interface SpeechRecognition : EventTarget { diff --git a/layout/build/components.conf b/layout/build/components.conf 
index 6d913b580cb9..7e9991e37c00 100644 --- a/layout/build/components.conf +++ b/layout/build/components.conf @@ -438,6 +438,12 @@ if defined('MOZ_WEBSPEECH'): 'headers': ['mozilla/dom/nsSynthVoiceRegistry.h'], 'constructor': 'mozilla::dom::nsSynthVoiceRegistry::GetInstanceForService', }, + { + 'cid': '{0ff5ce56-5b09-4db8-adc6-8266af95f864}', + 'contract_ids': ['@mozilla.org/webspeech/service;1?name=online'], + 'type': 'mozilla::OnlineSpeechRecognitionService', + 'headers': ['mozilla/dom/OnlineSpeechRecognitionService.h'], + }, ] if defined('MOZ_WEBSPEECH_TEST_BACKEND'): diff --git a/layout/build/nsLayoutModule.cpp b/layout/build/nsLayoutModule.cpp index 031ccc808325..7fd48802b7ad 100644 --- a/layout/build/nsLayoutModule.cpp +++ b/layout/build/nsLayoutModule.cpp @@ -47,6 +47,7 @@ #ifdef MOZ_WEBSPEECH # include "mozilla/dom/nsSynthVoiceRegistry.h" +# include "mozilla/dom/OnlineSpeechRecognitionService.h" #endif #include "mozilla/dom/PushNotifier.h"
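Finally, a sketch of how the newly registered component can be instantiated from chrome JavaScript via the contract id declared in components.conf above (the Cc/Ci shorthands are assumed available, as in any chrome context; the DOM normally reaches the service through SpeechRecognition rather than directly):

// Hypothetical chrome-JS lookup of the online service component.
const online = Cc["@mozilla.org/webspeech/service;1?name=online"]
                 .createInstance(Ci.nsISpeechRecognitionService);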