Mirror of https://github.com/mozilla/gecko-dev.git (synced 2024-11-27 06:43:32 +00:00)
Bug 1849860 - Keep UnderlyingSourceAlgorithms alive until it's explicitly closed. r=smaug,asuth a=RyanVM
1. Add mAsyncWaitAlgorithms, mirroring mAsyncWaitWorkerRef, so the algorithms object is kept alive the same way the worker ref is kept alive today.
2. Also close mInput on window teardown, to prevent a memory leak since Fetch does not close the stream automatically.

Differential Revision: https://phabricator.services.mozilla.com/D195132

Depends on D195131
This commit is contained in:
parent b1c7724808
commit 9eb1c78759
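At its core the patch is a keep-alive pattern: while an AsyncWait on the input stream is outstanding, the holder stashes a strong reference to the algorithms object and releases it when the callback fires or at shutdown. Below is a minimal, non-Gecko sketch of that pattern in plain C++; Holder and Algorithms are hypothetical stand-ins for InputStreamHolder and InputToReadableStreamAlgorithms, and std::shared_ptr stands in for RefPtr.

// Simplified, non-Gecko sketch of the keep-alive pattern used by the patch:
// while an async wait is outstanding, hold a strong reference to the object
// that must survive until the callback runs, and drop it in the callback or
// at shutdown. All names here are hypothetical.
#include <functional>
#include <iostream>
#include <memory>

struct Algorithms {
  void OnReady() { std::cout << "data delivered to the stream\n"; }
};

class Holder {
 public:
  explicit Holder(std::shared_ptr<Algorithms> aAlgorithms)
      : mCallback(aAlgorithms) {}

  // Analogous to InputStreamHolder::AsyncWait: take a strong reference for
  // the duration of the wait so the algorithms cannot go away mid-read.
  std::function<void()> AsyncWait() {
    mAsyncWaitAlgorithms = mCallback.lock();
    return [this] { OnInputStreamReady(); };
  }

  // Analogous to OnInputStreamReady: release the keep-alive once the
  // callback has been delivered.
  void OnInputStreamReady() {
    auto algorithms = std::move(mAsyncWaitAlgorithms);
    if (algorithms) {
      algorithms->OnReady();
    }
  }

  // Analogous to Shutdown(): dropping the keep-alive lets the algorithms be
  // destroyed even if the callback never arrives.
  void Shutdown() { mAsyncWaitAlgorithms.reset(); }

 private:
  std::weak_ptr<Algorithms> mCallback;               // like the non-owning mCallback
  std::shared_ptr<Algorithms> mAsyncWaitAlgorithms;  // like mAsyncWaitAlgorithms
};

int main() {
  auto algorithms = std::make_shared<Algorithms>();
  Holder holder(algorithms);
  auto ready = holder.AsyncWait();
  algorithms.reset();  // the caller lets go, but the holder keeps it alive
  ready();             // the callback still finds a live Algorithms object
}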
@@ -154,9 +154,10 @@ already_AddRefed<Promise> UnderlyingSourceAlgorithmsWrapper::CancelCallback(
 
 NS_IMPL_ISUPPORTS(InputStreamHolder, nsIInputStreamCallback)
 
-InputStreamHolder::InputStreamHolder(InputToReadableStreamAlgorithms* aCallback,
-                                     nsIAsyncInputStream* aInput)
-    : mCallback(aCallback), mInput(aInput) {}
+InputStreamHolder::InputStreamHolder(nsIGlobalObject* aGlobal,
+                                     InputToReadableStreamAlgorithms* aCallback,
+                                     nsIAsyncInputStream* aInput)
+    : GlobalTeardownObserver(aGlobal), mCallback(aCallback), mInput(aInput) {}
 
 void InputStreamHolder::Init(JSContext* aCx) {
   if (!NS_IsMainThread()) {
@@ -169,9 +170,8 @@ void InputStreamHolder::Init(JSContext* aCx) {
     // Note, this will create a ref-cycle between the holder and the stream.
     // The cycle is broken when the stream is closed or the worker begins
     // shutting down.
-    mWorkerRef =
-        StrongWorkerRef::Create(workerPrivate, "InputStreamHolder",
-                                [self = RefPtr{this}]() { self->Shutdown(); });
+    mWorkerRef = StrongWorkerRef::Create(workerPrivate, "InputStreamHolder",
+                                         [self = RefPtr{this}]() {});
     if (NS_WARN_IF(!mWorkerRef)) {
       return;
     }
@@ -180,13 +180,26 @@ void InputStreamHolder::Init(JSContext* aCx) {
 
 InputStreamHolder::~InputStreamHolder() = default;
 
+void InputStreamHolder::DisconnectFromOwner() {
+  Shutdown();
+  GlobalTeardownObserver::DisconnectFromOwner();
+}
+
 void InputStreamHolder::Shutdown() {
-  if (mWorkerRef) {
-    mInput->CloseWithStatus(NS_BASE_STREAM_CLOSED);
-    mWorkerRef = nullptr;
-    // If we have an AsyncWait running, we'll get a callback and clear
-    // the mAsyncWaitWorkerRef
+  if (mInput) {
+    mInput->Close();
   }
+  // NOTE(krosylight): Dropping mAsyncWaitAlgorithms here means letting cycle
+  // collection happen on the underlying source, which can cause a dangling
+  // read promise that never resolves. Doing so shouldn't be a problem at
+  // shutdown phase.
+  // Note that this is currently primarily for Fetch which does not explicitly
+  // close its streams at shutdown. (i.e. to prevent memory leak for cases e.g
+  // WPT /fetch/api/basic/stream-response.any.html)
+  mAsyncWaitAlgorithms = nullptr;
+  // If we have an AsyncWait running, we'll get a callback and clear
+  // the mAsyncWaitWorkerRef
+  mWorkerRef = nullptr;
 }
 
 nsresult InputStreamHolder::AsyncWait(uint32_t aFlags, uint32_t aRequestedCount,
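The new DisconnectFromOwner/Shutdown pair above is the window-teardown half of the fix: when the owning global goes away, the holder now closes the stream itself instead of relying on Fetch to do it. A rough sketch of that observer wiring, with hypothetical Global, TeardownObserver and Input types standing in for nsIGlobalObject, GlobalTeardownObserver and nsIAsyncInputStream, might look like this:

// Simplified, non-Gecko sketch of the teardown flow: an observer registered
// with its global gets DisconnectFromOwner() when the window (or worker
// global) is torn down, and uses it to close the stream. Names hypothetical.
#include <iostream>
#include <memory>
#include <vector>

struct Input {
  bool closed = false;
  void Close() { closed = true; }
};

class TeardownObserver {
 public:
  virtual ~TeardownObserver() = default;
  virtual void DisconnectFromOwner() = 0;
};

struct Global {
  std::vector<TeardownObserver*> observers;
  void Teardown() {
    for (auto* o : observers) o->DisconnectFromOwner();
    observers.clear();
  }
};

class StreamHolder final : public TeardownObserver {
 public:
  StreamHolder(Global& aGlobal, std::shared_ptr<Input> aInput)
      : mInput(std::move(aInput)) {
    aGlobal.observers.push_back(this);
  }

  // Mirrors InputStreamHolder::DisconnectFromOwner(): forward to Shutdown().
  void DisconnectFromOwner() override { Shutdown(); }

  // Mirrors the new Shutdown(): close the input so nothing leaks even when
  // the producer (e.g. Fetch) never closes the stream itself.
  void Shutdown() {
    if (mInput) {
      mInput->Close();
      mInput.reset();
    }
  }

 private:
  std::shared_ptr<Input> mInput;
};

int main() {
  Global global;
  auto input = std::make_shared<Input>();
  StreamHolder holder(global, input);
  global.Teardown();  // the window goes away: the holder closes the input
  std::cout << std::boolalpha << input->closed << "\n";  // prints: true
}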
@@ -194,6 +207,7 @@ nsresult InputStreamHolder::AsyncWait(uint32_t aFlags, uint32_t aRequestedCount,
   nsresult rv = mInput->AsyncWait(this, aFlags, aRequestedCount, aEventTarget);
   if (NS_SUCCEEDED(rv)) {
     mAsyncWaitWorkerRef = mWorkerRef;
+    mAsyncWaitAlgorithms = mCallback;
   }
   return rv;
 }
@@ -201,6 +215,7 @@ nsresult InputStreamHolder::AsyncWait(uint32_t aFlags, uint32_t aRequestedCount,
 NS_IMETHODIMP InputStreamHolder::OnInputStreamReady(
     nsIAsyncInputStream* aStream) {
   mAsyncWaitWorkerRef = nullptr;
+  mAsyncWaitAlgorithms = nullptr;
   // We may get called back after ::Shutdown()
   if (mCallback) {
     return mCallback->OnInputStreamReady(aStream);
@@ -215,6 +230,14 @@ NS_IMPL_CYCLE_COLLECTION_WEAK_PTR_INHERITED(InputToReadableStreamAlgorithms,
                                             UnderlyingSourceAlgorithmsWrapper,
                                             mPullPromise, mStream)
 
+InputToReadableStreamAlgorithms::InputToReadableStreamAlgorithms(
+    JSContext* aCx, nsIAsyncInputStream* aInput, ReadableStream* aStream)
+    : mOwningEventTarget(GetCurrentSerialEventTarget()),
+      mInput(new InputStreamHolder(aStream->GetParentObject(), this, aInput)),
+      mStream(aStream) {
+  mInput->Init(aCx);
+}
+
 already_AddRefed<Promise> InputToReadableStreamAlgorithms::PullCallbackImpl(
     JSContext* aCx, ReadableStreamController& aController, ErrorResult& aRv) {
   MOZ_ASSERT(aController.IsByte());
@@ -170,17 +170,21 @@ class InputToReadableStreamAlgorithms;
 // causing a Worker to assert with globalScopeAlive. By isolating
 // ourselves from the inputstream, we can safely be CC'd if needed and
 // will inform the inputstream to shut down.
-class InputStreamHolder final : public nsIInputStreamCallback {
+class InputStreamHolder final : public nsIInputStreamCallback,
+                                public GlobalTeardownObserver {
  public:
   NS_DECL_THREADSAFE_ISUPPORTS
   NS_DECL_NSIINPUTSTREAMCALLBACK
 
-  InputStreamHolder(InputToReadableStreamAlgorithms* aCallback,
+  InputStreamHolder(nsIGlobalObject* aGlobal,
+                    InputToReadableStreamAlgorithms* aCallback,
                     nsIAsyncInputStream* aInput);
 
   void Init(JSContext* aCx);
 
-  // Used by Worker shutdown
+  void DisconnectFromOwner() override;
+
+  // Used by global teardown
   void Shutdown();
 
   // These just proxy the calls to the nsIAsyncInputStream
@@ -203,8 +207,20 @@ class InputStreamHolder final : public nsIInputStreamCallback {
   RefPtr<StrongWorkerRef> mAsyncWaitWorkerRef;
   RefPtr<StrongWorkerRef> mWorkerRef;
   nsCOMPtr<nsIAsyncInputStream> mInput;
+
+  // To ensure the underlying source sticks around during an ongoing read
+  // operation. mAlgorithms is not cycle collected on purpose, and this holder
+  // is responsible to keep the underlying source algorithms until
+  // nsIAsyncInputStream responds.
+  //
+  // This is done because otherwise the whole stream objects may be cycle
+  // collected, including the promises created from read(), as our JS engine may
+  // throw unsettled promises away for optimization. See bug 1849860.
+  RefPtr<InputToReadableStreamAlgorithms> mAsyncWaitAlgorithms;
 };
 
 // Using this class means you are also passing the lifetime control of your
 // nsIAsyncInputStream, as it will be closed when this class tears down.
 class InputToReadableStreamAlgorithms final
     : public UnderlyingSourceAlgorithmsWrapper,
       public nsIInputStreamCallback,
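The block comment introduced above is the rationale for the new member. As a loose analogy (plain C++, not Gecko; a garbage-collected JS read() promise would simply never settle rather than throw), dropping the only owner of a pending operation strands whoever is waiting on it:

// Minimal illustration (hypothetical, not Gecko code) of the failure mode the
// comment describes: if the object that would fulfill a pending read is
// destroyed before the data arrives, the consumer never gets a result.
// Keeping a strong reference for the duration of the wait, as
// mAsyncWaitAlgorithms does, rules this out.
#include <future>
#include <iostream>
#include <memory>

int main() {
  auto source = std::make_unique<std::promise<int>>();
  std::future<int> read = source->get_future();

  // Without a keep-alive, the "underlying source" can go away mid-read...
  source.reset();

  // ...and the pending read is now abandoned: std::future reports a broken
  // promise, while an unsettled JS promise would just never resolve.
  try {
    read.get();
  } catch (const std::future_error& e) {
    std::cout << "pending read abandoned: " << e.what() << "\n";
  }
}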
@@ -215,12 +231,7 @@ class InputToReadableStreamAlgorithms final
                                   UnderlyingSourceAlgorithmsWrapper)
 
   InputToReadableStreamAlgorithms(JSContext* aCx, nsIAsyncInputStream* aInput,
-                                  ReadableStream* aStream)
-      : mOwningEventTarget(GetCurrentSerialEventTarget()),
-        mInput(new InputStreamHolder(this, aInput)),
-        mStream(aStream) {
-    mInput->Init(aCx);
-  }
+                                  ReadableStream* aStream);
 
   // Streams algorithms
@@ -70,7 +70,18 @@ promise_test(async() => {
   await garbageCollect();
   const chunks = await read_all_chunks(stream, { perform_gc: true });
   assert_array_equals(chunks, input_arr);
-}, "Blob.stream() garbage collection of blob shouldn't break stream" +
+}, "Blob.stream() garbage collection of blob shouldn't break stream " +
     "consumption")
 
+promise_test(async() => {
+  const input_arr = [8, 241, 48, 123, 151];
+  const typed_arr = new Uint8Array(input_arr);
+  let blob = new Blob([typed_arr]);
+  const chunksPromise = read_all_chunks(blob.stream());
+  // It somehow matters to do GC here instead of doing `perform_gc: true`
+  await garbageCollect();
+  assert_array_equals(await chunksPromise, input_arr);
+}, "Blob.stream() garbage collection of stream shouldn't break stream " +
+    "consumption")
+
 promise_test(async () => {