Bug 1645339 - Use range-based for with nsTObserverArray in dom/cache. r=dom-workers-and-storage-reviewers,edenchuang
Differential Revision: https://phabricator.services.mozilla.com/D79431
Parent: 7e0dd70737
Commit: bfb7f6a78a
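
All seven files follow the same mechanical rewrite: an explicit nsTObserverArray ForwardIterator/BackwardIterator loop becomes a range-based for over ForwardRange()/BackwardRange(). A minimal sketch of the before/after shape, assuming the Gecko xpcom/ds/nsTObserverArray.h header; the Observer type and NotifyAll() below are invented for illustration, not code from this patch:

// Illustrative only; Observer and NotifyAll() are made-up names, and
// nsTObserverArray comes from the Gecko tree (xpcom/ds/nsTObserverArray.h).
#include "nsTObserverArray.h"

struct Observer {
  void Notify() { /* react to the notification */ }
};

void NotifyAll(nsTObserverArray<Observer*>& aObservers) {
  // Before this patch, call sites spelled the observing iterator out:
  //
  //   nsTObserverArray<Observer*>::ForwardIterator iter(aObservers);
  //   while (iter.HasMore()) {
  //     iter.GetNext()->Notify();
  //   }
  //
  // ForwardRange() (and BackwardRange() for reverse walks) exposes the same
  // observing iterator as a range, so the loop becomes:
  for (auto* observer : aObservers.ForwardRange()) {
    observer->Notify();
  }
}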

dom/cache/Context.cpp (11 changed lines)
@@ -817,11 +817,8 @@ void Context::CancelAll() {

   mState = STATE_CONTEXT_CANCELED;
   mPendingActions.Clear();
-  {
-    ActivityList::ForwardIterator iter(mActivityList);
-    while (iter.HasMore()) {
-      iter.GetNext()->Cancel();
-    }
+  for (auto* activity : mActivityList.ForwardRange()) {
+    activity->Cancel();
   }
   AllowToClose();
 }
@@ -853,9 +850,7 @@ void Context::CancelForCacheId(CacheId aCacheId) {
   });

   // Cancel activities and let them remove themselves
-  ActivityList::ForwardIterator iter(mActivityList);
-  while (iter.HasMore()) {
-    Activity* activity = iter.GetNext();
+  for (auto* activity : mActivityList.ForwardRange()) {
     if (activity->MatchesCacheId(aCacheId)) {
       activity->Cancel();
     }
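
The comments in this file ("Cancel activities and let them remove themselves", together with RemoveActivity() in Context.h) are the reason an observing array is used here: an activity's Cancel() can drop it from mActivityList while the loop is still running, and ForwardRange() keeps the same mutation-tolerant iteration the old ForwardIterator gave. A reduced sketch of that interaction, using invented MiniContext/MiniActivity stand-ins rather than the real classes:

// Simplified stand-ins (not the real Context/Activity) to show why an
// observing range matters: Cancel() removes the element mid-iteration.
#include "nsTObserverArray.h"

class MiniContext;

class MiniActivity {
 public:
  explicit MiniActivity(MiniContext* aContext) : mContext(aContext) {}
  void Cancel();  // Removes this activity from its context's list.

 private:
  MiniContext* mContext;
};

class MiniContext {
 public:
  void AddActivity(MiniActivity* aActivity) {
    mActivityList.AppendElement(aActivity);
  }

  void RemoveActivity(MiniActivity* aActivity) {
    mActivityList.RemoveElement(aActivity);
  }

  void CancelAll() {
    // The observing iterator behind ForwardRange() stays valid even though
    // Cancel() calls back into RemoveActivity() and shrinks the array.
    for (auto* activity : mActivityList.ForwardRange()) {
      activity->Cancel();
    }
  }

 private:
  nsTObserverArray<MiniActivity*> mActivityList;
};

void MiniActivity::Cancel() { mContext->RemoveActivity(this); }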

dom/cache/Context.h (3 changed lines)
@@ -198,8 +198,7 @@ class Context final : public SafeRefCounted<Context> {

   // Weak refs since activites must remove themselves from this list before
   // being destroyed by calling RemoveActivity().
-  typedef nsTObserverArray<Activity*> ActivityList;
-  ActivityList mActivityList;
+  nsTObserverArray<Activity*> mActivityList;

   // The ThreadsafeHandle may have a strong ref back to us. This creates
   // a ref-cycle that keeps the Context alive. The ref-cycle is broken

dom/cache/Manager.cpp (18 changed lines)
@@ -293,9 +293,7 @@ class Manager::Factory {
     AutoRestore<bool> restore(sFactory->mInSyncAbortOrShutdown);
     sFactory->mInSyncAbortOrShutdown = true;

-    ManagerList::ForwardIterator iter(sFactory->mManagerList);
-    while (iter.HasMore()) {
-      Manager* manager = iter.GetNext();
+    for (auto* manager : sFactory->mManagerList.ForwardRange()) {
       if (aOrigin.IsVoid() || manager->mManagerId->QuotaOrigin() == aOrigin) {
         auto pinnedManager =
             SafeRefPtr{manager, AcquireStrongRefFromRawPtr{}};
@@ -323,10 +321,8 @@ class Manager::Factory {
     AutoRestore<bool> restore(sFactory->mInSyncAbortOrShutdown);
     sFactory->mInSyncAbortOrShutdown = true;

-    ManagerList::ForwardIterator iter(sFactory->mManagerList);
-    while (iter.HasMore()) {
-      auto pinnedManager =
-          SafeRefPtr{iter.GetNext(), AcquireStrongRefFromRawPtr{}};
+    for (auto* manager : sFactory->mManagerList.ForwardRange()) {
+      auto pinnedManager = SafeRefPtr{manager, AcquireStrongRefFromRawPtr{}};
       pinnedManager->Shutdown();
     }
   }
@@ -407,9 +403,8 @@ class Manager::Factory {
     // Iterate in reverse to find the most recent, matching Manager. This
     // is important when looking for a Closing Manager. If a new Manager
     // chains to an old Manager we want it to be the most recent one.
-    ManagerList::BackwardIterator iter(sFactory->mManagerList);
-    while (iter.HasMore()) {
-      Manager* manager = iter.GetNext();
+    // XXX We could use a reversed NonObservingRange here and std::find_if.
+    for (auto* manager : sFactory->mManagerList.BackwardRange()) {
       if (aState == manager->GetState() && *manager->mManagerId == aManagerId) {
         return {manager, AcquireStrongRefFromRawPtr{}};
       }
@@ -433,8 +428,7 @@ class Manager::Factory {
   // Weak references as we don't want to keep Manager objects alive forever.
   // When a Manager is destroyed it calls Factory::Remove() to clear itself.
   // PBackground thread only.
-  typedef nsTObserverArray<Manager*> ManagerList;
-  ManagerList mManagerList;
+  nsTObserverArray<Manager*> mManagerList;

   // This flag is set when we are looping through the list and calling Abort()
   // or Shutdown() on each Manager. We need to be careful not to synchronously
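
The new "// XXX We could use a reversed NonObservingRange here and std::find_if." line is only a suggestion; the patch keeps the explicit backward loop. As a rough illustration of that idea in plain STL terms (ManagerStub and FindMostRecent are invented here, and std::vector stands in for a non-observing view of the list):

// Plain-STL illustration of "search backwards for the most recent match";
// ManagerStub and its field are invented for this sketch.
#include <algorithm>
#include <string>
#include <vector>

struct ManagerStub {
  std::string mOrigin;
};

ManagerStub* FindMostRecent(std::vector<ManagerStub*>& aManagers,
                            const std::string& aOrigin) {
  // rbegin()/rend() walk newest-to-oldest, mirroring the BackwardRange() walk.
  const auto it =
      std::find_if(aManagers.rbegin(), aManagers.rend(),
                   [&](const ManagerStub* aManager) {
                     return aManager->mOrigin == aOrigin;
                   });
  return it != aManagers.rend() ? *it : nullptr;
}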

dom/cache/PrincipalVerifier.cpp (6 changed lines)
@@ -167,9 +167,9 @@ void PrincipalVerifier::VerifyOnMainThread() {

 void PrincipalVerifier::CompleteOnInitiatingThread() {
   AssertIsOnBackgroundThread();
-  ListenerList::ForwardIterator iter(mListenerList);
-  while (iter.HasMore()) {
-    iter.GetNext()->OnPrincipalVerified(mResult, mManagerId);
+
+  for (auto* listener : mListenerList.ForwardRange()) {
+    listener->OnPrincipalVerified(mResult, mManagerId);
   }

   // The listener must clear its reference in OnPrincipalVerified()

dom/cache/PrincipalVerifier.h (3 changed lines)
@@ -60,8 +60,7 @@ class PrincipalVerifier final : public Runnable {
   void DispatchToInitiatingThread(nsresult aRv);

   // Weak reference cleared by RemoveListener()
-  typedef nsTObserverArray<Listener*> ListenerList;
-  ListenerList mListenerList;
+  nsTObserverArray<Listener*> mListenerList;

   // set in originating thread at construction, but must be accessed and
   // released on main thread

dom/cache/StreamControl.cpp (25 changed lines)
@@ -40,11 +40,10 @@ void StreamControl::CloseReadStreams(const nsID& aId) {
   uint32_t closedCount = 0;
 #endif

-  ReadStreamList::ForwardIterator iter(mReadStreamList);
-  while (iter.HasMore()) {
-    RefPtr<ReadStream::Controllable> stream = iter.GetNext();
+  for (const auto& stream : mReadStreamList.ForwardRange()) {
     if (stream->MatchId(aId)) {
-      stream->CloseStream();
+      const auto pinnedStream = stream;
+      pinnedStream->CloseStream();
 #ifdef MOZ_DIAGNOSTIC_ASSERT_ENABLED
       closedCount += 1;
 #endif
@@ -63,28 +62,26 @@ void StreamControl::CloseAllReadStreams() {
   // 2. the this pointer is deleted by CacheStreamControlParent::Shutdown
   //    (called transitively)
   auto readStreamList = mReadStreamList.Clone();
-  ReadStreamList::ForwardIterator iter(readStreamList);
-  while (iter.HasMore()) {
-    iter.GetNext()->CloseStream();
+  for (const auto& stream : readStreamList.ForwardRange()) {
+    stream->CloseStream();
   }
 }

 void StreamControl::CloseAllReadStreamsWithoutReporting() {
   AssertOwningThread();

-  ReadStreamList::ForwardIterator iter(mReadStreamList);
-  while (iter.HasMore()) {
-    RefPtr<ReadStream::Controllable> stream = iter.GetNext();
+  for (const auto& stream : mReadStreamList.ForwardRange()) {
+    const auto pinnedStream = stream;
     // Note, we cannot trigger IPC traffic here. So use
     // CloseStreamWithoutReporting().
-    stream->CloseStreamWithoutReporting();
+    pinnedStream->CloseStreamWithoutReporting();
   }
 }

 bool StreamControl::HasEverBeenRead() const {
-  ReadStreamList::ForwardIterator iter(mReadStreamList);
-  while (iter.HasMore()) {
-    if (iter.GetNext()->HasEverBeenRead()) {
+  // XXX We could use a NonObservingRange here, and then use std::any_of.
+  for (const auto& stream : mReadStreamList.ForwardRange()) {
+    if (stream->HasEverBeenRead()) {
       return true;
     }
   }
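
Two of these loops add a pinning copy, const auto pinnedStream = stream;, before calling into the stream. With the range-based loop, stream is a reference to the RefPtr stored in mReadStreamList, and CloseStream()/CloseStreamWithoutReporting() can presumably end up removing that entry (via the NoteClosed()/ForgetReadStream() paths mentioned in StreamControl.h), so the copy keeps a strong reference alive for the duration of the call, much as the old loop's local RefPtr did. A reduced sketch of the pattern, assuming the usual Gecko refcounting helpers; MiniStream/MiniControl are invented stand-ins, not the real ReadStream::Controllable/StreamControl:

// Reduced illustration of the "pin before calling" pattern used above.
#include "mozilla/RefPtr.h"
#include "nsISupportsImpl.h"
#include "nsTObserverArray.h"

class MiniControl;

class MiniStream {
 public:
  NS_INLINE_DECL_REFCOUNTING(MiniStream)

  explicit MiniStream(MiniControl* aControl) : mControl(aControl) {}
  void Close();  // May drop the list's reference to this stream.

 private:
  ~MiniStream() = default;
  MiniControl* mControl;
};

class MiniControl {
 public:
  void ForgetStream(MiniStream* aStream) { mStreams.RemoveElement(aStream); }

  void CloseAll() {
    for (const auto& stream : mStreams.ForwardRange()) {
      // `stream` refers to the RefPtr stored in mStreams. Close() may remove
      // that entry, so pin a local strong reference before calling into it.
      const RefPtr<MiniStream> pinnedStream = stream;
      pinnedStream->Close();
    }
  }

 private:
  nsTObserverArray<RefPtr<MiniStream>> mStreams;
};

void MiniStream::Close() { mControl->ForgetStream(this); }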

dom/cache/StreamControl.h (3 changed lines)
@@ -73,8 +73,7 @@ class StreamControl {
  private:
   // Hold strong references to ReadStream object. When the stream is closed
   // it should call NoteClosed() or ForgetReadStream() to release this ref.
-  typedef nsTObserverArray<RefPtr<ReadStream::Controllable>> ReadStreamList;
-  ReadStreamList mReadStreamList;
+  nsTObserverArray<RefPtr<ReadStream::Controllable>> mReadStreamList;
 };

 } // namespace cache