Backed out 7 changesets (bug 1551735) for build bustage at src/gfx/layers/wr/WebRenderCompositionRecorder.h on a CLOSED TREE

Backed out changeset 98e75ac2cf4f (bug 1551735)
Backed out changeset 152e3a6e5c10 (bug 1551735)
Backed out changeset 9deb5350e244 (bug 1551735)
Backed out changeset 6154bdfe6fad (bug 1551735)
Backed out changeset 4a0936bda490 (bug 1551735)
Backed out changeset a7868d694fe1 (bug 1551735)
Backed out changeset 06c8e5f7768d (bug 1551735)
This commit is contained in:
Coroiu Cristina 2019-05-31 00:05:00 +03:00
parent 9097b58779
commit 0c344192ae
18 changed files with 326 additions and 909 deletions

View File

@ -27,6 +27,8 @@ namespace layers {
CompositionRecorder::CompositionRecorder(TimeStamp aRecordingStart)
: mRecordingStart(aRecordingStart) {}
CompositionRecorder::~CompositionRecorder() {}
void CompositionRecorder::RecordFrame(RecordedFrame* aFrame) {
mCollectedFrames.AppendElement(aFrame);
}
@ -58,7 +60,5 @@ void CompositionRecorder::WriteCollectedFrames() {
mCollectedFrames.Clear();
}
void CompositionRecorder::ClearCollectedFrames() { mCollectedFrames.Clear(); }
} // namespace layers
} // namespace mozilla

View File

@ -20,9 +20,6 @@ class DataSourceSurface;
namespace layers {
/**
* A captured frame from a |LayerManager|.
*/
class RecordedFrame {
public:
NS_INLINE_DECL_THREADSAFE_REFCOUNTING(RecordedFrame)
@ -41,34 +38,16 @@ class RecordedFrame {
};
/**
* A recorder for composited frames.
*
* This object collects frames sent to it by a |LayerManager| and writes them
* out as a series of images until recording has finished.
*
* If GPU-accelerated rendering is used, the frames will not be mapped into
* memory until |WriteCollectedFrames| is called.
*/
class CompositionRecorder {
NS_INLINE_DECL_THREADSAFE_REFCOUNTING(CompositionRecorder)
class CompositionRecorder final {
public:
explicit CompositionRecorder(TimeStamp aRecordingStart);
~CompositionRecorder();
/**
* Record a composited frame.
*/
virtual void RecordFrame(RecordedFrame* aFrame);
void RecordFrame(RecordedFrame* aFrame);
/**
* Write out the collected frames as a series of timestamped images.
*/
virtual void WriteCollectedFrames();
protected:
virtual ~CompositionRecorder() = default;
void ClearCollectedFrames();
void WriteCollectedFrames();
private:
nsTArray<RefPtr<RecordedFrame>> mCollectedFrames;
@ -78,4 +57,4 @@ class CompositionRecorder {
} // namespace layers
} // namespace mozilla
#endif // mozilla_layers_CompositionRecorder_h
#endif // mozilla_layers_ProfilerScreenshots_h

View File

@ -19,7 +19,6 @@
#include "mozilla/gfx/Point.h" // for IntSize
#include "mozilla/gfx/Rect.h" // for Rect
#include "mozilla/gfx/Types.h" // for SurfaceFormat
#include "mozilla/layers/CompositionRecorder.h"
#include "mozilla/layers/CompositorTypes.h"
#include "mozilla/layers/Effects.h" // for EffectChain
#include "mozilla/layers/LayersMessages.h"
@ -54,6 +53,7 @@ namespace layers {
class CanvasLayerComposite;
class ColorLayerComposite;
class Compositor;
class CompositionRecorder;
class ContainerLayerComposite;
class Diagnostics;
struct EffectChain;
@ -199,7 +199,7 @@ class HostLayerManager : public LayerManager {
mCompositorBridgeID = aID;
}
void SetCompositionRecorder(already_AddRefed<CompositionRecorder> aRecorder) {
void SetCompositionRecorder(CompositionRecorder* aRecorder) {
mCompositionRecorder = aRecorder;
}
@ -216,7 +216,7 @@ class HostLayerManager : public LayerManager {
bool mWindowOverlayChanged;
TimeDuration mLastPaintTime;
TimeStamp mRenderStartTime;
RefPtr<CompositionRecorder> mCompositionRecorder = nullptr;
CompositionRecorder* mCompositionRecorder = nullptr;
// Render time for the current composition.
TimeStamp mCompositionTime;

View File

@ -2609,15 +2609,10 @@ int32_t RecordContentFrameTime(
mozilla::ipc::IPCResult CompositorBridgeParent::RecvBeginRecording(
const TimeStamp& aRecordingStart) {
mCompositionRecorder.reset(new CompositionRecorder(aRecordingStart));
if (mLayerManager) {
mCompositionRecorder = new CompositionRecorder(aRecordingStart);
mLayerManager->SetCompositionRecorder(do_AddRef(mCompositionRecorder));
} else if (mWrBridge) {
RefPtr<WebRenderCompositionRecorder> recorder =
new WebRenderCompositionRecorder(aRecordingStart,
mWrBridge->PipelineId());
mCompositionRecorder = recorder;
mWrBridge->SetCompositionRecorder(std::move(recorder));
mLayerManager->SetCompositionRecorder(mCompositionRecorder.get());
}
return IPC_OK();
@ -2627,13 +2622,8 @@ mozilla::ipc::IPCResult CompositorBridgeParent::RecvEndRecording() {
if (mLayerManager) {
mLayerManager->SetCompositionRecorder(nullptr);
}
// If we are using WebRender, the |RenderThread| will have a handle to this
// |WebRenderCompositionRecorder|, which it will release once the frames have
// been written.
mCompositionRecorder->WriteCollectedFrames();
mCompositionRecorder = nullptr;
mCompositionRecorder.reset(nullptr);
return IPC_OK();
}

View File

@ -27,7 +27,6 @@
#include "mozilla/gfx/Point.h" // for IntSize
#include "mozilla/ipc/ProtocolUtils.h"
#include "mozilla/ipc/SharedMemory.h"
#include "mozilla/layers/CompositionRecorder.h"
#include "mozilla/layers/CompositorController.h"
#include "mozilla/layers/CompositorOptions.h"
#include "mozilla/layers/CompositorVsyncSchedulerOwner.h"
@ -73,6 +72,7 @@ class APZSampler;
class APZUpdater;
class AsyncCompositionManager;
class AsyncImagePipelineManager;
class CompositionRecorder;
class Compositor;
class CompositorAnimationStorage;
class CompositorBridgeParent;
@ -767,7 +767,7 @@ class CompositorBridgeParent final : public CompositorBridgeParentBase,
// mSelfRef is cleared in DeferredDestroy which is scheduled by ActorDestroy.
RefPtr<CompositorBridgeParent> mSelfRef;
RefPtr<CompositorAnimationStorage> mAnimationStorage;
RefPtr<CompositionRecorder> mCompositionRecorder;
UniquePtr<CompositionRecorder> mCompositionRecorder;
TimeDuration mPaintTime;

View File

@ -255,7 +255,6 @@ EXPORTS.mozilla.layers += [
'wr/WebRenderBridgeParent.h',
'wr/WebRenderCanvasRenderer.h',
'wr/WebRenderCommandBuilder.h',
'wr/WebRenderCompositionRecorder.h',
'wr/WebRenderDrawEventRecorder.h',
'wr/WebRenderImageHost.h',
'wr/WebRenderLayerManager.h',
@ -504,7 +503,6 @@ UNIFIED_SOURCES += [
'wr/WebRenderBridgeParent.cpp',
'wr/WebRenderCanvasRenderer.cpp',
'wr/WebRenderCommandBuilder.cpp',
'wr/WebRenderCompositionRecorder.cpp',
'wr/WebRenderDrawEventRecorder.cpp',
'wr/WebRenderImageHost.cpp',
'wr/WebRenderLayerManager.cpp',

View File

@ -801,11 +801,6 @@ bool WebRenderBridgeParent::IsRootWebRenderBridgeParent() const {
return !!mWidget;
}
void WebRenderBridgeParent::SetCompositionRecorder(
RefPtr<layers::WebRenderCompositionRecorder>&& aRecorder) {
Api(wr::RenderRoot::Default)->SetCompositionRecorder(std::move(aRecorder));
}
CompositorBridgeParent* WebRenderBridgeParent::GetRootCompositorBridgeParent()
const {
if (!mCompositorBridge) {

View File

@ -17,7 +17,6 @@
#include "mozilla/layers/CompositorVsyncSchedulerOwner.h"
#include "mozilla/layers/PWebRenderBridgeParent.h"
#include "mozilla/layers/UiCompositorControllerParent.h"
#include "mozilla/layers/WebRenderCompositionRecorder.h"
#include "mozilla/Maybe.h"
#include "mozilla/UniquePtr.h"
#include "mozilla/WeakPtr.h"
@ -273,9 +272,6 @@ class WebRenderBridgeParent final
LayersId GetLayersId() const;
WRRootId GetWRRootId() const;
void SetCompositionRecorder(
RefPtr<layers::WebRenderCompositionRecorder>&& aRecorder);
private:
class ScheduleSharedSurfaceRelease;

View File

@ -1,156 +0,0 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "WebRenderCompositionRecorder.h"
#include "mozilla/webrender/RenderThread.h"
namespace mozilla {
namespace layers {
class RendererRecordedFrame final : public layers::RecordedFrame {
public:
RendererRecordedFrame(const TimeStamp& aTimeStamp, wr::Renderer* aRenderer,
const wr::RecordedFrameHandle aHandle,
const gfx::IntSize& aSize)
: RecordedFrame(aTimeStamp),
mRenderer(aRenderer),
mSize(aSize),
mHandle(aHandle) {}
already_AddRefed<gfx::DataSourceSurface> GetSourceSurface() override {
if (!mSurface) {
mSurface = gfx::Factory::CreateDataSourceSurface(
mSize, gfx::SurfaceFormat::B8G8R8A8, /* aZero = */ false);
gfx::DataSourceSurface::ScopedMap map(mSurface,
gfx::DataSourceSurface::WRITE);
if (!wr_renderer_map_recorded_frame(mRenderer, mHandle, map.GetData(),
mSize.width * mSize.height * 4,
mSize.width * 4)) {
return nullptr;
}
}
return do_AddRef(mSurface);
}
private:
wr::Renderer* mRenderer;
RefPtr<gfx::DataSourceSurface> mSurface;
gfx::IntSize mSize;
wr::RecordedFrameHandle mHandle;
};
void WebRenderCompositionRecorder::RecordFrame(RecordedFrame* aFrame) {
MOZ_CRASH(
"WebRenderCompositionRecorder::RecordFrame should not be called; call "
"MaybeRecordFrame instead.");
}
bool WebRenderCompositionRecorder::MaybeRecordFrame(
wr::Renderer* aRenderer, wr::WebRenderPipelineInfo* aFrameEpochs) {
MOZ_ASSERT(wr::RenderThread::IsInRenderThread());
if (!aRenderer || !aFrameEpochs) {
return false;
}
if (!mMutex.TryLock()) {
// If we cannot lock the mutex, then either (a) the |CompositorBridgeParent|
// is holding the mutex in |WriteCollectedFrames| or (b) the |RenderThread|
// is holding the mutex in |ForceFinishRecording|.
//
// In either case we do not want to wait to acquire the mutex to record a
// frame since frames recorded now will not be written to disk.
return false;
}
auto unlockGuard = MakeScopeExit([&]() { mMutex.Unlock(); });
if (mFinishedRecording) {
return true;
}
if (!DidPaintContent(aFrameEpochs)) {
return false;
}
wr::RecordedFrameHandle handle{0};
gfx::IntSize size(0, 0);
if (wr_renderer_record_frame(aRenderer, wr::ImageFormat::BGRA8, &handle,
&size.width, &size.height)) {
RefPtr<RecordedFrame> frame =
new RendererRecordedFrame(TimeStamp::Now(), aRenderer, handle, size);
CompositionRecorder::RecordFrame(frame);
}
return false;
}
void WebRenderCompositionRecorder::WriteCollectedFrames() {
MutexAutoLock guard(mMutex);
MOZ_RELEASE_ASSERT(
!mFinishedRecording,
"WebRenderCompositionRecorder: Attempting to write frames from invalid "
"state.");
CompositionRecorder::WriteCollectedFrames();
mFinishedRecording = true;
}
bool WebRenderCompositionRecorder::ForceFinishRecording() {
MutexAutoLock guard(mMutex);
bool wasRecording = !mFinishedRecording;
mFinishedRecording = true;
ClearCollectedFrames();
return wasRecording;
}
bool WebRenderCompositionRecorder::DidPaintContent(
wr::WebRenderPipelineInfo* aFrameEpochs) {
const wr::WrPipelineInfo& info = aFrameEpochs->Raw();
bool didPaintContent = false;
for (wr::usize i = 0; i < info.epochs.length; i++) {
const wr::PipelineId pipelineId = info.epochs.data[i].pipeline_id;
if (pipelineId == mRootPipelineId) {
continue;
}
const auto it = mContentPipelines.find(AsUint64(pipelineId));
if (it == mContentPipelines.end() ||
it->second != info.epochs.data[i].epoch) {
// This content pipeline has updated list last render or has newly
// rendered.
didPaintContent = true;
mContentPipelines[AsUint64(pipelineId)] = info.epochs.data[i].epoch;
}
}
for (wr::usize i = 0; i < info.removed_pipelines.length; i++) {
const wr::PipelineId pipelineId =
info.removed_pipelines.data[i].pipeline_id;
if (pipelineId == mRootPipelineId) {
continue;
}
mContentPipelines.erase(AsUint64(pipelineId));
}
return didPaintContent;
}
} // namespace layers
} // namespace mozilla

View File

@ -1,127 +0,0 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef mozilla_layers_WebRenderCompositionRecorder_h
#define mozilla_layers_WebRenderCompositionRecorder_h
#include "CompositionRecorder.h"
#include "mozilla/Mutex.h"
#include "mozilla/ScopeExit.h"
#include <unordered_map>
namespace mozilla {
namespace wr {
class WebRenderPipelineInfo;
}
namespace layers {
/**
* A thread-safe version of the |CompositionRecorder|.
*
* Composition recording for WebRender occurs on the |RenderThread| whereas the
* frames are written on the thread holding the |CompositorBridgeParent|.
*
*/
class WebRenderCompositionRecorder final : public CompositionRecorder {
public:
explicit WebRenderCompositionRecorder(TimeStamp aRecordingStart,
wr::WrPipelineId aRootPipelineId)
: CompositionRecorder(aRecordingStart),
mMutex("CompositionRecorder"),
mFinishedRecording(false),
mRootPipelineId(aRootPipelineId) {}
WebRenderCompositionRecorder() = delete;
WebRenderCompositionRecorder(WebRenderCompositionRecorder&) = delete;
WebRenderCompositionRecorder(WebRenderCompositionRecorder&&) = delete;
WebRenderCompositionRecorder& operator=(WebRenderCompositionRecorder&) =
delete;
WebRenderCompositionRecorder& operator=(WebRenderCompositionRecorder&&) =
delete;
/**
* Do not call this method.
*
* Instead, call |MaybeRecordFrame|, which will only attempt to record a
* frame if we have not yet written frames to disk.
*/
void RecordFrame(RecordedFrame* aFrame) override;
/**
* Write the collected frames to disk.
*
* This method should not be called if frames have already been written or if
* |ForceFinishRecording| has been called as the object will be in an invalid
* state to write to disk.
*
* Note: This method will block acquiring a lock.
*/
void WriteCollectedFrames() override;
/**
* Attempt to record a frame from the given renderer.
*
* This method will only record a frame if the following are true:
*
* - this object's lock was acquired immediately (i.e., we are not currently
* writing frames to disk);
* - we have not yet written frames to disk; and
* - one of the pipelines in |aFrameEpochs| has updated and it is not the
* root pipeline.
*
* Returns whether or not the recorder has finished recording frames. If
* true, it is safe to release both this object and Web Render's composition
* recorder structures.
*/
bool MaybeRecordFrame(wr::Renderer* aRenderer,
wr::WebRenderPipelineInfo* aFrameEpochs);
/**
* Force the composition recorder to finish recording.
*
* This should only be called if |WriteCollectedFrames| is not to be called,
* since the recorder will be in an invalid state to do so.
*
* This returns whether or not the recorder was recording before this method
* was called.
*
* Note: This method will block acquiring a lock.
*/
bool ForceFinishRecording();
protected:
~WebRenderCompositionRecorder() = default;
/**
* Determine if any content pipelines updated.
*/
bool DidPaintContent(wr::WebRenderPipelineInfo* aFrameEpochs);
private:
Mutex mMutex;
// Whether or not we have finished recording.
bool mFinishedRecording;
// The id of the root WebRender pipeline.
//
// All other pipelines are considered content.
wr::PipelineId mRootPipelineId;
// A mapping of wr::PipelineId to the epochs when last they updated.
//
// We need to use uint64_t here since wr::PipelineId is not default
// constructable.
std::unordered_map<uint64_t, wr::Epoch> mContentPipelines;
};
} // namespace layers
} // namespace mozilla
#endif // mozilla_layers_WebRenderCompositionRecorder_h

View File

@ -198,7 +198,6 @@ void RenderThread::RemoveRenderer(wr::WindowId aWindowId) {
}
mRenderers.erase(aWindowId);
mCompositionRecorders.erase(aWindowId);
if (mRenderers.size() == 0 && mHandlingDeviceReset) {
mHandlingDeviceReset = false;
@ -230,37 +229,6 @@ size_t RenderThread::RendererCount() {
return mRenderers.size();
}
void RenderThread::SetCompositionRecorderForWindow(
wr::WindowId aWindowId,
RefPtr<layers::WebRenderCompositionRecorder>&& aCompositionRecorder) {
MOZ_ASSERT(IsInRenderThread());
MOZ_ASSERT(GetRenderer(aWindowId));
auto it = mCompositionRecorders.find(aWindowId);
if (it != mCompositionRecorders.end() && it->second->ForceFinishRecording()) {
// This case should never occur since the |CompositorBridgeParent| will
// receive its "EndRecording" IPC message before another "BeginRecording"
// IPC message.
//
// However, if we do hit this case, then we should handle it gracefully.
// We free the structures here because any captured frames are not going
// to be read back.
if (RendererOGL* renderer = GetRenderer(aWindowId)) {
wr_renderer_release_composition_recorder_structures(
renderer->GetRenderer());
}
}
// If we have finished recording, then we have received
// |SetCompositionRecorderEvent| after the compositor bridge parent finished
// writing but before we handled another frame to delete the data structure.
//
// In this case we do not need to free the |wr::Renderer|'s composition
// recorder structures since we can re-use them.
mCompositionRecorders[aWindowId] = std::move(aCompositionRecorder);
}
void RenderThread::HandleFrame(wr::WindowId aWindowId, bool aRender) {
if (mHasShutdown) {
return;
@ -414,28 +382,13 @@ void RenderThread::UpdateAndRender(
renderer->CheckGraphicsResetStatus();
TimeStamp end = TimeStamp::Now();
RefPtr<WebRenderPipelineInfo> info = renderer->FlushPipelineInfo();
auto info = renderer->FlushPipelineInfo();
layers::CompositorThreadHolder::Loop()->PostTask(
NewRunnableFunction("NotifyDidRenderRunnable", &NotifyDidRender,
renderer->GetCompositorBridge(), info, aStartId,
aStartTime, start, end, aRender, stats));
if (rendered) {
auto recorderIt = mCompositionRecorders.find(aWindowId);
if (recorderIt != mCompositionRecorders.end()) {
bool shouldRelease = recorderIt->second->MaybeRecordFrame(
renderer->GetRenderer(), info.get());
if (shouldRelease) {
mCompositionRecorders.erase(recorderIt);
wr_renderer_release_composition_recorder_structures(
renderer->GetRenderer());
}
}
}
if (rendered) {
// Wait for GPU after posting NotifyDidRender, since the wait is not
// necessary for the NotifyDidRender.

View File

@ -20,7 +20,6 @@
#include "mozilla/UniquePtr.h"
#include "mozilla/webrender/WebRenderTypes.h"
#include "mozilla/layers/SynchronousTask.h"
#include "mozilla/layers/WebRenderCompositionRecorder.h"
#include "mozilla/VsyncDispatcher.h"
#include <list>
@ -257,10 +256,6 @@ class RenderThread final {
size_t RendererCount();
void SetCompositionRecorderForWindow(
wr::WindowId aWindowId,
RefPtr<layers::WebRenderCompositionRecorder>&& aCompositionRecorder);
private:
explicit RenderThread(base::Thread* aThread);
@ -285,8 +280,6 @@ class RenderThread final {
RefPtr<gl::GLContext> mSharedGL;
std::map<wr::WindowId, UniquePtr<RendererOGL>> mRenderers;
std::map<wr::WindowId, RefPtr<layers::WebRenderCompositionRecorder>>
mCompositionRecorders;
struct WindowInfo {
bool mIsDestroyed = false;

View File

@ -577,34 +577,6 @@ void WebRenderAPI::Capture() {
wr_api_capture(mDocHandle, path, bits);
}
void WebRenderAPI::SetCompositionRecorder(
RefPtr<layers::WebRenderCompositionRecorder>&& aRecorder) {
class SetCompositionRecorderEvent final : public RendererEvent {
public:
explicit SetCompositionRecorderEvent(
RefPtr<layers::WebRenderCompositionRecorder>&& aRecorder)
: mRecorder(std::move(aRecorder)) {
MOZ_COUNT_CTOR(SetCompositionRecorderEvent);
}
~SetCompositionRecorderEvent() {
MOZ_COUNT_DTOR(SetCompositionRecorderEvent);
}
void Run(RenderThread& aRenderThread, WindowId aWindowId) override {
MOZ_ASSERT(mRecorder);
aRenderThread.SetCompositionRecorderForWindow(aWindowId,
std::move(mRecorder));
}
private:
RefPtr<layers::WebRenderCompositionRecorder> mRecorder;
};
auto event = MakeUnique<SetCompositionRecorderEvent>(std::move(aRecorder));
RunOnRenderThread(std::move(event));
}
void TransactionBuilder::Clear() { wr_resource_updates_clear(mTxn); }
void TransactionBuilder::Notify(wr::Checkpoint aWhen,

View File

@ -16,7 +16,6 @@
#include "mozilla/layers/IpcResourceUpdateQueue.h"
#include "mozilla/layers/ScrollableLayerGuid.h"
#include "mozilla/layers/SyncObject.h"
#include "mozilla/layers/WebRenderCompositionRecorder.h"
#include "mozilla/Range.h"
#include "mozilla/webrender/webrender_ffi.h"
#include "mozilla/webrender/WebRenderTypes.h"
@ -259,9 +258,6 @@ class WebRenderAPI final {
void Capture();
void SetCompositionRecorder(
RefPtr<layers::WebRenderCompositionRecorder>&& aRecorder);
protected:
WebRenderAPI(wr::DocumentHandle* aHandle, wr::WindowId aId,
uint32_t aMaxTextureSize, bool aUseANGLE, bool aUseDComp,

View File

@ -23,7 +23,7 @@ use gleam::gl;
use webrender::{
api::*, api::units::*, ApiRecordingReceiver, AsyncPropertySampler, AsyncScreenshotHandle,
BinaryRecorder, DebugFlags, Device, ExternalImage, ExternalImageHandler, ExternalImageSource,
PipelineInfo, ProfilerHooks, RecordedFrameHandle, Renderer, RendererOptions, RendererStats,
PipelineInfo, ProfilerHooks, Renderer, RendererOptions, RendererStats,
SceneBuilderHooks, ShaderPrecacheFlags, Shaders, ThreadListener, UploadMethod, VertexUsageHint,
WrShaders, set_profiler_hooks,
};
@ -74,7 +74,6 @@ pub enum OpacityType {
/// cbindgen:field-names=[mHandle]
/// cbindgen:derive-lt=true
/// cbindgen:derive-lte=true
/// cbindgen:derive-neq=true
type WrEpoch = Epoch;
/// cbindgen:field-names=[mHandle]
/// cbindgen:derive-lt=true
@ -652,47 +651,6 @@ pub extern "C" fn wr_renderer_render(renderer: &mut Renderer,
}
}
#[no_mangle]
pub extern "C" fn wr_renderer_record_frame(
renderer: &mut Renderer,
image_format: ImageFormat,
out_handle: &mut RecordedFrameHandle,
out_width: &mut i32,
out_height: &mut i32,
) -> bool {
if let Some((handle, size)) = renderer.record_frame(image_format) {
*out_handle = handle;
*out_width = size.width;
*out_height = size.height;
true
} else {
false
}
}
#[no_mangle]
pub extern "C" fn wr_renderer_map_recorded_frame(
renderer: &mut Renderer,
handle: RecordedFrameHandle,
dst_buffer: *mut u8,
dst_buffer_len: usize,
dst_stride: usize,
) -> bool {
renderer.map_recorded_frame(
handle,
unsafe { make_slice_mut(dst_buffer, dst_buffer_len) },
dst_stride,
)
}
#[no_mangle]
pub extern "C" fn wr_renderer_release_composition_recorder_structures(
renderer: &mut Renderer,
) {
renderer.release_composition_recorder_structures();
}
#[no_mangle]
pub extern "C" fn wr_renderer_get_screenshot_async(
renderer: &mut Renderer,

View File

@ -115,7 +115,6 @@ mod renderer;
mod resource_cache;
mod scene;
mod scene_builder;
mod screen_capture;
mod segment;
mod shade;
mod spatial_node;
@ -213,12 +212,10 @@ pub use crate::device::{ProgramBinary, ProgramCache, ProgramCacheObserver};
pub use crate::device::Device;
pub use crate::frame_builder::ChasePrimitive;
pub use crate::profiler::{ProfilerHooks, set_profiler_hooks};
pub use crate::renderer::{
AsyncPropertySampler, CpuProfile, DebugFlags, OutputImageHandler, RendererKind, ExternalImage,
ExternalImageHandler, ExternalImageSource, GpuProfile, GraphicsApi, GraphicsApiInfo,
PipelineInfo, Renderer, RendererOptions, RenderResults, RendererStats, SceneBuilderHooks,
ThreadListener, ShaderPrecacheFlags, MAX_VERTEX_TEXTURE_WIDTH,
};
pub use crate::screen_capture::{AsyncScreenshotHandle, RecordedFrameHandle};
pub use crate::renderer::{AsyncPropertySampler, AsyncScreenshotHandle, CpuProfile, DebugFlags};
pub use crate::renderer::{OutputImageHandler, RendererKind, ExternalImage, ExternalImageHandler};
pub use crate::renderer::{ExternalImageSource, GpuProfile, GraphicsApi, GraphicsApiInfo, PipelineInfo};
pub use crate::renderer::{Renderer, RendererOptions, RenderResults, RendererStats, SceneBuilderHooks};
pub use crate::renderer::{ThreadListener, ShaderPrecacheFlags, MAX_VERTEX_TEXTURE_WIDTH};
pub use crate::shade::{Shaders, WrShaders};
pub use api as webrender_api;

View File

@ -80,7 +80,6 @@ use rayon::{ThreadPool, ThreadPoolBuilder};
use crate::record::ApiRecordingReceiver;
use crate::render_backend::{FrameId, RenderBackend};
use crate::scene_builder::{SceneBuilder, LowPrioritySceneBuilder};
use crate::screen_capture::AsyncScreenshotGrabber;
use crate::shade::{Shaders, WrShaders};
use smallvec::SmallVec;
use crate::render_task::{RenderTask, RenderTaskData, RenderTaskKind, RenderTaskGraph};
@ -89,7 +88,7 @@ use crate::util::drain_filter;
use std;
use std::cmp;
use std::collections::VecDeque;
use std::collections::{HashMap, VecDeque};
use std::collections::hash_map::Entry;
use std::f32;
use std::marker::PhantomData;
@ -1596,6 +1595,254 @@ pub struct RendererVAOs {
resolve_vao: VAO,
}
#[repr(C)]
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
/// A handle to a screenshot that is being asynchronously captured and scaled.
pub struct AsyncScreenshotHandle(usize);
/// An asynchronously captured screenshot bound to a PBO which has not yet been mapped for copying.
struct AsyncScreenshot {
/// The PBO that will contain the screenshot data.
pbo: PBO,
/// The size of the screenshot.
screenshot_size: DeviceIntSize,
/// The image format of the screenshot.
image_format: ImageFormat,
}
/// Renderer infrastructure for capturing screenshots and scaling them asynchronously.
struct AsyncScreenshotGrabber {
/// The textures used to scale screenshots.
scaling_textures: Vec<Texture>,
/// PBOs available to be used for screenshot readback.
available_pbos: Vec<PBO>,
/// PBOs containing screenshots that are awaiting readback.
awaiting_readback: HashMap<AsyncScreenshotHandle, AsyncScreenshot>,
/// The handle for the next PBO that will be inserted into `awaiting_readback`.
next_pbo_handle: usize,
}
impl Default for AsyncScreenshotGrabber {
fn default() -> Self {
return AsyncScreenshotGrabber {
scaling_textures: Vec::new(),
available_pbos: Vec::new(),
awaiting_readback: HashMap::new(),
next_pbo_handle: 1,
}
}
}
impl AsyncScreenshotGrabber {
/// Deinitialize the allocated textures and PBOs.
pub fn deinit(self, device: &mut Device) {
for texture in self.scaling_textures {
device.delete_texture(texture);
}
for pbo in self.available_pbos {
device.delete_pbo(pbo);
}
for (_, async_screenshot) in self.awaiting_readback {
device.delete_pbo(async_screenshot.pbo);
}
}
/// Take a screenshot and scale it asynchronously.
///
/// The returned handle can be used to access the mapped screenshot data via
/// `map_and_recycle_screenshot`.
/// The returned size is the size of the screenshot.
pub fn get_screenshot(
&mut self,
device: &mut Device,
window_rect: DeviceIntRect,
buffer_size: DeviceIntSize,
image_format: ImageFormat,
) -> (AsyncScreenshotHandle, DeviceIntSize) {
let scale = (buffer_size.width as f32 / window_rect.size.width as f32)
.min(buffer_size.height as f32 / window_rect.size.height as f32);
let screenshot_size = (window_rect.size.to_f32() * scale).round().to_i32();
let required_size = buffer_size.area() as usize
* image_format.bytes_per_pixel() as usize;
assert!(screenshot_size.width <= buffer_size.width);
assert!(screenshot_size.height <= buffer_size.height);
let pbo = match self.available_pbos.pop() {
Some(pbo) => {
assert_eq!(pbo.get_reserved_size(), required_size);
pbo
}
None => device.create_pbo_with_size(required_size),
};
self.scale_screenshot(
device,
ReadTarget::Default,
window_rect,
buffer_size,
screenshot_size,
image_format,
0,
);
device.read_pixels_into_pbo(
ReadTarget::from_texture(
&self.scaling_textures[0],
0,
),
DeviceIntRect::new(DeviceIntPoint::new(0, 0), screenshot_size),
image_format,
&pbo,
);
let handle = AsyncScreenshotHandle(self.next_pbo_handle);
self.next_pbo_handle += 1;
self.awaiting_readback.insert(handle, AsyncScreenshot {
pbo,
screenshot_size,
image_format,
});
(handle, screenshot_size)
}
/// Take the screenshot in the given `ReadTarget` and scale it to `dest_size` recursively.
///
/// Each scaling operation scales only by a factor of two to preserve quality.
///
/// Textures are scaled such that `scaling_textures[n]` is half the size of
/// `scaling_textures[n+1]`.
///
/// After the scaling completes, the final screenshot will be in
/// `scaling_textures[0]`.
fn scale_screenshot(
&mut self,
device: &mut Device,
read_target: ReadTarget,
read_target_rect: DeviceIntRect,
buffer_size: DeviceIntSize,
dest_size: DeviceIntSize,
image_format: ImageFormat,
level: usize,
) {
let texture_size = buffer_size * (1 << level);
if level == self.scaling_textures.len() {
let texture = device.create_texture(
TextureTarget::Default,
image_format,
texture_size.width,
texture_size.height,
TextureFilter::Linear,
Some(RenderTargetInfo { has_depth: false }),
1,
);
self.scaling_textures.push(texture);
} else {
let current_texture_size = self.scaling_textures[level].get_dimensions();
assert_eq!(current_texture_size.width, texture_size.width);
assert_eq!(current_texture_size.height, texture_size.height);
}
let (read_target, read_target_rect) = if read_target_rect.size.width > 2 * dest_size.width {
self.scale_screenshot(
device,
read_target,
read_target_rect,
buffer_size,
dest_size * 2,
image_format,
level + 1,
);
(
ReadTarget::from_texture(
&self.scaling_textures[level + 1],
0,
),
DeviceIntRect::new(
DeviceIntPoint::new(0, 0),
dest_size * 2,
),
)
} else {
(read_target, read_target_rect)
};
let draw_target = DrawTarget::from_texture(
&self.scaling_textures[level],
0 as _,
false,
);
let draw_target_rect = draw_target.to_framebuffer_rect(
DeviceIntRect::new(DeviceIntPoint::new(0, 0), dest_size),
);
let read_target_rect = FramebufferIntRect::from_untyped(&read_target_rect.to_untyped());
if level == 0 {
device.blit_render_target_invert_y(
read_target,
read_target_rect,
draw_target,
draw_target_rect,
);
} else {
device.blit_render_target(
read_target,
read_target_rect,
draw_target,
draw_target_rect,
TextureFilter::Linear,
);
}
}
/// Map the contents of the screenshot given by the handle and copy it into the given buffer.
pub fn map_and_recycle_screenshot(
&mut self,
device: &mut Device,
handle: AsyncScreenshotHandle,
dst_buffer: &mut [u8],
dst_stride: usize,
) -> bool {
let AsyncScreenshot {
pbo,
screenshot_size,
image_format,
} = match self.awaiting_readback.remove(&handle) {
Some(async_screenshots) => async_screenshots,
None => return false,
};
let success = if let Some(bound_pbo) = device.map_pbo_for_readback(&pbo) {
let src_buffer = &bound_pbo.data;
let src_stride = screenshot_size.width as usize
* image_format.bytes_per_pixel() as usize;
for (src_slice, dst_slice) in src_buffer
.chunks(src_stride)
.zip(dst_buffer.chunks_mut(dst_stride))
.take(screenshot_size.height as usize)
{
dst_slice[..src_stride].copy_from_slice(src_slice);
}
true
} else {
false
};
self.available_pbos.push(pbo);
success
}
}
/// The renderer is responsible for submitting to the GPU the work prepared by the
/// RenderBackend.
@ -1680,8 +1927,7 @@ pub struct Renderer {
pub renderer_errors: Vec<RendererError>,
pub(in crate) async_frame_recorder: Option<AsyncScreenshotGrabber>,
pub(in crate) async_screenshots: Option<AsyncScreenshotGrabber>,
async_screenshots: Option<AsyncScreenshotGrabber>,
/// List of profile results from previous frames. Can be retrieved
/// via get_frame_profiles().
@ -2186,7 +2432,6 @@ impl Renderer {
texture_cache_upload_pbo,
texture_resolver,
renderer_errors: Vec::new(),
async_frame_recorder: None,
async_screenshots: None,
#[cfg(feature = "capture")]
read_fbo,
@ -2390,6 +2635,58 @@ impl Renderer {
}
}
/// Take a screenshot and scale it asynchronously.
///
/// The returned handle can be used to access the mapped screenshot data via
/// `map_and_recycle_screenshot`.
/// The returned size is the size of the screenshot.
pub fn get_screenshot_async(
&mut self,
window_rect: DeviceIntRect,
buffer_size: DeviceIntSize,
image_format: ImageFormat,
) -> (AsyncScreenshotHandle, DeviceIntSize) {
self.device.begin_frame();
let handle = self.async_screenshots
.get_or_insert_with(AsyncScreenshotGrabber::default)
.get_screenshot(&mut self.device,
window_rect,
buffer_size,
image_format);
self.device.end_frame();
handle
}
/// Map the contents of the screenshot given by the handle and copy it into the given buffer.
pub fn map_and_recycle_screenshot(
&mut self,
handle: AsyncScreenshotHandle,
dst_buffer: &mut [u8],
dst_stride: usize,
) -> bool {
if let Some(async_screenshots) = self.async_screenshots.as_mut() {
async_screenshots.map_and_recycle_screenshot(
&mut self.device,
handle,
dst_buffer,
dst_stride,
)
} else {
false
}
}
pub fn release_profiler_structures(&mut self) {
if let Some(async_screenshots) = self.async_screenshots.take() {
self.device.begin_frame();
async_screenshots.deinit(&mut self.device);
self.device.end_frame();
}
}
#[cfg(not(feature = "debugger"))]
fn get_screenshot_for_debugger(&mut self) -> String {
// Avoid unused param warning.
@ -5948,6 +6245,7 @@ fn get_vao<'a>(vertex_array_kind: VertexArrayKind,
VertexArrayKind::Resolve => &vaos.resolve_vao,
}
}
#[derive(Clone, Copy, PartialEq)]
enum FramebufferKind {
Main,

View File

@ -1,425 +0,0 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Screen capture infrastructure for the Gecko Profiler and Composition Recorder.
use std::collections::HashMap;
use api::{ImageFormat, TextureTarget};
use api::units::*;
use crate::device::{Device, PBO, DrawTarget, ReadTarget, Texture, TextureFilter};
use crate::internal_types::RenderTargetInfo;
use crate::renderer::Renderer;
/// Identifies an in-flight asynchronous screenshot capture.
///
/// Returned by `AsyncScreenshotGrabber::get_screenshot` and later redeemed
/// via `map_and_recycle_screenshot` to read the pixels back.
#[repr(C)]
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub struct AsyncScreenshotHandle(usize);
/// Identifies a frame that was captured on behalf of the composition
/// recorder.
///
/// Redeemed via `Renderer::map_recorded_frame` to copy the frame's pixels
/// into a caller-supplied buffer.
#[repr(C)]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct RecordedFrameHandle(usize);
/// An asynchronously captured screenshot bound to a PBO which has not yet been mapped for copying.
struct AsyncScreenshot {
    /// The PBO that will contain the screenshot data.
    pbo: PBO,
    /// The size of the screenshot.
    screenshot_size: DeviceIntSize,
    /// The image format of the screenshot.
    image_format: ImageFormat,
}
/// Selects how an `AsyncScreenshotGrabber` captures frames.
#[derive(Debug, PartialEq, Eq)]
enum AsyncScreenshotGrabberMode {
    /// Grab screenshots for the Gecko profiler, asynchronously scaling
    /// whatever is captured down to the requested buffer size.
    ProfilerScreenshots,

    /// Grab unscaled screenshots for the `CompositionRecorder`.
    CompositionRecorder,
}
/// Renderer infrastructure for capturing screenshots and scaling them asynchronously.
pub(in crate) struct AsyncScreenshotGrabber {
    /// The textures used to scale screenshots.
    scaling_textures: Vec<Texture>,
    /// PBOs available to be used for screenshot readback.
    available_pbos: Vec<PBO>,
    /// PBOs containing screenshots that are awaiting readback.
    awaiting_readback: HashMap<AsyncScreenshotHandle, AsyncScreenshot>,
    /// The handle that will be assigned to the next captured screenshot
    /// inserted into `awaiting_readback`.
    next_pbo_handle: usize,
    /// The mode the grabber operates in.
    mode: AsyncScreenshotGrabberMode,
}
impl Default for AsyncScreenshotGrabber {
    /// Create a grabber in `ProfilerScreenshots` mode with no GPU resources
    /// allocated yet; textures and PBOs are created lazily on first capture.
    fn default() -> Self {
        // Handles start at 1 so a zeroed handle is never a valid one.
        AsyncScreenshotGrabber {
            scaling_textures: Vec::new(),
            available_pbos: Vec::new(),
            awaiting_readback: HashMap::new(),
            next_pbo_handle: 1,
            mode: AsyncScreenshotGrabberMode::ProfilerScreenshots,
        }
    }
}
impl AsyncScreenshotGrabber {
    /// Create a new `AsyncScreenshotGrabber` for the composition recorder.
    ///
    /// Unlike the default (profiler) grabber, this one does not scale captures.
    pub fn new_composition_recorder() -> Self {
        let mut recorder = Self::default();
        recorder.mode = AsyncScreenshotGrabberMode::CompositionRecorder;
        recorder
    }

    /// Deinitialize the allocated textures and PBOs, consuming the grabber.
    pub fn deinit(self, device: &mut Device) {
        for texture in self.scaling_textures {
            device.delete_texture(texture);
        }

        for pbo in self.available_pbos {
            device.delete_pbo(pbo);
        }

        // PBOs whose screenshots were never read back still own GPU buffers;
        // free those too.
        for (_, async_screenshot) in self.awaiting_readback {
            device.delete_pbo(async_screenshot.pbo);
        }
    }

    /// Take a screenshot and scale it asynchronously.
    ///
    /// The returned handle can be used to access the mapped screenshot data via
    /// `map_and_recycle_screenshot`.
    /// The returned size is the size of the screenshot.
    pub fn get_screenshot(
        &mut self,
        device: &mut Device,
        window_rect: DeviceIntRect,
        buffer_size: DeviceIntSize,
        image_format: ImageFormat,
    ) -> (AsyncScreenshotHandle, DeviceIntSize) {
        let screenshot_size = match self.mode {
            AsyncScreenshotGrabberMode::ProfilerScreenshots => {
                // Scale uniformly so the window fits inside `buffer_size`
                // while preserving its aspect ratio.
                let scale = (buffer_size.width as f32 / window_rect.size.width as f32)
                    .min(buffer_size.height as f32 / window_rect.size.height as f32);
                (window_rect.size.to_f32() * scale).round().to_i32()
            }
            AsyncScreenshotGrabberMode::CompositionRecorder => {
                // The composition recorder captures at the window's exact size.
                assert_eq!(buffer_size, window_rect.size);
                buffer_size
            }
        };
        // Size the PBO for the full caller-supplied buffer, not just the
        // (possibly smaller) screenshot.
        let required_size = buffer_size.area() as usize * image_format.bytes_per_pixel() as usize;
        assert!(screenshot_size.width <= buffer_size.width);
        assert!(screenshot_size.height <= buffer_size.height);
        let pbo = match self.mode {
            // Profiler mode recycles PBOs returned by
            // `map_and_recycle_screenshot`; all recycled PBOs share one size.
            AsyncScreenshotGrabberMode::ProfilerScreenshots => match self.available_pbos.pop() {
                Some(pbo) => {
                    assert_eq!(pbo.get_reserved_size(), required_size);
                    pbo
                }
                None => device.create_pbo_with_size(required_size),
            },
            // When operating in the `CompositionRecorder` mode, PBOs are not mapped for readback
            // until the recording has completed, so `self.available_pbos` will always be empty.
            AsyncScreenshotGrabberMode::CompositionRecorder => {
                device.create_pbo_with_size(required_size)
            }
        };
        let read_target = match self.mode {
            AsyncScreenshotGrabberMode::ProfilerScreenshots => {
                // Scale down into `scaling_textures[0]` first, then read the
                // pixels back from that texture rather than the framebuffer.
                self.scale_screenshot(
                    device,
                    ReadTarget::Default,
                    window_rect,
                    buffer_size,
                    screenshot_size,
                    image_format,
                    0,
                );
                ReadTarget::from_texture(&self.scaling_textures[0], 0)
            }
            AsyncScreenshotGrabberMode::CompositionRecorder => ReadTarget::Default,
        };
        device.read_pixels_into_pbo(
            read_target,
            DeviceIntRect::new(DeviceIntPoint::new(0, 0), screenshot_size),
            image_format,
            &pbo,
        );
        let handle = AsyncScreenshotHandle(self.next_pbo_handle);
        self.next_pbo_handle += 1;
        self.awaiting_readback.insert(
            handle,
            AsyncScreenshot {
                pbo,
                screenshot_size,
                image_format,
            },
        );
        (handle, screenshot_size)
    }

    /// Take the screenshot in the given `ReadTarget` and scale it to `dest_size` recursively.
    ///
    /// Each scaling operation scales only by a factor of two to preserve quality.
    ///
    /// Textures are scaled such that `scaling_textures[n]` is half the size of
    /// `scaling_textures[n+1]`.
    ///
    /// After the scaling completes, the final screenshot will be in
    /// `scaling_textures[0]`.
    fn scale_screenshot(
        &mut self,
        device: &mut Device,
        read_target: ReadTarget,
        read_target_rect: DeviceIntRect,
        buffer_size: DeviceIntSize,
        dest_size: DeviceIntSize,
        image_format: ImageFormat,
        level: usize,
    ) {
        // Only the profiler mode scales; the composition recorder never calls this.
        assert_eq!(self.mode, AsyncScreenshotGrabberMode::ProfilerScreenshots);
        let texture_size = buffer_size * (1 << level);
        // Lazily allocate the scaling texture for this level on first use;
        // afterwards its dimensions must remain stable across captures.
        if level == self.scaling_textures.len() {
            let texture = device.create_texture(
                TextureTarget::Default,
                image_format,
                texture_size.width,
                texture_size.height,
                TextureFilter::Linear,
                Some(RenderTargetInfo { has_depth: false }),
                1,
            );
            self.scaling_textures.push(texture);
        } else {
            let current_texture_size = self.scaling_textures[level].get_dimensions();
            assert_eq!(current_texture_size.width, texture_size.width);
            assert_eq!(current_texture_size.height, texture_size.height);
        }
        // If the source is more than twice the destination width, recurse to
        // halve it first so that each individual blit scales by at most 2x.
        let (read_target, read_target_rect) = if read_target_rect.size.width > 2 * dest_size.width {
            self.scale_screenshot(
                device,
                read_target,
                read_target_rect,
                buffer_size,
                dest_size * 2,
                image_format,
                level + 1,
            );
            (
                ReadTarget::from_texture(&self.scaling_textures[level + 1], 0),
                DeviceIntRect::new(DeviceIntPoint::new(0, 0), dest_size * 2),
            )
        } else {
            (read_target, read_target_rect)
        };
        let draw_target = DrawTarget::from_texture(&self.scaling_textures[level], 0 as _, false);
        let draw_target_rect = draw_target
            .to_framebuffer_rect(DeviceIntRect::new(DeviceIntPoint::new(0, 0), dest_size));
        let read_target_rect = FramebufferIntRect::from_untyped(&read_target_rect.to_untyped());
        // The final (level-0) blit uses the y-inverting variant — presumably
        // to undo GL's flipped y-axis for readback; confirm against Device.
        if level == 0 {
            device.blit_render_target_invert_y(
                read_target,
                read_target_rect,
                draw_target,
                draw_target_rect,
            );
        } else {
            device.blit_render_target(
                read_target,
                read_target_rect,
                draw_target,
                draw_target_rect,
                TextureFilter::Linear,
            );
        }
    }

    /// Map the contents of the screenshot given by the handle and copy it into
    /// the given buffer.
    ///
    /// Returns `false` if the handle is unknown or the PBO could not be mapped.
    pub fn map_and_recycle_screenshot(
        &mut self,
        device: &mut Device,
        handle: AsyncScreenshotHandle,
        dst_buffer: &mut [u8],
        dst_stride: usize,
    ) -> bool {
        let AsyncScreenshot {
            pbo,
            screenshot_size,
            image_format,
        } = match self.awaiting_readback.remove(&handle) {
            Some(screenshot) => screenshot,
            None => return false,
        };
        let success = if let Some(bound_pbo) = device.map_pbo_for_readback(&pbo) {
            let src_buffer = &bound_pbo.data;
            let src_stride =
                screenshot_size.width as usize * image_format.bytes_per_pixel() as usize;
            // Copy row by row: the source and destination strides may differ.
            for (src_slice, dst_slice) in src_buffer
                .chunks(src_stride)
                .zip(dst_buffer.chunks_mut(dst_stride))
                .take(screenshot_size.height as usize)
            {
                dst_slice[.. src_stride].copy_from_slice(src_slice);
            }
            true
        } else {
            false
        };
        // Profiler mode recycles the PBO for the next capture; recorder mode
        // never reuses PBOs (see `get_screenshot`), so free it here.
        match self.mode {
            AsyncScreenshotGrabberMode::ProfilerScreenshots => self.available_pbos.push(pbo),
            AsyncScreenshotGrabberMode::CompositionRecorder => device.delete_pbo(pbo),
        }
        success
    }
}
// Screen-capture specific Renderer impls.
impl Renderer {
    /// Record a frame for the Composition Recorder.
    ///
    /// The returned handle can be passed to `map_recorded_frame` to copy the
    /// frame into a buffer; the returned size is the size of the frame.
    /// Returns `None` when the renderer has no device size.
    pub fn record_frame(
        &mut self,
        image_format: ImageFormat,
    ) -> Option<(RecordedFrameHandle, DeviceIntSize)> {
        let device_size = self.device_size()?;
        self.device.begin_frame();

        // The recorder always captures the full window, unscaled.
        let full_window = DeviceIntRect::new(DeviceIntPoint::new(0, 0), device_size);
        let recorder = self
            .async_frame_recorder
            .get_or_insert_with(AsyncScreenshotGrabber::new_composition_recorder);
        let (handle, _) =
            recorder.get_screenshot(&mut self.device, full_window, device_size, image_format);

        self.device.end_frame();
        Some((RecordedFrameHandle(handle.0), device_size))
    }

    /// Copy a frame captured for the composition recorder into `dst_buffer`.
    ///
    /// Returns `false` when no recorder exists or the handle is unknown.
    pub fn map_recorded_frame(
        &mut self,
        handle: RecordedFrameHandle,
        dst_buffer: &mut [u8],
        dst_stride: usize,
    ) -> bool {
        match self.async_frame_recorder.as_mut() {
            Some(recorder) => recorder.map_and_recycle_screenshot(
                &mut self.device,
                AsyncScreenshotHandle(handle.0),
                dst_buffer,
                dst_stride,
            ),
            None => false,
        }
    }

    /// Free the data structures used by the composition recorder.
    pub fn release_composition_recorder_structures(&mut self) {
        if let Some(recorder) = self.async_frame_recorder.take() {
            self.device.begin_frame();
            recorder.deinit(&mut self.device);
            self.device.end_frame();
        }
    }

    /// Take a screenshot and scale it asynchronously.
    ///
    /// The pixel data can later be retrieved with `map_and_recycle_screenshot`
    /// via the returned handle; the returned size is that of the screenshot.
    pub fn get_screenshot_async(
        &mut self,
        window_rect: DeviceIntRect,
        buffer_size: DeviceIntSize,
        image_format: ImageFormat,
    ) -> (AsyncScreenshotHandle, DeviceIntSize) {
        self.device.begin_frame();

        let grabber = self
            .async_screenshots
            .get_or_insert_with(AsyncScreenshotGrabber::default);
        let result =
            grabber.get_screenshot(&mut self.device, window_rect, buffer_size, image_format);

        self.device.end_frame();
        result
    }

    /// Copy the contents of the screenshot given by the handle into
    /// `dst_buffer`, recycling its PBO.
    pub fn map_and_recycle_screenshot(
        &mut self,
        handle: AsyncScreenshotHandle,
        dst_buffer: &mut [u8],
        dst_stride: usize,
    ) -> bool {
        match self.async_screenshots.as_mut() {
            Some(grabber) => grabber.map_and_recycle_screenshot(
                &mut self.device,
                handle,
                dst_buffer,
                dst_stride,
            ),
            None => false,
        }
    }

    /// Release the screenshot grabbing structures that the profiler was using.
    pub fn release_profiler_structures(&mut self) {
        if let Some(grabber) = self.async_screenshots.take() {
            self.device.begin_frame();
            grabber.deinit(&mut self.device);
            self.device.end_frame();
        }
    }
}