Backed out changeset 93f4eafb8beb (bug 1256693) for media mochitest leaks CLOSED TREE
MozReview-Commit-ID: 1K6kSNA28N9
parent 85732789fb
commit 733e78634f
@@ -115,11 +115,10 @@ BufferTextureData::Create(gfx::IntSize aSize, gfx::SurfaceFormat aFormat,
if (!aAllocator || aAllocator->IsSameProcess()) {
return MemoryTextureData::Create(aSize, aFormat, aMoz2DBackend, aFlags,
aAllocFlags, aAllocator);
} else if (aAllocator->AsShmemAllocator()) {
} else {
return ShmemTextureData::Create(aSize, aFormat, aMoz2DBackend, aFlags,
aAllocFlags, aAllocator);
}
return nullptr;
}

BufferTextureData*
@@ -138,15 +137,14 @@ BufferTextureData::CreateInternal(ISurfaceAllocator* aAllocator,
GfxMemoryImageReporter::DidAlloc(buffer);

return new MemoryTextureData(aDesc, aMoz2DBackend, buffer, aBufferSize);
} else if (aAllocator->AsShmemAllocator()) {
} else {
ipc::Shmem shm;
if (!aAllocator->AsShmemAllocator()->AllocUnsafeShmem(aBufferSize, OptimalShmemType(), &shm)) {
if (!aAllocator->AllocUnsafeShmem(aBufferSize, OptimalShmemType(), &shm)) {
return nullptr;
}

return new ShmemTextureData(aDesc, aMoz2DBackend, shm);
}
return nullptr;
}

BufferTextureData*
@@ -495,7 +493,7 @@ ShmemTextureData::Create(gfx::IntSize aSize, gfx::SurfaceFormat aFormat,
// Should have used CreateForYCbCr.
MOZ_ASSERT(aFormat != gfx::SurfaceFormat::YUV);

if (!aAllocator || !aAllocator->AsShmemAllocator()) {
if (!aAllocator) {
return nullptr;
}

@@ -510,7 +508,7 @@ ShmemTextureData::Create(gfx::IntSize aSize, gfx::SurfaceFormat aFormat,
}

mozilla::ipc::Shmem shm;
if (!aAllocator->AsShmemAllocator()->AllocUnsafeShmem(bufSize, OptimalShmemType(), &shm)) {
if (!aAllocator->AllocUnsafeShmem(bufSize, OptimalShmemType(), &shm)) {
return nullptr;
}

@@ -543,7 +541,7 @@ ShmemTextureData::CreateSimilar(ISurfaceAllocator* aAllocator,
void
ShmemTextureData::Deallocate(ISurfaceAllocator* aAllocator)
{
aAllocator->AsShmemAllocator()->DeallocShmem(mShmem);
aAllocator->DeallocShmem(mShmem);
}

} // namespace
@@ -270,7 +270,7 @@ DIBTextureData*
ShmemDIBTextureData::Create(gfx::IntSize aSize, gfx::SurfaceFormat aFormat,
ISurfaceAllocator* aAllocator)
{
MOZ_ASSERT(aAllocator->AsLayerForwarder()->GetParentPid() != base::ProcessId());
MOZ_ASSERT(aAllocator->ParentPid() != base::ProcessId());

DWORD mapSize = aSize.width * aSize.height * BytesPerPixel(aFormat);
HANDLE fileMapping = ::CreateFileMapping(INVALID_HANDLE_VALUE, NULL, PAGE_READWRITE, 0, mapSize, NULL);
@@ -332,7 +332,7 @@ ShmemDIBTextureData::Create(gfx::IntSize aSize, gfx::SurfaceFormat aFormat,

HANDLE hostHandle = NULL;

if (!ipc::DuplicateHandle(fileMapping, aAllocator->AsLayerForwarder()->GetParentPid(),
if (!ipc::DuplicateHandle(fileMapping, aAllocator->ParentPid(),
&hostHandle, 0, DUPLICATE_SAME_ACCESS)) {
gfxCriticalError() << "Failed to duplicate handle to parent process for surface.";
::DeleteObject(bitmap);
@@ -538,7 +538,7 @@ ClientLayerManager::MakeSnapshotIfRequired()
DrawOptions(1.0f, CompositionOp::OP_OVER));
dt->SetTransform(oldMatrix);
}
mForwarder->DestroySurfaceDescriptor(&inSnapshot);
mForwarder->DestroySharedSurface(&inSnapshot);
}
}
}
@@ -62,12 +62,12 @@ RemoveTextureFromCompositableTracker::ReleaseTextureClient()
{
if (mTextureClient &&
mTextureClient->GetAllocator() &&
!mTextureClient->GetAllocator()->UsesImageBridge())
!mTextureClient->GetAllocator()->IsImageBridgeChild())
{
TextureClientReleaseTask* task = new TextureClientReleaseTask(mTextureClient);
RefPtr<ISurfaceAllocator> allocator = mTextureClient->GetAllocator();
mTextureClient = nullptr;
allocator->AsClientAllocator()->GetMessageLoop()->PostTask(FROM_HERE, task);
allocator->GetMessageLoop()->PostTask(FROM_HERE, task);
} else {
mTextureClient = nullptr;
}
@@ -78,7 +78,8 @@ void
ImageClient::RemoveTextureWithWaiter(TextureClient* aTexture,
AsyncTransactionWaiter* aAsyncTransactionWaiter)
{
if ((aAsyncTransactionWaiter || GetForwarder()->UsesImageBridge())
if ((aAsyncTransactionWaiter ||
GetForwarder()->IsImageBridgeChild())
#ifndef MOZ_WIDGET_GONK
// If the texture client is taking part in recycling then we should make sure
// the host has finished with it before dropping the ref and triggering
@@ -249,7 +249,7 @@ DeallocateTextureClient(TextureDeallocParams params)
MessageLoop* ipdlMsgLoop = nullptr;

if (params.allocator) {
ipdlMsgLoop = params.allocator->AsClientAllocator()->GetMessageLoop();
ipdlMsgLoop = params.allocator->GetMessageLoop();
if (!ipdlMsgLoop) {
// An allocator with no message loop means we are too late in the shutdown
// sequence.
@@ -697,7 +697,7 @@ TextureClient::SetRecycleAllocator(ITextureClientRecycleAllocator* aAllocator)
bool
TextureClient::InitIPDLActor(CompositableForwarder* aForwarder)
{
MOZ_ASSERT(aForwarder && aForwarder->GetMessageLoop() == mAllocator->AsClientAllocator()->GetMessageLoop());
MOZ_ASSERT(aForwarder && aForwarder->GetMessageLoop() == mAllocator->GetMessageLoop());
if (mActor && !mActor->mDestroyed && mActor->GetForwarder() == aForwarder) {
return true;
}
@@ -395,8 +395,7 @@ gfxShmSharedReadLock::gfxShmSharedReadLock(ISurfaceAllocator* aAllocator)
MOZ_ASSERT(mAllocator);
if (mAllocator) {
#define MOZ_ALIGN_WORD(x) (((x) + 3) & ~3)
if (mAllocator->AsLayerForwarder()->GetTileLockAllocator()->AllocShmemSection(
MOZ_ALIGN_WORD(sizeof(ShmReadLockInfo)), &mShmemSection)) {
if (mAllocator->AllocShmemSection(MOZ_ALIGN_WORD(sizeof(ShmReadLockInfo)), &mShmemSection)) {
ShmReadLockInfo* info = GetShmReadLockInfoPtr();
info->readCount = 1;
mAllocSuccess = true;
@@ -428,13 +427,7 @@ gfxShmSharedReadLock::ReadUnlock() {
int32_t readCount = PR_ATOMIC_DECREMENT(&info->readCount);
MOZ_ASSERT(readCount >= 0);
if (readCount <= 0) {
auto fwd = mAllocator->AsLayerForwarder();
if (fwd) {
fwd->GetTileLockAllocator()->DeallocShmemSection(mShmemSection);
} else {
// we are on the compositor process
FixedSizeSmallShmemSectionAllocator::FreeShmemSection(mShmemSection);
}
mAllocator->FreeShmemSection(mShmemSection);
}
return readCount;
}
@@ -791,7 +791,7 @@ ShmemTextureHost::DeallocateSharedData()
if (mShmem) {
MOZ_ASSERT(mDeallocator,
"Shared memory would leak without a ISurfaceAllocator");
mDeallocator->AsShmemAllocator()->DeallocShmem(*mShmem);
mDeallocator->DeallocShmem(*mShmem);
mShmem = nullptr;
}
}
@@ -40,7 +40,7 @@ class PTextureChild;
* additionally forward modifications of the Layer tree).
* ImageBridgeChild is another CompositableForwarder.
*/
class CompositableForwarder : public ClientIPCAllocator
class CompositableForwarder : public ISurfaceAllocator
{
public:
@@ -141,6 +141,8 @@ public:
return mTextureFactoryIdentifier.mMaxTextureSize;
}

bool IsOnCompositorSide() const override { return false; }

/**
* Returns the type of backend that is used off the main thread.
* We only don't allow changing the backend type at runtime so this value can
@@ -48,6 +48,8 @@ protected:
bool ReceiveCompositableUpdate(const CompositableOperation& aEdit,
EditReplyVector& replyv);

bool IsOnCompositorSide() const override { return true; }

/**
* Return true if this protocol is asynchronous with respect to the content
* thread (ImageBridge for instance).
@@ -6,6 +27,
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "ISurfaceAllocator.h"
#include <sys/types.h>                  // for int32_t
#include "gfx2DGlue.h"                  // for IntSize
#include "gfxPlatform.h"                // for gfxPlatform, gfxImageFormat
#include "gfxSharedImageSurface.h"      // for gfxSharedImageSurface
#include "mozilla/Assertions.h"         // for MOZ_ASSERT, etc
#include "mozilla/Atomics.h"            // for PrimitiveIntrinsics
#include "mozilla/ipc/SharedMemory.h"   // for SharedMemory, etc
#include "mozilla/layers/LayersSurfaces.h"  // for SurfaceDescriptor, etc
#include "mozilla/layers/SharedBufferManagerChild.h"
#include "ShadowLayerUtils.h"
#include "mozilla/mozalloc.h"           // for operator delete[], etc
#include "nsAutoPtr.h"                  // for nsRefPtr, getter_AddRefs, etc
#include "nsDebug.h"                    // for NS_RUNTIMEABORT
#include "nsXULAppAPI.h"                // for XRE_GetProcessType, etc
#include "mozilla/ipc/Shmem.h"
#include "mozilla/layers/ImageDataSerializer.h"
#ifdef DEBUG
#include "prenv.h"
#endif

using namespace mozilla::ipc;

namespace mozilla {
namespace layers {
@@ -16,7 +37,355 @@ mozilla::Atomic<ptrdiff_t> GfxMemoryImageReporter::sAmount(0);

mozilla::ipc::SharedMemory::SharedMemoryType OptimalShmemType()
{
return ipc::SharedMemory::SharedMemoryType::TYPE_BASIC;
return mozilla::ipc::SharedMemory::TYPE_BASIC;
}

bool
IsSurfaceDescriptorValid(const SurfaceDescriptor& aSurface)
{
return aSurface.type() != SurfaceDescriptor::T__None &&
aSurface.type() != SurfaceDescriptor::Tnull_t;
}

ISurfaceAllocator::~ISurfaceAllocator()
{
// Check if we're not leaking..
MOZ_ASSERT(mUsedShmems.empty());
}

void
ISurfaceAllocator::Finalize()
{
ShrinkShmemSectionHeap();
}

static inline uint8_t*
GetAddressFromDescriptor(const SurfaceDescriptor& aDescriptor)
{
MOZ_ASSERT(IsSurfaceDescriptorValid(aDescriptor));
MOZ_RELEASE_ASSERT(aDescriptor.type() == SurfaceDescriptor::TSurfaceDescriptorBuffer);

auto memOrShmem = aDescriptor.get_SurfaceDescriptorBuffer().data();
if (memOrShmem.type() == MemoryOrShmem::TShmem) {
return memOrShmem.get_Shmem().get<uint8_t>();
} else {
return reinterpret_cast<uint8_t*>(memOrShmem.get_uintptr_t());
}
}

already_AddRefed<gfx::DrawTarget>
GetDrawTargetForDescriptor(const SurfaceDescriptor& aDescriptor, gfx::BackendType aBackend)
{
uint8_t* data = GetAddressFromDescriptor(aDescriptor);
auto rgb = aDescriptor.get_SurfaceDescriptorBuffer().desc().get_RGBDescriptor();
uint32_t stride = ImageDataSerializer::GetRGBStride(rgb);
return gfx::Factory::CreateDrawTargetForData(gfx::BackendType::CAIRO,
data, rgb.size(),
stride, rgb.format());
}

already_AddRefed<gfx::DataSourceSurface>
GetSurfaceForDescriptor(const SurfaceDescriptor& aDescriptor)
{
uint8_t* data = GetAddressFromDescriptor(aDescriptor);
auto rgb = aDescriptor.get_SurfaceDescriptorBuffer().desc().get_RGBDescriptor();
uint32_t stride = ImageDataSerializer::GetRGBStride(rgb);
return gfx::Factory::CreateWrappingDataSourceSurface(data, stride, rgb.size(),
rgb.format());
}

bool
ISurfaceAllocator::AllocSurfaceDescriptor(const gfx::IntSize& aSize,
gfxContentType aContent,
SurfaceDescriptor* aBuffer)
{
if (!IPCOpen()) {
return false;
}
return AllocSurfaceDescriptorWithCaps(aSize, aContent, DEFAULT_BUFFER_CAPS, aBuffer);
}

bool
ISurfaceAllocator::AllocSurfaceDescriptorWithCaps(const gfx::IntSize& aSize,
gfxContentType aContent,
uint32_t aCaps,
SurfaceDescriptor* aBuffer)
{
if (!IPCOpen()) {
return false;
}
gfx::SurfaceFormat format =
gfxPlatform::GetPlatform()->Optimal2DFormatForContent(aContent);
size_t size = ImageDataSerializer::ComputeRGBBufferSize(aSize, format);
if (!size) {
return false;
}

MemoryOrShmem bufferDesc;
if (IsSameProcess()) {
uint8_t* data = new (std::nothrow) uint8_t[size];
if (!data) {
return false;
}
GfxMemoryImageReporter::DidAlloc(data);
#ifdef XP_MACOSX
// Workaround a bug in Quartz where drawing an a8 surface to another a8
// surface with OP_SOURCE still requires the destination to be clear.
if (format == gfx::SurfaceFormat::A8) {
memset(data, 0, size);
}
#endif
bufferDesc = reinterpret_cast<uintptr_t>(data);
} else {

mozilla::ipc::SharedMemory::SharedMemoryType shmemType = OptimalShmemType();
mozilla::ipc::Shmem shmem;
if (!AllocUnsafeShmem(size, shmemType, &shmem)) {
return false;
}

bufferDesc = shmem;
}

// Use an intermediate buffer by default. Skipping the intermediate buffer is
// only possible in certain configurations so let's keep it simple here for now.
const bool hasIntermediateBuffer = true;
*aBuffer = SurfaceDescriptorBuffer(RGBDescriptor(aSize, format, hasIntermediateBuffer),
bufferDesc);

return true;
}

/* static */ bool
ISurfaceAllocator::IsShmem(SurfaceDescriptor* aSurface)
{
return aSurface && (aSurface->type() == SurfaceDescriptor::TSurfaceDescriptorBuffer)
&& (aSurface->get_SurfaceDescriptorBuffer().data().type() == MemoryOrShmem::TShmem);
}

void
ISurfaceAllocator::DestroySharedSurface(SurfaceDescriptor* aSurface)
{
MOZ_ASSERT(IPCOpen());
if (!IPCOpen()) {
return;
}

MOZ_ASSERT(aSurface);
if (!aSurface) {
return;
}
if (!IPCOpen()) {
return;
}
SurfaceDescriptorBuffer& desc = aSurface->get_SurfaceDescriptorBuffer();
switch (desc.data().type()) {
case MemoryOrShmem::TShmem: {
DeallocShmem(desc.data().get_Shmem());
break;
}
case MemoryOrShmem::Tuintptr_t: {
uint8_t* ptr = (uint8_t*)desc.data().get_uintptr_t();
GfxMemoryImageReporter::WillFree(ptr);
delete [] ptr;
break;
}
default:
NS_RUNTIMEABORT("surface type not implemented!");
}
*aSurface = SurfaceDescriptor();
}

// XXX - We should actually figure out the minimum shmem allocation size on
// a certain platform and use that.
const uint32_t sShmemPageSize = 4096;

#ifdef DEBUG
const uint32_t sSupportedBlockSize = 4;
#endif

enum AllocationStatus
{
STATUS_ALLOCATED,
STATUS_FREED
};

struct ShmemSectionHeapHeader
{
Atomic<uint32_t> mTotalBlocks;
Atomic<uint32_t> mAllocatedBlocks;
};

struct ShmemSectionHeapAllocation
{
Atomic<uint32_t> mStatus;
uint32_t mSize;
};

bool
ISurfaceAllocator::AllocShmemSection(size_t aSize, mozilla::layers::ShmemSection* aShmemSection)
{
MOZ_ASSERT(IPCOpen());
if (!IPCOpen()) {
return false;
}
// For now we only support sizes of 4. If we want to support different sizes
// some more complicated bookkeeping should be added.
MOZ_ASSERT(aSize == sSupportedBlockSize);
MOZ_ASSERT(aShmemSection);

uint32_t allocationSize = (aSize + sizeof(ShmemSectionHeapAllocation));

for (size_t i = 0; i < mUsedShmems.size(); i++) {
ShmemSectionHeapHeader* header = mUsedShmems[i].get<ShmemSectionHeapHeader>();
if ((header->mAllocatedBlocks + 1) * allocationSize + sizeof(ShmemSectionHeapHeader) < sShmemPageSize) {
aShmemSection->shmem() = mUsedShmems[i];
MOZ_ASSERT(mUsedShmems[i].IsWritable());
break;
}
}

if (!aShmemSection->shmem().IsWritable()) {
ipc::Shmem tmp;
if (!AllocUnsafeShmem(sShmemPageSize, ipc::SharedMemory::TYPE_BASIC, &tmp)) {
return false;
}

ShmemSectionHeapHeader* header = tmp.get<ShmemSectionHeapHeader>();
header->mTotalBlocks = 0;
header->mAllocatedBlocks = 0;

mUsedShmems.push_back(tmp);
aShmemSection->shmem() = tmp;
}

MOZ_ASSERT(aShmemSection->shmem().IsWritable());

ShmemSectionHeapHeader* header = aShmemSection->shmem().get<ShmemSectionHeapHeader>();
uint8_t* heap = aShmemSection->shmem().get<uint8_t>() + sizeof(ShmemSectionHeapHeader);

ShmemSectionHeapAllocation* allocHeader = nullptr;

if (header->mTotalBlocks > header->mAllocatedBlocks) {
// Search for the first available block.
for (size_t i = 0; i < header->mTotalBlocks; i++) {
allocHeader = reinterpret_cast<ShmemSectionHeapAllocation*>(heap);

if (allocHeader->mStatus == STATUS_FREED) {
break;
}
heap += allocationSize;
}
MOZ_ASSERT(allocHeader && allocHeader->mStatus == STATUS_FREED);
MOZ_ASSERT(allocHeader->mSize == sSupportedBlockSize);
} else {
heap += header->mTotalBlocks * allocationSize;

header->mTotalBlocks++;
allocHeader = reinterpret_cast<ShmemSectionHeapAllocation*>(heap);
allocHeader->mSize = aSize;
}

MOZ_ASSERT(allocHeader);
header->mAllocatedBlocks++;
allocHeader->mStatus = STATUS_ALLOCATED;

aShmemSection->size() = aSize;
aShmemSection->offset() = (heap + sizeof(ShmemSectionHeapAllocation)) - aShmemSection->shmem().get<uint8_t>();
ShrinkShmemSectionHeap();
return true;
}

void
ISurfaceAllocator::FreeShmemSection(mozilla::layers::ShmemSection& aShmemSection)
{
MOZ_ASSERT(IPCOpen());
if (!IPCOpen()) {
return;
}

MOZ_ASSERT(aShmemSection.size() == sSupportedBlockSize);
MOZ_ASSERT(aShmemSection.offset() < sShmemPageSize - sSupportedBlockSize);

ShmemSectionHeapAllocation* allocHeader =
reinterpret_cast<ShmemSectionHeapAllocation*>(aShmemSection.shmem().get<char>() +
aShmemSection.offset() -
sizeof(ShmemSectionHeapAllocation));

MOZ_ASSERT(allocHeader->mSize == aShmemSection.size());

DebugOnly<bool> success = allocHeader->mStatus.compareExchange(STATUS_ALLOCATED, STATUS_FREED);
// If this fails something really weird is going on.
MOZ_ASSERT(success);

ShmemSectionHeapHeader* header = aShmemSection.shmem().get<ShmemSectionHeapHeader>();
header->mAllocatedBlocks--;

ShrinkShmemSectionHeap();
}

void
ISurfaceAllocator::ShrinkShmemSectionHeap()
{
if (!IPCOpen()) {
return;
}

// The loop will terminate as we either increase i, or decrease size
// every time through.
size_t i = 0;
while (i < mUsedShmems.size()) {
ShmemSectionHeapHeader* header = mUsedShmems[i].get<ShmemSectionHeapHeader>();
if (header->mAllocatedBlocks == 0) {
DeallocShmem(mUsedShmems[i]);

// We don't particularly care about order, move the last one in the array
// to this position.
if (i < mUsedShmems.size() - 1) {
mUsedShmems[i] = mUsedShmems[mUsedShmems.size() - 1];
}
mUsedShmems.pop_back();
} else {
i++;
}
}
}

bool
ISurfaceAllocator::AllocGrallocBuffer(const gfx::IntSize& aSize,
uint32_t aFormat,
uint32_t aUsage,
MaybeMagicGrallocBufferHandle* aHandle)
{
MOZ_ASSERT(IPCOpen());
if (!IPCOpen()) {
return false;
}

return SharedBufferManagerChild::GetSingleton()->AllocGrallocBuffer(aSize, aFormat, aUsage, aHandle);
}

void
ISurfaceAllocator::DeallocGrallocBuffer(MaybeMagicGrallocBufferHandle* aHandle)
{
MOZ_ASSERT(IPCOpen());
if (!IPCOpen()) {
return;
}

SharedBufferManagerChild::GetSingleton()->DeallocGrallocBuffer(*aHandle);
}

void
ISurfaceAllocator::DropGrallocBuffer(MaybeMagicGrallocBufferHandle* aHandle)
{
MOZ_ASSERT(IPCOpen());
if (!IPCOpen()) {
return;
}

SharedBufferManagerChild::GetSingleton()->DropGrallocBuffer(*aHandle);
}

} // namespace layers
@@ -16,12 +16,11 @@
#include "mozilla/Atomics.h"            // for Atomic
#include "mozilla/layers/LayersMessages.h"  // for ShmemSection
#include "LayersTypes.h"
#include "gfxPrefs.h"
#include <vector>
#include "mozilla/layers/AtomicRefCountedWithFinalize.h"

/*
* FIXME [bjacob] *** PURE CRAZYNESS WARNING ***
* (I think that this doesn't apply anymore.)
*
* This #define is actually needed here, because subclasses of ISurfaceAllocator,
* namely ShadowLayerForwarder, will or will not override AllocGrallocBuffer
@@ -41,13 +40,8 @@ class DataSourceSurface;

namespace layers {

class MaybeMagicGrallocBufferHandle;
class CompositableForwarder;
class ShadowLayerForwarder;

class ShmemAllocator;
class ShmemSectionAllocator;
class LegacySurfaceDescriptorAllocator;
class ClientIPCAllocator;

enum BufferCapabilities {
DEFAULT_BUFFER_CAPS = 0,
@@ -65,7 +59,12 @@ class SurfaceDescriptor;

mozilla::ipc::SharedMemory::SharedMemoryType OptimalShmemType();
bool IsSurfaceDescriptorValid(const SurfaceDescriptor& aSurface);
bool IsSurfaceDescriptorOwned(const SurfaceDescriptor& aDescriptor);
bool ReleaseOwnedSurfaceDescriptor(const SurfaceDescriptor& aDescriptor);

already_AddRefed<gfx::DrawTarget> GetDrawTargetForDescriptor(const SurfaceDescriptor& aDescriptor, gfx::BackendType aBackend);
already_AddRefed<gfx::DataSourceSurface> GetSurfaceForDescriptor(const SurfaceDescriptor& aDescriptor);
/**
* An interface used to create and destroy surfaces that are shared with the
* Compositor process (using shmem, or gralloc, or other platform specific memory)
@@ -79,106 +78,106 @@ class ISurfaceAllocator : public AtomicRefCountedWithFinalize<ISurfaceAllocator>
{
public:
MOZ_DECLARE_REFCOUNTED_TYPENAME(ISurfaceAllocator)
ISurfaceAllocator()
: mDefaultMessageLoop(MessageLoop::current())
{}

// down-casting
void Finalize();

virtual ShmemAllocator* AsShmemAllocator() { return nullptr; }

virtual ShmemSectionAllocator* AsShmemSectionAllocator() { return nullptr; }

virtual CompositableForwarder* AsCompositableForwarder() { return nullptr; }

virtual ShadowLayerForwarder* AsLayerForwarder() { return nullptr; }

virtual ClientIPCAllocator* AsClientAllocator() { return nullptr; }

virtual LegacySurfaceDescriptorAllocator*
AsLegacySurfaceDescriptorAllocator() { return nullptr; }

// ipc info

virtual bool IPCOpen() const { return true; }

virtual bool IsSameProcess() const = 0;

virtual bool UsesImageBridge() const { return false; }

protected:
void Finalize() {}

virtual ~ISurfaceAllocator() {}

friend class AtomicRefCountedWithFinalize<ISurfaceAllocator>;
};

/// Methods that are specific to the client/child side.
class ClientIPCAllocator : public ISurfaceAllocator
{
public:
virtual ClientIPCAllocator* AsClientAllocator() override { return this; }

virtual MessageLoop * GetMessageLoop() const = 0;

virtual int32_t GetMaxTextureSize() const { return gfxPrefs::MaxTextureSize(); }
};

/// An allocator can provide shared memory.
///
/// The allocated shmems can be deallocated on either process, as long as they
/// belong to the same channel.
class ShmemAllocator
{
public:
/**
* Allocate shared memory that can be accessed by only one process at a time.
* Ownership of this memory is passed when the memory is sent in an IPDL
* message.
*/
virtual bool AllocShmem(size_t aSize,
mozilla::ipc::SharedMemory::SharedMemoryType aShmType,
mozilla::ipc::SharedMemory::SharedMemoryType aType,
mozilla::ipc::Shmem* aShmem) = 0;

/**
* Allocate shared memory that can be accessed by both processes at the
* same time. Safety is left for the user of the memory to care about.
*/
virtual bool AllocUnsafeShmem(size_t aSize,
mozilla::ipc::SharedMemory::SharedMemoryType aShmType,
mozilla::ipc::SharedMemory::SharedMemoryType aType,
mozilla::ipc::Shmem* aShmem) = 0;

/**
* Allocate memory in shared memory that can always be accessed by both
* processes at a time. Safety is left for the user of the memory to care
* about.
*/
bool AllocShmemSection(size_t aSize,
mozilla::layers::ShmemSection* aShmemSection);

/**
* Deallocates a shmem section.
*/
void FreeShmemSection(mozilla::layers::ShmemSection& aShmemSection);

/**
* Deallocate memory allocated by either AllocShmem or AllocUnsafeShmem.
*/
virtual void DeallocShmem(mozilla::ipc::Shmem& aShmem) = 0;
};

/// An allocator that can group allocations in bigger chunks of shared memory.
///
/// The allocated shmem sections can only be deallocated by the same allocator
/// instance (and only in the child process).
class ShmemSectionAllocator
{
public:
virtual bool AllocShmemSection(uint32_t aSize, ShmemSection* aShmemSection) = 0;

virtual void DeallocShmemSection(ShmemSection& aShmemSection) = 0;

virtual void MemoryPressure() {}
};

/// Some old stuff that's still around and used for screenshots.
///
/// New code should not need this (see TextureClient).
class LegacySurfaceDescriptorAllocator
{
public:
// was AllocBuffer
virtual bool AllocSurfaceDescriptor(const gfx::IntSize& aSize,
gfxContentType aContent,
SurfaceDescriptor* aBuffer) = 0;
SurfaceDescriptor* aBuffer);

// was AllocBufferWithCaps
virtual bool AllocSurfaceDescriptorWithCaps(const gfx::IntSize& aSize,
gfxContentType aContent,
uint32_t aCaps,
SurfaceDescriptor* aBuffer) = 0;
SurfaceDescriptor* aBuffer);

virtual void DestroySurfaceDescriptor(SurfaceDescriptor* aSurface) = 0;
/**
* Returns the maximum texture size supported by the compositor.
*/
virtual int32_t GetMaxTextureSize() const { return INT32_MAX; }

virtual void DestroySharedSurface(SurfaceDescriptor* aSurface);

// method that does the actual allocation work
bool AllocGrallocBuffer(const gfx::IntSize& aSize,
uint32_t aFormat,
uint32_t aUsage,
MaybeMagicGrallocBufferHandle* aHandle);

void DeallocGrallocBuffer(MaybeMagicGrallocBufferHandle* aHandle);

void DropGrallocBuffer(MaybeMagicGrallocBufferHandle* aHandle);

virtual bool IPCOpen() const { return true; }
virtual bool IsSameProcess() const = 0;
virtual base::ProcessId ParentPid() const { return base::ProcessId(); }

virtual bool IsImageBridgeChild() const { return false; }

virtual MessageLoop * GetMessageLoop() const
{
return mDefaultMessageLoop;
}

// Returns true if aSurface wraps a Shmem.
static bool IsShmem(SurfaceDescriptor* aSurface);

virtual CompositableForwarder* AsCompositableForwarder() { return nullptr; }
protected:

virtual bool IsOnCompositorSide() const = 0;

virtual ~ISurfaceAllocator();

void ShrinkShmemSectionHeap();

// This is used to implement an extremely simple & naive heap allocator.
std::vector<mozilla::ipc::Shmem> mUsedShmems;

MessageLoop* mDefaultMessageLoop;

friend class AtomicRefCountedWithFinalize<ISurfaceAllocator>;
};

already_AddRefed<gfx::DrawTarget>
GetDrawTargetForDescriptor(const SurfaceDescriptor& aDescriptor, gfx::BackendType aBackend);

already_AddRefed<gfx::DataSourceSurface>
GetSurfaceForDescriptor(const SurfaceDescriptor& aDescriptor);

uint8_t*
GetAddressFromDescriptor(const SurfaceDescriptor& aDescriptor);

class GfxMemoryImageReporter final : public nsIMemoryReporter
{
~GfxMemoryImageReporter() {}
@@ -941,10 +941,9 @@ ImageBridgeChild::AllocShmem(size_t aSize,
{
MOZ_ASSERT(!mShuttingDown);
if (InImageBridgeChildThread()) {
return PImageBridgeChild::AllocUnsafeShmem(aSize, aType,
aShmem);
return PImageBridgeChild::AllocShmem(aSize, aType, aShmem);
} else {
return DispatchAllocShmemInternal(aSize, aType, aShmem, true); // true: unsafe
return DispatchAllocShmemInternal(aSize, aType, aShmem, false); // false: unsafe
}
}

@@ -967,15 +966,14 @@ static void ProxyAllocShmemNow(AllocShmemParams* aParams,
MOZ_ASSERT(aDone);
MOZ_ASSERT(aBarrier);

auto shmAllocator = aParams->mAllocator->AsShmemAllocator();
if (aParams->mUnsafe) {
aParams->mSuccess = shmAllocator->AllocUnsafeShmem(aParams->mSize,
aParams->mType,
aParams->mShmem);
aParams->mSuccess = aParams->mAllocator->AllocUnsafeShmem(aParams->mSize,
aParams->mType,
aParams->mShmem);
} else {
aParams->mSuccess = shmAllocator->AllocShmem(aParams->mSize,
aParams->mType,
aParams->mShmem);
aParams->mSuccess = aParams->mAllocator->AllocShmem(aParams->mSize,
aParams->mType,
aParams->mShmem);
}

ReentrantMonitorAutoEnter autoMon(*aBarrier);
@@ -1017,7 +1015,7 @@ static void ProxyDeallocShmemNow(ISurfaceAllocator* aAllocator,
MOZ_ASSERT(aDone);
MOZ_ASSERT(aBarrier);

aAllocator->AsShmemAllocator()->DeallocShmem(*aShmem);
aAllocator->DeallocShmem(*aShmem);

ReentrantMonitorAutoEnter autoMon(*aBarrier);
*aDone = true;
@@ -105,14 +105,11 @@ bool InImageBridgeChildThread();
class ImageBridgeChild final : public PImageBridgeChild
, public CompositableForwarder
, public AsyncTransactionTrackersHolder
, public ShmemAllocator
{
friend class ImageContainer;
typedef InfallibleTArray<AsyncParentMessageData> AsyncParentMessageArray;
public:

virtual ShmemAllocator* AsShmemAllocator() override { return this; }

/**
* Creates the image bridge with a dedicated thread for ImageBridgeChild.
*
@@ -249,7 +246,7 @@ public:
virtual void Connect(CompositableClient* aCompositable,
ImageContainer* aImageContainer) override;

virtual bool UsesImageBridge() const override { return true; }
virtual bool IsImageBridgeChild() const override { return true; }

/**
* See CompositableForwarder::UseTextures
@@ -296,12 +293,17 @@ public:
* call on the ImageBridgeChild thread.
*/
virtual bool AllocUnsafeShmem(size_t aSize,
mozilla::ipc::SharedMemory::SharedMemoryType aShmType,
mozilla::ipc::SharedMemory::SharedMemoryType aType,
mozilla::ipc::Shmem* aShmem) override;
/**
* See ISurfaceAllocator.h
* Can be used from any thread.
* If used outside the ImageBridgeChild thread, it will proxy a synchronous
* call on the ImageBridgeChild thread.
*/
virtual bool AllocShmem(size_t aSize,
mozilla::ipc::SharedMemory::SharedMemoryType aShmType,
mozilla::ipc::SharedMemory::SharedMemoryType aType,
mozilla::ipc::Shmem* aShmem) override;

/**
* See ISurfaceAllocator.h
* Can be used from any thread.
@@ -350,6 +350,10 @@ ImageBridgeParent::NotifyImageComposites(nsTArray<ImageCompositeNotification>& a
return ok;
}

MessageLoop * ImageBridgeParent::GetMessageLoop() const {
return mMessageLoop;
}

void
ImageBridgeParent::DeferredDestroy()
{
@@ -395,8 +399,8 @@ ImageBridgeParent::OnChannelConnected(int32_t aPid)

bool
ImageBridgeParent::AllocShmem(size_t aSize,
ipc::SharedMemory::SharedMemoryType aType,
ipc::Shmem* aShmem)
ipc::SharedMemory::SharedMemoryType aType,
ipc::Shmem* aShmem)
{
if (mStopped) {
return false;
@@ -39,8 +39,7 @@ namespace layers {
* interesting stuff is in ImageContainerParent.
*/
class ImageBridgeParent final : public PImageBridgeParent,
public CompositableParentManager,
public ShmemAllocator
public CompositableParentManager
{
public:
typedef InfallibleTArray<CompositableOperation> EditArray;
@@ -51,8 +50,6 @@ public:
ImageBridgeParent(MessageLoop* aLoop, Transport* aTransport, ProcessId aChildProcessId);
~ImageBridgeParent();

virtual ShmemAllocator* AsShmemAllocator() override { return this; }

virtual void ActorDestroy(ActorDestroyReason aWhy) override;

static PImageBridgeParent*
@@ -99,9 +96,10 @@ public:
// Shutdown step 2
virtual bool RecvStop() override;

virtual MessageLoop* GetMessageLoop() const { return mMessageLoop; };
virtual MessageLoop* GetMessageLoop() const override;

// ShmemAllocator

// ISurfaceAllocator

virtual bool AllocShmem(size_t aSize,
ipc::SharedMemory::SharedMemoryType aType,
@@ -142,8 +140,6 @@ public:
base::ProcessHandle aPeerProcess,
mozilla::ipc::ProtocolCloneContext* aCtx) override;

virtual bool UsesImageBridge() const override { return true; }

protected:
void OnChannelConnected(int32_t pid) override;
@@ -36,8 +36,7 @@ class CompositableParent;
class ShadowLayersManager;

class LayerTransactionParent final : public PLayerTransactionParent,
public CompositableParentManager,
public ShmemAllocator
public CompositableParentManager
{
typedef mozilla::layout::RenderFrameParent RenderFrameParent;
typedef InfallibleTArray<Edit> EditArray;
@@ -62,8 +61,7 @@ public:
uint64_t GetId() const { return mId; }
Layer* GetRoot() const { return mRoot; }

virtual ShmemAllocator* AsShmemAllocator() override { return this; }

// ISurfaceAllocator
virtual bool AllocShmem(size_t aSize,
ipc::SharedMemory::SharedMemoryType aType,
ipc::Shmem* aShmem) override;
@@ -15,18 +15,16 @@
#include "ShadowLayerChild.h"           // for ShadowLayerChild
#include "gfx2DGlue.h"                  // for Moz2D transition helpers
#include "gfxPlatform.h"                // for gfxImageFormat, gfxPlatform
//#include "gfxSharedImageSurface.h"    // for gfxSharedImageSurface
#include "gfxSharedImageSurface.h"      // for gfxSharedImageSurface
#include "ipc/IPCMessageUtils.h"        // for gfxContentType, null_t
#include "IPDLActor.h"
#include "mozilla/Assertions.h"         // for MOZ_ASSERT, etc
#include "mozilla/gfx/Point.h"          // for IntSize
#include "mozilla/layers/CompositableClient.h"  // for CompositableClient, etc
#include "mozilla/layers/ImageDataSerializer.h"
#include "mozilla/layers/LayersMessages.h"  // for Edit, etc
#include "mozilla/layers/LayersSurfaces.h"  // for SurfaceDescriptor, etc
#include "mozilla/layers/LayersTypes.h"  // for MOZ_LAYERS_LOG
#include "mozilla/layers/LayerTransactionChild.h"
#include "mozilla/layers/SharedBufferManagerChild.h"
#include "ShadowLayerUtils.h"
#include "mozilla/layers/TextureClient.h"  // for TextureClient
#include "mozilla/mozalloc.h"           // for operator new, etc
@@ -199,164 +197,6 @@ struct AutoTxnEnd {
Transaction* mTxn;
};

// XXX - We should actually figure out the minimum shmem allocation size on
// a certain platform and use that.
const uint32_t sShmemPageSize = 4096;

#ifdef DEBUG
const uint32_t sSupportedBlockSize = 4;
#endif

FixedSizeSmallShmemSectionAllocator::FixedSizeSmallShmemSectionAllocator(ShmemAllocator* aShmProvider)
: mShmProvider(aShmProvider)
{
MOZ_ASSERT(mShmProvider);
}

FixedSizeSmallShmemSectionAllocator::~FixedSizeSmallShmemSectionAllocator()
{
ShrinkShmemSectionHeap();
// Check if we're not leaking..
MOZ_ASSERT(mUsedShmems.empty());
}

bool
FixedSizeSmallShmemSectionAllocator::AllocShmemSection(uint32_t aSize, ShmemSection* aShmemSection)
{
// For now we only support sizes of 4. If we want to support different sizes
// some more complicated bookkeeping should be added.
MOZ_ASSERT(aSize == sSupportedBlockSize);
MOZ_ASSERT(aShmemSection);

uint32_t allocationSize = (aSize + sizeof(ShmemSectionHeapAllocation));

for (size_t i = 0; i < mUsedShmems.size(); i++) {
ShmemSectionHeapHeader* header = mUsedShmems[i].get<ShmemSectionHeapHeader>();
if ((header->mAllocatedBlocks + 1) * allocationSize + sizeof(ShmemSectionHeapHeader) < sShmemPageSize) {
aShmemSection->shmem() = mUsedShmems[i];
MOZ_ASSERT(mUsedShmems[i].IsWritable());
break;
}
}

if (!aShmemSection->shmem().IsWritable()) {
ipc::Shmem tmp;
if (!mShmProvider->AllocUnsafeShmem(sShmemPageSize, OptimalShmemType(), &tmp)) {
return false;
}

ShmemSectionHeapHeader* header = tmp.get<ShmemSectionHeapHeader>();
header->mTotalBlocks = 0;
header->mAllocatedBlocks = 0;

mUsedShmems.push_back(tmp);
aShmemSection->shmem() = tmp;
}

MOZ_ASSERT(aShmemSection->shmem().IsWritable());

ShmemSectionHeapHeader* header = aShmemSection->shmem().get<ShmemSectionHeapHeader>();
uint8_t* heap = aShmemSection->shmem().get<uint8_t>() + sizeof(ShmemSectionHeapHeader);

ShmemSectionHeapAllocation* allocHeader = nullptr;

if (header->mTotalBlocks > header->mAllocatedBlocks) {
// Search for the first available block.
for (size_t i = 0; i < header->mTotalBlocks; i++) {
allocHeader = reinterpret_cast<ShmemSectionHeapAllocation*>(heap);

if (allocHeader->mStatus == STATUS_FREED) {
break;
}
heap += allocationSize;
}
MOZ_ASSERT(allocHeader && allocHeader->mStatus == STATUS_FREED);
MOZ_ASSERT(allocHeader->mSize == sSupportedBlockSize);
} else {
heap += header->mTotalBlocks * allocationSize;

header->mTotalBlocks++;
allocHeader = reinterpret_cast<ShmemSectionHeapAllocation*>(heap);
allocHeader->mSize = aSize;
}

MOZ_ASSERT(allocHeader);
header->mAllocatedBlocks++;
allocHeader->mStatus = STATUS_ALLOCATED;

aShmemSection->size() = aSize;
aShmemSection->offset() = (heap + sizeof(ShmemSectionHeapAllocation)) - aShmemSection->shmem().get<uint8_t>();
ShrinkShmemSectionHeap();
return true;
}

void
FixedSizeSmallShmemSectionAllocator::FreeShmemSection(mozilla::layers::ShmemSection& aShmemSection)
{
MOZ_ASSERT(aShmemSection.size() == sSupportedBlockSize);
MOZ_ASSERT(aShmemSection.offset() < sShmemPageSize - sSupportedBlockSize);

ShmemSectionHeapAllocation* allocHeader =
reinterpret_cast<ShmemSectionHeapAllocation*>(aShmemSection.shmem().get<char>() +
aShmemSection.offset() -
sizeof(ShmemSectionHeapAllocation));

MOZ_ASSERT(allocHeader->mSize == aShmemSection.size());

DebugOnly<bool> success = allocHeader->mStatus.compareExchange(STATUS_ALLOCATED, STATUS_FREED);
// If this fails something really weird is going on.
MOZ_ASSERT(success);

ShmemSectionHeapHeader* header = aShmemSection.shmem().get<ShmemSectionHeapHeader>();
header->mAllocatedBlocks--;
}

void
FixedSizeSmallShmemSectionAllocator::DeallocShmemSection(mozilla::layers::ShmemSection& aShmemSection)
{
FreeShmemSection(aShmemSection);
ShrinkShmemSectionHeap();
}

void
FixedSizeSmallShmemSectionAllocator::ShrinkShmemSectionHeap()
{
// The loop will terminate as we either increase i, or decrease size
// every time through.
size_t i = 0;
while (i < mUsedShmems.size()) {
ShmemSectionHeapHeader* header = mUsedShmems[i].get<ShmemSectionHeapHeader>();
if (header->mAllocatedBlocks == 0) {
mShmProvider->DeallocShmem(mUsedShmems[i]);

// We don't particularly care about order, move the last one in the array
// to this position.
if (i < mUsedShmems.size() - 1) {
mUsedShmems[i] = mUsedShmems[mUsedShmems.size() - 1];
}
mUsedShmems.pop_back();
} else {
i++;
}
}
}

FixedSizeSmallShmemSectionAllocator*
ShadowLayerForwarder::GetTileLockAllocator()
{
MOZ_ASSERT(IPCOpen());
if (!IPCOpen()) {
return nullptr;
}

if (!mSectionAllocator) {
mSectionAllocator = new FixedSizeSmallShmemSectionAllocator(this);
}
return mSectionAllocator;
}

void
CompositableForwarder::IdentifyTextureHost(const TextureFactoryIdentifier& aIdentifier)
{
@@ -366,12 +206,10 @@ CompositableForwarder::IdentifyTextureHost(const TextureFactoryIdentifier& aIden
}

ShadowLayerForwarder::ShadowLayerForwarder()
: mMessageLoop(MessageLoop::current())
, mDiagnosticTypes(DiagnosticTypes::NO_DIAGNOSTIC)
: mDiagnosticTypes(DiagnosticTypes::NO_DIAGNOSTIC)
, mIsFirstPaint(false)
, mWindowOverlayChanged(false)
, mPaintSyncId(0)
, mSectionAllocator(nullptr)
{
mTxn = new Transaction();
}
@@ -387,10 +225,6 @@ ShadowLayerForwarder::~ShadowLayerForwarder()
mShadowManager->SetForwarder(nullptr);
mShadowManager->Destroy();
}

if (mSectionAllocator) {
delete mSectionAllocator;
}
}

void
@@ -919,41 +753,42 @@ ShadowLayerForwarder::EndTransaction(InfallibleTArray<EditReply>* aReplies,
return true;
}

bool
ShadowLayerForwarder::AllocUnsafeShmem(size_t aSize,
ipc::SharedMemory::SharedMemoryType aShmType,
ipc::Shmem* aShmem)
{
MOZ_ASSERT(HasShadowManager(), "no shadow manager");
if (!IPCOpen()) {
return false;
}

ShmemAllocated(mShadowManager);
return mShadowManager->AllocUnsafeShmem(aSize, aShmType, aShmem);
}

bool
ShadowLayerForwarder::AllocShmem(size_t aSize,
ipc::SharedMemory::SharedMemoryType aShmType,
ipc::SharedMemory::SharedMemoryType aType,
ipc::Shmem* aShmem)
{
MOZ_ASSERT(HasShadowManager(), "no shadow manager");
if (!IPCOpen()) {
if (!HasShadowManager() ||
!mShadowManager->IPCOpen()) {
return false;
}

ShmemAllocated(mShadowManager);
return mShadowManager->AllocShmem(aSize, aShmType, aShmem);
return mShadowManager->AllocShmem(aSize, aType, aShmem);
}
bool
ShadowLayerForwarder::AllocUnsafeShmem(size_t aSize,
ipc::SharedMemory::SharedMemoryType aType,
ipc::Shmem* aShmem)
{
MOZ_ASSERT(HasShadowManager(), "no shadow manager");
if (!HasShadowManager() ||
!mShadowManager->IPCOpen()) {
return false;
}
ShmemAllocated(mShadowManager);
return mShadowManager->AllocUnsafeShmem(aSize, aType, aShmem);
}

void
ShadowLayerForwarder::DeallocShmem(ipc::Shmem& aShmem)
{
MOZ_ASSERT(HasShadowManager(), "no shadow manager");
if (HasShadowManager() && mShadowManager->IPCOpen()) {
mShadowManager->DeallocShmem(aShmem);
if (!HasShadowManager() ||
!mShadowManager->IPCOpen()) {
return;
}
mShadowManager->DeallocShmem(aShmem);
}

bool
@@ -972,7 +807,7 @@ ShadowLayerForwarder::IsSameProcess() const
}

base::ProcessId
ShadowLayerForwarder::GetParentPid() const
ShadowLayerForwarder::ParentPid() const
{
if (!HasShadowManager() || !mShadowManager->IPCOpen()) {
return base::ProcessId();
@@ -1112,148 +947,5 @@ void ShadowLayerForwarder::SendPendingAsyncMessges()
mShadowManager->SendChildAsyncMessages(replies);
}

bool
IsSurfaceDescriptorValid(const SurfaceDescriptor& aSurface)
{
return aSurface.type() != SurfaceDescriptor::T__None &&
aSurface.type() != SurfaceDescriptor::Tnull_t;
}

uint8_t*
GetAddressFromDescriptor(const SurfaceDescriptor& aDescriptor)
{
MOZ_ASSERT(IsSurfaceDescriptorValid(aDescriptor));
MOZ_RELEASE_ASSERT(aDescriptor.type() == SurfaceDescriptor::TSurfaceDescriptorBuffer);

auto memOrShmem = aDescriptor.get_SurfaceDescriptorBuffer().data();
if (memOrShmem.type() == MemoryOrShmem::TShmem) {
return memOrShmem.get_Shmem().get<uint8_t>();
} else {
return reinterpret_cast<uint8_t*>(memOrShmem.get_uintptr_t());
}
}

already_AddRefed<gfx::DataSourceSurface>
GetSurfaceForDescriptor(const SurfaceDescriptor& aDescriptor)
{
uint8_t* data = GetAddressFromDescriptor(aDescriptor);
auto rgb = aDescriptor.get_SurfaceDescriptorBuffer().desc().get_RGBDescriptor();
uint32_t stride = ImageDataSerializer::GetRGBStride(rgb);
return gfx::Factory::CreateWrappingDataSourceSurface(data, stride, rgb.size(),
rgb.format());
}

already_AddRefed<gfx::DrawTarget>
GetDrawTargetForDescriptor(const SurfaceDescriptor& aDescriptor, gfx::BackendType aBackend)
{
uint8_t* data = GetAddressFromDescriptor(aDescriptor);
auto rgb = aDescriptor.get_SurfaceDescriptorBuffer().desc().get_RGBDescriptor();
uint32_t stride = ImageDataSerializer::GetRGBStride(rgb);
return gfx::Factory::CreateDrawTargetForData(gfx::BackendType::CAIRO,
data, rgb.size(),
stride, rgb.format());
}

bool
ShadowLayerForwarder::AllocSurfaceDescriptor(const gfx::IntSize& aSize,
gfxContentType aContent,
SurfaceDescriptor* aBuffer)
{
if (!IPCOpen()) {
return false;
}
return AllocSurfaceDescriptorWithCaps(aSize, aContent, DEFAULT_BUFFER_CAPS, aBuffer);
}

bool
ShadowLayerForwarder::AllocSurfaceDescriptorWithCaps(const gfx::IntSize& aSize,
gfxContentType aContent,
uint32_t aCaps,
SurfaceDescriptor* aBuffer)
{
if (!IPCOpen()) {
return false;
}
gfx::SurfaceFormat format =
gfxPlatform::GetPlatform()->Optimal2DFormatForContent(aContent);
size_t size = ImageDataSerializer::ComputeRGBBufferSize(aSize, format);
if (!size) {
return false;
}

MemoryOrShmem bufferDesc;
if (IsSameProcess()) {
uint8_t* data = new (std::nothrow) uint8_t[size];
if (!data) {
return false;
}
GfxMemoryImageReporter::DidAlloc(data);
#ifdef XP_MACOSX
// Workaround a bug in Quartz where drawing an a8 surface to another a8
// surface with OP_SOURCE still requires the destination to be clear.
if (format == gfx::SurfaceFormat::A8) {
memset(data, 0, size);
}
#endif
bufferDesc = reinterpret_cast<uintptr_t>(data);
} else {

mozilla::ipc::Shmem shmem;
if (!AllocUnsafeShmem(size, OptimalShmemType(), &shmem)) {
return false;
}

bufferDesc = shmem;
}

// Use an intermediate buffer by default. Skipping the intermediate buffer is
// only possible in certain configurations so let's keep it simple here for now.
const bool hasIntermediateBuffer = true;
*aBuffer = SurfaceDescriptorBuffer(RGBDescriptor(aSize, format, hasIntermediateBuffer),
bufferDesc);

return true;
}

/* static */ bool
ShadowLayerForwarder::IsShmem(SurfaceDescriptor* aSurface)
{
return aSurface && (aSurface->type() == SurfaceDescriptor::TSurfaceDescriptorBuffer)
&& (aSurface->get_SurfaceDescriptorBuffer().data().type() == MemoryOrShmem::TShmem);
}

void
ShadowLayerForwarder::DestroySurfaceDescriptor(SurfaceDescriptor* aSurface)
{
MOZ_ASSERT(IPCOpen());
if (!IPCOpen()) {
return;
}

MOZ_ASSERT(aSurface);
if (!aSurface) {
return;
}
if (!IPCOpen()) {
return;
}
SurfaceDescriptorBuffer& desc = aSurface->get_SurfaceDescriptorBuffer();
switch (desc.data().type()) {
case MemoryOrShmem::TShmem: {
DeallocShmem(desc.data().get_Shmem());
break;
}
case MemoryOrShmem::Tuintptr_t: {
uint8_t* ptr = (uint8_t*)desc.data().get_uintptr_t();
GfxMemoryImageReporter::WillFree(ptr);
delete [] ptr;
break;
}
default:
NS_RUNTIMEABORT("surface type not implemented!");
}
*aSurface = SurfaceDescriptor();
}

} // namespace layers
} // namespace mozilla
@@ -22,13 +22,11 @@
#include "nsRegion.h"                   // for nsIntRegion
#include "nsTArrayForwardDeclare.h"     // for InfallibleTArray
#include "nsIWidget.h"
#include <vector>

namespace mozilla {
namespace layers {

class EditReply;
class FixedSizeSmallShmemSectionAllocator;
class ImageContainer;
class Layer;
class PLayerChild;
@@ -41,6 +39,7 @@ class ThebesBuffer;
class ThebesBufferData;
class Transaction;

/**
* We want to share layer trees across thread contexts and address
* spaces for several reasons; chief among them
@@ -115,23 +114,12 @@ class Transaction;
*/

class ShadowLayerForwarder final : public CompositableForwarder
, public ShmemAllocator
, public LegacySurfaceDescriptorAllocator
{
friend class ClientLayerManager;

public:
virtual ~ShadowLayerForwarder();

virtual ShmemAllocator* AsShmemAllocator() override { return this; }

virtual ShadowLayerForwarder* AsLayerForwarder() override { return this; }

virtual LegacySurfaceDescriptorAllocator*
AsLegacySurfaceDescriptorAllocator() override { return this; }

FixedSizeSmallShmemSectionAllocator* GetTileLockAllocator();

/**
* Setup the IPDL actor for aCompositable to be part of layers
* transactions.
@@ -334,7 +322,7 @@ public:
* buffer, and the double-buffer pair is gone.
*/

// ISurfaceAllocator
virtual bool AllocUnsafeShmem(size_t aSize,
mozilla::ipc::SharedMemory::SharedMemoryType aType,
mozilla::ipc::Shmem* aShmem) override;
@@ -344,12 +332,8 @@ public:
virtual void DeallocShmem(mozilla::ipc::Shmem& aShmem) override;

virtual bool IPCOpen() const override;

virtual bool IsSameProcess() const override;

virtual MessageLoop* GetMessageLoop() const override { return mMessageLoop; }

base::ProcessId GetParentPid() const;
virtual base::ProcessId ParentPid() const override;

/**
* Construct a shadow of |aLayer| on the "other side", at the
@@ -366,20 +350,6 @@ public:

static void PlatformSyncBeforeUpdate();

virtual bool AllocSurfaceDescriptor(const gfx::IntSize& aSize,
gfxContentType aContent,
SurfaceDescriptor* aBuffer) override;

virtual bool AllocSurfaceDescriptorWithCaps(const gfx::IntSize& aSize,
gfxContentType aContent,
uint32_t aCaps,
SurfaceDescriptor* aBuffer) override;

virtual void DestroySurfaceDescriptor(SurfaceDescriptor* aSurface) override;

// Returns true if aSurface wraps a Shmem.
static bool IsShmem(SurfaceDescriptor* aSurface);

protected:
ShadowLayerForwarder();

@@ -396,14 +366,12 @@ protected:
private:

Transaction* mTxn;
MessageLoop* mMessageLoop;
std::vector<AsyncChildMessageData> mPendingAsyncMessages;
DiagnosticTypes mDiagnosticTypes;
bool mIsFirstPaint;
bool mWindowOverlayChanged;
int32_t mPaintSyncId;
InfallibleTArray<PluginWindowData> mPluginWindowData;
FixedSizeSmallShmemSectionAllocator* mSectionAllocator;
};

class CompositableClient;
@@ -440,50 +408,6 @@ protected:
PLayerChild* mShadow;
};

/// A simple shmem section allocator that can only allocate small
/// fixed size elements (only intended to be used to store tile
/// copy-on-write locks for now).
class FixedSizeSmallShmemSectionAllocator final : public ShmemSectionAllocator
{
public:
enum AllocationStatus
{
STATUS_ALLOCATED,
STATUS_FREED
};

struct ShmemSectionHeapHeader
{
Atomic<uint32_t> mTotalBlocks;
Atomic<uint32_t> mAllocatedBlocks;
};

struct ShmemSectionHeapAllocation
{
Atomic<uint32_t> mStatus;
uint32_t mSize;
};

explicit FixedSizeSmallShmemSectionAllocator(ShmemAllocator* aShmProvider);

~FixedSizeSmallShmemSectionAllocator();

virtual bool AllocShmemSection(uint32_t aSize, ShmemSection* aShmemSection) override;

virtual void DeallocShmemSection(ShmemSection& aShmemSection) override;

virtual void MemoryPressure() override { ShrinkShmemSectionHeap(); }

// can be called on the compositor process.
static void FreeShmemSection(ShmemSection& aShmemSection);

void ShrinkShmemSectionHeap();

protected:
std::vector<mozilla::ipc::Shmem> mUsedShmems;
ShmemAllocator* mShmProvider;
};

} // namespace layers
} // namespace mozilla
@@ -11,7 +11,6 @@
#include "mozilla/layers/CompositableForwarder.h"
#include "mozilla/layers/ISurfaceAllocator.h"
#include "mozilla/layers/ShadowLayerUtilsGralloc.h"
#include "mozilla/layers/SharedBufferManagerChild.h"
#include "gfx2DGlue.h"
#include "gfxPrefs.h"                   // for gfxPrefs
#include "SharedSurfaceGralloc.h"
@@ -113,8 +112,8 @@ void
GrallocTextureData::Deallocate(ISurfaceAllocator* aAllocator)
{
MOZ_ASSERT(aAllocator);
if (aAllocator && aAllocator->IPCOpen()) {
SharedBufferManagerChild::GetSingleton()->DeallocGrallocBuffer(mGrallocHandle);
if (aAllocator) {
aAllocator->DeallocGrallocBuffer(&mGrallocHandle);
}

mGrallocHandle = null_t();
@@ -125,8 +124,8 @@ void
GrallocTextureData::Forget(ISurfaceAllocator* aAllocator)
{
MOZ_ASSERT(aAllocator);
if (aAllocator && aAllocator->IPCOpen()) {
SharedBufferManagerChild::GetSingleton()->DropGrallocBuffer(mGrallocHandle);
if (aAllocator) {
aAllocator->DropGrallocBuffer(&mGrallocHandle);
}

mGrallocHandle = null_t();
@@ -281,10 +280,10 @@ GrallocTextureData::Create(gfx::IntSize aSize, AndroidFormat aAndroidFormat,
gfx::BackendType aMoz2dBackend, uint32_t aUsage,
ISurfaceAllocator* aAllocator)
{
if (!aAllocator || !aAllocator->IPCOpen()) {
if (!aAllocator) {
return nullptr;
}
int32_t maxSize = aAllocator->AsClientAllocator()->GetMaxTextureSize();
int32_t maxSize = aAllocator->GetMaxTextureSize();
if (aSize.width > maxSize || aSize.height > maxSize) {
return nullptr;
}
@@ -314,7 +313,7 @@ GrallocTextureData::Create(gfx::IntSize aSize, AndroidFormat aAndroidFormat,
}

MaybeMagicGrallocBufferHandle handle;
if (!SharedBufferManagerChild::GetSingleton()->AllocGrallocBuffer(aSize, aAndroidFormat, aUsage, &handle)) {
if (!aAllocator->AllocGrallocBuffer(aSize, aAndroidFormat, aUsage, &handle)) {
return nullptr;
}
@@ -38,7 +38,7 @@ gfxReusableSharedImageSurfaceWrapper::ReadUnlock()
MOZ_ASSERT(readCount >= 0, "Read count should not be negative");

if (readCount == 0) {
mAllocator->AsShmemAllocator()->DeallocShmem(mSurface->GetShmem());
mAllocator->DeallocShmem(mSurface->GetShmem());
}
}

@@ -56,7 +56,7 @@ gfxReusableSharedImageSurfaceWrapper::GetWritable(gfxImageSurface** aSurface)

// Something else is reading the surface, copy it
RefPtr<gfxSharedImageSurface> copySurface =
gfxSharedImageSurface::CreateUnsafe(mAllocator->AsShmemAllocator(), mSurface->GetSize(), mSurface->Format());
gfxSharedImageSurface::CreateUnsafe(mAllocator.get(), mSurface->GetSize(), mSurface->Format());
copySurface->CopyFrom(mSurface);
*aSurface = copySurface;
@@ -2120,7 +2120,7 @@ nsIWidget::SnapshotWidgetOnScreen()
RefPtr<gfx::DrawTarget> dt =
gfxPlatform::GetPlatform()->CreateOffscreenContentDrawTarget(size, gfx::SurfaceFormat::B8G8R8A8);
if (!snapshot || !dt) {
forwarder->DestroySurfaceDescriptor(&surface);
forwarder->DestroySharedSurface(&surface);
return nullptr;
}

@@ -2129,7 +2129,7 @@ nsIWidget::SnapshotWidgetOnScreen()
gfx::Rect(gfx::Point(), gfx::Size(size)),
gfx::DrawSurfaceOptions(gfx::Filter::POINT));

forwarder->DestroySurfaceDescriptor(&surface);
forwarder->DestroySharedSurface(&surface);
return dt->Snapshot();
}