Bug 1602131 - WebGPU render passes r=jgilbert,webidl,smaug

Adds support for recording render passes in WebGPU.
The `wgpu` logo is also added to the repository (`gfx/wgpu/logo.png`) under the [CC-BY-SA](https://creativecommons.org/licenses/by-sa/3.0/) license.

Differential Revision: https://phabricator.services.mozilla.com/D62461

--HG--
extra : moz-landing-system : lando
Dzmitry Malyshau 2020-02-19 19:25:30 +00:00
parent c9f1371cfe
commit 199a362906
29 changed files with 654 additions and 220 deletions
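
For orientation: on the content-process side, `RenderPassEncoder` feeds each `set*`/`draw*` call into wgpu's `ffi::WGPURawPass`, and `EndPass` turns the recorded pass into a byte slice (`wgpu_render_pass_finish`) that `CommandEncoder::EndRenderPass` copies into a `Shmem` and sends as `CommandEncoderRunRenderPass`; the GPU process then replays the bytes through `wgpu_server_encode_render_pass` / `command_encoder_run_render_pass`. The sketch below is only a simplified, self-contained illustration of that record-into-bytes / replay-from-bytes pattern; the enum, tags, and helper names are invented for the example and are not wgpu's actual types (the real code uses `RawPass` with peek-poke serialization and a `RawRenderTargets` header).

```rust
// Simplified illustration of the record-into-bytes / replay-from-bytes pattern.
// All names here are invented for the example; the real patch uses wgpu-core's
// `RawPass` (peek-poke serialization) and ships the bytes across the process
// boundary in an IPDL `Shmem`.

use std::convert::TryInto;

#[derive(Debug, PartialEq)]
enum RenderCommand {
    SetPipeline(u64),
    Draw { vertex_count: u32, instance_count: u32 },
    End,
}

// Content-process side: flatten commands into a byte stream.
fn encode(commands: &[RenderCommand]) -> Vec<u8> {
    let mut bytes = Vec::new();
    for cmd in commands {
        match cmd {
            RenderCommand::SetPipeline(id) => {
                bytes.push(0);
                bytes.extend_from_slice(&id.to_le_bytes());
            }
            RenderCommand::Draw { vertex_count, instance_count } => {
                bytes.push(1);
                bytes.extend_from_slice(&vertex_count.to_le_bytes());
                bytes.extend_from_slice(&instance_count.to_le_bytes());
            }
            RenderCommand::End => bytes.push(2),
        }
    }
    bytes
}

// GPU-process side: replay the byte stream received over IPC.
fn replay(mut bytes: &[u8]) -> Vec<RenderCommand> {
    let mut commands = Vec::new();
    loop {
        let (tag, rest) = bytes.split_first().expect("truncated command stream");
        bytes = rest;
        match *tag {
            0 => {
                let (id, rest) = bytes.split_at(8);
                bytes = rest;
                commands.push(RenderCommand::SetPipeline(u64::from_le_bytes(
                    id.try_into().unwrap(),
                )));
            }
            1 => {
                let (vc, rest) = bytes.split_at(4);
                let (ic, rest) = rest.split_at(4);
                bytes = rest;
                commands.push(RenderCommand::Draw {
                    vertex_count: u32::from_le_bytes(vc.try_into().unwrap()),
                    instance_count: u32::from_le_bytes(ic.try_into().unwrap()),
                });
            }
            2 => {
                commands.push(RenderCommand::End);
                return commands;
            }
            other => panic!("unknown command tag {}", other),
        }
    }
}

fn main() {
    let recorded = encode(&[
        RenderCommand::SetPipeline(42),
        RenderCommand::Draw { vertex_count: 3, instance_count: 1 },
        RenderCommand::End,
    ]);
    assert_eq!(replay(&recorded).len(), 3);
}
```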

View File

@ -10,6 +10,7 @@
#include "Buffer.h"
#include "ComputePassEncoder.h"
#include "Device.h"
#include "RenderPassEncoder.h"
namespace mozilla {
namespace webgpu {
@ -51,6 +52,12 @@ already_AddRefed<ComputePassEncoder> CommandEncoder::BeginComputePass(
return pass.forget();
}
already_AddRefed<RenderPassEncoder> CommandEncoder::BeginRenderPass(
const dom::GPURenderPassDescriptor& aDesc) {
RefPtr<RenderPassEncoder> pass = new RenderPassEncoder(this, aDesc);
return pass.forget();
}
void CommandEncoder::EndComputePass(Span<const uint8_t> aData,
ErrorResult& aRv) {
if (!mValid) {
@ -67,6 +74,22 @@ void CommandEncoder::EndComputePass(Span<const uint8_t> aData,
mBridge->SendCommandEncoderRunComputePass(mId, std::move(shmem));
}
void CommandEncoder::EndRenderPass(Span<const uint8_t> aData,
ErrorResult& aRv) {
if (!mValid) {
return aRv.ThrowInvalidStateError("Command encoder is not valid");
}
ipc::Shmem shmem;
if (!mBridge->AllocShmem(aData.Length(), ipc::Shmem::SharedMemory::TYPE_BASIC,
&shmem)) {
return aRv.ThrowAbortError(nsPrintfCString(
"Unable to allocate shmem of size %zu", aData.Length()));
}
memcpy(shmem.get<uint8_t>(), aData.data(), aData.Length());
mBridge->SendCommandEncoderRunRenderPass(mId, std::move(shmem));
}
already_AddRefed<CommandBuffer> CommandEncoder::Finish(
const dom::GPUCommandBufferDescriptor& aDesc) {
RawId id = 0;

View File

@ -49,6 +49,7 @@ class CommandEncoder final : public ObjectBase, public ChildOf<Device> {
public:
void EndComputePass(Span<const uint8_t> aData, ErrorResult& aRv);
void EndRenderPass(Span<const uint8_t> aData, ErrorResult& aRv);
void CopyBufferToBuffer(const Buffer& aSource, BufferAddress aSourceOffset,
const Buffer& aDestination,
@ -56,6 +57,8 @@ class CommandEncoder final : public ObjectBase, public ChildOf<Device> {
BufferAddress aSize);
already_AddRefed<ComputePassEncoder> BeginComputePass(
const dom::GPUComputePassDescriptor& aDesc);
already_AddRefed<RenderPassEncoder> BeginRenderPass(
const dom::GPURenderPassDescriptor& aDesc);
already_AddRefed<CommandBuffer> Finish(
const dom::GPUCommandBufferDescriptor& aDesc);
};

View File

@ -5,7 +5,9 @@
#include "mozilla/dom/WebGPUBinding.h"
#include "RenderPassEncoder.h"
#include "BindGroup.h"
#include "CommandEncoder.h"
#include "RenderPipeline.h"
namespace mozilla {
namespace webgpu {
@ -13,13 +15,174 @@ namespace webgpu {
GPU_IMPL_CYCLE_COLLECTION(RenderPassEncoder, mParent)
GPU_IMPL_JS_WRAP(RenderPassEncoder)
RenderPassEncoder::~RenderPassEncoder() = default;
ffi::WGPULoadOp ConvertLoadOp(const dom::GPULoadOp& aOp) {
switch (aOp) {
case dom::GPULoadOp::Load:
return ffi::WGPULoadOp_Load;
default:
MOZ_CRASH("Unexpected load op");
}
}
ffi::WGPUStoreOp ConvertStoreOp(const dom::GPUStoreOp& aOp) {
switch (aOp) {
case dom::GPUStoreOp::Store:
return ffi::WGPUStoreOp_Store;
case dom::GPUStoreOp::Clear:
return ffi::WGPUStoreOp_Clear;
default:
MOZ_CRASH("Unexpected store op");
}
}
ffi::WGPUColor ConvertColor(const dom::GPUColorDict& aColor) {
ffi::WGPUColor color = {aColor.mR, aColor.mG, aColor.mB, aColor.mA};
return color;
}
ffi::WGPURawPass BeginRenderPass(RawId aEncoderId,
const dom::GPURenderPassDescriptor& aDesc) {
ffi::WGPURenderPassDescriptor desc = {};
ffi::WGPURenderPassDepthStencilAttachmentDescriptor dsDesc = {};
if (aDesc.mDepthStencilAttachment.WasPassed()) {
const auto& dsa = aDesc.mDepthStencilAttachment.Value();
dsDesc.attachment = dsa.mAttachment->mId;
if (dsa.mDepthLoadValue.IsFloat()) {
dsDesc.depth_load_op = ffi::WGPULoadOp_Clear;
dsDesc.clear_depth = dsa.mDepthLoadValue.GetAsFloat();
}
if (dsa.mDepthLoadValue.IsGPULoadOp()) {
dsDesc.depth_load_op =
ConvertLoadOp(dsa.mDepthLoadValue.GetAsGPULoadOp());
}
dsDesc.depth_store_op = ConvertStoreOp(dsa.mDepthStoreOp);
if (dsa.mStencilLoadValue.IsUnsignedLong()) {
dsDesc.stencil_load_op = ffi::WGPULoadOp_Clear;
dsDesc.clear_stencil = dsa.mStencilLoadValue.GetAsUnsignedLong();
}
if (dsa.mStencilLoadValue.IsGPULoadOp()) {
dsDesc.stencil_load_op =
ConvertLoadOp(dsa.mStencilLoadValue.GetAsGPULoadOp());
}
dsDesc.stencil_store_op = ConvertStoreOp(dsa.mStencilStoreOp);
desc.depth_stencil_attachment = &dsDesc;
}
std::array<ffi::WGPURenderPassColorAttachmentDescriptor,
WGPUMAX_COLOR_TARGETS>
colorDescs = {};
desc.color_attachments = colorDescs.data();
for (size_t i = 0; i < aDesc.mColorAttachments.Length(); ++i) {
const auto& ca = aDesc.mColorAttachments[i];
ffi::WGPURenderPassColorAttachmentDescriptor& cd = colorDescs[i];
cd.attachment = ca.mAttachment->mId;
cd.store_op = ConvertStoreOp(ca.mStoreOp);
if (ca.mResolveTarget.WasPassed()) {
cd.resolve_target = &ca.mResolveTarget.Value().mId;
}
if (ca.mLoadValue.IsGPULoadOp()) {
cd.load_op = ConvertLoadOp(ca.mLoadValue.GetAsGPULoadOp());
} else {
cd.load_op = ffi::WGPULoadOp_Clear;
if (ca.mLoadValue.IsDoubleSequence()) {
const auto& seq = ca.mLoadValue.GetAsDoubleSequence();
if (seq.Length() >= 1) {
cd.clear_color.r = seq[0];
}
if (seq.Length() >= 2) {
cd.clear_color.g = seq[1];
}
if (seq.Length() >= 3) {
cd.clear_color.b = seq[2];
}
if (seq.Length() >= 4) {
cd.clear_color.a = seq[3];
}
}
if (ca.mLoadValue.IsGPUColorDict()) {
cd.clear_color =
ConvertColor(ca.mLoadValue.GetAsGPUColorDict());
}
}
}
return ffi::wgpu_command_encoder_begin_render_pass(aEncoderId, &desc);
}
RenderPassEncoder::RenderPassEncoder(CommandEncoder* const aParent,
const dom::GPURenderPassDescriptor& aDesc)
: ChildOf(aParent), mRaw(BeginRenderPass(aParent->mId, aDesc)) {}
RenderPassEncoder::~RenderPassEncoder() {
if (mValid) {
mValid = false;
ffi::wgpu_render_pass_destroy(mRaw);
}
}
void RenderPassEncoder::SetBindGroup(
uint32_t aSlot, const BindGroup& aBindGroup,
const dom::Sequence<uint32_t>& aDynamicOffsets) {
if (mValid) {
MOZ_CRASH("TODO");
ffi::wgpu_render_pass_set_bind_group(&mRaw, aSlot, aBindGroup.mId,
aDynamicOffsets.Elements(),
aDynamicOffsets.Length());
}
}
void RenderPassEncoder::SetPipeline(const RenderPipeline& aPipeline) {
if (mValid) {
ffi::wgpu_render_pass_set_pipeline(&mRaw, aPipeline.mId);
}
}
void RenderPassEncoder::SetIndexBuffer(const Buffer& aBuffer,
uint64_t aOffset) {
if (mValid) {
ffi::wgpu_render_pass_set_index_buffer(&mRaw, aBuffer.mId, aOffset);
}
}
void RenderPassEncoder::SetVertexBuffer(uint32_t aSlot, const Buffer& aBuffer,
uint64_t aOffset) {
if (mValid) {
// TODO: change the Rust API to use a single vertex buffer?
ffi::wgpu_render_pass_set_vertex_buffers(&mRaw, aSlot, &aBuffer.mId,
&aOffset, 1);
}
}
void RenderPassEncoder::Draw(uint32_t aVertexCount, uint32_t aInstanceCount,
uint32_t aFirstVertex, uint32_t aFirstInstance) {
if (mValid) {
ffi::wgpu_render_pass_draw(&mRaw, aVertexCount, aInstanceCount,
aFirstVertex, aFirstInstance);
}
}
void RenderPassEncoder::DrawIndexed(uint32_t aIndexCount,
uint32_t aInstanceCount,
uint32_t aFirstIndex, int32_t aBaseVertex,
uint32_t aFirstInstance) {
if (mValid) {
ffi::wgpu_render_pass_draw_indexed(&mRaw, aIndexCount, aInstanceCount,
aFirstIndex, aBaseVertex,
aFirstInstance);
}
}
void RenderPassEncoder::EndPass(ErrorResult& aRv) {
if (mValid) {
mValid = false;
uintptr_t length = 0;
const uint8_t* pass_data = ffi::wgpu_render_pass_finish(&mRaw, &length);
mParent->EndRenderPass(Span(pass_data, length), aRv);
ffi::wgpu_render_pass_destroy(mRaw);
}
}

View File

@ -22,6 +22,7 @@ namespace webgpu {
class CommandEncoder;
class RenderBundle;
class RenderPipeline;
class RenderPassEncoder final : public ObjectBase,
public ChildOf<CommandEncoder> {
@ -29,15 +30,27 @@ class RenderPassEncoder final : public ObjectBase,
GPU_DECL_CYCLE_COLLECTION(RenderPassEncoder)
GPU_DECL_JS_WRAP(RenderPassEncoder)
RenderPassEncoder() = delete;
RenderPassEncoder(CommandEncoder* const aParent,
const dom::GPURenderPassDescriptor& aDesc);
protected:
virtual ~RenderPassEncoder();
void Cleanup() {}
ffi::WGPURawPass mRaw;
public:
void SetBindGroup(uint32_t aSlot, const BindGroup& aBindGroup,
const dom::Sequence<uint32_t>& aDynamicOffsets);
void SetPipeline(const RenderPipeline& aPipeline);
void SetIndexBuffer(const Buffer& aBuffer, uint64_t aOffset);
void SetVertexBuffer(uint32_t aSlot, const Buffer& aBuffer, uint64_t aOffset);
void Draw(uint32_t aVertexCount, uint32_t aInstanceCount,
uint32_t aFirstVertex, uint32_t aFirstInstance);
void DrawIndexed(uint32_t aIndexCount, uint32_t aInstanceCount,
uint32_t aFirstIndex, int32_t aBaseVertex,
uint32_t aFirstInstance);
void EndPass(ErrorResult& aRv);
};
} // namespace webgpu

View File

@ -13,7 +13,20 @@ namespace webgpu {
GPU_IMPL_CYCLE_COLLECTION(RenderPipeline, mParent)
GPU_IMPL_JS_WRAP(RenderPipeline)
RenderPipeline::~RenderPipeline() = default;
RenderPipeline::RenderPipeline(Device* const aParent, RawId aId)
: ChildOf(aParent), mId(aId) {}
RenderPipeline::~RenderPipeline() { Cleanup(); }
void RenderPipeline::Cleanup() {
if (mValid && mParent) {
mValid = false;
WebGPUChild* bridge = mParent->mBridge;
if (bridge && bridge->IsOpen()) {
bridge->DestroyRenderPipeline(mId);
}
}
}
} // namespace webgpu
} // namespace mozilla

View File

@ -19,10 +19,13 @@ class RenderPipeline final : public ObjectBase, public ChildOf<Device> {
GPU_DECL_CYCLE_COLLECTION(RenderPipeline)
GPU_DECL_JS_WRAP(RenderPipeline)
RenderPipeline(Device* const aParent, RawId aId);
const RawId mId;
private:
RenderPipeline() = delete;
virtual ~RenderPipeline();
void Cleanup() {}
void Cleanup();
};
} // namespace webgpu

View File

@ -45,6 +45,7 @@ parent:
async DeviceCreateCommandEncoder(RawId selfId, GPUCommandEncoderDescriptor desc, RawId newId);
async CommandEncoderCopyBufferToBuffer(RawId selfId, RawId sourceId, BufferAddress sourceOffset, RawId destinationId, BufferAddress destinationOffset, BufferAddress size);
async CommandEncoderRunComputePass(RawId selfId, Shmem shmem);
async CommandEncoderRunRenderPass(RawId selfId, Shmem shmem);
async CommandEncoderFinish(RawId selfId, GPUCommandBufferDescriptor desc);
async CommandEncoderDestroy(RawId selfId);
async CommandBufferDestroy(RawId selfId);
@ -59,6 +60,7 @@ parent:
async ShaderModuleDestroy(RawId selfId);
async DeviceCreateComputePipeline(RawId selfId, SerialComputePipelineDescriptor desc, RawId newId);
async ComputePipelineDestroy(RawId selfId);
async RenderPipelineDestroy(RawId selfId);
async Shutdown();
child:

View File

@ -248,6 +248,10 @@ void WebGPUChild::DestroyComputePipeline(RawId aId) {
SendComputePipelineDestroy(aId);
ffi::wgpu_client_kill_compute_pipeline_id(mClient, aId);
}
void WebGPUChild::DestroyRenderPipeline(RawId aId) {
SendRenderPipelineDestroy(aId);
ffi::wgpu_client_kill_render_pipeline_id(mClient, aId);
}
} // namespace webgpu
} // namespace mozilla

View File

@ -66,6 +66,7 @@ class WebGPUChild final : public PWebGPUChild {
void DestroyBindGroup(RawId aId);
void DestroyShaderModule(RawId aId);
void DestroyComputePipeline(RawId aId);
void DestroyRenderPipeline(RawId aId);
private:
virtual ~WebGPUChild();

View File

@ -146,6 +146,13 @@ ipc::IPCResult WebGPUParent::RecvCommandEncoderRunComputePass(RawId aSelfId,
return IPC_OK();
}
ipc::IPCResult WebGPUParent::RecvCommandEncoderRunRenderPass(RawId aSelfId,
Shmem&& shmem) {
ffi::wgpu_server_encode_render_pass(mContext, aSelfId, shmem.get<uint8_t>(),
shmem.Size<uint8_t>());
return IPC_OK();
}
ipc::IPCResult WebGPUParent::RecvCommandEncoderFinish(
RawId aSelfId, const dom::GPUCommandBufferDescriptor& aDesc) {
Unused << aDesc;
@ -272,6 +279,11 @@ ipc::IPCResult WebGPUParent::RecvComputePipelineDestroy(RawId aSelfId) {
return IPC_OK();
}
ipc::IPCResult WebGPUParent::RecvRenderPipelineDestroy(RawId aSelfId) {
ffi::wgpu_server_render_pipeline_destroy(mContext, aSelfId);
return IPC_OK();
}
ipc::IPCResult WebGPUParent::RecvShutdown() {
mTimer.Stop();
ffi::wgpu_server_poll_all_devices(mContext, true);

View File

@ -47,6 +47,7 @@ class WebGPUParent final : public PWebGPUParent {
RawId aDestinationId, BufferAddress aDestinationOffset,
BufferAddress aSize);
ipc::IPCResult RecvCommandEncoderRunComputePass(RawId aSelfId, Shmem&& shmem);
ipc::IPCResult RecvCommandEncoderRunRenderPass(RawId aSelfId, Shmem&& shmem);
ipc::IPCResult RecvCommandEncoderFinish(
RawId aSelfId, const dom::GPUCommandBufferDescriptor& aDesc);
ipc::IPCResult RecvCommandEncoderDestroy(RawId aSelfId);
@ -71,6 +72,7 @@ class WebGPUParent final : public PWebGPUParent {
RawId aSelfId, const SerialComputePipelineDescriptor& aDesc,
RawId aNewId);
ipc::IPCResult RecvComputePipelineDestroy(RawId aSelfId);
ipc::IPCResult RecvRenderPipelineDestroy(RawId aSelfId);
ipc::IPCResult RecvShutdown();
private:

View File

@ -7,3 +7,5 @@ prefs = dom.webgpu.enabled=true
[test_buffer_mapping.html]
[test_command_buffer_creation.html]
[test_submit_compute_empty.html]
[test_submit_render_empty.html]
disabled=https://bugzilla.mozilla.org/show_bug.cgi?id=1614702

View File

@ -0,0 +1,32 @@
<!DOCTYPE HTML>
<html>
<head>
<meta charset='utf-8'>
<script src="/tests/SimpleTest/SimpleTest.js"></script>
<link rel="stylesheet" href="/tests/SimpleTest/test.css">
</head>
<body>
<script>
ok(SpecialPowers.getBoolPref('dom.webgpu.enabled'), 'Pref should be enabled.');
const func = async function() {
const adapter = await navigator.gpu.requestAdapter();
const device = await adapter.requestDevice();
const encoder = device.createCommandEncoder();
const pass = encoder.beginRenderPass({
colorAttachments: [],
});
pass.endPass();
const command_buffer = encoder.finish();
device.defaultQueue.submit([command_buffer]);
ok(command_buffer !== undefined, 'command_buffer !== undefined');
};
SimpleTest.waitForExplicitFinish();
func().finally(() => SimpleTest.finish());
</script>
</body>
</html>

View File

@ -760,9 +760,10 @@ dictionary GPUCommandEncoderDescriptor : GPUObjectDescriptorBase {
[Pref="dom.webgpu.enabled",
Exposed=Window]
interface GPUCommandEncoder {
//GPURenderPassEncoder beginRenderPass(GPURenderPassDescriptor descriptor);
[NewObject]
GPUComputePassEncoder beginComputePass(optional GPUComputePassDescriptor descriptor = {});
[NewObject]
GPURenderPassEncoder beginRenderPass(GPURenderPassDescriptor descriptor);
void copyBufferToBuffer(
GPUBuffer source,
@ -813,16 +814,15 @@ interface mixin GPUProgrammablePassEncoder {
// Render Pass
interface mixin GPURenderEncoderBase {
//void setPipeline(GPURenderPipeline pipeline);
void setPipeline(GPURenderPipeline pipeline);
//void setIndexBuffer(GPUBuffer buffer, u64 offset);
//void setVertexBuffers(u32 startSlot,
// sequence<GPUBuffer> buffers, sequence<u64> offsets);
void setIndexBuffer(GPUBuffer buffer, optional u64 offset = 0);
void setVertexBuffer(u32 slot, GPUBuffer buffer, optional u64 offset = 0);
//void draw(u32 vertexCount, u32 instanceCount,
// u32 firstVertex, u32 firstInstance);
//void drawIndexed(u32 indexCount, u32 instanceCount,
// u32 firstIndex, i32 baseVertex, u32 firstInstance);
void draw(u32 vertexCount, u32 instanceCount,
u32 firstVertex, u32 firstInstance);
void drawIndexed(u32 indexCount, u32 instanceCount,
u32 firstIndex, i32 baseVertex, u32 firstInstance);
//void drawIndirect(GPUBuffer indirectBuffer, u64 indirectOffset);
//void drawIndexedIndirect(GPUBuffer indirectBuffer, u64 indirectOffset);
@ -841,7 +841,8 @@ interface GPURenderPassEncoder {
//void setStencilReference(u32 reference);
//void executeBundles(sequence<GPURenderBundle> bundles);
//void endPass();
[Throws]
void endPass();
};
GPURenderPassEncoder includes GPUObjectBase;
GPURenderPassEncoder includes GPUProgrammablePassEncoder;
@ -887,7 +888,7 @@ interface GPURenderBundleEncoder {
//GPURenderBundle finish(optional GPURenderBundleDescriptor descriptor = {});
};
GPURenderBundleEncoder includes GPUObjectBase;
GPURenderBundleEncoder includes GPURenderEncoderBase;
//GPURenderBundleEncoder includes GPURenderEncoderBase;
dictionary GPURenderBundleDescriptor : GPUObjectDescriptorBase {
};

View File

@ -1,28 +1,30 @@
This is an active GitHub mirror of the WebGPU native implementation in Rust, which now lives in [Mozilla-central](https://hg.mozilla.org/mozilla-central). Issues and pull requests are accepted, but we merge them in m-c manually and then sync to GitHub instead of landing directly here.
<img align="right" width="25%" src="logo.png">
This is an active GitHub mirror of the WebGPU implementation in Rust, which now lives in "gfx/wgpu" of [Mozilla-central](https://hg.mozilla.org/mozilla-central/file/tip/gfx/wgpu). Issues and pull requests are accepted, but some bidirectional synchronization may be involved.
---
# WebGPU
[![Build Status](https://travis-ci.org/gfx-rs/wgpu.svg)](https://travis-ci.org/gfx-rs/wgpu)
[![Matrix](https://img.shields.io/badge/Matrix-%23wgpu%3Amatrix.org-blueviolet.svg)](https://matrix.to/#/#wgpu:matrix.org)
[![Build Status](https://travis-ci.org/gfx-rs/wgpu.svg?branch=master)](https://travis-ci.org/gfx-rs/wgpu)
[![Crates.io](https://img.shields.io/crates/v/wgpu-core.svg?label=wgpu-core)](https://crates.io/crates/wgpu-core)
[![Crates.io](https://img.shields.io/crates/v/wgpu-native.svg?label=wgpu-native)](https://crates.io/crates/wgpu-native)
[![Gitter](https://badges.gitter.im/gfx-rs/webgpu.svg)](https://gitter.im/gfx-rs/webgpu)
This is an experimental [WebGPU](https://www.w3.org/community/gpu/) implementation as a native static library. It's written in Rust and is based on [gfx-hal](https://github.com/gfx-rs/gfx) and [Rendy](https://github.com/amethyst/rendy) libraries. The corresponding WebIDL specification can be found at [gpuweb project](https://github.com/gpuweb/gpuweb/blob/master/spec/index.bs).
This is an experimental [WebGPU](https://www.w3.org/community/gpu/) implementation, exposing both Rust and C interfaces as a native static library. It's written in Rust and is based on [gfx-hal](https://github.com/gfx-rs/gfx) and a few [Rendy](https://github.com/amethyst/rendy) bits. See the upstream [WebGPU specification](https://gpuweb.github.io/gpuweb/) (work in progress).
The implementation consists of the following parts:
1. `wgpu-core` - internal Rust API for WebGPU implementations to use
2. `wgpu-native` - the native implementation of WebGPU as a C API library
3. `wgpu-remote` - remoting layer to work with WebGPU across the process boundary
4. `ffi` - the C headers generated by [cbindgen](https://github.com/eqrion/cbindgen) for both of the libraries
3. `wgpu-remote` - remoting layer to work with WebGPU across the process boundary, as a C API library used by Gecko
4. `ffi` - the C headers generated by [cbindgen](https://github.com/eqrion/cbindgen) for the native libraries
## Supported Platforms
API | Windows | Linux | macOS & iOS |
----- | ------------------ | ------------------ | ------------------ |
DX11 | :heavy_check_mark: | | |
DX11 | :white_check_mark: | | |
DX12 | :heavy_check_mark: | | |
Vulkan | :heavy_check_mark: | :heavy_check_mark: | |
Metal | | | :heavy_check_mark: |
OpenGL | | | |
OpenGL | :construction: | :construction: | :construction: |
## Usage

View File

@ -2,7 +2,7 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/* Generated with cbindgen:0.12.2 */
/* Generated with cbindgen:0.13.1 */
/* DO NOT MODIFY THIS MANUALLY! This file was generated using cbindgen.
* To generate this file:
@ -311,6 +311,8 @@ typedef uint64_t WGPUId_TextureView_Dummy;
typedef WGPUId_TextureView_Dummy WGPUTextureViewId;
typedef const WGPUTextureViewId *WGPUOptionRef_TextureViewId;
typedef struct {
double r;
double g;
@ -326,13 +328,13 @@ typedef struct {
typedef struct {
WGPUTextureViewId attachment;
WGPUTextureViewId resolve_target;
WGPUOptionRef_TextureViewId resolve_target;
WGPULoadOp load_op;
WGPUStoreOp store_op;
WGPUColor clear_color;
} WGPURenderPassColorAttachmentDescriptorBase_TextureViewId__TextureViewId;
} WGPURenderPassColorAttachmentDescriptorBase_TextureViewId__OptionRef_TextureViewId;
typedef WGPURenderPassColorAttachmentDescriptorBase_TextureViewId__TextureViewId WGPURawRenderPassColorAttachmentDescriptor;
typedef WGPURenderPassColorAttachmentDescriptorBase_TextureViewId__OptionRef_TextureViewId WGPURenderPassColorAttachmentDescriptor;
typedef struct {
WGPUTextureViewId attachment;
@ -346,28 +348,6 @@ typedef struct {
typedef WGPURenderPassDepthStencilAttachmentDescriptorBase_TextureViewId WGPURenderPassDepthStencilAttachmentDescriptor;
typedef struct {
WGPURawRenderPassColorAttachmentDescriptor colors[WGPUMAX_COLOR_TARGETS];
WGPURenderPassDepthStencilAttachmentDescriptor depth_stencil;
} WGPURawRenderTargets;
typedef struct {
WGPURawPass raw;
WGPURawRenderTargets targets;
} WGPURawRenderPass;
typedef const WGPUTextureViewId *WGPUOptionRef_TextureViewId;
typedef struct {
WGPUTextureViewId attachment;
WGPUOptionRef_TextureViewId resolve_target;
WGPULoadOp load_op;
WGPUStoreOp store_op;
WGPUColor clear_color;
} WGPURenderPassColorAttachmentDescriptorBase_TextureViewId__OptionRef_TextureViewId;
typedef WGPURenderPassColorAttachmentDescriptorBase_TextureViewId__OptionRef_TextureViewId WGPURenderPassColorAttachmentDescriptor;
typedef struct {
const WGPURenderPassColorAttachmentDescriptor *color_attachments;
uintptr_t color_attachments_length;
@ -680,7 +660,7 @@ typedef struct {
typedef WGPUDeviceId WGPUQueueId;
typedef WGPURawRenderPass *WGPURenderPassId;
typedef WGPURawPass *WGPURenderPassId;
typedef uint64_t WGPUId_RenderBundle_Dummy;
@ -746,12 +726,12 @@ WGPURawPass *wgpu_command_encoder_begin_compute_pass(WGPUCommandEncoderId encode
/**
* # Safety
*
* This function is unsafe as there is no guarantee that the given pointer
* (`RenderPassDescriptor::color_attachments`) is valid for
* `RenderPassDescriptor::color_attachments_length` elements.
* This function is unsafe because improper use may lead to memory
* problems. For example, a double-free may occur if the function is called
* twice on the same raw pointer.
*/
WGPURawRenderPass *wgpu_command_encoder_begin_render_pass(WGPUCommandEncoderId encoder_id,
const WGPURenderPassDescriptor *desc);
WGPURawPass *wgpu_command_encoder_begin_render_pass(WGPUCommandEncoderId encoder_id,
const WGPURenderPassDescriptor *desc);
void wgpu_command_encoder_copy_buffer_to_buffer(WGPUCommandEncoderId command_encoder_id,
WGPUBufferId source,
@ -882,26 +862,26 @@ void wgpu_queue_submit(WGPUQueueId queue_id,
const WGPUCommandBufferId *command_buffers,
uintptr_t command_buffers_length);
void wgpu_render_pass_destroy(WGPURawRenderPass *pass);
void wgpu_render_pass_destroy(WGPURawPass *pass);
void wgpu_render_pass_draw(WGPURawRenderPass *pass,
void wgpu_render_pass_draw(WGPURawPass *pass,
uint32_t vertex_count,
uint32_t instance_count,
uint32_t first_vertex,
uint32_t first_instance);
void wgpu_render_pass_draw_indexed(WGPURawRenderPass *pass,
void wgpu_render_pass_draw_indexed(WGPURawPass *pass,
uint32_t index_count,
uint32_t instance_count,
uint32_t first_index,
int32_t base_vertex,
uint32_t first_instance);
void wgpu_render_pass_draw_indexed_indirect(WGPURawRenderPass *pass,
void wgpu_render_pass_draw_indexed_indirect(WGPURawPass *pass,
WGPUBufferId buffer_id,
WGPUBufferAddress offset);
void wgpu_render_pass_draw_indirect(WGPURawRenderPass *pass,
void wgpu_render_pass_draw_indirect(WGPURawPass *pass,
WGPUBufferId buffer_id,
WGPUBufferAddress offset);
@ -914,17 +894,17 @@ void wgpu_render_pass_draw_indirect(WGPURawRenderPass *pass,
*/
void wgpu_render_pass_end_pass(WGPURenderPassId pass_id);
void wgpu_render_pass_execute_bundles(WGPURawRenderPass *_pass,
void wgpu_render_pass_execute_bundles(WGPURawPass *_pass,
const WGPURenderBundleId *_bundles,
uintptr_t _bundles_length);
const uint8_t *wgpu_render_pass_finish(WGPURawRenderPass *pass, uintptr_t *length);
const uint8_t *wgpu_render_pass_finish(WGPURawPass *pass, uintptr_t *length);
void wgpu_render_pass_insert_debug_marker(WGPURawRenderPass *_pass, WGPURawString _label);
void wgpu_render_pass_insert_debug_marker(WGPURawPass *_pass, WGPURawString _label);
void wgpu_render_pass_pop_debug_group(WGPURawRenderPass *_pass);
void wgpu_render_pass_pop_debug_group(WGPURawPass *_pass);
void wgpu_render_pass_push_debug_group(WGPURawRenderPass *_pass, WGPURawString _label);
void wgpu_render_pass_push_debug_group(WGPURawPass *_pass, WGPURawString _label);
/**
* # Safety
@ -932,27 +912,27 @@ void wgpu_render_pass_push_debug_group(WGPURawRenderPass *_pass, WGPURawString _
* This function is unsafe as there is no guarantee that the given pointer is
* valid for `offset_length` elements.
*/
void wgpu_render_pass_set_bind_group(WGPURawRenderPass *pass,
void wgpu_render_pass_set_bind_group(WGPURawPass *pass,
uint32_t index,
WGPUBindGroupId bind_group_id,
const WGPUDynamicOffset *offsets,
uintptr_t offset_length);
void wgpu_render_pass_set_blend_color(WGPURawRenderPass *pass, const WGPUColor *color);
void wgpu_render_pass_set_blend_color(WGPURawPass *pass, const WGPUColor *color);
void wgpu_render_pass_set_index_buffer(WGPURawRenderPass *pass,
void wgpu_render_pass_set_index_buffer(WGPURawPass *pass,
WGPUBufferId buffer_id,
WGPUBufferAddress offset);
void wgpu_render_pass_set_pipeline(WGPURawRenderPass *pass, WGPURenderPipelineId pipeline_id);
void wgpu_render_pass_set_pipeline(WGPURawPass *pass, WGPURenderPipelineId pipeline_id);
void wgpu_render_pass_set_scissor_rect(WGPURawRenderPass *pass,
void wgpu_render_pass_set_scissor_rect(WGPURawPass *pass,
uint32_t x,
uint32_t y,
uint32_t w,
uint32_t h);
void wgpu_render_pass_set_stencil_reference(WGPURawRenderPass *pass, uint32_t value);
void wgpu_render_pass_set_stencil_reference(WGPURawPass *pass, uint32_t value);
/**
* # Safety
@ -960,13 +940,13 @@ void wgpu_render_pass_set_stencil_reference(WGPURawRenderPass *pass, uint32_t va
* This function is unsafe as there is no guarantee that the given pointers
* (`buffer_ids` and `offsets`) are valid for `length` elements.
*/
void wgpu_render_pass_set_vertex_buffers(WGPURawRenderPass *pass,
void wgpu_render_pass_set_vertex_buffers(WGPURawPass *pass,
uint32_t start_slot,
const WGPUBufferId *buffer_ids,
const WGPUBufferAddress *offsets,
uintptr_t length);
void wgpu_render_pass_set_viewport(WGPURawRenderPass *pass,
void wgpu_render_pass_set_viewport(WGPURawPass *pass,
float x,
float y,
float w,

BIN gfx/wgpu/logo.png (new binary file, 21 KiB; image not shown)

View File

@ -87,6 +87,7 @@ pub struct PipelineLayout<B: hal::Backend> {
#[repr(C)]
#[derive(Debug)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct BufferBinding {
pub buffer: BufferId,
pub offset: BufferAddress,
@ -95,6 +96,7 @@ pub struct BufferBinding {
#[repr(C)]
#[derive(Debug)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum BindingResource {
Buffer(BufferBinding),
Sampler(SamplerId),
@ -103,6 +105,7 @@ pub enum BindingResource {
#[repr(C)]
#[derive(Debug)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct BindGroupBinding {
pub binding: u32,
pub resource: BindingResource,

View File

@ -81,7 +81,7 @@ impl BindGroupEntry {
}
self.expected_layout_id == Some(layout_id)
}
None => true,
None => false,
};
self.provided = Some(BindGroupPair {

View File

@ -40,7 +40,7 @@ enum ComputeCommand {
}
impl super::RawPass {
pub fn new_compute(parent: id::CommandEncoderId) -> Self {
pub unsafe fn new_compute(parent: id::CommandEncoderId) -> Self {
Self::from_vec(Vec::<ComputeCommand>::with_capacity(1), parent)
}

View File

@ -208,56 +208,16 @@ pub struct CommandBufferDescriptor {
pub todo: u32,
}
type RawRenderPassColorAttachmentDescriptor =
pub type RawRenderPassColorAttachmentDescriptor =
RenderPassColorAttachmentDescriptorBase<id::TextureViewId, id::TextureViewId>;
#[repr(C)]
#[derive(peek_poke::PeekCopy, peek_poke::Poke)]
pub struct RawRenderTargets {
pub colors: [RawRenderPassColorAttachmentDescriptor; MAX_COLOR_TARGETS],
pub depth_stencil: RenderPassDepthStencilAttachmentDescriptor,
}
#[repr(C)]
pub struct RawRenderPass {
raw: RawPass,
targets: RawRenderTargets,
}
/// # Safety
///
/// This function is unsafe as there is no guarantee that the given pointer
/// (`RenderPassDescriptor::color_attachments`) is valid for
/// `RenderPassDescriptor::color_attachments_length` elements.
#[no_mangle]
pub unsafe extern "C" fn wgpu_command_encoder_begin_render_pass(
encoder_id: id::CommandEncoderId,
desc: &RenderPassDescriptor,
) -> *mut RawRenderPass {
let mut colors: [RawRenderPassColorAttachmentDescriptor; MAX_COLOR_TARGETS] = mem::zeroed();
for (color, at) in colors
.iter_mut()
.zip(slice::from_raw_parts(desc.color_attachments, desc.color_attachments_length))
{
*color = RawRenderPassColorAttachmentDescriptor {
attachment: at.attachment,
resolve_target: at.resolve_target.map_or(id::TextureViewId::ERROR, |rt| *rt),
load_op: at.load_op,
store_op: at.store_op,
clear_color: at.clear_color,
};
}
let pass = RawRenderPass {
raw: RawPass::new_render(encoder_id),
targets: RawRenderTargets {
colors,
depth_stencil: desc.depth_stencil_attachment
.cloned()
.unwrap_or_else(|| mem::zeroed()),
},
};
Box::into_raw(Box::new(pass))
}
impl<F> Global<F> {
pub fn command_encoder_finish<B: GfxBackend>(
&self,

View File

@ -6,6 +6,7 @@ use crate::{
command::{
bind::{Binder, LayoutChange},
PhantomSlice,
RawRenderPassColorAttachmentDescriptor,
RawRenderTargets,
},
conv,
@ -37,26 +38,28 @@ use std::{
collections::hash_map::Entry,
iter,
marker::PhantomData,
mem,
ops::Range,
slice,
};
#[repr(C)]
#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq, PeekCopy, Poke)]
pub enum LoadOp {
Clear = 0,
Load = 1,
}
#[repr(C)]
#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq, PeekCopy, Poke)]
pub enum StoreOp {
Clear = 0,
Store = 1,
}
#[repr(C)]
#[derive(Debug)]
#[derive(Debug, PeekCopy, Poke)]
pub struct RenderPassColorAttachmentDescriptorBase<T, R> {
pub attachment: T,
pub resolve_target: R,
@ -66,7 +69,7 @@ pub struct RenderPassColorAttachmentDescriptorBase<T, R> {
}
#[repr(C)]
#[derive(Clone, Debug)]
#[derive(Clone, Debug, PeekCopy, Poke)]
pub struct RenderPassDepthStencilAttachmentDescriptorBase<T> {
pub attachment: T,
pub depth_load_op: LoadOp,
@ -154,16 +157,36 @@ enum RenderCommand {
}
impl super::RawPass {
pub fn new_render(parent_id: id::CommandEncoderId) -> Self {
Self::from_vec(Vec::<RenderCommand>::with_capacity(1), parent_id)
}
}
pub unsafe fn new_render(parent_id: id::CommandEncoderId, desc: &RenderPassDescriptor) -> Self {
let mut pass = Self::from_vec(Vec::<RenderCommand>::with_capacity(1), parent_id);
impl super::RawRenderPass {
pub unsafe fn finish_render(mut self) -> (Vec<u8>, id::CommandEncoderId, RawRenderTargets) {
self.raw.finish(RenderCommand::End);
let (vec, parent_id) = self.raw.into_vec();
(vec, parent_id, self.targets)
let mut targets = RawRenderTargets {
depth_stencil: desc.depth_stencil_attachment
.cloned()
.unwrap_or_else(|| mem::zeroed()),
colors: mem::zeroed(),
};
for (color, at) in targets.colors
.iter_mut()
.zip(slice::from_raw_parts(desc.color_attachments, desc.color_attachments_length))
{
*color = RawRenderPassColorAttachmentDescriptor {
attachment: at.attachment,
resolve_target: at.resolve_target.map_or(id::TextureViewId::ERROR, |rt| *rt),
load_op: at.load_op,
store_op: at.store_op,
clear_color: at.clear_color,
};
}
pass.encode(&targets);
pass
}
pub unsafe fn finish_render(mut self) -> (Vec<u8>, id::CommandEncoderId) {
self.finish(RenderCommand::End);
let (vec, parent_id) = self.into_vec();
(vec, parent_id)
}
}
@ -289,8 +312,6 @@ impl<F> Global<F> {
pub fn command_encoder_run_render_pass<B: GfxBackend>(
&self,
encoder_id: id::CommandEncoderId,
color_attachments: &[RenderPassColorAttachmentDescriptor],
depth_stencil_attachment: Option<&RenderPassDepthStencilAttachmentDescriptor>,
raw_data: &[u8],
) {
let hub = B::hub(self);
@ -316,6 +337,38 @@ impl<F> Global<F> {
let (texture_guard, mut token) = hub.textures.read(&mut token);
let (view_guard, _) = hub.texture_views.read(&mut token);
let mut peeker = raw_data.as_ptr();
let raw_data_end = unsafe {
raw_data.as_ptr().add(raw_data.len())
};
let mut targets: RawRenderTargets = unsafe { mem::zeroed() };
assert!(unsafe { peeker.add(RawRenderTargets::max_size()) <= raw_data_end });
peeker = unsafe { targets.peek_from(peeker) };
let color_attachments = targets.colors
.iter()
.take_while(|at| at.attachment != id::TextureViewId::ERROR)
.map(|at| {
RenderPassColorAttachmentDescriptor {
attachment: at.attachment,
resolve_target: if at.resolve_target == id::TextureViewId::ERROR {
None
} else {
Some(&at.resolve_target)
},
load_op: at.load_op,
store_op: at.store_op,
clear_color: at.clear_color,
}
})
.collect::<arrayvec::ArrayVec<[_; MAX_COLOR_TARGETS]>>();
let depth_stencil_attachment = if targets.depth_stencil.attachment == id::TextureViewId::ERROR {
None
} else {
Some(&targets.depth_stencil)
};
let (context, sample_count) = {
use hal::{adapter::PhysicalDevice as _, device::Device as _};
@ -401,7 +454,7 @@ impl<F> Global<F> {
let mut colors = ArrayVec::new();
let mut resolves = ArrayVec::new();
for at in color_attachments {
for at in &color_attachments {
let view = &view_guard[at.attachment];
if let Some(ex) = extent {
assert_eq!(ex, view.extent);
@ -770,10 +823,6 @@ impl<F> Global<F> {
},
};
let mut peeker = raw_data.as_ptr();
let raw_data_end = unsafe {
raw_data.as_ptr().add(raw_data.len())
};
let mut command = RenderCommand::Draw {
vertex_count: 0,
instance_count: 0,
@ -1113,7 +1162,7 @@ impl<F> Global<F> {
pub mod render_ffi {
use super::{
RenderCommand,
super::{PhantomSlice, RawRenderPass, Rect},
super::{PhantomSlice, RawPass, Rect},
};
use crate::{
id,
@ -1132,38 +1181,38 @@ pub mod render_ffi {
// `RawPass::encode` and `RawPass::encode_slice`.
#[no_mangle]
pub unsafe extern "C" fn wgpu_render_pass_set_bind_group(
pass: &mut RawRenderPass,
pass: &mut RawPass,
index: u32,
bind_group_id: id::BindGroupId,
offsets: *const DynamicOffset,
offset_length: usize,
) {
pass.raw.encode(&RenderCommand::SetBindGroup {
pass.encode(&RenderCommand::SetBindGroup {
index: index.try_into().unwrap(),
num_dynamic_offsets: offset_length.try_into().unwrap(),
bind_group_id,
phantom_offsets: PhantomSlice::new(),
});
pass.raw.encode_slice(
pass.encode_slice(
slice::from_raw_parts(offsets, offset_length),
);
}
#[no_mangle]
pub unsafe extern "C" fn wgpu_render_pass_set_pipeline(
pass: &mut RawRenderPass,
pass: &mut RawPass,
pipeline_id: id::RenderPipelineId,
) {
pass.raw.encode(&RenderCommand::SetPipeline(pipeline_id));
pass.encode(&RenderCommand::SetPipeline(pipeline_id));
}
#[no_mangle]
pub unsafe extern "C" fn wgpu_render_pass_set_index_buffer(
pass: &mut RawRenderPass,
pass: &mut RawPass,
buffer_id: id::BufferId,
offset: BufferAddress,
) {
pass.raw.encode(&RenderCommand::SetIndexBuffer {
pass.encode(&RenderCommand::SetIndexBuffer {
buffer_id,
offset,
});
@ -1177,45 +1226,45 @@ pub mod render_ffi {
// `RawPass::encode` and `RawPass::encode_slice`.
#[no_mangle]
pub unsafe extern "C" fn wgpu_render_pass_set_vertex_buffers(
pass: &mut RawRenderPass,
pass: &mut RawPass,
start_slot: u32,
buffer_ids: *const id::BufferId,
offsets: *const BufferAddress,
length: usize,
) {
pass.raw.encode(&RenderCommand::SetVertexBuffers {
pass.encode(&RenderCommand::SetVertexBuffers {
start_index: start_slot.try_into().unwrap(),
count: length.try_into().unwrap(),
phantom_buffer_ids: PhantomSlice::new(),
phantom_offsets: PhantomSlice::new(),
});
pass.raw.encode_slice(
pass.encode_slice(
slice::from_raw_parts(buffer_ids, length),
);
pass.raw.encode_slice(
pass.encode_slice(
slice::from_raw_parts(offsets, length),
);
}
#[no_mangle]
pub unsafe extern "C" fn wgpu_render_pass_set_blend_color(
pass: &mut RawRenderPass,
pass: &mut RawPass,
color: &Color,
) {
pass.raw.encode(&RenderCommand::SetBlendColor(*color));
pass.encode(&RenderCommand::SetBlendColor(*color));
}
#[no_mangle]
pub unsafe extern "C" fn wgpu_render_pass_set_stencil_reference(
pass: &mut RawRenderPass,
pass: &mut RawPass,
value: u32,
) {
pass.raw.encode(&RenderCommand::SetStencilReference(value));
pass.encode(&RenderCommand::SetStencilReference(value));
}
#[no_mangle]
pub unsafe extern "C" fn wgpu_render_pass_set_viewport(
pass: &mut RawRenderPass,
pass: &mut RawPass,
x: f32,
y: f32,
w: f32,
@ -1223,7 +1272,7 @@ pub mod render_ffi {
depth_min: f32,
depth_max: f32,
) {
pass.raw.encode(&RenderCommand::SetViewport {
pass.encode(&RenderCommand::SetViewport {
rect: Rect { x, y, w, h },
depth_min,
depth_max,
@ -1232,24 +1281,24 @@ pub mod render_ffi {
#[no_mangle]
pub unsafe extern "C" fn wgpu_render_pass_set_scissor_rect(
pass: &mut RawRenderPass,
pass: &mut RawPass,
x: u32,
y: u32,
w: u32,
h: u32,
) {
pass.raw.encode(&RenderCommand::SetScissor(Rect { x, y, w, h }));
pass.encode(&RenderCommand::SetScissor(Rect { x, y, w, h }));
}
#[no_mangle]
pub unsafe extern "C" fn wgpu_render_pass_draw(
pass: &mut RawRenderPass,
pass: &mut RawPass,
vertex_count: u32,
instance_count: u32,
first_vertex: u32,
first_instance: u32,
) {
pass.raw.encode(&RenderCommand::Draw {
pass.encode(&RenderCommand::Draw {
vertex_count,
instance_count,
first_vertex,
@ -1259,14 +1308,14 @@ pub mod render_ffi {
#[no_mangle]
pub unsafe extern "C" fn wgpu_render_pass_draw_indexed(
pass: &mut RawRenderPass,
pass: &mut RawPass,
index_count: u32,
instance_count: u32,
first_index: u32,
base_vertex: i32,
first_instance: u32,
) {
pass.raw.encode(&RenderCommand::DrawIndexed {
pass.encode(&RenderCommand::DrawIndexed {
index_count,
instance_count,
first_index,
@ -1277,11 +1326,11 @@ pub mod render_ffi {
#[no_mangle]
pub unsafe extern "C" fn wgpu_render_pass_draw_indirect(
pass: &mut RawRenderPass,
pass: &mut RawPass,
buffer_id: id::BufferId,
offset: BufferAddress,
) {
pass.raw.encode(&RenderCommand::DrawIndirect {
pass.encode(&RenderCommand::DrawIndirect {
buffer_id,
offset,
});
@ -1289,11 +1338,11 @@ pub mod render_ffi {
#[no_mangle]
pub unsafe extern "C" fn wgpu_render_pass_draw_indexed_indirect(
pass: &mut RawRenderPass,
pass: &mut RawPass,
buffer_id: id::BufferId,
offset: BufferAddress,
) {
pass.raw.encode(&RenderCommand::DrawIndexedIndirect {
pass.encode(&RenderCommand::DrawIndexedIndirect {
buffer_id,
offset,
});
@ -1301,7 +1350,7 @@ pub mod render_ffi {
#[no_mangle]
pub extern "C" fn wgpu_render_pass_execute_bundles(
_pass: &mut RawRenderPass,
_pass: &mut RawPass,
_bundles: *const id::RenderBundleId,
_bundles_length: usize,
) {
@ -1310,7 +1359,7 @@ pub mod render_ffi {
#[no_mangle]
pub extern "C" fn wgpu_render_pass_push_debug_group(
_pass: &mut RawRenderPass,
_pass: &mut RawPass,
_label: RawString,
) {
//TODO
@ -1318,14 +1367,14 @@ pub mod render_ffi {
#[no_mangle]
pub extern "C" fn wgpu_render_pass_pop_debug_group(
_pass: &mut RawRenderPass,
_pass: &mut RawPass,
) {
//TODO
}
#[no_mangle]
pub extern "C" fn wgpu_render_pass_insert_debug_marker(
_pass: &mut RawRenderPass,
_pass: &mut RawPass,
_label: RawString,
) {
//TODO
@ -1333,17 +1382,11 @@ pub mod render_ffi {
#[no_mangle]
pub unsafe extern "C" fn wgpu_render_pass_finish(
pass: &mut RawRenderPass,
pass: &mut RawPass,
length: &mut usize,
) -> *const u8 {
//TODO: put target information into the byte stream
pass.raw.finish(RenderCommand::End);
*length = pass.raw.size();
pass.raw.base
}
#[no_mangle]
pub unsafe extern "C" fn wgpu_render_pass_destroy(pass: *mut RawRenderPass) {
let _ = Box::from_raw(pass).raw.into_vec();
pass.finish(RenderCommand::End);
*length = pass.size();
pass.base
}
}
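
One detail from the `render.rs` changes above is worth spelling out: because the pass header is serialized with peek-poke, it has to be fixed-size plain data, so the color attachments live in a `[_; MAX_COLOR_TARGETS]` array and `id::TextureViewId::ERROR` doubles as the sentinel for "unused slot" and "no resolve target"; the replay side rebuilds the optional values with `take_while`. Below is a small self-contained sketch of that sentinel-terminated fixed-array pattern, using invented ids and types rather than wgpu's real descriptors.

```rust
// Sketch of the sentinel-terminated fixed array used for the pass header.
// `SENTINEL` stands in for wgpu-core's `id::TextureViewId::ERROR`; the real
// code stores full attachment descriptors, not bare ids.

const MAX_COLOR_TARGETS: usize = 4;
const SENTINEL: u64 = 0; // invented sentinel value for "no attachment"

#[derive(Clone, Copy)]
struct RawColorAttachment {
    attachment: u64,     // texture view id, SENTINEL if the slot is unused
    resolve_target: u64, // texture view id, SENTINEL if there is no resolve
}

#[derive(Clone, Copy, Debug, PartialEq)]
struct ColorAttachment {
    attachment: u64,
    resolve_target: Option<u64>,
}

// Pack a variable-length list into the fixed-size header (recording side).
fn pack(attachments: &[ColorAttachment]) -> [RawColorAttachment; MAX_COLOR_TARGETS] {
    let mut raw = [RawColorAttachment { attachment: SENTINEL, resolve_target: SENTINEL };
        MAX_COLOR_TARGETS];
    for (slot, at) in raw.iter_mut().zip(attachments) {
        slot.attachment = at.attachment;
        slot.resolve_target = at.resolve_target.unwrap_or(SENTINEL);
    }
    raw
}

// Recover the list on the replay side, stopping at the first sentinel slot.
fn unpack(raw: &[RawColorAttachment; MAX_COLOR_TARGETS]) -> Vec<ColorAttachment> {
    raw.iter()
        .take_while(|at| at.attachment != SENTINEL)
        .map(|at| ColorAttachment {
            attachment: at.attachment,
            resolve_target: if at.resolve_target == SENTINEL {
                None
            } else {
                Some(at.resolve_target)
            },
        })
        .collect()
}

fn main() {
    let original = vec![ColorAttachment { attachment: 7, resolve_target: None }];
    assert_eq!(unpack(&pack(&original)), original);
}
```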

View File

@ -1369,6 +1369,9 @@ impl<F: AllIdentityFilter + IdentityFilter<id::CommandBufferId>> Global<F> {
if let Some((sc_id, fbo)) = comb.used_swap_chain.take() {
let sc = &mut swap_chain_guard[sc_id.value];
assert!(sc.acquired_view_id.is_some(),
"SwapChainOutput for {:?} was dropped before the respective command buffer {:?} got submitted!",
sc_id.value, cmb_id);
if sc.acquired_framebuffers.is_empty() {
signal_swapchain_semaphores.push(sc_id.value);
}

View File

@ -115,7 +115,7 @@ pub type ComputePipelineId = Id<crate::pipeline::ComputePipeline<Dummy>>;
// Command
pub type CommandBufferId = Id<crate::command::CommandBuffer<Dummy>>;
pub type CommandEncoderId = CommandBufferId;
pub type RenderPassId = *mut crate::command::RawRenderPass;
pub type RenderPassId = *mut crate::command::RawPass;
pub type ComputePassId = *mut crate::command::RawPass;
pub type RenderBundleId = Id<crate::command::RenderBundle<Dummy>>;
// Swap chain

View File

@ -15,8 +15,12 @@ use crate::{
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
pub use hal::adapter::{AdapterInfo, DeviceType};
use hal::{self, adapter::PhysicalDevice as _, queue::QueueFamily as _, Instance as _};
use hal::{
self,
adapter::{AdapterInfo as HalAdapterInfo, DeviceType as HalDeviceType, PhysicalDevice as _},
queue::QueueFamily as _,
Instance as _,
};
#[derive(Debug)]
@ -173,6 +177,69 @@ impl From<Backend> for BackendBit {
}
}
/// Metadata about a backend adapter.
#[derive(Clone, Debug, PartialEq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct AdapterInfo {
/// Adapter name
pub name: String,
/// Vendor PCI id of the adapter
pub vendor: usize,
/// PCI id of the adapter
pub device: usize,
/// Type of device
pub device_type: DeviceType,
/// Backend used for device
pub backend: Backend,
}
impl AdapterInfo {
fn from_gfx(adapter_info: HalAdapterInfo, backend: Backend) -> Self {
let HalAdapterInfo {
name,
vendor,
device,
device_type,
} = adapter_info;
AdapterInfo {
name,
vendor,
device,
device_type: device_type.into(),
backend,
}
}
}
/// Supported physical device types
#[derive(Clone, Debug, PartialEq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum DeviceType {
/// Other
Other,
/// Integrated
IntegratedGpu,
/// Discrete
DiscreteGpu,
/// Virtual / Hosted
VirtualGpu,
/// Cpu / Software Rendering
Cpu,
}
impl From<HalDeviceType> for DeviceType {
fn from(device_type: HalDeviceType) -> Self {
match device_type {
HalDeviceType::Other => Self::Other,
HalDeviceType::IntegratedGpu => Self::IntegratedGpu,
HalDeviceType::DiscreteGpu => Self::DiscreteGpu,
HalDeviceType::VirtualGpu => Self::VirtualGpu,
HalDeviceType::Cpu => Self::Cpu,
}
}
}
pub enum AdapterInputs<'a, I> {
IdSet(&'a [I], fn(&I) -> Backend),
Mask(BackendBit, fn() -> I),
@ -194,6 +261,76 @@ impl<I: Clone> AdapterInputs<'_, I> {
}
impl<F: IdentityFilter<AdapterId>> Global<F> {
pub fn enumerate_adapters(&self, inputs: AdapterInputs<F::Input>) -> Vec<AdapterId> {
let instance = &self.instance;
let mut token = Token::root();
let mut adapters = Vec::new();
#[cfg(any(
not(any(target_os = "ios", target_os = "macos")),
feature = "gfx-backend-vulkan"
))]
{
if let Some(ref inst) = instance.vulkan {
if let Some(id_vulkan) = inputs.find(Backend::Vulkan) {
for raw in inst.enumerate_adapters() {
let adapter = Adapter { raw };
log::info!("Adapter Vulkan {:?}", adapter.raw.info);
adapters.push(backend::Vulkan::hub(self).adapters.register_identity(
id_vulkan.clone(),
adapter,
&mut token,
));
}
}
}
}
#[cfg(any(target_os = "ios", target_os = "macos"))]
{
if let Some(id_metal) = inputs.find(Backend::Metal) {
for raw in instance.metal.enumerate_adapters() {
let adapter = Adapter { raw };
log::info!("Adapter Metal {:?}", adapter.raw.info);
adapters.push(backend::Metal::hub(self).adapters.register_identity(
id_metal.clone(),
adapter,
&mut token,
));
}
}
}
#[cfg(windows)]
{
if let Some(ref inst) = instance.dx12 {
if let Some(id_dx12) = inputs.find(Backend::Dx12) {
for raw in inst.enumerate_adapters() {
let adapter = Adapter { raw };
log::info!("Adapter Dx12 {:?}", adapter.raw.info);
adapters.push(backend::Dx12::hub(self).adapters.register_identity(
id_dx12.clone(),
adapter,
&mut token,
));
}
}
}
if let Some(id_dx11) = inputs.find(Backend::Dx11) {
for raw in instance.dx11.enumerate_adapters() {
let adapter = Adapter { raw };
log::info!("Adapter Dx11 {:?}", adapter.raw.info);
adapters.push(backend::Dx11::hub(self).adapters.register_identity(
id_dx11.clone(),
adapter,
&mut token,
));
}
}
}
adapters
}
pub fn pick_adapter(
&self,
desc: &RequestAdapterOptions,
@ -361,7 +498,7 @@ impl<F: IdentityFilter<AdapterId>> Global<F> {
let mut token = Token::root();
let (adapter_guard, _) = hub.adapters.read(&mut token);
let adapter = &adapter_guard[adapter_id];
adapter.raw.info.clone()
AdapterInfo::from_gfx(adapter.raw.info.clone(), adapter_id.backend())
}
pub fn adapter_destroy<B: GfxBackend>(&self, adapter_id: AdapterId) {

View File

@ -30,6 +30,9 @@ pub mod resource;
pub mod swap_chain;
pub mod track;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
pub use hal::pso::read_spirv;
use peek_poke::{PeekCopy, Poke};
@ -45,6 +48,7 @@ type Epoch = u32;
#[repr(u8)]
#[derive(Clone, Copy, Debug, PartialEq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum Backend {
Empty = 0,
Vulkan = 1,

View File

@ -5,7 +5,6 @@
use crate::GLOBAL;
pub use core::command::{
wgpu_command_encoder_begin_render_pass,
compute_ffi::*,
render_ffi::*,
};
@ -82,6 +81,20 @@ pub extern "C" fn wgpu_command_encoder_copy_texture_to_texture(
}
/// # Safety
///
/// This function is unsafe because improper use may lead to memory
/// problems. For example, a double-free may occur if the function is called
/// twice on the same raw pointer.
#[no_mangle]
pub unsafe extern "C" fn wgpu_command_encoder_begin_render_pass(
encoder_id: id::CommandEncoderId,
desc: &core::command::RenderPassDescriptor,
) -> *mut core::command::RawPass {
let pass = core::command::RawPass::new_render(encoder_id, desc);
Box::into_raw(Box::new(pass))
}
/// # Safety
///
/// This function is unsafe because improper use may lead to memory
@ -89,33 +102,13 @@ pub extern "C" fn wgpu_command_encoder_copy_texture_to_texture(
/// twice on the same raw pointer.
#[no_mangle]
pub unsafe extern "C" fn wgpu_render_pass_end_pass(pass_id: id::RenderPassId) {
let (pass_data, encoder_id, targets) = Box::from_raw(pass_id).finish_render();
let color_attachments: arrayvec::ArrayVec<[_; core::device::MAX_COLOR_TARGETS]> = targets.colors
.iter()
.flat_map(|at| {
if at.attachment == id::TextureViewId::ERROR {
None
} else {
Some(core::command::RenderPassColorAttachmentDescriptor {
attachment: at.attachment,
resolve_target: if at.resolve_target == id::TextureViewId::ERROR {
None
} else {
Some(&at.resolve_target)
},
load_op: at.load_op,
store_op: at.store_op,
clear_color: at.clear_color,
})
}
})
.collect();
let depth_stencil_attachment = if targets.depth_stencil.attachment == id::TextureViewId::ERROR {
None
} else {
Some(&targets.depth_stencil)
};
gfx_select!(encoder_id => GLOBAL.command_encoder_run_render_pass(encoder_id, &color_attachments, depth_stencil_attachment, &pass_data))
let (pass_data, encoder_id) = Box::from_raw(pass_id).finish_render();
gfx_select!(encoder_id => GLOBAL.command_encoder_run_render_pass(encoder_id, &pass_data))
}
#[no_mangle]
pub unsafe extern "C" fn wgpu_render_pass_destroy(pass: *mut core::command::RawPass) {
let _ = Box::from_raw(pass).into_vec();
}
/// # Safety

View File

@ -9,7 +9,6 @@ use core::{
};
pub use core::command::{
wgpu_command_encoder_begin_render_pass,
compute_ffi::*,
render_ffi::*,
};
@ -32,6 +31,7 @@ struct IdentityHub {
bind_groups: IdentityManager,
shader_modules: IdentityManager,
compute_pipelines: IdentityManager,
render_pipelines: IdentityManager,
}
#[derive(Debug, Default)]
@ -219,6 +219,19 @@ pub unsafe extern "C" fn wgpu_compute_pass_destroy(pass: core::command::RawPass)
let _ = pass.into_vec();
}
#[no_mangle]
pub unsafe extern "C" fn wgpu_command_encoder_begin_render_pass(
encoder_id: id::CommandEncoderId,
desc: &core::command::RenderPassDescriptor,
) -> core::command::RawPass {
core::command::RawPass::new_render(encoder_id, desc)
}
#[no_mangle]
pub unsafe extern "C" fn wgpu_render_pass_destroy(pass: core::command::RawPass) {
let _ = pass.into_vec();
}
#[no_mangle]
pub extern "C" fn wgpu_client_make_bind_group_layout_id(
client: &Client,
@ -353,3 +366,16 @@ pub extern "C" fn wgpu_client_kill_compute_pipeline_id(
.compute_pipelines
.free(id)
}
#[no_mangle]
pub extern "C" fn wgpu_client_kill_render_pipeline_id(
client: &Client,
id: id::RenderPipelineId,
) {
client
.identities
.lock()
.select(id.backend())
.render_pipelines
.free(id)
}

View File

@ -190,6 +190,11 @@ pub unsafe extern "C" fn wgpu_server_encoder_copy_buffer_to_buffer(
gfx_select!(self_id => global.command_encoder_copy_buffer_to_buffer(self_id, source_id, source_offset, destination_id, destination_offset, size));
}
/// # Safety
///
/// This function is unsafe as there is no guarantee that the given pointers are
/// valid for `color_attachments_length` and `command_length` elements,
/// respectively.
#[no_mangle]
pub unsafe extern "C" fn wgpu_server_encode_compute_pass(
global: &Global,
@ -210,15 +215,11 @@ pub unsafe extern "C" fn wgpu_server_encode_compute_pass(
pub unsafe extern "C" fn wgpu_server_encode_render_pass(
global: &Global,
self_id: id::CommandEncoderId,
color_attachments: *const core::command::RenderPassColorAttachmentDescriptor,
color_attachment_length: usize,
depth_stencil_attachment: Option<&core::command::RenderPassDepthStencilAttachmentDescriptor>,
commands: *const u8,
command_length: usize,
) {
let color_attachments = slice::from_raw_parts(color_attachments, color_attachment_length);
let raw_pass = slice::from_raw_parts(commands, command_length);
gfx_select!(self_id => global.command_encoder_run_render_pass(self_id, color_attachments, depth_stencil_attachment, raw_pass));
gfx_select!(self_id => global.command_encoder_run_render_pass(self_id, raw_pass));
}
/// # Safety
@ -325,3 +326,11 @@ pub extern "C" fn wgpu_server_compute_pipeline_destroy(
) {
gfx_select!(self_id => global.compute_pipeline_destroy(self_id));
}
#[no_mangle]
pub extern "C" fn wgpu_server_render_pipeline_destroy(
global: &Global,
self_id: id::RenderPipelineId,
) {
gfx_select!(self_id => global.render_pipeline_destroy(self_id));
}