summaryrefslogtreecommitdiffstats
path: root/dom/webgpu/ipc
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-28 14:29:10 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-28 14:29:10 +0000
commit2aa4a82499d4becd2284cdb482213d541b8804dd (patch)
treeb80bf8bf13c3766139fbacc530efd0dd9d54394c /dom/webgpu/ipc
parentInitial commit. (diff)
downloadfirefox-upstream.tar.xz
firefox-upstream.zip
Adding upstream version 86.0.1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to '')
-rw-r--r--dom/webgpu/ipc/PWebGPU.ipdl94
-rw-r--r--dom/webgpu/ipc/WebGPUChild.cpp697
-rw-r--r--dom/webgpu/ipc/WebGPUChild.h125
-rw-r--r--dom/webgpu/ipc/WebGPUParent.cpp713
-rw-r--r--dom/webgpu/ipc/WebGPUParent.h99
-rw-r--r--dom/webgpu/ipc/WebGPUSerialize.h53
-rw-r--r--dom/webgpu/ipc/WebGPUTypes.h20
7 files changed, 1801 insertions, 0 deletions
diff --git a/dom/webgpu/ipc/PWebGPU.ipdl b/dom/webgpu/ipc/PWebGPU.ipdl
new file mode 100644
index 0000000000..98bece449d
--- /dev/null
+++ b/dom/webgpu/ipc/PWebGPU.ipdl
@@ -0,0 +1,94 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: sw=2 ts=8 et :
+ */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+using layers::RGBDescriptor from "mozilla/layers/LayersSurfaces.h";
+using wr::ExternalImageId from "mozilla/webrender/WebRenderAPI.h";
+using RawId from "mozilla/webgpu/WebGPUTypes.h";
+using BufferAddress from "mozilla/webgpu/WebGPUTypes.h";
+using dom::GPURequestAdapterOptions from "mozilla/dom/WebGPUBinding.h";
+using dom::GPUDeviceDescriptor from "mozilla/dom/WebGPUBinding.h";
+using dom::GPUCommandBufferDescriptor from "mozilla/dom/WebGPUBinding.h";
+using webgpu::ffi::WGPUTextureDataLayout from "mozilla/webgpu/ffi/wgpu.h";
+using webgpu::ffi::WGPUTextureCopyView from "mozilla/webgpu/ffi/wgpu.h";
+using webgpu::ffi::WGPUExtent3d from "mozilla/webgpu/ffi/wgpu.h";
+using webgpu::ffi::WGPUHostMap from "mozilla/webgpu/ffi/wgpu.h";
+
+include "mozilla/ipc/ByteBufUtils.h";
+include "mozilla/webgpu/WebGPUSerialize.h";
+include "mozilla/layers/WebRenderMessageUtils.h";
+include protocol PCompositorBridge;
+
+namespace mozilla {
+namespace webgpu {
+
+/**
+ * Represents the connection between a WebGPUChild actor that issues WebGPU
+ * command from the content process, and a WebGPUParent in the compositor
+ * process that runs the commands.
+ */
+async protocol PWebGPU
+{
+ manager PCompositorBridge;
+
+// Content -> compositor: commands issued by the DOM-facing WebGPU objects.
+// Most resource creation travels as pre-serialized ByteBuf "actions"
+// produced by the wgpu client; the rest are explicit typed messages.
+parent:
+ async DeviceAction(RawId selfId, ByteBuf buf);
+ async TextureAction(RawId selfId, RawId aDeviceId, ByteBuf buf);
+ async CommandEncoderAction(RawId selfId, RawId aDeviceId, ByteBuf buf);
+ async BumpImplicitBindGroupLayout(RawId pipelineId, bool isCompute, uint32_t index, RawId assignId);
+
+ async InstanceRequestAdapter(GPURequestAdapterOptions options, RawId[] ids) returns (RawId adapterId);
+ async AdapterRequestDevice(RawId selfId, GPUDeviceDescriptor desc, RawId newId);
+ async AdapterDestroy(RawId selfId);
+ async BufferReturnShmem(RawId selfId, Shmem shmem);
+ async BufferMap(RawId selfId, WGPUHostMap hostMap, uint64_t offset, uint64_t size) returns (Shmem sm);
+ async BufferUnmap(RawId selfId, Shmem shmem, bool flush);
+ async BufferDestroy(RawId selfId);
+ async TextureDestroy(RawId selfId);
+ async TextureViewDestroy(RawId selfId);
+ async SamplerDestroy(RawId selfId);
+ async DeviceDestroy(RawId selfId);
+
+ async CommandEncoderFinish(RawId selfId, RawId deviceId, GPUCommandBufferDescriptor desc);
+ async CommandEncoderDestroy(RawId selfId);
+ async CommandBufferDestroy(RawId selfId);
+ async QueueSubmit(RawId selfId, RawId[] commandBuffers);
+ async QueueWriteBuffer(RawId selfId, RawId bufferId, BufferAddress bufferOffset, Shmem shmem);
+ async QueueWriteTexture(RawId selfId, WGPUTextureCopyView destination, Shmem shmem, WGPUTextureDataLayout layout, WGPUExtent3d extent);
+
+ async BindGroupLayoutDestroy(RawId selfId);
+ async PipelineLayoutDestroy(RawId selfId);
+ async BindGroupDestroy(RawId selfId);
+ async ShaderModuleDestroy(RawId selfId);
+ async ComputePipelineDestroy(RawId selfId);
+ async RenderPipelineDestroy(RawId selfId);
+ async DeviceCreateSwapChain(RawId selfId, RawId queueId, RGBDescriptor desc, RawId[] bufferIds, ExternalImageId externalId);
+ async SwapChainPresent(ExternalImageId externalId, RawId textureId, RawId commandEncoderId);
+ async SwapChainDestroy(ExternalImageId externalId);
+
+ async Shutdown();
+
+// Compositor -> content: error reporting, action drops, and notifications
+// that a RawId may be recycled by the client-side id allocator.
+child:
+ async Error(RawId aDeviceId, nsCString message);
+ async DropAction(ByteBuf buf);
+ async FreeAdapter(RawId id);
+ async FreeDevice(RawId id);
+ async FreePipelineLayout(RawId id);
+ async FreeShaderModule(RawId id);
+ async FreeBindGroupLayout(RawId id);
+ async FreeBindGroup(RawId id);
+ async FreeCommandBuffer(RawId id);
+ async FreeRenderPipeline(RawId id);
+ async FreeComputePipeline(RawId id);
+ async FreeBuffer(RawId id);
+ async FreeTexture(RawId id);
+ async FreeTextureView(RawId id);
+ async FreeSampler(RawId id);
+ async __delete__();
+};
+
+} // webgpu
+} // mozilla
diff --git a/dom/webgpu/ipc/WebGPUChild.cpp b/dom/webgpu/ipc/WebGPUChild.cpp
new file mode 100644
index 0000000000..3ba2f84eb0
--- /dev/null
+++ b/dom/webgpu/ipc/WebGPUChild.cpp
@@ -0,0 +1,697 @@
+/* -*- Mode: C++; tab-width: 20; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "WebGPUChild.h"
+#include "mozilla/EnumTypeTraits.h"
+#include "mozilla/dom/WebGPUBinding.h"
+#include "mozilla/dom/GPUUncapturedErrorEvent.h"
+#include "mozilla/webgpu/ValidationError.h"
+#include "mozilla/webgpu/ffi/wgpu.h"
+#include "Sampler.h"
+
+namespace mozilla {
+namespace webgpu {
+
+NS_IMPL_CYCLE_COLLECTION(WebGPUChild)
+NS_IMPL_CYCLE_COLLECTION_ROOT_NATIVE(WebGPUChild, AddRef)
+NS_IMPL_CYCLE_COLLECTION_UNROOT_NATIVE(WebGPUChild, Release)
+
+// Maps a WebIDL GPUCompareFunction enum to the FFI enum. The FFI values are
+// offset by one relative to the DOM enum, as explained below.
+static ffi::WGPUCompareFunction ConvertCompareFunction(
+ const dom::GPUCompareFunction& aCompare) {
+ // Value of 0 = Undefined is reserved on the C side for "null" semantics.
+ return ffi::WGPUCompareFunction(UnderlyingValue(aCompare) + 1);
+}
+
+// Creates the client side of the wgpu FFI infrastructure. The returned
+// WGPUClient is owned by WebGPUChild and released in its destructor.
+static ffi::WGPUClient* initialize() {
+ ffi::WGPUInfrastructure infra = ffi::wgpu_client_new();
+ return infra.client;
+}
+
+WebGPUChild::WebGPUChild() : mClient(initialize()), mIPCOpen(false) {}
+
+// Releases the FFI client created in the constructor (RAII pairing).
+WebGPUChild::~WebGPUChild() {
+ if (mClient) {
+ ffi::wgpu_client_delete(mClient);
+ }
+}
+
+// Requests a GPU adapter from the parent process. A fixed batch of candidate
+// ids is pre-allocated on the client and shipped with the request; the parent
+// replies with the id it actually used. The returned promise rejects with
+// Nothing() when the parent answers 0 (no adapter granted), or with
+// Some(reason) on IPC failure.
+RefPtr<RawIdPromise> WebGPUChild::InstanceRequestAdapter(
+ const dom::GPURequestAdapterOptions& aOptions) {
+ const int max_ids = 10;
+ RawId ids[max_ids] = {0};
+ unsigned long count =
+ ffi::wgpu_client_make_adapter_ids(mClient, ids, max_ids);
+
+ nsTArray<RawId> sharedIds(count);
+ for (unsigned long i = 0; i != count; ++i) {
+ sharedIds.AppendElement(ids[i]);
+ }
+
+ return SendInstanceRequestAdapter(aOptions, sharedIds)
+ ->Then(
+ GetCurrentSerialEventTarget(), __func__,
+ [](const RawId& aId) {
+ return aId == 0 ? RawIdPromise::CreateAndReject(Nothing(), __func__)
+ : RawIdPromise::CreateAndResolve(aId, __func__);
+ },
+ [](const ipc::ResponseRejectReason& aReason) {
+ return RawIdPromise::CreateAndReject(Some(aReason), __func__);
+ });
+}
+
+// Allocates a device id and asks the parent to create the device for it.
+// If the IPC send fails, the freshly minted id is recycled right away and
+// Nothing() is returned.
+Maybe<RawId> WebGPUChild::AdapterRequestDevice(
+ RawId aSelfId, const dom::GPUDeviceDescriptor& aDesc) {
+ RawId id = ffi::wgpu_client_make_device_id(mClient, aSelfId);
+ if (SendAdapterRequestDevice(aSelfId, aDesc, id)) {
+ return Some(id);
+ }
+ ffi::wgpu_client_kill_device_id(mClient, id);
+ return Nothing();
+}
+
+// Serializes a buffer-creation action into a ByteBuf via the wgpu client and
+// sends it to the parent as a DeviceAction, returning the new buffer id.
+// NOTE: `label` must outlive the FFI call since `desc.label` borrows it.
+RawId WebGPUChild::DeviceCreateBuffer(RawId aSelfId,
+ const dom::GPUBufferDescriptor& aDesc) {
+ ffi::WGPUBufferDescriptor desc = {};
+ nsCString label;
+ if (aDesc.mLabel.WasPassed()) {
+ LossyCopyUTF16toASCII(aDesc.mLabel.Value(), label);
+ desc.label = label.get();
+ }
+ desc.size = aDesc.mSize;
+ desc.usage = aDesc.mUsage;
+ desc.mapped_at_creation = aDesc.mMappedAtCreation;
+
+ ByteBuf bb;
+ RawId id =
+ ffi::wgpu_client_create_buffer(mClient, aSelfId, &desc, ToFFI(&bb));
+ if (!SendDeviceAction(aSelfId, std::move(bb))) {
+ MOZ_CRASH("IPC failure");
+ }
+ return id;
+}
+
+// Serializes a texture-creation action and sends it as a DeviceAction.
+// The WebIDL size is a union: either a sequence (missing trailing entries
+// default to 1) or a GPUExtent3DDict.
+RawId WebGPUChild::DeviceCreateTexture(RawId aSelfId,
+ const dom::GPUTextureDescriptor& aDesc) {
+ ffi::WGPUTextureDescriptor desc = {};
+ nsCString label;
+ if (aDesc.mLabel.WasPassed()) {
+ LossyCopyUTF16toASCII(aDesc.mLabel.Value(), label);
+ desc.label = label.get();
+ }
+ if (aDesc.mSize.IsRangeEnforcedUnsignedLongSequence()) {
+ const auto& seq = aDesc.mSize.GetAsRangeEnforcedUnsignedLongSequence();
+ desc.size.width = seq.Length() > 0 ? seq[0] : 1;
+ desc.size.height = seq.Length() > 1 ? seq[1] : 1;
+ desc.size.depth = seq.Length() > 2 ? seq[2] : 1;
+ } else if (aDesc.mSize.IsGPUExtent3DDict()) {
+ const auto& dict = aDesc.mSize.GetAsGPUExtent3DDict();
+ desc.size.width = dict.mWidth;
+ desc.size.height = dict.mHeight;
+ desc.size.depth = dict.mDepth;
+ } else {
+ MOZ_CRASH("Unexpected union");
+ }
+ desc.mip_level_count = aDesc.mMipLevelCount;
+ desc.sample_count = aDesc.mSampleCount;
+ desc.dimension = ffi::WGPUTextureDimension(aDesc.mDimension);
+ desc.format = ffi::WGPUTextureFormat(aDesc.mFormat);
+ desc.usage = aDesc.mUsage;
+
+ ByteBuf bb;
+ RawId id =
+ ffi::wgpu_client_create_texture(mClient, aSelfId, &desc, ToFFI(&bb));
+ if (!SendDeviceAction(aSelfId, std::move(bb))) {
+ MOZ_CRASH("IPC failure");
+ }
+ return id;
+}
+
+// Serializes a texture-view creation and sends it as a TextureAction.
+// Optional format/dimension fields are expressed as pointers into stack
+// locals, which stay valid for the duration of the FFI call.
+RawId WebGPUChild::TextureCreateView(
+ RawId aSelfId, RawId aDeviceId,
+ const dom::GPUTextureViewDescriptor& aDesc) {
+ ffi::WGPUTextureViewDescriptor desc = {};
+ nsCString label;
+ if (aDesc.mLabel.WasPassed()) {
+ LossyCopyUTF16toASCII(aDesc.mLabel.Value(), label);
+ desc.label = label.get();
+ }
+
+ ffi::WGPUTextureFormat format = ffi::WGPUTextureFormat_Sentinel;
+ if (aDesc.mFormat.WasPassed()) {
+ format = ffi::WGPUTextureFormat(aDesc.mFormat.Value());
+ desc.format = &format;
+ }
+ ffi::WGPUTextureViewDimension dimension =
+ ffi::WGPUTextureViewDimension_Sentinel;
+ if (aDesc.mDimension.WasPassed()) {
+ dimension = ffi::WGPUTextureViewDimension(aDesc.mDimension.Value());
+ desc.dimension = &dimension;
+ }
+
+ desc.aspect = ffi::WGPUTextureAspect(aDesc.mAspect);
+ desc.base_mip_level = aDesc.mBaseMipLevel;
+ // 0 means "remaining levels/layers" here — TODO confirm against wgpu docs.
+ desc.level_count =
+ aDesc.mMipLevelCount.WasPassed() ? aDesc.mMipLevelCount.Value() : 0;
+ desc.base_array_layer = aDesc.mBaseArrayLayer;
+ desc.array_layer_count =
+ aDesc.mArrayLayerCount.WasPassed() ? aDesc.mArrayLayerCount.Value() : 0;
+
+ ByteBuf bb;
+ RawId id =
+ ffi::wgpu_client_create_texture_view(mClient, aSelfId, &desc, ToFFI(&bb));
+ if (!SendTextureAction(aSelfId, aDeviceId, std::move(bb))) {
+ MOZ_CRASH("IPC failure");
+ }
+ return id;
+}
+
+// Serializes a sampler creation and sends it as a DeviceAction. The optional
+// compare function is passed as a pointer to a stack local (valid for the
+// duration of the FFI call).
+RawId WebGPUChild::DeviceCreateSampler(RawId aSelfId,
+ const dom::GPUSamplerDescriptor& aDesc) {
+ ffi::WGPUSamplerDescriptor desc = {};
+ nsCString label;
+ if (aDesc.mLabel.WasPassed()) {
+ LossyCopyUTF16toASCII(aDesc.mLabel.Value(), label);
+ desc.label = label.get();
+ }
+
+ desc.address_modes[0] = ffi::WGPUAddressMode(aDesc.mAddressModeU);
+ desc.address_modes[1] = ffi::WGPUAddressMode(aDesc.mAddressModeV);
+ desc.address_modes[2] = ffi::WGPUAddressMode(aDesc.mAddressModeW);
+ desc.mag_filter = ffi::WGPUFilterMode(aDesc.mMagFilter);
+ desc.min_filter = ffi::WGPUFilterMode(aDesc.mMinFilter);
+ desc.mipmap_filter = ffi::WGPUFilterMode(aDesc.mMipmapFilter);
+ desc.lod_min_clamp = aDesc.mLodMinClamp;
+ desc.lod_max_clamp = aDesc.mLodMaxClamp;
+
+ ffi::WGPUCompareFunction comparison = ffi::WGPUCompareFunction_Sentinel;
+ if (aDesc.mCompare.WasPassed()) {
+ comparison = ConvertCompareFunction(aDesc.mCompare.Value());
+ desc.compare = &comparison;
+ }
+
+ ByteBuf bb;
+ RawId id =
+ ffi::wgpu_client_create_sampler(mClient, aSelfId, &desc, ToFFI(&bb));
+ if (!SendDeviceAction(aSelfId, std::move(bb))) {
+ MOZ_CRASH("IPC failure");
+ }
+ return id;
+}
+
+// Serializes a command-encoder creation and sends it as a DeviceAction,
+// returning the new encoder id.
+RawId WebGPUChild::DeviceCreateCommandEncoder(
+ RawId aSelfId, const dom::GPUCommandEncoderDescriptor& aDesc) {
+ ffi::WGPUCommandEncoderDescriptor desc = {};
+ nsCString label;
+ if (aDesc.mLabel.WasPassed()) {
+ LossyCopyUTF16toASCII(aDesc.mLabel.Value(), label);
+ desc.label = label.get();
+ }
+
+ ByteBuf bb;
+ RawId id = ffi::wgpu_client_create_command_encoder(mClient, aSelfId, &desc,
+ ToFFI(&bb));
+ if (!SendDeviceAction(aSelfId, std::move(bb))) {
+ MOZ_CRASH("IPC failure");
+ }
+ return id;
+}
+
+// Tells the parent to finish the encoder into a command buffer. The returned
+// command-buffer id is the encoder id itself — see the note below.
+RawId WebGPUChild::CommandEncoderFinish(
+ RawId aSelfId, RawId aDeviceId,
+ const dom::GPUCommandBufferDescriptor& aDesc) {
+ if (!SendCommandEncoderFinish(aSelfId, aDeviceId, aDesc)) {
+ MOZ_CRASH("IPC failure");
+ }
+ // We rely on knowledge that `CommandEncoderId` == `CommandBufferId`
+ // TODO: refactor this to truly behave as if the encoder is being finished,
+ // and a new command buffer ID is being created from it. Resolve the ID
+ // type aliasing at the place that introduces it: `wgpu-core`.
+ return aSelfId;
+}
+
+// Serializes a bind-group-layout creation and sends it as a DeviceAction.
+// Two passes: first collect the optional per-entry enum values into
+// `optional` (so their addresses are stable), then build the FFI entries
+// whose optional fields point into that array.
+RawId WebGPUChild::DeviceCreateBindGroupLayout(
+ RawId aSelfId, const dom::GPUBindGroupLayoutDescriptor& aDesc) {
+ struct OptionalData {
+ ffi::WGPUTextureViewDimension dim;
+ ffi::WGPURawTextureSampleType type;
+ ffi::WGPUTextureFormat format;
+ };
+ nsTArray<OptionalData> optional(aDesc.mEntries.Length());
+ for (const auto& entry : aDesc.mEntries) {
+ OptionalData data = {};
+ if (entry.mViewDimension.WasPassed()) {
+ data.dim = ffi::WGPUTextureViewDimension(entry.mViewDimension.Value());
+ }
+ if (entry.mTextureComponentType.WasPassed()) {
+ switch (entry.mTextureComponentType.Value()) {
+ case dom::GPUTextureComponentType::Float:
+ data.type = ffi::WGPURawTextureSampleType_Float;
+ break;
+ case dom::GPUTextureComponentType::Uint:
+ data.type = ffi::WGPURawTextureSampleType_Uint;
+ break;
+ case dom::GPUTextureComponentType::Sint:
+ data.type = ffi::WGPURawTextureSampleType_Sint;
+ break;
+ case dom::GPUTextureComponentType::Depth_comparison:
+ data.type = ffi::WGPURawTextureSampleType_Depth;
+ break;
+ default:
+ MOZ_ASSERT_UNREACHABLE();
+ break;
+ }
+ }
+ if (entry.mStorageTextureFormat.WasPassed()) {
+ data.format = ffi::WGPUTextureFormat(entry.mStorageTextureFormat.Value());
+ }
+ optional.AppendElement(data);
+ }
+
+ nsTArray<ffi::WGPUBindGroupLayoutEntry> entries(aDesc.mEntries.Length());
+ for (size_t i = 0; i < aDesc.mEntries.Length(); ++i) {
+ const auto& entry = aDesc.mEntries[i];
+ ffi::WGPUBindGroupLayoutEntry e = {};
+ e.binding = entry.mBinding;
+ e.visibility = entry.mVisibility;
+ e.ty = ffi::WGPURawBindingType(entry.mType);
+ e.multisampled = entry.mMultisampled;
+ e.has_dynamic_offset = entry.mHasDynamicOffset;
+ if (entry.mViewDimension.WasPassed()) {
+ e.view_dimension = &optional[i].dim;
+ }
+ if (entry.mTextureComponentType.WasPassed()) {
+ e.texture_sample_type = &optional[i].type;
+ }
+ if (entry.mStorageTextureFormat.WasPassed()) {
+ e.storage_texture_format = &optional[i].format;
+ }
+ entries.AppendElement(e);
+ }
+
+ ffi::WGPUBindGroupLayoutDescriptor desc = {};
+ nsCString label;
+ if (aDesc.mLabel.WasPassed()) {
+ LossyCopyUTF16toASCII(aDesc.mLabel.Value(), label);
+ desc.label = label.get();
+ }
+ desc.entries = entries.Elements();
+ desc.entries_length = entries.Length();
+
+ ByteBuf bb;
+ RawId id = ffi::wgpu_client_create_bind_group_layout(mClient, aSelfId, &desc,
+ ToFFI(&bb));
+ if (!SendDeviceAction(aSelfId, std::move(bb))) {
+ MOZ_CRASH("IPC failure");
+ }
+ return id;
+}
+
+// Serializes a pipeline-layout creation (a flat list of bind-group-layout
+// ids) and sends it as a DeviceAction.
+RawId WebGPUChild::DeviceCreatePipelineLayout(
+ RawId aSelfId, const dom::GPUPipelineLayoutDescriptor& aDesc) {
+ nsTArray<ffi::WGPUBindGroupLayoutId> bindGroupLayouts(
+ aDesc.mBindGroupLayouts.Length());
+ for (const auto& layout : aDesc.mBindGroupLayouts) {
+ bindGroupLayouts.AppendElement(layout->mId);
+ }
+
+ ffi::WGPUPipelineLayoutDescriptor desc = {};
+ nsCString label;
+ if (aDesc.mLabel.WasPassed()) {
+ LossyCopyUTF16toASCII(aDesc.mLabel.Value(), label);
+ desc.label = label.get();
+ }
+ desc.bind_group_layouts = bindGroupLayouts.Elements();
+ desc.bind_group_layouts_length = bindGroupLayouts.Length();
+
+ ByteBuf bb;
+ RawId id = ffi::wgpu_client_create_pipeline_layout(mClient, aSelfId, &desc,
+ ToFFI(&bb));
+ if (!SendDeviceAction(aSelfId, std::move(bb))) {
+ MOZ_CRASH("IPC failure");
+ }
+ return id;
+}
+
+// Serializes a bind-group creation and sends it as a DeviceAction. Each
+// WebIDL entry's resource is a union (buffer binding / texture view /
+// sampler); only the matching FFI field is filled in.
+RawId WebGPUChild::DeviceCreateBindGroup(
+ RawId aSelfId, const dom::GPUBindGroupDescriptor& aDesc) {
+ nsTArray<ffi::WGPUBindGroupEntry> entries(aDesc.mEntries.Length());
+ for (const auto& entry : aDesc.mEntries) {
+ ffi::WGPUBindGroupEntry e = {};
+ e.binding = entry.mBinding;
+ if (entry.mResource.IsGPUBufferBinding()) {
+ const auto& bufBinding = entry.mResource.GetAsGPUBufferBinding();
+ e.buffer = bufBinding.mBuffer->mId;
+ e.offset = bufBinding.mOffset;
+ // 0 stands for "unspecified size" — presumably "whole buffer"; verify.
+ e.size = bufBinding.mSize.WasPassed() ? bufBinding.mSize.Value() : 0;
+ }
+ if (entry.mResource.IsGPUTextureView()) {
+ e.texture_view = entry.mResource.GetAsGPUTextureView()->mId;
+ }
+ if (entry.mResource.IsGPUSampler()) {
+ e.sampler = entry.mResource.GetAsGPUSampler()->mId;
+ }
+ entries.AppendElement(e);
+ }
+
+ ffi::WGPUBindGroupDescriptor desc = {};
+ nsCString label;
+ if (aDesc.mLabel.WasPassed()) {
+ LossyCopyUTF16toASCII(aDesc.mLabel.Value(), label);
+ desc.label = label.get();
+ }
+ desc.layout = aDesc.mLayout->mId;
+ desc.entries = entries.Elements();
+ desc.entries_length = entries.Length();
+
+ ByteBuf bb;
+ RawId id =
+ ffi::wgpu_client_create_bind_group(mClient, aSelfId, &desc, ToFFI(&bb));
+ if (!SendDeviceAction(aSelfId, std::move(bb))) {
+ MOZ_CRASH("IPC failure");
+ }
+ return id;
+}
+
+// Serializes a shader-module creation and sends it as a DeviceAction. The
+// WebIDL code union is either a WGSL source string or a SPIR-V Uint32Array;
+// exactly one of desc.wgsl_chars / desc.spirv_words is populated.
+RawId WebGPUChild::DeviceCreateShaderModule(
+ RawId aSelfId, const dom::GPUShaderModuleDescriptor& aDesc) {
+ ffi::WGPUShaderModuleDescriptor desc = {};
+
+ nsCString wgsl;
+ if (aDesc.mCode.IsString()) {
+ LossyCopyUTF16toASCII(aDesc.mCode.GetAsString(), wgsl);
+ desc.wgsl_chars = wgsl.get();
+ } else {
+ const auto& code = aDesc.mCode.GetAsUint32Array();
+ code.ComputeState();
+ desc.spirv_words = code.Data();
+ desc.spirv_words_length = code.Length();
+ }
+
+ ByteBuf bb;
+ RawId id = ffi::wgpu_client_create_shader_module(mClient, aSelfId, &desc,
+ ToFFI(&bb));
+ if (!SendDeviceAction(aSelfId, std::move(bb))) {
+ MOZ_CRASH("IPC failure");
+ }
+ return id;
+}
+
+// Serializes a compute-pipeline creation and sends it as a DeviceAction.
+// The FFI call may also mint implicit bind-group-layout ids; the returned
+// array is scanned up to the first zero entry and appended to
+// aImplicitBindGroupLayoutIds for the caller to track.
+RawId WebGPUChild::DeviceCreateComputePipeline(
+ RawId aSelfId, const dom::GPUComputePipelineDescriptor& aDesc,
+ nsTArray<RawId>* const aImplicitBindGroupLayoutIds) {
+ ffi::WGPUComputePipelineDescriptor desc = {};
+ nsCString label, entryPoint;
+ if (aDesc.mLabel.WasPassed()) {
+ LossyCopyUTF16toASCII(aDesc.mLabel.Value(), label);
+ desc.label = label.get();
+ }
+ if (aDesc.mLayout.WasPassed()) {
+ desc.layout = aDesc.mLayout.Value().mId;
+ }
+ desc.compute_stage.module = aDesc.mComputeStage.mModule->mId;
+ LossyCopyUTF16toASCII(aDesc.mComputeStage.mEntryPoint, entryPoint);
+ desc.compute_stage.entry_point = entryPoint.get();
+
+ ByteBuf bb;
+ RawId implicit_bgl_ids[WGPUMAX_BIND_GROUPS] = {};
+ RawId id = ffi::wgpu_client_create_compute_pipeline(
+ mClient, aSelfId, &desc, ToFFI(&bb), implicit_bgl_ids);
+
+ for (const auto& cur : implicit_bgl_ids) {
+ if (!cur) break;
+ aImplicitBindGroupLayoutIds->AppendElement(cur);
+ }
+ if (!SendDeviceAction(aSelfId, std::move(bb))) {
+ MOZ_CRASH("IPC failure");
+ }
+ return id;
+}
+
+// --- DOM-to-FFI descriptor converters used by DeviceCreateRenderPipeline ---
+// Each helper copies a WebIDL state dictionary field-by-field into the
+// corresponding plain-C FFI struct; all return by value.
+static ffi::WGPURasterizationStateDescriptor ConvertRasterizationDescriptor(
+ const dom::GPURasterizationStateDescriptor& aDesc) {
+ ffi::WGPURasterizationStateDescriptor desc = {};
+ desc.front_face = ffi::WGPUFrontFace(aDesc.mFrontFace);
+ desc.cull_mode = ffi::WGPUCullMode(aDesc.mCullMode);
+ desc.depth_bias = aDesc.mDepthBias;
+ desc.depth_bias_slope_scale = aDesc.mDepthBiasSlopeScale;
+ desc.depth_bias_clamp = aDesc.mDepthBiasClamp;
+ return desc;
+}
+
+static ffi::WGPUBlendDescriptor ConvertBlendDescriptor(
+ const dom::GPUBlendDescriptor& aDesc) {
+ ffi::WGPUBlendDescriptor desc = {};
+ desc.src_factor = ffi::WGPUBlendFactor(aDesc.mSrcFactor);
+ desc.dst_factor = ffi::WGPUBlendFactor(aDesc.mDstFactor);
+ desc.operation = ffi::WGPUBlendOperation(aDesc.mOperation);
+ return desc;
+}
+
+static ffi::WGPUColorStateDescriptor ConvertColorDescriptor(
+ const dom::GPUColorStateDescriptor& aDesc) {
+ ffi::WGPUColorStateDescriptor desc = {};
+ desc.format = ffi::WGPUTextureFormat(aDesc.mFormat);
+ desc.alpha_blend = ConvertBlendDescriptor(aDesc.mAlphaBlend);
+ desc.color_blend = ConvertBlendDescriptor(aDesc.mColorBlend);
+ desc.write_mask = aDesc.mWriteMask;
+ return desc;
+}
+
+static ffi::WGPUStencilStateFaceDescriptor ConvertStencilFaceDescriptor(
+ const dom::GPUStencilStateFaceDescriptor& aDesc) {
+ ffi::WGPUStencilStateFaceDescriptor desc = {};
+ desc.compare = ConvertCompareFunction(aDesc.mCompare);
+ desc.fail_op = ffi::WGPUStencilOperation(aDesc.mFailOp);
+ desc.depth_fail_op = ffi::WGPUStencilOperation(aDesc.mDepthFailOp);
+ desc.pass_op = ffi::WGPUStencilOperation(aDesc.mPassOp);
+ return desc;
+}
+
+static ffi::WGPUDepthStencilStateDescriptor ConvertDepthStencilDescriptor(
+ const dom::GPUDepthStencilStateDescriptor& aDesc) {
+ ffi::WGPUDepthStencilStateDescriptor desc = {};
+ desc.format = ffi::WGPUTextureFormat(aDesc.mFormat);
+ desc.depth_write_enabled = aDesc.mDepthWriteEnabled;
+ desc.depth_compare = ConvertCompareFunction(aDesc.mDepthCompare);
+ desc.stencil.front = ConvertStencilFaceDescriptor(aDesc.mStencilFront);
+ desc.stencil.back = ConvertStencilFaceDescriptor(aDesc.mStencilBack);
+ desc.stencil.read_mask = aDesc.mStencilReadMask;
+ desc.stencil.write_mask = aDesc.mStencilWriteMask;
+ return desc;
+}
+
+// Serializes a render-pipeline creation and sends it as a DeviceAction.
+// All pointer fields of the FFI descriptor reference stack locals
+// (stages, rasterization, color/depth-stencil state, vertex buffers), so
+// everything must stay in scope until the FFI call below.
+RawId WebGPUChild::DeviceCreateRenderPipeline(
+ RawId aSelfId, const dom::GPURenderPipelineDescriptor& aDesc,
+ nsTArray<RawId>* const aImplicitBindGroupLayoutIds) {
+ ffi::WGPURenderPipelineDescriptor desc = {};
+ nsCString label, vsEntry, fsEntry;
+ ffi::WGPUProgrammableStageDescriptor vertexStage = {};
+ ffi::WGPUProgrammableStageDescriptor fragmentStage = {};
+
+ if (aDesc.mLabel.WasPassed()) {
+ LossyCopyUTF16toASCII(aDesc.mLabel.Value(), label);
+ desc.label = label.get();
+ }
+ if (aDesc.mLayout.WasPassed()) {
+ desc.layout = aDesc.mLayout.Value().mId;
+ }
+
+ vertexStage.module = aDesc.mVertexStage.mModule->mId;
+ LossyCopyUTF16toASCII(aDesc.mVertexStage.mEntryPoint, vsEntry);
+ vertexStage.entry_point = vsEntry.get();
+ desc.vertex_stage = &vertexStage;
+
+ // The fragment stage is optional in WebIDL; leave desc.fragment_stage
+ // null when absent.
+ if (aDesc.mFragmentStage.WasPassed()) {
+ const auto& stage = aDesc.mFragmentStage.Value();
+ fragmentStage.module = stage.mModule->mId;
+ LossyCopyUTF16toASCII(stage.mEntryPoint, fsEntry);
+ fragmentStage.entry_point = fsEntry.get();
+ desc.fragment_stage = &fragmentStage;
+ }
+
+ desc.primitive_topology =
+ ffi::WGPUPrimitiveTopology(aDesc.mPrimitiveTopology);
+ const auto rasterization =
+ ConvertRasterizationDescriptor(aDesc.mRasterizationState);
+ desc.rasterization_state = &rasterization;
+
+ nsTArray<ffi::WGPUColorStateDescriptor> colorStates;
+ for (const auto& colorState : aDesc.mColorStates) {
+ colorStates.AppendElement(ConvertColorDescriptor(colorState));
+ }
+ desc.color_states = colorStates.Elements();
+ desc.color_states_length = colorStates.Length();
+
+ ffi::WGPUDepthStencilStateDescriptor depthStencilState = {};
+ if (aDesc.mDepthStencilState.WasPassed()) {
+ depthStencilState =
+ ConvertDepthStencilDescriptor(aDesc.mDepthStencilState.Value());
+ desc.depth_stencil_state = &depthStencilState;
+ }
+
+ desc.vertex_state.index_format =
+ ffi::WGPUIndexFormat(aDesc.mVertexState.mIndexFormat);
+ // First pass: flatten all attributes into one array while recording each
+ // buffer's attribute count; pointers are fixed up afterwards because
+ // vertexAttributes may reallocate while growing.
+ nsTArray<ffi::WGPUVertexBufferDescriptor> vertexBuffers;
+ nsTArray<ffi::WGPUVertexAttributeDescriptor> vertexAttributes;
+ for (const auto& vertex_desc : aDesc.mVertexState.mVertexBuffers) {
+ ffi::WGPUVertexBufferDescriptor vb_desc = {};
+ if (!vertex_desc.IsNull()) {
+ const auto& vd = vertex_desc.Value();
+ vb_desc.stride = vd.mArrayStride;
+ vb_desc.step_mode = ffi::WGPUInputStepMode(vd.mStepMode);
+ // Note: we are setting the length but not the pointer
+ vb_desc.attributes_length = vd.mAttributes.Length();
+ for (const auto& vat : vd.mAttributes) {
+ ffi::WGPUVertexAttributeDescriptor ad = {};
+ ad.offset = vat.mOffset;
+ ad.format = ffi::WGPUVertexFormat(vat.mFormat);
+ ad.shader_location = vat.mShaderLocation;
+ vertexAttributes.AppendElement(ad);
+ }
+ }
+ vertexBuffers.AppendElement(vb_desc);
+ }
+ // Now patch up all the pointers to attribute lists.
+ size_t numAttributes = 0;
+ for (auto& vb_desc : vertexBuffers) {
+ vb_desc.attributes = vertexAttributes.Elements() + numAttributes;
+ numAttributes += vb_desc.attributes_length;
+ }
+
+ desc.vertex_state.vertex_buffers = vertexBuffers.Elements();
+ desc.vertex_state.vertex_buffers_length = vertexBuffers.Length();
+ desc.sample_count = aDesc.mSampleCount;
+ desc.sample_mask = aDesc.mSampleMask;
+ desc.alpha_to_coverage_enabled = aDesc.mAlphaToCoverageEnabled;
+
+ ByteBuf bb;
+ RawId implicit_bgl_ids[WGPUMAX_BIND_GROUPS] = {};
+ RawId id = ffi::wgpu_client_create_render_pipeline(
+ mClient, aSelfId, &desc, ToFFI(&bb), implicit_bgl_ids);
+
+ // Implicit bind-group-layout ids are zero-terminated (same convention as
+ // DeviceCreateComputePipeline).
+ for (const auto& cur : implicit_bgl_ids) {
+ if (!cur) break;
+ aImplicitBindGroupLayoutIds->AppendElement(cur);
+ }
+ if (!SendDeviceAction(aSelfId, std::move(bb))) {
+ MOZ_CRASH("IPC failure");
+ }
+ return id;
+}
+
+// Parent-reported validation error. If the error can be attributed to a
+// still-registered device, it is surfaced to content as a GPU
+// "uncapturederror" event; otherwise it is only logged to stderr.
+ipc::IPCResult WebGPUChild::RecvError(RawId aDeviceId,
+ const nsACString& aMessage) {
+ if (!aDeviceId) {
+ // TODO: figure out how to report these kinds of errors
+ printf_stderr("Validation error without device target: %s\n",
+ PromiseFlatCString(aMessage).get());
+ } else if (mDeviceMap.find(aDeviceId) == mDeviceMap.end()) {
+ printf_stderr("Validation error on a dropped device: %s\n",
+ PromiseFlatCString(aMessage).get());
+ } else {
+ auto* target = mDeviceMap[aDeviceId];
+ MOZ_ASSERT(target);
+ dom::GPUUncapturedErrorEventInit init;
+ init.mError.SetAsGPUValidationError() =
+ new ValidationError(target, aMessage);
+ RefPtr<mozilla::dom::GPUUncapturedErrorEvent> event =
+ dom::GPUUncapturedErrorEvent::Constructor(target, u"uncapturederror"_ns,
+ init);
+ target->DispatchEvent(*event);
+ }
+ return IPC_OK();
+}
+
+// Forwards a serialized "drop" action from the parent to the wgpu client.
+ipc::IPCResult WebGPUChild::RecvDropAction(const ipc::ByteBuf& aByteBuf) {
+ const auto* byteBuf = ToFFI(&aByteBuf);
+ ffi::wgpu_client_drop_action(mClient, byteBuf);
+ return IPC_OK();
+}
+
+// --- Free* handlers ---
+// Each returns the given RawId to the client-side id allocator once the
+// parent confirms the corresponding resource is gone, so the id can be
+// reused for a new object of the same kind.
+ipc::IPCResult WebGPUChild::RecvFreeAdapter(RawId id) {
+ ffi::wgpu_client_kill_adapter_id(mClient, id);
+ return IPC_OK();
+}
+ipc::IPCResult WebGPUChild::RecvFreeDevice(RawId id) {
+ ffi::wgpu_client_kill_device_id(mClient, id);
+ return IPC_OK();
+}
+ipc::IPCResult WebGPUChild::RecvFreePipelineLayout(RawId id) {
+ ffi::wgpu_client_kill_pipeline_layout_id(mClient, id);
+ return IPC_OK();
+}
+ipc::IPCResult WebGPUChild::RecvFreeShaderModule(RawId id) {
+ ffi::wgpu_client_kill_shader_module_id(mClient, id);
+ return IPC_OK();
+}
+ipc::IPCResult WebGPUChild::RecvFreeBindGroupLayout(RawId id) {
+ ffi::wgpu_client_kill_bind_group_layout_id(mClient, id);
+ return IPC_OK();
+}
+ipc::IPCResult WebGPUChild::RecvFreeBindGroup(RawId id) {
+ ffi::wgpu_client_kill_bind_group_id(mClient, id);
+ return IPC_OK();
+}
+// Note: command buffers share the encoder id space, hence kill_encoder_id.
+ipc::IPCResult WebGPUChild::RecvFreeCommandBuffer(RawId id) {
+ ffi::wgpu_client_kill_encoder_id(mClient, id);
+ return IPC_OK();
+}
+ipc::IPCResult WebGPUChild::RecvFreeRenderPipeline(RawId id) {
+ ffi::wgpu_client_kill_render_pipeline_id(mClient, id);
+ return IPC_OK();
+}
+ipc::IPCResult WebGPUChild::RecvFreeComputePipeline(RawId id) {
+ ffi::wgpu_client_kill_compute_pipeline_id(mClient, id);
+ return IPC_OK();
+}
+ipc::IPCResult WebGPUChild::RecvFreeBuffer(RawId id) {
+ ffi::wgpu_client_kill_buffer_id(mClient, id);
+ return IPC_OK();
+}
+ipc::IPCResult WebGPUChild::RecvFreeTexture(RawId id) {
+ ffi::wgpu_client_kill_texture_id(mClient, id);
+ return IPC_OK();
+}
+ipc::IPCResult WebGPUChild::RecvFreeTextureView(RawId id) {
+ ffi::wgpu_client_kill_texture_view_id(mClient, id);
+ return IPC_OK();
+}
+ipc::IPCResult WebGPUChild::RecvFreeSampler(RawId id) {
+ ffi::wgpu_client_kill_sampler_id(mClient, id);
+ return IPC_OK();
+}
+
+// Allocates `maxBufferCount` staging buffer ids for presentation and asks
+// the parent to set up a swap chain bound to the given external image id.
+void WebGPUChild::DeviceCreateSwapChain(RawId aSelfId,
+ const RGBDescriptor& aRgbDesc,
+ size_t maxBufferCount,
+ wr::ExternalImageId aExternalImageId) {
+ RawId queueId = aSelfId; // TODO: multiple queues
+ nsTArray<RawId> bufferIds(maxBufferCount);
+ for (size_t i = 0; i < maxBufferCount; ++i) {
+ bufferIds.AppendElement(ffi::wgpu_client_make_buffer_id(mClient, aSelfId));
+ }
+ SendDeviceCreateSwapChain(aSelfId, queueId, aRgbDesc, bufferIds,
+ aExternalImageId);
+}
+
+// Mints a command-encoder id for the present operation and asks the parent
+// to present the given texture through the external image.
+void WebGPUChild::SwapChainPresent(wr::ExternalImageId aExternalImageId,
+ RawId aTextureId) {
+ // Hack: the function expects `DeviceId`, but it only uses it for `backend()`
+ // selection.
+ RawId encoderId = ffi::wgpu_client_make_encoder_id(mClient, aTextureId);
+ SendSwapChainPresent(aExternalImageId, aTextureId, encoderId);
+}
+
+// Tracks a live Device so RecvError can route uncaptured errors to it.
+// The map holds raw pointers; the Device must unregister before dying.
+void WebGPUChild::RegisterDevice(RawId aId, Device* aDevice) {
+ mDeviceMap.insert({aId, aDevice});
+}
+
+// Stops tracking the device and tells the parent to destroy its resources.
+void WebGPUChild::UnregisterDevice(RawId aId) {
+ mDeviceMap.erase(aId);
+ SendDeviceDestroy(aId);
+}
+
+} // namespace webgpu
+} // namespace mozilla
diff --git a/dom/webgpu/ipc/WebGPUChild.h b/dom/webgpu/ipc/WebGPUChild.h
new file mode 100644
index 0000000000..d2bedbe4c1
--- /dev/null
+++ b/dom/webgpu/ipc/WebGPUChild.h
@@ -0,0 +1,125 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef WEBGPU_CHILD_H_
+#define WEBGPU_CHILD_H_
+
+#include "mozilla/webgpu/PWebGPUChild.h"
+#include "mozilla/MozPromise.h"
+#include "mozilla/WeakPtr.h"
+
+namespace mozilla {
+namespace dom {
+struct GPURequestAdapterOptions;
+} // namespace dom
+namespace layers {
+class CompositorBridgeChild;
+} // namespace layers
+namespace webgpu {
+namespace ffi {
+struct WGPUClient;
+struct WGPUTextureViewDescriptor;
+} // namespace ffi
+
+typedef MozPromise<RawId, Maybe<ipc::ResponseRejectReason>, true> RawIdPromise;
+
+ffi::WGPUByteBuf* ToFFI(ipc::ByteBuf* x);
+
+// Content-process endpoint of the PWebGPU protocol. Owns the wgpu client-side
+// id allocator (mClient) and translates DOM WebGPU descriptors into
+// serialized actions sent to the compositor-process WebGPUParent.
+class WebGPUChild final : public PWebGPUChild, public SupportsWeakPtr {
+ public:
+ friend class layers::CompositorBridgeChild;
+
+ NS_DECL_CYCLE_COLLECTION_NATIVE_CLASS(WebGPUChild)
+ NS_INLINE_DECL_CYCLE_COLLECTING_NATIVE_REFCOUNTING(WebGPUChild)
+
+ public:
+ explicit WebGPUChild();
+
+ bool IsOpen() const { return mIPCOpen; }
+
+ RefPtr<RawIdPromise> InstanceRequestAdapter(
+ const dom::GPURequestAdapterOptions& aOptions);
+ Maybe<RawId> AdapterRequestDevice(RawId aSelfId,
+ const dom::GPUDeviceDescriptor& aDesc);
+ // The DeviceCreate*/TextureCreateView helpers below all allocate a client
+ // id, serialize the descriptor, and send it to the parent; they return the
+ // new resource's RawId.
+ RawId DeviceCreateBuffer(RawId aSelfId,
+ const dom::GPUBufferDescriptor& aDesc);
+ RawId DeviceCreateTexture(RawId aSelfId,
+ const dom::GPUTextureDescriptor& aDesc);
+ RawId TextureCreateView(RawId aSelfId, RawId aDeviceId,
+ const dom::GPUTextureViewDescriptor& aDesc);
+ RawId DeviceCreateSampler(RawId aSelfId,
+ const dom::GPUSamplerDescriptor& aDesc);
+ RawId DeviceCreateCommandEncoder(
+ RawId aSelfId, const dom::GPUCommandEncoderDescriptor& aDesc);
+ RawId CommandEncoderFinish(RawId aSelfId, RawId aDeviceId,
+ const dom::GPUCommandBufferDescriptor& aDesc);
+ RawId DeviceCreateBindGroupLayout(
+ RawId aSelfId, const dom::GPUBindGroupLayoutDescriptor& aDesc);
+ RawId DeviceCreatePipelineLayout(
+ RawId aSelfId, const dom::GPUPipelineLayoutDescriptor& aDesc);
+ RawId DeviceCreateBindGroup(RawId aSelfId,
+ const dom::GPUBindGroupDescriptor& aDesc);
+ RawId DeviceCreateShaderModule(RawId aSelfId,
+ const dom::GPUShaderModuleDescriptor& aDesc);
+ RawId DeviceCreateComputePipeline(
+ RawId aSelfId, const dom::GPUComputePipelineDescriptor& aDesc,
+ nsTArray<RawId>* const aImplicitBindGroupLayoutIds);
+ RawId DeviceCreateRenderPipeline(
+ RawId aSelfId, const dom::GPURenderPipelineDescriptor& aDesc,
+ nsTArray<RawId>* const aImplicitBindGroupLayoutIds);
+
+ void DeviceCreateSwapChain(RawId aSelfId, const RGBDescriptor& aRgbDesc,
+ size_t maxBufferCount,
+ wr::ExternalImageId aExternalImageId);
+ void SwapChainPresent(wr::ExternalImageId aExternalImageId, RawId aTextureId);
+
+ void RegisterDevice(RawId aId, Device* aDevice);
+ void UnregisterDevice(RawId aId);
+
+ private:
+ virtual ~WebGPUChild();
+
+ // AddIPDLReference and ReleaseIPDLReference are only to be called by
+ // CompositorBridgeChild's AllocPWebGPUChild and DeallocPWebGPUChild methods
+ // respectively. We intentionally make them private to prevent misuse.
+ // The purpose of these methods is to be aware of when the IPC system around
+ // this actor goes down: mIPCOpen is then set to false.
+ void AddIPDLReference() {
+ MOZ_ASSERT(!mIPCOpen);
+ mIPCOpen = true;
+ AddRef();
+ }
+ void ReleaseIPDLReference() {
+ MOZ_ASSERT(mIPCOpen);
+ mIPCOpen = false;
+ Release();
+ }
+
+ ffi::WGPUClient* const mClient;
+ bool mIPCOpen;
+ // Live devices by id, for routing uncaptured errors (see RecvError).
+ // Non-owning pointers; entries removed via UnregisterDevice.
+ std::unordered_map<RawId, Device*> mDeviceMap;
+
+ public:
+ ipc::IPCResult RecvError(RawId aDeviceId, const nsACString& aMessage);
+ ipc::IPCResult RecvDropAction(const ipc::ByteBuf& aByteBuf);
+ ipc::IPCResult RecvFreeAdapter(RawId id);
+ ipc::IPCResult RecvFreeDevice(RawId id);
+ ipc::IPCResult RecvFreePipelineLayout(RawId id);
+ ipc::IPCResult RecvFreeShaderModule(RawId id);
+ ipc::IPCResult RecvFreeBindGroupLayout(RawId id);
+ ipc::IPCResult RecvFreeBindGroup(RawId id);
+ ipc::IPCResult RecvFreeCommandBuffer(RawId id);
+ ipc::IPCResult RecvFreeRenderPipeline(RawId id);
+ ipc::IPCResult RecvFreeComputePipeline(RawId id);
+ ipc::IPCResult RecvFreeBuffer(RawId id);
+ ipc::IPCResult RecvFreeTexture(RawId id);
+ ipc::IPCResult RecvFreeTextureView(RawId id);
+ ipc::IPCResult RecvFreeSampler(RawId id);
+};
+
+} // namespace webgpu
+} // namespace mozilla
+
+#endif // WEBGPU_CHILD_H_
diff --git a/dom/webgpu/ipc/WebGPUParent.cpp b/dom/webgpu/ipc/WebGPUParent.cpp
new file mode 100644
index 0000000000..5728d7c242
--- /dev/null
+++ b/dom/webgpu/ipc/WebGPUParent.cpp
@@ -0,0 +1,713 @@
+/* -*- Mode: C++; tab-width: 20; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "WebGPUParent.h"
+#include "mozilla/webgpu/ffi/wgpu.h"
+#include "mozilla/layers/ImageDataSerializer.h"
+#include "mozilla/layers/TextureHost.h"
+
+namespace mozilla {
+namespace webgpu {
+
+const uint64_t POLL_TIME_MS = 100;
+
+// A helper class to force error checks coming across FFI.
+// It will assert in destructor if unchecked.
+// TODO: refactor this to avoid stack-allocating the buffer all the time.
+class ErrorBuffer {
+ // if the message doesn't fit, it will be truncated
+ static constexpr unsigned BUFFER_SIZE = 256;
+ char mUtf8[BUFFER_SIZE] = {};
+ bool mGuard = false;
+
+ public:
+ ErrorBuffer() { mUtf8[0] = 0; }
+ ErrorBuffer(const ErrorBuffer&) = delete;
+ ~ErrorBuffer() { MOZ_ASSERT(!mGuard); }
+
+ ffi::WGPUErrorBuffer ToFFI() {
+ mGuard = true;
+ ffi::WGPUErrorBuffer errorBuf = {mUtf8, BUFFER_SIZE};
+ return errorBuf;
+ }
+
+ bool CheckAndForward(PWebGPUParent* aParent, RawId aDeviceId) {
+ mGuard = false;
+ if (!mUtf8[0]) {
+ return false;
+ }
+ nsAutoCString cString(mUtf8);
+ if (!aParent->SendError(aDeviceId, cString)) {
+ NS_ERROR("Unable to SendError");
+ }
+ return true;
+ }
+};
+
+// Shared state for presenting a WebGPU swap chain into a MemoryTextureHost.
+// Refcounted: referenced both from mCanvasMap and from in-flight
+// PresentRequest callbacks.
+class PresentationData {
+  NS_INLINE_DECL_REFCOUNTING(PresentationData);
+
+ public:
+  RawId mDeviceId = 0;
+  RawId mQueueId = 0;
+  // CPU-side texture the presented frame is copied into.
+  RefPtr<layers::MemoryTextureHost> mTextureHost;
+  // Row pitch of the GPU staging buffers (aligned via Align()).
+  uint32_t mSourcePitch = 0;
+  // Row pitch of the texture host (from GetRGBStride).
+  uint32_t mTargetPitch = 0;
+  uint32_t mRowCount = 0;
+  // IDs reserved by the child but whose buffers are not yet created.
+  std::vector<RawId> mUnassignedBufferIds;
+  // Created buffers, idle and ready to receive the next frame copy.
+  std::vector<RawId> mAvailableBufferIds;
+  // Buffers with a copy submitted, waiting for their map callback.
+  std::vector<RawId> mQueuedBufferIds;
+  // Guards the three ID vectors above; taken on both the IPC path
+  // (RecvSwapChainPresent/Destroy) and the map-callback path.
+  Mutex mBuffersLock;
+
+  PresentationData() : mBuffersLock("WebGPU presentation buffers") {
+    MOZ_COUNT_CTOR(PresentationData);
+  }
+
+ private:
+  ~PresentationData() { MOZ_COUNT_DTOR(PresentationData); }
+};
+
+static void FreeAdapter(RawId id, void* param) {
+ if (!static_cast<WebGPUParent*>(param)->SendFreeAdapter(id)) {
+ MOZ_CRASH("IPC failure");
+ }
+}
+static void FreeDevice(RawId id, void* param) {
+ if (!static_cast<WebGPUParent*>(param)->SendFreeDevice(id)) {
+ MOZ_CRASH("IPC failure");
+ }
+}
+static void FreeSwapChain(RawId id, void* param) {
+ Unused << id;
+ Unused << param;
+}
+static void FreePipelineLayout(RawId id, void* param) {
+ if (!static_cast<WebGPUParent*>(param)->SendFreePipelineLayout(id)) {
+ MOZ_CRASH("IPC failure");
+ }
+}
+static void FreeShaderModule(RawId id, void* param) {
+ if (!static_cast<WebGPUParent*>(param)->SendFreeShaderModule(id)) {
+ MOZ_CRASH("IPC failure");
+ }
+}
+static void FreeBindGroupLayout(RawId id, void* param) {
+ if (!static_cast<WebGPUParent*>(param)->SendFreeBindGroupLayout(id)) {
+ MOZ_CRASH("IPC failure");
+ }
+}
+static void FreeBindGroup(RawId id, void* param) {
+ if (!static_cast<WebGPUParent*>(param)->SendFreeBindGroup(id)) {
+ MOZ_CRASH("IPC failure");
+ }
+}
+static void FreeCommandBuffer(RawId id, void* param) {
+ if (!static_cast<WebGPUParent*>(param)->SendFreeCommandBuffer(id)) {
+ MOZ_CRASH("IPC failure");
+ }
+}
+static void FreeRenderPipeline(RawId id, void* param) {
+ if (!static_cast<WebGPUParent*>(param)->SendFreeRenderPipeline(id)) {
+ MOZ_CRASH("IPC failure");
+ }
+}
+static void FreeComputePipeline(RawId id, void* param) {
+ if (!static_cast<WebGPUParent*>(param)->SendFreeComputePipeline(id)) {
+ MOZ_CRASH("IPC failure");
+ }
+}
+static void FreeBuffer(RawId id, void* param) {
+ if (!static_cast<WebGPUParent*>(param)->SendFreeBuffer(id)) {
+ MOZ_CRASH("IPC failure");
+ }
+}
+static void FreeTexture(RawId id, void* param) {
+ if (!static_cast<WebGPUParent*>(param)->SendFreeTexture(id)) {
+ MOZ_CRASH("IPC failure");
+ }
+}
+static void FreeTextureView(RawId id, void* param) {
+ if (!static_cast<WebGPUParent*>(param)->SendFreeTextureView(id)) {
+ MOZ_CRASH("IPC failure");
+ }
+}
+static void FreeSampler(RawId id, void* param) {
+ if (!static_cast<WebGPUParent*>(param)->SendFreeSampler(id)) {
+ MOZ_CRASH("IPC failure");
+ }
+}
+static void FreeSurface(RawId id, void* param) {
+ Unused << id;
+ Unused << param;
+}
+
+// Builds the identity-recycler callback table passed to wgpu_server_new().
+// `param` is the owning WebGPUParent, threaded back into each Free*
+// callback above when wgpu retires an ID.
+static ffi::WGPUIdentityRecyclerFactory MakeFactory(void* param) {
+  ffi::WGPUIdentityRecyclerFactory factory = {param};
+  factory.free_adapter = FreeAdapter;
+  factory.free_device = FreeDevice;
+  factory.free_swap_chain = FreeSwapChain;
+  factory.free_pipeline_layout = FreePipelineLayout;
+  factory.free_shader_module = FreeShaderModule;
+  factory.free_bind_group_layout = FreeBindGroupLayout;
+  factory.free_bind_group = FreeBindGroup;
+  factory.free_command_buffer = FreeCommandBuffer;
+  factory.free_render_pipeline = FreeRenderPipeline;
+  factory.free_compute_pipeline = FreeComputePipeline;
+  factory.free_buffer = FreeBuffer;
+  factory.free_texture = FreeTexture;
+  factory.free_texture_view = FreeTextureView;
+  factory.free_sampler = FreeSampler;
+  factory.free_surface = FreeSurface;
+  return factory;
+}
+
+WebGPUParent::WebGPUParent()
+    : mContext(ffi::wgpu_server_new(MakeFactory(this))) {
+  // Poll all devices periodically so async callbacks (buffer maps,
+  // presentation) make progress even without further IPC traffic.
+  mTimer.Start(base::TimeDelta::FromMilliseconds(POLL_TIME_MS), this,
+               &WebGPUParent::MaintainDevices);
+}
+
+WebGPUParent::~WebGPUParent() = default;
+
+// Timer tick: non-blocking poll of every device (second arg = force_wait).
+void WebGPUParent::MaintainDevices() {
+  ffi::wgpu_server_poll_all_devices(mContext, false);
+}
+
+// Picks a hardware adapter matching aOptions. The child pre-allocates one
+// candidate ID per backend in aTargetIds; the FFI call returns the index of
+// the ID actually used, or a negative value if no adapter was found.
+ipc::IPCResult WebGPUParent::RecvInstanceRequestAdapter(
+    const dom::GPURequestAdapterOptions& aOptions,
+    const nsTArray<RawId>& aTargetIds,
+    InstanceRequestAdapterResolver&& resolver) {
+  ffi::WGPURequestAdapterOptions options = {};
+  if (aOptions.mPowerPreference.WasPassed()) {
+    options.power_preference = static_cast<ffi::WGPUPowerPreference>(
+        aOptions.mPowerPreference.Value());
+  }
+  // TODO: make available backends configurable by prefs
+
+  ErrorBuffer error;
+  int8_t index = ffi::wgpu_server_instance_request_adapter(
+      mContext, &options, aTargetIds.Elements(), aTargetIds.Length(),
+      error.ToFFI());
+  if (index >= 0) {
+    resolver(aTargetIds[index]);
+  } else {
+    // 0 signals failure to the child (presumably the invalid RawId —
+    // verify against WebGPUChild's handling).
+    resolver(0);
+  }
+  error.CheckAndForward(this, 0);
+
+  // free the unused IDs
+  for (size_t i = 0; i < aTargetIds.Length(); ++i) {
+    if (static_cast<int8_t>(i) != index && !SendFreeAdapter(aTargetIds[i])) {
+      NS_ERROR("Unable to SendFreeAdapter");
+    }
+  }
+  return IPC_OK();
+}
+
+// Creates a device (with child-chosen ID aNewId) on adapter aSelfId,
+// translating the DOM limit fields into the FFI descriptor. Errors are
+// forwarded to the child via ErrorBuffer rather than failing the IPC call.
+ipc::IPCResult WebGPUParent::RecvAdapterRequestDevice(
+    RawId aSelfId, const dom::GPUDeviceDescriptor& aDesc, RawId aNewId) {
+  ffi::WGPUDeviceDescriptor desc = {};
+  desc.shader_validation = true;  // required for implicit pipeline layouts
+
+  if (aDesc.mLimits.WasPassed()) {
+    // Copy each DOM-requested limit field-by-field into the FFI struct.
+    const auto& lim = aDesc.mLimits.Value();
+    desc.limits.max_bind_groups = lim.mMaxBindGroups;
+    desc.limits.max_dynamic_uniform_buffers_per_pipeline_layout =
+        lim.mMaxDynamicUniformBuffersPerPipelineLayout;
+    desc.limits.max_dynamic_storage_buffers_per_pipeline_layout =
+        lim.mMaxDynamicStorageBuffersPerPipelineLayout;
+    desc.limits.max_sampled_textures_per_shader_stage =
+        lim.mMaxSampledTexturesPerShaderStage;
+    desc.limits.max_samplers_per_shader_stage = lim.mMaxSamplersPerShaderStage;
+    desc.limits.max_storage_buffers_per_shader_stage =
+        lim.mMaxStorageBuffersPerShaderStage;
+    desc.limits.max_storage_textures_per_shader_stage =
+        lim.mMaxStorageTexturesPerShaderStage;
+    desc.limits.max_uniform_buffers_per_shader_stage =
+        lim.mMaxUniformBuffersPerShaderStage;
+    desc.limits.max_uniform_buffer_binding_size =
+        lim.mMaxUniformBufferBindingSize;
+  } else {
+    // No limits requested: fall back to wgpu's defaults.
+    ffi::wgpu_server_fill_default_limits(&desc.limits);
+  }
+
+  ErrorBuffer error;
+  ffi::wgpu_server_adapter_request_device(mContext, aSelfId, &desc, aNewId,
+                                          error.ToFFI());
+  error.CheckAndForward(this, 0);
+  return IPC_OK();
+}
+
+// One-to-one drops of the server-side objects; ID recycling is reported
+// back to the child by the Free* factory callbacks.
+ipc::IPCResult WebGPUParent::RecvAdapterDestroy(RawId aSelfId) {
+  ffi::wgpu_server_adapter_drop(mContext, aSelfId);
+  return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvDeviceDestroy(RawId aSelfId) {
+  ffi::wgpu_server_device_drop(mContext, aSelfId);
+  return IPC_OK();
+}
+
+// Takes custody of the Shmem backing a mappable buffer while the GPU
+// process holds it (see mSharedMemoryMap).
+// NOTE(review): an existing entry for aSelfId would be overwritten without
+// deallocation — presumably the child sends this at most once per buffer
+// between maps; verify against WebGPUChild.
+ipc::IPCResult WebGPUParent::RecvBufferReturnShmem(RawId aSelfId,
+                                                   Shmem&& aShmem) {
+  mSharedMemoryMap[aSelfId] = aShmem;
+  return IPC_OK();
+}
+
+struct MapRequest {
+ const ffi::WGPUGlobal* const mContext;
+ ffi::WGPUBufferId mBufferId;
+ ffi::WGPUHostMap mHostMap;
+ uint64_t mOffset;
+ ipc::Shmem mShmem;
+ WebGPUParent::BufferMapResolver mResolver;
+ MapRequest(const ffi::WGPUGlobal* context, ffi::WGPUBufferId bufferId,
+ ffi::WGPUHostMap hostMap, uint64_t offset, ipc::Shmem&& shmem,
+ WebGPUParent::BufferMapResolver&& resolver)
+ : mContext(context),
+ mBufferId(bufferId),
+ mHostMap(hostMap),
+ mOffset(offset),
+ mShmem(shmem),
+ mResolver(resolver) {}
+};
+
+// FFI completion callback for wgpu_server_buffer_map (see RecvBufferMap).
+// For read-maps, copies the mapped GPU range into the request's Shmem
+// before resolving the child's promise with it.
+static void MapCallback(ffi::WGPUBufferMapAsyncStatus status,
+                        uint8_t* userdata) {
+  auto* req = reinterpret_cast<MapRequest*>(userdata);
+  // TODO: better handle errors
+  MOZ_ASSERT(status == ffi::WGPUBufferMapAsyncStatus_Success);
+  if (req->mHostMap == ffi::WGPUHostMap_Read) {
+    const uint8_t* ptr = ffi::wgpu_server_buffer_get_mapped_range(
+        req->mContext, req->mBufferId, req->mOffset,
+        req->mShmem.Size<uint8_t>());
+    memcpy(req->mShmem.get<uint8_t>(), ptr, req->mShmem.Size<uint8_t>());
+  }
+  req->mResolver(std::move(req->mShmem));
+  // The request was heap-allocated by RecvBufferMap; this is its last use.
+  delete req;
+}
+
+// Starts an async map of buffer aSelfId. The buffer's Shmem is moved out
+// of mSharedMemoryMap for the duration of the map and handed back to the
+// child by MapCallback via aResolver.
+// NOTE(review): operator[] default-inserts an empty Shmem if aSelfId was
+// never registered via RecvBufferReturnShmem — confirm the child always
+// sends the Shmem first.
+ipc::IPCResult WebGPUParent::RecvBufferMap(RawId aSelfId,
+                                           ffi::WGPUHostMap aHostMap,
+                                           uint64_t aOffset, uint64_t aSize,
+                                           BufferMapResolver&& aResolver) {
+  auto* request = new MapRequest(mContext, aSelfId, aHostMap, aOffset,
+                                 std::move(mSharedMemoryMap[aSelfId]),
+                                 std::move(aResolver));
+  ffi::WGPUBufferMapOperation mapOperation = {
+      aHostMap, &MapCallback, reinterpret_cast<uint8_t*>(request)};
+  ffi::wgpu_server_buffer_map(mContext, aSelfId, aOffset, aSize, mapOperation);
+  return IPC_OK();
+}
+
+// Unmaps buffer aSelfId. When aFlush is set (write mapping), the Shmem
+// contents are copied back into the mapped GPU range first. The Shmem is
+// then returned to mSharedMemoryMap for the next map, or deallocated if
+// the buffer's entry is gone (buffer already destroyed).
+ipc::IPCResult WebGPUParent::RecvBufferUnmap(RawId aSelfId, Shmem&& aShmem,
+                                             bool aFlush) {
+  if (aFlush) {
+    // TODO: flush exact modified sub-range
+    uint8_t* ptr = ffi::wgpu_server_buffer_get_mapped_range(
+        mContext, aSelfId, 0, aShmem.Size<uint8_t>());
+    MOZ_ASSERT(ptr != nullptr);
+    memcpy(ptr, aShmem.get<uint8_t>(), aShmem.Size<uint8_t>());
+  }
+
+  ffi::wgpu_server_buffer_unmap(mContext, aSelfId);
+
+  const auto iter = mSharedMemoryMap.find(aSelfId);
+  if (iter == mSharedMemoryMap.end()) {
+    DeallocShmem(aShmem);
+  } else {
+    iter->second = aShmem;
+  }
+  return IPC_OK();
+}
+
+// Drops the buffer and releases any parent-held Shmem associated with it.
+ipc::IPCResult WebGPUParent::RecvBufferDestroy(RawId aSelfId) {
+  ffi::wgpu_server_buffer_drop(mContext, aSelfId);
+
+  const auto iter = mSharedMemoryMap.find(aSelfId);
+  if (iter != mSharedMemoryMap.end()) {
+    DeallocShmem(iter->second);
+    mSharedMemoryMap.erase(iter);
+  }
+  return IPC_OK();
+}
+
+// Simple one-to-one drops of server-side objects.
+ipc::IPCResult WebGPUParent::RecvTextureDestroy(RawId aSelfId) {
+  ffi::wgpu_server_texture_drop(mContext, aSelfId);
+  return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvTextureViewDestroy(RawId aSelfId) {
+  ffi::wgpu_server_texture_view_drop(mContext, aSelfId);
+  return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvSamplerDestroy(RawId aSelfId) {
+  ffi::wgpu_server_sampler_drop(mContext, aSelfId);
+  return IPC_OK();
+}
+
+// Finalizes encoder aSelfId into a command buffer. The DOM descriptor is
+// currently unused; an empty FFI descriptor is passed instead.
+ipc::IPCResult WebGPUParent::RecvCommandEncoderFinish(
+    RawId aSelfId, RawId aDeviceId,
+    const dom::GPUCommandBufferDescriptor& aDesc) {
+  Unused << aDesc;
+  ffi::WGPUCommandBufferDescriptor desc = {};
+  ErrorBuffer error;
+  ffi::wgpu_server_encoder_finish(mContext, aSelfId, &desc, error.ToFFI());
+
+  error.CheckAndForward(this, aDeviceId);
+  return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvCommandEncoderDestroy(RawId aSelfId) {
+  ffi::wgpu_server_encoder_drop(mContext, aSelfId);
+  return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvCommandBufferDestroy(RawId aSelfId) {
+  ffi::wgpu_server_command_buffer_drop(mContext, aSelfId);
+  return IPC_OK();
+}
+
+// Submits the listed command buffers to queue aSelfId.
+ipc::IPCResult WebGPUParent::RecvQueueSubmit(
+    RawId aSelfId, const nsTArray<RawId>& aCommandBuffers) {
+  ffi::wgpu_server_queue_submit(mContext, aSelfId, aCommandBuffers.Elements(),
+                                aCommandBuffers.Length());
+  return IPC_OK();
+}
+
+// The Shmem below is a one-shot data payload: it is consumed synchronously
+// by the FFI write and deallocated before returning.
+ipc::IPCResult WebGPUParent::RecvQueueWriteBuffer(RawId aSelfId,
+                                                  RawId aBufferId,
+                                                  uint64_t aBufferOffset,
+                                                  Shmem&& aShmem) {
+  ffi::wgpu_server_queue_write_buffer(mContext, aSelfId, aBufferId,
+                                      aBufferOffset, aShmem.get<uint8_t>(),
+                                      aShmem.Size<uint8_t>());
+  DeallocShmem(aShmem);
+  return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvQueueWriteTexture(
+    RawId aSelfId, const ffi::WGPUTextureCopyView& aDestination, Shmem&& aShmem,
+    const ffi::WGPUTextureDataLayout& aDataLayout,
+    const ffi::WGPUExtent3d& aExtent) {
+  ffi::wgpu_server_queue_write_texture(
+      mContext, aSelfId, &aDestination, aShmem.get<uint8_t>(),
+      aShmem.Size<uint8_t>(), &aDataLayout, &aExtent);
+  DeallocShmem(aShmem);
+  return IPC_OK();
+}
+
+// One-to-one drops of the remaining server-side object kinds.
+ipc::IPCResult WebGPUParent::RecvBindGroupLayoutDestroy(RawId aSelfId) {
+  ffi::wgpu_server_bind_group_layout_drop(mContext, aSelfId);
+  return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvPipelineLayoutDestroy(RawId aSelfId) {
+  ffi::wgpu_server_pipeline_layout_drop(mContext, aSelfId);
+  return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvBindGroupDestroy(RawId aSelfId) {
+  ffi::wgpu_server_bind_group_drop(mContext, aSelfId);
+  return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvShaderModuleDestroy(RawId aSelfId) {
+  ffi::wgpu_server_shader_module_drop(mContext, aSelfId);
+  return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvComputePipelineDestroy(RawId aSelfId) {
+  ffi::wgpu_server_compute_pipeline_drop(mContext, aSelfId);
+  return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvRenderPipelineDestroy(RawId aSelfId) {
+  ffi::wgpu_server_render_pipeline_drop(mContext, aSelfId);
+  return IPC_OK();
+}
+
// TODO: proper destruction

// Staging-buffer rows must be aligned to 256 bytes (wgpu's
// COPY_BYTES_PER_ROW_ALIGNMENT).
static const uint64_t kBufferAlignment = 0x100;

// Rounds `value` up to the nearest multiple of kBufferAlignment.
// BUG FIX: the previous formula `(value | (kBufferAlignment - 1)) + 1`
// bumped already-aligned values to the *next* multiple (e.g. 256 -> 512,
// 0 -> 256), wasting up to 256 bytes per row of every staging buffer.
static uint64_t Align(uint64_t value) {
  return (value + kBufferAlignment - 1) & ~(kBufferAlignment - 1);
}
+
+ipc::IPCResult WebGPUParent::RecvDeviceCreateSwapChain(
+ RawId aSelfId, RawId aQueueId, const RGBDescriptor& aDesc,
+ const nsTArray<RawId>& aBufferIds, ExternalImageId aExternalId) {
+ const auto rows = aDesc.size().height;
+ const auto bufferStride =
+ Align(static_cast<uint64_t>(aDesc.size().width) * 4);
+ const auto textureStride = layers::ImageDataSerializer::GetRGBStride(aDesc);
+ const auto wholeBufferSize = CheckedInt<size_t>(textureStride) * rows;
+ if (!wholeBufferSize.isValid()) {
+ NS_ERROR("Invalid total buffer size!");
+ return IPC_OK();
+ }
+ auto* textureHostData = new (fallible) uint8_t[wholeBufferSize.value()];
+ if (!textureHostData) {
+ NS_ERROR("Unable to allocate host data!");
+ return IPC_OK();
+ }
+ RefPtr<layers::MemoryTextureHost> textureHost = new layers::MemoryTextureHost(
+ textureHostData, aDesc, layers::TextureFlags::NO_FLAGS);
+ textureHost->DisableExternalTextures();
+ textureHost->CreateRenderTexture(aExternalId);
+ nsTArray<RawId> bufferIds(aBufferIds.Clone());
+ RefPtr<PresentationData> data = new PresentationData();
+ data->mDeviceId = aSelfId;
+ data->mQueueId = aQueueId;
+ data->mTextureHost = textureHost;
+ data->mSourcePitch = bufferStride;
+ data->mTargetPitch = textureStride;
+ data->mRowCount = rows;
+ for (const RawId id : bufferIds) {
+ data->mUnassignedBufferIds.push_back(id);
+ }
+ if (!mCanvasMap.insert({AsUint64(aExternalId), data}).second) {
+ NS_ERROR("External image is already registered as WebGPU canvas!");
+ }
+ return IPC_OK();
+}
+
+// Heap-allocated payload for PresentCallback; holds a strong reference to
+// the PresentationData for the duration of the async buffer map.
+struct PresentRequest {
+  const ffi::WGPUGlobal* mContext;
+  RefPtr<PresentationData> mData;
+};
+
+static void PresentCallback(ffi::WGPUBufferMapAsyncStatus status,
+ uint8_t* userdata) {
+ auto* req = reinterpret_cast<PresentRequest*>(userdata);
+ PresentationData* data = req->mData.get();
+ // get the buffer ID
+ data->mBuffersLock.Lock();
+ RawId bufferId = data->mQueuedBufferIds.back();
+ data->mQueuedBufferIds.pop_back();
+ data->mAvailableBufferIds.push_back(bufferId);
+ data->mBuffersLock.Unlock();
+ // copy the data
+ if (status == ffi::WGPUBufferMapAsyncStatus_Success) {
+ const auto bufferSize = data->mRowCount * data->mSourcePitch;
+ const uint8_t* ptr = ffi::wgpu_server_buffer_get_mapped_range(
+ req->mContext, bufferId, 0, bufferSize);
+ uint8_t* dst = data->mTextureHost->GetBuffer();
+ for (uint32_t row = 0; row < data->mRowCount; ++row) {
+ memcpy(dst, ptr, data->mTargetPitch);
+ dst += data->mTargetPitch;
+ ptr += data->mSourcePitch;
+ }
+ wgpu_server_buffer_unmap(req->mContext, bufferId);
+ } else {
+ // TODO: better handle errors
+ NS_WARNING("WebGPU frame mapping failed!");
+ }
+ // free yourself
+ delete req;
+}
+
+ipc::IPCResult WebGPUParent::RecvSwapChainPresent(
+ wr::ExternalImageId aExternalId, RawId aTextureId,
+ RawId aCommandEncoderId) {
+ // step 0: get the data associated with the swapchain
+ const auto& lookup = mCanvasMap.find(AsUint64(aExternalId));
+ if (lookup == mCanvasMap.end()) {
+ NS_WARNING("WebGPU presenting on a destroyed swap chain!");
+ return IPC_OK();
+ }
+ RefPtr<PresentationData> data = lookup->second.get();
+ RawId bufferId = 0;
+ const auto& size = data->mTextureHost->GetSize();
+ const auto bufferSize = data->mRowCount * data->mSourcePitch;
+
+ // step 1: find an available staging buffer, or create one
+ data->mBuffersLock.Lock();
+ if (!data->mAvailableBufferIds.empty()) {
+ bufferId = data->mAvailableBufferIds.back();
+ data->mAvailableBufferIds.pop_back();
+ } else if (!data->mUnassignedBufferIds.empty()) {
+ bufferId = data->mUnassignedBufferIds.back();
+ data->mUnassignedBufferIds.pop_back();
+
+ ffi::WGPUBufferUsage usage =
+ WGPUBufferUsage_COPY_DST | WGPUBufferUsage_MAP_READ;
+ ffi::WGPUBufferDescriptor desc = {};
+ desc.size = bufferSize;
+ desc.usage = usage;
+
+ ErrorBuffer error;
+ ffi::wgpu_server_device_create_buffer(mContext, data->mDeviceId, &desc,
+ bufferId, error.ToFFI());
+ if (error.CheckAndForward(this, data->mDeviceId)) {
+ return IPC_OK();
+ }
+ } else {
+ bufferId = 0;
+ }
+ if (bufferId) {
+ data->mQueuedBufferIds.insert(data->mQueuedBufferIds.begin(), bufferId);
+ }
+ data->mBuffersLock.Unlock();
+ if (!bufferId) {
+ // TODO: add a warning - no buffer are available!
+ return IPC_OK();
+ }
+
+ // step 3: submit a copy command for the frame
+ ffi::WGPUCommandEncoderDescriptor encoderDesc = {};
+ {
+ ErrorBuffer error;
+ ffi::wgpu_server_device_create_encoder(mContext, data->mDeviceId,
+ &encoderDesc, aCommandEncoderId,
+ error.ToFFI());
+ if (error.CheckAndForward(this, data->mDeviceId)) {
+ return IPC_OK();
+ }
+ }
+
+ const ffi::WGPUTextureCopyView texView = {
+ aTextureId,
+ };
+ const ffi::WGPUTextureDataLayout bufLayout = {
+ 0,
+ data->mSourcePitch,
+ 0,
+ };
+ const ffi::WGPUBufferCopyView bufView = {
+ bufferId,
+ bufLayout,
+ };
+ const ffi::WGPUExtent3d extent = {
+ static_cast<uint32_t>(size.width),
+ static_cast<uint32_t>(size.height),
+ 1,
+ };
+ ffi::wgpu_server_encoder_copy_texture_to_buffer(mContext, aCommandEncoderId,
+ &texView, &bufView, &extent);
+ ffi::WGPUCommandBufferDescriptor commandDesc = {};
+ {
+ ErrorBuffer error;
+ ffi::wgpu_server_encoder_finish(mContext, aCommandEncoderId, &commandDesc,
+ error.ToFFI());
+ if (error.CheckAndForward(this, data->mDeviceId)) {
+ return IPC_OK();
+ }
+ }
+
+ ffi::wgpu_server_queue_submit(mContext, data->mQueueId, &aCommandEncoderId,
+ 1);
+
+ // step 4: request the pixels to be copied into the external texture
+ // TODO: this isn't strictly necessary. When WR wants to Lock() the external
+ // texture,
+ // we can just give it the contents of the last mapped buffer instead of the
+ // copy.
+ auto* const presentRequest = new PresentRequest{
+ mContext,
+ data,
+ };
+
+ ffi::WGPUBufferMapOperation mapOperation = {
+ ffi::WGPUHostMap_Read, &PresentCallback,
+ reinterpret_cast<uint8_t*>(presentRequest)};
+ ffi::wgpu_server_buffer_map(mContext, bufferId, 0, bufferSize, mapOperation);
+
+ return IPC_OK();
+}
+
+// Tears down a swap chain: unregisters the render texture and disposes of
+// every staging buffer by its current state.
+ipc::IPCResult WebGPUParent::RecvSwapChainDestroy(
+    wr::ExternalImageId aExternalId) {
+  const auto& lookup = mCanvasMap.find(AsUint64(aExternalId));
+  MOZ_ASSERT(lookup != mCanvasMap.end());
+  RefPtr<PresentationData> data = lookup->second.get();
+  mCanvasMap.erase(AsUint64(aExternalId));
+  data->mTextureHost = nullptr;
+  layers::TextureHost::DestroyRenderTexture(aExternalId);
+
+  data->mBuffersLock.Lock();
+  // Never-created IDs only need to be recycled on the child side.
+  for (const auto bid : data->mUnassignedBufferIds) {
+    if (!SendFreeBuffer(bid)) {
+      NS_WARNING("Unable to free an ID for non-assigned buffer");
+    }
+  }
+  // Created buffers are dropped server-side.
+  // NOTE(review): queued buffers may still have a PresentCallback pending
+  // that uses their ID after this drop — confirm wgpu defers destruction
+  // until the map completes.
+  for (const auto bid : data->mAvailableBufferIds) {
+    ffi::wgpu_server_buffer_drop(mContext, bid);
+  }
+  for (const auto bid : data->mQueuedBufferIds) {
+    ffi::wgpu_server_buffer_drop(mContext, bid);
+  }
+  data->mBuffersLock.Unlock();
+  return IPC_OK();
+}
+
+// Final teardown of the actor's GPU state: stop the poll timer, drop all
+// render textures, do one blocking poll (force_wait = true) to drain
+// outstanding callbacks, then destroy the wgpu context.
+ipc::IPCResult WebGPUParent::RecvShutdown() {
+  mTimer.Stop();
+  for (const auto& p : mCanvasMap) {
+    const wr::ExternalImageId extId = {p.first};
+    layers::TextureHost::DestroyRenderTexture(extId);
+  }
+  mCanvasMap.clear();
+  ffi::wgpu_server_poll_all_devices(mContext, true);
+  ffi::wgpu_server_delete(const_cast<ffi::WGPUGlobal*>(mContext));
+  return IPC_OK();
+}
+
+// Executes a serialized device-scoped action. The FFI call may emit a
+// follow-up "drop" action buffer, which is forwarded to the child.
+ipc::IPCResult WebGPUParent::RecvDeviceAction(RawId aSelf,
+                                              const ipc::ByteBuf& aByteBuf) {
+  ipc::ByteBuf byteBuf;
+  ErrorBuffer error;
+  ffi::wgpu_server_device_action(mContext, aSelf, ToFFI(&aByteBuf),
+                                 ToFFI(&byteBuf), error.ToFFI());
+
+  if (byteBuf.mData) {
+    if (!SendDropAction(std::move(byteBuf))) {
+      NS_WARNING("Unable to set a drop action!");
+    }
+  }
+
+  error.CheckAndForward(this, aSelf);
+  return IPC_OK();
+}
+
+// Executes serialized texture / command-encoder actions; errors are
+// attributed to the owning device aDevice.
+ipc::IPCResult WebGPUParent::RecvTextureAction(RawId aSelf, RawId aDevice,
+                                               const ipc::ByteBuf& aByteBuf) {
+  ErrorBuffer error;
+  ffi::wgpu_server_texture_action(mContext, aSelf, ToFFI(&aByteBuf),
+                                  error.ToFFI());
+
+  error.CheckAndForward(this, aDevice);
+  return IPC_OK();
+}
+
+ipc::IPCResult WebGPUParent::RecvCommandEncoderAction(
+    RawId aSelf, RawId aDevice, const ipc::ByteBuf& aByteBuf) {
+  ErrorBuffer error;
+  ffi::wgpu_server_command_encoder_action(mContext, aSelf, ToFFI(&aByteBuf),
+                                          error.ToFFI());
+  error.CheckAndForward(this, aDevice);
+  return IPC_OK();
+}
+
+// Assigns child-chosen ID aAssignId to the implicit bind group layout at
+// aIndex of a compute or render pipeline.
+ipc::IPCResult WebGPUParent::RecvBumpImplicitBindGroupLayout(RawId aPipelineId,
+                                                             bool aIsCompute,
+                                                             uint32_t aIndex,
+                                                             RawId aAssignId) {
+  ErrorBuffer error;
+  if (aIsCompute) {
+    ffi::wgpu_server_compute_pipeline_get_bind_group_layout(
+        mContext, aPipelineId, aIndex, aAssignId, error.ToFFI());
+  } else {
+    ffi::wgpu_server_render_pipeline_get_bind_group_layout(
+        mContext, aPipelineId, aIndex, aAssignId, error.ToFFI());
+  }
+
+  error.CheckAndForward(this, 0);
+  return IPC_OK();
+}
+
+} // namespace webgpu
+} // namespace mozilla
diff --git a/dom/webgpu/ipc/WebGPUParent.h b/dom/webgpu/ipc/WebGPUParent.h
new file mode 100644
index 0000000000..9df919d55b
--- /dev/null
+++ b/dom/webgpu/ipc/WebGPUParent.h
@@ -0,0 +1,99 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef WEBGPU_PARENT_H_
+#define WEBGPU_PARENT_H_
+
+#include "mozilla/webgpu/PWebGPUParent.h"
+#include "mozilla/webrender/WebRenderAPI.h"
+#include "WebGPUTypes.h"
+#include "base/timer.h"
+
+namespace mozilla {
+namespace webgpu {
+class PresentationData;
+
+// GPU-process side of the WebGPU IPC protocol. Owns the wgpu server
+// context and dispatches each child message to the corresponding
+// wgpu_server_* FFI call (see WebGPUParent.cpp).
+class WebGPUParent final : public PWebGPUParent {
+  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(WebGPUParent)
+
+ public:
+  explicit WebGPUParent();
+
+  ipc::IPCResult RecvInstanceRequestAdapter(
+      const dom::GPURequestAdapterOptions& aOptions,
+      const nsTArray<RawId>& aTargetIds,
+      InstanceRequestAdapterResolver&& resolver);
+  ipc::IPCResult RecvAdapterRequestDevice(RawId aSelfId,
+                                          const dom::GPUDeviceDescriptor& aDesc,
+                                          RawId aNewId);
+  ipc::IPCResult RecvAdapterDestroy(RawId aSelfId);
+  ipc::IPCResult RecvDeviceDestroy(RawId aSelfId);
+  ipc::IPCResult RecvBufferReturnShmem(RawId aSelfId, Shmem&& aShmem);
+  ipc::IPCResult RecvBufferMap(RawId aSelfId, ffi::WGPUHostMap aHostMap,
+                               uint64_t aOffset, uint64_t size,
+                               BufferMapResolver&& aResolver);
+  ipc::IPCResult RecvBufferUnmap(RawId aSelfId, Shmem&& aShmem, bool aFlush);
+  ipc::IPCResult RecvBufferDestroy(RawId aSelfId);
+  ipc::IPCResult RecvTextureDestroy(RawId aSelfId);
+  ipc::IPCResult RecvTextureViewDestroy(RawId aSelfId);
+  ipc::IPCResult RecvSamplerDestroy(RawId aSelfId);
+  ipc::IPCResult RecvCommandEncoderFinish(
+      RawId aSelfId, RawId aDeviceId,
+      const dom::GPUCommandBufferDescriptor& aDesc);
+  ipc::IPCResult RecvCommandEncoderDestroy(RawId aSelfId);
+  ipc::IPCResult RecvCommandBufferDestroy(RawId aSelfId);
+  ipc::IPCResult RecvQueueSubmit(RawId aSelfId,
+                                 const nsTArray<RawId>& aCommandBuffers);
+  ipc::IPCResult RecvQueueWriteBuffer(RawId aSelfId, RawId aBufferId,
+                                      uint64_t aBufferOffset, Shmem&& aShmem);
+  ipc::IPCResult RecvQueueWriteTexture(
+      RawId aSelfId, const ffi::WGPUTextureCopyView& aDestination,
+      Shmem&& aShmem, const ffi::WGPUTextureDataLayout& aDataLayout,
+      const ffi::WGPUExtent3d& aExtent);
+  ipc::IPCResult RecvBindGroupLayoutDestroy(RawId aSelfId);
+  ipc::IPCResult RecvPipelineLayoutDestroy(RawId aSelfId);
+  ipc::IPCResult RecvBindGroupDestroy(RawId aSelfId);
+  ipc::IPCResult RecvShaderModuleDestroy(RawId aSelfId);
+  ipc::IPCResult RecvComputePipelineDestroy(RawId aSelfId);
+  ipc::IPCResult RecvRenderPipelineDestroy(RawId aSelfId);
+  ipc::IPCResult RecvDeviceCreateSwapChain(RawId aSelfId, RawId aQueueId,
+                                           const layers::RGBDescriptor& aDesc,
+                                           const nsTArray<RawId>& aBufferIds,
+                                           ExternalImageId aExternalId);
+  ipc::IPCResult RecvSwapChainPresent(wr::ExternalImageId aExternalId,
+                                      RawId aTextureId,
+                                      RawId aCommandEncoderId);
+  ipc::IPCResult RecvSwapChainDestroy(wr::ExternalImageId aExternalId);
+
+  ipc::IPCResult RecvDeviceAction(RawId aSelf, const ipc::ByteBuf& aByteBuf);
+  ipc::IPCResult RecvTextureAction(RawId aSelf, RawId aDevice,
+                                   const ipc::ByteBuf& aByteBuf);
+  ipc::IPCResult RecvCommandEncoderAction(RawId aSelf, RawId aDevice,
+                                          const ipc::ByteBuf& aByteBuf);
+  ipc::IPCResult RecvBumpImplicitBindGroupLayout(RawId aPipelineId,
+                                                 bool aIsCompute,
+                                                 uint32_t aIndex,
+                                                 RawId aAssignId);
+
+  ipc::IPCResult RecvShutdown();
+
+ private:
+  virtual ~WebGPUParent();
+  // Periodic timer target: non-blocking poll of all wgpu devices.
+  void MaintainDevices();
+
+  // The wgpu server context; created in the ctor, deleted in RecvShutdown.
+  const ffi::WGPUGlobal* const mContext;
+  base::RepeatingTimer<WebGPUParent> mTimer;
+  /// Shmem associated with a mappable buffer has to be owned by one of the
+  /// processes. We keep it here for every mappable buffer while the buffer is
+  /// used by GPU.
+  std::unordered_map<uint64_t, Shmem> mSharedMemoryMap;
+  /// Associated presentation data for each swapchain.
+  std::unordered_map<uint64_t, RefPtr<PresentationData>> mCanvasMap;
+};
+
+} // namespace webgpu
+} // namespace mozilla
+
+#endif // WEBGPU_PARENT_H_
diff --git a/dom/webgpu/ipc/WebGPUSerialize.h b/dom/webgpu/ipc/WebGPUSerialize.h
new file mode 100644
index 0000000000..ffaacc0405
--- /dev/null
+++ b/dom/webgpu/ipc/WebGPUSerialize.h
@@ -0,0 +1,53 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef WEBGPU_SERIALIZE_H_
+#define WEBGPU_SERIALIZE_H_
+
+#include "WebGPUTypes.h"
+#include "ipc/EnumSerializer.h"
+#include "ipc/IPCMessageUtils.h"
+#include "mozilla/dom/WebGPUBinding.h"
+#include "mozilla/webgpu/ffi/wgpu.h"
+
+namespace IPC {
+
+// Helper macros declaring IPC ParamTraits for enums, bounded by an
+// end-guard value so out-of-range wire values are rejected.
+#define DEFINE_IPC_SERIALIZER_ENUM_GUARD(something, guard) \
+  template <>                                              \
+  struct ParamTraits<something>                            \
+      : public ContiguousEnumSerializer<something, something(0), guard> {}
+
+// DOM enums use the generated EndGuard_ member; FFI enums use the
+// cbindgen-generated *_Sentinel value.
+#define DEFINE_IPC_SERIALIZER_DOM_ENUM(something) \
+  DEFINE_IPC_SERIALIZER_ENUM_GUARD(something, something::EndGuard_)
+#define DEFINE_IPC_SERIALIZER_FFI_ENUM(something) \
+  DEFINE_IPC_SERIALIZER_ENUM_GUARD(something, something##_Sentinel)
+
+DEFINE_IPC_SERIALIZER_DOM_ENUM(mozilla::dom::GPUPowerPreference);
+
+DEFINE_IPC_SERIALIZER_FFI_ENUM(mozilla::webgpu::ffi::WGPUHostMap);
+
+DEFINE_IPC_SERIALIZER_WITHOUT_FIELDS(mozilla::dom::GPUCommandBufferDescriptor);
+
+// Field-wise serializers for the descriptor/geometry structs that cross
+// the PWebGPU protocol boundary.
+DEFINE_IPC_SERIALIZER_WITH_FIELDS(mozilla::dom::GPURequestAdapterOptions,
+                                  mPowerPreference);
+DEFINE_IPC_SERIALIZER_WITHOUT_FIELDS(mozilla::dom::GPUExtensions);
+DEFINE_IPC_SERIALIZER_WITH_FIELDS(mozilla::dom::GPULimits, mMaxBindGroups);
+DEFINE_IPC_SERIALIZER_WITH_FIELDS(mozilla::dom::GPUDeviceDescriptor,
+                                  mExtensions, mLimits);
+DEFINE_IPC_SERIALIZER_WITH_FIELDS(mozilla::webgpu::ffi::WGPUExtent3d, width,
+                                  height, depth);
+DEFINE_IPC_SERIALIZER_WITH_FIELDS(mozilla::webgpu::ffi::WGPUOrigin3d, x, y, z);
+
+DEFINE_IPC_SERIALIZER_WITH_FIELDS(mozilla::webgpu::ffi::WGPUTextureDataLayout,
+                                  offset, bytes_per_row, rows_per_image);
+DEFINE_IPC_SERIALIZER_WITH_FIELDS(mozilla::webgpu::ffi::WGPUTextureCopyView,
+                                  texture, mip_level, origin);
+
+#undef DEFINE_IPC_SERIALIZER_FFI_ENUM
+#undef DEFINE_IPC_SERIALIZER_DOM_ENUM
+#undef DEFINE_IPC_SERIALIZER_ENUM_GUARD
+
+}  // namespace IPC
+#endif // WEBGPU_SERIALIZE_H_
diff --git a/dom/webgpu/ipc/WebGPUTypes.h b/dom/webgpu/ipc/WebGPUTypes.h
new file mode 100644
index 0000000000..3e8e62afd4
--- /dev/null
+++ b/dom/webgpu/ipc/WebGPUTypes.h
@@ -0,0 +1,20 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef WEBGPU_TYPES_H_
+#define WEBGPU_TYPES_H_
+
+#include <cstdint>
+
namespace mozilla {
namespace webgpu {

// Identifier of any WebGPU object tracked across the PWebGPU protocol;
// IDs are allocated by the child and recycled via the Free* messages.
// (`using` replaces `typedef` per modern C++ convention; the file already
// relies on C++11+ features.)
using RawId = uint64_t;
// Byte offset or size within a GPU buffer.
using BufferAddress = uint64_t;

}  // namespace webgpu
}  // namespace mozilla
+
+#endif // WEBGPU_TYPES_H_