
[WIP] update to wgpu-native v25.0.2.1 #718


Draft · wants to merge 19 commits into base: main
8 changes: 6 additions & 2 deletions codegen/wgpu_native_patcher.py
@@ -41,7 +41,7 @@ def compare_flags():
"""

idl = get_idl_parser()
hp = get_h_parser()
hp = get_h_parser()  # gets both header files!

name_map = {
"ColorWrite": "ColorWriteMask",
@@ -50,7 +50,9 @@ def compare_flags():
for name, flag in idl.flags.items():
name = name_map.get(name, name)
if name not in hp.flags:
print(f"Flag {name} missing in wgpu.h")
print(
f"Flag {name} missing in wgpu.h"
)  # should actually say webgpu.h/wgpu.h, since this h-parser covers both header files
else:
for key, val in flag.items():
key = key.title().replace("_", "") # MAP_READ -> MapRead
@@ -70,6 +72,7 @@ def write_mappings():
idl = get_idl_parser()
hp = get_h_parser()

# these are empty and have no use?
name_map = {}
name_map_i = {v: k for k, v in name_map.items()}

@@ -126,6 +129,7 @@ def write_mappings():
("BackendType", False),
("NativeFeature", True),
("PipelineStatisticName", True),
("Dx12Compiler", False),
):
pylines.append(f' "{name}":' + " {")
for key, val in hp.enums[name].items():
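For context, the key normalization that compare_flags() relies on is a plain title-case transform; a standalone illustration (not part of the patch):

# Illustration only: IDL flag keys -> the names used in webgpu.h/wgpu.h.
for key in ("MAP_READ", "COPY_SRC", "QUERY_RESOLVE"):
    print(key, "->", key.title().replace("_", ""))  # e.g. MAP_READ -> MapRead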
2 changes: 1 addition & 1 deletion examples/cube.py
@@ -15,6 +15,7 @@
import wgpu
import numpy as np


from rendercanvas.auto import RenderCanvas, loop


@@ -454,7 +455,6 @@ async def draw_frame_async():
uniform_dtype = [("transform", "float32", (4, 4))]
uniform_data = np.zeros((), dtype=uniform_dtype)


print("Available adapters on this system:")
for a in wgpu.gpu.enumerate_adapters_sync():
print(a.summary)
4 changes: 2 additions & 2 deletions tests/test_wgpu_native_errors.py
@@ -185,7 +185,7 @@ def test_validate_shader_error1(caplog):
┌─ :10:20
10 │ out.position = matrics * out.position;
│ ^^^^^^^^^^^^^^^^^^^^^^ naga::Expression [7]
│ ^^^^^^^^^^^^^^^^^^^^^^ naga::ir::Expression [7]
= Expression [7] is invalid
= Operation Multiply can't work with [4] (of type Matrix { columns: Quad, rows: Quad, scalar: Scalar { kind: Float, width: 4 } }) and [6] (of type Vector { size: Tri, scalar: Scalar { kind: Float, width: 4 } })
@@ -238,7 +238,7 @@ def test_validate_shader_error2(caplog):
┌─ :9:16
9 │ return vec3<f32>(1.0, 0.0, 1.0);
│ ^^^^^^^^^^^^^^^^^^^^^^^^ naga::Expression [8]
│ ^^^^^^^^^^^^^^^^^^^^^^^^ naga::ir::Expression [8]
= The `return` value Some([8]) does not match the function return value

4 changes: 3 additions & 1 deletion tools/download_wgpu_native.py
@@ -149,7 +149,9 @@ def main(version=None, os_string=None, arch=None, upstream=None):

current_version = get_current_version()
if version != current_version:
print(f"Version changed, updating {VERSION_FILE}")
print(
f"Version changed, updating {VERSION_FILE}, diff: https://github.com/{upstream}/compare/v{current_version}...v{version}"
)
filename = "commit-sha"
url = f"https://github.com/{upstream}/releases/download/v{version}/{filename}"
commit_sha_filename = os.path.join(tmp, filename)
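For this particular update, and assuming the default upstream gfx-rs/wgpu-native, the printed compare link would look like this:

https://github.com/gfx-rs/wgpu-native/compare/v24.0.3.1...v25.0.2.1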
10 changes: 7 additions & 3 deletions wgpu/_classes.py
@@ -92,7 +92,7 @@ class GPU:
def request_adapter_sync(
self,
*,
feaure_level: str = "core",
feature_level: str = "core",
power_preference: enums.PowerPreference = None,
force_fallback_adapter: bool = False,
canvas=None,
@@ -105,6 +105,7 @@ def request_adapter_sync(
from .backends.auto import gpu

return gpu.request_adapter_sync(
feature_level=feature_level,
power_preference=power_preference,
force_fallback_adapter=force_fallback_adapter,
canvas=canvas,
@@ -115,7 +116,7 @@ def request_adapter_sync(
async def request_adapter_async(
self,
*,
feaure_level: str = "core",
feature_level: str = "core",
power_preference: enums.PowerPreference = None,
force_fallback_adapter: bool = False,
canvas=None,
@@ -124,7 +125,7 @@ async def request_adapter_async(
implementation, from which one can request a `GPUDevice`.

Arguments:
feaure_level (str): The feature level "core" (default) or "compatibility".
feature_level (str): The feature level "core" (default) or "compatibility".
This provides a way to opt into additional validation restrictions.
power_preference (PowerPreference): "high-performance" or "low-power".
force_fallback_adapter (bool): whether to use a (probably CPU-based)
@@ -135,7 +136,10 @@ async def request_adapter_async(
# If this method gets called, no backend has been loaded yet, let's do that now!
from .backends.auto import gpu

# note: feature_level currently does nothing, it is not yet used upstream: https://gpuweb.github.io/gpuweb/#dom-gpurequestadapteroptions-featurelevel

return await gpu.request_adapter_async(
feature_level=feature_level,
power_preference=power_preference,
force_fallback_adapter=force_fallback_adapter,
canvas=canvas,
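The keyword rename is user-facing, so here is a minimal sketch of a call that uses the corrected spelling (assuming the default auto backend is available):

import wgpu

# "core" is the default; "compatibility" opts into additional validation
# restrictions (currently not used upstream, see the note in the diff above).
adapter = wgpu.gpu.request_adapter_sync(
    feature_level="core",
    power_preference="high-performance",
)
print(adapter.summary)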
4 changes: 2 additions & 2 deletions wgpu/backends/wgpu_native/__init__.py
@@ -11,8 +11,8 @@


# The wgpu-native version that we target/expect
__version__ = "24.0.3.1"
__commit_sha__ = "e305465e8f1abd2b13878274bf74bbde920096a3"
__version__ = "25.0.2.1"
__commit_sha__ = "af9074edf144efe4f1432b2e42c477429c4964c1"
version_info = tuple(map(int, __version__.split("."))) # noqa: RUF048
_check_expected_version(version_info) # produces a warning on mismatch

24 changes: 12 additions & 12 deletions wgpu/backends/wgpu_native/_api.py
@@ -427,7 +427,7 @@ class GPU(classes.GPU):
def request_adapter_sync(
self,
*,
feaure_level: str = "core",
feature_level: str = "core",
power_preference: enums.PowerPreference = None,
force_fallback_adapter: bool = False,
canvas=None,
@@ -437,7 +437,7 @@ def request_adapter_sync(
"""
check_can_use_sync_variants()
awaitable = self._request_adapter(
feaure_level=feaure_level,
feature_level=feature_level,
power_preference=power_preference,
force_fallback_adapter=force_fallback_adapter,
canvas=canvas,
Expand All @@ -448,7 +448,7 @@ def request_adapter_sync(
async def request_adapter_async(
self,
*,
feaure_level: str = "core",
feature_level: str = "core",
power_preference: enums.PowerPreference = None,
force_fallback_adapter: bool = False,
canvas=None,
@@ -466,15 +466,15 @@ async def request_adapter_async(
be left to None. If given, the object must implement ``WgpuCanvasInterface``.
"""
awaitable = self._request_adapter(
feaure_level=feaure_level,
feature_level=feature_level,
power_preference=power_preference,
force_fallback_adapter=force_fallback_adapter,
canvas=canvas,
) # no-cover
return await awaitable

def _request_adapter(
self, *, feaure_level, power_preference, force_fallback_adapter, canvas
self, *, feature_level, power_preference, force_fallback_adapter, canvas
):
# Similar to https://github.com/gfx-rs/wgpu?tab=readme-ov-file#environment-variables
# It seems that the environment variables are only respected in their
@@ -527,7 +527,7 @@ def _request_adapter(
c_feature_level = {
"core": lib.WGPUFeatureLevel_Core,
"compatibility": lib.WGPUFeatureLevel_Compatibility,
}[feaure_level]
}[feature_level]

# H: nextInChain: WGPUChainedStruct *, featureLevel: WGPUFeatureLevel, powerPreference: WGPUPowerPreference, forceFallbackAdapter: WGPUBool/int, backendType: WGPUBackendType, compatibleSurface: WGPUSurface
struct = new_struct_p(
@@ -1266,6 +1266,7 @@ def device_lost_callback(c_device, c_reason, c_message, userdata1, userdata2):
def uncaptured_error_callback(
c_device, c_type, c_message, userdata1, userdata2
):
# TODO: does this always raise an exception? retest the loop cases!
error_type = enum_int2str["ErrorType"].get(c_type, "Unknown")
msg = from_c_string_view(c_message)
msg = "\n".join(line.rstrip() for line in msg.splitlines())
@@ -1356,12 +1357,12 @@ class GPUDevice(classes.GPUDevice, GPUObjectBase):
def _poll(self):
# Internal function
if self._internal:
# H: WGPUBool f(WGPUDevice device, WGPUBool wait, WGPUSubmissionIndex const * wrappedSubmissionIndex)
# H: WGPUBool f(WGPUDevice device, WGPUBool wait, WGPUSubmissionIndex const * submissionIndex)
libf.wgpuDevicePoll(self._internal, False, ffi.NULL)

def _poll_wait(self):
if self._internal:
# H: WGPUBool f(WGPUDevice device, WGPUBool wait, WGPUSubmissionIndex const * wrappedSubmissionIndex)
# H: WGPUBool f(WGPUDevice device, WGPUBool wait, WGPUSubmissionIndex const * submissionIndex)
libf.wgpuDevicePoll(self._internal, True, ffi.NULL)

def create_buffer(
@@ -1771,18 +1772,17 @@ def create_shader_module(
value=to_c_string_view("gl_VertexIndex"),
)
)
# note, GLSL is a wgpu-native feature and still uses the older structure!
# H: chain: WGPUChainedStruct, stage: WGPUShaderStage/int, code: WGPUStringView, defineCount: int, defines: WGPUShaderDefine *
source_struct = new_struct_p(
"WGPUShaderModuleGLSLDescriptor *",
"WGPUShaderSourceGLSL *",
# not used: chain
code=to_c_string_view(code),
stage=c_stage,
defineCount=len(defines),
defines=new_array("WGPUShaderDefine[]", defines),
# not used: chain
)
source_struct[0].chain.next = ffi.NULL
source_struct[0].chain.sType = lib.WGPUSType_ShaderModuleGLSLDescriptor
source_struct[0].chain.sType = lib.WGPUSType_ShaderSourceGLSL
else:
# === WGSL
# H: chain: WGPUChainedStruct, code: WGPUStringView
10 changes: 8 additions & 2 deletions wgpu/backends/wgpu_native/_helpers.py
@@ -29,7 +29,6 @@
"Internal": GPUInternalError,
}


if sys.platform.startswith("darwin"):
from rubicon.objc.api import ObjCInstance, ObjCClass

Expand Down Expand Up @@ -87,14 +86,21 @@ def get_memoryview_from_address(address, nbytes, format="B"):
_the_instance = None


def get_wgpu_instance():
def get_wgpu_instance(extras=None):
"""Get the global wgpu instance."""
# Note, we could also use wgpuInstanceRelease,
# but we keep a global instance, so we don't have to.
global _the_instance
if _the_instance is not None and extras is not None:
# if extras are given, release and re-create the instance so the requested extras take effect.
lib.wgpuInstanceRelease(_the_instance)
_the_instance = None

if _the_instance is None:
# H: nextInChain: WGPUChainedStruct *
struct = ffi.new("WGPUInstanceDescriptor *")
if extras is not None:
struct.nextInChain = ffi.cast("WGPUChainedStruct *", extras)
_the_instance = lib.wgpuCreateInstance(struct)
return _the_instance

5 changes: 5 additions & 0 deletions wgpu/backends/wgpu_native/_mappings.py
@@ -365,6 +365,11 @@
"fragment-shader-invocations": 3,
"compute-shader-invocations": 4,
},
"Dx12Compiler": {
"Undefined": 0,
"Fxc": 1,
"Dxc": 2,
},
}

enum_int2str = {
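A small sketch of how the new mapping is consumed; the module is internal, so treat this as illustration only:

from wgpu.backends.wgpu_native._mappings import enum_str2int

# set_instance_extras() in extras.py resolves the user-facing string via this
# table; names it does not know fall back to "Undefined".
print(enum_str2int["Dx12Compiler"]["Dxc"])  # -> 2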
100 changes: 98 additions & 2 deletions wgpu/backends/wgpu_native/extras.py
@@ -1,9 +1,20 @@
import os
from typing import List
from typing import List, Union

from . import GPUCommandEncoder, GPUComputePassEncoder, GPURenderPassEncoder
from ._api import Dict, GPUBindGroupLayout, enums, logger, structs
from ._api import (
Dict,
GPUBindGroupLayout,
enums,
logger,
structs,
new_struct_p,
to_c_string_view,
enum_str2int,
)
from ...enums import Enum
from ._helpers import get_wgpu_instance
from ..._coreutils import get_library_filename


# NOTE: these functions represent backend-specific extra API.
@@ -172,3 +183,88 @@ def write_timestamp(encoder, query_set, query_index):
encoder, (GPURenderPassEncoder, GPUComputePassEncoder, GPUCommandEncoder)
)
encoder._write_timestamp(query_set, query_index)


def set_instance_extras(
backends=0, # default all
flags=0,
dx12_compiler="fxc",
gles3_minor_version="Atomic",
fence_behavior="Normal",
dxil_path: Union[os.PathLike, None] = None,
dxc_path: Union[os.PathLike, None] = None,
dxc_max_shader_model: float = 6.5,
):
"""
Sets the global instance with extras. Replaces any existing instance.
Args:
backends: bitflag/int, which backends to enable on the instance level. Defaults to (0b0: all).
flags: bitflag/int for debugging the instance and compiler. Defaults to (0b0: default).
dx12_compiler: enum/str, either "Fxc", "Dxc" or "Undefined". Defaults to "Fxc", which is also what "Undefined" resolves to. Dxc requires additional library files.
gles3_minor_version: enum/int, 0, 1 or 2. Defaults to "Atomic" (handled by driver).
fence_behavior: enum/int, "Normal" or "AutoFinish". Defaults to "Normal".
dxil_path: Path to the dxil.dll file; if not provided or `None`, loading from wgpu/resources is attempted.
dxc_path: Path to the dxcompiler.dll file; if not provided or `None`, loading from wgpu/resources is attempted.
dxc_max_shader_model: float between 6.0 and 6.7, the maximum shader model to use with DXC. Defaults to 6.5.
"""
# TODO document and explain, find reference for defaults

c_dx12_compiler = enum_str2int["Dx12Compiler"].get(
dx12_compiler.capitalize(), enum_str2int["Dx12Compiler"]["Undefined"]
)
# https://docs.rs/wgpu/latest/wgpu/enum.Dx12Compiler.html#variant.DynamicDxc explains the idea; to be improved in the future.
# https://github.com/gfx-rs/wgpu-native/blob/v25.0.2.1/src/conv.rs#L308-L349 handles the fxc fallback, most of the time...
if (
c_dx12_compiler == enum_str2int["Dx12Compiler"]["Dxc"]
and not (dxil_path or dxc_path)
):  # an os.path.exists() check would fail on the None defaults, and empty strings are not an option either.
# if DXC is requested but no paths are provided, wgpu-native panics about static-dxc, so guard against that here.
try:
dxil_path = get_library_filename("dxil.dll")
dxc_path = get_library_filename("dxcompiler.dll")
except RuntimeError as e:
# the libs could not be loaded from wgpu/resources, so assume the user does not have them.
# TODO: explain how to add DXC manually, or provide a script / package it? (in the future)
logger.warning(
f"Could not load the DXC .dll files from wgpu/resources: {e}.\n"
"Falling back to FXC. Provide dxil_path/dxc_path manually if you need DXC (a wrong path can cause a panic)."
)
c_dx12_compiler = enum_str2int["Dx12Compiler"]["Fxc"]

# https://docs.rs/wgpu/latest/wgpu/enum.Gles3MinorVersion.html
if isinstance(gles3_minor_version, str) and gles3_minor_version[-1].isdigit():
gles3_minor_version = (
int(gles3_minor_version[-1]) + 1
)  # hack: the last char maps onto the enum ("0"/"1"/"2" -> 1/2/3)
elif isinstance(gles3_minor_version, str):
gles3_minor_version = 0  # any other string (e.g. "Atomic") means automatic, left to the driver

# https://docs.rs/wgpu/latest/wgpu/enum.GlFenceBehavior.html
fence_behavior_map = {
"Normal": 0, # WGPUGLFenceBehavior_Normal
"AutoFinish": 1, # WGPUGLFenceBehavior_AutoFinish
}
fence_behavior = fence_behavior_map.get(fence_behavior, 0)

# hack: only shader models 6.0..6.7 are supported, and the fractional digit maps directly onto the enum (6.5 -> 5).
c_max_shader_model = int(round((dxc_max_shader_model - 6.0) * 10))

# TODO: can we codegen the native only flags? do we put them here or in a flags.py?

# H: chain: WGPUChainedStruct, backends: WGPUInstanceBackend/int, flags: WGPUInstanceFlag/int, dx12ShaderCompiler: WGPUDx12Compiler, gles3MinorVersion: WGPUGles3MinorVersion, glFenceBehaviour: WGPUGLFenceBehaviour, dxilPath: WGPUStringView, dxcPath: WGPUStringView, dxcMaxShaderModel: WGPUDxcMaxShaderModel
c_extras = new_struct_p(
"WGPUInstanceExtras *",
# not used: chain
backends=backends,
flags=flags,
dx12ShaderCompiler=c_dx12_compiler,
gles3MinorVersion=gles3_minor_version,
glFenceBehaviour=fence_behavior,
dxilPath=to_c_string_view(dxil_path),
dxcPath=to_c_string_view(dxc_path),
dxcMaxShaderModel=c_max_shader_model,
)

c_extras.chain.sType = (
0x00030006 # lib.WGPUSType_InstanceExtras (but we don't import lib here?)
)
get_wgpu_instance(extras=c_extras) # this sets a global
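A hedged usage sketch of the new helper: the keywords follow the signature above, and whether DXC actually loads depends on dxil.dll/dxcompiler.dll being available (bundled in wgpu/resources or passed via dxil_path/dxc_path):

import wgpu
from wgpu.backends.wgpu_native.extras import set_instance_extras

# Recreate the global instance so the DX12 backend uses the DXC shader compiler.
# If the DXC dlls cannot be found, the helper logs a warning and falls back to FXC.
set_instance_extras(dx12_compiler="Dxc", dxc_max_shader_model=6.5)

# Call this before requesting an adapter, so the new instance is the one that gets used.
adapter = wgpu.gpu.request_adapter_sync(power_preference="high-performance")
print(adapter.summary)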