12 changes: 11 additions & 1 deletion codegen/apipatcher.py
@@ -40,11 +40,13 @@ def patch_base_api(code):
idl = get_idl_parser()

# Write __all__
extra_public_classes = ["GPUPromise"]
all_public_classes = [*idl.classes.keys(), *extra_public_classes]
part1, found_all, part2 = code.partition("\n__all__ =")
if found_all:
part2 = part2.split("]", 1)[-1]
line = "\n__all__ = ["
line += ", ".join(f'"{name}"' for name in sorted(idl.classes.keys()))
line += ", ".join(f'"{name}"' for name in sorted(all_public_classes))
line += "]"
code = part1 + line + part2
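
As an illustration of the rewrite above, here is a minimal, self-contained sketch of how the partition-based `__all__` update behaves on a toy module string (the input below is hypothetical and not taken from the repo):

code = '"""Toy module."""\n\n__all__ = ["GPUAdapter"]\n\nclass GPUAdapter:\n    pass\n'
all_public_classes = ["GPUAdapter", "GPUDevice", "GPUPromise"]

part1, found_all, part2 = code.partition("\n__all__ =")
if found_all:
    part2 = part2.split("]", 1)[-1]  # drop the old list, keep everything after it
    line = "\n__all__ = ["
    line += ", ".join(f'"{name}"' for name in sorted(all_public_classes))
    line += "]"
    code = part1 + line + part2

print(code)  # the rebuilt __all__ now also exports "GPUPromise"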

@@ -160,14 +162,22 @@ def patch_classes(self):
for classname, i1, i2 in self.iter_classes():
seen_classes.add(classname)
self._apidiffs = set()
pre_lines = "\n".join(self.lines[i1 - 3 : i1])
self._apidiffs_from_lines(pre_lines, classname)
if self.class_is_known(classname):
if "@apidiff.add" in pre_lines:
print(f"ERROR: apidiff.add for known {classname}")
elif "@apidiff.hide" in pre_lines:
pass # continue as normal
old_line = self.lines[i1]
new_line = self.get_class_def(classname)
if old_line != new_line:
fixme_line = "# FIXME: was " + old_line.split("class ", 1)[-1]
self.replace_line(i1, f"{fixme_line}\n{new_line}")
self.patch_properties(classname, i1 + 1, i2)
self.patch_methods(classname, i1 + 1, i2)
elif "@apidiff.add" in pre_lines:
pass
else:
msg = f"unknown api: class {classname}"
self.insert_line(i1, "# FIXME: " + msg)
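
A small sketch of the FIXME lines the patched logic above produces; the class names are hypothetical and only serve to show the output format:

# Known class whose signature changed: keep the old definition as a comment.
old_line = "class GPUFoo(OldBase):"
new_line = "class GPUFoo(NewBase):"
fixme_line = "# FIXME: was " + old_line.split("class ", 1)[-1]
print(f"{fixme_line}\n{new_line}")
# -> # FIXME: was GPUFoo(OldBase):
#    class GPUFoo(NewBase):

# Unknown class without @apidiff.add: flag it instead of rewriting it.
classname = "GPUFoo"
print("# FIXME: " + f"unknown api: class {classname}")
# -> # FIXME: unknown api: class GPUFoo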
6 changes: 3 additions & 3 deletions tests/test_async.py
@@ -5,7 +5,7 @@
import wgpu.utils
from testutils import can_use_wgpu_lib, run_tests
from wgpu import GPUDevice, MapMode, TextureFormat
from wgpu.backends.wgpu_native import WgpuAwaitable
from wgpu.backends.wgpu_native import GPUPromise


@mark.anyio
@@ -17,15 +17,15 @@ def finalizer(i):
return i * i

def callback(i):
awaitable.set_result(i)
awaitable._wgpu_set_result(i)

def poll_function():
nonlocal count
count += 1
if count >= 3:
callback(10)

awaitable = WgpuAwaitable("test", callback, finalizer, poll_function)
awaitable = GPUPromise("test", finalizer, callback, poll_function)

if use_async:
result = await awaitable
128 changes: 124 additions & 4 deletions wgpu/_classes.py
@@ -13,7 +13,7 @@

import weakref
import logging
from typing import List, Dict, Union, Optional
from typing import List, Dict, Union, Optional, Callable, Any

from ._coreutils import ApiDiff, str_flag_to_int
from ._diagnostics import diagnostics, texture_format_to_bpp
@@ -46,6 +46,7 @@
"GPUPipelineBase",
"GPUPipelineError",
"GPUPipelineLayout",
"GPUPromise",
"GPUQuerySet",
"GPUQueue",
"GPURenderBundle",
@@ -113,7 +114,7 @@ def request_adapter_sync(

# IDL: Promise<GPUAdapter?> requestAdapter(optional GPURequestAdapterOptions options = {}); -> DOMString featureLevel = "core", GPUPowerPreference powerPreference, boolean forceFallbackAdapter = false, boolean xrCompatible = false
@apidiff.change("arguments include canvas")
async def request_adapter_async(
def request_adapter_async(
self,
*,
feature_level: str = "core",
@@ -138,7 +139,7 @@ async def request_adapter_async(

# Note: feature_level currently does nothing; it is not used yet: https://gpuweb.github.io/gpuweb/#dom-gpurequestadapteroptions-featurelevel

return await gpu.request_adapter_async(
return gpu.request_adapter_async(
feature_level=feature_level,
power_preference=power_preference,
force_fallback_adapter=force_fallback_adapter,
@@ -553,6 +554,125 @@ def _release(self):
self._drop_texture()


# TODO: GPUFuture or GPUPromise; Python API or JS?
# Leaning towards the JS
#
# JS:
# promise.then(lambda result: ...)
# promise.then(handle_result, handle_exception)
# promise.catch(handle_exception)
# promise.finally()
#
# Python:
# future.result()
# future.set_result()
# future.set_exception()
# future.done()
# future.cancelled()
# future.add_done_callback(lambda future: ...)
# future.remove_done_callback()
# future.cancel()
# future.exception()
# future.get_loop()


@apidiff.add("Add a GPU-specific Future")
class GPUPromise:
"""A GPUPromise represents the eventual result of an asynchronous wgpu operation.

A ``GPUPromise`` is a bit like an ``asyncio.Future``, but specific to wgpu, and with
an API more similar to JavaScript's ``Promise``.

Some methods of the wgpu API are asynchronous. They return a ``GPUPromise``,
which provides a few different ways to handle it:

* It can be awaited using ``await promise``. This is the "cleanest" way, but
can only be used from a coroutine (i.e. an async code path).
* A callback can be registered using ``promise.then(callback)``, which will
be called when the promise resolves.
* You can sync-wait for it, using ``promise.sync_wait()``. This is simple, but
makes code less portable and potentially slower.

A ``GPUPromise`` is in one of these states:

* pending: initial state, neither fulfilled nor rejected.
* fulfilled: meaning that the operation was completed successfully.
* rejected: meaning that the operation failed.
"""

def __init__(self, title: str, finalizer: Callable | None, *args):
self._title = title
self._finalizer = finalizer # function to finish the result
self._result_or_error = None
self._callback = None

def __repr__(self):
state = "pending"
value_repr = ""
if self._result_or_error is not None:
if self._result_or_error[0] is not None:
state = "fulfilled"
value_repr = repr(self._result_or_error[0]).split("\n", 1)[0]
if len(value_repr) > 30:
value_repr = value_repr[:29] + "…"
value_repr = f"'{value_repr}'"
else:
state = "rejected"
return f"<GPUPromise {state} {value_repr} at {hex(id(self))}>"

def _wgpu_set_result(self, result):
self._result_or_error = result, None

def _wgpu_set_error(self, error):
self._result_or_error = None, error

def _finish(self):
try:
result, error = self._result_or_error
if error:
raise RuntimeError(error)
else:
result = self._finalizer(result)
if self._callback is not None:
# TODO: wrap in a try-except, or a log_exception thingy?
self._callback(result)
return result
finally:
# Reset attrs to prevent potential memory leaks
self._finalizer = self._result_or_error = self._callback = None

def sync_wait(self) -> Any:
"""Synchronously wait for the future to resolve and return the result.

Note that this method should be avoided in event callbacks, since it can
make them slow.

Note that this method may not be supported by all backends (e.g. the
upcoming JavaScript/Pyodide one), and using it will make your code less
portable.
"""
# TODO: allow calling multiple times
raise NotImplementedError()

def then(self, callback: Callable[[Any], None]):
"""Set a callback that will be called when the future resolves.

The callback will receive one argument: the result of the future.
"""
# TODO: allow calling multiple times
# TODO: allow calling after being resolved -> tests!
# TODO: return another promise, so we can do chaining? Or maybe not interesting for this use-case...
Collaborator (inline review comment): This seems to be pretty crucial to me!

if callable(callback):
self._callback = callback
else:
raise TypeError(
f"GPUPromise.then() got a callback that is not callable: {callback!r}"
)

def __await__(self):
raise NotImplementedError()
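
For context, a minimal usage sketch of the GPUPromise class defined above. The values and the double() finalizer are hypothetical, and the _wgpu_set_result()/_finish() calls are internal steps that a backend would normally drive; in this base class, ``await promise`` and ``sync_wait()`` are still NotImplementedError stubs, and the native backend provides the concrete awaiting/polling behavior (as exercised in tests/test_async.py above).

def double(x):
    return x * 2  # hypothetical finalizer that post-processes the raw result

promise = GPUPromise("example request", double)  # title, finalizer
promise.then(print)           # register a callback for when the promise resolves

# Normally the backend resolves the promise; we emulate that here:
promise._wgpu_set_result(21)  # store the raw result
result = promise._finish()    # runs the finalizer and the callback; prints 42

assert result == 42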


class GPUAdapterInfo(dict):
"""Represents information about an adapter."""

@@ -2650,7 +2770,7 @@ def generic_repr(self):


def _set_repr_methods():
exceptions = ["GPUAdapterInfo"]
exceptions = ["GPUAdapterInfo", "GPUPromise"]
m = globals()
for class_name in __all__:
if class_name in exceptions:
1 change: 0 additions & 1 deletion wgpu/backends/wgpu_native/__init__.py
@@ -21,4 +21,3 @@
_register_backend(gpu)

from .extras import request_device_sync, request_device
from ._helpers import WgpuAwaitable