diff --git a/codegen/__init__.py b/codegen/__init__.py
index b2b7767a..d194bc4e 100644
--- a/codegen/__init__.py
+++ b/codegen/__init__.py
@@ -11,7 +11,7 @@ def main():
     log = io.StringIO()
     with PrintToFile(log):
-        print("# Code generatation report")
+        print("# Code generation report")
         prepare()
         update_api()
         update_wgpu_native()
diff --git a/docs/guide.rst b/docs/guide.rst
index c0dab996..3d476eca 100644
--- a/docs/guide.rst
+++ b/docs/guide.rst
@@ -115,7 +115,7 @@ Offscreen
 +++++++++
 
 If you render offscreen, or only do compute, you do not need a canvas. You also won't need a GUI toolkit, draw function or enter the event loop.
-Instead, you will obtain a command encoder and submit it's records to the queue directly.
+Instead, you will obtain a command encoder and submit its records to the queue directly.
 
 
 Examples and external resources
diff --git a/examples/compute_noop.py b/examples/compute_noop.py
index b353a09f..8e9d08b5 100644
--- a/examples/compute_noop.py
+++ b/examples/compute_noop.py
@@ -35,7 +35,7 @@
 # %% The short version, using memoryview
 
 # The first arg is the input data, per binding
-# The second arg are the output types, per binding
+# The second arg is the output types, per binding
 out = compute_with_buffers({0: data}, {1: (n, "i")}, shader_source, n=n)
 
 # The result is a dict matching the output types
diff --git a/wgpu/resources/codegen_report.md b/wgpu/resources/codegen_report.md
index ce53a383..7c72d1fc 100644
--- a/wgpu/resources/codegen_report.md
+++ b/wgpu/resources/codegen_report.md
@@ -1,4 +1,4 @@
-# Code generatation report
+# Code generation report
 ## Preparing
 * The webgpu.idl defines 37 classes with 75 functions
 * The webgpu.idl defines 5 flags, 34 enums, 60 structs
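
For reference, beyond the patch itself: the examples/compute_noop.py hunk above relies on wgpu.utils.compute_with_buffers, whose first argument maps binding indices to input data and whose second maps binding indices to output types. Below is a minimal, self-contained sketch of that call; the array size, input values, and WGSL shader source are assumptions for illustration (written against a recent wgpu-py with WGSL support) and are not taken from the repository.

from wgpu.utils import compute_with_buffers

n = 16

# Input for binding 0: n int32 values 0..n-1 (assumed sample data).
data = memoryview(bytearray(n * 4)).cast("i")
for i in range(n):
    data[i] = i

# Assumed WGSL compute shader: copies binding 0 into binding 1.
shader_source = """
@group(0) @binding(0)
var<storage, read> data1: array<i32>;

@group(0) @binding(1)
var<storage, read_write> data2: array<i32>;

@compute @workgroup_size(1)
fn main(@builtin(global_invocation_id) index: vec3<u32>) {
    let i = i32(index.x);
    data2[i] = data1[i];
}
"""

# First arg: input data per binding; second arg: output types per binding.
out = compute_with_buffers({0: data}, {1: (n, "i")}, shader_source, n=n)

# The result is a dict keyed by binding index; each value is a memoryview
# in the requested format ("i" -> int32).
print(out[1].tolist())

No canvas, GUI toolkit, or event loop is involved, which matches the offscreen/compute note touched in docs/guide.rst.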