// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
import { assert, assertEquals, assertThrows } from "./test_util.ts";

let isCI: boolean;
try {
  isCI = (Deno.env.get("CI")?.length ?? 0) > 0;
} catch {
  isCI = true;
}
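// (Deno.env.get() throws without the `env` permission, hence the try/catch
// above; if the environment cannot be read, assume CI.)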

// Skip these tests on Linux CI, because the Vulkan emulator is not good enough
// yet, and skip on macOS CI because those runners do not have virtual GPUs.
const isLinuxOrMacCI =
  (Deno.build.os === "linux" || Deno.build.os === "darwin") && isCI;
// Skip these tests in WSL because it doesn't have good GPU support.
const isWsl = await checkIsWsl();

Deno.test({
  permissions: { read: true, env: true },
  ignore: isWsl || isLinuxOrMacCI,
}, async function webgpuComputePass() {
  const adapter = await navigator.gpu.requestAdapter();
  assert(adapter);

  const numbers = [1, 4, 3, 295];
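  // Inputs for the Collatz compute shader (see the debug marker below); the
  // shader overwrites each value with its iteration count, asserted later as
  // [0, 2, 7, 55].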

  const device = await adapter.requestDevice();
  assert(device);

  const shaderCode = await Deno.readTextFile(
    "tests/testdata/webgpu/computepass_shader.wgsl",
  );

  const shaderModule = device.createShaderModule({
    code: shaderCode,
  });

  const size = new Uint32Array(numbers).byteLength;
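  // Four u32 values -> 16 bytes; used for both the staging and storage
  // buffers below.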

  const stagingBuffer = device.createBuffer({
    size: size,
    usage: GPUBufferUsage.MAP_READ | GPUBufferUsage.COPY_DST,
  });
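  // MAP_READ is only valid together with COPY_DST, so results are copied from
  // the storage buffer into this staging buffer before being mapped.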

  const storageBuffer = device.createBuffer({
    label: "Storage Buffer",
    size: size,
    usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_DST |
      GPUBufferUsage.COPY_SRC,
    mappedAtCreation: true,
  });

  const buf = new Uint32Array(storageBuffer.getMappedRange());

  buf.set(numbers);

  storageBuffer.unmap();
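  // mappedAtCreation allowed the inputs to be written directly above without
  // a separate queue.writeBuffer() upload.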

  const computePipeline = device.createComputePipeline({
    layout: "auto",
    compute: {
      module: shaderModule,
      entryPoint: "main",
    },
  });
  const bindGroupLayout = computePipeline.getBindGroupLayout(0);
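  // With layout: "auto" the implementation derives the bind group layouts
  // from the shader, which is what getBindGroupLayout(0) returns here.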

  const bindGroup = device.createBindGroup({
    layout: bindGroupLayout,
    entries: [
      {
        binding: 0,
        resource: {
          buffer: storageBuffer,
        },
      },
    ],
  });

  const encoder = device.createCommandEncoder();

  const computePass = encoder.beginComputePass();
  computePass.setPipeline(computePipeline);
  computePass.setBindGroup(0, bindGroup);
  computePass.insertDebugMarker("compute collatz iterations");
  computePass.dispatchWorkgroups(numbers.length);
  computePass.end();
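  // numbers.length workgroups are dispatched along x; assuming the WGSL entry
  // point uses @workgroup_size(1), that is one invocation per input element.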

  encoder.copyBufferToBuffer(storageBuffer, 0, stagingBuffer, 0, size);

  device.queue.submit([encoder.finish()]);

  await stagingBuffer.mapAsync(1);
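  // (1 is the numeric value of GPUMapMode.READ.)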

  const data = stagingBuffer.getMappedRange();

  assertEquals(new Uint32Array(data), new Uint32Array([0, 2, 7, 55]));

  stagingBuffer.unmap();

  device.destroy();

  // TODO(lucacasonato): webgpu spec should add an explicit destroy method for
  // adapters.
  const resources = Object.keys(Deno.resources());
  Deno.close(Number(resources[resources.length - 1]));
});

Deno.test({
  permissions: { read: true, env: true },
  ignore: isWsl || isLinuxOrMacCI,
}, async function webgpuHelloTriangle() {
  const adapter = await navigator.gpu.requestAdapter();
  assert(adapter);

  const device = await adapter.requestDevice();
  assert(device);

  const shaderCode = await Deno.readTextFile(
    "tests/testdata/webgpu/hellotriangle_shader.wgsl",
  );

  const shaderModule = device.createShaderModule({
    code: shaderCode,
  });

  const pipelineLayout = device.createPipelineLayout({
    bindGroupLayouts: [],
  });

  const renderPipeline = device.createRenderPipeline({
    layout: pipelineLayout,
    vertex: {
      module: shaderModule,
      entryPoint: "vs_main",
    },
    fragment: {
      module: shaderModule,
      entryPoint: "fs_main",
      targets: [
        {
          format: "rgba8unorm-srgb",
        },
      ],
    },
  });

  const dimensions = {
    width: 200,
    height: 200,
  };
  const unpaddedBytesPerRow = dimensions.width * 4;
  const align = 256;
  const paddedBytesPerRowPadding = (align - unpaddedBytesPerRow % align) %
    align;
  const paddedBytesPerRow = unpaddedBytesPerRow + paddedBytesPerRowPadding;
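  // copyTextureToBuffer requires bytesPerRow to be a multiple of 256
  // (COPY_BYTES_PER_ROW_ALIGNMENT), so the 800-byte rows (200 px * 4 bytes)
  // are padded up to 1024 bytes.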

  const outputBuffer = device.createBuffer({
    label: "Capture",
    size: paddedBytesPerRow * dimensions.height,
    usage: GPUBufferUsage.MAP_READ | GPUBufferUsage.COPY_DST,
  });
  const texture = device.createTexture({
    label: "Capture",
    size: dimensions,
    format: "rgba8unorm-srgb",
    usage: GPUTextureUsage.RENDER_ATTACHMENT | GPUTextureUsage.COPY_SRC,
  });

  const encoder = device.createCommandEncoder();
  const view = texture.createView();
  const renderPass = encoder.beginRenderPass({
    colorAttachments: [
      {
        view,
        storeOp: "store",
        loadOp: "clear",
        clearValue: [0, 1, 0, 1],
      },
    ],
  });
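  // The color attachment is cleared to opaque green ([0, 1, 0, 1]) before the
  // triangle is drawn.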
  renderPass.setPipeline(renderPipeline);
  renderPass.draw(3, 1);
  renderPass.end();
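  // draw(3, 1): three vertices, one instance; the vertex shader presumably
  // generates the triangle positions from the vertex index.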

  encoder.copyTextureToBuffer(
    {
      texture,
    },
    {
      buffer: outputBuffer,
      bytesPerRow: paddedBytesPerRow,
      rowsPerImage: 0,
    },
    dimensions,
  );

  const bundle = encoder.finish();
  device.queue.submit([bundle]);

  await outputBuffer.mapAsync(1);
  const data = new Uint8Array(outputBuffer.getMappedRange());

  assertEquals(
    data,
    await Deno.readFile("tests/testdata/webgpu/hellotriangle.out"),
  );

  outputBuffer.unmap();

  device.destroy();

  // TODO(lucacasonato): webgpu spec should add an explicit destroy method for
  // adapters.
  const resources = Object.keys(Deno.resources());
  Deno.close(Number(resources[resources.length - 1]));
});

Deno.test({
  ignore: isWsl || isLinuxOrMacCI,
}, async function webgpuAdapterHasFeatures() {
  const adapter = await navigator.gpu.requestAdapter();
  assert(adapter);
  assert(adapter.features);
  const resources = Object.keys(Deno.resources());
  Deno.close(Number(resources[resources.length - 1]));
});

Deno.test({
  ignore: isWsl || isLinuxOrMacCI,
}, async function webgpuNullWindowSurfaceThrows() {
  const adapter = await navigator.gpu.requestAdapter();
  assert(adapter);

  const device = await adapter.requestDevice();
  assert(device);

  assertThrows(
    () => {
      new Deno.UnsafeWindowSurface("cocoa", null, null);
    },
  );

  device.destroy();
  const resources = Object.keys(Deno.resources());
  Deno.close(Number(resources[resources.length - 1]));
});

Deno.test(function getPreferredCanvasFormat() {
  const preferredFormat = navigator.gpu.getPreferredCanvasFormat();
  assert(preferredFormat === "bgra8unorm" || preferredFormat === "rgba8unorm");
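  // The WebGPU spec restricts getPreferredCanvasFormat() to exactly these two
  // values.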
});

async function checkIsWsl() {
  return Deno.build.os === "linux" && await hasMicrosoftProcVersion();

  async function hasMicrosoftProcVersion() {
    // https://github.com/microsoft/WSL/issues/423#issuecomment-221627364
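    // WSL kernels report a version string containing "Microsoft"/"microsoft",
    // so a case-insensitive match on /proc/version is a reasonable heuristic.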
    try {
      const procVersion = await Deno.readTextFile("/proc/version");
      return /microsoft/i.test(procVersion);
    } catch {
      return false;
    }
  }
}