1
0
Fork 0
mirror of https://github.com/denoland/deno.git synced 2024-11-24 15:19:26 -05:00

BREAKING(buffer): remove Deno.Buffer (#25441)

Towards #22079

---------

Signed-off-by: Asher Gomez <ashersaupingomez@gmail.com>
This commit is contained in:
Asher Gomez 2024-09-06 18:28:05 +10:00 committed by GitHub
parent 7937ae3f2f
commit d8f3123c36
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
10 changed files with 0 additions and 747 deletions

View file

@@ -2651,81 +2651,6 @@ declare namespace Deno {
signal?: AbortSignal;
}
/**
* A variable-sized buffer of bytes with `read()` and `write()` methods.
*
* @deprecated This will be removed in Deno 2.0. See the
* {@link https://docs.deno.com/runtime/manual/advanced/migrate_deprecations | Deno 1.x to 2.x Migration Guide}
* for migration instructions.
*
* @category I/O
*/
export class Buffer implements Reader, ReaderSync, Writer, WriterSync {
/** Constructs an empty buffer, or one that wraps (without copying) the
* given `ArrayBuffer` as its initial contents. */
constructor(ab?: ArrayBuffer);
/** Returns a slice holding the unread portion of the buffer.
*
* The slice is valid for use only until the next buffer modification (that
* is, only until the next call to a method like `read()`, `write()`,
* `reset()`, or `truncate()`). If `options.copy` is false the slice aliases the buffer content at
* least until the next buffer modification, so immediate changes to the
* slice will affect the result of future reads.
* @param options Defaults to `{ copy: true }`
*/
bytes(options?: { copy?: boolean }): Uint8Array;
/** Returns whether the unread portion of the buffer is empty. */
empty(): boolean;
/** A read only number of bytes of the unread portion of the buffer. */
readonly length: number;
/** The read only capacity of the buffer's underlying byte slice, that is,
* the total space allocated for the buffer's data. */
readonly capacity: number;
/** Discards all but the first `n` unread bytes from the buffer but
* continues to use the same allocated storage. It throws if `n` is
* negative or greater than the length of the buffer. */
truncate(n: number): void;
/** Resets the buffer to be empty, but it retains the underlying storage for
* use by future writes. `.reset()` is the same as `.truncate(0)`. */
reset(): void;
/** Reads the next `p.length` bytes from the buffer or until the buffer is
* drained. Returns the number of bytes read. If the buffer has no data to
* return, the return is EOF (`null`). */
readSync(p: Uint8Array): number | null;
/** Reads the next `p.length` bytes from the buffer or until the buffer is
* drained. Resolves to the number of bytes read. If the buffer has no
* data to return, resolves to EOF (`null`).
*
* NOTE: This methods reads bytes synchronously; it's provided for
* compatibility with `Reader` interfaces.
*/
read(p: Uint8Array): Promise<number | null>;
/** Appends the contents of `p` to the buffer, growing it as needed.
* Returns the number of bytes written. */
writeSync(p: Uint8Array): number;
/** NOTE: This methods writes bytes synchronously; it's provided for
* compatibility with `Writer` interface. */
write(p: Uint8Array): Promise<number>;
/** Grows the buffer's capacity, if necessary, to guarantee space for
* another `n` bytes. After `.grow(n)`, at least `n` bytes can be written to
* the buffer without another allocation. If `n` is negative, `.grow()` will
* throw. If the buffer can't grow it will throw an error.
*
* Based on Go Lang's
* [Buffer.Grow](https://golang.org/pkg/bytes/#Buffer.Grow). */
grow(n: number): void;
/** Reads data from `r` until EOF (`null`) and appends it to the buffer,
* growing the buffer as needed. It resolves to the number of bytes read.
* If the buffer becomes too large, `.readFrom()` will reject with an error.
*
* Based on Go Lang's
* [Buffer.ReadFrom](https://golang.org/pkg/bytes/#Buffer.ReadFrom). */
readFrom(r: Reader): Promise<number>;
/** Reads data from `r` until EOF (`null`) and appends it to the buffer,
* growing the buffer as needed. It returns the number of bytes read. If the
* buffer becomes too large, `.readFromSync()` will throw an error.
*
* Based on Go Lang's
* [Buffer.ReadFrom](https://golang.org/pkg/bytes/#Buffer.ReadFrom). */
readFromSync(r: ReaderSync): number;
}
/**
* Read Reader `r` until EOF (`null`) and resolve to the content as
* Uint8Array`.

View file

@@ -1,237 +0,0 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
// This code has been ported almost directly from Go's src/bytes/buffer.go
// Copyright 2009 The Go Authors. All rights reserved. BSD license.
// https://github.com/golang/go/blob/master/LICENSE
import { internals, primordials } from "ext:core/mod.js";
const {
ArrayBufferPrototypeGetByteLength,
TypedArrayPrototypeSubarray,
TypedArrayPrototypeSlice,
TypedArrayPrototypeSet,
TypedArrayPrototypeGetBuffer,
TypedArrayPrototypeGetByteLength,
MathFloor,
MathMin,
PromiseResolve,
Uint8Array,
Error,
} = primordials;
import { assert } from "ext:deno_web/00_infra.js";
// MIN_READ is the minimum ArrayBuffer size passed to a read call by
// buffer.ReadFrom. As long as the Buffer has at least MIN_READ bytes beyond
// what is required to hold the contents of r, readFrom() will not grow the
// underlying buffer.
const MIN_READ = 32 * 1024;
// MAX_SIZE is the largest byte length the buffer may reach (2^32 - 2);
// #grow() throws once capacity + n would exceed it.
const MAX_SIZE = 2 ** 32 - 2;
// Copies bytes from `src` into `dst` starting at byte offset `off`,
// clamping `src` to the space remaining in `dst` past `off`.
// Returns the number of bytes actually copied.
function copyBytes(src, dst, off = 0) {
const room = TypedArrayPrototypeGetByteLength(dst) - off;
const clamped = TypedArrayPrototypeGetByteLength(src) > room
? TypedArrayPrototypeSubarray(src, 0, room)
: src;
TypedArrayPrototypeSet(dst, clamped, off);
return TypedArrayPrototypeGetByteLength(clamped);
}
// Deprecated Deno 1.x `Deno.Buffer`: a growable byte buffer ported from Go's
// bytes.Buffer. Construction emits a deprecation warning via internals.
class Buffer {
#buf = null; // contents are the bytes buf[off : len(buf)]
#off = 0; // read at buf[off], write at buf[buf.byteLength]
// Wraps `ab` (no copy) when given; otherwise starts empty.
constructor(ab) {
internals.warnOnDeprecatedApi(
"new Deno.Buffer()",
new Error().stack,
"Use `Buffer` from `https://jsr.io/@std/io/doc/buffer/~` instead.",
);
if (ab == null) {
this.#buf = new Uint8Array(0);
return;
}
this.#buf = new Uint8Array(ab);
}
// Unread portion of the buffer: a copy by default, or an aliasing
// subarray view when `options.copy` is false.
bytes(options = { copy: true }) {
if (options.copy === false) {
return TypedArrayPrototypeSubarray(this.#buf, this.#off);
}
return TypedArrayPrototypeSlice(this.#buf, this.#off);
}
// True when there are no unread bytes left.
empty() {
return TypedArrayPrototypeGetByteLength(this.#buf) <= this.#off;
}
// Number of unread bytes.
get length() {
return TypedArrayPrototypeGetByteLength(this.#buf) - this.#off;
}
// Total space allocated in the underlying ArrayBuffer.
get capacity() {
return ArrayBufferPrototypeGetByteLength(
TypedArrayPrototypeGetBuffer(this.#buf),
);
}
// Keeps only the first `n` unread bytes; throws when `n` is out of range.
truncate(n) {
if (n === 0) {
this.reset();
return;
}
if (n < 0 || n > this.length) {
throw Error("bytes.Buffer: truncation out of range");
}
this.#reslice(this.#off + n);
}
// Empties the buffer but keeps the allocated storage for future writes.
reset() {
this.#reslice(0);
this.#off = 0;
}
// Fast path: extend the view into existing spare capacity (no allocation).
// Returns the write offset on success, or -1 when there is not enough room.
#tryGrowByReslice(n) {
const l = TypedArrayPrototypeGetByteLength(this.#buf);
if (n <= this.capacity - l) {
this.#reslice(l + n);
return l;
}
return -1;
}
// Re-views the same underlying ArrayBuffer with byte length `len`.
#reslice(len) {
const ab = TypedArrayPrototypeGetBuffer(this.#buf);
assert(len <= ArrayBufferPrototypeGetByteLength(ab));
this.#buf = new Uint8Array(ab, 0, len);
}
// Reads up to `p.length` bytes into `p`; returns bytes read, or null at EOF.
readSync(p) {
if (this.empty()) {
// Buffer is empty, reset to recover space.
this.reset();
if (TypedArrayPrototypeGetByteLength(p) === 0) {
// this edge case is tested in 'bufferReadEmptyAtEOF' test
return 0;
}
return null;
}
const nread = copyBytes(
TypedArrayPrototypeSubarray(this.#buf, this.#off),
p,
);
this.#off += nread;
return nread;
}
// Promise wrapper over readSync(); the read itself is synchronous.
read(p) {
const rr = this.readSync(p);
return PromiseResolve(rr);
}
// Appends `p`, growing as needed; returns the number of bytes written.
writeSync(p) {
const m = this.#grow(TypedArrayPrototypeGetByteLength(p));
return copyBytes(p, this.#buf, m);
}
// Promise wrapper over writeSync(); the write itself is synchronous.
write(p) {
const n = this.writeSync(p);
return PromiseResolve(n);
}
// Ensures room for `n` more bytes and returns the offset at which they
// should be written. May reslice, compact (slide unread bytes to the
// front), or allocate a larger buffer; throws past MAX_SIZE.
#grow(n) {
const m = this.length;
// If buffer is empty, reset to recover space.
if (m === 0 && this.#off !== 0) {
this.reset();
}
// Fast: Try to grow by means of a reslice.
const i = this.#tryGrowByReslice(n);
if (i >= 0) {
return i;
}
const c = this.capacity;
if (n <= MathFloor(c / 2) - m) {
// We can slide things down instead of allocating a new
// ArrayBuffer. We only need m+n <= c to slide, but
// we instead let capacity get twice as large so we
// don't spend all our time copying.
copyBytes(TypedArrayPrototypeSubarray(this.#buf, this.#off), this.#buf);
} else if (c + n > MAX_SIZE) {
throw new Error("The buffer cannot be grown beyond the maximum size.");
} else {
// Not enough space anywhere, we need to allocate.
const buf = new Uint8Array(MathMin(2 * c + n, MAX_SIZE));
copyBytes(TypedArrayPrototypeSubarray(this.#buf, this.#off), buf);
this.#buf = buf;
}
// Restore this.#off and len(this.#buf).
this.#off = 0;
this.#reslice(MathMin(m + n, MAX_SIZE));
return m;
}
// Public grow(): guarantees space for `n` bytes; throws on negative `n`.
grow(n) {
if (n < 0) {
throw Error("Buffer.grow: negative count");
}
const m = this.#grow(n);
this.#reslice(m);
}
// Reads from `r` until EOF, appending to the buffer; resolves to the total
// number of bytes read. Reads via a temporary buffer only when there is
// less than MIN_READ of spare capacity.
async readFrom(r) {
let n = 0;
const tmp = new Uint8Array(MIN_READ);
while (true) {
const shouldGrow = this.capacity - this.length < MIN_READ;
// read into tmp buffer if there's not enough room
// otherwise read directly into the internal buffer
const buf = shouldGrow
? tmp
: new Uint8Array(TypedArrayPrototypeGetBuffer(this.#buf), this.length);
const nread = await r.read(buf);
if (nread === null) {
return n;
}
// write will grow if needed
if (shouldGrow) {
this.writeSync(TypedArrayPrototypeSubarray(buf, 0, nread));
} else this.#reslice(this.length + nread);
n += nread;
}
}
// Synchronous variant of readFrom().
readFromSync(r) {
let n = 0;
const tmp = new Uint8Array(MIN_READ);
while (true) {
const shouldGrow = this.capacity - this.length < MIN_READ;
// read into tmp buffer if there's not enough room
// otherwise read directly into the internal buffer
const buf = shouldGrow
? tmp
: new Uint8Array(TypedArrayPrototypeGetBuffer(this.#buf), this.length);
const nread = r.readSync(buf);
if (nread === null) {
return n;
}
// write will grow if needed
if (shouldGrow) {
this.writeSync(TypedArrayPrototypeSubarray(buf, 0, nread));
} else this.#reslice(this.length + nread);
n += nread;
}
}
}
export { Buffer };

View file

@ -20,7 +20,6 @@ import * as errors from "ext:runtime/01_errors.js";
import * as version from "ext:runtime/01_version.ts";
import * as permissions from "ext:runtime/10_permissions.js";
import * as io from "ext:deno_io/12_io.js";
import * as buffer from "ext:runtime/13_buffer.js";
import * as fs from "ext:deno_fs/30_fs.js";
import * as os from "ext:runtime/30_os.js";
import * as fsEvents from "ext:runtime/40_fs_events.js";
@ -82,9 +81,6 @@ const denoNs = {
env: os.env,
exit: os.exit,
execPath: os.execPath,
Buffer: buffer.Buffer,
readAll: buffer.readAll,
readAllSync: buffer.readAllSync,
copy: io.copy,
SeekMode: io.SeekMode,
File: fs.File,

View file

@ -1,4 +1,3 @@
// deno-lint-ignore-file no-deprecated-deno-api
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
// Remove Intl.v8BreakIterator because it is a non-standard API.
@ -800,7 +799,6 @@ function bootstrapMainRuntime(runtimeOptions, warmup = false) {
}
if (internals.future) {
delete globalThis.window;
delete Deno.Buffer;
delete Deno.FsFile.prototype.rid;
}
} else {
@ -959,7 +957,6 @@ function bootstrapWorkerRuntime(
}
if (internals.future) {
delete Deno.Buffer;
delete Deno.FsFile.prototype.rid;
}
} else {

View file

@ -41,7 +41,6 @@ extension!(runtime,
"06_util.js",
"10_permissions.js",
"11_workers.js",
"13_buffer.js",
"30_os.js",
"40_fs_events.js",
"40_process.js",

View file

@ -15,7 +15,6 @@ util::unit_test_factory!(
blob_test,
body_test,
broadcast_channel_test,
buffer_test,
build_test,
cache_api_test,
chmod_test,

View file

@ -1,5 +1,4 @@
console.log("window is", globalThis.window);
console.log("Deno.Buffer is", Deno.Buffer);
console.log(
"Deno.FsFile.prototype.rid is",
Deno.openSync(import.meta.filename).rid,

View file

@ -1,5 +1,4 @@
window is undefined
Deno.Buffer is undefined
Deno.FsFile.prototype.rid is undefined
Deno.Listener.prototype.rid is undefined
Deno.Conn.prototype.rid is undefined

View file

@@ -1,423 +0,0 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
// deno-lint-ignore-file no-deprecated-deno-api
// This code has been ported almost directly from Go's src/bytes/buffer_test.go
// Copyright 2009 The Go Authors. All rights reserved. BSD license.
// https://github.com/golang/go/blob/master/LICENSE
import {
assert,
assertEquals,
assertRejects,
assertThrows,
DENO_FUTURE,
} from "./test_util.ts";
import { writeAllSync } from "@std/io/write-all";
// Mirrors the MAX_SIZE limit in the Deno.Buffer implementation (2 ** 32 - 2).
const MAX_SIZE = 2 ** 32 - 2;
// N controls how many iterations of certain checks are performed.
const N = 100;
// Shared fixtures, lazily populated by init().
let testBytes: Uint8Array | null;
let testString: string | null;
// NOTE(review): the max-size tests allocate multi-GiB buffers; presumably
// skipped for resource reasons — confirm before re-enabling.
const ignoreMaxSizeTests = true;
/** Lazily fills `testBytes` with N bytes cycling 'a'..'z' and sets
 * `testString` to its decoded text. No-op after the first call. */
function init() {
if (testBytes != null) {
return;
}
const bytes = new Uint8Array(N);
for (let i = 0; i < N; i++) {
bytes[i] = "a".charCodeAt(0) + (i % 26);
}
testBytes = bytes;
testString = new TextDecoder().decode(bytes);
}
/** Asserts that the unread contents of `buf` are exactly the string `s`,
 * and that `length` agrees with both the bytes and the string. */
function check(buf: Deno.Buffer, s: string) {
const data = buf.bytes();
assertEquals(buf.length, data.byteLength);
assertEquals(new TextDecoder().decode(data), s);
assertEquals(buf.length, s.length);
}
/** Fills `buf` by writing the byte slice `fub` `n` times.
 * `s` is the expected initial contents of `buf`; returns the expected final
 * contents as a string, checking the buffer after every write. */
async function fillBytes(
buf: Deno.Buffer,
s: string,
n: number,
fub: Uint8Array,
): Promise<string> {
check(buf, s);
const decoder = new TextDecoder();
while (n-- > 0) {
const written = await buf.write(fub);
assertEquals(written, fub.byteLength);
s += decoder.decode(fub);
check(buf, s);
}
return s;
}
/** Drains `buf` by repeated reads into `fub` until EOF, checking the
 * remaining contents against `s` after each read. `s` is the expected
 * initial contents; the buffer must be empty afterwards. */
async function empty(
buf: Deno.Buffer,
s: string,
fub: Uint8Array,
) {
check(buf, s);
for (;;) {
const r = await buf.read(fub);
if (r === null) {
break;
}
s = s.slice(r);
check(buf, s);
}
check(buf, "");
}
/** Returns a Uint8Array of length `bytes` filled with the single
 * character `c` (asserts `c` is exactly one character). */
function repeat(c: string, bytes: number): Uint8Array {
assertEquals(c.length, 1);
const filled = new Uint8Array(bytes);
filled.fill(c.charCodeAt(0));
return filled;
}
// --- Basic construction / read / write / truncate behavior ---------------
Deno.test({ ignore: DENO_FUTURE }, function bufferNewBuffer() {
init();
assert(testBytes);
assert(testString);
const buf = new Deno.Buffer(testBytes.buffer as ArrayBuffer);
check(buf, testString);
});
Deno.test({ ignore: DENO_FUTURE }, async function bufferBasicOperations() {
init();
assert(testBytes);
assert(testString);
const buf = new Deno.Buffer();
for (let i = 0; i < 5; i++) {
check(buf, "");
buf.reset();
check(buf, "");
buf.truncate(0);
check(buf, "");
let n = await buf.write(testBytes.subarray(0, 1));
assertEquals(n, 1);
check(buf, "a");
n = await buf.write(testBytes.subarray(1, 2));
assertEquals(n, 1);
check(buf, "ab");
n = await buf.write(testBytes.subarray(2, 26));
assertEquals(n, 24);
check(buf, testString.slice(0, 26));
buf.truncate(26);
check(buf, testString.slice(0, 26));
buf.truncate(20);
check(buf, testString.slice(0, 20));
await empty(buf, testString.slice(0, 20), new Uint8Array(5));
await empty(buf, "", new Uint8Array(100));
// TODO(bartlomieju): buf.writeByte()
// TODO(bartlomieju): buf.readByte()
}
});
Deno.test({ ignore: DENO_FUTURE }, async function bufferReadEmptyAtEOF() {
// check that EOF of 'buf' is not reached (even though it's empty) if
// results are written to buffer that has 0 length (ie. it can't store any data)
const buf = new Deno.Buffer();
const zeroLengthTmp = new Uint8Array(0);
const result = await buf.read(zeroLengthTmp);
assertEquals(result, 0);
});
// Repeatedly fill with five copies of testBytes, then drain with reads of
// decreasing size.
Deno.test({ ignore: DENO_FUTURE }, async function bufferLargeByteWrites() {
init();
const buf = new Deno.Buffer();
const limit = 9;
for (let i = 3; i < limit; i += 3) {
const s = await fillBytes(buf, "", 5, testBytes!);
await empty(buf, s, new Uint8Array(Math.floor(testString!.length / i)));
}
check(buf, "");
});
// grow() past the maximum size must throw rather than allocate.
Deno.test({ ignore: DENO_FUTURE }, async function bufferTooLargeByteWrites() {
init();
const tmp = new Uint8Array(72);
const growLen = Number.MAX_VALUE;
const xBytes = repeat("x", 0);
const buf = new Deno.Buffer(xBytes.buffer as ArrayBuffer);
await buf.read(tmp);
assertThrows(
() => {
buf.grow(growLen);
},
Error,
"grown beyond the maximum size",
);
});
// --- Max-size stress tests (normally skipped via ignoreMaxSizeTests) -----
Deno.test(
{ ignore: ignoreMaxSizeTests || DENO_FUTURE },
function bufferGrowWriteMaxBuffer() {
const bufSize = 16 * 1024;
const capacities = [MAX_SIZE, MAX_SIZE - 1];
for (const capacity of capacities) {
let written = 0;
const buf = new Deno.Buffer();
const writes = Math.floor(capacity / bufSize);
for (let i = 0; i < writes; i++) {
written += buf.writeSync(repeat("x", bufSize));
}
if (written < capacity) {
written += buf.writeSync(repeat("x", capacity - written));
}
assertEquals(written, capacity);
}
},
);
// Reading one byte past MAX_SIZE must reject (async) / throw (sync).
Deno.test(
{ ignore: ignoreMaxSizeTests || DENO_FUTURE },
async function bufferGrowReadCloseMaxBufferPlus1() {
const reader = new Deno.Buffer(new ArrayBuffer(MAX_SIZE + 1));
const buf = new Deno.Buffer();
await assertRejects(
async () => {
await buf.readFrom(reader);
},
Error,
"grown beyond the maximum size",
);
},
);
Deno.test(
{ ignore: ignoreMaxSizeTests || DENO_FUTURE },
function bufferGrowReadSyncCloseMaxBufferPlus1() {
const reader = new Deno.Buffer(new ArrayBuffer(MAX_SIZE + 1));
const buf = new Deno.Buffer();
assertThrows(
() => {
buf.readFromSync(reader);
},
Error,
"grown beyond the maximum size",
);
},
);
// Reading exactly up to MAX_SIZE must succeed.
Deno.test(
{ ignore: ignoreMaxSizeTests || DENO_FUTURE },
function bufferGrowReadSyncCloseToMaxBuffer() {
const capacities = [MAX_SIZE, MAX_SIZE - 1];
for (const capacity of capacities) {
const reader = new Deno.Buffer(new ArrayBuffer(capacity));
const buf = new Deno.Buffer();
buf.readFromSync(reader);
assertEquals(buf.length, capacity);
}
},
);
Deno.test(
{ ignore: ignoreMaxSizeTests || DENO_FUTURE },
async function bufferGrowReadCloseToMaxBuffer() {
const capacities = [MAX_SIZE, MAX_SIZE - 1];
for (const capacity of capacities) {
const reader = new Deno.Buffer(new ArrayBuffer(capacity));
const buf = new Deno.Buffer();
await buf.readFrom(reader);
assertEquals(buf.length, capacity);
}
},
);
Deno.test(
{ ignore: ignoreMaxSizeTests || DENO_FUTURE },
async function bufferReadCloseToMaxBufferWithInitialGrow() {
const capacities = [MAX_SIZE, MAX_SIZE - 1, MAX_SIZE - 512];
for (const capacity of capacities) {
const reader = new Deno.Buffer(new ArrayBuffer(capacity));
const buf = new Deno.Buffer();
buf.grow(MAX_SIZE);
await buf.readFrom(reader);
assertEquals(buf.length, capacity);
}
},
);
// --- readFrom / readFromSync / grow behavior -----------------------------
Deno.test({ ignore: DENO_FUTURE }, async function bufferLargeByteReads() {
init();
assert(testBytes);
assert(testString);
const buf = new Deno.Buffer();
for (let i = 3; i < 30; i += 3) {
const n = Math.floor(testBytes.byteLength / i);
const s = await fillBytes(buf, "", 5, testBytes.subarray(0, n));
await empty(buf, s, new Uint8Array(testString.length));
}
check(buf, "");
});
Deno.test({ ignore: DENO_FUTURE }, function bufferCapWithPreallocatedSlice() {
const buf = new Deno.Buffer(new ArrayBuffer(10));
assertEquals(buf.capacity, 10);
});
Deno.test({ ignore: DENO_FUTURE }, async function bufferReadFrom() {
init();
assert(testBytes);
assert(testString);
const buf = new Deno.Buffer();
for (let i = 3; i < 30; i += 3) {
const s = await fillBytes(
buf,
"",
5,
testBytes.subarray(0, Math.floor(testBytes.byteLength / i)),
);
const b = new Deno.Buffer();
await b.readFrom(buf);
const fub = new Uint8Array(testString.length);
await empty(b, s, fub);
}
// readFrom of a null reader must reject, not hang.
await assertRejects(async function () {
await new Deno.Buffer().readFrom(null!);
});
});
Deno.test({ ignore: DENO_FUTURE }, async function bufferReadFromSync() {
init();
assert(testBytes);
assert(testString);
const buf = new Deno.Buffer();
for (let i = 3; i < 30; i += 3) {
const s = await fillBytes(
buf,
"",
5,
testBytes.subarray(0, Math.floor(testBytes.byteLength / i)),
);
const b = new Deno.Buffer();
b.readFromSync(buf);
const fub = new Uint8Array(testString.length);
await empty(b, s, fub);
}
assertThrows(function () {
new Deno.Buffer().readFromSync(null!);
});
});
// grow() must preserve existing unread data across all start/grow sizes.
Deno.test({ ignore: DENO_FUTURE }, async function bufferTestGrow() {
const tmp = new Uint8Array(72);
for (const startLen of [0, 100, 1000, 10000]) {
const xBytes = repeat("x", startLen);
for (const growLen of [0, 100, 1000, 10000]) {
const buf = new Deno.Buffer(xBytes.buffer as ArrayBuffer);
// If we read, this affects buf.off, which is good to test.
const nread = (await buf.read(tmp)) ?? 0;
buf.grow(growLen);
const yBytes = repeat("y", growLen);
await buf.write(yBytes);
// Check that buffer has correct data.
assertEquals(
buf.bytes().subarray(0, startLen - nread),
xBytes.subarray(nread),
);
assertEquals(
buf.bytes().subarray(startLen - nread, startLen - nread + growLen),
yBytes,
);
}
}
});
// --- bytes({ copy }) semantics and TextDecoder size limit ----------------
Deno.test({ ignore: DENO_FUTURE }, function testBufferBytesArrayBufferLength() {
// defaults to copy
const args = [{}, { copy: undefined }, undefined, { copy: true }];
for (const arg of args) {
const bufSize = 64 * 1024;
const bytes = new TextEncoder().encode("a".repeat(bufSize));
const reader = new Deno.Buffer();
writeAllSync(reader, bytes);
const writer = new Deno.Buffer();
writer.readFromSync(reader);
const actualBytes = writer.bytes(arg);
assertEquals(actualBytes.byteLength, bufSize);
// Each copying call must return a distinct, exactly-sized ArrayBuffer.
assert(actualBytes.buffer !== writer.bytes(arg).buffer);
assertEquals(actualBytes.byteLength, actualBytes.buffer.byteLength);
}
});
Deno.test({ ignore: DENO_FUTURE }, function testBufferBytesCopyFalse() {
const bufSize = 64 * 1024;
const bytes = new TextEncoder().encode("a".repeat(bufSize));
const reader = new Deno.Buffer();
writeAllSync(reader, bytes);
const writer = new Deno.Buffer();
writer.readFromSync(reader);
const actualBytes = writer.bytes({ copy: false });
assertEquals(actualBytes.byteLength, bufSize);
// copy: false aliases the buffer's internal (over-allocated) ArrayBuffer.
assertEquals(actualBytes.buffer, writer.bytes({ copy: false }).buffer);
assert(actualBytes.buffer.byteLength > actualBytes.byteLength);
});
Deno.test(
{ ignore: DENO_FUTURE },
function testBufferBytesCopyFalseGrowExactBytes() {
const bufSize = 64 * 1024;
const bytes = new TextEncoder().encode("a".repeat(bufSize));
const reader = new Deno.Buffer();
writeAllSync(reader, bytes);
const writer = new Deno.Buffer();
writer.grow(bufSize);
writer.readFromSync(reader);
const actualBytes = writer.bytes({ copy: false });
assertEquals(actualBytes.byteLength, bufSize);
assertEquals(actualBytes.buffer.byteLength, actualBytes.byteLength);
},
);
Deno.test(
{ ignore: DENO_FUTURE },
function testThrowsErrorWhenBufferExceedsMaxLength() {
// NOTE(review): 536870888 is presumably V8's String kMaxLength — confirm.
const kStringMaxLengthPlusOne = 536870888 + 1;
const bytes = new Uint8Array(kStringMaxLengthPlusOne);
assertThrows(
() => {
new TextDecoder().decode(bytes);
},
TypeError,
"buffer exceeds maximum length",
);
},
);

View file

@ -238,7 +238,6 @@
"ext:runtime/06_util.js": "../runtime/js/06_util.js",
"ext:runtime/10_permissions.js": "../runtime/js/10_permissions.js",
"ext:runtime/11_workers.js": "../runtime/js/11_workers.js",
"ext:runtime/13_buffer.js": "../runtime/js/13_buffer.js",
"ext:runtime/30_os.js": "../runtime/js/30_os.js",
"ext:runtime/40_fs_events.js": "../runtime/js/40_fs_events.js",
"ext:runtime/40_process.js": "../runtime/js/40_process.js",