mirror of https://github.com/denoland/deno.git
fix(ext/http): unify default gzip compression level (#20050)
This tweaks the HTTP response writer so that both possible execution flows use the same default gzip compression level, `1`. Previously only the eager (full-body) flow set the level explicitly, while the streaming flow fell back to the encoder's implicit default of `6`.
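As a rough illustration of the change, here is a minimal standalone sketch (not Deno's actual ext/http code; it assumes the `flate2` and `async_compression` crates with their gzip/tokio features plus a `tokio` runtime) in which both response flows take their level from one shared constant:

use std::io::Write;

use async_compression::tokio::write::GzipEncoder;
use async_compression::Level;
use flate2::write::GzEncoder;
use flate2::Compression;
use tokio::io::AsyncWriteExt;

// Mirrors the constant this commit introduces in `http_response`.
const GZIP_DEFAULT_COMPRESSION_LEVEL: u8 = 1;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
  let body = b"hello hello hello hello hello".to_vec();

  // Eager flow: the whole body is already in memory, so it is gzipped
  // synchronously with flate2 at the shared level.
  let mut sync_writer = GzEncoder::new(
    Vec::new(),
    Compression::new(GZIP_DEFAULT_COMPRESSION_LEVEL.into()),
  );
  sync_writer.write_all(&body)?;
  let eager = sync_writer.finish()?;

  // Streaming flow: bytes pass through an async encoder; `with_quality`
  // pins the same level instead of the crate's implicit default.
  let mut async_writer = GzipEncoder::with_quality(
    Vec::new(),
    Level::Precise(GZIP_DEFAULT_COMPRESSION_LEVEL.into()),
  );
  async_writer.write_all(&body).await?;
  async_writer.shutdown().await?;
  let streamed = async_writer.into_inner();

  println!("eager: {} bytes, streamed: {} bytes", eager.len(), streamed.len());
  Ok(())
}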
This commit is contained in:
parent 8a175a780a
commit 5abf4cd951
1 changed file with 14 additions and 6 deletions
@@ -2,6 +2,7 @@
 use async_compression::tokio::write::BrotliEncoder;
 use async_compression::tokio::write::GzipEncoder;
+use async_compression::Level;
 use cache_control::CacheControl;
 use deno_core::error::custom_error;
 use deno_core::error::AnyError;
@@ -702,6 +703,11 @@ fn http_response(
   compressing: bool,
   encoding: Encoding,
 ) -> Result<(HttpResponseWriter, hyper::Body), AnyError> {
+  // Gzip, after level 1, doesn't produce significant size difference.
+  // This default matches nginx default gzip compression level (1):
+  // https://nginx.org/en/docs/http/ngx_http_gzip_module.html#gzip_comp_level
+  const GZIP_DEFAULT_COMPRESSION_LEVEL: u8 = 1;
+
   match data {
     Some(data) if compressing => match encoding {
       Encoding::Brotli => {
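The comment added above claims that gzip levels beyond 1 rarely produce a significant size difference (nginx's default is also 1). A quick standalone way to sanity-check that claim on your own payloads, assuming only the `flate2` crate (this snippet is not part of the commit):

use std::io::Write;

use flate2::write::GzEncoder;
use flate2::Compression;

fn main() -> std::io::Result<()> {
  // Repetitive JSON-ish payload as a stand-in for a typical response body;
  // substitute data representative of your server, results vary by payload.
  let body = r#"{"message":"hello world"}"#.repeat(1024).into_bytes();

  for level in [1u32, 6, 9] {
    let mut encoder = GzEncoder::new(Vec::new(), Compression::new(level));
    encoder.write_all(&body)?;
    let compressed = encoder.finish()?;
    println!(
      "gzip level {level}: {} -> {} bytes",
      body.len(),
      compressed.len()
    );
  }
  Ok(())
}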
@@ -715,11 +721,10 @@ fn http_response(
         Ok((HttpResponseWriter::Closed, writer.into_inner().into()))
       }
       Encoding::Gzip => {
-        // Gzip, after level 1, doesn't produce significant size difference.
-        // Probably the reason why nginx's default gzip compression level is
-        // 1.
-        // https://nginx.org/en/docs/http/ngx_http_gzip_module.html#gzip_comp_level
-        let mut writer = GzEncoder::new(Vec::new(), Compression::new(1));
+        let mut writer = GzEncoder::new(
+          Vec::new(),
+          Compression::new(GZIP_DEFAULT_COMPRESSION_LEVEL.into()),
+        );
         writer.write_all(&data)?;
         Ok((HttpResponseWriter::Closed, writer.finish()?.into()))
       }
@@ -739,7 +744,10 @@ fn http_response(
       let (_, writer) = tokio::io::split(b);
       let writer: Pin<Box<dyn tokio::io::AsyncWrite>> = match encoding {
         Encoding::Brotli => Box::pin(BrotliEncoder::new(writer)),
-        Encoding::Gzip => Box::pin(GzipEncoder::new(writer)),
+        Encoding::Gzip => Box::pin(GzipEncoder::with_quality(
+          writer,
+          Level::Precise(GZIP_DEFAULT_COMPRESSION_LEVEL.into()),
+        )),
         _ => unreachable!(), // forbidden by accepts_compression
       };
       let (stream, shutdown_handle) =
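A note on the streaming arm in the last hunk: `GzipEncoder::new(writer)` builds the encoder with async_compression's default compression level, while `GzipEncoder::with_quality(writer, Level::Precise(n))` accepts an explicit level, which is what lets the streaming flow share `GZIP_DEFAULT_COMPRESSION_LEVEL` with the buffered flow above.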