From c61a0f2f84e619a70704b59fd72cd7da863d4461 Mon Sep 17 00:00:00 2001 From: Ryan Dahl Date: Mon, 15 Oct 2018 16:44:35 -0400 Subject: [PATCH] First pass at http benchmark. --- tests/http_bench.ts | 34 ++++++++++++++++++++++++++++ third_party | 2 +- tools/benchmark.py | 2 ++ tools/http_benchmark.py | 49 +++++++++++++++++++++++++++++++++++++++++ tools/node_http.js | 8 +++++++ tools/testdata/wrk1.txt | 9 ++++++++ tools/util.py | 8 +++++++ tools/util_test.py | 8 +++++++ website/app.js | 23 +++++++++++++++++++ website/app_test.js | 8 +++++++ website/index.html | 4 ++++ 11 files changed, 154 insertions(+), 1 deletion(-) create mode 100644 tests/http_bench.ts create mode 100755 tools/http_benchmark.py create mode 100644 tools/node_http.js create mode 100644 tools/testdata/wrk1.txt diff --git a/tests/http_bench.ts b/tests/http_bench.ts new file mode 100644 index 0000000000..89d53ad1d8 --- /dev/null +++ b/tests/http_bench.ts @@ -0,0 +1,34 @@ +// Used for benchmarking Deno's networking. See tools/http_benchmark.py +// TODO Replace this with a real HTTP server once +// https://github.com/denoland/deno/issues/726 is completed. 
+import * as deno from "deno"; +const addr = deno.args[1] || "127.0.0.1:4500"; +const listener = deno.listen("tcp", addr); +const response = new TextEncoder().encode( + "HTTP/1.1 200 OK\r\nContent-Length: 12\r\n\r\nHello World\n" +); + +async function handle(conn: deno.Conn): Promise { + const buffer = new Uint8Array(1024); + try { + while (true) { + const r = await conn.read(buffer); + if (r.eof) { + break; + } + await conn.write(response); + } + } finally { + conn.close(); + } +} + +async function main(): Promise { + console.log("Listening on", addr); + while (true) { + const conn = await listener.accept(); + handle(conn); + } +} + +main(); diff --git a/third_party b/third_party index e1a8530361..4f1056e8f0 160000 --- a/third_party +++ b/third_party @@ -1 +1 @@ -Subproject commit e1a853036189b8694dcc42db4d43433edb6c9036 +Subproject commit 4f1056e8f0bb08f1f1add4f358431fce84ce35eb diff --git a/tools/benchmark.py b/tools/benchmark.py index 4422764aa2..856ee3c856 100755 --- a/tools/benchmark.py +++ b/tools/benchmark.py @@ -14,6 +14,7 @@ from util import run, run_output, root_path, build_path, executable_suffix import tempfile import http_server import throughput_benchmark +from http_benchmark import http_benchmark # The list of the tuples of the benchmark name and arguments exec_time_benchmarks = [ @@ -183,6 +184,7 @@ def main(argv): # pipe. 
if os.name != 'nt': new_data["throughput"] = run_throughput(deno_path) + new_data["req_per_sec"] = http_benchmark(deno_path) if "linux" in sys.platform: # Thread count test, only on linux new_data["thread_count"] = run_thread_count_benchmark(deno_path) diff --git a/tools/http_benchmark.py b/tools/http_benchmark.py new file mode 100755 index 0000000000..0cfdc988c2 --- /dev/null +++ b/tools/http_benchmark.py @@ -0,0 +1,49 @@ +#!/usr/bin/env python + +import os +import sys +import util +import time +import subprocess + +ADDR = "127.0.0.1:4544" +DURATION = "10s" + + +def http_benchmark(deno_exe): + deno_cmd = [deno_exe, "--allow-net", "tests/http_bench.ts", ADDR] + node_cmd = ["node", "tools/node_http.js", ADDR.split(":")[1]] + + print "http_benchmark testing DENO." + deno_rps = run(deno_cmd) + + print "http_benchmark testing NODE." + node_rps = run(node_cmd) + + return {"deno": deno_rps, "node": node_rps} + + +def run(server_cmd): + # Run deno echo server in the background. + server = subprocess.Popen(server_cmd) + time.sleep(5) # wait for server to wake up. TODO racy. 
+ wrk_platform = { + "linux2": "linux", + "darwin": "mac", + }[sys.platform] + try: + cmd = "third_party/wrk/" + wrk_platform + "/wrk -d " + DURATION + " http://" + ADDR + "/" + print cmd + output = subprocess.check_output(cmd, shell=True) + req_per_sec = util.parse_wrk_output(output) + print output + return req_per_sec + finally: + server.kill() + + +if __name__ == '__main__': + if len(sys.argv) < 2: + print "Usage ./tools/tcp_http_benchmark.py out/debug/deno" + sys.exit(1) + http_benchmark(sys.argv[1]) diff --git a/tools/node_http.js b/tools/node_http.js new file mode 100644 index 0000000000..dbe9de81af --- /dev/null +++ b/tools/node_http.js @@ -0,0 +1,8 @@ +const http = require("http"); +const port = process.argv[2] || "4544"; +console.log("port", port); +http + .Server((req, res) => { + res.end("Hello World\n"); + }) + .listen(port); diff --git a/tools/testdata/wrk1.txt b/tools/testdata/wrk1.txt new file mode 100644 index 0000000000..d31d1e6fe3 --- /dev/null +++ b/tools/testdata/wrk1.txt @@ -0,0 +1,9 @@ +Running 10s test @ http://127.0.0.1:4500/ + 2 threads and 10 connections + Thread Stats Avg Stdev Max +/- Stdev + Latency 5.08ms 1.37ms 34.96ms 96.63% + Req/Sec 0.92k 51.83 1.00k 78.50% + 18381 requests in 10.00s, 0.89MB read + Socket errors: connect 0, read 18381, write 0, timeout 0 +Requests/sec: 1837.86 +Transfer/sec: 91.53KB diff --git a/tools/util.py b/tools/util.py index 2620e706fc..10b6b9c6fb 100644 --- a/tools/util.py +++ b/tools/util.py @@ -324,3 +324,11 @@ def extract_number(pattern, string): if len(matches) != 1: return None return int(matches[0]) + + +def parse_wrk_output(output): + req_per_sec = None + for line in output.split("\n"): + if req_per_sec is None: + req_per_sec = extract_number(r'Requests/sec:\s+(\d+)', line) + return req_per_sec diff --git a/tools/util_test.py b/tools/util_test.py index 4adf0d658b..24fd2eba1c 100644 --- a/tools/util_test.py +++ b/tools/util_test.py @@ -78,11 +78,19 @@ def parse_unit_test_output_test(): assert expected 
== None +def parse_wrk_output_test(): + print "Testing util.parse_wrk_output_test()..." + f = open(os.path.join(util.root_path, "tools/testdata/wrk1.txt")) + req_per_sec = util.parse_wrk_output(f.read()) + assert req_per_sec == 1837 + + def util_test(): pattern_match_test() parse_exit_code_test() shell_quote_win_test() parse_unit_test_output_test() + parse_wrk_output_test() if __name__ == '__main__': diff --git a/website/app.js b/website/app.js index 7346defb4d..595e95755d 100644 --- a/website/app.js +++ b/website/app.js @@ -51,6 +51,10 @@ export function createThroughputColumns(data) { return createColumns(data, "throughput"); } +export function createReqPerSecColumns(data) { + return createColumns(data, "req_per_sec"); +} + export function createBinarySizeColumns(data) { const propName = "binary_size"; const binarySizeNames = Object.keys(data[data.length - 1][propName]); @@ -132,6 +136,7 @@ export async function main() { const execTimeColumns = createExecTimeColumns(data); const throughputColumns = createThroughputColumns(data); + const reqPerSecColumns = createReqPerSecColumns(data); const binarySizeColumns = createBinarySizeColumns(data); const threadCountColumns = createThreadCountColumns(data); const syscallCountColumns = createSyscallCountColumns(data); @@ -188,6 +193,24 @@ export async function main() { } }); + c3.generate({ + bindto: "#req-per-sec-chart", + data: { + columns: reqPerSecColumns, + onclick: viewCommitOnClick(sha1List) + }, + axis: { + x: { + type: "category", + show: false, + categories: sha1ShortList + }, + y: { + label: "seconds" + } + } + }); + c3.generate({ bindto: "#binary-size-chart", data: { diff --git a/website/app_test.js b/website/app_test.js index 42891bf6be..cebf78aeba 100644 --- a/website/app_test.js +++ b/website/app_test.js @@ -28,6 +28,10 @@ const regularData = [ "10M_tcp": 1.6, "10M_cat": 1.0 }, + req_per_sec: { + node: 16000, + deno: 1000 + }, benchmark: { hello: { mean: 0.05 @@ -66,6 +70,10 @@ const regularData = [ 
"10M_tcp": 1.6, "10M_cat": 1.0 }, + req_per_sec: { + node: 1600, + deno: 3.0 + }, benchmark: { hello: { mean: 0.055 diff --git a/website/index.html b/website/index.html index 03bb268bf0..dbdef58b9d 100644 --- a/website/index.html +++ b/website/index.html @@ -29,6 +29,10 @@

       <h2>Throughput</h2>
       <svg id="throughput-chart"></svg>
+
+      <h2>Req/Sec</h2>
+      <p>Tests HTTP server performance against Node.</p>
+      <svg id="req-per-sec-chart"></svg>
       <h2>Executable size</h2>
       <p>deno ships only a single binary. We track its size here.</p>