diff --git a/.gitignore b/.gitignore
index df1d850fdb7f87b06fbfa30d4370e7a4700fd311..01b1f255c90e130c9b95c734a69a96a71bf53cec 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,4 @@
 .idea
 .vscode
 *.swp
+target
diff --git a/README.md b/README.md
index 12317f7fae77069d7e63118c5c3cef35ac0becc9..36a9d1af4a4a1f2466acbb56ba3d30b26a08e7d9 100644
--- a/README.md
+++ b/README.md
@@ -110,8 +110,6 @@ wasm32-wasi (installed)
 * benches: benchmark code, including wat-format files for three different test functions: `gcd`, `echo_string`, and `authentication`
 * experiments/application: common function code that can be compiled to the Wasm format
-* experiments/bash_libraries: bash scripts used for testing
-* experiments/concurrency: scripts for function concurrency performance testing
 * src/main.rs: the main entry point of the WasmEngine, defining the RESTful API endpoints
 * src/wrapper: wrapper around the underlying WebAssembly runtime
 * src/function_store: Wasm function image management and Wasm Module instance management code
@@ -208,7 +206,7 @@ $ curl --location --request POST 'localhost:10000/function/query' \
 status code: 200, message: queried function info: FunctionEntry { func_name: "authentication", func_image_name: "127.0.0.1:5000/authentication-wasm:v4", func_local_path: "/var/lib/wasmengine/functions/authentication/authentication.wasm", wasi_cap: false }
 ```
-**Invoke the uthentication function**
+**Invoke the authentication function**
 ```bash
 $ curl --location --request POST 'localhost:10000/function/invoke' \
 --header 'Content-Type: application/json' \
diff --git a/experiments/bash_libraries/csv_to_dat.sh b/experiments/bash_libraries/csv_to_dat.sh
deleted file mode 100644
index d6951a33a426990517f93b7beefe9dcb02ed31d0..0000000000000000000000000000000000000000
--- a/experiments/bash_libraries/csv_to_dat.sh
+++ /dev/null
@@ -1,29 +0,0 @@
-# shellcheck shell=bash
-if [ -n "$__csv_to_dat_sh__" ]; then return; fi
-__csv_to_dat_sh__=$(date)
-
-source "panic.sh" || exit 1
-
-# Takes a variadic number of paths to *.csv files and converts to *.dat files in the same directory
-csv_to_dat() {
-	if (($# == 0)); then
-		panic "insufficient parameters"
-		return 1
-	fi
-
-	for arg in "$@"; do
-		if ! [[ "$arg" =~ ".csv"$ ]]; then
-			panic "$arg is not a *.csv file"
-			return 1
-		fi
-		if [[ ! -f "$arg" ]]; then
-f "$arg" ]]; then - panic "$arg does not exit" - return 1 - fi - done - - for file in "$@"; do - echo -n "#" > "${file/.csv/.dat}" - tr ',' ' ' < "$file" | column -t >> "${file/.csv/.dat}" - done -} diff --git a/experiments/bash_libraries/error_msg.sh b/experiments/bash_libraries/error_msg.sh deleted file mode 100644 index c30585a1584788761dfaae33763502d3c9b534e5..0000000000000000000000000000000000000000 --- a/experiments/bash_libraries/error_msg.sh +++ /dev/null @@ -1,7 +0,0 @@ -# shellcheck shell=bash -if [ -n "$__error_msg_sh__" ]; then return; fi -__error_msg_sh__=$(date) - -error_msg() { - printf "%.23s %s() at %s:%s - %s\n" "$(date +%F.%T.%N)" "${FUNCNAME[0]}" "$(realpath "${BASH_SOURCE[0]##*/}")" "${BASH_LINENO[0]}" "${@}" -} diff --git a/experiments/bash_libraries/generate_gnuplots.sh b/experiments/bash_libraries/generate_gnuplots.sh deleted file mode 100644 index 45826ce0b1ffac564882fdc4b0f13e86186922f6..0000000000000000000000000000000000000000 --- a/experiments/bash_libraries/generate_gnuplots.sh +++ /dev/null @@ -1,36 +0,0 @@ -# shellcheck shell=bash -if [ -n "$__generate_gnuplots_sh__" ]; then return; fi -__generate_gnuplots_sh__=$(date) - -source "panic.sh" || exit 1 - -# Runs all *.gnuplot files found gnuplot_directory from results_directory -# Outputting resulting diagrams in results_directory -# $1 - results_directory containing the data file referenced in the gnuplot file -# $2 - gnuplot_directory containing the *.gnuplot specification files -generate_gnuplots() { - local -r results_directory="$1" - local -r experiment_directory="$2" - - if ! command -v gnuplot &> /dev/null; then - panic "gnuplot could not be found in path" - return 1 - fi - # shellcheck disable=SC2154 - if [ -z "$results_directory" ]; then - panic "results_directory was unset or empty" - return 1 - fi - # shellcheck disable=SC2154 - if [ -z "$experiment_directory" ]; then - panic "error: EXPERIMENT_DIRECTORY was unset or empty" - return 1 - fi - cd "$results_directory" || exit - - shopt -s nullglob - for gnuplot_file in "$experiment_directory"/*.gnuplot; do - gnuplot "$gnuplot_file" - done - cd "$experiment_directory" || exit -} diff --git a/experiments/bash_libraries/get_result_count.sh b/experiments/bash_libraries/get_result_count.sh deleted file mode 100644 index d7bd5011dce4df4d3488af49d0854a628732a090..0000000000000000000000000000000000000000 --- a/experiments/bash_libraries/get_result_count.sh +++ /dev/null @@ -1,38 +0,0 @@ -# shellcheck shell=bash -if [ -n "$__get_result_count_sh__" ]; then return; fi -__get_result_count_sh__=$(date) - -source "panic.sh" || exit 1 - -# Given a file, returns the number of results -# This assumes a *.csv file with a header -# $1 the file we want to check for results -# $2 an optional return nameref -get_result_count() { - if (($# != 1)); then - panic "insufficient parameters. $#/1" - return 1 - elif [[ ! -f $1 ]]; then - panic "the file $1 does not exist" - return 1 - elif [[ ! 
-		panic "the file $1 is size 0"
-		return 1
-	fi
-
-	local -r file=$1
-
-	# Subtract one line for the header
-	local -i count=$(($(wc -l < "$file") - 1))
-
-	if (($# == 2)); then
-		# shellcheck disable=2034
-		local -n __result=$2
-	fi
-
-	if ((count > 0)); then
-		return 0
-	else
-		return 1
-	fi
-}
diff --git a/experiments/bash_libraries/panic.sh b/experiments/bash_libraries/panic.sh
deleted file mode 100644
index 0f6d1f39af502730a68b180151d962329df53bc5..0000000000000000000000000000000000000000
--- a/experiments/bash_libraries/panic.sh
+++ /dev/null
@@ -1,23 +0,0 @@
-# shellcheck shell=bash
-if [ -n "$__panic_sh__" ]; then return; fi
-__panic_sh__=$(date)
-
-source "error_msg.sh" || exit 1
-
-declare __common_did_dump_callstack=false
-
-__common_dump_callstack() {
-	echo "Call Stack:"
-	for ((i = 1; i < ${#FUNCNAME[@]}; i++)); do
-		printf "\t%d - %s\n" "$((i - 1))" "${FUNCNAME[i]} (${BASH_SOURCE[i + 1]}:${BASH_LINENO[i]})"
-	done
-}
-
-# Public API
-panic() {
-	error_msg "${@}"
-	[[ "$__common_did_dump_callstack" == false ]] && {
-		__common_dump_callstack
-		__common_did_dump_callstack=true
-	}
-}
diff --git a/experiments/bash_libraries/percentiles_table.sh b/experiments/bash_libraries/percentiles_table.sh
deleted file mode 100644
index a56c672b38083ed60ca731679990cc718413b8fb..0000000000000000000000000000000000000000
--- a/experiments/bash_libraries/percentiles_table.sh
+++ /dev/null
@@ -1,77 +0,0 @@
-# shellcheck shell=bash
-
-source "type_checks.sh" || exit 1
-
-# These utility functions are used to generate percentile tables that summarize distributions of data.
-# Each row represents an independent variable, such as a scheduling policy
-# The data for each column is provided as a single column of sorted data
-# If the data is too coarse, a percentile might be statistically insignificant. If this is the case,
-# the script writes an * to the appropriate cell
-#
-# Example:
-#
-# percentiles_table_header "./table.csv"
-# for variant in fifo_nopreemption fifo_preemption edf_nopreemption edf_preemption; do
-# 	percentiles_table_row "./${variant}.csv" "./table.csv" "$variant"
-# done
-#
-# See Also:
-# - csv_to_dat - Can transform a table into a *.dat file suitable for gnuplot
-#
-# References
-# - The AWK Programming Language - https://ia802309.us.archive.org/25/items/pdfy-MgN0H1joIoDVoIC7/The_AWK_Programming_Language.pdf
-# - GAWK: Effective AWK Programming - https://www.gnu.org/software/gawk/manual/gawk.pdf
-
-percentiles_table_header() {
-	local table_file="${1:?table_file not set}"
-	# Can optionally override "app" in header
-	local label_header="${2:-app}"
-	echo "${label_header},cnt,min,mean,p50,p90,p99,max" >>"$table_file"
-}
-
-# columnar_data_file is assumed to be a file containing a single column of sorted data
-percentiles_table_row() {
-	local -r columnar_data_file="${1:?columnar_data_file not set}"
-	check_file columnar_data_file
-	local -r table_file="${2:?table_file not set}"
-	check_file table_file
-	local -r row_label="${3:?row_label not set}"
-	local -r format_string="${4:-%1.4f}"
-
-	# Count the number of results
-	local -i sample_size
-	sample_size=$(wc -l <"$columnar_data_file")
-
-	if ((sample_size == 0)); then
-		# We might not have actually run every variant depending on iterations and workload mix
-		# Insert a degenerate row if this is the case
-		echo "$row_label,0,*,*,*,*,*,*" >>"$table_file"
-	else
-		awk '
-			BEGIN {
-				sample_size='"$sample_size"'
-				row_label="'"$row_label"'"
-				format_string="'"$format_string"'"
-				invalid_number_symbol="*"
-				sum = 0
-				p50_idx = int(sample_size * 0.5)
-				p90_idx = int(sample_size * 0.9)
-				p99_idx = int(sample_size * 0.99)
-				p100_idx = sample_size
-			}
-
-			# Empty pattern matches all rows
-			{ sum += $0 }
-			NR==1 { min = sample_size > 0 ? sprintf(format_string, $0) : invalid_number_symbol }
-			NR==p50_idx { p50 = sample_size >= 3 ? sprintf(format_string, $0) : invalid_number_symbol }
-			NR==p90_idx { p90 = sample_size >= 10 ? sprintf(format_string, $0) : invalid_number_symbol }
-			NR==p99_idx { p99 = sample_size >= 100 ? sprintf(format_string, $0) : invalid_number_symbol }
-			NR==p100_idx { p100 = sample_size > 0 ? sprintf(format_string, $0) : invalid_number_symbol }
-
-			END {
-				mean = sample_size > 0 ? sprintf(format_string, sum / NR) : invalid_number_symbol
-				printf "%s,%d,%s,%s,%s,%s,%s,%s\n", row_label, sample_size, min, mean, p50, p90, p99, p100
-			}
-		' <"$columnar_data_file" >>"$table_file"
-	fi
-}
diff --git a/experiments/bash_libraries/type_checks.sh b/experiments/bash_libraries/type_checks.sh
deleted file mode 100644
index a24d2d59671806db502f11b3dc68536c375c2a1a..0000000000000000000000000000000000000000
--- a/experiments/bash_libraries/type_checks.sh
+++ /dev/null
@@ -1,42 +0,0 @@
-# shellcheck shell=bash
-
-# Example: Be sure to not set the -i attribute until after validating the content
-# local -r second=${2:?second not set}
-# check_number second || return 1
-# local -i second
-check_number() {
-	local arg=${1:?arg not set}
-	# echo "${arg_raw}: ${!arg_raw}"
-	# A non-numeric string seems to coerce to 0
-	((arg == 0)) && [[ ${!arg} != "0" ]] && echo "$arg contains ${!arg}, which is not a valid number" && return 1
-
-	return 0
-}
-
-check_file() {
-	local arg_raw=${1:?arg not set}
-	local -n arg="$arg_raw"
-	[[ ! -f "$arg" ]] && echo "${arg_raw} contains $arg, which is not a valid file" && return 1
-f "$arg" ]] && echo "${arg_raw} contains $arg, which is not a valid file" && return 1 - - return 0 -} - -check_nameref() { - # Namerefs automatically transitively resolve, so we have to use indirect expansion to get the name of the intermediate variable name - local nameref_name=${1:?arg not set} - local -n nameref="$nameref_name" - local nameref_value=${!nameref} - - [[ ! -v nameref ]] && echo "nameref $nameref_name contains $nameref_value, which does not resolve to variable" && return 1 - - return 0 -} - -check_argc() { - local -i expected_argc="$1" - local argv="$2" - local -i actual_argc="${#argv}" - ((expected_argc != actual_argc)) && echo "expected ${expected_argc} received ${actual_argc}" && return 1 - - return 0 -} diff --git a/experiments/concurrency/dockerfile b/experiments/concurrency/dockerfile deleted file mode 100644 index 558512cb085a5ed4b17a63071ae05df59e8bc5b5..0000000000000000000000000000000000000000 --- a/experiments/concurrency/dockerfile +++ /dev/null @@ -1,5 +0,0 @@ -FROM hub.oepkgs.net/openeuler/openeuler:21.03 -COPY wasm-engine /root/ -COPY authentication-wasi.wasm /root/modules/ -WORKDIR /root -CMD ["./wasm-engine", "-p", "modules"] diff --git a/experiments/concurrency/run-wasm-engine.sh b/experiments/concurrency/run-wasm-engine.sh deleted file mode 100644 index 8dab1e48b31e273786464a3733761e8788f9a462..0000000000000000000000000000000000000000 --- a/experiments/concurrency/run-wasm-engine.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/bash -PROJECT_DIR=$(git rev-parse --show-toplevel) -cpus="$1" -memory="$2" - -copy_file_to_context() { - cp "$PROJECT_DIR"/target/release/wasm-engine . - cp "$PROJECT_DIR"/experiments/application/authentication-wasi/target/wasm32-wasi/release/authentication-wasi.wasm . -} - -docker_build_run() { - docker build -f dockerfile -t wasm-engine:authentication-wasi . - docker run -itd --cpus "$cpus" --memory "$memory" -p 10000:10000 wasm-engine:authentication-wasi -} - -isula_build_run() { - isula-build ctr-img build -f dockerfile -o docker-daemon:wasm-engine:authentication-wasi . - docker run -itd --cpus "$cpus" --memory "$memory" -p 10000:10000 wasm-engine:authentication-wasi -} - -clean() { - rm -f wasm-engine authentication-wasi.wasm -} - -copy_file_to_context -isula_build_run -clean diff --git a/experiments/concurrency/run.sh b/experiments/concurrency/run.sh deleted file mode 100644 index eccb7960a9164b36579686313b958f5f7a34c986..0000000000000000000000000000000000000000 --- a/experiments/concurrency/run.sh +++ /dev/null @@ -1,195 +0,0 @@ -#!/bin/bash - -# This experiment is intended to document how the level of concurrent requests influence the latency, throughput, and success/failure rate - -# Add bash_libraries directory to path -__run_sh__base_path="$(dirname "$(realpath --logical "${BASH_SOURCE[0]}")")" -__run_sh__bash_libraries_relative_path="../bash_libraries" -__run_sh__bash_libraries_absolute_path=$(cd "$__run_sh__base_path" && cd "$__run_sh__bash_libraries_relative_path" && pwd) -export PATH="$__run_sh__bash_libraries_absolute_path:$PATH" - -source csv_to_dat.sh || exit 1 -source generate_gnuplots.sh || exit 1 -source get_result_count.sh || exit 1 -source panic.sh || exit 1 -source percentiles_table.sh || exit 1 - -if ! command -v hey >/dev/null; then - echo "hey is not present." 
-	exit 1
-fi
-
-declare -gi iterations=10000
-declare -ga concurrency=(1 20 40 60 80 100)
-declare -g hey_command=""
-
-run_samples() {
-	if (($# != 2)); then
-		panic "invalid number of arguments \"$1\""
-		return 1
-	elif [[ -z "$1" ]]; then
-		panic "hostname \"$1\" was empty"
-		return 1
-	fi
-
-	local hostname="$1"
-	local test_type="$2"
-
-	printf "Running Samples: "
-	hey_iterations=16
-	conn=1
-	get_command_from_type "$test_type"
-	eval "$hey_command" || {
-		printf "[ERR]\n"
-		panic "samples failed"
-		return 1
-	}
-
-	printf "[OK]\n"
-	return 0
-}
-
-# Execute the experiments
-# $1 (hostname)
-# $2 (results_directory) - a directory where we will store our results
-# $3 (test_type)
-run_experiments() {
-	if (($# != 3)); then
-		panic "invalid number of arguments \"$1\""
-		return 1
-	elif [[ -z "$1" ]]; then
-		panic "hostname \"$1\" was empty"
-		return 1
-	elif [[ ! -d "$2" ]]; then
-		panic "directory \"$2\" does not exist"
-		return 1
-	fi
-
-	local hostname="$1"
-	local results_directory="$2"
-
-	# Execute the experiments
-	printf "Running Experiments:\n"
-	for conn in ${concurrency[*]}; do
-		printf "\t%d Concurrency: " "$conn"
-		conn=$conn
-		hey_iterations=$iterations
-		get_command_from_type "$test_type"
-		eval $hey_command >"$results_directory/con$conn.csv" 2>/dev/null || {
-			printf "[ERR]\n"
-			panic "experiment failed"
-			return 1
-		}
-		get_result_count "$results_directory/con$conn.csv" || {
-			printf "[ERR]\n"
-			panic "con$conn.csv unexpectedly has zero requests"
-			return 1
-		}
-		printf "[OK]\n"
-	done
-
-	return 0
-}
-
-process_results() {
-	if (($# != 1)); then
-		panic "invalid number of arguments ($#, expected 1)"
-		return 1
-	elif ! [[ -d "$1" ]]; then
-		panic "directory $1 does not exist"
-		return 1
-	fi
-
-	local -r results_directory="$1"
-
-	printf "Processing Results: "
-	# Write headers to CSVs
-	if ! grep -q "Concurrency,Success_Rate" "$results_directory/success.csv" 2>/dev/null; then
-		printf "Concurrency,Success_Rate\n" >>"$results_directory/success.csv"
-	fi
-	if ! grep -q "Concurrency,Throughput" "$results_directory/throughput.csv" 2>/dev/null; then
-		printf "Concurrency,Throughput\n" >>"$results_directory/throughput.csv"
-	fi
-	if ! grep -q "cnt,min,mean" "$results_directory/latency.csv" 2>/dev/null; then
-		percentiles_table_header "$results_directory/latency.csv" "Con"
-	fi
-
-	for conn in ${concurrency[*]}; do
-
-		if [[ ! -f "$results_directory/con$conn.csv" ]]; then
-			printf "[ERR]\n"
-			panic "Missing $results_directory/con$conn.csv"
-			return 1
-		fi
-
-		# Calculate Success Rate for csv (percent of requests resulting in 200)
-		awk -F, '
-			$7 == 200 {ok++}
-			END{printf "'"$conn"',%3.5f\n", (ok / '"$iterations"' * 100)}
-		' <"$results_directory/con$conn.csv" >>"$results_directory/success.csv"
-
-		# Filter on 200s, convert from s to ms, and sort
-		awk -F, '$7 == 200 {print ($1 * 1000)}' <"$results_directory/con$conn.csv" |
-			sort -g >"$results_directory/con$conn-response.csv"
-
-		# Get Number of 200s
-		oks=$(wc -l <"$results_directory/con$conn-response.csv")
-		((oks == 0)) && continue # If all errors, skip line
-
-		# We determine duration by looking at the timestamp of the last complete request
-		# TODO: Should this instead just use the client-side synthetic duration_sec value?
-		duration=$(tail -n1 "$results_directory/con$conn.csv" | cut -d, -f8)
-
-		# Throughput is calculated as the mean number of successful requests per second
-		throughput=$(echo "$oks/$duration" | bc)
-		printf "%d,%f\n" "$conn" "$throughput" >>"$results_directory/throughput.csv"
-
-		# Generate Latency Data for csv
-		percentiles_table_row "$results_directory/con$conn-response.csv" "$results_directory/latency.csv" "$conn"
-
-		# Delete scratch file used for sorting/counting
-		rm -rf "$results_directory/con$conn-response.csv"
-	done
-
-	# Transform csvs to dat files for gnuplot
-	csv_to_dat "$results_directory/success.csv" "$results_directory/throughput.csv" "$results_directory/latency.csv"
-
-	# Generate gnuplots
-	generate_gnuplots "$results_directory" "$__run_sh__base_path" || {
-		printf "[ERR]\n"
-		panic "failed to generate gnuplots"
-	}
-
-	printf "[OK]\n"
-	return 0
-}
-
-get_command_from_type() {
-	local test_type="$1"
-
-	common_part="hey -n \$hey_iterations -c \$conn -cpus 1 -o csv -m POST -H \"Content-Type: application/json\" -d '{\"arg_uri\":\"yes\", \"arg_body\":\"yes\", \"arg_secret\":\"12345\"}' "
-	case "$test_type" in
-		"wasi") hey_command="$common_part""http://${target_hostname}:10000/function/run/authentication-wasi/1.0.0/start" ;;
-		"wasm") hey_command="$common_part""http://${target_hostname}:10000/function/run/authentication/1.0.0/authentication" ;;
-		"js") hey_command="$common_part""http://${target_hostname}:8001/authentication" ;;
-		*)
-			echo "you need to choose only one test type"
-			exit 1
-			;;
-	esac
-}
-
-# Expected Symbol used by the framework
-experiment_client() {
-	local -r target_hostname="$1"
-	local -r results_directory="$2"
-	local -r test_type="$3"
-
-	run_samples "$target_hostname" "$test_type" || return 1
-	run_experiments "$target_hostname" "$results_directory" "$test_type" || return 1
-	process_results "$results_directory" || return 1
-
-	return 0
-}
-
-experiment_client 127.0.0.1 /root/gitee/wasm-engine/experiments/result wasi
diff --git a/experiments/concurrency/success.gnuplot b/experiments/concurrency/success.gnuplot
deleted file mode 100644
index 02be39e2216cfc1648d6055fc08fa30927445526..0000000000000000000000000000000000000000
--- a/experiments/concurrency/success.gnuplot
+++ /dev/null
@@ -1,12 +0,0 @@
-reset
-
-set term jpeg
-set output "success.jpg"
-
-set xlabel "Concurrency"
-set ylabel "% 2XX"
-
-set xrange [-5:105]
-set yrange [0:110]
-
-plot 'success.dat' using 1:2 title '2XX'
diff --git a/experiments/concurrency/throughput.gnuplot b/experiments/concurrency/throughput.gnuplot
deleted file mode 100644
index 967baa9100e75e253d5dab9a83289d084122ccb1..0000000000000000000000000000000000000000
--- a/experiments/concurrency/throughput.gnuplot
+++ /dev/null
@@ -1,15 +0,0 @@
-reset
-
-set term jpeg
-set output "throughput.jpg"
-
-stats 'throughput.dat' nooutput
-N = STATS_records
-
-set xlabel "Concurrency"
-set ylabel "Requests/sec"
-
-set xrange [-5:105]
-set yrange [0:]
-
-plot for [i=0:N/6-1]'throughput.dat' every ::(0+i*6)::(5+i*6) using 1:2 title 'Reqs/sec '.i with linespoint pointtype (i+1)
\ No newline at end of file
diff --git a/experiments/pyplot/latency.py b/experiments/pyplot/latency.py
deleted file mode 100644
index 17ec59298087d1cdc9406e3b05b6db68a16e057a..0000000000000000000000000000000000000000
--- a/experiments/pyplot/latency.py
+++ /dev/null
@@ -1,24 +0,0 @@
-from line_chart import LineChart
-
-
-class Latency(LineChart):
-    def process_raw_data(self):
-        for csv in self.csv_file:
-            data = {}
-            with open(csv) as f:
-                lines = f.readlines()
-                total_round = len(lines[1:])/6
-            for line in lines[1:]:
-                raw = line.strip().split(',')
-                if raw[0] not in data:
-                    data[raw[0]] = float(raw[3])
-                    continue
-                data[raw[0]] = data[raw[0]] + float(raw[3])
-            for k, v in data.items():
-                data[k] = v / total_round
-            self.data.append(data)
-
-
-m = Latency("latency")
-m.process_raw_data()
-m.plot("concurrency", "latency", ["latency-wasi(ms)", "latency-js(ms)"])
diff --git a/experiments/pyplot/line_chart.py b/experiments/pyplot/line_chart.py
deleted file mode 100644
index f9d3cb3eb5a1859358454cecdd3a85962d6c0481..0000000000000000000000000000000000000000
--- a/experiments/pyplot/line_chart.py
+++ /dev/null
@@ -1,56 +0,0 @@
-from abc import ABC, abstractmethod
-import sys
-import matplotlib.pyplot as plt
-
-
-class LineChart(ABC):
-    """
-    Line chart program
-    """
-
-    def __init__(self, output):
-        self.csv_file = []
-        self.init_args()
-        self.data = []
-        self.output = output
-
-    def init_args(self):
-        import argparse
-        import platform
-
-        class MyParser(argparse.ArgumentParser):
-            def error(self, message):
-                sys.stderr.write('error: %s\n' % message)
-                self.print_help()
-                sys.exit(2)
-
-        parser = MyParser(description="plot line chart")
-        parser.add_argument('csv', nargs='+', help='result csv file')
-        args = parser.parse_args()
-
-        for csv in args.csv:
-            self.csv_file.append(csv)
-
-    @abstractmethod
-    def process_raw_data(self):
-        pass
-
-    def plot(self, xlabel, ylabel, plot_label):
-        max_y = 0
-        for i, data_entry in enumerate(self.data):
-            x_ticks = [k for k in data_entry.keys()]
-            y_ticks = [v for v in data_entry.values()]
-            print(y_ticks)
-            cur_max = max(y_ticks)
-            if cur_max > max_y:
-                max_y = cur_max
-
-            plt.scatter(x_ticks, y_ticks, color="C" + str(i))
-            plt.plot(x_ticks, y_ticks,
-                     color="C" + str(i), label=plot_label[i])
-        plt.ylim(top=max_y * 1.2)
-        plt.xlabel(xlabel)
-        plt.ylabel(ylabel)
-        plt.legend(loc="best")
-        plt.show()
-        plt.savefig(self.output, bbox_inches='tight')
diff --git a/experiments/pyplot/throughput.py b/experiments/pyplot/throughput.py
deleted file mode 100644
index 45e5bd6e3a0ff1f1516bcf2b3bde7c8086b48732..0000000000000000000000000000000000000000
--- a/experiments/pyplot/throughput.py
+++ /dev/null
@@ -1,25 +0,0 @@
-from line_chart import LineChart
-
-
-class Throughput(LineChart):
-    def process_raw_data(self):
-        for csv in self.csv_file:
-            data = {}
-            with open(csv) as f:
-                lines = f.readlines()
-                total_round = len(lines[1:])/6
-            for line in lines[1:]:
-                raw = line.strip().split(',')
-                if raw[0] not in data:
-                    data[raw[0]] = float(raw[1])
-                    continue
-                data[raw[0]
-                     ] = data[raw[0]] + float(raw[1])
-            for k, v in data.items():
-                data[k] = v / total_round
-            self.data.append(data)
-
-
-m = Throughput("throughput")
-m.process_raw_data()
-m.plot("concurrency", "Requests/sec", ["throughput-wasi", "throughput-js"])