Showing 13 changed files with 624 additions and 18 deletions.
@@ -0,0 +1,13 @@
CPMAddPackage(
    NAME benchmark
    VERSION 1.8.0
    GITHUB_REPOSITORY google/benchmark
    OPTIONS "BENCHMARK_ENABLE_TESTING Off"
)

set(TARGET_BENCHMARKS ${PROJECT_NAME}_bench)
file(GLOB TEST_SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/*.cpp)

add_executable(${TARGET_BENCHMARKS} ${TEST_SOURCES})
target_include_directories(${TARGET_BENCHMARKS} PUBLIC ${CMAKE_SOURCE_DIR}/include)
target_link_libraries(${TARGET_BENCHMARKS} PRIVATE ${LIBRARY_NAME} benchmark::benchmark)
@@ -0,0 +1,56 @@
import json
import sys
from pathlib import Path

import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt

filename = sys.argv[1]
basename = filename.split(".")[0]

# load benchmarks data
with Path(filename).open("r") as jfile:
    jdata = json.load(jfile)
bench_df = pd.DataFrame(jdata["benchmarks"])

# only keep names with /, i.e. drop avg/rms/etc entries
bench_df = bench_df.loc[bench_df.name.str.contains("/")]

# create benchmark_name, benchmark_type and problem-size columns, parsed from
# names of the form FDMTFixture/BM_<stage>_<type>/<n>
bench_df[["benchmark_name", "benchmark_type", "n"]] = bench_df.name.str.split(
    "/BM_|_|/",
    expand=True,
).apply(
    lambda x: ["_".join([x[1], x[2]]), "_".join([x[3], x[4]]), x[5]],
    axis=1,
    result_type="expand",
)
bench_df["n"] = bench_df["n"].astype("uint32")
bench_df = bench_df[["benchmark_name", "n", "cpu_time", "benchmark_type"]]
benchmarks = bench_df.benchmark_name.unique()

# one log-log panel per benchmark, coloured by benchmark_type
palette = sns.color_palette("husl", len(benchmarks))
fig, axes = plt.subplots(2, 2, figsize=(10, 6), sharex=True)
for i, benchmark in enumerate(benchmarks):
    ax = axes[0, i] if i < 2 else axes[1, i - 2]
    data = bench_df[bench_df["benchmark_name"] == benchmark]
    sns.lineplot(
        x="n",
        y="cpu_time",
        hue="benchmark_type",
        data=data,
        ax=ax,
        color=palette[i],
        marker="o",
        markersize=8,
    )
    ax.set_title(benchmark)
    ax.set_xscale("log")
    ax.set_yscale("log")
    ax.set_xlabel("n")
    ax.set_ylabel("CPU time (ns)")  # cpu_time is in ns unless a time unit is set
    ax.legend()

fig.tight_layout()
fig.savefig(f"{basename}.png", bbox_inches="tight", dpi=120)
@@ -0,0 +1,101 @@
#include <benchmark/benchmark.h> | ||
|
||
#include <algorithm> | ||
#include <random> | ||
#include <vector> | ||
#include <dmt/fdmt.hpp> | ||
|
||
class FDMTFixture : public benchmark::Fixture { | ||
public: | ||
void SetUp(const ::benchmark::State& state) { | ||
f_min = 704.0F; | ||
f_max = 1216.0F; | ||
nchans = 4096; | ||
tsamp = 0.00008192F; | ||
dt_max = 2048; | ||
nsamps = state.range(0); | ||
} | ||
|
||
void TearDown(const ::benchmark::State&) {} | ||
|
||
template <typename T> | ||
std::vector<T> generate_vector(size_t size, std::mt19937& gen) { | ||
std::vector<T> vec(size); | ||
std::uniform_real_distribution<T> dis(0.0, 1.0); | ||
std::generate(vec.begin(), vec.end(), [&]() { return dis(gen); }); | ||
return vec; | ||
} | ||
|
||
float f_min; | ||
float f_max; | ||
size_t nchans; | ||
float tsamp; | ||
size_t dt_max; | ||
size_t nsamps; | ||
}; | ||
|
||
BENCHMARK_DEFINE_F(FDMTFixture, BM_fdmt_plan_seq_cpu)(benchmark::State& state) { | ||
for (auto _ : state) { | ||
FDMT fdmt(f_min, f_max, nchans, nsamps, tsamp, dt_max); | ||
} | ||
} | ||
|
||
BENCHMARK_DEFINE_F(FDMTFixture, BM_fdmt_initialise_seq_cpu)(benchmark::State& state) { | ||
FDMT fdmt(f_min, f_max, nchans, nsamps, tsamp, dt_max); | ||
|
||
std::random_device rd; | ||
std::mt19937 gen(rd()); | ||
auto waterfall = generate_vector<float>(nchans * nsamps, gen); | ||
std::vector<float> state_init( | ||
nchans * fdmt.get_dt_grid_init().size() * nsamps, 0.0F); | ||
for (auto _ : state) { | ||
fdmt.initialise(waterfall.data(), state_init.data()); | ||
} | ||
} | ||
|
||
BENCHMARK_DEFINE_F(FDMTFixture, BM_fdmt_execute_seq_cpu)(benchmark::State& state) { | ||
FDMT fdmt(f_min, f_max, nchans, nsamps, tsamp, dt_max); | ||
|
||
std::random_device rd; | ||
std::mt19937 gen(rd()); | ||
auto waterfall = generate_vector<float>(nchans * nsamps, gen); | ||
std::vector<float> dmt(fdmt.get_dt_grid_final().size() * nsamps, 0.0F); | ||
for (auto _ : state) { | ||
fdmt.execute(waterfall.data(), waterfall.size(), dmt.data(), | ||
dmt.size()); | ||
} | ||
} | ||
|
||
BENCHMARK_DEFINE_F(FDMTFixture, BM_fdmt_overall_seq_cpu)(benchmark::State& state) { | ||
std::random_device rd; | ||
std::mt19937 gen(rd()); | ||
auto waterfall = generate_vector<float>(nchans * nsamps, gen); | ||
|
||
for (auto _ : state) { | ||
FDMT fdmt(f_min, f_max, nchans, nsamps, tsamp, dt_max); | ||
state.PauseTiming(); | ||
std::vector<float> dmt(fdmt.get_dt_grid_final().size() * nsamps, 0.0F); | ||
state.ResumeTiming(); | ||
|
||
fdmt.execute(waterfall.data(), waterfall.size(), dmt.data(), | ||
dmt.size()); | ||
} | ||
} | ||
|
||
constexpr size_t min_nsamps = 1 << 11; | ||
constexpr size_t max_nsamps = 1 << 16; | ||
|
||
BENCHMARK_REGISTER_F(FDMTFixture, BM_fdmt_plan_seq_cpu) | ||
->RangeMultiplier(2) | ||
->Range(min_nsamps, max_nsamps); | ||
BENCHMARK_REGISTER_F(FDMTFixture, BM_fdmt_initialise_seq_cpu) | ||
->RangeMultiplier(2) | ||
->Range(min_nsamps, max_nsamps); | ||
BENCHMARK_REGISTER_F(FDMTFixture, BM_fdmt_execute_seq_cpu) | ||
->RangeMultiplier(2) | ||
->Range(min_nsamps, max_nsamps); | ||
BENCHMARK_REGISTER_F(FDMTFixture, BM_fdmt_overall_seq_cpu) | ||
->RangeMultiplier(2) | ||
->Range(min_nsamps, max_nsamps); | ||
|
||
BENCHMARK_MAIN(); |
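A rough sketch of how the two pieces connect (the executable and script paths below are placeholders, since the build layout is not shown in this diff): the benchmark binary can write a JSON report via Google Benchmark's standard --benchmark_out flags, and that report is what the plotting script reads from sys.argv[1].

import subprocess
import sys

bench_bin = "./build/bench/dmt_bench"   # placeholder path to the ${PROJECT_NAME}_bench target
report = "fdmt_bench.json"              # placeholder report file

# --benchmark_out / --benchmark_out_format are standard Google Benchmark flags.
subprocess.run(
    [bench_bin, f"--benchmark_out={report}", "--benchmark_out_format=json"],
    check=True,
)

# Hand the JSON report to the plotting script (placeholder script path).
subprocess.run([sys.executable, "bench/plot_bench.py", report], check=True)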