Skip to content

Commit

Permalink
Merge pull request #3 from psrenergy/px/benchmark
Browse files Browse the repository at this point in the history
Add Benchmark Suite + Utilities
  • Loading branch information
pedromxavier authored Jul 29, 2023
2 parents ca97082 + 5aab612 commit 5e1243a
Show file tree
Hide file tree
Showing 30 changed files with 513 additions and 116 deletions.
31 changes: 31 additions & 0 deletions .github/workflows/benchmark.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,31 @@
# Benchmark workflow: on every PR update, run the suite against `main` and
# against the PR branch, then post the comparison report as a PR comment.
# NOTE(review): indentation was reconstructed from the flattened paste;
# confirm against the repository's .github/workflows/benchmark.yml.
name: Benchmark
on:
  pull_request:
    types: [opened, synchronize, reopened]
jobs:
  test:
    name: Julia ${{ matrix.version }} - ${{ matrix.os }} - ${{ matrix.arch }} - ${{ github.event_name }}
    runs-on: ${{ matrix.os }}
    strategy:
      fail-fast: false
      matrix:
        include:
          - version: '1'
            os: ubuntu-latest
            arch: x64
    steps:
      - uses: actions/checkout@v2
      - uses: julia-actions/setup-julia@v1
        with:
          version: ${{ matrix.version }}
          arch: ${{ matrix.arch }}
      - uses: julia-actions/julia-buildpkg@v1
      # Benchmark `main` first, then the dev (PR) state, then build the report.
      - name: Run Benchmarks
        run: |
          julia --proj=benchmark benchmark/benchmark.jl --run --main
          julia --proj=benchmark benchmark/benchmark.jl --run --dev
          julia --proj=benchmark benchmark/report.jl --run
      # Post the generated markdown report as a pull-request comment.
      - name: Comment PR
        uses: thollander/actions-comment-pull-request@v2
        with:
          filePath: benchmark/data/REPORT.md
12 changes: 7 additions & 5 deletions Project.toml
Original file line number Diff line number Diff line change
@@ -1,11 +1,13 @@
name = "PseudoBooleanOptimization"
uuid = "c8fa9a04-bc42-452d-8558-dc51757be744"
authors = ["pedromxavier <[email protected]>"]
version = "0.1.1"

[deps]
LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
MutableArithmetics = "d8a4904e-b15c-11e9-3269-09a3773c0cb0"
Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"

[compat]
MutableArithmetics = "1.3"
julia = "1.6"
1 change: 1 addition & 0 deletions benchmark/.gitignore
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
/data
1 change: 1 addition & 0 deletions benchmark/Project.toml
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
[deps]
BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf"
PseudoBooleanOptimization = "c8fa9a04-bc42-452d-8558-dc51757be744"
Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
15 changes: 15 additions & 0 deletions benchmark/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
# Benchmark

This benchmark suite should encompass performance measurements for

- [ ] Constructors
- [ ] Operators
- [ ] `+`
- [ ] `-`
- [ ] `*`
- [ ] `/`
- [ ] `^`

Operators should be evaluated for all pseudo-Boolean function implementations and using different coefficient distributions.

The benchmark will first be run against the latest `main` branch commit and then against the current development environment.
97 changes: 71 additions & 26 deletions benchmark/benchmark.jl
Original file line number Diff line number Diff line change
@@ -1,34 +1,79 @@
import Pkg

# Benchmark the published reference: install the package from the `main` branch.
# NOTE: the membership operator was lost in the paste; `in` (≡ `∈`) restored.
if "--main" in ARGS
    Pkg.add(; name = "PseudoBooleanOptimization", rev = "main")
end

# Benchmark the development state: use the local working copy (repo root).
if "--dev" in ARGS
    Pkg.develop(; path = joinpath(@__DIR__, ".."))
end

using Random
using BenchmarkTools
using PseudoBooleanOptimization
const PBO = PseudoBooleanOptimization

# Fixed seed so random inputs are reproducible across `main`/`dev` runs.
Random.seed!(0)

# Top-level group every sub-suite registers into.
const SUITE = BenchmarkGroup()

# NOTE(review): the paste fused removed diff lines (old `f`/`g` fixtures, an
# unqualified `seed!(0)`, and a superseded `SUITE["operators"]` definition)
# with the new include-based suite; only the committed state is kept below.
include("suites/constructors.jl")

benchmark_constructors!(SUITE, PBO.PBF{Int,Float64})

include("suites/operators.jl")

benchmark_operators!(SUITE, PBO.PBF{Int,Float64})

include("suites/quadratization.jl")

benchmark_quadratization!(SUITE, PBO.PBF{Int,Float64})

"""
    benchmark_main(suite)

Tune `suite`, run it for the `main` version of the package, and save the
tuning parameters (`params.json`) and the results (`results-main.json`)
under `benchmark/data`. Returns `nothing`.
"""
function benchmark_main(suite)
    data_path = joinpath(@__DIR__, "data")

    mkpath(data_path) # Create if not exists

    params_path  = joinpath(data_path, "params.json")
    results_path = joinpath(data_path, "results-main.json")

    # Parameters are saved so the dev run can reuse them (comparable timings).
    @info "Generating parameters @ main"
    BenchmarkTools.tune!(suite)
    BenchmarkTools.save(params_path, BenchmarkTools.params(suite))

    @info "Running benchmark @ main"
    results = BenchmarkTools.run(suite)

    BenchmarkTools.save(results_path, results)

    return nothing
end

"""
    benchmark_dev(suite)

Run `suite` for the current development environment, reusing the tuning
parameters produced by [`benchmark_main`](@ref), and save the results as
`results-dev.json` under `benchmark/data`. Returns `nothing`.
"""
function benchmark_dev(suite)
    data_path = joinpath(@__DIR__, "data")

    mkpath(data_path) # Create if not exists

    params_path  = joinpath(data_path, "params.json")
    results_path = joinpath(data_path, "results-dev.json")

    # Reuse the parameters tuned on `main` so both runs are directly comparable.
    @info "Loading parameters @ dev"
    BenchmarkTools.loadparams!(suite, first(BenchmarkTools.load(params_path)), :evals, :samples)

    @info "Running benchmarks @ dev"
    results = BenchmarkTools.run(suite)

    BenchmarkTools.save(results_path, results)

    return nothing
end

# Command-line dispatch; `--run` gates execution so the file can be
# include'd without side effects. (`in` restored — lost in the paste.)
if "--run" in ARGS
    if "--main" in ARGS
        benchmark_main(SUITE)
    end

    if "--dev" in ARGS
        benchmark_dev(SUITE)
    end
end
Empty file removed benchmark/dict/function.jl
Empty file.
Empty file removed benchmark/dict/operators.jl
Empty file.
104 changes: 104 additions & 0 deletions benchmark/report.jl
Original file line number Diff line number Diff line change
@@ -0,0 +1,104 @@
using BenchmarkTools

"""
    get_results(data_path)

Load the serialized `main` and `dev` benchmark results from `data_path` and
pair them up via `get_results(main_results, dev_results)`.
"""
function get_results(data_path)
    main_path = joinpath(data_path, "results-main.json")
    dev_path  = joinpath(data_path, "results-dev.json")

    # `BenchmarkTools.load` returns a one-element vector; unwrap it.
    main_results = first(BenchmarkTools.load(main_path))
    dev_results  = first(BenchmarkTools.load(dev_path))

    return get_results(main_results, dev_results)
end

"""
    get_results(main_results, dev_results)

Recursively walk `dev_results`, mirroring its group structure, and pair each
trial with the matching trial from `main_results` as a `(main, dev)` tuple.
"""
function get_results(main_results, dev_results)
    paired = Dict{String,Any}()

    for (name, entry) in dev_results
        if entry isa BenchmarkTools.BenchmarkGroup
            # Nested group: recurse with the corresponding main-side group.
            paired[name] = get_results(main_results[name], entry)
        elseif entry isa BenchmarkTools.Trial
            # Leaf: keep both trials side by side for later comparison.
            paired[name] = (main_results[name], entry)
        end
    end

    return paired
end

"""
    status_emoji(status::Symbol)

Map a `BenchmarkTools.judge` time verdict (`:regression`, `:improvement`,
`:invariant`) to an emoji used in the markdown report table.
"""
function status_emoji(status::Symbol)
    if status == :regression
        # NOTE(review): the pasted source returned "" here — almost certainly
        # a stripped emoji; use a visible marker so regressions stand out.
        return "⚠️"
    elseif status == :improvement
        return "🎉"
    elseif status == :invariant
        return "🟰"
    else
        # Unknown verdict — also "" in the paste; make it visible.
        return "❓"
    end
end

"""
    compare_results(results; keypath = "")

Flatten the nested `(main, dev)` trial pairs in `results` into sorted
markdown table rows, one per benchmark case. `keypath` accumulates the
slash-separated case identifier during recursion.
"""
function compare_results(results; keypath = "")
    rows = []

    for (name, entry) in results
        if entry isa Dict
            # Nested group: recurse, extending the case path.
            append!(rows, compare_results(entry; keypath = "$keypath/$name"))
        elseif entry isa Tuple
            main_trial, dev_trial = entry

            case_id = "$keypath/$name"

            # Summary statistics for both runs.
            main_μ = BenchmarkTools.mean(main_trial)
            main_m = BenchmarkTools.median(main_trial)
            main_σ = BenchmarkTools.std(main_trial)

            dev_μ = BenchmarkTools.mean(dev_trial)
            dev_m = BenchmarkTools.median(dev_trial)
            dev_σ = BenchmarkTools.std(dev_trial)

            # Judge dev vs. main on median time with a 10% tolerance band.
            cmp_m = BenchmarkTools.judge(dev_m, main_m; time_tolerance = 0.1)

            status = status_emoji(BenchmarkTools.time(cmp_m))
            ratio = BenchmarkTools.prettypercent(BenchmarkTools.time(BenchmarkTools.ratio(cmp_m)))

            push!(
                rows,
                "| `$case_id` | $(BenchmarkTools.prettytime(BenchmarkTools.time(main_μ))) ($(BenchmarkTools.prettytime(BenchmarkTools.time(main_m)))) ± $(BenchmarkTools.prettytime(BenchmarkTools.time(main_σ))) | $(BenchmarkTools.prettytime(BenchmarkTools.time(dev_μ))) ($(BenchmarkTools.prettytime(BenchmarkTools.time(dev_m)))) ± $(BenchmarkTools.prettytime(BenchmarkTools.time(dev_σ))) | $(status) ($(ratio)) |"
            )
        end
    end

    return sort!(rows)
end

"""
    write_report(report, data_path)

Write the table rows in `report` to `\$data_path/REPORT.md` as a
GitHub-flavored markdown comparison table. Returns `nothing`.
"""
function write_report(report, data_path)
    open(joinpath(data_path, "REPORT.md"), "w") do io
        # Header and table column layout.
        println(io, "# Performance Report - `main` vs. `dev`")
        println(io)
        println(io, "| case | `main` | `dev` | diff |")
        println(io, "| :--- | :----: | :---: | :--: |")

        # One pre-formatted markdown row per benchmark case.
        foreach(row -> println(io, row), report)
    end

    return nothing
end

"""
    main()

Report pipeline: load the `main`/`dev` results from `benchmark/data`,
compare them, and write `REPORT.md`. Returns `nothing`.
"""
function main()
    data_path = joinpath(@__DIR__, "data")

    write_report(compare_results(get_results(data_path)), data_path)

    return nothing
end

# Only generate the report when explicitly requested via `--run`.
# (`in` restored — the membership operator was lost in the paste.)
if "--run" in ARGS
    main()
end
5 changes: 5 additions & 0 deletions benchmark/suites/constructors.jl
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
# Register the "constructors" benchmark group on `suite` for function type `F`.
# NOTE(review): currently a stub — the group is created empty and the type
# parameter `F` is accepted for dispatch but not yet used; constructor
# benchmarks are still unchecked in the README's task list.
function benchmark_constructors!(suite, ::Type{F}) where {V,T,F<:PBO.AbstractFunction{V,T}}
    suite["constructors"] = BenchmarkGroup()

    return nothing
end
Loading

0 comments on commit 5e1243a

Please sign in to comment.