Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Refactor test/Benchmarks #2234

Merged
merged 1 commit into from
Jun 29, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
80 changes: 45 additions & 35 deletions test/Benchmarks/Benchmarks.jl
Original file line number Diff line number Diff line change
Expand Up @@ -4,62 +4,72 @@
# Use of this source code is governed by an MIT-style license that can be found
# in the LICENSE.md file or at https://opensource.org/licenses/MIT.

module TestBenchmarks

using Test

import MathOptInterface as MOI
import MathOptInterface.Utilities as MOIU

# Total number of registered benchmarks. NOTE(review): the refactored tests
# below compute `length(MOI.Benchmarks.BENCHMARKS)` directly, so this constant
# appears to be unused — confirm no other caller before removing it.
const NUM_BENCHMARKS = length(MOI.Benchmarks.BENCHMARKS)
"""
    runtests()

Run every function in this module whose name starts with `test_`, wrapping
each call in its own `@testset` so failures are reported per-test.
"""
function runtests()
    all_names = names(@__MODULE__; all = true)
    test_names = filter(n -> startswith(string(n), "test_"), all_names)
    for test_name in test_names
        @testset "$(test_name)" begin
            getfield(@__MODULE__, test_name)()
        end
    end
    return
end

@testset "suite" begin
"""
    test_suite()

Check that `MOI.Benchmarks.suite` builds one benchmark per entry in
`MOI.Benchmarks.BENCHMARKS`, and that the `exclude` keyword drops the
benchmarks whose names match the given regular expressions.
"""
function test_suite()
    suite = MOI.Benchmarks.suite() do
        return MOI.Utilities.MockOptimizer(MOI.Utilities.Model{Float64}())
    end
    # Every registered benchmark must appear in the unfiltered suite.
    @test length(suite.data) == length(MOI.Benchmarks.BENCHMARKS)
    suite = MOI.Benchmarks.suite(exclude = [r"delete_"]) do
        return MOI.Utilities.MockOptimizer(MOI.Utilities.Model{Float64}())
    end
    # Excluding `delete_` must remove at least 3 benchmarks while leaving a
    # non-trivial number (>= 6). Deriving the upper bound from `BENCHMARKS`
    # avoids a hard-coded count that goes stale as benchmarks are added.
    @test 6 <= length(suite.data) <= length(MOI.Benchmarks.BENCHMARKS) - 3
    return
end

@testset "Perform benchmark" begin
"""
    test_baseline()

End-to-end check of the baseline workflow: `create_baseline` must write the
params and baseline JSON files, and `compare_against_baseline` must read them
back and produce a `report.txt` in which every benchmark is `=> invariant`
(same suite, same machine, so no regression is expected). All files created
in `@__DIR__` are removed before returning.
"""
function test_baseline()
    params = joinpath(@__DIR__, "baseline_params.json")
    baseline = joinpath(@__DIR__, "baseline_baseline.json")
    # Guard against stale artifacts from a previous aborted run.
    @test !isfile(params)
    @test !isfile(baseline)
    suite = MOI.Benchmarks.suite() do
        return MOI.Utilities.MockOptimizer(MOI.Utilities.Model{Float64}())
    end
    # `samples = 1` keeps the benchmark run fast; timing accuracy is
    # irrelevant here because we only compare a suite against itself.
    MOI.Benchmarks.create_baseline(
        suite,
        "baseline";
        directory = @__DIR__,
        samples = 1,
        verbose = true,
    )
    @test isfile(params)
    @test isfile(baseline)
    suite = MOI.Benchmarks.suite() do
        return MOI.Utilities.MockOptimizer(MOI.Utilities.Model{Float64}())
    end
    MOI.Benchmarks.compare_against_baseline(
        suite,
        "baseline";
        directory = @__DIR__,
        samples = 1,
        verbose = true,
    )
    rm(params)
    rm(baseline)
    # Comparing a suite against its own baseline must report no change.
    report = read(joinpath(@__DIR__, "report.txt"), String)
    @test occursin("=> invariant", report)
    rm(joinpath(@__DIR__, "report.txt"))
    return
end

end

TestBenchmarks.runtests()
1 change: 1 addition & 0 deletions test/runtests.jl
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@ end
# Run each submodule's test file in turn. Each file defines and immediately
# runs its own `Test<Name>` module (see e.g. Benchmarks/Benchmarks.jl).
for submodule in
    ["Nonlinear", "Bridges", "FileFormats", "Test", "Utilities", "Benchmarks"]
    include("$(submodule)/$(submodule).jl")
    GC.gc() # Force GC run here to reduce memory pressure
end

# Test hygiene of @model macro
Expand Down