Skip to content

Commit f9e0ab9

Browse files
authored
[breaking] move BenchmarkTools.jl to a package extension (#2982)
This is a breaking change because it requires the user to install and load BenchmarkTools.jl. We justify releasing it in a minor release of MathOptInterface because we assess that there are very few users of it, and because it should be used only during package development by advanced users. We reserve the right to revert this commit in a future release of MOI if it causes problems in the ecosystem.
1 parent fa067ca commit f9e0ab9

File tree

5 files changed

+132
-71
lines changed

5 files changed

+132
-71
lines changed

Project.toml

Lines changed: 8 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,6 @@ uuid = "b8f27783-ece8-5eb3-8dc8-9495eed66fee"
33
version = "1.50.1"
44

55
[deps]
6-
BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf"
76
CodecBzip2 = "523fee87-0ab8-5b00-afb7-3ecf72e48cfd"
87
CodecZlib = "944b1d66-785c-5afd-91f1-9de20f533193"
98
ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210"
@@ -18,6 +17,12 @@ SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
1817
SpecialFunctions = "276daf66-3868-5448-9aa4-cd146d93841b"
1918
Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
2019

20+
[weakdeps]
21+
BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf"
22+
23+
[extensions]
24+
MathOptInterfaceBenchmarkToolsExt = "BenchmarkTools"
25+
2126
[compat]
2227
BenchmarkTools = "1"
2328
CodecBzip2 = "0.6, 0.7, 0.8"
@@ -38,8 +43,9 @@ Test = "1"
3843
julia = "1.10"
3944

4045
[extras]
46+
BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf"
4147
JSONSchema = "7d188eb4-7ad8-530c-ae41-71a32a6d4692"
4248
ParallelTestRunner = "d3525ed8-44d0-4b2c-a655-542cee43accc"
4349

4450
[targets]
45-
test = ["JSONSchema", "ParallelTestRunner"]
51+
test = ["BenchmarkTools", "JSONSchema", "ParallelTestRunner"]

docs/src/submodules/Benchmarks/overview.md

Lines changed: 19 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -9,13 +9,29 @@ DocTestFilters = [r"MathOptInterface|MOI"]
99
# The `Benchmarks` submodule
1010

1111
To aid the development of efficient solver wrappers, MathOptInterface provides
12-
benchmarking capability. Benchmarking a wrapper follows a two-step process.
12+
a suite of benchmarks in the `MOI.Benchmarks` submodule.
13+
14+
!!! warning
15+
To use this submodule you must first install and load
16+
[BenchmarkTools.jl](https://github.com/JuliaCI/BenchmarkTools.jl).
17+
```julia
18+
import Pkg
19+
Pkg.add("BenchmarkTools")
20+
import BenchmarkTools
21+
```
22+
23+
## Benchmarking a solver wrapper
24+
25+
Benchmarking a wrapper follows a two-step process.
1326

1427
First, prior to making changes, create a baseline for the benchmark results on a
1528
given benchmark suite as follows:
1629

1730
```julia
18-
using SolverPackage # Replace with your choice of solver.
31+
# You must load BenchmarkTools.jl to enable MOI.Benchmarks
32+
import BenchmarkTools
33+
# Replace `SolverPackage` with your choice of solver
34+
using SolverPackage
1935
import MathOptInterface as MOI
2036

2137
suite = MOI.Benchmarks.suite() do
@@ -33,6 +49,7 @@ Second, after making changes to the package, re-run the benchmark suite and
3349
compare to the prior saved results:
3450

3551
```julia
52+
import BenchmarkTools
3653
using SolverPackage
3754
import MathOptInterface as MOI
3855

Lines changed: 79 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,79 @@
1+
# Copyright (c) 2017: Miles Lubin and contributors
2+
# Copyright (c) 2017: Google Inc.
3+
#
4+
# Use of this source code is governed by an MIT-style license that can be found
5+
# in the LICENSE.md file or at https://opensource.org/licenses/MIT.
6+
7+
module MathOptInterfaceBenchmarkToolsExt
8+
9+
import BenchmarkTools
10+
import MathOptInterface as MOI
11+
12+
function MOI.Benchmarks.suite(
13+
new_model::Function;
14+
exclude::Vector{Regex} = Regex[],
15+
)
16+
group = BenchmarkTools.BenchmarkGroup()
17+
for (name, func) in MOI.Benchmarks.BENCHMARKS
18+
if any(occursin.(exclude, Ref(name)))
19+
continue
20+
end
21+
group[name] = BenchmarkTools.@benchmarkable $func($new_model)
22+
end
23+
return group
24+
end
25+
26+
function MOI.Benchmarks.create_baseline(
27+
suite::BenchmarkTools.BenchmarkGroup,
28+
name::String;
29+
directory::String = "",
30+
kwargs...,
31+
)
32+
BenchmarkTools.tune!(suite)
33+
BenchmarkTools.save(
34+
joinpath(directory, name * "_params.json"),
35+
BenchmarkTools.params(suite),
36+
)
37+
results = BenchmarkTools.run(suite; kwargs...)
38+
BenchmarkTools.save(joinpath(directory, name * "_baseline.json"), results)
39+
return
40+
end
41+
42+
function MOI.Benchmarks.compare_against_baseline(
43+
suite::BenchmarkTools.BenchmarkGroup,
44+
name::String;
45+
directory::String = "",
46+
report_filename::String = "report.txt",
47+
kwargs...,
48+
)
49+
params_filename = joinpath(directory, name * "_params.json")
50+
baseline_filename = joinpath(directory, name * "_baseline.json")
51+
if !isfile(params_filename) || !isfile(baseline_filename)
52+
error("You must create a baseline with `create_baseline` first.")
53+
end
54+
BenchmarkTools.loadparams!(
55+
suite,
56+
BenchmarkTools.load(params_filename)[1],
57+
:evals,
58+
:samples,
59+
)
60+
new_results = BenchmarkTools.run(suite; kwargs...)
61+
old_results = BenchmarkTools.load(baseline_filename)[1]
62+
open(joinpath(directory, report_filename), "w") do io
63+
println(stdout, "\n========== Results ==========")
64+
println(io, "\n========== Results ==========")
65+
for key in keys(new_results)
66+
judgement = BenchmarkTools.judge(
67+
BenchmarkTools.median(new_results[key]),
68+
BenchmarkTools.median(old_results[key]),
69+
)
70+
println(stdout, "\n", key)
71+
println(io, "\n", key)
72+
show(stdout, MIME"text/plain"(), judgement)
73+
show(io, MIME"text/plain"(), judgement)
74+
end
75+
end
76+
return
77+
end
78+
79+
end # module

src/Benchmarks/Benchmarks.jl

Lines changed: 25 additions & 67 deletions
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,6 @@
66

77
module Benchmarks
88

9-
import BenchmarkTools
109
import MathOptInterface as MOI
1110

1211
const BENCHMARKS = Dict{String,Function}()
@@ -22,26 +21,24 @@ arguments, and returns a new instance of the optimizer you wish to benchmark.
2221
2322
Use `exclude` to exclude a subset of benchmarks.
2423
24+
## BenchmarkTools
25+
26+
To use this function you must first install and load the `BenchmarkTools.jl`
27+
package.
28+
2529
## Example
2630
2731
```julia
28-
julia> MOI.Benchmarks.suite() do
29-
return GLPK.Optimizer()
30-
end
32+
julia> import BenchmarkTools, GLPK, Gurobi
33+
34+
julia> MOI.Benchmarks.suite(GLPK.Optimizer)
3135
3236
julia> MOI.Benchmarks.suite(; exclude = [r"delete"]) do
3337
return Gurobi.Optimizer()
3438
end
3539
```
3640
"""
37-
function suite(new_model::Function; exclude::Vector{Regex} = Regex[])
38-
group = BenchmarkTools.BenchmarkGroup()
39-
for (name, func) in BENCHMARKS
40-
any(occursin.(exclude, Ref(name))) && continue
41-
group[name] = BenchmarkTools.@benchmarkable $func($new_model)
42-
end
43-
return group
44-
end
41+
function suite end
4542

4643
"""
4744
create_baseline(suite, name::String; directory::String = ""; kwargs...)
@@ -50,12 +47,17 @@ Run all benchmarks in `suite` and save to files called `name` in `directory`.
5047
5148
Extra `kwargs` are passed to `BenchmarkTools.run`.
5249
50+
## BenchmarkTools
51+
52+
To use this function you must first install and load the `BenchmarkTools.jl`
53+
package.
54+
5355
## Example
5456
5557
```julia
56-
julia> import GLPK
58+
julia> import BenchmarkTools, GLPK
5759
58-
julia> my_suite = MOI.Benchmarks.suite(() -> GLPK.Optimizer());
60+
julia> my_suite = MOI.Benchmarks.suite(GLPK.Optimizer);
5961
6062
julia> MOI.Benchmarks.create_baseline(
6163
my_suite,
@@ -65,21 +67,7 @@ julia> MOI.Benchmarks.create_baseline(
6567
)
6668
```
6769
"""
68-
function create_baseline(
69-
suite::BenchmarkTools.BenchmarkGroup,
70-
name::String;
71-
directory::String = "",
72-
kwargs...,
73-
)
74-
BenchmarkTools.tune!(suite)
75-
BenchmarkTools.save(
76-
joinpath(directory, name * "_params.json"),
77-
BenchmarkTools.params(suite),
78-
)
79-
results = BenchmarkTools.run(suite; kwargs...)
80-
BenchmarkTools.save(joinpath(directory, name * "_baseline.json"), results)
81-
return
82-
end
70+
function create_baseline end
8371

8472
"""
8573
compare_against_baseline(
@@ -95,12 +83,17 @@ A report summarizing the comparison is written to `report_filename` in
9583
9684
Extra `kwargs` are passed to `BenchmarkTools.run`.
9785
86+
## BenchmarkTools
87+
88+
To use this function you must first install and load the `BenchmarkTools.jl`
89+
package.
90+
9891
## Example
9992
10093
```julia
101-
julia> import GLPK
94+
julia> import BenchmarkTools, GLPK
10295
103-
julia> my_suite = MOI.Benchmarks.suite(() -> GLPK.Optimizer());
96+
julia> my_suite = MOI.Benchmarks.suite(GLPK.Optimizer);
10497
10598
julia> MOI.Benchmarks.compare_against_baseline(
10699
my_suite,
@@ -110,42 +103,7 @@ julia> MOI.Benchmarks.compare_against_baseline(
110103
)
111104
```
112105
"""
113-
function compare_against_baseline(
114-
suite::BenchmarkTools.BenchmarkGroup,
115-
name::String;
116-
directory::String = "",
117-
report_filename::String = "report.txt",
118-
kwargs...,
119-
)
120-
params_filename = joinpath(directory, name * "_params.json")
121-
baseline_filename = joinpath(directory, name * "_baseline.json")
122-
if !isfile(params_filename) || !isfile(baseline_filename)
123-
error("You must create a baseline with `create_baseline` first.")
124-
end
125-
BenchmarkTools.loadparams!(
126-
suite,
127-
BenchmarkTools.load(params_filename)[1],
128-
:evals,
129-
:samples,
130-
)
131-
new_results = BenchmarkTools.run(suite; kwargs...)
132-
old_results = BenchmarkTools.load(baseline_filename)[1]
133-
open(joinpath(directory, report_filename), "w") do io
134-
println(stdout, "\n========== Results ==========")
135-
println(io, "\n========== Results ==========")
136-
for key in keys(new_results)
137-
judgement = BenchmarkTools.judge(
138-
BenchmarkTools.median(new_results[key]),
139-
BenchmarkTools.median(old_results[key]),
140-
)
141-
println(stdout, "\n", key)
142-
println(io, "\n", key)
143-
show(stdout, MIME"text/plain"(), judgement)
144-
show(io, MIME"text/plain"(), judgement)
145-
end
146-
end
147-
return
148-
end
106+
function compare_against_baseline end
149107

150108
###
151109
### Benchmarks

test/Benchmarks/test_Benchmarks.jl

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,7 @@ module TestBenchmarks
88

99
using Test
1010

11+
import BenchmarkTools
1112
import MathOptInterface as MOI
1213

1314
function runtests()

0 commit comments

Comments
 (0)