1111from rich .table import Table
1212from rich .text import Text
1313
14+ from pytest_codspeed import __semver_version__
1415from pytest_codspeed .instruments import Instrument
16+ from pytest_codspeed .instruments .hooks import InstrumentHooks
1517
1618if TYPE_CHECKING :
1719 from typing import Any , Callable
@@ -131,17 +133,26 @@ class Benchmark:
131133
132134
# NOTE(review): this span is a rendered unified-diff hunk, not runnable source.
# Old/new line numbers are fused onto each line (e.g. "133135"), "-" marks the
# removed (pre-change) line and "+" the added (post-change) line.
#
# What the diff does to run_benchmark():
#  1. Adds a leading `instrument_hooks: InstrumentHooks | None` parameter and
#     reflows the signature one-parameter-per-line.
#  2. Wraps the benchmarked callable in a local `__codspeed_root_frame__()`
#     closure so every timed call shares a recognizable root frame name
#     (presumably so CodSpeed's profiler can anchor flamegraphs — TODO confirm).
#  3. Brackets the measured rounds with instrument_hooks.start_benchmark() /
#     stop_benchmark() and reports the benchmark URI via
#     set_executed_benchmark(uri), all guarded by `if instrument_hooks:` so the
#     no-hooks (None) path is unchanged.
133135def run_benchmark (
134- name : str , uri : str , fn : Callable [P , T ], args , kwargs , config : BenchmarkConfig
136+ instrument_hooks : InstrumentHooks | None ,
137+ name : str ,
138+ uri : str ,
139+ fn : Callable [P , T ],
140+ args ,
141+ kwargs ,
142+ config : BenchmarkConfig ,
135143) -> tuple [Benchmark , T ]:
# Closure binds fn/args/kwargs once; every timed invocation below goes
# through this single root frame instead of calling fn directly.
144+ def __codspeed_root_frame__ () -> T :
145+ return fn (* args , ** kwargs )
146+
# One untimed call up front produces the return value handed back to the caller.
136147 # Compute the actual result of the function
137- out = fn ( * args , ** kwargs )
148+ out = __codspeed_root_frame__ ( )
138149
# Warmup loop: repeatedly time single calls until config.warmup_time_ns of
# wall time has elapsed, collecting per-call durations in times_per_round_ns.
139150 # Warmup
140151 times_per_round_ns : list [float ] = []
141152 warmup_start = start = perf_counter_ns ()
142153 while True :
143154 start = perf_counter_ns ()
144- fn ( * args , ** kwargs )
155+ __codspeed_root_frame__ ( )
145156 end = perf_counter_ns ()
146157 times_per_round_ns .append (end - start )
147158 if end - warmup_start > config .warmup_time_ns :
# NOTE(review): new-file lines 158-176 are NOT shown in this diff view — that
# hidden span presumably derives `iter_per_round` and `rounds` from the warmup
# timings (both are used below but defined in the missing lines); verify
# against the full file before editing.
@@ -166,16 +177,21 @@ def run_benchmark(
166177 # Benchmark
167178 iter_range = range (iter_per_round )
168179 run_start = perf_counter_ns ()
# Hooks (when available) mark the start of the measured region; the None
# check keeps walltime runs working without the native hooks library.
180+ if instrument_hooks :
181+ instrument_hooks .start_benchmark ()
# Timed region: `rounds` rounds of `iter_per_round` calls each; each round's
# total duration is appended, and the loop bails early once the cumulative
# run time exceeds config.max_time_ns.
169182 for _ in range (rounds ):
170183 start = perf_counter_ns ()
171184 for _ in iter_range :
172- fn ( * args , ** kwargs )
185+ __codspeed_root_frame__ ( )
173186 end = perf_counter_ns ()
174187 times_per_round_ns .append (end - start )
175188
176189 if end - run_start > config .max_time_ns :
177190 # TODO: log something
178191 break
# Close the measured region and tell the hooks which benchmark (by URI) the
# recorded data belongs to.
192+ if instrument_hooks :
193+ instrument_hooks .stop_benchmark ()
194+ instrument_hooks .set_executed_benchmark (uri )
179195 benchmark_end = perf_counter_ns ()
180196 total_time = (benchmark_end - run_start ) / 1e9
181197
@@ -192,8 +208,15 @@ def run_benchmark(
192208
193209class WallTimeInstrument (Instrument ):
194210 instrument = "walltime"
211+ instrument_hooks : InstrumentHooks | None
195212
196213 def __init__ (self , config : CodSpeedConfig ) -> None :
214+ try :
215+ self .instrument_hooks = InstrumentHooks ()
216+ self .instrument_hooks .set_integration ("pytest-codspeed" , __semver_version__ )
217+ except RuntimeError :
218+ self .instrument_hooks = None
219+
197220 self .config = config
198221 self .benchmarks : list [Benchmark ] = []
199222
@@ -209,6 +232,7 @@ def measure(
209232 ** kwargs : P .kwargs ,
210233 ) -> T :
211234 bench , out = run_benchmark (
235+ instrument_hooks = self .instrument_hooks ,
212236 name = name ,
213237 uri = uri ,
214238 fn = fn ,
0 commit comments