@@ -1,5 +1,7 @@
 from __future__ import annotations
 
+import os
+import warnings
 from dataclasses import asdict, dataclass
 from math import ceil
 from statistics import mean, quantiles, stdev
@@ -11,7 +13,9 @@
 from rich.table import Table
 from rich.text import Text
 
+from pytest_codspeed import __semver_version__
 from pytest_codspeed.instruments import Instrument
+from pytest_codspeed.instruments.hooks import InstrumentHooks
 
 if TYPE_CHECKING:
     from typing import Any, Callable
@@ -131,17 +135,26 @@ class Benchmark:
 
 
 def run_benchmark(
-    name: str, uri: str, fn: Callable[P, T], args, kwargs, config: BenchmarkConfig
+    instrument_hooks: InstrumentHooks | None,
+    name: str,
+    uri: str,
+    fn: Callable[P, T],
+    args,
+    kwargs,
+    config: BenchmarkConfig,
 ) -> tuple[Benchmark, T]:
+    def __codspeed_root_frame__() -> T:
+        return fn(*args, **kwargs)
+
     # Compute the actual result of the function
-    out = fn(*args, **kwargs)
+    out = __codspeed_root_frame__()
 
     # Warmup
     times_per_round_ns: list[float] = []
     warmup_start = start = perf_counter_ns()
     while True:
         start = perf_counter_ns()
-        fn(*args, **kwargs)
+        __codspeed_root_frame__()
         end = perf_counter_ns()
         times_per_round_ns.append(end - start)
         if end - warmup_start > config.warmup_time_ns:
@@ -166,16 +179,21 @@ def run_benchmark(
     # Benchmark
     iter_range = range(iter_per_round)
     run_start = perf_counter_ns()
+    if instrument_hooks:
+        instrument_hooks.start_benchmark()
     for _ in range(rounds):
         start = perf_counter_ns()
         for _ in iter_range:
-            fn(*args, **kwargs)
+            __codspeed_root_frame__()
         end = perf_counter_ns()
         times_per_round_ns.append(end - start)
 
         if end - run_start > config.max_time_ns:
             # TODO: log something
             break
+    if instrument_hooks:
+        instrument_hooks.stop_benchmark()
+        instrument_hooks.set_executed_benchmark(uri)
     benchmark_end = perf_counter_ns()
     total_time = (benchmark_end - run_start) / 1e9
 
@@ -192,8 +210,19 @@ def run_benchmark(
 
 class WallTimeInstrument(Instrument):
     instrument = "walltime"
+    instrument_hooks: InstrumentHooks | None
 
     def __init__(self, config: CodSpeedConfig) -> None:
+        try:
+            self.instrument_hooks = InstrumentHooks()
+            self.instrument_hooks.set_integration("pytest-codspeed", __semver_version__)
+        except RuntimeError as e:
+            if os.environ.get("CODSPEED_ENV") is not None:
+                warnings.warn(
+                    f"Failed to initialize instrument hooks: {e}", RuntimeWarning
+                )
+            self.instrument_hooks = None
+
         self.config = config
         self.benchmarks: list[Benchmark] = []
 
@@ -209,6 +238,7 @@ def measure(
         **kwargs: P.kwargs,
     ) -> T:
         bench, out = run_benchmark(
+            instrument_hooks=self.instrument_hooks,
             name=name,
             uri=uri,
             fn=fn,
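
Not from the commit itself: a minimal sketch of the fail-open pattern that `WallTimeInstrument.__init__` adopts above. `InstrumentHooks()` can raise `RuntimeError` (for instance when its native library cannot be loaded); the failure is surfaced as a warning only when `CODSPEED_ENV` indicates a CodSpeed run, and every later use sits behind an `if instrument_hooks:` guard so plain wall-time measurement keeps working. `FakeHooks` below is a hypothetical stand-in for `InstrumentHooks`:

import os
import warnings


class FakeHooks:
    """Hypothetical stand-in for InstrumentHooks; always fails to initialize."""

    def __init__(self) -> None:
        raise RuntimeError("native instrument library not found")

    def start_benchmark(self) -> None: ...


hooks: FakeHooks | None
try:
    hooks = FakeHooks()
except RuntimeError as e:
    # Outside CodSpeed (CODSPEED_ENV unset) a missing hook library is
    # expected, so stay silent; inside, make the degradation visible.
    if os.environ.get("CODSPEED_ENV") is not None:
        warnings.warn(f"Failed to initialize instrument hooks: {e}", RuntimeWarning)
    hooks = None

# Mirrors the guards in run_benchmark above: timing works with or without hooks.
if hooks:
    hooks.start_benchmark()

The `__codspeed_root_frame__` wrapper introduced in the same diff presumably serves a related purpose: routing every benchmarked call through one stably named frame gives the instrumentation a recognizable marker in collected stacks.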