@@ -441,86 +441,88 @@ def _export(
441441 logger .warning ("Exporter already shutdown, ignoring batch" )
442442 return self ._result .FAILURE # type: ignore [reportReturnType]
443443
444- finish_export = self ._metrics .start_export (self ._count_data (data ))
445-
446- # FIXME remove this check if the export type for traces
447- # gets updated to a class that represents the proto
448- # TracesData and use the code below instead.
449- deadline_sec = time () + self ._timeout
450- for retry_num in range (_MAX_RETRYS ):
451- try :
452- if self ._client is None :
453- return self ._result .FAILURE
454- self ._client .Export (
455- request = self ._translate_data (data ),
456- metadata = self ._headers ,
457- timeout = deadline_sec - time (),
458- )
459- finish_export (None , None )
460- return self ._result .SUCCESS # type: ignore [reportReturnType]
461- except RpcError as error :
462- retry_info_bin = dict (error .trailing_metadata ()).get ( # type: ignore [reportAttributeAccessIssue]
463- "google.rpc.retryinfo-bin" # type: ignore [reportArgumentType]
464- )
465- # multiplying by a random number between .8 and 1.2 introduces a +/20% jitter to each backoff.
466- backoff_seconds = 2 ** retry_num * random .uniform (0.8 , 1.2 )
467- if retry_info_bin is not None :
468- retry_info = RetryInfo ()
469- retry_info .ParseFromString (retry_info_bin )
470- backoff_seconds = (
471- retry_info .retry_delay .seconds
472- + retry_info .retry_delay .nanos / 1.0e9
444+ with self ._metrics .export_operation (self ._count_data (data )) as result :
445+ # FIXME remove this check if the export type for traces
446+ # gets updated to a class that represents the proto
447+ # TracesData and use the code below instead.
448+ deadline_sec = time () + self ._timeout
449+ for retry_num in range (_MAX_RETRYS ):
450+ try :
451+ if self ._client is None :
452+ return self ._result .FAILURE
453+ self ._client .Export (
454+ request = self ._translate_data (data ),
455+ metadata = self ._headers ,
456+ timeout = deadline_sec - time (),
473457 )
474-
475- # For UNAVAILABLE errors, reinitialize the channel to force reconnection
476- if error .code () == StatusCode .UNAVAILABLE and retry_num == 0 : # type: ignore
477- logger .debug (
478- "Reinitializing gRPC channel for %s exporter due to UNAVAILABLE error" ,
479- self ._exporting ,
458+ return self ._result .SUCCESS # type: ignore [reportReturnType]
459+ except RpcError as error :
460+ retry_info_bin = dict (error .trailing_metadata ()).get ( # type: ignore [reportAttributeAccessIssue]
461+ "google.rpc.retryinfo-bin" # type: ignore [reportArgumentType]
480462 )
481- try :
482- if self ._channel :
483- self ._channel .close ()
484- except Exception as e :
463+ # multiplying by a random number between .8 and 1.2 introduces a +/-20% jitter to each backoff.
464+ backoff_seconds = 2 ** retry_num * random .uniform (0.8 , 1.2 )
465+ if retry_info_bin is not None :
466+ retry_info = RetryInfo ()
467+ retry_info .ParseFromString (retry_info_bin )
468+ backoff_seconds = (
469+ retry_info .retry_delay .seconds
470+ + retry_info .retry_delay .nanos / 1.0e9
471+ )
472+
473+ # For UNAVAILABLE errors, reinitialize the channel to force reconnection
474+ if (
475+ error .code () == StatusCode .UNAVAILABLE
476+ and retry_num == 0
477+ ): # type: ignore
485478 logger .debug (
486- "Error closing channel for %s exporter to %s: %s" ,
479+ "Reinitializing gRPC channel for %s exporter due to UNAVAILABLE error" ,
480+ self ._exporting ,
481+ )
482+ try :
483+ if self ._channel :
484+ self ._channel .close ()
485+ except Exception as e :
486+ logger .debug (
487+ "Error closing channel for %s exporter to %s: %s" ,
488+ self ._exporting ,
489+ self ._endpoint ,
490+ str (e ),
491+ )
492+ # Enable channel reconnection for subsequent calls
493+ self ._initialize_channel_and_stub ()
494+
495+ if (
496+ error .code () not in _RETRYABLE_ERROR_CODES # type: ignore [reportAttributeAccessIssue]
497+ or retry_num + 1 == _MAX_RETRYS
498+ or backoff_seconds > (deadline_sec - time ())
499+ or self ._shutdown
500+ ):
501+ logger .error (
502+ "Failed to export %s to %s, error code: %s" ,
487503 self ._exporting ,
488504 self ._endpoint ,
489- str (e ),
505+ error .code (), # type: ignore [reportAttributeAccessIssue]
506+ exc_info = error .code () == StatusCode .UNKNOWN , # type: ignore [reportAttributeAccessIssue]
490507 )
491- # Enable channel reconnection for subsequent calls
492- self ._initialize_channel_and_stub ()
493-
494- if (
495- error .code () not in _RETRYABLE_ERROR_CODES # type: ignore [reportAttributeAccessIssue]
496- or retry_num + 1 == _MAX_RETRYS
497- or backoff_seconds > (deadline_sec - time ())
498- or self ._shutdown
499- ):
500- logger .error (
501- "Failed to export %s to %s, error code: %s" ,
508+ result .error = error
509+ result .error_attrs = {
510+ RPC_RESPONSE_STATUS_CODE : error .code ().name
511+ }
512+ return self ._result .FAILURE # type: ignore [reportReturnType]
513+ logger .warning (
514+ "Transient error %s encountered while exporting %s to %s, retrying in %.2fs." ,
515+ error .code (), # type: ignore [reportAttributeAccessIssue]
502516 self ._exporting ,
503517 self ._endpoint ,
504- error .code (), # type: ignore [reportAttributeAccessIssue]
505- exc_info = error .code () == StatusCode .UNKNOWN , # type: ignore [reportAttributeAccessIssue]
518+ backoff_seconds ,
506519 )
507- finish_export (
508- error , {RPC_RESPONSE_STATUS_CODE : error .code ().name }
509- )
510- return self ._result .FAILURE # type: ignore [reportReturnType]
511- logger .warning (
512- "Transient error %s encountered while exporting %s to %s, retrying in %.2fs." ,
513- error .code (), # type: ignore [reportAttributeAccessIssue]
514- self ._exporting ,
515- self ._endpoint ,
516- backoff_seconds ,
517- )
518- shutdown = self ._shutdown_in_progress .wait (backoff_seconds )
519- if shutdown :
520- logger .warning ("Shutdown in progress, aborting retry." )
521- break
522- # Not possible to reach here but the linter is complaining.
523- return self ._result .FAILURE # type: ignore [reportReturnType]
520+ shutdown = self ._shutdown_in_progress .wait (backoff_seconds )
521+ if shutdown :
522+ logger .warning ("Shutdown in progress, aborting retry." )
523+ break
524+ # Not possible to reach here but the linter is complaining.
525+ return self ._result .FAILURE # type: ignore [reportReturnType]
524526
525527 def shutdown (self , timeout_millis : float = 30_000 , ** kwargs ) -> None :
526528 """
0 commit comments