@@ -536,13 +536,6 @@ private async Task<ChatResult> ProcessWithToolsAsync(
         ChatRequestOptions requestOptions,
         CancellationToken cancellationToken)
     {
-        NativeLogConfig.llama_log_set((level, message) => {
-            if (level == LLamaLogLevel.Error)
-            {
-                Console.Error.Write(message);
-            }
-        }); // Remove llama native logging
-
         var model = KnownModels.GetModel(chat.Model);
         var iterations = 0;
         var lastResponseTokens = new List<LLMTokenValue>();
@@ -551,12 +544,9 @@ private async Task<ChatResult> ProcessWithToolsAsync(
         while (iterations < MaxToolIterations)
         {
             var lastMsg = chat.Messages.Last();
-            await SendNotification(chat.Id, new LLMTokenValue
-            {
-                Type = TokenType.FullAnswer,
-                Text = $"Processing with tools... iteration {iterations + 1}\n\n"
-            }, false);
+            var tokenCallbackOrg = requestOptions.TokenCallback;
             requestOptions.InteractiveUpdates = false;
+            requestOptions.TokenCallback = null;
             lastResponseTokens = await ProcessChatRequest(chat, model, lastMsg, requestOptions, cancellationToken);
             lastMsg.MarkProcessed();
             lastResponse = string.Concat(lastResponseTokens.Select(x => x.Text));
@@ -590,6 +580,7 @@ private async Task<ChatResult> ProcessWithToolsAsync(
             else // Final response
             {
                 requestOptions.InteractiveUpdates = true;
+                requestOptions.TokenCallback = tokenCallbackOrg;
                 await SendNotification(chat.Id, new LLMTokenValue
                 {
                     Type = TokenType.FullAnswer,
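The net effect of the change is a "save, silence, restore" pattern: the caller's per-token streaming callback is remembered, nulled out (together with InteractiveUpdates) while the intermediate tool-selection iterations run, and put back just before the final response so only that answer is streamed to the client. Below is a minimal, self-contained sketch of that pattern, assuming ChatRequestOptions exposes TokenCallback and InteractiveUpdates as settable properties as the diff suggests; FakeRequestOptions and the callback's Action<string> signature are hypothetical stand-ins for illustration only.

```csharp
using System;
using System.Threading.Tasks;

// Hypothetical stand-in for ChatRequestOptions; only the two properties
// touched by the diff are modelled here.
class FakeRequestOptions
{
    public bool InteractiveUpdates { get; set; } = true;
    public Action<string>? TokenCallback { get; set; }
}

class TokenCallbackDemo
{
    static async Task Main()
    {
        var requestOptions = new FakeRequestOptions
        {
            TokenCallback = token => Console.Write(token) // streams tokens to the client
        };

        // Remember the caller's streaming callback before silencing it.
        var tokenCallbackOrg = requestOptions.TokenCallback;

        // Intermediate tool-call iterations: no interactive updates, no token streaming.
        requestOptions.InteractiveUpdates = false;
        requestOptions.TokenCallback = null;
        await Task.Delay(10); // stands in for each ProcessChatRequest(...) tool iteration

        // Final response: restore streaming so the user sees the answer as it is generated.
        requestOptions.InteractiveUpdates = true;
        requestOptions.TokenCallback = tokenCallbackOrg;
        requestOptions.TokenCallback?.Invoke("final answer\n");
    }
}
```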