@@ -276,7 +276,7 @@ private void AddMessagesToHistory(ChatSession session, List<Message> messages)
 
 
     [Experimental("KMEXP01")]
-    private static IKernelMemory CreateMemory(string modelName, string path,
+    private static IKernelMemory CreateMemory(string modelName, string? path,
         out KernelMemFix.LlamaSharpTextGenerator generator)
     {
         InferenceParams infParams = new() { AntiPrompts = ["INFO", "<|im_end|>", "Question:"] };
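
Note: the AntiPrompts list above is LLamaSharp's stop-sequence mechanism; generation halts as soon as the output contains any of those strings. A minimal standalone sketch of that behavior follows, assuming LLamaSharp's StatelessExecutor API; the model path and prompt are placeholders, not code from this commit:

    using System;
    using LLama;
    using LLama.Common;

    // Placeholder model path; point this at a real .gguf file to run the sketch.
    var modelParams = new ModelParams("model.gguf");
    using var weights = LLamaWeights.LoadFromFile(modelParams);
    var executor = new StatelessExecutor(weights, modelParams);

    // Same anti-prompts as in the diff: any match cuts generation off.
    var infParams = new InferenceParams { AntiPrompts = ["INFO", "<|im_end|>", "Question:"] };

    await foreach (var token in executor.InferAsync("Briefly explain Kernel Memory.", infParams))
        Console.Write(token);
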
@@ -307,7 +307,7 @@ private static IKernelMemory CreateMemory(string modelName, string path,
             .Build();
     }
 
-    internal static async Task<LLamaWeights> GetOrLoadModelAsync(string path, string modelKey)
+    internal static async Task<LLamaWeights> GetOrLoadModelAsync(string? path, string modelKey)
     {
         if (modelCache.TryGetValue(modelKey, out var cachedModel))
         {
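
Note: modelCache is declared outside this diff, so the hunk above only shows the lookup. A sketch of one way such a cache can be made safe for concurrent callers, assuming LLamaSharp's LLamaWeights.LoadFromFileAsync; the fallback to a local "models" folder for a null path is an assumption for illustration, not this commit's behavior:

    using System;
    using System.Collections.Concurrent;
    using System.IO;
    using System.Threading.Tasks;
    using LLama;
    using LLama.Common;

    internal static class ModelCacheSketch
    {
        // Hypothetical stand-in for the modelCache field this diff references.
        private static readonly ConcurrentDictionary<string, Task<LLamaWeights>> Cache = new();

        internal static Task<LLamaWeights> GetOrLoadAsync(string? path, string modelKey)
        {
            // Caching the Task rather than the loaded weights lets concurrent
            // callers share a single in-flight load for the same key.
            return Cache.GetOrAdd(modelKey, _ =>
            {
                // Assumption: a null path falls back to a local "models" folder.
                var resolved = path ?? Path.Combine(AppContext.BaseDirectory, "models", modelKey);
                return LLamaWeights.LoadFromFileAsync(new ModelParams(resolved));
            });
        }
    }
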
@@ -452,7 +452,7 @@ public static IKernelMemoryBuilder WithLLamaSharpTextGeneration(
 
     [Experimental("KMEXP01")]
     public static IKernelMemoryBuilder WithLLamaSharpMaINTemp(this IKernelMemoryBuilder builder,
-        LLamaSharpConfig config, string path, string modelName, out LlamaSharpTextGenerator generator)
+        LLamaSharpConfig config, string? path, string modelName, out LlamaSharpTextGenerator generator)
     {
         // Load the first model with caching.
         var model = LLMService.GetOrLoadModelAsync(path, modelName).Result;
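
Note: GetOrLoadModelAsync(...).Result blocks the calling thread until the model is loaded; that is tolerable inside this synchronous builder extension, but callers on a synchronization context should prefer awaiting the async path directly. For reference, a hypothetical call site for the extension, assuming the usual Kernel Memory builder types; the config path, build target, and omitted embedding setup are placeholders:

    using LLamaSharp.KernelMemory;
    using Microsoft.KernelMemory;
    // Assumes the namespace containing WithLLamaSharpMaINTemp is also imported.

    #pragma warning disable KMEXP01 // WithLLamaSharpMaINTemp is marked [Experimental("KMEXP01")].
    var config = new LLamaSharpConfig("models/model.gguf");
    var memory = new KernelMemoryBuilder()
        .WithLLamaSharpMaINTemp(config, path: null, modelName: "model.gguf", out var generator)
        // Remaining configuration (embeddings, storage) omitted from this sketch.
        .Build<MemoryServerless>();
    #pragma warning restore KMEXP01
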