@@ -678,18 +678,18 @@ model, err := openai.NewChatModel(ctx, &openai.ChatModelConfig{
678678 Seed : &seed, // Random seed
679679 LogitBias : map [string ]int {}, // Token bias
680680 User : &user, // User identifier
681-
681+
682682 ReasoningEffort :openai.ReasoningEffortLevelHigh , // Reasoning level, default "medium"
683-
683+
684684 Modalities : make ([]openai.Modality , 0 ), // Model response modality types: ["text","audio"] default text
685-
685+
686686 Audio : &openai.Audio { // Audio output parameters, required when modality includes audio
687687 Format: openai.AudioFormatMp3 ,
688688 Voice: openai.AudioVoiceAlloy ,
689689 },
690-
690+
691691 ExtraFields : map [string ]any{}, // Extra fields, will add or override request fields, used for experimental validation
692-
692+
693693})
694694```
695695
@@ -703,7 +703,7 @@ Conversation generation supports both normal mode and streaming mode:
703703``` go
704704// Invoke mode
705705response , err := model.Generate (ctx, messages)
706-
706+
707707// Streaming mode
708708stream , err := model.Stream (ctx, messages)
709709```
@@ -729,7 +729,7 @@ imageStr := base64.StdEncoding.EncodeToString(image)
729729messages := []*schema.Message {
730730 // System message
731731 schema.SystemMessage (" You are an assistant" ),
732-
732+
733733 // Multimodal message (with image)
734734 {
735735 Role: schema.User ,
@@ -793,33 +793,33 @@ package main
793793import (
 794794 " context"
 794794 " fmt"
 794794 " os"
 795795 " time"
796-
796+
797797 " github.com/cloudwego/eino-ext/components/model/openai"
798798 " github.com/cloudwego/eino/schema"
799799)
800800
801801func main () {
802802 ctx := context.Background ()
803-
803+
804804 // Initialize model
805805 model , err := openai.NewChatModel (ctx, &openai.ChatModelConfig {
806806 APIKey: " your-api-key" , // required
807807 Timeout: 30 * time.Second ,
808808 Model: " gpt-4" , // required
809-
809+
810810 // If the model supports audio generation and you need to generate audio, configure as follows
811811 // Modalities: []openai.Modality{openai.AudioModality, openai.TextModality},
812812 // Audio: &openai.Audio{
813813 // Format: openai.AudioFormatMp3,
814814 // Voice: openai.AudioVoiceAlloy,
815815 // },
816816},
817-
817+
818818 })
819819 if err != nil {
820820 panic (err)
821821 }
822-
822+
823823 // Base64 format image data
824824 image , err := os.ReadFile (" ./examples/image/cat.png" )
825825 if err != nil {
@@ -850,32 +850,32 @@ func main() {
850850 },
851851 },
852852 }
853-
853+
854854 // Generate response
855855 response , err := model.Generate (ctx, messages)
856856 if err != nil {
857857 panic (err)
858858 }
859-
859+
860860 // Process response
861861 /*
862- The generated multimodal content is stored in the response.AssistantGentMultiContent field
862+ The generated multimodal content is stored in the response.AssistantGenMultiContent field
863863 In this example, the final generated message looks like:
864864 AssistantMessage = schema.Message{
865865 Role: schema.Assistant,
866866 AssistantGenMultiContent : []schema.MessageOutputPart{
867867 {Type: schema.ChatMessagePartTypeImageURL,
868868 Image: &schema.MessageOutputImage{
869869 MessagePartCommon: schema.MessagePartCommon{
870- Base64Data: &DataStr,
870+ Base64Data: &DataStr,
871871 MIMEType: "image/png",
872872 },
873873 },
874874 },
875875 },
876876 }
877877 */
878-
878+
 879879 fmt.Printf (" Assistant: %s \n " , response)
880880}
881881```
@@ -888,14 +888,14 @@ package main
888888import (
889889 " context"
890890 " time"
891-
891+
892892 " github.com/cloudwego/eino-ext/components/model/openai"
893893 " github.com/cloudwego/eino/schema"
894894)
895895
896896func main () {
897897 ctx := context.Background ()
898-
898+
899899 // Initialize model
900900 model , err := openai.NewChatModel (ctx, &openai.ChatModelConfig {
901901 APIKey: " your-api-key" ,
@@ -905,20 +905,20 @@ func main() {
905905 if err != nil {
906906 panic (err)
907907 }
908-
908+
909909 // Prepare messages
910910 messages := []*schema.Message {
911911 schema.SystemMessage (" You are an assistant" ),
912912 schema.UserMessage (" Write a story" ),
913913 }
914-
914+
915915 // Get streaming response
916916 reader , err := model.Stream (ctx, messages)
917917 if err != nil {
918918 panic (err)
919919 }
920920 defer reader.Close () // Remember to close
921-
921+
922922 // Process streaming content
923923 for {
924924 chunk , err := reader.Recv ()
0 commit comments