diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index a28bef15..63c1caed 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -5,4 +5,19 @@
 
 # Default owner for everything, unless overridden by a more specific rule.
 * @Azure-Samples/azure-ai-samples-maintainers
-scenarios/docs @Azure-Samples/azure-ai-samples-maintainers @Azure-Samples/AI-Platform-Docs
+
+#### files referenced in docs (DO NOT EDIT, except for Docs team!!!) ###################
+/scenarios/projects/basic/chat-simple.py @azure-samples/AI-Platform-Docs
+/scenarios/projects/basic/chat-template.py @azure-samples/AI-Platform-Docs
+/scenarios/rag/custom-rag-app/assets/chat_eval_data.jsonl @azure-samples/AI-Platform-Docs
+/scenarios/rag/custom-rag-app/assets/grounded_chat.prompty @azure-samples/AI-Platform-Docs
+/scenarios/rag/custom-rag-app/assets/intent_mapping.prompty @azure-samples/AI-Platform-Docs
+/scenarios/rag/custom-rag-app/assets/products.csv @azure-samples/AI-Platform-Docs
+/scenarios/rag/custom-rag-app/chat_with_products.py @azure-samples/AI-Platform-Docs
+/scenarios/rag/custom-rag-app/config.py @azure-samples/AI-Platform-Docs
+/scenarios/rag/custom-rag-app/create_search_index.py @azure-samples/AI-Platform-Docs
+/scenarios/rag/custom-rag-app/evaluate.py @azure-samples/AI-Platform-Docs
+/scenarios/rag/custom-rag-app/get_product_documents.py @azure-samples/AI-Platform-Docs
+/scenarios/rag/custom-rag-app/requirements.txt @azure-samples/AI-Platform-Docs
+/scenarios/langchain/getting-started-with-langchain-chat-models.ipynb @azure-samples/AI-Platform-Docs
+/scenarios/langchain/getting-started-with-langchain-embeddings.ipynb @azure-samples/AI-Platform-Docs
\ No newline at end of file
diff --git a/scenarios/langchain/getting-started-with-langchain-chat-models.ipynb b/scenarios/langchain/getting-started-with-langchain-chat-models.ipynb
index b838843b..88d00422 100644
--- a/scenarios/langchain/getting-started-with-langchain-chat-models.ipynb
+++ b/scenarios/langchain/getting-started-with-langchain-chat-models.ipynb
@@ -26,7 +26,7 @@
     "\n",
     " 1. Create an [Azure subscription](https://azure.microsoft.com).\n",
     " 2. Create an Azure AI hub resource as explained at [How to create and manage an Azure AI Studio hub](https://learn.microsoft.com/en-us/azure/ai-studio/how-to/create-azure-ai-resource).\n",
-    " 3. Deploy one model supporting the [Azure AI model inference API](https://aka.ms/azureai/modelinference). In this example we use a `Mistral-Large-2407` and a `Mistral-Small` deployment. \n",
+    " 3. Deploy one model supporting the [Azure AI model inference API](https://aka.ms/azureai/modelinference). In this example we use a `mistral-medium-2505` and a `Mistral-Small` deployment. \n",
     "\n",
     " * You can follow the instructions at [Add and configure models to Azure AI model inference service](https://learn.microsoft.com/azure/ai-studio/ai-services/how-to/create-model-deployments)."
    ]
@@ -54,7 +54,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "name": "create_client"
+   },
    "outputs": [],
    "source": [
     "import os\n",
@@ -63,7 +65,7 @@
     "model = AzureAIChatCompletionsModel(\n",
     "    endpoint=os.environ[\"AZURE_INFERENCE_ENDPOINT\"],\n",
     "    credential=os.environ[\"AZURE_INFERENCE_CREDENTIAL\"],\n",
-    "    model=\"mistral-large-2407\",\n",
+    "    model=\"mistral-medium-2505\",\n",
     ")"
    ]
   },
@@ -84,7 +86,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "name": "human_message"
+   },
    "outputs": [],
    "source": [
     "from langchain_core.messages import HumanMessage, SystemMessage\n",
@@ -181,7 +185,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "name": "create_producer_verifier"
+   },
    "outputs": [],
    "source": [
     "from langchain_azure_ai.chat_models import AzureAIChatCompletionsModel\n",
@@ -189,7 +195,7 @@
     "producer = AzureAIChatCompletionsModel(\n",
     "    endpoint=os.environ[\"AZURE_INFERENCE_ENDPOINT\"],\n",
     "    credential=os.environ[\"AZURE_INFERENCE_CREDENTIAL\"],\n",
-    "    model=\"mistral-large-2407\",\n",
+    "    model=\"mistral-medium-2505\",\n",
     ")\n",
     "\n",
     "verifier = AzureAIChatCompletionsModel(\n",
@@ -209,7 +215,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "name": "generate_poem"
+   },
    "outputs": [],
    "source": [
     "from langchain_core.prompts import PromptTemplate\n",
@@ -242,7 +250,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "name": "create_output_parser"
+   },
    "outputs": [],
    "source": [
     "from langchain_core.output_parsers import StrOutputParser\n",
@@ -260,7 +270,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "name": "create_chain"
+   },
    "outputs": [],
    "source": [
     "chain = producer_template | producer | parser | verifier_template | verifier | parser"
@@ -276,7 +288,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "name": "create_multiple_outputs_chain"
+   },
    "outputs": [],
    "source": [
     "generate_poem = producer_template | producer | parser\n",
@@ -286,7 +300,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "name": "create_chain_with_passthrough"
+   },
    "outputs": [],
    "source": [
     "from langchain_core.runnables import RunnablePassthrough, RunnableParallel\n",
@@ -304,7 +320,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "name": "invoke_chain"
+   },
    "outputs": [],
    "source": [
     "chain.invoke({\"topic\": \"living in a foreign country\"})"
@@ -329,7 +347,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "name": "configure_logging"
+   },
    "outputs": [],
    "source": [
     "import sys\n",
@@ -363,7 +383,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "name": "create_client_with_logging"
+   },
    "outputs": [],
    "source": [
     "import os\n",
@@ -372,7 +394,7 @@
     "model = AzureAIChatCompletionsModel(\n",
     "    endpoint=os.environ[\"AZURE_INFERENCE_ENDPOINT\"],\n",
     "    credential=os.environ[\"AZURE_INFERENCE_CREDENTIAL\"],\n",
-    "    model=\"mistral-large-2407\",\n",
+    "    model=\"mistral-medium-2505\",\n",
     "    client_kwargs={\"logging_enable\": True},\n",
     ")"
    ]
diff --git a/scenarios/langchain/getting-started-with-langchain-embeddings.ipynb b/scenarios/langchain/getting-started-with-langchain-embeddings.ipynb
index 3f939f0f..87a63f9c 100644
--- a/scenarios/langchain/getting-started-with-langchain-embeddings.ipynb
+++ b/scenarios/langchain/getting-started-with-langchain-embeddings.ipynb
@@ -54,7 +54,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "name": "create_embed_model_client"
+   },
    "outputs": [],
    "source": [
     "import os\n",
@@ -84,7 +86,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "name": "create_vector_store"
+   },
    "outputs": [],
    "source": [
     "from langchain_core.vectorstores import InMemoryVectorStore\n",
@@ -102,7 +106,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "name": "add_documents"
+   },
    "outputs": [],
    "source": [
     "from langchain_core.documents import Document\n",
@@ -124,7 +130,9 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "metadata": {},
+   "metadata": {
+    "name": "search_similarity"
+   },
    "outputs": [],
    "source": [
     "results = vector_store.similarity_search(query=\"thud\", k=1)\n",