Skip to content

Commit c30bc35

Browse files
committed
notebook3
1 parent 0917bec commit c30bc35

2 files changed

Lines changed: 26 additions & 25 deletions

File tree

mistral7b/model_rag_carbon.ipynb

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -411,7 +411,7 @@
411411
"\n",
412412
"# Set the path to your downloaded GGUF model\n",
413413
"# IMPORTANT: Use a raw string (r\"...\") for Windows paths\n",
414-
"#MODEL_PATH =r\"D:\\\\Mistral7B\\\\mistral-7b-instruct-v0.2.Q4_K_M.gguf\"\n",
414+
"# MODEL_PATH =r\"D:\\\\Mistral7B\\\\mistral-7b-instruct-v0.2.Q4_K_M.gguf\"\n",
415415
"\n",
416416
"# Set the path to your data (PDFs, .txt, etc.)\n",
417417
"DATA_PATH = r\"D:\\Mistral7B\\data\"\n",
@@ -581,7 +581,7 @@
581581
],
582582
"source": [
583583
"print(\"Initializing models...\")\n",
584-
"MODEL_PATH =\"D:/Mistral7B/mistral-7b-instruct-v0.2.Q4_K_M.gguf\"\n",
584+
"MODEL_PATH = \"D:/Mistral7B/mistral-7b-instruct-v0.2.Q4_K_M.gguf\"\n",
585585
"\n",
586586
"# Load the local LLM (Mistral 7B) with GPU offloading\n",
587587
"llm = LlamaCPP(\n",

mistral7b/model_rag_carbon_recursive.ipynb

Lines changed: 24 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -398,7 +398,7 @@
398398
}
399399
],
400400
"source": [
401-
"#import os\n",
401+
"# import os\n",
402402
"import time\n",
403403
"from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, Settings\n",
404404
"from llama_index.llms.llama_cpp import LlamaCPP\n",
@@ -411,7 +411,7 @@
411411
"\n",
412412
"# Set the path to your downloaded GGUF model\n",
413413
"# IMPORTANT: Use a raw string (r\"...\") for Windows paths\n",
414-
"#MODEL_PATH =r\"D:\\\\Mistral7B\\\\mistral-7b-instruct-v0.2.Q4_K_M.gguf\"\n",
414+
"# MODEL_PATH =r\"D:\\\\Mistral7B\\\\mistral-7b-instruct-v0.2.Q4_K_M.gguf\"\n",
415415
"\n",
416416
"# Set the path to your data (PDFs, .txt, etc.)\n",
417417
"DATA_PATH = r\"D:\\Mistral7B\\data\"\n",
@@ -596,7 +596,7 @@
596596
],
597597
"source": [
598598
"print(\"Initializing models...\")\n",
599-
"MODEL_PATH =r\"D:\\Mistral7B\\tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf\"\n",
599+
"MODEL_PATH = r\"D:\\Mistral7B\\tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf\"\n",
600600
"\n",
601601
"# Load the local LLM (Mistral 7B) with GPU offloading\n",
602602
"llm = LlamaCPP(\n",
@@ -677,9 +677,9 @@
677677
"# Create the query engine, passing in the new template\n",
678678
"query_engine = index.as_query_engine(\n",
679679
" streaming=True,\n",
680-
" text_qa_template=qa_template,# <-- Pass the template here\n",
680+
" text_qa_template=qa_template, # <-- Pass the template here\n",
681681
" similarity_top_k=3,\n",
682-
" include_source_nodes=True\n",
682+
" include_source_nodes=True,\n",
683683
")\n",
684684
"# --- END MODIFICATION ---\n",
685685
"\n",
@@ -2772,8 +2772,6 @@
27722772
}
27732773
],
27742774
"source": [
2775-
"\n",
2776-
"\n",
27772775
"# --- Define the new prompts for the Critic and Refiner ---\n",
27782776
"\n",
27792777
"CRITIC_PROMPT = \"\"\"\n",
@@ -2837,7 +2835,7 @@
28372835
"try:\n",
28382836
" while True:\n",
28392837
" query = input(\"Ask a question about your documents: \")\n",
2840-
" if query.lower() == 'exit':\n",
2838+
" if query.lower() == \"exit\":\n",
28412839
" break\n",
28422840
"\n",
28432841
" # --- Start tracking the entire multi-step process ---\n",
@@ -2846,12 +2844,12 @@
28462844
"\n",
28472845
" # --- Step 1: Get the Draft (and Context) ---\n",
28482846
" response_stream = query_engine.query(query)\n",
2849-
" \n",
2847+
"\n",
28502848
" # Collect the streamed draft text\n",
28512849
" draft_text = \"\"\n",
28522850
" for chunk_text in response_stream.response_gen:\n",
28532851
" draft_text += chunk_text\n",
2854-
" \n",
2852+
"\n",
28552853
" # Extract the source context\n",
28562854
" context_str = \"\\n---\\n\".join(\n",
28572855
" [node.get_content() for node in response_stream.source_nodes]\n",
@@ -2862,9 +2860,9 @@
28622860
" print(textwrap.fill(draft_text, width=80))\n",
28632861
"\n",
28642862
" # --- MODIFICATION: Start Recursive Loop ---\n",
2865-
" \n",
2866-
" current_draft = draft_text # Initialize the loop with the first draft\n",
2867-
" \n",
2863+
"\n",
2864+
" current_draft = draft_text # Initialize the loop with the first draft\n",
2865+
"\n",
28682866
" for i in range(REFINEMENT_CYCLES):\n",
28692867
" print(f\"\\n--- Refinement Cycle {i + 1}/{REFINEMENT_CYCLES} ---\")\n",
28702868
"\n",
@@ -2873,9 +2871,9 @@
28732871
" critic_prompt = CRITIC_PROMPT.format(\n",
28742872
" context=context_str,\n",
28752873
" question=query,\n",
2876-
" draft=current_draft # Use the *current* draft\n",
2874+
" draft=current_draft, # Use the *current* draft\n",
28772875
" )\n",
2878-
" \n",
2876+
"\n",
28792877
" feedback_response = llm.complete(critic_prompt)\n",
28802878
" feedback_text = feedback_response.text\n",
28812879
" print(feedback_text)\n",
@@ -2884,20 +2882,21 @@
28842882
" if \"The draft is perfect\" in feedback_text:\n",
28852883
" print(\"--- Critic approved. Stopping refinement loop. ---\")\n",
28862884
" break # Exit the for loop early\n",
2887-
" \n",
2885+
"\n",
28882886
" # --- Step 4: Run the Refiner ---\n",
28892887
" print(\"--- Refiner is working... ---\")\n",
28902888
" refiner_prompt = REFINER_PROMPT.format(\n",
2891-
" draft=current_draft,\n",
2892-
" feedback=feedback_text\n",
2889+
" draft=current_draft, feedback=feedback_text\n",
28932890
" )\n",
2894-
" \n",
2891+
"\n",
28952892
" refiner_response = llm.complete(refiner_prompt)\n",
2896-
" \n",
2893+
"\n",
28972894
" # --- Step 5: Update Draft for Next Loop ---\n",
2898-
" current_draft = refiner_response.text # The refined answer becomes the new draft\n",
2895+
" current_draft = (\n",
2896+
" refiner_response.text\n",
2897+
" ) # The refined answer becomes the new draft\n",
28992898
"\n",
2900-
" print(f\"--- Intermediate Refined Draft (Cycle {i+1}) ---\")\n",
2899+
" print(f\"--- Intermediate Refined Draft (Cycle {i + 1}) ---\")\n",
29012900
" print(textwrap.fill(current_draft, width=80))\n",
29022901
"\n",
29032902
" # --- END OF MODIFIED LOOP ---\n",
@@ -2924,7 +2923,9 @@
29242923
" total_emissions_kg = tracker.stop()\n",
29252924
" print(\"\\n\\n--- Total Emissions Summary (Session) ---\")\n",
29262925
" if tracker.emissions_data:\n",
2927-
" print(f\"Total Energy Consumed: {tracker.emissions_data.energy_consumed * 1000:.4f} Wh\")\n",
2926+
" print(\n",
2927+
" f\"Total Energy Consumed: {tracker.emissions_data.energy_consumed * 1000:.4f} Wh\"\n",
2928+
" )\n",
29282929
" print(f\"Total CO2 Emitted: {total_emissions_kg * 1000:.4f} gCO2eq\")\n",
29292930
" print(\"Full report saved to 'emissions.csv'\")"
29302931
]

0 commit comments

Comments (0)