-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathresearch_engine_seq.py
More file actions
92 lines (78 loc) · 3.15 KB
/
research_engine_seq.py
File metadata and controls
92 lines (78 loc) · 3.15 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
from web_searching import web_search
from web_scraping import web_scrape
from llm_models import get_llm
from utilities import to_obj
from prompts import (
ASSISTANT_SELECTION_PROMPT_TEMPLATE,
WEB_SEARCH_PROMPT_TEMPLATE,
SUMMARY_PROMPT_TEMPLATE,
RESEARCH_REPORT_PROMPT_TEMPLATE
)
NUM_SEARCH_QUERIES = 2  # how many distinct search queries the LLM is asked to generate
NUM_SEARCH_RESULTS_PER_QUERY = 3  # URLs kept per search query
RESULT_TEXT_MAX_CHARACTERS = 10000  # scraped page text is truncated to this length before summarizing
question = 'What can I see and do in the Spanish town of Astorga?'  # the research question driving the pipeline
###
llm = get_llm()
# Ask the LLM to pick the research-assistant instructions best suited to the question.
selection_prompt = ASSISTANT_SELECTION_PROMPT_TEMPLATE.format(user_question=question)
selection_response = llm.invoke(selection_prompt)
assistant_instructions_dict = to_obj(selection_response.content)
# Ask the LLM to generate the web search queries to run.
query_generation_prompt = WEB_SEARCH_PROMPT_TEMPLATE.format(
    assistant_instructions=assistant_instructions_dict['assistant_instructions'],
    num_search_queries=NUM_SEARCH_QUERIES,
    user_question=assistant_instructions_dict['user_question'])
query_response = llm.invoke(query_generation_prompt)
# Strip newlines before parsing so the JSON-ish payload parses as one line.
web_search_queries_list = to_obj(query_response.content.replace('\n', ''))
# Run every search query and pair each resulting URL with the query that
# produced it: NUM_SEARCH_QUERIES x NUM_SEARCH_RESULTS_PER_QUERY entries total.
search_query_and_result_url_list = []
for wq in web_search_queries_list:
    query_text = wq['search_query']
    result_urls = web_search(
        web_query=query_text,
        num_results=NUM_SEARCH_RESULTS_PER_QUERY)
    for url in result_urls:
        search_query_and_result_url_list.append({
            'search_query': query_text,
            'result_url': url})
# Scrape the page text from each result URL, keeping at most
# RESULT_TEXT_MAX_CHARACTERS per page.
# Fix: the loop variable was named `re`, shadowing the stdlib `re` module
# name — renamed to `item`.
result_text_list = [{
    'result_text': web_scrape(
        url=item['result_url'])[:RESULT_TEXT_MAX_CHARACTERS],
    'result_url': item['result_url'],
    'search_query': item['search_query']}
    for item in search_query_and_result_url_list]
# Summarize each scraped result text with the LLM.
result_text_summary_list = []
for rt in result_text_list:
    summary_prompt = SUMMARY_PROMPT_TEMPLATE.format(
        search_result_text=rt['result_text'],
        search_query=rt['search_query'])
    text_summary = llm.invoke(summary_prompt)
    result_text_summary_list.append({
        # Fix: store the message text (.content), consistent with how the
        # other llm.invoke() results in this file are unpacked; storing the
        # raw response object would leak its repr into the report prompt
        # when f-stringed later.
        'text_summary': text_summary.content,
        'result_url': rt['result_url'],
        'search_query': rt['search_query']})
# Build one "Source URL + Summary" snippet per scraped result.
stringified_summary_list = [
    f'Source URL: {sr["result_url"]}\nSummary: {sr["text_summary"]}'
    for sr in result_text_summary_list]
# Merge all result snippets into a single research-summary string.
appended_result_summaries = '\n'.join(stringified_summary_list)
# Compile the final research report from the merged summaries.
research_report_prompt = RESEARCH_REPORT_PROMPT_TEMPLATE.format(
    research_summary=appended_result_summaries,
    user_question=question
)
research_report = llm.invoke(research_report_prompt)
print(f'stringified_summary_list={stringified_summary_list}')
print(f'merged_result_summaries={appended_result_summaries}')
# Fix: print the report text (.content), consistent with how the other
# llm.invoke() results in this file are unpacked, instead of the raw
# response object's repr.
print(f'research_report={research_report.content}')