```python
from llama_index.langchain_helpers.agents import (
    LlamaToolkit,
    create_llama_chat_agent,
    IndexToolConfig,
    GraphToolConfig,
)

agent_chain = create_agent(toolkit, llm)

while True:
    query = input("What do you want to ask? ")
    print('question: ', query)
    response = agent_chain.run(input=query)
    print(f'Agent: {response}')
```
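As written, the loop can only be stopped by killing the process. A small variation with an exit command (the `exit`/`quit` convention here is an assumption, not part of the original):

```python
while True:
    query = input("What do you want to ask? (type 'exit' to quit) ")
    if query.strip().lower() in {"exit", "quit"}:
        break
    response = agent_chain.run(input=query)
    print(f'Agent: {response}')
```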
In the `func` for the tool, you can put your own wrapper function instead of a lambda: one that calls query and gets the source nodes from the response object.

```python
toolkit = LlamaToolkit(
    index_configs=index_configs,
    graph_configs=graph_configs,
)

memory = ConversationBufferMemory(memory_key="chat_history")
agent_chain = create_llama_chat_agent(
    toolkit,
    llm,
    memory=memory,
    verbose=True,
)
```
The `func` property of the Tool object will query your graph:

```python
graph = [create your graph]
query_configs = [create your query configs]

def query_index(q):
    response = graph.query(q, query_configs=query_configs)
    source_nodes = response.source_nodes
    source_texts = [x.node.get_text() for x in source_nodes]
    source_scores = [x.score for x in source_nodes]
    # Do something with the texts/scores?
    ...
    return str(response)

...
func=lambda q: query_index(q),
...
```
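To make that concrete, here is one way the wrapper could be wired into a plain LangChain `Tool` so the answer carries its sources back to the user. A minimal sketch, assuming the `graph` and `query_configs` from the snippet above; `query_graph_with_sources`, the 200-character truncation, and the `Sources:` footer are illustrative choices, not library behavior:

```python
from langchain.agents import Tool

def query_graph_with_sources(q):
    # query the graph, then append the retrieved snippets to the answer text
    response = graph.query(q, query_configs=query_configs)
    sources = "\n".join(
        f"- (score {sn.score}) {sn.node.get_text()[:200]}"
        for sn in response.source_nodes
    )
    return f"{response}\n\nSources:\n{sources}"

graph_tool = Tool(
    name="Graph Index",
    func=query_graph_with_sources,  # a named wrapper instead of a bare lambda
    description="useful for questions that need the composed graph",
    return_direct=True,
)
```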
I tried `initialize_agent` to build an agent_chain, but it does not give as accurate an answer as `create_llama_chat_agent` does.

```python
graphs = []
tools = []
idx = 0
for file in files:
    print(file)
    graph = get_graph('indexing/' + file + '.json')
    graphs.append(graph)
    desc = graph_desc[idx] if idx < len(graph_desc) else 'others'
    tool = Tool(
        name=file + ' Graph',
        # bind graph per iteration; a bare `lambda q: str(graph.query(q))`
        # would make every tool query the last graph in the loop
        func=lambda q, graph=graph: str(graph.query(q)),
        # alternative: lambda q, graph=graph: str(query_index(q, graph, query_configs)),
        description="useful for when you want to answer questions about the " + desc,
        return_direct=True,
    )
    tools.append(tool)
    idx += 1

memory = ConversationBufferMemory(memory_key="chat_history")
llm = OpenAI(temperature=0.2, model_name="gpt-3.5-turbo", max_tokens=512)
agent_chain = initialize_agent(tools, llm, agent="conversational-react-description", memory=memory)
return agent_chain
```
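One thing worth checking in the loop above, independent of which agent constructor is used: Python closures bind loop variables late, so a bare `lambda q: str(graph.query(q))` created inside the loop makes every tool query the *last* graph. A minimal demonstration:

```python
# late binding: all three closures see the final value of i
fns = [lambda: i for i in range(3)]
print([f() for f in fns])  # [2, 2, 2]

# a default argument captures the value at definition time
fns = [lambda i=i: i for i in range(3)]
print([f() for f in fns])  # [0, 1, 2]
```

That alone can make the tools appear to answer inaccurately, since every question gets routed to the same graph.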
Here is the `create_llama_chat_agent` setup I used instead of `initialize_agent`:

```python
memory = ConversationBufferMemory(memory_key="chat_history")
agent_chain = create_llama_chat_agent(
    toolkit,
    llm,
    memory=memory,
    verbose=True,
)
```
```python
graph_desc = [
    ' general gurufocus tutorials questions, such as some key page tutorials or stock summary page',
    ' warren buffet and BERKSHIRE HATHAWAY INC SHAREHOLDER LETTERS',
    ' 10K sec filing for some popular stocks',
]

graphs = []
graph_configs = []
idx = 0
for file in files:
    print(file)
    graph = ComposableGraph.load_from_disk(
        'indexing/' + file + '.json',
        llm_predictor=llm_predictor,
        prompt_helper=prompt_helper,
    )
    graphs.append(graph)
    # graph config; include the file in the name so each tool is distinct
    # (originally every config used the same name, "Graph Index")
    graph_config = GraphToolConfig(
        graph=graph,
        name=f"Graph Index {file}",
        description="useful for when user ask about gurufocus" + graph_desc[idx],
        query_configs=query_configs,
        tool_kwargs={"return_direct": True},
    )
    graph_configs.append(graph_config)
    idx += 1
```
```python
# load index
index_list = []
index_configs = []
index_desc = {
    'getting-started.json': ', including homepage dashboard, stock summary page, guru pages, insider trades, all-in-one screener, excel add-in, and google sheets add-on',
    'stock-summary-page.json': ', including stock summary page, warning signs, gf score, gf value, performance charts, peter lynch chart, segment data charts',
}

for file in files:
    file_list = os.listdir('indexing/' + file)
    for filename in file_list:
        file_path = f'indexing/{file}/{filename}'
        cur_index = GPTSimpleVectorIndex.load_from_disk(
            file_path,
            llm_predictor=llm_predictor,
            prompt_helper=prompt_helper,
        )
        index_list.append(cur_index)
        desc = index_desc[filename] if filename in index_desc else ' '
        tool_config = IndexToolConfig(
            index=cur_index,
            name=f"Vector Index {filename}",
            description=f"useful for when you want to answer queries about the {filename[:-5]} " + desc,
            index_query_kwargs={"similarity_top_k": 3},
            tool_kwargs={"return_direct": True},
        )
        index_configs.append(tool_config)

toolkit = LlamaToolkit(
    index_configs=index_configs,
    graph_configs=graph_configs,
)
```
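Because LangChain agents dispatch on tool names, duplicate names (as with the original single shared "Graph Index" name) silently break routing. A quick sanity-check sketch, assuming the config objects expose the `name` they were constructed with:

```python
# sanity check: every tool name handed to the agent should be unique
names = [cfg.name for cfg in index_configs] + [cfg.name for cfg in graph_configs]
assert len(names) == len(set(names)), f"duplicate tool names: {sorted(names)}"
```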
For `create_llama_chat_agent`, I wonder if it is possible to modify this to pass my own `func` in the tool, so that I can get the sources while still using the toolkit, e.g. `lambda q: str(graph.query(q, query_configs=query_configs))`.
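If the toolkit path does not allow overriding `func`, one workaround sketch is to register that one graph as a plain LangChain `Tool` whose wrapper you control, and stash the retrieved nodes in a side channel. `last_sources` here is a hypothetical holder, not a library feature:

```python
last_sources = []  # hypothetical side channel for the most recent retrieval

def query_graph_tracking_sources(q):
    response = graph.query(q, query_configs=query_configs)
    # remember (text, score) pairs so the caller can inspect them after the run
    last_sources[:] = [(sn.node.get_text(), sn.score) for sn in response.source_nodes]
    return str(response)

# after agent_chain.run(input=...), last_sources holds the supporting snippets
```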
```python
# define LLM
llm_predictor = LLMPredictor(
    llm=OpenAI(temperature=0.2, model_name="gpt-3.5-turbo", max_tokens=num_outputs)
)
prompt_helper = PromptHelper(max_input_size, num_outputs, max_chunk_overlap, chunk_size_limit=chunk_size_limit)

decompose_transform = DecomposeQueryTransform(
    llm_predictor, verbose=True
)

# define query configs for graph
query_configs = [
    {
        "index_struct_type": "simple_dict",
        "query_mode": "default",
        "query_kwargs": {
            "similarity_top_k": 1,
            "include_summary": True,
            "refine_template": CHAT_REFINE_PROMPT,
        },
        "query_transform": decompose_transform,
    },
    {
        "index_struct_type": "list",
        "query_mode": "default",
        "query_kwargs": {
            "response_mode": "tree_summarize",
            "verbose": True,
            "refine_template": CHAT_REFINE_PROMPT,
        },
    },
]

# define query configs for index
index_query_configs = [
    {
        "index_query_kwargs": {"similarity_top_k": 3},
        "tool_kwargs": {"return_direct": True},
    }
]
```
> Sorry, the provided knowledge source context is not related to the topic
```python
# add graph tools
graph_desc = [
    ' general gurufocus tutorials questions, such as some key page tutorials or stock summary page',
    ' warren buffet and BERKSHIRE HATHAWAY INC SHAREHOLDER LETTERS',
    ' 10K sec filing for some popular stocks',
]

graphs = []
tools = []
idx = 0
for file in files:
    print(file)
    graph = get_graph('indexing/' + file + '.json')
    graphs.append(graph)
    desc = graph_desc[idx] if idx < len(graph_desc) else 'others'
    tool = Tool(
        name=file + ' Graph',
        # bind graph as a default argument so each tool queries its own graph
        func=lambda q, graph=graph: str(query_index(q, graph, query_configs)),
        description="useful for when you want to answer questions about the " + desc,
        return_direct=True,
    )
    tools.append(tool)
    idx += 1

# Add all index tools
index_desc = {
    'getting-started.json': ', including homepage dashboard, stock summary page, guru pages, insider trades, all-in-one screener, excel add-in, and google sheets add-on',
    'stock-summary-page.json': ', including stock summary page, warning signs, gf score, gf value, performance charts, peter lynch chart, segment data charts',
}

for file in files:
    file_list = os.listdir('indexing/' + file)
    for filename in file_list:
        file_path = f'indexing/{file}/{filename}'
        cur_index = GPTSimpleVectorIndex.load_from_disk(file_path, llm_predictor=llm_predictor, prompt_helper=prompt_helper)
        desc = index_desc[filename] if filename in index_desc else ' '
        tool = Tool(
            name=file + ' index',
            # was `index`, which is undefined here; bind cur_index per iteration
            func=lambda q, index=cur_index: str(query_index(q, index, index_query_configs)),
            description="useful for when you want to answer questions about the " + desc,
            return_direct=True,
        )
        tools.append(tool)
```
```python
query_configs = [
    {
        "index_struct_type": "simple_dict",
        "query_mode": "default",
        "query_kwargs": {
            "similarity_top_k": 3,  # updated to match the index query config?
            "include_summary": True,
            "refine_template": CHAT_REFINE_PROMPT,
        },
        "query_transform": decompose_transform,
    },
    {
        "index_struct_type": "list",
        "query_mode": "default",
        "query_kwargs": {
            "response_mode": "tree_summarize",
            "verbose": True,
            "refine_template": CHAT_REFINE_PROMPT,
        },
    },
]
```