# Route all log output to stdout at INFO level.
# NOTE: logging.basicConfig() already attaches a stdout StreamHandler to the
# root logger when it has no handlers; the original snippet then added a
# second StreamHandler as well, which duplicated every log line. The
# redundant handler has been removed.
import logging
import sys

logging.basicConfig(stream=sys.stdout, level=logging.INFO)
from langchain import OpenAI
from langchain.agents import initialize_agent
from gpt_index import GPTListIndex, GPTIndexMemory

# An empty list index used purely as the backing store for conversation
# memory; queries against it are answered in "compact" response mode.
index = GPTListIndex([])
memory = GPTIndexMemory(
    index=index,
    memory_key="chat_history",
    query_kwargs={"response_mode": "compact"},
)

# Deterministic completions (temperature 0) for reproducible answers.
llm = OpenAI(temperature=0)

# No tools are registered: the agent only converses, with its history
# persisted through the GPT-Index-backed memory above.
agent_chain = initialize_agent(
    [], llm, agent="conversational-react-description", memory=memory
)

agent_chain.run(input="what's my name?")
Note: you can ask me a follow-up question by @-mentioning me again.
# Route log output to stdout at INFO level. basicConfig() already installs
# a stdout StreamHandler on the root logger, so the extra StreamHandler from
# the original snippet (which duplicated every log line) is omitted.
import logging
import sys

logging.basicConfig(stream=sys.stdout, level=logging.INFO)

from langchain.agents import Tool, initialize_agent
from langchain.chains.conversation.memory import ConversationBufferMemory
from langchain import OpenAI
from gpt_index import GPTSimpleVectorIndex

# Load a previously persisted vector index from disk.
index = GPTSimpleVectorIndex.load_from_disk('../vector_indices/index_simple.json')

tools = [
    Tool(
        name="GPT Index",
        # Wrap the index query so the agent receives a plain string answer.
        func=lambda q: str(index.query(q)),
        description="useful for when you want to answer questions about the author. The input to this tool should be a complete English sentence.",
        # Hand the index's answer straight back to the user instead of
        # letting the agent rewrite it.
        return_direct=True,
    ),
]

memory = ConversationBufferMemory(memory_key="chat_history")
llm = OpenAI(temperature=0)
agent_chain = initialize_agent(
    tools, llm, agent="conversational-react-description", memory=memory
)

agent_chain.run(input="What did the author do growing up?")
Note: you can ask me a follow-up question by @-mentioning me again.
# Route log output to stdout at INFO level. basicConfig() already installs
# a stdout StreamHandler on the root logger, so the extra StreamHandler from
# the original snippet (which duplicated every log line) is omitted.
import logging
import sys

logging.basicConfig(stream=sys.stdout, level=logging.INFO)

from langchain.agents import Tool, initialize_agent
from langchain.chains.conversation.memory import ConversationBufferMemory
from langchain import OpenAI
from gpt_index import GPTSimpleVectorIndex

# Load a previously persisted vector index from disk.
index = GPTSimpleVectorIndex.load_from_disk('../vector_indices/index_simple.json')

tools = [
    Tool(
        name="GPT Index",
        # Wrap the index query so the agent receives a plain string answer.
        func=lambda q: str(index.query(q)),
        description="useful for when you want to answer questions about the author. The input to this tool should be a complete English sentence.",
        # Hand the index's answer straight back to the user instead of
        # letting the agent rewrite it.
        return_direct=True,
    ),
]

memory = ConversationBufferMemory(memory_key="chat_history")
llm = OpenAI(temperature=0)
agent_chain = initialize_agent(
    tools, llm, agent="conversational-react-description", memory=memory
)
def log_input_output(agent, input_text):
    """Run *agent* on *input_text*, logging both the input and the output.

    Parameters:
        agent: any object exposing ``run(input=...)`` (e.g. a LangChain
            agent chain).
        input_text: the user query forwarded to the agent.

    Returns:
        Whatever ``agent.run`` returns, unchanged.
    """
    # Lazy %-style arguments: the message is only formatted when the INFO
    # level is actually enabled (preferred over f-strings for logging).
    logging.info("Input: %s", input_text)
    output = agent.run(input=input_text)
    logging.info("Output: %s", output)
    return output
# Ask the agent a question; log_input_output records both the query and the
# agent's answer via the logging module before returning the answer.
log_input_output(agent_chain, "What did the author do growing up?")
Note: you can ask me a follow-up question by @-mentioning me again.
💬 `index.query`
# through an agent, you can follow these steps:

# Route log output to stdout at INFO level. basicConfig() already installs
# a stdout StreamHandler on the root logger, so the extra StreamHandler from
# the original snippet (which duplicated every log line) is omitted.
import logging
import sys

logging.basicConfig(stream=sys.stdout, level=logging.INFO)

from langchain.agents import Tool, initialize_agent
from langchain.chains.conversation.memory import ConversationBufferMemory
from langchain import OpenAI
from gpt_index import GPTSimpleVectorIndex

# Load a previously persisted vector index from disk.
index = GPTSimpleVectorIndex.load_from_disk('../vector_indices/index_simple.json')

tools = [
    Tool(
        name="GPT Index",
        # Wrap the index query so the agent receives a plain string answer.
        func=lambda q: str(index.query(q)),
        description="useful for when you want to answer questions about the author. The input to this tool should be a complete English sentence.",
        # Hand the index's answer straight back to the user instead of
        # letting the agent rewrite it.
        return_direct=True,
    ),
]

memory = ConversationBufferMemory(memory_key="chat_history")
llm = OpenAI(temperature=0)
agent_chain = initialize_agent(
    tools, llm, agent="conversational-react-description", memory=memory
)

agent_chain.run(input="What did the author do growing up?")
Note: you can ask me a follow-up question by @-mentioning me again.
💬 `index.query(..., service_context=service_context)`
> Entering new AgentExecutor chain...
> Thought: Do I need to use a tool? Yes
func=lambda q: str(index.query(q)),
func=lambda q: str(index.query(q, service_context=service_context)),
response = agent_chain.run(input=query)
from llama_index import GPTSimpleVectorIndex, SimpleDirectoryReader, ServiceContext
from llama_index.logger import LlamaLogger

# BUG FIX: the original snippet passed `llama_logger` to
# ServiceContext.from_defaults() without ever defining it (only the
# LlamaLogger class was imported), which raises NameError. Instantiate
# the logger first.
llama_logger = LlamaLogger()

# Load documents from ./data and build a vector index that records its
# internal steps through the logger attached to the service context.
docs = SimpleDirectoryReader('./data').load_data()
service_context = ServiceContext.from_defaults(llama_logger=llama_logger)
index = GPTSimpleVectorIndex.from_documents(docs, service_context=service_context)

response = index.query("What did the author do growing up?", service_context=service_context)

# Retrieve the per-query log entries captured during the call above.
llama_logger.get_logs()
# Verbose (DEBUG-level) logging to stdout. basicConfig() already installs a
# stdout StreamHandler on the root logger; the second handler added by the
# original snippet duplicated every message and has been dropped.
import logging
import sys

logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)