from gpt_index import (
    GPTSimpleVectorIndex,
    LLMPredictor,
    PromptHelper,
    ServiceContext,
)
from langchain import OpenAI
from langchain.agents import Tool, initialize_agent
from langchain.chains.conversation.memory import ConversationBufferMemory
from langchain.chat_models import ChatOpenAI
# --- LLM + index configuration ---------------------------------------------
# Wrap a deterministic (temperature=0) chat model so gpt_index can drive it.
# NOTE: LLMPredictor / ChatOpenAI / PromptHelper / ServiceContext must be
# imported at the top of the file (they were previously used undefined).
llm_predictor = LLMPredictor(llm=ChatOpenAI(temperature=0))

# Prompt-helper parameters: how gpt_index packs retrieved context into each
# LLM call.
max_input_size = 4096   # maximum tokens the model accepts per request
num_output = 256        # tokens reserved for the model's answer
max_chunk_overlap = 20  # token overlap between consecutive context chunks
prompt_helper = PromptHelper(max_input_size, num_output, max_chunk_overlap)

# Bundle the predictor and prompt settings; chunk_size_limit caps the size of
# each indexed text chunk.
service_context = ServiceContext.from_defaults(
    llm_predictor=llm_predictor,
    prompt_helper=prompt_helper,
    chunk_size_limit=750,
)

# Reload a previously built vector index from disk.  Passing the same
# service_context here is what avoids the
# "Cannot use llm_token_counter on an instance without a service context"
# ValueError shown in the traceback excerpt below.
index = GPTSimpleVectorIndex.load_from_disk(
    save_path='minimalist_entrepreneur_2.json',
    service_context=service_context,
)
# Traceback excerpt seen when the index is queried WITHOUT a service context
# (library source frames shown by the interpreter; line numbers are from the
# gpt_index package, not this file):
#
#   331      """Run query.
#   332
#   333      NOTE: Relies on mutual recursion between
#   (...)
#   344      composable graph.
#   ...
#    45      )
#    46      llm_predictor = service_context.llm_predictor
#    47      embed_model = service_context.embed_model
#
#   ValueError: Cannot use llm_token_counter on an instance without a service context.