```python
# configure response synthesizer
response_synthesizer = get_response_synthesizer(text_qa_template=custom_query_template)
```
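Here `custom_query_template` needs to be a prompt object, not a bare string. A minimal sketch, assuming the `QuestionAnswerPrompt` wrapper from the same era of the library (the template wording is illustrative):

```python
from llama_index.prompts.prompts import QuestionAnswerPrompt

# {context_str} and {query_str} are the placeholders the synthesizer
# fills in at query time
custom_query_template = QuestionAnswerPrompt(
    "Context information is below.\n"
    "---------------------\n"
    "{context_str}\n"
    "---------------------\n"
    "Given the context information and no prior knowledge, "
    "answer the question: {query_str}\n"
)
```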
```python
# can define filters specific to this vector index
# (so you can reuse pinecone indexes)
metadata_filters = {"title": "paul_graham_essay"}
```
The filters are passed to `GPTPineconeIndex` when the index is constructed.
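A sketch of the full construction, assuming the pre-0.6 `GPTPineconeIndex.from_documents` signature with a `pinecone_index` handle and per-index `metadata_filters` (the Pinecone index name and data path are illustrative):

```python
import pinecone
from llama_index import GPTPineconeIndex, SimpleDirectoryReader

# connect to an existing Pinecone index
pinecone.init(api_key="...", environment="us-west1-gcp")
pinecone_index = pinecone.Index("quickstart")

documents = SimpleDirectoryReader("data").load_data()

# metadata_filters is applied to every query against this index,
# so several logical indexes can share one Pinecone index
index = GPTPineconeIndex.from_documents(
    documents,
    pinecone_index=pinecone_index,
    metadata_filters={"title": "paul_graham_essay"},
)
```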
And how can I store the built index for future use?

```python
{
    "index_struct_type": "simple_dict",
    "query_mode": "default",
    "query_kwargs": {
        "similarity_top_k": 3,
    },
}
```
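On storing the built index: in these pre-0.6 releases every index exposes `save_to_disk` / `load_from_disk`. A sketch, assuming the Pinecone-backed index above (only the index struct goes to disk; the vectors themselves stay in Pinecone):

```python
# persist the index struct (nodes, metadata) to disk
index.save_to_disk("index.json")

# later: reload it, supplying the Pinecone handle again
index = GPTPineconeIndex.load_from_disk("index.json", pinecone_index=pinecone_index)
```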
```python
graph = ComposableGraph.from_indices(
    GPTListIndex,
    index_arr,
    index_summaries=summaries,
    service_context=service_context,
)
```
When calling `ComposableGraph.from_indices`, I always get this error:

```
ValueError: Got a larger chunk overlap (20) than chunk size (-42), should be smaller.
```
```python
# set maximum input size
max_input_size = 256
# set number of output tokens
num_outputs = 256
# set maximum chunk overlap
max_chunk_overlap = 20
chunk_size_limit = 512

# define LLM
llm_predictor = LLMPredictor(
    llm=OpenAI(temperature=0.2, model_name="gpt-3.5-turbo", max_tokens=num_outputs)
)
prompt_helper = PromptHelper(
    max_input_size, num_outputs, max_chunk_overlap, chunk_size_limit=chunk_size_limit
)
service_context = ServiceContext.from_defaults(
    llm_predictor=llm_predictor,
    prompt_helper=prompt_helper,
    chunk_size_limit=chunk_size_limit,
)
```
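The negative chunk size comes from the `PromptHelper` arithmetic: the room available for context in one chunk is roughly `max_input_size` minus `num_outputs` minus the tokens of the prompt template itself, and with `max_input_size = 256` and `num_outputs = 256` that is negative before any context is added (hence the -42). The fix is to give `max_input_size` the model's real context window; a sketch for `gpt-3.5-turbo` (4096 tokens), keeping the other values from above:

```python
# gpt-3.5-turbo accepts 4096 tokens in total, so let PromptHelper
# budget against the full window rather than 256
max_input_size = 4096
num_outputs = 256
max_chunk_overlap = 20
chunk_size_limit = 512

prompt_helper = PromptHelper(
    max_input_size, num_outputs, max_chunk_overlap, chunk_size_limit=chunk_size_limit
)
service_context = ServiceContext.from_defaults(
    llm_predictor=llm_predictor,
    prompt_helper=prompt_helper,
    chunk_size_limit=chunk_size_limit,
)
```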
I set `chunk_size_limit = 512`. However, I see one node whose text is very long, around 3,000 tokens. Shouldn't it be at most 512 tokens?

```python
# Refine Prompt
CHAT_REFINE_PROMPT_TMPL_MSGS = [
    HumanMessagePromptTemplate.from_template("{query_str}"),
    AIMessagePromptTemplate.from_template("{existing_answer}"),
    HumanMessagePromptTemplate.from_template(
        "We have the opportunity to refine the above answer "
        "(only if needed) with some more context below.\n"
        "------------\n"
        "{context_msg}\n"
        "------------\n"
        "Given the new context, refine the original answer to better "
        "answer the question. "
        "If the context isn't useful, output the original answer again."
    ),
]
```
```python
# define query configs for graph
query_configs = [
    {
        "index_struct_type": "simple_dict",
        "query_mode": "default",
        "query_kwargs": {
            "similarity_top_k": 1,
            "include_summary": True,
            "refine_template": CHAT_REFINE_PROMPT_TMPL_MSGS,
        },
        "query_transform": decompose_transform,
    },
    {
        "index_struct_type": "list",
        "query_mode": "default",
        "query_kwargs": {
            "response_mode": "tree_summarize",
            "verbose": True,
            "refine_template": CHAT_REFINE_PROMPT_TMPL_MSGS,
        },
    },
]
```
```python
response = graph.query(q, query_configs=query_configs)
```
With this, I got the following error:

```
line 117, in refine_response_single
    refine_template = self.refine_template.partial_format(
AttributeError: 'list' object has no attribute 'partial_format'
```
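The traceback points at the cause: `refine_template` received the raw message list, but `partial_format` lives on the library's prompt classes, not on a Python list. In this era of the API the message list first has to be wrapped in a LangChain `ChatPromptTemplate` and then in a `RefinePrompt`; a sketch:

```python
from langchain.prompts.chat import ChatPromptTemplate
from llama_index.prompts.prompts import RefinePrompt

# wrap the message list into a prompt object that supports partial_format
CHAT_REFINE_PROMPT = RefinePrompt.from_langchain_prompt(
    ChatPromptTemplate.from_messages(CHAT_REFINE_PROMPT_TMPL_MSGS)
)

# pass the wrapped prompt, not the bare list, in query_kwargs
for config in query_configs:
    config["query_kwargs"]["refine_template"] = CHAT_REFINE_PROMPT
```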
```python
from llama_index.langchain_helpers.agents import (
    LlamaToolkit,
    create_llama_chat_agent,
    IndexToolConfig,
    GraphToolConfig,
)

# build a chat agent over the toolkit
agent_chain = create_llama_chat_agent(toolkit, llm)

while True:
    query = input("What do you want to ask? ")
    print('question: ', query)
    response = agent_chain.run(input=query)
    print(f'Agent: {response}')
```
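This assumes `toolkit` and `llm` are already defined. A minimal sketch of how they might be built with the tool configs the import list points at (the names, descriptions, and kwargs here are illustrative, not library defaults):

```python
from langchain.chat_models import ChatOpenAI

llm = ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo")

# one tool per vector index the agent should be able to consult
index_configs = [
    IndexToolConfig(
        index=index,
        name="Essay Vector Index",
        description="useful for answering questions about the essay",
        index_query_kwargs={"similarity_top_k": 3},
        tool_kwargs={"return_direct": True},
    )
]

# one tool for the composed graph, reusing the query configs from above
graph_configs = [
    GraphToolConfig(
        graph=graph,
        name="Graph Index",
        description="useful for questions that span several documents",
        query_configs=query_configs,
        tool_kwargs={"return_direct": True},
    )
]

toolkit = LlamaToolkit(index_configs=index_configs, graph_configs=graph_configs)
```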
Instead of an actual answer, the response sometimes comes back as just:

```
The original answer remains relevant and does not require refinement based on the new context provided.
```

Why did this happen, and how can I fix it?
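That sentence is the refine step leaking through: the template tells the model to "output the original answer again" when the context isn't useful, and chat models often narrate that decision instead of repeating the answer. A common workaround is to spell out that only the answer text may be returned; a sketch of a reworded refine message (the wording is illustrative, not a library default):

```python
from langchain.prompts.chat import (
    AIMessagePromptTemplate,
    HumanMessagePromptTemplate,
)

CHAT_REFINE_PROMPT_TMPL_MSGS = [
    HumanMessagePromptTemplate.from_template("{query_str}"),
    AIMessagePromptTemplate.from_template("{existing_answer}"),
    HumanMessagePromptTemplate.from_template(
        "We have the opportunity to refine the above answer "
        "(only if needed) with some more context below.\n"
        "------------\n"
        "{context_msg}\n"
        "------------\n"
        "Given the new context, refine the original answer to better "
        "answer the question. "
        # guard against meta-commentary leaking into the response
        "If the context isn't useful, repeat the original answer verbatim. "
        "Never state whether refinement was needed; "
        "return only the answer text."
    ),
]
```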