# loop for the number of documents
index = GPTTreeIndex.from_documents(nodes, service_context=service_context)
indexList.append(index)
# BUG FIX: index.query(...) returns a Response object, not a plain string.
# Storing the Response directly as an index summary later crashes inside
# PromptHelper.get_numbered_text_from_nodes with
# "AttributeError: 'Response' object has no attribute 'splitlines'"
# (see the traceback below).  Convert it to text before using it as a summary.
index_summary = str(index.query(query_str, response_mode="tree_summarize"))
indexSummaries.append(index_summary)
# Compose all per-document tree indices into a single graph, keyed by
# their text summaries.
graph = ComposableGraph.from_indices(
    GPTTreeIndex,
    indexList,
    index_summaries=indexSummaries,
)
Try mode="summarize" instead of response_mode="tree_summarize", although newer versions may have made both options equivalent.
# Compose the per-document tree indices into one graph and persist it to disk.
graph = ComposableGraph.from_indices(
    GPTTreeIndex,
    indexList,
    index_summaries=indexSummaries,
)
graph.save_to_disk("save_path.json")

# One query config for the tree indices: default traversal, following a
# single child branch at each level of the tree.
query_configs = [
    QueryConfig(
        index_struct_type="tree",
        query_mode=QueryMode.DEFAULT,
        query_kwargs={"child_branch_factor": 1},
    ),
]

# Prompt sizing parameters for the LLM calls.
max_input_size = 3500
num_output = 512
max_chunk_overlap = 50
prompt_helper = PromptHelper(max_input_size, num_output, max_chunk_overlap)

# define LLM
llm_predictor_chatgpt = LLMPredictor(
    llm=OpenAI(temperature=0, model_name="gpt-3.5-turbo", max_tokens=1500)
)
service_context = ServiceContext.from_defaults(
    llm_predictor=llm_predictor_chatgpt,
    prompt_helper=prompt_helper,
    chunk_size_limit=1500,
    llama_logger=llama_logger,
)

# Run the query against the composed graph.
query = ""
response = graph.query(
    query,
    query_configs=query_configs,
    service_context=service_context,
)
File "C:\Users\\AppData\Local\Programs\Python\Python310\lib\site-packages\llama_index\indices\composability\graph.py", line 145, in query return query_runner.query(query_str) File "C:\Users\\AppData\Local\Programs\Python\Python310\lib\site-packages\llama_index\indices\query\query_runner.py", line 341, in query return query_combiner.run(query_bundle, level) File "C:\Users\\AppData\Local\Programs\Python\Python310\lib\site-packages\llama_index\indices\query\query_combiner\base.py", line 66, in run return self._query_runner.query_transformed( File "C:\Users\\AppData\Local\Programs\Python\Python310\lib\site-packages\llama_index\indices\query\query_runner.py", line 186, in query_transformed nodes = query_obj.retrieve(query_bundle) File "C:\Users\\AppData\Local\Programs\Python\Python310\lib\site-packages\llama_index\indices\query\base.py", line 249, in retrieve nodes = self._retrieve(query_bundle, similarity_tracker=similarity_tracker) File "C:\Users\\AppData\Local\Programs\Python\Python310\lib\site-packages\llama_index\indices\query\tree\leaf_query.py", line 382, in _retrieve return self._retrieve_level( File "C:\Users\\AppData\Local\Programs\Python\Python310\lib\site-packages\llama_index\indices\query\tree\leaf_query.py", line 357, in _retrieve_level selected_nodes = self._select_nodes( File "C:\Users\\AppData\Local\Programs\Python\Python310\lib\site-packages\llama_index\indices\query\tree\leaf_query.py", line 258, in _select_nodes self._service_context.prompt_helper.get_numbered_text_from_nodes( File "C:\Users\\AppData\Local\Programs\Python\Python310\lib\site-packages\llama_index\indices\prompt_helper.py", line 214, in get_numbered_text_from_nodes node_text = " ".join(node.get_text().splitlines()) AttributeError: 'Response' object has no attribute 'splitlines'
To import QueryConfig from the GPT index library, you can use the following code: from gpt_index.indices.query.schema import QueryConfig
You can find more details about QueryConfig in the documentation (https://gpt-index.readthedocs.io/en/latest/reference/query.html#gpt_index.indices.query.schema.QueryConfig). Note: you can ask me a follow-up question by @-mentioning me again.
Here are the response_mode options available in LlamaIndex:
default
: For the given index, it creates and refines an answer by sequentially going through each Node, making a separate LLM call per Node. This mode is suitable for more detailed answers.
response = index.query("What did the author do growing up?", response_mode="default")
compact
: For the given index, it compacts the prompt during each LLM call by stuffing as many Node text chunks as can fit within the maximum prompt size. If there are too many chunks to fit in one prompt, it creates and refines an answer by going through multiple prompts.
response = index.query("What did the author do growing up?", response_mode="compact")
tree_summarize
: Given a set of Nodes and the query, it recursively constructs a tree and returns the root node as the response. This mode is suitable for summarization purposes.
response = index.query("What did the author do growing up?", response_mode="tree_summarize")
Note: you can ask me a follow up question by @ mentioning me again
:speech_balloon: