def load_index(self, index_name, prompt):
    """Answer *prompt* against the documents stored for *index_name*.

    Reads the documents under ``{self.directory_path}/indexes/index_{index_name}``,
    builds a vector index from them, and runs the prompt through its
    query engine.

    Args:
        index_name: Identifier used to locate the per-document index folder.
        prompt: Natural-language question to answer from the index.

    Returns:
        The query-engine response for *prompt*.

    Raises:
        HTTPException: 404 when the index folder is missing or any step
            of loading/querying fails.
    """
    try:
        index_path = f"{self.directory_path}/indexes/index_{index_name}"
        documents = SimpleDirectoryReader(index_path).load_data()
        index = GPTVectorStoreIndex.from_documents(documents)
        query_engine = index.as_query_engine()
        return query_engine.query(prompt)
    except Exception as e:
        # BUG FIX: the original *returned* the HTTPException object, which
        # FastAPI serializes as a normal 200 response body. Raising it lets
        # the framework produce a real 404; `from e` preserves the cause.
        raise HTTPException(status_code=404, detail='Not Found') from e
# NOTE(review): truncated fragment — the line is cut off mid string literal
# ("'Not" was presumably 'Not Found'), so it cannot parse as-is. It duplicates
# the query/except logic already present in load_index above; confirm against
# the original source before keeping it.
try: query_engine = index.as_query_engine() return query_engine.query(prompt) except Exception as e: return HTTPException(status_code=404, detail='Not
try: # Rebuild the storage context storage_context = StorageContext.from_defaults(persist_dir="./storage") # Load the index index = load_index_from_storage(storage_context) except: # Storage not found; create a new one from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader documents = SimpleDirectoryReader("./data").load_data() index = GPTVectorStoreIndex.from_documents(documents) index.storage_context.persist() # Now add the query part here try: query_engine = index.as_query_engine() return query_engine.query(prompt) except Exception as e: return HTTPException(status_code=404, detail='Not Found')
# Load every persisted per-file index from disk, keyed by file name.
# (dict comprehension replaces the build-up loop; loop variable renamed so
# it no longer shadows the `file` builtin.)
index_set = {
    name: GPTSimpleVectorIndex.load_from_disk(
        f'{directory_path}/indexes/index_{name}.json',
        service_context=service_context,
    )
    for name in files
}

# Query the index belonging to filing 036283.
response = index_set['036283'].query(
    "How does the sum of $1,500,000,000 relate to Apple in 2020?",
    similarity_top_k=3,
)
print(response)
# Data folder 1: build an index and persist it to the default location.
documents = SimpleDirectoryReader("./data").load_data()
index = GPTVectorStoreIndex.from_documents(documents)
index.storage_context.persist()  # stores by default to the storage folder

# Data folder 2: same pipeline, but persist to a caller-chosen directory.
documents = SimpleDirectoryReader("path to second folder").load_data()
index = GPTVectorStoreIndex.from_documents(documents)
index.storage_context.persist(
    persist_dir="provide_folder_name_of_your_choice",
)
# Recreate the storage context from the folder the index was persisted to
# (pass that folder's name and it will be loaded), then restore the index
# from it.
storage_context = StorageContext.from_defaults(persist_dir="./storage")
index = load_index_from_storage(storage_context)
Token indices sequence length is longer than the specified maximum sequence length for this model (3301 > 1024). Running this sequence through the model will result in indexing errors
# ChatGPT-backed service context: temperature 0 for deterministic output,
# documents chunked at 1024 tokens.
llm = OpenAI(model="gpt-3.5-turbo", temperature=0)
service_context_chatgpt = ServiceContext.from_defaults(
    chunk_size=1024,
    llm=llm,
)