# Build a single-step query engine: retrieve broadly (top-20 by similarity),
# rerank down to the 10 most relevant nodes with an LLM, then synthesize the
# answer with the tree-summarize strategy.
# FIX: the modules `llama_index.managed.llm_rerank` and
# `llama_index.managed.response_synthesizer` do not exist, and there is no
# `TreeSummarizeResponseSynthesizer` class. LLMRerank lives in
# `llama_index.core.postprocessor`; a tree-summarize synthesizer is obtained
# via `get_response_synthesizer(response_mode="tree_summarize")`.
# NOTE(review): `gemini` (an LLM instance) and `index` (a loaded index) are
# assumed to be defined earlier in the file — confirm against the full script.
from llama_index.core import get_response_synthesizer
from llama_index.core.postprocessor import LLMRerank

# LLM-based reranker: keeps the 10 best nodes out of the retrieved candidates.
reranker = LLMRerank(
    top_n=10,
    llm=gemini,
)

# Tree-summarize synthesizer: hierarchically summarizes the reranked nodes.
# (The original's `max_length=512` is not a valid parameter here; control
# output length via the LLM's own generation settings instead.)
response_synthesizer = get_response_synthesizer(
    response_mode="tree_summarize",
    llm=gemini,
)

single_step_query_engine = index.as_query_engine(
    similarity_top_k=20,  # retrieve 20 candidates before reranking
    node_postprocessors=[reranker],
    response_synthesizer=response_synthesizer,
)
# Assemble a query engine whose retrieved nodes are reranked by Cohere's
# hosted reranking model before the response is synthesized.
# NOTE(review): `retriever` is assumed to be defined earlier in the file.
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.postprocessor.cohere_rerank import CohereRerank

# top_n caps how many nodes survive the rerank step.
reranker = CohereRerank(top_n=10)

query_engine = RetrieverQueryEngine(
    retriever,
    node_postprocessors=[reranker],
)
# Cross-encoder reranker backed by a local sentence-transformers model;
# keeps only the 2 highest-scoring nodes after rescoring.
from llama_index.core.postprocessor import SentenceTransformerRerank

postprocessor = SentenceTransformerRerank(
    model="mixedbread-ai/mxbai-rerank-base-v1",
    top_n=2,
)
The `top_n` parameter in the reranker determines the number of top results to keep after reranking. You can adjust this parameter based on your specific needs.

To suppress verbose log output globally, raise the root logger's threshold to WARNING:

    import logging
    logging.getLogger().setLevel(logging.WARNING)
# Quiet LlamaIndex's chatter: raise the threshold of the 'llama_index'
# logger hierarchy so only warnings and errors from it are emitted.
import logging

llama_logger = logging.getLogger('llama_index')
llama_logger.setLevel(logging.WARNING)
# Load the source document for the parallel query pipeline demo.
from llama_index.core import SimpleDirectoryReader

# SimpleDirectoryReader reads the listed files and returns a list of
# Document objects ready for indexing.
reader = SimpleDirectoryReader(
    input_files=["pg_essay.txt"],
)
documents = reader.load_data()

# Set up the parallel query pipeline
# ... (code for setting up the pipeline goes here)
"pg_essay.txt"
with the actual path to your document file.VectorStoreIndex.from_documents()
# VectorStoreIndex.from_documents() takes a list of documents and a storage
# context and builds a new vector store index from them.
# FIX: there is no `llama_index.index` module — `VectorStoreIndex` is
# imported from `llama_index.core` alongside the reader and StorageContext.
from llama_index.core import SimpleDirectoryReader, StorageContext, VectorStoreIndex

# Load the data
reader = SimpleDirectoryReader(input_files=["your_document_file"])
documents = reader.load_data()

# Create a storage context with your vector store
# NOTE(review): `your_vector_store` is a placeholder — supply a real vector
# store instance here.
storage_context = StorageContext.from_defaults(vector_store=your_vector_store)

# Build the index
index = VectorStoreIndex.from_documents(documents, storage_context=storage_context)
"your_document_file"
with the actual path to your document file and your_vector_store
with your actual vector store. This will create a new vector store index from the documents in your docstore.VectorStoreIndex.from_documents()
# VectorStoreIndex.from_documents() takes a list of documents and a storage
# context and builds a new vector store index from them.
# FIX: there is no `llama_index.index` module — `VectorStoreIndex` is
# imported from `llama_index.core` alongside the reader and StorageContext.
from llama_index.core import SimpleDirectoryReader, StorageContext, VectorStoreIndex

# Load the data
reader = SimpleDirectoryReader(input_files=["your_document_file"])
documents = reader.load_data()

# Create a storage context with your vector store
# NOTE(review): `your_vector_store` is a placeholder — supply a real vector
# store instance here.
storage_context = StorageContext.from_defaults(vector_store=your_vector_store)

# Build the index
index = VectorStoreIndex.from_documents(documents, storage_context=storage_context)
"your_document_file"
with the actual path to your document file and your_vector_store
with your actual vector store. This will create a new vector store index from the documents in your docstore.load_index_from_storage()
# load_index_from_storage() takes a storage context and reloads a previously
# persisted index from it.
# FIX: there is no `llama_index.index` module — `load_index_from_storage`
# is imported from `llama_index.core` alongside StorageContext.
from llama_index.core import StorageContext, load_index_from_storage

# Create a storage context pointing at the directory the index was persisted to
storage_context = StorageContext.from_defaults(persist_dir="your_persist_directory")

# Load the index from storage
index = load_index_from_storage(storage_context)
"your_persist_directory"
with the actual directory where your index is stored. This will load the index from the storage and rebuild it.