# Indexing
# this should directly index documents into Elasticsearch
client = ElasticsearchVectorClient()
GPTOpensearchIndex(documents, client=client, chunk_size_limit=1024)

# Querying
# this should ask the query 'q' on the Elasticsearch index, using the qa & refinement templates provided,
# and with the LLM Predictor provided
client = ElasticsearchVectorClient()
index = GPTOpensearchIndex([], client=client)
llm_predictor = LLMPredictor(llm=ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo"))
similarity_top_k = 1
index.query(
    q,
    similarity_top_k=similarity_top_k,
    llm_predictor=llm_predictor,
    text_qa_template=CHAT_QA_PROMPT,
    refine_template=CHAT_REFINE_PROMPT,
)
documents = SimpleDirectoryReader('./data').load_data()

client = ElasticsearchVectorClient()
vector_store = OpensearchVectorStore(client=client)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
service_context = ServiceContext.from_defaults(chunk_size_limit=1024)

index = VectorStoreIndex.from_documents(
    documents,
    storage_context=storage_context,
    service_context=service_context,
)
query_engine = index.as_query_engine(
    similarity_top_k=1,
    text_qa_template=CHAT_QA_PROMPT,
    refine_template=CHAT_REFINE_PROMPT,
)
response = query_engine.query("my query")
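The old snippet also passed an LLMPredictor at query time; in the new API that configuration lives on the ServiceContext instead. A minimal sketch of carrying it over, assuming a ~0.6-era llama-index with langchain installed (the import paths are an assumption and may differ on your version):

# Sketch: the ChatOpenAI predictor from the old snippet, wired into the new API.
# Import paths assume llama-index ~0.6.x with langchain installed; adjust for your version.
from langchain.chat_models import ChatOpenAI
from llama_index import LLMPredictor, ServiceContext

llm_predictor = LLMPredictor(llm=ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo"))
service_context = ServiceContext.from_defaults(
    llm_predictor=llm_predictor,
    chunk_size_limit=1024,
)
# pass this service_context to VectorStoreIndex.from_documents(...) as above;
# the prompt templates still go to as_query_engine()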
There's also from_vector_store:

client = ElasticsearchVectorClient(account_id=account_id, app_id=app_id)
vector_store = OpensearchVectorStore(client)
index = VectorStoreIndex.from_vector_store(vector_store=vector_store)
The indexing setup goes through the storage_context parameter and the service_context parameter. from_vector_store is very new, I can't remember if it's on the latest pypi version yet or not. But that's mainly used to connect to a vector db that's already been populated; setting up the storage like you did before is the initial way to do it. There's the as_query_engine() thing too, which kind of separates the query from the index a bit.

Without from_vector_store, connecting to an already-populated store looks like

index = VectorStoreIndex([], storage_context=storage_context)
from_vector_store is just making that look less awkward (and automates the storage_context setup).
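To make the comparison concrete, here is a sketch of both reconnection paths side by side, reusing the names from the snippets above (ElasticsearchVectorClient is assumed to be the same wrapper used earlier; not taken from the original thread):

# Reconnecting to a vector store that was populated by the indexing snippet above.
client = ElasticsearchVectorClient()
vector_store = OpensearchVectorStore(client=client)

# Option 1: blank index plus an explicit StorageContext (the "awkward" way)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex([], storage_context=storage_context)

# Option 2: from_vector_store, if your installed version already has it
index = VectorStoreIndex.from_vector_store(vector_store=vector_store)

# Either way, queries go through a query engine rather than index.query()
query_engine = index.as_query_engine(similarity_top_k=1)
response = query_engine.query("my query")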