```
pip3 install llama-index-vector-stores-chroma
```

is all that's required for `from llama_index.vector_stores.chroma import ChromaVectorStore`.

It's installed, but it's the only import that isn't resolving after migrating to 0.10.7. Curious if there is something else I need to do here?

```
Requirement already satisfied: llama-index-vector-stores-chroma in ./env/lib/python3.10/site-packages (0.1.2)
Requirement already satisfied: llama-index-core<0.11.0,>=0.10.1 in ./env/lib/python3.10/site-packages (from llama-index-vector-stores-chroma) (0.10.9)
```
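For reference, once the import resolves, a minimal sketch of wiring it up 0.10-style (the client path and collection name are assumptions):

```python
import chromadb
from llama_index.core import VectorStoreIndex
from llama_index.vector_stores.chroma import ChromaVectorStore

# Hypothetical persistent client and collection name; adjust to your setup.
client = chromadb.PersistentClient(path="./chroma_db")
collection = client.get_or_create_collection("my_collection")

vector_store = ChromaVectorStore(chroma_collection=collection)
# Assumes an embed model is configured on Settings (or pass embed_model=...).
index = VectorStoreIndex.from_vector_store(vector_store=vector_store)
```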
```python
d = property(_swigfaiss.Index_d_get, _swigfaiss.Index_d_set, doc=r""" vector dimension""")
```
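That swig-generated property is just the faiss index's vector dimension, so you can read it off the index directly; a quick illustrative sketch:

```python
import faiss

index = faiss.IndexFlatL2(384)  # dimension chosen for illustration
print(index.d)                  # prints 384, the vector dimension
```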
`enable_hybrid=True`, and I would like to compare the differences in quality. Is there a way to do this after the fact with LlamaIndex, or would I need to rebuild the entire vector db from scratch?

I'm using `RetrieverQueryEngine` when assigning the variable `vector_query_engine`.
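For context, `enable_hybrid` is a construction-time flag on stores that support it (Qdrant is assumed here), and as far as I know the sparse vectors it relies on are generated at ingest, which is why enabling it after the fact usually means re-ingesting. A minimal sketch:

```python
import qdrant_client
from llama_index.vector_stores.qdrant import QdrantVectorStore

# Assumption: a Qdrant-backed store; other stores expose their own hybrid options.
client = qdrant_client.QdrantClient(location=":memory:")
vector_store = QdrantVectorStore(
    collection_name="hybrid_demo",  # hypothetical collection name
    client=client,
    enable_hybrid=True,  # sparse vectors are created when nodes are ingested
)
```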
Any good examples? Here is what I have now:

```python
from typing import Any, Dict
from llama_index.core import QueryBundle, Settings, VectorStoreIndex
from llama_index.core.base.response.schema import RESPONSE_TYPE
from llama_index.core.postprocessor import SimilarityPostprocessor
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.core.retrievers import VectorIndexRetriever

def _query_index(self, query_engine: RetrieverQueryEngine, query: str) -> RESPONSE_TYPE:
    embedded_query = Settings.embed_model.get_text_embedding(query)
    response = query_engine.query(QueryBundle(query_str=query, embedding=embedded_query))
    return response

def _create_query_engine(self) -> RetrieverQueryEngine:
    vector_index = VectorStoreIndex.from_vector_store(
        vector_store=self.vector_store, embed_model=Settings.embed_model
    )
    vector_retriever = VectorIndexRetriever(index=vector_index, similarity_top_k=5)
    vector_query_engine = RetrieverQueryEngine(
        retriever=vector_retriever,
        response_synthesizer=self.response_synthesizer,
        node_postprocessors=[SimilarityPostprocessor(similarity_cutoff=0.50)],
    )
    vector_query_engine.update_prompts(
        {"response_synthesizer:text_qa_template": self.qa_prompt_tmpl}
    )
    return vector_query_engine

def query_rag(self, query: str) -> Dict[str, Any]:
    vector_query_engine = self._create_query_engine()
    response = self._query_index(query_engine=vector_query_engine, query=query)
    return response
```
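A usage sketch, assuming `rag` is an instance of the class these methods belong to:

```python
result = rag.query_rag("What changed in the 0.10 migration?")  # hypothetical query
print(result)
```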
Is there a difference between these two ways of assigning `response`?

```python
def query_index(self, query_engines: List[BaseQueryEngine], queries: List[str]):
    for query in queries:
        embedded_query = Settings.embed_model.get_text_embedding(query)
        for engine in query_engines:
            response = engine.query(query)
            # <...or...>
            response = engine.query(QueryBundle(query_str=query, embedding=embedded_query))
```
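My understanding of the 0.10 API (worth verifying against your version): a plain string gets wrapped in a `QueryBundle` internally, and the retriever only computes an embedding when the bundle doesn't already carry one, so the two calls differ only in who does the embedding:

```python
from llama_index.core import QueryBundle, Settings

query = "example question"  # hypothetical
embedded_query = Settings.embed_model.get_text_embedding(query)

bundle_a = QueryBundle(query_str=query)                            # retriever embeds it itself
bundle_b = QueryBundle(query_str=query, embedding=embedded_query)  # reuses your embedding
```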
```python
vector_index = VectorStoreIndex.from_vector_store(
    vector_store=rag.vector_store, embed_model=Settings.embed_model
)
query_engine0 = vector_index.as_query_engine(
    llm=Settings.llm,
    similarity_top_k=15,
    node_postprocessors=[
        SimilarityPostprocessor(similarity_cutoff=0.60),
        cohere_rerank,
    ],
)
```
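For the `cohere_rerank` postprocessor referenced above, a minimal construction sketch (the key variable and `top_n` value are assumptions):

```python
from llama_index.postprocessor.cohere_rerank import CohereRerank

# Requires the llama-index-postprocessor-cohere-rerank package.
cohere_rerank = CohereRerank(api_key=COHERE_API_KEY, top_n=5)  # COHERE_API_KEY assumed defined
```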
```python
llm = MistralAI(model="mistral-tiny", api_key=MISTRAL_API_KEY)
# ...
print(type(llm))
# <class 'llama_index.llms.mistral.MistralAI'>
# ...
service_context = ServiceContext.from_defaults(llm=llm, embed_model=embed_model, system_prompt=SPF)
# ...
index = VectorStoreIndex.from_documents(
    documents=[web_docs, pdf_docs],  # if these are lists of Documents, this should be web_docs + pdf_docs
    service_context=service_context,
    storage_context=storage_context,
    show_progress=True,
)
```
```
Could not load OpenAI model. If you intended to use OpenAI, please check your OPENAI_API_KEY.
```
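That error usually means some component silently fell back to the default OpenAI LLM or embed model. One way to rule that out under 0.10 is to set the models globally via `Settings` instead of a `ServiceContext` (a sketch, assuming the `llama-index-llms-mistralai` package and that `embed_model`, `web_docs`, `pdf_docs`, and `storage_context` are defined as above):

```python
from llama_index.core import Settings, VectorStoreIndex
from llama_index.llms.mistralai import MistralAI

Settings.llm = MistralAI(model="mistral-tiny", api_key=MISTRAL_API_KEY)
Settings.embed_model = embed_model  # any non-OpenAI embed model you already use

index = VectorStoreIndex.from_documents(
    documents=web_docs + pdf_docs,  # assumes both are lists of Documents
    storage_context=storage_context,
    show_progress=True,
)
```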