# Insert nodes directly into the index's backing vector store.
# NOTE(review): assumes the nodes already carry embeddings (the store does
# not embed them for you) — confirm against the ingestion pipeline.
index.vector_store.add(nodes)
def create_engine_with_nodes(self, nodes):
    """Build a query engine after inserting *nodes* into a fresh index."""
    index = self.create_index()
    index.vector_store.add(nodes)
    llm = OpenAI(model=self.LLM)
    return self.create_engine_from_index(index, llm)

def create_engine(self):
    """Build a query engine over the existing index (no new nodes)."""
    index = self.create_index()
    llm = OpenAI(model=self.LLM)
    return self.create_engine_from_index(index, llm)

def create_index(self):
    """Create a VectorStoreIndex, Qdrant-backed when USE_QDRANT is set.

    Returns an index wired to the shared OpenAI embedding model; with
    USE_QDRANT off, a plain in-memory VectorStoreIndex is returned.
    """
    # NOTE(review): the `dimensions` kwarg needs a recent openai /
    # llama-index-embeddings-openai; older releases raise
    # "create() got an unexpected keyword argument 'dimensions'".
    embed_model = OpenAIEmbedding(
        model=OpenAIEmbeddingModelType.TEXT_EMBED_3_LARGE,
        dimensions=1024,
    )
    if self.USE_QDRANT:
        client = qdrant_client.QdrantClient(
            os.environ.get("QDRANT_CLOUD_ENDPOINT"),
            api_key=os.environ.get("QDRANT_API_KEY"),
            grpc_port=6334,
            prefer_grpc=True,
            timeout=30,
        )
        print("Creating Qdrant Vector Store Index from nodes")
        vector_store = QdrantVectorStore(
            client=client,
            collection_name=self.collection_name,
            parallel=5,
        )
        return VectorStoreIndex.from_vector_store(
            vector_store=vector_store, embed_model=embed_model
        )
    return VectorStoreIndex(embed_model=embed_model)
Exception has occurred: TypeError (note: full exception trace is shown but execution is paused at: _run_module_as_main) create() got an unexpected keyword argument 'dimensions'
# NOTE(review): `dimensions` is only accepted by newer openai /
# llama-index-embeddings-openai releases — older versions fail with
# "TypeError: create() got an unexpected keyword argument 'dimensions'"
# (see the trace above). Pin/upgrade both packages before relying on it.
self.embedding_model = OpenAIEmbedding(model=OpenAIEmbeddingModelType.TEXT_EMBED_3_LARGE, dimensions=1024)
# Execute the query string against the engine and capture its response.
response = query_engine.query(query)
# Ingestion pipeline: sentence-split first, then run the shared
# embedding model over each resulting chunk.
pipeline = IngestionPipeline(
    transformations=[
        SentenceSplitter(),
        self.embedding_model,
    ],
)
Which `openai` version or `llama-index-embeddings-openai` version are you using? For comparison, this works here:

>>> embed_model = OpenAIEmbedding(model="text-embedding-3-large", dimensions=1024)
>>> embed = embed_model.get_text_embedding("Hello world!")
>>> len(embed)
1024
>>>
openai==1.14.3 llama-index-embeddings-openai==0.1.6
llama-index-core==0.10.26 llama-index-embeddings-openai==0.1.7 llama-index-llms-openai==0.1.14 llama-index-vector-stores-qdrant==0.2.8 llamaindex-py-client==0.1.15 openai==1.5.0
My `core` and `openai` packages were older, so I just updated them. Now I get:

objc[79306]: +[__NSCFConstantString initialize] may have been in progress in another thread when fork() was called.
objc[79306]: +[__NSCFConstantString initialize] may have been in progress in another thread when fork() was called. We cannot safely call it or ignore it in the fork() child process. Crashing instead. Set a breakpoint on objc_initializeAfterForkError to debug.
objc[79307]: +[__NSCFConstantString initialize] may have been in progress in another thread when fork() was called.
objc[79307]: +[__NSCFConstantString initialize] may have been in progress in another thread when fork() was called. We cannot safely call it or ignore it in the fork() child process. Crashing instead. Set a breakpoint on objc_initializeAfterForkError to debug.
objc[79309]: +[__NSCFConstantString initialize] may have been in progress in another thread when fork() was called.
objc[79309]: +[__NSCFConstantString initialize] may have been in progress in another thread when fork() was called. We cannot safely call it or ignore it in the fork() child process. Crashing instead. Set a breakpoint on objc_initializeAfterForkError to debug.
objc[79310]: +[__NSCFConstantString initialize] may have been in progress in another thread when fork() was called.
objc[79310]: +[__NSCFConstantString initialize] may have been in progress in another thread when fork() was called. We cannot safely call it or ignore it in the fork() child process. Crashing instead. Set a breakpoint on objc_initializeAfterForkError to debug.
objc[79308]: +[__NSCFConstantString initialize] may have been in progress in another thread when fork() was called.
# NOTE(review): parallel=5 presumably fans uploads out across worker
# processes — on macOS, forked workers can trigger the objc
# "+[__NSCFConstantString initialize] ... fork()" crash seen in this
# thread; confirm before keeping it enabled there.
vector_store = QdrantVectorStore(client=client, collection_name=self.collection_name, parallel=5)
QdrantVectorStore(..., client=QdrantClient(...), aclient=AsyncQdrantClient(...))
# NOTE(review): async_add requires the store to have been constructed
# with an async client as well, i.e.
# QdrantVectorStore(..., client=QdrantClient(...), aclient=AsyncQdrantClient(...)).
await vector_store.async_add(nodes)
# Re-run the query against the engine and capture its response.
response = query_engine.query(query)
# Bypass response synthesis and count the raw nodes the engine's
# retriever returns for this query.
retriever = query_engine.retriever
retrieved_nodes = retriever.retrieve(query)
print(len(retrieved_nodes))  # prints 10
# Query engine: retrieve TOP_K candidates, drop anything below the
# similarity cutoff, rerank with a cross-encoder down to RERANK_CUTOFF,
# then summarize with the configured GPT-4 model.
self.LLM = "gpt-4-1106-preview"
llm = OpenAI(model=self.LLM)
query_engine = index.as_query_engine(
    similarity_top_k=self.TOP_K,
    node_postprocessors=[
        SimilarityPostprocessor(similarity_cutoff=self.SIMILARITY_SCORE_CUTOFF),
        SentenceTransformerRerank(
            model="cross-encoder/ms-marco-MiniLM-L-2-v2",
            top_n=self.RERANK_CUTOFF,
        ),
    ],
    llm=llm,
    response_mode=ResponseMode.SIMPLE_SUMMARIZE,
)