Do the `add` method (from MilvusVectorStore) and the `insert` method (from the Index classes) do the same thing?

ERROR: Unknown field for GenerationConfig: safety_settings File "/workspaces/CORTEX/.venv/lib/python3.10/site-packages/llama_index/llms/vertex/base.py", line 384, in achat generation = await acompletion_with_retry( File "/workspaces/CORTEX/.venv/lib/python3.10/site-packages/llama_index/llms/vertex/utils.py", line 148, in acompletion_with_retry return await _completion_with_retry(**kwargs)
llamaindex
, potentially due to a recent Google update that affected some configurations. However, I am unsure of the root cause.

llm = Vertex(...)
chat = await llm.achat(...)
pyproject.toml

[tool.poetry]
...

[tool.poetry.dependencies]
python = ">=3.10.11,<=3.11.5"
uvicorn = {extras = ["standard"], version = "^0.25.0"}
fastapi = "^0.109.0"
pyyaml = "^6.0.1"
injector = "^0.21.0"
overrides = "^7.7.0"
langchain = "^0.1.10"
langchain-google-genai = "0.0.8"
json-repair = "^0.9.0"
unstructured = {extras = ["all-docs"], version = "^0.14.2"}
opencv-python-headless = "^4.9.0.80"
jq = "^1.6.0"
pytesseract = "^0.3.10"
pymilvus = "^2.3.6"
langchain-openai = "^0.0.8"
scikit-learn = "^1.4.1.post1"
transformers = "^4.38.2"
ruff = "^0.3.1"
pandas = "^2.2.1"
llama-index = "^0.10.33"
python-multipart = "^0.0.9"
llama-index-vector-stores-milvus = "^0.1.10"
playwright = "^1.43.0"
python-magic = "^0.4.27"
llama-index-llms-gemini = "0.1.6"
opencv-python = "^4.9.0.80"
llama-index-llms-anthropic = "^0.1.11"
llama-index-llms-ollama = "^0.1.5"
llama-index-embeddings-ollama = "^0.1.2"
pymupdf = "^1.24.4"
pypdf = {extras = ["image"], version = "^4.2.0"}
surya-ocr = "^0.4.12"
llama-index-multi-modal-llms-ollama = "^0.1.3"
llama-index-llms-groq = "^0.1.4"
gensim = "^4.3.2"
firebase-admin = "^6.5.0"
demjson3 = "^3.0.6"
async def get_docs(self) -> List[Document]: """Get the parsed documents from llamaparse.""" documents = [] with tempfile.NamedTemporaryFile(delete=True) as temp: temp.write(base64.b64decode(self.b64)) temp.flush() temp.seek(0) parser = LlamaParse(result_type=self.result_type) documents = await parser.aload_data(temp.name) return documents
Error while parsing the file '/var/folders/d1/p4s8h3q552xbvq30g272vdbh0000gn/T/tmpqgr4rzp3': Currently, only the following file types are supported: ['.pdf', '.doc', '.docx', '.docm', '.dot', '.dotx', '.dotm', '.rtf', '.wps', '.wpd', '.sxw', '.stw', '.sxg', '.pages', '.mw', '.mcw', '.uot', '.uof', '.uos', '.uop', '.ppt', '.pptx', '.pot', '.pptm', '.potx', '.potm', '.key', '.odp', '.odg', '.otp', '.fopd', '.sxi', '.sti', '.epub', '.html', '.htm']
I am using PropertyGraphIndex while running an API that uses llamaindex. However, I encounter a RuntimeError because llamaindex is using asyncio.run inside the SchemaLLMPathExtractor, which conflicts with the running event loop in my API.

RuntimeError: asyncio.run() cannot be called from a running event loop
return asyncio.run(self.acall(nodes, show_progress=show_progress, **kwargs))
kg_extractor: SchemaLLMPathExtractor = SchemaLLMPathExtractor( llm=llm, possible_entities=entity.entities.as_literal_typealias(), possible_relations=entity.relationships.as_literal_typealias(), kg_validation_schema=entity.validation_schema, strict=True, num_workers=4, ) index = PropertyGraphIndex.from_existing( property_graph_store=self.client, llm=llm, kg_extractors=[kg_extractor], use_async=False, embed_model=embeddings, show_progress=True, ) for document in documents: index.insert(document)
possible_entities = Optional[List[str]]
Read a document from a folder/source -> create a "vector store" based on the documents -> query the LLM using the context of the created vector store