Traceback (most recent call last):
File "/Users/ahmednadiir/Desktop/agency/app.py", line 11, in <module>
from quran import quran_engine
File "/Users/ahmednadiir/Desktop/agency/quran.py", line 19, in <module>
quran_csv = CSVReader().load_data(file=csv_path);
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/ahmednadiir/Desktop/agency/localEvir/lib/python3.11/site-packages/llama_index/readers/file/tabular/base.py", line 48, in load_data
return [Document(text="\n".join(text_list), metadata=extra_info)]
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Users/ahmednadiir/Desktop/agency/localEvir/lib/python3.11/site-packages/pydantic/v1/main.py", line 341, in __init__
raise validation_error
pydantic.v1.error_wrappers.ValidationError: 1 validation error for Document
extra_info
none is not an allowed value (type=type_error.none.not_allowed)
import os
from llama_index.core import StorageContext, VectorStoreIndex, load_index_from_storage
from llama_index.readers.file import CSVReader
def get_index(data, index_name):
    """Return a VectorStoreIndex over *data*, cached on disk under *index_name*.

    If no directory called *index_name* exists yet, a new index is built from
    the given documents and persisted there; otherwise the previously
    persisted index is loaded instead of re-embedding everything.

    Args:
        data: Sequence of llama_index ``Document`` objects to index.
        index_name: Directory path used both as the cache key and the
            persist location.

    Returns:
        The built or reloaded ``VectorStoreIndex``.
    """
    if not os.path.exists(index_name):
        # First run: embed the documents (slow) and persist for next time.
        print("building index", index_name)
        index = VectorStoreIndex.from_documents(data, show_progress=True)
        index.storage_context.persist(persist_dir=index_name)
    else:
        # Cached run: rehydrate the index from the persisted storage context.
        index = load_index_from_storage(
            StorageContext.from_defaults(persist_dir=index_name)
        )
    return index
# Build (or reload) the vector index over the Quran English-tafsir CSV and
# expose a query engine for it.
csv_path = os.path.join("data", "quran-english-tafsir.csv")
# Pass an explicit (empty) extra_info dict: CSVReader.load_data forwards it
# as Document.metadata, and pydantic rejects None there
# ("none is not an allowed value" ValidationError).
quran_csv = CSVReader().load_data(file=csv_path, extra_info={})
quran_index = get_index(quran_csv, "quran")
quran_engine = quran_index.as_query_engine()
# Removed stray `quran_engine.query()` — query() requires a query string and
# raises TypeError when called with no arguments. Callers (e.g. app.py)
# import quran_engine and issue queries themselves.
# The llama-index-llms-ollama integration package (installed below) exposes
# the class under llama_index.llms.ollama, not llama_index.core.llms.ollama.
from llama_index.llms.ollama import Ollama
# shell command (run in a terminal, not Python): pip install llama-index-llms-ollama
Could not load OpenAI embedding model. If you intended to use OpenAI, please check your OPENAI_API_KEY.
Original error:
No API key found for OpenAI.
Please set either the OPENAI_API_KEY environment variable or openai.api_key prior to initialization.
API keys can be found or created at https://platform.openai.com/account/api-keys
# NOTE(review): hard-coded credential placeholder. Load real keys from an
# environment variable or a .env file (python-dotenv is already used below);
# never commit an actual API key to source control.
API_KEY = "api-is-here"
import os
from llama_index.core import StorageContext, VectorStoreIndex, load_index_from_storage
from llama_index.readers.file import PDFReader
from dotenv import load_dotenv
from llama_index.core import Settings
from llama_index.llms.gemini import Gemini
# Load .env BEFORE constructing the Gemini client: Gemini() reads its API key
# from the environment at construction time, so calling load_dotenv()
# afterwards (as before) leaves the key unset and triggers the
# "No API key found" fallback error.
load_dotenv()
Settings.llm = Gemini()
def get_index(data, index_name):
    """Build or reload a persisted VectorStoreIndex stored under *index_name*.

    When a directory named *index_name* already exists it is treated as a
    previously persisted index and reloaded; otherwise the documents in
    *data* are embedded into a fresh index which is persisted there.
    """
    if os.path.exists(index_name):
        # Reuse the on-disk cache instead of re-embedding the documents.
        storage = StorageContext.from_defaults(persist_dir=index_name)
        return load_index_from_storage(storage)

    print("building index", index_name)
    fresh_index = VectorStoreIndex.from_documents(data, show_progress=True)
    fresh_index.storage_context.persist(persist_dir=index_name)
    return fresh_index
# Build (or reload) a vector index over the tafsir PDF and expose a query
# engine for it.
pdf_path = os.path.join("data", "the-tafsir-of-the-quran.pdf")
# NOTE(review): the variable names and index directory say "canada" but the
# data loaded is the Quran tafsir PDF — this looks copied from another
# example. Confirm intent before renaming: other modules may import
# canada_engine, so a rename must be coordinated with callers.
canada_pdf = PDFReader().load_data(file=pdf_path)
canada_index = get_index(canada_pdf, "canada")
canada_engine = canada_index.as_query_engine()