Find answers from the community

Updated 3 months ago

Csv

am getting this error and i dont see any way to fix it:
Traceback (most recent call last): File "/Users/ahmednadiir/Desktop/agency/app.py", line 11, in <module> from quran import quran_engine File "/Users/ahmednadiir/Desktop/agency/quran.py", line 19, in <module> quran_csv = CSVReader().load_data(file=csv_path); ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/Users/ahmednadiir/Desktop/agency/localEvir/lib/python3.11/site-packages/llama_index/readers/file/tabular/base.py", line 48, in load_data return [Document(text="\n".join(text_list), metadata=extra_info)] ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/Users/ahmednadiir/Desktop/agency/localEvir/lib/python3.11/site-packages/pydantic/v1/main.py", line 341, in __init__ raise validation_error pydantic.v1.error_wrappers.ValidationError: 1 validation error for Document extra_info none is not an allowed value (type=type_error.none.not_allowed)

code:
import os from llama_index.core import StorageContext, VectorStoreIndex, load_index_from_storage from llama_index.readers.file import CSVReader def get_index(data, index_name): index = None if not os.path.exists(index_name): print("building index", index_name) index = VectorStoreIndex.from_documents(data, show_progress=True) index.storage_context.persist(persist_dir=index_name) else: index = load_index_from_storage( StorageContext.from_defaults(persist_dir=index_name) ) return index csv_path = os.path.join("data", "quran-english-tafsir.csv"); quran_csv = CSVReader().load_data(file=csv_path); quran_index = get_index(quran_csv, "quran") quran_engine = quran_index.as_query_engine() quran_engine.query()
L
C
d
9 comments
.load_data(..., extra_info={})
Should probably patch that reader to do that for you
so like this quran_csv = CSVReader().load_data(file=csv_path, extra_info={});
is there anyway i can put my gemini api on my local .env file so it can defaultly use it without definding it like it doing to openai
Nope. The easiest way is just setting it as a global default

Plain Text
from llama_index.core import Settings

Settings.llm = Gemini()
i did this:
import os from dotenv import load_dotenv from llama_index.core import StorageContext, VectorStoreIndex, load_index_from_storage from llama_index.readers.file import CSVReader from llama_index.llms.gemini import Gemini from llama_index.core import Settings Settings.llm = Gemini() load_dotenv() GOOGLE_API_KEY = "api-ishere"; os.environ["GOOGLE_API_KEY"] = GOOGLE_API_KEY def get_index(data, index_name): index = None if not os.path.exists(index_name): print("building index", index_name) index = VectorStoreIndex.from_documents(data, show_progress=True) index.storage_context.persist(persist_dir=index_name) else: index = load_index_from_storage( StorageContext.from_defaults(persist_dir=index_name) ) return index csv_path = os.path.join("data", "quran-english-tafsir.csv"); quran_csv = CSVReader().load_data(file=csv_path, extra_info={}); quran_index = get_index(quran_csv, "quran") quran_engine = quran_index.as_query_engine() quran_engine.query()

but i got :
google.api_core.exceptions.InvalidArgument: 400 API key not valid. Please pass a valid API key. [reason: "API_KEY_INVALID" domain: "googleapis.com" metadata { key: "service" value: "generativelanguage.googleapis.com" } ]

i dont know the reason altho i pass the right api key in the file and also in the .env file?
Where is your api key from? Google is kind of confusing, Gemini and vertex are two separate platforms (not sure if you are using vertexai or something else)
This is what works for me:

Plain Text
from dotenv import load_dotenv
load_dotenv()

Settings.llm = Gemini(
    api_key = os.getenv('GEMINI_API_KEY'),
    model_name = 'models/gemini-pro'
)
Add a reply
Sign up and join the conversation on Discord