def setup_index(documents):
    """Build a VectorStoreIndex over *documents* with a multilingual embedding model.

    Uses an embedding-only service context (llm=None) so no OpenAI key is needed
    at indexing time.
    """
    # Multilingual MiniLM sentence-transformer for the embeddings.
    embedding_model = HuggingFaceEmbedding(
        'sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2'
    )
    embedding_context = ServiceContext.from_defaults(
        embed_model=embedding_model,
        llm=None,
        chunk_size=1024,
    )
    return VectorStoreIndex.from_documents(
        documents, service_context=embedding_context
    )
def load_documents():
    """Rebuild and return the index persisted under ./data.

    NOTE(review): despite the name, this returns a loaded index, not raw
    documents — callers rely on the current name, so it is kept.
    """
    # Storage context pointing at the previously persisted index data.
    persisted_storage = StorageContext.from_defaults(persist_dir="./data")
    return load_index_from_storage(persisted_storage)
Could not load OpenAI model. If you intended to use OpenAI, please check your OPENAI_API_KEY.
Original error:
No API key found for OpenAI.
Pass the `service_context` when you are loading the index as well, if `service_context` is not declared globally.
is not declared globally. service_context_llm = ServiceContext.from_defaults(
llm=llm,
params=parameters,
apikey=api_key,
roject_id=project_id
),
system_prompt="Eres un asistente de IA que responde sobre postulaciones a puestos de trabajo \
No debes ser sesgado ni racista en tus respuestas. Responde siempre en español."
)
retriever = VectorIndexRetriever(
index=index,
similarity_top_k=10,
)
response_synthesizer = llama_index.response_synthesizers.get_response_synthesizer(
response_mode="compact",
service_context=service_context_llm,
use_async=False,
streaming=False,
)
return RetrieverQueryEngine(
retriever=retriever,
response_synthesizer=response_synthesizer,
)
`load_index_from_storage` will create a new (default) service context if one is not passed in, so pass yours explicitly: `index = load_index_from_storage(storage_context, service_context=service_context)`. Alternatively, declare it globally: `from llama_index import set_global_service_context; set_global_service_context(service_context)`.
# Build the response synthesizer explicitly: compact mode, synchronous,
# non-streaming, bound to the LLM-backed service context.
synthesizer_options = {
    "response_mode": "compact",
    "service_context": service_context_llm,
    "use_async": False,
    "streaming": False,
}
response_synthesizer = llama_index.response_synthesizers.get_response_synthesizer(
    **synthesizer_options
)
Or simply use `index.as_query_engine(response_mode="compact", similarity_top_k=10)` to create the query engine.