Yes, that's for embeddings. You can leave the rest of the arguments as default if you aren't using Azure — just set the API key.
You will also need to set the API key in the LLM definition.
# Build a ServiceContext that pairs a LangChain OpenAI LLM with a custom
# embedding model, then reload a persisted index with it.
from llama_index import ServiceContext, LLMPredictor, load_index_from_storage
from langchain.llms import OpenAI
# if you want to use gpt-3.5 or gpt-4, use ChatOpenAI
# from langchain.chat_models import ChatOpenAI

service_context = ServiceContext.from_defaults(
    # NOTE: a comma is required between keyword arguments here — the original
    # snippet omitted it, which raises a SyntaxError.
    llm_predictor=LLMPredictor(
        llm=OpenAI(model_name='text-davinci-003', temperature=0, openai_api_key="<key>")
    ),
    # assumes `embed_model` was constructed earlier (e.g. a LangchainEmbedding)
    embed_model=embed_model,
)

# assumes `storage_context` was created earlier via StorageContext.from_defaults(...)
index = load_index_from_storage(storage_context, service_context=service_context)