from llama_index.core import VectorStoreIndex
from llama_index.embeddings.openai import OpenAIEmbedding

# test the embedding model directly
embed_model = OpenAIEmbedding(api_key=api_key)
resp = embed_model.get_text_embedding("hello world")

# can also pass it in directly when building the index
index = VectorStoreIndex(nodes=nodes, embed_model=embed_model)
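Instead of passing the embedding model into each index, it could also be set once on the global configuration; a minimal sketch, assuming the `Settings` object exported from `llama_index.core`:

```python
from llama_index.core import Settings, VectorStoreIndex

# set the embedding model globally; indexes built afterwards pick it up
# without an explicit embed_model argument
Settings.embed_model = embed_model

index = VectorStoreIndex(nodes=nodes)
```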
from llama_index.llms.openai import OpenAI

llm = OpenAI(model="gpt-4o-mini", api_key="...")
query_engine = index.as_query_engine(llm=llm)
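With the LLM attached to the query engine, querying works as usual; a short sketch, where the question string is just an illustrative placeholder:

```python
# run a query against the index using the configured LLM
response = query_engine.query("What does the indexed corpus say about evaluation?")
print(response)
```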
from llama_index.core.selectors import LLMSingleSelector

selector = LLMSingleSelector.from_defaults(llm=llm)
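On its own the selector does nothing; it is typically handed to a router. A minimal sketch, assuming `RouterQueryEngine` and `QueryEngineTool` from `llama_index.core` plus the `index` and `llm` defined above (the tool description and question string are placeholders):

```python
from llama_index.core.query_engine import RouterQueryEngine
from llama_index.core.tools import QueryEngineTool

# wrap an existing query engine as a tool the selector can route to
vector_tool = QueryEngineTool.from_defaults(
    query_engine=index.as_query_engine(llm=llm),
    description="Useful for answering questions over the indexed documents.",
)

# the selector uses the LLM to pick exactly one tool per query
router = RouterQueryEngine(
    selector=selector,
    query_engine_tools=[vector_tool],
)
response = router.query("Which documents mention evaluation metrics?")
```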