```python
from llama_index.core import VectorStoreIndex
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.llms.openai import OpenAI

llm = OpenAI()
embed_model = OpenAIEmbedding()
index = VectorStoreIndex.from_documents(..., embed_model=embed_model)
index.as_query_engine(llm=llm)
```
```python
from llama_index.core import Settings

Settings.llm = llm
Settings.embed_model = embed_model
```
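To be clear on precedence: anything you pass locally wins over the global default. A quick sketch (the model names are just examples):

```python
from llama_index.core import Document, Settings, VectorStoreIndex
from llama_index.llms.openai import OpenAI

Settings.llm = OpenAI(model="gpt-3.5-turbo")  # global default

documents = [Document(text="Settings demo doc.")]
index = VectorStoreIndex.from_documents(documents)

# The local llm kwarg takes priority over Settings.llm here.
query_engine = index.as_query_engine(llm=OpenAI(model="gpt-4"))
```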
You can still set `Settings.prompt_helper = prompt_helper`, but I kind of advocate against using it.

```python
from llama_index.core import SimpleDirectoryReader, VectorStoreIndex
from llama_index.llms.openai import OpenAI

llm = OpenAI(temperature=0.0, model="gpt-4-0613", max_tokens=3000)

documents = SimpleDirectoryReader(
    directory_path, file_metadata=filename_fn
).load_data()

index = VectorStoreIndex.from_documents(documents)
query_engine = index.as_query_engine(llm=llm)
```
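Side note: `filename_fn` isn't defined in the snippet -- `file_metadata` just expects a callable that takes the file path and returns a metadata dict. A minimal sketch of what it might look like:

```python
import os

# Hypothetical metadata helper: SimpleDirectoryReader calls this once per file.
def filename_fn(file_path: str) -> dict:
    return {"file_name": os.path.basename(file_path)}
```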
```python
from llama_index.core import Settings

Settings.llm = llm

...

query_engine = index.as_query_engine()
```
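Putting it together, a sketch of the fully migrated version (the directory path is a placeholder, and `filename_fn` is the helper sketched above):

```python
from llama_index.core import Settings, SimpleDirectoryReader, VectorStoreIndex
from llama_index.llms.openai import OpenAI

# Set the LLM once, globally; no need to thread it through every call.
Settings.llm = OpenAI(temperature=0.0, model="gpt-4-0613", max_tokens=3000)

documents = SimpleDirectoryReader(
    "./data",                   # placeholder directory
    file_metadata=filename_fn,  # metadata helper from the sketch above
).load_data()

index = VectorStoreIndex.from_documents(documents)
query_engine = index.as_query_engine()  # picks up Settings.llm
```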
`text-embedding-ada-002`
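That's the default OpenAI embedding model. If you'd rather pin it explicitly than rely on the default, a quick sketch (assuming the standard `model` kwarg on `OpenAIEmbedding`):

```python
from llama_index.core import Settings
from llama_index.embeddings.openai import OpenAIEmbedding

# Pin the embedding model explicitly instead of relying on the default.
Settings.embed_model = OpenAIEmbedding(model="text-embedding-ada-002")
```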
`Settings.prompt_helper = ...` -- but in 99.999% of use cases you don't need to change this lol
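If you're in that 0.001%, a minimal sketch (these kwargs are the standard `PromptHelper` constructor args; the values are just examples):

```python
from llama_index.core import PromptHelper, Settings

# Only override this if you really need custom context-window handling.
Settings.prompt_helper = PromptHelper(
    context_window=4096,      # total tokens the model can see
    num_output=256,           # tokens reserved for the response
    chunk_overlap_ratio=0.1,  # overlap between repacked text chunks
)
```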