Set the global defaults first, then proceed further:

```python
from llama_index.core import Settings
Settings.llm = your_llm_instance
Settings.embed_model = your_embed_model_instance
```
pip install -U llama-index-llms-openai
# Rebuild the persisted vector index for this product from disk.
storage_context = StorageContext.from_defaults(persist_dir=f"{product_code}_llama")
index = load_index_from_storage(storage_context)

# Configure the LLM once; temperature/max_tokens live on the llm object itself.
llm = OpenAI(temperature=temperature, model=GPT_MODEL, max_tokens=num_outputs)

# ServiceContext was deprecated and removed in llama-index >= 0.10 (which the
# modular `llama-index-llms-openai` package implies) — pass the llm directly.
# The stray `temperature=` kwarg is dropped: as_chat_engine has no such
# parameter and the llm already carries it.
engine = index.as_chat_engine(
    chat_mode="context",
    llm=llm,
    verbose=True,
    system_prompt=prompt,
)