# Build the LLM predictor and query engine for the persisted index.
# NOTE: the OpenAI constructor takes `model=` — the older `model_name=`
# keyword is deprecated/removed in current llama_index releases
# (e.g. use model="gpt-4", NOT model_name="gpt-4").
from llama_index.llms import OpenAI

# Predictor wrapping the chat model; num_outputs caps the completion length.
# assumes num_outputs is defined earlier in the file — TODO confirm
_llm_predictor = LLMPredictor(
    llm=OpenAI(temperature=0.5, model="gpt-3.5-turbo", max_tokens=num_outputs)
)

# Rehydrate the index from storage and expose it as a query engine.
# assumes storage_context / service_context are defined earlier — TODO confirm
# (fixed typo: was `query_engne`)
query_engine = load_index_from_storage(
    storage_context, service_context=service_context
).as_query_engine()