# Configure the LLM used by llama_index: wrap LangChain's ChatOpenAI chat
# model in an LLMPredictor so it can be plugged into a ServiceContext.
from langchain.chat_models import ChatOpenAI

from llama_index import LLMPredictor, ServiceContext

llm_predictor = LLMPredictor(
    llm=ChatOpenAI(
        temperature=0,            # deterministic output for reproducible answers
        max_tokens=1024,          # cap on completion length
        model_name="gpt-3.5-turbo",
    )
)