# Legacy setup: wrap a LangChain chat model in a LlamaIndex LLMPredictor,
# then build a ServiceContext from it.
from langchain.chat_models import ChatOpenAI
# Bug fix: LLMPredictor and ServiceContext were used but never imported.
from llama_index import LLMPredictor, ServiceContext

llm_predictor = LLMPredictor(
    llm=ChatOpenAI(
        openai_api_key=OPENAI_API_KEY,  # assumes OPENAI_API_KEY is defined earlier in the file/env — TODO confirm
        temperature=0,                  # deterministic decoding
        max_tokens=1024,
        model_name="gpt-3.5-turbo",
    )
)
# Bug fix: an LLMPredictor must be passed via `llm_predictor=`, not `llm=` —
# the `llm=` keyword of ServiceContext.from_defaults expects a raw LLM object,
# not the predictor wrapper.
service_context = ServiceContext.from_defaults(
    chunk_size_limit=512,  # NOTE(review): deprecated alias of `chunk_size` in newer llama_index — verify installed version
    llm_predictor=llm_predictor,
)
# Modern setup: use LlamaIndex's built-in OpenAI LLM wrapper directly,
# no LangChain predictor layer needed.
from llama_index.llms import OpenAI

# Same model/temperature as the legacy path: deterministic gpt-3.5-turbo.
llm = OpenAI(model="gpt-3.5-turbo", temperature=0)
service_context = ServiceContext.from_defaults(llm=llm)