# Example call: override the default pandas prompt when querying the index.
# NOTE(review): `index` and `DEFAULT_PANDAS_PROMPT` are assumed to be defined
# earlier in the file/notebook this snippet came from — confirm before running.
index.query(..., pandas_prompt=DEFAULT_PANDAS_PROMPT)
# Configure llama_index to use a LangChain chat model as its LLM.
# (Original source had these statements fused onto one line, which is a
# SyntaxError; reformatted into separate statements with the comment restored.)
from langchain.chat_models import ChatOpenAI
from llama_index import ServiceContext, LLMPredictor

# can also use gpt-4 if you have access
llm_predictor = LLMPredictor(
    llm=ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo")
)
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor)