from llama_index import LLMPredictor, ServiceContext, load_index_from_storage
from llama_index.llms import OpenAI

# Attach a system prompt so the LLM always answers in Italian.
llm_predictor = LLMPredictor(llm=OpenAI(), system_prompt="Always respond in Italian.")
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor)

# Reload the persisted index and expose it as a query engine.
query_engine = load_index_from_storage(
    storage_context, service_context=service_context
).as_query_engine()

# The language instruction can also be appended to the query text itself.
response = query_engine.query("\nRespond in Italian")
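The snippet above assumes storage_context was created earlier from a persisted index. A minimal sketch of building and reloading one, assuming hypothetical ./data and ./storage directories:

from llama_index import SimpleDirectoryReader, StorageContext, VectorStoreIndex

# Build an index from local documents and persist it to disk
# ("./data" and "./storage" are hypothetical paths).
documents = SimpleDirectoryReader("./data").load_data()
index = VectorStoreIndex.from_documents(documents)
index.storage_context.persist(persist_dir="./storage")

# Reload it later; this storage_context feeds load_index_from_storage above.
storage_context = StorageContext.from_defaults(persist_dir="./storage")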