I'm assuming you mean a sort of system prompt, right?
Here's an example, specific to chat models like gpt-3.5 and gpt-4. You'll want to set both the text_qa_template (used to generate the initial answer) and the refine_template (used when the answer is refined against further retrieved context):
from llama_index.prompts.prompts import QuestionAnswerPrompt, RefinePrompt
from langchain.prompts.chat import (
    AIMessagePromptTemplate,
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
    SystemMessagePromptTemplate,
)
# The system message gets prepended to both prompts below
SYSTEM_PROMPT = SystemMessagePromptTemplate.from_template(
    "Every response should be written like you are a pirate."
)
# Refine prompt: asks the model to update an existing answer with new context
CHAT_REFINE_PROMPT_TMPL_MSGS = [
    SYSTEM_PROMPT,
    HumanMessagePromptTemplate.from_template("{query_str}"),
    AIMessagePromptTemplate.from_template("{existing_answer}"),
    HumanMessagePromptTemplate.from_template(
        "We have the opportunity to refine the above answer "
        "(only if needed) with some more context below.\n"
        "------------\n"
        "{context_msg}\n"
        "------------\n"
        "Given the new context, refine the original answer to better "
        "answer the question. "
        "If the context isn't useful, output the original answer again."
    ),
]
CHAT_REFINE_PROMPT_LC = ChatPromptTemplate.from_messages(CHAT_REFINE_PROMPT_TMPL_MSGS)
CHAT_REFINE_PROMPT = RefinePrompt.from_langchain_prompt(CHAT_REFINE_PROMPT_LC)
# QA prompt: generates the initial answer from the retrieved context
CHAT_QA_PROMPT_TMPL_MSGS = [
    SYSTEM_PROMPT,
    HumanMessagePromptTemplate.from_template(
        "Context information is below.\n"
        "---------------------\n"
        "{context_str}"
        "\n---------------------\n"
        "Given the context information and not prior knowledge, "
        "answer the question: {query_str}\n"
    ),
]
CHAT_QA_PROMPT_LC = ChatPromptTemplate.from_messages(CHAT_QA_PROMPT_TMPL_MSGS)
CHAT_QA_PROMPT = QuestionAnswerPrompt.from_langchain_prompt(CHAT_QA_PROMPT_LC)
...
# Pass both custom prompts into the query engine
query_engine = index.as_query_engine(
    text_qa_template=CHAT_QA_PROMPT, refine_template=CHAT_REFINE_PROMPT
)
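Then you can query as usual. A quick sketch, assuming index is an index you've already built over your documents (the question string here is just a placeholder):

response = query_engine.query("What did the author do growing up?")
print(response)

Since the same SYSTEM_PROMPT is included in both templates, both the initial QA step and any refine steps should come back in pirate-speak.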