from llama_index.prompts import ChatPromptTemplate, ChatMessage, MessageRole

# Chat-style prompt: a fixed system instruction followed by a user turn.
# The "{topic}" placeholder in the user message is filled in at query time.
message_templates = [
    ChatMessage(content="You are an expert system.", role=MessageRole.SYSTEM),
    ChatMessage(
        content="Generate a short story about {topic}",
        role=MessageRole.USER,
    ),
]
# Build a default service context around `llm` (defined elsewhere in this
# file); the system prompt is prepended to every query it serves.
story_system_prompt = "Generate a short story about.."
service_context = ServiceContext.from_defaults(
    llm=llm,
    system_prompt=story_system_prompt,
)
from llama_index import get_response_synthesizer
from llama_index.llms import OpenAI
from llama_index.prompts import PromptTemplate

# Custom prompts for the query pipeline (typo "refin" fixed).
refine_templ = "My custom refine prompt..."
SYSTEM_PROMPT = "My custom system prompt..."

# This template is passed as `refine_template` below, so name it
# accordingly — it was previously mislabelled `qa_prompt`.
refine_prompt = PromptTemplate(refine_templ)

# temperature=1 favors creative generations; max_tokens bounds story length.
llm = OpenAI(temperature=1, model="gpt-3.5-turbo", max_tokens=2048)

service_context = ServiceContext.from_defaults(
    system_prompt=SYSTEM_PROMPT,
    llm=llm,
)

# Synthesizer that applies the custom refine template when merging
# retrieved chunks into a final answer.
response_synthesizer = get_response_synthesizer(
    service_context=service_context,
    refine_template=refine_prompt,
)

# NOTE(review): `index` and `message` are assumed to be defined elsewhere
# in this file — confirm before running standalone.
query_engine = index.as_query_engine(
    similarity_top_k=1,
    response_synthesizer=response_synthesizer,
)
response = query_engine.query(message)