# imports below assume a pre-0.10 (0.9.x-style) llama-index layout
from llama_index import ServiceContext
from llama_index.embeddings import HuggingFaceEmbedding
from llama_index.llms.bedrock import Bedrock

llm = Bedrock(model="meta.llama2-13b-chat-v1", profile_name="machineuser1")
embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5")

service_context = ServiceContext.from_defaults(
    llm=llm,
    embed_model=embed_model,
    chunk_size=256,
)
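For orientation, here is a minimal sketch of how this service context might be used end to end; the `data/` folder, the index, and the query string are illustrative placeholders, not part of the original setup.

from llama_index import SimpleDirectoryReader, VectorStoreIndex

# hypothetical: build an index over local files using the service context above
documents = SimpleDirectoryReader("data").load_data()
index = VectorStoreIndex.from_documents(documents, service_context=service_context)

query_engine = index.as_query_engine()
print(query_engine.query("Summarize the main findings in the documents."))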
str(message), which renders both the role and the content into a single string:

def completion_to_prompt(completion):
    return completion


def messages_to_prompt(messages):
    return "\n".join([str(x) for x in messages])


llm = Bedrock(
    ...,
    completion_to_prompt=completion_to_prompt,
    messages_to_prompt=messages_to_prompt,
)
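For illustration (not from the original), this is roughly what that join produces for a short chat; the exact rendering depends on ChatMessage's string form:

from llama_index.llms import ChatMessage

msgs = [
    ChatMessage(role="system", content="You are a helpful assistant."),
    ChatMessage(role="user", content="Summarize the context."),
]
# each message stringifies as "role: content", joined with newlines
print(messages_to_prompt(msgs))
# system: You are a helpful assistant.
# user: Summarize the context.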
from llama_index.response_synthesizers import get_response_synthesizer

response_synthesizer = get_response_synthesizer(  # try response_mode="compact"?
    text_qa_template=qa_template,
    refine_template=new_summary_tmpl,
    # streaming=True,
)
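One way this synthesizer could be wired up (a sketch, assuming an `index` built as above):

from llama_index.query_engine import RetrieverQueryEngine

# hypothetical: pair an existing retriever with the customized synthesizer
query_engine = RetrieverQueryEngine(
    retriever=index.as_retriever(similarity_top_k=3),
    response_synthesizer=response_synthesizer,
)
response = query_engine.query("What do the trusted sources report?")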
def messages_to_prompt(messages):
    prompt = ""
    for message in messages:
        if message.role == "system":
            prompt += f"<|system|>\n{message.content}</s>\n"
        elif message.role == "user":
            prompt += f"<|user|>\n{message.content}</s>\n"
        elif message.role == "assistant":
            prompt += f"<|assistant|>\n{message.content}</s>\n"

    # ensure we start with a system prompt, insert blank if needed
    if not prompt.startswith("<|system|>\n"):
        prompt = "<|system|>\n</s>\n" + prompt

    # add final assistant prompt
    prompt = prompt + "<|assistant|>\n"

    return prompt


def completion_to_prompt(completion):
    return f"<|system|>\n</s>\n<|user|>\n{completion}</s>\n<|assistant|>\n"
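As a quick, illustrative sanity check (not from the original), the formatter wraps a short conversation in the Zephyr-style tags and appends the final assistant header:

from llama_index.llms import ChatMessage

example = [ChatMessage(role="user", content="What is retrieval-augmented generation?")]
print(messages_to_prompt(example))
# <|system|>
# </s>
# <|user|>
# What is retrieval-augmented generation?</s>
# <|assistant|>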
from llama_index.prompts import PromptTemplate

template = (
    "We have provided trusted context information below.\n"
    "---------------------\n"
    "{context_str}"
    "\n---------------------\n"
    "Given this trusted and scientific information, please answer the question: {query_str}. "
    "Remember that the statements in the context are verified and come from trusted sources.\n"
)
qa_template = PromptTemplate(template)
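To see the rendered prompt, the template can be formatted directly with made-up values (illustrative only):

print(qa_template.format(
    context_str="Gallium melts at 29.76 degrees Celsius.",
    query_str="At what temperature does gallium melt?",
))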
new_summary_tmpl_str = (
    "The original query is as follows: {query_str}\n"
    "We have provided an existing answer: {existing_answer}\n"
    "We have the opportunity to refine the existing answer (only if needed) with some more trusted context below. "
    "Remember that the statements in the context are verified and come from trusted sources.\n"
    "------------\n"
    "{context_msg}\n"
    "------------\n"
    "Given the new trusted context, refine the original answer to better answer the query. "
    "If the context isn't useful, return the original answer. "
    "Remember that the statements in the new context are verified and come from trusted sources.\n"
    "Refined Answer: sure thing! "
)
new_summary_tmpl = PromptTemplate(new_summary_tmpl_str)
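These templates can also be swapped onto an already-built query engine through update_prompts; the prompt keys below assume the default response synthesizer naming, so treat this as a sketch:

# hypothetical: query_engine was created earlier in the script
query_engine.update_prompts({
    "response_synthesizer:text_qa_template": qa_template,
    "response_synthesizer:refine_template": new_summary_tmpl,
})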