import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from sentence_transformers import SentenceTransformer
from llama_index.core import Settings, VectorStoreIndex, SimpleDirectoryReader, PromptTemplate
from llama_index.llms.huggingface import HuggingFaceLLM
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
# Load every file under the merged dataset directory into Document objects.
reader = SimpleDirectoryReader("../Datasets/Merged")
documents = reader.load_data()
def completion_to_prompt(completion):
    """Wrap a raw completion string in Zephyr chat-template markers.

    Produces an empty system block, the completion as the user turn, and a
    trailing assistant marker that invites the model to generate.
    """
    parts = (
        "<|system|>\n</s>\n",
        "<|user|>\n",
        completion,
        "</s>\n",
        "<|assistant|>\n",
    )
    return "".join(parts)
def messages_to_prompt(messages):
    """Build a Zephyr-style chat prompt from a sequence of chat messages.

    Each message must expose ``role`` (one of ``"system"``, ``"user"``,
    ``"assistant"``) and ``content``. Messages are serialised with Zephyr
    role markers, a (possibly empty) system block is guaranteed at the
    front, and a trailing ``<|assistant|>`` marker invites the model to
    generate its answer.

    Fixes vs. original: removed a stray unmatched ``)`` after ``return``
    (SyntaxError), restored the mangled indentation, and corrected the
    "format/" typo in the instruction preamble to "format:".
    """
    # Instruction preamble constraining the model to the pattern-description
    # format. Its lines are kept at column 0 inside the triple-quoted string
    # so the emitted prompt text is unindented.
    prompt = """You are an expert in blockchain technologies and blockchain patterns.\n
Your answer should be returned in the following format: \n
Summary: \n
Context: \n
Problem: \n
Forces: \n
Solution: \n
Benefits: \n
Drawbacks: \n
Related patterns: \n
Known uses: \n
The context will be provided and you should base your answer on it.
"""
    # Serialise each message under its Zephyr role marker; unknown roles are
    # silently skipped (original behavior).
    for message in messages:
        if message.role == "system":
            prompt += f"<|system|>\n{message.content}</s>\n"
        elif message.role == "user":
            prompt += f"<|user|>\n{message.content}</s>\n"
        elif message.role == "assistant":
            prompt += f"<|assistant|>\n{message.content}</s>\n"
    # Guarantee the prompt opens with a system block. NOTE(review): because
    # the preamble above never starts with "<|system|>", this branch always
    # fires in practice — kept as-is to preserve the original output.
    if not prompt.startswith("<|system|>\n"):
        prompt = "<|system|>\n</s>\n" + prompt
    # Trailing assistant marker tells the model to start generating.
    prompt = prompt + "<|assistant|>\n"
    return prompt