from llama_index.core.prompts import PromptTemplate
from llama_index.llms.openai import OpenAI

# Build a plain-text prompt from a template and substitute the variable.
prompt = PromptTemplate("Some topic {topic}")
formatted_prompt = prompt.format(topic="ghosts")

# Send the filled-in prompt through the completion endpoint and print the text.
llm = OpenAI(model="gpt-4o-mini")
response = llm.complete(formatted_prompt)
print(str(response))
from llama_index.core.prompts import ChatPromptTemplate
from llama_index.core.llms import ChatMessage
from llama_index.llms.openai import OpenAI

# Chat-style template: a fixed system persona plus a user turn with a
# {topic} placeholder to fill in at format time.
prompt = ChatPromptTemplate.from_messages(
    [
        ChatMessage(role="system", content="Talk like a pirate."),
        ChatMessage(role="user", content="Tell me a joke about {topic}."),
    ]
)
formatted_messages = prompt.format_messages(topic="Dogs")

# Run the formatted conversation through the chat endpoint.
llm = OpenAI(model="gpt-4o-mini")
response = llm.chat(formatted_messages)
print(response.message.content)
from llama_index.core.memory import ChatMemoryBuffer

# Token-limited conversation memory; the llm is used to size the buffer.
# NOTE: relies on `llm`, `ChatMessage`, and `formatted_prompt` defined by the
# earlier snippets in this file.
memory = ChatMemoryBuffer.from_defaults(llm=llm)
memory.put(ChatMessage(role='user', content='some_message'))

# get latest buffer (most recent messages that fit the token limit)
messages = memory.get()

# get all messages ever stored, ignoring the token limit
messages = memory.get_all()

# combine memory with your prompt
new_message = ChatMessage(role="user", content=formatted_prompt)
messages = memory.get()
# FIX: the original never included new_message in the chat call — it chatted
# on memory alone and only stored new_message afterwards. Append it so the
# LLM actually sees the new user turn.
messages.append(new_message)
response = llm.chat(messages)

# add messages to memory so the next turn sees this exchange
memory.put(new_message)
memory.put(response.message)
# Install the LlamaIndex <-> Ollama integration package (provides llama_index.llms.ollama)
pip install llama-index-llms-ollama
from llama_index.llms.ollama import Ollama

# Point LlamaIndex at a locally running Ollama server. The generous
# request_timeout (seconds) accommodates slow local model inference.
llm = Ollama(model="llama3.1:latest", request_timeout=120)