# Minimal SimpleChatEngine setup: seed the engine with an existing
# conversation via ``chat_history``. Replace the ADD_HERE placeholder with a
# list of prior messages before running.
from llama_index.core.chat_engine import SimpleChatEngine

chat_engine = SimpleChatEngine.from_defaults(chat_history=ADD_HERE)
import os
from collections import deque

from llama_index.core.chat_engine import SimpleChatEngine
from llama_index.llms.openai import OpenAI

# Sliding-window memory: deque(maxlen=4) silently evicts the oldest message
# once four are stored, bounding how much history reaches the prompt.
chat_history = deque(maxlen=4)

# temperature=0 -> deterministic responses.
llm = OpenAI(model="gpt-4-turbo", temperature=0)
system_prompt = "You are a pro real estate agent"

chat_engine = SimpleChatEngine.from_defaults(
    chat_history=chat_history,
    system_prompt=system_prompt,
    llm=llm,
)

# Simple chat REPL. FIX: the original bare ``input()`` loop crashed with an
# EOFError / KeyboardInterrupt traceback on Ctrl-D / Ctrl-C; exit cleanly
# instead.
while True:
    try:
        user_input = input("User: ")
    except (EOFError, KeyboardInterrupt):
        print()
        break
    response = chat_engine.chat(user_input)
    print("Bot:", response)
    print(f"History length: {len(chat_engine.chat_history)} \n")
# BUG FIX: the original line had a doubled assignment operator ("= =") —
# a SyntaxError. Build the seed history as a plain list of messages.
# NOTE(review): assumes ``ChatMessage`` is imported (llama_index.core.llms)
# and ``system_prompt`` is defined earlier in the file — confirm against the
# surrounding snippets.
chat_history = [
    ChatMessage(content=system_prompt, role="system"),
    ChatMessage(content="user_msg", role="user"),
]
# Hand the engine a bounded deque as its memory: only the four most recent
# messages are retained. NOTE(review): assumes ``system_prompt`` is defined
# earlier in the file.
memory = deque(maxlen=4)
llm = OpenAI(model="gpt-4-turbo", temperature=0)
chat_engine = SimpleChatEngine.from_defaults(
    memory=memory,
    system_prompt=system_prompt,
    llm=llm,
)
# Token-bounded memory: keeps as much recent conversation as fits within
# ~3000 tokens (presumably trimming the oldest messages first — confirm
# against the ChatMemoryBuffer documentation).
memory = ChatMemoryBuffer.from_defaults(token_limit=3000)
# Plug a bounded deque straight into the engine as its message store;
# once four messages accumulate, the oldest is dropped automatically.
memory = deque(maxlen=4)
chat_engine = SimpleChatEngine.from_defaults(memory=memory)