from llama_index.memory import ChatMemoryBuffer

# Keep roughly the last 1500 tokens of conversation history in the buffer.
memory = ChatMemoryBuffer.from_defaults(token_limit=1500)

# `index` is the index built over the essay earlier; "context" mode retrieves
# relevant passages for each user message and injects them into the prompt.
chat_engine = index.as_chat_engine(
    chat_mode="context",
    memory=memory,
    system_prompt=(
        "You are a chatbot, able to have normal interactions, as well as talk "
        "about an essay discussing Paul Graham's life."
    ),
)
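
The memory buffer is what lets follow-up questions refer back to earlier turns. A minimal usage sketch of the engine configured above; the specific questions are illustrative and assume `index` was built over the Paul Graham essay earlier:

# First turn: the question and the model's reply are appended to the memory buffer.
response = chat_engine.chat("What did Paul Graham do growing up?")
print(response)

# Second turn: the buffered history is sent along with the new message,
# so "he" resolves to Paul Graham without restating the context.
response = chat_engine.chat("What did he do after that?")
print(response)

# Clear the buffered history to start a fresh conversation.
chat_engine.reset()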