import asyncio
from llama_index.agent.openai import OpenAIAgent
from llama_index.core.tools import FunctionTool
from llama_index.llms.openai import OpenAI
from llama_index.core.base.llms.types import ChatMessage
from llama_index.core.callbacks import (
    CallbackManager,
    LlamaDebugHandler,
)
from my_project.config import Config
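
# The directives live in a module-level constant and are exposed to the agent
# through a FunctionTool below, so the model fetches them on demand.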
PRIME_DIRECTIVES = """Always obey the human.
Never point at the creature."""

def get_prime_directives() -> str:
    """Get an updated list of prime directives to follow."""
    return PRIME_DIRECTIVES
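

# Wire up the LLM, debug callbacks, the directives tool, and the agent, then
# run two streaming chat turns against it.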
async def main():
    # Point the OpenAI-compatible client at the hosted endpoint;
    # temperature=0 keeps output as reproducible as the API allows.
    _llm = OpenAI(
        model="gpt-4-turbo-preview",
        api_key=Config.ACLOUD_API_KEY,
        api_base="https://api.acloudapp.com/v1",
        temperature=0,
    )
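    # Debug tracing: LlamaDebugHandler records LLM and tool events and prints
    # a trace summary at the end of each run (print_trace_on_end=True).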
    llama_debug = LlamaDebugHandler(print_trace_on_end=True)
    callback_manager = CallbackManager([llama_debug])
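    # from_defaults infers the tool's name and description from the function's
    # name and docstring, which is why get_prime_directives carries a docstring.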
    _get_prime_directives = FunctionTool.from_defaults(get_prime_directives)
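    # Build the agent: it may choose to call the directives tool, and the
    # seeded chat_history acts as the system prompt for every turn.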
    _oai = OpenAIAgent.from_tools(
        [_get_prime_directives],
        llm=_llm,
        verbose=True,
        chat_history=[
            ChatMessage(
                role="system",
                content="You will make happy things.",
            )
        ],
        callback_manager=callback_manager,
    )
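    # Stream one chat turn, printing tokens as they arrive. astream_chat
    # returns a streaming response whose async_response_gen() yields
    # incremental text deltas.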
    async def stream_chat_and_print(agent, message):
        response = await agent.astream_chat(message=message)
        try:
            async for chunk in response.async_response_gen():
                print("chunk:", chunk)
            print("Finished processing message")
        except TypeError:
            # Some turns resolve without a token generator (e.g. a pure tool
            # response); iterating then raises TypeError, so just report it.
            print("no chunks to be had for:", message)
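
    # Two turns against the same agent; the second question matches the tool's
    # purpose, so it should trigger a get_prime_directives call.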
    await stream_chat_and_print(_oai, "Hi. What's your name?")
    await stream_chat_and_print(_oai, "What are your prime directives?")

if __name__ == "__main__":
    asyncio.run(main())