stupid discord character limit
You can add it as a file
Plain Text
import asyncio
from llama_index.agent.openai import OpenAIAgent
from llama_index.core.tools import FunctionTool
from llama_index.llms.openai import OpenAI
from llama_index.core.base.llms.types import ChatMessage
from llama_index.core.callbacks import CallbackManager, LlamaDebugHandler

from my_project.config import Config


PRIME_DIRECTIVES = """Always obey the human.
Never point at the creature."""

def get_prime_directives() -> str:
    """Get an updated list of prime directives to follow."""
    return PRIME_DIRECTIVES


async def main():
    _llm = OpenAI(
        model='gpt-4-turbo-preview',
        api_key=Config.ACLOUD_API_KEY,
        api_base='https://api.acloudapp.com/v1',
        temperature=0,
    )
    llama_debug = LlamaDebugHandler(print_trace_on_end=True)
    callback_manager = CallbackManager([llama_debug])

    _get_prime_directives = FunctionTool.from_defaults(get_prime_directives)

    _oai = OpenAIAgent.from_tools(
        [_get_prime_directives],
        llm=_llm,
        verbose=True,
        chat_history=[
            ChatMessage(
                role="system",
                content="You will make happy things.",
            )
        ],
        callback_manager=callback_manager
    )

    async def stream_chat_and_print(agent, message):
        response = await agent.astream_chat(message=message)
        try:
            async for chunk in response.async_response_gen():
                print("chunk:", chunk)
            print("Finished processing message")
        except TypeError:
            # Workaround for the bug being reported: on tool-call turns the
            # stream raises instead of yielding any chunks.
            print("no chunks to be had for:", message)

    await stream_chat_and_print(_oai, "Hi. What's your name?")
    await stream_chat_and_print(_oai, "What are your prime directives?")


if __name__ == "__main__":
    asyncio.run(main())
The sync version of this works, but this version doesn't yield anything on tool calls, and doesn't return the other attribute that usually carries the content from tools (I forget the name).
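For reference, the working sync variant mentioned here would look roughly like this (a minimal sketch assuming the same agent setup as in main() above; stream_chat and response_gen are the synchronous counterparts of astream_chat and async_response_gen):

Plain Text
def stream_chat_and_print_sync(agent, message):
    # Sync streaming: response_gen is a plain generator of text chunks.
    response = agent.stream_chat(message=message)
    for chunk in response.response_gen:
        print("chunk:", chunk)
    print("Finished processing message")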
It works fine for me 😅 Extremely minimal code:

Plain Text
import asyncio

from llama_index.core import Document, VectorStoreIndex
from llama_index.core.tools import QueryEngineTool
from llama_index.agent.openai import OpenAIAgent

index = VectorStoreIndex.from_documents([Document.example()])

query_engine = index.as_query_engine()

tool = QueryEngineTool.from_defaults(
    query_engine,
    name="search",
    description="Useful for asking questions about LLMs.",
)

agent = OpenAIAgent.from_tools([tool])


async def run():
    response = await agent.astream_chat("What are some facts about LLMs?")
    async for token in response.async_response_gen():
        print(str(token), end="", flush=True)


if __name__ == "__main__":
    asyncio.run(run())
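On the other attribute that carries tool content: a minimal sketch, assuming the streaming response exposes tool results via .sources (a list of ToolOutput) as in recent llama_index releases; the attribute name may differ in older versions:

Plain Text
async def run_and_show_sources():
    response = await agent.astream_chat("What are some facts about LLMs?")
    async for token in response.async_response_gen():
        print(str(token), end="", flush=True)
    print()
    # After the stream is drained, the raw tool results (if any) should be
    # available on the response; .sources is an assumption per above.
    for tool_output in response.sources:
        print("tool:", tool_output.tool_name, "->", tool_output.content)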