Hi, I'm trying to get the final processed prompt that is actually sent to the LLM. I searched online and found a related issue answered by @Logan M:
https://github.com/run-llama/llama_index/issues/13310
The code provided there is:
from typing import Dict, List

from llama_index.core.instrumentation.event_handlers import BaseEventHandler
from llama_index.core.instrumentation.events import BaseEvent
from llama_index.core.instrumentation.events.llm import (
    LLMChatEndEvent,
    LLMChatStartEvent,
    LLMChatInProgressEvent,
)


class ExampleEventHandler(BaseEventHandler):
    events: List[BaseEvent] = []

    @classmethod
    def class_name(cls) -> str:
        """Class name."""
        return "ExampleEventHandler"

    def handle(self, event: BaseEvent) -> None:
        """Logic for handling event."""
        print("-----------------------")
        # all events have these attributes
        print(event.id_)
        print(event.timestamp)
        print(event.span_id)

        # event specific attributes
        if isinstance(event, LLMChatStartEvent):
            # initial
            print(event.messages)
            print(event.additional_kwargs)
            print(event.model_dict)
        elif isinstance(event, LLMChatInProgressEvent):
            # streaming
            print(event.response.delta)
        elif isinstance(event, LLMChatEndEvent):
            # final response
            print(event.response)

        self.events.append(event)
        print("-----------------------")


import llama_index.core.instrumentation as instrument

dispatcher = instrument.get_dispatcher(__name__)
dispatcher.add_event_handler(ExampleEventHandler())
However, I'm confused about how to incorporate this code into mine. Basically, I have a vector store index built from nodes, and I'm using it as a query engine to ask questions:
from llama_index.core import VectorStoreIndex
recursive_index = VectorStoreIndex(nodes=base_nodes + objects)
recursive_query_engine = recursive_index.as_query_engine(
similarity_top_k=5,
verbose=True,
response_mode="compact"
)
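Is it just a matter of registering the handler before I run a query, and then reading the captured events afterwards? Below is a rough sketch of what I had in mind. It assumes the ExampleEventHandler from the issue is defined as above, that attaching it to the root dispatcher (get_dispatcher() with no name) is enough to catch the LLM events fired while the query engine runs, and the query string is just a placeholder.

# my guess at wiring it together, assuming ExampleEventHandler is defined as in the issue
import llama_index.core.instrumentation as instrument

handler = ExampleEventHandler()
# register on the root dispatcher so events fired inside the query engine reach the handler
root_dispatcher = instrument.get_dispatcher()
root_dispatcher.add_event_handler(handler)

# placeholder question, just to trigger an LLM call
response = recursive_query_engine.query("What does the document say about topic X?")

# hoping handler.events now contains an LLMChatStartEvent whose messages are the final prompt
for event in handler.events:
    print(event)

Is that the right way to hook it up, or does the handler need to be registered differently for a query engine?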
Thanks for your help!