Hi, does somebody know if I can use LlamaIndex with Ollama together with the TruLens recorder? I'm trying to evaluate the RAG, but I get an error:
`
# Initialize TruLens and wipe any previously recorded evaluation results.
tru = Tru()
tru.reset_database()

# LlamaIndex LLM backed by a remote Ollama server.
llm = Ollama(model="wizard-vicuna-uncensored", base_url="http://192.168.1.232:11435")

# Smoke-test the raw litellm completion path against the same Ollama server
# before wiring it into the feedback provider.
response = completion(
    model="ollama/wizard-vicuna-uncensored",
    messages=[{"content": "respond in 20 words. who are you?", "role": "user"}],
    api_base="http://192.168.1.232:11435",
)

import litellm

# FIX: verbosity is a module-level flag on the `litellm` package.
# `LiteLLM.set_verbose = True` only set a class attribute on the
# trulens_eval provider class and had no effect on litellm's logging.
litellm.set_verbose = True

# FIX: the trulens_eval LiteLLM provider's `endpoint` parameter is an
# Endpoint-tracking object, not a URL. Per the TruLens docs, litellm routing
# arguments such as the Ollama `api_base` must be passed through
# `completion_kwargs`, which is forwarded to every litellm completion call.
# (Passing a URL string as `endpoint` is the likely cause of the error.)
litellm_provider = LiteLLM(
    model_engine="ollama/wizard-vicuna-uncensored",
    completion_kwargs={"api_base": "http://192.168.1.232:11435"},
)

# Groundedness: is the answer supported by the retrieved context?
grounded = Groundedness(groundedness_provider=litellm_provider)
f_groundedness = (
    Feedback(grounded.groundedness_measure_with_cot_reasons, name="Groundedness")
    .on(Select.RecordCalls.retrieve.rets.collect())  # retrieved context chunks
    .on_output()                                     # the generated answer
    .aggregate(grounded.grounded_statements_aggregator)
)

# Answer relevance: does the answer address the query?
f_qa_relevance = (
    Feedback(litellm_provider.relevance_with_cot_reasons, name="Answer Relevance")
    .on(Select.RecordCalls.retrieve.args.query)
    .on_output()
)

# Context relevance: are the retrieved chunks relevant to the query?
# Scores are averaged over all retrieved chunks.
f_context_relevance = (
    Feedback(litellm_provider.qs_relevance_with_cot_reasons, name="Context Relevance")
    .on(Select.RecordCalls.retrieve.args.query)
    .on(Select.RecordCalls.retrieve.rets.collect())
    .aggregate(np.mean)
)

from trulens_eval import TruLlama
from trulens_eval import FeedbackMode

# Wrap the LlamaIndex query engine so every query is recorded and scored
# against the three feedback functions (the RAG triad).
tru_recorder = TruLlama(
    query_engine3,
    app_id="App_1",
    feedbacks=[
        f_qa_relevance,
        f_context_relevance,
        f_groundedness,
    ],
)

# Run each evaluation question through the instrumented engine.
for question in eval_questions:
    with tru_recorder as recording:
        print(question)
        query_engine3.query(question)
`
I'm adapting this from the example in the course:
https://learn.deeplearning.ai/building-evaluating-advanced-rag/lesson/3/rag-triad-of-metrics where I want to use Ollama instead of the OpenAI API.