You can use `structured_predict()` with a Pydantic class:

```python
from llama_index.llms.ollama import Ollama
from llama_index.core.prompts import PromptTemplate
from pydantic.v1 import BaseModel, Field


class MyClass(BaseModel):
    """Some description."""

    name: str = Field(description="Some description")


llm = Ollama(..., json_mode=True)
prompt = PromptTemplate("Give me a name based on {topic}")

output = llm.structured_predict(MyClass, prompt, topic="movies")
print(output.name)

# or async
output = await llm.astructured_predict(MyClass, prompt, topic="movies")
```
If you're building an agent, you can create it with `ReActAgentWorker` (or something else):

```python
from llama_index.core.agent import ReActAgentWorker

agent_worker = ReActAgentWorker.from_tools(initial_tools, llm=llm, verbose=True)
```
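To actually run the worker, you can wrap it in an `AgentRunner`. A minimal sketch, assuming `agent_worker` from above and an example query string:

```python
from llama_index.core.agent import AgentRunner

# Wrap the step-based worker in a runner that manages the task loop
agent = AgentRunner(agent_worker)

response = agent.chat("Give me a name based on movies")
print(response)
```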
Try `Ollama(..., json_mode=True)`, or alternatively set `Ollama(..., additional_kwargs={"stop": ["Observation:"]})` to help with output parsing.
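For example, a sketch of the stop-token variant (the model name and timeout here are placeholders, not from the original):

```python
from llama_index.llms.ollama import Ollama

# Stop generation at "Observation:" so the ReAct output parser doesn't
# trip over hallucinated observations (model name is just an example)
llm = Ollama(
    model="llama3.1",
    request_timeout=120.0,
    additional_kwargs={"stop": ["Observation:"]},
)
```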