Find answers from the community

Updated 6 months ago

`from llama_index.llms.ollama import Ollama`

Plain Text
from llama_index.llms.ollama import Ollama

# Local Ollama-served model used as the LLM for response synthesis.
llm = Ollama(model="llama3")

Plain Text
from llama_index.core import get_response_synthesizer
from llama_index.core.retrievers import VectorIndexRetriever
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.core.indices.postprocessor import SimilarityPostprocessor

# Pull the 2 most similar nodes from the (pre-built) vector index.
retriever = VectorIndexRetriever(index=index, similarity_top_k=2)

# Discard retrieved nodes whose similarity score falls below 0.5.
postprocessor = SimilarityPostprocessor(similarity_cutoff=0.5)

# BUG FIX: RetrieverQueryEngine's response_synthesizer must be a
# ResponseSynthesizer, not a raw LLM. Wrap the LLM with
# get_response_synthesizer() instead of passing `llm` directly.
response_synthesizer = get_response_synthesizer(response_mode="compact", llm=llm)

query_engine = RetrieverQueryEngine(
    retriever=retriever,
    node_postprocessors=[postprocessor],
    response_synthesizer=response_synthesizer,
)

Plain Text
# Run a natural-language query through the retriever + synthesizer pipeline.
query_engine.query("What is 8086?")
2 comments
The response synthesizer is not an LLM — build one with `get_response_synthesizer` and pass the LLM to it:
Plain Text
from llama_index.core import get_response_synthesizer

# Build a proper ResponseSynthesizer around the LLM — this (not the raw
# LLM object) is what RetrieverQueryEngine expects as response_synthesizer.
response_synthesizer = get_response_synthesizer(llm=llm, response_mode="compact")
Add a reply
Sign up and join the conversation on Discord