Basically, ResponseSynthesizer was replaced by the get_response_synthesizer() factory function in LlamaIndex 0.7.0.
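So where you previously built a synthesizer from the class, you now call the factory function instead. A rough before/after sketch (the old ResponseSynthesizer.from_args pattern is from memory of pre-0.7 releases, so treat that part as an assumption):

# pre-0.7 (old API, shown only for comparison; kept commented out)
# from llama_index import ResponseSynthesizer
# response_synthesizer = ResponseSynthesizer.from_args(response_mode="tree_summarize")

# 0.7+ (new API)
from llama_index import get_response_synthesizer

response_synthesizer = get_response_synthesizer(response_mode="tree_summarize")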
See the 0.7.0 blog post for details:
https://medium.com/@jerryjliu98/llamaindex-0-7-0-better-enabling-bottoms-up-llm-application-development-959db8f75024

Here's the full example from the blog as well:
from llama_index import (
    VectorStoreIndex,
    get_response_synthesizer,
)
from llama_index.indices.postprocessor import (
    SimilarityPostprocessor,
    SentenceEmbeddingOptimizer,
)

documents = ...

# build index
index = VectorStoreIndex.from_documents(documents)

# configure response synthesizer
response_synthesizer = get_response_synthesizer(
    response_mode="tree_summarize",
)

# assemble query engine
query_engine = index.as_query_engine(
    similarity_top_k=3,
    response_synthesizer=response_synthesizer,
    node_postprocessors=[
        SimilarityPostprocessor(similarity_cutoff=0.7),
        SentenceEmbeddingOptimizer(percentile_cutoff=0.5),
    ],
)
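Once assembled, you query the engine the same way as any other query engine. A minimal usage sketch continuing from the code above (the question string is just a placeholder):

# run a query through the assembled engine and print the synthesized answer
response = query_engine.query("What does the document say about X?")
print(response)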