from llama_index.core.chat_engine.condense_question import CondenseQuestionChatEngine

# Query engine: results from the hybrid retriever are re-ranked by Cohere
# before the LLM synthesizes the final answer.
# NOTE(review): `hybrid_retriever`, `cohere_rerank`, and `llm` must be defined
# earlier in the file -- confirm against the surrounding setup code.
query_engine = RetrieverQueryEngine.from_args(
    retriever=hybrid_retriever,
    node_postprocessors=[cohere_rerank],
    llm=llm,
)

# Chat engine that condenses the conversation history plus the latest user
# message into a standalone question, then routes it through `query_engine`.
# FIX: the original called the constructor directly with a literal `...` after
# a keyword argument -- a SyntaxError (positional argument follows keyword
# argument). Use the documented `from_defaults` factory instead, and pass any
# extra options (chat history, memory, condense prompt, verbose) explicitly.
chat_engine = CondenseQuestionChatEngine.from_defaults(
    query_engine=query_engine,
)