from llama_index.core import get_response_synthesizer
from llama_index.core.query_engine import SubQuestionQueryEngine

# Build a synthesizer that streams tokens as they are generated
synthesizer = get_response_synthesizer(llm=llm, response_mode="compact", streaming=True)

# Pass the streaming synthesizer into the sub-question engine
query_engine = SubQuestionQueryEngine.from_defaults(..., response_synthesizer=synthesizer)

response = query_engine.query("..")

# Print tokens as they arrive instead of waiting for the full answer
for token in response.response_gen:
    print(token, end="", flush=True)
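With streaming=True, query() returns a StreamingResponse rather than a plain Response, and its response_gen attribute yields text deltas as they arrive, which is what makes the incremental printing above possible.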