def answerMe(question):
    """Answer *question* from the persisted llama-index store.

    Flow:
      1. If the question contains one of the ``rewriting`` trigger phrases,
         prefix the query with an instruction asking for a lengthy response
         and return the index answer directly.
      2. Otherwise query the index normally.
      3. If the index answer contains one of the fallback ``phrases``
         (presumably "I don't know"-style texts — confirm against the module
         that defines ``phrases``), delegate to the LangChain ``agent`` and
         convert markdown links in its output to HTML anchors.

    NOTE(review): ``rewriting``, ``phrases`` and ``agent`` are module-level
    names defined elsewhere in this file/project.
    """
    storage_context = StorageContext.from_defaults(persist_dir='index_store')
    index = load_index_from_storage(storage_context)
    query_engine = index.as_query_engine(response_mode='refine')

    # Trigger phrases that request a longer, rewritten answer.
    for sent in rewriting:
        if sent in question:
            qq = "Write a lengthy response to this query"
            return query_engine.query(qq + '\n\n' + question)

    response = query_engine.query(question)

    # Fallback: if the index response looks like a non-answer, ask the agent.
    for phrase in phrases:
        if phrase in str(response):
            content = agent({"input": question})
            actualcontent = content['output']
            # BUG FIX: the original pattern r'(.?)' defines only ONE group,
            # so the replacement's \2 raised "invalid group reference".
            # The <a href> template implies markdown links [text](url) were
            # meant to be matched — do that explicitly.
            response = re.sub(
                r'\[([^\]]+)\]\(([^)\s]+)\)',
                r'<a href="\2" target="_blank">\1</a>',
                actualcontent,
            )
            break  # one rewrite is enough; don't re-run the agent per phrase
    return response
How can I customize this so that the response and outputs are organized into a well-defined shape, instead of just a paragraph with no fixed schema?
# Question from the thread: how does the query engine decide whether it
# found a response in the llama-index store or not?
def create_index(path):
    """Configure the PromptHelper used when building an index over *path*.

    NOTE(review): only the PromptHelper setup is visible here — the rest of
    the index construction appears truncated in this paste.
    """
    max_input = 4096         # model context window size
    tokens = 512             # number of output tokens
    chunk_size = 600         # limit on document chunk size
    max_chunk_overlap = 0.2  # overlap ratio between chunks
    promptHelper = PromptHelper(
        max_input,
        tokens,
        max_chunk_overlap,
        chunk_size_limit=chunk_size,
    )
def answerMe(question):
    """Query the persisted index and stream the answer to stdout.

    Returns the streaming response object so the caller can inspect it.

    To check whether the engine actually found something in the index
    (the author's inline question): inspect ``response.source_nodes`` —
    it is empty when nothing was retrieved — or compare ``str(response)``
    against the model's "no answer" phrasings, then fall back to another
    strategy.
    """
    storage_context = StorageContext.from_defaults(persist_dir='index_store')
    index = load_index_from_storage(storage_context)
    query_engine = index.as_query_engine(streaming=True, retriever=True, verbose=True)

    # BUG FIX: the original ran the (expensive) query twice and wrapped
    # print_response_stream() in print(), which printed its None return
    # value. Query once and stream-print from that single response.
    response = query_engine.query(question)
    response.print_response_stream()

    # Decide "found vs not found" here, e.g.:
    #   if not response.source_nodes: ...fallback...
    return response