Traceback (most recent call last):
  File "/Library/Developer/CommandLineTools/Library/Frameworks/Python3.framework/Versions/3.9/lib/python3.9/runpy.py", line 188, in _run_module_as_main
    mod_name, mod_spec, code = _get_module_details(mod_name, _Error)
  File "/Library/Developer/CommandLineTools/Library/Frameworks/Python3.framework/Versions/3.9/lib/python3.9/runpy.py", line 111, in _get_module_details
    __import__(pkg_name)
  File "/Users/sumved/genia/scm/genia-llamaindex-0.10/test.py", line 81, in <module>
    query_engine = index.as_query_engine()
  File "/Users/sumved/genia/scm/genia-llamaindex-0.10/venv/lib/python3.9/site-packages/llama_index/core/indices/base.py", line 391, in as_query_engine
    return RetrieverQueryEngine.from_args(
  File "/Users/sumved/genia/scm/genia-llamaindex-0.10/venv/lib/python3.9/site-packages/llama_index/core/query_engine/retriever_query_engine.py", line 108, in from_args
    response_synthesizer = response_synthesizer or get_response_synthesizer(
  File "/Users/sumved/genia/scm/genia-llamaindex-0.10/venv/lib/python3.9/site-packages/llama_index/core/response_synthesizers/factory.py", line 66, in get_response_synthesizer
    prompt_helper = prompt_helper or prompt_helper_from_settings_or_context(
  File "/Users/sumved/genia/scm/genia-llamaindex-0.10/venv/lib/python3.9/site-packages/llama_index/core/settings.py", line 306, in prompt_helper_from_settings_or_context
    return settings.prompt_helper
  File "/Users/sumved/genia/scm/genia-llamaindex-0.10/venv/lib/python3.9/site-packages/llama_index/core/settings.py", line 206, in prompt_helper
    self._prompt_helper = PromptHelper.from_llm_metadata(self._llm.metadata)
AttributeError: 'AzureOpenAI' object has no attribute 'metadata'
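The error happens when the LLM handed to llama-index is the raw AzureOpenAI client from the openai package, which has no .metadata attribute; llama-index expects its own AzureOpenAI wrapper from llama-index-llms-azure-openai, which does expose .metadata for PromptHelper. A minimal sketch, assuming that is the cause here (the LLM construction code is not shown above) and using placeholder deployment name, key and API version:

# a minimal sketch, assuming llama-index-llms-azure-openai is installed;
# deployment name, api key and api version are placeholders
from llama_index.core import Settings
from llama_index.llms.azure_openai import AzureOpenAI  # not openai.AzureOpenAI

llm = AzureOpenAI(
    model="gpt-35-turbo",                     # base model behind the deployment
    deployment_name="my-gpt35-deployment",    # placeholder Azure deployment name
    azure_endpoint="https://visdam-labs.openai.azure.com/",
    api_key="<azure-api-key>",                # placeholder
    api_version="2023-07-01-preview",         # placeholder API version
)

Settings.llm = llm      # this wrapper exposes .metadata, so PromptHelper can be built
print(llm.metadata)     # context_window, num_output, model name, ...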
from llama_index.core import ServiceContext

# Initialize Service Context
service_context = ServiceContext.from_defaults(
    llm=llm,
    embed_model=embed_model,
    chunk_size=256,
    chunk_overlap=10,
    callback_manager=callback_manager,
)
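In 0.10 ServiceContext is deprecated, and the traceback above shows the query engine falling back to the global Settings object rather than the context. A rough equivalent using Settings, assuming the same llm, embed_model and callback_manager objects:

# 0.10-style configuration via the global Settings singleton
# (assumes llm, embed_model and callback_manager are already constructed)
from llama_index.core import Settings

Settings.llm = llm
Settings.embed_model = embed_model
Settings.chunk_size = 256
Settings.chunk_overlap = 10
Settings.callback_manager = callback_manager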
from llama_index.core import get_response_synthesizer
from llama_index.core.postprocessor import SimilarityPostprocessor
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.core.response_synthesizers import ResponseMode
from llama_index.core.retrievers import VectorIndexRetriever

# configure retriever
retriever = VectorIndexRetriever(
    index=index,
    similarity_top_k=1,
)

# configure response synthesizer
response_synthesizer = get_response_synthesizer(
    streaming=False,
    response_mode=ResponseMode.COMPACT,
    # verbose=True,
)

# assemble query engine
query_engine = RetrieverQueryEngine(
    retriever=retriever,
    response_synthesizer=response_synthesizer,
    node_postprocessors=[SimilarityPostprocessor(similarity_cutoff=0.7)],
)
return query_engine
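For reference, a usage sketch of the engine assembled above; build_query_engine is a hypothetical name for the enclosing function, and the question text is illustrative:

# hypothetical usage; build_query_engine is a placeholder for the function above
query_engine = build_query_engine(index)
response = query_engine.query("What is a First Leg Order?")
print(response)
for node_with_score in response.source_nodes:
    # nodes that passed the 0.7 similarity cutoff
    print(node_with_score.score, node_with_score.node.get_content()[:80])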
from llama_index.core import ListIndex, VectorStoreIndex  # ListIndex is a legacy alias for SummaryIndex in 0.10
from llama_index.core.query_engine import RouterQueryEngine
from llama_index.core.selectors import LLMSingleSelector, PydanticSingleSelector
from llama_index.core.tools import QueryEngineTool

# construct list_index and vector_index from storage_context and service_context
list_index = ListIndex(nodes, service_context=service_context)
vector_index = VectorStoreIndex(nodes, service_context=service_context)

# define list_query_engine and vector_query_engine
list_query_engine = list_index.as_query_engine(
    response_mode="tree_summarize",
    use_async=True,
)
vector_query_engine = vector_index.as_query_engine()

list_tool = QueryEngineTool.from_defaults(
    query_engine=list_query_engine,
    description="Useful for summarization questions related to the data source",
)
vector_tool = QueryEngineTool.from_defaults(
    query_engine=vector_query_engine,
    description="Useful for retrieving specific context related to the data source",
)

# construct RouterQueryEngine
query_engine = RouterQueryEngine(
    # selector=LLMSingleSelector.from_defaults(),
    selector=PydanticSingleSelector.from_defaults(),
    query_engine_tools=[
        list_tool,
        vector_tool,
    ],
)

response_str = query_engine.query(
    "What is the maximum quantity that can be submitted for a First Leg Order in trading?"
).response
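A quick sketch to exercise both routes of the router once the LLM issue is resolved, so the selector can be checked: a summarization-style question should go to list_tool and a specific lookup to vector_tool (question texts are illustrative):

# hypothetical queries exercising the two routes
summary_response = query_engine.query(
    "Summarize the order types described in the data source."
)
detail_response = query_engine.query(
    "What is the maximum quantity that can be submitted for a First Leg Order in trading?"
)
print(summary_response)
print(detail_response)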
HTTP Request: POST https://visdam-labs.openai.azure.com/chat/completions "HTTP/1.1 404 Resource Not Found"
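The 404 looks consistent with the same misconfiguration: a correctly routed Azure OpenAI chat call goes to .../openai/deployments/<deployment-name>/chat/completions?api-version=..., while the logged request hits the bare /chat/completions path, which Azure does not serve. A sanity-check sketch with the raw openai client, only to show what a well-formed request looks like (deployment name, key and API version are placeholders):

# sanity check with the raw openai client; key, deployment name and
# api version below are placeholders
from openai import AzureOpenAI

client = AzureOpenAI(
    azure_endpoint="https://visdam-labs.openai.azure.com/",
    api_key="<azure-api-key>",
    api_version="2023-07-01-preview",
)

# the client routes this call to
#   https://visdam-labs.openai.azure.com/openai/deployments/my-gpt35-deployment/chat/completions?api-version=2023-07-01-preview
completion = client.chat.completions.create(
    model="my-gpt35-deployment",   # Azure deployment name, not the base model name
    messages=[{"role": "user", "content": "ping"}],
)
print(completion.choices[0].message.content)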