I'm facing a similar issue when initializing StructuredLLMPredictor() with no params, as shown in the docs. I tried it on versions 0.8.11–0.8.11.post3 and 0.8.7.
def llama_index_boiler(vector_input, dir_or_file, multiple_or_not, rail_template, prompt_template, params_needed=None):
    """Build a Guardrails-backed LlamaIndex query engine over the given input.

    Args:
        vector_input: Directory path, single file path, or list of file paths,
            depending on the two mode flags below. An empty string is invalid.
        dir_or_file: True -> treat ``vector_input`` as a directory;
            False -> treat it as file path(s).
        multiple_or_not: True -> ``vector_input`` is a list of files
            (only valid with ``dir_or_file=False``).
        rail_template: Path to the Guardrails ``.rail`` spec file.
        prompt_template: QA prompt template string to wrap with the parser.
        params_needed: Unused; kept for backward compatibility with callers.

    Returns:
        ``[query_engine, index]`` when ``dir_or_file`` is True, otherwise
        just ``query_engine`` (preserves the original return contract).

    Raises:
        ValueError: If the flag combination / ``vector_input`` selects no
            loading branch (previously this fell through to an
            ``UnboundLocalError`` on ``index``).
    """
    llm_predictor = StructuredLLMPredictor()

    # Select the reader for the requested input mode. The three modes are
    # mutually exclusive, so use an if/elif chain instead of independent ifs.
    if dir_or_file and not multiple_or_not:
        reader = SimpleDirectoryReader(input_dir=vector_input)
    elif not dir_or_file and not multiple_or_not and vector_input != "":
        reader = SimpleDirectoryReader(input_files=[vector_input])
    elif not dir_or_file and multiple_or_not:
        reader = SimpleDirectoryReader(input_files=vector_input)
    else:
        # Previously this case silently passed and then crashed with
        # UnboundLocalError when 'index' was used below. Fail loudly instead.
        raise ValueError(
            "No valid input mode: check vector_input, dir_or_file, "
            "and multiple_or_not."
        )

    # Shared loading/indexing steps, hoisted out of the branches.
    documents = reader.load_data()
    index = VectorStoreIndex.from_documents(documents)

    # llamaIndex parses guardrails output from rail file
    output_parser = GuardrailsOutputParser.from_rail(
        rail_template, llm=llm_predictor.llm
    )

    # llamaindex formats templates with the parser's output instructions.
    fmt_qa_tmpl = output_parser.format(prompt_template)
    fmt_refine_tmpl = output_parser.format(DEFAULT_REFINE_PROMPT_TMPL)
    qa_prompt = QuestionAnswerPrompt(fmt_qa_tmpl, output_parser=output_parser)
    refine_prompt = RefinePrompt(fmt_refine_tmpl, output_parser=output_parser)

    # init query engine
    query_engine = index.as_query_engine(
        text_qa_template=qa_prompt,
        refine_template=refine_prompt,
        llm_predictor=llm_predictor,
    )

    # Directory mode also returns the index so callers can persist/reuse it.
    if dir_or_file:
        return [query_engine, index]
    return query_engine
It was actually working before but suddenly stopped; I've reverted to older builds of my project, but the error keeps occurring.