import logging

from elasticsearch import Elasticsearch, NotFoundError
from llama_index.core.ingestion import IngestionPipeline
from llama_index.core.node_parser import TokenTextSplitter
from llama_index.extractors.entity import EntityExtractor
def run_pipeline(self, docs):
    """Split *docs* into token chunks and enrich them with extracted entities.

    Builds a two-stage ingestion pipeline:
      1. ``TokenTextSplitter`` — 512-token chunks with 128-token overlap.
      2. ``EntityExtractor`` — span-marker NER on CPU, entities kept when
         the model's confidence is >= 0.5; entity labels are NOT written
         into node metadata (``label_entities=False``).

    Args:
        docs: Documents to ingest (llama_index ``Document`` objects).

    Returns:
        The list of processed nodes on success, or ``None`` if any stage
        of the pipeline raised.

    NOTE(review): a reported failure mode here is
    ``ValueError: too many values to unpack (expected 2)`` raised during
    the entity-extraction stage. That error originates *inside*
    ``EntityExtractor`` (the underlying span-marker model), not in this
    function — it is typically a version mismatch between
    ``span-marker`` / ``transformers`` and the installed
    ``llama-index-extractors-entity`` package. Pin compatible versions
    (e.g. upgrade ``span-marker``) to resolve it; the traceback logged
    below (``logging.exception``) will show the exact failing frame.
    TODO: confirm against the installed library versions.
    """
    try:
        text_splitter = TokenTextSplitter(
            separator=" ",
            chunk_size=512,
            chunk_overlap=128,
        )
        entity_extractor = EntityExtractor(
            prediction_threshold=0.5,
            # Including the entity label in metadata can be erroneous,
            # so it is deliberately disabled.
            label_entities=False,
            device="cpu",  # set to "cuda" if you have a GPU
        )
        pipeline = IngestionPipeline(
            transformations=[text_splitter, entity_extractor]
        )
        return pipeline.run(
            documents=docs,
            in_place=True,
            show_progress=True,
        )
    except Exception:
        # logging.exception records the full traceback, which is needed
        # to locate errors raised deep inside the extractor stage.
        logging.exception("Error running pipeline")
        return None