def indexgenerator(indexPath, documentsPath):
    """Return a vector index, loading it from disk or building it on first use.

    If no persisted index exists at ``indexPath``, documents are read from
    ``documentsPath``, chunked, annotated with extracted entities, indexed,
    and the resulting index is persisted to ``indexPath``. On subsequent
    calls the persisted index is loaded directly.

    Parameters
    ----------
    indexPath : str
        Directory where the index storage is (or will be) persisted.
    documentsPath : str
        Directory containing the source documents to ingest.

    Returns
    -------
    VectorStoreIndex
        The newly built or freshly loaded index.
    """
    # Build only when no persisted storage exists yet.
    if not os.path.exists(indexPath):
        print("Not existing")
        # Ingestion pipeline: split into overlapping chunks, then tag each
        # node with extracted entities (CPU-only, low confidence threshold).
        entity_extractor = EntityExtractor(
            prediction_threshold=0.2, label_entities=False, device="cpu"
        )
        node_parser = SentenceSplitter(chunk_overlap=200, chunk_size=2000)
        transformations = [node_parser, entity_extractor]
        # BUG FIX: previously hard-coded to r"Text_Files", silently ignoring
        # the caller-supplied documentsPath argument.
        documents = SimpleDirectoryReader(input_dir=documentsPath).load_data()
        pipeline = IngestionPipeline(transformations=transformations)
        nodes = pipeline.run(documents=documents)
        # NOTE(review): embed_model comes from enclosing module scope —
        # confirm it is defined before this function is called.
        service_context = ServiceContext.from_defaults(
            llm=OpenAI(model="gpt-3.5-turbo", temperature=0),
            embed_model=embed_model,
        )
        index = VectorStoreIndex(nodes, service_context=service_context)
        # Persist so the next call takes the fast load path below.
        index.storage_context.persist(indexPath)
    else:
        # Storage already exists: load the persisted index.
        print("Existing")
        storage_context = StorageContext.from_defaults(persist_dir=indexPath)
        index = load_index_from_storage(storage_context)
    return index