Instead of calling `splitter.split_text()` on each string by hand, you can hand the splitter to a node parser and let it chunk every document in one pass:
```python
import tiktoken
# Legacy (pre-0.10) llama_index import paths, matching the API used here.
from llama_index.node_parser import SimpleNodeParser
from llama_index.text_splitter import TokenTextSplitter

# Split on paragraph breaks first, falling back to single newlines,
# and count tokens with the gpt-3.5-turbo encoding.
text_splitter = TokenTextSplitter(
    separator="\n\n",
    chunk_size=4000,
    chunk_overlap=200,
    backup_separators=["\n"],
    tokenizer=tiktoken.encoding_for_model("gpt-3.5-turbo").encode,
)

# Parse the documents into nodes, one node per chunk.
node_parser = SimpleNodeParser(text_splitter=text_splitter)
nodes = node_parser.get_nodes_from_documents(documents, show_progress=False)

# Collect the raw chunk text from each node.
array_of_text = [node.text for node in nodes]
```
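If you just want the chunks without building nodes, the same splitter exposes `split_text()` directly. A minimal sketch, assuming `documents` holds LlamaIndex `Document` objects with a `.text` attribute (as the parser above expects):

```python
# Quick check: split a single document's text directly with split_text().
chunks = text_splitter.split_text(documents[0].text)
print(f"{len(chunks)} chunks; first chunk begins: {chunks[0][:80]!r}")
```

This bypasses node construction and metadata entirely, so it is best for inspecting chunk boundaries rather than feeding an index.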