This is the full script: it reads documents from a local "data" directory, splits them into sentence-level chunks, embeds each chunk with BAAI/bge-base-en-v1.5, writes the vectors into an in-memory Qdrant collection, and builds a query-ready index on top of that store.
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, Settings
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.extractors import TitleExtractor
from llama_index.core.ingestion import IngestionPipeline
from llama_index.vector_stores.qdrant import QdrantVectorStore
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.llms.ollama import Ollama
import qdrant_client
# Load the embedding model from Hugging Face.
model_name = "BAAI/bge-base-en-v1.5"
embed_model = HuggingFaceEmbedding(model_name=model_name, trust_remote_code=True)
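# Optional sanity check before ingesting anything: bge-base-en-v1.5 produces
# 768-dimensional vectors, so a quick embed call confirms the model loaded.
# vec = embed_model.get_text_embedding("hello world")
# print(len(vec))  # expected: 768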
# Read every file in the local "data" directory into Document objects.
documents = SimpleDirectoryReader("data").load_data()
# Register the embedding model and the LLM globally; Ollama must be running
# locally with the llama3.1 model already pulled.
Settings.embed_model = embed_model
Settings.llm = Ollama(model="llama3.1", request_timeout=360.0)
# Spin up an in-memory Qdrant instance; nothing is persisted to disk.
client = qdrant_client.QdrantClient(location=":memory:")
vector_store = QdrantVectorStore(client=client, collection_name="test_store")
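# Note: ":memory:" is convenient for experiments, but the collection vanishes
# when the process exits. A minimal sketch of the persistent alternatives
# (the path and URL below are illustrative, not from the original):
# client = qdrant_client.QdrantClient(path="./qdrant_db")          # local on-disk store
# client = qdrant_client.QdrantClient(url="http://localhost:6333") # running Qdrant server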
# Chain the transformations: split into chunks, then embed each chunk.
# chunk_size=25 tokens is deliberately tiny so this demo yields many nodes;
# something around 512 is a more typical starting point.
pipeline = IngestionPipeline(
    transformations=[
        SentenceSplitter(chunk_size=25, chunk_overlap=0),
        # TitleExtractor(),  # optional: adds an LLM-generated title per document
        embed_model,
    ],
    vector_store=vector_store,
)
# Ingest directly into the vector db; run() also returns the transformed
# nodes if you want them in memory.
pipeline.run(documents=documents)
# Sanity check: read the stored nodes back out of the vector store.
nodes = vector_store.get_nodes()
# Create your index directly on top of the populated vector store;
# no re-embedding happens here.
index = VectorStoreIndex.from_vector_store(vector_store)
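To actually query what was ingested, a minimal sketch along these lines should work; the question string is just an illustration, and answers are generated by the llama3.1 model registered in Settings above.

# Ask a question over the ingested chunks.
query_engine = index.as_query_engine()
response = query_engine.query("What is this document about?")
print(response)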