I don't know if I'm doing something wrong.
from llama_index.core import VectorStoreIndex
from llama_index.core.schema import TextNode
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
# Build a local embedding model; jina-v2-base-en produces 768-dim English embeddings.
embed_model = HuggingFaceEmbedding(
    model_name="jinaai/jina-embeddings-v2-base-en",
)

# Two toy documents to index; id_ sets a stable node identifier.
nodes = [
    TextNode(text="first question to match", id_="1"),
    TextNode(text="this is a simulation", id_="2"),
]

# Embed the nodes and build an in-memory vector index.
index = VectorStoreIndex(nodes, embed_model=embed_model, show_progress=True)

# similarity_top_k=10 exceeds the corpus size (2 nodes), so all nodes are returned
# ranked by cosine similarity; harmless, but 10 is a no-op here.
vector_retriever = index.as_retriever(similarity_top_k=10)
matches = vector_retriever.retrieve("first question to match")

# Each match is a NodeWithScore; print score and text together so the
# pairing is unambiguous in the output.
for node in matches:
    print(f"score={node.get_score():.4f}  text={node.get_text()!r}")
Any advice on how to improve this? I find BM25 (lexical) retrieval does better on real content, so I am trying hybrid search, but I'm quite disappointed with the semantic search results.