Find answers from the community

Updated 2 months ago

I am creating a query engine wiht Llama-

I am creating a query engine wiht Llama-index version 0.10.18 with local embeddings(hugging face) from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.core.node_parser import SimpleNodeParser
from llama_index.core import ServiceContext,StorageContext


from llama_index.llms.huggingface import HuggingFaceInferenceAPI


from llama_index.embeddings.langchain import LangchainEmbedding
from langchain_community.embeddings.huggingface import HuggingFaceInferenceAPIEmbeddings


import os
from huggingface_hub import login

hf_token=''
login(hf_token)

os.environ['HuggingFaceHub_API_Token']= hf_token

llm= HuggingFaceInferenceAPI(
model_name="HuggingFaceH4/zephyr-7b-alpha",
api_key=hf_token
)

embed_model=HuggingFaceInferenceAPIEmbeddings(
model_name="thenlper/gte-large",
api_key=hf_token,
)

Persis_Dir = './db'

if not os.path.exists(Persis_Dir):
#create index
documents = SimpleDirectoryReader('data').load_data()
parser=SimpleNodeParser() #parsing_doc_into_nodes
nodes=parser.get_nodes_from_documents(documents)

StorageContext= StorageContext.from_defaults()

index=VectorStoreIndex(
nodes,
llm=llm,
embed_model=embed_model,
storage_context=StorageContext
)
else:
#load existing
pass Error:- ValueError: "HuggingFaceEmbeddings" object has no field "callback_manager" in indexing part @everyone
L
2 comments
If you are using langchain embeddings, they need to be wrapped
pip install llama-index-embeddings-langchain

Plain Text
from llama_index.embeddings.langchain import LangchainEmbedding
embed_model = LangchainEmbedding(<lc embeddings>)
Add a reply
Sign up and join the conversation on Discord