GPT4AllEmbeddings error when setting up a llama_index ServiceContext

I'm trying to use a local GPT4All model as the LLM and langchain's GPT4AllEmbeddings as the embedding model in llama_index, but ServiceContext.from_defaults fails with a ValueError:
import tiktoken
from llama_index import (
    LLMPredictor,
    ServiceContext,
    set_global_service_context,
)
from langchain.llms import GPT4All
from langchain.embeddings import GPT4AllEmbeddings
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
callbacks = [StreamingStdOutCallbackHandler()]
local_path = "/path/to/gpt4 model/llama-2-7b-chat.ggmlv3.q4_0.bin"

# Verbose is required to pass to the callback manager
llm = GPT4All(model=local_path, callbacks=callbacks, backend="gptj", verbose=True)
service_context = ServiceContext.from_defaults(
    llm_predictor=LLMPredictor(llm=llm),
    embed_model=GPT4AllEmbeddings(),
)
# set the global default!
set_global_service_context(service_context)
OUTPUT:
ValueError                                Traceback (most recent call last)
/tmp/ipykernel_83194/1998556855.py in <module>
     53 # )
     54
---> 55 service_context = ServiceContext.from_defaults(
     56     llm_predictor=llm_predictor,

~/anaconda3/lib/python3.10/site-packages/llama_index/indices/service_context.py in from_defaults(cls, llm_predictor, llm, prompt_helper, embed_model, node_parser, llama_logger, callback_manager, chunk_size, chunk_overlap, context_window, num_output, chunk_size_limit)
    163         # NOTE: the embed_model isn't used in all indices
    164         embed_model = embed_model or OpenAIEmbedding()
--> 165         embed_model.callback_manager = callback_manager
    166
    167         prompt_helper = prompt_helper or _get_default_prompt_helper(

~/.local/lib/python3.10/site-packages/pydantic/main.cpython-310-x86_64-linux-gnu.so in pydantic.main.BaseModel.__setattr__()

ValueError: "GPT4AllEmbeddings" object has no field "callback_manager"
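
From the traceback, ServiceContext.from_defaults appears to expect an embed_model that implements llama_index's own embedding interface (it tries to set callback_manager on it), whereas langchain's GPT4AllEmbeddings is a plain pydantic model without that field. Below is a minimal sketch of the workaround I'm considering, assuming the LangchainEmbedding wrapper is available in my llama_index version:

from llama_index import LangchainEmbedding  # wrapper for langchain embedding classes (assumed present in this llama_index release)
from langchain.embeddings import GPT4AllEmbeddings

# Wrap the langchain embedding so llama_index can attach its callback_manager to it
embed_model = LangchainEmbedding(GPT4AllEmbeddings())

service_context = ServiceContext.from_defaults(
    llm_predictor=LLMPredictor(llm=llm),
    embed_model=embed_model,
)
set_global_service_context(service_context)

Is wrapping the langchain embedding like this the intended approach, or is there a different way to use GPT4All embeddings with llama_index?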