ValueError: shapes (1536,) and (768,) not aligned: 1536 (dim 0) != 768 (dim 0)
#custom knowledge
# Build an in-memory vector index over a single local document using a local
# HuggingFace sentence-transformers model (768-dim) for embeddings and
# gpt-3.5-turbo for response synthesis, then run one summary query.
from llama_index import (
    GPTVectorStoreIndex,
    LangchainEmbedding,
    StorageContext,
    load_index_from_storage,
    SimpleDirectoryReader,
)
from llama_index import ServiceContext, LLMPredictor
import os
from langchain.chat_models import ChatOpenAI
from langchain.embeddings.huggingface import HuggingFaceEmbeddings

os.environ['OPENAI_API_KEY'] = "YOUR_OPENAI_API_KEY"

# LLM used only for answer synthesis (not for embeddings).
predictor = LLMPredictor(
    llm=ChatOpenAI(temperature=0, max_tokens=1024, model_name="gpt-3.5-turbo")
)

# Local embedding model — avoids OpenAI embedding calls entirely.
hf_embed = LangchainEmbedding(
    HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2")
)

# Shared configuration: chunking plus both models.
service_context = ServiceContext.from_defaults(
    llm_predictor=predictor,
    chunk_size_limit=512,
    embed_model=hf_embed,
)

# Load the document and build the index with the same service_context,
# so indexing and querying use the same embedder.
docs = SimpleDirectoryReader(input_files=["path to one doc"]).load_data()
open_index = GPTVectorStoreIndex.from_documents(docs, service_context=service_context)

query_engine = open_index.as_query_engine(
    similarity_top_k=3, service_context=service_context
)
response = query_engine.query("summarize this document")
print(response.response)
ValueError: shapes (1536,) and (768,) not aligned: 1536 (dim 0) != 768 (dim 0)
#custom knowledge
# Build a vector index with local HuggingFace embeddings, persist it to disk,
# reload it, and query it.
#
# FIX: the original reloaded the index via
#     load_index_from_storage(storage_context=storage_context)
# WITHOUT passing service_context. The reloaded index then fell back to the
# default OpenAI embedder (text-embedding-ada-002, 1536 dims) at query time,
# while the persisted vectors were produced by all-mpnet-base-v2 (768 dims),
# raising:
#     ValueError: shapes (1536,) and (768,) not aligned: 1536 (dim 0) != 768 (dim 0)
# Passing the same service_context at load time keeps query-time embeddings
# consistent with the stored ones.
from llama_index import (
    GPTVectorStoreIndex,
    LangchainEmbedding,
    StorageContext,
    load_index_from_storage,
    SimpleDirectoryReader,
)
from llama_index import ServiceContext, LLMPredictor
import os
from langchain.chat_models import ChatOpenAI
from langchain.embeddings.huggingface import HuggingFaceEmbeddings

os.environ['OPENAI_API_KEY'] = "YOUR_OPENAI_API_KEY"

# LLM used only for answer synthesis (not for embeddings).
llm_predictor = LLMPredictor(
    llm=ChatOpenAI(temperature=0, max_tokens=1024, model_name="gpt-3.5-turbo")
)

# Local 768-dim embedding model.
embed_model = LangchainEmbedding(
    HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2")
)

service_context = ServiceContext.from_defaults(
    llm_predictor=llm_predictor,
    chunk_size_limit=512,
    embed_model=embed_model,
)

documents = SimpleDirectoryReader(input_files=["path to a doc"]).load_data()
open_index = GPTVectorStoreIndex.from_documents(documents, service_context=service_context)

# Persist the index (including its 768-dim embedding vectors) to disk.
open_index.storage_context.persist(persist_dir="./hff_Storage")

# Reload: MUST pass the same service_context, otherwise the default OpenAI
# 1536-dim embedder is used for queries and dimensions won't match.
storage_context = StorageContext.from_defaults(persist_dir="./hff_Storage")
index = load_index_from_storage(
    storage_context=storage_context, service_context=service_context
)

query_engine = index.as_query_engine(similarity_top_k=3, service_context=service_context)
response = query_engine.query("summarize this document")
print(response.response)
>>> from langchain.embeddings.huggingface import HuggingFaceEmbeddings >>> from llama_index import GPTVectorStoreIndex, LangchainEmbedding, ServiceContext, StorageContext, load_index_from_storage, Document >>> >>> embed_model = LangchainEmbedding(HuggingFaceEmbeddings()) >>> service_context = ServiceContext.from_defaults(embed_model=embed_model) >>> doc = Document("this is a document lol!") >>> >>> new_index = GPTVectorStoreIndex.from_documents([doc], service_context=service_context) >>> new_index.as_query_engine().query("hello world") Response(response='\nHello World!', source_nodes=[NodeWithScore(node=Node(text='this is a document lol!', doc_id='7cace66a-1302-41ef-8fa6-98e6cf6feac3', embedding=None, doc_hash='57e74d18803a15a129af5ba1f71081081f50b4e7007689bd4205c0be84063aad', extra_info=None, node_info={'start': 0, 'end': 23}, relationships={<DocumentRelationship.SOURCE: '1'>: '1852954d-a584-4c8c-8f6d-201e901b0765'}), score=0.1624280677241592)], extra_info={'7cace66a-1302-41ef-8fa6-98e6cf6feac3': None}) >>> >>> new_index.storage_context.persist(persist_dir="./newer") >>> >>> newer_index = load_index_from_storage(StorageContext.from_defaults(persist_dir="./newer"), service_context=service_context) >>> newer_index.as_query_engine().query("hello world") Response(response='\nHello World!', source_nodes=[NodeWithScore(node=Node(text='this is a document lol!', doc_id='7cace66a-1302-41ef-8fa6-98e6cf6feac3', embedding=None, doc_hash='57e74d18803a15a129af5ba1f71081081f50b4e7007689bd4205c0be84063aad', extra_info=None, node_info={'start': 0, 'end': 23}, relationships={<DocumentRelationship.SOURCE: '1'>: '1852954d-a584-4c8c-8f6d-201e901b0765'}), score=0.1624280677241592)], extra_info={'7cace66a-1302-41ef-8fa6-98e6cf6feac3': None}) >>>