"""Build a DocumentSummaryIndex over local documents using a DeciLM-6b LLM
and a BGE small embedding model, registering a global llama_index
ServiceContext so downstream queries pick up the same models."""
from llama_index import (
    DocumentSummaryIndex,
    ServiceContext,
    SimpleDirectoryReader,
    get_response_synthesizer,
    set_global_service_context,
)
from llama_index.llms import OpenAI, HuggingFaceLLM
from llama_index.node_parser import SimpleNodeParser
from llama_index.response_synthesizers import ResponseMode
from langchain.embeddings import HuggingFaceBgeEmbeddings, HuggingFaceEmbeddings

# BGE models need the BGE-specific wrapper (it applies the model's query
# instruction prefix); the generic HuggingFaceEmbeddings class skips that.
hf = HuggingFaceBgeEmbeddings(
    model_name="BAAI/bge-small-en-v1.5",
    model_kwargs={'device': 'cpu'},
    # Normalized embeddings make cosine similarity equivalent to dot product.
    encode_kwargs={'normalize_embeddings': True}
)
llm = HuggingFaceLLM(model_name="Deci/DeciLM-6b")
# NOTE(review): `callback_manager` is not defined in this file — it must be
# created before this point (e.g. a CallbackManager with a debug handler).
service_context = ServiceContext.from_defaults(
    callback_manager=callback_manager, embed_model=hf, llm=llm
)
# Make these models the default for every index/query in this process.
set_global_service_context(service_context)

# Load in the Documents.
# NOTE(review): `input_files` is not defined in this file — confirm it is a
# list of paths supplied before this point.
documents = SimpleDirectoryReader(input_files=input_files).load_data()
parser = SimpleNodeParser.from_defaults()
nodes = parser.get_nodes_from_documents(documents, show_progress=True)

# COMPACT stuffs as many node chunks per LLM call as the context allows;
# use_async parallelizes the per-document summary calls.
response_synthesizer = get_response_synthesizer(
    response_mode=ResponseMode.COMPACT, use_async=True, verbose=True,
)
doc_summary_index = DocumentSummaryIndex(
    nodes, show_progress=True, response_synthesizer=response_synthesizer)