Traceback (most recent call last):
File "D:\rag-llamaindex\backend\main.py", line 7, in <module>
from app.api.routers.chat import chat_router
File "D:\rag-llamaindex\backend\app\api\routers\chat.py", line 9, in <module>
from llama_index.llms.base import MessageRole, ChatMessage
ImportError: cannot import name 'MessageRole' from 'llama_index.llms.base' (D:\ragllama\Lib\site-packages\llama_index\llms\base.py)
Try importing like this:
Plain Text
from llama_index.llms import ChatMessage, MessageRole
Still getting the same error message.
Share the updated error once more.
Traceback (most recent call last):
File "D:\rag-llamaindex\backend\main.py", line 7, in <module>
from app.api.routers.chat import chat_router
File "D:\rag-llamaindex\backend\app\api\routers\chat.py", line 9, in <module>
from llama_index.llms.base import ChatMessage, MessageRole
ImportError: cannot import name 'MessageRole' from 'llama_index.llms.base' (D:\ragllama\Lib\site-packages\llama_index\llms\base.py)
Is the MessageRole class still available in the llama_index.llms.base module?
You are still importing from the base module:

  File "D:\rag-llamaindex\backend\app\api\routers\chat.py", line 9, in <module>
    from llama_index.llms.base import ChatMessage, MessageRole

Do this instead: from llama_index.llms import ChatMessage, MessageRole
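For reference, recent llama_index versions re-export those names from llama_index.llms, so a quick sanity check (a minimal sketch; the role and content values are just placeholders) would be:
Plain Text
from llama_index.llms import ChatMessage, MessageRole

# Build a chat message to confirm the imports resolve
msg = ChatMessage(role=MessageRole.USER, content="Hello")
print(msg.role, msg.content)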
Plain Text
import logging
import os

from llama_index import (
    SimpleDirectoryReader,
    StorageContext,
    VectorStoreIndex,
    load_index_from_storage,
    ServiceContext,
)
from llama_index.llms import LlamaCPP
from llama_index.llms.llama_utils import (
    messages_to_prompt,
    completion_to_prompt,
)
from llama_index.query_engine import RetrieverQueryEngine
from llama_index.retrievers import VectorIndexRetriever
# from llama_index.llms import ChatMessage, MessageRole

model_path = r"C:\Users\user\AppData\Local\llama_index\models\mixtral-8x7b-instruct-v0.1.Q4_K_M.gguf"
STORAGE_DIR = "./storage"  # directory to cache the generated index
DATA_DIR = "./data"  # directory containing the documents to index

service_context = ServiceContext.from_defaults(
    embed_model="local",
    llm=LlamaCPP(
        model_url=None,
        model_path=model_path,
        temperature=0.7,
        max_new_tokens=256,
        context_window=3900,
        model_kwargs={"n_gpu_layers": 1},
        messages_to_prompt=messages_to_prompt,
        completion_to_prompt=completion_to_prompt,
        verbose=True,
    ),
)


def get_index():
    logger = logging.getLogger("uvicorn")

    if not os.path.exists(STORAGE_DIR):
        logger.info("Creating new index")
        documents = SimpleDirectoryReader(DATA_DIR).load_data()
        index = VectorStoreIndex.from_documents(documents, service_context=service_context)
        index.storage_context.persist(STORAGE_DIR)
        logger.info(f"Finished creating new index. Stored in {STORAGE_DIR}")
    else:
        logger.info(f"Loading index from {STORAGE_DIR}...")
        storage_context = StorageContext.from_defaults(persist_dir=STORAGE_DIR)
        index = load_index_from_storage(storage_context, service_context=service_context)
        logger.info(f"Finished loading index from {STORAGE_DIR}")
    return index


index = get_index()
query_engine = index.as_query_engine()
Will this work?
For summary generation
Import issue resolved?

Also, yeah, this should work, but if your main target is only summarization, I would suggest you check https://docs.llamaindex.ai/en/stable/examples/index_structs/doc_summary/DocSummary.html
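That page builds a DocumentSummaryIndex, which pre-computes a summary per document at index time. A rough sketch reusing the service_context from your snippet above (untested, 0.9-style imports):
Plain Text
from llama_index import SimpleDirectoryReader, get_response_synthesizer
from llama_index.indices.document_summary import DocumentSummaryIndex

documents = SimpleDirectoryReader("./data").load_data()

# Summaries are generated bottom-up with a tree of LLM calls
response_synthesizer = get_response_synthesizer(
    response_mode="tree_summarize", use_async=True
)
doc_summary_index = DocumentSummaryIndex.from_documents(
    documents,
    service_context=service_context,  # the LlamaCPP context defined earlier
    response_synthesizer=response_synthesizer,
)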
That issue is resolved, but I'm not getting proper responses from the ingested docs.
Also, I want to summarise local PDF, DOC, CSV, JSON, and TXT files.
Any update on this, @WhiteFang_Jr?
If you want to summarise the docs, tree summarize might help you in this case:

Plain Text
from llama_index import SummaryIndex

index = SummaryIndex.from_documents(documents)

query_engine = index.as_query_engine(response_mode="tree_summarize", use_async=True)
response = query_engine.query("Summarize the provided text.")
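For the mixed file types, SimpleDirectoryReader already dispatches on file extension, so something like this should feed the index (a hedged sketch; required_exts just filters what gets picked up, PDF parsing needs `pip install pypdf`, and older .doc files may need an extra reader):
Plain Text
from llama_index import SimpleDirectoryReader

documents = SimpleDirectoryReader(
    "./data",
    required_exts=[".pdf", ".docx", ".csv", ".json", ".txt"],
    recursive=True,  # also walk subdirectories
).load_data()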
Is it possible to include summarization with the create-llama package?
You can change the code as per your requirements once you download it.
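For example (a hypothetical sketch; the exact file layout depends on the create-llama template you pick), wherever the generated backend builds its query engine you could switch the response mode:
Plain Text
# Illustrative only; find the spot in the generated code where the
# query engine is created and pass the summarization response mode
query_engine = index.as_query_engine(
    response_mode="tree_summarize",
    use_async=True,
)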