Find answers from the community

Home
Members
toyari.
t
toyari.
Offline, last seen 3 months ago
Joined September 25, 2024
Hello, I got this error when running `DenseXRetrievalPack = download_llama_pack("DenseXRetrievalPack", "./dense_pack")`:

Traceback (most recent call last):

File c:\Users\33641\Desktop\DeepDetect\myenv\Lib\site-packages\IPython\core\interactiveshell.py:3526 in run_code
exec(code_obj, self.user_global_ns, self.user_ns)

Cell In[6], line 3
DenseXRetrievalPack = download_llama_pack("DenseXRetrievalPack", "./dense_pack")

File c:\Users\33641\Desktop\DeepDetect\myenv\Lib\site-packages\llama_index\llama_pack\download.py:29 in download_llama_pack
pack_cls = download_llama_module(

File c:\Users\33641\Desktop\DeepDetect\myenv\Lib\site-packages\llama_index\download\module.py:256 in download_llama_module
spec.loader.exec_module(module) # type: ignore

File <frozen importlib._bootstrap_external>:936 in exec_module

File <frozen importlib._bootstrap_external>:1074 in get_code

File <frozen importlib._bootstrap_external>:1004 in source_to_code

File <frozen importlib._bootstrap>:241 in _call_with_frames_removed

File c:\Users\33641\Desktop\DeepDetect\dense_pack/base.py:65
)
^
SyntaxError: (unicode error) 'utf-8' codec can't decode byte 0xaf in position 662: invalid start byte
3 comments
t
L
t
toyari.
·

Client

Got this error since earlier. Anyone know how to resolve this?

My code :
"""Set up a llama_index ServiceContext backed by an OpenAI LLM.

Fixes applied to the originally posted snippet:
  * `define LLM` was a bare statement (SyntaxError) — turned into a comment.
  * `os.environ['sk-...']` looked up an environment variable whose NAME was
    the literal secret key (guaranteed KeyError) and leaked the credential in
    source — replaced with the conventional `OPENAI_API_KEY` variable.
  * `from openai import OpenAI` shadowed the llama_index `OpenAI` LLM wrapper,
    so the later `OpenAI(temperature=..., model=...)` call hit the raw client
    class with invalid kwargs — the raw client is now imported under an alias.
"""
import os
import logging
import sys

from llama_index import (
    KnowledgeGraphIndex,
    LLMPredictor,
    ServiceContext,
    SimpleDirectoryReader,
)
from llama_index.llms import OpenAI
from llama_index.storage.storage_context import StorageContext
from llama_index.graph_stores import Neo4jGraphStore
from IPython.display import Markdown, display

# Alias the raw OpenAI SDK client so it does not clobber the llama_index
# `OpenAI` LLM wrapper imported above.
from openai import OpenAI as OpenAIClient

# Read the secret from an environment variable — never hard-code the key, and
# never use the key itself as the `os.environ` lookup name.
client = OpenAIClient(
    api_key=os.environ["OPENAI_API_KEY"],  # the SDK default; could be omitted
)

logging.basicConfig(stream=sys.stdout, level=logging.INFO)

# Define the LLM used by the service context.
llm = OpenAI(temperature=0, model="gpt-3.5-turbo-16k-0613")  # or gpt-4-0613
service_context = ServiceContext.from_defaults(llm=llm, chunk_size=1000)
2 comments
t
L