File "/usr/local/lib/python3.9/importlib/__init__.py", line 127, in import_module return _bootstrap._gcd_import(name[level:], package, level) File "<frozen importlib._bootstrap>", line 1030, in _gcd_import File "<frozen importlib._bootstrap>", line 1007, in _find_and_load File "<frozen importlib._bootstrap>", line 986, in _find_and_load_unlocked File "<frozen importlib._bootstrap>", line 680, in _load_unlocked File "<frozen importlib._bootstrap_external>", line 850, in exec_module File "<frozen importlib._bootstrap>", line 228, in _call_with_frames_removed File "/app/main.py", line 1, in <module> __import__('pysqlite3') ModuleNotFoundError: No module named 'pysqlite3'
ERROR: Could not find a version that satisfies the requirement pysqlite3-binary==0.5.2 (from versions: none) ERROR: No matching distribution found for pysqlite3-binary==0.5.2
2024-05-08 13:15:21,369 - openai._base_client - INFO - Retrying request to /embeddings in 6.947019 seconds (_base_client.py:927) 2024-05-08 13:15:28,721 - httpx - INFO - HTTP Request: POST https://api.openai.com/v1/embeddings "HTTP/1.1 429 Too Many Requests" (_client.py:1027) 2024-05-08 13:15:28,723 - openai._base_client - INFO - Retrying request to /embeddings in 7.414641 seconds (_base_client.py:927) 2024-05-08 13:15:36,494 - httpx - INFO - HTTP Request: POST https://api.openai.com/v1/embeddings "HTTP/1.1 429 Too Many Requests" (_client.py:1027) 2024-05-08 13:15:36,495 - openai._base_client - INFO - Retrying request to /embeddings in 6.268730 seconds (_base_client.py:927) ^C^C 2024-05-08 13:15:43,168 - httpx - INFO - HTTP Request: POST https://api.openai.com/v1/embeddings "HTTP/1.1 429 Too Many Requests" (_client.py:1027) 2024-05-08 13:15:43,169 - openai._base_client - INFO - Retrying request to /embeddings in 6.461716 seconds (_base_client.py:927) 2024-05-08 13:15:50,020 - httpx - INFO - HTTP Request: POST https://api.openai.com/v1/embeddings "HTTP/1.1 429 Too Many Requests" (_client.py:1027) 2024-05-08 13:15:50,021 - llama_index.llms.openai_utils - WARNING - Retrying llama_index.embeddings.openai.get_embeddings in 0.20855130440353753 seconds as it raised RateLimitError: Error code: 429 - {'error': {'message': 'You exceeded your current quota, please check your plan and billing details. For more information on this error, read the docs: https://platform.openai.com/docs/guides/error-codes/api-errors.', 'type': 'insufficient_quota', 'param': None, 'code': 'insufficient_quota'}}. (before_sleep.py:65)
INFO:openai:error_code=None error_message="This model's maximum context length is 4097 tokens, however you requested 4358 tokens (3334 in your prompt; 1024 for the completion). Please reduce your prompt; or completion length." error_param=None error_type=invalid_request_error message='OpenAI API error received' stream_error=False
# Configure the LLM predictor and rehydrate a previously-persisted
# vector index from disk.
num_outputs = 1024

llm_predictor = LLMPredictor(
    llm=OpenAI(
        temperature=0.1,
        model_name="gpt-3.5-turbo-16k",
        max_tokens=num_outputs,
        streaming=True,
    )
)

# NOTE(review): this loads from "indexstore/newnew" while
# construct_index() persists to "index" — confirm these two paths are
# intentionally different.
storage_context = StorageContext.from_defaults(persist_dir="indexstore/newnew")
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor)

# Load the persisted index back into memory.
index = load_index_from_storage(
    storage_context,
    service_context=service_context,
)
def construct_index(directory_path, persist_dir="index"):
    """Build a vector index over the documents in *directory_path*.

    Reads every file in the directory with SimpleDirectoryReader,
    builds a GPTVectorStoreIndex from the resulting documents, and
    persists the index to *persist_dir* so it can later be rehydrated
    with load_index_from_storage().

    Args:
        directory_path: Directory whose files are loaded and indexed.
        persist_dir: Where the index is written. Defaults to "index",
            the location the original implementation hard-coded.

    Returns:
        The constructed GPTVectorStoreIndex.
    """
    num_outputs = 1024

    # NOTE(review): "text-davinci-003" is a legacy completions model —
    # confirm it is still available on the account before relying on this.
    llm_predictor = LLMPredictor(
        llm=OpenAI(
            temperature=0.1,
            model_name="text-davinci-003",
            max_tokens=num_outputs,
        )
    )
    service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor)

    docs = SimpleDirectoryReader(directory_path).load_data()

    # BUG FIX: load_data() returns Document objects, not parsed nodes.
    # Passing them via nodes= skips the node-parsing/chunking step; the
    # documented entry point for raw documents is from_documents().
    index = GPTVectorStoreIndex.from_documents(
        docs, service_context=service_context
    )
    index.storage_context.persist(persist_dir=persist_dir)
    return index