self._search
# context somehow
# Build an ephemeral (in-memory) ChromaDB collection, timestamped so each
# analysis run gets a unique name, then wrap it in a ChromaVectorStore.
chroma_name = f"analysis_{datetime.datetime.now().strftime('%Y%m%d_%H%M%S')}"
print(f"Chroma collection name: {chroma_name}")

chroma_client = chromadb.EphemeralClient()
chroma_collection = chroma_client.get_or_create_collection(chroma_name)

# Create ChromaVectorStore
print(f"Creating ChromaVectorStore for collection: {chroma_name}")
vector_store = ChromaVectorStore(
    collection_name=chroma_name,
    chroma_collection=chroma_collection,
)
print(
    f"ChromaVectorStore created from collection: {vector_store.collection_name}"
)
An error occurred: 'Collection' object has no attribute 'model_fields'
discord.py
doesn't really love nest_asyncio lolTraceback (most recent call last): File "/Users/zachhandley/Documents/GitHub/my-project/api/app/db/vector_stores_temp.py", line 295, in <module> asyncio.run(main()) File "/Users/zachhandley/Documents/GitHub/my-project/api/.venv/lib/python3.11/site-packages/nest_asyncio.py", line 30, in run return loop.run_until_complete(task) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/Users/zachhandley/Documents/GitHub/my-project/api/.venv/lib/python3.11/site-packages/nest_asyncio.py", line 98, in run_until_complete return f.result() ^^^^^^^^^^ File "/usr/local/Cellar/python@3.11/3.11.7_1/Frameworks/Python.framework/Versions/3.11/lib/python3.11/asyncio/futures.py", line 203, in result raise self._exception.with_traceback(self._exception_tb) File "/usr/local/Cellar/python@3.11/3.11.7_1/Frameworks/Python.framework/Versions/3.11/lib/python3.11/asyncio/tasks.py", line 277, in __step result = coro.send(None) ^^^^^^^^^^^^^^^ File "/Users/zachhandley/Documents/GitHub/my-project/api/app/db/vector_stores_temp.py", line 238, in main user_images = await vector_store_temp.get_user_images() ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/Users/zachhandley/Documents/GitHub/my-project/api/app/db/vector_stores_temp.py", line 191, in get_user_images return await self._image_retriever.aretrieve(query_str) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/Users/zachhandley/Documents/GitHub/my-project/api/app/ai/zimage_retriever.py", line 249, in aretrieve return await self._atext_to_image_retrieve(query) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/Users/zachhandley/Documents/GitHub/my-project/api/app/ai/zimage_retriever.py", line 220, in _atext_to_image_retrieve engine = index.as_chat_engine( ^^^^^^^^^^^^^^^^^^^^^ File "/Users/zachhandley/Documents/GitHub/my-project/api/.venv/lib/python3.11/site-packages/llama_index/core/indices/base.py", line 413, in as_chat_engine resolve_llm(llm, callback_manager=self._callback_manager) File 
"/Users/zachhandley/Documents/GitHub/my-project/api/.venv/lib/python3.11/site-packages/llama_index/core/llms/utils.py", line 101, in resolve_llm llm.callback_manager = callback_manager or Settings.callback_manager ^^^^^^^^^^^^^^^^^^^^ File "pydantic/main.py", line 357, in pydantic.main.BaseModel.__setattr__ ValueError: "OpenAIMultiModal" object has no field "callback_manager"
"Error generating summary: 'NodeWithScore' object has no attribute 'image_mimetype'"
# Wrap the raw image bytes in an ImageDocument keyed by user + image id,
# carrying the request's optional caption text and MIME type.
caption = image_request.text_request.text if image_request.text_request else ""
image_doc = ImageDocument(
    doc_id=f"{image_request.userId}_{image_id}",
    image=image_data,
    text=caption,
    image_mimetype=image_request.mimetype,
)
"""Query a multimodal (text + image) Qdrant-backed llama_index knowledgebase
with GPT-4 Vision: retrieve image nodes, then run a descriptive query."""
import openai
import os

from llama_index.multi_modal_llms.openai import OpenAIMultiModal
from llama_index.indices.multi_modal.base import MultiModalVectorStoreIndex
from llama_index.query_engine import SimpleMultiModalQueryEngine
from llama_index import SimpleDirectoryReader
from llama_index.vector_stores import QdrantVectorStore
from qdrant_client import QdrantClient
from dotenv import load_dotenv

load_dotenv()
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
openai.api_key = OPENAI_API_KEY

client = QdrantClient(url="http://localhost")

openai_mm_llm = OpenAIMultiModal(
    model="gpt-4-vision-preview",
    api_key=OPENAI_API_KEY,  # reuse the value already read above
    max_new_tokens=1500,
)

# Separate Qdrant collections back the text and image sides of the index.
vector_store = QdrantVectorStore(
    "global_text_store",
    client=client,
)
image_store = QdrantVectorStore(
    "global_image_store",
    client=client,
)

index = MultiModalVectorStoreIndex.from_vector_store(
    vector_store=vector_store,
    image_vector_store=image_store,
    use_async=False,
    show_progress=True,
)

retriever = index.as_retriever()
image_nodes = retriever.retrieve("Find images in the knowledgebase.")
print("Image Nodes: ", image_nodes)

# BUG FIX: SimpleMultiModalQueryEngine takes its LLM via `multi_modal_llm=`;
# the original `openai_mm_llm=` keyword was absorbed by **kwargs, so the
# engine silently fell back to its default LLM instead of the GPT-4V one.
query_engine = SimpleMultiModalQueryEngine(
    retriever=retriever,  # reuse the retriever built above
    multi_modal_llm=openai_mm_llm,
)
response_1 = query_engine.query(
    "Describe the images in your knowledgebase as if you were a blind person.",
)
print("Response: ", response_1)
# NOTE(review): collapsed paste of a function body — note the trailing
# `return response`; the enclosing `def` is not visible here, so the code is
# left byte-identical. It builds llama_index refine/QA prompts from langchain
# chat message templates and queries a GPTVectorStoreIndex over one Document.
# NOTE(review): `article_service_context` is constructed but never passed to
# `GPTVectorStoreIndex.from_documents(...)` — presumably it was meant to be
# (the configured gpt-3.5-turbo predictor is otherwise unused); verify.
document = Document(script, "script") CHAT_REFINE_PROMPT_TMPL_MSGS = [ # optionally add a system prompt # SystemMessagePromptTemplate.from_template("Some system message"), HumanMessagePromptTemplate.from_template("{query_str}"), AIMessagePromptTemplate.from_template("{existing_answer}"), HumanMessagePromptTemplate.from_template( "We have the opportunity to refine the above answer " "(only if needed) with some more context below.\n" "------------\n" "{context_msg}\n" "------------\n" "Given the new context, refine the original answer to better " "answer the question. " "If the context isn't useful, output the original answer again.", ), ] CHAT_REFINE_PROMPT_LC = ChatPromptTemplate.from_messages(CHAT_REFINE_PROMPT_TMPL_MSGS) CHAT_REFINE_PROMPT = Prompt.from_langchain_prompt(CHAT_REFINE_PROMPT_LC) CHAT_QA_PROMPT_TMPL_MSGS = [ # optionally add a system prompt # SystemMessagePromptTemplate.from_template("My system message"), HumanMessagePromptTemplate.from_template( "Context information is below. \n" "------------\n" "{context_msg}\n" "------------\n" "Given the context information and not prior knowledge, " "answer the question: {query_str}\n" ), ] CHAT_QA_PROMPT_LC = ChatPromptTemplate.from_messages(CHAT_QA_PROMPT_TMPL_MSGS) CHAT_QA_PROMPT = Prompt.from_langchain_prompt(CHAT_QA_PROMPT_LC) article_llm = LLMPredictor(llm=ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo")) article_service_context = ServiceContext.from_defaults(llm_predictor=article_llm, chunk_size=1024) query_engine = GPTVectorStoreIndex.from_documents([document]).as_query_engine(text_qa_template=CHAT_QA_PROMPT, refine_template=CHAT_REFINE_PROMPT) response = query_engine.query(script_prompt) print(f"Script: {response}") return response
response = await query_engine.aquery(script_prompt) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/usr/local/lib/python3.11/site-packages/llama_index/indices/query/base.py", line 30, in aquery response = await self._aquery(str_or_query_bundle) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/usr/local/lib/python3.11/site-packages/llama_index/query_engine/sub_question_query_engine.py", line 124, in _aquery sub_questions = await self._question_gen.agenerate( ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/usr/local/lib/python3.11/site-packages/llama_index/question_gen/llm_generators.py", line 78, in agenerate parse = self._prompt.output_parser.parse(prediction) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/usr/local/lib/python3.11/site-packages/llama_index/question_gen/output_parser.py", line 10, in parse json_dict = parse_json_markdown(output) ^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/usr/local/lib/python3.11/site-packages/llama_index/output_parsers/utils.py", line 18, in parse_json_markdown raise OutputParserException(f"Got invalid JSON object. Error: {e}") llama_index.output_parsers.base.OutputParserException: Got invalid JSON object. Error: Unterminated string starting at: line 49 column 22 (char 4229)
# Summarize a post with a DocumentSummaryIndex driven by gpt-3.5-turbo.
llm_predictor_chatgpt = LLMPredictor(
    llm=ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo")
)
service_context = ServiceContext.from_defaults(
    llm_predictor=llm_predictor_chatgpt, chunk_size=1024
)

# get the summary prompt
# BUG FIX: the original read the file into `summary_prompt` but then assigned
# `summary_query` from the misspelled `summory_prompt`, which still held the
# empty string — the prompt file's contents were silently discarded.
with open("summary_prompt.txt", "r") as f:
    summary_prompt = f.read()
summary_query = summary_prompt
print(f"Summary Query length {len(summary_query)}")

# assumes `post` has title/subtitle/content and `article` has a url —
# both come from the surrounding scope; confirm against the caller.
text = f"{post.title}\n{post.subtitle}\n{post.content}"
document = Document(text, article.url)
document_summary_index = DocumentSummaryIndex.from_documents(
    [document], service_context=service_context
)
index = document_summary_index.as_query_engine()
summary = index.query(summary_query)
print(f"Summary: {summary}")