Is this the "compact" response mode described here? https://docs.llamaindex.ai/en/stable/module_guides/querying/response_synthesizers/

From the docs: "Details: stuff as many text chunks (concatenated/packed from the retrieved chunks) as can fit within the context window (considering the maximum prompt size between text_qa_template and refine_template). If the text is too long to fit in one prompt, it is split into as many parts as needed (using a TokenTextSplitter and thus allowing some overlap between text chunks)."
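For reference, a minimal sketch of selecting the compact synthesizer on a query engine (the data directory and index construction here are assumptions, not from the original question):

from llama_index.core import VectorStoreIndex, SimpleDirectoryReader

# Placeholder data directory; replace with your own documents
documents = SimpleDirectoryReader("./data").load_data()
index = VectorStoreIndex.from_documents(documents)

# "compact" packs the retrieved chunks into as few prompts as fit the
# context window, instead of one LLM call per chunk as with "refine"
query_engine = index.as_query_engine(response_mode="compact")
response = query_engine.query("What did the author do growing up?")
print(response)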
query_engine = index.as_query_engine(
    vector_store_query_mode="hybrid",
    similarity_top_k=2,
)
response = query_engine.query(
    "What did the author do growing up?",
)
----> 4 dataset_generator.generate_questions_from_nodes()

File ~/Desktop/gen-ai-bot-builder/fastapi/genai_env_4/lib/python3.9/site-packages/nest_asyncio.py:99, in _patch_loop.<locals>.run_until_complete(self, future)
     96 if not f.done():
     97     raise RuntimeError(
     98         'Event loop stopped before Future completed.')
---> 99 return f.result()

File /Library/Developer/CommandLineTools/Library/Frameworks/Python3.framework/Versions/3.9/lib/python3.9/asyncio/futures.py:201, in Future.result(self)
    199 self.__log_traceback = False
    200 if self._exception is not None:
--> 201     raise self._exception
    202 return self._result

File /Library/Developer/CommandLineTools/Library/Frameworks/Python3.framework/Versions/3.9/lib/python3.9/asyncio/tasks.py:256, in Task.__step(***failed resolving arguments***)
    252 try:
    253     if exc is None:
    254         # We use the `send` method directly, because coroutines
    255         # don't have `__iter__` and `__next__` methods.
--> 256         result = coro.send(None)
    257     else:
    258         result = coro.throw(exc)

File ~/Desktop/gen-ai-bot-builder/fastapi/genai_env_4/lib/python3.9/site-packages/llama_index/llama_dataset/generator.py:218, in RagDatasetGenerator.agenerate_questions_from_nodes(self)
    216 """Generates questions for each document."""
    217 dataset = await self._agenerate_dataset(self.nodes, labelled=False)
--> 218 return dataset.questions

AttributeError: 'LabelledRagDataset' object has no attribute 'questions'
dataset_generator = RagDatasetGenerator.from_documents(documents)
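If the installed version raises this error, one workaround is to build the full dataset and read the questions from its examples. This is only a sketch, assuming the generate_dataset_from_nodes() method and the examples/query fields of LabelledRagDataset in the release shown in the traceback:

from llama_index.llama_dataset.generator import RagDatasetGenerator

dataset_generator = RagDatasetGenerator.from_documents(documents)

# Build the dataset directly instead of calling generate_questions_from_nodes()
rag_dataset = dataset_generator.generate_dataset_from_nodes()

# Each example carries the generated question in its `query` field
questions = [example.query for example in rag_dataset.examples]
print(questions[:5])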
from llama_index.embeddings import AzureOpenAIEmbedding

# You need to deploy your own embedding model as well as your own chat completion model
embed_model = AzureOpenAIEmbedding(
    model="text-embedding-ada-002",
    deployment_name="text-embedding-ada-002-2",
    api_key='...',
    azure_endpoint='...',
    api_version="2",
)

embed_model.get_text_embedding("It is raining cats and dogs here!")

When I try to run it, the following error occurs:

NotFoundError: Error code: 404 - {'error': {'code': '404', 'message': 'Resource not found'}}
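A 404 "Resource not found" from Azure OpenAI typically means the azure_endpoint, deployment_name, or api_version does not resolve to an existing deployment; in particular, api_version is expected to be a dated string such as "2023-05-15" rather than "2". A sketch with placeholder values (the endpoint, key, and deployment name here are assumptions to replace with your own):

from llama_index.embeddings import AzureOpenAIEmbedding

embed_model = AzureOpenAIEmbedding(
    model="text-embedding-ada-002",
    # Must match the deployment name created in Azure OpenAI Studio
    deployment_name="text-embedding-ada-002-2",
    api_key="<your-azure-openai-key>",
    # Full resource endpoint, e.g. https://<resource-name>.openai.azure.com/
    azure_endpoint="https://<resource-name>.openai.azure.com/",
    # A dated Azure OpenAI API version string
    api_version="2023-05-15",
)

print(embed_model.get_text_embedding("It is raining cats and dogs here!")[:5])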
from llama_index.core.indices.vector_store.base import VectorStoreIndex
from llama_index.legacy.vector_stores.qdrant import QdrantVectorStore
import qdrant_client
from llama_index.core import Settings
from llama_index.legacy.embeddings.ollama_embedding import OllamaEmbedding
from llama_index.legacy.embeddings import LangchainEmbedding
from langchain.embeddings import OllamaEmbeddings

# Initialize Ollama embedding model
embed_model = OllamaEmbedding(model_name="nomic-embed-text", base_url="http://localhost:11434")

# Set the global embedding model
Settings.embed_model = embed_model
---------------------------------------------------------------------------
AssertionError                            Traceback (most recent call last)
Cell In[48], line 13
     10 embed_model = OllamaEmbedding(model_name="nomic-embed-text", base_url="http://localhost:11434")
     12 # Set the global embedding model
---> 13 Settings.embed_model = embed_model

File ~/Library/Caches/pypoetry/virtualenvs/ollama-env-Rz8XYqBf-py3.12/lib/python3.12/site-packages/llama_index/core/settings.py:74, in _Settings.embed_model(self, embed_model)
     71 @embed_model.setter
     72 def embed_model(self, embed_model: EmbedType) -> None:
     73     """Set the embedding model."""
---> 74     self._embed_model = resolve_embed_model(embed_model)

File ~/Library/Caches/pypoetry/virtualenvs/ollama-env-Rz8XYqBf-py3.12/lib/python3.12/site-packages/llama_index/core/embeddings/utils.py:136, in resolve_embed_model(embed_model, callback_manager)
    133     print("Embeddings have been explicitly disabled. Using MockEmbedding.")
    134     embed_model = MockEmbedding(embed_dim=1)
--> 136 assert isinstance(embed_model, BaseEmbedding)
    138 embed_model.callback_manager = callback_manager or Settings.callback_manager
    140 return embed_model

AssertionError:
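The assertion fails because the OllamaEmbedding imported from llama_index.legacy is not a subclass of the llama_index.core BaseEmbedding that Settings.embed_model expects, so mixing legacy and core imports trips the isinstance check. A sketch of a core-only setup, assuming the llama-index-embeddings-ollama and llama-index-vector-stores-qdrant integration packages are installed:

import qdrant_client
from llama_index.core import Settings
from llama_index.core.indices.vector_store.base import VectorStoreIndex
from llama_index.embeddings.ollama import OllamaEmbedding
from llama_index.vector_stores.qdrant import QdrantVectorStore

# Core-compatible Ollama embedding model
embed_model = OllamaEmbedding(model_name="nomic-embed-text", base_url="http://localhost:11434")

# Passes the isinstance(embed_model, BaseEmbedding) check in the setter
Settings.embed_model = embed_model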
# Weaviate
weaviate_client = weaviate.Client("http://localhost:8080")  # Connect with default parameters

# First, flush the existing vector store to ensure a clean state
flush_weaviate_vector_store(weaviate_client)

vector_store = WeaviateVectorStore(
    weaviate_client=weaviate_client,
    index_name="LlamaIndex",
)
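Note that flush_weaviate_vector_store is not part of LlamaIndex or the Weaviate client; it is assumed here to be a local helper. One possible sketch, using the Weaviate v3 client's schema API to drop the class that backs the index (the class name matches index_name above):

def flush_weaviate_vector_store(client, class_name="LlamaIndex"):
    """Delete the Weaviate class backing the vector store, if it exists."""
    schema = client.schema.get()
    existing_classes = [c["class"] for c in schema.get("classes", [])]
    if class_name in existing_classes:
        client.schema.delete_class(class_name)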
import weaviate
from llama_index.vector_stores import WeaviateVectorStore

weaviate_client = weaviate.Client("http://localhost:8080")  # Connect with default parameters

vector_store = WeaviateVectorStore(
    weaviate_client=weaviate_client,
    index_name="LlamaIndex",
)
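For completeness, the vector store then typically goes into a StorageContext for index construction; a minimal sketch matching the pre-0.10 import style above (documents are assumed to be loaded already):

from llama_index import VectorStoreIndex, StorageContext

storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(documents, storage_context=storage_context)

query_engine = index.as_query_engine()
print(query_engine.query("What did the author do growing up?"))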