Find answers from the community

Updated 2 months ago

Hey everyone, I am trying to use Gemini

Hey everyone, I am trying to use Gemini Pro through a Vertex Google API key, and was trying to use the chat engine feature, I was first starting with the example given in the documentation but every time I run my code, I keep getting the error below, and I am not able to debug what I might be doing wrong, here is the error i keep getting:

Plain Text
 .venv/lib/python3.11/site-packages/google/api_core/grpc_helpers.py", line 174, in error_remapped_callable
    raise exceptions.from_grpc_error(exc) from exc
google.api_core.exceptions.ServiceUnavailable: 503 Getting metadata from plugin failed with error: 'str' object has no attribute 'before_request' 


Any help would be greatly appreciated!
L
s
4 comments
Do you have more of the traceback? How did you setup your LLM?
this is my full code:

Plain Text
import xml.etree.ElementTree as ET
from llama_index.vector_stores.elasticsearch import ElasticsearchStore
from llama_index.core import Settings
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from dotenv import load_dotenv
import os
from llama_index.llms.vertex import Vertex
from llama_index.core import VectorStoreIndex, StorageContext

load_dotenv()

class Title21QA:
    def __init__(self):
        GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")

        Settings.llm = Vertex(
            model="gemini-pro", project="lexim-gen-ai", credentials=GOOGLE_API_KEY
        )

        # Settings.llm.set_safety_settings(safety_settings)
        Settings.embed_model = HuggingFaceEmbedding(
            model_name="BAAI/bge-small-en-v1.5"
        )

        # Load the XML document
        self.document_text = self.load_document("Title21_DocQA/ECFR-title21-march07.xml")

        # Process the document text and create a vector representation
        self.document_vector = Settings.embed_model.get_text_embedding(self.document_text)

        # Create an Elasticsearch vector store with a dedicated index name
        self.vector_store = ElasticsearchStore(
            index_name="leximgpt_title21",
            es_url=os.getenv("ES_CLOUD_ID"),
            es_user=os.getenv("ES_USER"),
            es_password=os.getenv("ES_PASSWORD"),
        )

        self.storage_context = StorageContext.from_defaults(
            vector_store=self.vector_store)
        self.index = VectorStoreIndex.from_vector_store(
            self.vector_store, storage_context=self.storage_context)
        
    def load_document(self, xml_path):
        with open(xml_path, 'r') as file:
            document_text = file.read()
        return document_text

    def get_answer(self):
        chat_engine = self.index.as_chat_engine()
        streaming_response = chat_engine.stream_chat("Tell me a joke.")
        for token in streaming_response.response_gen:
            print(token, end="")


# Example usage
service = Title21QA()

answer_generator = service.get_answer()

for answer in answer_generator:
    print(answer)
and for traceback i have this:

Plain Text
Traceback (most recent call last):
  File "/Users/shanayajain/leximgpt-AI/Title21_DocQA/test.py", line 59, in <module>
    answer_generator = service.get_answer()
                       ^^^^^^^^^^^^^^^^^^^^
  File "/Users/shanayajain/leximgpt-AI/Title21_DocQA/test.py", line 51, in get_answer
    streaming_response = chat_engine.stream_chat("Tell me a joke.")
                         ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/shanayajain/leximgpt-AI/.venv/lib/python3.11/site-packages/llama_index/core/callbacks/utils.py", line 41, in wrapper
    return func(self, *args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/shanayajain/leximgpt-AI/.venv/lib/python3.11/site-packages/llama_index/core/agent/runner/base.py", line 623, in stream_chat
    chat_response = self._chat(
                    ^^^^^^^^^^^
  File "/Users/shanayajain/leximgpt-AI/.venv/lib/python3.11/site-packages/llama_index/core/agent/runner/base.py", line 520, in _chat
    cur_step_output = self._run_step(
                      ^^^^^^^^^^^^^^^
  File "/Users/shanayajain/leximgpt-AI/.venv/lib/python3.11/site-packages/llama_index/core/agent/runner/base.py", line 374, in _run_step
    cur_step_output = self.agent_worker.stream_step(step, task, **kwargs)
                      ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/shanayajain/leximgpt-AI/.venv/lib/python3.11/site-packages/llama_index/core/callbacks/utils.py", line 41, in wrapper
    return func(self, *args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/shanayajain/leximgpt-AI/.venv/lib/python3.11/site-packages/llama_index/core/agent/react/step.py", line 622, in stream_step
    return self._run_step_stream(step, task)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/shanayajain/leximgpt-AI/.venv/lib/python3.11/site-packages/llama_index/core/agent/react/step.py", line 504, in _run_step_stream
    for latest_chunk in chat_stream:
  File "/Users/shanayajain/leximgpt-AI/.venv/lib/python3.11/site-packages/llama_index/core/llms/callbacks.py", line 99, in wrapped_gen
    for x in f_return_val:
  File "/Users/shanayajain/leximgpt-AI/.venv/lib/python3.11/site-packages/llama_index/llms/vertex/base.py", line 255, in gen
    for r in response:
  File "/Users/shanayajain/leximgpt-AI/.venv/lib/python3.11/site-packages/vertexai/generative_models/_generative_models.py", line 934, in _send_message_streaming
    for chunk in stream:
  File "/Users/shanayajain/leximgpt-AI/.venv/lib/python3.11/site-packages/vertexai/generative_models/_generative_models.py", line 505, in _generate_content_streaming
    response_stream = self._prediction_client.stream_generate_content(
                      ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/shanayajain/leximgpt-AI/.venv/lib/python3.11/site-packages/google/cloud/aiplatform_v1beta1/services/prediction_service/client.py", line 2207, in stream_generate_content
    response = rpc(
               ^^^^
  File "/Users/shanayajain/leximgpt-AI/.venv/lib/python3.11/site-packages/google/api_core/gapic_v1/method.py", line 131, in __call__
    return wrapped_func(*args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/shanayajain/leximgpt-AI/.venv/lib/python3.11/site-packages/google/api_core/grpc_helpers.py", line 174, in error_remapped_callable
    raise exceptions.from_grpc_error(exc) from exc
google.api_core.exceptions.ServiceUnavailable: 503 Getting metadata from plugin failed with error: 'str' object has no attribute 'before_request'
huh, I have not seen that one before πŸ€”
Add a reply
Sign up and join the conversation on Discord