How can I add a prompt to an index-based chatbot?

How can I add a prompt to the index-based chatbot? Here is the index-based chatbot:
Plain Text
import openai
import os
from flask import session
from llama_index import StorageContext, load_index_from_storage
from llama_index.prompts.base import Prompt
from llama_index.prompts.prompt_type import PromptType

def answer_question(query, query_engine):
    response = query_engine.query(query)
    return response

def generate_response_kbase(message, session):
    api_key = session.get('OPENAI_API_KEY')
    if api_key is None:
        raise ValueError("OpenAI API key not found in session.")
    
    openai.api_key = api_key
    os.environ["OPENAI_API_KEY"] = openai.api_key 

    # query kbase ---------------------------------------
    index = load_index_from_storage(StorageContext.from_defaults(persist_dir="storage/knowledge_base/"))

    query_engine = index.as_query_engine()

    response = answer_question(message, query_engine)

    assistant_message = response.response.replace('\n', '').replace('"', '')
    return assistant_message
I am able to use a prompt for a normal chatbot like this, but I am struggling with the index-based one (above):
Plain Text
import openai
from flask import session

def generate_response(message, session):

    # Prompt Engineering -----------------------------------------------
    #  If you ask me a question that is nonsense, trickery, or has no clear answer, I will respond with Unknown

    prompt = "If you ask me a question that is nonsense, trickery, or has no clear answer, I will respond with Unknown \n"
    
    # Get the previously submitted prompts from the session
    submitted_prompts = session.get('submitted_prompts', [])

    # Add the new prompt to the submitted prompts list
    submitted_prompts.append(message)

    # Concatenate all the prompts with a newline between each
    prompt += '\n'.join(submitted_prompts)

    #print(prompt)

    # Rest of the code ------------------------------------------------------
    message_objs = [
        {"role": "system", "content": prompt},
        {"role": "user", "content": message},
    ]
    
    api_key = session.get('OPENAI_API_KEY')
    if api_key is None:
        raise ValueError("OpenAI API key not found in session.")
    
    openai.api_key = api_key

    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=message_objs
    )

    assistant_message = response.choices[0].message["content"]
    return assistant_message.strip()
I tried it. It throws this error:
Plain Text
AttributeError: 'Prompt' object has no attribute 'input_variables'
Show me the code. What prompts would you like to customize?
Plain Text
import openai
import os
from flask import session
from llama_index import StorageContext, load_index_from_storage
from llama_index.prompts.base import Prompt
from llama_index.prompts.prompt_type import PromptType

def answer_question(query, query_engine):
    response = query_engine.query(query)
    return response

def generate_response_kbase(message, session):
    api_key = session.get('OPENAI_API_KEY')
    if api_key is None:
        raise ValueError("OpenAI API key not found in session.")
    
    openai.api_key = api_key
    os.environ["OPENAI_API_KEY"] = openai.api_key 

    # query kbase ---------------------------------------
    index = load_index_from_storage(StorageContext.from_defaults(persist_dir="storage/knowledge_base/"))

    query_engine = index.as_query_engine()


    # Prompts ---------------------------------------
    custom_prompt = Prompt("""\
    If you ask me a question that is nonsense, trickery, or has no clear answer, I will respond with Unknown \n
    """)

    chat_engine = CondenseQuestionChatEngine.from_defaults(
        query_engine=query_engine, 
        condense_question_prompt=custom_prompt,
        verbose=True
    )

    response = answer_question(message, query_engine)

    assistant_message = response.response.replace('\n', '').replace('"', '')
    return assistant_message
Is CondenseQuestionChatEngine imported?
Plain Text
from llama_index.chat_engine.condense_question import CondenseQuestionChatEngine
Plain Text
File "/Users//Desktop/beta51-main/app.py", line 12, in <module>
    from chatbot_kbase import generate_response_kbase, answer_question
  File "/Users//Desktop/beta51-main/chatbot_kbase.py", line 8, in <module>
    from llama_index.chat_engine.condense_question import CondenseQuestionChatEngine
ModuleNotFoundError: No module named 'llama_index.chat_engine'
I imported it like this:
Plain Text
from llama_index.chatbot import ChatEngine
from llama_index.chat_engine.condense_question import CondenseQuestionChatEngine
I also get this error:
Plain Text
ModuleNotFoundError: No module named 'llama_index.chatbot'
What is the version of llama_index?
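You can check it like this (recent releases expose __version__; otherwise pip show llama-index works too):
Plain Text
import llama_index

# print the installed version; the chat_engine module only exists in newer releases
print(llama_index.__version__)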
OK, that seems to run well, but the prompt seems to have no effect on the chatbot's response.
Plain Text
import openai
import os
from flask import session
from llama_index import StorageContext, load_index_from_storage
from llama_index.prompts.base import Prompt
from llama_index.prompts.prompt_type import PromptType
from llama_index.prompts.prompts import QuestionAnswerPrompt, RefinePrompt
from llama_index.chat_engine.condense_question import CondenseQuestionChatEngine


def answer_question(query, query_engine):
    response = query_engine.query(query)
    return response

def generate_response_kbase(message, session):
    api_key = session.get('OPENAI_API_KEY')
    if api_key is None:
        raise ValueError("OpenAI API key not found in session.")
    
    openai.api_key = api_key
    os.environ["OPENAI_API_KEY"] = openai.api_key 

    # query kbase ---------------------------------------
    index = load_index_from_storage(StorageContext.from_defaults(persist_dir="storage/knowledge_base/"))

    query_engine = index.as_query_engine()


    # Prompts ---------------------------------------
    custom_qa_prompt_template = """
    If you ask me a question that is nonsense, trickery, or has no clear answer, I will respond with Unknown.
    ---------------------
    {context_str}
    ---------------------
    Given this information, please answer the question: {query_str}
    """

    custom_qa_prompt = QuestionAnswerPrompt(custom_qa_prompt_template)

    chat_engine = CondenseQuestionChatEngine.from_defaults(
        query_engine=query_engine, 
        condense_question_prompt=custom_qa_prompt,
        verbose=True
    )

    response = answer_question(message, query_engine)

    assistant_message = response.response.replace('\n', '').replace('"', '')
    return assistant_message
Because you have only defined chat_engine and not used it.
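Something like this: call chat() on the chat engine instead of query() on the query engine, since the chat engine is what applies the condense prompt.
Plain Text
# route the message through the chat engine; it condenses the conversation
# into a standalone question and then queries the index for you
response = chat_engine.chat(message)
assistant_message = response.response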
I am confused; I'm not sure where to use chat_engine instead of query_engine. I tried this and got this error:
Plain Text
    return kwargs[key]
KeyError: 'context_str'
And the code:
Plain Text
import openai
import os
from flask import session
from llama_index import StorageContext, load_index_from_storage
from llama_index.prompts.base import Prompt
from llama_index.prompts.prompt_type import PromptType
from llama_index.prompts.prompts import QuestionAnswerPrompt, RefinePrompt
from llama_index.chat_engine.condense_question import CondenseQuestionChatEngine


def answer_question(query, chat_engine):
    response = chat_engine.chat(query)
    return response

def generate_response_kbase(message, session):
    api_key = session.get('OPENAI_API_KEY')
    if api_key is None:
        raise ValueError("OpenAI API key not found in session.")
    
    openai.api_key = api_key
    os.environ["OPENAI_API_KEY"] = openai.api_key 

    # query kbase ---------------------------------------
    index = load_index_from_storage(StorageContext.from_defaults(persist_dir="storage/knowledge_base/"))

    query_engine = index.as_query_engine()


    # Prompts ---------------------------------------
    custom_qa_prompt_template = """
    If you ask me a question that is nonsense, trickery, or has no clear answer, I will respond with Unknown.
    ---------------------
    {context_str}
    ---------------------
    Given this information, please answer the question: {query_str}
    """

    custom_qa_prompt = QuestionAnswerPrompt(custom_qa_prompt_template)

    chat_engine = CondenseQuestionChatEngine.from_defaults(
        query_engine=query_engine, 
        condense_question_prompt=custom_qa_prompt,
        verbose=True
    )

    response = answer_question(message, chat_engine)

    assistant_message = response.response.replace('\n', '').replace('"', '')
    return assistant_message
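That KeyError happens because the condense_question_prompt is formatted with {chat_history} and {question} only, so a template built around {context_str} and {query_str} has no values to fill. A minimal sketch of a condense prompt with the expected variables:
Plain Text
from llama_index.prompts.base import Prompt

# the condense prompt only ever sees the chat history and the new message;
# {context_str}/{query_str} belong in the text_qa_template on the query engine
custom_condense_prompt = Prompt(
    "Given a conversation (between Human and Assistant) and a follow up message "
    "from Human, rewrite the message to be a standalone question.\n"
    "<Chat History>\n{chat_history}\n"
    "<Follow Up Message>\n{question}\n"
    "<Standalone question>\n"
)
The answer-time instruction goes into the text_qa_template on the query engine instead, as in this working example: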
Plain Text
from llama_index import (
    Prompt, QuestionAnswerPrompt, RefinePrompt, StorageContext,
    VectorStoreIndex, SimpleDirectoryReader, LLMPredictor, ServiceContext,
    SimpleWebPageReader, download_loader, load_index_from_storage,
)
from llama_index.chat_engine.condense_question import CondenseQuestionChatEngine
from langchain import OpenAI
from langchain.chat_models import ChatOpenAI
from langchain.prompts.chat import (
  AIMessagePromptTemplate,
  ChatPromptTemplate,
  HumanMessagePromptTemplate
)


# ----------------------------------------
# ใ€Load Indexใ€‘
# ----------------------------------------
storage_context = StorageContext.from_defaults(persist_dir="./index")
index = load_index_from_storage(storage_context)


# ----------------------------------------
# ใ€define Promptใ€‘
# ----------------------------------------
# Query Prompt
TEMPLATE_STR = (
  "We have provided context information below. \n"
  "---------------------\n"
  "{context_str}"
  "\n---------------------\n"
  "Given this information, please answer the question: {query_str}\n"
)
QA_TEMPLATE = Prompt(TEMPLATE_STR)

# ----------------------------------------
# ใ€Query Indexใ€‘
# ----------------------------------------
llm_predictor = LLMPredictor(llm=ChatOpenAI(model_name="gpt-3.5-turbo",temperature=0, max_tokens=512, streaming=True))
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor)
query_engine = index.as_query_engine(
  streaming=True,
  service_context=service_context,
  text_qa_template=QA_TEMPLATE,
)

query_engine.query("What did the author do growing up?").print_response_stream()
Maybe it works.
This seems to be working, but some prompts work and others don't. I am still skeptical.
Plain Text
import openai
import os
from flask import session
from llama_index import StorageContext, load_index_from_storage
from llama_index.prompts.base import Prompt
from llama_index.prompts.prompt_type import PromptType
from llama_index.prompts.prompts import QuestionAnswerPrompt, RefinePrompt
from llama_index.chat_engine.condense_question import CondenseQuestionChatEngine


def answer_question(query, chat_engine):
    response = chat_engine.chat(query)
    return response

def generate_response_kbase(message, session):
    api_key = session.get('OPENAI_API_KEY')
    if api_key is None:
        raise ValueError("OpenAI API key not found in session.")
    
    openai.api_key = api_key
    os.environ["OPENAI_API_KEY"] = openai.api_key 

    # query kbase ---------------------------------------
    index = load_index_from_storage(StorageContext.from_defaults(persist_dir="storage/knowledge_base/"))

    query_engine = index.as_query_engine()


    # Prompts ---------------------------------------
    custom_prompt = Prompt("""\
    Given a conversation (between Human and Assistant) and a follow up message from Human, \
    rewrite the message to be a standalone question that captures all relevant context \
    from the conversation.

    <Chat History> 
    {chat_history}

    <Follow Up Message>
    {question}

    <Standalone question>
    """)

    chat_engine = CondenseQuestionChatEngine.from_defaults(
        query_engine=query_engine, 
        condense_question_prompt=custom_prompt,
        verbose=True
    )

    response = answer_question(message, chat_engine)

    assistant_message = response.response.replace('\n', '').replace('"', '')
    return assistant_message
If I change the prompt to this: custom_prompt = Prompt("""If you ask me a question that is nonsense, trickery, or has no clear answer, I will respond with Unknown"""), it is supposed to reply "Unknown" when I ask a nonsensical question, but it does not.
I am confused; am I not already customizing it here?
Plain Text
custom_prompt = Prompt("""\
    If you ask me a question that is nonsense, trickery, or has no clear answer, I will respond with Unknown\

    <Chat History> 
    {chat_history}

    <Follow Up Message>
    {question}

    <Standalone question>
    """)
The way the CondenseQuestionChatEngine works is that it will always generate a query for the index it is attached to.

You can customize the text_qa_template of the underlying index.

Or you could use an agent instead -> https://gpt-index.readthedocs.io/en/latest/use_cases/agents.html
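A rough sketch of the first option, reusing the pieces from above (the prompt wording is just an example):
Plain Text
from llama_index import StorageContext, load_index_from_storage
from llama_index.prompts.base import Prompt
from llama_index.chat_engine.condense_question import CondenseQuestionChatEngine

# the behavioural instruction lives in the QA template, which is applied
# every time the index is queried
qa_template = Prompt(
    "If the question is nonsense, trickery, or has no clear answer, answer with Unknown.\n"
    "---------------------\n"
    "{context_str}\n"
    "---------------------\n"
    "Given this information, please answer the question: {query_str}\n"
)

index = load_index_from_storage(StorageContext.from_defaults(persist_dir="storage/knowledge_base/"))
query_engine = index.as_query_engine(text_qa_template=qa_template)

# the chat engine only condenses the conversation into a standalone question;
# the answer itself is generated with the template above
chat_engine = CondenseQuestionChatEngine.from_defaults(query_engine=query_engine, verbose=True)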
I get this error.
Plain Text
    raise OutputParserException(f"Could not parse LLM output: `{text}`")
langchain.schema.OutputParserException: Could not parse LLM output: `Thought: Do I need to use a tool? Yes
Action: Unknown
Yeah, that's a langchain error. Looks like the LLM barfed on the output structure :PSadge:
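If you stick with the agent route, one workaround is to let langchain feed parse failures back to the model instead of raising. Something like this, assuming a langchain version that accepts handle_parsing_errors (tools and llm are placeholders for your own setup):
Plain Text
from langchain.agents import initialize_agent, AgentType

# tools = [...]  # your llama_index query tool(s), wrapped for langchain
# llm = ...      # e.g. ChatOpenAI(...)
agent = initialize_agent(
    tools,
    llm,
    agent=AgentType.CONVERSATIONAL_REACT_DESCRIPTION,
    handle_parsing_errors=True,  # retry on "Could not parse LLM output" instead of raising
)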