import openai
import os
from flask import session
from llama_index import StorageContext, load_index_from_storage
from llama_index.prompts.base import Prompt
from llama_index.prompts.prompt_type import PromptType

def answer_question(query, query_engine):
    response = query_engine.query(query)
    return response

def generate_response_kbase(message, session):
    api_key = session.get('OPENAI_API_KEY')
    if api_key is None:
        raise ValueError("OpenAI API key not found in session.")
    openai.api_key = api_key
    os.environ["OPENAI_API_KEY"] = openai.api_key

    # query kbase ---------------------------------------
    index = load_index_from_storage(StorageContext.from_defaults(persist_dir="storage/knowledge_base/"))
    query_engine = index.as_query_engine()

    response = answer_question(message, query_engine)
    assistant_message = response.response.replace('\n', '').replace('"', '')
    return assistant_message
import openai
from flask import session

def generate_response(message, session):
    # Prompt Engineering -----------------------------------------------
    # If you ask me a question that is nonsense, trickery, or has no clear answer, I will respond with Unknown
    prompt = "If you ask me a question that is nonsense, trickery, or has no clear answer, I will respond with Unknown \n"

    # Get the previously submitted prompts from the session
    submitted_prompts = session.get('submitted_prompts', [])

    # Add the new prompt to the submitted prompts list
    submitted_prompts.append(message)

    # Concatenate all the prompts with a new line between each
    prompt += '\n'.join(submitted_prompts)
    #print(prompt)

    # Rest of the code ------------------------------------------------------
    message_objs = [
        {"role": "system", "content": prompt},
        {"role": "user", "content": message},
    ]

    api_key = session.get('OPENAI_API_KEY')
    if api_key is None:
        raise ValueError("OpenAI API key not found in session.")
    openai.api_key = api_key

    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=message_objs
    )
    assistant_message = response.choices[0].message["content"]
    return assistant_message.strip()
AttributeError: 'Prompt' object has no attribute 'input_variables'
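This AttributeError typically surfaces when a llama_index Prompt object lands somewhere that expects a LangChain PromptTemplate, since only the LangChain class exposes input_variables. A hedged illustration of the attribute in question (the template text here is made up):

from langchain import PromptTemplate

# LangChain's PromptTemplate carries the input_variables attribute that the
# traceback above is looking for; llama_index's Prompt wrapper does not.
lc_prompt = PromptTemplate(
    input_variables=["question"],
    template="Answer the question: {question}",
)
print(lc_prompt.input_variables)  # ['question']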
import openai
import os
from flask import session
from llama_index import StorageContext, load_index_from_storage
from llama_index.prompts.base import Prompt
from llama_index.prompts.prompt_type import PromptType

def answer_question(query, query_engine):
    response = query_engine.query(query)
    return response

def generate_response_kbase(message, session):
    api_key = session.get('OPENAI_API_KEY')
    if api_key is None:
        raise ValueError("OpenAI API key not found in session.")
    openai.api_key = api_key
    os.environ["OPENAI_API_KEY"] = openai.api_key

    # query kbase ---------------------------------------
    index = load_index_from_storage(StorageContext.from_defaults(persist_dir="storage/knowledge_base/"))
    query_engine = index.as_query_engine()

    # Prompts ---------------------------------------
    custom_prompt = Prompt("""\
If you ask me a question that is nonsense, trickery, or has no clear answer, I will respond with Unknown \n
""")

    chat_engine = CondenseQuestionChatEngine.from_defaults(
        query_engine=query_engine,
        condense_question_prompt=custom_prompt,
        verbose=True
    )

    response = answer_question(message, query_engine)
    assistant_message = response.response.replace('\n', '').replace('"', '')
    return assistant_message
from llama_index.chat_engine.condense_question import CondenseQuestionChatEngine
File "/Users//Desktop/beta51-main/app.py", line 12, in <module> from chatbot_kbase import generate_response_kbase, answer_question File "/Users//Desktop/beta51-main/chatbot_kbase.py", line 8, in <module> from llama_index.chat_engine.condense_question import CondenseQuestionChatEngine ModuleNotFoundError: No module named 'llama_index.chat_engine'
from llama_index.chatbot import ChatEngine
from llama_index.chat_engine.condense_question import CondenseQuestionChatEngine
import openai
import os
from flask import session
from llama_index import StorageContext, load_index_from_storage
from llama_index.prompts.base import Prompt
from llama_index.prompts.prompt_type import PromptType
from llama_index.prompts.prompts import QuestionAnswerPrompt, RefinePrompt
from llama_index.chat_engine.condense_question import CondenseQuestionChatEngine

def answer_question(query, query_engine):
    response = query_engine.query(query)
    return response

def generate_response_kbase(message, session):
    api_key = session.get('OPENAI_API_KEY')
    if api_key is None:
        raise ValueError("OpenAI API key not found in session.")
    openai.api_key = api_key
    os.environ["OPENAI_API_KEY"] = openai.api_key

    # query kbase ---------------------------------------
    index = load_index_from_storage(StorageContext.from_defaults(persist_dir="storage/knowledge_base/"))
    query_engine = index.as_query_engine()

    # Prompts ---------------------------------------
    custom_qa_prompt_template = """
    If you ask me a question that is nonsense, trickery, or has no clear answer, I will respond with Unknown.
    ---------------------
    {context_str}
    ---------------------
    Given this information, please answer the question: {query_str}
    """
    custom_qa_prompt = QuestionAnswerPrompt(custom_qa_prompt_template)

    chat_engine = CondenseQuestionChatEngine.from_defaults(
        query_engine=query_engine,
        condense_question_prompt=custom_qa_prompt,
        verbose=True
    )

    response = answer_question(message, query_engine)
    assistant_message = response.response.replace('\n', '').replace('"', '')
    return assistant_message
    return kwargs[key]
KeyError: 'context_str'
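The KeyError points at a placeholder mismatch rather than a problem with the index: CondenseQuestionChatEngine formats its condense_question_prompt with {chat_history} and {question}, so a QA template built around {context_str} and {query_str} leaves context_str with nothing to fill it. A minimal sketch of the split, assuming the same 0.6-era llama_index API used above (the prompt text is illustrative):

from llama_index import StorageContext, load_index_from_storage
from llama_index.prompts.base import Prompt
from llama_index.prompts.prompts import QuestionAnswerPrompt
from llama_index.chat_engine.condense_question import CondenseQuestionChatEngine

index = load_index_from_storage(StorageContext.from_defaults(persist_dir="storage/knowledge_base/"))

# The QA template (with {context_str}/{query_str}) belongs on the query engine.
qa_prompt = QuestionAnswerPrompt(
    "---------------------\n{context_str}\n---------------------\n"
    "Given this information, please answer the question: {query_str}\n"
)
query_engine = index.as_query_engine(text_qa_template=qa_prompt)

# The condense prompt handed to the chat engine must expose
# {chat_history} and {question} instead.
chat_engine = CondenseQuestionChatEngine.from_defaults(
    query_engine=query_engine,
    condense_question_prompt=Prompt(
        "<Chat History>\n{chat_history}\n"
        "<Follow Up Message>\n{question}\n"
        "<Standalone question>\n"
    ),
    verbose=True,
)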
import openai
import os
from flask import session
from llama_index import StorageContext, load_index_from_storage
from llama_index.prompts.base import Prompt
from llama_index.prompts.prompt_type import PromptType
from llama_index.prompts.prompts import QuestionAnswerPrompt, RefinePrompt
from llama_index.chat_engine.condense_question import CondenseQuestionChatEngine

def answer_question(query, chat_engine):
    response = chat_engine.chat(query)
    return response

def generate_response_kbase(message, session):
    api_key = session.get('OPENAI_API_KEY')
    if api_key is None:
        raise ValueError("OpenAI API key not found in session.")
    openai.api_key = api_key
    os.environ["OPENAI_API_KEY"] = openai.api_key

    # query kbase ---------------------------------------
    index = load_index_from_storage(StorageContext.from_defaults(persist_dir="storage/knowledge_base/"))
    query_engine = index.as_query_engine()

    # Prompts ---------------------------------------
    custom_qa_prompt_template = """
    If you ask me a question that is nonsense, trickery, or has no clear answer, I will respond with Unknown.
    ---------------------
    {context_str}
    ---------------------
    Given this information, please answer the question: {query_str}
    """
    custom_qa_prompt = QuestionAnswerPrompt(custom_qa_prompt_template)

    chat_engine = CondenseQuestionChatEngine.from_defaults(
        query_engine=query_engine,
        condense_question_prompt=custom_qa_prompt,
        verbose=True
    )

    response = answer_question(message, chat_engine)
    assistant_message = response.response.replace('\n', '').replace('"', '')
    return assistant_message
from llama_index import Prompt, QuestionAnswerPrompt, RefinePrompt, StorageContext, VectorStoreIndex, SimpleDirectoryReader, LLMPredictor, ServiceContext, SimpleWebPageReader, download_loader, load_index_from_storage
from llama_index.chat_engine.condense_question import CondenseQuestionChatEngine
from langchain import OpenAI
from langchain.chat_models import ChatOpenAI
from langchain.prompts.chat import (
    AIMessagePromptTemplate,
    ChatPromptTemplate,
    HumanMessagePromptTemplate
)

# ----------------------------------------
# [Load Index]
# ----------------------------------------
storage_context = StorageContext.from_defaults(persist_dir="./index")
index = load_index_from_storage(storage_context)

# ----------------------------------------
# [Define Prompt]
# ----------------------------------------
# Query Prompt
TEMPLATE_STR = (
    "We have provided context information below. \n"
    "---------------------\n"
    "{context_str}"
    "\n---------------------\n"
    "Given this information, please answer the question: {query_str}\n"
)
QA_TEMPLATE = Prompt(TEMPLATE_STR)

# ----------------------------------------
# [Query Index]
# ----------------------------------------
llm_predictor = LLMPredictor(llm=ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0, max_tokens=512, streaming=True))
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor)

query_engine = index.as_query_engine(
    streaming=True,
    service_context=service_context,
    text_qa_template=QA_TEMPLATE,
)
query_engine.query("What did the author do growing up?").print_response_stream()
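Note how this example wires the custom QA template in through text_qa_template on the query engine rather than handing it to a chat engine; that is exactly the placeholder split that tripped the KeyError above.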
import openai
import os
from flask import session
from llama_index import StorageContext, load_index_from_storage
from llama_index.prompts.base import Prompt
from llama_index.prompts.prompt_type import PromptType
from llama_index.prompts.prompts import QuestionAnswerPrompt, RefinePrompt
from llama_index.chat_engine.condense_question import CondenseQuestionChatEngine

def answer_question(query, chat_engine):
    response = chat_engine.chat(query)
    return response

def generate_response_kbase(message, session):
    api_key = session.get('OPENAI_API_KEY')
    if api_key is None:
        raise ValueError("OpenAI API key not found in session.")
    openai.api_key = api_key
    os.environ["OPENAI_API_KEY"] = openai.api_key

    # query kbase ---------------------------------------
    index = load_index_from_storage(StorageContext.from_defaults(persist_dir="storage/knowledge_base/"))
    query_engine = index.as_query_engine()

    # Prompts ---------------------------------------
    custom_prompt = Prompt("""\
Given a conversation (between Human and Assistant) and a follow up message from Human, \
rewrite the message to be a standalone question that captures all relevant context \
from the conversation.

<Chat History>
{chat_history}

<Follow Up Message>
{question}

<Standalone question>
""")

    chat_engine = CondenseQuestionChatEngine.from_defaults(
        query_engine=query_engine,
        condense_question_prompt=custom_prompt,
        verbose=True
    )

    response = answer_question(message, chat_engine)
    assistant_message = response.response.replace('\n', '').replace('"', '')
    return assistant_message
custom_prompt = Prompt("""\
If you ask me a question that is nonsense, trickery, or has no clear answer, I will respond with Unknown\

<Chat History>
{chat_history}

<Follow Up Message>
{question}

<Standalone question>
""")
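Unlike the earlier attempt, this prompt keeps the {chat_history} and {question} placeholders that CondenseQuestionChatEngine fills in, so it formats without a KeyError; the guardrail sentence simply rides along in the condense step.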
The way CondenseQuestionChatEngine works is that it will always generate a query for the index it is attached to.

    raise OutputParserException(f"Could not parse LLM output: `{text}`")
langchain.schema.OutputParserException: Could not parse LLM output: `Thought: Do I need to use a tool? Yes
Action: Unknown`
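That traceback comes from LangChain's agent output parser rather than from llama_index: the guardrail's literal "Unknown" leaked into the agent's Action field, and since no tool is registered under that name the conversational agent cannot route it. One hedged mitigation, assuming the app wraps the query engine in a LangChain agent (the tool list and model here are placeholders): let the executor absorb unparseable output instead of raising.

from langchain.agents import initialize_agent, AgentType
from langchain.chat_models import ChatOpenAI

llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0)
tools = []  # the real app would register its knowledge-base query tool here

agent = initialize_agent(
    tools,
    llm,
    agent=AgentType.CONVERSATIONAL_REACT_DESCRIPTION,
    handle_parsing_errors=True,  # recover from "Could not parse LLM output"
)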