# Load a previously persisted vector index from disk.
index = GPTSimpleVectorIndex.load_from_disk('index.json')
# Use a deterministic (temperature=0) gpt-3.5-turbo predictor for answering.
llm_predictor = LLMPredictor(llm=OpenAI(temperature=0, model_name="gpt-3.5-turbo"))
# Run a natural-language query against the index with the custom predictor.
response = index.query(
"How to plan a wedding?",
llm_predictor=llm_predictor
)
# Example of the kind of answer the query above produces (pasted sample output):
"The article discusses how to plan a wedding using Allseated, an all-in-one digital platform for organizing wedding ideas, including creating floor plans, seating charts, and guest lists."
from gpt_index.prompts.prompts import QuestionAnswerPrompt

# Custom QA prompt: inject the retrieved context, then ask the user's question.
DEFAULT_TEXT_QA_PROMPT_TMPL = (
    "Context information is below. \n"
    "---------------------\n"
    "{context_str}"
    "\n---------------------\n"
    "Given the context information and not prior knowledge, "
    "answer the question: {query_str}\n"
)
query_prompt_template = QuestionAnswerPrompt(DEFAULT_TEXT_QA_PROMPT_TMPL)
...
# BUG FIX: the original read `text_qa_template=text_qa_template=query_prompt_template`,
# a duplicated keyword argument that is a SyntaxError — pass the template once.
index.query("blah", text_qa_template=query_prompt_template)
# Example chat-completion history: system prime, then alternating user/assistant
# turns, ending on the follow-up question to be answered.
_history = [
    ("system", "You are a helpful assistant."),
    ("user", "Who won the world series in 2020?"),
    ("assistant", "The Los Angeles Dodgers won the World Series in 2020."),
    ("user", "Where was it played?"),
]
messages = [{"role": role, "content": content} for role, content in _history]
# Serialize the index into a plain dict (suitable for writing out yourself,
# e.g. with the json module in an encoding of your choice).
index.save_to_dict()
and then use the json
library to write it in the encoding of your choice.

gpt-3.5-turbo — working with the context of previous messages? Like "role": "system"
and "role": "user"?

from langchain.prompts.chat import (
AIMessagePromptTemplate,
SystemMessagePromptTemplate,
HumanMessagePromptTemplate,
ChatPromptTemplate,
)
# Chat-style QA prompt: each (message class, template string) pair below becomes
# one message in the conversation sent to the chat model. The system message
# carries the retrieved context; the final human message carries the query.
_MESSAGE_SPECS = [
    (SystemMessagePromptTemplate, "System Message + {context_str}"),
    (HumanMessagePromptTemplate, "Human Message Example"),
    (AIMessagePromptTemplate, "AI Message Example"),
    (HumanMessagePromptTemplate, "{query_str}"),
]
QA_TURBO_TEMPLATE_MSG = [cls.from_template(tmpl) for cls, tmpl in _MESSAGE_SPECS]
# Assemble the LangChain chat prompt, then wrap it as a GPT Index QA prompt.
QA_TURBO_TEMPLATE_LC = ChatPromptTemplate.from_messages(QA_TURBO_TEMPLATE_MSG)
QA_TURBO_TEMPLATE = QuestionAnswerPrompt.from_langchain_prompt(QA_TURBO_TEMPLATE_LC)
QA_PROMPT = QA_TURBO_TEMPLATE
# Query the index with the custom chat prompt: retrieve the top-k most similar
# chunks and synthesize a compact response.
index.query(
    q,
    llm_predictor=LLM_PREDICTOR,
    llama_logger=llama_logger,
    text_qa_template=QA_PROMPT,
    similarity_top_k=SIMILARITY_TOP_K,
    response_mode="compact",
)