import openai
from dotenv import load_dotenv
from langchain.chat_models import ChatOpenAI
from llama_index.memory import ChatMemoryBuffer
import os
from llama_index import VectorStoreIndex,SimpleDirectoryReader,LLMPredictor
# Load environment variables (e.g. OPENAI_API_KEY) from a local .env file
# so the OpenAI-backed query engine can authenticate.
load_dotenv()

from llama_index import StorageContext, load_index_from_storage


def main() -> None:
    """Answer one user query against a persisted llama_index vector index.

    Rebuilds the index previously persisted to the ``apg_index`` directory,
    creates a query engine from it, reads a single query from stdin, and
    prints the engine's response.
    """
    # Rebuild the storage context from the on-disk persistence directory.
    storage_context = StorageContext.from_defaults(persist_dir="apg_index")
    # Reconstruct the index object from the stored context.
    index = load_index_from_storage(storage_context)
    query_engine = index.as_query_engine()

    user_query = input("Query: ")
    response = query_engine.query(user_query)
    print(response)
    # Trailing blank lines preserved from original output format.
    print("\n")


# Guard the entry point so importing this module does not block on input().
if __name__ == "__main__":
    main()