GPTVectorStoreIndex([], storage_context=storage_context)
import pinecone
from langchain.embeddings import OpenAIEmbeddings
from langchain.llms import AzureOpenAI
from llama_index import (
    LangchainEmbedding,
    LLMPredictor,
    ServiceContext,
    SimpleDirectoryReader,
    VectorStoreIndex,
    download_loader,
)
from llama_index.storage.storage_context import StorageContext
from llama_index.vector_stores import PineconeVectorStore

# Initialize the Pinecone remote vector store
pinecone.init(api_key=PINECONE_API_KEY, environment=PINECONE_API_ENV)
index = pinecone.Index("websources")

# Construct the vector store
vector_store = PineconeVectorStore(pinecone_index=index)

# Create the storage context
storage_context = StorageContext.from_defaults(vector_store=vector_store)

# LLM for the service context
chat_llm = AzureOpenAI(
    deployment_name="Sample",
    model_kwargs={
        "api_key": API_KEY,
        "api_base": API_BASE,
        "api_type": "azure",
        "api_version": "2023-03-15-preview",
    },
)
llm_predictor = LLMPredictor(llm=chat_llm)

# Embedding model for the service context
embedding_llm = LangchainEmbedding(
    OpenAIEmbeddings(
        model="text-embedding-ada-002",
        deployment=EMBED_DEPLOYMENT,
        openai_api_key=API_KEY,
        openai_api_base=API_BASE,
        openai_api_type="azure",
        openai_api_version="2023-03-15-preview",
    ),
    embed_batch_size=1,
)
embed_model = embedding_llm

service_context = ServiceContext.from_defaults(
    llm_predictor=llm_predictor, embed_model=embed_model
)

# load documents
# documents = SimpleDirectoryReader("./data").load_data()

# Create the index, which will insert documents/vectors into Pinecone
index = VectorStoreIndex.from_documents(
    [], storage_context=storage_context, service_context=service_context
)
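Note that VectorStoreIndex.from_documents is called with an empty list while the SimpleDirectoryReader line stays commented out, so nothing is ever embedded or upserted to Pinecone. A minimal sketch of actually inserting documents, assuming a local ./data folder containing readable files, would be:

documents = SimpleDirectoryReader("./data").load_data()
# Passing real documents (instead of []) embeds them and upserts
# the resulting vectors into the "websources" Pinecone index
index = VectorStoreIndex.from_documents(
    documents, storage_context=storage_context, service_context=service_context
)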
# Set logging to DEBUG for more detailed outputs
query_engine = index.as_query_engine()
response = query_engine.query("Tell me more about stroke?")
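The comment above mentions DEBUG logging, but no logging is actually configured. A minimal sketch of the standard setup would be:

import logging
import sys

# Send llama_index's log output to stdout at DEBUG level so the
# retrieval and synthesis steps become visible
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))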
Response(response=None, source_nodes=[], extra_info={})
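This empty Response is consistent with an empty index: since from_documents received [], no vectors were ever upserted, so retrieval finds no source nodes. One way to confirm, assuming the Pinecone v2 client, is to check the index stats directly:

# Re-fetch the Pinecone handle ("index" was rebound to the
# VectorStoreIndex above); a total_vector_count of 0 means nothing
# was upserted, so every query returns response=None
pinecone_index = pinecone.Index("websources")
print(pinecone_index.describe_index_stats())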