%pip install llama-index-embeddings-azure-openai
%pip install llama-index-llms-azure-openai
%pip install llama-index
from llama_index.llms.azure_openai import AzureOpenAI
from llama_index.embeddings.azure_openai import AzureOpenAIEmbedding
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
import logging
import sys
# Route llama_index logs to stdout; use logging.DEBUG for more verbose output.
# basicConfig already attaches a stdout StreamHandler, so adding another one
# would duplicate every log line.
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
from google.colab import userdata

# Read the Azure OpenAI credentials from Colab secrets (Notebook settings ->
# Secrets); adjust the secret names to match your own.
api_key = userdata.get("OPENAI1")
azure_endpoint = userdata.get("ENDPOINT")
api_version = "2023-07-01-preview"
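# (Hypothetical alternative) Outside Colab you could read the same values from
# environment variables instead; the variable names below are only examples:
# import os
# api_key = os.environ["AZURE_OPENAI_API_KEY"]
# azure_endpoint = os.environ["AZURE_OPENAI_ENDPOINT"]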
# Chat completion model; deployment_name must match the name of your own
# GPT-4 deployment in Azure, not the underlying model name.
llm = AzureOpenAI(
    model="gpt-4",
    deployment_name="my-deployment-name",
    api_key=api_key,
    azure_endpoint=azure_endpoint,
    api_version=api_version,
)
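# Optional sanity check (assumes the deployment above is live and reachable):
# complete() is the standard LlamaIndex LLM call and returns a CompletionResponse.
print(llm.complete("Say hello"))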
# You need to deploy your own embedding model as well as your own chat completion model
embed_model = AzureOpenAIEmbedding(
    model="text-embedding-ada-002",
    deployment_name="my-other-deployment-name",
    api_key=api_key,
    azure_endpoint=azure_endpoint,
    api_version=api_version,
)
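# Optional sanity check: embed a short string and confirm the vector dimension
# (text-embedding-ada-002 produces 1536-dimensional vectors).
embedding = embed_model.get_text_embedding("hello world")
print(len(embedding))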
from llama_index.core import Settings

# Register the Azure models as the global defaults: index construction uses
# Settings.embed_model and querying uses Settings.llm. (VectorStoreIndex and
# SimpleDirectoryReader were already imported above.)
Settings.llm = llm
Settings.embed_model = embed_model
# Download the sample Paul Graham essay used in this walkthrough
!mkdir -p 'data/'
!wget -O "./data/paul_graham_essay.txt" "https://raw.githubusercontent.com/run-llama/llama_index/main/docs/examples/data/paul_graham/paul_graham_essay.txt"
# Load the downloaded essay into LlamaIndex Document objects
documents = SimpleDirectoryReader(
    input_files=["./data/paul_graham_essay.txt"]
).load_data()

# Embed the documents and build an in-memory vector index
# (uses the Azure embedding model registered in Settings)
index = VectorStoreIndex.from_documents(documents)
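# Minimal end-to-end check: as_query_engine() retrieves relevant chunks and
# uses Settings.llm (the Azure GPT-4 deployment) to synthesize an answer.
query_engine = index.as_query_engine()
response = query_engine.query("What did the author do growing up?")
print(response)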