Here is mine:
# Embedding model: the Azure OpenAI "learning" deployment of text-embedding-ada-002,
# configured from the globals already set on the `openai` module, then wrapped in
# llama-index's LangchainEmbedding adapter.
_azure_ada_embeddings = OpenAIEmbeddings(
    model="text-embedding-ada-002",
    deployment="learning",
    openai_api_key=openai.api_key,
    openai_api_base=openai.api_base,
    openai_api_type=openai.api_type,
    openai_api_version=openai.api_version,
)
# embed_batch_size=1 -> one text per embedding request.
embedding_llm = LangchainEmbedding(_azure_ada_embeddings, embed_batch_size=1)
.........
# Chat LLM: Azure OpenAI chat deployment.
#
# Fix: the API credentials were previously passed through `model_kwargs`, but
# langchain forwards `model_kwargs` verbatim in the request body to the
# chat-completions endpoint, so `api_key`/`api_base`/`api_type`/`api_version`
# arrive as unrecognized request arguments instead of configuring the client
# (and `api_version` was passed twice). They belong on the constructor as the
# dedicated `openai_api_*` parameters.
llm = AzureChatOpenAI(
    deployment_name=deployment,
    temperature=0.1,          # low temperature: mostly deterministic answers
    max_tokens=num_output,    # cap completion length at the configured output size
    openai_api_key=openai.api_key,
    openai_api_base=openai.api_base,
    openai_api_type=openai.api_type,
    openai_api_version=openai.api_version,
)
# Wrap the chat model in llama-index's predictor interface.
llm_predictor = LLMPredictor(llm=llm)

# Service context: declares which LLM and embedding model to use, the maximum
# context window (in tokens) the model accepts, and the output size.
_service_settings = dict(
    llm_predictor=llm_predictor,
    embed_model=embedding_llm,
    context_window=context_window,
    num_output=num_output,
)
service_context = ServiceContext.from_defaults(**_service_settings)

# Register it globally so every subsequent llama-index call picks it up.
set_global_service_context(service_context)