```python
service_context = ServiceContext.from_defaults(
    llm=llm,
    embed_model=embeddings,
    callback_manager=callback_manager,
)

object_index = ObjectIndex.from_objects(
    table_schema_objs,
    table_node_mapping,
    VectorStoreIndex,
    service_context=service_context,
)
```
```python
object_index = ObjectIndex.from_objects(
    table_schema_objs,
    table_node_mapping,
    VectorStoreIndex,
    embed_model=embed_model,
    callback_manager=callback_manager,
)
```
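As a quick sanity check that the index was built on the intended embeddings, the retriever it produces can be exercised directly. This is only a sketch: the question text and `similarity_top_k` value are illustrative, and it assumes `table_schema_objs` are the usual `SQLTableSchema` objects from the standard text-to-SQL setup.

```python
# Illustrative check: fetch the table schemas most relevant to a question.
retriever = object_index.as_retriever(similarity_top_k=1)
relevant_tables = retriever.retrieve("Which tables contain order information?")
print([t.table_name for t in relevant_tables])
```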
```python
from llama_index.core import Settings

Settings.callback_manager = callback_manager
Settings.llm = llm
Settings.embed_model = embed_model
```
```python
from llama_index.core.indices.struct_store.sql_query import SQLTableRetrieverQueryEngine

query_engine = SQLTableRetrieverQueryEngine(
    sql_database=sql_database,
    table_retriever=object_index.as_retriever(),
    text_to_sql_prompt=DEFAULT_TEXT_TO_SQL_PROMPT,
    response_synthesis_prompt=DEFAULT_RESPONSE_SYNTHESIS_PROMPT,
    embed_model=embeddings,
    callback_manager=callback_manager,
    llm=llm,
)
```
```
Could not load OpenAI embedding model. If you intended to use OpenAI, please check your OPENAI_API_KEY.
```
```python
query_engine = SQLTableRetrieverQueryEngine(
    sql_database=sql_database,
    table_retriever=object_index.as_retriever(),
    text_to_sql_prompt=DEFAULT_TEXT_TO_SQL_PROMPT,
    response_synthesis_prompt=DEFAULT_RESPONSE_SYNTHESIS_PROMPT,
    service_context=service_context,
)
```
llama-index-llms-azure-openai==0.1.4
```python
from llama_index.llms.azure_openai import AzureOpenAI


def AzureLLMClient():
    config = settings.AZURE_OPENAI_CONFIG
    llm = AzureOpenAI(
        engine=config['engine'],
        model=config['model'],
        temperature=config['temperature'],
        api_key=config['api_key'],
        azure_endpoint=config['azure_endpoint'],
        api_base=config['api_base'],
        api_version=config['api_version'],
        max_retries=config['max_retries'],
        timeout=config['timeout'],
    )
    return llm
```
```python
import os

from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from transformers import AutoModel, AutoTokenizer


def EmbeddingModel():
    USE_MODEL_CACHE = os.getenv('USE_MODEL_CACHE')
    if USE_MODEL_CACHE == "true":
        # Load the model and tokenizer from a local cache directory.
        tokenizer = AutoTokenizer.from_pretrained(
            'XXX', cache_dir='/build/huggingface_cache')
        model = AutoModel.from_pretrained(
            'XXX', cache_dir='/build/huggingface_cache')
        embeddings = HuggingFaceEmbedding(
            model=model,
            tokenizer=tokenizer,
        )
    else:
        embeddings = HuggingFaceEmbedding(
            model_name='XXX',
        )
    return embeddings
```
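Given these two helpers, one option (a sketch, not necessarily how the app is wired today) is to register their outputs on the global `Settings` object before any index or query engine is constructed, so every component resolves to the Azure LLM and the HuggingFace embeddings rather than the OpenAI defaults:

```python
from llama_index.core import Settings

# Assumption: call the helpers defined above and register the results globally,
# so ObjectIndex and SQLTableRetrieverQueryEngine pick them up without
# per-constructor kwargs.
Settings.llm = AzureLLMClient()
Settings.embed_model = EmbeddingModel()
Settings.callback_manager = callback_manager
```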
The `embed_model` isn't getting passed into the NLSQLRetriever under the hood, so set it (and the LLM) globally instead:

```python
Settings.llm = llm
Settings.embed_model = embed_model
```
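Putting it together, a minimal end-to-end sketch looks like the following. It assumes `sql_database`, `table_schema_objs`, `table_node_mapping`, and the two prompt constants are already defined as in the snippets above, and the example question is made up:

```python
from llama_index.core import Settings, VectorStoreIndex
from llama_index.core.indices.struct_store.sql_query import SQLTableRetrieverQueryEngine
from llama_index.core.objects import ObjectIndex

# Configure the models globally so nothing falls back to the default
# OpenAI embedding model.
Settings.llm = llm
Settings.embed_model = embed_model
Settings.callback_manager = callback_manager

# Build the table index; no embed_model kwarg is needed once Settings is set.
object_index = ObjectIndex.from_objects(
    table_schema_objs,
    table_node_mapping,
    VectorStoreIndex,
)

query_engine = SQLTableRetrieverQueryEngine(
    sql_database=sql_database,
    table_retriever=object_index.as_retriever(),
    text_to_sql_prompt=DEFAULT_TEXT_TO_SQL_PROMPT,
    response_synthesis_prompt=DEFAULT_RESPONSE_SYNTHESIS_PROMPT,
)

# Illustrative question; replace with something that matches your schema.
response = query_engine.query("How many orders were placed last month?")
print(response)
```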