```python
from llama_index import ServiceContext
from llama_index.embeddings.openai import OpenAIEmbedding

# Embed 50 chunks per API call instead of the default batch size.
embed_model = OpenAIEmbedding(embed_batch_size=50)
service_context = ServiceContext.from_defaults(..., embed_model=embed_model)  # plus whatever other settings you're using
```
`use_async=True` may also help, but I can't remember for sure; it's been a while since I tested that.
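Roughly how the pieces fit together, as a sketch only: I'm assuming the legacy ServiceContext-style API, a `SimpleDirectoryReader("./data")` loader as a stand-in for your own data, and that `use_async=True` gets passed to `VectorStoreIndex.from_documents` (double-check the placement against your installed version):

```python
# Minimal sketch, assuming the legacy (pre-0.10) LlamaIndex API with ServiceContext.
from llama_index import ServiceContext, SimpleDirectoryReader, VectorStoreIndex
from llama_index.embeddings.openai import OpenAIEmbedding

# Send 50 chunks per embeddings API call instead of the default batch size.
embed_model = OpenAIEmbedding(embed_batch_size=50)
service_context = ServiceContext.from_defaults(embed_model=embed_model)

# "./data" is a placeholder for wherever your documents live.
documents = SimpleDirectoryReader("./data").load_data()

# use_async=True runs the embedding calls concurrently during index construction;
# its placement here is an assumption, so verify it against your version's docs.
index = VectorStoreIndex.from_documents(
    documents,
    service_context=service_context,
    use_async=True,
)
```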