from llama_index import ServiceContext, set_global_service_context

# TODO: construct a real LLM via LangChain before running, e.g.:
#   from langchain.llms import OpenAI
#   llm = OpenAI(model_name="text-davinci-003")
llm = None  # placeholder — original snippet read: llm = <setup from langchain>

# Adjust the context window for the smaller model and lower the chunk
# size so each retrieved chunk still fits comfortably inside that window.
service_context = ServiceContext.from_defaults(
    llm=llm,
    context_window=2048,
    chunk_size=512,
)

# Register this context globally so all subsequent LlamaIndex components
# (indexes, query engines) pick it up without being passed it explicitly.
set_global_service_context(service_context)