I feel like something might be wonky in your Streamlit/session state? This works fine for me locally:
>>> from llama_index.core import VectorStoreIndex, Document, Settings
>>> from llama_index.llms.ollama import Ollama
>>> llm = Ollama(model="starling-lm", request_timeout=3000)
>>> Settings.llm = llm
>>> index = VectorStoreIndex.from_documents([Document.example()])
>>> chat_engine = index.as_chat_engine(chat_mode="best", llm=Settings.llm)
>>> chat_engine
<llama_index.core.agent.react.base.ReActAgent object at 0x7f6b5abb7fd0>
>>> chat_engine.agent_worker._llm
Ollama(callback_manager=<llama_index.core.callbacks.base.CallbackManager object at 0x7f6c169baf80>, system_prompt=None, messages_to_prompt=<function messages_to_prompt at 0x7f6c16a2ab00>, completion_to_prompt=<function default_completion_to_prompt at 0x7f6c168c9d80>, output_parser=None, pydantic_program_mode=<PydanticProgramMode.DEFAULT: 'default'>, query_wrapper_prompt=None, base_url='http://localhost:11434', model='starling-lm', temperature=0.75, context_window=3900, request_timeout=3000.0, prompt_key='prompt', additional_kwargs={})
>>>
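One common gotcha is rebuilding the index or chat engine on every Streamlit rerun (or building it before Settings.llm is set), in which case it can silently fall back to the default OpenAI LLM. A minimal sketch of the pattern I'd expect to work, assuming a simple chat app (the Document.example() data and widget labels are placeholders, not your actual code):

import streamlit as st
from llama_index.core import VectorStoreIndex, Document, Settings
from llama_index.llms.ollama import Ollama

# Build the engine once and stash it in session state so reruns reuse it
if "chat_engine" not in st.session_state:
    Settings.llm = Ollama(model="starling-lm", request_timeout=3000)
    index = VectorStoreIndex.from_documents([Document.example()])
    st.session_state.chat_engine = index.as_chat_engine(
        chat_mode="best", llm=Settings.llm
    )

prompt = st.chat_input("Ask something")
if prompt:
    # chat() goes through the ReActAgent, which should be holding the Ollama LLM
    response = st.session_state.chat_engine.chat(prompt)
    st.write(str(response))

If the engine is cached like this and you still see it hitting the wrong LLM, can you share how you construct the chat engine in your app?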