from llama_index.llms.ollama import Ollama

# Connect to a locally running Ollama server; request_timeout guards
# against slow first-token latency while the model loads.
llm = Ollama(model="llama2", request_timeout=60.0)

# Example prompt (any text works here)
response = llm.complete("Why is the sky blue?")
print(response)
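Beyond one-shot completion, the same LLM object exposes LlamaIndex's chat interface. A minimal sketch follows; the message contents are illustrative:

from llama_index.core.llms import ChatMessage

# chat() takes a list of ChatMessage objects and returns a ChatResponse
messages = [
    ChatMessage(role="system", content="You are a concise assistant."),
    ChatMessage(role="user", content="Explain what Ollama is in one sentence."),
]
print(llm.chat(messages))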
# temperature controls sampling randomness; higher values give more varied output
llm = Ollama(model="llama2", request_timeout=60.0, temperature=0.8)
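For interactive use, responses can also be streamed token by token rather than returned all at once. A minimal sketch using LlamaIndex's stream_complete interface (the prompt is illustrative):

# stream_complete yields incremental CompletionResponse objects;
# .delta holds only the newly generated text for each chunk.
for chunk in llm.stream_complete("Why is the sky blue?"):
    print(chunk.delta, end="", flush=True)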