from llama_index import (
    ServiceContext,
    SimpleDirectoryReader,
    VectorStoreIndex,
    set_global_service_context,
)
from llama_index.llms import Ollama
from llama_index.embeddings import OllamaEmbedding

# Point both the LLM and the embedding model at a local Ollama server.
llm = Ollama(model="llama2", request_timeout=60.0)
embed_model = OllamaEmbedding(
    model_name="llama2",
    base_url="http://localhost:11434",
    ollama_additional_kwargs={"mirostat": 0},
)

# Bundle the models into a ServiceContext and register it globally so
# every index created afterwards uses them by default.
service_context = ServiceContext.from_defaults(llm=llm, embed_model=embed_model)
set_global_service_context(service_context)

# `documents` was not defined in the original snippet; loading from a
# local folder is one common source (the "./data" path is illustrative).
documents = SimpleDirectoryReader("./data").load_data()

# Embed the documents and build an in-memory vector index.
index = VectorStoreIndex.from_documents(documents)
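
# A minimal usage sketch: turn the index into a query engine and ask a
# question. as_query_engine() and query() are standard llama_index calls;
# the question string itself is only an example.
query_engine = index.as_query_engine()
response = query_engine.query("What do the indexed documents cover?")
print(response)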