# RAG pipeline: index local documents and stream an answer from a local LLM.
# NOTE(review): the original source had every statement fused onto one physical
# line, which is a SyntaxError in Python — reformatted into valid statements.

# Directory of documents to index. Hard-coded absolute path — consider an env
# var or CLI argument so the script runs on other machines.
RAG_DATA_DIR = "/home/chepworth/PycharmProjects/cmar/RAG_Data"


def main() -> None:
    """Build a vector index over RAG_DATA_DIR and stream an answer to a fixed query."""
    # Local llama2 served by Ollama; temperature=0 for deterministic output.
    # request_timeout raised from 30s (the library default): local generation
    # on llama2 routinely exceeds 30s and was timing out spuriously.
    Settings.llm = Ollama(model="llama2", request_timeout=120, temperature=0)

    # Sentence embeddings on GPU — assumes a CUDA device is available; TODO confirm.
    Settings.embed_model = HuggingFaceEmbedding(
        model_name="BAAI/bge-small-en-v1.5", device="cuda"
    )

    # Read every supported file under the data directory and embed it into a
    # vector index held in memory.
    documents = SimpleDirectoryReader(RAG_DATA_DIR).load_data()
    index = VectorStoreIndex.from_documents(documents)

    # streaming=True prints tokens as they arrive instead of waiting for the
    # complete answer.
    query_engine = index.as_query_engine(streaming=True)
    response = query_engine.query("How big is saturn?")
    response.print_response_stream()


# Guard module-level side effects: importing this file must not load models,
# build an index, or call the LLM.
if __name__ == "__main__":
    main()