# Configure LlamaIndex to use Anthropic as the LLM.
# A ServiceContext bundles the LLM (and related settings) so downstream
# index/query components pick it up automatically.
from llama_index import ServiceContext
from llama_index.llms import Anthropic

service_context = ServiceContext.from_defaults(llm=Anthropic())
# Run a streaming query against an existing index.
# NOTE(review): assumes `index` was built earlier in the surrounding
# example (not visible in this chunk) — confirm before running standalone.
query_engine = index.as_query_engine(streaming=True)
streaming_response = query_engine.query("Who is Paul Graham.")
# Legacy pattern: wrap a LangChain LLM in an LLMPredictor so LlamaIndex
# can use it, then build a vector index and run a streaming query.
#
# Fix: the original used GPTVectorStoreIndex without importing it, which
# raises NameError when this snippet runs standalone — import it here.
from langchain.llms import Anthropic

from llama_index import GPTVectorStoreIndex, LLMPredictor, ServiceContext

llm_predictor = LLMPredictor(llm=Anthropic())
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor)

# Build the index over the source documents with the Anthropic-backed context.
# NOTE(review): `document` is defined outside this snippet — presumably a
# list of loaded Document objects; verify against the surrounding example.
index = GPTVectorStoreIndex.from_documents(document, service_context=service_context)

query_engine = index.as_query_engine(streaming=True)
streaming_response = query_engine.query("Who is Paul Graham.")