# llm_predictor, documents, and pinecone_index are assumed to be defined earlier.
from llama_index import GPTVectorStoreIndex, ServiceContext

# Wrap the custom LLM predictor in a service context.
service_context = ServiceContext.from_defaults(
    llm_predictor=llm_predictor
)

# Build the vector index on top of the existing Pinecone index.
gpt_pinecone_index = GPTVectorStoreIndex.from_documents(
    documents, pinecone_index=pinecone_index, service_context=service_context
)
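
Depending on the llama_index release, from_documents may not accept the pinecone_index keyword directly; in the legacy 0.6+ API a Pinecone-backed index is usually wired up through a PineconeVectorStore and a StorageContext instead. A minimal sketch, reusing the same documents, pinecone_index, and service_context objects from above:

from llama_index import StorageContext
from llama_index.vector_stores import PineconeVectorStore

# Point a vector store at the existing Pinecone index and build the
# llama_index index on top of it.
vector_store = PineconeVectorStore(pinecone_index=pinecone_index)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
gpt_pinecone_index = GPTVectorStoreIndex.from_documents(
    documents, storage_context=storage_context, service_context=service_context
)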
# query_engine is assumed to have been created from gpt_pinecone_index earlier.
response_stream = query_engine.query("...")
print(type(response_stream))
# Output: <class 'llama_index.response.schema.Response'>
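
Note that the printed type is Response, not StreamingResponse, even though the variable is named response_stream. In llama_index, streaming has to be requested when the query engine is created. A sketch of how query_engine might be built so that query() returns a StreamingResponse (the streaming=True flag is an assumption, not shown in the snippet above):

# Hypothetical construction of query_engine; streaming=True makes
# query() return a StreamingResponse instead of a plain Response.
query_engine = gpt_pinecone_index.as_query_engine(streaming=True)
response_stream = query_engine.query("...")
print(type(response_stream))  # expected type: StreamingResponse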