from llama_index import ServiceContext, StorageContext, KnowledgeGraphIndex
from llama_index.graph_stores import Neo4jGraphStore
from llama_index.llms import Ollama

# Local Mixtral model served by Ollama; generous timeout for slow local inference
llm = Ollama(model="mixtral", request_timeout=180.0)
service_context = ServiceContext.from_defaults(llm=llm, chunk_size=512)

# Connection details for the Neo4j instance backing the knowledge graph
graph_store = Neo4jGraphStore(
    username="neo4j",
    password="password",
    url="bolt://localhost:7687",
    database="neo4j",
)
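
# `extract_triplets` is passed to the index below but is not defined in this
# excerpt. kg_triplet_extract_fn expects a callable that takes a text chunk
# and returns (subject, predicate, object) tuples. The function below is a
# hypothetical stand-in, not the original implementation; in practice it
# would wrap an LLM- or model-based relation extractor.
def extract_triplets(text):
    triplets = []
    for line in text.splitlines():
        # Naive heuristic: read "subject verb object ..." lines as one triple
        parts = line.split()
        if len(parts) >= 3:
            triplets.append((parts[0], parts[1], " ".join(parts[2:])))
    return triplets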

graph_storage_context = StorageContext.from_defaults(graph_store=graph_store)

# Build the knowledge graph index, delegating triplet extraction to
# extract_triplets instead of the default LLM-prompted extraction
graph_index = KnowledgeGraphIndex(
    storage_context=graph_storage_context,
    kg_triplet_extract_fn=extract_triplets,
    service_context=service_context,
    verbose=True,
)
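
# The reload step later in this section reads from "./graph_storage", which
# assumes the freshly built index was persisted first, e.g.:
graph_index.storage_context.persist(persist_dir="./graph_storage")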

# Build a query engine over the index and inspect its prompt templates
query_engine = graph_index.as_query_engine()
prompts_dict = query_engine.get_prompts()
print(list(prompts_dict))
# ['response_synthesizer:text_qa_template', 'response_synthesizer:refine_template']
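
# Either template can be swapped out via update_prompts(). A minimal sketch;
# the template text here is an assumption, not from the original:
from llama_index.prompts import PromptTemplate

custom_qa_prompt = PromptTemplate(
    "Context information is below.\n"
    "---------------------\n"
    "{context_str}\n"
    "---------------------\n"
    "Answer the query using only the context above.\n"
    "Query: {query_str}\n"
    "Answer: "
)
query_engine.update_prompts(
    {"response_synthesizer:text_qa_template": custom_qa_prompt}
)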

# Second session: reconnect to Neo4j and reload the persisted graph index
from llama_index import ServiceContext, StorageContext, load_index_from_storage
from llama_index.graph_stores import Neo4jGraphStore
from llama_index.llms import Ollama

llm = Ollama(model="mixtral", request_timeout=180.0)
service_context = ServiceContext.from_defaults(llm=llm, chunk_size=512)
graph_store = Neo4jGraphStore(
    username="neo4j",
    password="password",
    url="bolt://localhost:7687",
    database="neo4j",
)

# Point the storage context at the persisted directory and rebuild the index
storage_context = StorageContext.from_defaults(
    persist_dir="./graph_storage", graph_store=graph_store
)
index = load_index_from_storage(storage_context)

# include_text=True returns the source text behind matched triplets;
# tree_summarize recursively summarizes the retrieved context into one answer
query_engine = index.as_query_engine(
    include_text=True, response_mode="tree_summarize", service_context=service_context
)

# Use the contents of a local text file as the natural-language query
with open("Data/p1.txt") as f:
    data = f.read()

response = query_engine.query(data)
print(response)
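
# The response also carries the retrieved source nodes, which is handy for
# checking which triplets and text chunks informed the answer:
for source_node in response.source_nodes:
    print(source_node.node.get_text())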