`CitationQueryEngine.from_args()` sets the text QA template to the default defined here:
https://github.com/jerryjliu/llama_index/blob/ae3e0bb5ca7811e579da39bbfac8c217dc818cfc/llama_index/query_engine/citation_query_engine.py#L22

You can build your own template like this:

```python
from llama_index.llms.base import ChatMessage, MessageRole
from llama_index.prompts.base import ChatPromptTemplate

text_qa_messages = [
    ChatMessage(role=MessageRole.SYSTEM, content="Some system prompt"),
    ChatMessage(
        content="some template string. A good default is the one I linked above in the code base",
        role=MessageRole.USER,
    ),
]
text_qa_template = ChatPromptTemplate(message_templates=text_qa_messages)
```
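For reference, a minimal sketch of wiring that template into the engine (assuming an existing `index`; `citation_qa_template` is the `from_args` parameter that carries this prompt):

```python
from llama_index.query_engine import CitationQueryEngine

# Pass the custom template; when omitted, from_args falls back to the
# default CITATION_QA_TEMPLATE linked above.
query_engine = CitationQueryEngine.from_args(
    index,  # an existing VectorStoreIndex (assumed)
    citation_qa_template=text_qa_template,
)
```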
```python
from llama_index import ServiceContext, VectorStoreIndex
from llama_index.llms import OpenAI
from llama_index.prompts.base import ChatPromptTemplate
from llama_index.query_engine import CitationQueryEngine
from llama_index.vector_stores import WeaviateVectorStore

self.model = OpenAI(
    model=self.modelName,
    temperature=self.temperature,
    max_tokens=self.contextBuffer,
    stream=True,
)  # WHY NOT SET SYSTEM PROMPT HERE?????
self.serviceContext = ServiceContext.from_defaults(
    llm=self.model,
    embed_model=self.embedModel,
)
self.index = VectorStoreIndex.from_vector_store(
    service_context=self.serviceContext,
    vector_store=WeaviateVectorStore(
        weaviate_client=self.client, index_name="index_name"
    ),
)
self.queryEngine = CitationQueryEngine.from_args(
    self.index,
    streaming=self.streaming,
    citation_qa_template=ChatPromptTemplate(
        message_templates=self.chatHistory,
    ),  # VS USE CITATION_QA_TEMPLATE HERE?????
    service_context=self.serviceContext,
    similarity_top_k=self.topK,
    citation_chunk_size=self.citationSize,
)
```
```python
from llama_index import LLMPredictor, ServiceContext

# Set the system prompt on the LLMPredictor, then build the service context from it.
llm_predictor = LLMPredictor(llm=self.model, system_prompt="Talk like a pirate")
service_context = ServiceContext.from_defaults(
    llm_predictor=llm_predictor,
    embed_model=self.embedModel,
)
```
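Putting it together, a sketch that reuses the `self.*` attributes and the `text_qa_template` from the snippets above (one way to wire it, not the only way): the system prompt lives on the `LLMPredictor`, and `citation_qa_template` stays a real prompt template rather than raw chat history.

```python
# Sketch: system prompt via LLMPredictor, custom QA prompt via citation_qa_template.
llm_predictor = LLMPredictor(llm=self.model, system_prompt="Talk like a pirate")
self.serviceContext = ServiceContext.from_defaults(
    llm_predictor=llm_predictor,
    embed_model=self.embedModel,
)
self.queryEngine = CitationQueryEngine.from_args(
    self.index,
    streaming=self.streaming,
    # A ChatPromptTemplate whose user message keeps the {context_str} and
    # {query_str} placeholders, like the default CITATION_QA_TEMPLATE.
    citation_qa_template=text_qa_template,
    service_context=self.serviceContext,
    similarity_top_k=self.topK,
    citation_chunk_size=self.citationSize,
)
```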