@llm_completion_callback()
def complete(self, prompt: str, **kwargs: Any) -> CompletionResponse:
    """Run a non-streaming completion against the wrapped llama.cpp model.

    Args:
        prompt: The raw user prompt. If ``formatted`` is falsy it is first
            passed through ``self.completion_to_prompt`` to apply the
            model's prompt template.
        **kwargs: Optional overrides popped before the model call:
            ``formatted`` (bool, default False) — prompt is already templated;
            ``max_tokens`` (int, default 64) — generation cap;
            ``stop`` (str or list[str], default "\n") — stop sequence(s);
            ``echo`` (bool, default True) — include the prompt in the output.

    Returns:
        CompletionResponse with the first choice's text and the raw
        llama-cpp-python response dict.
    """
    # Force non-streaming mode for this entry point.
    self.generate_kwargs.update({"stream": False})
    is_formatted = kwargs.pop("formatted", False)
    max_tokens = kwargs.pop("max_tokens", 64)
    stop = kwargs.pop("stop", "\n")
    echo = kwargs.pop("echo", True)
    if not is_formatted:
        prompt = self.completion_to_prompt(prompt)
    # BUG FIX: the model call was inside the `else` branch, so when
    # formatted=False (the default) `response` was never assigned and the
    # return raised UnboundLocalError. The call must happen on both paths.
    response = self._model(
        "Q: " + prompt + " A: ",
        max_tokens=max_tokens,
        stop=stop,
        echo=echo,
    )
    return CompletionResponse(text=response["choices"][0]["text"], raw=response)
# Example invocation: prompt is pre-formatted, allow up to 300 tokens,
# stop on a new question marker or newline, and echo the prompt back.
stop_sequences = ["Q:", "\n"]
response = llm.complete(
    "Name the planets in the solar system.",
    formatted=True,
    max_tokens=300,
    stop=stop_sequences,
    echo=True,
)