@kapa.ai
# Pass a plain string template here: this llama_index version pydantic-validates
# `query_wrapper_prompt` as `str`, so wrapping it in SimpleInputPrompt raises
# the "str type expected" ValidationError shown in the traceback below.
query_wrapper_prompt = "<|USER|>{query_str}<|ASSISTANT|>"

# Initialize HuggingFaceLLM
llm = HuggingFaceLLM(
    context_window=1028,  # NOTE(review): likely meant 1024 — confirm intended size
    max_new_tokens=256,
    # do_sample=False selects greedy decoding, so temperature has no effect;
    # set do_sample=True if temperature-based sampling is actually wanted.
    generate_kwargs={"temperature": 0.7, "do_sample": False},
    system_prompt=system_prompt,
    query_wrapper_prompt=query_wrapper_prompt,
    tokenizer_name="StabilityAI/stablelm-tuned-alpha-3b",
    model_name="StabilityAI/stablelm-tuned-alpha-3b",
    device_map="cpu",
    stopping_ids=[50278, 50279, 50277, 1, 0],
    tokenizer_kwargs={"max_length": 1028},  # NOTE(review): keep in sync with context_window
    # uncomment this if using CUDA to reduce memory usage
    # model_kwargs={"torch_dtype": torch.float16}
)
ValidationError Traceback (most recent call last)
/tmp/ipykernel_910/4083314887.py in <cell line: 25>()
23 qa_template = Prompt(template)
24 # Initialize HuggingFaceLLM
---> 25 llm = HuggingFaceLLM(
26 context_window=1028,
27 max_new_tokens=256,
~/.conda/envs/default/lib/python3.9/site-packages/llama_index/llms/huggingface.py in __init__(self, context_window, max_new_tokens, system_prompt, query_wrapper_prompt, tokenizer_name, model_name, model, tokenizer, device_map, stopping_ids, tokenizer_kwargs, tokenizer_outputs_to_remove, model_kwargs, generate_kwargs, callback_manager)
148 self._stopping_criteria = StoppingCriteriaList([StopOnTokens()])
149
--> 150 super().__init__(
151 context_window=context_window,
152 max_new_tokens=max_new_tokens,
~/.conda/envs/default/lib/python3.9/site-packages/pydantic/main.cpython-39-x86_64-linux-gnu.so in pydantic.main.BaseModel.__init__()
ValidationError: 1 validation error for HuggingFaceLLM
query_wrapper_prompt
str type expected (type=type_error.str)