from transformers import AutoModelForCausalLM, AutoTokenizer

from ._types import LM, LMAnswer, NoneResponseError


def extract_thinking_content(output: str) -> tuple[str, str | None]:
    """Extract the final answer and the thinking content from the model output,
    based on <think> and </think> tags.

    Args:
        output (str): The raw model output.

    Returns:
        tuple[str, str | None]: The final answer and the thinking content,
            or None for the thinking content if no <think> tags are present.
    """
    if "<think>" in output and "</think>" in output:
        thinking_content = output.split("<think>")[1].split("</think>")[0].strip()
        content = output.split("</think>")[-1].strip()
        return content, thinking_content

    return output, None
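
# A quick illustration of extract_thinking_content (hypothetical strings, not
# from the original code):
#   extract_thinking_content("<think>2 + 2 = 4</think>The answer is 4.")
#   returns ("The answer is 4.", "2 + 2 = 4"); an output with no <think> tags
#   comes back unchanged as (output, None).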


class HFChat(LM):

    def __init__(self, model_name: str, pure: bool = False):
        super().__init__(model_name=model_name, pure=pure)

        # load the tokenizer and the model
        self.tokenizer = AutoTokenizer.from_pretrained(model_name)
        self.model = AutoModelForCausalLM.from_pretrained(
            model_name, torch_dtype="auto", device_map="auto"
        )

    def _gen(self, prompt: str) -> LMAnswer:
        # put the system instruction first so the chat template renders it before the user turn
        messages = [
            {"role": "system", "content": self.instruction},
            {"role": "user", "content": prompt},
        ]
        text = self.tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            add_generation_prompt=True,
        )

        model_inputs = self.tokenizer([text], return_tensors="pt").to(self.model.device)

        # conduct text completion
        generated_ids = self.model.generate(**model_inputs, max_new_tokens=32768)
        # drop the prompt tokens so only the newly generated text is decoded
        output_ids = generated_ids[0][len(model_inputs.input_ids[0]) :].tolist()
        output = self.tokenizer.decode(output_ids, skip_special_tokens=True).strip("\n")

        # decode() always returns a str, so guard against an empty response instead of None
        if not output:
            raise NoneResponseError(self.model_name)

        content, thinking_content = extract_thinking_content(output)
        return LMAnswer(answer=content, reasoning_steps=thinking_content)
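
# A minimal usage sketch, not part of the original file. The model name is an
# assumed example, and LM / LMAnswer / NoneResponseError come from the local
# ._types module, so this only runs inside the package that defines them. The
# internal _gen() is called directly here because the public LM interface is
# not shown in this diff.
if __name__ == "__main__":
    lm = HFChat("Qwen/Qwen3-0.6B")  # any causal LM with a chat template should work
    result = lm._gen("What is 2 + 2?")
    print("answer:", result.answer)
    print("reasoning:", result.reasoning_steps)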