
AttributeError: module 'bitsandbytes' has no attribute 'nn' #1166

@Harsh-raj

Description


Why am I getting this error when I am trying to load the checkpoint of a LoRA fine-tuned phi-1_5 model? Following is the complete error log:

(phi) harsh@harsh:~/phi-1_5$ python3 inference_lora_phi.py
Traceback (most recent call last):
  File "/home/harsh/phi-1_5/inference_lora_phi.py", line 14, in <module>
    model = PeftModel.from_pretrained(model, "./output/phi-1_5_FT_lr1e-5_ep3_batch8_lora1632_denfc12qkv_proj/checkpoint-14555")
            ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/harsh/phi-1_5/peft/src/peft/peft_model.py", line 387, in from_pretrained
    model = MODEL_TYPE_TO_PEFT_MODEL_MAPPING[config.task_type](model, config, adapter_name)
            ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/harsh/phi-1_5/peft/src/peft/peft_model.py", line 1212, in __init__
    super().__init__(model, peft_config, adapter_name)
  File "/home/harsh/phi-1_5/peft/src/peft/peft_model.py", line 131, in __init__
    self.base_model = cls(model, {adapter_name: peft_config}, adapter_name)
                      ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/harsh/phi-1_5/peft/src/peft/tuners/lora/model.py", line 136, in __init__
    super().__init__(model, config, adapter_name)
  File "/home/harsh/phi-1_5/peft/src/peft/tuners/tuners_utils.py", line 165, in __init__
    self.inject_adapter(self.model, adapter_name)
  File "/home/harsh/phi-1_5/peft/src/peft/tuners/tuners_utils.py", line 342, in inject_adapter
    self._create_and_replace(peft_config, adapter_name, target, target_name, parent, current_key=key)
  File "/home/harsh/phi-1_5/peft/src/peft/tuners/lora/model.py", line 220, in _create_and_replace
    new_module = self._create_new_module(lora_config, adapter_name, target, **kwargs)
                 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/harsh/phi-1_5/peft/src/peft/tuners/lora/model.py", line 282, in _create_new_module
    from .bnb import dispatch_bnb_8bit
  File "/home/harsh/phi-1_5/peft/src/peft/tuners/lora/bnb.py", line 272, in <module>
    if is_bnb_4bit_available():
       ^^^^^^^^^^^^^^^^^^^^^^^
  File "/home/harsh/phi-1_5/peft/src/peft/import_utils.py", line 33, in is_bnb_4bit_available
    return hasattr(bnb.nn, "Linear4bit")
                   ^^^^^^
AttributeError: module 'bitsandbytes' has no attribute 'nn'
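The check that raises here is PEFT's is_bnb_4bit_available(), which only runs because bitsandbytes is importable at all; the missing bnb.nn attribute suggests the installed bitsandbytes package itself is broken rather than merely outdated. A quick sanity check to run in the same environment (a minimal sketch; I am assuming the package exposes __version__, which most releases do):

import bitsandbytes as bnb

print(bnb.__version__)      # assumption: __version__ exists; most bitsandbytes releases define it
print(hasattr(bnb, "nn"))   # prints False in the broken install, matching the AttributeError above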

Following is the inference code I am using to run inference with the LoRA fine-tuned model:

import gradio as gr 
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers import StoppingCriteria, StoppingCriteriaList, TextIteratorStreamer
from threading import Thread
from peft import PeftConfig, PeftModel

# Loading the tokenizer and model from Hugging Face's model hub.
tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-1_5", trust_remote_code=True)
# model = AutoModelForCausalLM.from_pretrained("microsoft/phi-1_5",  trust_remote_code=True)

config = PeftConfig.from_pretrained("./output/phi-1_5_FT_lr1e-5_ep3_batch8_lora1632_denfc12qkv_proj/checkpoint-14555")
model = AutoModelForCausalLM.from_pretrained(config.base_model_name_or_path)
model = PeftModel.from_pretrained(model, "./output/phi-1_5_FT_lr1e-5_ep3_batch8_lora1632_denfc12qkv_proj/checkpoint-14555")
# using CUDA for an optimal experience
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = model.to(device)


# Defining a custom stopping criteria class for the model's text generation.
class StopOnTokens(StoppingCriteria):
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        stop_ids = [50256]  # IDs of tokens where the generation should stop.
        for stop_id in stop_ids:
            if input_ids[0][-1] == stop_id:  # Checking if the last generated token is a stop token.
                return True
        return False


# Function to generate model predictions.
def predict(message, history):
    history_transformer_format = history + [[message, ""]]
    stop = StopOnTokens()

    # Formatting the input for the model.
    #messages = "<|endoftext|>".join(["<|endoftext|>".join(["\n" + item[0], "\n" + item[1]])
    #                   for item in history_transformer_format])

    model_inputs = tokenizer([message], return_tensors="pt").to(device) #messages
    streamer = TextIteratorStreamer(tokenizer, timeout=10., skip_prompt=True, skip_special_tokens=True)
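    # dict(model_inputs, ...) works because the tokenizer returns a mapping (BatchEncoding):
    # it merges input_ids/attention_mask with the generation settings into one kwargs dict.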
    generate_kwargs = dict(
        model_inputs,
        streamer=streamer,
        max_new_tokens=512,
        do_sample=True,
        top_p=0.95,
        top_k=50,
        temperature=0.5,
        num_beams=1,
        stopping_criteria=StoppingCriteriaList([stop])
    )
    t = Thread(target=model.generate, kwargs=generate_kwargs)
    t.start()  # Starting the generation in a separate thread.
    partial_message = ""
    for new_token in streamer:
        partial_message += new_token
        if '<|endoftext|>' in partial_message:  # Breaking the loop if the stop token is generated.
            break
        yield partial_message


# Setting up the Gradio chat interface.
gr.ChatInterface(predict,
                 title="Phi1.5").launch(share=True)  # Launching the web interface.
