Introduction to using the current model
Usage
import torch
import sys
from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer
from peft import PeftModel
from modelscope.hub.snapshot_download import snapshot_download
model_dir = snapshot_download('baichuan-inc/baichuan-7B', cache_dir='./sunsimiao', revision='v1.0.0')
model_dir_sft = snapshot_download('thomas/Sunsimiao_lora', cache_dir='./sunsimiao', revision='v1.0.0')
tokenizer = AutoTokenizer.from_pretrained("./sunsimiao/baichuan-inc/baichuan-7B", trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained("./sunsimiao/baichuan-inc/baichuan-7B",
                                             device_map="auto",
                                             trust_remote_code=True,
                                             low_cpu_mem_usage=True,
                                             load_in_4bit=True,
                                             torch_dtype=torch.float16)
model = PeftModel.from_pretrained(model, "./sunsimiao/thomas/Sunsimiao_lora")
streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
query = "晚上睡不着怎么办?"
prompt="Below is a istructio that describes a task. Write a respose that appropriately completes the request."
prompt+="### Istructio:\{}\\### Respose:\".format(query)
inputs = tokenizer([prompt], return_tensors="pt")
inputs = inputs.to("cuda")
generate_ids = model.generate(**inputs, max_new_tokens=256, streamer=streamer)
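The TextStreamer above prints the reply token by token as it is generated. If you also want the reply as a plain string, you can decode the newly generated tokens yourself. A minimal sketch, not part of the original card, reusing the tokenizer, inputs, and generate_ids variables from the snippet above:
# Decode only the newly generated tokens, skipping the prompt that was fed in.
response = tokenizer.batch_decode(generate_ids[:, inputs["input_ids"].shape[1]:], skip_special_tokens=True)[0]
print(response)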
Disclaimer
Clone with HTTP
git clone https://www.modelscope.cn/thomas/Sunsimiao_lora.git
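If you fetch the LoRA weights with git clone instead of snapshot_download, you can point PeftModel at the local clone. A minimal sketch, assuming the clone lands in ./Sunsimiao_lora and that model is the baichuan-7B base model loaded as shown above:
# Load the LoRA adapter from the local git clone (the path is an assumption
# based on the default clone directory name).
model = PeftModel.from_pretrained(model, "./Sunsimiao_lora")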