References:
https://github.com/THUDM/ChatGLM3
https://github.com/QwenLM/Qwen
https://modelscope.cn/models/baichuan-inc/baichuan-7B/summary

This repository is a coursework assignment comparing large language models.
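All three snippets below load fp16 weights onto a GPU (.half().cuda() or device_map="auto"), so it is worth confirming the environment first. A minimal check, assuming torch and modelscope are already installed:

import torch
# The fp16 snippets below expect a CUDA-capable GPU to be visible.
print(torch.__version__, torch.cuda.is_available())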
Zhipu (ChatGLM3-6B-32K)
from modelscope import AutoTokenizer, AutoModel, snapshot_download

# Download the ChatGLM3-6B-32K checkpoint and load it in fp16 on the GPU.
model_dir = snapshot_download("ZhipuAI/chatglm3-6b-32k", revision="v1.0.0")
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
model = AutoModel.from_pretrained(model_dir, trust_remote_code=True).half().cuda()
model = model.eval()

# Two-turn chat: a greeting, then an ambiguous Chinese sentence.
response, history = model.chat(tokenizer, "你好", history=[])
print(response)
response, history = model.chat(tokenizer, "冬天:能穿多少穿多少 2、夏天:能穿多少穿多少", history=history)
print(response)
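ChatGLM3's remote code also exposes a streaming interface; a minimal sketch, assuming the same model and tokenizer objects as above and that the checkpoint provides stream_chat (as in the official ChatGLM3 repo):

# Print the reply incrementally as it is generated; stream_chat yields the
# cumulative response each step, so only the new suffix is printed.
printed = ""
for response, history in model.stream_chat(tokenizer, "你好", history=[]):
    print(response[len(printed):], end="", flush=True)
    printed = response
print()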
Tongyi Qianwen (Qwen-7B-Chat)
from modelscope import AutoModelForCausalLM, AutoTokenizer
from modelscope import GenerationConfig

# Load Qwen-7B-Chat; device_map="auto" places the weights on available GPUs.
tokenizer = AutoTokenizer.from_pretrained("qwen/Qwen-7B-Chat", trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained("qwen/Qwen-7B-Chat", device_map="auto", trust_remote_code=True).eval()

# Two-turn chat: a greeting, then a word-play question about who likes whom.
response, history = model.chat(tokenizer, "你好", history=None)
print(response)
response, history = model.chat(tokenizer, "明明明明明白白白喜欢他,可她就是不说。 这句话里,明明和白白谁喜欢谁?", history=history)
print(response)
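The GenerationConfig import above is not used in the snippet; Qwen's model card loads it to override the checkpoint's default decoding settings. A hedged sketch along those lines (the top_p value is only an illustration):

# Replace the default generation settings before the next model.chat call.
model.generation_config = GenerationConfig.from_pretrained("qwen/Qwen-7B-Chat", trust_remote_code=True)
model.generation_config.top_p = 0.8  # illustrative value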
Baichuan (Baichuan2-7B-Chat)
import torch
from modelscope import snapshot_download, AutoModelForCausalLM, AutoTokenizer, GenerationConfig

# Download Baichuan2-7B-Chat and load it in fp16 across available GPUs.
model_dir = snapshot_download("baichuan-inc/Baichuan2-7B-Chat", revision='v1.0.5')
tokenizer = AutoTokenizer.from_pretrained(model_dir, device_map="auto",
                                          trust_remote_code=True, torch_dtype=torch.float16)
model = AutoModelForCausalLM.from_pretrained(model_dir, device_map="auto",
                                             trust_remote_code=True, torch_dtype=torch.float16)
model.generation_config = GenerationConfig.from_pretrained(model_dir)

# Baichuan2's chat API takes the full message list on every turn.
messages = []
messages.append({"role": "user", "content": "你好"})
response = model.chat(tokenizer, messages)
print(response)
messages.append({"role": "assistant", "content": response})
messages.append({"role": "user", "content": "他知道我知道你知道他不知道吗? 这句话里,到底谁不知道"})
response = model.chat(tokenizer, messages)
print(response)
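Because the whole message list is passed on each turn, the bookkeeping above is easy to wrap in a small helper; a minimal sketch reusing the model, tokenizer, and message format already shown (the helper name is ours):

def ask(question, messages):
    # Append the user turn, get a reply, and record it as the assistant turn.
    messages.append({"role": "user", "content": question})
    response = model.chat(tokenizer, messages)
    messages.append({"role": "assistant", "content": response})
    return response

messages = []
print(ask("你好", messages))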
Git download
git clone https://www.modelscope.cn/eala668/My_model_compare.git