IP-Adapter是一种有效且轻量级的适配器,可为预训练的文本到图像扩散模型实现图像提示功能。IP-Adapter还可以推广到使用现有可控工具的可控生成。详情可参考来源。模型描述 (Model Description)
运行环境 (Operating environment)
Dependencies and Installation
# Git clone the original repository
# NOTE(review): the "hotshotco" org is copied from the original card; the upstream
# IP-Adapter repo is usually tencent-ailab/IP-Adapter — confirm before use.
git clone https://github.com/hotshotco/IP-Adapter.git
cd IP-Adapter

# Create a conda environment and activate it
conda create -n ipadapter python=3.9
conda activate ipadapter

# Install pinned dependencies (a requirements.txt equivalent)
pip install torch==2.0.1 torchvision==0.15.2 transformers==4.34.0 diffusers==0.21.4
代码范例 (Code example)
# IP-Adapter usage examples: image-prompted generation via ModelScope pipelines.
# Four modes are shown: text-to-image, text-to-image with an extra text prompt,
# image-to-image, and ControlNet (depth) guided generation.
# NOTE(review): the "zcmaas" model namespace, the "ip-adapter-task" name, the
# "g_image"/"control_map" keys, and the asset paths are taken verbatim from the
# original card — verify them against the published model before running.
from modelscope.models import Model
from modelscope.pipelines import pipeline

# Text-to-image input: generate variations conditioned only on the prompt image.
# (Renamed from `input` to avoid shadowing the Python builtin.)
t2i_input = {
    # required
    "image": "assets/images/woman.png",
    # following are optional:
    "num_samples": 4,
    "num_inference_steps": 50,
    "seed": 42,
    "output_path": "./out_t2i.png",
}

# Text-to-image with an additional text prompt; `scale` balances the image
# prompt against the text prompt (lower = text dominates).
p_input = {
    # required
    "image": "assets/images/woman.png",
    "prompt": "best quality, high quality, wearig a hat o the beach",
    "scale": 0.6,
    # following are optional:
    "num_samples": 4,
    "num_inference_steps": 50,
    "seed": 42,
    "output_path": "./out_t2i_p.png",
}

# Image-to-image: `image` is the IP-Adapter prompt image, `g_image` the image
# being transformed; `strength` controls how far it may deviate from g_image.
i2i_input = {
    # required
    "image": "assets/images/river.png",
    "g_image": "assets/images/vermeer.jpg",
    # following are optional:
    "strength": 0.6,
    "num_samples": 4,
    "num_inference_steps": 50,
    "seed": 42,
    "output_path": "./out_i2i.png",
}

# ControlNet-guided generation: `control_map` supplies the structural (depth) hint.
depth_input = {
    # required
    "image": "assets/images/statue.png",
    "control_map": "assets/structure_controls/depth.png",
    # following are optional:
    "num_samples": 4,
    "num_inference_steps": 50,
    "seed": 42,
    "output_path": "./out_control.png",
}

# Text-to-image pipeline
model = Model.from_pretrained("zcmaas/cv_ip-adapter_image-prompt-adapter_base",
                              pipe_type="StableDiffusionPipeline")
inference = pipeline('ip-adapter-task', model=model)
output = inference(t2i_input)
print(f"Result saved as {output}")

# Text-to-image pipeline with prompt (reuses the same pipeline instance)
output = inference(p_input)
print(f"Result saved as {output}")

# Image-to-image pipeline
model = Model.from_pretrained("zcmaas/cv_ip-adapter_image-prompt-adapter_base",
                              pipe_type="StableDiffusionImg2ImgPipeline")
inference = pipeline('ip-adapter-task', model=model)
output = inference(i2i_input)
print(f"Result saved as {output}")

# Text-to-image pipeline with ControlNet
model = Model.from_pretrained("zcmaas/cv_ip-adapter_image-prompt-adapter_base",
                              pipe_type="StableDiffusionControlNetPipeline")
inference = pipeline('ip-adapter-task', model=model)
output = inference(depth_input)
print(f"Result saved as {output}")

# Model.from_pretrained() also supports model path inputs such as
# 'base_model_path', 'vae_model_path', 'image_encoder_path', 'ip_ckpt'.
点击空白处退出提示










评论