Stable-diffusion-WebUI 的API调用(内含文生图和图生图实例)
import io
import os

import gradio as gr
import numpy as np
import requests
from PIL import Image
# Model checkpoint / config locations forwarded to the WebUI backend API.
# NOTE(review): these are placeholder paths — replace with real files before running.
model_checkpoint_path = "path_to_your_stable_diffusion_model_checkpoint.ckpt"
model_config_path = "path_to_your_stable_diffusion_model_config.json"
# Directory containing the checkpoint; sent as "model_directory" in both API calls.
model_dir = os.path.dirname(model_checkpoint_path)
# 文本到图像的API调用
def text_to_image(prompt):
response = requests.post(f"http://localhost:7860/generate_image", json={
"prompt": prompt,
"n_samples": 1, # 生成1张图像
"seed": None,
"steps": 50,
"model_name": model_config_path,
"model_directory": model_dir,
"run_in_background": False,
"cuda_device": 0
})
image_path = response.json()["images"][0]
return image_path
# 图像到图像的API调用
def image_to_image(inp_img):
img_bytes = inp_img.read()
image = Image.open(img_bytes)
image_array = np.asarray(image)
response = requests.post(f"http://localhost:7860/conditional_image_synthesis", json={
"input_code": image_array.tolist(),
"guidance_scale": 7.5,
"seed": None,
"steps": 100,
"model_name": model_config_path,
"model_directory": model_dir,
"run_in_background": False,
"cuda_device": 0
})
image_path = response.json()["images"][0]
return image_path
# Initialize the Gradio UI.
# BUG FIX: the original called .launch() on two separate Interfaces back to
# back — launch() blocks, so the second UI never started until the first was
# shut down. Serve both from a single tabbed app instead.
text_ui = gr.Interface(
    fn=text_to_image, inputs="text", outputs="image",
    title="Stable Diffusion Text to Image",
)
img_ui = gr.Interface(
    fn=image_to_image, inputs="image", outputs="image",
    title="Stable Diffusion Image to Image",
)
gr.TabbedInterface(
    [text_ui, img_ui],
    ["Text to Image", "Image to Image"],
).launch()
在这个代码实例中,我们首先加载了 Stable Diffusion 模型的参数和配置。然后,我们定义了两个函数 text_to_image 和 image_to_image,它们分别用于文本到图像和图像到图像的转换。这两个函数通过 HTTP POST 请求调用 Stable Diffusion WebUI 的后端 API。最后,我们使用 gradio 库初始化了用户界面,让用户可以通过输入文本或上传图像来进行转换。
评论已关闭