te')); return $arr; } /* 遍历用户所有主题 * @param $uid 用户ID * @param int $page 页数 * @param int $pagesize 每页记录条数 * @param bool $desc 排序方式 TRUE降序 FALSE升序 * @param string $key 返回的数组用那一列的值作为 key * @param array $col 查询哪些列 */ function thread_tid_find_by_uid($uid, $page = 1, $pagesize = 1000, $desc = TRUE, $key = 'tid', $col = array()) { if (empty($uid)) return array(); $orderby = TRUE == $desc ? -1 : 1; $arr = thread_tid__find($cond = array('uid' => $uid), array('tid' => $orderby), $page, $pagesize, $key, $col); return $arr; } // 遍历栏目下tid 支持数组 $fid = array(1,2,3) function thread_tid_find_by_fid($fid, $page = 1, $pagesize = 1000, $desc = TRUE) { if (empty($fid)) return array(); $orderby = TRUE == $desc ? -1 : 1; $arr = thread_tid__find($cond = array('fid' => $fid), array('tid' => $orderby), $page, $pagesize, 'tid', array('tid', 'verify_date')); return $arr; } function thread_tid_delete($tid) { if (empty($tid)) return FALSE; $r = thread_tid__delete(array('tid' => $tid)); return $r; } function thread_tid_count() { $n = thread_tid__count(); return $n; } // 统计用户主题数 大数量下严谨使用非主键统计 function thread_uid_count($uid) { $n = thread_tid__count(array('uid' => $uid)); return $n; } // 统计栏目主题数 大数量下严谨使用非主键统计 function thread_fid_count($fid) { $n = thread_tid__count(array('fid' => $fid)); return $n; } ?>python - Convert model.safetensors in onnx - Stack Overflow
最新消息:雨落星辰是一个专注网站SEO优化、网站SEO诊断、搜索引擎研究、网络营销推广、网站策划运营及站长类的自媒体原创博客

python - Convert model.safetensors in onnx - Stack Overflow

programmeradmin5浏览0评论

I have created a customised model with kohya from the v1-5-pruned-emaonly.safetensors model and would now like to import this model into a c# application. I have read that I can use onnx to do this but I get stuck at loading the model weights. The code is as follows

from diffusers import StableDiffusionPipeline
import torch
from safetensors import safe_open

model_path = "runwayml/stable-diffusion-v1-5"
pipe = StableDiffusionPipeline.from_pretrained(model_path, torch_dtype=torch.float16)

lora_path = "P718_long_crack_v1.safetensors"

pipe.unet.load_attn_procs(lora_path)

pipe.to("cuda")

unet = pipe.unet

producing the following error

Loading pipeline components...: 100%|██████████| 7/7 [00:01<00:00,  4.75it/s]
D:\ONNX\venv\lib\site-packages\diffusers\loaders\unet.py:212: FutureWarning: `load_attn_procs` is deprecated and will be removed in version 0.40.0. Using the `load_attn_procs()` method has been deprecated and will be removed in a future version. Please use `load_lora_adapter()`.
  deprecate("load_attn_procs", "0.40.0", deprecation_message)
Traceback (most recent call last):
  File "D:\ONNX\Converter.py", line 11, in <module>
    pipe.unet.load_attn_procs(lora_path)
  File "D:\ONNX\venv\lib\site-packages\huggingface_hub\utils\_validators.py", line 114, in _inner_fn
    return fn(*args, **kwargs)
  File "D:\ONNX\venv\lib\site-packages\diffusers\loaders\unet.py", line 217, in load_attn_procs
    is_model_cpu_offload, is_sequential_cpu_offload = self._process_lora(
  File "D:\ONNX\venv\lib\site-packages\diffusers\loaders\unet.py", line 333, in _process_lora
    lora_config_kwargs = get_peft_kwargs(rank, network_alphas, state_dict, is_unet=True)
  File "D:\ONNX\venv\lib\site-packages\diffusers\utils\peft_utils.py", line 153, in get_peft_kwargs
    r = lora_alpha = list(rank_dict.values())[0]
IndexError: list index out of range

am i taking the wrong approach?

发布评论

评论列表(0)

  1. 暂无评论