feat: flexible model architecture for dit models (Flux & SD3) (#490)
* Refactor: wtype per tensor
* Fix default args
* refactor: fix flux
* Refactor photomaker v2 support
* unet: refactor the refactoring
* Refactor: fix controlnet and tae
* refactor: upscaler
* Refactor: fix runtime type override
* upscaler: use fp16 again
* Refactor: Flexible sd3 arch
* Refactor: Flexible Flux arch
* format code

Co-authored-by: leejet <leejet714@gmail.com>
This commit is contained in:
parent 4570715727
commit 7ce63e740c
clip.hpp (46 changed lines)
@@ -545,9 +545,12 @@ protected:
     int64_t vocab_size;
     int64_t num_positions;
 
-    void init_params(struct ggml_context* ctx, ggml_type wtype) {
-        params["token_embedding.weight"]    = ggml_new_tensor_2d(ctx, wtype, embed_dim, vocab_size);
-        params["position_embedding.weight"] = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, embed_dim, num_positions);
+    void init_params(struct ggml_context* ctx, std::map<std::string, enum ggml_type>& tensor_types, const std::string prefix = "") {
+        enum ggml_type token_wtype    = (tensor_types.find(prefix + "token_embedding.weight") != tensor_types.end()) ? tensor_types[prefix + "token_embedding.weight"] : GGML_TYPE_F32;
+        enum ggml_type position_wtype = GGML_TYPE_F32;  //(tensor_types.find(prefix + "position_embedding.weight") != tensor_types.end()) ? tensor_types[prefix + "position_embedding.weight"] : GGML_TYPE_F32;
+
+        params["token_embedding.weight"]    = ggml_new_tensor_2d(ctx, token_wtype, embed_dim, vocab_size);
+        params["position_embedding.weight"] = ggml_new_tensor_2d(ctx, position_wtype, embed_dim, num_positions);
     }
 
 public:
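The hunk above is the pattern the whole commit follows: init_params() no longer receives one model-wide wtype, it receives a map from fully qualified tensor name to the type the loader recorded, and falls back to a default when the name is absent. A minimal standalone sketch of the idiom, assuming nothing from the patch beyond the map shape (the three-value ggml_type enum and the type_or helper are stand-ins, not part of the patch):

    #include <cstdio>
    #include <map>
    #include <string>

    enum ggml_type { GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_Q8_0 };

    // Lookup with fallback: the recorded type if the loader saw the tensor,
    // otherwise a safe default.
    static ggml_type type_or(const std::map<std::string, ggml_type>& tensor_types,
                             const std::string& name, ggml_type fallback) {
        auto it = tensor_types.find(name);
        return it != tensor_types.end() ? it->second : fallback;
    }

    int main() {
        std::map<std::string, ggml_type> tensor_types = {
            {"te.token_embedding.weight", GGML_TYPE_Q8_0},
        };
        std::string prefix = "te.";
        // Quantized on disk -> the tensor is created as Q8_0.
        ggml_type token_wtype = type_or(tensor_types, prefix + "token_embedding.weight", GGML_TYPE_F32);
        // Never recorded -> falls back, like position_embedding above.
        ggml_type pos_wtype = type_or(tensor_types, prefix + "position_embedding.weight", GGML_TYPE_F32);
        printf("token=%d position=%d\n", token_wtype, pos_wtype);  // token=2 position=0
    }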
@@ -591,11 +594,14 @@ protected:
     int64_t image_size;
     int64_t num_patches;
     int64_t num_positions;
 
-    void init_params(struct ggml_context* ctx, ggml_type wtype) {
-        params["patch_embedding.weight"]    = ggml_new_tensor_4d(ctx, GGML_TYPE_F16, patch_size, patch_size, num_channels, embed_dim);
-        params["class_embedding"]           = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, embed_dim);
-        params["position_embedding.weight"] = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, embed_dim, num_positions);
+    void init_params(struct ggml_context* ctx, std::map<std::string, enum ggml_type>& tensor_types, const std::string prefix = "") {
+        enum ggml_type patch_wtype    = GGML_TYPE_F16;  // tensor_types.find(prefix + "patch_embedding.weight") != tensor_types.end() ? tensor_types[prefix + "patch_embedding.weight"] : GGML_TYPE_F16;
+        enum ggml_type class_wtype    = GGML_TYPE_F32;  // tensor_types.find(prefix + "class_embedding") != tensor_types.end() ? tensor_types[prefix + "class_embedding"] : GGML_TYPE_F32;
+        enum ggml_type position_wtype = GGML_TYPE_F32;  // tensor_types.find(prefix + "position_embedding.weight") != tensor_types.end() ? tensor_types[prefix + "position_embedding.weight"] : GGML_TYPE_F32;
+
+        params["patch_embedding.weight"]    = ggml_new_tensor_4d(ctx, patch_wtype, patch_size, patch_size, num_channels, embed_dim);
+        params["class_embedding"]           = ggml_new_tensor_1d(ctx, class_wtype, embed_dim);
+        params["position_embedding.weight"] = ggml_new_tensor_2d(ctx, position_wtype, embed_dim, num_positions);
     }
 
 public:
@@ -651,9 +657,10 @@ enum CLIPVersion {
 
 class CLIPTextModel : public GGMLBlock {
 protected:
-    void init_params(struct ggml_context* ctx, ggml_type wtype) {
+    void init_params(struct ggml_context* ctx, std::map<std::string, enum ggml_type>& tensor_types, const std::string prefix = "") {
         if (version == OPEN_CLIP_VIT_BIGG_14) {
-            params["text_projection"] = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, projection_dim, hidden_size);
+            enum ggml_type wtype      = GGML_TYPE_F32;  // tensor_types.find(prefix + "text_projection") != tensor_types.end() ? tensor_types[prefix + "text_projection"] : GGML_TYPE_F32;
+            params["text_projection"] = ggml_new_tensor_2d(ctx, wtype, projection_dim, hidden_size);
         }
     }
 
@@ -798,9 +805,9 @@ protected:
     int64_t out_features;
     bool transpose_weight;
 
-    void init_params(struct ggml_context* ctx, ggml_type wtype) {
+    void init_params(struct ggml_context* ctx, std::map<std::string, enum ggml_type>& tensor_types, const std::string prefix = "") {
+        enum ggml_type wtype = tensor_types.find(prefix + "weight") != tensor_types.end() ? tensor_types[prefix + "weight"] : GGML_TYPE_F32;
         if (transpose_weight) {
-            LOG_ERROR("transpose_weight");
             params["weight"] = ggml_new_tensor_2d(ctx, wtype, out_features, in_features);
         } else {
             params["weight"] = ggml_new_tensor_2d(ctx, wtype, in_features, out_features);
@@ -861,12 +868,13 @@ struct CLIPTextModelRunner : public GGMLRunner {
     CLIPTextModel model;
 
     CLIPTextModelRunner(ggml_backend_t backend,
-                        ggml_type wtype,
+                        std::map<std::string, enum ggml_type>& tensor_types,
+                        const std::string prefix,
                         CLIPVersion version = OPENAI_CLIP_VIT_L_14,
                         int clip_skip_value = 1,
                         bool with_final_ln  = true)
-        : GGMLRunner(backend, wtype), model(version, clip_skip_value, with_final_ln) {
-        model.init(params_ctx, wtype);
+        : GGMLRunner(backend), model(version, clip_skip_value, with_final_ln) {
+        model.init(params_ctx, tensor_types, prefix);
     }
 
     std::string get_desc() {
@@ -908,13 +916,13 @@ struct CLIPTextModelRunner : public GGMLRunner {
         struct ggml_tensor* embeddings = NULL;
 
         if (num_custom_embeddings > 0 && custom_embeddings_data != NULL) {
+            auto token_embed_weight = model.get_token_embed_weight();
             auto custom_embeddings  = ggml_new_tensor_2d(compute_ctx,
-                                                         wtype,
+                                                         token_embed_weight->type,
                                                          model.hidden_size,
                                                          num_custom_embeddings);
             set_backend_tensor_data(custom_embeddings, custom_embeddings_data);
 
-            auto token_embed_weight = model.get_token_embed_weight();
             // concatenate custom embeddings
             embeddings = ggml_concat(compute_ctx, token_embed_weight, custom_embeddings, 1);
         }
common.hpp (14 changed lines)
@@ -182,9 +182,11 @@ protected:
     int64_t dim_in;
     int64_t dim_out;
 
-    void init_params(struct ggml_context* ctx, ggml_type wtype) {
-        params["proj.weight"] = ggml_new_tensor_2d(ctx, wtype, dim_in, dim_out * 2);
-        params["proj.bias"]   = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, dim_out * 2);
+    void init_params(struct ggml_context* ctx, std::map<std::string, enum ggml_type>& tensor_types, std::string prefix = "") {
+        enum ggml_type wtype      = (tensor_types.find(prefix + "proj.weight") != tensor_types.end()) ? tensor_types[prefix + "proj.weight"] : GGML_TYPE_F32;
+        enum ggml_type bias_wtype = GGML_TYPE_F32;  //(tensor_types.find(prefix + "proj.bias") != tensor_types.end()) ? tensor_types[prefix + "proj.bias"] : GGML_TYPE_F32;
+        params["proj.weight"] = ggml_new_tensor_2d(ctx, wtype, dim_in, dim_out * 2);
+        params["proj.bias"]   = ggml_new_tensor_1d(ctx, bias_wtype, dim_out * 2);
     }
 
 public:
@@ -438,8 +440,10 @@ public:
 
 class AlphaBlender : public GGMLBlock {
 protected:
-    void init_params(struct ggml_context* ctx, ggml_type wtype) {
-        params["mix_factor"] = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1);
+    void init_params(struct ggml_context* ctx, std::map<std::string, enum ggml_type>& tensor_types, std::string prefix = "") {
+        // Get the type of the "mix_factor" tensor from the input tensors map with the specified prefix
+        enum ggml_type wtype = GGML_TYPE_F32;  //(tensor_types.find(prefix + "mix_factor") != tensor_types.end()) ? tensor_types[prefix + "mix_factor"] : GGML_TYPE_F32;
+        params["mix_factor"] = ggml_new_tensor_1d(ctx, wtype, 1);
     }
 
     float get_alpha() {
conditioner.hpp
@@ -46,7 +46,6 @@ struct FrozenCLIPEmbedderWithCustomWords : public Conditioner {
     SDVersion version    = VERSION_SD1;
     PMVersion pm_version = PM_VERSION_1;
     CLIPTokenizer tokenizer;
-    ggml_type wtype;
     std::shared_ptr<CLIPTextModelRunner> text_model;
     std::shared_ptr<CLIPTextModelRunner> text_model2;
 
@@ -57,12 +56,12 @@ struct FrozenCLIPEmbedderWithCustomWords : public Conditioner {
     std::vector<std::string> readed_embeddings;
 
     FrozenCLIPEmbedderWithCustomWords(ggml_backend_t backend,
-                                      ggml_type wtype,
+                                      std::map<std::string, enum ggml_type>& tensor_types,
                                       const std::string& embd_dir,
                                       SDVersion version = VERSION_SD1,
                                       PMVersion pv      = PM_VERSION_1,
                                       int clip_skip     = -1)
-        : version(version), pm_version(pv), tokenizer(version == VERSION_SD2 ? 0 : 49407), embd_dir(embd_dir), wtype(wtype) {
+        : version(version), pm_version(pv), tokenizer(version == VERSION_SD2 ? 0 : 49407), embd_dir(embd_dir) {
         if (clip_skip <= 0) {
             clip_skip = 1;
             if (version == VERSION_SD2 || version == VERSION_SDXL) {
@@ -70,12 +69,12 @@ struct FrozenCLIPEmbedderWithCustomWords : public Conditioner {
             }
         }
         if (version == VERSION_SD1) {
-            text_model = std::make_shared<CLIPTextModelRunner>(backend, wtype, OPENAI_CLIP_VIT_L_14, clip_skip);
+            text_model = std::make_shared<CLIPTextModelRunner>(backend, tensor_types, "cond_stage_model.transformer.text_model", OPENAI_CLIP_VIT_L_14, clip_skip);
         } else if (version == VERSION_SD2) {
-            text_model = std::make_shared<CLIPTextModelRunner>(backend, wtype, OPEN_CLIP_VIT_H_14, clip_skip);
+            text_model = std::make_shared<CLIPTextModelRunner>(backend, tensor_types, "cond_stage_model.transformer.text_model", OPEN_CLIP_VIT_H_14, clip_skip);
         } else if (version == VERSION_SDXL) {
-            text_model  = std::make_shared<CLIPTextModelRunner>(backend, wtype, OPENAI_CLIP_VIT_L_14, clip_skip, false);
-            text_model2 = std::make_shared<CLIPTextModelRunner>(backend, wtype, OPEN_CLIP_VIT_BIGG_14, clip_skip, false);
+            text_model  = std::make_shared<CLIPTextModelRunner>(backend, tensor_types, "cond_stage_model.transformer.text_model", OPENAI_CLIP_VIT_L_14, clip_skip, false);
+            text_model2 = std::make_shared<CLIPTextModelRunner>(backend, tensor_types, "cond_stage_model.1.transformer.text_model", OPEN_CLIP_VIT_BIGG_14, clip_skip, false);
         }
     }
 
@@ -138,14 +137,14 @@ struct FrozenCLIPEmbedderWithCustomWords : public Conditioner {
                 LOG_DEBUG("embedding wrong hidden size, got %i, expected %i", tensor_storage.ne[0], hidden_size);
                 return false;
             }
-            embd        = ggml_new_tensor_2d(embd_ctx, wtype, hidden_size, tensor_storage.n_dims > 1 ? tensor_storage.ne[1] : 1);
+            embd        = ggml_new_tensor_2d(embd_ctx, tensor_storage.type, hidden_size, tensor_storage.n_dims > 1 ? tensor_storage.ne[1] : 1);
             *dst_tensor = embd;
             return true;
         };
         model_loader.load_tensors(on_load, NULL);
         readed_embeddings.push_back(embd_name);
         token_embed_custom.resize(token_embed_custom.size() + ggml_nbytes(embd));
-        memcpy((void*)(token_embed_custom.data() + num_custom_embeddings * hidden_size * ggml_type_size(wtype)),
+        memcpy((void*)(token_embed_custom.data() + num_custom_embeddings * hidden_size * ggml_type_size(embd->type)),
                embd->data,
                ggml_nbytes(embd));
         for (int i = 0; i < embd->ne[1]; i++) {
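Because the runner-wide wtype field is gone, the copy above derives its byte offset from the embedding tensor's own element type, which keeps the packing correct when an embedding file is stored in a different type than the text model. A reduced sketch of the arithmetic, with ggml_type_size() mimicked by a plain stand-in table:

    #include <cstdio>
    #include <cstddef>

    // Stand-in for ggml_type_size(): bytes per element (0 = f32, 1 = f16).
    static size_t type_size(int type) { return type == 0 ? 4 : 2; }

    int main() {
        size_t hidden_size        = 768;  // SD1 CLIP hidden size
        int embd_type             = 1;    // this embedding happens to be f16 on disk
        int num_custom_embeddings = 2;    // rows already appended to the buffer
        // The next row starts right after the rows copied so far; using the
        // embedding's own type is what the hunk above changes.
        size_t offset = num_custom_embeddings * hidden_size * type_size(embd_type);
        printf("append at byte offset %zu\n", offset);  // 3072
    }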
@@ -590,9 +589,9 @@ struct FrozenCLIPEmbedderWithCustomWords : public Conditioner {
 struct FrozenCLIPVisionEmbedder : public GGMLRunner {
     CLIPVisionModelProjection vision_model;
 
-    FrozenCLIPVisionEmbedder(ggml_backend_t backend, ggml_type wtype)
-        : vision_model(OPEN_CLIP_VIT_H_14, true), GGMLRunner(backend, wtype) {
-        vision_model.init(params_ctx, wtype);
+    FrozenCLIPVisionEmbedder(ggml_backend_t backend, std::map<std::string, enum ggml_type>& tensor_types)
+        : vision_model(OPEN_CLIP_VIT_H_14, true), GGMLRunner(backend) {
+        vision_model.init(params_ctx, tensor_types, "cond_stage_model.transformer");
     }
 
     std::string get_desc() {
@@ -627,7 +626,6 @@ struct FrozenCLIPVisionEmbedder : public GGMLRunner {
 };
 
 struct SD3CLIPEmbedder : public Conditioner {
-    ggml_type wtype;
     CLIPTokenizer clip_l_tokenizer;
     CLIPTokenizer clip_g_tokenizer;
     T5UniGramTokenizer t5_tokenizer;
@@ -636,15 +634,15 @@ struct SD3CLIPEmbedder : public Conditioner {
     std::shared_ptr<T5Runner> t5;
 
     SD3CLIPEmbedder(ggml_backend_t backend,
-                    ggml_type wtype,
+                    std::map<std::string, enum ggml_type>& tensor_types,
                     int clip_skip = -1)
-        : wtype(wtype), clip_g_tokenizer(0) {
+        : clip_g_tokenizer(0) {
         if (clip_skip <= 0) {
             clip_skip = 2;
         }
-        clip_l = std::make_shared<CLIPTextModelRunner>(backend, wtype, OPENAI_CLIP_VIT_L_14, clip_skip, false);
-        clip_g = std::make_shared<CLIPTextModelRunner>(backend, wtype, OPEN_CLIP_VIT_BIGG_14, clip_skip, false);
-        t5     = std::make_shared<T5Runner>(backend, wtype);
+        clip_l = std::make_shared<CLIPTextModelRunner>(backend, tensor_types, "text_encoders.clip_l.transformer.text_model", OPENAI_CLIP_VIT_L_14, clip_skip, false);
+        clip_g = std::make_shared<CLIPTextModelRunner>(backend, tensor_types, "text_encoders.clip_g.transformer.text_model", OPEN_CLIP_VIT_BIGG_14, clip_skip, false);
+        t5     = std::make_shared<T5Runner>(backend, tensor_types, "text_encoders.t5xxl.transformer");
     }
 
     void set_clip_skip(int clip_skip) {
@@ -974,21 +972,19 @@ struct SD3CLIPEmbedder : public Conditioner {
 };
 
 struct FluxCLIPEmbedder : public Conditioner {
-    ggml_type wtype;
     CLIPTokenizer clip_l_tokenizer;
     T5UniGramTokenizer t5_tokenizer;
     std::shared_ptr<CLIPTextModelRunner> clip_l;
     std::shared_ptr<T5Runner> t5;
 
     FluxCLIPEmbedder(ggml_backend_t backend,
-                     ggml_type wtype,
-                     int clip_skip = -1)
-        : wtype(wtype) {
+                     std::map<std::string, enum ggml_type>& tensor_types,
+                     int clip_skip = -1) {
         if (clip_skip <= 0) {
             clip_skip = 2;
         }
-        clip_l = std::make_shared<CLIPTextModelRunner>(backend, wtype, OPENAI_CLIP_VIT_L_14, clip_skip, true);
-        t5     = std::make_shared<T5Runner>(backend, wtype);
+        clip_l = std::make_shared<CLIPTextModelRunner>(backend, tensor_types, "text_encoders.clip_l.transformer.text_model", OPENAI_CLIP_VIT_L_14, clip_skip, true);
+        t5     = std::make_shared<T5Runner>(backend, tensor_types, "text_encoders.t5xxl.transformer");
     }
 
     void set_clip_skip(int clip_skip) {
control.hpp
@@ -317,10 +317,10 @@ struct ControlNet : public GGMLRunner {
     bool guided_hint_cached = false;
 
     ControlNet(ggml_backend_t backend,
-               ggml_type wtype,
+               std::map<std::string, enum ggml_type>& tensor_types,
               SDVersion version = VERSION_SD1)
-        : GGMLRunner(backend, wtype), control_net(version) {
-        control_net.init(params_ctx, wtype);
+        : GGMLRunner(backend), control_net(version) {
+        control_net.init(params_ctx, tensor_types, "");
     }
 
     ~ControlNet() {
diffusion_model.hpp
@@ -31,10 +31,10 @@ struct UNetModel : public DiffusionModel {
     UNetModelRunner unet;
 
     UNetModel(ggml_backend_t backend,
-              ggml_type wtype,
+              std::map<std::string, enum ggml_type>& tensor_types,
               SDVersion version = VERSION_SD1,
               bool flash_attn   = false)
-        : unet(backend, wtype, version, flash_attn) {
+        : unet(backend, tensor_types, "model.diffusion_model", version, flash_attn) {
     }
 
     void alloc_params_buffer() {
@@ -83,9 +83,8 @@ struct MMDiTModel : public DiffusionModel {
     MMDiTRunner mmdit;
 
     MMDiTModel(ggml_backend_t backend,
-               ggml_type wtype,
-               SDVersion version = VERSION_SD3_2B)
-        : mmdit(backend, wtype, version) {
+               std::map<std::string, enum ggml_type>& tensor_types)
+        : mmdit(backend, tensor_types, "model.diffusion_model") {
     }
 
     void alloc_params_buffer() {
@@ -133,10 +132,9 @@ struct FluxModel : public DiffusionModel {
     Flux::FluxRunner flux;
 
     FluxModel(ggml_backend_t backend,
-              ggml_type wtype,
-              SDVersion version = VERSION_FLUX_DEV,
+              std::map<std::string, enum ggml_type>& tensor_types,
               bool flash_attn = false)
-        : flux(backend, wtype, version, flash_attn) {
+        : flux(backend, tensor_types, "model.diffusion_model", flash_attn) {
     }
 
     void alloc_params_buffer() {
esrgan.hpp
@@ -142,10 +142,9 @@ struct ESRGAN : public GGMLRunner {
     int scale     = 4;
     int tile_size = 128;  // avoid cuda OOM for 4gb VRAM
 
-    ESRGAN(ggml_backend_t backend,
-           ggml_type wtype)
-        : GGMLRunner(backend, wtype) {
-        rrdb_net.init(params_ctx, wtype);
+    ESRGAN(ggml_backend_t backend, std::map<std::string, enum ggml_type>& tensor_types)
+        : GGMLRunner(backend) {
+        rrdb_net.init(params_ctx, tensor_types, "");
     }
 
     std::string get_desc() {
examples/cli/main.cpp
@@ -1010,8 +1010,7 @@ int main(int argc, const char* argv[]) {
     int upscale_factor = 4;  // unused for RealESRGAN_x4plus_anime_6B.pth
     if (params.esrgan_path.size() > 0 && params.upscale_repeats > 0) {
         upscaler_ctx_t* upscaler_ctx = new_upscaler_ctx(params.esrgan_path.c_str(),
-                                                        params.n_threads,
-                                                        params.wtype);
+                                                        params.n_threads);
 
         if (upscaler_ctx == NULL) {
             printf("new_upscaler_ctx failed\n");
flux.hpp (57 changed lines)
@@ -35,8 +35,9 @@ namespace Flux {
     int64_t hidden_size;
     float eps;
 
-    void init_params(struct ggml_context* ctx, ggml_type wtype) {
-        params["scale"] = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, hidden_size);
+    void init_params(struct ggml_context* ctx, std::map<std::string, enum ggml_type>& tensor_types, const std::string prefix = "") {
+        ggml_type wtype = GGML_TYPE_F32;  //(tensor_types.find(prefix + "scale") != tensor_types.end()) ? tensor_types[prefix + "scale"] : GGML_TYPE_F32;
+        params["scale"] = ggml_new_tensor_1d(ctx, wtype, hidden_size);
     }
 
 public:
@@ -823,25 +824,55 @@ namespace Flux {
     };
 
     struct FluxRunner : public GGMLRunner {
+        static std::map<std::string, enum ggml_type> empty_tensor_types;
+
     public:
         FluxParams flux_params;
         Flux flux;
         std::vector<float> pe_vec;  // for cache
 
         FluxRunner(ggml_backend_t backend,
-                   ggml_type wtype,
-                   SDVersion version = VERSION_FLUX_DEV,
-                   bool flash_attn   = false)
-            : GGMLRunner(backend, wtype) {
+                   std::map<std::string, enum ggml_type>& tensor_types = empty_tensor_types,
+                   const std::string prefix = "",
+                   bool flash_attn          = false)
+            : GGMLRunner(backend) {
             flux_params.flash_attn = flash_attn;
-            if (version == VERSION_FLUX_SCHNELL) {
-                flux_params.guidance_embed = false;
-            }
-            if (version == VERSION_FLUX_LITE) {
-                flux_params.depth = 8;
+            flux_params.guidance_embed      = false;
+            flux_params.depth               = 0;
+            flux_params.depth_single_blocks = 0;
+            for (auto pair : tensor_types) {
+                std::string tensor_name = pair.first;
+                if (tensor_name.find("model.diffusion_model.") == std::string::npos)
+                    continue;
+                if (tensor_name.find("guidance_in.in_layer.weight") != std::string::npos) {
+                    // not schnell
+                    flux_params.guidance_embed = true;
+                }
+                size_t db = tensor_name.find("double_blocks.");
+                if (db != std::string::npos) {
+                    tensor_name     = tensor_name.substr(db);  // remove prefix
+                    int block_depth = atoi(tensor_name.substr(14, tensor_name.find(".", 14)).c_str());
+                    if (block_depth + 1 > flux_params.depth) {
+                        flux_params.depth = block_depth + 1;
+                    }
+                }
+                size_t sb = tensor_name.find("single_blocks.");
+                if (sb != std::string::npos) {
+                    tensor_name     = tensor_name.substr(sb);  // remove prefix
+                    int block_depth = atoi(tensor_name.substr(14, tensor_name.find(".", 14)).c_str());
+                    if (block_depth + 1 > flux_params.depth_single_blocks) {
+                        flux_params.depth_single_blocks = block_depth + 1;
+                    }
+                }
+            }
+
+            LOG_INFO("Flux blocks: %d double, %d single", flux_params.depth, flux_params.depth_single_blocks);
+            if (!flux_params.guidance_embed) {
+                LOG_INFO("Flux guidance is disabled (Schnell mode)");
             }
 
             flux = Flux(flux_params);
-            flux.init(params_ctx, wtype);
+            flux.init(params_ctx, tensor_types, prefix);
         }
 
         std::string get_desc() {
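FluxRunner now derives the architecture from the weights themselves instead of trusting an SDVersion flag: the guidance embedder is enabled only if a guidance_in weight exists, and each depth is one past the largest block index seen. One quirk worth noting: tensor_name.substr(14, tensor_name.find(".", 14)) passes an absolute position where substr() expects a count, so the substring runs past the next dot, but the parse still works because atoi() stops at the first non-digit. A standalone sketch of the same scan (the tensor names are illustrative):

    #include <cstdio>
    #include <cstdlib>
    #include <string>
    #include <vector>

    int main() {
        std::vector<std::string> names = {
            "model.diffusion_model.double_blocks.7.img_attn.qkv.weight",
            "model.diffusion_model.single_blocks.37.linear1.weight",
        };
        int depth = 0, depth_single = 0;
        bool guidance = false;
        for (const std::string& name : names) {
            if (name.find("model.diffusion_model.") == std::string::npos) continue;
            if (name.find("guidance_in.in_layer.weight") != std::string::npos) guidance = true;
            size_t db = name.find("double_blocks.");
            if (db != std::string::npos) {
                int idx = atoi(name.substr(db + 14).c_str());  // atoi stops at the next '.'
                if (idx + 1 > depth) depth = idx + 1;
            }
            size_t sb = name.find("single_blocks.");
            if (sb != std::string::npos) {
                int idx = atoi(name.substr(sb + 14).c_str());
                if (idx + 1 > depth_single) depth_single = idx + 1;
            }
        }
        // A Flux-Lite-like, schnell-style tensor set: 8 double, 38 single, guidance=0
        printf("%d double, %d single, guidance=%d\n", depth, depth_single, guidance);
    }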
@@ -959,7 +990,7 @@ namespace Flux {
         // ggml_backend_t backend = ggml_backend_cuda_init(0);
         ggml_backend_t backend    = ggml_backend_cpu_init();
         ggml_type model_data_type = GGML_TYPE_Q8_0;
-        std::shared_ptr<FluxRunner> flux = std::shared_ptr<FluxRunner>(new FluxRunner(backend, model_data_type));
+        std::shared_ptr<FluxRunner> flux = std::shared_ptr<FluxRunner>(new FluxRunner(backend));
         {
             LOG_INFO("loading from '%s'", file_path.c_str());
ggml_extend.hpp
@@ -25,6 +25,8 @@
 #include "ggml-cpu.h"
 #include "ggml.h"
 
+#include "model.h"
+
 #ifdef SD_USE_CUBLAS
 #include "ggml-cuda.h"
 #endif
@@ -673,13 +675,13 @@ __STATIC_INLINE__ struct ggml_tensor* ggml_nn_attention(struct ggml_context* ctx
 #if defined(SD_USE_FLASH_ATTENTION) && !defined(SD_USE_CUBLAS) && !defined(SD_USE_METAL) && !defined(SD_USE_VULKAN) && !defined(SD_USE_SYCL)
     struct ggml_tensor* kqv = ggml_flash_attn(ctx, q, k, v, false);  // [N * n_head, n_token, d_head]
 #else
     float d_head = (float)q->ne[0];
     struct ggml_tensor* kq = ggml_mul_mat(ctx, k, q);  // [N * n_head, n_token, n_k]
     kq = ggml_scale_inplace(ctx, kq, 1.0f / sqrt(d_head));
     if (mask) {
         kq = ggml_diag_mask_inf_inplace(ctx, kq, 0);
     }
     kq = ggml_soft_max_inplace(ctx, kq);
     struct ggml_tensor* kqv = ggml_mul_mat(ctx, v, kq);  // [N * n_head, n_token, d_head]
 #endif
     return kqv;
@@ -964,7 +966,6 @@ protected:
     std::map<struct ggml_tensor*, const void*> backend_tensor_data_map;
 
-    ggml_type wtype        = GGML_TYPE_F32;
     ggml_backend_t backend = NULL;
 
     void alloc_params_ctx() {
@@ -1040,8 +1041,8 @@ protected:
 public:
     virtual std::string get_desc() = 0;
 
-    GGMLRunner(ggml_backend_t backend, ggml_type wtype = GGML_TYPE_F32)
-        : backend(backend), wtype(wtype) {
+    GGMLRunner(ggml_backend_t backend)
+        : backend(backend) {
         alloc_params_ctx();
     }
 
@@ -1170,20 +1171,22 @@ protected:
     GGMLBlockMap blocks;
     ParameterMap params;
 
-    void init_blocks(struct ggml_context* ctx, ggml_type wtype) {
+    void init_blocks(struct ggml_context* ctx, std::map<std::string, enum ggml_type>& tensor_types, const std::string prefix = "") {
         for (auto& pair : blocks) {
             auto& block = pair.second;
-            block->init(ctx, wtype);
+            block->init(ctx, tensor_types, prefix + pair.first);
         }
     }
 
-    virtual void init_params(struct ggml_context* ctx, ggml_type wtype) {}
+    virtual void init_params(struct ggml_context* ctx, std::map<std::string, enum ggml_type>& tensor_types, const std::string prefix = "") {}
 
 public:
-    void init(struct ggml_context* ctx, ggml_type wtype) {
-        init_blocks(ctx, wtype);
-        init_params(ctx, wtype);
+    void init(struct ggml_context* ctx, std::map<std::string, enum ggml_type>& tensor_types, std::string prefix = "") {
+        if (prefix.size() > 0) {
+            prefix = prefix + ".";
+        }
+        init_blocks(ctx, tensor_types, prefix);
+        init_params(ctx, tensor_types, prefix);
     }
 
     size_t get_params_num() {
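This prefix threading is what makes the per-tensor lookups in the leaf blocks work: every level appends its own block name plus a dot before recursing, so by the time init_params() runs, prefix + "weight" is the fully qualified name the loader recorded. A toy reduction of the scheme (Block and Linear here are stand-ins for GGMLBlock and its subclasses):

    #include <cstdio>
    #include <map>
    #include <memory>
    #include <string>

    struct Block {
        std::map<std::string, std::shared_ptr<Block>> blocks;
        virtual void init_params(const std::string& prefix) {}
        virtual ~Block() {}
        void init(std::string prefix = "") {
            if (prefix.size() > 0) prefix = prefix + ".";  // same rule as above
            for (auto& pair : blocks)
                pair.second->init(prefix + pair.first);
            init_params(prefix);
        }
    };

    struct Linear : Block {
        void init_params(const std::string& prefix) override {
            // A real block would consult tensor_types[prefix + "weight"] here.
            printf("%sweight\n", prefix.c_str());
        }
    };

    int main() {
        Block model;
        auto mlp            = std::make_shared<Block>();
        mlp->blocks["fc1"]  = std::make_shared<Linear>();
        model.blocks["mlp"] = mlp;
        model.init("model.diffusion_model");
        // Prints: model.diffusion_model.mlp.fc1.weight
    }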
@@ -1239,13 +1242,15 @@ protected:
     bool bias;
     bool force_f32;
 
-    void init_params(struct ggml_context* ctx, ggml_type wtype) {
+    void init_params(struct ggml_context* ctx, std::map<std::string, enum ggml_type>& tensor_types, const std::string prefix = "") {
+        enum ggml_type wtype = (tensor_types.find(prefix + "weight") != tensor_types.end()) ? tensor_types[prefix + "weight"] : GGML_TYPE_F32;
         if (in_features % ggml_blck_size(wtype) != 0 || force_f32) {
             wtype = GGML_TYPE_F32;
         }
         params["weight"] = ggml_new_tensor_2d(ctx, wtype, in_features, out_features);
         if (bias) {
-            params["bias"] = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, out_features);
+            enum ggml_type wtype = GGML_TYPE_F32;  //(tensor_types.find(prefix + "bias") != tensor_types.end()) ? tensor_types[prefix + "bias"] : GGML_TYPE_F32;
+            params["bias"]       = ggml_new_tensor_1d(ctx, wtype, out_features);
         }
     }
 
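The in_features % ggml_blck_size(wtype) guard exists because quantized ggml types pack elements into fixed-size blocks (32 elements per block for Q8_0, for instance), so a row whose length is not a multiple of the block size cannot be stored in that type at all. A reduced sketch of the fallback, with ggml_blck_size() mimicked by a stand-in:

    #include <cstdio>

    // Stand-in for ggml_blck_size(): 1 for float types, 32 for Q8_0.
    static int blck_size(int type) { return type == 2 ? 32 : 1; }

    int main() {
        int wtype        = 2;     // Q8_0 requested by the tensor-type map
        long in_features = 1000;  // not a multiple of 32
        if (in_features % blck_size(wtype) != 0) {
            wtype = 0;  // force F32, mirroring the guard above
        }
        printf("effective wtype = %d\n", wtype);  // 0 -> fell back to F32
    }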
@@ -1273,9 +1278,9 @@ class Embedding : public UnaryBlock {
 protected:
     int64_t embedding_dim;
     int64_t num_embeddings;
 
-    void init_params(struct ggml_context* ctx, ggml_type wtype) {
+    void init_params(struct ggml_context* ctx, std::map<std::string, enum ggml_type>& tensor_types, const std::string prefix = "") {
+        enum ggml_type wtype = (tensor_types.find(prefix + "weight") != tensor_types.end()) ? tensor_types[prefix + "weight"] : GGML_TYPE_F32;
         params["weight"] = ggml_new_tensor_2d(ctx, wtype, embedding_dim, num_embeddings);
     }
 
 public:
@@ -1313,10 +1318,12 @@ protected:
     std::pair<int, int> dilation;
     bool bias;
 
-    void init_params(struct ggml_context* ctx, ggml_type wtype) {
-        params["weight"] = ggml_new_tensor_4d(ctx, GGML_TYPE_F16, kernel_size.second, kernel_size.first, in_channels, out_channels);
+    void init_params(struct ggml_context* ctx, std::map<std::string, enum ggml_type>& tensor_types, const std::string prefix = "") {
+        enum ggml_type wtype = GGML_TYPE_F16;  //(tensor_types.find(prefix + "weight") != tensor_types.end()) ? tensor_types[prefix + "weight"] : GGML_TYPE_F16;
+        params["weight"]     = ggml_new_tensor_4d(ctx, wtype, kernel_size.second, kernel_size.first, in_channels, out_channels);
         if (bias) {
-            params["bias"] = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, out_channels);
+            enum ggml_type wtype = GGML_TYPE_F32;  // (tensor_types.find(prefix + "bias") != tensor_types.end()) ? tensor_types[prefix + "bias"] : GGML_TYPE_F32;
+            params["bias"]       = ggml_new_tensor_1d(ctx, wtype, out_channels);
         }
     }
 
@@ -1356,10 +1363,12 @@ protected:
     int64_t dilation;
     bool bias;
 
-    void init_params(struct ggml_context* ctx, ggml_type wtype) {
-        params["weight"] = ggml_new_tensor_4d(ctx, GGML_TYPE_F16, 1, kernel_size, in_channels, out_channels);  // 5d => 4d
+    void init_params(struct ggml_context* ctx, std::map<std::string, enum ggml_type>& tensor_types, const std::string prefix = "") {
+        enum ggml_type wtype = GGML_TYPE_F16;  //(tensor_types.find(prefix + "weight") != tensor_types.end()) ? tensor_types[prefix + "weight"] : GGML_TYPE_F16;
+        params["weight"]     = ggml_new_tensor_4d(ctx, wtype, 1, kernel_size, in_channels, out_channels);  // 5d => 4d
         if (bias) {
-            params["bias"] = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, out_channels);
+            enum ggml_type wtype = GGML_TYPE_F32;  //(tensor_types.find(prefix + "bias") != tensor_types.end()) ? tensor_types[prefix + "bias"] : GGML_TYPE_F32;
+            params["bias"]       = ggml_new_tensor_1d(ctx, wtype, out_channels);
        }
     }
 
@@ -1398,11 +1407,13 @@ protected:
     bool elementwise_affine;
     bool bias;
 
-    void init_params(struct ggml_context* ctx, ggml_type wtype) {
+    void init_params(struct ggml_context* ctx, std::map<std::string, enum ggml_type>& tensor_types, const std::string prefix = "") {
         if (elementwise_affine) {
-            params["weight"] = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, normalized_shape);
+            enum ggml_type wtype = GGML_TYPE_F32;  //(tensor_types.find(prefix + "weight") != tensor_types.end()) ? tensor_types[prefix + "weight"] : GGML_TYPE_F32;
+            params["weight"]     = ggml_new_tensor_1d(ctx, wtype, normalized_shape);
             if (bias) {
-                params["bias"] = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, normalized_shape);
+                enum ggml_type wtype = GGML_TYPE_F32;  //(tensor_types.find(prefix + "bias") != tensor_types.end()) ? tensor_types[prefix + "bias"] : GGML_TYPE_F32;
+                params["bias"]       = ggml_new_tensor_1d(ctx, wtype, normalized_shape);
             }
         }
     }
@@ -1438,10 +1449,12 @@ protected:
     float eps;
     bool affine;
 
-    void init_params(struct ggml_context* ctx, ggml_type wtype) {
+    void init_params(struct ggml_context* ctx, std::map<std::string, enum ggml_type>& tensor_types, const std::string prefix = "") {
         if (affine) {
-            params["weight"] = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, num_channels);
-            params["bias"]   = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, num_channels);
+            enum ggml_type wtype      = GGML_TYPE_F32;  //(tensor_types.find(prefix + "weight") != tensor_types.end()) ? tensor_types[prefix + "weight"] : GGML_TYPE_F32;
+            enum ggml_type bias_wtype = GGML_TYPE_F32;  //(tensor_types.find(prefix + "bias") != tensor_types.end()) ? tensor_types[prefix + "bias"] : GGML_TYPE_F32;
+            params["weight"] = ggml_new_tensor_1d(ctx, wtype, num_channels);
+            params["bias"]   = ggml_new_tensor_1d(ctx, bias_wtype, num_channels);
         }
     }
 
lora.hpp (5 changed lines)
@@ -16,10 +16,9 @@ struct LoraModel : public GGMLRunner {
     ggml_tensor* zero_index = NULL;
 
     LoraModel(ggml_backend_t backend,
-              ggml_type wtype,
               const std::string& file_path = "",
-              const std::string& prefix    = "")
-        : file_path(file_path), GGMLRunner(backend, wtype) {
+              const std::string prefix     = "")
+        : file_path(file_path), GGMLRunner(backend) {
         if (!model_loader.init_from_file(file_path, prefix)) {
             load_failed = true;
         }
mmdit.hpp (101 changed lines)
@@ -147,8 +147,9 @@ protected:
     int64_t hidden_size;
     float eps;
 
-    void init_params(struct ggml_context* ctx, ggml_type wtype) {
-        params["weight"] = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, hidden_size);
+    void init_params(struct ggml_context* ctx, std::map<std::string, enum ggml_type>& tensor_types, std::string prefix = "") {
+        enum ggml_type wtype = GGML_TYPE_F32;  //(tensor_types.find(prefix + "weight") != tensor_types.end()) ? tensor_types[prefix + "weight"] : GGML_TYPE_F32;
+        params["weight"]     = ggml_new_tensor_1d(ctx, wtype, hidden_size);
     }
 
 public:
@@ -636,7 +637,6 @@ public:
 struct MMDiT : public GGMLBlock {
     // Diffusion model with a Transformer backbone.
 protected:
-    SDVersion version   = VERSION_SD3_2B;
     int64_t input_size  = -1;
     int64_t patch_size  = 2;
     int64_t in_channels = 16;
@@ -652,13 +652,13 @@ protected:
     int64_t hidden_size;
     std::string qk_norm;
 
-    void init_params(struct ggml_context* ctx, ggml_type wtype) {
-        params["pos_embed"] = ggml_new_tensor_3d(ctx, GGML_TYPE_F32, hidden_size, num_patchs, 1);
+    void init_params(struct ggml_context* ctx, std::map<std::string, enum ggml_type>& tensor_types, std::string prefix = "") {
+        enum ggml_type wtype = GGML_TYPE_F32;  //(tensor_types.find(prefix + "pos_embed") != tensor_types.end()) ? tensor_types[prefix + "pos_embed"] : GGML_TYPE_F32;
+        params["pos_embed"]  = ggml_new_tensor_3d(ctx, wtype, hidden_size, num_patchs, 1);
     }
 
 public:
-    MMDiT(SDVersion version = VERSION_SD3_2B)
-        : version(version) {
+    MMDiT(std::map<std::string, enum ggml_type>& tensor_types) {
         // input_size is always None
         // learn_sigma is always False
         // register_length is alwalys 0
@@ -670,48 +670,44 @@ public:
         // pos_embed_scaling_factor is not used
         // pos_embed_offset is not used
         // context_embedder_config is always {'target': 'torch.nn.Linear', 'params': {'in_features': 4096, 'out_features': 1536}}
-        if (version == VERSION_SD3_2B) {
-            input_size               = -1;
-            patch_size               = 2;
-            in_channels              = 16;
-            depth                    = 24;
-            mlp_ratio                = 4.0f;
-            adm_in_channels          = 2048;
-            out_channels             = 16;
-            pos_embed_max_size       = 192;
-            num_patchs               = 36864;  // 192 * 192
-            context_size             = 4096;
-            context_embedder_out_dim = 1536;
-        } else if (version == VERSION_SD3_5_8B) {
-            input_size               = -1;
-            patch_size               = 2;
-            in_channels              = 16;
-            depth                    = 38;
-            mlp_ratio                = 4.0f;
-            adm_in_channels          = 2048;
-            out_channels             = 16;
-            pos_embed_max_size       = 192;
-            num_patchs               = 36864;  // 192 * 192
-            context_size             = 4096;
-            context_embedder_out_dim = 2432;
-            qk_norm                  = "rms";
-        } else if (version == VERSION_SD3_5_2B) {
-            input_size               = -1;
-            patch_size               = 2;
-            in_channels              = 16;
-            depth                    = 24;
-            d_self                   = 12;
-            mlp_ratio                = 4.0f;
-            adm_in_channels          = 2048;
-            out_channels             = 16;
-            pos_embed_max_size       = 384;
-            num_patchs               = 147456;
-            context_size             = 4096;
-            context_embedder_out_dim = 1536;
-            qk_norm                  = "rms";
+
+        // read tensors from tensor_types
+        for (auto pair : tensor_types) {
+            std::string tensor_name = pair.first;
+            if (tensor_name.find("model.diffusion_model.") == std::string::npos)
+                continue;
+            size_t jb = tensor_name.find("joint_blocks.");
+            if (jb != std::string::npos) {
+                tensor_name     = tensor_name.substr(jb);  // remove prefix
+                int block_depth = atoi(tensor_name.substr(13, tensor_name.find(".", 13)).c_str());
+                if (block_depth + 1 > depth) {
+                    depth = block_depth + 1;
+                }
+                if (tensor_name.find("attn.ln") != std::string::npos) {
+                    if (tensor_name.find(".bias") != std::string::npos) {
+                        qk_norm = "ln";
+                    } else {
+                        qk_norm = "rms";
+                    }
+                }
+                if (tensor_name.find("attn2") != std::string::npos) {
+                    if (block_depth > d_self) {
+                        d_self = block_depth;
+                    }
+                }
+            }
         }
 
+        if (d_self >= 0) {
+            pos_embed_max_size *= 2;
+            num_patchs *= 4;
+        }
+
+        LOG_INFO("MMDiT layers: %d (including %d MMDiT-x layers)", depth, d_self + 1);
+
         int64_t default_out_channels = in_channels;
         hidden_size                  = 64 * depth;
+        context_embedder_out_dim     = 64 * depth;
         int64_t num_heads            = depth;
 
         blocks["x_embedder"] = std::shared_ptr<GGMLBlock>(new PatchEmbed(input_size, patch_size, in_channels, hidden_size, true));
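With the per-version table gone, the model width falls out of the detected depth: hidden_size = 64 * depth, and context_embedder_out_dim now follows the same rule. That reproduces exactly the constants the hunk deletes, as a quick check shows:

    #include <cstdio>

    int main() {
        // depth 24: SD3-2B and SD3.5-2B; depth 38: SD3.5-8B
        int depths[] = {24, 38};
        for (int depth : depths) {
            long hidden_size = 64L * depth;  // also context_embedder_out_dim
            printf("depth=%d -> hidden_size=%ld\n", depth, hidden_size);
        }
        // depth=24 -> 1536, depth=38 -> 2432, matching the removed table.
    }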
@@ -870,15 +866,16 @@ public:
         return x;
     }
 };
 
 struct MMDiTRunner : public GGMLRunner {
     MMDiT mmdit;
 
+    static std::map<std::string, enum ggml_type> empty_tensor_types;
+
     MMDiTRunner(ggml_backend_t backend,
-                ggml_type wtype,
-                SDVersion version = VERSION_SD3_2B)
-        : GGMLRunner(backend, wtype), mmdit(version) {
-        mmdit.init(params_ctx, wtype);
+                std::map<std::string, enum ggml_type>& tensor_types = empty_tensor_types,
+                const std::string prefix = "")
+        : GGMLRunner(backend), mmdit(tensor_types) {
+        mmdit.init(params_ctx, tensor_types, prefix);
     }
 
     std::string get_desc() {
@@ -975,7 +972,7 @@ struct MMDiTRunner : public GGMLRunner {
     // ggml_backend_t backend = ggml_backend_cuda_init(0);
     ggml_backend_t backend    = ggml_backend_cpu_init();
     ggml_type model_data_type = GGML_TYPE_F16;
-    std::shared_ptr<MMDiTRunner> mmdit = std::shared_ptr<MMDiTRunner>(new MMDiTRunner(backend, model_data_type));
+    std::shared_ptr<MMDiTRunner> mmdit = std::shared_ptr<MMDiTRunner>(new MMDiTRunner(backend));
     {
         LOG_INFO("loading from '%s'", file_path.c_str());
model.cpp (57 changed lines)
@@ -927,6 +927,7 @@ bool ModelLoader::init_from_gguf_file(const std::string& file_path, const std::s
         GGML_ASSERT(ggml_nbytes(dummy) == tensor_storage.nbytes());
 
         tensor_storages.push_back(tensor_storage);
+        tensor_storages_types[tensor_storage.name] = tensor_storage.type;
     }
 
     gguf_free(ctx_gguf_);
@@ -1071,6 +1072,7 @@ bool ModelLoader::init_from_safetensors_file(const std::string& file_path, const
         }
 
         tensor_storages.push_back(tensor_storage);
+        tensor_storages_types[tensor_storage.name] = tensor_storage.type;
 
         // LOG_DEBUG("%s %s", tensor_storage.to_string().c_str(), dtype.c_str());
     }
@@ -1296,7 +1298,7 @@ bool ModelLoader::parse_data_pkl(uint8_t* buffer,
                                  zip_t* zip,
                                  std::string dir,
                                  size_t file_index,
-                                 const std::string& prefix) {
+                                 const std::string prefix) {
     uint8_t* buffer_end = buffer + buffer_size;
     if (buffer[0] == 0x80) {  // proto
         if (buffer[1] != 2) {
@@ -1401,6 +1403,8 @@ bool ModelLoader::parse_data_pkl(uint8_t* buffer,
                 // printf(" ZIP got tensor %s \n ", reader.tensor_storage.name.c_str());
                 reader.tensor_storage.name = prefix + reader.tensor_storage.name;
                 tensor_storages.push_back(reader.tensor_storage);
+                tensor_storages_types[reader.tensor_storage.name] = reader.tensor_storage.type;
+
                 // LOG_DEBUG("%s", reader.tensor_storage.name.c_str());
                 // reset
                 reader = PickleTensorReader();
@@ -1455,28 +1459,12 @@ bool ModelLoader::init_from_ckpt_file(const std::string& file_path, const std::s
 
 SDVersion ModelLoader::get_sd_version() {
     TensorStorage token_embedding_weight;
-    bool is_flux    = false;
-    bool is_schnell = true;
-    bool is_lite    = true;
-    bool is_sd3     = false;
     for (auto& tensor_storage : tensor_storages) {
-        if (tensor_storage.name.find("model.diffusion_model.guidance_in.in_layer.weight") != std::string::npos) {
-            is_schnell = false;
-        }
         if (tensor_storage.name.find("model.diffusion_model.double_blocks.") != std::string::npos) {
-            is_flux = true;
+            return VERSION_FLUX;
         }
-        if (tensor_storage.name.find("model.diffusion_model.double_blocks.8") != std::string::npos) {
-            is_lite = false;
-        }
-        if (tensor_storage.name.find("joint_blocks.0.x_block.attn2.ln_q.weight") != std::string::npos) {
-            return VERSION_SD3_5_2B;
-        }
-        if (tensor_storage.name.find("joint_blocks.37.x_block.attn.ln_q.weight") != std::string::npos) {
-            return VERSION_SD3_5_8B;
-        }
-        if (tensor_storage.name.find("model.diffusion_model.joint_blocks.23.") != std::string::npos) {
-            is_sd3 = true;
+        if (tensor_storage.name.find("model.diffusion_model.joint_blocks.") != std::string::npos) {
+            return VERSION_SD3;
         }
         if (tensor_storage.name.find("conditioner.embedders.1") != std::string::npos) {
             return VERSION_SDXL;
@@ -1498,19 +1486,7 @@ SDVersion ModelLoader::get_sd_version() {
             // break;
         }
     }
-    if (is_flux) {
-        if (is_schnell) {
-            GGML_ASSERT(!is_lite);
-            return VERSION_FLUX_SCHNELL;
-        } else if (is_lite) {
-            return VERSION_FLUX_LITE;
-        } else {
-            return VERSION_FLUX_DEV;
-        }
-    }
-    if (is_sd3) {
-        return VERSION_SD3_2B;
-    }
     if (token_embedding_weight.ne[0] == 768) {
         return VERSION_SD1;
     } else if (token_embedding_weight.ne[0] == 1024) {
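Detection thus collapses to picking the model family; the exact variant (dev/schnell/lite, 2B/8B) is no longer decided here but recovered later by the runners from the tensor names. A standalone mimic of the simplified loop:

    #include <cstdio>
    #include <string>
    #include <vector>

    enum SDVersion { VERSION_SD1, VERSION_SD3, VERSION_FLUX };

    static SDVersion get_sd_version(const std::vector<std::string>& names) {
        for (const std::string& name : names) {
            if (name.find("model.diffusion_model.double_blocks.") != std::string::npos)
                return VERSION_FLUX;
            if (name.find("model.diffusion_model.joint_blocks.") != std::string::npos)
                return VERSION_SD3;
        }
        return VERSION_SD1;  // fall through to the token-embedding width checks
    }

    int main() {
        std::vector<std::string> names = {
            "model.diffusion_model.joint_blocks.0.x_block.attn.qkv.weight"};
        printf("version=%d\n", get_sd_version(names));  // 1 -> VERSION_SD3
    }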
@@ -1603,6 +1579,21 @@ ggml_type ModelLoader::get_vae_wtype() {
     return GGML_TYPE_COUNT;
 }
 
+void ModelLoader::set_wtype_override(ggml_type wtype, std::string prefix) {
+    for (auto& pair : tensor_storages_types) {
+        if (prefix.size() < 1 || pair.first.substr(0, prefix.size()) == prefix) {
+            for (auto& tensor_storage : tensor_storages) {
+                if (tensor_storage.name == pair.first) {
+                    if (tensor_should_be_converted(tensor_storage, wtype)) {
+                        pair.second = wtype;
+                    }
+                    break;
+                }
+            }
+        }
+    }
+}
+
 std::string ModelLoader::load_merges() {
     std::string merges_utf8_str(reinterpret_cast<const char*>(merges_utf8_c_str), sizeof(merges_utf8_c_str));
     return merges_utf8_str;
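set_wtype_override() rewrites the recorded per-tensor types before any runner is constructed, which is how a user-supplied weight type (and the SDXL keep-the-VAE-in-F32 special case in stable-diffusion.cpp below) still takes effect under the new scheme. A standalone mimic, with tensor_should_be_converted() reduced to an unconditional overwrite:

    #include <cstdio>
    #include <map>
    #include <string>

    enum ggml_type { GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_Q8_0 };

    static void set_wtype_override(std::map<std::string, ggml_type>& types,
                                   ggml_type wtype, const std::string& prefix = "") {
        for (auto& pair : types) {
            if (prefix.empty() || pair.first.compare(0, prefix.size(), prefix) == 0) {
                pair.second = wtype;  // real code first checks tensor_should_be_converted()
            }
        }
    }

    int main() {
        std::map<std::string, ggml_type> types = {
            {"model.diffusion_model.x.weight", GGML_TYPE_F16},
            {"vae.decoder.y.weight", GGML_TYPE_F16},
        };
        set_wtype_override(types, GGML_TYPE_Q8_0);         // user-requested type for everything
        set_wtype_override(types, GGML_TYPE_F32, "vae.");  // but keep the VAE in F32
        for (auto& p : types) printf("%s -> %d\n", p.first.c_str(), p.second);
    }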
model.h (17 changed lines)
@@ -22,24 +22,20 @@ enum SDVersion {
     VERSION_SD2,
     VERSION_SDXL,
     VERSION_SVD,
-    VERSION_SD3_2B,
-    VERSION_FLUX_DEV,
-    VERSION_FLUX_SCHNELL,
-    VERSION_SD3_5_8B,
-    VERSION_SD3_5_2B,
-    VERSION_FLUX_LITE,
+    VERSION_SD3,
+    VERSION_FLUX,
     VERSION_COUNT,
 };
 
 static inline bool sd_version_is_flux(SDVersion version) {
-    if (version == VERSION_FLUX_DEV || version == VERSION_FLUX_SCHNELL || version == VERSION_FLUX_LITE) {
+    if (version == VERSION_FLUX) {
         return true;
     }
     return false;
 }
 
 static inline bool sd_version_is_sd3(SDVersion version) {
-    if (version == VERSION_SD3_2B || version == VERSION_SD3_5_8B || version == VERSION_SD3_5_2B) {
+    if (version == VERSION_SD3) {
         return true;
     }
     return false;
@@ -170,7 +166,7 @@ protected:
                         zip_t* zip,
                         std::string dir,
                         size_t file_index,
-                        const std::string& prefix);
+                        const std::string prefix);
 
     bool init_from_gguf_file(const std::string& file_path, const std::string& prefix = "");
     bool init_from_safetensors_file(const std::string& file_path, const std::string& prefix = "");
@@ -178,12 +174,15 @@ protected:
     bool init_from_diffusers_file(const std::string& file_path, const std::string& prefix = "");
 
 public:
+    std::map<std::string, enum ggml_type> tensor_storages_types;
+
     bool init_from_file(const std::string& file_path, const std::string& prefix = "");
     SDVersion get_sd_version();
     ggml_type get_sd_wtype();
     ggml_type get_conditioner_wtype();
     ggml_type get_diffusion_model_wtype();
     ggml_type get_vae_wtype();
+    void set_wtype_override(ggml_type wtype, std::string prefix = "");
     bool load_tensors(on_new_tensor_cb_t on_new_tensor_cb, ggml_backend_t backend);
     bool load_tensors(std::map<std::string, struct ggml_tensor*>& tensors,
                       ggml_backend_t backend,
pmid.hpp (11 changed lines)
@@ -623,15 +623,15 @@ public:
     std::vector<float> zeros_right;
 
 public:
-    PhotoMakerIDEncoder(ggml_backend_t backend, ggml_type wtype, SDVersion version = VERSION_SDXL, PMVersion pm_v = PM_VERSION_1, float sty = 20.f)
-        : GGMLRunner(backend, wtype),
+    PhotoMakerIDEncoder(ggml_backend_t backend, std::map<std::string, enum ggml_type>& tensor_types, const std::string prefix, SDVersion version = VERSION_SDXL, PMVersion pm_v = PM_VERSION_1, float sty = 20.f)
+        : GGMLRunner(backend),
           version(version),
           pm_version(pm_v),
           style_strength(sty) {
         if (pm_version == PM_VERSION_1) {
-            id_encoder.init(params_ctx, wtype);
+            id_encoder.init(params_ctx, tensor_types, prefix);
         } else if (pm_version == PM_VERSION_2) {
-            id_encoder2.init(params_ctx, wtype);
+            id_encoder2.init(params_ctx, tensor_types, prefix);
         }
     }
 
@@ -780,11 +780,10 @@ struct PhotoMakerIDEmbed : public GGMLRunner {
     bool applied = false;
 
     PhotoMakerIDEmbed(ggml_backend_t backend,
-                      ggml_type wtype,
                       ModelLoader* ml,
                       const std::string& file_path = "",
                       const std::string& prefix    = "")
-        : file_path(file_path), GGMLRunner(backend, wtype), model_loader(ml) {
+        : file_path(file_path), GGMLRunner(backend), model_loader(ml) {
         if (!model_loader->init_from_file(file_path, prefix)) {
             load_failed = true;
         }
stable-diffusion.cpp
@@ -29,12 +29,8 @@ const char* model_version_to_str[] = {
     "SD 2.x",
     "SDXL",
     "SVD",
-    "SD3 2B",
-    "Flux Dev",
-    "Flux Schnell",
-    "SD3.5 8B",
-    "SD3.5 2B",
-    "Flux Lite 8B"};
+    "SD3.x",
+    "Flux"};
 
 const char* sampling_methods_str[] = {
     "Euler A",
@@ -264,16 +260,18 @@ public:
             conditioner_wtype = wtype;
             diffusion_model_wtype = wtype;
             vae_wtype = wtype;
+            model_loader.set_wtype_override(wtype);
         }

         if (version == VERSION_SDXL) {
             vae_wtype = GGML_TYPE_F32;
+            model_loader.set_wtype_override(GGML_TYPE_F32, "vae.");
         }

-        LOG_INFO("Weight type: %s", ggml_type_name(model_wtype));
-        LOG_INFO("Conditioner weight type: %s", ggml_type_name(conditioner_wtype));
-        LOG_INFO("Diffusion model weight type: %s", ggml_type_name(diffusion_model_wtype));
-        LOG_INFO("VAE weight type: %s", ggml_type_name(vae_wtype));
+        LOG_INFO("Weight type: %s", model_wtype != SD_TYPE_COUNT ? ggml_type_name(model_wtype) : "??");
+        LOG_INFO("Conditioner weight type: %s", conditioner_wtype != SD_TYPE_COUNT ? ggml_type_name(conditioner_wtype) : "??");
+        LOG_INFO("Diffusion model weight type: %s", diffusion_model_wtype != SD_TYPE_COUNT ? ggml_type_name(diffusion_model_wtype) : "??");
+        LOG_INFO("VAE weight type: %s", vae_wtype != SD_TYPE_COUNT ? ggml_type_name(vae_wtype) : "??");

         LOG_DEBUG("ggml tensor size = %d bytes", (int)sizeof(ggml_tensor));

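Worked through the stand-in sketch from earlier, the SDXL branch behaves like this: only "vae."-prefixed tensors are forced to f32 (the SDXL VAE is known to misbehave in f16), while everything else keeps its requested type. Tensor names below are made up for illustration:

// Reusing tensor_type_map and set_wtype_override_sketch from the earlier sketch.
tensor_type_map types = {
    {"vae.decoder.conv_in.weight", GGML_TYPE_F16},
    {"model.diffusion_model.out.2.weight", GGML_TYPE_F16},
};
set_wtype_override_sketch(types, GGML_TYPE_F32, "vae.");
// types["vae.decoder.conv_in.weight"]         -> GGML_TYPE_F32
// types["model.diffusion_model.out.2.weight"] -> GGML_TYPE_F16 (untouched)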
@@ -294,15 +292,15 @@ public:
         }

         if (version == VERSION_SVD) {
-            clip_vision = std::make_shared<FrozenCLIPVisionEmbedder>(backend, conditioner_wtype);
+            clip_vision = std::make_shared<FrozenCLIPVisionEmbedder>(backend, model_loader.tensor_storages_types);
             clip_vision->alloc_params_buffer();
             clip_vision->get_param_tensors(tensors);

-            diffusion_model = std::make_shared<UNetModel>(backend, diffusion_model_wtype, version);
+            diffusion_model = std::make_shared<UNetModel>(backend, model_loader.tensor_storages_types, version);
             diffusion_model->alloc_params_buffer();
             diffusion_model->get_param_tensors(tensors);

-            first_stage_model = std::make_shared<AutoEncoderKL>(backend, vae_wtype, vae_decode_only, true, version);
+            first_stage_model = std::make_shared<AutoEncoderKL>(backend, model_loader.tensor_storages_types, "first_stage_model", vae_decode_only, true, version);
             LOG_DEBUG("vae_decode_only %d", vae_decode_only);
             first_stage_model->alloc_params_buffer();
             first_stage_model->get_param_tensors(tensors, "first_stage_model");
@@ -327,19 +325,20 @@ public:
             if (diffusion_flash_attn) {
                 LOG_WARN("flash attention in this diffusion model is currently unsupported!");
             }
-            cond_stage_model = std::make_shared<SD3CLIPEmbedder>(clip_backend, conditioner_wtype);
-            diffusion_model = std::make_shared<MMDiTModel>(backend, diffusion_model_wtype, version);
+            cond_stage_model = std::make_shared<SD3CLIPEmbedder>(clip_backend, model_loader.tensor_storages_types);
+            diffusion_model = std::make_shared<MMDiTModel>(backend, model_loader.tensor_storages_types);
         } else if (sd_version_is_flux(version)) {
-            cond_stage_model = std::make_shared<FluxCLIPEmbedder>(clip_backend, conditioner_wtype);
-            diffusion_model = std::make_shared<FluxModel>(backend, diffusion_model_wtype, version, diffusion_flash_attn);
+            cond_stage_model = std::make_shared<FluxCLIPEmbedder>(clip_backend, model_loader.tensor_storages_types);
+            diffusion_model = std::make_shared<FluxModel>(backend, model_loader.tensor_storages_types, diffusion_flash_attn);
         } else {
             if (id_embeddings_path.find("v2") != std::string::npos) {
-                cond_stage_model = std::make_shared<FrozenCLIPEmbedderWithCustomWords>(clip_backend, conditioner_wtype, embeddings_path, version, PM_VERSION_2);
+                cond_stage_model = std::make_shared<FrozenCLIPEmbedderWithCustomWords>(clip_backend, model_loader.tensor_storages_types, embeddings_path, version, PM_VERSION_2);
             } else {
-                cond_stage_model = std::make_shared<FrozenCLIPEmbedderWithCustomWords>(clip_backend, conditioner_wtype, embeddings_path, version);
+                cond_stage_model = std::make_shared<FrozenCLIPEmbedderWithCustomWords>(clip_backend, model_loader.tensor_storages_types, embeddings_path, version);
             }
-            diffusion_model = std::make_shared<UNetModel>(backend, diffusion_model_wtype, version, diffusion_flash_attn);
+            diffusion_model = std::make_shared<UNetModel>(backend, model_loader.tensor_storages_types, version, diffusion_flash_attn);
         }

         cond_stage_model->alloc_params_buffer();
         cond_stage_model->get_param_tensors(tensors);

@@ -353,11 +352,11 @@ public:
             } else {
                 vae_backend = backend;
             }
-            first_stage_model = std::make_shared<AutoEncoderKL>(vae_backend, vae_wtype, vae_decode_only, false, version);
+            first_stage_model = std::make_shared<AutoEncoderKL>(vae_backend, model_loader.tensor_storages_types, "first_stage_model", vae_decode_only, false, version);
             first_stage_model->alloc_params_buffer();
             first_stage_model->get_param_tensors(tensors, "first_stage_model");
         } else {
-            tae_first_stage = std::make_shared<TinyAutoEncoder>(backend, vae_wtype, vae_decode_only);
+            tae_first_stage = std::make_shared<TinyAutoEncoder>(backend, model_loader.tensor_storages_types, "decoder.layers", vae_decode_only);
         }
         // first_stage_model->get_param_tensors(tensors, "first_stage_model.");

@@ -369,17 +368,17 @@ public:
             } else {
                 controlnet_backend = backend;
             }
-            control_net = std::make_shared<ControlNet>(controlnet_backend, diffusion_model_wtype, version);
+            control_net = std::make_shared<ControlNet>(controlnet_backend, model_loader.tensor_storages_types, version);
         }

         if (id_embeddings_path.find("v2") != std::string::npos) {
-            pmid_model = std::make_shared<PhotoMakerIDEncoder>(backend, model_wtype, version, PM_VERSION_2);
+            pmid_model = std::make_shared<PhotoMakerIDEncoder>(backend, model_loader.tensor_storages_types, "pmid", version, PM_VERSION_2);
             LOG_INFO("using PhotoMaker Version 2");
         } else {
-            pmid_model = std::make_shared<PhotoMakerIDEncoder>(backend, model_wtype, version);
+            pmid_model = std::make_shared<PhotoMakerIDEncoder>(backend, model_loader.tensor_storages_types, "pmid", version);
         }
         if (id_embeddings_path.size() > 0) {
-            pmid_lora = std::make_shared<LoraModel>(backend, model_wtype, id_embeddings_path, "");
+            pmid_lora = std::make_shared<LoraModel>(backend, id_embeddings_path, "");
             if (!pmid_lora->load_from_file(true)) {
                 LOG_WARN("load photomaker lora tensors from %s failed", id_embeddings_path.c_str());
                 return false;
@@ -532,9 +531,12 @@ public:
             denoiser = std::make_shared<DiscreteFlowDenoiser>();
         } else if (sd_version_is_flux(version)) {
             LOG_INFO("running in Flux FLOW mode");
-            float shift = 1.15f;
-            if (version == VERSION_FLUX_SCHNELL) {
-                shift = 1.0f;  // TODO: validate
+            float shift = 1.0f;  // TODO: validate
+            for (auto pair : model_loader.tensor_storages_types) {
+                if (pair.first.find("model.diffusion_model.guidance_in.in_layer.weight") != std::string::npos) {
+                    shift = 1.15f;
+                    break;
+                }
             }
             denoiser = std::make_shared<FluxFlowDenoiser>(shift);
         } else if (is_using_v_parameterization) {
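This loop replaces the old hard-coded VERSION_FLUX_SCHNELL check: guidance-distilled checkpoints (Flux Dev and derivatives) carry `guidance_in` tensors, while Schnell-style models do not, so the presence of that weight name selects the 1.15 flow shift. The same probe, pulled out as a self-contained sketch (reusing the stand-in map type from earlier):

// Pick the flow shift by probing the checkpoint's tensor names.
float pick_flux_shift_sketch(const tensor_type_map& types) {
    for (const auto& pair : types) {
        if (pair.first.find("model.diffusion_model.guidance_in.in_layer.weight") != std::string::npos) {
            return 1.15f;  // guidance-distilled (Dev-style) checkpoint
        }
    }
    return 1.0f;  // no guidance_in tensors: Schnell-style checkpoint
}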
@@ -633,7 +635,7 @@ public:
             LOG_WARN("can not find %s or %s for lora %s", st_file_path.c_str(), ckpt_file_path.c_str(), lora_name.c_str());
             return;
         }
-        LoraModel lora(backend, model_wtype, file_path);
+        LoraModel lora(backend, file_path);
         if (!lora.load_from_file()) {
             LOG_WARN("load lora tensors from %s failed", file_path.c_str());
             return;
@@ -215,8 +215,7 @@ SD_API sd_image_t* img2vid(sd_ctx_t* sd_ctx,
 typedef struct upscaler_ctx_t upscaler_ctx_t;

 SD_API upscaler_ctx_t* new_upscaler_ctx(const char* esrgan_path,
-                                        int n_threads,
-                                        enum sd_type_t wtype);
+                                        int n_threads);
 SD_API void free_upscaler_ctx(upscaler_ctx_t* upscaler_ctx);

 SD_API sd_image_t upscale(upscaler_ctx_t* upscaler_ctx, sd_image_t input_image, uint32_t upscale_factor);
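With the `wtype` parameter gone, the upscaler derives its weight type from the model file itself. A usage sketch against the header as it now stands (the model path and thread count are placeholders; `input_image` is an sd_image_t the caller already holds):

upscaler_ctx_t* ctx = new_upscaler_ctx("realesrgan-x4plus.safetensors", /*n_threads=*/4);
if (ctx != NULL) {
    sd_image_t out = upscale(ctx, input_image, /*upscale_factor=*/4);
    // ... use out ...
    free_upscaler_ctx(ctx);
}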
t5.hpp
@@ -441,8 +441,9 @@ protected:
     int64_t hidden_size;
     float eps;

-    void init_params(struct ggml_context* ctx, ggml_type wtype) {
-        params["weight"] = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, hidden_size);
+    void init_params(struct ggml_context* ctx, std::map<std::string, enum ggml_type>& tensor_types, const std::string prefix = "") {
+        enum ggml_type wtype = GGML_TYPE_F32;  // (tensor_types.find(prefix + "weight") != tensor_types.end()) ? tensor_types[prefix + "weight"] : GGML_TYPE_F32;
+        params["weight"] = ggml_new_tensor_1d(ctx, wtype, hidden_size);
     }

 public:
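The commented-out lookup above shows the recurring pattern throughout this patch: consult the map under `prefix + name`, fall back to a per-tensor default (here the norm weight is pinned to f32 regardless). Factored out as a hypothetical helper, not part of the patch:

enum ggml_type lookup_wtype(const std::map<std::string, enum ggml_type>& tensor_types,
                            const std::string& prefix,
                            const std::string& name,
                            enum ggml_type fallback) {
    auto it = tensor_types.find(prefix + name);
    return it != tensor_types.end() ? it->second : fallback;
}
// e.g. the RMSNorm weight would read:
//   lookup_wtype(tensor_types, prefix, "weight", GGML_TYPE_F32);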
@@ -717,14 +718,15 @@ struct T5Runner : public GGMLRunner {
     std::vector<int> relative_position_bucket_vec;

     T5Runner(ggml_backend_t backend,
-             ggml_type wtype,
+             std::map<std::string, enum ggml_type>& tensor_types,
+             const std::string prefix,
              int64_t num_layers = 24,
              int64_t model_dim = 4096,
              int64_t ff_dim = 10240,
              int64_t num_heads = 64,
              int64_t vocab_size = 32128)
-        : GGMLRunner(backend, wtype), model(num_layers, model_dim, ff_dim, num_heads, vocab_size) {
-        model.init(params_ctx, wtype);
+        : GGMLRunner(backend), model(num_layers, model_dim, ff_dim, num_heads, vocab_size) {
+        model.init(params_ctx, tensor_types, prefix);
     }

     std::string get_desc() {
@@ -854,14 +856,17 @@ struct T5Embedder {
     T5UniGramTokenizer tokenizer;
     T5Runner model;

+    static std::map<std::string, enum ggml_type> empty_tensor_types;
+
     T5Embedder(ggml_backend_t backend,
-               ggml_type wtype,
+               std::map<std::string, enum ggml_type>& tensor_types = empty_tensor_types,
+               const std::string prefix = "",
                int64_t num_layers = 24,
                int64_t model_dim = 4096,
                int64_t ff_dim = 10240,
                int64_t num_heads = 64,
                int64_t vocab_size = 32128)
-        : model(backend, wtype, num_layers, model_dim, ff_dim, num_heads, vocab_size) {
+        : model(backend, tensor_types, prefix, num_layers, model_dim, ff_dim, num_heads, vocab_size) {
     }

     void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors, const std::string prefix) {
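The static `empty_tensor_types` member exists only so the map reference can have a default argument: a non-const lvalue reference cannot bind to a temporary, so a named static object stands in when the caller supplies nothing. The trick in miniature (illustrative names only):

#include <map>
#include <string>

#include "ggml.h"

struct embedder_sketch {
    // Defined once outside the class; lives for the whole program.
    static std::map<std::string, enum ggml_type> empty_tensor_types;

    embedder_sketch(std::map<std::string, enum ggml_type>& tensor_types = empty_tensor_types) {
        // tensor_types is a valid (possibly empty) map either way.
        (void)tensor_types;
    }
};
std::map<std::string, enum ggml_type> embedder_sketch::empty_tensor_types;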
@@ -951,7 +956,7 @@ struct T5Embedder {
     // ggml_backend_t backend = ggml_backend_cuda_init(0);
     ggml_backend_t backend = ggml_backend_cpu_init();
     ggml_type model_data_type = GGML_TYPE_F32;
-    std::shared_ptr<T5Embedder> t5 = std::shared_ptr<T5Embedder>(new T5Embedder(backend, model_data_type));
+    std::shared_ptr<T5Embedder> t5 = std::shared_ptr<T5Embedder>(new T5Embedder(backend));
     {
         LOG_INFO("loading from '%s'", file_path.c_str());

tae.hpp
@@ -188,12 +188,13 @@ struct TinyAutoEncoder : public GGMLRunner {
     bool decode_only = false;

     TinyAutoEncoder(ggml_backend_t backend,
-                    ggml_type wtype,
+                    std::map<std::string, enum ggml_type>& tensor_types,
+                    const std::string prefix,
                     bool decoder_only = true)
         : decode_only(decoder_only),
           taesd(decode_only),
-          GGMLRunner(backend, wtype) {
-        taesd.init(params_ctx, wtype);
+          GGMLRunner(backend) {
+        taesd.init(params_ctx, tensor_types, prefix);
     }

     std::string get_desc() {
unet.hpp
@@ -532,11 +532,12 @@ struct UNetModelRunner : public GGMLRunner {
     UnetModelBlock unet;

     UNetModelRunner(ggml_backend_t backend,
-                    ggml_type wtype,
+                    std::map<std::string, enum ggml_type>& tensor_types,
+                    const std::string prefix,
                     SDVersion version = VERSION_SD1,
                     bool flash_attn = false)
-        : GGMLRunner(backend, wtype), unet(version, flash_attn) {
-        unet.init(params_ctx, wtype);
+        : GGMLRunner(backend), unet(version, flash_attn) {
+        unet.init(params_ctx, tensor_types, prefix);
     }

     std::string get_desc() {
11
upscaler.cpp
11
upscaler.cpp
@ -32,13 +32,17 @@ struct UpscalerGGML {
|
|||||||
LOG_DEBUG("Using SYCL backend");
|
LOG_DEBUG("Using SYCL backend");
|
||||||
backend = ggml_backend_sycl_init(0);
|
backend = ggml_backend_sycl_init(0);
|
||||||
#endif
|
#endif
|
||||||
|
ModelLoader model_loader;
|
||||||
|
if (!model_loader.init_from_file(esrgan_path)) {
|
||||||
|
LOG_ERROR("init model loader from file failed: '%s'", esrgan_path.c_str());
|
||||||
|
}
|
||||||
|
model_loader.set_wtype_override(model_data_type);
|
||||||
if (!backend) {
|
if (!backend) {
|
||||||
LOG_DEBUG("Using CPU backend");
|
LOG_DEBUG("Using CPU backend");
|
||||||
backend = ggml_backend_cpu_init();
|
backend = ggml_backend_cpu_init();
|
||||||
}
|
}
|
||||||
LOG_INFO("Upscaler weight type: %s", ggml_type_name(model_data_type));
|
LOG_INFO("Upscaler weight type: %s", ggml_type_name(model_data_type));
|
||||||
esrgan_upscaler = std::make_shared<ESRGAN>(backend, model_data_type);
|
esrgan_upscaler = std::make_shared<ESRGAN>(backend, model_loader.tensor_storages_types);
|
||||||
if (!esrgan_upscaler->load_from_file(esrgan_path)) {
|
if (!esrgan_upscaler->load_from_file(esrgan_path)) {
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
@@ -96,8 +100,7 @@ struct upscaler_ctx_t {
 };

 upscaler_ctx_t* new_upscaler_ctx(const char* esrgan_path_c_str,
-                                 int n_threads,
-                                 enum sd_type_t wtype) {
+                                 int n_threads) {
     upscaler_ctx_t* upscaler_ctx = (upscaler_ctx_t*)malloc(sizeof(upscaler_ctx_t));
     if (upscaler_ctx == NULL) {
         return NULL;
vae.hpp
@@ -163,8 +163,9 @@ public:

 class VideoResnetBlock : public ResnetBlock {
 protected:
-    void init_params(struct ggml_context* ctx, ggml_type wtype) {
-        params["mix_factor"] = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1);
+    void init_params(struct ggml_context* ctx, std::map<std::string, enum ggml_type>& tensor_types, const std::string prefix = "") {
+        enum ggml_type wtype = (tensor_types.find(prefix + "mix_factor") != tensor_types.end()) ? tensor_types[prefix + "mix_factor"] : GGML_TYPE_F32;
+        params["mix_factor"] = ggml_new_tensor_1d(ctx, wtype, 1);
     }

     float get_alpha() {
@@ -524,12 +525,13 @@ struct AutoEncoderKL : public GGMLRunner {
     AutoencodingEngine ae;

     AutoEncoderKL(ggml_backend_t backend,
-                  ggml_type wtype,
+                  std::map<std::string, enum ggml_type>& tensor_types,
+                  const std::string prefix,
                   bool decode_only = false,
                   bool use_video_decoder = false,
                   SDVersion version = VERSION_SD1)
-        : decode_only(decode_only), ae(decode_only, use_video_decoder, version), GGMLRunner(backend, wtype) {
-        ae.init(params_ctx, wtype);
+        : decode_only(decode_only), ae(decode_only, use_video_decoder, version), GGMLRunner(backend) {
+        ae.init(params_ctx, tensor_types, prefix);
     }

     std::string get_desc() {