style: format code

leejet 2024-08-25 00:19:37 +08:00
parent d08d7fa632
commit c837c5d9cc
10 changed files with 937 additions and 947 deletions
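The hunks below are consistent with an automated formatting pass: constructor initializer lists are broken before the colon, includes are sorted, repeated blank lines are collapsed, and short loop bodies are moved onto their own line. For orientation only, here is a minimal .clang-format sketch that would produce roughly this style; the commit does not say which tool or configuration was used, so every option below is an assumption, not the project's actual settings:

# hypothetical .clang-format sketch, not taken from the repository
IndentWidth: 4
BreakConstructorInitializers: BeforeColon
ConstructorInitializerIndentWidth: 4
SortIncludes: true
MaxEmptyLinesToKeep: 1
EmptyLineBeforeAccessModifier: LogicalBlock
AllowShortLoopsOnASingleLine: false

With such a file at the repository root, running clang-format -i over the headers and sources would apply the style in place.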

View File

@@ -10,8 +10,8 @@ struct SDCondition {
     struct ggml_tensor* c_concat = NULL;
 
     SDCondition() = default;
-    SDCondition(struct ggml_tensor* c_crossattn, struct ggml_tensor* c_vector, struct ggml_tensor* c_concat) :
-        c_crossattn(c_crossattn), c_vector(c_vector), c_concat(c_concat) {}
+    SDCondition(struct ggml_tensor* c_crossattn, struct ggml_tensor* c_vector, struct ggml_tensor* c_concat)
+        : c_crossattn(c_crossattn), c_vector(c_vector), c_concat(c_concat) {}
 };
 
 struct Conditioner {
@@ -978,7 +978,6 @@ struct SD3CLIPEmbedder : public Conditioner {
     }
 };
 
-
 struct FluxCLIPEmbedder : public Conditioner {
     ggml_type wtype;
     CLIPTokenizer clip_l_tokenizer;

View File

@@ -351,7 +351,6 @@ struct DiscreteFlowDenoiser : public Denoiser {
     }
 };
 
-
 float flux_time_shift(float mu, float sigma, float t) {
     return std::exp(mu) / (std::exp(mu) + std::pow((1.0 / t - 1.0), sigma));
 }
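As an aside, the flux_time_shift helper above computes, directly from the code shown, shift(t) = exp(mu) / (exp(mu) + (1/t - 1)^sigma), which maps t in (0, 1] back into (0, 1], with mu controlling how strongly the timesteps are shifted.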

View File

@@ -1,9 +1,9 @@
 #ifndef __DIFFUSION_MODEL_H__
 #define __DIFFUSION_MODEL_H__
 
+#include "flux.hpp"
 #include "mmdit.hpp"
 #include "unet.hpp"
-#include "flux.hpp"
 
 struct DiffusionModel {
     virtual void compute(int n_threads,
@@ -124,7 +124,6 @@ struct MMDiTModel : public DiffusionModel {
     }
 };
 
-
 struct FluxModel : public DiffusionModel {
     Flux::FluxRunner flux;
 

View File

@@ -53,7 +53,6 @@ public:
     }
 };
 
-
 struct QKNorm : public GGMLBlock {
 public:
     QKNorm(int64_t dim) {
@@ -146,7 +145,6 @@ public:
         auto qkv_proj = std::dynamic_pointer_cast<Linear>(blocks["qkv"]);
         auto norm = std::dynamic_pointer_cast<QKNorm>(blocks["norm"]);
 
-
         auto qkv = qkv_proj->forward(ctx, x);
         auto qkv_vec = split_qkv(ctx, qkv);
         int64_t head_dim = qkv_vec[0]->ne[0] / num_heads;
@@ -176,7 +174,6 @@ public:
     }
 };
 
-
 struct ModulationOut {
     ggml_tensor* shift = NULL;
     ggml_tensor* scale = NULL;
@@ -190,8 +187,10 @@ struct Modulation : public GGMLBlock {
 public:
     bool is_double;
     int multiplier;
+
 public:
-    Modulation(int64_t dim, bool is_double): is_double(is_double) {
+    Modulation(int64_t dim, bool is_double)
+        : is_double(is_double) {
         multiplier = is_double ? 6 : 3;
         blocks["lin"] = std::shared_ptr<GGMLBlock>(new Linear(dim, dim * multiplier));
     }
@@ -289,7 +288,6 @@ public:
         auto txt_mlp_0 = std::dynamic_pointer_cast<Linear>(blocks["txt_mlp.0"]);
         auto txt_mlp_2 = std::dynamic_pointer_cast<Linear>(blocks["txt_mlp.2"]);
 
-
         auto img_mods = img_mod->forward(ctx, vec);
         ModulationOut img_mod1 = img_mods[0];
         ModulationOut img_mod2 = img_mods[1];
@@ -361,18 +359,18 @@ public:
     }
 };
 
 struct SingleStreamBlock : public GGMLBlock {
 public:
     int64_t num_heads;
     int64_t hidden_size;
     int64_t mlp_hidden_dim;
 
 public:
     SingleStreamBlock(int64_t hidden_size,
                       int64_t num_heads,
                       float mlp_ratio = 4.0f,
-                      float qk_scale = 0.f) :
-        hidden_size(hidden_size), num_heads(num_heads) {
+                      float qk_scale = 0.f)
+        : hidden_size(hidden_size), num_heads(num_heads) {
         int64_t head_dim = hidden_size / num_heads;
         float scale = qk_scale;
         if (scale <= 0.f) {
@@ -445,7 +443,6 @@ public:
     }
 };
 
-
 struct LastLayer : public GGMLBlock {
 public:
     LastLayer(int64_t hidden_size,
@@ -497,7 +494,6 @@ struct FluxParams {
     bool guidance_embed = true;
 };
 
-
 struct Flux : public GGMLBlock {
 public:
     std::vector<float> linspace(float start, float end, int num) {
@@ -609,9 +605,9 @@ public:
             // std::cout << trans_ids[0][i] << " " << trans_ids[1][i] << " " << trans_ids[2][i] << std::endl;
         }
 
         int emb_dim = 0;
-        for (int d : axes_dim) emb_dim += d / 2;
-
+        for (int d : axes_dim)
+            emb_dim += d / 2;
 
         std::vector<std::vector<float>> emb(bs * pos_len, std::vector<float>(emb_dim * 2 * 2, 0.0));
         int offset = 0;
@@ -629,10 +625,12 @@ public:
         return flatten(emb);
     }
 
 public:
     FluxParams params;
 
     Flux() {}
-    Flux(FluxParams params) : params(params) {
+
+    Flux(FluxParams params)
+        : params(params) {
         int64_t out_channels = params.in_channels;
         int64_t pe_dim = params.hidden_size / params.num_heads;
@@ -802,7 +800,6 @@ public:
     }
 };
 
-
 struct FluxRunner : public GGMLRunner {
 public:
     FluxParams flux_params;
@@ -853,7 +850,6 @@ public:
         // pe->data = NULL;
         set_backend_tensor_data(pe, pe_vec.data());
 
-
         struct ggml_tensor* out = flux.forward(compute_ctx,
                                                x,
                                                timesteps,

View File

@@ -1427,7 +1427,6 @@ ggml_type ModelLoader::get_conditioner_wtype() {
     return GGML_TYPE_COUNT;
 }
 
-
 ggml_type ModelLoader::get_diffusion_model_wtype() {
     for (auto& tensor_storage : tensor_storages) {
         if (is_unused_tensor(tensor_storage.name)) {

View File

@@ -165,4 +165,3 @@ public:
 };
 
-
 #endif // __MODEL_H__

View File

@@ -74,7 +74,6 @@ public:
     ggml_type diffusion_model_wtype = GGML_TYPE_COUNT;
     ggml_type vae_wtype = GGML_TYPE_COUNT;
 
-
     SDVersion version;
     bool vae_decode_only = false;
     bool free_params_immediately = false;