From fbd42b6fc16d14fbd362993fa1d083740a05f113 Mon Sep 17 00:00:00 2001
From: stduhpf
Date: Sat, 1 Mar 2025 04:45:39 +0100
Subject: [PATCH] fix: fix embeddings with quantized models (#601)

---
 clip.hpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/clip.hpp b/clip.hpp
index cfc4cb3..2307ee3 100644
--- a/clip.hpp
+++ b/clip.hpp
@@ -546,7 +546,7 @@ protected:
     int64_t num_positions;
 
     void init_params(struct ggml_context* ctx, std::map<std::string, enum ggml_type>& tensor_types, const std::string prefix = "") {
-        enum ggml_type token_wtype = (tensor_types.find(prefix + "token_embedding.weight") != tensor_types.end()) ? tensor_types[prefix + "token_embedding.weight"] : GGML_TYPE_F32;
+        enum ggml_type token_wtype = GGML_TYPE_F32; //(tensor_types.find(prefix + "token_embedding.weight") != tensor_types.end()) ? tensor_types[prefix + "token_embedding.weight"] : GGML_TYPE_F32;
         enum ggml_type position_wtype = GGML_TYPE_F32; //(tensor_types.find(prefix + "position_embedding.weight") != tensor_types.end()) ? tensor_types[prefix + "position_embedding.weight"] : GGML_TYPE_F32;
 
         params["token_embedding.weight"] = ggml_new_tensor_2d(ctx, token_wtype, embed_dim, vocab_size);
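
Note on the likely rationale (not part of the patch itself): the subject line refers to custom (textual-inversion) embeddings, which are loaded as F32 vectors and appended to the token embedding table at runtime. The sketch below is a hedged illustration of why pinning token_wtype to GGML_TYPE_F32 matters, assuming ggml_concat requires both operands to share a ggml_type; the function name append_custom_embeddings and the shape comments are illustrative, not taken from clip.hpp.

    // Hedged sketch of the failure mode this patch works around.
    // Assumption: ggml_concat asserts that both operands have the same type.
    #include "ggml.h"

    static struct ggml_tensor* append_custom_embeddings(
        struct ggml_context* ctx,
        struct ggml_tensor* token_embed_weight,   // [embed_dim, vocab_size]
        struct ggml_tensor* custom_embed_weight)  // [embed_dim, n_custom], loaded as F32
    {
        // With token_wtype forced to GGML_TYPE_F32 both operands match and the
        // concat along the vocab dimension succeeds; with a quantized table
        // (e.g. GGML_TYPE_Q8_0) the type check inside ggml_concat fails.
        return ggml_concat(ctx, token_embed_weight, custom_embed_weight, 1);
    }

The trade-off is memory: for SD1.x's CLIP-L text encoder (vocab_size = 49408, embed_dim = 768), keeping token_embedding.weight in F32 costs roughly 145 MiB instead of the smaller quantized footprint, in exchange for custom embeddings working with quantized models.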