Commit d05e46c

chore: add .clang-tidy configuration and apply modernize checks (#902)
1 parent: 64a7698

32 files changed: +766 -747 lines

.clang-tidy

Lines changed: 10 additions & 0 deletions
@@ -0,0 +1,10 @@
+Checks: >
+  modernize-make-shared,
+  modernize-use-nullptr,
+  modernize-use-override,
+  modernize-pass-by-value,
+  modernize-return-braced-init-list,
+  modernize-deprecated-headers,
+HeaderFilterRegex: '^$'
+WarningsAsErrors: ''
+FormatStyle: none
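
For context, a minimal self-contained sketch (hypothetical names, not code from this repository) of the rewrites performed by the three checks that account for most of this diff:

#include <memory>

struct Block {
    virtual void init() {}
    virtual ~Block() = default;
};

struct MyBlock : Block {
    // modernize-use-override: overriding virtuals gain an explicit override
    void init() override {}
};

int main() {
    // modernize-use-nullptr: NULL (or 0) used as a pointer becomes nullptr
    int* data = nullptr;
    // modernize-make-shared: std::shared_ptr<T>(new T(...)) becomes std::make_shared<T>(...)
    auto block = std::make_shared<MyBlock>();
    block->init();
    (void)data;
    return 0;
}

clang-tidy picks this configuration up from the nearest .clang-tidy in a parent directory; given a compile database, an invocation along the lines of clang-tidy -p build --fix clip.hpp (details depend on the build setup) applies the fixes automatically. HeaderFilterRegex '^$' suppresses findings from included headers, so only the files named on the command line are reported, and the empty WarningsAsErrors keeps all findings as warnings.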

clip.hpp

Lines changed: 18 additions & 18 deletions
@@ -550,7 +550,7 @@ class CLIPEmbeddings : public GGMLBlock {
     int64_t num_positions;
     bool force_clip_f32;
 
-    void init_params(struct ggml_context* ctx, const String2GGMLType& tensor_types = {}, const std::string prefix = "") {
+    void init_params(struct ggml_context* ctx, const String2GGMLType& tensor_types = {}, const std::string prefix = "") override {
         enum ggml_type token_wtype = GGML_TYPE_F32;
         if (!force_clip_f32) {
             token_wtype = get_type(prefix + "token_embedding.weight", tensor_types, GGML_TYPE_F32);
@@ -587,7 +587,7 @@ class CLIPEmbeddings : public GGMLBlock {
 
         GGML_ASSERT(input_ids->ne[0] == position_embed_weight->ne[1]);
         input_ids = ggml_reshape_3d(ctx, input_ids, input_ids->ne[0], 1, input_ids->ne[1]);
-        auto token_embedding = ggml_get_rows(ctx, custom_embed_weight != NULL ? custom_embed_weight : token_embed_weight, input_ids);
+        auto token_embedding = ggml_get_rows(ctx, custom_embed_weight != nullptr ? custom_embed_weight : token_embed_weight, input_ids);
         token_embedding = ggml_reshape_3d(ctx, token_embedding, token_embedding->ne[0], token_embedding->ne[1], token_embedding->ne[3]);
 
         // token_embedding + position_embedding
@@ -606,7 +606,7 @@ class CLIPVisionEmbeddings : public GGMLBlock {
     int64_t image_size;
     int64_t num_patches;
     int64_t num_positions;
-    void init_params(struct ggml_context* ctx, const String2GGMLType& tensor_types = {}, const std::string prefix = "") {
+    void init_params(struct ggml_context* ctx, const String2GGMLType& tensor_types = {}, const std::string prefix = "") override {
         enum ggml_type patch_wtype = GGML_TYPE_F16;
         enum ggml_type class_wtype = GGML_TYPE_F32;
         enum ggml_type position_wtype = GGML_TYPE_F32;
@@ -641,10 +641,10 @@ class CLIPVisionEmbeddings : public GGMLBlock {
         // concat(patch_embedding, class_embedding) + position_embedding
         struct ggml_tensor* patch_embedding;
         int64_t N = pixel_values->ne[3];
-        patch_embedding = ggml_nn_conv_2d(ctx, pixel_values, patch_embed_weight, NULL, patch_size, patch_size);  // [N, embed_dim, image_size // pacht_size, image_size // pacht_size]
-        patch_embedding = ggml_reshape_3d(ctx, patch_embedding, num_patches, embed_dim, N);  // [N, embed_dim, num_patches]
-        patch_embedding = ggml_cont(ctx, ggml_permute(ctx, patch_embedding, 1, 0, 2, 3));  // [N, num_patches, embed_dim]
-        patch_embedding = ggml_reshape_4d(ctx, patch_embedding, 1, embed_dim, num_patches, N);  // [N, num_patches, embed_dim, 1]
+        patch_embedding = ggml_nn_conv_2d(ctx, pixel_values, patch_embed_weight, nullptr, patch_size, patch_size);  // [N, embed_dim, image_size // pacht_size, image_size // pacht_size]
+        patch_embedding = ggml_reshape_3d(ctx, patch_embedding, num_patches, embed_dim, N);  // [N, embed_dim, num_patches]
+        patch_embedding = ggml_cont(ctx, ggml_permute(ctx, patch_embedding, 1, 0, 2, 3));  // [N, num_patches, embed_dim]
+        patch_embedding = ggml_reshape_4d(ctx, patch_embedding, 1, embed_dim, num_patches, N);  // [N, num_patches, embed_dim, 1]
 
         struct ggml_tensor* class_embedding = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, embed_dim, N);
         class_embedding = ggml_repeat(ctx, class_embed_weight, class_embedding);  // [N, embed_dim]
@@ -669,7 +669,7 @@ enum CLIPVersion {
 
 class CLIPTextModel : public GGMLBlock {
 protected:
-    void init_params(struct ggml_context* ctx, const String2GGMLType& tensor_types = {}, const std::string prefix = "") {
+    void init_params(struct ggml_context* ctx, const String2GGMLType& tensor_types = {}, const std::string prefix = "") override {
         if (version == OPEN_CLIP_VIT_BIGG_14) {
             enum ggml_type wtype = GGML_TYPE_F32;
             params["text_projection"] = ggml_new_tensor_2d(ctx, wtype, projection_dim, hidden_size);
@@ -735,8 +735,8 @@ class CLIPTextModel : public GGMLBlock {
         if (return_pooled) {
             auto text_projection = params["text_projection"];
             ggml_tensor* pooled = ggml_view_1d(ctx, x, hidden_size, x->nb[1] * max_token_idx);
-            if (text_projection != NULL) {
-                pooled = ggml_nn_linear(ctx, pooled, text_projection, NULL);
+            if (text_projection != nullptr) {
+                pooled = ggml_nn_linear(ctx, pooled, text_projection, nullptr);
             } else {
                 LOG_DEBUG("identity projection");
             }
@@ -814,7 +814,7 @@ class CLIPProjection : public UnaryBlock {
     int64_t out_features;
     bool transpose_weight;
 
-    void init_params(struct ggml_context* ctx, const String2GGMLType& tensor_types = {}, const std::string prefix = "") {
+    void init_params(struct ggml_context* ctx, const String2GGMLType& tensor_types = {}, const std::string prefix = "") override {
         enum ggml_type wtype = get_type(prefix + "weight", tensor_types, GGML_TYPE_F32);
         if (transpose_weight) {
             params["weight"] = ggml_new_tensor_2d(ctx, wtype, out_features, in_features);
@@ -831,12 +831,12 @@ class CLIPProjection : public UnaryBlock {
           out_features(out_features),
           transpose_weight(transpose_weight) {}
 
-    struct ggml_tensor* forward(struct ggml_context* ctx, struct ggml_tensor* x) {
+    struct ggml_tensor* forward(struct ggml_context* ctx, struct ggml_tensor* x) override {
         struct ggml_tensor* w = params["weight"];
         if (transpose_weight) {
             w = ggml_cont(ctx, ggml_transpose(ctx, w));
         }
-        return ggml_nn_linear(ctx, x, w, NULL);
+        return ggml_nn_linear(ctx, x, w, nullptr);
     }
 };
 
@@ -894,7 +894,7 @@ struct CLIPTextModelRunner : public GGMLRunner {
         model.init(params_ctx, tensor_types, prefix);
     }
 
-    std::string get_desc() {
+    std::string get_desc() override {
         return "clip";
     }
 
@@ -921,17 +921,17 @@ struct CLIPTextModelRunner : public GGMLRunner {
 
     struct ggml_cgraph* build_graph(struct ggml_tensor* input_ids,
                                     int num_custom_embeddings = 0,
-                                    void* custom_embeddings_data = NULL,
+                                    void* custom_embeddings_data = nullptr,
                                     size_t max_token_idx = 0,
                                     bool return_pooled = false,
                                     int clip_skip = -1) {
         struct ggml_cgraph* gf = ggml_new_graph(compute_ctx);
 
         input_ids = to_backend(input_ids);
 
-        struct ggml_tensor* embeddings = NULL;
+        struct ggml_tensor* embeddings = nullptr;
 
-        if (num_custom_embeddings > 0 && custom_embeddings_data != NULL) {
+        if (num_custom_embeddings > 0 && custom_embeddings_data != nullptr) {
             auto token_embed_weight = model.get_token_embed_weight();
             auto custom_embeddings = ggml_new_tensor_2d(compute_ctx,
                                                         token_embed_weight->type,
@@ -958,7 +958,7 @@ struct CLIPTextModelRunner : public GGMLRunner {
                  bool return_pooled,
                  int clip_skip,
                  ggml_tensor** output,
-                 ggml_context* output_ctx = NULL) {
+                 ggml_context* output_ctx = nullptr) {
         auto get_graph = [&]() -> struct ggml_cgraph* {
             return build_graph(input_ids, num_custom_embeddings, custom_embeddings_data, max_token_idx, return_pooled, clip_skip);
         };
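
Most of the clip.hpp hunks simply append override to init_params, forward, and get_desc, which override virtuals declared on the GGMLBlock/GGMLRunner base classes. The keyword is more than documentation: if a base signature later drifts, the compiler rejects the stale overrider instead of silently treating it as a new function. A hedged sketch with hypothetical names:

#include <string>

struct Runner {
    virtual std::string get_desc() { return "base"; }
    virtual ~Runner() = default;
};

struct ClipRunner : Runner {
    // Without override, renaming or re-qualifying Runner::get_desc would leave
    // this as a silent non-overriding sibling; with it, the build fails loudly.
    std::string get_desc() override { return "clip"; }
};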

common.hpp

Lines changed: 7 additions & 7 deletions
@@ -121,7 +121,7 @@ class ResBlock : public GGMLBlock {
         }
     }
 
-    virtual struct ggml_tensor* forward(struct ggml_context* ctx, struct ggml_tensor* x, struct ggml_tensor* emb = NULL) {
+    virtual struct ggml_tensor* forward(struct ggml_context* ctx, struct ggml_tensor* x, struct ggml_tensor* emb = nullptr) {
         // For dims==3, we reduce dimension from 5d to 4d by merging h and w, in order not to change ggml
         // [N, c, t, h, w] => [N, c, t, h * w]
         // x: [N, channels, h, w] if dims == 2 else [N, channels, t, h, w]
@@ -131,7 +131,7 @@ class ResBlock : public GGMLBlock {
         auto out_layers_0 = std::dynamic_pointer_cast<GroupNorm32>(blocks["out_layers.0"]);
         auto out_layers_3 = std::dynamic_pointer_cast<UnaryBlock>(blocks["out_layers.3"]);
 
-        if (emb == NULL) {
+        if (emb == nullptr) {
             GGML_ASSERT(skip_t_emb);
         }
 
@@ -182,7 +182,7 @@ class GEGLU : public UnaryBlock {
     int64_t dim_in;
     int64_t dim_out;
 
-    void init_params(struct ggml_context* ctx, const String2GGMLType& tensor_types = {}, std::string prefix = "") {
+    void init_params(struct ggml_context* ctx, const String2GGMLType& tensor_types = {}, std::string prefix = "") override {
         enum ggml_type wtype = get_type(prefix + "proj.weight", tensor_types, GGML_TYPE_F32);
         enum ggml_type bias_wtype = GGML_TYPE_F32;
         params["proj.weight"] = ggml_new_tensor_2d(ctx, wtype, dim_in, dim_out * 2);
@@ -193,7 +193,7 @@ class GEGLU : public UnaryBlock {
     GEGLU(int64_t dim_in, int64_t dim_out)
         : dim_in(dim_in), dim_out(dim_out) {}
 
-    struct ggml_tensor* forward(struct ggml_context* ctx, struct ggml_tensor* x) {
+    struct ggml_tensor* forward(struct ggml_context* ctx, struct ggml_tensor* x) override {
         // x: [ne3, ne2, ne1, dim_in]
         // return: [ne3, ne2, ne1, dim_out]
         struct ggml_tensor* w = params["proj.weight"];
@@ -222,7 +222,7 @@ class GELU : public UnaryBlock {
         blocks["proj"] = std::shared_ptr<GGMLBlock>(new Linear(dim_in, dim_out, bias));
     }
 
-    struct ggml_tensor* forward(struct ggml_context* ctx, struct ggml_tensor* x) {
+    struct ggml_tensor* forward(struct ggml_context* ctx, struct ggml_tensor* x) override {
         // x: [ne3, ne2, ne1, dim_in]
         // return: [ne3, ne2, ne1, dim_out]
         auto proj = std::dynamic_pointer_cast<Linear>(blocks["proj"]);
@@ -325,7 +325,7 @@ class CrossAttention : public GGMLBlock {
         auto k = to_k->forward(ctx, context);  // [N, n_context, inner_dim]
         auto v = to_v->forward(ctx, context);  // [N, n_context, inner_dim]
 
-        x = ggml_nn_attention_ext(ctx, backend, q, k, v, n_head, NULL, false, false, flash_attn);  // [N, n_token, inner_dim]
+        x = ggml_nn_attention_ext(ctx, backend, q, k, v, n_head, nullptr, false, false, flash_attn);  // [N, n_token, inner_dim]
 
         x = to_out_0->forward(ctx, x);  // [N, n_token, query_dim]
         return x;
@@ -483,7 +483,7 @@ class SpatialTransformer : public GGMLBlock {
 
 class AlphaBlender : public GGMLBlock {
 protected:
-    void init_params(struct ggml_context* ctx, const String2GGMLType& tensor_types = {}, std::string prefix = "") {
+    void init_params(struct ggml_context* ctx, const String2GGMLType& tensor_types = {}, std::string prefix = "") override {
         // Get the type of the "mix_factor" tensor from the input tensors map with the specified prefix
         enum ggml_type wtype = GGML_TYPE_F32;
         params["mix_factor"] = ggml_new_tensor_1d(ctx, wtype, 1);
