chore: add .clang-tidy configuration and apply modernize checks (#902)

This commit is contained in:
leejet
2025-10-18 23:23:40 +08:00
committed by GitHub
parent 64a7698347
commit d05e46ca5e
32 changed files with 766 additions and 747 deletions

View File

@@ -8,18 +8,18 @@
#include "wan.hpp"
// Bundle of all inputs/conditioning handed to a diffusion backbone's compute().
// All ggml_tensor pointers are non-owning and optional (nullptr = absent);
// which ones are required depends on the concrete model — confirm per backend.
struct DiffusionParams {
    struct ggml_tensor* x          = nullptr;  // latent input
    struct ggml_tensor* timesteps  = nullptr;  // diffusion timestep tensor
    struct ggml_tensor* context    = nullptr;  // text/conditioning embedding
    struct ggml_tensor* c_concat   = nullptr;  // channel-concat conditioning
    struct ggml_tensor* y          = nullptr;  // extra (e.g. pooled) conditioning
    struct ggml_tensor* guidance   = nullptr;
    std::vector<ggml_tensor*> ref_latents = {};
    bool increase_ref_index               = false;
    int num_video_frames                  = -1;  // -1 = not a video model / unset
    std::vector<struct ggml_tensor*> controls = {};  // e.g. ControlNet inputs
    float control_strength                    = 0.f;
    struct ggml_tensor* vace_context          = nullptr;
    float vace_strength                       = 1.f;
    std::vector<int> skip_layers              = {};  // layer indices to skip (SLG)
};
@@ -28,8 +28,8 @@ struct DiffusionModel {
// Abstract interface implemented by every diffusion backbone wrapper below.
virtual std::string get_desc() = 0;
// Runs the model with the given inputs; result is written via *output,
// allocated from output_ctx when provided (both optional — see the concrete
// implementations for the exact output handling).
virtual void compute(int n_threads,
                     DiffusionParams diffusion_params,
                     struct ggml_tensor** output     = nullptr,
                     struct ggml_context* output_ctx = nullptr) = 0;
virtual void alloc_params_buffer() = 0;
virtual void free_params_buffer()  = 0;
virtual void free_compute_buffer() = 0;
@@ -49,38 +49,38 @@ struct UNetModel : public DiffusionModel {
: unet(backend, offload_params_to_cpu, tensor_types, "model.diffusion_model", version, flash_attn) {
}
std::string get_desc() {
std::string get_desc() override {
return unet.get_desc();
}
void alloc_params_buffer() {
void alloc_params_buffer() override {
unet.alloc_params_buffer();
}
void free_params_buffer() {
void free_params_buffer() override {
unet.free_params_buffer();
}
void free_compute_buffer() {
void free_compute_buffer() override {
unet.free_compute_buffer();
}
void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors) {
void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors) override {
unet.get_param_tensors(tensors, "model.diffusion_model");
}
size_t get_params_buffer_size() {
size_t get_params_buffer_size() override {
return unet.get_params_buffer_size();
}
int64_t get_adm_in_channels() {
int64_t get_adm_in_channels() override {
return unet.unet.adm_in_channels;
}
void compute(int n_threads,
DiffusionParams diffusion_params,
struct ggml_tensor** output = NULL,
struct ggml_context* output_ctx = NULL) {
struct ggml_tensor** output = nullptr,
struct ggml_context* output_ctx = nullptr) override {
return unet.compute(n_threads,
diffusion_params.x,
diffusion_params.timesteps,
@@ -103,38 +103,38 @@ struct MMDiTModel : public DiffusionModel {
: mmdit(backend, offload_params_to_cpu, flash_attn, tensor_types, "model.diffusion_model") {
}
std::string get_desc() {
std::string get_desc() override {
return mmdit.get_desc();
}
void alloc_params_buffer() {
void alloc_params_buffer() override {
mmdit.alloc_params_buffer();
}
void free_params_buffer() {
void free_params_buffer() override {
mmdit.free_params_buffer();
}
void free_compute_buffer() {
void free_compute_buffer() override {
mmdit.free_compute_buffer();
}
void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors) {
void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors) override {
mmdit.get_param_tensors(tensors, "model.diffusion_model");
}
size_t get_params_buffer_size() {
size_t get_params_buffer_size() override {
return mmdit.get_params_buffer_size();
}
int64_t get_adm_in_channels() {
int64_t get_adm_in_channels() override {
return 768 + 1280;
}
void compute(int n_threads,
DiffusionParams diffusion_params,
struct ggml_tensor** output = NULL,
struct ggml_context* output_ctx = NULL) {
struct ggml_tensor** output = nullptr,
struct ggml_context* output_ctx = nullptr) override {
return mmdit.compute(n_threads,
diffusion_params.x,
diffusion_params.timesteps,
@@ -158,38 +158,38 @@ struct FluxModel : public DiffusionModel {
: flux(backend, offload_params_to_cpu, tensor_types, "model.diffusion_model", version, flash_attn, use_mask) {
}
std::string get_desc() {
std::string get_desc() override {
return flux.get_desc();
}
void alloc_params_buffer() {
void alloc_params_buffer() override {
flux.alloc_params_buffer();
}
void free_params_buffer() {
void free_params_buffer() override {
flux.free_params_buffer();
}
void free_compute_buffer() {
void free_compute_buffer() override {
flux.free_compute_buffer();
}
void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors) {
void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors) override {
flux.get_param_tensors(tensors, "model.diffusion_model");
}
size_t get_params_buffer_size() {
size_t get_params_buffer_size() override {
return flux.get_params_buffer_size();
}
int64_t get_adm_in_channels() {
int64_t get_adm_in_channels() override {
return 768;
}
void compute(int n_threads,
DiffusionParams diffusion_params,
struct ggml_tensor** output = NULL,
struct ggml_context* output_ctx = NULL) {
struct ggml_tensor** output = nullptr,
struct ggml_context* output_ctx = nullptr) override {
return flux.compute(n_threads,
diffusion_params.x,
diffusion_params.timesteps,
@@ -218,45 +218,45 @@ struct WanModel : public DiffusionModel {
: prefix(prefix), wan(backend, offload_params_to_cpu, tensor_types, prefix, version, flash_attn) {
}
std::string get_desc() {
std::string get_desc() override {
return wan.get_desc();
}
void alloc_params_buffer() {
void alloc_params_buffer() override {
wan.alloc_params_buffer();
}
void free_params_buffer() {
void free_params_buffer() override {
wan.free_params_buffer();
}
void free_compute_buffer() {
void free_compute_buffer() override {
wan.free_compute_buffer();
}
void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors) {
void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors) override {
wan.get_param_tensors(tensors, prefix);
}
size_t get_params_buffer_size() {
size_t get_params_buffer_size() override {
return wan.get_params_buffer_size();
}
int64_t get_adm_in_channels() {
int64_t get_adm_in_channels() override {
return 768;
}
void compute(int n_threads,
DiffusionParams diffusion_params,
struct ggml_tensor** output = NULL,
struct ggml_context* output_ctx = NULL) {
struct ggml_tensor** output = nullptr,
struct ggml_context* output_ctx = nullptr) override {
return wan.compute(n_threads,
diffusion_params.x,
diffusion_params.timesteps,
diffusion_params.context,
diffusion_params.y,
diffusion_params.c_concat,
NULL,
nullptr,
diffusion_params.vace_context,
diffusion_params.vace_strength,
output,
@@ -277,38 +277,38 @@ struct QwenImageModel : public DiffusionModel {
: prefix(prefix), qwen_image(backend, offload_params_to_cpu, tensor_types, prefix, version, flash_attn) {
}
std::string get_desc() {
std::string get_desc() override {
return qwen_image.get_desc();
}
void alloc_params_buffer() {
void alloc_params_buffer() override {
qwen_image.alloc_params_buffer();
}
void free_params_buffer() {
void free_params_buffer() override {
qwen_image.free_params_buffer();
}
void free_compute_buffer() {
void free_compute_buffer() override {
qwen_image.free_compute_buffer();
}
void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors) {
void get_param_tensors(std::map<std::string, struct ggml_tensor*>& tensors) override {
qwen_image.get_param_tensors(tensors, prefix);
}
size_t get_params_buffer_size() {
size_t get_params_buffer_size() override {
return qwen_image.get_params_buffer_size();
}
int64_t get_adm_in_channels() {
int64_t get_adm_in_channels() override {
return 768;
}
void compute(int n_threads,
DiffusionParams diffusion_params,
struct ggml_tensor** output = NULL,
struct ggml_context* output_ctx = NULL) {
struct ggml_tensor** output = nullptr,
struct ggml_context* output_ctx = nullptr) override {
return qwen_image.compute(n_threads,
diffusion_params.x,
diffusion_params.timesteps,