Skip to content

Commit 6973add

Browse files
authored
add .clang-tidy, fix found bugs (#1175)
* add .clang-tidy, fix found 'bugs' Signed-off-by: daquexian <[email protected]> * apply code-format changes Co-authored-by: daquexian <[email protected]>
1 parent 1b61b30 commit 6973add

File tree

14 files changed

+63
-40
lines changed

14 files changed

+63
-40
lines changed

.clang-tidy

+14
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,14 @@
1+
# Copy from OneFlow
2+
3+
# `-allow-enabling-analyzer-alpha-checkers` should be passed to clang-tidy for CSA checkers named `clang-analyzer-alpha.*` (or `-allow-enabling-alpha-checkers` for run-clang-tidy.py)
4+
# `aggressive-binary-operation-simplification` should be enabled (via `-Xclang -analyzer-config -Xclang aggressive-binary-operation-simplification=true` in clang)
5+
# there is some problem in `clang-analyzer-alpha.clone.*`, so do not enable it
6+
# `clang-analyzer-alpha.deadcode.*` is just too verbose to enable
7+
Checks: '-*, maybe-*, clang-analyzer-core.*, clang-analyzer-unix.*, clang-analyzer-cplusplus.*, clang-analyzer-nullability.*, clang-analyzer-deadcode.*, clang-analyzer-security.*, clang-analyzer-optin.cplusplus.*, clang-analyzer-optin.performance.*, clang-analyzer-alpha.core.*, clang-analyzer-alpha.cplusplus.*, clang-analyzer-alpha.security.*, cppcoreguidelines-avoid-goto, cppcoreguidelines-interfaces-global-init, cppcoreguidelines-no-malloc, cppcoreguidelines-prefer-member-initializer, cppcoreguidelines-pro-type-member-init, cppcoreguidelines-pro-type-static-cast-downcast, cppcoreguidelines-slicing, cppcoreguidelines-special-member-functions, performance-unnecessary-value-param, performance-unnecessary-copy-initialization, performance-noexcept-move-constructor, performance-no-automatic-move, performance-move-const-arg, performance-implicit-conversion-in-loop, performance-for-range-copy, google-default-arguments, google-global-names-in-headers, google-explicit-constructor'
8+
9+
CheckOptions:
10+
# `cppcoreguidelines-special-member-functions` is enabled, refer to https://en.cppreference.com/w/cpp/language/rule_of_three
11+
- key: cppcoreguidelines-special-member-functions.AllowSoleDefaultDtor
12+
value: True
13+
- key: performance-move-const-arg.CheckTriviallyCopyableMove
14+
value: False

.gitignore

+4
Original file line numberDiff line numberDiff line change
@@ -53,3 +53,7 @@ build*/
5353

5454
# 3rd
5555
3rdparty
56+
57+
# Python
58+
*.egg-info/
59+
__pycache__/

examples/common/tengine_operations.c

+3-1
Original file line numberDiff line numberDiff line change
@@ -21,6 +21,7 @@
2121
* Copyright (c) 2020, OPEN AI LAB
2222
*/
2323

24+
#include <assert.h>
2425
#include <stdint.h>
2526
#include <math.h>
2627
#include <string.h>
@@ -479,7 +480,7 @@ image copyMaker(image im, int top, int bottom, int left, int right, float value)
479480
void save_image(image im, const char* name)
480481
{
481482
char buff[256] = {0};
482-
unsigned char* data = (unsigned char*)calloc((size_t)im.w * im.h * im.c, sizeof(char));
483+
unsigned char* data = (unsigned char*)calloc((size_t)im.w * im.h * im.c, sizeof(unsigned char));
483484
int i, k;
484485
for (k = 0; k < im.c; ++k)
485486
{
@@ -1019,6 +1020,7 @@ static void sort_cls_score(cls_score* array, int left, int right)
10191020

10201021
void print_topk(float* data, int total_num, int topk)
10211022
{
1023+
assert(total_num >= topk);
10221024
cls_score* cls_scores = (cls_score*)malloc(total_num * sizeof(cls_score));
10231025
for (int i = 0; i < total_num; i++)
10241026
{

examples/tm_classification.c

+4
Original file line numberDiff line numberDiff line change
@@ -71,6 +71,10 @@ int tengine_classify(const char* model_file, const char* image_file, int img_h,
7171
int img_size = img_h * img_w * 3;
7272
int dims[] = {1, 3, img_h, img_w}; // nchw
7373
float* input_data = (float*)malloc(img_size * sizeof(float));
74+
if (input_data == NULL)
75+
{
76+
return -1;
77+
}
7478

7579
tensor_t input_tensor = get_graph_input_tensor(graph, 0, 0);
7680
if (input_tensor == NULL)

examples/tm_classification_uint8.c

+4
Original file line numberDiff line numberDiff line change
@@ -91,6 +91,10 @@ int tengine_classify(const char* model_file, const char* image_file, int img_h,
9191
int img_size = img_h * img_w * 3;
9292
int dims[] = {1, 3, img_h, img_w}; // nchw
9393
uint8_t* input_data = (uint8_t*)malloc(img_size);
94+
if (input_data == NULL)
95+
{
96+
return -1;
97+
}
9498

9599
tensor_t input_tensor = get_graph_input_tensor(graph, 0, 0);
96100
if (input_tensor == NULL)

examples/tm_mobilefacenet.cpp

+1-2
Original file line numberDiff line numberDiff line change
@@ -70,7 +70,6 @@ int getFeature(const char* imagefile, float* feature)
7070
int height = MOBILE_FACE_HEIGHT;
7171
int width = MOBILE_FACE_WIDTH;
7272
int img_size = height * width * 3;
73-
int dims[] = {1, 3, height, width};
7473
float means[3] = {DEFAULT_MEAN1, DEFAULT_MEAN2, DEFAULT_MEAN3};
7574
float scales[3] = {1, 1, 1};
7675
std::vector<float> input_data(img_size);
@@ -180,4 +179,4 @@ int main(int argc, char* argv[])
180179

181180
release();
182181
return 0;
183-
}
182+
}

pytengine/setup.py

-13
Original file line numberDiff line numberDiff line change
@@ -22,19 +22,6 @@
2222
dst = dest + "/tengine/" + libtengine
2323
shutil.copyfile(src, dst)
2424

25-
files = [
26-
"__init__",
27-
"base",
28-
"context",
29-
"device",
30-
"graph",
31-
"libinfo",
32-
"node",
33-
"tengine",
34-
"tensor",
35-
libtengine,
36-
]
37-
3825
setup(
3926
name="pytengine",
4027
version="0.9.1",

source/device/cpu/op/reverse/reverse_ref.c

+9-8
Original file line numberDiff line numberDiff line change
@@ -33,6 +33,7 @@
3333
#include "device/cpu/cpu_module.h"
3434

3535
#include <math.h>
36+
#include <assert.h>
3637

3738
struct reverse_param
3839
{
@@ -47,12 +48,12 @@ int ref_reverse_fp32(void* input, void* input_axis, void* output, const struct r
4748
int* axis_ptr = (int*)input_axis;
4849
int axis = axis_ptr[0];
4950

50-
int in_w = param->in_shape[3];
51-
int in_hw = param->in_shape[2] * in_w;
52-
int in_chw = param->in_shape[1] * in_hw;
53-
5451
if (param->dim_size == 4)
5552
{
53+
int in_w = param->in_shape[3];
54+
int in_hw = param->in_shape[2] * in_w;
55+
int in_chw = param->in_shape[1] * in_hw;
56+
5657
if (axis == 0 || axis == -4)
5758
{
5859
for (int i = 0; i < param->in_shape[0]; i++)
@@ -136,12 +137,12 @@ int ref_reverse_uint8(void* input, void* input_axis, void* output, const struct
136137
int* axis_ptr = (int*)input_axis;
137138
int axis = axis_ptr[0];
138139

139-
int in_w = param->in_shape[3];
140-
int in_hw = param->in_shape[2] * in_w;
141-
int in_chw = param->in_shape[1] * in_hw;
142-
143140
if (param->dim_size == 4)
144141
{
142+
int in_w = param->in_shape[3];
143+
int in_hw = param->in_shape[2] * in_w;
144+
int in_chw = param->in_shape[1] * in_hw;
145+
145146
if (axis == 0 || axis == -4)
146147
{
147148
for (int i = 0; i < param->in_shape[0]; i++)

source/olreport/onlinereportutil.c

+6-2
Original file line numberDiff line numberDiff line change
@@ -168,9 +168,13 @@ void get_os_kernel_info(char* os, int maxlen)
168168
return;
169169
}
170170

171-
int offset = fscanf(fp, "%s", os);
171+
int res = fscanf(fp, "%s", os);
172+
if (res != 1)
173+
{
174+
return;
175+
}
172176
fclose(fp);
173-
offset = strlen(os);
177+
int offset = strlen(os);
174178
os[offset] = ' ';
175179
offset += 1;
176180

source/utility/float.c

+2-1
Original file line numberDiff line numberDiff line change
@@ -23,6 +23,7 @@
2323
*/
2424

2525
#include "utility/float.h"
26+
#include <math.h>
2627

2728
#define BF16_EXP_MAX (256 - 1) // 2^8 - 1
2829
#define FP16_EXP_MAX (32 - 1) // 2^5 - 1
@@ -99,7 +100,7 @@ fp32_t fp16_to_fp32(fp16_t package)
99100
return data.value;
100101
}
101102

102-
return data.value;
103+
return NAN;
103104
}
104105

105106
fp16_t fp32_to_fp16(fp32_t value)

tools/convert_tool/caffe/caffe2tengine.cpp

+6-1
Original file line numberDiff line numberDiff line change
@@ -24,6 +24,8 @@
2424

2525
#include "caffe2tengine.hpp"
2626

27+
#include <assert.h>
28+
2729
/*
2830
* SELF DEFINE VARIABLE
2931
* FOR CAFFE SERIALIZER
@@ -149,7 +151,10 @@ static void load_blob(ir_graph_t* ir_graph, std::string node_name, const std::ve
149151
}
150152
}
151153
}
152-
set_ir_tensor_shape(ir_tensor, dims, dim_num);
154+
int res = set_ir_tensor_shape(ir_tensor, dims, dim_num);
155+
free(dims);
156+
dims = nullptr;
157+
assert(res == 0);
153158
ir_tensor->tensor_type = TENSOR_TYPE_CONST;
154159
int tensor_size = data_num * sizeof(float);
155160
ir_tensor->data = sys_malloc(tensor_size);

tools/convert_tool/onnx/onnx2tengine.cpp

+2-2
Original file line numberDiff line numberDiff line change
@@ -208,7 +208,7 @@ int onnx_serializer::load_constant_tensor(ir_graph_t* graph, const onnx::GraphPr
208208

209209
const char* name = node.input(1).c_str();
210210
int dim_num = onnx_tensor.dims_size();
211-
int* dims = new int[dim_num];
211+
std::vector<int> dims(dim_num);
212212
for (int j = 0; j < dim_num; j++)
213213
{
214214
dims[j] = onnx_tensor.dims(j);
@@ -221,7 +221,7 @@ int onnx_serializer::load_constant_tensor(ir_graph_t* graph, const onnx::GraphPr
221221
fprintf(stderr, "create ir tensor failed!\n");
222222
return -1;
223223
}
224-
set_ir_tensor_shape(ir_tensor, dims, dim_num);
224+
set_ir_tensor_shape(ir_tensor, dims.data(), dim_num);
225225
ir_tensor->tensor_type = TENSOR_TYPE_CONST;
226226
// set tensor data
227227
if (7 == onnx_tensor.data_type())

tools/convert_tool/tensorflow/tf2tengine.cpp

+6-5
Original file line numberDiff line numberDiff line change
@@ -724,12 +724,13 @@ void tensorflow_serializer::CleanupResizeNearestNeighbor()
724724

725725
for (unsigned int i = 0; i < data_node->outputs.size(); i++)
726726
{
727-
data_shape_node = data_node->outputs[i];
728-
729-
if (data_shape_node->op == "Shape")
730-
break;
727+
if (data_node->outputs[i]->op == "Shape")
728+
{
729+
data_shape_node = data_node->outputs[i];
730+
}
731731
}
732732

733+
assert(data_shape_node != nullptr);
733734
DisconnectNode(data_shape_node);
734735

735736
TFNode* mul_node = cur_node->inputs[1];
@@ -2589,4 +2590,4 @@ void tensorflow_serializer::register_op_load()
25892590
op_load_map["Pad"] = std::pair<int, op_load_t>(OP_PAD, load_pad);
25902591
op_load_map["ConcatV2"] = std::pair<int, op_load_t>(OP_CONCAT, load_concat);
25912592
op_load_map["MatMul"] = std::pair<int, op_load_t>(OP_FC, load_gemm);
2592-
}
2593+
}

tools/convert_tool/utils/graph_optimizer/graph_opt.cpp

+2-5
Original file line numberDiff line numberDiff line change
@@ -413,8 +413,8 @@ static int weight_bn(ir_graph_t* graph, ir_node_t* conv_node, float* mean, float
413413
float* kernel_data = (float*)kernel_tensor->data;
414414
int channel_num = kernel_tensor->dims[0];
415415

416-
float* scale_mean = (float*)malloc(channel_num * sizeof(float));
417-
float* scale_var_inv = (float*)malloc(channel_num * sizeof(float));
416+
std::vector<float> scale_mean(channel_num);
417+
std::vector<float> scale_var_inv(channel_num);
418418

419419
float rescale_factor_tmp = rescale_factor;
420420
float* bias = NULL;
@@ -502,9 +502,6 @@ static int weight_bn(ir_graph_t* graph, ir_node_t* conv_node, float* mean, float
502502
bias_data[i] = scale_mean[i];
503503
}
504504

505-
free(scale_var_inv);
506-
free(scale_mean);
507-
508505
return 0;
509506
}
510507

0 commit comments

Comments (0)