Skip to content

Commit 55c07c2

Browse files
committed
ggml : suppress Windows compiler warnings
This commit suppresses compiler warnings emitted from ggml on Windows. The motivation for these changes is that some compilers, for example Windows MSVC, generate warnings for these conversions, and there are quite a few of them. This makes it a little difficult to spot new warnings that may be introduced, and can also be difficult for users/embedders of ggml, where these warnings are hard to separate from their own warnings.
1 parent adaea08 commit 55c07c2

File tree

6 files changed

+37
-31
lines changed

6 files changed

+37
-31
lines changed

ggml/src/ggml-backend-reg.cpp

+1-1
Original file line numberDiff line numberDiff line change
@@ -453,7 +453,7 @@ static fs::path get_executable_path() {
453453
return base_path + "/";
454454
#elif defined(_WIN32)
455455
std::vector<wchar_t> path(MAX_PATH);
456-
DWORD len = GetModuleFileNameW(NULL, path.data(), path.size());
456+
DWORD len = GetModuleFileNameW(NULL, path.data(), (DWORD)path.size());
457457
if (len == 0) {
458458
return {};
459459
}

ggml/src/ggml-cpu/ggml-cpu-quants.c

+4-1
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,8 @@
11
#define GGML_COMMON_IMPL_C
2+
// assert.h is included before ggml-common.h to avoid a warning about
3+
// redefinition of static_assert.
4+
#include <assert.h>
5+
26
#include "ggml-common.h"
37

48
#include "ggml-quants.h"
@@ -9,7 +13,6 @@
913

1014
#include <math.h>
1115
#include <string.h>
12-
#include <assert.h>
1316
#include <float.h>
1417
#include <stdlib.h> // for qsort
1518
#include <stdio.h> // for GGML_ASSERT

ggml/src/ggml-opt.cpp

+3-3
Original file line numberDiff line numberDiff line change
@@ -596,8 +596,8 @@ static void ggml_opt_eval_graph(ggml_opt_context_t opt_ctx, ggml_cgraph * graph,
596596
GGML_ASSERT(opt_pars.adamw.wd <= 1.0f);
597597

598598
// beta1, beta2 after applying warmup
599-
const float beta1h = 1.0f/(1.0f - powf(opt_pars.adamw.beta1, opt_ctx->iter));
600-
const float beta2h = 1.0f/(1.0f - powf(opt_pars.adamw.beta2, opt_ctx->iter));
599+
const float beta1h = 1.0f/(1.0f - powf(opt_pars.adamw.beta1, (float)opt_ctx->iter));
600+
const float beta2h = 1.0f/(1.0f - powf(opt_pars.adamw.beta2, (float)opt_ctx->iter));
601601

602602
float * adamw_par_data = ggml_get_data_f32(opt_ctx->adamw_params);
603603
adamw_par_data[0] = opt_pars.adamw.alpha;
@@ -807,7 +807,7 @@ void ggml_opt_fit(
807807
int64_t epoch = 1;
808808

809809
ggml_opt_params params = ggml_opt_default_params(backend_sched, ctx_compute, inputs, outputs, loss_type);
810-
params.opt_period = opt_period;
810+
params.opt_period = (int32_t)opt_period;
811811
params.get_opt_pars = get_opt_pars;
812812
params.get_opt_pars_ud = &epoch;
813813
ggml_opt_context_t opt_ctx = ggml_opt_init(params);

ggml/src/ggml-quants.c

+4-1
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,8 @@
11
#define GGML_COMMON_IMPL_C
2+
// assert.h is included before ggml-common.h to avoid a warning about
3+
// redefinition of static_assert.
4+
#include <assert.h>
5+
26
#include "ggml-common.h"
37

48
#include "ggml-quants.h"
@@ -8,7 +12,6 @@
812

913
#include <math.h>
1014
#include <string.h>
11-
#include <assert.h>
1215
#include <float.h>
1316
#include <stdlib.h> // for qsort
1417
#include <stdio.h> // for GGML_ASSERT

ggml/src/ggml.c

+23-23
Original file line numberDiff line numberDiff line change
@@ -1965,7 +1965,7 @@ static struct ggml_tensor * ggml_acc_impl(
19651965

19661966
struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
19671967

1968-
int32_t params[] = { nb1, nb2, nb3, offset, inplace ? 1 : 0 };
1968+
int32_t params[] = { (int32_t)nb1, (int32_t)nb2, (int32_t)nb3, (int32_t)offset, inplace ? 1 : 0 };
19691969
ggml_set_op_params(result, params, sizeof(params));
19701970

19711971
result->op = GGML_OP_ACC;
@@ -2881,7 +2881,7 @@ static struct ggml_tensor * ggml_set_impl(
28812881
struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
28822882

28832883
GGML_ASSERT(offset < (size_t)(1 << 30));
2884-
int32_t params[] = { nb1, nb2, nb3, offset, inplace ? 1 : 0 };
2884+
int32_t params[] = { (int32_t)nb1, (int32_t)nb2, (int32_t)nb3, (int32_t)offset, inplace ? 1 : 0 };
28852885
ggml_set_op_params(result, params, sizeof(params));
28862886

28872887
result->op = GGML_OP_SET;
@@ -3269,15 +3269,15 @@ struct ggml_tensor * ggml_permute(
32693269
int ne[GGML_MAX_DIMS];
32703270
int nb[GGML_MAX_DIMS];
32713271

3272-
ne[axis0] = a->ne[0];
3273-
ne[axis1] = a->ne[1];
3274-
ne[axis2] = a->ne[2];
3275-
ne[axis3] = a->ne[3];
3272+
ne[axis0] = (int)a->ne[0];
3273+
ne[axis1] = (int)a->ne[1];
3274+
ne[axis2] = (int)a->ne[2];
3275+
ne[axis3] = (int)a->ne[3];
32763276

3277-
nb[axis0] = a->nb[0];
3278-
nb[axis1] = a->nb[1];
3279-
nb[axis2] = a->nb[2];
3280-
nb[axis3] = a->nb[3];
3277+
nb[axis0] = (int)a->nb[0];
3278+
nb[axis1] = (int)a->nb[1];
3279+
nb[axis2] = (int)a->nb[2];
3280+
nb[axis3] = (int)a->nb[3];
32813281

32823282
result->ne[0] = ne[0];
32833283
result->ne[1] = ne[1];
@@ -3916,7 +3916,7 @@ struct ggml_tensor* ggml_conv_1d_ph(
39163916
struct ggml_tensor * b,
39173917
int s,
39183918
int d) {
3919-
return ggml_conv_1d(ctx, a, b, s, a->ne[0] / 2, d);
3919+
return ggml_conv_1d(ctx, a, b, s, (int)a->ne[0] / 2, d);
39203920
}
39213921

39223922
// ggml_conv_1d_dw
@@ -3948,7 +3948,7 @@ struct ggml_tensor * ggml_conv_1d_dw_ph(
39483948
struct ggml_tensor * b,
39493949
int s0,
39503950
int d0) {
3951-
return ggml_conv_1d_dw(ctx, a, b, s0, a->ne[0] / 2, d0);
3951+
return ggml_conv_1d_dw(ctx, a, b, s0, (int)a->ne[0] / 2, d0);
39523952
}
39533953

39543954
// ggml_conv_transpose_1d
@@ -4022,7 +4022,7 @@ struct ggml_tensor * ggml_conv_2d_sk_p0(
40224022
struct ggml_context * ctx,
40234023
struct ggml_tensor * a,
40244024
struct ggml_tensor * b) {
4025-
return ggml_conv_2d(ctx, a, b, a->ne[0], a->ne[1], 0, 0, 1, 1);
4025+
return ggml_conv_2d(ctx, a, b, (int)a->ne[0], (int)a->ne[1], 0, 0, 1, 1);
40264026
}
40274027

40284028
// ggml_conv_2d_s1_ph
@@ -4031,7 +4031,7 @@ struct ggml_tensor * ggml_conv_2d_s1_ph(
40314031
struct ggml_context * ctx,
40324032
struct ggml_tensor * a,
40334033
struct ggml_tensor * b) {
4034-
return ggml_conv_2d(ctx, a, b, 1, 1, a->ne[0] / 2, a->ne[1] / 2, 1, 1);
4034+
return ggml_conv_2d(ctx, a, b, 1, 1, (int)a->ne[0] / 2, (int)a->ne[1] / 2, 1, 1);
40354035
}
40364036

40374037
// ggml_conv_2d_dw
@@ -4132,7 +4132,7 @@ struct ggml_tensor * ggml_conv_transpose_2d_p0(
41324132
// ggml_pool_*
41334133

41344134
static int64_t ggml_calc_pool_output_size(int64_t ins, int ks, int s, float p) {
4135-
return (ins + 2 * p - ks) / s + 1;
4135+
return (int64_t)(ins + 2 * p - ks) / s + 1;
41364136
}
41374137

41384138
// ggml_pool_1d
@@ -4145,7 +4145,7 @@ struct ggml_tensor * ggml_pool_1d(
41454145
int s0,
41464146
int p0) {
41474147
const int64_t ne[4] = {
4148-
ggml_calc_pool_output_size(a->ne[0], k0, s0, p0),
4148+
ggml_calc_pool_output_size(a->ne[0], k0, s0, (float)p0),
41494149
a->ne[1],
41504150
a->ne[2],
41514151
a->ne[3],
@@ -4182,7 +4182,7 @@ struct ggml_tensor * ggml_pool_2d(
41824182
};
41834183
result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne);
41844184

4185-
int32_t params[] = { op, k0, k1, s0, s1, p0, p1 };
4185+
int32_t params[] = { op, k0, k1, s0, s1, (int)p0, (int)p1 };
41864186
ggml_set_op_params(result, params, sizeof(params));
41874187

41884188
result->op = GGML_OP_POOL_2D;
@@ -4205,7 +4205,7 @@ struct ggml_tensor * ggml_pool_2d_back(
42054205
struct ggml_tensor * result;
42064206
result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, af->ne);
42074207

4208-
int32_t params[] = { op, k0, k1, s0, s1, p0, p1 };
4208+
int32_t params[] = { op, k0, k1, s0, s1, (int)p0, (int)p1 };
42094209
ggml_set_op_params(result, params, sizeof(params));
42104210

42114211
result->op = GGML_OP_POOL_2D_BACK;
@@ -4245,7 +4245,7 @@ struct ggml_tensor * ggml_upscale(
42454245
struct ggml_tensor * a,
42464246
int scale_factor,
42474247
enum ggml_scale_mode mode) {
4248-
return ggml_upscale_impl(ctx, a, a->ne[0] * scale_factor, a->ne[1] * scale_factor, a->ne[2], a->ne[3], mode);
4248+
return ggml_upscale_impl(ctx, a, (int)a->ne[0] * scale_factor, (int)a->ne[1] * scale_factor, (int)a->ne[2], (int)a->ne[3], mode);
42494249
}
42504250

42514251
struct ggml_tensor * ggml_upscale_ext(
@@ -4617,8 +4617,8 @@ struct ggml_tensor * ggml_win_part(
46174617
const int px = (w - a->ne[1]%w)%w;
46184618
const int py = (w - a->ne[2]%w)%w;
46194619

4620-
const int npx = (px + a->ne[1])/w;
4621-
const int npy = (py + a->ne[2])/w;
4620+
const int npx = (int)(px + a->ne[1])/w;
4621+
const int npy = (int)(py + a->ne[2])/w;
46224622
const int np = npx*npy;
46234623

46244624
const int64_t ne[4] = { a->ne[0], w, w, np, };
@@ -5689,7 +5689,7 @@ static void ggml_compute_backward(
56895689
const int32_t p0 = ggml_get_op_params_i32(tensor, 5);
56905690
const int32_t p1 = ggml_get_op_params_i32(tensor, 6);
56915691

5692-
ggml_add_or_set(ctx, cgraph, isrc0, ggml_pool_2d_back(ctx, grad, src0, op, k0, k1, s0, s1, p0, p1));
5692+
ggml_add_or_set(ctx, cgraph, isrc0, ggml_pool_2d_back(ctx, grad, src0, op, k0, k1, s0, s1, (float)p0, (float)p1));
56935693
}
56945694
} break;
56955695
case GGML_OP_WIN_PART:
@@ -5967,7 +5967,7 @@ struct ggml_cgraph * ggml_new_graph_custom(struct ggml_context * ctx, size_t siz
59675967
assert(obj_size == (size_t)((char *)p - (char *)cgraph));
59685968

59695969
*cgraph = (struct ggml_cgraph) {
5970-
/*.size =*/ size,
5970+
/*.size =*/ (int)size,
59715971
/*.n_nodes =*/ 0,
59725972
/*.n_leafs =*/ 0,
59735973
/*.nodes =*/ nodes_ptr,

ggml/src/gguf.cpp

+2-2
Original file line numberDiff line numberDiff line change
@@ -454,7 +454,7 @@ struct gguf_context * gguf_init_from_file_impl(FILE * file, struct gguf_init_par
454454
}
455455
GGML_ASSERT(int64_t(ctx->kv.size()) == n_kv);
456456

457-
const int alignment_idx = gguf_find_key(ctx, GGUF_KEY_GENERAL_ALIGNMENT);
457+
const int alignment_idx = (int)gguf_find_key(ctx, GGUF_KEY_GENERAL_ALIGNMENT);
458458
ctx->alignment = alignment_idx == -1 ? GGUF_DEFAULT_ALIGNMENT : gguf_get_val_u32(ctx, alignment_idx);
459459

460460
if (ctx->alignment == 0 || (ctx->alignment & (ctx->alignment - 1)) != 0) {
@@ -589,7 +589,7 @@ struct gguf_context * gguf_init_from_file_impl(FILE * file, struct gguf_init_par
589589
GGML_ASSERT(int64_t(ctx->info.size()) == n_tensors);
590590

591591
// we require the data section to be aligned, so take into account any padding
592-
if (fseek(file, GGML_PAD(ftell(file), ctx->alignment), SEEK_SET) != 0) {
592+
if (fseek(file, (long)GGML_PAD(ftell(file), ctx->alignment), SEEK_SET) != 0) {
593593
fprintf(stderr, "%s: failed to seek to beginning of data section\n", __func__);
594594
gguf_free(ctx);
595595
return nullptr;

0 commit comments

Comments
 (0)