Skip to content

Commit dca4613

Browse files
authored
[ET-VK][ez][testing] Remove conv2d test in compute_api_test + reduce stdout logging
Differential Revision: D76156109 Pull Request resolved: #11452
1 parent 9074fb3 commit dca4613

File tree

2 files changed

+13
-109
lines changed

2 files changed

+13
-109
lines changed

backends/vulkan/test/utils/test_utils.cpp

Lines changed: 0 additions & 39 deletions
Original file line numberDiff line numberDiff line change
@@ -137,45 +137,6 @@ void record_bitw8_image_to_nchw_nobitw8buffer_op(
137137
v_src.numel_ubo());
138138
}
139139

140-
void record_conv2d_prepack_weights_op(
141-
api::Context* const context,
142-
vkapi::VulkanBuffer& src_buffer,
143-
api::vTensor& v_dst,
144-
const std::vector<int64_t>& original_sizes,
145-
const bool transposed) {
146-
vkapi::PipelineBarrier pipeline_barrier{};
147-
148-
std::string kernel_name;
149-
if (transposed) {
150-
kernel_name = "conv_transpose2d";
151-
} else {
152-
kernel_name = "conv2d";
153-
}
154-
kernel_name += "_prepack_weights";
155-
add_dtype_suffix(kernel_name, v_dst);
156-
vkapi::ShaderInfo shader = VK_KERNEL_FROM_STR(kernel_name);
157-
158-
api::ParamsBuffer original_sizes_ubo(
159-
context, utils::make_ivec4(original_sizes, /*reverse = */ true));
160-
161-
vkapi::SpecVarList specialization_constants = {};
162-
context->submit_compute_job(
163-
shader,
164-
pipeline_barrier,
165-
v_dst.logical_limits(),
166-
adaptive_work_group_size(v_dst.logical_limits()),
167-
specialization_constants,
168-
VK_NULL_HANDLE,
169-
0,
170-
v_dst.image(
171-
pipeline_barrier,
172-
vkapi::PipelineStage::COMPUTE,
173-
vkapi::MemoryAccessType::WRITE),
174-
src_buffer,
175-
v_dst.sizes_ubo(),
176-
original_sizes_ubo.buffer());
177-
}
178-
179140
void record_binary_op(
180141
api::Context* const context,
181142
const std::string& op_name,

backends/vulkan/test/vulkan_compute_api_test.cpp

Lines changed: 13 additions & 70 deletions
Original file line numberDiff line numberDiff line change
@@ -2044,7 +2044,7 @@ TEST(VulkanComputeGraphTest, test_etvk_copy_offset_node) {
20442044
}
20452045
}
20462046

2047-
TEST(VulkanComputeGraphTest, test_etvk_copy_channel_offset_node) {
2047+
TEST(VulkanComputeGraphTest, DISABLED_test_etvk_copy_channel_offset_node) {
20482048
GraphConfig config;
20492049
ComputeGraph graph(config);
20502050

@@ -2103,7 +2103,7 @@ TEST(VulkanComputeGraphTest, test_etvk_copy_channel_offset_node) {
21032103

21042104
TEST(
21052105
VulkanComputeGraphTest,
2106-
test_etvk_copy_channel_offset_node_clean_boundary) {
2106+
DISABLED_test_etvk_copy_channel_offset_node_clean_boundary) {
21072107
// Tricky part for channel copy is handling the boundary across multiple copy.
21082108
// For example, when we concat two [3, 1, 1] nchw-tensors along the channel
21092109
// dimension, due to channel packing, elements from different source texel
@@ -2312,7 +2312,7 @@ TEST(VulkanComputeGraphTest, test_etvk_copy_offset_int_node) {
23122312
}
23132313
}
23142314

2315-
TEST(VulkanComputeGraphTest, test_etvk_copy_channel_offset_int_node) {
2315+
TEST(VulkanComputeGraphTest, DISABLED_test_etvk_copy_channel_offset_int_node) {
23162316
GraphConfig config;
23172317
ComputeGraph graph(config);
23182318

@@ -2966,71 +2966,6 @@ TEST(VulkanComputeGraphOpsTest, max_pool2d_smoke_test) {
29662966
kernel);
29672967
}
29682968

2969-
void test_conv2d(
2970-
const std::vector<int64_t>& original_sizes,
2971-
const std::vector<int64_t>& padded_sizes,
2972-
const std::vector<int64_t>& gpu_sizes,
2973-
const bool transposed,
2974-
const std::vector<float>& data_out_expected) {
2975-
vTensor vten = vTensor(
2976-
context(),
2977-
gpu_sizes,
2978-
vkapi::kFloat,
2979-
utils::StorageType::TEXTURE_2D,
2980-
utils::GPUMemoryLayout::TENSOR_CHANNELS_PACKED);
2981-
2982-
// Create and fill input staging buffer
2983-
const int64_t in_numel = utils::multiply_integers(original_sizes);
2984-
StagingBuffer staging_buffer_in(context(), vkapi::kFloat, in_numel);
2985-
2986-
std::vector<float> data_in(in_numel);
2987-
for (int i = 0; i < in_numel; i++) {
2988-
data_in[i] = i + 1;
2989-
}
2990-
staging_buffer_in.copy_from(data_in.data(), sizeof(float) * in_numel);
2991-
2992-
// Output staging buffer
2993-
const int64_t out_numel =
2994-
padded_sizes[0] * padded_sizes[1] * original_sizes[2] * original_sizes[3];
2995-
StagingBuffer staging_buffer_out(context(), vkapi::kFloat, out_numel);
2996-
2997-
// Copy data in and out of the tensor
2998-
record_conv2d_prepack_weights_op(
2999-
context(), staging_buffer_in.buffer(), vten, original_sizes, transposed);
3000-
record_image_to_nchw_op(context(), vten, staging_buffer_out.buffer());
3001-
3002-
// Execute command buffer
3003-
submit_to_gpu();
3004-
3005-
// Extract data from output staging buffer
3006-
std::vector<float> data_out(out_numel);
3007-
staging_buffer_out.copy_to(data_out.data(), sizeof(float) * out_numel);
3008-
3009-
// Check data matches results copied from ATen-VK
3010-
for (int i = 0; i < vten.numel(); i++) {
3011-
CHECK_VALUE(data_out, i, data_out_expected[i]);
3012-
}
3013-
}
3014-
3015-
TEST(VulkanComputeGraphOpsTest, conv2d_prepack_test) {
3016-
test_conv2d(
3017-
/*original_sizes = */ {2, 3, 1, 2},
3018-
/*padded_sizes = */ {4, 4},
3019-
/*gpu_sizes = */ {4, 1, 8},
3020-
/*transposed = */ false,
3021-
/*data_out_expected = */ {1, 3, 5, 0, 2, 4, 6, 0, 7, 9, 11,
3022-
0, 8, 10, 12, 0, 0, 0, 0, 0, 0, 0,
3023-
0, 0, 0, 0, 0, 0, 0, 0, 0, 0});
3024-
test_conv2d(
3025-
/*original_sizes = */ {2, 3, 1, 2},
3026-
/*padded_sizes = */ {4, 4},
3027-
/*gpu_sizes = */ {4, 1, 8},
3028-
/*transposed = */ true,
3029-
/*data_out_expected = */ {2, 8, 0, 0, 1, 7, 0, 0, 4, 10, 0,
3030-
0, 3, 9, 0, 0, 6, 12, 0, 0, 5, 11,
3031-
0, 0, 0, 0, 0, 0, 0, 0, 0, 0});
3032-
}
3033-
30342969
void test_grid_priors(
30352970
std::vector<int64_t> input_sizes,
30362971
std::vector<int64_t> output_sizes,
@@ -3242,8 +3177,10 @@ void test_to_copy() {
32423177

32433178
EXPECT_EQ(data_in.size(), output_data.size());
32443179

3180+
#ifdef VULKAN_DEBUG
32453181
float mse_ex = 0.0f;
32463182
float mse_vk = 0.0f;
3183+
#endif
32473184

32483185
// check results
32493186
for (size_t i = 0; i < output_data.size(); ++i) {
@@ -3254,6 +3191,7 @@ void test_to_copy() {
32543191
torch::executor::Half output = output_data[i];
32553192
uint16_t* output_bits = reinterpret_cast<uint16_t*>(&output);
32563193

3194+
#ifdef VULKAN_DEBUG
32573195
std::string msg;
32583196
msg.reserve(64);
32593197
msg = "input = " + std::to_string(input) + "(0b" +
@@ -3265,6 +3203,10 @@ void test_to_copy() {
32653203

32663204
std::cout << msg << std::endl;
32673205

3206+
mse_ex += std::pow(expected_output - input, 2);
3207+
mse_vk += std::pow(output - input, 2);
3208+
#endif
3209+
32683210
// Note: Torch executor half "rounds up" when converting to fp16 whereas
32693211
// most driver implementations of Vulkan's opFConvert() just truncates the
32703212
// extra bits for performance (rounding introduces conditional).
@@ -3284,15 +3226,16 @@ void test_to_copy() {
32843226
EXPECT_TRUE(
32853227
(*output_bits == *expected_bits) ||
32863228
/*rounding error*/ ((*output_bits + 1u) == *expected_bits));
3287-
mse_ex += std::pow(expected_output - input, 2);
3288-
mse_vk += std::pow(output - input, 2);
32893229
}
32903230

3231+
#ifdef VULKAN_DEBUG
32913232
mse_ex /= output_data.size();
32923233
mse_vk /= output_data.size();
3234+
32933235
std::cout << "========================================================="
32943236
<< std::endl;
32953237
std::cout << "mse_ex = " << mse_ex << ", mse_vk = " << mse_vk << std::endl;
3238+
#endif
32963239
}
32973240

32983241
TEST(VulkanComputeGraphOpsTest, test_to_copy) {

0 commit comments

Comments (0)