Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 3 additions & 1 deletion backends/vulkan/test/custom_ops/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -48,7 +48,9 @@ if(TARGET vulkan_backend)

# Prototyping utility files
set(PROTOTYPING_UTILS_HEADERS ${CMAKE_CURRENT_SOURCE_DIR})
set(PROTOTYPING_UTILS_CPP ${CMAKE_CURRENT_SOURCE_DIR}/utils.cpp)
set(PROTOTYPING_UTILS_CPP ${CMAKE_CURRENT_SOURCE_DIR}/utils.cpp
${CMAKE_CURRENT_SOURCE_DIR}/conv2d_utils.cpp
)

# Prototyping shaders
message(STATUS "shader stuff")
Expand Down
86 changes: 84 additions & 2 deletions backends/vulkan/test/custom_ops/conv2d_utils.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -6,5 +6,87 @@

#include "conv2d_utils.h"

// Implementation file for conv2d test utilities: helpers that build
// human-readable test case names from convolution configurations.
namespace executorch {
namespace vulkan {
namespace prototyping {

// Builds the convolution-parameter portion of a test case name, e.g.
// "g=2 k=3,s=2,p=2". Dimensions are printed width-first and collapsed to a
// single number when square ("k=3" instead of "k=3,3"). Parameters at their
// common values (groups == 1, stride <= 1, padding == 1, dilation == 1) are
// omitted to keep the names short.
std::string make_test_case_conv_params_suffix(const Conv2dConfig& config) {
  std::string suffix;
  // Only print groups if not equal to 1
  if (config.groups != 1) {
    suffix += "g=" + std::to_string(config.groups);
    suffix += " ";
  }

  // Kernel size is always printed.
  suffix += "k=";
  if (config.kernel.h == config.kernel.w) {
    suffix += std::to_string(config.kernel.w);
  } else {
    suffix +=
        std::to_string(config.kernel.w) + "," + std::to_string(config.kernel.h);
  }
  // Only print stride if either dimension is not 1
  if (config.stride.h > 1 || config.stride.w > 1) {
    suffix += ",s=";
    if (config.stride.h == config.stride.w) {
      suffix += std::to_string(config.stride.w);
    } else {
      suffix += std::to_string(config.stride.w) + "," +
          std::to_string(config.stride.h);
    }
  }
  // Only print padding if either dimension is not 1
  if (config.padding.h != 1 || config.padding.w != 1) {
    suffix += ",p=";
    if (config.padding.h == config.padding.w) {
      suffix += std::to_string(config.padding.w);
    } else {
      suffix += std::to_string(config.padding.w) + "," +
          std::to_string(config.padding.h);
    }
  }
  // Only print dilation if either dimension is not 1
  if (config.dilation.h != 1 || config.dilation.w != 1) {
    suffix += ",d=";
    if (config.dilation.h == config.dilation.w) {
      suffix += std::to_string(config.dilation.w);
    } else {
      suffix += std::to_string(config.dilation.w) + "," +
          std::to_string(config.dilation.h);
    }
  }
  return suffix;
}

// Short human-readable tag for a storage type, used in test case names.
std::string to_string(const vkcompute::utils::StorageType storage_type) {
  switch (storage_type) {
    case vkcompute::utils::kTexture3D:
      return "Tex";
    case vkcompute::utils::kTexture2D:
      return "Tex2D";
    case vkcompute::utils::kBuffer:
      return "Buf";
  }
  // Flowing off the end of a non-void function is undefined behavior, so
  // provide a fallback for unhandled enumerator values. The switch above is
  // intentionally left without a `default:` so -Wswitch still flags any new
  // StorageType enumerators.
  return "Unknown";
}

// Builds the full test case name for a conv2d test, e.g.
// "ACCU 3->16 I=16,16 k=3,p=0 Tex->Buf".
//
// Args:
//   config: convolution configuration (channels, input size, conv params).
//   is_performance: selects the "PERF " prefix instead of "ACCU ".
//   fp_storage_type: storage type of the floating-point input/output tensors.
//   int8_storage_type: storage type of the intermediate int8 tensors.
std::string make_test_case_name(
    const Conv2dConfig& config,
    const bool is_performance,
    const vkcompute::utils::StorageType fp_storage_type,
    const vkcompute::utils::StorageType int8_storage_type) {
  std::string test_case_name = is_performance ? "PERF " : "ACCU ";
  test_case_name += std::to_string(config.channels.in) + "->" +
      std::to_string(config.channels.out) +
      " I=" + std::to_string(config.input_size.h) + "," +
      std::to_string(config.input_size.w) + " " +
      make_test_case_conv_params_suffix(config);

  test_case_name +=
      " " + to_string(fp_storage_type) + "->" + to_string(int8_storage_type);

  return test_case_name;
}

} // namespace prototyping
} // namespace vulkan
} // namespace executorch
8 changes: 8 additions & 0 deletions backends/vulkan/test/custom_ops/conv2d_utils.h
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,8 @@

#pragma once

#include <executorch/backends/vulkan/runtime/api/api.h>

#include <cstdint>
#include <string>

Expand Down Expand Up @@ -83,6 +85,12 @@ struct Conv2dConfig {
}
};

std::string make_test_case_name(
const Conv2dConfig& config,
const bool is_performance,
const vkcompute::utils::StorageType fp_storage_type,
const vkcompute::utils::StorageType int8_storage_type);

} // namespace prototyping
} // namespace vulkan
} // namespace executorch
116 changes: 56 additions & 60 deletions backends/vulkan/test/custom_ops/q8ta_q8csw_q8to_conv2d.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -4,15 +4,17 @@
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <executorch/backends/vulkan/runtime/graph/ops/impl/Common.h>
#include <executorch/backends/vulkan/runtime/graph/ops/utils/ShaderNameUtils.h>
#include <iostream>
#include <vector>
#include "conv2d_utils.h"
#include "utils.h"

#include <executorch/backends/vulkan/runtime/graph/ops/impl/Common.h>
#include <executorch/backends/vulkan/runtime/graph/ops/utils/ShaderNameUtils.h>

#include <executorch/backends/vulkan/runtime/graph/ops/impl/Staging.h>

#include "conv2d_utils.h"
#include "utils.h"

// #define DEBUG_MODE

using namespace executorch::vulkan::prototyping;
Expand All @@ -24,22 +26,14 @@ static constexpr int64_t kRefDimSizeLimit = 100;
// Utility function to create a test case from a Conv2dConfig
TestCase create_test_case_from_config(
const Conv2dConfig& config,
utils::StorageType storage_type,
vkapi::ScalarType input_dtype,
utils::StorageType interm_storage_type) {
utils::StorageType fp_storage_type,
utils::StorageType int8_storage_type) {
TestCase test_case;

// Create a descriptive name for the test case
std::string storage_str =
(storage_type == utils::kTexture3D) ? "Texture3D" : "Buffer";
std::string dtype_str = (input_dtype == vkapi::kFloat) ? "Float" : "Half";

std::string test_name =
config.test_case_name + "_" + storage_str + "_" + dtype_str;
test_case.set_name(test_name);
test_case.set_name(config.test_case_name);

std::string operator_suffix = ".test";
if (interm_storage_type == utils::kTexture3D) {
if (int8_storage_type == utils::kTexture3D) {
operator_suffix += "_texture";
} else {
operator_suffix += "_buffer";
Expand All @@ -57,15 +51,15 @@ TestCase create_test_case_from_config(
std::vector<int64_t> input_size = {
1, config.channels.in, config.input_size.h, config.input_size.w};

utils::GPUMemoryLayout io_memory_layout = storage_type == utils::kBuffer
utils::GPUMemoryLayout fp_memory_layout = fp_storage_type == utils::kBuffer
? utils::kWidthPacked
: utils::kChannelsPacked;

ValueSpec input_tensor(
input_size,
input_dtype,
storage_type,
io_memory_layout,
fp_storage_type,
fp_memory_layout,
#ifdef DEBUG_MODE
DataGenType::RANDOM
#else
Expand Down Expand Up @@ -93,7 +87,7 @@ TestCase create_test_case_from_config(
ValueSpec quantized_weight(
weight_size,
vkapi::kChar, // int8 for quantized weights
storage_type,
fp_storage_type,
utils::kWidthPacked,
DataGenType::RANDINT8);
quantized_weight.set_constant(true);
Expand All @@ -108,15 +102,15 @@ TestCase create_test_case_from_config(
ValueSpec weight_scales(
{aligned_out_channels}, // Per output channel
input_dtype,
storage_type,
fp_storage_type,
utils::kWidthPacked,
DataGenType::RANDOM_SCALES);
weight_scales.set_constant(true);

ValueSpec weight_sums(
{aligned_out_channels}, // Per output channel
vkapi::kInt,
storage_type,
fp_storage_type,
utils::kWidthPacked,
DataGenType::ZEROS);
weight_sums.set_constant(true);
Expand All @@ -129,13 +123,12 @@ TestCase create_test_case_from_config(
ValueSpec bias(
{aligned_out_channels}, // Per output channel
input_dtype,
storage_type,
fp_storage_type,
utils::kWidthPacked,
DataGenType::ZEROS);
bias.set_constant(true);

// Output quantization parameters
// float output_scale_val = 0.01432;
float output_scale_val = 0.05314;
ValueSpec output_scale(output_scale_val);

Expand All @@ -157,8 +150,8 @@ TestCase create_test_case_from_config(
ValueSpec output(
{1, config.channels.out, H_out, W_out},
input_dtype,
storage_type,
io_memory_layout,
fp_storage_type,
fp_memory_layout,
DataGenType::ZEROS);

// Add all specs to test case for q8ta_q8csw_q8to operation
Expand Down Expand Up @@ -200,18 +193,16 @@ std::vector<TestCase> generate_quantized_conv2d_easy_cases() {
};
config.op_name = "conv2d_q8ta_q8csw_q8to";

// Test with both storage types and data types for completeness
std::vector<utils::StorageType> storage_types = {
utils::kTexture3D, utils::kBuffer};
std::vector<vkapi::ScalarType> float_types = {vkapi::kFloat};

// Generate test cases for each combination
for (const auto& storage_type : storage_types) {
for (const auto& input_dtype : float_types) {
for (const utils::StorageType fp_storage_type : storage_types) {
for (const utils::StorageType int8_storage_type : storage_types) {
config.test_case_name = make_test_case_name(
config, false, fp_storage_type, int8_storage_type);
test_cases.push_back(create_test_case_from_config(
config, storage_type, input_dtype, utils::kBuffer));
test_cases.push_back(create_test_case_from_config(
config, storage_type, input_dtype, utils::kTexture3D));
config, vkapi::kFloat, fp_storage_type, int8_storage_type));
}
}

Expand All @@ -221,6 +212,9 @@ std::vector<TestCase> generate_quantized_conv2d_easy_cases() {
// Generate test cases for quantized conv2d operation
std::vector<TestCase> generate_quantized_conv2d_test_cases() {
std::vector<TestCase> test_cases;
if (!vkcompute::api::context()->adapter_ptr()->supports_int8_dot_product()) {
return test_cases;
}

std::vector<Conv2dConfig> configs = {
// Pointwise convolutions: kernel size 1x1
Expand Down Expand Up @@ -317,7 +311,7 @@ std::vector<TestCase> generate_quantized_conv2d_test_cases() {
Padding(2, 2),
Dilation(1, 1),
4},
// Performance cases (pointwise)
// Performance cases (pointwise - will use im2col)
{OutInChannels(128, 128),
InputSize2D(128, 128),
KernelSize(1, 1),
Expand All @@ -332,7 +326,7 @@ std::vector<TestCase> generate_quantized_conv2d_test_cases() {
Padding(0, 0),
Dilation(1, 1),
1},
// Performance cases (general 2d convs)
// Performance cases (3x3 convs - will use im2col)
{OutInChannels(32, 3),
InputSize2D(256, 256),
KernelSize(3, 3),
Expand All @@ -354,6 +348,21 @@ std::vector<TestCase> generate_quantized_conv2d_test_cases() {
Padding(1, 1),
Dilation(1, 1),
1},
// Performance cases (grouped convs)
{OutInChannels(64, 64),
InputSize2D(128, 128),
KernelSize(3, 3),
Stride(1, 1),
Padding(1, 1),
Dilation(1, 1),
2},
{OutInChannels(96, 96),
InputSize2D(128, 128),
KernelSize(3, 3),
Stride(2, 2),
Padding(1, 1),
Dilation(1, 1),
3},
{OutInChannels(128, 128),
InputSize2D(128, 128),
KernelSize(5, 5),
Expand All @@ -368,32 +377,19 @@ std::vector<TestCase> generate_quantized_conv2d_test_cases() {

// Generate test cases for each combination
for (auto& config : configs) {
for (const auto& storage_type : storage_types) {
// Generate test case name programmatically
bool is_performance = config.channels.out > kRefDimSizeLimit ||
config.channels.in > kRefDimSizeLimit ||
config.input_size.h > kRefDimSizeLimit ||
config.input_size.w > kRefDimSizeLimit;
std::string prefix = is_performance ? "performance_" : "correctness_";
std::string suffix = std::to_string(config.channels.out) + "/" +
std::to_string(config.channels.in) + "_" +
std::to_string(config.input_size.h) + "/" +
std::to_string(config.input_size.w) + "_" +
std::to_string(config.kernel.h) + "/" +
std::to_string(config.kernel.w);

config.op_name = "conv2d_q8ta_q8csw_q8to";
config.test_case_name = prefix + suffix;

// Only test q8ta_q8csw_q8to if the int8 dot product extension is
// supported
if (vkcompute::api::context()
->adapter_ptr()
->supports_int8_dot_product()) {
test_cases.push_back(create_test_case_from_config(
config, storage_type, vkapi::kFloat, utils::kBuffer));
bool is_performance = config.channels.out > kRefDimSizeLimit ||
config.channels.in > kRefDimSizeLimit ||
config.input_size.h > kRefDimSizeLimit ||
config.input_size.w > kRefDimSizeLimit;

config.op_name = "conv2d_q8ta_q8csw_q8to";

for (const utils::StorageType fp_storage_type : storage_types) {
for (const utils::StorageType int8_storage_type : storage_types) {
config.test_case_name = make_test_case_name(
config, is_performance, fp_storage_type, int8_storage_type);
test_cases.push_back(create_test_case_from_config(
config, storage_type, vkapi::kFloat, utils::kTexture3D));
config, vkapi::kFloat, fp_storage_type, int8_storage_type));
}
}
}
Expand Down
Loading
Loading