update to vulkan-sdk-1.3.275.0

Signed-off-by: huruitao <huruitao@kaihong.com>
huruitao 2024-05-21 14:26:42 +08:00
parent a7fd79e84f
commit e8ad9735b1
779 changed files with 49647 additions and 26943 deletions

.gitignore (vendored, Normal file → Executable file): 3 changed lines

@ -4,6 +4,7 @@
compile_commands.json
/build*/
/buildtools/
/external/abseil_cpp/
/external/googletest
/external/SPIRV-Headers
/external/spirv-headers
@ -20,7 +21,7 @@ bazel-bin
bazel-genfiles
bazel-out
bazel-spirv-tools
bazel-spirv-tools
bazel-SPIRV-Tools
bazel-testlogs
# Vim

View File

@ -71,6 +71,7 @@ SPVTOOLS_SRC_FILES := \
source/val/validate_primitives.cpp \
source/val/validate_ray_query.cpp \
source/val/validate_ray_tracing.cpp \
source/val/validate_ray_tracing_reorder.cpp \
source/val/validate_scopes.cpp \
source/val/validate_small_type_uses.cpp \
source/val/validate_type.cpp
@ -78,6 +79,7 @@ SPVTOOLS_SRC_FILES := \
SPVTOOLS_OPT_SRC_FILES := \
source/opt/aggressive_dead_code_elim_pass.cpp \
source/opt/amd_ext_to_khr.cpp \
source/opt/analyze_live_input_pass.cpp \
source/opt/basic_block.cpp \
source/opt/block_merge_pass.cpp \
source/opt/block_merge_util.cpp \
@ -109,8 +111,9 @@ SPVTOOLS_OPT_SRC_FILES := \
source/opt/eliminate_dead_constant_pass.cpp \
source/opt/eliminate_dead_functions_pass.cpp \
source/opt/eliminate_dead_functions_util.cpp \
source/opt/eliminate_dead_input_components_pass.cpp \
source/opt/eliminate_dead_io_components_pass.cpp \
source/opt/eliminate_dead_members_pass.cpp \
source/opt/eliminate_dead_output_stores_pass.cpp \
source/opt/feature_manager.cpp \
source/opt/fix_func_call_arguments.cpp \
source/opt/fix_storage_class.cpp \
@ -133,9 +136,11 @@ SPVTOOLS_OPT_SRC_FILES := \
source/opt/instrument_pass.cpp \
source/opt/interface_var_sroa.cpp \
source/opt/interp_fixup_pass.cpp \
source/opt/invocation_interlock_placement_pass.cpp \
source/opt/ir_context.cpp \
source/opt/ir_loader.cpp \
source/opt/licm_pass.cpp \
source/opt/liveness.cpp \
source/opt/local_access_chain_convert_pass.cpp \
source/opt/local_redundancy_elimination.cpp \
source/opt/local_single_block_elim_pass.cpp \
@ -178,6 +183,8 @@ SPVTOOLS_OPT_SRC_FILES := \
source/opt/strip_debug_info_pass.cpp \
source/opt/strip_nonsemantic_info_pass.cpp \
source/opt/struct_cfg_analysis.cpp \
source/opt/switch_descriptorset_pass.cpp \
source/opt/trim_capabilities_pass.cpp \
source/opt/type_manager.cpp \
source/opt/types.cpp \
source/opt/unify_const_pass.cpp \
@ -217,7 +224,8 @@ $(1)/opencl.std.insts.inc \
--core-insts-output=$(1)/core.insts-unified1.inc \
--glsl-insts-output=$(1)/glsl.std.450.insts.inc \
--opencl-insts-output=$(1)/opencl.std.insts.inc \
--operand-kinds-output=$(1)/operand.kinds-unified1.inc
--operand-kinds-output=$(1)/operand.kinds-unified1.inc \
--output-language=c++
@echo "[$(TARGET_ARCH_ABI)] Grammar (from unified1) : instructions & operands <= grammar JSON files"
$(LOCAL_PATH)/source/opcode.cpp: $(1)/core.insts-unified1.inc
$(LOCAL_PATH)/source/operand.cpp: $(1)/operand.kinds-unified1.inc
@ -291,7 +299,8 @@ $(1)/extension_enum.inc $(1)/enum_string_mapping.inc: \
--extinst-debuginfo-grammar=$(SPV_DEBUGINFO_GRAMMAR) \
--extinst-cldebuginfo100-grammar=$(SPV_CLDEBUGINFO100_GRAMMAR) \
--extension-enum-output=$(1)/extension_enum.inc \
--enum-string-mapping-output=$(1)/enum_string_mapping.inc
--enum-string-mapping-output=$(1)/enum_string_mapping.inc \
--output-language=c++
@echo "[$(TARGET_ARCH_ABI)] Generate enum<->string mapping <= grammar JSON files"
# Generated header extension_enum.inc is transitively included by table.h, which is
# used pervasively. Capture the pervasive dependency.
@ -334,7 +343,7 @@ LOCAL_C_INCLUDES := \
$(SPVTOOLS_OUT_PATH)
LOCAL_EXPORT_C_INCLUDES := \
$(LOCAL_PATH)/include
LOCAL_CXXFLAGS:=-std=c++11 -fno-exceptions -fno-rtti -Werror
LOCAL_CXXFLAGS:=-std=c++17 -fno-exceptions -fno-rtti -Werror
LOCAL_SRC_FILES:= $(SPVTOOLS_SRC_FILES)
include $(BUILD_STATIC_LIBRARY)
@ -345,7 +354,7 @@ LOCAL_C_INCLUDES := \
$(LOCAL_PATH)/source \
$(SPVHEADERS_LOCAL_PATH)/include \
$(SPVTOOLS_OUT_PATH)
LOCAL_CXXFLAGS:=-std=c++11 -fno-exceptions -fno-rtti -Werror
LOCAL_CXXFLAGS:=-std=c++17 -fno-exceptions -fno-rtti -Werror
LOCAL_STATIC_LIBRARIES:=SPIRV-Tools
LOCAL_SRC_FILES:= $(SPVTOOLS_OPT_SRC_FILES)
include $(BUILD_STATIC_LIBRARY)

View File

@ -1,27 +1,24 @@
load(
":build_defs.bzl",
"CLDEBUGINFO100_GRAMMAR_JSON_FILE",
"COMMON_COPTS",
"DEBUGINFO_GRAMMAR_JSON_FILE",
"CLDEBUGINFO100_GRAMMAR_JSON_FILE",
"SHDEBUGINFO100_GRAMMAR_JSON_FILE",
"TEST_COPTS",
"base_test",
"generate_core_tables",
"generate_enum_string_mapping",
"generate_extinst_lang_headers",
"generate_glsl_tables",
"generate_opencl_tables",
"generate_vendor_tables",
"link_test",
"lint_test",
"opt_test",
"reduce_test",
"util_test",
"val_test",
"incompatible_with",
)
package(
default_visibility = ["//visibility:private"],
features = [
"layering_check",
],
)
licenses(["notice"])
@ -41,35 +38,50 @@ py_binary(
srcs = ["utils/generate_language_headers.py"],
)
generate_core_tables("unified1")
generate_core_tables(version = "unified1")
generate_enum_string_mapping("unified1")
generate_enum_string_mapping(version = "unified1")
generate_opencl_tables("unified1")
generate_opencl_tables(version = "unified1")
generate_glsl_tables("unified1")
generate_glsl_tables(version = "unified1")
generate_vendor_tables("spv-amd-shader-explicit-vertex-parameter")
generate_vendor_tables(extension = "spv-amd-shader-explicit-vertex-parameter")
generate_vendor_tables("spv-amd-shader-trinary-minmax")
generate_vendor_tables(extension = "spv-amd-shader-trinary-minmax")
generate_vendor_tables("spv-amd-gcn-shader")
generate_vendor_tables(extension = "spv-amd-gcn-shader")
generate_vendor_tables("spv-amd-shader-ballot")
generate_vendor_tables(extension = "spv-amd-shader-ballot")
generate_vendor_tables("debuginfo")
generate_vendor_tables(extension = "debuginfo")
generate_vendor_tables("opencl.debuginfo.100", "CLDEBUG100_")
generate_vendor_tables(extension = "nonsemantic.clspvreflection")
generate_vendor_tables("nonsemantic.shader.debuginfo.100", "SHDEBUG100_")
generate_vendor_tables(
extension = "opencl.debuginfo.100",
operand_kind_prefix = "CLDEBUG100_",
)
generate_vendor_tables("nonsemantic.clspvreflection")
generate_vendor_tables(
extension = "nonsemantic.shader.debuginfo.100",
operand_kind_prefix = "SHDEBUG100_",
)
generate_extinst_lang_headers("DebugInfo", DEBUGINFO_GRAMMAR_JSON_FILE)
generate_extinst_lang_headers(
name = "DebugInfo",
grammar = DEBUGINFO_GRAMMAR_JSON_FILE,
)
generate_extinst_lang_headers("OpenCLDebugInfo100", CLDEBUGINFO100_GRAMMAR_JSON_FILE)
generate_extinst_lang_headers(
name = "OpenCLDebugInfo100",
grammar = CLDEBUGINFO100_GRAMMAR_JSON_FILE,
)
generate_extinst_lang_headers("NonSemanticShaderDebugInfo100", SHDEBUGINFO100_GRAMMAR_JSON_FILE)
generate_extinst_lang_headers(
name = "NonSemanticShaderDebugInfo100",
grammar = SHDEBUGINFO100_GRAMMAR_JSON_FILE,
)
py_binary(
name = "generate_registry_tables",
@ -77,12 +89,12 @@ py_binary(
)
genrule(
name = "gen_registry_tables",
name = "generators_inc",
srcs = ["@spirv_headers//:spirv_xml_registry"],
outs = ["generators.inc"],
cmd = "$(location generate_registry_tables) --xml=$(location @spirv_headers//:spirv_xml_registry) --generator-output=$(location generators.inc)",
cmd_bat = "$(location //:generate_registry_tables) --xml=$(location @spirv_headers//:spirv_xml_registry) --generator-output=$(location generators.inc)",
exec_tools = [":generate_registry_tables"],
cmd = "$(location :generate_registry_tables) --xml=$(location @spirv_headers//:spirv_xml_registry) --generator-output=$(location generators.inc)",
cmd_bat = "$(location :generate_registry_tables) --xml=$(location @spirv_headers//:spirv_xml_registry) --generator-output=$(location generators.inc)",
tools = [":generate_registry_tables"],
)
py_binary(
@ -91,120 +103,103 @@ py_binary(
)
genrule(
name = "gen_build_version",
name = "build_version_inc",
srcs = ["CHANGES"],
outs = ["build-version.inc"],
cmd = "SOURCE_DATE_EPOCH=0 $(location update_build_version) $(location CHANGES) $(location build-version.inc)",
cmd_bat = "set SOURCE_DATE_EPOCH=0 && $(location //:update_build_version) $(location CHANGES) $(location build-version.inc)",
exec_tools = [":update_build_version"],
cmd = "SOURCE_DATE_EPOCH=0 $(location :update_build_version) $(location CHANGES) $(location build-version.inc)",
cmd_bat = "set SOURCE_DATE_EPOCH=0 && $(location :update_build_version) $(location CHANGES) $(location build-version.inc)",
local = True,
tools = [":update_build_version"],
)
# Libraries
cc_library(
name = "generated_headers",
name = "spirv_tools",
hdrs = [
":gen_build_version",
"include/spirv-tools/libspirv.h",
"include/spirv-tools/libspirv.hpp",
],
copts = COMMON_COPTS,
includes = ["include"],
linkstatic = 1,
visibility = ["//visibility:public"],
deps = [
":spirv_tools_internal",
],
)
cc_library(
name = "spirv_tools_internal",
srcs = glob([
"source/*.cpp",
"source/util/*.cpp",
"source/val/*.cpp",
]) + [
":build_version_inc",
":gen_core_tables_unified1",
":gen_enum_string_mapping",
":gen_extinst_lang_headers_DebugInfo",
":gen_extinst_lang_headers_OpenCLDebugInfo100",
":gen_extinst_lang_headers_NonSemanticShaderDebugInfo100",
":gen_extinst_lang_headers_OpenCLDebugInfo100",
":gen_glsl_tables_unified1",
":gen_opencl_tables_unified1",
":gen_registry_tables",
":gen_vendor_tables_debuginfo",
":gen_vendor_tables_nonsemantic_clspvreflection",
":gen_vendor_tables_opencl_debuginfo_100",
":gen_vendor_tables_nonsemantic_shader_debuginfo_100",
":gen_vendor_tables_opencl_debuginfo_100",
":gen_vendor_tables_spv_amd_gcn_shader",
":gen_vendor_tables_spv_amd_shader_ballot",
":gen_vendor_tables_spv_amd_shader_explicit_vertex_parameter",
":gen_vendor_tables_spv_amd_shader_trinary_minmax",
":generators_inc",
],
copts = COMMON_COPTS,
)
cc_library(
name = "spirv_tools_headers",
hdrs = glob([
hdrs = [
"include/spirv-tools/libspirv.h",
"include/spirv-tools/libspirv.hpp",
":gen_extinst_lang_headers_DebugInfo",
":gen_extinst_lang_headers_NonSemanticShaderDebugInfo100",
":gen_extinst_lang_headers_OpenCLDebugInfo100",
] + glob([
"source/*.h",
"source/util/*.h",
"source/val/*.h",
]),
copts = COMMON_COPTS,
includes = ["source"],
deps = [
"@spirv_headers//:spirv_c_headers",
],
)
cc_library(
name = "spirv_tools",
srcs = glob([
"source/*.cpp",
"source/util/*.cpp",
"source/val/*.cpp",
]),
hdrs = [
"include/spirv-tools/libspirv.h",
"include/spirv-tools/libspirv.hpp",
],
copts = COMMON_COPTS + select({
"@bazel_tools//src/conditions:windows": [""],
"//conditions:default": ["-Wno-implicit-fallthrough"],
}),
includes = ["include"],
linkstatic = 1,
visibility = ["//visibility:public"],
deps = [
":generated_headers",
":spirv_tools_headers",
"@spirv_headers//:spirv_c_headers",
"@spirv_headers//:spirv_common_headers",
"@spirv_headers//:spirv_cpp11_headers",
],
)
cc_library(
name = "spirv_tools_comp",
srcs = glob([
"source/comp/*.cpp",
"source/comp/*.h",
]),
copts = COMMON_COPTS,
linkstatic = 1,
visibility = ["//visibility:public"],
deps = [
":generated_headers",
":spirv_tools",
":spirv_tools_headers",
"@spirv_headers//:spirv_common_headers",
],
)
cc_library(
name = "spirv_tools_opt_headers",
hdrs = glob(["source/opt/*.h"]),
copts = COMMON_COPTS,
)
cc_library(
name = "spirv_tools_opt",
srcs = glob(["source/opt/*.cpp"]),
hdrs = [
"include/spirv-tools/instrument.hpp",
"include/spirv-tools/optimizer.hpp",
],
copts = COMMON_COPTS,
includes = ["include"],
linkstatic = 1,
visibility = ["//visibility:public"],
deps = [
":spirv_tools",
":spirv_tools_headers",
":spirv_tools_opt_headers",
":spirv_tools_opt_internal",
],
)
cc_library(
name = "spirv_tools_opt_internal",
srcs = glob(["source/opt/*.cpp"]) + [
":gen_vendor_tables_spv_amd_shader_ballot",
],
hdrs = glob(["source/opt/*.h"]) + [
"include/spirv-tools/instrument.hpp",
"include/spirv-tools/optimizer.hpp",
],
copts = COMMON_COPTS,
deps = [
":spirv_tools_internal",
"@spirv_headers//:spirv_common_headers",
],
)
@ -214,11 +209,9 @@ cc_library(
srcs = glob(["source/reduce/*.cpp"]),
hdrs = glob(["source/reduce/*.h"]),
copts = COMMON_COPTS,
linkstatic = 1,
visibility = ["//visibility:public"],
deps = [
":spirv_tools",
":spirv_tools_opt",
":spirv_tools_internal",
":spirv_tools_opt_internal",
],
)
@ -230,21 +223,38 @@ cc_library(
linkstatic = 1,
visibility = ["//visibility:public"],
deps = [
":spirv_tools",
":spirv_tools_opt",
":spirv_tools_internal",
":spirv_tools_opt_internal",
],
)
cc_library(
name = "spirv_tools_lint_internal",
srcs = glob([
"source/lint/*.cpp",
"source/lint/*.h",
]),
hdrs = ["include/spirv-tools/linter.hpp"] + glob([
"source/lint/*.h",
]),
copts = COMMON_COPTS,
includes = ["include"],
deps = [
":spirv_tools_internal",
":spirv_tools_opt_internal",
],
)
cc_library(
name = "spirv_tools_lint",
srcs = glob(["source/lint/*.cpp", "source/lint/*.h"]),
hdrs = ["include/spirv-tools/linter.hpp"],
copts = COMMON_COPTS,
includes = ["include"],
linkstatic = 1,
visibility = ["//visibility:public"],
deps = [
":spirv_tools",
":spirv_tools_opt",
":spirv_tools_lint_internal",
],
)
@ -253,23 +263,28 @@ cc_library(
srcs = glob(["tools/util/*.cpp"]),
hdrs = glob(["tools/util/*.h"]),
copts = COMMON_COPTS,
linkstatic = 1,
visibility = ["//visibility:public"],
deps = [":spirv_tools"],
)
cc_library(
name = "tools_io",
hdrs = ["tools/io.h"],
copts = COMMON_COPTS,
)
# Tools
cc_binary(
name = "spirv-as",
srcs = [
"tools/as/as.cpp",
"tools/io.h",
],
copts = COMMON_COPTS,
visibility = ["//visibility:public"],
deps = [
":spirv_tools",
":spirv_tools_internal",
":tools_io",
":tools_util",
],
)
@ -277,25 +292,44 @@ cc_binary(
name = "spirv-dis",
srcs = [
"tools/dis/dis.cpp",
"tools/io.h",
],
copts = COMMON_COPTS,
visibility = ["//visibility:public"],
deps = [
":spirv_tools",
":tools_io",
":tools_util",
],
)
cc_binary(
name = "spirv-objdump",
srcs = [
"tools/objdump/extract_source.cpp",
"tools/objdump/extract_source.h",
"tools/objdump/objdump.cpp",
],
copts = COMMON_COPTS,
visibility = ["//visibility:public"],
deps = [
":spirv_tools_internal",
":spirv_tools_opt_internal",
":tools_io",
":tools_util",
"@spirv_headers//:spirv_cpp_headers",
],
)
cc_binary(
name = "spirv-val",
srcs = [
"tools/io.h",
"tools/val/val.cpp",
],
copts = COMMON_COPTS,
visibility = ["//visibility:public"],
deps = [
":spirv_tools",
":spirv_tools_internal",
":tools_io",
":tools_util",
],
)
@ -303,14 +337,14 @@ cc_binary(
cc_binary(
name = "spirv-opt",
srcs = [
"tools/io.h",
"tools/opt/opt.cpp",
],
copts = COMMON_COPTS,
visibility = ["//visibility:public"],
deps = [
":spirv_tools",
":spirv_tools_opt",
":spirv_tools_internal",
":spirv_tools_opt_internal",
":tools_io",
":tools_util",
],
)
@ -318,15 +352,15 @@ cc_binary(
cc_binary(
name = "spirv-reduce",
srcs = [
"tools/io.h",
"tools/reduce/reduce.cpp",
],
copts = COMMON_COPTS,
visibility = ["//visibility:public"],
deps = [
":spirv_tools",
":spirv_tools_opt",
":spirv_tools_internal",
":spirv_tools_opt_internal",
":spirv_tools_reduce",
":tools_io",
":tools_util",
],
)
@ -334,28 +368,29 @@ cc_binary(
cc_binary(
name = "spirv-link",
srcs = [
"tools/io.h",
"tools/link/linker.cpp",
],
copts = COMMON_COPTS,
visibility = ["//visibility:public"],
deps = [
":spirv_tools",
":spirv_tools_internal",
":spirv_tools_link",
":tools_io",
":tools_util",
],
)
cc_binary(
name = "spirv-lint",
srcs = [
"tools/io.h",
"tools/lint/lint.cpp",
],
copts = COMMON_COPTS,
visibility = ["//visibility:public"],
deps = [
":spirv_tools",
":spirv_tools_lint",
":spirv_tools_opt_internal",
":tools_io",
":tools_util",
],
)
@ -366,50 +401,143 @@ cc_binary(
"tools/cfg/bin_to_dot.cpp",
"tools/cfg/bin_to_dot.h",
"tools/cfg/cfg.cpp",
"tools/io.h",
],
copts = COMMON_COPTS,
visibility = ["//visibility:public"],
deps = [":spirv_tools"],
deps = [
":spirv_tools_internal",
":tools_io",
":tools_util",
],
)
# Unit tests
cc_library(
name = "test_common",
name = "test_lib",
testonly = 1,
srcs = [
"test/test_fixture.h",
"test/unit_spirv.cpp",
],
hdrs = [
"test/test_fixture.h",
"test/unit_spirv.h",
],
compatible_with = [],
copts = TEST_COPTS,
includes = ["test"],
linkstatic = 1,
deps = [
":spirv_tools",
":spirv_tools_internal",
"@com_google_googletest//:gtest",
],
)
cc_library(
name = "link_test_common",
testonly = 1,
srcs = ["test/link/linker_fixture.h"],
compatible_with = [],
# PCH (precompiled header) tests only work when using CMake and MSVC on Windows,
# so they will be skipped in the Bazel builds.
[cc_test(
name = "base_{testcase}_test".format(testcase = f[len("test/"):-len("_test.cpp")]),
size = "small",
srcs = [f],
copts = TEST_COPTS + ["-DTESTING"],
linkstatic = 1,
target_compatible_with = {
"test/timer_test.cpp": incompatible_with(["@bazel_tools//src/conditions:windows"]),
}.get(f, []),
deps = [
"tools_util",
":spirv_tools_internal",
":test_lib",
"@com_google_googletest//:gtest",
"@com_google_googletest//:gtest_main",
],
) for f in glob(
[
"test/*_test.cpp",
"test/tools/*_test.cpp",
],
exclude = [
"test/cpp_interface_test.cpp",
"test/pch_test.cpp",
],
)]
cc_test(
name = "base_cpp_interface_test",
size = "small",
srcs = ["test/cpp_interface_test.cpp"],
linkstatic = 1,
deps = [
":spirv_tools_opt_internal",
"@com_google_googletest//:gtest",
"@com_google_googletest//:gtest_main",
"@spirv_headers//:spirv_cpp11_headers",
],
)
cc_test(
name = "base_ilist_test",
size = "small",
srcs = ["test/util/ilist_test.cpp"],
copts = TEST_COPTS,
linkstatic = 1,
deps = [
":spirv_tools_link",
":test_common",
":spirv_tools_internal",
"@com_google_googletest//:gtest",
"@com_google_googletest//:gtest_main",
],
)
cc_library(
name = "opt_test_common",
name = "link_test_lib",
testonly = 1,
srcs = ["test/opt/pass_utils.cpp"],
hdrs = ["test/link/linker_fixture.h"],
copts = TEST_COPTS,
deps = [
":spirv_tools_internal",
":spirv_tools_link",
":test_lib",
"@com_google_effcee//:effcee",
"@com_googlesource_code_re2//:re2",
],
)
[cc_test(
name = "link_{testcase}_test".format(testcase = f[len("test/link/"):-len("_test.cpp")]),
size = "small",
srcs = [f],
copts = TEST_COPTS,
linkstatic = 1,
deps = [
":link_test_lib",
"@com_google_googletest//:gtest",
"@com_google_googletest//:gtest_main",
],
) for f in glob(
["test/link/*_test.cpp"],
)]
[cc_test(
name = "lint_{testcase}_test".format(testcase = f[len("test/lint/"):-len("_test.cpp")]),
size = "small",
srcs = [f],
copts = TEST_COPTS,
linkstatic = 1,
deps = [
":spirv_tools",
":spirv_tools_lint_internal",
":spirv_tools_opt_internal",
"@com_google_googletest//:gtest",
"@com_google_googletest//:gtest_main",
],
) for f in glob(
["test/lint/*_test.cpp"],
)]
cc_library(
name = "opt_test_lib",
testonly = 1,
srcs = [
"test/opt/pass_utils.cpp",
],
hdrs = [
"test/opt/assembly_builder.h",
"test/opt/function_utils.h",
@ -417,143 +545,181 @@ cc_library(
"test/opt/pass_fixture.h",
"test/opt/pass_utils.h",
],
compatible_with = [],
copts = TEST_COPTS,
deps = [
":spirv_tools_internal",
":spirv_tools_opt_internal",
"@com_google_effcee//:effcee",
"@com_google_googletest//:gtest",
],
)
[cc_test(
name = "opt_{testcase}_test".format(testcase = f[len("test/opt/"):-len("_test.cpp")]),
size = "small",
srcs = [f],
copts = TEST_COPTS,
linkstatic = 1,
deps = [
":spirv_tools_opt",
":test_common",
":opt_test_lib",
":spirv_tools_internal",
":spirv_tools_opt_internal",
":test_lib",
"@com_google_effcee//:effcee",
"@com_google_googletest//:gtest",
"@com_google_googletest//:gtest_main",
],
)
) for f in glob(["test/opt/*_test.cpp"])]
cc_library(
name = "reduce_test_common",
testonly = 1,
srcs = [
"test/reduce/reduce_test_util.cpp",
"tools/io.h",
],
hdrs = ["test/reduce/reduce_test_util.h"],
compatible_with = [],
[cc_test(
name = "opt_dom_tree_{testcase}_test".format(testcase = f[len("test/opt/dominator_tree/"):-len(".cpp")]),
size = "small",
srcs = [f],
copts = TEST_COPTS,
linkstatic = 1,
deps = [
":spirv_tools_reduce",
":test_common",
":opt_test_lib",
":spirv_tools_opt_internal",
"@com_google_googletest//:gtest",
"@com_google_googletest//:gtest_main",
],
)
cc_library(
name = "val_test_common",
testonly = 1,
srcs = [
"test/val/val_code_generator.cpp",
"test/val/val_fixtures.h",
],
hdrs = [
"test/val/val_code_generator.h",
],
compatible_with = [],
copts = TEST_COPTS,
linkstatic = 1,
deps = [":test_common"],
)
# PCH (precompiled header) tests only work when using CMake and MSVC on Windows,
# so they will be skipped in the Bazel builds.
[base_test(
name = f[5:-4], # strip test/, .cpp
srcs = [f],
) for f in glob(
["test/*.cpp"],
exclude = [
"test/cpp_interface_test.cpp", # has its own base_test below.
"test/log_test.cpp", # has its own base_test below.
"test/pch_test.cpp", # pch tests are skipped.
"test/timer_test.cpp", # has its own base_test below.
],
)]
# This test uses unistd.h and does not run on Windows.
base_test(
name = "timer_test",
srcs = select({
"@bazel_tools//src/conditions:windows": [],
"//conditions:default": ["test/timer_test.cpp"],
}),
)
base_test(
name = "cpp_interface_test",
srcs = ["test/cpp_interface_test.cpp"],
deps = [":spirv_tools_opt"],
)
base_test(
name = "log_test",
srcs = ["test/log_test.cpp"],
deps = [":spirv_tools_opt"],
)
[link_test(
name = f[10:-4], # strip test/link/, .cpp
srcs = [f],
) for f in glob(
["test/link/*.cpp"],
)]
[lint_test(
name = f[10:-4], # strip test/lint/, .cpp
srcs = [f],
) for f in glob(
["test/lint/*.cpp"],
)]
[opt_test(
name = f[9:-4], # strip test/opt/, .cpp
srcs = [f],
) for f in glob(
["test/opt/*.cpp"],
# pch tests are skipped.
exclude = ["test/opt/pch_test_opt.cpp"],
)]
[opt_test(
name = "dom_tree_" + f[24:-4], # strip test/opt/dominator_tree/, .cpp
srcs = [f],
) for f in glob(
["test/opt/dominator_tree/*.cpp"],
# pch tests are skipped.
exclude = ["test/opt/dominator_tree/pch_test_opt_dom.cpp"],
)]
[opt_test(
name = "loop_" + f[28:-4], # strip test/opt/loop_optimizations/, .cpp
[cc_test(
name = "opt_loop_{testcase}_test".format(testcase = f[len("test/opt/loop_optimizations/"):-len(".cpp")]),
size = "small",
srcs = [f],
copts = TEST_COPTS,
linkstatic = 1,
deps = [
":opt_test_lib",
":spirv_tools",
":spirv_tools_opt_internal",
"@com_google_effcee//:effcee",
"@com_google_googletest//:gtest",
"@com_google_googletest//:gtest_main",
],
) for f in glob(
["test/opt/loop_optimizations/*.cpp"],
# pch tests are skipped.
exclude = ["test/opt/loop_optimizations/pch_test_opt_loop.cpp"],
)]
[reduce_test(
name = f[12:-4], # strip test/reduce/, .cpp
srcs = [f],
) for f in glob(["test/reduce/*.cpp"])]
cc_library(
name = "reduce_test_lib",
testonly = 1,
srcs = [
"test/reduce/reduce_test_util.cpp",
],
hdrs = ["test/reduce/reduce_test_util.h"],
copts = TEST_COPTS,
deps = [
":spirv_tools",
":spirv_tools_opt_internal",
":spirv_tools_reduce",
":test_lib",
":tools_io",
"@com_google_googletest//:gtest",
],
)
[util_test(
name = f[10:-4], # strip test/util/, .cpp
[cc_test(
name = "reduce_{testcase}_test".format(testcase = f[len("test/reduce/"):-len("_test.cpp")]),
size = "small",
srcs = [f],
) for f in glob(["test/util/*.cpp"])]
copts = TEST_COPTS,
linkstatic = 1,
deps = [
":reduce_test_lib",
":spirv_tools_internal",
":spirv_tools_opt_internal",
":spirv_tools_reduce",
"@com_google_googletest//:gtest_main",
],
) for f in glob(["test/reduce/*_test.cpp"])]
[val_test(
name = f[9:-4], # strip test/val/, .cpp
[cc_test(
name = "util_{testcase}_test".format(testcase = f[len("test/util/"):-len("_test.cpp")]),
size = "small",
srcs = [f],
copts = TEST_COPTS,
linkstatic = 1,
deps = [
":spirv_tools_internal",
"@com_google_googletest//:gtest",
"@com_google_googletest//:gtest_main",
],
) for f in glob(["test/util/*_test.cpp"])]
cc_library(
name = "val_test_lib",
testonly = 1,
srcs = [
"test/val/val_code_generator.cpp",
],
hdrs = [
"test/val/val_code_generator.h",
"test/val/val_fixtures.h",
],
copts = TEST_COPTS,
deps = [
":spirv_tools_internal",
":test_lib",
],
)
[cc_test(
name = "val_{testcase}_test".format(testcase = f[len("test/val/val_"):-len("_test.cpp")]),
size = "small",
srcs = [f],
copts = TEST_COPTS,
linkstatic = 1,
deps = [
":spirv_tools_internal",
":test_lib",
":val_test_lib",
"@com_google_googletest//:gtest",
"@com_google_googletest//:gtest_main",
],
) for f in glob(
["test/val/*.cpp"],
["test/val/val_*_test.cpp"],
exclude = [
"test/val/pch_test_val.cpp", # pch tests are skipped.
"test/val/val_capability_test.cpp",
"test/val/val_limits_test.cpp",
],
)]
cc_test(
name = "val_capability_test",
size = "large",
timeout = "long",
srcs = ["test/val/val_capability_test.cpp"],
copts = TEST_COPTS + ["-O3"],
linkstatic = 1,
deps = [
":spirv_tools_internal",
":test_lib",
":val_test_lib",
"@com_google_googletest//:gtest",
"@com_google_googletest//:gtest_main",
],
)
cc_test(
name = "val_limits_test",
size = "large",
timeout = "long",
srcs = ["test/val/val_limits_test.cpp"],
copts = TEST_COPTS + [
"-O3",
],
linkstatic = 1,
deps = [
":test_lib",
":val_test_lib",
"@com_google_googletest//:gtest",
"@com_google_googletest//:gtest_main",
],
)

BUILD.gn: 1024 changed lines (diff suppressed because it is too large)

CHANGES: 145 changed lines

@ -1,5 +1,150 @@
Revision history for SPIRV-Tools
v2023.6 2023-12-18
- General
- update_build_version.py produce deterministic header. (#5426)
- Support missing git in update_build_version.py (#5473)
- Optimizer
- Add ComputeDerivativeGroup*NV capabilities to trim capabilities pass. (#5430)
- Do not crash when trying to fold unsupported spec constant (#5496)
- instrument: Fix handling of gl_InvocationID (#5493)
- Fix nullptr argument in MarkInsertChain (#5465)
- opt: support 64-bit OpAccessChain index in FixStorageClass (#5446)
- opt: add StorageImageReadWithoutFormat to cap trim (#5475)
- opt: add PhysicalStorageBufferAddresses to trim (#5476)
- Fix array size calculation (#5463)
- Validator
- spirv-val: Loosen restriction on base type of DebugTypePointer and DebugTypeQualifier (#5479)
- spirv-val: Add WorkgroupMemoryExplicitLayoutKHR check for Block (#5461)
v2023.5 2023-10-15
- General
- Support 2 Intel extensions (#5357)
- SPV_QCOM_image_processing support (#5223)
- Optimizer
- opt: fix StorageInputOutput16 trimming. (#5359)
- opt: add StoragePushConstant16 to trim pass (#5366)
- opt: enable StorageUniform16 (#5371)
- opt: add bitmask support for capability trimming (#5372)
- opt: Add SwitchDescriptorSetPass (#5375)
- opt: add FragmentShader*InterlockEXT to capability trim pass (#5390)
- opt: add Int64 capability to trim pass (#5398)
- opt: add Float64 capability to trim pass (#5428)
- opt: add raytracing/rayquery to trim pass (#5397)
- opt: add ImageMSArray capability to trim pass. (#5395)
- Add SPV_KHR_physical_storage_buffer to allowlists (#5402)
- Add SPV_EXT_fragment_shader_interlock to allow lists (#5393)
- Make sure that fragment shader interlock instructions are not removed by DCE (#5400)
- instrument: Use Import linkage for instrumentation functions (#5355)
- Add a new legalization pass to dedupe invocation interlock instructions (#5409)
- instrument: Ensure linking works even if nothing is changed (#5419)
- Validator
- Move token version/cap/ext checks from parsing to validation (#5370)
- val: re-add ImageMSArray validation (#5394)
- Linker
- linker: Add --use-highest-version option
v2023.4 2023-07-17
- General
- Set cmake_policy CMP0128 (#5341)
- Add python3 requirement for the script (#5326)
- Add support for LiteralFloat type (#5323)
- SPV_KHR_cooperative_matrix (#5286)
- Allow OpTypeBool in UniformConstant (#5237)
- Allow physical storage buffer pointer in IO (#5251)
- Remove const zero image operands (#5232)
- Optimizer
- Enable vector constant folding (#4913) (#5272)
- Fold negation of integer vectors (#5269)
- Add folding rule for OpTranspose (#5241)
- Add SPV_NV_bindless_texture to spirv optimizations (#5231)
- Fix incorrect half float conversion (#5349)
- Add SPV_EXT_shader_atomic_float_add to allow lists (#5348)
- Instrument
- instrument: Cast gl_VertexIndex and InstanceIndex to uint (#5319)
- instrument: Fix buffer address length calculations (#5257)
- instrument: Reduce number of inst_bindless_stream_write_6 calls (#5327)
- Validator
- Validate GroupNonUniform instructions (#5296)
- spirv-val: Label SPV_KHR_cooperative_matrix VUID (#5301)
- Validate layouts for PhysicalStorageBuffer pointers (#5291)
- spirv-val: Remove VUID from 1.3.251 spec (#5244)
- Diff
- spirv-diff: Update test expectations (#5264)
- spirv-diff: Leave undefined ids unpaired. (#5262)
- spirv-diff: Properly match SPV_KHR_ray_query types. (#5259)
- diff: Don't give up entry point matching too early. (#5224)
v2023.3 2023-05-15
- General
- Update spirv_headers to include SPV_KHR_ray_tracing_position_fetch (#5205)
- spirv-tools: Add support for QNX (#5211)
- build: set std=c++17 for BUILD.gn (#5162)
- Optimizer
- Run ADCE when the printf extension is used. (#5215)
- Don't convert struct members to half (#5201)
- Apply scalar replacement on vars with Pointer decorations (#5208)
- opt: Fix null deref in OpMatrixTimesVector and OpVectorTimesMatrix (#5199)
- instrument: Add set and binding to bindless error records (#5204)
- instrument: Change descriptor state storage format (#5178)
- Fix LICMPass (#5087)
- Add Vulkan memory model to allow lists (#5173)
- Do not remove control barrier after spv1.3 (#5174)
- Validator
- spirv-val: Label Interface Location/Component VUIDs (#5221)
- Add support for SPV_EXT_shader_tile_image (#5188)
- Fix vector OpConstantComposite type validation (#5191)
- spirv-val: Label new Vulkan VUID 07951 (#5154)
- Fuzz
- Do not define GOOGLE_PROTOBUF_INTERNAL_DONATE_STEAL_INLINE if it is already defined. (#5200)
v2023.2 2023-03-10
- General
- build: move from c++11 to c++17 (#4983)
- tools: refactorize tools flags parsing. (#5111)
- Add C interface for Optimizer (#5030)
- libspirv.cpp: adds c++ api for spvBinaryParse (#5109)
- build: change the way we set cxx version for bazel. (#5114)
- Optimizer
- Fix null pointer in FoldInsertWithConstants. (#5093)
- Fix removal of dependent non-semantic instructions (#5122)
- Remove duplicate lists of constant and type opcodes (#5106)
- opt: fix spirv ABI on Linux again. (#5113)
- Validator
- Validate decoration of structs with RuntimeArray (#5094)
- Validate operand type before operating on it (#5092)
- spirv-val: Conditional Branch without an exit is invalid in loop header (#5069)
- spirv-val: Initial SPV_EXT_mesh_shader builtins (#5080)
v2023.1 2023-01-17
- General
- Renamed "master" to "main" (issue#5051)
- Validate version 5 of clspv reflection (#5050)
- Remove testing support for VS2015 (#5027)
- Fix undef behaviour in hex float parsing (#5025)
- Require C++11 *or later* (#5020)
- Instrument
- Instrument: Fix bindless checking for BufferDeviceAddress (#5049)
- Optimizer
- Optimize allocation of spvtools::opt::Instruction::operands_ (#5024)
- spirv-opt: Fix OpCompositeInsert with Null Constant (#5008)
- spirv-opt: Handle null CompositeInsert (#4998)
- Add option to ADCE to remove output variables from interface. (#4994)
- Add support for tesc, tese and geom to EliminateDead*Components (#4990)
- Add pass to eliminate dead output components (#4982)
- spirv-opt: Add const folding for CompositeInsert (#4943)
- Add passes to eliminate dead output stores (#4970)
- Prevent eliminating case constructs in block merging (#4976)
- Validator
- Fix layout validation (#5015)
- Fix use of invalid analysis (#5013)
- Fix infinite loop in validator (#5006)
- Add validation support for SPV_NV_shader_invocation_reorder. (#4979)
- Only validate full layout in Vulkan environments (#4972)
- spirv-val: Label new Vulkan OpPtrAccessChain VUs (#4975)
- spirv-val: Add OpPtrAccessChain Base checks (#4965)
v2022.4 2022-10-12
- General
- Support Narrow Types in BitCast Folding Rule (#4941)

View File

@ -1,4 +1,4 @@
# Copyright (c) 2015-2016 The Khronos Group Inc.
# Copyright (c) 2015-2023 The Khronos Group Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -12,26 +12,38 @@
# See the License for the specific language governing permissions and
# limitations under the License.
cmake_minimum_required(VERSION 2.8.12)
if (POLICY CMP0048)
cmake_policy(SET CMP0048 NEW)
endif()
if (POLICY CMP0054)
# Avoid dereferencing variables or interpret keywords that have been
# quoted or bracketed.
# https://cmake.org/cmake/help/v3.1/policy/CMP0054.html
cmake_policy(SET CMP0054 NEW)
endif()
set_property(GLOBAL PROPERTY USE_FOLDERS ON)
cmake_minimum_required(VERSION 3.17.2)
project(spirv-tools)
# Avoid a bug in CMake 3.22.1. By default it will set -std=c++11 for
# targets in test/*, when those tests need -std=c++17.
# https://github.com/KhronosGroup/SPIRV-Tools/issues/5340
# The bug is fixed in CMake 3.22.2
if (${CMAKE_VERSION} VERSION_GREATER_EQUAL "3.22.1")
if (${CMAKE_VERSION} VERSION_LESS "3.22.2")
cmake_policy(SET CMP0128 NEW)
endif()
endif()
set_property(GLOBAL PROPERTY USE_FOLDERS ON)
enable_testing()
set(SPIRV_TOOLS "SPIRV-Tools")
include(GNUInstallDirs)
set(CMAKE_POSITION_INDEPENDENT_CODE ON)
set(CMAKE_CXX_STANDARD 11)
# Require at least C++17
if(NOT CMAKE_CXX_STANDARD)
set(CMAKE_CXX_STANDARD 17)
endif()
if(${CMAKE_CXX_STANDARD} LESS 17)
message(FATAL_ERROR "SPIRV-Tools requires C++17 or later, but is configured for C++${CMAKE_CXX_STANDARD})")
endif()
set(CMAKE_CXX_EXTENSIONS OFF)
option(ENABLE_RTTI "Enables RTTI" OFF)
option(SPIRV_ALLOW_TIMERS "Allow timers via clock_gettime on supported platforms" ON)
@ -51,6 +63,8 @@ elseif("${CMAKE_SYSTEM_NAME}" STREQUAL "iOS")
add_definitions(-DSPIRV_IOS)
elseif("${CMAKE_SYSTEM_NAME}" STREQUAL "tvOS")
add_definitions(-DSPIRV_TVOS)
elseif("${CMAKE_SYSTEM_NAME}" STREQUAL "visionOS")
add_definitions(-DSPIRV_VISIONOS)
elseif("${CMAKE_SYSTEM_NAME}" STREQUAL "Android")
add_definitions(-DSPIRV_ANDROID)
set(SPIRV_TIMER_ENABLED ${SPIRV_ALLOW_TIMERS})
@ -62,6 +76,8 @@ elseif("${CMAKE_SYSTEM_NAME}" STREQUAL "Fuchsia")
add_definitions(-DSPIRV_FUCHSIA)
elseif("${CMAKE_SYSTEM_NAME}" STREQUAL "GNU")
add_definitions(-DSPIRV_GNU)
elseif("${CMAKE_SYSTEM_NAME}" STREQUAL "QNX")
add_definitions(-DSPIRV_QNX)
else()
message(FATAL_ERROR "Your platform '${CMAKE_SYSTEM_NAME}' is not supported!")
endif()
@ -188,10 +204,9 @@ function(spvtools_default_compile_options TARGET)
target_compile_options(${TARGET} PRIVATE ${SPIRV_WARNINGS})
if (${COMPILER_IS_LIKE_GNU})
target_compile_options(${TARGET} PRIVATE -std=c++11 -fno-exceptions)
target_compile_options(${TARGET} PRIVATE
-Wall -Wextra -Wno-long-long -Wshadow -Wundef -Wconversion
-Wno-sign-conversion)
-Wno-sign-conversion -fno-exceptions)
if(NOT ENABLE_RTTI)
add_compile_options(-fno-rtti)
@ -200,7 +215,7 @@ function(spvtools_default_compile_options TARGET)
if(NOT "${SPIRV_PERF}" STREQUAL "")
target_compile_options(${TARGET} PRIVATE -fno-omit-frame-pointer)
endif()
if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang")
if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang" OR "${CMAKE_CXX_COMPILER_ID}" STREQUAL "AppleClang")
set(SPIRV_USE_SANITIZER "" CACHE STRING
"Use the clang sanitizer [address|memory|thread|...]")
if(NOT "${SPIRV_USE_SANITIZER}" STREQUAL "")
@ -228,7 +243,7 @@ function(spvtools_default_compile_options TARGET)
# For MinGW cross compile, statically link to the C++ runtime.
# But it still depends on MSVCRT.dll.
if (${CMAKE_SYSTEM_NAME} MATCHES "Windows")
if (${CMAKE_CXX_COMPILER_ID} MATCHES "GNU")
if (NOT MSVC)
set_target_properties(${TARGET} PROPERTIES
LINK_FLAGS -static -static-libgcc -static-libstdc++)
endif()
@ -247,7 +262,7 @@ if(NOT COMMAND find_host_program)
endif()
# Tests require Python3
find_host_package(PythonInterp 3 REQUIRED)
find_host_package(Python3 REQUIRED)
# Check for symbol exports on Linux.
# At the moment, this check will fail on the OSX build machines for the Android NDK.
@ -256,7 +271,7 @@ if("${CMAKE_SYSTEM_NAME}" STREQUAL "Linux")
macro(spvtools_check_symbol_exports TARGET)
if (NOT "${SPIRV_SKIP_TESTS}")
add_test(NAME spirv-tools-symbol-exports-${TARGET}
COMMAND ${PYTHON_EXECUTABLE}
COMMAND Python3::Interpreter
${spirv-tools_SOURCE_DIR}/utils/check_symbol_exports.py "$<TARGET_FILE:${TARGET}>")
endif()
endmacro()
@ -269,7 +284,7 @@ else()
endif()
if(ENABLE_SPIRV_TOOLS_INSTALL)
if(WIN32)
if(WIN32 AND NOT MINGW)
macro(spvtools_config_package_dir TARGET PATH)
set(${PATH} ${TARGET}/cmake)
endmacro()
@ -289,15 +304,23 @@ if(ENABLE_SPIRV_TOOLS_INSTALL)
endmacro()
endif()
# Defaults to OFF if the user didn't set it.
option(SPIRV_SKIP_EXECUTABLES
"Skip building the executable and tests along with the library"
${SPIRV_SKIP_EXECUTABLES})
option(SPIRV_SKIP_TESTS
"Skip building tests along with the library" ${SPIRV_SKIP_TESTS})
if ("${SPIRV_SKIP_EXECUTABLES}")
# Currently iOS and Android are very similar.
# They both have their own packaging (APP/APK).
# Which makes regular executables/testing problematic.
#
# Currently the only deliverables for these platforms are
# libraries (either STATIC or SHARED).
#
# Furthermore testing is equally problematic.
if (IOS OR ANDROID)
set(SPIRV_SKIP_EXECUTABLES ON)
endif()
option(SPIRV_SKIP_EXECUTABLES "Skip building the executable and tests along with the library")
if (SPIRV_SKIP_EXECUTABLES)
set(SPIRV_SKIP_TESTS ON)
endif()
option(SPIRV_SKIP_TESTS "Skip building tests along with the library")
# Defaults to ON. The checks can be time consuming.
# Turn off if they take too long.
@ -355,7 +378,7 @@ endif(ENABLE_SPIRV_TOOLS_INSTALL)
if (NOT "${SPIRV_SKIP_TESTS}")
add_test(NAME spirv-tools-copyrights
COMMAND ${PYTHON_EXECUTABLE} utils/check_copyright.py
COMMAND Python3::Interpreter utils/check_copyright.py
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR})
endif()

View File

@ -2,9 +2,8 @@
## For users: Reporting bugs and requesting features
We organize known future work in GitHub projects. See [Tracking SPIRV-Tools work
with GitHub
projects](https://github.com/KhronosGroup/SPIRV-Tools/blob/master/docs/projects.md)
We organize known future work in GitHub projects. See
[Tracking SPIRV-Tools work with GitHub projects](https://github.com/KhronosGroup/SPIRV-Tools/blob/main/docs/projects.md)
for more.
To report a new bug or request a new feature, please file a GitHub issue. Please
@ -36,9 +35,9 @@ create a new issue, as with bugs. In the issue provide
## For developers: Contributing a patch
Before we can use your code, you must sign the [Khronos Open Source Contributor
License Agreement](https://cla-assistant.io/KhronosGroup/SPIRV-Tools) (CLA),
which you can do online. The CLA is necessary mainly because you own the
Before we can use your code, you must sign the
[Khronos Open Source Contributor License Agreement](https://cla-assistant.io/KhronosGroup/SPIRV-Tools)
(CLA), which you can do online. The CLA is necessary mainly because you own the
copyright to your changes, even after your contribution becomes part of our
codebase, so we need your permission to use and distribute your code. We also
need to be sure of various other things -- for instance that you'll tell us if
@ -47,20 +46,20 @@ sign the CLA until after you've submitted your code for review and a member has
approved it, but you must do it before we can put your code into our codebase.
See
[README.md](https://github.com/KhronosGroup/SPIRV-Tools/blob/master/README.md)
[README.md](https://github.com/KhronosGroup/SPIRV-Tools/blob/main/README.md)
for instruction on how to get, build, and test the source. Once you have made
your changes:
* Ensure the code follows the [Google C++ Style
Guide](https://google.github.io/styleguide/cppguide.html). Running
`clang-format -style=file -i [modified-files]` can help.
* Ensure the code follows the
[Google C++ Style Guide](https://google.github.io/styleguide/cppguide.html).
Running `clang-format -style=file -i [modified-files]` can help.
* Create a pull request (PR) with your patch.
* Make sure the PR description clearly identifies the problem, explains the
solution, and references the issue if applicable.
* If your patch completely fixes bug 1234, the commit message should say
`Fixes https://github.com/KhronosGroup/SPIRV-Tools/issues/1234`
When you do this, the issue will be closed automatically when the commit
goes into master. Also, this helps us update the [CHANGES](CHANGES) file.
`Fixes https://github.com/KhronosGroup/SPIRV-Tools/issues/1234` When you do
this, the issue will be closed automatically when the commit goes into
main. Also, this helps us update the [CHANGES](CHANGES) file.
* Watch the continuous builds to make sure they pass.
* Request a code review.
@ -82,8 +81,8 @@ Instructions for this are given below.
The formal code reviews are done on GitHub. Reviewers are to look for all of the
usual things:
* Coding style follows the [Google C++ Style
Guide](https://google.github.io/styleguide/cppguide.html)
* Coding style follows the
[Google C++ Style Guide](https://google.github.io/styleguide/cppguide.html)
* Identify potential functional problems.
* Identify code duplication.
* Ensure the unit tests have enough coverage.
@ -102,84 +101,49 @@ should pay particular attention to:
updated. For example, a new instruction is added, but the def-use manager is
not updated. Later on, it is possible that the def-use manager will be used,
and give wrong results.
* If a pass gets the id of a type from the type manager, make sure the type is
not a struct or array. If there are two structs that look the same, the type
manager can return the wrong one.
## For maintainers: Merging a PR
We intend to maintain a linear history on the GitHub master branch, and the
We intend to maintain a linear history on the GitHub main branch, and the
build and its tests should pass at each commit in that history. A linear
always-working history is easier to understand and to bisect in case we want to
find which commit introduced a bug.
find which commit introduced a bug. PRs are merged with the
[Squash and Merge](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/incorporating-changes-from-a-pull-request/about-pull-request-merges#squash-and-merge-your-commits)
button on the GitHub web interface. All other ways of merging on the web
interface have been disabled.
### Initial merge setup
Before merging, we generally require:
The following steps should be done exactly once (when you are about to merge a
PR for the first time):
1. All tests except for the smoke test pass. See
[failing smoke test](#failing-smoke-test).
1. The PR is approved by at least one of the maintainers. If the PR modifies
different parts of the code, then multiple reviewers might be necessary.
* It is assumed that upstream points to
[git@github.com](mailto:git@github.com):KhronosGroup/SPIRV-Tools.git or
https://github.com/KhronosGroup/SPIRV-Tools.git.
The squash-and-merge button will turn green when these requirements are met.
Maintainers have the power to merge even if the button is not green, but that
is discouraged.
* Find out the local name for the main github repo in your git configuration.
For example, in this configuration, it is labeled `upstream`.
### Failing smoke test
```
git remote -v
[ ... ]
upstream https://github.com/KhronosGroup/SPIRV-Tools.git (fetch)
upstream https://github.com/KhronosGroup/SPIRV-Tools.git (push)
```
The purpose of the smoke test is to let us know if
[shaderc](https://github.com/google/shaderc) fails to build with the change. If
it fails, the maintainer needs to determine if the reason for the failure is a
problem in the current PR or if another repository needs to be changed. Most of
the time [Glslang](https://github.com/KhronosGroup/glslang) needs to be updated
to account for the change in SPIR-V Tools.
* Make sure that the `upstream` remote is set to fetch from the `refs/pull`
namespace:
The PR can still be merged if the problem is not with that PR.
```
git config --get-all remote.upstream.fetch
+refs/heads/*:refs/remotes/upstream/*
+refs/pull/*/head:refs/remotes/upstream/pr/*
```
## For maintainers: Running tests
* If the line `+refs/pull/*/head:refs/remotes/upstream/pr/*` is not present in
your configuration, you can add it with the command:
For security reasons, not all tests will run automatically. When they do not, a
maintainer will have to start the tests.
```
git config --local --add remote.upstream.fetch '+refs/pull/*/head:refs/remotes/upstream/pr/*'
```
If the GitHub Actions tests do not run on a PR, they can be initiated by closing
and reopening the PR.
### Merge workflow
The following steps should be done for every PR that you intend to merge:
* Make sure your local copy of the master branch is up to date:
```
git checkout master
git pull
```
* Fetch all pull requests refs:
```
git fetch upstream
```
* Checkout the particular pull request you are going to review:
```
git checkout pr/1048
```
* Rebase the PR on top of the master branch. If there are conflicts, send it
back to the author and ask them to rebase. During the interactive rebase be
sure to squash all of the commits down to a single commit.
```
git rebase -i master
```
* **Build and test the PR.**
* If all of the tests pass, push the commit `git push upstream HEAD:master`
* Close the PR and add a comment saying it was push using the commit that you
just pushed. See https://github.com/KhronosGroup/SPIRV-Tools/pull/935 as an
example.
If the kokoro tests are not run, they can be run by adding the label
`kokoro:run` to the PR.

DEPS: 21 changed lines

@ -3,19 +3,32 @@ use_relative_paths = True
vars = {
'github': 'https://github.com',
'effcee_revision': '35912e1b7778ec2ddcff7e7188177761539e59e0',
'googletest_revision': 'd9bb8412d60b993365abb53f00b6dad9b2c01b62',
're2_revision': 'd2836d1b1c34c4e330a85a1006201db474bf2c8a',
'spirv_headers_revision': '85a1ed200d50660786c1a88d9166e871123cce39',
'abseil_revision': '79ca5d7aad63973c83a4962a66ab07cd623131ea',
'effcee_revision': '19b4aa87af25cb4ee779a071409732f34bfc305c',
'googletest_revision': 'b10fad38c4026a29ea6561ab15fc4818170d1c10',
# Use protobufs before they gained the dependency on abseil
'protobuf_revision': 'v21.12',
're2_revision': '7e0c1a9e2417e70e5f0efc323267ac71d1fa0685',
'spirv_headers_revision': '1c6bb2743599e6eb6f37b2969acc0aef812e32e3',
}
deps = {
'external/abseil_cpp':
Var('github') + '/abseil/abseil-cpp.git@' + Var('abseil_revision'),
'external/effcee':
Var('github') + '/google/effcee.git@' + Var('effcee_revision'),
'external/googletest':
Var('github') + '/google/googletest.git@' + Var('googletest_revision'),
'external/protobuf':
Var('github') + '/protocolbuffers/protobuf.git@' + Var('protobuf_revision'),
'external/re2':
Var('github') + '/google/re2.git@' + Var('re2_revision'),

View File

@ -18,6 +18,8 @@ See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into depot_tools.
"""
USE_PYTHON3 = True
LINT_FILTERS = [
"-build/storage_class",
"-readability/casting",

View File

@ -3,7 +3,7 @@
"Name": "spirv-tools",
"License": "Apache-2.0",
"License File": "LICENSE",
"Version Number": "v2022.4",
"Version Number": "sdk-1.3.275.0",
"Owner": "zhangleiyu1@huawei.com",
"Upstream URL": "https://github.com/KhronosGroup/SPIRV-Tools.git",
"Description": "The SPIR-V Tools project provides an API and commands for processing SPIR-V modules."

View File

@ -1,4 +1,7 @@
# SPIR-V Tools
[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/KhronosGroup/SPIRV-Tools/badge)](https://securityscorecards.dev/viewer/?uri=github.com/KhronosGroup/SPIRV-Tools)
NEWS 2023-01-11: Development occurs on the `main` branch.
## Overview
@ -23,7 +26,7 @@ headers, and XML registry.
<img alt="Linux" src="kokoro/img/linux.png" width="20px" height="20px" hspace="2px"/>[![Linux Build Status](https://storage.googleapis.com/spirv-tools/badges/build_status_linux_clang_release.svg)](https://storage.googleapis.com/spirv-tools/badges/build_link_linux_clang_release.html)
<img alt="MacOS" src="kokoro/img/macos.png" width="20px" height="20px" hspace="2px"/>[![MacOS Build Status](https://storage.googleapis.com/spirv-tools/badges/build_status_macos_clang_release.svg)](https://storage.googleapis.com/spirv-tools/badges/build_link_macos_clang_release.html)
<img alt="Windows" src="kokoro/img/windows.png" width="20px" height="20px" hspace="2px"/>[![Windows Build Status](https://storage.googleapis.com/spirv-tools/badges/build_status_windows_release.svg)](https://storage.googleapis.com/spirv-tools/badges/build_link_windows_vs2017_release.html)
<img alt="Windows" src="kokoro/img/windows.png" width="20px" height="20px" hspace="2px"/>[![Windows Build Status](https://storage.googleapis.com/spirv-tools/badges/build_status_windows_release.svg)](https://storage.googleapis.com/spirv-tools/badges/build_link_windows_vs2019_release.html)
[More downloads](docs/downloads.md)
@ -96,10 +99,10 @@ and in-progress work.
*Note*: The validator checks some Universal Limits, from section 2.17 of the SPIR-V spec.
The validator will fail on a module that exceeds those minimum upper bound limits.
It is [future work](https://github.com/KhronosGroup/SPIRV-Tools/projects/1#card-1052403)
to parameterize the validator to allow larger
limits accepted by a more than minimally capable SPIR-V consumer.
The validator has been parameterized to allow larger values, for use when targeting
a more-than-minimally-capable SPIR-V consumer.
See [`tools/val/val.cpp`](tools/val/val.cpp) or run `spirv-val --help` for the command-line help.
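For illustration, a minimal sketch of raising one of those limits through the C++ validator options (the helper name and the chosen limit value below are only examples, not part of the repository):

```cpp
#include <cstdint>
#include <vector>

#include "spirv-tools/libspirv.hpp"

// Hypothetical helper: validate a module while accepting more struct members
// than the spec-mandated minimum limit of 16383.
bool ValidateWithRaisedLimit(const std::vector<uint32_t>& binary) {
  spvtools::SpirvTools tools(SPV_ENV_VULKAN_1_3);
  spvtools::ValidatorOptions options;
  options.SetUniversalLimit(spv_validator_limit_max_struct_members, 65535u);
  return tools.Validate(binary.data(), binary.size(), options);
}
```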
### Optimizer
@ -271,7 +274,7 @@ Contributions via merge request are welcome. Changes should:
`clang-format version 5.0.0` for SPIRV-Tools. Settings are defined by
the included [.clang-format](.clang-format) file.
We intend to maintain a linear history on the GitHub `master` branch.
We intend to maintain a linear history on the GitHub `main` branch.
### Getting the source
@ -290,16 +293,18 @@ For some kinds of development, you may need the latest sources from the third-pa
git clone https://github.com/google/googletest.git spirv-tools/external/googletest
git clone https://github.com/google/effcee.git spirv-tools/external/effcee
git clone https://github.com/google/re2.git spirv-tools/external/re2
git clone https://github.com/abseil/abseil-cpp.git spirv-tools/external/abseil_cpp
#### Dependency on Effcee
Some tests depend on the [Effcee][effcee] library for stateful matching.
Effcee itself depends on [RE2][re2].
Effcee itself depends on [RE2][re2], and RE2 depends on [Abseil][abseil-cpp].
* If SPIRV-Tools is configured as part of a larger project that already uses
Effcee, then that project should include Effcee before SPIRV-Tools.
* Otherwise, SPIRV-Tools expects Effcee sources to appear in `external/effcee`
and RE2 sources to appear in `external/re2`.
* Otherwise, SPIRV-Tools expects Effcee sources to appear in `external/effcee`,
RE2 sources to appear in `external/re2`, and Abseil sources to appear in
`external/abseil_cpp`.
### Source code organization
@ -311,6 +316,9 @@ Effcee itself depends on [RE2][re2].
* `external/re2`: Location of [RE2][re2] sources, if the `re2` library is not already
configured by an enclosing project.
(The Effcee project already requires RE2.)
* `external/abseil_cpp`: Location of [Abseil][abseil-cpp] sources, if Abseil is
not already configured by an enclosing project.
(The RE2 project already requires Abseil.)
* `include/`: API clients should add this directory to the include search path
* `external/spirv-headers`: Intended location for
[SPIR-V headers][spirv-headers], not provided
@ -378,10 +386,11 @@ fuzzer tests.
### Build using Bazel
You can also use [Bazel](https://bazel.build/) to build the project.
```sh
cd <spirv-dir>
bazel build :all
```
### Build a node.js package using Emscripten
The SPIRV-Tools core library can be built to a WebAssembly [node.js](https://nodejs.org)
@ -432,10 +441,13 @@ On MacOS
- AppleClang 11.0
On Windows
- Visual Studio 2015
- Visual Studio 2017
- Visual Studio 2019
- Visual Studio 2022
Other compilers or later versions may work, but they are not tested.
Note: Visual Studio 2017 has incomplete c++17 support. We might stop
testing it soon. Other compilers or later versions may work, but they are not
tested.
### CMake options
@ -467,12 +479,12 @@ iterator debugging.
### Android ndk-build
SPIR-V Tools supports building static libraries `libSPIRV-Tools.a` and
`libSPIRV-Tools-opt.a` for Android:
`libSPIRV-Tools-opt.a` for Android. Using the Android NDK r25c or later:
```
cd <spirv-dir>
export ANDROID_NDK=/path/to/your/ndk
export ANDROID_NDK=/path/to/your/ndk # NDK r25c or later
mkdir build && cd build
mkdir libs
@ -496,7 +508,7 @@ The script requires Chromium's
### Usage
The internals of the library use C++11 features, and are exposed via both a C
The internals of the library use C++17 features, and are exposed via both a C
and C++ API.
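As a minimal sketch of the C++ side (the tiny assembly module and the target environment below are only illustrative):

```cpp
#include <cstdio>
#include <string>
#include <vector>

#include "spirv-tools/libspirv.hpp"

int main() {
  spvtools::SpirvTools core(SPV_ENV_UNIVERSAL_1_6);
  core.SetMessageConsumer([](spv_message_level_t, const char*,
                             const spv_position_t&, const char* message) {
    std::fprintf(stderr, "%s\n", message);
  });

  // A tiny module; the Linkage capability lets it validate without an entry point.
  const std::string text = R"(
               OpCapability Shader
               OpCapability Linkage
               OpMemoryModel Logical GLSL450
  )";

  std::vector<uint32_t> binary;
  if (!core.Assemble(text, &binary)) return 1;  // assembly text -> SPIR-V words
  return core.Validate(binary) ? 0 : 1;         // run the validator on the result
}
```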
In order to use the library from an application, the include path should point
@ -718,10 +730,16 @@ Use `bazel test :all` to run all tests. This will run tests in parallel by defau
To run a single test target, specify `:my_test_target` instead of `:all`. Test target
names get printed when you run `bazel test :all`. For example, you can run
`opt_def_use_test` with:
on linux:
```shell
bazel test :opt_def_use_test
bazel test --cxxopt=-std=c++17 :opt_def_use_test
```
on windows:
```shell
bazel test --cxxopt=/std:c++17 :opt_def_use_test
```
## Future Work
<a name="future"></a>
@ -779,6 +797,7 @@ limitations under the License.
[googletest-issue-610]: https://github.com/google/googletest/issues/610
[effcee]: https://github.com/google/effcee
[re2]: https://github.com/google/re2
[abseil-cpp]: https://github.com/abseil/abseil-cpp
[CMake]: https://cmake.org/
[cpp-style-guide]: https://google.github.io/styleguide/cppguide.html
[clang-sanitizers]: http://clang.llvm.org/docs/UsersManual.html#controlling-code-generation

SECURITY.md (new executable file): 13 changed lines

@ -0,0 +1,13 @@
# Security Policy
## Supported Versions
Security updates are applied only to the latest release.
## Reporting a Vulnerability
If you have discovered a security vulnerability in this project, please report it privately. **Do not disclose it as a public issue.** This gives us time to work with you to fix the issue before public exposure, reducing the chance that the exploit will be used before a patch is released.
Please disclose it at [security advisory](https://github.com/KhronosGroup/SPIRV-Tools/security/advisories/new).
This project is maintained by a team of volunteers on a reasonable-effort basis. As such, please give us at least 90 days to work on a fix before public exposure.

View File

@ -1,3 +1,11 @@
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
http_archive(
name = "bazel_skylib",
strip_prefix = "bazel-skylib-main",
urls = ["https://github.com/bazelbuild/bazel-skylib/archive/main.zip"],
)
local_repository(
name = "spirv_headers",
path = "external/spirv-headers",
@ -17,3 +25,8 @@ local_repository(
name = "com_google_effcee",
path = "external/effcee",
)
local_repository(
name = "com_google_absl",
path = "external/abseil_cpp",
)

View File

@ -5,7 +5,7 @@ LOCAL_CPP_EXTENSION := .cc .cpp .cxx
LOCAL_SRC_FILES:=test.cpp
LOCAL_MODULE:=spirvtools_test
LOCAL_LDLIBS:=-landroid
LOCAL_CXXFLAGS:=-std=c++11 -fno-exceptions -fno-rtti -Werror
LOCAL_CXXFLAGS:=-std=c++17 -fno-exceptions -fno-rtti -Werror
LOCAL_STATIC_LIBRARIES=SPIRV-Tools SPIRV-Tools-opt
include $(BUILD_SHARED_LIBRARY)

View File

@ -1,5 +1,5 @@
APP_ABI := all
APP_BUILD_SCRIPT := Android.mk
APP_STL := c++_static
APP_PLATFORM := android-9
APP_PLATFORM := android-24
NDK_TOOLCHAIN_VERSION := 4.9

View File

@ -1,20 +1,21 @@
"""Constants and macros for spirv-tools BUILD."""
COMMON_COPTS = [
"-DSPIRV_CHECK_CONTEXT",
"-DSPIRV_COLOR_TERMINAL",
] + select({
"@bazel_tools//src/conditions:windows": [""],
"-DSPIRV_CHECK_CONTEXT",
"-DSPIRV_COLOR_TERMINAL",
] + select({
"@platforms//os:windows": [],
"//conditions:default": [
"-DSPIRV_LINUX",
"-DSPIRV_TIMER_ENABLED",
"-fvisibility=hidden",
"-fno-exceptions",
"-fno-rtti",
"-Wall",
"-Wextra",
"-Wnon-virtual-dtor",
"-Wno-missing-field-initializers",
"-Werror",
"-std=c++11",
"-fvisibility=hidden",
"-fno-exceptions",
"-fno-rtti",
"-Wno-long-long",
"-Wshadow",
"-Wundef",
@ -23,324 +24,211 @@ COMMON_COPTS = [
],
})
TEST_COPTS = COMMON_COPTS + select({
"@bazel_tools//src/conditions:windows": [
TEST_COPTS = COMMON_COPTS + [
] + select({
"@platforms//os:windows": [
# Disable C4503 "decorated name length exceeded" warning,
# triggered by some heavily templated types.
# We don't care much about that in test code.
# Important to do since we have warnings-as-errors.
"/wd4503"
"/wd4503",
],
"//conditions:default": [
"-Wno-undef",
"-Wno-self-assign",
"-Wno-shadow",
"-Wno-unused-parameter"
"-Wno-unused-parameter",
],
})
def incompatible_with(incompatible_constraints):
return select(_merge_dicts([{"//conditions:default": []}, {
constraint: ["@platforms//:incompatible"]
for constraint in incompatible_constraints
}]))
DEBUGINFO_GRAMMAR_JSON_FILE = "@spirv_headers//:spirv_ext_inst_debuginfo_grammar_unified1"
CLDEBUGINFO100_GRAMMAR_JSON_FILE = "@spirv_headers//:spirv_ext_inst_opencl_debuginfo_100_grammar_unified1"
SHDEBUGINFO100_GRAMMAR_JSON_FILE = "@spirv_headers//:spirv_ext_inst_nonsemantic_shader_debuginfo_100_grammar_unified1"
def generate_core_tables(version = None):
def _merge_dicts(dicts):
merged = {}
for d in dicts:
merged.update(d)
return merged
def generate_core_tables(version):
if not version:
fail("Must specify version", "version")
grammars = [
"@spirv_headers//:spirv_core_grammar_" + version,
DEBUGINFO_GRAMMAR_JSON_FILE,
CLDEBUGINFO100_GRAMMAR_JSON_FILE,
]
outs = [
"core.insts-{}.inc".format(version),
"operand.kinds-{}.inc".format(version),
]
fmtargs = grammars + outs
grammars = dict(
core_grammar = "@spirv_headers//:spirv_core_grammar_{}".format(version),
debuginfo_grammar = DEBUGINFO_GRAMMAR_JSON_FILE,
cldebuginfo_grammar = CLDEBUGINFO100_GRAMMAR_JSON_FILE,
)
outs = dict(
core_insts_output = "core.insts-{}.inc".format(version),
operand_kinds_output = "operand.kinds-{}.inc".format(version),
)
cmd = (
"$(location :generate_grammar_tables)" +
" --spirv-core-grammar=$(location {core_grammar})" +
" --extinst-debuginfo-grammar=$(location {debuginfo_grammar})" +
" --extinst-cldebuginfo100-grammar=$(location {cldebuginfo_grammar})" +
" --core-insts-output=$(location {core_insts_output})" +
" --operand-kinds-output=$(location {operand_kinds_output})" +
" --output-language=c++"
).format(**_merge_dicts([grammars, outs]))
native.genrule(
name = "gen_core_tables_" + version,
srcs = grammars,
outs = outs,
cmd = (
"$(location :generate_grammar_tables) " +
"--spirv-core-grammar=$(location {0}) " +
"--extinst-debuginfo-grammar=$(location {1}) " +
"--extinst-cldebuginfo100-grammar=$(location {2}) " +
"--core-insts-output=$(location {3}) " +
"--operand-kinds-output=$(location {4})"
).format(*fmtargs),
cmd_bat = (
"$(location :generate_grammar_tables) " +
"--spirv-core-grammar=$(location {0}) " +
"--extinst-debuginfo-grammar=$(location {1}) " +
"--extinst-cldebuginfo100-grammar=$(location {2}) " +
"--core-insts-output=$(location {3}) " +
"--operand-kinds-output=$(location {4})"
).format(*fmtargs),
exec_tools = [":generate_grammar_tables"],
srcs = grammars.values(),
outs = outs.values(),
cmd = cmd,
cmd_bat = cmd,
tools = [":generate_grammar_tables"],
visibility = ["//visibility:private"],
)
def generate_enum_string_mapping(version = None):
def generate_enum_string_mapping(version):
if not version:
fail("Must specify version", "version")
grammars = [
"@spirv_headers//:spirv_core_grammar_" + version,
DEBUGINFO_GRAMMAR_JSON_FILE,
CLDEBUGINFO100_GRAMMAR_JSON_FILE,
]
outs = [
"extension_enum.inc",
"enum_string_mapping.inc",
]
fmtargs = grammars + outs
grammars = dict(
core_grammar = "@spirv_headers//:spirv_core_grammar_{}".format(version),
debuginfo_grammar = DEBUGINFO_GRAMMAR_JSON_FILE,
cldebuginfo_grammar = CLDEBUGINFO100_GRAMMAR_JSON_FILE,
)
outs = dict(
extension_enum_ouput = "extension_enum.inc",
enum_string_mapping_output = "enum_string_mapping.inc",
)
cmd = (
"$(location :generate_grammar_tables)" +
" --spirv-core-grammar=$(location {core_grammar})" +
" --extinst-debuginfo-grammar=$(location {debuginfo_grammar})" +
" --extinst-cldebuginfo100-grammar=$(location {cldebuginfo_grammar})" +
" --extension-enum-output=$(location {extension_enum_ouput})" +
" --enum-string-mapping-output=$(location {enum_string_mapping_output})" +
" --output-language=c++"
).format(**_merge_dicts([grammars, outs]))
native.genrule(
name = "gen_enum_string_mapping",
srcs = grammars,
outs = outs,
cmd = (
"$(location :generate_grammar_tables) " +
"--spirv-core-grammar=$(location {0}) " +
"--extinst-debuginfo-grammar=$(location {1}) " +
"--extinst-cldebuginfo100-grammar=$(location {2}) " +
"--extension-enum-output=$(location {3}) " +
"--enum-string-mapping-output=$(location {4})"
).format(*fmtargs),
cmd_bat = (
"$(location :generate_grammar_tables) " +
"--spirv-core-grammar=$(location {0}) " +
"--extinst-debuginfo-grammar=$(location {1}) " +
"--extinst-cldebuginfo100-grammar=$(location {2}) " +
"--extension-enum-output=$(location {3}) " +
"--enum-string-mapping-output=$(location {4})"
).format(*fmtargs),
exec_tools = [":generate_grammar_tables"],
srcs = grammars.values(),
outs = outs.values(),
cmd = cmd,
cmd_bat = cmd,
tools = [":generate_grammar_tables"],
visibility = ["//visibility:private"],
)
def generate_opencl_tables(version = None):
def generate_opencl_tables(version):
if not version:
fail("Must specify version", "version")
grammars = [
"@spirv_headers//:spirv_opencl_grammar_" + version,
]
outs = ["opencl.std.insts.inc"]
fmtargs = grammars + outs
grammars = dict(
opencl_grammar = "@spirv_headers//:spirv_opencl_grammar_{}".format(version),
)
outs = dict(
opencl_insts_output = "opencl.std.insts.inc",
)
cmd = (
"$(location :generate_grammar_tables)" +
" --extinst-opencl-grammar=$(location {opencl_grammar})" +
" --opencl-insts-output=$(location {opencl_insts_output})"
).format(**_merge_dicts([grammars, outs]))
native.genrule(
name = "gen_opencl_tables_" + version,
srcs = grammars,
outs = outs,
cmd = (
"$(location :generate_grammar_tables) " +
"--extinst-opencl-grammar=$(location {0}) " +
"--opencl-insts-output=$(location {1})"
).format(*fmtargs),
cmd_bat = (
"$(location :generate_grammar_tables) " +
"--extinst-opencl-grammar=$(location {0}) " +
"--opencl-insts-output=$(location {1})"
).format(*fmtargs),
exec_tools = [":generate_grammar_tables"],
srcs = grammars.values(),
outs = outs.values(),
cmd = cmd,
cmd_bat = cmd,
tools = [":generate_grammar_tables"],
visibility = ["//visibility:private"],
)
def generate_glsl_tables(version = None):
def generate_glsl_tables(version):
if not version:
fail("Must specify version", "version")
grammars = [
"@spirv_headers//:spirv_glsl_grammar_" + version,
]
outs = ["glsl.std.450.insts.inc"]
fmtargs = grammars + outs
grammars = dict(
gsls_grammar = "@spirv_headers//:spirv_glsl_grammar_{}".format(version),
)
outs = dict(
gsls_insts_outs = "glsl.std.450.insts.inc",
)
cmd = (
"$(location :generate_grammar_tables)" +
" --extinst-glsl-grammar=$(location {gsls_grammar})" +
" --glsl-insts-output=$(location {gsls_insts_outs})" +
" --output-language=c++"
).format(**_merge_dicts([grammars, outs]))
native.genrule(
name = "gen_glsl_tables_" + version,
srcs = grammars,
outs = outs,
cmd = (
"$(location :generate_grammar_tables) " +
"--extinst-glsl-grammar=$(location {0}) " +
"--glsl-insts-output=$(location {1})"
).format(*fmtargs),
cmd_bat = (
"$(location :generate_grammar_tables) " +
"--extinst-glsl-grammar=$(location {0}) " +
"--glsl-insts-output=$(location {1})"
).format(*fmtargs),
exec_tools = [":generate_grammar_tables"],
srcs = grammars.values(),
outs = outs.values(),
cmd = cmd,
cmd_bat = cmd,
tools = [":generate_grammar_tables"],
visibility = ["//visibility:private"],
)
def generate_vendor_tables(extension, operand_kind_prefix = ""):
if not extension:
fail("Must specify extension", "extension")
extension_rule = extension.replace("-", "_").replace(".", "_")
grammars = ["@spirv_headers//:spirv_ext_inst_{}_grammar_unified1".format(extension_rule)]
outs = ["{}.insts.inc".format(extension)]
prefices = [operand_kind_prefix]
fmtargs = grammars + outs + prefices
grammars = dict(
vendor_grammar = "@spirv_headers//:spirv_ext_inst_{}_grammar_unified1".format(extension_rule),
)
outs = dict(
vendor_insts_output = "{}.insts.inc".format(extension),
)
cmd = (
"$(location :generate_grammar_tables)" +
" --extinst-vendor-grammar=$(location {vendor_grammar})" +
" --vendor-insts-output=$(location {vendor_insts_output})" +
" --vendor-operand-kind-prefix={operand_kind_prefix}"
).format(operand_kind_prefix = operand_kind_prefix, **_merge_dicts([grammars, outs]))
native.genrule(
name = "gen_vendor_tables_" + extension_rule,
srcs = grammars,
outs = outs,
cmd = (
"$(location :generate_grammar_tables) " +
"--extinst-vendor-grammar=$(location {0}) " +
"--vendor-insts-output=$(location {1}) " +
"--vendor-operand-kind-prefix={2}"
).format(*fmtargs),
cmd_bat = (
"$(location :generate_grammar_tables) " +
"--extinst-vendor-grammar=$(location {0}) " +
"--vendor-insts-output=$(location {1}) " +
"--vendor-operand-kind-prefix={2}"
).format(*fmtargs),
exec_tools = [":generate_grammar_tables"],
srcs = grammars.values(),
outs = outs.values(),
cmd = cmd,
cmd_bat = cmd,
tools = [":generate_grammar_tables"],
visibility = ["//visibility:private"],
)
def generate_extinst_lang_headers(name, grammar = None):
if not grammar:
fail("Must specify grammar", "grammar")
outs = [name + ".h"]
fmtargs = outs
outs = dict(
extinst_output_path = name + ".h",
)
cmd = (
"$(location :generate_language_headers)" +
" --extinst-grammar=$<" +
" --extinst-output-path=$(location {extinst_output_path})"
).format(**outs)
native.genrule(
name = "gen_extinst_lang_headers_" + name,
name = "gen_extinst_lang_headers_{}".format(name),
srcs = [grammar],
outs = outs,
cmd = (
"$(location :generate_language_headers) " +
"--extinst-grammar=$< " +
"--extinst-output-path=$(location {0})"
).format(*fmtargs),
cmd_bat = (
"$(location :generate_language_headers) " +
"--extinst-grammar=$< " +
"--extinst-output-path=$(location {0})"
).format(*fmtargs),
exec_tools = [":generate_language_headers"],
outs = outs.values(),
cmd = cmd,
cmd_bat = cmd,
tools = [":generate_language_headers"],
visibility = ["//visibility:private"],
)
def base_test(name, srcs, deps = []):
if srcs == []:
return
if name[-5:] != "_test":
name = name + "_test"
native.cc_test(
name = "base_" + name,
srcs = srcs,
compatible_with = [],
copts = TEST_COPTS,
size = "large",
deps = [
":test_common",
"@com_google_googletest//:gtest_main",
"@com_google_googletest//:gtest",
"@com_google_effcee//:effcee",
] + deps,
)
def lint_test(name, srcs, deps = []):
if name[-5:] != "_test":
name = name + "_test"
native.cc_test(
name = "lint_" + name,
srcs = srcs,
compatible_with = [],
copts = TEST_COPTS,
size = "large",
deps = [
":spirv_tools_lint",
"@com_google_googletest//:gtest_main",
"@com_google_googletest//:gtest",
"@com_google_effcee//:effcee",
] + deps,
)
def link_test(name, srcs, deps = []):
if name[-5:] != "_test":
name = name + "_test"
native.cc_test(
name = "link_" + name,
srcs = srcs,
compatible_with = [],
copts = TEST_COPTS,
size = "large",
deps = [
":link_test_common",
"@com_google_googletest//:gtest_main",
"@com_google_googletest//:gtest",
"@com_google_effcee//:effcee",
] + deps,
)
def opt_test(name, srcs, deps = []):
if name[-5:] != "_test":
name = name + "_test"
native.cc_test(
name = "opt_" + name,
srcs = srcs,
compatible_with = [],
copts = TEST_COPTS,
size = "large",
deps = [
":opt_test_common",
"@com_google_googletest//:gtest_main",
"@com_google_googletest//:gtest",
"@com_google_effcee//:effcee",
] + deps,
)
def reduce_test(name, srcs, deps = []):
if name[-5:] != "_test":
name = name + "_test"
native.cc_test(
name = "reduce_" + name,
srcs = srcs,
compatible_with = [],
copts = TEST_COPTS,
size = "large",
deps = [
":reduce_test_common",
":spirv_tools_reduce",
"@com_google_googletest//:gtest_main",
"@com_google_googletest//:gtest",
"@com_google_effcee//:effcee",
] + deps,
)
def util_test(name, srcs, deps = []):
if name[-5:] != "_test":
name = name + "_test"
native.cc_test(
name = "util_" + name,
srcs = srcs,
compatible_with = [],
copts = TEST_COPTS,
size = "large",
deps = [
":opt_test_common",
"@com_google_googletest//:gtest_main",
"@com_google_googletest//:gtest",
"@com_google_effcee//:effcee",
] + deps,
)
def val_test(name, srcs = [], copts = [], deps = [], **kwargs):
if name[-5:] != "_test":
name = name + "_test"
if name[:4] != "val_":
name = "val_" + name
native.cc_test(
name = name,
srcs = srcs,
compatible_with = [],
copts = TEST_COPTS + copts,
size = "large",
deps = [
":val_test_common",
"@com_google_googletest//:gtest_main",
"@com_google_googletest//:gtest",
"@com_google_effcee//:effcee",
] + deps,
**kwargs
)

View File

@ -2,7 +2,7 @@
## Latest builds
Download the latest builds of the [master](https://github.com/KhronosGroup/SPIRV-Tools/tree/master) branch.
Download the latest builds of the [main](https://github.com/KhronosGroup/SPIRV-Tools/tree/main) branch.
### Release build
| Windows | Linux | MacOS |

View File

@ -34,7 +34,7 @@ through the project workflow:
ones.
* They determine if the work for a card has been completed.
* Normally they are the person (or persons) who can approve and merge a pull
request into the `master` branch.
request into the `main` branch.
Our projects organize cards into the following columns:
* `Ideas`: Work which could be done, captured either as Cards or Notes.
@ -51,7 +51,7 @@ Our projects organize cards into the following columns:
claimed by someone.
* `Done`: Issues which have been resolved, by completing their work.
* The changes have been applied to the repository, typically by being pushed
into the `master` branch.
into the `main` branch.
* Other kinds of work could update repository settings, for example.
* `Rejected ideas`: Work which has been considered, but which we don't want
implemented.

View File

@ -30,11 +30,7 @@ if (DEFINED SPIRV-Headers_SOURCE_DIR)
# This allows flexible position of the SPIRV-Headers repo.
set(SPIRV_HEADER_DIR ${SPIRV-Headers_SOURCE_DIR})
else()
if (IS_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/SPIRV-Headers)
set(SPIRV_HEADER_DIR ${CMAKE_CURRENT_SOURCE_DIR}/SPIRV-Headers)
else()
set(SPIRV_HEADER_DIR ${CMAKE_CURRENT_SOURCE_DIR}/spirv-headers)
endif()
set(SPIRV_HEADER_DIR ${CMAKE_CURRENT_SOURCE_DIR}/spirv-headers)
endif()
if (IS_DIRECTORY ${SPIRV_HEADER_DIR})
@ -45,8 +41,6 @@ if (IS_DIRECTORY ${SPIRV_HEADER_DIR})
# Do this so enclosing projects can use SPIRV-Headers_SOURCE_DIR to find
# headers to include.
if (NOT DEFINED SPIRV-Headers_SOURCE_DIR)
set(SPIRV_HEADERS_SKIP_INSTALL ON)
set(SPIRV_HEADERS_SKIP_EXAMPLES ON)
add_subdirectory(${SPIRV_HEADER_DIR})
endif()
else()
@ -60,7 +54,9 @@ if (NOT ${SPIRV_SKIP_TESTS})
if (TARGET gmock)
message(STATUS "Google Mock already configured")
else()
set(GMOCK_DIR ${CMAKE_CURRENT_SOURCE_DIR}/googletest)
if (NOT GMOCK_DIR)
set(GMOCK_DIR ${CMAKE_CURRENT_SOURCE_DIR}/googletest)
endif()
if(EXISTS ${GMOCK_DIR})
if(MSVC)
# Our tests use ::testing::Combine. Work around a compiler
@ -77,7 +73,7 @@ if (NOT ${SPIRV_SKIP_TESTS})
# gtest requires special defines for building as a shared
# library, simply always build as static.
push_variable(BUILD_SHARED_LIBS 0)
add_subdirectory(${GMOCK_DIR} EXCLUDE_FROM_ALL)
add_subdirectory(${GMOCK_DIR} ${CMAKE_CURRENT_BINARY_DIR}/googletest EXCLUDE_FROM_ALL)
pop_variable(BUILD_SHARED_LIBS)
endif()
endif()
@ -95,10 +91,22 @@ if (NOT ${SPIRV_SKIP_TESTS})
# Find Effcee and RE2, for testing.
# RE2 depends on Abseil. We set absl_SOURCE_DIR if it is not already set, so
# that effcee can find abseil.
if(NOT TARGET absl::base)
if (NOT absl_SOURCE_DIR)
if (EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/abseil_cpp)
set(absl_SOURCE_DIR "${CMAKE_CURRENT_SOURCE_DIR}/abseil_cpp" CACHE STRING "Abseil source dir" )
endif()
endif()
endif()
# First find RE2, since Effcee depends on it.
# If already configured, then use that. Otherwise, prefer to find it under 're2'
# in this directory.
if (NOT TARGET re2)
# If we are configuring RE2, then turn off its testing. It takes a long time and
# does not add much value for us. If an enclosing project configured RE2, then it
# has already chosen whether to enable RE2 testing.
@ -156,7 +164,7 @@ if(SPIRV_BUILD_FUZZER)
if(NOT TARGET protobuf::libprotobuf OR NOT TARGET protobuf::protoc)
set(SPIRV_TOOLS_PROTOBUF_DIR ${CMAKE_CURRENT_SOURCE_DIR}/protobuf/cmake)
set(SPIRV_TOOLS_PROTOBUF_DIR ${CMAKE_CURRENT_SOURCE_DIR}/protobuf)
if (NOT IS_DIRECTORY ${SPIRV_TOOLS_PROTOBUF_DIR})
message(
FATAL_ERROR

View File

@ -36,16 +36,25 @@ namespace spvtools {
// generated by InstrumentPass::GenDebugStreamWrite. This method is utilized
// by InstBindlessCheckPass, InstBuffAddrCheckPass, and InstDebugPrintfPass.
//
// The first member of the debug output buffer contains the next available word
// The 1st member of the debug output buffer contains a set of flags
// controlling the behavior of instrumentation code.
static const int kDebugOutputFlagsOffset = 0;
// Values stored at kDebugOutputFlagsOffset
enum kInstFlags : unsigned int {
kInstBufferOOBEnable = 0x1,
};
// The 2nd member of the debug output buffer contains the next available word
// in the data stream to be written. Shaders will atomically read and update
// this value so as not to overwrite each others records. This value must be
// initialized to zero
static const int kDebugOutputSizeOffset = 0;
static const int kDebugOutputSizeOffset = 1;
// The second member of the output buffer is the start of the stream of records
// The 3rd member of the output buffer is the start of the stream of records
// written by the instrumented shaders. Each record represents a validation
// error. The format of the records is documented below.
static const int kDebugOutputDataOffset = 1;
static const int kDebugOutputDataOffset = 2;
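As an aside (not part of the header), a hedged host-side sketch of reading this layout; the words pointer is an assumed mapping of the debug output buffer written by the instrumented shaders:
const uint32_t flags = words[spvtools::kDebugOutputFlagsOffset];        // word 0: behavior flags
const bool buff_oob_on = (flags & spvtools::kInstBufferOOBEnable) != 0; // flag bit defined above
const uint32_t words_written = words[spvtools::kDebugOutputSizeOffset]; // word 1: next free word
const uint32_t* records = &words[spvtools::kDebugOutputDataOffset];     // word 2 onward: records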
// Common Stream Record Offsets
//
@ -64,196 +73,14 @@ static const int kInstCommonOutShaderId = 1;
// which generated the validation error.
static const int kInstCommonOutInstructionIdx = 2;
// This is the stage which generated the validation error. This word is used
// to determine the contents of the next two words in the record.
// 0:Vert, 1:TessCtrl, 2:TessEval, 3:Geom, 4:Frag, 5:Compute
static const int kInstCommonOutStageIdx = 3;
static const int kInstCommonOutCnt = 4;
// Stage-specific Stream Record Offsets
//
// Each stage will contain different values in the next set of words of the
// record used to identify which instantiation of the shader generated the
// validation error.
//
// Vertex Shader Output Record Offsets
static const int kInstVertOutVertexIndex = kInstCommonOutCnt;
static const int kInstVertOutInstanceIndex = kInstCommonOutCnt + 1;
static const int kInstVertOutUnused = kInstCommonOutCnt + 2;
// Frag Shader Output Record Offsets
static const int kInstFragOutFragCoordX = kInstCommonOutCnt;
static const int kInstFragOutFragCoordY = kInstCommonOutCnt + 1;
static const int kInstFragOutUnused = kInstCommonOutCnt + 2;
// Compute Shader Output Record Offsets
static const int kInstCompOutGlobalInvocationIdX = kInstCommonOutCnt;
static const int kInstCompOutGlobalInvocationIdY = kInstCommonOutCnt + 1;
static const int kInstCompOutGlobalInvocationIdZ = kInstCommonOutCnt + 2;
// Tessellation Control Shader Output Record Offsets
static const int kInstTessCtlOutInvocationId = kInstCommonOutCnt;
static const int kInstTessCtlOutPrimitiveId = kInstCommonOutCnt + 1;
static const int kInstTessCtlOutUnused = kInstCommonOutCnt + 2;
// Tessellation Eval Shader Output Record Offsets
static const int kInstTessEvalOutPrimitiveId = kInstCommonOutCnt;
static const int kInstTessEvalOutTessCoordU = kInstCommonOutCnt + 1;
static const int kInstTessEvalOutTessCoordV = kInstCommonOutCnt + 2;
// Geometry Shader Output Record Offsets
static const int kInstGeomOutPrimitiveId = kInstCommonOutCnt;
static const int kInstGeomOutInvocationId = kInstCommonOutCnt + 1;
static const int kInstGeomOutUnused = kInstCommonOutCnt + 2;
// Ray Tracing Shader Output Record Offsets
static const int kInstRayTracingOutLaunchIdX = kInstCommonOutCnt;
static const int kInstRayTracingOutLaunchIdY = kInstCommonOutCnt + 1;
static const int kInstRayTracingOutLaunchIdZ = kInstCommonOutCnt + 2;
// Mesh Shader Output Record Offsets
static const int kInstMeshOutGlobalInvocationIdX = kInstCommonOutCnt;
static const int kInstMeshOutGlobalInvocationIdY = kInstCommonOutCnt + 1;
static const int kInstMeshOutGlobalInvocationIdZ = kInstCommonOutCnt + 2;
// Task Shader Output Record Offsets
static const int kInstTaskOutGlobalInvocationIdX = kInstCommonOutCnt;
static const int kInstTaskOutGlobalInvocationIdY = kInstCommonOutCnt + 1;
static const int kInstTaskOutGlobalInvocationIdZ = kInstCommonOutCnt + 2;
// Size of Common and Stage-specific Members
static const int kInstStageOutCnt = kInstCommonOutCnt + 3;
// Validation Error Code Offset
//
// This identifies the validation error. It also helps to identify
// how many words follow in the record and their meaning.
static const int kInstValidationOutError = kInstStageOutCnt;
// Validation-specific Output Record Offsets
//
// Each different validation will generate a potentially different
// number of words at the end of the record giving more specifics
// about the validation error.
//
// A bindless bounds error will output the index and the bound.
static const int kInstBindlessBoundsOutDescIndex = kInstStageOutCnt + 1;
static const int kInstBindlessBoundsOutDescBound = kInstStageOutCnt + 2;
static const int kInstBindlessBoundsOutUnused = kInstStageOutCnt + 3;
static const int kInstBindlessBoundsOutCnt = kInstStageOutCnt + 4;
// A descriptor uninitialized error will output the index.
static const int kInstBindlessUninitOutDescIndex = kInstStageOutCnt + 1;
static const int kInstBindlessUninitOutUnused = kInstStageOutCnt + 2;
static const int kInstBindlessUninitOutUnused2 = kInstStageOutCnt + 3;
static const int kInstBindlessUninitOutCnt = kInstStageOutCnt + 4;
// A buffer out-of-bounds error will output the descriptor
// index, the buffer offset and the buffer size
static const int kInstBindlessBuffOOBOutDescIndex = kInstStageOutCnt + 1;
static const int kInstBindlessBuffOOBOutBuffOff = kInstStageOutCnt + 2;
static const int kInstBindlessBuffOOBOutBuffSize = kInstStageOutCnt + 3;
static const int kInstBindlessBuffOOBOutCnt = kInstStageOutCnt + 4;
// A buffer address unalloc error will output the 64-bit pointer in
// two 32-bit pieces, lower bits first.
static const int kInstBuffAddrUnallocOutDescPtrLo = kInstStageOutCnt + 1;
static const int kInstBuffAddrUnallocOutDescPtrHi = kInstStageOutCnt + 2;
static const int kInstBuffAddrUnallocOutCnt = kInstStageOutCnt + 3;
// Maximum Output Record Member Count
static const int kInstMaxOutCnt = kInstStageOutCnt + 4;
// Validation Error Codes
//
// These are the possible validation error codes.
static const int kInstErrorBindlessBounds = 0;
static const int kInstErrorBindlessUninit = 1;
static const int kInstErrorBuffAddrUnallocRef = 2;
// Deleted: static const int kInstErrorBindlessBuffOOB = 3;
// This comment will remain for 2 releases to allow
// for the transition of all builds. Buffer OOB is
// generating the following four differentiated codes instead:
static const int kInstErrorBuffOOBUniform = 4;
static const int kInstErrorBuffOOBStorage = 5;
static const int kInstErrorBuffOOBUniformTexel = 6;
static const int kInstErrorBuffOOBStorageTexel = 7;
static const int kInstErrorMax = kInstErrorBuffOOBStorageTexel;
// Direct Input Buffer Offsets
//
// The following values provide member offsets into the input buffers
// consumed by InstrumentPass::GenDebugDirectRead(). This method is utilized
// by InstBindlessCheckPass.
//
// The only object in an input buffer is a runtime array of unsigned
// integers. Each validation will have its own formatting of this array.
static const int kDebugInputDataOffset = 0;
// Debug Buffer Bindings
//
// These are the bindings for the different buffers which are
// read or written by the instrumentation passes.
//
// This is the output buffer written by InstBindlessCheckPass,
// InstBuffAddrCheckPass, and possibly other future validations.
static const int kDebugOutputBindingStream = 0;
// The binding for the input buffer read by InstBindlessCheckPass.
static const int kDebugInputBindingBindless = 1;
// The binding for the input buffer read by InstBuffAddrCheckPass.
static const int kDebugInputBindingBuffAddr = 2;
// This is the output buffer written by InstDebugPrintfPass.
static const int kDebugOutputPrintfStream = 3;
// Bindless Validation Input Buffer Format
//
// An input buffer for bindless validation consists of a single array of
// unsigned integers we will call Data[]. This array is formatted as follows.
//
// At offset kDebugInputBindlessInitOffset in Data[] is a single uint which
// gives an offset to the start of the bindless initialization data. More
// specifically, if the following value is zero, we know that the descriptor at
// (set = s, binding = b, index = i) is not initialized; if the value is
// non-zero, and the descriptor points to a buffer, the value is the length of
// the buffer in bytes and can be used to check for out-of-bounds buffer
// references:
// Data[ i + Data[ b + Data[ s + Data[ kDebugInputBindlessInitOffset ] ] ] ]
static const int kDebugInputBindlessInitOffset = 0;
// At offset kDebugInputBindlessOffsetLengths is some number of uints which
// provide the bindless length data. More specifically, the number of
// descriptors at (set=s, binding=b) is:
// Data[ Data[ s + kDebugInputBindlessOffsetLengths ] + b ]
static const int kDebugInputBindlessOffsetLengths = 1;
// Buffer Device Address Input Buffer Format
//
// An input buffer for buffer device address validation consists of a single
// array of unsigned 64-bit integers we will call Data[]. This array is
// formatted as follows:
//
// At offset kDebugInputBuffAddrPtrOffset is a list of sorted valid buffer
// addresses. The list is terminated with the address 0xffffffffffffffff.
// If 0x0 is not a valid buffer address, this address is inserted at the
// start of the list.
//
static const int kDebugInputBuffAddrPtrOffset = 1;
//
// At offset kDebugInputBuffAddrLengthOffset in Data[] is a single uint64 which
// gives an offset to the start of the buffer length data. More
// specifically, for a buffer whose pointer is located at input buffer offset
// i, the length is located at:
//
// Data[ i - kDebugInputBuffAddrPtrOffset
// + Data[ kDebugInputBuffAddrLengthOffset ] ]
//
// The length associated with the 0xffffffffffffffff address is zero. If
// not a valid buffer, the length associated with the 0x0 address is zero.
static const int kDebugInputBuffAddrLengthOffset = 0;
} // namespace spvtools
#endif // INCLUDE_SPIRV_TOOLS_INSTRUMENT_HPP_

View File

@ -143,6 +143,7 @@ typedef enum spv_operand_type_t {
// may be larger than 32, which would require such a typed literal value to
// occupy multiple SPIR-V words.
SPV_OPERAND_TYPE_TYPED_LITERAL_NUMBER,
SPV_OPERAND_TYPE_LITERAL_FLOAT, // Always 32-bit float.
// Set 3: The literal string operand type.
SPV_OPERAND_TYPE_LITERAL_STRING,
@ -285,6 +286,22 @@ typedef enum spv_operand_type_t {
// An optional packed vector format
SPV_OPERAND_TYPE_OPTIONAL_PACKED_VECTOR_FORMAT,
// Concrete operand types for cooperative matrix.
SPV_OPERAND_TYPE_COOPERATIVE_MATRIX_OPERANDS,
// Optional cooperative matrix operands
SPV_OPERAND_TYPE_OPTIONAL_COOPERATIVE_MATRIX_OPERANDS,
SPV_OPERAND_TYPE_COOPERATIVE_MATRIX_LAYOUT,
SPV_OPERAND_TYPE_COOPERATIVE_MATRIX_USE,
// Enum type from SPV_INTEL_global_variable_fpga_decorations
SPV_OPERAND_TYPE_INITIALIZATION_MODE_QUALIFIER,
// Enum type from SPV_INTEL_global_variable_host_access
SPV_OPERAND_TYPE_HOST_ACCESS_QUALIFIER,
// Enum type from SPV_INTEL_cache_controls
SPV_OPERAND_TYPE_LOAD_CACHE_CONTROL,
// Enum type from SPV_INTEL_cache_controls
SPV_OPERAND_TYPE_STORE_CACHE_CONTROL,
// This is a sentinel value, and does not represent an operand type.
// It should come last.
SPV_OPERAND_TYPE_NUM_OPERAND_TYPES,
@ -402,6 +419,19 @@ typedef struct spv_parsed_instruction_t {
uint16_t num_operands;
} spv_parsed_instruction_t;
typedef struct spv_parsed_header_t {
// The magic number of the SPIR-V module.
uint32_t magic;
// Version number.
uint32_t version;
// Generator's magic number.
uint32_t generator;
// IDs bound for this module (0 < id < bound).
uint32_t bound;
// reserved.
uint32_t reserved;
} spv_parsed_header_t;
typedef struct spv_const_binary_t {
const uint32_t* code;
const size_t wordCount;
@ -441,6 +471,8 @@ typedef struct spv_reducer_options_t spv_reducer_options_t;
typedef struct spv_fuzzer_options_t spv_fuzzer_options_t;
typedef struct spv_optimizer_t spv_optimizer_t;
// Type Definitions
typedef spv_const_binary_t* spv_const_binary;
@ -900,6 +932,63 @@ SPIRV_TOOLS_EXPORT spv_result_t spvBinaryParse(
const size_t num_words, spv_parsed_header_fn_t parse_header,
spv_parsed_instruction_fn_t parse_instruction, spv_diagnostic* diagnostic);
// The optimizer interface.
// A pointer to a function that accepts a log message from an optimizer.
typedef void (*spv_message_consumer)(
spv_message_level_t, const char*, const spv_position_t*, const char*);
// Creates and returns an optimizer object. This object must be passed to
// optimizer APIs below and is valid until passed to spvOptimizerDestroy.
SPIRV_TOOLS_EXPORT spv_optimizer_t* spvOptimizerCreate(spv_target_env env);
// Destroys the given optimizer object.
SPIRV_TOOLS_EXPORT void spvOptimizerDestroy(spv_optimizer_t* optimizer);
// Sets an spv_message_consumer on an optimizer object.
SPIRV_TOOLS_EXPORT void spvOptimizerSetMessageConsumer(
spv_optimizer_t* optimizer, spv_message_consumer consumer);
// Registers passes that attempt to legalize the generated code.
SPIRV_TOOLS_EXPORT void spvOptimizerRegisterLegalizationPasses(
spv_optimizer_t* optimizer);
// Registers passes that attempt to improve performance of generated code.
SPIRV_TOOLS_EXPORT void spvOptimizerRegisterPerformancePasses(
spv_optimizer_t* optimizer);
// Registers passes that attempt to improve the size of generated code.
SPIRV_TOOLS_EXPORT void spvOptimizerRegisterSizePasses(
spv_optimizer_t* optimizer);
// Registers a pass specified by a flag in an optimizer object.
SPIRV_TOOLS_EXPORT bool spvOptimizerRegisterPassFromFlag(
spv_optimizer_t* optimizer, const char* flag);
// Registers passes specified by length number of flags in an optimizer object.
SPIRV_TOOLS_EXPORT bool spvOptimizerRegisterPassesFromFlags(
spv_optimizer_t* optimizer, const char** flags, const size_t flag_count);
// Optimizes the SPIR-V code of size |word_count| pointed to by |binary| and
// returns an optimized spv_binary in |optimized_binary|.
//
// Returns SPV_SUCCESS on successful optimization, whether or not the module is
// modified. Returns an SPV_ERROR_* if the module fails to validate or if
// errors occur when processing using any of the registered passes. In that
// case, no further passes are executed and the |optimized_binary| contents may
// be invalid.
//
// By default, the binary is validated before any transforms are performed,
// and optionally after each transform. Validation uses SPIR-V spec rules
// for the SPIR-V version named in the binary's header (at word offset 1).
// Additionally, if the target environment is a client API (such as
// Vulkan 1.1), then validate for that client API version, to the extent
// that it is verifiable from data in the binary itself, or from the
// validator options set on the optimizer options.
SPIRV_TOOLS_EXPORT spv_result_t spvOptimizerRun(
spv_optimizer_t* optimizer, const uint32_t* binary, const size_t word_count,
spv_binary* optimized_binary, const spv_optimizer_options options);
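Illustrative only (not part of the header): a minimal sketch of driving this C interface, assuming words and word_count hold a valid SPIR-V module; the pass flag is just an example:
spv_optimizer_t* optimizer = spvOptimizerCreate(SPV_ENV_VULKAN_1_3);
spvOptimizerRegisterPerformancePasses(optimizer);
spvOptimizerRegisterPassFromFlag(optimizer, "--eliminate-dead-code-aggressive");
spv_optimizer_options options = spvOptimizerOptionsCreate();
spv_binary optimized = nullptr;
if (spvOptimizerRun(optimizer, words, word_count, &optimized, options) ==
    SPV_SUCCESS) {
  // Use optimized->code and optimized->wordCount, then release the binary.
  spvBinaryDestroy(optimized);
}
spvOptimizerOptionsDestroy(options);
spvOptimizerDestroy(optimizer);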
#ifdef __cplusplus
}
#endif

View File

@ -31,6 +31,11 @@ using MessageConsumer = std::function<void(
const spv_position_t& /* position */, const char* /* message */
)>;
using HeaderParser = std::function<spv_result_t(
const spv_endianness_t endianness, const spv_parsed_header_t& header)>;
using InstructionParser =
std::function<spv_result_t(const spv_parsed_instruction_t& instruction)>;
// C++ RAII wrapper around the C context object spv_context.
class Context {
public:
@ -336,6 +341,23 @@ class SpirvTools {
std::string* text,
uint32_t options = kDefaultDisassembleOption) const;
// Parses a SPIR-V binary, specified as counted sequence of 32-bit words.
// Parsing feedback is provided via two callbacks provided as std::function.
// In a valid parse the parsed-header callback is called once, and
// then the parsed-instruction callback is called once for each instruction
// in the stream.
// Returns true on successful parsing.
// If diagnostic is non-null, a diagnostic is emitted on failed parsing.
// If diagnostic is null the context's message consumer
// will be used to emit any errors. If a callback returns anything other than
// SPV_SUCCESS, then that status code is returned, no further callbacks are
// issued, and no additional diagnostics are emitted.
// This is a wrapper around the C API spvBinaryParse.
bool Parse(const std::vector<uint32_t>& binary,
const HeaderParser& header_parser,
const InstructionParser& instruction_parser,
spv_diagnostic* diagnostic = nullptr);
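Illustrative only (not part of the header): a minimal sketch of this overload, where binary is an assumed std::vector<uint32_t> holding a module:
spvtools::SpirvTools tools(SPV_ENV_VULKAN_1_3);
uint32_t id_bound = 0;
size_t instruction_count = 0;
bool ok = tools.Parse(
    binary,
    [&](spv_endianness_t, const spv_parsed_header_t& header) {
      id_bound = header.bound;  // The header callback fires exactly once.
      return SPV_SUCCESS;
    },
    [&](const spv_parsed_instruction_t&) {
      ++instruction_count;      // Fires once per instruction in the stream.
      return SPV_SUCCESS;
    });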
// Validates the given SPIR-V |binary|. Returns true if no issues are found.
// Otherwise, returns false and communicates issues via the message consumer
// registered.

View File

@ -26,11 +26,6 @@ namespace spvtools {
class LinkerOptions {
public:
LinkerOptions()
: create_library_(false),
verify_ids_(false),
allow_partial_linkage_(false) {}
// Returns whether a library or an executable should be produced by the
// linking phase.
//
@ -63,10 +58,16 @@ class LinkerOptions {
allow_partial_linkage_ = allow_partial_linkage;
}
bool GetUseHighestVersion() const { return use_highest_version_; }
void SetUseHighestVersion(bool use_highest_vers) {
use_highest_version_ = use_highest_vers;
}
private:
bool create_library_;
bool verify_ids_;
bool allow_partial_linkage_;
bool create_library_{false};
bool verify_ids_{false};
bool allow_partial_linkage_{false};
bool use_highest_version_{false};
};
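A minimal usage sketch (illustrative, not part of the header), assuming module_a and module_b are std::vector<uint32_t> binaries to combine:
spvtools::Context context(SPV_ENV_VULKAN_1_3);
spvtools::LinkerOptions options;
options.SetAllowPartialLinkage(true);
options.SetUseHighestVersion(true);  // New: emit the highest version seen among the inputs.
std::vector<uint32_t> linked;
spv_result_t status = spvtools::Link(context, {module_a, module_b}, &linked, options);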
// Links one or more SPIR-V modules into a new SPIR-V module. That is, combine

View File

@ -19,6 +19,7 @@
#include <ostream>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>
@ -96,12 +97,24 @@ class Optimizer {
// Registers passes that attempt to improve performance of generated code.
// This sequence of passes is subject to constant review and will change
// from time to time.
//
// If |preserve_interface| is true, all non-io variables in the entry point
// interface are considered live and are not eliminated.
// |preserve_interface| should be true if HLSL is generated
// from the SPIR-V bytecode.
Optimizer& RegisterPerformancePasses();
Optimizer& RegisterPerformancePasses(bool preserve_interface);
// Registers passes that attempt to improve the size of generated code.
// This sequence of passes is subject to constant review and will change
// from time to time.
//
// If |preserve_interface| is true, all non-io variables in the entry point
// interface are considered live and are not eliminated.
// |preserve_interface| should be true if HLSL is generated
// from the SPIR-V bytecode.
Optimizer& RegisterSizePasses();
Optimizer& RegisterSizePasses(bool preserve_interface);
// Registers passes that attempt to legalize the generated code.
//
@ -111,7 +124,13 @@ class Optimizer {
//
// This sequence of passes is subject to constant review and will change
// from time to time.
//
// If |preserve_interface| is true, all non-io variables in the entry point
// interface are considered live and are not eliminated.
// |preserve_interface| should be true if HLSL is generated
// from the SPIR-V bytecode.
Optimizer& RegisterLegalizationPasses();
Optimizer& RegisterLegalizationPasses(bool preserve_interface);
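For example (illustrative only), a caller working with SPIR-V generated from HLSL would pick the new overloads like so:
spvtools::Optimizer opt(SPV_ENV_VULKAN_1_3);
opt.RegisterLegalizationPasses(/* preserve_interface= */ true)
    .RegisterPerformancePasses(/* preserve_interface= */ true);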
// Register passes specified in the list of |flags|. Each flag must be a
// string of a form accepted by Optimizer::FlagHasValidForm().
@ -520,8 +539,14 @@ Optimizer::PassToken CreateDeadInsertElimPass();
// interface are considered live and are not eliminated. This mode is needed
// by GPU-Assisted validation instrumentation, where a change in the interface
// is not allowed.
//
// If |remove_outputs| is true, allow outputs to be removed from the interface.
// This is only safe if the caller knows that there is no corresponding input
// variable in the following shader. It is false by default.
Optimizer::PassToken CreateAggressiveDCEPass();
Optimizer::PassToken CreateAggressiveDCEPass(bool preserve_interface);
Optimizer::PassToken CreateAggressiveDCEPass(bool preserve_interface,
bool remove_outputs);
// Creates a remove-unused-interface-variables pass.
// Removes variables referenced on the |OpEntryPoint| instruction that are not
@ -741,19 +766,9 @@ Optimizer::PassToken CreateCombineAccessChainsPass();
// potentially de-optimizing the instrument code, for example, inlining
// the debug record output function throughout the module.
//
// The instrumentation will read and write buffers in debug
// descriptor set |desc_set|. It will write |shader_id| in each output record
// The instrumentation will write |shader_id| in each output record
// to identify the shader module which generated the record.
// |desc_length_enable| controls instrumentation of runtime descriptor array
// references, |desc_init_enable| controls instrumentation of descriptor
// initialization checking, and |buff_oob_enable| controls instrumentation
// of storage and uniform buffer bounds checking, all of which require input
// buffer support. |texbuff_oob_enable| controls instrumentation of texel
// buffers, which does not require input buffer support.
Optimizer::PassToken CreateInstBindlessCheckPass(
uint32_t desc_set, uint32_t shader_id, bool desc_length_enable = false,
bool desc_init_enable = false, bool buff_oob_enable = false,
bool texbuff_oob_enable = false);
Optimizer::PassToken CreateInstBindlessCheckPass(uint32_t shader_id);
// Create a pass to instrument physical buffer address checking
// This pass instruments all physical buffer address references to check that
@ -774,8 +789,7 @@ Optimizer::PassToken CreateInstBindlessCheckPass(
// The instrumentation will read and write buffers in debug
// descriptor set |desc_set|. It will write |shader_id| in each output record
// to identify the shader module which generated the record.
Optimizer::PassToken CreateInstBuffAddrCheckPass(uint32_t desc_set,
uint32_t shader_id);
Optimizer::PassToken CreateInstBuffAddrCheckPass(uint32_t shader_id);
// Create a pass to instrument OpDebugPrintf instructions.
// This pass replaces all OpDebugPrintf instructions with instructions to write
@ -887,12 +901,59 @@ Optimizer::PassToken CreateAmdExtToKhrPass();
Optimizer::PassToken CreateInterpolateFixupPass();
// Removes unused components from composite input variables. Current
// implementation just removes trailing unused components from input arrays.
// The pass performs best after maximizing dead code removal. A subsequent dead
// code elimination pass would be beneficial in removing newly unused component
// types.
// implementation just removes trailing unused components from input arrays
// and structs. The pass performs best after maximizing dead code removal.
// A subsequent dead code elimination pass would be beneficial in removing
// newly unused component types.
//
// WARNING: This pass can only be safely applied standalone to vertex shaders
// as it can otherwise cause interface incompatibilities with the preceding
// shader in the pipeline. If applied to non-vertex shaders, the user should
// follow by applying EliminateDeadOutputStores and
// EliminateDeadOutputComponents to the preceding shader.
Optimizer::PassToken CreateEliminateDeadInputComponentsPass();
// Removes unused components from composite output variables. Current
// implementation just removes trailing unused components from output arrays
// and structs. The pass performs best after eliminating dead output stores.
// A subsequent dead code elimination pass would be beneficial in removing
// newly unused component types. Currently only supports vertex and fragment
// shaders.
//
// WARNING: This pass cannot be safely applied standalone as it can cause
// interface incompatibility with the following shader in the pipeline. The
// user should first apply EliminateDeadInputComponents to the following
// shader, then apply EliminateDeadOutputStores to this shader.
Optimizer::PassToken CreateEliminateDeadOutputComponentsPass();
// Removes unused components from composite input variables. This safe
// version will not cause interface incompatibilities since it only changes
// vertex shaders. The current implementation just removes trailing unused
// components from input structs and input arrays. The pass performs best
// after maximizing dead code removal. A subsequent dead code elimination
// pass would be beneficial in removing newly unused component types.
Optimizer::PassToken CreateEliminateDeadInputComponentsSafePass();
// Analyzes shader and populates |live_locs| and |live_builtins|. Best results
// will be obtained if shader has all dead code eliminated first. |live_locs|
// and |live_builtins| are subsequently used when calling
// CreateEliminateDeadOutputStoresPass on the preceding shader. Currently only
// supports tesc, tese, geom, and frag shaders.
Optimizer::PassToken CreateAnalyzeLiveInputPass(
std::unordered_set<uint32_t>* live_locs,
std::unordered_set<uint32_t>* live_builtins);
// Removes stores to output locations not listed in |live_locs| or
// |live_builtins|. Best results are obtained if constant propagation is
// performed first. A subsequent call to ADCE will eliminate any dead code
// created by the removal of the stores. A subsequent call to
// CreateEliminateDeadOutputComponentsPass will eliminate any dead output
// components created by the elimination of the stores. Currently only supports
// vert, tesc, tese, and geom shaders.
Optimizer::PassToken CreateEliminateDeadOutputStoresPass(
std::unordered_set<uint32_t>* live_locs,
std::unordered_set<uint32_t>* live_builtins);
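A sketch of the cross-stage flow described above (illustrative only; frag_binary and vert_binary are assumed fragment and vertex shader binaries from the same pipeline):
std::unordered_set<uint32_t> live_locs, live_builtins;
// 1. Analyze the following (fragment) shader for live input locations/builtins.
spvtools::Optimizer frag_opt(SPV_ENV_VULKAN_1_3);
frag_opt.RegisterPass(
    spvtools::CreateAnalyzeLiveInputPass(&live_locs, &live_builtins));
std::vector<uint32_t> frag_out;
frag_opt.Run(frag_binary.data(), frag_binary.size(), &frag_out);
// 2. Strip dead output stores (then dead code and dead output components)
//    from the preceding (vertex) shader.
spvtools::Optimizer vert_opt(SPV_ENV_VULKAN_1_3);
vert_opt
    .RegisterPass(spvtools::CreateEliminateDeadOutputStoresPass(
        &live_locs, &live_builtins))
    .RegisterPass(spvtools::CreateAggressiveDCEPass(false, true))
    .RegisterPass(spvtools::CreateEliminateDeadOutputComponentsPass());
std::vector<uint32_t> vert_out;
vert_opt.Run(vert_binary.data(), vert_binary.size(), &vert_out);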
// Creates a convert-to-sampled-image pass to convert images and/or
// samplers with given pairs of descriptor set and binding to sampled image.
// If a pair of an image and a sampler have the same pair of descriptor set and
@ -917,6 +978,28 @@ Optimizer::PassToken CreateRemoveDontInlinePass();
// object, currently the pass would remove accesschain pointer argument passed
// to the function
Optimizer::PassToken CreateFixFuncCallArgumentsPass();
// Creates a trim-capabilities pass.
// This pass removes unused capabilities for a given module, and if possible,
// associated extensions.
// See `trim_capabilities.h` for the list of supported capabilities.
//
// If the module contains unsupported capabilities, this pass will ignore them.
// This should be fine in most cases, but could yield incorrect results if
// the unknown capability interacts with one of the trimmed capabilities.
Optimizer::PassToken CreateTrimCapabilitiesPass();
// Creates a switch-descriptorset pass.
// This pass changes any DescriptorSet decorations with the value |ds_from| to
// use the new value |ds_to|.
Optimizer::PassToken CreateSwitchDescriptorSetPass(uint32_t ds_from,
uint32_t ds_to);
// Creates an invocation interlock placement pass.
// This pass ensures that an entry point will have at most one
// OpBeginInterlockInvocationEXT and one OpEndInterlockInvocationEXT, in that
// order.
Optimizer::PassToken CreateInvocationInterlockPlacementPass();
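Illustrative only: the new passes register like any other, e.g. (descriptor set numbers are placeholders):
spvtools::Optimizer opt(SPV_ENV_VULKAN_1_3);
opt.RegisterPass(spvtools::CreateTrimCapabilitiesPass())
    .RegisterPass(spvtools::CreateSwitchDescriptorSetPass(/*ds_from=*/0, /*ds_to=*/3))
    .RegisterPass(spvtools::CreateInvocationInterlockPlacementPass());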
} // namespace spvtools
#endif // INCLUDE_SPIRV_TOOLS_OPTIMIZER_HPP_

View File

@ -23,6 +23,11 @@ set -x
BUILD_ROOT=$PWD
SRC=$PWD/github/SPIRV-Tools
# This is required to run any git command in the docker container since the
# owner will have changed between the clone environment and the container.
# Marking the root of the repo as safe for ownership changes.
git config --global --add safe.directory $SRC
# Get clang-format-5.0.0.
# Once kokoro upgrades the Ubuntu VMs, we can use 'apt-get install clang-format'
curl -L http://releases.llvm.org/5.0.0/clang+llvm-5.0.0-linux-x86_64-ubuntu14.04.tar.xz -o clang-llvm.tar.xz

View File

@ -22,4 +22,3 @@ set -x
SCRIPT_DIR=`dirname "$BASH_SOURCE"`
source $SCRIPT_DIR/../scripts/macos/build.sh Debug

View File

@ -24,21 +24,22 @@ CC=clang
CXX=clang++
SRC=$PWD/github/SPIRV-Tools
# This is required to run any git command in the docker container since the
# owner will have changed between the clone environment and the container.
# Marking the root of the repo as safe for ownership changes.
git config --global --add safe.directory $SRC
cd $SRC
git clone --depth=1 https://github.com/KhronosGroup/SPIRV-Headers external/spirv-headers
git clone https://github.com/google/googletest external/googletest
cd external && cd googletest && git reset --hard 1fb1bb23bb8418dc73a5a9a82bbed31dc610fec7 && cd .. && cd ..
git clone --depth=1 https://github.com/google/effcee external/effcee
git clone --depth=1 https://github.com/google/re2 external/re2
/usr/bin/python3 utils/git-sync-deps --treeless
# Get bazel 5.0.0
gsutil cp gs://bazel/5.0.0/release/bazel-5.0.0-darwin-x86_64 .
chmod +x bazel-5.0.0-darwin-x86_64
echo $(date): Build everything...
./bazel-5.0.0-darwin-x86_64 build :all
./bazel-5.0.0-darwin-x86_64 build --cxxopt=-std=c++17 :all
echo $(date): Build completed.
echo $(date): Starting bazel test...
./bazel-5.0.0-darwin-x86_64 test :all
./bazel-5.0.0-darwin-x86_64 test --cxxopt=-std=c++17 :all
echo $(date): Bazel test completed.

View File

@ -22,4 +22,3 @@ set -x
SCRIPT_DIR=`dirname "$BASH_SOURCE"`
source $SCRIPT_DIR/../scripts/macos/build.sh RelWithDebInfo

View File

@ -20,6 +20,11 @@ set -e
# Display commands being run.
set -x
# This is required to run any git command in the docker container since the
# owner will have changed between the clone environment and the container.
# Marking the root of the repo as safe for ownership changes.
git config --global --add safe.directory $ROOT_DIR
. /bin/using.sh # Declare the bash `using` function for configuring toolchains.
if [ $COMPILER = "clang" ]; then
@ -30,14 +35,6 @@ fi
cd $ROOT_DIR
function clone_if_missing() {
url=$1
dir=$2
if [[ ! -d "$dir" ]]; then
git clone ${@:3} "$url" "$dir"
fi
}
function clean_dir() {
dir=$1
if [[ -d "$dir" ]]; then
@ -46,12 +43,10 @@ function clean_dir() {
mkdir "$dir"
}
clone_if_missing https://github.com/KhronosGroup/SPIRV-Headers external/spirv-headers --depth=1
clone_if_missing https://github.com/google/googletest external/googletest
pushd external/googletest; git reset --hard 1fb1bb23bb8418dc73a5a9a82bbed31dc610fec7; popd
clone_if_missing https://github.com/google/effcee external/effcee --depth=1
clone_if_missing https://github.com/google/re2 external/re2 --depth=1
clone_if_missing https://github.com/protocolbuffers/protobuf external/protobuf --branch v3.13.0.1
if [ $TOOL != "cmake-smoketest" ]; then
# Get source for dependencies, as specified in the DEPS file
/usr/bin/python3 utils/git-sync-deps --treeless
fi
if [ $TOOL = "cmake" ]; then
using cmake-3.17.2
@ -136,6 +131,7 @@ elif [ $TOOL = "cmake-smoketest" ]; then
git clone https://github.com/KhronosGroup/SPIRV-Headers.git spirv-headers
git clone https://github.com/google/re2
git clone https://github.com/google/effcee
git clone https://github.com/abseil/abseil-cpp abseil_cpp
cd $SHADERC_DIR
mkdir build
@ -146,7 +142,7 @@ elif [ $TOOL = "cmake-smoketest" ]; then
cmake -GNinja -DRE2_BUILD_TESTING=OFF -DCMAKE_BUILD_TYPE="Release" ..
echo $(date): Build glslang...
ninja glslangValidator
ninja glslang-standalone
echo $(date): Build everything...
ninja
@ -160,7 +156,7 @@ elif [ $TOOL = "cmake-smoketest" ]; then
echo $(date): ctest completed.
elif [ $TOOL = "cmake-android-ndk" ]; then
using cmake-3.17.2
using ndk-r21d
using ndk-r25c
using ninja-1.10.0
clean_dir "$ROOT_DIR/build"
@ -168,7 +164,7 @@ elif [ $TOOL = "cmake-android-ndk" ]; then
echo $(date): Starting build...
cmake -DCMAKE_BUILD_TYPE=Release \
-DANDROID_NATIVE_API_LEVEL=android-16 \
-DANDROID_NATIVE_API_LEVEL=android-24 \
-DANDROID_ABI="armeabi-v7a with NEON" \
-DSPIRV_SKIP_TESTS=ON \
-DCMAKE_TOOLCHAIN_FILE="$ANDROID_NDK_HOME/build/cmake/android.toolchain.cmake" \
@ -180,7 +176,7 @@ elif [ $TOOL = "cmake-android-ndk" ]; then
ninja
echo $(date): Build completed.
elif [ $TOOL = "android-ndk-build" ]; then
using ndk-r21d
using ndk-r25c
clean_dir "$ROOT_DIR/build"
cd "$ROOT_DIR/build"
@ -198,10 +194,10 @@ elif [ $TOOL = "bazel" ]; then
using bazel-5.0.0
echo $(date): Build everything...
bazel build :all
bazel build --cxxopt=-std=c++17 :all
echo $(date): Build completed.
echo $(date): Starting bazel test...
bazel test :all
bazel test --cxxopt=-std=c++17 :all
echo $(date): Bazel test completed.
fi

View File

@ -26,6 +26,18 @@ COMPILER=$2
TOOL=$3
BUILD_SHA=${KOKORO_GITHUB_COMMIT:-$KOKORO_GITHUB_PULL_REQUEST_COMMIT}
# chown the given directory to the current user, if it exists.
# Docker creates files with the root user - this can upset the Kokoro artifact copier.
function chown_dir() {
dir=$1
if [[ -d "$dir" ]]; then
sudo chown -R "$(id -u):$(id -g)" "$dir"
fi
}
set +e
# Allow build failures
# "--privileged" is required to run ptrace in the asan builds.
docker run --rm -i \
--privileged \
@ -41,16 +53,11 @@ docker run --rm -i \
--env BUILD_SHA="${BUILD_SHA}" \
--entrypoint "${SCRIPT_DIR}/build-docker.sh" \
"gcr.io/shaderc-build/radial-build:latest"
RESULT=$?
# chown the given directory to the current user, if it exists.
# Docker creates files with the root user - this can upset the Kokoro artifact copier.
function chown_dir() {
dir=$1
if [[ -d "$dir" ]]; then
sudo chown -R "$(id -u):$(id -g)" "$dir"
fi
}
# This is important. If the permissions are not fixed, kokoro will fail
# to pull build artifacts, and put the build in tool-failure state, which
# blocks the logs.
chown_dir "${ROOT_DIR}/build"
chown_dir "${ROOT_DIR}/external"
exit $RESULT

View File

@ -24,6 +24,11 @@ BUILD_ROOT=$PWD
SRC=$PWD/github/SPIRV-Tools
BUILD_TYPE=$1
# This is required to run any git command in the docker container since the
# owner will have changed between the clone environment and the container.
# Marking the root of the repo as safe for ownership changes.
git config --global --add safe.directory $SRC
# Get NINJA.
wget -q https://github.com/ninja-build/ninja/releases/download/v1.8.2/ninja-mac.zip
unzip -q ninja-mac.zip
@ -31,23 +36,16 @@ chmod +x ninja
export PATH="$PWD:$PATH"
cd $SRC
git clone --depth=1 https://github.com/KhronosGroup/SPIRV-Headers external/spirv-headers
git clone https://github.com/google/googletest external/googletest
cd external && cd googletest && git reset --hard 1fb1bb23bb8418dc73a5a9a82bbed31dc610fec7 && cd .. && cd ..
git clone --depth=1 https://github.com/google/effcee external/effcee
git clone --depth=1 https://github.com/google/re2 external/re2
git clone --depth=1 --branch v3.13.0.1 https://github.com/protocolbuffers/protobuf external/protobuf
python3 utils/git-sync-deps --treeless
mkdir build && cd $SRC/build
# Invoke the build.
BUILD_SHA=${KOKORO_GITHUB_COMMIT:-$KOKORO_GITHUB_PULL_REQUEST_COMMIT}
echo $(date): Starting build...
# We need Python 3. At the moment python3.7 is the newest Python on Kokoro.
cmake \
-GNinja \
-DCMAKE_INSTALL_PREFIX=$KOKORO_ARTIFACTS_DIR/install \
-DPYTHON_EXECUTABLE:FILEPATH=/usr/local/bin/python3.7 \
-DCMAKE_C_COMPILER=clang \
-DCMAKE_CXX_COMPILER=clang++ \
-DCMAKE_BUILD_TYPE=$BUILD_TYPE \

View File

@ -24,26 +24,23 @@ set VS_VERSION=%2
:: Force usage of python 3.6
set PATH=C:\python36;"C:\Program Files\cmake-3.23.1-windows-x86_64\bin";%PATH%
cd %SRC%
git clone --depth=1 https://github.com/KhronosGroup/SPIRV-Headers external/spirv-headers
git clone https://github.com/google/googletest external/googletest
cd external && cd googletest && git reset --hard 1fb1bb23bb8418dc73a5a9a82bbed31dc610fec7 && cd .. && cd ..
git clone --depth=1 https://github.com/google/effcee external/effcee
git clone --depth=1 https://github.com/google/re2 external/re2
git clone --depth=1 --branch v3.13.0.1 https://github.com/protocolbuffers/protobuf external/protobuf
:: #########################################
:: set up msvc build env
:: #########################################
if %VS_VERSION% == 2017 (
call "C:\Program Files (x86)\Microsoft Visual Studio\2017\Community\VC\Auxiliary\Build\vcvarsall.bat" x64
echo "Using VS 2017..."
) else if %VS_VERSION% == 2015 (
call "C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat" x64
echo "Using VS 2015..."
:: RE2 does not support VS2015, so we must disable tests.
set BUILD_TESTS=NO
) else if %VS_VERSION% == 2019 (
call "C:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Auxiliary\Build\vcvarsall.bat" x64
echo "Using VS 2019..."
)
cd %SRC%
python utils/git-sync-deps --treeless
mkdir build
cd build
@ -62,6 +59,10 @@ set CMAKE_FLAGS=-DCMAKE_INSTALL_PREFIX=%KOKORO_ARTIFACTS_DIR%\install -GNinja -D
:: Build spirv-fuzz
set CMAKE_FLAGS=%CMAKE_FLAGS% -DSPIRV_BUILD_FUZZER=ON
if "%BUILD_TESTS%" == "NO" (
set CMAKE_FLAGS=-DSPIRV_SKIP_TESTS=ON %CMAKE_FLAGS%
)
cmake %CMAKE_FLAGS% ..
if %ERRORLEVEL% NEQ 0 exit /b %ERRORLEVEL%
@ -77,10 +78,12 @@ setlocal ENABLEDELAYEDEXPANSION
:: ################################################
:: Run the tests
:: ################################################
echo "Running Tests... %DATE% %TIME%"
ctest -C %BUILD_TYPE% --output-on-failure --timeout 300
if !ERRORLEVEL! NEQ 0 exit /b !ERRORLEVEL!
echo "Tests Completed %DATE% %TIME%"
if "%BUILD_TESTS%" NEQ "NO" (
echo "Running Tests... %DATE% %TIME%"
ctest -C %BUILD_TYPE% --output-on-failure --timeout 300
if !ERRORLEVEL! NEQ 0 exit /b !ERRORLEVEL!
echo "Tests Completed %DATE% %TIME%"
)
:: ################################################
:: Install and package.

View File

@ -1,16 +0,0 @@
# Copyright (c) 2018 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Continuous build configuration.
build_file: "SPIRV-Tools/kokoro/windows-msvc-2013-release/build.bat"

View File

@ -1,16 +0,0 @@
# Copyright (c) 2018 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Presubmit build configuration.
build_file: "SPIRV-Tools/kokoro/windows-msvc-2013-release/build.bat"

View File

@ -1,59 +0,0 @@
:: Copyright (c) 2019 Google LLC.
::
:: Licensed under the Apache License, Version 2.0 (the "License");
:: you may not use this file except in compliance with the License.
:: You may obtain a copy of the License at
::
:: http://www.apache.org/licenses/LICENSE-2.0
::
:: Unless required by applicable law or agreed to in writing, software
:: distributed under the License is distributed on an "AS IS" BASIS,
:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
:: See the License for the specific language governing permissions and
:: limitations under the License.
::
:: Windows Build Script.
@echo on
set SRC=%cd%\github\SPIRV-Tools
:: Force usage of python 3.6
set PATH=C:\python36;%PATH%
:: Get dependencies
cd %SRC%
git clone --depth=1 https://github.com/KhronosGroup/SPIRV-Headers external/spirv-headers
git clone https://github.com/google/googletest external/googletest
cd external && cd googletest && git reset --hard 1fb1bb23bb8418dc73a5a9a82bbed31dc610fec7 && cd .. && cd ..
git clone --depth=1 https://github.com/google/effcee external/effcee
git clone --depth=1 https://github.com/google/re2 external/re2
:: REM Install Bazel.
wget -q https://github.com/bazelbuild/bazel/releases/download/5.0.0/bazel-5.0.0-windows-x86_64.zip
unzip -q bazel-5.0.0-windows-x86_64.zip
:: Set up MSVC
call "C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat" x64
set BAZEL_VS=C:\Program Files (x86)\Microsoft Visual Studio 14.0
set BAZEL_VC=C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC
set BAZEL_PYTHON=c:\tools\python2\python.exe
:: #########################################
:: Start building.
:: #########################################
echo "Build everything... %DATE% %TIME%"
bazel.exe build :all
if %ERRORLEVEL% NEQ 0 exit /b %ERRORLEVEL%
echo "Build Completed %DATE% %TIME%"
:: ##############
:: Run the tests
:: ##############
echo "Running Tests... %DATE% %TIME%"
bazel.exe test :all
if %ERRORLEVEL% NEQ 0 exit /b %ERRORLEVEL%
echo "Tests Completed %DATE% %TIME%"
exit /b 0

View File

@ -1,16 +0,0 @@
# Copyright (c) 2019 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Continuous build configuration.
build_file: "SPIRV-Tools/kokoro/windows-msvc-2015-release-bazel/build.bat"

View File

@ -1,24 +0,0 @@
:: Copyright (c) 2018 Google LLC.
::
:: Licensed under the Apache License, Version 2.0 (the "License");
:: you may not use this file except in compliance with the License.
:: You may obtain a copy of the License at
::
:: http://www.apache.org/licenses/LICENSE-2.0
::
:: Unless required by applicable law or agreed to in writing, software
:: distributed under the License is distributed on an "AS IS" BASIS,
:: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
:: See the License for the specific language governing permissions and
:: limitations under the License.
::
:: Windows Build Script.
@echo on
:: Find out the directory of the common build script.
set SCRIPT_DIR=%~dp0
:: Call with correct parameter
call %SCRIPT_DIR%\..\scripts\windows\build.bat RelWithDebInfo 2015

View File

@ -1,4 +1,4 @@
:: Copyright (c) 2018 Google LLC.
:: Copyright (c) 2023 Google LLC
::
:: Licensed under the Apache License, Version 2.0 (the "License");
:: you may not use this file except in compliance with the License.
@ -20,4 +20,4 @@
set SCRIPT_DIR=%~dp0
:: Call with correct parameter
call %SCRIPT_DIR%\..\scripts\windows\build.bat Debug 2017
call %SCRIPT_DIR%\..\scripts\windows\build.bat Debug 2019

View File

@ -1,4 +1,4 @@
# Copyright (c) 2018 Google LLC.
# Copyright (c) 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -13,7 +13,7 @@
# limitations under the License.
# Continuous build configuration.
build_file: "SPIRV-Tools/kokoro/windows-msvc-2017-debug/build.bat"
build_file: "SPIRV-Tools/kokoro/windows-msvc-2019-debug/build.bat"
action {
define_artifacts {

View File

@ -1,4 +1,4 @@
# Copyright (c) 2018 Google LLC.
# Copyright (c) 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -13,4 +13,4 @@
# limitations under the License.
# Presubmit build configuration.
build_file: "SPIRV-Tools/kokoro/windows-msvc-2017-debug/build.bat"
build_file: "SPIRV-Tools/kokoro/windows-msvc-2019-debug/build.bat"

View File

@ -1,4 +1,4 @@
:: Copyright (c) 2018 Google LLC.
:: Copyright (c) 2023 Google LLC
::
:: Licensed under the Apache License, Version 2.0 (the "License");
:: you may not use this file except in compliance with the License.
@ -20,5 +20,5 @@
set SCRIPT_DIR=%~dp0
:: Call with correct parameter
call %SCRIPT_DIR%\..\scripts\windows\build.bat RelWithDebInfo 2013
call %SCRIPT_DIR%\..\scripts\windows\build.bat RelWithDebInfo 2019

View File

@ -1,4 +1,4 @@
# Copyright (c) 2018 Google LLC.
# Copyright (c) 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -13,4 +13,10 @@
# limitations under the License.
# Continuous build configuration.
build_file: "SPIRV-Tools/kokoro/windows-msvc-2015-release/build.bat"
build_file: "SPIRV-Tools/kokoro/windows-msvc-2019-release/build.bat"
action {
define_artifacts {
regex: "install.zip"
}
}

View File

@ -1,4 +1,4 @@
# Copyright (c) 2018 Google LLC.
# Copyright (c) 2023 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -13,4 +13,4 @@
# limitations under the License.
# Presubmit build configuration.
build_file: "SPIRV-Tools/kokoro/windows-msvc-2015-release/build.bat"
build_file: "SPIRV-Tools/kokoro/windows-msvc-2019-release/build.bat"

View File

@ -31,12 +31,13 @@ macro(spvtools_core_tables CONFIG_VERSION)
set(GRAMMAR_INSTS_INC_FILE "${spirv-tools_BINARY_DIR}/core.insts-${CONFIG_VERSION}.inc")
set(GRAMMAR_KINDS_INC_FILE "${spirv-tools_BINARY_DIR}/operand.kinds-${CONFIG_VERSION}.inc")
add_custom_command(OUTPUT ${GRAMMAR_INSTS_INC_FILE} ${GRAMMAR_KINDS_INC_FILE}
COMMAND ${PYTHON_EXECUTABLE} ${GRAMMAR_PROCESSING_SCRIPT}
COMMAND Python3::Interpreter ${GRAMMAR_PROCESSING_SCRIPT}
--spirv-core-grammar=${GRAMMAR_JSON_FILE}
--extinst-debuginfo-grammar=${DEBUGINFO_GRAMMAR_JSON_FILE}
--extinst-cldebuginfo100-grammar=${CLDEBUGINFO100_GRAMMAR_JSON_FILE}
--core-insts-output=${GRAMMAR_INSTS_INC_FILE}
--operand-kinds-output=${GRAMMAR_KINDS_INC_FILE}
--output-language=c++
DEPENDS ${GRAMMAR_PROCESSING_SCRIPT}
${GRAMMAR_JSON_FILE}
${DEBUGINFO_GRAMMAR_JSON_FILE}
@ -52,12 +53,13 @@ macro(spvtools_enum_string_mapping CONFIG_VERSION)
set(GRAMMAR_ENUM_STRING_MAPPING_INC_FILE "${spirv-tools_BINARY_DIR}/enum_string_mapping.inc")
add_custom_command(OUTPUT ${GRAMMAR_EXTENSION_ENUM_INC_FILE}
${GRAMMAR_ENUM_STRING_MAPPING_INC_FILE}
COMMAND ${PYTHON_EXECUTABLE} ${GRAMMAR_PROCESSING_SCRIPT}
COMMAND Python3::Interpreter ${GRAMMAR_PROCESSING_SCRIPT}
--spirv-core-grammar=${GRAMMAR_JSON_FILE}
--extinst-debuginfo-grammar=${DEBUGINFO_GRAMMAR_JSON_FILE}
--extinst-cldebuginfo100-grammar=${CLDEBUGINFO100_GRAMMAR_JSON_FILE}
--extension-enum-output=${GRAMMAR_EXTENSION_ENUM_INC_FILE}
--enum-string-mapping-output=${GRAMMAR_ENUM_STRING_MAPPING_INC_FILE}
--output-language=c++
DEPENDS ${GRAMMAR_PROCESSING_SCRIPT}
${GRAMMAR_JSON_FILE}
${DEBUGINFO_GRAMMAR_JSON_FILE}
@ -73,7 +75,7 @@ macro(spvtools_vimsyntax CONFIG_VERSION CLVERSION)
set(OPENCL_GRAMMAR_JSON_FILE "${SPIRV_HEADER_INCLUDE_DIR}/spirv/${CONFIG_VERSION}/extinst.opencl.std.100.grammar.json")
set(VIMSYNTAX_FILE "${spirv-tools_BINARY_DIR}/spvasm.vim")
add_custom_command(OUTPUT ${VIMSYNTAX_FILE}
COMMAND ${PYTHON_EXECUTABLE} ${VIMSYNTAX_PROCESSING_SCRIPT}
COMMAND Python3::Interpreter ${VIMSYNTAX_PROCESSING_SCRIPT}
--spirv-core-grammar=${GRAMMAR_JSON_FILE}
--extinst-debuginfo-grammar=${DEBUGINFO_GRAMMAR_JSON_FILE}
--extinst-glsl-grammar=${GLSL_GRAMMAR_JSON_FILE}
@ -89,9 +91,10 @@ macro(spvtools_glsl_tables CONFIG_VERSION)
set(GLSL_GRAMMAR_JSON_FILE "${SPIRV_HEADER_INCLUDE_DIR}/spirv/${CONFIG_VERSION}/extinst.glsl.std.450.grammar.json")
set(GRAMMAR_INC_FILE "${spirv-tools_BINARY_DIR}/glsl.std.450.insts.inc")
add_custom_command(OUTPUT ${GRAMMAR_INC_FILE}
COMMAND ${PYTHON_EXECUTABLE} ${GRAMMAR_PROCESSING_SCRIPT}
COMMAND Python3::Interpreter ${GRAMMAR_PROCESSING_SCRIPT}
--extinst-glsl-grammar=${GLSL_GRAMMAR_JSON_FILE}
--glsl-insts-output=${GRAMMAR_INC_FILE}
--output-language=c++
DEPENDS ${GRAMMAR_PROCESSING_SCRIPT} ${CORE_GRAMMAR_JSON_FILE} ${GLSL_GRAMMAR_JSON_FILE}
COMMENT "Generate info tables for GLSL extended instructions and operands v${CONFIG_VERSION}.")
list(APPEND EXTINST_CPP_DEPENDS ${GRAMMAR_INC_FILE})
@ -102,7 +105,7 @@ macro(spvtools_opencl_tables CONFIG_VERSION)
set(OPENCL_GRAMMAR_JSON_FILE "${SPIRV_HEADER_INCLUDE_DIR}/spirv/${CONFIG_VERSION}/extinst.opencl.std.100.grammar.json")
set(GRAMMAR_INC_FILE "${spirv-tools_BINARY_DIR}/opencl.std.insts.inc")
add_custom_command(OUTPUT ${GRAMMAR_INC_FILE}
COMMAND ${PYTHON_EXECUTABLE} ${GRAMMAR_PROCESSING_SCRIPT}
COMMAND Python3::Interpreter ${GRAMMAR_PROCESSING_SCRIPT}
--extinst-opencl-grammar=${OPENCL_GRAMMAR_JSON_FILE}
--opencl-insts-output=${GRAMMAR_INC_FILE}
DEPENDS ${GRAMMAR_PROCESSING_SCRIPT} ${CORE_GRAMMAR_JSON_FILE} ${OPENCL_GRAMMAR_JSON_FILE}
@ -117,7 +120,7 @@ macro(spvtools_vendor_tables VENDOR_TABLE SHORT_NAME OPERAND_KIND_PREFIX)
set(GRAMMAR_FILE "${spirv-tools_SOURCE_DIR}/source/extinst.${VENDOR_TABLE}.grammar.json")
endif()
add_custom_command(OUTPUT ${INSTS_FILE}
COMMAND ${PYTHON_EXECUTABLE} ${GRAMMAR_PROCESSING_SCRIPT}
COMMAND Python3::Interpreter ${GRAMMAR_PROCESSING_SCRIPT}
--extinst-vendor-grammar=${GRAMMAR_FILE}
--vendor-insts-output=${INSTS_FILE}
--vendor-operand-kind-prefix=${OPERAND_KIND_PREFIX}
@ -131,7 +134,7 @@ endmacro(spvtools_vendor_tables)
macro(spvtools_extinst_lang_headers NAME GRAMMAR_FILE)
set(OUT_H ${spirv-tools_BINARY_DIR}/${NAME}.h)
add_custom_command(OUTPUT ${OUT_H}
COMMAND ${PYTHON_EXECUTABLE} ${LANG_HEADER_PROCESSING_SCRIPT}
COMMAND Python3::Interpreter ${LANG_HEADER_PROCESSING_SCRIPT}
--extinst-grammar=${GRAMMAR_FILE}
--extinst-output-path=${OUT_H}
DEPENDS ${LANG_HEADER_PROCESSING_SCRIPT} ${GRAMMAR_FILE}
@ -165,7 +168,7 @@ set_property(TARGET spirv-tools-vimsyntax PROPERTY FOLDER "SPIRV-Tools utilities
set(GENERATOR_INC_FILE ${spirv-tools_BINARY_DIR}/generators.inc)
set(SPIRV_XML_REGISTRY_FILE ${SPIRV_HEADER_INCLUDE_DIR}/spirv/spir-v.xml)
add_custom_command(OUTPUT ${GENERATOR_INC_FILE}
COMMAND ${PYTHON_EXECUTABLE} ${XML_REGISTRY_PROCESSING_SCRIPT}
COMMAND Python3::Interpreter ${XML_REGISTRY_PROCESSING_SCRIPT}
--xml=${SPIRV_XML_REGISTRY_FILE}
--generator-output=${GENERATOR_INC_FILE}
DEPENDS ${XML_REGISTRY_PROCESSING_SCRIPT} ${SPIRV_XML_REGISTRY_FILE}
@ -195,7 +198,7 @@ set(SPIRV_TOOLS_BUILD_VERSION_INC_GENERATOR
set(SPIRV_TOOLS_CHANGES_FILE
${spirv-tools_SOURCE_DIR}/CHANGES)
add_custom_command(OUTPUT ${SPIRV_TOOLS_BUILD_VERSION_INC}
COMMAND ${PYTHON_EXECUTABLE}
COMMAND Python3::Interpreter
${SPIRV_TOOLS_BUILD_VERSION_INC_GENERATOR}
${SPIRV_TOOLS_CHANGES_FILE} ${SPIRV_TOOLS_BUILD_VERSION_INC}
DEPENDS ${SPIRV_TOOLS_BUILD_VERSION_INC_GENERATOR}
@ -325,6 +328,7 @@ set(SPIRV_SOURCES
${CMAKE_CURRENT_SOURCE_DIR}/val/validate_primitives.cpp
${CMAKE_CURRENT_SOURCE_DIR}/val/validate_ray_query.cpp
${CMAKE_CURRENT_SOURCE_DIR}/val/validate_ray_tracing.cpp
${CMAKE_CURRENT_SOURCE_DIR}/val/validate_ray_tracing_reorder.cpp
${CMAKE_CURRENT_SOURCE_DIR}/val/validate_scopes.cpp
${CMAKE_CURRENT_SOURCE_DIR}/val/validate_small_type_uses.cpp
${CMAKE_CURRENT_SOURCE_DIR}/val/validate_type.cpp
@ -414,17 +418,8 @@ if("${CMAKE_SYSTEM_NAME}" STREQUAL "Linux")
endif()
endif()
if (ANDROID)
foreach(target ${SPIRV_TOOLS_TARGETS})
target_link_libraries(${target} PRIVATE android log)
endforeach()
endif()
if(ENABLE_SPIRV_TOOLS_INSTALL)
install(TARGETS ${SPIRV_TOOLS_TARGETS} EXPORT ${SPIRV_TOOLS}Targets
RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}
LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR})
install(TARGETS ${SPIRV_TOOLS_TARGETS} EXPORT ${SPIRV_TOOLS}Targets)
export(EXPORT ${SPIRV_TOOLS}Targets FILE ${SPIRV_TOOLS}Target.cmake)
spvtools_config_package_dir(${SPIRV_TOOLS} PACKAGE_DIR)

View File

@ -21,6 +21,7 @@
#include "source/ext_inst.h"
#include "source/opcode.h"
#include "source/operand.h"
#include "source/spirv_target_env.h"
#include "source/table.h"
namespace spvtools {
@ -78,16 +79,16 @@ spv_result_t spvTextParseMaskOperand(spv_target_env env,
// Associates an opcode with its name.
struct SpecConstantOpcodeEntry {
SpvOp opcode;
spv::Op opcode;
const char* name;
};
// All the opcodes allowed as the operation for OpSpecConstantOp.
// The name does not have the usual "Op" prefix. For example opcode SpvOpIAdd
// is associated with the name "IAdd".
// The name does not have the usual "Op" prefix. For example opcode
// spv::Op::IAdd is associated with the name "IAdd".
//
// clang-format off
#define CASE(NAME) { SpvOp##NAME, #NAME }
#define CASE(NAME) { spv::Op::Op##NAME, #NAME }
const SpecConstantOpcodeEntry kOpSpecConstantOpcodes[] = {
// Conversion
CASE(SConvert),
@ -154,11 +155,12 @@ const SpecConstantOpcodeEntry kOpSpecConstantOpcodes[] = {
CASE(InBoundsAccessChain),
CASE(PtrAccessChain),
CASE(InBoundsPtrAccessChain),
CASE(CooperativeMatrixLengthNV)
CASE(CooperativeMatrixLengthNV),
CASE(CooperativeMatrixLengthKHR)
};
// The 60 is determined by counting the opcodes listed in the spec.
static_assert(60 == sizeof(kOpSpecConstantOpcodes)/sizeof(kOpSpecConstantOpcodes[0]),
static_assert(61 == sizeof(kOpSpecConstantOpcodes)/sizeof(kOpSpecConstantOpcodes[0]),
"OpSpecConstantOp opcode table is incomplete");
#undef CASE
// clang-format on
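A short usage sketch of the lookup backed by this table (here "grammar" stands for an AssemblyGrammar instance and the surrounding declarations are assumed to be in scope; the snippet is illustrative, not part of the change):
spv::Op opcode;
if (grammar.lookupSpecConstantOpcode("IAdd", &opcode) == SPV_SUCCESS) {
  // Table names drop the "Op" prefix, so "IAdd" maps back to spv::Op::OpIAdd.
  assert(opcode == spv::Op::OpIAdd);
}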
@ -173,17 +175,20 @@ bool AssemblyGrammar::isValid() const {
}
CapabilitySet AssemblyGrammar::filterCapsAgainstTargetEnv(
const SpvCapability* cap_array, uint32_t count) const {
const spv::Capability* cap_array, uint32_t count) const {
CapabilitySet cap_set;
const auto version = spvVersionForTargetEnv(target_env_);
for (uint32_t i = 0; i < count; ++i) {
spv_operand_desc cap_desc = {};
spv_operand_desc entry = {};
if (SPV_SUCCESS == lookupOperand(SPV_OPERAND_TYPE_CAPABILITY,
static_cast<uint32_t>(cap_array[i]),
&cap_desc)) {
// spvOperandTableValueLookup() filters capabilities internally
// according to the current target environment by itself. So we
// should be safe to add this capability if the lookup succeeds.
cap_set.Add(cap_array[i]);
&entry)) {
// This token is visible in this environment if it's in an appropriate
// core version, or it is enabled by a capability or an extension.
if ((version >= entry->minVersion && version <= entry->lastVersion) ||
entry->numExtensions > 0u || entry->numCapabilities > 0u) {
cap_set.insert(cap_array[i]);
}
}
}
return cap_set;
@ -194,7 +199,7 @@ spv_result_t AssemblyGrammar::lookupOpcode(const char* name,
return spvOpcodeTableNameLookup(target_env_, opcodeTable_, name, desc);
}
spv_result_t AssemblyGrammar::lookupOpcode(SpvOp opcode,
spv_result_t AssemblyGrammar::lookupOpcode(spv::Op opcode,
spv_opcode_desc* desc) const {
return spvOpcodeTableValueLookup(target_env_, opcodeTable_, opcode, desc);
}
@ -214,7 +219,7 @@ spv_result_t AssemblyGrammar::lookupOperand(spv_operand_type_t type,
}
spv_result_t AssemblyGrammar::lookupSpecConstantOpcode(const char* name,
SpvOp* opcode) const {
spv::Op* opcode) const {
const auto* last = kOpSpecConstantOpcodes + kNumOpSpecConstantOpcodes;
const auto* found =
std::find_if(kOpSpecConstantOpcodes, last,
@ -226,7 +231,7 @@ spv_result_t AssemblyGrammar::lookupSpecConstantOpcode(const char* name,
return SPV_SUCCESS;
}
spv_result_t AssemblyGrammar::lookupSpecConstantOpcode(SpvOp opcode) const {
spv_result_t AssemblyGrammar::lookupSpecConstantOpcode(spv::Op opcode) const {
const auto* last = kOpSpecConstantOpcodes + kNumOpSpecConstantOpcodes;
const auto* found =
std::find_if(kOpSpecConstantOpcodes, last,

View File

@ -41,7 +41,7 @@ class AssemblyGrammar {
// Removes capabilities not available in the current target environment and
// returns the rest.
CapabilitySet filterCapsAgainstTargetEnv(const SpvCapability* cap_array,
CapabilitySet filterCapsAgainstTargetEnv(const spv::Capability* cap_array,
uint32_t count) const;
// Fills in the desc parameter with the information about the opcode
@ -52,7 +52,7 @@ class AssemblyGrammar {
// Fills in the desc parameter with the information about the opcode
// of the valid. Returns SPV_SUCCESS if the opcode was found, and
// SPV_ERROR_INVALID_LOOKUP if the opcode does not exist.
spv_result_t lookupOpcode(SpvOp opcode, spv_opcode_desc* desc) const;
spv_result_t lookupOpcode(spv::Op opcode, spv_opcode_desc* desc) const;
// Fills in the desc parameter with the information about the given
// operand. Returns SPV_SUCCESS if the operand was found, and
@ -82,11 +82,12 @@ class AssemblyGrammar {
// the integer add opcode for OpSpecConstantOp. On success, returns
// SPV_SUCCESS and sends the discovered operation code through the opcode
// parameter. On failure, returns SPV_ERROR_INVALID_LOOKUP.
spv_result_t lookupSpecConstantOpcode(const char* name, SpvOp* opcode) const;
spv_result_t lookupSpecConstantOpcode(const char* name,
spv::Op* opcode) const;
// Returns SPV_SUCCESS if the given opcode is valid as the opcode operand
// to OpSpecConstantOp.
spv_result_t lookupSpecConstantOpcode(SpvOp opcode) const;
spv_result_t lookupSpecConstantOpcode(spv::Op opcode) const;
// Parses a mask expression string for the given operand type.
//

View File

@ -156,7 +156,7 @@ class Parser {
// Issues a diagnostic describing an exhaustion of input condition when
// trying to decode an instruction operand, and returns
// SPV_ERROR_INVALID_BINARY.
spv_result_t exhaustedInputDiagnostic(size_t inst_offset, SpvOp opcode,
spv_result_t exhaustedInputDiagnostic(size_t inst_offset, spv::Op opcode,
spv_operand_type_t type) {
return diagnostic() << "End of input reached while decoding Op"
<< spvOpcodeString(opcode) << " starting at word "
@ -318,7 +318,7 @@ spv_result_t Parser::parseInstruction() {
<< inst_word_count;
}
spv_opcode_desc opcode_desc;
if (grammar_.lookupOpcode(static_cast<SpvOp>(inst.opcode), &opcode_desc))
if (grammar_.lookupOpcode(static_cast<spv::Op>(inst.opcode), &opcode_desc))
return diagnostic() << "Invalid opcode: " << inst.opcode;
// Advance past the opcode word. But remember the of the start
@ -418,7 +418,7 @@ spv_result_t Parser::parseOperand(size_t inst_offset,
std::vector<uint32_t>* words,
std::vector<spv_parsed_operand_t>* operands,
spv_operand_pattern_t* expected_operands) {
const SpvOp opcode = static_cast<SpvOp>(inst->opcode);
const spv::Op opcode = static_cast<spv::Op>(inst->opcode);
// We'll fill in this result as we go along.
spv_parsed_operand_t parsed_operand;
parsed_operand.offset = uint16_t(_.word_index - inst_offset);
@ -473,7 +473,7 @@ spv_result_t Parser::parseOperand(size_t inst_offset,
if (!word) return diagnostic(SPV_ERROR_INVALID_ID) << "Id is 0";
parsed_operand.type = SPV_OPERAND_TYPE_ID;
if (opcode == SpvOpExtInst && parsed_operand.offset == 3) {
if (opcode == spv::Op::OpExtInst && parsed_operand.offset == 3) {
// The current word is the extended instruction set Id.
// Set the extended instruction set type for the current instruction.
auto ext_inst_type_iter = _.import_id_to_ext_inst_type.find(word);
@ -494,7 +494,7 @@ spv_result_t Parser::parseOperand(size_t inst_offset,
break;
case SPV_OPERAND_TYPE_EXTENSION_INSTRUCTION_NUMBER: {
assert(SpvOpExtInst == opcode);
assert(spv::Op::OpExtInst == opcode);
assert(inst->ext_inst_type != SPV_EXT_INST_TYPE_NONE);
spv_ext_inst_desc ext_inst;
if (grammar_.lookupExtInst(inst->ext_inst_type, word, &ext_inst) ==
@ -516,14 +516,14 @@ spv_result_t Parser::parseOperand(size_t inst_offset,
} break;
case SPV_OPERAND_TYPE_SPEC_CONSTANT_OP_NUMBER: {
assert(SpvOpSpecConstantOp == opcode);
if (word > static_cast<uint32_t>(SpvOp::SpvOpMax) ||
grammar_.lookupSpecConstantOpcode(SpvOp(word))) {
assert(spv::Op::OpSpecConstantOp == opcode);
if (word > static_cast<uint32_t>(spv::Op::Max) ||
grammar_.lookupSpecConstantOpcode(spv::Op(word))) {
return diagnostic()
<< "Invalid " << spvOperandTypeStr(type) << ": " << word;
}
spv_opcode_desc opcode_entry = nullptr;
if (grammar_.lookupOpcode(SpvOp(word), &opcode_entry)) {
if (grammar_.lookupOpcode(spv::Op(word), &opcode_entry)) {
return diagnostic(SPV_ERROR_INTERNAL)
<< "OpSpecConstant opcode table out of sync";
}
@ -546,10 +546,17 @@ spv_result_t Parser::parseOperand(size_t inst_offset,
parsed_operand.number_bit_width = 32;
break;
case SPV_OPERAND_TYPE_LITERAL_FLOAT:
// These are regular single-word literal float operands.
parsed_operand.type = SPV_OPERAND_TYPE_LITERAL_FLOAT;
parsed_operand.number_kind = SPV_NUMBER_FLOATING;
parsed_operand.number_bit_width = 32;
break;
case SPV_OPERAND_TYPE_TYPED_LITERAL_NUMBER:
case SPV_OPERAND_TYPE_OPTIONAL_TYPED_LITERAL_INTEGER:
parsed_operand.type = SPV_OPERAND_TYPE_TYPED_LITERAL_NUMBER;
if (opcode == SpvOpSwitch) {
if (opcode == spv::Op::OpSwitch) {
// The literal operands have the same type as the value
// referenced by the selector Id.
const uint32_t selector_id = peekAt(inst_offset + 1);
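A concrete instance of the OpSwitch rule above, written as comments (illustrative SPIR-V, not part of the change):
// Given:
//   %sel = OpLoad %ulong %ptr        ; selector whose type is OpTypeInt 64 0
//   OpSwitch %sel %default 5 %case5
// the case literal 5 is decoded as a 64-bit unsigned integer and occupies two
// words, because OpSwitch case literals take their type from the selector.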
@ -575,7 +582,8 @@ spv_result_t Parser::parseOperand(size_t inst_offset,
<< " is not a scalar integer";
}
} else {
assert(opcode == SpvOpConstant || opcode == SpvOpSpecConstant);
assert(opcode == spv::Op::OpConstant ||
opcode == spv::Op::OpSpecConstant);
// The literal number type is determined by the type Id for the
// constant.
assert(inst->type_id);
@ -607,7 +615,7 @@ spv_result_t Parser::parseOperand(size_t inst_offset,
parsed_operand.num_words = uint16_t(string_num_words);
parsed_operand.type = SPV_OPERAND_TYPE_LITERAL_STRING;
if (SpvOpExtInstImport == opcode) {
if (spv::Op::OpExtInstImport == opcode) {
// Record the extended instruction type for the ID for this import.
// There is only one string literal argument to OpExtInstImport,
// so it's sufficient to guard this just on the opcode.
@ -625,7 +633,6 @@ spv_result_t Parser::parseOperand(size_t inst_offset,
} break;
case SPV_OPERAND_TYPE_CAPABILITY:
case SPV_OPERAND_TYPE_SOURCE_LANGUAGE:
case SPV_OPERAND_TYPE_EXECUTION_MODEL:
case SPV_OPERAND_TYPE_ADDRESSING_MODEL:
case SPV_OPERAND_TYPE_MEMORY_MODEL:
@ -682,6 +689,21 @@ spv_result_t Parser::parseOperand(size_t inst_offset,
spvPushOperandTypes(entry->operandTypes, expected_operands);
} break;
case SPV_OPERAND_TYPE_SOURCE_LANGUAGE: {
spv_operand_desc entry;
if (grammar_.lookupOperand(type, word, &entry)) {
return diagnostic()
<< "Invalid " << spvOperandTypeStr(parsed_operand.type)
<< " operand: " << word
<< ", if you are creating a new source language please use "
"value 0 "
"(Unknown) and when ready, add your source language to "
"SPRIV-Headers";
}
// Prepare to accept operands to this operand, if needed.
spvPushOperandTypes(entry->operandTypes, expected_operands);
} break;
case SPV_OPERAND_TYPE_FP_FAST_MATH_MODE:
case SPV_OPERAND_TYPE_FUNCTION_CONTROL:
case SPV_OPERAND_TYPE_LOOP_CONTROL:
@ -690,7 +712,9 @@ spv_result_t Parser::parseOperand(size_t inst_offset,
case SPV_OPERAND_TYPE_OPTIONAL_MEMORY_ACCESS:
case SPV_OPERAND_TYPE_SELECTION_CONTROL:
case SPV_OPERAND_TYPE_CLDEBUG100_DEBUG_INFO_FLAGS:
case SPV_OPERAND_TYPE_DEBUG_INFO_FLAGS: {
case SPV_OPERAND_TYPE_DEBUG_INFO_FLAGS:
case SPV_OPERAND_TYPE_COOPERATIVE_MATRIX_OPERANDS:
case SPV_OPERAND_TYPE_OPTIONAL_COOPERATIVE_MATRIX_OPERANDS: {
// This operand is a mask.
// Map an optional operand type to its corresponding concrete type.
@ -698,6 +722,8 @@ spv_result_t Parser::parseOperand(size_t inst_offset,
parsed_operand.type = SPV_OPERAND_TYPE_IMAGE;
else if (type == SPV_OPERAND_TYPE_OPTIONAL_MEMORY_ACCESS)
parsed_operand.type = SPV_OPERAND_TYPE_MEMORY_ACCESS;
if (type == SPV_OPERAND_TYPE_OPTIONAL_COOPERATIVE_MATRIX_OPERANDS)
parsed_operand.type = SPV_OPERAND_TYPE_COOPERATIVE_MATRIX_OPERANDS;
// Check validity of set mask bits. Also prepare for operands for those
// masks if they have any. To get operand order correct, scan from
@ -789,14 +815,14 @@ spv_result_t Parser::setNumericTypeInfoForType(
void Parser::recordNumberType(size_t inst_offset,
const spv_parsed_instruction_t* inst) {
const SpvOp opcode = static_cast<SpvOp>(inst->opcode);
const spv::Op opcode = static_cast<spv::Op>(inst->opcode);
if (spvOpcodeGeneratesType(opcode)) {
NumberType info = {SPV_NUMBER_NONE, 0};
if (SpvOpTypeInt == opcode) {
if (spv::Op::OpTypeInt == opcode) {
const bool is_signed = peekAt(inst_offset + 3) != 0;
info.type = is_signed ? SPV_NUMBER_SIGNED_INT : SPV_NUMBER_UNSIGNED_INT;
info.bit_width = peekAt(inst_offset + 2);
} else if (SpvOpTypeFloat == opcode) {
} else if (spv::Op::OpTypeFloat == opcode) {
info.type = SPV_NUMBER_FLOATING;
info.bit_width = peekAt(inst_offset + 2);
}
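For example (illustrative instruction), the bookkeeping above works out as follows:
// For "%long = OpTypeInt 64 1", peekAt(inst_offset + 2) yields the width 64
// and peekAt(inst_offset + 3) yields the signedness 1, so recordNumberType
// stores {SPV_NUMBER_SIGNED_INT, 64}; a later OpConstant whose type is %long
// then has its literal decoded as a 64-bit signed integer.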

View File

@ -275,10 +275,16 @@ std::vector<std::pair<BB*, BB*>> CFA<BB>::CalculateDominators(
std::vector<std::pair<bb_ptr, bb_ptr>> out;
for (auto idom : idoms) {
// At this point if there is no dominator for the node, just make it
// reflexive.
auto dominator = std::get<1>(idom).dominator;
if (dominator == undefined_dom) {
dominator = std::get<1>(idom).postorder_index;
}
// NOTE: performing a const cast for convenient usage with
// UpdateImmediateDominators
out.push_back({const_cast<BB*>(std::get<0>(idom)),
const_cast<BB*>(postorder[std::get<1>(idom).dominator])});
const_cast<BB*>(postorder[dominator])});
}
// Sort by postorder index to generate a deterministic ordering of edges.
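A small worked case for the reflexive fallback above (hypothetical CFG, sketch only):
// If a block B was never assigned a dominator during the traversal,
// idoms[B].dominator is still undefined_dom. The fallback substitutes B's own
// postorder index, so the emitted pair is (B, B): B is recorded as its own
// dominator instead of indexing postorder[] with the undefined sentinel.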

View File

@ -39,10 +39,7 @@ set_property(TARGET SPIRV-Tools-diff PROPERTY FOLDER "SPIRV-Tools libraries")
spvtools_check_symbol_exports(SPIRV-Tools-diff)
if(ENABLE_SPIRV_TOOLS_INSTALL)
install(TARGETS SPIRV-Tools-diff EXPORT SPIRV-Tools-diffTargets
RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}
LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR})
install(TARGETS SPIRV-Tools-diff EXPORT SPIRV-Tools-diffTargets)
export(EXPORT SPIRV-Tools-diffTargets FILE SPIRV-Tools-diffTargets.cmake)
spvtools_config_package_dir(SPIRV-Tools-diff PACKAGE_DIR)

File diff suppressed because it is too large

View File

@ -244,7 +244,7 @@ void InstructionDisassembler::EmitHeaderSchema(uint32_t schema) {
void InstructionDisassembler::EmitInstruction(
const spv_parsed_instruction_t& inst, size_t inst_byte_offset) {
auto opcode = static_cast<SpvOp>(inst.opcode);
auto opcode = static_cast<spv::Op>(inst.opcode);
if (inst.result_id) {
SetBlue();
@ -268,7 +268,7 @@ void InstructionDisassembler::EmitInstruction(
EmitOperand(inst, i);
}
if (comment_ && opcode == SpvOpName) {
if (comment_ && opcode == spv::Op::OpName) {
const spv_parsed_operand_t& operand = inst.operands[0];
const uint32_t word = inst.words[operand.offset];
stream_ << " ; id %" << word;
@ -290,8 +290,8 @@ void InstructionDisassembler::EmitInstruction(
void InstructionDisassembler::EmitSectionComment(
const spv_parsed_instruction_t& inst, bool& inserted_decoration_space,
bool& inserted_debug_space, bool& inserted_type_space) {
auto opcode = static_cast<SpvOp>(inst.opcode);
if (comment_ && opcode == SpvOpFunction) {
auto opcode = static_cast<spv::Op>(inst.opcode);
if (comment_ && opcode == spv::Op::OpFunction) {
stream_ << std::endl;
stream_ << std::string(indent_, ' ');
stream_ << "; Function " << name_mapper_(inst.result_id) << std::endl;
@ -351,13 +351,14 @@ void InstructionDisassembler::EmitOperand(const spv_parsed_instruction_t& inst,
} break;
case SPV_OPERAND_TYPE_SPEC_CONSTANT_OP_NUMBER: {
spv_opcode_desc opcode_desc;
if (grammar_.lookupOpcode(SpvOp(word), &opcode_desc))
if (grammar_.lookupOpcode(spv::Op(word), &opcode_desc))
assert(false && "should have caught this earlier");
SetRed();
stream_ << opcode_desc->name;
} break;
case SPV_OPERAND_TYPE_LITERAL_INTEGER:
case SPV_OPERAND_TYPE_TYPED_LITERAL_NUMBER: {
case SPV_OPERAND_TYPE_TYPED_LITERAL_NUMBER:
case SPV_OPERAND_TYPE_LITERAL_FLOAT: {
SetRed();
EmitNumericLiteral(&stream_, inst, operand);
ResetColor();

View File

@ -1,4 +1,4 @@
// Copyright (c) 2016 Google Inc.
// Copyright (c) 2023 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -12,196 +12,457 @@
// See the License for the specific language governing permissions and
// limitations under the License.
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <functional>
#include <initializer_list>
#include <limits>
#include <type_traits>
#include <vector>
#ifndef SOURCE_ENUM_SET_H_
#define SOURCE_ENUM_SET_H_
#include <cstdint>
#include <functional>
#include <memory>
#include <set>
#include <utility>
#include "source/latest_version_spirv_header.h"
#include "source/util/make_unique.h"
namespace spvtools {
// A set of values of a 32-bit enum type.
// It is fast and compact for the common case, where enum values
// are at most 63. But it can represent enums with larger values,
// as may appear in extensions.
template <typename EnumType>
// This container is optimized to store and retrieve unsigned enum values.
// The base model for this implementation is an open-addressing hashtable with
// linear probing. For small enums (max index < 64), all operations are O(1).
//
// - Enums are stored in buckets (64 contiguous values max per bucket)
// - Bucket ranges don't overlap, but don't have to be contiguous.
// - Enums are packed into 64-bit buckets, using 1 bit per enum value.
//
// Example:
// - MyEnum { A = 0, B = 1, C = 64, D = 65 }
// - 2 buckets are required:
// - bucket 0, storing values in the range [ 0; 64[
// - bucket 1, storing values in the range [64; 128[
//
// - Buckets are stored in a sorted vector (sorted by bucket range).
// - Retrieval is done by computing the theoretical bucket index using the enum
// value, and doing a linear scan from this position.
// - Insertion is done by retrieving the bucket and either:
// - inserting a new bucket in the sorted vector when no bucket has a
// compatible range.
// - setting the corresponding bit in the bucket.
// This means insertion in the middle/beginning can cause a memmove when no
// bucket is available. In our case, this happens at most 23 times for the
// largest enum we have (Opcodes).
template <typename T>
class EnumSet {
private:
// The ForEach method will call the functor on enum values in
// enum value order (lowest to highest). To make that easier, use
// an ordered set for the overflow values.
using OverflowSetType = std::set<uint32_t>;
using BucketType = uint64_t;
using ElementType = std::underlying_type_t<T>;
static_assert(std::is_enum_v<T>, "EnumSet only works with enums.");
static_assert(std::is_signed_v<ElementType> == false,
"EnumSet doesn't support signed enums.");
// Each bucket can hold up to `kBucketSize` distinct, contiguous enum values.
// The first value a bucket can hold must be aligned on `kBucketSize`.
struct Bucket {
// bit mask to store `kBucketSize` enums.
BucketType data;
// 1st enum this bucket can represent.
T start;
friend bool operator==(const Bucket& lhs, const Bucket& rhs) {
return lhs.start == rhs.start && lhs.data == rhs.data;
}
};
// How many distinct values can a bucket hold? 1 bit per value.
static constexpr size_t kBucketSize = sizeof(BucketType) * 8ULL;
public:
// Construct an empty set.
EnumSet() {}
// Construct an set with just the given enum value.
explicit EnumSet(EnumType c) { Add(c); }
// Construct an set from an initializer list of enum values.
EnumSet(std::initializer_list<EnumType> cs) {
for (auto c : cs) Add(c);
}
EnumSet(uint32_t count, const EnumType* ptr) {
for (uint32_t i = 0; i < count; ++i) Add(ptr[i]);
}
// Copy constructor.
EnumSet(const EnumSet& other) { *this = other; }
// Move constructor. The moved-from set is emptied.
EnumSet(EnumSet&& other) {
mask_ = other.mask_;
overflow_ = std::move(other.overflow_);
other.mask_ = 0;
other.overflow_.reset(nullptr);
}
// Assignment operator.
EnumSet& operator=(const EnumSet& other) {
if (&other != this) {
mask_ = other.mask_;
overflow_.reset(other.overflow_ ? new OverflowSetType(*other.overflow_)
: nullptr);
class Iterator {
public:
typedef Iterator self_type;
typedef T value_type;
typedef T& reference;
typedef T* pointer;
typedef std::forward_iterator_tag iterator_category;
typedef size_t difference_type;
Iterator(const Iterator& other)
: set_(other.set_),
bucketIndex_(other.bucketIndex_),
bucketOffset_(other.bucketOffset_) {}
Iterator& operator++() {
do {
if (bucketIndex_ >= set_->buckets_.size()) {
bucketIndex_ = set_->buckets_.size();
bucketOffset_ = 0;
break;
}
if (bucketOffset_ + 1 == kBucketSize) {
bucketOffset_ = 0;
++bucketIndex_;
} else {
++bucketOffset_;
}
} while (bucketIndex_ < set_->buckets_.size() &&
!set_->HasEnumAt(bucketIndex_, bucketOffset_));
return *this;
}
Iterator operator++(int) {
Iterator old = *this;
operator++();
return old;
}
T operator*() const {
assert(set_->HasEnumAt(bucketIndex_, bucketOffset_) &&
"operator*() called on an invalid iterator.");
return GetValueFromBucket(set_->buckets_[bucketIndex_], bucketOffset_);
}
bool operator!=(const Iterator& other) const {
return set_ != other.set_ || bucketOffset_ != other.bucketOffset_ ||
bucketIndex_ != other.bucketIndex_;
}
bool operator==(const Iterator& other) const {
return !(operator!=(other));
}
Iterator& operator=(const Iterator& other) {
set_ = other.set_;
bucketIndex_ = other.bucketIndex_;
bucketOffset_ = other.bucketOffset_;
return *this;
}
private:
Iterator(const EnumSet* set, size_t bucketIndex, ElementType bucketOffset)
: set_(set), bucketIndex_(bucketIndex), bucketOffset_(bucketOffset) {}
private:
const EnumSet* set_ = nullptr;
// Index of the bucket in the vector.
size_t bucketIndex_ = 0;
// Offset in bits in the current bucket.
ElementType bucketOffset_ = 0;
friend class EnumSet;
};
// Required to allow the use of std::inserter.
using value_type = T;
using const_iterator = Iterator;
using iterator = Iterator;
public:
iterator cbegin() const noexcept {
auto it = iterator(this, /* bucketIndex= */ 0, /* bucketOffset= */ 0);
if (buckets_.size() == 0) {
return it;
}
// The iterator has the logic to find the next valid bit. If the value 0
// is not stored, use it to find the next valid bit.
if (!HasEnumAt(it.bucketIndex_, it.bucketOffset_)) {
++it;
}
return it;
}
iterator begin() const noexcept { return cbegin(); }
iterator cend() const noexcept {
return iterator(this, buckets_.size(), /* bucketOffset= */ 0);
}
iterator end() const noexcept { return cend(); }
// Creates an empty set.
EnumSet() : buckets_(0), size_(0) {}
// Creates a set and stores `value` in it.
EnumSet(T value) : EnumSet() { insert(value); }
// Creates a set and stores each of `values` in it.
EnumSet(std::initializer_list<T> values) : EnumSet() {
for (auto item : values) {
insert(item);
}
}
// Creates a set and inserts the `count` enum values pointed to by `array`.
EnumSet(ElementType count, const T* array) : EnumSet() {
for (ElementType i = 0; i < count; i++) {
insert(array[i]);
}
}
// Creates a set initialized with the content of the range [begin; end[.
template <class InputIt>
EnumSet(InputIt begin, InputIt end) : EnumSet() {
for (; begin != end; ++begin) {
insert(*begin);
}
}
// Copies the EnumSet `other` into a new EnumSet.
EnumSet(const EnumSet& other)
: buckets_(other.buckets_), size_(other.size_) {}
// Moves the EnumSet `other` into a new EnumSet.
EnumSet(EnumSet&& other)
: buckets_(std::move(other.buckets_)), size_(other.size_) {}
// Deep-copies the EnumSet `other` into this EnumSet.
EnumSet& operator=(const EnumSet& other) {
buckets_ = other.buckets_;
size_ = other.size_;
return *this;
}
friend bool operator==(const EnumSet& a, const EnumSet& b) {
if (a.mask_ != b.mask_) {
return false;
// Matches std::unordered_set::insert behavior.
std::pair<iterator, bool> insert(const T& value) {
const size_t index = FindBucketForValue(value);
const ElementType offset = ComputeBucketOffset(value);
if (index >= buckets_.size() ||
buckets_[index].start != ComputeBucketStart(value)) {
size_ += 1;
InsertBucketFor(index, value);
return std::make_pair(Iterator(this, index, offset), true);
}
if (a.overflow_ == nullptr && b.overflow_ == nullptr) {
auto& bucket = buckets_[index];
const auto mask = ComputeMaskForValue(value);
if (bucket.data & mask) {
return std::make_pair(Iterator(this, index, offset), false);
}
size_ += 1;
bucket.data |= ComputeMaskForValue(value);
return std::make_pair(Iterator(this, index, offset), true);
}
// Inserts `value` in the set if possible.
// Similar to `std::unordered_set::insert`, except the hint is ignored.
// Returns an iterator to the inserted element, or the element preventing
// insertion.
iterator insert(const_iterator, const T& value) {
return insert(value).first;
}
// Inserts `value` in the set if possible.
// Similar to `std::unordered_set::insert`, except the hint is ignored.
// Returns an iterator to the inserted element, or the element preventing
// insertion.
iterator insert(const_iterator, T&& value) { return insert(value).first; }
// Inserts all the values in the range [`first`; `last`[.
// Similar to `std::unordered_set::insert`.
template <class InputIt>
void insert(InputIt first, InputIt last) {
for (auto it = first; it != last; ++it) {
insert(*it);
}
}
// Removes the value `value` from the set.
// Similar to `std::unordered_set::erase`.
// Returns the number of erased elements.
size_t erase(const T& value) {
const size_t index = FindBucketForValue(value);
if (index >= buckets_.size() ||
buckets_[index].start != ComputeBucketStart(value)) {
return 0;
}
auto& bucket = buckets_[index];
const auto mask = ComputeMaskForValue(value);
if (!(bucket.data & mask)) {
return 0;
}
size_ -= 1;
bucket.data &= ~mask;
if (bucket.data == 0) {
buckets_.erase(buckets_.cbegin() + index);
}
return 1;
}
// Returns true if `value` is present in the set.
bool contains(T value) const {
const size_t index = FindBucketForValue(value);
if (index >= buckets_.size() ||
buckets_[index].start != ComputeBucketStart(value)) {
return false;
}
auto& bucket = buckets_[index];
return bucket.data & ComputeMaskForValue(value);
}
// Returns 1 if `value` is present in the set, `0` otherwise.
inline size_t count(T value) const { return contains(value) ? 1 : 0; }
// Returns true if the set holds no values.
inline bool empty() const { return size_ == 0; }
// Returns the number of enums stored in this set.
size_t size() const { return size_; }
// Returns true if this set contains at least one value contained in `in_set`.
// Note: If `in_set` is empty, this function returns true.
bool HasAnyOf(const EnumSet<T>& in_set) const {
if (in_set.empty()) {
return true;
}
if (a.overflow_ == nullptr || b.overflow_ == nullptr) {
return false;
}
auto lhs = buckets_.cbegin();
auto rhs = in_set.buckets_.cbegin();
return *a.overflow_ == *b.overflow_;
}
while (lhs != buckets_.cend() && rhs != in_set.buckets_.cend()) {
if (lhs->start == rhs->start) {
if (lhs->data & rhs->data) {
// At least 1 bit is shared. Early return.
return true;
}
friend bool operator!=(const EnumSet& a, const EnumSet& b) {
return !(a == b);
}
lhs++;
rhs++;
continue;
}
// Adds the given enum value to the set. This has no effect if the
// enum value is already in the set.
void Add(EnumType c) { AddWord(ToWord(c)); }
// LHS bucket is smaller than the current RHS bucket. Catching up on RHS.
if (lhs->start < rhs->start) {
lhs++;
continue;
}
// Removes the given enum value from the set. This has no effect if the
// enum value is not in the set.
void Remove(EnumType c) { RemoveWord(ToWord(c)); }
// Returns true if this enum value is in the set.
bool Contains(EnumType c) const { return ContainsWord(ToWord(c)); }
// Applies f to each enum in the set, in order from smallest enum
// value to largest.
void ForEach(std::function<void(EnumType)> f) const {
for (uint32_t i = 0; i < 64; ++i) {
if (mask_ & AsMask(i)) f(static_cast<EnumType>(i));
}
if (overflow_) {
for (uint32_t c : *overflow_) f(static_cast<EnumType>(c));
}
}
// Returns true if the set is empty.
bool IsEmpty() const {
if (mask_) return false;
if (overflow_ && !overflow_->empty()) return false;
return true;
}
// Returns true if the set contains ANY of the elements of |in_set|,
// or if |in_set| is empty.
bool HasAnyOf(const EnumSet<EnumType>& in_set) const {
if (in_set.IsEmpty()) return true;
if (mask_ & in_set.mask_) return true;
if (!overflow_ || !in_set.overflow_) return false;
for (uint32_t item : *in_set.overflow_) {
if (overflow_->find(item) != overflow_->end()) return true;
// Otherwise, RHS needs to catch up on LHS.
rhs++;
}
return false;
}
private:
// Adds the given enum value (as a 32-bit word) to the set. This has no
// effect if the enum value is already in the set.
void AddWord(uint32_t word) {
if (auto new_bits = AsMask(word)) {
mask_ |= new_bits;
} else {
Overflow().insert(word);
// Returns the index of the last bucket in which `value` could be stored.
static constexpr inline size_t ComputeLargestPossibleBucketIndexFor(T value) {
return static_cast<size_t>(value) / kBucketSize;
}
// Returns the smallest enum value that could be contained in the same bucket
// as `value`.
static constexpr inline T ComputeBucketStart(T value) {
return static_cast<T>(kBucketSize *
ComputeLargestPossibleBucketIndexFor(value));
}
// Returns the index of the bit that corresponds to `value` in the bucket.
static constexpr inline ElementType ComputeBucketOffset(T value) {
return static_cast<ElementType>(value) % kBucketSize;
}
// Returns the bitmask used to represent the enum `value` in its bucket.
static constexpr inline BucketType ComputeMaskForValue(T value) {
return 1ULL << ComputeBucketOffset(value);
}
// Returns the `enum` stored in `bucket` at `offset`.
// `offset` is the bit-offset in the bucket storage.
static constexpr inline T GetValueFromBucket(const Bucket& bucket,
BucketType offset) {
return static_cast<T>(static_cast<ElementType>(bucket.start) + offset);
}
// For a given enum `value`, finds the bucket index that could contain this
// value. If no such bucket is found, the index at which the new bucket should
// be inserted is returned.
size_t FindBucketForValue(T value) const {
// Set is empty, insert at 0.
if (buckets_.size() == 0) {
return 0;
}
}
// Removes the given enum value (as a 32-bit word) from the set. This has no
// effect if the enum value is not in the set.
void RemoveWord(uint32_t word) {
if (auto new_bits = AsMask(word)) {
mask_ &= ~new_bits;
} else {
auto itr = Overflow().find(word);
if (itr != Overflow().end()) Overflow().erase(itr);
const T wanted_start = ComputeBucketStart(value);
assert(buckets_.size() > 0 &&
"Size must not be 0 here. Has the code above changed?");
size_t index = std::min(buckets_.size() - 1,
ComputeLargestPossibleBucketIndexFor(value));
// This loop behaves like std::upper_bound with a reverse iterator.
// Buckets are sorted. 3 main cases:
// - The bucket matches
// => returns the bucket index.
// - The found bucket is larger
// => scans left until it finds the correct bucket, or insertion point.
// - The found bucket is smaller
// => We are at the end, so we return past-end index for insertion.
for (; buckets_[index].start >= wanted_start; index--) {
if (index == 0) {
return 0;
}
}
return index + 1;
}
// Returns true if the enum represented as a 32-bit word is in the set.
bool ContainsWord(uint32_t word) const {
// We shouldn't call Overflow() since this is a const method.
if (auto bits = AsMask(word)) {
return (mask_ & bits) != 0;
} else if (auto overflow = overflow_.get()) {
return overflow->find(word) != overflow->end();
// Creates a new bucket to store `value` and inserts it at `index`.
// If the `index` is past the end, the bucket is inserted at the end of the
// vector.
void InsertBucketFor(size_t index, T value) {
const T bucket_start = ComputeBucketStart(value);
Bucket bucket = {1ULL << ComputeBucketOffset(value), bucket_start};
auto it = buckets_.emplace(buckets_.begin() + index, std::move(bucket));
#if defined(NDEBUG)
(void)it; // Silencing unused variable warning.
#else
assert(std::next(it) == buckets_.end() ||
std::next(it)->start > bucket_start);
assert(it == buckets_.begin() || std::prev(it)->start < bucket_start);
#endif
}
// Returns true if the bucket at `bucketIndex` stores the enum at
// `bucketOffset`, false otherwise.
bool HasEnumAt(size_t bucketIndex, BucketType bucketOffset) const {
assert(bucketIndex < buckets_.size());
assert(bucketOffset < kBucketSize);
return buckets_[bucketIndex].data & (1ULL << bucketOffset);
}
// Returns true if `lhs` and `rhs` hold the exact same values.
friend bool operator==(const EnumSet& lhs, const EnumSet& rhs) {
if (lhs.size_ != rhs.size_) {
return false;
}
// The word is large, but the set doesn't have large members, so
// it doesn't have an overflow set.
return false;
}
// Returns the enum value as a uint32_t.
uint32_t ToWord(EnumType value) const {
static_assert(sizeof(EnumType) <= sizeof(uint32_t),
"EnumType must statically castable to uint32_t");
return static_cast<uint32_t>(value);
}
// Determines whether the given enum value can be represented
// as a bit in a uint64_t mask. If so, then returns that mask bit.
// Otherwise, returns 0.
uint64_t AsMask(uint32_t word) const {
if (word > 63) return 0;
return uint64_t(1) << word;
}
// Ensures that overflow_set_ references a set. A new empty set is
// allocated if one doesn't exist yet. Returns overflow_set_.
OverflowSetType& Overflow() {
if (overflow_.get() == nullptr) {
overflow_ = MakeUnique<OverflowSetType>();
if (lhs.buckets_.size() != rhs.buckets_.size()) {
return false;
}
return *overflow_;
return lhs.buckets_ == rhs.buckets_;
}
// Enums with values up to 63 are stored as bits in this mask.
uint64_t mask_ = 0;
// Enums with values larger than 63 are stored in this set.
// This set should normally be empty or very small.
std::unique_ptr<OverflowSetType> overflow_ = {};
// Returns true if `lhs` and `rhs` hold at least 1 different value.
friend bool operator!=(const EnumSet& lhs, const EnumSet& rhs) {
return !(lhs == rhs);
}
// Storage for the buckets.
std::vector<Bucket> buckets_;
// How many enums this set is storing.
size_t size_ = 0;
};
// A set of SpvCapability, optimized for small capability values.
using CapabilitySet = EnumSet<SpvCapability>;
// A set of spv::Capability.
using CapabilitySet = EnumSet<spv::Capability>;
} // namespace spvtools
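A minimal usage sketch of the container above, using spv::Capability values from the SPIR-V headers (the chosen capabilities and the helper function are illustrative only, not part of the change):
#include <cassert>
#include "source/enum_set.h"

static void EnumSetSketch() {
  spvtools::CapabilitySet caps{spv::Capability::Matrix, spv::Capability::Shader};
  caps.insert(spv::Capability::Float64);        // value < 64: shares the first bucket
  caps.insert(spv::Capability::RayTracingKHR);  // large value: stored in a separate bucket
  assert(caps.contains(spv::Capability::Shader));
  assert(caps.size() == 4);
  caps.erase(spv::Capability::Matrix);
  for (spv::Capability c : caps) {
    (void)c;  // values are visited in increasing enum-value order, bucket by bucket
  }
  assert(caps.HasAnyOf({spv::Capability::Float64, spv::Capability::Geometry}));
}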

View File

@ -29,7 +29,7 @@ bool GetExtensionFromString(const char* str, Extension* extension);
const char* ExtensionToString(Extension extension);
// Returns text string corresponding to |capability|.
const char* CapabilityToString(SpvCapability capability);
const char* CapabilityToString(spv::Capability capability);
} // namespace spvtools

View File

@ -24,7 +24,9 @@
namespace spvtools {
std::string GetExtensionString(const spv_parsed_instruction_t* inst) {
if (inst->opcode != SpvOpExtension) return "ERROR_not_op_extension";
if (inst->opcode != static_cast<uint16_t>(spv::Op::OpExtension)) {
return "ERROR_not_op_extension";
}
assert(inst->num_operands == 1);
@ -38,8 +40,9 @@ std::string GetExtensionString(const spv_parsed_instruction_t* inst) {
std::string ExtensionSetToString(const ExtensionSet& extensions) {
std::stringstream ss;
extensions.ForEach(
[&ss](Extension ext) { ss << ExtensionToString(ext) << " "; });
for (auto extension : extensions) {
ss << ExtensionToString(extension) << " ";
}
return ss.str();
}

View File

@ -15,6 +15,7 @@
#ifndef SOURCE_EXTENSIONS_H_
#define SOURCE_EXTENSIONS_H_
#include <cstdint>
#include <string>
#include "source/enum_set.h"
@ -23,7 +24,7 @@
namespace spvtools {
// The known SPIR-V extensions.
enum Extension {
enum Extension : uint32_t {
#include "extension_enum.inc"
};

View File

@ -470,10 +470,7 @@ if(SPIRV_BUILD_FUZZER)
spvtools_check_symbol_exports(SPIRV-Tools-fuzz)
if(ENABLE_SPIRV_TOOLS_INSTALL)
install(TARGETS SPIRV-Tools-fuzz EXPORT SPIRV-Tools-fuzzTargets
RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}
LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR})
install(TARGETS SPIRV-Tools-fuzz EXPORT SPIRV-Tools-fuzzTargets)
export(EXPORT SPIRV-Tools-fuzzTargets FILE SPIRV-Tools-fuzzTarget.cmake)
spvtools_config_package_dir(SPIRV-Tools-fuzz PACKAGE_DIR)

View File

@ -130,7 +130,7 @@ bool AddedFunctionReducer::InterestingnessFunctionForReducingAddedFunction(
binary_under_reduction.size());
assert(ir_context != nullptr && "The binary should be parsable.");
for (auto& type_or_value : ir_context->module()->types_values()) {
if (type_or_value.opcode() != SpvOpVariable) {
if (type_or_value.opcode() != spv::Op::OpVariable) {
continue;
}
if (irrelevant_pointee_global_variables.count(type_or_value.result_id())) {
@ -202,7 +202,7 @@ void AddedFunctionReducer::ReplayPrefixAndAddFunction(
auto* ir_context = replay_result.transformed_module.get();
for (auto& type_or_value : ir_context->module()->types_values()) {
if (type_or_value.opcode() != SpvOpVariable) {
if (type_or_value.opcode() != spv::Op::OpVariable) {
continue;
}
if (replay_result.transformation_context->GetFactManager()

View File

@ -54,7 +54,7 @@ void CallGraph::BuildGraphAndGetDepthOfFunctionCalls(
// Consider every function call instruction in every block.
for (auto& block : function) {
for (auto& instruction : block) {
if (instruction.opcode() != SpvOpFunctionCall) {
if (instruction.opcode() != spv::Op::OpFunctionCall) {
continue;
}
// Get the id of the function being called.

View File

@ -63,7 +63,7 @@ std::vector<uint32_t> ConstantUniformFacts::GetConstantWords(
bool ConstantUniformFacts::DataMatches(
const opt::Instruction& constant_instruction,
const protobufs::FactConstantUniform& constant_uniform_fact) {
assert(constant_instruction.opcode() == SpvOpConstant);
assert(constant_instruction.opcode() == spv::Op::OpConstant);
std::vector<uint32_t> data_in_constant;
for (uint32_t i = 0; i < constant_instruction.NumInOperands(); i++) {
data_in_constant.push_back(constant_instruction.GetSingleWordInOperand(i));
@ -95,7 +95,7 @@ ConstantUniformFacts::GetUniformDescriptorsForConstant(
uint32_t constant_id) const {
std::vector<protobufs::UniformBufferElementDescriptor> result;
auto constant_inst = ir_context_->get_def_use_mgr()->GetDef(constant_id);
assert(constant_inst->opcode() == SpvOpConstant &&
assert(constant_inst->opcode() == spv::Op::OpConstant &&
"The given id must be that of a constant");
auto type_id = constant_inst->type_id();
for (auto& fact_and_type_id : facts_and_type_ids_) {
@ -175,8 +175,9 @@ bool ConstantUniformFacts::MaybeAddFact(
return false;
}
assert(SpvOpVariable == uniform_variable->opcode());
assert(SpvStorageClassUniform == uniform_variable->GetSingleWordInOperand(0));
assert(spv::Op::OpVariable == uniform_variable->opcode());
assert(spv::StorageClass::Uniform ==
spv::StorageClass(uniform_variable->GetSingleWordInOperand(0)));
auto should_be_uniform_pointer_type =
ir_context_->get_type_mgr()->GetType(uniform_variable->type_id());
@ -184,7 +185,7 @@ bool ConstantUniformFacts::MaybeAddFact(
return false;
}
if (should_be_uniform_pointer_type->AsPointer()->storage_class() !=
SpvStorageClassUniform) {
spv::StorageClass::Uniform) {
return false;
}
auto should_be_uniform_pointer_instruction =

View File

@ -23,7 +23,7 @@ namespace fact_manager {
size_t DataSynonymAndIdEquationFacts::OperationHash::operator()(
const Operation& operation) const {
std::u32string hash;
hash.push_back(operation.opcode);
hash.push_back(uint32_t(operation.opcode));
for (auto operand : operation.operands) {
hash.push_back(static_cast<uint32_t>(DataDescriptorHash()(operand)));
}
@ -104,7 +104,8 @@ bool DataSynonymAndIdEquationFacts::MaybeAddFact(
}
// Now add the fact.
AddEquationFactRecursive(lhs_dd, static_cast<SpvOp>(fact.opcode()), rhs_dds);
AddEquationFactRecursive(lhs_dd, static_cast<spv::Op>(fact.opcode()),
rhs_dds);
return true;
}
@ -119,7 +120,7 @@ DataSynonymAndIdEquationFacts::GetEquations(
}
void DataSynonymAndIdEquationFacts::AddEquationFactRecursive(
const protobufs::DataDescriptor& lhs_dd, SpvOp opcode,
const protobufs::DataDescriptor& lhs_dd, spv::Op opcode,
const std::vector<const protobufs::DataDescriptor*>& rhs_dds) {
assert(synonymous_.Exists(lhs_dd) &&
"The LHS must be known to the equivalence relation.");
@ -155,21 +156,21 @@ void DataSynonymAndIdEquationFacts::AddEquationFactRecursive(
// Now try to work out corollaries implied by the new equation and existing
// facts.
switch (opcode) {
case SpvOpConvertSToF:
case SpvOpConvertUToF:
case spv::Op::OpConvertSToF:
case spv::Op::OpConvertUToF:
ComputeConversionDataSynonymFacts(*rhs_dds[0]);
break;
case SpvOpBitcast: {
case spv::Op::OpBitcast: {
assert(DataDescriptorsAreWellFormedAndComparable(lhs_dd, *rhs_dds[0]) &&
"Operands of OpBitcast equation fact must have compatible types");
if (!synonymous_.IsEquivalent(lhs_dd, *rhs_dds[0])) {
AddDataSynonymFactRecursive(lhs_dd, *rhs_dds[0]);
}
} break;
case SpvOpIAdd: {
case spv::Op::OpIAdd: {
// Equation form: "a = b + c"
for (const auto& equation : GetEquations(rhs_dds[0])) {
if (equation.opcode == SpvOpISub) {
if (equation.opcode == spv::Op::OpISub) {
// Equation form: "a = (d - e) + c"
if (synonymous_.IsEquivalent(*equation.operands[1], *rhs_dds[1])) {
// Equation form: "a = (d - c) + c"
@ -179,7 +180,7 @@ void DataSynonymAndIdEquationFacts::AddEquationFactRecursive(
}
}
for (const auto& equation : GetEquations(rhs_dds[1])) {
if (equation.opcode == SpvOpISub) {
if (equation.opcode == spv::Op::OpISub) {
// Equation form: "a = b + (d - e)"
if (synonymous_.IsEquivalent(*equation.operands[1], *rhs_dds[0])) {
// Equation form: "a = b + (d - b)"
@ -190,10 +191,10 @@ void DataSynonymAndIdEquationFacts::AddEquationFactRecursive(
}
break;
}
case SpvOpISub: {
case spv::Op::OpISub: {
// Equation form: "a = b - c"
for (const auto& equation : GetEquations(rhs_dds[0])) {
if (equation.opcode == SpvOpIAdd) {
if (equation.opcode == spv::Op::OpIAdd) {
// Equation form: "a = (d + e) - c"
if (synonymous_.IsEquivalent(*equation.operands[0], *rhs_dds[1])) {
// Equation form: "a = (c + e) - c"
@ -207,34 +208,34 @@ void DataSynonymAndIdEquationFacts::AddEquationFactRecursive(
}
}
if (equation.opcode == SpvOpISub) {
if (equation.opcode == spv::Op::OpISub) {
// Equation form: "a = (d - e) - c"
if (synonymous_.IsEquivalent(*equation.operands[0], *rhs_dds[1])) {
// Equation form: "a = (c - e) - c"
// We can thus infer "a = -e"
AddEquationFactRecursive(lhs_dd, SpvOpSNegate,
AddEquationFactRecursive(lhs_dd, spv::Op::OpSNegate,
{equation.operands[1]});
}
}
}
for (const auto& equation : GetEquations(rhs_dds[1])) {
if (equation.opcode == SpvOpIAdd) {
if (equation.opcode == spv::Op::OpIAdd) {
// Equation form: "a = b - (d + e)"
if (synonymous_.IsEquivalent(*equation.operands[0], *rhs_dds[0])) {
// Equation form: "a = b - (b + e)"
// We can thus infer "a = -e"
AddEquationFactRecursive(lhs_dd, SpvOpSNegate,
AddEquationFactRecursive(lhs_dd, spv::Op::OpSNegate,
{equation.operands[1]});
}
if (synonymous_.IsEquivalent(*equation.operands[1], *rhs_dds[0])) {
// Equation form: "a = b - (d + b)"
// We can thus infer "a = -d"
AddEquationFactRecursive(lhs_dd, SpvOpSNegate,
AddEquationFactRecursive(lhs_dd, spv::Op::OpSNegate,
{equation.operands[0]});
}
}
if (equation.opcode == SpvOpISub) {
if (equation.opcode == spv::Op::OpISub) {
// Equation form: "a = b - (d - e)"
if (synonymous_.IsEquivalent(*equation.operands[0], *rhs_dds[0])) {
// Equation form: "a = b - (b - e)"
@ -245,8 +246,8 @@ void DataSynonymAndIdEquationFacts::AddEquationFactRecursive(
}
break;
}
case SpvOpLogicalNot:
case SpvOpSNegate: {
case spv::Op::OpLogicalNot:
case spv::Op::OpSNegate: {
// Equation form: "a = !b" or "a = -b"
for (const auto& equation : GetEquations(rhs_dds[0])) {
if (equation.opcode == opcode) {
@ -321,9 +322,9 @@ void DataSynonymAndIdEquationFacts::ComputeConversionDataSynonymFacts(
for (const auto& equation : fact.second) {
if (synonymous_.IsEquivalent(*equation.operands[0], dd)) {
if (equation.opcode == SpvOpConvertSToF) {
if (equation.opcode == spv::Op::OpConvertSToF) {
convert_s_to_f_lhs.push_back(*dd_it);
} else if (equation.opcode == SpvOpConvertUToF) {
} else if (equation.opcode == spv::Op::OpConvertUToF) {
convert_u_to_f_lhs.push_back(*dd_it);
}
}
@ -808,9 +809,9 @@ bool DataSynonymAndIdEquationFacts::DataDescriptorsAreWellFormedAndComparable(
}
// Neither end type is allowed to be void.
if (ir_context_->get_def_use_mgr()->GetDef(end_type_id_1)->opcode() ==
SpvOpTypeVoid ||
spv::Op::OpTypeVoid ||
ir_context_->get_def_use_mgr()->GetDef(end_type_id_2)->opcode() ==
SpvOpTypeVoid) {
spv::Op::OpTypeVoid) {
return false;
}
// If the end types are the same, the data descriptors are comparable.
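A worked instance of the OpIAdd corollary handled above (hypothetical ids; a sketch of the reasoning, not code from the change):
// New fact:        %a = OpIAdd %b %c
// Known equation:  %b = OpISub %d %e
// Known synonym:   %e is synonymous with %c
// Then a = (d - c) + c, so AddDataSynonymFactRecursive records the corollary
// that %a is synonymous with %d.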

View File

@ -79,7 +79,7 @@ class DataSynonymAndIdEquationFacts {
// This helper struct represents the right hand side of an equation as an
// operator applied to a number of data descriptor operands.
struct Operation {
SpvOp opcode;
spv::Op opcode;
std::vector<const protobufs::DataDescriptor*> operands;
};
@ -144,7 +144,7 @@ class DataSynonymAndIdEquationFacts {
// corollaries, in the form of data synonym or equation facts, that follow
// from this and other known facts.
void AddEquationFactRecursive(
const protobufs::DataDescriptor& lhs_dd, SpvOp opcode,
const protobufs::DataDescriptor& lhs_dd, spv::Op opcode,
const std::vector<const protobufs::DataDescriptor*>& rhs_dds);
// Returns true if and only if |dd.object()| still exists in the module.

View File

@ -64,7 +64,7 @@ std::string ToString(const protobufs::FactDataSynonym& fact) {
std::string ToString(const protobufs::FactIdEquation& fact) {
std::stringstream stream;
stream << fact.lhs_id();
stream << " " << static_cast<SpvOp>(fact.opcode());
stream << " " << fact.opcode();
for (auto rhs_id : fact.rhs_id()) {
stream << " " << rhs_id;
}
@ -255,11 +255,11 @@ void FactManager::AddFactIdIsIrrelevant(uint32_t result_id) {
assert(success && "|result_id| is invalid");
}
void FactManager::AddFactIdEquation(uint32_t lhs_id, SpvOp opcode,
void FactManager::AddFactIdEquation(uint32_t lhs_id, spv::Op opcode,
const std::vector<uint32_t>& rhs_id) {
protobufs::FactIdEquation fact;
fact.set_lhs_id(lhs_id);
fact.set_opcode(opcode);
fact.set_opcode(uint32_t(opcode));
for (auto an_rhs_id : rhs_id) {
fact.add_rhs_id(an_rhs_id);
}

View File

@ -83,7 +83,7 @@ class FactManager {
// |lhs_id| = |opcode| |rhs_id[0]| ... |rhs_id[N-1]|
//
// Neither |lhs_id| nor any of |rhs_id| may be irrelevant.
void AddFactIdEquation(uint32_t lhs_id, SpvOp opcode,
void AddFactIdEquation(uint32_t lhs_id, spv::Op opcode,
const std::vector<uint32_t>& rhs_id);
// Inspects all known facts and adds corollary facts; e.g. if we know that

View File

@ -36,8 +36,9 @@ opt::Function* FindFragmentShaderEntryPoint(opt::IRContext* ir_context,
// Check that this is a fragment shader
bool found_capability_shader = false;
for (auto& capability : ir_context->capabilities()) {
assert(capability.opcode() == SpvOpCapability);
if (capability.GetSingleWordInOperand(0) == SpvCapabilityShader) {
assert(capability.opcode() == spv::Op::OpCapability);
if (spv::Capability(capability.GetSingleWordInOperand(0)) ==
spv::Capability::Shader) {
found_capability_shader = true;
break;
}
@ -51,7 +52,8 @@ opt::Function* FindFragmentShaderEntryPoint(opt::IRContext* ir_context,
opt::Instruction* fragment_entry_point = nullptr;
for (auto& entry_point : ir_context->module()->entry_points()) {
if (entry_point.GetSingleWordInOperand(0) == SpvExecutionModelFragment) {
if (spv::ExecutionModel(entry_point.GetSingleWordInOperand(0)) ==
spv::ExecutionModel::Fragment) {
fragment_entry_point = &entry_point;
break;
}
@ -81,8 +83,9 @@ opt::Instruction* FindVec4OutputVariable(opt::IRContext* ir_context,
MessageConsumer message_consumer) {
opt::Instruction* output_variable = nullptr;
for (auto& inst : ir_context->types_values()) {
if (inst.opcode() == SpvOpVariable &&
inst.GetSingleWordInOperand(0) == SpvStorageClassOutput) {
if (inst.opcode() == spv::Op::OpVariable &&
spv::StorageClass(inst.GetSingleWordInOperand(0)) ==
spv::StorageClass::Output) {
if (output_variable != nullptr) {
message_consumer(SPV_MSG_ERROR, nullptr, {},
"Only one output variable can be handled at present; "
@ -144,10 +147,11 @@ MakeConstantUniformReplacement(opt::IRContext* ir_context,
uint32_t greater_than_instruction,
uint32_t in_operand_index) {
return MakeUnique<TransformationReplaceConstantWithUniform>(
MakeIdUseDescriptor(constant_id,
MakeInstructionDescriptor(greater_than_instruction,
SpvOpFOrdGreaterThan, 0),
in_operand_index),
MakeIdUseDescriptor(
constant_id,
MakeInstructionDescriptor(greater_than_instruction,
spv::Op::OpFOrdGreaterThan, 0),
in_operand_index),
fact_manager.GetUniformDescriptorsForConstant(constant_id)[0],
ir_context->TakeNextId(), ir_context->TakeNextId());
}
@ -204,20 +208,21 @@ bool ForceRenderRed(
// Make the new exit block
auto new_exit_block_id = ir_context->TakeNextId();
{
auto label = MakeUnique<opt::Instruction>(ir_context.get(), SpvOpLabel, 0,
new_exit_block_id,
opt::Instruction::OperandList());
auto label = MakeUnique<opt::Instruction>(
ir_context.get(), spv::Op::OpLabel, 0, new_exit_block_id,
opt::Instruction::OperandList());
auto new_exit_block = MakeUnique<opt::BasicBlock>(std::move(label));
new_exit_block->AddInstruction(MakeUnique<opt::Instruction>(
ir_context.get(), SpvOpReturn, 0, 0, opt::Instruction::OperandList()));
new_exit_block->AddInstruction(
MakeUnique<opt::Instruction>(ir_context.get(), spv::Op::OpReturn, 0, 0,
opt::Instruction::OperandList()));
entry_point_function->AddBasicBlock(std::move(new_exit_block));
}
// Make the new entry block
{
auto label = MakeUnique<opt::Instruction>(ir_context.get(), SpvOpLabel, 0,
ir_context->TakeNextId(),
opt::Instruction::OperandList());
auto label = MakeUnique<opt::Instruction>(
ir_context.get(), spv::Op::OpLabel, 0, ir_context->TakeNextId(),
opt::Instruction::OperandList());
auto new_entry_block = MakeUnique<opt::BasicBlock>(std::move(label));
// Make an instruction to construct vec4(1.0, 0.0, 0.0, 1.0), representing
@ -229,7 +234,7 @@ bool ForceRenderRed(
auto temp_vec4 = opt::analysis::Vector(float_type, 4);
auto vec4_id = ir_context->get_type_mgr()->GetId(&temp_vec4);
auto red = MakeUnique<opt::Instruction>(
ir_context.get(), SpvOpCompositeConstruct, vec4_id,
ir_context.get(), spv::Op::OpCompositeConstruct, vec4_id,
ir_context->TakeNextId(), op_composite_construct_operands);
auto red_id = red->result_id();
new_entry_block->AddInstruction(std::move(red));
@ -241,7 +246,7 @@ bool ForceRenderRed(
opt::Instruction::OperandList op_store_operands = {variable_to_store_into,
value_to_be_stored};
new_entry_block->AddInstruction(MakeUnique<opt::Instruction>(
ir_context.get(), SpvOpStore, 0, 0, op_store_operands));
ir_context.get(), spv::Op::OpStore, 0, 0, op_store_operands));
// We are going to attempt to construct 'false' as an expression of the form
// 'literal1 > literal2'. If we succeed, we will later replace each literal
@ -313,7 +318,7 @@ bool ForceRenderRed(
{SPV_OPERAND_TYPE_ID, {smaller_constant}},
{SPV_OPERAND_TYPE_ID, {larger_constant}}};
new_entry_block->AddInstruction(MakeUnique<opt::Instruction>(
ir_context.get(), SpvOpFOrdGreaterThan,
ir_context.get(), spv::Op::OpFOrdGreaterThan,
ir_context->get_type_mgr()->GetId(registered_bool_type),
id_guaranteed_to_be_false, greater_than_operands));
@ -344,9 +349,9 @@ bool ForceRenderRed(
opt::Operand else_block = {SPV_OPERAND_TYPE_ID, {new_exit_block_id}};
opt::Instruction::OperandList op_branch_conditional_operands = {
false_condition, then_block, else_block};
new_entry_block->AddInstruction(
MakeUnique<opt::Instruction>(ir_context.get(), SpvOpBranchConditional,
0, 0, op_branch_conditional_operands));
new_entry_block->AddInstruction(MakeUnique<opt::Instruction>(
ir_context.get(), spv::Op::OpBranchConditional, 0, 0,
op_branch_conditional_operands));
entry_point_function->InsertBasicBlockBefore(
std::move(new_entry_block), entry_point_function->entry().get());


@ -131,14 +131,15 @@ void FuzzerPass::ForEachInstructionWithInstructionDescriptor(
// should skip when searching from 'base' for the desired instruction.
// (An instruction that has a result id is represented by its own opcode,
// itself as 'base', and a skip-count of 0.)
std::vector<std::tuple<uint32_t, SpvOp, uint32_t>> base_opcode_skip_triples;
std::vector<std::tuple<uint32_t, spv::Op, uint32_t>>
base_opcode_skip_triples;
// The initial base instruction is the block label.
uint32_t base = block->id();
// Counts the number of times we have seen each opcode since we reset the
// base instruction.
std::map<SpvOp, uint32_t> skip_count;
std::map<spv::Op, uint32_t> skip_count;
// Consider every instruction in the block. The label is excluded: it is
// only necessary to consider it as a base in case the first instruction
@ -151,7 +152,7 @@ void FuzzerPass::ForEachInstructionWithInstructionDescriptor(
base = inst_it->result_id();
skip_count.clear();
}
const SpvOp opcode = inst_it->opcode();
const spv::Op opcode = inst_it->opcode();
// Invoke the provided function, which might apply a transformation.
action(block, inst_it,
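The comments above describe how a transformation targets an instruction that has no result id: record the closest preceding instruction that does have a result id (the base), the target's opcode, and how many instructions with that opcode to skip past after the base. A rough standalone sketch of that counting scheme, with simplified stand-in types instead of the real opt::BasicBlock machinery (not part of this commit):

#include <cstdint>
#include <map>
#include <tuple>
#include <vector>

// Simplified stand-ins; the real code walks opt::Instruction objects in an
// opt::BasicBlock.
enum class Op : uint32_t { OpLoad, OpStore, OpIAdd };

struct Instruction {
  Op opcode;
  uint32_t result_id;  // 0 means "no result id"
};

int main() {
  std::vector<Instruction> block = {
      {Op::OpLoad, 7}, {Op::OpStore, 0}, {Op::OpStore, 0}, {Op::OpIAdd, 9}};

  uint32_t base = 100;                // the block label id in the real code
  std::map<Op, uint32_t> skip_count;  // opcode occurrences since the last base
  std::vector<std::tuple<uint32_t, Op, uint32_t>> descriptors;

  for (const auto& inst : block) {
    if (inst.result_id != 0) {
      // An instruction with a result id becomes the new base; reset counts.
      base = inst.result_id;
      skip_count.clear();
    }
    const Op opcode = inst.opcode;
    uint32_t skips = skip_count.count(opcode) ? skip_count.at(opcode) : 0;
    // Descriptor: base id, the target's opcode, and how many instructions
    // with that opcode to skip after the base.
    descriptors.emplace_back(base, opcode, skips);
    skip_count[opcode]++;
  }
  // The second OpStore is described as (base 7, OpStore, skip 1).
  return std::get<2>(descriptors[2]) == 1 ? 0 : 1;
}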
@ -330,7 +331,7 @@ uint32_t FuzzerPass::FindOrCreateStructType(
}
uint32_t FuzzerPass::FindOrCreatePointerType(uint32_t base_type_id,
SpvStorageClass storage_class) {
spv::StorageClass storage_class) {
// We do not use the type manager here, due to problems related to isomorphic
// but distinct structs not being regarded as different.
auto existing_id = fuzzerutil::MaybeGetPointerType(
@ -345,7 +346,7 @@ uint32_t FuzzerPass::FindOrCreatePointerType(uint32_t base_type_id,
}
uint32_t FuzzerPass::FindOrCreatePointerToIntegerType(
uint32_t width, bool is_signed, SpvStorageClass storage_class) {
uint32_t width, bool is_signed, spv::StorageClass storage_class) {
return FindOrCreatePointerType(FindOrCreateIntegerType(width, is_signed),
storage_class);
}
@ -432,7 +433,7 @@ uint32_t FuzzerPass::FindOrCreateCompositeConstant(
uint32_t FuzzerPass::FindOrCreateGlobalUndef(uint32_t type_id) {
for (auto& inst : GetIRContext()->types_values()) {
if (inst.opcode() == SpvOpUndef && inst.type_id() == type_id) {
if (inst.opcode() == spv::Op::OpUndef && inst.type_id() == type_id) {
return inst.result_id();
}
}
@ -464,7 +465,7 @@ uint32_t FuzzerPass::FindOrCreateNullConstant(uint32_t type_id) {
std::pair<std::vector<uint32_t>, std::map<uint32_t, std::vector<uint32_t>>>
FuzzerPass::GetAvailableBasicTypesAndPointers(
SpvStorageClass storage_class) const {
spv::StorageClass storage_class) const {
// Records all of the basic types available in the module.
std::set<uint32_t> basic_types;
@ -480,23 +481,23 @@ FuzzerPass::GetAvailableBasicTypesAndPointers(
// For pointer types with basic pointee types, associate the pointer type
// with the basic type.
switch (inst.opcode()) {
case SpvOpTypeBool:
case SpvOpTypeFloat:
case SpvOpTypeInt:
case SpvOpTypeMatrix:
case SpvOpTypeVector:
case spv::Op::OpTypeBool:
case spv::Op::OpTypeFloat:
case spv::Op::OpTypeInt:
case spv::Op::OpTypeMatrix:
case spv::Op::OpTypeVector:
// These are all basic types.
basic_types.insert(inst.result_id());
basic_type_to_pointers.insert({inst.result_id(), {}});
break;
case SpvOpTypeArray:
case spv::Op::OpTypeArray:
// An array type is basic if its base type is basic.
if (basic_types.count(inst.GetSingleWordInOperand(0))) {
basic_types.insert(inst.result_id());
basic_type_to_pointers.insert({inst.result_id(), {}});
}
break;
case SpvOpTypeStruct: {
case spv::Op::OpTypeStruct: {
// A struct type is basic if it does not have the Block/BufferBlock
// decoration, and if all of its members are basic.
if (!fuzzerutil::HasBlockOrBufferBlockDecoration(GetIRContext(),
@ -515,11 +516,12 @@ FuzzerPass::GetAvailableBasicTypesAndPointers(
}
break;
}
case SpvOpTypePointer: {
case spv::Op::OpTypePointer: {
// We are interested in the pointer if its pointee type is basic and it
// has the right storage class.
auto pointee_type = inst.GetSingleWordInOperand(1);
if (inst.GetSingleWordInOperand(0) == storage_class &&
if (spv::StorageClass(inst.GetSingleWordInOperand(0)) ==
storage_class &&
basic_types.count(pointee_type)) {
// The pointer has the desired storage class, and its pointee type is
// a basic type, so we are interested in it. Associate it with its
@ -541,22 +543,22 @@ uint32_t FuzzerPass::FindOrCreateZeroConstant(
GetIRContext()->get_def_use_mgr()->GetDef(scalar_or_composite_type_id);
assert(type_instruction && "The type instruction must exist.");
switch (type_instruction->opcode()) {
case SpvOpTypeBool:
case spv::Op::OpTypeBool:
return FindOrCreateBoolConstant(false, is_irrelevant);
case SpvOpTypeFloat: {
case spv::Op::OpTypeFloat: {
auto width = type_instruction->GetSingleWordInOperand(0);
auto num_words = (width + 32 - 1) / 32;
return FindOrCreateFloatConstant(std::vector<uint32_t>(num_words, 0),
width, is_irrelevant);
}
case SpvOpTypeInt: {
case spv::Op::OpTypeInt: {
auto width = type_instruction->GetSingleWordInOperand(0);
auto num_words = (width + 32 - 1) / 32;
return FindOrCreateIntegerConstant(
std::vector<uint32_t>(num_words, 0), width,
type_instruction->GetSingleWordInOperand(1), is_irrelevant);
}
case SpvOpTypeArray: {
case spv::Op::OpTypeArray: {
auto component_type_id = type_instruction->GetSingleWordInOperand(0);
auto num_components =
fuzzerutil::GetArraySize(*type_instruction, GetIRContext());
@ -566,8 +568,8 @@ uint32_t FuzzerPass::FindOrCreateZeroConstant(
FindOrCreateZeroConstant(component_type_id, is_irrelevant)),
scalar_or_composite_type_id, is_irrelevant);
}
case SpvOpTypeMatrix:
case SpvOpTypeVector: {
case spv::Op::OpTypeMatrix:
case spv::Op::OpTypeVector: {
auto component_type_id = type_instruction->GetSingleWordInOperand(0);
auto num_components = type_instruction->GetSingleWordInOperand(1);
return FindOrCreateCompositeConstant(
@ -576,7 +578,7 @@ uint32_t FuzzerPass::FindOrCreateZeroConstant(
FindOrCreateZeroConstant(component_type_id, is_irrelevant)),
scalar_or_composite_type_id, is_irrelevant);
}
case SpvOpTypeStruct: {
case spv::Op::OpTypeStruct: {
assert(!fuzzerutil::HasBlockOrBufferBlockDecoration(
GetIRContext(), scalar_or_composite_type_id) &&
"We do not construct constants of struct types decorated with "
@ -646,7 +648,7 @@ opt::BasicBlock* FuzzerPass::GetOrCreateSimpleLoopPreheader(
// |maybe_preheader| is a preheader if it branches unconditionally to
// the header. We also require it not to be a loop header.
if (maybe_preheader->terminator()->opcode() == SpvOpBranch &&
if (maybe_preheader->terminator()->opcode() == spv::Op::OpBranch &&
!maybe_preheader->IsLoopHeader()) {
return maybe_preheader;
}
@ -683,8 +685,8 @@ opt::BasicBlock* FuzzerPass::SplitBlockAfterOpPhiOrOpVariable(
// Find the first non-OpPhi and non-OpVariable instruction.
auto non_phi_or_var_inst = &*block->begin();
while (non_phi_or_var_inst->opcode() == SpvOpPhi ||
non_phi_or_var_inst->opcode() == SpvOpVariable) {
while (non_phi_or_var_inst->opcode() == spv::Op::OpPhi ||
non_phi_or_var_inst->opcode() == spv::Op::OpVariable) {
non_phi_or_var_inst = non_phi_or_var_inst->NextNode();
}
@ -706,7 +708,7 @@ uint32_t FuzzerPass::FindOrCreateLocalVariable(
(void)pointer_type;
assert(pointer_type && pointer_type->AsPointer() &&
pointer_type->AsPointer()->storage_class() ==
SpvStorageClassFunction &&
spv::StorageClass::Function &&
"The pointer_type_id must refer to a defined pointer type with "
"storage class Function");
auto function = fuzzerutil::FindFunction(GetIRContext(), function_id);
@ -715,7 +717,7 @@ uint32_t FuzzerPass::FindOrCreateLocalVariable(
// First we try to find a suitable existing variable.
// All of the local variable declarations are located in the first block.
for (auto& instruction : *function->begin()) {
if (instruction.opcode() != SpvOpVariable) {
if (instruction.opcode() != spv::Op::OpVariable) {
continue;
}
// The existing OpVariable must have type |pointer_type_id|.
@ -749,15 +751,16 @@ uint32_t FuzzerPass::FindOrCreateGlobalVariable(
(void)pointer_type;
assert(
pointer_type && pointer_type->AsPointer() &&
(pointer_type->AsPointer()->storage_class() == SpvStorageClassPrivate ||
(pointer_type->AsPointer()->storage_class() ==
spv::StorageClass::Private ||
pointer_type->AsPointer()->storage_class() ==
SpvStorageClassWorkgroup) &&
spv::StorageClass::Workgroup) &&
"The pointer_type_id must refer to a defined pointer type with storage "
"class Private or Workgroup");
// First we try to find a suitable existing variable.
for (auto& instruction : GetIRContext()->module()->types_values()) {
if (instruction.opcode() != SpvOpVariable) {
if (instruction.opcode() != spv::Op::OpVariable) {
continue;
}
// The existing OpVariable must have type |pointer_type_id|.
@ -781,13 +784,13 @@ uint32_t FuzzerPass::FindOrCreateGlobalVariable(
uint32_t result_id = GetFuzzerContext()->GetFreshId();
// A variable with storage class Workgroup shouldn't have an initializer.
if (storage_class == SpvStorageClassWorkgroup) {
if (storage_class == spv::StorageClass::Workgroup) {
ApplyTransformation(TransformationAddGlobalVariable(
result_id, pointer_type_id, SpvStorageClassWorkgroup, 0,
result_id, pointer_type_id, spv::StorageClass::Workgroup, 0,
pointee_value_is_irrelevant));
} else {
ApplyTransformation(TransformationAddGlobalVariable(
result_id, pointer_type_id, SpvStorageClassPrivate,
result_id, pointer_type_id, spv::StorageClass::Private,
FindOrCreateZeroConstant(pointee_type_id, pointee_value_is_irrelevant),
pointee_value_is_irrelevant));
}
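FindOrCreateZeroConstant, shown a few hunks above, builds a zero value for an arbitrary scalar or composite type by recursing through the type's components. A compact standalone sketch of that recursion over a toy type representation (illustrative only; the real pass operates on SPIR-V type instructions and returns constant ids):

#include <cstddef>
#include <string>
#include <vector>

// Toy type model: a type is either a scalar or a composite of element types.
struct Type {
  enum class Kind { kBool, kInt, kFloat, kVector, kArray, kStruct } kind;
  std::vector<const Type*> elements;  // only used by the composite kinds
};

// Builds a textual zero value for |type|, recursing through composites in the
// same spirit as FindOrCreateZeroConstant.
std::string ZeroValueFor(const Type& type) {
  switch (type.kind) {
    case Type::Kind::kBool:
      return "false";
    case Type::Kind::kInt:
      return "0";
    case Type::Kind::kFloat:
      return "0.0";
    case Type::Kind::kVector:
    case Type::Kind::kArray:
    case Type::Kind::kStruct: {
      std::string result = "{";
      for (std::size_t i = 0; i < type.elements.size(); ++i) {
        if (i != 0) result += ", ";
        result += ZeroValueFor(*type.elements[i]);  // recurse per component
      }
      return result + "}";
    }
  }
  return "";
}

int main() {
  Type f{Type::Kind::kFloat, {}};
  Type vec4{Type::Kind::kVector, {&f, &f, &f, &f}};
  Type s{Type::Kind::kStruct, {&vec4, &f}};
  return ZeroValueFor(s) == "{{0.0, 0.0, 0.0, 0.0}, 0.0}" ? 0 : 1;
}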


@ -159,14 +159,14 @@ class FuzzerPass {
// already exist) and storage class |storage_class|. A transformation is
// applied to add the pointer if it does not already exist.
uint32_t FindOrCreatePointerType(uint32_t base_type_id,
SpvStorageClass storage_class);
spv::StorageClass storage_class);
// Returns the id of an OpTypePointer instruction, with an integer base
// type of width and signedness specified by |width| and |is_signed|,
// respectively. If the pointer type or required integer base type do not
// exist, transformations are applied to add them.
uint32_t FindOrCreatePointerToIntegerType(uint32_t width, bool is_signed,
SpvStorageClass storage_class);
spv::StorageClass storage_class);
// Returns the id of an OpConstant instruction, with an integer type of
// width and signedness specified by |width| and |is_signed|, respectively,
@ -239,7 +239,7 @@ class FuzzerPass {
// storage class, and the sequence will have multiple elements if there are
// repeated pointer declarations for the same basic type and storage class.
std::pair<std::vector<uint32_t>, std::map<uint32_t, std::vector<uint32_t>>>
GetAvailableBasicTypesAndPointers(SpvStorageClass storage_class) const;
GetAvailableBasicTypesAndPointers(spv::StorageClass storage_class) const;
// Given a type id, |scalar_or_composite_type_id|, which must correspond to
// some scalar or composite type, returns the result id of an instruction


@ -34,15 +34,16 @@ void FuzzerPassAddAccessChains::Apply() {
opt::BasicBlock::iterator inst_it,
const protobufs::InstructionDescriptor& instruction_descriptor)
-> void {
assert(inst_it->opcode() ==
instruction_descriptor.target_instruction_opcode() &&
"The opcode of the instruction we might insert before must be "
"the same as the opcode in the descriptor for the instruction");
assert(
inst_it->opcode() ==
spv::Op(instruction_descriptor.target_instruction_opcode()) &&
"The opcode of the instruction we might insert before must be "
"the same as the opcode in the descriptor for the instruction");
// Check whether it is legitimate to insert an access chain
// instruction before this instruction.
if (!fuzzerutil::CanInsertOpcodeBeforeInstruction(SpvOpAccessChain,
inst_it)) {
if (!fuzzerutil::CanInsertOpcodeBeforeInstruction(
spv::Op::OpAccessChain, inst_it)) {
return;
}
@ -64,8 +65,8 @@ void FuzzerPassAddAccessChains::Apply() {
return false;
}
switch (instruction->opcode()) {
case SpvOpConstantNull:
case SpvOpUndef:
case spv::Op::OpConstantNull:
case spv::Op::OpUndef:
// Do not allow making an access chain from a null or
// undefined pointer. (We can eliminate these cases
// before actually checking that the instruction is a
@ -78,7 +79,7 @@ void FuzzerPassAddAccessChains::Apply() {
// make an access chain from it.
return context->get_def_use_mgr()
->GetDef(instruction->type_id())
->opcode() == SpvOpTypePointer;
->opcode() == spv::Op::OpTypePointer;
});
// At this point, |relevant_instructions| contains all the pointers
@ -112,14 +113,14 @@ void FuzzerPassAddAccessChains::Apply() {
}
uint32_t bound;
switch (subobject_type->opcode()) {
case SpvOpTypeArray:
case spv::Op::OpTypeArray:
bound = fuzzerutil::GetArraySize(*subobject_type, GetIRContext());
break;
case SpvOpTypeMatrix:
case SpvOpTypeVector:
case spv::Op::OpTypeMatrix:
case spv::Op::OpTypeVector:
bound = subobject_type->GetSingleWordInOperand(1);
break;
case SpvOpTypeStruct:
case spv::Op::OpTypeStruct:
bound = fuzzerutil::GetNumberOfStructMembers(*subobject_type);
break;
default:
@ -140,9 +141,9 @@ void FuzzerPassAddAccessChains::Apply() {
GetFuzzerContext()->GetRandomIndexForAccessChain(bound);
switch (subobject_type->opcode()) {
case SpvOpTypeArray:
case SpvOpTypeMatrix:
case SpvOpTypeVector: {
case spv::Op::OpTypeArray:
case spv::Op::OpTypeMatrix:
case spv::Op::OpTypeVector: {
// The index will be clamped
bool is_signed = GetFuzzerContext()->ChooseEven();
@ -164,7 +165,7 @@ void FuzzerPassAddAccessChains::Apply() {
subobject_type_id = subobject_type->GetSingleWordInOperand(0);
} break;
case SpvOpTypeStruct:
case spv::Op::OpTypeStruct:
index_ids.push_back(FindOrCreateIntegerConstant(
{index_value}, 32, GetFuzzerContext()->ChooseEven(), false));
subobject_type_id =
@ -178,7 +179,7 @@ void FuzzerPassAddAccessChains::Apply() {
// pointer suitable for the access chain's result type exists, so we
// create one if it does not.
FindOrCreatePointerType(subobject_type_id,
static_cast<SpvStorageClass>(
static_cast<spv::StorageClass>(
pointer_type->GetSingleWordInOperand(0)));
// Apply the transformation to add an access chain.
ApplyTransformation(TransformationAccessChain(


@ -53,8 +53,8 @@ void FuzzerPassAddCompositeExtract::Apply() {
opt::Function* /*unused*/, opt::BasicBlock* /*unused*/,
opt::BasicBlock::iterator inst_it,
const protobufs::InstructionDescriptor& instruction_descriptor) {
if (!fuzzerutil::CanInsertOpcodeBeforeInstruction(SpvOpCompositeExtract,
inst_it)) {
if (!fuzzerutil::CanInsertOpcodeBeforeInstruction(
spv::Op::OpCompositeExtract, inst_it)) {
return;
}
@ -97,15 +97,15 @@ void FuzzerPassAddCompositeExtract::Apply() {
assert(type_inst && "Composite instruction has invalid type id");
switch (type_inst->opcode()) {
case SpvOpTypeArray:
case spv::Op::OpTypeArray:
number_of_members =
fuzzerutil::GetArraySize(*type_inst, GetIRContext());
break;
case SpvOpTypeVector:
case SpvOpTypeMatrix:
case spv::Op::OpTypeVector:
case spv::Op::OpTypeMatrix:
number_of_members = type_inst->GetSingleWordInOperand(1);
break;
case SpvOpTypeStruct:
case spv::Op::OpTypeStruct:
number_of_members = type_inst->NumInOperands();
break;
default:
@ -122,12 +122,12 @@ void FuzzerPassAddCompositeExtract::Apply() {
number_of_members));
switch (type_inst->opcode()) {
case SpvOpTypeArray:
case SpvOpTypeVector:
case SpvOpTypeMatrix:
case spv::Op::OpTypeArray:
case spv::Op::OpTypeVector:
case spv::Op::OpTypeMatrix:
type_id = type_inst->GetSingleWordInOperand(0);
break;
case SpvOpTypeStruct:
case spv::Op::OpTypeStruct:
type_id = type_inst->GetSingleWordInOperand(indices.back());
break;
default:


@ -36,10 +36,11 @@ void FuzzerPassAddCompositeInserts::Apply() {
opt::BasicBlock::iterator instruction_iterator,
const protobufs::InstructionDescriptor& instruction_descriptor)
-> void {
assert(instruction_iterator->opcode() ==
instruction_descriptor.target_instruction_opcode() &&
"The opcode of the instruction we might insert before must be "
"the same as the opcode in the descriptor for the instruction");
assert(
instruction_iterator->opcode() ==
spv::Op(instruction_descriptor.target_instruction_opcode()) &&
"The opcode of the instruction we might insert before must be "
"the same as the opcode in the descriptor for the instruction");
// Randomly decide whether to try adding an OpCompositeInsert
// instruction.
@ -51,7 +52,7 @@ void FuzzerPassAddCompositeInserts::Apply() {
// It must be possible to insert an OpCompositeInsert instruction
// before |instruction_iterator|.
if (!fuzzerutil::CanInsertOpcodeBeforeInstruction(
SpvOpCompositeInsert, instruction_iterator)) {
spv::Op::OpCompositeInsert, instruction_iterator)) {
return;
}


@ -114,15 +114,15 @@ uint32_t FuzzerPassAddCompositeTypes::ChooseScalarOrCompositeType() {
std::vector<uint32_t> candidates;
for (auto& inst : GetIRContext()->types_values()) {
switch (inst.opcode()) {
case SpvOpTypeArray:
case SpvOpTypeBool:
case SpvOpTypeFloat:
case SpvOpTypeInt:
case SpvOpTypeMatrix:
case SpvOpTypeVector:
case spv::Op::OpTypeArray:
case spv::Op::OpTypeBool:
case spv::Op::OpTypeFloat:
case spv::Op::OpTypeInt:
case spv::Op::OpTypeMatrix:
case spv::Op::OpTypeVector:
candidates.push_back(inst.result_id());
break;
case SpvOpTypeStruct: {
case spv::Op::OpTypeStruct: {
if (!fuzzerutil::MembersHaveBuiltInDecoration(GetIRContext(),
inst.result_id()) &&
!fuzzerutil::HasBlockOrBufferBlockDecoration(GetIRContext(),


@ -36,7 +36,7 @@ void FuzzerPassAddCopyMemory::Apply() {
opt::BasicBlock::iterator inst_it,
const protobufs::InstructionDescriptor& instruction_descriptor) {
// Check that we can insert an OpCopyMemory before this instruction.
if (!fuzzerutil::CanInsertOpcodeBeforeInstruction(SpvOpCopyMemory,
if (!fuzzerutil::CanInsertOpcodeBeforeInstruction(spv::Op::OpCopyMemory,
inst_it)) {
return;
}
@ -61,8 +61,8 @@ void FuzzerPassAddCopyMemory::Apply() {
// Decide whether to create global or local variable.
auto storage_class = GetFuzzerContext()->ChooseEven()
? SpvStorageClassPrivate
: SpvStorageClassFunction;
? spv::StorageClass::Private
: spv::StorageClass::Function;
auto pointee_type_id = fuzzerutil::GetPointeeTypeIdFromPointerType(
GetIRContext(), inst->type_id());


@ -29,12 +29,14 @@ bool IsBitWidthSupported(opt::IRContext* ir_context, uint32_t bit_width) {
return true;
case 64:
return ir_context->get_feature_mgr()->HasCapability(
SpvCapabilityFloat64) &&
ir_context->get_feature_mgr()->HasCapability(SpvCapabilityInt64);
spv::Capability::Float64) &&
ir_context->get_feature_mgr()->HasCapability(
spv::Capability::Int64);
case 16:
return ir_context->get_feature_mgr()->HasCapability(
SpvCapabilityFloat16) &&
ir_context->get_feature_mgr()->HasCapability(SpvCapabilityInt16);
spv::Capability::Float16) &&
ir_context->get_feature_mgr()->HasCapability(
spv::Capability::Int16);
default:
return false;
}
@ -66,7 +68,8 @@ void FuzzerPassAddEquationInstructions::Apply() {
// as an example opcode for this check, to be representative of *some*
// opcode that defines an equation, even though we may choose a
// different opcode below.
if (!fuzzerutil::CanInsertOpcodeBeforeInstruction(SpvOpIAdd, inst_it)) {
if (!fuzzerutil::CanInsertOpcodeBeforeInstruction(spv::Op::OpIAdd,
inst_it)) {
return;
}
@ -78,7 +81,7 @@ void FuzzerPassAddEquationInstructions::Apply() {
[this](opt::IRContext* /*unused*/,
opt::Instruction* instruction) -> bool {
return instruction->result_id() && instruction->type_id() &&
instruction->opcode() != SpvOpUndef &&
instruction->opcode() != spv::Op::OpUndef &&
!GetTransformationContext()
->GetFactManager()
->IdIsIrrelevant(instruction->result_id());
@ -86,15 +89,16 @@ void FuzzerPassAddEquationInstructions::Apply() {
// Try the opcodes for which we know how to make ids at random until
// something works.
std::vector<SpvOp> candidate_opcodes = {
SpvOpIAdd, SpvOpISub, SpvOpLogicalNot, SpvOpSNegate,
SpvOpConvertUToF, SpvOpConvertSToF, SpvOpBitcast};
std::vector<spv::Op> candidate_opcodes = {
spv::Op::OpIAdd, spv::Op::OpISub, spv::Op::OpLogicalNot,
spv::Op::OpSNegate, spv::Op::OpConvertUToF, spv::Op::OpConvertSToF,
spv::Op::OpBitcast};
do {
auto opcode =
GetFuzzerContext()->RemoveAtRandomIndex(&candidate_opcodes);
switch (opcode) {
case SpvOpConvertSToF:
case SpvOpConvertUToF: {
case spv::Op::OpConvertSToF:
case spv::Op::OpConvertUToF: {
std::vector<const opt::Instruction*> candidate_instructions;
for (const auto* inst :
GetIntegerInstructions(available_instructions)) {
@ -144,7 +148,7 @@ void FuzzerPassAddEquationInstructions::Apply() {
{operand->result_id()}, instruction_descriptor));
return;
}
case SpvOpBitcast: {
case spv::Op::OpBitcast: {
const auto candidate_instructions =
GetNumericalInstructions(available_instructions);
@ -197,8 +201,8 @@ void FuzzerPassAddEquationInstructions::Apply() {
return;
}
} break;
case SpvOpIAdd:
case SpvOpISub: {
case spv::Op::OpIAdd:
case spv::Op::OpISub: {
// Instructions of integer (scalar or vector) result type are
// suitable for these opcodes.
auto integer_instructions =
@ -251,7 +255,7 @@ void FuzzerPassAddEquationInstructions::Apply() {
}
break;
}
case SpvOpLogicalNot: {
case spv::Op::OpLogicalNot: {
// Choose any available instruction of boolean scalar/vector
// result type and equate its negation with a fresh id.
auto boolean_instructions =
@ -268,7 +272,7 @@ void FuzzerPassAddEquationInstructions::Apply() {
}
break;
}
case SpvOpSNegate: {
case spv::Op::OpSNegate: {
// Similar to OpLogicalNot, but for signed integer negation.
auto integer_instructions =
GetIntegerInstructions(available_instructions);


@ -39,8 +39,8 @@ void FuzzerPassAddFunctionCalls::Apply() {
-> void {
// Check whether it is legitimate to insert a function call before the
// instruction.
if (!fuzzerutil::CanInsertOpcodeBeforeInstruction(SpvOpFunctionCall,
inst_it)) {
if (!fuzzerutil::CanInsertOpcodeBeforeInstruction(
spv::Op::OpFunctionCall, inst_it)) {
return;
}
@ -112,8 +112,8 @@ std::vector<uint32_t> FuzzerPassAddFunctionCalls::ChooseFunctionCallArguments(
auto available_pointers = FindAvailableInstructions(
caller_function, caller_block, caller_inst_it,
[this, caller_block](opt::IRContext* /*unused*/, opt::Instruction* inst) {
if (inst->opcode() != SpvOpVariable ||
inst->opcode() != SpvOpFunctionParameter) {
if (inst->opcode() != spv::Op::OpVariable ||
inst->opcode() != spv::Op::OpFunctionParameter) {
// Function parameters and variables are the only
// kinds of pointer that can be used as actual
// parameters.
@ -172,15 +172,15 @@ std::vector<uint32_t> FuzzerPassAddFunctionCalls::ChooseFunctionCallArguments(
auto storage_class = param_type->AsPointer()->storage_class();
auto pointee_type_id = fuzzerutil::GetPointeeTypeIdFromPointerType(
GetIRContext(), param->type_id());
if (storage_class == SpvStorageClassFunction) {
if (storage_class == spv::StorageClass::Function) {
// Add a new zero-initialized local variable to the current
// function, noting that its pointee value is irrelevant.
ApplyTransformation(TransformationAddLocalVariable(
fresh_variable_id, param->type_id(), caller_function->result_id(),
FindOrCreateZeroConstant(pointee_type_id, false), true));
} else {
assert((storage_class == SpvStorageClassPrivate ||
storage_class == SpvStorageClassWorkgroup) &&
assert((storage_class == spv::StorageClass::Private ||
storage_class == spv::StorageClass::Workgroup) &&
"Only Function, Private and Workgroup storage classes are "
"supported at present.");
// Add a new global variable to the module, zero-initializing it if
@ -188,7 +188,7 @@ std::vector<uint32_t> FuzzerPassAddFunctionCalls::ChooseFunctionCallArguments(
// irrelevant.
ApplyTransformation(TransformationAddGlobalVariable(
fresh_variable_id, param->type_id(), storage_class,
storage_class == SpvStorageClassPrivate
storage_class == spv::StorageClass::Private
? FindOrCreateZeroConstant(pointee_type_id, false)
: 0,
true));


@ -29,16 +29,17 @@ FuzzerPassAddGlobalVariables::FuzzerPassAddGlobalVariables(
transformations, ignore_inapplicable_transformations) {}
void FuzzerPassAddGlobalVariables::Apply() {
SpvStorageClass variable_storage_class = SpvStorageClassPrivate;
spv::StorageClass variable_storage_class = spv::StorageClass::Private;
for (auto& entry_point : GetIRContext()->module()->entry_points()) {
// If the execution model of some entry point is GLCompute,
// then the variable storage class may be Workgroup.
if (entry_point.GetSingleWordInOperand(0) == SpvExecutionModelGLCompute) {
if (spv::ExecutionModel(entry_point.GetSingleWordInOperand(0)) ==
spv::ExecutionModel::GLCompute) {
variable_storage_class =
GetFuzzerContext()->ChoosePercentage(
GetFuzzerContext()->GetChanceOfChoosingWorkgroupStorageClass())
? SpvStorageClassWorkgroup
: SpvStorageClassPrivate;
? spv::StorageClass::Workgroup
: spv::StorageClass::Private;
break;
}
}
@ -87,7 +88,7 @@ void FuzzerPassAddGlobalVariables::Apply() {
ApplyTransformation(TransformationAddGlobalVariable(
GetFuzzerContext()->GetFreshId(), pointer_type_id,
variable_storage_class,
variable_storage_class == SpvStorageClassPrivate
variable_storage_class == spv::StorageClass::Private
? FindOrCreateZeroConstant(basic_type, false)
: 0,
true));


@ -34,10 +34,11 @@ void FuzzerPassAddLoads::Apply() {
opt::BasicBlock::iterator inst_it,
const protobufs::InstructionDescriptor& instruction_descriptor)
-> void {
assert(inst_it->opcode() ==
instruction_descriptor.target_instruction_opcode() &&
"The opcode of the instruction we might insert before must be "
"the same as the opcode in the descriptor for the instruction");
assert(
inst_it->opcode() ==
spv::Op(instruction_descriptor.target_instruction_opcode()) &&
"The opcode of the instruction we might insert before must be "
"the same as the opcode in the descriptor for the instruction");
// Randomly decide whether to try inserting a load here.
if (!GetFuzzerContext()->ChoosePercentage(
@ -47,10 +48,11 @@ void FuzzerPassAddLoads::Apply() {
// Check whether it is legitimate to insert a load or atomic load before
// this instruction.
if (!fuzzerutil::CanInsertOpcodeBeforeInstruction(SpvOpLoad, inst_it)) {
if (!fuzzerutil::CanInsertOpcodeBeforeInstruction(spv::Op::OpLoad,
inst_it)) {
return;
}
if (!fuzzerutil::CanInsertOpcodeBeforeInstruction(SpvOpAtomicLoad,
if (!fuzzerutil::CanInsertOpcodeBeforeInstruction(spv::Op::OpAtomicLoad,
inst_it)) {
return;
}
@ -64,8 +66,8 @@ void FuzzerPassAddLoads::Apply() {
return false;
}
switch (instruction->opcode()) {
case SpvOpConstantNull:
case SpvOpUndef:
case spv::Op::OpConstantNull:
case spv::Op::OpUndef:
// Do not allow loading from a null or undefined pointer;
// this might be OK if the block is dead, but for now we
// conservatively avoid it.
@ -75,7 +77,7 @@ void FuzzerPassAddLoads::Apply() {
}
return context->get_def_use_mgr()
->GetDef(instruction->type_id())
->opcode() == SpvOpTypePointer;
->opcode() == spv::Op::OpTypePointer;
});
// At this point, |relevant_instructions| contains all the pointers
@ -92,25 +94,25 @@ void FuzzerPassAddLoads::Apply() {
uint32_t memory_scope_id = 0;
uint32_t memory_semantics_id = 0;
auto storage_class = static_cast<SpvStorageClass>(
auto storage_class = static_cast<spv::StorageClass>(
GetIRContext()
->get_def_use_mgr()
->GetDef(chosen_instruction->type_id())
->GetSingleWordInOperand(0));
switch (storage_class) {
case SpvStorageClassStorageBuffer:
case SpvStorageClassPhysicalStorageBuffer:
case SpvStorageClassWorkgroup:
case SpvStorageClassCrossWorkgroup:
case SpvStorageClassAtomicCounter:
case SpvStorageClassImage:
case spv::StorageClass::StorageBuffer:
case spv::StorageClass::PhysicalStorageBuffer:
case spv::StorageClass::Workgroup:
case spv::StorageClass::CrossWorkgroup:
case spv::StorageClass::AtomicCounter:
case spv::StorageClass::Image:
if (GetFuzzerContext()->ChoosePercentage(
GetFuzzerContext()->GetChanceOfAddingAtomicLoad())) {
is_atomic_load = true;
memory_scope_id = FindOrCreateConstant(
{SpvScopeInvocation},
{uint32_t(spv::Scope::Invocation)},
FindOrCreateIntegerType(32, GetFuzzerContext()->ChooseEven()),
false);
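Several hunks in this pass decode a raw operand word into a scoped enum before dispatching on it, as with the pointer's storage class above, where buffer-like storage classes make an atomic load possible. A self-contained sketch of that decode-and-dispatch pattern, with a stand-in enum rather than the real SPIR-V headers (not part of this commit):

#include <cstdint>

// Stand-in for spv::StorageClass with a few illustrative members; the real
// numeric values come from the SPIR-V headers.
enum class StorageClass : uint32_t {
  UniformConstant = 0,
  Function = 7,
  StorageBuffer = 12,
};

// Decides whether a pointer with the given raw storage-class word could be
// accessed with an atomic instruction, mirroring the switch in the pass above.
bool MayUseAtomicAccess(uint32_t raw_storage_class_word) {
  switch (static_cast<StorageClass>(raw_storage_class_word)) {
    case StorageClass::StorageBuffer:
      return true;  // buffer-like classes permit the atomic variant
    default:
      return false;  // e.g. Function-local pointers keep plain loads/stores
  }
}

int main() {
  return MayUseAtomicAccess(12) && !MayUseAtomicAccess(7) ? 0 : 1;
}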


@ -31,7 +31,7 @@ FuzzerPassAddLocalVariables::FuzzerPassAddLocalVariables(
void FuzzerPassAddLocalVariables::Apply() {
auto basic_type_ids_and_pointers =
GetAvailableBasicTypesAndPointers(SpvStorageClassFunction);
GetAvailableBasicTypesAndPointers(spv::StorageClass::Function);
// These are the basic types that are available to this fuzzer pass.
auto& basic_types = basic_type_ids_and_pointers.first;
@ -64,7 +64,7 @@ void FuzzerPassAddLocalVariables::Apply() {
// use it.
pointer_type = GetFuzzerContext()->GetFreshId();
ApplyTransformation(TransformationAddTypePointer(
pointer_type, SpvStorageClassFunction, basic_type));
pointer_type, spv::StorageClass::Function, basic_type));
available_pointers_to_basic_type.push_back(pointer_type);
} else {
// There is - grab one.


@ -176,8 +176,8 @@ FuzzerPassAddOpPhiSynonyms::GetIdEquivalenceClasses() {
// - OpFunction does not yield a value;
// - OpUndef yields an undefined value at each use, so it should never be a
// synonym of another id.
if (pair.second->opcode() == SpvOpFunction ||
pair.second->opcode() == SpvOpUndef) {
if (pair.second->opcode() == spv::Op::OpFunction ||
pair.second->opcode() == spv::Op::OpUndef) {
continue;
}


@ -79,7 +79,7 @@ void FuzzerPassAddParameters::Apply() {
auto storage_class = fuzzerutil::GetStorageClassFromPointerType(
GetIRContext(), current_type_id);
switch (storage_class) {
case SpvStorageClassFunction: {
case spv::StorageClass::Function: {
// In every caller find or create a local variable that has the
// selected type.
for (auto* instr :
@ -91,8 +91,8 @@ void FuzzerPassAddParameters::Apply() {
call_parameter_ids[instr->result_id()] = variable_id;
}
} break;
case SpvStorageClassPrivate:
case SpvStorageClassWorkgroup: {
case spv::StorageClass::Private:
case spv::StorageClass::Workgroup: {
// If there exists at least one caller, find or create a global
// variable that has the selected type.
std::vector<opt::Instruction*> callers =


@ -34,10 +34,11 @@ void FuzzerPassAddStores::Apply() {
opt::BasicBlock::iterator inst_it,
const protobufs::InstructionDescriptor& instruction_descriptor)
-> void {
assert(inst_it->opcode() ==
instruction_descriptor.target_instruction_opcode() &&
"The opcode of the instruction we might insert before must be "
"the same as the opcode in the descriptor for the instruction");
assert(
inst_it->opcode() ==
spv::Op(instruction_descriptor.target_instruction_opcode()) &&
"The opcode of the instruction we might insert before must be "
"the same as the opcode in the descriptor for the instruction");
// Randomly decide whether to try inserting a store here.
if (!GetFuzzerContext()->ChoosePercentage(
@ -47,12 +48,12 @@ void FuzzerPassAddStores::Apply() {
// Check whether it is legitimate to insert a store before this
// instruction.
if (!fuzzerutil::CanInsertOpcodeBeforeInstruction(SpvOpStore,
if (!fuzzerutil::CanInsertOpcodeBeforeInstruction(spv::Op::OpStore,
inst_it)) {
return;
}
if (!fuzzerutil::CanInsertOpcodeBeforeInstruction(SpvOpAtomicStore,
inst_it)) {
if (!fuzzerutil::CanInsertOpcodeBeforeInstruction(
spv::Op::OpAtomicStore, inst_it)) {
return;
}
@ -67,7 +68,7 @@ void FuzzerPassAddStores::Apply() {
}
auto type_inst = context->get_def_use_mgr()->GetDef(
instruction->type_id());
if (type_inst->opcode() != SpvOpTypePointer) {
if (type_inst->opcode() != spv::Op::OpTypePointer) {
// Not a pointer.
return false;
}
@ -76,8 +77,8 @@ void FuzzerPassAddStores::Apply() {
return false;
}
switch (instruction->opcode()) {
case SpvOpConstantNull:
case SpvOpUndef:
case spv::Op::OpConstantNull:
case spv::Op::OpUndef:
// Do not allow storing to a null or undefined pointer;
// this might be OK if the block is dead, but for now we
// conservatively avoid it.
@ -126,24 +127,24 @@ void FuzzerPassAddStores::Apply() {
uint32_t memory_semantics_id = 0;
auto storage_class =
static_cast<SpvStorageClass>(GetIRContext()
->get_def_use_mgr()
->GetDef(pointer->type_id())
->GetSingleWordInOperand(0));
static_cast<spv::StorageClass>(GetIRContext()
->get_def_use_mgr()
->GetDef(pointer->type_id())
->GetSingleWordInOperand(0));
switch (storage_class) {
case SpvStorageClassStorageBuffer:
case SpvStorageClassPhysicalStorageBuffer:
case SpvStorageClassWorkgroup:
case SpvStorageClassCrossWorkgroup:
case SpvStorageClassAtomicCounter:
case SpvStorageClassImage:
case spv::StorageClass::StorageBuffer:
case spv::StorageClass::PhysicalStorageBuffer:
case spv::StorageClass::Workgroup:
case spv::StorageClass::CrossWorkgroup:
case spv::StorageClass::AtomicCounter:
case spv::StorageClass::Image:
if (GetFuzzerContext()->ChoosePercentage(
GetFuzzerContext()->GetChanceOfAddingAtomicStore())) {
is_atomic_store = true;
memory_scope_id = FindOrCreateConstant(
{SpvScopeInvocation},
{uint32_t(spv::Scope::Invocation)},
FindOrCreateIntegerType(32, GetFuzzerContext()->ChooseEven()),
false);


@ -44,7 +44,8 @@ void FuzzerPassAddSynonyms::Apply() {
// Skip |inst_it| if we can't insert anything above it. OpIAdd is just
// a representative of some instruction that might be produced by the
// transformation.
if (!fuzzerutil::CanInsertOpcodeBeforeInstruction(SpvOpIAdd, inst_it)) {
if (!fuzzerutil::CanInsertOpcodeBeforeInstruction(spv::Op::OpIAdd,
inst_it)) {
return;
}


@ -35,10 +35,11 @@ void FuzzerPassAddVectorShuffleInstructions::Apply() {
opt::BasicBlock::iterator instruction_iterator,
const protobufs::InstructionDescriptor& instruction_descriptor)
-> void {
assert(instruction_iterator->opcode() ==
instruction_descriptor.target_instruction_opcode() &&
"The opcode of the instruction we might insert before must be "
"the same as the opcode in the descriptor for the instruction");
assert(
instruction_iterator->opcode() ==
spv::Op(instruction_descriptor.target_instruction_opcode()) &&
"The opcode of the instruction we might insert before must be "
"the same as the opcode in the descriptor for the instruction");
// Randomly decide whether to try adding an OpVectorShuffle instruction.
if (!GetFuzzerContext()->ChoosePercentage(
@ -49,7 +50,7 @@ void FuzzerPassAddVectorShuffleInstructions::Apply() {
// It must be valid to insert an OpVectorShuffle instruction
// before |instruction_iterator|.
if (!fuzzerutil::CanInsertOpcodeBeforeInstruction(
SpvOpVectorShuffle, instruction_iterator)) {
spv::Op::OpVectorShuffle, instruction_iterator)) {
return;
}


@ -33,7 +33,7 @@ void FuzzerPassAdjustBranchWeights::Apply() {
// For all OpBranchConditional instructions,
// randomly applies the transformation.
GetIRContext()->module()->ForEachInst([this](opt::Instruction* instruction) {
if (instruction->opcode() == SpvOpBranchConditional &&
if (instruction->opcode() == spv::Op::OpBranchConditional &&
GetFuzzerContext()->ChoosePercentage(
GetFuzzerContext()->GetChanceOfAdjustingBranchWeights())) {
ApplyTransformation(TransformationAdjustBranchWeights(


@ -40,21 +40,21 @@ void FuzzerPassAdjustFunctionControls::Apply() {
// For the new mask, we first randomly select one of three basic masks:
// None, Inline or DontInline. These are always valid (and are mutually
// exclusive).
std::vector<uint32_t> basic_function_control_masks = {
SpvFunctionControlMaskNone, SpvFunctionControlInlineMask,
SpvFunctionControlDontInlineMask};
std::vector<spv::FunctionControlMask> basic_function_control_masks = {
spv::FunctionControlMask::MaskNone, spv::FunctionControlMask::Inline,
spv::FunctionControlMask::DontInline};
uint32_t new_function_control_mask =
basic_function_control_masks[GetFuzzerContext()->RandomIndex(
basic_function_control_masks)];
uint32_t(basic_function_control_masks[GetFuzzerContext()->RandomIndex(
basic_function_control_masks)]);
// We now consider the Pure and Const mask bits. If these are already
// set on the function then it's OK to keep them, but also interesting
// to consider dropping them, so we decide randomly in each case.
for (auto mask_bit :
{SpvFunctionControlPureMask, SpvFunctionControlConstMask}) {
if ((existing_function_control_mask & mask_bit) &&
{spv::FunctionControlMask::Pure, spv::FunctionControlMask::Const}) {
if ((existing_function_control_mask & uint32_t(mask_bit)) &&
GetFuzzerContext()->ChooseEven()) {
new_function_control_mask |= mask_bit;
new_function_control_mask |= uint32_t(mask_bit);
}
}
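As with the opcode change earlier, spv::FunctionControlMask is now a scoped enum, so composing a control mask with bitwise operators needs explicit uint32_t casts, which is what the uint32_t(mask_bit) calls above provide. A small standalone sketch of the pattern with stand-in flag values (not part of this commit):

#include <cstdint>

// Stand-in for spv::FunctionControlMask; each member is a single-bit flag.
enum class FunctionControlMask : uint32_t {
  MaskNone = 0,
  Inline = 0x1,
  DontInline = 0x2,
  Pure = 0x4,
  Const = 0x8,
};

int main() {
  uint32_t existing = uint32_t(FunctionControlMask::Inline) |
                      uint32_t(FunctionControlMask::Pure);

  // Start from one of the basic masks, then keep an optional hint bit only if
  // it was already present (mirroring the loop over Pure and Const above,
  // minus the random choice).
  uint32_t new_mask = uint32_t(FunctionControlMask::DontInline);
  for (auto bit : {FunctionControlMask::Pure, FunctionControlMask::Const}) {
    if (existing & uint32_t(bit)) {
      new_mask |= uint32_t(bit);
    }
  }
  // Pure was present and is kept; Const was absent and stays clear.
  return new_mask == (uint32_t(FunctionControlMask::DontInline) |
                      uint32_t(FunctionControlMask::Pure))
             ? 0
             : 1;
}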


@ -34,7 +34,7 @@ void FuzzerPassAdjustLoopControls::Apply() {
for (auto& block : function) {
if (auto merge_inst = block.GetMergeInst()) {
// Ignore the instruction if it is not a loop merge.
if (merge_inst->opcode() != SpvOpLoopMerge) {
if (merge_inst->opcode() != spv::Op::OpLoopMerge) {
continue;
}
@ -48,9 +48,10 @@ void FuzzerPassAdjustLoopControls::Apply() {
TransformationSetLoopControl::kLoopControlMaskInOperandIndex);
// First, set the new mask to one of None, Unroll or DontUnroll.
std::vector<uint32_t> basic_masks = {SpvLoopControlMaskNone,
SpvLoopControlUnrollMask,
SpvLoopControlDontUnrollMask};
std::vector<uint32_t> basic_masks = {
uint32_t(spv::LoopControlMask::MaskNone),
uint32_t(spv::LoopControlMask::Unroll),
uint32_t(spv::LoopControlMask::DontUnroll)};
uint32_t new_mask =
basic_masks[GetFuzzerContext()->RandomIndex(basic_masks)];
@ -58,19 +59,20 @@ void FuzzerPassAdjustLoopControls::Apply() {
// does, check which of these were present in the existing mask and
// randomly decide whether to keep them. They are just hints, so
// removing them should not change the semantics of the module.
for (auto mask_bit :
{SpvLoopControlDependencyInfiniteMask,
SpvLoopControlDependencyLengthMask,
SpvLoopControlMinIterationsMask, SpvLoopControlMaxIterationsMask,
SpvLoopControlIterationMultipleMask}) {
if ((existing_mask & mask_bit) && GetFuzzerContext()->ChooseEven()) {
for (auto mask_bit : {spv::LoopControlMask::DependencyInfinite,
spv::LoopControlMask::DependencyLength,
spv::LoopControlMask::MinIterations,
spv::LoopControlMask::MaxIterations,
spv::LoopControlMask::IterationMultiple}) {
if ((existing_mask & uint32_t(mask_bit)) &&
GetFuzzerContext()->ChooseEven()) {
// The mask bits we are considering are not available in all SPIR-V
// versions. However, we only include a mask bit if it was present
// in the original loop control mask, and we work under the
// assumption that we are transforming a valid module, thus we don't
// need to actually check whether the SPIR-V version being used
// supports these loop control mask bits.
new_mask |= mask_bit;
new_mask |= uint32_t(mask_bit);
}
}
@ -81,14 +83,14 @@ void FuzzerPassAdjustLoopControls::Apply() {
// PeelCount and PartialCount are not compatible with DontUnroll, so
// we check whether DontUnroll is set.
if (!(new_mask & SpvLoopControlDontUnrollMask)) {
if (!(new_mask & uint32_t(spv::LoopControlMask::DontUnroll))) {
// If PeelCount is supported by this SPIR-V version, randomly choose
// whether to set it. If it was set in the original mask and is not
// selected for setting here, that amounts to dropping it.
if (TransformationSetLoopControl::PeelCountIsSupported(
GetIRContext()) &&
GetFuzzerContext()->ChooseEven()) {
new_mask |= SpvLoopControlPeelCountMask;
new_mask |= uint32_t(spv::LoopControlMask::PeelCount);
// The peel count is chosen randomly - if PeelCount was already set
// this will overwrite whatever peel count was previously used.
peel_count = GetFuzzerContext()->GetRandomLoopControlPeelCount();
@ -97,7 +99,7 @@ void FuzzerPassAdjustLoopControls::Apply() {
if (TransformationSetLoopControl::PartialCountIsSupported(
GetIRContext()) &&
GetFuzzerContext()->ChooseEven()) {
new_mask |= SpvLoopControlPartialCountMask;
new_mask |= uint32_t(spv::LoopControlMask::PartialCount);
partial_count =
GetFuzzerContext()->GetRandomLoopControlPartialCount();
}


@ -47,8 +47,8 @@ void FuzzerPassAdjustMemoryOperandsMasks::Apply() {
// From SPIR-V 1.4 onwards, OpCopyMemory and OpCopyMemorySized have a
// second mask.
switch (inst_it->opcode()) {
case SpvOpCopyMemory:
case SpvOpCopyMemorySized:
case spv::Op::OpCopyMemory:
case spv::Op::OpCopyMemorySized:
if (TransformationSetMemoryOperandsMask::
MultipleMemoryOperandMasksAreSupported(GetIRContext())) {
indices_of_available_masks_to_adjust.push_back(1);
@ -75,24 +75,26 @@ void FuzzerPassAdjustMemoryOperandsMasks::Apply() {
existing_mask_in_operand_index < inst_it->NumInOperands()
? inst_it->GetSingleWordInOperand(
existing_mask_in_operand_index)
: static_cast<uint32_t>(SpvMemoryAccessMaskNone);
: static_cast<uint32_t>(spv::MemoryAccessMask::MaskNone);
// There are two things we can do to a mask:
// - add Volatile if not already present
// - toggle Nontemporal
// The following ensures that we do at least one of these
bool add_volatile = !(existing_mask & SpvMemoryAccessVolatileMask) &&
GetFuzzerContext()->ChooseEven();
bool add_volatile =
!(existing_mask & uint32_t(spv::MemoryAccessMask::Volatile)) &&
GetFuzzerContext()->ChooseEven();
bool toggle_nontemporal =
!add_volatile || GetFuzzerContext()->ChooseEven();
// These bitwise operations use '|' to add Volatile if desired, and
// '^' to toggle Nontemporal if desired.
uint32_t new_mask =
(existing_mask | (add_volatile ? SpvMemoryAccessVolatileMask
: SpvMemoryAccessMaskNone)) ^
(toggle_nontemporal ? SpvMemoryAccessNontemporalMask
: SpvMemoryAccessMaskNone);
(existing_mask |
(add_volatile ? uint32_t(spv::MemoryAccessMask::Volatile)
: uint32_t(spv::MemoryAccessMask::MaskNone))) ^
(toggle_nontemporal ? uint32_t(spv::MemoryAccessMask::Nontemporal)
: uint32_t(spv::MemoryAccessMask::MaskNone));
TransformationSetMemoryOperandsMask transformation(
MakeInstructionDescriptor(block, inst_it), new_mask, mask_index);
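The mask arithmetic above relies on '|' only ever setting the Volatile bit and '^' flipping the Nontemporal bit whatever its current state. A tiny standalone check of that behaviour, with plain constants standing in for the spv::MemoryAccessMask values and the random choice fixed (not part of this commit):

#include <cstdint>

int main() {
  const uint32_t kVolatile = 0x1;     // stands in for MemoryAccessMask::Volatile
  const uint32_t kNontemporal = 0x4;  // stands in for MemoryAccessMask::Nontemporal

  uint32_t existing = kNontemporal;   // Volatile clear, Nontemporal set

  bool add_volatile = !(existing & kVolatile);  // only add if not already set
  bool toggle_nontemporal = true;               // stands in for a random choice

  // '|' can only set the Volatile bit; '^' flips Nontemporal either way.
  uint32_t new_mask = (existing | (add_volatile ? kVolatile : 0u)) ^
                      (toggle_nontemporal ? kNontemporal : 0u);

  // Volatile has been added and Nontemporal toggled off.
  return new_mask == kVolatile ? 0 : 1;
}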


@ -34,7 +34,7 @@ void FuzzerPassAdjustSelectionControls::Apply() {
for (auto& block : function) {
if (auto merge_inst = block.GetMergeInst()) {
// Ignore the instruction if it is not a selection merge.
if (merge_inst->opcode() != SpvOpSelectionMerge) {
if (merge_inst->opcode() != spv::Op::OpSelectionMerge) {
continue;
}
@ -48,13 +48,14 @@ void FuzzerPassAdjustSelectionControls::Apply() {
// The choices to change the selection control to are the set of valid
// controls, minus the current control.
std::vector<uint32_t> choices;
for (auto control :
{SpvSelectionControlMaskNone, SpvSelectionControlFlattenMask,
SpvSelectionControlDontFlattenMask}) {
if (control == merge_inst->GetSingleWordOperand(1)) {
for (auto control : {spv::SelectionControlMask::MaskNone,
spv::SelectionControlMask::Flatten,
spv::SelectionControlMask::DontFlatten}) {
if (control ==
spv::SelectionControlMask(merge_inst->GetSingleWordOperand(1))) {
continue;
}
choices.push_back(control);
choices.push_back(uint32_t(control));
}
// Apply the transformation and add it to the output transformation


@ -107,9 +107,9 @@ void FuzzerPassApplyIdSynonyms::Apply() {
// which case we need to be able to add an extract instruction to get
// that element out.
if (synonym_to_try->index_size() > 0 &&
!fuzzerutil::CanInsertOpcodeBeforeInstruction(SpvOpCompositeExtract,
use_inst) &&
use_inst->opcode() != SpvOpPhi) {
!fuzzerutil::CanInsertOpcodeBeforeInstruction(
spv::Op::OpCompositeExtract, use_inst) &&
use_inst->opcode() != spv::Op::OpPhi) {
// We cannot insert an extract before this instruction, so this
// synonym is no good.
continue;
@ -132,7 +132,7 @@ void FuzzerPassApplyIdSynonyms::Apply() {
id_with_which_to_replace_use = GetFuzzerContext()->GetFreshId();
opt::Instruction* instruction_to_insert_before = nullptr;
if (use_inst->opcode() != SpvOpPhi) {
if (use_inst->opcode() != spv::Op::OpPhi) {
instruction_to_insert_before = use_inst;
} else {
auto parent_block_id =
@ -182,7 +182,7 @@ void FuzzerPassApplyIdSynonyms::Apply() {
}
bool FuzzerPassApplyIdSynonyms::DataDescriptorsHaveCompatibleTypes(
SpvOp opcode, uint32_t use_in_operand_index,
spv::Op opcode, uint32_t use_in_operand_index,
const protobufs::DataDescriptor& dd1,
const protobufs::DataDescriptor& dd2) {
auto base_object_type_id_1 =


@ -38,7 +38,7 @@ class FuzzerPassApplyIdSynonyms : public FuzzerPass {
// with respect to the type. Concretely, returns true if |dd1| and |dd2| have
// the same type or both |dd1| and |dd2| are either a numerical or a vector
// type of integral components with possibly different signedness.
bool DataDescriptorsHaveCompatibleTypes(SpvOp opcode,
bool DataDescriptorsHaveCompatibleTypes(spv::Op opcode,
uint32_t use_in_operand_index,
const protobufs::DataDescriptor& dd1,
const protobufs::DataDescriptor& dd2);


@ -81,7 +81,7 @@ void FuzzerPassConstructComposites::Apply() {
// Check whether it is legitimate to insert a composite construction
// before the instruction.
if (!fuzzerutil::CanInsertOpcodeBeforeInstruction(
SpvOpCompositeConstruct, inst_it)) {
spv::Op::OpCompositeConstruct, inst_it)) {
return;
}
@ -121,19 +121,19 @@ void FuzzerPassConstructComposites::Apply() {
auto composite_type_inst =
GetIRContext()->get_def_use_mgr()->GetDef(chosen_composite_type);
switch (composite_type_inst->opcode()) {
case SpvOpTypeArray:
case spv::Op::OpTypeArray:
constructor_arguments = FindComponentsToConstructArray(
*composite_type_inst, type_id_to_available_instructions);
break;
case SpvOpTypeMatrix:
case spv::Op::OpTypeMatrix:
constructor_arguments = FindComponentsToConstructMatrix(
*composite_type_inst, type_id_to_available_instructions);
break;
case SpvOpTypeStruct:
case spv::Op::OpTypeStruct:
constructor_arguments = FindComponentsToConstructStruct(
*composite_type_inst, type_id_to_available_instructions);
break;
case SpvOpTypeVector:
case spv::Op::OpTypeVector:
constructor_arguments = FindComponentsToConstructVector(
*composite_type_inst, type_id_to_available_instructions);
break;
@ -156,7 +156,7 @@ std::vector<uint32_t>
FuzzerPassConstructComposites::FindComponentsToConstructArray(
const opt::Instruction& array_type_instruction,
const TypeIdToInstructions& type_id_to_available_instructions) {
assert(array_type_instruction.opcode() == SpvOpTypeArray &&
assert(array_type_instruction.opcode() == spv::Op::OpTypeArray &&
"Precondition: instruction must be an array type.");
// Get the element type for the array.
@ -191,7 +191,7 @@ std::vector<uint32_t>
FuzzerPassConstructComposites::FindComponentsToConstructMatrix(
const opt::Instruction& matrix_type_instruction,
const TypeIdToInstructions& type_id_to_available_instructions) {
assert(matrix_type_instruction.opcode() == SpvOpTypeMatrix &&
assert(matrix_type_instruction.opcode() == spv::Op::OpTypeMatrix &&
"Precondition: instruction must be a matrix type.");
// Get the element type for the matrix.
@ -221,7 +221,7 @@ std::vector<uint32_t>
FuzzerPassConstructComposites::FindComponentsToConstructStruct(
const opt::Instruction& struct_type_instruction,
const TypeIdToInstructions& type_id_to_available_instructions) {
assert(struct_type_instruction.opcode() == SpvOpTypeStruct &&
assert(struct_type_instruction.opcode() == spv::Op::OpTypeStruct &&
"Precondition: instruction must be a struct type.");
std::vector<uint32_t> result;
// Consider the type of each field of the struct.
@ -251,7 +251,7 @@ std::vector<uint32_t>
FuzzerPassConstructComposites::FindComponentsToConstructVector(
const opt::Instruction& vector_type_instruction,
const TypeIdToInstructions& type_id_to_available_instructions) {
assert(vector_type_instruction.opcode() == SpvOpTypeVector &&
assert(vector_type_instruction.opcode() == spv::Op::OpTypeVector &&
"Precondition: instruction must be a vector type.");
// Get details of the type underlying the vector, and the width of the vector,


@ -35,10 +35,11 @@ void FuzzerPassCopyObjects::Apply() {
opt::BasicBlock::iterator inst_it,
const protobufs::InstructionDescriptor& instruction_descriptor)
-> void {
assert(inst_it->opcode() ==
instruction_descriptor.target_instruction_opcode() &&
"The opcode of the instruction we might insert before must be "
"the same as the opcode in the descriptor for the instruction");
assert(
inst_it->opcode() ==
spv::Op(instruction_descriptor.target_instruction_opcode()) &&
"The opcode of the instruction we might insert before must be "
"the same as the opcode in the descriptor for the instruction");
if (GetTransformationContext()->GetFactManager()->BlockIsDead(
block->id())) {
@ -48,7 +49,7 @@ void FuzzerPassCopyObjects::Apply() {
// Check whether it is legitimate to insert a copy before this
// instruction.
if (!fuzzerutil::CanInsertOpcodeBeforeInstruction(SpvOpCopyObject,
if (!fuzzerutil::CanInsertOpcodeBeforeInstruction(spv::Op::OpCopyObject,
inst_it)) {
return;
}


@ -88,7 +88,7 @@ void FuzzerPassDonateModules::DonateSingleModule(
// module.
for (const auto& capability_inst : donor_ir_context->capabilities()) {
auto capability =
static_cast<SpvCapability>(capability_inst.GetSingleWordInOperand(0));
static_cast<spv::Capability>(capability_inst.GetSingleWordInOperand(0));
if (!GetIRContext()->get_feature_mgr()->HasCapability(capability)) {
return;
}
@ -122,27 +122,27 @@ void FuzzerPassDonateModules::DonateSingleModule(
// kinds of decoration.
}
SpvStorageClass FuzzerPassDonateModules::AdaptStorageClass(
SpvStorageClass donor_storage_class) {
spv::StorageClass FuzzerPassDonateModules::AdaptStorageClass(
spv::StorageClass donor_storage_class) {
switch (donor_storage_class) {
case SpvStorageClassFunction:
case SpvStorageClassPrivate:
case SpvStorageClassWorkgroup:
case spv::StorageClass::Function:
case spv::StorageClass::Private:
case spv::StorageClass::Workgroup:
// We leave these alone
return donor_storage_class;
case SpvStorageClassInput:
case SpvStorageClassOutput:
case SpvStorageClassUniform:
case SpvStorageClassUniformConstant:
case SpvStorageClassPushConstant:
case SpvStorageClassImage:
case SpvStorageClassStorageBuffer:
case spv::StorageClass::Input:
case spv::StorageClass::Output:
case spv::StorageClass::Uniform:
case spv::StorageClass::UniformConstant:
case spv::StorageClass::PushConstant:
case spv::StorageClass::Image:
case spv::StorageClass::StorageBuffer:
// We change these to Private
return SpvStorageClassPrivate;
return spv::StorageClass::Private;
default:
// Handle other cases on demand.
assert(false && "Currently unsupported storage class.");
return SpvStorageClassMax;
return spv::StorageClass::Max;
}
}
@ -200,14 +200,14 @@ void FuzzerPassDonateModules::HandleTypeOrValue(
// that its component types will have been considered previously, and that
// |original_id_to_donated_id| will already contain an entry for them.
switch (type_or_value.opcode()) {
case SpvOpTypeImage:
case SpvOpTypeSampledImage:
case SpvOpTypeSampler:
case spv::Op::OpTypeImage:
case spv::Op::OpTypeSampledImage:
case spv::Op::OpTypeSampler:
// We do not donate types and variables that relate to images and
// samplers, so we skip these types and subsequently skip anything that
// depends on them.
return;
case SpvOpTypeVoid: {
case spv::Op::OpTypeVoid: {
// Void has to exist already in order for us to have an entry point.
// Get the existing id of void.
opt::analysis::Void void_type;
@ -216,7 +216,7 @@ void FuzzerPassDonateModules::HandleTypeOrValue(
"The module being transformed will always have 'void' type "
"declared.");
} break;
case SpvOpTypeBool: {
case spv::Op::OpTypeBool: {
// Bool cannot be declared multiple times, so use its existing id if
// present, or add a declaration of Bool with a fresh id if not.
opt::analysis::Bool bool_type;
@ -228,7 +228,7 @@ void FuzzerPassDonateModules::HandleTypeOrValue(
ApplyTransformation(TransformationAddTypeBoolean(new_result_id));
}
} break;
case SpvOpTypeInt: {
case spv::Op::OpTypeInt: {
// Int cannot be declared multiple times with the same width and
// signedness, so check whether an existing identical Int type is
// present and use its id if so. Otherwise add a declaration of the
@ -246,8 +246,8 @@ void FuzzerPassDonateModules::HandleTypeOrValue(
TransformationAddTypeInt(new_result_id, width, is_signed));
}
} break;
case SpvOpTypeFloat: {
// Similar to SpvOpTypeInt.
case spv::Op::OpTypeFloat: {
// Similar to spv::Op::OpTypeInt.
const uint32_t width = type_or_value.GetSingleWordInOperand(0);
opt::analysis::Float float_type(width);
auto float_type_id = GetIRContext()->get_type_mgr()->GetId(&float_type);
@ -258,7 +258,7 @@ void FuzzerPassDonateModules::HandleTypeOrValue(
ApplyTransformation(TransformationAddTypeFloat(new_result_id, width));
}
} break;
case SpvOpTypeVector: {
case spv::Op::OpTypeVector: {
// It is not legal to have two Vector type declarations with identical
// element types and element counts, so check whether an existing
// identical Vector type is present and use its id if so. Otherwise add
@ -282,8 +282,8 @@ void FuzzerPassDonateModules::HandleTypeOrValue(
new_result_id, component_type_id, component_count));
}
} break;
case SpvOpTypeMatrix: {
// Similar to SpvOpTypeVector.
case spv::Op::OpTypeMatrix: {
// Similar to spv::Op::OpTypeVector.
uint32_t column_type_id = original_id_to_donated_id->at(
type_or_value.GetSingleWordInOperand(0));
auto column_type =
@ -302,7 +302,7 @@ void FuzzerPassDonateModules::HandleTypeOrValue(
}
} break;
case SpvOpTypeArray: {
case spv::Op::OpTypeArray: {
// It is OK to have multiple structurally identical array types, so
// we go ahead and add a remapped version of the type declared by the
// donor.
@ -318,7 +318,7 @@ void FuzzerPassDonateModules::HandleTypeOrValue(
original_id_to_donated_id->at(
type_or_value.GetSingleWordInOperand(1))));
} break;
case SpvOpTypeRuntimeArray: {
case spv::Op::OpTypeRuntimeArray: {
// A runtime array is allowed as the final member of an SSBO. During
// donation we turn runtime arrays into fixed-size arrays. For dead
// code donations this is OK because the array is never indexed into at
@ -341,8 +341,8 @@ void FuzzerPassDonateModules::HandleTypeOrValue(
{GetFuzzerContext()->GetRandomSizeForNewArray()}, 32, false,
false)));
} break;
case SpvOpTypeStruct: {
// Similar to SpvOpTypeArray.
case spv::Op::OpTypeStruct: {
// Similar to spv::Op::OpTypeArray.
std::vector<uint32_t> member_type_ids;
for (uint32_t i = 0; i < type_or_value.NumInOperands(); i++) {
auto component_type_id = type_or_value.GetSingleWordInOperand(i);
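
For the OpTypeRuntimeArray case a few lines up, donation substitutes a fixed-size array whose length is drawn from the fuzzer context (GetRandomSizeForNewArray()), since runtime arrays are only legal as the final SSBO member and are never indexed at runtime in dead-code donations. A standalone sketch of that substitution, with invented helper names, might be:

// Standalone sketch of the runtime-array rewrite (helper names are invented):
// the donated type keeps the element type id but gains a concrete, randomly
// chosen length, since recipient modules never receive true runtime arrays.
#include <cstdint>
#include <random>

struct ArrayTypeInfo {
  uint32_t element_type_id;
  uint32_t length;  // 0 stands for "runtime sized" in this sketch
};

ArrayTypeInfo MakeFixedSizeCopy(const ArrayTypeInfo& runtime_array,
                                std::mt19937& rng) {
  // Mirrors the role of GetRandomSizeForNewArray(): any small positive length
  // will do, because dead-code donations never index the array dynamically.
  std::uniform_int_distribution<uint32_t> size_dist(1, 16);
  return {runtime_array.element_type_id, size_dist(rng)};
}
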
@ -358,8 +358,8 @@ void FuzzerPassDonateModules::HandleTypeOrValue(
ApplyTransformation(
TransformationAddTypeStruct(new_result_id, member_type_ids));
} break;
case SpvOpTypePointer: {
// Similar to SpvOpTypeArray.
case spv::Op::OpTypePointer: {
// Similar to spv::Op::OpTypeArray.
uint32_t pointee_type_id = type_or_value.GetSingleWordInOperand(1);
if (!original_id_to_donated_id->count(pointee_type_id)) {
// We did not donate the pointee type for this pointer type, so we
@ -369,11 +369,11 @@ void FuzzerPassDonateModules::HandleTypeOrValue(
new_result_id = GetFuzzerContext()->GetFreshId();
ApplyTransformation(TransformationAddTypePointer(
new_result_id,
AdaptStorageClass(static_cast<SpvStorageClass>(
AdaptStorageClass(static_cast<spv::StorageClass>(
type_or_value.GetSingleWordInOperand(0))),
original_id_to_donated_id->at(pointee_type_id)));
} break;
case SpvOpTypeFunction: {
case spv::Op::OpTypeFunction: {
// It is not OK to have multiple function types that use identical ids
// for their return and parameter types. We thus go through all
// existing function types to look for a match. We do not use the
@ -425,10 +425,11 @@ void FuzzerPassDonateModules::HandleTypeOrValue(
argument_type_ids));
}
} break;
case SpvOpSpecConstantOp: {
case spv::Op::OpSpecConstantOp: {
new_result_id = GetFuzzerContext()->GetFreshId();
auto type_id = original_id_to_donated_id->at(type_or_value.type_id());
auto opcode = static_cast<SpvOp>(type_or_value.GetSingleWordInOperand(0));
auto opcode =
static_cast<spv::Op>(type_or_value.GetSingleWordInOperand(0));
// Make sure we take into account |original_id_to_donated_id| when
// computing operands for OpSpecConstantOp.
@ -447,20 +448,20 @@ void FuzzerPassDonateModules::HandleTypeOrValue(
ApplyTransformation(TransformationAddSpecConstantOp(
new_result_id, type_id, opcode, std::move(operands)));
} break;
case SpvOpSpecConstantTrue:
case SpvOpSpecConstantFalse:
case SpvOpConstantTrue:
case SpvOpConstantFalse: {
case spv::Op::OpSpecConstantTrue:
case spv::Op::OpSpecConstantFalse:
case spv::Op::OpConstantTrue:
case spv::Op::OpConstantFalse: {
// It is OK to have duplicate definitions of True and False, so add
// these to the module, using a remapped Bool type.
new_result_id = GetFuzzerContext()->GetFreshId();
auto value = type_or_value.opcode() == SpvOpConstantTrue ||
type_or_value.opcode() == SpvOpSpecConstantTrue;
auto value = type_or_value.opcode() == spv::Op::OpConstantTrue ||
type_or_value.opcode() == spv::Op::OpSpecConstantTrue;
ApplyTransformation(
TransformationAddConstantBoolean(new_result_id, value, false));
} break;
case SpvOpSpecConstant:
case SpvOpConstant: {
case spv::Op::OpSpecConstant:
case spv::Op::OpConstant: {
// It is OK to have duplicate constant definitions, so add this to the
// module using a remapped result type.
new_result_id = GetFuzzerContext()->GetFreshId();
@ -472,8 +473,8 @@ void FuzzerPassDonateModules::HandleTypeOrValue(
new_result_id, original_id_to_donated_id->at(type_or_value.type_id()),
data_words, false));
} break;
case SpvOpSpecConstantComposite:
case SpvOpConstantComposite: {
case spv::Op::OpSpecConstantComposite:
case spv::Op::OpConstantComposite: {
assert(original_id_to_donated_id->count(type_or_value.type_id()) &&
"Composite types for which it is possible to create a constant "
"should have been donated.");
@ -495,7 +496,7 @@ void FuzzerPassDonateModules::HandleTypeOrValue(
new_result_id, original_id_to_donated_id->at(type_or_value.type_id()),
constituent_ids, false));
} break;
case SpvOpConstantNull: {
case spv::Op::OpConstantNull: {
if (!original_id_to_donated_id->count(type_or_value.type_id())) {
// We did not donate the type associated with this null constant, so
// we cannot donate the null constant.
@ -509,7 +510,7 @@ void FuzzerPassDonateModules::HandleTypeOrValue(
new_result_id,
original_id_to_donated_id->at(type_or_value.type_id())));
} break;
case SpvOpVariable: {
case spv::Op::OpVariable: {
if (!original_id_to_donated_id->count(type_or_value.type_id())) {
// We did not donate the pointer type associated with this variable,
// so we cannot donate the variable.
@ -536,11 +537,11 @@ void FuzzerPassDonateModules::HandleTypeOrValue(
uint32_t remapped_pointer_type =
original_id_to_donated_id->at(type_or_value.type_id());
uint32_t initializer_id;
SpvStorageClass storage_class =
static_cast<SpvStorageClass>(type_or_value.GetSingleWordInOperand(
0)) == SpvStorageClassWorkgroup
? SpvStorageClassWorkgroup
: SpvStorageClassPrivate;
spv::StorageClass storage_class =
static_cast<spv::StorageClass>(type_or_value.GetSingleWordInOperand(
0)) == spv::StorageClass::Workgroup
? spv::StorageClass::Workgroup
: spv::StorageClass::Private;
if (type_or_value.NumInOperands() == 1) {
// The variable did not have an initializer. Initialize it to zero
// if it has Private storage class (to limit problems associated with
@ -551,7 +552,7 @@ void FuzzerPassDonateModules::HandleTypeOrValue(
// could initialize Workgroup variables at the start of an entry
// point, and should do so if their uninitialized nature proves
// problematic.
initializer_id = storage_class == SpvStorageClassWorkgroup
initializer_id = storage_class == spv::StorageClass::Workgroup
? 0
: FindOrCreateZeroConstant(
fuzzerutil::GetPointeeTypeIdFromPointerType(
@ -566,7 +567,7 @@ void FuzzerPassDonateModules::HandleTypeOrValue(
TransformationAddGlobalVariable(new_result_id, remapped_pointer_type,
storage_class, initializer_id, true));
} break;
case SpvOpUndef: {
case spv::Op::OpUndef: {
if (!original_id_to_donated_id->count(type_or_value.type_id())) {
// We did not donate the type associated with this undef, so we cannot
// donate the undef.
@ -638,7 +639,7 @@ void FuzzerPassDonateModules::HandleFunctions(
[this, &donated_instructions, donor_ir_context,
&original_id_to_donated_id,
&skipped_instructions](const opt::Instruction* instruction) {
if (instruction->opcode() == SpvOpArrayLength) {
if (instruction->opcode() == spv::Op::OpArrayLength) {
// We treat OpArrayLength specially.
HandleOpArrayLength(*instruction, original_id_to_donated_id,
&donated_instructions);
@ -682,70 +683,70 @@ bool FuzzerPassDonateModules::CanDonateInstruction(
// Now consider instructions we specifically want to skip because we do not
// yet support them.
switch (instruction.opcode()) {
case SpvOpAtomicLoad:
case SpvOpAtomicStore:
case SpvOpAtomicExchange:
case SpvOpAtomicCompareExchange:
case SpvOpAtomicCompareExchangeWeak:
case SpvOpAtomicIIncrement:
case SpvOpAtomicIDecrement:
case SpvOpAtomicIAdd:
case SpvOpAtomicISub:
case SpvOpAtomicSMin:
case SpvOpAtomicUMin:
case SpvOpAtomicSMax:
case SpvOpAtomicUMax:
case SpvOpAtomicAnd:
case SpvOpAtomicOr:
case SpvOpAtomicXor:
case spv::Op::OpAtomicLoad:
case spv::Op::OpAtomicStore:
case spv::Op::OpAtomicExchange:
case spv::Op::OpAtomicCompareExchange:
case spv::Op::OpAtomicCompareExchangeWeak:
case spv::Op::OpAtomicIIncrement:
case spv::Op::OpAtomicIDecrement:
case spv::Op::OpAtomicIAdd:
case spv::Op::OpAtomicISub:
case spv::Op::OpAtomicSMin:
case spv::Op::OpAtomicUMin:
case spv::Op::OpAtomicSMax:
case spv::Op::OpAtomicUMax:
case spv::Op::OpAtomicAnd:
case spv::Op::OpAtomicOr:
case spv::Op::OpAtomicXor:
// We conservatively ignore all atomic instructions at present.
// TODO(https://github.com/KhronosGroup/SPIRV-Tools/issues/3276): Consider
// being less conservative here.
case SpvOpImageSampleImplicitLod:
case SpvOpImageSampleExplicitLod:
case SpvOpImageSampleDrefImplicitLod:
case SpvOpImageSampleDrefExplicitLod:
case SpvOpImageSampleProjImplicitLod:
case SpvOpImageSampleProjExplicitLod:
case SpvOpImageSampleProjDrefImplicitLod:
case SpvOpImageSampleProjDrefExplicitLod:
case SpvOpImageFetch:
case SpvOpImageGather:
case SpvOpImageDrefGather:
case SpvOpImageRead:
case SpvOpImageWrite:
case SpvOpImageSparseSampleImplicitLod:
case SpvOpImageSparseSampleExplicitLod:
case SpvOpImageSparseSampleDrefImplicitLod:
case SpvOpImageSparseSampleDrefExplicitLod:
case SpvOpImageSparseSampleProjImplicitLod:
case SpvOpImageSparseSampleProjExplicitLod:
case SpvOpImageSparseSampleProjDrefImplicitLod:
case SpvOpImageSparseSampleProjDrefExplicitLod:
case SpvOpImageSparseFetch:
case SpvOpImageSparseGather:
case SpvOpImageSparseDrefGather:
case SpvOpImageSparseRead:
case SpvOpImageSampleFootprintNV:
case SpvOpImage:
case SpvOpImageQueryFormat:
case SpvOpImageQueryLevels:
case SpvOpImageQueryLod:
case SpvOpImageQueryOrder:
case SpvOpImageQuerySamples:
case SpvOpImageQuerySize:
case SpvOpImageQuerySizeLod:
case SpvOpSampledImage:
case spv::Op::OpImageSampleImplicitLod:
case spv::Op::OpImageSampleExplicitLod:
case spv::Op::OpImageSampleDrefImplicitLod:
case spv::Op::OpImageSampleDrefExplicitLod:
case spv::Op::OpImageSampleProjImplicitLod:
case spv::Op::OpImageSampleProjExplicitLod:
case spv::Op::OpImageSampleProjDrefImplicitLod:
case spv::Op::OpImageSampleProjDrefExplicitLod:
case spv::Op::OpImageFetch:
case spv::Op::OpImageGather:
case spv::Op::OpImageDrefGather:
case spv::Op::OpImageRead:
case spv::Op::OpImageWrite:
case spv::Op::OpImageSparseSampleImplicitLod:
case spv::Op::OpImageSparseSampleExplicitLod:
case spv::Op::OpImageSparseSampleDrefImplicitLod:
case spv::Op::OpImageSparseSampleDrefExplicitLod:
case spv::Op::OpImageSparseSampleProjImplicitLod:
case spv::Op::OpImageSparseSampleProjExplicitLod:
case spv::Op::OpImageSparseSampleProjDrefImplicitLod:
case spv::Op::OpImageSparseSampleProjDrefExplicitLod:
case spv::Op::OpImageSparseFetch:
case spv::Op::OpImageSparseGather:
case spv::Op::OpImageSparseDrefGather:
case spv::Op::OpImageSparseRead:
case spv::Op::OpImageSampleFootprintNV:
case spv::Op::OpImage:
case spv::Op::OpImageQueryFormat:
case spv::Op::OpImageQueryLevels:
case spv::Op::OpImageQueryLod:
case spv::Op::OpImageQueryOrder:
case spv::Op::OpImageQuerySamples:
case spv::Op::OpImageQuerySize:
case spv::Op::OpImageQuerySizeLod:
case spv::Op::OpSampledImage:
// We ignore all instructions related to accessing images, since we do not
// donate images.
return false;
case SpvOpLoad:
case spv::Op::OpLoad:
switch (donor_ir_context->get_def_use_mgr()
->GetDef(instruction.type_id())
->opcode()) {
case SpvOpTypeImage:
case SpvOpTypeSampledImage:
case SpvOpTypeSampler:
case spv::Op::OpTypeImage:
case spv::Op::OpTypeSampledImage:
case spv::Op::OpTypeSampler:
// Again, we ignore instructions that relate to accessing images.
return false;
default:
@ -783,13 +784,13 @@ bool FuzzerPassDonateModules::CanDonateInstruction(
bool FuzzerPassDonateModules::IsBasicType(
const opt::Instruction& instruction) const {
switch (instruction.opcode()) {
case SpvOpTypeArray:
case SpvOpTypeBool:
case SpvOpTypeFloat:
case SpvOpTypeInt:
case SpvOpTypeMatrix:
case SpvOpTypeStruct:
case SpvOpTypeVector:
case spv::Op::OpTypeArray:
case spv::Op::OpTypeBool:
case spv::Op::OpTypeFloat:
case spv::Op::OpTypeInt:
case spv::Op::OpTypeMatrix:
case spv::Op::OpTypeStruct:
case spv::Op::OpTypeVector:
return true;
default:
return false;
@ -800,7 +801,7 @@ void FuzzerPassDonateModules::HandleOpArrayLength(
const opt::Instruction& instruction,
std::map<uint32_t, uint32_t>* original_id_to_donated_id,
std::vector<protobufs::Instruction>* donated_instructions) const {
assert(instruction.opcode() == SpvOpArrayLength &&
assert(instruction.opcode() == spv::Op::OpArrayLength &&
"Precondition: instruction must be OpArrayLength.");
uint32_t donated_variable_id =
original_id_to_donated_id->at(instruction.GetSingleWordInOperand(0));
@ -809,12 +810,12 @@ void FuzzerPassDonateModules::HandleOpArrayLength(
auto pointer_to_struct_instruction =
GetIRContext()->get_def_use_mgr()->GetDef(
donated_variable_instruction->type_id());
assert(pointer_to_struct_instruction->opcode() == SpvOpTypePointer &&
assert(pointer_to_struct_instruction->opcode() == spv::Op::OpTypePointer &&
"Type of variable must be pointer.");
auto donated_struct_type_instruction =
GetIRContext()->get_def_use_mgr()->GetDef(
pointer_to_struct_instruction->GetSingleWordInOperand(1));
assert(donated_struct_type_instruction->opcode() == SpvOpTypeStruct &&
assert(donated_struct_type_instruction->opcode() == spv::Op::OpTypeStruct &&
"Pointee type of pointer used by OpArrayLength must be struct.");
assert(donated_struct_type_instruction->NumInOperands() ==
instruction.GetSingleWordInOperand(1) + 1 &&
@ -825,7 +826,7 @@ void FuzzerPassDonateModules::HandleOpArrayLength(
donated_struct_type_instruction->NumInOperands() - 1);
auto fixed_size_array_type_instruction =
GetIRContext()->get_def_use_mgr()->GetDef(fixed_size_array_type_id);
assert(fixed_size_array_type_instruction->opcode() == SpvOpTypeArray &&
assert(fixed_size_array_type_instruction->opcode() == spv::Op::OpTypeArray &&
"The donated array type must be fixed-size.");
auto array_size_id =
fixed_size_array_type_instruction->GetSingleWordInOperand(1);
@ -837,7 +838,8 @@ void FuzzerPassDonateModules::HandleOpArrayLength(
}
donated_instructions->push_back(MakeInstructionMessage(
SpvOpCopyObject, original_id_to_donated_id->at(instruction.type_id()),
spv::Op::OpCopyObject,
original_id_to_donated_id->at(instruction.type_id()),
original_id_to_donated_id->at(instruction.result_id()),
opt::Instruction::OperandList({{SPV_OPERAND_TYPE_ID, {array_size_id}}})));
}
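
HandleOpArrayLength, shown above, exploits the same substitution: because every donated runtime array became a fixed-size array, the value that OpArrayLength would have computed is already a known constant, so the donated instruction is just an OpCopyObject of that array-size id. A plain C++ analogue (the struct and names are illustrative only) is:

// Standalone analogue (invented names): with the runtime array replaced by a
// fixed-size array, querying the length of the last struct member no longer
// needs any runtime information -- it is just the array's declared size.
#include <array>
#include <cstddef>

struct DonatedSSBO {
  float scale;
  std::array<int, 8> data;  // was a runtime array in the donor; now fixed-size
};

// Plays the role of the OpCopyObject above: the "array length" is simply the
// compile-time size of the substituted fixed-size array.
constexpr std::size_t DonatedArrayLength() {
  return std::tuple_size<decltype(DonatedSSBO::data)>::value;
}

static_assert(DonatedArrayLength() == 8, "length is known at compile time");
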
@ -892,7 +894,7 @@ void FuzzerPassDonateModules::HandleDifficultInstruction(
// more interesting value later.
auto zero_constant = FindOrCreateZeroConstant(remapped_type_id, true);
donated_instructions->push_back(MakeInstructionMessage(
SpvOpCopyObject, remapped_type_id,
spv::Op::OpCopyObject, remapped_type_id,
original_id_to_donated_id->at(instruction.result_id()),
opt::Instruction::OperandList({{SPV_OPERAND_TYPE_ID, {zero_constant}}})));
}
@ -926,8 +928,8 @@ void FuzzerPassDonateModules::PrepareInstructionForDonation(
(void)(donor_ir_context);
assert((donor_ir_context->get_def_use_mgr()
->GetDef(operand_id)
->opcode() == SpvOpLabel ||
instruction.opcode() == SpvOpPhi) &&
->opcode() == spv::Op::OpLabel ||
instruction.opcode() == spv::Op::OpPhi) &&
"Unsupported forward reference.");
original_id_to_donated_id->insert(
{operand_id, GetFuzzerContext()->GetFreshId()});
@ -942,7 +944,7 @@ void FuzzerPassDonateModules::PrepareInstructionForDonation(
input_operands.push_back({in_operand.type, operand_data});
}
if (instruction.opcode() == SpvOpVariable &&
if (instruction.opcode() == spv::Op::OpVariable &&
instruction.NumInOperands() == 1) {
// This is an uninitialized local variable. Initialize it to zero.
input_operands.push_back(
@ -1017,7 +1019,7 @@ bool FuzzerPassDonateModules::CreateLoopLimiterInfo(
// Adjust OpPhi instructions in the |merge_block|.
for (const auto& inst : *merge_block) {
if (inst.opcode() != SpvOpPhi) {
if (inst.opcode() != spv::Op::OpPhi) {
break;
}
@ -1070,7 +1072,8 @@ bool FuzzerPassDonateModules::MaybeAddLivesafeFunction(
// live-safe. Add them if not already present.
FindOrCreateBoolType(); // Needed for comparisons
FindOrCreatePointerToIntegerType(
32, false, SpvStorageClassFunction); // Needed for adding loop limiters
32, false,
spv::StorageClass::Function); // Needed for adding loop limiters
FindOrCreateIntegerConstant({0}, 32, false,
false); // Needed for initializing loop limiters
FindOrCreateIntegerConstant({1}, 32, false,
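
The types and constants created in the hunk above (bool, a Function-storage pointer to a 32-bit unsigned integer, and the constants 0 and 1) are the ingredients of a loop limiter: each loop in a live-safe donated function gets a counter that starts at 0, is incremented on every iteration, and forces an exit once it reaches a limit. A plain C++ sketch of the shape of that instrumentation (the SPIR-V version of course works on blocks and ids rather than lambdas) is:

// Plain C++ sketch of the loop-limiter idea: run the original loop body, but
// never more than `limit` times, regardless of what the donated exit
// condition does.
#include <cstdint>

template <typename Body, typename Condition>
void LimitedLoop(uint32_t limit, Condition keep_going, Body body) {
  uint32_t loop_limiter = 0;            // the Function-storage counter, starts at 0
  while (keep_going()) {
    if (loop_limiter >= limit) break;   // forced exit once the limit is reached
    ++loop_limiter;                     // increment by the constant 1 each iteration
    body();
  }
}
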
@ -1107,8 +1110,8 @@ bool FuzzerPassDonateModules::MaybeAddLivesafeFunction(
for (auto& block : function_to_donate) {
for (auto& inst : block) {
switch (inst.opcode()) {
case SpvOpAccessChain:
case SpvOpInBoundsAccessChain: {
case spv::Op::OpAccessChain:
case spv::Op::OpInBoundsAccessChain: {
protobufs::AccessChainClampingInfo clamping_info;
clamping_info.set_access_chain_id(
original_id_to_donated_id.at(inst.result_id()));
@ -1118,7 +1121,8 @@ bool FuzzerPassDonateModules::MaybeAddLivesafeFunction(
assert(base_object && "The base object must exist.");
auto pointer_type = donor_ir_context->get_def_use_mgr()->GetDef(
base_object->type_id());
assert(pointer_type && pointer_type->opcode() == SpvOpTypePointer &&
assert(pointer_type &&
pointer_type->opcode() == spv::Op::OpTypePointer &&
"The base object must have pointer type.");
auto should_be_composite_type =
@ -1138,7 +1142,8 @@ bool FuzzerPassDonateModules::MaybeAddLivesafeFunction(
// Get the bound for the component being indexed into.
uint32_t bound;
if (should_be_composite_type->opcode() == SpvOpTypeRuntimeArray) {
if (should_be_composite_type->opcode() ==
spv::Op::OpTypeRuntimeArray) {
// The donor is indexing into a runtime array. We do not
// donate runtime arrays. Instead, we donate a corresponding
// fixed-size array for every runtime array. We should thus
@ -1148,7 +1153,7 @@ bool FuzzerPassDonateModules::MaybeAddLivesafeFunction(
GetIRContext()->get_def_use_mgr()->GetDef(
original_id_to_donated_id.at(
should_be_composite_type->result_id()));
assert(fixed_size_array_type->opcode() == SpvOpTypeArray &&
assert(fixed_size_array_type->opcode() == spv::Op::OpTypeArray &&
"A runtime array type in the donor should have been "
"replaced by a fixed-sized array in the recipient.");
// The size of this fixed-size array is a suitable bound.
@ -1163,12 +1168,12 @@ bool FuzzerPassDonateModules::MaybeAddLivesafeFunction(
donor_ir_context->get_def_use_mgr()->GetDef(index_id);
auto index_type_inst = donor_ir_context->get_def_use_mgr()->GetDef(
index_inst->type_id());
assert(index_type_inst->opcode() == SpvOpTypeInt);
assert(index_type_inst->opcode() == spv::Op::OpTypeInt);
opt::analysis::Integer* index_int_type =
donor_ir_context->get_type_mgr()
->GetType(index_type_inst->result_id())
->AsInteger();
if (index_inst->opcode() != SpvOpConstant) {
if (index_inst->opcode() != spv::Op::OpConstant) {
// We will have to clamp this index, so we need a constant
// whose value is one less than the bound, to compare
// against and to use as the clamped value.
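
The comment above describes the clamping rule for live-safe access chains: a dynamic index into a composite with `bound` elements is compared against the constant bound - 1 and replaced by that constant whenever it is larger, so out-of-bounds indexing becomes impossible. As a standalone sketch (assuming bound > 0):

// Standalone sketch of the clamping applied to donated access chain indices:
// index i into a composite with `bound` elements becomes
// (i <= bound - 1 ? i : bound - 1), which is exactly what the "one less than
// the bound" constant mentioned above is needed for.
#include <cstdint>

uint32_t ClampIndex(uint32_t index, uint32_t bound) {
  const uint32_t largest_valid = bound - 1;  // requires bound > 0
  return index <= largest_valid ? index : largest_valid;
}
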
@ -1194,7 +1199,7 @@ bool FuzzerPassDonateModules::MaybeAddLivesafeFunction(
uint32_t kill_unreachable_return_value_id = 0;
auto function_return_type_inst =
donor_ir_context->get_def_use_mgr()->GetDef(function_to_donate.type_id());
if (function_return_type_inst->opcode() != SpvOpTypeVoid &&
if (function_return_type_inst->opcode() != spv::Op::OpTypeVoid &&
fuzzerutil::FunctionContainsOpKillOrUnreachable(function_to_donate)) {
kill_unreachable_return_value_id = FindOrCreateZeroConstant(
original_id_to_donated_id.at(function_return_type_inst->result_id()),

View File

@ -45,7 +45,8 @@ class FuzzerPassDonateModules : public FuzzerPass {
private:
// Adapts a storage class coming from a donor module so that it will work
// in a recipient module, e.g. by changing Uniform to Private.
static SpvStorageClass AdaptStorageClass(SpvStorageClass donor_storage_class);
static spv::StorageClass AdaptStorageClass(
spv::StorageClass donor_storage_class);
// Identifies all external instruction set imports in |donor_ir_context| and
// populates |original_id_to_donated_id| with a mapping from the donor's id

View File

@ -40,8 +40,8 @@ void FuzzerPassExpandVectorReductions::Apply() {
}
// |instruction| must be OpAny or OpAll.
if (instruction.opcode() != SpvOpAny &&
instruction.opcode() != SpvOpAll) {
if (instruction.opcode() != spv::Op::OpAny &&
instruction.opcode() != spv::Op::OpAll) {
continue;
}

View File

@ -48,8 +48,8 @@ void FuzzerPassFlattenConditionalBranches::Apply() {
// Only consider this block if it is the header of a conditional, with a
// non-irrelevant condition.
if (block.GetMergeInst() &&
block.GetMergeInst()->opcode() == SpvOpSelectionMerge &&
block.terminator()->opcode() == SpvOpBranchConditional &&
block.GetMergeInst()->opcode() == spv::Op::OpSelectionMerge &&
block.terminator()->opcode() == spv::Op::OpBranchConditional &&
!GetTransformationContext()->GetFactManager()->IdIsIrrelevant(
block.terminator()->GetSingleWordInOperand(0))) {
selection_headers.emplace_back(&block);
@ -94,11 +94,11 @@ void FuzzerPassFlattenConditionalBranches::Apply() {
->get_def_use_mgr()
->GetDef(phi_instruction->type_id())
->opcode()) {
case SpvOpTypeBool:
case SpvOpTypeInt:
case SpvOpTypeFloat:
case SpvOpTypePointer:
case SpvOpTypeVector:
case spv::Op::OpTypeBool:
case spv::Op::OpTypeInt:
case spv::Op::OpTypeFloat:
case spv::Op::OpTypePointer:
case spv::Op::OpTypeVector:
return true;
default:
return false;
@ -143,7 +143,7 @@ void FuzzerPassFlattenConditionalBranches::Apply() {
GetIRContext()->get_def_use_mgr()->GetDef(
phi_instruction->type_id());
switch (type_instruction->opcode()) {
case SpvOpTypeVector: {
case spv::Op::OpTypeVector: {
uint32_t dimension =
type_instruction->GetSingleWordInOperand(1);
switch (dimension) {

View File

@ -64,7 +64,7 @@ void FuzzerPassInlineFunctions::Apply() {
auto* function_call_block =
GetIRContext()->get_instr_block(function_call_instruction);
if ((function_call_instruction != &*--function_call_block->tail() ||
function_call_block->terminator()->opcode() != SpvOpBranch) &&
function_call_block->terminator()->opcode() != spv::Op::OpBranch) &&
!MaybeApplyTransformation(TransformationSplitBlock(
MakeInstructionDescriptor(GetIRContext(),
function_call_instruction->NextNode()),

View File

@ -47,18 +47,20 @@ void FuzzerPassMakeVectorOperationsDynamic::Apply() {
}
// Make sure |instruction| has only one indexing operand.
assert(instruction.NumInOperands() ==
(instruction.opcode() == SpvOpCompositeExtract ? 2 : 3) &&
"FuzzerPassMakeVectorOperationsDynamic: the composite "
"instruction must have "
"only one indexing operand.");
assert(
instruction.NumInOperands() ==
(instruction.opcode() == spv::Op::OpCompositeExtract ? 2 : 3) &&
"FuzzerPassMakeVectorOperationsDynamic: the composite "
"instruction must have "
"only one indexing operand.");
// Applies the make vector operation dynamic transformation.
ApplyTransformation(TransformationMakeVectorOperationDynamic(
instruction.result_id(),
FindOrCreateIntegerConstant(
{instruction.GetSingleWordInOperand(
instruction.opcode() == SpvOpCompositeExtract ? 1 : 2)},
instruction.opcode() == spv::Op::OpCompositeExtract ? 1
: 2)},
32, GetFuzzerContext()->ChooseEven(), false)));
}
}

View File

@ -64,11 +64,11 @@ void FuzzerPassMergeFunctionReturns::Apply() {
[this, function](
opt::BasicBlock* /*unused*/, opt::BasicBlock::iterator inst_it,
const protobufs::InstructionDescriptor& instruction_descriptor) {
const SpvOp opcode = inst_it->opcode();
const spv::Op opcode = inst_it->opcode();
switch (opcode) {
case SpvOpKill:
case SpvOpUnreachable:
case SpvOpTerminateInvocation: {
case spv::Op::OpKill:
case spv::Op::OpUnreachable:
case spv::Op::OpTerminateInvocation: {
// This is an early termination instruction - we need to wrap it
// so that it becomes a return.
if (TransformationWrapEarlyTerminatorInFunction::
@ -85,7 +85,7 @@ void FuzzerPassMergeFunctionReturns::Apply() {
GetIRContext()->get_def_use_mgr()->GetDef(
function->type_id());
uint32_t returned_value_id;
if (function_return_type->opcode() == SpvOpTypeVoid) {
if (function_return_type->opcode() == spv::Op::OpTypeVoid) {
// No value is needed.
returned_value_id = 0;
} else if (fuzzerutil::CanCreateConstant(
@ -130,7 +130,7 @@ void FuzzerPassMergeFunctionReturns::Apply() {
// If the entry block does not branch unconditionally to another block,
// split it.
if (function->entry()->terminator()->opcode() != SpvOpBranch) {
if (function->entry()->terminator()->opcode() != spv::Op::OpBranch) {
SplitBlockAfterOpPhiOrOpVariable(function->entry()->id());
}
@ -149,9 +149,9 @@ void FuzzerPassMergeFunctionReturns::Apply() {
if (GetIRContext()
->get_instr_block(merge_block)
->WhileEachInst([](opt::Instruction* inst) {
return inst->opcode() == SpvOpLabel ||
inst->opcode() == SpvOpPhi ||
inst->opcode() == SpvOpBranch;
return inst->opcode() == spv::Op::OpLabel ||
inst->opcode() == spv::Op::OpPhi ||
inst->opcode() == spv::Op::OpBranch;
})) {
actual_merge_blocks.emplace_back(merge_block);
continue;
@ -324,7 +324,8 @@ FuzzerPassMergeFunctionReturns::GetInfoNeededForMergeBlocks(
bool FuzzerPassMergeFunctionReturns::IsEarlyTerminatorWrapper(
const opt::Function& function) const {
for (SpvOp opcode : {SpvOpKill, SpvOpUnreachable, SpvOpTerminateInvocation}) {
for (spv::Op opcode : {spv::Op::OpKill, spv::Op::OpUnreachable,
spv::Op::OpTerminateInvocation}) {
if (TransformationWrapEarlyTerminatorInFunction::MaybeGetWrapperFunction(
GetIRContext(), opcode) == &function) {
return true;

View File

@ -39,7 +39,8 @@ void FuzzerPassMutatePointers::Apply() {
return;
}
if (!fuzzerutil::CanInsertOpcodeBeforeInstruction(SpvOpLoad, inst_it)) {
if (!fuzzerutil::CanInsertOpcodeBeforeInstruction(spv::Op::OpLoad,
inst_it)) {
return;
}

View File

@ -37,21 +37,21 @@ FuzzerPassObfuscateConstants::FuzzerPassObfuscateConstants(
void FuzzerPassObfuscateConstants::ObfuscateBoolConstantViaConstantPair(
uint32_t depth, const protobufs::IdUseDescriptor& bool_constant_use,
const std::vector<SpvOp>& greater_than_opcodes,
const std::vector<SpvOp>& less_than_opcodes, uint32_t constant_id_1,
const std::vector<spv::Op>& greater_than_opcodes,
const std::vector<spv::Op>& less_than_opcodes, uint32_t constant_id_1,
uint32_t constant_id_2, bool first_constant_is_larger) {
auto bool_constant_opcode = GetIRContext()
->get_def_use_mgr()
->GetDef(bool_constant_use.id_of_interest())
->opcode();
assert((bool_constant_opcode == SpvOpConstantFalse ||
bool_constant_opcode == SpvOpConstantTrue) &&
assert((bool_constant_opcode == spv::Op::OpConstantFalse ||
bool_constant_opcode == spv::Op::OpConstantTrue) &&
"Precondition: this must be a usage of a boolean constant.");
// Pick an opcode at random. First randomly decide whether to generate
// a 'greater than' or 'less than' kind of opcode, and then select a
// random opcode from the resulting subset.
SpvOp comparison_opcode;
spv::Op comparison_opcode;
if (GetFuzzerContext()->ChooseEven()) {
comparison_opcode = greater_than_opcodes[GetFuzzerContext()->RandomIndex(
greater_than_opcodes)];
@ -68,9 +68,9 @@ void FuzzerPassObfuscateConstants::ObfuscateBoolConstantViaConstantPair(
comparison_opcode) != greater_than_opcodes.end();
uint32_t lhs_id;
uint32_t rhs_id;
if ((bool_constant_opcode == SpvOpConstantTrue &&
if ((bool_constant_opcode == spv::Op::OpConstantTrue &&
first_constant_is_larger == is_greater_than_opcode) ||
(bool_constant_opcode == SpvOpConstantFalse &&
(bool_constant_opcode == spv::Op::OpConstantFalse &&
first_constant_is_larger != is_greater_than_opcode)) {
lhs_id = constant_id_1;
rhs_id = constant_id_2;
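
The lhs/rhs selection above implements the constant-pair obfuscation: a use of true or false is replaced by a comparison between two constants whose ordering is already known as a fact, with the opcode (greater-than vs. less-than flavour) chosen at random and the operand order arranged so the comparison still evaluates to the original boolean. A self-contained sketch of that arrangement, assuming the first constant is the larger one, is:

// Standalone sketch of the "constant pair" obfuscation: given two values with
// a known ordering, synthesise a comparison whose result equals the boolean
// being obfuscated. Opcode choice and operand order are the two knobs.
#include <cassert>

enum class Cmp { GreaterThan, LessThan };  // stand-ins for the SPIR-V opcodes

// Returns a comparison arranged to evaluate to `desired`, assuming c1 > c2
// (i.e. first_constant_is_larger in the code above).
template <typename T>
bool ObfuscatedBool(bool desired, Cmp opcode, T c1, T c2) {
  bool is_greater_than = opcode == Cmp::GreaterThan;
  // Same table as the hunk above: put the larger constant on the side that
  // makes the chosen comparison come out as `desired`.
  T lhs = (desired == is_greater_than) ? c1 : c2;
  T rhs = (desired == is_greater_than) ? c2 : c1;
  return is_greater_than ? (lhs > rhs) : (lhs < rhs);
}

int main() {
  assert(ObfuscatedBool(true, Cmp::GreaterThan, 7, 3) == true);
  assert(ObfuscatedBool(true, Cmp::LessThan, 7, 3) == true);
  assert(ObfuscatedBool(false, Cmp::GreaterThan, 7, 3) == false);
  assert(ObfuscatedBool(false, Cmp::LessThan, 7, 3) == false);
}
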
@ -147,12 +147,12 @@ void FuzzerPassObfuscateConstants::ObfuscateBoolConstantViaFloatConstantPair(
first_constant_is_larger =
float_constant_1->GetDouble() > float_constant_2->GetDouble();
}
std::vector<SpvOp> greater_than_opcodes{
SpvOpFOrdGreaterThan, SpvOpFOrdGreaterThanEqual, SpvOpFUnordGreaterThan,
SpvOpFUnordGreaterThanEqual};
std::vector<SpvOp> less_than_opcodes{
SpvOpFOrdGreaterThan, SpvOpFOrdGreaterThanEqual, SpvOpFUnordGreaterThan,
SpvOpFUnordGreaterThanEqual};
std::vector<spv::Op> greater_than_opcodes{
spv::Op::OpFOrdGreaterThan, spv::Op::OpFOrdGreaterThanEqual,
spv::Op::OpFUnordGreaterThan, spv::Op::OpFUnordGreaterThanEqual};
std::vector<spv::Op> less_than_opcodes{
spv::Op::OpFOrdGreaterThan, spv::Op::OpFOrdGreaterThanEqual,
spv::Op::OpFUnordGreaterThan, spv::Op::OpFUnordGreaterThanEqual};
ObfuscateBoolConstantViaConstantPair(
depth, bool_constant_use, greater_than_opcodes, less_than_opcodes,
@ -190,9 +190,10 @@ void FuzzerPassObfuscateConstants::
first_constant_is_larger =
signed_int_constant_1->GetS64() > signed_int_constant_2->GetS64();
}
std::vector<SpvOp> greater_than_opcodes{SpvOpSGreaterThan,
SpvOpSGreaterThanEqual};
std::vector<SpvOp> less_than_opcodes{SpvOpSLessThan, SpvOpSLessThanEqual};
std::vector<spv::Op> greater_than_opcodes{spv::Op::OpSGreaterThan,
spv::Op::OpSGreaterThanEqual};
std::vector<spv::Op> less_than_opcodes{spv::Op::OpSLessThan,
spv::Op::OpSLessThanEqual};
ObfuscateBoolConstantViaConstantPair(
depth, bool_constant_use, greater_than_opcodes, less_than_opcodes,
@ -232,9 +233,10 @@ void FuzzerPassObfuscateConstants::
first_constant_is_larger =
unsigned_int_constant_1->GetU64() > unsigned_int_constant_2->GetU64();
}
std::vector<SpvOp> greater_than_opcodes{SpvOpUGreaterThan,
SpvOpUGreaterThanEqual};
std::vector<SpvOp> less_than_opcodes{SpvOpULessThan, SpvOpULessThanEqual};
std::vector<spv::Op> greater_than_opcodes{spv::Op::OpUGreaterThan,
spv::Op::OpUGreaterThanEqual};
std::vector<spv::Op> less_than_opcodes{spv::Op::OpULessThan,
spv::Op::OpULessThanEqual};
ObfuscateBoolConstantViaConstantPair(
depth, bool_constant_use, greater_than_opcodes, less_than_opcodes,
@ -379,7 +381,7 @@ void FuzzerPassObfuscateConstants::ObfuscateScalarConstant(
uniform_descriptor.index());
assert(element_type_id && "Type of uniform variable is invalid");
FindOrCreatePointerType(element_type_id, SpvStorageClassUniform);
FindOrCreatePointerType(element_type_id, spv::StorageClass::Uniform);
// Create, apply and record a transformation to replace the constant use with
// the result of a load from the chosen uniform.
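
The other obfuscation path, described in the comment above, swaps a literal constant for a load from a uniform buffer element that the fact manager guarantees holds the same value; the Uniform pointer type is created up front so the load's access chain can be formed. A very loose standalone analogue (the names and the flattened-buffer representation are inventions of this sketch) is:

// Sketch only: replaces a literal constant use by a read from a uniform
// block, which preserves behaviour solely because a recorded fact says the
// element at `index` holds exactly that constant.
#include <cstdint>
#include <vector>

template <typename T>
T ObfuscatedConstant(const std::vector<T>& uniform_block_elements,
                     uint32_t index) {
  return uniform_block_elements.at(index);  // stands in for the emitted load
}
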
@ -394,11 +396,11 @@ void FuzzerPassObfuscateConstants::ObfuscateConstant(
->get_def_use_mgr()
->GetDef(constant_use.id_of_interest())
->opcode()) {
case SpvOpConstantTrue:
case SpvOpConstantFalse:
case spv::Op::OpConstantTrue:
case spv::Op::OpConstantFalse:
ObfuscateBoolConstant(depth, constant_use);
break;
case SpvOpConstant:
case spv::Op::OpConstant:
ObfuscateScalarConstant(depth, constant_use);
break;
default:
@ -410,7 +412,7 @@ void FuzzerPassObfuscateConstants::ObfuscateConstant(
void FuzzerPassObfuscateConstants::MaybeAddConstantIdUse(
const opt::Instruction& inst, uint32_t in_operand_index,
uint32_t base_instruction_result_id,
const std::map<SpvOp, uint32_t>& skipped_opcode_count,
const std::map<spv::Op, uint32_t>& skipped_opcode_count,
std::vector<protobufs::IdUseDescriptor>* constant_uses) {
if (inst.GetInOperand(in_operand_index).type != SPV_OPERAND_TYPE_ID) {
// The operand is not an id, so it cannot be a constant id.
@ -420,15 +422,15 @@ void FuzzerPassObfuscateConstants::MaybeAddConstantIdUse(
auto operand_definition =
GetIRContext()->get_def_use_mgr()->GetDef(operand_id);
switch (operand_definition->opcode()) {
case SpvOpConstantFalse:
case SpvOpConstantTrue:
case SpvOpConstant: {
case spv::Op::OpConstantFalse:
case spv::Op::OpConstantTrue:
case spv::Op::OpConstant: {
// The operand is a constant id, so make an id use descriptor and record
// it.
protobufs::IdUseDescriptor id_use_descriptor;
id_use_descriptor.set_id_of_interest(operand_id);
id_use_descriptor.mutable_enclosing_instruction()
->set_target_instruction_opcode(inst.opcode());
->set_target_instruction_opcode(uint32_t(inst.opcode()));
id_use_descriptor.mutable_enclosing_instruction()
->set_base_instruction_result_id(base_instruction_result_id);
id_use_descriptor.mutable_enclosing_instruction()
@ -461,7 +463,7 @@ void FuzzerPassObfuscateConstants::Apply() {
// opcode need to be skipped in order to find the instruction of interest
// from the base instruction. We maintain a mapping that records a skip
// count for each relevant opcode.
std::map<SpvOp, uint32_t> skipped_opcode_count;
std::map<spv::Op, uint32_t> skipped_opcode_count;
// Go through each instruction in the block.
for (auto& inst : block) {
@ -478,7 +480,7 @@ void FuzzerPassObfuscateConstants::Apply() {
// The instruction must not be an OpVariable, the only id that an
// OpVariable uses is an initializer id, which has to remain
// constant.
if (inst.opcode() != SpvOpVariable) {
if (inst.opcode() != spv::Op::OpVariable) {
// Consider each operand of the instruction, and add a constant id
// use for the operand if relevant.
for (uint32_t in_operand_index = 0;

View File

@ -85,8 +85,8 @@ class FuzzerPassObfuscateConstants : public FuzzerPass {
// (similar for |less_than_opcodes|).
void ObfuscateBoolConstantViaConstantPair(
uint32_t depth, const protobufs::IdUseDescriptor& bool_constant_use,
const std::vector<SpvOp>& greater_than_opcodes,
const std::vector<SpvOp>& less_than_opcodes, uint32_t constant_id_1,
const std::vector<spv::Op>& greater_than_opcodes,
const std::vector<spv::Op>& less_than_opcodes, uint32_t constant_id_1,
uint32_t constant_id_2, bool first_constant_is_larger);
// A helper method to determine whether input operand |in_operand_index| of
@ -96,7 +96,7 @@ class FuzzerPassObfuscateConstants : public FuzzerPass {
void MaybeAddConstantIdUse(
const opt::Instruction& inst, uint32_t in_operand_index,
uint32_t base_instruction_result_id,
const std::map<SpvOp, uint32_t>& skipped_opcode_count,
const std::map<spv::Op, uint32_t>& skipped_opcode_count,
std::vector<protobufs::IdUseDescriptor>* constant_uses);
// Returns a vector of unique words that denote constants. Every such constant

Some files were not shown because too many files have changed in this diff.