Remove hashing of unknown functions

Use a simple linear search instead. This greatly simplifies the logic and
reduces the places for the logic to go awry.
This commit is contained in:
Charles Giessen 2022-03-10 16:48:14 -07:00 committed by Charles Giessen
parent b5e6d4cb07
commit 2ded39d6f8
20 changed files with 379 additions and 513 deletions

View File

@ -121,11 +121,11 @@ if (!is_android) {
"loader/loader.h",
"loader/log.c",
"loader/log.h",
"loader/murmurhash.c",
"loader/murmurhash.h",
"loader/phys_dev_ext.c",
"loader/terminator.c",
"loader/trampoline.c",
"loader/unknown_function_handling.h",
"loader/unknown_function_handling.c",
"loader/vk_loader_layer.h",
# TODO(jmadill): Use assembler where available.

View File

@ -20,9 +20,9 @@ NAME=vulkan
# Make the library
SRCS = cJSON.c debug_utils.c dev_ext_trampoline.c loader.c murmurhash.c \
SRCS = cJSON.c debug_utils.c dev_ext_trampoline.c loader.c \
phys_dev_ext.c trampoline.c unknown_ext_chain.c wsi.c \
extension_manual.c
extension_manual.c unknown_function_handling.c
LDFLAGS += -Wl,--unresolved-symbols=report-all -Wl,--no-undefined -Wl,-fPIC

View File

@ -106,7 +106,6 @@ set(NORMAL_LOADER_SRCS
gpa_helper.c
loader.c
log.c
murmurhash.c
terminator.c
trampoline.c
unknown_function_handling.c

View File

@ -53,16 +53,14 @@ int main(int argc, char **argv) {
struct ValueInfo values[] = {
// clang-format off
{ .name = "VK_DEBUG_REPORT_ERROR_BIT_EXT", .value = (size_t) VK_DEBUG_REPORT_ERROR_BIT_EXT,
.comment = "The numerical value of the enum value 'VK_DEBUG_REPORT_ERROR_BIT_EXT'" },
{ .name = "VULKAN_LOADER_ERROR_BIT", .value = (size_t) VULKAN_LOADER_ERROR_BIT,
.comment = "The numerical value of the enum value 'VULKAN_LOADER_ERROR_BIT'" },
{ .name = "PTR_SIZE", .value = sizeof(void*),
.comment = "The size of a pointer" },
{ .name = "HASH_SIZE", .value = sizeof(struct loader_dispatch_hash_entry),
.comment = "The size of a 'loader_dispatch_hash_entry' struct" },
{ .name = "HASH_OFFSET_INSTANCE", .value = offsetof(struct loader_instance, phys_dev_ext_disp_hash),
.comment = "The offset of 'phys_dev_ext_disp_hash' within a 'loader_instance' struct" },
{ .name = "CHAR_PTR_SIZE", .value = sizeof(char *),
.comment = "The size of a 'const char *' struct" },
{ .name = "FUNCTION_OFFSET_INSTANCE", .value = offsetof(struct loader_instance, phys_dev_ext_disp_functions),
.comment = "The offset of 'phys_dev_ext_disp_functions' within a 'loader_instance' struct" },
{ .name = "PHYS_DEV_OFFSET_INST_DISPATCH", .value = offsetof(struct loader_instance_dispatch_table, phys_dev_ext),
.comment = "The offset of 'phys_dev_ext' within in 'loader_instance_dispatch_table' struct" },
{ .name = "PHYS_DEV_OFFSET_PHYS_DEV_TRAMP", .value = offsetof(struct loader_physical_device_tramp, phys_dev),
@ -75,8 +73,6 @@ int main(int argc, char **argv) {
.comment = "The offset of 'this_instance' within a 'loader_icd_term' struct" },
{ .name = "DISPATCH_OFFSET_ICD_TERM", .value = offsetof(struct loader_icd_term, phys_dev_ext),
.comment = "The offset of 'phys_dev_ext' within a 'loader_icd_term' struct" },
{ .name = "FUNC_NAME_OFFSET_HASH", .value = offsetof(struct loader_dispatch_hash_entry, func_name),
.comment = "The offset of 'func_name' within a 'loader_dispatch_hash_entry' struct" },
{ .name = "EXT_OFFSET_DEVICE_DISPATCH", .value = offsetof(struct loader_dev_dispatch_table, ext_dispatch),
.comment = "The offset of 'ext_dispatch' within a 'loader_dev_dispatch_table' struct" },
// clang-format on
@ -90,19 +86,20 @@ int main(int argc, char **argv) {
}
} else if (!strcmp(assembler, "GAS")) {
#if defined(__x86_64__) || defined(__i386__)
const char* comment_delimiter = "#";
const char *comment_delimiter = "#";
#if defined(__x86_64__)
fprintf(file, ".set X86_64, 1\n");
#endif // defined(__x86_64__)
#endif // defined(__x86_64__)
#elif defined(__aarch64__)
const char* comment_delimiter = "//";
const char *comment_delimiter = "//";
fprintf(file, ".set AARCH_64, 1\n");
#else
// Default comment delimiter
const char* comment_delimiter = "#";
const char *comment_delimiter = "#";
#endif
for (size_t i = 0; i < sizeof(values) / sizeof(values[0]); ++i) {
fprintf(file, ".set %-32s, " SIZE_T_FMT "%s %s\n", values[i].name, values[i].value, comment_delimiter, values[i].comment);
fprintf(file, ".set %-32s, " SIZE_T_FMT "%s %s\n", values[i].name, values[i].value, comment_delimiter,
values[i].comment);
}
}
return fclose(file);

View File

@ -330,7 +330,7 @@ VKAPI_ATTR void VKAPI_CALL loader_init_device_dispatch_table(struct loader_dev_d
VkDevice dev) {
VkLayerDispatchTable *table = &dev_table->core_dispatch;
table->magic = DEVICE_DISP_TABLE_MAGIC_NUMBER;
for (uint32_t i = 0; i < MAX_NUM_UNKNOWN_EXTS; i++) dev_table->ext_dispatch.dev_ext[i] = (PFN_vkDevExt)vkDevExtError;
for (uint32_t i = 0; i < MAX_NUM_UNKNOWN_EXTS; i++) dev_table->ext_dispatch[i] = (PFN_vkDevExt)vkDevExtError;
// ---- Core 1_0 commands
table->GetDeviceProcAddr = gpa;

View File

@ -259,7 +259,8 @@ void *trampoline_get_proc_addr(struct loader_instance *inst, const char *funcNam
if (extension_instance_gpa(inst, funcName, &addr)) return addr;
// Unknown physical device extensions
if (loader_phys_dev_ext_gpa(inst, funcName, true, &addr, NULL)) return addr;
addr = loader_phys_dev_ext_gpa_tramp(inst, funcName);
if (NULL != addr) return addr;
// Unknown device extensions
addr = loader_dev_ext_gpa(inst, funcName);

View File

@ -3909,7 +3909,8 @@ static VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL loader_gpdpa_instance_internal(V
return addr;
}
if (loader_phys_dev_ext_gpa(loader_get_instance(inst), pName, true, NULL, &addr)) return addr;
addr = loader_phys_dev_ext_gpa_term(loader_get_instance(inst), pName);
if (NULL != addr) return addr;
// Don't call down the chain, this would be an infinite loop
loader_log(NULL, VULKAN_LOADER_DEBUG_BIT, 0, "loader_gpdpa_instance_internal() unrecognized name %s", pName);
@ -3934,9 +3935,8 @@ static VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL loader_gpdpa_instance_terminator
// Get the terminator, but don't perform checking since it should already
// have been setup if we get here.
if (loader_phys_dev_ext_gpa(loader_get_instance(inst), pName, false, NULL, &addr)) {
return addr;
}
addr = loader_phys_dev_ext_gpa_term_no_check(loader_get_instance(inst), pName);
if (NULL != addr) return addr;
// Don't call down the chain, this would be an infinite loop
loader_log(NULL, VULKAN_LOADER_DEBUG_BIT, 0, "loader_gpdpa_instance_terminator() unrecognized name %s", pName);
@ -5358,7 +5358,7 @@ out:
}
VKAPI_ATTR void VKAPI_CALL terminator_DestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) {
struct loader_instance *ptr_instance = loader_instance(instance);
struct loader_instance *ptr_instance = loader_get_instance(instance);
if (NULL == ptr_instance) {
return;
}

View File

@ -31,8 +31,6 @@
#include "loader_common.h"
static inline struct loader_instance *loader_instance(VkInstance instance) { return (struct loader_instance *)instance; }
static inline VkPhysicalDevice loader_unwrap_physical_device(VkPhysicalDevice physicalDevice) {
struct loader_physical_device_tramp *phys_dev = (struct loader_physical_device_tramp *)physicalDevice;
if (PHYS_TRAMP_MAGIC_NUMBER != phys_dev->magic) {

View File

@ -159,29 +159,11 @@ struct loader_layer_list {
struct loader_layer_properties *list;
};
struct loader_dispatch_hash_list {
size_t capacity;
uint32_t count;
uint32_t *index; // index into the dev_ext dispatch table
};
// loader_dispatch_hash_entry and loader_dev_ext_dispatch_table.dev_ext have
// one to one correspondence; one loader_dispatch_hash_entry for one dev_ext
// dispatch entry.
// Also have a one to one correspondence with functions in dev_ext_trampoline.c
struct loader_dispatch_hash_entry {
char *func_name;
struct loader_dispatch_hash_list list; // to handle hashing collisions
};
typedef VkResult(VKAPI_PTR *PFN_vkDevExt)(VkDevice device);
struct loader_dev_ext_dispatch_table {
PFN_vkDevExt dev_ext[MAX_NUM_UNKNOWN_EXTS];
};
struct loader_dev_dispatch_table {
VkLayerDispatchTable core_dispatch;
struct loader_dev_ext_dispatch_table ext_dispatch;
PFN_vkDevExt ext_dispatch[MAX_NUM_UNKNOWN_EXTS];
};
// per CreateDevice structure
@ -278,8 +260,10 @@ struct loader_instance {
struct loader_icd_term *icd_terms;
struct loader_icd_tramp_list icd_tramp_list;
struct loader_dispatch_hash_entry dev_ext_disp_hash[MAX_NUM_UNKNOWN_EXTS];
struct loader_dispatch_hash_entry phys_dev_ext_disp_hash[MAX_NUM_UNKNOWN_EXTS];
uint32_t dev_ext_disp_function_count;
char *dev_ext_disp_functions[MAX_NUM_UNKNOWN_EXTS];
uint32_t phys_dev_ext_disp_function_count;
char *phys_dev_ext_disp_functions[MAX_NUM_UNKNOWN_EXTS];
struct loader_msg_callback_map_entry *icd_msg_callback_map;

View File

@ -1,100 +0,0 @@
/**
* `murmurhash.h' - murmurhash
*
* copyright (c) 2014 joseph werle <joseph.werle@gmail.com>
* Copyright (c) 2015-2021 The Khronos Group Inc.
* Copyright (c) 2015-2021 Valve Corporation
* Copyright (c) 2015-2021 LunarG, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and/or associated documentation files (the "Materials"), to
* deal in the Materials without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Materials, and to permit persons to whom the Materials are
* furnished to do so, subject to the following conditions:
*
* The above copyright notice(s) and this permission notice shall be included in
* all copies or substantial portions of the Materials.
*
* THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
*
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE
* USE OR OTHER DEALINGS IN THE MATERIALS.
*/
#include "murmurhash.h"
#include <memory.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
// MurmurHash3 (x86, 32-bit variant).
// Hashes `len` bytes at `key`, mixed with `seed`, and returns a 32-bit hash.
// The input is consumed as whole 4-byte little-endian chunks, then the
// remaining 0-3 tail bytes are folded in, then a final avalanche mix is
// applied so every input bit influences every output bit.
// NOTE: produces byte-identical results to the previous implementation;
// the only code change is const-correctness (the key is never written).
uint32_t murmurhash(const char *key, size_t len, uint32_t seed) {
    const uint32_t c1 = 0xcc9e2d51;
    const uint32_t c2 = 0x1b873593;
    const uint32_t r1 = 15;
    const uint32_t r2 = 13;
    const uint32_t m = 5;
    const uint32_t n = 0xe6546b64;
    uint32_t h = seed;
    uint32_t k = 0;
    const uint8_t *d = (const uint8_t *)key;  // const-correct: input is read-only
    int l = (int)len / 4;                     // number of whole 4-byte chunks
    // Both pointers sit just past the last whole chunk: the body loop indexes
    // backwards from `chunks`, and the 0-3 byte remainder starts at `tail`.
    const uint8_t *chunks = (const uint8_t *)(d + l * 4);
    const uint8_t *tail = (const uint8_t *)(d + l * 4);
    // Body: mix each whole 4-byte chunk of `key` into the hash.
    for (int i = -l; i != 0; ++i) {
        // memcpy avoids unaligned/aliasing-unsafe loads from the input
        memcpy(&k, chunks + i * 4, sizeof(k));
        k *= c1;
        k = (k << r1) | (k >> (32 - r1));
        k *= c2;
        h ^= k;
        h = (h << r2) | (h >> (32 - r2));
        h = h * m + n;
    }
    // Tail: fold in the remaining `len % 4` bytes (0-3 of them).
    k = 0;
    switch (len & 3) {
        case 3:
            k ^= (tail[2] << 16);
            // fall through
        case 2:
            k ^= (tail[1] << 8);
            // fall through
        case 1:
            k ^= tail[0];
            k *= c1;
            k = (k << r1) | (k >> (32 - r1));
            k *= c2;
            h ^= k;
    }
    // Finalization: xor in the length, then avalanche.
    h ^= len;
    h ^= (h >> 16);
    h *= 0x85ebca6b;
    h ^= (h >> 13);
    h *= 0xc2b2ae35;
    h ^= (h >> 16);
    return h;
}

View File

@ -1,50 +0,0 @@
/**
* `murmurhash.h' - murmurhash
*
* copyright (c) 2014 joseph werle <joseph.werle@gmail.com>
* Copyright (c) 2015-2021 The Khronos Group Inc.
* Copyright (c) 2015-2021 Valve Corporation
* Copyright (c) 2015-2021 LunarG, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and/or associated documentation files (the "Materials"), to
* deal in the Materials without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Materials, and to permit persons to whom the Materials are
* furnished to do so, subject to the following conditions:
*
* The above copyright notice(s) and this permission notice shall be included in
* all copies or substantial portions of the Materials.
*
* THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
*
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE
* USE OR OTHER DEALINGS IN THE MATERIALS.
*/
#pragma once
#include <stddef.h>
#include <stdint.h>
#define MURMURHASH_VERSION "0.0.3"
#ifdef __cplusplus
extern "C" {
#endif
/**
* Returns a murmur hash of `key' based on `seed'
* using the MurmurHash3 algorithm
*/
uint32_t murmurhash(const char *key, size_t len, uint32_t seed);
#ifdef __cplusplus
}
#endif

View File

@ -50,7 +50,7 @@
struct loader_instance *inst = (struct loader_instance *)icd_term->this_instance; \
if (NULL == icd_term->phys_dev_ext[num]) { \
loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "Extension %s not supported for this physical device", \
inst->phys_dev_ext_disp_hash[num].func_name); \
inst->phys_dev_ext_disp_functions[num]); \
} \
icd_term->phys_dev_ext[num](phys_dev_term->phys_dev); \
}
@ -60,7 +60,7 @@
VKAPI_ATTR void VKAPI_CALL vkdev_ext##num(VkDevice device) { \
const struct loader_dev_dispatch_table *disp; \
disp = loader_get_dev_dispatch(device); \
disp->ext_dispatch.dev_ext[num](device); \
disp->ext_dispatch[num](device); \
}
// clang-format off

View File

@ -45,10 +45,10 @@ vkPhysDevExtTermin\num:
ldr x0, [x0, PHYS_DEV_OFFSET_PHYS_DEV_TERM] // Unwrap the VkPhysicalDevice in x0
br x10 // Jump to the next function in the chain
terminError\num:
mov x10, (HASH_OFFSET_INSTANCE + (HASH_SIZE * \num) + FUNC_NAME_OFFSET_HASH) // Offset of the function name string in the instance
mov x10, (FUNCTION_OFFSET_INSTANCE + (CHAR_PTR_SIZE * \num)) // Offset of the function name string in the instance
ldr x11, [x9, INSTANCE_OFFSET_ICD_TERM] // Load the instance pointer
mov x0, x11 // Vulkan instance pointer (first arg)
mov x1, VK_DEBUG_REPORT_ERROR_BIT_EXT // The error logging bit (second arg)
mov x1, VULKAN_LOADER_ERROR_BIT // The error logging bit (second arg)
mov x2, #0 // Zero (third arg)
adrp x9, termin_error_string
add x3, x9, #:lo12:termin_error_string // The error string (fourth arg)

View File

@ -56,10 +56,10 @@ vkPhysDevExtTermin\num:
terminError\num:
sub rsp, 56 # Create the stack frame
mov rdi, [rax + INSTANCE_OFFSET_ICD_TERM] # Load the loader_instance into rdi (first arg)
mov r8, [rdi + (HASH_OFFSET_INSTANCE + (HASH_SIZE * \num) + FUNC_NAME_OFFSET_HASH)] # Load the func name into r8 (fifth arg)
lea rcx, termin_error_string@GOTPCREL # Load the error string into rcx (fourth arg)
xor edx, edx # Set rdx to zero (third arg)
lea esi, [rdx + VK_DEBUG_REPORT_ERROR_BIT_EXT] # Write the error logging bit to rsi (second arg)
lea rsi, [VULKAN_LOADER_ERROR_BIT] # Write the error logging bit to rsi (second arg)
xor rdx, rdx # Set rdx to zero (third arg)
lea rcx, [rip + termin_error_string] # Load the error string into rcx (fourth arg)
mov r8, [rdi + (FUNCTION_OFFSET_INSTANCE + (CHAR_PTR_SIZE * \num))] # Load the func name into r8 (fifth arg)
call loader_log # Log the error message before we crash
add rsp, 56 # Clean up the stack frame
mov rax, 0
@ -99,16 +99,16 @@ vkPhysDevExtTermin\num:
mov [esp + 4], ecx # Copy the unwrapped VkPhysicalDevice into the first arg
jmp [eax + (DISPATCH_OFFSET_ICD_TERM + (PTR_SIZE * \num))] # Jump to the next function in the chain
terminError\num:
mov eax, [eax + INSTANCE_OFFSET_ICD_TERM] # Load the loader_instance into eax
push [eax + (HASH_OFFSET_INSTANCE + (HASH_SIZE * \num) + FUNC_NAME_OFFSET_HASH)] # Push the func name (fifth arg)
push offset termin_error_string@GOT # Push the error string (fourth arg)
push 0 # Push zero (third arg)
push VK_DEBUG_REPORT_ERROR_BIT_EXT # Push the error logging bit (second arg)
push eax # Push the loader_instance (first arg)
call loader_log # Log the error message before we crash
add esp, 20 # Clean up the args
mov eax, [eax + INSTANCE_OFFSET_ICD_TERM] # Load the loader_instance into eax
push [eax + (FUNCTION_OFFSET_INSTANCE + (CHAR_PTR_SIZE * \num))] # Push the func name (fifth arg)
push offset termin_error_string@GOT # Push the error string (fourth arg)
push 0 # Push zero (third arg)
push VULKAN_LOADER_ERROR_BIT # Push the error logging bit (second arg)
push eax # Push the loader_instance (first arg)
call loader_log # Log the error message before we crash
add esp, 20 # Clean up the args
mov eax, 0
jmp eax # Crash intentionally by jumping to address zero
jmp eax # Crash intentionally by jumping to address zero
.endm
.macro DevExtTramp num

View File

@ -51,7 +51,7 @@ vkPhysDevExtTermin&num&:
terminError&num&:
sub rsp, 56 ; Create the stack frame
mov rcx, qword ptr [rax + INSTANCE_OFFSET_ICD_TERM] ; Load the loader_instance into rcx (first arg)
mov rax, qword ptr [rcx + (HASH_OFFSET_INSTANCE + (HASH_SIZE * num) + FUNC_NAME_OFFSET_HASH)] ; Load the func name into rax
mov rax, qword ptr [rcx + (FUNCTION_OFFSET_INSTANCE + (CHAR_PTR_SIZE * num))] ; Load the func name into rax
lea r9, termin_error_string ; Load the error string into r9 (fourth arg)
xor r8d, r8d ; Set r8 to zero (third arg)
mov qword ptr [rsp + 32], rax ; Move the func name onto the stack (fifth arg)
@ -94,7 +94,7 @@ _vkPhysDevExtTermin&num&@4:
jmp dword ptr [eax + (DISPATCH_OFFSET_ICD_TERM + (PTR_SIZE * num))] ; Jump to the next function in the chain
terminError&num&:
mov eax, dword ptr [eax + INSTANCE_OFFSET_ICD_TERM] ; Load the loader_instance into eax
push dword ptr [eax + (HASH_OFFSET_INSTANCE + (HASH_SIZE * num) + FUNC_NAME_OFFSET_HASH)] ; Push the func name (fifth arg)
push dword ptr [eax + (FUNCTION_OFFSET_INSTANCE + (CHAR_PTR_SIZE * num))] ; Push the func name (fifth arg)
push offset termin_error_string ; Push the error string (fourth arg)
push 0 ; Push zero (third arg)
push VULKAN_LOADER_ERROR_BIT ; Push the error logging bit (second arg)

View File

@ -25,7 +25,6 @@
#include "unknown_function_handling.h"
#include "allocation.h"
#include "murmurhash.h"
#include "log.h"
// Forward declarations
@ -33,6 +32,8 @@ void *loader_get_dev_ext_trampoline(uint32_t index);
void *loader_get_phys_dev_ext_tramp(uint32_t index);
void *loader_get_phys_dev_ext_termin(uint32_t index);
// Device function handling
// Initialize device_ext dispatch table entry as follows:
// If dev == NULL find all logical devices created within this instance and
// init the entry (given by idx) in the ext dispatch table.
@ -46,25 +47,25 @@ void loader_init_dispatch_dev_ext_entry(struct loader_instance *inst, struct loa
void *gdpa_value;
if (dev != NULL) {
gdpa_value = dev->loader_dispatch.core_dispatch.GetDeviceProcAddr(dev->chain_device, funcName);
if (gdpa_value != NULL) dev->loader_dispatch.ext_dispatch.dev_ext[idx] = (PFN_vkDevExt)gdpa_value;
if (gdpa_value != NULL) dev->loader_dispatch.ext_dispatch[idx] = (PFN_vkDevExt)gdpa_value;
} else {
for (struct loader_icd_term *icd_term = inst->icd_terms; icd_term != NULL; icd_term = icd_term->next) {
struct loader_device *ldev = icd_term->logical_device_list;
while (ldev) {
gdpa_value = ldev->loader_dispatch.core_dispatch.GetDeviceProcAddr(ldev->chain_device, funcName);
if (gdpa_value != NULL) ldev->loader_dispatch.ext_dispatch.dev_ext[idx] = (PFN_vkDevExt)gdpa_value;
if (gdpa_value != NULL) ldev->loader_dispatch.ext_dispatch[idx] = (PFN_vkDevExt)gdpa_value;
ldev = ldev->next;
}
}
}
}
// Find all dev extension in the hash table and initialize the dispatch table
// for dev for each of those extension entrypoints found in hash table.
// Find all dev extensions in the function name array and initialize the dispatch table
// for dev for each of those extension entrypoints found in the function name array.
void loader_init_dispatch_dev_ext(struct loader_instance *inst, struct loader_device *dev) {
for (uint32_t i = 0; i < MAX_NUM_UNKNOWN_EXTS; i++) {
if (inst->dev_ext_disp_hash[i].func_name != NULL)
loader_init_dispatch_dev_ext_entry(inst, dev, i, inst->dev_ext_disp_hash[i].func_name);
if (inst->dev_ext_disp_functions[i] != NULL)
loader_init_dispatch_dev_ext_entry(inst, dev, i, inst->dev_ext_disp_functions[i]);
}
}
@ -101,121 +102,35 @@ bool loader_check_layer_list_for_dev_ext_address(const struct loader_layer_list
}
void loader_free_dev_ext_table(struct loader_instance *inst) {
for (uint32_t i = 0; i < MAX_NUM_UNKNOWN_EXTS; i++) {
loader_instance_heap_free(inst, inst->dev_ext_disp_hash[i].func_name);
loader_instance_heap_free(inst, inst->dev_ext_disp_hash[i].list.index);
for (uint32_t i = 0; i < inst->dev_ext_disp_function_count; i++) {
loader_instance_heap_free(inst, inst->dev_ext_disp_functions[i]);
}
memset(inst->dev_ext_disp_hash, 0, sizeof(inst->dev_ext_disp_hash));
memset(inst->dev_ext_disp_functions, 0, sizeof(inst->dev_ext_disp_functions));
}
// Insert funcName into the device-extension dispatch hash table.
// *ptr_idx is the primary (hash-derived) slot for the name; if that slot is
// taken, an open-addressing probe finds a free secondary slot, which is
// recorded in the primary slot's collision list and written back to *ptr_idx.
// Returns true on success, false on allocation failure or a full table.
bool loader_add_dev_ext_table(struct loader_instance *inst, uint32_t *ptr_idx, const char *funcName) {
uint32_t i;
uint32_t idx = *ptr_idx;
// Collision list hanging off the primary slot: indices of entries that
// hashed here but were stored elsewhere.
struct loader_dispatch_hash_list *list = &inst->dev_ext_disp_hash[idx].list;
if (!inst->dev_ext_disp_hash[idx].func_name) {
// no entry here at this idx, so use it
assert(list->capacity == 0);
inst->dev_ext_disp_hash[idx].func_name =
(char *)loader_instance_heap_alloc(inst, strlen(funcName) + 1, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
if (inst->dev_ext_disp_hash[idx].func_name == NULL) {
loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "loader_add_dev_ext_table: Failed to allocate memory for func_name %s",
funcName);
return false;
}
strncpy(inst->dev_ext_disp_hash[idx].func_name, funcName, strlen(funcName) + 1);
return true;
}
// check for enough capacity
// NOTE: capacity is tracked in bytes; the initial allocation holds 8 indices
// and subsequent growth doubles the byte capacity.
if (list->capacity == 0) {
list->index = loader_instance_heap_alloc(inst, 8 * sizeof(*(list->index)), VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
if (list->index == NULL) {
loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
"loader_add_dev_ext_table: Failed to allocate memory for list index of function %s", funcName);
return false;
}
list->capacity = 8 * sizeof(*(list->index));
} else if (list->capacity < (list->count + 1) * sizeof(*(list->index))) {
void *new_ptr = loader_instance_heap_realloc(inst, list->index, list->capacity, list->capacity * 2,
VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
if (NULL == new_ptr) {
loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
"loader_add_dev_ext_table: Failed to reallocate memory for list index of function %s", funcName);
return false;
}
list->index = new_ptr;
list->capacity *= 2;
}
// find an unused index in the hash table and use it
// Linear probe starting just past the primary slot, wrapping around; stops
// after a full cycle back to idx if every slot is occupied.
i = (idx + 1) % MAX_NUM_UNKNOWN_EXTS;
do {
if (!inst->dev_ext_disp_hash[i].func_name) {
assert(inst->dev_ext_disp_hash[i].list.capacity == 0);
inst->dev_ext_disp_hash[i].func_name =
(char *)loader_instance_heap_alloc(inst, strlen(funcName) + 1, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
if (inst->dev_ext_disp_hash[i].func_name == NULL) {
loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "loader_add_dev_ext_table: Failed to allocate memory for func_name %s",
funcName);
return false;
}
strncpy(inst->dev_ext_disp_hash[i].func_name, funcName, strlen(funcName) + 1);
// Record the relocation in the primary slot's collision list and report
// the actual storage index back to the caller.
list->index[list->count] = i;
list->count++;
*ptr_idx = i;
return true;
}
i = (i + 1) % MAX_NUM_UNKNOWN_EXTS;
} while (i != idx);
loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "loader_add_dev_ext_table: Could not insert into hash table; is it full?");
return false;
}
// Look up funcName in the device-extension dispatch hash table.
// *idx holds the primary (hash-derived) slot on entry; on a hit, the index
// where the name actually resides is written back to *idx and true is
// returned. Returns false when the name is not present.
bool loader_name_in_dev_ext_table(struct loader_instance *inst, uint32_t *idx, const char *funcName) {
    uint32_t alt_idx;
    if (inst->dev_ext_disp_hash[*idx].func_name && !strcmp(inst->dev_ext_disp_hash[*idx].func_name, funcName)) return true;
    // funcName wasn't at the primary spot in the hash table
    // search the list of secondary locations (shallow search, not deep search)
    for (uint32_t i = 0; i < inst->dev_ext_disp_hash[*idx].list.count; i++) {
        alt_idx = inst->dev_ext_disp_hash[*idx].list.index[i];
        // BUG FIX: compare the entry stored at the secondary location
        // (alt_idx), not the primary slot (*idx) again — the old code made
        // every relocated (collided) name unfindable.
        if (inst->dev_ext_disp_hash[alt_idx].func_name && !strcmp(inst->dev_ext_disp_hash[alt_idx].func_name, funcName)) {
            *idx = alt_idx;
            return true;
        }
    }
    return false;
}
// This function returns generic trampoline code address for unknown entry
// points.
// Presumably, these unknown entry points (as given by funcName) are device
// extension entrypoints. A hash table is used to keep a list of unknown entry
// points and their mapping to the device extension dispatch table
// (struct loader_dev_ext_dispatch_table).
// \returns
// For a given entry point string (funcName), if an existing mapping is found
// the
// trampoline address for that mapping is returned. Otherwise, this unknown
// entry point
// has not been seen yet. Next check if a layer or ICD supports it. If so then
// a
// new entry in the hash table is initialized and that trampoline address for
// the new entry is returned. Null is returned if the hash table is full or
// if no discovered layer or ICD returns a non-NULL GetProcAddr for it.
/*
* This function returns generic trampoline code address for unknown entry points.
* Presumably, these unknown entry points (as given by funcName) are device extension
* entrypoints.
* A function name array is used to keep a list of unknown entry points and their
* mapping to the device extension dispatch table.
* \returns
* For a given entry point string (funcName), if an existing mapping is found the
* trampoline address for that mapping is returned.
* Otherwise, this unknown entry point has not been seen yet.
* Next check if a layer or ICD supports it.
* If so then a new entry in the function name array is added and that trampoline
* address for the new entry is returned.
* NULL is returned if the function name array is full or if no discovered layer or
* ICD returns a non-NULL GetProcAddr for it.
*/
void *loader_dev_ext_gpa(struct loader_instance *inst, const char *funcName) {
uint32_t idx;
uint32_t seed = 0;
idx = murmurhash(funcName, strlen(funcName), seed) % MAX_NUM_UNKNOWN_EXTS;
if (loader_name_in_dev_ext_table(inst, &idx, funcName))
// found funcName already in hash
return loader_get_dev_ext_trampoline(idx);
// Linearly look through already added functions to make sure we haven't seen it before
// if we have, return the function at the index found
for (uint32_t i = 0; i < inst->dev_ext_disp_function_count; i++) {
if (inst->dev_ext_disp_functions[i] && !strcmp(inst->dev_ext_disp_functions[i], funcName))
return loader_get_dev_ext_trampoline(i);
}
// Check if funcName is supported in either ICDs or a layer library
if (!loader_check_icds_for_dev_ext_address(inst, funcName) &&
@ -224,16 +139,29 @@ void *loader_dev_ext_gpa(struct loader_instance *inst, const char *funcName) {
return NULL;
}
if (loader_add_dev_ext_table(inst, &idx, funcName)) {
// successfully added new table entry
// init any dev dispatch table entries as needed
loader_init_dispatch_dev_ext_entry(inst, NULL, idx, funcName);
return loader_get_dev_ext_trampoline(idx);
if (inst->dev_ext_disp_function_count >= MAX_NUM_UNKNOWN_EXTS) {
loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "loader_dev_ext_gpa: Exhausted the unknown device function array!");
return NULL;
}
return NULL;
// add found function to dev_ext_disp_functions;
size_t funcName_len = strlen(funcName) + 1;
inst->dev_ext_disp_functions[inst->dev_ext_disp_function_count] =
(char *)loader_instance_heap_alloc(inst, funcName_len, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
if (NULL == inst->dev_ext_disp_functions[inst->dev_ext_disp_function_count]) {
// failed to allocate memory, return NULL
return NULL;
}
strncpy(inst->dev_ext_disp_functions[inst->dev_ext_disp_function_count], funcName, funcName_len);
// init any dev dispatch table entries as needed
loader_init_dispatch_dev_ext_entry(inst, NULL, inst->dev_ext_disp_function_count, funcName);
void *out_function = loader_get_dev_ext_trampoline(inst->dev_ext_disp_function_count);
inst->dev_ext_disp_function_count++;
return out_function;
}
// Physical Device function handling
bool loader_check_icds_for_phys_dev_ext_address(struct loader_instance *inst, const char *funcName) {
struct loader_icd_term *icd_term;
icd_term = inst->icd_terms;
@ -267,195 +195,136 @@ bool loader_check_layer_list_for_phys_dev_ext_address(struct loader_instance *in
void loader_free_phys_dev_ext_table(struct loader_instance *inst) {
for (uint32_t i = 0; i < MAX_NUM_UNKNOWN_EXTS; i++) {
loader_instance_heap_free(inst, inst->phys_dev_ext_disp_hash[i].func_name);
loader_instance_heap_free(inst, inst->phys_dev_ext_disp_hash[i].list.index);
loader_instance_heap_free(inst, inst->phys_dev_ext_disp_functions[i]);
}
memset(inst->phys_dev_ext_disp_hash, 0, sizeof(inst->phys_dev_ext_disp_hash));
memset(inst->phys_dev_ext_disp_functions, 0, sizeof(inst->phys_dev_ext_disp_functions));
}
// Insert funcName into the physical-device-extension dispatch hash table.
// *ptr_idx is the primary (hash-derived) slot for the name; if that slot is
// taken, an open-addressing probe finds a free secondary slot, which is
// recorded in the primary slot's collision list and written back to *ptr_idx.
// Returns true on success, false on allocation failure or a full table.
bool loader_add_phys_dev_ext_table(struct loader_instance *inst, uint32_t *ptr_idx, const char *funcName) {
uint32_t i;
uint32_t idx = *ptr_idx;
// Collision list hanging off the primary slot: indices of entries that
// hashed here but were stored elsewhere.
struct loader_dispatch_hash_list *list = &inst->phys_dev_ext_disp_hash[idx].list;
if (!inst->phys_dev_ext_disp_hash[idx].func_name) {
// no entry here at this idx, so use it
assert(list->capacity == 0);
inst->phys_dev_ext_disp_hash[idx].func_name =
(char *)loader_instance_heap_alloc(inst, strlen(funcName) + 1, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
if (inst->phys_dev_ext_disp_hash[idx].func_name == NULL) {
loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "loader_add_phys_dev_ext_table() can't allocate memory for func_name");
return false;
}
strncpy(inst->phys_dev_ext_disp_hash[idx].func_name, funcName, strlen(funcName) + 1);
return true;
}
// check for enough capacity
// NOTE: capacity is tracked in bytes; the initial allocation holds 8 indices
// and subsequent growth doubles the byte capacity.
if (list->capacity == 0) {
list->index = loader_instance_heap_alloc(inst, 8 * sizeof(*(list->index)), VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
if (list->index == NULL) {
loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "loader_add_phys_dev_ext_table() can't allocate list memory");
return false;
}
list->capacity = 8 * sizeof(*(list->index));
} else if (list->capacity < (list->count + 1) * sizeof(*(list->index))) {
void *new_ptr = loader_instance_heap_realloc(inst, list->index, list->capacity, list->capacity * 2,
VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
if (NULL == new_ptr) {
loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "loader_add_phys_dev_ext_table() can't reallocate list memory");
return false;
}
list->index = new_ptr;
list->capacity *= 2;
}
// find an unused index in the hash table and use it
// Linear probe starting just past the primary slot, wrapping around; stops
// after a full cycle back to idx if every slot is occupied.
i = (idx + 1) % MAX_NUM_UNKNOWN_EXTS;
do {
if (!inst->phys_dev_ext_disp_hash[i].func_name) {
assert(inst->phys_dev_ext_disp_hash[i].list.capacity == 0);
inst->phys_dev_ext_disp_hash[i].func_name =
(char *)loader_instance_heap_alloc(inst, strlen(funcName) + 1, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
if (inst->phys_dev_ext_disp_hash[i].func_name == NULL) {
loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "loader_add_phys_dev_ext_table() can't reallocate func_name memory");
return false;
}
strncpy(inst->phys_dev_ext_disp_hash[i].func_name, funcName, strlen(funcName) + 1);
// Record the relocation in the primary slot's collision list and report
// the actual storage index back to the caller.
list->index[list->count] = i;
list->count++;
*ptr_idx = i;
return true;
}
i = (i + 1) % MAX_NUM_UNKNOWN_EXTS;
} while (i != idx);
loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "loader_add_phys_dev_ext_table() couldn't insert into hash table; is it full?");
return false;
}
// Look up funcName in the physical-device-extension dispatch hash table.
// *idx holds the primary (hash-derived) slot on entry; on a hit, the index
// where the name actually resides is written back to *idx and true is
// returned. Returns false when the name is not present.
bool loader_name_in_phys_dev_ext_table(struct loader_instance *inst, uint32_t *idx, const char *funcName) {
    uint32_t alt_idx;
    if (inst->phys_dev_ext_disp_hash[*idx].func_name && !strcmp(inst->phys_dev_ext_disp_hash[*idx].func_name, funcName))
        return true;
    // funcName wasn't at the primary spot in the hash table
    // search the list of secondary locations (shallow search, not deep search)
    for (uint32_t i = 0; i < inst->phys_dev_ext_disp_hash[*idx].list.count; i++) {
        alt_idx = inst->phys_dev_ext_disp_hash[*idx].list.index[i];
        // BUG FIX: compare the entry stored at the secondary location
        // (alt_idx), not the primary slot (*idx) again — the old code made
        // every relocated (collided) name unfindable.
        if (inst->phys_dev_ext_disp_hash[alt_idx].func_name &&
            !strcmp(inst->phys_dev_ext_disp_hash[alt_idx].func_name, funcName)) {
            *idx = alt_idx;
            return true;
        }
    }
    return false;
}
// This function returns a generic trampoline or terminator function
// address for any unknown physical device extension commands. An array
// is used to keep a list of unknown entry points and their
// mapping to the physical device extension dispatch table (struct
// loader_phys_dev_ext_dispatch_table).
// For a given entry point string (funcName), if an existing mapping is
// found, then the address for that mapping is returned. The is_tramp
// parameter is used to decide whether to return a trampoline or terminator.
// If it has not been seen before, check if a layer or an ICD supports it.
// If so then a new entry in the function name array is added.
// NULL is returned if no discovered layer or ICD returns a non-NULL
// GetProcAddr for it, or if the function name array is full.
void *loader_phys_dev_ext_gpa_impl(struct loader_instance *inst, const char *funcName, bool is_tramp) {
    assert(NULL != inst);

    // We should always check to see if any ICD supports it.
    if (!loader_check_icds_for_phys_dev_ext_address(inst, funcName)) {
        // Not in any ICD; it may still be provided by an active layer, so check those too.
        if (!loader_check_layer_list_for_phys_dev_ext_address(inst, funcName)) {
            return NULL;
        }
    }

    // Linearly look through already added functions to make sure we haven't seen it before
    // if we have, return the function at the index found
    for (uint32_t i = 0; i < inst->phys_dev_ext_disp_function_count; i++) {
        if (inst->phys_dev_ext_disp_functions[i] && !strcmp(inst->phys_dev_ext_disp_functions[i], funcName)) {
            if (is_tramp) {
                return loader_get_phys_dev_ext_tramp(i);
            } else {
                return loader_get_phys_dev_ext_termin(i);
            }
        }
    }

    // The generated trampoline/terminator tables are fixed size; refuse once full.
    // NOTE: log prefix corrected to name this function (was "loader_dev_ext_gpa").
    if (inst->phys_dev_ext_disp_function_count >= MAX_NUM_UNKNOWN_EXTS) {
        loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
                   "loader_phys_dev_ext_gpa: Exhausted the unknown physical device function array!");
        return NULL;
    }

    loader_log(inst, VULKAN_LOADER_DEBUG_BIT, 0,
               "loader_phys_dev_ext_gpa: Adding unknown physical function %s to internal store at index %u", funcName,
               inst->phys_dev_ext_disp_function_count);

    // add found function to phys_dev_ext_disp_functions;
    size_t funcName_len = strlen(funcName) + 1;
    inst->phys_dev_ext_disp_functions[inst->phys_dev_ext_disp_function_count] =
        (char *)loader_instance_heap_alloc(inst, funcName_len, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
    if (NULL == inst->phys_dev_ext_disp_functions[inst->phys_dev_ext_disp_function_count]) {
        // failed to allocate memory, return NULL
        return NULL;
    }
    strncpy(inst->phys_dev_ext_disp_functions[inst->phys_dev_ext_disp_function_count], funcName, funcName_len);

    uint32_t new_function_index = inst->phys_dev_ext_disp_function_count;
    // increment the count so that the subsequent logic includes the newly added entry point when searching for functions
    inst->phys_dev_ext_disp_function_count++;

    // Setup the ICD function pointers
    struct loader_icd_term *icd_term = inst->icd_terms;
    while (NULL != icd_term) {
        if (MIN_PHYS_DEV_EXTENSION_ICD_INTERFACE_VERSION <= icd_term->scanned_icd->interface_version &&
            NULL != icd_term->scanned_icd->GetPhysicalDeviceProcAddr) {
            icd_term->phys_dev_ext[new_function_index] =
                (PFN_PhysDevExt)icd_term->scanned_icd->GetPhysicalDeviceProcAddr(icd_term->instance, funcName);

            if (NULL != icd_term->phys_dev_ext[new_function_index]) {
                // Make sure we set the instance dispatch to point to the loader's terminator now since we can at least handle it in
                // one ICD.
                inst->disp->phys_dev_ext[new_function_index] = loader_get_phys_dev_ext_termin(new_function_index);
                loader_log(inst, VULKAN_LOADER_DEBUG_BIT, 0, "loader_phys_dev_ext_gpa: Driver %s returned ptr %p for %s",
                           icd_term->scanned_icd->lib_name, inst->disp->phys_dev_ext[new_function_index], funcName);
            }
        } else {
            // ICD is too old (or lacks GetPhysicalDeviceProcAddr); it cannot service this entry point.
            icd_term->phys_dev_ext[new_function_index] = NULL;
        }
        icd_term = icd_term->next;
    }

    // Now, search for the first layer attached and query using it to get the first entry point.
    // Only set the instance dispatch table to it if it isn't NULL.
    for (uint32_t i = 0; i < inst->expanded_activated_layer_list.count; i++) {
        struct loader_layer_properties *layer_prop = &inst->expanded_activated_layer_list.list[i];
        if (layer_prop->interface_version > 1 && NULL != layer_prop->functions.get_physical_device_proc_addr) {
            void *layer_ret_function =
                (PFN_PhysDevExt)layer_prop->functions.get_physical_device_proc_addr(inst->instance, funcName);
            if (NULL != layer_ret_function) {
                inst->disp->phys_dev_ext[new_function_index] = layer_ret_function;
                loader_log(inst, VULKAN_LOADER_DEBUG_BIT, 0, "loader_phys_dev_ext_gpa: Layer %s returned ptr %p for %s",
                           layer_prop->info.layerName, inst->disp->phys_dev_ext[new_function_index], funcName);
                break;
            }
        }
    }

    if (is_tramp) {
        return loader_get_phys_dev_ext_tramp(new_function_index);
    } else {
        return loader_get_phys_dev_ext_termin(new_function_index);
    }
}
// Public entry point for resolving an unknown physical-device function: returns its
// *trampoline* address. Thin wrapper making the trampoline/terminator choice explicit.
void *loader_phys_dev_ext_gpa_tramp(struct loader_instance *inst, const char *funcName) {
    const bool want_trampoline = true;
    return loader_phys_dev_ext_gpa_impl(inst, funcName, want_trampoline);
}
// Public entry point for resolving an unknown physical-device function: returns its
// *terminator* address. Thin wrapper making the trampoline/terminator choice explicit.
void *loader_phys_dev_ext_gpa_term(struct loader_instance *inst, const char *funcName) {
    const bool want_trampoline = false;
    return loader_phys_dev_ext_gpa_impl(inst, funcName, want_trampoline);
}
// Returns the terminator if the function is supported in an ICD and if there is an entry for it in
// the function name array. Otherwise return NULL. Unlike loader_phys_dev_ext_gpa_term, this
// never registers a new entry — it only looks up functions that were previously added.
void *loader_phys_dev_ext_gpa_term_no_check(struct loader_instance *inst, const char *funcName) {
    assert(NULL != inst);

    // We should always check to see if any ICD supports it.
    if (!loader_check_icds_for_phys_dev_ext_address(inst, funcName)) {
        return NULL;
    }

    // Linearly look through already added functions to make sure we haven't seen it before
    // if we have, return the function at the index found
    for (uint32_t i = 0; i < inst->phys_dev_ext_disp_function_count; i++) {
        if (inst->phys_dev_ext_disp_functions[i] && !strcmp(inst->phys_dev_ext_disp_functions[i], funcName))
            return loader_get_phys_dev_ext_termin(i);
    }
    return NULL;
}

View File

@ -29,8 +29,9 @@
void loader_init_dispatch_dev_ext(struct loader_instance *inst, struct loader_device *dev);
void *loader_dev_ext_gpa(struct loader_instance *inst, const char *funcName);
bool loader_phys_dev_ext_gpa(struct loader_instance *inst, const char *funcName, bool perform_checking, void **tramp_addr,
void **term_addr);
void *loader_phys_dev_ext_gpa_tramp(struct loader_instance *inst, const char *funcName);
void *loader_phys_dev_ext_gpa_term(struct loader_instance *inst, const char *funcName);
void *loader_phys_dev_ext_gpa_term_no_check(struct loader_instance *inst, const char *funcName);
void loader_free_dev_ext_table(struct loader_instance *inst);
void loader_free_phys_dev_ext_table(struct loader_instance *inst);

View File

@ -768,7 +768,7 @@ class LoaderExtensionOutputGenerator(OutputGenerator):
tables += ' VkDevice dev) {\n'
tables += ' VkLayerDispatchTable *table = &dev_table->core_dispatch;\n'
tables += ' table->magic = DEVICE_DISP_TABLE_MAGIC_NUMBER;\n'
tables += ' for (uint32_t i = 0; i < MAX_NUM_UNKNOWN_EXTS; i++) dev_table->ext_dispatch.dev_ext[i] = (PFN_vkDevExt)vkDevExtError;\n'
tables += ' for (uint32_t i = 0; i < MAX_NUM_UNKNOWN_EXTS; i++) dev_table->ext_dispatch[i] = (PFN_vkDevExt)vkDevExtError;\n'
elif x == 1:
cur_type = 'device'

View File

@ -151,12 +151,9 @@ VKAPI_ATTR VkResult VKAPI_CALL test_vkCreateInstance(const VkInstanceCreateInfo*
return VK_ERROR_INITIALIZATION_FAILED;
}
layer.next_vkGetInstanceProcAddr = fpGetInstanceProcAddr;
layer.next_GetPhysicalDeviceProcAddr =
reinterpret_cast<PFN_GetPhysicalDeviceProcAddr>(fpGetInstanceProcAddr(*pInstance, "vk_layerGetPhysicalDeviceProcAddr"));
// Advance the link info for the next element of the chain
chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
layer.next_vkGetInstanceProcAddr = fpGetInstanceProcAddr;
// Continue call down the chain
VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance);
@ -164,6 +161,8 @@ VKAPI_ATTR VkResult VKAPI_CALL test_vkCreateInstance(const VkInstanceCreateInfo*
return result;
}
layer.instance_handle = *pInstance;
layer.next_GetPhysicalDeviceProcAddr =
reinterpret_cast<PFN_GetPhysicalDeviceProcAddr>(fpGetInstanceProcAddr(*pInstance, "vk_layerGetPhysicalDeviceProcAddr"));
// Init layer's dispatch table using GetInstanceProcAddr of
// next layer in the chain.

View File

@ -60,8 +60,8 @@ struct custom_functions {
template <typename FunctionStruct>
void fill_custom_functions(std::vector<VulkanFunction>& driver_function_list, std::vector<std::string>& fake_function_names,
FunctionStruct const& funcs, uint32_t function_count) {
for (uint32_t i = 0; i < function_count;) {
FunctionStruct const& funcs, uint32_t function_count, uint32_t function_start = 0) {
for (uint32_t i = function_start; i < function_start + function_count;) {
fake_function_names.push_back(std::string("vkNotIntRealFuncTEST_") + std::to_string(i++));
driver_function_list.push_back(VulkanFunction{fake_function_names.back(), reinterpret_cast<void*>(funcs.func_zero)});
@ -81,8 +81,8 @@ void fill_custom_functions(std::vector<VulkanFunction>& driver_function_list, st
}
template <typename FunctionLoader, typename ParentType, typename DispatchableHandleType, typename FunctionStruct>
void check_custom_functions(FunctionLoader& loader, ParentType parent, DispatchableHandleType handle, FunctionStruct const& s,
std::vector<std::string>& fake_function_names, uint32_t function_count) {
for (uint32_t i = 0; i < function_count;) {
std::vector<std::string>& fake_function_names, uint32_t function_count, uint32_t function_start = 0) {
for (uint32_t i = function_start; i < function_start + function_count;) {
decltype(FunctionStruct::func_zero)* returned_func_i = loader.load(parent, fake_function_names.at(i++).c_str());
ASSERT_NE(returned_func_i, nullptr);
EXPECT_EQ(returned_func_i(handle, i * 10), i * 10);
@ -128,6 +128,90 @@ TEST_F(UnknownFunction, PhysicalDeviceFunction) {
function_count);
}
// Verifies unknown physical-device function resolution when two drivers each expose
// their own disjoint set of unknown entry points: every function must dispatch to the
// driver that owns the physical device it is called on.
TEST_F(UnknownFunction, PhysicalDeviceFunctionMultipleDriverSupport) {
#if defined(__APPLE__)
    GTEST_SKIP() << "Skip this test as currently macOS doesn't fully support unknown functions.";
#endif
    // Add a second ICD; indices 0 and 1 are used below, so the fixture presumably
    // already created the first one — TODO confirm against the fixture's SetUp.
    env->add_icd(TestICDDetails(TEST_ICD_PATH_VERSION_6));
    auto& driver_0 = env->get_test_icd(0);
    auto& driver_1 = env->get_test_icd(1);
    std::vector<std::string> fake_function_names;
    // used to identify the GPUs
    VkPhysicalDeviceProperties props{};
    driver_0.physical_devices.emplace_back("physical_device_0");
    props.deviceType = VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU;
    driver_0.physical_devices.back().set_properties(props);
    driver_1.physical_devices.emplace_back("physical_device_1");
    props.deviceType = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
    driver_1.physical_devices.back().set_properties(props);
    // Interleave registration: per round, driver 0 owns name indices [i*10, i*10+5) and
    // driver 1 owns [i*10+5, i*10+10); 25 rounds -> 125 unknown functions per driver.
    for (uint32_t i = 0; i < 25; i++) {
        fill_custom_functions(driver_0.custom_physical_device_functions, fake_function_names, custom_physical_device_functions{}, 5,
                              i * 10);
        fill_custom_functions(driver_1.custom_physical_device_functions, fake_function_names, custom_physical_device_functions{}, 5,
                              i * 10 + 5);
    }
    InstWrapper inst{env->vulkan_functions};
    inst.CheckCreate();
    auto phys_devs = inst.GetPhysDevs(2);
    VkPhysicalDevice phys_dev_0 = phys_devs[0];
    VkPhysicalDevice phys_dev_1 = phys_devs[1];
    // Enumeration order of physical devices is not guaranteed; use the device types
    // assigned above to match each handle to its driver, swapping if necessary.
    env->vulkan_functions.vkGetPhysicalDeviceProperties(phys_devs[0], &props);
    if (props.deviceType != VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU) {
        phys_dev_0 = phys_devs[1];
        phys_dev_1 = phys_devs[0];
    }
    // Each driver's functions must resolve and execute on that driver's device.
    for (uint32_t i = 0; i < 25; i++) {
        check_custom_functions(env->vulkan_functions, inst.inst, phys_dev_0, custom_physical_device_functions{},
                               fake_function_names, 5, i * 10);
        check_custom_functions(env->vulkan_functions, inst.inst, phys_dev_1, custom_physical_device_functions{},
                               fake_function_names, 5, i * 10 + 5);
    }
}
// Add unknown functions to driver 0, and try to use them on driver 1.
// Calling a function on a physical device whose driver doesn't support it should hit
// the loader's error terminator and abort, hence the death test.
TEST(UnknownFunctionDeathTests, PhysicalDeviceFunctionErrorPath) {
#if defined(__APPLE__)
    GTEST_SKIP() << "Skip this test as currently macOS doesn't fully support unknown functions.";
#endif
    FrameworkEnvironment env{};
    env.add_icd(TestICDDetails(TEST_ICD_PATH_VERSION_6));
    env.add_icd(TestICDDetails(TEST_ICD_PATH_VERSION_6));
    auto& driver_0 = env.get_test_icd(0);
    auto& driver_1 = env.get_test_icd(1);
    std::vector<std::string> fake_function_names;
    // used to identify the GPUs
    VkPhysicalDeviceProperties props{};
    driver_0.physical_devices.emplace_back("physical_device_0");
    props.deviceType = VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU;
    driver_0.physical_devices.back().set_properties(props);
    driver_1.physical_devices.emplace_back("physical_device_1");
    props.deviceType = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
    driver_1.physical_devices.back().set_properties(props);
    // Register a single unknown function, on driver 0 only.
    fake_function_names.push_back(std::string("vkNotIntRealFuncTEST_0"));
    custom_physical_device_functions funcs{};
    driver_0.custom_physical_device_functions.push_back(
        VulkanFunction{fake_function_names.back(), reinterpret_cast<void*>(funcs.func_zero)});
    InstWrapper inst{env.vulkan_functions};
    inst.CheckCreate();
    auto phys_devs = inst.GetPhysDevs(2);
    // Pick the physical device that belongs to driver 1 (the one WITHOUT the function);
    // enumeration order is not guaranteed, so identify it by device type.
    VkPhysicalDevice phys_dev_to_use = phys_devs[1];
    env.vulkan_functions.vkGetPhysicalDeviceProperties(phys_devs[1], &props);
    if (props.deviceType != VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU) phys_dev_to_use = phys_devs[0];
    // Loading succeeds (driver 0 knows the name), but calling it on the wrong GPU
    // must dispatch to the loader's error terminator and die.
    decltype(custom_physical_device_functions::func_zero)* returned_func_i =
        env.vulkan_functions.load(inst.inst, fake_function_names.at(0).c_str());
    ASSERT_NE(returned_func_i, nullptr);
    ASSERT_DEATH(returned_func_i(phys_dev_to_use, 0), "");
}
TEST_F(UnknownFunction, PhysicalDeviceFunctionWithImplicitLayer) {
#if defined(__APPLE__)
GTEST_SKIP() << "Skip this test as currently macOS doesn't fully support unknown functions.";
@ -154,6 +238,55 @@ TEST_F(UnknownFunction, PhysicalDeviceFunctionWithImplicitLayer) {
function_count);
}
// Same scenario as PhysicalDeviceFunctionMultipleDriverSupport, but with an implicit
// layer active: unknown functions from two drivers must still dispatch to the correct
// driver when the call chain passes through a layer.
TEST_F(UnknownFunction, PhysicalDeviceFunctionMultipleDriverSupportWithImplicitLayer) {
#if defined(__APPLE__)
    GTEST_SKIP() << "Skip this test as currently macOS doesn't fully support unknown functions.";
#endif
    // Add a second ICD; indices 0 and 1 are used below, so the fixture presumably
    // already created the first one — TODO confirm against the fixture's SetUp.
    env->add_icd(TestICDDetails(TEST_ICD_PATH_VERSION_6));
    auto& driver_0 = env->get_test_icd(0);
    auto& driver_1 = env->get_test_icd(1);
    std::vector<std::string> fake_function_names;
    // used to identify the GPUs
    VkPhysicalDeviceProperties props{};
    driver_0.physical_devices.emplace_back("physical_device_0");
    props.deviceType = VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU;
    driver_0.physical_devices.back().set_properties(props);
    driver_1.physical_devices.emplace_back("physical_device_1");
    props.deviceType = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
    driver_1.physical_devices.back().set_properties(props);
    // Interleave registration: per round, driver 0 owns name indices [i*10, i*10+5) and
    // driver 1 owns [i*10+5, i*10+10); 25 rounds -> 125 unknown functions per driver.
    for (uint32_t i = 0; i < 25; i++) {
        fill_custom_functions(driver_0.custom_physical_device_functions, fake_function_names, custom_physical_device_functions{}, 5,
                              i * 10);
        fill_custom_functions(driver_1.custom_physical_device_functions, fake_function_names, custom_physical_device_functions{}, 5,
                              i * 10 + 5);
    }
    // Insert an implicit layer into the chain; it does not register any functions of
    // its own here, so calls should pass through it down to the drivers.
    env->add_implicit_layer(ManifestLayer{}.add_layer(ManifestLayer::LayerDescription{}
                                                          .set_name("VK_LAYER_implicit_layer_unknown_function_intercept")
                                                          .set_lib_path(TEST_LAYER_PATH_EXPORT_VERSION_2)
                                                          .set_disable_environment("DISABLE_ME")),
                            "implicit_layer_unknown_function_intercept.json");
    InstWrapper inst{env->vulkan_functions};
    inst.CheckCreate();
    auto phys_devs = inst.GetPhysDevs(2);
    VkPhysicalDevice phys_dev_0 = phys_devs[0];
    VkPhysicalDevice phys_dev_1 = phys_devs[1];
    // Enumeration order of physical devices is not guaranteed; use the device types
    // assigned above to match each handle to its driver, swapping if necessary.
    env->vulkan_functions.vkGetPhysicalDeviceProperties(phys_devs[0], &props);
    if (props.deviceType != VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU) {
        phys_dev_0 = phys_devs[1];
        phys_dev_1 = phys_devs[0];
    }
    // Each driver's functions must resolve and execute on that driver's device.
    for (uint32_t i = 0; i < 25; i++) {
        check_custom_functions(env->vulkan_functions, inst.inst, phys_dev_0, custom_physical_device_functions{},
                               fake_function_names, 5, i * 10);
        check_custom_functions(env->vulkan_functions, inst.inst, phys_dev_1, custom_physical_device_functions{},
                               fake_function_names, 5, i * 10 + 5);
    }
}
TEST_F(UnknownFunction, PhysicalDeviceFunctionWithImplicitLayerInterception) {
#if defined(__APPLE__)
GTEST_SKIP() << "Skip this test as currently macOS doesn't fully support unknown functions.";
@ -181,6 +314,41 @@ TEST_F(UnknownFunction, PhysicalDeviceFunctionWithImplicitLayerInterception) {
function_count);
}
// Verifies unknown physical-device functions supplied by TWO implicit layers (not the
// driver): all 250 layer-provided functions must be resolvable and callable on the
// single physical device.
TEST_F(UnknownFunction, PhysicalDeviceFunctionWithMultipleImplicitLayersInterception) {
#if defined(__APPLE__)
    GTEST_SKIP() << "Skip this test as currently macOS doesn't fully support unknown functions.";
#endif
    auto& driver = env->get_test_icd();
    std::vector<std::string> fake_function_names;
    driver.physical_devices.emplace_back("physical_device_0");
    env->add_implicit_layer(ManifestLayer{}.add_layer(ManifestLayer::LayerDescription{}
                                                          .set_name("VK_LAYER_implicit_layer_unknown_function_intercept_0")
                                                          .set_lib_path(TEST_LAYER_PATH_EXPORT_VERSION_2)
                                                          .set_disable_environment("DISABLE_ME")),
                            "implicit_layer_unknown_function_intercept_0.json");
    // NOTE(review): get_test_layer() is called right after each add_implicit_layer and
    // presumably returns the most recently added layer — verify against the framework.
    auto& layer_0 = env->get_test_layer();
    env->add_implicit_layer(ManifestLayer{}.add_layer(ManifestLayer::LayerDescription{}
                                                          .set_name("VK_LAYER_implicit_layer_unknown_function_intercept_1")
                                                          .set_lib_path(TEST_LAYER_PATH_EXPORT_VERSION_2)
                                                          .set_disable_environment("DISABLE_ME")),
                            "implicit_layer_unknown_function_intercept_1.json");
    auto& layer_1 = env->get_test_layer();
    // Interleave registration: per round, layer 0 owns name indices [i*10, i*10+5) and
    // layer 1 owns [i*10+5, i*10+10); 25 rounds -> 125 unknown functions per layer.
    for (uint32_t i = 0; i < 25; i++) {
        fill_custom_functions(layer_0.custom_physical_device_functions, fake_function_names, custom_physical_device_functions{}, 5,
                              i * 10);
        fill_custom_functions(layer_1.custom_physical_device_functions, fake_function_names, custom_physical_device_functions{}, 5,
                              i * 10 + 5);
    }
    InstWrapper inst{env->vulkan_functions};
    inst.CheckCreate();
    VkPhysicalDevice phys_dev = inst.GetPhysDev();
    // All 250 functions (125 per layer) must be resolvable and behave correctly.
    check_custom_functions(env->vulkan_functions, inst.inst, phys_dev, custom_physical_device_functions{}, fake_function_names,
                           250);
}
using custom_device_functions = custom_functions<VkDevice>;
TEST_F(UnknownFunction, DeviceFunctionFromGetInstanceProcAddr) {