Add files

This commit is contained in:
2025-01-29 10:55:49 +01:00
commit 98fba39c36
1017 changed files with 403715 additions and 0 deletions

View File

@@ -0,0 +1,43 @@
#include <kinc/graphics4/compute.h>
#include <kinc/math/core.h>
// Null/stub backend: compute shaders are not supported here, so shader
// creation and destruction are no-ops. The shader source is ignored.
void kinc_compute_shader_init(kinc_compute_shader_t *shader, void *source, int length) {}
void kinc_compute_shader_destroy(kinc_compute_shader_t *shader) {}
kinc_compute_constant_location_t kinc_compute_shader_get_constant_location(kinc_compute_shader_t *shader, const char *name) {
	// Stub backend: constant locations carry no real data, so hand back a
	// zeroed placeholder regardless of the requested name.
	kinc_compute_constant_location_t location = {.impl = {.nothing = 0}};
	return location;
}
kinc_compute_texture_unit_t kinc_compute_shader_get_texture_unit(kinc_compute_shader_t *shader, const char *name) {
	// Stub backend: texture units are placeholders; the name is ignored.
	kinc_compute_texture_unit_t unit = {.impl = {.nothing = 0}};
	return unit;
}
// The remaining compute entry points are all no-ops in this null backend:
// uniform uploads, texture bindings, sampler state and dispatch calls are
// silently ignored so callers can run unmodified on targets without compute.
void kinc_compute_set_bool(kinc_compute_constant_location_t location, bool value) {}
void kinc_compute_set_int(kinc_compute_constant_location_t location, int value) {}
void kinc_compute_set_float(kinc_compute_constant_location_t location, float value) {}
void kinc_compute_set_float2(kinc_compute_constant_location_t location, float value1, float value2) {}
void kinc_compute_set_float3(kinc_compute_constant_location_t location, float value1, float value2, float value3) {}
void kinc_compute_set_float4(kinc_compute_constant_location_t location, float value1, float value2, float value3, float value4) {}
void kinc_compute_set_floats(kinc_compute_constant_location_t location, float *values, int count) {}
void kinc_compute_set_matrix4(kinc_compute_constant_location_t location, kinc_matrix4x4_t *value) {}
void kinc_compute_set_matrix3(kinc_compute_constant_location_t location, kinc_matrix3x3_t *value) {}
void kinc_compute_set_texture(kinc_compute_texture_unit_t unit, struct kinc_g4_texture *texture, kinc_compute_access_t access) {}
void kinc_compute_set_render_target(kinc_compute_texture_unit_t unit, struct kinc_g4_render_target *texture, kinc_compute_access_t access) {}
void kinc_compute_set_sampled_texture(kinc_compute_texture_unit_t unit, struct kinc_g4_texture *texture) {}
void kinc_compute_set_sampled_render_target(kinc_compute_texture_unit_t unit, struct kinc_g4_render_target *target) {}
void kinc_compute_set_sampled_depth_from_render_target(kinc_compute_texture_unit_t unit, struct kinc_g4_render_target *target) {}
void kinc_compute_set_texture_addressing(kinc_compute_texture_unit_t unit, kinc_g4_texture_direction_t dir, kinc_g4_texture_addressing_t addressing) {}
void kinc_compute_set_texture_magnification_filter(kinc_compute_texture_unit_t unit, kinc_g4_texture_filter_t filter) {}
void kinc_compute_set_texture_minification_filter(kinc_compute_texture_unit_t unit, kinc_g4_texture_filter_t filter) {}
void kinc_compute_set_texture_mipmap_filter(kinc_compute_texture_unit_t unit, kinc_g4_mipmap_filter_t filter) {}
void kinc_compute_set_texture3d_addressing(kinc_compute_texture_unit_t unit, kinc_g4_texture_direction_t dir, kinc_g4_texture_addressing_t addressing) {}
void kinc_compute_set_texture3d_magnification_filter(kinc_compute_texture_unit_t unit, kinc_g4_texture_filter_t filter) {}
void kinc_compute_set_texture3d_minification_filter(kinc_compute_texture_unit_t unit, kinc_g4_texture_filter_t filter) {}
void kinc_compute_set_texture3d_mipmap_filter(kinc_compute_texture_unit_t unit, kinc_g4_mipmap_filter_t filter) {}
void kinc_compute_set_shader(kinc_compute_shader_t *shader) {}
void kinc_compute(int x, int y, int z) {}

View File

@@ -0,0 +1,13 @@
#pragma once
// Backend-specific implementation structs for the null compute backend.
// Each contains a single placeholder member because C requires structs to
// have at least one field; the value is never meaningful.
typedef struct {
	int nothing;
} kinc_compute_constant_location_impl_t;
typedef struct {
	int nothing;
} kinc_compute_texture_unit_impl_t;
typedef struct {
	int nothing;
} kinc_compute_shader_impl_t;

View File

@@ -0,0 +1,3 @@
#pragma once
#include <vulkan/vulkan_core.h>

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,897 @@
#include "kinc/graphics5/sampler.h"
#include "vulkan.h"
#include <kinc/graphics5/commandlist.h>
#include <kinc/graphics5/indexbuffer.h>
#include <kinc/graphics5/pipeline.h>
#include <kinc/graphics5/vertexbuffer.h>
#include <kinc/log.h>
#include <kinc/window.h>
#include <vulkan/vulkan_core.h>
// Texture / render-target binding slots shared with the rest of the Vulkan backend.
extern kinc_g5_texture_t *vulkanTextures[16];
extern kinc_g5_render_target_t *vulkanRenderTargets[16];
// Helpers implemented elsewhere in the Vulkan backend.
VkDescriptorSet getDescriptorSet(void);
bool memory_type_from_properties(uint32_t typeBits, VkFlags requirements_mask, uint32_t *typeIndex);
void setImageLayout(VkCommandBuffer _buffer, VkImage image, VkImageAspectFlags aspectMask, VkImageLayout oldImageLayout, VkImageLayout newImageLayout);
// State of the render pass currently being recorded.
VkRenderPassBeginInfo currentRenderPassBeginInfo;
VkPipeline currentVulkanPipeline;
// Up to 8 simultaneously bound render targets; NULL means "swapchain framebuffer".
kinc_g5_render_target_t *currentRenderTargets[8] = {NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL};
// True while the current pass renders into the swapchain framebuffer.
static bool onBackBuffer = false;
static uint32_t lastVertexConstantBufferOffset = 0;
static uint32_t lastFragmentConstantBufferOffset = 0;
static kinc_g5_pipeline_t *currentPipeline = NULL;
// Framebuffers/render passes created on the fly for multi-render-target passes;
// they are destroyed at the start of the next frame (see kinc_g5_command_list_begin).
static int mrtIndex = 0;
static VkFramebuffer mrtFramebuffer[16];
static VkRenderPass mrtRenderPass[16];
static void endPass(kinc_g5_command_list_t *list) {
	// Close the active render pass and forget every bound texture and
	// render target so stale bindings cannot leak into the next pass.
	vkCmdEndRenderPass(list->impl._buffer);
	int slot = 0;
	while (slot < 16) {
		vulkanTextures[slot] = NULL;
		vulkanRenderTargets[slot] = NULL;
		slot++;
	}
}
static int formatSize(VkFormat format) {
	// Bytes per pixel for the formats the readback path understands;
	// 4 covers the common 8-bit-per-channel RGBA/BGRA cases.
	if (format == VK_FORMAT_R32G32B32A32_SFLOAT) {
		return 16;
	}
	if (format == VK_FORMAT_R16G16B16A16_SFLOAT) {
		return 8;
	}
	if (format == VK_FORMAT_R16_SFLOAT) {
		return 2;
	}
	if (format == VK_FORMAT_R8_UNORM) {
		return 1;
	}
	return 4;
}
// Records an image layout transition into the shared setup command buffer,
// lazily allocating and beginning that buffer on first use. The barrier is
// submitted later by flush_init_cmd(). Access masks are derived from the
// source/destination layouts; stage masks default to ALL_COMMANDS and are
// narrowed for attachment layouts.
void set_image_layout(VkImage image, VkImageAspectFlags aspectMask, VkImageLayout old_image_layout, VkImageLayout new_image_layout) {
	VkResult err;
	if (vk_ctx.setup_cmd == VK_NULL_HANDLE) {
		VkCommandBufferAllocateInfo cmd = {0};
		cmd.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
		cmd.pNext = NULL;
		cmd.commandPool = vk_ctx.cmd_pool;
		cmd.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
		cmd.commandBufferCount = 1;
		err = vkAllocateCommandBuffers(vk_ctx.device, &cmd, &vk_ctx.setup_cmd);
		assert(!err);
		VkCommandBufferBeginInfo cmd_buf_info = {0};
		cmd_buf_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
		cmd_buf_info.pNext = NULL;
		cmd_buf_info.flags = 0;
		cmd_buf_info.pInheritanceInfo = NULL;
		err = vkBeginCommandBuffer(vk_ctx.setup_cmd, &cmd_buf_info);
		assert(!err);
	}
	VkImageMemoryBarrier image_memory_barrier = {0};
	image_memory_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
	image_memory_barrier.pNext = NULL;
	image_memory_barrier.srcAccessMask = 0;
	image_memory_barrier.dstAccessMask = 0;
	image_memory_barrier.oldLayout = old_image_layout;
	image_memory_barrier.newLayout = new_image_layout;
	image_memory_barrier.image = image;
	image_memory_barrier.subresourceRange.aspectMask = aspectMask;
	image_memory_barrier.subresourceRange.baseMipLevel = 0;
	image_memory_barrier.subresourceRange.levelCount = 1;
	image_memory_barrier.subresourceRange.baseArrayLayer = 0;
	image_memory_barrier.subresourceRange.layerCount = 1;
	if (old_image_layout != VK_IMAGE_LAYOUT_UNDEFINED) {
		image_memory_barrier.srcAccessMask = VK_ACCESS_MEMORY_READ_BIT;
	}
	if (new_image_layout == VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) {
		image_memory_barrier.dstAccessMask = VK_ACCESS_MEMORY_READ_BIT;
	}
	if (new_image_layout == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) {
		// The image becomes a transfer *destination*, so the access that must
		// be made visible is the upcoming transfer write (previously this was
		// incorrectly VK_ACCESS_TRANSFER_READ_BIT).
		image_memory_barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
	}
	if (new_image_layout == VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL) {
		image_memory_barrier.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
	}
	if (new_image_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL) {
		image_memory_barrier.dstAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
	}
	if (new_image_layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) {
		// Make sure any Copy or CPU writes to image are flushed
		image_memory_barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_INPUT_ATTACHMENT_READ_BIT;
	}
	VkPipelineStageFlags srcStageMask = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
	VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
	if (new_image_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL) {
		dstStageMask = VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
	}
	if (new_image_layout == VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL) {
		dstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
	}
	vkCmdPipelineBarrier(vk_ctx.setup_cmd, srcStageMask, dstStageMask, 0, 0, NULL, 0, NULL, 1, &image_memory_barrier);
}
void setup_init_cmd() {
if (vk_ctx.setup_cmd == VK_NULL_HANDLE) {
VkCommandBufferAllocateInfo cmd = {0};
cmd.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
cmd.pNext = NULL;
cmd.commandPool = vk_ctx.cmd_pool;
cmd.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
cmd.commandBufferCount = 1;
VkResult err = vkAllocateCommandBuffers(vk_ctx.device, &cmd, &vk_ctx.setup_cmd);
assert(!err);
VkCommandBufferBeginInfo cmd_buf_info = {0};
cmd_buf_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
cmd_buf_info.pNext = NULL;
cmd_buf_info.flags = 0;
cmd_buf_info.pInheritanceInfo = NULL;
err = vkBeginCommandBuffer(vk_ctx.setup_cmd, &cmd_buf_info);
assert(!err);
}
}
void flush_init_cmd() {
VkResult err;
if (vk_ctx.setup_cmd == VK_NULL_HANDLE)
return;
err = vkEndCommandBuffer(vk_ctx.setup_cmd);
assert(!err);
const VkCommandBuffer cmd_bufs[] = {vk_ctx.setup_cmd};
VkFence nullFence = {VK_NULL_HANDLE};
VkSubmitInfo submit_info = {0};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.pNext = NULL;
submit_info.waitSemaphoreCount = 0;
submit_info.pWaitSemaphores = NULL;
submit_info.pWaitDstStageMask = NULL;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = cmd_bufs;
submit_info.signalSemaphoreCount = 0;
submit_info.pSignalSemaphores = NULL;
err = vkQueueSubmit(vk_ctx.queue, 1, &submit_info, nullFence);
assert(!err);
err = vkQueueWaitIdle(vk_ctx.queue);
assert(!err);
vkFreeCommandBuffers(vk_ctx.device, vk_ctx.cmd_pool, 1, cmd_bufs);
vk_ctx.setup_cmd = VK_NULL_HANDLE;
}
// Resets viewport and scissor to cover the whole current target: the window
// when rendering to the swapchain, otherwise the bound render target.
void set_viewport_and_scissor(kinc_g5_command_list_t *list) {
	int width;
	int height;
	if (currentRenderTargets[0] == NULL || currentRenderTargets[0]->framebuffer_index >= 0) {
		width = kinc_window_width(vk_ctx.current_window);
		height = kinc_window_height(vk_ctx.current_window);
	}
	else {
		width = currentRenderTargets[0]->width;
		height = currentRenderTargets[0]->height;
	}
	// Negative-height viewport flips Y so clip space matches the other backends.
	VkViewport viewport = {
	    .x = 0.0f,
	    .y = (float)height,
	    .width = (float)width,
	    .height = -(float)height,
	    .minDepth = 0.0f,
	    .maxDepth = 1.0f,
	};
	VkRect2D scissor = {
	    .offset = {.x = 0, .y = 0},
	    .extent = {.width = width, .height = height},
	};
	vkCmdSetViewport(list->impl._buffer, 0, 1, &viewport);
	vkCmdSetScissor(list->impl._buffer, 0, 1, &scissor);
}
// Allocates the list's primary command buffer and its completion fence.
void kinc_g5_command_list_init(kinc_g5_command_list_t *list) {
	VkCommandBufferAllocateInfo alloc_info = {
	    .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,
	    .pNext = NULL,
	    .commandPool = vk_ctx.cmd_pool,
	    .level = VK_COMMAND_BUFFER_LEVEL_PRIMARY,
	    .commandBufferCount = 1,
	};
	VkResult result = vkAllocateCommandBuffers(vk_ctx.device, &alloc_info, &list->impl._buffer);
	assert(!result);
	// The fence starts signaled so the first begin() does not block.
	VkFenceCreateInfo fence_info = {
	    .sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO,
	    .pNext = NULL,
	    .flags = VK_FENCE_CREATE_SIGNALED_BIT,
	};
	result = vkCreateFence(vk_ctx.device, &fence_info, NULL, &list->impl.fence);
	assert(!result);
	list->impl._indexCount = 0;
}
// Returns the command buffer to the pool and destroys the per-list fence.
void kinc_g5_command_list_destroy(kinc_g5_command_list_t *list) {
	VkCommandBuffer buffers[1] = {list->impl._buffer};
	vkFreeCommandBuffers(vk_ctx.device, vk_ctx.cmd_pool, 1, buffers);
	vkDestroyFence(vk_ctx.device, list->impl.fence, NULL);
}
// Begins recording for the frame: waits for the previous submission of this
// list, resets the buffer, transitions the swapchain image back to
// color-attachment layout and opens the framebuffer render pass. Also frees
// the MRT framebuffers/render passes created on the fly last frame.
void kinc_g5_command_list_begin(kinc_g5_command_list_t *list) {
	// Wait until the GPU has finished with this list's previous submission.
	VkResult err = vkWaitForFences(vk_ctx.device, 1, &list->impl.fence, VK_TRUE, UINT64_MAX);
	assert(!err);
	vkResetCommandBuffer(list->impl._buffer, 0);
	VkCommandBufferBeginInfo cmd_buf_info = {0};
	cmd_buf_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
	cmd_buf_info.pNext = NULL;
	cmd_buf_info.flags = 0;
	cmd_buf_info.pInheritanceInfo = NULL;
	// Clear to opaque black; a second clear value is used when a depth buffer exists.
	VkClearValue clear_values[2];
	memset(clear_values, 0, sizeof(VkClearValue) * 2);
	clear_values[0].color.float32[0] = 0.0f;
	clear_values[0].color.float32[1] = 0.0f;
	clear_values[0].color.float32[2] = 0.0f;
	clear_values[0].color.float32[3] = 1.0f;
	if (vk_ctx.windows[vk_ctx.current_window].depth_bits > 0) {
		clear_values[1].depthStencil.depth = 1.0;
		clear_values[1].depthStencil.stencil = 0;
	}
	VkRenderPassBeginInfo rp_begin = {0};
	rp_begin.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
	rp_begin.pNext = NULL;
	rp_begin.renderPass = vk_ctx.windows[vk_ctx.current_window].framebuffer_render_pass;
	rp_begin.framebuffer = vk_ctx.windows[vk_ctx.current_window].framebuffers[vk_ctx.windows[vk_ctx.current_window].current_image];
	rp_begin.renderArea.offset.x = 0;
	rp_begin.renderArea.offset.y = 0;
	rp_begin.renderArea.extent.width = vk_ctx.windows[vk_ctx.current_window].width;
	rp_begin.renderArea.extent.height = vk_ctx.windows[vk_ctx.current_window].height;
	rp_begin.clearValueCount = vk_ctx.windows[vk_ctx.current_window].depth_bits > 0 ? 2 : 1;
	rp_begin.pClearValues = clear_values;
	err = vkBeginCommandBuffer(list->impl._buffer, &cmd_buf_info);
	assert(!err);
	// Transition the swapchain image from present layout back to
	// color-attachment layout before rendering into it.
	VkImageMemoryBarrier prePresentBarrier = {0};
	prePresentBarrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
	prePresentBarrier.pNext = NULL;
	prePresentBarrier.srcAccessMask = 0;
	prePresentBarrier.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
	prePresentBarrier.oldLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
	prePresentBarrier.newLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
	prePresentBarrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
	prePresentBarrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
	prePresentBarrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
	prePresentBarrier.subresourceRange.baseMipLevel = 0;
	prePresentBarrier.subresourceRange.levelCount = 1;
	prePresentBarrier.subresourceRange.baseArrayLayer = 0;
	prePresentBarrier.subresourceRange.layerCount = 1;
	prePresentBarrier.image = vk_ctx.windows[vk_ctx.current_window].images[vk_ctx.windows[vk_ctx.current_window].current_image];
	VkImageMemoryBarrier *pmemory_barrier = &prePresentBarrier;
	vkCmdPipelineBarrier(list->impl._buffer, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, 0, 0, NULL, 0, NULL, 1,
	                     pmemory_barrier);
	vkCmdBeginRenderPass(list->impl._buffer, &rp_begin, VK_SUBPASS_CONTENTS_INLINE);
	currentRenderPassBeginInfo = rp_begin;
	set_viewport_and_scissor(list);
	onBackBuffer = true;
	// Free the throwaway MRT objects created during the previous frame; the
	// fence wait above guarantees the GPU is done with them.
	for (int i = 0; i < mrtIndex; ++i) {
		vkDestroyFramebuffer(vk_ctx.device, mrtFramebuffer[i], NULL);
		vkDestroyRenderPass(vk_ctx.device, mrtRenderPass[i], NULL);
	}
	mrtIndex = 0;
}
// Ends recording: closes the render pass, transitions the swapchain image to
// present layout and finishes the command buffer.
void kinc_g5_command_list_end(kinc_g5_command_list_t *list) {
	vkCmdEndRenderPass(list->impl._buffer);
	VkImageMemoryBarrier present_barrier = {
	    .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
	    .pNext = NULL,
	    .srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
	    .dstAccessMask = VK_ACCESS_MEMORY_READ_BIT,
	    .oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
	    .newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,
	    .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
	    .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
	    .subresourceRange =
	        {
	            .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
	            .baseMipLevel = 0,
	            .levelCount = 1,
	            .baseArrayLayer = 0,
	            .layerCount = 1,
	        },
	    .image = vk_ctx.windows[vk_ctx.current_window].images[vk_ctx.windows[vk_ctx.current_window].current_image],
	};
	vkCmdPipelineBarrier(list->impl._buffer, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0, 0, NULL, 0, NULL, 1,
	                     &present_barrier);
	VkResult result = vkEndCommandBuffer(list->impl._buffer);
	assert(!result);
}
// Clears the currently bound attachments inside the active render pass.
// `color` is packed 0xAARRGGBB. Depth/stencil is cleared only when the target
// actually owns a depth buffer. If nothing ends up selected, the function
// returns without recording a command, since calling vkCmdClearAttachments
// with attachmentCount == 0 is invalid usage.
void kinc_g5_command_list_clear(kinc_g5_command_list_t *list, struct kinc_g5_render_target *renderTarget, unsigned flags, unsigned color, float depth,
                                int stencil) {
	VkClearRect clearRect = {0};
	clearRect.rect.offset.x = 0;
	clearRect.rect.offset.y = 0;
	clearRect.rect.extent.width = renderTarget->width;
	clearRect.rect.extent.height = renderTarget->height;
	clearRect.baseArrayLayer = 0;
	clearRect.layerCount = 1;
	int count = 0;
	VkClearAttachment attachments[2];
	if (flags & KINC_G5_CLEAR_COLOR) {
		// Unpack 0xAARRGGBB into normalized floats.
		VkClearColorValue clearColor = {0};
		clearColor.float32[0] = ((color & 0x00ff0000) >> 16) / 255.0f;
		clearColor.float32[1] = ((color & 0x0000ff00) >> 8) / 255.0f;
		clearColor.float32[2] = (color & 0x000000ff) / 255.0f;
		clearColor.float32[3] = ((color & 0xff000000) >> 24) / 255.0f;
		attachments[count].aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
		attachments[count].colorAttachment = 0;
		attachments[count].clearValue.color = clearColor;
		count++;
	}
	if (((flags & KINC_G5_CLEAR_DEPTH) || (flags & KINC_G5_CLEAR_STENCIL)) && renderTarget->impl.depthBufferBits > 0) {
		attachments[count].aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT; // | VK_IMAGE_ASPECT_STENCIL_BIT;
		attachments[count].clearValue.depthStencil.depth = depth;
		attachments[count].clearValue.depthStencil.stencil = (uint32_t)stencil;
		count++;
	}
	if (count == 0) {
		// Nothing requested (or depth clear asked for on a depth-less target).
		return;
	}
	vkCmdClearAttachments(list->impl._buffer, count, attachments, 1, &clearRect);
}
// No-ops in this backend: the required layout transitions are recorded as
// part of begin()/end() and the render pass setup instead.
void kinc_g5_command_list_render_target_to_framebuffer_barrier(kinc_g5_command_list_t *list, struct kinc_g5_render_target *renderTarget) {}
void kinc_g5_command_list_framebuffer_to_render_target_barrier(kinc_g5_command_list_t *list, struct kinc_g5_render_target *renderTarget) {}
// Draws all indices of the currently bound index buffer (count cached by
// kinc_g5_command_list_set_index_buffer).
void kinc_g5_command_list_draw_indexed_vertices(kinc_g5_command_list_t *list) {
	kinc_g5_command_list_draw_indexed_vertices_from_to(list, 0, list->impl._indexCount);
}
// Draws `count` indices starting at index `start`.
void kinc_g5_command_list_draw_indexed_vertices_from_to(kinc_g5_command_list_t *list, int start, int count) {
	vkCmdDrawIndexed(list->impl._buffer, count, 1, start, 0, 0);
}
// Same as above, additionally offsetting every fetched vertex by `vertex_offset`.
void kinc_g5_command_list_draw_indexed_vertices_from_to_from(kinc_g5_command_list_t *list, int start, int count, int vertex_offset) {
	vkCmdDrawIndexed(list->impl._buffer, count, 1, start, vertex_offset, 0);
}
// Instanced draw over the whole bound index buffer.
void kinc_g5_command_list_draw_indexed_vertices_instanced(kinc_g5_command_list_t *list, int instanceCount) {
	kinc_g5_command_list_draw_indexed_vertices_instanced_from_to(list, instanceCount, 0, list->impl._indexCount);
}
// Instanced draw over an index range.
void kinc_g5_command_list_draw_indexed_vertices_instanced_from_to(kinc_g5_command_list_t *list, int instanceCount, int start, int count) {
	vkCmdDrawIndexed(list->impl._buffer, count, instanceCount, start, 0, 0);
}
// Sets the viewport, flipping it vertically (negative height, shifted origin)
// so clip space matches the engine's other backends.
void kinc_g5_command_list_viewport(kinc_g5_command_list_t *list, int x, int y, int width, int height) {
	VkViewport viewport = {
	    .x = (float)x,
	    .y = (float)y + (float)height,
	    .width = (float)width,
	    .height = -(float)height,
	    .minDepth = 0.0f,
	    .maxDepth = 1.0f,
	};
	vkCmdSetViewport(list->impl._buffer, 0, 1, &viewport);
}
// Restricts rendering to the given rectangle (window coordinates).
void kinc_g5_command_list_scissor(kinc_g5_command_list_t *list, int x, int y, int width, int height) {
	VkRect2D rect = {
	    .offset = {.x = x, .y = y},
	    .extent = {.width = width, .height = height},
	};
	vkCmdSetScissor(list->impl._buffer, 0, 1, &rect);
}
// Vulkan has no "scissor off" state, so disabling simply resets the scissor
// rectangle to cover the whole current target.
void kinc_g5_command_list_disable_scissor(kinc_g5_command_list_t *list) {
	kinc_g5_render_target_t *target = currentRenderTargets[0];
	VkRect2D scissor = {0};
	scissor.offset.x = 0;
	scissor.offset.y = 0;
	if (target == NULL || target->framebuffer_index >= 0) {
		// Rendering to the swapchain: use the window size.
		scissor.extent.width = kinc_window_width(vk_ctx.current_window);
		scissor.extent.height = kinc_window_height(vk_ctx.current_window);
	}
	else {
		scissor.extent.width = target->width;
		scissor.extent.height = target->height;
	}
	vkCmdSetScissor(list->impl._buffer, 0, 1, &scissor);
}
// Binds the graphics pipeline, choosing the variant that matches the pass
// type (swapchain framebuffer vs. offscreen render target), and resets the
// cached constant-buffer offsets.
void kinc_g5_command_list_set_pipeline(kinc_g5_command_list_t *list, struct kinc_g5_pipeline *pipeline) {
	currentPipeline = pipeline;
	lastVertexConstantBufferOffset = 0;
	lastFragmentConstantBufferOffset = 0;
	VkPipeline vk_pipeline = onBackBuffer ? pipeline->impl.framebuffer_pipeline : pipeline->impl.rendertarget_pipeline;
	currentVulkanPipeline = vk_pipeline;
	vkCmdBindPipeline(list->impl._buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, vk_pipeline);
}
// Records the four blend-constant components for pipelines that use
// VK_BLEND_FACTOR_CONSTANT_*.
void kinc_g5_command_list_set_blend_constant(kinc_g5_command_list_t *list, float r, float g, float b, float a) {
	const float constants[4] = {r, g, b, a};
	vkCmdSetBlendConstants(list->impl._buffer, constants);
}
// Binds `count` vertex buffers starting at binding 0. `offsets_` is given in
// vertices and converted to byte offsets using each buffer's stride.
void kinc_g5_command_list_set_vertex_buffers(kinc_g5_command_list_t *list, struct kinc_g5_vertex_buffer **vertexBuffers, int *offsets_, int count) {
// this seems to be a no-op function?
// kinc_g5_internal_vertex_buffer_set(vertexBuffers[0], 0);
// MSVC has no VLAs, so fall back to alloca on Windows.
#ifdef KORE_WINDOWS
	VkBuffer *buffers = (VkBuffer *)alloca(sizeof(VkBuffer) * count);
	VkDeviceSize *offsets = (VkDeviceSize *)alloca(sizeof(VkDeviceSize) * count);
#else
	VkBuffer buffers[count];
	VkDeviceSize offsets[count];
#endif
	for (int i = 0; i < count; ++i) {
		buffers[i] = vertexBuffers[i]->impl.vertices.buf;
		// Vertex offset -> byte offset.
		offsets[i] = (VkDeviceSize)(offsets_[i] * kinc_g5_vertex_buffer_stride(vertexBuffers[i]));
	}
	vkCmdBindVertexBuffers(list->impl._buffer, 0, count, buffers, offsets);
}
// Binds the index buffer and caches its element count so
// kinc_g5_command_list_draw_indexed_vertices() can draw the whole buffer.
void kinc_g5_command_list_set_index_buffer(kinc_g5_command_list_t *list, struct kinc_g5_index_buffer *indexBuffer) {
	list->impl._indexCount = kinc_g5_index_buffer_count(indexBuffer);
	VkIndexType index_type = indexBuffer->impl.format == KINC_G5_INDEX_BUFFER_FORMAT_16BIT ? VK_INDEX_TYPE_UINT16 : VK_INDEX_TYPE_UINT32;
	vkCmdBindIndexBuffer(list->impl._buffer, indexBuffer->impl.buf, 0, index_type);
}
// Switches rendering back to the swapchain framebuffer: resets viewport and
// scissor to the window, and — if an offscreen pass is active — ends it and
// re-opens the framebuffer render pass, rebinding the current pipeline's
// framebuffer variant. The `target` parameter is unused; the swapchain state
// comes from vk_ctx.
void kinc_internal_restore_render_target(kinc_g5_command_list_t *list, struct kinc_g5_render_target *target) {
	VkViewport viewport;
	memset(&viewport, 0, sizeof(viewport));
	viewport.x = 0;
	// Flipped viewport (negative height) to match the other backends.
	viewport.y = (float)kinc_window_height(vk_ctx.current_window);
	viewport.width = (float)kinc_window_width(vk_ctx.current_window);
	viewport.height = -(float)kinc_window_height(vk_ctx.current_window);
	viewport.minDepth = (float)0.0f;
	viewport.maxDepth = (float)1.0f;
	vkCmdSetViewport(list->impl._buffer, 0, 1, &viewport);
	VkRect2D scissor;
	memset(&scissor, 0, sizeof(scissor));
	scissor.extent.width = kinc_window_width(vk_ctx.current_window);
	scissor.extent.height = kinc_window_height(vk_ctx.current_window);
	scissor.offset.x = 0;
	scissor.offset.y = 0;
	vkCmdSetScissor(list->impl._buffer, 0, 1, &scissor);
	// Already on the swapchain pass: nothing more to do.
	if (onBackBuffer) {
		return;
	}
	endPass(list);
	currentRenderTargets[0] = NULL;
	onBackBuffer = true;
	VkClearValue clear_values[2];
	memset(clear_values, 0, sizeof(VkClearValue) * 2);
	clear_values[0].color.float32[0] = 0.0f;
	clear_values[0].color.float32[1] = 0.0f;
	clear_values[0].color.float32[2] = 0.0f;
	clear_values[0].color.float32[3] = 1.0f;
	clear_values[1].depthStencil.depth = 1.0;
	clear_values[1].depthStencil.stencil = 0;
	VkRenderPassBeginInfo rp_begin = {0};
	rp_begin.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
	rp_begin.pNext = NULL;
	rp_begin.renderPass = vk_ctx.windows[vk_ctx.current_window].framebuffer_render_pass;
	rp_begin.framebuffer = vk_ctx.windows[vk_ctx.current_window].framebuffers[vk_ctx.windows[vk_ctx.current_window].current_image];
	rp_begin.renderArea.offset.x = 0;
	rp_begin.renderArea.offset.y = 0;
	rp_begin.renderArea.extent.width = kinc_window_width(vk_ctx.current_window);
	rp_begin.renderArea.extent.height = kinc_window_height(vk_ctx.current_window);
	rp_begin.clearValueCount = 2;
	rp_begin.pClearValues = clear_values;
	vkCmdBeginRenderPass(list->impl._buffer, &rp_begin, VK_SUBPASS_CONTENTS_INLINE);
	currentRenderPassBeginInfo = rp_begin;
	// Rebind the pipeline in its framebuffer variant, if one is set.
	if (currentPipeline != NULL) {
		currentVulkanPipeline = currentPipeline->impl.framebuffer_pipeline;
		vkCmdBindPipeline(list->impl._buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, currentPipeline->impl.framebuffer_pipeline);
	}
}
// Switches rendering to `count` offscreen render targets. A target with a
// framebuffer_index represents the swapchain, in which case we delegate to
// kinc_internal_restore_render_target. Single-target passes reuse the
// pre-built render pass/framebuffer; multi-target passes build a transient
// render pass and framebuffer, destroyed at the start of the next frame.
// Fix: the clear-value array was only partially zeroed
// (memset size was sizeof(VkClearValue) instead of sizeof(clear_values)),
// leaving elements 1..8 uninitialized.
void kinc_g5_command_list_set_render_targets(kinc_g5_command_list_t *list, struct kinc_g5_render_target **targets, int count) {
	if (targets[0]->framebuffer_index >= 0) {
		kinc_internal_restore_render_target(list, targets[0]);
		return;
	}
	endPass(list);
	for (int i = 0; i < count; ++i) {
		currentRenderTargets[i] = targets[i];
	}
	for (int i = count; i < 8; ++i) {
		currentRenderTargets[i] = NULL;
	}
	onBackBuffer = false;
	// One clear value per color target plus one for depth/stencil.
	VkClearValue clear_values[9];
	memset(clear_values, 0, sizeof(clear_values));
	for (int i = 0; i < count; ++i) {
		clear_values[i].color.float32[0] = 0.0f;
		clear_values[i].color.float32[1] = 0.0f;
		clear_values[i].color.float32[2] = 0.0f;
		clear_values[i].color.float32[3] = 1.0f;
	}
	clear_values[count].depthStencil.depth = 1.0;
	clear_values[count].depthStencil.stencil = 0;
	VkRenderPassBeginInfo rp_begin = {0};
	rp_begin.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
	rp_begin.pNext = NULL;
	rp_begin.renderArea.offset.x = 0;
	rp_begin.renderArea.offset.y = 0;
	rp_begin.renderArea.extent.width = targets[0]->width;
	rp_begin.renderArea.extent.height = targets[0]->height;
	rp_begin.clearValueCount = count + 1;
	rp_begin.pClearValues = clear_values;
	if (count == 1) {
		// Single target: the render pass and framebuffer already exist.
		if (targets[0]->impl.depthBufferBits > 0) {
			rp_begin.renderPass = vk_ctx.windows[vk_ctx.current_window].rendertarget_render_pass_with_depth;
		}
		else {
			rp_begin.renderPass = vk_ctx.windows[vk_ctx.current_window].rendertarget_render_pass;
		}
		rp_begin.framebuffer = targets[0]->impl.framebuffer;
	}
	else {
		// Multiple targets: build a matching render pass and framebuffer on the fly.
		VkAttachmentDescription attachments[9];
		for (int i = 0; i < count; ++i) {
			attachments[i].format = targets[i]->impl.format;
			attachments[i].samples = VK_SAMPLE_COUNT_1_BIT;
			attachments[i].loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
			attachments[i].storeOp = VK_ATTACHMENT_STORE_OP_STORE;
			attachments[i].stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
			attachments[i].stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
			attachments[i].initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
			attachments[i].finalLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
			attachments[i].flags = 0;
		}
		if (targets[0]->impl.depthBufferBits > 0) {
			attachments[count].format = VK_FORMAT_D16_UNORM;
			attachments[count].samples = VK_SAMPLE_COUNT_1_BIT;
			attachments[count].loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
			attachments[count].storeOp = VK_ATTACHMENT_STORE_OP_STORE;
			attachments[count].stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
			attachments[count].stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
			attachments[count].initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
			attachments[count].finalLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
			attachments[count].flags = 0;
		}
		VkAttachmentReference color_references[8];
		for (int i = 0; i < count; ++i) {
			color_references[i].attachment = i;
			color_references[i].layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
		}
		VkAttachmentReference depth_reference = {0};
		depth_reference.attachment = count;
		depth_reference.layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
		VkSubpassDescription subpass = {0};
		subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
		subpass.flags = 0;
		subpass.inputAttachmentCount = 0;
		subpass.pInputAttachments = NULL;
		subpass.colorAttachmentCount = count;
		subpass.pColorAttachments = color_references;
		subpass.pResolveAttachments = NULL;
		subpass.pDepthStencilAttachment = targets[0]->impl.depthBufferBits > 0 ? &depth_reference : NULL;
		subpass.preserveAttachmentCount = 0;
		subpass.pPreserveAttachments = NULL;
		VkSubpassDependency dependencies[2];
		memset(&dependencies, 0, sizeof(dependencies));
		// TODO: For multi-targets-rendering
		dependencies[0].srcSubpass = VK_SUBPASS_EXTERNAL;
		dependencies[0].dstSubpass = 0;
		dependencies[0].srcStageMask = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
		dependencies[0].dstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
		dependencies[0].srcAccessMask = VK_ACCESS_SHADER_READ_BIT;
		dependencies[0].dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
		dependencies[0].dependencyFlags = VK_DEPENDENCY_BY_REGION_BIT;
		dependencies[1].srcSubpass = 0;
		dependencies[1].dstSubpass = VK_SUBPASS_EXTERNAL;
		dependencies[1].srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
		dependencies[1].dstStageMask = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
		dependencies[1].srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
		dependencies[1].dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
		dependencies[1].dependencyFlags = VK_DEPENDENCY_BY_REGION_BIT;
		VkRenderPassCreateInfo rp_info = {0};
		rp_info.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
		rp_info.pNext = NULL;
		rp_info.attachmentCount = targets[0]->impl.depthBufferBits > 0 ? count + 1 : count;
		rp_info.pAttachments = attachments;
		rp_info.subpassCount = 1;
		rp_info.pSubpasses = &subpass;
		rp_info.dependencyCount = 2;
		rp_info.pDependencies = dependencies;
		VkResult err = vkCreateRenderPass(vk_ctx.device, &rp_info, NULL, &mrtRenderPass[mrtIndex]);
		assert(!err);
		VkImageView attachmentsViews[9];
		for (int i = 0; i < count; ++i) {
			attachmentsViews[i] = targets[i]->impl.sourceView;
		}
		if (targets[0]->impl.depthBufferBits > 0) {
			attachmentsViews[count] = targets[0]->impl.depthView;
		}
		VkFramebufferCreateInfo fbufCreateInfo = {0};
		fbufCreateInfo.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
		fbufCreateInfo.pNext = NULL;
		fbufCreateInfo.renderPass = mrtRenderPass[mrtIndex];
		fbufCreateInfo.attachmentCount = targets[0]->impl.depthBufferBits > 0 ? count + 1 : count;
		fbufCreateInfo.pAttachments = attachmentsViews;
		fbufCreateInfo.width = targets[0]->width;
		fbufCreateInfo.height = targets[0]->height;
		fbufCreateInfo.layers = 1;
		err = vkCreateFramebuffer(vk_ctx.device, &fbufCreateInfo, NULL, &mrtFramebuffer[mrtIndex]);
		assert(!err);
		rp_begin.renderPass = mrtRenderPass[mrtIndex];
		rp_begin.framebuffer = mrtFramebuffer[mrtIndex];
		mrtIndex++;
	}
	vkCmdBeginRenderPass(list->impl._buffer, &rp_begin, VK_SUBPASS_CONTENTS_INLINE);
	currentRenderPassBeginInfo = rp_begin;
	// Viewport/scissor cover the first target (all targets share its size).
	VkViewport viewport;
	memset(&viewport, 0, sizeof(viewport));
	viewport.x = 0;
	viewport.y = (float)targets[0]->height;
	viewport.width = (float)targets[0]->width;
	viewport.height = -(float)targets[0]->height;
	viewport.minDepth = (float)0.0f;
	viewport.maxDepth = (float)1.0f;
	vkCmdSetViewport(list->impl._buffer, 0, 1, &viewport);
	VkRect2D scissor;
	memset(&scissor, 0, sizeof(scissor));
	scissor.extent.width = targets[0]->width;
	scissor.extent.height = targets[0]->height;
	scissor.offset.x = 0;
	scissor.offset.y = 0;
	vkCmdSetScissor(list->impl._buffer, 0, 1, &scissor);
	// Rebind the pipeline in its render-target variant, if one is set.
	if (currentPipeline != NULL) {
		currentVulkanPipeline = currentPipeline->impl.rendertarget_pipeline;
		vkCmdBindPipeline(list->impl._buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, currentPipeline->impl.rendertarget_pipeline);
	}
}
// No-ops in this backend: buffers and textures are uploaded at set/lock time
// rather than through explicit command-list upload calls.
void kinc_g5_command_list_upload_index_buffer(kinc_g5_command_list_t *list, struct kinc_g5_index_buffer *buffer) {}
void kinc_g5_command_list_upload_vertex_buffer(kinc_g5_command_list_t *list, struct kinc_g5_vertex_buffer *buffer) {}
void kinc_g5_command_list_upload_texture(kinc_g5_command_list_t *list, struct kinc_g5_texture *texture) {}
// Synchronously reads back the contents of a render target into CPU memory.
// Ends the current render pass, records an image->buffer copy into a lazily
// created host-visible readback buffer, restarts the render pass, then flushes
// the command list, waits for the GPU and copies the pixels out of the mapping.
// NOTE(review): this stalls the pipeline (end/execute/wait/begin) - not a fast path.
void kinc_g5_command_list_get_render_target_pixels(kinc_g5_command_list_t *list, kinc_g5_render_target_t *render_target, uint8_t *data) {
	VkFormat format = render_target->impl.format;
	int formatByteSize = formatSize(format);

	// Create the readback buffer on first use and cache it on the render target.
	if (!render_target->impl.readbackBufferCreated) {
		VkBufferCreateInfo buf_info = {0};
		buf_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
		buf_info.pNext = NULL;
		buf_info.size = render_target->width * render_target->height * formatByteSize;
		buf_info.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;
		buf_info.flags = 0;
		vkCreateBuffer(vk_ctx.device, &buf_info, NULL, &render_target->impl.readbackBuffer);

		VkMemoryRequirements mem_reqs = {0};
		vkGetBufferMemoryRequirements(vk_ctx.device, render_target->impl.readbackBuffer, &mem_reqs);

		VkMemoryAllocateInfo mem_alloc;
		memset(&mem_alloc, 0, sizeof(VkMemoryAllocateInfo));
		mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
		mem_alloc.pNext = NULL;
		mem_alloc.allocationSize = mem_reqs.size;
		mem_alloc.memoryTypeIndex = 0;
		memory_type_from_properties(mem_reqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, &mem_alloc.memoryTypeIndex);
		vkAllocateMemory(vk_ctx.device, &mem_alloc, NULL, &render_target->impl.readbackMemory);
		vkBindBufferMemory(vk_ctx.device, render_target->impl.readbackBuffer, render_target->impl.readbackMemory, 0);

		render_target->impl.readbackBufferCreated = true;
	}

	// Transfer operations are not allowed inside a render pass.
	vkCmdEndRenderPass(list->impl._buffer);
	setImageLayout(list->impl._buffer, render_target->impl.sourceImage, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
	               VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);

	VkBufferImageCopy region;
	region.bufferOffset = 0;
	region.bufferRowLength = render_target->width;
	region.bufferImageHeight = render_target->height;
	region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
	region.imageSubresource.baseArrayLayer = 0;
	region.imageSubresource.layerCount = 1;
	region.imageSubresource.mipLevel = 0;
	region.imageOffset.x = 0;
	region.imageOffset.y = 0;
	region.imageOffset.z = 0;
	region.imageExtent.width = (uint32_t)render_target->width;
	region.imageExtent.height = (uint32_t)render_target->height;
	region.imageExtent.depth = 1;
	vkCmdCopyImageToBuffer(list->impl._buffer, render_target->impl.sourceImage, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, render_target->impl.readbackBuffer, 1,
	                       &region);

	// Restore the attachment layout and resume the interrupted render pass.
	setImageLayout(list->impl._buffer, render_target->impl.sourceImage, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
	               VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
	vkCmdBeginRenderPass(list->impl._buffer, &currentRenderPassBeginInfo, VK_SUBPASS_CONTENTS_INLINE);

	// Submit everything recorded so far and wait until the copy has landed.
	kinc_g5_command_list_end(list);
	kinc_g5_command_list_execute(list);
	kinc_g5_command_list_wait_for_execution_to_finish(list);
	kinc_g5_command_list_begin(list);

	// Read the pixels out of the mapped readback buffer.
	// Fix: size the copy with width/height so it matches the buffer allocation
	// above; the previous code used texWidth/texHeight, which can exceed the
	// allocation and overrun the mapping when the texture size is padded.
	void *p;
	vkMapMemory(vk_ctx.device, render_target->impl.readbackMemory, 0, VK_WHOLE_SIZE, 0, (void **)&p);
	memcpy(data, p, render_target->width * render_target->height * formatByteSize);
	vkUnmapMemory(vk_ctx.device, render_target->impl.readbackMemory);
}
// No-op: in this backend layout transitions are expressed through
// render-pass attachment descriptions rather than explicit barriers.
void kinc_g5_command_list_texture_to_render_target_barrier(kinc_g5_command_list_t *list, struct kinc_g5_render_target *renderTarget) {
// render-passes are used to transition render-targets
}
// No-op: the reverse transition is likewise handled by render-pass layouts.
void kinc_g5_command_list_render_target_to_texture_barrier(kinc_g5_command_list_t *list, struct kinc_g5_render_target *renderTarget) {
// render-passes are used to transition render-targets
}
// Records the vertex-stage dynamic offset only. The actual descriptor-set
// bind (with both offsets) is performed by
// kinc_g5_command_list_set_fragment_constant_buffer, which is expected to be
// called afterwards.
void kinc_g5_command_list_set_vertex_constant_buffer(kinc_g5_command_list_t *list, struct kinc_g5_constant_buffer *buffer, int offset, size_t size) {
lastVertexConstantBufferOffset = offset;
}
// Records the fragment-stage dynamic offset and binds the descriptor set
// using both the previously recorded vertex offset and this fragment offset.
// NOTE(review): dereferences the global currentPipeline - a pipeline must
// have been set before this is called.
void kinc_g5_command_list_set_fragment_constant_buffer(kinc_g5_command_list_t *list, struct kinc_g5_constant_buffer *buffer, int offset, size_t size) {
lastFragmentConstantBufferOffset = offset;
VkDescriptorSet descriptor_set = getDescriptorSet();
// Two dynamic offsets: [0] = vertex uniforms, [1] = fragment uniforms.
uint32_t offsets[2] = {lastVertexConstantBufferOffset, lastFragmentConstantBufferOffset};
vkCmdBindDescriptorSets(list->impl._buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, currentPipeline->impl.pipeline_layout, 0, 1, &descriptor_set, 2, offsets);
}
// One-shot flag: when set, the next kinc_g5_command_list_execute waits on the
// framebuffer_available semaphore before running (consumed there).
static bool wait_for_framebuffer = false;
// Called by the swapchain code to request that the next submit synchronizes
// with image acquisition.
static void command_list_should_wait_for_framebuffer(void) {
wait_for_framebuffer = true;
}
// Submits the command list to the graphics queue.
// Synchronization protocol:
//  - The list's own fence serializes re-submission: we wait for the previous
//    submit to finish so the fence (and command buffer) can be reused.
//  - Submits are chained through relay_semaphore: every submit signals it,
//    and every submit after the first waits on it (wait_for_relay).
//  - When the swapchain requested it (wait_for_framebuffer), the submit also
//    waits on framebuffer_available at the color-attachment-output stage.
void kinc_g5_command_list_execute(kinc_g5_command_list_t *list) {
// Make sure the previous execution is done, so we can reuse the fence
// Not optimal of course
VkResult err = vkWaitForFences(vk_ctx.device, 1, &list->impl.fence, VK_TRUE, UINT64_MAX);
assert(!err);
vkResetFences(vk_ctx.device, 1, &list->impl.fence);
VkPipelineStageFlags pipe_stage_flags = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
VkSubmitInfo submit_info = {0};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.pNext = NULL;
// Wait candidates: [0] framebuffer acquisition, [1] the previous submit.
VkSemaphore semaphores[2] = {
framebuffer_available,
relay_semaphore
};
VkPipelineStageFlags dst_stage_flags[2] = {
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT
};
if (wait_for_framebuffer) {
// Wait on the framebuffer, and additionally on the relay when chaining.
submit_info.pWaitSemaphores = semaphores;
submit_info.pWaitDstStageMask = dst_stage_flags;
submit_info.waitSemaphoreCount = wait_for_relay ? 2 : 1;
wait_for_framebuffer = false;
}
else if(wait_for_relay) {
// Only chain on the previous submit.
submit_info.waitSemaphoreCount = 1;
submit_info.pWaitSemaphores = &semaphores[1];
submit_info.pWaitDstStageMask = &dst_stage_flags[1];
}
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &list->impl._buffer;
// Always signal the relay so the next submit (or present) can wait on us.
submit_info.signalSemaphoreCount = 1;
submit_info.pSignalSemaphores = &relay_semaphore;
wait_for_relay = true;
err = vkQueueSubmit(vk_ctx.queue, 1, &submit_info, list->impl.fence);
assert(!err);
}
// Blocks the CPU until the fence signalled by this list's last submit fires.
void kinc_g5_command_list_wait_for_execution_to_finish(kinc_g5_command_list_t *list) {
	VkResult result = vkWaitForFences(vk_ctx.device, 1, &list->impl.fence, VK_TRUE, UINT64_MAX);
	assert(!result);
}
// Binds a texture to a fragment-stage slot; any render target previously
// occupying the same slot is evicted so the two tables stay mutually exclusive.
void kinc_g5_command_list_set_texture(kinc_g5_command_list_t *list, kinc_g5_texture_unit_t unit, kinc_g5_texture_t *texture) {
	int slot = unit.stages[KINC_G5_SHADER_TYPE_FRAGMENT];
	vulkanRenderTargets[slot] = NULL;
	vulkanTextures[slot] = texture;
}
// Records the Vulkan sampler for the given fragment-stage slot.
void kinc_g5_command_list_set_sampler(kinc_g5_command_list_t *list, kinc_g5_texture_unit_t unit, kinc_g5_sampler_t *sampler) {
	int slot = unit.stages[KINC_G5_SHADER_TYPE_FRAGMENT];
	vulkanSamplers[slot] = sampler->impl.sampler;
}
// The following features are not implemented in this Vulkan backend:
// image textures, cube-face render targets and occlusion queries.
void kinc_g5_command_list_set_image_texture(kinc_g5_command_list_t *list, kinc_g5_texture_unit_t unit, kinc_g5_texture_t *texture) {}
void kinc_g5_command_list_set_render_target_face(kinc_g5_command_list_t *list, kinc_g5_render_target_t *texture, int face) {}
// Always fails: occlusion queries are unsupported, callers must handle false.
bool kinc_g5_command_list_init_occlusion_query(kinc_g5_command_list_t *list, unsigned *occlusionQuery) {
return false;
}
void kinc_g5_command_list_delete_occlusion_query(kinc_g5_command_list_t *list, unsigned occlusionQuery) {}
void kinc_g5_command_list_render_occlusion_query(kinc_g5_command_list_t *list, unsigned occlusionQuery, int triangles) {}
// Always false: no query results will ever become available.
bool kinc_g5_command_list_are_query_results_available(kinc_g5_command_list_t *list, unsigned occlusionQuery) {
return false;
}
void kinc_g5_command_list_get_query_result(kinc_g5_command_list_t *list, unsigned occlusionQuery, unsigned *pixelCount) {}
// Binds a render target's color image as a fragment-stage texture; clears any
// plain texture previously bound to the same slot and remembers the slot on
// the target for later descriptor updates.
void kinc_g5_command_list_set_texture_from_render_target(kinc_g5_command_list_t *list, kinc_g5_texture_unit_t unit, kinc_g5_render_target_t *target) {
	int slot = unit.stages[KINC_G5_SHADER_TYPE_FRAGMENT];
	target->impl.stage = slot;
	vulkanTextures[slot] = NULL;
	vulkanRenderTargets[slot] = target;
}
// Binds a render target's depth image as a fragment-stage texture; clears any
// plain texture in the slot and records the slot in stage_depth.
void kinc_g5_command_list_set_texture_from_render_target_depth(kinc_g5_command_list_t *list, kinc_g5_texture_unit_t unit, kinc_g5_render_target_t *target) {
	int slot = unit.stages[KINC_G5_SHADER_TYPE_FRAGMENT];
	target->impl.stage_depth = slot;
	vulkanTextures[slot] = NULL;
	vulkanRenderTargets[slot] = target;
}

View File

@ -0,0 +1,9 @@
#pragma once
#include "MiniVulkan.h"
// Vulkan backend state attached to a kinc_g5 command list.
typedef struct {
int _indexCount; // index count of the most recently bound index buffer
VkCommandBuffer _buffer; // command buffer all commands are recorded into
VkFence fence; // signalled when the last submit of _buffer has finished
} CommandList5Impl;

View File

@ -0,0 +1,86 @@
#include "vulkan.h"
#include <kinc/graphics5/constantbuffer.h>
// Defined elsewhere in the Vulkan backend: picks a memory-type index matching
// the given requirement mask; returns false when no type qualifies.
bool memory_type_from_properties(uint32_t typeBits, VkFlags requirements_mask, uint32_t *typeIndex);
// VkBuffer *vk_ctx.vertex_uniform_buffer = NULL;
// VkBuffer *vk_ctx.fragment_uniform_buffer = NULL;
// Matrices are transposed on upload in this backend (see kinc_g5 matrix setters).
bool kinc_g5_transposeMat3 = true;
bool kinc_g5_transposeMat4 = true;
// Creates a host-visible buffer for shader constants, allocates and binds its
// memory, and fills in the descriptor info used when updating descriptor sets.
// All four out-parameters are written on success; aborts via assert on failure.
// NOTE(review): usage is STORAGE_BUFFER_BIT despite the "uniform" name -
// presumably the shaders declare these as storage buffers; confirm against the
// descriptor-set layout.
static void createUniformBuffer(VkBuffer *buf, VkMemoryAllocateInfo *mem_alloc, VkDeviceMemory *mem, VkDescriptorBufferInfo *buffer_info, int size) {
VkBufferCreateInfo buf_info;
memset(&buf_info, 0, sizeof(buf_info));
buf_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
buf_info.usage = VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
buf_info.size = size;
VkResult err = vkCreateBuffer(vk_ctx.device, &buf_info, NULL, buf);
assert(!err);
// Allocate host-visible memory matching the buffer's requirements.
VkMemoryRequirements mem_reqs;
vkGetBufferMemoryRequirements(vk_ctx.device, *buf, &mem_reqs);
mem_alloc->sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
mem_alloc->pNext = NULL;
mem_alloc->allocationSize = mem_reqs.size;
mem_alloc->memoryTypeIndex = 0;
bool pass = memory_type_from_properties(mem_reqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, &mem_alloc->memoryTypeIndex);
assert(pass);
err = vkAllocateMemory(vk_ctx.device, mem_alloc, NULL, mem);
assert(!err);
err = vkBindBufferMemory(vk_ctx.device, *buf, *mem, 0);
assert(!err);
// Descriptor info covers the whole buffer from offset 0.
buffer_info->buffer = *buf;
buffer_info->offset = 0;
buffer_info->range = size;
}
// Creates a constant buffer of the given byte size, zero-fills its memory and
// registers the first two buffers created as the global vertex/fragment
// uniform buffers used by the descriptor-set code.
void kinc_g5_constant_buffer_init(kinc_g5_constant_buffer_t *buffer, int size) {
buffer->impl.mySize = size;
buffer->data = NULL;
createUniformBuffer(&buffer->impl.buf, &buffer->impl.mem_alloc, &buffer->impl.mem, &buffer->impl.buffer_info, size);
// buffer hack
// The first buffer ever initialized becomes the vertex uniform buffer, the
// second the fragment uniform buffer - this relies on creation order.
if (vk_ctx.vertex_uniform_buffer == NULL) {
vk_ctx.vertex_uniform_buffer = &buffer->impl.buf;
}
else if (vk_ctx.fragment_uniform_buffer == NULL) {
vk_ctx.fragment_uniform_buffer = &buffer->impl.buf;
}
// Zero the whole allocation so uninitialized constants read as zero.
void *p;
VkResult err = vkMapMemory(vk_ctx.device, buffer->impl.mem, 0, buffer->impl.mem_alloc.allocationSize, 0, (void **)&p);
assert(!err);
memset(p, 0, buffer->impl.mem_alloc.allocationSize);
vkUnmapMemory(vk_ctx.device, buffer->impl.mem);
}
// Releases the buffer's device memory and handle. The caller must ensure the
// GPU is no longer using the buffer.
void kinc_g5_constant_buffer_destroy(kinc_g5_constant_buffer_t *buffer) {
vkFreeMemory(vk_ctx.device, buffer->impl.mem, NULL);
vkDestroyBuffer(vk_ctx.device, buffer->impl.buf, NULL);
}
// Maps the entire constant buffer for CPU writes.
void kinc_g5_constant_buffer_lock_all(kinc_g5_constant_buffer_t *buffer) {
	int total = kinc_g5_constant_buffer_size(buffer);
	kinc_g5_constant_buffer_lock(buffer, 0, total);
}
// Maps [start, start + count) of the buffer's memory into buffer->data.
// NOTE(review): vkMapMemory offsets must meet the device's alignment rules -
// arbitrary `start` values may be rejected by the validation layers; confirm
// callers only pass aligned offsets.
void kinc_g5_constant_buffer_lock(kinc_g5_constant_buffer_t *buffer, int start, int count) {
VkResult err = vkMapMemory(vk_ctx.device, buffer->impl.mem, start, count, 0, (void **)&buffer->data);
assert(!err);
}
// Unmaps the buffer and invalidates the CPU-side pointer.
void kinc_g5_constant_buffer_unlock(kinc_g5_constant_buffer_t *buffer) {
vkUnmapMemory(vk_ctx.device, buffer->impl.mem);
buffer->data = NULL;
}
// Returns the byte size requested at init time (not the allocation size).
int kinc_g5_constant_buffer_size(kinc_g5_constant_buffer_t *buffer) {
return buffer->impl.mySize;
}

View File

@ -0,0 +1,13 @@
#pragma once
#include "MiniVulkan.h"
// Vulkan backend state for a kinc_g5 constant buffer.
typedef struct {
VkBuffer buf; // host-visible buffer holding the constants
VkDescriptorBufferInfo buffer_info; // descriptor info covering the whole buffer
VkMemoryAllocateInfo mem_alloc; // allocation info, kept for its allocationSize
VkDeviceMemory mem; // backing device memory
int lastStart; // last lock range - NOTE(review): not written by the visible code
int lastCount;
int mySize; // byte size requested at init
} ConstantBuffer5Impl;

View File

@ -0,0 +1,6 @@
#pragma once
#include <kinc/backend/graphics5/indexbuffer.h>
#include <kinc/backend/graphics5/rendertarget.h>
#include <kinc/backend/graphics5/texture.h>
#include <kinc/backend/graphics5/vertexbuffer.h>

View File

@ -0,0 +1,93 @@
#include "vulkan.h"
#include <kinc/graphics5/indexbuffer.h>
// Defined elsewhere in the Vulkan backend: memory-type lookup helper.
bool memory_type_from_properties(uint32_t typeBits, VkFlags requirements_mask, uint32_t *typeIndex);
// The index buffer most recently bound for drawing; cleared on destroy.
kinc_g5_index_buffer_t *currentIndexBuffer = NULL;
// Clears the cached "current" pointer when the buffer being destroyed is the
// one currently bound, so no dangling pointer survives.
static void unset(kinc_g5_index_buffer_t *buffer) {
	if (buffer == currentIndexBuffer) {
		currentIndexBuffer = NULL;
	}
}
// Creates an index buffer with host-visible memory for indexCount indices of
// the given format. gpuMemory is ignored in this backend (always host-visible).
// Aborts via assert on any Vulkan failure.
void kinc_g5_index_buffer_init(kinc_g5_index_buffer_t *buffer, int indexCount, kinc_g5_index_buffer_format_t format, bool gpuMemory) {
	buffer->impl.count = indexCount;
	buffer->impl.format = format;

	VkBufferCreateInfo buf_info = {0};
	buf_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
	buf_info.pNext = NULL;
	buf_info.size = format == KINC_G5_INDEX_BUFFER_FORMAT_16BIT ? indexCount * sizeof(uint16_t) : indexCount * sizeof(uint32_t);
	buf_info.usage = VK_BUFFER_USAGE_INDEX_BUFFER_BIT;
#ifdef KORE_VKRT
	// Raytracing needs the buffer's device address.
	buf_info.usage |= VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT;
#endif
	buf_info.flags = 0;

	memset(&buffer->impl.mem_alloc, 0, sizeof(VkMemoryAllocateInfo));
	buffer->impl.mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
	buffer->impl.mem_alloc.pNext = NULL;
	buffer->impl.mem_alloc.allocationSize = 0;
	buffer->impl.mem_alloc.memoryTypeIndex = 0;

	// Fix: use VK_NULL_HANDLE instead of NULL - non-dispatchable Vulkan
	// handles are 64-bit integers on 32-bit targets, where assigning a
	// pointer constant does not compile.
	buffer->impl.buf = VK_NULL_HANDLE;
	buffer->impl.mem = VK_NULL_HANDLE;

	VkResult err = vkCreateBuffer(vk_ctx.device, &buf_info, NULL, &buffer->impl.buf);
	assert(!err);

	VkMemoryRequirements mem_reqs = {0};
	vkGetBufferMemoryRequirements(vk_ctx.device, buffer->impl.buf, &mem_reqs);

	buffer->impl.mem_alloc.allocationSize = mem_reqs.size;
	bool pass = memory_type_from_properties(mem_reqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, &buffer->impl.mem_alloc.memoryTypeIndex);
	assert(pass);

#ifdef KORE_VKRT
	// Chain the device-address flag into the allocation for raytracing.
	VkMemoryAllocateFlagsInfo memory_allocate_flags_info = {0};
	memory_allocate_flags_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO;
	memory_allocate_flags_info.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR;
	buffer->impl.mem_alloc.pNext = &memory_allocate_flags_info;
#endif

	err = vkAllocateMemory(vk_ctx.device, &buffer->impl.mem_alloc, NULL, &buffer->impl.mem);
	assert(!err);

	err = vkBindBufferMemory(vk_ctx.device, buffer->impl.buf, buffer->impl.mem, 0);
	assert(!err);
}
// Unbinds the buffer if it is the current one, then releases memory and handle.
void kinc_g5_index_buffer_destroy(kinc_g5_index_buffer_t *buffer) {
unset(buffer);
vkFreeMemory(vk_ctx.device, buffer->impl.mem, NULL);
vkDestroyBuffer(vk_ctx.device, buffer->impl.buf, NULL);
}
// Bytes per index: 2 for 16-bit indices, otherwise 4.
static int kinc_g5_internal_index_buffer_stride(kinc_g5_index_buffer_t *buffer) {
	if (buffer->impl.format == KINC_G5_INDEX_BUFFER_FORMAT_16BIT) {
		return 2;
	}
	return 4;
}
// Locks the full index range of the buffer for CPU writes.
void *kinc_g5_index_buffer_lock_all(kinc_g5_index_buffer_t *buffer) {
	int total = kinc_g5_index_buffer_count(buffer);
	return kinc_g5_index_buffer_lock(buffer, 0, total);
}
// Maps the whole allocation and returns a pointer offset to `start` indices.
// NOTE(review): `count` is unused - the full buffer is always mapped and
// unlock simply unmaps it; nested locks are not supported.
void *kinc_g5_index_buffer_lock(kinc_g5_index_buffer_t *buffer, int start, int count) {
uint8_t *data;
VkResult err = vkMapMemory(vk_ctx.device, buffer->impl.mem, 0, buffer->impl.mem_alloc.allocationSize, 0, (void **)&data);
assert(!err);
return &data[start * kinc_g5_internal_index_buffer_stride(buffer)];
}
// Unmaps the memory mapped by a previous lock.
void kinc_g5_index_buffer_unlock_all(kinc_g5_index_buffer_t *buffer) {
vkUnmapMemory(vk_ctx.device, buffer->impl.mem);
}
// Partial unlock is not supported; always unmaps the whole buffer.
void kinc_g5_index_buffer_unlock(kinc_g5_index_buffer_t *buffer, int count) {
kinc_g5_index_buffer_unlock_all(buffer);
}
// Returns the number of indices the buffer was created with.
int kinc_g5_index_buffer_count(kinc_g5_index_buffer_t *buffer) {
return buffer->impl.count;
}

View File

@ -0,0 +1,12 @@
#pragma once
#include "MiniVulkan.h"
// Vulkan backend state for a kinc_g5 index buffer.
typedef struct {
int count; // number of indices
int format; // kinc_g5_index_buffer_format_t value (16- or 32-bit)
VkBuffer buf; // index buffer handle
VkDeviceMemory mem; // host-visible backing memory
VkMemoryAllocateInfo mem_alloc; // kept for allocationSize when mapping
} IndexBuffer5Impl;

View File

@ -0,0 +1,948 @@
#include "vulkan.h"
#include <kinc/graphics5/pipeline.h>
#include <kinc/graphics5/shader.h>
#include <vulkan/vulkan_core.h>
#include <assert.h>
// Shared descriptor-set layout used by every pipeline in this backend.
VkDescriptorSetLayout desc_layout;
// Per-slot binding tables maintained by the command-list code.
extern kinc_g5_texture_t *vulkanTextures[16];
extern kinc_g5_render_target_t *vulkanRenderTargets[16];
extern uint32_t swapchainImageCount;
extern uint32_t current_buffer;
// Defined elsewhere in the Vulkan backend: memory-type lookup helper.
bool memory_type_from_properties(uint32_t typeBits, VkFlags requirements_mask, uint32_t *typeIndex);
static VkDescriptorPool descriptor_pool;
// Linear scan of the fixed-size name/number table; true when `name` has an
// entry.
static bool has_number(kinc_internal_named_number *named_numbers, const char *name) {
	int i = 0;
	while (i < KINC_INTERNAL_NAMED_NUMBER_COUNT) {
		if (strcmp(name, named_numbers[i].name) == 0) {
			return true;
		}
		++i;
	}
	return false;
}
// Returns the number stored for `name`, or -1 when absent.
// NOTE(review): the return type is uint32_t, so the -1 sentinel is really
// 0xffffffff; callers assign it to int and compare against -1 - works in
// practice but relies on implementation-defined conversion.
static uint32_t find_number(kinc_internal_named_number *named_numbers, const char *name) {
for (int i = 0; i < KINC_INTERNAL_NAMED_NUMBER_COUNT; ++i) {
if (strcmp(named_numbers[i].name, name) == 0) {
return named_numbers[i].number;
}
}
return -1;
}
// Stores `number` under `name`: updates the existing entry when present,
// otherwise claims the first free slot (empty name). Asserts when the table
// is full.
static void set_number(kinc_internal_named_number *named_numbers, const char *name, uint32_t number) {
	int free_slot = -1;
	for (int i = 0; i < KINC_INTERNAL_NAMED_NUMBER_COUNT; ++i) {
		if (strcmp(named_numbers[i].name, name) == 0) {
			named_numbers[i].number = number;
			return;
		}
		if (free_slot < 0 && named_numbers[i].name[0] == 0) {
			free_slot = i;
		}
	}
	if (free_slot >= 0) {
		strcpy(named_numbers[free_slot].name, name);
		named_numbers[free_slot].number = number;
		return;
	}
	assert(false); // table full
}
// Scratch tables filled while parsing a shader's SPIR-V; reset per shader.
// id -> string (points into the SPIR-V blob, not owned).
struct indexed_name {
uint32_t id;
char *name;
};
// id -> decoration value (location, binding or member offset).
struct indexed_index {
uint32_t id;
uint32_t value;
};
#define MAX_THINGS 256
static struct indexed_name names[MAX_THINGS]; // OpName results
static uint32_t names_size = 0;
static struct indexed_name memberNames[MAX_THINGS]; // OpMemberName results (uniform block members)
static uint32_t memberNames_size = 0;
static struct indexed_index locs[MAX_THINGS]; // Location decorations
static uint32_t locs_size = 0;
static struct indexed_index bindings[MAX_THINGS]; // Binding decorations
static uint32_t bindings_size = 0;
static struct indexed_index offsets[MAX_THINGS]; // member Offset decorations
static uint32_t offsets_size = 0;
static void add_name(uint32_t id, char *name) {
names[names_size].id = id;
names[names_size].name = name;
++names_size;
}
// Looks an id up in the name table; NULL when unknown.
static char *find_name(uint32_t id) {
	uint32_t i = 0;
	while (i < names_size) {
		if (id == names[i].id) {
			return names[i].name;
		}
		++i;
	}
	return NULL;
}
static void add_member_name(uint32_t id, char *name) {
memberNames[memberNames_size].id = id;
memberNames[memberNames_size].name = name;
++memberNames_size;
}
// Looks a member index up in the member-name table; NULL when unknown.
static char *find_member_name(uint32_t id) {
	uint32_t i = 0;
	while (i < memberNames_size) {
		if (id == memberNames[i].id) {
			return memberNames[i].name;
		}
		++i;
	}
	return NULL;
}
static void add_location(uint32_t id, uint32_t location) {
locs[locs_size].id = id;
locs[locs_size].value = location;
++locs_size;
}
static void add_binding(uint32_t id, uint32_t binding) {
bindings[bindings_size].id = id;
bindings[bindings_size].value = binding;
++bindings_size;
}
static void add_offset(uint32_t id, uint32_t offset) {
offsets[offsets_size].id = id;
offsets[offsets_size].value = offset;
++offsets_size;
}
// Walks a shader's SPIR-V word stream and extracts, into the caller's tables:
//  - vertex-attribute / varying locations (OpDecorate Location),
//  - texture bindings (OpDecorate Binding),
//  - member offsets of the "_k_global_uniform_buffer_type" block
//    (OpMemberDecorate Offset).
// Names come from OpName/OpMemberName and point into the SPIR-V blob itself,
// so the blob must stay alive while the tables are used.
static void parseShader(kinc_g5_shader_t *shader, kinc_internal_named_number *locations, kinc_internal_named_number *textureBindings,
kinc_internal_named_number *uniformOffsets) {
// Reset the scratch tables for this shader.
names_size = 0;
memberNames_size = 0;
locs_size = 0;
bindings_size = 0;
offsets_size = 0;
uint32_t *spirv = (uint32_t *)shader->impl.source;
int spirvsize = shader->impl.length / 4;
int index = 0;
// Skip the 5-word SPIR-V header (magic, version, generator, bound, schema).
uint32_t magicNumber = spirv[index++];
uint32_t version = spirv[index++];
uint32_t generator = spirv[index++];
uint32_t bound = spirv[index++];
index++;
// Each instruction: high 16 bits = word count, low 16 bits = opcode.
while (index < spirvsize) {
int wordCount = spirv[index] >> 16;
uint32_t opcode = spirv[index] & 0xffff;
uint32_t *operands = wordCount > 1 ? &spirv[index + 1] : NULL;
uint32_t length = wordCount - 1;
switch (opcode) {
case 5: { // OpName
uint32_t id = operands[0];
char *string = (char *)&operands[1];
add_name(id, string);
break;
}
case 6: { // OpMemberName
uint32_t type = operands[0];
char *name = find_name(type);
// Only members of the global uniform block are of interest.
if (name != NULL && strcmp(name, "_k_global_uniform_buffer_type") == 0) {
uint32_t member = operands[1];
char *string = (char *)&operands[2];
add_member_name(member, string);
}
break;
}
case 71: { // OpDecorate
uint32_t id = operands[0];
uint32_t decoration = operands[1];
if (decoration == 30) { // location
uint32_t location = operands[2];
add_location(id, location);
}
if (decoration == 33) { // binding
uint32_t binding = operands[2];
add_binding(id, binding);
}
break;
}
case 72: { // OpMemberDecorate
uint32_t type = operands[0];
char *name = find_name(type);
if (name != NULL && strcmp(name, "_k_global_uniform_buffer_type") == 0) {
uint32_t member = operands[1];
uint32_t decoration = operands[2];
if (decoration == 35) { // offset
uint32_t offset = operands[3];
add_offset(member, offset);
}
}
break;
}
}
index += wordCount;
}
// Resolve collected ids to names and publish into the output tables.
for (uint32_t i = 0; i < locs_size; ++i) {
char *name = find_name(locs[i].id);
if (name != NULL) {
set_number(locations, name, locs[i].value);
}
}
for (uint32_t i = 0; i < bindings_size; ++i) {
char *name = find_name(bindings[i].id);
if (name != NULL) {
set_number(textureBindings, name, bindings[i].value);
}
}
for (uint32_t i = 0; i < offsets_size; ++i) {
char *name = find_member_name(offsets[i].id);
if (name != NULL) {
set_number(uniformOffsets, name, offsets[i].value);
}
}
}
static VkShaderModule prepare_shader_module(const void *code, size_t size) {
VkShaderModuleCreateInfo moduleCreateInfo;
VkShaderModule module;
VkResult err;
moduleCreateInfo.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
moduleCreateInfo.pNext = NULL;
moduleCreateInfo.codeSize = size;
moduleCreateInfo.pCode = (const uint32_t *)code;
moduleCreateInfo.flags = 0;
err = vkCreateShaderModule(vk_ctx.device, &moduleCreateInfo, NULL, &module);
assert(!err);
return module;
}
// Builds the vertex-shader module, storing it through the out-parameter and
// returning the same handle.
static VkShaderModule prepare_vs(VkShaderModule *vert_shader_module, kinc_g5_shader_t *vertexShader) {
	VkShaderModule module = prepare_shader_module(vertexShader->impl.source, vertexShader->impl.length);
	*vert_shader_module = module;
	return module;
}
// Builds the fragment-shader module, storing it through the out-parameter and
// returning the same handle.
static VkShaderModule prepare_fs(VkShaderModule *frag_shader_module, kinc_g5_shader_t *fragmentShader) {
	VkShaderModule module = prepare_shader_module(fragmentShader->impl.source, fragmentShader->impl.length);
	*frag_shader_module = module;
	return module;
}
// Maps a kinc_g5 render-target format to the VkFormat used by this backend.
// Unknown formats fall through to B8G8R8A8_UNORM (the 32-bit default).
static VkFormat convert_format(kinc_g5_render_target_format_t format) {
switch (format) {
case KINC_G5_RENDER_TARGET_FORMAT_128BIT_FLOAT:
return VK_FORMAT_R32G32B32A32_SFLOAT;
case KINC_G5_RENDER_TARGET_FORMAT_64BIT_FLOAT:
return VK_FORMAT_R16G16B16A16_SFLOAT;
case KINC_G5_RENDER_TARGET_FORMAT_32BIT_RED_FLOAT:
return VK_FORMAT_R32_SFLOAT;
case KINC_G5_RENDER_TARGET_FORMAT_16BIT_RED_FLOAT:
return VK_FORMAT_R16_SFLOAT;
case KINC_G5_RENDER_TARGET_FORMAT_8BIT_RED:
return VK_FORMAT_R8_UNORM;
case KINC_G5_RENDER_TARGET_FORMAT_32BIT:
default:
return VK_FORMAT_B8G8R8A8_UNORM;
}
}
// Delegates to the shared cross-backend pipeline initialization.
void kinc_g5_pipeline_init(kinc_g5_pipeline_t *pipeline) {
kinc_g5_internal_pipeline_init(pipeline);
}
// Destroys both pipeline variants (framebuffer and render-target) and the
// shared layout created in kinc_g5_pipeline_compile.
void kinc_g5_pipeline_destroy(kinc_g5_pipeline_t *pipeline) {
vkDestroyPipeline(vk_ctx.device, pipeline->impl.framebuffer_pipeline, NULL);
vkDestroyPipeline(vk_ctx.device, pipeline->impl.rendertarget_pipeline, NULL);
vkDestroyPipelineLayout(vk_ctx.device, pipeline->impl.pipeline_layout, NULL);
}
// Resolves a uniform name to its byte offsets in the vertex and fragment
// uniform blocks; -1 marks a stage where the name does not appear.
kinc_g5_constant_location_t kinc_g5_pipeline_get_constant_location(kinc_g5_pipeline_t *pipeline, const char *name) {
	kinc_g5_constant_location_t location;
	location.impl.vertexOffset = has_number(pipeline->impl.vertexOffsets, name) ? (int)find_number(pipeline->impl.vertexOffsets, name) : -1;
	location.impl.fragmentOffset = has_number(pipeline->impl.fragmentOffsets, name) ? (int)find_number(pipeline->impl.fragmentOffsets, name) : -1;
	return location;
}
// Resolves a texture name to a fragment-stage slot. SPIR-V bindings 0 and 1
// are reserved for the uniform buffers, so texture slots start at binding 2
// and are returned rebased to 0. All stages default to -1 (unbound).
kinc_g5_texture_unit_t kinc_g5_pipeline_get_texture_unit(kinc_g5_pipeline_t *pipeline, const char *name) {
kinc_g5_texture_unit_t unit;
int number = find_number(pipeline->impl.textureBindings, name);
assert(number == -1 || number >= 2); // something wrong with the SPIR-V when this triggers
for (int i = 0; i < KINC_G5_SHADER_TYPE_COUNT; ++i) {
unit.stages[i] = -1;
}
if (number >= 0) {
unit.stages[KINC_G5_SHADER_TYPE_FRAGMENT] = number - 2;
}
return unit;
}
// Maps the kinc cull mode onto Vulkan cull flags; unknown values cull nothing.
static VkCullModeFlagBits convert_cull_mode(kinc_g5_cull_mode_t cullMode) {
switch (cullMode) {
case KINC_G5_CULL_MODE_CLOCKWISE:
return VK_CULL_MODE_BACK_BIT;
case KINC_G5_CULL_MODE_COUNTERCLOCKWISE:
return VK_CULL_MODE_FRONT_BIT;
case KINC_G5_CULL_MODE_NEVER:
default:
return VK_CULL_MODE_NONE;
}
}
// Maps a kinc depth/stencil compare mode to the corresponding VkCompareOp;
// unknown values default to "always pass".
static VkCompareOp convert_compare_mode(kinc_g5_compare_mode_t compare) {
switch (compare) {
default:
case KINC_G5_COMPARE_MODE_ALWAYS:
return VK_COMPARE_OP_ALWAYS;
case KINC_G5_COMPARE_MODE_NEVER:
return VK_COMPARE_OP_NEVER;
case KINC_G5_COMPARE_MODE_EQUAL:
return VK_COMPARE_OP_EQUAL;
case KINC_G5_COMPARE_MODE_NOT_EQUAL:
return VK_COMPARE_OP_NOT_EQUAL;
case KINC_G5_COMPARE_MODE_LESS:
return VK_COMPARE_OP_LESS;
case KINC_G5_COMPARE_MODE_LESS_EQUAL:
return VK_COMPARE_OP_LESS_OR_EQUAL;
case KINC_G5_COMPARE_MODE_GREATER:
return VK_COMPARE_OP_GREATER;
case KINC_G5_COMPARE_MODE_GREATER_EQUAL:
return VK_COMPARE_OP_GREATER_OR_EQUAL;
}
}
// Maps a kinc blend factor to the corresponding VkBlendFactor; asserts on an
// unknown factor and falls back to ONE in release builds.
static VkBlendFactor convert_blend_factor(kinc_g5_blending_factor_t factor) {
switch (factor) {
case KINC_G5_BLEND_ONE:
return VK_BLEND_FACTOR_ONE;
case KINC_G5_BLEND_ZERO:
return VK_BLEND_FACTOR_ZERO;
case KINC_G5_BLEND_SOURCE_ALPHA:
return VK_BLEND_FACTOR_SRC_ALPHA;
case KINC_G5_BLEND_DEST_ALPHA:
return VK_BLEND_FACTOR_DST_ALPHA;
case KINC_G5_BLEND_INV_SOURCE_ALPHA:
return VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA;
case KINC_G5_BLEND_INV_DEST_ALPHA:
return VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA;
case KINC_G5_BLEND_SOURCE_COLOR:
return VK_BLEND_FACTOR_SRC_COLOR;
case KINC_G5_BLEND_DEST_COLOR:
return VK_BLEND_FACTOR_DST_COLOR;
case KINC_G5_BLEND_INV_SOURCE_COLOR:
return VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR;
case KINC_G5_BLEND_INV_DEST_COLOR:
return VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR;
case KINC_G5_BLEND_CONSTANT:
return VK_BLEND_FACTOR_CONSTANT_COLOR;
case KINC_G5_BLEND_INV_CONSTANT:
return VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR;
default:
assert(false);
return VK_BLEND_FACTOR_ONE;
}
}
// Maps a kinc blend operation to the corresponding VkBlendOp; asserts on an
// unknown op and falls back to ADD in release builds.
static VkBlendOp convert_blend_operation(kinc_g5_blending_operation_t op) {
switch (op) {
case KINC_G5_BLENDOP_ADD:
return VK_BLEND_OP_ADD;
case KINC_G5_BLENDOP_SUBTRACT:
return VK_BLEND_OP_SUBTRACT;
case KINC_G5_BLENDOP_REVERSE_SUBTRACT:
return VK_BLEND_OP_REVERSE_SUBTRACT;
case KINC_G5_BLENDOP_MIN:
return VK_BLEND_OP_MIN;
case KINC_G5_BLENDOP_MAX:
return VK_BLEND_OP_MAX;
default:
assert(false);
return VK_BLEND_OP_ADD;
}
}
void kinc_g5_pipeline_compile(kinc_g5_pipeline_t *pipeline) {
memset(pipeline->impl.vertexLocations, 0, sizeof(kinc_internal_named_number) * KINC_INTERNAL_NAMED_NUMBER_COUNT);
memset(pipeline->impl.vertexOffsets, 0, sizeof(kinc_internal_named_number) * KINC_INTERNAL_NAMED_NUMBER_COUNT);
memset(pipeline->impl.fragmentLocations, 0, sizeof(kinc_internal_named_number) * KINC_INTERNAL_NAMED_NUMBER_COUNT);
memset(pipeline->impl.fragmentOffsets, 0, sizeof(kinc_internal_named_number) * KINC_INTERNAL_NAMED_NUMBER_COUNT);
memset(pipeline->impl.textureBindings, 0, sizeof(kinc_internal_named_number) * KINC_INTERNAL_NAMED_NUMBER_COUNT);
parseShader(pipeline->vertexShader, pipeline->impl.vertexLocations, pipeline->impl.textureBindings, pipeline->impl.vertexOffsets);
parseShader(pipeline->fragmentShader, pipeline->impl.fragmentLocations, pipeline->impl.textureBindings, pipeline->impl.fragmentOffsets);
VkPipelineLayoutCreateInfo pPipelineLayoutCreateInfo = {0};
pPipelineLayoutCreateInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
pPipelineLayoutCreateInfo.pNext = NULL;
pPipelineLayoutCreateInfo.setLayoutCount = 1;
pPipelineLayoutCreateInfo.pSetLayouts = &desc_layout;
VkResult err = vkCreatePipelineLayout(vk_ctx.device, &pPipelineLayoutCreateInfo, NULL, &pipeline->impl.pipeline_layout);
assert(!err);
VkGraphicsPipelineCreateInfo pipeline_info = {0};
VkPipelineInputAssemblyStateCreateInfo ia = {0};
VkPipelineRasterizationStateCreateInfo rs = {0};
VkPipelineColorBlendStateCreateInfo cb = {0};
VkPipelineDepthStencilStateCreateInfo ds = {0};
VkPipelineViewportStateCreateInfo vp = {0};
VkPipelineMultisampleStateCreateInfo ms = {0};
#define dynamicStatesCount 2
VkDynamicState dynamicStateEnables[dynamicStatesCount];
VkPipelineDynamicStateCreateInfo dynamicState = {0};
memset(dynamicStateEnables, 0, sizeof(dynamicStateEnables));
memset(&dynamicState, 0, sizeof(dynamicState));
dynamicState.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
dynamicState.pDynamicStates = dynamicStateEnables;
memset(&pipeline_info, 0, sizeof(pipeline_info));
pipeline_info.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
pipeline_info.layout = pipeline->impl.pipeline_layout;
int vertexAttributeCount = 0;
int vertexBindingCount = 0;
for (int i = 0; i < 16; ++i) {
if (pipeline->inputLayout[i] == NULL) {
break;
}
vertexAttributeCount += pipeline->inputLayout[i]->size;
vertexBindingCount++;
}
#ifdef KORE_WINDOWS
VkVertexInputBindingDescription *vi_bindings = (VkVertexInputBindingDescription *)alloca(sizeof(VkVertexInputBindingDescription) * vertexBindingCount);
#else
VkVertexInputBindingDescription vi_bindings[vertexBindingCount];
#endif
#ifdef KORE_WINDOWS
VkVertexInputAttributeDescription *vi_attrs = (VkVertexInputAttributeDescription *)alloca(sizeof(VkVertexInputAttributeDescription) * vertexAttributeCount);
#else
VkVertexInputAttributeDescription vi_attrs[vertexAttributeCount];
#endif
VkPipelineVertexInputStateCreateInfo vi = {0};
memset(&vi, 0, sizeof(vi));
vi.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
vi.pNext = NULL;
vi.vertexBindingDescriptionCount = vertexBindingCount;
vi.pVertexBindingDescriptions = vi_bindings;
vi.vertexAttributeDescriptionCount = vertexAttributeCount;
vi.pVertexAttributeDescriptions = vi_attrs;
uint32_t attr = 0;
for (int binding = 0; binding < vertexBindingCount; ++binding) {
uint32_t offset = 0;
uint32_t stride = 0;
for (int i = 0; i < pipeline->inputLayout[binding]->size; ++i) {
kinc_g5_vertex_element_t element = pipeline->inputLayout[binding]->elements[i];
vi_attrs[attr].binding = binding;
vi_attrs[attr].location = find_number(pipeline->impl.vertexLocations, element.name);
vi_attrs[attr].offset = offset;
offset += kinc_g4_vertex_data_size(element.data);
stride += kinc_g4_vertex_data_size(element.data);
switch (element.data) {
case KINC_G4_VERTEX_DATA_F32_1X:
vi_attrs[attr].format = VK_FORMAT_R32_SFLOAT;
break;
case KINC_G4_VERTEX_DATA_F32_2X:
vi_attrs[attr].format = VK_FORMAT_R32G32_SFLOAT;
break;
case KINC_G4_VERTEX_DATA_F32_3X:
vi_attrs[attr].format = VK_FORMAT_R32G32B32_SFLOAT;
break;
case KINC_G4_VERTEX_DATA_F32_4X:
vi_attrs[attr].format = VK_FORMAT_R32G32B32A32_SFLOAT;
break;
case KINC_G4_VERTEX_DATA_I8_1X:
vi_attrs[attr].format = VK_FORMAT_R8_SINT;
break;
case KINC_G4_VERTEX_DATA_U8_1X:
vi_attrs[attr].format = VK_FORMAT_R8_UINT;
break;
case KINC_G4_VERTEX_DATA_I8_1X_NORMALIZED:
vi_attrs[attr].format = VK_FORMAT_R8_SNORM;
break;
case KINC_G4_VERTEX_DATA_U8_1X_NORMALIZED:
vi_attrs[attr].format = VK_FORMAT_R8_UNORM;
break;
case KINC_G4_VERTEX_DATA_I8_2X:
vi_attrs[attr].format = VK_FORMAT_R8G8_SINT;
break;
case KINC_G4_VERTEX_DATA_U8_2X:
vi_attrs[attr].format = VK_FORMAT_R8G8_UINT;
break;
case KINC_G4_VERTEX_DATA_I8_2X_NORMALIZED:
vi_attrs[attr].format = VK_FORMAT_R8G8_SNORM;
break;
case KINC_G4_VERTEX_DATA_U8_2X_NORMALIZED:
vi_attrs[attr].format = VK_FORMAT_R8G8_UNORM;
break;
case KINC_G4_VERTEX_DATA_I8_4X:
vi_attrs[attr].format = VK_FORMAT_R8G8B8A8_SINT;
break;
case KINC_G4_VERTEX_DATA_U8_4X:
vi_attrs[attr].format = VK_FORMAT_R8G8B8A8_UINT;
break;
case KINC_G4_VERTEX_DATA_I8_4X_NORMALIZED:
vi_attrs[attr].format = VK_FORMAT_R8G8B8A8_SNORM;
break;
case KINC_G4_VERTEX_DATA_U8_4X_NORMALIZED:
vi_attrs[attr].format = VK_FORMAT_R8G8B8A8_UNORM;
break;
case KINC_G4_VERTEX_DATA_I16_1X:
vi_attrs[attr].format = VK_FORMAT_R16_SINT;
break;
case KINC_G4_VERTEX_DATA_U16_1X:
vi_attrs[attr].format = VK_FORMAT_R16_UINT;
break;
case KINC_G4_VERTEX_DATA_I16_1X_NORMALIZED:
vi_attrs[attr].format = VK_FORMAT_R16_SNORM;
break;
case KINC_G4_VERTEX_DATA_U16_1X_NORMALIZED:
vi_attrs[attr].format = VK_FORMAT_R16_UNORM;
break;
case KINC_G4_VERTEX_DATA_I16_2X:
vi_attrs[attr].format = VK_FORMAT_R16G16_SINT;
break;
case KINC_G4_VERTEX_DATA_U16_2X:
vi_attrs[attr].format = VK_FORMAT_R16G16_UINT;
break;
case KINC_G4_VERTEX_DATA_I16_2X_NORMALIZED:
vi_attrs[attr].format = VK_FORMAT_R16G16_SNORM;
break;
case KINC_G4_VERTEX_DATA_U16_2X_NORMALIZED:
vi_attrs[attr].format = VK_FORMAT_R16G16_UNORM;
break;
case KINC_G4_VERTEX_DATA_I16_4X:
vi_attrs[attr].format = VK_FORMAT_R16G16B16A16_SINT;
break;
case KINC_G4_VERTEX_DATA_U16_4X:
vi_attrs[attr].format = VK_FORMAT_R16G16B16A16_UINT;
break;
case KINC_G4_VERTEX_DATA_I16_4X_NORMALIZED:
vi_attrs[attr].format = VK_FORMAT_R16G16B16A16_SNORM;
break;
case KINC_G4_VERTEX_DATA_U16_4X_NORMALIZED:
vi_attrs[attr].format = VK_FORMAT_R16G16B16A16_UNORM;
break;
case KINC_G4_VERTEX_DATA_I32_1X:
vi_attrs[attr].format = VK_FORMAT_R32_SINT;
break;
case KINC_G4_VERTEX_DATA_U32_1X:
vi_attrs[attr].format = VK_FORMAT_R32_UINT;
break;
case KINC_G4_VERTEX_DATA_I32_2X:
vi_attrs[attr].format = VK_FORMAT_R32G32_SINT;
break;
case KINC_G4_VERTEX_DATA_U32_2X:
vi_attrs[attr].format = VK_FORMAT_R32G32_UINT;
break;
case KINC_G4_VERTEX_DATA_I32_3X:
vi_attrs[attr].format = VK_FORMAT_R32G32B32_SINT;
break;
case KINC_G4_VERTEX_DATA_U32_3X:
vi_attrs[attr].format = VK_FORMAT_R32G32B32_UINT;
break;
case KINC_G4_VERTEX_DATA_I32_4X:
vi_attrs[attr].format = VK_FORMAT_R32G32B32A32_SINT;
break;
case KINC_G4_VERTEX_DATA_U32_4X:
vi_attrs[attr].format = VK_FORMAT_R32G32B32A32_UINT;
break;
default:
assert(false);
break;
}
attr++;
}
vi_bindings[binding].binding = binding;
vi_bindings[binding].stride = stride;
vi_bindings[binding].inputRate = pipeline->inputLayout[binding]->instanced ? VK_VERTEX_INPUT_RATE_INSTANCE : VK_VERTEX_INPUT_RATE_VERTEX;
}
memset(&ia, 0, sizeof(ia));
ia.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
ia.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST;
memset(&rs, 0, sizeof(rs));
rs.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
rs.polygonMode = VK_POLYGON_MODE_FILL;
rs.cullMode = convert_cull_mode(pipeline->cullMode);
rs.frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE;
rs.depthClampEnable = VK_FALSE;
rs.rasterizerDiscardEnable = VK_FALSE;
rs.depthBiasEnable = VK_FALSE;
rs.lineWidth = 1.0f;
memset(&cb, 0, sizeof(cb));
cb.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO;
VkPipelineColorBlendAttachmentState att_state[8];
memset(att_state, 0, sizeof(att_state));
for (int i = 0; i < pipeline->colorAttachmentCount; ++i) {
att_state[i].colorWriteMask =
(pipeline->colorWriteMaskRed[i] ? VK_COLOR_COMPONENT_R_BIT : 0) | (pipeline->colorWriteMaskGreen[i] ? VK_COLOR_COMPONENT_G_BIT : 0) |
(pipeline->colorWriteMaskBlue[i] ? VK_COLOR_COMPONENT_B_BIT : 0) | (pipeline->colorWriteMaskAlpha[i] ? VK_COLOR_COMPONENT_A_BIT : 0);
att_state[i].blendEnable = pipeline->blend_source != KINC_G5_BLEND_ONE || pipeline->blend_destination != KINC_G5_BLEND_ZERO ||
pipeline->alpha_blend_source != KINC_G5_BLEND_ONE || pipeline->alpha_blend_destination != KINC_G5_BLEND_ZERO;
att_state[i].srcColorBlendFactor = convert_blend_factor(pipeline->blend_source);
att_state[i].dstColorBlendFactor = convert_blend_factor(pipeline->blend_destination);
att_state[i].colorBlendOp = convert_blend_operation(pipeline->blend_operation);
att_state[i].srcAlphaBlendFactor = convert_blend_factor(pipeline->alpha_blend_source);
att_state[i].dstAlphaBlendFactor = convert_blend_factor(pipeline->alpha_blend_destination);
att_state[i].alphaBlendOp = convert_blend_operation(pipeline->alpha_blend_operation);
}
cb.attachmentCount = pipeline->colorAttachmentCount;
cb.pAttachments = att_state;
memset(&vp, 0, sizeof(vp));
vp.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
vp.viewportCount = 1;
dynamicStateEnables[dynamicState.dynamicStateCount++] = VK_DYNAMIC_STATE_VIEWPORT;
vp.scissorCount = 1;
dynamicStateEnables[dynamicState.dynamicStateCount++] = VK_DYNAMIC_STATE_SCISSOR;
memset(&ds, 0, sizeof(ds));
ds.sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO;
ds.depthTestEnable = pipeline->depthMode != KINC_G5_COMPARE_MODE_ALWAYS;
ds.depthWriteEnable = pipeline->depthWrite;
ds.depthCompareOp = convert_compare_mode(pipeline->depthMode);
ds.depthBoundsTestEnable = VK_FALSE;
ds.back.failOp = VK_STENCIL_OP_KEEP;
ds.back.passOp = VK_STENCIL_OP_KEEP;
ds.back.compareOp = VK_COMPARE_OP_ALWAYS;
ds.stencilTestEnable = VK_FALSE;
ds.front = ds.back;
memset(&ms, 0, sizeof(ms));
ms.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
ms.pSampleMask = NULL;
ms.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT;
pipeline_info.stageCount = 2;
VkPipelineShaderStageCreateInfo shaderStages[2];
memset(&shaderStages, 0, 2 * sizeof(VkPipelineShaderStageCreateInfo));
shaderStages[0].sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
shaderStages[0].stage = VK_SHADER_STAGE_VERTEX_BIT;
shaderStages[0].module = prepare_vs(&pipeline->impl.vert_shader_module, pipeline->vertexShader);
shaderStages[0].pName = "main";
shaderStages[1].sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
shaderStages[1].stage = VK_SHADER_STAGE_FRAGMENT_BIT;
shaderStages[1].module = prepare_fs(&pipeline->impl.frag_shader_module, pipeline->fragmentShader);
shaderStages[1].pName = "main";
pipeline_info.pVertexInputState = &vi;
pipeline_info.pInputAssemblyState = &ia;
pipeline_info.pRasterizationState = &rs;
pipeline_info.pColorBlendState = &cb;
pipeline_info.pMultisampleState = &ms;
pipeline_info.pViewportState = &vp;
pipeline_info.pDepthStencilState = &ds;
pipeline_info.pStages = shaderStages;
pipeline_info.renderPass = vk_ctx.windows[vk_ctx.current_window].framebuffer_render_pass;
pipeline_info.pDynamicState = &dynamicState;
err = vkCreateGraphicsPipelines(vk_ctx.device, VK_NULL_HANDLE, 1, &pipeline_info, NULL, &pipeline->impl.framebuffer_pipeline);
assert(!err);
if (pipeline->depthAttachmentBits > 0) {
pipeline_info.renderPass = vk_ctx.windows[vk_ctx.current_window].rendertarget_render_pass_with_depth;
}
else {
pipeline_info.renderPass = vk_ctx.windows[vk_ctx.current_window].rendertarget_render_pass;
}
err = vkCreateGraphicsPipelines(vk_ctx.device, VK_NULL_HANDLE, 1, &pipeline_info, NULL, &pipeline->impl.rendertarget_pipeline);
assert(!err);
vkDestroyShaderModule(vk_ctx.device, pipeline->impl.frag_shader_module, NULL);
vkDestroyShaderModule(vk_ctx.device, pipeline->impl.vert_shader_module, NULL);
}
void createDescriptorLayout() {
VkDescriptorSetLayoutBinding layoutBindings[18];
memset(layoutBindings, 0, sizeof(layoutBindings));
layoutBindings[0].binding = 0;
layoutBindings[0].descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC;
layoutBindings[0].descriptorCount = 1;
layoutBindings[0].stageFlags = VK_SHADER_STAGE_VERTEX_BIT;
layoutBindings[0].pImmutableSamplers = NULL;
layoutBindings[1].binding = 1;
layoutBindings[1].descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC;
layoutBindings[1].descriptorCount = 1;
layoutBindings[1].stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
layoutBindings[1].pImmutableSamplers = NULL;
for (int i = 2; i < 18; ++i) {
layoutBindings[i].binding = i;
layoutBindings[i].descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
layoutBindings[i].descriptorCount = 1;
layoutBindings[i].stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT | VK_SHADER_STAGE_VERTEX_BIT;
layoutBindings[i].pImmutableSamplers = NULL;
}
VkDescriptorSetLayoutCreateInfo descriptor_layout = {0};
descriptor_layout.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
descriptor_layout.pNext = NULL;
descriptor_layout.bindingCount = 18;
descriptor_layout.pBindings = layoutBindings;
VkResult err = vkCreateDescriptorSetLayout(vk_ctx.device, &descriptor_layout, NULL, &desc_layout);
assert(!err);
VkDescriptorPoolSize typeCounts[2];
memset(typeCounts, 0, sizeof(typeCounts));
typeCounts[0].type = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC;
typeCounts[0].descriptorCount = 2 * 1024;
typeCounts[1].type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
typeCounts[1].descriptorCount = 16 * 1024;
VkDescriptorPoolCreateInfo pool_info = {0};
pool_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
pool_info.pNext = NULL;
pool_info.maxSets = 1024;
pool_info.poolSizeCount = 2;
pool_info.pPoolSizes = typeCounts;
err = vkCreateDescriptorPool(vk_ctx.device, &pool_info, NULL, &descriptor_pool);
assert(!err);
}
// Builds a compact key describing the currently bound resources:
// bit 0 is always set, bits 1..7 hold the bound-texture count, and
// bit 8 records whether both uniform buffers are bound. Used by
// getDescriptorSet() to look up cached descriptor sets.
int calc_descriptor_id(void) {
	int bound_textures = 0;
	for (int slot = 0; slot < 16; ++slot) {
		if (vulkanTextures[slot] != NULL || vulkanRenderTargets[slot] != NULL) {
			++bound_textures;
		}
	}
	bool has_uniform_buffers = vk_ctx.vertex_uniform_buffer != NULL && vk_ctx.fragment_uniform_buffer != NULL;
	return 1 | (bound_textures << 1) | ((has_uniform_buffers ? 1 : 0) << 8);
}
// Maximum number of descriptor sets the cache below can hold.
#define MAX_DESCRIPTOR_SETS 256

// One cached descriptor set, keyed by the id from calc_descriptor_id().
// NOTE(review): "destriptor" is a typo for "descriptor"; the name is used
// throughout this file, so renaming it is a separate, mechanical change.
struct destriptor_set {
	int id;                             // resource-combination key (calc_descriptor_id())
	bool in_use;                        // claimed this frame; cleared by reuse_descriptor_sets()
	VkDescriptorImageInfo tex_desc[16]; // image infos written the last time this set was updated
	VkDescriptorSet set;
};

// Cache of allocated descriptor sets; grows monotonically up to the maximum.
static struct destriptor_set descriptor_sets[MAX_DESCRIPTOR_SETS] = {0};
static int descriptor_sets_count = 0;
// Fills tex_descs[0..15] from the currently bound textures / render targets
// and returns how many slots are bound. A render target whose stage_depth
// marker equals the slot exposes its depth view instead of its color view,
// and the marker is consumed (reset to -1) in the process.
static int write_tex_descs(VkDescriptorImageInfo *tex_descs) {
	memset(tex_descs, 0, sizeof(VkDescriptorImageInfo) * 16);

	int bound = 0;
	for (int slot = 0; slot < 16; ++slot) {
		VkDescriptorImageInfo *desc = &tex_descs[slot];
		// Layout is recorded for every slot, bound or not, matching how the
		// descriptor writes are laid out.
		desc->imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;

		if (vulkanTextures[slot] != NULL) {
			desc->sampler = vulkanSamplers[slot];
			desc->imageView = vulkanTextures[slot]->impl.texture.view;
			++bound;
		}
		else if (vulkanRenderTargets[slot] != NULL) {
			desc->sampler = vulkanSamplers[slot];
			if (vulkanRenderTargets[slot]->impl.stage_depth == slot) {
				desc->imageView = vulkanRenderTargets[slot]->impl.depthView;
				vulkanRenderTargets[slot]->impl.stage_depth = -1; // marker is one-shot
			}
			else {
				desc->imageView = vulkanRenderTargets[slot]->impl.sourceView;
			}
			++bound;
		}
	}
	return bound;
}
// Returns true when the image bindings recorded in "set" no longer match the
// currently bound textures/render targets.
// NOTE(review): write_tex_descs() consumes stage_depth markers as a side
// effect, so this check is not purely observational.
static bool textures_changed(struct destriptor_set *set) {
	VkDescriptorImageInfo current[16];
	write_tex_descs(current);
	return memcmp(current, set->tex_desc, sizeof(current)) != 0;
}
static void update_textures(struct destriptor_set *set) {
memset(&set->tex_desc, 0, sizeof(set->tex_desc));
int texture_count = write_tex_descs(set->tex_desc);
VkWriteDescriptorSet writes[16];
memset(&writes, 0, sizeof(writes));
for (int i = 0; i < 16; ++i) {
writes[i].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
writes[i].dstSet = set->set;
writes[i].dstBinding = i + 2;
writes[i].descriptorCount = 1;
writes[i].descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
writes[i].pImageInfo = &set->tex_desc[i];
}
if (vulkanTextures[0] != NULL || vulkanRenderTargets[0] != NULL) {
vkUpdateDescriptorSets(vk_ctx.device, texture_count, writes, 0, NULL);
}
}
// Marks every cached descriptor set as available again; called once per
// frame so getDescriptorSet() can recycle them.
void reuse_descriptor_sets(void) {
	int i = descriptor_sets_count;
	while (i-- > 0) {
		descriptor_sets[i].in_use = false;
	}
}
// Returns a VkDescriptorSet matching the currently bound uniform buffers and
// textures. Sets are cached by the key from calc_descriptor_id(): a cached
// set that is free this frame is claimed and its textures refreshed; one
// already in use is shared only if its texture bindings are unchanged.
// Otherwise a new set is allocated from descriptor_pool and fully written.
VkDescriptorSet getDescriptorSet() {
	int id = calc_descriptor_id();
	// First, try to reuse a cached set with the same resource combination.
	for (int i = 0; i < descriptor_sets_count; ++i) {
		if (descriptor_sets[i].id == id) {
			if (!descriptor_sets[i].in_use) {
				descriptor_sets[i].in_use = true;
				update_textures(&descriptor_sets[i]);
				return descriptor_sets[i].set;
			}
			else {
				// In use this frame: only share it if the bindings still match.
				if (!textures_changed(&descriptor_sets[i])) {
					return descriptor_sets[i].set;
				}
			}
		}
	}
	// Cache miss: allocate a fresh set from the shared pool.
	VkDescriptorSetAllocateInfo alloc_info = {0};
	alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
	alloc_info.pNext = NULL;
	alloc_info.descriptorPool = descriptor_pool;
	alloc_info.descriptorSetCount = 1;
	alloc_info.pSetLayouts = &desc_layout;
	VkDescriptorSet descriptor_set;
	VkResult err = vkAllocateDescriptorSets(vk_ctx.device, &alloc_info, &descriptor_set);
	assert(!err);
	// Bindings 0/1: vertex and fragment uniform buffers (256 floats each).
	VkDescriptorBufferInfo buffer_descs[2];
	memset(&buffer_descs, 0, sizeof(buffer_descs));
	if (vk_ctx.vertex_uniform_buffer != NULL) {
		buffer_descs[0].buffer = *vk_ctx.vertex_uniform_buffer;
	}
	buffer_descs[0].offset = 0;
	buffer_descs[0].range = 256 * sizeof(float);
	if (vk_ctx.fragment_uniform_buffer != NULL) {
		buffer_descs[1].buffer = *vk_ctx.fragment_uniform_buffer;
	}
	buffer_descs[1].offset = 0;
	buffer_descs[1].range = 256 * sizeof(float);
	// Bindings 2..17: gather image infos per texture slot (same logic as
	// write_tex_descs(), inlined here; also consumes stage_depth markers).
	VkDescriptorImageInfo tex_desc[16];
	memset(&tex_desc, 0, sizeof(tex_desc));
	int texture_count = 0;
	for (int i = 0; i < 16; ++i) {
		if (vulkanTextures[i] != NULL) {
			assert(vulkanSamplers[i] != VK_NULL_HANDLE);
			tex_desc[i].sampler = vulkanSamplers[i];
			tex_desc[i].imageView = vulkanTextures[i]->impl.texture.view;
			texture_count++;
		}
		else if (vulkanRenderTargets[i] != NULL) {
			tex_desc[i].sampler = vulkanSamplers[i];
			if (vulkanRenderTargets[i]->impl.stage_depth == i) {
				tex_desc[i].imageView = vulkanRenderTargets[i]->impl.depthView;
				vulkanRenderTargets[i]->impl.stage_depth = -1;
			}
			else {
				tex_desc[i].imageView = vulkanRenderTargets[i]->impl.sourceView;
			}
			texture_count++;
		}
		tex_desc[i].imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
	}
	VkWriteDescriptorSet writes[18];
	memset(&writes, 0, sizeof(writes));
	writes[0].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
	writes[0].dstSet = descriptor_set;
	writes[0].dstBinding = 0;
	writes[0].descriptorCount = 1;
	writes[0].descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC;
	writes[0].pBufferInfo = &buffer_descs[0];
	writes[1].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
	writes[1].dstSet = descriptor_set;
	writes[1].dstBinding = 1;
	writes[1].descriptorCount = 1;
	writes[1].descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC;
	writes[1].pBufferInfo = &buffer_descs[1];
	for (int i = 2; i < 18; ++i) {
		writes[i].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
		writes[i].dstSet = descriptor_set;
		writes[i].dstBinding = i;
		writes[i].descriptorCount = 1;
		writes[i].descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
		writes[i].pImageInfo = &tex_desc[i - 2];
	}
	// NOTE(review): submitting the first texture_count texture writes assumes
	// bound slots are contiguous starting at slot 0, and the gate below only
	// checks slot 0 — verify behavior with sparse texture bindings.
	if (vulkanTextures[0] != NULL || vulkanRenderTargets[0] != NULL) {
		if (vk_ctx.vertex_uniform_buffer != NULL && vk_ctx.fragment_uniform_buffer != NULL) {
			vkUpdateDescriptorSets(vk_ctx.device, 2 + texture_count, writes, 0, NULL);
		}
		else {
			vkUpdateDescriptorSets(vk_ctx.device, texture_count, writes + 2, 0, NULL);
		}
	}
	else {
		if (vk_ctx.vertex_uniform_buffer != NULL && vk_ctx.fragment_uniform_buffer != NULL) {
			vkUpdateDescriptorSets(vk_ctx.device, 2, writes, 0, NULL);
		}
	}
	// Record the new set in the cache so later frames can reuse it.
	assert(descriptor_sets_count + 1 < MAX_DESCRIPTOR_SETS);
	descriptor_sets[descriptor_sets_count].id = id;
	descriptor_sets[descriptor_sets_count].in_use = true;
	descriptor_sets[descriptor_sets_count].set = descriptor_set;
	write_tex_descs(descriptor_sets[descriptor_sets_count].tex_desc);
	descriptor_sets_count += 1;
	return descriptor_set;
}

View File

@ -0,0 +1,44 @@
#pragma once

#include "MiniVulkan.h"

struct kinc_g5_shader;

// Number of name->number mapping slots in each lookup table below.
#define KINC_INTERNAL_NAMED_NUMBER_COUNT 32

// Maps a shader identifier (by name) to a numeric location/binding/offset.
typedef struct {
	char name[256];
	uint32_t number;
} kinc_internal_named_number;

// Per-pipeline Vulkan backend state.
typedef struct PipelineState5Impl_s {
	const char **textures;   // texture names referenced by the pipeline
	int *textureValues;      // values associated with the names above
	int textureCount;
	// Two pipeline variants are built: one compatible with the swapchain
	// framebuffer render pass, one with the render-target passes.
	VkPipeline framebuffer_pipeline;
	VkPipeline rendertarget_pipeline;
	VkShaderModule vert_shader_module;
	VkShaderModule frag_shader_module;
	// Name lookup tables parsed from the shader reflection data.
	kinc_internal_named_number vertexLocations[KINC_INTERNAL_NAMED_NUMBER_COUNT];
	kinc_internal_named_number fragmentLocations[KINC_INTERNAL_NAMED_NUMBER_COUNT];
	kinc_internal_named_number textureBindings[KINC_INTERNAL_NAMED_NUMBER_COUNT];
	kinc_internal_named_number vertexOffsets[KINC_INTERNAL_NAMED_NUMBER_COUNT];
	kinc_internal_named_number fragmentOffsets[KINC_INTERNAL_NAMED_NUMBER_COUNT];
	VkPipelineLayout pipeline_layout;
} PipelineState5Impl;

// Compute pipelines are not implemented in this backend (placeholder field).
typedef struct ComputePipelineState5Impl_t {
	int a;
} ComputePipelineState5Impl;

// Byte offsets of a constant inside the vertex/fragment uniform data.
typedef struct {
	int vertexOffset;
	int fragmentOffset;
} ConstantLocation5Impl;

// Placeholder: vertex attribute locations carry no backend state here.
typedef struct {
	int nothing;
} AttributeLocation5Impl;

View File

@ -0,0 +1,769 @@
#include "vulkan.h"
#include "raytrace.h"
#ifndef KORE_ANDROID
#include <kinc/graphics5/commandlist.h>
#include <kinc/graphics5/constantbuffer.h>
#include <kinc/graphics5/graphics.h>
#include <kinc/graphics5/indexbuffer.h>
#include <kinc/graphics5/pipeline.h>
#include <kinc/graphics5/raytrace.h>
#include <kinc/graphics5/vertexbuffer.h>
// Swapchain/command-list state defined elsewhere in the Vulkan backend.
extern VkRenderPassBeginInfo currentRenderPassBeginInfo;
extern VkFramebuffer *framebuffers;
extern uint32_t current_buffer;

// Defined in the backend's memory code: selects a memory type index that
// matches typeBits and has the requested property flags.
bool memory_type_from_properties(uint32_t typeBits, VkFlags requirements_mask, uint32_t *typeIndex);

// Shader-group / stage indices inside the ray-tracing pipeline.
static const int INDEX_RAYGEN = 0;
static const int INDEX_MISS = 1;
static const int INDEX_CLOSEST_HIT = 2;

// Entry-point names expected in the compiled ray shader module.
static const char *raygen_shader_name = "raygeneration";
static const char *closesthit_shader_name = "closesthit";
static const char *miss_shader_name = "miss";

// Pool backing the ray-tracing descriptor sets (separate from the raster pool).
static VkDescriptorPool raytrace_descriptor_pool;
// Currently active acceleration structure / pipeline / output image.
static kinc_raytrace_acceleration_structure_t *accel;
static kinc_raytrace_pipeline_t *pipeline;
static kinc_g5_texture_t *output = NULL;

// VK_KHR_ray_tracing_pipeline / VK_KHR_acceleration_structure entry points,
// resolved lazily via vkGetDeviceProcAddr (extension functions are not
// exported by the loader directly).
static PFN_vkCreateRayTracingPipelinesKHR _vkCreateRayTracingPipelinesKHR = NULL;
static PFN_vkGetRayTracingShaderGroupHandlesKHR _vkGetRayTracingShaderGroupHandlesKHR = NULL;
static PFN_vkGetBufferDeviceAddressKHR _vkGetBufferDeviceAddressKHR = NULL;
static PFN_vkCreateAccelerationStructureKHR _vkCreateAccelerationStructureKHR = NULL;
static PFN_vkGetAccelerationStructureDeviceAddressKHR _vkGetAccelerationStructureDeviceAddressKHR = NULL;
static PFN_vkGetAccelerationStructureBuildSizesKHR _vkGetAccelerationStructureBuildSizesKHR = NULL;
static PFN_vkCmdBuildAccelerationStructuresKHR _vkCmdBuildAccelerationStructuresKHR = NULL;
static PFN_vkDestroyAccelerationStructureKHR _vkDestroyAccelerationStructureKHR = NULL;
static PFN_vkCmdTraceRaysKHR _vkCmdTraceRaysKHR = NULL;
// Builds a complete VK_KHR_ray_tracing_pipeline from a single shader module
// containing raygen/miss/closesthit entry points, creates the three shader
// binding tables, and allocates the descriptor set (acceleration structure +
// storage image + uniform buffer) the pipeline is dispatched with.
void kinc_raytrace_pipeline_init(kinc_raytrace_pipeline_t *pipeline, kinc_g5_command_list_t *command_list, void *ray_shader, int ray_shader_size,
                                 kinc_g5_constant_buffer_t *constant_buffer) {
	pipeline->_constant_buffer = constant_buffer;
	// Section 1: descriptor set layout, pipeline layout, and the ray-tracing
	// pipeline itself.
	{
		// Binding 0: top-level acceleration structure (raygen stage only).
		VkDescriptorSetLayoutBinding acceleration_structure_layout_binding = {0};
		acceleration_structure_layout_binding.binding = 0;
		acceleration_structure_layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR;
		acceleration_structure_layout_binding.descriptorCount = 1;
		acceleration_structure_layout_binding.stageFlags = VK_SHADER_STAGE_RAYGEN_BIT_KHR;
		// Binding 1: storage image the rays write their result into.
		VkDescriptorSetLayoutBinding result_image_layout_binding = {0};
		result_image_layout_binding.binding = 1;
		result_image_layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
		result_image_layout_binding.descriptorCount = 1;
		result_image_layout_binding.stageFlags = VK_SHADER_STAGE_RAYGEN_BIT_KHR;
		// Binding 2: camera/constant data.
		VkDescriptorSetLayoutBinding uniform_buffer_binding = {0};
		uniform_buffer_binding.binding = 2;
		uniform_buffer_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
		uniform_buffer_binding.descriptorCount = 1;
		uniform_buffer_binding.stageFlags = VK_SHADER_STAGE_RAYGEN_BIT_KHR;
		VkDescriptorSetLayoutBinding bindings[3] = {acceleration_structure_layout_binding, result_image_layout_binding, uniform_buffer_binding};
		VkDescriptorSetLayoutCreateInfo layout_info = {0};
		layout_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
		layout_info.pNext = NULL;
		layout_info.bindingCount = 3;
		layout_info.pBindings = &bindings[0];
		vkCreateDescriptorSetLayout(vk_ctx.device, &layout_info, NULL, &pipeline->impl.descriptor_set_layout);
		VkPipelineLayoutCreateInfo pipeline_layout_create_info = {0};
		pipeline_layout_create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
		pipeline_layout_create_info.pNext = NULL;
		pipeline_layout_create_info.setLayoutCount = 1;
		pipeline_layout_create_info.pSetLayouts = &pipeline->impl.descriptor_set_layout;
		vkCreatePipelineLayout(vk_ctx.device, &pipeline_layout_create_info, NULL, &pipeline->impl.pipeline_layout);
		// All three stages come from the same SPIR-V module; they differ only
		// by entry-point name.
		VkShaderModuleCreateInfo module_create_info = {0};
		memset(&module_create_info, 0, sizeof(VkShaderModuleCreateInfo));
		module_create_info.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
		module_create_info.codeSize = ray_shader_size;
		module_create_info.pCode = (const uint32_t *)ray_shader;
		module_create_info.pNext = NULL;
		module_create_info.flags = 0;
		VkShaderModule shader_module;
		vkCreateShaderModule(vk_ctx.device, &module_create_info, NULL, &shader_module);
		VkPipelineShaderStageCreateInfo shader_stages[3];
		shader_stages[INDEX_RAYGEN].sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
		shader_stages[INDEX_RAYGEN].pNext = NULL;
		shader_stages[INDEX_RAYGEN].stage = VK_SHADER_STAGE_RAYGEN_BIT_KHR;
		shader_stages[INDEX_RAYGEN].module = shader_module;
		shader_stages[INDEX_RAYGEN].pName = raygen_shader_name;
		shader_stages[INDEX_RAYGEN].flags = 0;
		shader_stages[INDEX_RAYGEN].pSpecializationInfo = NULL;
		shader_stages[INDEX_MISS].sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
		shader_stages[INDEX_MISS].pNext = NULL;
		shader_stages[INDEX_MISS].stage = VK_SHADER_STAGE_MISS_BIT_KHR;
		shader_stages[INDEX_MISS].module = shader_module;
		shader_stages[INDEX_MISS].pName = miss_shader_name;
		shader_stages[INDEX_MISS].flags = 0;
		shader_stages[INDEX_MISS].pSpecializationInfo = NULL;
		shader_stages[INDEX_CLOSEST_HIT].sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
		shader_stages[INDEX_CLOSEST_HIT].pNext = NULL;
		shader_stages[INDEX_CLOSEST_HIT].stage = VK_SHADER_STAGE_CLOSEST_HIT_BIT_KHR;
		shader_stages[INDEX_CLOSEST_HIT].module = shader_module;
		shader_stages[INDEX_CLOSEST_HIT].pName = closesthit_shader_name;
		shader_stages[INDEX_CLOSEST_HIT].flags = 0;
		shader_stages[INDEX_CLOSEST_HIT].pSpecializationInfo = NULL;
		// One shader group per stage: two general groups (raygen, miss) and
		// one triangles hit group (closest hit). Unused members are first set
		// to VK_SHADER_UNUSED_KHR, then the relevant member is overwritten.
		VkRayTracingShaderGroupCreateInfoKHR groups[3];
		groups[INDEX_RAYGEN].sType = VK_STRUCTURE_TYPE_RAY_TRACING_SHADER_GROUP_CREATE_INFO_KHR;
		groups[INDEX_RAYGEN].pNext = NULL;
		groups[INDEX_RAYGEN].generalShader = VK_SHADER_UNUSED_KHR;
		groups[INDEX_RAYGEN].closestHitShader = VK_SHADER_UNUSED_KHR;
		groups[INDEX_RAYGEN].anyHitShader = VK_SHADER_UNUSED_KHR;
		groups[INDEX_RAYGEN].intersectionShader = VK_SHADER_UNUSED_KHR;
		groups[INDEX_RAYGEN].type = VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_KHR;
		groups[INDEX_RAYGEN].generalShader = INDEX_RAYGEN;
		groups[INDEX_MISS].sType = VK_STRUCTURE_TYPE_RAY_TRACING_SHADER_GROUP_CREATE_INFO_KHR;
		groups[INDEX_MISS].pNext = NULL;
		groups[INDEX_MISS].generalShader = VK_SHADER_UNUSED_KHR;
		groups[INDEX_MISS].closestHitShader = VK_SHADER_UNUSED_KHR;
		groups[INDEX_MISS].anyHitShader = VK_SHADER_UNUSED_KHR;
		groups[INDEX_MISS].intersectionShader = VK_SHADER_UNUSED_KHR;
		groups[INDEX_MISS].type = VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_KHR;
		groups[INDEX_MISS].generalShader = INDEX_MISS;
		groups[INDEX_CLOSEST_HIT].sType = VK_STRUCTURE_TYPE_RAY_TRACING_SHADER_GROUP_CREATE_INFO_KHR;
		groups[INDEX_CLOSEST_HIT].pNext = NULL;
		groups[INDEX_CLOSEST_HIT].generalShader = VK_SHADER_UNUSED_KHR;
		groups[INDEX_CLOSEST_HIT].closestHitShader = VK_SHADER_UNUSED_KHR;
		groups[INDEX_CLOSEST_HIT].anyHitShader = VK_SHADER_UNUSED_KHR;
		groups[INDEX_CLOSEST_HIT].intersectionShader = VK_SHADER_UNUSED_KHR;
		groups[INDEX_CLOSEST_HIT].type = VK_RAY_TRACING_SHADER_GROUP_TYPE_TRIANGLES_HIT_GROUP_KHR;
		groups[INDEX_CLOSEST_HIT].closestHitShader = INDEX_CLOSEST_HIT;
		VkRayTracingPipelineCreateInfoKHR raytracing_pipeline_create_info = {0};
		raytracing_pipeline_create_info.sType = VK_STRUCTURE_TYPE_RAY_TRACING_PIPELINE_CREATE_INFO_KHR;
		raytracing_pipeline_create_info.pNext = NULL;
		raytracing_pipeline_create_info.flags = 0;
		raytracing_pipeline_create_info.stageCount = 3;
		raytracing_pipeline_create_info.pStages = &shader_stages[0];
		raytracing_pipeline_create_info.groupCount = 3;
		raytracing_pipeline_create_info.pGroups = &groups[0];
		raytracing_pipeline_create_info.maxPipelineRayRecursionDepth = 1;
		raytracing_pipeline_create_info.layout = pipeline->impl.pipeline_layout;
		_vkCreateRayTracingPipelinesKHR = (void *)vkGetDeviceProcAddr(vk_ctx.device, "vkCreateRayTracingPipelinesKHR");
		_vkCreateRayTracingPipelinesKHR(vk_ctx.device, VK_NULL_HANDLE, VK_NULL_HANDLE, 1, &raytracing_pipeline_create_info, NULL, &pipeline->impl.pipeline);
	}
	// Section 2: shader binding tables — one host-visible buffer per group
	// (raygen, miss, hit), each holding a single shader group handle.
	{
		VkPhysicalDeviceRayTracingPipelinePropertiesKHR ray_tracing_pipeline_properties;
		ray_tracing_pipeline_properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PIPELINE_PROPERTIES_KHR;
		ray_tracing_pipeline_properties.pNext = NULL;
		VkPhysicalDeviceProperties2 device_properties = {0};
		device_properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
		device_properties.pNext = &ray_tracing_pipeline_properties;
		vkGetPhysicalDeviceProperties2(vk_ctx.gpu, &device_properties);
		_vkGetRayTracingShaderGroupHandlesKHR = (void *)vkGetDeviceProcAddr(vk_ctx.device, "vkGetRayTracingShaderGroupHandlesKHR");
		uint32_t handle_size = ray_tracing_pipeline_properties.shaderGroupHandleSize;
		// Round the handle size up to the device's group handle alignment.
		uint32_t handle_size_aligned =
		    (ray_tracing_pipeline_properties.shaderGroupHandleSize + ray_tracing_pipeline_properties.shaderGroupHandleAlignment - 1) &
		    ~(ray_tracing_pipeline_properties.shaderGroupHandleAlignment - 1);
		VkBufferCreateInfo buf_info = {0};
		buf_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
		buf_info.pNext = NULL;
		buf_info.size = handle_size;
		buf_info.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_SHADER_BINDING_TABLE_BIT_KHR | VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT;
		buf_info.flags = 0;
		vkCreateBuffer(vk_ctx.device, &buf_info, NULL, &pipeline->impl.raygen_shader_binding_table);
		vkCreateBuffer(vk_ctx.device, &buf_info, NULL, &pipeline->impl.hit_shader_binding_table);
		vkCreateBuffer(vk_ctx.device, &buf_info, NULL, &pipeline->impl.miss_shader_binding_table);
		// NOTE(review): fixed 1024-byte scratch assumes handle_size_aligned * 3
		// never exceeds 1024 — holds for current hardware, but unchecked.
		uint8_t shader_handle_storage[1024];
		_vkGetRayTracingShaderGroupHandlesKHR(vk_ctx.device, pipeline->impl.pipeline, 0, 3, handle_size_aligned * 3, shader_handle_storage);
		VkMemoryAllocateFlagsInfo memory_allocate_flags_info = {0};
		memory_allocate_flags_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO;
		memory_allocate_flags_info.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR;
		VkMemoryAllocateInfo memory_allocate_info = {0};
		memory_allocate_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
		memory_allocate_info.pNext = &memory_allocate_flags_info;
		// Raygen table: allocate, bind, and copy the first handle in.
		VkMemoryRequirements mem_reqs = {0};
		vkGetBufferMemoryRequirements(vk_ctx.device, pipeline->impl.raygen_shader_binding_table, &mem_reqs);
		memory_allocate_info.allocationSize = mem_reqs.size;
		memory_type_from_properties(mem_reqs.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT | VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT,
		                            &memory_allocate_info.memoryTypeIndex);
		// NOTE(review): "mem" is reused for all three tables and the handles
		// are never stored, so this memory cannot be freed later — leak by
		// design or oversight; confirm intended lifetime.
		VkDeviceMemory mem;
		void *data;
		vkAllocateMemory(vk_ctx.device, &memory_allocate_info, NULL, &mem);
		vkBindBufferMemory(vk_ctx.device, pipeline->impl.raygen_shader_binding_table, mem, 0);
		vkMapMemory(vk_ctx.device, mem, 0, handle_size, 0, (void **)&data);
		memcpy(data, shader_handle_storage, handle_size);
		vkUnmapMemory(vk_ctx.device, mem);
		// Miss table: second handle (offset by one aligned handle).
		vkGetBufferMemoryRequirements(vk_ctx.device, pipeline->impl.miss_shader_binding_table, &mem_reqs);
		memory_allocate_info.allocationSize = mem_reqs.size;
		memory_type_from_properties(mem_reqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, &memory_allocate_info.memoryTypeIndex);
		vkAllocateMemory(vk_ctx.device, &memory_allocate_info, NULL, &mem);
		vkBindBufferMemory(vk_ctx.device, pipeline->impl.miss_shader_binding_table, mem, 0);
		vkMapMemory(vk_ctx.device, mem, 0, handle_size, 0, (void **)&data);
		memcpy(data, shader_handle_storage + handle_size_aligned, handle_size);
		vkUnmapMemory(vk_ctx.device, mem);
		// Hit table: third handle.
		vkGetBufferMemoryRequirements(vk_ctx.device, pipeline->impl.hit_shader_binding_table, &mem_reqs);
		memory_allocate_info.allocationSize = mem_reqs.size;
		memory_type_from_properties(mem_reqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, &memory_allocate_info.memoryTypeIndex);
		vkAllocateMemory(vk_ctx.device, &memory_allocate_info, NULL, &mem);
		vkBindBufferMemory(vk_ctx.device, pipeline->impl.hit_shader_binding_table, mem, 0);
		vkMapMemory(vk_ctx.device, mem, 0, handle_size, 0, (void **)&data);
		memcpy(data, shader_handle_storage + handle_size_aligned * 2, handle_size);
		vkUnmapMemory(vk_ctx.device, mem);
	}
	// Section 3: descriptor pool and the pipeline's descriptor set.
	{
		VkDescriptorPoolSize type_counts[3];
		memset(type_counts, 0, sizeof(type_counts));
		type_counts[0].type = VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR;
		type_counts[0].descriptorCount = 1;
		type_counts[1].type = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
		type_counts[1].descriptorCount = 1;
		type_counts[2].type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
		type_counts[2].descriptorCount = 1;
		VkDescriptorPoolCreateInfo descriptor_pool_create_info = {0};
		descriptor_pool_create_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
		descriptor_pool_create_info.pNext = NULL;
		descriptor_pool_create_info.maxSets = 1024;
		descriptor_pool_create_info.poolSizeCount = 3;
		descriptor_pool_create_info.pPoolSizes = type_counts;
		vkCreateDescriptorPool(vk_ctx.device, &descriptor_pool_create_info, NULL, &raytrace_descriptor_pool);
		VkDescriptorSetAllocateInfo alloc_info = {0};
		alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
		alloc_info.pNext = NULL;
		alloc_info.descriptorPool = raytrace_descriptor_pool;
		alloc_info.descriptorSetCount = 1;
		alloc_info.pSetLayouts = &pipeline->impl.descriptor_set_layout;
		vkAllocateDescriptorSets(vk_ctx.device, &alloc_info, &pipeline->impl.descriptor_set);
	}
}
// Destroys the Vulkan objects created by kinc_raytrace_pipeline_init().
// NOTE(review): the shader binding table buffers and their memory created in
// init are not released here — confirm whether that is intentional.
void kinc_raytrace_pipeline_destroy(kinc_raytrace_pipeline_t *pipeline) {
	vkDestroyPipeline(vk_ctx.device, pipeline->impl.pipeline, NULL);
	vkDestroyPipelineLayout(vk_ctx.device, pipeline->impl.pipeline_layout, NULL);
	vkDestroyDescriptorSetLayout(vk_ctx.device, pipeline->impl.descriptor_set_layout, NULL);
}
// Resolves the GPU device address of "buffer" via the KHR entry point
// (re-queried on every call via vkGetDeviceProcAddr).
uint64_t get_buffer_device_address(VkBuffer buffer) {
	VkBufferDeviceAddressInfoKHR address_info = {
	    .sType = VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO,
	    .buffer = buffer,
	};
	_vkGetBufferDeviceAddressKHR = (void *)vkGetDeviceProcAddr(vk_ctx.device, "vkGetBufferDeviceAddressKHR");
	return _vkGetBufferDeviceAddressKHR(vk_ctx.device, &address_info);
}
void kinc_raytrace_acceleration_structure_init(kinc_raytrace_acceleration_structure_t *accel, kinc_g5_command_list_t *command_list, kinc_g5_vertex_buffer_t *vb,
kinc_g5_index_buffer_t *ib) {
_vkGetBufferDeviceAddressKHR = (void *)vkGetDeviceProcAddr(vk_ctx.device, "vkGetBufferDeviceAddressKHR");
_vkCreateAccelerationStructureKHR = (void *)vkGetDeviceProcAddr(vk_ctx.device, "vkCreateAccelerationStructureKHR");
_vkGetAccelerationStructureDeviceAddressKHR = (void *)vkGetDeviceProcAddr(vk_ctx.device, "vkGetAccelerationStructureDeviceAddressKHR");
_vkGetAccelerationStructureBuildSizesKHR = (void *)vkGetDeviceProcAddr(vk_ctx.device, "vkGetAccelerationStructureBuildSizesKHR");
{
VkDeviceOrHostAddressConstKHR vertex_data_device_address = {0};
VkDeviceOrHostAddressConstKHR index_data_device_address = {0};
vertex_data_device_address.deviceAddress = get_buffer_device_address(vb->impl.vertices.buf);
index_data_device_address.deviceAddress = get_buffer_device_address(ib->impl.buf);
VkAccelerationStructureGeometryKHR acceleration_geometry = {0};
acceleration_geometry.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_KHR;
acceleration_geometry.flags = VK_GEOMETRY_OPAQUE_BIT_KHR;
acceleration_geometry.geometryType = VK_GEOMETRY_TYPE_TRIANGLES_KHR;
acceleration_geometry.geometry.triangles.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_TRIANGLES_DATA_KHR;
acceleration_geometry.geometry.triangles.vertexFormat = VK_FORMAT_R32G32B32_SFLOAT;
acceleration_geometry.geometry.triangles.vertexData.deviceAddress = vertex_data_device_address.deviceAddress;
acceleration_geometry.geometry.triangles.vertexStride = vb->impl.myStride;
acceleration_geometry.geometry.triangles.indexType = VK_INDEX_TYPE_UINT32;
acceleration_geometry.geometry.triangles.indexData.deviceAddress = index_data_device_address.deviceAddress;
VkAccelerationStructureBuildGeometryInfoKHR acceleration_structure_build_geometry_info = {0};
acceleration_structure_build_geometry_info.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_BUILD_GEOMETRY_INFO_KHR;
acceleration_structure_build_geometry_info.type = VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR;
acceleration_structure_build_geometry_info.flags = VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_TRACE_BIT_KHR;
acceleration_structure_build_geometry_info.geometryCount = 1;
acceleration_structure_build_geometry_info.pGeometries = &acceleration_geometry;
VkAccelerationStructureBuildSizesInfoKHR acceleration_build_sizes_info = {0};
acceleration_build_sizes_info.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_BUILD_SIZES_INFO_KHR;
const uint32_t primitive_count = 1;
_vkGetAccelerationStructureBuildSizesKHR(vk_ctx.device, VK_ACCELERATION_STRUCTURE_BUILD_TYPE_DEVICE_KHR, &acceleration_structure_build_geometry_info,
&primitive_count, &acceleration_build_sizes_info);
VkBufferCreateInfo buffer_create_info = {0};
buffer_create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
buffer_create_info.size = acceleration_build_sizes_info.accelerationStructureSize;
buffer_create_info.usage = VK_BUFFER_USAGE_ACCELERATION_STRUCTURE_STORAGE_BIT_KHR;
buffer_create_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
VkBuffer bottom_level_buffer = VK_NULL_HANDLE;
vkCreateBuffer(vk_ctx.device, &buffer_create_info, NULL, &bottom_level_buffer);
VkMemoryRequirements memory_requirements2;
vkGetBufferMemoryRequirements(vk_ctx.device, bottom_level_buffer, &memory_requirements2);
VkMemoryAllocateFlagsInfo memory_allocate_flags_info2 = {0};
memory_allocate_flags_info2.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO;
memory_allocate_flags_info2.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR;
VkMemoryAllocateInfo memory_allocate_info = {0};
memory_allocate_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
memory_allocate_info.pNext = &memory_allocate_flags_info2;
memory_allocate_info.allocationSize = memory_requirements2.size;
memory_type_from_properties(memory_requirements2.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, &memory_allocate_info.memoryTypeIndex);
VkDeviceMemory mem;
vkAllocateMemory(vk_ctx.device, &memory_allocate_info, NULL, &mem);
vkBindBufferMemory(vk_ctx.device, bottom_level_buffer, mem, 0);
VkAccelerationStructureCreateInfoKHR acceleration_create_info = {0};
acceleration_create_info.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_INFO_KHR;
acceleration_create_info.type = VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR;
acceleration_create_info.buffer = bottom_level_buffer;
acceleration_create_info.size = acceleration_build_sizes_info.accelerationStructureSize;
_vkCreateAccelerationStructureKHR(vk_ctx.device, &acceleration_create_info, NULL, &accel->impl.bottom_level_acceleration_structure);
VkBuffer scratch_buffer = VK_NULL_HANDLE;
VkDeviceMemory scratch_memory = VK_NULL_HANDLE;
buffer_create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
buffer_create_info.size = acceleration_build_sizes_info.buildScratchSize;
buffer_create_info.usage = VK_BUFFER_USAGE_STORAGE_BUFFER_BIT | VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT;
buffer_create_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
vkCreateBuffer(vk_ctx.device, &buffer_create_info, NULL, &scratch_buffer);
VkMemoryRequirements memory_requirements;
vkGetBufferMemoryRequirements(vk_ctx.device, scratch_buffer, &memory_requirements);
VkMemoryAllocateFlagsInfo memory_allocate_flags_info = {0};
memory_allocate_flags_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO;
memory_allocate_flags_info.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR;
memory_allocate_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
memory_allocate_info.pNext = &memory_allocate_flags_info;
memory_allocate_info.allocationSize = memory_requirements.size;
memory_type_from_properties(memory_requirements.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, &memory_allocate_info.memoryTypeIndex);
vkAllocateMemory(vk_ctx.device, &memory_allocate_info, NULL, &scratch_memory);
vkBindBufferMemory(vk_ctx.device, scratch_buffer, scratch_memory, 0);
VkBufferDeviceAddressInfoKHR buffer_device_address_info = {0};
buffer_device_address_info.sType = VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO;
buffer_device_address_info.buffer = scratch_buffer;
uint64_t scratch_buffer_device_address = _vkGetBufferDeviceAddressKHR(vk_ctx.device, &buffer_device_address_info);
VkAccelerationStructureBuildGeometryInfoKHR acceleration_build_geometry_info = {0};
acceleration_build_geometry_info.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_BUILD_GEOMETRY_INFO_KHR;
acceleration_build_geometry_info.type = VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR;
acceleration_build_geometry_info.flags = VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_TRACE_BIT_KHR;
acceleration_build_geometry_info.mode = VK_BUILD_ACCELERATION_STRUCTURE_MODE_BUILD_KHR;
acceleration_build_geometry_info.dstAccelerationStructure = accel->impl.bottom_level_acceleration_structure;
acceleration_build_geometry_info.geometryCount = 1;
acceleration_build_geometry_info.pGeometries = &acceleration_geometry;
acceleration_build_geometry_info.scratchData.deviceAddress = scratch_buffer_device_address;
VkAccelerationStructureBuildRangeInfoKHR acceleration_build_range_info = {0};
acceleration_build_range_info.primitiveCount = 1;
acceleration_build_range_info.primitiveOffset = 0x0;
acceleration_build_range_info.firstVertex = 0;
acceleration_build_range_info.transformOffset = 0x0;
const VkAccelerationStructureBuildRangeInfoKHR *acceleration_build_infos[1] = {&acceleration_build_range_info};
{
VkCommandBufferAllocateInfo cmd_buf_allocate_info = {0};
cmd_buf_allocate_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
cmd_buf_allocate_info.commandPool = vk_ctx.cmd_pool;
cmd_buf_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
cmd_buf_allocate_info.commandBufferCount = 1;
VkCommandBuffer command_buffer;
vkAllocateCommandBuffers(vk_ctx.device, &cmd_buf_allocate_info, &command_buffer);
VkCommandBufferBeginInfo command_buffer_info = {0};
command_buffer_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
vkBeginCommandBuffer(command_buffer, &command_buffer_info);
_vkCmdBuildAccelerationStructuresKHR = (void *)vkGetDeviceProcAddr(vk_ctx.device, "vkCmdBuildAccelerationStructuresKHR");
_vkCmdBuildAccelerationStructuresKHR(command_buffer, 1, &acceleration_build_geometry_info, &acceleration_build_infos[0]);
vkEndCommandBuffer(command_buffer);
VkSubmitInfo submit_info = {0};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &command_buffer;
VkFenceCreateInfo fence_info = {0};
fence_info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
fence_info.flags = 0;
VkFence fence;
vkCreateFence(vk_ctx.device, &fence_info, NULL, &fence);
VkResult result = vkQueueSubmit(vk_ctx.queue, 1, &submit_info, fence);
assert(!result);
vkWaitForFences(vk_ctx.device, 1, &fence, VK_TRUE, 100000000000);
vkDestroyFence(vk_ctx.device, fence, NULL);
vkFreeCommandBuffers(vk_ctx.device, vk_ctx.cmd_pool, 1, &command_buffer);
}
VkAccelerationStructureDeviceAddressInfoKHR acceleration_device_address_info = {0};
acceleration_device_address_info.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_DEVICE_ADDRESS_INFO_KHR;
acceleration_device_address_info.accelerationStructure = accel->impl.bottom_level_acceleration_structure;
accel->impl.bottom_level_acceleration_structure_handle = _vkGetAccelerationStructureDeviceAddressKHR(vk_ctx.device, &acceleration_device_address_info);
vkFreeMemory(vk_ctx.device, scratch_memory, NULL);
vkDestroyBuffer(vk_ctx.device, scratch_buffer, NULL);
}
{
VkTransformMatrixKHR transform_matrix = {1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f};
VkAccelerationStructureInstanceKHR instance = {0};
instance.transform = transform_matrix;
instance.instanceCustomIndex = 0;
instance.mask = 0xFF;
instance.instanceShaderBindingTableRecordOffset = 0;
instance.flags = VK_GEOMETRY_INSTANCE_TRIANGLE_FACING_CULL_DISABLE_BIT_KHR;
instance.accelerationStructureReference = accel->impl.bottom_level_acceleration_structure_handle;
VkBufferCreateInfo buf_info = {0};
buf_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
buf_info.pNext = NULL;
buf_info.size = sizeof(instance);
buf_info.usage = VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT;
buf_info.flags = 0;
VkMemoryAllocateInfo mem_alloc;
memset(&mem_alloc, 0, sizeof(VkMemoryAllocateInfo));
mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
mem_alloc.pNext = NULL;
mem_alloc.allocationSize = 0;
mem_alloc.memoryTypeIndex = 0;
VkBuffer instances_buffer;
vkCreateBuffer(vk_ctx.device, &buf_info, NULL, &instances_buffer);
VkMemoryRequirements mem_reqs = {0};
vkGetBufferMemoryRequirements(vk_ctx.device, instances_buffer, &mem_reqs);
mem_alloc.allocationSize = mem_reqs.size;
memory_type_from_properties(mem_reqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, &mem_alloc.memoryTypeIndex);
VkMemoryAllocateFlagsInfo memory_allocate_flags_info = {0};
memory_allocate_flags_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO;
memory_allocate_flags_info.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR;
mem_alloc.pNext = &memory_allocate_flags_info;
VkDeviceMemory mem;
vkAllocateMemory(vk_ctx.device, &mem_alloc, NULL, &mem);
vkBindBufferMemory(vk_ctx.device, instances_buffer, mem, 0);
void *data;
vkMapMemory(vk_ctx.device, mem, 0, sizeof(VkAccelerationStructureInstanceKHR), 0, (void **)&data);
memcpy(data, &instance, sizeof(VkAccelerationStructureInstanceKHR));
vkUnmapMemory(vk_ctx.device, mem);
VkDeviceOrHostAddressConstKHR instance_data_device_address = {0};
instance_data_device_address.deviceAddress = get_buffer_device_address(instances_buffer);
VkAccelerationStructureGeometryKHR acceleration_geometry = {0};
acceleration_geometry.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_KHR;
acceleration_geometry.flags = VK_GEOMETRY_OPAQUE_BIT_KHR;
acceleration_geometry.geometryType = VK_GEOMETRY_TYPE_INSTANCES_KHR;
acceleration_geometry.geometry.instances.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_INSTANCES_DATA_KHR;
acceleration_geometry.geometry.instances.arrayOfPointers = VK_FALSE;
acceleration_geometry.geometry.instances.data.deviceAddress = instance_data_device_address.deviceAddress;
VkAccelerationStructureBuildGeometryInfoKHR acceleration_structure_build_geometry_info = {0};
acceleration_structure_build_geometry_info.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_BUILD_GEOMETRY_INFO_KHR;
acceleration_structure_build_geometry_info.type = VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR;
acceleration_structure_build_geometry_info.flags = VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_TRACE_BIT_KHR;
acceleration_structure_build_geometry_info.geometryCount = 1;
acceleration_structure_build_geometry_info.pGeometries = &acceleration_geometry;
VkAccelerationStructureBuildSizesInfoKHR acceleration_build_sizes_info = {0};
acceleration_build_sizes_info.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_BUILD_SIZES_INFO_KHR;
const uint32_t primitive_count = 1;
_vkGetAccelerationStructureBuildSizesKHR(vk_ctx.device, VK_ACCELERATION_STRUCTURE_BUILD_TYPE_DEVICE_KHR, &acceleration_structure_build_geometry_info,
&primitive_count, &acceleration_build_sizes_info);
VkBufferCreateInfo buffer_create_info = {0};
buffer_create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
buffer_create_info.size = acceleration_build_sizes_info.accelerationStructureSize;
buffer_create_info.usage = VK_BUFFER_USAGE_ACCELERATION_STRUCTURE_STORAGE_BIT_KHR;
buffer_create_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
VkBuffer top_level_buffer = VK_NULL_HANDLE;
vkCreateBuffer(vk_ctx.device, &buffer_create_info, NULL, &top_level_buffer);
VkMemoryRequirements memory_requirements2;
vkGetBufferMemoryRequirements(vk_ctx.device, top_level_buffer, &memory_requirements2);
memory_allocate_flags_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO;
memory_allocate_flags_info.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR;
VkMemoryAllocateInfo memory_allocate_info = {0};
memory_allocate_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
memory_allocate_info.pNext = &memory_allocate_flags_info;
memory_allocate_info.allocationSize = memory_requirements2.size;
memory_type_from_properties(memory_requirements2.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, &memory_allocate_info.memoryTypeIndex);
vkAllocateMemory(vk_ctx.device, &memory_allocate_info, NULL, &mem);
vkBindBufferMemory(vk_ctx.device, top_level_buffer, mem, 0);
VkAccelerationStructureCreateInfoKHR acceleration_create_info = {0};
acceleration_create_info.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_INFO_KHR;
acceleration_create_info.type = VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR;
acceleration_create_info.buffer = top_level_buffer;
acceleration_create_info.size = acceleration_build_sizes_info.accelerationStructureSize;
_vkCreateAccelerationStructureKHR(vk_ctx.device, &acceleration_create_info, NULL, &accel->impl.top_level_acceleration_structure);
VkBuffer scratch_buffer = VK_NULL_HANDLE;
VkDeviceMemory scratch_memory = VK_NULL_HANDLE;
buffer_create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
buffer_create_info.size = acceleration_build_sizes_info.buildScratchSize;
buffer_create_info.usage = VK_BUFFER_USAGE_STORAGE_BUFFER_BIT | VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT;
buffer_create_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
vkCreateBuffer(vk_ctx.device, &buffer_create_info, NULL, &scratch_buffer);
VkMemoryRequirements memory_requirements;
vkGetBufferMemoryRequirements(vk_ctx.device, scratch_buffer, &memory_requirements);
memory_allocate_flags_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO;
memory_allocate_flags_info.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR;
memory_allocate_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
memory_allocate_info.pNext = &memory_allocate_flags_info;
memory_allocate_info.allocationSize = memory_requirements.size;
memory_type_from_properties(memory_requirements.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, &memory_allocate_info.memoryTypeIndex);
vkAllocateMemory(vk_ctx.device, &memory_allocate_info, NULL, &scratch_memory);
vkBindBufferMemory(vk_ctx.device, scratch_buffer, scratch_memory, 0);
VkBufferDeviceAddressInfoKHR buffer_device_address_info = {0};
buffer_device_address_info.sType = VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO;
buffer_device_address_info.buffer = scratch_buffer;
uint64_t scratch_buffer_device_address = _vkGetBufferDeviceAddressKHR(vk_ctx.device, &buffer_device_address_info);
VkAccelerationStructureBuildGeometryInfoKHR acceleration_build_geometry_info = {0};
acceleration_build_geometry_info.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_BUILD_GEOMETRY_INFO_KHR;
acceleration_build_geometry_info.type = VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR;
acceleration_build_geometry_info.flags = VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_TRACE_BIT_KHR;
acceleration_build_geometry_info.mode = VK_BUILD_ACCELERATION_STRUCTURE_MODE_BUILD_KHR;
acceleration_build_geometry_info.srcAccelerationStructure = VK_NULL_HANDLE;
acceleration_build_geometry_info.dstAccelerationStructure = accel->impl.top_level_acceleration_structure;
acceleration_build_geometry_info.geometryCount = 1;
acceleration_build_geometry_info.pGeometries = &acceleration_geometry;
acceleration_build_geometry_info.scratchData.deviceAddress = scratch_buffer_device_address;
VkAccelerationStructureBuildRangeInfoKHR acceleration_build_range_info = {0};
acceleration_build_range_info.primitiveCount = 1;
acceleration_build_range_info.primitiveOffset = 0x0;
acceleration_build_range_info.firstVertex = 0;
acceleration_build_range_info.transformOffset = 0x0;
const VkAccelerationStructureBuildRangeInfoKHR *acceleration_build_infos[1] = {&acceleration_build_range_info};
{
VkCommandBufferAllocateInfo cmd_buf_allocate_info = {0};
cmd_buf_allocate_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
cmd_buf_allocate_info.commandPool = vk_ctx.cmd_pool;
cmd_buf_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
cmd_buf_allocate_info.commandBufferCount = 1;
VkCommandBuffer command_buffer;
vkAllocateCommandBuffers(vk_ctx.device, &cmd_buf_allocate_info, &command_buffer);
VkCommandBufferBeginInfo command_buffer_info = {0};
command_buffer_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
vkBeginCommandBuffer(command_buffer, &command_buffer_info);
_vkCmdBuildAccelerationStructuresKHR = (void *)vkGetDeviceProcAddr(vk_ctx.device, "vkCmdBuildAccelerationStructuresKHR");
_vkCmdBuildAccelerationStructuresKHR(command_buffer, 1, &acceleration_build_geometry_info, &acceleration_build_infos[0]);
vkEndCommandBuffer(command_buffer);
VkSubmitInfo submit_info = {0};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &command_buffer;
VkFenceCreateInfo fence_info = {0};
fence_info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
fence_info.flags = 0;
VkFence fence;
vkCreateFence(vk_ctx.device, &fence_info, NULL, &fence);
VkResult result = vkQueueSubmit(vk_ctx.queue, 1, &submit_info, fence);
assert(!result);
vkWaitForFences(vk_ctx.device, 1, &fence, VK_TRUE, 100000000000);
vkDestroyFence(vk_ctx.device, fence, NULL);
vkFreeCommandBuffers(vk_ctx.device, vk_ctx.cmd_pool, 1, &command_buffer);
}
VkAccelerationStructureDeviceAddressInfoKHR acceleration_device_address_info = {0};
acceleration_device_address_info.sType = VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_DEVICE_ADDRESS_INFO_KHR;
acceleration_device_address_info.accelerationStructure = accel->impl.top_level_acceleration_structure;
accel->impl.top_level_acceleration_structure_handle = _vkGetAccelerationStructureDeviceAddressKHR(vk_ctx.device, &acceleration_device_address_info);
vkFreeMemory(vk_ctx.device, scratch_memory, NULL);
vkDestroyBuffer(vk_ctx.device, scratch_buffer, NULL);
}
}
// Destroys both acceleration structures (BLAS and TLAS) owned by `accel`.
// The KHR entry point is re-resolved via vkGetDeviceProcAddr because the
// ray-tracing extension functions are not exported by the core loader.
// Note: does not free the backing VkBuffer/VkDeviceMemory created during init.
void kinc_raytrace_acceleration_structure_destroy(kinc_raytrace_acceleration_structure_t *accel) {
	_vkDestroyAccelerationStructureKHR = (void *)vkGetDeviceProcAddr(vk_ctx.device, "vkDestroyAccelerationStructureKHR");
	_vkDestroyAccelerationStructureKHR(vk_ctx.device, accel->impl.bottom_level_acceleration_structure, NULL);
	_vkDestroyAccelerationStructureKHR(vk_ctx.device, accel->impl.top_level_acceleration_structure, NULL);
}
// Selects the acceleration structure used by subsequent kinc_raytrace_dispatch_rays
// calls. Stores the pointer in a file-scope global; the caller retains ownership.
void kinc_raytrace_set_acceleration_structure(kinc_raytrace_acceleration_structure_t *_accel) {
	accel = _accel;
}
// Selects the ray-tracing pipeline used by subsequent kinc_raytrace_dispatch_rays
// calls. Stores the pointer in a file-scope global; the caller retains ownership.
void kinc_raytrace_set_pipeline(kinc_raytrace_pipeline_t *_pipeline) {
	pipeline = _pipeline;
}
// Selects the texture that ray dispatches render into and that
// kinc_raytrace_copy reads from. Stores the pointer in a file-scope global;
// the caller retains ownership.
void kinc_raytrace_set_target(kinc_g5_texture_t *_output) {
	output = _output;
}
// Records one ray-tracing dispatch into the given command list.
// Steps: refresh the pipeline's descriptor set (binding 0 = TLAS,
// 1 = storage image, 2 = constant buffer), query the device's shader-group
// handle size/alignment to build the shader-binding-table regions, suspend
// the active render pass, trace one ray per output texel, then resume the
// render pass.
void kinc_raytrace_dispatch_rays(kinc_g5_command_list_t *command_list) {
	// Binding 0: the top-level acceleration structure, chained via pNext.
	VkWriteDescriptorSetAccelerationStructureKHR as_info = {
	    .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_KHR,
	    .accelerationStructureCount = 1,
	    .pAccelerationStructures = &accel->impl.top_level_acceleration_structure,
	};
	VkWriteDescriptorSet as_write = {
	    .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
	    .pNext = &as_info,
	    .dstSet = pipeline->impl.descriptor_set,
	    .dstBinding = 0,
	    .descriptorCount = 1,
	    .descriptorType = VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR,
	};

	// Binding 1: the storage image the raygen shader writes to.
	VkDescriptorImageInfo image_info = {
	    .imageView = output->impl.texture.view,
	    .imageLayout = VK_IMAGE_LAYOUT_GENERAL,
	};
	VkWriteDescriptorSet image_write = {
	    .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
	    .dstSet = pipeline->impl.descriptor_set,
	    .dstBinding = 1,
	    .descriptorCount = 1,
	    .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
	    .pImageInfo = &image_info,
	};

	// Binding 2: the pipeline's uniform constant buffer, bound in full.
	VkDescriptorBufferInfo buffer_info = {
	    .buffer = pipeline->_constant_buffer->impl.buf,
	    .offset = 0,
	    .range = VK_WHOLE_SIZE,
	};
	VkWriteDescriptorSet buffer_write = {
	    .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
	    .dstSet = pipeline->impl.descriptor_set,
	    .dstBinding = 2,
	    .descriptorCount = 1,
	    .descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
	    .pBufferInfo = &buffer_info,
	};

	VkWriteDescriptorSet writes[3] = {as_write, image_write, buffer_write};
	vkUpdateDescriptorSets(vk_ctx.device, 3, writes, 0, NULL);

	// Query shaderGroupHandleSize / shaderGroupHandleAlignment from the device.
	VkPhysicalDeviceRayTracingPipelinePropertiesKHR rt_props = {
	    .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PIPELINE_PROPERTIES_KHR,
	    .pNext = NULL,
	};
	VkPhysicalDeviceProperties2 props2 = {
	    .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2,
	    .pNext = &rt_props,
	};
	vkGetPhysicalDeviceProperties2(vk_ctx.gpu, &props2);

	// Round the handle size up to the required alignment
	// (the spec guarantees the alignment is a power of two).
	const uint32_t aligned_handle_size =
	    (rt_props.shaderGroupHandleSize + rt_props.shaderGroupHandleAlignment - 1) & ~(rt_props.shaderGroupHandleAlignment - 1);

	// One strided region per SBT; each table holds a single group handle.
	VkStridedDeviceAddressRegionKHR raygen_region = {
	    .deviceAddress = get_buffer_device_address(pipeline->impl.raygen_shader_binding_table),
	    .stride = aligned_handle_size,
	    .size = aligned_handle_size,
	};
	VkStridedDeviceAddressRegionKHR miss_region = {
	    .deviceAddress = get_buffer_device_address(pipeline->impl.miss_shader_binding_table),
	    .stride = aligned_handle_size,
	    .size = aligned_handle_size,
	};
	VkStridedDeviceAddressRegionKHR hit_region = {
	    .deviceAddress = get_buffer_device_address(pipeline->impl.hit_shader_binding_table),
	    .stride = aligned_handle_size,
	    .size = aligned_handle_size,
	};
	// No callable shaders: an all-zero region is valid per the spec.
	VkStridedDeviceAddressRegionKHR callable_region = {0};

	// vkCmdTraceRaysKHR must be recorded outside a render pass, so suspend
	// the one the command list keeps open and resume it afterwards.
	vkCmdEndRenderPass(command_list->impl._buffer);

	vkCmdBindPipeline(command_list->impl._buffer, VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR, pipeline->impl.pipeline);
	vkCmdBindDescriptorSets(command_list->impl._buffer, VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR, pipeline->impl.pipeline_layout, 0, 1,
	                        &pipeline->impl.descriptor_set, 0, 0);

	_vkCmdTraceRaysKHR = (void *)vkGetDeviceProcAddr(vk_ctx.device, "vkCmdTraceRaysKHR");
	_vkCmdTraceRaysKHR(command_list->impl._buffer, &raygen_region, &miss_region, &hit_region, &callable_region, output->texWidth, output->texHeight, 1);

	vkCmdBeginRenderPass(command_list->impl._buffer, &currentRenderPassBeginInfo, VK_SUBPASS_CONTENTS_INLINE);
}
// Copies the global ray-tracing `output` image into `target`: either the
// current swapchain image (when target->framebuffer_index >= 0) or the
// target's own backing image. The active render pass is suspended around
// the transfer because vkCmdCopyImage is not allowed inside one.
// NOTE(review): the `source` parameter is unused — the copy always reads the
// global `output`; confirm that is intended.
void kinc_raytrace_copy(kinc_g5_command_list_t *command_list, kinc_g5_render_target_t *target, kinc_g5_texture_t *source) {
	vkCmdEndRenderPass(command_list->impl._buffer);

	// Full-size, single-layer color copy sized from the output texture.
	VkImageCopy region = {
	    .srcSubresource = {.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, .layerCount = 1},
	    .dstSubresource = {.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, .layerCount = 1},
	    .extent = {.width = (uint32_t)output->texWidth, .height = (uint32_t)output->texHeight, .depth = 1},
	};

	// framebuffer_index >= 0 marks a target that aliases a swapchain image.
	VkImage destination = target->framebuffer_index >= 0
	                          ? vk_ctx.windows[vk_ctx.current_window].images[vk_ctx.windows[vk_ctx.current_window].current_image]
	                          : target->impl.sourceImage;
	vkCmdCopyImage(command_list->impl._buffer, output->impl.texture.image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, destination,
	               VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);

	vkCmdBeginRenderPass(command_list->impl._buffer, &currentRenderPassBeginInfo, VK_SUBPASS_CONTENTS_INLINE);
}
#endif

View File

@ -0,0 +1,32 @@
#pragma once
#ifndef KORE_ANDROID
#include "MiniVulkan.h"
#ifdef __cplusplus
extern "C" {
#endif
// Vulkan backend state for a ray-tracing pipeline: the pipeline object, its
// layout and descriptor bindings, plus one shader-binding-table buffer per
// shader group (raygen / miss / hit).
typedef struct {
	VkPipeline pipeline;
	VkPipelineLayout pipeline_layout;
	VkDescriptorSet descriptor_set;
	VkDescriptorSetLayout descriptor_set_layout;
	VkBuffer raygen_shader_binding_table; // SBT buffer for the raygen group
	VkBuffer miss_shader_binding_table;   // SBT buffer for the miss group
	VkBuffer hit_shader_binding_table;    // SBT buffer for the hit group
} kinc_raytrace_pipeline_impl_t;
// Vulkan backend state for an acceleration structure: the bottom- and
// top-level structure handles together with their device addresses as
// returned by vkGetAccelerationStructureDeviceAddressKHR.
typedef struct {
	VkAccelerationStructureKHR top_level_acceleration_structure;
	VkAccelerationStructureKHR bottom_level_acceleration_structure;
	uint64_t top_level_acceleration_structure_handle;    // device address of the TLAS
	uint64_t bottom_level_acceleration_structure_handle; // device address of the BLAS
} kinc_raytrace_acceleration_structure_impl_t;
#ifdef __cplusplus
}
#endif
#endif

View File

@ -0,0 +1,331 @@
#include "vulkan.h"
#include "rendertarget.h"
#include <kinc/graphics5/rendertarget.h>
#include <kinc/graphics5/texture.h>
#include <kinc/log.h>
extern uint32_t swapchainImageCount;
extern kinc_g5_texture_t *vulkanTextures[16];
extern kinc_g5_render_target_t *vulkanRenderTargets[16];
bool memory_type_from_properties(uint32_t typeBits, VkFlags requirements_mask, uint32_t *typeIndex);
void setup_init_cmd();
/*static VkFormat convert_format(kinc_g5_render_target_format_t format) {
switch (format) {
case KINC_G5_RENDER_TARGET_FORMAT_128BIT_FLOAT:
return VK_FORMAT_R32G32B32A32_SFLOAT;
case KINC_G5_RENDER_TARGET_FORMAT_64BIT_FLOAT:
return VK_FORMAT_R16G16B16A16_SFLOAT;
case KINC_G5_RENDER_TARGET_FORMAT_32BIT_RED_FLOAT:
return VK_FORMAT_R32_SFLOAT;
case KINC_G5_RENDER_TARGET_FORMAT_16BIT_RED_FLOAT:
return VK_FORMAT_R16_SFLOAT;
case KINC_G5_RENDER_TARGET_FORMAT_8BIT_RED:
return VK_FORMAT_R8_UNORM;
case KINC_G5_RENDER_TARGET_FORMAT_32BIT:
default:
return VK_FORMAT_B8G8R8A8_UNORM;
}
}*/
// Records an image layout-transition barrier into `_buffer`.
// The src/dst access masks are derived from the old/new layouts by the branch
// ladder below; the stage masks are pessimistically ALL_COMMANDS on both
// sides, so the barrier is correct but not minimal.
// NOTE(review): only mip level 0 and the first array layer are transitioned
// (levelCount = layerCount = 1) — confirm callers never need more.
// NOTE(review): some branches overwrite srcAccessMask set by an earlier
// branch (e.g. new layout COLOR_ATTACHMENT_OPTIMAL replaces rather than ORs);
// this matches the original sample code this derives from, but verify the
// layout pairs actually used.
void setImageLayout(VkCommandBuffer _buffer, VkImage image, VkImageAspectFlags aspectMask, VkImageLayout oldImageLayout, VkImageLayout newImageLayout) {
	VkImageMemoryBarrier imageMemoryBarrier = {0};
	imageMemoryBarrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
	imageMemoryBarrier.pNext = NULL;
	// No queue-family ownership transfer.
	imageMemoryBarrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
	imageMemoryBarrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
	imageMemoryBarrier.oldLayout = oldImageLayout;
	imageMemoryBarrier.newLayout = newImageLayout;
	imageMemoryBarrier.image = image;
	imageMemoryBarrier.subresourceRange.aspectMask = aspectMask;
	imageMemoryBarrier.subresourceRange.baseMipLevel = 0;
	imageMemoryBarrier.subresourceRange.levelCount = 1;
	imageMemoryBarrier.subresourceRange.layerCount = 1;
	// --- source access: make writes done under the old layout available ---
	if (oldImageLayout == VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL) {
		imageMemoryBarrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
	}
	if (oldImageLayout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL) {
		imageMemoryBarrier.srcAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
	}
	if (oldImageLayout == VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL) {
		imageMemoryBarrier.srcAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
	}
	if (oldImageLayout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) {
		imageMemoryBarrier.srcAccessMask = VK_ACCESS_SHADER_READ_BIT;
	}
	// --- destination access: make the image visible to its next use ---
	if (newImageLayout == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) {
		imageMemoryBarrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
	}
	if (newImageLayout == VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL) {
		// Preserve any source access already derived above; skip for UNDEFINED,
		// whose contents are discardable and need no source access.
		if (oldImageLayout != VK_IMAGE_LAYOUT_UNDEFINED)
			imageMemoryBarrier.srcAccessMask = imageMemoryBarrier.srcAccessMask | VK_ACCESS_TRANSFER_READ_BIT;
		imageMemoryBarrier.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
	}
	if (newImageLayout == VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL) {
		imageMemoryBarrier.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
		// Overwrites (does not OR into) any previously derived source access.
		if (oldImageLayout != VK_IMAGE_LAYOUT_UNDEFINED)
			imageMemoryBarrier.srcAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
	}
	if (newImageLayout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL) {
		imageMemoryBarrier.dstAccessMask = imageMemoryBarrier.dstAccessMask | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
	}
	if (newImageLayout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) {
		if (oldImageLayout != VK_IMAGE_LAYOUT_UNDEFINED)
			imageMemoryBarrier.srcAccessMask = VK_ACCESS_HOST_WRITE_BIT | VK_ACCESS_TRANSFER_WRITE_BIT;
		imageMemoryBarrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
	}
	// ALL_COMMANDS on both sides: a full pipeline barrier around the transition.
	VkPipelineStageFlags srcStageFlags = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
	VkPipelineStageFlags dstStageFlags = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
	vkCmdPipelineBarrier(_buffer, srcStageFlags, dstStageFlags, 0, 0, NULL, 0, NULL, 1, &imageMemoryBarrier);
}
static void render_target_init(kinc_g5_render_target_t *target, int width, int height, kinc_g5_render_target_format_t format, int depthBufferBits,
int stencilBufferBits, int samples_per_pixel, int framebuffer_index) {
target->width = width;
target->height = height;
target->framebuffer_index = framebuffer_index;
target->texWidth = width;
target->texHeight = height;
target->impl.format = convert_format(format);
target->impl.depthBufferBits = depthBufferBits;
target->impl.stage = 0;
target->impl.stage_depth = -1;
target->impl.readbackBufferCreated = false;
if (framebuffer_index < 0) {
{
VkFormatProperties formatProperties;
VkResult err;
vkGetPhysicalDeviceFormatProperties(vk_ctx.gpu, target->impl.format, &formatProperties);
assert(formatProperties.optimalTilingFeatures & VK_FORMAT_FEATURE_BLIT_SRC_BIT);
VkImageCreateInfo image = {0};
image.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
image.pNext = NULL;
image.imageType = VK_IMAGE_TYPE_2D;
image.format = target->impl.format;
image.extent.width = width;
image.extent.height = height;
image.extent.depth = 1;
image.mipLevels = 1;
image.arrayLayers = 1;
image.samples = VK_SAMPLE_COUNT_1_BIT;
image.tiling = VK_IMAGE_TILING_OPTIMAL;
image.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
image.flags = 0;
VkImageViewCreateInfo colorImageView = {0};
colorImageView.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
colorImageView.pNext = NULL;
colorImageView.viewType = VK_IMAGE_VIEW_TYPE_2D;
colorImageView.format = target->impl.format;
colorImageView.flags = 0;
colorImageView.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
colorImageView.subresourceRange.baseMipLevel = 0;
colorImageView.subresourceRange.levelCount = 1;
colorImageView.subresourceRange.baseArrayLayer = 0;
colorImageView.subresourceRange.layerCount = 1;
err = vkCreateImage(vk_ctx.device, &image, NULL, &target->impl.sourceImage);
assert(!err);
VkMemoryRequirements memoryRequirements;
vkGetImageMemoryRequirements(vk_ctx.device, target->impl.sourceImage, &memoryRequirements);
VkMemoryAllocateInfo allocationInfo = {0};
allocationInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
allocationInfo.pNext = NULL;
allocationInfo.memoryTypeIndex = 0;
allocationInfo.allocationSize = memoryRequirements.size;
bool pass = memory_type_from_properties(memoryRequirements.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, &allocationInfo.memoryTypeIndex);
assert(pass);
err = vkAllocateMemory(vk_ctx.device, &allocationInfo, NULL, &target->impl.sourceMemory);
assert(!err);
err = vkBindImageMemory(vk_ctx.device, target->impl.sourceImage, target->impl.sourceMemory, 0);
assert(!err);
setup_init_cmd();
setImageLayout(vk_ctx.setup_cmd, target->impl.sourceImage, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_UNDEFINED,
VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
flush_init_cmd();
colorImageView.image = target->impl.sourceImage;
err = vkCreateImageView(vk_ctx.device, &colorImageView, NULL, &target->impl.sourceView);
assert(!err);
}
if (depthBufferBits > 0) {
const VkFormat depth_format = VK_FORMAT_D16_UNORM;
VkImageCreateInfo image = {0};
image.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
image.pNext = NULL;
image.imageType = VK_IMAGE_TYPE_2D;
image.format = depth_format;
image.extent.width = width;
image.extent.height = height;
image.extent.depth = 1;
image.mipLevels = 1;
image.arrayLayers = 1;
image.samples = VK_SAMPLE_COUNT_1_BIT;
image.tiling = VK_IMAGE_TILING_OPTIMAL;
image.usage = VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
image.flags = 0;
VkResult err = vkCreateImage(vk_ctx.device, &image, NULL, &target->impl.depthImage);
assert(!err);
VkMemoryAllocateInfo mem_alloc = {0};
mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
mem_alloc.pNext = NULL;
mem_alloc.allocationSize = 0;
mem_alloc.memoryTypeIndex = 0;
VkImageViewCreateInfo view = {0};
view.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
view.pNext = NULL;
view.image = target->impl.depthImage;
view.format = depth_format;
view.subresourceRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
view.subresourceRange.baseMipLevel = 0;
view.subresourceRange.levelCount = 1;
view.subresourceRange.baseArrayLayer = 0;
view.subresourceRange.layerCount = 1;
view.flags = 0;
view.viewType = VK_IMAGE_VIEW_TYPE_2D;
VkMemoryRequirements mem_reqs = {0};
bool pass;
/* get memory requirements for this object */
vkGetImageMemoryRequirements(vk_ctx.device, target->impl.depthImage, &mem_reqs);
/* select memory size and type */
mem_alloc.allocationSize = mem_reqs.size;
pass = memory_type_from_properties(mem_reqs.memoryTypeBits, 0, /* No requirements */ &mem_alloc.memoryTypeIndex);
assert(pass);
/* allocate memory */
err = vkAllocateMemory(vk_ctx.device, &mem_alloc, NULL, &target->impl.depthMemory);
assert(!err);
/* bind memory */
err = vkBindImageMemory(vk_ctx.device, target->impl.depthImage, target->impl.depthMemory, 0);
assert(!err);
setup_init_cmd();
setImageLayout(vk_ctx.setup_cmd, target->impl.depthImage, VK_IMAGE_ASPECT_DEPTH_BIT, VK_IMAGE_LAYOUT_UNDEFINED,
VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL);
flush_init_cmd();
/* create image view */
err = vkCreateImageView(vk_ctx.device, &view, NULL, &target->impl.depthView);
assert(!err);
}
VkImageView attachments[2];
attachments[0] = target->impl.sourceView;
if (depthBufferBits > 0) {
attachments[1] = target->impl.depthView;
}
VkFramebufferCreateInfo fbufCreateInfo = {0};
fbufCreateInfo.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
fbufCreateInfo.pNext = NULL;
if (framebuffer_index >= 0) {
fbufCreateInfo.renderPass = vk_ctx.windows[vk_ctx.current_window].framebuffer_render_pass;
}
else if (depthBufferBits > 0) {
fbufCreateInfo.renderPass = vk_ctx.windows[vk_ctx.current_window].rendertarget_render_pass_with_depth;
}
else {
fbufCreateInfo.renderPass = vk_ctx.windows[vk_ctx.current_window].rendertarget_render_pass;
}
fbufCreateInfo.attachmentCount = depthBufferBits > 0 ? 2 : 1;
fbufCreateInfo.pAttachments = attachments;
fbufCreateInfo.width = width;
fbufCreateInfo.height = height;
fbufCreateInfo.layers = 1;
VkResult err = vkCreateFramebuffer(vk_ctx.device, &fbufCreateInfo, NULL, &target->impl.framebuffer);
assert(!err);
}
}
// Creates an offscreen render target. A framebuffer index of -1 marks the
// target as non-framebuffer, i.e. it owns its own color image/view/framebuffer.
void kinc_g5_render_target_init_with_multisampling(kinc_g5_render_target_t *target, int width, int height, kinc_g5_render_target_format_t format,
int depthBufferBits, int stencilBufferBits, int samples_per_pixel) {
render_target_init(target, width, height, format, depthBufferBits, stencilBufferBits, samples_per_pixel, -1);
}
// Number of framebuffer render targets currently alive; the running count is
// handed to render_target_init as the framebuffer index of the next target.
static int framebuffer_count = 0;
// Creates a render target that is presented through the window framebuffer's
// render pass (framebuffer_index >= 0).
void kinc_g5_render_target_init_framebuffer_with_multisampling(kinc_g5_render_target_t *target, int width, int height, kinc_g5_render_target_format_t format,
int depthBufferBits, int stencilBufferBits, int samples_per_pixel) {
render_target_init(target, width, height, format, depthBufferBits, stencilBufferBits, samples_per_pixel, framebuffer_count);
framebuffer_count += 1;
}
// TODO: cube-map render targets are not implemented in this Vulkan backend.
void kinc_g5_render_target_init_cube_with_multisampling(kinc_g5_render_target_t *target, int cubeMapSize, kinc_g5_render_target_format_t format,
int depthBufferBits, int stencilBufferBits, int samples_per_pixel) {}
// Releases the Vulkan resources owned by a render target. Framebuffer targets
// (framebuffer_index >= 0) own no Vulkan objects of their own; they only
// decrement the global framebuffer counter.
void kinc_g5_render_target_destroy(kinc_g5_render_target_t *target) {
	if (target->framebuffer_index >= 0) {
		framebuffer_count -= 1;
		return;
	}

	vkDestroyFramebuffer(vk_ctx.device, target->impl.framebuffer, NULL);

	if (target->impl.depthBufferBits > 0) {
		vkDestroyImageView(vk_ctx.device, target->impl.depthView, NULL);
		vkDestroyImage(vk_ctx.device, target->impl.depthImage, NULL);
		vkFreeMemory(vk_ctx.device, target->impl.depthMemory, NULL);
	}

	vkDestroyImageView(vk_ctx.device, target->impl.sourceView, NULL);
	vkDestroyImage(vk_ctx.device, target->impl.sourceImage, NULL);
	vkFreeMemory(vk_ctx.device, target->impl.sourceMemory, NULL);
}
// Makes `target` share the depth attachment of `source` and rebuilds the
// framebuffer so it references the adopted depth view.
void kinc_g5_render_target_set_depth_stencil_from(kinc_g5_render_target_t *target, kinc_g5_render_target_t *source) {
	target->impl.depthImage = source->impl.depthImage;
	target->impl.depthMemory = source->impl.depthMemory;
	target->impl.depthView = source->impl.depthView;
	target->impl.depthBufferBits = source->impl.depthBufferBits;

	bool has_depth = target->impl.depthBufferBits > 0;

	VkImageView views[2];
	views[0] = target->impl.sourceView;
	if (has_depth) {
		views[1] = target->impl.depthView;
	}

	VkFramebufferCreateInfo create_info = {0};
	create_info.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
	create_info.pNext = NULL;
	// Pick the render pass matching the attachment set of this framebuffer.
	create_info.renderPass = has_depth ? vk_ctx.windows[vk_ctx.current_window].rendertarget_render_pass_with_depth
	                                   : vk_ctx.windows[vk_ctx.current_window].rendertarget_render_pass;
	create_info.attachmentCount = has_depth ? 2 : 1;
	create_info.pAttachments = views;
	create_info.width = target->width;
	create_info.height = target->height;
	create_info.layers = 1;

	VkResult err = vkCreateFramebuffer(vk_ctx.device, &create_info, NULL, &target->impl.framebuffer);
	assert(!err);
}

View File

@ -0,0 +1,25 @@
#pragma once
#include "MiniVulkan.h"
// Per-render-target Vulkan state: the color and (optional) depth images with
// their memory and views, plus the framebuffer tying them to a render pass.
typedef struct {
VkImage sourceImage; // color attachment image
VkDeviceMemory sourceMemory; // backing memory of the color image
VkImageView sourceView; // view used for sampling / as attachment
VkImage depthImage; // depth attachment (may be shared, see set_depth_stencil_from)
VkDeviceMemory depthMemory;
VkImageView depthView;
int depthBufferBits; // 0 means no depth attachment
VkFramebuffer framebuffer;
VkFormat format; // color format of sourceImage
VkBuffer readbackBuffer; // lazily created buffer for CPU readback
VkDeviceMemory readbackMemory;
bool readbackBufferCreated;
int stage; // descriptor binding slot while bound as texture -- TODO confirm
int stage_depth; // binding slot of the depth view -- TODO confirm
} RenderTarget5Impl;

View File

@ -0,0 +1,78 @@
#include <kinc/graphics5/sampler.h>
static VkCompareOp convert_compare_mode(kinc_g5_compare_mode_t compare);
// Maps a Kinc texture addressing mode onto the equivalent Vulkan sampler
// address mode; unknown values assert and fall back to repeat.
static VkSamplerAddressMode convert_addressing(kinc_g5_texture_addressing_t mode) {
	if (mode == KINC_G5_TEXTURE_ADDRESSING_REPEAT) {
		return VK_SAMPLER_ADDRESS_MODE_REPEAT;
	}
	if (mode == KINC_G5_TEXTURE_ADDRESSING_BORDER) {
		return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER;
	}
	if (mode == KINC_G5_TEXTURE_ADDRESSING_CLAMP) {
		return VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
	}
	if (mode == KINC_G5_TEXTURE_ADDRESSING_MIRROR) {
		return VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT;
	}
	assert(false);
	return VK_SAMPLER_ADDRESS_MODE_REPEAT;
}
// Maps a Kinc mipmap filter onto the Vulkan mipmap mode. NONE and POINT both
// select nearest; unknown values assert and fall back to nearest.
static VkSamplerMipmapMode convert_mipmap_mode(kinc_g5_mipmap_filter_t filter) {
	if (filter == KINC_G5_MIPMAP_FILTER_LINEAR) {
		return VK_SAMPLER_MIPMAP_MODE_LINEAR;
	}
	if (filter == KINC_G5_MIPMAP_FILTER_NONE || filter == KINC_G5_MIPMAP_FILTER_POINT) {
		return VK_SAMPLER_MIPMAP_MODE_NEAREST;
	}
	assert(false);
	return VK_SAMPLER_MIPMAP_MODE_NEAREST;
}
// Maps a Kinc texture filter onto a VkFilter. Anisotropic filtering has no
// VkFilter of its own - it uses linear here and is enabled separately via
// anisotropyEnable in VkSamplerCreateInfo.
static VkFilter convert_texture_filter(kinc_g5_texture_filter_t filter) {
	if (filter == KINC_G5_TEXTURE_FILTER_POINT) {
		return VK_FILTER_NEAREST;
	}
	if (filter == KINC_G5_TEXTURE_FILTER_LINEAR || filter == KINC_G5_TEXTURE_FILTER_ANISOTROPIC) {
		return VK_FILTER_LINEAR;
	}
	assert(false);
	return VK_FILTER_NEAREST;
}
// Creates a VkSampler matching the given Kinc sampler options.
// Anisotropy is enabled whenever either the magnification or the minification
// filter requests it; compare mode is passed through for shadow samplers.
void kinc_g5_sampler_init(kinc_g5_sampler_t *sampler, const kinc_g5_sampler_options_t *options) {
	VkSamplerCreateInfo info = {0};
	info.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
	info.pNext = NULL;
	info.flags = 0;
	info.addressModeU = convert_addressing(options->u_addressing);
	info.addressModeV = convert_addressing(options->v_addressing);
	info.addressModeW = convert_addressing(options->w_addressing);
	info.mipmapMode = convert_mipmap_mode(options->mipmap_filter);
	info.magFilter = convert_texture_filter(options->magnification_filter);
	info.minFilter = convert_texture_filter(options->minification_filter);
	info.compareEnable = options->is_comparison;
	info.compareOp = convert_compare_mode(options->compare_mode);
	info.anisotropyEnable =
	    (options->magnification_filter == KINC_G5_TEXTURE_FILTER_ANISOTROPIC || options->minification_filter == KINC_G5_TEXTURE_FILTER_ANISOTROPIC);
	info.maxAnisotropy = options->max_anisotropy;
	info.maxLod = options->lod_max_clamp;
	info.minLod = options->lod_min_clamp;
	// The rest of this backend asserts on every VkResult; do the same here
	// instead of silently ignoring a failed sampler creation.
	VkResult err = vkCreateSampler(vk_ctx.device, &info, NULL, &sampler->impl.sampler);
	assert(!err);
}
// Destroys the VkSampler created by kinc_g5_sampler_init.
void kinc_g5_sampler_destroy(kinc_g5_sampler_t *sampler) {
vkDestroySampler(vk_ctx.device, sampler->impl.sampler, NULL);
}

View File

@ -0,0 +1,7 @@
#pragma once
#include <vulkan/vulkan_core.h>
// Vulkan backend state for a Kinc sampler: just the VkSampler handle.
typedef struct kinc_g5_sampler_impl {
VkSampler sampler;
} kinc_g5_sampler_impl_t;

View File

@ -0,0 +1,16 @@
#include <kinc/graphics5/shader.h>

#include <assert.h>
#include <stdlib.h>
#include <string.h>
// Copies the shader source into an owned, NUL-terminated buffer.
// The byte-by-byte loop compared a signed int against a size_t and re-implemented
// memcpy; use memcpy and fail loudly on allocation failure instead.
void kinc_g5_shader_init(kinc_g5_shader_t *shader, const void *source, size_t length, kinc_g5_shader_type_t type) {
	shader->impl.length = (int)length;
	shader->impl.id = 0;
	shader->impl.source = (char *)malloc(length + 1);
	assert(shader->impl.source != NULL);
	memcpy(shader->impl.source, source, length);
	shader->impl.source[length] = 0; // keep the copy usable as a C string
}
// Frees the source copy made in kinc_g5_shader_init. Resetting the pointer to
// NULL makes a second destroy a harmless no-op (free(NULL) is defined).
void kinc_g5_shader_destroy(kinc_g5_shader_t *shader) {
free(shader->impl.source);
shader->impl.source = NULL;
}

View File

@ -0,0 +1,7 @@
#pragma once
// Vulkan backend state for a shader: an owned copy of the source blob.
typedef struct {
unsigned id; // set to 0 in kinc_g5_shader_init; purpose not visible here -- TODO confirm
char *source; // heap copy of the shader source, NUL-terminated
int length; // length of the source in bytes (excluding the terminator)
} Shader5Impl;

View File

@ -0,0 +1,418 @@
#include "vulkan.h"
#include <kinc/graphics5/texture.h>
#include <kinc/image.h>
#include <kinc/log.h>
// When true, forces the staging-buffer upload path even for formats that
// support linear sampled images (see kinc_g5_texture_init_from_image).
bool use_staging_buffer = false;
// Implemented elsewhere in this backend.
bool memory_type_from_properties(uint32_t typeBits, VkFlags requirements_mask, uint32_t *typeIndex);
void set_image_layout(VkImage image, VkImageAspectFlags aspectMask, VkImageLayout old_image_layout, VkImageLayout new_image_layout);
// Creates a 2D VkImage, allocates and binds memory with the requested property
// flags, optionally uploads tex_colors (only when the memory is host-visible)
// and transitions the image to its final layout (GENERAL for storage images,
// SHADER_READ_ONLY_OPTIMAL otherwise). tex_colors may be NULL to allocate
// without uploading. *deviceSize receives the allocation size so callers can
// map the full range later.
//
// Fix: the 16-bit upload paths (R16_SFLOAT, R16G16B16A16_SFLOAT) indexed their
// uint16_t views with a row stride of rowPitch / 4. VkSubresourceLayout's
// rowPitch is in bytes, so the stride in 2-byte elements is rowPitch / 2; the
// old divisor halved the stride and corrupted every row after the first.
static void prepare_texture_image(uint8_t *tex_colors, uint32_t tex_width, uint32_t tex_height, struct texture_object *tex_obj, VkImageTiling tiling,
                                  VkImageUsageFlags usage, VkFlags required_props, VkDeviceSize *deviceSize, VkFormat tex_format) {
	VkResult err;
	bool pass;

	tex_obj->tex_width = tex_width;
	tex_obj->tex_height = tex_height;

	VkImageCreateInfo image_create_info = {0};
	image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
	image_create_info.pNext = NULL;
	image_create_info.imageType = VK_IMAGE_TYPE_2D;
	image_create_info.format = tex_format;
	image_create_info.extent.width = tex_width;
	image_create_info.extent.height = tex_height;
	image_create_info.extent.depth = 1;
	image_create_info.mipLevels = 1;
	image_create_info.arrayLayers = 1;
	image_create_info.samples = VK_SAMPLE_COUNT_1_BIT;
	image_create_info.tiling = tiling;
	image_create_info.usage = usage;
	image_create_info.flags = 0;
	image_create_info.initialLayout = VK_IMAGE_LAYOUT_PREINITIALIZED;

	VkMemoryAllocateInfo mem_alloc = {0};
	mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
	mem_alloc.pNext = NULL;
	mem_alloc.allocationSize = 0;
	mem_alloc.memoryTypeIndex = 0;

	err = vkCreateImage(vk_ctx.device, &image_create_info, NULL, &tex_obj->image);
	assert(!err);

	VkMemoryRequirements mem_reqs;
	vkGetImageMemoryRequirements(vk_ctx.device, tex_obj->image, &mem_reqs);
	*deviceSize = mem_alloc.allocationSize = mem_reqs.size;
	pass = memory_type_from_properties(mem_reqs.memoryTypeBits, required_props, &mem_alloc.memoryTypeIndex);
	assert(pass);

	// allocate memory
	err = vkAllocateMemory(vk_ctx.device, &mem_alloc, NULL, &tex_obj->mem);
	assert(!err);

	// bind memory
	err = vkBindImageMemory(vk_ctx.device, tex_obj->image, tex_obj->mem, 0);
	assert(!err);

	if (required_props & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT && tex_colors != NULL) {
		// Upload row by row, honoring the driver-chosen rowPitch of the linear
		// image (which may be larger than tex_width * bytes_per_pixel).
		VkImageSubresource subres = {0};
		subres.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
		subres.mipLevel = 0;
		subres.arrayLayer = 0;

		VkSubresourceLayout layout;
		vkGetImageSubresourceLayout(vk_ctx.device, tex_obj->image, &subres, &layout);

		uint8_t *data;
		err = vkMapMemory(vk_ctx.device, tex_obj->mem, 0, mem_alloc.allocationSize, 0, (void **)&data);
		assert(!err);

		if (tex_format == VK_FORMAT_R8_UNORM) {
			// 1 byte per pixel.
			for (uint32_t y = 0; y < tex_height; y++) {
				for (uint32_t x = 0; x < tex_width; x++) {
					data[y * layout.rowPitch + x] = tex_colors[y * tex_width + x];
				}
			}
		}
		else if (tex_format == VK_FORMAT_R32_SFLOAT) {
			// 4 bytes per pixel, copied as uint32 -> row stride is rowPitch / 4 elements.
			uint32_t *data32 = (uint32_t *)data;
			uint32_t *tex_colors32 = (uint32_t *)tex_colors;
			for (uint32_t y = 0; y < tex_height; y++) {
				for (uint32_t x = 0; x < tex_width; x++) {
					data32[y * (layout.rowPitch / 4) + x] = tex_colors32[y * tex_width + x];
				}
			}
		}
		else if (tex_format == VK_FORMAT_R16_SFLOAT) {
			// 2 bytes per pixel, copied as uint16 -> row stride is rowPitch / 2 elements.
			uint16_t *data16 = (uint16_t *)data;
			uint16_t *tex_colors16 = (uint16_t *)tex_colors;
			for (uint32_t y = 0; y < tex_height; y++) {
				for (uint32_t x = 0; x < tex_width; x++) {
					data16[y * (layout.rowPitch / 2) + x] = tex_colors16[y * tex_width + x];
				}
			}
		}
		else if (tex_format == VK_FORMAT_R32G32B32A32_SFLOAT) {
			// 16 bytes per pixel, copied as four uint32 components.
			uint32_t *data32 = (uint32_t *)data;
			uint32_t *tex_colors32 = (uint32_t *)tex_colors;
			for (uint32_t y = 0; y < tex_height; y++) {
				for (uint32_t x = 0; x < tex_width; x++) {
					for (uint32_t c = 0; c < 4; c++) {
						data32[y * (layout.rowPitch / 4) + x * 4 + c] = tex_colors32[y * tex_width * 4 + x * 4 + c];
					}
				}
			}
		}
		else if (tex_format == VK_FORMAT_R16G16B16A16_SFLOAT) {
			// 8 bytes per pixel, copied as four uint16 components -> row stride is
			// rowPitch / 2 elements.
			uint16_t *data16 = (uint16_t *)data;
			uint16_t *tex_colors16 = (uint16_t *)tex_colors;
			for (uint32_t y = 0; y < tex_height; y++) {
				for (uint32_t x = 0; x < tex_width; x++) {
					for (uint32_t c = 0; c < 4; c++) {
						data16[y * (layout.rowPitch / 2) + x * 4 + c] = tex_colors16[y * tex_width * 4 + x * 4 + c];
					}
				}
			}
		}
		else if (tex_format == VK_FORMAT_B8G8R8A8_UNORM) {
			// Source pixels are RGBA bytes; swap the R and B channels while copying.
			for (uint32_t y = 0; y < tex_height; y++) {
				for (uint32_t x = 0; x < tex_width; x++) {
					data[y * layout.rowPitch + x * 4 + 0] = tex_colors[y * tex_width * 4 + x * 4 + 2];
					data[y * layout.rowPitch + x * 4 + 1] = tex_colors[y * tex_width * 4 + x * 4 + 1];
					data[y * layout.rowPitch + x * 4 + 2] = tex_colors[y * tex_width * 4 + x * 4 + 0];
					data[y * layout.rowPitch + x * 4 + 3] = tex_colors[y * tex_width * 4 + x * 4 + 3];
				}
			}
		}
		else {
			// Everything else is assumed to be 4 bytes per pixel, copied verbatim.
			for (uint32_t y = 0; y < tex_height; y++) {
				for (uint32_t x = 0; x < tex_width; x++) {
					for (uint32_t c = 0; c < 4; c++) {
						data[y * layout.rowPitch + x * 4 + c] = tex_colors[y * tex_width * 4 + x * 4 + c];
					}
				}
			}
		}

		vkUnmapMemory(vk_ctx.device, tex_obj->mem);
	}

	if (usage & VK_IMAGE_USAGE_STORAGE_BIT) {
		tex_obj->imageLayout = VK_IMAGE_LAYOUT_GENERAL;
	}
	else {
		tex_obj->imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
	}
	// NOTE(review): oldLayout is UNDEFINED although the image was created
	// PREINITIALIZED and may already hold uploaded data; UNDEFINED permits the
	// driver to discard contents - confirm whether PREINITIALIZED should be used.
	set_image_layout(tex_obj->image, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_UNDEFINED, tex_obj->imageLayout);
	// setting the image layout does not reference the actual memory so no need to add a mem ref
}
// Destroys the image and frees the memory held by a texture_object (used both
// for staging textures and for final textures).
static void destroy_texture_image(struct texture_object *tex_obj) {
// clean up staging resources
vkDestroyImage(vk_ctx.device, tex_obj->image, NULL);
vkFreeMemory(vk_ctx.device, tex_obj->mem, NULL);
}
// Maps a Kinc image format onto the VkFormat used for its texture storage.
// RGB24 has no matching 24-bit Vulkan format here and, like BGRA32 and any
// unrecognized value, maps to B8G8R8A8_UNORM.
static VkFormat convert_image_format(kinc_image_format_t format) {
	if (format == KINC_IMAGE_FORMAT_RGBA128) {
		return VK_FORMAT_R32G32B32A32_SFLOAT;
	}
	if (format == KINC_IMAGE_FORMAT_RGBA64) {
		return VK_FORMAT_R16G16B16A16_SFLOAT;
	}
	if (format == KINC_IMAGE_FORMAT_A32) {
		return VK_FORMAT_R32_SFLOAT;
	}
	if (format == KINC_IMAGE_FORMAT_A16) {
		return VK_FORMAT_R16_SFLOAT;
	}
	if (format == KINC_IMAGE_FORMAT_GREY8) {
		return VK_FORMAT_R8_UNORM;
	}
	if (format == KINC_IMAGE_FORMAT_RGBA32) {
		return VK_FORMAT_R8G8B8A8_UNORM;
	}
	return VK_FORMAT_B8G8R8A8_UNORM;
}
// Returns the bytes per pixel of a format as stored on the GPU. RGB24 counts
// as 4 because convert_image_format stores it as B8G8R8A8; unknown formats
// default to 4 as well.
static int format_byte_size(kinc_image_format_t format) {
	if (format == KINC_IMAGE_FORMAT_RGBA128) {
		return 16;
	}
	if (format == KINC_IMAGE_FORMAT_RGBA64) {
		return 8;
	}
	if (format == KINC_IMAGE_FORMAT_A16) {
		return 2;
	}
	if (format == KINC_IMAGE_FORMAT_GREY8) {
		return 1;
	}
	return 4;
}
// Caches the driver-reported row pitch (in bytes) of the texture's level-0
// subresource so kinc_g5_texture_stride can return it without a Vulkan call.
static void update_stride(kinc_g5_texture_t *texture) {
VkImageSubresource subres = {0};
subres.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
subres.mipLevel = 0;
subres.arrayLayer = 0;
VkSubresourceLayout layout;
vkGetImageSubresourceLayout(vk_ctx.device, texture->impl.texture.image, &subres, &layout);
texture->impl.stride = (int)layout.rowPitch;
}
// Creates a 2D texture from a CPU-side kinc_image_t.
// Uses a directly-mapped linear image when the format supports linear sampled
// images (and staging is not forced); otherwise uploads via a linear staging
// image copied into an optimally-tiled device-local image.
void kinc_g5_texture_init_from_image(kinc_g5_texture_t *texture, kinc_image_t *image) {
texture->texWidth = image->width;
texture->texHeight = image->height;
const VkFormat tex_format = convert_image_format(image->format);
VkFormatProperties props;
VkResult err;
vkGetPhysicalDeviceFormatProperties(vk_ctx.gpu, tex_format, &props);
if ((props.linearTilingFeatures & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT) && !use_staging_buffer) {
// Device can texture using linear textures
prepare_texture_image((uint8_t *)image->data, (uint32_t)image->width, (uint32_t)image->height, &texture->impl.texture, VK_IMAGE_TILING_LINEAR,
VK_IMAGE_USAGE_SAMPLED_BIT, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, &texture->impl.deviceSize, tex_format);
flush_init_cmd();
}
else if (props.optimalTilingFeatures & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT) {
// Must use staging buffer to copy linear texture to optimized
struct texture_object staging_texture;
memset(&staging_texture, 0, sizeof(staging_texture));
// Staging image holds the pixels in host-visible linear memory...
prepare_texture_image((uint8_t *)image->data, (uint32_t)image->width, (uint32_t)image->height, &staging_texture, VK_IMAGE_TILING_LINEAR,
VK_IMAGE_USAGE_TRANSFER_SRC_BIT, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, &texture->impl.deviceSize, tex_format);
// ...while the final image is device-local with optimal tiling (no upload).
prepare_texture_image((uint8_t *)image->data, (uint32_t)image->width, (uint32_t)image->height, &texture->impl.texture, VK_IMAGE_TILING_OPTIMAL,
(VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT), VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, &texture->impl.deviceSize,
tex_format);
// Transition both images for the copy, copy, then restore the final layout.
set_image_layout(staging_texture.image, VK_IMAGE_ASPECT_COLOR_BIT, staging_texture.imageLayout, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
set_image_layout(texture->impl.texture.image, VK_IMAGE_ASPECT_COLOR_BIT, texture->impl.texture.imageLayout, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
VkImageCopy copy_region = {0};
copy_region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
copy_region.srcSubresource.mipLevel = 0;
copy_region.srcSubresource.baseArrayLayer = 0;
copy_region.srcSubresource.layerCount = 1;
copy_region.srcOffset.x = copy_region.srcOffset.y = copy_region.srcOffset.z = 0;
copy_region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
copy_region.dstSubresource.mipLevel = 0;
copy_region.dstSubresource.baseArrayLayer = 0;
copy_region.dstSubresource.layerCount = 1;
copy_region.dstOffset.x = copy_region.dstOffset.y = copy_region.dstOffset.z = 0;
copy_region.extent.width = (uint32_t)staging_texture.tex_width;
copy_region.extent.height = (uint32_t)staging_texture.tex_height;
copy_region.extent.depth = 1;
vkCmdCopyImage(vk_ctx.setup_cmd, staging_texture.image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, texture->impl.texture.image,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &copy_region);
set_image_layout(texture->impl.texture.image, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, texture->impl.texture.imageLayout);
flush_init_cmd();
destroy_texture_image(&staging_texture);
}
else {
assert(!"No support for B8G8R8A8_UNORM as texture image format");
}
update_stride(texture);
// Create the sampled view over the final image.
VkImageViewCreateInfo view = {0};
view.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
view.pNext = NULL;
view.image = VK_NULL_HANDLE;
view.viewType = VK_IMAGE_VIEW_TYPE_2D;
view.format = tex_format;
view.components.r = VK_COMPONENT_SWIZZLE_R;
view.components.g = VK_COMPONENT_SWIZZLE_G;
view.components.b = VK_COMPONENT_SWIZZLE_B;
view.components.a = VK_COMPONENT_SWIZZLE_A;
view.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
view.subresourceRange.baseMipLevel = 0;
view.subresourceRange.levelCount = 1;
view.subresourceRange.baseArrayLayer = 0;
view.subresourceRange.layerCount = 1;
view.flags = 0;
view.image = texture->impl.texture.image;
err = vkCreateImageView(vk_ctx.device, &view, NULL, &texture->impl.texture.view);
assert(!err);
}
// Creates an empty 2D texture in host-visible linear memory; pixels are
// written later through kinc_g5_texture_lock/unlock.
// NOTE(review): format properties are queried but never checked here (unlike
// kinc_g5_texture_init_from_image) - linear sampling support is assumed.
void kinc_g5_texture_init(kinc_g5_texture_t *texture, int width, int height, kinc_image_format_t format) {
texture->texWidth = width;
texture->texHeight = height;
const VkFormat tex_format = convert_image_format(format);
VkFormatProperties props;
VkResult err;
vkGetPhysicalDeviceFormatProperties(vk_ctx.gpu, tex_format, &props);
// Device can texture using linear textures
prepare_texture_image(NULL, (uint32_t)width, (uint32_t)height, &texture->impl.texture, VK_IMAGE_TILING_LINEAR, VK_IMAGE_USAGE_SAMPLED_BIT,
VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, &texture->impl.deviceSize, tex_format);
flush_init_cmd();
update_stride(texture);
// Create the sampled view over the image.
VkImageViewCreateInfo view = {0};
view.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
view.pNext = NULL;
view.image = VK_NULL_HANDLE;
view.viewType = VK_IMAGE_VIEW_TYPE_2D;
view.format = tex_format;
view.components.r = VK_COMPONENT_SWIZZLE_R;
view.components.g = VK_COMPONENT_SWIZZLE_G;
view.components.b = VK_COMPONENT_SWIZZLE_B;
view.components.a = VK_COMPONENT_SWIZZLE_A;
view.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
view.subresourceRange.baseMipLevel = 0;
view.subresourceRange.levelCount = 1;
view.subresourceRange.baseArrayLayer = 0;
view.subresourceRange.layerCount = 1;
view.flags = 0;
view.image = texture->impl.texture.image;
err = vkCreateImageView(vk_ctx.device, &view, NULL, &texture->impl.texture.view);
assert(!err);
}
// TODO: 3D textures are not implemented in this Vulkan backend.
void kinc_g5_texture_init3d(kinc_g5_texture_t *texture, int width, int height, int depth, kinc_image_format_t format) {}
// Creates a device-local, optimally-tiled storage image (usable for compute
// writes via VK_IMAGE_USAGE_STORAGE_BIT and as a transfer source).
void kinc_g5_texture_init_non_sampled_access(kinc_g5_texture_t *texture, int width, int height, kinc_image_format_t format) {
texture->texWidth = width;
texture->texHeight = height;
const VkFormat tex_format = convert_image_format(format);
VkFormatProperties props;
VkResult err;
vkGetPhysicalDeviceFormatProperties(vk_ctx.gpu, tex_format, &props);
// Optimal tiling, device-local storage image - no CPU-side upload here.
prepare_texture_image(NULL, (uint32_t)width, (uint32_t)height, &texture->impl.texture, VK_IMAGE_TILING_OPTIMAL,
VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_STORAGE_BIT, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, &texture->impl.deviceSize,
tex_format);
flush_init_cmd();
// NOTE(review): update_stride maps a subresource layout, which is only
// meaningful for linear images - confirm this is valid for optimal tiling.
update_stride(texture);
VkImageViewCreateInfo view = {0};
view.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
view.pNext = NULL;
view.image = VK_NULL_HANDLE;
view.viewType = VK_IMAGE_VIEW_TYPE_2D;
view.format = tex_format;
view.components.r = VK_COMPONENT_SWIZZLE_R;
view.components.g = VK_COMPONENT_SWIZZLE_G;
view.components.b = VK_COMPONENT_SWIZZLE_B;
view.components.a = VK_COMPONENT_SWIZZLE_A;
view.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
view.subresourceRange.baseMipLevel = 0;
view.subresourceRange.levelCount = 1;
view.subresourceRange.baseArrayLayer = 0;
view.subresourceRange.layerCount = 1;
view.flags = 0;
view.image = texture->impl.texture.image;
err = vkCreateImageView(vk_ctx.device, &view, NULL, &texture->impl.texture.view);
assert(!err);
}
// Destroys the view, image and memory created by the init functions above.
void kinc_g5_texture_destroy(kinc_g5_texture_t *texture) {
vkDestroyImageView(vk_ctx.device, texture->impl.texture.view, NULL);
destroy_texture_image(&texture->impl.texture);
}
// No-op in this backend; binding happens through descriptor sets elsewhere.
void kinc_g5_internal_texture_set(kinc_g5_texture_t *texture, int unit) {}
// Returns the cached row pitch in bytes (see update_stride).
int kinc_g5_texture_stride(kinc_g5_texture_t *texture) {
return texture->impl.stride;
}
// Maps the texture's entire memory for CPU writes. Only valid for textures in
// host-visible memory; pair every lock with kinc_g5_texture_unlock.
uint8_t *kinc_g5_texture_lock(kinc_g5_texture_t *texture) {
void *data;
VkResult err = vkMapMemory(vk_ctx.device, texture->impl.texture.mem, 0, texture->impl.deviceSize, 0, &data);
assert(!err);
return (uint8_t *)data;
}
// Unmaps the memory mapped by kinc_g5_texture_lock.
void kinc_g5_texture_unlock(kinc_g5_texture_t *texture) {
vkUnmapMemory(vk_ctx.device, texture->impl.texture.mem);
}
// TODO: the following texture operations are not implemented in this backend.
void kinc_g5_texture_clear(kinc_g5_texture_t *texture, int x, int y, int z, int width, int height, int depth, unsigned color) {}
void kinc_g5_texture_generate_mipmaps(kinc_g5_texture_t *texture, int levels) {}
void kinc_g5_texture_set_mipmap(kinc_g5_texture_t *texture, kinc_image_t *mipmap, int level) {}

View File

@ -0,0 +1,20 @@
#pragma once
#include "MiniVulkan.h"
// A Vulkan image with its memory, view and current layout; shared between
// final textures and staging uploads.
struct texture_object {
VkImage image;
VkImageLayout imageLayout; // layout the image was last transitioned to
VkDeviceMemory mem;
VkImageView view;
int32_t tex_width, tex_height;
};
// Per-texture Vulkan backend state.
typedef struct {
struct texture_object texture;
VkDeviceSize deviceSize; // size of the image allocation (used by lock/unlock)
uint8_t *conversionBuffer; // not used in the code visible here -- TODO confirm
int stride; // cached row pitch in bytes
} Texture5Impl;

View File

@ -0,0 +1,117 @@
#include "vulkan.h"
#include "shader.h"
#include "vertexbuffer.h"
#include <kinc/graphics5/indexbuffer.h>
#include <kinc/graphics5/vertexbuffer.h>
// Implemented elsewhere in this backend.
bool memory_type_from_properties(uint32_t typeBits, VkFlags requirements_mask, uint32_t *typeIndex);
// Most recently bound vertex/index buffers (used to clear stale bindings on destroy).
kinc_g5_vertex_buffer_t *currentVertexBuffer = NULL;
extern kinc_g5_index_buffer_t *currentIndexBuffer;
// Creates a host-visible Vulkan vertex buffer sized for vertexCount vertices.
// The stride is the sum of the element sizes of the vertex structure; data is
// written through kinc_g5_vertex_buffer_lock and read directly by the GPU.
// The gpuMemory flag is not used by this backend.
void kinc_g5_vertex_buffer_init(kinc_g5_vertex_buffer_t *buffer, int vertexCount, kinc_g5_vertex_structure_t *structure, bool gpuMemory,
                                int instanceDataStepRate) {
	buffer->impl.myCount = vertexCount;
	buffer->impl.instanceDataStepRate = instanceDataStepRate;
	buffer->impl.myStride = 0;
	for (int i = 0; i < structure->size; ++i) {
		kinc_g5_vertex_element_t element = structure->elements[i];
		buffer->impl.myStride += kinc_g4_vertex_data_size(element.data);
	}
	buffer->impl.structure = *structure;

	VkBufferCreateInfo buf_info = {0};
	buf_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
	buf_info.pNext = NULL;
	buf_info.size = vertexCount * buffer->impl.myStride;
	buf_info.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
#ifdef KORE_VKRT
	// Raytracing needs the buffer's device address.
	buf_info.usage |= VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT;
#endif
	buf_info.flags = 0;

	memset(&buffer->impl.mem_alloc, 0, sizeof(VkMemoryAllocateInfo));
	buffer->impl.mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
	buffer->impl.mem_alloc.pNext = NULL;
	buffer->impl.mem_alloc.allocationSize = 0;
	buffer->impl.mem_alloc.memoryTypeIndex = 0;

	VkMemoryRequirements mem_reqs = {0};
	VkResult err;
	bool pass;
	memset(&buffer->impl.vertices, 0, sizeof(buffer->impl.vertices));

	err = vkCreateBuffer(vk_ctx.device, &buf_info, NULL, &buffer->impl.vertices.buf);
	assert(!err);

	// vkGetBufferMemoryRequirements returns void and cannot fail; the stale
	// assert(!err) that previously followed it has been removed.
	vkGetBufferMemoryRequirements(vk_ctx.device, buffer->impl.vertices.buf, &mem_reqs);

	buffer->impl.mem_alloc.allocationSize = mem_reqs.size;
	pass = memory_type_from_properties(mem_reqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, &buffer->impl.mem_alloc.memoryTypeIndex);
	assert(pass);

#ifdef KORE_VKRT
	VkMemoryAllocateFlagsInfo memory_allocate_flags_info = {0};
	memory_allocate_flags_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO;
	memory_allocate_flags_info.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR;
	buffer->impl.mem_alloc.pNext = &memory_allocate_flags_info;
#endif

	err = vkAllocateMemory(vk_ctx.device, &buffer->impl.mem_alloc, NULL, &buffer->impl.vertices.mem);
	assert(!err);

	err = vkBindBufferMemory(vk_ctx.device, buffer->impl.vertices.buf, buffer->impl.vertices.mem, 0);
	assert(!err);
}
// Clears the cached binding if this buffer is the currently bound one.
static void unset_vertex_buffer(kinc_g5_vertex_buffer_t *buffer) {
if (currentVertexBuffer == buffer) {
currentVertexBuffer = NULL;
}
}
// Releases the Vulkan buffer and its memory.
void kinc_g5_vertex_buffer_destroy(kinc_g5_vertex_buffer_t *buffer) {
unset_vertex_buffer(buffer);
vkFreeMemory(vk_ctx.device, buffer->impl.vertices.mem, NULL);
vkDestroyBuffer(vk_ctx.device, buffer->impl.vertices.buf, NULL);
}
// Maps the whole buffer for CPU writes.
float *kinc_g5_vertex_buffer_lock_all(kinc_g5_vertex_buffer_t *buffer) {
return kinc_g5_vertex_buffer_lock(buffer, 0, buffer->impl.myCount);
}
// Maps `count` vertices starting at vertex `start`; offsets are in vertices,
// converted to bytes via the stride. Pair with an unlock call.
float *kinc_g5_vertex_buffer_lock(kinc_g5_vertex_buffer_t *buffer, int start, int count) {
VkResult err =
vkMapMemory(vk_ctx.device, buffer->impl.vertices.mem, start * buffer->impl.myStride, count * buffer->impl.myStride, 0, (void **)&buffer->impl.data);
assert(!err);
return buffer->impl.data;
}
void kinc_g5_vertex_buffer_unlock_all(kinc_g5_vertex_buffer_t *buffer) {
vkUnmapMemory(vk_ctx.device, buffer->impl.vertices.mem);
}
// `count` is ignored; Vulkan unmaps the whole mapped range.
void kinc_g5_vertex_buffer_unlock(kinc_g5_vertex_buffer_t *buffer, int count) {
vkUnmapMemory(vk_ctx.device, buffer->impl.vertices.mem);
}
// Stub: vertex attribute setup is handled elsewhere in this backend.
static int setVertexAttributes(int offset) {
return 0;
}
// Stub binding hook; always returns 0 (see setVertexAttributes).
int kinc_g5_internal_vertex_buffer_set(kinc_g5_vertex_buffer_t *buffer, int offset) {
int offsetoffset = setVertexAttributes(offset);
return offsetoffset;
}
// Number of vertices the buffer was created for.
int kinc_g5_vertex_buffer_count(kinc_g5_vertex_buffer_t *buffer) {
return buffer->impl.myCount;
}
// Bytes per vertex, derived from the vertex structure at init time.
int kinc_g5_vertex_buffer_stride(kinc_g5_vertex_buffer_t *buffer) {
return buffer->impl.myStride;
}

View File

@ -0,0 +1,21 @@
#pragma once
#include <kinc/graphics5/vertexstructure.h>
#include "MiniVulkan.h"
// The Vulkan buffer handle and its backing memory.
struct Vertices {
VkBuffer buf;
VkDeviceMemory mem;
};
// Per-vertex-buffer backend state.
typedef struct {
float *data; // CPU pointer while the buffer is mapped (lock/unlock)
int myCount; // number of vertices
int myStride; // bytes per vertex
unsigned bufferId; // not used in the code visible here -- TODO confirm
kinc_g5_vertex_structure_t structure;
VkMemoryAllocateInfo mem_alloc; // allocation info used at init time
int instanceDataStepRate;
struct Vertices vertices;
} VertexBuffer5Impl;

View File

@ -0,0 +1,92 @@
#pragma once
#include <stdbool.h>
#include <vulkan/vulkan.h>
#include <vulkan/vulkan_core.h>
// Extension entry points resolved at runtime (surface/swapchain/debug utils).
struct vk_funs {
PFN_vkGetPhysicalDeviceSurfaceSupportKHR fpGetPhysicalDeviceSurfaceSupportKHR;
PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR fpGetPhysicalDeviceSurfaceCapabilitiesKHR;
PFN_vkGetPhysicalDeviceSurfaceFormatsKHR fpGetPhysicalDeviceSurfaceFormatsKHR;
PFN_vkGetPhysicalDeviceSurfacePresentModesKHR fpGetPhysicalDeviceSurfacePresentModesKHR;
PFN_vkCreateSwapchainKHR fpCreateSwapchainKHR;
PFN_vkDestroySwapchainKHR fpDestroySwapchainKHR;
PFN_vkGetSwapchainImagesKHR fpGetSwapchainImagesKHR;
PFN_vkDestroySurfaceKHR fpDestroySurfaceKHR;
PFN_vkCreateDebugUtilsMessengerEXT fpCreateDebugUtilsMessengerEXT;
PFN_vkDestroyDebugUtilsMessengerEXT fpDestroyDebugUtilsMessengerEXT;
PFN_vkQueuePresentKHR fpQueuePresentKHR;
PFN_vkAcquireNextImageKHR fpAcquireNextImageKHR;
};
// Depth attachment of a window's framebuffer.
struct vk_depth {
VkImage image;
VkImageView view;
VkDeviceMemory memory;
};
// Per-window swapchain state: surface, swapchain images/views/framebuffers and
// the render passes used for the framebuffer and offscreen render targets.
struct vk_window {
int width;
int height;
bool resized;
bool surface_destroyed;
int depth_bits;
int stencil_bits;
bool vsynced;
uint32_t current_image; // index of the currently acquired swapchain image
VkSurfaceKHR surface;
VkSurfaceFormatKHR format;
VkSwapchainKHR swapchain;
uint32_t image_count;
VkImage *images;
VkImageView *views;
VkFramebuffer *framebuffers;
VkRenderPass framebuffer_render_pass;
VkRenderPass rendertarget_render_pass;
VkRenderPass rendertarget_render_pass_with_depth;
struct vk_depth depth;
};
#define MAXIMUM_WINDOWS 16
// Global Vulkan state shared by the whole backend.
struct vk_context {
VkInstance instance;
VkPhysicalDevice gpu;
VkDevice device;
VkPhysicalDeviceMemoryProperties memory_properties;
VkCommandBuffer setup_cmd; // one-shot command buffer for init-time work
VkCommandPool cmd_pool;
VkQueue queue;
struct vk_window windows[MAXIMUM_WINDOWS];
// buffer hack
VkBuffer *vertex_uniform_buffer;
VkBuffer *fragment_uniform_buffer;
int current_window;
#ifdef VALIDATE
bool validation_found;
VkDebugUtilsMessengerEXT debug_messenger;
#endif
};
extern struct vk_funs vk;
extern struct vk_context vk_ctx;
// Submits and resets vk_ctx.setup_cmd (implemented in the backend core).
extern void flush_init_cmd(void);
extern void reuse_descriptor_sets(void);
#include <assert.h>

View File

@ -0,0 +1,86 @@
#ifdef KORE_WINDOWS
// Windows 7
#define WINVER 0x0601
#define _WIN32_WINNT 0x0601
#define NOATOM
#define NOCLIPBOARD
#define NOCOLOR
#define NOCOMM
#define NOCTLMGR
#define NODEFERWINDOWPOS
#define NODRAWTEXT
#define NOGDI
#define NOGDICAPMASKS
#define NOHELP
#define NOICONS
#define NOKANJI
#define NOKEYSTATES
//#define NOMB
#define NOMCX
#define NOMEMMGR
#define NOMENUS
#define NOMETAFILE
#define NOMINMAX
#define NOMSG
#define NONLS
#define NOOPENFILE
#define NOPROFILER
#define NORASTEROPS
#define NOSCROLL
#define NOSERVICE
#define NOSHOWWINDOW
#define NOSOUND
#define NOSYSCOMMANDS
#define NOSYSMETRICS
#define NOTEXTMETRIC
//#define NOUSER
#define NOVIRTUALKEYCODES
#define NOWH
#define NOWINMESSAGES
#define NOWINOFFSETS
#define NOWINSTYLES
#define WIN32_LEAN_AND_MEAN
// avoids a warning in the Windows headers
#define MICROSOFT_WINDOWS_WINBASE_H_DEFINE_INTERLOCKED_CPLUSPLUS_OVERLOADS 0
#endif
#ifndef NDEBUG
#define VALIDATE
#endif
#include <vulkan/vulkan.h>
#include <assert.h>
#include <malloc.h>
#include <memory.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <kinc/graphics5/rendertarget.h>
#include <kinc/graphics5/texture.h>
#include <kinc/image.h>
#include <kinc/math/matrix.h>
#include "vulkan.h"
// Semaphores coordinating presentation: framebuffer_available signals that a
// swapchain image was acquired; relay_semaphore chains command list submissions.
static VkSemaphore framebuffer_available;
static VkSemaphore relay_semaphore;
// Set when the next submission must wait on relay_semaphore.
static bool wait_for_relay = false;
static void command_list_should_wait_for_framebuffer(void);
#include "Vulkan.c.h"
#include "sampler.c.h"
#include "commandlist.c.h"
#include "constantbuffer.c.h"
#include "indexbuffer.c.h"
#include "pipeline.c.h"
#include "raytrace.c.h"
#include "rendertarget.c.h"
#include "shader.c.h"
#include "texture.c.h"
#include "vertexbuffer.c.h"