initial commit, 4.5 stable

2025-09-16 20:46:46 -04:00
commit 9d30169a8d
13378 changed files with 7050105 additions and 0 deletions


@@ -0,0 +1,27 @@
#!/usr/bin/env python
from misc.utility.scons_hints import *
Import("env")
if "RD_GLSL" in env["BUILDERS"]:
# find just the include files
gl_include_files = [str(f) for f in Glob("*_inc.glsl")]
# find all shader code (all glsl files excluding our include files)
glsl_files = [str(f) for f in Glob("*.glsl") if str(f) not in gl_include_files]
# make sure we recompile shaders if include files change
env.Depends([f + ".gen.h" for f in glsl_files], gl_include_files + ["#glsl_builders.py"])
# compile include files
for glsl_file in gl_include_files:
env.GLSL_HEADER(glsl_file)
# compile RD shader
for glsl_file in glsl_files:
env.RD_GLSL(glsl_file)
SConscript("effects/SCsub")
SConscript("environment/SCsub")
SConscript("forward_clustered/SCsub")
SConscript("forward_mobile/SCsub")


@@ -0,0 +1,151 @@
#[vertex]
#version 450
#VERSION_DEFINES
layout(push_constant, std140) uniform Pos {
vec4 src_rect;
vec4 dst_rect;
float rotation_sin;
float rotation_cos;
vec2 eye_center;
float k1;
float k2;
float upscale;
float aspect_ratio;
uint layer;
bool convert_to_srgb;
bool use_debanding;
float pad;
}
data;
layout(location = 0) out vec2 uv;
void main() {
mat4 swapchain_transform = mat4(1.0);
swapchain_transform[0][0] = data.rotation_cos;
swapchain_transform[0][1] = -data.rotation_sin;
swapchain_transform[1][0] = data.rotation_sin;
swapchain_transform[1][1] = data.rotation_cos;
vec2 base_arr[4] = vec2[](vec2(0.0, 0.0), vec2(0.0, 1.0), vec2(1.0, 1.0), vec2(1.0, 0.0));
uv = data.src_rect.xy + base_arr[gl_VertexIndex] * data.src_rect.zw;
vec2 vtx = data.dst_rect.xy + base_arr[gl_VertexIndex] * data.dst_rect.zw;
gl_Position = swapchain_transform * vec4(vtx * 2.0 - 1.0, 0.0, 1.0);
}
#[fragment]
#version 450
#VERSION_DEFINES
layout(push_constant, std140) uniform Pos {
vec4 src_rect;
vec4 dst_rect;
float rotation_sin;
float rotation_cos;
vec2 eye_center;
float k1;
float k2;
float upscale;
float aspect_ratio;
uint layer;
bool convert_to_srgb;
bool use_debanding;
float pad;
}
data;
layout(location = 0) in vec2 uv;
layout(location = 0) out vec4 color;
#ifdef USE_LAYER
layout(binding = 0) uniform sampler2DArray src_rt;
#else
layout(binding = 0) uniform sampler2D src_rt;
#endif
vec3 linear_to_srgb(vec3 color) {
const vec3 a = vec3(0.055f);
return mix((vec3(1.0f) + a) * pow(color.rgb, vec3(1.0f / 2.4f)) - a, 12.92f * color.rgb, lessThan(color.rgb, vec3(0.0031308f)));
}
// From https://alex.vlachos.com/graphics/Alex_Vlachos_Advanced_VR_Rendering_GDC2015.pdf
// and https://www.shadertoy.com/view/MslGR8 (5th one starting from the bottom)
// NOTE: `frag_coord` is in pixels (i.e. not normalized UV).
// This dithering must be applied after encoding changes (linear/nonlinear) have been applied
// as the final step before quantization from floating point to integer values.
vec3 screen_space_dither(vec2 frag_coord) {
// Iestyn's RGB dither (7 asm instructions) from Portal 2 X360, slightly modified for VR.
// Removed the time component to avoid passing time into this shader.
vec3 dither = vec3(dot(vec2(171.0, 231.0), frag_coord));
dither.rgb = fract(dither.rgb / vec3(103.0, 71.0, 97.0));
// Subtract 0.5 to avoid slightly brightening the whole viewport.
// Use a dither strength of 100% rather than the 37.5% suggested by the original source.
// Divide by 255 to align to 8-bit quantization.
return (dither.rgb - 0.5) / 255.0;
}
void main() {
#ifdef APPLY_LENS_DISTORTION
vec2 coords = uv * 2.0 - 1.0;
vec2 offset = coords - data.eye_center;
// take aspect ratio into account
offset.y /= data.aspect_ratio;
// distort
vec2 offset_sq = offset * offset;
float radius_sq = offset_sq.x + offset_sq.y;
float radius_s4 = radius_sq * radius_sq;
float distortion_scale = 1.0 + (data.k1 * radius_sq) + (data.k2 * radius_s4);
offset *= distortion_scale;
// reapply aspect ratio
offset.y *= data.aspect_ratio;
// add our eye center back in
coords = offset + data.eye_center;
coords /= data.upscale;
// and check our color
if (coords.x < -1.0 || coords.y < -1.0 || coords.x > 1.0 || coords.y > 1.0) {
color = vec4(0.0, 0.0, 0.0, 1.0);
} else {
// layer is always used here
coords = (coords + vec2(1.0)) / vec2(2.0);
color = texture(src_rt, vec3(coords, data.layer));
}
#elif defined(USE_LAYER)
color = texture(src_rt, vec3(uv, data.layer));
#else
color = texture(src_rt, uv);
#endif
if (data.convert_to_srgb) {
color.rgb = linear_to_srgb(color.rgb); // Regular linear -> SRGB conversion.
// Even if debanding was applied earlier in the rendering process, it must
// be reapplied after the linear_to_srgb floating point operations.
// When the linear_to_srgb operation was not performed, the source is
// already an 8-bit format and debanding cannot be effective. In this
// case, GPU driver rounding error can add noise so debanding should be
// skipped entirely.
if (data.use_debanding) {
color.rgb += screen_space_dither(gl_FragCoord.xy);
}
color.rgb = clamp(color.rgb, vec3(0.0), vec3(1.0));
}
}
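Illustrative only (not part of this file or the commit): a minimal Python sketch of the radial lens-distortion math in the APPLY_LENS_DISTORTION path above. The parameter names mirror the push-constant fields; the sample values in the call are arbitrary.

def distort(uv, eye_center, k1, k2, aspect_ratio, upscale):
    # Map UV from [0, 1] into the [-1, 1] range the shader works in.
    cx, cy = uv[0] * 2.0 - 1.0, uv[1] * 2.0 - 1.0
    ox, oy = cx - eye_center[0], cy - eye_center[1]
    oy /= aspect_ratio                         # take aspect ratio into account
    r2 = ox * ox + oy * oy
    scale = 1.0 + k1 * r2 + k2 * r2 * r2       # radial distortion factor
    ox, oy = ox * scale, oy * scale * aspect_ratio
    x = (ox + eye_center[0]) / upscale
    y = (oy + eye_center[1]) / upscale
    return x, y                                # outside [-1, 1] the shader outputs black

print(distort((0.75, 0.5), (0.0, 0.0), 0.22, 0.24, 1.0, 1.0))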


@@ -0,0 +1,760 @@
#[vertex]
#version 450
#VERSION_DEFINES
#ifdef USE_ATTRIBUTES
layout(location = 0) in vec2 vertex_attrib;
layout(location = 3) in vec4 color_attrib;
layout(location = 4) in vec2 uv_attrib;
#if defined(CUSTOM0_USED)
layout(location = 6) in vec4 custom0_attrib;
#endif
#if defined(CUSTOM1_USED)
layout(location = 7) in vec4 custom1_attrib;
#endif
layout(location = 10) in uvec4 bone_attrib;
layout(location = 11) in vec4 weight_attrib;
#endif
#include "canvas_uniforms_inc.glsl"
#ifndef USE_ATTRIBUTES
layout(location = 4) out flat uint instance_index;
#else
#define instance_index params.base_instance_index
#endif // USE_ATTRIBUTES
layout(location = 0) out vec2 uv_interp;
layout(location = 1) out vec4 color_interp;
layout(location = 2) out vec2 vertex_interp;
#ifdef USE_NINEPATCH
layout(location = 3) out vec2 pixel_size_interp;
#endif
#ifdef MATERIAL_UNIFORMS_USED
/* clang-format off */
layout(set = 1, binding = 0, std140) uniform MaterialUniforms {
#MATERIAL_UNIFORMS
} material;
/* clang-format on */
#endif
#GLOBALS
#ifdef USE_ATTRIBUTES
vec3 srgb_to_linear(vec3 color) {
return mix(pow((color.rgb + vec3(0.055)) * (1.0 / (1.0 + 0.055)), vec3(2.4)), color.rgb * (1.0 / 12.92), lessThan(color.rgb, vec3(0.04045)));
}
#endif
void main() {
vec4 instance_custom = vec4(0.0);
#if defined(CUSTOM0_USED)
vec4 custom0 = vec4(0.0);
#endif
#if defined(CUSTOM1_USED)
vec4 custom1 = vec4(0.0);
#endif
#ifndef USE_ATTRIBUTES
instance_index = gl_InstanceIndex + params.base_instance_index;
#endif // USE_ATTRIBUTES
const InstanceData draw_data = instances.data[instance_index];
#ifdef USE_PRIMITIVE
//weird bug,
//this works
vec2 vertex;
vec2 uv;
vec4 color;
if (gl_VertexIndex == 0) {
vertex = draw_data.points[0];
uv = draw_data.uvs[0];
color = vec4(unpackHalf2x16(draw_data.colors[0]), unpackHalf2x16(draw_data.colors[1]));
} else if (gl_VertexIndex == 1) {
vertex = draw_data.points[1];
uv = draw_data.uvs[1];
color = vec4(unpackHalf2x16(draw_data.colors[2]), unpackHalf2x16(draw_data.colors[3]));
} else {
vertex = draw_data.points[2];
uv = draw_data.uvs[2];
color = vec4(unpackHalf2x16(draw_data.colors[4]), unpackHalf2x16(draw_data.colors[5]));
}
uvec4 bones = uvec4(0, 0, 0, 0);
vec4 bone_weights = vec4(0.0);
#elif defined(USE_ATTRIBUTES)
vec2 vertex = vertex_attrib;
vec4 color = color_attrib;
if (bool(canvas_data.flags & CANVAS_FLAGS_CONVERT_ATTRIBUTES_TO_LINEAR)) {
color.rgb = srgb_to_linear(color.rgb);
}
color *= draw_data.modulation;
vec2 uv = uv_attrib;
#if defined(CUSTOM0_USED)
custom0 = custom0_attrib;
#endif
#if defined(CUSTOM1_USED)
custom1 = custom1_attrib;
#endif
uvec4 bones = bone_attrib;
vec4 bone_weights = weight_attrib;
#else // !USE_ATTRIBUTES
vec2 vertex_base_arr[4] = vec2[](vec2(0.0, 0.0), vec2(0.0, 1.0), vec2(1.0, 1.0), vec2(1.0, 0.0));
vec2 vertex_base = vertex_base_arr[gl_VertexIndex];
vec2 uv = draw_data.src_rect.xy + abs(draw_data.src_rect.zw) * ((draw_data.flags & INSTANCE_FLAGS_TRANSPOSE_RECT) != 0 ? vertex_base.yx : vertex_base.xy);
vec4 color = draw_data.modulation;
vec2 vertex = draw_data.dst_rect.xy + abs(draw_data.dst_rect.zw) * mix(vertex_base, vec2(1.0, 1.0) - vertex_base, lessThan(draw_data.src_rect.zw, vec2(0.0, 0.0)));
uvec4 bones = uvec4(0, 0, 0, 0);
#endif // USE_ATTRIBUTES
mat4 model_matrix = mat4(vec4(draw_data.world_x, 0.0, 0.0), vec4(draw_data.world_y, 0.0, 0.0), vec4(0.0, 0.0, 1.0, 0.0), vec4(draw_data.world_ofs, 0.0, 1.0));
#ifdef USE_ATTRIBUTES
uint instancing = params.batch_flags & BATCH_FLAGS_INSTANCING_MASK;
if (instancing > 1) {
// trails
uint stride = 2 + 1 + 1; //particles always uses this format
uint trail_size = instancing;
uint offset = trail_size * stride * gl_InstanceIndex;
vec4 pcolor;
vec2 new_vertex;
{
uint boffset = offset + bone_attrib.x * stride;
new_vertex = (vec4(vertex, 0.0, 1.0) * mat4(transforms.data[boffset + 0], transforms.data[boffset + 1], vec4(0.0, 0.0, 1.0, 0.0), vec4(0.0, 0.0, 0.0, 1.0))).xy * weight_attrib.x;
pcolor = transforms.data[boffset + 2] * weight_attrib.x;
}
if (weight_attrib.y > 0.001) {
uint boffset = offset + bone_attrib.y * stride;
new_vertex += (vec4(vertex, 0.0, 1.0) * mat4(transforms.data[boffset + 0], transforms.data[boffset + 1], vec4(0.0, 0.0, 1.0, 0.0), vec4(0.0, 0.0, 0.0, 1.0))).xy * weight_attrib.y;
pcolor += transforms.data[boffset + 2] * weight_attrib.y;
}
if (weight_attrib.z > 0.001) {
uint boffset = offset + bone_attrib.z * stride;
new_vertex += (vec4(vertex, 0.0, 1.0) * mat4(transforms.data[boffset + 0], transforms.data[boffset + 1], vec4(0.0, 0.0, 1.0, 0.0), vec4(0.0, 0.0, 0.0, 1.0))).xy * weight_attrib.z;
pcolor += transforms.data[boffset + 2] * weight_attrib.z;
}
if (weight_attrib.w > 0.001) {
uint boffset = offset + bone_attrib.w * stride;
new_vertex += (vec4(vertex, 0.0, 1.0) * mat4(transforms.data[boffset + 0], transforms.data[boffset + 1], vec4(0.0, 0.0, 1.0, 0.0), vec4(0.0, 0.0, 0.0, 1.0))).xy * weight_attrib.w;
pcolor += transforms.data[boffset + 2] * weight_attrib.w;
}
instance_custom = transforms.data[offset + 3];
vertex = new_vertex;
color *= pcolor;
} else if (instancing == 1) {
uint stride = 2 + bitfieldExtract(params.batch_flags, BATCH_FLAGS_INSTANCING_HAS_COLORS_SHIFT, 1) + bitfieldExtract(params.batch_flags, BATCH_FLAGS_INSTANCING_HAS_CUSTOM_DATA_SHIFT, 1);
uint offset = stride * gl_InstanceIndex;
mat4 matrix = mat4(transforms.data[offset + 0], transforms.data[offset + 1], vec4(0.0, 0.0, 1.0, 0.0), vec4(0.0, 0.0, 0.0, 1.0));
offset += 2;
if (bool(params.batch_flags & BATCH_FLAGS_INSTANCING_HAS_COLORS)) {
color *= transforms.data[offset];
offset += 1;
}
if (bool(params.batch_flags & BATCH_FLAGS_INSTANCING_HAS_CUSTOM_DATA)) {
instance_custom = transforms.data[offset];
}
matrix = transpose(matrix);
model_matrix = model_matrix * matrix;
}
#endif // USE_ATTRIBUTES
float point_size = 1.0;
#ifdef USE_WORLD_VERTEX_COORDS
vertex = (model_matrix * vec4(vertex, 0.0, 1.0)).xy;
#endif
{
#CODE : VERTEX
}
#ifdef USE_NINEPATCH
pixel_size_interp = abs(draw_data.dst_rect.zw) * vertex_base;
#endif
#if !defined(SKIP_TRANSFORM_USED) && !defined(USE_WORLD_VERTEX_COORDS)
vertex = (model_matrix * vec4(vertex, 0.0, 1.0)).xy;
#endif
color_interp = color;
vertex = (canvas_data.canvas_transform * vec4(vertex, 0.0, 1.0)).xy;
if (canvas_data.use_pixel_snap) {
vertex = floor(vertex + 0.5);
// precision issue on some hardware creates artifacts within texture
// offset uv by a small amount to avoid
uv += 1e-5;
}
vertex_interp = vertex;
uv_interp = uv;
gl_Position = canvas_data.screen_transform * vec4(vertex, 0.0, 1.0);
#ifdef USE_POINT_SIZE
gl_PointSize = point_size;
#endif
}
#[fragment]
#version 450
#VERSION_DEFINES
#include "canvas_uniforms_inc.glsl"
#ifndef USE_ATTRIBUTES
layout(location = 4) in flat uint instance_index;
#else
#define instance_index params.base_instance_index
#endif // USE_ATTRIBUTES
layout(location = 0) in vec2 uv_interp;
layout(location = 1) in vec4 color_interp;
layout(location = 2) in vec2 vertex_interp;
#ifdef USE_NINEPATCH
layout(location = 3) in vec2 pixel_size_interp;
#endif
layout(location = 0) out vec4 frag_color;
#ifdef MATERIAL_UNIFORMS_USED
/* clang-format off */
layout(set = 1, binding = 0, std140) uniform MaterialUniforms {
#MATERIAL_UNIFORMS
} material;
/* clang-format on */
#endif
vec2 screen_uv_to_sdf(vec2 p_uv) {
return canvas_data.screen_to_sdf * p_uv;
}
float texture_sdf(vec2 p_sdf) {
vec2 uv = p_sdf * canvas_data.sdf_to_tex.xy + canvas_data.sdf_to_tex.zw;
float d = texture(sampler2D(sdf_texture, SAMPLER_LINEAR_CLAMP), uv).r;
d *= SDF_MAX_LENGTH;
return d * canvas_data.tex_to_sdf;
}
vec2 texture_sdf_normal(vec2 p_sdf) {
vec2 uv = p_sdf * canvas_data.sdf_to_tex.xy + canvas_data.sdf_to_tex.zw;
const float EPSILON = 0.001;
return normalize(vec2(
texture(sampler2D(sdf_texture, SAMPLER_LINEAR_CLAMP), uv + vec2(EPSILON, 0.0)).r - texture(sampler2D(sdf_texture, SAMPLER_LINEAR_CLAMP), uv - vec2(EPSILON, 0.0)).r,
texture(sampler2D(sdf_texture, SAMPLER_LINEAR_CLAMP), uv + vec2(0.0, EPSILON)).r - texture(sampler2D(sdf_texture, SAMPLER_LINEAR_CLAMP), uv - vec2(0.0, EPSILON)).r));
}
vec2 sdf_to_screen_uv(vec2 p_sdf) {
return p_sdf * canvas_data.sdf_to_screen;
}
// Emulate textureProjLod by doing it manually because the source texture is not an actual depth texture that can be used for this operation.
// Since the sampler is configured to nearest, use one textureGather tap to emulate bilinear.
float texture_shadow(vec4 p) {
// Manually round p to the nearest texel because textureGather uses strange rounding rules.
vec2 unit_p = floor(p.xy / canvas_data.shadow_pixel_size) * canvas_data.shadow_pixel_size;
float depth = p.z;
float fx = fract(p.x / canvas_data.shadow_pixel_size);
vec2 tap = textureGather(sampler2D(shadow_atlas_texture, shadow_sampler), unit_p.xy).zw;
return mix(step(tap.y, depth), step(tap.x, depth), fx);
}
#GLOBALS
#ifdef LIGHT_CODE_USED
vec4 light_compute(
vec3 light_vertex,
vec3 light_position,
vec3 normal,
vec4 light_color,
float light_energy,
vec4 specular_shininess,
inout vec4 shadow_modulate,
vec2 screen_uv,
vec2 uv,
vec4 color, bool is_directional) {
const InstanceData draw_data = instances.data[instance_index];
vec4 light = vec4(0.0);
vec3 light_direction = vec3(0.0);
if (is_directional) {
light_direction = normalize(mix(vec3(light_position.xy, 0.0), vec3(0, 0, 1), light_position.z));
light_position = vec3(0.0);
} else {
light_direction = normalize(light_position - light_vertex);
}
#CODE : LIGHT
return light;
}
#endif
#ifdef USE_NINEPATCH
float map_ninepatch_axis(float pixel, float draw_size, float tex_pixel_size, float margin_begin, float margin_end, int np_repeat, inout int draw_center) {
const InstanceData draw_data = instances.data[instance_index];
float tex_size = 1.0 / tex_pixel_size;
if (pixel < margin_begin) {
return pixel * tex_pixel_size;
} else if (pixel >= draw_size - margin_end) {
return (tex_size - (draw_size - pixel)) * tex_pixel_size;
} else {
draw_center -= 1 - int(bitfieldExtract(draw_data.flags, INSTANCE_FLAGS_NINEPATCH_DRAW_CENTER_SHIFT, 1));
// np_repeat is passed as uniform using NinePatchRect::AxisStretchMode enum.
if (np_repeat == 0) { // Stretch.
// Convert to ratio.
float ratio = (pixel - margin_begin) / (draw_size - margin_begin - margin_end);
// Scale to source texture.
return (margin_begin + ratio * (tex_size - margin_begin - margin_end)) * tex_pixel_size;
} else if (np_repeat == 1) { // Tile.
// Convert to offset.
float ofs = mod((pixel - margin_begin), tex_size - margin_begin - margin_end);
// Scale to source texture.
return (margin_begin + ofs) * tex_pixel_size;
} else if (np_repeat == 2) { // Tile Fit.
// Calculate scale.
float src_area = draw_size - margin_begin - margin_end;
float dst_area = tex_size - margin_begin - margin_end;
float scale = max(1.0, floor(src_area / max(dst_area, 0.0000001) + 0.5));
// Convert to ratio.
float ratio = (pixel - margin_begin) / src_area;
ratio = mod(ratio * scale, 1.0);
// Scale to source texture.
return (margin_begin + ratio * dst_area) * tex_pixel_size;
} else { // Shouldn't happen, but silences compiler warning.
return 0.0;
}
}
}
#endif
vec3 light_normal_compute(vec3 light_vec, vec3 normal, vec3 base_color, vec3 light_color, vec4 specular_shininess, bool specular_shininess_used) {
float cNdotL = max(0.0, dot(normal, light_vec));
if (specular_shininess_used) {
//blinn
vec3 view = vec3(0.0, 0.0, 1.0); // not great but good enough
vec3 half_vec = normalize(view + light_vec);
float cNdotV = max(dot(normal, view), 0.0);
float cNdotH = max(dot(normal, half_vec), 0.0);
float cVdotH = max(dot(view, half_vec), 0.0);
float cLdotH = max(dot(light_vec, half_vec), 0.0);
float shininess = exp2(15.0 * specular_shininess.a + 1.0) * 0.25;
float blinn = pow(cNdotH, shininess);
blinn *= (shininess + 8.0) * (1.0 / (8.0 * M_PI));
float s = (blinn) / max(4.0 * cNdotV * cNdotL, 0.75);
return specular_shininess.rgb * light_color * s + light_color * base_color * cNdotL;
} else {
return light_color * base_color * cNdotL;
}
}
vec4 light_shadow_compute(uint light_base, vec4 light_color, vec4 shadow_uv
#ifdef LIGHT_CODE_USED
,
vec3 shadow_modulate
#endif
) {
float shadow;
uint shadow_mode = light_array.data[light_base].flags & LIGHT_FLAGS_FILTER_MASK;
if (shadow_mode == LIGHT_FLAGS_SHADOW_NEAREST) {
shadow = texture_shadow(shadow_uv);
} else if (shadow_mode == LIGHT_FLAGS_SHADOW_PCF5) {
vec4 shadow_pixel_size = vec4(light_array.data[light_base].shadow_pixel_size, 0.0, 0.0, 0.0);
shadow = 0.0;
shadow += texture_shadow(shadow_uv - shadow_pixel_size * 2.0);
shadow += texture_shadow(shadow_uv - shadow_pixel_size);
shadow += texture_shadow(shadow_uv);
shadow += texture_shadow(shadow_uv + shadow_pixel_size);
shadow += texture_shadow(shadow_uv + shadow_pixel_size * 2.0);
shadow /= 5.0;
} else { //PCF13
vec4 shadow_pixel_size = vec4(light_array.data[light_base].shadow_pixel_size, 0.0, 0.0, 0.0);
shadow = 0.0;
shadow += texture_shadow(shadow_uv - shadow_pixel_size * 6.0);
shadow += texture_shadow(shadow_uv - shadow_pixel_size * 5.0);
shadow += texture_shadow(shadow_uv - shadow_pixel_size * 4.0);
shadow += texture_shadow(shadow_uv - shadow_pixel_size * 3.0);
shadow += texture_shadow(shadow_uv - shadow_pixel_size * 2.0);
shadow += texture_shadow(shadow_uv - shadow_pixel_size);
shadow += texture_shadow(shadow_uv);
shadow += texture_shadow(shadow_uv + shadow_pixel_size);
shadow += texture_shadow(shadow_uv + shadow_pixel_size * 2.0);
shadow += texture_shadow(shadow_uv + shadow_pixel_size * 3.0);
shadow += texture_shadow(shadow_uv + shadow_pixel_size * 4.0);
shadow += texture_shadow(shadow_uv + shadow_pixel_size * 5.0);
shadow += texture_shadow(shadow_uv + shadow_pixel_size * 6.0);
shadow /= 13.0;
}
vec4 shadow_color = unpackUnorm4x8(light_array.data[light_base].shadow_color);
#ifdef LIGHT_CODE_USED
shadow_color.rgb *= shadow_modulate;
#endif
shadow_color.a *= light_color.a; //respect light alpha
return mix(light_color, shadow_color, shadow);
}
void light_blend_compute(uint light_base, vec4 light_color, inout vec3 color) {
uint blend_mode = light_array.data[light_base].flags & LIGHT_FLAGS_BLEND_MASK;
switch (blend_mode) {
case LIGHT_FLAGS_BLEND_MODE_ADD: {
color.rgb += light_color.rgb * light_color.a;
} break;
case LIGHT_FLAGS_BLEND_MODE_SUB: {
color.rgb -= light_color.rgb * light_color.a;
} break;
case LIGHT_FLAGS_BLEND_MODE_MIX: {
color.rgb = mix(color.rgb, light_color.rgb, light_color.a);
} break;
}
}
float msdf_median(float r, float g, float b) {
return max(min(r, g), min(max(r, g), b));
}
void main() {
vec4 color = color_interp;
vec2 uv = uv_interp;
vec2 vertex = vertex_interp;
const InstanceData draw_data = instances.data[instance_index];
#if !defined(USE_ATTRIBUTES) && !defined(USE_PRIMITIVE)
vec4 region_rect = draw_data.src_rect;
#else
vec4 region_rect = vec4(0.0, 0.0, 1.0 / draw_data.color_texture_pixel_size);
#endif
#if !defined(USE_ATTRIBUTES) && !defined(USE_PRIMITIVE)
#ifdef USE_NINEPATCH
int draw_center = 2;
uv = vec2(
map_ninepatch_axis(pixel_size_interp.x, abs(draw_data.dst_rect.z), draw_data.color_texture_pixel_size.x, draw_data.ninepatch_margins.x, draw_data.ninepatch_margins.z, int(bitfieldExtract(draw_data.flags, INSTANCE_FLAGS_NINEPATCH_H_MODE_SHIFT, 2)), draw_center),
map_ninepatch_axis(pixel_size_interp.y, abs(draw_data.dst_rect.w), draw_data.color_texture_pixel_size.y, draw_data.ninepatch_margins.y, draw_data.ninepatch_margins.w, int(bitfieldExtract(draw_data.flags, INSTANCE_FLAGS_NINEPATCH_V_MODE_SHIFT, 2)), draw_center));
if (draw_center == 0) {
color.a = 0.0;
}
uv = uv * draw_data.src_rect.zw + draw_data.src_rect.xy; //apply region if needed
#endif
if (bool(draw_data.flags & INSTANCE_FLAGS_CLIP_RECT_UV)) {
vec2 half_texpixel = draw_data.color_texture_pixel_size * 0.5;
uv = clamp(uv, draw_data.src_rect.xy + half_texpixel, draw_data.src_rect.xy + abs(draw_data.src_rect.zw) - half_texpixel);
}
#endif
#ifndef USE_PRIMITIVE
if (bool(draw_data.flags & INSTANCE_FLAGS_USE_MSDF)) {
float px_range = draw_data.ninepatch_margins.x;
float outline_thickness = draw_data.ninepatch_margins.y;
//float reserved1 = draw_data.ninepatch_margins.z;
//float reserved2 = draw_data.ninepatch_margins.w;
vec4 msdf_sample = texture(sampler2D(color_texture, texture_sampler), uv);
vec2 msdf_size = vec2(textureSize(sampler2D(color_texture, texture_sampler), 0));
vec2 dest_size = vec2(1.0) / fwidth(uv);
float px_size = max(0.5 * dot((vec2(px_range) / msdf_size), dest_size), 1.0);
float d = msdf_median(msdf_sample.r, msdf_sample.g, msdf_sample.b);
if (outline_thickness > 0) {
float cr = clamp(outline_thickness, 0.0, (px_range / 2.0) - 1.0) / px_range;
d = min(d, msdf_sample.a);
float a = clamp((d - 0.5 + cr) * px_size, 0.0, 1.0);
color.a = a * color.a;
} else {
float a = clamp((d - 0.5) * px_size + 0.5, 0.0, 1.0);
color.a = a * color.a;
}
} else if (bool(draw_data.flags & INSTANCE_FLAGS_USE_LCD)) {
vec4 lcd_sample = texture(sampler2D(color_texture, texture_sampler), uv);
if (lcd_sample.a == 1.0) {
color.rgb = lcd_sample.rgb * color.a;
} else {
color = vec4(0.0, 0.0, 0.0, 0.0);
}
} else {
#else
{
#endif
color *= texture(sampler2D(color_texture, texture_sampler), uv);
}
uint light_count = draw_data.flags & 15u; //max 15 lights
bool using_light = (light_count + canvas_data.directional_light_count) > 0;
vec3 normal;
#if defined(NORMAL_USED)
bool normal_used = true;
#else
bool normal_used = false;
#endif
if (normal_used || (using_light && bool(params.batch_flags & BATCH_FLAGS_DEFAULT_NORMAL_MAP_USED))) {
normal.xy = texture(sampler2D(normal_texture, texture_sampler), uv).xy * vec2(2.0, -2.0) - vec2(1.0, -1.0);
#if !defined(USE_ATTRIBUTES) && !defined(USE_PRIMITIVE)
if (bool(draw_data.flags & INSTANCE_FLAGS_TRANSPOSE_RECT)) {
normal.xy = normal.yx;
}
normal.xy *= sign(draw_data.src_rect.zw);
#endif
normal.z = sqrt(max(0.0, 1.0 - dot(normal.xy, normal.xy)));
normal_used = true;
} else {
normal = vec3(0.0, 0.0, 1.0);
}
vec4 specular_shininess;
#if defined(SPECULAR_SHININESS_USED)
bool specular_shininess_used = true;
#else
bool specular_shininess_used = false;
#endif
if (specular_shininess_used || (using_light && normal_used && bool(params.batch_flags & BATCH_FLAGS_DEFAULT_SPECULAR_MAP_USED))) {
specular_shininess = texture(sampler2D(specular_texture, texture_sampler), uv);
specular_shininess *= unpackUnorm4x8(params.specular_shininess);
specular_shininess_used = true;
} else {
specular_shininess = vec4(1.0);
}
#if defined(SCREEN_UV_USED)
vec2 screen_uv = gl_FragCoord.xy * canvas_data.screen_pixel_size;
#else
vec2 screen_uv = vec2(0.0);
#endif
vec3 light_vertex = vec3(vertex, 0.0);
vec2 shadow_vertex = vertex;
{
float normal_map_depth = 1.0;
#if defined(NORMAL_MAP_USED)
vec3 normal_map = vec3(0.0, 0.0, 1.0);
normal_used = true;
#endif
#CODE : FRAGMENT
#if defined(NORMAL_MAP_USED)
normal = mix(vec3(0.0, 0.0, 1.0), normal_map * vec3(2.0, -2.0, 1.0) - vec3(1.0, -1.0, 0.0), normal_map_depth);
#endif
}
if (normal_used) {
//convert by item transform
normal.xy = mat2(normalize(draw_data.world_x), normalize(draw_data.world_y)) * normal.xy;
//convert by canvas transform
normal = normalize((canvas_data.canvas_normal_transform * vec4(normal, 0.0)).xyz);
}
vec4 base_color = color;
#ifdef MODE_LIGHT_ONLY
float light_only_alpha = 0.0;
#elif !defined(MODE_UNSHADED)
color *= canvas_data.canvas_modulation;
#endif
#if !defined(MODE_UNSHADED)
if (sc_use_lighting()) {
// Directional Lights
for (uint i = 0; i < canvas_data.directional_light_count; i++) {
uint light_base = i;
vec2 direction = light_array.data[light_base].position;
vec4 light_color = light_array.data[light_base].color;
#ifdef LIGHT_CODE_USED
vec4 shadow_modulate = vec4(1.0);
light_color = light_compute(light_vertex, vec3(direction, light_array.data[light_base].height), normal, light_color, light_color.a, specular_shininess, shadow_modulate, screen_uv, uv, base_color, true);
#else
if (normal_used) {
vec3 light_vec = normalize(mix(vec3(direction, 0.0), vec3(0, 0, 1), light_array.data[light_base].height));
light_color.rgb = light_normal_compute(light_vec, normal, base_color.rgb, light_color.rgb, specular_shininess, specular_shininess_used);
} else {
light_color.rgb *= base_color.rgb;
}
#endif
if (bool(light_array.data[light_base].flags & LIGHT_FLAGS_HAS_SHADOW)) {
vec2 shadow_pos = (vec4(shadow_vertex, 0.0, 1.0) * mat4(light_array.data[light_base].shadow_matrix[0], light_array.data[light_base].shadow_matrix[1], vec4(0.0, 0.0, 1.0, 0.0), vec4(0.0, 0.0, 0.0, 1.0))).xy; //multiply inverse given its transposed. Optimizer removes useless operations.
vec4 shadow_uv = vec4(shadow_pos.x, light_array.data[light_base].shadow_y_ofs, shadow_pos.y * light_array.data[light_base].shadow_zfar_inv, 1.0);
light_color = light_shadow_compute(light_base, light_color, shadow_uv
#ifdef LIGHT_CODE_USED
,
shadow_modulate.rgb
#endif
);
}
light_blend_compute(light_base, light_color, color.rgb);
#ifdef MODE_LIGHT_ONLY
light_only_alpha += light_color.a;
#endif
}
// Positional Lights
for (uint i = 0; i < MAX_LIGHTS_PER_ITEM; i++) {
if (i >= light_count) {
break;
}
uint light_base = bitfieldExtract(draw_data.lights[i >> 2], (int(i) & 0x3) * 8, 8);
vec2 tex_uv = (vec4(vertex, 0.0, 1.0) * mat4(light_array.data[light_base].texture_matrix[0], light_array.data[light_base].texture_matrix[1], vec4(0.0, 0.0, 1.0, 0.0), vec4(0.0, 0.0, 0.0, 1.0))).xy; //multiply inverse given its transposed. Optimizer removes useless operations.
vec2 tex_uv_atlas = tex_uv * light_array.data[light_base].atlas_rect.zw + light_array.data[light_base].atlas_rect.xy;
if (any(lessThan(tex_uv, vec2(0.0, 0.0))) || any(greaterThanEqual(tex_uv, vec2(1.0, 1.0)))) {
//if outside the light texture, light color is zero
continue;
}
vec4 light_color = textureLod(sampler2D(atlas_texture, texture_sampler), tex_uv_atlas, 0.0);
vec4 light_base_color = light_array.data[light_base].color;
#ifdef LIGHT_CODE_USED
vec4 shadow_modulate = vec4(1.0);
vec3 light_position = vec3(light_array.data[light_base].position, light_array.data[light_base].height);
light_color.rgb *= light_base_color.rgb;
light_color = light_compute(light_vertex, light_position, normal, light_color, light_base_color.a, specular_shininess, shadow_modulate, screen_uv, uv, base_color, false);
#else
light_color.rgb *= light_base_color.rgb * light_base_color.a;
if (normal_used) {
vec3 light_pos = vec3(light_array.data[light_base].position, light_array.data[light_base].height);
vec3 pos = light_vertex;
vec3 light_vec = normalize(light_pos - pos);
light_color.rgb = light_normal_compute(light_vec, normal, base_color.rgb, light_color.rgb, specular_shininess, specular_shininess_used);
} else {
light_color.rgb *= base_color.rgb;
}
#endif
if (bool(light_array.data[light_base].flags & LIGHT_FLAGS_HAS_SHADOW) && bool(draw_data.flags & (INSTANCE_FLAGS_SHADOW_MASKED << i))) {
vec2 shadow_pos = (vec4(shadow_vertex, 0.0, 1.0) * mat4(light_array.data[light_base].shadow_matrix[0], light_array.data[light_base].shadow_matrix[1], vec4(0.0, 0.0, 1.0, 0.0), vec4(0.0, 0.0, 0.0, 1.0))).xy; //multiply inverse given its transposed. Optimizer removes useless operations.
vec2 pos_norm = normalize(shadow_pos);
vec2 pos_abs = abs(pos_norm);
vec2 pos_box = pos_norm / max(pos_abs.x, pos_abs.y);
vec2 pos_rot = pos_norm * mat2(vec2(0.7071067811865476, -0.7071067811865476), vec2(0.7071067811865476, 0.7071067811865476)); //is there a faster way to 45 degrees rot?
float tex_ofs;
float distance;
if (pos_rot.y > 0) {
if (pos_rot.x > 0) {
tex_ofs = pos_box.y * 0.125 + 0.125;
distance = shadow_pos.x;
} else {
tex_ofs = pos_box.x * -0.125 + (0.25 + 0.125);
distance = shadow_pos.y;
}
} else {
if (pos_rot.x < 0) {
tex_ofs = pos_box.y * -0.125 + (0.5 + 0.125);
distance = -shadow_pos.x;
} else {
tex_ofs = pos_box.x * 0.125 + (0.75 + 0.125);
distance = -shadow_pos.y;
}
}
distance *= light_array.data[light_base].shadow_zfar_inv;
//float distance = length(shadow_pos);
vec4 shadow_uv = vec4(tex_ofs, light_array.data[light_base].shadow_y_ofs, distance, 1.0);
light_color = light_shadow_compute(light_base, light_color, shadow_uv
#ifdef LIGHT_CODE_USED
,
shadow_modulate.rgb
#endif
);
}
light_blend_compute(light_base, light_color, color.rgb);
#ifdef MODE_LIGHT_ONLY
light_only_alpha += light_color.a;
#endif
}
}
#endif
#ifdef MODE_LIGHT_ONLY
color.a *= light_only_alpha;
#endif
frag_color = color;
}
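A reading aid, not from the Godot source: a small Python sketch of the MSDF coverage math used in the fragment shader above. msdf_median picks the median channel, and the distance is sharpened by px_size, which the shader derives from px_range, the texture size and fwidth(uv); the values in the final call are arbitrary.

def msdf_median(r, g, b):
    return max(min(r, g), min(max(r, g), b))

def msdf_alpha(msdf_sample, px_size, outline_thickness, px_range):
    d = msdf_median(*msdf_sample[:3])
    if outline_thickness > 0.0:
        # Outline: shift the 0.5 iso-level outward by a clamped fraction of px_range.
        cr = min(max(outline_thickness, 0.0), px_range / 2.0 - 1.0) / px_range
        d = min(d, msdf_sample[3])
        return min(max((d - 0.5 + cr) * px_size, 0.0), 1.0)
    return min(max((d - 0.5) * px_size + 0.5, 0.0), 1.0)

print(msdf_alpha((0.6, 0.4, 0.55, 1.0), 4.0, 0.0, 8.0))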


@@ -0,0 +1,130 @@
#[vertex]
#version 450
#VERSION_DEFINES
layout(location = 0) in highp vec3 vertex;
#ifdef POSITIONAL_SHADOW
layout(push_constant, std430) uniform Constants {
mat2x4 modelview;
vec4 rotation;
vec2 direction;
float z_far;
uint pad;
float z_near;
uint cull_mode;
float pad3;
float pad4;
}
constants;
layout(set = 0, binding = 0, std430) restrict readonly buffer OccluderTransforms {
mat2x4 transforms[];
}
occluder_transforms;
#else
layout(push_constant, std430) uniform Constants {
mat4 projection;
mat2x4 modelview;
vec2 direction;
float z_far;
uint cull_mode;
}
constants;
#endif
#ifdef MODE_SHADOW
layout(location = 0) out highp float depth;
#endif
void main() {
#ifdef POSITIONAL_SHADOW
float c = -(constants.z_far + constants.z_near) / (constants.z_far - constants.z_near);
float d = -2.0 * constants.z_far * constants.z_near / (constants.z_far - constants.z_near);
mat4 projection = mat4(vec4(1.0, 0.0, 0.0, 0.0),
vec4(0.0, 1.0, 0.0, 0.0),
vec4(0.0, 0.0, c, -1.0),
vec4(0.0, 0.0, d, 0.0));
// Precomputed:
// Vector3 cam_target = Basis::from_euler(Vector3(0, 0, Math_TAU * ((i + 3) / 4.0))).xform(Vector3(0, 1, 0));
// projection = projection * Projection(Transform3D().looking_at(cam_targets[i], Vector3(0, 0, -1)).affine_inverse());
projection *= mat4(vec4(constants.rotation.x, 0.0, constants.rotation.y, 0.0), vec4(constants.rotation.z, 0.0, constants.rotation.w, 0.0), vec4(0.0, -1.0, 0.0, 0.0), vec4(0.0, 0.0, 0.0, 1.0));
mat4 modelview = mat4(occluder_transforms.transforms[constants.pad]) * mat4(constants.modelview);
#else
mat4 projection = constants.projection;
mat4 modelview = mat4(constants.modelview[0], constants.modelview[1], vec4(0.0, 0.0, 1.0, 0.0), vec4(0.0, 0.0, 0.0, 1.0));
#endif
highp vec4 vtx = vec4(vertex, 1.0) * modelview;
#ifdef MODE_SHADOW
depth = dot(constants.direction, vtx.xy);
#endif
gl_Position = projection * vtx;
}
#[fragment]
#version 450
#VERSION_DEFINES
#ifdef POSITIONAL_SHADOW
layout(push_constant, std430) uniform Constants {
mat2x4 modelview;
vec4 rotation;
vec2 direction;
float z_far;
uint pad;
float z_near;
uint cull_mode;
float pad3;
float pad4;
}
constants;
#else
layout(push_constant, std430) uniform Constants {
mat4 projection;
mat2x4 modelview;
vec2 direction;
float z_far;
uint cull_mode;
}
constants;
#endif
#ifdef MODE_SHADOW
layout(location = 0) in highp float depth;
layout(location = 0) out highp float distance_buf;
#else
layout(location = 0) out highp float sdf_buf;
#endif
#define POLYGON_CULL_DISABLED 0
#define POLYGON_CULL_FRONT 1
#define POLYGON_CULL_BACK 2
void main() {
#ifdef MODE_SHADOW
bool front_facing = gl_FrontFacing;
if (constants.cull_mode == POLYGON_CULL_BACK && !front_facing) {
discard;
} else if (constants.cull_mode == POLYGON_CULL_FRONT && front_facing) {
discard;
}
distance_buf = depth / constants.z_far;
#else
sdf_buf = 1.0;
#endif
}
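Illustrative only (not from the Godot source): a Python sketch of the perspective projection the positional-shadow vertex path builds from z_near and z_far, checking that view-space depths at the near and far planes map to clip-space -1 and +1.

def shadow_projection(z_near, z_far):
    c = -(z_far + z_near) / (z_far - z_near)
    d = -2.0 * z_far * z_near / (z_far - z_near)
    # List of columns, matching the GLSL mat4(vec4, vec4, vec4, vec4) constructor.
    return [[1.0, 0.0, 0.0, 0.0],
            [0.0, 1.0, 0.0, 0.0],
            [0.0, 0.0, c, -1.0],
            [0.0, 0.0, d, 0.0]]

def projected_depth(proj, z_view):
    clip_z = proj[2][2] * z_view + proj[3][2]
    clip_w = proj[2][3] * z_view
    return clip_z / clip_w

proj = shadow_projection(0.1, 100.0)
print(projected_depth(proj, -0.1))    # near plane -> ~-1.0
print(projected_depth(proj, -100.0))  # far plane  -> ~+1.0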


@@ -0,0 +1,179 @@
#[compute]
#version 450
#VERSION_DEFINES
layout(local_size_x = 8, local_size_y = 8, local_size_z = 1) in;
layout(r8, set = 0, binding = 1) uniform restrict readonly image2D src_pixels;
layout(r16_snorm, set = 0, binding = 2) uniform restrict writeonly image2D dst_sdf;
layout(rg16i, set = 0, binding = 3) uniform restrict readonly iimage2D src_process;
layout(rg16i, set = 0, binding = 4) uniform restrict writeonly iimage2D dst_process;
layout(push_constant, std430) uniform Params {
ivec2 size;
int stride;
int shift;
ivec2 base_size;
uvec2 pad;
}
params;
#define SDF_MAX_LENGTH 16384.0
void main() {
ivec2 pos = ivec2(gl_GlobalInvocationID.xy);
if (any(greaterThanEqual(pos, params.size))) { //too large, do nothing
return;
}
#ifdef MODE_LOAD
bool solid = imageLoad(src_pixels, pos).r > 0.5;
imageStore(dst_process, pos, solid ? ivec4(ivec2(-32767), 0, 0) : ivec4(ivec2(32767), 0, 0));
#endif
#ifdef MODE_LOAD_SHRINK
int s = 1 << params.shift;
ivec2 base = pos << params.shift;
ivec2 center = base + ivec2(params.shift);
ivec2 rel = ivec2(32767);
float d = 1e20;
int found = 0;
int solid_found = 0;
for (int i = 0; i < s; i++) {
for (int j = 0; j < s; j++) {
ivec2 src_pos = base + ivec2(i, j);
if (any(greaterThanEqual(src_pos, params.base_size))) {
continue;
}
bool solid = imageLoad(src_pixels, src_pos).r > 0.5;
if (solid) {
float dist = length(vec2(src_pos - center));
if (dist < d) {
d = dist;
rel = src_pos;
}
solid_found++;
}
found++;
}
}
if (solid_found == found) {
//mark solid only if all are solid
rel = ivec2(-32767);
}
imageStore(dst_process, pos, ivec4(rel, 0, 0));
#endif
#ifdef MODE_PROCESS
ivec2 base = pos << params.shift;
ivec2 center = base + ivec2(params.shift);
ivec2 rel = imageLoad(src_process, pos).xy;
bool solid = rel.x < 0;
if (solid) {
rel = -rel - ivec2(1);
}
if (center != rel) {
//only process if it does not point to itself
const int ofs_table_size = 8;
const ivec2 ofs_table[ofs_table_size] = ivec2[](
ivec2(-1, -1),
ivec2(0, -1),
ivec2(+1, -1),
ivec2(-1, 0),
ivec2(+1, 0),
ivec2(-1, +1),
ivec2(0, +1),
ivec2(+1, +1));
float dist = length(vec2(rel - center));
for (int i = 0; i < ofs_table_size; i++) {
ivec2 src_pos = pos + ofs_table[i] * params.stride;
if (any(lessThan(src_pos, ivec2(0))) || any(greaterThanEqual(src_pos, params.size))) {
continue;
}
ivec2 src_rel = imageLoad(src_process, src_pos).xy;
bool src_solid = src_rel.x < 0;
if (src_solid) {
src_rel = -src_rel - ivec2(1);
}
if (src_solid != solid) {
src_rel = ivec2(src_pos << params.shift); //point to itself if of different type
}
float src_dist = length(vec2(src_rel - center));
if (src_dist < dist) {
dist = src_dist;
rel = src_rel;
}
}
}
if (solid) {
rel = -rel - ivec2(1);
}
imageStore(dst_process, pos, ivec4(rel, 0, 0));
#endif
#ifdef MODE_STORE
ivec2 rel = imageLoad(src_process, pos).xy;
bool solid = rel.x < 0;
if (solid) {
rel = -rel - ivec2(1);
}
float d = length(vec2(rel - pos));
if (solid) {
d = -d;
}
d /= SDF_MAX_LENGTH;
d = clamp(d, -1.0, 1.0);
imageStore(dst_sdf, pos, vec4(d));
#endif
#ifdef MODE_STORE_SHRINK
ivec2 base = pos << params.shift;
ivec2 center = base + ivec2(params.shift);
ivec2 rel = imageLoad(src_process, pos).xy;
bool solid = rel.x < 0;
if (solid) {
rel = -rel - ivec2(1);
}
float d = length(vec2(rel - center));
if (solid) {
d = -d;
}
d /= SDF_MAX_LENGTH;
d = clamp(d, -1.0, 1.0);
imageStore(dst_sdf, pos, vec4(d));
#endif
}
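A reading aid, not from the Godot source: a Python sketch of how the MODE_STORE branch converts the stored "closest texel of the opposite type" into a signed distance normalized by SDF_MAX_LENGTH. A negative x component encodes a solid texel, as in the MODE_PROCESS branch.

import math

SDF_MAX_LENGTH = 16384.0

def store_sdf(rel, pos):
    solid = rel[0] < 0
    if solid:
        rel = (-rel[0] - 1, -rel[1] - 1)      # undo the negative encoding
    d = math.hypot(rel[0] - pos[0], rel[1] - pos[1])
    if solid:
        d = -d                                # inside geometry -> negative distance
    return max(-1.0, min(1.0, d / SDF_MAX_LENGTH))

print(store_sdf((-11, -21), (14, 24)))        # solid texel whose nearest empty texel is (10, 20)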


@@ -0,0 +1,187 @@
#define MAX_LIGHTS_PER_ITEM 16
#define M_PI 3.14159265359
#define SDF_MAX_LENGTH 16384.0
#define INSTANCE_FLAGS_LIGHT_COUNT_SHIFT 0 // 4 bits.
#define INSTANCE_FLAGS_CLIP_RECT_UV (1 << 4)
#define INSTANCE_FLAGS_TRANSPOSE_RECT (1 << 5)
#define INSTANCE_FLAGS_USE_MSDF (1 << 6)
#define INSTANCE_FLAGS_USE_LCD (1 << 7)
#define INSTANCE_FLAGS_NINEPATCH_DRAW_CENTER_SHIFT 8
#define INSTANCE_FLAGS_NINEPATCH_H_MODE_SHIFT 9
#define INSTANCE_FLAGS_NINEPATCH_V_MODE_SHIFT 11
#define INSTANCE_FLAGS_SHADOW_MASKED_SHIFT 13 // 16 bits.
#define INSTANCE_FLAGS_SHADOW_MASKED (1 << INSTANCE_FLAGS_SHADOW_MASKED_SHIFT)
struct InstanceData {
vec2 world_x;
vec2 world_y;
vec2 world_ofs;
uint flags;
uint instance_uniforms_ofs;
#ifdef USE_PRIMITIVE
vec2 points[3];
vec2 uvs[3];
uint colors[6];
#else
vec4 modulation;
vec4 ninepatch_margins;
vec4 dst_rect; //for built-in rect and UV
vec4 src_rect;
vec2 pad;
#endif
vec2 color_texture_pixel_size;
uvec4 lights;
};
//1 means enabled, 2+ means trails in use
#define BATCH_FLAGS_INSTANCING_MASK 0x7F
#define BATCH_FLAGS_INSTANCING_HAS_COLORS_SHIFT 7
#define BATCH_FLAGS_INSTANCING_HAS_COLORS (1 << BATCH_FLAGS_INSTANCING_HAS_COLORS_SHIFT)
#define BATCH_FLAGS_INSTANCING_HAS_CUSTOM_DATA_SHIFT 8
#define BATCH_FLAGS_INSTANCING_HAS_CUSTOM_DATA (1 << BATCH_FLAGS_INSTANCING_HAS_CUSTOM_DATA_SHIFT)
#define BATCH_FLAGS_DEFAULT_NORMAL_MAP_USED (1 << 9)
#define BATCH_FLAGS_DEFAULT_SPECULAR_MAP_USED (1 << 10)
layout(push_constant, std430) uniform Params {
uint base_instance_index; // base index to instance data
uint sc_packed_0;
uint specular_shininess;
uint batch_flags;
}
params;
// Specialization constants.
#ifdef UBERSHADER
// Pull the constants from the draw call's push constants.
uint sc_packed_0() {
return params.sc_packed_0;
}
#else
// Pull the constants from the pipeline's specialization constants.
layout(constant_id = 0) const uint pso_sc_packed_0 = 0;
uint sc_packed_0() {
return pso_sc_packed_0;
}
#endif
bool sc_use_lighting() {
return ((sc_packed_0() >> 0) & 1U) != 0;
}
// In Vulkan, sets should always be ordered using the following logic:
// Lower sets: sets that change format and layout less often.
// Higher sets: sets that change format and layout very often.
// This is because swapping a set for one with a different layout or format
// invalidates all the higher ones (as the internal base offsets likely change).
/* SET0: Globals */
#define CANVAS_FLAGS_CONVERT_ATTRIBUTES_TO_LINEAR (1 << 0)
// The values passed per draw primitives are cached within it
layout(set = 0, binding = 1, std140) uniform CanvasData {
mat4 canvas_transform;
mat4 screen_transform;
mat4 canvas_normal_transform;
vec4 canvas_modulation;
vec2 screen_pixel_size;
float time;
bool use_pixel_snap;
vec4 sdf_to_tex;
vec2 screen_to_sdf;
vec2 sdf_to_screen;
uint directional_light_count;
float tex_to_sdf;
float shadow_pixel_size;
uint flags;
}
canvas_data;
#define LIGHT_FLAGS_BLEND_MASK (3 << 16)
#define LIGHT_FLAGS_BLEND_MODE_ADD (0 << 16)
#define LIGHT_FLAGS_BLEND_MODE_SUB (1 << 16)
#define LIGHT_FLAGS_BLEND_MODE_MIX (2 << 16)
#define LIGHT_FLAGS_BLEND_MODE_MASK (3 << 16)
#define LIGHT_FLAGS_HAS_SHADOW (1 << 20)
#define LIGHT_FLAGS_FILTER_SHIFT 22
#define LIGHT_FLAGS_FILTER_MASK (3 << 22)
#define LIGHT_FLAGS_SHADOW_NEAREST (0 << 22)
#define LIGHT_FLAGS_SHADOW_PCF5 (1 << 22)
#define LIGHT_FLAGS_SHADOW_PCF13 (2 << 22)
struct Light {
mat2x4 texture_matrix; //light to texture coordinate matrix (transposed)
mat2x4 shadow_matrix; //light to shadow coordinate matrix (transposed)
vec4 color;
uint shadow_color; // packed
uint flags; //index to light texture
float shadow_pixel_size;
float height;
vec2 position;
float shadow_zfar_inv;
float shadow_y_ofs;
vec4 atlas_rect;
};
layout(set = 0, binding = 2, std430) restrict readonly buffer LightData {
Light data[];
}
light_array;
layout(set = 0, binding = 3) uniform texture2D atlas_texture;
layout(set = 0, binding = 4) uniform texture2D shadow_atlas_texture;
layout(set = 0, binding = 5) uniform sampler shadow_sampler;
layout(set = 0, binding = 6) uniform texture2D color_buffer;
layout(set = 0, binding = 7) uniform texture2D sdf_texture;
#include "samplers_inc.glsl"
layout(set = 0, binding = 9, std430) restrict readonly buffer GlobalShaderUniformData {
vec4 data[];
}
global_shader_uniforms;
/* SET1: Is reserved for the material */
//
/* SET2: Instancing and Skeleton */
layout(set = 2, binding = 0, std430) restrict readonly buffer Transforms {
vec4 data[];
}
transforms;
/* SET3: Texture */
layout(set = 3, binding = 0) uniform texture2D color_texture;
layout(set = 3, binding = 1) uniform texture2D normal_texture;
layout(set = 3, binding = 2) uniform texture2D specular_texture;
layout(set = 3, binding = 3) uniform sampler texture_sampler;
layout(set = 3, binding = 4, std430) restrict readonly buffer DrawData {
InstanceData data[];
}
instances;
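Illustrative only (not from the Godot source): a Python sketch that unpacks the InstanceData.flags bit layout declared above (light count in bits 0-3, two-bit ninepatch axis modes at the H/V shifts, single-bit flags in between). The sample value in the call is arbitrary.

INSTANCE_FLAGS_CLIP_RECT_UV = 1 << 4
INSTANCE_FLAGS_TRANSPOSE_RECT = 1 << 5
INSTANCE_FLAGS_NINEPATCH_H_MODE_SHIFT = 9
INSTANCE_FLAGS_NINEPATCH_V_MODE_SHIFT = 11

def unpack_instance_flags(flags):
    return {
        "light_count": flags & 0xF,                                    # bits 0-3
        "clip_rect_uv": bool(flags & INSTANCE_FLAGS_CLIP_RECT_UV),
        "transpose_rect": bool(flags & INSTANCE_FLAGS_TRANSPOSE_RECT),
        "ninepatch_h_mode": (flags >> INSTANCE_FLAGS_NINEPATCH_H_MODE_SHIFT) & 0x3,
        "ninepatch_v_mode": (flags >> INSTANCE_FLAGS_NINEPATCH_V_MODE_SHIFT) & 0x3,
    }

print(unpack_instance_flags((2 << 9) | INSTANCE_FLAGS_CLIP_RECT_UV | 3))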


@@ -0,0 +1,3 @@
#define CLUSTER_COUNTER_SHIFT 20
#define CLUSTER_POINTER_MASK ((1 << CLUSTER_COUNTER_SHIFT) - 1)
#define CLUSTER_COUNTER_MASK 0xfff
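A reading aid, not from the Godot source: the pointer/counter packing implied by the three defines above, with the pointer in the low 20 bits and a 12-bit counter above it.

CLUSTER_COUNTER_SHIFT = 20
CLUSTER_POINTER_MASK = (1 << CLUSTER_COUNTER_SHIFT) - 1
CLUSTER_COUNTER_MASK = 0xFFF

def pack(pointer, counter):
    return (pointer & CLUSTER_POINTER_MASK) | ((counter & CLUSTER_COUNTER_MASK) << CLUSTER_COUNTER_SHIFT)

def unpack(word):
    return word & CLUSTER_POINTER_MASK, (word >> CLUSTER_COUNTER_SHIFT) & CLUSTER_COUNTER_MASK

word = pack(123456, 42)
print(hex(word), unpack(word))    # -> pointer 123456, counter 42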


@@ -0,0 +1,115 @@
#[compute]
#version 450
#VERSION_DEFINES
layout(local_size_x = 8, local_size_y = 8, local_size_z = 1) in;
const vec3 usage_gradient[33] = vec3[]( // 1 (none) + 32
vec3(0.14, 0.17, 0.23),
vec3(0.24, 0.44, 0.83),
vec3(0.23, 0.57, 0.84),
vec3(0.22, 0.71, 0.84),
vec3(0.22, 0.85, 0.83),
vec3(0.21, 0.85, 0.72),
vec3(0.21, 0.85, 0.57),
vec3(0.20, 0.85, 0.42),
vec3(0.20, 0.85, 0.27),
vec3(0.27, 0.86, 0.19),
vec3(0.51, 0.85, 0.19),
vec3(0.57, 0.86, 0.19),
vec3(0.62, 0.85, 0.19),
vec3(0.67, 0.86, 0.20),
vec3(0.73, 0.85, 0.20),
vec3(0.78, 0.85, 0.20),
vec3(0.83, 0.85, 0.20),
vec3(0.85, 0.82, 0.20),
vec3(0.85, 0.76, 0.20),
vec3(0.85, 0.81, 0.20),
vec3(0.85, 0.65, 0.20),
vec3(0.84, 0.60, 0.21),
vec3(0.84, 0.56, 0.21),
vec3(0.84, 0.51, 0.21),
vec3(0.84, 0.46, 0.21),
vec3(0.84, 0.41, 0.21),
vec3(0.84, 0.36, 0.21),
vec3(0.84, 0.31, 0.21),
vec3(0.84, 0.27, 0.21),
vec3(0.83, 0.22, 0.22),
vec3(0.83, 0.22, 0.27),
vec3(0.83, 0.22, 0.32),
vec3(1.00, 0.63, 0.70));
layout(push_constant, std430) uniform Params {
uvec2 screen_size;
uvec2 cluster_screen_size;
uint cluster_shift;
uint cluster_type;
float z_near;
float z_far;
bool orthogonal;
uint max_cluster_element_count_div_32;
uint pad1;
uint pad2;
}
params;
layout(set = 0, binding = 1, std430) buffer restrict readonly ClusterData {
uint data[];
}
cluster_data;
layout(rgba16f, set = 0, binding = 2) uniform restrict writeonly image2D screen_buffer;
layout(set = 0, binding = 3) uniform texture2D depth_buffer;
layout(set = 0, binding = 4) uniform sampler depth_buffer_sampler;
void main() {
uvec2 screen_pos = gl_GlobalInvocationID.xy;
if (any(greaterThanEqual(screen_pos, params.screen_size))) {
return;
}
uvec2 cluster_pos = screen_pos >> params.cluster_shift;
uint offset = cluster_pos.y * params.cluster_screen_size.x + cluster_pos.x;
offset += params.cluster_screen_size.x * params.cluster_screen_size.y * params.cluster_type;
offset *= (params.max_cluster_element_count_div_32 + 32);
//depth buffers generally can't be accessed via image API
float depth = texelFetch(sampler2D(depth_buffer, depth_buffer_sampler), ivec2(screen_pos), 0).r * 2.0 - 1.0;
if (params.orthogonal) {
depth = ((depth + (params.z_far + params.z_near) / (params.z_far - params.z_near)) * (params.z_far - params.z_near)) / 2.0;
} else {
depth = 2.0 * params.z_near * params.z_far / (params.z_far + params.z_near - depth * (params.z_far - params.z_near));
}
depth /= params.z_far;
uint slice = uint(clamp(floor(depth * 32.0), 0.0, 31.0));
uint slice_minmax = cluster_data.data[offset + params.max_cluster_element_count_div_32 + slice];
uint item_min = slice_minmax & 0xFFFF;
uint item_max = slice_minmax >> 16;
uint item_count = 0;
for (uint i = 0; i < params.max_cluster_element_count_div_32; i++) {
uint slice_bits = cluster_data.data[offset + i];
while (slice_bits != 0) {
uint bit = findLSB(slice_bits);
uint item = i * 32 + bit;
if ((item >= item_min && item < item_max)) {
item_count++;
}
slice_bits &= ~(1 << bit);
}
}
item_count = min(item_count, 32);
vec3 color = usage_gradient[item_count];
color = mix(color * 1.2, color * 0.3, float(slice) / 31.0);
imageStore(screen_buffer, ivec2(screen_pos), vec4(color, 1.0));
}
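Illustrative only (not from the Godot source): a Python sketch of the depth linearization and 32-slice selection performed by the debug shader above (perspective branch). The z_near/z_far values are arbitrary.

def depth_to_slice(ndc_depth, z_near, z_far):
    # ndc_depth is the sampled depth already remapped to [-1, 1], as in the shader.
    depth = 2.0 * z_near * z_far / (z_far + z_near - ndc_depth * (z_far - z_near))
    depth /= z_far                            # 0 at the camera, 1 at the far plane
    return int(min(max(depth * 32.0, 0.0), 31.0))

print(depth_to_slice(0.0, 0.05, 100.0))       # NDC depth 0 is still close to the camera -> slice 0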


@@ -0,0 +1,168 @@
#[vertex]
#version 450
#VERSION_DEFINES
layout(location = 0) in vec3 vertex_attrib;
layout(location = 0) out float depth_interp;
layout(location = 1) out flat uint element_index;
layout(push_constant, std430) uniform Params {
uint base_index;
uint pad0;
uint pad1;
uint pad2;
}
params;
layout(set = 0, binding = 1, std140) uniform State {
mat4 projection;
float inv_z_far;
uint screen_to_clusters_shift; // shift to obtain coordinates in block indices
uint cluster_screen_width; //
uint cluster_data_size; // how much data for a single cluster takes
uint cluster_depth_offset;
uint pad0;
uint pad1;
uint pad2;
}
state;
struct RenderElement {
uint type; //0-4
bool touches_near;
bool touches_far;
uint original_index;
mat3x4 transform_inv;
vec3 scale;
uint pad;
};
layout(set = 0, binding = 2, std430) buffer restrict readonly RenderElements {
RenderElement data[];
}
render_elements;
void main() {
element_index = params.base_index + gl_InstanceIndex;
vec3 vertex = vertex_attrib;
vertex *= render_elements.data[element_index].scale;
vertex = vec4(vertex, 1.0) * render_elements.data[element_index].transform_inv;
depth_interp = -vertex.z;
gl_Position = state.projection * vec4(vertex, 1.0);
}
#[fragment]
#version 450
#VERSION_DEFINES
#extension GL_KHR_shader_subgroup_ballot : enable
#extension GL_KHR_shader_subgroup_arithmetic : enable
#extension GL_KHR_shader_subgroup_vote : enable
layout(location = 0) in float depth_interp;
layout(location = 1) in flat uint element_index;
layout(set = 0, binding = 1, std140) uniform State {
mat4 projection;
float inv_z_far;
uint screen_to_clusters_shift; // shift to obtain coordinates in block indices
uint cluster_screen_width; //
uint cluster_data_size; // how much data for a single cluster takes
uint cluster_depth_offset;
uint pad0;
uint pad1;
uint pad2;
}
state;
//cluster data is laid out linearly; each cell contains the following information:
// - list of bits for every element to mark as used, so (max_elem_count/32)*4 uints
// - a uint for each element to mark the depth bits used when rendering (0-31)
layout(set = 0, binding = 3, std430) buffer restrict ClusterRender {
uint data[];
}
cluster_render;
#ifdef USE_ATTACHMENT
layout(location = 0) out vec4 frag_color;
#endif
void main() {
//convert from screen to cluster
uvec2 cluster = uvec2(gl_FragCoord.xy) >> state.screen_to_clusters_shift;
//get linear cluster offset from screen pos
uint cluster_offset = cluster.x + state.cluster_screen_width * cluster.y;
//multiply by data size to position at the beginning of the element list for this cluster
cluster_offset *= state.cluster_data_size;
//find the current element in the list and plot the bit to mark it as used
uint usage_write_offset = cluster_offset + (element_index >> 5);
uint usage_write_bit = 1 << (element_index & 0x1F);
uint aux = 0;
uint cluster_thread_group_index;
#ifndef MOLTENVK_USED
if (!gl_HelperInvocation) {
#else
{
#endif
//https://advances.realtimerendering.com/s2017/2017_Sig_Improved_Culling_final.pdf
uvec4 mask;
while (true) {
// find the cluster offset of the first active thread
// threads that did break; go inactive and no longer count
uint first = subgroupBroadcastFirst(cluster_offset);
// update the mask of threads that match this cluster
mask = subgroupBallot(first == cluster_offset);
if (first == cluster_offset) {
// This thread belongs to the group of threads that match this offset,
// so exit the loop.
break;
}
}
cluster_thread_group_index = subgroupBallotExclusiveBitCount(mask);
if (cluster_thread_group_index == 0) {
aux = atomicOr(cluster_render.data[usage_write_offset], usage_write_bit);
}
}
//find the current element in the depth usage list and mark the current depth as used
float unit_depth = depth_interp * state.inv_z_far;
uint z_bit = clamp(uint(floor(unit_depth * 32.0)), 0, 31);
uint z_write_offset = cluster_offset + state.cluster_depth_offset + element_index;
uint z_write_bit = 1 << z_bit;
#ifndef MOLTENVK_USED
if (!gl_HelperInvocation) {
#else
{
#endif
z_write_bit = subgroupOr(z_write_bit); //merge all Zs
if (cluster_thread_group_index == 0) {
aux = atomicOr(cluster_render.data[z_write_offset], z_write_bit);
}
}
#ifdef USE_ATTACHMENT
frag_color = vec4(float(aux));
#endif
}
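A reading aid, not from the Godot source: a Python sketch of the offset arithmetic above, showing which 32-bit word and bit a fragment of a given element touches in the usage mask, and which word and bit it touches in the per-element depth mask. All argument values in the call are arbitrary.

def cluster_write_targets(frag_xy, element_index, depth, inv_z_far,
                          screen_to_clusters_shift, cluster_screen_width,
                          cluster_data_size, cluster_depth_offset):
    cx = int(frag_xy[0]) >> screen_to_clusters_shift
    cy = int(frag_xy[1]) >> screen_to_clusters_shift
    cluster_offset = (cx + cluster_screen_width * cy) * cluster_data_size
    usage_word = cluster_offset + (element_index >> 5)
    usage_bit = 1 << (element_index & 0x1F)
    z_bit = min(max(int(depth * inv_z_far * 32.0), 0), 31)
    z_word = cluster_offset + cluster_depth_offset + element_index
    return usage_word, usage_bit, z_word, 1 << z_bit

print(cluster_write_targets((640, 360), 37, 12.5, 1.0 / 100.0, 5, 40, 400, 100))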


@@ -0,0 +1,119 @@
#[compute]
#version 450
#VERSION_DEFINES
layout(local_size_x = 8, local_size_y = 8, local_size_z = 1) in;
layout(push_constant, std430) uniform Params {
uint cluster_render_data_size; // how much data for a single cluster takes
uint max_render_element_count_div_32; //divided by 32
uvec2 cluster_screen_size;
uint render_element_count_div_32; //divided by 32
uint max_cluster_element_count_div_32; //divided by 32
uint pad1;
uint pad2;
}
params;
layout(set = 0, binding = 1, std430) buffer restrict readonly ClusterRender {
uint data[];
}
cluster_render;
layout(set = 0, binding = 2, std430) buffer restrict ClusterStore {
uint data[];
}
cluster_store;
struct RenderElement {
uint type; //0-4
bool touches_near;
bool touches_far;
uint original_index;
mat3x4 transform_inv;
vec3 scale;
uint pad;
};
layout(set = 0, binding = 3, std430) buffer restrict readonly RenderElements {
RenderElement data[];
}
render_elements;
void main() {
uvec2 pos = gl_GlobalInvocationID.xy;
if (any(greaterThanEqual(pos, params.cluster_screen_size))) {
return;
}
//counter for each type of render_element
//base offset for this cluster
uint base_offset = (pos.x + params.cluster_screen_size.x * pos.y);
uint src_offset = base_offset * params.cluster_render_data_size;
uint render_element_offset = 0;
//check all render_elements and see which one was written to
while (render_element_offset < params.render_element_count_div_32) {
uint bits = cluster_render.data[src_offset + render_element_offset];
while (bits != 0) {
//if bits exist, check the render_element
uint index_bit = findLSB(bits);
uint index = render_element_offset * 32 + index_bit;
uint type = render_elements.data[index].type;
uint z_range_offset = src_offset + params.max_render_element_count_div_32 + index;
uint z_range = cluster_render.data[z_range_offset];
//if object was written, z was written, but check just in case
if (z_range != 0) { //should always be > 0
uint from_z = findLSB(z_range);
uint to_z = findMSB(z_range) + 1;
if (render_elements.data[index].touches_near) {
from_z = 0;
}
if (render_elements.data[index].touches_far) {
to_z = 32;
}
// find cluster offset in the buffer used for indexing in the renderer
uint dst_offset = (base_offset + type * (params.cluster_screen_size.x * params.cluster_screen_size.y)) * (params.max_cluster_element_count_div_32 + 32);
uint orig_index = render_elements.data[index].original_index;
//store this index in the Z slices by setting the relevant bit
for (uint i = from_z; i < to_z; i++) {
uint slice_ofs = dst_offset + params.max_cluster_element_count_div_32 + i;
uint minmax = cluster_store.data[slice_ofs];
if (minmax == 0) {
minmax = 0xFFFF; //sentinel: min field starts at 0xFFFF, max field at 0, so the first write sets both
}
uint elem_min = min(orig_index, minmax & 0xFFFF);
uint elem_max = max(orig_index + 1, minmax >> 16); //always store plus one, so zero means range is empty when not written to
minmax = elem_min | (elem_max << 16);
cluster_store.data[slice_ofs] = minmax;
}
uint store_word = orig_index >> 5;
uint store_bit = orig_index & 0x1F;
//store the actual render_element index at the end, so the rendering code can reference it
cluster_store.data[dst_offset + store_word] |= 1 << store_bit;
}
bits &= ~(1 << index_bit); //clear the bit to continue iterating
}
render_element_offset++;
}
}
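Illustrative only (not from the Godot source): a Python sketch of the per-slice min/max packing in the loop above. The low 16 bits track the smallest element index seen and the high 16 bits track the largest index plus one, so a stored word of 0 means the slice is empty.

def update_slice_minmax(minmax, orig_index):
    if minmax == 0:
        minmax = 0xFFFF                       # min field = 0xFFFF, max field = 0 before any writes
    elem_min = min(orig_index, minmax & 0xFFFF)
    elem_max = max(orig_index + 1, minmax >> 16)
    return elem_min | (elem_max << 16)

m = 0
for idx in (14, 3, 27):
    m = update_slice_minmax(m, idx)
print(m & 0xFFFF, m >> 16)                    # -> 3 28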


@@ -0,0 +1,17 @@
struct DecalData {
mat4 xform; //to decal transform
vec3 inv_extents;
float albedo_mix;
vec4 albedo_rect;
vec4 normal_rect;
vec4 orm_rect;
vec4 emission_rect;
vec4 modulate;
float emission_energy;
uint mask;
float upper_fade;
float lower_fade;
mat3x4 normal_xform;
vec3 normal;
float normal_fade;
};


@@ -0,0 +1,20 @@
#!/usr/bin/env python
from misc.utility.scons_hints import *
Import("env")
if "RD_GLSL" in env["BUILDERS"]:
# find all include files
gl_include_files = [str(f) for f in Glob("*_inc.glsl")] + [str(f) for f in Glob("../*_inc.glsl")]
# find all shader code (all glsl files excluding our include files)
glsl_files = [str(f) for f in Glob("*.glsl") if str(f) not in gl_include_files]
# make sure we recompile shaders if include files change
env.Depends([f + ".gen.h" for f in glsl_files], gl_include_files + ["#glsl_builders.py"])
# compile shaders
for glsl_file in glsl_files:
env.RD_GLSL(glsl_file)
SConscript("fsr2/SCsub")


@@ -0,0 +1,161 @@
/* clang-format off */
#[vertex]
#version 450
#VERSION_DEFINES
#include "blur_raster_inc.glsl"
layout(location = 0) out vec2 uv_interp;
/* clang-format on */
void main() {
// old code, ARM driver bug on Mali-GXXx GPUs and Vulkan API 1.3.xxx
// https://github.com/godotengine/godot/pull/92817#issuecomment-2168625982
//vec2 base_arr[3] = vec2[](vec2(-1.0, -1.0), vec2(-1.0, 3.0), vec2(3.0, -1.0));
//gl_Position = vec4(base_arr[gl_VertexIndex], 0.0, 1.0);
//uv_interp = clamp(gl_Position.xy, vec2(0.0, 0.0), vec2(1.0, 1.0)) * 2.0; // saturate(x) * 2.0
vec2 vertex_base;
if (gl_VertexIndex == 0) {
vertex_base = vec2(-1.0, -1.0);
} else if (gl_VertexIndex == 1) {
vertex_base = vec2(-1.0, 3.0);
} else {
vertex_base = vec2(3.0, -1.0);
}
gl_Position = vec4(vertex_base, 0.0, 1.0);
uv_interp = clamp(vertex_base, vec2(0.0, 0.0), vec2(1.0, 1.0)) * 2.0; // saturate(x) * 2.0
}
/* clang-format off */
#[fragment]
#version 450
#VERSION_DEFINES
#include "blur_raster_inc.glsl"
layout(location = 0) in vec2 uv_interp;
/* clang-format on */
layout(set = 0, binding = 0) uniform sampler2D source_color;
#ifdef GLOW_USE_AUTO_EXPOSURE
layout(set = 1, binding = 0) uniform sampler2D source_auto_exposure;
#endif
layout(location = 0) out vec4 frag_color;
void main() {
// We do not apply our color scale for our mobile renderer here; we leave our colors at half brightness and apply the scale in the tonemap raster.
#ifdef MODE_MIPMAP
vec2 pix_size = blur.pixel_size;
vec4 color = texture(source_color, uv_interp + vec2(-0.5, -0.5) * pix_size);
color += texture(source_color, uv_interp + vec2(0.5, -0.5) * pix_size);
color += texture(source_color, uv_interp + vec2(0.5, 0.5) * pix_size);
color += texture(source_color, uv_interp + vec2(-0.5, 0.5) * pix_size);
frag_color = color / 4.0;
#endif
#ifdef MODE_GAUSSIAN_BLUR
// For Gaussian Blur we use 13 taps in a single pass instead of 12 taps over 2 passes.
// This minimizes the number of times we change framebuffers which is very important for mobile.
// Source: http://www.iryoku.com/next-generation-post-processing-in-call-of-duty-advanced-warfare
vec4 A = texture(source_color, uv_interp + blur.pixel_size * vec2(-1.0, -1.0));
vec4 B = texture(source_color, uv_interp + blur.pixel_size * vec2(0.0, -1.0));
vec4 C = texture(source_color, uv_interp + blur.pixel_size * vec2(1.0, -1.0));
vec4 D = texture(source_color, uv_interp + blur.pixel_size * vec2(-0.5, -0.5));
vec4 E = texture(source_color, uv_interp + blur.pixel_size * vec2(0.5, -0.5));
vec4 F = texture(source_color, uv_interp + blur.pixel_size * vec2(-1.0, 0.0));
vec4 G = texture(source_color, uv_interp);
vec4 H = texture(source_color, uv_interp + blur.pixel_size * vec2(1.0, 0.0));
vec4 I = texture(source_color, uv_interp + blur.pixel_size * vec2(-0.5, 0.5));
vec4 J = texture(source_color, uv_interp + blur.pixel_size * vec2(0.5, 0.5));
vec4 K = texture(source_color, uv_interp + blur.pixel_size * vec2(-1.0, 1.0));
vec4 L = texture(source_color, uv_interp + blur.pixel_size * vec2(0.0, 1.0));
vec4 M = texture(source_color, uv_interp + blur.pixel_size * vec2(1.0, 1.0));
float base_weight = 0.5 / 4.0;
float lesser_weight = 0.125 / 4.0;
frag_color = (D + E + I + J) * base_weight;
frag_color += (A + B + G + F) * lesser_weight;
frag_color += (B + C + H + G) * lesser_weight;
frag_color += (F + G + L + K) * lesser_weight;
frag_color += (G + H + M + L) * lesser_weight;
#endif
#ifdef MODE_GAUSSIAN_GLOW
//Glow uses a larger sigma (1.0) for a more rounded blur effect
#define GLOW_ADD(m_ofs, m_mult) \
{ \
vec2 ofs = uv_interp + m_ofs * pix_size; \
vec4 c = texture(source_color, ofs) * m_mult; \
if (any(lessThan(ofs, vec2(0.0))) || any(greaterThan(ofs, vec2(1.0)))) { \
c *= 0.0; \
} \
color += c; \
}
if (bool(blur.flags & FLAG_HORIZONTAL)) {
vec2 pix_size = blur.pixel_size;
pix_size *= 0.5; //reading from larger buffer, so use more samples
vec4 color = texture(source_color, uv_interp + vec2(0.0, 0.0) * pix_size) * 0.174938;
GLOW_ADD(vec2(1.0, 0.0), 0.165569);
GLOW_ADD(vec2(2.0, 0.0), 0.140367);
GLOW_ADD(vec2(3.0, 0.0), 0.106595);
GLOW_ADD(vec2(-1.0, 0.0), 0.165569);
GLOW_ADD(vec2(-2.0, 0.0), 0.140367);
GLOW_ADD(vec2(-3.0, 0.0), 0.106595);
// only do this in the horizontal pass; if we also did it in the vertical pass we would be doubling up.
color *= blur.glow_strength;
frag_color = color;
} else {
vec2 pix_size = blur.pixel_size;
vec4 color = texture(source_color, uv_interp + vec2(0.0, 0.0) * pix_size) * 0.288713;
GLOW_ADD(vec2(0.0, 1.0), 0.233062);
GLOW_ADD(vec2(0.0, 2.0), 0.122581);
GLOW_ADD(vec2(0.0, -1.0), 0.233062);
GLOW_ADD(vec2(0.0, -2.0), 0.122581);
frag_color = color;
}
#undef GLOW_ADD
if (bool(blur.flags & FLAG_GLOW_FIRST_PASS)) {
// In the first pass, bring the color back to the correct range, otherwise we would be applying the wrong threshold.
// In subsequent passes we can use it as is, since we would just be undoing it right after.
frag_color *= blur.luminance_multiplier;
#ifdef GLOW_USE_AUTO_EXPOSURE
frag_color /= texelFetch(source_auto_exposure, ivec2(0, 0), 0).r / blur.glow_auto_exposure_scale;
#endif
frag_color *= blur.glow_exposure;
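// Soft-knee threshold: feedback ramps from 0 to 1 as the brightest channel goes from
// glow_hdr_threshold to glow_hdr_threshold + glow_hdr_scale, with glow_bloom as a floor so
// a minimum amount of glow always passes; the result is then capped at glow_luminance_cap.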
float luminance = max(frag_color.r, max(frag_color.g, frag_color.b));
float feedback = max(smoothstep(blur.glow_hdr_threshold, blur.glow_hdr_threshold + blur.glow_hdr_scale, luminance), blur.glow_bloom);
frag_color = min(frag_color * feedback, vec4(blur.glow_luminance_cap)) / blur.luminance_multiplier;
}
#endif // MODE_GAUSSIAN_GLOW
#ifdef MODE_COPY
vec4 color = textureLod(source_color, uv_interp, 0.0);
frag_color = color;
#endif
}

View File

@@ -0,0 +1,26 @@
#define FLAG_HORIZONTAL (1 << 0)
#define FLAG_USE_ORTHOGONAL_PROJECTION (1 << 1)
#define FLAG_GLOW_FIRST_PASS (1 << 2)
layout(push_constant, std430) uniform Blur {
vec2 pixel_size; // 08 - 08
uint flags; // 04 - 12
uint pad; // 04 - 16
// Glow.
float glow_strength; // 04 - 20
float glow_bloom; // 04 - 24
float glow_hdr_threshold; // 04 - 28
float glow_hdr_scale; // 04 - 32
float glow_exposure; // 04 - 36
float glow_white; // 04 - 40
float glow_luminance_cap; // 04 - 44
float glow_auto_exposure_scale; // 04 - 48
float luminance_multiplier; // 04 - 52
float res1; // 04 - 56
float res2; // 04 - 60
float res3; // 04 - 64
}
blur;

View File

@@ -0,0 +1,230 @@
#[compute]
#version 450
#VERSION_DEFINES
#define BLOCK_SIZE 8
layout(local_size_x = BLOCK_SIZE, local_size_y = BLOCK_SIZE, local_size_z = 1) in;
#ifdef MODE_GEN_BLUR_SIZE
layout(rgba16f, set = 0, binding = 0) uniform restrict image2D color_image;
layout(set = 1, binding = 0) uniform sampler2D source_depth;
#endif
#if defined(MODE_BOKEH_BOX) || defined(MODE_BOKEH_HEXAGONAL) || defined(MODE_BOKEH_CIRCULAR)
layout(set = 1, binding = 0) uniform sampler2D color_texture;
layout(rgba16f, set = 0, binding = 0) uniform restrict writeonly image2D bokeh_image;
#endif
#ifdef MODE_COMPOSITE_BOKEH
layout(rgba16f, set = 0, binding = 0) uniform restrict image2D color_image;
layout(set = 1, binding = 0) uniform sampler2D source_bokeh;
#endif
// based on https://www.shadertoy.com/view/Xd3GDl
#include "bokeh_dof_inc.glsl"
#ifdef MODE_GEN_BLUR_SIZE
float get_depth_at_pos(vec2 uv) {
float depth = textureLod(source_depth, uv, 0.0).x * 2.0 - 1.0;
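// Convert the sampled NDC depth back to a positive view-space distance: the orthographic branch
// remaps it linearly into [z_near, z_far], the perspective branch applies the hyperbolic inverse
// of the projection's depth mapping.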
if (params.orthogonal) {
depth = -(depth * (params.z_far - params.z_near) - (params.z_far + params.z_near)) / 2.0;
} else {
depth = 2.0 * params.z_near * params.z_far / (params.z_far + params.z_near + depth * (params.z_far - params.z_near));
}
return depth;
}
float get_blur_size(float depth) {
if (params.blur_near_active && depth < params.blur_near_begin) {
if (params.use_physical_near) {
// Physically-based.
float d = abs(params.blur_near_begin - depth);
return -(d / (params.blur_near_begin - d)) * params.blur_size_near - DEPTH_GAP; // Near blur is negative.
} else {
// Non-physically-based.
return -(1.0 - smoothstep(params.blur_near_end, params.blur_near_begin, depth)) * params.blur_size - DEPTH_GAP; // Near blur is negative.
}
}
if (params.blur_far_active && depth > params.blur_far_begin) {
if (params.use_physical_far) {
// Physically-based.
float d = abs(params.blur_far_begin - depth);
return (d / (params.blur_far_begin + d)) * params.blur_size_far + DEPTH_GAP;
} else {
// Non-physically-based.
return smoothstep(params.blur_far_begin, params.blur_far_end, depth) * params.blur_size + DEPTH_GAP;
}
}
return 0.0;
}
#endif
#if defined(MODE_BOKEH_BOX) || defined(MODE_BOKEH_HEXAGONAL)
vec4 weighted_filter_dir(vec2 dir, vec2 uv, vec2 pixel_size) {
dir *= pixel_size;
vec4 color = texture(color_texture, uv);
vec4 accum = color;
float total = 1.0;
float blur_scale = params.blur_size / float(params.blur_steps);
if (params.use_jitter) {
uv += dir * (hash12n(uv + params.jitter_seed) - 0.5);
}
for (int i = -params.blur_steps; i <= params.blur_steps; i++) {
if (i == 0) {
continue;
}
float radius = float(i) * blur_scale;
vec2 suv = uv + dir * radius;
radius = abs(radius);
vec4 sample_color = texture(color_texture, suv);
float limit;
if (sample_color.a < color.a) {
limit = abs(sample_color.a);
} else {
limit = abs(color.a);
}
limit -= DEPTH_GAP;
float m = smoothstep(radius - 0.5, radius + 0.5, limit);
accum += mix(color, sample_color, m);
total += 1.0;
}
return accum / total;
}
#endif
void main() {
ivec2 pos = ivec2(gl_GlobalInvocationID.xy);
if (any(greaterThanEqual(pos, params.size))) { //too large, do nothing
return;
}
vec2 pixel_size = 1.0 / vec2(params.size);
vec2 uv = vec2(pos) / vec2(params.size);
#ifdef MODE_GEN_BLUR_SIZE
uv += pixel_size * 0.5;
//precompute size in alpha channel
float depth = get_depth_at_pos(uv);
float size = get_blur_size(depth);
vec4 color = imageLoad(color_image, pos);
color.a = size;
imageStore(color_image, pos, color);
#endif
#ifdef MODE_BOKEH_BOX
//pixel_size*=0.5; //resolution is doubled
if (params.second_pass || !params.half_size) {
uv += pixel_size * 0.5; //half pixel to read centers
} else {
uv += pixel_size * 0.25; //half pixel to read centers from full res
}
vec2 dir = (params.second_pass ? vec2(0.0, 1.0) : vec2(1.0, 0.0));
vec4 color = weighted_filter_dir(dir, uv, pixel_size);
imageStore(bokeh_image, pos, color);
#endif
#ifdef MODE_BOKEH_HEXAGONAL
//pixel_size*=0.5; //resolution is doubled
if (params.second_pass || !params.half_size) {
uv += pixel_size * 0.5; //half pixel to read centers
} else {
uv += pixel_size * 0.25; //half pixel to read centers from full res
}
vec2 dir = (params.second_pass ? normalize(vec2(1.0, 0.577350269189626)) : vec2(0.0, 1.0));
vec4 color = weighted_filter_dir(dir, uv, pixel_size);
if (params.second_pass) {
dir = normalize(vec2(-1.0, 0.577350269189626));
vec4 color2 = weighted_filter_dir(dir, uv, pixel_size);
color.rgb = min(color.rgb, color2.rgb);
color.a = (color.a + color2.a) * 0.5;
}
imageStore(bokeh_image, pos, color);
#endif
#ifdef MODE_BOKEH_CIRCULAR
if (params.half_size) {
pixel_size *= 0.5; //resolution is doubled
}
uv += pixel_size * 0.5; //half pixel to read centers
vec4 color = texture(color_texture, uv);
float initial_blur = color.a;
float accum = 1.0;
float radius = params.blur_scale;
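// Gather along a golden-angle (Vogel) spiral: the angle advances by ~137.5 degrees per tap and the
// radius grows by blur_scale / radius, so r^2 grows roughly linearly and the samples cover the
// bokeh disc with approximately uniform density.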
for (float ang = 0.0; radius < params.blur_size; ang += GOLDEN_ANGLE) {
vec2 suv = uv + vec2(cos(ang), sin(ang)) * pixel_size * radius;
vec4 sample_color = texture(color_texture, suv);
float sample_size = abs(sample_color.a);
if (sample_color.a > initial_blur) {
sample_size = clamp(sample_size, 0.0, abs(initial_blur) * 2.0);
}
float m = smoothstep(radius - 0.5, radius + 0.5, sample_size);
color += mix(color / accum, sample_color, m);
accum += 1.0;
radius += params.blur_scale / radius;
}
color /= accum;
imageStore(bokeh_image, pos, color);
#endif
#ifdef MODE_COMPOSITE_BOKEH
uv += pixel_size * 0.5;
vec4 color = imageLoad(color_image, pos);
vec4 bokeh = texture(source_bokeh, uv);
float mix_amount;
if (bokeh.a < color.a) {
mix_amount = min(1.0, max(0.0, max(abs(color.a), abs(bokeh.a)) - DEPTH_GAP));
} else {
mix_amount = min(1.0, max(0.0, abs(color.a) - DEPTH_GAP));
}
color.rgb = mix(color.rgb, bokeh.rgb, mix_amount); //blend between hires and lowres
color.a = 0; //reset alpha
imageStore(color_image, pos, color);
#endif
}

View File

@@ -0,0 +1,42 @@
layout(push_constant, std430) uniform Params {
ivec2 size;
float z_far;
float z_near;
bool orthogonal;
float blur_size;
float blur_scale;
int blur_steps;
bool blur_near_active;
float blur_near_begin;
float blur_near_end;
bool blur_far_active;
float blur_far_begin;
float blur_far_end;
bool second_pass;
bool half_size;
bool use_jitter;
float jitter_seed;
bool use_physical_near;
bool use_physical_far;
float blur_size_near;
float blur_size_far;
uint pad[2];
}
params;
//used to work around downsampling filter
#define DEPTH_GAP 0.0
const float GOLDEN_ANGLE = 2.39996323;
//note: uniform pdf rand [0;1[
float hash12n(vec2 p) {
p = fract(p * vec2(5.3987, 5.4421));
p += dot(p.yx, p.xy + vec2(21.5351, 14.3137));
return fract(p.x * p.y * 95.4307);
}

View File

@@ -0,0 +1,276 @@
/* clang-format off */
#[vertex]
#version 450
#VERSION_DEFINES
#include "bokeh_dof_inc.glsl"
layout(location = 0) out vec2 uv_interp;
/* clang-format on */
void main() {
// old code, ARM driver bug on Mali-GXXx GPUs and Vulkan API 1.3.xxx
// https://github.com/godotengine/godot/pull/92817#issuecomment-2168625982
//vec2 base_arr[3] = vec2[](vec2(-1.0, -1.0), vec2(-1.0, 3.0), vec2(3.0, -1.0));
//gl_Position = vec4(base_arr[gl_VertexIndex], 0.0, 1.0);
//uv_interp = clamp(gl_Position.xy, vec2(0.0, 0.0), vec2(1.0, 1.0)) * 2.0; // saturate(x) * 2.0
vec2 vertex_base;
if (gl_VertexIndex == 0) {
vertex_base = vec2(-1.0, -1.0);
} else if (gl_VertexIndex == 1) {
vertex_base = vec2(-1.0, 3.0);
} else {
vertex_base = vec2(3.0, -1.0);
}
gl_Position = vec4(vertex_base, 0.0, 1.0);
uv_interp = clamp(vertex_base, vec2(0.0, 0.0), vec2(1.0, 1.0)) * 2.0; // saturate(x) * 2.0
}
/* clang-format off */
#[fragment]
#version 450
#VERSION_DEFINES
#include "bokeh_dof_inc.glsl"
layout(location = 0) in vec2 uv_interp;
/* clang-format on */
#ifdef MODE_GEN_BLUR_SIZE
layout(location = 0) out float weight;
layout(set = 0, binding = 0) uniform sampler2D source_depth;
#else
layout(location = 0) out vec4 frag_color;
#ifdef OUTPUT_WEIGHT
layout(location = 1) out float weight;
#endif
layout(set = 0, binding = 0) uniform sampler2D source_color;
layout(set = 1, binding = 0) uniform sampler2D source_weight;
#ifdef MODE_COMPOSITE_BOKEH
layout(set = 2, binding = 0) uniform sampler2D original_weight;
#endif
#endif
//DOF
// Bokeh single pass implementation based on https://tuxedolabs.blogspot.com/2018/05/bokeh-depth-of-field-in-single-pass.html
#ifdef MODE_GEN_BLUR_SIZE
float get_depth_at_pos(vec2 uv) {
float depth = textureLod(source_depth, uv, 0.0).x * 2.0 - 1.0;
if (params.orthogonal) {
depth = -(depth * (params.z_far - params.z_near) - (params.z_far + params.z_near)) / 2.0;
} else {
depth = 2.0 * params.z_near * params.z_far / (params.z_far + params.z_near + depth * (params.z_far - params.z_near));
}
return depth;
}
float get_blur_size(float depth) {
if (params.blur_near_active && depth < params.blur_near_begin) {
if (params.use_physical_near) {
// Physically-based.
float d = abs(params.blur_near_begin - depth);
return -(d / (params.blur_near_begin - d)) * params.blur_size_near - DEPTH_GAP; // Near blur is negative.
} else {
// Non-physically-based.
return -(1.0 - smoothstep(params.blur_near_end, params.blur_near_begin, depth)) * params.blur_size - DEPTH_GAP; // Near blur is negative.
}
}
if (params.blur_far_active && depth > params.blur_far_begin) {
if (params.use_physical_far) {
// Physically-based.
float d = abs(params.blur_far_begin - depth);
return (d / (params.blur_far_begin + d)) * params.blur_size_far + DEPTH_GAP;
} else {
// Non-physically-based.
return smoothstep(params.blur_far_begin, params.blur_far_end, depth) * params.blur_size + DEPTH_GAP;
}
}
return 0.0;
}
#endif
#if defined(MODE_BOKEH_BOX) || defined(MODE_BOKEH_HEXAGONAL)
vec4 weighted_filter_dir(vec2 dir, vec2 uv, vec2 pixel_size) {
dir *= pixel_size;
vec4 color = texture(source_color, uv);
color.a = texture(source_weight, uv).r;
vec4 accum = color;
float total = 1.0;
float blur_scale = params.blur_size / float(params.blur_steps);
if (params.use_jitter) {
uv += dir * (hash12n(uv + params.jitter_seed) - 0.5);
}
for (int i = -params.blur_steps; i <= params.blur_steps; i++) {
if (i == 0) {
continue;
}
float radius = float(i) * blur_scale;
vec2 suv = uv + dir * radius;
radius = abs(radius);
vec4 sample_color = texture(source_color, suv);
sample_color.a = texture(source_weight, suv).r;
float limit;
if (sample_color.a < color.a) {
limit = abs(sample_color.a);
} else {
limit = abs(color.a);
}
limit -= DEPTH_GAP;
float m = smoothstep(radius - 0.5, radius + 0.5, limit);
accum += mix(color, sample_color, m);
total += 1.0;
}
return accum / total;
}
#endif
void main() {
vec2 pixel_size = 1.0 / vec2(params.size);
vec2 uv = uv_interp;
#ifdef MODE_GEN_BLUR_SIZE
uv += pixel_size * 0.5;
float center_depth = get_depth_at_pos(uv);
weight = get_blur_size(center_depth);
#endif
#ifdef MODE_BOKEH_BOX
//pixel_size*=0.5; //resolution is doubled
if (params.second_pass || !params.half_size) {
uv += pixel_size * 0.5; //half pixel to read centers
} else {
uv += pixel_size * 0.25; //half pixel to read centers from full res
}
float alpha = texture(source_color, uv).a; // retain this
vec2 dir = (params.second_pass ? vec2(0.0, 1.0) : vec2(1.0, 0.0));
vec4 color = weighted_filter_dir(dir, uv, pixel_size);
frag_color = color;
frag_color.a = alpha; // attempt to retain this in case we have a transparent background, ignored if half_size
#ifdef OUTPUT_WEIGHT
weight = color.a;
#endif
#endif
#ifdef MODE_BOKEH_HEXAGONAL
//pixel_size*=0.5; //resolution is doubled
if (params.second_pass || !params.half_size) {
uv += pixel_size * 0.5; //half pixel to read centers
} else {
uv += pixel_size * 0.25; //half pixel to read centers from full res
}
float alpha = texture(source_color, uv).a; // retain this
vec2 dir = (params.second_pass ? normalize(vec2(1.0, 0.577350269189626)) : vec2(0.0, 1.0));
vec4 color = weighted_filter_dir(dir, uv, pixel_size);
if (params.second_pass) {
dir = normalize(vec2(-1.0, 0.577350269189626));
vec4 color2 = weighted_filter_dir(dir, uv, pixel_size);
color.rgb = min(color.rgb, color2.rgb);
color.a = (color.a + color2.a) * 0.5;
}
frag_color = color;
frag_color.a = alpha; // attempt to retain this in case we have a transparent background, ignored if half_size
#ifdef OUTPUT_WEIGHT
weight = color.a;
#endif
#endif
#ifdef MODE_BOKEH_CIRCULAR
if (params.half_size) {
pixel_size *= 0.5; //resolution is doubled
}
uv += pixel_size * 0.5; //half pixel to read centers
vec4 color = texture(source_color, uv);
float alpha = color.a; // retain this
color.a = texture(source_weight, uv).r;
vec4 color_accum = color;
float accum = 1.0;
float radius = params.blur_scale;
for (float ang = 0.0; radius < params.blur_size; ang += GOLDEN_ANGLE) {
vec2 uv_adj = uv + vec2(cos(ang), sin(ang)) * pixel_size * radius;
vec4 sample_color = texture(source_color, uv_adj);
sample_color.a = texture(source_weight, uv_adj).r;
float limit = abs(sample_color.a);
if (sample_color.a > color.a) {
limit = clamp(limit, 0.0, abs(color.a) * 2.0);
}
limit -= DEPTH_GAP;
float m = smoothstep(radius - 0.5, radius + 0.5, limit);
color_accum += mix(color_accum / accum, sample_color, m);
accum += 1.0;
radius += params.blur_scale / radius;
}
color_accum = color_accum / accum;
frag_color.rgb = color_accum.rgb;
frag_color.a = alpha; // attempt to retain this in case we have a transparent background, ignored if half_size
#ifdef OUTPUT_WEIGHT
weight = color_accum.a;
#endif
#endif
#ifdef MODE_COMPOSITE_BOKEH
frag_color.rgb = texture(source_color, uv).rgb;
float center_weight = texture(source_weight, uv).r;
float sample_weight = texture(original_weight, uv).r;
float mix_amount;
if (sample_weight < center_weight) {
mix_amount = min(1.0, max(0.0, max(abs(center_weight), abs(sample_weight)) - DEPTH_GAP));
} else {
mix_amount = min(1.0, max(0.0, abs(center_weight) - DEPTH_GAP));
}
// let alpha blending take care of mixing
frag_color.a = mix_amount;
#endif
}

View File

@@ -0,0 +1,285 @@
#[compute]
#version 450
#VERSION_DEFINES
layout(local_size_x = 8, local_size_y = 8, local_size_z = 1) in;
#define FLAG_HORIZONTAL (1 << 0)
#define FLAG_USE_BLUR_SECTION (1 << 1)
#define FLAG_USE_ORTHOGONAL_PROJECTION (1 << 2)
#define FLAG_DOF_NEAR_FIRST_TAP (1 << 3)
#define FLAG_GLOW_FIRST_PASS (1 << 4)
#define FLAG_FLIP_Y (1 << 5)
#define FLAG_FORCE_LUMINANCE (1 << 6)
#define FLAG_COPY_ALL_SOURCE (1 << 7)
#define FLAG_ALPHA_TO_ONE (1 << 8)
layout(push_constant, std430) uniform Params {
ivec4 section;
ivec2 target;
uint flags;
uint pad;
// Glow.
float glow_strength;
float glow_bloom;
float glow_hdr_threshold;
float glow_hdr_scale;
float glow_exposure;
float glow_white;
float glow_luminance_cap;
float glow_auto_exposure_scale;
// DOF.
float camera_z_far;
float camera_z_near;
uint pad2[2];
vec4 set_color;
}
params;
#ifdef MODE_CUBEMAP_ARRAY_TO_PANORAMA
layout(set = 0, binding = 0) uniform samplerCubeArray source_color;
#elif defined(MODE_CUBEMAP_TO_PANORAMA)
layout(set = 0, binding = 0) uniform samplerCube source_color;
#elif !defined(MODE_SET_COLOR)
layout(set = 0, binding = 0) uniform sampler2D source_color;
#endif
#ifdef GLOW_USE_AUTO_EXPOSURE
layout(set = 1, binding = 0) uniform sampler2D source_auto_exposure;
#endif
#if defined(MODE_LINEARIZE_DEPTH_COPY) || defined(MODE_SIMPLE_COPY_DEPTH)
layout(r32f, set = 3, binding = 0) uniform restrict writeonly image2D dest_buffer;
#elif defined(DST_IMAGE_8BIT)
layout(rgba8, set = 3, binding = 0) uniform restrict writeonly image2D dest_buffer;
#else
layout(rgba16f, set = 3, binding = 0) uniform restrict writeonly image2D dest_buffer;
#endif
#ifdef MODE_GAUSSIAN_BLUR
shared vec4 local_cache[256];
shared vec4 temp_cache[128];
#endif
void main() {
// Pixel being shaded
ivec2 pos = ivec2(gl_GlobalInvocationID.xy);
#ifndef MODE_GAUSSIAN_BLUR // Gaussian blur needs the extra threads
if (any(greaterThanEqual(pos, params.section.zw))) { //too large, do nothing
return;
}
#endif
#ifdef MODE_MIPMAP
ivec2 base_pos = (pos + params.section.xy) << 1;
vec4 color = texelFetch(source_color, base_pos, 0);
color += texelFetch(source_color, base_pos + ivec2(0, 1), 0);
color += texelFetch(source_color, base_pos + ivec2(1, 0), 0);
color += texelFetch(source_color, base_pos + ivec2(1, 1), 0);
color /= 4.0;
color = mix(color, vec4(100.0, 100.0, 100.0, 1.0), isinf(color));
color = mix(color, vec4(100.0, 100.0, 100.0, 1.0), isnan(color));
imageStore(dest_buffer, pos + params.target, color);
#endif
#ifdef MODE_GAUSSIAN_BLUR
// First pass copy texture into 16x16 local memory for every 8x8 thread block
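// Each 8x8 workgroup fills a 16x16 tile (every thread fetches a 2x2 quad of texels), offset by
// -3.5 texels so the tile surrounds the block with a roughly 4-texel apron for the kernel taps.
// The horizontal pass below then reduces this tile to an 8x16 strip stored in temp_cache.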
vec2 quad_center_uv = clamp(vec2(params.section.xy + gl_GlobalInvocationID.xy + gl_LocalInvocationID.xy - 3.5) / params.section.zw, vec2(0.5 / params.section.zw), vec2(1.0 - 1.5 / params.section.zw));
uint dest_index = gl_LocalInvocationID.x * 2 + gl_LocalInvocationID.y * 2 * 16;
local_cache[dest_index] = textureLod(source_color, quad_center_uv, 0);
local_cache[dest_index + 1] = textureLod(source_color, quad_center_uv + vec2(1.0 / params.section.z, 0.0), 0);
local_cache[dest_index + 16] = textureLod(source_color, quad_center_uv + vec2(0.0, 1.0 / params.section.w), 0);
local_cache[dest_index + 16 + 1] = textureLod(source_color, quad_center_uv + vec2(1.0 / params.section.zw), 0);
#ifdef MODE_GLOW
if (bool(params.flags & FLAG_GLOW_FIRST_PASS)) {
// Tonemap initial samples to reduce weight of fireflies: https://graphicrants.blogspot.com/2013/12/tone-mapping.html
vec3 tonemap_col = vec3(0.299, 0.587, 0.114) / max(params.glow_luminance_cap, 6.0);
local_cache[dest_index] /= 1.0 + dot(local_cache[dest_index].rgb, tonemap_col);
local_cache[dest_index + 1] /= 1.0 + dot(local_cache[dest_index + 1].rgb, tonemap_col);
local_cache[dest_index + 16] /= 1.0 + dot(local_cache[dest_index + 16].rgb, tonemap_col);
local_cache[dest_index + 16 + 1] /= 1.0 + dot(local_cache[dest_index + 16 + 1].rgb, tonemap_col);
}
const float kernel[5] = { 0.2024, 0.1790, 0.1240, 0.0672, 0.0285 };
#else
// Simpler blur uses SIGMA2 for the gaussian kernel for a stronger effect.
const float kernel[4] = { 0.214607, 0.189879, 0.131514, 0.071303 };
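// Like the glow kernel above, these weights sum to ~1.0 (0.214607 + 2.0 * (0.189879 + 0.131514 + 0.071303)),
// so the blur does not change overall brightness.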
#endif
memoryBarrierShared();
barrier();
// Horizontal pass. Needs to copy into 8x16 chunk of local memory so vertical pass has full resolution
uint read_index = gl_LocalInvocationID.x + gl_LocalInvocationID.y * 32 + 4;
vec4 color_top = vec4(0.0);
color_top += local_cache[read_index] * kernel[0];
color_top += local_cache[read_index + 1] * kernel[1];
color_top += local_cache[read_index + 2] * kernel[2];
color_top += local_cache[read_index + 3] * kernel[3];
color_top += local_cache[read_index - 1] * kernel[1];
color_top += local_cache[read_index - 2] * kernel[2];
color_top += local_cache[read_index - 3] * kernel[3];
#ifdef MODE_GLOW
color_top += local_cache[read_index + 4] * kernel[4];
color_top += local_cache[read_index - 4] * kernel[4];
#endif // MODE_GLOW
vec4 color_bottom = vec4(0.0);
color_bottom += local_cache[read_index + 16] * kernel[0];
color_bottom += local_cache[read_index + 1 + 16] * kernel[1];
color_bottom += local_cache[read_index + 2 + 16] * kernel[2];
color_bottom += local_cache[read_index + 3 + 16] * kernel[3];
color_bottom += local_cache[read_index - 1 + 16] * kernel[1];
color_bottom += local_cache[read_index - 2 + 16] * kernel[2];
color_bottom += local_cache[read_index - 3 + 16] * kernel[3];
#ifdef MODE_GLOW
color_bottom += local_cache[read_index + 4 + 16] * kernel[4];
color_bottom += local_cache[read_index - 4 + 16] * kernel[4];
#endif // MODE_GLOW
// rotate samples to take advantage of cache coherency
uint write_index = gl_LocalInvocationID.y * 2 + gl_LocalInvocationID.x * 16;
temp_cache[write_index] = color_top;
temp_cache[write_index + 1] = color_bottom;
memoryBarrierShared();
barrier();
// If destination outside of texture, can stop doing work now
if (any(greaterThanEqual(pos, params.section.zw))) {
return;
}
// Vertical pass
uint index = gl_LocalInvocationID.y + gl_LocalInvocationID.x * 16 + 4;
vec4 color = vec4(0.0);
color += temp_cache[index] * kernel[0];
color += temp_cache[index + 1] * kernel[1];
color += temp_cache[index + 2] * kernel[2];
color += temp_cache[index + 3] * kernel[3];
color += temp_cache[index - 1] * kernel[1];
color += temp_cache[index - 2] * kernel[2];
color += temp_cache[index - 3] * kernel[3];
#ifdef MODE_GLOW
color += temp_cache[index + 4] * kernel[4];
color += temp_cache[index - 4] * kernel[4];
#endif // MODE_GLOW
#ifdef MODE_GLOW
if (bool(params.flags & FLAG_GLOW_FIRST_PASS)) {
// Undo tonemap to restore range: https://graphicrants.blogspot.com/2013/12/tone-mapping.html
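// Algebraic inverse of the weighting applied when the tile was loaded: x -> x / (1 + w * x)
// followed by y -> y / (1 - w * y) returns x. Applied after the blur it is only an approximation,
// but it restores the overall HDR range.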
color /= 1.0 - dot(color.rgb, vec3(0.299, 0.587, 0.114) / max(params.glow_luminance_cap, 6.0));
}
color *= params.glow_strength;
if (bool(params.flags & FLAG_GLOW_FIRST_PASS)) {
#ifdef GLOW_USE_AUTO_EXPOSURE
color /= texelFetch(source_auto_exposure, ivec2(0, 0), 0).r / params.glow_auto_exposure_scale;
#endif
color *= params.glow_exposure;
float luminance = max(color.r, max(color.g, color.b));
float feedback = max(smoothstep(params.glow_hdr_threshold, params.glow_hdr_threshold + params.glow_hdr_scale, luminance), params.glow_bloom);
color = min(color * feedback, vec4(params.glow_luminance_cap));
}
#endif // MODE_GLOW
imageStore(dest_buffer, pos + params.target, color);
#endif // MODE_GAUSSIAN_BLUR
#ifdef MODE_SIMPLE_COPY
vec4 color;
if (bool(params.flags & FLAG_COPY_ALL_SOURCE)) {
vec2 uv = vec2(pos) / vec2(params.section.zw);
if (bool(params.flags & FLAG_FLIP_Y)) {
uv.y = 1.0 - uv.y;
}
color = textureLod(source_color, uv, 0.0);
} else {
color = texelFetch(source_color, pos + params.section.xy, 0);
if (bool(params.flags & FLAG_FLIP_Y)) {
pos.y = params.section.w - pos.y - 1;
}
}
if (bool(params.flags & FLAG_FORCE_LUMINANCE)) {
color.rgb = vec3(max(max(color.r, color.g), color.b));
}
if (bool(params.flags & FLAG_ALPHA_TO_ONE)) {
color.a = 1.0;
}
imageStore(dest_buffer, pos + params.target, color);
#endif // MODE_SIMPLE_COPY
#ifdef MODE_SIMPLE_COPY_DEPTH
vec4 color = texelFetch(source_color, pos + params.section.xy, 0);
if (bool(params.flags & FLAG_FLIP_Y)) {
pos.y = params.section.w - pos.y - 1;
}
imageStore(dest_buffer, pos + params.target, vec4(color.r));
#endif // MODE_SIMPLE_COPY_DEPTH
#ifdef MODE_LINEARIZE_DEPTH_COPY
float depth = texelFetch(source_color, pos + params.section.xy, 0).r;
depth = depth * 2.0 - 1.0;
depth = 2.0 * params.camera_z_near * params.camera_z_far / (params.camera_z_far + params.camera_z_near - depth * (params.camera_z_far - params.camera_z_near));
vec4 color = vec4(depth / params.camera_z_far);
if (bool(params.flags & FLAG_FLIP_Y)) {
pos.y = params.section.w - pos.y - 1;
}
imageStore(dest_buffer, pos + params.target, color);
#endif // MODE_LINEARIZE_DEPTH_COPY
#if defined(MODE_CUBEMAP_TO_PANORAMA) || defined(MODE_CUBEMAP_ARRAY_TO_PANORAMA)
const float PI = 3.14159265359;
vec2 uv = vec2(pos) / vec2(params.section.zw);
if (bool(params.flags & FLAG_FLIP_Y)) {
uv.y = 1.0 - uv.y;
}
float phi = uv.x * 2.0 * PI;
float theta = uv.y * PI;
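// Equirectangular mapping: u is the longitude (phi in [0, 2*PI)) and v the polar angle from +Y
// (theta in [0, PI]); the negated x/z components pick the panorama's wrap direction.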
vec3 normal;
normal.x = sin(phi) * sin(theta) * -1.0;
normal.y = cos(theta);
normal.z = cos(phi) * sin(theta) * -1.0;
#ifdef MODE_CUBEMAP_TO_PANORAMA
vec4 color = textureLod(source_color, normal, params.camera_z_far); // The bigger the LOD, the less the acne.
#else
vec4 color = textureLod(source_color, vec4(normal, params.camera_z_far), 0.0); // The bigger the LOD, the less the acne.
#endif
imageStore(dest_buffer, pos + params.target, color);
#endif // defined(MODE_CUBEMAP_TO_PANORAMA) || defined(MODE_CUBEMAP_ARRAY_TO_PANORAMA)
#ifdef MODE_SET_COLOR
imageStore(dest_buffer, pos + params.target, params.set_color);
#endif
}

View File

@@ -0,0 +1,195 @@
#[vertex]
#version 450
#VERSION_DEFINES
#ifdef USE_MULTIVIEW
#extension GL_EXT_multiview : enable
#define ViewIndex gl_ViewIndex
#endif // USE_MULTIVIEW
#define FLAG_FLIP_Y (1 << 0)
#define FLAG_USE_SECTION (1 << 1)
#define FLAG_FORCE_LUMINANCE (1 << 2)
#define FLAG_ALPHA_TO_ZERO (1 << 3)
#define FLAG_SRGB (1 << 4)
#define FLAG_ALPHA_TO_ONE (1 << 5)
#define FLAG_LINEAR (1 << 6)
#define FLAG_NORMAL (1 << 7)
#define FLAG_USE_SRC_SECTION (1 << 8)
#ifdef USE_MULTIVIEW
layout(location = 0) out vec3 uv_interp;
#else
layout(location = 0) out vec2 uv_interp;
#endif
layout(push_constant, std430) uniform Params {
vec4 section;
vec2 pixel_size;
float luminance_multiplier;
uint flags;
vec4 color;
}
params;
void main() {
vec2 base_arr[4] = vec2[](vec2(0.0, 0.0), vec2(0.0, 1.0), vec2(1.0, 1.0), vec2(1.0, 0.0));
uv_interp.xy = base_arr[gl_VertexIndex];
#ifdef USE_MULTIVIEW
uv_interp.z = ViewIndex;
#endif
vec2 vpos = uv_interp.xy;
if (bool(params.flags & FLAG_USE_SECTION)) {
vpos = params.section.xy + vpos * params.section.zw;
}
gl_Position = vec4(vpos * 2.0 - 1.0, 0.0, 1.0);
if (bool(params.flags & FLAG_FLIP_Y)) {
uv_interp.y = 1.0 - uv_interp.y;
}
if (bool(params.flags & FLAG_USE_SRC_SECTION)) {
uv_interp.xy = params.section.xy + uv_interp.xy * params.section.zw;
}
}
#[fragment]
#version 450
#VERSION_DEFINES
#define FLAG_FLIP_Y (1 << 0)
#define FLAG_USE_SECTION (1 << 1)
#define FLAG_FORCE_LUMINANCE (1 << 2)
#define FLAG_ALPHA_TO_ZERO (1 << 3)
#define FLAG_SRGB (1 << 4)
#define FLAG_ALPHA_TO_ONE (1 << 5)
#define FLAG_LINEAR (1 << 6)
#define FLAG_NORMAL (1 << 7)
layout(push_constant, std430) uniform Params {
vec4 section;
vec2 pixel_size;
float luminance_multiplier;
uint flags;
vec4 color;
}
params;
#ifndef MODE_SET_COLOR
#ifdef USE_MULTIVIEW
layout(location = 0) in vec3 uv_interp;
#else
layout(location = 0) in vec2 uv_interp;
#endif
#ifdef USE_MULTIVIEW
layout(set = 0, binding = 0) uniform sampler2DArray source_color;
#ifdef MODE_TWO_SOURCES
layout(set = 1, binding = 0) uniform sampler2DArray source_depth;
layout(location = 1) out float depth;
#endif /* MODE_TWO_SOURCES */
#else /* USE_MULTIVIEW */
layout(set = 0, binding = 0) uniform sampler2D source_color;
#ifdef MODE_TWO_SOURCES
layout(set = 1, binding = 0) uniform sampler2D source_color2;
#endif /* MODE_TWO_SOURCES */
#endif /* USE_MULTIVIEW */
#endif /* !SET_COLOR */
layout(location = 0) out vec4 frag_color;
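// Standard piecewise sRGB transfer functions (IEC 61966-2-1): a linear segment below ~0.0031308
// linear (0.04045 encoded), and a 1/2.4 exponent curve above it.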
vec3 linear_to_srgb(vec3 color) {
//if going to srgb, clamp from 0 to 1.
color = clamp(color, vec3(0.0), vec3(1.0));
const vec3 a = vec3(0.055f);
return mix((vec3(1.0f) + a) * pow(color.rgb, vec3(1.0f / 2.4f)) - a, 12.92f * color.rgb, lessThan(color.rgb, vec3(0.0031308f)));
}
vec3 srgb_to_linear(vec3 color) {
return mix(pow((color.rgb + vec3(0.055)) * (1.0 / (1.0 + 0.055)), vec3(2.4)), color.rgb * (1.0 / 12.92), lessThan(color.rgb, vec3(0.04045)));
}
void main() {
#ifdef MODE_SET_COLOR
frag_color = params.color;
#else
#ifdef USE_MULTIVIEW
vec3 uv = uv_interp;
#else
vec2 uv = uv_interp;
#endif
#ifdef MODE_PANORAMA_TO_DP
// Note, multiview and panorama should not be mixed at this time
//obtain normal from dual paraboloid uv
#define M_PI 3.14159265359
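// Inverse dual-paraboloid lookup: the top/bottom halves of the texture hold the two hemispheres
// (selected by 'side'), and for uv = d.xy / (1 + d.z) a vector proportional to
// (x, y, 0.5 - 0.5 * (x*x + y*y)) points back along the original direction d, so normalizing it
// recovers the sample direction.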
float side;
uv.y = modf(uv.y * 2.0, side);
side = side * 2.0 - 1.0;
vec3 normal = vec3(uv * 2.0 - 1.0, 0.0);
normal.z = 0.5 - 0.5 * ((normal.x * normal.x) + (normal.y * normal.y));
normal *= -side;
normal = normalize(normal);
//now convert normal to panorama uv
vec2 st = vec2(atan(normal.x, normal.z), acos(normal.y));
if (st.x < 0.0) {
st.x += M_PI * 2.0;
}
uv = st / vec2(M_PI * 2.0, M_PI);
if (side < 0.0) {
//uv.y = 1.0 - uv.y;
uv = 1.0 - uv;
}
#endif /* MODE_PANORAMA_TO_DP */
#ifdef USE_MULTIVIEW
vec4 color = textureLod(source_color, uv, 0.0);
#ifdef MODE_TWO_SOURCES
// In multiview our 2nd input will be our depth map
depth = textureLod(source_depth, uv, 0.0).r;
#endif /* MODE_TWO_SOURCES */
#else /* USE_MULTIVIEW */
vec4 color = textureLod(source_color, uv, 0.0);
#ifdef MODE_TWO_SOURCES
color += textureLod(source_color2, uv, 0.0);
#endif /* MODE_TWO_SOURCES */
#endif /* USE_MULTIVIEW */
if (bool(params.flags & FLAG_FORCE_LUMINANCE)) {
color.rgb = vec3(max(max(color.r, color.g), color.b));
}
if (bool(params.flags & FLAG_ALPHA_TO_ZERO)) {
color.rgb *= color.a;
}
if (bool(params.flags & FLAG_SRGB)) {
color.rgb = linear_to_srgb(color.rgb);
}
if (bool(params.flags & FLAG_ALPHA_TO_ONE)) {
color.a = 1.0;
}
if (bool(params.flags & FLAG_LINEAR)) {
color.rgb = srgb_to_linear(color.rgb);
}
if (bool(params.flags & FLAG_NORMAL)) {
color.rgb = normalize(color.rgb * 2.0 - 1.0) * 0.5 + 0.5;
}
frag_color = color / params.luminance_multiplier;
#endif // MODE_SET_COLOR
}

View File

@@ -0,0 +1,81 @@
#[vertex]
#version 450
#VERSION_DEFINES
layout(push_constant, std430) uniform Params {
float z_far;
float z_near;
vec2 texel_size;
}
params;
layout(location = 0) out vec2 uv_interp;
void main() {
vec2 base_arr[4] = vec2[](vec2(0.0, 0.0), vec2(0.0, 1.0), vec2(1.0, 1.0), vec2(1.0, 0.0));
uv_interp = base_arr[gl_VertexIndex];
gl_Position = vec4(uv_interp * 2.0 - 1.0, 0.0, 1.0);
}
#[fragment]
#version 450
#VERSION_DEFINES
layout(location = 0) in vec2 uv_interp;
layout(set = 0, binding = 0) uniform samplerCube source_cube;
layout(push_constant, std430) uniform Params {
float z_far;
float z_near;
vec2 texel_size;
}
params;
void main() {
vec2 uv = uv_interp;
vec2 texel_size = abs(params.texel_size);
uv = clamp(uv * (1.0 + 2.0 * texel_size) - texel_size, vec2(0.0), vec2(1.0));
vec3 normal = vec3(uv * 2.0 - 1.0, 0.0);
normal.z = 0.5 * (1.0 - dot(normal.xy, normal.xy)); // z = 1/2 - 1/2 * (x^2 + y^2)
normal = normalize(normal);
normal.y = -normal.y; //needs to be flipped to match projection matrix
if (params.texel_size.x >= 0.0) { // Sign is used to encode Z flip
normal.z = -normal.z;
}
float depth = texture(source_cube, normal).r;
// Absolute values of the direction cosines; a bigger value means the direction is closer to that basis axis.
vec3 unorm = abs(normal);
if ((unorm.x >= unorm.y) && (unorm.x >= unorm.z)) {
// x code
unorm = normal.x > 0.0 ? vec3(1.0, 0.0, 0.0) : vec3(-1.0, 0.0, 0.0);
} else if ((unorm.y > unorm.x) && (unorm.y >= unorm.z)) {
// y code
unorm = normal.y > 0.0 ? vec3(0.0, 1.0, 0.0) : vec3(0.0, -1.0, 0.0);
} else if ((unorm.z > unorm.x) && (unorm.z > unorm.y)) {
// z code
unorm = normal.z > 0.0 ? vec3(0.0, 0.0, 1.0) : vec3(0.0, 0.0, -1.0);
} else {
// Should be unreachable; fall back to the X axis.
unorm = vec3(1.0, 0.0, 0.0);
}
float depth_fix = 1.0 / dot(normal, unorm);
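// Note: the cube map stores depth along the face's forward axis, so dividing the linearized depth
// by cos(angle to the dominant axis) converts it into radial distance from the light; the final
// remap below places the far plane at 0 and the light origin at 1.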
depth = 2.0 * depth - 1.0;
float linear_depth = 2.0 * params.z_near * params.z_far / (params.z_far + params.z_near + depth * (params.z_far - params.z_near));
// linear_depth equal to view space depth
depth = (params.z_far - linear_depth * depth_fix) / params.z_far;
gl_FragDepth = depth;
}

View File

@@ -0,0 +1,145 @@
// Copyright 2016 Activision Publishing, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the Software
// is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
#[compute]
#version 450
#VERSION_DEFINES
#define BLOCK_SIZE 8
layout(local_size_x = BLOCK_SIZE, local_size_y = BLOCK_SIZE, local_size_z = 1) in;
layout(set = 0, binding = 0) uniform samplerCube source_cubemap;
layout(rgba16f, set = 1, binding = 0) uniform restrict writeonly imageCube dest_cubemap;
#include "cubemap_downsampler_inc.glsl"
void main() {
uvec3 id = gl_GlobalInvocationID;
uint face_size = params.face_size;
if (id.x < face_size && id.y < face_size) {
float inv_face_size = 1.0 / float(face_size);
float u0 = (float(id.x) * 2.0 + 1.0 - 0.75) * inv_face_size - 1.0;
float u1 = (float(id.x) * 2.0 + 1.0 + 0.75) * inv_face_size - 1.0;
float v0 = (float(id.y) * 2.0 + 1.0 - 0.75) * -inv_face_size + 1.0;
float v1 = (float(id.y) * 2.0 + 1.0 + 0.75) * -inv_face_size + 1.0;
float weights[4];
weights[0] = calcWeight(u0, v0);
weights[1] = calcWeight(u1, v0);
weights[2] = calcWeight(u0, v1);
weights[3] = calcWeight(u1, v1);
const float wsum = 0.5 / (weights[0] + weights[1] + weights[2] + weights[3]);
for (int i = 0; i < 4; i++) {
weights[i] = weights[i] * wsum + .125;
}
vec3 dir;
vec4 color;
switch (id.z) {
case 0:
get_dir_0(dir, u0, v0);
color = textureLod(source_cubemap, normalize(dir), 0.0) * weights[0];
get_dir_0(dir, u1, v0);
color += textureLod(source_cubemap, normalize(dir), 0.0) * weights[1];
get_dir_0(dir, u0, v1);
color += textureLod(source_cubemap, normalize(dir), 0.0) * weights[2];
get_dir_0(dir, u1, v1);
color += textureLod(source_cubemap, normalize(dir), 0.0) * weights[3];
break;
case 1:
get_dir_1(dir, u0, v0);
color = textureLod(source_cubemap, normalize(dir), 0.0) * weights[0];
get_dir_1(dir, u1, v0);
color += textureLod(source_cubemap, normalize(dir), 0.0) * weights[1];
get_dir_1(dir, u0, v1);
color += textureLod(source_cubemap, normalize(dir), 0.0) * weights[2];
get_dir_1(dir, u1, v1);
color += textureLod(source_cubemap, normalize(dir), 0.0) * weights[3];
break;
case 2:
get_dir_2(dir, u0, v0);
color = textureLod(source_cubemap, normalize(dir), 0.0) * weights[0];
get_dir_2(dir, u1, v0);
color += textureLod(source_cubemap, normalize(dir), 0.0) * weights[1];
get_dir_2(dir, u0, v1);
color += textureLod(source_cubemap, normalize(dir), 0.0) * weights[2];
get_dir_2(dir, u1, v1);
color += textureLod(source_cubemap, normalize(dir), 0.0) * weights[3];
break;
case 3:
get_dir_3(dir, u0, v0);
color = textureLod(source_cubemap, normalize(dir), 0.0) * weights[0];
get_dir_3(dir, u1, v0);
color += textureLod(source_cubemap, normalize(dir), 0.0) * weights[1];
get_dir_3(dir, u0, v1);
color += textureLod(source_cubemap, normalize(dir), 0.0) * weights[2];
get_dir_3(dir, u1, v1);
color += textureLod(source_cubemap, normalize(dir), 0.0) * weights[3];
break;
case 4:
get_dir_4(dir, u0, v0);
color = textureLod(source_cubemap, normalize(dir), 0.0) * weights[0];
get_dir_4(dir, u1, v0);
color += textureLod(source_cubemap, normalize(dir), 0.0) * weights[1];
get_dir_4(dir, u0, v1);
color += textureLod(source_cubemap, normalize(dir), 0.0) * weights[2];
get_dir_4(dir, u1, v1);
color += textureLod(source_cubemap, normalize(dir), 0.0) * weights[3];
break;
default:
get_dir_5(dir, u0, v0);
color = textureLod(source_cubemap, normalize(dir), 0.0) * weights[0];
get_dir_5(dir, u1, v0);
color += textureLod(source_cubemap, normalize(dir), 0.0) * weights[1];
get_dir_5(dir, u0, v1);
color += textureLod(source_cubemap, normalize(dir), 0.0) * weights[2];
get_dir_5(dir, u1, v1);
color += textureLod(source_cubemap, normalize(dir), 0.0) * weights[3];
break;
}
imageStore(dest_cubemap, ivec3(id), color);
}
}

View File

@@ -0,0 +1,48 @@
layout(push_constant, std430) uniform Params {
uint face_size;
uint face_id; // only used in raster shader
}
params;
#define M_PI 3.14159265359
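// get_dir_N maps face-local (u, v) in [-1, 1] to a direction on cube face N, following the
// standard +X, -X, +Y, -Y, +Z, -Z face order.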
void get_dir_0(out vec3 dir, in float u, in float v) {
dir[0] = 1.0;
dir[1] = v;
dir[2] = -u;
}
void get_dir_1(out vec3 dir, in float u, in float v) {
dir[0] = -1.0;
dir[1] = v;
dir[2] = u;
}
void get_dir_2(out vec3 dir, in float u, in float v) {
dir[0] = u;
dir[1] = 1.0;
dir[2] = -v;
}
void get_dir_3(out vec3 dir, in float u, in float v) {
dir[0] = u;
dir[1] = -1.0;
dir[2] = v;
}
void get_dir_4(out vec3 dir, in float u, in float v) {
dir[0] = u;
dir[1] = v;
dir[2] = 1.0;
}
void get_dir_5(out vec3 dir, in float u, in float v) {
dir[0] = -u;
dir[1] = v;
dir[2] = -1.0;
}
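// calcWeight returns (1 + u^2 + v^2)^(3/2), the distance term that appears in the per-texel solid
// angle dOmega ~ dA / (1 + u^2 + v^2)^(3/2). The callers rescale the four tap weights to sum to
// 0.5 and add 0.125 to each, so the final weights always total 1.0.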
float calcWeight(float u, float v) {
float val = u * u + v * v + 1.0;
return val * sqrt(val);
}

View File

@@ -0,0 +1,161 @@
// Copyright 2016 Activision Publishing, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the Software
// is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
/* clang-format off */
#[vertex]
#version 450
#VERSION_DEFINES
#include "cubemap_downsampler_inc.glsl"
layout(location = 0) out vec2 uv_interp;
/* clang-format on */
void main() {
vec2 base_arr[3] = vec2[](vec2(-1.0, -1.0), vec2(-1.0, 3.0), vec2(3.0, -1.0));
gl_Position = vec4(base_arr[gl_VertexIndex], 0.0, 1.0);
uv_interp = clamp(gl_Position.xy, vec2(0.0, 0.0), vec2(1.0, 1.0)) * 2.0 * float(params.face_size); // saturate(x) * 2.0
}
/* clang-format off */
#[fragment]
#version 450
#VERSION_DEFINES
#include "cubemap_downsampler_inc.glsl"
layout(set = 0, binding = 0) uniform samplerCube source_cubemap;
layout(location = 0) in vec2 uv_interp;
layout(location = 0) out vec4 frag_color;
/* clang-format on */
void main() {
// Converted from compute shader which uses absolute coordinates.
// Could possibly simplify this
float face_size = float(params.face_size);
float inv_face_size = 1.0 / face_size;
vec2 id = floor(uv_interp);
float u1 = (id.x * 2.0 + 1.0 + 0.75) * inv_face_size - 1.0;
float u0 = (id.x * 2.0 + 1.0 - 0.75) * inv_face_size - 1.0;
float v0 = (id.y * 2.0 + 1.0 - 0.75) * -inv_face_size + 1.0;
float v1 = (id.y * 2.0 + 1.0 + 0.75) * -inv_face_size + 1.0;
float weights[4];
weights[0] = calcWeight(u0, v0);
weights[1] = calcWeight(u1, v0);
weights[2] = calcWeight(u0, v1);
weights[3] = calcWeight(u1, v1);
const float wsum = 0.5 / (weights[0] + weights[1] + weights[2] + weights[3]);
for (int i = 0; i < 4; i++) {
weights[i] = weights[i] * wsum + .125;
}
vec3 dir;
vec4 color;
switch (params.face_id) {
case 0:
get_dir_0(dir, u0, v0);
color = textureLod(source_cubemap, normalize(dir), 0.0) * weights[0];
get_dir_0(dir, u1, v0);
color += textureLod(source_cubemap, normalize(dir), 0.0) * weights[1];
get_dir_0(dir, u0, v1);
color += textureLod(source_cubemap, normalize(dir), 0.0) * weights[2];
get_dir_0(dir, u1, v1);
color += textureLod(source_cubemap, normalize(dir), 0.0) * weights[3];
break;
case 1:
get_dir_1(dir, u0, v0);
color = textureLod(source_cubemap, normalize(dir), 0.0) * weights[0];
get_dir_1(dir, u1, v0);
color += textureLod(source_cubemap, normalize(dir), 0.0) * weights[1];
get_dir_1(dir, u0, v1);
color += textureLod(source_cubemap, normalize(dir), 0.0) * weights[2];
get_dir_1(dir, u1, v1);
color += textureLod(source_cubemap, normalize(dir), 0.0) * weights[3];
break;
case 2:
get_dir_2(dir, u0, v0);
color = textureLod(source_cubemap, normalize(dir), 0.0) * weights[0];
get_dir_2(dir, u1, v0);
color += textureLod(source_cubemap, normalize(dir), 0.0) * weights[1];
get_dir_2(dir, u0, v1);
color += textureLod(source_cubemap, normalize(dir), 0.0) * weights[2];
get_dir_2(dir, u1, v1);
color += textureLod(source_cubemap, normalize(dir), 0.0) * weights[3];
break;
case 3:
get_dir_3(dir, u0, v0);
color = textureLod(source_cubemap, normalize(dir), 0.0) * weights[0];
get_dir_3(dir, u1, v0);
color += textureLod(source_cubemap, normalize(dir), 0.0) * weights[1];
get_dir_3(dir, u0, v1);
color += textureLod(source_cubemap, normalize(dir), 0.0) * weights[2];
get_dir_3(dir, u1, v1);
color += textureLod(source_cubemap, normalize(dir), 0.0) * weights[3];
break;
case 4:
get_dir_4(dir, u0, v0);
color = textureLod(source_cubemap, normalize(dir), 0.0) * weights[0];
get_dir_4(dir, u1, v0);
color += textureLod(source_cubemap, normalize(dir), 0.0) * weights[1];
get_dir_4(dir, u0, v1);
color += textureLod(source_cubemap, normalize(dir), 0.0) * weights[2];
get_dir_4(dir, u1, v1);
color += textureLod(source_cubemap, normalize(dir), 0.0) * weights[3];
break;
default:
get_dir_5(dir, u0, v0);
color = textureLod(source_cubemap, normalize(dir), 0.0) * weights[0];
get_dir_5(dir, u1, v0);
color += textureLod(source_cubemap, normalize(dir), 0.0) * weights[1];
get_dir_5(dir, u0, v1);
color += textureLod(source_cubemap, normalize(dir), 0.0) * weights[2];
get_dir_5(dir, u1, v1);
color += textureLod(source_cubemap, normalize(dir), 0.0) * weights[3];
break;
}
frag_color = color;
}

View File

@@ -0,0 +1,329 @@
// Copyright 2016 Activision Publishing, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the Software
// is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
#[compute]
#version 450
#VERSION_DEFINES
#define GROUP_SIZE 64
layout(local_size_x = GROUP_SIZE, local_size_y = 1, local_size_z = 1) in;
layout(set = 0, binding = 0) uniform samplerCube source_cubemap;
layout(rgba16f, set = 2, binding = 0) uniform restrict writeonly imageCube dest_cubemap0;
layout(rgba16f, set = 2, binding = 1) uniform restrict writeonly imageCube dest_cubemap1;
layout(rgba16f, set = 2, binding = 2) uniform restrict writeonly imageCube dest_cubemap2;
layout(rgba16f, set = 2, binding = 3) uniform restrict writeonly imageCube dest_cubemap3;
layout(rgba16f, set = 2, binding = 4) uniform restrict writeonly imageCube dest_cubemap4;
layout(rgba16f, set = 2, binding = 5) uniform restrict writeonly imageCube dest_cubemap5;
layout(rgba16f, set = 2, binding = 6) uniform restrict writeonly imageCube dest_cubemap6;
#ifdef USE_HIGH_QUALITY
#define NUM_TAPS 32
#else
#define NUM_TAPS 8
#endif
#define BASE_RESOLUTION 128
#ifdef USE_HIGH_QUALITY
layout(set = 1, binding = 0, std430) buffer restrict readonly Data {
vec4[7][5][3][24] coeffs;
}
data;
#else
layout(set = 1, binding = 0, std430) buffer restrict readonly Data {
vec4[7][5][6] coeffs;
}
data;
#endif
void get_dir(out vec3 dir, in vec2 uv, in uint face) {
switch (face) {
case 0:
dir = vec3(1.0, uv[1], -uv[0]);
break;
case 1:
dir = vec3(-1.0, uv[1], uv[0]);
break;
case 2:
dir = vec3(uv[0], 1.0, -uv[1]);
break;
case 3:
dir = vec3(uv[0], -1.0, uv[1]);
break;
case 4:
dir = vec3(uv[0], uv[1], 1.0);
break;
default:
dir = vec3(-uv[0], uv[1], -1.0);
break;
}
}
void main() {
// INPUT:
// id.x = the linear address of the texel (ignoring face)
// id.y = the face
// OUTPUT (used to index the output texture):
// id.x = texel x
// id.y = texel y
// id.z = face
uvec3 id = gl_GlobalInvocationID;
// determine which texel this is
#ifndef USE_TEXTURE_ARRAY
// NOTE (macOS/MoltenVK): Do not rename, "level" variable name conflicts with the Metal "level(float lod)" mipmap sampling function name.
int mip_level = 0;
if (id.x < (128 * 128)) {
mip_level = 0;
} else if (id.x < (128 * 128 + 64 * 64)) {
mip_level = 1;
id.x -= (128 * 128);
} else if (id.x < (128 * 128 + 64 * 64 + 32 * 32)) {
mip_level = 2;
id.x -= (128 * 128 + 64 * 64);
} else if (id.x < (128 * 128 + 64 * 64 + 32 * 32 + 16 * 16)) {
mip_level = 3;
id.x -= (128 * 128 + 64 * 64 + 32 * 32);
} else if (id.x < (128 * 128 + 64 * 64 + 32 * 32 + 16 * 16 + 8 * 8)) {
mip_level = 4;
id.x -= (128 * 128 + 64 * 64 + 32 * 32 + 16 * 16);
} else if (id.x < (128 * 128 + 64 * 64 + 32 * 32 + 16 * 16 + 8 * 8 + 4 * 4)) {
mip_level = 5;
id.x -= (128 * 128 + 64 * 64 + 32 * 32 + 16 * 16 + 8 * 8);
} else if (id.x < (128 * 128 + 64 * 64 + 32 * 32 + 16 * 16 + 8 * 8 + 4 * 4 + 2 * 2)) {
mip_level = 6;
id.x -= (128 * 128 + 64 * 64 + 32 * 32 + 16 * 16 + 8 * 8 + 4 * 4);
} else {
return;
}
int res = BASE_RESOLUTION >> mip_level;
#else // Using Texture Arrays so all levels are the same resolution
int res = BASE_RESOLUTION;
int mip_level = int(id.x / (BASE_RESOLUTION * BASE_RESOLUTION));
id.x -= mip_level * BASE_RESOLUTION * BASE_RESOLUTION;
#endif
// determine dir / pos for the texel
vec3 dir, adir, frameZ;
{
id.z = id.y;
id.y = id.x / res;
id.x -= id.y * res;
vec2 uv;
uv.x = (float(id.x) * 2.0 + 1.0) / float(res) - 1.0;
uv.y = -(float(id.y) * 2.0 + 1.0) / float(res) + 1.0;
get_dir(dir, uv, id.z);
frameZ = normalize(dir);
adir = abs(dir);
}
// GGX gather colors
vec4 color = vec4(0.0);
for (int axis = 0; axis < 3; axis++) {
const int otherAxis0 = 1 - (axis & 1) - (axis >> 1);
const int otherAxis1 = 2 - (axis >> 1);
float frameweight = (max(adir[otherAxis0], adir[otherAxis1]) - .75) / .25;
if (frameweight > 0.0) {
// determine frame
vec3 UpVector;
switch (axis) {
case 0:
UpVector = vec3(1, 0, 0);
break;
case 1:
UpVector = vec3(0, 1, 0);
break;
default:
UpVector = vec3(0, 0, 1);
break;
}
vec3 frameX = normalize(cross(UpVector, frameZ));
vec3 frameY = cross(frameZ, frameX);
// calculate parametrization for polynomial
float Nx = dir[otherAxis0];
float Ny = dir[otherAxis1];
float Nz = adir[axis];
float NmaxXY = max(abs(Ny), abs(Nx));
Nx /= NmaxXY;
Ny /= NmaxXY;
float theta;
if (Ny < Nx) {
if (Ny <= -0.999) {
theta = Nx;
} else {
theta = Ny;
}
} else {
if (Ny >= 0.999) {
theta = -Nx;
} else {
theta = -Ny;
}
}
float phi;
if (Nz <= -0.999) {
phi = -NmaxXY;
} else if (Nz >= 0.999) {
phi = NmaxXY;
} else {
phi = Nz;
}
float theta2 = theta * theta;
float phi2 = phi * phi;
// sample
for (int iSuperTap = 0; iSuperTap < NUM_TAPS / 4; iSuperTap++) {
const int index = (NUM_TAPS / 4) * axis + iSuperTap;
#ifdef USE_HIGH_QUALITY
vec4 coeffsDir0[3];
vec4 coeffsDir1[3];
vec4 coeffsDir2[3];
vec4 coeffsLevel[3];
vec4 coeffsWeight[3];
for (int iCoeff = 0; iCoeff < 3; iCoeff++) {
coeffsDir0[iCoeff] = data.coeffs[mip_level][0][iCoeff][index];
coeffsDir1[iCoeff] = data.coeffs[mip_level][1][iCoeff][index];
coeffsDir2[iCoeff] = data.coeffs[mip_level][2][iCoeff][index];
coeffsLevel[iCoeff] = data.coeffs[mip_level][3][iCoeff][index];
coeffsWeight[iCoeff] = data.coeffs[mip_level][4][iCoeff][index];
}
for (int iSubTap = 0; iSubTap < 4; iSubTap++) {
// determine sample attributes (dir, weight, mip_level)
vec3 sample_dir = frameX * (coeffsDir0[0][iSubTap] + coeffsDir0[1][iSubTap] * theta2 + coeffsDir0[2][iSubTap] * phi2) + frameY * (coeffsDir1[0][iSubTap] + coeffsDir1[1][iSubTap] * theta2 + coeffsDir1[2][iSubTap] * phi2) + frameZ * (coeffsDir2[0][iSubTap] + coeffsDir2[1][iSubTap] * theta2 + coeffsDir2[2][iSubTap] * phi2);
float sample_level = coeffsLevel[0][iSubTap] + coeffsLevel[1][iSubTap] * theta2 + coeffsLevel[2][iSubTap] * phi2;
float sample_weight = coeffsWeight[0][iSubTap] + coeffsWeight[1][iSubTap] * theta2 + coeffsWeight[2][iSubTap] * phi2;
#else
vec4 coeffsDir0 = data.coeffs[mip_level][0][index];
vec4 coeffsDir1 = data.coeffs[mip_level][1][index];
vec4 coeffsDir2 = data.coeffs[mip_level][2][index];
vec4 coeffsLevel = data.coeffs[mip_level][3][index];
vec4 coeffsWeight = data.coeffs[mip_level][4][index];
for (int iSubTap = 0; iSubTap < 4; iSubTap++) {
// determine sample attributes (dir, weight, mip_level)
vec3 sample_dir = frameX * coeffsDir0[iSubTap] + frameY * coeffsDir1[iSubTap] + frameZ * coeffsDir2[iSubTap];
float sample_level = coeffsLevel[iSubTap];
float sample_weight = coeffsWeight[iSubTap];
#endif
sample_weight *= frameweight;
// adjust for jacobian
sample_dir /= max(abs(sample_dir[0]), max(abs(sample_dir[1]), abs(sample_dir[2])));
sample_level += 0.75 * log2(dot(sample_dir, sample_dir));
#ifndef USE_TEXTURE_ARRAY
sample_level += float(mip_level) / 6.0; // Hack to increase the perceived roughness and reduce upscaling artifacts
#endif
// sample cubemap
color.xyz += textureLod(source_cubemap, normalize(sample_dir), sample_level).xyz * sample_weight;
color.w += sample_weight;
}
}
}
}
color /= color.w;
// write color
color.xyz = max(vec3(0.0), color.xyz);
color.w = 1.0;
#ifdef USE_TEXTURE_ARRAY
id.xy *= uvec2(2, 2);
#endif
switch (mip_level) {
case 0:
imageStore(dest_cubemap0, ivec3(id), color);
#ifdef USE_TEXTURE_ARRAY
imageStore(dest_cubemap0, ivec3(id) + ivec3(1.0, 0.0, 0.0), color);
imageStore(dest_cubemap0, ivec3(id) + ivec3(0.0, 1.0, 0.0), color);
imageStore(dest_cubemap0, ivec3(id) + ivec3(1.0, 1.0, 0.0), color);
#endif
break;
case 1:
imageStore(dest_cubemap1, ivec3(id), color);
#ifdef USE_TEXTURE_ARRAY
imageStore(dest_cubemap1, ivec3(id) + ivec3(1.0, 0.0, 0.0), color);
imageStore(dest_cubemap1, ivec3(id) + ivec3(0.0, 1.0, 0.0), color);
imageStore(dest_cubemap1, ivec3(id) + ivec3(1.0, 1.0, 0.0), color);
#endif
break;
case 2:
imageStore(dest_cubemap2, ivec3(id), color);
#ifdef USE_TEXTURE_ARRAY
imageStore(dest_cubemap2, ivec3(id) + ivec3(1.0, 0.0, 0.0), color);
imageStore(dest_cubemap2, ivec3(id) + ivec3(0.0, 1.0, 0.0), color);
imageStore(dest_cubemap2, ivec3(id) + ivec3(1.0, 1.0, 0.0), color);
#endif
break;
case 3:
imageStore(dest_cubemap3, ivec3(id), color);
#ifdef USE_TEXTURE_ARRAY
imageStore(dest_cubemap3, ivec3(id) + ivec3(1.0, 0.0, 0.0), color);
imageStore(dest_cubemap3, ivec3(id) + ivec3(0.0, 1.0, 0.0), color);
imageStore(dest_cubemap3, ivec3(id) + ivec3(1.0, 1.0, 0.0), color);
#endif
break;
case 4:
imageStore(dest_cubemap4, ivec3(id), color);
#ifdef USE_TEXTURE_ARRAY
imageStore(dest_cubemap4, ivec3(id) + ivec3(1.0, 0.0, 0.0), color);
imageStore(dest_cubemap4, ivec3(id) + ivec3(0.0, 1.0, 0.0), color);
imageStore(dest_cubemap4, ivec3(id) + ivec3(1.0, 1.0, 0.0), color);
#endif
break;
case 5:
imageStore(dest_cubemap5, ivec3(id), color);
#ifdef USE_TEXTURE_ARRAY
imageStore(dest_cubemap5, ivec3(id) + ivec3(1.0, 0.0, 0.0), color);
imageStore(dest_cubemap5, ivec3(id) + ivec3(0.0, 1.0, 0.0), color);
imageStore(dest_cubemap5, ivec3(id) + ivec3(1.0, 1.0, 0.0), color);
#endif
break;
default:
imageStore(dest_cubemap6, ivec3(id), color);
#ifdef USE_TEXTURE_ARRAY
imageStore(dest_cubemap6, ivec3(id) + ivec3(1.0, 0.0, 0.0), color);
imageStore(dest_cubemap6, ivec3(id) + ivec3(0.0, 1.0, 0.0), color);
imageStore(dest_cubemap6, ivec3(id) + ivec3(1.0, 1.0, 0.0), color);
#endif
break;
}
}

View File

@@ -0,0 +1,259 @@
// Copyright 2016 Activision Publishing, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the Software
// is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
/* clang-format off */
#[vertex]
#version 450
#VERSION_DEFINES
layout(push_constant, std430) uniform Params {
int mip_level;
uint face_id;
}
params;
layout(location = 0) out vec2 uv_interp;
/* clang-format on */
void main() {
vec2 base_arr[3] = vec2[](vec2(-1.0, -1.0), vec2(-1.0, 3.0), vec2(3.0, -1.0));
gl_Position = vec4(base_arr[gl_VertexIndex], 0.0, 1.0);
uv_interp = clamp(gl_Position.xy, vec2(0.0, 0.0), vec2(1.0, 1.0)) * 2.0; // saturate(x) * 2.0
}
/* clang-format off */
#[fragment]
#version 450
#VERSION_DEFINES
layout(push_constant, std430) uniform Params {
int mip_level;
uint face_id;
}
params;
layout(set = 0, binding = 0) uniform samplerCube source_cubemap;
layout(location = 0) in vec2 uv_interp;
layout(location = 0) out vec4 frag_color;
/* clang-format on */
#ifdef USE_HIGH_QUALITY
#define NUM_TAPS 32
#else
#define NUM_TAPS 8
#endif
#define BASE_RESOLUTION 128
#ifdef USE_HIGH_QUALITY
layout(set = 1, binding = 0, std430) buffer restrict readonly Data {
vec4[7][5][3][24] coeffs;
}
data;
#else
layout(set = 1, binding = 0, std430) buffer restrict readonly Data {
vec4[7][5][6] coeffs;
}
data;
#endif
void get_dir(out vec3 dir, in vec2 uv, in uint face) {
switch (face) {
case 0:
dir = vec3(1.0, uv[1], -uv[0]);
break;
case 1:
dir = vec3(-1.0, uv[1], uv[0]);
break;
case 2:
dir = vec3(uv[0], 1.0, -uv[1]);
break;
case 3:
dir = vec3(uv[0], -1.0, uv[1]);
break;
case 4:
dir = vec3(uv[0], uv[1], 1.0);
break;
default:
dir = vec3(-uv[0], uv[1], -1.0);
break;
}
}
void main() {
// determine dir / pos for the texel
vec3 dir, adir, frameZ;
{
vec2 uv;
uv.x = uv_interp.x;
uv.y = 1.0 - uv_interp.y;
uv = uv * 2.0 - 1.0;
get_dir(dir, uv, params.face_id);
frameZ = normalize(dir);
adir = abs(dir);
}
// determine which texel this is
// NOTE (macOS/MoltenVK): Do not rename, "level" variable name conflicts with the Metal "level(float lod)" mipmap sampling function name.
int mip_level = 0;
if (params.mip_level < 0) {
// return as is
frag_color.rgb = textureLod(source_cubemap, frameZ, 0.0).rgb;
frag_color.a = 1.0;
return;
} else if (params.mip_level > 6) {
// maximum level
mip_level = 6;
} else {
mip_level = params.mip_level;
}
// GGX gather colors
vec4 color = vec4(0.0);
for (int axis = 0; axis < 3; axis++) {
const int otherAxis0 = 1 - (axis & 1) - (axis >> 1);
const int otherAxis1 = 2 - (axis >> 1);
float frameweight = (max(adir[otherAxis0], adir[otherAxis1]) - .75) / .25;
if (frameweight > 0.0) {
// determine frame
vec3 UpVector;
switch (axis) {
case 0:
UpVector = vec3(1, 0, 0);
break;
case 1:
UpVector = vec3(0, 1, 0);
break;
default:
UpVector = vec3(0, 0, 1);
break;
}
vec3 frameX = normalize(cross(UpVector, frameZ));
vec3 frameY = cross(frameZ, frameX);
// calculate parametrization for polynomial
float Nx = dir[otherAxis0];
float Ny = dir[otherAxis1];
float Nz = adir[axis];
float NmaxXY = max(abs(Ny), abs(Nx));
Nx /= NmaxXY;
Ny /= NmaxXY;
float theta;
if (Ny < Nx) {
if (Ny <= -0.999) {
theta = Nx;
} else {
theta = Ny;
}
} else {
if (Ny >= 0.999) {
theta = -Nx;
} else {
theta = -Ny;
}
}
float phi;
if (Nz <= -0.999) {
phi = -NmaxXY;
} else if (Nz >= 0.999) {
phi = NmaxXY;
} else {
phi = Nz;
}
float theta2 = theta * theta;
float phi2 = phi * phi;
// sample
for (int iSuperTap = 0; iSuperTap < NUM_TAPS / 4; iSuperTap++) {
const int index = (NUM_TAPS / 4) * axis + iSuperTap;
#ifdef USE_HIGH_QUALITY
vec4 coeffsDir0[3];
vec4 coeffsDir1[3];
vec4 coeffsDir2[3];
vec4 coeffsLevel[3];
vec4 coeffsWeight[3];
for (int iCoeff = 0; iCoeff < 3; iCoeff++) {
coeffsDir0[iCoeff] = data.coeffs[mip_level][0][iCoeff][index];
coeffsDir1[iCoeff] = data.coeffs[mip_level][1][iCoeff][index];
coeffsDir2[iCoeff] = data.coeffs[mip_level][2][iCoeff][index];
coeffsLevel[iCoeff] = data.coeffs[mip_level][3][iCoeff][index];
coeffsWeight[iCoeff] = data.coeffs[mip_level][4][iCoeff][index];
}
for (int iSubTap = 0; iSubTap < 4; iSubTap++) {
// determine sample attributes (dir, weight, mip_level)
vec3 sample_dir = frameX * (coeffsDir0[0][iSubTap] + coeffsDir0[1][iSubTap] * theta2 + coeffsDir0[2][iSubTap] * phi2) + frameY * (coeffsDir1[0][iSubTap] + coeffsDir1[1][iSubTap] * theta2 + coeffsDir1[2][iSubTap] * phi2) + frameZ * (coeffsDir2[0][iSubTap] + coeffsDir2[1][iSubTap] * theta2 + coeffsDir2[2][iSubTap] * phi2);
float sample_level = coeffsLevel[0][iSubTap] + coeffsLevel[1][iSubTap] * theta2 + coeffsLevel[2][iSubTap] * phi2;
float sample_weight = coeffsWeight[0][iSubTap] + coeffsWeight[1][iSubTap] * theta2 + coeffsWeight[2][iSubTap] * phi2;
#else
vec4 coeffsDir0 = data.coeffs[mip_level][0][index];
vec4 coeffsDir1 = data.coeffs[mip_level][1][index];
vec4 coeffsDir2 = data.coeffs[mip_level][2][index];
vec4 coeffsLevel = data.coeffs[mip_level][3][index];
vec4 coeffsWeight = data.coeffs[mip_level][4][index];
for (int iSubTap = 0; iSubTap < 4; iSubTap++) {
// determine sample attributes (dir, weight, mip_level)
vec3 sample_dir = frameX * coeffsDir0[iSubTap] + frameY * coeffsDir1[iSubTap] + frameZ * coeffsDir2[iSubTap];
float sample_level = coeffsLevel[iSubTap];
float sample_weight = coeffsWeight[iSubTap];
#endif
sample_weight *= frameweight;
// adjust for jacobian
sample_dir /= max(abs(sample_dir[0]), max(abs(sample_dir[1]), abs(sample_dir[2])));
sample_level += 0.75 * log2(dot(sample_dir, sample_dir));
// sample cubemap
color.xyz += textureLod(source_cubemap, normalize(sample_dir), sample_level).xyz * sample_weight;
color.w += sample_weight;
}
}
}
}
color /= color.w;
// write color
color.xyz = max(vec3(0.0), color.xyz);
color.w = 1.0;
frag_color = color;
}

View File

@@ -0,0 +1,63 @@
#[compute]
#version 450
#VERSION_DEFINES
#define GROUP_SIZE 8
layout(local_size_x = GROUP_SIZE, local_size_y = GROUP_SIZE, local_size_z = 1) in;
layout(set = 0, binding = 0) uniform samplerCube source_cube;
layout(rgba16f, set = 1, binding = 0) uniform restrict writeonly imageCube dest_cubemap;
#include "cubemap_roughness_inc.glsl"
void main() {
uvec3 id = gl_GlobalInvocationID;
id.z += params.face_id;
vec2 uv = ((vec2(id.xy) * 2.0 + 1.0) / (params.face_size) - 1.0);
vec3 N = texelCoordToVec(uv, id.z);
if (params.use_direct_write) {
imageStore(dest_cubemap, ivec3(id), vec4(texture(source_cube, N).rgb, 1.0));
} else {
vec4 sum = vec4(0.0, 0.0, 0.0, 0.0);
float solid_angle_texel = 4.0 * M_PI / (6.0 * params.face_size * params.face_size);
float roughness2 = params.roughness * params.roughness;
float roughness4 = roughness2 * roughness2;
vec3 UpVector = abs(N.z) < 0.999 ? vec3(0.0, 0.0, 1.0) : vec3(1.0, 0.0, 0.0);
mat3 T;
T[0] = normalize(cross(UpVector, N));
T[1] = cross(N, T[0]);
T[2] = N;
for (uint sampleNum = 0u; sampleNum < params.sample_count; sampleNum++) {
vec2 xi = Hammersley(sampleNum, params.sample_count);
vec3 H = T * ImportanceSampleGGX(xi, roughness4);
float NdotH = dot(N, H);
vec3 L = (2.0 * NdotH * H - N);
float ndotl = clamp(dot(N, L), 0.0, 1.0);
if (ndotl > 0.0) {
float D = DistributionGGX(NdotH, roughness4);
float pdf = D * NdotH / (4.0 * NdotH) + 0.0001;
float solid_angle_sample = 1.0 / (float(params.sample_count) * pdf + 0.0001);
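				// Filtered importance sampling: pick the source mip so that the solid angle covered
				// by this sample (~ 1 / (sample_count * pdf)) roughly matches the solid angle of a
				// texel at that mip, trading a little blur for much less sampling noise.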
float mipLevel = params.roughness == 0.0 ? 0.0 : 0.5 * log2(solid_angle_sample / solid_angle_texel);
sum.rgb += textureLod(source_cube, L, mipLevel).rgb * ndotl;
sum.a += ndotl;
}
}
sum /= sum.a;
imageStore(dest_cubemap, ivec3(id), vec4(sum.rgb, 1.0));
}
}

View File

@@ -0,0 +1,84 @@
#define M_PI 3.14159265359
layout(push_constant, std430) uniform Params {
uint face_id;
uint sample_count;
float roughness;
bool use_direct_write;
float face_size;
}
params;
vec3 texelCoordToVec(vec2 uv, uint faceID) {
mat3 faceUvVectors[6];
// -x
faceUvVectors[1][0] = vec3(0.0, 0.0, 1.0); // u -> +z
faceUvVectors[1][1] = vec3(0.0, -1.0, 0.0); // v -> -y
faceUvVectors[1][2] = vec3(-1.0, 0.0, 0.0); // -x face
// +x
faceUvVectors[0][0] = vec3(0.0, 0.0, -1.0); // u -> -z
faceUvVectors[0][1] = vec3(0.0, -1.0, 0.0); // v -> -y
faceUvVectors[0][2] = vec3(1.0, 0.0, 0.0); // +x face
// -y
faceUvVectors[3][0] = vec3(1.0, 0.0, 0.0); // u -> +x
faceUvVectors[3][1] = vec3(0.0, 0.0, -1.0); // v -> -z
faceUvVectors[3][2] = vec3(0.0, -1.0, 0.0); // -y face
// +y
faceUvVectors[2][0] = vec3(1.0, 0.0, 0.0); // u -> +x
faceUvVectors[2][1] = vec3(0.0, 0.0, 1.0); // v -> +z
faceUvVectors[2][2] = vec3(0.0, 1.0, 0.0); // +y face
// -z
faceUvVectors[5][0] = vec3(-1.0, 0.0, 0.0); // u -> -x
faceUvVectors[5][1] = vec3(0.0, -1.0, 0.0); // v -> -y
faceUvVectors[5][2] = vec3(0.0, 0.0, -1.0); // -z face
// +z
faceUvVectors[4][0] = vec3(1.0, 0.0, 0.0); // u -> +x
faceUvVectors[4][1] = vec3(0.0, -1.0, 0.0); // v -> -y
faceUvVectors[4][2] = vec3(0.0, 0.0, 1.0); // +z face
// out = u * s_faceUv[0] + v * s_faceUv[1] + s_faceUv[2].
vec3 result = (faceUvVectors[faceID][0] * uv.x) + (faceUvVectors[faceID][1] * uv.y) + faceUvVectors[faceID][2];
return normalize(result);
}
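// Maps a uniform 2D sample `xi` to a half-vector around +Z whose distribution follows the GGX
// NDF with alpha^2 = roughness4: Phi is uniform in [0, 2*PI) and CosTheta inverts the GGX CDF.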
vec3 ImportanceSampleGGX(vec2 xi, float roughness4) {
// Compute distribution direction
float Phi = 2.0 * M_PI * xi.x;
float CosTheta = sqrt((1.0 - xi.y) / (1.0 + (roughness4 - 1.0) * xi.y));
float SinTheta = sqrt(1.0 - CosTheta * CosTheta);
// Convert to spherical direction
vec3 H;
H.x = SinTheta * cos(Phi);
H.y = SinTheta * sin(Phi);
H.z = CosTheta;
return H;
}
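// Trowbridge-Reitz / GGX normal distribution function, expressed in terms of alpha^2
// (`roughness4`, where alpha = roughness^2):
//   D(h) = alpha^2 / (PI * ((N.H)^2 * (alpha^2 - 1) + 1)^2)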
float DistributionGGX(float NdotH, float roughness4) {
float NdotH2 = NdotH * NdotH;
float denom = (NdotH2 * (roughness4 - 1.0) + 1.0);
denom = M_PI * denom * denom;
return roughness4 / denom;
}
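// Van der Corput radical inverse: reverse the bits of `i` and read them as a fraction in [0, 1),
// e.g. 1 -> 0.5, 2 -> 0.25, 3 -> 0.75. Hammersley(i, N) pairs this with i/N to form a
// low-discrepancy 2D point set for the importance sampling above.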
float radicalInverse_VdC(uint bits) {
bits = (bits << 16u) | (bits >> 16u);
bits = ((bits & 0x55555555u) << 1u) | ((bits & 0xAAAAAAAAu) >> 1u);
bits = ((bits & 0x33333333u) << 2u) | ((bits & 0xCCCCCCCCu) >> 2u);
bits = ((bits & 0x0F0F0F0Fu) << 4u) | ((bits & 0xF0F0F0F0u) >> 4u);
bits = ((bits & 0x00FF00FFu) << 8u) | ((bits & 0xFF00FF00u) >> 8u);
return float(bits) * 2.3283064365386963e-10; // / 0x100000000
}
vec2 Hammersley(uint i, uint N) {
return vec2(float(i) / float(N), radicalInverse_VdC(i));
}

View File

@@ -0,0 +1,79 @@
/* clang-format off */
#[vertex]
#version 450
#VERSION_DEFINES
#include "cubemap_roughness_inc.glsl"
layout(location = 0) out vec2 uv_interp;
/* clang-format on */
void main() {
vec2 base_arr[3] = vec2[](vec2(-1.0, -1.0), vec2(-1.0, 3.0), vec2(3.0, -1.0));
gl_Position = vec4(base_arr[gl_VertexIndex], 0.0, 1.0);
uv_interp = clamp(gl_Position.xy, vec2(0.0, 0.0), vec2(1.0, 1.0)) * 2.0; // saturate(x) * 2.0
}
/* clang-format off */
#[fragment]
#version 450
#VERSION_DEFINES
#include "cubemap_roughness_inc.glsl"
layout(location = 0) in vec2 uv_interp;
layout(set = 0, binding = 0) uniform samplerCube source_cube;
layout(location = 0) out vec4 frag_color;
/* clang-format on */
void main() {
vec3 N = texelCoordToVec(uv_interp * 2.0 - 1.0, params.face_id);
//vec4 color = color_interp;
if (params.use_direct_write) {
frag_color = vec4(texture(source_cube, N).rgb, 1.0);
} else {
vec4 sum = vec4(0.0, 0.0, 0.0, 0.0);
float solid_angle_texel = 4.0 * M_PI / (6.0 * params.face_size * params.face_size);
float roughness2 = params.roughness * params.roughness;
float roughness4 = roughness2 * roughness2;
vec3 UpVector = abs(N.z) < 0.999 ? vec3(0.0, 0.0, 1.0) : vec3(1.0, 0.0, 0.0);
mat3 T;
T[0] = normalize(cross(UpVector, N));
T[1] = cross(N, T[0]);
T[2] = N;
for (uint sampleNum = 0u; sampleNum < params.sample_count; sampleNum++) {
vec2 xi = Hammersley(sampleNum, params.sample_count);
vec3 H = T * ImportanceSampleGGX(xi, roughness4);
float NdotH = dot(N, H);
vec3 L = (2.0 * NdotH * H - N);
float ndotl = clamp(dot(N, L), 0.0, 1.0);
if (ndotl > 0.0) {
float D = DistributionGGX(NdotH, roughness4);
float pdf = D * NdotH / (4.0 * NdotH) + 0.0001;
float solid_angle_sample = 1.0 / (float(params.sample_count) * pdf + 0.0001);
float mipLevel = params.roughness == 0.0 ? 0.0 : 0.5 * log2(solid_angle_sample / solid_angle_texel);
sum.rgb += textureLod(source_cube, L, mipLevel).rgb * ndotl;
sum.a += ndotl;
}
}
sum /= sum.a;
frag_color = vec4(sum.rgb, 1.0);
}
}

View File

@@ -0,0 +1,23 @@
#!/usr/bin/env python
from misc.utility.scons_hints import *
Import("env")
if "RD_GLSL" in env["BUILDERS"]:
# find all include files
gl_include_files = [str(f) for f in Glob("*_inc.glsl")] + [str(f) for f in Glob("../*_inc.glsl")]
# Add all FSR2 shader and header files.
fsr2_dir = "#thirdparty/amd-fsr2/shaders"
gl_include_files += [str(f) for f in Glob(fsr2_dir + "/*.h")]
gl_include_files += [str(f) for f in Glob(fsr2_dir + "/*.glsl")]
# find all shader code (all glsl files excluding our include files)
glsl_files = [str(f) for f in Glob("*.glsl") if str(f) not in gl_include_files]
# make sure we recompile shaders if include files change
env.Depends([f + ".gen.h" for f in glsl_files], gl_include_files + ["#glsl_builders.py"])
# compile shaders
for glsl_file in glsl_files:
env.RD_GLSL(glsl_file)

View File

@@ -0,0 +1,8 @@
#[compute]
#version 450
#VERSION_DEFINES
#include "../motion_vector_inc.glsl"
#include "thirdparty/amd-fsr2/shaders/ffx_fsr2_accumulate_pass.glsl"

View File

@@ -0,0 +1,8 @@
#[compute]
#version 450
#VERSION_DEFINES
#include "../motion_vector_inc.glsl"
#include "thirdparty/amd-fsr2/shaders/ffx_fsr2_autogen_reactive_pass.glsl"

View File

@@ -0,0 +1,7 @@
#[compute]
#version 450
#VERSION_DEFINES
#include "thirdparty/amd-fsr2/shaders/ffx_fsr2_compute_luminance_pyramid_pass.glsl"

View File

@@ -0,0 +1,8 @@
#[compute]
#version 450
#VERSION_DEFINES
#include "../motion_vector_inc.glsl"
#include "thirdparty/amd-fsr2/shaders/ffx_fsr2_depth_clip_pass.glsl"

View File

@@ -0,0 +1,7 @@
#[compute]
#version 450
#VERSION_DEFINES
#include "thirdparty/amd-fsr2/shaders/ffx_fsr2_lock_pass.glsl"

View File

@@ -0,0 +1,7 @@
#[compute]
#version 450
#VERSION_DEFINES
#include "thirdparty/amd-fsr2/shaders/ffx_fsr2_rcas_pass.glsl"

View File

@@ -0,0 +1,8 @@
#[compute]
#version 450
#VERSION_DEFINES
#include "../motion_vector_inc.glsl"
#include "thirdparty/amd-fsr2/shaders/ffx_fsr2_reconstruct_previous_depth_pass.glsl"

View File

@@ -0,0 +1,8 @@
#[compute]
#version 450
#VERSION_DEFINES
#include "../motion_vector_inc.glsl"
#include "thirdparty/amd-fsr2/shaders/ffx_fsr2_tcr_autogen_pass.glsl"

View File

@@ -0,0 +1,173 @@
/**************************************************************************/
/* fsr_upscale.glsl */
/**************************************************************************/
/* This file is part of: */
/* GODOT ENGINE */
/* https://godotengine.org */
/**************************************************************************/
/* Copyright (c) 2014-present Godot Engine contributors (see AUTHORS.md). */
/* Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur. */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/
#[compute]
#version 450
#VERSION_DEFINES
#define A_GPU
#define A_GLSL
#ifdef MODE_FSR_UPSCALE_NORMAL
#define A_HALF
#endif
#include "thirdparty/amd-fsr/ffx_a.h"
layout(local_size_x = 64, local_size_y = 1, local_size_z = 1) in;
layout(rgba16f, set = 1, binding = 0) uniform restrict writeonly image2D fsr_image;
layout(set = 0, binding = 0) uniform sampler2D source_image;
#define FSR_UPSCALE_PASS_TYPE_EASU 0
#define FSR_UPSCALE_PASS_TYPE_RCAS 1
layout(push_constant, std430) uniform Params {
float resolution_width;
float resolution_height;
float upscaled_width;
float upscaled_height;
float sharpness;
int pass;
}
params;
AU4 Const0, Const1, Const2, Const3;
#ifdef MODE_FSR_UPSCALE_FALLBACK
#define FSR_EASU_F
AF4 FsrEasuRF(AF2 p) {
AF4 res = textureGather(source_image, p, 0);
return res;
}
AF4 FsrEasuGF(AF2 p) {
AF4 res = textureGather(source_image, p, 1);
return res;
}
AF4 FsrEasuBF(AF2 p) {
AF4 res = textureGather(source_image, p, 2);
return res;
}
#define FSR_RCAS_F
AF4 FsrRcasLoadF(ASU2 p) {
return AF4(texelFetch(source_image, ASU2(p), 0));
}
void FsrRcasInputF(inout AF1 r, inout AF1 g, inout AF1 b) {}
#else
#define FSR_EASU_H
AH4 FsrEasuRH(AF2 p) {
AH4 res = AH4(textureGather(source_image, p, 0));
return res;
}
AH4 FsrEasuGH(AF2 p) {
AH4 res = AH4(textureGather(source_image, p, 1));
return res;
}
AH4 FsrEasuBH(AF2 p) {
AH4 res = AH4(textureGather(source_image, p, 2));
return res;
}
#define FSR_RCAS_H
AH4 FsrRcasLoadH(ASW2 p) {
return AH4(texelFetch(source_image, ASU2(p), 0));
}
void FsrRcasInputH(inout AH1 r, inout AH1 g, inout AH1 b) {}
#endif
#include "thirdparty/amd-fsr/ffx_fsr1.h"
void fsr_easu_pass(AU2 pos) {
#ifdef MODE_FSR_UPSCALE_NORMAL
AH3 Gamma2Color = AH3(0, 0, 0);
FsrEasuH(Gamma2Color, pos, Const0, Const1, Const2, Const3);
imageStore(fsr_image, ASU2(pos), AH4(Gamma2Color, 1));
#else
AF3 Gamma2Color = AF3(0, 0, 0);
FsrEasuF(Gamma2Color, pos, Const0, Const1, Const2, Const3);
imageStore(fsr_image, ASU2(pos), AF4(Gamma2Color, 1));
#endif
}
void fsr_rcas_pass(AU2 pos) {
#ifdef MODE_FSR_UPSCALE_NORMAL
AH3 Gamma2Color = AH3(0, 0, 0);
FsrRcasH(Gamma2Color.r, Gamma2Color.g, Gamma2Color.b, pos, Const0);
imageStore(fsr_image, ASU2(pos), AH4(Gamma2Color, 1));
#else
AF3 Gamma2Color = AF3(0, 0, 0);
FsrRcasF(Gamma2Color.r, Gamma2Color.g, Gamma2Color.b, pos, Const0);
imageStore(fsr_image, ASU2(pos), AF4(Gamma2Color, 1));
#endif
}
void fsr_pass(AU2 pos) {
if (params.pass == FSR_UPSCALE_PASS_TYPE_EASU) {
fsr_easu_pass(pos);
} else if (params.pass == FSR_UPSCALE_PASS_TYPE_RCAS) {
fsr_rcas_pass(pos);
}
}
void main() {
	// Clang does not like unused functions: if ffx_a.h is included but its helpers are never called, the shader fails to compile, so we must configure FSR in this shader.
if (params.pass == FSR_UPSCALE_PASS_TYPE_EASU) {
FsrEasuCon(Const0, Const1, Const2, Const3, params.resolution_width, params.resolution_height, params.resolution_width, params.resolution_height, params.upscaled_width, params.upscaled_height);
} else if (params.pass == FSR_UPSCALE_PASS_TYPE_RCAS) {
FsrRcasCon(Const0, params.sharpness);
}
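	// Each 64-thread workgroup covers a 16x16 pixel tile: ARmp8x8 swizzles the linear thread
	// index into an 8x8 block, and the offsets below give every thread one pixel in each of the
	// four 8x8 quadrants of the tile.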
AU2 gxy = ARmp8x8(gl_LocalInvocationID.x) + AU2(gl_WorkGroupID.x << 4u, gl_WorkGroupID.y << 4u);
fsr_pass(gxy);
gxy.x += 8u;
fsr_pass(gxy);
gxy.y += 8u;
fsr_pass(gxy);
gxy.x -= 8u;
fsr_pass(gxy);
}

View File

@@ -0,0 +1,82 @@
#[compute]
#version 450
#VERSION_DEFINES
#define BLOCK_SIZE 8
layout(local_size_x = BLOCK_SIZE, local_size_y = BLOCK_SIZE, local_size_z = 1) in;
shared float tmp_data[BLOCK_SIZE * BLOCK_SIZE];
#ifdef READ_TEXTURE
//use for main texture
layout(set = 0, binding = 0) uniform sampler2D source_texture;
#else
//use for intermediate textures
layout(r32f, set = 0, binding = 0) uniform restrict readonly image2D source_luminance;
#endif
layout(r32f, set = 1, binding = 0) uniform restrict writeonly image2D dest_luminance;
#ifdef WRITE_LUMINANCE
layout(set = 2, binding = 0) uniform sampler2D prev_luminance;
#endif
layout(push_constant, std430) uniform Params {
ivec2 source_size;
float max_luminance;
float min_luminance;
float exposure_adjust;
float pad[3];
}
params;
void main() {
uint t = gl_LocalInvocationID.y * BLOCK_SIZE + gl_LocalInvocationID.x;
ivec2 pos = ivec2(gl_GlobalInvocationID.xy);
if (any(lessThan(pos, params.source_size))) {
#ifdef READ_TEXTURE
vec3 v = texelFetch(source_texture, pos, 0).rgb;
tmp_data[t] = max(v.r, max(v.g, v.b));
#else
tmp_data[t] = imageLoad(source_luminance, pos).r;
#endif
} else {
tmp_data[t] = 0.0;
}
groupMemoryBarrier();
barrier();
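	// Parallel tree reduction over the 8x8 tile: each pass folds the upper half of tmp_data onto
	// the lower half and halves `size`, so after log2(64) = 6 passes tmp_data[0] holds the sum of
	// all 64 values.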
uint size = (BLOCK_SIZE * BLOCK_SIZE) >> 1;
do {
if (t < size) {
tmp_data[t] += tmp_data[t + size];
}
groupMemoryBarrier();
barrier();
size >>= 1;
} while (size >= 1);
if (t == 0) {
//compute rect size
ivec2 rect_size = min(params.source_size - pos, ivec2(BLOCK_SIZE));
float avg = tmp_data[0] / float(rect_size.x * rect_size.y);
//float avg = tmp_data[0] / float(BLOCK_SIZE*BLOCK_SIZE);
pos /= ivec2(BLOCK_SIZE);
#ifdef WRITE_LUMINANCE
float prev_lum = texelFetch(prev_luminance, ivec2(0, 0), 0).r; //1 pixel previous exposure
avg = clamp(prev_lum + (avg - prev_lum) * params.exposure_adjust, params.min_luminance, params.max_luminance);
#endif
imageStore(dest_luminance, pos, vec4(avg));
}
}

View File

@@ -0,0 +1,73 @@
/* clang-format off */
#[vertex]
#version 450
#VERSION_DEFINES
#include "luminance_reduce_raster_inc.glsl"
layout(location = 0) out vec2 uv_interp;
/* clang-format on */
void main() {
vec2 base_arr[3] = vec2[](vec2(-1.0, -1.0), vec2(-1.0, 3.0), vec2(3.0, -1.0));
gl_Position = vec4(base_arr[gl_VertexIndex], 0.0, 1.0);
uv_interp = clamp(gl_Position.xy, vec2(0.0, 0.0), vec2(1.0, 1.0)) * 2.0; // saturate(x) * 2.0
}
/* clang-format off */
#[fragment]
#version 450
#VERSION_DEFINES
#include "luminance_reduce_raster_inc.glsl"
layout(location = 0) in vec2 uv_interp;
/* clang-format on */
layout(set = 0, binding = 0) uniform sampler2D source_exposure;
#ifdef FINAL_PASS
layout(set = 1, binding = 0) uniform sampler2D prev_luminance;
#endif
layout(location = 0) out highp float luminance;
void main() {
ivec2 dest_pos = ivec2(uv_interp * settings.dest_size);
ivec2 src_pos = ivec2(uv_interp * settings.source_size);
ivec2 next_pos = (dest_pos + ivec2(1)) * settings.source_size / settings.dest_size;
next_pos = max(next_pos, src_pos + ivec2(1)); //so it at least reads one pixel
highp vec3 source_color = vec3(0.0);
for (int i = src_pos.x; i < next_pos.x; i++) {
for (int j = src_pos.y; j < next_pos.y; j++) {
source_color += texelFetch(source_exposure, ivec2(i, j), 0).rgb;
}
}
source_color /= float((next_pos.x - src_pos.x) * (next_pos.y - src_pos.y));
#ifdef FIRST_PASS
luminance = max(source_color.r, max(source_color.g, source_color.b));
// This formula should be more "accurate" but gave an overexposed result when testing.
// Leaving it here so we can revisit it if we want.
// luminance = source_color.r * 0.21 + source_color.g * 0.71 + source_color.b * 0.07;
#else
luminance = source_color.r;
#endif
#ifdef FINAL_PASS
// Obtain our target luminance
luminance = clamp(luminance, settings.min_luminance, settings.max_luminance);
// Now smooth to our transition
highp float prev_lum = texelFetch(prev_luminance, ivec2(0, 0), 0).r; //1 pixel previous luminance
luminance = prev_lum + (luminance - prev_lum) * clamp(settings.exposure_adjust, 0.0, 1.0);
#endif
}

View File

@@ -0,0 +1,10 @@
layout(push_constant, std430) uniform PushConstant {
ivec2 source_size;
ivec2 dest_size;
float exposure_adjust;
float min_luminance;
float max_luminance;
uint pad1;
}
settings;

View File

@@ -0,0 +1,6 @@
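// Rebuilds the clip-space position from (uv, depth), transforms it by the caller-provided
// reprojection matrix (expected to map current clip space into the previous frame's clip space),
// and returns the resulting UV-space delta, i.e. previous_uv - current_uv.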
vec2 derive_motion_vector(vec2 uv, float depth, mat4 reprojection_matrix) {
vec4 previous_pos_ndc = reprojection_matrix * vec4(uv * 2.0f - 1.0f, depth * 2.0f - 1.0f, 1.0f);
return 0.5f + (previous_pos_ndc.xy / previous_pos_ndc.w) * 0.5f - uv;
}
#define FFX_FSR2_OPTION_GODOT_DERIVE_INVALID_MOTION_VECTORS_FUNCTION(i, j, k) derive_motion_vector(i, j, k)

View File

@@ -0,0 +1,97 @@
#[vertex]
#version 450
#VERSION_DEFINES
layout(location = 0) out vec2 uv_interp;
void main() {
vec2 base_arr[3] = vec2[](vec2(-1.0, -1.0), vec2(-1.0, 3.0), vec2(3.0, -1.0));
gl_Position = vec4(base_arr[gl_VertexIndex], 0.0, 1.0);
uv_interp = clamp(gl_Position.xy, vec2(0.0, 0.0), vec2(1.0, 1.0)) * 2.0; // saturate(x) * 2.0
}
#[fragment]
#version 450
#VERSION_DEFINES
#include "motion_vector_inc.glsl"
layout(location = 0) in vec2 uv_interp;
layout(set = 0, binding = 0) uniform sampler2D source_velocity;
layout(set = 0, binding = 1) uniform sampler2D source_depth;
layout(location = 0) out vec4 frag_color;
layout(push_constant, std430) uniform Params {
highp mat4 reprojection_matrix;
vec2 resolution;
bool force_derive_from_depth;
}
params;
// Based on distance to line segment from https://www.shadertoy.com/view/3tdSDj
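// Returns an aspect-corrected distance from p to the segment a-b, scaled so that a value of 1.0
// corresponds to roughly two pixels; main() uses (1.0 - distance) as alpha to draw antialiased
// motion-vector lines about two pixels wide.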
float line_segment(in vec2 p, in vec2 a, in vec2 b) {
vec2 aspect = vec2(params.resolution.x / params.resolution.y, 1.0f);
vec2 ba = (b - a) * aspect;
vec2 pa = (p - a) * aspect;
float h = clamp(dot(pa, ba) / dot(ba, ba), 0.0f, 1.0f);
return length(pa - h * ba) * (params.resolution.y / 2.0f);
}
void main() {
// Retrieve motion vector data.
float cell_size = 32.0f;
float circle_radius = 2.0f;
vec3 nan_color = vec3(1.0f, 0.0f, 0.0f);
vec3 active_color = vec3(1.0f, 0.8f, 0.1f);
vec3 inactive_color = vec3(0.5f, 0.5f, 0.5f);
vec2 pos_pixel = uv_interp * params.resolution;
vec2 cell_pos_pixel = floor(pos_pixel / cell_size) * cell_size + (cell_size * 0.5f);
vec2 cell_pos_uv = cell_pos_pixel / params.resolution;
vec2 cell_pos_velocity = textureLod(source_velocity, cell_pos_uv, 0.0f).xy;
bool derive_velocity = params.force_derive_from_depth || all(lessThanEqual(cell_pos_velocity, vec2(-1.0f, -1.0f)));
if (derive_velocity) {
float depth = textureLod(source_depth, cell_pos_uv, 0.0f).x;
cell_pos_velocity = derive_motion_vector(cell_pos_uv, depth, params.reprojection_matrix);
}
vec2 cell_pos_previous_uv = cell_pos_uv + cell_pos_velocity;
// Draw the shapes.
float epsilon = 1e-6f;
vec2 cell_pos_delta_uv = cell_pos_uv - cell_pos_previous_uv;
bool motion_active = length(cell_pos_delta_uv) > epsilon;
vec3 color;
if (any(isnan(cell_pos_delta_uv))) {
color = nan_color;
} else if (motion_active) {
color = active_color;
} else {
color = inactive_color;
}
float alpha;
if (length(cell_pos_pixel - pos_pixel) <= circle_radius) {
// Circle center.
alpha = 1.0f;
} else if (motion_active) {
// Motion vector line.
alpha = 1.0f - line_segment(uv_interp, cell_pos_uv, cell_pos_previous_uv);
} else {
// Ignore pixel.
alpha = 0.0f;
}
if (derive_velocity) {
color = vec3(1.0f, 1.0f, 1.0f) - color;
alpha *= 0.5f;
}
frag_color = vec4(color, alpha);
}

View File

@@ -0,0 +1,32 @@
#[compute]
#version 450
#VERSION_DEFINES
#include "motion_vector_inc.glsl"
layout(local_size_x = 8, local_size_y = 8, local_size_z = 1) in;
layout(set = 0, binding = 0) uniform sampler2D depth_buffer;
layout(rg16f, set = 0, binding = 1) uniform restrict writeonly image2D velocity_buffer;
layout(push_constant, std430) uniform Params {
highp mat4 reprojection_matrix;
vec2 resolution;
uint pad[2];
}
params;
void main() {
// Out of bounds check.
if (any(greaterThanEqual(vec2(gl_GlobalInvocationID.xy), params.resolution))) {
return;
}
ivec2 pos = ivec2(gl_GlobalInvocationID.xy);
float depth = texelFetch(depth_buffer, pos, 0).x;
vec2 uv = (vec2(pos) + 0.5f) / params.resolution;
vec2 velocity = derive_motion_vector(uv, depth, params.reprojection_matrix);
imageStore(velocity_buffer, pos, vec4(velocity, 0.0f, 0.0f));
}

View File

@@ -0,0 +1,236 @@
#[compute]
#version 450
#VERSION_DEFINES
layout(local_size_x = 8, local_size_y = 8, local_size_z = 1) in;
#ifdef MODE_RESOLVE_DEPTH
layout(set = 0, binding = 0) uniform sampler2DMS source_depth;
layout(r32f, set = 1, binding = 0) uniform restrict writeonly image2D dest_depth;
#endif
#ifdef MODE_RESOLVE_GI
layout(set = 0, binding = 0) uniform sampler2DMS source_depth;
layout(set = 0, binding = 1) uniform sampler2DMS source_normal_roughness;
layout(r32f, set = 1, binding = 0) uniform restrict writeonly image2D dest_depth;
layout(rgba8, set = 1, binding = 1) uniform restrict writeonly image2D dest_normal_roughness;
#ifdef VOXEL_GI_RESOLVE
layout(set = 2, binding = 0) uniform usampler2DMS source_voxel_gi;
layout(rg8ui, set = 3, binding = 0) uniform restrict writeonly uimage2D dest_voxel_gi;
#endif
#endif
layout(push_constant, std430) uniform Params {
ivec2 screen_size;
int sample_count;
uint pad;
}
params;
void main() {
// Pixel being shaded
ivec2 pos = ivec2(gl_GlobalInvocationID.xy);
if (any(greaterThanEqual(pos, params.screen_size))) { //too large, do nothing
return;
}
#ifdef MODE_RESOLVE_DEPTH
float depth_avg = 0.0;
for (int i = 0; i < params.sample_count; i++) {
depth_avg += texelFetch(source_depth, pos, i).r;
}
depth_avg /= float(params.sample_count);
imageStore(dest_depth, pos, vec4(depth_avg));
#endif
#ifdef MODE_RESOLVE_GI
float best_depth = 1e20;
vec4 best_normal_roughness = vec4(0.0);
#ifdef VOXEL_GI_RESOLVE
uvec2 best_voxel_gi;
#endif
#if 0
for(int i=0;i<params.sample_count;i++) {
float depth = texelFetch(source_depth,pos,i).r;
if (depth < best_depth) { //use the depth closest to camera
best_depth = depth;
best_normal_roughness = texelFetch(source_normal_roughness,pos,i);
#ifdef VOXEL_GI_RESOLVE
best_voxel_gi = texelFetch(source_voxel_gi,pos,i).rg;
#endif
}
}
#else
#if 1
vec4 group1;
vec4 group2;
vec4 group3;
vec4 group4;
int best_index = 0;
//2X
group1.x = texelFetch(source_depth, pos, 0).r;
group1.y = texelFetch(source_depth, pos, 1).r;
//4X
if (params.sample_count >= 4) {
group1.z = texelFetch(source_depth, pos, 2).r;
group1.w = texelFetch(source_depth, pos, 3).r;
}
//8X
if (params.sample_count >= 8) {
group2.x = texelFetch(source_depth, pos, 4).r;
group2.y = texelFetch(source_depth, pos, 5).r;
group2.z = texelFetch(source_depth, pos, 6).r;
group2.w = texelFetch(source_depth, pos, 7).r;
}
//16X
if (params.sample_count >= 16) {
group3.x = texelFetch(source_depth, pos, 8).r;
group3.y = texelFetch(source_depth, pos, 9).r;
group3.z = texelFetch(source_depth, pos, 10).r;
group3.w = texelFetch(source_depth, pos, 11).r;
group4.x = texelFetch(source_depth, pos, 12).r;
group4.y = texelFetch(source_depth, pos, 13).r;
group4.z = texelFetch(source_depth, pos, 14).r;
group4.w = texelFetch(source_depth, pos, 15).r;
}
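	// The branches below pick the sample whose depth occurs least often among the MSAA samples
	// (the "minority" sample, which at geometry edges tends to belong to the partially covered
	// surface) and resolve the GI attributes from that single sample instead of averaging.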
if (params.sample_count == 2) {
best_index = (pos.x & 1) ^ ((pos.y >> 1) & 1); //not much can be done here
} else if (params.sample_count == 4) {
vec4 freq = vec4(equal(group1, vec4(group1.x)));
freq += vec4(equal(group1, vec4(group1.y)));
freq += vec4(equal(group1, vec4(group1.z)));
freq += vec4(equal(group1, vec4(group1.w)));
float min_f = freq.x;
best_index = 0;
if (freq.y < min_f) {
best_index = 1;
min_f = freq.y;
}
if (freq.z < min_f) {
best_index = 2;
min_f = freq.z;
}
if (freq.w < min_f) {
best_index = 3;
}
} else if (params.sample_count == 8) {
vec4 freq0 = vec4(equal(group1, vec4(group1.x)));
vec4 freq1 = vec4(equal(group2, vec4(group1.x)));
freq0 += vec4(equal(group1, vec4(group1.y)));
freq1 += vec4(equal(group2, vec4(group1.y)));
freq0 += vec4(equal(group1, vec4(group1.z)));
freq1 += vec4(equal(group2, vec4(group1.z)));
freq0 += vec4(equal(group1, vec4(group1.w)));
freq1 += vec4(equal(group2, vec4(group1.w)));
freq0 += vec4(equal(group1, vec4(group2.x)));
freq1 += vec4(equal(group2, vec4(group2.x)));
freq0 += vec4(equal(group1, vec4(group2.y)));
freq1 += vec4(equal(group2, vec4(group2.y)));
freq0 += vec4(equal(group1, vec4(group2.z)));
freq1 += vec4(equal(group2, vec4(group2.z)));
freq0 += vec4(equal(group1, vec4(group2.w)));
freq1 += vec4(equal(group2, vec4(group2.w)));
float min_f0 = freq0.x;
int best_index0 = 0;
if (freq0.y < min_f0) {
best_index0 = 1;
min_f0 = freq0.y;
}
if (freq0.z < min_f0) {
best_index0 = 2;
min_f0 = freq0.z;
}
if (freq0.w < min_f0) {
best_index0 = 3;
min_f0 = freq0.w;
}
float min_f1 = freq1.x;
int best_index1 = 4;
if (freq1.y < min_f1) {
best_index1 = 5;
min_f1 = freq1.y;
}
if (freq1.z < min_f1) {
best_index1 = 6;
min_f1 = freq1.z;
}
if (freq1.w < min_f1) {
best_index1 = 7;
min_f1 = freq1.w;
}
best_index = mix(best_index0, best_index1, min_f0 < min_f1);
}
#else
float depths[16];
int depth_indices[16];
int depth_amount[16];
int depth_count = 0;
for (int i = 0; i < params.sample_count; i++) {
float depth = texelFetch(source_depth, pos, i).r;
int depth_index = -1;
for (int j = 0; j < depth_count; j++) {
if (abs(depths[j] - depth) < 0.000001) {
depth_index = j;
break;
}
}
if (depth_index == -1) {
depths[depth_count] = depth;
depth_indices[depth_count] = i;
depth_amount[depth_count] = 1;
depth_count += 1;
} else {
depth_amount[depth_index] += 1;
}
}
int depth_least = 0xFFFF;
int best_index = 0;
for (int j = 0; j < depth_count; j++) {
if (depth_amount[j] < depth_least) {
best_index = depth_indices[j];
depth_least = depth_amount[j];
}
}
#endif
best_depth = texelFetch(source_depth, pos, best_index).r;
best_normal_roughness = texelFetch(source_normal_roughness, pos, best_index);
#ifdef VOXEL_GI_RESOLVE
best_voxel_gi = texelFetch(source_voxel_gi, pos, best_index).rg;
#endif
#endif
imageStore(dest_depth, pos, vec4(best_depth));
imageStore(dest_normal_roughness, pos, vec4(best_normal_roughness));
#ifdef VOXEL_GI_RESOLVE
imageStore(dest_voxel_gi, pos, uvec4(best_voxel_gi, 0, 0));
#endif
#endif
}

View File

@@ -0,0 +1,70 @@
#[compute]
#version 450
#VERSION_DEFINES
layout(local_size_x = 8, local_size_y = 8, local_size_z = 1) in;
layout(set = 0, binding = 0) uniform sampler2D source_normal;
layout(r8, set = 1, binding = 0) uniform restrict writeonly image2D dest_roughness;
layout(push_constant, std430) uniform Params {
ivec2 screen_size;
float curve;
uint pad;
}
params;
#define HALF_PI 1.5707963267948966
void main() {
// Pixel being shaded
ivec2 pos = ivec2(gl_GlobalInvocationID.xy);
if (any(greaterThan(pos, params.screen_size))) { //too large, do nothing
return;
}
vec3 normal_accum = vec3(0.0);
float accum = 0.0;
for (int i = 0; i <= 1; i++) {
for (int j = 0; j <= 1; j++) {
normal_accum += normalize(texelFetch(source_normal, pos + ivec2(i, j), 0).xyz * 2.0 - 1.0);
accum += 1.0;
}
}
normal_accum /= accum;
float r = length(normal_accum);
float limit;
if (r < 1.0) {
float threshold = 0.4;
/*
//Formula from Filament, does not make sense to me.
float r2 = r * r;
float kappa = (3.0f * r - r * r2) / (1.0f - r2);
float variance = 0.25f / kappa;
limit = sqrt(min(2.0f * variance, threshold * threshold));
*/
/*
//Formula based on probability distribution graph
float width = acos(max(0.0,r)); // convert to angle (width)
float roughness = pow(width,1.7)*0.854492; //approximate (crappy) formula to convert to roughness
limit = min(sqrt(roughness), threshold); //convert to perceptual roughness and apply threshold
*/
limit = min(sqrt(pow(acos(max(0.0, r)) / HALF_PI, params.curve)), threshold); //convert to perceptual roughness and apply threshold
//limit = 0.5;
} else {
limit = 0.0;
}
imageStore(dest_roughness, pos, vec4(limit));
}

View File

@@ -0,0 +1,297 @@
#[compute]
#version 450
#VERSION_DEFINES
layout(local_size_x = 8, local_size_y = 8, local_size_z = 1) in;
layout(rgba16f, set = 0, binding = 0) uniform restrict readonly image2D source_diffuse;
layout(r32f, set = 0, binding = 1) uniform restrict readonly image2D source_depth;
layout(rgba16f, set = 1, binding = 0) uniform restrict writeonly image2D ssr_image;
#ifdef MODE_ROUGH
layout(r8, set = 1, binding = 1) uniform restrict writeonly image2D blur_radius_image;
#endif
layout(rgba8, set = 2, binding = 0) uniform restrict readonly image2D source_normal_roughness;
layout(set = 3, binding = 0) uniform sampler2D source_metallic;
layout(push_constant, std430) uniform Params {
vec4 proj_info;
ivec2 screen_size;
float camera_z_near;
float camera_z_far;
int num_steps;
float depth_tolerance;
float distance_fade;
float curve_fade_in;
bool orthogonal;
float filter_mipmap_levels;
bool use_half_res;
uint view_index;
}
params;
#include "screen_space_reflection_inc.glsl"
vec2 view_to_screen(vec3 view_pos, out float w) {
vec4 projected = scene_data.projection[params.view_index] * vec4(view_pos, 1.0);
projected.xyz /= projected.w;
projected.xy = projected.xy * 0.5 + 0.5;
w = projected.w;
return projected.xy;
}
#define M_PI 3.14159265359
void main() {
// Pixel being shaded
ivec2 ssC = ivec2(gl_GlobalInvocationID.xy);
if (any(greaterThanEqual(ssC.xy, params.screen_size))) { //too large, do nothing
return;
}
vec2 pixel_size = 1.0 / vec2(params.screen_size);
vec2 uv = vec2(ssC.xy) * pixel_size;
uv += pixel_size * 0.5;
float base_depth = imageLoad(source_depth, ssC).r;
// World space point being shaded
vec3 vertex = reconstructCSPosition(uv * vec2(params.screen_size), base_depth);
vec4 normal_roughness = imageLoad(source_normal_roughness, ssC);
vec3 normal = normalize(normal_roughness.xyz * 2.0 - 1.0);
float roughness = normal_roughness.w;
if (roughness > 0.5) {
roughness = 1.0 - roughness;
}
roughness /= (127.0 / 255.0);
// The roughness cutoff of 0.6 is chosen to match the roughness fadeout from GH-69828.
if (roughness > 0.6) {
// Do not compute SSR for rough materials to improve performance at the cost of
// subtle artifacting.
#ifdef MODE_ROUGH
imageStore(blur_radius_image, ssC, vec4(0.0));
#endif
imageStore(ssr_image, ssC, vec4(0.0));
return;
}
normal = normalize(normal);
normal.y = -normal.y; //because this code reads flipped
vec3 view_dir;
if (sc_multiview) {
view_dir = normalize(vertex + scene_data.eye_offset[params.view_index].xyz);
} else {
view_dir = params.orthogonal ? vec3(0.0, 0.0, -1.0) : normalize(vertex);
}
vec3 ray_dir = normalize(reflect(view_dir, normal));
if (dot(ray_dir, normal) < 0.001) {
imageStore(ssr_image, ssC, vec4(0.0));
return;
}
////////////////
// make ray length and clip it against the near plane (don't want to trace beyond visible)
float ray_len = (vertex.z + ray_dir.z * params.camera_z_far) > -params.camera_z_near ? (-params.camera_z_near - vertex.z) / ray_dir.z : params.camera_z_far;
vec3 ray_end = vertex + ray_dir * ray_len;
float w_begin;
vec2 vp_line_begin = view_to_screen(vertex, w_begin);
float w_end;
vec2 vp_line_end = view_to_screen(ray_end, w_end);
vec2 vp_line_dir = vp_line_end - vp_line_begin;
// we need to interpolate w along the ray, to generate perspective correct reflections
w_begin = 1.0 / w_begin;
w_end = 1.0 / w_end;
float z_begin = vertex.z * w_begin;
float z_end = ray_end.z * w_end;
vec2 line_begin = vp_line_begin / pixel_size;
vec2 line_dir = vp_line_dir / pixel_size;
float z_dir = z_end - z_begin;
float w_dir = w_end - w_begin;
// clip the line to the viewport edges
float scale_max_x = min(1.0, 0.99 * (1.0 - vp_line_begin.x) / max(1e-5, vp_line_dir.x));
float scale_max_y = min(1.0, 0.99 * (1.0 - vp_line_begin.y) / max(1e-5, vp_line_dir.y));
float scale_min_x = min(1.0, 0.99 * vp_line_begin.x / max(1e-5, -vp_line_dir.x));
float scale_min_y = min(1.0, 0.99 * vp_line_begin.y / max(1e-5, -vp_line_dir.y));
float line_clip = min(scale_max_x, scale_max_y) * min(scale_min_x, scale_min_y);
line_dir *= line_clip;
z_dir *= line_clip;
w_dir *= line_clip;
// clip z and w advance to line advance
vec2 line_advance = normalize(line_dir); // down to pixel
float step_size = 1.0 / length(line_dir);
float z_advance = z_dir * step_size; // adapt z advance to line advance
float w_advance = w_dir * step_size; // adapt w advance to line advance
// make line advance faster if direction is closer to pixel edges (this avoids sampling the same pixel twice)
float advance_angle_adj = 1.0 / max(abs(line_advance.x), abs(line_advance.y));
line_advance *= advance_angle_adj; // adapt z advance to line advance
z_advance *= advance_angle_adj;
w_advance *= advance_angle_adj;
vec2 pos = line_begin;
float z = z_begin;
float w = w_begin;
float z_from = z / w;
float z_to = z_from;
float depth;
vec2 prev_pos = pos;
if (ivec2(pos + line_advance - 0.5) == ssC) {
		// Rounding can cause the first pixel we check to be the very pixel we are reflecting from,
		// so make sure we skip it.
pos += line_advance;
z += z_advance;
w += w_advance;
}
bool found = false;
float steps_taken = 0.0;
for (int i = 0; i < params.num_steps; i++) {
pos += line_advance;
z += z_advance;
w += w_advance;
// convert to linear depth
ivec2 test_pos = ivec2(pos - 0.5);
depth = imageLoad(source_depth, test_pos).r;
if (sc_multiview) {
depth = depth * 2.0 - 1.0;
depth = 2.0 * params.camera_z_near * params.camera_z_far / (params.camera_z_far + params.camera_z_near - depth * (params.camera_z_far - params.camera_z_near));
depth = -depth;
}
z_from = z_to;
z_to = z / w;
if (depth > z_to) {
			// Test if our ray is hitting the "right" side of the surface; if not, we're likely self-reflecting and should skip.
vec4 test_normal_roughness = imageLoad(source_normal_roughness, test_pos);
vec3 test_normal = test_normal_roughness.xyz * 2.0 - 1.0;
test_normal = normalize(test_normal);
test_normal.y = -test_normal.y; // Because this code reads flipped.
if (dot(ray_dir, test_normal) < 0.001) {
// if depth was surpassed
if (depth <= max(z_to, z_from) + params.depth_tolerance && -depth < params.camera_z_far * 0.95) {
// check the depth tolerance and far clip
// check that normal is valid
found = true;
}
break;
}
}
steps_taken += 1.0;
prev_pos = pos;
}
if (found) {
float margin_blend = 1.0;
vec2 final_pos = pos;
vec2 margin = vec2((params.screen_size.x + params.screen_size.y) * 0.05); // make a uniform margin
if (any(bvec4(lessThan(pos, vec2(0.0, 0.0)), greaterThan(pos, params.screen_size)))) {
// clip at the screen edges
imageStore(ssr_image, ssC, vec4(0.0));
return;
}
{
//blend fading out towards inner margin
// 0.5 = midpoint of reflection
vec2 margin_grad = mix(params.screen_size - pos, pos, lessThan(pos, params.screen_size * 0.5));
margin_blend = smoothstep(0.0, margin.x * margin.y, margin_grad.x * margin_grad.y);
//margin_blend = 1.0;
}
// Fade In / Fade Out
float grad = (steps_taken + 1.0) / float(params.num_steps);
float initial_fade = params.curve_fade_in == 0.0 ? 1.0 : pow(clamp(grad, 0.0, 1.0), params.curve_fade_in);
float fade = pow(clamp(1.0 - grad, 0.0, 1.0), params.distance_fade) * initial_fade;
// Ensure that precision errors do not introduce any fade. Even if it is just slightly below 1.0,
// strong specular light can leak through the reflection.
if (fade > 0.999) {
fade = 1.0;
}
// This is an ad-hoc term to fade out the SSR as roughness increases. Values used
// are meant to match the visual appearance of a ReflectionProbe.
float roughness_fade = smoothstep(0.4, 0.7, 1.0 - roughness);
// Schlick term.
float metallic = texelFetch(source_metallic, ssC << 1, 0).w;
// F0 is the reflectance of normally incident light (perpendicular to the surface).
// Dielectric materials have a widely accepted default value of 0.04. We assume that metals reflect all light, so their F0 is 1.0.
float f0 = mix(0.04, 1.0, metallic);
float m = clamp(1.0 - dot(normal, -view_dir), 0.0, 1.0);
float m2 = m * m;
m = m2 * m2 * m; // pow(m,5)
float fresnel_term = f0 + (1.0 - f0) * m; // Fresnel Schlick term.
// The alpha value of final_color controls the blending with specular light in specular_merge.glsl.
// Note that the Fresnel term is multiplied with the RGB color instead of being a part of the alpha value.
// There is a key difference:
// - multiplying a term with RGB darkens the SSR light without introducing/taking away specular light.
// - combining a term into the Alpha value introduces specular light at the expense of the SSR light.
vec4 final_color = vec4(imageLoad(source_diffuse, ivec2(final_pos - 0.5)).rgb * fresnel_term, fade * margin_blend * roughness_fade);
imageStore(ssr_image, ssC, final_color);
#ifdef MODE_ROUGH
// if roughness is enabled, do screen space cone tracing
float blur_radius = 0.0;
if (roughness > 0.001) {
float cone_angle = min(roughness, 0.999) * M_PI * 0.5;
float cone_len = length(final_pos - line_begin);
float op_len = 2.0 * tan(cone_angle) * cone_len; // opposite side of iso triangle
{
// fit to sphere inside cone (sphere ends at end of cone), something like this:
// ___
// \O/
// V
//
// as it avoids bleeding from beyond the reflection as much as possible. As a plus
// it also makes the rough reflection more elongated.
float a = op_len;
float h = cone_len;
float a2 = a * a;
float fh2 = 4.0f * h * h;
blur_radius = (a * (sqrt(a2 + fh2) - a)) / (4.0f * h);
}
}
imageStore(blur_radius_image, ssC, vec4(blur_radius / 255.0)); //stored in r8
#endif // MODE_ROUGH
} else {
#ifdef MODE_ROUGH
imageStore(blur_radius_image, ssC, vec4(0.0));
#endif
imageStore(ssr_image, ssC, vec4(0.0));
}
}

View File

@@ -0,0 +1,148 @@
#[compute]
#version 450
#VERSION_DEFINES
layout(local_size_x = 8, local_size_y = 8, local_size_z = 1) in;
layout(rgba16f, set = 0, binding = 0) uniform restrict readonly image2D source_ssr;
layout(r8, set = 0, binding = 1) uniform restrict readonly image2D source_radius;
layout(rgba8, set = 1, binding = 0) uniform restrict readonly image2D source_normal;
layout(rgba16f, set = 2, binding = 0) uniform restrict writeonly image2D dest_ssr;
#ifndef VERTICAL_PASS
layout(r8, set = 2, binding = 1) uniform restrict writeonly image2D dest_radius;
#endif
layout(r32f, set = 3, binding = 0) uniform restrict readonly image2D source_depth;
layout(push_constant, std430) uniform Params {
vec4 proj_info;
bool orthogonal;
float edge_tolerance;
int increment;
uint view_index;
ivec2 screen_size;
bool vertical;
uint steps;
}
params;
#include "screen_space_reflection_inc.glsl"
#define GAUSS_TABLE_SIZE 15
const float gauss_table[GAUSS_TABLE_SIZE + 1] = float[](
0.1847392078702266,
0.16595854345772326,
0.12031364177766891,
0.07038755277896766,
0.03322925565155569,
0.012657819729901945,
0.0038903040680094217,
0.0009646503390864025,
0.00019297087402915717,
0.000031139936308099136,
0.000004053309048174758,
4.255228059965837e-7,
3.602517634249573e-8,
2.4592560765896795e-9,
1.3534945386863618e-10,
0.0 //one more for interpolation
);
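// Looks up the precomputed Gaussian falloff: p_val in [0, 1) is scaled into the table and the two
// neighboring entries are linearly interpolated (the trailing 0.0 entry lets the last real entry
// interpolate toward zero).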
float gauss_weight(float p_val) {
float idxf;
float c = modf(max(0.0, p_val * float(GAUSS_TABLE_SIZE)), idxf);
int idx = int(idxf);
if (idx >= GAUSS_TABLE_SIZE + 1) {
return 0.0;
}
return mix(gauss_table[idx], gauss_table[idx + 1], c);
}
#define M_PI 3.14159265359
void do_filter(inout vec4 accum, inout float accum_radius, inout float divisor, ivec2 texcoord, ivec2 increment, vec3 p_pos, vec3 normal, float p_limit_radius) {
for (int i = 1; i < params.steps; i++) {
float d = float(i * params.increment);
ivec2 tc = texcoord + increment * i;
float depth = imageLoad(source_depth, tc).r;
vec3 view_pos = reconstructCSPosition(vec2(tc) + 0.5, depth);
vec3 view_normal = normalize(imageLoad(source_normal, tc).rgb * 2.0 - 1.0);
view_normal.y = -view_normal.y;
float r = imageLoad(source_radius, tc).r;
float radius = round(r * 255.0);
float angle_n = 1.0 - abs(dot(normal, view_normal));
if (angle_n > params.edge_tolerance) {
break;
}
float angle = abs(dot(normal, normalize(view_pos - p_pos)));
if (angle > params.edge_tolerance) {
break;
}
if (d < radius) {
float w = gauss_weight(d / radius);
accum += imageLoad(source_ssr, tc) * w;
#ifndef VERTICAL_PASS
accum_radius += r * w;
#endif
divisor += w;
}
}
}
void main() {
// Pixel being shaded
ivec2 ssC = ivec2(gl_GlobalInvocationID.xy);
if (any(greaterThanEqual(ssC.xy, params.screen_size))) { //too large, do nothing
return;
}
float base_contrib = gauss_table[0];
vec4 accum = imageLoad(source_ssr, ssC);
float accum_radius = imageLoad(source_radius, ssC).r;
float radius = accum_radius * 255.0;
float divisor = gauss_table[0];
accum *= divisor;
accum_radius *= divisor;
#ifdef VERTICAL_PASS
ivec2 direction = ivec2(0, params.increment);
#else
ivec2 direction = ivec2(params.increment, 0);
#endif
float depth = imageLoad(source_depth, ssC).r;
vec3 pos = reconstructCSPosition(vec2(ssC.xy) + 0.5, depth);
vec3 normal = imageLoad(source_normal, ssC).xyz * 2.0 - 1.0;
normal = normalize(normal);
normal.y = -normal.y;
do_filter(accum, accum_radius, divisor, ssC.xy, direction, pos, normal, radius);
do_filter(accum, accum_radius, divisor, ssC.xy, -direction, pos, normal, radius);
if (divisor > 0.0) {
accum /= divisor;
accum_radius /= divisor;
} else {
accum = vec4(0.0);
accum_radius = 0.0;
}
imageStore(dest_ssr, ssC, accum);
#ifndef VERTICAL_PASS
imageStore(dest_radius, ssC, vec4(accum_radius));
#endif
}

View File

@@ -0,0 +1,28 @@
layout(constant_id = 0) const bool sc_multiview = false;
layout(set = 4, binding = 0, std140) uniform SceneData {
mat4x4 projection[2];
mat4x4 inv_projection[2];
vec4 eye_offset[2];
}
scene_data;
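// Recovers the view-space position of a pixel. With multiview enabled the raw depth value is
// fully unprojected through inv_projection; otherwise `z` is assumed to already be a linear
// view-space depth and only the precomputed proj_info factors are applied (scaled by z for
// perspective projections).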
vec3 reconstructCSPosition(vec2 screen_pos, float z) {
if (sc_multiview) {
vec4 pos;
pos.xy = (2.0 * vec2(screen_pos) / vec2(params.screen_size)) - 1.0;
pos.z = z * 2.0 - 1.0;
pos.w = 1.0;
pos = scene_data.inv_projection[params.view_index] * pos;
pos.xyz /= pos.w;
return pos.xyz;
} else {
if (params.orthogonal) {
return vec3(-(screen_pos.xy * params.proj_info.xy + params.proj_info.zw), z);
} else {
return vec3((screen_pos.xy * params.proj_info.xy + params.proj_info.zw) * z, z);
}
}
}

View File

@@ -0,0 +1,112 @@
#[compute]
#version 450
#VERSION_DEFINES
layout(local_size_x = 8, local_size_y = 8, local_size_z = 1) in;
/* Specialization Constants (Toggles) */
layout(constant_id = 0) const bool sc_multiview = false;
/* inputs */
layout(set = 0, binding = 0) uniform sampler2D source_ssr;
layout(set = 1, binding = 0) uniform sampler2D source_depth;
layout(set = 1, binding = 1) uniform sampler2D source_normal;
layout(rgba16f, set = 2, binding = 0) uniform restrict writeonly image2D dest_ssr;
layout(r32f, set = 3, binding = 0) uniform restrict writeonly image2D dest_depth;
layout(rgba8, set = 3, binding = 1) uniform restrict writeonly image2D dest_normal;
layout(push_constant, std430) uniform Params {
ivec2 screen_size;
float camera_z_near;
float camera_z_far;
bool orthogonal;
bool filtered;
uint pad[2];
}
params;
void main() {
// Pixel being shaded
ivec2 ssC = ivec2(gl_GlobalInvocationID.xy);
if (any(greaterThanEqual(ssC.xy, params.screen_size))) { //too large, do nothing
return;
}
	// Filtering the half-resolution inputs can make SSR generate artifacts, so it is only done when explicitly requested via params.filtered.
float divisor = 0.0;
vec4 color;
float depth;
vec4 normal;
if (params.filtered) {
color = vec4(0.0);
depth = 0.0;
normal = vec4(0.0);
for (int i = 0; i < 4; i++) {
ivec2 ofs = ssC << 1;
if (bool(i & 1)) {
ofs.x += 1;
}
if (bool(i & 2)) {
ofs.y += 1;
}
color += texelFetch(source_ssr, ofs, 0);
float d = texelFetch(source_depth, ofs, 0).r;
vec4 nr = texelFetch(source_normal, ofs, 0);
normal.xyz += normalize(nr.xyz * 2.0 - 1.0);
			float roughness = nr.w; // Use the roughness packed in the freshly fetched normal_roughness texel, not the accumulator.
if (roughness > 0.5) {
roughness = 1.0 - roughness;
}
roughness /= (127.0 / 255.0);
normal.w += roughness;
if (sc_multiview) {
// we're doing a full unproject so we need the value as is.
depth += d;
} else {
// unproject our Z value so we can use it directly.
d = d * 2.0 - 1.0;
if (params.orthogonal) {
d = ((d + (params.camera_z_far + params.camera_z_near) / (params.camera_z_far - params.camera_z_near)) * (params.camera_z_far - params.camera_z_near)) / 2.0;
} else {
d = 2.0 * params.camera_z_near * params.camera_z_far / (params.camera_z_far + params.camera_z_near - d * (params.camera_z_far - params.camera_z_near));
}
depth += -d;
}
}
color /= 4.0;
depth /= 4.0;
normal.xyz = normalize(normal.xyz / 4.0) * 0.5 + 0.5;
normal.w /= 4.0;
normal.w = normal.w * (127.0 / 255.0);
} else {
ivec2 ofs = ssC << 1;
color = texelFetch(source_ssr, ofs, 0);
depth = texelFetch(source_depth, ofs, 0).r;
normal = texelFetch(source_normal, ofs, 0);
if (!sc_multiview) {
// unproject our Z value so we can use it directly.
depth = depth * 2.0 - 1.0;
if (params.orthogonal) {
depth = -(depth * (params.camera_z_far - params.camera_z_near) - (params.camera_z_far + params.camera_z_near)) / 2.0;
} else {
depth = 2.0 * params.camera_z_near * params.camera_z_far / (params.camera_z_far + params.camera_z_near + depth * (params.camera_z_far - params.camera_z_near));
}
depth = -depth;
}
}
imageStore(dest_ssr, ssC, color);
imageStore(dest_depth, ssC, vec4(depth));
imageStore(dest_normal, ssC, normal);
}

View File

@@ -0,0 +1,41 @@
/* clang-format off */
#[vertex]
#version 450
#VERSION_DEFINES
/* clang-format on */
layout(push_constant, std430) uniform Info {
mat4 mvp;
vec4 color;
}
info;
layout(location = 0) in vec3 vertex_attrib;
void main() {
vec4 vertex = info.mvp * vec4(vertex_attrib, 1.0);
vertex.xyz /= vertex.w;
gl_Position = vec4(vertex.xy, 0.0, 1.0);
}
/* clang-format off */
#[fragment]
#version 450
#VERSION_DEFINES
layout(push_constant, std430) uniform Info {
mat4 mvp;
vec4 color;
}
info;
layout(location = 0) out vec4 frag_color;
void main() {
frag_color = info.color;
}

View File

@@ -0,0 +1,150 @@
/**
* Copyright (C) 2013 Jorge Jimenez (jorge@iryoku.com)
* Copyright (C) 2013 Jose I. Echevarria (joseignacioechevarria@gmail.com)
* Copyright (C) 2013 Belen Masia (bmasia@unizar.es)
* Copyright (C) 2013 Fernando Navarro (fernandn@microsoft.com)
* Copyright (C) 2013 Diego Gutierrez (diegog@unizar.es)
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is furnished to
* do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software. As clarification, there
* is no requirement that the copyright notice and permission be included in
* binary distributions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#[vertex]
#version 450
layout(location = 0) out vec2 tex_coord;
layout(location = 1) out vec4 offset;
layout(push_constant, std430) uniform Params {
vec2 inv_size;
vec2 pad;
}
params;
void main() {
vec2 vertex_base;
if (gl_VertexIndex == 0) {
vertex_base = vec2(-1.0, -1.0);
} else if (gl_VertexIndex == 1) {
vertex_base = vec2(-1.0, 3.0);
} else {
vertex_base = vec2(3.0, -1.0);
}
gl_Position = vec4(vertex_base, 0.0, 1.0);
tex_coord = clamp(vertex_base, vec2(0.0, 0.0), vec2(1.0, 1.0)) * 2.0; // saturate(x) * 2.0
offset = fma(params.inv_size.xyxy, vec4(1.0, 0.0, 0.0, 1.0), tex_coord.xyxy);
}
#[fragment]
#version 450
layout(location = 0) in vec2 tex_coord;
layout(location = 1) in vec4 offset;
layout(set = 0, binding = 0) uniform sampler2D color_tex;
layout(set = 1, binding = 0) uniform sampler2D blend_tex;
layout(location = 0) out vec4 out_color;
#define FLAG_USE_8_BIT_DEBANDING (1 << 0)
#define FLAG_USE_10_BIT_DEBANDING (1 << 1)
layout(push_constant, std430) uniform Params {
vec2 inv_size;
uint flags;
float pad;
}
params;
#define textureLinear(tex, uv) srgb_to_linear(textureLod(tex, uv, 0.0).rgb)
vec3 linear_to_srgb(vec3 color) {
// If going to srgb, clamp from 0 to 1.
color = clamp(color, vec3(0.0), vec3(1.0));
const vec3 a = vec3(0.055f);
return mix((vec3(1.0f) + a) * pow(color.rgb, vec3(1.0f / 2.4f)) - a, 12.92f * color.rgb, lessThan(color.rgb, vec3(0.0031308f)));
}
vec3 srgb_to_linear(vec3 color) {
return mix(pow((color.rgb + vec3(0.055)) * (1.0 / (1.0 + 0.055)), vec3(2.4)), color.rgb * (1.0 / 12.92), lessThan(color.rgb, vec3(0.04045)));
}
void SMAAMovc(bvec2 cond, inout vec2 variable, vec2 value) {
if (cond.x) {
variable.x = value.x;
}
if (cond.y) {
variable.y = value.y;
}
}
void SMAAMovc(bvec4 cond, inout vec4 variable, vec4 value) {
SMAAMovc(cond.xy, variable.xy, value.xy);
SMAAMovc(cond.zw, variable.zw, value.zw);
}
// From https://alex.vlachos.com/graphics/Alex_Vlachos_Advanced_VR_Rendering_GDC2015.pdf
// and https://www.shadertoy.com/view/MslGR8 (5th one starting from the bottom)
// NOTE: `frag_coord` is in pixels (i.e. not normalized UV).
// This dithering must be applied after encoding changes (linear/nonlinear) have been applied
// as the final step before quantization from floating point to integer values.
vec3 screen_space_dither(vec2 frag_coord, float bit_alignment_diviser) {
// Iestyn's RGB dither (7 asm instructions) from Portal 2 X360, slightly modified for VR.
// Removed the time component to avoid passing time into this shader.
vec3 dither = vec3(dot(vec2(171.0, 231.0), frag_coord));
dither.rgb = fract(dither.rgb / vec3(103.0, 71.0, 97.0));
// Subtract 0.5 to avoid slightly brightening the whole viewport.
// Use a dither strength of 100% rather than the 37.5% suggested by the original source.
return (dither.rgb - 0.5) / bit_alignment_diviser;
}
void main() {
vec4 a;
a.x = texture(blend_tex, offset.xy).a;
a.y = texture(blend_tex, offset.zw).g;
a.wz = texture(blend_tex, tex_coord).xz;
if (dot(a, vec4(1.0, 1.0, 1.0, 1.0)) < 1e-5) {
out_color = textureLod(color_tex, tex_coord, 0.0);
} else {
bool h = max(a.x, a.z) > max(a.y, a.w);
vec4 blending_offset = vec4(0.0, a.y, 0.0, a.w);
vec2 blending_weight = a.yw;
SMAAMovc(bvec4(h, h, h, h), blending_offset, vec4(a.x, 0.0, a.z, 0.0));
SMAAMovc(bvec2(h, h), blending_weight, a.xz);
blending_weight /= dot(blending_weight, vec2(1.0, 1.0));
vec4 blending_coord = fma(blending_offset, vec4(params.inv_size.xy, -params.inv_size.xy), tex_coord.xyxy);
out_color.rgb = blending_weight.x * textureLinear(color_tex, blending_coord.xy);
out_color.rgb += blending_weight.y * textureLinear(color_tex, blending_coord.zw);
out_color.rgb = linear_to_srgb(out_color.rgb);
out_color.a = texture(color_tex, tex_coord).a;
}
if (bool(params.flags & FLAG_USE_8_BIT_DEBANDING)) {
// Divide by 255 to align to 8-bit quantization.
out_color.rgb += screen_space_dither(gl_FragCoord.xy, 255.0);
} else if (bool(params.flags & FLAG_USE_10_BIT_DEBANDING)) {
// Divide by 1023 to align to 10-bit quantization.
out_color.rgb += screen_space_dither(gl_FragCoord.xy, 1023.0);
}
}

View File

@@ -0,0 +1,120 @@
/**
* Copyright (C) 2013 Jorge Jimenez (jorge@iryoku.com)
* Copyright (C) 2013 Jose I. Echevarria (joseignacioechevarria@gmail.com)
* Copyright (C) 2013 Belen Masia (bmasia@unizar.es)
* Copyright (C) 2013 Fernando Navarro (fernandn@microsoft.com)
* Copyright (C) 2013 Diego Gutierrez (diegog@unizar.es)
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is furnished to
* do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software. As clarification, there
* is no requirement that the copyright notice and permission be included in
* binary distributions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#[vertex]
#version 450
layout(location = 0) out vec2 tex_coord;
layout(location = 1) out vec4 offset[3];
layout(push_constant, std430) uniform Params {
vec2 inv_size;
float threshold;
float reserved;
}
params;
void main() {
vec2 vertex_base;
if (gl_VertexIndex == 0) {
vertex_base = vec2(-1.0, -1.0);
} else if (gl_VertexIndex == 1) {
vertex_base = vec2(-1.0, 3.0);
} else {
vertex_base = vec2(3.0, -1.0);
}
gl_Position = vec4(vertex_base, 0.0, 1.0);
tex_coord = clamp(vertex_base, vec2(0.0, 0.0), vec2(1.0, 1.0)) * 2.0; // saturate(x) * 2.0
offset[0] = fma(params.inv_size.xyxy, vec4(-1.0, 0.0, 0.0, -1.0), tex_coord.xyxy);
offset[1] = fma(params.inv_size.xyxy, vec4(1.0, 0.0, 0.0, 1.0), tex_coord.xyxy);
offset[2] = fma(params.inv_size.xyxy, vec4(-2.0, 0.0, 0.0, -2.0), tex_coord.xyxy);
}
#[fragment]
#version 450
layout(location = 0) in vec2 tex_coord;
layout(location = 1) in vec4 offset[3];
layout(set = 0, binding = 0) uniform sampler2D color_tex;
layout(location = 0) out vec2 edges;
layout(push_constant, std430) uniform Params {
vec2 inv_size;
float threshold;
float reserved;
}
params;
#define SMAA_LOCAL_CONTRAST_ADAPTATION_FACTOR 2.0
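// Local contrast adaptation: an edge is only kept if its delta is at least
// 1 / SMAA_LOCAL_CONTRAST_ADAPTATION_FACTOR of the strongest delta found in the
// neighborhood (see the final step() in main() below), suppressing weak edges
// that sit next to much stronger ones.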
void main() {
vec2 threshold = vec2(params.threshold);
vec4 delta;
vec3 C = texture(color_tex, tex_coord).rgb;
vec3 Cleft = texture(color_tex, offset[0].xy).rgb;
vec3 t = abs(C - Cleft);
delta.x = max(max(t.r, t.g), t.b);
vec3 Ctop = texture(color_tex, offset[0].zw).rgb;
t = abs(C - Ctop);
delta.y = max(max(t.r, t.g), t.b);
edges = step(threshold, delta.xy);
if (dot(edges, vec2(1.0, 1.0)) == 0.0) {
discard;
}
vec3 Cright = texture(color_tex, offset[1].xy).rgb;
t = abs(C - Cright);
delta.z = max(max(t.r, t.g), t.b);
vec3 Cbottom = texture(color_tex, offset[1].zw).rgb;
t = abs(C - Cbottom);
delta.w = max(max(t.r, t.g), t.b);
vec2 max_delta = max(delta.xy, delta.zw);
vec3 Cleftleft = texture(color_tex, offset[2].xy).rgb;
t = abs(Cleft - Cleftleft);
delta.z = max(max(t.r, t.g), t.b);
vec3 Ctoptop = texture(color_tex, offset[2].zw).rgb;
t = abs(Ctop - Ctoptop);
delta.w = max(max(t.r, t.g), t.b);
max_delta = max(max_delta.xy, delta.zw);
float final_delta = max(max_delta.x, max_delta.y);
edges.xy *= step(final_delta, SMAA_LOCAL_CONTRAST_ADAPTATION_FACTOR * delta.xy);
}

View File

@@ -0,0 +1,376 @@
/**
* Copyright (C) 2013 Jorge Jimenez (jorge@iryoku.com)
* Copyright (C) 2013 Jose I. Echevarria (joseignacioechevarria@gmail.com)
* Copyright (C) 2013 Belen Masia (bmasia@unizar.es)
* Copyright (C) 2013 Fernando Navarro (fernandn@microsoft.com)
* Copyright (C) 2013 Diego Gutierrez (diegog@unizar.es)
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is furnished to
* do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software. As clarification, there
* is no requirement that the copyright notice and permission be included in
* binary distributions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#[vertex]
#version 450
layout(location = 0) out vec2 tex_coord;
layout(location = 1) out vec2 pix_coord;
layout(location = 2) out vec4 offset[3];
layout(push_constant, std430) uniform Params {
vec2 inv_size;
ivec2 size;
vec4 subsample_indices;
}
params;
#define SMAA_MAX_SEARCH_STEPS 32
void main() {
vec2 vertex_base;
if (gl_VertexIndex == 0) {
vertex_base = vec2(-1.0, -1.0);
} else if (gl_VertexIndex == 1) {
vertex_base = vec2(-1.0, 3.0);
} else {
vertex_base = vec2(3.0, -1.0);
}
gl_Position = vec4(vertex_base, 0.0, 1.0);
tex_coord = clamp(vertex_base, vec2(0.0, 0.0), vec2(1.0, 1.0)) * 2.0; // saturate(x) * 2.0
pix_coord = tex_coord * params.size.xy;
offset[0] = fma(params.inv_size.xyxy, vec4(-0.25, -0.125, 1.25, -0.125), tex_coord.xyxy);
offset[1] = fma(params.inv_size.xyxy, vec4(-0.125, -0.25, -0.125, 1.25), tex_coord.xyxy);
offset[2] = fma(params.inv_size.xxyy,
vec4(-2.0, 2.0, -2.0, 2.0) * SMAA_MAX_SEARCH_STEPS,
vec4(offset[0].xz, offset[1].yw));
}
#[fragment]
#version 450
layout(location = 0) in vec2 tex_coord;
layout(location = 1) in vec2 pix_coord;
layout(location = 2) in vec4 offset[3];
layout(set = 0, binding = 0) uniform sampler2D edges_tex;
layout(set = 1, binding = 0) uniform sampler2D area_tex;
layout(set = 1, binding = 1) uniform sampler2D search_tex;
layout(location = 0) out vec4 weights;
layout(push_constant, std430) uniform Params {
vec2 inv_size;
ivec2 size;
vec4 subsample_indices;
}
params;
#define SMAA_MAX_SEARCH_STEPS 32
#define SMAA_MAX_SEARCH_STEPS_DIAG 16
#define SMAA_CORNER_ROUNDING 25
#ifndef SMAA_AREATEX_SELECT
#define SMAA_AREATEX_SELECT(sample) sample.rg
#endif
#ifndef SMAA_SEARCHTEX_SELECT
#define SMAA_SEARCHTEX_SELECT(sample) sample.r
#endif
#define SMAA_AREATEX_MAX_DISTANCE 16
#define SMAA_AREATEX_MAX_DISTANCE_DIAG 20
#define SMAA_AREATEX_PIXEL_SIZE (1.0 / vec2(160.0, 560.0))
#define SMAA_AREATEX_SUBTEX_SIZE (1.0 / 7.0)
#define SMAA_SEARCHTEX_SIZE vec2(66.0, 33.0)
#define SMAA_SEARCHTEX_PACKED_SIZE vec2(64.0, 16.0)
#define SMAA_CORNER_ROUNDING_NORM (float(SMAA_CORNER_ROUNDING) / 100.0)
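// The constants above describe the precomputed SMAA lookup textures: the area
// texture is 160x560 texels (hence SMAA_AREATEX_PIXEL_SIZE) and is split
// vertically into 7 sub-textures, one per subsample offset; orthogonal patterns
// cover distances up to 16 texels and diagonal patterns up to 20. The search
// texture is addressed as 66x33 but stored packed into 64x16.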
void SMAAMovc(bvec2 cond, inout vec2 variable, vec2 value) {
if (cond.x) {
variable.x = value.x;
}
if (cond.y) {
variable.y = value.y;
}
}
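// The two decode helpers below undo the bilinear "packing" used by the diagonal
// searches: a single filtered fetch taken between two edge texels blends their
// binary values, and the scale/abs/round sequence maps the blended result back
// to per-texel 0/1 edge flags (see the upstream SMAA sources for the exact
// derivation of the 5 * x - 5 * 0.75 curve).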
vec2 SMAADecodeDiagBilinearAccess(vec2 e) {
e.r = e.r * abs(5.0 * e.r - 5.0 * 0.75);
return round(e);
}
vec4 SMAADecodeDiagBilinearAccess(vec4 e) {
e.rb = e.rb * abs(5.0 * e.rb - 5.0 * 0.75);
return round(e);
}
vec2 SMAASearchDiag1(vec2 tex_coord, vec2 dir, out vec2 e) {
vec4 coord = vec4(tex_coord, -1.0, 1.0);
vec3 t = vec3(params.inv_size.xy, 1.0);
while (coord.z < float(SMAA_MAX_SEARCH_STEPS_DIAG - 1) &&
coord.w > 0.9) {
coord.xyz = fma(t, vec3(dir, 1.0), coord.xyz);
e = textureLod(edges_tex, coord.xy, 0.0).rg;
coord.w = dot(e, vec2(0.5, 0.5));
}
return coord.zw;
}
vec2 SMAASearchDiag2(vec2 tex_coord, vec2 dir, out vec2 e) {
vec4 coord = vec4(tex_coord, -1.0, 1.0);
coord.x += 0.25 * params.inv_size.x;
vec3 t = vec3(params.inv_size.xy, 1.0);
while (coord.z < float(SMAA_MAX_SEARCH_STEPS_DIAG - 1) &&
coord.w > 0.9) {
coord.xyz = fma(t, vec3(dir, 1.0), coord.xyz);
e = textureLod(edges_tex, coord.xy, 0.0).rg;
e = SMAADecodeDiagBilinearAccess(e);
coord.w = dot(e, vec2(0.5, 0.5));
}
return coord.zw;
}
vec2 SMAAAreaDiag(vec2 dist, vec2 e, float offset) {
vec2 coord = fma(vec2(SMAA_AREATEX_MAX_DISTANCE_DIAG, SMAA_AREATEX_MAX_DISTANCE_DIAG), e, dist);
coord = fma(SMAA_AREATEX_PIXEL_SIZE, coord, 0.5 * SMAA_AREATEX_PIXEL_SIZE);
coord.x += 0.5;
coord.y += SMAA_AREATEX_SUBTEX_SIZE * offset;
return SMAA_AREATEX_SELECT(textureLod(area_tex, coord, 0.0));
}
vec2 SMAACalculateDiagWeights(vec2 tex_coord, vec2 e, vec4 subsample_indices) {
vec2 weights = vec2(0.0, 0.0);
vec4 d;
vec2 end;
if (e.r > 0.0) {
d.xz = SMAASearchDiag1(tex_coord, vec2(-1.0, 1.0), end);
d.x += float(end.y > 0.9);
} else {
d.xz = vec2(0.0, 0.0);
}
d.yw = SMAASearchDiag1(tex_coord, vec2(1.0, -1.0), end);
if (d.x + d.y > 2.0) {
vec4 coords = fma(vec4(-d.x + 0.25, d.x, d.y, -d.y - 0.25), params.inv_size.xyxy, tex_coord.xyxy);
vec4 c;
c.xy = textureLodOffset(edges_tex, coords.xy, 0.0, ivec2(-1, 0)).rg;
c.zw = textureLodOffset(edges_tex, coords.zw, 0.0, ivec2(1, 0)).rg;
c.yxwz = SMAADecodeDiagBilinearAccess(c.xyzw);
vec2 cc = fma(vec2(2.0, 2.0), c.xz, c.yw);
SMAAMovc(bvec2(step(0.9, d.zw)), cc, vec2(0.0, 0.0));
weights += SMAAAreaDiag(d.xy, cc, subsample_indices.z);
}
d.xz = SMAASearchDiag2(tex_coord, vec2(-1.0, -1.0), end);
if (textureLodOffset(edges_tex, tex_coord, 0.0, ivec2(1, 0)).r > 0.0) {
d.yw = SMAASearchDiag2(tex_coord, vec2(1.0, 1.0), end);
d.y += float(end.y > 0.9);
} else {
d.yw = vec2(0.0, 0.0);
}
if (d.x + d.y > 2.0) {
vec4 coords = fma(vec4(-d.x, -d.x, d.y, d.y), params.inv_size.xyxy, tex_coord.xyxy);
vec4 c;
c.x = textureLodOffset(edges_tex, coords.xy, 0.0, ivec2(-1, 0)).g;
c.y = textureLodOffset(edges_tex, coords.xy, 0.0, ivec2(0, -1)).r;
c.zw = textureLodOffset(edges_tex, coords.zw, 0.0, ivec2(1, 0)).gr;
vec2 cc = fma(vec2(2.0, 2.0), c.xz, c.yw);
SMAAMovc(bvec2(step(0.9, d.zw)), cc, vec2(0.0, 0.0));
weights += SMAAAreaDiag(d.xy, cc, subsample_indices.w).gr;
}
return weights;
}
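// SMAASearchLength() supports the horizontal/vertical searches below: they
// advance two pixels per step using bilinearly filtered edge fetches, and this
// helper maps the final blended edge pair into the packed search texture to
// look up the end-of-search correction applied by the callers (the
// fma(-(255.0 / 127.0), ..., 3.25) terms). 'offset' selects the texture half:
// 0.0 for the left/up searches, 0.5 for the right/down ones.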
float SMAASearchLength(vec2 e, float offset) {
vec2 scale = SMAA_SEARCHTEX_SIZE * vec2(0.5, -1.0);
vec2 bias = SMAA_SEARCHTEX_SIZE * vec2(offset, 1.0);
scale += vec2(-1.0, 1.0);
bias += vec2(0.5, -0.5);
scale *= 1.0 / SMAA_SEARCHTEX_PACKED_SIZE;
bias *= 1.0 / SMAA_SEARCHTEX_PACKED_SIZE;
return SMAA_SEARCHTEX_SELECT(textureLod(search_tex, fma(scale, e, bias), 0.0));
}
float SMAASearchXLeft(vec2 tex_coord, float end) {
vec2 e = vec2(0.0, 1.0);
while (tex_coord.x > end &&
e.g > 0.8281 &&
e.r == 0.0) {
e = textureLod(edges_tex, tex_coord, 0.0).rg;
tex_coord = fma(-vec2(2.0, 0.0), params.inv_size.xy, tex_coord);
}
float offset = fma(-(255.0 / 127.0), SMAASearchLength(e, 0.0), 3.25);
return fma(params.inv_size.x, offset, tex_coord.x);
}
float SMAASearchXRight(vec2 tex_coord, float end) {
vec2 e = vec2(0.0, 1.0);
while (tex_coord.x < end &&
e.g > 0.8281 &&
e.r == 0.0) {
e = textureLod(edges_tex, tex_coord, 0.0).rg;
tex_coord = fma(vec2(2.0, 0.0), params.inv_size.xy, tex_coord);
}
float offset = fma(-(255.0 / 127.0), SMAASearchLength(e, 0.5), 3.25);
return fma(-params.inv_size.x, offset, tex_coord.x);
}
float SMAASearchYUp(vec2 tex_coord, float end) {
vec2 e = vec2(1.0, 0.0);
while (tex_coord.y > end &&
e.r > 0.8281 &&
e.g == 0.0) {
e = textureLod(edges_tex, tex_coord, 0.0).rg;
tex_coord = fma(-vec2(0.0, 2.0), params.inv_size.xy, tex_coord);
}
float offset = fma(-(255.0 / 127.0), SMAASearchLength(e.gr, 0.0), 3.25);
return fma(params.inv_size.y, offset, tex_coord.y);
}
float SMAASearchYDown(vec2 tex_coord, float end) {
vec2 e = vec2(1.0, 0.0);
while (tex_coord.y < end &&
e.r > 0.8281 &&
e.g == 0.0) {
e = textureLod(edges_tex, tex_coord, 0.0).rg;
tex_coord = fma(vec2(0.0, 2.0), params.inv_size.xy, tex_coord);
}
float offset = fma(-(255.0 / 127.0), SMAASearchLength(e.gr, 0.5), 3.25);
return fma(-params.inv_size.y, offset, tex_coord.y);
}
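// SMAAArea() fetches the precomputed coverage areas: 'dist' holds the
// (sqrt-encoded) distances to both ends of the detected line, e1/e2 are the
// crossing edges found at those ends, and 'offset' selects the sub-texture for
// the current subsample index; the returned RG pair is used directly as the
// blending weights.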
vec2 SMAAArea(vec2 dist, float e1, float e2, float offset) {
vec2 tex_coord = fma(vec2(SMAA_AREATEX_MAX_DISTANCE, SMAA_AREATEX_MAX_DISTANCE), round(4.0 * vec2(e1, e2)), dist);
tex_coord = fma(SMAA_AREATEX_PIXEL_SIZE, tex_coord, 0.5 * SMAA_AREATEX_PIXEL_SIZE);
tex_coord.y = fma(SMAA_AREATEX_SUBTEX_SIZE, offset, tex_coord.y);
return SMAA_AREATEX_SELECT(textureLod(area_tex, tex_coord, 0.0));
}
void SMAADetectHorizontalCornerPattern(inout vec2 weights, vec4 coord, vec2 d) {
vec2 left_right = step(d.xy, d.yx);
vec2 rounding = (1.0 - SMAA_CORNER_ROUNDING_NORM) * left_right;
rounding /= left_right.x + left_right.y;
vec2 factor = vec2(1.0, 1.0);
factor.x -= rounding.x * textureLodOffset(edges_tex, coord.xy, 0.0, ivec2(0, 1)).r;
factor.x -= rounding.y * textureLodOffset(edges_tex, coord.zw, 0.0, ivec2(1, 1)).r;
factor.y -= rounding.x * textureLodOffset(edges_tex, coord.xy, 0.0, ivec2(0, -2)).r;
factor.y -= rounding.y * textureLodOffset(edges_tex, coord.zw, 0.0, ivec2(1, -2)).r;
weights *= clamp(factor, 0.0, 1.0);
}
void SMAADetectVerticalCornerPattern(inout vec2 weights, vec4 coord, vec2 d) {
vec2 left_right = step(d.xy, d.yx);
vec2 rounding = (1.0 - SMAA_CORNER_ROUNDING_NORM) * left_right;
rounding /= left_right.x + left_right.y;
vec2 factor = vec2(1.0, 1.0);
factor.x -= rounding.x * textureLodOffset(edges_tex, coord.xy, 0.0, ivec2(1, 0)).g;
factor.x -= rounding.y * textureLodOffset(edges_tex, coord.zw, 0.0, ivec2(1, 1)).g;
factor.y -= rounding.x * textureLodOffset(edges_tex, coord.xy, 0.0, ivec2(-2, 0)).g;
factor.y -= rounding.y * textureLodOffset(edges_tex, coord.zw, 0.0, ivec2(-2, 1)).g;
weights *= clamp(factor, 0.0, 1.0);
}
void main() {
weights = vec4(0.0, 0.0, 0.0, 0.0);
vec2 e = textureLod(edges_tex, tex_coord, 0.0).rg;
if (e.g > 0.0) { // Edge at north.
weights.rg = SMAACalculateDiagWeights(tex_coord, e, params.subsample_indices);
if (weights.r == -weights.g) {
vec2 d;
vec3 coords;
coords.x = SMAASearchXLeft(offset[0].xy, offset[2].x);
coords.y = offset[1].y;
d.x = coords.x;
float e1 = textureLod(edges_tex, coords.xy, 0.0).r;
coords.z = SMAASearchXRight(offset[0].zw, offset[2].y);
d.y = coords.z;
d = abs(round(fma(params.size.xx, d, -pix_coord.xx)));
vec2 sqrt_d = sqrt(d);
float e2 = textureLodOffset(edges_tex, coords.zy, 0.0, ivec2(1, 0)).r;
weights.rg = SMAAArea(sqrt_d, e1, e2, params.subsample_indices.y);
coords.y = tex_coord.y;
SMAADetectHorizontalCornerPattern(weights.rg, coords.xyzy, d);
} else {
e.r = 0.0;
}
}
if (e.r > 0.0) { // Edge at west.
vec2 d;
vec3 coords;
coords.y = SMAASearchYUp(offset[1].xy, offset[2].z);
coords.x = offset[0].x;
d.x = coords.y;
float e1 = textureLod(edges_tex, coords.xy, 0.0).g;
coords.z = SMAASearchYDown(offset[1].zw, offset[2].w);
d.y = coords.z;
d = abs(round(fma(params.size.yy, d, -pix_coord.yy)));
vec2 sqrt_d = sqrt(d);
float e2 = textureLodOffset(edges_tex, coords.xz, 0.0, ivec2(0, 1)).g;
weights.ba = SMAAArea(sqrt_d, e1, e2, params.subsample_indices.x);
coords.x = tex_coord.x;
SMAADetectVerticalCornerPattern(weights.ba, coords.xyxz, d);
}
}

View File

@@ -0,0 +1,205 @@
#[compute]
#version 450
#VERSION_DEFINES
// Original version here:
// https://github.com/GPUOpen-LibrariesAndSDKs/GPUParticles11/blob/master/gpuparticles11/src/Shaders
//
// Copyright (c) 2016 Advanced Micro Devices, Inc. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//
#define SORT_SIZE 512
#define NUM_THREADS (SORT_SIZE / 2)
#define INVERSION (16 * 2 + 8 * 3)
#define ITERATIONS 1
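// Bitonic sort over vec2 elements ordered by .x (the sort key); .y is carried
// along unchanged as the payload. Three variants are built from this file:
// MODE_SORT_BLOCK sorts chunks of up to SORT_SIZE elements entirely in shared
// memory, MODE_SORT_STEP performs a single global merge step for buffers
// larger than SORT_SIZE, and MODE_SORT_INNER finishes the remaining merge
// steps back in shared memory.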
layout(local_size_x = NUM_THREADS, local_size_y = 1, local_size_z = 1) in;
#ifndef MODE_SORT_STEP
shared vec2 g_LDS[SORT_SIZE];
#endif
layout(set = 1, binding = 0, std430) restrict buffer SortBuffer {
vec2 data[];
}
sort_buffer;
layout(push_constant, std430) uniform Params {
uint total_elements;
uint pad[3];
ivec4 job_params;
}
params;
void main() {
#ifdef MODE_SORT_BLOCK
uvec3 Gid = gl_WorkGroupID;
uvec3 DTid = gl_GlobalInvocationID;
uvec3 GTid = gl_LocalInvocationID;
uint GI = gl_LocalInvocationIndex;
int GlobalBaseIndex = int((Gid.x * SORT_SIZE) + GTid.x);
int LocalBaseIndex = int(GI);
int numElementsInThreadGroup = int(min(SORT_SIZE, params.total_elements - (Gid.x * SORT_SIZE)));
// Load shared data
int i;
for (i = 0; i < 2 * ITERATIONS; ++i) {
if (GI + i * NUM_THREADS < numElementsInThreadGroup) {
g_LDS[LocalBaseIndex + i * NUM_THREADS] = sort_buffer.data[GlobalBaseIndex + i * NUM_THREADS];
}
}
groupMemoryBarrier();
barrier();
// Bitonic sort
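// The outer loop doubles the merge size each pass and the inner loop halves
// the compare distance. On the first sub-step of a merge (nMergeSubSize ==
// nMergeSize >> 1) the partner index is mirrored, turning the two sorted runs
// into a bitonic sequence that the remaining sub-steps then sort.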
for (int nMergeSize = 2; nMergeSize <= SORT_SIZE; nMergeSize = nMergeSize * 2) {
for (int nMergeSubSize = nMergeSize >> 1; nMergeSubSize > 0; nMergeSubSize = nMergeSubSize >> 1) {
for (i = 0; i < ITERATIONS; ++i) {
int tmp_index = int(GI + NUM_THREADS * i);
int index_low = tmp_index & (nMergeSubSize - 1);
int index_high = 2 * (tmp_index - index_low);
int index = index_high + index_low;
int nSwapElem = nMergeSubSize == nMergeSize >> 1 ? index_high + (2 * nMergeSubSize - 1) - index_low : index_high + nMergeSubSize + index_low;
if (nSwapElem < numElementsInThreadGroup) {
vec2 a = g_LDS[index];
vec2 b = g_LDS[nSwapElem];
if (a.x > b.x) {
g_LDS[index] = b;
g_LDS[nSwapElem] = a;
}
}
groupMemoryBarrier();
barrier();
}
}
}
// Store shared data
for (i = 0; i < 2 * ITERATIONS; ++i) {
if (GI + i * NUM_THREADS < numElementsInThreadGroup) {
sort_buffer.data[GlobalBaseIndex + i * NUM_THREADS] = g_LDS[LocalBaseIndex + i * NUM_THREADS];
}
}
#endif
#ifdef MODE_SORT_STEP
uvec3 Gid = gl_WorkGroupID;
uvec3 GTid = gl_LocalInvocationID;
ivec4 tgp;
tgp.x = int(Gid.x) * 256;
tgp.y = 0;
tgp.z = int(params.total_elements);
tgp.w = min(512, max(0, tgp.z - int(Gid.x) * 512));
uint localID = int(tgp.x) + GTid.x; // calculate the thread ID within this sortable array
uint index_low = localID & (params.job_params.x - 1);
uint index_high = 2 * (localID - index_low);
uint index = tgp.y + index_high + index_low;
uint nSwapElem = tgp.y + index_high + params.job_params.y + params.job_params.z * index_low;
if (nSwapElem < tgp.y + tgp.z) {
vec2 a = sort_buffer.data[index];
vec2 b = sort_buffer.data[nSwapElem];
if (a.x > b.x) {
sort_buffer.data[index] = b;
sort_buffer.data[nSwapElem] = a;
}
}
#endif
#ifdef MODE_SORT_INNER
uvec3 Gid = gl_WorkGroupID;
uvec3 DTid = gl_GlobalInvocationID;
uvec3 GTid = gl_LocalInvocationID;
uint GI = gl_LocalInvocationIndex;
ivec4 tgp;
tgp.x = int(Gid.x * 256);
tgp.y = 0;
tgp.z = int(params.total_elements.x);
tgp.w = int(min(512, max(0, params.total_elements - Gid.x * 512)));
int GlobalBaseIndex = int(tgp.y + tgp.x * 2 + GTid.x);
int LocalBaseIndex = int(GI);
int i;
// Load shared data
for (i = 0; i < 2; ++i) {
if (GI + i * NUM_THREADS < tgp.w) {
g_LDS[LocalBaseIndex + i * NUM_THREADS] = sort_buffer.data[GlobalBaseIndex + i * NUM_THREADS];
}
}
groupMemoryBarrier();
barrier();
// sort threadgroup shared memory
for (int nMergeSubSize = SORT_SIZE >> 1; nMergeSubSize > 0; nMergeSubSize = nMergeSubSize >> 1) {
int tmp_index = int(GI);
int index_low = tmp_index & (nMergeSubSize - 1);
int index_high = 2 * (tmp_index - index_low);
int index = index_high + index_low;
int nSwapElem = index_high + nMergeSubSize + index_low;
if (nSwapElem < tgp.w) {
vec2 a = g_LDS[index];
vec2 b = g_LDS[nSwapElem];
if (a.x > b.x) {
g_LDS[index] = b;
g_LDS[nSwapElem] = a;
}
}
groupMemoryBarrier();
barrier();
}
// Store shared data
for (i = 0; i < 2; ++i) {
if (GI + i * NUM_THREADS < tgp.w) {
sort_buffer.data[GlobalBaseIndex + i * NUM_THREADS] = g_LDS[LocalBaseIndex + i * NUM_THREADS];
}
}
#endif
}

View File

@@ -0,0 +1,80 @@
#[vertex]
#version 450
#VERSION_DEFINES
#if defined(USE_MULTIVIEW)
#extension GL_EXT_multiview : enable
#define ViewIndex gl_ViewIndex
#endif // USE_MULTIVIEW
#ifdef USE_MULTIVIEW
layout(location = 0) out vec3 uv_interp;
#else // USE_MULTIVIEW
layout(location = 0) out vec2 uv_interp;
#endif //USE_MULTIVIEW
void main() {
vec2 base_arr[3] = vec2[](vec2(-1.0, -1.0), vec2(-1.0, 3.0), vec2(3.0, -1.0));
gl_Position = vec4(base_arr[gl_VertexIndex], 0.0, 1.0);
uv_interp.xy = clamp(gl_Position.xy, vec2(0.0, 0.0), vec2(1.0, 1.0)) * 2.0; // saturate(x) * 2.0
#ifdef USE_MULTIVIEW
uv_interp.z = ViewIndex;
#endif
}
#[fragment]
#version 450
#VERSION_DEFINES
#ifdef USE_MULTIVIEW
layout(location = 0) in vec3 uv_interp;
#else // USE_MULTIVIEW
layout(location = 0) in vec2 uv_interp;
#endif //USE_MULTIVIEW
#ifdef USE_MULTIVIEW
layout(set = 0, binding = 0) uniform sampler2DArray specular;
#else // USE_MULTIVIEW
layout(set = 0, binding = 0) uniform sampler2D specular;
#endif //USE_MULTIVIEW
#ifdef MODE_SSR
#ifdef USE_MULTIVIEW
layout(set = 1, binding = 0) uniform sampler2DArray ssr;
#else // USE_MULTIVIEW
layout(set = 1, binding = 0) uniform sampler2D ssr;
#endif //USE_MULTIVIEW
#endif
#ifdef MODE_MERGE
#ifdef USE_MULTIVIEW
layout(set = 2, binding = 0) uniform sampler2DArray diffuse;
#else // USE_MULTIVIEW
layout(set = 2, binding = 0) uniform sampler2D diffuse;
#endif //USE_MULTIVIEW
#endif
layout(location = 0) out vec4 frag_color;
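// The base pass copies the resolved specular buffer; MODE_SSR blends the
// screen-space reflection result on top using its alpha, and MODE_MERGE also
// adds the diffuse buffer so the output is fully composited in this pass.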
void main() {
frag_color.rgb = texture(specular, uv_interp).rgb;
frag_color.a = 0.0;
#ifdef MODE_SSR
vec4 ssr_color = texture(ssr, uv_interp);
frag_color.rgb = mix(frag_color.rgb, ssr_color.rgb, ssr_color.a);
#endif
#ifdef MODE_MERGE
frag_color += texture(diffuse, uv_interp);
#endif
//added using additive blend
}

View File

@@ -0,0 +1,224 @@
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2016, Intel Corporation
// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
// documentation files (the "Software"), to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to the following conditions:
// The above copyright notice and this permission notice shall be included in all copies or substantial portions of
// the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
// THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// File changes (yyyy-mm-dd)
// 2016-09-07: filip.strugar@intel.com: first commit
// 2020-12-05: clayjohn: convert to Vulkan and Godot
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#[compute]
#version 450
#VERSION_DEFINES
layout(local_size_x = 8, local_size_y = 8, local_size_z = 1) in;
layout(push_constant, std430) uniform Params {
vec2 pixel_size;
float z_far;
float z_near;
bool orthogonal;
float radius_sq;
uvec2 pad;
}
params;
layout(set = 0, binding = 0) uniform sampler2D source_depth;
layout(r16f, set = 1, binding = 0) uniform restrict writeonly image2DArray dest_image0; //rename
#ifdef GENERATE_MIPS
layout(r16f, set = 2, binding = 0) uniform restrict writeonly image2DArray dest_image1;
layout(r16f, set = 2, binding = 1) uniform restrict writeonly image2DArray dest_image2;
layout(r16f, set = 2, binding = 2) uniform restrict writeonly image2DArray dest_image3;
#ifdef GENERATE_FULL_MIPS
layout(r16f, set = 2, binding = 3) uniform restrict writeonly image2DArray dest_image4;
#endif
#endif
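// dest_image0 is a 4-layer array holding de-interleaved depth: every 2x2 block
// of source depth is split so that each of its four samples lands in a
// different layer (each layer being half the source resolution per axis). The
// later SSAO passes then sample one layer per pass. With GENERATE_MIPS,
// dest_image1..3 (and optionally dest_image4) receive further downsampled
// levels in the same layered layout.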
vec4 screen_space_to_view_space_depth(vec4 p_depth) {
if (params.orthogonal) {
vec4 depth = p_depth * 2.0 - 1.0;
return -(depth * (params.z_far - params.z_near) - (params.z_far + params.z_near)) / 2.0;
}
float depth_linearize_mul = params.z_near;
float depth_linearize_add = params.z_far;
// Optimized version of "-cameraClipNear / (cameraClipFar - projDepth * (cameraClipFar - cameraClipNear)) * cameraClipFar"
// Set your depth_linearize_mul and depth_linearize_add to:
// depth_linearize_mul = ( cameraClipFar * cameraClipNear) / ( cameraClipFar - cameraClipNear );
// depth_linearize_add = cameraClipFar / ( cameraClipFar - cameraClipNear );
return depth_linearize_mul / (depth_linearize_add - p_depth);
}
float screen_space_to_view_space_depth(float p_depth) {
if (params.orthogonal) {
float depth = p_depth * 2.0 - 1.0;
return -(depth * (params.z_far - params.z_near) - (params.z_far + params.z_near)) / 2.0;
}
float depth_linearize_mul = params.z_near;
float depth_linearize_add = params.z_far;
return depth_linearize_mul / (depth_linearize_add - p_depth);
}
#ifdef GENERATE_MIPS
shared float depth_buffer[4][8][8];
float mip_smart_average(vec4 p_depths) {
float closest = min(min(p_depths.x, p_depths.y), min(p_depths.z, p_depths.w));
float fallof_sq = -1.0f / params.radius_sq;
vec4 dists = p_depths - closest.xxxx;
vec4 weights = clamp(dists * dists * fallof_sq + 1.0, 0.0, 1.0);
return dot(weights, p_depths) / dot(weights, vec4(1.0, 1.0, 1.0, 1.0));
}
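// mip_smart_average() biases the 2x2 average towards the closest of the four
// depths: each sample is weighted by its distance to that minimum through the
// same falloff curve used for the AO radius, so the mip chain does not blur
// across large depth discontinuities.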
void prepare_depths_and_mips(vec4 p_samples, uvec2 p_output_coord, uvec2 p_gtid) {
p_samples = screen_space_to_view_space_depth(p_samples);
depth_buffer[0][p_gtid.x][p_gtid.y] = p_samples.w;
depth_buffer[1][p_gtid.x][p_gtid.y] = p_samples.z;
depth_buffer[2][p_gtid.x][p_gtid.y] = p_samples.x;
depth_buffer[3][p_gtid.x][p_gtid.y] = p_samples.y;
imageStore(dest_image0, ivec3(p_output_coord.x, p_output_coord.y, 0), vec4(p_samples.w));
imageStore(dest_image0, ivec3(p_output_coord.x, p_output_coord.y, 1), vec4(p_samples.z));
imageStore(dest_image0, ivec3(p_output_coord.x, p_output_coord.y, 2), vec4(p_samples.x));
imageStore(dest_image0, ivec3(p_output_coord.x, p_output_coord.y, 3), vec4(p_samples.y));
uint depth_array_index = 2 * (p_gtid.y % 2) + (p_gtid.x % 2);
uvec2 depth_array_offset = ivec2(p_gtid.x % 2, p_gtid.y % 2);
ivec2 buffer_coord = ivec2(p_gtid) - ivec2(depth_array_offset);
p_output_coord /= 2;
groupMemoryBarrier();
barrier();
// if (still_alive) <-- all threads alive here
{
float sample_00 = depth_buffer[depth_array_index][buffer_coord.x + 0][buffer_coord.y + 0];
float sample_01 = depth_buffer[depth_array_index][buffer_coord.x + 0][buffer_coord.y + 1];
float sample_10 = depth_buffer[depth_array_index][buffer_coord.x + 1][buffer_coord.y + 0];
float sample_11 = depth_buffer[depth_array_index][buffer_coord.x + 1][buffer_coord.y + 1];
float avg = mip_smart_average(vec4(sample_00, sample_01, sample_10, sample_11));
imageStore(dest_image1, ivec3(p_output_coord.x, p_output_coord.y, depth_array_index), vec4(avg));
depth_buffer[depth_array_index][buffer_coord.x][buffer_coord.y] = avg;
}
bool still_alive = p_gtid.x % 4 == depth_array_offset.x && p_gtid.y % 4 == depth_array_offset.y;
p_output_coord /= 2;
groupMemoryBarrier();
barrier();
if (still_alive) {
float sample_00 = depth_buffer[depth_array_index][buffer_coord.x + 0][buffer_coord.y + 0];
float sample_01 = depth_buffer[depth_array_index][buffer_coord.x + 0][buffer_coord.y + 2];
float sample_10 = depth_buffer[depth_array_index][buffer_coord.x + 2][buffer_coord.y + 0];
float sample_11 = depth_buffer[depth_array_index][buffer_coord.x + 2][buffer_coord.y + 2];
float avg = mip_smart_average(vec4(sample_00, sample_01, sample_10, sample_11));
imageStore(dest_image2, ivec3(p_output_coord.x, p_output_coord.y, depth_array_index), vec4(avg));
depth_buffer[depth_array_index][buffer_coord.x][buffer_coord.y] = avg;
}
still_alive = p_gtid.x % 8 == depth_array_offset.x && depth_array_offset.y % 8 == depth_array_offset.y;
p_output_coord /= 2;
groupMemoryBarrier();
barrier();
if (still_alive) {
float sample_00 = depth_buffer[depth_array_index][buffer_coord.x + 0][buffer_coord.y + 0];
float sample_01 = depth_buffer[depth_array_index][buffer_coord.x + 0][buffer_coord.y + 4];
float sample_10 = depth_buffer[depth_array_index][buffer_coord.x + 4][buffer_coord.y + 0];
float sample_11 = depth_buffer[depth_array_index][buffer_coord.x + 4][buffer_coord.y + 4];
float avg = mip_smart_average(vec4(sample_00, sample_01, sample_10, sample_11));
imageStore(dest_image3, ivec3(p_output_coord.x, p_output_coord.y, depth_array_index), vec4(avg));
#ifndef GENERATE_FULL_MIPS
}
#else
depth_buffer[depth_array_index][buffer_coord.x][buffer_coord.y] = avg;
}
still_alive = p_gtid.x % 16 == depth_array_offset.x && depth_array_offset.y % 16 == depth_array_offset.y;
p_output_coord /= 2;
if (still_alive) {
// Use the previous average, not ideal, but still not bad.
float sample_00 = depth_buffer[depth_array_index][buffer_coord.x + 0][buffer_coord.y + 0];
imageStore(dest_image4, ivec3(p_output_coord.x, p_output_coord.y, depth_array_index), vec4(sample_00));
}
#endif
}
#else
#ifndef USE_HALF_BUFFERS
void prepare_depths(vec4 p_samples, uvec2 p_tid) {
p_samples = screen_space_to_view_space_depth(p_samples);
imageStore(dest_image0, ivec3(p_tid, 0), vec4(p_samples.w));
imageStore(dest_image0, ivec3(p_tid, 1), vec4(p_samples.z));
imageStore(dest_image0, ivec3(p_tid, 2), vec4(p_samples.x));
imageStore(dest_image0, ivec3(p_tid, 3), vec4(p_samples.y));
}
#endif
#endif
void main() {
#ifdef USE_HALF_BUFFERS
// Half buffers means that we divide the depth into two half-res buffers (we only capture 1/4 of the pixels).
#ifdef USE_HALF_SIZE
float sample_00 = texelFetch(source_depth, ivec2(4 * gl_GlobalInvocationID.x + 0, 4 * gl_GlobalInvocationID.y + 0), 0).x;
float sample_11 = texelFetch(source_depth, ivec2(4 * gl_GlobalInvocationID.x + 2, 4 * gl_GlobalInvocationID.y + 2), 0).x;
#else
float sample_00 = texelFetch(source_depth, ivec2(2 * gl_GlobalInvocationID.x + 0, 2 * gl_GlobalInvocationID.y + 0), 0).x;
float sample_11 = texelFetch(source_depth, ivec2(2 * gl_GlobalInvocationID.x + 1, 2 * gl_GlobalInvocationID.y + 1), 0).x;
#endif
sample_00 = screen_space_to_view_space_depth(sample_00);
sample_11 = screen_space_to_view_space_depth(sample_11);
imageStore(dest_image0, ivec3(gl_GlobalInvocationID.xy, 0), vec4(sample_00));
imageStore(dest_image0, ivec3(gl_GlobalInvocationID.xy, 3), vec4(sample_11));
#else //!USE_HALF_BUFFERS
#ifdef USE_HALF_SIZE
ivec2 depth_buffer_coord = 4 * ivec2(gl_GlobalInvocationID.xy);
ivec2 output_coord = ivec2(gl_GlobalInvocationID);
vec2 uv = (vec2(depth_buffer_coord) + 0.5f) * params.pixel_size;
vec4 samples;
samples.x = textureLodOffset(source_depth, uv, 0, ivec2(0, 2)).x;
samples.y = textureLodOffset(source_depth, uv, 0, ivec2(2, 2)).x;
samples.z = textureLodOffset(source_depth, uv, 0, ivec2(2, 0)).x;
samples.w = textureLodOffset(source_depth, uv, 0, ivec2(0, 0)).x;
#else
ivec2 depth_buffer_coord = 2 * ivec2(gl_GlobalInvocationID.xy);
ivec2 output_coord = ivec2(gl_GlobalInvocationID);
vec2 uv = (vec2(depth_buffer_coord) + 0.5f) * params.pixel_size;
vec4 samples = textureGather(source_depth, uv);
#endif //USE_HALF_SIZE
#ifdef GENERATE_MIPS
prepare_depths_and_mips(samples, output_coord, gl_LocalInvocationID.xy);
#else
prepare_depths(samples, gl_GlobalInvocationID.xy);
#endif
#endif //USE_HALF_BUFFERS
}

View File

@@ -0,0 +1,481 @@
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2016, Intel Corporation
// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
// documentation files (the "Software"), to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to the following conditions:
// The above copyright notice and this permission notice shall be included in all copies or substantial portions of
// the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
// THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// File changes (yyyy-mm-dd)
// 2016-09-07: filip.strugar@intel.com: first commit
// 2020-12-05: clayjohn: convert to Vulkan and Godot
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#[compute]
#version 450
#VERSION_DEFINES
#define INTELSSAO_MAIN_DISK_SAMPLE_COUNT (32)
const vec4 sample_pattern[INTELSSAO_MAIN_DISK_SAMPLE_COUNT] = {
vec4(0.78488064, 0.56661671, 1.500000, -0.126083), vec4(0.26022232, -0.29575172, 1.500000, -1.064030), vec4(0.10459357, 0.08372527, 1.110000, -2.730563), vec4(-0.68286800, 0.04963045, 1.090000, -0.498827),
vec4(-0.13570161, -0.64190155, 1.250000, -0.532765), vec4(-0.26193795, -0.08205118, 0.670000, -1.783245), vec4(-0.61177456, 0.66664219, 0.710000, -0.044234), vec4(0.43675563, 0.25119025, 0.610000, -1.167283),
vec4(0.07884444, 0.86618668, 0.640000, -0.459002), vec4(-0.12790935, -0.29869005, 0.600000, -1.729424), vec4(-0.04031125, 0.02413622, 0.600000, -4.792042), vec4(0.16201244, -0.52851415, 0.790000, -1.067055),
vec4(-0.70991218, 0.47301072, 0.640000, -0.335236), vec4(0.03277707, -0.22349690, 0.600000, -1.982384), vec4(0.68921727, 0.36800742, 0.630000, -0.266718), vec4(0.29251814, 0.37775412, 0.610000, -1.422520),
vec4(-0.12224089, 0.96582592, 0.600000, -0.426142), vec4(0.11071457, -0.16131058, 0.600000, -2.165947), vec4(0.46562141, -0.59747696, 0.600000, -0.189760), vec4(-0.51548797, 0.11804193, 0.600000, -1.246800),
vec4(0.89141309, -0.42090443, 0.600000, 0.028192), vec4(-0.32402530, -0.01591529, 0.600000, -1.543018), vec4(0.60771245, 0.41635221, 0.600000, -0.605411), vec4(0.02379565, -0.08239821, 0.600000, -3.809046),
vec4(0.48951152, -0.23657045, 0.600000, -1.189011), vec4(-0.17611565, -0.81696892, 0.600000, -0.513724), vec4(-0.33930185, -0.20732205, 0.600000, -1.698047), vec4(-0.91974425, 0.05403209, 0.600000, 0.062246),
vec4(-0.15064627, -0.14949332, 0.600000, -1.896062), vec4(0.53180975, -0.35210401, 0.600000, -0.758838), vec4(0.41487166, 0.81442589, 0.600000, -0.505648), vec4(-0.24106961, -0.32721516, 0.600000, -1.665244)
};
// These values can be changed (up to SSAO_MAX_TAPS) with no changes required elsewhere; the values for the 4th and 5th presets are ignored, but the array entries are needed to avoid compilation errors.
// The actual number of texture samples is twice this value (each "tap" takes two symmetrical depth texture samples).
const int num_taps[5] = { 3, 5, 12, 0, 0 };
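// With the table above, quality presets 0/1/2 take 3/5/12 taps (6/10/24 depth
// texture samples); preset 3 takes the adaptive path further down, which starts
// from SSAO_ADAPTIVE_TAP_BASE_COUNT taps and adds more per pixel based on the
// importance map, up to SSAO_MAX_TAPS.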
#define SSAO_TILT_SAMPLES_ENABLE_AT_QUALITY_PRESET (99) // to disable simply set to 99 or similar
#define SSAO_TILT_SAMPLES_AMOUNT (0.4)
//
#define SSAO_HALOING_REDUCTION_ENABLE_AT_QUALITY_PRESET (1) // to disable simply set to 99 or similar
#define SSAO_HALOING_REDUCTION_AMOUNT (0.6) // values from 0.0 - 1.0, 1.0 means max weighting (will cause artifacts, 0.8 is more reasonable)
//
#define SSAO_NORMAL_BASED_EDGES_ENABLE_AT_QUALITY_PRESET (2) // to disable simply set to 99 or similar
#define SSAO_NORMAL_BASED_EDGES_DOT_THRESHOLD (0.5) // use 0-0.1 for super-sharp normal-based edges
//
#define SSAO_DETAIL_AO_ENABLE_AT_QUALITY_PRESET (1) // whether to use detail; to disable simply set to 99 or similar
//
// WARNING: The MIP generation on the C++ side will be enabled on quality preset 2 regardless of
// this value, so if changing here, change the C++ side too.
#define SSAO_DEPTH_MIPS_ENABLE_AT_QUALITY_PRESET (2)
#define SSAO_DEPTH_MIPS_GLOBAL_OFFSET (-4.3) // best noise/quality/performance tradeoff, found empirically
//
// WARNING: The edge handling is hard-coded to 'disabled' on quality level 0, and enabled above,
// on the C++ side; while toggling it here will work for testing purposes, it will not yield
// performance gains (or correct results).
#define SSAO_DEPTH_BASED_EDGES_ENABLE_AT_QUALITY_PRESET (1)
//
#define SSAO_REDUCE_RADIUS_NEAR_SCREEN_BORDER_ENABLE_AT_QUALITY_PRESET (1)
#define SSAO_MAX_TAPS 32
#define SSAO_ADAPTIVE_TAP_BASE_COUNT 5
#define SSAO_ADAPTIVE_TAP_FLEXIBLE_COUNT (SSAO_MAX_TAPS - SSAO_ADAPTIVE_TAP_BASE_COUNT)
#define SSAO_DEPTH_MIP_LEVELS 4
layout(local_size_x = 8, local_size_y = 8, local_size_z = 1) in;
layout(set = 0, binding = 0) uniform sampler2DArray source_depth_mipmaps;
layout(rgba8, set = 0, binding = 1) uniform restrict readonly image2D source_normal;
layout(set = 0, binding = 2) uniform Constants { //get into a lower set
vec4 rotation_matrices[20];
}
constants;
#ifdef ADAPTIVE
layout(rg8, set = 1, binding = 0) uniform restrict readonly image2DArray source_ssao;
layout(set = 1, binding = 1) uniform sampler2D source_importance;
layout(set = 1, binding = 2, std430) buffer Counter {
uint sum;
}
counter;
#endif
layout(rg8, set = 2, binding = 0) uniform restrict writeonly image2D dest_image;
// This push_constant is full - 128 bytes - if you need to add more data, consider adding to the uniform buffer instead
layout(push_constant, std430) uniform Params {
ivec2 screen_size;
int pass;
int quality;
vec2 half_screen_pixel_size;
int size_multiplier;
float detail_intensity;
vec2 NDC_to_view_mul;
vec2 NDC_to_view_add;
vec2 pad2;
vec2 half_screen_pixel_size_x025;
float radius;
float intensity;
float shadow_power;
float shadow_clamp;
float fade_out_mul;
float fade_out_add;
float horizon_angle_threshold;
float inv_radius_near_limit;
bool is_orthogonal;
float neg_inv_radius;
float load_counter_avg_div;
float adaptive_sample_limit;
ivec2 pass_coord_offset;
vec2 pass_uv_offset;
}
params;
// packing/unpacking for edges; 2 bits per edge mean 4 gradient values (0, 0.33, 0.66, 1) for smoother transitions!
float pack_edges(vec4 p_edgesLRTB) {
p_edgesLRTB = round(clamp(p_edgesLRTB, 0.0, 1.0) * 3.05);
return dot(p_edgesLRTB, vec4(64.0 / 255.0, 16.0 / 255.0, 4.0 / 255.0, 1.0 / 255.0));
}
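// Worked example: edges (1.0, 0.0, 0.66, 0.33) round to the levels (3, 0, 2, 1),
// which pack into the byte 0b11_00_10_01 = 201 and are stored as 201/255; the
// blur pass then recovers (1.0, 0.0, 0.667, 0.333) via its unpack_edges().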
vec3 NDC_to_view_space(vec2 p_pos, float p_viewspace_depth) {
if (params.is_orthogonal) {
return vec3((params.NDC_to_view_mul * p_pos.xy + params.NDC_to_view_add), p_viewspace_depth);
} else {
return vec3((params.NDC_to_view_mul * p_pos.xy + params.NDC_to_view_add) * p_viewspace_depth, p_viewspace_depth);
}
}
// calculate effect radius and fit our screen sampling pattern inside it
void calculate_radius_parameters(const float p_pix_center_length, const vec2 p_pixel_size_at_center, out float r_lookup_radius, out float r_radius, out float r_fallof_sq) {
r_radius = params.radius;
// when too close, on-screen sampling disk will grow beyond screen size; limit this to avoid closeup temporal artifacts
const float too_close_limit = clamp(p_pix_center_length * params.inv_radius_near_limit, 0.0, 1.0) * 0.8 + 0.2;
r_radius *= too_close_limit;
// 0.85 is to reduce the radius to allow for more samples on a slope to still stay within influence
r_lookup_radius = (0.85 * r_radius) / p_pixel_size_at_center.x;
// used to calculate falloff (both for AO samples and per-sample weights)
r_fallof_sq = -1.0 / (r_radius * r_radius);
}
vec4 calculate_edges(const float p_center_z, const float p_left_z, const float p_right_z, const float p_top_z, const float p_bottom_z) {
// slope-sensitive depth-based edge detection
vec4 edgesLRTB = vec4(p_left_z, p_right_z, p_top_z, p_bottom_z) - p_center_z;
vec4 edgesLRTB_slope_adjusted = edgesLRTB + edgesLRTB.yxwz;
edgesLRTB = min(abs(edgesLRTB), abs(edgesLRTB_slope_adjusted));
return clamp((1.3 - edgesLRTB / (p_center_z * 0.040)), 0.0, 1.0);
}
vec3 load_normal(ivec2 p_pos) {
vec3 encoded_normal = normalize(imageLoad(source_normal, p_pos).xyz * 2.0 - 1.0);
encoded_normal.z = -encoded_normal.z;
return encoded_normal;
}
vec3 load_normal(ivec2 p_pos, ivec2 p_offset) {
vec3 encoded_normal = normalize(imageLoad(source_normal, p_pos + p_offset).xyz * 2.0 - 1.0);
encoded_normal.z = -encoded_normal.z;
return encoded_normal;
}
// all vectors in viewspace
float calculate_pixel_obscurance(vec3 p_pixel_normal, vec3 p_hit_delta, float p_fallof_sq) {
float length_sq = dot(p_hit_delta, p_hit_delta);
float NdotD = dot(p_pixel_normal, p_hit_delta) / sqrt(length_sq);
float falloff_mult = max(0.0, length_sq * p_fallof_sq + 1.0);
return max(0, NdotD - params.horizon_angle_threshold) * falloff_mult;
}
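// Per-sample obscurance: the cosine between the pixel normal and the hit
// direction, minus the horizon angle threshold (so grazing hits contribute
// nothing), scaled by a quadratic falloff that reaches zero at the effect
// radius (p_fallof_sq = -1 / radius^2, see calculate_radius_parameters()).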
void SSAO_tap_inner(const int p_quality_level, inout float r_obscurance_sum, inout float r_weight_sum, const vec2 p_sampling_uv, const float p_mip_level, const vec3 p_pix_center_pos, vec3 p_pixel_normal, const float p_fallof_sq, const float p_weight_mod) {
// get depth at sample
float viewspace_sample_z = textureLod(source_depth_mipmaps, vec3(p_sampling_uv, params.pass), p_mip_level).x;
// convert to viewspace
vec3 hit_pos = NDC_to_view_space(p_sampling_uv.xy, viewspace_sample_z).xyz;
vec3 hit_delta = hit_pos - p_pix_center_pos;
float obscurance = calculate_pixel_obscurance(p_pixel_normal, hit_delta, p_fallof_sq);
float weight = 1.0;
if (p_quality_level >= SSAO_HALOING_REDUCTION_ENABLE_AT_QUALITY_PRESET) {
float reduce = max(0, -hit_delta.z);
reduce = clamp(reduce * params.neg_inv_radius + 2.0, 0.0, 1.0);
weight = SSAO_HALOING_REDUCTION_AMOUNT * reduce + (1.0 - SSAO_HALOING_REDUCTION_AMOUNT);
}
weight *= p_weight_mod;
r_obscurance_sum += obscurance * weight;
r_weight_sum += weight;
}
void SSAOTap(const int p_quality_level, inout float r_obscurance_sum, inout float r_weight_sum, const int p_tap_index, const mat2 p_rot_scale, const vec3 p_pix_center_pos, vec3 p_pixel_normal, const vec2 p_normalized_screen_pos, const float p_mip_offset, const float p_fallof_sq, float p_weight_mod, vec2 p_norm_xy, float p_norm_xy_length) {
vec2 sample_offset;
float sample_pow_2_len;
// patterns
{
vec4 new_sample = sample_pattern[p_tap_index];
sample_offset = new_sample.xy * p_rot_scale;
sample_pow_2_len = new_sample.w; // precalculated, same as: sample_pow_2_len = log2( length( new_sample.xy ) );
p_weight_mod *= new_sample.z;
}
// snap to pixel center (more correct obscurance math, avoids artifacts)
sample_offset = round(sample_offset);
// calculate MIP based on the sample distance from the center, similar to as described
// in http://graphics.cs.williams.edu/papers/SAOHPG12/.
float mip_level = (p_quality_level < SSAO_DEPTH_MIPS_ENABLE_AT_QUALITY_PRESET) ? (0) : (sample_pow_2_len + p_mip_offset);
vec2 sampling_uv = sample_offset * params.half_screen_pixel_size + p_normalized_screen_pos;
SSAO_tap_inner(p_quality_level, r_obscurance_sum, r_weight_sum, sampling_uv, mip_level, p_pix_center_pos, p_pixel_normal, p_fallof_sq, p_weight_mod);
// for the second tap, just use the mirrored offset
vec2 sample_offset_mirrored_uv = -sample_offset;
// tilt the second set of samples so that the disk is effectively rotated by the normal
// effective at removing one set of artifacts, but too expensive for lower quality settings
if (p_quality_level >= SSAO_TILT_SAMPLES_ENABLE_AT_QUALITY_PRESET) {
float dot_norm = dot(sample_offset_mirrored_uv, p_norm_xy);
sample_offset_mirrored_uv -= dot_norm * p_norm_xy_length * p_norm_xy;
sample_offset_mirrored_uv = round(sample_offset_mirrored_uv);
}
// snap to pixel center (more correct obscurance math, avoids artifacts)
vec2 sampling_mirrored_uv = sample_offset_mirrored_uv * params.half_screen_pixel_size + p_normalized_screen_pos;
SSAO_tap_inner(p_quality_level, r_obscurance_sum, r_weight_sum, sampling_mirrored_uv, mip_level, p_pix_center_pos, p_pixel_normal, p_fallof_sq, p_weight_mod);
}
void generate_SSAO_shadows_internal(out float r_shadow_term, out vec4 r_edges, out float r_weight, const vec2 p_pos, int p_quality_level, bool p_adaptive_base) {
vec2 pos_rounded = trunc(p_pos);
uvec2 upos = uvec2(pos_rounded);
const int number_of_taps = (p_adaptive_base) ? (SSAO_ADAPTIVE_TAP_BASE_COUNT) : (num_taps[p_quality_level]);
float pix_z, pix_left_z, pix_top_z, pix_right_z, pix_bottom_z;
vec4 valuesUL = textureGather(source_depth_mipmaps, vec3(pos_rounded * params.half_screen_pixel_size, params.pass));
vec4 valuesBR = textureGather(source_depth_mipmaps, vec3((pos_rounded + vec2(1.0)) * params.half_screen_pixel_size, params.pass));
// get this pixel's viewspace depth
pix_z = valuesUL.y;
// get left right top bottom neighboring pixels for edge detection (gets compiled out on quality_level == 0)
pix_left_z = valuesUL.x;
pix_top_z = valuesUL.z;
pix_right_z = valuesBR.z;
pix_bottom_z = valuesBR.x;
vec2 normalized_screen_pos = pos_rounded * params.half_screen_pixel_size + params.half_screen_pixel_size_x025;
vec3 pix_center_pos = NDC_to_view_space(normalized_screen_pos, pix_z);
// Load this pixel's viewspace normal
uvec2 full_res_coord = upos * 2 * params.size_multiplier + params.pass_coord_offset.xy;
vec3 pixel_normal = load_normal(ivec2(full_res_coord));
const vec2 pixel_size_at_center = NDC_to_view_space(normalized_screen_pos.xy + params.half_screen_pixel_size, pix_center_pos.z).xy - pix_center_pos.xy;
float pixel_lookup_radius;
float fallof_sq;
// calculate effect radius and fit our screen sampling pattern inside it
float viewspace_radius;
calculate_radius_parameters(length(pix_center_pos), pixel_size_at_center, pixel_lookup_radius, viewspace_radius, fallof_sq);
// calculate samples rotation/scaling
mat2 rot_scale_matrix;
uint pseudo_random_index;
{
vec4 rotation_scale;
// reduce effect radius near the screen edges slightly; ideally, one would render a larger depth buffer (5% on each side) instead
if (!p_adaptive_base && (p_quality_level >= SSAO_REDUCE_RADIUS_NEAR_SCREEN_BORDER_ENABLE_AT_QUALITY_PRESET)) {
float near_screen_border = min(min(normalized_screen_pos.x, 1.0 - normalized_screen_pos.x), min(normalized_screen_pos.y, 1.0 - normalized_screen_pos.y));
near_screen_border = clamp(10.0 * near_screen_border + 0.6, 0.0, 1.0);
pixel_lookup_radius *= near_screen_border;
}
// load & update pseudo-random rotation matrix
pseudo_random_index = uint(pos_rounded.y * 2 + pos_rounded.x) % 5;
rotation_scale = constants.rotation_matrices[params.pass * 5 + pseudo_random_index];
rot_scale_matrix = mat2(rotation_scale.x * pixel_lookup_radius, rotation_scale.y * pixel_lookup_radius, rotation_scale.z * pixel_lookup_radius, rotation_scale.w * pixel_lookup_radius);
}
// the main obscurance & sample weight storage
float obscurance_sum = 0.0;
float weight_sum = 0.0;
// edge mask for between this and left/right/top/bottom neighbor pixels - not used in quality level 0 so initialize to "no edge" (1 is no edge, 0 is edge)
vec4 edgesLRTB = vec4(1.0, 1.0, 1.0, 1.0);
// Move center pixel slightly towards camera to avoid imprecision artifacts due to using of 16bit depth buffer.
pix_center_pos *= 0.99;
if (!p_adaptive_base && (p_quality_level >= SSAO_DEPTH_BASED_EDGES_ENABLE_AT_QUALITY_PRESET)) {
edgesLRTB = calculate_edges(pix_z, pix_left_z, pix_right_z, pix_top_z, pix_bottom_z);
}
// adds a more high definition sharp effect, which gets blurred out (reuses left/right/top/bottom samples that we used for edge detection)
if (!p_adaptive_base && (p_quality_level >= SSAO_DETAIL_AO_ENABLE_AT_QUALITY_PRESET)) {
// disable in case of quality level 4 (reference)
if (p_quality_level != 4) {
// approximate neighboring pixels' positions (actually just deltas, i.e. "position - pix_center_pos")
vec3 normalized_viewspace_dir = vec3(pix_center_pos.xy / pix_center_pos.zz, 1.0);
vec3 pixel_left_delta = vec3(-pixel_size_at_center.x, 0.0, 0.0) + normalized_viewspace_dir * (pix_left_z - pix_center_pos.z);
vec3 pixel_right_delta = vec3(+pixel_size_at_center.x, 0.0, 0.0) + normalized_viewspace_dir * (pix_right_z - pix_center_pos.z);
vec3 pixel_top_delta = vec3(0.0, -pixel_size_at_center.y, 0.0) + normalized_viewspace_dir * (pix_top_z - pix_center_pos.z);
vec3 pixel_bottom_delta = vec3(0.0, +pixel_size_at_center.y, 0.0) + normalized_viewspace_dir * (pix_bottom_z - pix_center_pos.z);
const float range_reduction = 4.0f; // this is to avoid various artifacts
const float modified_fallof_sq = range_reduction * fallof_sq;
vec4 additional_obscurance;
additional_obscurance.x = calculate_pixel_obscurance(pixel_normal, pixel_left_delta, modified_fallof_sq);
additional_obscurance.y = calculate_pixel_obscurance(pixel_normal, pixel_right_delta, modified_fallof_sq);
additional_obscurance.z = calculate_pixel_obscurance(pixel_normal, pixel_top_delta, modified_fallof_sq);
additional_obscurance.w = calculate_pixel_obscurance(pixel_normal, pixel_bottom_delta, modified_fallof_sq);
obscurance_sum += params.detail_intensity * dot(additional_obscurance, edgesLRTB);
}
}
// Sharp normals also create edges - but this adds to the cost as well
if (!p_adaptive_base && (p_quality_level >= SSAO_NORMAL_BASED_EDGES_ENABLE_AT_QUALITY_PRESET)) {
vec3 neighbour_normal_left = load_normal(ivec2(full_res_coord), ivec2(-2, 0));
vec3 neighbour_normal_right = load_normal(ivec2(full_res_coord), ivec2(2, 0));
vec3 neighbour_normal_top = load_normal(ivec2(full_res_coord), ivec2(0, -2));
vec3 neighbour_normal_bottom = load_normal(ivec2(full_res_coord), ivec2(0, 2));
const float dot_threshold = SSAO_NORMAL_BASED_EDGES_DOT_THRESHOLD;
vec4 normal_edgesLRTB;
normal_edgesLRTB.x = clamp((dot(pixel_normal, neighbour_normal_left) + dot_threshold), 0.0, 1.0);
normal_edgesLRTB.y = clamp((dot(pixel_normal, neighbour_normal_right) + dot_threshold), 0.0, 1.0);
normal_edgesLRTB.z = clamp((dot(pixel_normal, neighbour_normal_top) + dot_threshold), 0.0, 1.0);
normal_edgesLRTB.w = clamp((dot(pixel_normal, neighbour_normal_bottom) + dot_threshold), 0.0, 1.0);
edgesLRTB *= normal_edgesLRTB;
}
const float global_mip_offset = SSAO_DEPTH_MIPS_GLOBAL_OFFSET;
float mip_offset = (p_quality_level < SSAO_DEPTH_MIPS_ENABLE_AT_QUALITY_PRESET) ? (0) : (log2(pixel_lookup_radius) + global_mip_offset);
// Used to tilt the second set of samples so that the disk is effectively rotated by the normal
// effective at removing one set of artifacts, but too expensive for lower quality settings
vec2 norm_xy = vec2(pixel_normal.x, pixel_normal.y);
float norm_xy_length = length(norm_xy);
norm_xy /= vec2(norm_xy_length, -norm_xy_length);
norm_xy_length *= SSAO_TILT_SAMPLES_AMOUNT;
// standard, non-adaptive approach
if ((p_quality_level != 3) || p_adaptive_base) {
for (int i = 0; i < number_of_taps; i++) {
SSAOTap(p_quality_level, obscurance_sum, weight_sum, i, rot_scale_matrix, pix_center_pos, pixel_normal, normalized_screen_pos, mip_offset, fallof_sq, 1.0, norm_xy, norm_xy_length);
}
}
#ifdef ADAPTIVE
else {
// add new ones if needed
vec2 full_res_uv = normalized_screen_pos + params.pass_uv_offset.xy;
float importance = textureLod(source_importance, full_res_uv, 0.0).x;
// this normalizes the detail AO contribution across all pixels regardless of importance
obscurance_sum *= (SSAO_ADAPTIVE_TAP_BASE_COUNT / float(SSAO_MAX_TAPS)) + (importance * SSAO_ADAPTIVE_TAP_FLEXIBLE_COUNT / float(SSAO_MAX_TAPS));
// load existing base values
vec2 base_values = imageLoad(source_ssao, ivec3(upos, params.pass)).xy;
weight_sum += base_values.y * float(SSAO_ADAPTIVE_TAP_BASE_COUNT * 4.0);
obscurance_sum += (base_values.x) * weight_sum;
// increase importance around edges
float edge_count = dot(1.0 - edgesLRTB, vec4(1.0, 1.0, 1.0, 1.0));
float avg_total_importance = float(counter.sum) * params.load_counter_avg_div;
float importance_limiter = clamp(params.adaptive_sample_limit / avg_total_importance, 0.0, 1.0);
importance *= importance_limiter;
float additional_sample_count = SSAO_ADAPTIVE_TAP_FLEXIBLE_COUNT * importance;
const float blend_range = 3.0;
const float blend_range_inv = 1.0 / blend_range;
additional_sample_count += 0.5;
uint additional_samples = uint(additional_sample_count);
uint additional_samples_to = min(SSAO_MAX_TAPS, additional_samples + SSAO_ADAPTIVE_TAP_BASE_COUNT);
for (uint i = SSAO_ADAPTIVE_TAP_BASE_COUNT; i < additional_samples_to; i++) {
additional_sample_count -= 1.0f;
float weight_mod = clamp(additional_sample_count * blend_range_inv, 0.0, 1.0);
SSAOTap(p_quality_level, obscurance_sum, weight_sum, int(i), rot_scale_matrix, pix_center_pos, pixel_normal, normalized_screen_pos, mip_offset, fallof_sq, weight_mod, norm_xy, norm_xy_length);
}
}
#endif
// early out for adaptive base - just output weight (used for the next pass)
if (p_adaptive_base) {
float obscurance = obscurance_sum / weight_sum;
r_shadow_term = obscurance;
r_edges = vec4(0.0);
r_weight = weight_sum;
return;
}
// calculate weighted average
float obscurance = obscurance_sum / weight_sum;
// calculate fadeout (1 close, gradient, 0 far)
float fade_out = clamp(pix_center_pos.z * params.fade_out_mul + params.fade_out_add, 0.0, 1.0);
// Reduce the SSAO shadowing if we're on the edge to remove artifacts on edges (we don't care for the lower quality one)
if (!p_adaptive_base && (p_quality_level >= SSAO_DEPTH_BASED_EDGES_ENABLE_AT_QUALITY_PRESET)) {
// when there are more than 2 opposing edges, start fading out the occlusion to reduce aliasing artifacts
float edge_fadeout_factor = clamp((1.0 - edgesLRTB.x - edgesLRTB.y) * 0.35, 0.0, 1.0) + clamp((1.0 - edgesLRTB.z - edgesLRTB.w) * 0.35, 0.0, 1.0);
fade_out *= clamp(1.0 - edge_fadeout_factor, 0.0, 1.0);
}
// strength
obscurance = params.intensity * obscurance;
// clamp
obscurance = min(obscurance, params.shadow_clamp);
// fadeout
obscurance *= fade_out;
// conceptually switch from obscurance to "occlusion" used in the sense of visibility (the value grows with visibility,
// occlusion == 1 implies full visibility), to be in line with what is more commonly used.
float occlusion = 1.0 - obscurance;
// modify the gradient
// note: this cannot be moved to a later pass because of loss of precision after storing in the render target
occlusion = pow(clamp(occlusion, 0.0, 1.0), params.shadow_power);
// outputs!
r_shadow_term = occlusion; // Our final 'occlusion' term (0 means fully occluded, 1 means fully lit)
r_edges = edgesLRTB; // These are used to prevent blurring across edges, 1 means no edge, 0 means edge, 0.5 means half way there, etc.
r_weight = weight_sum;
}
void main() {
float out_shadow_term;
float out_weight;
vec4 out_edges;
ivec2 ssC = ivec2(gl_GlobalInvocationID.xy);
if (any(greaterThanEqual(ssC, params.screen_size))) { //too large, do nothing
return;
}
vec2 uv = vec2(gl_GlobalInvocationID) + vec2(0.5);
#ifdef SSAO_BASE
generate_SSAO_shadows_internal(out_shadow_term, out_edges, out_weight, uv, params.quality, true);
imageStore(dest_image, ivec2(gl_GlobalInvocationID.xy), vec4(out_shadow_term, out_weight / (float(SSAO_ADAPTIVE_TAP_BASE_COUNT) * 4.0), 0.0, 0.0));
#else
generate_SSAO_shadows_internal(out_shadow_term, out_edges, out_weight, uv, params.quality, false); // pass in quality levels
if (params.quality == 0) {
out_edges = vec4(1.0);
}
imageStore(dest_image, ivec2(gl_GlobalInvocationID.xy), vec4(out_shadow_term, pack_edges(out_edges), 0.0, 0.0));
#endif
}

View File

@@ -0,0 +1,154 @@
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2016, Intel Corporation
// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
// documentation files (the "Software"), to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to the following conditions:
// The above copyright notice and this permission notice shall be included in all copies or substantial portions of
// the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
// THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// File changes (yyyy-mm-dd)
// 2016-09-07: filip.strugar@intel.com: first commit
// 2020-12-05: clayjohn: convert to Vulkan and Godot
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#[compute]
#version 450
#VERSION_DEFINES
layout(local_size_x = 8, local_size_y = 8, local_size_z = 1) in;
layout(set = 0, binding = 0) uniform sampler2D source_ssao;
layout(rg8, set = 1, binding = 0) uniform restrict writeonly image2D dest_image;
layout(push_constant, std430) uniform Params {
float edge_sharpness;
float pad;
vec2 half_screen_pixel_size;
}
params;
vec4 unpack_edges(float p_packed_val) {
uint packed_val = uint(p_packed_val * 255.5);
vec4 edgesLRTB;
edgesLRTB.x = float((packed_val >> 6) & 0x03) / 3.0;
edgesLRTB.y = float((packed_val >> 4) & 0x03) / 3.0;
edgesLRTB.z = float((packed_val >> 2) & 0x03) / 3.0;
edgesLRTB.w = float((packed_val >> 0) & 0x03) / 3.0;
return clamp(edgesLRTB + params.edge_sharpness, 0.0, 1.0);
}
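// Inverse of pack_edges() in the gather pass: each 2-bit field expands back to
// one of four levels (0, 1/3, 2/3, 1), and edge_sharpness is added as a bias
// before the values are used as weights for the L/R/T/B neighbor samples.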
void add_sample(float p_ssao_value, float p_edge_value, inout float r_sum, inout float r_sum_weight) {
float weight = p_edge_value;
r_sum += (weight * p_ssao_value);
r_sum_weight += weight;
}
#ifdef MODE_WIDE
vec2 sample_blurred_wide(vec2 p_coord) {
vec2 vC = textureLodOffset(source_ssao, vec2(p_coord), 0.0, ivec2(0, 0)).xy;
vec2 vL = textureLodOffset(source_ssao, vec2(p_coord), 0.0, ivec2(-2, 0)).xy;
vec2 vT = textureLodOffset(source_ssao, vec2(p_coord), 0.0, ivec2(0, -2)).xy;
vec2 vR = textureLodOffset(source_ssao, vec2(p_coord), 0.0, ivec2(2, 0)).xy;
vec2 vB = textureLodOffset(source_ssao, vec2(p_coord), 0.0, ivec2(0, 2)).xy;
float packed_edges = vC.y;
vec4 edgesLRTB = unpack_edges(packed_edges);
edgesLRTB.x *= unpack_edges(vL.y).y;
edgesLRTB.z *= unpack_edges(vT.y).w;
edgesLRTB.y *= unpack_edges(vR.y).x;
edgesLRTB.w *= unpack_edges(vB.y).z;
float ssao_value = vC.x;
float ssao_valueL = vL.x;
float ssao_valueT = vT.x;
float ssao_valueR = vR.x;
float ssao_valueB = vB.x;
float sum_weight = 0.8f;
float sum = ssao_value * sum_weight;
add_sample(ssao_valueL, edgesLRTB.x, sum, sum_weight);
add_sample(ssao_valueR, edgesLRTB.y, sum, sum_weight);
add_sample(ssao_valueT, edgesLRTB.z, sum, sum_weight);
add_sample(ssao_valueB, edgesLRTB.w, sum, sum_weight);
float ssao_avg = sum / sum_weight;
ssao_value = ssao_avg;
return vec2(ssao_value, packed_edges);
}
#endif
#ifdef MODE_SMART
vec2 sample_blurred(vec3 p_pos, vec2 p_coord) {
float packed_edges = texelFetch(source_ssao, ivec2(p_pos.xy), 0).y;
vec4 edgesLRTB = unpack_edges(packed_edges);
vec4 valuesUL = textureGather(source_ssao, vec2(p_coord - params.half_screen_pixel_size * 0.5));
vec4 valuesBR = textureGather(source_ssao, vec2(p_coord + params.half_screen_pixel_size * 0.5));
float ssao_value = valuesUL.y;
float ssao_valueL = valuesUL.x;
float ssao_valueT = valuesUL.z;
float ssao_valueR = valuesBR.z;
float ssao_valueB = valuesBR.x;
float sum_weight = 0.5;
float sum = ssao_value * sum_weight;
add_sample(ssao_valueL, edgesLRTB.x, sum, sum_weight);
add_sample(ssao_valueR, edgesLRTB.y, sum, sum_weight);
add_sample(ssao_valueT, edgesLRTB.z, sum, sum_weight);
add_sample(ssao_valueB, edgesLRTB.w, sum, sum_weight);
float ssao_avg = sum / sum_weight;
ssao_value = ssao_avg;
return vec2(ssao_value, packed_edges);
}
#endif
void main() {
// Pixel being shaded
ivec2 ssC = ivec2(gl_GlobalInvocationID.xy);
#ifdef MODE_NON_SMART
vec2 half_pixel = params.half_screen_pixel_size * 0.5;
vec2 uv = (vec2(gl_GlobalInvocationID.xy) + vec2(0.5, 0.5)) * params.half_screen_pixel_size;
vec2 center = textureLod(source_ssao, vec2(uv), 0.0).xy;
vec4 vals;
vals.x = textureLod(source_ssao, vec2(uv + vec2(-half_pixel.x * 3, -half_pixel.y)), 0.0).x;
vals.y = textureLod(source_ssao, vec2(uv + vec2(+half_pixel.x, -half_pixel.y * 3)), 0.0).x;
vals.z = textureLod(source_ssao, vec2(uv + vec2(-half_pixel.x, +half_pixel.y * 3)), 0.0).x;
vals.w = textureLod(source_ssao, vec2(uv + vec2(+half_pixel.x * 3, +half_pixel.y)), 0.0).x;
vec2 sampled = vec2(dot(vals, vec4(0.2)) + center.x * 0.2, center.y);
#else
#ifdef MODE_SMART
vec2 sampled = sample_blurred(vec3(gl_GlobalInvocationID), (vec2(gl_GlobalInvocationID.xy) + vec2(0.5, 0.5)) * params.half_screen_pixel_size);
#else // MODE_WIDE
vec2 sampled = sample_blurred_wide((vec2(gl_GlobalInvocationID.xy) + vec2(0.5, 0.5)) * params.half_screen_pixel_size);
#endif
#endif
imageStore(dest_image, ivec2(ssC), vec4(sampled, 0.0, 0.0));
}

View File

@@ -0,0 +1,123 @@
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2016, Intel Corporation
// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
// documentation files (the "Software"), to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to the following conditions:
// The above copyright notice and this permission notice shall be included in all copies or substantial portions of
// the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
// THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// File changes (yyyy-mm-dd)
// 2016-09-07: filip.strugar@intel.com: first commit
// 2020-12-05: clayjohn: convert to Vulkan and Godot
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#[compute]
#version 450
#VERSION_DEFINES
layout(local_size_x = 8, local_size_y = 8, local_size_z = 1) in;
#ifdef GENERATE_MAP
layout(set = 0, binding = 0) uniform sampler2DArray source_texture;
#else
layout(set = 0, binding = 0) uniform sampler2D source_importance;
#endif
layout(r8, set = 1, binding = 0) uniform restrict writeonly image2D dest_image;
#ifdef PROCESS_MAPB
layout(set = 2, binding = 0, std430) buffer Counter {
uint sum;
}
counter;
#endif
layout(push_constant, std430) uniform Params {
vec2 half_screen_pixel_size;
float intensity;
float power;
}
params;
void main() {
// Pixel being shaded
ivec2 ssC = ivec2(gl_GlobalInvocationID.xy);
#ifdef GENERATE_MAP
// importance map stuff
uvec2 base_position = ssC * 2;
vec2 base_uv = (vec2(base_position) + vec2(0.5f, 0.5f)) * params.half_screen_pixel_size;
float minV = 1.0;
float maxV = 0.0;
for (int i = 0; i < 4; i++) {
vec4 vals = textureGather(source_texture, vec3(base_uv, i));
// apply the same modifications that would have been applied in the main shader
vals = params.intensity * vals;
vals = 1 - vals;
vals = pow(clamp(vals, 0.0, 1.0), vec4(params.power));
maxV = max(maxV, max(max(vals.x, vals.y), max(vals.z, vals.w)));
minV = min(minV, min(min(vals.x, vals.y), min(vals.z, vals.w)));
}
float min_max_diff = maxV - minV;
imageStore(dest_image, ssC, vec4(pow(clamp(min_max_diff * 2.0, 0.0, 1.0), 0.8)));
#endif
#ifdef PROCESS_MAPA
vec2 uv = (vec2(ssC) + 0.5f) * params.half_screen_pixel_size * 2.0;
float center = textureLod(source_importance, uv, 0.0).x;
vec2 half_pixel = params.half_screen_pixel_size;
vec4 vals;
vals.x = textureLod(source_importance, uv + vec2(-half_pixel.x * 3, -half_pixel.y), 0.0).x;
vals.y = textureLod(source_importance, uv + vec2(+half_pixel.x, -half_pixel.y * 3), 0.0).x;
vals.z = textureLod(source_importance, uv + vec2(+half_pixel.x * 3, +half_pixel.y), 0.0).x;
vals.w = textureLod(source_importance, uv + vec2(-half_pixel.x, +half_pixel.y * 3), 0.0).x;
float avg = dot(vals, vec4(0.25, 0.25, 0.25, 0.25));
imageStore(dest_image, ssC, vec4(avg));
#endif
#ifdef PROCESS_MAPB
vec2 uv = (vec2(ssC) + 0.5f) * params.half_screen_pixel_size * 2.0;
float center = textureLod(source_importance, uv, 0.0).x;
vec2 half_pixel = params.half_screen_pixel_size;
vec4 vals;
vals.x = textureLod(source_importance, uv + vec2(-half_pixel.x, -half_pixel.y * 3), 0.0).x;
vals.y = textureLod(source_importance, uv + vec2(+half_pixel.x * 3, -half_pixel.y), 0.0).x;
vals.z = textureLod(source_importance, uv + vec2(+half_pixel.x, +half_pixel.y * 3), 0.0).x;
vals.w = textureLod(source_importance, uv + vec2(-half_pixel.x * 3, +half_pixel.y), 0.0).x;
float avg = dot(vals, vec4(0.25, 0.25, 0.25, 0.25));
imageStore(dest_image, ssC, vec4(avg));
// sum the average; to avoid overflowing we assume max AO resolution is not bigger than 16384x16384; so quarter res (used here) will be 4096x4096, which leaves us with 8 bits per pixel
uint sum = uint(clamp(avg, 0.0, 1.0) * 255.0 + 0.5);
// save every 9th to avoid atomicAdd congestion - since we're blurring, this is good enough; compensated by multiplying load_counter_avg_div by 9
if (((ssC.x % 3) + (ssC.y % 3)) == 0) {
atomicAdd(counter.sum, sum);
}
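// Worked out: at the assumed 16384x16384 cap this quarter-resolution pass covers at most
// 4096x4096 = 2^24 pixels, so a 32-bit counter leaves 2^32 / 2^24 = 256 counts of
// headroom per pixel while each contribution is at most 255. Only pixels with
// x % 3 == 0 and y % 3 == 0 (1 in 9) actually add to the counter, which is what the
// load_counter_avg_div * 9 compensation mentioned above accounts for.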
#endif
}

View File

@@ -0,0 +1,119 @@
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2016, Intel Corporation
// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
// documentation files (the "Software"), to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to the following conditions:
// The above copyright notice and this permission notice shall be included in all copies or substantial portions of
// the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
// THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// File changes (yyyy-mm-dd)
// 2016-09-07: filip.strugar@intel.com: first commit
// 2020-12-05: clayjohn: convert to Vulkan and Godot
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#[compute]
#version 450
#VERSION_DEFINES
layout(local_size_x = 8, local_size_y = 8, local_size_z = 1) in;
layout(rgba8, set = 0, binding = 0) uniform restrict writeonly image2D dest_image;
layout(set = 1, binding = 0) uniform sampler2DArray source_texture;
layout(push_constant, std430) uniform Params {
float inv_sharpness;
uint size_modifier;
vec2 pixel_size;
}
params;
vec4 unpack_edges(float p_packed_val) {
uint packed_val = uint(p_packed_val * 255.5);
vec4 edgesLRTB;
edgesLRTB.x = float((packed_val >> 6) & 0x03) / 3.0;
edgesLRTB.y = float((packed_val >> 4) & 0x03) / 3.0;
edgesLRTB.z = float((packed_val >> 2) & 0x03) / 3.0;
edgesLRTB.w = float((packed_val >> 0) & 0x03) / 3.0;
return clamp(edgesLRTB + params.inv_sharpness, 0.0, 1.0);
}
void main() {
ivec2 ssC = ivec2(gl_GlobalInvocationID.xy);
if (any(greaterThanEqual(ssC, ivec2(1.0 / params.pixel_size)))) { //too large, do nothing
return;
}
#ifdef MODE_SMART
float ao;
uvec2 pix_pos = uvec2(gl_GlobalInvocationID.xy);
vec2 uv = (gl_GlobalInvocationID.xy + vec2(0.5)) * params.pixel_size;
// calculate index in the four deinterleaved source array texture
int mx = int(pix_pos.x % 2);
int my = int(pix_pos.y % 2);
int index_center = mx + my * 2; // center index
int index_horizontal = (1 - mx) + my * 2; // neighboring, horizontal
int index_vertical = mx + (1 - my) * 2; // neighboring, vertical
int index_diagonal = (1 - mx) + (1 - my) * 2; // diagonal
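// Example: full-resolution pixel (5, 8) has mx = 1, my = 0, so its own AO comes from
// slice 1 of the deinterleaved 2x2 array texture, the horizontal neighbor from slice 0,
// the vertical neighbor from slice 3 and the diagonal neighbor from slice 2.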
vec2 center_val = texelFetch(source_texture, ivec3(pix_pos / uvec2(params.size_modifier), index_center), 0).xy;
ao = center_val.x;
vec4 edgesLRTB = unpack_edges(center_val.y);
// convert index shifts to sampling offsets
float fmx = float(mx);
float fmy = float(my);
// in case of an edge, push sampling offsets away from the edge (towards pixel center)
float fmxe = (edgesLRTB.y - edgesLRTB.x);
float fmye = (edgesLRTB.w - edgesLRTB.z);
// calculate final sampling offsets and sample using bilinear filter
vec2 uv_horizontal = (gl_GlobalInvocationID.xy + vec2(0.5) + vec2(fmx + fmxe - 0.5, 0.5 - fmy)) * params.pixel_size;
float ao_horizontal = textureLod(source_texture, vec3(uv_horizontal, index_horizontal), 0.0).x;
vec2 uv_vertical = (gl_GlobalInvocationID.xy + vec2(0.5) + vec2(0.5 - fmx, fmy - 0.5 + fmye)) * params.pixel_size;
float ao_vertical = textureLod(source_texture, vec3(uv_vertical, index_vertical), 0.0).x;
vec2 uv_diagonal = (gl_GlobalInvocationID.xy + vec2(0.5) + vec2(fmx - 0.5 + fmxe, fmy - 0.5 + fmye)) * params.pixel_size;
float ao_diagonal = textureLod(source_texture, vec3(uv_diagonal, index_diagonal), 0.0).x;
// reduce weight for samples near edge - if the edge is on both sides, weight goes to 0
vec4 blendWeights;
blendWeights.x = 1.0;
blendWeights.y = (edgesLRTB.x + edgesLRTB.y) * 0.5;
blendWeights.z = (edgesLRTB.z + edgesLRTB.w) * 0.5;
blendWeights.w = (blendWeights.y + blendWeights.z) * 0.5;
// calculate weighted average
float blendWeightsSum = dot(blendWeights, vec4(1.0, 1.0, 1.0, 1.0));
ao = dot(vec4(ao, ao_horizontal, ao_vertical, ao_diagonal), blendWeights) / blendWeightsSum;
imageStore(dest_image, ivec2(gl_GlobalInvocationID.xy), vec4(ao));
#else // !MODE_SMART
vec2 uv = (gl_GlobalInvocationID.xy + vec2(0.5)) * params.pixel_size;
#ifdef MODE_HALF
float a = textureLod(source_texture, vec3(uv, 0), 0.0).x;
float d = textureLod(source_texture, vec3(uv, 3), 0.0).x;
float avg = (a + d) * 0.5;
#else
float a = textureLod(source_texture, vec3(uv, 0), 0.0).x;
float b = textureLod(source_texture, vec3(uv, 1), 0.0).x;
float c = textureLod(source_texture, vec3(uv, 2), 0.0).x;
float d = textureLod(source_texture, vec3(uv, 3), 0.0).x;
float avg = (a + b + c + d) * 0.25;
#endif
imageStore(dest_image, ivec2(gl_GlobalInvocationID.xy), vec4(avg));
#endif
}

View File

@@ -0,0 +1,440 @@
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2016, Intel Corporation
// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
// documentation files (the "Software"), to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to the following conditions:
// The above copyright notice and this permission notice shall be included in all copies or substantial portions of
// the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
// THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// File changes (yyyy-mm-dd)
// 2016-09-07: filip.strugar@intel.com: first commit
// 2020-12-05: clayjohn: convert to Vulkan and Godot
// 2021-05-27: clayjohn: convert SSAO to SSIL
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#[compute]
#version 450
#VERSION_DEFINES
#define SSIL_MAIN_DISK_SAMPLE_COUNT (32)
const vec4 sample_pattern[SSIL_MAIN_DISK_SAMPLE_COUNT] = {
vec4(0.78488064, 0.56661671, 1.500000, -0.126083), vec4(0.26022232, -0.29575172, 1.500000, -1.064030), vec4(0.10459357, 0.08372527, 1.110000, -2.730563), vec4(-0.68286800, 0.04963045, 1.090000, -0.498827),
vec4(-0.13570161, -0.64190155, 1.250000, -0.532765), vec4(-0.26193795, -0.08205118, 0.670000, -1.783245), vec4(-0.61177456, 0.66664219, 0.710000, -0.044234), vec4(0.43675563, 0.25119025, 0.610000, -1.167283),
vec4(0.07884444, 0.86618668, 0.640000, -0.459002), vec4(-0.12790935, -0.29869005, 0.600000, -1.729424), vec4(-0.04031125, 0.02413622, 0.600000, -4.792042), vec4(0.16201244, -0.52851415, 0.790000, -1.067055),
vec4(-0.70991218, 0.47301072, 0.640000, -0.335236), vec4(0.03277707, -0.22349690, 0.600000, -1.982384), vec4(0.68921727, 0.36800742, 0.630000, -0.266718), vec4(0.29251814, 0.37775412, 0.610000, -1.422520),
vec4(-0.12224089, 0.96582592, 0.600000, -0.426142), vec4(0.11071457, -0.16131058, 0.600000, -2.165947), vec4(0.46562141, -0.59747696, 0.600000, -0.189760), vec4(-0.51548797, 0.11804193, 0.600000, -1.246800),
vec4(0.89141309, -0.42090443, 0.600000, 0.028192), vec4(-0.32402530, -0.01591529, 0.600000, -1.543018), vec4(0.60771245, 0.41635221, 0.600000, -0.605411), vec4(0.02379565, -0.08239821, 0.600000, -3.809046),
vec4(0.48951152, -0.23657045, 0.600000, -1.189011), vec4(-0.17611565, -0.81696892, 0.600000, -0.513724), vec4(-0.33930185, -0.20732205, 0.600000, -1.698047), vec4(-0.91974425, 0.05403209, 0.600000, 0.062246),
vec4(-0.15064627, -0.14949332, 0.600000, -1.896062), vec4(0.53180975, -0.35210401, 0.600000, -0.758838), vec4(0.41487166, 0.81442589, 0.600000, -0.505648), vec4(-0.24106961, -0.32721516, 0.600000, -1.665244)
};
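// Layout of each sample_pattern entry, as consumed by SSILTap() below: .xy is the tap
// offset on the sampling disk (rotated and scaled per pixel), .z scales that tap's
// weight, and .w is a precalculated distance term added to the mip offset for
// depth-mip selection.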
// these values can be changed (up to SSIL_MAX_TAPS) with no changes required elsewhere; values for 4th and 5th preset are ignored but array needed to avoid compilation errors
// the actual number of texture samples is two times this value (each "tap" has two symmetrical depth texture samples)
const int num_taps[5] = { 3, 5, 12, 0, 0 };
#define SSIL_TILT_SAMPLES_ENABLE_AT_QUALITY_PRESET (99) // to disable simply set to 99 or similar
#define SSIL_TILT_SAMPLES_AMOUNT (0.4)
//
#define SSIL_HALOING_REDUCTION_ENABLE_AT_QUALITY_PRESET (1) // to disable simply set to 99 or similar
#define SSIL_HALOING_REDUCTION_AMOUNT (0.8) // values from 0.0 - 1.0, 1.0 means max weighting (will cause artifacts, 0.8 is more reasonable)
//
#define SSIL_DEPTH_MIPS_ENABLE_AT_QUALITY_PRESET (2)
#define SSIL_DEPTH_MIPS_GLOBAL_OFFSET (-4.3) // best noise/quality/performance tradeoff, found empirically
//
// WARNING: The edge handling is hard-coded to 'disabled' on quality level 0, and enabled above,
// on the C++ side; while toggling it here will work for testing purposes, it will not yield
// performance gains (or correct results).
#define SSIL_DEPTH_BASED_EDGES_ENABLE_AT_QUALITY_PRESET (1)
//
#define SSIL_REDUCE_RADIUS_NEAR_SCREEN_BORDER_ENABLE_AT_QUALITY_PRESET (1)
#define SSIL_MAX_TAPS 32
#define SSIL_ADAPTIVE_TAP_BASE_COUNT 5
#define SSIL_ADAPTIVE_TAP_FLEXIBLE_COUNT (SSIL_MAX_TAPS - SSIL_ADAPTIVE_TAP_BASE_COUNT)
#define SSIL_DEPTH_MIP_LEVELS 4
layout(local_size_x = 8, local_size_y = 8, local_size_z = 1) in;
layout(set = 0, binding = 0) uniform sampler2DArray source_depth_mipmaps;
layout(rgba8, set = 0, binding = 1) uniform restrict readonly image2D source_normal;
layout(set = 0, binding = 2) uniform Constants { //get into a lower set
vec4 rotation_matrices[20];
}
constants;
#ifdef ADAPTIVE
layout(rgba16, set = 1, binding = 0) uniform restrict readonly image2DArray source_ssil;
layout(set = 1, binding = 1) uniform sampler2D source_importance;
layout(set = 1, binding = 2, std430) buffer Counter {
uint sum;
}
counter;
#endif
layout(rgba16, set = 2, binding = 0) uniform restrict writeonly image2D dest_image;
layout(r8, set = 2, binding = 1) uniform image2D edges_weights_image;
layout(set = 3, binding = 0) uniform sampler2D last_frame;
layout(set = 3, binding = 1) uniform ProjectionConstants {
mat4 reprojection;
}
projection_constants;
layout(push_constant, std430) uniform Params {
ivec2 screen_size;
int pass;
int quality;
vec2 half_screen_pixel_size;
vec2 half_screen_pixel_size_x025;
vec2 NDC_to_view_mul;
vec2 NDC_to_view_add;
vec2 pad2;
float z_near;
float z_far;
float radius;
float intensity;
int size_multiplier;
int pad;
float fade_out_mul;
float fade_out_add;
float normal_rejection_amount;
float inv_radius_near_limit;
bool is_orthogonal;
float neg_inv_radius;
float load_counter_avg_div;
float adaptive_sample_limit;
ivec2 pass_coord_offset;
vec2 pass_uv_offset;
}
params;
float pack_edges(vec4 p_edgesLRTB) {
p_edgesLRTB = round(clamp(p_edgesLRTB, 0.0, 1.0) * 3.05);
return dot(p_edgesLRTB, vec4(64.0 / 255.0, 16.0 / 255.0, 4.0 / 255.0, 1.0 / 255.0));
}
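// Example: edges (1.0, 2/3, 1/3, 0.0) quantize to (3, 2, 1, 0) and pack into one byte as
// (3 * 64 + 2 * 16 + 1 * 4 + 0) / 255 = 228 / 255, i.e. two bits per edge with L in bits
// 7-6, R in 5-4, T in 3-2 and B in 1-0; unpack_edges() in the blur and interleave passes
// reverses this.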
vec3 NDC_to_view_space(vec2 p_pos, float p_viewspace_depth) {
if (params.is_orthogonal) {
return vec3((params.NDC_to_view_mul * p_pos.xy + params.NDC_to_view_add), p_viewspace_depth);
} else {
return vec3((params.NDC_to_view_mul * p_pos.xy + params.NDC_to_view_add) * p_viewspace_depth, p_viewspace_depth);
}
}
// calculate effect radius and fit our screen sampling pattern inside it
void calculate_radius_parameters(const float p_pix_center_length, const vec2 p_pixel_size_at_center, out float r_lookup_radius, out float r_radius, out float r_fallof_sq) {
r_radius = params.radius;
// when too close, on-screen sampling disk will grow beyond screen size; limit this to avoid closeup temporal artifacts
const float too_close_limit = clamp(p_pix_center_length * params.inv_radius_near_limit, 0.0, 1.0) * 0.8 + 0.2;
r_radius *= too_close_limit;
// 0.85 is to reduce the radius to allow for more samples on a slope to still stay within influence
r_lookup_radius = (0.85 * r_radius) / p_pixel_size_at_center.x;
// used to calculate falloff (both for AO samples and per-sample weights)
r_fallof_sq = -1.0 / (r_radius * r_radius);
}
vec4 calculate_edges(const float p_center_z, const float p_left_z, const float p_right_z, const float p_top_z, const float p_bottom_z) {
// slope-sensitive depth-based edge detection
vec4 edgesLRTB = vec4(p_left_z, p_right_z, p_top_z, p_bottom_z) - p_center_z;
vec4 edgesLRTB_slope_adjusted = edgesLRTB + edgesLRTB.yxwz;
edgesLRTB = min(abs(edgesLRTB), abs(edgesLRTB_slope_adjusted));
return clamp((1.3 - edgesLRTB / (p_center_z * 0.040)), 0.0, 1.0);
}
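// How the slope adjustment works: edgesLRTB holds signed depth deltas to the four
// neighbors. On a continuous sloped surface the left/right (and top/bottom) deltas are
// roughly equal and opposite, so the swizzled .yxwz sum cancels them and no edge is
// flagged; a genuine depth discontinuity only appears on one side, survives the min(),
// and is then compared against roughly 4% of the center depth to yield a 0..1 factor
// where 1.0 means "no edge".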
vec3 load_normal(ivec2 p_pos) {
vec3 encoded_normal = normalize(imageLoad(source_normal, p_pos).xyz * 2.0 - 1.0);
encoded_normal.z = -encoded_normal.z;
return encoded_normal;
}
vec3 load_normal(ivec2 p_pos, ivec2 p_offset) {
vec3 encoded_normal = normalize(imageLoad(source_normal, p_pos + p_offset).xyz * 2.0 - 1.0);
encoded_normal.z = -encoded_normal.z;
return encoded_normal;
}
// all vectors in viewspace
float calculate_pixel_obscurance(vec3 p_pixel_normal, vec3 p_hit_delta, float p_fallof_sq) {
float length_sq = dot(p_hit_delta, p_hit_delta);
float NdotD = dot(p_pixel_normal, p_hit_delta) / sqrt(length_sq);
float falloff_mult = max(0.0, length_sq * p_fallof_sq + 1.0);
return max(0, NdotD - 0.05) * falloff_mult;
}
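// Example: a sample hit halfway into the effect radius (length_sq = 0.25 * r * r, so the
// falloff term is 1.0 - 0.25 = 0.75) whose direction is 60 degrees off the normal
// (NdotD = 0.5) contributes max(0, 0.5 - 0.05) * 0.75 = 0.3375. The 0.05 bias discards
// near-tangent hits, and p_fallof_sq = -1 / (r * r) makes the falloff reach zero exactly
// at the effect radius.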
void SSIL_tap_inner(const int p_quality_level, inout vec3 r_color_sum, inout float r_obscurance_sum, inout float r_weight_sum, const vec2 p_sampling_uv, const float p_mip_level, const vec3 p_pix_center_pos, vec3 p_pixel_normal, const float p_fallof_sq, const float p_weight_mod) {
// get depth at sample
float viewspace_sample_z = textureLod(source_depth_mipmaps, vec3(p_sampling_uv, params.pass), p_mip_level).x;
vec3 sample_normal = load_normal(ivec2(p_sampling_uv * vec2(params.screen_size)));
// convert to viewspace
vec3 hit_pos = NDC_to_view_space(p_sampling_uv.xy, viewspace_sample_z);
vec3 hit_delta = hit_pos - p_pix_center_pos;
float obscurance = calculate_pixel_obscurance(p_pixel_normal, hit_delta, p_fallof_sq);
float weight = 1.0;
if (p_quality_level >= SSIL_HALOING_REDUCTION_ENABLE_AT_QUALITY_PRESET) {
float reduce = max(0, -hit_delta.z);
reduce = clamp(reduce * params.neg_inv_radius + 2.0, 0.0, 1.0);
weight = SSIL_HALOING_REDUCTION_AMOUNT * reduce + (1.0 - SSIL_HALOING_REDUCTION_AMOUNT);
}
// Translate sampling_uv to last screen's coordinates
const vec4 sample_pos = projection_constants.reprojection * vec4(p_sampling_uv * 2.0 - 1.0, (viewspace_sample_z - params.z_near) / (params.z_far - params.z_near) * 2.0 - 1.0, 1.0);
vec2 reprojected_sampling_uv = (sample_pos.xy / sample_pos.w) * 0.5 + 0.5;
weight *= p_weight_mod;
r_obscurance_sum += obscurance * weight;
vec3 sample_color = textureLod(last_frame, reprojected_sampling_uv, 5.0).rgb;
// Reduce impact of fireflies by tonemapping before averaging: http://graphicrants.blogspot.com/2013/12/tone-mapping.html
sample_color /= (1.0 + dot(sample_color, vec3(0.299, 0.587, 0.114)));
r_color_sum += sample_color * obscurance * weight * mix(1.0, smoothstep(0.0, 0.1, -dot(sample_normal, normalize(hit_delta))), params.normal_rejection_amount);
r_weight_sum += weight;
}
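// Note on the firefly reduction above: sample colors are averaged in a compressed space,
// c' = c / (1 + luma(c)), and generate_SSIL() inverts this after the weighted average
// (color /= 1.0 - luma(color)). Example with two scalar samples of 1.0 and 100.0: a plain
// average gives 50.5, while the compressed average (0.5 + ~0.99) / 2 = ~0.745 maps back
// to only ~2.9, so one very bright sample no longer dominates the result.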
void SSILTap(const int p_quality_level, inout vec3 r_color_sum, inout float r_obscurance_sum, inout float r_weight_sum, const int p_tap_index, const mat2 p_rot_scale, const vec3 p_pix_center_pos, vec3 p_pixel_normal, const vec2 p_normalized_screen_pos, const float p_mip_offset, const float p_fallof_sq, float p_weight_mod, vec2 p_norm_xy, float p_norm_xy_length) {
vec2 sample_offset;
float sample_pow_2_len;
// patterns
{
vec4 new_sample = sample_pattern[p_tap_index];
sample_offset = new_sample.xy * p_rot_scale;
sample_pow_2_len = new_sample.w; // precalculated, same as: sample_pow_2_len = log2( length( new_sample.xy ) );
p_weight_mod *= new_sample.z;
}
// snap to pixel center (more correct obscurance math, avoids artifacts)
sample_offset = round(sample_offset);
// calculate MIP based on the sample distance from the center, similar to as described
// in http://graphics.cs.williams.edu/papers/SAOHPG12/.
float mip_level = (p_quality_level < SSIL_DEPTH_MIPS_ENABLE_AT_QUALITY_PRESET) ? (0) : (sample_pow_2_len + p_mip_offset);
vec2 sampling_uv = sample_offset * params.half_screen_pixel_size + p_normalized_screen_pos;
SSIL_tap_inner(p_quality_level, r_color_sum, r_obscurance_sum, r_weight_sum, sampling_uv, mip_level, p_pix_center_pos, p_pixel_normal, p_fallof_sq, p_weight_mod);
// for the second tap, just use the mirrored offset
vec2 sample_offset_mirrored_uv = -sample_offset;
// tilt the second set of samples so that the disk is effectively rotated by the normal
// effective at removing one set of artifacts, but too expensive for lower quality settings
if (p_quality_level >= SSIL_TILT_SAMPLES_ENABLE_AT_QUALITY_PRESET) {
float dot_norm = dot(sample_offset_mirrored_uv, p_norm_xy);
sample_offset_mirrored_uv -= dot_norm * p_norm_xy_length * p_norm_xy;
sample_offset_mirrored_uv = round(sample_offset_mirrored_uv);
}
// snap to pixel center (more correct obscurance math, avoids artifacts)
vec2 sampling_mirrored_uv = sample_offset_mirrored_uv * params.half_screen_pixel_size + p_normalized_screen_pos;
SSIL_tap_inner(p_quality_level, r_color_sum, r_obscurance_sum, r_weight_sum, sampling_mirrored_uv, mip_level, p_pix_center_pos, p_pixel_normal, p_fallof_sq, p_weight_mod);
}
void generate_SSIL(out vec3 r_color, out vec4 r_edges, out float r_obscurance, out float r_weight, const vec2 p_pos, int p_quality_level, bool p_adaptive_base) {
vec2 pos_rounded = trunc(p_pos);
uvec2 upos = uvec2(pos_rounded);
const int number_of_taps = (p_adaptive_base) ? (SSIL_ADAPTIVE_TAP_BASE_COUNT) : (num_taps[p_quality_level]);
float pix_z, pix_left_z, pix_top_z, pix_right_z, pix_bottom_z;
vec4 valuesUL = textureGather(source_depth_mipmaps, vec3(pos_rounded * params.half_screen_pixel_size, params.pass));
vec4 valuesBR = textureGather(source_depth_mipmaps, vec3((pos_rounded + vec2(1.0)) * params.half_screen_pixel_size, params.pass));
// get this pixel's viewspace depth
pix_z = valuesUL.y;
// get left right top bottom neighboring pixels for edge detection (gets compiled out on quality_level == 0)
pix_left_z = valuesUL.x;
pix_top_z = valuesUL.z;
pix_right_z = valuesBR.z;
pix_bottom_z = valuesBR.x;
vec2 normalized_screen_pos = pos_rounded * params.half_screen_pixel_size + params.half_screen_pixel_size_x025;
vec3 pix_center_pos = NDC_to_view_space(normalized_screen_pos, pix_z);
// Load this pixel's viewspace normal
uvec2 full_res_coord = upos * 2 * params.size_multiplier + params.pass_coord_offset.xy;
vec3 pixel_normal = load_normal(ivec2(full_res_coord));
const vec2 pixel_size_at_center = NDC_to_view_space(normalized_screen_pos.xy + params.half_screen_pixel_size, pix_center_pos.z).xy - pix_center_pos.xy;
float pixel_lookup_radius;
float fallof_sq;
// calculate effect radius and fit our screen sampling pattern inside it
float viewspace_radius;
calculate_radius_parameters(length(pix_center_pos), pixel_size_at_center, pixel_lookup_radius, viewspace_radius, fallof_sq);
// calculate samples rotation/scaling
mat2 rot_scale_matrix;
uint pseudo_random_index;
{
vec4 rotation_scale;
// reduce effect radius near the screen edges slightly; ideally, one would render a larger depth buffer (5% on each side) instead
if (!p_adaptive_base && (p_quality_level >= SSIL_REDUCE_RADIUS_NEAR_SCREEN_BORDER_ENABLE_AT_QUALITY_PRESET)) {
float near_screen_border = min(min(normalized_screen_pos.x, 1.0 - normalized_screen_pos.x), min(normalized_screen_pos.y, 1.0 - normalized_screen_pos.y));
near_screen_border = clamp(10.0 * near_screen_border + 0.6, 0.0, 1.0);
pixel_lookup_radius *= near_screen_border;
}
// load & update pseudo-random rotation matrix
pseudo_random_index = uint(pos_rounded.y * 2 + pos_rounded.x) % 5;
rotation_scale = constants.rotation_matrices[params.pass * 5 + pseudo_random_index];
rot_scale_matrix = mat2(rotation_scale.x * pixel_lookup_radius, rotation_scale.y * pixel_lookup_radius, rotation_scale.z * pixel_lookup_radius, rotation_scale.w * pixel_lookup_radius);
}
// the main obscurance & sample weight storage
vec3 color_sum = vec3(0.0);
float obscurance_sum = 0.0;
float weight_sum = 0.0;
// edge mask for between this and left/right/top/bottom neighbor pixels - not used in quality level 0 so initialize to "no edge" (1 is no edge, 0 is edge)
vec4 edgesLRTB = vec4(1.0, 1.0, 1.0, 1.0);
// Move center pixel slightly towards camera to avoid imprecision artifacts due to using of 16bit depth buffer.
pix_center_pos *= 0.99;
if (!p_adaptive_base && (p_quality_level >= SSIL_DEPTH_BASED_EDGES_ENABLE_AT_QUALITY_PRESET)) {
edgesLRTB = calculate_edges(pix_z, pix_left_z, pix_right_z, pix_top_z, pix_bottom_z);
}
const float global_mip_offset = SSIL_DEPTH_MIPS_GLOBAL_OFFSET;
float mip_offset = (p_quality_level < SSIL_DEPTH_MIPS_ENABLE_AT_QUALITY_PRESET) ? (0) : (log2(pixel_lookup_radius) + global_mip_offset);
// Used to tilt the second set of samples so that the disk is effectively rotated by the normal
// effective at removing one set of artifacts, but too expensive for lower quality settings
vec2 norm_xy = vec2(pixel_normal.x, pixel_normal.y);
float norm_xy_length = length(norm_xy);
norm_xy /= vec2(norm_xy_length, -norm_xy_length);
norm_xy_length *= SSIL_TILT_SAMPLES_AMOUNT;
// standard, non-adaptive approach
if ((p_quality_level != 3) || p_adaptive_base) {
for (int i = 0; i < number_of_taps; i++) {
SSILTap(p_quality_level, color_sum, obscurance_sum, weight_sum, i, rot_scale_matrix, pix_center_pos, pixel_normal, normalized_screen_pos, mip_offset, fallof_sq, 1.0, norm_xy, norm_xy_length);
}
}
#ifdef ADAPTIVE
else {
// add new ones if needed
vec2 full_res_uv = normalized_screen_pos + params.pass_uv_offset.xy;
float importance = textureLod(source_importance, full_res_uv, 0.0).x;
//Need to store obscurance from base pass
// load existing base values
vec4 base_values = imageLoad(source_ssil, ivec3(upos, params.pass));
weight_sum += imageLoad(edges_weights_image, ivec2(upos)).r * float(SSIL_ADAPTIVE_TAP_BASE_COUNT * 4.0);
color_sum += (base_values.rgb) * weight_sum;
obscurance_sum += (base_values.a) * weight_sum;
// increase importance around edges
float edge_count = dot(1.0 - edgesLRTB, vec4(1.0, 1.0, 1.0, 1.0));
float avg_total_importance = float(counter.sum) * params.load_counter_avg_div;
float importance_limiter = clamp(params.adaptive_sample_limit / avg_total_importance, 0.0, 1.0);
importance *= importance_limiter;
float additional_sample_count = SSIL_ADAPTIVE_TAP_FLEXIBLE_COUNT * importance;
const float blend_range = 3.0;
const float blend_range_inv = 1.0 / blend_range;
additional_sample_count += 0.5;
uint additional_samples = uint(additional_sample_count);
uint additional_samples_to = min(SSIL_MAX_TAPS, additional_samples + SSIL_ADAPTIVE_TAP_BASE_COUNT);
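// The +0.5 above rounds the adaptive tap count to the nearest integer, and blend_range
// fades the extra taps in gradually: each iteration below decrements
// additional_sample_count, and weight_mod = clamp(remaining / 3.0, 0.0, 1.0) gives the
// last few taps fractional weight instead of toggling them on and off, keeping the
// result stable when the importance map changes slightly between frames.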
for (uint i = SSIL_ADAPTIVE_TAP_BASE_COUNT; i < additional_samples_to; i++) {
additional_sample_count -= 1.0f;
float weight_mod = clamp(additional_sample_count * blend_range_inv, 0.0, 1.0);
SSILTap(p_quality_level, color_sum, obscurance_sum, weight_sum, int(i), rot_scale_matrix, pix_center_pos, pixel_normal, normalized_screen_pos, mip_offset, fallof_sq, weight_mod, norm_xy, norm_xy_length);
}
}
#endif
// Early out for adaptive base
if (p_adaptive_base) {
vec3 color = color_sum / weight_sum;
r_color = color;
r_edges = vec4(0.0);
r_obscurance = obscurance_sum / weight_sum;
r_weight = weight_sum;
return;
}
// Calculate weighted average
vec3 color = color_sum / weight_sum;
color /= 1.0 - dot(color, vec3(0.299, 0.587, 0.114));
// Calculate fadeout (1 close, gradient, 0 far)
float fade_out = clamp(pix_center_pos.z * params.fade_out_mul + params.fade_out_add, 0.0, 1.0);
// Reduce the SSIL if we're on the edge to remove artifacts on edges (we don't care for the lower quality one)
if (!p_adaptive_base && (p_quality_level >= SSIL_DEPTH_BASED_EDGES_ENABLE_AT_QUALITY_PRESET)) {
// when there's more than 2 opposite edges, start fading out the occlusion to reduce aliasing artifacts
float edge_fadeout_factor = clamp((1.0 - edgesLRTB.x - edgesLRTB.y) * 0.35, 0.0, 1.0) + clamp((1.0 - edgesLRTB.z - edgesLRTB.w) * 0.35, 0.0, 1.0);
fade_out *= clamp(1.0 - edge_fadeout_factor, 0.0, 1.0);
}
color = params.intensity * color;
color *= fade_out;
// outputs!
r_color = color;
r_edges = edgesLRTB; // These are used to prevent blurring across edges, 1 means no edge, 0 means edge, 0.5 means half way there, etc.
r_obscurance = clamp((obscurance_sum / weight_sum) * params.intensity, 0.0, 1.0);
r_weight = weight_sum;
}
void main() {
vec3 out_color;
float out_obscurance;
float out_weight;
vec4 out_edges;
ivec2 ssC = ivec2(gl_GlobalInvocationID.xy);
if (any(greaterThanEqual(ssC, params.screen_size))) { //too large, do nothing
return;
}
vec2 uv = vec2(gl_GlobalInvocationID) + vec2(0.5);
#ifdef SSIL_BASE
generate_SSIL(out_color, out_edges, out_obscurance, out_weight, uv, params.quality, true);
imageStore(dest_image, ssC, vec4(out_color, out_obscurance));
imageStore(edges_weights_image, ssC, vec4(out_weight / (float(SSIL_ADAPTIVE_TAP_BASE_COUNT) * 4.0)));
#else
generate_SSIL(out_color, out_edges, out_obscurance, out_weight, uv, params.quality, false); // pass in quality levels
imageStore(dest_image, ssC, vec4(out_color, out_obscurance));
imageStore(edges_weights_image, ssC, vec4(pack_edges(out_edges)));
#endif
}

View File

@@ -0,0 +1,144 @@
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2016, Intel Corporation
// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
// documentation files (the "Software"), to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to the following conditions:
// The above copyright notice and this permission notice shall be included in all copies or substantial portions of
// the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
// THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// File changes (yyyy-mm-dd)
// 2016-09-07: filip.strugar@intel.com: first commit
// 2020-12-05: clayjohn: convert to Vulkan and Godot
// 2021-05-27: clayjohn: convert SSAO to SSIL
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#[compute]
#version 450
#VERSION_DEFINES
layout(local_size_x = 8, local_size_y = 8, local_size_z = 1) in;
layout(set = 0, binding = 0) uniform sampler2D source_ssil;
layout(rgba16, set = 1, binding = 0) uniform restrict writeonly image2D dest_image;
layout(r8, set = 2, binding = 0) uniform restrict readonly image2D source_edges;
layout(push_constant, std430) uniform Params {
float edge_sharpness;
float pad;
vec2 half_screen_pixel_size;
}
params;
vec4 unpack_edges(float p_packed_val) {
uint packed_val = uint(p_packed_val * 255.5);
vec4 edgesLRTB;
edgesLRTB.x = float((packed_val >> 6) & 0x03) / 3.0;
edgesLRTB.y = float((packed_val >> 4) & 0x03) / 3.0;
edgesLRTB.z = float((packed_val >> 2) & 0x03) / 3.0;
edgesLRTB.w = float((packed_val >> 0) & 0x03) / 3.0;
return clamp(edgesLRTB + params.edge_sharpness, 0.0, 1.0);
}
void add_sample(vec4 p_ssil_value, float p_edge_value, inout vec4 r_sum, inout float r_sum_weight) {
float weight = p_edge_value;
r_sum += (weight * p_ssil_value);
r_sum_weight += weight;
}
#ifdef MODE_WIDE
vec4 sample_blurred_wide(ivec2 p_pos, vec2 p_coord) {
vec4 ssil_value = textureLodOffset(source_ssil, vec2(p_coord), 0.0, ivec2(0, 0));
vec4 ssil_valueL = textureLodOffset(source_ssil, vec2(p_coord), 0.0, ivec2(-2, 0));
vec4 ssil_valueT = textureLodOffset(source_ssil, vec2(p_coord), 0.0, ivec2(0, -2));
vec4 ssil_valueR = textureLodOffset(source_ssil, vec2(p_coord), 0.0, ivec2(2, 0));
vec4 ssil_valueB = textureLodOffset(source_ssil, vec2(p_coord), 0.0, ivec2(0, 2));
vec4 edgesLRTB = unpack_edges(imageLoad(source_edges, p_pos).r);
edgesLRTB.x *= unpack_edges(imageLoad(source_edges, p_pos + ivec2(-2, 0)).r).y;
edgesLRTB.z *= unpack_edges(imageLoad(source_edges, p_pos + ivec2(0, -2)).r).w;
edgesLRTB.y *= unpack_edges(imageLoad(source_edges, p_pos + ivec2(2, 0)).r).x;
edgesLRTB.w *= unpack_edges(imageLoad(source_edges, p_pos + ivec2(0, 2)).r).z;
float sum_weight = 0.8;
vec4 sum = ssil_value * sum_weight;
add_sample(ssil_valueL, edgesLRTB.x, sum, sum_weight);
add_sample(ssil_valueR, edgesLRTB.y, sum, sum_weight);
add_sample(ssil_valueT, edgesLRTB.z, sum, sum_weight);
add_sample(ssil_valueB, edgesLRTB.w, sum, sum_weight);
vec4 ssil_avg = sum / sum_weight;
ssil_value = ssil_avg;
return ssil_value;
}
#endif
#ifdef MODE_SMART
vec4 sample_blurred(ivec2 p_pos, vec2 p_coord) {
vec4 vC = textureLodOffset(source_ssil, vec2(p_coord), 0.0, ivec2(0, 0));
vec4 vL = textureLodOffset(source_ssil, vec2(p_coord), 0.0, ivec2(-1, 0));
vec4 vT = textureLodOffset(source_ssil, vec2(p_coord), 0.0, ivec2(0, -1));
vec4 vR = textureLodOffset(source_ssil, vec2(p_coord), 0.0, ivec2(1, 0));
vec4 vB = textureLodOffset(source_ssil, vec2(p_coord), 0.0, ivec2(0, 1));
float packed_edges = imageLoad(source_edges, p_pos).r;
vec4 edgesLRTB = unpack_edges(packed_edges);
float sum_weight = 0.5;
vec4 sum = vC * sum_weight;
add_sample(vL, edgesLRTB.x, sum, sum_weight);
add_sample(vR, edgesLRTB.y, sum, sum_weight);
add_sample(vT, edgesLRTB.z, sum, sum_weight);
add_sample(vB, edgesLRTB.w, sum, sum_weight);
vec4 ssil_avg = sum / sum_weight;
vec4 ssil_value = ssil_avg;
return ssil_value;
}
#endif
void main() {
// Pixel being shaded
ivec2 ssC = ivec2(gl_GlobalInvocationID.xy);
#ifdef MODE_NON_SMART
vec2 half_pixel = params.half_screen_pixel_size * 0.5;
vec2 uv = (vec2(gl_GlobalInvocationID.xy) + vec2(0.5, 0.5)) * params.half_screen_pixel_size;
vec4 center = textureLod(source_ssil, uv, 0.0);
vec4 value = textureLod(source_ssil, vec2(uv + vec2(-half_pixel.x * 3, -half_pixel.y)), 0.0) * 0.2;
value += textureLod(source_ssil, vec2(uv + vec2(+half_pixel.x, -half_pixel.y * 3)), 0.0) * 0.2;
value += textureLod(source_ssil, vec2(uv + vec2(-half_pixel.x, +half_pixel.y * 3)), 0.0) * 0.2;
value += textureLod(source_ssil, vec2(uv + vec2(+half_pixel.x * 3, +half_pixel.y)), 0.0) * 0.2;
vec4 sampled = value + center * 0.2;
#else
#ifdef MODE_SMART
vec4 sampled = sample_blurred(ssC, (vec2(gl_GlobalInvocationID.xy) + vec2(0.5, 0.5)) * params.half_screen_pixel_size);
#else // MODE_WIDE
vec4 sampled = sample_blurred_wide(ssC, (vec2(gl_GlobalInvocationID.xy) + vec2(0.5, 0.5)) * params.half_screen_pixel_size);
#endif
#endif // MODE_NON_SMART
imageStore(dest_image, ssC, sampled);
}

View File

@@ -0,0 +1,125 @@
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2016, Intel Corporation
// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
// documentation files (the "Software"), to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to the following conditions:
// The above copyright notice and this permission notice shall be included in all copies or substantial portions of
// the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
// THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// File changes (yyyy-mm-dd)
// 2016-09-07: filip.strugar@intel.com: first commit
// 2020-12-05: clayjohn: convert to Vulkan and Godot
// 2021-05-27: clayjohn: convert SSAO to SSIL
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#[compute]
#version 450
#VERSION_DEFINES
layout(local_size_x = 8, local_size_y = 8, local_size_z = 1) in;
#ifdef GENERATE_MAP
layout(set = 0, binding = 0) uniform sampler2DArray source_texture;
#else
layout(set = 0, binding = 0) uniform sampler2D source_importance;
#endif
layout(r8, set = 1, binding = 0) uniform restrict writeonly image2D dest_image;
#ifdef PROCESS_MAPB
layout(set = 2, binding = 0, std430) buffer Counter {
uint sum;
}
counter;
#endif
layout(push_constant, std430) uniform Params {
vec2 half_screen_pixel_size;
float intensity;
float pad;
}
params;
void main() {
// Pixel being shaded
ivec2 ssC = ivec2(gl_GlobalInvocationID.xy);
#ifdef GENERATE_MAP
// importance map stuff
uvec2 base_position = ssC * 2;
float avg = 0.0;
float minV = 1.0;
float maxV = 0.0;
for (int i = 0; i < 4; i++) {
vec3 value_a = texelFetch(source_texture, ivec3(base_position, i), 0).rgb * params.intensity;
vec3 value_b = texelFetch(source_texture, ivec3(base_position, i) + ivec3(0, 1, 0), 0).rgb * params.intensity;
vec3 value_c = texelFetch(source_texture, ivec3(base_position, i) + ivec3(1, 0, 0), 0).rgb * params.intensity;
vec3 value_d = texelFetch(source_texture, ivec3(base_position, i) + ivec3(1, 1, 0), 0).rgb * params.intensity;
// Calculate luminance (black and white value)
float a = dot(value_a, vec3(0.2125, 0.7154, 0.0721));
float b = dot(value_b, vec3(0.2125, 0.7154, 0.0721));
float c = dot(value_c, vec3(0.2125, 0.7154, 0.0721));
float d = dot(value_d, vec3(0.2125, 0.7154, 0.0721));
maxV = max(maxV, max(max(a, b), max(c, d)));
minV = min(minV, min(min(a, b), min(c, d)));
}
float min_max_diff = maxV - minV;
imageStore(dest_image, ssC, vec4(pow(clamp(min_max_diff * 2.0, 0.0, 1.0), 0.6)));
#endif
#ifdef PROCESS_MAPA
vec2 uv = (vec2(ssC) + 0.5) * params.half_screen_pixel_size * 2.0;
float center = textureLod(source_importance, uv, 0.0).x;
vec2 half_pixel = params.half_screen_pixel_size;
vec4 vals;
vals.x = textureLod(source_importance, uv + vec2(-half_pixel.x * 3, -half_pixel.y), 0.0).x;
vals.y = textureLod(source_importance, uv + vec2(+half_pixel.x, -half_pixel.y * 3), 0.0).x;
vals.z = textureLod(source_importance, uv + vec2(+half_pixel.x * 3, +half_pixel.y), 0.0).x;
vals.w = textureLod(source_importance, uv + vec2(-half_pixel.x, +half_pixel.y * 3), 0.0).x;
float avg = dot(vals, vec4(0.25, 0.25, 0.25, 0.25));
imageStore(dest_image, ssC, vec4(avg));
#endif
#ifdef PROCESS_MAPB
vec2 uv = (vec2(ssC) + 0.5f) * params.half_screen_pixel_size * 2.0;
float center = textureLod(source_importance, uv, 0.0).x;
vec2 half_pixel = params.half_screen_pixel_size;
vec4 vals;
vals.x = textureLod(source_importance, uv + vec2(-half_pixel.x, -half_pixel.y * 3), 0.0).x;
vals.y = textureLod(source_importance, uv + vec2(+half_pixel.x * 3, -half_pixel.y), 0.0).x;
vals.z = textureLod(source_importance, uv + vec2(+half_pixel.x, +half_pixel.y * 3), 0.0).x;
vals.w = textureLod(source_importance, uv + vec2(-half_pixel.x * 3, +half_pixel.y), 0.0).x;
float avg = dot(vals, vec4(0.25, 0.25, 0.25, 0.25));
imageStore(dest_image, ssC, vec4(avg));
// sum the average; to avoid overflowing we assume max AO resolution is not bigger than 16384x16384; so quarter res (used here) will be 4096x4096, which leaves us with 8 bits per pixel
uint sum = uint(clamp(avg, 0.0, 1.0) * 255.0 + 0.5);
// save every 9th to avoid atomicAdd congestion - since we're blurring, this is good enough; compensated by multiplying load_counter_avg_div by 9
if (((ssC.x % 3) + (ssC.y % 3)) == 0) {
atomicAdd(counter.sum, sum);
}
#endif
}

View File

@@ -0,0 +1,122 @@
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2016, Intel Corporation
// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
// documentation files (the "Software"), to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to the following conditions:
// The above copyright notice and this permission notice shall be included in all copies or substantial portions of
// the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
// THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// File changes (yyyy-mm-dd)
// 2016-09-07: filip.strugar@intel.com: first commit
// 2020-12-05: clayjohn: convert to Vulkan and Godot
// 2021-05-27: clayjohn: convert SSAO to SSIL
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#[compute]
#version 450
#VERSION_DEFINES
layout(local_size_x = 8, local_size_y = 8, local_size_z = 1) in;
layout(rgba16, set = 0, binding = 0) uniform restrict writeonly image2D dest_image;
layout(set = 1, binding = 0) uniform sampler2DArray source_texture;
layout(r8, set = 2, binding = 0) uniform restrict readonly image2DArray source_edges;
layout(push_constant, std430) uniform Params {
float inv_sharpness;
uint size_modifier;
vec2 pixel_size;
}
params;
vec4 unpack_edges(float p_packed_val) {
uint packed_val = uint(p_packed_val * 255.5);
vec4 edgesLRTB;
edgesLRTB.x = float((packed_val >> 6) & 0x03) / 3.0;
edgesLRTB.y = float((packed_val >> 4) & 0x03) / 3.0;
edgesLRTB.z = float((packed_val >> 2) & 0x03) / 3.0;
edgesLRTB.w = float((packed_val >> 0) & 0x03) / 3.0;
return clamp(edgesLRTB + params.inv_sharpness, 0.0, 1.0);
}
void main() {
ivec2 ssC = ivec2(gl_GlobalInvocationID.xy);
if (any(greaterThanEqual(ssC, ivec2(1.0 / params.pixel_size)))) { //too large, do nothing
return;
}
#ifdef MODE_SMART
uvec2 pix_pos = uvec2(gl_GlobalInvocationID.xy);
vec2 uv = (gl_GlobalInvocationID.xy + vec2(0.5)) * params.pixel_size;
// calculate index in the four deinterleaved source array texture
int mx = int(pix_pos.x % 2);
int my = int(pix_pos.y % 2);
int index_center = mx + my * 2; // center index
int index_horizontal = (1 - mx) + my * 2; // neighboring, horizontal
int index_vertical = mx + (1 - my) * 2; // neighboring, vertical
int index_diagonal = (1 - mx) + (1 - my) * 2; // diagonal
vec4 color = texelFetch(source_texture, ivec3(pix_pos / uvec2(params.size_modifier), index_center), 0);
vec4 edgesLRTB = unpack_edges(imageLoad(source_edges, ivec3(pix_pos / uvec2(params.size_modifier), index_center)).r);
// convert index shifts to sampling offsets
float fmx = float(mx);
float fmy = float(my);
// in case of an edge, push sampling offsets away from the edge (towards pixel center)
float fmxe = (edgesLRTB.y - edgesLRTB.x);
float fmye = (edgesLRTB.w - edgesLRTB.z);
// calculate final sampling offsets and sample using bilinear filter
vec2 uv_horizontal = (gl_GlobalInvocationID.xy + vec2(0.5) + vec2(fmx + fmxe - 0.5, 0.5 - fmy)) * params.pixel_size;
vec4 color_horizontal = textureLod(source_texture, vec3(uv_horizontal, index_horizontal), 0.0);
vec2 uv_vertical = (gl_GlobalInvocationID.xy + vec2(0.5) + vec2(0.5 - fmx, fmy - 0.5 + fmye)) * params.pixel_size;
vec4 color_vertical = textureLod(source_texture, vec3(uv_vertical, index_vertical), 0.0);
vec2 uv_diagonal = (gl_GlobalInvocationID.xy + vec2(0.5) + vec2(fmx - 0.5 + fmxe, fmy - 0.5 + fmye)) * params.pixel_size;
vec4 color_diagonal = textureLod(source_texture, vec3(uv_diagonal, index_diagonal), 0.0);
// reduce weight for samples near edge - if the edge is on both sides, weight goes to 0
vec4 blendWeights;
blendWeights.x = 1.0;
blendWeights.y = (edgesLRTB.x + edgesLRTB.y) * 0.5;
blendWeights.z = (edgesLRTB.z + edgesLRTB.w) * 0.5;
blendWeights.w = (blendWeights.y + blendWeights.z) * 0.5;
// calculate weighted average
float blendWeightsSum = dot(blendWeights, vec4(1.0, 1.0, 1.0, 1.0));
color += color_horizontal * blendWeights.y;
color += color_vertical * blendWeights.z;
color += color_diagonal * blendWeights.w;
color /= blendWeightsSum;
imageStore(dest_image, ivec2(gl_GlobalInvocationID.xy), color);
#else // !MODE_SMART
vec2 uv = (gl_GlobalInvocationID.xy + vec2(0.5)) * params.pixel_size;
#ifdef MODE_HALF
vec4 a = textureLod(source_texture, vec3(uv, 0), 0.0);
vec4 d = textureLod(source_texture, vec3(uv, 3), 0.0);
vec4 avg = (a + d) * 0.5;
#else
vec4 a = textureLod(source_texture, vec3(uv, 0), 0.0);
vec4 b = textureLod(source_texture, vec3(uv, 1), 0.0);
vec4 c = textureLod(source_texture, vec3(uv, 2), 0.0);
vec4 d = textureLod(source_texture, vec3(uv, 3), 0.0);
vec4 avg = (a + b + c + d) * 0.25;
#endif
imageStore(dest_image, ivec2(gl_GlobalInvocationID.xy), avg);
#endif
}

View File

@@ -0,0 +1,189 @@
#[compute]
#version 450
#VERSION_DEFINES
layout(local_size_x = 8, local_size_y = 8, local_size_z = 1) in;
#ifdef USE_25_SAMPLES
const int kernel_size = 13;
const vec2 kernel[kernel_size] = vec2[](
vec2(0.530605, 0.0),
vec2(0.0211412, 0.0208333),
vec2(0.0402784, 0.0833333),
vec2(0.0493588, 0.1875),
vec2(0.0410172, 0.333333),
vec2(0.0263642, 0.520833),
vec2(0.017924, 0.75),
vec2(0.0128496, 1.02083),
vec2(0.0094389, 1.33333),
vec2(0.00700976, 1.6875),
vec2(0.00500364, 2.08333),
vec2(0.00333804, 2.52083),
vec2(0.000973794, 3.0));
const vec4 skin_kernel[kernel_size] = vec4[](
vec4(0.530605, 0.613514, 0.739601, 0),
vec4(0.0211412, 0.0459286, 0.0378196, 0.0208333),
vec4(0.0402784, 0.0657244, 0.04631, 0.0833333),
vec4(0.0493588, 0.0367726, 0.0219485, 0.1875),
vec4(0.0410172, 0.0199899, 0.0118481, 0.333333),
vec4(0.0263642, 0.0119715, 0.00684598, 0.520833),
vec4(0.017924, 0.00711691, 0.00347194, 0.75),
vec4(0.0128496, 0.00356329, 0.00132016, 1.02083),
vec4(0.0094389, 0.00139119, 0.000416598, 1.33333),
vec4(0.00700976, 0.00049366, 0.000151938, 1.6875),
vec4(0.00500364, 0.00020094, 5.28848e-005, 2.08333),
vec4(0.00333804, 7.85443e-005, 1.2945e-005, 2.52083),
vec4(0.000973794, 1.11862e-005, 9.43437e-007, 3));
#endif //USE_25_SAMPLES
#ifdef USE_17_SAMPLES
const int kernel_size = 9;
const vec2 kernel[kernel_size] = vec2[](
vec2(0.536343, 0.0),
vec2(0.0324462, 0.03125),
vec2(0.0582416, 0.125),
vec2(0.0571056, 0.28125),
vec2(0.0347317, 0.5),
vec2(0.0216301, 0.78125),
vec2(0.0144609, 1.125),
vec2(0.0100386, 1.53125),
vec2(0.00317394, 2.0));
const vec4 skin_kernel[kernel_size] = vec4[](
vec4(0.536343, 0.624624, 0.748867, 0),
vec4(0.0324462, 0.0656718, 0.0532821, 0.03125),
vec4(0.0582416, 0.0659959, 0.0411329, 0.125),
vec4(0.0571056, 0.0287432, 0.0172844, 0.28125),
vec4(0.0347317, 0.0151085, 0.00871983, 0.5),
vec4(0.0216301, 0.00794618, 0.00376991, 0.78125),
vec4(0.0144609, 0.00317269, 0.00106399, 1.125),
vec4(0.0100386, 0.000914679, 0.000275702, 1.53125),
vec4(0.00317394, 0.000134823, 3.77269e-005, 2));
#endif //USE_17_SAMPLES
#ifdef USE_11_SAMPLES
const int kernel_size = 6;
const vec2 kernel[kernel_size] = vec2[](
vec2(0.560479, 0.0),
vec2(0.0771802, 0.08),
vec2(0.0821904, 0.32),
vec2(0.03639, 0.72),
vec2(0.0192831, 1.28),
vec2(0.00471691, 2.0));
const vec4 skin_kernel[kernel_size] = vec4[](
vec4(0.560479, 0.669086, 0.784728, 0),
vec4(0.0771802, 0.113491, 0.0793803, 0.08),
vec4(0.0821904, 0.0358608, 0.0209261, 0.32),
vec4(0.03639, 0.0130999, 0.00643685, 0.72),
vec4(0.0192831, 0.00282018, 0.00084214, 1.28),
vec4(0.00471691, 0.000184771, 5.07565e-005, 2));
#endif //USE_11_SAMPLES
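// Kernel layout shared by the three variants above: kernel[i].x is the tap weight and
// kernel[i].y its offset along the blur axis; skin_kernel[i].rgb holds per-channel
// weights for the skin profile (red falls off most slowly) with the same offset in .a.
// Only one half of the symmetric kernel is tabulated; main() below calls do_filter()
// once with +step and once with -step to cover both sides of the center sample.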
layout(push_constant, std430) uniform Params {
ivec2 screen_size;
float camera_z_far;
float camera_z_near;
bool vertical;
bool orthogonal;
float unit_size;
float scale;
float depth_scale;
uint pad[3];
}
params;
layout(set = 0, binding = 0) uniform sampler2D source_image;
layout(rgba16f, set = 1, binding = 0) uniform restrict writeonly image2D dest_image;
layout(set = 2, binding = 0) uniform sampler2D source_depth;
void do_filter(inout vec3 color_accum, inout vec3 divisor, vec2 uv, vec2 step, bool p_skin) {
// Accumulate the other samples:
for (int i = 1; i < kernel_size; i++) {
// Fetch color and depth for current sample:
vec2 offset = uv + kernel[i].y * step;
vec4 color = texture(source_image, offset);
if (abs(color.a) < 0.001) {
break; //mix no more
}
vec3 w;
if (p_skin) {
//skin
w = skin_kernel[i].rgb;
} else {
w = vec3(kernel[i].x);
}
color_accum += color.rgb * w;
divisor += w;
}
}
void main() {
// Pixel being shaded
ivec2 ssC = ivec2(gl_GlobalInvocationID.xy);
if (any(greaterThanEqual(ssC, params.screen_size))) { //too large, do nothing
return;
}
vec2 uv = (vec2(ssC) + 0.5) / vec2(params.screen_size);
// Fetch color of current pixel:
vec4 base_color = texture(source_image, uv);
float strength = abs(base_color.a);
if (strength > 0.0) {
vec2 dir = params.vertical ? vec2(0.0, 1.0) : vec2(1.0, 0.0);
// Fetch linear depth of current pixel:
float depth = texture(source_depth, uv).r * 2.0 - 1.0;
float depth_scale;
if (params.orthogonal) {
depth = -(depth * (params.camera_z_far - params.camera_z_near) - (params.camera_z_far + params.camera_z_near)) / 2.0;
depth_scale = params.unit_size; //remember depth is negative by default in OpenGL
} else {
depth = 2.0 * params.camera_z_near * params.camera_z_far / (params.camera_z_far + params.camera_z_near + depth * (params.camera_z_far - params.camera_z_near));
depth_scale = params.unit_size / depth; //remember depth is negative by default in OpenGL
}
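// Summary of the two branches: both recover a linear depth from the depth buffer so the
// blur width can be made depth-aware. Orthographic projections use unit_size directly,
// while perspective divides unit_size by the linear depth so the screen-space radius
// shrinks with distance (roughly constant world-space width). The mix() below then
// blends between the fixed scale and this depth-dependent scale using params.depth_scale.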
float scale = mix(params.scale, depth_scale, params.depth_scale);
// Calculate the final step to fetch the surrounding pixels:
vec2 step = scale * dir;
step *= strength;
step /= 3.0;
// Accumulate the center sample:
vec3 divisor;
bool skin = bool(base_color.a < 0.0);
if (skin) {
//skin
divisor = skin_kernel[0].rgb;
} else {
divisor = vec3(kernel[0].x);
}
vec3 color = base_color.rgb * divisor;
do_filter(color, divisor, uv, step, skin);
do_filter(color, divisor, uv, -step, skin);
base_color.rgb = color / divisor;
}
imageStore(dest_image, ssC, base_color);
}

View File

@@ -0,0 +1,381 @@
///////////////////////////////////////////////////////////////////////////////////
// Copyright(c) 2016-2022 Panos Karabelas
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
// copies of the Software, and to permit persons to whom the Software is furnished
// to do so, subject to the following conditions :
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE AUTHORS OR
// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
///////////////////////////////////////////////////////////////////////////////////
// File changes (yyyy-mm-dd)
// 2022-05-06: Panos Karabelas: first commit
// 2020-12-05: Joan Fons: convert to Vulkan and Godot
///////////////////////////////////////////////////////////////////////////////////
#[compute]
#version 450
#VERSION_DEFINES
// Based on Spartan Engine's TAA implementation (without TAA upscale).
// <https://github.com/PanosK92/SpartanEngine/blob/a8338d0609b85dc32f3732a5c27fb4463816a3b9/Data/shaders/temporal_antialiasing.hlsl>
#define GROUP_SIZE 8
#define FLT_MIN 0.00000001
#define FLT_MAX 32767.0
#define RPC_9 0.11111111111
#define RPC_16 0.0625
layout(local_size_x = GROUP_SIZE, local_size_y = GROUP_SIZE, local_size_z = 1) in;
layout(rgba16f, set = 0, binding = 0) uniform restrict readonly image2D color_buffer;
layout(set = 0, binding = 1) uniform sampler2D depth_buffer;
layout(rg16f, set = 0, binding = 2) uniform restrict readonly image2D velocity_buffer;
layout(rg16f, set = 0, binding = 3) uniform restrict readonly image2D last_velocity_buffer;
layout(set = 0, binding = 4) uniform sampler2D history_buffer;
layout(rgba16f, set = 0, binding = 5) uniform restrict writeonly image2D output_buffer;
layout(push_constant, std430) uniform Params {
vec2 resolution;
float disocclusion_threshold; // 0.1 / max(params.resolution.x, params.resolution.y)
float disocclusion_scale;
}
params;
const ivec2 kOffsets3x3[9] = {
ivec2(-1, -1),
ivec2(0, -1),
ivec2(1, -1),
ivec2(-1, 0),
ivec2(0, 0),
ivec2(1, 0),
ivec2(-1, 1),
ivec2(0, 1),
ivec2(1, 1),
};
/*------------------------------------------------------------------------------
THREAD GROUP SHARED MEMORY (LDS)
------------------------------------------------------------------------------*/
const int kBorderSize = 1;
const int kGroupSize = GROUP_SIZE;
const int kTileDimension = kGroupSize + kBorderSize * 2;
const int kTileDimension2 = kTileDimension * kTileDimension;
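// With GROUP_SIZE = 8 and kBorderSize = 1 the cached tile is 10x10 = 100 texels,
// while a work group only has 8x8 = 64 invocations. populate_group_shared_memory()
// therefore lets the first kTileDimension2 >> 2 = 25 invocations fetch 4 texels each,
// filling the whole bordered tile in a single pass.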
vec3 reinhard(vec3 hdr) {
return hdr / (hdr + 1.0);
}
vec3 reinhard_inverse(vec3 sdr) {
return sdr / (1.0 - sdr);
}
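// reinhard() and reinhard_inverse() are exact inverses on [0, 1):
// e.g. reinhard(vec3(1.0)) = vec3(0.5) and reinhard_inverse(vec3(0.5)) = vec3(1.0).
// Tonemapping before blending and inverting afterwards helps keep very bright HDR
// samples from dominating the history average.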
float get_depth(ivec2 thread_id) {
return texelFetch(depth_buffer, thread_id, 0).r;
}
shared vec3 tile_color[kTileDimension][kTileDimension];
shared float tile_depth[kTileDimension][kTileDimension];
vec3 load_color(uvec2 group_thread_id) {
group_thread_id += kBorderSize;
return tile_color[group_thread_id.x][group_thread_id.y];
}
void store_color(uvec2 group_thread_id, vec3 color) {
tile_color[group_thread_id.x][group_thread_id.y] = color;
}
float load_depth(uvec2 group_thread_id) {
group_thread_id += kBorderSize;
return tile_depth[group_thread_id.x][group_thread_id.y];
}
void store_depth(uvec2 group_thread_id, float depth) {
tile_depth[group_thread_id.x][group_thread_id.y] = depth;
}
void store_color_depth(uvec2 group_thread_id, ivec2 thread_id) {
// out of bounds clamp
thread_id = clamp(thread_id, ivec2(0, 0), ivec2(params.resolution) - ivec2(1, 1));
store_color(group_thread_id, imageLoad(color_buffer, thread_id).rgb);
store_depth(group_thread_id, get_depth(thread_id));
}
void populate_group_shared_memory(uvec2 group_id, uint group_index) {
// Populate group shared memory
ivec2 group_top_left = ivec2(group_id) * kGroupSize - kBorderSize;
if (group_index < (kTileDimension2 >> 2)) {
ivec2 group_thread_id_1 = ivec2(group_index % kTileDimension, group_index / kTileDimension);
ivec2 group_thread_id_2 = ivec2((group_index + (kTileDimension2 >> 2)) % kTileDimension, (group_index + (kTileDimension2 >> 2)) / kTileDimension);
ivec2 group_thread_id_3 = ivec2((group_index + (kTileDimension2 >> 1)) % kTileDimension, (group_index + (kTileDimension2 >> 1)) / kTileDimension);
ivec2 group_thread_id_4 = ivec2((group_index + kTileDimension2 * 3 / 4) % kTileDimension, (group_index + kTileDimension2 * 3 / 4) / kTileDimension);
store_color_depth(group_thread_id_1, group_top_left + group_thread_id_1);
store_color_depth(group_thread_id_2, group_top_left + group_thread_id_2);
store_color_depth(group_thread_id_3, group_top_left + group_thread_id_3);
store_color_depth(group_thread_id_4, group_top_left + group_thread_id_4);
}
// Wait for all group threads to finish loading and storing data.
groupMemoryBarrier();
barrier();
}
/*------------------------------------------------------------------------------
VELOCITY
------------------------------------------------------------------------------*/
void depth_test_min(uvec2 pos, inout float min_depth, inout uvec2 min_pos) {
float depth = load_depth(pos);
if (depth < min_depth) {
min_depth = depth;
min_pos = pos;
}
}
// Returns velocity with closest depth (3x3 neighborhood)
void get_closest_pixel_velocity_3x3(in uvec2 group_pos, uvec2 group_top_left, out vec2 velocity) {
float min_depth = 1.0;
uvec2 min_pos = group_pos;
depth_test_min(group_pos + kOffsets3x3[0], min_depth, min_pos);
depth_test_min(group_pos + kOffsets3x3[1], min_depth, min_pos);
depth_test_min(group_pos + kOffsets3x3[2], min_depth, min_pos);
depth_test_min(group_pos + kOffsets3x3[3], min_depth, min_pos);
depth_test_min(group_pos + kOffsets3x3[4], min_depth, min_pos);
depth_test_min(group_pos + kOffsets3x3[5], min_depth, min_pos);
depth_test_min(group_pos + kOffsets3x3[6], min_depth, min_pos);
depth_test_min(group_pos + kOffsets3x3[7], min_depth, min_pos);
depth_test_min(group_pos + kOffsets3x3[8], min_depth, min_pos);
// Velocity out
velocity = imageLoad(velocity_buffer, ivec2(group_top_left + min_pos)).xy;
}
/*------------------------------------------------------------------------------
HISTORY SAMPLING
------------------------------------------------------------------------------*/
vec3 sample_catmull_rom_9(sampler2D stex, vec2 uv, vec2 resolution) {
// Source: https://gist.github.com/TheRealMJP/c83b8c0f46b63f3a88a5986f4fa982b1
// License: https://gist.github.com/TheRealMJP/bc503b0b87b643d3505d41eab8b332ae
// We're going to sample a 4x4 grid of texels surrounding the target UV coordinate. We'll do this by rounding
// down the sample location to get the exact center of our "starting" texel. The starting texel will be at
// location [1, 1] in the grid, where [0, 0] is the top left corner.
vec2 sample_pos = uv * resolution;
vec2 texPos1 = floor(sample_pos - 0.5f) + 0.5f;
// Compute the fractional offset from our starting texel to our original sample location, which we'll
// feed into the Catmull-Rom spline function to get our filter weights.
vec2 f = sample_pos - texPos1;
// Compute the Catmull-Rom weights using the fractional offset that we calculated earlier.
// These equations are pre-expanded based on our knowledge of where the texels will be located,
// which lets us avoid having to evaluate a piece-wise function.
vec2 w0 = f * (-0.5f + f * (1.0f - 0.5f * f));
vec2 w1 = 1.0f + f * f * (-2.5f + 1.5f * f);
vec2 w2 = f * (0.5f + f * (2.0f - 1.5f * f));
vec2 w3 = f * f * (-0.5f + 0.5f * f);
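// For any fractional offset f these four weights form a partition of unity:
// w0 + w1 + w2 + w3 = 1 (the f, f^2 and f^3 terms cancel), so the filter does not
// change the overall brightness of the history sample.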
// Work out weighting factors and sampling offsets that will let us use bilinear filtering to
// simultaneously evaluate the middle 2 samples from the 4x4 grid.
vec2 w12 = w1 + w2;
vec2 offset12 = w2 / (w1 + w2);
// Compute the final UV coordinates we'll use for sampling the texture
vec2 texPos0 = texPos1 - 1.0f;
vec2 texPos3 = texPos1 + 2.0f;
vec2 texPos12 = texPos1 + offset12;
texPos0 /= resolution;
texPos3 /= resolution;
texPos12 /= resolution;
vec3 result = vec3(0.0f, 0.0f, 0.0f);
result += textureLod(stex, vec2(texPos0.x, texPos0.y), 0.0).xyz * w0.x * w0.y;
result += textureLod(stex, vec2(texPos12.x, texPos0.y), 0.0).xyz * w12.x * w0.y;
result += textureLod(stex, vec2(texPos3.x, texPos0.y), 0.0).xyz * w3.x * w0.y;
result += textureLod(stex, vec2(texPos0.x, texPos12.y), 0.0).xyz * w0.x * w12.y;
result += textureLod(stex, vec2(texPos12.x, texPos12.y), 0.0).xyz * w12.x * w12.y;
result += textureLod(stex, vec2(texPos3.x, texPos12.y), 0.0).xyz * w3.x * w12.y;
result += textureLod(stex, vec2(texPos0.x, texPos3.y), 0.0).xyz * w0.x * w3.y;
result += textureLod(stex, vec2(texPos12.x, texPos3.y), 0.0).xyz * w12.x * w3.y;
result += textureLod(stex, vec2(texPos3.x, texPos3.y), 0.0).xyz * w3.x * w3.y;
return max(result, 0.0f);
}
/*------------------------------------------------------------------------------
HISTORY CLIPPING
------------------------------------------------------------------------------*/
// Based on "Temporal Reprojection Anti-Aliasing" - https://github.com/playdeadgames/temporal
vec3 clip_aabb(vec3 aabb_min, vec3 aabb_max, vec3 p, vec3 q) {
vec3 r = q - p;
vec3 rmax = (aabb_max - p.xyz);
vec3 rmin = (aabb_min - p.xyz);
if (r.x > rmax.x + FLT_MIN) {
r *= (rmax.x / r.x);
}
if (r.y > rmax.y + FLT_MIN) {
r *= (rmax.y / r.y);
}
if (r.z > rmax.z + FLT_MIN) {
r *= (rmax.z / r.z);
}
if (r.x < rmin.x - FLT_MIN) {
r *= (rmin.x / r.x);
}
if (r.y < rmin.y - FLT_MIN) {
r *= (rmin.y / r.y);
}
if (r.z < rmin.z - FLT_MIN) {
r *= (rmin.z / r.z);
}
return p + r;
}
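// Example: with aabb_min = vec3(0.0), aabb_max = vec3(1.0), p = vec3(0.5) and
// q = vec3(2.0, 0.5, 0.5), r = (1.5, 0, 0) exceeds rmax.x = 0.5 and is scaled by
// 0.5 / 1.5, giving p + r = (1.0, 0.5, 0.5): the history sample is pulled back onto
// the face of the neighborhood box instead of being rejected outright.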
// Clip history to the neighbourhood of the current sample
vec3 clip_history_3x3(uvec2 group_pos, vec3 color_history, vec2 velocity_closest) {
// Sample a 3x3 neighbourhood
vec3 s1 = load_color(group_pos + kOffsets3x3[0]);
vec3 s2 = load_color(group_pos + kOffsets3x3[1]);
vec3 s3 = load_color(group_pos + kOffsets3x3[2]);
vec3 s4 = load_color(group_pos + kOffsets3x3[3]);
vec3 s5 = load_color(group_pos + kOffsets3x3[4]);
vec3 s6 = load_color(group_pos + kOffsets3x3[5]);
vec3 s7 = load_color(group_pos + kOffsets3x3[6]);
vec3 s8 = load_color(group_pos + kOffsets3x3[7]);
vec3 s9 = load_color(group_pos + kOffsets3x3[8]);
// Compute min and max (with an adaptive box size, which greatly reduces ghosting)
vec3 color_avg = (s1 + s2 + s3 + s4 + s5 + s6 + s7 + s8 + s9) * RPC_9;
vec3 color_avg2 = ((s1 * s1) + (s2 * s2) + (s3 * s3) + (s4 * s4) + (s5 * s5) + (s6 * s6) + (s7 * s7) + (s8 * s8) + (s9 * s9)) * RPC_9;
float box_size = mix(0.0f, 2.5f, smoothstep(0.02f, 0.0f, length(velocity_closest)));
vec3 dev = sqrt(abs(color_avg2 - (color_avg * color_avg))) * box_size;
vec3 color_min = color_avg - dev;
vec3 color_max = color_avg + dev;
// Variance clipping
vec3 color = clip_aabb(color_min, color_max, clamp(color_avg, color_min, color_max), color_history);
// Clamp to prevent NaNs
color = clamp(color, FLT_MIN, FLT_MAX);
return color;
}
/*------------------------------------------------------------------------------
TAA
------------------------------------------------------------------------------*/
const vec3 lumCoeff = vec3(0.299f, 0.587f, 0.114f);
float luminance(vec3 color) {
return max(dot(color, lumCoeff), 0.0001f);
}
// This is "velocity disocclusion" as described by https://www.elopezr.com/temporal-aa-and-the-quest-for-the-holy-trail/.
// We use texel space, so our scale and threshold differ.
float get_factor_disocclusion(vec2 uv_reprojected, vec2 velocity) {
vec2 velocity_previous = imageLoad(last_velocity_buffer, ivec2(uv_reprojected * params.resolution)).xy;
vec2 velocity_texels = velocity * params.resolution;
vec2 prev_velocity_texels = velocity_previous * params.resolution;
float disocclusion = length(prev_velocity_texels - velocity_texels) - params.disocclusion_threshold;
return clamp(disocclusion * params.disocclusion_scale, 0.0, 1.0);
}
vec3 temporal_antialiasing(uvec2 pos_group_top_left, uvec2 pos_group, uvec2 pos_screen, vec2 uv, sampler2D tex_history) {
// Get the velocity of the current pixel
vec2 velocity = imageLoad(velocity_buffer, ivec2(pos_screen)).xy;
// Get reprojected uv
vec2 uv_reprojected = uv + velocity;
// Get input color
vec3 color_input = load_color(pos_group);
// Get history color (catmull-rom reduces a lot of the blurring that you get under motion)
vec3 color_history = sample_catmull_rom_9(tex_history, uv_reprojected, params.resolution).rgb;
// Clip history to the neighbourhood of the current sample (fixes a lot of the ghosting).
vec2 velocity_closest = vec2(0.0); // This is best done by using the velocity with the closest depth.
get_closest_pixel_velocity_3x3(pos_group, pos_group_top_left, velocity_closest);
color_history = clip_history_3x3(pos_group, color_history, velocity_closest);
// Compute blend factor
float blend_factor = RPC_16; // We want to be able to accumulate as many jitter samples as we generated, that is, 16.
{
// If re-projected UV is out of screen, converge to current color immediately.
float factor_screen = any(lessThan(uv_reprojected, vec2(0.0))) || any(greaterThan(uv_reprojected, vec2(1.0))) ? 1.0 : 0.0;
// Increase blend factor when there is disocclusion (fixes a lot of the remaining ghosting).
float factor_disocclusion = get_factor_disocclusion(uv_reprojected, velocity);
// Add to the blend factor
blend_factor = clamp(blend_factor + factor_screen + factor_disocclusion, 0.0, 1.0);
}
// Resolve
vec3 color_resolved = vec3(0.0);
{
// Tonemap
color_history = reinhard(color_history);
color_input = reinhard(color_input);
// Reduce flickering
float lum_color = luminance(color_input);
float lum_history = luminance(color_history);
float diff = abs(lum_color - lum_history) / max(lum_color, max(lum_history, 1.001));
diff = 1.0 - diff;
diff = diff * diff;
blend_factor = mix(0.0, blend_factor, diff);
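// Example: lum_color = 0.5 and lum_history = 0.25 give diff = 1.0 - 0.25 / 1.001 ~= 0.75,
// and diff^2 ~= 0.56, so the blend factor is roughly halved; identical luminances leave
// it untouched, while large luminance jumps push it toward 0 and favor the history.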
// Lerp/blend
color_resolved = mix(color_history, color_input, blend_factor);
// Inverse tonemap
color_resolved = reinhard_inverse(color_resolved);
}
return color_resolved;
}
void main() {
populate_group_shared_memory(gl_WorkGroupID.xy, gl_LocalInvocationIndex);
// Out of bounds check
if (any(greaterThanEqual(vec2(gl_GlobalInvocationID.xy), params.resolution))) {
return;
}
const uvec2 pos_group = gl_LocalInvocationID.xy;
const uvec2 pos_group_top_left = gl_WorkGroupID.xy * kGroupSize - kBorderSize;
const uvec2 pos_screen = gl_GlobalInvocationID.xy;
const vec2 uv = (gl_GlobalInvocationID.xy + 0.5f) / params.resolution;
vec3 result = temporal_antialiasing(pos_group_top_left, pos_group, pos_screen, uv, history_buffer);
imageStore(output_buffer, ivec2(gl_GlobalInvocationID.xy), vec4(result, 1.0));
}

View File

@@ -0,0 +1,930 @@
#[vertex]
#version 450
#VERSION_DEFINES
layout(location = 0) out vec2 uv_interp;
void main() {
// Old code, kept commented out because it triggers an ARM driver bug on Mali-GXXx GPUs with Vulkan API 1.3.xxx.
// https://github.com/godotengine/godot/pull/92817#issuecomment-2168625982
//vec2 base_arr[3] = vec2[](vec2(-1.0, -1.0), vec2(-1.0, 3.0), vec2(3.0, -1.0));
//gl_Position = vec4(base_arr[gl_VertexIndex], 0.0, 1.0);
//uv_interp = clamp(gl_Position.xy, vec2(0.0, 0.0), vec2(1.0, 1.0)) * 2.0; // saturate(x) * 2.0
vec2 vertex_base;
if (gl_VertexIndex == 0) {
vertex_base = vec2(-1.0, -1.0);
} else if (gl_VertexIndex == 1) {
vertex_base = vec2(-1.0, 3.0);
} else {
vertex_base = vec2(3.0, -1.0);
}
gl_Position = vec4(vertex_base, 0.0, 1.0);
uv_interp = clamp(vertex_base, vec2(0.0, 0.0), vec2(1.0, 1.0)) * 2.0; // saturate(x) * 2.0
}
#[fragment]
#version 450
#VERSION_DEFINES
#ifdef USE_MULTIVIEW
#extension GL_EXT_multiview : enable
#define ViewIndex gl_ViewIndex
#endif //USE_MULTIVIEW
layout(location = 0) in vec2 uv_interp;
#ifdef SUBPASS
layout(input_attachment_index = 0, set = 0, binding = 0) uniform subpassInput input_color;
#elif defined(USE_MULTIVIEW)
layout(set = 0, binding = 0) uniform sampler2DArray source_color;
#else
layout(set = 0, binding = 0) uniform sampler2D source_color;
#endif
layout(set = 1, binding = 0) uniform sampler2D source_auto_exposure;
#ifdef USE_MULTIVIEW
layout(set = 2, binding = 0) uniform sampler2DArray source_glow;
#else
layout(set = 2, binding = 0) uniform sampler2D source_glow;
#endif
layout(set = 2, binding = 1) uniform sampler2D glow_map;
#ifdef USE_1D_LUT
layout(set = 3, binding = 0) uniform sampler2D source_color_correction;
#else
layout(set = 3, binding = 0) uniform sampler3D source_color_correction;
#endif
#define FLAG_USE_BCS (1 << 0)
#define FLAG_USE_GLOW (1 << 1)
#define FLAG_USE_AUTO_EXPOSURE (1 << 2)
#define FLAG_USE_COLOR_CORRECTION (1 << 3)
#define FLAG_USE_FXAA (1 << 4)
#define FLAG_USE_8_BIT_DEBANDING (1 << 5)
#define FLAG_USE_10_BIT_DEBANDING (1 << 6)
#define FLAG_CONVERT_TO_SRGB (1 << 7)
layout(push_constant, std430) uniform Params {
vec3 bcs;
uint flags;
vec2 pixel_size;
uint tonemapper;
uint pad;
uvec2 glow_texture_size;
float glow_intensity;
float glow_map_strength;
uint glow_mode;
float glow_levels[7];
float exposure;
float white;
float auto_exposure_scale;
float luminance_multiplier;
}
params;
layout(location = 0) out vec4 frag_color;
#ifdef USE_GLOW_FILTER_BICUBIC
// w0, w1, w2, and w3 are the four cubic B-spline basis functions
float w0(float a) {
return (1.0f / 6.0f) * (a * (a * (-a + 3.0f) - 3.0f) + 1.0f);
}
float w1(float a) {
return (1.0f / 6.0f) * (a * a * (3.0f * a - 6.0f) + 4.0f);
}
float w2(float a) {
return (1.0f / 6.0f) * (a * (a * (-3.0f * a + 3.0f) + 3.0f) + 1.0f);
}
float w3(float a) {
return (1.0f / 6.0f) * (a * a * a);
}
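// For any a in [0, 1], w0 + w1 + w2 + w3 = (1/6) * 6 = 1, so the bicubic filter
// preserves overall glow energy; g0/g1 and h0/h1 below fold the four taps into two
// bilinear fetches per axis (4 texture reads instead of 16).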
// g0 and g1 are the two amplitude functions
float g0(float a) {
return w0(a) + w1(a);
}
float g1(float a) {
return w2(a) + w3(a);
}
// h0 and h1 are the two offset functions
float h0(float a) {
return -1.0f + w1(a) / (w0(a) + w1(a));
}
float h1(float a) {
return 1.0f + w3(a) / (w2(a) + w3(a));
}
#ifdef USE_MULTIVIEW
vec4 texture2D_bicubic(sampler2DArray tex, vec2 uv, int p_lod) {
float lod = float(p_lod);
vec2 tex_size = vec2(params.glow_texture_size >> p_lod);
vec2 pixel_size = vec2(1.0f) / tex_size;
uv = uv * tex_size + vec2(0.5f);
vec2 iuv = floor(uv);
vec2 fuv = fract(uv);
float g0x = g0(fuv.x);
float g1x = g1(fuv.x);
float h0x = h0(fuv.x);
float h1x = h1(fuv.x);
float h0y = h0(fuv.y);
float h1y = h1(fuv.y);
vec3 p0 = vec3((vec2(iuv.x + h0x, iuv.y + h0y) - vec2(0.5f)) * pixel_size, ViewIndex);
vec3 p1 = vec3((vec2(iuv.x + h1x, iuv.y + h0y) - vec2(0.5f)) * pixel_size, ViewIndex);
vec3 p2 = vec3((vec2(iuv.x + h0x, iuv.y + h1y) - vec2(0.5f)) * pixel_size, ViewIndex);
vec3 p3 = vec3((vec2(iuv.x + h1x, iuv.y + h1y) - vec2(0.5f)) * pixel_size, ViewIndex);
return (g0(fuv.y) * (g0x * textureLod(tex, p0, lod) + g1x * textureLod(tex, p1, lod))) +
(g1(fuv.y) * (g0x * textureLod(tex, p2, lod) + g1x * textureLod(tex, p3, lod)));
}
#define GLOW_TEXTURE_SAMPLE(m_tex, m_uv, m_lod) texture2D_bicubic(m_tex, m_uv, m_lod)
#else // USE_MULTIVIEW
vec4 texture2D_bicubic(sampler2D tex, vec2 uv, int p_lod) {
float lod = float(p_lod);
vec2 tex_size = vec2(params.glow_texture_size >> p_lod);
vec2 pixel_size = vec2(1.0f) / tex_size;
uv = uv * tex_size + vec2(0.5f);
vec2 iuv = floor(uv);
vec2 fuv = fract(uv);
float g0x = g0(fuv.x);
float g1x = g1(fuv.x);
float h0x = h0(fuv.x);
float h1x = h1(fuv.x);
float h0y = h0(fuv.y);
float h1y = h1(fuv.y);
vec2 p0 = (vec2(iuv.x + h0x, iuv.y + h0y) - vec2(0.5f)) * pixel_size;
vec2 p1 = (vec2(iuv.x + h1x, iuv.y + h0y) - vec2(0.5f)) * pixel_size;
vec2 p2 = (vec2(iuv.x + h0x, iuv.y + h1y) - vec2(0.5f)) * pixel_size;
vec2 p3 = (vec2(iuv.x + h1x, iuv.y + h1y) - vec2(0.5f)) * pixel_size;
return (g0(fuv.y) * (g0x * textureLod(tex, p0, lod) + g1x * textureLod(tex, p1, lod))) +
(g1(fuv.y) * (g0x * textureLod(tex, p2, lod) + g1x * textureLod(tex, p3, lod)));
}
#define GLOW_TEXTURE_SAMPLE(m_tex, m_uv, m_lod) texture2D_bicubic(m_tex, m_uv, m_lod)
#endif // !USE_MULTIVIEW
#else // USE_GLOW_FILTER_BICUBIC
#ifdef USE_MULTIVIEW
#define GLOW_TEXTURE_SAMPLE(m_tex, m_uv, m_lod) textureLod(m_tex, vec3(m_uv, ViewIndex), float(m_lod))
#else // USE_MULTIVIEW
#define GLOW_TEXTURE_SAMPLE(m_tex, m_uv, m_lod) textureLod(m_tex, m_uv, float(m_lod))
#endif // !USE_MULTIVIEW
#endif // !USE_GLOW_FILTER_BICUBIC
// Based on Reinhard's extended formula, see equation 4 in https://doi.org/cjbgrt
vec3 tonemap_reinhard(vec3 color, float white) {
float white_squared = white * white;
vec3 white_squared_color = white_squared * color;
// Equivalent to color * (1 + color / white_squared) / (1 + color)
return (white_squared_color + color * color) / (white_squared_color + white_squared);
}
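// Derivation of the line above: color * (1 + color / white^2) / (1 + color), with
// numerator and denominator multiplied by white^2, becomes
// (white_squared_color + color * color) / (white_squared_color + white_squared),
// i.e. the same curve rewritten so only a single division is needed.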
vec3 tonemap_filmic(vec3 color, float white) {
// exposure bias: input scale (color *= bias, white *= bias) to make the brightness consistent with other tonemappers
// also useful to scale the input to the range that the tonemapper is designed for (some require very high input values)
// has no effect on the curve's general shape or visual properties
const float exposure_bias = 2.0f;
const float A = 0.22f * exposure_bias * exposure_bias; // bias baked into constants for performance
const float B = 0.30f * exposure_bias;
const float C = 0.10f;
const float D = 0.20f;
const float E = 0.01f;
const float F = 0.30f;
vec3 color_tonemapped = ((color * (A * color + C * B) + D * E) / (color * (A * color + B) + D * F)) - E / F;
float white_tonemapped = ((white * (A * white + C * B) + D * E) / (white * (A * white + B) + D * F)) - E / F;
return color_tonemapped / white_tonemapped;
}
// Adapted from https://github.com/TheRealMJP/BakingLab/blob/master/BakingLab/ACES.hlsl
// (MIT License).
vec3 tonemap_aces(vec3 color, float white) {
const float exposure_bias = 1.8f;
const float A = 0.0245786f;
const float B = 0.000090537f;
const float C = 0.983729f;
const float D = 0.432951f;
const float E = 0.238081f;
// Exposure bias baked into transform to save shader instructions. Equivalent to `color *= exposure_bias`
const mat3 rgb_to_rrt = mat3(
vec3(0.59719f * exposure_bias, 0.35458f * exposure_bias, 0.04823f * exposure_bias),
vec3(0.07600f * exposure_bias, 0.90834f * exposure_bias, 0.01566f * exposure_bias),
vec3(0.02840f * exposure_bias, 0.13383f * exposure_bias, 0.83777f * exposure_bias));
const mat3 odt_to_rgb = mat3(
vec3(1.60475f, -0.53108f, -0.07367f),
vec3(-0.10208f, 1.10813f, -0.00605f),
vec3(-0.00327f, -0.07276f, 1.07602f));
color *= rgb_to_rrt;
vec3 color_tonemapped = (color * (color + A) - B) / (color * (C * color + D) + E);
color_tonemapped *= odt_to_rgb;
white *= exposure_bias;
float white_tonemapped = (white * (white + A) - B) / (white * (C * white + D) + E);
return color_tonemapped / white_tonemapped;
}
// Polynomial approximation of EaryChow's AgX sigmoid curve.
// x must be within the range [0.0, 1.0]
vec3 agx_contrast_approx(vec3 x) {
// Generated with Excel trendline
// Input data: Generated using python sigmoid with EaryChow's configuration and 57 steps
// Additional padding values were added to give correct intersections at 0.0 and 1.0
// 6th order, intercept of 0.0 to remove an operation and ensure intersection at 0.0
vec3 x2 = x * x;
vec3 x4 = x2 * x2;
return 0.021 * x + 4.0111 * x2 - 25.682 * x2 * x + 70.359 * x4 - 74.778 * x4 * x + 27.069 * x4 * x2;
}
// This is an approximation and simplification of EaryChow's AgX implementation that is used by Blender.
// This code is based off of the script that generates the AgX_Base_sRGB.cube LUT that Blender uses.
// Source: https://github.com/EaryChow/AgX_LUT_Gen/blob/main/AgXBasesRGB.py
vec3 tonemap_agx(vec3 color) {
// Combined linear sRGB to linear Rec 2020 and Blender AgX inset matrices:
const mat3 srgb_to_rec2020_agx_inset_matrix = mat3(
0.54490813676363087053, 0.14044005884001287035, 0.088827411851915368603,
0.37377945959812267119, 0.75410959864013760045, 0.17887712465043811023,
0.081384976686407536266, 0.10543358536857773485, 0.73224999956948382528);
// Combined inverse AgX outset matrix and linear Rec 2020 to linear sRGB matrices.
const mat3 agx_outset_rec2020_to_srgb_matrix = mat3(
1.9645509602733325934, -0.29932243390911083839, -0.16436833806080403409,
-0.85585845117807513559, 1.3264510741502356555, -0.23822464068860595117,
-0.10886710826831608324, -0.027084020983874825605, 1.402665347143271889);
// LOG2_MIN = -10.0
// LOG2_MAX = +6.5
// MIDDLE_GRAY = 0.18
const float min_ev = -12.4739311883324; // log2(pow(2, LOG2_MIN) * MIDDLE_GRAY)
const float max_ev = 4.02606881166759; // log2(pow(2, LOG2_MAX) * MIDDLE_GRAY)
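// Sanity check of the constants above: log2(0.18) ~= -2.4739, so
// min_ev = -10.0 + log2(0.18) ~= -12.4739 and max_ev = 6.5 + log2(0.18) ~= 4.0261,
// i.e. roughly 16.5 stops of dynamic range centered on middle gray.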
// Large negative values in one channel and large positive values in other
// channels can result in a colour that appears darker and more saturated than
// desired after passing it through the inset matrix. For this reason, it is
// best to prevent negative input values.
// This is done before the Rec. 2020 transform to allow the Rec. 2020
// transform to be combined with the AgX inset matrix. This results in a loss
// of color information that could be correctly interpreted within the
// Rec. 2020 color space as positive RGB values, but it is less common for Godot
// to provide this function with negative sRGB values and therefore not worth
// the performance cost of an additional matrix multiplication.
// A value of 2e-10 intentionally introduces insignificant error to prevent
// log2(0.0) after the inset matrix is applied; color will be >= 1e-10 after
// the matrix transform.
color = max(color, 2e-10);
// Do AGX in rec2020 to match Blender and then apply inset matrix.
color = srgb_to_rec2020_agx_inset_matrix * color;
// Log2 space encoding.
// Must be clamped because agx_contrast_approx may not work
// well with values outside of the range [0.0, 1.0]
color = clamp(log2(color), min_ev, max_ev);
color = (color - min_ev) / (max_ev - min_ev);
// Apply sigmoid function approximation.
color = agx_contrast_approx(color);
// Convert back to linear before applying outset matrix.
color = pow(color, vec3(2.4));
// Apply outset to make the result more chroma-laden and then go back to linear sRGB.
color = agx_outset_rec2020_to_srgb_matrix * color;
// Blender's lusRGB.compensate_low_side is too complex for this shader, so
// simply return the color, even if it has negative components. These negative
// components may be useful for subsequent color adjustments.
return color;
}
vec3 linear_to_srgb(vec3 color) {
// Clamping is not strictly necessary for floating point nonlinear sRGB encoding,
// but many cases that call this function need the result clamped.
color = clamp(color, vec3(0.0), vec3(1.0));
const vec3 a = vec3(0.055f);
return mix((vec3(1.0f) + a) * pow(color.rgb, vec3(1.0f / 2.4f)) - a, 12.92f * color.rgb, lessThan(color.rgb, vec3(0.0031308f)));
}
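// Example: a linear value of 0.5 encodes to 1.055 * pow(0.5, 1.0 / 2.4) - 0.055 ~= 0.735,
// while values below 0.0031308 use the linear 12.92 * x segment (e.g. 0.002 -> 0.02584).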
#define TONEMAPPER_LINEAR 0
#define TONEMAPPER_REINHARD 1
#define TONEMAPPER_FILMIC 2
#define TONEMAPPER_ACES 3
#define TONEMAPPER_AGX 4
vec3 apply_tonemapping(vec3 color, float white) { // inputs are LINEAR
// Ensure color values passed to tonemappers are positive.
// They can be negative in the case of negative lights, which leads to undesired behavior.
if (params.tonemapper == TONEMAPPER_LINEAR) {
return color;
} else if (params.tonemapper == TONEMAPPER_REINHARD) {
return tonemap_reinhard(max(vec3(0.0f), color), white);
} else if (params.tonemapper == TONEMAPPER_FILMIC) {
return tonemap_filmic(max(vec3(0.0f), color), white);
} else if (params.tonemapper == TONEMAPPER_ACES) {
return tonemap_aces(max(vec3(0.0f), color), white);
} else { // TONEMAPPER_AGX
return tonemap_agx(color);
}
}
#ifdef USE_MULTIVIEW
vec3 gather_glow(sampler2DArray tex, vec2 uv) { // sample all selected glow levels, view is added to uv later
#else
vec3 gather_glow(sampler2D tex, vec2 uv) { // sample all selected glow levels
#endif // defined(USE_MULTIVIEW)
vec3 glow = vec3(0.0f);
if (params.glow_levels[0] > 0.0001) {
glow += GLOW_TEXTURE_SAMPLE(tex, uv, 0).rgb * params.glow_levels[0];
}
if (params.glow_levels[1] > 0.0001) {
glow += GLOW_TEXTURE_SAMPLE(tex, uv, 1).rgb * params.glow_levels[1];
}
if (params.glow_levels[2] > 0.0001) {
glow += GLOW_TEXTURE_SAMPLE(tex, uv, 2).rgb * params.glow_levels[2];
}
if (params.glow_levels[3] > 0.0001) {
glow += GLOW_TEXTURE_SAMPLE(tex, uv, 3).rgb * params.glow_levels[3];
}
if (params.glow_levels[4] > 0.0001) {
glow += GLOW_TEXTURE_SAMPLE(tex, uv, 4).rgb * params.glow_levels[4];
}
if (params.glow_levels[5] > 0.0001) {
glow += GLOW_TEXTURE_SAMPLE(tex, uv, 5).rgb * params.glow_levels[5];
}
if (params.glow_levels[6] > 0.0001) {
glow += GLOW_TEXTURE_SAMPLE(tex, uv, 6).rgb * params.glow_levels[6];
}
return glow;
}
#define GLOW_MODE_ADD 0
#define GLOW_MODE_SCREEN 1
#define GLOW_MODE_SOFTLIGHT 2
#define GLOW_MODE_REPLACE 3
#define GLOW_MODE_MIX 4
vec3 apply_glow(vec3 color, vec3 glow) { // apply glow using the selected blending mode
if (params.glow_mode == GLOW_MODE_ADD) {
return color + glow;
} else if (params.glow_mode == GLOW_MODE_SCREEN) {
// Needs color clamping.
glow.rgb = clamp(glow.rgb, vec3(0.0f), vec3(1.0f));
return max((color + glow) - (color * glow), vec3(0.0));
} else if (params.glow_mode == GLOW_MODE_SOFTLIGHT) {
// Needs color clamping.
glow.rgb = clamp(glow.rgb, vec3(0.0f), vec3(1.0f));
glow = glow * vec3(0.5f) + vec3(0.5f);
color.r = (glow.r <= 0.5f) ? (color.r - (1.0f - 2.0f * glow.r) * color.r * (1.0f - color.r)) : (((glow.r > 0.5f) && (color.r <= 0.25f)) ? (color.r + (2.0f * glow.r - 1.0f) * (4.0f * color.r * (4.0f * color.r + 1.0f) * (color.r - 1.0f) + 7.0f * color.r)) : (color.r + (2.0f * glow.r - 1.0f) * (sqrt(color.r) - color.r)));
color.g = (glow.g <= 0.5f) ? (color.g - (1.0f - 2.0f * glow.g) * color.g * (1.0f - color.g)) : (((glow.g > 0.5f) && (color.g <= 0.25f)) ? (color.g + (2.0f * glow.g - 1.0f) * (4.0f * color.g * (4.0f * color.g + 1.0f) * (color.g - 1.0f) + 7.0f * color.g)) : (color.g + (2.0f * glow.g - 1.0f) * (sqrt(color.g) - color.g)));
color.b = (glow.b <= 0.5f) ? (color.b - (1.0f - 2.0f * glow.b) * color.b * (1.0f - color.b)) : (((glow.b > 0.5f) && (color.b <= 0.25f)) ? (color.b + (2.0f * glow.b - 1.0f) * (4.0f * color.b * (4.0f * color.b + 1.0f) * (color.b - 1.0f) + 7.0f * color.b)) : (color.b + (2.0f * glow.b - 1.0f) * (sqrt(color.b) - color.b)));
return color;
} else { //replace
return glow;
}
}
vec3 apply_bcs(vec3 color, vec3 bcs) {
color = mix(vec3(0.0f), color, bcs.x);
color = mix(vec3(0.5f), color, bcs.y);
color = mix(vec3(dot(vec3(1.0f), color) * 0.33333f), color, bcs.z);
return color;
}
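// bcs = vec3(1.0) is the identity: brightness (x) blends toward black for values
// below 1.0, contrast (y) blends toward mid-gray 0.5, and saturation (z) blends
// toward the unweighted channel average (dot with vec3(1.0) * 0.33333).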
#ifdef USE_1D_LUT
vec3 apply_color_correction(vec3 color) {
color.r = texture(source_color_correction, vec2(color.r, 0.0f)).r;
color.g = texture(source_color_correction, vec2(color.g, 0.0f)).g;
color.b = texture(source_color_correction, vec2(color.b, 0.0f)).b;
return color;
}
#else
vec3 apply_color_correction(vec3 color) {
return textureLod(source_color_correction, color, 0.0).rgb;
}
#endif
#ifndef SUBPASS
// FXAA 3.11 compact, Ported from https://github.com/kosua20/Rendu/blob/master/resources/common/shaders/screens/fxaa.frag
///////////////////////////////////////////////////////////////////////////////////
// MIT License
//
// Copyright (c) 2017 Simon Rodriguez
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
///////////////////////////////////////////////////////////////////////////////////
// Nvidia Original FXAA 3.11 License
//----------------------------------------------------------------------------------
// File: es3-kepler\FXAA/FXAA3_11.h
// SDK Version: v3.00
// Email: gameworks@nvidia.com
// Site: http://developer.nvidia.com/
//
// Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
//----------------------------------------------------------------------------------
//
// NVIDIA FXAA 3.11 by TIMOTHY LOTTES
//
//----------------------------------------------------------------------------------
float QUALITY(float q) {
return (q < 5 ? 1.0 : (q > 5 ? (q < 10 ? 2.0 : (q < 11 ? 4.0 : 8.0)) : 1.5));
}
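// Step-size schedule produced by QUALITY() for i = 0..11:
// 1.0, 1.0, 1.0, 1.0, 1.0, 1.5, 2.0, 2.0, 2.0, 2.0, 4.0, 8.0
// (the edge search walks in single-pixel steps first, then accelerates).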
float rgb2luma(vec3 rgb) {
return sqrt(dot(rgb, vec3(0.299, 0.587, 0.114)));
}
vec3 do_fxaa(vec3 color, float exposure, vec2 uv_interp) {
const float EDGE_THRESHOLD_MIN = 0.0312;
const float EDGE_THRESHOLD_MAX = 0.125;
const int ITERATIONS = 12;
const float SUBPIXEL_QUALITY = 0.75;
#ifdef USE_MULTIVIEW
float lumaUp = rgb2luma(textureLodOffset(source_color, vec3(uv_interp, ViewIndex), 0.0, ivec2(0, 1)).xyz * exposure * params.luminance_multiplier);
float lumaDown = rgb2luma(textureLodOffset(source_color, vec3(uv_interp, ViewIndex), 0.0, ivec2(0, -1)).xyz * exposure * params.luminance_multiplier);
float lumaLeft = rgb2luma(textureLodOffset(source_color, vec3(uv_interp, ViewIndex), 0.0, ivec2(-1, 0)).xyz * exposure * params.luminance_multiplier);
float lumaRight = rgb2luma(textureLodOffset(source_color, vec3(uv_interp, ViewIndex), 0.0, ivec2(1, 0)).xyz * exposure * params.luminance_multiplier);
float lumaCenter = rgb2luma(color);
float lumaMin = min(lumaCenter, min(min(lumaUp, lumaDown), min(lumaLeft, lumaRight)));
float lumaMax = max(lumaCenter, max(max(lumaUp, lumaDown), max(lumaLeft, lumaRight)));
float lumaRange = lumaMax - lumaMin;
if (lumaRange < max(EDGE_THRESHOLD_MIN, lumaMax * EDGE_THRESHOLD_MAX)) {
return color;
}
float lumaDownLeft = rgb2luma(textureLodOffset(source_color, vec3(uv_interp, ViewIndex), 0.0, ivec2(-1, -1)).xyz * exposure * params.luminance_multiplier);
float lumaUpRight = rgb2luma(textureLodOffset(source_color, vec3(uv_interp, ViewIndex), 0.0, ivec2(1, 1)).xyz * exposure * params.luminance_multiplier);
float lumaUpLeft = rgb2luma(textureLodOffset(source_color, vec3(uv_interp, ViewIndex), 0.0, ivec2(-1, 1)).xyz * exposure * params.luminance_multiplier);
float lumaDownRight = rgb2luma(textureLodOffset(source_color, vec3(uv_interp, ViewIndex), 0.0, ivec2(1, -1)).xyz * exposure * params.luminance_multiplier);
float lumaDownUp = lumaDown + lumaUp;
float lumaLeftRight = lumaLeft + lumaRight;
float lumaLeftCorners = lumaDownLeft + lumaUpLeft;
float lumaDownCorners = lumaDownLeft + lumaDownRight;
float lumaRightCorners = lumaDownRight + lumaUpRight;
float lumaUpCorners = lumaUpRight + lumaUpLeft;
float edgeHorizontal = abs(-2.0 * lumaLeft + lumaLeftCorners) + abs(-2.0 * lumaCenter + lumaDownUp) * 2.0 + abs(-2.0 * lumaRight + lumaRightCorners);
float edgeVertical = abs(-2.0 * lumaUp + lumaUpCorners) + abs(-2.0 * lumaCenter + lumaLeftRight) * 2.0 + abs(-2.0 * lumaDown + lumaDownCorners);
bool isHorizontal = (edgeHorizontal >= edgeVertical);
float stepLength = isHorizontal ? params.pixel_size.y : params.pixel_size.x;
float luma1 = isHorizontal ? lumaDown : lumaLeft;
float luma2 = isHorizontal ? lumaUp : lumaRight;
float gradient1 = luma1 - lumaCenter;
float gradient2 = luma2 - lumaCenter;
bool is1Steepest = abs(gradient1) >= abs(gradient2);
float gradientScaled = 0.25 * max(abs(gradient1), abs(gradient2));
float lumaLocalAverage = 0.0;
if (is1Steepest) {
stepLength = -stepLength;
lumaLocalAverage = 0.5 * (luma1 + lumaCenter);
} else {
lumaLocalAverage = 0.5 * (luma2 + lumaCenter);
}
vec2 currentUv = uv_interp;
if (isHorizontal) {
currentUv.y += stepLength * 0.5;
} else {
currentUv.x += stepLength * 0.5;
}
vec2 offset = isHorizontal ? vec2(params.pixel_size.x, 0.0) : vec2(0.0, params.pixel_size.y);
vec3 uv1 = vec3(currentUv - offset * QUALITY(0), ViewIndex);
vec3 uv2 = vec3(currentUv + offset * QUALITY(0), ViewIndex);
float lumaEnd1 = rgb2luma(textureLod(source_color, uv1, 0.0).xyz * exposure * params.luminance_multiplier);
float lumaEnd2 = rgb2luma(textureLod(source_color, uv2, 0.0).xyz * exposure * params.luminance_multiplier);
lumaEnd1 -= lumaLocalAverage;
lumaEnd2 -= lumaLocalAverage;
bool reached1 = abs(lumaEnd1) >= gradientScaled;
bool reached2 = abs(lumaEnd2) >= gradientScaled;
bool reachedBoth = reached1 && reached2;
if (!reached1) {
uv1 -= vec3(offset * QUALITY(1), 0.0);
}
if (!reached2) {
uv2 += vec3(offset * QUALITY(1), 0.0);
}
if (!reachedBoth) {
for (int i = 2; i < ITERATIONS; i++) {
if (!reached1) {
lumaEnd1 = rgb2luma(textureLod(source_color, uv1, 0.0).xyz * exposure * params.luminance_multiplier);
lumaEnd1 = lumaEnd1 - lumaLocalAverage;
}
if (!reached2) {
lumaEnd2 = rgb2luma(textureLod(source_color, uv2, 0.0).xyz * exposure * params.luminance_multiplier);
lumaEnd2 = lumaEnd2 - lumaLocalAverage;
}
reached1 = abs(lumaEnd1) >= gradientScaled;
reached2 = abs(lumaEnd2) >= gradientScaled;
reachedBoth = reached1 && reached2;
if (!reached1) {
uv1 -= vec3(offset * QUALITY(i), 0.0);
}
if (!reached2) {
uv2 += vec3(offset * QUALITY(i), 0.0);
}
if (reachedBoth) {
break;
}
}
}
float distance1 = isHorizontal ? (uv_interp.x - uv1.x) : (uv_interp.y - uv1.y);
float distance2 = isHorizontal ? (uv2.x - uv_interp.x) : (uv2.y - uv_interp.y);
bool isDirection1 = distance1 < distance2;
float distanceFinal = min(distance1, distance2);
float edgeThickness = (distance1 + distance2);
bool isLumaCenterSmaller = lumaCenter < lumaLocalAverage;
bool correctVariation1 = (lumaEnd1 < 0.0) != isLumaCenterSmaller;
bool correctVariation2 = (lumaEnd2 < 0.0) != isLumaCenterSmaller;
bool correctVariation = isDirection1 ? correctVariation1 : correctVariation2;
float pixelOffset = -distanceFinal / edgeThickness + 0.5;
float finalOffset = correctVariation ? pixelOffset : 0.0;
float lumaAverage = (1.0 / 12.0) * (2.0 * (lumaDownUp + lumaLeftRight) + lumaLeftCorners + lumaRightCorners);
float subPixelOffset1 = clamp(abs(lumaAverage - lumaCenter) / lumaRange, 0.0, 1.0);
float subPixelOffset2 = (-2.0 * subPixelOffset1 + 3.0) * subPixelOffset1 * subPixelOffset1;
float subPixelOffsetFinal = subPixelOffset2 * subPixelOffset2 * SUBPIXEL_QUALITY;
finalOffset = max(finalOffset, subPixelOffsetFinal);
vec3 finalUv = vec3(uv_interp, ViewIndex);
if (isHorizontal) {
finalUv.y += finalOffset * stepLength;
} else {
finalUv.x += finalOffset * stepLength;
}
vec3 finalColor = textureLod(source_color, finalUv, 0.0).xyz * exposure * params.luminance_multiplier;
return finalColor;
#else
float lumaUp = rgb2luma(textureLodOffset(source_color, uv_interp, 0.0, ivec2(0, 1)).xyz * exposure * params.luminance_multiplier);
float lumaDown = rgb2luma(textureLodOffset(source_color, uv_interp, 0.0, ivec2(0, -1)).xyz * exposure * params.luminance_multiplier);
float lumaLeft = rgb2luma(textureLodOffset(source_color, uv_interp, 0.0, ivec2(-1, 0)).xyz * exposure * params.luminance_multiplier);
float lumaRight = rgb2luma(textureLodOffset(source_color, uv_interp, 0.0, ivec2(1, 0)).xyz * exposure * params.luminance_multiplier);
float lumaCenter = rgb2luma(color);
float lumaMin = min(lumaCenter, min(min(lumaUp, lumaDown), min(lumaLeft, lumaRight)));
float lumaMax = max(lumaCenter, max(max(lumaUp, lumaDown), max(lumaLeft, lumaRight)));
float lumaRange = lumaMax - lumaMin;
if (lumaRange < max(EDGE_THRESHOLD_MIN, lumaMax * EDGE_THRESHOLD_MAX)) {
return color;
}
float lumaDownLeft = rgb2luma(textureLodOffset(source_color, uv_interp, 0.0, ivec2(-1, -1)).xyz * exposure * params.luminance_multiplier);
float lumaUpRight = rgb2luma(textureLodOffset(source_color, uv_interp, 0.0, ivec2(1, 1)).xyz * exposure * params.luminance_multiplier);
float lumaUpLeft = rgb2luma(textureLodOffset(source_color, uv_interp, 0.0, ivec2(-1, 1)).xyz * exposure * params.luminance_multiplier);
float lumaDownRight = rgb2luma(textureLodOffset(source_color, uv_interp, 0.0, ivec2(1, -1)).xyz * exposure * params.luminance_multiplier);
float lumaDownUp = lumaDown + lumaUp;
float lumaLeftRight = lumaLeft + lumaRight;
float lumaLeftCorners = lumaDownLeft + lumaUpLeft;
float lumaDownCorners = lumaDownLeft + lumaDownRight;
float lumaRightCorners = lumaDownRight + lumaUpRight;
float lumaUpCorners = lumaUpRight + lumaUpLeft;
float edgeHorizontal = abs(-2.0 * lumaLeft + lumaLeftCorners) + abs(-2.0 * lumaCenter + lumaDownUp) * 2.0 + abs(-2.0 * lumaRight + lumaRightCorners);
float edgeVertical = abs(-2.0 * lumaUp + lumaUpCorners) + abs(-2.0 * lumaCenter + lumaLeftRight) * 2.0 + abs(-2.0 * lumaDown + lumaDownCorners);
bool isHorizontal = (edgeHorizontal >= edgeVertical);
float stepLength = isHorizontal ? params.pixel_size.y : params.pixel_size.x;
float luma1 = isHorizontal ? lumaDown : lumaLeft;
float luma2 = isHorizontal ? lumaUp : lumaRight;
float gradient1 = luma1 - lumaCenter;
float gradient2 = luma2 - lumaCenter;
bool is1Steepest = abs(gradient1) >= abs(gradient2);
float gradientScaled = 0.25 * max(abs(gradient1), abs(gradient2));
float lumaLocalAverage = 0.0;
if (is1Steepest) {
stepLength = -stepLength;
lumaLocalAverage = 0.5 * (luma1 + lumaCenter);
} else {
lumaLocalAverage = 0.5 * (luma2 + lumaCenter);
}
vec2 currentUv = uv_interp;
if (isHorizontal) {
currentUv.y += stepLength * 0.5;
} else {
currentUv.x += stepLength * 0.5;
}
vec2 offset = isHorizontal ? vec2(params.pixel_size.x, 0.0) : vec2(0.0, params.pixel_size.y);
vec2 uv1 = currentUv - offset * QUALITY(0);
vec2 uv2 = currentUv + offset * QUALITY(0);
float lumaEnd1 = rgb2luma(textureLod(source_color, uv1, 0.0).xyz * exposure * params.luminance_multiplier);
float lumaEnd2 = rgb2luma(textureLod(source_color, uv2, 0.0).xyz * exposure * params.luminance_multiplier);
lumaEnd1 -= lumaLocalAverage;
lumaEnd2 -= lumaLocalAverage;
bool reached1 = abs(lumaEnd1) >= gradientScaled;
bool reached2 = abs(lumaEnd2) >= gradientScaled;
bool reachedBoth = reached1 && reached2;
if (!reached1) {
uv1 -= offset * QUALITY(1);
}
if (!reached2) {
uv2 += offset * QUALITY(1);
}
if (!reachedBoth) {
for (int i = 2; i < ITERATIONS; i++) {
if (!reached1) {
lumaEnd1 = rgb2luma(textureLod(source_color, uv1, 0.0).xyz * exposure * params.luminance_multiplier);
lumaEnd1 = lumaEnd1 - lumaLocalAverage;
}
if (!reached2) {
lumaEnd2 = rgb2luma(textureLod(source_color, uv2, 0.0).xyz * exposure * params.luminance_multiplier);
lumaEnd2 = lumaEnd2 - lumaLocalAverage;
}
reached1 = abs(lumaEnd1) >= gradientScaled;
reached2 = abs(lumaEnd2) >= gradientScaled;
reachedBoth = reached1 && reached2;
if (!reached1) {
uv1 -= offset * QUALITY(i);
}
if (!reached2) {
uv2 += offset * QUALITY(i);
}
if (reachedBoth) {
break;
}
}
}
float distance1 = isHorizontal ? (uv_interp.x - uv1.x) : (uv_interp.y - uv1.y);
float distance2 = isHorizontal ? (uv2.x - uv_interp.x) : (uv2.y - uv_interp.y);
bool isDirection1 = distance1 < distance2;
float distanceFinal = min(distance1, distance2);
float edgeThickness = (distance1 + distance2);
bool isLumaCenterSmaller = lumaCenter < lumaLocalAverage;
bool correctVariation1 = (lumaEnd1 < 0.0) != isLumaCenterSmaller;
bool correctVariation2 = (lumaEnd2 < 0.0) != isLumaCenterSmaller;
bool correctVariation = isDirection1 ? correctVariation1 : correctVariation2;
float pixelOffset = -distanceFinal / edgeThickness + 0.5;
float finalOffset = correctVariation ? pixelOffset : 0.0;
float lumaAverage = (1.0 / 12.0) * (2.0 * (lumaDownUp + lumaLeftRight) + lumaLeftCorners + lumaRightCorners);
float subPixelOffset1 = clamp(abs(lumaAverage - lumaCenter) / lumaRange, 0.0, 1.0);
float subPixelOffset2 = (-2.0 * subPixelOffset1 + 3.0) * subPixelOffset1 * subPixelOffset1;
float subPixelOffsetFinal = subPixelOffset2 * subPixelOffset2 * SUBPIXEL_QUALITY;
finalOffset = max(finalOffset, subPixelOffsetFinal);
vec2 finalUv = uv_interp;
if (isHorizontal) {
finalUv.y += finalOffset * stepLength;
} else {
finalUv.x += finalOffset * stepLength;
}
vec3 finalColor = textureLod(source_color, finalUv, 0.0).xyz * exposure * params.luminance_multiplier;
return finalColor;
#endif
}
#endif // !SUBPASS
// From https://alex.vlachos.com/graphics/Alex_Vlachos_Advanced_VR_Rendering_GDC2015.pdf
// and https://www.shadertoy.com/view/MslGR8 (5th one starting from the bottom)
// NOTE: `frag_coord` is in pixels (i.e. not normalized UV).
// This dithering must be applied after encoding changes (linear/nonlinear) have been applied
// as the final step before quantization from floating point to integer values.
vec3 screen_space_dither(vec2 frag_coord, float bit_alignment_diviser) {
// Iestyn's RGB dither (7 asm instructions) from Portal 2 X360, slightly modified for VR.
// Removed the time component to avoid passing time into this shader.
vec3 dither = vec3(dot(vec2(171.0, 231.0), frag_coord));
dither.rgb = fract(dither.rgb / vec3(103.0, 71.0, 97.0));
// Subtract 0.5 to avoid slightly brightening the whole viewport.
// Use a dither strength of 100% rather than the 37.5% suggested by the original source.
return (dither.rgb - 0.5) / bit_alignment_diviser;
}
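// Example: with bit_alignment_diviser = 255.0 the dither amplitude is +/- 0.5 / 255,
// i.e. half of one 8-bit quantization step, which is enough to break up banding
// without visibly raising the noise floor.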
void main() {
#ifdef SUBPASS
// SUBPASS and USE_MULTIVIEW can be combined but in that case we're already reading from the correct layer
#ifdef USE_MULTIVIEW
// In order to ensure the `SpvCapabilityMultiView` is included in the SPIR-V capabilities, gl_ViewIndex must
// be read in the shader. Without this, transpilation to Metal fails to include the multi-view variant.
uint vi = ViewIndex;
#endif
vec4 color = subpassLoad(input_color);
#elif defined(USE_MULTIVIEW)
vec4 color = textureLod(source_color, vec3(uv_interp, ViewIndex), 0.0f);
#else
vec4 color = textureLod(source_color, uv_interp, 0.0f);
#endif
color.rgb *= params.luminance_multiplier;
// Exposure
float exposure = params.exposure;
#ifndef SUBPASS
if (bool(params.flags & FLAG_USE_AUTO_EXPOSURE)) {
exposure *= 1.0 / (texelFetch(source_auto_exposure, ivec2(0, 0), 0).r * params.luminance_multiplier / params.auto_exposure_scale);
}
#endif
color.rgb *= exposure;
// Early Tonemap & SRGB Conversion
#ifndef SUBPASS
if (bool(params.flags & FLAG_USE_FXAA)) {
// FXAA must be performed before glow to preserve the "bleed" effect of glow.
color.rgb = do_fxaa(color.rgb, exposure, uv_interp);
}
if (bool(params.flags & FLAG_USE_GLOW) && params.glow_mode == GLOW_MODE_MIX) {
vec3 glow = gather_glow(source_glow, uv_interp) * params.luminance_multiplier;
if (params.glow_map_strength > 0.001) {
glow = mix(glow, texture(glow_map, uv_interp).rgb * glow, params.glow_map_strength);
}
color.rgb = mix(color.rgb, glow, params.glow_intensity);
}
#endif
color.rgb = apply_tonemapping(color.rgb, params.white);
bool convert_to_srgb = bool(params.flags & FLAG_CONVERT_TO_SRGB);
if (convert_to_srgb) {
color.rgb = linear_to_srgb(color.rgb); // Regular linear -> SRGB conversion.
}
#ifndef SUBPASS
// Glow
if (bool(params.flags & FLAG_USE_GLOW) && params.glow_mode != GLOW_MODE_MIX) {
vec3 glow = gather_glow(source_glow, uv_interp) * params.glow_intensity * params.luminance_multiplier;
if (params.glow_map_strength > 0.001) {
glow = mix(glow, texture(glow_map, uv_interp).rgb * glow, params.glow_map_strength);
}
// high dynamic range -> SRGB
glow = apply_tonemapping(glow, params.white);
if (convert_to_srgb) {
glow = linear_to_srgb(glow);
}
color.rgb = apply_glow(color.rgb, glow);
}
#endif
// Additional effects
if (bool(params.flags & FLAG_USE_BCS)) {
color.rgb = apply_bcs(color.rgb, params.bcs);
}
if (bool(params.flags & FLAG_USE_COLOR_CORRECTION)) {
// apply_color_correction requires nonlinear sRGB encoding
if (!convert_to_srgb) {
color.rgb = linear_to_srgb(color.rgb);
}
color.rgb = apply_color_correction(color.rgb);
// When convert_to_srgb is false, there is no need to convert back to
// linear because the color correction texture sampling does this for us.
}
// Debanding should be done at the end of tonemapping, but before writing to the LDR buffer.
// Otherwise, we're adding noise to an already-quantized image.
if (bool(params.flags & FLAG_USE_8_BIT_DEBANDING)) {
// Divide by 255 to align to 8-bit quantization.
color.rgb += screen_space_dither(gl_FragCoord.xy, 255.0);
} else if (bool(params.flags & FLAG_USE_10_BIT_DEBANDING)) {
// Divide by 1023 to align to 10-bit quantization.
color.rgb += screen_space_dither(gl_FragCoord.xy, 1023.0);
}
frag_color = color;
}

View File

@@ -0,0 +1,93 @@
#[vertex]
#version 450
#VERSION_DEFINES
#ifdef USE_MULTIVIEW
#extension GL_EXT_multiview : enable
#define ViewIndex gl_ViewIndex
#endif //USE_MULTIVIEW
#ifdef USE_MULTIVIEW
layout(location = 0) out vec3 uv_interp;
#else
layout(location = 0) out vec2 uv_interp;
#endif
layout(push_constant, std430) uniform Params {
float max_texel_factor;
float res1;
float res2;
float res3;
}
params;
void main() {
vec2 base_arr[3] = vec2[](vec2(-1.0, -1.0), vec2(-1.0, 3.0), vec2(3.0, -1.0));
gl_Position = vec4(base_arr[gl_VertexIndex], 0.0, 1.0);
uv_interp.xy = clamp(gl_Position.xy, vec2(0.0, 0.0), vec2(1.0, 1.0)) * 2.0; // saturate(x) * 2.0
#ifdef USE_MULTIVIEW
uv_interp.z = ViewIndex;
#endif
}
#[fragment]
#version 450
#VERSION_DEFINES
#ifdef USE_MULTIVIEW
layout(location = 0) in vec3 uv_interp;
layout(set = 0, binding = 0) uniform sampler2DArray source_color;
#else /* USE_MULTIVIEW */
layout(location = 0) in vec2 uv_interp;
layout(set = 0, binding = 0) uniform sampler2D source_color;
#endif /* USE_MULTIVIEW */
#ifdef SPLIT_RG
layout(location = 0) out vec2 frag_color;
#else
layout(location = 0) out uint frag_color;
#endif
layout(push_constant, std430) uniform Params {
float max_texel_factor;
float res1;
float res2;
float res3;
}
params;
void main() {
#ifdef USE_MULTIVIEW
vec3 uv = uv_interp;
#else
vec2 uv = uv_interp;
#endif
// Input is standardized. R for X, G for Y, 0.0 (0) = 1, 0.33 (85) = 2, 0.66 (170) = 4, 1.0 (255) = 8
vec4 color = textureLod(source_color, uv, 0.0);
#ifdef SPLIT_RG
// Density map for VRS according to VK_EXT_fragment_density_map; we can use it as-is.
frag_color = max(vec2(1.0f) - color.rg, vec2(1.0f / 255.0f));
#else
// Output image shading rate image for VRS according to VK_KHR_fragment_shading_rate.
color.r = clamp(floor(color.r * params.max_texel_factor + 0.1), 0.0, params.max_texel_factor);
color.g = clamp(floor(color.g * params.max_texel_factor + 0.1), 0.0, params.max_texel_factor);
// Note 1x4, 4x1, 1x8, 8x1, 2x8 and 8x2 are not supported:
if (color.r < (color.g - 1.0)) {
color.r = color.g - 1.0;
}
if (color.g < (color.r - 1.0)) {
color.g = color.r - 1.0;
}
// Encode to frag_color;
frag_color = int(color.r + 0.1) << 2;
frag_color += int(color.g + 0.1);
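// Example (assuming the (log2_width << 2) | log2_height packing used by
// VK_KHR_fragment_shading_rate): color.r = color.g = 1.0 encodes to (1 << 2) + 1 = 5,
// i.e. a 2x2 pixel shading rate.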
#endif
}

View File

@@ -0,0 +1,18 @@
#!/usr/bin/env python
from misc.utility.scons_hints import *
Import("env")
if "RD_GLSL" in env["BUILDERS"]:
# find all include files
gl_include_files = [str(f) for f in Glob("*_inc.glsl")] + [str(f) for f in Glob("../*_inc.glsl")]
# find all shader code (all glsl files excluding our include files)
glsl_files = [str(f) for f in Glob("*.glsl") if str(f) not in gl_include_files]
# make sure we recompile shaders if include files change
env.Depends([f + ".gen.h" for f in glsl_files], gl_include_files + ["#glsl_builders.py"])
# compile shaders
for glsl_file in glsl_files:
env.RD_GLSL(glsl_file)

View File

@@ -0,0 +1,788 @@
#[compute]
#version 450
#VERSION_DEFINES
#ifdef SAMPLE_VOXEL_GI_NEAREST
#extension GL_EXT_samplerless_texture_functions : enable
#endif
layout(local_size_x = 8, local_size_y = 8, local_size_z = 1) in;
#define M_PI 3.141592
/* Specialization Constants (Toggles) */
layout(constant_id = 0) const bool sc_half_res = false;
layout(constant_id = 1) const bool sc_use_full_projection_matrix = false;
layout(constant_id = 2) const bool sc_use_vrs = false;
#define SDFGI_MAX_CASCADES 8
//set 0 for SDFGI and render buffers
layout(set = 0, binding = 1) uniform texture3D sdf_cascades[SDFGI_MAX_CASCADES];
layout(set = 0, binding = 2) uniform texture3D light_cascades[SDFGI_MAX_CASCADES];
layout(set = 0, binding = 3) uniform texture3D aniso0_cascades[SDFGI_MAX_CASCADES];
layout(set = 0, binding = 4) uniform texture3D aniso1_cascades[SDFGI_MAX_CASCADES];
layout(set = 0, binding = 5) uniform texture3D occlusion_texture;
layout(set = 0, binding = 6) uniform sampler linear_sampler;
layout(set = 0, binding = 7) uniform sampler linear_sampler_with_mipmaps;
struct ProbeCascadeData {
vec3 position;
float to_probe;
ivec3 probe_world_offset;
float to_cell; // 1/bounds * grid_size
vec3 pad;
float exposure_normalization;
};
layout(rgba16f, set = 0, binding = 9) uniform restrict writeonly image2D ambient_buffer;
layout(rgba16f, set = 0, binding = 10) uniform restrict writeonly image2D reflection_buffer;
layout(set = 0, binding = 11) uniform texture2DArray lightprobe_texture;
layout(set = 0, binding = 12) uniform texture2D depth_buffer;
layout(set = 0, binding = 13) uniform texture2D normal_roughness_buffer;
layout(set = 0, binding = 14) uniform utexture2D voxel_gi_buffer;
layout(set = 0, binding = 15, std140) uniform SDFGI {
vec3 grid_size;
uint max_cascades;
bool use_occlusion;
int probe_axis_size;
float probe_to_uvw;
float normal_bias;
vec3 lightprobe_tex_pixel_size;
float energy;
vec3 lightprobe_uv_offset;
float y_mult;
vec3 occlusion_clamp;
uint pad3;
vec3 occlusion_renormalize;
uint pad4;
vec3 cascade_probe_size;
uint pad5;
ProbeCascadeData cascades[SDFGI_MAX_CASCADES];
}
sdfgi;
#define MAX_VOXEL_GI_INSTANCES 8
struct VoxelGIData {
mat4 xform; // 64 - 64
vec3 bounds; // 12 - 76
float dynamic_range; // 4 - 80
float bias; // 4 - 84
float normal_bias; // 4 - 88
bool blend_ambient; // 4 - 92
uint mipmaps; // 4 - 96
vec3 pad; // 12 - 108
float exposure_normalization; // 4 - 112
};
layout(set = 0, binding = 16, std140) uniform VoxelGIs {
VoxelGIData data[MAX_VOXEL_GI_INSTANCES];
}
voxel_gi_instances;
layout(set = 0, binding = 17) uniform texture3D voxel_gi_textures[MAX_VOXEL_GI_INSTANCES];
layout(set = 0, binding = 18, std140) uniform SceneData {
mat4x4 inv_projection[2];
mat4x4 cam_transform;
vec4 eye_offset[2];
ivec2 screen_size;
float pad1;
float pad2;
}
scene_data;
#ifdef USE_VRS
layout(r8ui, set = 0, binding = 19) uniform restrict readonly uimage2D vrs_buffer;
#endif
layout(push_constant, std430) uniform Params {
uint max_voxel_gi_instances;
bool high_quality_vct;
bool orthogonal;
uint view_index;
vec4 proj_info;
float z_near;
float z_far;
float pad2;
float pad3;
}
params;
vec2 octahedron_wrap(vec2 v) {
vec2 signVal;
signVal.x = v.x >= 0.0 ? 1.0 : -1.0;
signVal.y = v.y >= 0.0 ? 1.0 : -1.0;
return (1.0 - abs(v.yx)) * signVal;
}
vec2 octahedron_encode(vec3 n) {
// https://twitter.com/Stubbesaurus/status/937994790553227264
n /= (abs(n.x) + abs(n.y) + abs(n.z));
n.xy = n.z >= 0.0 ? n.xy : octahedron_wrap(n.xy);
n.xy = n.xy * 0.5 + 0.5;
return n.xy;
}
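// Example: n = (0, 0, 1) maps to the center of the octahedral map, (0.5, 0.5),
// while n = (0, 0, -1) is folded by octahedron_wrap() and lands on the corner (1.0, 1.0).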
vec4 blend_color(vec4 src, vec4 dst) {
vec4 res;
float sa = 1.0 - src.a;
res.a = dst.a * sa + src.a;
if (res.a == 0.0) {
res.rgb = vec3(0);
} else {
res.rgb = (dst.rgb * dst.a * sa + src.rgb * src.a) / res.a;
}
return res;
}
vec3 reconstruct_position(ivec2 screen_pos) {
if (sc_use_full_projection_matrix) {
vec4 pos;
pos.xy = (2.0 * vec2(screen_pos) / vec2(scene_data.screen_size)) - 1.0;
pos.z = texelFetch(sampler2D(depth_buffer, linear_sampler), screen_pos, 0).r * 2.0 - 1.0;
pos.w = 1.0;
pos = scene_data.inv_projection[params.view_index] * pos;
return pos.xyz / pos.w;
} else {
vec3 pos;
pos.z = texelFetch(sampler2D(depth_buffer, linear_sampler), screen_pos, 0).r;
pos.z = pos.z * 2.0 - 1.0;
if (params.orthogonal) {
pos.z = -(pos.z * (params.z_far - params.z_near) - (params.z_far + params.z_near)) / 2.0;
} else {
pos.z = 2.0 * params.z_near * params.z_far / (params.z_far + params.z_near + pos.z * (params.z_far - params.z_near));
}
pos.z = -pos.z;
pos.xy = vec2(screen_pos) * params.proj_info.xy + params.proj_info.zw;
if (!params.orthogonal) {
pos.xy *= pos.z;
}
return pos;
}
}
void sdfvoxel_gi_process(uint cascade, vec3 cascade_pos, vec3 cam_pos, vec3 cam_normal, vec3 cam_specular_normal, float roughness, out vec3 diffuse_light, out vec3 specular_light) {
cascade_pos += cam_normal * sdfgi.normal_bias;
vec3 base_pos = floor(cascade_pos);
//cascade_pos += mix(vec3(0.0),vec3(0.01),lessThan(abs(cascade_pos-base_pos),vec3(0.01))) * cam_normal;
ivec3 probe_base_pos = ivec3(base_pos);
vec4 diffuse_accum = vec4(0.0);
vec3 specular_accum;
ivec3 tex_pos = ivec3(probe_base_pos.xy, int(cascade));
tex_pos.x += probe_base_pos.z * sdfgi.probe_axis_size;
tex_pos.xy = tex_pos.xy * (SDFGI_OCT_SIZE + 2) + ivec2(1);
vec3 diffuse_posf = (vec3(tex_pos) + vec3(octahedron_encode(cam_normal) * float(SDFGI_OCT_SIZE), 0.0)) * sdfgi.lightprobe_tex_pixel_size;
vec3 specular_posf = (vec3(tex_pos) + vec3(octahedron_encode(cam_specular_normal) * float(SDFGI_OCT_SIZE), 0.0)) * sdfgi.lightprobe_tex_pixel_size;
specular_accum = vec3(0.0);
vec4 light_accum = vec4(0.0);
float weight_accum = 0.0;
for (uint j = 0; j < 8; j++) {
ivec3 offset = (ivec3(j) >> ivec3(0, 1, 2)) & ivec3(1, 1, 1);
ivec3 probe_posi = probe_base_pos;
probe_posi += offset;
// Compute weight
vec3 probe_pos = vec3(probe_posi);
vec3 probe_to_pos = cascade_pos - probe_pos;
vec3 probe_dir = normalize(-probe_to_pos);
vec3 trilinear = vec3(1.0) - abs(probe_to_pos);
float weight = trilinear.x * trilinear.y * trilinear.z * max(0.005, dot(cam_normal, probe_dir));
// Compute lightprobe occlusion
if (sdfgi.use_occlusion) {
ivec3 occ_indexv = abs((sdfgi.cascades[cascade].probe_world_offset + probe_posi) & ivec3(1, 1, 1)) * ivec3(1, 2, 4);
vec4 occ_mask = mix(vec4(0.0), vec4(1.0), equal(ivec4(occ_indexv.x | occ_indexv.y), ivec4(0, 1, 2, 3)));
vec3 occ_pos = clamp(cascade_pos, probe_pos - sdfgi.occlusion_clamp, probe_pos + sdfgi.occlusion_clamp) * sdfgi.probe_to_uvw;
occ_pos.z += float(cascade);
if (occ_indexv.z != 0) { //z bit is on, means index is >=4, so make it switch to the other half of textures
occ_pos.x += 1.0;
}
occ_pos *= sdfgi.occlusion_renormalize;
float occlusion = dot(textureLod(sampler3D(occlusion_texture, linear_sampler), occ_pos, 0.0), occ_mask);
weight *= max(occlusion, 0.01);
}
// Compute lightprobe texture position
vec3 diffuse;
vec3 pos_uvw = diffuse_posf;
pos_uvw.xy += vec2(offset.xy) * sdfgi.lightprobe_uv_offset.xy;
pos_uvw.x += float(offset.z) * sdfgi.lightprobe_uv_offset.z;
diffuse = textureLod(sampler2DArray(lightprobe_texture, linear_sampler), pos_uvw, 0.0).rgb;
diffuse_accum += vec4(diffuse * weight * sdfgi.cascades[cascade].exposure_normalization, weight);
{
vec3 specular = vec3(0.0);
vec3 pos_uvw = specular_posf;
pos_uvw.xy += vec2(offset.xy) * sdfgi.lightprobe_uv_offset.xy;
pos_uvw.x += float(offset.z) * sdfgi.lightprobe_uv_offset.z;
if (roughness < 0.99) {
specular = textureLod(sampler2DArray(lightprobe_texture, linear_sampler), pos_uvw + vec3(0, 0, float(sdfgi.max_cascades)), 0.0).rgb;
}
if (roughness > 0.2) {
specular = mix(specular, textureLod(sampler2DArray(lightprobe_texture, linear_sampler), pos_uvw, 0.0).rgb, (roughness - 0.2) * 1.25);
}
specular_accum += specular * weight * sdfgi.cascades[cascade].exposure_normalization;
}
}
if (diffuse_accum.a > 0.0) {
diffuse_accum.rgb /= diffuse_accum.a;
}
diffuse_light = diffuse_accum.rgb;
if (diffuse_accum.a > 0.0) {
specular_accum /= diffuse_accum.a;
}
specular_light = specular_accum;
}
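// Full SDFGI lookup for a surface point: pick the first (smallest) cascade that
// contains the point, sample its probes, blend smoothly toward the next cascade
// near the cascade border, and for low roughness march the SDF along the
// reflection vector to sharpen the specular term.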
void sdfgi_process(vec3 vertex, vec3 normal, vec3 reflection, float roughness, out vec4 ambient_light, out vec4 reflection_light) {
//make vertex orientation the world one, but still align to camera
vertex.y *= sdfgi.y_mult;
normal.y *= sdfgi.y_mult;
reflection.y *= sdfgi.y_mult;
//renormalize
normal = normalize(normal);
reflection = normalize(reflection);
vec3 cam_pos = vertex;
vec3 cam_normal = normal;
vec4 light_accum = vec4(0.0);
float weight_accum = 0.0;
vec4 light_blend_accum = vec4(0.0);
float weight_blend_accum = 0.0;
float blend = -1.0;
// helper constants, compute once
uint cascade = 0xFFFFFFFF;
vec3 cascade_pos;
vec3 cascade_normal;
for (uint i = 0; i < sdfgi.max_cascades; i++) {
cascade_pos = (cam_pos - sdfgi.cascades[i].position) * sdfgi.cascades[i].to_probe;
if (any(lessThan(cascade_pos, vec3(0.0))) || any(greaterThanEqual(cascade_pos, sdfgi.cascade_probe_size))) {
continue; //skip cascade
}
cascade = i;
break;
}
if (cascade < SDFGI_MAX_CASCADES) {
ambient_light = vec4(0, 0, 0, 1);
reflection_light = vec4(0, 0, 0, 1);
float blend;
vec3 diffuse, specular;
sdfvoxel_gi_process(cascade, cascade_pos, cam_pos, cam_normal, reflection, roughness, diffuse, specular);
{
//process blend
float blend_from = (float(sdfgi.probe_axis_size - 1) / 2.0) - 2.5;
float blend_to = blend_from + 2.0;
vec3 inner_pos = cam_pos * sdfgi.cascades[cascade].to_probe;
float len = length(inner_pos);
inner_pos = abs(normalize(inner_pos));
len *= max(inner_pos.x, max(inner_pos.y, inner_pos.z));
if (len >= blend_from) {
blend = smoothstep(blend_from, blend_to, len);
} else {
blend = 0.0;
}
}
if (blend > 0.0) {
//blend
if (cascade == sdfgi.max_cascades - 1) {
ambient_light.a = 1.0 - blend;
reflection_light.a = 1.0 - blend;
} else {
vec3 diffuse2, specular2;
cascade_pos = (cam_pos - sdfgi.cascades[cascade + 1].position) * sdfgi.cascades[cascade + 1].to_probe;
sdfvoxel_gi_process(cascade + 1, cascade_pos, cam_pos, cam_normal, reflection, roughness, diffuse2, specular2);
diffuse = mix(diffuse, diffuse2, blend);
specular = mix(specular, specular2, blend);
}
}
ambient_light.rgb = diffuse;
if (roughness < 0.2) {
vec3 pos_to_uvw = 1.0 / sdfgi.grid_size;
vec4 light_accum = vec4(0.0);
float blend_size = (sdfgi.grid_size.x / float(sdfgi.probe_axis_size - 1)) * 0.5;
float radius_sizes[SDFGI_MAX_CASCADES];
cascade = 0xFFFF;
float base_distance = length(cam_pos);
for (uint i = 0; i < sdfgi.max_cascades; i++) {
radius_sizes[i] = (1.0 / sdfgi.cascades[i].to_cell) * (sdfgi.grid_size.x * 0.5 - blend_size);
if (cascade == 0xFFFF && base_distance < radius_sizes[i]) {
cascade = i;
}
}
cascade = min(cascade, sdfgi.max_cascades - 1);
float max_distance = radius_sizes[sdfgi.max_cascades - 1];
vec3 ray_pos = cam_pos;
vec3 ray_dir = reflection;
{
float prev_radius = cascade > 0 ? radius_sizes[cascade - 1] : 0.0;
float base_blend = (base_distance - prev_radius) / (radius_sizes[cascade] - prev_radius);
float bias = (1.0 + base_blend) * 1.1;
vec3 abs_ray_dir = abs(ray_dir);
//ray_pos += ray_dir * (bias / sdfgi.cascades[cascade].to_cell); //bias to avoid self occlusion
ray_pos += (ray_dir * 1.0 / max(abs_ray_dir.x, max(abs_ray_dir.y, abs_ray_dir.z)) + cam_normal * 1.4) * bias / sdfgi.cascades[cascade].to_cell;
}
float softness = 0.2 + min(1.0, roughness * 5.0) * 4.0; //approximation to roughness so it does not seem like a hard fade
uint i = 0;
bool found = false;
while (true) {
if (length(ray_pos) >= max_distance || light_accum.a > 0.99) {
break;
}
if (!found && i >= cascade && length(ray_pos) < radius_sizes[i]) {
uint next_i = min(i + 1, sdfgi.max_cascades - 1);
cascade = max(i, cascade); //never go down
vec3 pos = ray_pos - sdfgi.cascades[i].position;
pos *= sdfgi.cascades[i].to_cell * pos_to_uvw;
float fdistance = textureLod(sampler3D(sdf_cascades[i], linear_sampler), pos, 0.0).r * 255.0 - 1.1;
vec4 hit_light = vec4(0.0);
if (fdistance < softness) {
hit_light.rgb = textureLod(sampler3D(light_cascades[i], linear_sampler), pos, 0.0).rgb;
hit_light.rgb *= 0.5; //approximation given value read is actually meant for anisotropy
hit_light.a = clamp(1.0 - (fdistance / softness), 0.0, 1.0);
hit_light.rgb *= hit_light.a;
}
fdistance /= sdfgi.cascades[i].to_cell;
if (i < (sdfgi.max_cascades - 1)) {
pos = ray_pos - sdfgi.cascades[next_i].position;
pos *= sdfgi.cascades[next_i].to_cell * pos_to_uvw;
float fdistance2 = textureLod(sampler3D(sdf_cascades[next_i], linear_sampler), pos, 0.0).r * 255.0 - 1.1;
vec4 hit_light2 = vec4(0.0);
if (fdistance2 < softness) {
hit_light2.rgb = textureLod(sampler3D(light_cascades[next_i], linear_sampler), pos, 0.0).rgb;
hit_light2.rgb *= 0.5; //approximation given value read is actually meant for anisotropy
hit_light2.a = clamp(1.0 - (fdistance2 / softness), 0.0, 1.0);
hit_light2.rgb *= hit_light2.a;
}
float prev_radius = i == 0 ? 0.0 : radius_sizes[max(0, i - 1)];
float blend = clamp((length(ray_pos) - prev_radius) / (radius_sizes[i] - prev_radius), 0.0, 1.0);
fdistance2 /= sdfgi.cascades[next_i].to_cell;
hit_light = mix(hit_light, hit_light2, blend);
fdistance = mix(fdistance, fdistance2, blend);
}
light_accum += hit_light;
ray_pos += ray_dir * fdistance;
found = true;
}
i++;
if (i == sdfgi.max_cascades) {
i = 0;
found = false;
}
}
vec3 light = light_accum.rgb / max(light_accum.a, 0.00001);
float alpha = min(1.0, light_accum.a);
float b = min(1.0, roughness * 5.0);
float sa = 1.0 - b;
reflection_light.a = alpha * sa + b;
if (reflection_light.a == 0) {
specular = vec3(0.0);
} else {
specular = (light * alpha * sa + specular * b) / reflection_light.a;
}
}
reflection_light.rgb = specular;
ambient_light.rgb *= sdfgi.energy;
reflection_light.rgb *= sdfgi.energy;
} else {
ambient_light = vec4(0);
reflection_light = vec4(0);
}
}
//standard voxel cone trace
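// The cone's diameter at the current distance selects the mip level (log2(diameter)),
// samples are composited front-to-back until alpha saturates, and each step advances
// by half the diameter so wider cones converge in fewer iterations.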
vec4 voxel_cone_trace(texture3D probe, vec3 cell_size, vec3 pos, vec3 direction, float tan_half_angle, float max_distance, float p_bias) {
float dist = p_bias;
vec4 color = vec4(0.0);
while (dist < max_distance && color.a < 0.95) {
float diameter = max(1.0, 2.0 * tan_half_angle * dist);
vec3 uvw_pos = (pos + dist * direction) * cell_size;
float half_diameter = diameter * 0.5;
//check if outside, then break
if (any(greaterThan(abs(uvw_pos - 0.5), vec3(0.5f + half_diameter * cell_size)))) {
break;
}
vec4 scolor = textureLod(sampler3D(probe, linear_sampler_with_mipmaps), uvw_pos, log2(diameter));
float a = (1.0 - color.a);
color += a * scolor;
dist += half_diameter;
}
return color;
}
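// Specialized variant with a fixed wide aperture: the cone radius is taken equal to
// the distance traveled, so the diameter roughly doubles every step and the sampled
// mip level can simply be incremented instead of recomputed.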
vec4 voxel_cone_trace_45_degrees(texture3D probe, vec3 cell_size, vec3 pos, vec3 direction, float max_distance, float p_bias) {
float dist = p_bias;
vec4 color = vec4(0.0);
float radius = max(0.5, dist);
float lod_level = log2(radius * 2.0);
while (dist < max_distance && color.a < 0.95) {
vec3 uvw_pos = (pos + dist * direction) * cell_size;
//check if outside, then break
if (any(greaterThan(abs(uvw_pos - 0.5), vec3(0.5f + radius * cell_size)))) {
break;
}
vec4 scolor = textureLod(sampler3D(probe, linear_sampler_with_mipmaps), uvw_pos, lod_level);
lod_level += 1.0;
float a = (1.0 - color.a);
scolor *= a;
color += scolor;
dist += radius;
radius = max(0.5, dist);
}
return color;
}
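// Evaluates one VoxelGI probe for a surface point: transform the point, normal and
// reflection into the probe's local space, trace six narrower cones (high quality)
// or four wide cones over the hemisphere for diffuse, and one cone along the
// reflection vector whose aperture grows with roughness for specular; the result is
// faded out toward the probe bounds via the blend factor.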
void voxel_gi_compute(uint index, vec3 position, vec3 normal, vec3 ref_vec, mat3 normal_xform, float roughness, inout vec4 out_spec, inout vec4 out_diff, inout float out_blend) {
position = (voxel_gi_instances.data[index].xform * vec4(position, 1.0)).xyz;
ref_vec = normalize((voxel_gi_instances.data[index].xform * vec4(ref_vec, 0.0)).xyz);
normal = normalize((voxel_gi_instances.data[index].xform * vec4(normal, 0.0)).xyz);
position += normal * voxel_gi_instances.data[index].normal_bias;
//this causes corrupted pixels, i have no idea why..
if (any(bvec2(any(lessThan(position, vec3(0.0))), any(greaterThan(position, voxel_gi_instances.data[index].bounds))))) {
return;
}
mat3 dir_xform = mat3(voxel_gi_instances.data[index].xform) * normal_xform;
vec3 blendv = abs(position / voxel_gi_instances.data[index].bounds * 2.0 - 1.0);
float blend = clamp(1.0 - max(blendv.x, max(blendv.y, blendv.z)), 0.0, 1.0);
//float blend=1.0;
float max_distance = length(voxel_gi_instances.data[index].bounds);
vec3 cell_size = 1.0 / voxel_gi_instances.data[index].bounds;
//irradiance
vec4 light = vec4(0.0);
if (params.high_quality_vct) {
const uint cone_dir_count = 6;
vec3 cone_dirs[cone_dir_count] = vec3[](
vec3(0.0, 0.0, 1.0),
vec3(0.866025, 0.0, 0.5),
vec3(0.267617, 0.823639, 0.5),
vec3(-0.700629, 0.509037, 0.5),
vec3(-0.700629, -0.509037, 0.5),
vec3(0.267617, -0.823639, 0.5));
float cone_weights[cone_dir_count] = float[](0.25, 0.15, 0.15, 0.15, 0.15, 0.15);
float cone_angle_tan = 0.577;
for (uint i = 0; i < cone_dir_count; i++) {
vec3 dir = normalize(dir_xform * cone_dirs[i]);
light += cone_weights[i] * voxel_cone_trace(voxel_gi_textures[index], cell_size, position, dir, cone_angle_tan, max_distance, voxel_gi_instances.data[index].bias);
}
} else {
const uint cone_dir_count = 4;
vec3 cone_dirs[cone_dir_count] = vec3[](
vec3(0.707107, 0.0, 0.707107),
vec3(0.0, 0.707107, 0.707107),
vec3(-0.707107, 0.0, 0.707107),
vec3(0.0, -0.707107, 0.707107));
float cone_weights[cone_dir_count] = float[](0.25, 0.25, 0.25, 0.25);
for (int i = 0; i < cone_dir_count; i++) {
vec3 dir = normalize(dir_xform * cone_dirs[i]);
light += cone_weights[i] * voxel_cone_trace_45_degrees(voxel_gi_textures[index], cell_size, position, dir, max_distance, voxel_gi_instances.data[index].bias);
}
}
light.rgb *= voxel_gi_instances.data[index].dynamic_range * voxel_gi_instances.data[index].exposure_normalization;
if (!voxel_gi_instances.data[index].blend_ambient) {
light.a = 1.0;
}
out_diff += light * blend;
//radiance
vec4 irr_light = voxel_cone_trace(voxel_gi_textures[index], cell_size, position, ref_vec, tan(roughness * 0.5 * M_PI * 0.99), max_distance, voxel_gi_instances.data[index].bias);
irr_light.rgb *= voxel_gi_instances.data[index].dynamic_range * voxel_gi_instances.data[index].exposure_normalization;
if (!voxel_gi_instances.data[index].blend_ambient) {
irr_light.a = 1.0;
}
out_spec += irr_light * blend;
out_blend += blend;
}
vec4 fetch_normal_and_roughness(ivec2 pos) {
vec4 normal_roughness = texelFetch(sampler2D(normal_roughness_buffer, linear_sampler), pos, 0);
normal_roughness.xyz = normalize(normal_roughness.xyz * 2.0 - 1.0);
return normal_roughness;
}
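// Per-pixel GI resolve: decode the packed normal/roughness (a roughness above 0.5
// marks a dynamic object and is mirrored back into range), rotate into world
// orientation with cam_transform, then accumulate SDFGI and/or VoxelGI for the pixel.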
void process_gi(ivec2 pos, vec3 vertex, inout vec4 ambient_light, inout vec4 reflection_light) {
vec4 normal_roughness = fetch_normal_and_roughness(pos);
vec3 normal = normal_roughness.xyz;
if (length(normal) > 0.5) {
//valid normal, can do GI
float roughness = normal_roughness.w;
bool dynamic_object = roughness > 0.5;
if (dynamic_object) {
roughness = 1.0 - roughness;
}
roughness /= (127.0 / 255.0);
vec3 view = -normalize(mat3(scene_data.cam_transform) * (vertex - scene_data.eye_offset[gl_GlobalInvocationID.z].xyz));
vertex = mat3(scene_data.cam_transform) * vertex;
normal = normalize(mat3(scene_data.cam_transform) * normal);
vec3 reflection = normalize(reflect(-view, normal));
#ifdef USE_SDFGI
sdfgi_process(vertex, normal, reflection, roughness, ambient_light, reflection_light);
#endif
#ifdef USE_VOXEL_GI_INSTANCES
{
#ifdef SAMPLE_VOXEL_GI_NEAREST
uvec2 voxel_gi_tex = texelFetch(voxel_gi_buffer, pos, 0).rg;
#else
uvec2 voxel_gi_tex = texelFetch(usampler2D(voxel_gi_buffer, linear_sampler), pos, 0).rg;
#endif
roughness *= roughness;
//find arbitrary tangent and bitangent, then build a matrix
vec3 v0 = abs(normal.z) < 0.999 ? vec3(0.0, 0.0, 1.0) : vec3(0.0, 1.0, 0.0);
vec3 tangent = normalize(cross(v0, normal));
vec3 bitangent = normalize(cross(tangent, normal));
mat3 normal_mat = mat3(tangent, bitangent, normal);
vec4 amb_accum = vec4(0.0);
vec4 spec_accum = vec4(0.0);
float blend_accum = 0.0;
for (uint i = 0; i < params.max_voxel_gi_instances; i++) {
if (any(equal(uvec2(i), voxel_gi_tex))) {
voxel_gi_compute(i, vertex, normal, reflection, normal_mat, roughness, spec_accum, amb_accum, blend_accum);
}
}
if (blend_accum > 0.0) {
amb_accum /= blend_accum;
spec_accum /= blend_accum;
}
#ifdef USE_SDFGI
reflection_light = blend_color(spec_accum, reflection_light);
ambient_light = blend_color(amb_accum, ambient_light);
#else
reflection_light = spec_accum;
ambient_light = amb_accum;
#endif
}
#endif
}
}
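// With VRS enabled, only the top-left texel of each shading-rate block is shaded;
// the vrs_x/vrs_y fan-out at the end of main() copies the result to every texel the
// block covers (valid rates are 1, 2 and 4 per axis).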
void main() {
ivec2 pos = ivec2(gl_GlobalInvocationID.xy);
uint vrs_x, vrs_y;
#ifdef USE_VRS
if (sc_use_vrs) {
ivec2 vrs_pos;
// Currently we use a 16x16 texel, possibly some day make this configurable.
if (sc_half_res) {
vrs_pos = pos >> 3;
} else {
vrs_pos = pos >> 4;
}
uint vrs_texel = imageLoad(vrs_buffer, vrs_pos).r;
// note, valid values for vrs_x and vrs_y are 1, 2 and 4.
vrs_x = 1 << ((vrs_texel >> 2) & 3);
vrs_y = 1 << (vrs_texel & 3);
if (mod(pos.x, vrs_x) != 0) {
return;
}
if (mod(pos.y, vrs_y) != 0) {
return;
}
}
#endif
if (sc_half_res) {
pos <<= 1;
}
if (any(greaterThanEqual(pos, scene_data.screen_size))) { //too large, do nothing
return;
}
vec4 ambient_light = vec4(0.0);
vec4 reflection_light = vec4(0.0);
vec3 vertex = reconstruct_position(pos);
vertex.y = -vertex.y;
process_gi(pos, vertex, ambient_light, reflection_light);
if (sc_half_res) {
pos >>= 1;
}
imageStore(ambient_buffer, pos, ambient_light);
imageStore(reflection_buffer, pos, reflection_light);
#ifdef USE_VRS
if (sc_use_vrs) {
if (vrs_x > 1) {
imageStore(ambient_buffer, pos + ivec2(1, 0), ambient_light);
imageStore(reflection_buffer, pos + ivec2(1, 0), reflection_light);
}
if (vrs_x > 2) {
imageStore(ambient_buffer, pos + ivec2(2, 0), ambient_light);
imageStore(reflection_buffer, pos + ivec2(2, 0), reflection_light);
imageStore(ambient_buffer, pos + ivec2(3, 0), ambient_light);
imageStore(reflection_buffer, pos + ivec2(3, 0), reflection_light);
}
if (vrs_y > 1) {
imageStore(ambient_buffer, pos + ivec2(0, 1), ambient_light);
imageStore(reflection_buffer, pos + ivec2(0, 1), reflection_light);
}
if (vrs_y > 1 && vrs_x > 1) {
imageStore(ambient_buffer, pos + ivec2(1, 1), ambient_light);
imageStore(reflection_buffer, pos + ivec2(1, 1), reflection_light);
}
if (vrs_y > 1 && vrs_x > 2) {
imageStore(ambient_buffer, pos + ivec2(2, 1), ambient_light);
imageStore(reflection_buffer, pos + ivec2(2, 1), reflection_light);
imageStore(ambient_buffer, pos + ivec2(3, 1), ambient_light);
imageStore(reflection_buffer, pos + ivec2(3, 1), reflection_light);
}
if (vrs_y > 2) {
imageStore(ambient_buffer, pos + ivec2(0, 2), ambient_light);
imageStore(reflection_buffer, pos + ivec2(0, 2), reflection_light);
imageStore(ambient_buffer, pos + ivec2(0, 3), ambient_light);
imageStore(reflection_buffer, pos + ivec2(0, 3), reflection_light);
}
if (vrs_y > 2 && vrs_x > 1) {
imageStore(ambient_buffer, pos + ivec2(1, 2), ambient_light);
imageStore(reflection_buffer, pos + ivec2(1, 2), reflection_light);
imageStore(ambient_buffer, pos + ivec2(1, 3), ambient_light);
imageStore(reflection_buffer, pos + ivec2(1, 3), reflection_light);
}
if (vrs_y > 2 && vrs_x > 2) {
imageStore(ambient_buffer, pos + ivec2(2, 2), ambient_light);
imageStore(reflection_buffer, pos + ivec2(2, 2), reflection_light);
imageStore(ambient_buffer, pos + ivec2(2, 3), ambient_light);
imageStore(reflection_buffer, pos + ivec2(2, 3), reflection_light);
imageStore(ambient_buffer, pos + ivec2(3, 2), ambient_light);
imageStore(reflection_buffer, pos + ivec2(3, 2), reflection_light);
imageStore(ambient_buffer, pos + ivec2(3, 3), ambient_light);
imageStore(reflection_buffer, pos + ivec2(3, 3), reflection_light);
}
}
#endif
}

View File

@@ -0,0 +1,185 @@
#[compute]
#version 450
#VERSION_DEFINES
layout(local_size_x = 8, local_size_y = 8, local_size_z = 1) in;
#define MAX_CASCADES 8
layout(set = 0, binding = 1) uniform texture3D sdf_cascades[MAX_CASCADES];
layout(set = 0, binding = 2) uniform texture3D light_cascades[MAX_CASCADES];
layout(set = 0, binding = 3) uniform texture3D aniso0_cascades[MAX_CASCADES];
layout(set = 0, binding = 4) uniform texture3D aniso1_cascades[MAX_CASCADES];
layout(set = 0, binding = 5) uniform texture3D occlusion_texture;
layout(set = 0, binding = 8) uniform sampler linear_sampler;
struct CascadeData {
vec3 offset; //offset of (0,0,0) in world coordinates
float to_cell; // 1/bounds * grid_size
ivec3 probe_world_offset;
uint pad;
vec4 pad2;
};
layout(set = 0, binding = 9, std140) uniform Cascades {
CascadeData data[MAX_CASCADES];
}
cascades;
layout(rgba16f, set = 0, binding = 10) uniform restrict writeonly image2D screen_buffer;
layout(set = 0, binding = 11) uniform texture2DArray lightprobe_texture;
layout(push_constant, std430) uniform Params {
vec3 grid_size;
uint max_cascades;
ivec2 screen_size;
float y_mult;
float z_near;
mat3x4 inv_projection;
// We pack these more tightly than mat3 and vec3, which will require some reconstruction trickery.
float cam_basis[3][3];
float cam_origin[3];
}
params;
vec3 linear_to_srgb(vec3 color) {
//if going to srgb, clamp from 0 to 1.
color = clamp(color, vec3(0.0), vec3(1.0));
const vec3 a = vec3(0.055f);
return mix((vec3(1.0f) + a) * pow(color.rgb, vec3(1.0f / 2.4f)) - a, 12.92f * color.rgb, lessThan(color.rgb, vec3(0.0031308f)));
}
vec2 octahedron_wrap(vec2 v) {
vec2 signVal;
signVal.x = v.x >= 0.0 ? 1.0 : -1.0;
signVal.y = v.y >= 0.0 ? 1.0 : -1.0;
return (1.0 - abs(v.yx)) * signVal;
}
vec2 octahedron_encode(vec3 n) {
// https://twitter.com/Stubbesaurus/status/937994790553227264
n /= (abs(n.x) + abs(n.y) + abs(n.z));
n.xy = n.z >= 0.0 ? n.xy : octahedron_wrap(n.xy);
n.xy = n.xy * 0.5 + 0.5;
return n.xy;
}
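// Debug view: build a camera ray for this pixel from cam_origin/cam_basis and the
// inverse projection, sphere-trace it through the SDF cascades (advancing by the
// stored distance each step), shade a hit with the cascade's anisotropic light, and
// leave the pixel black on a miss.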
void main() {
// Pixel being shaded
ivec2 screen_pos = ivec2(gl_GlobalInvocationID.xy);
if (any(greaterThanEqual(screen_pos, params.screen_size))) { //too large, do nothing
return;
}
vec3 ray_pos;
vec3 ray_dir;
{
ray_pos = vec3(params.cam_origin[0], params.cam_origin[1], params.cam_origin[2]);
ray_dir.xy = ((vec2(screen_pos) / vec2(params.screen_size)) * 2.0 - 1.0);
ray_dir.z = params.z_near;
ray_dir = (vec4(ray_dir, 1.0) * mat4(params.inv_projection)).xyz;
mat3 cam_basis;
{
vec3 c0 = vec3(params.cam_basis[0][0], params.cam_basis[0][1], params.cam_basis[0][2]);
vec3 c1 = vec3(params.cam_basis[1][0], params.cam_basis[1][1], params.cam_basis[1][2]);
vec3 c2 = vec3(params.cam_basis[2][0], params.cam_basis[2][1], params.cam_basis[2][2]);
cam_basis = mat3(c0, c1, c2);
}
ray_dir = normalize(cam_basis * ray_dir);
}
ray_pos.y *= params.y_mult;
ray_dir.y *= params.y_mult;
ray_dir = normalize(ray_dir);
vec3 pos_to_uvw = 1.0 / params.grid_size;
vec3 light = vec3(0.0);
float blend = 0.0;
#if 1
// No interpolation
vec3 inv_dir = 1.0 / ray_dir;
float rough = 0.5;
bool hit = false;
for (uint i = 0; i < params.max_cascades; i++) {
//convert to local bounds
vec3 pos = ray_pos - cascades.data[i].offset;
pos *= cascades.data[i].to_cell;
// Should never happen for debug, since we start mostly at the bounds center,
// but add anyway.
//if (any(lessThan(pos,vec3(0.0))) || any(greaterThanEqual(pos,params.grid_size))) {
// continue; //already past bounds for this cascade, goto next
//}
//find maximum advance distance (until reaching bounds)
vec3 t0 = -pos * inv_dir;
vec3 t1 = (params.grid_size - pos) * inv_dir;
vec3 tmax = max(t0, t1);
float max_advance = min(tmax.x, min(tmax.y, tmax.z));
float advance = 0.0;
vec3 uvw;
hit = false;
while (advance < max_advance) {
//read how much to advance from SDF
uvw = (pos + ray_dir * advance) * pos_to_uvw;
float distance = texture(sampler3D(sdf_cascades[i], linear_sampler), uvw).r * 255.0 - 1.7;
if (distance < 0.001) {
//consider hit
hit = true;
break;
}
advance += distance;
}
if (!hit) {
pos += ray_dir * min(advance, max_advance);
pos /= cascades.data[i].to_cell;
pos += cascades.data[i].offset;
ray_pos = pos;
continue;
}
//compute albedo, emission and normal at hit point
const float EPSILON = 0.001;
vec3 hit_normal = normalize(vec3(
texture(sampler3D(sdf_cascades[i], linear_sampler), uvw + vec3(EPSILON, 0.0, 0.0)).r - texture(sampler3D(sdf_cascades[i], linear_sampler), uvw - vec3(EPSILON, 0.0, 0.0)).r,
texture(sampler3D(sdf_cascades[i], linear_sampler), uvw + vec3(0.0, EPSILON, 0.0)).r - texture(sampler3D(sdf_cascades[i], linear_sampler), uvw - vec3(0.0, EPSILON, 0.0)).r,
texture(sampler3D(sdf_cascades[i], linear_sampler), uvw + vec3(0.0, 0.0, EPSILON)).r - texture(sampler3D(sdf_cascades[i], linear_sampler), uvw - vec3(0.0, 0.0, EPSILON)).r));
vec3 hit_light = texture(sampler3D(light_cascades[i], linear_sampler), uvw).rgb;
vec4 aniso0 = texture(sampler3D(aniso0_cascades[i], linear_sampler), uvw);
vec3 hit_aniso0 = aniso0.rgb;
vec3 hit_aniso1 = vec3(aniso0.a, texture(sampler3D(aniso1_cascades[i], linear_sampler), uvw).rg);
hit_light *= (dot(max(vec3(0.0), (hit_normal * hit_aniso0)), vec3(1.0)) + dot(max(vec3(0.0), (-hit_normal * hit_aniso1)), vec3(1.0)));
light = hit_light;
break;
}
#endif
imageStore(screen_buffer, screen_pos, vec4(linear_to_srgb(light), 1.0));
}

View File

@@ -0,0 +1,243 @@
#[vertex]
#version 450
#ifdef USE_MULTIVIEW
#extension GL_EXT_multiview : enable
#define ViewIndex gl_ViewIndex
#else // USE_MULTIVIEW
#define ViewIndex 0
#endif // !USE_MULTIVIEW
#VERSION_DEFINES
#define MAX_CASCADES 8
#define MAX_VIEWS 2
layout(push_constant, std430) uniform Params {
uint band_power;
uint sections_in_band;
uint band_mask;
float section_arc;
vec3 grid_size;
uint cascade;
uint pad;
float y_mult;
uint probe_debug_index;
int probe_axis_size;
}
params;
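// Procedurally generates a unit-sphere vertex from the vertex index using the
// band/section tessellation from the article linked below; band_power,
// sections_in_band, band_mask and section_arc describe the tessellation resolution.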
// https://in4k.untergrund.net/html_articles/hugi_27_-_coding_corner_polaris_sphere_tessellation_101.htm
vec3 get_sphere_vertex(uint p_vertex_id) {
float x_angle = float(p_vertex_id & 1u) + (p_vertex_id >> params.band_power);
float y_angle =
float((p_vertex_id & params.band_mask) >> 1) + ((p_vertex_id >> params.band_power) * params.sections_in_band);
x_angle *= params.section_arc * 0.5f; // remember - 180° x rot not 360
y_angle *= -params.section_arc;
vec3 point = vec3(sin(x_angle) * sin(y_angle), cos(x_angle), sin(x_angle) * cos(y_angle));
return point;
}
#ifdef MODE_PROBES
layout(location = 0) out vec3 normal_interp;
layout(location = 1) out flat uint probe_index;
#endif
#ifdef MODE_VISIBILITY
layout(location = 0) out float visibility;
#endif
struct CascadeData {
vec3 offset; //offset of (0,0,0) in world coordinates
float to_cell; // 1/bounds * grid_size
ivec3 probe_world_offset;
uint pad;
vec4 pad2;
};
layout(set = 0, binding = 1, std140) uniform Cascades {
CascadeData data[MAX_CASCADES];
}
cascades;
layout(set = 0, binding = 4) uniform texture3D occlusion_texture;
layout(set = 0, binding = 3) uniform sampler linear_sampler;
layout(set = 0, binding = 5, std140) uniform SceneData {
mat4 projection[MAX_VIEWS];
}
scene_data;
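// MODE_PROBES instances one small sphere per light probe at the probe's world
// position and shades it with the probe's stored irradiance in the fragment stage.
// MODE_VISIBILITY instances tiny spheres over the occlusion cells surrounding the
// probe selected by probe_debug_index and forwards the fetched occlusion value.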
void main() {
#ifdef MODE_PROBES
probe_index = gl_InstanceIndex;
normal_interp = get_sphere_vertex(gl_VertexIndex);
vec3 vertex = normal_interp * 0.2;
float probe_cell_size = float(params.grid_size.x / float(params.probe_axis_size - 1)) / cascades.data[params.cascade].to_cell;
ivec3 probe_cell;
probe_cell.x = int(probe_index % params.probe_axis_size);
probe_cell.y = int(probe_index / (params.probe_axis_size * params.probe_axis_size));
probe_cell.z = int((probe_index / params.probe_axis_size) % params.probe_axis_size);
vertex += (cascades.data[params.cascade].offset + vec3(probe_cell) * probe_cell_size) / vec3(1.0, params.y_mult, 1.0);
gl_Position = scene_data.projection[ViewIndex] * vec4(vertex, 1.0);
#endif
#ifdef MODE_VISIBILITY
int probe_index = int(params.probe_debug_index);
vec3 vertex = get_sphere_vertex(gl_VertexIndex) * 0.01;
float probe_cell_size = float(params.grid_size.x / float(params.probe_axis_size - 1)) / cascades.data[params.cascade].to_cell;
ivec3 probe_cell;
probe_cell.x = int(probe_index % params.probe_axis_size);
probe_cell.y = int((probe_index % (params.probe_axis_size * params.probe_axis_size)) / params.probe_axis_size);
probe_cell.z = int(probe_index / (params.probe_axis_size * params.probe_axis_size));
vertex += (cascades.data[params.cascade].offset + vec3(probe_cell) * probe_cell_size) / vec3(1.0, params.y_mult, 1.0);
int probe_voxels = int(params.grid_size.x) / int(params.probe_axis_size - 1);
int occluder_index = int(gl_InstanceIndex);
int diameter = probe_voxels * 2;
ivec3 occluder_pos;
occluder_pos.x = int(occluder_index % diameter);
occluder_pos.y = int(occluder_index / (diameter * diameter));
occluder_pos.z = int((occluder_index / diameter) % diameter);
float cell_size = 1.0 / cascades.data[params.cascade].to_cell;
ivec3 occluder_offset = occluder_pos - ivec3(diameter / 2);
vertex += ((vec3(occluder_offset) + vec3(0.5)) * cell_size) / vec3(1.0, params.y_mult, 1.0);
ivec3 global_cell = probe_cell + cascades.data[params.cascade].probe_world_offset;
uint occlusion_layer = 0;
if ((global_cell.x & 1) != 0) {
occlusion_layer |= 1;
}
if ((global_cell.y & 1) != 0) {
occlusion_layer |= 2;
}
if ((global_cell.z & 1) != 0) {
occlusion_layer |= 4;
}
ivec3 tex_pos = probe_cell * probe_voxels + occluder_offset;
const vec4 layer_axis[4] = vec4[](
vec4(1, 0, 0, 0),
vec4(0, 1, 0, 0),
vec4(0, 0, 1, 0),
vec4(0, 0, 0, 1));
tex_pos.z += int(params.cascade) * int(params.grid_size.z);
if (occlusion_layer >= 4) {
tex_pos.x += int(params.grid_size.x);
occlusion_layer &= 3;
}
visibility = dot(texelFetch(sampler3D(occlusion_texture, linear_sampler), tex_pos, 0), layer_axis[occlusion_layer]);
gl_Position = scene_data.projection[ViewIndex] * vec4(vertex, 1.0);
#endif
}
#[fragment]
#version 450
#VERSION_DEFINES
#define MAX_VIEWS 2
layout(location = 0) out vec4 frag_color;
layout(set = 0, binding = 2) uniform texture2DArray lightprobe_texture;
layout(set = 0, binding = 3) uniform sampler linear_sampler;
layout(push_constant, std430) uniform Params {
uint band_power;
uint sections_in_band;
uint band_mask;
float section_arc;
vec3 grid_size;
uint cascade;
uint pad;
float y_mult;
uint probe_debug_index;
int probe_axis_size;
}
params;
#ifdef MODE_PROBES
layout(location = 0) in vec3 normal_interp;
layout(location = 1) in flat uint probe_index;
#endif
#ifdef MODE_VISIBILITY
layout(location = 0) in float visibility;
#endif
vec2 octahedron_wrap(vec2 v) {
vec2 signVal;
signVal.x = v.x >= 0.0 ? 1.0 : -1.0;
signVal.y = v.y >= 0.0 ? 1.0 : -1.0;
return (1.0 - abs(v.yx)) * signVal;
}
vec2 octahedron_encode(vec3 n) {
// https://twitter.com/Stubbesaurus/status/937994790553227264
n /= (abs(n.x) + abs(n.y) + abs(n.z));
n.xy = n.z >= 0.0 ? n.xy : octahedron_wrap(n.xy);
n.xy = n.xy * 0.5 + 0.5;
return n.xy;
}
void main() {
#ifdef MODE_PROBES
ivec3 tex_pos;
tex_pos.x = int(probe_index) % params.probe_axis_size; //x
tex_pos.y = int(probe_index) / (params.probe_axis_size * params.probe_axis_size);
tex_pos.x += params.probe_axis_size * ((int(probe_index) / params.probe_axis_size) % params.probe_axis_size); //z
tex_pos.z = int(params.cascade);
vec3 tex_pos_ofs = vec3(octahedron_encode(normal_interp) * float(OCT_SIZE), 0.0);
vec3 tex_posf = vec3(vec2(tex_pos.xy * (OCT_SIZE + 2) + ivec2(1)), float(tex_pos.z)) + tex_pos_ofs;
tex_posf.xy /= vec2(ivec2(params.probe_axis_size * params.probe_axis_size * (OCT_SIZE + 2), params.probe_axis_size * (OCT_SIZE + 2)));
vec4 indirect_light = textureLod(sampler2DArray(lightprobe_texture, linear_sampler), tex_posf, 0.0);
frag_color = indirect_light;
#endif
#ifdef MODE_VISIBILITY
frag_color = vec4(vec3(1, visibility, visibility), 1.0);
#endif
}

View File

@@ -0,0 +1,507 @@
#[compute]
#version 450
#VERSION_DEFINES
layout(local_size_x = 64, local_size_y = 1, local_size_z = 1) in;
#define MAX_CASCADES 8
layout(set = 0, binding = 1) uniform texture3D sdf_cascades[MAX_CASCADES];
layout(set = 0, binding = 2) uniform sampler linear_sampler;
layout(set = 0, binding = 3, std430) restrict readonly buffer DispatchData {
uint x;
uint y;
uint z;
uint total_count;
}
dispatch_data;
struct ProcessVoxel {
uint position; // xyz 7 bit packed, extra 11 bits for neighbors.
uint albedo; // rgb555 albedo in bits 0-14, bits 15-20 are normal bits (set if geometry exists toward that side), extra 11 bits for neighbors.
uint light; // rgbe8985 encoded total saved light, extra 2 bits for neighbors.
uint light_aniso; // 55555 light anisotropy, extra 2 bits for neighbors.
//total neighbors: 26
};
#ifdef MODE_PROCESS_STATIC
layout(set = 0, binding = 4, std430) restrict buffer ProcessVoxels {
#else
layout(set = 0, binding = 4, std430) restrict buffer readonly ProcessVoxels {
#endif
ProcessVoxel data[];
}
process_voxels;
layout(r32ui, set = 0, binding = 5) uniform restrict uimage3D dst_light;
layout(rgba8, set = 0, binding = 6) uniform restrict image3D dst_aniso0;
layout(rg8, set = 0, binding = 7) uniform restrict image3D dst_aniso1;
struct CascadeData {
vec3 offset; //offset of (0,0,0) in world coordinates
float to_cell; // 1/bounds * grid_size
ivec3 probe_world_offset;
uint pad;
vec4 pad2;
};
layout(set = 0, binding = 8, std140) uniform Cascades {
CascadeData data[MAX_CASCADES];
}
cascades;
#define LIGHT_TYPE_DIRECTIONAL 0
#define LIGHT_TYPE_OMNI 1
#define LIGHT_TYPE_SPOT 2
struct Light {
vec3 color;
float energy;
vec3 direction;
bool has_shadow;
vec3 position;
float attenuation;
uint type;
float cos_spot_angle;
float inv_spot_attenuation;
float radius;
};
layout(set = 0, binding = 9, std140) buffer restrict readonly Lights {
Light data[];
}
lights;
layout(set = 0, binding = 10) uniform texture2DArray lightprobe_texture;
layout(set = 0, binding = 11) uniform texture3D occlusion_texture;
layout(push_constant, std430) uniform Params {
vec3 grid_size;
uint max_cascades;
uint cascade;
uint light_count;
uint process_offset;
uint process_increment;
int probe_axis_size;
float bounce_feedback;
float y_mult;
bool use_occlusion;
}
params;
vec2 octahedron_wrap(vec2 v) {
vec2 signVal;
signVal.x = v.x >= 0.0 ? 1.0 : -1.0;
signVal.y = v.y >= 0.0 ? 1.0 : -1.0;
return (1.0 - abs(v.yx)) * signVal;
}
vec2 octahedron_encode(vec3 n) {
// https://twitter.com/Stubbesaurus/status/937994790553227264
n /= (abs(n.x) + abs(n.y) + abs(n.z));
n.xy = n.z >= 0.0 ? n.xy : octahedron_wrap(n.xy);
n.xy = n.xy * 0.5 + 0.5;
return n.xy;
}
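// Smooth omni/spot distance attenuation: a (1 - (d/r)^4)^2 window that reaches zero
// at the light radius, multiplied by an inverse power falloff controlled by the
// light's attenuation exponent.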
float get_omni_attenuation(float distance, float inv_range, float decay) {
float nd = distance * inv_range;
nd *= nd;
nd *= nd; // nd^4
nd = max(1.0 - nd, 0.0);
nd *= nd; // nd^2
return nd * pow(max(distance, 0.0001), -decay);
}
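// Per-voxel direct lighting: unpack the voxel's position and albedo, optionally add
// bounced light gathered from the surrounding light probes (bounce_feedback), add
// the light already stored on the voxel, then march the SDF toward every light to
// resolve shadowing and accumulate the result per face (6-axis anisotropy). The
// total is re-encoded as RGBE and the anisotropy as normalized per-axis luma.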
void main() {
uint voxel_index = uint(gl_GlobalInvocationID.x);
//used for skipping voxels every N frames
if (params.process_increment > 1) {
voxel_index *= params.process_increment;
voxel_index += params.process_offset;
}
if (voxel_index >= dispatch_data.total_count) {
return;
}
uint voxel_position = process_voxels.data[voxel_index].position;
//keep for storing to texture
ivec3 positioni = ivec3((uvec3(voxel_position, voxel_position, voxel_position) >> uvec3(0, 7, 14)) & uvec3(0x7F));
vec3 position = vec3(positioni) + vec3(0.5);
position /= cascades.data[params.cascade].to_cell;
position += cascades.data[params.cascade].offset;
uint voxel_albedo = process_voxels.data[voxel_index].albedo;
vec3 albedo = vec3(uvec3(voxel_albedo >> 10, voxel_albedo >> 5, voxel_albedo) & uvec3(0x1F)) / float(0x1F);
vec3 light_accum[6] = vec3[](vec3(0.0), vec3(0.0), vec3(0.0), vec3(0.0), vec3(0.0), vec3(0.0));
uint valid_aniso = (voxel_albedo >> 15) & 0x3F;
const vec3 aniso_dir[6] = vec3[](
vec3(1, 0, 0),
vec3(0, 1, 0),
vec3(0, 0, 1),
vec3(-1, 0, 0),
vec3(0, -1, 0),
vec3(0, 0, -1));
// Add indirect light first, in order to save computation resources
#ifdef MODE_PROCESS_DYNAMIC
if (params.bounce_feedback > 0.001) {
vec3 feedback = (params.bounce_feedback < 1.0) ? (albedo * params.bounce_feedback) : mix(albedo, vec3(1.0), params.bounce_feedback - 1.0);
vec3 pos = (vec3(positioni) + vec3(0.5)) * float(params.probe_axis_size - 1) / params.grid_size;
ivec3 probe_base_pos = ivec3(pos);
float weight_accum[6] = float[](0, 0, 0, 0, 0, 0);
ivec3 tex_pos = ivec3(probe_base_pos.xy, int(params.cascade));
tex_pos.x += probe_base_pos.z * int(params.probe_axis_size);
tex_pos.xy = tex_pos.xy * (OCT_SIZE + 2) + ivec2(1);
vec3 base_tex_posf = vec3(tex_pos);
vec2 tex_pixel_size = 1.0 / vec2(ivec2((OCT_SIZE + 2) * params.probe_axis_size * params.probe_axis_size, (OCT_SIZE + 2) * params.probe_axis_size));
vec3 probe_uv_offset = vec3(ivec3(OCT_SIZE + 2, OCT_SIZE + 2, (OCT_SIZE + 2) * params.probe_axis_size)) * tex_pixel_size.xyx;
for (uint j = 0; j < 8; j++) {
ivec3 offset = (ivec3(j) >> ivec3(0, 1, 2)) & ivec3(1, 1, 1);
ivec3 probe_posi = probe_base_pos;
probe_posi += offset;
// Compute weight
vec3 probe_pos = vec3(probe_posi);
vec3 probe_to_pos = pos - probe_pos;
vec3 probe_dir = normalize(-probe_to_pos);
// Compute lightprobe texture position
vec3 trilinear = vec3(1.0) - abs(probe_to_pos);
for (uint k = 0; k < 6; k++) {
if (bool(valid_aniso & (1 << k))) {
vec3 n = aniso_dir[k];
float weight = trilinear.x * trilinear.y * trilinear.z * max(0, dot(n, probe_dir));
if (weight > 0.0 && params.use_occlusion) {
ivec3 occ_indexv = abs((cascades.data[params.cascade].probe_world_offset + probe_posi) & ivec3(1, 1, 1)) * ivec3(1, 2, 4);
vec4 occ_mask = mix(vec4(0.0), vec4(1.0), equal(ivec4(occ_indexv.x | occ_indexv.y), ivec4(0, 1, 2, 3)));
vec3 occ_pos = (vec3(positioni) + aniso_dir[k] + vec3(0.5)) / params.grid_size;
occ_pos.z += float(params.cascade);
if (occ_indexv.z != 0) { //z bit is on, means index is >=4, so make it switch to the other half of textures
occ_pos.x += 1.0;
}
occ_pos *= vec3(0.5, 1.0, 1.0 / float(params.max_cascades)); //renormalize
float occlusion = dot(textureLod(sampler3D(occlusion_texture, linear_sampler), occ_pos, 0.0), occ_mask);
weight *= occlusion;
}
if (weight > 0.0) {
vec3 tex_posf = base_tex_posf + vec3(octahedron_encode(n) * float(OCT_SIZE), 0.0);
tex_posf.xy *= tex_pixel_size;
vec3 pos_uvw = tex_posf;
pos_uvw.xy += vec2(offset.xy) * probe_uv_offset.xy;
pos_uvw.x += float(offset.z) * probe_uv_offset.z;
vec3 indirect_light = textureLod(sampler2DArray(lightprobe_texture, linear_sampler), pos_uvw, 0.0).rgb;
light_accum[k] += indirect_light * weight;
weight_accum[k] += weight;
}
}
}
}
for (uint k = 0; k < 6; k++) {
if (weight_accum[k] > 0.0) {
light_accum[k] /= weight_accum[k];
light_accum[k] *= feedback;
}
}
}
#endif
{
uint rgbe = process_voxels.data[voxel_index].light;
//read rgbe8985
float r = float((rgbe & 0xff) << 1);
float g = float((rgbe >> 8) & 0x1ff);
float b = float(((rgbe >> 17) & 0xff) << 1);
float e = float((rgbe >> 25) & 0x1F);
float m = pow(2.0, e - 15.0 - 9.0);
vec3 l = vec3(r, g, b) * m;
uint aniso = process_voxels.data[voxel_index].light_aniso;
for (uint i = 0; i < 6; i++) {
float strength = ((aniso >> (i * 5)) & 0x1F) / float(0x1F);
light_accum[i] += l * strength;
}
}
// Raytrace light
vec3 pos_to_uvw = 1.0 / params.grid_size;
vec3 uvw_ofs = pos_to_uvw * 0.5;
for (uint i = 0; i < params.light_count; i++) {
float attenuation = 1.0;
vec3 direction;
float light_distance = 1e20;
switch (lights.data[i].type) {
case LIGHT_TYPE_DIRECTIONAL: {
direction = -lights.data[i].direction;
} break;
case LIGHT_TYPE_OMNI: {
vec3 rel_vec = lights.data[i].position - position;
direction = normalize(rel_vec);
light_distance = length(rel_vec);
rel_vec.y /= params.y_mult;
attenuation = get_omni_attenuation(light_distance, 1.0 / lights.data[i].radius, lights.data[i].attenuation);
} break;
case LIGHT_TYPE_SPOT: {
vec3 rel_vec = lights.data[i].position - position;
direction = normalize(rel_vec);
light_distance = length(rel_vec);
rel_vec.y /= params.y_mult;
attenuation = get_omni_attenuation(light_distance, 1.0 / lights.data[i].radius, lights.data[i].attenuation);
float cos_spot_angle = lights.data[i].cos_spot_angle;
float cos_angle = dot(-direction, lights.data[i].direction);
if (cos_angle < cos_spot_angle) {
continue;
}
float scos = max(cos_angle, cos_spot_angle);
float spot_rim = max(0.0001, (1.0 - scos) / (1.0 - cos_spot_angle));
attenuation *= 1.0 - pow(spot_rim, lights.data[i].inv_spot_attenuation);
} break;
}
if (attenuation < 0.001) {
continue;
}
bool hit = false;
vec3 ray_pos = position;
vec3 ray_dir = direction;
vec3 inv_dir = 1.0 / ray_dir;
//this is how to properly bias outgoing rays
float cell_size = 1.0 / cascades.data[params.cascade].to_cell;
ray_pos += sign(direction) * cell_size * 0.48; // go almost to the box edge but remain inside
ray_pos += ray_dir * 0.4 * cell_size; //apply a small bias from there
for (uint j = params.cascade; j < params.max_cascades; j++) {
//convert to local bounds
vec3 pos = ray_pos - cascades.data[j].offset;
pos *= cascades.data[j].to_cell;
float local_distance = light_distance * cascades.data[j].to_cell;
if (any(lessThan(pos, vec3(0.0))) || any(greaterThanEqual(pos, params.grid_size))) {
continue; //already past bounds for this cascade, goto next
}
//find maximum advance distance (until reaching bounds)
vec3 t0 = -pos * inv_dir;
vec3 t1 = (params.grid_size - pos) * inv_dir;
vec3 tmax = max(t0, t1);
float max_advance = min(tmax.x, min(tmax.y, tmax.z));
max_advance = min(local_distance, max_advance);
float advance = 0.0;
float occlusion = 1.0;
while (advance < max_advance) {
//read how much to advance from SDF
vec3 uvw = (pos + ray_dir * advance) * pos_to_uvw;
float distance = texture(sampler3D(sdf_cascades[j], linear_sampler), uvw).r * 255.0 - 1.0;
if (distance < 0.001) {
//consider hit
hit = true;
break;
}
occlusion = min(occlusion, distance);
advance += distance;
}
if (hit) {
attenuation *= occlusion;
break;
}
if (advance >= local_distance) {
break; //past light distance, abandon search
}
//change ray origin to collision with bounds
pos += ray_dir * max_advance;
pos /= cascades.data[j].to_cell;
pos += cascades.data[j].offset;
light_distance -= max_advance / cascades.data[j].to_cell;
ray_pos = pos;
}
if (!hit) {
vec3 light = albedo * lights.data[i].color.rgb * lights.data[i].energy * attenuation;
for (int j = 0; j < 6; j++) {
if (bool(valid_aniso & (1 << j))) {
light_accum[j] += max(0.0, dot(aniso_dir[j], direction)) * light;
}
}
}
}
// Store the light in the light texture
float lumas[6];
vec3 light_total = vec3(0);
for (int i = 0; i < 6; i++) {
light_total += light_accum[i];
lumas[i] = max(light_accum[i].r, max(light_accum[i].g, light_accum[i].b));
}
float luma_total = max(light_total.r, max(light_total.g, light_total.b));
uint light_total_rgbe;
{
//compress to RGBE9995 to save space
const float pow2to9 = 512.0f;
const float B = 15.0f;
const float N = 9.0f;
const float LN2 = 0.6931471805599453094172321215;
float cRed = clamp(light_total.r, 0.0, 65408.0);
float cGreen = clamp(light_total.g, 0.0, 65408.0);
float cBlue = clamp(light_total.b, 0.0, 65408.0);
float cMax = max(cRed, max(cGreen, cBlue));
float expp = max(-B - 1.0f, floor(log(cMax) / LN2)) + 1.0f + B;
float sMax = floor((cMax / pow(2.0f, expp - B - N)) + 0.5f);
float exps = expp + 1.0f;
if (0.0 <= sMax && sMax < pow2to9) {
exps = expp;
}
float sRed = floor((cRed / pow(2.0f, exps - B - N)) + 0.5f);
float sGreen = floor((cGreen / pow(2.0f, exps - B - N)) + 0.5f);
float sBlue = floor((cBlue / pow(2.0f, exps - B - N)) + 0.5f);
#ifdef MODE_PROCESS_STATIC
//since its self-save, use RGBE8985
light_total_rgbe = ((uint(sRed) & 0x1FF) >> 1) | ((uint(sGreen) & 0x1FF) << 8) | (((uint(sBlue) & 0x1FF) >> 1) << 17) | ((uint(exps) & 0x1F) << 25);
#else
light_total_rgbe = (uint(sRed) & 0x1FF) | ((uint(sGreen) & 0x1FF) << 9) | ((uint(sBlue) & 0x1FF) << 18) | ((uint(exps) & 0x1F) << 27);
#endif
}
#ifdef MODE_PROCESS_DYNAMIC
vec4 aniso0;
aniso0.r = lumas[0] / luma_total;
aniso0.g = lumas[1] / luma_total;
aniso0.b = lumas[2] / luma_total;
aniso0.a = lumas[3] / luma_total;
vec2 aniso1;
aniso1.r = lumas[4] / luma_total;
aniso1.g = lumas[5] / luma_total;
//save to 3D textures
imageStore(dst_aniso0, positioni, aniso0);
imageStore(dst_aniso1, positioni, vec4(aniso1, 0.0, 0.0));
imageStore(dst_light, positioni, uvec4(light_total_rgbe));
//also fill neighbors, so light interpolation during the indirect pass works
//recover the neighbor list from the leftover bits
uint neighbors = (voxel_albedo >> 21) | ((voxel_position >> 21) << 11) | ((process_voxels.data[voxel_index].light >> 30) << 22) | ((process_voxels.data[voxel_index].light_aniso >> 30) << 24);
const uint max_neighbours = 26;
const ivec3 neighbour_positions[max_neighbours] = ivec3[](
ivec3(-1, -1, -1),
ivec3(-1, -1, 0),
ivec3(-1, -1, 1),
ivec3(-1, 0, -1),
ivec3(-1, 0, 0),
ivec3(-1, 0, 1),
ivec3(-1, 1, -1),
ivec3(-1, 1, 0),
ivec3(-1, 1, 1),
ivec3(0, -1, -1),
ivec3(0, -1, 0),
ivec3(0, -1, 1),
ivec3(0, 0, -1),
ivec3(0, 0, 1),
ivec3(0, 1, -1),
ivec3(0, 1, 0),
ivec3(0, 1, 1),
ivec3(1, -1, -1),
ivec3(1, -1, 0),
ivec3(1, -1, 1),
ivec3(1, 0, -1),
ivec3(1, 0, 0),
ivec3(1, 0, 1),
ivec3(1, 1, -1),
ivec3(1, 1, 0),
ivec3(1, 1, 1));
for (uint i = 0; i < max_neighbours; i++) {
if (bool(neighbors & (1 << i))) {
ivec3 neighbour_pos = positioni + neighbour_positions[i];
imageStore(dst_light, neighbour_pos, uvec4(light_total_rgbe));
imageStore(dst_aniso0, neighbour_pos, aniso0);
imageStore(dst_aniso1, neighbour_pos, vec4(aniso1, 0.0, 0.0));
}
}
#endif
#ifdef MODE_PROCESS_STATIC
//save back the anisotropic
uint light = process_voxels.data[voxel_index].light & (3 << 30);
light |= light_total_rgbe;
process_voxels.data[voxel_index].light = light; //replace
uint light_aniso = process_voxels.data[voxel_index].light_aniso & (3 << 30);
for (int i = 0; i < 6; i++) {
light_aniso |= min(31, uint((lumas[i] / luma_total) * 31.0)) << (i * 5);
}
process_voxels.data[voxel_index].light_aniso = light_aniso;
#endif
}

View File

@@ -0,0 +1,618 @@
#[compute]
#version 450
#VERSION_DEFINES
layout(local_size_x = 8, local_size_y = 8, local_size_z = 1) in;
#define MAX_CASCADES 8
layout(set = 0, binding = 1) uniform texture3D sdf_cascades[MAX_CASCADES];
layout(set = 0, binding = 2) uniform texture3D light_cascades[MAX_CASCADES];
layout(set = 0, binding = 3) uniform texture3D aniso0_cascades[MAX_CASCADES];
layout(set = 0, binding = 4) uniform texture3D aniso1_cascades[MAX_CASCADES];
layout(set = 0, binding = 6) uniform sampler linear_sampler;
struct CascadeData {
vec3 offset; //offset of (0,0,0) in world coordinates
float to_cell; // 1/bounds * grid_size
ivec3 probe_world_offset;
uint pad;
vec4 pad2;
};
layout(set = 0, binding = 7, std140) uniform Cascades {
CascadeData data[MAX_CASCADES];
}
cascades;
layout(r32ui, set = 0, binding = 8) uniform restrict uimage2DArray lightprobe_texture_data;
layout(rgba16i, set = 0, binding = 9) uniform restrict iimage2DArray lightprobe_history_texture;
layout(rgba32i, set = 0, binding = 10) uniform restrict iimage2D lightprobe_average_texture;
//used for scrolling
layout(rgba16i, set = 0, binding = 11) uniform restrict iimage2DArray lightprobe_history_scroll_texture;
layout(rgba32i, set = 0, binding = 12) uniform restrict iimage2D lightprobe_average_scroll_texture;
layout(rgba32i, set = 0, binding = 13) uniform restrict iimage2D lightprobe_average_parent_texture;
layout(rgba16f, set = 0, binding = 14) uniform restrict writeonly image2DArray lightprobe_ambient_texture;
#ifdef USE_CUBEMAP_ARRAY
layout(set = 1, binding = 0) uniform textureCubeArray sky_irradiance;
#else
layout(set = 1, binding = 0) uniform textureCube sky_irradiance;
#endif
layout(set = 1, binding = 1) uniform sampler linear_sampler_mipmaps;
#define HISTORY_BITS 10
#define SKY_FLAGS_MODE_COLOR 0x01
#define SKY_FLAGS_MODE_SKY 0x02
#define SKY_FLAGS_ORIENTATION_SIGN 0x04
layout(push_constant, std430) uniform Params {
vec3 grid_size;
uint max_cascades;
uint probe_axis_size;
uint cascade;
uint history_index;
uint history_size;
uint ray_count;
float ray_bias;
ivec2 image_size;
ivec3 world_offset;
uint sky_flags;
ivec3 scroll;
float sky_energy;
vec3 sky_color_or_orientation;
float y_mult;
bool store_ambient_texture;
uint pad[3];
}
params;
const float PI = 3.14159265f;
const float GOLDEN_ANGLE = PI * (3.0 - sqrt(5.0));
vec3 vogel_hemisphere(uint p_index, uint p_count, float p_offset) {
float r = sqrt(float(p_index) + 0.5f) / sqrt(float(p_count));
float theta = float(p_index) * GOLDEN_ANGLE + p_offset;
float y = cos(r * PI * 0.5);
float l = sin(r * PI * 0.5);
return vec3(l * cos(theta), l * sin(theta), y * (float(p_index & 1) * 2.0 - 1.0));
}
uvec3 hash3(uvec3 x) {
x = ((x >> 16) ^ x) * 0x45d9f3b;
x = ((x >> 16) ^ x) * 0x45d9f3b;
x = (x >> 16) ^ x;
return x;
}
float hashf3(vec3 co) {
return fract(sin(dot(co, vec3(12.9898, 78.233, 137.13451))) * 43758.5453);
}
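// Despite the name, this overload is the inverse of the 2D octahedral encode used
// elsewhere: it maps an octahedral-map UV back to a unit direction.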
vec3 octahedron_encode(vec2 f) {
// https://twitter.com/Stubbesaurus/status/937994790553227264
f = f * 2.0 - 1.0;
vec3 n = vec3(f.x, f.y, 1.0f - abs(f.x) - abs(f.y));
float t = clamp(-n.z, 0.0, 1.0);
n.x += n.x >= 0 ? -t : t;
n.y += n.y >= 0 ? -t : t;
return normalize(n);
}
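// Packs an HDR color into shared-exponent RGBE9995 (9 bits per channel plus a
// shared 5-bit exponent), the format consumed by the lightprobe texture data.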
uint rgbe_encode(vec3 color) {
const float pow2to9 = 512.0f;
const float B = 15.0f;
const float N = 9.0f;
const float LN2 = 0.6931471805599453094172321215;
float cRed = clamp(color.r, 0.0, 65408.0);
float cGreen = clamp(color.g, 0.0, 65408.0);
float cBlue = clamp(color.b, 0.0, 65408.0);
float cMax = max(cRed, max(cGreen, cBlue));
float expp = max(-B - 1.0f, floor(log(cMax) / LN2)) + 1.0f + B;
float sMax = floor((cMax / pow(2.0f, expp - B - N)) + 0.5f);
float exps = expp + 1.0f;
if (0.0 <= sMax && sMax < pow2to9) {
exps = expp;
}
float sRed = floor((cRed / pow(2.0f, exps - B - N)) + 0.5f);
float sGreen = floor((cGreen / pow(2.0f, exps - B - N)) + 0.5f);
float sBlue = floor((cBlue / pow(2.0f, exps - B - N)) + 0.5f);
return (uint(sRed) & 0x1FF) | ((uint(sGreen) & 0x1FF) << 9) | ((uint(sBlue) & 0x1FF) << 18) | ((uint(exps) & 0x1F) << 27);
}
struct SH {
#if (SH_SIZE == 16)
float c[48];
#else
float c[28];
#endif
};
shared SH sh_accum[64]; //8x8
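// MODE_PROCESS: each probe shoots ray_count rays (Vogel spiral with a per-probe hash
// offset, interleaved across history frames), gathers radiance from the SDF cascades
// or the sky, and projects it into spherical harmonics blended into a rolling
// history/average. MODE_STORE converts the averaged SH into octahedral irradiance and
// radiance maps with a 1-texel filtering border. MODE_SCROLL shifts the history when
// a cascade scrolls, falling back to the parent cascade where no history exists.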
void main() {
ivec2 pos = ivec2(gl_GlobalInvocationID.xy);
if (any(greaterThanEqual(pos, params.image_size))) { //too large, do nothing
return;
}
uint probe_index = gl_LocalInvocationID.x + gl_LocalInvocationID.y * 8;
#ifdef MODE_PROCESS
float probe_cell_size = float(params.grid_size.x / float(params.probe_axis_size - 1)) / cascades.data[params.cascade].to_cell;
ivec3 probe_cell;
probe_cell.x = pos.x % int(params.probe_axis_size);
probe_cell.y = pos.y;
probe_cell.z = pos.x / int(params.probe_axis_size);
vec3 probe_pos = cascades.data[params.cascade].offset + vec3(probe_cell) * probe_cell_size;
vec3 pos_to_uvw = 1.0 / params.grid_size;
for (uint i = 0; i < SH_SIZE * 3; i++) {
sh_accum[probe_index].c[i] = 0.0;
}
// quickly ensure each probe has a different "offset" for the vogel function, based on integer world position
uvec3 h3 = hash3(uvec3(params.world_offset + probe_cell));
float offset = hashf3(vec3(h3 & uvec3(0xFFFFF)));
//for a more homogeneous hemisphere, alternate based on history frames
uint ray_offset = params.history_index;
uint ray_mult = params.history_size;
uint ray_total = ray_mult * params.ray_count;
for (uint i = 0; i < params.ray_count; i++) {
vec3 ray_dir = vogel_hemisphere(ray_offset + i * ray_mult, ray_total, offset);
ray_dir.y *= params.y_mult;
ray_dir = normalize(ray_dir);
//needs to be visible
vec3 ray_pos = probe_pos;
vec3 inv_dir = 1.0 / ray_dir;
bool hit = false;
uint hit_cascade;
float bias = params.ray_bias;
vec3 abs_ray_dir = abs(ray_dir);
ray_pos += ray_dir * 1.0 / max(abs_ray_dir.x, max(abs_ray_dir.y, abs_ray_dir.z)) * bias / cascades.data[params.cascade].to_cell;
vec3 uvw;
for (uint j = params.cascade; j < params.max_cascades; j++) {
//convert to local bounds
vec3 pos = ray_pos - cascades.data[j].offset;
pos *= cascades.data[j].to_cell;
if (any(lessThan(pos, vec3(0.0))) || any(greaterThanEqual(pos, params.grid_size))) {
continue; //already past bounds for this cascade, goto next
}
//find maximum advance distance (until reaching bounds)
vec3 t0 = -pos * inv_dir;
vec3 t1 = (params.grid_size - pos) * inv_dir;
vec3 tmax = max(t0, t1);
float max_advance = min(tmax.x, min(tmax.y, tmax.z));
float advance = 0.0;
while (advance < max_advance) {
//read how much to advance from SDF
uvw = (pos + ray_dir * advance) * pos_to_uvw;
float distance = texture(sampler3D(sdf_cascades[j], linear_sampler), uvw).r * 255.0 - 1.0;
if (distance < 0.05) {
//consider hit
hit = true;
break;
}
advance += distance;
}
if (hit) {
hit_cascade = j;
break;
}
//change ray origin to collision with bounds
pos += ray_dir * max_advance;
pos /= cascades.data[j].to_cell;
pos += cascades.data[j].offset;
ray_pos = pos;
}
vec4 light;
if (hit) {
//avoid reading different texture from different threads
for (uint j = params.cascade; j < params.max_cascades; j++) {
if (j == hit_cascade) {
const float EPSILON = 0.001;
vec3 hit_normal = normalize(vec3(
texture(sampler3D(sdf_cascades[hit_cascade], linear_sampler), uvw + vec3(EPSILON, 0.0, 0.0)).r - texture(sampler3D(sdf_cascades[hit_cascade], linear_sampler), uvw - vec3(EPSILON, 0.0, 0.0)).r,
texture(sampler3D(sdf_cascades[hit_cascade], linear_sampler), uvw + vec3(0.0, EPSILON, 0.0)).r - texture(sampler3D(sdf_cascades[hit_cascade], linear_sampler), uvw - vec3(0.0, EPSILON, 0.0)).r,
texture(sampler3D(sdf_cascades[hit_cascade], linear_sampler), uvw + vec3(0.0, 0.0, EPSILON)).r - texture(sampler3D(sdf_cascades[hit_cascade], linear_sampler), uvw - vec3(0.0, 0.0, EPSILON)).r));
vec3 hit_light = texture(sampler3D(light_cascades[hit_cascade], linear_sampler), uvw).rgb;
vec4 aniso0 = texture(sampler3D(aniso0_cascades[hit_cascade], linear_sampler), uvw);
vec3 hit_aniso0 = aniso0.rgb;
vec3 hit_aniso1 = vec3(aniso0.a, texture(sampler3D(aniso1_cascades[hit_cascade], linear_sampler), uvw).rg);
//one liner magic
light.rgb = hit_light * (dot(max(vec3(0.0), (hit_normal * hit_aniso0)), vec3(1.0)) + dot(max(vec3(0.0), (-hit_normal * hit_aniso1)), vec3(1.0)));
light.a = 1.0;
}
}
} else if (bool(params.sky_flags & SKY_FLAGS_MODE_SKY)) {
// Reconstruct sky orientation as quaternion and rotate ray_dir before sampling.
float sky_sign = bool(params.sky_flags & SKY_FLAGS_ORIENTATION_SIGN) ? 1.0 : -1.0;
vec4 sky_quat = vec4(params.sky_color_or_orientation, sky_sign * sqrt(1.0 - dot(params.sky_color_or_orientation, params.sky_color_or_orientation)));
vec3 sky_dir = cross(sky_quat.xyz, ray_dir);
sky_dir = ray_dir + ((sky_dir * sky_quat.w) + cross(sky_quat.xyz, sky_dir)) * 2.0;
#ifdef USE_CUBEMAP_ARRAY
light.rgb = textureLod(samplerCubeArray(sky_irradiance, linear_sampler_mipmaps), vec4(sky_dir, 0.0), 2.0).rgb; // Use second mipmap because we don't usually throw a lot of rays, so this compensates.
#else
light.rgb = textureLod(samplerCube(sky_irradiance, linear_sampler_mipmaps), sky_dir, 2.0).rgb; // Use second mipmap because we don't usually throw a lot of rays, so this compensates.
#endif
light.rgb *= params.sky_energy;
light.a = 0.0;
} else if (bool(params.sky_flags & SKY_FLAGS_MODE_COLOR)) {
light.rgb = params.sky_color_or_orientation;
light.rgb *= params.sky_energy;
light.a = 0.0;
} else {
light = vec4(0, 0, 0, 0);
}
vec3 ray_dir2 = ray_dir * ray_dir;
#define SH_ACCUM(m_idx, m_value) \
{ \
vec3 l = light.rgb * (m_value); \
sh_accum[probe_index].c[m_idx * 3 + 0] += l.r; \
sh_accum[probe_index].c[m_idx * 3 + 1] += l.g; \
sh_accum[probe_index].c[m_idx * 3 + 2] += l.b; \
}
SH_ACCUM(0, 0.282095); //l0
SH_ACCUM(1, 0.488603 * ray_dir.y); //l1n1
SH_ACCUM(2, 0.488603 * ray_dir.z); //l1n0
SH_ACCUM(3, 0.488603 * ray_dir.x); //l1p1
SH_ACCUM(4, 1.092548 * ray_dir.x * ray_dir.y); //l2n2
SH_ACCUM(5, 1.092548 * ray_dir.y * ray_dir.z); //l2n1
SH_ACCUM(6, 0.315392 * (3.0 * ray_dir2.z - 1.0)); //l20
SH_ACCUM(7, 1.092548 * ray_dir.x * ray_dir.z); //l2p1
SH_ACCUM(8, 0.546274 * (ray_dir2.x - ray_dir2.y)); //l2p2
#if (SH_SIZE == 16)
SH_ACCUM(9, 0.590043 * ray_dir.y * (3.0f * ray_dir2.x - ray_dir2.y));
SH_ACCUM(10, 2.890611 * ray_dir.y * ray_dir.x * ray_dir.z);
SH_ACCUM(11, 0.646360 * ray_dir.y * (-1.0f + 5.0f * ray_dir2.z));
SH_ACCUM(12, 0.373176 * (5.0f * ray_dir2.z * ray_dir.z - 3.0f * ray_dir.z));
SH_ACCUM(13, 0.457045 * ray_dir.x * (-1.0f + 5.0f * ray_dir2.z));
SH_ACCUM(14, 1.445305 * (ray_dir2.x - ray_dir2.y) * ray_dir.z);
SH_ACCUM(15, 0.590043 * ray_dir.x * (ray_dir2.x - 3.0f * ray_dir2.y));
#endif
}
for (uint i = 0; i < SH_SIZE; i++) {
// store in history texture
ivec3 prev_pos = ivec3(pos.x, pos.y * SH_SIZE + i, int(params.history_index));
ivec2 average_pos = prev_pos.xy;
vec4 value = vec4(sh_accum[probe_index].c[i * 3 + 0], sh_accum[probe_index].c[i * 3 + 1], sh_accum[probe_index].c[i * 3 + 2], 1.0) * 4.0 / float(params.ray_count);
ivec4 ivalue = clamp(ivec4(value * float(1 << HISTORY_BITS)), -32768, 32767); //clamp to 16 bits, so higher values don't break average
ivec4 prev_value = imageLoad(lightprobe_history_texture, prev_pos);
ivec4 average = imageLoad(lightprobe_average_texture, average_pos);
average -= prev_value;
average += ivalue;
imageStore(lightprobe_history_texture, prev_pos, ivalue);
imageStore(lightprobe_average_texture, average_pos, average);
if (params.store_ambient_texture && i == 0) {
ivec3 ambient_pos = ivec3(pos, int(params.cascade));
vec4 ambient_light = (vec4(average) / float(params.history_size)) / float(1 << HISTORY_BITS);
ambient_light *= 0.88622; // SHL0
imageStore(lightprobe_ambient_texture, ambient_pos, ambient_light);
}
}
#endif // MODE PROCESS
#ifdef MODE_STORE
// converting to octahedral in this step is required because
// octahedral is much faster to read from the screen than spherical harmonics,
// despite the very slight quality loss
ivec2 sh_pos = (pos / OCT_SIZE) * ivec2(1, SH_SIZE);
ivec2 oct_pos = (pos / OCT_SIZE) * (OCT_SIZE + 2) + ivec2(1);
ivec2 local_pos = pos % OCT_SIZE;
//compute the octahedral normal for this texel
vec3 normal = octahedron_encode(vec2(local_pos) / float(OCT_SIZE));
// read the spherical harmonic
vec3 normal2 = normal * normal;
float c[SH_SIZE] = float[](
0.282095, //l0
0.488603 * normal.y, //l1n1
0.488603 * normal.z, //l1n0
0.488603 * normal.x, //l1p1
1.092548 * normal.x * normal.y, //l2n2
1.092548 * normal.y * normal.z, //l2n1
0.315392 * (3.0 * normal2.z - 1.0), //l20
1.092548 * normal.x * normal.z, //l2p1
0.546274 * (normal2.x - normal2.y) //l2p2
#if (SH_SIZE == 16)
,
0.590043 * normal.y * (3.0f * normal2.x - normal2.y),
2.890611 * normal.y * normal.x * normal.z,
0.646360 * normal.y * (-1.0f + 5.0f * normal2.z),
0.373176 * (5.0f * normal2.z * normal.z - 3.0f * normal.z),
0.457045 * normal.x * (-1.0f + 5.0f * normal2.z),
1.445305 * (normal2.x - normal2.y) * normal.z,
0.590043 * normal.x * (normal2.x - 3.0f * normal2.y)
#endif
);
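// Per-band weights of the clamped-cosine (Lambertian) kernel used to turn the
// stored radiance SH into irradiance: 1 for l0, 2/3 for the l1 terms and 1/4 for
// the l2 terms; the l3 terms vanish for a cosine lobe and are zeroed.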
const float l_mult[SH_SIZE] = float[](
1.0,
2.0 / 3.0,
2.0 / 3.0,
2.0 / 3.0,
1.0 / 4.0,
1.0 / 4.0,
1.0 / 4.0,
1.0 / 4.0,
1.0 / 4.0
#if (SH_SIZE == 16)
, // l3 does not contribute to irradiance
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0
#endif
);
vec3 irradiance = vec3(0.0);
vec3 radiance = vec3(0.0);
for (uint i = 0; i < SH_SIZE; i++) {
// store in history texture
ivec2 average_pos = sh_pos + ivec2(0, i);
ivec4 average = imageLoad(lightprobe_average_texture, average_pos);
vec4 sh = (vec4(average) / float(params.history_size)) / float(1 << HISTORY_BITS);
vec3 m = sh.rgb * c[i] * 4.0;
irradiance += m * l_mult[i];
radiance += m;
}
//encode RGBE9995 for the final texture
uint irradiance_rgbe = rgbe_encode(irradiance);
uint radiance_rgbe = rgbe_encode(radiance);
//store in octahedral map
ivec3 texture_pos = ivec3(oct_pos, int(params.cascade));
ivec3 copy_to[4] = ivec3[](ivec3(-2, -2, -2), ivec3(-2, -2, -2), ivec3(-2, -2, -2), ivec3(-2, -2, -2));
copy_to[0] = texture_pos + ivec3(local_pos, 0);
if (local_pos == ivec2(0, 0)) {
copy_to[1] = texture_pos + ivec3(OCT_SIZE - 1, -1, 0);
copy_to[2] = texture_pos + ivec3(-1, OCT_SIZE - 1, 0);
copy_to[3] = texture_pos + ivec3(OCT_SIZE, OCT_SIZE, 0);
} else if (local_pos == ivec2(OCT_SIZE - 1, 0)) {
copy_to[1] = texture_pos + ivec3(0, -1, 0);
copy_to[2] = texture_pos + ivec3(OCT_SIZE, OCT_SIZE - 1, 0);
copy_to[3] = texture_pos + ivec3(-1, OCT_SIZE, 0);
} else if (local_pos == ivec2(0, OCT_SIZE - 1)) {
copy_to[1] = texture_pos + ivec3(-1, 0, 0);
copy_to[2] = texture_pos + ivec3(OCT_SIZE - 1, OCT_SIZE, 0);
copy_to[3] = texture_pos + ivec3(OCT_SIZE, -1, 0);
} else if (local_pos == ivec2(OCT_SIZE - 1, OCT_SIZE - 1)) {
copy_to[1] = texture_pos + ivec3(0, OCT_SIZE, 0);
copy_to[2] = texture_pos + ivec3(OCT_SIZE, 0, 0);
copy_to[3] = texture_pos + ivec3(-1, -1, 0);
} else if (local_pos.y == 0) {
copy_to[1] = texture_pos + ivec3(OCT_SIZE - local_pos.x - 1, local_pos.y - 1, 0);
} else if (local_pos.x == 0) {
copy_to[1] = texture_pos + ivec3(local_pos.x - 1, OCT_SIZE - local_pos.y - 1, 0);
} else if (local_pos.y == OCT_SIZE - 1) {
copy_to[1] = texture_pos + ivec3(OCT_SIZE - local_pos.x - 1, local_pos.y + 1, 0);
} else if (local_pos.x == OCT_SIZE - 1) {
copy_to[1] = texture_pos + ivec3(local_pos.x + 1, OCT_SIZE - local_pos.y - 1, 0);
}
for (int i = 0; i < 4; i++) {
if (copy_to[i] == ivec3(-2, -2, -2)) {
continue;
}
imageStore(lightprobe_texture_data, copy_to[i], uvec4(irradiance_rgbe));
imageStore(lightprobe_texture_data, copy_to[i] + ivec3(0, 0, int(params.max_cascades)), uvec4(radiance_rgbe));
}
#endif
#ifdef MODE_SCROLL
ivec3 probe_cell;
probe_cell.x = pos.x % int(params.probe_axis_size);
probe_cell.y = pos.y;
probe_cell.z = pos.x / int(params.probe_axis_size);
ivec3 read_probe = probe_cell - params.scroll;
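// Scrolling: this probe's data previously lived at read_probe; if that cell was inside the
// grid its history can be copied, otherwise the probe is new and must be seeded from the
// parent cascade (or from its own current data at the outermost cascade).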
if (all(greaterThanEqual(read_probe, ivec3(0))) && all(lessThan(read_probe, ivec3(params.probe_axis_size)))) {
// can scroll
ivec2 tex_pos;
tex_pos = read_probe.xy;
tex_pos.x += read_probe.z * int(params.probe_axis_size);
//scroll
for (uint j = 0; j < params.history_size; j++) {
for (int i = 0; i < SH_SIZE; i++) {
// copy from history texture
ivec3 src_pos = ivec3(tex_pos.x, tex_pos.y * SH_SIZE + i, int(j));
ivec3 dst_pos = ivec3(pos.x, pos.y * SH_SIZE + i, int(j));
ivec4 value = imageLoad(lightprobe_history_texture, src_pos);
imageStore(lightprobe_history_scroll_texture, dst_pos, value);
}
}
for (int i = 0; i < SH_SIZE; i++) {
// copy from average texture
ivec2 src_pos = ivec2(tex_pos.x, tex_pos.y * SH_SIZE + i);
ivec2 dst_pos = ivec2(pos.x, pos.y * SH_SIZE + i);
ivec4 value = imageLoad(lightprobe_average_texture, src_pos);
imageStore(lightprobe_average_scroll_texture, dst_pos, value);
}
} else if (params.cascade < params.max_cascades - 1) {
//can't scroll, must look for position in parent cascade
//to global coords
float cell_to_probe = float(params.grid_size.x / float(params.probe_axis_size - 1));
float probe_cell_size = cell_to_probe / cascades.data[params.cascade].to_cell;
vec3 probe_pos = cascades.data[params.cascade].offset + vec3(probe_cell) * probe_cell_size;
//to parent local coords
float probe_cell_size_next = cell_to_probe / cascades.data[params.cascade + 1].to_cell;
probe_pos -= cascades.data[params.cascade + 1].offset;
probe_pos /= probe_cell_size_next;
ivec3 probe_posi = ivec3(probe_pos);
//add up all light, no need to use occlusion here, since occlusion will do its work afterwards
vec4 average_light[SH_SIZE] = vec4[](vec4(0), vec4(0), vec4(0), vec4(0), vec4(0), vec4(0), vec4(0), vec4(0), vec4(0)
#if (SH_SIZE == 16)
,
vec4(0), vec4(0), vec4(0), vec4(0), vec4(0), vec4(0), vec4(0)
#endif
);
float total_weight = 0.0;
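// Seed the new probe by trilinearly blending the averaged SH of the 8 surrounding probes in
// the parent (coarser) cascade.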
for (int i = 0; i < 8; i++) {
ivec3 offset = probe_posi + ((ivec3(i) >> ivec3(0, 1, 2)) & ivec3(1, 1, 1));
vec3 trilinear = vec3(1.0) - abs(probe_pos - vec3(offset));
float weight = trilinear.x * trilinear.y * trilinear.z;
ivec2 tex_pos;
tex_pos = offset.xy;
tex_pos.x += offset.z * int(params.probe_axis_size);
for (int j = 0; j < SH_SIZE; j++) {
// copy from history texture
ivec2 src_pos = ivec2(tex_pos.x, tex_pos.y * SH_SIZE + j);
ivec4 average = imageLoad(lightprobe_average_parent_texture, src_pos);
vec4 value = (vec4(average) / float(params.history_size)) / float(1 << HISTORY_BITS);
average_light[j] += value * weight;
}
total_weight += weight;
}
if (total_weight > 0.0) {
total_weight = 1.0 / total_weight;
}
//store the averaged values everywhere
for (int i = 0; i < SH_SIZE; i++) {
ivec4 ivalue = clamp(ivec4(average_light[i] * total_weight * float(1 << HISTORY_BITS)), ivec4(-32768), ivec4(32767)); //clamp to 16 bits, so higher values don't break average
// copy from history texture
ivec3 dst_pos = ivec3(pos.x, pos.y * SH_SIZE + i, 0);
for (uint j = 0; j < params.history_size; j++) {
dst_pos.z = int(j);
imageStore(lightprobe_history_scroll_texture, dst_pos, ivalue);
}
ivalue *= int(params.history_size); //average needs to have all history added up
imageStore(lightprobe_average_scroll_texture, dst_pos.xy, ivalue);
}
} else {
//scroll at the edge of the highest cascade, just copy what is there,
//since it's the closest we have anyway
for (uint j = 0; j < params.history_size; j++) {
ivec2 tex_pos;
tex_pos = probe_cell.xy;
tex_pos.x += probe_cell.z * int(params.probe_axis_size);
for (int i = 0; i < SH_SIZE; i++) {
// copy from history texture
ivec3 src_pos = ivec3(tex_pos.x, tex_pos.y * SH_SIZE + i, int(j));
ivec3 dst_pos = ivec3(pos.x, pos.y * SH_SIZE + i, int(j));
ivec4 value = imageLoad(lightprobe_history_texture, dst_pos);
imageStore(lightprobe_history_scroll_texture, dst_pos, value);
}
}
for (int i = 0; i < SH_SIZE; i++) {
// copy from average texture
ivec2 spos = ivec2(pos.x, pos.y * SH_SIZE + i);
ivec4 average = imageLoad(lightprobe_average_texture, spos);
imageStore(lightprobe_average_scroll_texture, spos, average);
}
}
#endif
#ifdef MODE_SCROLL_STORE
//do not update probe texture, as these will be updated later
for (uint j = 0; j < params.history_size; j++) {
for (int i = 0; i < SH_SIZE; i++) {
// copy from history texture
ivec3 spos = ivec3(pos.x, pos.y * SH_SIZE + i, int(j));
ivec4 value = imageLoad(lightprobe_history_scroll_texture, spos);
imageStore(lightprobe_history_texture, spos, value);
}
}
for (int i = 0; i < SH_SIZE; i++) {
// copy from average texture
ivec2 spos = ivec2(pos.x, pos.y * SH_SIZE + i);
ivec4 average = imageLoad(lightprobe_average_scroll_texture, spos);
imageStore(lightprobe_average_texture, spos, average);
}
#endif
}

@@ -0,0 +1,306 @@
#[vertex]
#version 450
#VERSION_DEFINES
layout(location = 0) out vec2 uv_interp;
layout(push_constant, std430) uniform Params {
mat3 orientation;
vec4 projection; // only applicable if not multiview
vec3 position;
float time;
vec2 pad;
float luminance_multiplier;
float brightness_multiplier;
}
params;
void main() {
vec2 base_arr[3] = vec2[](vec2(-1.0, -3.0), vec2(-1.0, 1.0), vec2(3.0, 1.0));
uv_interp = base_arr[gl_VertexIndex];
gl_Position = vec4(uv_interp, 0.0, 1.0);
}
#[fragment]
#version 450
#VERSION_DEFINES
#ifdef USE_MULTIVIEW
#extension GL_EXT_multiview : enable
#define ViewIndex gl_ViewIndex
#endif
#define M_PI 3.14159265359
layout(location = 0) in vec2 uv_interp;
layout(push_constant, std430) uniform Params {
mat3 orientation;
vec4 projection; // only applicable if not multiview
vec3 position;
float time;
vec2 pad;
float luminance_multiplier;
float brightness_multiplier;
}
params;
#include "../samplers_inc.glsl"
layout(set = 0, binding = 1, std430) restrict readonly buffer GlobalShaderUniformData {
vec4 data[];
}
global_shader_uniforms;
layout(set = 0, binding = 2, std140) uniform SkySceneData {
mat4 combined_reprojection[2];
mat4 view_inv_projections[2];
vec4 view_eye_offsets[2];
bool volumetric_fog_enabled; // 4 - 4
float volumetric_fog_inv_length; // 4 - 8
float volumetric_fog_detail_spread; // 4 - 12
float volumetric_fog_sky_affect; // 4 - 16
bool fog_enabled; // 4 - 20
float fog_sky_affect; // 4 - 24
float fog_density; // 4 - 28
float fog_sun_scatter; // 4 - 32
vec3 fog_light_color; // 12 - 44
float fog_aerial_perspective; // 4 - 48
float z_far; // 4 - 52
uint directional_light_count; // 4 - 56
uint pad1; // 4 - 60
uint pad2; // 4 - 64
}
sky_scene_data;
struct DirectionalLightData {
vec4 direction_energy;
vec4 color_size;
bool enabled;
};
layout(set = 0, binding = 3, std140) uniform DirectionalLights {
DirectionalLightData data[MAX_DIRECTIONAL_LIGHT_DATA_STRUCTS];
}
directional_lights;
#ifdef MATERIAL_UNIFORMS_USED
/* clang-format off */
layout(set = 1, binding = 0, std140) uniform MaterialUniforms {
#MATERIAL_UNIFORMS
} material;
/* clang-format on */
#endif
layout(set = 2, binding = 0) uniform textureCube radiance;
#ifdef USE_CUBEMAP_PASS
layout(set = 2, binding = 1) uniform textureCube half_res;
layout(set = 2, binding = 2) uniform textureCube quarter_res;
#elif defined(USE_MULTIVIEW)
layout(set = 2, binding = 1) uniform texture2DArray half_res;
layout(set = 2, binding = 2) uniform texture2DArray quarter_res;
#else
layout(set = 2, binding = 1) uniform texture2D half_res;
layout(set = 2, binding = 2) uniform texture2D quarter_res;
#endif
layout(set = 3, binding = 0) uniform texture3D volumetric_fog_texture;
#ifdef USE_CUBEMAP_PASS
#define AT_CUBEMAP_PASS true
#else
#define AT_CUBEMAP_PASS false
#endif
#ifdef USE_HALF_RES_PASS
#define AT_HALF_RES_PASS true
#else
#define AT_HALF_RES_PASS false
#endif
#ifdef USE_QUARTER_RES_PASS
#define AT_QUARTER_RES_PASS true
#else
#define AT_QUARTER_RES_PASS false
#endif
#GLOBALS
layout(location = 0) out vec4 frag_color;
#ifdef USE_DEBANDING
// https://www.iryoku.com/next-generation-post-processing-in-call-of-duty-advanced-warfare
vec3 interleaved_gradient_noise(vec2 pos) {
const vec3 magic = vec3(0.06711056f, 0.00583715f, 52.9829189f);
float res = fract(magic.z * fract(dot(pos, magic.xy))) * 2.0 - 1.0;
return vec3(res, -res, res) / 255.0;
}
#endif
vec4 volumetric_fog_process(vec2 screen_uv) {
#ifdef USE_MULTIVIEW
vec4 reprojected = sky_scene_data.combined_reprojection[ViewIndex] * vec4(screen_uv * 2.0 - 1.0, 0.0, 1.0); // Unproject at the far plane
vec3 fog_pos = vec3(reprojected.xy / reprojected.w, 1.0) * 0.5 + 0.5;
#else
vec3 fog_pos = vec3(screen_uv, 1.0);
#endif
return texture(sampler3D(volumetric_fog_texture, SAMPLER_LINEAR_CLAMP), fog_pos);
}
vec4 fog_process(vec3 view, vec3 sky_color) {
vec3 fog_color = mix(sky_scene_data.fog_light_color, sky_color, sky_scene_data.fog_aerial_perspective);
if (sky_scene_data.fog_sun_scatter > 0.001) {
vec4 sun_scatter = vec4(0.0);
float sun_total = 0.0;
for (uint i = 0; i < sky_scene_data.directional_light_count; i++) {
vec3 light_color = directional_lights.data[i].color_size.xyz * directional_lights.data[i].direction_energy.w;
float light_amount = pow(max(dot(view, directional_lights.data[i].direction_energy.xyz), 0.0), 8.0) * M_PI;
fog_color += light_color * light_amount * sky_scene_data.fog_sun_scatter;
}
}
return vec4(fog_color, 1.0);
}
// Eberly approximation from https://seblagarde.wordpress.com/2014/12/01/inverse-trigonometric-functions-gpu-optimization-for-amd-gcn-architecture/.
// input [-1, 1] and output [0, PI]
float acos_approx(float p_x) {
float x = abs(p_x);
float res = -0.156583f * x + (M_PI / 2.0);
res *= sqrt(1.0f - x);
return (p_x >= 0.0) ? res : M_PI - res;
}
// Based on https://math.stackexchange.com/questions/1098487/atan2-faster-approximation
// but using the Eberly coefficients from https://seblagarde.wordpress.com/2014/12/01/inverse-trigonometric-functions-gpu-optimization-for-amd-gcn-architecture/.
float atan2_approx(float y, float x) {
float a = min(abs(x), abs(y)) / max(abs(x), abs(y));
float s = a * a;
float poly = 0.0872929f;
poly = -0.301895f + poly * s;
poly = 1.0f + poly * s;
poly = poly * a;
float r = abs(y) > abs(x) ? (M_PI / 2.0) - poly : poly;
r = x < 0.0 ? M_PI - r : r;
r = y < 0.0 ? -r : r;
return r;
}
void main() {
vec3 cube_normal;
#ifdef USE_MULTIVIEW
// In multiview our projection matrices will contain positional and rotational offsets that we need to properly unproject.
vec4 unproject = vec4(uv_interp.x, uv_interp.y, 0.0, 1.0); // unproject at the far plane
vec4 unprojected = sky_scene_data.view_inv_projections[ViewIndex] * unproject;
cube_normal = unprojected.xyz / unprojected.w;
// Unproject will give us the position between the eyes, need to re-offset
cube_normal += sky_scene_data.view_eye_offsets[ViewIndex].xyz;
#else
cube_normal.z = -1.0;
cube_normal.x = (cube_normal.z * (-uv_interp.x - params.projection.x)) / params.projection.y;
cube_normal.y = -(cube_normal.z * (uv_interp.y - params.projection.z)) / params.projection.w;
#endif
cube_normal = mat3(params.orientation) * cube_normal;
cube_normal = normalize(cube_normal);
vec2 uv = uv_interp * 0.5 + 0.5;
vec2 panorama_coords = vec2(atan2_approx(cube_normal.x, -cube_normal.z), acos_approx(cube_normal.y));
if (panorama_coords.x < 0.0) {
panorama_coords.x += M_PI * 2.0;
}
panorama_coords /= vec2(M_PI * 2.0, M_PI);
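// panorama_coords is now the equirectangular (longitude, latitude) UV in [0, 1] that
// corresponds to cube_normal.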
vec3 color = vec3(0.0, 0.0, 0.0);
float alpha = 1.0; // Only available to subpasses
vec4 half_res_color = vec4(1.0);
vec4 quarter_res_color = vec4(1.0);
vec4 custom_fog = vec4(0.0);
#ifdef USE_CUBEMAP_PASS
#ifdef USES_HALF_RES_COLOR
half_res_color = texture(samplerCube(half_res, SAMPLER_LINEAR_WITH_MIPMAPS_CLAMP), cube_normal) / params.luminance_multiplier;
#endif
#ifdef USES_QUARTER_RES_COLOR
quarter_res_color = texture(samplerCube(quarter_res, SAMPLER_LINEAR_WITH_MIPMAPS_CLAMP), cube_normal) / params.luminance_multiplier;
#endif
#else
#ifdef USES_HALF_RES_COLOR
#ifdef USE_MULTIVIEW
half_res_color = textureLod(sampler2DArray(half_res, SAMPLER_LINEAR_CLAMP), vec3(uv, ViewIndex), 0.0) / params.luminance_multiplier;
#else
half_res_color = textureLod(sampler2D(half_res, SAMPLER_LINEAR_CLAMP), uv, 0.0) / params.luminance_multiplier;
#endif // USE_MULTIVIEW
#endif // USES_HALF_RES_COLOR
#ifdef USES_QUARTER_RES_COLOR
#ifdef USE_MULTIVIEW
quarter_res_color = textureLod(sampler2DArray(quarter_res, SAMPLER_LINEAR_CLAMP), vec3(uv, ViewIndex), 0.0) / params.luminance_multiplier;
#else
quarter_res_color = textureLod(sampler2D(quarter_res, SAMPLER_LINEAR_CLAMP), uv, 0.0) / params.luminance_multiplier;
#endif // USE_MULTIVIEW
#endif // USES_QUARTER_RES_COLOR
#endif //USE_CUBEMAP_PASS
{
#CODE : SKY
}
frag_color.rgb = color;
frag_color.a = alpha;
// Apply environment 'brightness' setting separately before fog to ensure consistent luminance.
frag_color.rgb = frag_color.rgb * params.brightness_multiplier;
#if !defined(DISABLE_FOG) && !defined(USE_CUBEMAP_PASS)
// Draw "fixed" fog before volumetric fog to ensure volumetric fog can appear in front of the sky.
if (sky_scene_data.fog_enabled) {
vec4 fog = fog_process(cube_normal, frag_color.rgb);
frag_color.rgb = mix(frag_color.rgb, fog.rgb, fog.a * sky_scene_data.fog_sky_affect);
}
if (sky_scene_data.volumetric_fog_enabled) {
vec4 fog = volumetric_fog_process(uv);
frag_color.rgb = mix(frag_color.rgb, fog.rgb, fog.a * sky_scene_data.volumetric_fog_sky_affect);
}
if (custom_fog.a > 0.0) {
frag_color.rgb = mix(frag_color.rgb, custom_fog.rgb, custom_fog.a);
}
#endif // !defined(DISABLE_FOG) && !defined(USE_CUBEMAP_PASS)
// For mobile renderer we're multiplying by 0.5 as we're using a UNORM buffer.
// For both mobile and clustered, we also bake in the exposure value for the environment and camera.
frag_color.rgb = frag_color.rgb * params.luminance_multiplier;
// Blending is disabled for Sky, so alpha doesn't blend.
// Alpha is used for subsurface scattering so make sure it doesn't get applied to Sky.
if (!AT_CUBEMAP_PASS && !AT_HALF_RES_PASS && !AT_QUARTER_RES_PASS) {
frag_color.a = 0.0;
}
#ifdef USE_DEBANDING
frag_color.rgb += interleaved_gradient_noise(gl_FragCoord.xy) * params.luminance_multiplier;
#endif
}

@@ -0,0 +1,303 @@
#[compute]
#version 450
#VERSION_DEFINES
#ifdef USE_VULKAN_MEMORY_MODEL
#pragma use_vulkan_memory_model
#endif
layout(local_size_x = 4, local_size_y = 4, local_size_z = 4) in;
#define DENSITY_SCALE 1024.0
#include "../cluster_data_inc.glsl"
#include "../light_data_inc.glsl"
#define M_PI 3.14159265359
#include "../samplers_inc.glsl"
layout(set = 0, binding = 2, std430) restrict readonly buffer GlobalShaderUniformData {
vec4 data[];
}
global_shader_uniforms;
layout(push_constant, std430) uniform Params {
vec3 position;
float pad;
vec3 size;
float pad2;
ivec3 corner;
uint shape;
mat4 transform;
}
params;
#ifdef NO_IMAGE_ATOMICS
layout(set = 1, binding = 1) volatile buffer emissive_only_map_buffer {
uint emissive_only_map[];
};
#else
layout(r32ui, set = 1, binding = 1) uniform volatile uimage3D emissive_only_map;
#endif
layout(set = 1, binding = 2, std140) uniform SceneParams {
vec2 fog_frustum_size_begin;
vec2 fog_frustum_size_end;
float fog_frustum_end;
float z_near; //
float z_far; //
float time;
ivec3 fog_volume_size;
uint directional_light_count; //
bool use_temporal_reprojection;
uint temporal_frame;
float detail_spread;
float temporal_blend;
mat4 to_prev_view;
mat4 transform;
}
scene_params;
#ifdef NO_IMAGE_ATOMICS
layout(set = 1, binding = 3) volatile buffer density_only_map_buffer {
uint density_only_map[];
};
layout(set = 1, binding = 4) volatile buffer light_only_map_buffer {
uint light_only_map[];
};
#else
layout(r32ui, set = 1, binding = 3) uniform volatile uimage3D density_only_map;
layout(r32ui, set = 1, binding = 4) uniform volatile uimage3D light_only_map;
#endif
#ifdef MATERIAL_UNIFORMS_USED
/* clang-format off */
layout(set = 2, binding = 0, std140) uniform MaterialUniforms {
#MATERIAL_UNIFORMS
} material;
/* clang-format on */
#endif
#GLOBALS
float get_depth_at_pos(float cell_depth_size, int z) {
float d = float(z) * cell_depth_size + cell_depth_size * 0.5; //center of voxels
d = pow(d, scene_params.detail_spread);
return scene_params.fog_frustum_end * d;
}
#define TEMPORAL_FRAMES 16
const vec3 halton_map[TEMPORAL_FRAMES] = vec3[](
vec3(0.5, 0.33333333, 0.2),
vec3(0.25, 0.66666667, 0.4),
vec3(0.75, 0.11111111, 0.6),
vec3(0.125, 0.44444444, 0.8),
vec3(0.625, 0.77777778, 0.04),
vec3(0.375, 0.22222222, 0.24),
vec3(0.875, 0.55555556, 0.44),
vec3(0.0625, 0.88888889, 0.64),
vec3(0.5625, 0.03703704, 0.84),
vec3(0.3125, 0.37037037, 0.08),
vec3(0.8125, 0.7037037, 0.28),
vec3(0.1875, 0.14814815, 0.48),
vec3(0.6875, 0.48148148, 0.68),
vec3(0.4375, 0.81481481, 0.88),
vec3(0.9375, 0.25925926, 0.12),
vec3(0.03125, 0.59259259, 0.32));
void main() {
vec3 fog_cell_size = 1.0 / vec3(scene_params.fog_volume_size);
ivec3 pos = ivec3(gl_GlobalInvocationID.xyz) + params.corner;
if (any(greaterThanEqual(pos, scene_params.fog_volume_size))) {
return; //do not compute
}
#ifdef NO_IMAGE_ATOMICS
uint lpos = pos.z * scene_params.fog_volume_size.x * scene_params.fog_volume_size.y + pos.y * scene_params.fog_volume_size.x + pos.x;
#endif
vec3 posf = vec3(pos);
vec3 fog_unit_pos = posf * fog_cell_size + fog_cell_size * 0.5; //center of voxels
fog_unit_pos.z = pow(fog_unit_pos.z, scene_params.detail_spread);
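// Froxel depth slices are distributed non-linearly (detail_spread) so resolution is
// concentrated near the camera; view_pos below is the view-space center of this froxel,
// with Y flipped relative to the froxel grid.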
vec3 view_pos;
view_pos.xy = (fog_unit_pos.xy * 2.0 - 1.0) * mix(scene_params.fog_frustum_size_begin, scene_params.fog_frustum_size_end, vec2(fog_unit_pos.z));
view_pos.z = -scene_params.fog_frustum_end * fog_unit_pos.z;
view_pos.y = -view_pos.y;
if (scene_params.use_temporal_reprojection) {
vec3 prev_view = (scene_params.to_prev_view * vec4(view_pos, 1.0)).xyz;
//undo transform into prev view
prev_view.y = -prev_view.y;
//z back to unit size
prev_view.z /= -scene_params.fog_frustum_end;
//xy back to unit size
prev_view.xy /= mix(scene_params.fog_frustum_size_begin, scene_params.fog_frustum_size_end, vec2(prev_view.z));
prev_view.xy = prev_view.xy * 0.5 + 0.5;
//z back to unspread value
prev_view.z = pow(prev_view.z, 1.0 / scene_params.detail_spread);
if (all(greaterThan(prev_view, vec3(0.0))) && all(lessThan(prev_view, vec3(1.0)))) {
//reprojection fits
// Since we can reproject, now we must jitter the current view pos.
// This is done here because cells that can't reproject should not jitter.
fog_unit_pos = posf * fog_cell_size + fog_cell_size * halton_map[scene_params.temporal_frame]; //center of voxels, offset by halton table
fog_unit_pos.z = pow(fog_unit_pos.z, scene_params.detail_spread);
view_pos.xy = (fog_unit_pos.xy * 2.0 - 1.0) * mix(scene_params.fog_frustum_size_begin, scene_params.fog_frustum_size_end, vec2(fog_unit_pos.z));
view_pos.z = -scene_params.fog_frustum_end * fog_unit_pos.z;
view_pos.y = -view_pos.y;
}
}
float density = 0.0;
vec3 emission = vec3(0.0);
vec3 albedo = vec3(0.0);
float cell_depth_size = abs(view_pos.z - get_depth_at_pos(fog_cell_size.z, pos.z + 1));
vec4 world = scene_params.transform * vec4(view_pos, 1.0);
world.xyz /= world.w;
vec3 uvw = fog_unit_pos;
vec4 local_pos = params.transform * world;
local_pos.xyz /= local_pos.w;
vec3 half_size = params.size / 2.0;
float sdf = -1.0;
if (params.shape == 0) {
// Ellipsoid
// https://www.shadertoy.com/view/tdS3DG
float k0 = length(local_pos.xyz / half_size);
float k1 = length(local_pos.xyz / (half_size * half_size));
sdf = k0 * (k0 - 1.0) / k1;
} else if (params.shape == 1) {
// Cone
// https://iquilezles.org/www/articles/distfunctions/distfunctions.htm
// Compute the cone angle automatically to fit within the volume's size.
float inv_height = 1.0 / max(0.001, half_size.y);
float radius = 1.0 / max(0.001, (min(half_size.x, half_size.z) * 0.5));
float hypotenuse = sqrt(radius * radius + inv_height * inv_height);
float rsin = radius / hypotenuse;
float rcos = inv_height / hypotenuse;
vec2 c = vec2(rsin, rcos);
float q = length(local_pos.xz);
sdf = max(dot(c, vec2(q, local_pos.y - half_size.y)), -half_size.y - local_pos.y);
} else if (params.shape == 2) {
// Cylinder
// https://iquilezles.org/www/articles/distfunctions/distfunctions.htm
vec2 d = abs(vec2(length(local_pos.xz), local_pos.y)) - vec2(min(half_size.x, half_size.z), half_size.y);
sdf = min(max(d.x, d.y), 0.0) + length(max(d, 0.0));
} else if (params.shape == 3) {
// Box
// https://iquilezles.org/www/articles/distfunctions/distfunctions.htm
vec3 q = abs(local_pos.xyz) - half_size;
sdf = length(max(q, 0.0)) + min(max(q.x, max(q.y, q.z)), 0.0);
}
float cull_mask = 1.0; //used to cull cells that do not contribute
if (params.shape <= 3) {
#ifndef SDF_USED
cull_mask = 1.0 - smoothstep(-0.1, 0.0, sdf);
#endif
uvw = clamp((local_pos.xyz + half_size) / params.size, 0.0, 1.0);
}
if (cull_mask > 0.0) {
{
#CODE : FOG
}
#ifdef DENSITY_USED
density *= cull_mask;
if (abs(density) > 0.001) {
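// Density is accumulated in fixed point (steps of 1 / DENSITY_SCALE) with atomics so that
// overlapping fog volumes can add into the same froxel concurrently.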
int final_density = int(density * DENSITY_SCALE);
#ifdef NO_IMAGE_ATOMICS
atomicAdd(density_only_map[lpos], uint(final_density));
#else
imageAtomicAdd(density_only_map, pos, uint(final_density));
#endif
#ifdef EMISSION_USED
{
emission *= clamp(density, 0.0, 1.0);
emission = clamp(emission, vec3(0.0), vec3(4.0));
// Scale to fit into R11G11B10 with a range of 0-4
uvec3 emission_u = uvec3(emission.r * 511.0, emission.g * 511.0, emission.b * 255.0);
// R and G have 11 bits each and B has 10. Then pack them into a 32 bit uint
uint final_emission = emission_u.r << 21 | emission_u.g << 10 | emission_u.b;
#ifdef NO_IMAGE_ATOMICS
uint prev_emission = atomicAdd(emissive_only_map[lpos], final_emission);
#else
uint prev_emission = imageAtomicAdd(emissive_only_map, pos, final_emission);
#endif
// Adding can lead to colors overflowing, so validate
uvec3 prev_emission_u = uvec3(prev_emission >> 21, (prev_emission << 11) >> 21, prev_emission % 1024);
uint add_emission = final_emission + prev_emission;
uvec3 add_emission_u = uvec3(add_emission >> 21, (add_emission << 11) >> 21, add_emission % 1024);
bvec3 overflowing = lessThan(add_emission_u, prev_emission_u + emission_u);
if (any(overflowing)) {
uvec3 overflow_factor = mix(uvec3(0), uvec3(2047 << 21, 2047 << 10, 1023), overflowing);
uint force_max = overflow_factor.r | overflow_factor.g | overflow_factor.b;
#ifdef NO_IMAGE_ATOMICS
atomicOr(emissive_only_map[lpos], force_max);
#else
imageAtomicOr(emissive_only_map, pos, force_max);
#endif
}
}
#endif
#ifdef ALBEDO_USED
{
vec3 scattering = albedo * clamp(density, 0.0, 1.0);
scattering = clamp(scattering, vec3(0.0), vec3(1.0));
uvec3 scattering_u = uvec3(scattering.r * 2047.0, scattering.g * 2047.0, scattering.b * 1023.0);
// R and G have 11 bits each and B has 10. Then pack them into a 32 bit uint
uint final_scattering = scattering_u.r << 21 | scattering_u.g << 10 | scattering_u.b;
#ifdef NO_IMAGE_ATOMICS
uint prev_scattering = atomicAdd(light_only_map[lpos], final_scattering);
#else
uint prev_scattering = imageAtomicAdd(light_only_map, pos, final_scattering);
#endif
// Adding can lead to colors overflowing, so validate
uvec3 prev_scattering_u = uvec3(prev_scattering >> 21, (prev_scattering << 11) >> 21, prev_scattering % 1024);
uint add_scattering = final_scattering + prev_scattering;
uvec3 add_scattering_u = uvec3(add_scattering >> 21, (add_scattering << 11) >> 21, add_scattering % 1024);
bvec3 overflowing = lessThan(add_scattering_u, prev_scattering_u + scattering_u);
if (any(overflowing)) {
uvec3 overflow_factor = mix(uvec3(0), uvec3(2047 << 21, 2047 << 10, 1023), overflowing);
uint force_max = overflow_factor.r | overflow_factor.g | overflow_factor.b;
#ifdef NO_IMAGE_ATOMICS
atomicOr(light_only_map[lpos], force_max);
#else
imageAtomicOr(light_only_map, pos, force_max);
#endif
}
}
#endif // ALBEDO_USED
}
#endif // DENSITY_USED
}
}

@@ -0,0 +1,753 @@
#[compute]
#version 450
#VERSION_DEFINES
#ifdef USE_VULKAN_MEMORY_MODEL
#pragma use_vulkan_memory_model
#endif
#ifdef MODE_DENSITY
layout(local_size_x = 4, local_size_y = 4, local_size_z = 4) in;
#else
layout(local_size_x = 8, local_size_y = 8, local_size_z = 1) in;
#endif
#include "../cluster_data_inc.glsl"
#include "../light_data_inc.glsl"
#define M_PI 3.14159265359
#define DENSITY_SCALE 1024.0
layout(set = 0, binding = 1) uniform texture2D shadow_atlas;
layout(set = 0, binding = 2) uniform texture2D directional_shadow_atlas;
layout(set = 0, binding = 3, std430) restrict readonly buffer OmniLights {
LightData data[];
}
omni_lights;
layout(set = 0, binding = 4, std430) restrict readonly buffer SpotLights {
LightData data[];
}
spot_lights;
layout(set = 0, binding = 5, std140) uniform DirectionalLights {
DirectionalLightData data[MAX_DIRECTIONAL_LIGHT_DATA_STRUCTS];
}
directional_lights;
layout(set = 0, binding = 6, std430) buffer restrict readonly ClusterBuffer {
uint data[];
}
cluster_buffer;
layout(set = 0, binding = 7) uniform sampler linear_sampler;
#ifdef MODE_DENSITY
layout(rgba16f, set = 0, binding = 8) uniform restrict writeonly image3D density_map;
#endif
#ifdef MODE_FOG
layout(rgba16f, set = 0, binding = 8) uniform restrict readonly image3D density_map;
layout(rgba16f, set = 0, binding = 9) uniform restrict writeonly image3D fog_map;
#endif
#ifdef MODE_COPY
layout(rgba16f, set = 0, binding = 8) uniform restrict readonly image3D source_map;
layout(rgba16f, set = 0, binding = 9) uniform restrict writeonly image3D dest_map;
#endif
#ifdef MODE_FILTER
layout(rgba16f, set = 0, binding = 8) uniform restrict readonly image3D source_map;
layout(rgba16f, set = 0, binding = 9) uniform restrict writeonly image3D dest_map;
#endif
layout(set = 0, binding = 10) uniform sampler shadow_sampler;
#define MAX_VOXEL_GI_INSTANCES 8
struct VoxelGIData {
mat4 xform; // 64 - 64
vec3 bounds; // 12 - 76
float dynamic_range; // 4 - 80
float bias; // 4 - 84
float normal_bias; // 4 - 88
bool blend_ambient; // 4 - 92
uint mipmaps; // 4 - 96
vec3 pad; // 12 - 108
float exposure_normalization; // 4 - 112
};
layout(set = 0, binding = 11, std140) uniform VoxelGIs {
VoxelGIData data[MAX_VOXEL_GI_INSTANCES];
}
voxel_gi_instances;
layout(set = 0, binding = 12) uniform texture3D voxel_gi_textures[MAX_VOXEL_GI_INSTANCES];
layout(set = 0, binding = 13) uniform sampler linear_sampler_with_mipmaps;
#ifdef ENABLE_SDFGI
// SDFGI Integration on set 1
#define SDFGI_MAX_CASCADES 8
struct SDFVoxelGICascadeData {
vec3 position;
float to_probe;
ivec3 probe_world_offset;
float to_cell; // 1/bounds * grid_size
vec3 pad;
float exposure_normalization;
};
layout(set = 1, binding = 0, std140) uniform SDFGI {
vec3 grid_size;
uint max_cascades;
bool use_occlusion;
int probe_axis_size;
float probe_to_uvw;
float normal_bias;
vec3 lightprobe_tex_pixel_size;
float energy;
vec3 lightprobe_uv_offset;
float y_mult;
vec3 occlusion_clamp;
uint pad3;
vec3 occlusion_renormalize;
uint pad4;
vec3 cascade_probe_size;
uint pad5;
SDFVoxelGICascadeData cascades[SDFGI_MAX_CASCADES];
}
sdfgi;
layout(set = 1, binding = 1) uniform texture2DArray sdfgi_ambient_texture;
layout(set = 1, binding = 2) uniform texture3D sdfgi_occlusion_texture;
#endif //SDFGI
layout(set = 0, binding = 14, std140) uniform Params {
vec2 fog_frustum_size_begin;
vec2 fog_frustum_size_end;
float fog_frustum_end;
float ambient_inject;
float z_far;
int filter_axis;
vec3 ambient_color;
float sky_contribution;
ivec3 fog_volume_size;
uint directional_light_count;
vec3 base_emission;
float base_density;
vec3 base_scattering;
float phase_g;
float detail_spread;
float gi_inject;
uint max_voxel_gi_instances;
uint cluster_type_size;
vec2 screen_size;
uint cluster_shift;
uint cluster_width;
uint max_cluster_element_count_div_32;
bool use_temporal_reprojection;
uint temporal_frame;
float temporal_blend;
mat3x4 cam_rotation;
mat4 to_prev_view;
mat3 radiance_inverse_xform;
}
params;
#ifndef MODE_COPY
layout(set = 0, binding = 15) uniform texture3D prev_density_texture;
#ifdef NO_IMAGE_ATOMICS
layout(set = 0, binding = 16) buffer density_only_map_buffer {
uint density_only_map[];
};
layout(set = 0, binding = 17) buffer light_only_map_buffer {
uint light_only_map[];
};
layout(set = 0, binding = 18) buffer emissive_only_map_buffer {
uint emissive_only_map[];
};
#else
layout(r32ui, set = 0, binding = 16) uniform uimage3D density_only_map;
layout(r32ui, set = 0, binding = 17) uniform uimage3D light_only_map;
layout(r32ui, set = 0, binding = 18) uniform uimage3D emissive_only_map;
#endif
#ifdef USE_RADIANCE_CUBEMAP_ARRAY
layout(set = 0, binding = 19) uniform textureCubeArray sky_texture;
#else
layout(set = 0, binding = 19) uniform textureCube sky_texture;
#endif
#endif // MODE_COPY
float get_depth_at_pos(float cell_depth_size, int z) {
float d = float(z) * cell_depth_size + cell_depth_size * 0.5; //center of voxels
d = pow(d, params.detail_spread);
return params.fog_frustum_end * d;
}
vec3 hash3f(uvec3 x) {
x = ((x >> 16) ^ x) * 0x45d9f3b;
x = ((x >> 16) ^ x) * 0x45d9f3b;
x = (x >> 16) ^ x;
return vec3(x & 0xFFFFF) / vec3(float(0xFFFFF));
}
float get_omni_attenuation(float dist, float inv_range, float decay) {
float nd = dist * inv_range;
nd *= nd;
nd *= nd; // nd^4
nd = max(1.0 - nd, 0.0);
nd *= nd; // nd^2
return nd * pow(max(dist, 0.0001), -decay);
}
void cluster_get_item_range(uint p_offset, out uint item_min, out uint item_max, out uint item_from, out uint item_to) {
uint item_min_max = cluster_buffer.data[p_offset];
item_min = item_min_max & 0xFFFF;
item_max = item_min_max >> 16;
item_from = item_min >> 5;
item_to = (item_max == 0) ? 0 : ((item_max - 1) >> 5) + 1; //side effect of how it is stored, as item_max 0 means no elements
}
uint cluster_get_range_clip_mask(uint i, uint z_min, uint z_max) {
int local_min = clamp(int(z_min) - int(i) * 32, 0, 31);
int mask_width = min(int(z_max) - int(z_min), 32 - local_min);
return bitfieldInsert(uint(0), uint(0xFFFFFFFF), local_min, mask_width);
}
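// Henyey-Greenstein phase function, normalized to integrate to 1 over the sphere;
// positive g produces forward scattering, negative g backward scattering.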
float henyey_greenstein(float cos_theta, float g) {
const float k = 0.0795774715459; // 1 / (4 * PI)
return k * (1.0 - g * g) / (pow(1.0 + g * g - 2.0 * g * cos_theta, 1.5));
}
#define TEMPORAL_FRAMES 16
const vec3 halton_map[TEMPORAL_FRAMES] = vec3[](
vec3(0.5, 0.33333333, 0.2),
vec3(0.25, 0.66666667, 0.4),
vec3(0.75, 0.11111111, 0.6),
vec3(0.125, 0.44444444, 0.8),
vec3(0.625, 0.77777778, 0.04),
vec3(0.375, 0.22222222, 0.24),
vec3(0.875, 0.55555556, 0.44),
vec3(0.0625, 0.88888889, 0.64),
vec3(0.5625, 0.03703704, 0.84),
vec3(0.3125, 0.37037037, 0.08),
vec3(0.8125, 0.7037037, 0.28),
vec3(0.1875, 0.14814815, 0.48),
vec3(0.6875, 0.48148148, 0.68),
vec3(0.4375, 0.81481481, 0.88),
vec3(0.9375, 0.25925926, 0.12),
vec3(0.03125, 0.59259259, 0.32));
// Higher values will make light in volumetric fog fade out sooner when it's occluded by shadow.
const float INV_FOG_FADE = 10.0;
void main() {
vec3 fog_cell_size = 1.0 / vec3(params.fog_volume_size);
#ifdef MODE_DENSITY
ivec3 pos = ivec3(gl_GlobalInvocationID.xyz);
if (any(greaterThanEqual(pos, params.fog_volume_size))) {
return; //do not compute
}
#ifdef NO_IMAGE_ATOMICS
uint lpos = pos.z * params.fog_volume_size.x * params.fog_volume_size.y + pos.y * params.fog_volume_size.x + pos.x;
#endif
vec3 posf = vec3(pos);
//posf += mix(vec3(0.0),vec3(1.0),0.3) * hash3f(uvec3(pos)) * 2.0 - 1.0;
vec3 fog_unit_pos = posf * fog_cell_size + fog_cell_size * 0.5; //center of voxels
uvec2 screen_pos = uvec2(fog_unit_pos.xy * params.screen_size);
uvec2 cluster_pos = screen_pos >> params.cluster_shift;
uint cluster_offset = (params.cluster_width * cluster_pos.y + cluster_pos.x) * (params.max_cluster_element_count_div_32 + 32);
//positions in screen are too spread apart, no hopes for optimizing with subgroups
fog_unit_pos.z = pow(fog_unit_pos.z, params.detail_spread);
vec3 view_pos;
view_pos.xy = (fog_unit_pos.xy * 2.0 - 1.0) * mix(params.fog_frustum_size_begin, params.fog_frustum_size_end, vec2(fog_unit_pos.z));
view_pos.z = -params.fog_frustum_end * fog_unit_pos.z;
view_pos.y = -view_pos.y;
vec4 reprojected_density = vec4(0.0);
float reproject_amount = 0.0;
if (params.use_temporal_reprojection) {
vec3 prev_view = (params.to_prev_view * vec4(view_pos, 1.0)).xyz;
//undo transform into prev view
prev_view.y = -prev_view.y;
//z back to unit size
prev_view.z /= -params.fog_frustum_end;
//xy back to unit size
prev_view.xy /= mix(params.fog_frustum_size_begin, params.fog_frustum_size_end, vec2(prev_view.z));
prev_view.xy = prev_view.xy * 0.5 + 0.5;
//z back to unspread value
prev_view.z = pow(prev_view.z, 1.0 / params.detail_spread);
if (all(greaterThan(prev_view, vec3(0.0))) && all(lessThan(prev_view, vec3(1.0)))) {
//reprojection fits
reprojected_density = textureLod(sampler3D(prev_density_texture, linear_sampler), prev_view, 0.0);
reproject_amount = params.temporal_blend;
// Since we can reproject, now we must jitter the current view pos.
// This is done here because cells that can't reproject should not jitter.
fog_unit_pos = posf * fog_cell_size + fog_cell_size * halton_map[params.temporal_frame]; //center of voxels, offset by halton table
screen_pos = uvec2(fog_unit_pos.xy * params.screen_size);
cluster_pos = screen_pos >> params.cluster_shift;
cluster_offset = (params.cluster_width * cluster_pos.y + cluster_pos.x) * (params.max_cluster_element_count_div_32 + 32);
//positions in screen are too spread apart, no hopes for optimizing with subgroups
fog_unit_pos.z = pow(fog_unit_pos.z, params.detail_spread);
view_pos.xy = (fog_unit_pos.xy * 2.0 - 1.0) * mix(params.fog_frustum_size_begin, params.fog_frustum_size_end, vec2(fog_unit_pos.z));
view_pos.z = -params.fog_frustum_end * fog_unit_pos.z;
view_pos.y = -view_pos.y;
}
}
uint cluster_z = uint(clamp((abs(view_pos.z) / params.z_far) * 32.0, 0.0, 31.0));
vec3 total_light = vec3(0.0);
float total_density = params.base_density;
#ifdef NO_IMAGE_ATOMICS
uint local_density = density_only_map[lpos];
#else
uint local_density = imageLoad(density_only_map, pos).x;
#endif
total_density += float(int(local_density)) / DENSITY_SCALE;
total_density = max(0.0, total_density);
#ifdef NO_IMAGE_ATOMICS
uint scattering_u = light_only_map[lpos];
#else
uint scattering_u = imageLoad(light_only_map, pos).x;
#endif
vec3 scattering = vec3(scattering_u >> 21, (scattering_u << 11) >> 21, scattering_u % 1024) / vec3(2047.0, 2047.0, 1023.0);
scattering += params.base_scattering * params.base_density;
#ifdef NO_IMAGE_ATOMICS
uint emission_u = emissive_only_map[lpos];
#else
uint emission_u = imageLoad(emissive_only_map, pos).x;
#endif
vec3 emission = vec3(emission_u >> 21, (emission_u << 11) >> 21, emission_u % 1024) / vec3(511.0, 511.0, 255.0);
emission += params.base_emission * params.base_density;
float cell_depth_size = abs(view_pos.z - get_depth_at_pos(fog_cell_size.z, pos.z + 1));
//compute directional lights
if (total_density > 0.00005) {
for (uint i = 0; i < params.directional_light_count; i++) {
if (directional_lights.data[i].volumetric_fog_energy > 0.001) {
vec3 shadow_attenuation = vec3(1.0);
if (directional_lights.data[i].shadow_opacity > 0.001) {
float depth_z = -view_pos.z;
vec4 pssm_coord;
vec3 light_dir = directional_lights.data[i].direction;
vec4 v = vec4(view_pos, 1.0);
float z_range;
if (depth_z < directional_lights.data[i].shadow_split_offsets.x) {
pssm_coord = (directional_lights.data[i].shadow_matrix1 * v);
pssm_coord /= pssm_coord.w;
z_range = directional_lights.data[i].shadow_z_range.x;
} else if (depth_z < directional_lights.data[i].shadow_split_offsets.y) {
pssm_coord = (directional_lights.data[i].shadow_matrix2 * v);
pssm_coord /= pssm_coord.w;
z_range = directional_lights.data[i].shadow_z_range.y;
} else if (depth_z < directional_lights.data[i].shadow_split_offsets.z) {
pssm_coord = (directional_lights.data[i].shadow_matrix3 * v);
pssm_coord /= pssm_coord.w;
z_range = directional_lights.data[i].shadow_z_range.z;
} else {
pssm_coord = (directional_lights.data[i].shadow_matrix4 * v);
pssm_coord /= pssm_coord.w;
z_range = directional_lights.data[i].shadow_z_range.w;
}
float depth = texture(sampler2D(directional_shadow_atlas, linear_sampler), pssm_coord.xy).r;
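// Soft exponential shadow falloff instead of a binary depth test: attenuation grows with how
// deep the froxel sits inside the shadow (scaled by the cascade's depth range and INV_FOG_FADE),
// which avoids hard stair-stepping in the low-resolution fog volume.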
float shadow = exp(min(0.0, (pssm_coord.z - depth)) * z_range * INV_FOG_FADE);
shadow = mix(shadow, 1.0, smoothstep(directional_lights.data[i].fade_from, directional_lights.data[i].fade_to, view_pos.z)); //done with negative values for performance
shadow_attenuation = mix(vec3(1.0 - directional_lights.data[i].shadow_opacity), vec3(1.0), shadow);
}
total_light += shadow_attenuation * directional_lights.data[i].color * directional_lights.data[i].energy * henyey_greenstein(dot(normalize(view_pos), normalize(directional_lights.data[i].direction)), params.phase_g) * directional_lights.data[i].volumetric_fog_energy;
}
}
// Compute light from sky
if (params.ambient_inject > 0.0) {
vec3 isotropic = vec3(0.0);
vec3 anisotropic = vec3(0.0);
if (params.sky_contribution > 0.0) {
float mip_bias = 2.0 + total_density * (MAX_SKY_LOD - 2.0); // Not physically based, but looks nice
vec3 scatter_direction = (params.radiance_inverse_xform * normalize(view_pos)) * sign(params.phase_g);
#ifdef USE_RADIANCE_CUBEMAP_ARRAY
isotropic = texture(samplerCubeArray(sky_texture, linear_sampler_with_mipmaps), vec4(0.0, 1.0, 0.0, mip_bias)).rgb;
anisotropic = texture(samplerCubeArray(sky_texture, linear_sampler_with_mipmaps), vec4(scatter_direction, mip_bias)).rgb;
#else
isotropic = textureLod(samplerCube(sky_texture, linear_sampler_with_mipmaps), vec3(0.0, 1.0, 0.0), mip_bias).rgb;
anisotropic = textureLod(samplerCube(sky_texture, linear_sampler_with_mipmaps), vec3(scatter_direction), mip_bias).rgb;
#endif //USE_RADIANCE_CUBEMAP_ARRAY
}
total_light += mix(params.ambient_color, mix(isotropic, anisotropic, abs(params.phase_g)), params.sky_contribution) * params.ambient_inject;
}
//compute lights from cluster
{ //omni lights
uint cluster_omni_offset = cluster_offset;
uint item_min;
uint item_max;
uint item_from;
uint item_to;
cluster_get_item_range(cluster_omni_offset + params.max_cluster_element_count_div_32 + cluster_z, item_min, item_max, item_from, item_to);
for (uint i = item_from; i < item_to; i++) {
uint mask = cluster_buffer.data[cluster_omni_offset + i];
mask &= cluster_get_range_clip_mask(i, item_min, item_max);
uint merged_mask = mask;
while (merged_mask != 0) {
uint bit = findMSB(merged_mask);
merged_mask &= ~(1 << bit);
uint light_index = 32 * i + bit;
//if (!bool(omni_omni_lights.data[light_index].mask & draw_call.layer_mask)) {
// continue; //not masked
//}
vec3 light_pos = omni_lights.data[light_index].position;
float d = distance(omni_lights.data[light_index].position, view_pos);
float shadow_attenuation = 1.0;
if (omni_lights.data[light_index].volumetric_fog_energy > 0.001 && d * omni_lights.data[light_index].inv_radius < 1.0) {
float attenuation = get_omni_attenuation(d, omni_lights.data[light_index].inv_radius, omni_lights.data[light_index].attenuation);
vec3 light = omni_lights.data[light_index].color;
if (omni_lights.data[light_index].shadow_opacity > 0.001) {
//has shadow
vec4 uv_rect = omni_lights.data[light_index].atlas_rect;
vec2 flip_offset = omni_lights.data[light_index].direction.xy;
vec3 local_vert = (omni_lights.data[light_index].shadow_matrix * vec4(view_pos, 1.0)).xyz;
float shadow_len = length(local_vert); //need to remember shadow len from here
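// Dual-paraboloid lookup into the omni shadow atlas: the hemisphere is selected by the sign of
// shadow_sample.z (flip_offset picks the matching atlas half) and xy / (1 + |z|) is the
// paraboloid projection.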
vec3 shadow_sample = normalize(local_vert);
if (shadow_sample.z >= 0.0) {
uv_rect.xy += flip_offset;
}
shadow_sample.z = 1.0 + abs(shadow_sample.z);
vec3 pos = vec3(shadow_sample.xy / shadow_sample.z, shadow_len - omni_lights.data[light_index].shadow_bias);
pos.z *= omni_lights.data[light_index].inv_radius;
pos.z = 1.0 - pos.z;
pos.xy = pos.xy * 0.5 + 0.5;
pos.xy = uv_rect.xy + pos.xy * uv_rect.zw;
float depth = texture(sampler2D(shadow_atlas, linear_sampler), pos.xy).r;
shadow_attenuation = mix(1.0 - omni_lights.data[light_index].shadow_opacity, 1.0, exp(min(0.0, (pos.z - depth)) / omni_lights.data[light_index].inv_radius * INV_FOG_FADE));
}
total_light += light * attenuation * shadow_attenuation * henyey_greenstein(dot(normalize(light_pos - view_pos), normalize(view_pos)), params.phase_g) * omni_lights.data[light_index].volumetric_fog_energy;
}
}
}
}
{ //spot lights
uint cluster_spot_offset = cluster_offset + params.cluster_type_size;
uint item_min;
uint item_max;
uint item_from;
uint item_to;
cluster_get_item_range(cluster_spot_offset + params.max_cluster_element_count_div_32 + cluster_z, item_min, item_max, item_from, item_to);
for (uint i = item_from; i < item_to; i++) {
uint mask = cluster_buffer.data[cluster_spot_offset + i];
mask &= cluster_get_range_clip_mask(i, item_min, item_max);
uint merged_mask = mask;
while (merged_mask != 0) {
uint bit = findMSB(merged_mask);
merged_mask &= ~(1 << bit);
//if (!bool(omni_lights.data[light_index].mask & draw_call.layer_mask)) {
// continue; //not masked
//}
uint light_index = 32 * i + bit;
vec3 light_pos = spot_lights.data[light_index].position;
vec3 light_rel_vec = spot_lights.data[light_index].position - view_pos;
float d = length(light_rel_vec);
float shadow_attenuation = 1.0;
if (spot_lights.data[light_index].volumetric_fog_energy > 0.001 && d * spot_lights.data[light_index].inv_radius < 1.0) {
float attenuation = get_omni_attenuation(d, spot_lights.data[light_index].inv_radius, spot_lights.data[light_index].attenuation);
vec3 spot_dir = spot_lights.data[light_index].direction;
float cone_angle = spot_lights.data[light_index].cone_angle;
float scos = max(dot(-normalize(light_rel_vec), spot_dir), cone_angle);
float spot_rim = max(0.0001, (1.0 - scos) / (1.0 - cone_angle));
attenuation *= 1.0 - pow(spot_rim, spot_lights.data[light_index].cone_attenuation);
vec3 light = spot_lights.data[light_index].color;
if (spot_lights.data[light_index].shadow_opacity > 0.001) {
//has shadow
vec4 uv_rect = spot_lights.data[light_index].atlas_rect;
vec4 v = vec4(view_pos, 1.0);
vec4 splane = (spot_lights.data[light_index].shadow_matrix * v);
splane.z -= spot_lights.data[light_index].shadow_bias / (d * spot_lights.data[light_index].inv_radius);
splane /= splane.w;
vec3 pos = vec3(splane.xy * spot_lights.data[light_index].atlas_rect.zw + spot_lights.data[light_index].atlas_rect.xy, splane.z);
float depth = texture(sampler2D(shadow_atlas, linear_sampler), pos.xy).r;
shadow_attenuation = mix(1.0 - spot_lights.data[light_index].shadow_opacity, 1.0, exp(min(0.0, (pos.z - depth)) / spot_lights.data[light_index].inv_radius * INV_FOG_FADE));
}
total_light += light * attenuation * shadow_attenuation * henyey_greenstein(dot(normalize(light_rel_vec), normalize(view_pos)), params.phase_g) * spot_lights.data[light_index].volumetric_fog_energy;
}
}
}
}
vec3 world_pos = mat3(params.cam_rotation) * view_pos;
for (uint i = 0; i < params.max_voxel_gi_instances; i++) {
vec3 position = (voxel_gi_instances.data[i].xform * vec4(world_pos, 1.0)).xyz;
//this causes corrupted pixels, i have no idea why..
if (all(bvec2(all(greaterThanEqual(position, vec3(0.0))), all(lessThan(position, voxel_gi_instances.data[i].bounds))))) {
position /= voxel_gi_instances.data[i].bounds;
vec4 light = vec4(0.0);
for (uint j = 0; j < voxel_gi_instances.data[i].mipmaps; j++) {
vec4 slight = textureLod(sampler3D(voxel_gi_textures[i], linear_sampler_with_mipmaps), position, float(j));
float a = (1.0 - light.a);
light += a * slight;
}
light.rgb *= voxel_gi_instances.data[i].dynamic_range * params.gi_inject * voxel_gi_instances.data[i].exposure_normalization;
total_light += light.rgb;
}
}
//sdfgi
#ifdef ENABLE_SDFGI
{
float blend = -1.0;
vec3 ambient_total = vec3(0.0);
for (uint i = 0; i < sdfgi.max_cascades; i++) {
vec3 cascade_pos = (world_pos - sdfgi.cascades[i].position) * sdfgi.cascades[i].to_probe;
if (any(lessThan(cascade_pos, vec3(0.0))) || any(greaterThanEqual(cascade_pos, sdfgi.cascade_probe_size))) {
continue; //skip cascade
}
vec3 base_pos = floor(cascade_pos);
ivec3 probe_base_pos = ivec3(base_pos);
vec4 ambient_accum = vec4(0.0);
ivec3 tex_pos = ivec3(probe_base_pos.xy, int(i));
tex_pos.x += probe_base_pos.z * sdfgi.probe_axis_size;
for (uint j = 0; j < 8; j++) {
ivec3 offset = (ivec3(j) >> ivec3(0, 1, 2)) & ivec3(1, 1, 1);
ivec3 probe_posi = probe_base_pos;
probe_posi += offset;
// Compute weight
vec3 probe_pos = vec3(probe_posi);
vec3 probe_to_pos = cascade_pos - probe_pos;
vec3 trilinear = vec3(1.0) - abs(probe_to_pos);
float weight = trilinear.x * trilinear.y * trilinear.z;
// Compute lightprobe occlusion
if (sdfgi.use_occlusion) {
ivec3 occ_indexv = abs((sdfgi.cascades[i].probe_world_offset + probe_posi) & ivec3(1, 1, 1)) * ivec3(1, 2, 4);
vec4 occ_mask = mix(vec4(0.0), vec4(1.0), equal(ivec4(occ_indexv.x | occ_indexv.y), ivec4(0, 1, 2, 3)));
vec3 occ_pos = clamp(cascade_pos, probe_pos - sdfgi.occlusion_clamp, probe_pos + sdfgi.occlusion_clamp) * sdfgi.probe_to_uvw;
occ_pos.z += float(i);
if (occ_indexv.z != 0) { //z bit is on, means index is >=4, so make it switch to the other half of textures
occ_pos.x += 1.0;
}
occ_pos *= sdfgi.occlusion_renormalize;
float occlusion = dot(textureLod(sampler3D(sdfgi_occlusion_texture, linear_sampler), occ_pos, 0.0), occ_mask);
weight *= max(occlusion, 0.01);
}
// Compute ambient texture position
ivec3 uvw = tex_pos;
uvw.xy += offset.xy;
uvw.x += offset.z * sdfgi.probe_axis_size;
vec3 ambient = texelFetch(sampler2DArray(sdfgi_ambient_texture, linear_sampler), uvw, 0).rgb;
ambient_accum.rgb += ambient * weight * sdfgi.cascades[i].exposure_normalization;
ambient_accum.a += weight;
}
if (ambient_accum.a > 0) {
ambient_accum.rgb /= ambient_accum.a;
}
ambient_total = ambient_accum.rgb;
break;
}
total_light += ambient_total * params.gi_inject;
}
#endif
}
vec4 final_density = vec4(total_light * scattering + emission, total_density);
final_density = mix(final_density, reprojected_density, reproject_amount);
imageStore(density_map, pos, final_density);
#ifdef NO_IMAGE_ATOMICS
density_only_map[lpos] = 0;
light_only_map[lpos] = 0;
emissive_only_map[lpos] = 0;
#else
imageStore(density_only_map, pos, uvec4(0));
imageStore(light_only_map, pos, uvec4(0));
imageStore(emissive_only_map, pos, uvec4(0));
#endif
#endif
#ifdef MODE_FOG
ivec3 pos = ivec3(gl_GlobalInvocationID.xy, 0);
if (any(greaterThanEqual(pos, params.fog_volume_size))) {
return; //do not compute
}
vec4 fog_accum = vec4(0.0, 0.0, 0.0, 1.0);
float prev_z = 0.0;
for (int i = 0; i < params.fog_volume_size.z; i++) {
//compute fog position
ivec3 fog_pos = pos + ivec3(0, 0, i);
//get fog value
vec4 fog = imageLoad(density_map, fog_pos);
//get depth at cell pos
float z = get_depth_at_pos(fog_cell_size.z, i);
//get distance from previous pos
float d = abs(prev_z - z);
//compute transmittance using beer's law
float transmittance = exp(-d * fog.a);
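// Analytic in-scattering over the slab: scattered light is fog.rgb * (1 - transmittance) /
// extinction, attenuated by the transmittance accumulated from the camera so far (fog_accum.a).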
fog_accum.rgb += ((fog.rgb - fog.rgb * transmittance) / max(fog.a, 0.00001)) * fog_accum.a;
fog_accum.a *= transmittance;
prev_z = z;
imageStore(fog_map, fog_pos, vec4(fog_accum.rgb, 1.0 - fog_accum.a));
}
#endif
#ifdef MODE_FILTER
ivec3 pos = ivec3(gl_GlobalInvocationID.xyz);
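// Separable 7-tap Gaussian blur; each dispatch filters along a single axis selected by
// params.filter_axis.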
const float gauss[7] = float[](0.071303, 0.131514, 0.189879, 0.214607, 0.189879, 0.131514, 0.071303);
const ivec3 filter_dir[3] = ivec3[](ivec3(1, 0, 0), ivec3(0, 1, 0), ivec3(0, 0, 1));
ivec3 offset = filter_dir[params.filter_axis];
vec4 accum = vec4(0.0);
for (int i = -3; i <= 3; i++) {
accum += imageLoad(source_map, clamp(pos + offset * i, ivec3(0), params.fog_volume_size - ivec3(1))) * gauss[i + 3];
}
imageStore(dest_map, pos, accum);
#endif
#ifdef MODE_COPY
ivec3 pos = ivec3(gl_GlobalInvocationID.xyz);
if (any(greaterThanEqual(pos, params.fog_volume_size))) {
return; //do not compute
}
imageStore(dest_map, pos, imageLoad(source_map, pos));
#endif
}

@@ -0,0 +1,620 @@
#[compute]
#version 450
#VERSION_DEFINES
#ifdef MODE_DYNAMIC
layout(local_size_x = 8, local_size_y = 8, local_size_z = 1) in;
#else
layout(local_size_x = 64, local_size_y = 1, local_size_z = 1) in;
#endif
#ifndef MODE_DYNAMIC
#define NO_CHILDREN 0xFFFFFFFF
struct CellChildren {
uint children[8];
};
layout(set = 0, binding = 1, std430) buffer CellChildrenBuffer {
CellChildren data[];
}
cell_children;
struct CellData {
uint position; // xyz 10 bits
uint albedo; //rgb albedo
uint emission; //rgb normalized with e as multiplier
uint normal; //RGB normal encoded
};
layout(set = 0, binding = 2, std430) buffer CellDataBuffer {
CellData data[];
}
cell_data;
#endif // MODE DYNAMIC
#define LIGHT_TYPE_DIRECTIONAL 0
#define LIGHT_TYPE_OMNI 1
#define LIGHT_TYPE_SPOT 2
#if defined(MODE_COMPUTE_LIGHT) || defined(MODE_DYNAMIC_LIGHTING)
struct Light {
uint type;
float energy;
float radius;
float attenuation;
vec3 color;
float cos_spot_angle;
vec3 position;
float inv_spot_attenuation;
vec3 direction;
bool has_shadow;
};
layout(set = 0, binding = 3, std140) uniform Lights {
Light data[MAX_LIGHTS];
}
lights;
#endif // MODE COMPUTE LIGHT
#ifdef MODE_SECOND_BOUNCE
layout(set = 0, binding = 5) uniform texture3D color_texture;
#endif // MODE_SECOND_BOUNCE
#ifndef MODE_DYNAMIC
layout(push_constant, std430) uniform Params {
ivec3 limits;
uint stack_size;
float emission_scale;
float propagation;
float dynamic_range;
uint light_count;
uint cell_offset;
uint cell_count;
float aniso_strength;
float cell_size;
}
params;
layout(set = 0, binding = 4, std430) buffer Outputs {
vec4 data[];
}
outputs;
#endif // MODE DYNAMIC
layout(set = 0, binding = 9) uniform texture3D texture_sdf;
layout(set = 0, binding = 10) uniform sampler texture_sampler;
#ifdef MODE_WRITE_TEXTURE
layout(rgba8, set = 0, binding = 5) uniform restrict writeonly image3D color_tex;
#endif
#ifdef MODE_DYNAMIC
layout(push_constant, std430) uniform Params {
ivec3 limits;
uint light_count; //when not lighting
ivec3 x_dir;
float z_base;
ivec3 y_dir;
float z_sign;
ivec3 z_dir;
float pos_multiplier;
ivec2 rect_pos;
ivec2 rect_size;
ivec2 prev_rect_ofs;
ivec2 prev_rect_size;
bool flip_x;
bool flip_y;
float dynamic_range;
bool on_mipmap;
float propagation;
float cell_size;
float pad[2];
}
params;
#ifdef MODE_DYNAMIC_LIGHTING
layout(rgba8, set = 0, binding = 5) uniform restrict readonly image2D source_albedo;
layout(rgba8, set = 0, binding = 6) uniform restrict readonly image2D source_normal;
layout(rgba8, set = 0, binding = 7) uniform restrict readonly image2D source_orm;
//layout (set=0,binding=8) uniform texture2D source_depth;
layout(rgba16f, set = 0, binding = 11) uniform restrict image2D emission;
layout(r32f, set = 0, binding = 12) uniform restrict image2D depth;
#endif
#ifdef MODE_DYNAMIC_SHRINK
layout(rgba16f, set = 0, binding = 5) uniform restrict readonly image2D source_light;
layout(r32f, set = 0, binding = 6) uniform restrict readonly image2D source_depth;
#ifdef MODE_DYNAMIC_SHRINK_WRITE
layout(rgba16f, set = 0, binding = 7) uniform restrict writeonly image2D light;
layout(r32f, set = 0, binding = 8) uniform restrict writeonly image2D depth;
#endif // MODE_DYNAMIC_SHRINK_WRITE
#ifdef MODE_DYNAMIC_SHRINK_PLOT
layout(rgba8, set = 0, binding = 11) uniform restrict image3D color_texture;
#endif //MODE_DYNAMIC_SHRINK_PLOT
#endif // MODE_DYNAMIC_SHRINK
//layout (rgba8,set=0,binding=5) uniform restrict writeonly image3D color_tex;
#endif // MODE DYNAMIC
#if defined(MODE_COMPUTE_LIGHT) || defined(MODE_DYNAMIC_LIGHTING)
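// March toward the light through the SDF; the occlusion factor is the smallest distance value
// seen along the ray, clamped to [0, 1] (0 = fully occluded).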
float raymarch(float distance, float distance_adv, vec3 from, vec3 direction) {
vec3 cell_size = 1.0 / vec3(params.limits);
float occlusion = 1.0;
while (distance > 0.5) { //use this to avoid precision errors
float advance = texture(sampler3D(texture_sdf, texture_sampler), from * cell_size).r * 255.0 - 1.0;
if (advance < 0.0) {
occlusion = 0.0;
break;
}
occlusion = min(advance, occlusion);
advance = max(distance_adv, advance - mod(advance, distance_adv)); //should always advance in multiples of distance_adv
from += direction * advance;
distance -= advance;
}
return occlusion; //max(0.0,distance);
}
float get_omni_attenuation(float distance, float inv_range, float decay) {
float nd = distance * inv_range;
nd *= nd;
nd *= nd; // nd^4
nd = max(1.0 - nd, 0.0);
nd *= nd; // nd^2
return nd * pow(max(distance, 0.0001), -decay);
}
bool compute_light_vector(uint light, vec3 pos, out float attenuation, out vec3 light_pos) {
if (lights.data[light].type == LIGHT_TYPE_DIRECTIONAL) {
light_pos = pos - lights.data[light].direction * length(vec3(params.limits));
attenuation = 1.0;
} else {
light_pos = lights.data[light].position;
float distance = length(pos - light_pos);
if (distance >= lights.data[light].radius) {
return false;
}
attenuation = get_omni_attenuation(
distance * params.cell_size,
1.0 / (lights.data[light].radius * params.cell_size),
lights.data[light].attenuation);
if (lights.data[light].type == LIGHT_TYPE_SPOT) {
vec3 rel = normalize(pos - light_pos);
float cos_spot_angle = lights.data[light].cos_spot_angle;
float cos_angle = dot(rel, lights.data[light].direction);
if (cos_angle < cos_spot_angle) {
return false;
}
float scos = max(cos_angle, cos_spot_angle);
float spot_rim = max(0.0001, (1.0 - scos) / (1.0 - cos_spot_angle));
attenuation *= 1.0 - pow(spot_rim, lights.data[light].inv_spot_attenuation);
}
}
return true;
}
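// Returns the step length that advances exactly one cell along the normal's dominant axis;
// used as the fixed ray-marching increment for shadow rays.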
float get_normal_advance(vec3 p_normal) {
vec3 normal = p_normal;
vec3 unorm = abs(normal);
if ((unorm.x >= unorm.y) && (unorm.x >= unorm.z)) {
// x code
unorm = normal.x > 0.0 ? vec3(1.0, 0.0, 0.0) : vec3(-1.0, 0.0, 0.0);
} else if ((unorm.y > unorm.x) && (unorm.y >= unorm.z)) {
// y code
unorm = normal.y > 0.0 ? vec3(0.0, 1.0, 0.0) : vec3(0.0, -1.0, 0.0);
} else if ((unorm.z > unorm.x) && (unorm.z > unorm.y)) {
// z code
unorm = normal.z > 0.0 ? vec3(0.0, 0.0, 1.0) : vec3(0.0, 0.0, -1.0);
} else {
// oh-no we messed up code
// has to be
unorm = vec3(1.0, 0.0, 0.0);
}
return 1.0 / dot(normal, unorm);
}
void clip_segment(vec4 plane, vec3 begin, inout vec3 end) {
vec3 segment = begin - end;
float den = dot(plane.xyz, segment);
//printf("den is %i\n",den);
if (den < 0.0001) {
return;
}
float dist = (dot(plane.xyz, begin) - plane.w) / den;
if (dist < 0.0001 || dist > 1.0001) {
return;
}
end = begin + segment * -dist;
}
bool compute_light_at_pos(uint index, vec3 pos, vec3 normal, inout vec3 light, inout vec3 light_dir) {
float attenuation;
vec3 light_pos;
if (!compute_light_vector(index, pos, attenuation, light_pos)) {
return false;
}
light_dir = normalize(pos - light_pos);
if (attenuation < 0.01 || (length(normal) > 0.2 && dot(normal, light_dir) >= 0)) {
return false; //not facing the light, or attenuation is near zero
}
if (lights.data[index].has_shadow) {
float distance_adv = get_normal_advance(light_dir);
vec3 to = pos;
if (length(normal) > 0.2) {
to += normal * distance_adv * 0.51;
} else {
to -= sign(light_dir) * 0.45; //go near the edge towards the light direction to avoid self occlusion
}
//clip
clip_segment(mix(vec4(-1.0, 0.0, 0.0, 0.0), vec4(1.0, 0.0, 0.0, float(params.limits.x - 1)), bvec4(light_dir.x < 0.0)), to, light_pos);
clip_segment(mix(vec4(0.0, -1.0, 0.0, 0.0), vec4(0.0, 1.0, 0.0, float(params.limits.y - 1)), bvec4(light_dir.y < 0.0)), to, light_pos);
clip_segment(mix(vec4(0.0, 0.0, -1.0, 0.0), vec4(0.0, 0.0, 1.0, float(params.limits.z - 1)), bvec4(light_dir.z < 0.0)), to, light_pos);
float distance = length(to - light_pos);
if (distance < 0.1) {
return false; // hit
}
distance += distance_adv - mod(distance, distance_adv); //make it reach the center of the box always
light_pos = to - light_dir * distance;
//from -= sign(light_dir)*0.45; //go near the edge towards the light direction to avoid self occlusion
/*float dist = raymarch(distance,distance_adv,light_pos,light_dir);
if (dist > distance_adv) {
return false;
}
attenuation *= 1.0 - smoothstep(0.1*distance_adv,distance_adv,dist);
*/
float occlusion = raymarch(distance, distance_adv, light_pos, light_dir);
if (occlusion == 0.0) {
return false;
}
attenuation *= occlusion; //1.0 - smoothstep(0.1*distance_adv,distance_adv,dist);
}
light = lights.data[index].color * attenuation * lights.data[index].energy;
return true;
}
#endif // MODE COMPUTE LIGHT
void main() {
#ifndef MODE_DYNAMIC
uint cell_index = gl_GlobalInvocationID.x;
if (cell_index >= params.cell_count) {
return;
}
cell_index += params.cell_offset;
uvec3 posu = uvec3(cell_data.data[cell_index].position & 0x7FF, (cell_data.data[cell_index].position >> 11) & 0x3FF, cell_data.data[cell_index].position >> 21);
vec4 albedo = unpackUnorm4x8(cell_data.data[cell_index].albedo);
#endif
/////////////////COMPUTE LIGHT///////////////////////////////
#ifdef MODE_COMPUTE_LIGHT
vec3 pos = vec3(posu) + vec3(0.5);
vec3 emission = vec3(uvec3(cell_data.data[cell_index].emission & 0x1ff, (cell_data.data[cell_index].emission >> 9) & 0x1ff, (cell_data.data[cell_index].emission >> 18) & 0x1ff)) * pow(2.0, float(cell_data.data[cell_index].emission >> 27) - 15.0 - 9.0);
vec3 normal = unpackSnorm4x8(cell_data.data[cell_index].normal).xyz;
vec3 accum = vec3(0.0);
for (uint i = 0; i < params.light_count; i++) {
vec3 light;
vec3 light_dir;
if (!compute_light_at_pos(i, pos, normal.xyz, light, light_dir)) {
continue;
}
light *= albedo.rgb;
if (length(normal) > 0.2) {
accum += max(0.0, dot(normal, -light_dir)) * light;
} else {
//all directions
accum += light;
}
}
outputs.data[cell_index] = vec4(accum + emission, 0.0);
#endif //MODE_COMPUTE_LIGHT
/////////////////SECOND BOUNCE///////////////////////////////
#ifdef MODE_SECOND_BOUNCE
vec3 pos = vec3(posu) + vec3(0.5);
ivec3 ipos = ivec3(posu);
vec4 normal = unpackSnorm4x8(cell_data.data[cell_index].normal);
vec3 accum = outputs.data[cell_index].rgb;
if (length(normal.xyz) > 0.2) {
vec3 v0 = abs(normal.z) < 0.999 ? vec3(0.0, 0.0, 1.0) : vec3(0.0, 1.0, 0.0);
vec3 tangent = normalize(cross(v0, normal.xyz));
vec3 bitangent = normalize(cross(tangent, normal.xyz));
mat3 normal_mat = mat3(tangent, bitangent, normal.xyz);
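// Second bounce via voxel cone tracing: six cones distributed over the hemisphere around the
// surface normal (tan_half_angle = 0.577, i.e. roughly 60-degree cones) are marched through
// the mipmapped light volume, sampling the LOD that matches the growing cone diameter
// (log2(diameter)) and compositing front to back until the accumulated alpha is nearly opaque.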
#define MAX_CONE_DIRS 6
vec3 cone_dirs[MAX_CONE_DIRS] = vec3[](
vec3(0.0, 0.0, 1.0),
vec3(0.866025, 0.0, 0.5),
vec3(0.267617, 0.823639, 0.5),
vec3(-0.700629, 0.509037, 0.5),
vec3(-0.700629, -0.509037, 0.5),
vec3(0.267617, -0.823639, 0.5));
float cone_weights[MAX_CONE_DIRS] = float[](0.25, 0.15, 0.15, 0.15, 0.15, 0.15);
float tan_half_angle = 0.577;
for (int i = 0; i < MAX_CONE_DIRS; i++) {
vec3 direction = normal_mat * cone_dirs[i];
vec4 color = vec4(0.0);
{
float dist = 1.5;
float max_distance = length(vec3(params.limits));
vec3 cell_size = 1.0 / vec3(params.limits);
while (dist < max_distance && color.a < 0.95) {
float diameter = max(1.0, 2.0 * tan_half_angle * dist);
vec3 uvw_pos = (pos + dist * direction) * cell_size;
float half_diameter = diameter * 0.5;
//check if outside, then break
//if ( any(greaterThan(abs(uvw_pos - 0.5),vec3(0.5f + half_diameter * cell_size)) ) ) {
// break;
//}
float log2_diameter = log2(diameter);
vec4 scolor = textureLod(sampler3D(color_texture, texture_sampler), uvw_pos, log2_diameter);
float a = (1.0 - color.a);
color += a * scolor;
dist += half_diameter;
}
}
color *= cone_weights[i] * vec4(albedo.rgb, 1.0) * params.dynamic_range; //restore range
accum += color.rgb;
}
}
outputs.data[cell_index] = vec4(accum, 0.0);
#endif // MODE_SECOND_BOUNCE
/////////////////UPDATE MIPMAPS///////////////////////////////
#ifdef MODE_UPDATE_MIPMAPS
{
vec3 light_accum = vec3(0.0);
float count = 0.0;
for (uint i = 0; i < 8; i++) {
uint child_index = cell_children.data[cell_index].children[i];
if (child_index == NO_CHILDREN) {
continue;
}
light_accum += outputs.data[child_index].rgb;
count += 1.0;
}
float divisor = mix(8.0, count, params.propagation);
outputs.data[cell_index] = vec4(light_accum / divisor, 0.0);
}
#endif
///////////////////WRITE TEXTURE/////////////////////////////
#ifdef MODE_WRITE_TEXTURE
{
imageStore(color_tex, ivec3(posu), vec4(outputs.data[cell_index].rgb / params.dynamic_range, albedo.a));
}
#endif
///////////////////DYNAMIC LIGHTING/////////////////////////////
#ifdef MODE_DYNAMIC
ivec2 pos_xy = ivec2(gl_GlobalInvocationID.xy);
if (any(greaterThanEqual(pos_xy, params.rect_size))) {
return; //out of bounds
}
ivec2 uv_xy = pos_xy;
if (params.flip_x) {
uv_xy.x = params.rect_size.x - pos_xy.x - 1;
}
if (params.flip_y) {
uv_xy.y = params.rect_size.y - pos_xy.y - 1;
}
#ifdef MODE_DYNAMIC_LIGHTING
{
float z = params.z_base + imageLoad(depth, uv_xy).x * params.z_sign;
ivec3 pos = params.x_dir * (params.rect_pos.x + pos_xy.x) + params.y_dir * (params.rect_pos.y + pos_xy.y) + abs(params.z_dir) * int(z);
vec3 normal = normalize(imageLoad(source_normal, uv_xy).xyz * 2.0 - 1.0);
normal = vec3(params.x_dir) * normal.x * mix(1.0, -1.0, params.flip_x) + vec3(params.y_dir) * normal.y * mix(1.0, -1.0, params.flip_y) - vec3(params.z_dir) * normal.z;
vec4 albedo = imageLoad(source_albedo, uv_xy);
//determine the position in space
vec3 accum = vec3(0.0);
for (uint i = 0; i < params.light_count; i++) {
vec3 light;
vec3 light_dir;
if (!compute_light_at_pos(i, vec3(pos) * params.pos_multiplier, normal, light, light_dir)) {
continue;
}
light *= albedo.rgb;
accum += max(0.0, dot(normal, -light_dir)) * light;
}
accum += imageLoad(emission, uv_xy).xyz;
imageStore(emission, uv_xy, vec4(accum, albedo.a));
imageStore(depth, uv_xy, vec4(z));
}
#endif // MODE DYNAMIC LIGHTING
#ifdef MODE_DYNAMIC_SHRINK
{
vec4 accum = vec4(0.0);
float accum_z = 0.0;
float count = 0.0;
for (int i = 0; i < 4; i++) {
ivec2 ofs = pos_xy * 2 + ivec2(i & 1, i >> 1) - params.prev_rect_ofs;
if (any(lessThan(ofs, ivec2(0))) || any(greaterThanEqual(ofs, params.prev_rect_size))) {
continue;
}
if (params.flip_x) {
ofs.x = params.prev_rect_size.x - ofs.x - 1;
}
if (params.flip_y) {
ofs.y = params.prev_rect_size.y - ofs.y - 1;
}
vec4 light = imageLoad(source_light, ofs);
if (light.a == 0.0) { //ignore empty
continue;
}
accum += light;
float z = imageLoad(source_depth, ofs).x;
accum_z += z * 0.5; //shrink half too
count += 1.0;
}
if (params.on_mipmap) {
accum.rgb /= mix(8.0, count, params.propagation);
accum.a /= 8.0;
} else {
accum /= 4.0;
}
if (count == 0.0) {
accum_z = 0.0; //avoid nan
} else {
accum_z /= count;
}
#ifdef MODE_DYNAMIC_SHRINK_WRITE
imageStore(light, uv_xy, accum);
imageStore(depth, uv_xy, vec4(accum_z));
#endif
#ifdef MODE_DYNAMIC_SHRINK_PLOT
if (accum.a < 0.001) {
return; //do not blit if alpha is too low
}
ivec3 pos = params.x_dir * (params.rect_pos.x + pos_xy.x) + params.y_dir * (params.rect_pos.y + pos_xy.y) + abs(params.z_dir) * int(accum_z);
float z_frac = fract(accum_z);
for (int i = 0; i < 2; i++) {
ivec3 pos3d = pos + abs(params.z_dir) * i;
if (any(lessThan(pos3d, ivec3(0))) || any(greaterThanEqual(pos3d, params.limits))) {
//skip if offlimits
continue;
}
vec4 color_blit = accum * (i == 0 ? 1.0 - z_frac : z_frac);
vec4 color = imageLoad(color_texture, pos3d);
color.rgb *= params.dynamic_range;
#if 0
color.rgb = mix(color.rgb,color_blit.rgb,color_blit.a);
color.a+=color_blit.a;
#else
float sa = 1.0 - color_blit.a;
vec4 result;
result.a = color.a * sa + color_blit.a;
if (result.a == 0.0) {
result = vec4(0.0);
} else {
result.rgb = (color.rgb * color.a * sa + color_blit.rgb * color_blit.a) / result.a;
color = result;
}
#endif
color.rgb /= params.dynamic_range;
imageStore(color_texture, pos3d, color);
//imageStore(color_texture,pos3d,vec4(1,1,1,1));
}
#endif // MODE_DYNAMIC_SHRINK_PLOT
}
#endif
#endif // MODE DYNAMIC
}

View File

@@ -0,0 +1,184 @@
#[vertex]
#version 450
#VERSION_DEFINES
struct CellData {
uint position; // xyz 10 bits
uint albedo; //rgb albedo
uint emission; //rgb normalized with e as multiplier
uint normal; //RGB normal encoded
};
layout(set = 0, binding = 1, std140) buffer CellDataBuffer {
CellData data[];
}
cell_data;
layout(set = 0, binding = 2) uniform texture3D color_tex;
layout(set = 0, binding = 3) uniform sampler tex_sampler;
layout(push_constant, std430) uniform Params {
mat4 projection;
uint cell_offset;
float dynamic_range;
float alpha;
uint level;
ivec3 bounds;
uint pad;
}
params;
layout(location = 0) out vec4 color_interp;
void main() {
const vec3 cube_triangles[36] = vec3[](
vec3(-1.0f, -1.0f, -1.0f),
vec3(-1.0f, -1.0f, 1.0f),
vec3(-1.0f, 1.0f, 1.0f),
vec3(1.0f, 1.0f, -1.0f),
vec3(-1.0f, -1.0f, -1.0f),
vec3(-1.0f, 1.0f, -1.0f),
vec3(1.0f, -1.0f, 1.0f),
vec3(-1.0f, -1.0f, -1.0f),
vec3(1.0f, -1.0f, -1.0f),
vec3(1.0f, 1.0f, -1.0f),
vec3(1.0f, -1.0f, -1.0f),
vec3(-1.0f, -1.0f, -1.0f),
vec3(-1.0f, -1.0f, -1.0f),
vec3(-1.0f, 1.0f, 1.0f),
vec3(-1.0f, 1.0f, -1.0f),
vec3(1.0f, -1.0f, 1.0f),
vec3(-1.0f, -1.0f, 1.0f),
vec3(-1.0f, -1.0f, -1.0f),
vec3(-1.0f, 1.0f, 1.0f),
vec3(-1.0f, -1.0f, 1.0f),
vec3(1.0f, -1.0f, 1.0f),
vec3(1.0f, 1.0f, 1.0f),
vec3(1.0f, -1.0f, -1.0f),
vec3(1.0f, 1.0f, -1.0f),
vec3(1.0f, -1.0f, -1.0f),
vec3(1.0f, 1.0f, 1.0f),
vec3(1.0f, -1.0f, 1.0f),
vec3(1.0f, 1.0f, 1.0f),
vec3(1.0f, 1.0f, -1.0f),
vec3(-1.0f, 1.0f, -1.0f),
vec3(1.0f, 1.0f, 1.0f),
vec3(-1.0f, 1.0f, -1.0f),
vec3(-1.0f, 1.0f, 1.0f),
vec3(1.0f, 1.0f, 1.0f),
vec3(-1.0f, 1.0f, 1.0f),
vec3(1.0f, -1.0f, 1.0f));
vec3 vertex = cube_triangles[gl_VertexIndex] * 0.5 + 0.5;
#ifdef MODE_DEBUG_LIGHT_FULL
uvec3 posu = uvec3(gl_InstanceIndex % params.bounds.x, (gl_InstanceIndex / params.bounds.x) % params.bounds.y, gl_InstanceIndex / (params.bounds.y * params.bounds.x));
#else
uint cell_index = gl_InstanceIndex + params.cell_offset;
uvec3 posu = uvec3(cell_data.data[cell_index].position & 0x7FF, (cell_data.data[cell_index].position >> 11) & 0x3FF, cell_data.data[cell_index].position >> 21);
#endif
#ifdef MODE_DEBUG_EMISSION
color_interp.xyz = vec3(uvec3(cell_data.data[cell_index].emission & 0x1ff, (cell_data.data[cell_index].emission >> 9) & 0x1ff, (cell_data.data[cell_index].emission >> 18) & 0x1ff)) * pow(2.0, float(cell_data.data[cell_index].emission >> 27) - 15.0 - 9.0);
#endif
#ifdef MODE_DEBUG_COLOR
color_interp.xyz = unpackUnorm4x8(cell_data.data[cell_index].albedo).xyz;
#endif
#ifdef MODE_DEBUG_LIGHT
color_interp = texelFetch(sampler3D(color_tex, tex_sampler), ivec3(posu), int(params.level));
color_interp.xyz *= params.dynamic_range;
#endif
float scale = (1 << params.level);
gl_Position = params.projection * vec4((vec3(posu) + vertex) * scale, 1.0);
#ifdef MODE_DEBUG_LIGHT_FULL
if (color_interp.a == 0.0) {
gl_Position = vec4(0.0); //force clip and not draw
}
#else
color_interp.a = params.alpha;
#endif
}
#[fragment]
#version 450
#VERSION_DEFINES
layout(location = 0) in vec4 color_interp;
layout(location = 0) out vec4 frag_color;
void main() {
frag_color = color_interp;
#ifdef MODE_DEBUG_LIGHT_FULL
//there really is no alpha, so use dither
int x = int(gl_FragCoord.x) % 4;
int y = int(gl_FragCoord.y) % 4;
int index = x + y * 4;
// 4x4 ordered (Bayer) dither threshold table, indexed as x + y * 4.
const float dither_limits[16] = float[](
0.0625, 0.5625, 0.1875, 0.6875,
0.8125, 0.3125, 0.9375, 0.4375,
0.25, 0.75, 0.125, 0.625,
1.0, 0.5, 0.875, 0.375);
float limit = dither_limits[index];
if (frag_color.a < limit) {
discard;
}
#endif
}

View File

@@ -0,0 +1,180 @@
#[compute]
#version 450
#VERSION_DEFINES
layout(local_size_x = 4, local_size_y = 4, local_size_z = 4) in;
#define MAX_DISTANCE 100000.0
#define NO_CHILDREN 0xFFFFFFFF
struct CellChildren {
uint children[8];
};
layout(set = 0, binding = 1, std430) buffer CellChildrenBuffer {
CellChildren data[];
}
cell_children;
struct CellData {
uint position; // xyz 10 bits
uint albedo; //rgb albedo
uint emission; //rgb normalized with e as multiplier
uint normal; //RGB normal encoded
};
layout(set = 0, binding = 2, std430) buffer CellDataBuffer {
CellData data[];
}
cell_data;
layout(r8ui, set = 0, binding = 3) uniform restrict writeonly uimage3D sdf_tex;
layout(push_constant, std430) uniform Params {
uint offset;
uint end;
uint pad0;
uint pad1;
}
params;
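// Brute-force SDF seeding: each invocation computes the distance from its voxel to the
// nearest solid cell in the [params.offset, params.end) slice and stores it as an 8-bit
// value, where 0 marks the cell itself as solid and N > 0 encodes a conservative distance
// of N - 1 cells to the nearest surface.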
void main() {
vec3 pos = vec3(gl_GlobalInvocationID);
float closest_dist = MAX_DISTANCE;
for (uint i = params.offset; i < params.end; i++) {
vec3 posu = vec3(uvec3(cell_data.data[i].position & 0x7FF, (cell_data.data[i].position >> 11) & 0x3FF, cell_data.data[i].position >> 21));
float dist = length(pos - posu);
if (dist < closest_dist) {
closest_dist = dist;
}
}
uint dist_8;
if (closest_dist < 0.0001) { // same cell
dist_8 = 0; //equals to -1
} else {
dist_8 = clamp(uint(closest_dist), 0, 254) + 1; //conservative, 0 is 1, so <1 is considered solid
}
imageStore(sdf_tex, ivec3(gl_GlobalInvocationID), uvec4(dist_8));
//imageStore(sdf_tex,pos,uvec4(pos*2,0));
}
#if 0
layout(push_constant, std430) uniform Params {
ivec3 limits;
uint stack_size;
}
params;
float distance_to_aabb(ivec3 pos, ivec3 aabb_pos, ivec3 aabb_size) {
vec3 delta = vec3(max(ivec3(0), max(aabb_pos - pos, pos - (aabb_pos + aabb_size - ivec3(1)))));
return length(delta);
}
void main() {
ivec3 pos = ivec3(gl_GlobalInvocationID);
uint stack[10] = uint[](0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
uint stack_indices[10] = uint[](0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
ivec3 stack_positions[10] = ivec3[](ivec3(0), ivec3(0), ivec3(0), ivec3(0), ivec3(0), ivec3(0), ivec3(0), ivec3(0), ivec3(0), ivec3(0));
const uint cell_orders[8] = uint[](
0x11f58d1,
0xe2e70a,
0xd47463,
0xbb829c,
0x8d11f5,
0x70ae2e,
0x463d47,
0x29cbb8);
bool cell_found = false;
bool cell_found_exact = false;
ivec3 closest_cell_pos;
float closest_distance = MAX_DISTANCE;
int stack_pos = 0;
while (true) {
uint index = stack_indices[stack_pos] >> 24;
if (index == 8) {
//go up
if (stack_pos == 0) {
break; //done going through octree
}
stack_pos--;
continue;
}
stack_indices[stack_pos] = (stack_indices[stack_pos] & ((1 << 24) - 1)) | ((index + 1) << 24);
uint cell_index = (stack_indices[stack_pos] >> (index * 3)) & 0x7;
uint child_cell = cell_children.data[stack[stack_pos]].children[cell_index];
if (child_cell == NO_CHILDREN) {
continue;
}
ivec3 child_cell_size = params.limits >> (stack_pos + 1);
ivec3 child_cell_pos = stack_positions[stack_pos];
child_cell_pos += mix(ivec3(0), child_cell_size, bvec3(uvec3(index & 1, index & 2, index & 4) != uvec3(0)));
bool is_leaf = stack_pos == (params.stack_size - 2);
if (child_cell_pos == pos && is_leaf) {
//we may actually end up in the exact cell.
//if this happens, just abort
cell_found_exact = true;
break;
}
if (cell_found) {
//discard by distance
float distance = distance_to_aabb(pos, child_cell_pos, child_cell_size);
if (distance >= closest_distance) {
continue; //pointless, just test next child
} else if (is_leaf) {
//closer than what we have AND end of stack, save and continue
closest_cell_pos = child_cell_pos;
closest_distance = distance;
continue;
}
} else if (is_leaf) {
//first solid cell we find, save and continue
closest_distance = distance_to_aabb(pos, child_cell_pos, child_cell_size);
closest_cell_pos = child_cell_pos;
cell_found = true;
continue;
}
bvec3 direction = greaterThan((pos - (child_cell_pos + (child_cell_size >> 1))), ivec3(0));
uint cell_order = 0;
cell_order |= mix(0, 1, direction.x);
cell_order |= mix(0, 2, direction.y);
cell_order |= mix(0, 4, direction.z);
stack[stack_pos + 1] = child_cell;
stack_indices[stack_pos + 1] = cell_orders[cell_order]; //start counting
stack_positions[stack_pos + 1] = child_cell_pos;
stack_pos++; //go up stack
}
uint dist_8;
if (cell_found_exact) {
dist_8 = 0; //equals to -1
} else {
float closest_distance = length(vec3(pos - closest_cell_pos));
dist_8 = clamp(uint(closest_distance), 0, 254) + 1; //conservative, 0 is 1, so <1 is considered solid
}
imageStore(sdf_tex, pos, uvec4(dist_8));
}
#endif

View File

@@ -0,0 +1,18 @@
#!/usr/bin/env python
from misc.utility.scons_hints import *
Import("env")
if "RD_GLSL" in env["BUILDERS"]:
# find all include files
gl_include_files = [str(f) for f in Glob("*_inc.glsl")] + [str(f) for f in Glob("../*_inc.glsl")]
# find all shader code (all glsl files excluding our include files)
glsl_files = [str(f) for f in Glob("*.glsl") if str(f) not in gl_include_files]
# make sure we recompile shaders if include files change
env.Depends([f + ".gen.h" for f in glsl_files], gl_include_files + ["#glsl_builders.py"])
# compile shaders
for glsl_file in glsl_files:
env.RD_GLSL(glsl_file)

View File

@@ -0,0 +1,43 @@
#[compute]
#version 450
#VERSION_DEFINES
layout(local_size_x = 8, local_size_y = 8, local_size_z = 1) in;
layout(r8, set = 0, binding = 0) uniform restrict writeonly image2D current_image;
// This shader is used to generate a "best fit normal texture" as described by:
// https://advances.realtimerendering.com/s2010/Kaplanyan-CryEngine3(SIGGRAPH%202010%20Advanced%20RealTime%20Rendering%20Course).pdf
// This texture tells you what length of normal can be used to store a unit vector
// with the least amount of error.
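// Illustrative sketch (not part of this shader): at run time a renderer could use the
// resulting texture to rescale a unit normal before storing it in an 8-bit buffer, along
// the lines of:
//   float scale = texture(best_fit_normal_tex, bfn_uv(normal)).r;
//   vec3 encoded = normal * scale * 0.5 + 0.5;
// where best_fit_normal_tex and bfn_uv are placeholders for however the lookup is bound and
// parameterized elsewhere, not names defined in this file.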
vec3 quantize(vec3 c) {
return round(clamp(c * 0.5 + 0.5, 0.0, 1.0) * 255.0) * (1.0 / 255.0) * 2.0 - 1.0;
}
float find_minimum_error(vec3 normal) {
float min_error = 100000.0;
float t_best = 0.0;
for (float nstep = 1.5; nstep < 127.5; ++nstep) {
float t = nstep / 127.5;
vec3 vp = normal * t;
vec3 quantizedp = quantize(vp);
vec3 vdiff = (quantizedp - vp) / t;
float error = max(abs(vdiff.x), max(abs(vdiff.y), abs(vdiff.z)));
if (error < min_error) {
min_error = error;
t_best = t;
}
}
return t_best;
}
void main() {
vec2 uv = vec2(gl_GlobalInvocationID.xy) * vec2(1.0 / 1024.0) + vec2(0.5 / 1024.0);
uv.y *= uv.x;
vec3 dir = vec3(uv.x, uv.y, 1.0);
imageStore(current_image, ivec2(gl_GlobalInvocationID.xy), vec4(find_minimum_error(dir), 1.0, 1.0, 1.0));
}

View File

@@ -0,0 +1,156 @@
#[compute]
#version 450
// References:
// https://www.gamedevs.org/uploads/real-shading-in-unreal-engine-4.pdf
// https://google.github.io/filament/Filament.html
// https://learnopengl.com/PBR/IBL/Specular-IBL
layout(local_size_x = 8, local_size_y = 8, local_size_z = 1) in;
layout(rgba16f, set = 0, binding = 0) uniform restrict writeonly image2D current_image;
#define M_PI 3.14159265359
#define SAMPLE_COUNT 1024
#define SIZE 128
#define saturate(x) clamp(x, 0, 1)
// http://holger.dammertz.org/stuff/notes_HammersleyOnHemisphere.html
// efficient VanDerCorpus calculation
float radical_inverse_vdc(uint bits) {
bits = (bits << 16u) | (bits >> 16u);
bits = ((bits & 0x55555555u) << 1u) | ((bits & 0xAAAAAAAAu) >> 1u);
bits = ((bits & 0x33333333u) << 2u) | ((bits & 0xCCCCCCCCu) >> 2u);
bits = ((bits & 0x0F0F0F0Fu) << 4u) | ((bits & 0xF0F0F0F0u) >> 4u);
bits = ((bits & 0x00FF00FFu) << 8u) | ((bits & 0xFF00FF00u) >> 8u);
return float(bits) * 2.3283064365386963e-10; // / 0x100000000
}
vec2 hammersley(uint i, float n) {
return vec2(float(i) / n, radical_inverse_vdc(i));
}
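// hammersley(i, N) yields the i-th point of the 2D Hammersley set: x = i / N and y = the
// bit reversal of i read as a binary fraction. For example, with N = 1024:
// i = 1 -> (1/1024, 0.5), i = 2 -> (2/1024, 0.25), i = 3 -> (3/1024, 0.75).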
vec3 importance_sample_ggx(vec2 Xi, vec3 N, float roughness) {
float a = roughness * roughness;
float phi = 2.0 * M_PI * Xi.x;
float cosTheta = sqrt((1.0 - Xi.y) / (1.0 + (a * a - 1.0) * Xi.y));
float sinTheta = sqrt(1.0 - cosTheta * cosTheta);
// from spherical coordinates to cartesian coordinates - halfway vector
vec3 H;
H.x = cos(phi) * sinTheta;
H.y = sin(phi) * sinTheta;
H.z = cosTheta;
// from tangent-space H vector to world-space sample vector
vec3 up = abs(N.z) < 0.999 ? vec3(0.0, 0.0, 1.0) : vec3(1.0, 0.0, 0.0);
vec3 tangent = normalize(cross(up, N));
vec3 bitangent = cross(N, tangent);
vec3 sampleVec = tangent * H.x + bitangent * H.y + N * H.z;
return normalize(sampleVec);
}
float geometry_schlick_ggx(float NdotV, float roughness) {
// note that we use a different k for IBL
float a = roughness;
float k = (a * a) / 2.0;
float nom = NdotV;
float denom = NdotV * (1.0 - k) + k;
return nom / denom;
}
float geometry_smith(vec3 N, vec3 V, vec3 L, float roughness) {
float NdotV = saturate(dot(N, V));
float NdotL = saturate(dot(N, L));
float ggx2 = geometry_schlick_ggx(NdotV, roughness);
float ggx1 = geometry_schlick_ggx(NdotL, roughness);
return ggx1 * ggx2;
}
vec3 importance_uniform_sample(vec2 u) {
float phi = 2.0f * M_PI * u.x;
float cosTheta = 1 - u.y;
float sinTheta = sqrt(1 - cosTheta * cosTheta);
return vec3(sinTheta * cos(phi), sinTheta * sin(phi), cosTheta);
}
float distribution_charlie(float NoH, float roughness) {
// Estevez and Kulla 2017, "Production Friendly Microfacet Sheen BRDF"
float a = roughness * roughness;
float invAlpha = 1 / a;
float cos2h = NoH * NoH;
float sin2h = 1 - cos2h;
return (2.0f + invAlpha) * pow(sin2h, invAlpha * 0.5f) / (2.0f * M_PI);
}
float visibility_ashikhmin(float NoV, float NoL) {
// Neubelt and Pettineo 2013, "Crafting a Next-gen Material Pipeline for The Order: 1886"
return 1 / (4 * (NoL + NoV - NoL * NoV));
}
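// integrate_brdfs() Monte Carlo integrates the preintegrated DFG terms: over SAMPLE_COUNT
// GGX-importance-sampled directions it accumulates A = integral of Fc * GVis and
// B = integral of GVis, with Fc = (1 - v.h)^5. Since Schlick Fresnel is
// F = f0 + (1 - f0) * Fc, the environment BRDF can later be reconstructed as
// (1 - f0) * A + f0 * B, and B alone is the response at f0 = 1 used for multiscatter energy
// compensation. C is the equivalent preintegration of the Charlie sheen distribution with
// Ashikhmin visibility, using uniform hemisphere samples, for the cloth shading model.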
void integrate_brdfs(float n_dot_v, float roughness, out vec2 brdf, out float cloth_brdf) {
vec3 v = vec3(sqrt(1.0 - n_dot_v * n_dot_v), 0, n_dot_v);
vec3 n = vec3(0.0f, 0.0f, 1.0f);
float A = 0.0f;
float B = 0.0f;
float C = 0.0f;
for (uint i = 0; i < SAMPLE_COUNT; ++i) {
vec2 Xi = hammersley(i, SAMPLE_COUNT);
vec3 h = importance_sample_ggx(Xi, n, roughness);
vec3 l = normalize(2.0 * dot(v, h) * h - v);
float n_dot_l = saturate(l.z);
float n_dot_h = saturate(h.z);
float v_dot_h = saturate(dot(v, h));
if (n_dot_l > 0.0) {
float G = geometry_smith(n, v, l, roughness);
float G_Vis = (G * v_dot_h) / (n_dot_h * n_dot_v);
float Fc = pow(1.0 - v_dot_h, 5.0);
// LDFG term for multiscattering
// https://google.github.io/filament/Filament.html#toc5.3.4.7
A += Fc * G_Vis;
B += G_Vis;
}
// Cloth BRDF calculations
// https://github.com/google/filament/blob/main/libs/ibl/src/CubemapIBL.cpp#L856-L874
vec3 h_cloth = importance_uniform_sample(Xi);
vec3 l_cloth = normalize(2.0 * dot(v, h_cloth) * h_cloth - v);
float n_dot_l_cloth = saturate(l_cloth.z);
float n_dot_h_cloth = saturate(h_cloth.z);
float v_dot_h_cloth = saturate(dot(v, h_cloth));
if (n_dot_l_cloth > 0.0) {
float v_cloth = visibility_ashikhmin(n_dot_v, n_dot_l_cloth);
float d_cloth = distribution_charlie(n_dot_h_cloth, roughness);
C += v_cloth * d_cloth * n_dot_l_cloth * v_dot_h_cloth;
}
}
A /= float(SAMPLE_COUNT);
B /= float(SAMPLE_COUNT);
C *= (4.0 * 2.0 * M_PI / SAMPLE_COUNT);
brdf = vec2(A, B);
cloth_brdf = C;
}
void main() {
ivec2 pos = ivec2(gl_GlobalInvocationID.xy);
float roughness = float(pos.y + 0.5f) / SIZE;
float NdotV = float(pos.x + 0.5f) / SIZE;
vec2 brdf;
float cloth_brdf;
integrate_brdfs(NdotV, roughness, brdf, cloth_brdf);
ivec2 out_pos = ivec2(pos.x, (SIZE - 1) - pos.y);
imageStore(current_image, out_pos, vec4(brdf, cloth_brdf, 1.0));
}

View File

@@ -0,0 +1,468 @@
#define M_PI 3.14159265359
#define M_TAU 6.28318530718
#define ROUGHNESS_MAX_LOD 5
#define MAX_VOXEL_GI_INSTANCES 8
#define MAX_VIEWS 2
#extension GL_KHR_shader_subgroup_ballot : enable
#extension GL_KHR_shader_subgroup_arithmetic : enable
#include "../cluster_data_inc.glsl"
#include "../decal_data_inc.glsl"
#include "../scene_data_inc.glsl"
#if !defined(MODE_RENDER_DEPTH) || defined(MODE_RENDER_MATERIAL) || defined(MODE_RENDER_SDF) || defined(MODE_RENDER_NORMAL_ROUGHNESS) || defined(MODE_RENDER_VOXEL_GI) || defined(TANGENT_USED) || defined(NORMAL_MAP_USED) || defined(BENT_NORMAL_MAP_USED) || defined(LIGHT_ANISOTROPY_USED)
#ifndef NORMAL_USED
#define NORMAL_USED
#endif
#endif
#if !defined(TANGENT_USED) && (defined(NORMAL_MAP_USED) || defined(BENT_NORMAL_MAP_USED) || defined(LIGHT_ANISOTROPY_USED))
#define TANGENT_USED
#endif
layout(push_constant, std430) uniform DrawCall {
uint instance_index;
uint uv_offset;
uint multimesh_motion_vectors_current_offset;
uint multimesh_motion_vectors_previous_offset;
#ifdef UBERSHADER
uint sc_packed_0;
uint sc_packed_1;
uint sc_packed_2;
uint uc_packed_0;
#endif
}
draw_call;
/* Specialization Constants */
#ifdef UBERSHADER
#define POLYGON_CULL_DISABLED 0
#define POLYGON_CULL_FRONT 1
#define POLYGON_CULL_BACK 2
// Pull the constants from the draw call's push constants.
uint sc_packed_0() {
return draw_call.sc_packed_0;
}
uint sc_packed_1() {
return draw_call.sc_packed_1;
}
uint uc_cull_mode() {
return (draw_call.uc_packed_0 >> 0) & 3U;
}
#else
// Pull the constants from the pipeline's specialization constants.
layout(constant_id = 0) const uint pso_sc_packed_0 = 0;
layout(constant_id = 1) const uint pso_sc_packed_1 = 0;
uint sc_packed_0() {
return pso_sc_packed_0;
}
uint sc_packed_1() {
return pso_sc_packed_1;
}
#endif
bool sc_use_forward_gi() {
return ((sc_packed_0() >> 0) & 1U) != 0;
}
bool sc_use_light_projector() {
return ((sc_packed_0() >> 1) & 1U) != 0;
}
bool sc_use_light_soft_shadows() {
return ((sc_packed_0() >> 2) & 1U) != 0;
}
bool sc_use_directional_soft_shadows() {
return ((sc_packed_0() >> 3) & 1U) != 0;
}
bool sc_decal_use_mipmaps() {
return ((sc_packed_0() >> 4) & 1U) != 0;
}
bool sc_projector_use_mipmaps() {
return ((sc_packed_0() >> 5) & 1U) != 0;
}
bool sc_use_depth_fog() {
return ((sc_packed_0() >> 6) & 1U) != 0;
}
bool sc_use_lightmap_bicubic_filter() {
return ((sc_packed_0() >> 7) & 1U) != 0;
}
uint sc_soft_shadow_samples() {
return (sc_packed_0() >> 8) & 63U;
}
uint sc_penumbra_shadow_samples() {
return (sc_packed_0() >> 14) & 63U;
}
uint sc_directional_soft_shadow_samples() {
return (sc_packed_0() >> 20) & 63U;
}
uint sc_directional_penumbra_shadow_samples() {
return (sc_packed_0() >> 26) & 63U;
}
bool sc_multimesh() {
return ((sc_packed_1() >> 0) & 1U) != 0;
}
bool sc_multimesh_format_2d() {
return ((sc_packed_1() >> 1) & 1U) != 0;
}
bool sc_multimesh_has_color() {
return ((sc_packed_1() >> 2) & 1U) != 0;
}
bool sc_multimesh_has_custom_data() {
return ((sc_packed_1() >> 3) & 1U) != 0;
}
float sc_luminance_multiplier() {
// Not used in clustered renderer but we share some code with the mobile renderer that requires this.
return 1.0;
}
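// Packed specialization constant layout decoded by the accessors above (forward clustered):
// sc_packed_0 bits 0-7 are boolean feature toggles (forward GI, light projector, light and
// directional soft shadows, decal and projector mipmaps, depth fog, bicubic lightmap filter),
// and bits 8-31 hold four 6-bit sample counts for soft and penumbra shadows.
// sc_packed_1 bits 0-3 are the multimesh flags (enabled, 2D format, color, custom data).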
#define SDFGI_MAX_CASCADES 8
/* Set 0: Base Pass (never changes) */
#include "../light_data_inc.glsl"
layout(set = 0, binding = 2) uniform sampler shadow_sampler;
#define INSTANCE_FLAGS_DYNAMIC (1 << 3)
#define INSTANCE_FLAGS_NON_UNIFORM_SCALE (1 << 4)
#define INSTANCE_FLAGS_USE_GI_BUFFERS (1 << 5)
#define INSTANCE_FLAGS_USE_SDFGI (1 << 6)
#define INSTANCE_FLAGS_USE_LIGHTMAP_CAPTURE (1 << 7)
#define INSTANCE_FLAGS_USE_LIGHTMAP (1 << 8)
#define INSTANCE_FLAGS_USE_SH_LIGHTMAP (1 << 9)
#define INSTANCE_FLAGS_USE_VOXEL_GI (1 << 10)
#define INSTANCE_FLAGS_PARTICLES (1 << 11)
#define INSTANCE_FLAGS_PARTICLE_TRAIL_SHIFT 16
#define INSTANCE_FLAGS_FADE_SHIFT 24
//3 bits of stride
#define INSTANCE_FLAGS_PARTICLE_TRAIL_MASK 0xFF
#define SCREEN_SPACE_EFFECTS_FLAGS_USE_SSAO 1
#define SCREEN_SPACE_EFFECTS_FLAGS_USE_SSIL 2
layout(set = 0, binding = 3, std430) restrict readonly buffer OmniLights {
LightData data[];
}
omni_lights;
layout(set = 0, binding = 4, std430) restrict readonly buffer SpotLights {
LightData data[];
}
spot_lights;
layout(set = 0, binding = 5, std430) restrict readonly buffer ReflectionProbeData {
ReflectionData data[];
}
reflections;
layout(set = 0, binding = 6, std140) uniform DirectionalLights {
DirectionalLightData data[MAX_DIRECTIONAL_LIGHT_DATA_STRUCTS];
}
directional_lights;
#define LIGHTMAP_FLAG_USE_DIRECTION 1
#define LIGHTMAP_FLAG_USE_SPECULAR_DIRECTION 2
#define LIGHTMAP_SHADOWMASK_MODE_NONE 0
#define LIGHTMAP_SHADOWMASK_MODE_REPLACE 1
#define LIGHTMAP_SHADOWMASK_MODE_OVERLAY 2
#define LIGHTMAP_SHADOWMASK_MODE_ONLY 3
struct Lightmap {
mat3 normal_xform;
vec2 light_texture_size;
float exposure_normalization;
uint flags;
};
layout(set = 0, binding = 7, std140) restrict readonly buffer Lightmaps {
Lightmap data[];
}
lightmaps;
struct LightmapCapture {
vec4 sh[9];
};
layout(set = 0, binding = 8, std140) restrict readonly buffer LightmapCaptures {
LightmapCapture data[];
}
lightmap_captures;
layout(set = 0, binding = 9) uniform texture2D decal_atlas;
layout(set = 0, binding = 10) uniform texture2D decal_atlas_srgb;
layout(set = 0, binding = 11, std430) restrict readonly buffer Decals {
DecalData data[];
}
decals;
layout(set = 0, binding = 12, std430) restrict readonly buffer GlobalShaderUniformData {
vec4 data[];
}
global_shader_uniforms;
struct SDFVoxelGICascadeData {
vec3 position;
float to_probe;
ivec3 probe_world_offset;
float to_cell; // 1/bounds * grid_size
vec3 pad;
float exposure_normalization;
};
layout(set = 0, binding = 13, std140) uniform SDFGI {
vec3 grid_size;
uint max_cascades;
bool use_occlusion;
int probe_axis_size;
float probe_to_uvw;
float normal_bias;
vec3 lightprobe_tex_pixel_size;
float energy;
vec3 lightprobe_uv_offset;
float y_mult;
vec3 occlusion_clamp;
uint pad3;
vec3 occlusion_renormalize;
uint pad4;
vec3 cascade_probe_size;
uint pad5;
SDFVoxelGICascadeData cascades[SDFGI_MAX_CASCADES];
}
sdfgi;
layout(set = 0, binding = 14) uniform sampler DEFAULT_SAMPLER_LINEAR_WITH_MIPMAPS_CLAMP;
layout(set = 0, binding = 15) uniform texture2D best_fit_normal_texture;
layout(set = 0, binding = 16) uniform texture2D dfg;
/* Set 1: Render Pass (changes per render pass) */
layout(set = 1, binding = 0, std140) uniform SceneDataBlock {
SceneData data;
SceneData prev_data;
}
scene_data_block;
struct ImplementationData {
uint cluster_shift;
uint cluster_width;
uint cluster_type_size;
uint max_cluster_element_count_div_32;
uint ss_effects_flags;
float ssao_light_affect;
float ssao_ao_affect;
uint pad1;
mat4 sdf_to_bounds;
ivec3 sdf_offset;
uint pad2;
ivec3 sdf_size;
bool gi_upscale_for_msaa;
bool volumetric_fog_enabled;
float volumetric_fog_inv_length;
float volumetric_fog_detail_spread;
uint volumetric_fog_pad;
};
layout(set = 1, binding = 1, std140) uniform ImplementationDataBlock {
ImplementationData data;
}
implementation_data_block;
#define implementation_data implementation_data_block.data
struct InstanceData {
mat4 transform;
mat4 prev_transform;
uint flags;
uint instance_uniforms_ofs; //base offset in global buffer for instance variables
uint gi_offset; //GI information when using lightmapping (VCT or lightmap index)
uint layer_mask;
vec4 lightmap_uv_scale;
vec4 compressed_aabb_position_pad; // Only .xyz is used. .w is padding.
vec4 compressed_aabb_size_pad; // Only .xyz is used. .w is padding.
vec4 uv_scale;
};
layout(set = 1, binding = 2, std430) buffer restrict readonly InstanceDataBuffer {
InstanceData data[];
}
instances;
#ifdef USE_RADIANCE_CUBEMAP_ARRAY
layout(set = 1, binding = 3) uniform textureCubeArray radiance_cubemap;
#else
layout(set = 1, binding = 3) uniform textureCube radiance_cubemap;
#endif
layout(set = 1, binding = 4) uniform textureCubeArray reflection_atlas;
layout(set = 1, binding = 5) uniform texture2D shadow_atlas;
layout(set = 1, binding = 6) uniform texture2D directional_shadow_atlas;
layout(set = 1, binding = 7) uniform texture2DArray lightmap_textures[MAX_LIGHTMAP_TEXTURES * 2];
layout(set = 1, binding = 8) uniform texture3D voxel_gi_textures[MAX_VOXEL_GI_INSTANCES];
layout(set = 1, binding = 9, std430) buffer restrict readonly ClusterBuffer {
uint data[];
}
cluster_buffer;
layout(set = 1, binding = 10) uniform sampler decal_sampler;
layout(set = 1, binding = 11) uniform sampler light_projector_sampler;
layout(set = 1, binding = 12 + 0) uniform sampler SAMPLER_NEAREST_CLAMP;
layout(set = 1, binding = 12 + 1) uniform sampler SAMPLER_LINEAR_CLAMP;
layout(set = 1, binding = 12 + 2) uniform sampler SAMPLER_NEAREST_WITH_MIPMAPS_CLAMP;
layout(set = 1, binding = 12 + 3) uniform sampler SAMPLER_LINEAR_WITH_MIPMAPS_CLAMP;
layout(set = 1, binding = 12 + 4) uniform sampler SAMPLER_NEAREST_WITH_MIPMAPS_ANISOTROPIC_CLAMP;
layout(set = 1, binding = 12 + 5) uniform sampler SAMPLER_LINEAR_WITH_MIPMAPS_ANISOTROPIC_CLAMP;
layout(set = 1, binding = 12 + 6) uniform sampler SAMPLER_NEAREST_REPEAT;
layout(set = 1, binding = 12 + 7) uniform sampler SAMPLER_LINEAR_REPEAT;
layout(set = 1, binding = 12 + 8) uniform sampler SAMPLER_NEAREST_WITH_MIPMAPS_REPEAT;
layout(set = 1, binding = 12 + 9) uniform sampler SAMPLER_LINEAR_WITH_MIPMAPS_REPEAT;
layout(set = 1, binding = 12 + 10) uniform sampler SAMPLER_NEAREST_WITH_MIPMAPS_ANISOTROPIC_REPEAT;
layout(set = 1, binding = 12 + 11) uniform sampler SAMPLER_LINEAR_WITH_MIPMAPS_ANISOTROPIC_REPEAT;
#ifdef MODE_RENDER_SDF
layout(r16ui, set = 1, binding = 24) uniform restrict writeonly uimage3D albedo_volume_grid;
layout(r32ui, set = 1, binding = 25) uniform restrict writeonly uimage3D emission_grid;
layout(r32ui, set = 1, binding = 26) uniform restrict writeonly uimage3D emission_aniso_grid;
layout(r32ui, set = 1, binding = 27) uniform restrict uimage3D geom_facing_grid;
//still need to be present for shaders that use it, so remap them to something
#define depth_buffer shadow_atlas
#define color_buffer shadow_atlas
#define normal_roughness_buffer shadow_atlas
#define multiviewSampler sampler2D
#else
#ifdef USE_MULTIVIEW
layout(set = 1, binding = 24) uniform texture2DArray depth_buffer;
layout(set = 1, binding = 25) uniform texture2DArray color_buffer;
layout(set = 1, binding = 26) uniform texture2DArray normal_roughness_buffer;
layout(set = 1, binding = 27) uniform texture2DArray ao_buffer;
layout(set = 1, binding = 28) uniform texture2DArray ambient_buffer;
layout(set = 1, binding = 29) uniform texture2DArray reflection_buffer;
#define multiviewSampler sampler2DArray
#else // USE_MULTIVIEW
layout(set = 1, binding = 24) uniform texture2D depth_buffer;
layout(set = 1, binding = 25) uniform texture2D color_buffer;
layout(set = 1, binding = 26) uniform texture2D normal_roughness_buffer;
layout(set = 1, binding = 27) uniform texture2D ao_buffer;
layout(set = 1, binding = 28) uniform texture2D ambient_buffer;
layout(set = 1, binding = 29) uniform texture2D reflection_buffer;
#define multiviewSampler sampler2D
#endif
layout(set = 1, binding = 30) uniform texture2DArray sdfgi_lightprobe_texture;
layout(set = 1, binding = 31) uniform texture3D sdfgi_occlusion_cascades;
struct VoxelGIData {
mat4 xform; // 64 - 64
vec3 bounds; // 12 - 76
float dynamic_range; // 4 - 80
float bias; // 4 - 84
float normal_bias; // 4 - 88
bool blend_ambient; // 4 - 92
uint mipmaps; // 4 - 96
vec3 pad; // 12 - 108
float exposure_normalization; // 4 - 112
};
layout(set = 1, binding = 32, std140) uniform VoxelGIs {
VoxelGIData data[MAX_VOXEL_GI_INSTANCES];
}
voxel_gi_instances;
layout(set = 1, binding = 33) uniform texture3D volumetric_fog_texture;
#ifdef USE_MULTIVIEW
layout(set = 1, binding = 34) uniform texture2DArray ssil_buffer;
#else
layout(set = 1, binding = 34) uniform texture2D ssil_buffer;
#endif // USE_MULTIVIEW
#endif
vec4 normal_roughness_compatibility(vec4 p_normal_roughness) {
float roughness = p_normal_roughness.w;
if (roughness > 0.5) {
roughness = 1.0 - roughness;
}
roughness /= (127.0 / 255.0);
return vec4(normalize(p_normal_roughness.xyz * 2.0 - 1.0) * 0.5 + 0.5, roughness);
}
// https://google.github.io/filament/Filament.html#toc5.3.4.7
// Note: The roughness value is inverted
vec3 prefiltered_dfg(float lod, float NoV) {
return textureLod(sampler2D(dfg, SAMPLER_LINEAR_CLAMP), vec2(NoV, 1.0 - lod), 0.0).rgb;
}
// Compute multiscatter compensation
// https://google.github.io/filament/Filament.html#listing_energycompensationimpl
vec3 get_energy_compensation(vec3 f0, float env) {
return 1.0 + f0 * (1.0 / env - 1.0);
}
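// Illustrative sketch (not part of this header; the real call sites live in the shared
// lighting code): the two helpers above are meant to combine along the lines of
//   vec3 dfg = prefiltered_dfg(roughness, NoV);
//   vec3 energy_compensation = get_energy_compensation(f0, dfg.g);
//   specular_light *= energy_compensation;
// dfg.g holds the unweighted GVis integral written by dfg.glsl, i.e. the single-scatter
// response at f0 = 1, so rough metals regain the energy a single-scattering GGX lobe loses.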
/* Set 2 Skeleton & Instancing (can change per item) */
layout(set = 2, binding = 0, std430) restrict readonly buffer Transforms {
vec4 data[];
}
transforms;
/* Set 3 User Material */

View File

@@ -0,0 +1,18 @@
#!/usr/bin/env python
from misc.utility.scons_hints import *
Import("env")
if "RD_GLSL" in env["BUILDERS"]:
# find all include files
gl_include_files = [str(f) for f in Glob("*_inc.glsl")] + [str(f) for f in Glob("../*_inc.glsl")]
# find all shader code (all glsl files excluding our include files)
glsl_files = [str(f) for f in Glob("*.glsl") if str(f) not in gl_include_files]
# make sure we recompile shaders if include files change
env.Depends([f + ".gen.h" for f in glsl_files], gl_include_files + ["#glsl_builders.py"])
# compile shaders
for glsl_file in glsl_files:
env.RD_GLSL(glsl_file)

File diff suppressed because it is too large

View File

@@ -0,0 +1,382 @@
#define M_PI 3.14159265359
#define MAX_VIEWS 2
#include "../decal_data_inc.glsl"
#include "../scene_data_inc.glsl"
#if !defined(MODE_RENDER_DEPTH) || defined(MODE_RENDER_MATERIAL) || defined(TANGENT_USED) || defined(NORMAL_MAP_USED) || defined(BENT_NORMAL_MAP_USED) || defined(LIGHT_ANISOTROPY_USED)
#ifndef NORMAL_USED
#define NORMAL_USED
#endif
#endif
#define USING_MOBILE_RENDERER
layout(push_constant, std430) uniform DrawCall {
uint uv_offset;
uint instance_index;
uint multimesh_motion_vectors_current_offset;
uint multimesh_motion_vectors_previous_offset;
#ifdef UBERSHADER
uint sc_packed_0;
uint sc_packed_1;
float sc_packed_2;
uint uc_packed_0;
#endif
}
draw_call;
/* Specialization Constants */
#ifdef UBERSHADER
#define POLYGON_CULL_DISABLED 0
#define POLYGON_CULL_FRONT 1
#define POLYGON_CULL_BACK 2
// Pull the constants from the draw call's push constants.
uint sc_packed_0() {
return draw_call.sc_packed_0;
}
uint sc_packed_1() {
return draw_call.sc_packed_1;
}
float sc_packed_2() {
return draw_call.sc_packed_2;
}
uint uc_cull_mode() {
return (draw_call.uc_packed_0 >> 0) & 3U;
}
#else
// Pull the constants from the pipeline's specialization constants.
layout(constant_id = 0) const uint pso_sc_packed_0 = 0;
layout(constant_id = 1) const uint pso_sc_packed_1 = 0;
layout(constant_id = 2) const float pso_sc_packed_2 = 2.0;
uint sc_packed_0() {
return pso_sc_packed_0;
}
uint sc_packed_1() {
return pso_sc_packed_1;
}
float sc_packed_2() {
return pso_sc_packed_2;
}
#endif
bool sc_use_light_projector() {
return ((sc_packed_0() >> 0) & 1U) != 0;
}
bool sc_use_light_soft_shadows() {
return ((sc_packed_0() >> 1) & 1U) != 0;
}
bool sc_use_directional_soft_shadows() {
return ((sc_packed_0() >> 2) & 1U) != 0;
}
bool sc_decal_use_mipmaps() {
return ((sc_packed_0() >> 3) & 1U) != 0;
}
bool sc_projector_use_mipmaps() {
return ((sc_packed_0() >> 4) & 1U) != 0;
}
bool sc_disable_fog() {
return ((sc_packed_0() >> 5) & 1U) != 0;
}
bool sc_use_depth_fog() {
return ((sc_packed_0() >> 6) & 1U) != 0;
}
bool sc_use_fog_aerial_perspective() {
return ((sc_packed_0() >> 7) & 1U) != 0;
}
bool sc_use_fog_sun_scatter() {
return ((sc_packed_0() >> 8) & 1U) != 0;
}
bool sc_use_fog_height_density() {
return ((sc_packed_0() >> 9) & 1U) != 0;
}
bool sc_use_lightmap_bicubic_filter() {
return ((sc_packed_0() >> 10) & 1U) != 0;
}
bool sc_multimesh() {
return ((sc_packed_0() >> 11) & 1U) != 0;
}
bool sc_multimesh_format_2d() {
return ((sc_packed_0() >> 12) & 1U) != 0;
}
bool sc_multimesh_has_color() {
return ((sc_packed_0() >> 13) & 1U) != 0;
}
bool sc_multimesh_has_custom_data() {
return ((sc_packed_0() >> 14) & 1U) != 0;
}
bool sc_scene_use_ambient_cubemap() {
return ((sc_packed_0() >> 15) & 1U) != 0;
}
bool sc_scene_use_reflection_cubemap() {
return ((sc_packed_0() >> 16) & 1U) != 0;
}
bool sc_scene_roughness_limiter_enabled() {
return ((sc_packed_0() >> 17) & 1U) != 0;
}
uint sc_soft_shadow_samples() {
return (sc_packed_0() >> 20) & 63U;
}
uint sc_penumbra_shadow_samples() {
return (sc_packed_0() >> 26) & 63U;
}
uint sc_directional_soft_shadow_samples() {
return (sc_packed_1() >> 0) & 63U;
}
uint sc_directional_penumbra_shadow_samples() {
return (sc_packed_1() >> 6) & 63U;
}
#define SHADER_COUNT_NONE 0
#define SHADER_COUNT_SINGLE 1
#define SHADER_COUNT_MULTIPLE 2
uint option_to_count(uint option, uint bound) {
switch (option) {
case SHADER_COUNT_NONE:
return 0;
case SHADER_COUNT_SINGLE:
return 1;
case SHADER_COUNT_MULTIPLE:
return bound;
}
// Unreachable for the three defined option values; keeps every control path returning a value.
return bound;
}
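// The per-light-type counts are packed as 2-bit options: 0 = none, 1 = exactly one,
// 2 = use the bound passed in by the caller. For example, sc_omni_lights(8) returns 0, 1 or 8
// depending on bits 12-13 of sc_packed_1.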
uint sc_omni_lights(uint bound) {
uint option = (sc_packed_1() >> 12) & 3U;
return option_to_count(option, bound);
}
uint sc_spot_lights(uint bound) {
uint option = (sc_packed_1() >> 14) & 3U;
return option_to_count(option, bound);
}
uint sc_reflection_probes(uint bound) {
uint option = (sc_packed_1() >> 16) & 3U;
return option_to_count(option, bound);
}
uint sc_directional_lights(uint bound) {
uint option = (sc_packed_1() >> 18) & 3U;
return option_to_count(option, bound);
}
uint sc_decals(uint bound) {
if (((sc_packed_1() >> 20) & 1U) != 0) {
return bound;
} else {
return 0;
}
}
bool sc_directional_light_blend_split(uint i) {
return ((sc_packed_1() >> (21 + i)) & 1U) != 0;
}
half sc_luminance_multiplier() {
return half(sc_packed_2());
}
/* Set 0: Base Pass (never changes) */
#include "../light_data_inc.glsl"
layout(set = 0, binding = 2) uniform sampler shadow_sampler;
#define INSTANCE_FLAGS_DYNAMIC (1 << 3)
#define INSTANCE_FLAGS_NON_UNIFORM_SCALE (1 << 4)
#define INSTANCE_FLAGS_USE_GI_BUFFERS (1 << 5)
#define INSTANCE_FLAGS_USE_SDFGI (1 << 6)
#define INSTANCE_FLAGS_USE_LIGHTMAP_CAPTURE (1 << 7)
#define INSTANCE_FLAGS_USE_LIGHTMAP (1 << 8)
#define INSTANCE_FLAGS_USE_SH_LIGHTMAP (1 << 9)
#define INSTANCE_FLAGS_USE_VOXEL_GI (1 << 10)
#define INSTANCE_FLAGS_PARTICLES (1 << 11)
#define INSTANCE_FLAGS_PARTICLE_TRAIL_SHIFT 16
//3 bits of stride
#define INSTANCE_FLAGS_PARTICLE_TRAIL_MASK 0xFF
layout(set = 0, binding = 3, std430) restrict readonly buffer OmniLights {
LightData data[];
}
omni_lights;
layout(set = 0, binding = 4, std430) restrict readonly buffer SpotLights {
LightData data[];
}
spot_lights;
layout(set = 0, binding = 5, std430) restrict readonly buffer ReflectionProbeData {
ReflectionData data[];
}
reflections;
layout(set = 0, binding = 6, std140) uniform DirectionalLights {
DirectionalLightData data[MAX_DIRECTIONAL_LIGHT_DATA_STRUCTS];
}
directional_lights;
#define LIGHTMAP_FLAG_USE_DIRECTION 1
#define LIGHTMAP_FLAG_USE_SPECULAR_DIRECTION 2
#define LIGHTMAP_SHADOWMASK_MODE_NONE 0
#define LIGHTMAP_SHADOWMASK_MODE_REPLACE 1
#define LIGHTMAP_SHADOWMASK_MODE_OVERLAY 2
#define LIGHTMAP_SHADOWMASK_MODE_ONLY 3
struct Lightmap {
mat3 normal_xform;
vec2 light_texture_size;
float exposure_normalization;
uint flags;
};
layout(set = 0, binding = 7, std140) restrict readonly buffer Lightmaps {
Lightmap data[];
}
lightmaps;
struct LightmapCapture {
vec4 sh[9];
};
layout(set = 0, binding = 8, std140) restrict readonly buffer LightmapCaptures {
LightmapCapture data[];
}
lightmap_captures;
layout(set = 0, binding = 9) uniform texture2D decal_atlas;
layout(set = 0, binding = 10) uniform texture2D decal_atlas_srgb;
layout(set = 0, binding = 11, std430) restrict readonly buffer Decals {
DecalData data[];
}
decals;
layout(set = 0, binding = 12, std430) restrict readonly buffer GlobalShaderUniformData {
vec4 data[];
}
global_shader_uniforms;
layout(set = 0, binding = 13) uniform sampler DEFAULT_SAMPLER_LINEAR_WITH_MIPMAPS_CLAMP;
/* Set 1: Render Pass (changes per render pass) */
layout(set = 1, binding = 0, std140) uniform SceneDataBlock {
SceneData data;
SceneData prev_data;
}
scene_data_block;
struct InstanceData {
highp mat4 transform; // 64 - 64
highp mat4 prev_transform;
uint flags; // 04 - 68
uint instance_uniforms_ofs; // Base offset in global buffer for instance variables. // 04 - 72
uint gi_offset; // GI information when using lightmapping (VCT or lightmap index). // 04 - 76
uint layer_mask; // 04 - 80
vec4 lightmap_uv_scale; // 16 - 96 Doubles as uv_offset when needed.
uvec2 reflection_probes; // 08 - 104
uvec2 omni_lights; // 08 - 112
uvec2 spot_lights; // 08 - 120
uvec2 decals; // 08 - 128
vec4 compressed_aabb_position_pad; // 16 - 144 // Only .xyz is used. .w is padding.
vec4 compressed_aabb_size_pad; // 16 - 160 // Only .xyz is used. .w is padding.
vec4 uv_scale; // 16 - 176
};
layout(set = 1, binding = 1, std430) buffer restrict readonly InstanceDataBuffer {
InstanceData data[];
}
instances;
#ifdef USE_RADIANCE_CUBEMAP_ARRAY
layout(set = 1, binding = 2) uniform textureCubeArray radiance_cubemap;
#else
layout(set = 1, binding = 2) uniform textureCube radiance_cubemap;
#endif
layout(set = 1, binding = 3) uniform textureCubeArray reflection_atlas;
layout(set = 1, binding = 4) uniform texture2D shadow_atlas;
layout(set = 1, binding = 5) uniform texture2D directional_shadow_atlas;
// This needs to change to provide just the lightmap we're using.
layout(set = 1, binding = 6) uniform texture2DArray lightmap_textures[MAX_LIGHTMAP_TEXTURES * 2];
#ifdef USE_MULTIVIEW
layout(set = 1, binding = 9) uniform texture2DArray depth_buffer;
layout(set = 1, binding = 10) uniform texture2DArray color_buffer;
#define multiviewSampler sampler2DArray
#else
layout(set = 1, binding = 9) uniform texture2D depth_buffer;
layout(set = 1, binding = 10) uniform texture2D color_buffer;
#define multiviewSampler sampler2D
#endif // USE_MULTIVIEW
layout(set = 1, binding = 11) uniform sampler decal_sampler;
layout(set = 1, binding = 12) uniform sampler light_projector_sampler;
layout(set = 1, binding = 13 + 0) uniform sampler SAMPLER_NEAREST_CLAMP;
layout(set = 1, binding = 13 + 1) uniform sampler SAMPLER_LINEAR_CLAMP;
layout(set = 1, binding = 13 + 2) uniform sampler SAMPLER_NEAREST_WITH_MIPMAPS_CLAMP;
layout(set = 1, binding = 13 + 3) uniform sampler SAMPLER_LINEAR_WITH_MIPMAPS_CLAMP;
layout(set = 1, binding = 13 + 4) uniform sampler SAMPLER_NEAREST_WITH_MIPMAPS_ANISOTROPIC_CLAMP;
layout(set = 1, binding = 13 + 5) uniform sampler SAMPLER_LINEAR_WITH_MIPMAPS_ANISOTROPIC_CLAMP;
layout(set = 1, binding = 13 + 6) uniform sampler SAMPLER_NEAREST_REPEAT;
layout(set = 1, binding = 13 + 7) uniform sampler SAMPLER_LINEAR_REPEAT;
layout(set = 1, binding = 13 + 8) uniform sampler SAMPLER_NEAREST_WITH_MIPMAPS_REPEAT;
layout(set = 1, binding = 13 + 9) uniform sampler SAMPLER_LINEAR_WITH_MIPMAPS_REPEAT;
layout(set = 1, binding = 13 + 10) uniform sampler SAMPLER_NEAREST_WITH_MIPMAPS_ANISOTROPIC_REPEAT;
layout(set = 1, binding = 13 + 11) uniform sampler SAMPLER_LINEAR_WITH_MIPMAPS_ANISOTROPIC_REPEAT;
/* Set 2 Skeleton & Instancing (can change per item) */
layout(set = 2, binding = 0, std430) restrict readonly buffer Transforms {
vec4 data[];
}
transforms;
/* Set 3 User Material */

View File

@@ -0,0 +1,275 @@
#[compute]
#version 450
#VERSION_DEFINES
layout(local_size_x = 64, local_size_y = 1, local_size_z = 1) in;
#define NO_CHILDREN 0xFFFFFFFF
struct CellChildren {
uint children[8];
};
layout(set = 0, binding = 1, std430) buffer CellChildrenBuffer {
CellChildren data[];
}
cell_children;
struct CellData {
uint position; // xyz 10 bits
uint albedo; //rgb albedo
uint emission; //rgb normalized with e as multiplier
uint normal; //RGB normal encoded
};
layout(set = 0, binding = 2, std430) buffer CellDataBuffer {
CellData data[];
}
cell_data;
#define LIGHT_TYPE_DIRECTIONAL 0
#define LIGHT_TYPE_OMNI 1
#define LIGHT_TYPE_SPOT 2
#ifdef MODE_COMPUTE_LIGHT
struct Light {
uint type;
float energy;
float radius;
float attenuation;
vec3 color;
float cos_spot_angle;
vec3 position;
float inv_spot_attenuation;
vec3 direction;
bool has_shadow;
};
layout(set = 0, binding = 3, std140) uniform Lights {
Light data[MAX_LIGHTS];
}
lights;
#endif
layout(push_constant, std430) uniform Params {
ivec3 limits;
uint stack_size;
float emission_scale;
float propagation;
float dynamic_range;
uint light_count;
uint cell_offset;
uint cell_count;
uint pad[2];
}
params;
layout(set = 0, binding = 4, std430) buffer Outputs {
vec4 data[];
}
outputs;
#ifdef MODE_COMPUTE_LIGHT
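// raymarch() walks from 'from' along 'direction' in steps of distance_adv; at every step it
// descends the octree from the root, halving the cell size up to params.stack_size - 1 times,
// and returns the first populated leaf cell it lands in. If the marched distance runs out
// without hitting a populated leaf, it returns NO_CHILDREN.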
uint raymarch(float distance, float distance_adv, vec3 from, vec3 direction) {
uint result = NO_CHILDREN;
ivec3 size = ivec3(max(max(params.limits.x, params.limits.y), params.limits.z));
while (distance > -distance_adv) { //use this to avoid precision errors
uint cell = 0;
ivec3 pos = ivec3(from);
if (all(greaterThanEqual(pos, ivec3(0))) && all(lessThan(pos, size))) {
ivec3 ofs = ivec3(0);
ivec3 half_size = size / 2;
for (int i = 0; i < params.stack_size - 1; i++) {
bvec3 greater = greaterThanEqual(pos, ofs + half_size);
ofs += mix(ivec3(0), half_size, greater);
uint child = 0; //wonder if this can be done faster
if (greater.x) {
child |= 1;
}
if (greater.y) {
child |= 2;
}
if (greater.z) {
child |= 4;
}
cell = cell_children.data[cell].children[child];
if (cell == NO_CHILDREN) {
break;
}
half_size >>= ivec3(1);
}
if (cell != NO_CHILDREN) {
return cell; //found cell!
}
}
from += direction * distance_adv;
distance -= distance_adv;
}
return NO_CHILDREN;
}
bool compute_light_vector(uint light, uint cell, vec3 pos, out float attenuation, out vec3 light_pos) {
if (lights.data[light].type == LIGHT_TYPE_DIRECTIONAL) {
light_pos = pos - lights.data[light].direction * length(vec3(params.limits));
attenuation = 1.0;
} else {
light_pos = lights.data[light].position;
float distance = length(pos - light_pos);
if (distance >= lights.data[light].radius) {
return false;
}
attenuation = pow(clamp(1.0 - distance / lights.data[light].radius, 0.0001, 1.0), lights.data[light].attenuation);
if (lights.data[light].type == LIGHT_TYPE_SPOT) {
vec3 rel = normalize(pos - light_pos);
float cos_spot_angle = lights.data[light].cos_spot_angle;
float cos_angle = dot(rel, lights.data[light].direction);
if (cos_angle < cos_spot_angle) {
return false;
}
float scos = max(cos_angle, cos_spot_angle);
float spot_rim = max(0.0001, (1.0 - scos) / (1.0 - cos_spot_angle));
attenuation *= 1.0 - pow(spot_rim, lights.data[light].inv_spot_attenuation);
}
}
return true;
}
float get_normal_advance(vec3 p_normal) {
vec3 normal = p_normal;
vec3 unorm = abs(normal);
if ((unorm.x >= unorm.y) && (unorm.x >= unorm.z)) {
// x code
unorm = normal.x > 0.0 ? vec3(1.0, 0.0, 0.0) : vec3(-1.0, 0.0, 0.0);
} else if ((unorm.y > unorm.x) && (unorm.y >= unorm.z)) {
// y code
unorm = normal.y > 0.0 ? vec3(0.0, 1.0, 0.0) : vec3(0.0, -1.0, 0.0);
} else if ((unorm.z > unorm.x) && (unorm.z > unorm.y)) {
// z code
unorm = normal.z > 0.0 ? vec3(0.0, 0.0, 1.0) : vec3(0.0, 0.0, -1.0);
} else {
// oh-no we messed up code
// has to be
unorm = vec3(1.0, 0.0, 0.0);
}
return 1.0 / dot(normal, unorm);
}
#endif
void main() {
uint cell_index = gl_GlobalInvocationID.x;
if (cell_index >= params.cell_count) {
return;
}
cell_index += params.cell_offset;
uvec3 posu = uvec3(cell_data.data[cell_index].position & 0x7FF, (cell_data.data[cell_index].position >> 11) & 0x3FF, cell_data.data[cell_index].position >> 21);
vec4 albedo = unpackUnorm4x8(cell_data.data[cell_index].albedo);
#ifdef MODE_COMPUTE_LIGHT
vec3 pos = vec3(posu) + vec3(0.5);
vec3 emission = vec3(ivec3(cell_data.data[cell_index].emission & 0x3FF, (cell_data.data[cell_index].emission >> 10) & 0x7FF, cell_data.data[cell_index].emission >> 21)) * params.emission_scale;
vec4 normal = unpackSnorm4x8(cell_data.data[cell_index].normal);
vec3 accum = vec3(0.0);
for (uint i = 0; i < params.light_count; i++) {
float attenuation;
vec3 light_pos;
if (!compute_light_vector(i, cell_index, pos, attenuation, light_pos)) {
continue;
}
vec3 light_dir = pos - light_pos;
float distance = length(light_dir);
light_dir = normalize(light_dir);
if (length(normal.xyz) > 0.2 && dot(normal.xyz, light_dir) >= 0) {
continue; //not facing the light
}
if (lights.data[i].has_shadow) {
float distance_adv = get_normal_advance(light_dir);
distance += distance_adv - mod(distance, distance_adv); //make it reach the center of the box always
vec3 from = pos - light_dir * distance; //approximate
from -= sign(light_dir) * 0.45; //go near the edge towards the light direction to avoid self occlusion
uint result = raymarch(distance, distance_adv, from, light_dir);
if (result != cell_index) {
continue; //was occluded
}
}
vec3 light = lights.data[i].color * albedo.rgb * attenuation * lights.data[i].energy;
if (length(normal.xyz) > 0.2) {
accum += max(0.0, dot(normal.xyz, -light_dir)) * light + emission;
} else {
//all directions
accum += light + emission;
}
}
outputs.data[cell_index] = vec4(accum, 0.0);
#endif //MODE_COMPUTE_LIGHT
#ifdef MODE_UPDATE_MIPMAPS
{
vec3 light_accum = vec3(0.0);
float count = 0.0;
for (uint i = 0; i < 8; i++) {
uint child_index = cell_children.data[cell_index].children[i];
if (child_index == NO_CHILDREN) {
continue;
}
light_accum += outputs.data[child_index].rgb;
count += 1.0;
}
float divisor = mix(8.0, count, params.propagation);
outputs.data[cell_index] = vec4(light_accum / divisor, 0.0);
}
#endif
#ifdef MODE_WRITE_TEXTURE
{
}
#endif
}

View File

@@ -0,0 +1,43 @@
// Use of FP16 in Godot is done explicitly through the types half and hvec.
// The extensions must be supported by the system to use this shader.
//
// If EXPLICIT_FP16 is not defined, all operations will use full precision
// floats instead and all casting operations will not be performed.
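// Example usage that compiles with or without EXPLICIT_FP16 (albedo here stands for any
// vec3 input):
//   hvec3 c = hvec3(albedo.rgb);
//   half m = saturateHalf(max(c.r, max(c.g, c.b)));
// With FP16 enabled the math runs in float16_t; otherwise the same source falls back to
// 32-bit floats because the macros below alias the regular types.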
#ifndef HALF_INC_H
#define HALF_INC_H
#ifdef EXPLICIT_FP16
#extension GL_EXT_shader_explicit_arithmetic_types_float16 : require
#extension GL_EXT_shader_16bit_storage : require
#define HALF_FLT_MIN float16_t(6.10352e-5)
#define HALF_FLT_MAX float16_t(65504.0)
#define half float16_t
#define hvec2 f16vec2
#define hvec3 f16vec3
#define hvec4 f16vec4
#define hmat2 f16mat2
#define hmat3 f16mat3
#define hmat4 f16mat4
#define saturateHalf(x) min(float16_t(x), HALF_FLT_MAX)
#else
#define HALF_FLT_MIN float(1.175494351e-38F)
#define HALF_FLT_MAX float(3.402823466e+38F)
#define half float
#define hvec2 vec2
#define hvec3 vec3
#define hvec4 vec4
#define hmat2 mat2
#define hmat3 mat3
#define hmat4 mat4
#define saturateHalf(x) (x)
#endif
#endif // HALF_INC_H

View File

@@ -0,0 +1,87 @@
#define LIGHT_BAKE_DISABLED 0
#define LIGHT_BAKE_STATIC 1
#define LIGHT_BAKE_DYNAMIC 2
struct LightData { //this structure needs to be as packed as possible
vec3 position;
float inv_radius;
vec3 direction;
float size;
vec3 color;
float attenuation;
float cone_attenuation;
float cone_angle;
float specular_amount;
float shadow_opacity;
vec4 atlas_rect; // rect in the shadow atlas
mat4 shadow_matrix;
float shadow_bias;
float shadow_normal_bias;
float transmittance_bias;
float soft_shadow_size; // for spot, it's the size in uv coordinates of the light, for omni it's the span angle
float soft_shadow_scale; // scales the shadow kernel for blurrier shadows
uint mask;
float volumetric_fog_energy;
uint bake_mode;
vec4 projector_rect; //projector rect in srgb decal atlas
};
#define REFLECTION_AMBIENT_DISABLED 0
#define REFLECTION_AMBIENT_ENVIRONMENT 1
#define REFLECTION_AMBIENT_COLOR 2
struct ReflectionData {
vec3 box_extents;
float index;
vec3 box_offset;
uint mask;
vec3 ambient; // ambient color
float intensity;
float blend_distance;
bool exterior;
bool box_project;
uint ambient_mode;
float exposure_normalization;
float pad0;
float pad1;
float pad2;
//0-8 is intensity,8-9 is ambient, mode
mat4 local_matrix; // up to here for spot and omni, rest is for directional
// notes: for ambientblend, use distance to edge to blend between already existing global environment
};
struct DirectionalLightData {
vec3 direction;
float energy; // needs to be highp to avoid NaNs being created with high energy values (i.e. when using physical light units and over-exposing the image)
vec3 color;
float size;
float specular;
uint mask;
float softshadow_angle;
float soft_shadow_scale;
bool blend_splits;
float shadow_opacity;
float fade_from;
float fade_to;
uvec2 pad;
uint bake_mode;
float volumetric_fog_energy;
vec4 shadow_bias;
vec4 shadow_normal_bias;
vec4 shadow_transmittance_bias;
vec4 shadow_z_range;
vec4 shadow_range_begin;
vec4 shadow_split_offsets;
mat4 shadow_matrix1;
mat4 shadow_matrix2;
mat4 shadow_matrix3;
mat4 shadow_matrix4;
vec2 uv_scale1;
vec2 uv_scale2;
vec2 uv_scale3;
vec2 uv_scale4;
};

View File

@@ -0,0 +1,678 @@
#[compute]
#version 450
#VERSION_DEFINES
layout(local_size_x = 64, local_size_y = 1, local_size_z = 1) in;
#define SDF_MAX_LENGTH 16384.0
/* SET 0: GLOBAL DATA */
#include "samplers_inc.glsl"
layout(set = 0, binding = 2, std430) restrict readonly buffer GlobalShaderUniformData {
vec4 data[];
}
global_shader_uniforms;
/* Set 1: FRAME AND PARTICLE DATA */
// a frame history is kept for trail deterministic behavior
#define MAX_ATTRACTORS 32
#define ATTRACTOR_TYPE_SPHERE 0
#define ATTRACTOR_TYPE_BOX 1
#define ATTRACTOR_TYPE_VECTOR_FIELD 2
struct Attractor {
mat4 transform;
vec3 extents; //extents or radius
uint type;
uint texture_index; //texture index for vector field
float strength;
float attenuation;
float directionality;
};
#define MAX_COLLIDERS 32
#define COLLIDER_TYPE_SPHERE 0
#define COLLIDER_TYPE_BOX 1
#define COLLIDER_TYPE_SDF 2
#define COLLIDER_TYPE_HEIGHT_FIELD 3
#define COLLIDER_TYPE_2D_SDF 4
struct Collider {
mat4 transform;
vec3 extents; //extents or radius
uint type;
uint texture_index; //texture index for vector field
float scale;
uint pad[2];
};
struct FrameParams {
bool emitting;
float system_phase;
float prev_system_phase;
uint cycle;
float explosiveness;
float randomness;
float time;
float delta;
uint frame;
float amount_ratio;
uint pad1;
uint pad2;
uint random_seed;
uint attractor_count;
uint collider_count;
float particle_size;
mat4 emission_transform;
vec3 emitter_velocity;
float interp_to_end;
Attractor attractors[MAX_ATTRACTORS];
Collider colliders[MAX_COLLIDERS];
};
layout(set = 1, binding = 0, std430) restrict buffer FrameHistory {
FrameParams data[];
}
frame_history;
#define PARTICLE_FLAG_ACTIVE uint(1)
#define PARTICLE_FLAG_STARTED uint(2)
#define PARTICLE_FLAG_TRAILED uint(4)
#define PARTICLE_FRAME_MASK uint(0xFFFF)
#define PARTICLE_FRAME_SHIFT uint(16)
struct ParticleData {
mat4 xform;
vec3 velocity;
uint flags;
vec4 color;
vec4 custom;
#ifdef USERDATA1_USED
vec4 userdata1;
#endif
#ifdef USERDATA2_USED
vec4 userdata2;
#endif
#ifdef USERDATA3_USED
vec4 userdata3;
#endif
#ifdef USERDATA4_USED
vec4 userdata4;
#endif
#ifdef USERDATA5_USED
vec4 userdata5;
#endif
#ifdef USERDATA6_USED
vec4 userdata6;
#endif
};
layout(set = 1, binding = 1, std430) restrict buffer Particles {
ParticleData data[];
}
particles;
#define EMISSION_FLAG_HAS_POSITION 1
#define EMISSION_FLAG_HAS_ROTATION_SCALE 2
#define EMISSION_FLAG_HAS_VELOCITY 4
#define EMISSION_FLAG_HAS_COLOR 8
#define EMISSION_FLAG_HAS_CUSTOM 16
struct ParticleEmission {
mat4 xform;
vec3 velocity;
uint flags;
vec4 color;
vec4 custom;
};
layout(set = 1, binding = 2, std430) restrict buffer SourceEmission {
int particle_count;
uint pad0;
uint pad1;
uint pad2;
ParticleEmission data[];
}
src_particles;
layout(set = 1, binding = 3, std430) restrict buffer DestEmission {
int particle_count;
int particle_max;
uint pad1;
uint pad2;
ParticleEmission data[];
}
dst_particles;
/* SET 2: COLLIDER/ATTRACTOR TEXTURES */
#define MAX_3D_TEXTURES 7
layout(set = 2, binding = 0) uniform texture3D sdf_vec_textures[MAX_3D_TEXTURES];
layout(set = 2, binding = 1) uniform texture2D height_field_texture;
/* SET 3: MATERIAL */
#ifdef MATERIAL_UNIFORMS_USED
/* clang-format off */
layout(set = 3, binding = 0, std140) uniform MaterialUniforms {
#MATERIAL_UNIFORMS
} material;
/* clang-format on */
#endif
layout(push_constant, std430) uniform Params {
float lifetime;
bool clear;
uint total_particles;
uint trail_size;
bool use_fractional_delta;
bool sub_emitter_mode;
bool can_emit;
bool trail_pass;
}
params;
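// Integer hash used to derive deterministic per-particle randomness from a seed built
// from the emission cycle, the total particle count, and the particle index.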
uint hash(uint x) {
x = ((x >> uint(16)) ^ x) * uint(0x45d9f3b);
x = ((x >> uint(16)) ^ x) * uint(0x45d9f3b);
x = (x >> uint(16)) ^ x;
return x;
}
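// Reserves a slot in the destination emission buffer with an atomic counter; if the buffer is
// already full, the reservation is rolled back and the emission request is dropped.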
bool emit_subparticle(mat4 p_xform, vec3 p_velocity, vec4 p_color, vec4 p_custom, uint p_flags) {
if (!params.can_emit) {
return false;
}
bool valid = false;
int dst_index = atomicAdd(dst_particles.particle_count, 1);
if (dst_index >= dst_particles.particle_max) {
atomicAdd(dst_particles.particle_count, -1);
return false;
}
dst_particles.data[dst_index].xform = p_xform;
dst_particles.data[dst_index].velocity = p_velocity;
dst_particles.data[dst_index].color = p_color;
dst_particles.data[dst_index].custom = p_custom;
dst_particles.data[dst_index].flags = p_flags;
return true;
}
vec3 safe_normalize(vec3 direction) {
const float EPSILON = 0.001;
if (length(direction) < EPSILON) {
return vec3(0.0);
}
return normalize(direction);
}
#GLOBALS
void main() {
uint particle = gl_GlobalInvocationID.x;
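// Trail index remapping: in the trail pass each invocation maps to one of the trailing
// sub-particles (skipping the head slot at index * trail_size), while in the regular pass each
// invocation maps to the head particle of its trail.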
if (params.trail_size > 1) {
if (params.trail_pass) {
if (particle >= params.total_particles * (params.trail_size - 1)) {
return;
}
particle += (particle / (params.trail_size - 1)) + 1;
} else {
if (particle >= params.total_particles) {
return;
}
particle *= params.trail_size;
}
}
if (particle >= params.total_particles * params.trail_size) {
return; //discard
}
uint index = particle / params.trail_size;
uint frame = (particle % params.trail_size);
#define FRAME frame_history.data[frame]
#define PARTICLE particles.data[particle]
bool apply_forces = true;
bool apply_velocity = true;
float local_delta = FRAME.delta;
float mass = 1.0;
bool restart = false;
bool restart_position = false;
bool restart_rotation_scale = false;
bool restart_velocity = false;
bool restart_color = false;
bool restart_custom = false;
if (params.clear) {
PARTICLE.color = vec4(1.0);
PARTICLE.custom = vec4(0.0);
PARTICLE.velocity = vec3(0.0);
PARTICLE.flags = 0;
PARTICLE.xform = mat4(
vec4(1.0, 0.0, 0.0, 0.0),
vec4(0.0, 1.0, 0.0, 0.0),
vec4(0.0, 0.0, 1.0, 0.0),
vec4(0.0, 0.0, 0.0, 1.0));
}
//clear started flag if set
if (params.trail_pass) {
//trail started
uint src_idx = index * params.trail_size;
if (bool(particles.data[src_idx].flags & PARTICLE_FLAG_STARTED)) {
//save start conditions for trails
PARTICLE.color = particles.data[src_idx].color;
PARTICLE.custom = particles.data[src_idx].custom;
PARTICLE.velocity = particles.data[src_idx].velocity;
PARTICLE.flags = PARTICLE_FLAG_TRAILED | ((frame_history.data[0].frame & PARTICLE_FRAME_MASK) << PARTICLE_FRAME_SHIFT); //mark it as trailed, save in which frame it will start
PARTICLE.xform = particles.data[src_idx].xform;
#ifdef USERDATA1_USED
PARTICLE.userdata1 = particles.data[src_idx].userdata1;
#endif
#ifdef USERDATA2_USED
PARTICLE.userdata2 = particles.data[src_idx].userdata2;
#endif
#ifdef USERDATA3_USED
PARTICLE.userdata3 = particles.data[src_idx].userdata3;
#endif
#ifdef USERDATA4_USED
PARTICLE.userdata4 = particles.data[src_idx].userdata4;
#endif
#ifdef USERDATA5_USED
PARTICLE.userdata5 = particles.data[src_idx].userdata5;
#endif
#ifdef USERDATA6_USED
PARTICLE.userdata6 = particles.data[src_idx].userdata6;
#endif
}
if (!bool(particles.data[src_idx].flags & PARTICLE_FLAG_ACTIVE)) {
// Disable the entire trail if the parent is no longer active.
PARTICLE.flags = 0;
return;
}
if (bool(PARTICLE.flags & PARTICLE_FLAG_TRAILED) && ((PARTICLE.flags >> PARTICLE_FRAME_SHIFT) == (FRAME.frame & PARTICLE_FRAME_MASK))) { //check this is trailed and see if it should start now
// We assume this is the first frame of the particle; the rest is deterministic.
PARTICLE.flags = PARTICLE_FLAG_ACTIVE | (particles.data[src_idx].flags & (PARTICLE_FRAME_MASK << PARTICLE_FRAME_SHIFT));
return; // This looks like it should be correct, but in practice it is not; the reason is unclear.
}
} else {
PARTICLE.flags &= ~PARTICLE_FLAG_STARTED;
}
bool collided = false;
vec3 collision_normal = vec3(0.0);
float collision_depth = 0.0;
vec3 attractor_force = vec3(0.0);
#if !defined(DISABLE_VELOCITY)
if (bool(PARTICLE.flags & PARTICLE_FLAG_ACTIVE)) {
PARTICLE.xform[3].xyz += PARTICLE.velocity * local_delta;
}
#endif
if (!params.trail_pass && params.sub_emitter_mode) {
if (!bool(PARTICLE.flags & PARTICLE_FLAG_ACTIVE)) {
int src_index = atomicAdd(src_particles.particle_count, -1) - 1;
if (src_index >= 0) {
PARTICLE.flags = (PARTICLE_FLAG_ACTIVE | PARTICLE_FLAG_STARTED | (FRAME.cycle << PARTICLE_FRAME_SHIFT));
restart = true;
if (bool(src_particles.data[src_index].flags & EMISSION_FLAG_HAS_POSITION)) {
PARTICLE.xform[3] = src_particles.data[src_index].xform[3];
} else {
PARTICLE.xform[3] = vec4(0, 0, 0, 1);
restart_position = true;
}
if (bool(src_particles.data[src_index].flags & EMISSION_FLAG_HAS_ROTATION_SCALE)) {
PARTICLE.xform[0] = src_particles.data[src_index].xform[0];
PARTICLE.xform[1] = src_particles.data[src_index].xform[1];
PARTICLE.xform[2] = src_particles.data[src_index].xform[2];
} else {
PARTICLE.xform[0] = vec4(1, 0, 0, 0);
PARTICLE.xform[1] = vec4(0, 1, 0, 0);
PARTICLE.xform[2] = vec4(0, 0, 1, 0);
restart_rotation_scale = true;
}
if (bool(src_particles.data[src_index].flags & EMISSION_FLAG_HAS_VELOCITY)) {
PARTICLE.velocity = src_particles.data[src_index].velocity;
} else {
PARTICLE.velocity = vec3(0);
restart_velocity = true;
}
if (bool(src_particles.data[src_index].flags & EMISSION_FLAG_HAS_COLOR)) {
PARTICLE.color = src_particles.data[src_index].color;
} else {
PARTICLE.color = vec4(1);
restart_color = true;
}
if (bool(src_particles.data[src_index].flags & EMISSION_FLAG_HAS_CUSTOM)) {
PARTICLE.custom = src_particles.data[src_index].custom;
} else {
PARTICLE.custom = vec4(0);
restart_custom = true;
}
}
}
} else if (FRAME.emitting) {
float restart_phase = float(index) / float(params.total_particles);
if (FRAME.randomness > 0.0) {
uint seed = FRAME.cycle;
if (restart_phase >= FRAME.system_phase) {
seed -= uint(1);
}
seed *= uint(params.total_particles);
seed += uint(index);
float random = float(hash(seed) % uint(65536)) / 65536.0;
restart_phase += FRAME.randomness * random * 1.0 / float(params.total_particles);
}
restart_phase *= (1.0 - FRAME.explosiveness);
if (FRAME.system_phase > FRAME.prev_system_phase) {
// restart_phase >= prev_system_phase is used so particles emit in the first frame they are processed
if (restart_phase >= FRAME.prev_system_phase && restart_phase < FRAME.system_phase) {
restart = true;
if (params.use_fractional_delta) {
local_delta = (FRAME.system_phase - restart_phase) * params.lifetime;
}
}
} else if (FRAME.delta > 0.0) {
if (restart_phase >= FRAME.prev_system_phase) {
restart = true;
if (params.use_fractional_delta) {
local_delta = (1.0 - restart_phase + FRAME.system_phase) * params.lifetime;
}
} else if (restart_phase < FRAME.system_phase) {
restart = true;
if (params.use_fractional_delta) {
local_delta = (FRAME.system_phase - restart_phase) * params.lifetime;
}
}
}
if (params.trail_pass) {
restart = false;
}
if (restart) {
PARTICLE.flags = FRAME.emitting ? (PARTICLE_FLAG_ACTIVE | PARTICLE_FLAG_STARTED | (FRAME.cycle << PARTICLE_FRAME_SHIFT)) : 0;
restart_position = true;
restart_rotation_scale = true;
restart_velocity = true;
restart_color = true;
restart_custom = true;
}
}
bool particle_active = bool(PARTICLE.flags & PARTICLE_FLAG_ACTIVE);
uint particle_number = (PARTICLE.flags >> PARTICLE_FRAME_SHIFT) * uint(params.total_particles) + index;
if (restart && particle_active) {
#CODE : START
}
if (particle_active) {
for (uint i = 0; i < FRAME.attractor_count; i++) {
vec3 dir;
float amount;
vec3 rel_vec = PARTICLE.xform[3].xyz - FRAME.attractors[i].transform[3].xyz;
vec3 local_pos = rel_vec * mat3(FRAME.attractors[i].transform);
switch (FRAME.attractors[i].type) {
case ATTRACTOR_TYPE_SPHERE: {
dir = safe_normalize(rel_vec);
float d = length(local_pos) / FRAME.attractors[i].extents.x;
if (d > 1.0) {
continue;
}
amount = max(0.0, 1.0 - d);
} break;
case ATTRACTOR_TYPE_BOX: {
dir = safe_normalize(rel_vec);
vec3 abs_pos = abs(local_pos / FRAME.attractors[i].extents);
float d = max(abs_pos.x, max(abs_pos.y, abs_pos.z));
if (d > 1.0) {
continue;
}
amount = max(0.0, 1.0 - d);
} break;
case ATTRACTOR_TYPE_VECTOR_FIELD: {
vec3 uvw_pos = (local_pos / FRAME.attractors[i].extents + 1.0) * 0.5;
if (any(lessThan(uvw_pos, vec3(0.0))) || any(greaterThan(uvw_pos, vec3(1.0)))) {
continue;
}
vec3 s = texture(sampler3D(sdf_vec_textures[FRAME.attractors[i].texture_index], SAMPLER_LINEAR_CLAMP), uvw_pos).xyz * -2.0 + 1.0;
dir = mat3(FRAME.attractors[i].transform) * safe_normalize(s); //reverse direction
amount = length(s);
} break;
}
amount = pow(amount, FRAME.attractors[i].attenuation);
dir = safe_normalize(mix(dir, FRAME.attractors[i].transform[2].xyz, FRAME.attractors[i].directionality));
attractor_force -= mass * amount * dir * FRAME.attractors[i].strength;
}
float particle_size = FRAME.particle_size;
#ifdef USE_COLLISION_SCALE
particle_size *= dot(vec3(length(PARTICLE.xform[0].xyz), length(PARTICLE.xform[1].xyz), length(PARTICLE.xform[2].xyz)), vec3(0.33333333333));
#endif
if (FRAME.collider_count == 1 && FRAME.colliders[0].type == COLLIDER_TYPE_2D_SDF) {
//2D collision
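// The particle position is projected into the 2D SDF's UV space, the signed distance is sampled
// (scaled by SDF_MAX_LENGTH), and on penetration a collision normal is rebuilt from central
// differences of the SDF before being transformed back to world space.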
vec2 pos = PARTICLE.xform[3].xy;
vec4 to_sdf_x = FRAME.colliders[0].transform[0];
vec4 to_sdf_y = FRAME.colliders[0].transform[1];
vec2 sdf_pos = vec2(dot(vec4(pos, 0, 1), to_sdf_x), dot(vec4(pos, 0, 1), to_sdf_y));
vec4 sdf_to_screen = vec4(FRAME.colliders[0].extents, FRAME.colliders[0].scale);
vec2 uv_pos = sdf_pos * sdf_to_screen.xy + sdf_to_screen.zw;
if (all(greaterThan(uv_pos, vec2(0.0))) && all(lessThan(uv_pos, vec2(1.0)))) {
vec2 pos2 = pos + vec2(0, particle_size);
vec2 sdf_pos2 = vec2(dot(vec4(pos2, 0, 1), to_sdf_x), dot(vec4(pos2, 0, 1), to_sdf_y));
float sdf_particle_size = distance(sdf_pos, sdf_pos2);
float d = texture(sampler2D(height_field_texture, SAMPLER_LINEAR_CLAMP), uv_pos).r * SDF_MAX_LENGTH;
d -= sdf_particle_size;
if (d < 0.0) {
const float EPSILON = 0.001;
vec2 n = normalize(vec2(
texture(sampler2D(height_field_texture, SAMPLER_LINEAR_CLAMP), uv_pos + vec2(EPSILON, 0.0)).r - texture(sampler2D(height_field_texture, SAMPLER_LINEAR_CLAMP), uv_pos - vec2(EPSILON, 0.0)).r,
texture(sampler2D(height_field_texture, SAMPLER_LINEAR_CLAMP), uv_pos + vec2(0.0, EPSILON)).r - texture(sampler2D(height_field_texture, SAMPLER_LINEAR_CLAMP), uv_pos - vec2(0.0, EPSILON)).r));
collided = true;
sdf_pos2 = sdf_pos + n * d;
pos2 = vec2(dot(vec4(sdf_pos2, 0, 1), FRAME.colliders[0].transform[2]), dot(vec4(sdf_pos2, 0, 1), FRAME.colliders[0].transform[3]));
n = pos - pos2;
collision_normal = normalize(vec3(n, 0.0));
collision_depth = length(n);
}
}
} else {
for (uint i = 0; i < FRAME.collider_count; i++) {
vec3 normal;
float depth;
bool col = false;
vec3 rel_vec = PARTICLE.xform[3].xyz - FRAME.colliders[i].transform[3].xyz;
vec3 local_pos = rel_vec * mat3(FRAME.colliders[i].transform);
// Use a small epsilon so particles just touching colliders still count as collided.
const float EPSILON = 0.001;
switch (FRAME.colliders[i].type) {
case COLLIDER_TYPE_SPHERE: {
float d = length(rel_vec) - (particle_size + FRAME.colliders[i].extents.x);
if (d <= EPSILON) {
col = true;
depth = -d;
normal = normalize(rel_vec);
}
} break;
case COLLIDER_TYPE_BOX: {
vec3 abs_pos = abs(local_pos);
vec3 sgn_pos = sign(local_pos);
if (any(greaterThan(abs_pos, FRAME.colliders[i].extents))) {
//point outside box
vec3 closest = min(abs_pos, FRAME.colliders[i].extents);
vec3 rel = abs_pos - closest;
depth = length(rel) - particle_size;
if (depth <= EPSILON) {
col = true;
normal = mat3(FRAME.colliders[i].transform) * (normalize(rel) * sgn_pos);
depth = -depth;
}
} else {
//point inside box
vec3 axis_len = FRAME.colliders[i].extents - abs_pos;
// there has to be a faster way to do this?
if (all(lessThan(axis_len.xx, axis_len.yz))) {
normal = vec3(1, 0, 0);
} else if (all(lessThan(axis_len.yy, axis_len.xz))) {
normal = vec3(0, 1, 0);
} else {
normal = vec3(0, 0, 1);
}
col = true;
depth = dot(normal * axis_len, vec3(1)) + particle_size;
normal = mat3(FRAME.colliders[i].transform) * (normal * sgn_pos);
}
} break;
case COLLIDER_TYPE_SDF: {
vec3 apos = abs(local_pos);
float extra_dist = 0.0;
if (any(greaterThan(apos, FRAME.colliders[i].extents))) { //outside
vec3 mpos = min(apos, FRAME.colliders[i].extents);
extra_dist = distance(mpos, apos);
}
if (extra_dist > particle_size) {
continue;
}
vec3 uvw_pos = (local_pos / FRAME.colliders[i].extents) * 0.5 + 0.5;
float s = texture(sampler3D(sdf_vec_textures[FRAME.colliders[i].texture_index], SAMPLER_LINEAR_CLAMP), uvw_pos).r;
s *= FRAME.colliders[i].scale;
s += extra_dist;
if (s <= particle_size + EPSILON) {
col = true;
depth = particle_size - s;
normal = mat3(FRAME.colliders[i].transform) *
normalize(
vec3(
texture(sampler3D(sdf_vec_textures[FRAME.colliders[i].texture_index], SAMPLER_LINEAR_CLAMP), uvw_pos + vec3(EPSILON, 0.0, 0.0)).r - texture(sampler3D(sdf_vec_textures[FRAME.colliders[i].texture_index], SAMPLER_LINEAR_CLAMP), uvw_pos - vec3(EPSILON, 0.0, 0.0)).r,
texture(sampler3D(sdf_vec_textures[FRAME.colliders[i].texture_index], SAMPLER_LINEAR_CLAMP), uvw_pos + vec3(0.0, EPSILON, 0.0)).r - texture(sampler3D(sdf_vec_textures[FRAME.colliders[i].texture_index], SAMPLER_LINEAR_CLAMP), uvw_pos - vec3(0.0, EPSILON, 0.0)).r,
texture(sampler3D(sdf_vec_textures[FRAME.colliders[i].texture_index], SAMPLER_LINEAR_CLAMP), uvw_pos + vec3(0.0, 0.0, EPSILON)).r - texture(sampler3D(sdf_vec_textures[FRAME.colliders[i].texture_index], SAMPLER_LINEAR_CLAMP), uvw_pos - vec3(0.0, 0.0, EPSILON)).r));
}
} break;
case COLLIDER_TYPE_HEIGHT_FIELD: {
vec3 local_pos_bottom = local_pos;
local_pos_bottom.y -= particle_size;
if (any(greaterThan(abs(local_pos_bottom), FRAME.colliders[i].extents))) {
continue;
}
const float DELTA = 1.0 / 8192.0;
vec3 uvw_pos = vec3(local_pos_bottom / FRAME.colliders[i].extents) * 0.5 + 0.5;
float y = texture(sampler2D(height_field_texture, SAMPLER_LINEAR_CLAMP), uvw_pos.xz).r;
if (y + EPSILON >= uvw_pos.y) {
//inside heightfield
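// Rebuild the surface normal from two neighboring heightfield samples and use the plane through
// pos1 to estimate the penetration depth of the particle's bottom point.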
vec3 pos1 = (vec3(uvw_pos.x, y, uvw_pos.z) * 2.0 - 1.0) * FRAME.colliders[i].extents;
vec3 pos2 = (vec3(uvw_pos.x + DELTA, texture(sampler2D(height_field_texture, SAMPLER_LINEAR_CLAMP), uvw_pos.xz + vec2(DELTA, 0)).r, uvw_pos.z) * 2.0 - 1.0) * FRAME.colliders[i].extents;
vec3 pos3 = (vec3(uvw_pos.x, texture(sampler2D(height_field_texture, SAMPLER_LINEAR_CLAMP), uvw_pos.xz + vec2(0, DELTA)).r, uvw_pos.z + DELTA) * 2.0 - 1.0) * FRAME.colliders[i].extents;
normal = normalize(cross(pos1 - pos2, pos1 - pos3));
float local_y = (vec3(local_pos / FRAME.colliders[i].extents) * 0.5 + 0.5).y;
col = true;
depth = dot(normal, pos1) - dot(normal, local_pos_bottom);
}
} break;
}
if (col) {
if (!collided) {
collided = true;
collision_normal = normal;
collision_depth = depth;
} else {
vec3 c = collision_normal * collision_depth;
c += normal * max(0.0, depth - dot(normal, c));
collision_normal = normalize(c);
collision_depth = length(c);
}
}
}
}
}
if (particle_active) {
#CODE : PROCESS
}
PARTICLE.flags &= ~PARTICLE_FLAG_ACTIVE;
if (particle_active) {
PARTICLE.flags |= PARTICLE_FLAG_ACTIVE;
}
}

View File

@@ -0,0 +1,237 @@
#[compute]
#version 450
#VERSION_DEFINES
layout(local_size_x = 64, local_size_y = 1, local_size_z = 1) in;
#define PARTICLE_FLAG_ACTIVE uint(1)
#define PARTICLE_FLAG_STARTED uint(2)
#define PARTICLE_FLAG_TRAILED uint(4)
struct ParticleData {
mat4 xform;
vec3 velocity;
uint flags;
vec4 color;
vec4 custom;
#ifdef USERDATA_COUNT
vec4 userdata[USERDATA_COUNT];
#endif
};
layout(set = 0, binding = 1, std430) restrict readonly buffer Particles {
ParticleData data[];
}
particles;
layout(set = 0, binding = 2, std430) restrict writeonly buffer Transforms {
vec4 data[];
}
instances;
#ifdef USE_SORT_BUFFER
layout(set = 1, binding = 0, std430) restrict buffer SortBuffer {
vec2 data[];
}
sort_buffer;
#endif // USE_SORT_BUFFER
layout(set = 2, binding = 0, std430) restrict readonly buffer TrailBindPoses {
mat4 data[];
}
trail_bind_poses;
#define PARAMS_FLAG_ORDER_BY_LIFETIME 1
#define PARAMS_FLAG_COPY_MODE_2D 2
layout(push_constant, std430) uniform Params {
vec3 sort_direction;
uint total_particles;
uint trail_size;
uint trail_total;
float frame_delta;
float frame_remainder;
vec3 align_up;
uint align_mode;
uint lifetime_split;
bool lifetime_reverse;
uint motion_vectors_current_offset;
uint flags;
mat4 inv_emission_transform;
}
params;
#define TRANSFORM_ALIGN_DISABLED 0
#define TRANSFORM_ALIGN_Z_BILLBOARD 1
#define TRANSFORM_ALIGN_Y_TO_VELOCITY 2
#define TRANSFORM_ALIGN_Z_BILLBOARD_Y_TO_VELOCITY 3
void main() {
#ifdef MODE_FILL_SORT_BUFFER
uint particle = gl_GlobalInvocationID.x;
if (particle >= params.total_particles) {
return; //discard
}
uint src_particle = particle;
if (params.trail_size > 1) {
src_particle = src_particle * params.trail_size + params.trail_size / 2; //use trail center for sorting
}
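// Sort key: depth of the particle (or trail center) along the camera's sort direction; .y keeps
// the particle index so the instance fill pass can read back the sorted order.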
sort_buffer.data[particle].x = dot(params.sort_direction, particles.data[src_particle].xform[3].xyz);
sort_buffer.data[particle].y = float(particle);
#endif
#ifdef MODE_FILL_INSTANCES
uint particle = gl_GlobalInvocationID.x;
if (particle >= params.total_particles) {
return; //discard
}
#ifdef USE_SORT_BUFFER
if (params.trail_size > 1) {
particle = uint(sort_buffer.data[particle / params.trail_size].y) + (particle % params.trail_size);
} else {
particle = uint(sort_buffer.data[particle].y); //use index from sort buffer
}
#else
if (bool(params.flags & PARAMS_FLAG_ORDER_BY_LIFETIME)) {
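// Remap the instance index so particles are written out in lifetime order: indices are rotated
// around lifetime_split (optionally reversed), with trails kept contiguous per head particle.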
if (params.trail_size > 1) {
uint limit = (params.total_particles / params.trail_size) - params.lifetime_split;
uint base_index = particle / params.trail_size;
uint base_offset = particle % params.trail_size;
if (params.lifetime_reverse) {
base_index = (params.total_particles / params.trail_size) - base_index - 1;
}
if (base_index < limit) {
base_index = params.lifetime_split + base_index;
} else {
base_index -= limit;
}
particle = base_index * params.trail_size + base_offset;
} else {
uint limit = params.total_particles - params.lifetime_split;
if (params.lifetime_reverse) {
particle = params.total_particles - particle - 1;
}
if (particle < limit) {
particle = params.lifetime_split + particle;
} else {
particle -= limit;
}
}
}
#endif // USE_SORT_BUFFER
mat4 txform;
if (bool(particles.data[particle].flags & PARTICLE_FLAG_ACTIVE) || bool(particles.data[particle].flags & PARTICLE_FLAG_TRAILED)) {
txform = particles.data[particle].xform;
if (params.trail_size > 1) {
// Since the steps don't fit precisely in the history frames, a tiny bit of interpolation
// is needed to bring them close to their intended location.
uint part_ofs = particle % params.trail_size;
float natural_ofs = fract((float(part_ofs) / float(params.trail_size)) * float(params.trail_total)) * params.frame_delta;
txform[3].xyz -= particles.data[particle].velocity * natural_ofs;
}
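// Re-orient the transform according to the align mode: face the camera (Z billboard), align the
// Y axis to the particle velocity, or both, while preserving the particle's scale.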
switch (params.align_mode) {
case TRANSFORM_ALIGN_DISABLED: {
} break; //nothing
case TRANSFORM_ALIGN_Z_BILLBOARD: {
mat3 local = mat3(normalize(cross(params.align_up, params.sort_direction)), params.align_up, params.sort_direction);
local = local * mat3(txform);
txform[0].xyz = local[0];
txform[1].xyz = local[1];
txform[2].xyz = local[2];
} break;
case TRANSFORM_ALIGN_Y_TO_VELOCITY: {
vec3 v = particles.data[particle].velocity;
float s = (length(txform[0]) + length(txform[1]) + length(txform[2])) / 3.0;
if (length(v) > 0.0) {
txform[1].xyz = normalize(v);
} else {
txform[1].xyz = normalize(txform[1].xyz);
}
txform[0].xyz = normalize(cross(txform[1].xyz, txform[2].xyz));
txform[2].xyz = vec3(0.0, 0.0, 1.0) * s;
txform[0].xyz *= s;
txform[1].xyz *= s;
} break;
case TRANSFORM_ALIGN_Z_BILLBOARD_Y_TO_VELOCITY: {
vec3 v = particles.data[particle].velocity;
vec3 sv = v - params.sort_direction * dot(params.sort_direction, v); //screen velocity
if (length(sv) == 0) {
sv = params.align_up;
}
sv = normalize(sv);
txform[0].xyz = normalize(cross(sv, params.sort_direction)) * length(txform[0]);
txform[1].xyz = sv * length(txform[1]);
txform[2].xyz = params.sort_direction * length(txform[2]);
} break;
}
txform[3].xyz += particles.data[particle].velocity * params.frame_remainder;
if (params.trail_size > 1) {
uint part_ofs = particle % params.trail_size;
txform = txform * trail_bind_poses.data[part_ofs];
}
if (bool(params.flags & PARAMS_FLAG_COPY_MODE_2D)) {
// In global mode, bring 2D particles to local coordinates
// as they will be drawn with the node position as origin.
txform = params.inv_emission_transform * txform;
}
} else {
// Set scale to zero and translate to -INF so particle will be invisible
// even for materials that ignore rotation/scale (i.e. billboards).
txform = mat4(vec4(0.0), vec4(0.0), vec4(0.0), vec4(-1.0 / 0.0, -1.0 / 0.0, -1.0 / 0.0, 0.0));
}
txform = transpose(txform);
uint instance_index = gl_GlobalInvocationID.x + params.motion_vectors_current_offset;
if (bool(params.flags & PARAMS_FLAG_COPY_MODE_2D)) {
uint write_offset = instance_index * (2 + 1 + 1); //xform + color + custom
instances.data[write_offset + 0] = txform[0];
instances.data[write_offset + 1] = txform[1];
instances.data[write_offset + 2] = particles.data[particle].color;
instances.data[write_offset + 3] = particles.data[particle].custom;
} else {
uint write_offset = instance_index * (3 + 1 + 1); //xform + color + custom
instances.data[write_offset + 0] = txform[0];
instances.data[write_offset + 1] = txform[1];
instances.data[write_offset + 2] = txform[2];
instances.data[write_offset + 3] = particles.data[particle].color;
instances.data[write_offset + 4] = particles.data[particle].custom;
}
#endif
}

View File

@@ -0,0 +1,12 @@
layout(set = 0, binding = SAMPLERS_BINDING_FIRST_INDEX + 0) uniform sampler SAMPLER_NEAREST_CLAMP;
layout(set = 0, binding = SAMPLERS_BINDING_FIRST_INDEX + 1) uniform sampler SAMPLER_LINEAR_CLAMP;
layout(set = 0, binding = SAMPLERS_BINDING_FIRST_INDEX + 2) uniform sampler SAMPLER_NEAREST_WITH_MIPMAPS_CLAMP;
layout(set = 0, binding = SAMPLERS_BINDING_FIRST_INDEX + 3) uniform sampler SAMPLER_LINEAR_WITH_MIPMAPS_CLAMP;
layout(set = 0, binding = SAMPLERS_BINDING_FIRST_INDEX + 4) uniform sampler SAMPLER_NEAREST_WITH_MIPMAPS_ANISOTROPIC_CLAMP;
layout(set = 0, binding = SAMPLERS_BINDING_FIRST_INDEX + 5) uniform sampler SAMPLER_LINEAR_WITH_MIPMAPS_ANISOTROPIC_CLAMP;
layout(set = 0, binding = SAMPLERS_BINDING_FIRST_INDEX + 6) uniform sampler SAMPLER_NEAREST_REPEAT;
layout(set = 0, binding = SAMPLERS_BINDING_FIRST_INDEX + 7) uniform sampler SAMPLER_LINEAR_REPEAT;
layout(set = 0, binding = SAMPLERS_BINDING_FIRST_INDEX + 8) uniform sampler SAMPLER_NEAREST_WITH_MIPMAPS_REPEAT;
layout(set = 0, binding = SAMPLERS_BINDING_FIRST_INDEX + 9) uniform sampler SAMPLER_LINEAR_WITH_MIPMAPS_REPEAT;
layout(set = 0, binding = SAMPLERS_BINDING_FIRST_INDEX + 10) uniform sampler SAMPLER_NEAREST_WITH_MIPMAPS_ANISOTROPIC_REPEAT;
layout(set = 0, binding = SAMPLERS_BINDING_FIRST_INDEX + 11) uniform sampler SAMPLER_LINEAR_WITH_MIPMAPS_ANISOTROPIC_REPEAT;
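// Global samplers shared across shaders; they are combined with separate texture objects at
// sample time, e.g. texture(sampler2D(some_texture, SAMPLER_LINEAR_CLAMP), uv), where
// `some_texture` stands for any bound texture2D (illustrative usage, not a binding defined here).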

View File

@@ -0,0 +1,76 @@
// Scene data stores all our 3D rendering globals for a frame such as our matrices
// where this information is independent of the different RD implementations.
// This enables us to use this UBO in our main scene render shaders but also in
// effects that need access to this data.
#define SCENE_DATA_FLAGS_USE_AMBIENT_LIGHT (1 << 0)
#define SCENE_DATA_FLAGS_USE_AMBIENT_CUBEMAP (1 << 1)
#define SCENE_DATA_FLAGS_USE_REFLECTION_CUBEMAP (1 << 2)
#define SCENE_DATA_FLAGS_USE_ROUGHNESS_LIMITER (1 << 3)
#define SCENE_DATA_FLAGS_USE_FOG (1 << 4)
#define SCENE_DATA_FLAGS_USE_UV2_MATERIAL (1 << 5)
#define SCENE_DATA_FLAGS_USE_PANCAKE_SHADOWS (1 << 6)
#define SCENE_DATA_FLAGS_IN_SHADOW_PASS (1 << 7)
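// Illustrative flag test (the UBO instance name depends on the shader that includes this file):
//   if (bool(scene_data.flags & SCENE_DATA_FLAGS_USE_FOG)) { /* fog is enabled for this pass */ }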
struct SceneData {
mat4 projection_matrix;
mat4 inv_projection_matrix;
mat4 inv_view_matrix;
mat4 view_matrix;
// only used for multiview
mat4 projection_matrix_view[MAX_VIEWS];
mat4 inv_projection_matrix_view[MAX_VIEWS];
vec4 eye_offset[MAX_VIEWS];
// Used for billboards to cast correct shadows.
mat4 main_cam_inv_view_matrix;
vec2 viewport_size;
vec2 screen_pixel_size;
// Use vec4s because std140 doesn't play nice with vec2s, z and w are wasted.
vec4 directional_penumbra_shadow_kernel[32];
vec4 directional_soft_shadow_kernel[32];
vec4 penumbra_shadow_kernel[32];
vec4 soft_shadow_kernel[32];
vec2 shadow_atlas_pixel_size;
vec2 directional_shadow_pixel_size;
uint directional_light_count;
float dual_paraboloid_side;
float z_far;
float z_near;
float roughness_limiter_amount;
float roughness_limiter_limit;
float opaque_prepass_threshold;
uint flags;
mat3 radiance_inverse_xform;
vec4 ambient_light_color_energy;
float ambient_color_sky_mix;
float fog_density;
float fog_height;
float fog_height_density;
float fog_depth_curve;
float fog_depth_begin;
float fog_depth_end;
float fog_sun_scatter;
vec3 fog_light_color;
float fog_aerial_perspective;
float time;
float taa_frame_count;
vec2 taa_jitter;
float emissive_exposure_normalization;
float IBL_exposure_normalization;
uint camera_visible_layers;
float pass_alpha_multiplier;
};

View File

@@ -0,0 +1,58 @@
#ifdef ALPHA_HASH_USED
float hash_2d(vec2 p) {
return fract(1.0e4 * sin(17.0 * p.x + 0.1 * p.y) *
(0.1 + abs(sin(13.0 * p.y + p.x))));
}
float hash_3d(vec3 p) {
return hash_2d(vec2(hash_2d(p.xy), p.z));
}
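// Computes a per-pixel stochastic alpha threshold that stays stable in screen space and under
// motion, in the spirit of hashed alpha testing (Wyman & McGuire).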
half compute_alpha_hash_threshold(vec3 pos, float hash_scale) {
vec3 dx = dFdx(pos);
vec3 dy = dFdy(pos);
float delta_max_sqr = max(length(dx), length(dy));
float pix_scale = 1.0 / (hash_scale * delta_max_sqr);
vec2 pix_scales =
vec2(exp2(floor(log2(pix_scale))), exp2(ceil(log2(pix_scale))));
vec2 a_thresh = vec2(hash_3d(floor(pix_scales.x * pos.xyz)),
hash_3d(floor(pix_scales.y * pos.xyz)));
float lerp_factor = fract(log2(pix_scale));
float a_interp = (1.0 - lerp_factor) * a_thresh.x + lerp_factor * a_thresh.y;
float min_lerp = min(lerp_factor, 1.0 - lerp_factor);
vec3 cases = vec3(a_interp * a_interp / (2.0 * min_lerp * (1.0 - min_lerp)),
(a_interp - 0.5 * min_lerp) / (1.0 - min_lerp),
1.0 - ((1.0 - a_interp) * (1.0 - a_interp) / (2.0 * min_lerp * (1.0 - min_lerp))));
float alpha_hash_threshold =
(a_interp < (1.0 - min_lerp)) ? ((a_interp < min_lerp) ? cases.x : cases.y) : cases.z;
return half(clamp(alpha_hash_threshold, 0.00001, 1.0));
}
#endif // ALPHA_HASH_USED
#ifdef ALPHA_ANTIALIASING_EDGE_USED
half calc_mip_level(vec2 texture_coord) {
vec2 dx = dFdx(texture_coord);
vec2 dy = dFdy(texture_coord);
float delta_max_sqr = max(dot(dx, dx), dot(dy, dy));
return max(half(0.0), half(0.5) * half(log2(delta_max_sqr)));
}
half compute_alpha_antialiasing_edge(half input_alpha, vec2 texture_coord, half alpha_edge) {
input_alpha *= half(1.0) + calc_mip_level(texture_coord) * half(0.25);
input_alpha = (input_alpha - alpha_edge) / max(fwidth(input_alpha), half(0.0001)) + half(0.5);
return clamp(input_alpha, half(0.0), half(1.0));
}
#endif // ALPHA_ANTIALIASING_EDGE_USED

View File

@@ -0,0 +1,242 @@
// Functions related to gi/sdfgi for our forward renderer
//standard voxel cone trace
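// Marches a cone through the voxel probe texture: the sample footprint grows with distance,
// the mip level is picked from the footprint diameter, and samples are composited front to back
// until the accumulated alpha saturates or the cone leaves the volume.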
vec4 voxel_cone_trace(texture3D probe, vec3 cell_size, vec3 pos, vec3 direction, float tan_half_angle, float max_distance, float p_bias) {
float dist = p_bias;
vec4 color = vec4(0.0);
while (dist < max_distance && color.a < 0.95) {
float diameter = max(1.0, 2.0 * tan_half_angle * dist);
vec3 uvw_pos = (pos + dist * direction) * cell_size;
float half_diameter = diameter * 0.5;
//check if outside, then break
if (any(greaterThan(abs(uvw_pos - 0.5), vec3(0.5f + half_diameter * cell_size)))) {
break;
}
vec4 scolor = textureLod(sampler3D(probe, DEFAULT_SAMPLER_LINEAR_WITH_MIPMAPS_CLAMP), uvw_pos, log2(diameter));
float a = (1.0 - color.a);
color += a * scolor;
dist += half_diameter;
}
return color;
}
vec4 voxel_cone_trace_45_degrees(texture3D probe, vec3 cell_size, vec3 pos, vec3 direction, float tan_half_angle, float max_distance, float p_bias) {
float dist = p_bias;
vec4 color = vec4(0.0);
float radius = max(0.5, tan_half_angle * dist);
float lod_level = log2(radius * 2.0);
while (dist < max_distance && color.a < 0.95) {
vec3 uvw_pos = (pos + dist * direction) * cell_size;
//check if outside, then break
if (any(greaterThan(abs(uvw_pos - 0.5), vec3(0.5f + radius * cell_size)))) {
break;
}
vec4 scolor = textureLod(sampler3D(probe, DEFAULT_SAMPLER_LINEAR_WITH_MIPMAPS_CLAMP), uvw_pos, lod_level);
lod_level += 1.0;
float a = (1.0 - color.a);
scolor *= a;
color += scolor;
dist += radius;
radius = max(0.5, tan_half_angle * dist);
}
return color;
}
void voxel_gi_compute(uint index, vec3 position, vec3 normal, vec3 ref_vec, mat3 normal_xform, float roughness, vec3 ambient, vec3 environment, inout vec4 out_spec, inout vec4 out_diff) {
position = (voxel_gi_instances.data[index].xform * vec4(position, 1.0)).xyz;
ref_vec = normalize((voxel_gi_instances.data[index].xform * vec4(ref_vec, 0.0)).xyz);
normal = normalize((voxel_gi_instances.data[index].xform * vec4(normal, 0.0)).xyz);
position += normal * voxel_gi_instances.data[index].normal_bias;
// This causes corrupted pixels; the reason is unclear.
if (any(bvec2(any(lessThan(position, vec3(0.0))), any(greaterThan(position, voxel_gi_instances.data[index].bounds))))) {
return;
}
vec3 blendv = abs(position / voxel_gi_instances.data[index].bounds * 2.0 - 1.0);
float blend = clamp(1.0 - max(blendv.x, max(blendv.y, blendv.z)), 0.0, 1.0);
//float blend=1.0;
float max_distance = length(voxel_gi_instances.data[index].bounds);
vec3 cell_size = 1.0 / voxel_gi_instances.data[index].bounds;
//radiance
#define MAX_CONE_DIRS 4
vec3 cone_dirs[MAX_CONE_DIRS] = vec3[](
vec3(0.707107, 0.0, 0.707107),
vec3(0.0, 0.707107, 0.707107),
vec3(-0.707107, 0.0, 0.707107),
vec3(0.0, -0.707107, 0.707107));
float cone_weights[MAX_CONE_DIRS] = float[](0.25, 0.25, 0.25, 0.25);
float cone_angle_tan = 0.98269;
vec3 light = vec3(0.0);
for (int i = 0; i < MAX_CONE_DIRS; i++) {
vec3 dir = normalize((voxel_gi_instances.data[index].xform * vec4(normal_xform * cone_dirs[i], 0.0)).xyz);
vec4 cone_light = voxel_cone_trace_45_degrees(voxel_gi_textures[index], cell_size, position, dir, cone_angle_tan, max_distance, voxel_gi_instances.data[index].bias);
if (voxel_gi_instances.data[index].blend_ambient) {
cone_light.rgb = mix(ambient, cone_light.rgb, min(1.0, cone_light.a / 0.95));
}
light += cone_weights[i] * cone_light.rgb;
}
light *= voxel_gi_instances.data[index].dynamic_range * voxel_gi_instances.data[index].exposure_normalization;
out_diff += vec4(light * blend, blend);
//irradiance
vec4 irr_light = voxel_cone_trace(voxel_gi_textures[index], cell_size, position, ref_vec, tan(roughness * 0.5 * M_PI * 0.99), max_distance, voxel_gi_instances.data[index].bias);
if (voxel_gi_instances.data[index].blend_ambient) {
irr_light.rgb = mix(environment, irr_light.rgb, min(1.0, irr_light.a / 0.95));
}
irr_light.rgb *= voxel_gi_instances.data[index].dynamic_range * voxel_gi_instances.data[index].exposure_normalization;
//irr_light=vec3(0.0);
out_spec += vec4(irr_light.rgb * blend, blend);
}
vec2 octahedron_wrap(vec2 v) {
vec2 signVal;
signVal.x = v.x >= 0.0 ? 1.0 : -1.0;
signVal.y = v.y >= 0.0 ? 1.0 : -1.0;
return (1.0 - abs(v.yx)) * signVal;
}
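// Octahedral encoding: maps a unit direction to [0,1]^2 so probe radiance can be stored in and
// fetched from a small 2D tile per probe.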
vec2 octahedron_encode(vec3 n) {
// https://twitter.com/Stubbesaurus/status/937994790553227264
n /= (abs(n.x) + abs(n.y) + abs(n.z));
n.xy = n.z >= 0.0 ? n.xy : octahedron_wrap(n.xy);
n.xy = n.xy * 0.5 + 0.5;
return n.xy;
}
void sdfgi_process(uint cascade, vec3 cascade_pos, vec3 cam_pos, vec3 cam_normal, vec3 cam_specular_normal, bool use_specular, float roughness, out vec3 diffuse_light, out vec3 specular_light, out float blend) {
cascade_pos += cam_normal * sdfgi.normal_bias;
vec3 base_pos = floor(cascade_pos);
//cascade_pos += mix(vec3(0.0),vec3(0.01),lessThan(abs(cascade_pos-base_pos),vec3(0.01))) * cam_normal;
ivec3 probe_base_pos = ivec3(base_pos);
vec4 diffuse_accum = vec4(0.0);
vec3 specular_accum;
ivec3 tex_pos = ivec3(probe_base_pos.xy, int(cascade));
tex_pos.x += probe_base_pos.z * sdfgi.probe_axis_size;
tex_pos.xy = tex_pos.xy * (SDFGI_OCT_SIZE + 2) + ivec2(1);
vec3 diffuse_posf = (vec3(tex_pos) + vec3(octahedron_encode(cam_normal) * float(SDFGI_OCT_SIZE), 0.0)) * sdfgi.lightprobe_tex_pixel_size;
vec3 specular_posf;
if (use_specular) {
specular_accum = vec3(0.0);
specular_posf = (vec3(tex_pos) + vec3(octahedron_encode(cam_specular_normal) * float(SDFGI_OCT_SIZE), 0.0)) * sdfgi.lightprobe_tex_pixel_size;
}
vec4 light_accum = vec4(0.0);
float weight_accum = 0.0;
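// Gather the 8 probes surrounding the sample position; each probe is weighted by trilinear
// distance, by how well it faces the surface normal, and (optionally) by its occlusion term
// before its octahedral radiance is accumulated.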
for (uint j = 0; j < 8; j++) {
ivec3 offset = (ivec3(j) >> ivec3(0, 1, 2)) & ivec3(1, 1, 1);
ivec3 probe_posi = probe_base_pos;
probe_posi += offset;
// Compute weight
vec3 probe_pos = vec3(probe_posi);
vec3 probe_to_pos = cascade_pos - probe_pos;
vec3 probe_dir = normalize(-probe_to_pos);
vec3 trilinear = vec3(1.0) - abs(probe_to_pos);
float weight = trilinear.x * trilinear.y * trilinear.z * max(0.005, dot(cam_normal, probe_dir));
// Compute lightprobe occlusion
if (sdfgi.use_occlusion) {
ivec3 occ_indexv = abs((sdfgi.cascades[cascade].probe_world_offset + probe_posi) & ivec3(1, 1, 1)) * ivec3(1, 2, 4);
vec4 occ_mask = mix(vec4(0.0), vec4(1.0), equal(ivec4(occ_indexv.x | occ_indexv.y), ivec4(0, 1, 2, 3)));
vec3 occ_pos = clamp(cascade_pos, probe_pos - sdfgi.occlusion_clamp, probe_pos + sdfgi.occlusion_clamp) * sdfgi.probe_to_uvw;
occ_pos.z += float(cascade);
if (occ_indexv.z != 0) { //z bit is on, means index is >=4, so make it switch to the other half of textures
occ_pos.x += 1.0;
}
occ_pos *= sdfgi.occlusion_renormalize;
float occlusion = dot(textureLod(sampler3D(sdfgi_occlusion_cascades, SAMPLER_LINEAR_CLAMP), occ_pos, 0.0), occ_mask);
weight *= max(occlusion, 0.01);
}
// Compute lightprobe texture position
vec3 diffuse;
vec3 pos_uvw = diffuse_posf;
pos_uvw.xy += vec2(offset.xy) * sdfgi.lightprobe_uv_offset.xy;
pos_uvw.x += float(offset.z) * sdfgi.lightprobe_uv_offset.z;
diffuse = textureLod(sampler2DArray(sdfgi_lightprobe_texture, SAMPLER_LINEAR_CLAMP), pos_uvw, 0.0).rgb;
diffuse_accum += vec4(diffuse * weight * sdfgi.cascades[cascade].exposure_normalization, weight);
if (use_specular) {
vec3 specular = vec3(0.0);
vec3 pos_uvw = specular_posf;
pos_uvw.xy += vec2(offset.xy) * sdfgi.lightprobe_uv_offset.xy;
pos_uvw.x += float(offset.z) * sdfgi.lightprobe_uv_offset.z;
if (roughness < 0.99) {
specular = textureLod(sampler2DArray(sdfgi_lightprobe_texture, SAMPLER_LINEAR_CLAMP), pos_uvw + vec3(0, 0, float(sdfgi.max_cascades)), 0.0).rgb;
}
if (roughness > 0.5) {
specular = mix(specular, textureLod(sampler2DArray(sdfgi_lightprobe_texture, SAMPLER_LINEAR_CLAMP), pos_uvw, 0.0).rgb, (roughness - 0.5) * 2.0);
}
specular_accum += specular * weight * sdfgi.cascades[cascade].exposure_normalization;
}
}
if (diffuse_accum.a > 0.0) {
diffuse_accum.rgb /= diffuse_accum.a;
}
diffuse_light = diffuse_accum.rgb;
if (use_specular) {
if (diffuse_accum.a > 0.0) {
specular_accum /= diffuse_accum.a;
}
specular_light = specular_accum;
}
{
//process blend
float blend_from = (float(sdfgi.probe_axis_size - 1) / 2.0) - 2.5;
float blend_to = blend_from + 2.0;
vec3 inner_pos = cam_pos * sdfgi.cascades[cascade].to_probe;
float len = length(inner_pos);
inner_pos = abs(normalize(inner_pos));
len *= max(inner_pos.x, max(inner_pos.y, inner_pos.z));
if (len >= blend_from) {
blend = smoothstep(blend_from, blend_to, len);
} else {
blend = 0.0;
}
}
}

File diff suppressed because it is too large

Some files were not shown because too many files have changed in this diff