freedreno/ir3: Use nir_lower_mem_access_bit_sizes instead of custom lowering
- More robust.
- Properly handles UBO cases, needed for proper OpenCL support (rusticl).
- Resolves the KHR-GL46.gpu_shader_fp64.fp64.max_uniform_components failure.
Fixes: f5ce806ed7 ("freedreno/ir3: Add wide load/store lowering")
Reviewed-by: Rob Clark <robdclark@freedesktop.org>
Co-authored-by: Rob Clark <robclark@freedesktop.org>
Signed-off-by: David Heidelberg <david@ixit.cz>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/30961>
commit 78a121b8cf (parent 5db135f66a)
@@ -15,7 +15,6 @@ spec@ext_external_objects@vk-image-overwrite@RGBA 8 UINT optimal: Failed to crea
 spec@ext_external_objects@vk-image-overwrite@RGBA 8 UNORM optimal: Failed to create texture from GL memory object.,Fail
 spec@ext_external_objects@vk-stencil-display@D32S8,Fail
 
-KHR-GL46.gpu_shader_fp64.fp64.max_uniform_components,Fail
 KHR-GL46.shader_image_load_store.basic-allFormats-store,Fail
 KHR-GL46.shader_image_load_store.basic-allTargets-store,Fail
 KHR-GL46.shading_language_420pack.binding_images,Fail
@@ -15,8 +15,6 @@ spec@ext_external_objects@vk-image-overwrite@RGBA 8 UINT optimal: Failed to crea
 spec@ext_external_objects@vk-image-overwrite@RGBA 8 UNORM optimal: Failed to create texture from GL memory object.,Fail
 spec@ext_external_objects@vk-stencil-display@D32S8,Fail
 
-KHR-GL46.gpu_shader_fp64.fp64.max_uniform_components,Fail
-
 KHR-GL46.shader_image_load_store.basic-allFormats-store,Fail
 KHR-GL46.shading_language_420pack.binding_images,Fail
 
@@ -1,5 +1,4 @@
 KHR-GL46.direct_state_access.renderbuffers_storage_multisample,Fail
-KHR-GL46.gpu_shader_fp64.fp64.max_uniform_components,Fail
 KHR-GL46.multi_bind.dispatch_bind_image_textures,Fail
 KHR-GL46.shader_image_load_store.basic-allTargets-store,Fail
 KHR-GL46.shader_subroutine.control_flow_and_returned_subroutine_values_used_as_subroutine_input,Fail
@@ -850,6 +850,36 @@ lower_binning(nir_shader *s)
                                      nir_metadata_control_flow, NULL);
 }
 
+static nir_mem_access_size_align
+ir3_mem_access_size_align(nir_intrinsic_op intrin, uint8_t bytes,
+                          uint8_t bit_size, uint32_t align,
+                          uint32_t align_offset, bool offset_is_const,
+                          const void *cb_data)
+{
+   align = nir_combined_align(align, align_offset);
+   assert(util_is_power_of_two_nonzero(align));
+
+   /* But if we're only aligned to 1 byte, use 8-bit loads. If we're only
+    * aligned to 2 bytes, use 16-bit loads, unless we needed 8-bit loads due to
+    * the size.
+    */
+   if ((bytes & 1) || (align == 1))
+      bit_size = 8;
+   else if ((bytes & 2) || (align == 2))
+      bit_size = 16;
+   else if (bit_size >= 32)
+      bit_size = 32;
+
+   if (intrin == nir_intrinsic_load_ubo)
+      bit_size = 32;
+
+   return (nir_mem_access_size_align){
+      .num_components = MAX2(1, MIN2(bytes / (bit_size / 8), 4)),
+      .bit_size = bit_size,
+      .align = bit_size / 8,
+   };
+}
+
 void
 ir3_nir_lower_variant(struct ir3_shader_variant *so, nir_shader *s)
 {
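For intuition, here is a small standalone sketch (not part of the commit) of what the callback above decides for a few representative queries. The pick() helper is hypothetical and simply restates the selection logic from ir3_mem_access_size_align; the expected results in the comments follow from that code, with the real pass re-querying for whatever bytes remain uncovered.

#include <stdio.h>

/* Hypothetical mirror of ir3_mem_access_size_align's selection logic:
 * given the bytes left to transfer, the combined alignment, and the
 * original bit size, print the access the pass would emit next. */
static void
pick(unsigned bytes, unsigned align, unsigned bit_size)
{
   /* Under-aligned or oddly sized accesses get narrowed. */
   if ((bytes & 1) || (align == 1))
      bit_size = 8;
   else if ((bytes & 2) || (align == 2))
      bit_size = 16;
   else if (bit_size >= 32)
      bit_size = 32;

   /* At most a vec4 per access; the pass re-queries for the rest. */
   unsigned comps = bytes / (bit_size / 8);
   comps = comps > 4 ? 4 : (comps < 1 ? 1 : comps);

   printf("%2u bytes @ align %u -> %ux%u-bit (%u bytes covered)\n",
          bytes, align, comps, bit_size, comps * bit_size / 8);
}

int main(void)
{
   pick(16, 4, 32); /* 4x32-bit: dword-aligned vec4 stays whole      */
   pick(12, 4, 32); /* 3x32-bit                                      */
   pick( 6, 2, 32); /* 3x16-bit: odd dword count forces 16-bit       */
   pick(16, 1, 32); /* 4x8-bit: byte alignment; the remaining 12
                     * bytes are handled by further callback queries */
   return 0;
}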
@@ -949,7 +979,14 @@ ir3_nir_lower_variant(struct ir3_shader_variant *so, nir_shader *s)
       OPT_V(s, ir3_nir_lower_64b_regs);
    }
 
-   progress |= OPT(s, ir3_nir_lower_wide_load_store);
+   nir_lower_mem_access_bit_sizes_options mem_bit_size_options = {
+      .modes = nir_var_mem_constant | nir_var_mem_ubo |
+               nir_var_mem_global | nir_var_mem_shared |
+               nir_var_function_temp,
+      .callback = ir3_mem_access_size_align,
+   };
+
+   progress |= OPT(s, nir_lower_mem_access_bit_sizes, &mem_bit_size_options);
    progress |= OPT(s, ir3_nir_lower_64b_global);
    progress |= OPT(s, ir3_nir_lower_64b_intrinsics);
    progress |= OPT(s, ir3_nir_lower_64b_undef);
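A brief note on the wiring above: the mode list covers constant, UBO, global, shared, and function-temp memory, and the callback pins load_ubo to 32 bits, presumably because ir3's UBO load path fetches 32-bit words; narrower UBO accesses (as rusticl can generate) thus become 32-bit loads plus extraction rather than unsupported narrow loads.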
@@ -29,7 +29,6 @@ bool ir3_nir_lower_driver_params_to_ubo(nir_shader *nir,
 bool ir3_nir_move_varying_inputs(nir_shader *shader);
 int ir3_nir_coord_offset(nir_def *ssa);
 bool ir3_nir_lower_tex_prefetch(nir_shader *shader);
-bool ir3_nir_lower_wide_load_store(nir_shader *shader);
 bool ir3_nir_lower_layer_id(nir_shader *shader);
 
 void ir3_nir_lower_to_explicit_output(nir_shader *shader,
@@ -1,100 +0,0 @@
-/*
- * Copyright © 2021 Google, Inc.
- * SPDX-License-Identifier: MIT
- */
-
-#include "ir3_nir.h"
-
-
-/*
- * Lowering for wide (larger than vec4) load/store
- */
-
-static bool
-lower_wide_load_store_filter(const nir_instr *instr, const void *unused)
-{
-   (void)unused;
-
-   if (instr->type != nir_instr_type_intrinsic)
-      return false;
-
-   nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
-
-   if (is_intrinsic_store(intr->intrinsic))
-      return nir_intrinsic_src_components(intr, 0) > 4;
-
-   if (is_intrinsic_load(intr->intrinsic))
-      return nir_intrinsic_dest_components(intr) > 4;
-
-   return false;
-}
-
-static nir_def *
-lower_wide_load_store(nir_builder *b, nir_instr *instr, void *unused)
-{
-   (void)unused;
-
-   nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
-
-   if (is_intrinsic_store(intr->intrinsic)) {
-      unsigned num_comp = nir_intrinsic_src_components(intr, 0);
-      unsigned wrmask = nir_intrinsic_write_mask(intr);
-      nir_def *val = intr->src[0].ssa;
-      nir_def *addr = intr->src[1].ssa;
-
-      for (unsigned off = 0; off < num_comp; off += 4) {
-         unsigned c = MIN2(num_comp - off, 4);
-         nir_def *v = nir_channels(b, val, BITFIELD_MASK(c) << off);
-
-         nir_intrinsic_instr *store =
-            nir_intrinsic_instr_create(b->shader, intr->intrinsic);
-         store->num_components = c;
-         store->src[0] = nir_src_for_ssa(v);
-         store->src[1] = nir_src_for_ssa(addr);
-         nir_intrinsic_set_align(store, nir_intrinsic_align(intr), 0);
-         nir_intrinsic_set_write_mask(store, (wrmask >> off) & 0xf);
-         nir_builder_instr_insert(b, &store->instr);
-
-         addr = nir_iadd(b,
-            nir_imm_intN_t(b, (c * val->bit_size) / 8, addr->bit_size),
-            addr);
-      }
-
-      return NIR_LOWER_INSTR_PROGRESS_REPLACE;
-   } else {
-      unsigned num_comp = nir_intrinsic_dest_components(intr);
-      unsigned bit_size = intr->def.bit_size;
-      nir_def *addr = intr->src[0].ssa;
-      nir_def *components[num_comp];
-
-      for (unsigned off = 0; off < num_comp;) {
-         unsigned c = MIN2(num_comp - off, 4);
-
-         nir_intrinsic_instr *load =
-            nir_intrinsic_instr_create(b->shader, intr->intrinsic);
-         load->num_components = c;
-         load->src[0] = nir_src_for_ssa(addr);
-         nir_intrinsic_set_align(load, nir_intrinsic_align(intr), 0);
-         nir_def_init(&load->instr, &load->def, c, bit_size);
-         nir_builder_instr_insert(b, &load->instr);
-
-         addr = nir_iadd(b,
-            nir_imm_intN_t(b, (c * bit_size) / 8, addr->bit_size),
-            addr);
-
-         for (unsigned i = 0; i < c; i++) {
-            components[off++] = nir_channel(b, &load->def, i);
-         }
-      }
-
-      return nir_build_alu_src_arr(b, nir_op_vec(num_comp), components);
-   }
-}
-
-bool
-ir3_nir_lower_wide_load_store(nir_shader *shader)
-{
-   return nir_shader_lower_instructions(
-      shader, lower_wide_load_store_filter,
-      lower_wide_load_store, NULL);
-}
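Scope comparison with the replacement: the deleted pass only fired on accesses wider than vec4, splitting them into same-bit-size vec4 chunks and carrying the original alignment through unchanged (note the align_offset of 0 passed to nir_intrinsic_set_align). The callback-driven pass subsumes that splitting and additionally narrows under-aligned or oddly sized accesses to 16- or 8-bit, which is what the UBO/OpenCL cases in the commit message rely on.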
@@ -95,7 +95,6 @@ libfreedreno_ir3_files = files(
   'ir3_nir_lower_io_offsets.c',
   'ir3_nir_lower_tess.c',
   'ir3_nir_lower_tex_prefetch.c',
-  'ir3_nir_lower_wide_load_store.c',
   'ir3_nir_move_varying_inputs.c',
   'ir3_nir_lower_layer_id.c',
   'ir3_nir_opt_preamble.c',