panvk: fix frag_completed for layered rendering

Make sure frag_completed is incremented once per render pass, regardless
of layer count.

This fixes

  [44354.379592] panthor fb000000.gpu: [drm] Failed to extend the tiler heap

in some cases.

Fixes: 157a4dc509 ("panvk/csf: Fix multi-layer rendering")
Signed-off-by: Chia-I Wu <olvaffe@gmail.com>
Reviewed-by: Boris Brezillon <boris.brezillon@collabora.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/32234>
(cherry picked from commit 3e9b8488b6)
This commit is contained in:
Chia-I Wu 2024-11-19 09:17:40 -08:00 committed by Dylan Baker
parent 2a4deafd01
commit 49de8e26a2
2 changed files with 19 additions and 10 deletions

View File

@@ -504,7 +504,7 @@
     "description": "panvk: fix frag_completed for layered rendering",
     "nominated": true,
     "nomination_type": 2,
-    "resolution": 0,
+    "resolution": 1,
     "main_sha": null,
     "because_sha": "157a4dc509360fe2bd9135046aa3691acf7321f3",
     "notes": null

View File

@@ -2411,8 +2411,12 @@ issue_fragment_jobs(struct panvk_cmd_buffer *cmdbuf)
    struct cs_index cur_tiler = cs_sr_reg64(b, 52);
    struct cs_index remaining_layers_in_td = cs_sr_reg32(b, 54);
    struct cs_index src_fbd_ptr = cs_sr_reg64(b, 56);
-   uint32_t td_count = DIV_ROUND_UP(cmdbuf->state.gfx.render.layer_count,
-                                    MAX_LAYERS_PER_TILER_DESC);
+   uint32_t td_count = 0;
+
+   if (cmdbuf->state.gfx.render.tiler) {
+      td_count = DIV_ROUND_UP(cmdbuf->state.gfx.render.layer_count,
+                              MAX_LAYERS_PER_TILER_DESC);
+   }

    if (copy_fbds) {
cs_load64_to( cs_load64_to(
@@ -2505,24 +2509,29 @@ issue_fragment_jobs(struct panvk_cmd_buffer *cmdbuf)
    cs_match(b, iter_sb, cmp_scratch) {
 #define CASE(x)                                                               \
    cs_case(b, x) {                                                            \
+      const struct cs_async_op async =                                        \
+         cs_defer(SB_WAIT_ITER(x), SB_ID(DEFERRED_SYNC));                     \
-      if (cmdbuf->state.gfx.render.tiler) {                                   \
+      if (td_count == 1) {                                                    \
+         cs_load_to(b, completed, cur_tiler, BITFIELD_MASK(4), 40);           \
+         cs_wait_slot(b, SB_ID(LS), false);                                   \
+         cs_finish_fragment(b, true, completed_top, completed_bottom, async); \
+      } else if (td_count > 1) {                                              \
          cs_while(b, MALI_CS_CONDITION_GREATER, tiler_count) {                \
             cs_load_to(b, completed, cur_tiler, BITFIELD_MASK(4), 40);        \
             cs_wait_slot(b, SB_ID(LS), false);                                \
-            cs_finish_fragment(                                               \
-               b, true, completed_top, completed_bottom,                      \
-               cs_defer(SB_WAIT_ITER(x), SB_ID(DEFERRED_SYNC)));              \
+            cs_finish_fragment(b, false, completed_top, completed_bottom,     \
+                               async);                                        \
             cs_add64(b, cur_tiler, cur_tiler, pan_size(TILER_CONTEXT));       \
             cs_add32(b, tiler_count, tiler_count, -1);                        \
          }                                                                    \
+         cs_frag_end(b, async);                                               \
       }                                                                       \
       if (copy_fbds) {                                                        \
          cs_sync32_add(b, true, MALI_CS_SYNC_SCOPE_CSG, release_sz,           \
-                       ringbuf_sync_addr,                                     \
-                       cs_defer(SB_WAIT_ITER(x), SB_ID(DEFERRED_SYNC)));      \
+                       ringbuf_sync_addr, async);                             \
       }                                                                       \
       cs_sync64_add(b, true, MALI_CS_SYNC_SCOPE_CSG, add_val, sync_addr,      \
-                    cs_defer(SB_WAIT_ITER(x), SB_ID(DEFERRED_SYNC)));         \
+                    async);                                                   \
       cs_move32_to(b, iter_sb, next_iter_sb(x));                              \
    }