Mirror of https://gitlab.freedesktop.org/mesa/mesa.git
nvk: remove NVK_MME_COPY_QUERIES
It's not being used by anything, yet it still gets sent to the GPU; remove it.

Signed-off-by: Yusuf Khan <yusisamerican@gmail.com>
Reviewed-by: Mary Guillemard <mary.guillemard@collabora.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/29030>
commit 586bca76dd
parent 12fa8d749a
@@ -18,7 +18,6 @@ static const nvk_mme_builder_func mme_builders[NVK_MME_COUNT] = {
    [NVK_MME_ADD_CS_INVOCATIONS] = nvk_mme_add_cs_invocations,
    [NVK_MME_DISPATCH_INDIRECT] = nvk_mme_dispatch_indirect,
    [NVK_MME_WRITE_CS_INVOCATIONS] = nvk_mme_write_cs_invocations,
-   [NVK_MME_COPY_QUERIES] = nvk_mme_copy_queries,
    [NVK_MME_XFB_COUNTER_LOAD] = nvk_mme_xfb_counter_load,
    [NVK_MME_XFB_DRAW_INDIRECT] = nvk_mme_xfb_draw_indirect,
    [NVK_MME_SET_PRIV_REG] = nvk_mme_set_priv_reg,
@@ -21,7 +21,6 @@ enum nvk_mme {
    NVK_MME_ADD_CS_INVOCATIONS,
    NVK_MME_DISPATCH_INDIRECT,
    NVK_MME_WRITE_CS_INVOCATIONS,
-   NVK_MME_COPY_QUERIES,
    NVK_MME_XFB_COUNTER_LOAD,
    NVK_MME_XFB_DRAW_INDIRECT,
    NVK_MME_SET_PRIV_REG,
@@ -125,7 +124,6 @@ void nvk_mme_draw_indexed_indirect_count(struct mme_builder *b);
 void nvk_mme_add_cs_invocations(struct mme_builder *b);
 void nvk_mme_dispatch_indirect(struct mme_builder *b);
 void nvk_mme_write_cs_invocations(struct mme_builder *b);
-void nvk_mme_copy_queries(struct mme_builder *b);
 void nvk_mme_xfb_counter_load(struct mme_builder *b);
 void nvk_mme_xfb_draw_indirect(struct mme_builder *b);
 void nvk_mme_set_priv_reg(struct mme_builder *b);
@@ -986,139 +986,6 @@ nvk_meta_copy_query_pool_results(struct nvk_cmd_buffer *cmd,
    memcpy(desc->root.push, push_save, NVK_MAX_PUSH_SIZE);
 }
 
-void
-nvk_mme_copy_queries(struct mme_builder *b)
-{
-   if (b->devinfo->cls_eng3d < TURING_A)
-      return;
-
-   struct mme_value64 dst_addr = mme_load_addr64(b);
-   struct mme_value64 dst_stride = mme_load_addr64(b);
-   struct mme_value64 avail_addr = mme_load_addr64(b);
-   struct mme_value64 report_addr = mme_load_addr64(b);
-
-   struct mme_value query_count = mme_load(b);
-   struct mme_value control = mme_load(b);
-
-   struct mme_value flags = control;
-   struct mme_value write64 =
-      mme_and(b, flags, mme_imm(VK_QUERY_RESULT_64_BIT));
-   struct mme_value query_stride =
-      mme_merge(b, mme_zero(), control, 0, 16, 8);
-   struct mme_value is_timestamp =
-      mme_merge(b, mme_zero(), control, 0, 1, 24);
-
-   mme_while(b, ugt, query_count, mme_zero()) {
-      struct mme_value dw_per_query = mme_srl(b, query_stride, mme_imm(2));
-      mme_tu104_read_fifoed(b, report_addr, dw_per_query);
-      mme_free_reg(b, dw_per_query);
-
-      struct mme_value64 write_addr = mme_mov64(b, dst_addr);
-      struct mme_value report_count = mme_srl(b, query_stride, mme_imm(4));
-      mme_while(b, ugt, report_count, mme_zero()) {
-         struct mme_value result_lo = mme_alloc_reg(b);
-         struct mme_value result_hi = mme_alloc_reg(b);
-         struct mme_value64 result = mme_value64(result_lo, result_hi);
-
-         mme_if(b, ine, is_timestamp, mme_zero()) {
-            mme_load_to(b, mme_zero());
-            mme_load_to(b, mme_zero());
-            mme_load_to(b, result_lo);
-            mme_load_to(b, result_hi);
-            mme_sub_to(b, report_count, report_count, mme_imm(1));
-         }
-         mme_if(b, ieq, is_timestamp, mme_zero()) {
-            struct mme_value begin_lo = mme_load(b);
-            struct mme_value begin_hi = mme_load(b);
-            struct mme_value64 begin = mme_value64(begin_lo, begin_hi);
-            mme_load_to(b, mme_zero());
-            mme_load_to(b, mme_zero());
-
-            struct mme_value end_lo = mme_load(b);
-            struct mme_value end_hi = mme_load(b);
-            struct mme_value64 end = mme_value64(end_lo, end_hi);
-            mme_load_to(b, mme_zero());
-            mme_load_to(b, mme_zero());
-
-            mme_sub64_to(b, result, end, begin);
-            mme_sub_to(b, report_count, report_count, mme_imm(2));
-
-            mme_free_reg64(b, begin);
-            mme_free_reg64(b, end);
-         }
-
-         mme_store_global(b, write_addr, result_lo);
-         mme_add64_to(b, write_addr, write_addr, mme_imm64(4));
-         mme_if(b, ine, write64, mme_zero()) {
-            mme_store_global(b, write_addr, result_hi);
-            mme_add64_to(b, write_addr, write_addr, mme_imm64(4));
-         }
-      }
-
-      struct mme_value with_availability =
-         mme_and(b, flags, mme_imm(VK_QUERY_RESULT_WITH_AVAILABILITY_BIT));
-      mme_if(b, ine, with_availability, mme_zero()) {
-         mme_tu104_read_fifoed(b, avail_addr, mme_imm(1));
-         struct mme_value avail = mme_load(b);
-         mme_store_global(b, write_addr, avail);
-         mme_if(b, ine, write64, mme_zero()) {
-            mme_add64_to(b, write_addr, write_addr, mme_imm64(4));
-            mme_store_global(b, write_addr, mme_zero());
-         }
-      }
-      mme_free_reg(b, with_availability);
-
-      mme_add64_to(b, avail_addr, avail_addr, mme_imm64(4));
-
-      mme_add64_to(b, report_addr, report_addr,
-                   mme_value64(query_stride, mme_zero()));
-
-      mme_add64_to(b, dst_addr, dst_addr, dst_stride);
-
-      mme_sub_to(b, query_count, query_count, mme_imm(1));
-   }
-}
-
-static void
-nvk_cmd_copy_query_pool_results_mme(struct nvk_cmd_buffer *cmd,
-                                    struct nvk_query_pool *pool,
-                                    uint32_t first_query,
-                                    uint32_t query_count,
-                                    uint64_t dst_addr,
-                                    uint64_t dst_stride,
-                                    VkQueryResultFlags flags)
-{
-   /* TODO: vkCmdCopyQueryPoolResults() with a compute shader */
-   ASSERTED struct nvk_device *dev = nvk_cmd_buffer_device(cmd);
-   assert(nvk_device_physical(dev)->info.cls_eng3d >= TURING_A);
-
-   struct nv_push *p = nvk_cmd_buffer_push(cmd, 13);
-   P_IMMD(p, NVC597, SET_MME_DATA_FIFO_CONFIG, FIFO_SIZE_SIZE_4KB);
-   P_1INC(p, NVC597, CALL_MME_MACRO(NVK_MME_COPY_QUERIES));
-
-   P_INLINE_DATA(p, dst_addr >> 32);
-   P_INLINE_DATA(p, dst_addr);
-   P_INLINE_DATA(p, dst_stride >> 32);
-   P_INLINE_DATA(p, dst_stride);
-
-   uint64_t avail_start = nvk_query_available_addr(pool, first_query);
-   P_INLINE_DATA(p, avail_start >> 32);
-   P_INLINE_DATA(p, avail_start);
-
-   uint64_t report_start = nvk_query_report_addr(pool, first_query);
-   P_INLINE_DATA(p, report_start >> 32);
-   P_INLINE_DATA(p, report_start);
-
-   P_INLINE_DATA(p, query_count);
-
-   uint32_t is_timestamp = pool->vk.query_type == VK_QUERY_TYPE_TIMESTAMP;
-
-   uint32_t control = (flags & 0xff) |
-                      (pool->query_stride << 8) |
-                      (is_timestamp << 24);
-   P_INLINE_DATA(p, control);
-}
-
 VKAPI_ATTR void VKAPI_CALL
 nvk_CmdCopyQueryPoolResults(VkCommandBuffer commandBuffer,
                             VkQueryPool queryPool,
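Aside, for readers of the removed code above: the CPU-side helper and the MME macro had to agree on the order of the inline data words pushed after CALL_MME_MACRO. The struct below is only an illustration of that layout as it can be read off the diff; it is not a type from the driver, and the name and field names are made up. Each 64-bit address was pushed high dword first.

#include <stdint.h>

/* Illustrative only -- the arguments travel as raw inline-data dwords,
 * not as a struct in memory.  Field order follows the P_INLINE_DATA()
 * calls in the removed nvk_cmd_copy_query_pool_results_mme(). */
struct nvk_mme_copy_queries_args {
   uint64_t dst_addr;     /* destination buffer, high dword pushed first */
   uint64_t dst_stride;   /* byte stride between per-query results */
   uint64_t avail_addr;   /* availability word of the first query */
   uint64_t report_addr;  /* report data of the first query */
   uint32_t query_count;  /* number of queries to copy */
   uint32_t control;      /* flags | (query_stride << 8) | (is_timestamp << 24) */
};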
@@ -1154,3 +1021,4 @@ nvk_CmdCopyQueryPoolResults(VkCommandBuffer commandBuffer,
    nvk_meta_copy_query_pool_results(cmd, pool, firstQuery, queryCount,
                                     dst_addr, stride, flags);
 }
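For completeness, here is a minimal CPU-side sketch of how the removed macro unpacked that control word, derived from the mme_merge() bit ranges and the packing shown in the diff. The helper is hypothetical and not part of the driver.

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical equivalent of the bitfield extraction the removed
 * nvk_mme_copy_queries() did with mme_merge():
 *   bits  0..7   VkQueryResultFlags (only the low byte was packed)
 *   bits  8..23  query stride in bytes
 *   bit   24     set when the pool holds timestamp queries */
static inline void
nvk_decode_copy_queries_control(uint32_t control,
                                uint32_t *flags,
                                uint32_t *query_stride,
                                bool *is_timestamp)
{
   *flags = control & 0xff;
   *query_stride = (control >> 8) & 0xffff;
   *is_timestamp = (control >> 24) & 0x1;
}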