Merge tag 'drm-habanalabs-next-2024-06-23' of https://github.com/HabanaAI/drivers.accel.habanalabs.kernel into drm-next

This tag contains habanalabs driver changes for v6.11.

The notable changes are:

- uAPI changes:
  - Use device-name directory in debugfs-driver-habanalabs.
  - Expose server type in debugfs.

- New features and improvements:
  - Gradual sleep in polling memory macro.
  - Reduce Gaudi2 MSI-X interrupt count to 128.
  - Add Gaudi2-D revision support.

- Firmware related changes:
  - Add timestamp to CPLD info.
  - Gaudi2: Assume hard-reset by firmware upon MC SEI severe error.
  - Align Gaudi2 interrupt names.
  - Check for errors after preboot is ready.

- Bug fixes and code cleanups:
  - Move heartbeat work initialization to early init.
  - Fix a race when receiving events during reset.
  - Change the heartbeat scheduling point.

- Maintainers:
  - Change habanalabs maintainer and git repo path.

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Ofir Bitton <obitton@habana.ai>
Link: https://patchwork.freedesktop.org/patch/msgid/ZnfIjTH5AYQvPe7n@obitton-vm-u22.habana-labs.com
Commit fb625bf618 by Dave Airlie, 2024-06-28 09:41:03 +10:00
26 changed files with 726 additions and 490 deletions

--- a/Documentation/ABI/testing/debugfs-driver-habanalabs
+++ b/Documentation/ABI/testing/debugfs-driver-habanalabs

@@ -217,7 +217,7 @@ Description:    Displays the hop values and physical address for a given ASID
                 and virtual address. The user should write the ASID and VA into
                 the file and then read the file to get the result.
                 e.g. to display info about VA 0x1000 for ASID 1 you need to do:
-                echo "1 0x1000" > /sys/kernel/debug/accel/0/mmu
+                echo "1 0x1000" > /sys/kernel/debug/accel/<parent_device>/mmu
 
 What:           /sys/kernel/debug/accel/<parent_device>/mmu_error
 Date:           Mar 2021
@@ -226,8 +226,8 @@ Contact:        fkassabri@habana.ai
 Description:    Check and display page fault or access violation mmu errors for
                 all MMUs specified in mmu_cap_mask.
                 e.g. to display error info for MMU hw cap bit 9, you need to do:
-                echo "0x200" > /sys/kernel/debug/accel/0/mmu_error
-                cat /sys/kernel/debug/accel/0/mmu_error
+                echo "0x200" > /sys/kernel/debug/accel/<parent_device>/mmu_error
+                cat /sys/kernel/debug/accel/<parent_device>/mmu_error
 
 What:           /sys/kernel/debug/accel/<parent_device>/monitor_dump
 Date:           Mar 2022
@@ -253,6 +253,12 @@ Description:    Triggers dump of monitor data. The value to trigger the operation
                 When the write is finished, the user can read the "monitor_dump"
                 blob
 
+What:           /sys/kernel/debug/accel/<parent_device>/server_type
+Date:           Feb 2024
+KernelVersion:  6.11
+Contact:        trisin@habana.ai
+Description:    Exposes the device's server type, maps to enum hl_server_type.
+
 What:           /sys/kernel/debug/accel/<parent_device>/set_power_state
 Date:           Jan 2019
 KernelVersion:  5.1
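Reading the new node follows the same pattern as the other debugfs entries above
(illustrative; the printed value is device-specific):

    cat /sys/kernel/debug/accel/<parent_device>/server_type

The resulting number maps to enum hl_server_type in the habanalabs uAPI headers.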

--- a/MAINTAINERS
+++ b/MAINTAINERS

@@ -9597,11 +9597,11 @@ S:      Maintained
 F:      block/partitions/efi.*
 
 HABANALABS PCI DRIVER
-M:      Oded Gabbay <ogabbay@kernel.org>
+M:      Ofir Bitton <obitton@habana.ai>
 L:      dri-devel@lists.freedesktop.org
 S:      Supported
 C:      irc://irc.oftc.net/dri-devel
-T:      git https://git.kernel.org/pub/scm/linux/kernel/git/ogabbay/linux.git
+T:      git https://github.com/HabanaAI/drivers.accel.habanalabs.kernel.git
 F:      Documentation/ABI/testing/debugfs-driver-habanalabs
 F:      Documentation/ABI/testing/sysfs-driver-habanalabs
 F:      drivers/accel/habanalabs/

--- a/drivers/accel/habanalabs/common/command_submission.c
+++ b/drivers/accel/habanalabs/common/command_submission.c

@@ -3284,12 +3284,6 @@ static int ts_get_and_handle_kernel_record(struct hl_device *hdev, struct hl_ctx
 
 	/* In case the node already registered, need to unregister first then re-use */
 	if (req_offset_record->ts_reg_info.in_use) {
-		dev_dbg(data->buf->mmg->dev,
-			"Requested record %p is in use on irq: %u ts addr: %p, unregister first then put on irq: %u\n",
-			req_offset_record,
-			req_offset_record->ts_reg_info.interrupt->interrupt_id,
-			req_offset_record->ts_reg_info.timestamp_kernel_addr,
-			data->interrupt->interrupt_id);
 		/*
 		 * Since interrupt here can be different than the one the node currently registered
 		 * on, and we don't want to lock two lists while we're doing unregister, so
@@ -3345,10 +3339,6 @@ static int _hl_interrupt_ts_reg_ioctl(struct hl_device *hdev, struct hl_ctx *ctx
 		goto put_cq_cb;
 	}
 
-	dev_dbg(hdev->dev, "Timestamp registration: interrupt id: %u, handle: 0x%llx, ts offset: %llu, cq_offset: %llu\n",
-		data->interrupt->interrupt_id, data->ts_handle,
-		data->ts_offset, data->cq_offset);
-
 	data->buf = hl_mmap_mem_buf_get(data->mmg, data->ts_handle);
 	if (!data->buf) {
 		rc = -EINVAL;
@@ -3370,9 +3360,6 @@ static int _hl_interrupt_ts_reg_ioctl(struct hl_device *hdev, struct hl_ctx *ctx
 	if (*pend->cq_kernel_addr >= data->target_value) {
 		spin_unlock_irqrestore(&data->interrupt->ts_list_lock, flags);
 
-		dev_dbg(hdev->dev, "Target value already reached release ts record: pend: %p, offset: %llu, interrupt: %u\n",
-			pend, data->ts_offset, data->interrupt->interrupt_id);
-
 		pend->ts_reg_info.in_use = 0;
 		*status = HL_WAIT_CS_STATUS_COMPLETED;
 		*pend->ts_reg_info.timestamp_kernel_addr = ktime_get_ns();

--- a/drivers/accel/habanalabs/common/debugfs.c
+++ b/drivers/accel/habanalabs/common/debugfs.c

@@ -42,9 +42,8 @@ static int hl_debugfs_i2c_read(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr,
 	pkt.i2c_reg = i2c_reg;
 	pkt.i2c_len = i2c_len;
 
-	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
-						0, val);
-	if (rc)
+	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, val);
+	if (rc && rc != -EAGAIN)
 		dev_err(hdev->dev, "Failed to read from I2C, error %d\n", rc);
 
 	return rc;
@@ -75,10 +74,8 @@ static int hl_debugfs_i2c_write(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr,
 	pkt.i2c_len = i2c_len;
 	pkt.value = cpu_to_le64(val);
 
-	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
-						0, NULL);
-
-	if (rc)
+	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, NULL);
+	if (rc && rc != -EAGAIN)
 		dev_err(hdev->dev, "Failed to write to I2C, error %d\n", rc);
 
 	return rc;
@@ -99,10 +96,8 @@ static void hl_debugfs_led_set(struct hl_device *hdev, u8 led, u8 state)
 	pkt.led_index = cpu_to_le32(led);
 	pkt.value = cpu_to_le64(state);
 
-	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
-						0, NULL);
-
-	if (rc)
+	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, NULL);
+	if (rc && rc != -EAGAIN)
 		dev_err(hdev->dev, "Failed to set LED %d, error %d\n", led, rc);
 }
 
@@ -1722,6 +1717,11 @@ static void add_files_to_device(struct hl_device *hdev, struct hl_dbg_device_ent
 				root,
 				&hdev->device_release_watchdog_timeout_sec);
 
+	debugfs_create_u16("server_type",
+				0444,
+				root,
+				&hdev->asic_prop.server_type);
+
 	for (i = 0, entry = dev_entry->entry_arr ; i < count ; i++, entry++) {
 		debugfs_create_file(hl_debugfs_list[i].name,
 					0644,

--- a/drivers/accel/habanalabs/common/device.c
+++ b/drivers/accel/habanalabs/common/device.c

@@ -30,6 +30,8 @@ enum dma_alloc_type {
 
 #define MEM_SCRUB_DEFAULT_VAL 0x1122334455667788
 
+static void hl_device_heartbeat(struct work_struct *work);
+
 /*
  * hl_set_dram_bar- sets the bar to allow later access to address
  *
@@ -130,8 +132,8 @@ static void *hl_dma_alloc_common(struct hl_device *hdev, size_t size, dma_addr_t
 	}
 
 	if (trace_habanalabs_dma_alloc_enabled() && !ZERO_OR_NULL_PTR(ptr))
-		trace_habanalabs_dma_alloc(hdev->dev, (u64) (uintptr_t) ptr, *dma_handle, size,
-						caller);
+		trace_habanalabs_dma_alloc(&(hdev)->pdev->dev, (u64) (uintptr_t) ptr, *dma_handle,
+						size, caller);
 
 	return ptr;
 }
@@ -152,7 +154,7 @@ static void hl_asic_dma_free_common(struct hl_device *hdev, size_t size, void *c
 		break;
 	}
 
-	trace_habanalabs_dma_free(hdev->dev, store_cpu_addr, dma_handle, size, caller);
+	trace_habanalabs_dma_free(&(hdev)->pdev->dev, store_cpu_addr, dma_handle, size, caller);
 }
 
 void *hl_asic_dma_alloc_coherent_caller(struct hl_device *hdev, size_t size, dma_addr_t *dma_handle,
@@ -204,15 +206,15 @@ int hl_dma_map_sgtable_caller(struct hl_device *hdev, struct sg_table *sgt,
 		return 0;
 
 	for_each_sgtable_dma_sg(sgt, sg, i)
-		trace_habanalabs_dma_map_page(hdev->dev,
+		trace_habanalabs_dma_map_page(&(hdev)->pdev->dev,
 					page_to_phys(sg_page(sg)),
 					sg->dma_address - prop->device_dma_offset_for_host_access,
 #ifdef CONFIG_NEED_SG_DMA_LENGTH
 					sg->dma_length,
 #else
 					sg->length,
 #endif
 					dir, caller);
 
 	return 0;
 }
@@ -247,7 +249,8 @@ void hl_dma_unmap_sgtable_caller(struct hl_device *hdev, struct sg_table *sgt,
 	if (trace_habanalabs_dma_unmap_page_enabled()) {
 		for_each_sgtable_dma_sg(sgt, sg, i)
-			trace_habanalabs_dma_unmap_page(hdev->dev, page_to_phys(sg_page(sg)),
+			trace_habanalabs_dma_unmap_page(&(hdev)->pdev->dev,
+					page_to_phys(sg_page(sg)),
 					sg->dma_address - prop->device_dma_offset_for_host_access,
 #ifdef CONFIG_NEED_SG_DMA_LENGTH
 					sg->dma_length,
@@ -439,16 +442,19 @@ static void print_idle_status_mask(struct hl_device *hdev, const char *message,
 				u64 idle_mask[HL_BUSY_ENGINES_MASK_EXT_SIZE])
 {
 	if (idle_mask[3])
-		dev_err(hdev->dev, "%s (mask %#llx_%016llx_%016llx_%016llx)\n",
-			message, idle_mask[3], idle_mask[2], idle_mask[1], idle_mask[0]);
+		dev_err(hdev->dev, "%s %s (mask %#llx_%016llx_%016llx_%016llx)\n",
+			dev_name(&hdev->pdev->dev), message,
+			idle_mask[3], idle_mask[2], idle_mask[1], idle_mask[0]);
 	else if (idle_mask[2])
-		dev_err(hdev->dev, "%s (mask %#llx_%016llx_%016llx)\n",
-			message, idle_mask[2], idle_mask[1], idle_mask[0]);
+		dev_err(hdev->dev, "%s %s (mask %#llx_%016llx_%016llx)\n",
+			dev_name(&hdev->pdev->dev), message,
+			idle_mask[2], idle_mask[1], idle_mask[0]);
 	else if (idle_mask[1])
-		dev_err(hdev->dev, "%s (mask %#llx_%016llx)\n",
-			message, idle_mask[1], idle_mask[0]);
+		dev_err(hdev->dev, "%s %s (mask %#llx_%016llx)\n",
+			dev_name(&hdev->pdev->dev), message, idle_mask[1], idle_mask[0]);
 	else
-		dev_err(hdev->dev, "%s (mask %#llx)\n", message, idle_mask[0]);
+		dev_err(hdev->dev, "%s %s (mask %#llx)\n", dev_name(&hdev->pdev->dev), message,
+			idle_mask[0]);
 }
 
 static void hpriv_release(struct kref *ref)
@@ -545,7 +551,8 @@ int hl_hpriv_put(struct hl_fpriv *hpriv)
 	return kref_put(&hpriv->refcount, hpriv_release);
 }
 
-static void print_device_in_use_info(struct hl_device *hdev, const char *message)
+static void print_device_in_use_info(struct hl_device *hdev,
+				struct hl_mem_mgr_fini_stats *mm_fini_stats, const char *message)
 {
 	u32 active_cs_num, dmabuf_export_cnt;
 	bool unknown_reason = true;
@@ -569,6 +576,12 @@ static void print_device_in_use_info(struct hl_device *hdev, const char *message
 				dmabuf_export_cnt);
 	}
 
+	if (mm_fini_stats->n_busy_cb) {
+		unknown_reason = false;
+		offset += scnprintf(buf + offset, size - offset, " [%u live CB handles]",
+				mm_fini_stats->n_busy_cb);
+	}
+
 	if (unknown_reason)
 		scnprintf(buf + offset, size - offset, " [unknown reason]");
 
@@ -586,6 +599,7 @@ void hl_device_release(struct drm_device *ddev, struct drm_file *file_priv)
 {
 	struct hl_fpriv *hpriv = file_priv->driver_priv;
 	struct hl_device *hdev = to_hl_device(ddev);
+	struct hl_mem_mgr_fini_stats mm_fini_stats;
 
 	if (!hdev) {
 		pr_crit("Closing FD after device was removed. Memory leak will occur and it is advised to reboot.\n");
@@ -597,12 +611,13 @@ void hl_device_release(struct drm_device *ddev, struct drm_file *file_priv)
 	/* Memory buffers might be still in use at this point and thus the handles IDR destruction
 	 * is postponed to hpriv_release().
 	 */
-	hl_mem_mgr_fini(&hpriv->mem_mgr);
+	hl_mem_mgr_fini(&hpriv->mem_mgr, &mm_fini_stats);
 
 	hdev->compute_ctx_in_release = 1;
 
 	if (!hl_hpriv_put(hpriv)) {
-		print_device_in_use_info(hdev, "User process closed FD but device still in use");
+		print_device_in_use_info(hdev, &mm_fini_stats,
+			"User process closed FD but device still in use");
 		hl_device_reset(hdev, HL_DRV_RESET_HARD);
 	}
@@ -858,6 +873,10 @@ static int device_early_init(struct hl_device *hdev)
 		gaudi2_set_asic_funcs(hdev);
 		strscpy(hdev->asic_name, "GAUDI2C", sizeof(hdev->asic_name));
 		break;
+	case ASIC_GAUDI2D:
+		gaudi2_set_asic_funcs(hdev);
+		strscpy(hdev->asic_name, "GAUDI2D", sizeof(hdev->asic_name));
+		break;
 	default:
 		dev_err(hdev->dev, "Unrecognized ASIC type %d\n",
 			hdev->asic_type);
@@ -946,6 +965,8 @@ static int device_early_init(struct hl_device *hdev)
 		goto free_cb_mgr;
 	}
 
+	INIT_DELAYED_WORK(&hdev->work_heartbeat, hl_device_heartbeat);
+
 	INIT_DELAYED_WORK(&hdev->device_reset_work.reset_work, device_hard_reset_pending);
 	hdev->device_reset_work.hdev = hdev;
 	hdev->device_fini_pending = 0;
@@ -968,7 +989,7 @@ static int device_early_init(struct hl_device *hdev)
 	return 0;
 
 free_cb_mgr:
-	hl_mem_mgr_fini(&hdev->kernel_mem_mgr);
+	hl_mem_mgr_fini(&hdev->kernel_mem_mgr, NULL);
 	hl_mem_mgr_idr_destroy(&hdev->kernel_mem_mgr);
 free_chip_info:
 	kfree(hdev->hl_chip_info);
@@ -1012,7 +1033,7 @@ static void device_early_fini(struct hl_device *hdev)
 
 	mutex_destroy(&hdev->clk_throttling.lock);
 
-	hl_mem_mgr_fini(&hdev->kernel_mem_mgr);
+	hl_mem_mgr_fini(&hdev->kernel_mem_mgr, NULL);
 	hl_mem_mgr_idr_destroy(&hdev->kernel_mem_mgr);
 
 	kfree(hdev->hl_chip_info);
@@ -1045,21 +1066,55 @@ static bool is_pci_link_healthy(struct hl_device *hdev)
 	return (device_id == hdev->pdev->device);
 }
 
-static int hl_device_eq_heartbeat_check(struct hl_device *hdev)
+static void stringify_time_of_last_heartbeat(struct hl_device *hdev, char *time_str, size_t size,
+						bool is_pq_hb)
 {
+	time64_t seconds = is_pq_hb ? hdev->heartbeat_debug_info.last_pq_heartbeat_ts :
+					hdev->heartbeat_debug_info.last_eq_heartbeat_ts;
+	struct tm tm;
+
+	if (!seconds)
+		return;
+
+	time64_to_tm(seconds, 0, &tm);
+
+	snprintf(time_str, size, "%ld-%02d-%02d %02d:%02d:%02d (UTC)",
+		tm.tm_year + 1900, tm.tm_mon, tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec);
+}
+
+static bool hl_device_eq_heartbeat_received(struct hl_device *hdev)
+{
+	struct eq_heartbeat_debug_info *heartbeat_debug_info = &hdev->heartbeat_debug_info;
+	u32 cpu_q_id = heartbeat_debug_info->cpu_queue_id, pq_pi_mask = (HL_QUEUE_LENGTH << 1) - 1;
 	struct asic_fixed_properties *prop = &hdev->asic_prop;
+	char pq_time_str[64] = "N/A", eq_time_str[64] = "N/A";
 
 	if (!prop->cpucp_info.eq_health_check_supported)
-		return 0;
+		return true;
 
-	if (hdev->eq_heartbeat_received) {
-		hdev->eq_heartbeat_received = false;
-	} else {
+	if (!hdev->eq_heartbeat_received) {
 		dev_err(hdev->dev, "EQ heartbeat event was not received!\n");
-		return -EIO;
+
+		stringify_time_of_last_heartbeat(hdev, pq_time_str, sizeof(pq_time_str), true);
+		stringify_time_of_last_heartbeat(hdev, eq_time_str, sizeof(eq_time_str), false);
+		dev_err(hdev->dev,
+			"EQ: {CI %u, HB counter %u, last HB time: %s}, PQ: {PI: %u, CI: %u (%u), last HB time: %s}\n",
+			hdev->event_queue.ci,
+			heartbeat_debug_info->heartbeat_event_counter,
+			eq_time_str,
+			hdev->kernel_queues[cpu_q_id].pi,
+			atomic_read(&hdev->kernel_queues[cpu_q_id].ci),
+			atomic_read(&hdev->kernel_queues[cpu_q_id].ci) & pq_pi_mask,
+			pq_time_str);
+
+		hl_eq_dump(hdev, &hdev->event_queue);
+
+		return false;
 	}
 
-	return 0;
+	hdev->eq_heartbeat_received = false;
+
+	return true;
 }
 
 static void hl_device_heartbeat(struct work_struct *work)
@@ -1078,7 +1133,7 @@ static void hl_device_heartbeat(struct work_struct *work)
 	 * in order to validate the eq is working.
 	 * Only if both the EQ is healthy and we managed to send the next heartbeat reschedule.
 	 */
-	if ((!hl_device_eq_heartbeat_check(hdev)) && (!hdev->asic_funcs->send_heartbeat(hdev)))
+	if (hl_device_eq_heartbeat_received(hdev) && (!hdev->asic_funcs->send_heartbeat(hdev)))
 		goto reschedule;
 
 	if (hl_device_operational(hdev, NULL))
@@ -1132,21 +1187,6 @@ static int device_late_init(struct hl_device *hdev)
 	}
 
 	hdev->high_pll = hdev->asic_prop.high_pll;
-
-	if (hdev->heartbeat) {
-		/*
-		 * Before scheduling the heartbeat driver will check if eq event has received.
-		 * for the first schedule we need to set the indication as true then for the next
-		 * one this indication will be true only if eq event was sent by FW.
-		 */
-		hdev->eq_heartbeat_received = true;
-
-		INIT_DELAYED_WORK(&hdev->work_heartbeat, hl_device_heartbeat);
-
-		schedule_delayed_work(&hdev->work_heartbeat,
-				usecs_to_jiffies(HL_HEARTBEAT_PER_USEC));
-	}
-
 	hdev->late_init_done = true;
 
 	return 0;
@@ -1163,9 +1203,6 @@ static void device_late_fini(struct hl_device *hdev)
 	if (!hdev->late_init_done)
 		return;
 
-	if (hdev->heartbeat)
-		cancel_delayed_work_sync(&hdev->work_heartbeat);
-
 	if (hdev->asic_funcs->late_fini)
 		hdev->asic_funcs->late_fini(hdev);
 
@@ -1266,8 +1303,12 @@ static void hl_abort_waiting_for_completions(struct hl_device *hdev)
 static void cleanup_resources(struct hl_device *hdev, bool hard_reset, bool fw_reset,
 				bool skip_wq_flush)
 {
-	if (hard_reset)
+	if (hard_reset) {
+		if (hdev->heartbeat)
+			cancel_delayed_work_sync(&hdev->work_heartbeat);
+
 		device_late_fini(hdev);
+	}
 
 	/*
 	 * Halt the engines and disable interrupts so we won't get any more
@@ -1495,15 +1536,14 @@ static void send_disable_pci_access(struct hl_device *hdev, u32 flags)
 		 * of heartbeat, the device CPU is marked as disable
 		 * so this message won't be sent
 		 */
-		if (hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS, 0x0)) {
-			dev_warn(hdev->dev, "Failed to disable FW's PCI access\n");
+		if (hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS, 0x0))
 			return;
-		}
 
-		/* verify that last EQs are handled before disabled is set */
+		/* disable_irq also generates sync irq, this verifies that last EQs are handled
+		 * before disabled is set. The IRQ will be enabled again in request_irq call.
+		 */
 		if (hdev->cpu_queues_enable)
-			synchronize_irq(pci_irq_vector(hdev->pdev,
-					hdev->asic_prop.eq_interrupt_id));
+			disable_irq(pci_irq_vector(hdev->pdev, hdev->asic_prop.eq_interrupt_id));
 	}
 }
@@ -1547,6 +1587,31 @@ static void handle_reset_trigger(struct hl_device *hdev, u32 flags)
 	}
 }
 
+static void reset_heartbeat_debug_info(struct hl_device *hdev)
+{
+	hdev->heartbeat_debug_info.last_pq_heartbeat_ts = 0;
+	hdev->heartbeat_debug_info.last_eq_heartbeat_ts = 0;
+	hdev->heartbeat_debug_info.heartbeat_event_counter = 0;
+}
+
+static inline void device_heartbeat_schedule(struct hl_device *hdev)
+{
+	if (!hdev->heartbeat)
+		return;
+
+	reset_heartbeat_debug_info(hdev);
+
+	/*
+	 * Before scheduling the heartbeat driver will check if eq event has received.
+	 * for the first schedule we need to set the indication as true then for the next
+	 * one this indication will be true only if eq event was sent by FW.
+	 */
+	hdev->eq_heartbeat_received = true;
+
+	schedule_delayed_work(&hdev->work_heartbeat,
+			usecs_to_jiffies(HL_HEARTBEAT_PER_USEC));
+}
+
 /*
  * hl_device_reset - reset the device
  *
@@ -1916,6 +1981,8 @@ kill_processes:
 	if (hard_reset) {
 		hdev->reset_info.hard_reset_cnt++;
 
+		device_heartbeat_schedule(hdev);
+
 		/* After reset is done, we are ready to receive events from
 		 * the F/W. We can't do it before because we will ignore events
 		 * and if those events are fatal, we won't know about it and
@@ -2350,6 +2417,12 @@ int hl_device_init(struct hl_device *hdev)
 		goto out_disabled;
 	}
 
+	/* Scheduling the EQ heartbeat thread must come after driver is done with all
+	 * initializations, as we want to make sure the FW gets enough time to be prepared
+	 * to respond to heartbeat packets.
+	 */
+	device_heartbeat_schedule(hdev);
+
 	dev_notice(hdev->dev,
 		"Successfully added device %s to habanalabs driver\n",
 		dev_name(&(hdev)->pdev->dev));
@@ -2592,7 +2665,7 @@ inline u32 hl_rreg(struct hl_device *hdev, u32 reg)
 	u32 val = readl(hdev->rmmio + reg);
 
 	if (unlikely(trace_habanalabs_rreg32_enabled()))
-		trace_habanalabs_rreg32(hdev->dev, reg, val);
+		trace_habanalabs_rreg32(&(hdev)->pdev->dev, reg, val);
 
 	return val;
 }
@@ -2610,7 +2683,7 @@ inline u32 hl_rreg(struct hl_device *hdev, u32 reg)
 inline void hl_wreg(struct hl_device *hdev, u32 reg, u32 val)
 {
 	if (unlikely(trace_habanalabs_wreg32_enabled()))
-		trace_habanalabs_wreg32(hdev->dev, reg, val);
+		trace_habanalabs_wreg32(&(hdev)->pdev->dev, reg, val);
 
 	writel(val, hdev->rmmio + reg);
 }
@@ -2836,3 +2909,56 @@ void hl_set_irq_affinity(struct hl_device *hdev, int irq)
 	if (irq_set_affinity_and_hint(irq, &hdev->irq_affinity_mask))
 		dev_err(hdev->dev, "Failed setting irq %d affinity\n", irq);
 }
+
+void hl_eq_heartbeat_event_handle(struct hl_device *hdev)
+{
+	hdev->heartbeat_debug_info.heartbeat_event_counter++;
+	hdev->heartbeat_debug_info.last_eq_heartbeat_ts = ktime_get_real_seconds();
+	hdev->eq_heartbeat_received = true;
+}
+
+void hl_handle_clk_change_event(struct hl_device *hdev, u16 event_type, u64 *event_mask)
+{
+	struct hl_clk_throttle *clk_throttle = &hdev->clk_throttling;
+	ktime_t zero_time = ktime_set(0, 0);
+
+	mutex_lock(&clk_throttle->lock);
+
+	switch (event_type) {
+	case EQ_EVENT_POWER_EVT_START:
+		clk_throttle->current_reason |= HL_CLK_THROTTLE_POWER;
+		clk_throttle->aggregated_reason |= HL_CLK_THROTTLE_POWER;
+		clk_throttle->timestamp[HL_CLK_THROTTLE_TYPE_POWER].start = ktime_get();
+		clk_throttle->timestamp[HL_CLK_THROTTLE_TYPE_POWER].end = zero_time;
+		dev_dbg_ratelimited(hdev->dev, "Clock throttling due to power consumption\n");
+		break;
+
+	case EQ_EVENT_POWER_EVT_END:
+		clk_throttle->current_reason &= ~HL_CLK_THROTTLE_POWER;
+		clk_throttle->timestamp[HL_CLK_THROTTLE_TYPE_POWER].end = ktime_get();
+		dev_dbg_ratelimited(hdev->dev, "Power envelop is safe, back to optimal clock\n");
+		break;
+
+	case EQ_EVENT_THERMAL_EVT_START:
+		clk_throttle->current_reason |= HL_CLK_THROTTLE_THERMAL;
+		clk_throttle->aggregated_reason |= HL_CLK_THROTTLE_THERMAL;
+		clk_throttle->timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].start = ktime_get();
+		clk_throttle->timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].end = zero_time;
+		*event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
+		dev_info_ratelimited(hdev->dev, "Clock throttling due to overheating\n");
+		break;
+
+	case EQ_EVENT_THERMAL_EVT_END:
+		clk_throttle->current_reason &= ~HL_CLK_THROTTLE_THERMAL;
+		clk_throttle->timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].end = ktime_get();
+		*event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
+		dev_info_ratelimited(hdev->dev, "Thermal envelop is safe, back to optimal clock\n");
+		break;
+
+	default:
+		dev_err(hdev->dev, "Received invalid clock change event %d\n", event_type);
+		break;
+	}
+
+	mutex_unlock(&clk_throttle->lock);
+}

--- a/drivers/accel/habanalabs/common/firmware_if.c
+++ b/drivers/accel/habanalabs/common/firmware_if.c

@@ -8,6 +8,7 @@
 
 #include "habanalabs.h"
 #include <linux/habanalabs/hl_boot_if.h>
+#include <linux/pci.h>
 
 #include <linux/firmware.h>
 #include <linux/crc32.h>
 #include <linux/slab.h>
@@ -40,6 +41,31 @@ static char *comms_sts_str_arr[COMMS_STS_INVLD_LAST] = {
 	[COMMS_STS_TIMEOUT_ERR] = __stringify(COMMS_STS_TIMEOUT_ERR),
 };
 
+/**
+ * hl_fw_version_cmp() - compares the FW version to a specific version
+ *
+ * @hdev: pointer to hl_device structure
+ * @major: major number of a reference version
+ * @minor: minor number of a reference version
+ * @subminor: sub-minor number of a reference version
+ *
+ * Return 1 if FW version greater than the reference version, -1 if it's
+ *         smaller and 0 if versions are identical.
+ */
+int hl_fw_version_cmp(struct hl_device *hdev, u32 major, u32 minor, u32 subminor)
+{
+	if (hdev->fw_sw_major_ver != major)
+		return (hdev->fw_sw_major_ver > major) ? 1 : -1;
+
+	if (hdev->fw_sw_minor_ver != minor)
+		return (hdev->fw_sw_minor_ver > minor) ? 1 : -1;
+
+	if (hdev->fw_sw_sub_minor_ver != subminor)
+		return (hdev->fw_sw_sub_minor_ver > subminor) ? 1 : -1;
+
+	return 0;
+}
+
 static char *extract_fw_ver_from_str(const char *fw_str)
 {
 	char *str, *fw_ver, *whitespace;
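A minimal caller sketch (not part of this merge; the version numbers are placeholders)
showing how hl_fw_version_cmp() subsumes the hl_is_fw_sw_ver_below() and
hl_is_fw_sw_ver_equal_or_greater() helpers removed from habanalabs.h later in this diff:

	/* hypothetical feature gate, assuming the feature lands in FW 1.13.0 */
	if (hl_fw_version_cmp(hdev, 1, 13, 0) >= 0)
		use_new_feature = true;	/* FW is 1.13.0 or newer */

	/* equivalent of the removed hl_is_fw_sw_ver_below(hdev, 1, 12) */
	if (hl_fw_version_cmp(hdev, 1, 12, 0) < 0)
		return -EOPNOTSUPP;	/* FW older than 1.12.0 */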
@@ -345,43 +371,63 @@ int hl_fw_load_fw_to_device(struct hl_device *hdev, const char *fw_name,
 int hl_fw_send_pci_access_msg(struct hl_device *hdev, u32 opcode, u64 value)
 {
 	struct cpucp_packet pkt = {};
+	int rc;
 
 	pkt.ctl = cpu_to_le32(opcode << CPUCP_PKT_CTL_OPCODE_SHIFT);
 	pkt.value = cpu_to_le64(value);
 
-	return hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, NULL);
+	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, NULL);
+	if (rc)
+		dev_err(hdev->dev, "Failed to disable FW's PCI access\n");
+
+	return rc;
 }
 
+/**
+ * hl_fw_send_cpu_message() - send CPU message to the device.
+ *
+ * @hdev: pointer to hl_device structure.
+ * @hw_queue_id: HW queue ID
+ * @msg: raw data of the message/packet
+ * @size: size of @msg in bytes
+ * @timeout_us: timeout in usec to wait for CPU reply on the message
+ * @result: return code reported by FW
+ *
+ * send message to the device CPU.
+ *
+ * Return: 0 on success, non-zero for failure.
+ *	-ENOMEM: memory allocation failure
+ *	-EAGAIN: CPU is disabled (try again when enabled)
+ *	-ETIMEDOUT: timeout waiting for FW response
+ *	-EIO: protocol error
+ */
 int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
-				u16 len, u32 timeout, u64 *result)
+				u16 size, u32 timeout_us, u64 *result)
 {
 	struct hl_hw_queue *queue = &hdev->kernel_queues[hw_queue_id];
 	struct asic_fixed_properties *prop = &hdev->asic_prop;
+	u32 tmp, expected_ack_val, pi, opcode;
 	struct cpucp_packet *pkt;
 	dma_addr_t pkt_dma_addr;
 	struct hl_bd *sent_bd;
-	u32 tmp, expected_ack_val, pi, opcode;
-	int rc;
+	int rc = 0, fw_rc;
 
-	pkt = hl_cpu_accessible_dma_pool_alloc(hdev, len, &pkt_dma_addr);
+	pkt = hl_cpu_accessible_dma_pool_alloc(hdev, size, &pkt_dma_addr);
 	if (!pkt) {
-		dev_err(hdev->dev,
-			"Failed to allocate DMA memory for packet to CPU\n");
+		dev_err(hdev->dev, "Failed to allocate DMA memory for packet to CPU\n");
 		return -ENOMEM;
 	}
 
-	memcpy(pkt, msg, len);
+	memcpy(pkt, msg, size);
 
 	mutex_lock(&hdev->send_cpu_message_lock);
 
 	/* CPU-CP messages can be sent during soft-reset */
-	if (hdev->disabled && !hdev->reset_info.in_compute_reset) {
-		rc = 0;
+	if (hdev->disabled && !hdev->reset_info.in_compute_reset)
 		goto out;
-	}
 
 	if (hdev->device_cpu_disabled) {
-		rc = -EIO;
+		rc = -EAGAIN;
 		goto out;
 	}
@@ -397,7 +443,7 @@ int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
 	 * Which means that we don't need to lock the access to the entire H/W
 	 * queues module when submitting a JOB to the CPU queue.
 	 */
-	hl_hw_queue_submit_bd(hdev, queue, hl_queue_inc_ptr(queue->pi), len, pkt_dma_addr);
+	hl_hw_queue_submit_bd(hdev, queue, hl_queue_inc_ptr(queue->pi), size, pkt_dma_addr);
 
 	if (prop->fw_app_cpu_boot_dev_sts0 & CPU_BOOT_DEV_STS0_PKT_PI_ACK_EN)
 		expected_ack_val = queue->pi;
@@ -406,7 +452,7 @@ int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
 
 	rc = hl_poll_timeout_memory(hdev, &pkt->fence, tmp,
 				(tmp == expected_ack_val), 1000,
-				timeout, true);
+				timeout_us, true);
 
 	hl_hw_queue_inc_ci_kernel(hdev, hw_queue_id);
 
@@ -414,19 +460,27 @@ int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
 		/* If FW performed reset just before sending it a packet, we will get a timeout.
 		 * This is expected behavior, hence no need for error message.
 		 */
-		if (!hl_device_operational(hdev, NULL) && !hdev->reset_info.in_compute_reset)
+		if (!hl_device_operational(hdev, NULL) && !hdev->reset_info.in_compute_reset) {
 			dev_dbg(hdev->dev, "Device CPU packet timeout (0x%x) due to FW reset\n",
 				tmp);
-		else
-			dev_err(hdev->dev, "Device CPU packet timeout (status = 0x%x)\n", tmp);
+		} else {
+			struct hl_bd *bd = queue->kernel_address;
+
+			bd += hl_pi_2_offset(pi);
+
+			dev_err(hdev->dev, "Device CPU packet timeout (status = 0x%x)\n"
+				"Pkt info[%u]: dma_addr: 0x%llx, kernel_addr: %p, len:0x%x, ctl: 0x%x, ptr:0x%llx, dram_bd:%u\n",
+				tmp, pi, pkt_dma_addr, (void *)pkt, bd->len, bd->ctl, bd->ptr,
+				queue->dram_bd);
+		}
+
 		hdev->device_cpu_disabled = true;
 		goto out;
 	}
 
 	tmp = le32_to_cpu(pkt->ctl);
 
-	rc = (tmp & CPUCP_PKT_CTL_RC_MASK) >> CPUCP_PKT_CTL_RC_SHIFT;
-	if (rc) {
+	fw_rc = (tmp & CPUCP_PKT_CTL_RC_MASK) >> CPUCP_PKT_CTL_RC_SHIFT;
+	if (fw_rc) {
 		opcode = (tmp & CPUCP_PKT_CTL_OPCODE_MASK) >> CPUCP_PKT_CTL_OPCODE_SHIFT;
 
 		if (!prop->supports_advanced_cpucp_rc) {
@@ -435,7 +489,7 @@ int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
 			goto scrub_descriptor;
 		}
 
-		switch (rc) {
+		switch (fw_rc) {
 		case cpucp_packet_invalid:
 			dev_err(hdev->dev,
 				"CPU packet %d is not supported by F/W\n", opcode);
@@ -460,7 +514,7 @@ int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
 
 		/* propagate the return code from the f/w to the callers who want to check it */
 		if (result)
-			*result = rc;
+			*result = fw_rc;
 
 		rc = -EIO;
 
@@ -480,7 +534,7 @@ scrub_descriptor:
 out:
 	mutex_unlock(&hdev->send_cpu_message_lock);
 
-	hl_cpu_accessible_dma_pool_free(hdev, len, pkt);
+	hl_cpu_accessible_dma_pool_free(hdev, size, pkt);
 
 	return rc;
 }
@@ -550,7 +604,7 @@ int hl_fw_unmask_irq_arr(struct hl_device *hdev, const u32 *irq_arr,
 int hl_fw_test_cpu_queue(struct hl_device *hdev)
 {
 	struct cpucp_packet test_pkt = {};
-	u64 result;
+	u64 result = 0;
 	int rc;
 
 	test_pkt.ctl = cpu_to_le32(CPUCP_PACKET_TEST <<
@@ -623,16 +677,14 @@ int hl_fw_send_device_activity(struct hl_device *hdev, bool open)
 int hl_fw_send_heartbeat(struct hl_device *hdev)
 {
 	struct cpucp_packet hb_pkt;
-	u64 result;
+	u64 result = 0;
 	int rc;
 
 	memset(&hb_pkt, 0, sizeof(hb_pkt));
-	hb_pkt.ctl = cpu_to_le32(CPUCP_PACKET_TEST <<
-					CPUCP_PKT_CTL_OPCODE_SHIFT);
+	hb_pkt.ctl = cpu_to_le32(CPUCP_PACKET_TEST << CPUCP_PKT_CTL_OPCODE_SHIFT);
 	hb_pkt.value = cpu_to_le64(CPUCP_PACKET_FENCE_VAL);
 
-	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &hb_pkt,
-						sizeof(hb_pkt), 0, &result);
+	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &hb_pkt, sizeof(hb_pkt), 0, &result);
 
 	if ((rc) || (result != CPUCP_PACKET_FENCE_VAL))
 		return -EIO;
@@ -643,6 +695,8 @@ int hl_fw_send_heartbeat(struct hl_device *hdev)
 		rc = -EIO;
 	}
 
+	hdev->heartbeat_debug_info.last_pq_heartbeat_ts = ktime_get_real_seconds();
+
 	return rc;
 }
@@ -885,7 +939,7 @@ static int hl_fw_send_msi_info_msg(struct hl_device *hdev)
 {
 	struct cpucp_array_data_packet *pkt;
 	size_t total_pkt_size, data_size;
-	u64 result;
+	u64 result = 0;
 	int rc;
 
 	/* skip sending this info for unsupported ASICs */
@@ -976,11 +1030,10 @@ int hl_fw_get_eeprom_data(struct hl_device *hdev, void *data, size_t max_size)
 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
 					HL_CPUCP_EEPROM_TIMEOUT_USEC, &result);
 	if (rc) {
-		dev_err(hdev->dev,
-			"Failed to handle CPU-CP EEPROM packet, error %d\n",
-			rc);
+		if (rc != -EAGAIN)
+			dev_err(hdev->dev,
+				"Failed to handle CPU-CP EEPROM packet, error %d\n", rc);
 		goto out;
 	}
 
@@ -1021,7 +1074,9 @@ int hl_fw_get_monitor_dump(struct hl_device *hdev, void *data)
 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
 					HL_CPUCP_MON_DUMP_TIMEOUT_USEC, &result);
 	if (rc) {
-		dev_err(hdev->dev, "Failed to handle CPU-CP monitor-dump packet, error %d\n", rc);
+		if (rc != -EAGAIN)
+			dev_err(hdev->dev,
+				"Failed to handle CPU-CP monitor-dump packet, error %d\n", rc);
 		goto out;
 	}
 
@@ -1055,8 +1110,9 @@ int hl_fw_cpucp_pci_counters_get(struct hl_device *hdev,
 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
 					HL_CPUCP_INFO_TIMEOUT_USEC, &result);
 	if (rc) {
-		dev_err(hdev->dev,
-			"Failed to handle CPU-CP PCI info pkt, error %d\n", rc);
+		if (rc != -EAGAIN)
+			dev_err(hdev->dev,
+				"Failed to handle CPU-CP PCI info pkt, error %d\n", rc);
 		return rc;
 	}
 	counters->rx_throughput = result;
@@ -1070,8 +1126,9 @@ int hl_fw_cpucp_pci_counters_get(struct hl_device *hdev,
 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
 					HL_CPUCP_INFO_TIMEOUT_USEC, &result);
 	if (rc) {
-		dev_err(hdev->dev,
-			"Failed to handle CPU-CP PCI info pkt, error %d\n", rc);
+		if (rc != -EAGAIN)
+			dev_err(hdev->dev,
+				"Failed to handle CPU-CP PCI info pkt, error %d\n", rc);
 		return rc;
 	}
 	counters->tx_throughput = result;
@@ -1084,8 +1141,9 @@ int hl_fw_cpucp_pci_counters_get(struct hl_device *hdev,
 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
 					HL_CPUCP_INFO_TIMEOUT_USEC, &result);
 	if (rc) {
-		dev_err(hdev->dev,
-			"Failed to handle CPU-CP PCI info pkt, error %d\n", rc);
+		if (rc != -EAGAIN)
+			dev_err(hdev->dev,
+				"Failed to handle CPU-CP PCI info pkt, error %d\n", rc);
 		return rc;
 	}
 	counters->replay_cnt = (u32) result;
@@ -1105,9 +1163,9 @@ int hl_fw_cpucp_total_energy_get(struct hl_device *hdev, u64 *total_energy)
 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
 					HL_CPUCP_INFO_TIMEOUT_USEC, &result);
 	if (rc) {
-		dev_err(hdev->dev,
-			"Failed to handle CpuCP total energy pkt, error %d\n",
-			rc);
+		if (rc != -EAGAIN)
+			dev_err(hdev->dev,
+				"Failed to handle CpuCP total energy pkt, error %d\n", rc);
 		return rc;
 	}
 
@@ -1183,7 +1241,8 @@ int hl_fw_cpucp_pll_info_get(struct hl_device *hdev, u32 pll_index,
 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
 					HL_CPUCP_INFO_TIMEOUT_USEC, &result);
 	if (rc) {
-		dev_err(hdev->dev, "Failed to read PLL info, error %d\n", rc);
+		if (rc != -EAGAIN)
+			dev_err(hdev->dev, "Failed to read PLL info, error %d\n", rc);
 		return rc;
 	}
 
@@ -1210,7 +1269,8 @@ int hl_fw_cpucp_power_get(struct hl_device *hdev, u64 *power)
 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
 					HL_CPUCP_INFO_TIMEOUT_USEC, &result);
 	if (rc) {
-		dev_err(hdev->dev, "Failed to read power, error %d\n", rc);
+		if (rc != -EAGAIN)
+			dev_err(hdev->dev, "Failed to read power, error %d\n", rc);
 		return rc;
 	}
 
@@ -1247,8 +1307,9 @@ int hl_fw_dram_replaced_row_get(struct hl_device *hdev,
 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
 					HL_CPUCP_INFO_TIMEOUT_USEC, &result);
 	if (rc) {
-		dev_err(hdev->dev,
-			"Failed to handle CPU-CP replaced rows info pkt, error %d\n", rc);
+		if (rc != -EAGAIN)
+			dev_err(hdev->dev,
+				"Failed to handle CPU-CP replaced rows info pkt, error %d\n", rc);
 		goto out;
 	}
 
@@ -1273,7 +1334,8 @@ int hl_fw_dram_pending_row_get(struct hl_device *hdev, u32 *pend_rows_num)
 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, &result);
 	if (rc) {
-		dev_err(hdev->dev,
-			"Failed to handle CPU-CP pending rows info pkt, error %d\n", rc);
+		if (rc != -EAGAIN)
+			dev_err(hdev->dev,
+				"Failed to handle CPU-CP pending rows info pkt, error %d\n", rc);
 		goto out;
 	}
@@ -1428,7 +1490,7 @@ int hl_fw_wait_preboot_ready(struct hl_device *hdev)
 {
 	struct pre_fw_load_props *pre_fw_load = &hdev->fw_loader.pre_fw_load;
 	u32 status = 0, timeout;
-	int rc, tries = 1;
+	int rc, tries = 1, fw_err = 0;
 	bool preboot_still_runs;
 
 	/* Need to check two possible scenarios:
@@ -1468,18 +1530,18 @@ retry:
 		}
 	}
 
-	if (rc) {
+	/* If we read all FF, then something is totally wrong, no point
+	 * of reading specific errors
+	 */
+	if (status != -1)
+		fw_err = fw_read_errors(hdev, pre_fw_load->boot_err0_reg,
+					pre_fw_load->boot_err1_reg,
+					pre_fw_load->sts_boot_dev_sts0_reg,
+					pre_fw_load->sts_boot_dev_sts1_reg);
+
+	if (rc || fw_err) {
 		detect_cpu_boot_status(hdev, status);
-		dev_err(hdev->dev, "CPU boot ready timeout (status = %d)\n", status);
-
-		/* If we read all FF, then something is totally wrong, no point
-		 * of reading specific errors
-		 */
-		if (status != -1)
-			fw_read_errors(hdev, pre_fw_load->boot_err0_reg,
-					pre_fw_load->boot_err1_reg,
-					pre_fw_load->sts_boot_dev_sts0_reg,
-					pre_fw_load->sts_boot_dev_sts1_reg);
-
+		dev_err(hdev->dev, "CPU boot %s (status = %d)\n",
+			fw_err ? "failed due to an error" : "ready timeout", status);
 		return -EIO;
 	}
@@ -1750,7 +1812,7 @@ static void hl_fw_dynamic_send_cmd(struct hl_device *hdev,
 	val = FIELD_PREP(COMMS_COMMAND_CMD_MASK, cmd);
 	val |= FIELD_PREP(COMMS_COMMAND_SIZE_MASK, size);
 
-	trace_habanalabs_comms_send_cmd(hdev->dev, comms_cmd_str_arr[cmd]);
+	trace_habanalabs_comms_send_cmd(&hdev->pdev->dev, comms_cmd_str_arr[cmd]);
 	WREG32(le32_to_cpu(dyn_regs->kmd_msg_to_cpu), val);
 }
 
@@ -1808,7 +1870,7 @@ static int hl_fw_dynamic_wait_for_status(struct hl_device *hdev,
 
 	dyn_regs = &fw_loader->dynamic_loader.comm_desc.cpu_dyn_regs;
 
-	trace_habanalabs_comms_wait_status(hdev->dev, comms_sts_str_arr[expected_status]);
+	trace_habanalabs_comms_wait_status(&hdev->pdev->dev, comms_sts_str_arr[expected_status]);
 
 	/* Wait for expected status */
 	rc = hl_poll_timeout(
@@ -1825,7 +1887,8 @@ static int hl_fw_dynamic_wait_for_status(struct hl_device *hdev,
 		return -EIO;
 	}
 
-	trace_habanalabs_comms_wait_status_done(hdev->dev, comms_sts_str_arr[expected_status]);
+	trace_habanalabs_comms_wait_status_done(&hdev->pdev->dev,
+						comms_sts_str_arr[expected_status]);
 
 	/*
 	 * skip storing FW response for NOOP to preserve the actual desired
@@ -1899,7 +1962,7 @@ int hl_fw_dynamic_send_protocol_cmd(struct hl_device *hdev,
 {
 	int rc;
 
-	trace_habanalabs_comms_protocol_cmd(hdev->dev, comms_cmd_str_arr[cmd]);
+	trace_habanalabs_comms_protocol_cmd(&hdev->pdev->dev, comms_cmd_str_arr[cmd]);
 
 	/* first send clear command to clean former commands */
 	rc = hl_fw_dynamic_send_clear_cmd(hdev, fw_loader);
@@ -2038,7 +2101,7 @@ static int hl_fw_dynamic_validate_descriptor(struct hl_device *hdev,
 	 * note that no alignment/stride address issues here as all structures
 	 * are 64 bit padded.
 	 */
-	data_ptr = (u8 *)fw_desc + sizeof(struct comms_desc_header);
+	data_ptr = (u8 *)fw_desc + sizeof(struct comms_msg_header);
 	data_size = le16_to_cpu(fw_desc->header.size);
 
 	data_crc32 = hl_fw_compat_crc32(data_ptr, data_size);
@@ -2192,11 +2255,11 @@ static int hl_fw_dynamic_read_and_validate_descriptor(struct hl_device *hdev,
 	memcpy_fromio(fw_desc, src, sizeof(struct lkd_fw_comms_desc));
 	fw_data_size = le16_to_cpu(fw_desc->header.size);
 
-	temp_fw_desc = vzalloc(sizeof(struct comms_desc_header) + fw_data_size);
+	temp_fw_desc = vzalloc(sizeof(struct comms_msg_header) + fw_data_size);
 	if (!temp_fw_desc)
 		return -ENOMEM;
 
-	memcpy_fromio(temp_fw_desc, src, sizeof(struct comms_desc_header) + fw_data_size);
+	memcpy_fromio(temp_fw_desc, src, sizeof(struct comms_msg_header) + fw_data_size);
 
 	rc = hl_fw_dynamic_validate_descriptor(hdev, fw_loader,
 						(struct lkd_fw_comms_desc *) temp_fw_desc);
@@ -3122,10 +3185,10 @@ long hl_fw_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr)
 	pkt.pll_index = cpu_to_le32((u32)used_pll_idx);
 
 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, &result);
-
 	if (rc) {
-		dev_err(hdev->dev, "Failed to get frequency of PLL %d, error %d\n",
-			used_pll_idx, rc);
+		if (rc != -EAGAIN)
+			dev_err(hdev->dev, "Failed to get frequency of PLL %d, error %d\n",
+				used_pll_idx, rc);
 		return rc;
 	}
 
@@ -3149,8 +3212,7 @@ void hl_fw_set_frequency(struct hl_device *hdev, u32 pll_index, u64 freq)
 	pkt.value = cpu_to_le64(freq);
 
 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, NULL);
-
-	if (rc)
+	if (rc && rc != -EAGAIN)
 		dev_err(hdev->dev, "Failed to set frequency to PLL %d, error %d\n",
 			used_pll_idx, rc);
 }
@@ -3166,9 +3228,9 @@ long hl_fw_get_max_power(struct hl_device *hdev)
 	pkt.ctl = cpu_to_le32(CPUCP_PACKET_MAX_POWER_GET << CPUCP_PKT_CTL_OPCODE_SHIFT);
 
 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, &result);
-
 	if (rc) {
-		dev_err(hdev->dev, "Failed to get max power, error %d\n", rc);
+		if (rc != -EAGAIN)
+			dev_err(hdev->dev, "Failed to get max power, error %d\n", rc);
 		return rc;
 	}
 
@@ -3190,8 +3252,7 @@ void hl_fw_set_max_power(struct hl_device *hdev)
 	pkt.value = cpu_to_le64(hdev->max_power);
 
 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, NULL);
-
-	if (rc)
+	if (rc && rc != -EAGAIN)
 		dev_err(hdev->dev, "Failed to set max power, error %d\n", rc);
 }
 
@@ -3217,11 +3278,11 @@ static int hl_fw_get_sec_attest_data(struct hl_device *hdev, u32 packet_id, void
 	pkt.data_max_size = cpu_to_le32(size);
 	pkt.nonce = cpu_to_le32(nonce);
 
-	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
-					timeout, NULL);
+	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), timeout, NULL);
 	if (rc) {
-		dev_err(hdev->dev,
-			"Failed to handle CPU-CP pkt %u, error %d\n", packet_id, rc);
+		if (rc != -EAGAIN)
+			dev_err(hdev->dev,
+				"Failed to handle CPU-CP pkt %u, error %d\n", packet_id, rc);
 		goto out;
 	}
 
@@ -3263,10 +3324,12 @@ int hl_fw_send_generic_request(struct hl_device *hdev, enum hl_passthrough_type
 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *)&pkt, sizeof(pkt),
 					HL_CPUCP_INFO_TIMEOUT_USEC, &result);
-	if (rc)
-		dev_err(hdev->dev, "failed to send CPUCP data of generic fw pkt\n");
-	else
+	if (rc) {
+		if (rc != -EAGAIN)
+			dev_err(hdev->dev, "failed to send CPUCP data of generic fw pkt\n");
+	} else {
 		dev_dbg(hdev->dev, "generic pkt was successful, result: 0x%llx\n", result);
+	}
 
 	*size = (u32)result;

--- a/drivers/accel/habanalabs/common/habanalabs.h
+++ b/drivers/accel/habanalabs/common/habanalabs.h

@@ -71,7 +71,7 @@ struct hl_fpriv;
 
 #define HL_DEVICE_TIMEOUT_USEC		1000000 /* 1 s */
 
-#define HL_HEARTBEAT_PER_USEC		5000000 /* 5 s */
+#define HL_HEARTBEAT_PER_USEC		10000000 /* 10 s */
 
 #define HL_PLL_LOW_JOB_FREQ_USEC	5000000 /* 5 s */
 
@@ -651,6 +651,8 @@ struct hl_hints_range {
  * @hbw_flush_reg: register to read to generate HBW flush. value of 0 means HBW flush is
  *                 not supported.
  * @reserved_fw_mem_size: size of dram memory reserved for FW.
+ * @fw_event_queue_size: queue size for events from CPU-CP.
+ *                       A value of 0 means using the default HL_EQ_SIZE_IN_BYTES value.
  * @collective_first_sob: first sync object available for collective use
  * @collective_first_mon: first monitor available for collective use
  * @sync_stream_first_sob: first sync object available for sync stream use
@@ -782,6 +784,7 @@ struct asic_fixed_properties {
 	u32			glbl_err_max_cause_num;
 	u32			hbw_flush_reg;
 	u32			reserved_fw_mem_size;
+	u32			fw_event_queue_size;
 	u16			collective_first_sob;
 	u16			collective_first_mon;
 	u16			sync_stream_first_sob;
@@ -901,6 +904,18 @@ struct hl_mem_mgr {
 	struct idr handles;
 };
 
+/**
+ * struct hl_mem_mgr_fini_stats - describes statistics returned during memory manager teardown.
+ * @n_busy_cb: the amount of CB handles that could not be removed
+ * @n_busy_ts: the amount of TS handles that could not be removed
+ * @n_busy_other: the amount of any other type of handles that could not be removed
+ */
+struct hl_mem_mgr_fini_stats {
+	u32 n_busy_cb;
+	u32 n_busy_ts;
+	u32 n_busy_other;
+};
+
 /**
  * struct hl_mmap_mem_buf_behavior - describes unified memory manager buffer behavior
  * @topic: string identifier used for logging
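A rough sketch of how the teardown path might classify a still-alive buffer into these
counters, assuming hl_mem_mgr_fini() walks the handles IDR and keys off the driver's
existing HL_MMAP_TYPE_* flags (the real accounting lives in memory_mgr.c, which this
merge does not show):

	/* hypothetical helper, not part of the patch */
	static void count_busy_handle(struct hl_mem_mgr_fini_stats *stats, u64 mem_id)
	{
		if (mem_id & HL_MMAP_TYPE_CB)
			stats->n_busy_cb++;
		else if (mem_id & HL_MMAP_TYPE_TS_BUFF)
			stats->n_busy_ts++;
		else
			stats->n_busy_other++;
	}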
@@ -1229,6 +1244,7 @@ struct hl_user_pending_interrupt {
  * @hdev: pointer to the device structure
  * @kernel_address: holds the queue's kernel virtual address
  * @bus_address: holds the queue's DMA address
+ * @size: the event queue size
  * @ci: ci inside the queue
  * @prev_eqe_index: the index of the previous event queue entry. The index of
  *                  the current entry's index must be +1 of the previous one.
@@ -1240,6 +1256,7 @@ struct hl_eq {
 	struct hl_device	*hdev;
 	void			*kernel_address;
 	dma_addr_t		bus_address;
+	u32			size;
 	u32			ci;
 	u32			prev_eqe_index;
 	bool			check_eqe_index;
@@ -1268,15 +1285,18 @@ struct hl_dec {
  * @ASIC_GAUDI2: Gaudi2 device.
  * @ASIC_GAUDI2B: Gaudi2B device.
  * @ASIC_GAUDI2C: Gaudi2C device.
+ * @ASIC_GAUDI2D: Gaudi2D device.
  */
 enum hl_asic_type {
 	ASIC_INVALID,
 	ASIC_GOYA,
 	ASIC_GAUDI,
 	ASIC_GAUDI_SEC,
 	ASIC_GAUDI2,
 	ASIC_GAUDI2B,
 	ASIC_GAUDI2C,
+	ASIC_GAUDI2D,
 };
 
 struct hl_cs_parser;
@@ -2709,11 +2729,16 @@ void hl_wreg(struct hl_device *hdev, u32 reg, u32 val);
  * updated directly by the device. If false, the host memory being polled will
  * be updated by host CPU. Required so host knows whether or not the memory
  * might need to be byte-swapped before returning value to caller.
+ *
+ * On the first 4 polling iterations the macro goes to sleep for short period of
+ * time that gradually increases and reaches sleep_us on the fifth iteration.
  */
 #define hl_poll_timeout_memory(hdev, addr, val, cond, sleep_us, timeout_us, \
 				mem_written_by_device) \
 ({ \
+	u64 __sleep_step_us; \
 	ktime_t __timeout; \
+	u8 __step = 8; \
 	\
 	__timeout = ktime_add_us(ktime_get(), timeout_us); \
 	might_sleep_if(sleep_us); \
@@ -2731,8 +2756,10 @@ void hl_wreg(struct hl_device *hdev, u32 reg, u32 val);
 			(val) = le32_to_cpu(*(__le32 *) &(val)); \
 			break; \
 		} \
-		if (sleep_us) \
-			usleep_range((sleep_us >> 2) + 1, sleep_us); \
+		__sleep_step_us = sleep_us >> __step; \
+		if (__sleep_step_us) \
+			usleep_range((__sleep_step_us >> 2) + 1, __sleep_step_us); \
+		__step >>= 1; \
 	} \
 	(cond) ? 0 : -ETIMEDOUT; \
 })
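A worked trace of the new back-off (not from the patch; sleep_us = 1000 is an arbitrary
caller value): __step starts at 8 and is halved after every iteration, so the sleep
target grows geometrically until it reaches the caller's sleep_us:

	/* sleep_us = 1000: per-iteration sleep target in usec */
	/* iteration 1:  1000 >> 8 = 3    */
	/* iteration 2:  1000 >> 4 = 62   */
	/* iteration 3:  1000 >> 2 = 250  */
	/* iteration 4:  1000 >> 1 = 500  */
	/* iteration 5+: 1000 >> 0 = 1000 (full sleep_us, matching the comment above) */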
@ -3174,6 +3201,21 @@ struct hl_reset_info {
u8 watchdog_active; u8 watchdog_active;
}; };
/**
* struct eq_heartbeat_debug_info - stores debug info to be used upon heartbeat failure.
* @last_pq_heartbeat_ts: timestamp of the last test packet that was sent to FW.
* This packet is the trigger in FW to send the EQ heartbeat event.
* @last_eq_heartbeat_ts: timestamp of the last EQ heartbeat event that was received from FW.
* @heartbeat_event_counter: number of heartbeat events received.
* @cpu_queue_id: ID of the CPU queue, used to read the queue's pi/ci.
*/
struct eq_heartbeat_debug_info {
time64_t last_pq_heartbeat_ts;
time64_t last_eq_heartbeat_ts;
u32 heartbeat_event_counter;
u32 cpu_queue_id;
};
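
As a rough sketch of how these counters could be reported once a heartbeat failure is detected (the helper name and call site are hypothetical; ktime_get_real_seconds() yields the same time64_t domain as the stored timestamps):

  static void hl_dump_eq_heartbeat_debug(struct hl_device *hdev)
  {
          struct eq_heartbeat_debug_info *info = &hdev->heartbeat_debug_info;
          time64_t now = ktime_get_real_seconds();

          dev_info(hdev->dev,
                  "EQ heartbeat: %u events received, last PQ ts %lld s ago, last EQ ts %lld s ago\n",
                  info->heartbeat_event_counter,
                  (long long)(now - info->last_pq_heartbeat_ts),
                  (long long)(now - info->last_eq_heartbeat_ts));
  }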
/** /**
* struct hl_device - habanalabs device structure. * struct hl_device - habanalabs device structure.
* @pdev: pointer to PCI device, can be NULL in case of simulator device. * @pdev: pointer to PCI device, can be NULL in case of simulator device.
@ -3262,6 +3304,7 @@ struct hl_reset_info {
* @clk_throttling: holds information about current/previous clock throttling events * @clk_throttling: holds information about current/previous clock throttling events
* @captured_err_info: holds information about errors. * @captured_err_info: holds information about errors.
* @reset_info: holds current device reset information. * @reset_info: holds current device reset information.
* @heartbeat_debug_info: counters used to debug heartbeat failures.
* @irq_affinity_mask: mask of available CPU cores for user and decoder interrupt handling. * @irq_affinity_mask: mask of available CPU cores for user and decoder interrupt handling.
* @stream_master_qid_arr: pointer to array with QIDs of master streams. * @stream_master_qid_arr: pointer to array with QIDs of master streams.
* @fw_inner_major_ver: the major of current loaded preboot inner version. * @fw_inner_major_ver: the major of current loaded preboot inner version.
@ -3452,6 +3495,8 @@ struct hl_device {
struct hl_reset_info reset_info; struct hl_reset_info reset_info;
struct eq_heartbeat_debug_info heartbeat_debug_info;
cpumask_t irq_affinity_mask; cpumask_t irq_affinity_mask;
u32 *stream_master_qid_arr; u32 *stream_master_qid_arr;
@ -3596,25 +3641,6 @@ struct hl_ioctl_desc {
hl_ioctl_t *func; hl_ioctl_t *func;
}; };
static inline bool hl_is_fw_sw_ver_below(struct hl_device *hdev, u32 fw_sw_major, u32 fw_sw_minor)
{
if (hdev->fw_sw_major_ver < fw_sw_major)
return true;
if (hdev->fw_sw_major_ver > fw_sw_major)
return false;
if (hdev->fw_sw_minor_ver < fw_sw_minor)
return true;
return false;
}
static inline bool hl_is_fw_sw_ver_equal_or_greater(struct hl_device *hdev, u32 fw_sw_major,
u32 fw_sw_minor)
{
return (hdev->fw_sw_major_ver > fw_sw_major ||
(hdev->fw_sw_major_ver == fw_sw_major &&
hdev->fw_sw_minor_ver >= fw_sw_minor));
}
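
Both ad-hoc helpers are dropped in favor of the single three-way comparator hl_fw_version_cmp() declared later in this file. Its body is not part of this diff; the memcmp-style contract implied by the call sites below (negative when the loaded firmware is older) can be sketched as:

  /* <0: loaded FW older than major.minor.subminor; 0: equal; >0: newer */
  static int fw_version_cmp_sketch(u32 l_major, u32 l_minor, u32 l_subminor,
                                   u32 major, u32 minor, u32 subminor)
  {
          if (l_major != major)
                  return l_major < major ? -1 : 1;
          if (l_minor != minor)
                  return l_minor < minor ? -1 : 1;
          if (l_subminor != subminor)
                  return l_subminor < subminor ? -1 : 1;
          return 0;
  }

Old call sites translate mechanically: hl_is_fw_sw_ver_below(hdev, a, b) becomes hl_fw_version_cmp(hdev, a, b, 0) < 0, and hl_is_fw_sw_ver_equal_or_greater(hdev, a, b) becomes hl_fw_version_cmp(hdev, a, b, 0) >= 0, as seen in the gaudi2 hunks below.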
/* /*
* Kernel module functions that can be accessed by entire module * Kernel module functions that can be accessed by entire module
*/ */
@ -3740,6 +3766,7 @@ int hl_eq_init(struct hl_device *hdev, struct hl_eq *q);
void hl_eq_fini(struct hl_device *hdev, struct hl_eq *q); void hl_eq_fini(struct hl_device *hdev, struct hl_eq *q);
void hl_cq_reset(struct hl_device *hdev, struct hl_cq *q); void hl_cq_reset(struct hl_device *hdev, struct hl_cq *q);
void hl_eq_reset(struct hl_device *hdev, struct hl_eq *q); void hl_eq_reset(struct hl_device *hdev, struct hl_eq *q);
void hl_eq_dump(struct hl_device *hdev, struct hl_eq *q);
irqreturn_t hl_irq_handler_cq(int irq, void *arg); irqreturn_t hl_irq_handler_cq(int irq, void *arg);
irqreturn_t hl_irq_handler_eq(int irq, void *arg); irqreturn_t hl_irq_handler_eq(int irq, void *arg);
irqreturn_t hl_irq_handler_dec_abnrm(int irq, void *arg); irqreturn_t hl_irq_handler_dec_abnrm(int irq, void *arg);
@ -3919,6 +3946,7 @@ void hl_mmu_dr_flush(struct hl_ctx *ctx);
int hl_mmu_dr_init(struct hl_device *hdev); int hl_mmu_dr_init(struct hl_device *hdev);
void hl_mmu_dr_fini(struct hl_device *hdev); void hl_mmu_dr_fini(struct hl_device *hdev);
int hl_fw_version_cmp(struct hl_device *hdev, u32 major, u32 minor, u32 subminor);
int hl_fw_load_fw_to_device(struct hl_device *hdev, const char *fw_name, int hl_fw_load_fw_to_device(struct hl_device *hdev, const char *fw_name,
void __iomem *dst, u32 src_offset, u32 size); void __iomem *dst, u32 src_offset, u32 size);
int hl_fw_send_pci_access_msg(struct hl_device *hdev, u32 opcode, u64 value); int hl_fw_send_pci_access_msg(struct hl_device *hdev, u32 opcode, u64 value);
@ -4033,7 +4061,7 @@ char *hl_format_as_binary(char *buf, size_t buf_len, u32 n);
const char *hl_sync_engine_to_string(enum hl_sync_engine_type engine_type); const char *hl_sync_engine_to_string(enum hl_sync_engine_type engine_type);
void hl_mem_mgr_init(struct device *dev, struct hl_mem_mgr *mmg); void hl_mem_mgr_init(struct device *dev, struct hl_mem_mgr *mmg);
void hl_mem_mgr_fini(struct hl_mem_mgr *mmg); void hl_mem_mgr_fini(struct hl_mem_mgr *mmg, struct hl_mem_mgr_fini_stats *stats);
void hl_mem_mgr_idr_destroy(struct hl_mem_mgr *mmg); void hl_mem_mgr_idr_destroy(struct hl_mem_mgr *mmg);
int hl_mem_mgr_mmap(struct hl_mem_mgr *mmg, struct vm_area_struct *vma, int hl_mem_mgr_mmap(struct hl_mem_mgr *mmg, struct vm_area_struct *vma,
void *args); void *args);
@ -4059,6 +4087,8 @@ void hl_capture_engine_err(struct hl_device *hdev, u16 engine_id, u16 error_coun
void hl_enable_err_info_capture(struct hl_error_info *captured_err_info); void hl_enable_err_info_capture(struct hl_error_info *captured_err_info);
void hl_init_cpu_for_irq(struct hl_device *hdev); void hl_init_cpu_for_irq(struct hl_device *hdev);
void hl_set_irq_affinity(struct hl_device *hdev, int irq); void hl_set_irq_affinity(struct hl_device *hdev, int irq);
void hl_eq_heartbeat_event_handle(struct hl_device *hdev);
void hl_handle_clk_change_event(struct hl_device *hdev, u16 event_type, u64 *event_mask);
#ifdef CONFIG_DEBUG_FS #ifdef CONFIG_DEBUG_FS

View File

@ -144,6 +144,9 @@ static enum hl_asic_type get_asic_type(struct hl_device *hdev)
case REV_ID_C: case REV_ID_C:
asic_type = ASIC_GAUDI2C; asic_type = ASIC_GAUDI2C;
break; break;
case REV_ID_D:
asic_type = ASIC_GAUDI2D;
break;
default: default:
break; break;
} }
@ -260,7 +263,7 @@ int hl_device_open(struct drm_device *ddev, struct drm_file *file_priv)
out_err: out_err:
mutex_unlock(&hdev->fpriv_list_lock); mutex_unlock(&hdev->fpriv_list_lock);
hl_mem_mgr_fini(&hpriv->mem_mgr); hl_mem_mgr_fini(&hpriv->mem_mgr, NULL);
hl_mem_mgr_idr_destroy(&hpriv->mem_mgr); hl_mem_mgr_idr_destroy(&hpriv->mem_mgr);
hl_ctx_mgr_fini(hpriv->hdev, &hpriv->ctx_mgr); hl_ctx_mgr_fini(hpriv->hdev, &hpriv->ctx_mgr);
mutex_destroy(&hpriv->ctx_lock); mutex_destroy(&hpriv->ctx_lock);

View File

@ -585,9 +585,10 @@ int hl_get_temperature(struct hl_device *hdev,
*value = (long) result; *value = (long) result;
if (rc) { if (rc) {
dev_err_ratelimited(hdev->dev, if (rc != -EAGAIN)
"Failed to get temperature from sensor %d, error %d\n", dev_err_ratelimited(hdev->dev,
sensor_index, rc); "Failed to get temperature from sensor %d, error %d\n",
sensor_index, rc);
*value = 0; *value = 0;
} }
@ -610,8 +611,7 @@ int hl_set_temperature(struct hl_device *hdev,
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
0, NULL); 0, NULL);
if (rc && rc != -EAGAIN)
if (rc)
dev_err_ratelimited(hdev->dev, dev_err_ratelimited(hdev->dev,
"Failed to set temperature of sensor %d, error %d\n", "Failed to set temperature of sensor %d, error %d\n",
sensor_index, rc); sensor_index, rc);
@ -639,9 +639,10 @@ int hl_get_voltage(struct hl_device *hdev,
*value = (long) result; *value = (long) result;
if (rc) { if (rc) {
dev_err_ratelimited(hdev->dev, if (rc != -EAGAIN)
"Failed to get voltage from sensor %d, error %d\n", dev_err_ratelimited(hdev->dev,
sensor_index, rc); "Failed to get voltage from sensor %d, error %d\n",
sensor_index, rc);
*value = 0; *value = 0;
} }
@ -668,9 +669,10 @@ int hl_get_current(struct hl_device *hdev,
*value = (long) result; *value = (long) result;
if (rc) { if (rc) {
dev_err_ratelimited(hdev->dev, if (rc != -EAGAIN)
"Failed to get current from sensor %d, error %d\n", dev_err_ratelimited(hdev->dev,
sensor_index, rc); "Failed to get current from sensor %d, error %d\n",
sensor_index, rc);
*value = 0; *value = 0;
} }
@ -697,9 +699,10 @@ int hl_get_fan_speed(struct hl_device *hdev,
*value = (long) result; *value = (long) result;
if (rc) { if (rc) {
dev_err_ratelimited(hdev->dev, if (rc != -EAGAIN)
"Failed to get fan speed from sensor %d, error %d\n", dev_err_ratelimited(hdev->dev,
sensor_index, rc); "Failed to get fan speed from sensor %d, error %d\n",
sensor_index, rc);
*value = 0; *value = 0;
} }
@ -726,9 +729,10 @@ int hl_get_pwm_info(struct hl_device *hdev,
*value = (long) result; *value = (long) result;
if (rc) { if (rc) {
dev_err_ratelimited(hdev->dev, if (rc != -EAGAIN)
"Failed to get pwm info from sensor %d, error %d\n", dev_err_ratelimited(hdev->dev,
sensor_index, rc); "Failed to get pwm info from sensor %d, error %d\n",
sensor_index, rc);
*value = 0; *value = 0;
} }
@ -751,8 +755,7 @@ void hl_set_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr,
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
0, NULL); 0, NULL);
if (rc && rc != -EAGAIN)
if (rc)
dev_err_ratelimited(hdev->dev, dev_err_ratelimited(hdev->dev,
"Failed to set pwm info to sensor %d, error %d\n", "Failed to set pwm info to sensor %d, error %d\n",
sensor_index, rc); sensor_index, rc);
@ -774,8 +777,7 @@ int hl_set_voltage(struct hl_device *hdev,
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
0, NULL); 0, NULL);
if (rc && rc != -EAGAIN)
if (rc)
dev_err_ratelimited(hdev->dev, dev_err_ratelimited(hdev->dev,
"Failed to set voltage of sensor %d, error %d\n", "Failed to set voltage of sensor %d, error %d\n",
sensor_index, rc); sensor_index, rc);
@ -797,10 +799,8 @@ int hl_set_current(struct hl_device *hdev,
pkt.type = __cpu_to_le16(attr); pkt.type = __cpu_to_le16(attr);
pkt.value = __cpu_to_le64(value); pkt.value = __cpu_to_le64(value);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, NULL);
0, NULL); if (rc && rc != -EAGAIN)
if (rc)
dev_err_ratelimited(hdev->dev, dev_err_ratelimited(hdev->dev,
"Failed to set current of sensor %d, error %d\n", "Failed to set current of sensor %d, error %d\n",
sensor_index, rc); sensor_index, rc);
@ -830,8 +830,7 @@ int hl_set_power(struct hl_device *hdev,
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
0, NULL); 0, NULL);
if (rc && rc != -EAGAIN)
if (rc)
dev_err_ratelimited(hdev->dev, dev_err_ratelimited(hdev->dev,
"Failed to set power of sensor %d, error %d\n", "Failed to set power of sensor %d, error %d\n",
sensor_index, rc); sensor_index, rc);
@ -859,9 +858,10 @@ int hl_get_power(struct hl_device *hdev,
*value = (long) result; *value = (long) result;
if (rc) { if (rc) {
dev_err_ratelimited(hdev->dev, if (rc != -EAGAIN)
"Failed to get power of sensor %d, error %d\n", dev_err_ratelimited(hdev->dev,
sensor_index, rc); "Failed to get power of sensor %d, error %d\n",
sensor_index, rc);
*value = 0; *value = 0;
} }
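
The same -EAGAIN filter is now applied at every hwmon call site, presumably because -EAGAIN marks a transient condition (e.g. the device mid-reset) that the caller retries, not a real sensor failure. A hypothetical helper capturing the repeated pattern (not part of this patch):

  static void hl_hwmon_log_cpu_msg_err(struct hl_device *hdev, int rc,
                                       const char *op, int sensor_index)
  {
          /* -EAGAIN is expected during reset and will be retried: stay quiet */
          if (rc && rc != -EAGAIN)
                  dev_err_ratelimited(hdev->dev,
                          "Failed to %s sensor %d, error %d\n",
                          op, sensor_index, rc);
  }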

View File

@ -652,14 +652,16 @@ void hl_cq_reset(struct hl_device *hdev, struct hl_cq *q)
*/ */
int hl_eq_init(struct hl_device *hdev, struct hl_eq *q) int hl_eq_init(struct hl_device *hdev, struct hl_eq *q)
{ {
u32 size = hdev->asic_prop.fw_event_queue_size ? : HL_EQ_SIZE_IN_BYTES;
void *p; void *p;
p = hl_cpu_accessible_dma_pool_alloc(hdev, HL_EQ_SIZE_IN_BYTES, &q->bus_address); p = hl_cpu_accessible_dma_pool_alloc(hdev, size, &q->bus_address);
if (!p) if (!p)
return -ENOMEM; return -ENOMEM;
q->hdev = hdev; q->hdev = hdev;
q->kernel_address = p; q->kernel_address = p;
q->size = size;
q->ci = 0; q->ci = 0;
q->prev_eqe_index = 0; q->prev_eqe_index = 0;
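
The "x ? : y" in hl_eq_init() is the GCC/Clang conditional with an omitted middle operand: it evaluates to x when x is non-zero and to y otherwise, without evaluating x twice. A freestanding illustration with stand-in values:

  #include <stdio.h>

  int main(void)
  {
          unsigned int fw_event_queue_size = 0;   /* FW reported no size */
          unsigned int fallback_size = 4096;      /* stand-in for HL_EQ_SIZE_IN_BYTES */
          unsigned int size = fw_event_queue_size ?: fallback_size;

          printf("EQ size = %u\n", size);         /* prints 4096 */
          return 0;
  }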
@ -678,7 +680,7 @@ void hl_eq_fini(struct hl_device *hdev, struct hl_eq *q)
{ {
flush_workqueue(hdev->eq_wq); flush_workqueue(hdev->eq_wq);
hl_cpu_accessible_dma_pool_free(hdev, HL_EQ_SIZE_IN_BYTES, q->kernel_address); hl_cpu_accessible_dma_pool_free(hdev, q->size, q->kernel_address);
} }
void hl_eq_reset(struct hl_device *hdev, struct hl_eq *q) void hl_eq_reset(struct hl_device *hdev, struct hl_eq *q)
@ -693,5 +695,30 @@ void hl_eq_reset(struct hl_device *hdev, struct hl_eq *q)
* when the device is operational again * when the device is operational again
*/ */
memset(q->kernel_address, 0, HL_EQ_SIZE_IN_BYTES); memset(q->kernel_address, 0, q->size);
}
void hl_eq_dump(struct hl_device *hdev, struct hl_eq *q)
{
u32 eq_length, eqe_size, ctl, ready, mode, type, index;
struct hl_eq_header *hdr;
u8 *ptr;
int i;
eq_length = HL_EQ_LENGTH;
eqe_size = q->size / HL_EQ_LENGTH;
dev_info(hdev->dev, "Contents of EQ entries headers:\n");
for (i = 0, ptr = q->kernel_address ; i < eq_length ; ++i, ptr += eqe_size) {
hdr = (struct hl_eq_header *) ptr;
ctl = le32_to_cpu(hdr->ctl);
ready = FIELD_GET(EQ_CTL_READY_MASK, ctl);
mode = FIELD_GET(EQ_CTL_EVENT_MODE_MASK, ctl);
type = FIELD_GET(EQ_CTL_EVENT_TYPE_MASK, ctl);
index = FIELD_GET(EQ_CTL_INDEX_MASK, ctl);
dev_info(hdev->dev, "%02u: %#010x [ready: %u, mode %u, type %04u, index %05u]\n",
i, ctl, ready, mode, type, index);
}
} }
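
hl_eq_dump() decodes each entry header with FIELD_GET(), which extracts the bitfield selected by a contiguous mask. A freestanding illustration with a made-up mask (the real EQ_CTL_* masks live in the firmware interface headers):

  #include <stdio.h>

  #define DEMO_TYPE_MASK  0x0000ff00      /* hypothetical, not the real EQ_CTL mask */

  /* like FIELD_GET(mask, val): shift val down by the mask's lowest set bit */
  #define demo_field_get(mask, val)       (((val) & (mask)) / ((mask) & -(mask)))

  int main(void)
  {
          unsigned int ctl = 0x00012a00;

          printf("type = 0x%02x\n", demo_field_get(DEMO_TYPE_MASK, ctl)); /* 0x2a */
          return 0;
  }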

View File

@ -318,28 +318,61 @@ void hl_mem_mgr_init(struct device *dev, struct hl_mem_mgr *mmg)
idr_init(&mmg->handles); idr_init(&mmg->handles);
} }
static void hl_mem_mgr_fini_stats_reset(struct hl_mem_mgr_fini_stats *stats)
{
if (!stats)
return;
memset(stats, 0, sizeof(*stats));
}
static void hl_mem_mgr_fini_stats_inc(u64 mem_id, struct hl_mem_mgr_fini_stats *stats)
{
if (!stats)
return;
switch (mem_id) {
case HL_MMAP_TYPE_CB:
++stats->n_busy_cb;
break;
case HL_MMAP_TYPE_TS_BUFF:
++stats->n_busy_ts;
break;
default:
/* we currently store only CB/TS so this shouldn't happen */
++stats->n_busy_other;
}
}
/** /**
* hl_mem_mgr_fini - release unified memory manager * hl_mem_mgr_fini - release unified memory manager
* *
* @mmg: parent unified memory manager * @mmg: parent unified memory manager
* @stats: if non-NULL, returns per-type counters of handles that could not be released.
* *
* Release the unified memory manager. Shall be called from an interrupt context. * Release the unified memory manager. Shall be called from an interrupt context.
*/ */
void hl_mem_mgr_fini(struct hl_mem_mgr *mmg) void hl_mem_mgr_fini(struct hl_mem_mgr *mmg, struct hl_mem_mgr_fini_stats *stats)
{ {
struct hl_mmap_mem_buf *buf; struct hl_mmap_mem_buf *buf;
struct idr *idp; struct idr *idp;
const char *topic; const char *topic;
u64 mem_id;
u32 id; u32 id;
hl_mem_mgr_fini_stats_reset(stats);
idp = &mmg->handles; idp = &mmg->handles;
idr_for_each_entry(idp, buf, id) { idr_for_each_entry(idp, buf, id) {
topic = buf->behavior->topic; topic = buf->behavior->topic;
if (hl_mmap_mem_buf_put(buf) != 1) mem_id = buf->behavior->mem_id;
if (hl_mmap_mem_buf_put(buf) != 1) {
dev_err(mmg->dev, dev_err(mmg->dev,
"%s: Buff handle %u for CTX is still alive\n", "%s: Buff handle %u for CTX is still alive\n",
topic, id); topic, id);
hl_mem_mgr_fini_stats_inc(mem_id, stats);
}
} }
} }
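
A caller that wants the new accounting passes a stats struct instead of NULL; the field names below are the ones incremented in hl_mem_mgr_fini_stats_inc() above, while the surrounding call site and mmg_dev are hypothetical:

  struct hl_mem_mgr_fini_stats stats;

  hl_mem_mgr_fini(&hpriv->mem_mgr, &stats);
  if (stats.n_busy_cb || stats.n_busy_ts || stats.n_busy_other)
          dev_warn(mmg_dev, "leaked handles: cb=%u ts=%u other=%u\n",
                   stats.n_busy_cb, stats.n_busy_ts, stats.n_busy_other);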

View File

@ -6,6 +6,7 @@
*/ */
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/pci.h>
#include "../habanalabs.h" #include "../habanalabs.h"
@ -262,7 +263,7 @@ int hl_mmu_unmap_page(struct hl_ctx *ctx, u64 virt_addr, u32 page_size, bool flu
mmu_funcs->flush(ctx); mmu_funcs->flush(ctx);
if (trace_habanalabs_mmu_unmap_enabled() && !rc) if (trace_habanalabs_mmu_unmap_enabled() && !rc)
trace_habanalabs_mmu_unmap(hdev->dev, virt_addr, 0, page_size, flush_pte); trace_habanalabs_mmu_unmap(&hdev->pdev->dev, virt_addr, 0, page_size, flush_pte);
return rc; return rc;
} }
@ -349,7 +350,7 @@ int hl_mmu_map_page(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_s
if (flush_pte) if (flush_pte)
mmu_funcs->flush(ctx); mmu_funcs->flush(ctx);
trace_habanalabs_mmu_map(hdev->dev, virt_addr, phys_addr, page_size, flush_pte); trace_habanalabs_mmu_map(&hdev->pdev->dev, virt_addr, phys_addr, page_size, flush_pte);
return 0; return 0;
@ -599,6 +600,7 @@ int hl_mmu_if_set_funcs(struct hl_device *hdev)
case ASIC_GAUDI2: case ASIC_GAUDI2:
case ASIC_GAUDI2B: case ASIC_GAUDI2B:
case ASIC_GAUDI2C: case ASIC_GAUDI2C:
case ASIC_GAUDI2D:
hl_mmu_v2_set_funcs(hdev, &hdev->mmu_func[MMU_DR_PGT]); hl_mmu_v2_set_funcs(hdev, &hdev->mmu_func[MMU_DR_PGT]);
if (prop->pmmu.host_resident) if (prop->pmmu.host_resident)
hl_mmu_v2_hr_set_funcs(hdev, &hdev->mmu_func[MMU_HR_PGT]); hl_mmu_v2_hr_set_funcs(hdev, &hdev->mmu_func[MMU_HR_PGT]);
@ -644,7 +646,8 @@ int hl_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard, u32 flags)
rc = hdev->asic_funcs->mmu_invalidate_cache(hdev, is_hard, flags); rc = hdev->asic_funcs->mmu_invalidate_cache(hdev, is_hard, flags);
if (rc) if (rc)
dev_err_ratelimited(hdev->dev, dev_err_ratelimited(hdev->dev,
"%s cache invalidation failed, rc=%d\n", "%s: %s cache invalidation failed, rc=%d\n",
dev_name(&hdev->pdev->dev),
flags == VM_TYPE_USERPTR ? "PMMU" : "HMMU", rc); flags == VM_TYPE_USERPTR ? "PMMU" : "HMMU", rc);
return rc; return rc;
@ -659,8 +662,9 @@ int hl_mmu_invalidate_cache_range(struct hl_device *hdev, bool is_hard,
asid, va, size); asid, va, size);
if (rc) if (rc)
dev_err_ratelimited(hdev->dev, dev_err_ratelimited(hdev->dev,
"%s cache range invalidation failed: va=%#llx, size=%llu, rc=%d", "%s: %s cache range invalidation failed: va=%#llx, size=%llu, rc=%d",
flags == VM_TYPE_USERPTR ? "PMMU" : "HMMU", va, size, rc); dev_name(&hdev->pdev->dev), flags == VM_TYPE_USERPTR ? "PMMU" : "HMMU",
va, size, rc);
return rc; return rc;
} }

View File

@ -123,7 +123,7 @@ int hl_pci_elbi_read(struct hl_device *hdev, u64 addr, u32 *data)
pci_read_config_dword(pdev, mmPCI_CONFIG_ELBI_DATA, data); pci_read_config_dword(pdev, mmPCI_CONFIG_ELBI_DATA, data);
if (unlikely(trace_habanalabs_elbi_read_enabled())) if (unlikely(trace_habanalabs_elbi_read_enabled()))
trace_habanalabs_elbi_read(hdev->dev, (u32) addr, val); trace_habanalabs_elbi_read(&hdev->pdev->dev, (u32) addr, val);
return 0; return 0;
} }
@ -186,7 +186,7 @@ static int hl_pci_elbi_write(struct hl_device *hdev, u64 addr, u32 data)
if ((val & PCI_CONFIG_ELBI_STS_MASK) == PCI_CONFIG_ELBI_STS_DONE) { if ((val & PCI_CONFIG_ELBI_STS_MASK) == PCI_CONFIG_ELBI_STS_DONE) {
if (unlikely(trace_habanalabs_elbi_write_enabled())) if (unlikely(trace_habanalabs_elbi_write_enabled()))
trace_habanalabs_elbi_write(hdev->dev, (u32) addr, val); trace_habanalabs_elbi_write(&hdev->pdev->dev, (u32) addr, val);
return 0; return 0;
} }

View File

@ -142,8 +142,9 @@ static ssize_t cpld_ver_show(struct device *dev, struct device_attribute *attr,
{ {
struct hl_device *hdev = dev_get_drvdata(dev); struct hl_device *hdev = dev_get_drvdata(dev);
return sprintf(buf, "0x%08x\n", return sprintf(buf, "0x%08x%08x\n",
le32_to_cpu(hdev->asic_prop.cpucp_info.cpld_version)); le32_to_cpu(hdev->asic_prop.cpucp_info.cpld_timestamp),
le32_to_cpu(hdev->asic_prop.cpucp_info.cpld_version));
} }
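
With the timestamp prepended, the attribute now reads as one 16-hex-digit value: the CPLD timestamp in the upper eight digits and the CPLD version in the lower eight. A freestanding illustration (register values purely illustrative):

  #include <stdio.h>

  int main(void)
  {
          unsigned int cpld_timestamp = 0x665f0a00;       /* illustrative */
          unsigned int cpld_version = 0x00000003;         /* illustrative */

          printf("0x%08x%08x\n", cpld_timestamp, cpld_version);
          /* -> 0x665f0a0000000003 */
          return 0;
  }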
static ssize_t cpucp_kernel_ver_show(struct device *dev, static ssize_t cpucp_kernel_ver_show(struct device *dev,
@ -270,6 +271,9 @@ static ssize_t device_type_show(struct device *dev,
case ASIC_GAUDI2C: case ASIC_GAUDI2C:
str = "GAUDI2C"; str = "GAUDI2C";
break; break;
case ASIC_GAUDI2D:
str = "GAUDI2D";
break;
default: default:
dev_err(hdev->dev, "Unrecognized ASIC type %d\n", dev_err(hdev->dev, "Unrecognized ASIC type %d\n",
hdev->asic_type); hdev->asic_type);

View File

@ -1639,10 +1639,8 @@ static int gaudi_late_init(struct hl_device *hdev)
} }
rc = hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_ENABLE_PCI_ACCESS, 0x0); rc = hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_ENABLE_PCI_ACCESS, 0x0);
if (rc) { if (rc)
dev_err(hdev->dev, "Failed to enable PCI access from CPU\n");
return rc; return rc;
}
/* Scrub both SRAM and DRAM */ /* Scrub both SRAM and DRAM */
rc = hdev->asic_funcs->scrub_device_mem(hdev); rc = hdev->asic_funcs->scrub_device_mem(hdev);
@ -4154,13 +4152,7 @@ skip_reset:
static int gaudi_suspend(struct hl_device *hdev) static int gaudi_suspend(struct hl_device *hdev)
{ {
int rc; return hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS, 0x0);
rc = hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS, 0x0);
if (rc)
dev_err(hdev->dev, "Failed to disable PCI access from CPU\n");
return rc;
} }
static int gaudi_resume(struct hl_device *hdev) static int gaudi_resume(struct hl_device *hdev)

View File

@ -2601,6 +2601,8 @@ static int gaudi2_set_fixed_properties(struct hl_device *hdev)
prop->hbw_flush_reg = mmPCIE_WRAP_SPECIAL_GLBL_SPARE_0; prop->hbw_flush_reg = mmPCIE_WRAP_SPECIAL_GLBL_SPARE_0;
prop->supports_advanced_cpucp_rc = true;
return 0; return 0;
free_qprops: free_qprops:
@ -3308,14 +3310,10 @@ static int gaudi2_late_init(struct hl_device *hdev)
struct gaudi2_device *gaudi2 = hdev->asic_specific; struct gaudi2_device *gaudi2 = hdev->asic_specific;
int rc; int rc;
hdev->asic_prop.supports_advanced_cpucp_rc = true;
rc = hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_ENABLE_PCI_ACCESS, rc = hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_ENABLE_PCI_ACCESS,
gaudi2->virt_msix_db_dma_addr); gaudi2->virt_msix_db_dma_addr);
if (rc) { if (rc)
dev_err(hdev->dev, "Failed to enable PCI access from CPU\n");
return rc; return rc;
}
rc = gaudi2_fetch_psoc_frequency(hdev); rc = gaudi2_fetch_psoc_frequency(hdev);
if (rc) { if (rc) {
@ -3783,7 +3781,7 @@ static int gaudi2_sw_init(struct hl_device *hdev)
prop->supports_compute_reset = true; prop->supports_compute_reset = true;
/* Event queue sanity check added in FW version 1.11 */ /* Event queue sanity check added in FW version 1.11 */
if (hl_is_fw_sw_ver_below(hdev, 1, 11)) if (hl_fw_version_cmp(hdev, 1, 11, 0) < 0)
hdev->event_queue.check_eqe_index = false; hdev->event_queue.check_eqe_index = false;
else else
hdev->event_queue.check_eqe_index = true; hdev->event_queue.check_eqe_index = true;
@ -3798,6 +3796,8 @@ static int gaudi2_sw_init(struct hl_device *hdev)
if (rc) if (rc)
goto special_blocks_free; goto special_blocks_free;
hdev->heartbeat_debug_info.cpu_queue_id = GAUDI2_QUEUE_ID_CPU_PQ;
return 0; return 0;
special_blocks_free: special_blocks_free:
@ -6314,26 +6314,6 @@ static void gaudi2_execute_hard_reset(struct hl_device *hdev)
WREG32(mmPSOC_RESET_CONF_SW_ALL_RST, 1); WREG32(mmPSOC_RESET_CONF_SW_ALL_RST, 1);
} }
static int gaudi2_get_soft_rst_done_indication(struct hl_device *hdev, u32 poll_timeout_us)
{
int i, rc = 0;
u32 reg_val;
for (i = 0 ; i < GAUDI2_RESET_POLL_CNT ; i++)
rc = hl_poll_timeout(
hdev,
mmCPU_RST_STATUS_TO_HOST,
reg_val,
reg_val == CPU_RST_STATUS_SOFT_RST_DONE,
1000,
poll_timeout_us);
if (rc)
dev_err(hdev->dev, "Timeout while waiting for FW to complete soft reset (0x%x)\n",
reg_val);
return rc;
}
/** /**
* gaudi2_execute_soft_reset - execute soft reset by driver/FW * gaudi2_execute_soft_reset - execute soft reset by driver/FW
* *
@ -6346,23 +6326,8 @@ static int gaudi2_get_soft_rst_done_indication(struct hl_device *hdev, u32 poll_
static int gaudi2_execute_soft_reset(struct hl_device *hdev, bool driver_performs_reset, static int gaudi2_execute_soft_reset(struct hl_device *hdev, bool driver_performs_reset,
u32 poll_timeout_us) u32 poll_timeout_us)
{ {
int rc; if (!driver_performs_reset)
return hl_fw_send_soft_reset(hdev);
if (!driver_performs_reset) {
if (hl_is_fw_sw_ver_below(hdev, 1, 10)) {
/* set SP to indicate reset request sent to FW */
WREG32(mmCPU_RST_STATUS_TO_HOST, CPU_RST_STATUS_NA);
WREG32(mmGIC_HOST_SOFT_RST_IRQ_POLL_REG,
gaudi2_irq_map_table[GAUDI2_EVENT_CPU_SOFT_RESET].cpu_id);
/* wait for f/w response */
rc = gaudi2_get_soft_rst_done_indication(hdev, poll_timeout_us);
} else {
rc = hl_fw_send_soft_reset(hdev);
}
return rc;
}
/* Block access to engines, QMANs and SM during reset, these /* Block access to engines, QMANs and SM during reset, these
* RRs will be reconfigured after soft reset. * RRs will be reconfigured after soft reset.
@ -6502,13 +6467,7 @@ skip_reset:
static int gaudi2_suspend(struct hl_device *hdev) static int gaudi2_suspend(struct hl_device *hdev)
{ {
int rc; return hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS, 0x0);
rc = hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS, 0x0);
if (rc)
dev_err(hdev->dev, "Failed to disable PCI access from CPU\n");
return rc;
} }
static int gaudi2_resume(struct hl_device *hdev) static int gaudi2_resume(struct hl_device *hdev)
@ -7914,7 +7873,7 @@ static bool gaudi2_handle_ecc_event(struct hl_device *hdev, u16 event_type,
bool has_block_id = false; bool has_block_id = false;
u16 block_id; u16 block_id;
if (!hl_is_fw_sw_ver_below(hdev, 1, 12)) if (hl_fw_version_cmp(hdev, 1, 12, 0) >= 0)
has_block_id = true; has_block_id = true;
ecc_address = le64_to_cpu(ecc_data->ecc_address); ecc_address = le64_to_cpu(ecc_data->ecc_address);
@ -8165,13 +8124,7 @@ static void gaudi2_ack_module_razwi_event_handler(struct hl_device *hdev,
} }
hbw_rtr_id = gaudi2_tpc_initiator_hbw_rtr_id[module_idx]; hbw_rtr_id = gaudi2_tpc_initiator_hbw_rtr_id[module_idx];
lbw_rtr_id = gaudi2_tpc_initiator_lbw_rtr_id[module_idx];
if (hl_is_fw_sw_ver_below(hdev, 1, 9) &&
!hdev->asic_prop.fw_security_enabled &&
((module_idx == 0) || (module_idx == 1)))
lbw_rtr_id = DCORE0_RTR0;
else
lbw_rtr_id = gaudi2_tpc_initiator_lbw_rtr_id[module_idx];
break; break;
case RAZWI_MME: case RAZWI_MME:
sprintf(initiator_name, "MME_%u", module_idx); sprintf(initiator_name, "MME_%u", module_idx);
@ -9310,8 +9263,8 @@ static int gaudi2_handle_mmu_spi_sei_err(struct hl_device *hdev, u16 event_type,
static bool gaudi2_hbm_sei_handle_read_err(struct hl_device *hdev, static bool gaudi2_hbm_sei_handle_read_err(struct hl_device *hdev,
struct hl_eq_hbm_sei_read_err_intr_info *rd_err_data, u32 err_cnt) struct hl_eq_hbm_sei_read_err_intr_info *rd_err_data, u32 err_cnt)
{ {
bool require_hard_reset = false;
u32 addr, beat, beat_shift; u32 addr, beat, beat_shift;
bool rc = false;
dev_err_ratelimited(hdev->dev, dev_err_ratelimited(hdev->dev,
"READ ERROR count: ECC SERR: %d, ECC DERR: %d, RD_PARITY: %d\n", "READ ERROR count: ECC SERR: %d, ECC DERR: %d, RD_PARITY: %d\n",
@ -9343,7 +9296,7 @@ static bool gaudi2_hbm_sei_handle_read_err(struct hl_device *hdev,
beat, beat,
le32_to_cpu(rd_err_data->dbg_rd_err_dm), le32_to_cpu(rd_err_data->dbg_rd_err_dm),
le32_to_cpu(rd_err_data->dbg_rd_err_syndrome)); le32_to_cpu(rd_err_data->dbg_rd_err_syndrome));
rc |= true; require_hard_reset = true;
} }
beat_shift = beat * HBM_RD_ERR_BEAT_SHIFT; beat_shift = beat * HBM_RD_ERR_BEAT_SHIFT;
@ -9356,7 +9309,7 @@ static bool gaudi2_hbm_sei_handle_read_err(struct hl_device *hdev,
(le32_to_cpu(rd_err_data->dbg_rd_err_misc) & (le32_to_cpu(rd_err_data->dbg_rd_err_misc) &
(HBM_RD_ERR_PAR_DATA_BEAT0_MASK << beat_shift)) >> (HBM_RD_ERR_PAR_DATA_BEAT0_MASK << beat_shift)) >>
(HBM_RD_ERR_PAR_DATA_BEAT0_SHIFT + beat_shift)); (HBM_RD_ERR_PAR_DATA_BEAT0_SHIFT + beat_shift));
rc |= true; require_hard_reset = true;
} }
dev_err_ratelimited(hdev->dev, "Beat%d DQ data:\n", beat); dev_err_ratelimited(hdev->dev, "Beat%d DQ data:\n", beat);
@ -9366,7 +9319,7 @@ static bool gaudi2_hbm_sei_handle_read_err(struct hl_device *hdev,
le32_to_cpu(rd_err_data->dbg_rd_err_data[beat * 2 + 1])); le32_to_cpu(rd_err_data->dbg_rd_err_data[beat * 2 + 1]));
} }
return rc; return require_hard_reset;
} }
static void gaudi2_hbm_sei_print_wr_par_info(struct hl_device *hdev, static void gaudi2_hbm_sei_print_wr_par_info(struct hl_device *hdev,
@ -9824,11 +9777,6 @@ static u16 event_id_to_engine_id(struct hl_device *hdev, u16 event_type)
return U16_MAX; return U16_MAX;
} }
static void hl_eq_heartbeat_event_handle(struct hl_device *hdev)
{
hdev->eq_heartbeat_received = true;
}
static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry) static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry)
{ {
struct gaudi2_device *gaudi2 = hdev->asic_specific; struct gaudi2_device *gaudi2 = hdev->asic_specific;
@ -10050,6 +9998,7 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent
if (gaudi2_handle_hbm_mc_sei_err(hdev, event_type, &eq_entry->sei_data)) { if (gaudi2_handle_hbm_mc_sei_err(hdev, event_type, &eq_entry->sei_data)) {
reset_flags |= HL_DRV_RESET_FW_FATAL_ERR; reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
reset_required = true; reset_required = true;
is_critical = eq_entry->sei_data.hdr.is_critical;
} }
error_count++; error_count++;
break; break;
@ -10070,7 +10019,7 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent
error_count = gaudi2_handle_pcie_drain(hdev, &eq_entry->pcie_drain_ind_data); error_count = gaudi2_handle_pcie_drain(hdev, &eq_entry->pcie_drain_ind_data);
reset_flags |= HL_DRV_RESET_FW_FATAL_ERR; reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR; event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
if (hl_is_fw_sw_ver_equal_or_greater(hdev, 1, 13)) if (hl_fw_version_cmp(hdev, 1, 13, 0) >= 0)
is_critical = true; is_critical = true;
break; break;
@ -10281,8 +10230,7 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent
gaudi2_print_event(hdev, event_type, true, gaudi2_print_event(hdev, event_type, true,
"No error cause for H/W event %u", event_type); "No error cause for H/W event %u", event_type);
if ((gaudi2_irq_map_table[event_type].reset != EVENT_RESET_TYPE_NONE) || if ((gaudi2_irq_map_table[event_type].reset != EVENT_RESET_TYPE_NONE) || reset_required) {
reset_required) {
if (reset_required || if (reset_required ||
(gaudi2_irq_map_table[event_type].reset == EVENT_RESET_TYPE_HARD)) (gaudi2_irq_map_table[event_type].reset == EVENT_RESET_TYPE_HARD))
reset_flags |= HL_DRV_RESET_HARD; reset_flags |= HL_DRV_RESET_HARD;

View File

@ -384,7 +384,7 @@ enum gaudi2_edma_id {
/* User interrupt count is aligned with HW CQ count. /* User interrupt count is aligned with HW CQ count.
* We have 64 CQs per dcore, CQ0 in dcore 0 is reserved for legacy mode * We have 64 CQs per dcore, CQ0 in dcore 0 is reserved for legacy mode
*/ */
#define GAUDI2_NUM_USER_INTERRUPTS 255 #define GAUDI2_NUM_USER_INTERRUPTS 64
#define GAUDI2_NUM_RESERVED_INTERRUPTS 1 #define GAUDI2_NUM_RESERVED_INTERRUPTS 1
#define GAUDI2_TOTAL_USER_INTERRUPTS (GAUDI2_NUM_USER_INTERRUPTS + GAUDI2_NUM_RESERVED_INTERRUPTS) #define GAUDI2_TOTAL_USER_INTERRUPTS (GAUDI2_NUM_USER_INTERRUPTS + GAUDI2_NUM_RESERVED_INTERRUPTS)
@ -416,11 +416,11 @@ enum gaudi2_irq_num {
GAUDI2_IRQ_NUM_NIC_PORT_LAST = (GAUDI2_IRQ_NUM_NIC_PORT_FIRST + NIC_NUMBER_OF_PORTS - 1), GAUDI2_IRQ_NUM_NIC_PORT_LAST = (GAUDI2_IRQ_NUM_NIC_PORT_FIRST + NIC_NUMBER_OF_PORTS - 1),
GAUDI2_IRQ_NUM_TPC_ASSERT, GAUDI2_IRQ_NUM_TPC_ASSERT,
GAUDI2_IRQ_NUM_EQ_ERROR, GAUDI2_IRQ_NUM_EQ_ERROR,
GAUDI2_IRQ_NUM_RESERVED_FIRST, GAUDI2_IRQ_NUM_USER_FIRST,
GAUDI2_IRQ_NUM_RESERVED_LAST = (GAUDI2_MSIX_ENTRIES - GAUDI2_TOTAL_USER_INTERRUPTS - 1),
GAUDI2_IRQ_NUM_UNEXPECTED_ERROR = RESERVED_MSIX_UNEXPECTED_USER_ERROR_INTERRUPT,
GAUDI2_IRQ_NUM_USER_FIRST = GAUDI2_IRQ_NUM_UNEXPECTED_ERROR + 1,
GAUDI2_IRQ_NUM_USER_LAST = (GAUDI2_IRQ_NUM_USER_FIRST + GAUDI2_NUM_USER_INTERRUPTS - 1), GAUDI2_IRQ_NUM_USER_LAST = (GAUDI2_IRQ_NUM_USER_FIRST + GAUDI2_NUM_USER_INTERRUPTS - 1),
GAUDI2_IRQ_NUM_RESERVED_FIRST,
GAUDI2_IRQ_NUM_RESERVED_LAST = (GAUDI2_MSIX_ENTRIES - GAUDI2_NUM_RESERVED_INTERRUPTS - 1),
GAUDI2_IRQ_NUM_UNEXPECTED_ERROR = RESERVED_MSIX_UNEXPECTED_USER_ERROR_INTERRUPT,
GAUDI2_IRQ_NUM_LAST = (GAUDI2_MSIX_ENTRIES - 1) GAUDI2_IRQ_NUM_LAST = (GAUDI2_MSIX_ENTRIES - 1)
}; };
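
With the table shrunk to 128 entries, the new layout can be sanity-checked arithmetically: 64 user vectors now precede the reserved range, and the unexpected-error vector lands on the final entry (127). A compile-time restatement using only values defined in this patch:

  static_assert(RESERVED_MSIX_UNEXPECTED_USER_ERROR_INTERRUPT ==
                GAUDI2_MSIX_ENTRIES - GAUDI2_NUM_RESERVED_INTERRUPTS,
                "unexpected-error vector must be the last MSI-X entry");
  static_assert(GAUDI2_TOTAL_USER_INTERRUPTS == 65,
                "64 user interrupts plus one reserved");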

View File

@ -479,6 +479,7 @@ static const u32 gaudi2_pb_dcr0_edma0_unsecured_regs[] = {
mmDCORE0_EDMA0_CORE_CTX_TE_NUMROWS, mmDCORE0_EDMA0_CORE_CTX_TE_NUMROWS,
mmDCORE0_EDMA0_CORE_CTX_IDX, mmDCORE0_EDMA0_CORE_CTX_IDX,
mmDCORE0_EDMA0_CORE_CTX_IDX_INC, mmDCORE0_EDMA0_CORE_CTX_IDX_INC,
mmDCORE0_EDMA0_CORE_WR_COMP_MAX_OUTSTAND,
mmDCORE0_EDMA0_CORE_RD_LBW_RATE_LIM_CFG, mmDCORE0_EDMA0_CORE_RD_LBW_RATE_LIM_CFG,
mmDCORE0_EDMA0_QM_CQ_CFG0_0, mmDCORE0_EDMA0_QM_CQ_CFG0_0,
mmDCORE0_EDMA0_QM_CQ_CFG0_1, mmDCORE0_EDMA0_QM_CQ_CFG0_1,

View File

@ -893,11 +893,8 @@ int goya_late_init(struct hl_device *hdev)
WREG32(mmMMU_LOG2_DDR_SIZE, ilog2(prop->dram_size)); WREG32(mmMMU_LOG2_DDR_SIZE, ilog2(prop->dram_size));
rc = hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_ENABLE_PCI_ACCESS, 0x0); rc = hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_ENABLE_PCI_ACCESS, 0x0);
if (rc) { if (rc)
dev_err(hdev->dev,
"Failed to enable PCI access from CPU %d\n", rc);
return rc; return rc;
}
/* force setting to low frequency */ /* force setting to low frequency */
goya->curr_pll_profile = PLL_LOW; goya->curr_pll_profile = PLL_LOW;
@ -2864,13 +2861,7 @@ static int goya_hw_fini(struct hl_device *hdev, bool hard_reset, bool fw_reset)
int goya_suspend(struct hl_device *hdev) int goya_suspend(struct hl_device *hdev)
{ {
int rc; return hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS, 0x0);
rc = hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS, 0x0);
if (rc)
dev_err(hdev->dev, "Failed to disable PCI access from CPU\n");
return rc;
} }
int goya_resume(struct hl_device *hdev) int goya_resume(struct hl_device *hdev)

View File

@ -63,9 +63,9 @@
#define RESERVED_VA_RANGE_FOR_ARC_ON_HOST_HPAGE_START 0xFFF0F80000000000ull #define RESERVED_VA_RANGE_FOR_ARC_ON_HOST_HPAGE_START 0xFFF0F80000000000ull
#define RESERVED_VA_RANGE_FOR_ARC_ON_HOST_HPAGE_END 0xFFF0FFFFFFFFFFFFull #define RESERVED_VA_RANGE_FOR_ARC_ON_HOST_HPAGE_END 0xFFF0FFFFFFFFFFFFull
#define RESERVED_MSIX_UNEXPECTED_USER_ERROR_INTERRUPT 256 #define RESERVED_MSIX_UNEXPECTED_USER_ERROR_INTERRUPT 127
#define GAUDI2_MSIX_ENTRIES 512 #define GAUDI2_MSIX_ENTRIES 128
#define QMAN_PQ_ENTRY_SIZE 16 /* Bytes */ #define QMAN_PQ_ENTRY_SIZE 16 /* Bytes */

View File

@ -330,9 +330,9 @@ static struct gaudi2_async_events_ids_map gaudi2_irq_map_table[] = {
{ .fc_id = 149, .cpu_id = 48, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE, { .fc_id = 149, .cpu_id = 48, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
.name = "EDMA7_ECC_SERR" }, .name = "EDMA7_ECC_SERR" },
{ .fc_id = 150, .cpu_id = 48, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE, { .fc_id = 150, .cpu_id = 48, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
.name = "HDMA4_ECC_SERR" }, .name = "EDMA4_ECC_SERR" },
{ .fc_id = 151, .cpu_id = 48, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE, { .fc_id = 151, .cpu_id = 48, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
.name = "HDMA5_ECC_SERR" }, .name = "EDMA5_ECC_SERR" },
{ .fc_id = 152, .cpu_id = 49, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD, { .fc_id = 152, .cpu_id = 49, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
.name = "EDMA2_ECC_DERR" }, .name = "EDMA2_ECC_DERR" },
{ .fc_id = 153, .cpu_id = 49, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD, { .fc_id = 153, .cpu_id = 49, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
@ -856,55 +856,55 @@ static struct gaudi2_async_events_ids_map gaudi2_irq_map_table[] = {
{ .fc_id = 412, .cpu_id = 84, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD, { .fc_id = 412, .cpu_id = 84, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
.name = "PCIE_ADDR_DEC_ERR" }, .name = "PCIE_ADDR_DEC_ERR" },
{ .fc_id = 413, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE, { .fc_id = 413, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
.name = "TPC0_AXI_ERR_RSP" }, .name = "DCORE0_TPC0_AXI_ERR_RSP" },
{ .fc_id = 414, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE, { .fc_id = 414, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
.name = "TPC1_AXI_ERR_RSP" }, .name = "DCORE0_TPC1_AXI_ERR_RSP" },
{ .fc_id = 415, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE, { .fc_id = 415, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
.name = "TPC2_AXI_ERR_RSP" }, .name = "DCORE0_TPC2_AXI_ERR_RSP" },
{ .fc_id = 416, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE, { .fc_id = 416, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
.name = "TPC3_AXI_ERR_RSP" }, .name = "DCORE0_TPC3_AXI_ERR_RSP" },
{ .fc_id = 417, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE, { .fc_id = 417, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
.name = "TPC4_AXI_ERR_RSP" }, .name = "DCORE0_TPC4_AXI_ERR_RSP" },
{ .fc_id = 418, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE, { .fc_id = 418, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
.name = "TPC5_AXI_ERR_RSP" }, .name = "DCORE0_TPC5_AXI_ERR_RSP" },
{ .fc_id = 419, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE, { .fc_id = 419, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
.name = "TPC6_AXI_ERR_RSP" }, .name = "DCORE1_TPC0_AXI_ERR_RSP" },
{ .fc_id = 420, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE, { .fc_id = 420, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
.name = "TPC7_AXI_ERR_RSP" }, .name = "DCORE1_TPC1_AXI_ERR_RSP" },
{ .fc_id = 421, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE, { .fc_id = 421, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
.name = "TPC8_AXI_ERR_RSP" }, .name = "DCORE1_TPC2_AXI_ERR_RSP" },
{ .fc_id = 422, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE, { .fc_id = 422, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
.name = "TPC9_AXI_ERR_RSP" }, .name = "DCORE1_TPC3_AXI_ERR_RSP" },
{ .fc_id = 423, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE, { .fc_id = 423, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
.name = "TPC10_AXI_ERR_RSP" }, .name = "DCORE1_TPC4_AXI_ERR_RSP" },
{ .fc_id = 424, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE, { .fc_id = 424, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
.name = "TPC11_AXI_ERR_RSP" }, .name = "DCORE1_TPC5_AXI_ERR_RSP" },
{ .fc_id = 425, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE, { .fc_id = 425, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
.name = "TPC12_AXI_ERR_RSP" }, .name = "DCORE2_TPC0_AXI_ERR_RSP" },
{ .fc_id = 426, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE, { .fc_id = 426, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
.name = "TPC13_AXI_ERR_RSP" }, .name = "DCORE2_TPC1_AXI_ERR_RSP" },
{ .fc_id = 427, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE, { .fc_id = 427, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
.name = "TPC14_AXI_ERR_RSP" }, .name = "DCORE2_TPC2_AXI_ERR_RSP" },
{ .fc_id = 428, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE, { .fc_id = 428, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
.name = "TPC15_AXI_ERR_RSP" }, .name = "DCORE2_TPC3_AXI_ERR_RSP" },
{ .fc_id = 429, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE, { .fc_id = 429, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
.name = "TPC16_AXI_ERR_RSP" }, .name = "DCORE2_TPC4_AXI_ERR_RSP" },
{ .fc_id = 430, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE, { .fc_id = 430, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
.name = "TPC17_AXI_ERR_RSP" }, .name = "DCORE2_TPC5_AXI_ERR_RSP" },
{ .fc_id = 431, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE, { .fc_id = 431, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
.name = "TPC18_AXI_ERR_RSP" }, .name = "DCORE3_TPC0_AXI_ERR_RSP" },
{ .fc_id = 432, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE, { .fc_id = 432, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
.name = "TPC19_AXI_ERR_RSP" }, .name = "DCORE3_TPC1_AXI_ERR_RSP" },
{ .fc_id = 433, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE, { .fc_id = 433, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
.name = "TPC20_AXI_ERR_RSP" }, .name = "DCORE3_TPC2_AXI_ERR_RSP" },
{ .fc_id = 434, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE, { .fc_id = 434, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
.name = "TPC21_AXI_ERR_RSP" }, .name = "DCORE3_TPC3_AXI_ERR_RSP" },
{ .fc_id = 435, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE, { .fc_id = 435, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
.name = "TPC22_AXI_ERR_RSP" }, .name = "DCORE3_TPC4_AXI_ERR_RSP" },
{ .fc_id = 436, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE, { .fc_id = 436, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
.name = "TPC23_AXI_ERR_RSP" }, .name = "DCORE3_TPC5_AXI_ERR_RSP" },
{ .fc_id = 437, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE, { .fc_id = 437, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
.name = "TPC24_AXI_ERR_RSP" }, .name = "DCORE4_TPC0_AXI_ERR_RSP" },
{ .fc_id = 438, .cpu_id = 86, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD, { .fc_id = 438, .cpu_id = 86, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
.name = "AXI_ECC" }, .name = "AXI_ECC" },
{ .fc_id = 439, .cpu_id = 87, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD, { .fc_id = 439, .cpu_id = 87, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
@ -965,73 +965,73 @@ static struct gaudi2_async_events_ids_map gaudi2_irq_map_table[] = {
.name = "MME3_CTRL_AXI_ERROR_RESPONSE" }, .name = "MME3_CTRL_AXI_ERROR_RESPONSE" },
{ .fc_id = 467, .cpu_id = 91, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE, { .fc_id = 467, .cpu_id = 91, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
.name = "MME3_QMAN_SW_ERROR" }, .name = "MME3_QMAN_SW_ERROR" },
{ .fc_id = 468, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE, { .fc_id = 468, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
.name = "PSOC_MME_PLL_LOCK_ERR" }, .name = "PSOC_MME_PLL_LOCK_ERR" },
{ .fc_id = 469, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE, { .fc_id = 469, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
.name = "PSOC_CPU_PLL_LOCK_ERR" }, .name = "PSOC_CPU_PLL_LOCK_ERR" },
{ .fc_id = 470, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE, { .fc_id = 470, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
.name = "DCORE3_TPC_PLL_LOCK_ERR" }, .name = "DCORE3_TPC_PLL_LOCK_ERR" },
{ .fc_id = 471, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE, { .fc_id = 471, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
.name = "DCORE3_NIC_PLL_LOCK_ERR" }, .name = "DCORE3_NIC_PLL_LOCK_ERR" },
{ .fc_id = 472, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE, { .fc_id = 472, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
.name = "DCORE3_XBAR_MMU_PLL_LOCK_ERR" }, .name = "DCORE3_XBAR_MMU_PLL_LOCK_ERR" },
{ .fc_id = 473, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE, { .fc_id = 473, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
.name = "DCORE3_XBAR_DMA_PLL_LOCK_ERR" }, .name = "DCORE3_XBAR_DMA_PLL_LOCK_ERR" },
{ .fc_id = 474, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE, { .fc_id = 474, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
.name = "DCORE3_XBAR_IF_PLL_LOCK_ERR" }, .name = "DCORE3_XBAR_IF_PLL_LOCK_ERR" },
{ .fc_id = 475, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE, { .fc_id = 475, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
.name = "DCORE3_XBAR_BANK_PLL_LOCK_ERR" }, .name = "DCORE3_XBAR_BANK_PLL_LOCK_ERR" },
{ .fc_id = 476, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE, { .fc_id = 476, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
.name = "DCORE1_XBAR_MMU_PLL_LOCK_ERR" }, .name = "DCORE1_XBAR_MMU_PLL_LOCK_ERR" },
{ .fc_id = 477, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE, { .fc_id = 477, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
.name = "DCORE1_XBAR_DMA_PLL_LOCK_ERR" }, .name = "DCORE1_XBAR_DMA_PLL_LOCK_ERR" },
{ .fc_id = 478, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE, { .fc_id = 478, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
.name = "DCORE1_XBAR_IF_PLL_LOCK_ERR" }, .name = "DCORE1_XBAR_IF_PLL_LOCK_ERR" },
{ .fc_id = 479, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE, { .fc_id = 479, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
.name = "DCORE1_XBAR_MESH_PLL_LOCK_ERR" }, .name = "DCORE1_XBAR_MESH_PLL_LOCK_ERR" },
{ .fc_id = 480, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE, { .fc_id = 480, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
.name = "DCORE1_TPC_PLL_LOCK_ERR" }, .name = "DCORE1_TPC_PLL_LOCK_ERR" },
{ .fc_id = 481, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE, { .fc_id = 481, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
.name = "DCORE1_NIC_PLL_LOCK_ERR" }, .name = "DCORE1_NIC_PLL_LOCK_ERR" },
{ .fc_id = 482, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE, { .fc_id = 482, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
.name = "PMMU_MME_PLL_LOCK_ERR" }, .name = "PMMU_MME_PLL_LOCK_ERR" },
{ .fc_id = 483, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE, { .fc_id = 483, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
.name = "DCORE0_TPC_PLL_LOCK_ERR" }, .name = "DCORE0_TPC_PLL_LOCK_ERR" },
{ .fc_id = 484, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE, { .fc_id = 484, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
.name = "DCORE0_PCI_PLL_LOCK_ERR" }, .name = "DCORE0_PCI_PLL_LOCK_ERR" },
{ .fc_id = 485, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE, { .fc_id = 485, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
.name = "DCORE0_XBAR_MMU_PLL_LOCK_ERR" }, .name = "DCORE0_XBAR_MMU_PLL_LOCK_ERR" },
{ .fc_id = 486, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE, { .fc_id = 486, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
.name = "DCORE0_XBAR_DMA_PLL_LOCK_ERR" }, .name = "DCORE0_XBAR_DMA_PLL_LOCK_ERR" },
{ .fc_id = 487, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE, { .fc_id = 487, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
.name = "DCORE0_XBAR_IF_PLL_LOCK_ERR" }, .name = "DCORE0_XBAR_IF_PLL_LOCK_ERR" },
{ .fc_id = 488, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE, { .fc_id = 488, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
.name = "DCORE0_XBAR_MESH_PLL_LOCK_ERR" }, .name = "DCORE0_XBAR_MESH_PLL_LOCK_ERR" },
{ .fc_id = 489, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE, { .fc_id = 489, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
.name = "DCORE2_XBAR_MMU_PLL_LOCK_ERR" }, .name = "DCORE2_XBAR_MMU_PLL_LOCK_ERR" },
{ .fc_id = 490, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE, { .fc_id = 490, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
.name = "DCORE2_XBAR_DMA_PLL_LOCK_ERR" }, .name = "DCORE2_XBAR_DMA_PLL_LOCK_ERR" },
{ .fc_id = 491, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE, { .fc_id = 491, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
.name = "DCORE2_XBAR_IF_PLL_LOCK_ERR" }, .name = "DCORE2_XBAR_IF_PLL_LOCK_ERR" },
{ .fc_id = 492, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE, { .fc_id = 492, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
.name = "DCORE2_XBAR_BANK_PLL_LOCK_ERR" }, .name = "DCORE2_XBAR_BANK_PLL_LOCK_ERR" },
{ .fc_id = 493, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE, { .fc_id = 493, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
.name = "DCORE2_TPC_PLL_LOCK_ERR" }, .name = "DCORE2_TPC_PLL_LOCK_ERR" },
{ .fc_id = 494, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE, { .fc_id = 494, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
.name = "PSOC_VID_PLL_LOCK_ERR" }, .name = "PSOC_VID_PLL_LOCK_ERR" },
{ .fc_id = 495, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE, { .fc_id = 495, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
.name = "PMMU_VID_PLL_LOCK_ERR" }, .name = "PMMU_VID_PLL_LOCK_ERR" },
{ .fc_id = 496, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE, { .fc_id = 496, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
.name = "DCORE3_HBM_PLL_LOCK_ERR" }, .name = "DCORE3_HBM_PLL_LOCK_ERR" },
{ .fc_id = 497, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE, { .fc_id = 497, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
.name = "DCORE1_XBAR_HBM_PLL_LOCK_ERR" }, .name = "DCORE1_XBAR_HBM_PLL_LOCK_ERR" },
{ .fc_id = 498, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE, { .fc_id = 498, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
.name = "DCORE1_HBM_PLL_LOCK_ERR" }, .name = "DCORE1_HBM_PLL_LOCK_ERR" },
{ .fc_id = 499, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE, { .fc_id = 499, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
.name = "DCORE0_HBM_PLL_LOCK_ERR" }, .name = "DCORE0_HBM_PLL_LOCK_ERR" },
{ .fc_id = 500, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE, { .fc_id = 500, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
.name = "DCORE2_XBAR_HBM_PLL_LOCK_ERR" }, .name = "DCORE2_XBAR_HBM_PLL_LOCK_ERR" },
{ .fc_id = 501, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE, { .fc_id = 501, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
.name = "DCORE2_HBM_PLL_LOCK_ERR" }, .name = "DCORE2_HBM_PLL_LOCK_ERR" },
{ .fc_id = 502, .cpu_id = 93, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD, { .fc_id = 502, .cpu_id = 93, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
.name = "CPU_AXI_ERR_RSP" }, .name = "CPU_AXI_ERR_RSP" },
@ -1298,103 +1298,103 @@ static struct gaudi2_async_events_ids_map gaudi2_irq_map_table[] = {
{ .fc_id = 633, .cpu_id = 130, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE, { .fc_id = 633, .cpu_id = 130, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
.name = "TPC0_BMON_SPMU" }, .name = "TPC0_BMON_SPMU" },
{ .fc_id = 634, .cpu_id = 131, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE, { .fc_id = 634, .cpu_id = 131, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
.name = "TPC0_KERNEL_ERR" }, .name = "DCORE0_TPC0_KERNEL_ERR" },
{ .fc_id = 635, .cpu_id = 132, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE, { .fc_id = 635, .cpu_id = 132, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
.name = "TPC1_BMON_SPMU" }, .name = "TPC1_BMON_SPMU" },
{ .fc_id = 636, .cpu_id = 133, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE, { .fc_id = 636, .cpu_id = 133, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
.name = "TPC1_KERNEL_ERR" }, .name = "DCORE0_TPC1_KERNEL_ERR" },
{ .fc_id = 637, .cpu_id = 134, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE, { .fc_id = 637, .cpu_id = 134, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
.name = "TPC2_BMON_SPMU" }, .name = "TPC2_BMON_SPMU" },
{ .fc_id = 638, .cpu_id = 135, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE, { .fc_id = 638, .cpu_id = 135, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
.name = "TPC2_KERNEL_ERR" }, .name = "DCORE0_TPC2_KERNEL_ERR" },
{ .fc_id = 639, .cpu_id = 136, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE, { .fc_id = 639, .cpu_id = 136, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
.name = "TPC3_BMON_SPMU" }, .name = "TPC3_BMON_SPMU" },
{ .fc_id = 640, .cpu_id = 137, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE, { .fc_id = 640, .cpu_id = 137, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
.name = "TPC3_KERNEL_ERR" }, .name = "DCORE0_TPC3_KERNEL_ERR" },
{ .fc_id = 641, .cpu_id = 138, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE, { .fc_id = 641, .cpu_id = 138, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
.name = "TPC4_BMON_SPMU" }, .name = "TPC4_BMON_SPMU" },
{ .fc_id = 642, .cpu_id = 139, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE, { .fc_id = 642, .cpu_id = 139, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
.name = "TPC4_KERNEL_ERR" }, .name = "DCORE0_TPC4_KERNEL_ERR" },
{ .fc_id = 643, .cpu_id = 140, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE, { .fc_id = 643, .cpu_id = 140, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
.name = "TPC5_BMON_SPMU" }, .name = "TPC5_BMON_SPMU" },
{ .fc_id = 644, .cpu_id = 141, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE, { .fc_id = 644, .cpu_id = 141, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
.name = "TPC5_KERNEL_ERR" }, .name = "DCORE0_TPC5_KERNEL_ERR" },
{ .fc_id = 645, .cpu_id = 150, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE, { .fc_id = 645, .cpu_id = 150, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
.name = "TPC6_BMON_SPMU" }, .name = "TPC6_BMON_SPMU" },
{ .fc_id = 646, .cpu_id = 151, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE, { .fc_id = 646, .cpu_id = 151, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
.name = "TPC6_KERNEL_ERR" }, .name = "DCORE1_TPC0_KERNEL_ERR" },
{ .fc_id = 647, .cpu_id = 152, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE, { .fc_id = 647, .cpu_id = 152, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
.name = "TPC7_BMON_SPMU" }, .name = "TPC7_BMON_SPMU" },
{ .fc_id = 648, .cpu_id = 153, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE, { .fc_id = 648, .cpu_id = 153, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
.name = "TPC7_KERNEL_ERR" }, .name = "DCORE1_TPC1_KERNEL_ERR" },
{ .fc_id = 649, .cpu_id = 146, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE, { .fc_id = 649, .cpu_id = 146, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
.name = "TPC8_BMON_SPMU" }, .name = "TPC8_BMON_SPMU" },
{ .fc_id = 650, .cpu_id = 147, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE, { .fc_id = 650, .cpu_id = 147, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
.name = "TPC8_KERNEL_ERR" }, .name = "DCORE1_TPC2_KERNEL_ERR" },
{ .fc_id = 651, .cpu_id = 148, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE, { .fc_id = 651, .cpu_id = 148, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
.name = "TPC9_BMON_SPMU" }, .name = "TPC9_BMON_SPMU" },
{ .fc_id = 652, .cpu_id = 149, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE, { .fc_id = 652, .cpu_id = 149, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
.name = "TPC9_KERNEL_ERR" }, .name = "DCORE1_TPC3_KERNEL_ERR" },
{ .fc_id = 653, .cpu_id = 142, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE, { .fc_id = 653, .cpu_id = 142, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
.name = "TPC10_BMON_SPMU" }, .name = "TPC10_BMON_SPMU" },
{ .fc_id = 654, .cpu_id = 143, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE, { .fc_id = 654, .cpu_id = 143, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
.name = "TPC10_KERNEL_ERR" }, .name = "DCORE1_TPC4_KERNEL_ERR" },
{ .fc_id = 655, .cpu_id = 144, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE, { .fc_id = 655, .cpu_id = 144, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
.name = "TPC11_BMON_SPMU" }, .name = "TPC11_BMON_SPMU" },
{ .fc_id = 656, .cpu_id = 145, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE, { .fc_id = 656, .cpu_id = 145, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
.name = "TPC11_KERNEL_ERR" }, .name = "DCORE1_TPC5_KERNEL_ERR" },
{ .fc_id = 657, .cpu_id = 162, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE, { .fc_id = 657, .cpu_id = 162, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
.name = "TPC12_BMON_SPMU" }, .name = "TPC12_BMON_SPMU" },
{ .fc_id = 658, .cpu_id = 163, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE, { .fc_id = 658, .cpu_id = 163, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
.name = "TPC12_KERNEL_ERR" }, .name = "DCORE2_TPC0_KERNEL_ERR" },
{ .fc_id = 659, .cpu_id = 164, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE, { .fc_id = 659, .cpu_id = 164, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
.name = "TPC13_BMON_SPMU" }, .name = "TPC13_BMON_SPMU" },
{ .fc_id = 660, .cpu_id = 165, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE, { .fc_id = 660, .cpu_id = 165, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
.name = "TPC13_KERNEL_ERR" }, .name = "DCORE2_TPC1_KERNEL_ERR" },
{ .fc_id = 661, .cpu_id = 158, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE, { .fc_id = 661, .cpu_id = 158, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
.name = "TPC14_BMON_SPMU" }, .name = "TPC14_BMON_SPMU" },
{ .fc_id = 662, .cpu_id = 159, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE, { .fc_id = 662, .cpu_id = 159, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
.name = "TPC14_KERNEL_ERR" }, .name = "DCORE2_TPC2_KERNEL_ERR" },
{ .fc_id = 663, .cpu_id = 160, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE, { .fc_id = 663, .cpu_id = 160, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
.name = "TPC15_BMON_SPMU" }, .name = "TPC15_BMON_SPMU" },
{ .fc_id = 664, .cpu_id = 161, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE, { .fc_id = 664, .cpu_id = 161, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
.name = "TPC15_KERNEL_ERR" }, .name = "DCORE2_TPC3_KERNEL_ERR" },
{ .fc_id = 665, .cpu_id = 154, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE, { .fc_id = 665, .cpu_id = 154, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
.name = "TPC16_BMON_SPMU" }, .name = "TPC16_BMON_SPMU" },
{ .fc_id = 666, .cpu_id = 155, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE, { .fc_id = 666, .cpu_id = 155, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
.name = "TPC16_KERNEL_ERR" }, .name = "DCORE2_TPC4_KERNEL_ERR" },
{ .fc_id = 667, .cpu_id = 156, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE, { .fc_id = 667, .cpu_id = 156, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
.name = "TPC17_BMON_SPMU" }, .name = "TPC17_BMON_SPMU" },
{ .fc_id = 668, .cpu_id = 157, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE, { .fc_id = 668, .cpu_id = 157, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
.name = "TPC17_KERNEL_ERR" }, .name = "DCORE2_TPC5_KERNEL_ERR" },
{ .fc_id = 669, .cpu_id = 166, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE, { .fc_id = 669, .cpu_id = 166, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
.name = "TPC18_BMON_SPMU" }, .name = "TPC18_BMON_SPMU" },
{ .fc_id = 670, .cpu_id = 167, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE, { .fc_id = 670, .cpu_id = 167, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
.name = "TPC18_KERNEL_ERR" }, .name = "DCORE3_TPC0_KERNEL_ERR" },
{ .fc_id = 671, .cpu_id = 168, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE, { .fc_id = 671, .cpu_id = 168, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
.name = "TPC19_BMON_SPMU" }, .name = "TPC19_BMON_SPMU" },
{ .fc_id = 672, .cpu_id = 169, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE, { .fc_id = 672, .cpu_id = 169, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
.name = "TPC19_KERNEL_ERR" }, .name = "DCORE3_TPC1_KERNEL_ERR" },
{ .fc_id = 673, .cpu_id = 170, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE, { .fc_id = 673, .cpu_id = 170, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
.name = "TPC20_BMON_SPMU" }, .name = "TPC20_BMON_SPMU" },
{ .fc_id = 674, .cpu_id = 171, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE, { .fc_id = 674, .cpu_id = 171, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
.name = "TPC20_KERNEL_ERR" }, .name = "DCORE3_TPC2_KERNEL_ERR" },
{ .fc_id = 675, .cpu_id = 172, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE, { .fc_id = 675, .cpu_id = 172, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
.name = "TPC21_BMON_SPMU" }, .name = "TPC21_BMON_SPMU" },
{ .fc_id = 676, .cpu_id = 173, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE, { .fc_id = 676, .cpu_id = 173, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
.name = "TPC21_KERNEL_ERR" }, .name = "DCORE3_TPC3_KERNEL_ERR" },
{ .fc_id = 677, .cpu_id = 174, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE, { .fc_id = 677, .cpu_id = 174, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
.name = "TPC22_BMON_SPMU" }, .name = "TPC22_BMON_SPMU" },
{ .fc_id = 678, .cpu_id = 175, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE, { .fc_id = 678, .cpu_id = 175, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
.name = "TPC22_KERNEL_ERR" }, .name = "DCORE3_TPC4_KERNEL_ERR" },
{ .fc_id = 679, .cpu_id = 176, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE, { .fc_id = 679, .cpu_id = 176, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
.name = "TPC23_BMON_SPMU" }, .name = "TPC23_BMON_SPMU" },
{ .fc_id = 680, .cpu_id = 177, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE, { .fc_id = 680, .cpu_id = 177, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
.name = "TPC23_KERNEL_ERR" }, .name = "DCORE3_TPC5_KERNEL_ERR" },
{ .fc_id = 681, .cpu_id = 178, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE, { .fc_id = 681, .cpu_id = 178, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
.name = "TPC24_BMON_SPMU" }, .name = "TPC24_BMON_SPMU" },
{ .fc_id = 682, .cpu_id = 179, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE, { .fc_id = 682, .cpu_id = 179, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
.name = "TPC24_KERNEL_ERR" }, .name = "DCORE4_TPC0_KERNEL_ERR" },
{ .fc_id = 683, .cpu_id = 180, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE, { .fc_id = 683, .cpu_id = 180, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
.name = "" }, .name = "" },
{ .fc_id = 684, .cpu_id = 180, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE, { .fc_id = 684, .cpu_id = 180, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
@@ -1827,8 +1827,8 @@ static struct gaudi2_async_events_ids_map gaudi2_irq_map_table[] = {
 	.name = "DEC0_BMON_SPMU" },
 { .fc_id = 898, .cpu_id = 330, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
 	.name = "DEC1_SPI" },
-{ .fc_id = 899, .cpu_id = 330, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
-	.name = "DEC1_SPI" },
+{ .fc_id = 899, .cpu_id = 330, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
+	.name = "DEC1_BMON_SPMU" },
 { .fc_id = 900, .cpu_id = 331, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
 	.name = "DEC2_SPI" },
 { .fc_id = 901, .cpu_id = 331, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
@@ -2377,8 +2377,8 @@ static struct gaudi2_async_events_ids_map gaudi2_irq_map_table[] = {
 	.name = "" },
 { .fc_id = 1173, .cpu_id = 479, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
 	.name = "" },
-{ .fc_id = 1174, .cpu_id = 480, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
-	.name = "" },
+{ .fc_id = 1174, .cpu_id = 480, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
+	.name = "PSOC_DMA_QM" },
 { .fc_id = 1175, .cpu_id = 481, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
 	.name = "" },
 { .fc_id = 1176, .cpu_id = 482, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
@@ -2442,55 +2442,55 @@ static struct gaudi2_async_events_ids_map gaudi2_irq_map_table[] = {
 { .fc_id = 1205, .cpu_id = 511, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
 	.name = "" },
 { .fc_id = 1206, .cpu_id = 512, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
-	.name = "TPC0_QM" },
+	.name = "DCORE0_TPC0_QM" },
 { .fc_id = 1207, .cpu_id = 513, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
-	.name = "TPC1_QM" },
+	.name = "DCORE0_TPC1_QM" },
 { .fc_id = 1208, .cpu_id = 514, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
-	.name = "TPC2_QM" },
+	.name = "DCORE0_TPC2_QM" },
 { .fc_id = 1209, .cpu_id = 515, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
-	.name = "TPC3_QM" },
+	.name = "DCORE0_TPC3_QM" },
 { .fc_id = 1210, .cpu_id = 516, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
-	.name = "TPC4_QM" },
+	.name = "DCORE0_TPC4_QM" },
 { .fc_id = 1211, .cpu_id = 517, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
-	.name = "TPC5_QM" },
+	.name = "DCORE0_TPC5_QM" },
 { .fc_id = 1212, .cpu_id = 518, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
-	.name = "TPC6_QM" },
+	.name = "DCORE1_TPC0_QM" },
 { .fc_id = 1213, .cpu_id = 519, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
-	.name = "TPC7_QM" },
+	.name = "DCORE1_TPC1_QM" },
 { .fc_id = 1214, .cpu_id = 520, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
-	.name = "TPC8_QM" },
+	.name = "DCORE1_TPC2_QM" },
 { .fc_id = 1215, .cpu_id = 521, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
-	.name = "TPC9_QM" },
+	.name = "DCORE1_TPC3_QM" },
 { .fc_id = 1216, .cpu_id = 522, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
-	.name = "TPC10_QM" },
+	.name = "DCORE1_TPC4_QM" },
 { .fc_id = 1217, .cpu_id = 523, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
-	.name = "TPC11_QM" },
+	.name = "DCORE1_TPC5_QM" },
 { .fc_id = 1218, .cpu_id = 524, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
-	.name = "TPC12_QM" },
+	.name = "DCORE2_TPC0_QM" },
 { .fc_id = 1219, .cpu_id = 525, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
-	.name = "TPC13_QM" },
+	.name = "DCORE2_TPC1_QM" },
 { .fc_id = 1220, .cpu_id = 526, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
-	.name = "TPC14_QM" },
+	.name = "DCORE2_TPC2_QM" },
 { .fc_id = 1221, .cpu_id = 527, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
-	.name = "TPC15_QM" },
+	.name = "DCORE2_TPC3_QM" },
 { .fc_id = 1222, .cpu_id = 528, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
-	.name = "TPC16_QM" },
+	.name = "DCORE2_TPC4_QM" },
 { .fc_id = 1223, .cpu_id = 529, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
-	.name = "TPC17_QM" },
+	.name = "DCORE2_TPC5_QM" },
 { .fc_id = 1224, .cpu_id = 530, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
-	.name = "TPC18_QM" },
+	.name = "DCORE3_TPC0_QM" },
 { .fc_id = 1225, .cpu_id = 531, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
-	.name = "TPC19_QM" },
+	.name = "DCORE3_TPC1_QM" },
 { .fc_id = 1226, .cpu_id = 532, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
-	.name = "TPC20_QM" },
+	.name = "DCORE3_TPC2_QM" },
 { .fc_id = 1227, .cpu_id = 533, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
-	.name = "TPC21_QM" },
+	.name = "DCORE3_TPC3_QM" },
 { .fc_id = 1228, .cpu_id = 534, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
-	.name = "TPC22_QM" },
+	.name = "DCORE3_TPC4_QM" },
 { .fc_id = 1229, .cpu_id = 535, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
-	.name = "TPC23_QM" },
+	.name = "DCORE3_TPC5_QM" },
 { .fc_id = 1230, .cpu_id = 536, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
-	.name = "TPC24_QM" },
+	.name = "DCORE4_TPC0_QM" },
 { .fc_id = 1231, .cpu_id = 537, .valid = 0, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
 	.name = "" },
 { .fc_id = 1232, .cpu_id = 538, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
@@ -2674,19 +2674,19 @@ static struct gaudi2_async_events_ids_map gaudi2_irq_map_table[] = {
 { .fc_id = 1321, .cpu_id = 627, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_HARD,
 	.name = "DEV_RESET_REQ" },
 { .fc_id = 1322, .cpu_id = 628, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
-	.name = "ARC_PWR_BRK_ENTRY" },
+	.name = "PWR_BRK_ENTRY" },
 { .fc_id = 1323, .cpu_id = 629, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
-	.name = "ARC_PWR_BRK_EXT" },
+	.name = "PWR_BRK_EXT" },
 { .fc_id = 1324, .cpu_id = 630, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
-	.name = "ARC_PWR_RD_MODE0" },
+	.name = "PWR_RD_MODE0" },
 { .fc_id = 1325, .cpu_id = 631, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
-	.name = "ARC_PWR_RD_MODE1" },
+	.name = "PWR_RD_MODE1" },
 { .fc_id = 1326, .cpu_id = 632, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
-	.name = "ARC_PWR_RD_MODE2" },
+	.name = "PWR_RD_MODE2" },
 { .fc_id = 1327, .cpu_id = 633, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
-	.name = "ARC_PWR_RD_MODE3" },
+	.name = "PWR_RD_MODE3" },
 { .fc_id = 1328, .cpu_id = 634, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
-	.name = "ARC_EQ_HEARTBEAT" },
+	.name = "EQ_HEARTBEAT" },
 };
 #endif /* __GAUDI2_ASYNC_IDS_MAP_EVENTS_EXT_H_ */
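Editor's note on the renamed entries above: the KERNEL_ERR/QM names gain a DCORE<n>_TPC<m> prefix while the event IDs stay fixed. As a reading aid only, a minimal sketch of how an entry in a table like gaudi2_irq_map_table can be resolved; the helper name, the dense fc_id-as-index assumption, and the fallback string are illustrative, not code from this patch:

static const char *gaudi2_event_name(u16 fc_id)
{
	/* Assumes the table is dense, i.e. the array index equals .fc_id;
	 * slots with .valid = 0 carry an empty name, as in the hunks above.
	 */
	if (fc_id >= ARRAY_SIZE(gaudi2_irq_map_table) ||
	    !gaudi2_irq_map_table[fc_id].valid)
		return "N/A";

	return gaudi2_irq_map_table[fc_id].name;
}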
@@ -45,6 +45,13 @@
 #define GAUDI2_ARM_RX_MB_OFFSET		(GAUDI2_ARM_RX_MB_ADDR - \
 						GAUDI2_SP_SRAM_BASE_ADDR)
+
+#define POWER_MODE_LEVELS	{ \
+	150000,		/* 00 */ \
+	250000,		/* 01 */ \
+	400000,		/* 10 */ \
+	/* 11: Normal mode */ \
+}
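A sketch of one way the new POWER_MODE_LEVELS initializer might be consumed; the array and helper names and the 2-bit mode index are assumptions for illustration, and the hunk does not state the unit of the three values:

/* The three entries cover the 2-bit power-mode encodings 00, 01 and 10;
 * encoding 11 ("Normal mode") deliberately has no entry, so a consumer
 * must treat it as "no reduced level".
 */
static const u32 gaudi2_power_mode_levels[] = POWER_MODE_LEVELS;

static u32 gaudi2_power_level(u32 mode)
{
	if (mode >= ARRAY_SIZE(gaudi2_power_mode_levels))
		return 0;	/* 11: normal mode */

	return gaudi2_power_mode_levels[mode];
}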
 
 enum gaudi2_fw_status {
 	GAUDI2_PID_STATUS_UP = 0x1,	/* PID on ARC0 is up */
 	GAUDI2_ARM_STATUS_UP = 0x2,	/* ARM Linux Boot complete */
@@ -52,26 +59,6 @@ enum gaudi2_fw_status {
 	GAUDI2_STATUS_LAST = 0xFF
 };
-struct gaudi2_cold_rst_data {
-	union {
-		struct {
-			u32 recovery_flag: 1;
-			u32 validation_flag: 1;
-			u32 efuse_read_flag: 1;
-			u32 spsram_init_done : 1;
-			u32 fake_security_enable : 1;
-			u32 fake_sig_validation_en : 1;
-			u32 bist_skip_enable : 1;
-			u32 reserved1 : 1;
-			u32 fake_bis_compliant : 1;
-			u32 wd_rst_cause_arm : 1;
-			u32 wd_rst_cause_arcpid : 1;
-			u32 reserved : 21;
-		};
-		__le32 data;
-	};
-};
 
 enum gaudi2_rst_src {
 	HL_COLD_RST = 1,
 	HL_MANUAL_RST = 2,
@@ -58,4 +58,12 @@
 #define mmWD_GPIO_DATAOUT_REG		mmPSOC_GPIO3_DATAOUT
 #define mmSTM_PROFILER_SPE_REG		mmPSOC_STM_STMSPER
+
+/* Registers below are used to pass the boot_if data between ARM and ARC1 */
+#define mmARM_MSG_BOOT_ERR_SET		mmCPU_IF_SPECIAL_GLBL_SPARE_0
+#define mmARM_MSG_BOOT_ERR_CLR		mmCPU_IF_SPECIAL_GLBL_SPARE_1
+#define mmARM_MSG_BOOT_DEV_STS_SET	mmCPU_IF_SPECIAL_GLBL_SPARE_2
+#define mmARM_MSG_BOOT_DEV_STS_CLR	mmCPU_IF_SPECIAL_GLBL_SPARE_3
+#define mmMGMT_MSG_BOOT_ERR		mmCPU_MSTR_IF_SPECIAL_GLBL_SPARE_0
+#define mmMGMT_MSG_BOOT_DEV_STS		mmCPU_MSTR_IF_SPECIAL_GLBL_SPARE_1
+
 #endif /* GAUDI2_REG_MAP_H_ */
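The aliases above repurpose spare CPU_IF registers as a channel for boot_if data between the ARM and ARC1 firmware. A sketch of driver-side helpers around them; WREG32 is the driver's usual MMIO accessor, while the helper names and the assumption that the _SET/_CLR pair acts as publish/acknowledge are illustrative only:

/* Hypothetical helpers; whether _SET/_CLR behave as hardware set/clear
 * masks or as plain scratch registers is a firmware convention that this
 * hunk does not show.
 */
static void arm_msg_publish_boot_err(struct hl_device *hdev, u32 err_bits)
{
	WREG32(mmARM_MSG_BOOT_ERR_SET, err_bits);
}

static void arm_msg_clear_boot_err(struct hl_device *hdev, u32 err_bits)
{
	WREG32(mmARM_MSG_BOOT_ERR_CLR, err_bits);
}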
@@ -25,7 +25,8 @@ enum hl_revision_id {
 	REV_ID_INVALID = 0x00,
 	REV_ID_A = 0x01,
 	REV_ID_B = 0x02,
-	REV_ID_C = 0x03
+	REV_ID_C = 0x03,
+	REV_ID_D = 0x04
 };
 
 #endif /* INCLUDE_PCI_GENERAL_H_ */
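REV_ID_D extends the revision enum for the new device stepping. A sketch of how a probe path might gate on it; the helper is illustrative, not part of this patch:

static bool hl_is_supported_revision(u8 rev_id)
{
	switch (rev_id) {
	case REV_ID_A:
	case REV_ID_B:
	case REV_ID_C:
	case REV_ID_D:	/* newly added stepping */
		return true;
	default:	/* includes REV_ID_INVALID */
		return false;
	}
}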
@@ -42,6 +42,12 @@ enum eq_event_id {
 	EQ_EVENT_PWR_BRK_ENTRY,
 	EQ_EVENT_PWR_BRK_EXIT,
 	EQ_EVENT_HEARTBEAT,
+	EQ_EVENT_CPLD_RESET_REASON,
+	EQ_EVENT_CPLD_SHUTDOWN,
+	EQ_EVENT_POWER_EVT_START,
+	EQ_EVENT_POWER_EVT_END,
+	EQ_EVENT_THERMAL_EVT_START,
+	EQ_EVENT_THERMAL_EVT_END,
 };
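The six new IDs cover CPLD-originated resets/shutdowns and paired start/end notifications for power and thermal events. A sketch of a dispatch stub for them; the function, its arguments, and the log text are illustrative:

static void handle_new_eq_events(struct hl_device *hdev, enum eq_event_id id)
{
	switch (id) {
	case EQ_EVENT_CPLD_RESET_REASON:
	case EQ_EVENT_CPLD_SHUTDOWN:
		dev_warn(hdev->dev, "CPLD event %d\n", id);
		break;
	case EQ_EVENT_POWER_EVT_START:
	case EQ_EVENT_THERMAL_EVT_START:
		/* a power/thermal window opened */
		break;
	case EQ_EVENT_POWER_EVT_END:
	case EQ_EVENT_THERMAL_EVT_END:
		/* the matching window closed */
		break;
	default:
		break;
	}
}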
 
 /*
@@ -391,6 +397,9 @@ struct hl_eq_entry {
 #define EQ_CTL_READY_SHIFT		31
 #define EQ_CTL_READY_MASK		0x80000000
 
+#define EQ_CTL_EVENT_MODE_SHIFT		28
+#define EQ_CTL_EVENT_MODE_MASK		0x70000000
+
 #define EQ_CTL_EVENT_TYPE_SHIFT		16
 #define EQ_CTL_EVENT_TYPE_MASK		0x0FFF0000
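With the new mode field at bits [30:28] (the ready bit stays at 31 and the type field at [27:16]), decoding a ctl word is plain mask-and-shift. A sketch; eq_ctl here is a stand-in for the __le32 control word of a received hl_eq_entry:

u32 ctl = le32_to_cpu(eq_ctl);

bool ready  = !!(ctl & EQ_CTL_READY_MASK);
u32 ev_mode = (ctl & EQ_CTL_EVENT_MODE_MASK) >> EQ_CTL_EVENT_MODE_SHIFT;
u32 ev_type = (ctl & EQ_CTL_EVENT_TYPE_MASK) >> EQ_CTL_EVENT_TYPE_SHIFT;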
@@ -853,9 +862,6 @@ struct cpucp_packet {
 			 * result cannot be used to hold general purpose data.
 			 */
 			__le32 status_mask;
-
-			/* random, used once number, for security packets */
-			__le32 nonce;
 		};
 
 		union {
@@ -864,6 +870,9 @@ struct cpucp_packet {
 			/* For Generic packet sub index */
 			__le32 pkt_subidx;
+
+			/* random, used once number, for security packets */
+			__le32 nonce;
 		};
 	};
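These two hunks relocate nonce from the union that aliases status_mask into the one that aliases pkt_subidx. Members of a union share an offset by definition, so the move can be documented with a build-time check. A sketch, assuming a linux/build_bug.h-style static_assert is usable in the consumer's context:

/* nonce now occupies the same bytes as pkt_subidx, not status_mask */
static_assert(offsetof(struct cpucp_packet, nonce) ==
	      offsetof(struct cpucp_packet, pkt_subidx));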
@@ -1140,6 +1149,7 @@ struct cpucp_security_info {
  *	(0 = fully functional, 1 = lower-half is not functional,
  *	2 = upper-half is not functional)
  * @sec_info: security information
+ * @cpld_timestamp: CPLD programmed F/W timestamp.
  * @pll_map: Bit map of supported PLLs for current ASIC version.
  * @mme_binning_mask: MME binning mask,
  *	bits [0:6] <==> dcore0 mme fma
@@ -1165,7 +1175,7 @@ struct cpucp_security_info {
 struct cpucp_info {
 	struct cpucp_sensor sensors[CPUCP_MAX_SENSORS];
 	__u8 kernel_version[VERSION_MAX_LEN];
-	__le32 reserved;
+	__le32 reserved1;
 	__le32 card_type;
 	__le32 card_location;
 	__le32 cpld_version;
@@ -1187,7 +1197,7 @@ struct cpucp_info {
 	__u8 substrate_version;
 	__u8 eq_health_check_supported;
 	struct cpucp_security_info sec_info;
-	__le32 fw_hbm_region_size;
+	__le32 cpld_timestamp;
 	__u8 pll_map[PLL_MAP_LEN];
 	__le64 mme_binning_mask;
 	__u8 fw_os_version[VERSION_MAX_LEN];
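Two notes on this struct: cpld_timestamp reuses the __le32 slot previously named fw_hbm_region_size, so field offsets are preserved, and the kernel-doc above spells out the low bits of mme_binning_mask. A sketch of extracting those dcore0 FMA bits; the variable names are illustrative:

/* bits [0:6] of the binning mask describe dcore0 mme fma, per the
 * kernel-doc above; GENMASK_ULL and le64_to_cpu are standard helpers.
 */
u64 binning = le64_to_cpu(info->mme_binning_mask);
u64 dcore0_fma = binning & GENMASK_ULL(6, 0);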
@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 /* SPDX-License-Identifier: GPL-2.0
* *
* Copyright 2018-2020 HabanaLabs, Ltd. * Copyright 2018-2023 HabanaLabs, Ltd.
* All Rights Reserved. * All Rights Reserved.
* *
*/ */
@@ -49,7 +49,6 @@ enum cpu_boot_err {
 #define CPU_BOOT_ERR_FATAL_MASK				\
 		((1 << CPU_BOOT_ERR_DRAM_INIT_FAIL) |		\
 		 (1 << CPU_BOOT_ERR_PLL_FAIL) |			\
-		 (1 << CPU_BOOT_ERR_DEVICE_UNUSABLE_FAIL) |	\
 		 (1 << CPU_BOOT_ERR_BINNING_FAIL) |		\
 		 (1 << CPU_BOOT_ERR_DRAM_SKIPPED) |		\
 		 (1 << CPU_BOOT_ERR_ENG_ARC_MEM_SCRUB_FAIL) |	\
@@ -194,6 +193,8 @@ enum cpu_boot_dev_sts {
 	CPU_BOOT_DEV_STS_FW_NIC_STAT_EXT_EN = 24,
 	CPU_BOOT_DEV_STS_IS_IDLE_CHECK_EN = 25,
 	CPU_BOOT_DEV_STS_MAP_HWMON_EN = 26,
+	CPU_BOOT_DEV_STS_NIC_MEM_CLEAR_EN = 27,
+	CPU_BOOT_DEV_STS_MMU_PGTBL_DRAM_EN = 28,
 	CPU_BOOT_DEV_STS_ENABLED = 31,
 	CPU_BOOT_DEV_STS_SCND_EN = 63,
 	CPU_BOOT_DEV_STS_LAST = 64 /* we have 2 registers of 32 bits */
@@ -331,6 +332,17 @@ enum cpu_boot_dev_sts {
  *					HWMON enum mapping to cpucp enums.
  *					Initialized in: linux
  *
+ * CPU_BOOT_DEV_STS0_NIC_MEM_CLEAR_EN
+ *					If set, means f/w supports nic hbm memory clear and
+ *					tmr,txs hbm memory init.
+ *					Initialized in: zephyr-mgmt
+ *
+ * CPU_BOOT_DEV_STS_MMU_PGTBL_DRAM_EN
+ *					MMU page tables are located in DRAM.
+ *					F/W initializes security settings for MMU
+ *					page tables to reside in DRAM.
+ *					Initialized in: zephyr-mgmt
+ *
  * CPU_BOOT_DEV_STS0_ENABLED		Device status register enabled.
  *					This is a main indication that the
  *					running FW populates the device status
@@ -367,6 +379,8 @@ enum cpu_boot_dev_sts {
 #define CPU_BOOT_DEV_STS0_FW_NIC_STAT_EXT_EN	(1 << CPU_BOOT_DEV_STS_FW_NIC_STAT_EXT_EN)
 #define CPU_BOOT_DEV_STS0_IS_IDLE_CHECK_EN	(1 << CPU_BOOT_DEV_STS_IS_IDLE_CHECK_EN)
 #define CPU_BOOT_DEV_STS0_MAP_HWMON_EN		(1 << CPU_BOOT_DEV_STS_MAP_HWMON_EN)
+#define CPU_BOOT_DEV_STS0_NIC_MEM_CLEAR_EN	(1 << CPU_BOOT_DEV_STS_NIC_MEM_CLEAR_EN)
+#define CPU_BOOT_DEV_STS0_MMU_PGTBL_DRAM_EN	(1 << CPU_BOOT_DEV_STS_MMU_PGTBL_DRAM_EN)
 
 #define CPU_BOOT_DEV_STS0_ENABLED		(1 << CPU_BOOT_DEV_STS_ENABLED)
 #define CPU_BOOT_DEV_STS1_ENABLED		(1 << CPU_BOOT_DEV_STS_ENABLED)
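Per the comment block above, STS0_ENABLED is the indication that firmware actually populates the status register, so capability tests should check it first. A minimal sketch; the helper name and the source of the register value are illustrative:

static bool fw_has_dram_page_tables(u32 dev_sts0)
{
	/* the register content is meaningful only when ENABLED is set */
	if (!(dev_sts0 & CPU_BOOT_DEV_STS0_ENABLED))
		return false;

	return !!(dev_sts0 & CPU_BOOT_DEV_STS0_MMU_PGTBL_DRAM_EN);
}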
@@ -450,11 +464,11 @@ struct cpu_dyn_regs {
 	__le32 gic_dma_core_irq_ctrl;
 	__le32 gic_host_halt_irq;
 	__le32 gic_host_ints_irq;
-	__le32 gic_host_soft_rst_irq;
+	__le32 reserved0;
 	__le32 gic_rot_qm_irq_ctrl;
-	__le32 cpu_rst_status;
+	__le32 reserved1;
 	__le32 eng_arc_irq_ctrl;
-	__le32 reserved1[20];		/* reserve for future use */
+	__le32 reserved2[20];		/* reserve for future use */
 };
 
 /* TODO: remove the desc magic after the code is updated to use message */
@@ -551,8 +565,9 @@ enum lkd_fw_ascii_msg_lvls {
 	LKD_FW_ASCII_MSG_DBG = 3,
 };
 
 #define LKD_FW_ASCII_MSG_MAX_LEN		128
 #define LKD_FW_ASCII_MSG_MAX			4	/* consider ABI when changing */
+#define LKD_FW_ASCII_MSG_MIN_DESC_VERSION	3
 
 struct lkd_fw_ascii_msg {
 	__u8 valid;