2
0
mirror of https://github.com/edk2-porting/linux-next.git synced 2024-12-28 15:13:55 +08:00

staging: ti dspbridge: add core driver sources

Add TI's DSP Bridge core driver sources

Signed-off-by: Omar Ramirez Luna <omar.ramirez@ti.com>
Signed-off-by: Kanigeri, Hari <h-kanigeri2@ti.com>
Signed-off-by: Ameya Palande <ameya.palande@nokia.com>
Signed-off-by: Guzman Lugo, Fernando <fernando.lugo@ti.com>
Signed-off-by: Hebbar, Shivananda <x0hebbar@ti.com>
Signed-off-by: Ramos Falcon, Ernesto <ernesto@ti.com>
Signed-off-by: Felipe Contreras <felipe.contreras@gmail.com>
Signed-off-by: Anna, Suman <s-anna@ti.com>
Signed-off-by: Gupta, Ramesh <grgupta@ti.com>
Signed-off-by: Gomez Castellanos, Ivan <ivan.gomez@ti.com>
Signed-off-by: Andy Shevchenko <ext-andriy.shevchenko@nokia.com>
Signed-off-by: Armando Uribe De Leon <x0095078@ti.com>
Signed-off-by: Deepak Chitriki <deepak.chitriki@ti.com>
Signed-off-by: Menon, Nishanth <nm@ti.com>
Signed-off-by: Phil Carmody <ext-phil.2.carmody@nokia.com>
Signed-off-by: Ohad Ben-Cohen <ohad@wizery.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
This commit is contained in:
Omar Ramirez Luna 2010-06-23 16:01:56 +03:00 committed by Greg Kroah-Hartman
parent 6c5fe83876
commit 999e07d632
17 changed files with 8884 additions and 0 deletions

View File

@ -0,0 +1,45 @@
/*
* _cmm.h
*
* DSP-BIOS Bridge driver support functions for TI OMAP processors.
*
* Private header file defining CMM manager objects and defines needed
* by IO manager to register shared memory regions when DSP base image
* is loaded(bridge_io_on_loaded).
*
* Copyright (C) 2005-2006 Texas Instruments, Inc.
*
* This package is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
* WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*/
#ifndef _CMM_
#define _CMM_
/*
 * These target side symbols define the beginning and ending addresses
 * of the section of shared memory used for shared memory manager CMM.
 * They are defined in the *cfg.cmd file by cdb code.
 */
/* DSP-image symbol names looked up when the base image is loaded. */
#define SHM0_SHARED_BASE_SYM "_SHM0_BEG"
#define SHM0_SHARED_END_SYM "_SHM0_END"
#define SHM0_SHARED_RESERVED_BASE_SYM "_SHM0_RSVDSTRT"
/*
 * Shared Memory Region #0(SHMSEG0) is used in the following way:
 *
 * |(_SHM0_BEG) | (_SHM0_RSVDSTRT) | (_SHM0_END)
 * V V V
 * ------------------------------------------------------------
 * | DSP-side allocations | GPP-side allocations |
 * ------------------------------------------------------------
 *
 *
 */
#endif /* _CMM_ */

View File

@ -0,0 +1,35 @@
/*
* _deh.h
*
* DSP-BIOS Bridge driver support functions for TI OMAP processors.
*
* Private header for DEH module.
*
* Copyright (C) 2005-2006 Texas Instruments, Inc.
*
* This package is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
* WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*/
#ifndef _DEH_
#define _DEH_
#include <dspbridge/ntfy.h>
#include <dspbridge/dspdefs.h>
/*
 * DEH Manager: only one created per board.
 * Holds the exception state reported by the DSP and the tasklet that
 * services MMU faults (see mmu_fault.c, which schedules dpc_tasklet).
 */
struct deh_mgr {
	struct bridge_dev_context *hbridge_context;	/* Bridge context. */
	struct ntfy_object *ntfy_obj;	/* NTFY object for DEH notifications */
	struct dsp_errorinfo err_info;	/* DSP exception info. */
	/* MMU Fault DPC (deferred handling outside the fault ISR) */
	struct tasklet_struct dpc_tasklet;
};
#endif /* _DEH_ */

View File

@ -0,0 +1,142 @@
/*
* _msg_sm.h
*
* DSP-BIOS Bridge driver support functions for TI OMAP processors.
*
* Private header file defining msg_ctrl manager objects and defines needed
* by IO manager.
*
* Copyright (C) 2005-2006 Texas Instruments, Inc.
*
* This package is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
* WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*/
#ifndef _MSG_SM_
#define _MSG_SM_
#include <dspbridge/list.h>
#include <dspbridge/msgdefs.h>
/*
 * These target side symbols define the beginning and ending addresses
 * of the section of shared memory used for messages. They are
 * defined in the *cfg.cmd file by cdb code.
 */
#define MSG_SHARED_BUFFER_BASE_SYM "_MSG_BEG"
#define MSG_SHARED_BUFFER_LIMIT_SYM "_MSG_END"
#ifndef _CHNL_WORDSIZE
#define _CHNL_WORDSIZE 4	/* default _CHNL_WORDSIZE is 4 bytes/word */
#endif
/*
* ======== msg_ctrl ========
* There is a control structure for messages to the DSP, and a control
* structure for messages from the DSP. The shared memory region for
* transferring messages is partitioned as follows:
*
* ----------------------------------------------------------
* |Control | Messages from DSP | Control | Messages to DSP |
* ----------------------------------------------------------
*
* msg_ctrl control structure for messages to the DSP is used in the following
* way:
*
* buf_empty - This flag is set to FALSE by the GPP after it has output
* messages for the DSP. The DSP host driver sets it to
* TRUE after it has copied the messages.
* post_swi - Set to 1 by the GPP after it has written the messages,
* set the size, and set buf_empty to FALSE.
* The DSP Host driver uses SWI_andn of the post_swi field
* when a host interrupt occurs. The host driver clears
* this after posting the SWI.
* size - Number of messages to be read by the DSP.
*
* For messages from the DSP:
* buf_empty - This flag is set to FALSE by the DSP after it has output
* messages for the GPP. The DPC on the GPP sets it to
* TRUE after it has copied the messages.
* post_swi - Set to 1 the DPC on the GPP after copying the messages.
* size - Number of messages to be read by the GPP.
*/
/* Control block that lives in shared memory; see layout comment above. */
struct msg_ctrl {
	u32 buf_empty;	/* to/from DSP buffer is empty */
	u32 post_swi;	/* Set to "1" to post msg_ctrl SWI */
	u32 size;	/* Number of messages to/from the DSP */
	u32 resvd;	/* reserved / padding */
};
/*
* ======== msg_mgr ========
* The msg_mgr maintains a list of all MSG_QUEUEs. Each NODE object can
* have msg_queue to hold all messages that come up from the corresponding
* node on the DSP. The msg_mgr also has a shared queue of messages
* ready to go to the DSP.
*/
/* One per board; owns the shared to-DSP message queue (see comment above). */
struct msg_mgr {
	/* The first field must match that in msgobj.h */
	/* Function interface to Bridge driver */
	struct bridge_drv_interface *intf_fxns;
	struct io_mgr *hio_mgr;	/* IO manager */
	struct lst_list *queue_list;	/* List of MSG_QUEUEs */
	spinlock_t msg_mgr_lock;	/* For critical sections */
	/* Signalled when MsgFrame is available */
	struct sync_object *sync_event;
	struct lst_list *msg_free_list;	/* Free MsgFrames ready to be filled */
	struct lst_list *msg_used_list;	/* MsgFrames ready to go to DSP */
	u32 msgs_pending;	/* # of queued messages to go to DSP */
	u32 max_msgs;	/* Max # of msgs that fit in buffer */
	msg_onexit on_exit;	/* called when RMS_EXIT is received */
};
/*
* ======== msg_queue ========
* Each NODE has a msg_queue for receiving messages from the
* corresponding node on the DSP. The msg_queue object maintains a list
* of messages that have been sent to the host, but not yet read (MSG_Get),
* and a list of free frames that can be filled when new messages arrive
* from the DSP.
* The msg_queue's sync_event gets posted when a message is ready.
*/
/* Per-NODE receive queue; list_elem links it into msg_mgr->queue_list. */
struct msg_queue {
	struct list_head list_elem;
	struct msg_mgr *hmsg_mgr;	/* back-pointer to owning manager */
	u32 max_msgs;	/* Node message depth */
	u32 msgq_id;	/* Node environment pointer */
	struct lst_list *msg_free_list;	/* Free MsgFrames ready to be filled */
	/* Filled MsgFramess waiting to be read */
	struct lst_list *msg_used_list;
	void *arg;	/* Handle passed to mgr on_exit callback */
	struct sync_object *sync_event;	/* Signalled when message is ready */
	struct sync_object *sync_done;	/* For synchronizing cleanup */
	struct sync_object *sync_done_ack;	/* For synchronizing cleanup */
	struct ntfy_object *ntfy_obj;	/* For notification of message ready */
	bool done;	/* TRUE <==> deleting the object */
	u32 io_msg_pend;	/* Number of pending MSG_get/put calls */
};
/*
 * ======== msg_dspmsg ========
 * Payload exchanged with the DSP: the message plus its destination queue id.
 */
struct msg_dspmsg {
	struct dsp_msg msg;
	u32 msgq_id;	/* Identifies the node the message goes to */
};
/*
 * ======== msg_frame ========
 * List wrapper so msg_dspmsg payloads can sit on free/used lst_lists.
 */
struct msg_frame {
	struct list_head list_elem;
	struct msg_dspmsg msg_data;
};
#endif /* _MSG_SM_ */

View File

@ -0,0 +1,377 @@
/*
* _tiomap.h
*
* DSP-BIOS Bridge driver support functions for TI OMAP processors.
*
* Definitions and types private to this Bridge driver.
*
* Copyright (C) 2005-2006 Texas Instruments, Inc.
*
* This package is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
* WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*/
#ifndef _TIOMAP_
#define _TIOMAP_
#include <plat/powerdomain.h>
#include <plat/clockdomain.h>
#include <mach-omap2/prm-regbits-34xx.h>
#include <mach-omap2/cm-regbits-34xx.h>
#include <dspbridge/devdefs.h>
#include <hw_defs.h>
#include <dspbridge/dspioctl.h> /* for bridge_ioctl_extproc defn */
#include <dspbridge/sync.h>
#include <dspbridge/clk.h>
/* One entry of the L4 peripheral map: MPU physical base vs. DSP virtual. */
struct map_l4_peripheral {
	u32 phys_addr;	/* L4 physical base address */
	u32 dsp_virt_addr;	/* address the DSP uses for the same peripheral */
};
#define ARM_MAILBOX_START 0xfffcf000
#define ARM_MAILBOX_LENGTH 0x800
/* New Registers in OMAP3.1 */
#define TESTBLOCK_ID_START 0xfffed400
#define TESTBLOCK_ID_LENGTH 0xff
/* ID Returned by OMAP1510 */
#define TBC_ID_VALUE 0xB47002F
#define SPACE_LENGTH 0x2000
#define API_CLKM_DPLL_DMA 0xfffec000
#define ARM_INTERRUPT_OFFSET 0xb00
#define BIOS24XX
#define L4_PERIPHERAL_NULL 0x0
#define DSPVA_PERIPHERAL_NULL 0x0
#define MAX_LOCK_TLB_ENTRIES 15
#define L4_PERIPHERAL_PRM 0x48306000 /*PRM L4 Peripheral */
#define DSPVA_PERIPHERAL_PRM 0x1181e000
#define L4_PERIPHERAL_SCM 0x48002000 /*SCM L4 Peripheral */
#define DSPVA_PERIPHERAL_SCM 0x1181f000
#define L4_PERIPHERAL_MMU 0x5D000000 /*MMU L4 Peripheral */
#define DSPVA_PERIPHERAL_MMU 0x11820000
#define L4_PERIPHERAL_CM 0x48004000 /* Core L4, Clock Management */
#define DSPVA_PERIPHERAL_CM 0x1181c000
#define L4_PERIPHERAL_PER 0x48005000 /* PER */
#define DSPVA_PERIPHERAL_PER 0x1181d000
#define L4_PERIPHERAL_GPIO1 0x48310000
#define DSPVA_PERIPHERAL_GPIO1 0x11809000
#define L4_PERIPHERAL_GPIO2 0x49050000
#define DSPVA_PERIPHERAL_GPIO2 0x1180a000
#define L4_PERIPHERAL_GPIO3 0x49052000
#define DSPVA_PERIPHERAL_GPIO3 0x1180b000
#define L4_PERIPHERAL_GPIO4 0x49054000
#define DSPVA_PERIPHERAL_GPIO4 0x1180c000
#define L4_PERIPHERAL_GPIO5 0x49056000
#define DSPVA_PERIPHERAL_GPIO5 0x1180d000
#define L4_PERIPHERAL_IVA2WDT 0x49030000
#define DSPVA_PERIPHERAL_IVA2WDT 0x1180e000
#define L4_PERIPHERAL_DISPLAY 0x48050000
#define DSPVA_PERIPHERAL_DISPLAY 0x1180f000
#define L4_PERIPHERAL_SSI 0x48058000
#define DSPVA_PERIPHERAL_SSI 0x11804000
#define L4_PERIPHERAL_GDD 0x48059000
#define DSPVA_PERIPHERAL_GDD 0x11805000
#define L4_PERIPHERAL_SS1 0x4805a000
#define DSPVA_PERIPHERAL_SS1 0x11806000
#define L4_PERIPHERAL_SS2 0x4805b000
#define DSPVA_PERIPHERAL_SS2 0x11807000
#define L4_PERIPHERAL_CAMERA 0x480BC000
#define DSPVA_PERIPHERAL_CAMERA 0x11819000
#define L4_PERIPHERAL_SDMA 0x48056000
#define DSPVA_PERIPHERAL_SDMA 0x11810000 /* 0x1181d000 conflict w/ PER */
#define L4_PERIPHERAL_UART1 0x4806a000
#define DSPVA_PERIPHERAL_UART1 0x11811000
#define L4_PERIPHERAL_UART2 0x4806c000
#define DSPVA_PERIPHERAL_UART2 0x11812000
#define L4_PERIPHERAL_UART3 0x49020000
#define DSPVA_PERIPHERAL_UART3 0x11813000
#define L4_PERIPHERAL_MCBSP1 0x48074000
#define DSPVA_PERIPHERAL_MCBSP1 0x11814000
#define L4_PERIPHERAL_MCBSP2 0x49022000
#define DSPVA_PERIPHERAL_MCBSP2 0x11815000
#define L4_PERIPHERAL_MCBSP3 0x49024000
#define DSPVA_PERIPHERAL_MCBSP3 0x11816000
#define L4_PERIPHERAL_MCBSP4 0x49026000
#define DSPVA_PERIPHERAL_MCBSP4 0x11817000
#define L4_PERIPHERAL_MCBSP5 0x48096000
#define DSPVA_PERIPHERAL_MCBSP5 0x11818000
#define L4_PERIPHERAL_GPTIMER5 0x49038000
#define DSPVA_PERIPHERAL_GPTIMER5 0x11800000
#define L4_PERIPHERAL_GPTIMER6 0x4903a000
#define DSPVA_PERIPHERAL_GPTIMER6 0x11801000
#define L4_PERIPHERAL_GPTIMER7 0x4903c000
#define DSPVA_PERIPHERAL_GPTIMER7 0x11802000
#define L4_PERIPHERAL_GPTIMER8 0x4903e000
#define DSPVA_PERIPHERAL_GPTIMER8 0x11803000
#define L4_PERIPHERAL_SPI1 0x48098000
#define DSPVA_PERIPHERAL_SPI1 0x1181a000
#define L4_PERIPHERAL_SPI2 0x4809a000
#define DSPVA_PERIPHERAL_SPI2 0x1181b000
#define L4_PERIPHERAL_MBOX 0x48094000
#define DSPVA_PERIPHERAL_MBOX 0x11808000
#define PM_GRPSEL_BASE 0x48307000
#define DSPVA_GRPSEL_BASE 0x11821000
#define L4_PERIPHERAL_SIDETONE_MCBSP2 0x49028000
#define DSPVA_PERIPHERAL_SIDETONE_MCBSP2 0x11824000
#define L4_PERIPHERAL_SIDETONE_MCBSP3 0x4902a000
#define DSPVA_PERIPHERAL_SIDETONE_MCBSP3 0x11825000
/*
 * define a static array with L4 mappings
 * Scanned linearly; the {L4_PERIPHERAL_NULL, DSPVA_PERIPHERAL_NULL} pair
 * terminates the table.
 */
static const struct map_l4_peripheral l4_peripheral_table[] = {
	{L4_PERIPHERAL_MBOX, DSPVA_PERIPHERAL_MBOX},
	{L4_PERIPHERAL_SCM, DSPVA_PERIPHERAL_SCM},
	{L4_PERIPHERAL_MMU, DSPVA_PERIPHERAL_MMU},
	{L4_PERIPHERAL_GPTIMER5, DSPVA_PERIPHERAL_GPTIMER5},
	{L4_PERIPHERAL_GPTIMER6, DSPVA_PERIPHERAL_GPTIMER6},
	{L4_PERIPHERAL_GPTIMER7, DSPVA_PERIPHERAL_GPTIMER7},
	{L4_PERIPHERAL_GPTIMER8, DSPVA_PERIPHERAL_GPTIMER8},
	{L4_PERIPHERAL_GPIO1, DSPVA_PERIPHERAL_GPIO1},
	{L4_PERIPHERAL_GPIO2, DSPVA_PERIPHERAL_GPIO2},
	{L4_PERIPHERAL_GPIO3, DSPVA_PERIPHERAL_GPIO3},
	{L4_PERIPHERAL_GPIO4, DSPVA_PERIPHERAL_GPIO4},
	{L4_PERIPHERAL_GPIO5, DSPVA_PERIPHERAL_GPIO5},
	{L4_PERIPHERAL_IVA2WDT, DSPVA_PERIPHERAL_IVA2WDT},
	{L4_PERIPHERAL_DISPLAY, DSPVA_PERIPHERAL_DISPLAY},
	{L4_PERIPHERAL_SSI, DSPVA_PERIPHERAL_SSI},
	{L4_PERIPHERAL_GDD, DSPVA_PERIPHERAL_GDD},
	{L4_PERIPHERAL_SS1, DSPVA_PERIPHERAL_SS1},
	{L4_PERIPHERAL_SS2, DSPVA_PERIPHERAL_SS2},
	{L4_PERIPHERAL_UART1, DSPVA_PERIPHERAL_UART1},
	{L4_PERIPHERAL_UART2, DSPVA_PERIPHERAL_UART2},
	{L4_PERIPHERAL_UART3, DSPVA_PERIPHERAL_UART3},
	{L4_PERIPHERAL_MCBSP1, DSPVA_PERIPHERAL_MCBSP1},
	{L4_PERIPHERAL_MCBSP2, DSPVA_PERIPHERAL_MCBSP2},
	{L4_PERIPHERAL_MCBSP3, DSPVA_PERIPHERAL_MCBSP3},
	{L4_PERIPHERAL_MCBSP4, DSPVA_PERIPHERAL_MCBSP4},
	{L4_PERIPHERAL_MCBSP5, DSPVA_PERIPHERAL_MCBSP5},
	{L4_PERIPHERAL_CAMERA, DSPVA_PERIPHERAL_CAMERA},
	{L4_PERIPHERAL_SPI1, DSPVA_PERIPHERAL_SPI1},
	{L4_PERIPHERAL_SPI2, DSPVA_PERIPHERAL_SPI2},
	{L4_PERIPHERAL_PRM, DSPVA_PERIPHERAL_PRM},
	{L4_PERIPHERAL_CM, DSPVA_PERIPHERAL_CM},
	{L4_PERIPHERAL_PER, DSPVA_PERIPHERAL_PER},
	{PM_GRPSEL_BASE, DSPVA_GRPSEL_BASE},
	{L4_PERIPHERAL_SIDETONE_MCBSP2, DSPVA_PERIPHERAL_SIDETONE_MCBSP2},
	{L4_PERIPHERAL_SIDETONE_MCBSP3, DSPVA_PERIPHERAL_SIDETONE_MCBSP3},
	{L4_PERIPHERAL_NULL, DSPVA_PERIPHERAL_NULL}	/* terminator */
};
/*
* 15 10 0
* ---------------------------------
* |0|0|1|0|0|0|c|c|c|i|i|i|i|i|i|i|
* ---------------------------------
* | (class) | (module specific) |
*
* where c -> External Clock Command: Clk & Autoidle Disable/Enable
* i -> External Clock ID Timers 5,6,7,8, McBSP1,2 and WDT3
*/
/* MBX_PM_CLK_IDMASK: DSP External clock id mask. */
#define MBX_PM_CLK_IDMASK 0x7F
/* MBX_PM_CLK_CMDSHIFT: DSP External clock command shift. */
#define MBX_PM_CLK_CMDSHIFT 7
/* MBX_PM_CLK_CMDMASK: DSP External clock command mask. */
#define MBX_PM_CLK_CMDMASK 7
/* MBX_CORE1_RESOURCES: CORE 1 Clock resources. */
#define MBX_CORE1_RESOURCES 7
/* MBX_CORE2_RESOURCES: CORE 2 Clock Resources. */
#define MBX_CORE2_RESOURCES 1
/* MBX_PM_MAX_RESOURCES: Total clock resources. */
#define MBX_PM_MAX_RESOURCES 11
/* Power Management Commands */
#define BPWR_DISABLE_CLOCK 0
#define BPWR_ENABLE_CLOCK 1
/* OMAP242x specific resources */
/*
 * External clock ids as encoded in the mailbox PM command word (the 'i'
 * bits of the layout diagram above).
 */
enum bpwr_ext_clock_id {
	BPWR_GP_TIMER5 = 0x10,
	BPWR_GP_TIMER6,
	BPWR_GP_TIMER7,
	BPWR_GP_TIMER8,
	BPWR_WD_TIMER3,
	BPWR_MCBSP1,
	BPWR_MCBSP2,
	BPWR_MCBSP3,
	BPWR_MCBSP4,
	BPWR_MCBSP5,
	BPWR_SSI = 0x20
};
/* Flat list of all external clock ids; parallels bpwr_clks[] below. */
static const u32 bpwr_clkid[] = {
	(u32) BPWR_GP_TIMER5,
	(u32) BPWR_GP_TIMER6,
	(u32) BPWR_GP_TIMER7,
	(u32) BPWR_GP_TIMER8,
	(u32) BPWR_WD_TIMER3,
	(u32) BPWR_MCBSP1,
	(u32) BPWR_MCBSP2,
	(u32) BPWR_MCBSP3,
	(u32) BPWR_MCBSP4,
	(u32) BPWR_MCBSP5,
	(u32) BPWR_SSI
};
/* Pairs a mailbox external clock id with its dsp_clk_id counterpart. */
struct bpwr_clk_t {
	u32 clk_id;	/* enum bpwr_ext_clock_id value */
	enum dsp_clk_id clk;	/* matching clock handled by clk.c */
};
static const struct bpwr_clk_t bpwr_clks[] = {
	{(u32) BPWR_GP_TIMER5, DSP_CLK_GPT5},
	{(u32) BPWR_GP_TIMER6, DSP_CLK_GPT6},
	{(u32) BPWR_GP_TIMER7, DSP_CLK_GPT7},
	{(u32) BPWR_GP_TIMER8, DSP_CLK_GPT8},
	{(u32) BPWR_WD_TIMER3, DSP_CLK_WDT3},
	{(u32) BPWR_MCBSP1, DSP_CLK_MCBSP1},
	{(u32) BPWR_MCBSP2, DSP_CLK_MCBSP2},
	{(u32) BPWR_MCBSP3, DSP_CLK_MCBSP3},
	{(u32) BPWR_MCBSP4, DSP_CLK_MCBSP4},
	{(u32) BPWR_MCBSP5, DSP_CLK_MCBSP5},
	{(u32) BPWR_SSI, DSP_CLK_SSI}
};
/* Interrupt Register Offsets */
#define INTH_IT_REG_OFFSET 0x00 /* Interrupt register offset */
#define INTH_MASK_IT_REG_OFFSET 0x04 /* Mask Interrupt reg offset */
#define DSP_MAILBOX1_INT 10
/*
* Bit definition of Interrupt Level Registers
*/
/* Mail Box defines */
#define MB_ARM2DSP1_REG_OFFSET 0x00
#define MB_ARM2DSP1B_REG_OFFSET 0x04
#define MB_DSP2ARM1B_REG_OFFSET 0x0C
#define MB_ARM2DSP1_FLAG_REG_OFFSET 0x18
#define MB_ARM2DSP_FLAG 0x0001
#define MBOX_ARM2DSP HW_MBOX_ID0
#define MBOX_DSP2ARM HW_MBOX_ID1
#define MBOX_ARM HW_MBOX_U0_ARM
#define MBOX_DSP HW_MBOX_U1_DSP1
#define ENABLE true
#define DISABLE false
#define HIGH_LEVEL true
#define LOW_LEVEL false
/* Macro's */
#define REG16(A) (*(reg_uword16 *)(A))
/*
 * Bit-twiddling helpers.  'reg' and 'mask' are fully parenthesized so
 * compound expressions passed as arguments are masked/negated as a whole.
 */
#define CLEAR_BIT(reg, mask) ((reg) &= ~(mask))
#define SET_BIT(reg, mask) ((reg) |= (mask))
/*
 * Replace the 'width'-bit field of 'reg' starting at bit 'position' with
 * 'value'.  The do/while wrapper must NOT carry a trailing semicolon:
 * the old "while (0);" form injected an empty statement, which broke the
 * macro when used as the body of an if/else.
 */
#define SET_GROUP_BITS16(reg, position, width, value) \
do {\
	(reg) &= ~((0xFFFF >> (16 - (width))) << (position)); \
	(reg) |= (((value) & (0xFFFF >> (16 - (width)))) << (position)); \
} while (0)
#define CLEAR_BIT_INDEX(reg, index) ((reg) &= ~(1 << (index)))
/* This Bridge driver's device context: one instance per DSP device. */
struct bridge_dev_context {
	struct dev_object *hdev_obj;	/* Handle to Bridge device object. */
	u32 dw_dsp_base_addr;	/* Arm's API to DSP virt base addr */
	/*
	 * DSP External memory prog address as seen virtually by the OS on
	 * the host side.
	 */
	u32 dw_dsp_ext_base_addr;	/* See the comment above */
	u32 dw_api_reg_base;	/* API mem map'd registers */
	void __iomem *dw_dsp_mmu_base;	/* DSP MMU Mapped registers */
	u32 dw_api_clk_base;	/* CLK Registers */
	u32 dw_dsp_clk_m2_base;	/* DSP Clock Module m2 */
	u32 dw_public_rhea;	/* Pub Rhea */
	u32 dw_int_addr;	/* MB INTR reg */
	u32 dw_tc_endianism;	/* TC Endianism register */
	u32 dw_test_base;	/* DSP MMU Mapped registers */
	u32 dw_self_loop;	/* Pointer to the selfloop */
	u32 dw_dsp_start_add;	/* API Boot vector */
	u32 dw_internal_size;	/* Internal memory size */
	struct omap_mbox *mbox;	/* Mail box handle */
	struct cfg_hostres *resources;	/* Host Resources */
	/*
	 * Processor specific info is set when prog loaded and read from DCD.
	 * [See bridge_dev_ctrl()] PROC info contains DSP-MMU TLB entries.
	 */
	/* DMMU TLB entries */
	struct bridge_ioctl_extproc atlb_entry[BRDIOCTL_NUMOFMMUTLB];
	u32 dw_brd_state;	/* Last known board state. */
	u32 ul_int_mask;	/* int mask */
	u16 io_base;	/* Board I/O base */
	u32 num_tlb_entries;	/* DSP MMU TLB entry counter */
	u32 fixed_tlb_entries;	/* Fixed DSPMMU TLB entry count */
	/* TC Settings */
	bool tc_word_swap_on;	/* Traffic Controller Word Swap */
	struct pg_table_attrs *pt_attrs;	/* DSP page-table attributes */
	u32 dsp_per_clks;	/* bitmask of DSP-requested peripheral clocks */
};
/*
 * If dsp_debug is true, do not branch to the DSP entry
 * point and wait for DSP to boot.
 */
extern s32 dsp_debug;
/*
 * ======== sm_interrupt_dsp ========
 * Purpose:
 * Set interrupt value & send an interrupt to the DSP processor(s).
 * This is typically used when mailbox interrupt mechanisms allow data
 * to be associated with interrupt such as for OMAP's CMD/DATA regs.
 * Parameters:
 * dev_context: Handle to Bridge driver defined device info.
 * mb_val: Value associated with interrupt(e.g. mailbox value).
 * Returns:
 * 0: Interrupt sent;
 * else: Unable to send interrupt.
 * Requires:
 * Ensures:
 */
int sm_interrupt_dsp(struct bridge_dev_context *dev_context, u16 mb_val);
#endif /* _TIOMAP_ */

View File

@ -0,0 +1,85 @@
/*
* _tiomap_pwr.h
*
* DSP-BIOS Bridge driver support functions for TI OMAP processors.
*
* Definitions and types for the DSP wake/sleep routines.
*
* Copyright (C) 2005-2006 Texas Instruments, Inc.
*
* This package is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
* WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*/
#ifndef _TIOMAP_PWR_
#define _TIOMAP_PWR_
#ifdef CONFIG_PM
extern s32 dsp_test_sleepstate;
#endif
extern struct mailbox_context mboxsetting;
/*
 * ======== wake_dsp =========
 * Wakes up the DSP from DeepSleep
 */
extern int wake_dsp(struct bridge_dev_context *dev_context,
		    IN void *pargs);
/*
 * ======== sleep_dsp =========
 * Places the DSP in DeepSleep.
 */
extern int sleep_dsp(struct bridge_dev_context *dev_context,
		     IN u32 dw_cmd, IN void *pargs);
/*
 * ========interrupt_dsp========
 * Sends an interrupt to DSP unconditionally.
 */
extern void interrupt_dsp(struct bridge_dev_context *dev_context,
			  IN u16 mb_val);
/*
 * ======== dsp_peripheral_clk_ctrl =========
 * Enables/disables the DSP peripheral clocks requested via mailbox.
 * (Header previously duplicated the wake_dsp description by mistake.)
 */
extern int dsp_peripheral_clk_ctrl(struct bridge_dev_context
				   *dev_context, IN void *pargs);
/*
 * ======== handle_hibernation_from_dsp ========
 * Handle Hibernation requested from DSP
 */
int handle_hibernation_from_dsp(struct bridge_dev_context *dev_context);
/*
 * ======== post_scale_dsp ========
 * Handle Post Scale notification to DSP
 */
int post_scale_dsp(struct bridge_dev_context *dev_context,
		   IN void *pargs);
/*
 * ======== pre_scale_dsp ========
 * Handle Pre Scale notification to DSP
 */
int pre_scale_dsp(struct bridge_dev_context *dev_context,
		  IN void *pargs);
/*
 * ======== handle_constraints_set ========
 * Handle constraints request from DSP
 */
int handle_constraints_set(struct bridge_dev_context *dev_context,
			   IN void *pargs);
/*
 * ======== dsp_clk_wakeup_event_ctrl ========
 * This function sets the group selection bits used while
 * enabling/disabling clock wakeup events.
 */
void dsp_clk_wakeup_event_ctrl(u32 ClkId, bool enable);
#endif /* _TIOMAP_PWR_ */

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,421 @@
/*
* clk.c
*
* DSP-BIOS Bridge driver support functions for TI OMAP processors.
*
* Clock and Timer services.
*
* Copyright (C) 2005-2006 Texas Instruments, Inc.
*
* This package is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
* WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*/
/* ----------------------------------- Host OS */
#include <dspbridge/host_os.h>
#include <plat/dmtimer.h>
#include <plat/mcbsp.h>
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/std.h>
#include <dspbridge/dbdefs.h>
#include <dspbridge/cfg.h>
#include <dspbridge/drv.h>
#include <dspbridge/dev.h>
#include "_tiomap.h"
/* ----------------------------------- Trace & Debug */
#include <dspbridge/dbc.h>
/* ----------------------------------- This */
#include <dspbridge/clk.h>
/* ----------------------------------- Defines, Data Structures, Typedefs */
#define OMAP_SSI_OFFSET 0x58000
#define OMAP_SSI_SIZE 0x1000
#define OMAP_SSI_SYSCONFIG_OFFSET 0x10
/* SSI SYSCONFIG bit values programmed by ssi_clk_prepare() */
#define SSI_AUTOIDLE (1 << 0)
#define SSI_SIDLE_SMARTIDLE (2 << 3)
#define SSI_MIDLE_NOIDLE (1 << 12)
/* Clk types requested by the dsp */
#define IVA2_CLK 0
#define GPT_CLK 1
#define WDT_CLK 2
#define MCBSP_CLK 3
#define SSI_CLK 4
/* Bridge GPT id (1 - 4), DM Timer id (5 - 8) */
#define DMT_ID(id) ((id) + 4)
/* Bridge MCBSP id (6 - 10), OMAP Mcbsp id (0 - 4) */
#define MCBSP_ID(id) ((id) - 6)
/* GPT handles returned by omap_dm_timer_request_specific(), index clk_id-1 */
static struct omap_dm_timer *timer[4];
struct clk *iva2_clk;	/* main IVA2 functional clock */
struct dsp_ssi {
	struct clk *sst_fck;	/* SSI transmit functional clock */
	struct clk *ssr_fck;	/* SSI receive functional clock */
	struct clk *ick;	/* SSI interface clock */
};
static struct dsp_ssi ssi;
/* Bitmask of currently enabled DSP clocks, indexed by enum dsp_clk_id. */
static u32 dsp_clocks;
/* Nonzero iff bit 'id' is set in the clock-state mask 'clk'. */
static inline u32 is_dsp_clk_active(u32 clk, u8 id)
{
	u32 bit = (u32)1 << id;

	return clk & bit;
}
/* Record clock 'id' as enabled in the state mask pointed to by 'clk'. */
static inline void set_dsp_clk_active(u32 *clk, u8 id)
{
	u32 bit = (u32)1 << id;

	*clk |= bit;
}
/* Record clock 'id' as disabled in the state mask pointed to by 'clk'. */
static inline void set_dsp_clk_inactive(u32 *clk, u8 id)
{
	u32 bit = (u32)1 << id;

	*clk &= ~bit;
}
/*
 * Map a DSP clock id onto its category (IVA2_CLK, GPT_CLK, WDT_CLK,
 * MCBSP_CLK or SSI_CLK); -1 for ids outside the known ranges.
 * The range checks rely on the ordering of enum dsp_clk_id.
 */
static s8 get_clk_type(u8 id)
{
	if (id == DSP_CLK_IVA2)
		return IVA2_CLK;
	if (id <= DSP_CLK_GPT8)
		return GPT_CLK;
	if (id == DSP_CLK_WDT3)
		return WDT_CLK;
	if (id <= DSP_CLK_MCBSP5)
		return MCBSP_CLK;
	if (id == DSP_CLK_SSI)
		return SSI_CLK;

	return -1;
}
/*
 * ======== dsp_clk_exit ========
 * Purpose:
 * Cleanup CLK module: force off every clock the DSP still holds, then
 * drop the clk framework references taken in dsp_clk_init().
 */
void dsp_clk_exit(void)
{
	/* Disable any clocks left enabled (tracked in the dsp_clocks mask). */
	dsp_clock_disable_all(dsp_clocks);
	clk_put(iva2_clk);
	clk_put(ssi.sst_fck);
	clk_put(ssi.ssr_fck);
	clk_put(ssi.ick);
}
/*
 * ======== dsp_clk_init ========
 * Purpose:
 * Initialize CLK module: look up the IVA2 and SSI clocks by name.
 * Failures are only logged; the handles are checked again at use time.
 */
void dsp_clk_init(void)
{
	/* Dummy platform device so clk_get() resolves against the bus. */
	static struct platform_device dspbridge_device;

	dspbridge_device.dev.bus = &platform_bus_type;

	iva2_clk = clk_get(&dspbridge_device.dev, "iva2_ck");
	if (IS_ERR(iva2_clk))
		dev_err(bridge, "failed to get iva2 clock %p\n", iva2_clk);

	ssi.sst_fck = clk_get(&dspbridge_device.dev, "ssi_sst_fck");
	ssi.ssr_fck = clk_get(&dspbridge_device.dev, "ssi_ssr_fck");
	ssi.ick = clk_get(&dspbridge_device.dev, "ssi_ick");
	if (IS_ERR(ssi.sst_fck) || IS_ERR(ssi.ssr_fck) || IS_ERR(ssi.ick))
		dev_err(bridge, "failed to get ssi: sst %p, ssr %p, ick %p\n",
			ssi.sst_fck, ssi.ssr_fck, ssi.ick);
}
#ifdef CONFIG_OMAP_MCBSP
/*
 * Switch the MCBSP1/MCBSP2 serial-clock source bit in the system control
 * register at offset 0x274 (MCBSP*_CLKS bits — presumably CONTROL_DEVCONF0;
 * TODO confirm against the TRM).  'flag' true turns the bit on, false off.
 * Other McBSP ids are ignored, as in the original per-branch code.
 */
static void mcbsp_clk_prepare(bool flag, u8 id)
{
	struct cfg_hostres *resources;
	struct dev_object *hdev_object = NULL;
	struct bridge_dev_context *bridge_context = NULL;
	u32 val;
	u32 bit;

	hdev_object = (struct dev_object *)drv_get_first_dev_object();
	if (!hdev_object)
		return;

	dev_get_bridge_context(hdev_object, &bridge_context);
	if (!bridge_context)
		return;

	resources = bridge_context->resources;
	if (!resources)
		return;

	if (id == DSP_CLK_MCBSP1)
		bit = 1 << 2;	/* MCBSP1_CLKS */
	else if (id == DSP_CLK_MCBSP2)
		bit = 1 << 6;	/* MCBSP2_CLKS */
	else
		return;	/* no CLKS bit for the other McBSPs */

	/* Single read-modify-write replaces the four duplicated branches. */
	val = __raw_readl(resources->dw_sys_ctrl_base + 0x274);
	if (flag)
		val |= bit;
	else
		val &= ~bit;
	__raw_writel(val, resources->dw_sys_ctrl_base + 0x274);
}
#endif
/**
* dsp_gpt_wait_overflow - set gpt overflow and wait for fixed timeout
* @clk_id: GP Timer clock id.
* @load: Overflow value.
*
* Sets an overflow interrupt for the desired GPT waiting for a timeout
* of 5 msecs for the interrupt to occur.
*/
void dsp_gpt_wait_overflow(short int clk_id, unsigned int load)
{
struct omap_dm_timer *gpt = timer[clk_id - 1];
unsigned long timeout;
if (!gpt)
return;
/* Enable overflow interrupt */
omap_dm_timer_set_int_enable(gpt, OMAP_TIMER_INT_OVERFLOW);
/*
* Set counter value to overflow counter after
* one tick and start timer.
*/
omap_dm_timer_set_load_start(gpt, 0, load);
/* Wait 80us for timer to overflow */
udelay(80);
timeout = msecs_to_jiffies(5);
/* Check interrupt status and wait for interrupt */
while (!(omap_dm_timer_read_status(gpt) & OMAP_TIMER_INT_OVERFLOW)) {
if (time_is_after_jiffies(timeout)) {
pr_err("%s: GPTimer interrupt failed\n", __func__);
break;
}
}
}
/*
 * ======== dsp_clk_enable ========
 * Purpose:
 * Enable the clock identified by clk_id; refuses double-enables and
 * records the new state in the dsp_clocks bitmask on success.
 *
 */
int dsp_clk_enable(IN enum dsp_clk_id clk_id)
{
	int status = 0;

	/* Already on: warn and leave state untouched. */
	if (is_dsp_clk_active(dsp_clocks, clk_id)) {
		dev_err(bridge, "WARN: clock id %d already enabled\n", clk_id);
		goto out;
	}

	switch (get_clk_type(clk_id)) {
	case IVA2_CLK:
		clk_enable(iva2_clk);
		break;
	case GPT_CLK:
		/*
		 * NOTE(review): the request can return NULL on failure but
		 * is not checked here — confirm callers tolerate a NULL
		 * entry in timer[] (dsp_gpt_wait_overflow does).
		 */
		timer[clk_id - 1] =
		    omap_dm_timer_request_specific(DMT_ID(clk_id));
		break;
#ifdef CONFIG_OMAP_MCBSP
	case MCBSP_CLK:
		mcbsp_clk_prepare(true, clk_id);
		omap_mcbsp_set_io_type(MCBSP_ID(clk_id), OMAP_MCBSP_POLL_IO);
		omap_mcbsp_request(MCBSP_ID(clk_id));
		break;
#endif
	case WDT_CLK:
		/* WDT3 is never handed to the DSP; log and fall through out. */
		dev_err(bridge, "ERROR: DSP requested to enable WDT3 clk\n");
		break;
	case SSI_CLK:
		clk_enable(ssi.sst_fck);
		clk_enable(ssi.ssr_fck);
		clk_enable(ssi.ick);

		/*
		 * The SSI module need to configured not to have the Forced
		 * idle for master interface. If it is set to forced idle,
		 * the SSI module is transitioning to standby thereby causing
		 * the client in the DSP hang waiting for the SSI module to
		 * be active after enabling the clocks
		 */
		ssi_clk_prepare(true);
		break;
	default:
		dev_err(bridge, "Invalid clock id for enable\n");
		status = -EPERM;
	}

	if (DSP_SUCCEEDED(status))
		set_dsp_clk_active(&dsp_clocks, clk_id);

out:
	return status;
}
/**
 * dsp_clock_enable_all - Enable clocks used by the DSP
 * @dsp_per_clocks: bitmask of peripheral clocks requested by the DSP
 *
 * This function enables all the peripheral clocks that were requested by DSP.
 * Returns the status of the last dsp_clk_enable() call, or -EPERM when no
 * bit in the mask was set.
 */
u32 dsp_clock_enable_all(u32 dsp_per_clocks)
{
	u32 status = -EPERM;
	u32 id;

	for (id = 0; id < DSP_CLK_NOT_DEFINED; id++) {
		if (!is_dsp_clk_active(dsp_per_clocks, id))
			continue;
		status = dsp_clk_enable(id);
	}

	return status;
}
/*
 * ======== dsp_clk_disable ========
 * Purpose:
 * Disable the clock identified by clk_id; refuses double-disables and
 * clears the clock's bit in the dsp_clocks bitmask on success.
 *
 */
int dsp_clk_disable(IN enum dsp_clk_id clk_id)
{
	int status = 0;

	/* Already off: report and leave state untouched. */
	if (!is_dsp_clk_active(dsp_clocks, clk_id)) {
		dev_err(bridge, "ERR: clock id %d already disabled\n", clk_id);
		goto out;
	}

	switch (get_clk_type(clk_id)) {
	case IVA2_CLK:
		clk_disable(iva2_clk);
		break;
	case GPT_CLK:
		omap_dm_timer_free(timer[clk_id - 1]);
		break;
#ifdef CONFIG_OMAP_MCBSP
	case MCBSP_CLK:
		mcbsp_clk_prepare(false, clk_id);
		omap_mcbsp_free(MCBSP_ID(clk_id));
		break;
#endif
	case WDT_CLK:
		/* WDT3 is never handed to the DSP; log the bogus request. */
		dev_err(bridge, "ERROR: DSP requested to disable WDT3 clk\n");
		break;
	case SSI_CLK:
		/*
		 * Put SSI back into forced idle before gating its clocks.
		 * Fix: the call was duplicated; one invocation fully
		 * programs SYSCONFIG, so the second was redundant.
		 */
		ssi_clk_prepare(false);
		clk_disable(ssi.sst_fck);
		clk_disable(ssi.ssr_fck);
		clk_disable(ssi.ick);
		break;
	default:
		dev_err(bridge, "Invalid clock id for disable\n");
		status = -EPERM;
	}

	if (DSP_SUCCEEDED(status))
		set_dsp_clk_inactive(&dsp_clocks, clk_id);

out:
	return status;
}
/**
 * dsp_clock_disable_all - Disable all active clocks
 * @dsp_per_clocks: bitmask of peripheral clocks previously enabled by DSP
 *
 * This function disables all the peripheral clocks that were enabled by DSP.
 * It is meant to be called only when DSP is entering hibernation or when DSP
 * is in error state.
 */
u32 dsp_clock_disable_all(u32 dsp_per_clocks)
{
	u32 status = -EPERM;
	u32 id;

	for (id = 0; id < DSP_CLK_NOT_DEFINED; id++) {
		if (!is_dsp_clk_active(dsp_per_clocks, id))
			continue;
		status = dsp_clk_disable(id);
	}

	return status;
}
/* Return the current IVA2 functional clock rate in kHz. */
u32 dsp_clk_get_iva2_rate(void)
{
	u32 rate_khz = clk_get_rate(iva2_clk) / 1000;

	dev_dbg(bridge, "%s: clk speed Khz = %d\n", __func__, rate_khz);
	return rate_khz;
}
/*
 * Program the SSI SYSCONFIG register for use by the DSP.
 * Active (FLAG true): autoidle + smart-idle slave + no-idle master, so the
 * module does not drop into standby while the DSP client is using it.
 * Inactive: autoidle only, leaving both idle modes forced.
 */
void ssi_clk_prepare(bool FLAG)
{
	void __iomem *ssi_base;
	unsigned int value;

	ssi_base = ioremap(L4_34XX_BASE + OMAP_SSI_OFFSET, OMAP_SSI_SIZE);
	if (!ssi_base) {
		pr_err("%s: error, SSI not configured\n", __func__);
		return;
	}

	value = FLAG ? (SSI_AUTOIDLE | SSI_SIDLE_SMARTIDLE | SSI_MIDLE_NOIDLE)
		     : SSI_AUTOIDLE;

	__raw_writel(value, ssi_base + OMAP_SSI_SYSCONFIG_OFFSET);
	iounmap(ssi_base);
}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,139 @@
/*
* mmu_fault.c
*
* DSP-BIOS Bridge driver support functions for TI OMAP processors.
*
* Implements DSP MMU fault handling functions.
*
* Copyright (C) 2005-2006 Texas Instruments, Inc.
*
* This package is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
* WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*/
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/std.h>
#include <dspbridge/dbdefs.h>
/* ----------------------------------- Trace & Debug */
#include <dspbridge/host_os.h>
#include <dspbridge/dbc.h>
/* ----------------------------------- OS Adaptation Layer */
#include <dspbridge/drv.h>
/* ----------------------------------- Link Driver */
#include <dspbridge/dspdeh.h>
/* ------------------------------------ Hardware Abstraction Layer */
#include <hw_defs.h>
#include <hw_mmu.h>
/* ----------------------------------- This */
#include "_deh.h"
#include <dspbridge/cfg.h>
#include "_tiomap.h"
#include "mmu_fault.h"
static u32 dmmu_event_mask;
u32 fault_addr;
static bool mmu_check_if_fault(struct bridge_dev_context *dev_context);
/*
 * ======== mmu_fault_dpc ========
 * Deferred procedure call (tasklet body) that reports a DSP MMU fault
 * to the device exception handler outside of interrupt context.
 */
void mmu_fault_dpc(IN unsigned long pRefData)
{
	struct deh_mgr *deh = (struct deh_mgr *)pRefData;

	if (!deh)
		return;

	bridge_deh_notify(deh, DSP_MMUFAULT, 0L);
}
/*
 * ======== mmu_fault_isr ========
 * ISR to be triggered by a DSP MMU fault interrupt.
 *
 * On a genuine translation fault, logs the event mask and faulting
 * address, schedules mmu_fault_dpc() via the DEH tasklet, fills in
 * err_info for later reporting, and masks the translation-fault event
 * (clearing it here would immediately re-raise the interrupt).  Any
 * other MMU event is masked entirely.  Always returns IRQ_HANDLED.
 */
irqreturn_t mmu_fault_isr(int irq, IN void *pRefData)
{
	struct deh_mgr *deh_mgr_obj = (struct deh_mgr *)pRefData;
	struct bridge_dev_context *dev_context;
	struct cfg_hostres *resources;

	DBC_REQUIRE(irq == INT_DSP_MMU_IRQ);
	DBC_REQUIRE(deh_mgr_obj);

	if (deh_mgr_obj) {
		dev_context =
		    (struct bridge_dev_context *)deh_mgr_obj->hbridge_context;
		/* Host resources hold the mapped MMU register base */
		resources = dev_context->resources;
		if (!resources) {
			dev_dbg(bridge, "%s: Failed to get Host Resources\n",
				__func__);
			return IRQ_HANDLED;
		}
		/* Side effect: updates dmmu_event_mask and fault_addr */
		if (mmu_check_if_fault(dev_context)) {
			printk(KERN_INFO "***** DSPMMU FAULT ***** IRQStatus "
			       "0x%x\n", dmmu_event_mask);
			printk(KERN_INFO "***** DSPMMU FAULT ***** fault_addr "
			       "0x%x\n", fault_addr);
			/*
			 * Schedule a DPC directly. In the future, it may be
			 * necessary to check if DSP MMU fault is intended for
			 * Bridge.
			 */
			tasklet_schedule(&deh_mgr_obj->dpc_tasklet);
			/* Reset err_info structure before use. */
			deh_mgr_obj->err_info.dw_err_mask = DSP_MMUFAULT;
			deh_mgr_obj->err_info.dw_val1 = fault_addr >> 16;
			deh_mgr_obj->err_info.dw_val2 = fault_addr & 0xFFFF;
			deh_mgr_obj->err_info.dw_val3 = 0L;
			/* Disable the MMU events, else once we clear it will
			 * start to raise INTs again */
			hw_mmu_event_disable(resources->dw_dmmu_base,
					     HW_MMU_TRANSLATION_FAULT);
		} else {
			/* Not a translation fault: silence all MMU events */
			hw_mmu_event_disable(resources->dw_dmmu_base,
					     HW_MMU_ALL_INTERRUPTS);
		}
	}
	return IRQ_HANDLED;
}
/*
 * ======== mmu_check_if_fault ========
 * Check to see if MMU Fault is valid TLB miss from DSP
 * Note: This function is called from an ISR
 *
 * Latches the raw MMU event status into dmmu_event_mask and, for a
 * pure translation fault, records the faulting address in fault_addr.
 */
static bool mmu_check_if_fault(struct bridge_dev_context *dev_context)
{
	hw_status hw_status_obj;
	struct cfg_hostres *resources = dev_context->resources;
	bool is_fault = false;

	if (!resources) {
		dev_dbg(bridge, "%s: Failed to get Host Resources in\n",
			__func__);
		return is_fault;
	}

	hw_status_obj =
	    hw_mmu_event_status(resources->dw_dmmu_base, &dmmu_event_mask);

	if (dmmu_event_mask == HW_MMU_TRANSLATION_FAULT) {
		hw_mmu_fault_addr_read(resources->dw_dmmu_base, &fault_addr);
		is_fault = true;
	}

	return is_fault;
}

View File

@ -0,0 +1,36 @@
/*
* mmu_fault.h
*
* DSP-BIOS Bridge driver support functions for TI OMAP processors.
*
* Defines DSP MMU fault handling functions.
*
* Copyright (C) 2005-2006 Texas Instruments, Inc.
*
* This package is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
* WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*/
#ifndef MMU_FAULT_
#define MMU_FAULT_
extern u32 fault_addr;
/*
* ======== mmu_fault_dpc ========
* Deferred procedure call to handle DSP MMU fault.
*/
void mmu_fault_dpc(IN unsigned long pRefData);
/*
* ======== mmu_fault_isr ========
* ISR to be triggered by a DSP MMU fault interrupt.
*/
irqreturn_t mmu_fault_isr(int irq, IN void *pRefData);
#endif /* MMU_FAULT_ */

View File

@ -0,0 +1,673 @@
/*
* msg_sm.c
*
* DSP-BIOS Bridge driver support functions for TI OMAP processors.
*
* Implements upper edge functions for Bridge message module.
*
* Copyright (C) 2005-2006 Texas Instruments, Inc.
*
* This package is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
* WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*/
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/std.h>
#include <dspbridge/dbdefs.h>
/* ----------------------------------- Trace & Debug */
#include <dspbridge/dbc.h>
/* ----------------------------------- OS Adaptation Layer */
#include <dspbridge/list.h>
#include <dspbridge/sync.h>
/* ----------------------------------- Platform Manager */
#include <dspbridge/dev.h>
/* ----------------------------------- Others */
#include <dspbridge/io_sm.h>
/* ----------------------------------- This */
#include <_msg_sm.h>
#include <dspbridge/dspmsg.h>
/* ----------------------------------- Function Prototypes */
static int add_new_msg(struct lst_list *msgList);
static void delete_msg_mgr(struct msg_mgr *hmsg_mgr);
static void delete_msg_queue(struct msg_queue *msg_queue_obj, u32 uNumToDSP);
static void free_msg_list(struct lst_list *msgList);
/*
 * ======== bridge_msg_create ========
 * Create an object to manage message queues. Only one of these objects
 * can exist per device object.
 *
 * @phMsgMgr:    out parameter receiving the new manager (NULL on failure)
 * @hdev_obj:    device the manager belongs to; its IO manager is cached
 * @msgCallback: callback invoked on queue exit
 * Returns 0, -EFAULT on bad arguments / missing IO manager, or -ENOMEM.
 * On any partial-allocation failure, delete_msg_mgr() tears down
 * whatever was allocated.
 */
int bridge_msg_create(OUT struct msg_mgr **phMsgMgr,
		      struct dev_object *hdev_obj,
		      msg_onexit msgCallback)
{
	struct msg_mgr *msg_mgr_obj;
	struct io_mgr *hio_mgr;
	int status = 0;

	/* All three inputs are mandatory */
	if (!phMsgMgr || !msgCallback || !hdev_obj) {
		status = -EFAULT;
		goto func_end;
	}
	dev_get_io_mgr(hdev_obj, &hio_mgr);
	if (!hio_mgr) {
		status = -EFAULT;
		goto func_end;
	}
	*phMsgMgr = NULL;
	/* Allocate msg_ctrl manager object */
	msg_mgr_obj = kzalloc(sizeof(struct msg_mgr), GFP_KERNEL);
	if (msg_mgr_obj) {
		msg_mgr_obj->on_exit = msgCallback;
		msg_mgr_obj->hio_mgr = hio_mgr;
		/* List of MSG_QUEUEs */
		msg_mgr_obj->queue_list = kzalloc(sizeof(struct lst_list),
						  GFP_KERNEL);
		/* Queues of message frames for messages to the DSP. Message
		 * frames will only be added to the free queue when a
		 * msg_queue object is created. */
		msg_mgr_obj->msg_free_list = kzalloc(sizeof(struct lst_list),
						     GFP_KERNEL);
		msg_mgr_obj->msg_used_list = kzalloc(sizeof(struct lst_list),
						     GFP_KERNEL);
		if (msg_mgr_obj->queue_list == NULL ||
		    msg_mgr_obj->msg_free_list == NULL ||
		    msg_mgr_obj->msg_used_list == NULL) {
			status = -ENOMEM;
		} else {
			INIT_LIST_HEAD(&msg_mgr_obj->queue_list->head);
			INIT_LIST_HEAD(&msg_mgr_obj->msg_free_list->head);
			INIT_LIST_HEAD(&msg_mgr_obj->msg_used_list->head);
			spin_lock_init(&msg_mgr_obj->msg_mgr_lock);
		}
		/* Create an event to be used by bridge_msg_put() in waiting
		 * for an available free frame from the message manager. */
		msg_mgr_obj->sync_event =
		    kzalloc(sizeof(struct sync_object), GFP_KERNEL);
		if (!msg_mgr_obj->sync_event)
			status = -ENOMEM;
		else
			sync_init_event(msg_mgr_obj->sync_event);
		if (DSP_SUCCEEDED(status))
			*phMsgMgr = msg_mgr_obj;
		else
			/* Frees any partially allocated lists/event above */
			delete_msg_mgr(msg_mgr_obj);
	} else {
		status = -ENOMEM;
	}
func_end:
	return status;
}
/*
 * ======== bridge_msg_create_queue ========
 * Create a msg_queue for sending/receiving messages to/from a node
 * on the DSP.
 *
 * @hmsg_mgr:   message manager the queue attaches to
 * @phMsgQueue: out parameter receiving the new queue (NULL on failure)
 * @msgq_id:    node environment id (patched later via
 *              bridge_msg_set_queue_id() once the node exists)
 * @max_msgs:   number of message frames to pre-allocate per direction
 * @arg:        node handle stored in the queue
 * Returns 0, or -EFAULT/-ENOMEM on bad arguments / allocation failure.
 */
int bridge_msg_create_queue(struct msg_mgr *hmsg_mgr,
			    OUT struct msg_queue **phMsgQueue,
			    u32 msgq_id, u32 max_msgs, void *arg)
{
	u32 i;
	u32 num_allocated = 0;
	struct msg_queue *msg_q;
	int status = 0;

	if (!hmsg_mgr || phMsgQueue == NULL || !hmsg_mgr->msg_free_list) {
		status = -EFAULT;
		goto func_end;
	}
	*phMsgQueue = NULL;
	/* Allocate msg_queue object */
	msg_q = kzalloc(sizeof(struct msg_queue), GFP_KERNEL);
	if (!msg_q) {
		status = -ENOMEM;
		goto func_end;
	}
	lst_init_elem((struct list_head *)msg_q);
	msg_q->max_msgs = max_msgs;
	msg_q->hmsg_mgr = hmsg_mgr;
	msg_q->arg = arg;	/* Node handle */
	msg_q->msgq_id = msgq_id;	/* Node env (not valid yet) */
	/* Queues of Message frames for messages from the DSP */
	msg_q->msg_free_list = kzalloc(sizeof(struct lst_list), GFP_KERNEL);
	msg_q->msg_used_list = kzalloc(sizeof(struct lst_list), GFP_KERNEL);
	if (msg_q->msg_free_list == NULL || msg_q->msg_used_list == NULL)
		status = -ENOMEM;
	else {
		INIT_LIST_HEAD(&msg_q->msg_free_list->head);
		INIT_LIST_HEAD(&msg_q->msg_used_list->head);
	}
	/* Create event that will be signalled when a message from
	 * the DSP is available. */
	if (DSP_SUCCEEDED(status)) {
		msg_q->sync_event = kzalloc(sizeof(struct sync_object),
					    GFP_KERNEL);
		if (msg_q->sync_event)
			sync_init_event(msg_q->sync_event);
		else
			status = -ENOMEM;
	}
	/* Create a notification list for message ready notification. */
	if (DSP_SUCCEEDED(status)) {
		msg_q->ntfy_obj = kmalloc(sizeof(struct ntfy_object),
					  GFP_KERNEL);
		if (msg_q->ntfy_obj)
			ntfy_init(msg_q->ntfy_obj);
		else
			status = -ENOMEM;
	}
	/* Create events that will be used to synchronize cleanup
	 * when the object is deleted. sync_done will be set to
	 * unblock threads in MSG_Put() or MSG_Get(). sync_done_ack
	 * will be set by the unblocked thread to signal that it
	 * is unblocked and will no longer reference the object. */
	if (DSP_SUCCEEDED(status)) {
		msg_q->sync_done = kzalloc(sizeof(struct sync_object),
					   GFP_KERNEL);
		if (msg_q->sync_done)
			sync_init_event(msg_q->sync_done);
		else
			status = -ENOMEM;
	}
	if (DSP_SUCCEEDED(status)) {
		msg_q->sync_done_ack = kzalloc(sizeof(struct sync_object),
					       GFP_KERNEL);
		if (msg_q->sync_done_ack)
			sync_init_event(msg_q->sync_done_ack);
		else
			status = -ENOMEM;
	}
	if (DSP_SUCCEEDED(status)) {
		/* Enter critical section */
		spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
		/* Initialize message frames and put in appropriate queues */
		for (i = 0; i < max_msgs && DSP_SUCCEEDED(status); i++) {
			/* One frame goes to the manager (DSP-bound), one to
			 * this queue (DSP-originated) */
			status = add_new_msg(hmsg_mgr->msg_free_list);
			if (DSP_SUCCEEDED(status)) {
				num_allocated++;
				status = add_new_msg(msg_q->msg_free_list);
			}
		}
		if (DSP_FAILED(status)) {
			/* Stay inside CS to prevent others from taking any
			 * of the newly allocated message frames. */
			delete_msg_queue(msg_q, num_allocated);
		} else {
			lst_put_tail(hmsg_mgr->queue_list,
				     (struct list_head *)msg_q);
			*phMsgQueue = msg_q;
			/* Signal that free frames are now available */
			if (!LST_IS_EMPTY(hmsg_mgr->msg_free_list))
				sync_set_event(hmsg_mgr->sync_event);
		}
		/* Exit critical section */
		spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
	} else {
		delete_msg_queue(msg_q, 0);
	}
func_end:
	return status;
}
/*
 * ======== bridge_msg_delete ========
 * Delete a msg_ctrl manager allocated in bridge_msg_create().
 * A NULL manager is silently ignored.
 */
void bridge_msg_delete(struct msg_mgr *hmsg_mgr)
{
	if (!hmsg_mgr)
		return;

	delete_msg_mgr(hmsg_mgr);
}
/*
 * ======== bridge_msg_delete_queue ========
 * Delete a msg_ctrl queue allocated in bridge_msg_create_queue.
 *
 * Marks the queue "done", then wakes every thread blocked in
 * bridge_msg_get()/bridge_msg_put() and waits for each to acknowledge
 * before unlinking and freeing the queue.
 */
void bridge_msg_delete_queue(struct msg_queue *msg_queue_obj)
{
	struct msg_mgr *hmsg_mgr;
	u32 io_msg_pend;

	if (!msg_queue_obj || !msg_queue_obj->hmsg_mgr)
		goto func_end;

	hmsg_mgr = msg_queue_obj->hmsg_mgr;
	/* Blocked threads see this and bail out with -EPERM */
	msg_queue_obj->done = true;
	/* Unblock all threads blocked in MSG_Get() or MSG_Put(). */
	io_msg_pend = msg_queue_obj->io_msg_pend;
	while (io_msg_pend) {
		/* Unblock thread */
		sync_set_event(msg_queue_obj->sync_done);
		/* Wait for acknowledgement */
		sync_wait_on_event(msg_queue_obj->sync_done_ack, SYNC_INFINITE);
		io_msg_pend = msg_queue_obj->io_msg_pend;
	}
	/* Remove message queue from hmsg_mgr->queue_list */
	spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
	lst_remove_elem(hmsg_mgr->queue_list,
			(struct list_head *)msg_queue_obj);
	/* Free the message queue object */
	delete_msg_queue(msg_queue_obj, msg_queue_obj->max_msgs);
	if (!hmsg_mgr->msg_free_list)
		goto func_cont;
	/* If no free frames remain, clear the "frames available" event so
	 * bridge_msg_put() callers block until one is released */
	if (LST_IS_EMPTY(hmsg_mgr->msg_free_list))
		sync_reset_event(hmsg_mgr->sync_event);
func_cont:
	spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
func_end:
	return;
}
/*
 * ======== bridge_msg_get ========
 * Get a message from a msg_ctrl queue.
 *
 * @msg_queue_obj: queue to read from
 * @pmsg:          out parameter receiving the message payload
 * @utimeout:      how long to block waiting for a message
 * If a message is already queued it is returned immediately; otherwise
 * the caller blocks on the queue's sync_event until a message arrives,
 * the timeout expires, or the queue is being deleted (sync_done).
 * Returns 0, -ENOMEM on bad arguments, -EFAULT on a missing list,
 * -EPERM if the queue is shutting down, or the wait's error code.
 */
int bridge_msg_get(struct msg_queue *msg_queue_obj,
		   struct dsp_msg *pmsg, u32 utimeout)
{
	struct msg_frame *msg_frame_obj;
	struct msg_mgr *hmsg_mgr;
	bool got_msg = false;
	struct sync_object *syncs[2];
	u32 index;
	int status = 0;

	if (!msg_queue_obj || pmsg == NULL) {
		status = -ENOMEM;
		goto func_end;
	}
	hmsg_mgr = msg_queue_obj->hmsg_mgr;
	if (!msg_queue_obj->msg_used_list) {
		status = -EFAULT;
		goto func_end;
	}
	/* Enter critical section */
	spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
	/* If a message is already there, get it */
	if (!LST_IS_EMPTY(msg_queue_obj->msg_used_list)) {
		msg_frame_obj = (struct msg_frame *)
		    lst_get_head(msg_queue_obj->msg_used_list);
		if (msg_frame_obj != NULL) {
			/* Copy payload out, then recycle the frame */
			*pmsg = msg_frame_obj->msg_data.msg;
			lst_put_tail(msg_queue_obj->msg_free_list,
				     (struct list_head *)msg_frame_obj);
			if (LST_IS_EMPTY(msg_queue_obj->msg_used_list))
				sync_reset_event(msg_queue_obj->sync_event);
			got_msg = true;
		}
	} else {
		if (msg_queue_obj->done)
			status = -EPERM;
		else
			/* Count ourselves as a pending waiter so that
			 * bridge_msg_delete_queue() waits for us */
			msg_queue_obj->io_msg_pend++;
	}
	/* Exit critical section */
	spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
	if (DSP_SUCCEEDED(status) && !got_msg) {
		/* Wait til message is available, timeout, or done. We don't
		 * have to schedule the DPC, since the DSP will send messages
		 * when they are available. */
		syncs[0] = msg_queue_obj->sync_event;
		syncs[1] = msg_queue_obj->sync_done;
		status = sync_wait_on_multiple_events(syncs, 2, utimeout,
						      &index);
		/* Enter critical section */
		spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
		if (msg_queue_obj->done) {
			msg_queue_obj->io_msg_pend--;
			/* Exit critical section */
			spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
			/* Signal that we're not going to access msg_queue_obj
			 * anymore, so it can be deleted. */
			(void)sync_set_event(msg_queue_obj->sync_done_ack);
			status = -EPERM;
		} else {
			if (DSP_SUCCEEDED(status)) {
				DBC_ASSERT(!LST_IS_EMPTY
					   (msg_queue_obj->msg_used_list));
				/* Get msg from used list */
				msg_frame_obj = (struct msg_frame *)
				    lst_get_head(msg_queue_obj->msg_used_list);
				/* Copy message into pmsg and put frame on the
				 * free list */
				if (msg_frame_obj != NULL) {
					*pmsg = msg_frame_obj->msg_data.msg;
					lst_put_tail
					    (msg_queue_obj->msg_free_list,
					     (struct list_head *)
					     msg_frame_obj);
				}
			}
			msg_queue_obj->io_msg_pend--;
			/* Reset the event if there are still queued messages */
			if (!LST_IS_EMPTY(msg_queue_obj->msg_used_list))
				sync_set_event(msg_queue_obj->sync_event);
			/* Exit critical section */
			spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
		}
	}
func_end:
	return status;
}
/*
 * ======== bridge_msg_put ========
 * Put a message onto a msg_ctrl queue.
 *
 * @msg_queue_obj: queue to send through
 * @pmsg:          message payload to copy into a frame
 * @utimeout:      how long to block waiting for a free frame
 * If a free frame is available the message is queued and the IO manager
 * DPC scheduled immediately; otherwise the caller blocks on the
 * manager's sync_event until a frame frees up, the timeout expires, or
 * the queue is being deleted (sync_done).
 * Returns 0, -ENOMEM on bad arguments, -EFAULT on a missing list,
 * -EPERM if the queue is shutting down, or the wait's error code.
 */
int bridge_msg_put(struct msg_queue *msg_queue_obj,
		   IN CONST struct dsp_msg *pmsg, u32 utimeout)
{
	struct msg_frame *msg_frame_obj;
	struct msg_mgr *hmsg_mgr;
	bool put_msg = false;
	struct sync_object *syncs[2];
	u32 index;
	int status = 0;

	if (!msg_queue_obj || !pmsg || !msg_queue_obj->hmsg_mgr) {
		status = -ENOMEM;
		goto func_end;
	}
	hmsg_mgr = msg_queue_obj->hmsg_mgr;
	if (!hmsg_mgr->msg_free_list) {
		status = -EFAULT;
		goto func_end;
	}
	spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
	/* If a message frame is available, use it */
	if (!LST_IS_EMPTY(hmsg_mgr->msg_free_list)) {
		msg_frame_obj =
		    (struct msg_frame *)lst_get_head(hmsg_mgr->msg_free_list);
		if (msg_frame_obj != NULL) {
			msg_frame_obj->msg_data.msg = *pmsg;
			msg_frame_obj->msg_data.msgq_id =
			    msg_queue_obj->msgq_id;
			lst_put_tail(hmsg_mgr->msg_used_list,
				     (struct list_head *)msg_frame_obj);
			hmsg_mgr->msgs_pending++;
			put_msg = true;
		}
		if (LST_IS_EMPTY(hmsg_mgr->msg_free_list))
			sync_reset_event(hmsg_mgr->sync_event);
		/* Release critical section before scheduling DPC */
		spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
		/* Schedule a DPC, to do the actual data transfer: */
		iosm_schedule(hmsg_mgr->hio_mgr);
	} else {
		if (msg_queue_obj->done)
			status = -EPERM;
		else
			/* Count ourselves as a pending waiter so that
			 * bridge_msg_delete_queue() waits for us */
			msg_queue_obj->io_msg_pend++;
		spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
	}
	if (DSP_SUCCEEDED(status) && !put_msg) {
		/* Wait til a free message frame is available, timeout,
		 * or done */
		syncs[0] = hmsg_mgr->sync_event;
		syncs[1] = msg_queue_obj->sync_done;
		status = sync_wait_on_multiple_events(syncs, 2, utimeout,
						      &index);
		if (DSP_FAILED(status))
			goto func_end;
		/* Enter critical section */
		spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
		if (msg_queue_obj->done) {
			msg_queue_obj->io_msg_pend--;
			/* Exit critical section */
			spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
			/* Signal that we're not going to access msg_queue_obj
			 * anymore, so it can be deleted. */
			(void)sync_set_event(msg_queue_obj->sync_done_ack);
			status = -EPERM;
		} else {
			if (LST_IS_EMPTY(hmsg_mgr->msg_free_list)) {
				status = -EFAULT;
				goto func_cont;
			}
			/* Get msg from free list */
			msg_frame_obj = (struct msg_frame *)
			    lst_get_head(hmsg_mgr->msg_free_list);
			/*
			 * Copy message into pmsg and put frame on the
			 * used list.
			 */
			if (msg_frame_obj) {
				msg_frame_obj->msg_data.msg = *pmsg;
				msg_frame_obj->msg_data.msgq_id =
				    msg_queue_obj->msgq_id;
				lst_put_tail(hmsg_mgr->msg_used_list,
					     (struct list_head *)msg_frame_obj);
				hmsg_mgr->msgs_pending++;
				/*
				 * Schedule a DPC, to do the actual
				 * data transfer.
				 */
				iosm_schedule(hmsg_mgr->hio_mgr);
			}
			msg_queue_obj->io_msg_pend--;
			/* Reset event if there are still frames available */
			if (!LST_IS_EMPTY(hmsg_mgr->msg_free_list))
				sync_set_event(hmsg_mgr->sync_event);
func_cont:
			/* Exit critical section */
			spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
		}
	}
func_end:
	return status;
}
/*
 * ======== bridge_msg_register_notify ========
 * Register (event_mask == DSP_NODEMESSAGEREADY) or unregister
 * (event_mask == 0) a message-ready notification on a queue.
 */
int bridge_msg_register_notify(struct msg_queue *msg_queue_obj,
			       u32 event_mask, u32 notify_type,
			       struct dsp_notification *hnotification)
{
	int status;

	if (!msg_queue_obj || !hnotification)
		return -ENOMEM;

	if (event_mask != DSP_NODEMESSAGEREADY && event_mask != 0)
		return -EPERM;

	if (notify_type != DSP_SIGNALEVENT)
		return -EBADR;

	if (event_mask)
		status = ntfy_register(msg_queue_obj->ntfy_obj, hnotification,
				       event_mask, notify_type);
	else
		status = ntfy_unregister(msg_queue_obj->ntfy_obj,
					 hnotification);

	if (status == -EINVAL) {
		/* Not registered. Ok, since we couldn't have known. Node
		 * notifications are split between node state change handled
		 * by NODE, and message ready handled by msg_ctrl. */
		status = 0;
	}

	return status;
}
/*
 * ======== bridge_msg_set_queue_id ========
 * Patch a queue's id after node creation.
 *
 * A message queue must be created when a node is allocated, so that
 * node_register_notify() can be called before the node is created.
 * Since the node environment is unknown until the node exists, this
 * setter stores it into msg_queue_obj->msgq_id afterwards.
 */
void bridge_msg_set_queue_id(struct msg_queue *msg_queue_obj, u32 msgq_id)
{
	if (!msg_queue_obj)
		return;

	msg_queue_obj->msgq_id = msgq_id;
}
/*
 * ======== add_new_msg ========
 * Allocate one message frame and append it to @msgList.
 * Must be called in message manager critical section (hence GFP_ATOMIC).
 */
static int add_new_msg(struct lst_list *msgList)
{
	struct msg_frame *frame;

	frame = kzalloc(sizeof(struct msg_frame), GFP_ATOMIC);
	if (!frame)
		return -ENOMEM;

	lst_init_elem((struct list_head *)frame);
	lst_put_tail(msgList, (struct list_head *)frame);

	return 0;
}
/*
 * ======== delete_msg_mgr ========
 * Tear down a message manager: free both frame lists, the sync event,
 * and the manager itself.  The queue list is only released when it is
 * empty (no queues may still reference it).
 */
static void delete_msg_mgr(struct msg_mgr *hmsg_mgr)
{
	if (!hmsg_mgr)
		return;

	if (hmsg_mgr->queue_list && LST_IS_EMPTY(hmsg_mgr->queue_list)) {
		kfree(hmsg_mgr->queue_list);
		hmsg_mgr->queue_list = NULL;
	}

	if (hmsg_mgr->msg_free_list) {
		free_msg_list(hmsg_mgr->msg_free_list);
		hmsg_mgr->msg_free_list = NULL;
	}

	if (hmsg_mgr->msg_used_list) {
		free_msg_list(hmsg_mgr->msg_used_list);
		hmsg_mgr->msg_used_list = NULL;
	}

	kfree(hmsg_mgr->sync_event);
	kfree(hmsg_mgr);
}
/*
 * ======== delete_msg_queue ========
 * Tear down a message queue.
 *
 * @msg_queue_obj: queue to destroy
 * @uNumToDSP:     number of frames this queue contributed to the
 *                 manager's free list; that many are reclaimed first
 * Frees the queue's own frame lists, notification object, sync events,
 * and finally the queue structure itself.
 */
static void delete_msg_queue(struct msg_queue *msg_queue_obj, u32 uNumToDSP)
{
	struct msg_mgr *hmsg_mgr;
	struct msg_frame *pmsg;
	u32 i;

	if (!msg_queue_obj ||
	    !msg_queue_obj->hmsg_mgr || !msg_queue_obj->hmsg_mgr->msg_free_list)
		goto func_end;

	hmsg_mgr = msg_queue_obj->hmsg_mgr;
	/* Pull off uNumToDSP message frames from Msg manager and free */
	for (i = 0; i < uNumToDSP; i++) {
		if (!LST_IS_EMPTY(hmsg_mgr->msg_free_list)) {
			pmsg = (struct msg_frame *)
			    lst_get_head(hmsg_mgr->msg_free_list);
			kfree(pmsg);
		} else {
			/* Cannot free all of the message frames */
			break;
		}
	}
	if (msg_queue_obj->msg_free_list) {
		free_msg_list(msg_queue_obj->msg_free_list);
		msg_queue_obj->msg_free_list = NULL;
	}
	if (msg_queue_obj->msg_used_list) {
		free_msg_list(msg_queue_obj->msg_used_list);
		msg_queue_obj->msg_used_list = NULL;
	}
	if (msg_queue_obj->ntfy_obj) {
		ntfy_delete(msg_queue_obj->ntfy_obj);
		kfree(msg_queue_obj->ntfy_obj);
	}
	/* kfree(NULL) is a no-op, so unallocated events are safe here */
	kfree(msg_queue_obj->sync_event);
	kfree(msg_queue_obj->sync_done);
	kfree(msg_queue_obj->sync_done_ack);
	kfree(msg_queue_obj);
func_end:
	return;
}
/*
 * ======== free_msg_list ========
 * Free every message frame on @msgList, then free the list itself.
 * A NULL list is ignored.
 */
static void free_msg_list(struct lst_list *msgList)
{
	struct msg_frame *frame;

	if (!msgList)
		return;

	frame = (struct msg_frame *)lst_get_head(msgList);
	while (frame != NULL) {
		kfree(frame);
		frame = (struct msg_frame *)lst_get_head(msgList);
	}

	DBC_ASSERT(LST_IS_EMPTY(msgList));
	kfree(msgList);
}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,604 @@
/*
* tiomap_pwr.c
*
* DSP-BIOS Bridge driver support functions for TI OMAP processors.
*
* Implementation of DSP wake/sleep routines.
*
* Copyright (C) 2007-2008 Texas Instruments, Inc.
*
* This package is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
* WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*/
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
#include <dspbridge/cfg.h>
#include <dspbridge/drv.h>
#include <dspbridge/io_sm.h>
/* ----------------------------------- Platform Manager */
#include <dspbridge/brddefs.h>
#include <dspbridge/dev.h>
#include <dspbridge/iodefs.h>
/* ------------------------------------ Hardware Abstraction Layer */
#include <hw_defs.h>
#include <hw_mmu.h>
#include <dspbridge/pwr_sh.h>
/* ----------------------------------- Bridge Driver */
#include <dspbridge/dspdeh.h>
#include <dspbridge/wdt.h>
/* ----------------------------------- specific to this file */
#include "_tiomap.h"
#include "_tiomap_pwr.h"
#include <mach-omap2/prm-regbits-34xx.h>
#include <mach-omap2/cm-regbits-34xx.h>
#define PWRSTST_TIMEOUT 200
/*
 * ======== handle_constraints_set ========
 * Sets new DSP constraint: forwards the OPP level requested by the DSP
 * (second u32 in @pargs) to the platform's dsp_set_min_opp hook.
 * Compiled out (always returns 0) without CONFIG_BRIDGE_DVFS.
 */
int handle_constraints_set(struct bridge_dev_context *dev_context,
			   IN void *pargs)
{
#ifdef CONFIG_BRIDGE_DVFS
	u32 *constraint_val = (u32 *) (pargs);
	struct dspbridge_platform_data *pdata =
	    omap_dspbridge_dev->dev.platform_data;

	/* The requested OPP is the second word of the argument block */
	dev_dbg(bridge, "OPP: %s opp requested = 0x%x\n", __func__,
		(u32) *(constraint_val + 1));

	if (pdata->dsp_set_min_opp)
		(*pdata->dsp_set_min_opp) ((u32) *(constraint_val + 1));
#endif /* #ifdef CONFIG_BRIDGE_DVFS */
	return 0;
}
/*
 * ======== handle_hibernation_from_dsp ========
 * Handle Hibernation requested from DSP
 *
 * Polls the IVA2 power domain (up to PWRSTST_TIMEOUT ms) until it
 * reaches OFF, then saves mailbox context, disables the DSP peripheral
 * clocks and watchdog, updates the board state, and (with DVFS) drops
 * the OPP to its minimum.  Returns 0, -EPERM if the wait was
 * interrupted, or -ETIMEDOUT.  A no-op without CONFIG_PM.
 */
int handle_hibernation_from_dsp(struct bridge_dev_context *dev_context)
{
	int status = 0;
#ifdef CONFIG_PM
	u16 timeout = PWRSTST_TIMEOUT / 10;
	u32 pwr_state;
#ifdef CONFIG_BRIDGE_DVFS
	u32 opplevel;
	struct io_mgr *hio_mgr;
#endif
	struct dspbridge_platform_data *pdata =
	    omap_dspbridge_dev->dev.platform_data;

	pwr_state = (*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD, OMAP2_PM_PWSTST) &
	    OMAP_POWERSTATEST_MASK;
	/* Wait for DSP to move into OFF state */
	while ((pwr_state != PWRDM_POWER_OFF) && --timeout) {
		if (msleep_interruptible(10)) {
			pr_err("Waiting for DSP OFF mode interrupted\n");
			return -EPERM;
		}
		pwr_state = (*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD,
			OMAP2_PM_PWSTST) & OMAP_POWERSTATEST_MASK;
	}
	if (timeout == 0) {
		pr_err("%s: Timed out waiting for DSP off mode\n", __func__);
		status = -ETIMEDOUT;
		return status;
	} else {
		/* Save mailbox settings */
		omap_mbox_save_ctx(dev_context->mbox);
		/* Turn off DSP Peripheral clocks and DSP Load monitor timer */
		status = dsp_clock_disable_all(dev_context->dsp_per_clks);
		/* Disable wdt on hibernation. */
		dsp_wdt_enable(false);

		if (DSP_SUCCEEDED(status)) {
			/* Update the Bridger Driver state */
			dev_context->dw_brd_state = BRD_DSP_HIBERNATION;
#ifdef CONFIG_BRIDGE_DVFS
			status =
			    dev_get_io_mgr(dev_context->hdev_obj, &hio_mgr);
			if (!hio_mgr) {
				status = DSP_EHANDLE;
				return status;
			}
			/* Read the current OPP from shared memory (value is
			 * not used further here) */
			io_sh_msetting(hio_mgr, SHM_GETOPP, &opplevel);

			/*
			 * Set the OPP to low level before moving to OFF
			 * mode
			 */
			if (pdata->dsp_set_min_opp)
				(*pdata->dsp_set_min_opp) (VDD1_OPP1);
			status = 0;
#endif /* CONFIG_BRIDGE_DVFS */
		}
	}
#endif
	return status;
}
/*
 * ======== sleep_dsp ========
 * Put DSP in low power consuming state.
 *
 * @dw_cmd: PWR_DEEPSLEEP or PWR_EMERGENCYDEEPSLEEP; anything else is
 *          rejected with -EINVAL
 * Sends a hibernate/retention mailbox command appropriate to the
 * current board state, then polls the IVA2 power domain until it
 * reaches the target state.  On success updates the board state and
 * disables the watchdog and DSP peripheral clocks.  A no-op without
 * CONFIG_PM.
 */
int sleep_dsp(struct bridge_dev_context *dev_context, IN u32 dw_cmd,
	      IN void *pargs)
{
	int status = 0;
#ifdef CONFIG_PM
#ifdef CONFIG_BRIDGE_NTFY_PWRERR
	struct deh_mgr *hdeh_mgr;
#endif /* CONFIG_BRIDGE_NTFY_PWRERR */
	u16 timeout = PWRSTST_TIMEOUT / 10;
	u32 pwr_state, target_pwr_state;
	struct dspbridge_platform_data *pdata =
	    omap_dspbridge_dev->dev.platform_data;

	/* Check if sleep code is valid */
	if ((dw_cmd != PWR_DEEPSLEEP) && (dw_cmd != PWR_EMERGENCYDEEPSLEEP))
		return -EINVAL;

	switch (dev_context->dw_brd_state) {
	case BRD_RUNNING:
		omap_mbox_save_ctx(dev_context->mbox);
		if (dsp_test_sleepstate == PWRDM_POWER_OFF) {
			sm_interrupt_dsp(dev_context, MBX_PM_DSPHIBERNATE);
			dev_dbg(bridge, "PM: %s - sent hibernate cmd to DSP\n",
				__func__);
			target_pwr_state = PWRDM_POWER_OFF;
		} else {
			sm_interrupt_dsp(dev_context, MBX_PM_DSPRETENTION);
			target_pwr_state = PWRDM_POWER_RET;
		}
		break;
	case BRD_RETENTION:
		omap_mbox_save_ctx(dev_context->mbox);
		if (dsp_test_sleepstate == PWRDM_POWER_OFF) {
			sm_interrupt_dsp(dev_context, MBX_PM_DSPHIBERNATE);
			target_pwr_state = PWRDM_POWER_OFF;
		} else
			/* Already in retention and OFF not requested */
			return 0;
		break;
	case BRD_HIBERNATION:
	case BRD_DSP_HIBERNATION:
		/* Already in Hibernation, so just return */
		dev_dbg(bridge, "PM: %s - DSP already in hibernation\n",
			__func__);
		return 0;
	case BRD_STOPPED:
		dev_dbg(bridge, "PM: %s - Board in STOP state\n", __func__);
		return 0;
	default:
		dev_dbg(bridge, "PM: %s - Bridge in Illegal state\n", __func__);
		return -EPERM;
	}

	/* Get the PRCM DSP power domain status */
	pwr_state = (*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD, OMAP2_PM_PWSTST) &
	    OMAP_POWERSTATEST_MASK;

	/* Wait for DSP to move into target power state */
	while ((pwr_state != target_pwr_state) && --timeout) {
		if (msleep_interruptible(10)) {
			pr_err("Waiting for DSP to Suspend interrupted\n");
			return -EPERM;
		}
		pwr_state = (*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD,
			OMAP2_PM_PWSTST) & OMAP_POWERSTATEST_MASK;
	}

	if (!timeout) {
		pr_err("%s: Timed out waiting for DSP off mode, state %x\n",
		       __func__, pwr_state);
#ifdef CONFIG_BRIDGE_NTFY_PWRERR
		/* Notify registered clients of the power error */
		dev_get_deh_mgr(dev_context->hdev_obj, &hdeh_mgr);
		bridge_deh_notify(hdeh_mgr, DSP_PWRERROR, 0);
#endif /* CONFIG_BRIDGE_NTFY_PWRERR */
		return -ETIMEDOUT;
	} else {
		/* Update the Bridger Driver state */
		if (dsp_test_sleepstate == PWRDM_POWER_OFF)
			dev_context->dw_brd_state = BRD_HIBERNATION;
		else
			dev_context->dw_brd_state = BRD_RETENTION;

		/* Disable wdt on hibernation. */
		dsp_wdt_enable(false);

		/* Turn off DSP Peripheral clocks */
		status = dsp_clock_disable_all(dev_context->dsp_per_clks);
		if (DSP_FAILED(status))
			return status;
#ifdef CONFIG_BRIDGE_DVFS
		else if (target_pwr_state == PWRDM_POWER_OFF) {
			/*
			 * Set the OPP to low level before moving to OFF mode
			 */
			if (pdata->dsp_set_min_opp)
				(*pdata->dsp_set_min_opp) (VDD1_OPP1);
		}
#endif /* CONFIG_BRIDGE_DVFS */
	}
#endif /* CONFIG_PM */
	return status;
}
/*
 * ======== wake_dsp ========
 * Wake up DSP from sleep: send a wakeup mailbox command and mark the
 * board RUNNING.  Does nothing if the board is already RUNNING or
 * STOPPED.  A no-op without CONFIG_PM.  Always returns 0.
 */
int wake_dsp(struct bridge_dev_context *dev_context, IN void *pargs)
{
	int status = 0;
#ifdef CONFIG_PM
	/* Nothing to do unless the board is actually asleep */
	if (dev_context->dw_brd_state == BRD_RUNNING ||
	    dev_context->dw_brd_state == BRD_STOPPED)
		return 0;

	/* Send a wakeup message to DSP */
	sm_interrupt_dsp(dev_context, MBX_PM_DSPWAKEUP);
	/* Set the device state to RUNNING */
	dev_context->dw_brd_state = BRD_RUNNING;
#endif /* CONFIG_PM */
	return status;
}
/*
 * ======== dsp_peripheral_clk_ctrl ========
 * Enable/Disable the DSP peripheral clocks as needed..
 *
 * @pargs points to a u32 mailbox word: the low bits (MBX_PM_CLK_IDMASK)
 * select the clock, the command field (shifted by MBX_PM_CLK_CMDSHIFT)
 * selects enable or disable.  The dev_context->dsp_per_clks bitmask is
 * kept in sync with the result.  Returns 0, -EPERM for an unknown
 * clock id, or the enable/disable status.
 */
int dsp_peripheral_clk_ctrl(struct bridge_dev_context *dev_context,
			    IN void *pargs)
{
	u32 ext_clk = 0;
	u32 ext_clk_id = 0;
	u32 ext_clk_cmd = 0;
	u32 clk_id_index = MBX_PM_MAX_RESOURCES;
	u32 tmp_index;
	u32 dsp_per_clks_before;
	int status = 0;

	dsp_per_clks_before = dev_context->dsp_per_clks;
	ext_clk = (u32) *((u32 *) pargs);
	ext_clk_id = ext_clk & MBX_PM_CLK_IDMASK;

	/* process the power message -- TODO, keep it in a separate function */
	for (tmp_index = 0; tmp_index < MBX_PM_MAX_RESOURCES; tmp_index++) {
		if (ext_clk_id == bpwr_clkid[tmp_index]) {
			clk_id_index = tmp_index;
			break;
		}
	}
	/* TODO -- Assert may be a too hard restriction here.. May be we should
	 * just return with failure when the CLK ID does not match */
	/* DBC_ASSERT(clk_id_index < MBX_PM_MAX_RESOURCES); */
	if (clk_id_index == MBX_PM_MAX_RESOURCES) {
		/* return with a more meaningfull error code */
		return -EPERM;
	}
	ext_clk_cmd = (ext_clk >> MBX_PM_CLK_CMDSHIFT) & MBX_PM_CLK_CMDMASK;
	switch (ext_clk_cmd) {
	case BPWR_DISABLE_CLOCK:
		status = dsp_clk_disable(bpwr_clks[clk_id_index].clk);
		dsp_clk_wakeup_event_ctrl(bpwr_clks[clk_id_index].clk_id,
					  false);
		if (DSP_SUCCEEDED(status)) {
			/* Clear the clock's bit in the active mask */
			(dev_context->dsp_per_clks) &=
			    (~((u32) (1 << bpwr_clks[clk_id_index].clk)));
		}
		break;
	case BPWR_ENABLE_CLOCK:
		status = dsp_clk_enable(bpwr_clks[clk_id_index].clk);
		dsp_clk_wakeup_event_ctrl(bpwr_clks[clk_id_index].clk_id, true);
		if (DSP_SUCCEEDED(status))
			/* Mark the clock active */
			(dev_context->dsp_per_clks) |=
			    (1 << bpwr_clks[clk_id_index].clk);
		break;
	default:
		dev_dbg(bridge, "%s: Unsupported CMD\n", __func__);
		/* unsupported cmd */
		/* TODO -- provide support for AUTOIDLE Enable/Disable
		 * commands */
	}
	return status;
}
/*
 * ========pre_scale_dsp========
 * Sends prescale notification to DSP
 *
 * @pargs holds two u32 words: the voltage domain and the target level.
 * If the DSP is asleep (hibernation/retention) no message is sent; if
 * it is running, MBX_PM_SETPOINT_PRENOTIFY is sent via the mailbox.
 * Returns 0, or -EPERM in any other board state.  A no-op without
 * CONFIG_BRIDGE_DVFS.
 */
int pre_scale_dsp(struct bridge_dev_context *dev_context, IN void *pargs)
{
#ifdef CONFIG_BRIDGE_DVFS
	u32 level;
	u32 voltage_domain;

	voltage_domain = *((u32 *) pargs);
	level = *((u32 *) pargs + 1);

	dev_dbg(bridge, "OPP: %s voltage_domain = %x, level = 0x%x\n",
		__func__, voltage_domain, level);
	if ((dev_context->dw_brd_state == BRD_HIBERNATION) ||
	    (dev_context->dw_brd_state == BRD_RETENTION) ||
	    (dev_context->dw_brd_state == BRD_DSP_HIBERNATION)) {
		/* Bug fix: the "%s" conversion previously had no matching
		 * argument (undefined behavior); pass __func__ for it. */
		dev_dbg(bridge, "OPP: %s IVA in sleep. No message to DSP\n",
			__func__);
		return 0;
	} else if ((dev_context->dw_brd_state == BRD_RUNNING)) {
		/* Send a prenotification to DSP */
		dev_dbg(bridge, "OPP: %s sent notification to DSP\n", __func__);
		sm_interrupt_dsp(dev_context, MBX_PM_SETPOINT_PRENOTIFY);
		return 0;
	} else {
		return -EPERM;
	}
#endif /* #ifdef CONFIG_BRIDGE_DVFS */
	return 0;
}
/*
 * ========post_scale_dsp========
 * Sends postscale notification to DSP
 *
 * @pargs holds two u32 words: the voltage domain and the new level.
 * Writes the new OPP level into shared memory; if the DSP is running,
 * additionally sends MBX_PM_SETPOINT_POSTNOTIFY via the mailbox.
 * Returns 0, -EFAULT if no IO manager, or -EPERM in any other board
 * state.  A no-op without CONFIG_BRIDGE_DVFS.
 */
int post_scale_dsp(struct bridge_dev_context *dev_context,
		   IN void *pargs)
{
	int status = 0;
#ifdef CONFIG_BRIDGE_DVFS
	u32 level;
	u32 voltage_domain;
	struct io_mgr *hio_mgr;

	status = dev_get_io_mgr(dev_context->hdev_obj, &hio_mgr);
	if (!hio_mgr)
		return -EFAULT;

	voltage_domain = *((u32 *) pargs);
	level = *((u32 *) pargs + 1);
	dev_dbg(bridge, "OPP: %s voltage_domain = %x, level = 0x%x\n",
		__func__, voltage_domain, level);
	if ((dev_context->dw_brd_state == BRD_HIBERNATION) ||
	    (dev_context->dw_brd_state == BRD_RETENTION) ||
	    (dev_context->dw_brd_state == BRD_DSP_HIBERNATION)) {
		/* Update the OPP value in shared memory */
		io_sh_msetting(hio_mgr, SHM_CURROPP, &level);
		dev_dbg(bridge, "OPP: %s IVA in sleep. Wrote to shm\n",
			__func__);
	} else if ((dev_context->dw_brd_state == BRD_RUNNING)) {
		/* Update the OPP value in shared memory */
		io_sh_msetting(hio_mgr, SHM_CURROPP, &level);
		/* Send a post notification to DSP */
		sm_interrupt_dsp(dev_context, MBX_PM_SETPOINT_POSTNOTIFY);
		dev_dbg(bridge, "OPP: %s wrote to shm. Sent post notification "
			"to DSP\n", __func__);
	} else {
		status = -EPERM;
	}
#endif /* #ifdef CONFIG_BRIDGE_DVFS */
	return status;
}
/*
 *  ======== dsp_grpsel_route ========
 *      Steer one peripheral's PRCM wakeup event between the IVA2 (DSP)
 *      and MPU wakeup groups.
 *
 *  pm_base:     base address of the PER or CORE power-management module.
 *  grpsel_mask: OMAP3430_GRPSEL_* bit of the peripheral being routed.
 *  enable:      true  - the IVA2 owns the wakeup event,
 *               false - the event is handed back to the MPU.
 *
 *  PM_IVA2GRPSEL sits at offset 0xA8 and PM_MPUGRPSEL at 0xA4 from the
 *  module base; exactly one of the two owns the event at any time.
 */
static void dsp_grpsel_route(u32 pm_base, u32 grpsel_mask, bool enable)
{
	u32 iva2_grpsel;
	u32 mpu_grpsel;

	iva2_grpsel = (u32) *((reg_uword32 *) (pm_base + 0xA8));
	mpu_grpsel = (u32) *((reg_uword32 *) (pm_base + 0xA4));

	if (enable) {
		iva2_grpsel |= grpsel_mask;
		mpu_grpsel &= ~grpsel_mask;
	} else {
		mpu_grpsel |= grpsel_mask;
		iva2_grpsel &= ~grpsel_mask;
	}

	*((reg_uword32 *) (pm_base + 0xA8)) = iva2_grpsel;
	*((reg_uword32 *) (pm_base + 0xA4)) = mpu_grpsel;
}

/*
 *  ======== dsp_clk_wakeup_event_ctrl ========
 *      Enable/disable the wakeup event for a DSP peripheral clock by
 *      routing its PRCM group-select bit between the IVA2 and MPU domains.
 *
 *      The read-modify-write sequence was previously open-coded nine
 *      times (once per peripheral); it is now factored into
 *      dsp_grpsel_route().  Behavior is unchanged: GPT5-8 and McBSP2-4
 *      live in the PER power domain, McBSP1/5 in CORE, and unknown
 *      ClkId values are silently ignored, as before.
 */
void dsp_clk_wakeup_event_ctrl(u32 ClkId, bool enable)
{
	struct cfg_hostres *resources;
	int status = 0;
	struct dev_object *hdev_object = NULL;
	struct bridge_dev_context *bridge_context = NULL;

	hdev_object = (struct dev_object *)drv_get_first_dev_object();
	if (!hdev_object)
		return;

	status = dev_get_bridge_context(hdev_object, &bridge_context);
	if (!bridge_context)
		return;

	resources = bridge_context->resources;
	if (!resources)
		return;

	switch (ClkId) {
	case BPWR_GP_TIMER5:
		dsp_grpsel_route((u32) resources->dw_per_pm_base,
				 OMAP3430_GRPSEL_GPT5_MASK, enable);
		break;
	case BPWR_GP_TIMER6:
		dsp_grpsel_route((u32) resources->dw_per_pm_base,
				 OMAP3430_GRPSEL_GPT6_MASK, enable);
		break;
	case BPWR_GP_TIMER7:
		dsp_grpsel_route((u32) resources->dw_per_pm_base,
				 OMAP3430_GRPSEL_GPT7_MASK, enable);
		break;
	case BPWR_GP_TIMER8:
		dsp_grpsel_route((u32) resources->dw_per_pm_base,
				 OMAP3430_GRPSEL_GPT8_MASK, enable);
		break;
	case BPWR_MCBSP1:
		dsp_grpsel_route((u32) resources->dw_core_pm_base,
				 OMAP3430_GRPSEL_MCBSP1_MASK, enable);
		break;
	case BPWR_MCBSP2:
		dsp_grpsel_route((u32) resources->dw_per_pm_base,
				 OMAP3430_GRPSEL_MCBSP2_MASK, enable);
		break;
	case BPWR_MCBSP3:
		dsp_grpsel_route((u32) resources->dw_per_pm_base,
				 OMAP3430_GRPSEL_MCBSP3_MASK, enable);
		break;
	case BPWR_MCBSP4:
		dsp_grpsel_route((u32) resources->dw_per_pm_base,
				 OMAP3430_GRPSEL_MCBSP4_MASK, enable);
		break;
	case BPWR_MCBSP5:
		dsp_grpsel_route((u32) resources->dw_core_pm_base,
				 OMAP3430_GRPSEL_MCBSP5_MASK, enable);
		break;
	}
}

View File

@ -0,0 +1,458 @@
/*
* tiomap_io.c
*
* DSP-BIOS Bridge driver support functions for TI OMAP processors.
*
* Implementation for the io read/write routines.
*
* Copyright (C) 2005-2006 Texas Instruments, Inc.
*
* This package is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
* WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*/
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
/* ----------------------------------- Trace & Debug */
#include <dspbridge/dbc.h>
/* ----------------------------------- Platform Manager */
#include <dspbridge/dev.h>
#include <dspbridge/drv.h>
/* ----------------------------------- OS Adaptation Layer */
#include <dspbridge/cfg.h>
#include <dspbridge/wdt.h>
/* ----------------------------------- specific to this file */
#include "_tiomap.h"
#include "_tiomap_pwr.h"
#include "tiomap_io.h"
static u32 ul_ext_base;
static u32 ul_ext_end;
static u32 shm0_end;
static u32 ul_dyn_ext_base;
static u32 ul_trace_sec_beg;
static u32 ul_trace_sec_end;
static u32 ul_shm_base_virt;
bool symbols_reloaded = true;
/*
 *  ======== read_ext_dsp_data ========
 *      Copies DSP external memory buffers to the host side buffers.
 *
 *  hDevContext:  bridge device context (TLB entries, cached ext base).
 *  pbHostBuf:    destination buffer on the host side.
 *  dwDSPAddr:    DSP-side source address of the read.
 *  ul_num_bytes: number of bytes to copy.
 *  ulMemType:    memory type hint (not consulted in this routine).
 *
 *  Resolves the EXT/DYNEXT/TRACE section symbols lazily into file-scope
 *  statics on first use, computes the GPP virtual address that maps
 *  dwDSPAddr, and memcpy()s from it.  Returns 0 or -EPERM/dev_get_symbol
 *  error codes.
 *
 *  NOTE(review): 'offset' is computed from ul_ext_base even when status
 *  already failed, and there is no check that dwDSPAddr + ul_num_bytes
 *  stays below ul_ext_end - presumably callers guarantee the range;
 *  confirm against the io_sm/proc callers.
 */
int read_ext_dsp_data(struct bridge_dev_context *hDevContext,
		      OUT u8 *pbHostBuf, u32 dwDSPAddr,
		      u32 ul_num_bytes, u32 ulMemType)
{
	int status = 0;
	struct bridge_dev_context *dev_context = hDevContext;
	u32 offset;
	u32 ul_tlb_base_virt = 0;
	u32 ul_shm_offset_virt = 0;
	u32 dw_ext_prog_virt_mem;
	u32 dw_base_addr = dev_context->dw_dsp_ext_base_addr;
	bool trace_read = false;

	/* Lazily resolve the shared-memory base symbol (file-scope cache) */
	if (!ul_shm_base_virt) {
		status = dev_get_symbol(dev_context->hdev_obj,
					SHMBASENAME, &ul_shm_base_virt);
	}
	DBC_ASSERT(ul_shm_base_virt != 0);

	/* Check if it is a read of Trace section */
	if (DSP_SUCCEEDED(status) && !ul_trace_sec_beg) {
		status = dev_get_symbol(dev_context->hdev_obj,
					DSP_TRACESEC_BEG, &ul_trace_sec_beg);
	}
	DBC_ASSERT(ul_trace_sec_beg != 0);

	if (DSP_SUCCEEDED(status) && !ul_trace_sec_end) {
		status = dev_get_symbol(dev_context->hdev_obj,
					DSP_TRACESEC_END, &ul_trace_sec_end);
	}
	DBC_ASSERT(ul_trace_sec_end != 0);

	if (DSP_SUCCEEDED(status)) {
		if ((dwDSPAddr <= ul_trace_sec_end) &&
		    (dwDSPAddr >= ul_trace_sec_beg))
			trace_read = true;
	}

	/* If reading from TRACE, force remap/unmap */
	if (trace_read && dw_base_addr) {
		dw_base_addr = 0;
		dev_context->dw_dsp_ext_base_addr = 0;
	}

	/* No cached mapping: recompute the GPP virtual base for EXT mem */
	if (!dw_base_addr) {
		/* Initialize ul_ext_base and ul_ext_end */
		ul_ext_base = 0;
		ul_ext_end = 0;

		/* Get DYNEXT_BEG, EXT_BEG and EXT_END. */
		if (DSP_SUCCEEDED(status) && !ul_dyn_ext_base) {
			status = dev_get_symbol(dev_context->hdev_obj,
						DYNEXTBASE, &ul_dyn_ext_base);
		}
		DBC_ASSERT(ul_dyn_ext_base != 0);

		if (DSP_SUCCEEDED(status)) {
			status = dev_get_symbol(dev_context->hdev_obj,
						EXTBASE, &ul_ext_base);
		}
		DBC_ASSERT(ul_ext_base != 0);

		if (DSP_SUCCEEDED(status)) {
			status = dev_get_symbol(dev_context->hdev_obj,
						EXTEND, &ul_ext_end);
		}
		DBC_ASSERT(ul_ext_end != 0);

		/* Trace buffer is right after the shm SEG0,
		 * so set the base address to SHMBASE */
		if (trace_read) {
			ul_ext_base = ul_shm_base_virt;
			ul_ext_end = ul_trace_sec_end;
		}

		DBC_ASSERT(ul_ext_end != 0);
		DBC_ASSERT(ul_ext_end > ul_ext_base);

		if (ul_ext_end < ul_ext_base)
			status = -EPERM;

		if (DSP_SUCCEEDED(status)) {
			/* TLB entry 0 anchors the DSP VA <-> GPP VA mapping */
			ul_tlb_base_virt =
			    dev_context->atlb_entry[0].ul_dsp_va * DSPWORDSIZE;
			DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt);
			dw_ext_prog_virt_mem =
			    dev_context->atlb_entry[0].ul_gpp_va;

			if (!trace_read) {
				/* Back off by the (64KB-aligned) shm span,
				 * then advance to the EXT section start */
				ul_shm_offset_virt =
				    ul_shm_base_virt - ul_tlb_base_virt;
				ul_shm_offset_virt +=
				    PG_ALIGN_HIGH(ul_ext_end - ul_dyn_ext_base +
						  1, HW_PAGE_SIZE64KB);
				dw_ext_prog_virt_mem -= ul_shm_offset_virt;
				dw_ext_prog_virt_mem +=
				    (ul_ext_base - ul_dyn_ext_base);
				dev_context->dw_dsp_ext_base_addr =
				    dw_ext_prog_virt_mem;

				/*
				 * This dw_dsp_ext_base_addr will get cleared
				 * only when the board is stopped.
				 */
				if (!dev_context->dw_dsp_ext_base_addr)
					status = -EPERM;
			}

			dw_base_addr = dw_ext_prog_virt_mem;
		}
	}

	if (!dw_base_addr || !ul_ext_base || !ul_ext_end)
		status = -EPERM;

	offset = dwDSPAddr - ul_ext_base;

	if (DSP_SUCCEEDED(status))
		memcpy(pbHostBuf, (u8 *) dw_base_addr + offset, ul_num_bytes);

	return status;
}
/*
 *  ======== write_dsp_data ========
 *  purpose:
 *      Copies buffers to the DSP internal/external memory.
 *
 *  Translates dwDSPAddr into one of three mapped host windows
 *  (dw_mem_base[2]/[3]/[4]) by its offset from dw_dsp_start_add, then
 *  memcpy()s into it.  When ul_num_bytes is 0, the translated host
 *  address is written back through pbHostBuf instead of copying.
 *
 *  Returns 0 on success, -EPERM when resources are absent or the offset
 *  falls outside all three windows.
 *
 *  NOTE(review): an offset exactly equal to base1 matches neither
 *  "< base1" nor "> base1" and returns -EPERM - looks like an off-by-one
 *  at the MEM1/MEM2 boundary; confirm against the OMAP memory map before
 *  changing it.
 */
int write_dsp_data(struct bridge_dev_context *hDevContext,
		   IN u8 *pbHostBuf, u32 dwDSPAddr, u32 ul_num_bytes,
		   u32 ulMemType)
{
	u32 offset;
	u32 dw_base_addr = hDevContext->dw_dsp_base_addr;
	struct cfg_hostres *resources = hDevContext->resources;
	int status = 0;
	u32 base1, base2, base3;

	/* Window offsets, all relative to OMAP_DSP_MEM1_BASE */
	base1 = OMAP_DSP_MEM1_SIZE;
	base2 = OMAP_DSP_MEM2_BASE - OMAP_DSP_MEM1_BASE;
	base3 = OMAP_DSP_MEM3_BASE - OMAP_DSP_MEM1_BASE;

	if (!resources)
		return -EPERM;

	offset = dwDSPAddr - hDevContext->dw_dsp_start_add;
	if (offset < base1) {
		dw_base_addr = MEM_LINEAR_ADDRESS(resources->dw_mem_base[2],
						  resources->dw_mem_length[2]);
	} else if (offset > base1 && offset < base2 + OMAP_DSP_MEM2_SIZE) {
		dw_base_addr = MEM_LINEAR_ADDRESS(resources->dw_mem_base[3],
						  resources->dw_mem_length[3]);
		offset = offset - base2;
	} else if (offset >= base2 + OMAP_DSP_MEM2_SIZE &&
		   offset < base3 + OMAP_DSP_MEM3_SIZE) {
		dw_base_addr = MEM_LINEAR_ADDRESS(resources->dw_mem_base[4],
						  resources->dw_mem_length[4]);
		offset = offset - base3;
	} else {
		return -EPERM;
	}
	if (ul_num_bytes)
		memcpy((u8 *) (dw_base_addr + offset), pbHostBuf, ul_num_bytes);
	else
		/* Zero-length call: report the translated address instead */
		*((u32 *) pbHostBuf) = dw_base_addr + offset;

	return status;
}
/*
 *  ======== write_ext_dsp_data ========
 *  purpose:
 *      Copies buffers to the external memory.
 *
 *  dev_context:  bridge device context.
 *  pbHostBuf:    source buffer on the host side.
 *  dwDSPAddr:    DSP-side destination address.
 *  ul_num_bytes: bytes to copy; 0 means "return the translated address
 *                through pbHostBuf instead of copying".
 *  ulMemType:    memory type hint (not consulted here).
 *  bDynamicLoad: true for dynamic-loader writes; forces a fresh linear
 *                mapping for the write and an unmap afterwards.
 *
 *  Symbol lookups are cached in file-scope statics and refreshed only
 *  while the file-scope flag 'symbols_reloaded' is true; the flag is
 *  cleared at the end of this function.
 *
 *  NOTE(review): temp_byte1/temp_byte2 and remain_byte[] are initialized
 *  but never read - presumably leftovers from an older byte-swap path.
 */
int write_ext_dsp_data(struct bridge_dev_context *dev_context,
		       IN u8 *pbHostBuf, u32 dwDSPAddr,
		       u32 ul_num_bytes, u32 ulMemType,
		       bool bDynamicLoad)
{
	u32 dw_base_addr = dev_context->dw_dsp_ext_base_addr;
	u32 dw_offset = 0;
	u8 temp_byte1, temp_byte2;
	u8 remain_byte[4];
	s32 i;
	int ret = 0;
	u32 dw_ext_prog_virt_mem;
	u32 ul_tlb_base_virt = 0;
	u32 ul_shm_offset_virt = 0;
	struct cfg_hostres *host_res = dev_context->resources;
	bool trace_load = false;

	temp_byte1 = 0x0;
	temp_byte2 = 0x0;

	if (symbols_reloaded) {
		/* Check if it is a load to Trace section */
		ret = dev_get_symbol(dev_context->hdev_obj,
				     DSP_TRACESEC_BEG, &ul_trace_sec_beg);
		if (DSP_SUCCEEDED(ret))
			ret = dev_get_symbol(dev_context->hdev_obj,
					     DSP_TRACESEC_END,
					     &ul_trace_sec_end);
	}
	if (DSP_SUCCEEDED(ret)) {
		if ((dwDSPAddr <= ul_trace_sec_end) &&
		    (dwDSPAddr >= ul_trace_sec_beg))
			trace_load = true;
	}

	/* If dynamic, force remap/unmap */
	if ((bDynamicLoad || trace_load) && dw_base_addr) {
		dw_base_addr = 0;
		MEM_UNMAP_LINEAR_ADDRESS((void *)
					 dev_context->dw_dsp_ext_base_addr);
		dev_context->dw_dsp_ext_base_addr = 0x0;
	}
	/* No cached mapping: resolve section symbols and map EXT memory */
	if (!dw_base_addr) {
		if (symbols_reloaded)
			/* Get SHM_BEG EXT_BEG and EXT_END. */
			ret = dev_get_symbol(dev_context->hdev_obj,
					     SHMBASENAME, &ul_shm_base_virt);
		DBC_ASSERT(ul_shm_base_virt != 0);
		if (bDynamicLoad) {
			/* Dynamic load targets the DYNEXT..EXT_END span */
			if (DSP_SUCCEEDED(ret)) {
				if (symbols_reloaded)
					ret =
					    dev_get_symbol
					    (dev_context->hdev_obj, DYNEXTBASE,
					     &ul_ext_base);
			}
			DBC_ASSERT(ul_ext_base != 0);
			if (DSP_SUCCEEDED(ret)) {
				/* DR  OMAPS00013235 : DLModules array may be
				 * in EXTMEM. It is expected that DYNEXTMEM and
				 * EXTMEM are contiguous, so checking for the
				 * upper bound at EXTEND should be Ok. */
				if (symbols_reloaded)
					ret =
					    dev_get_symbol
					    (dev_context->hdev_obj, EXTEND,
					     &ul_ext_end);
			}
		} else {
			/* Static load targets the EXT_BEG..EXT_END span */
			if (symbols_reloaded) {
				if (DSP_SUCCEEDED(ret))
					ret =
					    dev_get_symbol
					    (dev_context->hdev_obj, EXTBASE,
					     &ul_ext_base);
				DBC_ASSERT(ul_ext_base != 0);
				if (DSP_SUCCEEDED(ret))
					ret =
					    dev_get_symbol
					    (dev_context->hdev_obj, EXTEND,
					     &ul_ext_end);
			}
		}
		/* Trace buffer it right after the shm SEG0, so set the
		 * base address to SHMBASE */
		if (trace_load)
			ul_ext_base = ul_shm_base_virt;

		DBC_ASSERT(ul_ext_end != 0);
		DBC_ASSERT(ul_ext_end > ul_ext_base);
		if (ul_ext_end < ul_ext_base)
			ret = -EPERM;

		if (DSP_SUCCEEDED(ret)) {
			/* TLB entry 0 anchors the DSP VA <-> GPP VA mapping */
			ul_tlb_base_virt =
			    dev_context->atlb_entry[0].ul_dsp_va * DSPWORDSIZE;
			DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt);

			if (symbols_reloaded) {
				if (DSP_SUCCEEDED(ret)) {
					ret =
					    dev_get_symbol
					    (dev_context->hdev_obj,
					     DSP_TRACESEC_END, &shm0_end);
				}
				if (DSP_SUCCEEDED(ret)) {
					ret =
					    dev_get_symbol
					    (dev_context->hdev_obj, DYNEXTBASE,
					     &ul_dyn_ext_base);
				}
			}
			ul_shm_offset_virt =
			    ul_shm_base_virt - ul_tlb_base_virt;
			if (trace_load) {
				dw_ext_prog_virt_mem =
				    dev_context->atlb_entry[0].ul_gpp_va;
			} else {
				dw_ext_prog_virt_mem = host_res->dw_mem_base[1];
				dw_ext_prog_virt_mem +=
				    (ul_ext_base - ul_dyn_ext_base);
			}

			/* Map the EXT span linearly for the duration of
			 * this write */
			dev_context->dw_dsp_ext_base_addr =
			    (u32) MEM_LINEAR_ADDRESS((void *)
						     dw_ext_prog_virt_mem,
						     ul_ext_end - ul_ext_base);
			dw_base_addr += dev_context->dw_dsp_ext_base_addr;
			/* This dw_dsp_ext_base_addr will get cleared only when
			 * the board is stopped. */
			if (!dev_context->dw_dsp_ext_base_addr)
				ret = -EPERM;
		}
	}
	if (!dw_base_addr || !ul_ext_base || !ul_ext_end)
		ret = -EPERM;

	if (DSP_SUCCEEDED(ret)) {
		for (i = 0; i < 4; i++)
			remain_byte[i] = 0x0;

		dw_offset = dwDSPAddr - ul_ext_base;
		/* Also make sure the dwDSPAddr is < ul_ext_end */
		if (dwDSPAddr > ul_ext_end || dw_offset > dwDSPAddr)
			ret = -EPERM;
	}
	if (DSP_SUCCEEDED(ret)) {
		if (ul_num_bytes)
			memcpy((u8 *) dw_base_addr + dw_offset, pbHostBuf,
			       ul_num_bytes);
		else
			/* Zero-length call: report translated address */
			*((u32 *) pbHostBuf) = dw_base_addr + dw_offset;
	}
	/* Unmap here to force remap for other Ext loads */
	if ((bDynamicLoad || trace_load) && dev_context->dw_dsp_ext_base_addr) {
		MEM_UNMAP_LINEAR_ADDRESS((void *)
					 dev_context->dw_dsp_ext_base_addr);
		dev_context->dw_dsp_ext_base_addr = 0x0;
	}
	symbols_reloaded = false;
	return ret;
}
/*
 *  ======== sm_interrupt_dsp ========
 *      Wakes the DSP out of hibernation/retention if needed, then sends
 *      the mailbox message mb_val to it.
 *
 *  Returns 0 on success (including the no-mailbox early-out), -EPERM when
 *  host resources are missing or the mailbox send fails.
 */
int sm_interrupt_dsp(struct bridge_dev_context *dev_context, u16 mb_val)
{
#ifdef CONFIG_BRIDGE_DVFS
	u32 opplevel = 0;
#endif
	struct dspbridge_platform_data *pdata =
	    omap_dspbridge_dev->dev.platform_data;
	struct cfg_hostres *resources = dev_context->resources;
	int status = 0;
	u32 temp;

	/* Nothing to signal before the mailbox exists */
	if (!dev_context->mbox)
		return 0;

	if (!resources)
		return -EPERM;

	if (dev_context->dw_brd_state == BRD_DSP_HIBERNATION ||
	    dev_context->dw_brd_state == BRD_HIBERNATION) {
#ifdef CONFIG_BRIDGE_DVFS
		/* Bump the OPP floor before waking the DSP */
		if (pdata->dsp_get_opp)
			opplevel = (*pdata->dsp_get_opp) ();
		if (opplevel == VDD1_OPP1) {
			if (pdata->dsp_set_min_opp)
				(*pdata->dsp_set_min_opp) (VDD1_OPP2);
		}
#endif
		/* Restart the peripheral clocks */
		dsp_clock_enable_all(dev_context->dsp_per_clks);
		dsp_wdt_enable(true);

		/*
		 * 2:0 AUTO_IVA2_DPLL - Enabling IVA2 DPLL auto control
		 *     in CM_AUTOIDLE_PLL_IVA2 register
		 */
		(*pdata->dsp_cm_write)(1 << OMAP3430_AUTO_IVA2_DPLL_SHIFT,
				OMAP3430_IVA2_MOD, OMAP3430_CM_AUTOIDLE_PLL);

		/*
		 * 7:4 IVA2_DPLL_FREQSEL - IVA2 internal frq set to
		 *     0.75 MHz - 1.0 MHz
		 * 2:0 EN_IVA2_DPLL - Enable IVA2 DPLL in lock mode
		 */
		(*pdata->dsp_cm_rmw_bits)(OMAP3430_IVA2_DPLL_FREQSEL_MASK |
				OMAP3430_EN_IVA2_DPLL_MASK,
				0x3 << OMAP3430_IVA2_DPLL_FREQSEL_SHIFT |
				0x7 << OMAP3430_EN_IVA2_DPLL_SHIFT,
				OMAP3430_IVA2_MOD, OMAP3430_CM_CLKEN_PLL);

		/* Restore mailbox settings */
		omap_mbox_restore_ctx(dev_context->mbox);

		/* Access MMU SYS CONFIG register to generate a short wakeup;
		 * the read itself is the side effect, 'temp' is discarded */
		temp = *(reg_uword32 *) (resources->dw_dmmu_base + 0x10);

		dev_context->dw_brd_state = BRD_RUNNING;
	} else if (dev_context->dw_brd_state == BRD_RETENTION) {
		/* Restart the peripheral clocks */
		dsp_clock_enable_all(dev_context->dsp_per_clks);
	}

	status = omap_mbox_msg_send(dev_context->mbox, mb_val);

	if (status) {
		pr_err("omap_mbox_msg_send Fail and status = %d\n", status);
		status = -EPERM;
	}

	/* Fix: propagate the mailbox error instead of always returning 0 */
	return status;
}

View File

@ -0,0 +1,104 @@
/*
* tiomap_io.h
*
* DSP-BIOS Bridge driver support functions for TI OMAP processors.
*
* Definitions, types and function prototypes for the io (r/w external mem).
*
* Copyright (C) 2005-2006 Texas Instruments, Inc.
*
* This package is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
* WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*/
#ifndef _TIOMAP_IO_
#define _TIOMAP_IO_
/*
* Symbol that defines beginning of shared memory.
* For OMAP (Helen) this is the DSP Virtual base address of SDRAM.
* This will be used to program DSP MMU to map DSP Virt to GPP phys.
* (see dspMmuTlbEntry()).
*/
#define SHMBASENAME "SHM_BEG"
#define EXTBASE "EXT_BEG"
#define EXTEND "_EXT_END"
#define DYNEXTBASE "_DYNEXT_BEG"
#define DYNEXTEND "_DYNEXT_END"
#define IVAEXTMEMBASE "_IVAEXTMEM_BEG"
#define IVAEXTMEMEND "_IVAEXTMEM_END"
#define DSP_TRACESEC_BEG "_BRIDGE_TRACE_BEG"
#define DSP_TRACESEC_END "_BRIDGE_TRACE_END"
#define SYS_PUTCBEG "_SYS_PUTCBEG"
#define SYS_PUTCEND "_SYS_PUTCEND"
#define BRIDGE_SYS_PUTC_CURRENT "_BRIDGE_SYS_PUTC_current"
#define WORDSWAP_ENABLE 0x3 /* Enable word swap */
/*
* ======== read_ext_dsp_data ========
* Reads it from DSP External memory. The external memory for the DSP
* is configured by the combination of DSP MMU and shm Memory manager in the CDB
*/
extern int read_ext_dsp_data(struct bridge_dev_context *dev_context,
OUT u8 *pbHostBuf, u32 dwDSPAddr,
u32 ul_num_bytes, u32 ulMemType);
/*
* ======== write_dsp_data ========
*/
extern int write_dsp_data(struct bridge_dev_context *dev_context,
OUT u8 *pbHostBuf, u32 dwDSPAddr,
u32 ul_num_bytes, u32 ulMemType);
/*
* ======== write_ext_dsp_data ========
* Writes to the DSP External memory for external program.
* The ext mem for progra is configured by the combination of DSP MMU and
* shm Memory manager in the CDB
*/
extern int write_ext_dsp_data(struct bridge_dev_context *dev_context,
IN u8 *pbHostBuf, u32 dwDSPAddr,
u32 ul_num_bytes, u32 ulMemType,
bool bDynamicLoad);
/*
 * ======== write_ext32_bit_dsp_data ========
 * Writes 32 bit data to the external memory, applying the controller's
 * 16-bit half-word swap when tc_word_swap_on is set.
 */
extern inline void write_ext32_bit_dsp_data(IN const
					struct bridge_dev_context *dev_context,
					IN u32 dwDSPAddr, IN u32 val)
{
	u32 out = val;

	/* Swap the two 16-bit halves when TC word swapping is active */
	if (dev_context->tc_word_swap_on)
		out = ((val << 16) & 0xFFFF0000) | ((val >> 16) & 0x0000FFFF);

	*(u32 *) dwDSPAddr = out;
}
/*
 * ======== read_ext32_bit_dsp_data ========
 * Reads 32 bit data from the external memory, applying the controller's
 * 16-bit half-word swap when tc_word_swap_on is set.
 */
extern inline u32 read_ext32_bit_dsp_data(IN const struct bridge_dev_context
					  *dev_context, IN u32 dwDSPAddr)
{
	u32 word = *(u32 *) dwDSPAddr;

	/* Undo the two 16-bit halves' swap when TC word swapping is active */
	if (dev_context->tc_word_swap_on)
		word = ((word << 16) & 0xFFFF0000) |
		       ((word >> 16) & 0x0000FFFF);

	return word;
}
#endif /* _TIOMAP_IO_ */

View File

@ -0,0 +1,303 @@
/*
* ue_deh.c
*
* DSP-BIOS Bridge driver support functions for TI OMAP processors.
*
* Implements upper edge DSP exception handling (DEH) functions.
*
* Copyright (C) 2005-2006 Texas Instruments, Inc.
*
* This package is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
* WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*/
/* ----------------------------------- Host OS */
#include <dspbridge/host_os.h>
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/std.h>
#include <dspbridge/dbdefs.h>
/* ----------------------------------- Trace & Debug */
#include <dspbridge/dbc.h>
/* ----------------------------------- OS Adaptation Layer */
#include <dspbridge/cfg.h>
#include <dspbridge/clk.h>
#include <dspbridge/ntfy.h>
#include <dspbridge/drv.h>
/* ----------------------------------- Link Driver */
#include <dspbridge/dspdeh.h>
/* ----------------------------------- Platform Manager */
#include <dspbridge/dev.h>
#include <dspbridge/dspapi.h>
#include <dspbridge/wdt.h>
/* ------------------------------------ Hardware Abstraction Layer */
#include <hw_defs.h>
#include <hw_mmu.h>
/* ----------------------------------- This */
#include "mmu_fault.h"
#include "_tiomap.h"
#include "_deh.h"
#include "_tiomap_pwr.h"
#include <dspbridge/io_sm.h>
static struct hw_mmu_map_attrs_t map_attrs = { HW_LITTLE_ENDIAN,
HW_ELEM_SIZE16BIT,
HW_MMU_CPUES
};
static void *dummy_va_addr;
/*
 * bridge_deh_create - allocate and initialize the DSP exception handler.
 * @ret_deh_mgr: out-parameter; receives the new manager, or NULL on failure.
 * @hdev_obj:    device object used to fetch the bridge context.
 *
 * Sets up the notification object, the MMU-fault tasklet, and the DSP MMU
 * fault ISR.  Returns 0 on success, -ENOMEM or -EPERM on failure.
 *
 * NOTE(review): on the error path bridge_deh_destroy() is called even when
 * request_irq() itself failed, and destroy unconditionally free_irq()s -
 * confirm that freeing a never-requested IRQ is intended here.
 */
int bridge_deh_create(struct deh_mgr **ret_deh_mgr,
		struct dev_object *hdev_obj)
{
	int status = 0;
	struct deh_mgr *deh_mgr;
	struct bridge_dev_context *hbridge_context = NULL;

	/* Message manager will be created when a file is loaded, since
	 * size of message buffer in shared memory is configurable in
	 * the base image. */
	/* Get Bridge context info. */
	dev_get_bridge_context(hdev_obj, &hbridge_context);
	DBC_ASSERT(hbridge_context);
	dummy_va_addr = NULL;
	/* Allocate IO manager object: */
	deh_mgr = kzalloc(sizeof(struct deh_mgr), GFP_KERNEL);
	if (!deh_mgr) {
		status = -ENOMEM;
		goto leave;
	}

	/* Create an NTFY object to manage notifications */
	deh_mgr->ntfy_obj = kmalloc(sizeof(struct ntfy_object), GFP_KERNEL);
	if (deh_mgr->ntfy_obj) {
		ntfy_init(deh_mgr->ntfy_obj);
	} else {
		status = -ENOMEM;
		goto err;
	}

	/* Create a MMUfault DPC */
	tasklet_init(&deh_mgr->dpc_tasklet, mmu_fault_dpc, (u32) deh_mgr);

	/* Fill in context structure */
	deh_mgr->hbridge_context = hbridge_context;
	deh_mgr->err_info.dw_err_mask = 0L;
	deh_mgr->err_info.dw_val1 = 0L;
	deh_mgr->err_info.dw_val2 = 0L;
	deh_mgr->err_info.dw_val3 = 0L;

	/* Install ISR function for DSP MMU fault */
	if ((request_irq(INT_DSP_MMU_IRQ, mmu_fault_isr, 0,
			 "DspBridge\tiommu fault",
			 (void *)deh_mgr)) == 0)
		status = 0;
	else
		status = -EPERM;

err:
	if (DSP_FAILED(status)) {
		/* If create failed, cleanup */
		bridge_deh_destroy(deh_mgr);
		deh_mgr = NULL;
	}

leave:
	*ret_deh_mgr = deh_mgr;

	return status;
}
/*
 * bridge_deh_destroy - tear down the DSP exception handler.
 * @deh_mgr: manager created by bridge_deh_create(); may be partially
 *           initialized when called from the create error path.
 *
 * Returns -EFAULT for a NULL manager, 0 otherwise.
 *
 * NOTE(review): free_irq() runs even when request_irq() never succeeded
 * (create's error path) - verify this is safe on this kernel.
 */
int bridge_deh_destroy(struct deh_mgr *deh_mgr)
{
	if (!deh_mgr)
		return -EFAULT;

	/* Release dummy VA buffer */
	bridge_deh_release_dummy_mem();
	/* If notification object exists, delete it */
	if (deh_mgr->ntfy_obj) {
		ntfy_delete(deh_mgr->ntfy_obj);
		kfree(deh_mgr->ntfy_obj);
	}
	/* Disable DSP MMU fault */
	free_irq(INT_DSP_MMU_IRQ, deh_mgr);
	/* Free DPC object */
	tasklet_kill(&deh_mgr->dpc_tasklet);

	/* Deallocate the DEH manager object */
	kfree(deh_mgr);

	return 0;
}
/*
 * bridge_deh_register_notify - (un)register a client DEH notification.
 * A non-zero event_mask registers hnotification for those events; a zero
 * mask unregisters it.  Returns the ntfy_* result, or -EFAULT when the
 * manager is NULL.
 */
int bridge_deh_register_notify(struct deh_mgr *deh_mgr, u32 event_mask,
		u32 notify_type,
		struct dsp_notification *hnotification)
{
	if (!deh_mgr)
		return -EFAULT;

	if (!event_mask)
		return ntfy_unregister(deh_mgr->ntfy_obj, hnotification);

	return ntfy_register(deh_mgr->ntfy_obj, hnotification,
			     event_mask, notify_type);
}
/*
 * bridge_deh_notify - record and broadcast a DSP exception.
 * @deh_mgr:     the exception manager (no-op when NULL).
 * @ulEventMask: one of DSP_SYSERROR / DSP_MMUFAULT / DSP_PWRERROR /
 *               DSP_WDTOVERFLOW.
 * @dwErrInfo:   event-specific error word stored in err_info.dw_val1.
 *
 * For an MMU fault, maps a freshly allocated dummy page over the faulting
 * DSP address so the DSP can run long enough to dump its stack, then acks
 * the fault.  Ends by notifying registered clients (unless already in
 * BRD_ERROR), marking the board BRD_ERROR, and stopping DSP clocks/WDT.
 *
 * NOTE(review): 'fault_addr' is an external global - presumably set by the
 * MMU fault ISR in mmu_fault.c; confirm.  'status' is never set non-zero
 * here, so the DSP_SUCCEEDED(status) guard before hw_mmu_tlb_add() always
 * passes.
 */
void bridge_deh_notify(struct deh_mgr *deh_mgr, u32 ulEventMask, u32 dwErrInfo)
{
	struct bridge_dev_context *dev_context;
	int status = 0;
	u32 hw_mmu_max_tlb_count = 31;
	struct cfg_hostres *resources;
	hw_status hw_status_obj;

	if (!deh_mgr)
		return;

	dev_info(bridge, "%s: device exception\n", __func__);
	dev_context = (struct bridge_dev_context *)deh_mgr->hbridge_context;
	resources = dev_context->resources;

	switch (ulEventMask) {
	case DSP_SYSERROR:
		/* reset err_info structure before use */
		deh_mgr->err_info.dw_err_mask = DSP_SYSERROR;
		deh_mgr->err_info.dw_val1 = 0L;
		deh_mgr->err_info.dw_val2 = 0L;
		deh_mgr->err_info.dw_val3 = 0L;
		deh_mgr->err_info.dw_val1 = dwErrInfo;
		dev_err(bridge, "%s: %s, err_info = 0x%x\n",
			__func__, "DSP_SYSERROR", dwErrInfo);
		dump_dl_modules(dev_context);
		dump_dsp_stack(dev_context);
		break;
	case DSP_MMUFAULT:
		/* MMU fault routine should have set err info structure. */
		deh_mgr->err_info.dw_err_mask = DSP_MMUFAULT;
		dev_err(bridge, "%s: %s, err_info = 0x%x\n",
			__func__, "DSP_MMUFAULT", dwErrInfo);
		dev_info(bridge, "%s: %s, high=0x%x, low=0x%x, "
			 "fault=0x%x\n", __func__, "DSP_MMUFAULT",
			 (unsigned int) deh_mgr->err_info.dw_val1,
			 (unsigned int) deh_mgr->err_info.dw_val2,
			 (unsigned int) fault_addr);
		/* Backing page for the temporary TLB entry below;
		 * released later via bridge_deh_release_dummy_mem() */
		dummy_va_addr = (void*)__get_free_page(GFP_ATOMIC);
		dev_context = (struct bridge_dev_context *)
					deh_mgr->hbridge_context;

		print_dsp_trace_buffer(dev_context);
		dump_dl_modules(dev_context);

		/*
		 * Reset the dynamic mmu index to fixed count if it exceeds
		 * 31. So that the dynmmuindex is always between the range of
		 * standard/fixed entries and 31.
		 */
		if (dev_context->num_tlb_entries >
		    hw_mmu_max_tlb_count) {
			dev_context->num_tlb_entries =
				dev_context->fixed_tlb_entries;
		}
		if (DSP_SUCCEEDED(status)) {
			/* Map the dummy page at the faulting address so the
			 * DSP survives long enough for a stack dump */
			hw_status_obj =
			    hw_mmu_tlb_add(resources->dw_dmmu_base,
					   virt_to_phys(dummy_va_addr), fault_addr,
					   HW_PAGE_SIZE4KB, 1,
					   &map_attrs, HW_SET, HW_SET);
		}
		/* GPT8 gives the DSP a brief window to run before the dump */
		dsp_clk_enable(DSP_CLK_GPT8);

		dsp_gpt_wait_overflow(DSP_CLK_GPT8, 0xfffffffe);

		/* Clear MMU interrupt */
		hw_mmu_event_ack(resources->dw_dmmu_base,
				 HW_MMU_TRANSLATION_FAULT);
		dump_dsp_stack(deh_mgr->hbridge_context);
		dsp_clk_disable(DSP_CLK_GPT8);
		break;
#ifdef CONFIG_BRIDGE_NTFY_PWRERR
	case DSP_PWRERROR:
		/* reset err_info structure before use */
		deh_mgr->err_info.dw_err_mask = DSP_PWRERROR;
		deh_mgr->err_info.dw_val1 = 0L;
		deh_mgr->err_info.dw_val2 = 0L;
		deh_mgr->err_info.dw_val3 = 0L;
		deh_mgr->err_info.dw_val1 = dwErrInfo;
		dev_err(bridge, "%s: %s, err_info = 0x%x\n",
			__func__, "DSP_PWRERROR", dwErrInfo);
		break;
#endif /* CONFIG_BRIDGE_NTFY_PWRERR */
	case DSP_WDTOVERFLOW:
		deh_mgr->err_info.dw_err_mask = DSP_WDTOVERFLOW;
		deh_mgr->err_info.dw_val1 = 0L;
		deh_mgr->err_info.dw_val2 = 0L;
		deh_mgr->err_info.dw_val3 = 0L;
		dev_err(bridge, "%s: DSP_WDTOVERFLOW\n", __func__);
		break;
	default:
		dev_dbg(bridge, "%s: Unknown Error, err_info = 0x%x\n",
			__func__, dwErrInfo);
		break;
	}

	/* Filter subsequent notifications when an error occurs */
	if (dev_context->dw_brd_state != BRD_ERROR) {
		ntfy_notify(deh_mgr->ntfy_obj, ulEventMask);
#ifdef CONFIG_BRIDGE_RECOVERY
		bridge_recover_schedule();
#endif
	}

	/* Set the Board state as ERROR */
	dev_context->dw_brd_state = BRD_ERROR;
	/* Disable all the clocks that were enabled by DSP */
	dsp_clock_disable_all(dev_context->dsp_per_clks);
	/*
	 * Avoid the subsequent WDT if it happens once,
	 * also if fatal error occurs.
	 */
	dsp_wdt_enable(false);
}
/*
 * bridge_deh_get_info - report the last recorded DSP error to the caller.
 * @deh_mgr:  exception manager holding the err_info record.
 * @pErrInfo: destination structure filled field by field.
 *
 * Returns -EFAULT for a NULL manager, 0 otherwise.
 */
int bridge_deh_get_info(struct deh_mgr *deh_mgr,
		struct dsp_errorinfo *pErrInfo)
{
	DBC_REQUIRE(deh_mgr);
	DBC_REQUIRE(pErrInfo);

	if (!deh_mgr)
		return -EFAULT;

	/* Copy DEH error info structure to PROC error info structure. */
	pErrInfo->dw_err_mask = deh_mgr->err_info.dw_err_mask;
	pErrInfo->dw_val1 = deh_mgr->err_info.dw_val1;
	pErrInfo->dw_val2 = deh_mgr->err_info.dw_val2;
	pErrInfo->dw_val3 = deh_mgr->err_info.dw_val3;

	return 0;
}
/*
 * bridge_deh_release_dummy_mem - free the page mapped over a faulting DSP
 * address by bridge_deh_notify()'s DSP_MMUFAULT path.  free_page(0) is a
 * no-op, so this is safe when no fault page was ever allocated.
 */
void bridge_deh_release_dummy_mem(void)
{
	free_page((unsigned long)dummy_va_addr);
	dummy_va_addr = NULL;
}

View File

@ -0,0 +1,150 @@
/*
* wdt.c
*
* DSP-BIOS Bridge driver support functions for TI OMAP processors.
*
* IO dispatcher for a shared memory channel driver.
*
* Copyright (C) 2010 Texas Instruments, Inc.
*
* This package is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
* WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*/
#include <linux/err.h>

#include <dspbridge/std.h>
#include <dspbridge/dbdefs.h>
#include <dspbridge/dspdeh.h>
#include <dspbridge/dev.h>
#include <dspbridge/_chnl_sm.h>
#include <dspbridge/wdt.h>
#include <dspbridge/host_os.h>
#ifdef CONFIG_BRIDGE_WDT3
#define OMAP34XX_WDT3_BASE (L4_PER_34XX_BASE + 0x30000)
static struct dsp_wdt_setting dsp_wdt;
/*
 * dsp_wdt_dpc - tasklet body scheduled from dsp_wdt_isr().
 * Looks up the DEH manager of the first bridge device and reports a
 * DSP_WDTOVERFLOW exception through it.  'data' is unused; the WDT state
 * lives in the file-scope dsp_wdt.
 */
void dsp_wdt_dpc(unsigned long data)
{
	struct deh_mgr *deh_mgr;

	dev_get_deh_mgr(dev_get_first(), &deh_mgr);
	if (deh_mgr)
		bridge_deh_notify(deh_mgr, DSP_WDTOVERFLOW, 0);
}
/*
 * dsp_wdt_isr - WDT3 hard-IRQ handler.
 * Acknowledges the interrupt by writing the pending status bits back to
 * the WDT3 interrupt status register, then defers the DEH notification
 * work to the wdt3 tasklet.
 */
irqreturn_t dsp_wdt_isr(int irq, void *data)
{
	u32 value;
	/* ack wdt3 interrupt */
	value = __raw_readl(dsp_wdt.reg_base + OMAP3_WDT3_ISR_OFFSET);
	__raw_writel(value, dsp_wdt.reg_base + OMAP3_WDT3_ISR_OFFSET);

	tasklet_schedule(&dsp_wdt.wdt3_tasklet);
	return IRQ_HANDLED;
}
/*
 * dsp_wdt_init - set up the WDT3 clocks, tasklet, and IRQ.
 *
 * Fix: clk_get() reports failure with an ERR_PTR() cookie, never NULL,
 * so the previous NULL checks could not detect a missing clock and a
 * later clk_put()/clk_enable() would dereference the error cookie.
 * Failed lookups now leave the clock fields NULL so dsp_wdt_exit() and
 * dsp_wdt_enable() stay safe.
 *
 * Returns 0 on success, -EFAULT when a clock is unavailable, or the
 * request_irq() error code.
 */
int dsp_wdt_init(void)
{
	int ret = 0;

	dsp_wdt.sm_wdt = NULL;
	dsp_wdt.reg_base = OMAP2_L4_IO_ADDRESS(OMAP34XX_WDT3_BASE);
	tasklet_init(&dsp_wdt.wdt3_tasklet, dsp_wdt_dpc, 0);

	dsp_wdt.fclk = clk_get(NULL, "wdt3_fck");
	if (IS_ERR(dsp_wdt.fclk)) {
		dsp_wdt.fclk = NULL;
		ret = -EFAULT;
	} else {
		dsp_wdt.iclk = clk_get(NULL, "wdt3_ick");
		if (IS_ERR(dsp_wdt.iclk)) {
			dsp_wdt.iclk = NULL;
			clk_put(dsp_wdt.fclk);
			dsp_wdt.fclk = NULL;
			ret = -EFAULT;
		}
	}

	if (!ret)
		ret = request_irq(INT_34XX_WDT3_IRQ, dsp_wdt_isr, 0,
				  "dsp_wdt", &dsp_wdt);

	/* Disable at this moment, it will be enabled when DSP starts */
	if (!ret)
		disable_irq(INT_34XX_WDT3_IRQ);

	return ret;
}
/*
 * dsp_wdt_sm_set - attach the shared-memory WDT control block.
 * 'data' points at the WDT fields inside shared memory; the configured
 * overflow timeout is published to the DSP through it.
 */
void dsp_wdt_sm_set(void *data)
{
	dsp_wdt.sm_wdt = data;
	dsp_wdt.sm_wdt->wdt_overflow = CONFIG_WDT_TIMEOUT;
}
/*
 * dsp_wdt_exit - undo dsp_wdt_init(): release the IRQ, kill the tasklet,
 * drop the clock references, and clear all WDT state.
 */
void dsp_wdt_exit(void)
{
	free_irq(INT_34XX_WDT3_IRQ, &dsp_wdt);
	tasklet_kill(&dsp_wdt.wdt3_tasklet);

	if (dsp_wdt.fclk)
		clk_put(dsp_wdt.fclk);
	if (dsp_wdt.iclk)
		clk_put(dsp_wdt.iclk);

	dsp_wdt.fclk = NULL;
	dsp_wdt.iclk = NULL;
	dsp_wdt.sm_wdt = NULL;
	dsp_wdt.reg_base = NULL;
}
/*
 * dsp_wdt_enable - start/stop the DSP watchdog.
 * Idempotent: repeated calls with the same 'enable' value are no-ops, as
 * are calls before the clocks were acquired.
 *
 * Fix: sm_wdt is only set once a base image attaches the shared-memory
 * control block via dsp_wdt_sm_set(); it was dereferenced here without a
 * NULL check, which could oops when the WDT is toggled before/after that
 * point (e.g. from sm_interrupt_dsp or the DEH path).  The clock and IRQ
 * handling is unchanged.
 */
void dsp_wdt_enable(bool enable)
{
	u32 tmp;
	static bool wdt_enable;

	if (wdt_enable == enable || !dsp_wdt.fclk || !dsp_wdt.iclk)
		return;

	wdt_enable = enable;

	if (enable) {
		clk_enable(dsp_wdt.fclk);
		clk_enable(dsp_wdt.iclk);
		if (dsp_wdt.sm_wdt)
			dsp_wdt.sm_wdt->wdt_setclocks = 1;
		/* Ack any stale WDT3 interrupt before unmasking the IRQ */
		tmp = __raw_readl(dsp_wdt.reg_base + OMAP3_WDT3_ISR_OFFSET);
		__raw_writel(tmp, dsp_wdt.reg_base + OMAP3_WDT3_ISR_OFFSET);
		enable_irq(INT_34XX_WDT3_IRQ);
	} else {
		disable_irq(INT_34XX_WDT3_IRQ);
		if (dsp_wdt.sm_wdt)
			dsp_wdt.sm_wdt->wdt_setclocks = 0;
		clk_disable(dsp_wdt.iclk);
		clk_disable(dsp_wdt.fclk);
	}
}
#else
/* CONFIG_BRIDGE_WDT3 disabled: the WDT3 API compiles to no-op stubs so
 * callers need no conditional compilation of their own. */
void dsp_wdt_enable(bool enable)
{
}

void dsp_wdt_sm_set(void *data)
{
}

int dsp_wdt_init(void)
{
	/* Nothing to set up; report success */
	return 0;
}

void dsp_wdt_exit(void)
{
}
#endif