
staging: tidspbridge: split bridge_io_on_loaded

Due to its size, this function declares too many variables.
To split it, a new structure has been declared to hold the
values as they are read from the baseimage.
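
The general shape is to collect what used to be separate stack
locals into one heap-allocated structure that a small helper fills
in and hands back. A minimal sketch of that pattern (img_vals,
read_img_vals, BASE_SYM and LIMIT_SYM are illustrative names, not
the driver's actual identifiers):

struct img_vals {
    u32 base;
    u32 limit;
};

static struct img_vals *read_img_vals(struct cod_manager *cod_man)
{
    struct img_vals *v;

    v = kzalloc(sizeof(*v), GFP_KERNEL);
    if (!v)
        return ERR_PTR(-ENOMEM);

    /* Read each value from the loaded baseimage, bailing out on error.
     * BASE_SYM and LIMIT_SYM stand in for the real symbol names. */
    if (cod_get_sym_value(cod_man, BASE_SYM, &v->base) ||
        cod_get_sym_value(cod_man, LIMIT_SYM, &v->limit)) {
        kfree(v);
        return ERR_PTR(-EFAULT);
    }

    return v;
}

The caller then works off v->base and v->limit instead of its own
locals, so its frame stays small, and frees the structure when done.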

While at it, indentation was reduced by renaming variables
and flattening blocks of code with the following structure:

if (success) {
    ...
    if (success)
        ...
}
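
In the rewritten code those nested blocks become early exits that
jump to a single cleanup point. Roughly, the flattened shape looks
like this (first_step, second_step and the cleanup label are
illustrative, not the driver's actual calls):

status = first_step();
if (status)
    goto cleanup;

status = second_step();
if (status)
    goto cleanup;

/* success path */
return 0;

cleanup:
    /* propagate the first error encountered */
    return status;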

This fixes the following warning:
drivers/staging/tidspbridge/core/io_sm.c:
	In function 'bridge_io_on_loaded':
drivers/staging/tidspbridge/core/io_sm.c:777:
	warning: the frame size of 1032 bytes is larger
	than 1024 bytes

Signed-off-by: Omar Ramirez Luna <omar.ramirez@copitl.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Omar Ramirez Luna 2012-06-29 13:49:18 -05:00 committed by Greg Kroah-Hartman
parent a454ad15e0
commit e8cfe4116d


@@ -128,6 +128,16 @@ struct io_mgr {
};
struct shm_symbol_val {
u32 shm_base;
u32 shm_lim;
u32 msg_base;
u32 msg_lim;
u32 shm0_end;
u32 dyn_ext;
u32 ext_end;
};
/* Function Prototypes */
static void io_dispatch_pm(struct io_mgr *pio_mgr);
static void notify_chnl_complete(struct chnl_object *pchnl,
@@ -256,6 +266,75 @@ int bridge_io_destroy(struct io_mgr *hio_mgr)
return status;
}
struct shm_symbol_val *_get_shm_symbol_values(struct io_mgr *hio_mgr)
{
struct shm_symbol_val *s;
struct cod_manager *cod_man;
int status;
s = kzalloc(sizeof(*s), GFP_KERNEL);
if (!s)
return ERR_PTR(-ENOMEM);
status = dev_get_cod_mgr(hio_mgr->dev_obj, &cod_man);
if (status)
goto free_symbol;
/* Get start and length of channel part of shared memory */
status = cod_get_sym_value(cod_man, CHNL_SHARED_BUFFER_BASE_SYM,
&s->shm_base);
if (status)
goto free_symbol;
status = cod_get_sym_value(cod_man, CHNL_SHARED_BUFFER_LIMIT_SYM,
&s->shm_lim);
if (status)
goto free_symbol;
if (s->shm_lim <= s->shm_base) {
status = -EINVAL;
goto free_symbol;
}
/* Get start and length of message part of shared memory */
status = cod_get_sym_value(cod_man, MSG_SHARED_BUFFER_BASE_SYM,
&s->msg_base);
if (status)
goto free_symbol;
status = cod_get_sym_value(cod_man, MSG_SHARED_BUFFER_LIMIT_SYM,
&s->msg_lim);
if (status)
goto free_symbol;
if (s->msg_lim <= s->msg_base) {
status = -EINVAL;
goto free_symbol;
}
#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
status = cod_get_sym_value(cod_man, DSP_TRACESEC_END, &s->shm0_end);
#else
status = cod_get_sym_value(cod_man, SHM0_SHARED_END_SYM, &s->shm0_end);
#endif
if (status)
goto free_symbol;
status = cod_get_sym_value(cod_man, DYNEXTBASE, &s->dyn_ext);
if (status)
goto free_symbol;
status = cod_get_sym_value(cod_man, EXTEND, &s->ext_end);
if (status)
goto free_symbol;
return s;
free_symbol:
kfree(s);
return ERR_PTR(status);
}
/*
* ======== bridge_io_on_loaded ========
* Purpose:
@@ -265,193 +344,112 @@ int bridge_io_destroy(struct io_mgr *hio_mgr)
*/
int bridge_io_on_loaded(struct io_mgr *hio_mgr)
{
struct bridge_dev_context *dc = hio_mgr->bridge_context;
struct cfg_hostres *cfg_res = dc->resources;
struct bridge_ioctl_extproc *eproc;
struct cod_manager *cod_man;
struct chnl_mgr *hchnl_mgr;
struct msg_mgr *hmsg_mgr;
u32 ul_shm_base;
u32 ul_shm_base_offset;
u32 ul_shm_limit;
u32 ul_shm_length = -1;
u32 ul_mem_length = -1;
u32 ul_msg_base;
u32 ul_msg_limit;
u32 ul_msg_length = -1;
u32 ul_ext_end;
u32 ul_gpp_pa = 0;
u32 ul_gpp_va = 0;
u32 ul_dsp_va = 0;
u32 ul_seg_size = 0;
u32 ul_pad_size = 0;
struct shm_symbol_val *s;
int status;
u8 num_procs;
s32 ndx;
u32 i;
int status = 0;
u8 num_procs = 0;
s32 ndx = 0;
/* DSP MMU setup table */
struct bridge_ioctl_extproc ae_proc[BRDIOCTL_NUMOFMMUTLB];
struct cfg_hostres *host_res;
struct bridge_dev_context *pbridge_context;
u32 map_attrs;
u32 shm0_end;
u32 ul_dyn_ext_base;
u32 ul_seg1_size = 0;
u32 pa_curr = 0;
u32 va_curr = 0;
u32 gpp_va_curr = 0;
u32 num_bytes = 0;
u32 mem_sz, msg_sz, pad_sz, shm_sz, shm_base_offs;
u32 seg0_sz, seg1_sz;
u32 pa, va, da;
u32 pa_curr, va_curr, da_curr;
u32 bytes;
u32 all_bits = 0;
u32 page_size[] = { HW_PAGE_SIZE16MB, HW_PAGE_SIZE1MB,
u32 page_size[] = {
HW_PAGE_SIZE16MB, HW_PAGE_SIZE1MB,
HW_PAGE_SIZE64KB, HW_PAGE_SIZE4KB
};
u32 map_attrs = DSP_MAPLITTLEENDIAN | DSP_MAPPHYSICALADDR |
DSP_MAPELEMSIZE32 | DSP_MAPDONOTLOCK;
status = dev_get_bridge_context(hio_mgr->dev_obj, &pbridge_context);
if (!pbridge_context) {
status = -EFAULT;
goto func_end;
}
host_res = pbridge_context->resources;
if (!host_res) {
status = -EFAULT;
goto func_end;
}
status = dev_get_cod_mgr(hio_mgr->dev_obj, &cod_man);
if (!cod_man) {
status = -EFAULT;
goto func_end;
}
if (status)
return status;
hchnl_mgr = hio_mgr->chnl_mgr;
/* The message manager is destroyed when the board is stopped. */
/* The message manager is destroyed when the board is stopped */
dev_get_msg_mgr(hio_mgr->dev_obj, &hio_mgr->msg_mgr);
hmsg_mgr = hio_mgr->msg_mgr;
if (!hchnl_mgr || !hmsg_mgr) {
status = -EFAULT;
goto func_end;
}
if (!hchnl_mgr || !hmsg_mgr)
return -EFAULT;
if (hio_mgr->shared_mem)
hio_mgr->shared_mem = NULL;
/* Get start and length of channel part of shared memory */
status = cod_get_sym_value(cod_man, CHNL_SHARED_BUFFER_BASE_SYM,
&ul_shm_base);
if (status) {
status = -EFAULT;
goto func_end;
}
status = cod_get_sym_value(cod_man, CHNL_SHARED_BUFFER_LIMIT_SYM,
&ul_shm_limit);
if (status) {
status = -EFAULT;
goto func_end;
}
if (ul_shm_limit <= ul_shm_base) {
status = -EINVAL;
goto func_end;
}
s = _get_shm_symbol_values(hio_mgr);
if (IS_ERR(s))
return PTR_ERR(s);
/* Get total length in bytes */
ul_shm_length = (ul_shm_limit - ul_shm_base + 1) * hio_mgr->word_size;
shm_sz = (s->shm_lim - s->shm_base + 1) * hio_mgr->word_size;
/* Calculate size of a PROCCOPY shared memory region */
dev_dbg(bridge, "%s: (proc)proccopy shmmem size: 0x%x bytes\n",
__func__, (ul_shm_length - sizeof(struct shm)));
__func__, shm_sz - sizeof(struct shm));
/* Get start and length of message part of shared memory */
status = cod_get_sym_value(cod_man, MSG_SHARED_BUFFER_BASE_SYM,
&ul_msg_base);
if (!status) {
status = cod_get_sym_value(cod_man, MSG_SHARED_BUFFER_LIMIT_SYM,
&ul_msg_limit);
if (!status) {
if (ul_msg_limit <= ul_msg_base) {
status = -EINVAL;
} else {
/*
* Length (bytes) of messaging part of shared
* memory.
*/
ul_msg_length =
(ul_msg_limit - ul_msg_base +
1) * hio_mgr->word_size;
/*
* Total length (bytes) of shared memory:
* chnl + msg.
*/
ul_mem_length = ul_shm_length + ul_msg_length;
}
} else {
status = -EFAULT;
}
} else {
status = -EFAULT;
}
if (!status) {
#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE)
status =
cod_get_sym_value(cod_man, DSP_TRACESEC_END, &shm0_end);
#else
status = cod_get_sym_value(cod_man, SHM0_SHARED_END_SYM,
&shm0_end);
#endif
if (status)
status = -EFAULT;
}
if (!status) {
status =
cod_get_sym_value(cod_man, DYNEXTBASE, &ul_dyn_ext_base);
if (status)
status = -EFAULT;
}
if (!status) {
status = cod_get_sym_value(cod_man, EXTEND, &ul_ext_end);
if (status)
status = -EFAULT;
}
if (!status) {
/* Get memory reserved in host resources */
(void)mgr_enum_processor_info(0, (struct dsp_processorinfo *)
&hio_mgr->ext_proc_info,
sizeof(struct
mgr_processorextinfo),
&num_procs);
/* Length (bytes) of messaging part of shared memory */
msg_sz = (s->msg_lim - s->msg_base + 1) * hio_mgr->word_size;
/* The first MMU TLB entry(TLB_0) in DCD is ShmBase. */
ndx = 0;
ul_gpp_pa = host_res->mem_phys[1];
ul_gpp_va = host_res->mem_base[1];
/* This is the virtual uncached ioremapped address!!! */
/* Why can't we directly take the DSPVA from the symbols? */
ul_dsp_va = hio_mgr->ext_proc_info.ty_tlb[0].dsp_virt;
ul_seg_size = (shm0_end - ul_dsp_va) * hio_mgr->word_size;
ul_seg1_size =
(ul_ext_end - ul_dyn_ext_base) * hio_mgr->word_size;
/* 4K align */
ul_seg1_size = (ul_seg1_size + 0xFFF) & (~0xFFFUL);
/* 64K align */
ul_seg_size = (ul_seg_size + 0xFFFF) & (~0xFFFFUL);
ul_pad_size = UL_PAGE_ALIGN_SIZE - ((ul_gpp_pa + ul_seg1_size) %
UL_PAGE_ALIGN_SIZE);
if (ul_pad_size == UL_PAGE_ALIGN_SIZE)
ul_pad_size = 0x0;
/* Total length (bytes) of shared memory: chnl + msg */
mem_sz = shm_sz + msg_sz;
dev_dbg(bridge, "%s: ul_gpp_pa %x, ul_gpp_va %x, ul_dsp_va %x, "
"shm0_end %x, ul_dyn_ext_base %x, ul_ext_end %x, "
"ul_seg_size %x ul_seg1_size %x \n", __func__,
ul_gpp_pa, ul_gpp_va, ul_dsp_va, shm0_end,
ul_dyn_ext_base, ul_ext_end, ul_seg_size, ul_seg1_size);
/* Get memory reserved in host resources */
(void)mgr_enum_processor_info(0,
(struct dsp_processorinfo *)
&hio_mgr->ext_proc_info,
sizeof(struct mgr_processorextinfo),
&num_procs);
if ((ul_seg_size + ul_seg1_size + ul_pad_size) >
host_res->mem_length[1]) {
pr_err("%s: shm Error, reserved 0x%x required 0x%x\n",
__func__, host_res->mem_length[1],
ul_seg_size + ul_seg1_size + ul_pad_size);
status = -ENOMEM;
}
/* IO supports only one DSP for now */
if (num_procs != 1) {
status = -EINVAL;
goto free_symbol;
}
if (status)
goto func_end;
pa_curr = ul_gpp_pa;
va_curr = ul_dyn_ext_base * hio_mgr->word_size;
gpp_va_curr = ul_gpp_va;
num_bytes = ul_seg1_size;
/* The first MMU TLB entry(TLB_0) in DCD is ShmBase */
pa = cfg_res->mem_phys[1];
va = cfg_res->mem_base[1];
/* This is the virtual uncached ioremapped address!!! */
/* Why can't we directly take the DSPVA from the symbols? */
da = hio_mgr->ext_proc_info.ty_tlb[0].dsp_virt;
seg0_sz = (s->shm0_end - da) * hio_mgr->word_size;
seg1_sz = (s->ext_end - s->dyn_ext) * hio_mgr->word_size;
/* 4K align */
seg1_sz = (seg1_sz + 0xFFF) & (~0xFFFUL);
/* 64K align */
seg0_sz = (seg0_sz + 0xFFFF) & (~0xFFFFUL);
pad_sz = UL_PAGE_ALIGN_SIZE - ((pa + seg1_sz) % UL_PAGE_ALIGN_SIZE);
if (pad_sz == UL_PAGE_ALIGN_SIZE)
pad_sz = 0x0;
dev_dbg(bridge, "%s: pa %x, va %x, da %x\n", __func__, pa, va, da);
dev_dbg(bridge,
"shm0_end %x, dyn_ext %x, ext_end %x, seg0_sz %x seg1_sz %x\n",
s->shm0_end, s->dyn_ext, s->ext_end, seg0_sz, seg1_sz);
if ((seg0_sz + seg1_sz + pad_sz) > cfg_res->mem_length[1]) {
pr_err("%s: shm Error, reserved 0x%x required 0x%x\n",
__func__, cfg_res->mem_length[1],
seg0_sz + seg1_sz + pad_sz);
status = -ENOMEM;
goto free_symbol;
}
pa_curr = pa;
va_curr = s->dyn_ext * hio_mgr->word_size;
da_curr = va;
bytes = seg1_sz;
/*
* Try to fit into TLB entries. If not possible, push them to page
@@ -459,37 +457,30 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
* bigger page boundary, we may end up making several small pages.
* So, push them onto page tables, if that is the case.
*/
map_attrs = 0x00000000;
map_attrs = DSP_MAPLITTLEENDIAN;
map_attrs |= DSP_MAPPHYSICALADDR;
map_attrs |= DSP_MAPELEMSIZE32;
map_attrs |= DSP_MAPDONOTLOCK;
while (num_bytes) {
while (bytes) {
/*
* To find the max. page size with which both PA & VA are
* aligned.
*/
all_bits = pa_curr | va_curr;
dev_dbg(bridge, "all_bits %x, pa_curr %x, va_curr %x, "
"num_bytes %x\n", all_bits, pa_curr, va_curr,
num_bytes);
dev_dbg(bridge,
"seg all_bits %x, pa_curr %x, va_curr %x, bytes %x\n",
all_bits, pa_curr, va_curr, bytes);
for (i = 0; i < 4; i++) {
if ((num_bytes >= page_size[i]) && ((all_bits &
(page_size[i] -
1)) == 0)) {
status =
hio_mgr->intf_fxns->
brd_mem_map(hio_mgr->bridge_context,
pa_curr, va_curr,
page_size[i], map_attrs,
NULL);
if ((bytes >= page_size[i]) &&
((all_bits & (page_size[i] - 1)) == 0)) {
status = hio_mgr->intf_fxns->brd_mem_map(dc,
pa_curr, va_curr,
page_size[i], map_attrs,
NULL);
if (status)
goto func_end;
goto free_symbol;
pa_curr += page_size[i];
va_curr += page_size[i];
gpp_va_curr += page_size[i];
num_bytes -= page_size[i];
da_curr += page_size[i];
bytes -= page_size[i];
/*
* Don't try smaller sizes. Hopefully we have
* reached an address aligned to a bigger page
@@ -499,71 +490,75 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
}
}
}
pa_curr += ul_pad_size;
va_curr += ul_pad_size;
gpp_va_curr += ul_pad_size;
pa_curr += pad_sz;
va_curr += pad_sz;
da_curr += pad_sz;
bytes = seg0_sz;
va_curr = da * hio_mgr->word_size;
eproc = kzalloc(sizeof(*eproc) * BRDIOCTL_NUMOFMMUTLB, GFP_KERNEL);
if (!eproc) {
status = -ENOMEM;
goto free_symbol;
}
ndx = 0;
/* Configure the TLB entries for the next cacheable segment */
num_bytes = ul_seg_size;
va_curr = ul_dsp_va * hio_mgr->word_size;
while (num_bytes) {
while (bytes) {
/*
* To find the max. page size with which both PA & VA are
* aligned.
*/
all_bits = pa_curr | va_curr;
dev_dbg(bridge, "all_bits for Seg1 %x, pa_curr %x, "
"va_curr %x, num_bytes %x\n", all_bits, pa_curr,
va_curr, num_bytes);
dev_dbg(bridge,
"seg1 all_bits %x, pa_curr %x, va_curr %x, bytes %x\n",
all_bits, pa_curr, va_curr, bytes);
for (i = 0; i < 4; i++) {
if (!(num_bytes >= page_size[i]) ||
if (!(bytes >= page_size[i]) ||
!((all_bits & (page_size[i] - 1)) == 0))
continue;
if (ndx < MAX_LOCK_TLB_ENTRIES) {
/*
* This is the physical address written to
* DSP MMU.
*/
ae_proc[ndx].gpp_pa = pa_curr;
/*
* This is the virtual uncached ioremapped
* address!!!
*/
ae_proc[ndx].gpp_va = gpp_va_curr;
ae_proc[ndx].dsp_va =
va_curr / hio_mgr->word_size;
ae_proc[ndx].size = page_size[i];
ae_proc[ndx].endianism = HW_LITTLE_ENDIAN;
ae_proc[ndx].elem_size = HW_ELEM_SIZE16BIT;
ae_proc[ndx].mixed_mode = HW_MMU_CPUES;
dev_dbg(bridge, "shm MMU TLB entry PA %x"
" VA %x DSP_VA %x Size %x\n",
ae_proc[ndx].gpp_pa,
ae_proc[ndx].gpp_va,
ae_proc[ndx].dsp_va *
hio_mgr->word_size, page_size[i]);
ndx++;
} else {
status =
hio_mgr->intf_fxns->
brd_mem_map(hio_mgr->bridge_context,
pa_curr, va_curr,
page_size[i], map_attrs,
NULL);
if (ndx >= MAX_LOCK_TLB_ENTRIES) {
status = hio_mgr->intf_fxns->brd_mem_map(dc,
pa_curr, va_curr,
page_size[i], map_attrs,
NULL);
dev_dbg(bridge,
"shm MMU PTE entry PA %x"
" VA %x DSP_VA %x Size %x\n",
ae_proc[ndx].gpp_pa,
ae_proc[ndx].gpp_va,
ae_proc[ndx].dsp_va *
"PTE pa %x va %x dsp_va %x sz %x\n",
eproc[ndx].gpp_pa,
eproc[ndx].gpp_va,
eproc[ndx].dsp_va *
hio_mgr->word_size, page_size[i]);
if (status)
goto func_end;
goto free_eproc;
}
/* This is the physical address written to DSP MMU */
eproc[ndx].gpp_pa = pa_curr;
/*
* This is the virtual uncached ioremapped
* address!!!
*/
eproc[ndx].gpp_va = da_curr;
eproc[ndx].dsp_va = va_curr / hio_mgr->word_size;
eproc[ndx].size = page_size[i];
eproc[ndx].endianism = HW_LITTLE_ENDIAN;
eproc[ndx].elem_size = HW_ELEM_SIZE16BIT;
eproc[ndx].mixed_mode = HW_MMU_CPUES;
dev_dbg(bridge, "%s: tlb pa %x va %x dsp_va %x sz %x\n",
__func__, eproc[ndx].gpp_pa,
eproc[ndx].gpp_va,
eproc[ndx].dsp_va * hio_mgr->word_size,
page_size[i]);
ndx++;
pa_curr += page_size[i];
va_curr += page_size[i];
gpp_va_curr += page_size[i];
num_bytes -= page_size[i];
da_curr += page_size[i];
bytes -= page_size[i];
/*
* Don't try smaller sizes. Hopefully we have reached
* an address aligned to a bigger page size.
@@ -577,146 +572,127 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
* should not conflict with shm entries on MPU or DSP side.
*/
for (i = 3; i < 7 && ndx < BRDIOCTL_NUMOFMMUTLB; i++) {
if (hio_mgr->ext_proc_info.ty_tlb[i].gpp_phys == 0)
struct mgr_processorextinfo *ep = &hio_mgr->ext_proc_info;
u32 word_sz = hio_mgr->word_size;
if (ep->ty_tlb[i].gpp_phys == 0)
continue;
if ((hio_mgr->ext_proc_info.ty_tlb[i].gpp_phys >
ul_gpp_pa - 0x100000
&& hio_mgr->ext_proc_info.ty_tlb[i].gpp_phys <=
ul_gpp_pa + ul_seg_size)
|| (hio_mgr->ext_proc_info.ty_tlb[i].dsp_virt >
ul_dsp_va - 0x100000 / hio_mgr->word_size
&& hio_mgr->ext_proc_info.ty_tlb[i].dsp_virt <=
ul_dsp_va + ul_seg_size / hio_mgr->word_size)) {
if ((ep->ty_tlb[i].gpp_phys > pa - 0x100000 &&
ep->ty_tlb[i].gpp_phys <= pa + seg0_sz) ||
(ep->ty_tlb[i].dsp_virt > da - 0x100000 / word_sz &&
ep->ty_tlb[i].dsp_virt <= da + seg0_sz / word_sz)) {
dev_dbg(bridge,
"CDB MMU entry %d conflicts with "
"shm.\n\tCDB: GppPa %x, DspVa %x.\n\tSHM: "
"GppPa %x, DspVa %x, Bytes %x.\n", i,
hio_mgr->ext_proc_info.ty_tlb[i].gpp_phys,
hio_mgr->ext_proc_info.ty_tlb[i].dsp_virt,
ul_gpp_pa, ul_dsp_va, ul_seg_size);
"err cdb%d pa %x da %x shm pa %x da %x sz %x\n",
i, ep->ty_tlb[i].gpp_phys,
ep->ty_tlb[i].dsp_virt, pa, da, seg0_sz);
status = -EPERM;
} else {
if (ndx < MAX_LOCK_TLB_ENTRIES) {
ae_proc[ndx].dsp_va =
hio_mgr->ext_proc_info.ty_tlb[i].
dsp_virt;
ae_proc[ndx].gpp_pa =
hio_mgr->ext_proc_info.ty_tlb[i].
gpp_phys;
ae_proc[ndx].gpp_va = 0;
/* 1 MB */
ae_proc[ndx].size = 0x100000;
dev_dbg(bridge, "shm MMU entry PA %x "
"DSP_VA 0x%x\n", ae_proc[ndx].gpp_pa,
ae_proc[ndx].dsp_va);
ndx++;
} else {
status = hio_mgr->intf_fxns->brd_mem_map
(hio_mgr->bridge_context,
hio_mgr->ext_proc_info.ty_tlb[i].
gpp_phys,
hio_mgr->ext_proc_info.ty_tlb[i].
dsp_virt, 0x100000, map_attrs,
NULL);
}
goto free_eproc;
}
if (status)
goto func_end;
}
map_attrs = 0x00000000;
map_attrs = DSP_MAPLITTLEENDIAN;
map_attrs |= DSP_MAPPHYSICALADDR;
map_attrs |= DSP_MAPELEMSIZE32;
map_attrs |= DSP_MAPDONOTLOCK;
if (ndx >= MAX_LOCK_TLB_ENTRIES) {
status = hio_mgr->intf_fxns->brd_mem_map(dc,
ep->ty_tlb[i].gpp_phys,
ep->ty_tlb[i].dsp_virt,
0x100000, map_attrs, NULL);
if (status)
goto free_eproc;
}
eproc[ndx].dsp_va = ep->ty_tlb[i].dsp_virt;
eproc[ndx].gpp_pa = ep->ty_tlb[i].gpp_phys;
eproc[ndx].gpp_va = 0;
/* 1 MB */
eproc[ndx].size = 0x100000;
dev_dbg(bridge, "shm MMU entry pa %x da 0x%x\n",
eproc[ndx].gpp_pa, eproc[ndx].dsp_va);
ndx++;
}
/* Map the L4 peripherals */
i = 0;
while (l4_peripheral_table[i].phys_addr) {
status = hio_mgr->intf_fxns->brd_mem_map
(hio_mgr->bridge_context, l4_peripheral_table[i].phys_addr,
l4_peripheral_table[i].dsp_virt_addr, HW_PAGE_SIZE4KB,
map_attrs, NULL);
status = hio_mgr->intf_fxns->brd_mem_map(dc,
l4_peripheral_table[i].phys_addr,
l4_peripheral_table[i].dsp_virt_addr,
HW_PAGE_SIZE4KB, map_attrs, NULL);
if (status)
goto func_end;
goto free_eproc;
i++;
}
for (i = ndx; i < BRDIOCTL_NUMOFMMUTLB; i++) {
ae_proc[i].dsp_va = 0;
ae_proc[i].gpp_pa = 0;
ae_proc[i].gpp_va = 0;
ae_proc[i].size = 0;
eproc[i].dsp_va = 0;
eproc[i].gpp_pa = 0;
eproc[i].gpp_va = 0;
eproc[i].size = 0;
}
/*
* Set the shm physical address entry (grayed out in CDB file)
* to the virtual uncached ioremapped address of shm reserved
* on MPU.
*/
hio_mgr->ext_proc_info.ty_tlb[0].gpp_phys =
(ul_gpp_va + ul_seg1_size + ul_pad_size);
(va + seg1_sz + pad_sz);
/*
* Need shm Phys addr. IO supports only one DSP for now:
* num_procs = 1.
*/
if (!hio_mgr->ext_proc_info.ty_tlb[0].gpp_phys || num_procs != 1) {
status = -EFAULT;
goto func_end;
} else {
if (ae_proc[0].dsp_va > ul_shm_base) {
status = -EPERM;
goto func_end;
}
/* ul_shm_base may not be at ul_dsp_va address */
ul_shm_base_offset = (ul_shm_base - ae_proc[0].dsp_va) *
hio_mgr->word_size;
/*
* bridge_dev_ctrl() will set dev context dsp-mmu info. In
* bridge_brd_start() the MMU will be re-programed with MMU
* DSPVa-GPPPa pair info while DSP is in a known
* (reset) state.
*/
if (!hio_mgr->ext_proc_info.ty_tlb[0].gpp_phys)
return -EFAULT;
status =
hio_mgr->intf_fxns->dev_cntrl(hio_mgr->bridge_context,
BRDIOCTL_SETMMUCONFIG,
ae_proc);
if (status)
goto func_end;
ul_shm_base = hio_mgr->ext_proc_info.ty_tlb[0].gpp_phys;
ul_shm_base += ul_shm_base_offset;
ul_shm_base = (u32) MEM_LINEAR_ADDRESS((void *)ul_shm_base,
ul_mem_length);
if (ul_shm_base == 0) {
status = -EFAULT;
goto func_end;
}
/* Register SM */
status =
register_shm_segs(hio_mgr, cod_man, ae_proc[0].gpp_pa);
if (eproc[0].dsp_va > s->shm_base)
return -EPERM;
/* shm_base may not be at ul_dsp_va address */
shm_base_offs = (s->shm_base - eproc[0].dsp_va) *
hio_mgr->word_size;
/*
* bridge_dev_ctrl() will set dev context dsp-mmu info. In
* bridge_brd_start() the MMU will be re-programed with MMU
* DSPVa-GPPPa pair info while DSP is in a known
* (reset) state.
*/
status = hio_mgr->intf_fxns->dev_cntrl(hio_mgr->bridge_context,
BRDIOCTL_SETMMUCONFIG, eproc);
if (status)
goto free_eproc;
s->shm_base = hio_mgr->ext_proc_info.ty_tlb[0].gpp_phys;
s->shm_base += shm_base_offs;
s->shm_base = (u32) MEM_LINEAR_ADDRESS((void *)s->shm_base,
mem_sz);
if (!s->shm_base) {
status = -EFAULT;
goto free_eproc;
}
hio_mgr->shared_mem = (struct shm *)ul_shm_base;
/* Register SM */
status = register_shm_segs(hio_mgr, cod_man, eproc[0].gpp_pa);
hio_mgr->shared_mem = (struct shm *)s->shm_base;
hio_mgr->input = (u8 *) hio_mgr->shared_mem + sizeof(struct shm);
hio_mgr->output = hio_mgr->input + (ul_shm_length -
hio_mgr->output = hio_mgr->input + (shm_sz -
sizeof(struct shm)) / 2;
hio_mgr->sm_buf_size = hio_mgr->output - hio_mgr->input;
/* Set up Shared memory addresses for messaging. */
hio_mgr->msg_input_ctrl = (struct msg_ctrl *)((u8 *) hio_mgr->shared_mem
+ ul_shm_length);
/* Set up Shared memory addresses for messaging */
hio_mgr->msg_input_ctrl =
(struct msg_ctrl *)((u8 *) hio_mgr->shared_mem + shm_sz);
hio_mgr->msg_input =
(u8 *) hio_mgr->msg_input_ctrl + sizeof(struct msg_ctrl);
(u8 *) hio_mgr->msg_input_ctrl + sizeof(struct msg_ctrl);
hio_mgr->msg_output_ctrl =
(struct msg_ctrl *)((u8 *) hio_mgr->msg_input_ctrl +
ul_msg_length / 2);
(struct msg_ctrl *)((u8 *) hio_mgr->msg_input_ctrl +
msg_sz / 2);
hio_mgr->msg_output =
(u8 *) hio_mgr->msg_output_ctrl + sizeof(struct msg_ctrl);
(u8 *) hio_mgr->msg_output_ctrl + sizeof(struct msg_ctrl);
hmsg_mgr->max_msgs =
((u8 *) hio_mgr->msg_output_ctrl - hio_mgr->msg_input)
/ sizeof(struct msg_dspmsg);
((u8 *) hio_mgr->msg_output_ctrl - hio_mgr->msg_input) /
sizeof(struct msg_dspmsg);
dev_dbg(bridge, "IO MGR shm details: shared_mem %p, input %p, "
"output %p, msg_input_ctrl %p, msg_input %p, "
"msg_output_ctrl %p, msg_output %p\n",
@@ -732,47 +708,53 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr)
/* Get the start address of trace buffer */
status = cod_get_sym_value(cod_man, SYS_PUTCBEG,
&hio_mgr->trace_buffer_begin);
if (status) {
status = -EFAULT;
goto func_end;
}
if (status)
goto free_eproc;
hio_mgr->gpp_read_pointer =
hio_mgr->trace_buffer_begin =
(va + seg1_sz + pad_sz) +
(hio_mgr->trace_buffer_begin - da);
hio_mgr->gpp_read_pointer = hio_mgr->trace_buffer_begin =
(ul_gpp_va + ul_seg1_size + ul_pad_size) +
(hio_mgr->trace_buffer_begin - ul_dsp_va);
/* Get the end address of trace buffer */
status = cod_get_sym_value(cod_man, SYS_PUTCEND,
&hio_mgr->trace_buffer_end);
if (status) {
status = -EFAULT;
goto func_end;
}
if (status)
goto free_eproc;
hio_mgr->trace_buffer_end =
(ul_gpp_va + ul_seg1_size + ul_pad_size) +
(hio_mgr->trace_buffer_end - ul_dsp_va);
(va + seg1_sz + pad_sz) +
(hio_mgr->trace_buffer_end - da);
/* Get the current address of DSP write pointer */
status = cod_get_sym_value(cod_man, BRIDGE_SYS_PUTC_CURRENT,
&hio_mgr->trace_buffer_current);
if (status) {
status = -EFAULT;
goto func_end;
}
if (status)
goto free_eproc;
hio_mgr->trace_buffer_current =
(ul_gpp_va + ul_seg1_size + ul_pad_size) +
(hio_mgr->trace_buffer_current - ul_dsp_va);
(va + seg1_sz + pad_sz) +
(hio_mgr->trace_buffer_current - da);
/* Calculate the size of trace buffer */
kfree(hio_mgr->msg);
hio_mgr->msg = kmalloc(((hio_mgr->trace_buffer_end -
hio_mgr->trace_buffer_begin) *
hio_mgr->word_size) + 2, GFP_KERNEL);
if (!hio_mgr->msg)
if (!hio_mgr->msg) {
status = -ENOMEM;
goto free_eproc;
}
hio_mgr->dsp_va = ul_dsp_va;
hio_mgr->gpp_va = (ul_gpp_va + ul_seg1_size + ul_pad_size);
hio_mgr->dsp_va = da;
hio_mgr->gpp_va = (va + seg1_sz + pad_sz);
#endif
func_end:
free_eproc:
kfree(eproc);
free_symbol:
kfree(s);
return status;
}