
staging: tidspbridge - deprecate reserve/unreserve_memory functions

Now that iommu memory usage is tracked by the iommu module,
the reserve/unreserve_memory functions are no longer needed.

Signed-off-by: Fernando Guzman Lugo <x0095840@ti.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Fernando Guzman Lugo 2010-10-05 15:35:43 -05:00 committed by Greg Kroah-Hartman
parent db348ca36e
commit b1ced160af
4 changed files with 17 additions and 197 deletions
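For context, the ioctl wrappers in the dispatcher are not removed outright; they are kept as __deprecated stubs so the interface exposed to userspace stays intact while the reservation bookkeeping moves into the iommu code. A minimal sketch of that pattern, using the names from the diff (bodies are illustrative only and assume the driver's existing headers):

u32 __deprecated procwrap_reserve_memory(union trapped_args *args,
					 void *pr_ctxt)
{
	/* Reservation tracking now happens in the iommu module. */
	return 0;
}

u32 __deprecated procwrap_un_reserve_memory(union trapped_args *args,
					    void *pr_ctxt)
{
	/* Nothing left to release here; succeed unconditionally. */
	return 0;
}

Keeping the stubs means older userspace that still issues the reserve/unreserve calls continues to get a success return instead of an error, while the kernel no longer maintains a separate DMM reservation list.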


@@ -550,29 +550,6 @@ extern int proc_map(void *hprocessor,
void **pp_map_addr, u32 ul_map_attr,
struct process_context *pr_ctxt);
/*
* ======== proc_reserve_memory ========
* Purpose:
* Reserve a virtually contiguous region of DSP address space.
* Parameters:
* hprocessor : The processor handle.
* ul_size : Size of the address space to reserve.
* pp_rsv_addr : Ptr to DSP side reserved u8 address.
* Returns:
* 0 : Success.
* -EFAULT : Invalid processor handle.
* -EPERM : General failure.
* -ENOMEM : Cannot reserve chunk of this size.
* Requires:
* pp_rsv_addr is not NULL
* PROC Initialized.
* Ensures:
* Details:
*/
extern int proc_reserve_memory(void *hprocessor,
u32 ul_size, void **pp_rsv_addr,
struct process_context *pr_ctxt);
/*
* ======== proc_un_map ========
* Purpose:
@@ -595,27 +572,4 @@ extern int proc_reserve_memory(void *hprocessor,
extern int proc_un_map(void *hprocessor, void *map_addr,
struct process_context *pr_ctxt);
/*
* ======== proc_un_reserve_memory ========
* Purpose:
* Frees a previously reserved region of DSP address space.
* Parameters:
* hprocessor : The processor handle.
* prsv_addr : Ptr to DSP side reserved BYTE address.
* Returns:
* 0 : Success.
* -EFAULT : Invalid processor handle.
* -EPERM : General failure.
* -ENOENT : Cannot find a reserved region starting with this
* : address.
* Requires:
* prsv_addr is not NULL
* PROC Initialized.
* Ensures:
* Details:
*/
extern int proc_un_reserve_memory(void *hprocessor,
void *prsv_addr,
struct process_context *pr_ctxt);
#endif /* PROC_ */


@@ -989,27 +989,10 @@ u32 procwrap_register_notify(union trapped_args *args, void *pr_ctxt)
/*
* ======== procwrap_reserve_memory ========
*/
u32 procwrap_reserve_memory(union trapped_args *args, void *pr_ctxt)
u32 __deprecated procwrap_reserve_memory(union trapped_args *args,
void *pr_ctxt)
{
int status;
void *prsv_addr;
void *hprocessor = ((struct process_context *)pr_ctxt)->hprocessor;
if ((args->args_proc_rsvmem.ul_size <= 0) ||
(args->args_proc_rsvmem.ul_size & (PG_SIZE4K - 1)) != 0)
return -EINVAL;
status = proc_reserve_memory(hprocessor,
args->args_proc_rsvmem.ul_size, &prsv_addr,
pr_ctxt);
if (!status) {
if (put_user(prsv_addr, args->args_proc_rsvmem.pp_rsv_addr)) {
status = -EINVAL;
proc_un_reserve_memory(args->args_proc_rsvmem.
hprocessor, prsv_addr, pr_ctxt);
}
}
return status;
return 0;
}
/*
@@ -1038,15 +1021,10 @@ u32 procwrap_un_map(union trapped_args *args, void *pr_ctxt)
/*
* ======== procwrap_un_reserve_memory ========
*/
u32 procwrap_un_reserve_memory(union trapped_args *args, void *pr_ctxt)
u32 __deprecated procwrap_un_reserve_memory(union trapped_args *args,
void *pr_ctxt)
{
int status;
void *hprocessor = ((struct process_context *)pr_ctxt)->hprocessor;
status = proc_un_reserve_memory(hprocessor,
args->args_proc_unrsvmem.prsv_addr,
pr_ctxt);
return status;
return 0;
}
/*


@@ -430,17 +430,6 @@ int node_allocate(struct proc_object *hprocessor,
if (status)
goto func_cont;
status = proc_reserve_memory(hprocessor,
pnode->create_args.asa.task_arg_obj.
heap_size + PAGE_SIZE,
(void **)&(pnode->create_args.asa.
task_arg_obj.udsp_heap_res_addr),
pr_ctxt);
if (status) {
pr_err("%s: Failed to reserve memory for heap: 0x%x\n",
__func__, status);
goto func_cont;
}
#ifdef DSP_DMM_DEBUG
status = dmm_get_handle(p_proc_object, &dmm_mgr);
if (!dmm_mgr) {
@@ -456,8 +445,7 @@ int node_allocate(struct proc_object *hprocessor,
map_attrs |= DSP_MAPVIRTUALADDR;
status = proc_map(hprocessor, (void *)attr_in->pgpp_virt_addr,
pnode->create_args.asa.task_arg_obj.heap_size,
(void *)pnode->create_args.asa.task_arg_obj.
udsp_heap_res_addr, (void **)&mapped_addr, map_attrs,
NULL, (void **)&mapped_addr, map_attrs,
pr_ctxt);
if (status)
pr_err("%s: Failed to map memory for Heap: 0x%x\n",
@@ -2576,12 +2564,6 @@ static void delete_node(struct node_object *hnode,
status = proc_un_map(hnode->hprocessor, (void *)
task_arg_obj.udsp_heap_addr,
pr_ctxt);
status = proc_un_reserve_memory(hnode->hprocessor,
(void *)
task_arg_obj.
udsp_heap_res_addr,
pr_ctxt);
#ifdef DSP_DMM_DEBUG
status = dmm_get_handle(p_proc_object, &dmm_mgr);
if (dmm_mgr)


@@ -152,34 +152,21 @@ static struct dmm_map_object *add_mapping_info(struct process_context *pr_ctxt,
return map_obj;
}
static int match_exact_map_obj(struct dmm_map_object *map_obj,
u32 dsp_addr, u32 size)
{
if (map_obj->dsp_addr == dsp_addr && map_obj->size != size)
pr_err("%s: addr match (0x%x), size don't (0x%x != 0x%x)\n",
__func__, dsp_addr, map_obj->size, size);
return map_obj->dsp_addr == dsp_addr &&
map_obj->size == size;
}
static void remove_mapping_information(struct process_context *pr_ctxt,
u32 dsp_addr, u32 size)
u32 dsp_addr)
{
struct dmm_map_object *map_obj;
pr_debug("%s: looking for virt 0x%x size 0x%x\n", __func__,
dsp_addr, size);
pr_debug("%s: looking for virt 0x%x\n", __func__, dsp_addr);
spin_lock(&pr_ctxt->dmm_map_lock);
list_for_each_entry(map_obj, &pr_ctxt->dmm_map_list, link) {
pr_debug("%s: candidate: mpu_addr 0x%x virt 0x%x size 0x%x\n",
pr_debug("%s: candidate: mpu_addr 0x%x virt 0x%x\n",
__func__,
map_obj->mpu_addr,
map_obj->dsp_addr,
map_obj->size);
map_obj->dsp_addr);
if (match_exact_map_obj(map_obj, dsp_addr, size)) {
if (map_obj->dsp_addr == dsp_addr) {
pr_debug("%s: match, deleting map info\n", __func__);
list_del(&map_obj->link);
kfree(map_obj->dma_info.sg);
@@ -1353,7 +1340,6 @@ int proc_map(void *hprocessor, void *pmpu_addr, u32 ul_size,
{
u32 va_align;
u32 pa_align;
struct dmm_object *dmm_mgr;
u32 size_align;
int status = 0;
struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
@@ -1382,11 +1368,6 @@ int proc_map(void *hprocessor, void *pmpu_addr, u32 ul_size,
}
/* Critical section */
mutex_lock(&proc_lock);
dmm_get_handle(p_proc_object, &dmm_mgr);
if (dmm_mgr)
status = dmm_map_memory(dmm_mgr, va_align, size_align);
else
status = -EFAULT;
/* Add mapping to the page tables. */
if (!status) {
@@ -1409,9 +1390,9 @@ int proc_map(void *hprocessor, void *pmpu_addr, u32 ul_size,
map_obj->dsp_addr = (va_align |
((u32)pmpu_addr & (PG_SIZE4K - 1)));
*pp_map_addr = (void *)map_obj->dsp_addr;
pr_err("%s: mapped address %x\n", __func__, *pp_map_addr);
} else {
remove_mapping_information(pr_ctxt, va_align, size_align);
dmm_un_map_memory(dmm_mgr, va_align, &size_align);
remove_mapping_information(pr_ctxt, va_align);
}
mutex_unlock(&proc_lock);
@@ -1503,38 +1484,6 @@ func_end:
return status;
}
/*
* ======== proc_reserve_memory ========
* Purpose:
* Reserve a virtually contiguous region of DSP address space.
*/
int proc_reserve_memory(void *hprocessor, u32 ul_size,
void **pp_rsv_addr,
struct process_context *pr_ctxt)
{
struct dmm_object *dmm_mgr;
int status = 0;
struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
if (!p_proc_object) {
status = -EFAULT;
goto func_end;
}
status = dmm_get_handle(p_proc_object, &dmm_mgr);
if (!dmm_mgr) {
status = -EFAULT;
goto func_end;
}
status = dmm_reserve_memory(dmm_mgr, ul_size, (u32 *) pp_rsv_addr);
func_end:
dev_dbg(bridge, "%s: hprocessor: 0x%p ul_size: 0x%x pp_rsv_addr: 0x%p "
"status 0x%x\n", __func__, hprocessor,
ul_size, pp_rsv_addr, status);
return status;
}
/*
* ======== proc_start ========
* Purpose:
@@ -1683,7 +1632,6 @@ int proc_un_map(void *hprocessor, void *map_addr,
{
int status = 0;
struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
struct dmm_object *dmm_mgr;
u32 va_align;
u32 size_align;
@@ -1693,23 +1641,11 @@ int proc_un_map(void *hprocessor, void *map_addr,
goto func_end;
}
status = dmm_get_handle(hprocessor, &dmm_mgr);
if (!dmm_mgr) {
status = -EFAULT;
goto func_end;
}
/* Critical section */
mutex_lock(&proc_lock);
/*
* Update DMM structures. Get the size to unmap.
* This function returns error if the VA is not mapped
*/
status = dmm_un_map_memory(dmm_mgr, (u32) va_align, &size_align);
/* Remove mapping from the page tables. */
if (!status)
status = user_to_dsp_unmap(
p_proc_object->hbridge_context->dsp_mmu, va_align);
status = user_to_dsp_unmap(p_proc_object->hbridge_context->dsp_mmu,
va_align);
mutex_unlock(&proc_lock);
if (status)
@@ -1720,7 +1656,7 @@ int proc_un_map(void *hprocessor, void *map_addr,
* from dmm_map_list, so that mapped memory resource tracking
* remains uptodate
*/
remove_mapping_information(pr_ctxt, (u32) map_addr, size_align);
remove_mapping_information(pr_ctxt, (u32) map_addr);
func_end:
dev_dbg(bridge, "%s: hprocessor: 0x%p map_addr: 0x%p status: 0x%x\n",
@@ -1728,36 +1664,6 @@ func_end:
return status;
}
/*
* ======== proc_un_reserve_memory ========
* Purpose:
* Frees a previously reserved region of DSP address space.
*/
int proc_un_reserve_memory(void *hprocessor, void *prsv_addr,
struct process_context *pr_ctxt)
{
struct dmm_object *dmm_mgr;
int status = 0;
struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
if (!p_proc_object) {
status = -EFAULT;
goto func_end;
}
status = dmm_get_handle(p_proc_object, &dmm_mgr);
if (!dmm_mgr) {
status = -EFAULT;
goto func_end;
}
status = dmm_un_reserve_memory(dmm_mgr, (u32) prsv_addr);
func_end:
dev_dbg(bridge, "%s: hprocessor: 0x%p prsv_addr: 0x%p status: 0x%x\n",
__func__, hprocessor, prsv_addr, status);
return status;
}
/*
* ======== proc_monitor ========
* Purpose: