Mirror of https://mirrors.bfsu.edu.cn/git/linux.git (last synced 2024-12-20 01:24:39 +08:00).
RDMA/rxe: Delete _locked() APIs for pool objects
Since caller-managed locks for indexed objects are no longer used, these APIs are deleted.
Link: https://lore.kernel.org/r/20220304000808.225811-5-rpearsonhpe@gmail.com
Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
This commit is contained in:
parent
c9f4c69583
commit
3c3e4d582b
@ -189,17 +189,6 @@ static int rxe_insert_index(struct rxe_pool *pool, struct rxe_pool_elem *new)
|
||||
return 0;
|
||||
}
|
||||
|
||||
int __rxe_add_index_locked(struct rxe_pool_elem *elem)
|
||||
{
|
||||
struct rxe_pool *pool = elem->pool;
|
||||
int err;
|
||||
|
||||
elem->index = alloc_index(pool);
|
||||
err = rxe_insert_index(pool, elem);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
int __rxe_add_index(struct rxe_pool_elem *elem)
|
||||
{
|
||||
struct rxe_pool *pool = elem->pool;
|
||||
@ -207,55 +196,24 @@ int __rxe_add_index(struct rxe_pool_elem *elem)
|
||||
int err;
|
||||
|
||||
write_lock_irqsave(&pool->pool_lock, flags);
|
||||
err = __rxe_add_index_locked(elem);
|
||||
elem->index = alloc_index(pool);
|
||||
err = rxe_insert_index(pool, elem);
|
||||
write_unlock_irqrestore(&pool->pool_lock, flags);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
void __rxe_drop_index_locked(struct rxe_pool_elem *elem)
|
||||
{
|
||||
struct rxe_pool *pool = elem->pool;
|
||||
|
||||
clear_bit(elem->index - pool->index.min_index, pool->index.table);
|
||||
rb_erase(&elem->index_node, &pool->index.tree);
|
||||
}
|
||||
|
||||
void __rxe_drop_index(struct rxe_pool_elem *elem)
|
||||
{
|
||||
struct rxe_pool *pool = elem->pool;
|
||||
unsigned long flags;
|
||||
|
||||
write_lock_irqsave(&pool->pool_lock, flags);
|
||||
__rxe_drop_index_locked(elem);
|
||||
clear_bit(elem->index - pool->index.min_index, pool->index.table);
|
||||
rb_erase(&elem->index_node, &pool->index.tree);
|
||||
write_unlock_irqrestore(&pool->pool_lock, flags);
|
||||
}
|
||||
|
||||
void *rxe_alloc_locked(struct rxe_pool *pool)
|
||||
{
|
||||
struct rxe_pool_elem *elem;
|
||||
void *obj;
|
||||
|
||||
if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
|
||||
goto out_cnt;
|
||||
|
||||
obj = kzalloc(pool->elem_size, GFP_ATOMIC);
|
||||
if (!obj)
|
||||
goto out_cnt;
|
||||
|
||||
elem = (struct rxe_pool_elem *)((u8 *)obj + pool->elem_offset);
|
||||
|
||||
elem->pool = pool;
|
||||
elem->obj = obj;
|
||||
kref_init(&elem->ref_cnt);
|
||||
|
||||
return obj;
|
||||
|
||||
out_cnt:
|
||||
atomic_dec(&pool->num_elem);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
void *rxe_alloc(struct rxe_pool *pool)
|
||||
{
|
||||
struct rxe_pool_elem *elem;
|
||||
@ -321,12 +279,14 @@ void rxe_elem_release(struct kref *kref)
|
||||
atomic_dec(&pool->num_elem);
|
||||
}
|
||||
|
||||
void *rxe_pool_get_index_locked(struct rxe_pool *pool, u32 index)
|
||||
void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)
|
||||
{
|
||||
struct rb_node *node;
|
||||
struct rxe_pool_elem *elem;
|
||||
struct rb_node *node;
|
||||
unsigned long flags;
|
||||
void *obj;
|
||||
|
||||
read_lock_irqsave(&pool->pool_lock, flags);
|
||||
node = pool->index.tree.rb_node;
|
||||
|
||||
while (node) {
|
||||
@ -346,17 +306,6 @@ void *rxe_pool_get_index_locked(struct rxe_pool *pool, u32 index)
|
||||
} else {
|
||||
obj = NULL;
|
||||
}
|
||||
|
||||
return obj;
|
||||
}
|
||||
|
||||
/* rxe_pool_get_index - old wrapper that looked up an indexed object under
 * the pool's read lock by delegating to rxe_pool_get_index_locked(), which
 * takes a reference on the object when found (removed by this commit; the
 * lookup is now done directly in rxe_pool_get_index()).
 *
 * Returns the referenced object or NULL if @index is not in the tree.
 */
void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)
{
	unsigned long flags;
	void *obj;

	read_lock_irqsave(&pool->pool_lock, flags);
	obj = rxe_pool_get_index_locked(pool, index);
	read_unlock_irqrestore(&pool->pool_lock, flags);

	return obj;
}
@ -68,9 +68,7 @@ int rxe_pool_init(struct rxe_dev *rxe, struct rxe_pool *pool,
|
||||
/* free resources from object pool */
|
||||
void rxe_pool_cleanup(struct rxe_pool *pool);
|
||||
|
||||
/* allocate an object from pool holding and not holding the pool lock */
|
||||
void *rxe_alloc_locked(struct rxe_pool *pool);
|
||||
|
||||
/* allocate an object from pool */
|
||||
void *rxe_alloc(struct rxe_pool *pool);
|
||||
|
||||
/* connect already allocated object to pool */
|
||||
@ -79,32 +77,18 @@ int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem);
|
||||
#define rxe_add_to_pool(pool, obj) __rxe_add_to_pool(pool, &(obj)->elem)
|
||||
|
||||
/* assign an index to an indexed object and insert object into
|
||||
* pool's rb tree holding and not holding the pool_lock
|
||||
* pool's rb tree
|
||||
*/
|
||||
int __rxe_add_index_locked(struct rxe_pool_elem *elem);
|
||||
|
||||
#define rxe_add_index_locked(obj) __rxe_add_index_locked(&(obj)->elem)
|
||||
|
||||
int __rxe_add_index(struct rxe_pool_elem *elem);
|
||||
|
||||
#define rxe_add_index(obj) __rxe_add_index(&(obj)->elem)
|
||||
|
||||
/* drop an index and remove object from rb tree
|
||||
* holding and not holding the pool_lock
|
||||
*/
|
||||
void __rxe_drop_index_locked(struct rxe_pool_elem *elem);
|
||||
|
||||
#define rxe_drop_index_locked(obj) __rxe_drop_index_locked(&(obj)->elem)
|
||||
|
||||
/* drop an index and remove object from rb tree */
|
||||
void __rxe_drop_index(struct rxe_pool_elem *elem);
|
||||
|
||||
#define rxe_drop_index(obj) __rxe_drop_index(&(obj)->elem)
|
||||
|
||||
/* lookup an indexed object from index holding and not holding the pool_lock.
|
||||
* takes a reference on object
|
||||
*/
|
||||
void *rxe_pool_get_index_locked(struct rxe_pool *pool, u32 index);
|
||||
|
||||
/* lookup an indexed object from index. takes a reference on object */
|
||||
void *rxe_pool_get_index(struct rxe_pool *pool, u32 index);
|
||||
|
||||
/* cleanup an object when all references are dropped */
|
||||
|
Loading…
Reference in New Issue
Block a user