
zsmalloc: use bit_spin_lock

Use the kernel's standard bit spin-lock instead of the custom
implementation.  The custom version even has a bug: it does not disable
preemption.  The reason this has not caused any problem is that it has
only been used inside sections where preemption is already disabled by
the class->lock spinlock, so there is no need to backport this to
stable.

Link: http://lkml.kernel.org/r/1464736881-24886-6-git-send-email-minchan@kernel.org
Signed-off-by: Minchan Kim <minchan@kernel.org>
Reviewed-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Authored by Minchan Kim on 2016-07-26 15:23:14 -07:00, committed by Linus Torvalds
parent 1fc6e27d7b
commit 1b8320b620


@@ -868,21 +868,17 @@ static unsigned long obj_idx_to_offset(struct page *page,
 
 static inline int trypin_tag(unsigned long handle)
 {
-	unsigned long *ptr = (unsigned long *)handle;
-
-	return !test_and_set_bit_lock(HANDLE_PIN_BIT, ptr);
+	return bit_spin_trylock(HANDLE_PIN_BIT, (unsigned long *)handle);
 }
 
 static void pin_tag(unsigned long handle)
 {
-	while (!trypin_tag(handle));
+	bit_spin_lock(HANDLE_PIN_BIT, (unsigned long *)handle);
 }
 
 static void unpin_tag(unsigned long handle)
 {
-	unsigned long *ptr = (unsigned long *)handle;
-
-	clear_bit_unlock(HANDLE_PIN_BIT, ptr);
+	bit_spin_unlock(HANDLE_PIN_BIT, (unsigned long *)handle);
 }
 
 static void reset_page(struct page *page)
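
For context, the bit spin-lock helpers used above come from <linux/bit_spinlock.h> and implement a spinlock on a single bit of an arbitrary word: bit_spin_lock() spins until it owns the bit and keeps preemption disabled while the bit is held, bit_spin_trylock() is the non-blocking variant, and bit_spin_unlock() clears the bit with release semantics and re-enables preemption. The sketch below is not part of the patch; it only illustrates the same pin/trypin/unpin pattern on a hypothetical per-object flags word (struct obj, OBJ_PIN_BIT, and the helper names are made up for illustration).

#include <linux/bit_spinlock.h>

/* Illustrative only: bit 0 of @flags acts as a per-object pin lock. */
#define OBJ_PIN_BIT	0

struct obj {
	unsigned long flags;
};

static void obj_pin(struct obj *o)
{
	/* Spins until the bit is acquired; preemption stays disabled until unlock. */
	bit_spin_lock(OBJ_PIN_BIT, &o->flags);
}

static int obj_trypin(struct obj *o)
{
	/* Non-blocking attempt; returns nonzero on success, like trypin_tag(). */
	return bit_spin_trylock(OBJ_PIN_BIT, &o->flags);
}

static void obj_unpin(struct obj *o)
{
	/* Clears the bit with release semantics and re-enables preemption. */
	bit_spin_unlock(OBJ_PIN_BIT, &o->flags);
}

This is also why the conversion fixes the latent bug noted in the changelog: the old open-coded test_and_set_bit_lock() loop never touched the preempt count, whereas the bit_spin_* helpers handle preemption as part of the lock and unlock paths.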