staging: erofs: fix `erofs_workgroup_{try_to_freeze, unfreeze}'

There are two minor issues in the current freeze interface:

   1) The freeze interfaces are unrelated to CONFIG_DEBUG_SPINLOCK,
      therefore fix the incorrect preprocessor conditions;

   2) On SMP platforms, preemption should also be disabled before
      doing atomic_cmpxchg, in case some high-priority task preempts
      between atomic_cmpxchg and preempt_disable and then spins
      on the locked refcount later.

Reviewed-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in:
Gao Xiang 2018-11-23 01:16:02 +08:00 committed by Greg Kroah-Hartman
parent df134b8d17
commit 73f5c66df3

View File

@ -194,40 +194,49 @@ struct erofs_workgroup {
#define EROFS_LOCKED_MAGIC (INT_MIN | 0xE0F510CCL) #define EROFS_LOCKED_MAGIC (INT_MIN | 0xE0F510CCL)
static inline bool erofs_workgroup_try_to_freeze( #if defined(CONFIG_SMP)
struct erofs_workgroup *grp, int v) static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp,
int val)
{ {
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
if (v != atomic_cmpxchg(&grp->refcount,
v, EROFS_LOCKED_MAGIC))
return false;
preempt_disable(); preempt_disable();
#else if (val != atomic_cmpxchg(&grp->refcount, val, EROFS_LOCKED_MAGIC)) {
preempt_disable();
if (atomic_read(&grp->refcount) != v) {
preempt_enable(); preempt_enable();
return false; return false;
} }
#endif
return true; return true;
} }
static inline void erofs_workgroup_unfreeze( static inline void erofs_workgroup_unfreeze(struct erofs_workgroup *grp,
struct erofs_workgroup *grp, int v) int orig_val)
{ {
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) atomic_set(&grp->refcount, orig_val);
atomic_set(&grp->refcount, v);
#endif
preempt_enable(); preempt_enable();
} }
#if defined(CONFIG_SMP)
static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp) static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
{ {
return atomic_cond_read_relaxed(&grp->refcount, return atomic_cond_read_relaxed(&grp->refcount,
VAL != EROFS_LOCKED_MAGIC); VAL != EROFS_LOCKED_MAGIC);
} }
#else #else
/*
 * UP variant: no other CPU can race us, so freezing only needs to keep the
 * current task from being preempted while it checks the refcount.
 * Returns true if the refcount matches @val (leaving preemption disabled
 * until the matching unfreeze), false otherwise.
 */
static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp,
						 int val)
{
	preempt_disable();
	/* no need to spin on UP platforms, let's just disable preemption. */
	if (val != atomic_read(&grp->refcount)) {
		preempt_enable();
		return false;
	}
	return true;
}
/*
 * UP variant: freezing never overwrote the refcount, so there is nothing
 * to restore — @orig_val is unused; just re-enable preemption.
 */
static inline void erofs_workgroup_unfreeze(struct erofs_workgroup *grp,
					    int orig_val)
{
	preempt_enable();
}
static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp) static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
{ {
int v = atomic_read(&grp->refcount); int v = atomic_read(&grp->refcount);