mirror of https://mirrors.bfsu.edu.cn/git/linux.git (synced 2024-11-19 10:14:23 +08:00)
commit a231b8839c
This patch only contains the ASID helper code taken from arm, for the next patch to use. The ASID allocator uses a five-level check to reduce the cost of switch_mm:

1. Check whether the ASID version is the same (the common case).
2. Check reserved_asid, which is set in the rollover flush_context(); the key point is to keep the same bit position with the current ASID version instead of the input version.
3. Check whether the bitmap position is free; if so, it can be set and used directly.
4. find_next_zero_bit() (a small performance cost).
5. flush_context() (the worst cost, and it also increments the current ASID version).

The checks are performed level by level, and each level costs more than the previous one. The reserved_asid and bitmap mechanisms prevent unnecessary calls to find_next_zero_bit(). The atomic 64-bit ASID is also suitable for 32-bit systems and does not cost much in the 1st/2nd/3rd level checks.

The set/clear of mm_cpumask was removed in arm64 compared to arm32. It seems to have no side effect on current arm64 systems, but from a software point of view it is wrong. Although csky does not need it either, we add it back for csky.

The asid_per_ctxt is of no use for csky; it reserves the lowest bits for other uses, maybe TrustZone? OK, just keep it in the csky copy. It seems it could also be used by other architectures, and it is worth moving the ASID code to generic code in the future.

Signed-off-by: Guo Ren <ren_guo@c-sky.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Julien Grall <julien.grall@arm.com>
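To make level 1 of the check concrete, here is a small sketch (the EXAMPLE_* names and the 8-bit width are assumptions for illustration, not part of this patch) of how the 64-bit value packs a generation ("version") in the high bits and the hardware ASID in the low bits, and how the "same version" test reduces to one xor and one shift, as asid_check_context() does in the header below:

#include <linux/types.h>

/* Illustration only: assume an 8-bit hardware ASID field. */
#define EXAMPLE_ASID_BITS	8

/* Low bits: the value that would be programmed into the MMU. */
static inline unsigned long example_hw_asid(u64 asid)
{
	return asid & ((1UL << EXAMPLE_ASID_BITS) - 1);
}

/* High bits: the generation compared by the level-1 check. */
static inline bool example_same_generation(u64 asid, u64 generation)
{
	/* Zero after the shift iff the generation fields match. */
	return !((asid ^ generation) >> EXAMPLE_ASID_BITS);
}

When the generations match, switch_mm can keep its current ASID and skip the allocator entirely; only after a rollover does the comparison fail and fall through to the slower levels.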
79 lines
2.4 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_ASM_ASID_H
#define __ASM_ASM_ASID_H

#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>

struct asid_info
{
	atomic64_t	generation;
	unsigned long	*map;
	atomic64_t __percpu	*active;
	u64 __percpu		*reserved;
	u32			bits;
	/* Lock protecting the structure */
	raw_spinlock_t		lock;
	/* Which CPU requires context flush on next call */
	cpumask_t		flush_pending;
	/* Number of ASID allocated by context (shift value) */
	unsigned int		ctxt_shift;
	/* Callback to locally flush the context. */
	void			(*flush_cpu_ctxt_cb)(void);
};

#define NUM_ASIDS(info)			(1UL << ((info)->bits))
#define NUM_CTXT_ASIDS(info)		(NUM_ASIDS(info) >> (info)->ctxt_shift)

#define active_asid(info, cpu)	*per_cpu_ptr((info)->active, cpu)

void asid_new_context(struct asid_info *info, atomic64_t *pasid,
		      unsigned int cpu, struct mm_struct *mm);

/*
 * Check the ASID is still valid for the context. If not generate a new ASID.
 *
 * @pasid: Pointer to the current ASID batch
 * @cpu: current CPU ID. Must have been acquired through get_cpu()
 */
static inline void asid_check_context(struct asid_info *info,
				      atomic64_t *pasid, unsigned int cpu,
				      struct mm_struct *mm)
{
	u64 asid, old_active_asid;

	asid = atomic64_read(pasid);

	/*
	 * The memory ordering here is subtle.
	 * If our active_asid is non-zero and the ASID matches the current
	 * generation, then we update the active_asid entry with a relaxed
	 * cmpxchg. Racing with a concurrent rollover means that either:
	 *
	 * - We get a zero back from the cmpxchg and end up waiting on the
	 *   lock. Taking the lock synchronises with the rollover and so
	 *   we are forced to see the updated generation.
	 *
	 * - We get a valid ASID back from the cmpxchg, which means the
	 *   relaxed xchg in flush_context will treat us as reserved
	 *   because atomic RmWs are totally ordered for a given location.
	 */
	old_active_asid = atomic64_read(&active_asid(info, cpu));
	if (old_active_asid &&
	    !((asid ^ atomic64_read(&info->generation)) >> info->bits) &&
	    atomic64_cmpxchg_relaxed(&active_asid(info, cpu),
				     old_active_asid, asid))
		return;

	asid_new_context(info, pasid, cpu, mm);
}

int asid_allocator_init(struct asid_info *info,
			u32 bits, unsigned int asid_per_ctxt,
			void (*flush_cpu_ctxt_cb)(void));

#endif
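Not part of this header, but a rough sketch of how an architecture could consume the API above. Everything not declared in the header (the example_* names, the mm->context.asid field, local_flush_tlb_all(), write_mmu_entryhi(), and the <asm/asid.h> include path) is an assumption for illustration, not taken from this patch:

#include <linux/init.h>
#include <linux/mm_types.h>
#include <asm/asid.h>			/* the header above; path assumed */

static struct asid_info example_asid_info;

/* Called by the allocator on this CPU when a rollover forces a flush. */
static void example_flush_cpu_ctxt(void)
{
	local_flush_tlb_all();		/* assumed arch-local TLB flush */
}

static int __init example_asid_init(void)
{
	/* e.g. 12 hardware ASID bits, one ASID per context (ctxt_shift == 0) */
	return asid_allocator_init(&example_asid_info, 12, 1,
				   example_flush_cpu_ctxt);
}
early_initcall(example_asid_init);

static void example_check_and_switch_context(struct mm_struct *mm,
					     unsigned int cpu)
{
	/* Revalidate the ASID; a new one is allocated after a rollover. */
	asid_check_context(&example_asid_info, &mm->context.asid, cpu, mm);

	/*
	 * Keep mm_cpumask up to date, as the commit message says is kept
	 * for csky (the clear side belongs in the teardown/rollover paths).
	 */
	cpumask_set_cpu(cpu, mm_cpumask(mm));

	/* Program only the low hardware-ASID bits into the MMU. */
	write_mmu_entryhi(atomic64_read(&mm->context.asid) &
			  (NUM_ASIDS(&example_asid_info) - 1));
}

The asid_per_ctxt argument is passed as 1 here (so ctxt_shift is 0), matching the commit message's note that csky does not use it.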