x86/vector: Rename send_cleanup_vector() to vector_schedule_cleanup()
Rename send_cleanup_vector() to vector_schedule_cleanup() to prepare for
replacing the vector cleanup IPI with a timer callback.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Xin Li <xin3.li@intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Steve Wahl <steve.wahl@hpe.com>
Link: https://lore.kernel.org/r/20230621171248.6805-2-xin3.li@intel.com
This commit is contained in:
parent 5d0c230f1d
commit a539cc86a1
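For orientation, the call-site shape this patch touches is the same everywhere: an irq_set_affinity() callback moves the interrupt to its new destination and then asks the vector core to clean up the vector left behind on the old CPU. A minimal sketch of that pattern follows, assuming a hypothetical my_ir_set_affinity()/my_hw_program() driver pair; everything except vector_schedule_cleanup(), irqd_cfg() and the core irq types is illustrative, not code from this patch:

static int my_ir_set_affinity(struct irq_data *data,
			      const struct cpumask *mask, bool force)
{
	struct irq_data *parent = data->parent_data;
	struct irq_cfg *cfg = irqd_cfg(data);
	int ret;

	/* Let the x86 vector domain pick the new target vector/CPU first. */
	ret = parent->chip->irq_set_affinity(parent, mask, force);
	if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
		return ret;

	/* Reprogram the interrupt-remapping/controller hardware (placeholder). */
	my_hw_program(data, cfg);

	/* Was send_cleanup_vector(cfg); the new name makes the deferral explicit. */
	vector_schedule_cleanup(cfg);

	return IRQ_SET_MASK_OK_DONE;
}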
@@ -97,10 +97,10 @@ extern struct irq_cfg *irqd_cfg(struct irq_data *irq_data);
 extern void lock_vector_lock(void);
 extern void unlock_vector_lock(void);
 #ifdef CONFIG_SMP
-extern void send_cleanup_vector(struct irq_cfg *);
+extern void vector_schedule_cleanup(struct irq_cfg *);
 extern void irq_complete_move(struct irq_cfg *cfg);
 #else
-static inline void send_cleanup_vector(struct irq_cfg *c) { }
+static inline void vector_schedule_cleanup(struct irq_cfg *c) { }
 static inline void irq_complete_move(struct irq_cfg *c) { }
 #endif
 
@@ -967,7 +967,7 @@ DEFINE_IDTENTRY_SYSVEC(sysvec_irq_move_cleanup)
 	raw_spin_unlock(&vector_lock);
 }
 
-static void __send_cleanup_vector(struct apic_chip_data *apicd)
+static void __vector_schedule_cleanup(struct apic_chip_data *apicd)
 {
 	unsigned int cpu;
 
@@ -983,13 +983,13 @@ static void __send_cleanup_vector(struct apic_chip_data *apicd)
 	raw_spin_unlock(&vector_lock);
 }
 
-void send_cleanup_vector(struct irq_cfg *cfg)
+void vector_schedule_cleanup(struct irq_cfg *cfg)
 {
 	struct apic_chip_data *apicd;
 
 	apicd = container_of(cfg, struct apic_chip_data, hw_irq_cfg);
 	if (apicd->move_in_progress)
-		__send_cleanup_vector(apicd);
+		__vector_schedule_cleanup(apicd);
 }
 
 void irq_complete_move(struct irq_cfg *cfg)
@@ -1007,7 +1007,7 @@ void irq_complete_move(struct irq_cfg *cfg)
 	 * on the same CPU.
 	 */
 	if (apicd->cpu == smp_processor_id())
-		__send_cleanup_vector(apicd);
+		__vector_schedule_cleanup(apicd);
 }
 
 /*
@@ -58,7 +58,7 @@ uv_set_irq_affinity(struct irq_data *data, const struct cpumask *mask,
 	ret = parent->chip->irq_set_affinity(parent, mask, force);
 	if (ret >= 0) {
 		uv_program_mmr(cfg, data->chip_data);
-		send_cleanup_vector(cfg);
+		vector_schedule_cleanup(cfg);
 	}
 
 	return ret;
@@ -3681,7 +3681,7 @@ static int amd_ir_set_affinity(struct irq_data *data,
	 * at the new destination. So, time to cleanup the previous
	 * vector allocation.
	 */
-	send_cleanup_vector(cfg);
+	vector_schedule_cleanup(cfg);
 
	return IRQ_SET_MASK_OK_DONE;
 }
@@ -51,7 +51,7 @@ static int hyperv_ir_set_affinity(struct irq_data *data,
	if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
		return ret;
 
-	send_cleanup_vector(cfg);
+	vector_schedule_cleanup(cfg);
 
	return 0;
 }
@@ -257,7 +257,7 @@ static int hyperv_root_ir_set_affinity(struct irq_data *data,
	if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
		return ret;
 
-	send_cleanup_vector(cfg);
+	vector_schedule_cleanup(cfg);
 
	return 0;
 }
@@ -1176,7 +1176,7 @@ intel_ir_set_affinity(struct irq_data *data, const struct cpumask *mask,
	 * at the new destination. So, time to cleanup the previous
	 * vector allocation.
	 */
-	send_cleanup_vector(cfg);
+	vector_schedule_cleanup(cfg);
 
	return IRQ_SET_MASK_OK_DONE;
 }