Mirror of https://github.com/edk2-porting/linux-next.git (synced 2024-12-19 02:34:01 +08:00)
x86/paravirt: Remove clobbers parameter from paravirt patch functions
The clobbers parameter from paravirt_patch_default() et al isn't used
any longer. Remove it.

Signed-off-by: Juergen Gross <jgross@suse.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Cc: xen-devel@lists.xenproject.org
Cc: virtualization@lists.linux-foundation.org
Cc: akataria@vmware.com
Cc: rusty@rustcorp.com.au
Cc: boris.ostrovsky@oracle.com
Cc: hpa@zytor.com
Link: https://lkml.kernel.org/r/20180828074026.820-7-jgross@suse.com
This commit is contained in:
parent 7e43720289
commit abc745f85c
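For context before the diff, a minimal standalone sketch (not kernel code; names, types and bodies are illustrative) of the interface change: the clobber mask was threaded through every patch routine but never read, so the parameter can be dropped from the prototypes and from every call site.

#include <stdint.h>
#include <string.h>

/* Before: every patch routine carried a clobber mask it ignored. */
static unsigned patch_old(uint8_t type, uint16_t clobbers, void *insnbuf,
			  unsigned long addr, unsigned len)
{
	(void)type; (void)clobbers; (void)addr;	/* clobbers is dead */
	memset(insnbuf, 0x90, len);	/* e.g. fill the site with NOPs */
	return len;
}

/* After: identical behavior, one dead argument fewer at each call site. */
static unsigned patch_new(uint8_t type, void *insnbuf,
			  unsigned long addr, unsigned len)
{
	(void)type; (void)addr;
	memset(insnbuf, 0x90, len);
	return len;
}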
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -85,7 +85,7 @@ struct pv_init_ops {
 	 * the number of bytes of code generated, as we nop pad the
 	 * rest in generic code.
 	 */
-	unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
+	unsigned (*patch)(u8 type, void *insnbuf,
			  unsigned long addr, unsigned len);
 } __no_randomize_layout;
@@ -373,14 +373,13 @@ extern struct pv_lock_ops pv_lock_ops;
 
 unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len);
 unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len);
-unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
+unsigned paravirt_patch_default(u8 type, void *insnbuf,
				unsigned long addr, unsigned len);
 
 unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
			      const char *start, const char *end);
 
-unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
-		      unsigned long addr, unsigned len);
+unsigned native_patch(u8 type, void *ibuf, unsigned long addr, unsigned len);
 
 int paravirt_disable_iospace(void);
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -594,7 +594,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
 		BUG_ON(p->len > MAX_PATCH_LEN);
 		/* prep the buffer with the original instructions */
 		memcpy(insnbuf, p->instr, p->len);
-		used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
+		used = pv_init_ops.patch(p->instrtype, insnbuf,
					 (unsigned long)p->instr, p->len);
 
 		BUG_ON(used > p->len);
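For orientation, a simplified sketch of the loop this call sits in, assuming made-up types (patch_site is a hypothetical stand-in for paravirt_patch_site); the write-back and the NOP padding are paraphrased, not copied from the kernel:

#include <stdint.h>
#include <string.h>

#define MAX_PATCH_LEN 16	/* assumption: small fixed scratch size */

struct patch_site {		/* hypothetical stand-in */
	uint8_t *instr;		/* address of the patchable instructions */
	uint8_t instrtype;
	uint8_t len;
};

typedef unsigned (*patch_fn)(uint8_t type, void *insnbuf,
			     unsigned long addr, unsigned len);

static void apply_one_site(struct patch_site *p, patch_fn patch)
{
	uint8_t insnbuf[MAX_PATCH_LEN];
	unsigned used;

	memcpy(insnbuf, p->instr, p->len);	/* prep with original bytes */
	used = patch(p->instrtype, insnbuf, (unsigned long)p->instr, p->len);
	memset(insnbuf + used, 0x90, p->len - used);	/* NOP-pad the rest */
	memcpy(p->instr, insnbuf, p->len);	/* write back to the site */
}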
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -81,10 +81,8 @@ struct branch {
 	u32 delta;
 } __attribute__((packed));
 
-static unsigned paravirt_patch_call(void *insnbuf,
-				    const void *target, u16 tgt_clobbers,
-				    unsigned long addr, u16 site_clobbers,
-				    unsigned len)
+static unsigned paravirt_patch_call(void *insnbuf, const void *target,
+				    unsigned long addr, unsigned len)
 {
 	struct branch *b = insnbuf;
 	unsigned long delta = (unsigned long)target - (addr+5);
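The function being slimmed down here emits a 5-byte x86 CALL rel32 at the patch site; a minimal sketch under that assumption (the struct layout and delta computation come from the hunk above, error handling is simplified):

#include <stdint.h>

struct branch {
	uint8_t opcode;		/* 0xe8 = CALL rel32 */
	uint32_t delta;		/* target - (site address + 5) */
} __attribute__((packed));

static unsigned patch_call_sketch(void *insnbuf, const void *target,
				  unsigned long addr, unsigned len)
{
	struct branch *b = insnbuf;
	unsigned long delta = (unsigned long)target - (addr + 5);

	if (len < sizeof(*b))
		return len;	/* site too small to hold the call */

	b->opcode = 0xe8;
	b->delta = (uint32_t)delta;
	return sizeof(*b);	/* 5 bytes emitted; caller NOP-pads the rest */
}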
@@ -149,7 +147,7 @@ static void *get_call_destination(u8 type)
 	return *((void **)&tmpl + type);
 }
 
-unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
+unsigned paravirt_patch_default(u8 type, void *insnbuf,
				unsigned long addr, unsigned len)
 {
 	void *opfunc = get_call_destination(type);
@@ -172,10 +170,8 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
 		/* If operation requires a jmp, then jmp */
 		ret = paravirt_patch_jmp(insnbuf, opfunc, addr, len);
 	else
-		/* Otherwise call the function; assume target could
-		   clobber any caller-save reg */
-		ret = paravirt_patch_call(insnbuf, opfunc, CLBR_ANY,
-					  addr, clobbers, len);
+		/* Otherwise call the function. */
+		ret = paravirt_patch_call(insnbuf, opfunc, addr, len);
 
 	return ret;
 }
--- a/arch/x86/kernel/paravirt_patch_32.c
+++ b/arch/x86/kernel/paravirt_patch_32.c
@@ -30,8 +30,7 @@ unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len)
 extern bool pv_is_native_spin_unlock(void);
 extern bool pv_is_native_vcpu_is_preempted(void);
 
-unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
-		      unsigned long addr, unsigned len)
+unsigned native_patch(u8 type, void *ibuf, unsigned long addr, unsigned len)
 {
 	const unsigned char *start, *end;
 	unsigned ret;
@@ -70,7 +69,7 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
 
 	default:
 patch_default: __maybe_unused
-		ret = paravirt_patch_default(type, clobbers, ibuf, addr, len);
+		ret = paravirt_patch_default(type, ibuf, addr, len);
 		break;
 
 patch_site:
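native_patch() is essentially a switch: a few hot operations get hand-written native instruction bytes copied over the call site, and everything else falls through to paravirt_patch_default(). A hypothetical sketch of that shape (the instruction templates and type numbering are invented for illustration; the copy helper mirrors the role of paravirt_patch_insns()):

#include <stdint.h>
#include <string.h>

/* Hypothetical stand-ins for the DEF_NATIVE() instruction templates. */
static const unsigned char native_irq_disable[] = { 0xfa };	/* cli */
static const unsigned char native_irq_enable[]  = { 0xfb };	/* sti */

/* Copy a native template over the call site if it fits. */
static unsigned patch_insns_sketch(void *ibuf, unsigned len,
				   const unsigned char *start,
				   const unsigned char *end)
{
	unsigned insn_len = end - start;

	if (insn_len > len)
		return len;	/* template doesn't fit; leave the site alone */
	memcpy(ibuf, start, insn_len);
	return insn_len;
}

static unsigned native_patch_sketch(uint8_t type, void *ibuf,
				    unsigned long addr, unsigned len)
{
	(void)addr;
	switch (type) {
	case 0:	/* e.g. irq_disable - numbering is invented */
		return patch_insns_sketch(ibuf, len, native_irq_disable,
					  native_irq_disable + 1);
	case 1:	/* e.g. irq_enable */
		return patch_insns_sketch(ibuf, len, native_irq_enable,
					  native_irq_enable + 1);
	default:
		return 0;	/* would be: paravirt_patch_default(...) */
	}
}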
--- a/arch/x86/kernel/paravirt_patch_64.c
+++ b/arch/x86/kernel/paravirt_patch_64.c
@@ -38,8 +38,7 @@ unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len)
 extern bool pv_is_native_spin_unlock(void);
 extern bool pv_is_native_vcpu_is_preempted(void);
 
-unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
-		      unsigned long addr, unsigned len)
+unsigned native_patch(u8 type, void *ibuf, unsigned long addr, unsigned len)
 {
 	const unsigned char *start, *end;
 	unsigned ret;
@@ -80,7 +79,7 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
 
 	default:
 patch_default: __maybe_unused
-		ret = paravirt_patch_default(type, clobbers, ibuf, addr, len);
+		ret = paravirt_patch_default(type, ibuf, addr, len);
 		break;
 
 patch_site:
--- a/arch/x86/kernel/vsmp_64.c
+++ b/arch/x86/kernel/vsmp_64.c
@@ -69,7 +69,7 @@ asmlinkage __visible void vsmp_irq_enable(void)
 }
 PV_CALLEE_SAVE_REGS_THUNK(vsmp_irq_enable);
 
-static unsigned __init vsmp_patch(u8 type, u16 clobbers, void *ibuf,
+static unsigned __init vsmp_patch(u8 type, void *ibuf,
				  unsigned long addr, unsigned len)
 {
 	switch (type) {
@@ -77,9 +77,9 @@ static unsigned __init vsmp_patch(u8 type, u16 clobbers, void *ibuf,
 	case PARAVIRT_PATCH(pv_irq_ops.irq_disable):
 	case PARAVIRT_PATCH(pv_irq_ops.save_fl):
 	case PARAVIRT_PATCH(pv_irq_ops.restore_fl):
-		return paravirt_patch_default(type, clobbers, ibuf, addr, len);
+		return paravirt_patch_default(type, ibuf, addr, len);
 	default:
-		return native_patch(type, clobbers, ibuf, addr, len);
+		return native_patch(type, ibuf, addr, len);
 	}
 }