x86: Fix common misspellings
They were generated by 'codespell' and then manually reviewed.

Signed-off-by: Lucas De Marchi <lucas.demarchi@profusion.mobi>
Cc: trivial@kernel.org
LKML-Reference: <1300389856-1099-3-git-send-email-lucas.demarchi@profusion.mobi>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent a6c3270b04
commit 0d2eb44f63
@@ -326,7 +326,7 @@ config X86_PPRO_FENCE
 Old PentiumPro multiprocessor systems had errata that could cause
 memory operations to violate the x86 ordering standard in rare cases.
 Enabling this option will attempt to work around some (but not all)
-occurances of this problem, at the cost of much heavier spinlock and
+occurrences of this problem, at the cost of much heavier spinlock and
 memory barrier operations.

 If unsure, say n here. Even distro kernels should think twice before
@@ -1346,7 +1346,7 @@ _zero_cipher_left_decrypt:
 and $15, %r13 # %r13 = arg4 (mod 16)
 je _multiple_of_16_bytes_decrypt

-# Handle the last <16 byte block seperately
+# Handle the last <16 byte block separately

 paddd ONE(%rip), %xmm0 # increment CNT to get Yn
 movdqa SHUF_MASK(%rip), %xmm10
@@ -1355,7 +1355,7 @@ _zero_cipher_left_decrypt:
 ENCRYPT_SINGLE_BLOCK %xmm0, %xmm1 # E(K, Yn)
 sub $16, %r11
 add %r13, %r11
-movdqu (%arg3,%r11,1), %xmm1 # recieve the last <16 byte block
+movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte block
 lea SHIFT_MASK+16(%rip), %r12
 sub %r13, %r12
 # adjust the shuffle mask pointer to be able to shift 16-%r13 bytes
@@ -1607,7 +1607,7 @@ _zero_cipher_left_encrypt:
 and $15, %r13 # %r13 = arg4 (mod 16)
 je _multiple_of_16_bytes_encrypt

-# Handle the last <16 Byte block seperately
+# Handle the last <16 Byte block separately
 paddd ONE(%rip), %xmm0 # INCR CNT to get Yn
 movdqa SHUF_MASK(%rip), %xmm10
 PSHUFB_XMM %xmm10, %xmm0
@@ -71,7 +71,7 @@ static inline void set_page_memtype(struct page *pg, unsigned long memtype) { }
 * Read/Write : ReadOnly, ReadWrite
 * Presence : NotPresent
 *
-* Within a catagory, the attributes are mutually exclusive.
+* Within a category, the attributes are mutually exclusive.
 *
 * The implementation of this API will take care of various aspects that
 * are associated with changing such attributes, such as:
@@ -29,8 +29,8 @@ void arch_trigger_all_cpu_backtrace(void);
 * external nmis, because the local ones are more frequent.
 *
 * Also setup some default high/normal/low settings for
-* subsystems to registers with. Using 4 bits to seperate
-* the priorities. This can go alot higher if needed be.
+* subsystems to registers with. Using 4 bits to separate
+* the priorities. This can go a lot higher if needed be.
 */

 #define NMI_LOCAL_SHIFT 16 /* randomly picked */
@@ -38,7 +38,7 @@
 #define K8_NOP8 K8_NOP4 K8_NOP4

 /* K7 nops
-uses eax dependencies (arbitary choice)
+uses eax dependencies (arbitrary choice)
 1: nop
 2: movl %eax,%eax
 3: leal (,%eax,1),%eax
@@ -20,7 +20,7 @@ extern struct olpc_platform_t olpc_platform_info;

 /*
 * OLPC board IDs contain the major build number within the mask 0x0ff0,
-* and the minor build number withing 0x000f. Pre-builds have a minor
+* and the minor build number within 0x000f. Pre-builds have a minor
 * number less than 8, and normal builds start at 8. For example, 0x0B10
 * is a PreB1, and 0x0C18 is a C1.
 */
@@ -1,5 +1,5 @@
 /*
-* Netburst Perfomance Events (P4, old Xeon)
+* Netburst Performance Events (P4, old Xeon)
 */

 #ifndef PERF_EVENT_P4_H
@@ -9,7 +9,7 @@
 #include <linux/bitops.h>

 /*
-* NetBurst has perfomance MSRs shared between
+* NetBurst has performance MSRs shared between
 * threads if HT is turned on, ie for both logical
 * processors (mem: in turn in Atom with HT support
 * perf-MSRs are not shared and every thread has its
@@ -7,7 +7,7 @@
 */
 #define X86_EFLAGS_CF 0x00000001 /* Carry Flag */
 #define X86_EFLAGS_PF 0x00000004 /* Parity Flag */
-#define X86_EFLAGS_AF 0x00000010 /* Auxillary carry Flag */
+#define X86_EFLAGS_AF 0x00000010 /* Auxiliary carry Flag */
 #define X86_EFLAGS_ZF 0x00000040 /* Zero Flag */
 #define X86_EFLAGS_SF 0x00000080 /* Sign Flag */
 #define X86_EFLAGS_TF 0x00000100 /* Trap Flag */
@@ -31,7 +31,7 @@
 #define R12 24
 #define RBP 32
 #define RBX 40
-/* arguments: interrupts/non tracing syscalls only save upto here*/
+/* arguments: interrupts/non tracing syscalls only save up to here*/
 #define R11 48
 #define R10 56
 #define R9 64
@@ -73,7 +73,7 @@ struct pt_regs {
 unsigned long r12;
 unsigned long rbp;
 unsigned long rbx;
-/* arguments: non interrupts/non tracing syscalls only save upto here*/
+/* arguments: non interrupts/non tracing syscalls only save up to here*/
 unsigned long r11;
 unsigned long r10;
 unsigned long r9;
@@ -103,7 +103,7 @@ struct pt_regs {
 unsigned long r12;
 unsigned long bp;
 unsigned long bx;
-/* arguments: non interrupts/non tracing syscalls only save upto here*/
+/* arguments: non interrupts/non tracing syscalls only save up to here*/
 unsigned long r11;
 unsigned long r10;
 unsigned long r9;
@@ -35,7 +35,7 @@ static inline cycles_t get_cycles(void)
 static __always_inline cycles_t vget_cycles(void)
 {
 /*
-* We only do VDSOs on TSC capable CPUs, so this shouldnt
+* We only do VDSOs on TSC capable CPUs, so this shouldn't
 * access boot_cpu_data (which is not VDSO-safe):
 */
 #ifndef CONFIG_X86_TSC
@@ -86,7 +86,7 @@ DEFINE_GUEST_HANDLE(void);
 * The privilege level specifies which modes may enter a trap via a software
 * interrupt. On x86/64, since rings 1 and 2 are unavailable, we allocate
 * privilege levels as follows:
-* Level == 0: Noone may enter
+* Level == 0: No one may enter
 * Level == 1: Kernel may enter
 * Level == 2: Kernel may enter
 * Level == 3: Everyone may enter
@@ -199,7 +199,7 @@ void *text_poke_early(void *addr, const void *opcode, size_t len);

 /* Replace instructions with better alternatives for this CPU type.
 This runs before SMP is initialized to avoid SMP problems with
-self modifying code. This implies that assymetric systems where
+self modifying code. This implies that asymmetric systems where
 APs have less capabilities than the boot processor are not handled.
 Tough. Make sure you disable such features by hand. */
@@ -73,7 +73,7 @@ static u32 __init allocate_aperture(void)
 /*
 * using 512M as goal, in case kexec will load kernel_big
 * that will do the on position decompress, and could overlap with
-* that positon with gart that is used.
+* that position with gart that is used.
 * sequende:
 * kernel_small
 * ==> kexec (with kdump trigger path or previous doesn't shutdown gart)
@@ -1886,7 +1886,7 @@ void disable_IO_APIC(void)
 *
 * With interrupt-remapping, for now we will use virtual wire A mode,
 * as virtual wire B is little complex (need to configure both
-* IOAPIC RTE aswell as interrupt-remapping table entry).
+* IOAPIC RTE as well as interrupt-remapping table entry).
 * As this gets called during crash dump, keep this simple for now.
 */
 if (ioapic_i8259.pin != -1 && !intr_remapping_enabled) {
@@ -2905,7 +2905,7 @@ void __init setup_IO_APIC(void)
 }

 /*
-* Called after all the initialization is done. If we didnt find any
+* Called after all the initialization is done. If we didn't find any
 * APIC bugs then we can allow the modify fast path
 */
@@ -66,7 +66,7 @@
 * 1.5: Fix segment register reloading (in case of bad segments saved
 * across BIOS call).
 * Stephen Rothwell
-* 1.6: Cope with complier/assembler differences.
+* 1.6: Cope with compiler/assembler differences.
 * Only try to turn off the first display device.
 * Fix OOPS at power off with no APM BIOS by Jan Echternach
 * <echter@informatik.uni-rostock.de>
@@ -444,7 +444,7 @@ static int __cpuinit longhaul_get_ranges(void)
 return -EINVAL;
 }
 /* Get max multiplier - as we always did.
-* Longhaul MSR is usefull only when voltage scaling is enabled.
+* Longhaul MSR is useful only when voltage scaling is enabled.
 * C3 is booting at max anyway. */
 maxmult = mult;
 /* Get min multiplier */
@@ -1011,7 +1011,7 @@ static void __exit longhaul_exit(void)
 * trigger frequency transition in some cases. */
 module_param(disable_acpi_c3, int, 0644);
 MODULE_PARM_DESC(disable_acpi_c3, "Don't use ACPI C3 support");
-/* Change CPU voltage with frequency. Very usefull to save
+/* Change CPU voltage with frequency. Very useful to save
 * power, but most VIA C3 processors aren't supporting it. */
 module_param(scale_voltage, int, 0644);
 MODULE_PARM_DESC(scale_voltage, "Scale voltage of processor");
@@ -1276,7 +1276,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)

 if (powernow_k8_cpu_init_acpi(data)) {
 /*
-* Use the PSB BIOS structure. This is only availabe on
+* Use the PSB BIOS structure. This is only available on
 * an UP version, and is deprecated by AMD.
 */
 if (num_online_cpus() != 1) {
@@ -292,7 +292,7 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy)

 result = speedstep_smi_ownership();
 if (result) {
-dprintk("fails in aquiring ownership of a SMI interface.\n");
+dprintk("fails in acquiring ownership of a SMI interface.\n");
 return -EINVAL;
 }
@@ -360,7 +360,7 @@ static int speedstep_resume(struct cpufreq_policy *policy)
 int result = speedstep_smi_ownership();

 if (result)
-dprintk("fails in re-aquiring ownership of a SMI interface.\n");
+dprintk("fails in re-acquiring ownership of a SMI interface.\n");

 return result;
 }
@@ -32,7 +32,7 @@ static void inject_mce(struct mce *m)
 {
 struct mce *i = &per_cpu(injectm, m->extcpu);

-/* Make sure noone reads partially written injectm */
+/* Make sure no one reads partially written injectm */
 i->finished = 0;
 mb();
 m->finished = 0;
@@ -881,7 +881,7 @@ reset:
 * Check if the address reported by the CPU is in a format we can parse.
 * It would be possible to add code for most other cases, but all would
 * be somewhat complicated (e.g. segment offset would require an instruction
-* parser). So only support physical addresses upto page granuality for now.
+* parser). So only support physical addresses up to page granuality for now.
 */
 static int mce_usable_address(struct mce *m)
 {
@@ -1,6 +1,6 @@
 /*
 * This only handles 32bit MTRR on 32bit hosts. This is strictly wrong
-* because MTRRs can span upto 40 bits (36bits on most modern x86)
+* because MTRRs can span up to 40 bits (36bits on most modern x86)
 */
 #define DEBUG
@@ -1111,7 +1111,7 @@ static int x86_pmu_add(struct perf_event *event, int flags)

 /*
 * If group events scheduling transaction was started,
-* skip the schedulability test here, it will be peformed
+* skip the schedulability test here, it will be performed
 * at commit time (->commit_txn) as a whole
 */
 if (cpuc->group_flag & PERF_EVENT_TXN)
@@ -1,5 +1,5 @@
 /*
-* Netburst Perfomance Events (P4, old Xeon)
+* Netburst Performance Events (P4, old Xeon)
 *
 * Copyright (C) 2010 Parallels, Inc., Cyrill Gorcunov <gorcunov@openvz.org>
 * Copyright (C) 2010 Intel Corporation, Lin Ming <ming.m.lin@intel.com>
|
||||
*/
|
||||
|
||||
/*
|
||||
* if an event is shared accross the logical threads
|
||||
* if an event is shared across the logical threads
|
||||
* the user needs special permissions to be able to use it
|
||||
*/
|
||||
if (p4_ht_active() && p4_event_bind_map[v].shared) {
|
||||
@ -790,13 +790,13 @@ static void p4_pmu_disable_pebs(void)
|
||||
*
|
||||
* It's still allowed that two threads setup same cache
|
||||
* events so we can't simply clear metrics until we knew
|
||||
* noone is depending on us, so we need kind of counter
|
||||
* no one is depending on us, so we need kind of counter
|
||||
* for "ReplayEvent" users.
|
||||
*
|
||||
* What is more complex -- RAW events, if user (for some
|
||||
* reason) will pass some cache event metric with improper
|
||||
* event opcode -- it's fine from hardware point of view
|
||||
* but completely nonsence from "meaning" of such action.
|
||||
* but completely nonsense from "meaning" of such action.
|
||||
*
|
||||
* So at moment let leave metrics turned on forever -- it's
|
||||
* ok for now but need to be revisited!
|
||||
|
@@ -86,7 +86,7 @@ static void __init vmware_platform_setup(void)
 }

 /*
-* While checking the dmi string infomation, just checking the product
+* While checking the dmi string information, just checking the product
 * serial key should be enough, as this will always have a VMware
 * specific string when running under VMware hypervisor.
 */
@@ -18,7 +18,7 @@
 * A note on terminology:
 * - top of stack: Architecture defined interrupt frame from SS to RIP
 * at the top of the kernel process stack.
-* - partial stack frame: partially saved registers upto R11.
+* - partial stack frame: partially saved registers up to R11.
 * - full stack frame: Like partial stack frame, but all register saved.
 *
 * Some macro usage:
@@ -422,7 +422,7 @@ ENTRY(ret_from_fork)
 END(ret_from_fork)

 /*
-* System call entry. Upto 6 arguments in registers are supported.
+* System call entry. Up to 6 arguments in registers are supported.
 *
 * SYSCALL does not save anything on the stack and does not change the
 * stack pointer.
@@ -145,7 +145,7 @@ EXPORT_SYMBOL_GPL(fpu_finit);
 * The _current_ task is using the FPU for the first time
 * so initialize it and set the mxcsr to its default
 * value at reset if we support XMM instructions and then
-* remeber the current task has used the FPU.
+* remember the current task has used the FPU.
 */
 int init_fpu(struct task_struct *tsk)
 {
@@ -172,7 +172,7 @@ asmlinkage void do_softirq(void)

 call_on_stack(__do_softirq, isp);
 /*
-* Shouldnt happen, we returned above if in_interrupt():
+* Shouldn't happen, we returned above if in_interrupt():
 */
 WARN_ON_ONCE(softirq_count());
 }
@@ -278,7 +278,7 @@ static int hw_break_release_slot(int breakno)
 pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu);
 if (dbg_release_bp_slot(*pevent))
 /*
-* The debugger is responisble for handing the retry on
+* The debugger is responsible for handing the retry on
 * remove failure.
 */
 return -1;
@@ -259,7 +259,7 @@ static int __init mca_init(void)
 /*
 * WARNING: Be careful when making changes here. Putting an adapter
 * and the motherboard simultaneously into setup mode may result in
-* damage to chips (according to The Indispensible PC Hardware Book
+* damage to chips (according to The Indispensable PC Hardware Book
 * by Hans-Peter Messmer). Also, we disable system interrupts (so
 * that we are not disturbed in the middle of this).
 */
@@ -883,7 +883,7 @@ static int __init update_mp_table(void)

 if (!mpc_new_phys) {
 unsigned char old, new;
-/* check if we can change the postion */
+/* check if we can change the position */
 mpc->checksum = 0;
 old = mpf_checksum((unsigned char *)mpc, mpc->length);
 mpc->checksum = 0xff;
@@ -892,7 +892,7 @@ static int __init update_mp_table(void)
 printk(KERN_INFO "mpc is readonly, please try alloc_mptable instead\n");
 return 0;
 }
-printk(KERN_INFO "use in-positon replacing\n");
+printk(KERN_INFO "use in-position replacing\n");
 } else {
 mpf->physptr = mpc_new_phys;
 mpc_new = phys_to_virt(mpc_new_phys);
@@ -1279,7 +1279,7 @@ static int __init calgary_bus_has_devices(int bus, unsigned short pci_dev)

 if (pci_dev == PCI_DEVICE_ID_IBM_CALIOC2) {
 /*
-* FIXME: properly scan for devices accross the
+* FIXME: properly scan for devices across the
 * PCI-to-PCI bridge on every CalIOC2 port.
 */
 return 1;
@@ -1295,7 +1295,7 @@ static int __init calgary_bus_has_devices(int bus, unsigned short pci_dev)

 /*
 * calgary_init_bitmap_from_tce_table():
-* Funtion for kdump case. In the second/kdump kernel initialize
+* Function for kdump case. In the second/kdump kernel initialize
 * the bitmap based on the tce table entries obtained from first kernel
 */
 static void calgary_init_bitmap_from_tce_table(struct iommu_table *tbl)
@@ -166,7 +166,7 @@ static void enable_step(struct task_struct *child, bool block)
 * Make sure block stepping (BTF) is not enabled unless it should be.
 * Note that we don't try to worry about any is_setting_trap_flag()
 * instructions after the first when using block stepping.
-* So noone should try to use debugger block stepping in a program
+* So no one should try to use debugger block stepping in a program
 * that uses user-mode single stepping itself.
 */
 if (enable_single_step(child) && block) {
@@ -39,7 +39,7 @@ int __ref arch_register_cpu(int num)
 /*
 * CPU0 cannot be offlined due to several
 * restrictions and assumptions in kernel. This basically
-* doesnt add a control file, one cannot attempt to offline
+* doesn't add a control file, one cannot attempt to offline
 * BSP.
 *
 * Also certain PCI quirks require not to enable hotplug control
@@ -427,7 +427,7 @@ unsigned long native_calibrate_tsc(void)
 * the delta to the previous read. We keep track of the min
 * and max values of that delta. The delta is mostly defined
 * by the IO time of the PIT access, so we can detect when a
-* SMI/SMM disturbance happend between the two reads. If the
+* SMI/SMM disturbance happened between the two reads. If the
 * maximum time is significantly larger than the minimum time,
 * then we discard the result and have another try.
 *
@@ -900,7 +900,7 @@ static DECLARE_DELAYED_WORK(tsc_irqwork, tsc_refine_calibration_work);
 * timer based, instead of loop based, we don't block the boot
 * process while this longer calibration is done.
 *
-* If there are any calibration anomolies (too many SMIs, etc),
+* If there are any calibration anomalies (too many SMIs, etc),
 * or the refined calibration is off by 1% of the fast early
 * calibration, we throw out the new calibration and use the
 * early calibration.
@@ -18,7 +18,7 @@
 * This file is expected to run in 32bit code. Currently:
 *
 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
-* arch/x86/kernel/trampoline_64.S: secondary processor verfication
+* arch/x86/kernel/trampoline_64.S: secondary processor verification
 * arch/x86/kernel/head_32.S: processor startup
 *
 * verify_cpu, returns the status of longmode and SSE in register %eax.
@@ -53,7 +53,7 @@ void __sanitize_i387_state(struct task_struct *tsk)

 /*
 * None of the feature bits are in init state. So nothing else
-* to do for us, as the memory layout is upto date.
+* to do for us, as the memory layout is up to date.
 */
 if ((xstate_bv & pcntxt_mask) == pcntxt_mask)
 return;
@@ -348,7 +348,7 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 return;
 kvm_get_pfn(pfn);
 /*
-* we call mmu_set_spte() with host_writable = true beacuse that
+* we call mmu_set_spte() with host_writable = true because that
 * vcpu->arch.update_pte.pfn was fetched from get_user_pages(write = 1).
 */
 mmu_set_spte(vcpu, spte, sp->role.access, pte_access, 0, 0,
@@ -25,7 +25,7 @@ static int __kvm_timer_fn(struct kvm_vcpu *vcpu, struct kvm_timer *ktimer)

 /*
 * There is a race window between reading and incrementing, but we do
-* not care about potentially loosing timer events in the !reinject
+* not care about potentially losing timer events in the !reinject
 * case anyway. Note: KVM_REQ_PENDING_TIMER is implicitly checked
 * in vcpu_enter_guest.
 */
@@ -1028,7 +1028,7 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data)
 /*
 * Special case: close write to TSC within 5 seconds of
 * another CPU is interpreted as an attempt to synchronize
-* The 5 seconds is to accomodate host load / swapping as
+* The 5 seconds is to accommodate host load / swapping as
 * well as any reset of TSC during the boot process.
 *
 * In that case, for a reliable TSC, we can match TSC offsets,
@@ -397,7 +397,7 @@ static void lguest_load_tr_desc(void)
 * instead we just use the real "cpuid" instruction. Then I pretty much turned
 * off feature bits until the Guest booted. (Don't say that: you'll damage
 * lguest sales!) Shut up, inner voice! (Hey, just pointing out that this is
-* hardly future proof.) Noone's listening! They don't like you anyway,
+* hardly future proof.) No one's listening! They don't like you anyway,
 * parenthetic weirdo!
 *
 * Replacing the cpuid so we can turn features off is great for the kernel, but
@@ -117,7 +117,7 @@ ENDPROC(bad_from_user)
 * rdx count
 *
 * Output:
-* eax uncopied bytes or 0 if successfull.
+* eax uncopied bytes or 0 if successful.
 */
 ENTRY(copy_user_generic_unrolled)
 CFI_STARTPROC
@@ -152,7 +152,7 @@ ENTRY(csum_partial_copy_generic)

 adcq %r9,%rax

-/* do last upto 56 bytes */
+/* do last up to 56 bytes */
 .Lhandle_tail:
 /* ecx: count */
 movl %ecx,%r10d
@@ -180,7 +180,7 @@ ENTRY(csum_partial_copy_generic)
 addl %ebx,%eax
 adcl %r9d,%eax

-/* do last upto 6 bytes */
+/* do last up to 6 bytes */
 .Lhandle_7:
 movl %r10d,%ecx
 andl $7,%ecx
@@ -84,7 +84,7 @@ static unsigned do_csum(const unsigned char *buff, unsigned len)
 count64--;
 }

-/* last upto 7 8byte blocks */
+/* last up to 7 8byte blocks */
 count %= 8;
 while (count) {
 asm("addq %1,%0\n\t"
@@ -326,7 +326,7 @@ try_again:
 if (mm->free_area_cache < len)
 goto fail;

-/* either no address requested or cant fit in requested address hole */
+/* either no address requested or can't fit in requested address hole */
 addr = (mm->free_area_cache - len) & huge_page_mask(h);
 do {
 /*
@@ -917,7 +917,7 @@ static void mark_nxdata_nx(void)
 {
 /*
 * When this called, init has already been executed and released,
-* so everything past _etext sould be NX.
+* so everything past _etext should be NX.
 */
 unsigned long start = PFN_ALIGN(_etext);
 /*
@@ -446,7 +446,7 @@ static int __init numa_alloc_distance(void)
 * @distance: NUMA distance
 *
 * Set the distance from node @from to @to to @distance. If distance table
-* doesn't exist, one which is large enough to accomodate all the currently
+* doesn't exist, one which is large enough to accommodate all the currently
 * known nodes will be created.
 *
 * If such table cannot be allocated, a warning is printed and further
@@ -310,7 +310,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
 * these shared mappings are made of small page mappings.
 * Thus this don't enforce !RW mapping for small page kernel
 * text mapping logic will help Linux Xen parvirt guest boot
-* aswell.
+* as well.
 */
 if (lookup_address(address, &level) && (level != PG_LEVEL_4K))
 pgprot_val(forbidden) |= _PAGE_RW;
@@ -241,7 +241,7 @@ void __init pcibios_resource_survey(void)
 e820_reserve_resources_late();
 /*
 * Insert the IO APIC resources after PCI initialization has
-* occured to handle IO APICS that are mapped in on a BAR in
+* occurred to handle IO APICS that are mapped in on a BAR in
 * PCI space, but before trying to assign unassigned pci res.
 */
 ioapic_insert_resources();
@@ -304,7 +304,7 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
 /*
 * ioremap() and ioremap_nocache() defaults to UC MINUS for now.
 * To avoid attribute conflicts, request UC MINUS here
-* aswell.
+* as well.
 */
 prot |= _PAGE_CACHE_UC_MINUS;
@@ -1745,7 +1745,7 @@ static void convert_pfn_mfn(void *v)
 }

 /*
-* Set up the inital kernel pagetable.
+* Set up the initial kernel pagetable.
 *
 * We can construct this by grafting the Xen provided pagetable into
 * head_64.S's preconstructed pagetables. We copy the Xen L2's into