Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

commit ae95d71261
Author: David S. Miller <davem@davemloft.net>
Date:   2016-04-09 17:41:41 -04:00
710 changed files with 7705 additions and 4845 deletions


@ -3,7 +3,7 @@ Binding for Qualcomm Atheros AR7xxx/AR9XXX PLL controller
The PPL controller provides the 3 main clocks of the SoC: CPU, DDR and AHB.
Required Properties:
- compatible: has to be "qca,<soctype>-cpu-intc" and one of the following
- compatible: has to be "qca,<soctype>-pll" and one of the following
fallbacks:
- "qca,ar7100-pll"
- "qca,ar7240-pll"
@ -21,8 +21,8 @@ Optional properties:
Example:
memory-controller@18050000 {
compatible = "qca,ar9132-ppl", "qca,ar9130-pll";
pll-controller@18050000 {
compatible = "qca,ar9132-pll", "qca,ar9130-pll";
reg = <0x18050000 0x20>;
clock-names = "ref";


@ -134,12 +134,12 @@ mfio80 ddr_debug, mips_trace_data, mips_debug
mfio81 dreq0, mips_trace_data, eth_debug
mfio82 dreq1, mips_trace_data, eth_debug
mfio83 mips_pll_lock, mips_trace_data, usb_debug
mfio84 sys_pll_lock, mips_trace_data, usb_debug
mfio85 wifi_pll_lock, mips_trace_data, sdhost_debug
mfio86 bt_pll_lock, mips_trace_data, sdhost_debug
mfio87 rpu_v_pll_lock, dreq2, socif_debug
mfio88 rpu_l_pll_lock, dreq3, socif_debug
mfio89 audio_pll_lock, dreq4, dreq5
mfio84 audio_pll_lock, mips_trace_data, usb_debug
mfio85 rpu_v_pll_lock, mips_trace_data, sdhost_debug
mfio86 rpu_l_pll_lock, mips_trace_data, sdhost_debug
mfio87 sys_pll_lock, dreq2, socif_debug
mfio88 wifi_pll_lock, dreq3, socif_debug
mfio89 bt_pll_lock, dreq4, dreq5
tck
trstn
tdi


@ -38,7 +38,7 @@ the update lasts only as long as the inode is cached in memory, after
which the timestamp reverts to 1970, i.e. moves backwards in time.
Currently, cramfs must be written and read with architectures of the
same endianness, and can be read only by kernels with PAGE_CACHE_SIZE
same endianness, and can be read only by kernels with PAGE_SIZE
== 4096. At least the latter of these is a bug, but it hasn't been
decided what the best fix is. For the moment if you have larger pages
you can just change the #define in mkcramfs.c, so long as you don't


@ -60,7 +60,7 @@ size: The limit of allocated bytes for this tmpfs instance. The
default is half of your physical RAM without swap. If you
oversize your tmpfs instances the machine will deadlock
since the OOM handler will not be able to free that memory.
nr_blocks: The same as size, but in blocks of PAGE_CACHE_SIZE.
nr_blocks: The same as size, but in blocks of PAGE_SIZE.
nr_inodes: The maximum number of inodes for this instance. The default
is half of the number of your physical RAM pages, or (on a
machine with highmem) the number of lowmem RAM pages,
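As an editor's illustration of these options (not part of the patch above), a minimal user-space sketch that mounts a bounded tmpfs via mount(2); the mount point and the limits are made-up values:

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
        /* "size=" and "nr_inodes=" are the tmpfs options described above. */
        if (mount("tmpfs", "/mnt/tmp", "tmpfs", 0,
                  "size=512m,nr_inodes=10240") != 0) {
                perror("mount");
                return 1;
        }
        return 0;
}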


@ -708,9 +708,9 @@ struct address_space_operations {
from the address space. This generally corresponds to either a
truncation, punch hole or a complete invalidation of the address
space (in the latter case 'offset' will always be 0 and 'length'
will be PAGE_CACHE_SIZE). Any private data associated with the page
will be PAGE_SIZE). Any private data associated with the page
should be updated to reflect this truncation. If offset is 0 and
length is PAGE_CACHE_SIZE, then the private data should be released,
length is PAGE_SIZE, then the private data should be released,
because the page must be able to be completely discarded. This may
be done by calling the ->releasepage function, but in this case the
release MUST succeed.
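To make that contract concrete, here is a hedged sketch of what a filesystem's ->invalidatepage() could do; it is not taken from any real filesystem, and only page_has_private() and try_to_release_page() are real kernel helpers:

#include <linux/mm.h>
#include <linux/pagemap.h>

static void example_invalidatepage(struct page *page, unsigned int offset,
                                   unsigned int length)
{
        if (offset == 0 && length == PAGE_SIZE) {
                /* Full invalidation: private data must be released, and
                 * the release is required to succeed in this case. */
                if (page_has_private(page))
                        try_to_release_page(page, 0);
                return;
        }
        /* Partial truncate/hole punch: trim any private state that
         * overlaps [offset, offset + length) -- filesystem specific. */
}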


@ -586,6 +586,10 @@ drivers to make their ->remove() callbacks avoid races with runtime PM directly,
but also it allows of more flexibility in the handling of devices during the
removal of their drivers.
Drivers in ->remove() callback should undo the runtime PM changes done
in ->probe(). Usually this means calling pm_runtime_disable(),
pm_runtime_dont_use_autosuspend() etc.
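A minimal sketch of that undo step, for a hypothetical "foo" platform driver whose ->probe() called pm_runtime_use_autosuspend() and pm_runtime_enable():

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int foo_remove(struct platform_device *pdev)
{
        /* Undo the runtime PM setup from foo_probe(), in reverse order. */
        pm_runtime_dont_use_autosuspend(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
        return 0;
}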
The user space can effectively disallow the driver of the device to power manage
it at run time by changing the value of its /sys/devices/.../power/control
attribute to "on", which causes pm_runtime_forbid() to be called. In principle,


@ -0,0 +1,208 @@
x86 Topology
============
This documents and clarifies the main aspects of x86 topology modelling and
representation in the kernel. Update/change when doing changes to the
respective code.
The architecture-agnostic topology definitions are in
Documentation/cputopology.txt. This file holds x86-specific
differences/specialities which must not necessarily apply to the generic
definitions. Thus, the way to read up on Linux topology on x86 is to start
with the generic one and look at this one in parallel for the x86 specifics.
Needless to say, code should use the generic functions - this file is *only*
here to *document* the inner workings of x86 topology.
Started by Thomas Gleixner <tglx@linutronix.de> and Borislav Petkov <bp@alien8.de>.
The main aim of the topology facilities is to present adequate interfaces to
code which needs to know/query/use the structure of the running system wrt
threads, cores, packages, etc.
The kernel does not care about the concept of physical sockets because a
socket has no relevance to software. It's an electromechanical component. In
the past a socket always contained a single package (see below), but with the
advent of Multi Chip Modules (MCM) a socket can hold more than one package. So
there might be still references to sockets in the code, but they are of
historical nature and should be cleaned up.
The topology of a system is described in the units of:
- packages
- cores
- threads
* Package:
Packages contain a number of cores plus shared resources, e.g. DRAM
controller, shared caches etc.
AMD nomenclature for package is 'Node'.
Package-related topology information in the kernel:
- cpuinfo_x86.x86_max_cores:
The number of cores in a package. This information is retrieved via CPUID.
- cpuinfo_x86.phys_proc_id:
The physical ID of the package. This information is retrieved via CPUID
and deduced from the APIC IDs of the cores in the package.
- cpuinfo_x86.logical_proc_id:
The logical ID of the package. As we do not trust BIOSes to enumerate the
packages in a consistent way, we introduced the concept of logical package
ID so we can sanely calculate the number of maximum possible packages in
the system and have the packages enumerated linearly.
- topology_max_packages():
The maximum possible number of packages in the system. Helpful for per
package facilities to preallocate per package information.
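For example, a per-package facility could size its bookkeeping with it. A hypothetical sketch, not from this commit (the struct and function names are made up; topology_max_packages() is the helper documented above):

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/topology.h>     /* topology_max_packages() on x86 */

struct pkg_stats {
        unsigned long events;   /* made-up per-package counter */
};

static struct pkg_stats *pkg_stats;

/* Preallocate one entry per possible package, as suggested above. */
static int __init pkg_stats_init(void)
{
        pkg_stats = kcalloc(topology_max_packages(),
                            sizeof(*pkg_stats), GFP_KERNEL);
        return pkg_stats ? 0 : -ENOMEM;
}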
* Cores:
A core consists of 1 or more threads. It does not matter whether the threads
are SMT- or CMT-type threads.
AMDs nomenclature for a CMT core is "Compute Unit". The kernel always uses
"core".
Core-related topology information in the kernel:
- smp_num_siblings:
The number of threads in a core. The number of threads in a package can be
calculated by:
threads_per_package = cpuinfo_x86.x86_max_cores * smp_num_siblings
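Expressed as a (hypothetical) helper, using exactly the two kernel symbols named above:

#include <asm/processor.h>      /* boot_cpu_data, a cpuinfo_x86 */
#include <asm/smp.h>            /* smp_num_siblings */

/* Sketch of the formula above; not part of this commit. */
static unsigned int example_threads_per_package(void)
{
        return boot_cpu_data.x86_max_cores * smp_num_siblings;
}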
* Threads:
A thread is a single scheduling unit. It's the equivalent to a logical Linux
CPU.
AMDs nomenclature for CMT threads is "Compute Unit Core". The kernel always
uses "thread".
Thread-related topology information in the kernel:
- topology_core_cpumask():
The cpumask contains all online threads in the package to which a thread
belongs.
The number of online threads is also printed in /proc/cpuinfo "siblings."
- topology_sibling_mask():
The cpumask contains all online threads in the core to which a thread
belongs.
- topology_logical_package_id():
The logical package ID to which a thread belongs.
- topology_physical_package_id():
The physical package ID to which a thread belongs.
- topology_core_id();
The ID of the core to which a thread belongs. It is also printed in /proc/cpuinfo
"core_id."
System topology examples
Note:
The alternative Linux CPU enumeration depends on how the BIOS enumerates the
threads. Many BIOSes enumerate all threads 0 first and then all threads 1.
That has the "advantage" that the logical Linux CPU numbers of threads 0 stay
the same whether threads are enabled or not. That's merely an implementation
detail and has no practical impact.
1) Single Package, Single Core
[package 0] -> [core 0] -> [thread 0] -> Linux CPU 0
2) Single Package, Dual Core
a) One thread per core
[package 0] -> [core 0] -> [thread 0] -> Linux CPU 0
-> [core 1] -> [thread 0] -> Linux CPU 1
b) Two threads per core
[package 0] -> [core 0] -> [thread 0] -> Linux CPU 0
-> [thread 1] -> Linux CPU 1
-> [core 1] -> [thread 0] -> Linux CPU 2
-> [thread 1] -> Linux CPU 3
Alternative enumeration:
[package 0] -> [core 0] -> [thread 0] -> Linux CPU 0
-> [thread 1] -> Linux CPU 2
-> [core 1] -> [thread 0] -> Linux CPU 1
-> [thread 1] -> Linux CPU 3
AMD nomenclature for CMT systems:
[node 0] -> [Compute Unit 0] -> [Compute Unit Core 0] -> Linux CPU 0
-> [Compute Unit Core 1] -> Linux CPU 1
-> [Compute Unit 1] -> [Compute Unit Core 0] -> Linux CPU 2
-> [Compute Unit Core 1] -> Linux CPU 3
4) Dual Package, Dual Core
a) One thread per core
[package 0] -> [core 0] -> [thread 0] -> Linux CPU 0
-> [core 1] -> [thread 0] -> Linux CPU 1
[package 1] -> [core 0] -> [thread 0] -> Linux CPU 2
-> [core 1] -> [thread 0] -> Linux CPU 3
b) Two threads per core
[package 0] -> [core 0] -> [thread 0] -> Linux CPU 0
-> [thread 1] -> Linux CPU 1
-> [core 1] -> [thread 0] -> Linux CPU 2
-> [thread 1] -> Linux CPU 3
[package 1] -> [core 0] -> [thread 0] -> Linux CPU 4
-> [thread 1] -> Linux CPU 5
-> [core 1] -> [thread 0] -> Linux CPU 6
-> [thread 1] -> Linux CPU 7
Alternative enumeration:
[package 0] -> [core 0] -> [thread 0] -> Linux CPU 0
-> [thread 1] -> Linux CPU 4
-> [core 1] -> [thread 0] -> Linux CPU 1
-> [thread 1] -> Linux CPU 5
[package 1] -> [core 0] -> [thread 0] -> Linux CPU 2
-> [thread 1] -> Linux CPU 6
-> [core 1] -> [thread 0] -> Linux CPU 3
-> [thread 1] -> Linux CPU 7
AMD nomenclature for CMT systems:
[node 0] -> [Compute Unit 0] -> [Compute Unit Core 0] -> Linux CPU 0
-> [Compute Unit Core 1] -> Linux CPU 1
-> [Compute Unit 1] -> [Compute Unit Core 0] -> Linux CPU 2
-> [Compute Unit Core 1] -> Linux CPU 3
[node 1] -> [Compute Unit 0] -> [Compute Unit Core 0] -> Linux CPU 4
-> [Compute Unit Core 1] -> Linux CPU 5
-> [Compute Unit 1] -> [Compute Unit Core 0] -> Linux CPU 6
-> [Compute Unit Core 1] -> Linux CPU 7


@ -4303,7 +4303,7 @@ F: drivers/net/ethernet/agere/
ETHERNET BRIDGE
M: Stephen Hemminger <stephen@networkplumber.org>
L: bridge@lists.linux-foundation.org
L: bridge@lists.linux-foundation.org (moderated for non-subscribers)
L: netdev@vger.kernel.org
W: http://www.linuxfoundation.org/en/Net:Bridge
S: Maintained
@ -5043,6 +5043,7 @@ F: include/linux/hw_random.h
HARDWARE SPINLOCK CORE
M: Ohad Ben-Cohen <ohad@wizery.com>
M: Bjorn Andersson <bjorn.andersson@linaro.org>
L: linux-remoteproc@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/ohad/hwspinlock.git
F: Documentation/hwspinlock.txt
@ -5751,7 +5752,7 @@ R: Don Skidmore <donald.c.skidmore@intel.com>
R: Bruce Allan <bruce.w.allan@intel.com>
R: John Ronciak <john.ronciak@intel.com>
R: Mitch Williams <mitch.a.williams@intel.com>
L: intel-wired-lan@lists.osuosl.org
L: intel-wired-lan@lists.osuosl.org (moderated for non-subscribers)
W: http://www.intel.com/support/feedback.htm
W: http://e1000.sourceforge.net/
Q: http://patchwork.ozlabs.org/project/intel-wired-lan/list/
@ -6403,7 +6404,7 @@ KPROBES
M: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
M: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
M: "David S. Miller" <davem@davemloft.net>
M: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
M: Masami Hiramatsu <mhiramat@kernel.org>
S: Maintained
F: Documentation/kprobes.txt
F: include/linux/kprobes.h
@ -7576,7 +7577,7 @@ F: drivers/infiniband/hw/nes/
NETEM NETWORK EMULATOR
M: Stephen Hemminger <stephen@networkplumber.org>
L: netem@lists.linux-foundation.org
L: netem@lists.linux-foundation.org (moderated for non-subscribers)
S: Maintained
F: net/sched/sch_netem.c
@ -8712,6 +8713,8 @@ F: drivers/pinctrl/sh-pfc/
PIN CONTROLLER - SAMSUNG
M: Tomasz Figa <tomasz.figa@gmail.com>
M: Krzysztof Kozlowski <k.kozlowski@samsung.com>
M: Sylwester Nawrocki <s.nawrocki@samsung.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
S: Maintained
@ -9140,6 +9143,13 @@ T: git git://github.com/KrasnikovEugene/wcn36xx.git
S: Supported
F: drivers/net/wireless/ath/wcn36xx/
QEMU MACHINE EMULATOR AND VIRTUALIZER SUPPORT
M: Gabriel Somlo <somlo@cmu.edu>
M: "Michael S. Tsirkin" <mst@redhat.com>
L: qemu-devel@nongnu.org
S: Maintained
F: drivers/firmware/qemu_fw_cfg.c
RADOS BLOCK DEVICE (RBD)
M: Ilya Dryomov <idryomov@gmail.com>
M: Sage Weil <sage@redhat.com>
@ -9315,6 +9325,7 @@ F: include/linux/regmap.h
REMOTE PROCESSOR (REMOTEPROC) SUBSYSTEM
M: Ohad Ben-Cohen <ohad@wizery.com>
M: Bjorn Andersson <bjorn.andersson@linaro.org>
L: linux-remoteproc@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/ohad/remoteproc.git
S: Maintained
F: drivers/remoteproc/
@ -9324,6 +9335,7 @@ F: include/linux/remoteproc.h
REMOTE PROCESSOR MESSAGING (RPMSG) SUBSYSTEM
M: Ohad Ben-Cohen <ohad@wizery.com>
M: Bjorn Andersson <bjorn.andersson@linaro.org>
L: linux-remoteproc@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/ohad/rpmsg.git
S: Maintained
F: drivers/rpmsg/
@ -10584,6 +10596,14 @@ L: linux-tegra@vger.kernel.org
S: Maintained
F: drivers/staging/nvec/
STAGING - OLPC SECONDARY DISPLAY CONTROLLER (DCON)
M: Jens Frederich <jfrederich@gmail.com>
M: Daniel Drake <dsd@laptop.org>
M: Jon Nettleton <jon.nettleton@gmail.com>
W: http://wiki.laptop.org/go/DCON
S: Maintained
F: drivers/staging/olpc_dcon/
STAGING - REALTEK RTL8712U DRIVERS
M: Larry Finger <Larry.Finger@lwfinger.net>
M: Florian Schilhabel <florian.c.schilhabel@googlemail.com>.
@ -12203,9 +12223,9 @@ S: Maintained
F: drivers/media/tuners/tuner-xc2028.*
XEN HYPERVISOR INTERFACE
M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
M: Boris Ostrovsky <boris.ostrovsky@oracle.com>
M: David Vrabel <david.vrabel@citrix.com>
M: Juergen Gross <jgross@suse.com>
L: xen-devel@lists.xenproject.org (moderated for non-subscribers)
T: git git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip.git
S: Supported
@ -12217,16 +12237,16 @@ F: include/xen/
F: include/uapi/xen/
XEN HYPERVISOR ARM
M: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
M: Stefano Stabellini <sstabellini@kernel.org>
L: xen-devel@lists.xenproject.org (moderated for non-subscribers)
S: Supported
S: Maintained
F: arch/arm/xen/
F: arch/arm/include/asm/xen/
XEN HYPERVISOR ARM64
M: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
M: Stefano Stabellini <sstabellini@kernel.org>
L: xen-devel@lists.xenproject.org (moderated for non-subscribers)
S: Supported
S: Maintained
F: arch/arm64/xen/
F: arch/arm64/include/asm/xen/


@ -1,7 +1,7 @@
VERSION = 4
PATCHLEVEL = 6
SUBLEVEL = 0
EXTRAVERSION = -rc1
EXTRAVERSION = -rc2
NAME = Blurry Fish Butt
# *DOCUMENTATION*


@ -628,7 +628,7 @@ void flush_dcache_page(struct page *page)
/* kernel reading from page with U-mapping */
phys_addr_t paddr = (unsigned long)page_address(page);
unsigned long vaddr = page->index << PAGE_CACHE_SHIFT;
unsigned long vaddr = page->index << PAGE_SHIFT;
if (addr_not_cache_congruent(paddr, vaddr))
__flush_dcache_page(paddr, vaddr);


@ -1061,15 +1061,27 @@ static void cpu_init_hyp_mode(void *dummy)
kvm_arm_init_debug();
}
static void cpu_hyp_reinit(void)
{
if (is_kernel_in_hyp_mode()) {
/*
* cpu_init_stage2() is safe to call even if the PM
* event was cancelled before the CPU was reset.
*/
cpu_init_stage2(NULL);
} else {
if (__hyp_get_vectors() == hyp_default_vectors)
cpu_init_hyp_mode(NULL);
}
}
static int hyp_init_cpu_notify(struct notifier_block *self,
unsigned long action, void *cpu)
{
switch (action) {
case CPU_STARTING:
case CPU_STARTING_FROZEN:
if (__hyp_get_vectors() == hyp_default_vectors)
cpu_init_hyp_mode(NULL);
break;
cpu_hyp_reinit();
}
return NOTIFY_OK;
@ -1084,9 +1096,8 @@ static int hyp_init_cpu_pm_notifier(struct notifier_block *self,
unsigned long cmd,
void *v)
{
if (cmd == CPU_PM_EXIT &&
__hyp_get_vectors() == hyp_default_vectors) {
cpu_init_hyp_mode(NULL);
if (cmd == CPU_PM_EXIT) {
cpu_hyp_reinit();
return NOTIFY_OK;
}
@ -1127,6 +1138,22 @@ static int init_subsystems(void)
{
int err;
/*
* Register CPU Hotplug notifier
*/
cpu_notifier_register_begin();
err = __register_cpu_notifier(&hyp_init_cpu_nb);
cpu_notifier_register_done();
if (err) {
kvm_err("Cannot register KVM init CPU notifier (%d)\n", err);
return err;
}
/*
* Register CPU lower-power notifier
*/
hyp_cpu_pm_init();
/*
* Init HYP view of VGIC
*/
@ -1270,19 +1297,6 @@ static int init_hyp_mode(void)
free_boot_hyp_pgd();
#endif
cpu_notifier_register_begin();
err = __register_cpu_notifier(&hyp_init_cpu_nb);
cpu_notifier_register_done();
if (err) {
kvm_err("Cannot register HYP init CPU notifier (%d)\n", err);
goto out_err;
}
hyp_cpu_pm_init();
/* set size of VMID supported by CPU */
kvm_vmid_bits = kvm_get_vmid_bits();
kvm_info("%d-bit VMID\n", kvm_vmid_bits);


@ -235,7 +235,7 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
*/
if (mapping && cache_is_vipt_aliasing())
flush_pfn_alias(page_to_pfn(page),
page->index << PAGE_CACHE_SHIFT);
page->index << PAGE_SHIFT);
}
static void __flush_dcache_aliases(struct address_space *mapping, struct page *page)
@ -250,7 +250,7 @@ static void __flush_dcache_aliases(struct address_space *mapping, struct page *p
* data in the current VM view associated with this page.
* - aliasing VIPT: we only need to find one mapping of this page.
*/
pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
pgoff = page->index;
flush_dcache_mmap_lock(mapping);
vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {


@ -124,7 +124,9 @@
#define VTCR_EL2_SL0_LVL1 (1 << 6)
#define VTCR_EL2_T0SZ_MASK 0x3f
#define VTCR_EL2_T0SZ_40B 24
#define VTCR_EL2_VS 19
#define VTCR_EL2_VS_SHIFT 19
#define VTCR_EL2_VS_8BIT (0 << VTCR_EL2_VS_SHIFT)
#define VTCR_EL2_VS_16BIT (1 << VTCR_EL2_VS_SHIFT)
/*
* We configure the Stage-2 page tables to always restrict the IPA space to be


@ -141,6 +141,9 @@
#define ID_AA64MMFR1_VMIDBITS_SHIFT 4
#define ID_AA64MMFR1_HADBS_SHIFT 0
#define ID_AA64MMFR1_VMIDBITS_8 0
#define ID_AA64MMFR1_VMIDBITS_16 2
/* id_aa64mmfr2 */
#define ID_AA64MMFR2_UAO_SHIFT 4


@ -36,8 +36,10 @@ void __hyp_text __init_stage2_translation(void)
* Read the VMIDBits bits from ID_AA64MMFR1_EL1 and set the VS
* bit in VTCR_EL2.
*/
tmp = (read_sysreg(id_aa64mmfr1_el1) >> 4) & 0xf;
val |= (tmp == 2) ? VTCR_EL2_VS : 0;
tmp = (read_sysreg(id_aa64mmfr1_el1) >> ID_AA64MMFR1_VMIDBITS_SHIFT) & 0xf;
val |= (tmp == ID_AA64MMFR1_VMIDBITS_16) ?
VTCR_EL2_VS_16BIT :
VTCR_EL2_VS_8BIT;
write_sysreg(val, vtcr_el2);
}


@ -261,7 +261,7 @@ u32 au1xxx_dbdma_chan_alloc(u32 srcid, u32 destid,
au1x_dma_chan_t *cp;
/*
* We do the intialization on the first channel allocation.
* We do the initialization on the first channel allocation.
* We have to wait because of the interrupt handler initialization
* which can't be done successfully during board set up.
*/
@ -964,7 +964,7 @@ u32 au1xxx_dbdma_put_dscr(u32 chanid, au1x_ddma_desc_t *dscr)
dp->dscr_source1 = dscr->dscr_source1;
dp->dscr_cmd1 = dscr->dscr_cmd1;
nbytes = dscr->dscr_cmd1;
/* Allow the caller to specifiy if an interrupt is generated */
/* Allow the caller to specify if an interrupt is generated */
dp->dscr_cmd0 &= ~DSCR_CMD0_IE;
dp->dscr_cmd0 |= dscr->dscr_cmd0 | DSCR_CMD0_V;
ctp->chan_ptr->ddma_dbell = 0;


@ -503,15 +503,15 @@ int __init db1000_dev_setup(void)
if (board == BCSR_WHOAMI_DB1500) {
c0 = AU1500_GPIO2_INT;
c1 = AU1500_GPIO5_INT;
d0 = AU1500_GPIO0_INT;
d1 = AU1500_GPIO3_INT;
d0 = 0; /* GPIO number, NOT irq! */
d1 = 3; /* GPIO number, NOT irq! */
s0 = AU1500_GPIO1_INT;
s1 = AU1500_GPIO4_INT;
} else if (board == BCSR_WHOAMI_DB1100) {
c0 = AU1100_GPIO2_INT;
c1 = AU1100_GPIO5_INT;
d0 = AU1100_GPIO0_INT;
d1 = AU1100_GPIO3_INT;
d0 = 0; /* GPIO number, NOT irq! */
d1 = 3; /* GPIO number, NOT irq! */
s0 = AU1100_GPIO1_INT;
s1 = AU1100_GPIO4_INT;
@ -545,15 +545,15 @@ int __init db1000_dev_setup(void)
} else if (board == BCSR_WHOAMI_DB1000) {
c0 = AU1000_GPIO2_INT;
c1 = AU1000_GPIO5_INT;
d0 = AU1000_GPIO0_INT;
d1 = AU1000_GPIO3_INT;
d0 = 0; /* GPIO number, NOT irq! */
d1 = 3; /* GPIO number, NOT irq! */
s0 = AU1000_GPIO1_INT;
s1 = AU1000_GPIO4_INT;
platform_add_devices(db1000_devs, ARRAY_SIZE(db1000_devs));
} else if ((board == BCSR_WHOAMI_PB1500) ||
(board == BCSR_WHOAMI_PB1500R2)) {
c0 = AU1500_GPIO203_INT;
d0 = AU1500_GPIO201_INT;
d0 = 1; /* GPIO number, NOT irq! */
s0 = AU1500_GPIO202_INT;
twosocks = 0;
flashsize = 64;
@ -566,7 +566,7 @@ int __init db1000_dev_setup(void)
*/
} else if (board == BCSR_WHOAMI_PB1100) {
c0 = AU1100_GPIO11_INT;
d0 = AU1100_GPIO9_INT;
d0 = 9; /* GPIO number, NOT irq! */
s0 = AU1100_GPIO10_INT;
twosocks = 0;
flashsize = 64;
@ -583,7 +583,6 @@ int __init db1000_dev_setup(void)
} else
return 0; /* unknown board, no further dev setup to do */
irq_set_irq_type(d0, IRQ_TYPE_EDGE_BOTH);
irq_set_irq_type(c0, IRQ_TYPE_LEVEL_LOW);
irq_set_irq_type(s0, IRQ_TYPE_LEVEL_LOW);
@ -597,7 +596,6 @@ int __init db1000_dev_setup(void)
c0, d0, /*s0*/0, 0, 0);
if (twosocks) {
irq_set_irq_type(d1, IRQ_TYPE_EDGE_BOTH);
irq_set_irq_type(c1, IRQ_TYPE_LEVEL_LOW);
irq_set_irq_type(s1, IRQ_TYPE_LEVEL_LOW);


@ -514,7 +514,7 @@ static void __init db1550_devices(void)
AU1000_PCMCIA_MEM_PHYS_ADDR + 0x000400000 - 1,
AU1000_PCMCIA_IO_PHYS_ADDR,
AU1000_PCMCIA_IO_PHYS_ADDR + 0x000010000 - 1,
AU1550_GPIO3_INT, AU1550_GPIO0_INT,
AU1550_GPIO3_INT, 0,
/*AU1550_GPIO21_INT*/0, 0, 0);
db1x_register_pcmcia_socket(
@ -524,7 +524,7 @@ static void __init db1550_devices(void)
AU1000_PCMCIA_MEM_PHYS_ADDR + 0x004400000 - 1,
AU1000_PCMCIA_IO_PHYS_ADDR + 0x004000000,
AU1000_PCMCIA_IO_PHYS_ADDR + 0x004010000 - 1,
AU1550_GPIO5_INT, AU1550_GPIO1_INT,
AU1550_GPIO5_INT, 1,
/*AU1550_GPIO22_INT*/0, 0, 1);
platform_device_register(&db1550_nand_dev);


@ -26,8 +26,7 @@
#include "common.h"
#define AR71XX_BASE_FREQ 40000000
#define AR724X_BASE_FREQ 5000000
#define AR913X_BASE_FREQ 5000000
#define AR724X_BASE_FREQ 40000000
static struct clk *clks[3];
static struct clk_onecell_data clk_data = {
@ -103,8 +102,8 @@ static void __init ar724x_clocks_init(void)
div = ((pll >> AR724X_PLL_FB_SHIFT) & AR724X_PLL_FB_MASK);
freq = div * ref_rate;
div = ((pll >> AR724X_PLL_REF_DIV_SHIFT) & AR724X_PLL_REF_DIV_MASK);
freq *= div;
div = ((pll >> AR724X_PLL_REF_DIV_SHIFT) & AR724X_PLL_REF_DIV_MASK) * 2;
freq /= div;
cpu_rate = freq;
@ -123,39 +122,6 @@ static void __init ar724x_clocks_init(void)
clk_add_alias("uart", NULL, "ahb", NULL);
}
static void __init ar913x_clocks_init(void)
{
unsigned long ref_rate;
unsigned long cpu_rate;
unsigned long ddr_rate;
unsigned long ahb_rate;
u32 pll;
u32 freq;
u32 div;
ref_rate = AR913X_BASE_FREQ;
pll = ath79_pll_rr(AR913X_PLL_REG_CPU_CONFIG);
div = ((pll >> AR913X_PLL_FB_SHIFT) & AR913X_PLL_FB_MASK);
freq = div * ref_rate;
cpu_rate = freq;
div = ((pll >> AR913X_DDR_DIV_SHIFT) & AR913X_DDR_DIV_MASK) + 1;
ddr_rate = freq / div;
div = (((pll >> AR913X_AHB_DIV_SHIFT) & AR913X_AHB_DIV_MASK) + 1) * 2;
ahb_rate = cpu_rate / div;
ath79_add_sys_clkdev("ref", ref_rate);
clks[0] = ath79_add_sys_clkdev("cpu", cpu_rate);
clks[1] = ath79_add_sys_clkdev("ddr", ddr_rate);
clks[2] = ath79_add_sys_clkdev("ahb", ahb_rate);
clk_add_alias("wdt", NULL, "ahb", NULL);
clk_add_alias("uart", NULL, "ahb", NULL);
}
static void __init ar933x_clocks_init(void)
{
unsigned long ref_rate;
@ -443,10 +409,8 @@ void __init ath79_clocks_init(void)
{
if (soc_is_ar71xx())
ar71xx_clocks_init();
else if (soc_is_ar724x())
else if (soc_is_ar724x() || soc_is_ar913x())
ar724x_clocks_init();
else if (soc_is_ar913x())
ar913x_clocks_init();
else if (soc_is_ar933x())
ar933x_clocks_init();
else if (soc_is_ar934x())


@ -714,11 +714,11 @@ void bcm47xx_sprom_register_fallbacks(void)
{
#if defined(CONFIG_BCM47XX_SSB)
if (ssb_arch_register_fallback_sprom(&bcm47xx_get_sprom_ssb))
pr_warn("Failed to registered ssb SPROM handler\n");
pr_warn("Failed to register ssb SPROM handler\n");
#endif
#if defined(CONFIG_BCM47XX_BCMA)
if (bcma_arch_register_fallback_sprom(&bcm47xx_get_sprom_bcma))
pr_warn("Failed to registered bcma SPROM handler\n");
pr_warn("Failed to register bcma SPROM handler\n");
#endif
}


@ -39,10 +39,11 @@ vmlinuzobjs-$(CONFIG_SYS_SUPPORTS_ZBOOT_UART_PROM) += $(obj)/uart-prom.o
vmlinuzobjs-$(CONFIG_MIPS_ALCHEMY) += $(obj)/uart-alchemy.o
endif
vmlinuzobjs-$(CONFIG_KERNEL_XZ) += $(obj)/ashldi3.o
vmlinuzobjs-$(CONFIG_KERNEL_XZ) += $(obj)/ashldi3.o $(obj)/bswapsi.o
$(obj)/ashldi3.o: KBUILD_CFLAGS += -I$(srctree)/arch/mips/lib
$(obj)/ashldi3.c: $(srctree)/arch/mips/lib/ashldi3.c
extra-y += ashldi3.c bswapsi.c
$(obj)/ashldi3.o $(obj)/bswapsi.o: KBUILD_CFLAGS += -I$(srctree)/arch/mips/lib
$(obj)/ashldi3.c $(obj)/bswapsi.c: $(obj)/%.c: $(srctree)/arch/mips/lib/%.c
$(call cmd,shipped)
targets := $(notdir $(vmlinuzobjs-y))


@ -82,7 +82,7 @@
};
gisb-arb@400000 {
compatible = "brcm,bcm7400-gisb-arb";
compatible = "brcm,bcm7435-gisb-arb";
reg = <0x400000 0xdc>;
native-endian;
interrupt-parent = <&sun_l2_intc>;


@ -83,7 +83,7 @@
};
pll: pll-controller@18050000 {
compatible = "qca,ar9132-ppl",
compatible = "qca,ar9132-pll",
"qca,ar9130-pll";
reg = <0x18050000 0x20>;


@ -18,7 +18,7 @@
reg = <0x0 0x2000000>;
};
extosc: oscillator {
extosc: ref {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <40000000>;


@ -68,7 +68,7 @@ void __cvmx_interrupt_gmxx_rxx_int_en_enable(int index, int block)
gmx_rx_int_en.s.pause_drp = 1;
/* Skipping gmx_rx_int_en.s.reserved_16_18 */
/*gmx_rx_int_en.s.ifgerr = 1; */
/*gmx_rx_int_en.s.coldet = 1; // Collsion detect */
/*gmx_rx_int_en.s.coldet = 1; // Collision detect */
/*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
/*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
/*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
@ -89,7 +89,7 @@ void __cvmx_interrupt_gmxx_rxx_int_en_enable(int index, int block)
/*gmx_rx_int_en.s.phy_spd = 1; */
/*gmx_rx_int_en.s.phy_link = 1; */
/*gmx_rx_int_en.s.ifgerr = 1; */
/*gmx_rx_int_en.s.coldet = 1; // Collsion detect */
/*gmx_rx_int_en.s.coldet = 1; // Collision detect */
/*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
/*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
/*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
@ -112,7 +112,7 @@ void __cvmx_interrupt_gmxx_rxx_int_en_enable(int index, int block)
/*gmx_rx_int_en.s.phy_spd = 1; */
/*gmx_rx_int_en.s.phy_link = 1; */
/*gmx_rx_int_en.s.ifgerr = 1; */
/*gmx_rx_int_en.s.coldet = 1; // Collsion detect */
/*gmx_rx_int_en.s.coldet = 1; // Collision detect */
/*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
/*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
/*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
@ -134,7 +134,7 @@ void __cvmx_interrupt_gmxx_rxx_int_en_enable(int index, int block)
/*gmx_rx_int_en.s.phy_spd = 1; */
/*gmx_rx_int_en.s.phy_link = 1; */
/*gmx_rx_int_en.s.ifgerr = 1; */
/*gmx_rx_int_en.s.coldet = 1; // Collsion detect */
/*gmx_rx_int_en.s.coldet = 1; // Collision detect */
/*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
/*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
/*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
@ -156,7 +156,7 @@ void __cvmx_interrupt_gmxx_rxx_int_en_enable(int index, int block)
/*gmx_rx_int_en.s.phy_spd = 1; */
/*gmx_rx_int_en.s.phy_link = 1; */
/*gmx_rx_int_en.s.ifgerr = 1; */
/*gmx_rx_int_en.s.coldet = 1; // Collsion detect */
/*gmx_rx_int_en.s.coldet = 1; // Collision detect */
/*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
/*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
/*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
@ -179,7 +179,7 @@ void __cvmx_interrupt_gmxx_rxx_int_en_enable(int index, int block)
/*gmx_rx_int_en.s.phy_spd = 1; */
/*gmx_rx_int_en.s.phy_link = 1; */
/*gmx_rx_int_en.s.ifgerr = 1; */
/*gmx_rx_int_en.s.coldet = 1; // Collsion detect */
/*gmx_rx_int_en.s.coldet = 1; // Collision detect */
/*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
/*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
/*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
@ -209,7 +209,7 @@ void __cvmx_interrupt_gmxx_rxx_int_en_enable(int index, int block)
gmx_rx_int_en.s.pause_drp = 1;
/* Skipping gmx_rx_int_en.s.reserved_16_18 */
/*gmx_rx_int_en.s.ifgerr = 1; */
/*gmx_rx_int_en.s.coldet = 1; // Collsion detect */
/*gmx_rx_int_en.s.coldet = 1; // Collision detect */
/*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
/*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
/*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */


@ -189,7 +189,7 @@ void cvmx_pko_initialize_global(void)
/*
* Set the size of the PKO command buffers to an odd number of
* 64bit words. This allows the normal two word send to stay
* aligned and never span a comamnd word buffer.
* aligned and never span a command word buffer.
*/
config.u64 = 0;
config.s.pool = CVMX_FPA_OUTPUT_BUFFER_POOL;


@ -331,7 +331,7 @@ static int octeon_update_boot_vector(unsigned int cpu)
}
if (!(avail_coremask & (1 << coreid))) {
/* core not available, assume, that catched by simple-executive */
/* core not available, assume, that caught by simple-executive */
cvmx_write_csr(CVMX_CIU_PP_RST, 1 << coreid);
cvmx_write_csr(CVMX_CIU_PP_RST, 0);
}


@ -17,13 +17,12 @@ CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_CGROUPS=y
CONFIG_CGROUP_FREEZER=y
CONFIG_CGROUP_DEVICE=y
CONFIG_CPUSETS=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_MEMCG=y
CONFIG_MEMCG_KMEM=y
CONFIG_CGROUP_SCHED=y
CONFIG_CGROUP_FREEZER=y
CONFIG_CPUSETS=y
CONFIG_CGROUP_DEVICE=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_NAMESPACES=y
CONFIG_USER_NS=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
@ -52,6 +51,11 @@ CONFIG_DEVTMPFS=y
# CONFIG_ALLOW_DEV_COREDUMP is not set
CONFIG_DMA_CMA=y
CONFIG_CMA_SIZE_MBYTES=32
CONFIG_MTD=y
CONFIG_MTD_NAND=y
CONFIG_MTD_NAND_JZ4780=y
CONFIG_MTD_UBI=y
CONFIG_MTD_UBI_FASTMAP=y
CONFIG_NETDEVICES=y
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_CADENCE is not set
@ -103,7 +107,7 @@ CONFIG_PROC_KCORE=y
# CONFIG_PROC_PAGE_MONITOR is not set
CONFIG_TMPFS=y
CONFIG_CONFIGFS_FS=y
# CONFIG_MISC_FILESYSTEMS is not set
CONFIG_UBIFS_FS=y
# CONFIG_NETWORK_FILESYSTEMS is not set
CONFIG_NLS=y
CONFIG_NLS_CODEPAGE_437=y


@ -5,7 +5,7 @@
* Written by Ralf Baechle and Andreas Busse, modified for DECstation
* support by Paul Antoine and Harald Koerfgen.
*
* completly rewritten:
* completely rewritten:
* Copyright (C) 1998 Harald Koerfgen
*
* Rewritten extensively for controller-driven IRQ support


@ -9,7 +9,7 @@
* PROM library functions for acquiring/using memory descriptors given to us
* from the ARCS firmware. This is only used when CONFIG_ARC_MEMORY is set
* because on some machines like SGI IP27 the ARC memory configuration data
* completly bogus and alternate easier to use mechanisms are available.
* completely bogus and alternate easier to use mechanisms are available.
*/
#include <linux/init.h>
#include <linux/kernel.h>


@ -102,7 +102,7 @@ extern void cpu_probe(void);
extern void cpu_report(void);
extern const char *__cpu_name[];
#define cpu_name_string() __cpu_name[smp_processor_id()]
#define cpu_name_string() __cpu_name[raw_smp_processor_id()]
struct seq_file;
struct notifier_block;


@ -141,7 +141,7 @@ octeon_main_processor:
.endm
/*
* Do SMP slave processor setup necessary before we can savely execute C code.
* Do SMP slave processor setup necessary before we can safely execute C code.
*/
.macro smp_slave_setup
.endm


@ -16,7 +16,7 @@
.endm
/*
* Do SMP slave processor setup necessary before we can savely execute C code.
* Do SMP slave processor setup necessary before we can safely execute C code.
*/
.macro smp_slave_setup
.endm


@ -11,7 +11,7 @@
#define __ASM_MACH_IP27_IRQ_H
/*
* A hardwired interrupt number is completly stupid for this system - a
* A hardwired interrupt number is completely stupid for this system - a
* large configuration might have thousands if not tenthousands of
* interrupts.
*/


@ -81,7 +81,7 @@
.endm
/*
* Do SMP slave processor setup necessary before we can savely execute C code.
* Do SMP slave processor setup necessary before we can safely execute C code.
*/
.macro smp_slave_setup
GET_NASID_ASM t1


@ -27,7 +27,7 @@ enum jz_gpio_function {
/*
Usually a driver for a SoC component has to request several gpio pins and
configure them as funcion pins.
configure them as function pins.
jz_gpio_bulk_request can be used to ease this process.
Usually one would do something like:


@ -28,7 +28,7 @@ extern void __iomem *mips_cm_l2sync_base;
* This function returns the physical base address of the Coherence Manager
* global control block, or 0 if no Coherence Manager is present. It provides
* a default implementation which reads the CMGCRBase register where available,
* and may be overriden by platforms which determine this address in a
* and may be overridden by platforms which determine this address in a
* different way by defining a function with the same prototype except for the
* name mips_cm_phys_base (without underscores).
*/


@ -79,7 +79,7 @@ struct r2_decoder_table {
};
extern void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
extern void do_trap_or_bp(struct pt_regs *regs, unsigned int code, int si_code,
const char *str);
#ifndef CONFIG_MIPSR2_TO_R6_EMULATOR


@ -33,7 +33,7 @@
/* Packet buffers */
#define CVMX_FPA_PACKET_POOL (0)
#define CVMX_FPA_PACKET_POOL_SIZE CVMX_FPA_POOL_0_SIZE
/* Work queue entrys */
/* Work queue entries */
#define CVMX_FPA_WQE_POOL (1)
#define CVMX_FPA_WQE_POOL_SIZE CVMX_FPA_POOL_1_SIZE
/* PKO queue command buffers */


@ -189,7 +189,7 @@ static inline uint64_t cvmx_ptr_to_phys(void *ptr)
static inline void *cvmx_phys_to_ptr(uint64_t physical_address)
{
if (sizeof(void *) == 8) {
/* Just set the top bit, avoiding any TLB uglyness */
/* Just set the top bit, avoiding any TLB ugliness */
return CASTPTR(void,
CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
physical_address));


@ -269,16 +269,16 @@ typedef struct bridge_err_cmdword_s {
union {
u32 cmd_word;
struct {
u32 didn:4, /* Destination ID */
sidn:4, /* Source ID */
pactyp:4, /* Packet type */
tnum:5, /* Trans Number */
coh:1, /* Coh Transacti */
ds:2, /* Data size */
gbr:1, /* GBR enable */
vbpm:1, /* VBPM message */
u32 didn:4, /* Destination ID */
sidn:4, /* Source ID */
pactyp:4, /* Packet type */
tnum:5, /* Trans Number */
coh:1, /* Coh Transaction */
ds:2, /* Data size */
gbr:1, /* GBR enable */
vbpm:1, /* VBPM message */
error:1, /* Error occurred */
barr:1, /* Barrier op */
barr:1, /* Barrier op */
rsvd:8;
} berr_st;
} berr_un;


@ -147,7 +147,7 @@ struct hpc3_ethregs {
#define HPC3_EPCFG_P1 0x000f /* Cycles to spend in P1 state for PIO */
#define HPC3_EPCFG_P2 0x00f0 /* Cycles to spend in P2 state for PIO */
#define HPC3_EPCFG_P3 0x0f00 /* Cycles to spend in P3 state for PIO */
#define HPC3_EPCFG_TST 0x1000 /* Diagnistic ram test feature bit */
#define HPC3_EPCFG_TST 0x1000 /* Diagnostic ram test feature bit */
u32 _unused2[0x1000/4 - 8]; /* padding */


@ -144,7 +144,7 @@ struct linux_tinfo {
struct linux_vdirent {
ULONG namelen;
unsigned char attr;
char fname[32]; /* XXX imperical, should be a define */
char fname[32]; /* XXX empirical, should be a define */
};
/* Other stuff for files. */
@ -179,7 +179,7 @@ struct linux_finfo {
enum linux_devtypes dtype;
unsigned long namelen;
unsigned char attr;
char name[32]; /* XXX imperical, should be define */
char name[32]; /* XXX empirical, should be define */
};
/* This describes the vector containing function pointers to the ARC


@ -355,7 +355,7 @@ struct ioc3_etxd {
#define SSCR_PAUSE_STATE 0x40000000 /* sets when PAUSE takes effect */
#define SSCR_RESET 0x80000000 /* reset DMA channels */
/* all producer/comsumer pointers are the same bitfield */
/* all producer/consumer pointers are the same bitfield */
#define PROD_CONS_PTR_4K 0x00000ff8 /* for 4K buffers */
#define PROD_CONS_PTR_1K 0x000003f8 /* for 1K buffers */
#define PROD_CONS_PTR_OFF 3


@ -628,7 +628,7 @@ typedef union h1_icrbb_u {
/*
* Values for field imsgtype
*/
#define IIO_ICRB_IMSGT_XTALK 0 /* Incoming Meessage from Xtalk */
#define IIO_ICRB_IMSGT_XTALK 0 /* Incoming Message from Xtalk */
#define IIO_ICRB_IMSGT_BTE 1 /* Incoming message from BTE */
#define IIO_ICRB_IMSGT_SN0NET 2 /* Incoming message from SN0 net */
#define IIO_ICRB_IMSGT_CRB 3 /* Incoming message from CRB ??? */


@ -95,7 +95,7 @@ static inline bool eva_kernel_access(void)
}
/*
* Is a address valid? This does a straighforward calculation rather
* Is a address valid? This does a straightforward calculation rather
* than tests.
*
* Address valid if:


@ -381,16 +381,18 @@
#define __NR_membarrier (__NR_Linux + 358)
#define __NR_mlock2 (__NR_Linux + 359)
#define __NR_copy_file_range (__NR_Linux + 360)
#define __NR_preadv2 (__NR_Linux + 361)
#define __NR_pwritev2 (__NR_Linux + 362)
/*
* Offset of the last Linux o32 flavoured syscall
*/
#define __NR_Linux_syscalls 360
#define __NR_Linux_syscalls 362
#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
#define __NR_O32_Linux 4000
#define __NR_O32_Linux_syscalls 360
#define __NR_O32_Linux_syscalls 362
#if _MIPS_SIM == _MIPS_SIM_ABI64
@ -719,16 +721,18 @@
#define __NR_membarrier (__NR_Linux + 318)
#define __NR_mlock2 (__NR_Linux + 319)
#define __NR_copy_file_range (__NR_Linux + 320)
#define __NR_preadv2 (__NR_Linux + 321)
#define __NR_pwritev2 (__NR_Linux + 322)
/*
* Offset of the last Linux 64-bit flavoured syscall
*/
#define __NR_Linux_syscalls 320
#define __NR_Linux_syscalls 322
#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
#define __NR_64_Linux 5000
#define __NR_64_Linux_syscalls 320
#define __NR_64_Linux_syscalls 322
#if _MIPS_SIM == _MIPS_SIM_NABI32
@ -1061,15 +1065,17 @@
#define __NR_membarrier (__NR_Linux + 322)
#define __NR_mlock2 (__NR_Linux + 323)
#define __NR_copy_file_range (__NR_Linux + 324)
#define __NR_preadv2 (__NR_Linux + 325)
#define __NR_pwritev2 (__NR_Linux + 326)
/*
* Offset of the last N32 flavoured syscall
*/
#define __NR_Linux_syscalls 324
#define __NR_Linux_syscalls 326
#endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
#define __NR_N32_Linux 6000
#define __NR_N32_Linux_syscalls 324
#define __NR_N32_Linux_syscalls 326
#endif /* _UAPI_ASM_UNISTD_H */


@ -24,7 +24,7 @@ static char *cm2_tr[8] = {
"0x04", "cpc", "0x06", "0x07"
};
/* CM3 Tag ECC transation type */
/* CM3 Tag ECC transaction type */
static char *cm3_tr[16] = {
[0x0] = "ReqNoData",
[0x1] = "0x1",


@ -940,42 +940,42 @@ repeat:
switch (rt) {
case tgei_op:
if ((long)regs->regs[rs] >= MIPSInst_SIMM(inst))
do_trap_or_bp(regs, 0, "TGEI");
do_trap_or_bp(regs, 0, 0, "TGEI");
MIPS_R2_STATS(traps);
break;
case tgeiu_op:
if (regs->regs[rs] >= MIPSInst_UIMM(inst))
do_trap_or_bp(regs, 0, "TGEIU");
do_trap_or_bp(regs, 0, 0, "TGEIU");
MIPS_R2_STATS(traps);
break;
case tlti_op:
if ((long)regs->regs[rs] < MIPSInst_SIMM(inst))
do_trap_or_bp(regs, 0, "TLTI");
do_trap_or_bp(regs, 0, 0, "TLTI");
MIPS_R2_STATS(traps);
break;
case tltiu_op:
if (regs->regs[rs] < MIPSInst_UIMM(inst))
do_trap_or_bp(regs, 0, "TLTIU");
do_trap_or_bp(regs, 0, 0, "TLTIU");
MIPS_R2_STATS(traps);
break;
case teqi_op:
if (regs->regs[rs] == MIPSInst_SIMM(inst))
do_trap_or_bp(regs, 0, "TEQI");
do_trap_or_bp(regs, 0, 0, "TEQI");
MIPS_R2_STATS(traps);
break;
case tnei_op:
if (regs->regs[rs] != MIPSInst_SIMM(inst))
do_trap_or_bp(regs, 0, "TNEI");
do_trap_or_bp(regs, 0, 0, "TNEI");
MIPS_R2_STATS(traps);


@ -109,9 +109,10 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
struct module *me)
{
Elf_Mips_Rela *rel = (void *) sechdrs[relsec].sh_addr;
int (*handler)(struct module *me, u32 *location, Elf_Addr v);
Elf_Sym *sym;
u32 *location;
unsigned int i;
unsigned int i, type;
Elf_Addr v;
int res;
@ -134,9 +135,21 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
return -ENOENT;
}
v = sym->st_value + rel[i].r_addend;
type = ELF_MIPS_R_TYPE(rel[i]);
res = reloc_handlers_rela[ELF_MIPS_R_TYPE(rel[i])](me, location, v);
if (type < ARRAY_SIZE(reloc_handlers_rela))
handler = reloc_handlers_rela[type];
else
handler = NULL;
if (!handler) {
pr_err("%s: Unknown relocation type %u\n",
me->name, type);
return -EINVAL;
}
v = sym->st_value + rel[i].r_addend;
res = handler(me, location, v);
if (res)
return res;
}


@ -197,9 +197,10 @@ int apply_relocate(Elf_Shdr *sechdrs, const char *strtab,
struct module *me)
{
Elf_Mips_Rel *rel = (void *) sechdrs[relsec].sh_addr;
int (*handler)(struct module *me, u32 *location, Elf_Addr v);
Elf_Sym *sym;
u32 *location;
unsigned int i;
unsigned int i, type;
Elf_Addr v;
int res;
@ -223,9 +224,21 @@ int apply_relocate(Elf_Shdr *sechdrs, const char *strtab,
return -ENOENT;
}
v = sym->st_value;
type = ELF_MIPS_R_TYPE(rel[i]);
res = reloc_handlers_rel[ELF_MIPS_R_TYPE(rel[i])](me, location, v);
if (type < ARRAY_SIZE(reloc_handlers_rel))
handler = reloc_handlers_rel[type];
else
handler = NULL;
if (!handler) {
pr_err("%s: Unknown relocation type %u\n",
me->name, type);
return -EINVAL;
}
v = sym->st_value;
res = handler(me, location, v);
if (res)
return res;
}


@ -530,7 +530,7 @@ static void mipspmu_enable(struct pmu *pmu)
/*
* MIPS performance counters can be per-TC. The control registers can
* not be directly accessed accross CPUs. Hence if we want to do global
* not be directly accessed across CPUs. Hence if we want to do global
* control, we need cross CPU calls. on_each_cpu() can help us, but we
* can not make sure this function is called with interrupts enabled. So
* here we pause local counters and then grab a rwlock and leave the


@ -472,7 +472,7 @@ static void * __init cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
/*
* Disable all but self interventions. The load from COHCTL is defined
* by the interAptiv & proAptiv SUMs as ensuring that the operation
* resulting from the preceeding store is complete.
* resulting from the preceding store is complete.
*/
uasm_i_addiu(&p, t0, zero, 1 << cpu_data[cpu].core);
uasm_i_sw(&p, t0, 0, r_pcohctl);


@ -615,7 +615,7 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
* allows us to only worry about whether an FP mode switch is in
* progress when FP is first used in a tasks time slice. Pretty much all
* of the mode switch overhead can thus be confined to cases where mode
* switches are actually occuring. That is, to here. However for the
* switches are actually occurring. That is, to here. However for the
* thread performing the mode switch it may take a while...
*/
if (num_online_cpus() > 1) {


@ -596,3 +596,5 @@ EXPORT(sys_call_table)
PTR sys_membarrier
PTR sys_mlock2
PTR sys_copy_file_range /* 4360 */
PTR sys_preadv2
PTR sys_pwritev2


@ -434,4 +434,6 @@ EXPORT(sys_call_table)
PTR sys_membarrier
PTR sys_mlock2
PTR sys_copy_file_range /* 5320 */
PTR sys_preadv2
PTR sys_pwritev2
.size sys_call_table,.-sys_call_table


@ -424,4 +424,6 @@ EXPORT(sysn32_call_table)
PTR sys_membarrier
PTR sys_mlock2
PTR sys_copy_file_range
PTR compat_sys_preadv2 /* 6325 */
PTR compat_sys_pwritev2
.size sysn32_call_table,.-sysn32_call_table


@ -579,4 +579,6 @@ EXPORT(sys32_call_table)
PTR sys_membarrier
PTR sys_mlock2
PTR sys_copy_file_range /* 4360 */
PTR compat_sys_preadv2
PTR compat_sys_pwritev2
.size sys32_call_table,.-sys32_call_table


@ -243,6 +243,18 @@ static int __init mips_smp_ipi_init(void)
struct irq_domain *ipidomain;
struct device_node *node;
/*
* In some cases like qemu-malta, it is desired to try SMP with
* a single core. Qemu-malta has no GIC, so an attempt to set any IPIs
* would cause a BUG_ON() to be triggered since there's no ipidomain.
*
* Since for a single core system IPIs aren't required really, skip the
* initialisation which should generally keep any such configurations
happy and only fail hard when trying to truly run SMP.
*/
if (cpumask_weight(cpu_possible_mask) == 1)
return 0;
node = of_irq_find_parent(of_root);
ipidomain = irq_find_matching_host(node, DOMAIN_BUS_IPI);


@ -56,6 +56,7 @@
#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/siginfo.h>
#include <asm/tlbdebug.h>
#include <asm/traps.h>
#include <asm/uaccess.h>
@ -871,7 +872,7 @@ out:
exception_exit(prev_state);
}
void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
void do_trap_or_bp(struct pt_regs *regs, unsigned int code, int si_code,
const char *str)
{
siginfo_t info = { 0 };
@ -928,7 +929,13 @@ void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
default:
scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
die_if_kernel(b, regs);
force_sig(SIGTRAP, current);
if (si_code) {
info.si_signo = SIGTRAP;
info.si_code = si_code;
force_sig_info(SIGTRAP, &info, current);
} else {
force_sig(SIGTRAP, current);
}
}
}
@ -1012,7 +1019,7 @@ asmlinkage void do_bp(struct pt_regs *regs)
break;
}
do_trap_or_bp(regs, bcode, "Break");
do_trap_or_bp(regs, bcode, TRAP_BRKPT, "Break");
out:
set_fs(seg);
@ -1054,7 +1061,7 @@ asmlinkage void do_tr(struct pt_regs *regs)
tcode = (opcode >> 6) & ((1 << 10) - 1);
}
do_trap_or_bp(regs, tcode, "Trap");
do_trap_or_bp(regs, tcode, 0, "Trap");
out:
set_fs(seg);
@ -1115,19 +1122,7 @@ no_r2_instr:
if (unlikely(compute_return_epc(regs) < 0))
goto out;
if (get_isa16_mode(regs->cp0_epc)) {
unsigned short mmop[2] = { 0 };
if (unlikely(get_user(mmop[0], (u16 __user *)epc + 0) < 0))
status = SIGSEGV;
if (unlikely(get_user(mmop[1], (u16 __user *)epc + 1) < 0))
status = SIGSEGV;
opcode = mmop[0];
opcode = (opcode << 16) | mmop[1];
if (status < 0)
status = simulate_rdhwr_mm(regs, opcode);
} else {
if (!get_isa16_mode(regs->cp0_epc)) {
if (unlikely(get_user(opcode, epc) < 0))
status = SIGSEGV;
@ -1142,6 +1137,18 @@ no_r2_instr:
if (status < 0)
status = simulate_fp(regs, opcode, old_epc, old31);
} else if (cpu_has_mmips) {
unsigned short mmop[2] = { 0 };
if (unlikely(get_user(mmop[0], (u16 __user *)epc + 0) < 0))
status = SIGSEGV;
if (unlikely(get_user(mmop[1], (u16 __user *)epc + 1) < 0))
status = SIGSEGV;
opcode = mmop[0];
opcode = (opcode << 16) | mmop[1];
if (status < 0)
status = simulate_rdhwr_mm(regs, opcode);
}
if (status < 0)
@ -1492,6 +1499,7 @@ asmlinkage void do_mdmx(struct pt_regs *regs)
*/
asmlinkage void do_watch(struct pt_regs *regs)
{
siginfo_t info = { .si_signo = SIGTRAP, .si_code = TRAP_HWBKPT };
enum ctx_state prev_state;
u32 cause;
@ -1512,7 +1520,7 @@ asmlinkage void do_watch(struct pt_regs *regs)
if (test_tsk_thread_flag(current, TIF_LOAD_WATCH)) {
mips_read_watch_registers();
local_irq_enable();
force_sig(SIGTRAP, current);
force_sig_info(SIGTRAP, &info, current);
} else {
mips_clear_watch_registers();
local_irq_enable();
@ -2214,7 +2222,7 @@ void __init trap_init(void)
/*
* Copy the generic exception handlers to their final destination.
* This will be overriden later as suitable for a particular
* This will be overridden later as suitable for a particular
* configuration.
*/
set_handler(0x180, &except_vec3_generic, 0x80);


@ -885,7 +885,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
{
union mips_instruction insn;
unsigned long value;
unsigned int res;
unsigned int res, preempted;
unsigned long origpc;
unsigned long orig31;
void __user *fault_addr = NULL;
@ -1226,27 +1226,36 @@ static void emulate_load_store_insn(struct pt_regs *regs,
if (!access_ok(VERIFY_READ, addr, sizeof(*fpr)))
goto sigbus;
/*
* Disable preemption to avoid a race between copying
* state from userland, migrating to another CPU and
* updating the hardware vector register below.
*/
preempt_disable();
do {
/*
* If we have live MSA context keep track of
* whether we get preempted in order to avoid
* the register context we load being clobbered
* by the live context as it's saved during
* preemption. If we don't have live context
* then it can't be saved to clobber the value
* we load.
*/
preempted = test_thread_flag(TIF_USEDMSA);
res = __copy_from_user_inatomic(fpr, addr,
sizeof(*fpr));
if (res)
goto fault;
res = __copy_from_user_inatomic(fpr, addr,
sizeof(*fpr));
if (res)
goto fault;
/*
* Update the hardware register if it is in use by the
* task in this quantum, in order to avoid having to
* save & restore the whole vector context.
*/
if (test_thread_flag(TIF_USEDMSA))
write_msa_wr(wd, fpr, df);
preempt_enable();
/*
* Update the hardware register if it is in use
* by the task in this quantum, in order to
* avoid having to save & restore the whole
* vector context.
*/
preempt_disable();
if (test_thread_flag(TIF_USEDMSA)) {
write_msa_wr(wd, fpr, df);
preempted = 0;
}
preempt_enable();
} while (preempted);
break;
case msa_st_op:


@ -632,7 +632,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);
/* Alocate new kernel and user ASIDs if needed */
/* Allocate new kernel and user ASIDs if needed */
local_irq_save(flags);


@ -500,7 +500,7 @@ static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10));
/*
* Setup IntCtl defaults, compatibilty mode for timer interrupts (HW5)
* Setup IntCtl defaults, compatibility mode for timer interrupts (HW5)
*/
kvm_write_c0_guest_intctl(cop0, 0xFC000000);


@ -97,7 +97,7 @@ union ieee754dp ieee754dp_format(int sn, int xe, u64 xm)
{
assert(xm); /* we don't gen exact zeros (probably should) */
assert((xm >> (DP_FBITS + 1 + 3)) == 0); /* no execess */
assert((xm >> (DP_FBITS + 1 + 3)) == 0); /* no excess */
assert(xm & (DP_HIDDEN_BIT << 3));
if (xe < DP_EMIN) {
@ -165,7 +165,7 @@ union ieee754dp ieee754dp_format(int sn, int xe, u64 xm)
/* strip grs bits */
xm >>= 3;
assert((xm >> (DP_FBITS + 1)) == 0); /* no execess */
assert((xm >> (DP_FBITS + 1)) == 0); /* no excess */
assert(xe >= DP_EMIN);
if (xe > DP_EMAX) {
@ -198,7 +198,7 @@ union ieee754dp ieee754dp_format(int sn, int xe, u64 xm)
ieee754_setcx(IEEE754_UNDERFLOW);
return builddp(sn, DP_EMIN - 1 + DP_EBIAS, xm);
} else {
assert((xm >> (DP_FBITS + 1)) == 0); /* no execess */
assert((xm >> (DP_FBITS + 1)) == 0); /* no excess */
assert(xm & DP_HIDDEN_BIT);
return builddp(sn, xe + DP_EBIAS, xm & ~DP_HIDDEN_BIT);


@ -97,7 +97,7 @@ union ieee754sp ieee754sp_format(int sn, int xe, unsigned xm)
{
assert(xm); /* we don't gen exact zeros (probably should) */
assert((xm >> (SP_FBITS + 1 + 3)) == 0); /* no execess */
assert((xm >> (SP_FBITS + 1 + 3)) == 0); /* no excess */
assert(xm & (SP_HIDDEN_BIT << 3));
if (xe < SP_EMIN) {
@ -163,7 +163,7 @@ union ieee754sp ieee754sp_format(int sn, int xe, unsigned xm)
/* strip grs bits */
xm >>= 3;
assert((xm >> (SP_FBITS + 1)) == 0); /* no execess */
assert((xm >> (SP_FBITS + 1)) == 0); /* no excess */
assert(xe >= SP_EMIN);
if (xe > SP_EMAX) {
@ -196,7 +196,7 @@ union ieee754sp ieee754sp_format(int sn, int xe, unsigned xm)
ieee754_setcx(IEEE754_UNDERFLOW);
return buildsp(sn, SP_EMIN - 1 + SP_EBIAS, xm);
} else {
assert((xm >> (SP_FBITS + 1)) == 0); /* no execess */
assert((xm >> (SP_FBITS + 1)) == 0); /* no excess */
assert(xm & SP_HIDDEN_BIT);
return buildsp(sn, xe + SP_EBIAS, xm & ~SP_HIDDEN_BIT);


@ -158,7 +158,7 @@ static inline int __init indy_sc_probe(void)
return 1;
}
/* XXX Check with wje if the Indy caches can differenciate between
/* XXX Check with wje if the Indy caches can differentiate between
writeback + invalidate and just invalidate. */
static struct bcache_ops indy_sc_ops = {
.bc_enable = indy_sc_enable,


@ -19,6 +19,7 @@
#include <asm/cpu.h>
#include <asm/cpu-type.h>
#include <asm/bootinfo.h>
#include <asm/hazards.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
@ -486,6 +487,10 @@ static void r4k_tlb_configure(void)
* be set to fixed-size pages.
*/
write_c0_pagemask(PM_DEFAULT_MASK);
back_to_back_c0_hazard();
if (read_c0_pagemask() != PM_DEFAULT_MASK)
panic("MMU doesn't support PAGE_SIZE=0x%lx", PAGE_SIZE);
write_c0_wired(0);
if (current_cpu_type() == CPU_R10000 ||
current_cpu_type() == CPU_R12000 ||


@ -12,7 +12,7 @@
* Copyright (C) 2011 MIPS Technologies, Inc.
*
* ... and the days got worse and worse and now you see
* I've gone completly out of my mind.
* I've gone completely out of my mind.
*
* They're coming to take me a away haha
* they're coming to take me a away hoho hihi haha


@ -7,7 +7,7 @@
* Copyright (C) 2000 by Silicon Graphics, Inc.
* Copyright (C) 2004 by Christoph Hellwig
*
* On SGI IP27 the ARC memory configuration data is completly bogus but
* On SGI IP27 the ARC memory configuration data is completely bogus but
* alternate easier to use mechanisms are available.
*/
#include <linux/init.h>


@ -319,7 +319,7 @@ void flush_dcache_page(struct page *page)
if (!mapping)
return;
pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
pgoff = page->index;
/* We have carefully arranged in arch_get_unmapped_area() that
* *any* mappings of a file are always congruently mapped (whether


@ -22,7 +22,7 @@
#include <linux/swap.h>
#include <linux/unistd.h>
#include <linux/nodemask.h> /* for node_online_map */
#include <linux/pagemap.h> /* for release_pages and page_cache_release */
#include <linux/pagemap.h> /* for release_pages */
#include <linux/compat.h>
#include <asm/pgalloc.h>


@ -732,8 +732,8 @@ spufs_fill_super(struct super_block *sb, void *data, int silent)
return -ENOMEM;
sb->s_maxbytes = MAX_LFS_FILESIZE;
sb->s_blocksize = PAGE_CACHE_SIZE;
sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
sb->s_blocksize = PAGE_SIZE;
sb->s_blocksize_bits = PAGE_SHIFT;
sb->s_magic = SPUFS_MAGIC;
sb->s_op = &s_ops;
sb->s_fs_info = info;


@ -278,8 +278,8 @@ static int hypfs_fill_super(struct super_block *sb, void *data, int silent)
sbi->uid = current_uid();
sbi->gid = current_gid();
sb->s_fs_info = sbi;
sb->s_blocksize = PAGE_CACHE_SIZE;
sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
sb->s_blocksize = PAGE_SIZE;
sb->s_blocksize_bits = PAGE_SHIFT;
sb->s_magic = HYPFS_MAGIC;
sb->s_op = &hypfs_s_ops;
if (hypfs_parse_options(data, sb))


@ -23,7 +23,7 @@
/**
* gmap_alloc - allocate a guest address space
* @mm: pointer to the parent mm_struct
* @limit: maximum size of the gmap address space
* @limit: maximum address of the gmap address space
*
* Returns a guest address space structure.
*/
@ -292,7 +292,7 @@ int gmap_map_segment(struct gmap *gmap, unsigned long from,
if ((from | to | len) & (PMD_SIZE - 1))
return -EINVAL;
if (len == 0 || from + len < from || to + len < to ||
from + len > TASK_MAX_SIZE || to + len > gmap->asce_end)
from + len - 1 > TASK_MAX_SIZE || to + len - 1 > gmap->asce_end)
return -EINVAL;
flush = 0;


@ -369,7 +369,7 @@ static int amd_pmu_cpu_prepare(int cpu)
WARN_ON_ONCE(cpuc->amd_nb);
if (boot_cpu_data.x86_max_cores < 2)
if (!x86_pmu.amd_nb_constraints)
return NOTIFY_OK;
cpuc->amd_nb = amd_alloc_nb(cpu);
@ -388,7 +388,7 @@ static void amd_pmu_cpu_starting(int cpu)
cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;
if (boot_cpu_data.x86_max_cores < 2)
if (!x86_pmu.amd_nb_constraints)
return;
nb_id = amd_get_nb_id(cpu);
@ -414,7 +414,7 @@ static void amd_pmu_cpu_dead(int cpu)
{
struct cpu_hw_events *cpuhw;
if (boot_cpu_data.x86_max_cores < 2)
if (!x86_pmu.amd_nb_constraints)
return;
cpuhw = &per_cpu(cpu_hw_events, cpu);
@ -648,6 +648,8 @@ static __initconst const struct x86_pmu amd_pmu = {
.cpu_prepare = amd_pmu_cpu_prepare,
.cpu_starting = amd_pmu_cpu_starting,
.cpu_dead = amd_pmu_cpu_dead,
.amd_nb_constraints = 1,
};
static int __init amd_core_pmu_init(void)
@ -674,6 +676,11 @@ static int __init amd_core_pmu_init(void)
x86_pmu.eventsel = MSR_F15H_PERF_CTL;
x86_pmu.perfctr = MSR_F15H_PERF_CTR;
x86_pmu.num_counters = AMD64_NUM_COUNTERS_CORE;
/*
* AMD Core perfctr has separate MSRs for the NB events, see
* the amd/uncore.c driver.
*/
x86_pmu.amd_nb_constraints = 0;
pr_cont("core perfctr, ");
return 0;
@ -693,6 +700,14 @@ __init int amd_pmu_init(void)
if (ret)
return ret;
if (num_possible_cpus() == 1) {
/*
* No point in allocating data structures to serialize
* against other CPUs, when there is only the one CPU.
*/
x86_pmu.amd_nb_constraints = 0;
}
/* Events are common for all AMDs */
memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
sizeof(hw_cache_event_ids));

View File

@ -28,10 +28,46 @@ static u32 ibs_caps;
#define IBS_FETCH_CONFIG_MASK (IBS_FETCH_RAND_EN | IBS_FETCH_MAX_CNT)
#define IBS_OP_CONFIG_MASK IBS_OP_MAX_CNT
/*
* IBS states:
*
* ENABLED; tracks the pmu::add(), pmu::del() state; when set, the counter is taken
* and any further add()s must fail.
*
* STARTED/STOPPING/STOPPED; deal with pmu::start(), pmu::stop() state but are
* complicated by the fact that the IBS hardware can send late NMIs (i.e. after
* we've cleared the EN bit).
*
* In order to consume these late NMIs we have the STOPPED state; any NMI that
* happens after we've cleared the EN state will clear this bit and report the
* NMI handled (this is fundamentally racy in the face of multiple NMI sources;
* someone else can consume our bit and our NMI will go unhandled).
*
* And since we cannot set/clear this separate bit together with the EN bit,
* there are races; if we cleared STARTED early, an NMI could land in
* between clearing STARTED and clearing the EN bit (in fact multiple NMIs
* could happen if the period is small enough), and consume our STOPPED bit
* and trigger streams of unhandled NMIs.
*
* If, however, we clear STARTED late, an NMI can hit between clearing the
* EN bit and clearing STARTED, still see STARTED set and process the event.
* If that event has the VALID bit clear, we bail properly, but this
* is not a given. With VALID set we can end up calling pmu::stop() again
* (the throttle logic) and trigger the WARNs in there.
*
* So what we do is set STOPPING before clearing EN to avoid the pmu::stop()
* nesting, and clear STARTED late, so that we have a well-defined state over
* the clearing of the EN bit.
*
* XXX: we could probably be using !atomic bitops for all this.
*/
enum ibs_states {
IBS_ENABLED = 0,
IBS_STARTED = 1,
IBS_STOPPING = 2,
IBS_STOPPED = 3,
IBS_MAX_STATES,
};
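Taken together with the comment above, the stop-side ordering can be restated compactly. The sketch below is a simplified paraphrase of the perf_ibs_stop() flow shown further down in this diff, not a drop-in replacement:

/* Simplified paraphrase of the stop ordering:
 * 1. STOPPING first, so a nested pmu::stop() (throttling) bails out early;
 * 2. STOPPED before clearing EN, so a late NMI finds it and is consumed;
 * 3. STARTED cleared last, giving NMIs a well-defined state throughout.
 */
if (test_and_set_bit(IBS_STOPPING, pcpu->state))
	return;					/* already stopping */
set_bit(IBS_STOPPED, pcpu->state);		/* visible before EN clears */
perf_ibs_disable_event(perf_ibs, hwc, config);	/* clears the EN bit */
clear_bit(IBS_STARTED, pcpu->state);		/* cleared late */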
@ -377,11 +413,10 @@ static void perf_ibs_start(struct perf_event *event, int flags)
perf_ibs_set_period(perf_ibs, hwc, &period);
/*
* Set STARTED before enabling the hardware, such that
* a subsequent NMI must observe it. Then clear STOPPING
* such that we don't consume NMIs by accident.
* Set STARTED before enabling the hardware, such that a subsequent NMI
* must observe it.
*/
set_bit(IBS_STARTED, pcpu->state);
clear_bit(IBS_STOPPING, pcpu->state);
perf_ibs_enable_event(perf_ibs, hwc, period >> 4);
@ -396,6 +431,9 @@ static void perf_ibs_stop(struct perf_event *event, int flags)
u64 config;
int stopping;
if (test_and_set_bit(IBS_STOPPING, pcpu->state))
return;
stopping = test_bit(IBS_STARTED, pcpu->state);
if (!stopping && (hwc->state & PERF_HES_UPTODATE))
@ -405,12 +443,12 @@ static void perf_ibs_stop(struct perf_event *event, int flags)
if (stopping) {
/*
* Set STOPPING before disabling the hardware, such that it
* Set STOPPED before disabling the hardware, such that it
* must be visible to NMIs the moment we clear the EN bit,
* at which point we can generate an !VALID sample which
* we need to consume.
*/
set_bit(IBS_STOPPING, pcpu->state);
set_bit(IBS_STOPPED, pcpu->state);
perf_ibs_disable_event(perf_ibs, hwc, config);
/*
* Clear STARTED after disabling the hardware; if it were
@ -556,7 +594,7 @@ fail:
* with samples that even have the valid bit cleared.
* Mark all this NMIs as handled.
*/
if (test_and_clear_bit(IBS_STOPPING, pcpu->state))
if (test_and_clear_bit(IBS_STOPPED, pcpu->state))
return 1;
return 0;

View File

@ -607,6 +607,11 @@ struct x86_pmu {
*/
atomic_t lbr_exclusive[x86_lbr_exclusive_max];
/*
* AMD bits
*/
unsigned int amd_nb_constraints : 1;
/*
* Extra registers for events
*/
@ -795,6 +800,9 @@ ssize_t intel_event_sysfs_show(char *page, u64 config);
struct attribute **merge_attr(struct attribute **a, struct attribute **b);
ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr,
char *page);
#ifdef CONFIG_CPU_SUP_AMD
int amd_pmu_init(void);
@ -925,9 +933,6 @@ int p6_pmu_init(void);
int knc_pmu_init(void);
ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr,
char *page);
static inline int is_ht_workaround_enabled(void)
{
return !!(x86_pmu.flags & PMU_FL_EXCL_ENABLED);

View File

@ -43,7 +43,7 @@
#define KVM_PIO_PAGE_OFFSET 1
#define KVM_COALESCED_MMIO_PAGE_OFFSET 2
#define KVM_HALT_POLL_NS_DEFAULT 500000
#define KVM_HALT_POLL_NS_DEFAULT 400000
#define KVM_IRQCHIP_NUM_PINS KVM_IOAPIC_NUM_PINS

View File

@ -167,6 +167,14 @@
#define MSR_PKG_C9_RESIDENCY 0x00000631
#define MSR_PKG_C10_RESIDENCY 0x00000632
/* Interrupt Response Limit */
#define MSR_PKGC3_IRTL 0x0000060a
#define MSR_PKGC6_IRTL 0x0000060b
#define MSR_PKGC7_IRTL 0x0000060c
#define MSR_PKGC8_IRTL 0x00000633
#define MSR_PKGC9_IRTL 0x00000634
#define MSR_PKGC10_IRTL 0x00000635
/* Run Time Average Power Limiting (RAPL) Interface */
#define MSR_RAPL_POWER_UNIT 0x00000606
@ -190,6 +198,7 @@
#define MSR_PP1_ENERGY_STATUS 0x00000641
#define MSR_PP1_POLICY 0x00000642
/* Config TDP MSRs */
#define MSR_CONFIG_TDP_NOMINAL 0x00000648
#define MSR_CONFIG_TDP_LEVEL_1 0x00000649
#define MSR_CONFIG_TDP_LEVEL_2 0x0000064A
@ -210,13 +219,6 @@
#define MSR_GFX_PERF_LIMIT_REASONS 0x000006B0
#define MSR_RING_PERF_LIMIT_REASONS 0x000006B1
/* Config TDP MSRs */
#define MSR_CONFIG_TDP_NOMINAL 0x00000648
#define MSR_CONFIG_TDP_LEVEL1 0x00000649
#define MSR_CONFIG_TDP_LEVEL2 0x0000064A
#define MSR_CONFIG_TDP_CONTROL 0x0000064B
#define MSR_TURBO_ACTIVATION_RATIO 0x0000064C
/* Hardware P state interface */
#define MSR_PPERF 0x0000064e
#define MSR_PERF_LIMIT_REASONS 0x0000064f

View File

@ -132,8 +132,6 @@ struct cpuinfo_x86 {
u16 logical_proc_id;
/* Core id: */
u16 cpu_core_id;
/* Compute unit id */
u8 compute_unit_id;
/* Index into per_cpu list: */
u16 cpu_index;
u32 microcode;

View File

@ -155,6 +155,7 @@ static inline int wbinvd_on_all_cpus(void)
wbinvd();
return 0;
}
#define smp_num_siblings 1
#endif /* CONFIG_SMP */
extern unsigned disabled_cpus;

View File

@ -276,11 +276,9 @@ static inline bool is_ia32_task(void)
*/
#define force_iret() set_thread_flag(TIF_NOTIFY_RESUME)
#endif /* !__ASSEMBLY__ */
#ifndef __ASSEMBLY__
extern void arch_task_cache_init(void);
extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
extern void arch_release_task_struct(struct task_struct *tsk);
#endif
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_X86_THREAD_INFO_H */

View File

@ -170,15 +170,13 @@ int amd_get_subcaches(int cpu)
{
struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
unsigned int mask;
int cuid;
if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
return 0;
pci_read_config_dword(link, 0x1d4, &mask);
cuid = cpu_data(cpu).compute_unit_id;
return (mask >> (4 * cuid)) & 0xf;
return (mask >> (4 * cpu_data(cpu).cpu_core_id)) & 0xf;
}
int amd_set_subcaches(int cpu, unsigned long mask)
@ -204,7 +202,7 @@ int amd_set_subcaches(int cpu, unsigned long mask)
pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
}
cuid = cpu_data(cpu).compute_unit_id;
cuid = cpu_data(cpu).cpu_core_id;
mask <<= 4 * cuid;
mask |= (0xf ^ (1 << cuid)) << 26;

View File

@ -300,7 +300,6 @@ static int nearby_node(int apicid)
#ifdef CONFIG_SMP
static void amd_get_topology(struct cpuinfo_x86 *c)
{
u32 cores_per_cu = 1;
u8 node_id;
int cpu = smp_processor_id();
@ -313,8 +312,8 @@ static void amd_get_topology(struct cpuinfo_x86 *c)
/* get compute unit information */
smp_num_siblings = ((ebx >> 8) & 3) + 1;
c->compute_unit_id = ebx & 0xff;
cores_per_cu += ((ebx >> 8) & 3);
c->x86_max_cores /= smp_num_siblings;
c->cpu_core_id = ebx & 0xff;
} else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
u64 value;
@ -325,19 +324,16 @@ static void amd_get_topology(struct cpuinfo_x86 *c)
/* fixup multi-node processor information */
if (nodes_per_socket > 1) {
u32 cores_per_node;
u32 cus_per_node;
set_cpu_cap(c, X86_FEATURE_AMD_DCM);
cores_per_node = c->x86_max_cores / nodes_per_socket;
cus_per_node = cores_per_node / cores_per_cu;
cus_per_node = c->x86_max_cores / nodes_per_socket;
/* store NodeID, use llc_shared_map to store sibling info */
per_cpu(cpu_llc_id, cpu) = node_id;
/* core id has to be in the [0 .. cores_per_node - 1] range */
c->cpu_core_id %= cores_per_node;
c->compute_unit_id %= cus_per_node;
c->cpu_core_id %= cus_per_node;
}
}
#endif

View File

@ -18,4 +18,6 @@ const char *const x86_power_flags[32] = {
"", /* tsc invariant mapped to constant_tsc */
"cpb", /* core performance boost */
"eff_freq_ro", /* Readonly aperf/mperf */
"proc_feedback", /* processor feedback interface */
"acc_power", /* accumulated power mechanism */
};

View File

@ -146,31 +146,6 @@ int default_check_phys_apicid_present(int phys_apicid)
struct boot_params boot_params;
/*
* Machine setup..
*/
static struct resource data_resource = {
.name = "Kernel data",
.start = 0,
.end = 0,
.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};
static struct resource code_resource = {
.name = "Kernel code",
.start = 0,
.end = 0,
.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};
static struct resource bss_resource = {
.name = "Kernel bss",
.start = 0,
.end = 0,
.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};
#ifdef CONFIG_X86_32
/* cpu data as detected by the assembly code in head.S */
struct cpuinfo_x86 new_cpu_data = {
@ -949,13 +924,6 @@ void __init setup_arch(char **cmdline_p)
mpx_mm_init(&init_mm);
code_resource.start = __pa_symbol(_text);
code_resource.end = __pa_symbol(_etext)-1;
data_resource.start = __pa_symbol(_etext);
data_resource.end = __pa_symbol(_edata)-1;
bss_resource.start = __pa_symbol(__bss_start);
bss_resource.end = __pa_symbol(__bss_stop)-1;
#ifdef CONFIG_CMDLINE_BOOL
#ifdef CONFIG_CMDLINE_OVERRIDE
strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
@ -1019,11 +987,6 @@ void __init setup_arch(char **cmdline_p)
x86_init.resources.probe_roms();
/* after parse_early_param, so could debug it */
insert_resource(&iomem_resource, &code_resource);
insert_resource(&iomem_resource, &data_resource);
insert_resource(&iomem_resource, &bss_resource);
e820_add_kernel_range();
trim_bios_range();
#ifdef CONFIG_X86_32

View File

@ -422,7 +422,7 @@ static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
if (c->phys_proc_id == o->phys_proc_id &&
per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2) &&
c->compute_unit_id == o->compute_unit_id)
c->cpu_core_id == o->cpu_core_id)
return topology_sane(c, o, "smt");
} else if (c->phys_proc_id == o->phys_proc_id &&

View File

@ -1116,6 +1116,11 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
break;
case HVCALL_POST_MESSAGE:
case HVCALL_SIGNAL_EVENT:
/* don't bother userspace if it has no way to handle it */
if (!vcpu_to_synic(vcpu)->active) {
res = HV_STATUS_INVALID_HYPERCALL_CODE;
break;
}
vcpu->run->exit_reason = KVM_EXIT_HYPERV;
vcpu->run->hyperv.type = KVM_EXIT_HYPERV_HCALL;
vcpu->run->hyperv.u.hcall.input = param;

View File

@ -1369,7 +1369,7 @@ static void start_apic_timer(struct kvm_lapic *apic)
hrtimer_start(&apic->lapic_timer.timer,
ktime_add_ns(now, apic->lapic_timer.period),
HRTIMER_MODE_ABS);
HRTIMER_MODE_ABS_PINNED);
apic_debug("%s: bus cycle is %" PRId64 "ns, now 0x%016"
PRIx64 ", "
@ -1402,7 +1402,7 @@ static void start_apic_timer(struct kvm_lapic *apic)
expire = ktime_add_ns(now, ns);
expire = ktime_sub_ns(expire, lapic_timer_advance_ns);
hrtimer_start(&apic->lapic_timer.timer,
expire, HRTIMER_MODE_ABS);
expire, HRTIMER_MODE_ABS_PINNED);
} else
apic_timer_expired(apic);
@ -1868,7 +1868,7 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu)
apic->vcpu = vcpu;
hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
HRTIMER_MODE_ABS);
HRTIMER_MODE_ABS_PINNED);
apic->lapic_timer.timer.function = apic_timer_fn;
/*
@ -2003,7 +2003,7 @@ void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
timer = &vcpu->arch.apic->lapic_timer.timer;
if (hrtimer_cancel(timer))
hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
}
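All three call sites switch to HRTIMER_MODE_ABS_PINNED, which keeps the lapic timer on the CPU that armed it instead of letting it migrate with the timer load balancing, so the expiry fires where the vCPU last ran. A minimal, self-contained sketch of arming a pinned absolute hrtimer; the timer and callback names here are hypothetical, not from this diff:

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer my_timer;		/* hypothetical example timer */

static enum hrtimer_restart my_timer_fn(struct hrtimer *t)
{
	return HRTIMER_NORESTART;	/* one-shot */
}

static void arm_pinned_timer(void)
{
	hrtimer_init(&my_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
	my_timer.function = my_timer_fn;
	/* Absolute expiry 100us from now, pinned to the current CPU. */
	hrtimer_start(&my_timer, ktime_add_ns(ktime_get(), 100 * NSEC_PER_USEC),
		      HRTIMER_MODE_ABS_PINNED);
}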
/*

View File

@ -557,8 +557,15 @@ static bool mmu_spte_update(u64 *sptep, u64 new_spte)
!is_writable_pte(new_spte))
ret = true;
if (!shadow_accessed_mask)
if (!shadow_accessed_mask) {
/*
* We don't mark the page dirty when dropping a non-writable spte,
* so do it now if the new spte is becoming non-writable.
*/
if (ret)
kvm_set_pfn_dirty(spte_to_pfn(old_spte));
return ret;
}
/*
* Flush TLB when accessed/dirty bits are changed in the page tables,
@ -605,7 +612,8 @@ static int mmu_spte_clear_track_bits(u64 *sptep)
if (!shadow_accessed_mask || old_spte & shadow_accessed_mask)
kvm_set_pfn_accessed(pfn);
if (!shadow_dirty_mask || (old_spte & shadow_dirty_mask))
if (old_spte & (shadow_dirty_mask ? shadow_dirty_mask :
PT_WRITABLE_MASK))
kvm_set_pfn_dirty(pfn);
return 1;
}
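Both hunks encode the same rule: when the CPU provides no hardware dirty bit (shadow_dirty_mask == 0), a writable spte is the only evidence the page may be dirty, so the dirty state must be recorded whenever write permission goes away or the spte is dropped. A condensed restatement of that rule, using the names from the hunks above:

/* Treat writability as the dirty bit when the hardware provides none,
 * and log the dirty state as the permission disappears.
 */
u64 dirty_bit = shadow_dirty_mask ? shadow_dirty_mask : PT_WRITABLE_MASK;

if (old_spte & dirty_bit)
	kvm_set_pfn_dirty(spte_to_pfn(old_spte));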

View File

@ -6095,12 +6095,10 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
}
/* try to inject new event if pending */
if (vcpu->arch.nmi_pending) {
if (kvm_x86_ops->nmi_allowed(vcpu)) {
--vcpu->arch.nmi_pending;
vcpu->arch.nmi_injected = true;
kvm_x86_ops->set_nmi(vcpu);
}
if (vcpu->arch.nmi_pending && kvm_x86_ops->nmi_allowed(vcpu)) {
--vcpu->arch.nmi_pending;
vcpu->arch.nmi_injected = true;
kvm_x86_ops->set_nmi(vcpu);
} else if (kvm_cpu_has_injectable_intr(vcpu)) {
/*
* Because interrupts can be injected asynchronously, we are
@ -6569,10 +6567,12 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
if (inject_pending_event(vcpu, req_int_win) != 0)
req_immediate_exit = true;
/* enable NMI/IRQ window open exits if needed */
else if (vcpu->arch.nmi_pending)
kvm_x86_ops->enable_nmi_window(vcpu);
else if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win)
kvm_x86_ops->enable_irq_window(vcpu);
else {
if (vcpu->arch.nmi_pending)
kvm_x86_ops->enable_nmi_window(vcpu);
if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win)
kvm_x86_ops->enable_irq_window(vcpu);
}
if (kvm_lapic_enabled(vcpu)) {
update_cr8_intercept(vcpu);

View File

@ -20,6 +20,7 @@
#include <linux/pci.h>
#include <asm/mce.h>
#include <asm/smp.h>
#include <asm/amd_nb.h>
#include <asm/irq_vectors.h>
@ -206,7 +207,7 @@ static u32 get_nbc_for_node(int node_id)
struct cpuinfo_x86 *c = &boot_cpu_data;
u32 cores_per_node;
cores_per_node = c->x86_max_cores / amd_get_nodes_per_socket();
cores_per_node = (c->x86_max_cores * smp_num_siblings) / amd_get_nodes_per_socket();
return cores_per_node * node_id;
}
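The multiplication restores the thread count that amd_get_topology() now divides out of x86_max_cores (see the hunk above where x86_max_cores /= smp_num_siblings). A worked example with an assumed topology, local names chosen only for illustration:

/* Assumed topology: 16 physical cores per socket, 2 threads per
 * compute unit, 2 nodes per socket -- after the divide,
 * x86_max_cores == 8 compute units.
 */
u32 max_cores = 8, siblings = 2, nodes = 2;
u32 cores_per_node = (max_cores * siblings) / nodes;	/* == 8, i.e.
							   16 cores / 2 nodes */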

View File

@ -66,7 +66,7 @@ static u32 xen_apic_read(u32 reg)
ret = HYPERVISOR_platform_op(&op);
if (ret)
return 0;
op.u.pcpu_info.apic_id = BAD_APICID;
return op.u.pcpu_info.apic_id << 24;
}
@ -142,6 +142,14 @@ static void xen_silent_inquire(int apicid)
{
}
static int xen_cpu_present_to_apicid(int cpu)
{
if (cpu_present(cpu))
return xen_get_apic_id(xen_apic_read(APIC_ID));
else
return BAD_APICID;
}
static struct apic xen_pv_apic = {
.name = "Xen PV",
.probe = xen_apic_probe_pv,
@ -162,7 +170,7 @@ static struct apic xen_pv_apic = {
.ioapic_phys_id_map = default_ioapic_phys_id_map, /* Used on 32-bit */
.setup_apic_routing = NULL,
.cpu_present_to_apicid = default_cpu_present_to_apicid,
.cpu_present_to_apicid = xen_cpu_present_to_apicid,
.apicid_to_cpu_present = physid_set_mask_of_physid, /* Used on 32-bit */
.check_phys_apicid_present = default_check_phys_apicid_present, /* smp_sanity_check needs it */
.phys_pkg_id = xen_phys_pkg_id, /* detect_ht */

View File

@ -545,6 +545,8 @@ static void xen_play_dead(void) /* used only with HOTPLUG_CPU */
* data back is to call:
*/
tick_nohz_idle_enter();
cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
#else /* !CONFIG_HOTPLUG_CPU */

View File

@ -1339,7 +1339,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
* release the pages we didn't map into the bio, if any
*/
while (j < page_limit)
page_cache_release(pages[j++]);
put_page(pages[j++]);
}
kfree(pages);
@ -1365,7 +1365,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
for (j = 0; j < nr_pages; j++) {
if (!pages[j])
break;
page_cache_release(pages[j]);
put_page(pages[j]);
}
out:
kfree(pages);
@ -1385,7 +1385,7 @@ static void __bio_unmap_user(struct bio *bio)
if (bio_data_dir(bio) == READ)
set_page_dirty_lock(bvec->bv_page);
page_cache_release(bvec->bv_page);
put_page(bvec->bv_page);
}
bio_put(bio);
@ -1615,8 +1615,8 @@ static void bio_release_pages(struct bio *bio)
* the BIO and the offending pages and re-dirty the pages in process context.
*
* It is expected that bio_check_pages_dirty() will wholly own the BIO from
* here on. It will run one page_cache_release() against each page and will
* run one bio_put() against the BIO.
* here on. It will run one put_page() against each page and will run one
* bio_put() against the BIO.
*/
static void bio_dirty_fn(struct work_struct *work);
@ -1658,7 +1658,7 @@ void bio_check_pages_dirty(struct bio *bio)
struct page *page = bvec->bv_page;
if (PageDirty(page) || PageCompound(page)) {
page_cache_release(page);
put_page(page);
bvec->bv_page = NULL;
} else {
nr_clean_pages++;

View File

@ -706,7 +706,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
goto fail_id;
q->backing_dev_info.ra_pages =
(VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
(VM_MAX_READAHEAD * 1024) / PAGE_SIZE;
q->backing_dev_info.capabilities = BDI_CAP_CGROUP_WRITEBACK;
q->backing_dev_info.name = "block";
q->node = node_id;

View File

@ -239,8 +239,8 @@ void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_secto
struct queue_limits *limits = &q->limits;
unsigned int max_sectors;
if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE) {
max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
if ((max_hw_sectors << 9) < PAGE_SIZE) {
max_hw_sectors = 1 << (PAGE_SHIFT - 9);
printk(KERN_INFO "%s: set to minimum %d\n",
__func__, max_hw_sectors);
}
@ -329,8 +329,8 @@ EXPORT_SYMBOL(blk_queue_max_segments);
**/
void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
{
if (max_size < PAGE_CACHE_SIZE) {
max_size = PAGE_CACHE_SIZE;
if (max_size < PAGE_SIZE) {
max_size = PAGE_SIZE;
printk(KERN_INFO "%s: set to minimum %d\n",
__func__, max_size);
}
@ -760,8 +760,8 @@ EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
**/
void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
{
if (mask < PAGE_CACHE_SIZE - 1) {
mask = PAGE_CACHE_SIZE - 1;
if (mask < PAGE_SIZE - 1) {
mask = PAGE_SIZE - 1;
printk(KERN_INFO "%s: set to minimum %lx\n",
__func__, mask);
}
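All three setters clamp their argument to a PAGE_SIZE-derived floor, since the block layer must be able to fit at least one page into a request segment. A hedged usage sketch, assuming q is an already-allocated request queue and 4 KiB pages:

/* A driver requesting an undersized segment limit gets clamped: */
blk_queue_max_segment_size(q, 512);
/* logs "blk_queue_max_segment_size: set to minimum 4096" and leaves
 * q->limits.max_segment_size at PAGE_SIZE.
 */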

View File

@ -76,7 +76,7 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
unsigned long ra_kb = q->backing_dev_info.ra_pages <<
(PAGE_CACHE_SHIFT - 10);
(PAGE_SHIFT - 10);
return queue_var_show(ra_kb, (page));
}
@ -90,7 +90,7 @@ queue_ra_store(struct request_queue *q, const char *page, size_t count)
if (ret < 0)
return ret;
q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);
q->backing_dev_info.ra_pages = ra_kb >> (PAGE_SHIFT - 10);
return ret;
}
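read_ahead_kb is exported in kilobytes while ra_pages counts pages, so the conversion is a shift by PAGE_SHIFT - 10 (= 2 for 4 KiB pages). A quick worked example under that assumption:

/* Assuming 4 KiB pages (PAGE_SHIFT == 12): */
unsigned long ra_pages = 32;				/* the 128 KB default */
unsigned long ra_kb = ra_pages << (PAGE_SHIFT - 10);	/* 32 << 2 == 128 */
/* Writing 128 to read_ahead_kb maps back: 128 >> 2 == 32 pages. */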
@ -117,7 +117,7 @@ static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
if (blk_queue_cluster(q))
return queue_var_show(queue_max_segment_size(q), (page));
return queue_var_show(PAGE_CACHE_SIZE, (page));
return queue_var_show(PAGE_SIZE, (page));
}
static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
@ -198,7 +198,7 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
unsigned long max_sectors_kb,
max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
page_kb = 1 << (PAGE_SHIFT - 10);
ssize_t ret = queue_var_store(&max_sectors_kb, page, count);
if (ret < 0)

View File

@ -4075,7 +4075,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
* idle timer unplug to continue working.
*/
if (cfq_cfqq_wait_request(cfqq)) {
if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
if (blk_rq_bytes(rq) > PAGE_SIZE ||
cfqd->busy_queues > 1) {
cfq_del_timer(cfqd, cfqq);
cfq_clear_cfqq_wait_request(cfqq);

View File

@ -710,7 +710,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
return -EINVAL;
bdi = blk_get_backing_dev_info(bdev);
return compat_put_long(arg,
(bdi->ra_pages * PAGE_CACHE_SIZE) / 512);
(bdi->ra_pages * PAGE_SIZE) / 512);
case BLKROGET: /* compatible */
return compat_put_int(arg, bdev_read_only(bdev) != 0);
case BLKBSZGET_32: /* get the logical block size (cf. BLKSSZGET) */
@ -729,7 +729,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
bdi = blk_get_backing_dev_info(bdev);
bdi->ra_pages = (arg * 512) / PAGE_CACHE_SIZE;
bdi->ra_pages = (arg * 512) / PAGE_SIZE;
return 0;
case BLKGETSIZE:
size = i_size_read(bdev->bd_inode);

Some files were not shown because too many files have changed in this diff.