Linux 3.19-rc7

-----BEGIN PGP SIGNATURE-----
 Version: GnuPG v1
 
 iQEcBAABAgAGBQJUzvgKAAoJEHm+PkMAQRiG8XQH/1qVbHI4pP0KcnzfZUHq/mXq
 RuS4aJMwLm/Y6cXFraXBDaPde1A3CPtwtpob2C6giKcfu2zXGunY65haOEeJWNpX
 lCbBsLkNC3oDNkygBpVr5Zd6yibaw63WBjjLnpAi7pn2G2Zm2zB8DfILWWWMb7yz
 MH8ZXV+/xIYCTkjNWGWA1iMjmdYqu0PQHPeOgLsYQ+u7rxfM1zb/wHEkjqUZS6iu
 IaaZv7PV2PnFYnqib/iIPYjAEDvSQ4vN/7b82zlFd2Culm9j/568KCCWUPhJTb2l
 X0u4QYs49GnMTWVRa3bgYxS/nTUaE/6DeWs2y2WzqTt0/XDntVUnok0blUeDxGk=
 =o2kS
 -----END PGP SIGNATURE-----

Merge tag 'v3.19-rc7' into perf/core, to merge fixes before applying new changes

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Committed by Ingo Molnar, 2015-02-04 07:58:29 +01:00
commit 8f4bf4bcc4
206 changed files with 2206 additions and 1202 deletions

View File

@ -31,7 +31,7 @@ i2c0: i2c@fed40000 {
compatible = "st,comms-ssc4-i2c";
reg = <0xfed40000 0x110>;
interrupts = <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&CLK_S_ICN_REG_0>;
clocks = <&clk_s_a0_ls CLK_ICN_REG>;
clock-names = "ssc";
clock-frequency = <400000>;
pinctrl-names = "default";

View File

@ -47,6 +47,7 @@ dallas,ds3232 Extremely Accurate I²C RTC with Integrated Crystal and SRAM
dallas,ds4510 CPU Supervisor with Nonvolatile Memory and Programmable I/O
dallas,ds75 Digital Thermometer and Thermostat
dlg,da9053 DA9053: flexible system level PMIC with multicore support
dlg,da9063 DA9063: system PMIC for quad-core application processors
epson,rx8025 High-Stability. I2C-Bus INTERFACE REAL TIME CLOCK MODULE
epson,rx8581 I2C-BUS INTERFACE REAL TIME CLOCK MODULE
fsl,mag3110 MAG3110: Xtrinsic High Accuracy, 3D Magnetometer

View File

@ -708,6 +708,16 @@ X: drivers/iio/*/adjd*
F: drivers/staging/iio/*/ad*
F: staging/iio/trigger/iio-trig-bfin-timer.c
ANDROID DRIVERS
M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
M: Arve Hjønnevåg <arve@android.com>
M: Riley Andrews <riandrews@android.com>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging.git
L: devel@driverdev.osuosl.org
S: Supported
F: drivers/android/
F: drivers/staging/android/
AOA (Apple Onboard Audio) ALSA DRIVER
M: Johannes Berg <johannes@sipsolutions.net>
L: linuxppc-dev@lists.ozlabs.org
@ -10166,6 +10176,7 @@ USERSPACE I/O (UIO)
M: "Hans J. Koch" <hjk@hansjkoch.de>
M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc.git
F: Documentation/DocBook/uio-howto.tmpl
F: drivers/uio/
F: include/linux/uio*.h

View File

@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 19
SUBLEVEL = 0
EXTRAVERSION = -rc6
EXTRAVERSION = -rc7
NAME = Diseased Newt
# *DOCUMENTATION*

View File

@ -156,6 +156,8 @@ retry:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
else if (fault & VM_FAULT_SIGSEGV)
goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
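
Many hunks in this merge apply the same small change to the per-architecture page-fault handlers: handle_mm_fault() can now return VM_FAULT_SIGSEGV, and each handler must route it to its segfault path instead of falling through to BUG(). A self-contained sketch of the dispatch shape these hunks share (the helper is hypothetical; the real handlers use gotos to their own bad_area/do_sigbus/out_of_memory labels):

#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/signal.h>

/* Hypothetical reduction of the per-arch VM_FAULT_* dispatch. */
static int vm_fault_to_signal(unsigned int fault)
{
	if (fault & VM_FAULT_OOM)
		return 0;		/* arch invokes its OOM path instead */
	if (fault & VM_FAULT_SIGSEGV)	/* new in this series */
		return SIGSEGV;		/* handled like a bad_area fault */
	if (fault & VM_FAULT_SIGBUS)
		return SIGBUS;
	BUG();				/* any other error flag is a bug */
}

Architectures without a literal bad_area label (m68k, um) map the new case onto their closest equivalent, as the later hunks show.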

View File

@ -161,6 +161,8 @@ good_area:
if (fault & VM_FAULT_OOM)
goto out_of_memory;
else if (fault & VM_FAULT_SIGSEGV)
goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;

View File

@ -166,12 +166,12 @@
#address-cells = <1>;
#size-cells = <0>;
ethphy1: ethernet-phy@0 {
reg = <0>;
ethphy1: ethernet-phy@1 {
reg = <1>;
};
ethphy2: ethernet-phy@1 {
reg = <1>;
ethphy2: ethernet-phy@2 {
reg = <2>;
};
};
};

View File

@ -17,14 +17,6 @@
aliases {
ethernet0 = &emac;
serial0 = &uart0;
serial1 = &uart1;
serial2 = &uart2;
serial3 = &uart3;
serial4 = &uart4;
serial5 = &uart5;
serial6 = &uart6;
serial7 = &uart7;
};
chosen {
@ -39,6 +31,14 @@
<&ahb_gates 44>;
status = "disabled";
};
framebuffer@1 {
compatible = "allwinner,simple-framebuffer", "simple-framebuffer";
allwinner,pipeline = "de_fe0-de_be0-lcd0-hdmi";
clocks = <&pll5 1>, <&ahb_gates 36>, <&ahb_gates 43>,
<&ahb_gates 44>, <&ahb_gates 46>;
status = "disabled";
};
};
cpus {
@ -438,8 +438,8 @@
reg-names = "phy_ctrl", "pmu1", "pmu2";
clocks = <&usb_clk 8>;
clock-names = "usb_phy";
resets = <&usb_clk 1>, <&usb_clk 2>;
reset-names = "usb1_reset", "usb2_reset";
resets = <&usb_clk 0>, <&usb_clk 1>, <&usb_clk 2>;
reset-names = "usb0_reset", "usb1_reset", "usb2_reset";
status = "disabled";
};

View File

@ -55,6 +55,12 @@
model = "Olimex A10s-Olinuxino Micro";
compatible = "olimex,a10s-olinuxino-micro", "allwinner,sun5i-a10s";
aliases {
serial0 = &uart0;
serial1 = &uart2;
serial2 = &uart3;
};
soc@01c00000 {
emac: ethernet@01c0b000 {
pinctrl-names = "default";

View File

@ -18,10 +18,6 @@
aliases {
ethernet0 = &emac;
serial0 = &uart0;
serial1 = &uart1;
serial2 = &uart2;
serial3 = &uart3;
};
chosen {
@ -390,8 +386,8 @@
reg-names = "phy_ctrl", "pmu1";
clocks = <&usb_clk 8>;
clock-names = "usb_phy";
resets = <&usb_clk 1>;
reset-names = "usb1_reset";
resets = <&usb_clk 0>, <&usb_clk 1>;
reset-names = "usb0_reset", "usb1_reset";
status = "disabled";
};

View File

@ -53,6 +53,10 @@
model = "HSG H702";
compatible = "hsg,h702", "allwinner,sun5i-a13";
aliases {
serial0 = &uart1;
};
soc@01c00000 {
mmc0: mmc@01c0f000 {
pinctrl-names = "default";

View File

@ -54,6 +54,10 @@
model = "Olimex A13-Olinuxino Micro";
compatible = "olimex,a13-olinuxino-micro", "allwinner,sun5i-a13";
aliases {
serial0 = &uart1;
};
soc@01c00000 {
mmc0: mmc@01c0f000 {
pinctrl-names = "default";

View File

@ -55,6 +55,10 @@
model = "Olimex A13-Olinuxino";
compatible = "olimex,a13-olinuxino", "allwinner,sun5i-a13";
aliases {
serial0 = &uart1;
};
soc@01c00000 {
mmc0: mmc@01c0f000 {
pinctrl-names = "default";

View File

@ -16,11 +16,6 @@
/ {
interrupt-parent = <&intc>;
aliases {
serial0 = &uart1;
serial1 = &uart3;
};
cpus {
#address-cells = <1>;
#size-cells = <0>;
@ -349,8 +344,8 @@
reg-names = "phy_ctrl", "pmu1";
clocks = <&usb_clk 8>;
clock-names = "usb_phy";
resets = <&usb_clk 1>;
reset-names = "usb1_reset";
resets = <&usb_clk 0>, <&usb_clk 1>;
reset-names = "usb0_reset", "usb1_reset";
status = "disabled";
};

View File

@ -53,12 +53,6 @@
interrupt-parent = <&gic>;
aliases {
serial0 = &uart0;
serial1 = &uart1;
serial2 = &uart2;
serial3 = &uart3;
serial4 = &uart4;
serial5 = &uart5;
ethernet0 = &gmac;
};

View File

@ -55,6 +55,12 @@
model = "LeMaker Banana Pi";
compatible = "lemaker,bananapi", "allwinner,sun7i-a20";
aliases {
serial0 = &uart0;
serial1 = &uart3;
serial2 = &uart7;
};
soc@01c00000 {
spi0: spi@01c05000 {
pinctrl-names = "default";

View File

@ -19,6 +19,14 @@
model = "Merrii A20 Hummingbird";
compatible = "merrii,a20-hummingbird", "allwinner,sun7i-a20";
aliases {
serial0 = &uart0;
serial1 = &uart2;
serial2 = &uart3;
serial3 = &uart4;
serial4 = &uart5;
};
soc@01c00000 {
mmc0: mmc@01c0f000 {
pinctrl-names = "default";

View File

@ -20,6 +20,9 @@
compatible = "olimex,a20-olinuxino-micro", "allwinner,sun7i-a20";
aliases {
serial0 = &uart0;
serial1 = &uart6;
serial2 = &uart7;
spi0 = &spi1;
spi1 = &spi2;
};

View File

@ -54,14 +54,6 @@
aliases {
ethernet0 = &gmac;
serial0 = &uart0;
serial1 = &uart1;
serial2 = &uart2;
serial3 = &uart3;
serial4 = &uart4;
serial5 = &uart5;
serial6 = &uart6;
serial7 = &uart7;
};
chosen {

View File

@ -55,6 +55,10 @@
model = "Ippo Q8H Dual Core Tablet (v5)";
compatible = "ippo,q8h-v5", "allwinner,sun8i-a23";
aliases {
serial0 = &r_uart;
};
chosen {
bootargs = "earlyprintk console=ttyS0,115200";
};

View File

@ -52,15 +52,6 @@
/ {
interrupt-parent = <&gic>;
aliases {
serial0 = &uart0;
serial1 = &uart1;
serial2 = &uart2;
serial3 = &uart3;
serial4 = &uart4;
serial5 = &r_uart;
};
cpus {
#address-cells = <1>;
#size-cells = <0>;

View File

@ -54,6 +54,11 @@
model = "Merrii A80 Optimus Board";
compatible = "merrii,a80-optimus", "allwinner,sun9i-a80";
aliases {
serial0 = &uart0;
serial1 = &uart4;
};
chosen {
bootargs = "earlyprintk console=ttyS0,115200";
};

View File

@ -52,16 +52,6 @@
/ {
interrupt-parent = <&gic>;
aliases {
serial0 = &uart0;
serial1 = &uart1;
serial2 = &uart2;
serial3 = &uart3;
serial4 = &uart4;
serial5 = &uart5;
serial6 = &r_uart;
};
cpus {
#address-cells = <1>;
#size-cells = <0>;

View File

@ -38,6 +38,16 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
vcpu->arch.hcr = HCR_GUEST_MASK;
}
static inline unsigned long vcpu_get_hcr(struct kvm_vcpu *vcpu)
{
return vcpu->arch.hcr;
}
static inline void vcpu_set_hcr(struct kvm_vcpu *vcpu, unsigned long hcr)
{
vcpu->arch.hcr = hcr;
}
static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu)
{
return 1;

View File

@ -125,9 +125,6 @@ struct kvm_vcpu_arch {
* Anything that is not used directly from assembly code goes
* here.
*/
/* dcache set/way operation pending */
int last_pcpu;
cpumask_t require_dcache_flush;
/* Don't run the guest on this vcpu */
bool pause;

View File

@ -44,6 +44,7 @@
#ifndef __ASSEMBLY__
#include <linux/highmem.h>
#include <asm/cacheflush.h>
#include <asm/pgalloc.h>
@ -161,13 +162,10 @@ static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
return (vcpu->arch.cp15[c1_SCTLR] & 0b101) == 0b101;
}
static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
unsigned long size,
bool ipa_uncached)
static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn,
unsigned long size,
bool ipa_uncached)
{
if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached)
kvm_flush_dcache_to_poc((void *)hva, size);
/*
* If we are going to insert an instruction page and the icache is
* either VIPT or PIPT, there is a potential problem where the host
@ -179,18 +177,77 @@ static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
*
* VIVT caches are tagged using both the ASID and the VMID and don't
* need any kind of flushing (DDI 0406C.b - Page B3-1392).
*
* We need to do this through a kernel mapping (using the
* user-space mapping has proved to be the wrong
* solution). For that, we need to kmap one page at a time,
* and iterate over the range.
*/
if (icache_is_pipt()) {
__cpuc_coherent_user_range(hva, hva + size);
} else if (!icache_is_vivt_asid_tagged()) {
bool need_flush = !vcpu_has_cache_enabled(vcpu) || ipa_uncached;
VM_BUG_ON(size & ~PAGE_MASK);
if (!need_flush && !icache_is_pipt())
goto vipt_cache;
while (size) {
void *va = kmap_atomic_pfn(pfn);
if (need_flush)
kvm_flush_dcache_to_poc(va, PAGE_SIZE);
if (icache_is_pipt())
__cpuc_coherent_user_range((unsigned long)va,
(unsigned long)va + PAGE_SIZE);
size -= PAGE_SIZE;
pfn++;
kunmap_atomic(va);
}
vipt_cache:
if (!icache_is_pipt() && !icache_is_vivt_asid_tagged()) {
/* any kind of VIPT cache */
__flush_icache_all();
}
}
static inline void __kvm_flush_dcache_pte(pte_t pte)
{
void *va = kmap_atomic(pte_page(pte));
kvm_flush_dcache_to_poc(va, PAGE_SIZE);
kunmap_atomic(va);
}
static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
{
unsigned long size = PMD_SIZE;
pfn_t pfn = pmd_pfn(pmd);
while (size) {
void *va = kmap_atomic_pfn(pfn);
kvm_flush_dcache_to_poc(va, PAGE_SIZE);
pfn++;
size -= PAGE_SIZE;
kunmap_atomic(va);
}
}
static inline void __kvm_flush_dcache_pud(pud_t pud)
{
}
#define kvm_virt_to_phys(x) virt_to_idmap((unsigned long)(x))
void stage2_flush_vm(struct kvm *kvm);
void kvm_set_way_flush(struct kvm_vcpu *vcpu);
void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);
#endif /* !__ASSEMBLY__ */

View File

@ -281,15 +281,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
vcpu->cpu = cpu;
vcpu->arch.host_cpu_context = this_cpu_ptr(kvm_host_cpu_state);
/*
* Check whether this vcpu requires the cache to be flushed on
* this physical CPU. This is a consequence of doing dcache
* operations by set/way on this vcpu. We do it here to be in
* a non-preemptible section.
*/
if (cpumask_test_and_clear_cpu(cpu, &vcpu->arch.require_dcache_flush))
flush_cache_all(); /* We'd really want v7_flush_dcache_all() */
kvm_arm_set_running_vcpu(vcpu);
}
@ -541,7 +532,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);
vcpu->mode = OUTSIDE_GUEST_MODE;
vcpu->arch.last_pcpu = smp_processor_id();
kvm_guest_exit();
trace_kvm_exit(*vcpu_pc(vcpu));
/*

View File

@ -189,82 +189,40 @@ static bool access_l2ectlr(struct kvm_vcpu *vcpu,
return true;
}
/* See note at ARM ARM B1.14.4 */
/*
* See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
*/
static bool access_dcsw(struct kvm_vcpu *vcpu,
const struct coproc_params *p,
const struct coproc_reg *r)
{
unsigned long val;
int cpu;
if (!p->is_write)
return read_from_write_only(vcpu, p);
cpu = get_cpu();
cpumask_setall(&vcpu->arch.require_dcache_flush);
cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush);
/* If we were already preempted, take the long way around */
if (cpu != vcpu->arch.last_pcpu) {
flush_cache_all();
goto done;
}
val = *vcpu_reg(vcpu, p->Rt1);
switch (p->CRm) {
case 6: /* Upgrade DCISW to DCCISW, as per HCR.SWIO */
case 14: /* DCCISW */
asm volatile("mcr p15, 0, %0, c7, c14, 2" : : "r" (val));
break;
case 10: /* DCCSW */
asm volatile("mcr p15, 0, %0, c7, c10, 2" : : "r" (val));
break;
}
done:
put_cpu();
kvm_set_way_flush(vcpu);
return true;
}
/*
* Generic accessor for VM registers. Only called as long as HCR_TVM
* is set.
* is set. If the guest enables the MMU, we stop trapping the VM
* sys_regs and leave it in complete control of the caches.
*
* Used by the cpu-specific code.
*/
static bool access_vm_reg(struct kvm_vcpu *vcpu,
const struct coproc_params *p,
const struct coproc_reg *r)
bool access_vm_reg(struct kvm_vcpu *vcpu,
const struct coproc_params *p,
const struct coproc_reg *r)
{
bool was_enabled = vcpu_has_cache_enabled(vcpu);
BUG_ON(!p->is_write);
vcpu->arch.cp15[r->reg] = *vcpu_reg(vcpu, p->Rt1);
if (p->is_64bit)
vcpu->arch.cp15[r->reg + 1] = *vcpu_reg(vcpu, p->Rt2);
return true;
}
/*
* SCTLR accessor. Only called as long as HCR_TVM is set. If the
* guest enables the MMU, we stop trapping the VM sys_regs and leave
* it in complete control of the caches.
*
* Used by the cpu-specific code.
*/
bool access_sctlr(struct kvm_vcpu *vcpu,
const struct coproc_params *p,
const struct coproc_reg *r)
{
access_vm_reg(vcpu, p, r);
if (vcpu_has_cache_enabled(vcpu)) { /* MMU+Caches enabled? */
vcpu->arch.hcr &= ~HCR_TVM;
stage2_flush_vm(vcpu->kvm);
}
kvm_toggle_cache(vcpu, was_enabled);
return true;
}

View File

@ -153,8 +153,8 @@ static inline int cmp_reg(const struct coproc_reg *i1,
#define is64 .is_64 = true
#define is32 .is_64 = false
bool access_sctlr(struct kvm_vcpu *vcpu,
const struct coproc_params *p,
const struct coproc_reg *r);
bool access_vm_reg(struct kvm_vcpu *vcpu,
const struct coproc_params *p,
const struct coproc_reg *r);
#endif /* __ARM_KVM_COPROC_LOCAL_H__ */

View File

@ -34,7 +34,7 @@
static const struct coproc_reg a15_regs[] = {
/* SCTLR: swapped by interrupt.S. */
{ CRn( 1), CRm( 0), Op1( 0), Op2( 0), is32,
access_sctlr, reset_val, c1_SCTLR, 0x00C50078 },
access_vm_reg, reset_val, c1_SCTLR, 0x00C50078 },
};
static struct kvm_coproc_target_table a15_target_table = {

View File

@ -37,7 +37,7 @@
static const struct coproc_reg a7_regs[] = {
/* SCTLR: swapped by interrupt.S. */
{ CRn( 1), CRm( 0), Op1( 0), Op2( 0), is32,
access_sctlr, reset_val, c1_SCTLR, 0x00C50878 },
access_vm_reg, reset_val, c1_SCTLR, 0x00C50878 },
};
static struct kvm_coproc_target_table a7_target_table = {

View File

@ -58,6 +58,26 @@ static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
}
/*
* D-Cache management functions. They take the page table entries by
* value, as they are flushing the cache using the kernel mapping (or
* kmap on 32bit).
*/
static void kvm_flush_dcache_pte(pte_t pte)
{
__kvm_flush_dcache_pte(pte);
}
static void kvm_flush_dcache_pmd(pmd_t pmd)
{
__kvm_flush_dcache_pmd(pmd);
}
static void kvm_flush_dcache_pud(pud_t pud)
{
__kvm_flush_dcache_pud(pud);
}
static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
int min, int max)
{
@ -119,6 +139,26 @@ static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
put_page(virt_to_page(pmd));
}
/*
* Unmapping vs dcache management:
*
* If a guest maps certain memory pages as uncached, all writes will
* bypass the data cache and go directly to RAM. However, the CPUs
* can still speculate reads (not writes) and fill cache lines with
* data.
*
* Those cache lines will be *clean* cache lines though, so a
* clean+invalidate operation is equivalent to an invalidate
* operation, because no cache lines are marked dirty.
*
* Those clean cache lines could be filled prior to an uncached write
* by the guest, and the cache coherent IO subsystem would therefore
* end up writing old data to disk.
*
* This is why right after unmapping a page/section and invalidating
* the corresponding TLBs, we call kvm_flush_dcache_p*() to make sure
* the IO subsystem will never hit in the cache.
*/
static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
phys_addr_t addr, phys_addr_t end)
{
@ -128,9 +168,16 @@ static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
start_pte = pte = pte_offset_kernel(pmd, addr);
do {
if (!pte_none(*pte)) {
pte_t old_pte = *pte;
kvm_set_pte(pte, __pte(0));
put_page(virt_to_page(pte));
kvm_tlb_flush_vmid_ipa(kvm, addr);
/* No need to invalidate the cache for device mappings */
if ((pte_val(old_pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE)
kvm_flush_dcache_pte(old_pte);
put_page(virt_to_page(pte));
}
} while (pte++, addr += PAGE_SIZE, addr != end);
@ -149,8 +196,13 @@ static void unmap_pmds(struct kvm *kvm, pud_t *pud,
next = kvm_pmd_addr_end(addr, end);
if (!pmd_none(*pmd)) {
if (kvm_pmd_huge(*pmd)) {
pmd_t old_pmd = *pmd;
pmd_clear(pmd);
kvm_tlb_flush_vmid_ipa(kvm, addr);
kvm_flush_dcache_pmd(old_pmd);
put_page(virt_to_page(pmd));
} else {
unmap_ptes(kvm, pmd, addr, next);
@ -173,8 +225,13 @@ static void unmap_puds(struct kvm *kvm, pgd_t *pgd,
next = kvm_pud_addr_end(addr, end);
if (!pud_none(*pud)) {
if (pud_huge(*pud)) {
pud_t old_pud = *pud;
pud_clear(pud);
kvm_tlb_flush_vmid_ipa(kvm, addr);
kvm_flush_dcache_pud(old_pud);
put_page(virt_to_page(pud));
} else {
unmap_pmds(kvm, pud, addr, next);
@ -209,10 +266,9 @@ static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
pte = pte_offset_kernel(pmd, addr);
do {
if (!pte_none(*pte)) {
hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
kvm_flush_dcache_to_poc((void*)hva, PAGE_SIZE);
}
if (!pte_none(*pte) &&
(pte_val(*pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE)
kvm_flush_dcache_pte(*pte);
} while (pte++, addr += PAGE_SIZE, addr != end);
}
@ -226,12 +282,10 @@ static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud,
do {
next = kvm_pmd_addr_end(addr, end);
if (!pmd_none(*pmd)) {
if (kvm_pmd_huge(*pmd)) {
hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
kvm_flush_dcache_to_poc((void*)hva, PMD_SIZE);
} else {
if (kvm_pmd_huge(*pmd))
kvm_flush_dcache_pmd(*pmd);
else
stage2_flush_ptes(kvm, pmd, addr, next);
}
}
} while (pmd++, addr = next, addr != end);
}
@ -246,12 +300,10 @@ static void stage2_flush_puds(struct kvm *kvm, pgd_t *pgd,
do {
next = kvm_pud_addr_end(addr, end);
if (!pud_none(*pud)) {
if (pud_huge(*pud)) {
hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
kvm_flush_dcache_to_poc((void*)hva, PUD_SIZE);
} else {
if (pud_huge(*pud))
kvm_flush_dcache_pud(*pud);
else
stage2_flush_pmds(kvm, pud, addr, next);
}
}
} while (pud++, addr = next, addr != end);
}
@ -278,7 +330,7 @@ static void stage2_flush_memslot(struct kvm *kvm,
* Go through the stage 2 page tables and invalidate any cache lines
* backing memory already mapped to the VM.
*/
void stage2_flush_vm(struct kvm *kvm)
static void stage2_flush_vm(struct kvm *kvm)
{
struct kvm_memslots *slots;
struct kvm_memory_slot *memslot;
@ -905,6 +957,12 @@ static bool kvm_is_device_pfn(unsigned long pfn)
return !pfn_valid(pfn);
}
static void coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn,
unsigned long size, bool uncached)
{
__coherent_cache_guest_page(vcpu, pfn, size, uncached);
}
static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
struct kvm_memory_slot *memslot, unsigned long hva,
unsigned long fault_status)
@ -994,8 +1052,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
kvm_set_s2pmd_writable(&new_pmd);
kvm_set_pfn_dirty(pfn);
}
coherent_cache_guest_page(vcpu, hva & PMD_MASK, PMD_SIZE,
fault_ipa_uncached);
coherent_cache_guest_page(vcpu, pfn, PMD_SIZE, fault_ipa_uncached);
ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
} else {
pte_t new_pte = pfn_pte(pfn, mem_type);
@ -1003,8 +1060,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
kvm_set_s2pte_writable(&new_pte);
kvm_set_pfn_dirty(pfn);
}
coherent_cache_guest_page(vcpu, hva, PAGE_SIZE,
fault_ipa_uncached);
coherent_cache_guest_page(vcpu, pfn, PAGE_SIZE, fault_ipa_uncached);
ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte,
pgprot_val(mem_type) == pgprot_val(PAGE_S2_DEVICE));
}
@ -1411,3 +1467,71 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
unmap_stage2_range(kvm, gpa, size);
spin_unlock(&kvm->mmu_lock);
}
/*
* See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
*
* Main problems:
* - S/W ops are local to a CPU (not broadcast)
* - We have line migration behind our back (speculation)
* - System caches don't support S/W at all (damn!)
*
* In the face of the above, the best we can do is to try and convert
* S/W ops to VA ops. Because the guest is not allowed to infer the
* S/W to PA mapping, it can only use S/W to nuke the whole cache,
* which is a rather good thing for us.
*
* Also, it is only used when turning caches on/off ("The expected
* usage of the cache maintenance instructions that operate by set/way
* is associated with the cache maintenance instructions associated
* with the powerdown and powerup of caches, if this is required by
* the implementation.").
*
* We use the following policy:
*
* - If we trap a S/W operation, we enable VM trapping to detect
* caches being turned on/off, and do a full clean.
*
* - We flush the caches on both the cache-on and the cache-off transitions.
*
* - Once the caches are enabled, we stop trapping VM ops.
*/
void kvm_set_way_flush(struct kvm_vcpu *vcpu)
{
unsigned long hcr = vcpu_get_hcr(vcpu);
/*
* If this is the first time we do a S/W operation
* (i.e. HCR_TVM not set) flush the whole memory, and set the
* VM trapping.
*
* Otherwise, rely on the VM trapping to wait for the MMU +
* Caches to be turned off. At that point, we'll be able to
* clean the caches again.
*/
if (!(hcr & HCR_TVM)) {
trace_kvm_set_way_flush(*vcpu_pc(vcpu),
vcpu_has_cache_enabled(vcpu));
stage2_flush_vm(vcpu->kvm);
vcpu_set_hcr(vcpu, hcr | HCR_TVM);
}
}
void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled)
{
bool now_enabled = vcpu_has_cache_enabled(vcpu);
/*
* If switching the MMU+caches on, need to invalidate the caches.
* If switching it off, need to clean the caches.
* Clean + invalidate does the trick always.
*/
if (now_enabled != was_enabled)
stage2_flush_vm(vcpu->kvm);
/* Caches are now on, stop trapping VM ops (until a S/W op) */
if (now_enabled)
vcpu_set_hcr(vcpu, vcpu_get_hcr(vcpu) & ~HCR_TVM);
trace_kvm_toggle_cache(*vcpu_pc(vcpu), was_enabled, now_enabled);
}
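
The policy in the comment above reduces to a two-state machine keyed on HCR_TVM. A compressed userspace model of kvm_set_way_flush()/kvm_toggle_cache() (illustrative only; flush() stands in for stage2_flush_vm()):

#include <stdbool.h>

/* Illustrative model, not kernel code. */
struct trap_state {
	bool tvm;	/* are VM register writes currently trapped? */
};

static void on_set_way_op(struct trap_state *s, void (*flush)(void))
{
	if (!s->tvm) {		/* first S/W op: flush once, arm the trap */
		flush();
		s->tvm = true;
	}
}

static void on_cache_toggle(struct trap_state *s, bool was_on, bool now_on,
			    void (*flush)(void))
{
	if (was_on != now_on)	/* any on<->off transition */
		flush();	/* clean+invalidate covers both directions */
	if (now_on)		/* caches on: guest controls them again */
		s->tvm = false;
}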

View File

@ -223,6 +223,45 @@ TRACE_EVENT(kvm_hvc,
__entry->vcpu_pc, __entry->r0, __entry->imm)
);
TRACE_EVENT(kvm_set_way_flush,
TP_PROTO(unsigned long vcpu_pc, bool cache),
TP_ARGS(vcpu_pc, cache),
TP_STRUCT__entry(
__field( unsigned long, vcpu_pc )
__field( bool, cache )
),
TP_fast_assign(
__entry->vcpu_pc = vcpu_pc;
__entry->cache = cache;
),
TP_printk("S/W flush at 0x%016lx (cache %s)",
__entry->vcpu_pc, __entry->cache ? "on" : "off")
);
TRACE_EVENT(kvm_toggle_cache,
TP_PROTO(unsigned long vcpu_pc, bool was, bool now),
TP_ARGS(vcpu_pc, was, now),
TP_STRUCT__entry(
__field( unsigned long, vcpu_pc )
__field( bool, was )
__field( bool, now )
),
TP_fast_assign(
__entry->vcpu_pc = vcpu_pc;
__entry->was = was;
__entry->now = now;
),
TP_printk("VM op at 0x%016lx (cache was %s, now %s)",
__entry->vcpu_pc, __entry->was ? "on" : "off",
__entry->now ? "on" : "off")
);
#endif /* _TRACE_KVM_H */
#undef TRACE_INCLUDE_PATH

View File

@ -189,6 +189,13 @@ static void __init armada_375_380_coherency_init(struct device_node *np)
coherency_cpu_base = of_iomap(np, 0);
arch_ioremap_caller = armada_pcie_wa_ioremap_caller;
/*
* We should switch the PL310 to I/O coherency mode only if
* I/O coherency is actually enabled.
*/
if (!coherency_available())
return;
/*
* Add the PL310 property "arm,io-coherent". This makes sure the
* outer sync operation is not used, which allows to

View File

@ -18,6 +18,8 @@
#include <linux/gpio_keys.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic.h>
#include <linux/kernel.h>
#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
@ -273,6 +275,22 @@ static void __init ape6evm_add_standard_devices(void)
sizeof(ape6evm_leds_pdata));
}
static void __init ape6evm_legacy_init_time(void)
{
/* Do not invoke DT-based timers via clocksource_of_init() */
}
static void __init ape6evm_legacy_init_irq(void)
{
void __iomem *gic_dist_base = ioremap_nocache(0xf1001000, 0x1000);
void __iomem *gic_cpu_base = ioremap_nocache(0xf1002000, 0x1000);
gic_init(0, 29, gic_dist_base, gic_cpu_base);
/* Do not invoke DT-based interrupt code via irqchip_init() */
}
static const char *ape6evm_boards_compat_dt[] __initdata = {
"renesas,ape6evm",
NULL,
@ -280,7 +298,9 @@ static const char *ape6evm_boards_compat_dt[] __initdata = {
DT_MACHINE_START(APE6EVM_DT, "ape6evm")
.init_early = shmobile_init_delay,
.init_irq = ape6evm_legacy_init_irq,
.init_machine = ape6evm_add_standard_devices,
.init_late = shmobile_init_late,
.dt_compat = ape6evm_boards_compat_dt,
.init_time = ape6evm_legacy_init_time,
MACHINE_END

View File

@ -21,6 +21,8 @@
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic.h>
#include <linux/kernel.h>
#include <linux/leds.h>
#include <linux/mfd/tmio.h>
@ -811,6 +813,16 @@ static void __init lager_init(void)
lager_ksz8041_fixup);
}
static void __init lager_legacy_init_irq(void)
{
void __iomem *gic_dist_base = ioremap_nocache(0xf1001000, 0x1000);
void __iomem *gic_cpu_base = ioremap_nocache(0xf1002000, 0x1000);
gic_init(0, 29, gic_dist_base, gic_cpu_base);
/* Do not invoke DT-based interrupt code via irqchip_init() */
}
static const char * const lager_boards_compat_dt[] __initconst = {
"renesas,lager",
NULL,
@ -819,6 +831,7 @@ static const char * const lager_boards_compat_dt[] __initconst = {
DT_MACHINE_START(LAGER_DT, "lager")
.smp = smp_ops(r8a7790_smp_ops),
.init_early = shmobile_init_delay,
.init_irq = lager_legacy_init_irq,
.init_time = rcar_gen2_timer_init,
.init_machine = lager_init,
.init_late = shmobile_init_late,

View File

@ -133,7 +133,9 @@ void __init rcar_gen2_timer_init(void)
#ifdef CONFIG_COMMON_CLK
rcar_gen2_clocks_init(mode);
#endif
#ifdef CONFIG_ARCH_SHMOBILE_MULTI
clocksource_of_init();
#endif
}
struct memory_reserve_config {

View File

@ -70,6 +70,18 @@ void __init shmobile_init_delay(void)
if (!max_freq)
return;
#ifdef CONFIG_ARCH_SHMOBILE_LEGACY
/* Non-multiplatform r8a73a4 SoC cannot use arch timer due
* to GIC being initialized from C and arch timer via DT */
if (of_machine_is_compatible("renesas,r8a73a4"))
has_arch_timer = false;
/* Non-multiplatform r8a7790 SoC cannot use arch timer due
* to GIC being initialized from C and arch timer via DT */
if (of_machine_is_compatible("renesas,r8a7790"))
has_arch_timer = false;
#endif
if (!has_arch_timer || !IS_ENABLED(CONFIG_ARM_ARCH_TIMER)) {
if (is_a7_a8_a9)
shmobile_setup_delay_hz(max_freq, 1, 3);

View File

@ -1940,18 +1940,8 @@ void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
}
EXPORT_SYMBOL_GPL(arm_iommu_release_mapping);
/**
* arm_iommu_attach_device
* @dev: valid struct device pointer
* @mapping: io address space mapping structure (returned from
* arm_iommu_create_mapping)
*
* Attaches specified io address space mapping to the provided device,
* More than one client might be attached to the same io address space
* mapping.
*/
int arm_iommu_attach_device(struct device *dev,
struct dma_iommu_mapping *mapping)
static int __arm_iommu_attach_device(struct device *dev,
struct dma_iommu_mapping *mapping)
{
int err;
@ -1965,15 +1955,35 @@ int arm_iommu_attach_device(struct device *dev,
pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev));
return 0;
}
EXPORT_SYMBOL_GPL(arm_iommu_attach_device);
/**
* arm_iommu_detach_device
* arm_iommu_attach_device
* @dev: valid struct device pointer
* @mapping: io address space mapping structure (returned from
* arm_iommu_create_mapping)
*
* Detaches the provided device from a previously attached map.
* Attaches specified io address space mapping to the provided device.
* This replaces the dma operations (dma_map_ops pointer) with the
* IOMMU aware version.
*
* More than one client might be attached to the same io address space
* mapping.
*/
void arm_iommu_detach_device(struct device *dev)
int arm_iommu_attach_device(struct device *dev,
struct dma_iommu_mapping *mapping)
{
int err;
err = __arm_iommu_attach_device(dev, mapping);
if (err)
return err;
set_dma_ops(dev, &iommu_ops);
return 0;
}
EXPORT_SYMBOL_GPL(arm_iommu_attach_device);
static void __arm_iommu_detach_device(struct device *dev)
{
struct dma_iommu_mapping *mapping;
@ -1989,6 +1999,19 @@ void arm_iommu_detach_device(struct device *dev)
pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
}
/**
* arm_iommu_detach_device
* @dev: valid struct device pointer
*
* Detaches the provided device from a previously attached map.
* This voids the dma operations (dma_map_ops pointer)
*/
void arm_iommu_detach_device(struct device *dev)
{
__arm_iommu_detach_device(dev);
set_dma_ops(dev, NULL);
}
EXPORT_SYMBOL_GPL(arm_iommu_detach_device);
static struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent)
@ -2011,7 +2034,7 @@ static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
return false;
}
if (arm_iommu_attach_device(dev, mapping)) {
if (__arm_iommu_attach_device(dev, mapping)) {
pr_warn("Failed to attached device %s to IOMMU_mapping\n",
dev_name(dev));
arm_iommu_release_mapping(mapping);
@ -2025,7 +2048,7 @@ static void arm_teardown_iommu_dma_ops(struct device *dev)
{
struct dma_iommu_mapping *mapping = dev->archdata.mapping;
arm_iommu_detach_device(dev);
__arm_iommu_detach_device(dev);
arm_iommu_release_mapping(mapping);
}
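
The dma-mapping change is a standard kernel refactor: the exported entry points keep their contract (attach also installs the IOMMU-aware dma_map_ops, detach voids them), while new double-underscore helpers do only the core work so internal setup/teardown paths can use them without disturbing a device's existing ops. A simplified, self-contained sketch of the split (all names hypothetical):

/* Hypothetical illustration; the real code pairs
 * __arm_iommu_attach_device() with set_dma_ops(). */
struct dma_ops_stub { int placeholder; };
struct dev_stub { const struct dma_ops_stub *dma_ops; };
struct mapping_stub { int placeholder; };

static const struct dma_ops_stub iommu_ops_stub;

static int __attach(struct dev_stub *dev, struct mapping_stub *m)
{
	(void)dev; (void)m;
	/* core bookkeeping only -- internal callers that manage
	 * the ops themselves call this directly */
	return 0;
}

static int public_attach(struct dev_stub *dev, struct mapping_stub *m)
{
	int err = __attach(dev, m);

	if (err)
		return err;
	dev->dma_ops = &iommu_ops_stub;	/* only the exported API swaps ops */
	return 0;
}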

View File

@ -45,6 +45,16 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
vcpu->arch.hcr_el2 &= ~HCR_RW;
}
static inline unsigned long vcpu_get_hcr(struct kvm_vcpu *vcpu)
{
return vcpu->arch.hcr_el2;
}
static inline void vcpu_set_hcr(struct kvm_vcpu *vcpu, unsigned long hcr)
{
vcpu->arch.hcr_el2 = hcr;
}
static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
{
return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc;

View File

@ -116,9 +116,6 @@ struct kvm_vcpu_arch {
* Anything that is not used directly from assembly code goes
* here.
*/
/* dcache set/way operation pending */
int last_pcpu;
cpumask_t require_dcache_flush;
/* Don't run the guest */
bool pause;

View File

@ -243,24 +243,46 @@ static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
return (vcpu_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
}
static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
unsigned long size,
bool ipa_uncached)
static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn,
unsigned long size,
bool ipa_uncached)
{
void *va = page_address(pfn_to_page(pfn));
if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached)
kvm_flush_dcache_to_poc((void *)hva, size);
kvm_flush_dcache_to_poc(va, size);
if (!icache_is_aliasing()) { /* PIPT */
flush_icache_range(hva, hva + size);
flush_icache_range((unsigned long)va,
(unsigned long)va + size);
} else if (!icache_is_aivivt()) { /* non ASID-tagged VIVT */
/* any kind of VIPT cache */
__flush_icache_all();
}
}
static inline void __kvm_flush_dcache_pte(pte_t pte)
{
struct page *page = pte_page(pte);
kvm_flush_dcache_to_poc(page_address(page), PAGE_SIZE);
}
static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
{
struct page *page = pmd_page(pmd);
kvm_flush_dcache_to_poc(page_address(page), PMD_SIZE);
}
static inline void __kvm_flush_dcache_pud(pud_t pud)
{
struct page *page = pud_page(pud);
kvm_flush_dcache_to_poc(page_address(page), PUD_SIZE);
}
#define kvm_virt_to_phys(x) __virt_to_phys((unsigned long)(x))
void stage2_flush_vm(struct kvm *kvm);
void kvm_set_way_flush(struct kvm_vcpu *vcpu);
void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);
#endif /* __ASSEMBLY__ */
#endif /* __ARM64_KVM_MMU_H__ */

View File

@ -69,68 +69,31 @@ static u32 get_ccsidr(u32 csselr)
return ccsidr;
}
static void do_dc_cisw(u32 val)
{
asm volatile("dc cisw, %x0" : : "r" (val));
dsb(ish);
}
static void do_dc_csw(u32 val)
{
asm volatile("dc csw, %x0" : : "r" (val));
dsb(ish);
}
/* See note at ARM ARM B1.14.4 */
/*
* See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
*/
static bool access_dcsw(struct kvm_vcpu *vcpu,
const struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
unsigned long val;
int cpu;
if (!p->is_write)
return read_from_write_only(vcpu, p);
cpu = get_cpu();
cpumask_setall(&vcpu->arch.require_dcache_flush);
cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush);
/* If we were already preempted, take the long way around */
if (cpu != vcpu->arch.last_pcpu) {
flush_cache_all();
goto done;
}
val = *vcpu_reg(vcpu, p->Rt);
switch (p->CRm) {
case 6: /* Upgrade DCISW to DCCISW, as per HCR.SWIO */
case 14: /* DCCISW */
do_dc_cisw(val);
break;
case 10: /* DCCSW */
do_dc_csw(val);
break;
}
done:
put_cpu();
kvm_set_way_flush(vcpu);
return true;
}
/*
* Generic accessor for VM registers. Only called as long as HCR_TVM
* is set.
* is set. If the guest enables the MMU, we stop trapping the VM
* sys_regs and leave it in complete control of the caches.
*/
static bool access_vm_reg(struct kvm_vcpu *vcpu,
const struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
unsigned long val;
bool was_enabled = vcpu_has_cache_enabled(vcpu);
BUG_ON(!p->is_write);
@ -143,25 +106,7 @@ static bool access_vm_reg(struct kvm_vcpu *vcpu,
vcpu_cp15_64_low(vcpu, r->reg) = val & 0xffffffffUL;
}
return true;
}
/*
* SCTLR_EL1 accessor. Only called as long as HCR_TVM is set. If the
* guest enables the MMU, we stop trapping the VM sys_regs and leave
* it in complete control of the caches.
*/
static bool access_sctlr(struct kvm_vcpu *vcpu,
const struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
access_vm_reg(vcpu, p, r);
if (vcpu_has_cache_enabled(vcpu)) { /* MMU+Caches enabled? */
vcpu->arch.hcr_el2 &= ~HCR_TVM;
stage2_flush_vm(vcpu->kvm);
}
kvm_toggle_cache(vcpu, was_enabled);
return true;
}
@ -377,7 +322,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
NULL, reset_mpidr, MPIDR_EL1 },
/* SCTLR_EL1 */
{ Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000),
access_sctlr, reset_val, SCTLR_EL1, 0x00C50078 },
access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
/* CPACR_EL1 */
{ Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b010),
NULL, reset_val, CPACR_EL1, 0 },
@ -657,7 +602,7 @@ static const struct sys_reg_desc cp14_64_regs[] = {
* register).
*/
static const struct sys_reg_desc cp15_regs[] = {
{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_sctlr, NULL, c1_SCTLR },
{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, c1_SCTLR },
{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
{ Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 },
{ Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR },

View File

@ -142,6 +142,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
else if (fault & VM_FAULT_SIGSEGV)
goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();

View File

@ -176,6 +176,8 @@ retry:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
else if (fault & VM_FAULT_SIGSEGV)
goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();

View File

@ -168,6 +168,8 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
else if (fault & VM_FAULT_SIGSEGV)
goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();

View File

@ -172,6 +172,8 @@ retry:
*/
if (fault & VM_FAULT_OOM) {
goto out_of_memory;
} else if (fault & VM_FAULT_SIGSEGV) {
goto bad_area;
} else if (fault & VM_FAULT_SIGBUS) {
signal = SIGBUS;
goto bad_area;

View File

@ -200,6 +200,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
else if (fault & VM_FAULT_SIGSEGV)
goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();

View File

@ -145,6 +145,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
else if (fault & VM_FAULT_SIGSEGV)
goto map_err;
else if (fault & VM_FAULT_SIGBUS)
goto bus_err;
BUG();

View File

@ -141,6 +141,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
else if (fault & VM_FAULT_SIGSEGV)
goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();

View File

@ -224,6 +224,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
else if (fault & VM_FAULT_SIGSEGV)
goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();

View File

@ -158,6 +158,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
else if (fault & VM_FAULT_SIGSEGV)
goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();

View File

@ -262,6 +262,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
else if (fault & VM_FAULT_SIGSEGV)
goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();

View File

@ -135,6 +135,8 @@ survive:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
else if (fault & VM_FAULT_SIGSEGV)
goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();

View File

@ -171,6 +171,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
else if (fault & VM_FAULT_SIGSEGV)
goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();

View File

@ -256,6 +256,8 @@ good_area:
*/
if (fault & VM_FAULT_OOM)
goto out_of_memory;
else if (fault & VM_FAULT_SIGSEGV)
goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto bad_area;
BUG();

View File

@ -76,7 +76,7 @@ int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
if (*flt & VM_FAULT_OOM) {
ret = -ENOMEM;
goto out_unlock;
} else if (*flt & VM_FAULT_SIGBUS) {
} else if (*flt & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) {
ret = -EFAULT;
goto out_unlock;
}

View File

@ -437,6 +437,8 @@ good_area:
*/
fault = handle_mm_fault(mm, vma, address, flags);
if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) {
if (fault & VM_FAULT_SIGSEGV)
goto bad_area;
rc = mm_fault_error(regs, address, fault);
if (rc >= MM_FAULT_RETURN)
goto bail;

View File

@ -304,7 +304,7 @@ int pnv_save_sprs_for_winkle(void)
* all cpus at boot. Get these reg values of current cpu and use the
* same across all cpus.
*/
uint64_t lpcr_val = mfspr(SPRN_LPCR);
uint64_t lpcr_val = mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1;
uint64_t hid0_val = mfspr(SPRN_HID0);
uint64_t hid1_val = mfspr(SPRN_HID1);
uint64_t hid4_val = mfspr(SPRN_HID4);

View File

@ -337,6 +337,7 @@ static inline void disable_surveillance(void)
args.token = rtas_token("set-indicator");
if (args.token == RTAS_UNKNOWN_SERVICE)
return;
args.token = cpu_to_be32(args.token);
args.nargs = cpu_to_be32(3);
args.nret = cpu_to_be32(1);
args.rets = &args.args[3];

View File

@ -374,6 +374,12 @@ static noinline void do_fault_error(struct pt_regs *regs, int fault)
do_no_context(regs);
else
pagefault_out_of_memory();
} else if (fault & VM_FAULT_SIGSEGV) {
/* Kernel mode? Handle exceptions or die */
if (!user_mode(regs))
do_no_context(regs);
else
do_sigsegv(regs, SEGV_MAPERR);
} else if (fault & VM_FAULT_SIGBUS) {
/* Kernel mode? Handle exceptions or die */
if (!user_mode(regs))

View File

@ -114,6 +114,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
else if (fault & VM_FAULT_SIGSEGV)
goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();

View File

@ -353,6 +353,8 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
} else {
if (fault & VM_FAULT_SIGBUS)
do_sigbus(regs, error_code, address);
else if (fault & VM_FAULT_SIGSEGV)
bad_area(regs, error_code, address);
else
BUG();
}

View File

@ -249,6 +249,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
else if (fault & VM_FAULT_SIGSEGV)
goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();

View File

@ -446,6 +446,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
else if (fault & VM_FAULT_SIGSEGV)
goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();

View File

@ -442,6 +442,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
else if (fault & VM_FAULT_SIGSEGV)
goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();

View File

@ -80,6 +80,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM) {
goto out_of_memory;
} else if (fault & VM_FAULT_SIGSEGV) {
goto out;
} else if (fault & VM_FAULT_SIGBUS) {
err = -EACCES;
goto out;

View File

@ -192,6 +192,9 @@ static void recalculate_apic_map(struct kvm *kvm)
u16 cid, lid;
u32 ldr, aid;
if (!kvm_apic_present(vcpu))
continue;
aid = kvm_apic_id(apic);
ldr = kvm_apic_get_reg(apic, APIC_LDR);
cid = apic_cluster_id(new, ldr);

View File

@ -898,6 +898,8 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
VM_FAULT_HWPOISON_LARGE))
do_sigbus(regs, error_code, address, fault);
else if (fault & VM_FAULT_SIGSEGV)
bad_area_nosemaphore(regs, error_code, address);
else
BUG();
}

View File

@ -117,6 +117,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
else if (fault & VM_FAULT_SIGSEGV)
goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();

View File

@ -134,8 +134,6 @@ source "drivers/staging/Kconfig"
source "drivers/platform/Kconfig"
source "drivers/soc/Kconfig"
source "drivers/clk/Kconfig"
source "drivers/hwspinlock/Kconfig"

View File

@ -2098,32 +2098,26 @@ static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
* If an image has a non-zero parent overlap, get a reference to its
* parent.
*
* We must get the reference before checking for the overlap to
* coordinate properly with zeroing the parent overlap in
* rbd_dev_v2_parent_info() when an image gets flattened. We
* drop it again if there is no overlap.
*
* Returns true if the rbd device has a parent with a non-zero
* overlap and a reference for it was successfully taken, or
* false otherwise.
*/
static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
{
int counter;
int counter = 0;
if (!rbd_dev->parent_spec)
return false;
counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
if (counter > 0 && rbd_dev->parent_overlap)
return true;
/* Image was flattened, but parent is not yet torn down */
down_read(&rbd_dev->header_rwsem);
if (rbd_dev->parent_overlap)
counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
up_read(&rbd_dev->header_rwsem);
if (counter < 0)
rbd_warn(rbd_dev, "parent reference overflow");
return false;
return counter > 0;
}
/*
@ -4239,7 +4233,6 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
*/
if (rbd_dev->parent_overlap) {
rbd_dev->parent_overlap = 0;
smp_mb();
rbd_dev_parent_put(rbd_dev);
pr_info("%s: clone image has been flattened\n",
rbd_dev->disk->disk_name);
@ -4285,7 +4278,6 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
* treat it specially.
*/
rbd_dev->parent_overlap = overlap;
smp_mb();
if (!overlap) {
/* A null parent_spec indicates it's the initial probe */
@ -5114,10 +5106,7 @@ static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
{
struct rbd_image_header *header;
/* Drop parent reference unless it's already been done (or none) */
if (rbd_dev->parent_overlap)
rbd_dev_parent_put(rbd_dev);
rbd_dev_parent_put(rbd_dev);
/* Free dynamic fields from the header, then zero it out */
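
rbd_dev_parent_get() above now takes the parent reference under header_rwsem only when a non-zero overlap is visible, and leans on atomic_inc_return_safe() to refuse the increment rather than wrap the counter. A plain-C model of the assumed "safe increment" semantics (not the kernel primitive itself):

#include <limits.h>

/* Assumed semantics: return the new count, or a negative value
 * (leaving the counter saturated) instead of overflowing. */
static int inc_return_safe(int *v)
{
	if (*v >= INT_MAX)
		return -1;	/* caller warns "parent reference overflow" */
	return ++*v;
}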

View File

@ -26,6 +26,7 @@
#include <linux/slab.h>
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_pm4_headers.h"
#define MQD_SIZE_ALIGNED 768
@ -169,9 +170,8 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
kfd->shared_resources = *gpu_resources;
/* calculate max size of mqds needed for queues */
size = max_num_of_processes *
max_num_of_queues_per_process *
kfd->device_info->mqd_size_aligned;
size = max_num_of_queues_per_device *
kfd->device_info->mqd_size_aligned;
/* add another 512KB for all other allocations on gart */
size += 512 * 1024;

View File

@ -183,6 +183,13 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
mutex_lock(&dqm->lock);
if (dqm->total_queue_count >= max_num_of_queues_per_device) {
pr_warn("amdkfd: Can't create new usermode queue because %d queues were already created\n",
dqm->total_queue_count);
mutex_unlock(&dqm->lock);
return -EPERM;
}
if (list_empty(&qpd->queues_list)) {
retval = allocate_vmid(dqm, qpd, q);
if (retval != 0) {
@ -207,6 +214,14 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
list_add(&q->list, &qpd->queues_list);
dqm->queue_count++;
/*
* Unconditionally increment this counter, regardless of the queue's
* type or whether the queue is active.
*/
dqm->total_queue_count++;
pr_debug("Total of %d queues are accountable so far\n",
dqm->total_queue_count);
mutex_unlock(&dqm->lock);
return 0;
}
@ -326,6 +341,15 @@ static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
if (list_empty(&qpd->queues_list))
deallocate_vmid(dqm, qpd, q);
dqm->queue_count--;
/*
* Unconditionally decrement this counter, regardless of the queue's
* type
*/
dqm->total_queue_count--;
pr_debug("Total of %d queues are accountable so far\n",
dqm->total_queue_count);
out:
mutex_unlock(&dqm->lock);
return retval;
@ -541,10 +565,14 @@ static int init_pipelines(struct device_queue_manager *dqm,
for (i = 0; i < pipes_num; i++) {
inx = i + first_pipe;
/*
* HPD buffer on GTT is allocated by amdkfd, no need to waste
* space in GTT for pipelines we don't initialize
*/
pipe_hpd_addr = dqm->pipelines_addr + i * CIK_HPD_EOP_BYTES;
pr_debug("kfd: pipeline address %llX\n", pipe_hpd_addr);
/* = log2(bytes/4)-1 */
kfd2kgd->init_pipeline(dqm->dev->kgd, i,
kfd2kgd->init_pipeline(dqm->dev->kgd, inx,
CIK_HPD_EOP_BYTES_LOG2 - 3, pipe_hpd_addr);
}
@ -560,7 +588,7 @@ static int init_scheduler(struct device_queue_manager *dqm)
pr_debug("kfd: In %s\n", __func__);
retval = init_pipelines(dqm, get_pipes_num(dqm), KFD_DQM_FIRST_PIPE);
retval = init_pipelines(dqm, get_pipes_num(dqm), get_first_pipe(dqm));
if (retval != 0)
return retval;
@ -752,6 +780,21 @@ static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
pr_debug("kfd: In func %s\n", __func__);
mutex_lock(&dqm->lock);
if (dqm->total_queue_count >= max_num_of_queues_per_device) {
pr_warn("amdkfd: Can't create new kernel queue because %d queues were already created\n",
dqm->total_queue_count);
mutex_unlock(&dqm->lock);
return -EPERM;
}
/*
* Unconditionally increment this counter, regardless of the queue's
* type or whether the queue is active.
*/
dqm->total_queue_count++;
pr_debug("Total of %d queues are accountable so far\n",
dqm->total_queue_count);
list_add(&kq->list, &qpd->priv_queue_list);
dqm->queue_count++;
qpd->is_debug = true;
@ -775,6 +818,13 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
dqm->queue_count--;
qpd->is_debug = false;
execute_queues_cpsch(dqm, false);
/*
* Unconditionally decrement this counter, regardless of the queue's
* type.
*/
dqm->total_queue_count--;
pr_debug("Total of %d queues are accountable so far\n",
dqm->total_queue_count);
mutex_unlock(&dqm->lock);
}
@ -793,6 +843,13 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
mutex_lock(&dqm->lock);
if (dqm->total_queue_count >= max_num_of_queues_per_device) {
pr_warn("amdkfd: Can't create new usermode queue because %d queues were already created\n",
dqm->total_queue_count);
retval = -EPERM;
goto out;
}
mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_CP);
if (mqd == NULL) {
mutex_unlock(&dqm->lock);
@ -810,6 +867,15 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
retval = execute_queues_cpsch(dqm, false);
}
/*
* Unconditionally increment this counter, regardless of the queue's
* type or whether the queue is active.
*/
dqm->total_queue_count++;
pr_debug("Total of %d queues are accountable so far\n",
dqm->total_queue_count);
out:
mutex_unlock(&dqm->lock);
return retval;
@ -930,6 +996,14 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
/*
* Unconditionally decrement this counter, regardless of the queue's
* type
*/
dqm->total_queue_count--;
pr_debug("Total of %d queues are accountable so far\n",
dqm->total_queue_count);
mutex_unlock(&dqm->lock);
return 0;
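
The amdkfd hunks above replace two per-process module parameters with a single per-device queue budget: every create path checks the limit and bumps total_queue_count under dqm->lock, and every destroy path decrements it, regardless of queue type or whether the queue is active. The invariant, reduced to a self-contained userspace sketch (names hypothetical):

#include <errno.h>
#include <pthread.h>

/* Hypothetical reduction of the per-device queue budget. */
struct queue_budget {
	pthread_mutex_t lock;	/* plays the role of dqm->lock */
	unsigned int used;	/* mirrors dqm->total_queue_count */
	unsigned int max;	/* mirrors max_num_of_queues_per_device */
};

static int budget_take(struct queue_budget *b)
{
	int ret = 0;

	pthread_mutex_lock(&b->lock);
	if (b->used >= b->max)
		ret = -EPERM;	/* same errno the dqm paths return */
	else
		b->used++;	/* counted for every queue, active or not */
	pthread_mutex_unlock(&b->lock);
	return ret;
}

static void budget_put(struct queue_budget *b)
{
	pthread_mutex_lock(&b->lock);
	b->used--;
	pthread_mutex_unlock(&b->lock);
}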

View File

@ -130,6 +130,7 @@ struct device_queue_manager {
struct list_head queues;
unsigned int processes_count;
unsigned int queue_count;
unsigned int total_queue_count;
unsigned int next_pipe_to_allocate;
unsigned int *allocated_queues;
unsigned int vmid_bitmap;

View File

@ -50,15 +50,10 @@ module_param(sched_policy, int, 0444);
MODULE_PARM_DESC(sched_policy,
"Kernel cmdline parameter that defines the amdkfd scheduling policy");
int max_num_of_processes = KFD_MAX_NUM_OF_PROCESSES_DEFAULT;
module_param(max_num_of_processes, int, 0444);
MODULE_PARM_DESC(max_num_of_processes,
"Kernel cmdline parameter that defines the amdkfd maximum number of supported processes");
int max_num_of_queues_per_process = KFD_MAX_NUM_OF_QUEUES_PER_PROCESS_DEFAULT;
module_param(max_num_of_queues_per_process, int, 0444);
MODULE_PARM_DESC(max_num_of_queues_per_process,
"Kernel cmdline parameter that defines the amdkfd maximum number of supported queues per process");
int max_num_of_queues_per_device = KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT;
module_param(max_num_of_queues_per_device, int, 0444);
MODULE_PARM_DESC(max_num_of_queues_per_device,
"Maximum number of supported queues per device (1 = Minimum, 4096 = default)");
bool kgd2kfd_init(unsigned interface_version,
const struct kfd2kgd_calls *f2g,
@ -100,16 +95,10 @@ static int __init kfd_module_init(void)
}
/* Verify module parameters */
if ((max_num_of_processes < 0) ||
(max_num_of_processes > KFD_MAX_NUM_OF_PROCESSES)) {
pr_err("kfd: max_num_of_processes must be between 0 to KFD_MAX_NUM_OF_PROCESSES\n");
return -1;
}
if ((max_num_of_queues_per_process < 0) ||
(max_num_of_queues_per_process >
KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)) {
pr_err("kfd: max_num_of_queues_per_process must be between 0 to KFD_MAX_NUM_OF_QUEUES_PER_PROCESS\n");
if ((max_num_of_queues_per_device < 0) ||
(max_num_of_queues_per_device >
KFD_MAX_NUM_OF_QUEUES_PER_DEVICE)) {
pr_err("kfd: max_num_of_queues_per_device must be between 0 to KFD_MAX_NUM_OF_QUEUES_PER_DEVICE\n");
return -1;
}

View File

@ -30,7 +30,7 @@ static DEFINE_MUTEX(pasid_mutex);
int kfd_pasid_init(void)
{
pasid_limit = max_num_of_processes;
pasid_limit = KFD_MAX_NUM_OF_PROCESSES;
pasid_bitmap = kcalloc(BITS_TO_LONGS(pasid_limit), sizeof(long), GFP_KERNEL);
if (!pasid_bitmap)

View File

@ -52,20 +52,19 @@
#define kfd_alloc_struct(ptr_to_struct) \
((typeof(ptr_to_struct)) kzalloc(sizeof(*ptr_to_struct), GFP_KERNEL))
/* Kernel module parameter to specify maximum number of supported processes */
extern int max_num_of_processes;
#define KFD_MAX_NUM_OF_PROCESSES_DEFAULT 32
#define KFD_MAX_NUM_OF_PROCESSES 512
#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
/*
* Kernel module parameter to specify maximum number of supported queues
* per process
* Kernel module parameter to specify maximum number of supported queues per
* device
*/
extern int max_num_of_queues_per_process;
extern int max_num_of_queues_per_device;
#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS_DEFAULT 128
#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT 4096
#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE \
(KFD_MAX_NUM_OF_PROCESSES * \
KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)
#define KFD_KERNEL_QUEUE_SIZE 2048

View File

@ -54,11 +54,11 @@ static int find_available_queue_slot(struct process_queue_manager *pqm,
pr_debug("kfd: in %s\n", __func__);
found = find_first_zero_bit(pqm->queue_slot_bitmap,
max_num_of_queues_per_process);
KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);
pr_debug("kfd: the new slot id %lu\n", found);
if (found >= max_num_of_queues_per_process) {
if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) {
pr_info("amdkfd: Can not open more queues for process with pasid %d\n",
pqm->process->pasid);
return -ENOMEM;
@ -76,7 +76,7 @@ int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p)
INIT_LIST_HEAD(&pqm->queues);
pqm->queue_slot_bitmap =
kzalloc(DIV_ROUND_UP(max_num_of_queues_per_process,
kzalloc(DIV_ROUND_UP(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
BITS_PER_BYTE), GFP_KERNEL);
if (pqm->queue_slot_bitmap == NULL)
return -ENOMEM;
@ -203,6 +203,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
pqn->kq = NULL;
retval = dev->dqm->create_queue(dev->dqm, q, &pdd->qpd,
&q->properties.vmid);
pr_debug("DQM returned %d for create_queue\n", retval);
print_queue(q);
break;
case KFD_QUEUE_TYPE_DIQ:
@ -222,7 +223,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
}
if (retval != 0) {
pr_err("kfd: error dqm create queue\n");
pr_debug("Error dqm create queue\n");
goto err_create_queue;
}
@ -241,7 +242,10 @@ int pqm_create_queue(struct process_queue_manager *pqm,
err_create_queue:
kfree(pqn);
err_allocate_pqn:
/* if the queues list is now empty, unregister the process from the device */
clear_bit(*qid, pqm->queue_slot_bitmap);
if (list_empty(&pqm->queues))
dev->dqm->unregister_process(dev->dqm, &pdd->qpd);
return retval;
}

View File

@ -145,6 +145,31 @@ int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, struct drm_
}
EXPORT_SYMBOL(drm_fb_helper_add_one_connector);
static void remove_from_modeset(struct drm_mode_set *set,
struct drm_connector *connector)
{
int i, j;
for (i = 0; i < set->num_connectors; i++) {
if (set->connectors[i] == connector)
break;
}
if (i == set->num_connectors)
return;
for (j = i + 1; j < set->num_connectors; j++) {
set->connectors[j - 1] = set->connectors[j];
}
set->num_connectors--;
/* because i915 is pissy about this..
* TODO maybe need to make sure we set it back to !=NULL somewhere?
*/
if (set->num_connectors == 0)
set->fb = NULL;
}
int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
struct drm_connector *connector)
{
@ -167,6 +192,11 @@ int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
}
fb_helper->connector_count--;
kfree(fb_helper_connector);
/* also cleanup dangling references to the connector: */
for (i = 0; i < fb_helper->crtc_count; i++)
remove_from_modeset(&fb_helper->crtc_info[i].mode_set, connector);
return 0;
}
EXPORT_SYMBOL(drm_fb_helper_remove_one_connector);

View File

@ -32,6 +32,8 @@
struct tda998x_priv {
struct i2c_client *cec;
struct i2c_client *hdmi;
struct mutex mutex;
struct delayed_work dwork;
uint16_t rev;
uint8_t current_page;
int dpms;
@ -402,9 +404,10 @@ reg_read_range(struct tda998x_priv *priv, uint16_t reg, char *buf, int cnt)
uint8_t addr = REG2ADDR(reg);
int ret;
mutex_lock(&priv->mutex);
ret = set_page(priv, reg);
if (ret < 0)
return ret;
goto out;
ret = i2c_master_send(client, &addr, sizeof(addr));
if (ret < 0)
@ -414,10 +417,12 @@ reg_read_range(struct tda998x_priv *priv, uint16_t reg, char *buf, int cnt)
if (ret < 0)
goto fail;
return ret;
goto out;
fail:
dev_err(&client->dev, "Error %d reading from 0x%x\n", ret, reg);
out:
mutex_unlock(&priv->mutex);
return ret;
}
@ -431,13 +436,16 @@ reg_write_range(struct tda998x_priv *priv, uint16_t reg, uint8_t *p, int cnt)
buf[0] = REG2ADDR(reg);
memcpy(&buf[1], p, cnt);
mutex_lock(&priv->mutex);
ret = set_page(priv, reg);
if (ret < 0)
return;
goto out;
ret = i2c_master_send(client, buf, cnt + 1);
if (ret < 0)
dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg);
out:
mutex_unlock(&priv->mutex);
}
static int
@ -459,13 +467,16 @@ reg_write(struct tda998x_priv *priv, uint16_t reg, uint8_t val)
uint8_t buf[] = {REG2ADDR(reg), val};
int ret;
mutex_lock(&priv->mutex);
ret = set_page(priv, reg);
if (ret < 0)
return;
goto out;
ret = i2c_master_send(client, buf, sizeof(buf));
if (ret < 0)
dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg);
out:
mutex_unlock(&priv->mutex);
}
static void
@ -475,13 +486,16 @@ reg_write16(struct tda998x_priv *priv, uint16_t reg, uint16_t val)
uint8_t buf[] = {REG2ADDR(reg), val >> 8, val};
int ret;
mutex_lock(&priv->mutex);
ret = set_page(priv, reg);
if (ret < 0)
return;
goto out;
ret = i2c_master_send(client, buf, sizeof(buf));
if (ret < 0)
dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg);
out:
mutex_unlock(&priv->mutex);
}
static void
@ -536,6 +550,17 @@ tda998x_reset(struct tda998x_priv *priv)
reg_write(priv, REG_MUX_VP_VIP_OUT, 0x24);
}
/* handle HDMI connect/disconnect */
static void tda998x_hpd(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
struct tda998x_priv *priv =
container_of(dwork, struct tda998x_priv, dwork);
if (priv->encoder && priv->encoder->dev)
drm_kms_helper_hotplug_event(priv->encoder->dev);
}
/*
* only 2 interrupts may occur: screen plug/unplug and EDID read
*/
@ -559,8 +584,7 @@ static irqreturn_t tda998x_irq_thread(int irq, void *data)
priv->wq_edid_wait = 0;
wake_up(&priv->wq_edid);
} else if (cec != 0) { /* HPD change */
if (priv->encoder && priv->encoder->dev)
drm_helper_hpd_irq_event(priv->encoder->dev);
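/* debounce: report the hotplug from process context ~100 ms (HZ/10) later */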
schedule_delayed_work(&priv->dwork, HZ/10);
}
return IRQ_HANDLED;
}
@ -1170,8 +1194,10 @@ static void tda998x_destroy(struct tda998x_priv *priv)
/* disable all IRQs and free the IRQ handler */
cec_write(priv, REG_CEC_RXSHPDINTENA, 0);
reg_clear(priv, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD);
if (priv->hdmi->irq)
if (priv->hdmi->irq) {
free_irq(priv->hdmi->irq, priv);
cancel_delayed_work_sync(&priv->dwork);
}
i2c_unregister_device(priv->cec);
}
@ -1255,6 +1281,7 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
struct device_node *np = client->dev.of_node;
u32 video;
int rev_lo, rev_hi, ret;
unsigned short cec_addr;
priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(2) | VIP_CNTRL_0_SWAP_B(3);
priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(0) | VIP_CNTRL_1_SWAP_D(1);
@ -1262,12 +1289,16 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
priv->current_page = 0xff;
priv->hdmi = client;
priv->cec = i2c_new_dummy(client->adapter, 0x34);
/* CEC I2C address bound to TDA998x I2C addr by configuration pins */
cec_addr = 0x34 + (client->addr & 0x03);
priv->cec = i2c_new_dummy(client->adapter, cec_addr);
if (!priv->cec)
return -ENODEV;
priv->dpms = DRM_MODE_DPMS_OFF;
mutex_init(&priv->mutex); /* protect the page access */
/* wake up the device: */
cec_write(priv, REG_CEC_ENAMODS,
CEC_ENAMODS_EN_RXSENS | CEC_ENAMODS_EN_HDMI);
@ -1323,8 +1354,9 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
if (client->irq) {
int irqf_trigger;
/* init read EDID waitqueue */
/* init read-EDID waitqueue and HPD work */
init_waitqueue_head(&priv->wq_edid);
INIT_DELAYED_WORK(&priv->dwork, tda998x_hpd);
/* clear pending interrupts */
reg_read(priv, REG_INT_FLAGS_0);

View File

@ -462,19 +462,13 @@ void intel_detect_pch(struct drm_device *dev)
} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_LPT;
DRM_DEBUG_KMS("Found LynxPoint PCH\n");
WARN_ON(!IS_HASWELL(dev));
WARN_ON(IS_HSW_ULT(dev));
} else if (IS_BROADWELL(dev)) {
dev_priv->pch_type = PCH_LPT;
dev_priv->pch_id =
INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
DRM_DEBUG_KMS("This is Broadwell, assuming "
"LynxPoint LP PCH\n");
WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
WARN_ON(IS_HSW_ULT(dev) || IS_BDW_ULT(dev));
} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_LPT;
DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
WARN_ON(!IS_HASWELL(dev));
WARN_ON(!IS_HSW_ULT(dev));
WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
WARN_ON(!IS_HSW_ULT(dev) && !IS_BDW_ULT(dev));
} else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_SPT;
DRM_DEBUG_KMS("Found SunrisePoint PCH\n");

View File

@ -2159,8 +2159,7 @@ struct drm_i915_cmd_table {
#define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \
(INTEL_DEVID(dev) & 0xFF00) == 0x0C00)
#define IS_BDW_ULT(dev) (IS_BROADWELL(dev) && \
((INTEL_DEVID(dev) & 0xf) == 0x2 || \
(INTEL_DEVID(dev) & 0xf) == 0x6 || \
((INTEL_DEVID(dev) & 0xf) == 0x6 || \
(INTEL_DEVID(dev) & 0xf) == 0xe))
#define IS_BDW_GT3(dev) (IS_BROADWELL(dev) && \
(INTEL_DEVID(dev) & 0x00F0) == 0x0020)

View File

@ -3148,6 +3148,13 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
u32 size = i915_gem_obj_ggtt_size(obj);
uint64_t val;
/* Adjust fence size to match tiled area */
if (obj->tiling_mode != I915_TILING_NONE) {
uint32_t row_size = obj->stride *
(obj->tiling_mode == I915_TILING_Y ? 32 : 8);
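/* round down to a whole number of tile rows; e.g. an X-tiled
 * object with a 512-byte stride has 4096-byte rows, so a
 * 10240-byte area rounds down to 8192 */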
size = (size / row_size) * row_size;
}
val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
0xfffff000) << 32;
val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
@ -4884,13 +4891,12 @@ i915_gem_init_hw(struct drm_device *dev)
for (i = 0; i < NUM_L3_SLICES(dev); i++)
i915_gem_l3_remap(&dev_priv->ring[RCS], i);
/*
* XXX: Contexts should only be initialized once. Doing a switch to the
* default context switch however is something we'd like to do after
* reset or thaw (the latter may not actually be necessary for HW, but
* goes with our code better). Context switching requires rings (for
* the do_switch), but before enabling PPGTT. So don't move this.
*/
ret = i915_ppgtt_init_hw(dev);
if (ret && ret != -EIO) {
DRM_ERROR("PPGTT enable failed %d\n", ret);
i915_gem_cleanup_ringbuffer(dev);
}
ret = i915_gem_context_enable(dev_priv);
if (ret && ret != -EIO) {
DRM_ERROR("Context enable failed %d\n", ret);
@ -4899,12 +4905,6 @@ i915_gem_init_hw(struct drm_device *dev)
return ret;
}
ret = i915_ppgtt_init_hw(dev);
if (ret && ret != -EIO) {
DRM_ERROR("PPGTT enable failed %d\n", ret);
i915_gem_cleanup_ringbuffer(dev);
}
return ret;
}

View File

@ -962,7 +962,7 @@ void intel_panel_enable_backlight(struct intel_connector *connector)
WARN_ON(panel->backlight.max == 0);
if (panel->backlight.level == 0) {
if (panel->backlight.level <= panel->backlight.min) {
panel->backlight.level = panel->backlight.max;
if (panel->backlight.device)
panel->backlight.device->props.brightness =

View File

@ -816,7 +816,6 @@ void cik_sdma_vm_write_pages(struct radeon_device *rdev,
for (; ndw > 0; ndw -= 2, --count, pe += 8) {
if (flags & R600_PTE_SYSTEM) {
value = radeon_vm_map_gart(rdev, addr);
value &= 0xFFFFFFFFFFFFF000ULL;
} else if (flags & R600_PTE_VALID) {
value = addr;
} else {

View File

@ -372,7 +372,6 @@ void cayman_dma_vm_write_pages(struct radeon_device *rdev,
for (; ndw > 0; ndw -= 2, --count, pe += 8) {
if (flags & R600_PTE_SYSTEM) {
value = radeon_vm_map_gart(rdev, addr);
value &= 0xFFFFFFFFFFFFF000ULL;
} else if (flags & R600_PTE_VALID) {
value = addr;
} else {

View File

@ -644,6 +644,7 @@ int r100_pci_gart_init(struct radeon_device *rdev)
return r;
rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush;
rdev->asic->gart.get_page_entry = &r100_pci_gart_get_page_entry;
rdev->asic->gart.set_page = &r100_pci_gart_set_page;
return radeon_gart_table_ram_alloc(rdev);
}
@ -681,11 +682,16 @@ void r100_pci_gart_disable(struct radeon_device *rdev)
WREG32(RADEON_AIC_HI_ADDR, 0);
}
uint64_t r100_pci_gart_get_page_entry(uint64_t addr, uint32_t flags)
{
return addr;
}
void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i,
uint64_t addr, uint32_t flags)
uint64_t entry)
{
u32 *gtt = rdev->gart.ptr;
gtt[i] = cpu_to_le32(lower_32_bits(addr));
gtt[i] = cpu_to_le32(lower_32_bits(entry));
}
void r100_pci_gart_fini(struct radeon_device *rdev)

View File

@ -73,11 +73,8 @@ void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev)
#define R300_PTE_WRITEABLE (1 << 2)
#define R300_PTE_READABLE (1 << 3)
void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
uint64_t addr, uint32_t flags)
uint64_t rv370_pcie_gart_get_page_entry(uint64_t addr, uint32_t flags)
{
void __iomem *ptr = rdev->gart.ptr;
addr = (lower_32_bits(addr) >> 8) |
((upper_32_bits(addr) & 0xff) << 24);
if (flags & RADEON_GART_PAGE_READ)
@ -86,10 +83,18 @@ void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
addr |= R300_PTE_WRITEABLE;
if (!(flags & RADEON_GART_PAGE_SNOOP))
addr |= R300_PTE_UNSNOOPED;
return addr;
}
void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
uint64_t entry)
{
void __iomem *ptr = rdev->gart.ptr;
/* On x86 we want this to be CPU endian; on powerpc without HW
 * swappers it'll get swapped on the way into VRAM - so no need
 * for cpu_to_le32 on VRAM tables */
writel(addr, ((void __iomem *)ptr) + (i * 4));
writel(entry, ((void __iomem *)ptr) + (i * 4));
}
int rv370_pcie_gart_init(struct radeon_device *rdev)
@ -109,6 +114,7 @@ int rv370_pcie_gart_init(struct radeon_device *rdev)
DRM_ERROR("Failed to register debugfs file for PCIE gart !\n");
rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush;
rdev->asic->gart.get_page_entry = &rv370_pcie_gart_get_page_entry;
rdev->asic->gart.set_page = &rv370_pcie_gart_set_page;
return radeon_gart_table_vram_alloc(rdev);
}

View File

@ -242,6 +242,7 @@ bool radeon_get_bios(struct radeon_device *rdev);
* Dummy page
*/
struct radeon_dummy_page {
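/* ready-to-write GART entry encoding this page's address and flags */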
uint64_t entry;
struct page *page;
dma_addr_t addr;
};
@ -645,7 +646,7 @@ struct radeon_gart {
unsigned num_cpu_pages;
unsigned table_size;
struct page **pages;
dma_addr_t *pages_addr;
uint64_t *pages_entry;
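/* one pre-computed GART entry per GPU page */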
bool ready;
};
@ -1847,8 +1848,9 @@ struct radeon_asic {
/* gart */
struct {
void (*tlb_flush)(struct radeon_device *rdev);
uint64_t (*get_page_entry)(uint64_t addr, uint32_t flags);
void (*set_page)(struct radeon_device *rdev, unsigned i,
uint64_t addr, uint32_t flags);
uint64_t entry);
} gart;
struct {
int (*init)(struct radeon_device *rdev);
@ -2852,7 +2854,8 @@ static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
#define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
#define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev))
#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev))
#define radeon_gart_set_page(rdev, i, p, f) (rdev)->asic->gart.set_page((rdev), (i), (p), (f))
#define radeon_gart_get_page_entry(a, f) (rdev)->asic->gart.get_page_entry((a), (f))
#define radeon_gart_set_page(rdev, i, e) (rdev)->asic->gart.set_page((rdev), (i), (e))
#define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev))
#define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev))
#define radeon_asic_vm_copy_pages(rdev, ib, pe, src, count) ((rdev)->asic->vm.copy_pages((rdev), (ib), (pe), (src), (count)))

View File

@ -159,11 +159,13 @@ void radeon_agp_disable(struct radeon_device *rdev)
DRM_INFO("Forcing AGP to PCIE mode\n");
rdev->flags |= RADEON_IS_PCIE;
rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush;
rdev->asic->gart.get_page_entry = &rv370_pcie_gart_get_page_entry;
rdev->asic->gart.set_page = &rv370_pcie_gart_set_page;
} else {
DRM_INFO("Forcing AGP to PCI mode\n");
rdev->flags |= RADEON_IS_PCI;
rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush;
rdev->asic->gart.get_page_entry = &r100_pci_gart_get_page_entry;
rdev->asic->gart.set_page = &r100_pci_gart_set_page;
}
rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
@ -199,6 +201,7 @@ static struct radeon_asic r100_asic = {
.mc_wait_for_idle = &r100_mc_wait_for_idle,
.gart = {
.tlb_flush = &r100_pci_gart_tlb_flush,
.get_page_entry = &r100_pci_gart_get_page_entry,
.set_page = &r100_pci_gart_set_page,
},
.ring = {
@ -265,6 +268,7 @@ static struct radeon_asic r200_asic = {
.mc_wait_for_idle = &r100_mc_wait_for_idle,
.gart = {
.tlb_flush = &r100_pci_gart_tlb_flush,
.get_page_entry = &r100_pci_gart_get_page_entry,
.set_page = &r100_pci_gart_set_page,
},
.ring = {
@ -359,6 +363,7 @@ static struct radeon_asic r300_asic = {
.mc_wait_for_idle = &r300_mc_wait_for_idle,
.gart = {
.tlb_flush = &r100_pci_gart_tlb_flush,
.get_page_entry = &r100_pci_gart_get_page_entry,
.set_page = &r100_pci_gart_set_page,
},
.ring = {
@ -425,6 +430,7 @@ static struct radeon_asic r300_asic_pcie = {
.mc_wait_for_idle = &r300_mc_wait_for_idle,
.gart = {
.tlb_flush = &rv370_pcie_gart_tlb_flush,
.get_page_entry = &rv370_pcie_gart_get_page_entry,
.set_page = &rv370_pcie_gart_set_page,
},
.ring = {
@ -491,6 +497,7 @@ static struct radeon_asic r420_asic = {
.mc_wait_for_idle = &r300_mc_wait_for_idle,
.gart = {
.tlb_flush = &rv370_pcie_gart_tlb_flush,
.get_page_entry = &rv370_pcie_gart_get_page_entry,
.set_page = &rv370_pcie_gart_set_page,
},
.ring = {
@ -557,6 +564,7 @@ static struct radeon_asic rs400_asic = {
.mc_wait_for_idle = &rs400_mc_wait_for_idle,
.gart = {
.tlb_flush = &rs400_gart_tlb_flush,
.get_page_entry = &rs400_gart_get_page_entry,
.set_page = &rs400_gart_set_page,
},
.ring = {
@ -623,6 +631,7 @@ static struct radeon_asic rs600_asic = {
.mc_wait_for_idle = &rs600_mc_wait_for_idle,
.gart = {
.tlb_flush = &rs600_gart_tlb_flush,
.get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.ring = {
@ -691,6 +700,7 @@ static struct radeon_asic rs690_asic = {
.mc_wait_for_idle = &rs690_mc_wait_for_idle,
.gart = {
.tlb_flush = &rs400_gart_tlb_flush,
.get_page_entry = &rs400_gart_get_page_entry,
.set_page = &rs400_gart_set_page,
},
.ring = {
@ -759,6 +769,7 @@ static struct radeon_asic rv515_asic = {
.mc_wait_for_idle = &rv515_mc_wait_for_idle,
.gart = {
.tlb_flush = &rv370_pcie_gart_tlb_flush,
.get_page_entry = &rv370_pcie_gart_get_page_entry,
.set_page = &rv370_pcie_gart_set_page,
},
.ring = {
@ -825,6 +836,7 @@ static struct radeon_asic r520_asic = {
.mc_wait_for_idle = &r520_mc_wait_for_idle,
.gart = {
.tlb_flush = &rv370_pcie_gart_tlb_flush,
.get_page_entry = &rv370_pcie_gart_get_page_entry,
.set_page = &rv370_pcie_gart_set_page,
},
.ring = {
@ -919,6 +931,7 @@ static struct radeon_asic r600_asic = {
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &r600_pcie_gart_tlb_flush,
.get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.ring = {
@ -1004,6 +1017,7 @@ static struct radeon_asic rv6xx_asic = {
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &r600_pcie_gart_tlb_flush,
.get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.ring = {
@ -1095,6 +1109,7 @@ static struct radeon_asic rs780_asic = {
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &r600_pcie_gart_tlb_flush,
.get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.ring = {
@ -1199,6 +1214,7 @@ static struct radeon_asic rv770_asic = {
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &r600_pcie_gart_tlb_flush,
.get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.ring = {
@ -1317,6 +1333,7 @@ static struct radeon_asic evergreen_asic = {
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &evergreen_pcie_gart_tlb_flush,
.get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.ring = {
@ -1409,6 +1426,7 @@ static struct radeon_asic sumo_asic = {
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &evergreen_pcie_gart_tlb_flush,
.get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.ring = {
@ -1500,6 +1518,7 @@ static struct radeon_asic btc_asic = {
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &evergreen_pcie_gart_tlb_flush,
.get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.ring = {
@ -1635,6 +1654,7 @@ static struct radeon_asic cayman_asic = {
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &cayman_pcie_gart_tlb_flush,
.get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.vm = {
@ -1738,6 +1758,7 @@ static struct radeon_asic trinity_asic = {
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &cayman_pcie_gart_tlb_flush,
.get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.vm = {
@ -1871,6 +1892,7 @@ static struct radeon_asic si_asic = {
.get_gpu_clock_counter = &si_get_gpu_clock_counter,
.gart = {
.tlb_flush = &si_pcie_gart_tlb_flush,
.get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.vm = {
@ -2032,6 +2054,7 @@ static struct radeon_asic ci_asic = {
.get_gpu_clock_counter = &cik_get_gpu_clock_counter,
.gart = {
.tlb_flush = &cik_pcie_gart_tlb_flush,
.get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.vm = {
@ -2139,6 +2162,7 @@ static struct radeon_asic kv_asic = {
.get_gpu_clock_counter = &cik_get_gpu_clock_counter,
.gart = {
.tlb_flush = &cik_pcie_gart_tlb_flush,
.get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.vm = {

View File

@ -67,8 +67,9 @@ bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
int r100_asic_reset(struct radeon_device *rdev);
u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc);
void r100_pci_gart_tlb_flush(struct radeon_device *rdev);
uint64_t r100_pci_gart_get_page_entry(uint64_t addr, uint32_t flags);
void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i,
uint64_t addr, uint32_t flags);
uint64_t entry);
void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring);
int r100_irq_set(struct radeon_device *rdev);
int r100_irq_process(struct radeon_device *rdev);
@ -172,8 +173,9 @@ extern void r300_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
extern int r300_cs_parse(struct radeon_cs_parser *p);
extern void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev);
extern uint64_t rv370_pcie_gart_get_page_entry(uint64_t addr, uint32_t flags);
extern void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
uint64_t addr, uint32_t flags);
uint64_t entry);
extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes);
extern int rv370_get_pcie_lanes(struct radeon_device *rdev);
extern void r300_set_reg_safe(struct radeon_device *rdev);
@ -208,8 +210,9 @@ extern void rs400_fini(struct radeon_device *rdev);
extern int rs400_suspend(struct radeon_device *rdev);
extern int rs400_resume(struct radeon_device *rdev);
void rs400_gart_tlb_flush(struct radeon_device *rdev);
uint64_t rs400_gart_get_page_entry(uint64_t addr, uint32_t flags);
void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
uint64_t addr, uint32_t flags);
uint64_t entry);
uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
int rs400_gart_init(struct radeon_device *rdev);
@ -232,8 +235,9 @@ int rs600_irq_process(struct radeon_device *rdev);
void rs600_irq_disable(struct radeon_device *rdev);
u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc);
void rs600_gart_tlb_flush(struct radeon_device *rdev);
uint64_t rs600_gart_get_page_entry(uint64_t addr, uint32_t flags);
void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
uint64_t addr, uint32_t flags);
uint64_t entry);
uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void rs600_bandwidth_update(struct radeon_device *rdev);

View File

@ -774,6 +774,8 @@ int radeon_dummy_page_init(struct radeon_device *rdev)
rdev->dummy_page.page = NULL;
return -ENOMEM;
}
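/* pre-compute the dummy page's GART entry once so binds and unbinds can reuse it */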
rdev->dummy_page.entry = radeon_gart_get_page_entry(rdev->dummy_page.addr,
RADEON_GART_PAGE_DUMMY);
return 0;
}

View File

@ -165,6 +165,19 @@ int radeon_gart_table_vram_pin(struct radeon_device *rdev)
radeon_bo_unpin(rdev->gart.robj);
radeon_bo_unreserve(rdev->gart.robj);
rdev->gart.table_addr = gpu_addr;
if (!r) {
int i;
/* We might have dropped some GART table updates while it wasn't
 * mapped; restore all entries
 */
for (i = 0; i < rdev->gart.num_gpu_pages; i++)
radeon_gart_set_page(rdev, i, rdev->gart.pages_entry[i]);
mb();
radeon_gart_tlb_flush(rdev);
}
return r;
}
@ -228,7 +241,6 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
unsigned t;
unsigned p;
int i, j;
u64 page_base;
if (!rdev->gart.ready) {
WARN(1, "trying to unbind memory from uninitialized GART !\n");
@ -239,14 +251,12 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
for (i = 0; i < pages; i++, p++) {
if (rdev->gart.pages[p]) {
rdev->gart.pages[p] = NULL;
rdev->gart.pages_addr[p] = rdev->dummy_page.addr;
page_base = rdev->gart.pages_addr[p];
for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
rdev->gart.pages_entry[t] = rdev->dummy_page.entry;
if (rdev->gart.ptr) {
radeon_gart_set_page(rdev, t, page_base,
RADEON_GART_PAGE_DUMMY);
radeon_gart_set_page(rdev, t,
rdev->dummy_page.entry);
}
page_base += RADEON_GPU_PAGE_SIZE;
}
}
}
@ -274,7 +284,7 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
{
unsigned t;
unsigned p;
uint64_t page_base;
uint64_t page_base, page_entry;
int i, j;
if (!rdev->gart.ready) {
@ -285,14 +295,15 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
for (i = 0; i < pages; i++, p++) {
rdev->gart.pages_addr[p] = dma_addr[i];
rdev->gart.pages[p] = pagelist[i];
if (rdev->gart.ptr) {
page_base = rdev->gart.pages_addr[p];
for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
radeon_gart_set_page(rdev, t, page_base, flags);
page_base += RADEON_GPU_PAGE_SIZE;
page_base = dma_addr[i];
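/* a CPU page may span several GPU pages; compute and store an entry for each */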
for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
page_entry = radeon_gart_get_page_entry(page_base, flags);
rdev->gart.pages_entry[t] = page_entry;
if (rdev->gart.ptr) {
radeon_gart_set_page(rdev, t, page_entry);
}
page_base += RADEON_GPU_PAGE_SIZE;
}
}
mb();
@ -334,16 +345,15 @@ int radeon_gart_init(struct radeon_device *rdev)
radeon_gart_fini(rdev);
return -ENOMEM;
}
rdev->gart.pages_addr = vzalloc(sizeof(dma_addr_t) *
rdev->gart.num_cpu_pages);
if (rdev->gart.pages_addr == NULL) {
rdev->gart.pages_entry = vmalloc(sizeof(uint64_t) *
rdev->gart.num_gpu_pages);
if (rdev->gart.pages_entry == NULL) {
radeon_gart_fini(rdev);
return -ENOMEM;
}
/* set GART entry to point to the dummy page by default */
for (i = 0; i < rdev->gart.num_cpu_pages; i++) {
rdev->gart.pages_addr[i] = rdev->dummy_page.addr;
}
for (i = 0; i < rdev->gart.num_gpu_pages; i++)
rdev->gart.pages_entry[i] = rdev->dummy_page.entry;
return 0;
}
@ -356,15 +366,15 @@ int radeon_gart_init(struct radeon_device *rdev)
*/
void radeon_gart_fini(struct radeon_device *rdev)
{
if (rdev->gart.pages && rdev->gart.pages_addr && rdev->gart.ready) {
if (rdev->gart.ready) {
/* unbind pages */
radeon_gart_unbind(rdev, 0, rdev->gart.num_cpu_pages);
}
rdev->gart.ready = false;
vfree(rdev->gart.pages);
vfree(rdev->gart.pages_addr);
vfree(rdev->gart.pages_entry);
rdev->gart.pages = NULL;
rdev->gart.pages_addr = NULL;
rdev->gart.pages_entry = NULL;
radeon_dummy_page_fini(rdev);
}

View File

@ -436,7 +436,7 @@ static int kgd_init_memory(struct kgd_dev *kgd)
static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
uint32_t hpd_size, uint64_t hpd_gpu_addr)
{
uint32_t mec = (++pipe_id / CIK_PIPE_PER_MEC) + 1;
uint32_t mec = (pipe_id / CIK_PIPE_PER_MEC) + 1;
uint32_t pipe = (pipe_id % CIK_PIPE_PER_MEC);
lock_srbm(kgd, mec, pipe, 0, 0);

View File

@ -587,10 +587,8 @@ uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr)
uint64_t result;
/* page table offset */
result = rdev->gart.pages_addr[addr >> PAGE_SHIFT];
/* in case cpu page size != gpu page size*/
result |= addr & (~PAGE_MASK);
result = rdev->gart.pages_entry[addr >> RADEON_GPU_PAGE_SHIFT];
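/* mask off the per-page flag bits kept in the low bits of the entry */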
result &= ~RADEON_GPU_PAGE_MASK;
return result;
}

View File

@ -212,11 +212,9 @@ void rs400_gart_fini(struct radeon_device *rdev)
#define RS400_PTE_WRITEABLE (1 << 2)
#define RS400_PTE_READABLE (1 << 3)
void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
uint64_t addr, uint32_t flags)
uint64_t rs400_gart_get_page_entry(uint64_t addr, uint32_t flags)
{
uint32_t entry;
u32 *gtt = rdev->gart.ptr;
entry = (lower_32_bits(addr) & PAGE_MASK) |
((upper_32_bits(addr) & 0xff) << 4);
@ -226,8 +224,14 @@ void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
entry |= RS400_PTE_WRITEABLE;
if (!(flags & RADEON_GART_PAGE_SNOOP))
entry |= RS400_PTE_UNSNOOPED;
entry = cpu_to_le32(entry);
gtt[i] = entry;
return entry;
}
void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
uint64_t entry)
{
u32 *gtt = rdev->gart.ptr;
gtt[i] = cpu_to_le32(lower_32_bits(entry));
}
int rs400_mc_wait_for_idle(struct radeon_device *rdev)

View File

@ -625,11 +625,8 @@ static void rs600_gart_fini(struct radeon_device *rdev)
radeon_gart_table_vram_free(rdev);
}
void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
uint64_t addr, uint32_t flags)
uint64_t rs600_gart_get_page_entry(uint64_t addr, uint32_t flags)
{
void __iomem *ptr = (void *)rdev->gart.ptr;
addr = addr & 0xFFFFFFFFFFFFF000ULL;
addr |= R600_PTE_SYSTEM;
if (flags & RADEON_GART_PAGE_VALID)
@ -640,7 +637,14 @@ void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
addr |= R600_PTE_WRITEABLE;
if (flags & RADEON_GART_PAGE_SNOOP)
addr |= R600_PTE_SNOOPED;
writeq(addr, ptr + (i * 8));
return addr;
}
void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
uint64_t entry)
{
void __iomem *ptr = (void *)rdev->gart.ptr;
writeq(entry, ptr + (i * 8));
}
int rs600_irq_set(struct radeon_device *rdev)

View File

@ -123,7 +123,6 @@ void si_dma_vm_write_pages(struct radeon_device *rdev,
for (; ndw > 0; ndw -= 2, --count, pe += 8) {
if (flags & R600_PTE_SYSTEM) {
value = radeon_vm_map_gart(rdev, addr);
value &= 0xFFFFFFFFFFFFF000ULL;
} else if (flags & R600_PTE_VALID) {
value = addr;
} else {

View File

@ -406,11 +406,9 @@ int vmw_3d_resource_inc(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
--dev_priv->num_3d_resources;
} else if (unhide_svga) {
mutex_lock(&dev_priv->hw_mutex);
vmw_write(dev_priv, SVGA_REG_ENABLE,
vmw_read(dev_priv, SVGA_REG_ENABLE) &
~SVGA_REG_ENABLE_HIDE);
mutex_unlock(&dev_priv->hw_mutex);
}
mutex_unlock(&dev_priv->release_mutex);
@ -433,13 +431,10 @@ void vmw_3d_resource_dec(struct vmw_private *dev_priv,
mutex_lock(&dev_priv->release_mutex);
if (unlikely(--dev_priv->num_3d_resources == 0))
vmw_release_device(dev_priv);
else if (hide_svga) {
mutex_lock(&dev_priv->hw_mutex);
else if (hide_svga)
vmw_write(dev_priv, SVGA_REG_ENABLE,
vmw_read(dev_priv, SVGA_REG_ENABLE) |
SVGA_REG_ENABLE_HIDE);
mutex_unlock(&dev_priv->hw_mutex);
}
n3d = (int32_t) dev_priv->num_3d_resources;
mutex_unlock(&dev_priv->release_mutex);
@ -600,12 +595,14 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
dev_priv->dev = dev;
dev_priv->vmw_chipset = chipset;
dev_priv->last_read_seqno = (uint32_t) -100;
mutex_init(&dev_priv->hw_mutex);
mutex_init(&dev_priv->cmdbuf_mutex);
mutex_init(&dev_priv->release_mutex);
mutex_init(&dev_priv->binding_mutex);
rwlock_init(&dev_priv->resource_lock);
ttm_lock_init(&dev_priv->reservation_sem);
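/* register access no longer takes the coarse hw_mutex; these finer-grained spinlocks protect it instead */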
spin_lock_init(&dev_priv->hw_lock);
spin_lock_init(&dev_priv->waiter_lock);
spin_lock_init(&dev_priv->cap_lock);
for (i = vmw_res_context; i < vmw_res_max; ++i) {
idr_init(&dev_priv->res_idr[i]);
@ -626,14 +623,11 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
dev_priv->enable_fb = enable_fbdev;
mutex_lock(&dev_priv->hw_mutex);
vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
svga_id = vmw_read(dev_priv, SVGA_REG_ID);
if (svga_id != SVGA_ID_2) {
ret = -ENOSYS;
DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
mutex_unlock(&dev_priv->hw_mutex);
goto out_err0;
}
@ -683,10 +677,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
dev_priv->prim_bb_mem = dev_priv->vram_size;
ret = vmw_dma_masks(dev_priv);
if (unlikely(ret != 0)) {
mutex_unlock(&dev_priv->hw_mutex);
if (unlikely(ret != 0))
goto out_err0;
}
/*
* Limit back buffer size to VRAM size. Remove this once
@ -695,8 +687,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
if (dev_priv->prim_bb_mem > dev_priv->vram_size)
dev_priv->prim_bb_mem = dev_priv->vram_size;
mutex_unlock(&dev_priv->hw_mutex);
vmw_print_capabilities(dev_priv->capabilities);
if (dev_priv->capabilities & SVGA_CAP_GMR2) {
@ -1160,9 +1150,7 @@ static int vmw_master_set(struct drm_device *dev,
if (unlikely(ret != 0))
return ret;
vmw_kms_save_vga(dev_priv);
mutex_lock(&dev_priv->hw_mutex);
vmw_write(dev_priv, SVGA_REG_TRACES, 0);
mutex_unlock(&dev_priv->hw_mutex);
}
if (active) {
@ -1196,9 +1184,7 @@ out_no_active_lock:
if (!dev_priv->enable_fb) {
vmw_kms_restore_vga(dev_priv);
vmw_3d_resource_dec(dev_priv, true);
mutex_lock(&dev_priv->hw_mutex);
vmw_write(dev_priv, SVGA_REG_TRACES, 1);
mutex_unlock(&dev_priv->hw_mutex);
}
return ret;
}
@ -1233,9 +1219,7 @@ static void vmw_master_drop(struct drm_device *dev,
DRM_ERROR("Unable to clean VRAM on master drop.\n");
vmw_kms_restore_vga(dev_priv);
vmw_3d_resource_dec(dev_priv, true);
mutex_lock(&dev_priv->hw_mutex);
vmw_write(dev_priv, SVGA_REG_TRACES, 1);
mutex_unlock(&dev_priv->hw_mutex);
}
dev_priv->active_master = &dev_priv->fbdev_master;
@ -1367,10 +1351,8 @@ static void vmw_pm_complete(struct device *kdev)
struct drm_device *dev = pci_get_drvdata(pdev);
struct vmw_private *dev_priv = vmw_priv(dev);
mutex_lock(&dev_priv->hw_mutex);
vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
(void) vmw_read(dev_priv, SVGA_REG_ID);
mutex_unlock(&dev_priv->hw_mutex);
/**
* Reclaim 3d reference held by fbdev and potentially

Some files were not shown because too many files have changed in this diff.