commit e2a553dbf1

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Conflicts:
	include/net/ipip.h

The changes made to ipip.h in 'net' were already included in 'net-next'
before that header was moved to another location.

Signed-off-by: David S. Miller <davem@davemloft.net>
@@ -5,7 +5,7 @@ Supported adapters:
 Documentation:
	http://www.diolan.com/i2c/u2c12.html
 
-Author: Guenter Roeck <guenter.roeck@ericsson.com>
+Author: Guenter Roeck <linux@roeck-us.net>
 
 Description
 -----------
@@ -912,7 +912,7 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
 models depending on the codec chip.  The list of available models
 is found in HD-Audio-Models.txt
 
-The model name "genric" is treated as a special case.  When this
+The model name "generic" is treated as a special case.  When this
 model is given, the driver uses the generic codec parser without
 "codec-patch".  It's sometimes good for testing and debugging.
 
@@ -285,7 +285,7 @@ sample data.
 <H4>
 7.2.4 Close Callback</H4>
 The <TT>close</TT> callback is called when this device is closed by the
-applicaion. If any private data was allocated in open callback, it must
+application. If any private data was allocated in open callback, it must
 be released in the close callback. The deletion of ALSA port should be
 done here, too. This callback must not be NULL.
 <H4>

MAINTAINERS
@@ -1461,6 +1461,12 @@ F:	drivers/dma/at_hdmac.c
 F:	drivers/dma/at_hdmac_regs.h
 F:	include/linux/platform_data/dma-atmel.h
 
+ATMEL I2C DRIVER
+M:	Ludovic Desroches <ludovic.desroches@atmel.com>
+L:	linux-i2c@vger.kernel.org
+S:	Supported
+F:	drivers/i2c/busses/i2c-at91.c
+
 ATMEL ISI DRIVER
 M:	Josh Wu <josh.wu@atmel.com>
 L:	linux-media@vger.kernel.org
@@ -2623,7 +2629,7 @@ F:	include/uapi/drm/
 
 INTEL DRM DRIVERS (excluding Poulsbo, Moorestown and derivative chipsets)
 M:	Daniel Vetter <daniel.vetter@ffwll.ch>
-L:	intel-gfx@lists.freedesktop.org (subscribers-only)
+L:	intel-gfx@lists.freedesktop.org
 L:	dri-devel@lists.freedesktop.org
 T:	git git://people.freedesktop.org/~danvet/drm-intel
 S:	Supported
@@ -5641,6 +5647,14 @@ S:	Maintained
 F:	drivers/video/riva/
 F:	drivers/video/nvidia/
 
+NVM EXPRESS DRIVER
+M:	Matthew Wilcox <willy@linux.intel.com>
+L:	linux-nvme@lists.infradead.org
+T:	git git://git.infradead.org/users/willy/linux-nvme.git
+S:	Supported
+F:	drivers/block/nvme.c
+F:	include/linux/nvme.h
+
 OMAP SUPPORT
 M:	Tony Lindgren <tony@atomide.com>
 L:	linux-omap@vger.kernel.org
@@ -5669,7 +5683,7 @@ S:	Maintained
 F:	arch/arm/*omap*/*clock*
 
 OMAP POWER MANAGEMENT SUPPORT
-M:	Kevin Hilman <khilman@ti.com>
+M:	Kevin Hilman <khilman@deeprootsystems.com>
 L:	linux-omap@vger.kernel.org
 S:	Maintained
 F:	arch/arm/*omap*/*pm*
@@ -5763,7 +5777,7 @@ F:	arch/arm/*omap*/usb*
 
 OMAP GPIO DRIVER
 M:	Santosh Shilimkar <santosh.shilimkar@ti.com>
-M:	Kevin Hilman <khilman@ti.com>
+M:	Kevin Hilman <khilman@deeprootsystems.com>
 L:	linux-omap@vger.kernel.org
 S:	Maintained
 F:	drivers/gpio/gpio-omap.c
@@ -7161,7 +7175,7 @@ F:	arch/arm/mach-s3c2410/bast-irq.c
 
 TI DAVINCI MACHINE SUPPORT
 M:	Sekhar Nori <nsekhar@ti.com>
-M:	Kevin Hilman <khilman@ti.com>
+M:	Kevin Hilman <khilman@deeprootsystems.com>
 L:	davinci-linux-open-source@linux.davincidsp.com (moderated for non-subscribers)
 T:	git git://gitorious.org/linux-davinci/linux-davinci.git
 Q:	http://patchwork.kernel.org/project/linux-davinci/list/

Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 9
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc4
 NAME = Unicycling Gorilla
 
 # *DOCUMENTATION*
@@ -495,6 +495,7 @@ config DEBUG_IMX_UART_PORT
 						DEBUG_IMX53_UART || \
 						DEBUG_IMX6Q_UART
+	default 1
 	depends on ARCH_MXC
 	help
 	  Choose UART port on which kernel low-level debug messages
 	  should be output.
@@ -385,7 +385,7 @@
 
 	spi@7000d800 {
 		compatible = "nvidia,tegra20-slink";
-		reg = <0x7000d480 0x200>;
+		reg = <0x7000d800 0x200>;
 		interrupts = <0 83 0x04>;
 		nvidia,dma-request-selector = <&apbdma 17>;
 		#address-cells = <1>;
@@ -372,7 +372,7 @@
 
 	spi@7000d800 {
 		compatible = "nvidia,tegra30-slink", "nvidia,tegra20-slink";
-		reg = <0x7000d480 0x200>;
+		reg = <0x7000d800 0x200>;
 		interrupts = <0 83 0x04>;
 		nvidia,dma-request-selector = <&apbdma 17>;
 		#address-cells = <1>;
@@ -41,8 +41,6 @@ static struct fb_videomode mx23evk_video_modes[] = {
 		.lower_margin	= 4,
 		.hsync_len	= 1,
 		.vsync_len	= 1,
-		.sync		= FB_SYNC_DATA_ENABLE_HIGH_ACT |
-				  FB_SYNC_DOTCLK_FAILING_ACT,
 	},
 };
 
@@ -59,8 +57,6 @@ static struct fb_videomode mx28evk_video_modes[] = {
 		.lower_margin	= 10,
 		.hsync_len	= 10,
 		.vsync_len	= 10,
-		.sync		= FB_SYNC_DATA_ENABLE_HIGH_ACT |
-				  FB_SYNC_DOTCLK_FAILING_ACT,
 	},
 };
 
@@ -77,7 +73,6 @@ static struct fb_videomode m28evk_video_modes[] = {
 		.lower_margin	= 45,
 		.hsync_len	= 1,
 		.vsync_len	= 1,
-		.sync		= FB_SYNC_DATA_ENABLE_HIGH_ACT,
 	},
 };
 
@@ -94,9 +89,7 @@ static struct fb_videomode apx4devkit_video_modes[] = {
 		.lower_margin	= 13,
 		.hsync_len	= 48,
 		.vsync_len	= 3,
-		.sync		= FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT |
-				  FB_SYNC_DATA_ENABLE_HIGH_ACT |
-				  FB_SYNC_DOTCLK_FAILING_ACT,
+		.sync		= FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
 	},
 };
 
@@ -113,9 +106,7 @@ static struct fb_videomode apf28dev_video_modes[] = {
 		.lower_margin	= 0x15,
 		.hsync_len	= 64,
 		.vsync_len	= 4,
-		.sync		= FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT |
-				  FB_SYNC_DATA_ENABLE_HIGH_ACT |
-				  FB_SYNC_DOTCLK_FAILING_ACT,
+		.sync		= FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
 	},
 };
 
@@ -132,7 +123,6 @@ static struct fb_videomode cfa10049_video_modes[] = {
 		.lower_margin	= 2,
 		.hsync_len	= 15,
 		.vsync_len	= 15,
-		.sync		= FB_SYNC_DATA_ENABLE_HIGH_ACT
 	},
 };
 
@@ -259,6 +249,8 @@ static void __init imx23_evk_init(void)
 	mxsfb_pdata.mode_count = ARRAY_SIZE(mx23evk_video_modes);
 	mxsfb_pdata.default_bpp = 32;
 	mxsfb_pdata.ld_intf_width = STMLCDIF_24BIT;
+	mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT |
+			   MXSFB_SYNC_DOTCLK_FAILING_ACT;
 }
 
 static inline void enable_clk_enet_out(void)
@@ -278,6 +270,8 @@ static void __init imx28_evk_init(void)
 	mxsfb_pdata.mode_count = ARRAY_SIZE(mx28evk_video_modes);
 	mxsfb_pdata.default_bpp = 32;
 	mxsfb_pdata.ld_intf_width = STMLCDIF_24BIT;
+	mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT |
+			   MXSFB_SYNC_DOTCLK_FAILING_ACT;
 
 	mxs_saif_clkmux_select(MXS_DIGCTL_SAIF_CLKMUX_EXTMSTR0);
 }
@@ -297,6 +291,7 @@ static void __init m28evk_init(void)
 	mxsfb_pdata.mode_count = ARRAY_SIZE(m28evk_video_modes);
 	mxsfb_pdata.default_bpp = 16;
 	mxsfb_pdata.ld_intf_width = STMLCDIF_18BIT;
+	mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT;
 }
 
 static void __init sc_sps1_init(void)
@@ -322,6 +317,8 @@ static void __init apx4devkit_init(void)
 	mxsfb_pdata.mode_count = ARRAY_SIZE(apx4devkit_video_modes);
 	mxsfb_pdata.default_bpp = 32;
 	mxsfb_pdata.ld_intf_width = STMLCDIF_24BIT;
+	mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT |
+			   MXSFB_SYNC_DOTCLK_FAILING_ACT;
 }
 
 #define ENET0_MDC__GPIO_4_0	MXS_GPIO_NR(4, 0)
@@ -407,6 +404,7 @@ static void __init cfa10049_init(void)
 	mxsfb_pdata.mode_count = ARRAY_SIZE(cfa10049_video_modes);
 	mxsfb_pdata.default_bpp = 32;
 	mxsfb_pdata.ld_intf_width = STMLCDIF_18BIT;
+	mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT;
 }
 
 static void __init cfa10037_init(void)
@@ -423,6 +421,8 @@ static void __init apf28_init(void)
 	mxsfb_pdata.mode_count = ARRAY_SIZE(apf28dev_video_modes);
 	mxsfb_pdata.default_bpp = 16;
 	mxsfb_pdata.ld_intf_width = STMLCDIF_16BIT;
+	mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT |
+			   MXSFB_SYNC_DOTCLK_FAILING_ACT;
 }
 
 static void __init mxs_machine_init(void)
@@ -420,7 +420,20 @@ static struct attribute_group power7_pmu_events_group = {
 	.attrs = power7_events_attr,
 };
 
+PMU_FORMAT_ATTR(event, "config:0-19");
+
+static struct attribute *power7_pmu_format_attr[] = {
+	&format_attr_event.attr,
+	NULL,
+};
+
+struct attribute_group power7_pmu_format_group = {
+	.name = "format",
+	.attrs = power7_pmu_format_attr,
+};
+
 static const struct attribute_group *power7_pmu_attr_groups[] = {
+	&power7_pmu_format_group,
 	&power7_pmu_events_group,
 	NULL,
 };
@@ -330,7 +330,6 @@ CONFIG_MD_RAID0=m
 CONFIG_MD_RAID1=m
 CONFIG_MD_RAID10=m
 CONFIG_MD_RAID456=m
-CONFIG_MULTICORE_RAID456=y
 CONFIG_MD_FAULTY=m
 CONFIG_BLK_DEV_DM=m
 CONFIG_DM_DEBUG=y
@@ -324,7 +324,6 @@ CONFIG_MD_RAID0=m
 CONFIG_MD_RAID1=m
 CONFIG_MD_RAID10=m
 CONFIG_MD_RAID456=m
-CONFIG_MULTICORE_RAID456=y
 CONFIG_MD_FAULTY=m
 CONFIG_BLK_DEV_DM=m
 CONFIG_DM_DEBUG=y
@@ -77,6 +77,7 @@ struct arch_specific_insn {
 	 * a post_handler or break_handler).
 	 */
 	int boostable;
+	bool if_modifier;
 };
 
 struct arch_optimized_insn {
@@ -414,8 +414,8 @@ struct kvm_vcpu_arch {
 	gpa_t time;
 	struct pvclock_vcpu_time_info hv_clock;
 	unsigned int hw_tsc_khz;
-	unsigned int time_offset;
-	struct page *time_page;
+	struct gfn_to_hva_cache pv_time;
+	bool pv_time_enabled;
 	/* set guest stopped flag in pvclock flags field */
 	bool pvclock_set_guest_stopped_request;
@@ -101,6 +101,10 @@ static struct event_constraint intel_snb_event_constraints[] __read_mostly =
 	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
 	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
 	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
+	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
+	INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
+	INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
+	INTEL_UEVENT_CONSTRAINT(0x06a3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
 	INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
 	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
 	INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
@@ -375,6 +375,9 @@ static void __kprobes arch_copy_kprobe(struct kprobe *p)
 	else
 		p->ainsn.boostable = -1;
 
+	/* Check whether the instruction modifies Interrupt Flag or not */
+	p->ainsn.if_modifier = is_IF_modifier(p->ainsn.insn);
+
 	/* Also, displacement change doesn't affect the first byte */
 	p->opcode = p->ainsn.insn[0];
 }
@@ -434,7 +437,7 @@ static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
 	__this_cpu_write(current_kprobe, p);
 	kcb->kprobe_saved_flags = kcb->kprobe_old_flags
 		= (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
-	if (is_IF_modifier(p->ainsn.insn))
+	if (p->ainsn.if_modifier)
 		kcb->kprobe_saved_flags &= ~X86_EFLAGS_IF;
 }
 
@@ -90,13 +90,13 @@ microcode_phys(struct microcode_intel **mc_saved_tmp,
 	struct microcode_intel ***mc_saved;
 
 	mc_saved = (struct microcode_intel ***)
-		__pa_symbol(&mc_saved_data->mc_saved);
+		__pa_nodebug(&mc_saved_data->mc_saved);
 	for (i = 0; i < mc_saved_data->mc_saved_count; i++) {
 		struct microcode_intel *p;
 
 		p = *(struct microcode_intel **)
-			__pa(mc_saved_data->mc_saved + i);
-		mc_saved_tmp[i] = (struct microcode_intel *)__pa(p);
+			__pa_nodebug(mc_saved_data->mc_saved + i);
+		mc_saved_tmp[i] = (struct microcode_intel *)__pa_nodebug(p);
 	}
 }
 #endif
@@ -562,7 +562,7 @@ scan_microcode(unsigned long start, unsigned long end,
 	struct cpio_data cd;
 	long offset = 0;
 #ifdef CONFIG_X86_32
-	char *p = (char *)__pa_symbol(ucode_name);
+	char *p = (char *)__pa_nodebug(ucode_name);
 #else
 	char *p = ucode_name;
 #endif
@@ -630,8 +630,8 @@ static void __cpuinit print_ucode(struct ucode_cpu_info *uci)
 	if (mc_intel == NULL)
 		return;
 
-	delay_ucode_info_p = (int *)__pa_symbol(&delay_ucode_info);
-	current_mc_date_p = (int *)__pa_symbol(&current_mc_date);
+	delay_ucode_info_p = (int *)__pa_nodebug(&delay_ucode_info);
+	current_mc_date_p = (int *)__pa_nodebug(&current_mc_date);
 
 	*delay_ucode_info_p = 1;
 	*current_mc_date_p = mc_intel->hdr.date;
@@ -659,7 +659,7 @@ static inline void __cpuinit print_ucode(struct ucode_cpu_info *uci)
 }
 #endif
 
-static int apply_microcode_early(struct mc_saved_data *mc_saved_data,
+static int __cpuinit apply_microcode_early(struct mc_saved_data *mc_saved_data,
 				 struct ucode_cpu_info *uci)
 {
 	struct microcode_intel *mc_intel;
@@ -741,15 +741,15 @@ load_ucode_intel_bsp(void)
 #ifdef CONFIG_X86_32
 	struct boot_params *boot_params_p;
 
-	boot_params_p = (struct boot_params *)__pa_symbol(&boot_params);
+	boot_params_p = (struct boot_params *)__pa_nodebug(&boot_params);
 	ramdisk_image = boot_params_p->hdr.ramdisk_image;
 	ramdisk_size  = boot_params_p->hdr.ramdisk_size;
 	initrd_start_early = ramdisk_image;
 	initrd_end_early = initrd_start_early + ramdisk_size;
 
 	_load_ucode_intel_bsp(
-		(struct mc_saved_data *)__pa_symbol(&mc_saved_data),
-		(unsigned long *)__pa_symbol(&mc_saved_in_initrd),
+		(struct mc_saved_data *)__pa_nodebug(&mc_saved_data),
+		(unsigned long *)__pa_nodebug(&mc_saved_in_initrd),
 		initrd_start_early, initrd_end_early, &uci);
 #else
 	ramdisk_image = boot_params.hdr.ramdisk_image;
@@ -772,10 +772,10 @@ void __cpuinit load_ucode_intel_ap(void)
 	unsigned long *initrd_start_p;
 
 	mc_saved_in_initrd_p =
-		(unsigned long *)__pa_symbol(mc_saved_in_initrd);
-	mc_saved_data_p = (struct mc_saved_data *)__pa_symbol(&mc_saved_data);
-	initrd_start_p = (unsigned long *)__pa_symbol(&initrd_start);
-	initrd_start_addr = (unsigned long)__pa_symbol(*initrd_start_p);
+		(unsigned long *)__pa_nodebug(mc_saved_in_initrd);
+	mc_saved_data_p = (struct mc_saved_data *)__pa_nodebug(&mc_saved_data);
+	initrd_start_p = (unsigned long *)__pa_nodebug(&initrd_start);
+	initrd_start_addr = (unsigned long)__pa_nodebug(*initrd_start_p);
 #else
 	mc_saved_data_p = &mc_saved_data;
 	mc_saved_in_initrd_p = mc_saved_in_initrd;
@@ -1406,25 +1406,15 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
 	unsigned long flags, this_tsc_khz;
 	struct kvm_vcpu_arch *vcpu = &v->arch;
 	struct kvm_arch *ka = &v->kvm->arch;
-	void *shared_kaddr;
 	s64 kernel_ns, max_kernel_ns;
 	u64 tsc_timestamp, host_tsc;
-	struct pvclock_vcpu_time_info *guest_hv_clock;
+	struct pvclock_vcpu_time_info guest_hv_clock;
 	u8 pvclock_flags;
 	bool use_master_clock;
 
 	kernel_ns = 0;
 	host_tsc = 0;
 
-	/* Keep irq disabled to prevent changes to the clock */
-	local_irq_save(flags);
-	this_tsc_khz = __get_cpu_var(cpu_tsc_khz);
-	if (unlikely(this_tsc_khz == 0)) {
-		local_irq_restore(flags);
-		kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
-		return 1;
-	}
-
 	/*
 	 * If the host uses TSC clock, then passthrough TSC as stable
 	 * to the guest.
@@ -1436,6 +1426,15 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
 		kernel_ns = ka->master_kernel_ns;
 	}
 	spin_unlock(&ka->pvclock_gtod_sync_lock);
+
+	/* Keep irq disabled to prevent changes to the clock */
+	local_irq_save(flags);
+	this_tsc_khz = __get_cpu_var(cpu_tsc_khz);
+	if (unlikely(this_tsc_khz == 0)) {
+		local_irq_restore(flags);
+		kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
+		return 1;
+	}
 	if (!use_master_clock) {
 		host_tsc = native_read_tsc();
 		kernel_ns = get_kernel_ns();
@@ -1463,7 +1462,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
 
 	local_irq_restore(flags);
 
-	if (!vcpu->time_page)
+	if (!vcpu->pv_time_enabled)
 		return 0;
 
 	/*
@@ -1525,12 +1524,12 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
 	 */
 	vcpu->hv_clock.version += 2;
 
-	shared_kaddr = kmap_atomic(vcpu->time_page);
-
-	guest_hv_clock = shared_kaddr + vcpu->time_offset;
+	if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time,
+		&guest_hv_clock, sizeof(guest_hv_clock))))
+		return 0;
 
 	/* retain PVCLOCK_GUEST_STOPPED if set in guest copy */
-	pvclock_flags = (guest_hv_clock->flags & PVCLOCK_GUEST_STOPPED);
+	pvclock_flags = (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED);
 
 	if (vcpu->pvclock_set_guest_stopped_request) {
 		pvclock_flags |= PVCLOCK_GUEST_STOPPED;
@@ -1543,12 +1542,9 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
 
 	vcpu->hv_clock.flags = pvclock_flags;
 
-	memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
-	       sizeof(vcpu->hv_clock));
-
-	kunmap_atomic(shared_kaddr);
+	kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
+				&vcpu->hv_clock,
+				sizeof(vcpu->hv_clock));
 
 	mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
 	return 0;
 }
@@ -1837,10 +1833,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
 
 static void kvmclock_reset(struct kvm_vcpu *vcpu)
 {
-	if (vcpu->arch.time_page) {
-		kvm_release_page_dirty(vcpu->arch.time_page);
-		vcpu->arch.time_page = NULL;
-	}
+	vcpu->arch.pv_time_enabled = false;
 }
 
 static void accumulate_steal_time(struct kvm_vcpu *vcpu)
@@ -1947,6 +1940,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		break;
 	case MSR_KVM_SYSTEM_TIME_NEW:
 	case MSR_KVM_SYSTEM_TIME: {
+		u64 gpa_offset;
 		kvmclock_reset(vcpu);
 
 		vcpu->arch.time = data;
|
||||
if (!(data & 1))
|
||||
break;
|
||||
|
||||
/* ...but clean it before doing the actual write */
|
||||
vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);
|
||||
gpa_offset = data & ~(PAGE_MASK | 1);
|
||||
|
||||
vcpu->arch.time_page =
|
||||
gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
|
||||
/* Check that the address is 32-byte aligned. */
|
||||
if (gpa_offset & (sizeof(struct pvclock_vcpu_time_info) - 1))
|
||||
break;
|
||||
|
||||
if (is_error_page(vcpu->arch.time_page))
|
||||
vcpu->arch.time_page = NULL;
|
||||
if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
|
||||
&vcpu->arch.pv_time, data & ~1ULL))
|
||||
vcpu->arch.pv_time_enabled = false;
|
||||
else
|
||||
vcpu->arch.pv_time_enabled = true;
|
||||
|
||||
break;
|
||||
}
|
||||
@ -2967,7 +2964,7 @@ static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
|
||||
*/
|
||||
static int kvm_set_guest_paused(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
if (!vcpu->arch.time_page)
|
||||
if (!vcpu->arch.pv_time_enabled)
|
||||
return -EINVAL;
|
||||
vcpu->arch.pvclock_set_guest_stopped_request = true;
|
||||
kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
|
||||
@@ -6718,6 +6715,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 		goto fail_free_wbinvd_dirty_mask;
 
 	vcpu->arch.ia32_tsc_adjust_msr = 0x0;
+	vcpu->arch.pv_time_enabled = false;
 	kvm_async_pf_hash_reset(vcpu);
 	kvm_pmu_init(vcpu);
 
@@ -74,10 +74,10 @@ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
 	char c;
 	unsigned zero_len;
 
-	for (; len; --len) {
+	for (; len; --len, to++) {
 		if (__get_user_nocheck(c, from++, sizeof(char)))
 			break;
-		if (__put_user_nocheck(c, to++, sizeof(char)))
+		if (__put_user_nocheck(c, to, sizeof(char)))
 			break;
 	}
 
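The point of this fix, sketched with stand-in values (illustrative, not kernel code): in the old loop `to` was advanced by the argument side effect even when the put faulted, whereas a for-update clause is skipped on break, so `to` is left pointing at the first byte that was not successfully written.

    #include <stdio.h>

    int main(void)
    {
        int to_old = 0, to_new = 0, i;

        /* old style: the increment rides along with the failing call */
        for (i = 0; i < 4; i++) {
            if (i == 2) {       /* simulate a fault on iteration 2 */
                to_old++;       /* "to++" argument side effect     */
                break;
            }
            to_old++;
        }

        /* new style: the update clause does not run after a break */
        for (i = 0; i < 4; i++, to_new++) {
            if (i == 2)         /* same simulated fault */
                break;
        }

        printf("old=%d new=%d\n", to_old, to_new); /* prints old=3 new=2 */
        return 0;
    }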
@@ -59,15 +59,16 @@ config ATA_ACPI
 	  option libata.noacpi=1
 
 config SATA_ZPODD
-	bool "SATA Zero Power ODD Support"
+	bool "SATA Zero Power Optical Disc Drive (ZPODD) support"
 	depends on ATA_ACPI
 	default n
 	help
-	  This option adds support for SATA ZPODD. It requires both
-	  ODD and the platform support, and if enabled, will automatically
-	  power on/off the ODD when certain condition is satisfied. This
-	  does not impact user's experience of the ODD, only power is saved
-	  when ODD is not in use(i.e. no disc inside).
+	  This option adds support for SATA Zero Power Optical Disc
+	  Drive (ZPODD). It requires both the ODD and the platform
+	  support, and if enabled, will automatically power on/off the
+	  ODD when certain condition is satisfied. This does not impact
+	  end user's experience of the ODD, only power is saved when
+	  the ODD is not in use (i.e. no disc inside).
 
 	  If unsure, say N.
 
@@ -281,6 +281,8 @@ static const struct pci_device_id ahci_pci_tbl[] = {
 	{ PCI_VDEVICE(INTEL, 0x1f37), board_ahci }, /* Avoton RAID */
 	{ PCI_VDEVICE(INTEL, 0x1f3e), board_ahci }, /* Avoton RAID */
 	{ PCI_VDEVICE(INTEL, 0x1f3f), board_ahci }, /* Avoton RAID */
+	{ PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Wellsburg RAID */
+	{ PCI_VDEVICE(INTEL, 0x2827), board_ahci }, /* Wellsburg RAID */
 	{ PCI_VDEVICE(INTEL, 0x8d02), board_ahci }, /* Wellsburg AHCI */
 	{ PCI_VDEVICE(INTEL, 0x8d04), board_ahci }, /* Wellsburg RAID */
 	{ PCI_VDEVICE(INTEL, 0x8d06), board_ahci }, /* Wellsburg RAID */
@@ -1547,6 +1547,10 @@ static bool piix_broken_system_poweroff(struct pci_dev *pdev)
 
 static int prefer_ms_hyperv = 1;
 module_param(prefer_ms_hyperv, int, 0);
+MODULE_PARM_DESC(prefer_ms_hyperv,
+		 "Prefer Hyper-V paravirtualization drivers instead of ATA, "
+		 "0 - Use ATA drivers, "
+		 "1 (Default) - Use the paravirtualization drivers.");
 
 static void piix_ignore_devices_quirk(struct ata_host *host)
 {
@@ -1027,7 +1027,7 @@ static void ata_acpi_register_power_resource(struct ata_device *dev)
 
 	handle = ata_dev_acpi_handle(dev);
 	if (handle)
-		acpi_dev_pm_remove_dependent(handle, &sdev->sdev_gendev);
+		acpi_dev_pm_add_dependent(handle, &sdev->sdev_gendev);
 }
 
 static void ata_acpi_unregister_power_resource(struct ata_device *dev)
@@ -661,18 +661,7 @@ static struct platform_driver pata_s3c_driver = {
 	},
 };
 
-static int __init pata_s3c_init(void)
-{
-	return platform_driver_probe(&pata_s3c_driver, pata_s3c_probe);
-}
-
-static void __exit pata_s3c_exit(void)
-{
-	platform_driver_unregister(&pata_s3c_driver);
-}
-
-module_init(pata_s3c_init);
-module_exit(pata_s3c_exit);
+module_platform_driver_probe(pata_s3c_driver, pata_s3c_probe);
 
 MODULE_AUTHOR("Abhilash Kesavan, <a.kesavan@samsung.com>");
 MODULE_DESCRIPTION("low-level driver for Samsung PATA controller");
@@ -1511,7 +1511,6 @@ error_exit_with_cleanup:
 
 	if (hcr_base)
 		iounmap(hcr_base);
-	if (host_priv)
-		kfree(host_priv);
+	kfree(host_priv);
 
 	return retval;
@@ -135,6 +135,7 @@ static inline void _nvme_check_size(void)
 	BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096);
 	BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096);
 	BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
+	BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
 }
 
 typedef void (*nvme_completion_fn)(struct nvme_dev *, void *,
@@ -237,6 +238,7 @@ static void *free_cmdid(struct nvme_queue *nvmeq, int cmdid,
 		*fn = special_completion;
 		return CMD_CTX_INVALID;
 	}
+	if (fn)
 		*fn = info[cmdid].fn;
 	ctx = info[cmdid].ctx;
 	info[cmdid].fn = special_completion;
@@ -335,6 +337,7 @@ nvme_alloc_iod(unsigned nseg, unsigned nbytes, gfp_t gfp)
 		iod->offset = offsetof(struct nvme_iod, sg[nseg]);
 		iod->npages = -1;
 		iod->length = nbytes;
+		iod->nents = 0;
 	}
 
 	return iod;
@@ -375,6 +378,7 @@ static void bio_completion(struct nvme_dev *dev, void *ctx,
 	struct bio *bio = iod->private;
 	u16 status = le16_to_cpup(&cqe->status) >> 1;
 
+	if (iod->nents)
 		dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents,
 			bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
 	nvme_free_iod(dev, iod);
@@ -589,7 +593,7 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
 
 	result = nvme_map_bio(nvmeq->q_dmadev, iod, bio, dma_dir, psegs);
 	if (result < 0)
-		goto free_iod;
+		goto free_cmdid;
 	length = result;
 
 	cmnd->rw.command_id = cmdid;
@@ -609,6 +613,8 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
 
 	return 0;
 
+ free_cmdid:
+	free_cmdid(nvmeq, cmdid, NULL);
  free_iod:
 	nvme_free_iod(nvmeq->dev, iod);
  nomem:
@@ -835,8 +841,8 @@ static int nvme_identify(struct nvme_dev *dev, unsigned nsid, unsigned cns,
 	return nvme_submit_admin_cmd(dev, &c, NULL);
 }
 
-static int nvme_get_features(struct nvme_dev *dev, unsigned fid,
-				unsigned nsid, dma_addr_t dma_addr)
+static int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid,
+					dma_addr_t dma_addr, u32 *result)
 {
 	struct nvme_command c;
 
@@ -846,7 +852,7 @@ static int nvme_get_features(struct nvme_dev *dev, unsigned fid,
 	c.features.prp1 = cpu_to_le64(dma_addr);
 	c.features.fid = cpu_to_le32(fid);
 
-	return nvme_submit_admin_cmd(dev, &c, NULL);
+	return nvme_submit_admin_cmd(dev, &c, result);
 }
 
 static int nvme_set_features(struct nvme_dev *dev, unsigned fid,
@@ -906,6 +912,10 @@ static void nvme_free_queue(struct nvme_dev *dev, int qid)
 
 	spin_lock_irq(&nvmeq->q_lock);
 	nvme_cancel_ios(nvmeq, false);
+	while (bio_list_peek(&nvmeq->sq_cong)) {
+		struct bio *bio = bio_list_pop(&nvmeq->sq_cong);
+		bio_endio(bio, -EIO);
+	}
 	spin_unlock_irq(&nvmeq->q_lock);
 
 	irq_set_affinity_hint(vector, NULL);
@@ -1230,12 +1240,17 @@ static int nvme_user_admin_cmd(struct nvme_dev *dev,
 	if (length != cmd.data_len)
 		status = -ENOMEM;
 	else
-		status = nvme_submit_admin_cmd(dev, &c, NULL);
+		status = nvme_submit_admin_cmd(dev, &c, &cmd.result);
 
 	if (cmd.data_len) {
 		nvme_unmap_user_pages(dev, cmd.opcode & 1, iod);
 		nvme_free_iod(dev, iod);
 	}
 
+	if (!status && copy_to_user(&ucmd->result, &cmd.result,
+							sizeof(cmd.result)))
+		status = -EFAULT;
+
 	return status;
 }
@@ -1523,9 +1538,9 @@ static int nvme_dev_add(struct nvme_dev *dev)
 			continue;
 
 		res = nvme_get_features(dev, NVME_FEAT_LBA_RANGE, i,
-							dma_addr + 4096);
+							dma_addr + 4096, NULL);
 		if (res)
-			continue;
+			memset(mem + 4096, 0, 4096);
 
 		ns = nvme_alloc_ns(dev, i, mem, mem + 4096);
 		if (ns)
@@ -2048,12 +2048,18 @@ static int init_csrows(struct mem_ctl_info *mci)
 		edac_dbg(1, "MC node: %d, csrow: %d\n",
 			    pvt->mc_node_id, i);
 
-		if (row_dct0)
+		if (row_dct0) {
 			nr_pages = amd64_csrow_nr_pages(pvt, 0, i);
+			csrow->channels[0]->dimm->nr_pages = nr_pages;
+		}
 
 		/* K8 has only one DCT */
-		if (boot_cpu_data.x86 != 0xf && row_dct1)
-			nr_pages += amd64_csrow_nr_pages(pvt, 1, i);
+		if (boot_cpu_data.x86 != 0xf && row_dct1) {
+			int row_dct1_pages = amd64_csrow_nr_pages(pvt, 1, i);
+
+			csrow->channels[1]->dimm->nr_pages = row_dct1_pages;
+			nr_pages += row_dct1_pages;
+		}
 
 		mtype = amd64_determine_memory_type(pvt, i);
 
@@ -2072,9 +2078,7 @@ static int init_csrows(struct mem_ctl_info *mci)
 			dimm = csrow->channels[j]->dimm;
 			dimm->mtype = mtype;
 			dimm->edac_mode = edac_mode;
-			dimm->nr_pages = nr_pages;
 		}
-		csrow->nr_pages = nr_pages;
 	}
 
 	return empty;
@@ -2419,7 +2423,6 @@ static int amd64_init_one_instance(struct pci_dev *F2)
 
 	mci->pvt_info = pvt;
 	mci->pdev = &pvt->F2->dev;
-	mci->csbased = 1;
 
 	setup_mci_misc_attrs(mci, fam_type);
 
@@ -86,7 +86,7 @@ static void edac_mc_dump_dimm(struct dimm_info *dimm, int number)
 	edac_dimm_info_location(dimm, location, sizeof(location));
 
 	edac_dbg(4, "%s%i: %smapped as virtual row %d, chan %d\n",
-		 dimm->mci->mem_is_per_rank ? "rank" : "dimm",
+		 dimm->mci->csbased ? "rank" : "dimm",
 		 number, location, dimm->csrow, dimm->cschannel);
 	edac_dbg(4, "  dimm = %p\n", dimm);
 	edac_dbg(4, "  dimm->label = '%s'\n", dimm->label);
@@ -341,7 +341,7 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
 	memcpy(mci->layers, layers, sizeof(*layer) * n_layers);
 	mci->nr_csrows = tot_csrows;
 	mci->num_cschannel = tot_channels;
-	mci->mem_is_per_rank = per_rank;
+	mci->csbased = per_rank;
 
 	/*
 	 * Alocate and fill the csrow/channels structs
@@ -1235,7 +1235,7 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type,
 	 * incrementing the compat API counters
 	 */
 	edac_dbg(4, "%s csrows map: (%d,%d)\n",
-		 mci->mem_is_per_rank ? "rank" : "dimm",
+		 mci->csbased ? "rank" : "dimm",
 		 dimm->csrow, dimm->cschannel);
 	if (row == -1)
 		row = dimm->csrow;
@@ -143,7 +143,7 @@ static const char *edac_caps[] = {
  * and the per-dimm/per-rank one
  */
 #define DEVICE_ATTR_LEGACY(_name, _mode, _show, _store) \
-	struct device_attribute dev_attr_legacy_##_name = __ATTR(_name, _mode, _show, _store)
+	static struct device_attribute dev_attr_legacy_##_name = __ATTR(_name, _mode, _show, _store)
 
 struct dev_ch_attribute {
 	struct device_attribute attr;
@@ -180,9 +180,6 @@ static ssize_t csrow_size_show(struct device *dev,
 	int i;
 	u32 nr_pages = 0;
 
-	if (csrow->mci->csbased)
-		return sprintf(data, "%u\n", PAGES_TO_MiB(csrow->nr_pages));
-
 	for (i = 0; i < csrow->nr_channels; i++)
 		nr_pages += csrow->channels[i]->dimm->nr_pages;
 	return sprintf(data, "%u\n", PAGES_TO_MiB(nr_pages));
@@ -612,7 +609,7 @@ static int edac_create_dimm_object(struct mem_ctl_info *mci,
 	device_initialize(&dimm->dev);
 
 	dimm->dev.parent = &mci->dev;
-	if (mci->mem_is_per_rank)
+	if (mci->csbased)
 		dev_set_name(&dimm->dev, "rank%d", index);
 	else
 		dev_set_name(&dimm->dev, "dimm%d", index);
@@ -778,16 +775,12 @@ static ssize_t mci_size_mb_show(struct device *dev,
 	for (csrow_idx = 0; csrow_idx < mci->nr_csrows; csrow_idx++) {
 		struct csrow_info *csrow = mci->csrows[csrow_idx];
 
-		if (csrow->mci->csbased) {
-			total_pages += csrow->nr_pages;
-		} else {
-			for (j = 0; j < csrow->nr_channels; j++) {
-				struct dimm_info *dimm = csrow->channels[j]->dimm;
+		for (j = 0; j < csrow->nr_channels; j++) {
+			struct dimm_info *dimm = csrow->channels[j]->dimm;
 
-				total_pages += dimm->nr_pages;
-			}
+			total_pages += dimm->nr_pages;
 		}
 	}
 
 	return sprintf(data, "%u\n", PAGES_TO_MiB(total_pages));
 }
@@ -53,6 +53,24 @@ config EFI_VARS
 	  Subsequent efibootmgr releases may be found at:
 	  <http://linux.dell.com/efibootmgr>
 
+config EFI_VARS_PSTORE
+	bool "Register efivars backend for pstore"
+	depends on EFI_VARS && PSTORE
+	default y
+	help
+	  Say Y here to enable use efivars as a backend to pstore. This
+	  will allow writing console messages, crash dumps, or anything
+	  else supported by pstore to EFI variables.
+
+config EFI_VARS_PSTORE_DEFAULT_DISABLE
+	bool "Disable using efivars as a pstore backend by default"
+	depends on EFI_VARS_PSTORE
+	default n
+	help
+	  Saying Y here will disable the use of efivars as a storage
+	  backend for pstore by default. This setting can be overridden
+	  using the efivars module's pstore_disable parameter.
+
 config EFI_PCDP
 	bool "Console device selection via EFI PCDP or HCDP table"
 	depends on ACPI && EFI && IA64
@@ -103,6 +103,11 @@ MODULE_VERSION(EFIVARS_VERSION);
  */
 #define GUID_LEN 36
 
+static bool efivars_pstore_disable =
+	IS_ENABLED(CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE);
+
+module_param_named(pstore_disable, efivars_pstore_disable, bool, 0644);
+
 /*
  * The maximum size of VariableName + Data = 1024
  * Therefore, it's reasonable to save that much
@@ -165,6 +170,7 @@ efivar_create_sysfs_entry(struct efivars *efivars,
 
 static void efivar_update_sysfs_entries(struct work_struct *);
 static DECLARE_WORK(efivar_work, efivar_update_sysfs_entries);
+static bool efivar_wq_enabled = true;
 
 /* Return the number of unicode characters in data */
 static unsigned long
@@ -1309,9 +1315,7 @@ static const struct inode_operations efivarfs_dir_inode_operations = {
 	.create = efivarfs_create,
 };
 
-static struct pstore_info efi_pstore_info;
-
-#ifdef CONFIG_PSTORE
+#ifdef CONFIG_EFI_VARS_PSTORE
 
 static int efi_pstore_open(struct pstore_info *psi)
 {
@@ -1441,7 +1445,7 @@ static int efi_pstore_write(enum pstore_type_id type,
 
 	spin_unlock_irqrestore(&efivars->lock, flags);
 
-	if (reason == KMSG_DUMP_OOPS)
+	if (reason == KMSG_DUMP_OOPS && efivar_wq_enabled)
 		schedule_work(&efivar_work);
 
 	*id = part;
@@ -1514,38 +1518,6 @@ static int efi_pstore_erase(enum pstore_type_id type, u64 id, int count,
 
 	return 0;
 }
-#else
-static int efi_pstore_open(struct pstore_info *psi)
-{
-	return 0;
-}
-
-static int efi_pstore_close(struct pstore_info *psi)
-{
-	return 0;
-}
-
-static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type, int *count,
-			       struct timespec *timespec,
-			       char **buf, struct pstore_info *psi)
-{
-	return -1;
-}
-
-static int efi_pstore_write(enum pstore_type_id type,
-		enum kmsg_dump_reason reason, u64 *id,
-		unsigned int part, int count, size_t size,
-		struct pstore_info *psi)
-{
-	return 0;
-}
-
-static int efi_pstore_erase(enum pstore_type_id type, u64 id, int count,
-			    struct timespec time, struct pstore_info *psi)
-{
-	return 0;
-}
-#endif
 
 static struct pstore_info efi_pstore_info = {
 	.owner		= THIS_MODULE,
@@ -1557,6 +1529,24 @@ static struct pstore_info efi_pstore_info = {
 	.erase		= efi_pstore_erase,
 };
 
+static void efivar_pstore_register(struct efivars *efivars)
+{
+	efivars->efi_pstore_info = efi_pstore_info;
+	efivars->efi_pstore_info.buf = kmalloc(4096, GFP_KERNEL);
+	if (efivars->efi_pstore_info.buf) {
+		efivars->efi_pstore_info.bufsize = 1024;
+		efivars->efi_pstore_info.data = efivars;
+		spin_lock_init(&efivars->efi_pstore_info.buf_lock);
+		pstore_register(&efivars->efi_pstore_info);
+	}
+}
+#else
+static void efivar_pstore_register(struct efivars *efivars)
+{
+	return;
+}
+#endif
+
 static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
 			     struct bin_attribute *bin_attr,
 			     char *buf, loff_t pos, size_t count)
@@ -1716,6 +1706,31 @@ static bool variable_is_present(efi_char16_t *variable_name, efi_guid_t *vendor)
 	return found;
 }
 
+/*
+ * Returns the size of variable_name, in bytes, including the
+ * terminating NULL character, or variable_name_size if no NULL
+ * character is found among the first variable_name_size bytes.
+ */
+static unsigned long var_name_strnsize(efi_char16_t *variable_name,
+				       unsigned long variable_name_size)
+{
+	unsigned long len;
+	efi_char16_t c;
+
+	/*
+	 * The variable name is, by definition, a NULL-terminated
+	 * string, so make absolutely sure that variable_name_size is
+	 * the value we expect it to be. If not, return the real size.
+	 */
+	for (len = 2; len <= variable_name_size; len += sizeof(c)) {
+		c = variable_name[(len / sizeof(c)) - 1];
+		if (!c)
+			break;
+	}
+
+	return min(len, variable_name_size);
+}
+
 static void efivar_update_sysfs_entries(struct work_struct *work)
 {
 	struct efivars *efivars = &__efivars;
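A hypothetical usage sketch of the helper added above; the variable name, reported size, and result are made-up values for illustration only:

    /* Buggy firmware reports a 1024-byte size for the name "Boot". */
    efi_char16_t name[] = { 'B', 'o', 'o', 't', 0 };
    unsigned long reported = 1024;
    unsigned long real = var_name_strnsize(name, reported);
    /* real == 10: five UTF-16 code units, terminator included */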
@@ -1756,12 +1771,15 @@ static void efivar_update_sysfs_entries(struct work_struct *work)
 		if (!found) {
 			kfree(variable_name);
 			break;
-		} else
+		} else {
+			variable_name_size = var_name_strnsize(variable_name,
+							       variable_name_size);
 			efivar_create_sysfs_entry(efivars,
 						  variable_name_size,
 						  variable_name, &vendor);
+		}
 	}
 
 	/*
	 * Let's not leave out systab information that snuck into
@@ -1958,6 +1976,35 @@ void unregister_efivars(struct efivars *efivars)
 }
 EXPORT_SYMBOL_GPL(unregister_efivars);
 
+/*
+ * Print a warning when duplicate EFI variables are encountered and
+ * disable the sysfs workqueue since the firmware is buggy.
+ */
+static void dup_variable_bug(efi_char16_t *s16, efi_guid_t *vendor_guid,
+			     unsigned long len16)
+{
+	size_t i, len8 = len16 / sizeof(efi_char16_t);
+	char *s8;
+
+	/*
+	 * Disable the workqueue since the algorithm it uses for
+	 * detecting new variables won't work with this buggy
+	 * implementation of GetNextVariableName().
+	 */
+	efivar_wq_enabled = false;
+
+	s8 = kzalloc(len8, GFP_KERNEL);
+	if (!s8)
+		return;
+
+	for (i = 0; i < len8; i++)
+		s8[i] = s16[i];
+
+	printk(KERN_WARNING "efivars: duplicate variable: %s-%pUl\n",
+	       s8, vendor_guid);
+	kfree(s8);
+}
+
 int register_efivars(struct efivars *efivars,
 		     const struct efivar_operations *ops,
 		     struct kobject *parent_kobj)
@@ -2006,6 +2053,24 @@ int register_efivars(struct efivars *efivars,
 						&vendor_guid);
 		switch (status) {
 		case EFI_SUCCESS:
+			variable_name_size = var_name_strnsize(variable_name,
+							       variable_name_size);
+
+			/*
+			 * Some firmware implementations return the
+			 * same variable name on multiple calls to
+			 * get_next_variable(). Terminate the loop
+			 * immediately as there is no guarantee that
+			 * we'll ever see a different variable name,
+			 * and may end up looping here forever.
+			 */
+			if (variable_is_present(variable_name, &vendor_guid)) {
+				dup_variable_bug(variable_name, &vendor_guid,
+						 variable_name_size);
+				status = EFI_NOT_FOUND;
+				break;
+			}
+
 			efivar_create_sysfs_entry(efivars,
 						  variable_name_size,
 						  variable_name,
@@ -2025,15 +2090,8 @@ int register_efivars(struct efivars *efivars,
 	if (error)
 		unregister_efivars(efivars);
 
-	efivars->efi_pstore_info = efi_pstore_info;
-
-	efivars->efi_pstore_info.buf = kmalloc(4096, GFP_KERNEL);
-	if (efivars->efi_pstore_info.buf) {
-		efivars->efi_pstore_info.bufsize = 1024;
-		efivars->efi_pstore_info.data = efivars;
-		spin_lock_init(&efivars->efi_pstore_info.buf_lock);
-		pstore_register(&efivars->efi_pstore_info);
-	}
+	if (!efivars_pstore_disable)
+		efivar_pstore_register(efivars);
 
 	register_filesystem(&efivarfs_type);
 
@@ -193,7 +193,7 @@ static void of_gpiochip_add_pin_range(struct gpio_chip *chip)
 	if (!np)
 		return;
 
-	do {
+	for (;; index++) {
 		ret = of_parse_phandle_with_args(np, "gpio-ranges",
 				"#gpio-range-cells", index, &pinspec);
 		if (ret)
@@ -222,8 +222,7 @@ static void of_gpiochip_add_pin_range(struct gpio_chip *chip)
 
 		if (ret)
 			break;
-
-	} while (index++);
+	}
 }
 
 #else
@@ -1634,7 +1634,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
 	unsigned vblank = (pt->vactive_vblank_hi & 0xf) << 8 | pt->vblank_lo;
 	unsigned hsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc0) << 2 | pt->hsync_offset_lo;
 	unsigned hsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x30) << 4 | pt->hsync_pulse_width_lo;
-	unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) >> 2 | pt->vsync_offset_pulse_width_lo >> 4;
+	unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) << 2 | pt->vsync_offset_pulse_width_lo >> 4;
 	unsigned vsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x3) << 4 | (pt->vsync_offset_pulse_width_lo & 0xf);
 
 	/* ignore tiny modes */
@@ -1715,6 +1715,7 @@ set_size:
 	}
 
 	mode->type = DRM_MODE_TYPE_DRIVER;
+	mode->vrefresh = drm_mode_vrefresh(mode);
 	drm_mode_set_name(mode);
 
 	return mode;
@@ -38,11 +38,12 @@
 /* position control register for hardware window 0, 2 ~ 4.*/
 #define VIDOSD_A(win)		(VIDOSD_BASE + 0x00 + (win) * 16)
 #define VIDOSD_B(win)		(VIDOSD_BASE + 0x04 + (win) * 16)
-/* size control register for hardware window 0. */
-#define VIDOSD_C_SIZE_W0	(VIDOSD_BASE + 0x08)
-/* alpha control register for hardware window 1 ~ 4. */
-#define VIDOSD_C(win)		(VIDOSD_BASE + 0x18 + (win) * 16)
-/* size control register for hardware window 1 ~ 4. */
+/*
+ * size control register for hardware windows 0 and alpha control register
+ * for hardware windows 1 ~ 4
+ */
+#define VIDOSD_C(win)		(VIDOSD_BASE + 0x08 + (win) * 16)
+/* size control register for hardware windows 1 ~ 2. */
 #define VIDOSD_D(win)		(VIDOSD_BASE + 0x0C + (win) * 16)
 
 #define VIDWx_BUF_START(win, buf)	(VIDW_BUF_START(buf) + (win) * 8)
@@ -50,9 +51,9 @@
 #define VIDWx_BUF_SIZE(win, buf)	(VIDW_BUF_SIZE(buf) + (win) * 4)
 
 /* color key control register for hardware window 1 ~ 4. */
-#define WKEYCON0_BASE(x)		((WKEYCON0 + 0x140) + (x * 8))
+#define WKEYCON0_BASE(x)		((WKEYCON0 + 0x140) + ((x - 1) * 8))
 /* color key value register for hardware window 1 ~ 4. */
-#define WKEYCON1_BASE(x)		((WKEYCON1 + 0x140) + (x * 8))
+#define WKEYCON1_BASE(x)		((WKEYCON1 + 0x140) + ((x - 1) * 8))
 
 /* FIMD has totally five hardware windows. */
 #define WINDOWS_NR	5
@@ -109,9 +110,9 @@ struct fimd_context {
 
 #ifdef CONFIG_OF
 static const struct of_device_id fimd_driver_dt_match[] = {
-	{ .compatible = "samsung,exynos4-fimd",
+	{ .compatible = "samsung,exynos4210-fimd",
 	  .data = &exynos4_fimd_driver_data },
-	{ .compatible = "samsung,exynos5-fimd",
+	{ .compatible = "samsung,exynos5250-fimd",
 	  .data = &exynos5_fimd_driver_data },
 	{},
 };
@@ -581,7 +582,7 @@ static void fimd_win_commit(struct device *dev, int zpos)
 	if (win != 3 && win != 4) {
 		u32 offset = VIDOSD_D(win);
 		if (win == 0)
-			offset = VIDOSD_C_SIZE_W0;
+			offset = VIDOSD_C(win);
 		val = win_data->ovl_width * win_data->ovl_height;
 		writel(val, ctx->regs + offset);
 
|
||||
|
||||
/* registers for base address */
|
||||
#define G2D_SRC_BASE_ADDR 0x0304
|
||||
#define G2D_SRC_COLOR_MODE 0x030C
|
||||
#define G2D_SRC_LEFT_TOP 0x0310
|
||||
#define G2D_SRC_RIGHT_BOTTOM 0x0314
|
||||
#define G2D_SRC_PLANE2_BASE_ADDR 0x0318
|
||||
#define G2D_DST_BASE_ADDR 0x0404
|
||||
#define G2D_DST_COLOR_MODE 0x040C
|
||||
#define G2D_DST_LEFT_TOP 0x0410
|
||||
#define G2D_DST_RIGHT_BOTTOM 0x0414
|
||||
#define G2D_DST_PLANE2_BASE_ADDR 0x0418
|
||||
#define G2D_PAT_BASE_ADDR 0x0500
|
||||
#define G2D_MSK_BASE_ADDR 0x0520
|
||||
@ -82,7 +88,7 @@
|
||||
#define G2D_DMA_LIST_DONE_COUNT_OFFSET 17
|
||||
|
||||
/* G2D_DMA_HOLD_CMD */
|
||||
#define G2D_USET_HOLD (1 << 2)
|
||||
#define G2D_USER_HOLD (1 << 2)
|
||||
#define G2D_LIST_HOLD (1 << 1)
|
||||
#define G2D_BITBLT_HOLD (1 << 0)
|
||||
|
||||
@ -91,13 +97,27 @@
|
||||
#define G2D_START_NHOLT (1 << 1)
|
||||
#define G2D_START_BITBLT (1 << 0)
|
||||
|
||||
/* buffer color format */
|
||||
#define G2D_FMT_XRGB8888 0
|
||||
#define G2D_FMT_ARGB8888 1
|
||||
#define G2D_FMT_RGB565 2
|
||||
#define G2D_FMT_XRGB1555 3
|
||||
#define G2D_FMT_ARGB1555 4
|
||||
#define G2D_FMT_XRGB4444 5
|
||||
#define G2D_FMT_ARGB4444 6
|
||||
#define G2D_FMT_PACKED_RGB888 7
|
||||
#define G2D_FMT_A8 11
|
||||
#define G2D_FMT_L8 12
|
||||
|
||||
/* buffer valid length */
|
||||
#define G2D_LEN_MIN 1
|
||||
#define G2D_LEN_MAX 8000
|
||||
|
||||
#define G2D_CMDLIST_SIZE (PAGE_SIZE / 4)
|
||||
#define G2D_CMDLIST_NUM 64
|
||||
#define G2D_CMDLIST_POOL_SIZE (G2D_CMDLIST_SIZE * G2D_CMDLIST_NUM)
|
||||
#define G2D_CMDLIST_DATA_NUM (G2D_CMDLIST_SIZE / sizeof(u32) - 2)
|
||||
|
||||
#define MAX_BUF_ADDR_NR 6
|
||||
|
||||
/* maximum buffer pool size of userptr is 64MB as default */
|
||||
#define MAX_POOL (64 * 1024 * 1024)
|
||||
|
||||
@@ -106,6 +126,17 @@ enum {
 	BUF_TYPE_USERPTR,
 };
 
+enum g2d_reg_type {
+	REG_TYPE_NONE = -1,
+	REG_TYPE_SRC,
+	REG_TYPE_SRC_PLANE2,
+	REG_TYPE_DST,
+	REG_TYPE_DST_PLANE2,
+	REG_TYPE_PAT,
+	REG_TYPE_MSK,
+	MAX_REG_TYPE_NR
+};
+
 /* cmdlist data structure */
 struct g2d_cmdlist {
 	u32	head;
@@ -113,6 +144,42 @@ struct g2d_cmdlist {
 	u32	last;	/* last data offset */
 };
 
+/*
+ * A structure of buffer description
+ *
+ * @format: color format
+ * @left_x: the x coordinates of left top corner
+ * @top_y: the y coordinates of left top corner
+ * @right_x: the x coordinates of right bottom corner
+ * @bottom_y: the y coordinates of right bottom corner
+ *
+ */
+struct g2d_buf_desc {
+	unsigned int	format;
+	unsigned int	left_x;
+	unsigned int	top_y;
+	unsigned int	right_x;
+	unsigned int	bottom_y;
+};
+
+/*
+ * A structure of buffer information
+ *
+ * @map_nr: manages the number of mapped buffers
+ * @reg_types: stores regitster type in the order of requested command
+ * @handles: stores buffer handle in its reg_type position
+ * @types: stores buffer type in its reg_type position
+ * @descs: stores buffer description in its reg_type position
+ *
+ */
+struct g2d_buf_info {
+	unsigned int		map_nr;
+	enum g2d_reg_type	reg_types[MAX_REG_TYPE_NR];
+	unsigned long		handles[MAX_REG_TYPE_NR];
+	unsigned int		types[MAX_REG_TYPE_NR];
+	struct g2d_buf_desc	descs[MAX_REG_TYPE_NR];
+};
+
 struct drm_exynos_pending_g2d_event {
 	struct drm_pending_event	base;
 	struct drm_exynos_g2d_event	event;
@@ -131,14 +198,11 @@ struct g2d_cmdlist_userptr {
 	bool			in_pool;
 	bool			out_of_list;
 };
 
 struct g2d_cmdlist_node {
 	struct list_head	list;
 	struct g2d_cmdlist	*cmdlist;
-	unsigned int		map_nr;
-	unsigned long		handles[MAX_BUF_ADDR_NR];
-	unsigned int		obj_type[MAX_BUF_ADDR_NR];
 	dma_addr_t		dma_addr;
+	struct g2d_buf_info	buf_info;
 
 	struct drm_exynos_pending_g2d_event	*event;
 };
@@ -188,6 +252,7 @@ static int g2d_init_cmdlist(struct g2d_data *g2d)
 	struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
 	int nr;
 	int ret;
+	struct g2d_buf_info *buf_info;
 
 	init_dma_attrs(&g2d->cmdlist_dma_attrs);
 	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &g2d->cmdlist_dma_attrs);
@@ -209,11 +274,17 @@ static int g2d_init_cmdlist(struct g2d_data *g2d)
 	}
 
 	for (nr = 0; nr < G2D_CMDLIST_NUM; nr++) {
+		unsigned int i;
+
 		node[nr].cmdlist =
 			g2d->cmdlist_pool_virt + nr * G2D_CMDLIST_SIZE;
 		node[nr].dma_addr =
 			g2d->cmdlist_pool + nr * G2D_CMDLIST_SIZE;
 
+		buf_info = &node[nr].buf_info;
+		for (i = 0; i < MAX_REG_TYPE_NR; i++)
+			buf_info->reg_types[i] = REG_TYPE_NONE;
+
 		list_add_tail(&node[nr].list, &g2d->free_cmdlist);
 	}
 
@@ -450,7 +521,7 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
 					DMA_BIDIRECTIONAL);
 	if (ret < 0) {
 		DRM_ERROR("failed to map sgt with dma region.\n");
-		goto err_free_sgt;
+		goto err_sg_free_table;
 	}
 
 	g2d_userptr->dma_addr = sgt->sgl[0].dma_address;
@@ -467,8 +538,10 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
 
 	return &g2d_userptr->dma_addr;
 
-err_free_sgt:
+err_sg_free_table:
 	sg_free_table(sgt);
+
+err_free_sgt:
 	kfree(sgt);
 	sgt = NULL;
 
@@ -506,36 +579,172 @@ static void g2d_userptr_free_all(struct drm_device *drm_dev,
 	g2d->current_pool = 0;
 }
 
+static enum g2d_reg_type g2d_get_reg_type(int reg_offset)
+{
+	enum g2d_reg_type reg_type;
+
+	switch (reg_offset) {
+	case G2D_SRC_BASE_ADDR:
+	case G2D_SRC_COLOR_MODE:
+	case G2D_SRC_LEFT_TOP:
+	case G2D_SRC_RIGHT_BOTTOM:
+		reg_type = REG_TYPE_SRC;
+		break;
+	case G2D_SRC_PLANE2_BASE_ADDR:
+		reg_type = REG_TYPE_SRC_PLANE2;
+		break;
+	case G2D_DST_BASE_ADDR:
+	case G2D_DST_COLOR_MODE:
+	case G2D_DST_LEFT_TOP:
+	case G2D_DST_RIGHT_BOTTOM:
+		reg_type = REG_TYPE_DST;
+		break;
+	case G2D_DST_PLANE2_BASE_ADDR:
+		reg_type = REG_TYPE_DST_PLANE2;
+		break;
+	case G2D_PAT_BASE_ADDR:
+		reg_type = REG_TYPE_PAT;
+		break;
+	case G2D_MSK_BASE_ADDR:
+		reg_type = REG_TYPE_MSK;
+		break;
+	default:
+		reg_type = REG_TYPE_NONE;
+		DRM_ERROR("Unknown register offset![%d]\n", reg_offset);
+		break;
+	};
+
+	return reg_type;
+}
+
+static unsigned long g2d_get_buf_bpp(unsigned int format)
+{
+	unsigned long bpp;
+
+	switch (format) {
+	case G2D_FMT_XRGB8888:
+	case G2D_FMT_ARGB8888:
+		bpp = 4;
+		break;
+	case G2D_FMT_RGB565:
+	case G2D_FMT_XRGB1555:
+	case G2D_FMT_ARGB1555:
+	case G2D_FMT_XRGB4444:
+	case G2D_FMT_ARGB4444:
+		bpp = 2;
+		break;
+	case G2D_FMT_PACKED_RGB888:
+		bpp = 3;
+		break;
+	default:
+		bpp = 1;
+		break;
+	}
+
+	return bpp;
+}
+
+static bool g2d_check_buf_desc_is_valid(struct g2d_buf_desc *buf_desc,
+					enum g2d_reg_type reg_type,
+					unsigned long size)
+{
+	unsigned int width, height;
+	unsigned long area;
+
+	/*
+	 * check source and destination buffers only.
+	 * so the others are always valid.
+	 */
+	if (reg_type != REG_TYPE_SRC && reg_type != REG_TYPE_DST)
+		return true;
+
+	width = buf_desc->right_x - buf_desc->left_x;
+	if (width < G2D_LEN_MIN || width > G2D_LEN_MAX) {
+		DRM_ERROR("width[%u] is out of range!\n", width);
+		return false;
+	}
+
+	height = buf_desc->bottom_y - buf_desc->top_y;
+	if (height < G2D_LEN_MIN || height > G2D_LEN_MAX) {
+		DRM_ERROR("height[%u] is out of range!\n", height);
+		return false;
+	}
+
+	area = (unsigned long)width * (unsigned long)height *
+					g2d_get_buf_bpp(buf_desc->format);
+	if (area > size) {
+		DRM_ERROR("area[%lu] is out of range[%lu]!\n", area, size);
+		return false;
+	}
+
+	return true;
+}
+
 static int g2d_map_cmdlist_gem(struct g2d_data *g2d,
 				struct g2d_cmdlist_node *node,
 				struct drm_device *drm_dev,
 				struct drm_file *file)
 {
 	struct g2d_cmdlist *cmdlist = node->cmdlist;
+	struct g2d_buf_info *buf_info = &node->buf_info;
 	int offset;
+	int ret;
 	int i;
 
-	for (i = 0; i < node->map_nr; i++) {
+	for (i = 0; i < buf_info->map_nr; i++) {
+		struct g2d_buf_desc *buf_desc;
+		enum g2d_reg_type reg_type;
+		int reg_pos;
 		unsigned long handle;
 		dma_addr_t *addr;
 
-		offset = cmdlist->last - (i * 2 + 1);
-		handle = cmdlist->data[offset];
+		reg_pos = cmdlist->last - 2 * (i + 1);
+
+		offset = cmdlist->data[reg_pos];
+		handle = cmdlist->data[reg_pos + 1];
+
+		reg_type = g2d_get_reg_type(offset);
+		if (reg_type == REG_TYPE_NONE) {
+			ret = -EFAULT;
+			goto err;
+		}
+
+		buf_desc = &buf_info->descs[reg_type];
+
+		if (buf_info->types[reg_type] == BUF_TYPE_GEM) {
+			unsigned long size;
+
+			size = exynos_drm_gem_get_size(drm_dev, handle, file);
+			if (!size) {
+				ret = -EFAULT;
+				goto err;
+			}
+
+			if (!g2d_check_buf_desc_is_valid(buf_desc, reg_type,
+									size)) {
+				ret = -EFAULT;
+				goto err;
+			}
 
-		if (node->obj_type[i] == BUF_TYPE_GEM) {
 			addr = exynos_drm_gem_get_dma_addr(drm_dev, handle,
 								file);
 			if (IS_ERR(addr)) {
-				node->map_nr = i;
-				return -EFAULT;
+				ret = -EFAULT;
+				goto err;
 			}
 		} else {
 			struct drm_exynos_g2d_userptr g2d_userptr;
 
 			if (copy_from_user(&g2d_userptr, (void __user *)handle,
 				sizeof(struct drm_exynos_g2d_userptr))) {
-				node->map_nr = i;
-				return -EFAULT;
+				ret = -EFAULT;
+				goto err;
 			}
 
+			if (!g2d_check_buf_desc_is_valid(buf_desc, reg_type,
+							g2d_userptr.size)) {
+				ret = -EFAULT;
+				goto err;
+			}
+
 			addr = g2d_userptr_get_dma_addr(drm_dev,
@@ -544,16 +753,21 @@ static int g2d_map_cmdlist_gem(struct g2d_data *g2d,
 							file,
 							&handle);
 			if (IS_ERR(addr)) {
-				node->map_nr = i;
-				return -EFAULT;
+				ret = -EFAULT;
+				goto err;
 			}
 		}
 
-		cmdlist->data[offset] = *addr;
-		node->handles[i] = handle;
+		cmdlist->data[reg_pos + 1] = *addr;
+		buf_info->reg_types[i] = reg_type;
+		buf_info->handles[reg_type] = handle;
 	}
 
 	return 0;
+
+err:
+	buf_info->map_nr = i;
+	return ret;
 }
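To make the new size validation concrete, a sketch with assumed numbers (none of these values appear in the patch):

    /* Hypothetical descriptor: a 100x50 XRGB8888 source rectangle.
     * width * height * bpp = 100 * 50 * 4 = 20000 bytes. */
    struct g2d_buf_desc desc = {
        .format   = G2D_FMT_XRGB8888,   /* 4 bytes per pixel */
        .left_x   = 0,
        .top_y    = 0,
        .right_x  = 100,
        .bottom_y = 50,
    };
    bool ok = g2d_check_buf_desc_is_valid(&desc, REG_TYPE_SRC, 16384);
    /* ok == false: the described area exceeds the 16 KiB backing object */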
@@ -561,22 +775,33 @@ static void g2d_unmap_cmdlist_gem(struct g2d_data *g2d,
 				  struct drm_file *filp)
 {
 	struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
+	struct g2d_buf_info *buf_info = &node->buf_info;
 	int i;
 
-	for (i = 0; i < node->map_nr; i++) {
-		unsigned long handle = node->handles[i];
+	for (i = 0; i < buf_info->map_nr; i++) {
+		struct g2d_buf_desc *buf_desc;
+		enum g2d_reg_type reg_type;
+		unsigned long handle;
+
+		reg_type = buf_info->reg_types[i];
+
+		buf_desc = &buf_info->descs[reg_type];
+		handle = buf_info->handles[reg_type];
 
-		if (node->obj_type[i] == BUF_TYPE_GEM)
+		if (buf_info->types[reg_type] == BUF_TYPE_GEM)
 			exynos_drm_gem_put_dma_addr(subdrv->drm_dev, handle,
 							filp);
 		else
 			g2d_userptr_put_dma_addr(subdrv->drm_dev, handle,
 							false);
 
-		node->handles[i] = 0;
+		buf_info->reg_types[i] = REG_TYPE_NONE;
+		buf_info->handles[reg_type] = 0;
+		buf_info->types[reg_type] = 0;
+		memset(buf_desc, 0x00, sizeof(*buf_desc));
 	}
 
-	node->map_nr = 0;
+	buf_info->map_nr = 0;
 }
 
 static void g2d_dma_start(struct g2d_data *g2d,
@ -589,10 +814,6 @@ static void g2d_dma_start(struct g2d_data *g2d,
|
||||
pm_runtime_get_sync(g2d->dev);
|
||||
clk_enable(g2d->gate_clk);
|
||||
|
||||
/* interrupt enable */
|
||||
writel_relaxed(G2D_INTEN_ACF | G2D_INTEN_UCF | G2D_INTEN_GCF,
|
||||
g2d->regs + G2D_INTEN);
|
||||
|
||||
writel_relaxed(node->dma_addr, g2d->regs + G2D_DMA_SFR_BASE_ADDR);
|
||||
writel_relaxed(G2D_DMA_START, g2d->regs + G2D_DMA_COMMAND);
|
||||
}
|
||||
@ -643,7 +864,6 @@ static void g2d_runqueue_worker(struct work_struct *work)
|
||||
struct g2d_data *g2d = container_of(work, struct g2d_data,
|
||||
runqueue_work);
|
||||
|
||||
|
||||
mutex_lock(&g2d->runqueue_mutex);
|
||||
clk_disable(g2d->gate_clk);
|
||||
pm_runtime_put_sync(g2d->dev);
|
||||
@@ -724,20 +944,14 @@ static int g2d_check_reg_offset(struct device *dev,
	int i;

	for (i = 0; i < nr; i++) {
		struct g2d_buf_info *buf_info = &node->buf_info;
		struct g2d_buf_desc *buf_desc;
		enum g2d_reg_type reg_type;
		unsigned long value;

		index = cmdlist->last - 2 * (i + 1);

		if (for_addr) {
			/* check userptr buffer type. */
			reg_offset = (cmdlist->data[index] &
					~0x7fffffff) >> 31;
			if (reg_offset) {
				node->obj_type[i] = BUF_TYPE_USERPTR;
				cmdlist->data[index] &= ~G2D_BUF_USERPTR;
			}
		}

		reg_offset = cmdlist->data[index] & ~0xfffff000;

		if (reg_offset < G2D_VALID_START || reg_offset > G2D_VALID_END)
			goto err;
		if (reg_offset % 4)
@@ -753,8 +967,60 @@
			if (!for_addr)
				goto err;

			if (node->obj_type[i] != BUF_TYPE_USERPTR)
				node->obj_type[i] = BUF_TYPE_GEM;
			reg_type = g2d_get_reg_type(reg_offset);
			if (reg_type == REG_TYPE_NONE)
				goto err;

			/* check userptr buffer type. */
			if ((cmdlist->data[index] & ~0x7fffffff) >> 31) {
				buf_info->types[reg_type] = BUF_TYPE_USERPTR;
				cmdlist->data[index] &= ~G2D_BUF_USERPTR;
			} else
				buf_info->types[reg_type] = BUF_TYPE_GEM;
			break;
		case G2D_SRC_COLOR_MODE:
		case G2D_DST_COLOR_MODE:
			if (for_addr)
				goto err;

			reg_type = g2d_get_reg_type(reg_offset);
			if (reg_type == REG_TYPE_NONE)
				goto err;

			buf_desc = &buf_info->descs[reg_type];
			value = cmdlist->data[index + 1];

			buf_desc->format = value & 0xf;
			break;
		case G2D_SRC_LEFT_TOP:
		case G2D_DST_LEFT_TOP:
			if (for_addr)
				goto err;

			reg_type = g2d_get_reg_type(reg_offset);
			if (reg_type == REG_TYPE_NONE)
				goto err;

			buf_desc = &buf_info->descs[reg_type];
			value = cmdlist->data[index + 1];

			buf_desc->left_x = value & 0x1fff;
			buf_desc->top_y = (value & 0x1fff0000) >> 16;
			break;
		case G2D_SRC_RIGHT_BOTTOM:
		case G2D_DST_RIGHT_BOTTOM:
			if (for_addr)
				goto err;

			reg_type = g2d_get_reg_type(reg_offset);
			if (reg_type == REG_TYPE_NONE)
				goto err;

			buf_desc = &buf_info->descs[reg_type];
			value = cmdlist->data[index + 1];

			buf_desc->right_x = value & 0x1fff;
			buf_desc->bottom_y = (value & 0x1fff0000) >> 16;
			break;
		default:
			if (for_addr)
@@ -860,9 +1126,23 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
	cmdlist->data[cmdlist->last++] = G2D_SRC_BASE_ADDR;
	cmdlist->data[cmdlist->last++] = 0;

	/*
	 * 'LIST_HOLD' command should be set to the DMA_HOLD_CMD_REG
	 * and GCF bit should be set to INTEN register if user wants
	 * G2D interrupt event once current command list execution is
	 * finished.
	 * Otherwise only ACF bit should be set to INTEN register so
	 * that one interrupt occurs after all command lists
	 * have been completed.
	 */
	if (node->event) {
		cmdlist->data[cmdlist->last++] = G2D_INTEN;
		cmdlist->data[cmdlist->last++] = G2D_INTEN_ACF | G2D_INTEN_GCF;
		cmdlist->data[cmdlist->last++] = G2D_DMA_HOLD_CMD;
		cmdlist->data[cmdlist->last++] = G2D_LIST_HOLD;
	} else {
		cmdlist->data[cmdlist->last++] = G2D_INTEN;
		cmdlist->data[cmdlist->last++] = G2D_INTEN_ACF;
	}

	/* Check size of cmdlist: last 2 is about G2D_BITBLT_START */
@@ -887,7 +1167,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
	if (ret < 0)
		goto err_free_event;

	node->map_nr = req->cmd_buf_nr;
	node->buf_info.map_nr = req->cmd_buf_nr;
	if (req->cmd_buf_nr) {
		struct drm_exynos_g2d_cmd *cmd_buf;
@@ -164,6 +164,27 @@ out:
	exynos_gem_obj = NULL;
}

unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
						unsigned int gem_handle,
						struct drm_file *file_priv)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return 0;
	}

	exynos_gem_obj = to_exynos_gem_obj(obj);

	drm_gem_object_unreference_unlocked(obj);

	return exynos_gem_obj->buffer->size;
}

struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
						unsigned long size)
{
@@ -130,6 +130,11 @@ int exynos_drm_gem_userptr_ioctl(struct drm_device *dev, void *data,
int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);

/* get buffer size to gem handle. */
unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
					unsigned int gem_handle,
					struct drm_file *file_priv);

/* initialize gem object. */
int exynos_drm_gem_init_object(struct drm_gem_object *obj);
@@ -117,13 +117,12 @@ static struct edid *vidi_get_edid(struct device *dev,
	}

	edid_len = (1 + ctx->raw_edid->extensions) * EDID_LENGTH;
	edid = kzalloc(edid_len, GFP_KERNEL);
	edid = kmemdup(ctx->raw_edid, edid_len, GFP_KERNEL);
	if (!edid) {
		DRM_DEBUG_KMS("failed to allocate edid\n");
		return ERR_PTR(-ENOMEM);
	}

	memcpy(edid, ctx->raw_edid, edid_len);
	return edid;
}

@@ -563,12 +562,11 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
			return -EINVAL;
		}
		edid_len = (1 + raw_edid->extensions) * EDID_LENGTH;
		ctx->raw_edid = kzalloc(edid_len, GFP_KERNEL);
		ctx->raw_edid = kmemdup(raw_edid, edid_len, GFP_KERNEL);
		if (!ctx->raw_edid) {
			DRM_DEBUG_KMS("failed to allocate raw_edid.\n");
			return -ENOMEM;
		}
		memcpy(ctx->raw_edid, raw_edid, edid_len);
	} else {
		/*
		 * with connection = 0, free raw_edid
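Both vidi hunks above collapse a kzalloc() followed by memcpy() into a single kmemdup() call; kmemdup() allocates a buffer of the given length, copies the source into it, and returns NULL on allocation failure. A hedged sketch of the pattern, where raw_edid and edid_len stand in for the driver's locals:

	#include <linux/slab.h>

	void *edid_copy = kmemdup(raw_edid, edid_len, GFP_KERNEL);
	if (!edid_copy)
		return ERR_PTR(-ENOMEM);	/* one call replaces alloc + copy */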
@@ -818,7 +818,7 @@ static void mixer_win_disable(void *ctx, int win)
	mixer_ctx->win_data[win].enabled = false;
}

int mixer_check_timing(void *ctx, struct fb_videomode *timing)
static int mixer_check_timing(void *ctx, struct fb_videomode *timing)
{
	struct mixer_context *mixer_ctx = ctx;
	u32 w, h;
@@ -103,7 +103,7 @@ static const char *cache_level_str(int type)
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	seq_printf(m, "%p: %s%s %8zdKiB %02x %02x %d %d %d%s%s%s",
	seq_printf(m, "%pK: %s%s %8zdKiB %02x %02x %d %d %d%s%s%s",
		   &obj->base,
		   get_pin_flag(obj),
		   get_tiling_flag(obj),
@@ -125,6 +125,11 @@ MODULE_PARM_DESC(preliminary_hw_support,
		"Enable Haswell and ValleyView Support. "
		"(default: false)");

int i915_disable_power_well __read_mostly = 0;
module_param_named(disable_power_well, i915_disable_power_well, int, 0600);
MODULE_PARM_DESC(disable_power_well,
		"Disable the power well when possible (default: false)");

static struct drm_driver driver;
extern int intel_agp_enabled;
@@ -1398,6 +1398,7 @@ extern int i915_enable_fbc __read_mostly;
extern bool i915_enable_hangcheck __read_mostly;
extern int i915_enable_ppgtt __read_mostly;
extern unsigned int i915_preliminary_hw_support __read_mostly;
extern int i915_disable_power_well __read_mostly;

extern int i915_suspend(struct drm_device *dev, pm_message_t state);
extern int i915_resume(struct drm_device *dev);
@@ -732,6 +732,8 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
		   int count)
{
	int i;
	int relocs_total = 0;
	int relocs_max = INT_MAX / sizeof(struct drm_i915_gem_relocation_entry);

	for (i = 0; i < count; i++) {
		char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
@@ -740,10 +742,13 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
		if (exec[i].flags & __EXEC_OBJECT_UNKNOWN_FLAGS)
			return -EINVAL;

		/* First check for malicious input causing overflow */
		if (exec[i].relocation_count >
		    INT_MAX / sizeof(struct drm_i915_gem_relocation_entry))
		/* First check for malicious input causing overflow in
		 * the worst case where we need to allocate the entire
		 * relocation tree as a single array.
		 */
		if (exec[i].relocation_count > relocs_max - relocs_total)
			return -EINVAL;
		relocs_total += exec[i].relocation_count;

		length = exec[i].relocation_count *
			sizeof(struct drm_i915_gem_relocation_entry);
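The validate_exec_list() change above guards a running total rather than each item alone: with relocs_max = INT_MAX / sizeof(entry), testing count > relocs_max - relocs_total rejects input before the accumulated sum can overflow, which the old per-item check could not catch. A small userspace sketch of the same guard; the types and names are illustrative:

	#include <limits.h>
	#include <stddef.h>

	struct entry { unsigned int w[8]; };	/* stand-in for a reloc entry */

	/* Returns 0 and updates *total, or -1 if the sum would overflow. */
	static int account(size_t *total, size_t count)
	{
		size_t max = INT_MAX / sizeof(struct entry);

		if (count > max - *total)	/* total never exceeds max */
			return -1;
		*total += count;
		return 0;
	}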
@@ -5771,6 +5771,11 @@ static int haswell_crtc_mode_set(struct drm_crtc *crtc,
			num_connectors++;
	}

	if (is_cpu_edp)
		intel_crtc->cpu_transcoder = TRANSCODER_EDP;
	else
		intel_crtc->cpu_transcoder = pipe;

	/* We are not sure yet this won't happen. */
	WARN(!HAS_PCH_LPT(dev), "Unexpected PCH type %d\n",
	     INTEL_PCH_TYPE(dev));
@@ -5837,11 +5842,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
	int pipe = intel_crtc->pipe;
	int ret;

	if (IS_HASWELL(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
		intel_crtc->cpu_transcoder = TRANSCODER_EDP;
	else
		intel_crtc->cpu_transcoder = pipe;

	drm_vblank_pre_modeset(dev, pipe);

	ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode,
@@ -820,6 +820,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
	struct intel_link_m_n m_n;
	int pipe = intel_crtc->pipe;
	enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
	int target_clock;

	/*
	 * Find the lane count in the intel_encoder private
@@ -835,13 +836,22 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
		}
	}

	target_clock = mode->clock;
	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
		if (intel_encoder->type == INTEL_OUTPUT_EDP) {
			target_clock = intel_edp_target_clock(intel_encoder,
							      mode);
			break;
		}
	}

	/*
	 * Compute the GMCH and Link ratios. The '3' here is
	 * the number of bytes_per_pixel post-LUT, which we always
	 * set up for 8-bits of R/G/B, or 3 bytes total.
	 */
	intel_link_compute_m_n(intel_crtc->bpp, lane_count,
			       mode->clock, adjusted_mode->clock, &m_n);
			       target_clock, adjusted_mode->clock, &m_n);

	if (IS_HASWELL(dev)) {
		I915_WRITE(PIPE_DATA_M1(cpu_transcoder),
@@ -1930,7 +1940,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
		for (i = 0; i < intel_dp->lane_count; i++)
			if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
				break;
		if (i == intel_dp->lane_count && voltage_tries == 5) {
		if (i == intel_dp->lane_count) {
			++loop_tries;
			if (loop_tries == 5) {
				DRM_DEBUG_KMS("too many full retries, give up\n");
@@ -203,7 +203,13 @@ intel_gpio_setup(struct intel_gmbus *bus, u32 pin)
	algo->data = bus;
}

#define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 4)
/*
 * gmbus on gen4 seems to be able to generate legacy interrupts even when in MSI
 * mode. This results in spurious interrupt warnings if the legacy irq no. is
 * shared with another device. The kernel then disables that interrupt source
 * and so prevents the other device from working properly.
 */
#define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 5)
static int
gmbus_wait_hw_status(struct drm_i915_private *dev_priv,
		     u32 gmbus2_status,
@@ -214,6 +220,9 @@ gmbus_wait_hw_status(struct drm_i915_private *dev_priv,
	u32 gmbus2 = 0;
	DEFINE_WAIT(wait);

	if (!HAS_GMBUS_IRQ(dev_priv->dev))
		gmbus4_irq_en = 0;

	/* Important: The hw handles only the first bit, so set only one! Since
	 * we also need to check for NAKs besides the hw ready/idle signal, we
	 * need to wake up periodically and check that ourselves. */
@@ -321,9 +321,6 @@ void intel_panel_enable_backlight(struct drm_device *dev,
	if (dev_priv->backlight_level == 0)
		dev_priv->backlight_level = intel_panel_get_max_backlight(dev);

	dev_priv->backlight_enabled = true;
	intel_panel_actually_set_backlight(dev, dev_priv->backlight_level);

	if (INTEL_INFO(dev)->gen >= 4) {
		uint32_t reg, tmp;

@@ -359,11 +356,11 @@
	}

set_level:
	/* Check the current backlight level and try to set again if it's zero.
	 * On some machines, BLC_PWM_CPU_CTL is cleared to zero automatically
	 * when BLC_PWM_CPU_CTL2 and BLC_PWM_PCH_CTL1 are written.
	/* Call below after setting BLC_PWM_CPU_CTL2 and BLC_PWM_PCH_CTL1.
	 * BLC_PWM_CPU_CTL may be cleared to zero automatically when these
	 * registers are set.
	 */
	if (!intel_panel_get_backlight(dev))
	dev_priv->backlight_enabled = true;
	intel_panel_actually_set_backlight(dev, dev_priv->backlight_level);
}
@@ -4079,6 +4079,9 @@ void intel_set_power_well(struct drm_device *dev, bool enable)
	if (!IS_HASWELL(dev))
		return;

	if (!i915_disable_power_well && !enable)
		return;

	tmp = I915_READ(HSW_PWR_WELL_DRIVER);
	is_enabled = tmp & HSW_PWR_WELL_STATE;
	enable_requested = tmp & HSW_PWR_WELL_ENABLE;
@@ -382,19 +382,19 @@ static int mga_g200eh_set_plls(struct mga_device *mdev, long clock)
	m = n = p = 0;
	vcomax = 800000;
	vcomin = 400000;
	pllreffreq = 3333;
	pllreffreq = 33333;

	delta = 0xffffffff;
	permitteddelta = clock * 5 / 1000;

	for (testp = 16; testp > 0; testp--) {
	for (testp = 16; testp > 0; testp >>= 1) {
		if (clock * testp > vcomax)
			continue;
		if (clock * testp < vcomin)
			continue;

		for (testm = 1; testm < 33; testm++) {
			for (testn = 1; testn < 257; testn++) {
			for (testn = 17; testn < 257; testn++) {
				computed = (pllreffreq * testn) /
					(testm * testp);
				if (computed > clock)
@@ -404,11 +404,11 @@ static int mga_g200eh_set_plls(struct mga_device *mdev, long clock)
				if (tmpdelta < delta) {
					delta = tmpdelta;
					n = testn - 1;
					m = (testm - 1) | ((n >> 1) & 0x80);
					m = (testm - 1);
					p = testp - 1;
				}
				if ((clock * testp) >= 600000)
					p |= 80;
					p |= 0x80;
			}
		}
	}
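The mga_g200eh fix above changes the post-divider search from testp-- to testp >>= 1, so only the power-of-two dividers 16, 8, 4, 2, 1 are tried, matching what the PLL can actually be programmed with. A hedged sketch of that search order, with illustrative bounds:

	/* Return the first power-of-two divider that keeps the VCO in range,
	 * or 0 if none does. */
	static unsigned int pick_post_div(unsigned long clock,
					  unsigned long vcomin,
					  unsigned long vcomax)
	{
		unsigned int testp;

		for (testp = 16; testp > 0; testp >>= 1) {
			if (clock * testp > vcomax || clock * testp < vcomin)
				continue;
			return testp;
		}
		return 0;
	}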
@@ -278,7 +278,6 @@ nouveau_object_del(struct nouveau_object *client, u32 _parent, u32 _handle)
	struct nouveau_object *parent = NULL;
	struct nouveau_object *namedb = NULL;
	struct nouveau_handle *handle = NULL;
	int ret = -EINVAL;

	parent = nouveau_handle_ref(client, _parent);
	if (!parent)
@@ -295,7 +294,7 @@ nouveau_object_del(struct nouveau_object *client, u32 _parent, u32 _handle)
	}

	nouveau_object_ref(NULL, &parent);
	return ret;
	return handle ? 0 : -EINVAL;
}

int
@@ -4,7 +4,7 @@
#include <core/device.h>
#include <core/subdev.h>

enum nouveau_therm_mode {
enum nouveau_therm_fan_mode {
	NOUVEAU_THERM_CTRL_NONE = 0,
	NOUVEAU_THERM_CTRL_MANUAL = 1,
	NOUVEAU_THERM_CTRL_AUTO = 2,
@@ -134,7 +134,7 @@ nouveau_therm_alarm(struct nouveau_alarm *alarm)
}

int
nouveau_therm_mode(struct nouveau_therm *therm, int mode)
nouveau_therm_fan_mode(struct nouveau_therm *therm, int mode)
{
	struct nouveau_therm_priv *priv = (void *)therm;
	struct nouveau_device *device = nv_device(therm);
@@ -149,10 +149,15 @@ nouveau_therm_mode(struct nouveau_therm *therm, int mode)
	    (mode != NOUVEAU_THERM_CTRL_NONE && device->card_type >= NV_C0))
		return -EINVAL;

	/* do not allow automatic fan management if the thermal sensor is
	 * not available */
	if (priv->mode == 2 && therm->temp_get(therm) < 0)
		return -EINVAL;

	if (priv->mode == mode)
		return 0;

	nv_info(therm, "Thermal management: %s\n", name[mode]);
	nv_info(therm, "fan management: %s\n", name[mode]);
	nouveau_therm_update(therm, mode);
	return 0;
}
@@ -213,7 +218,7 @@ nouveau_therm_attr_set(struct nouveau_therm *therm,
		priv->fan->bios.max_duty = value;
		return 0;
	case NOUVEAU_THERM_ATTR_FAN_MODE:
		return nouveau_therm_mode(therm, value);
		return nouveau_therm_fan_mode(therm, value);
	case NOUVEAU_THERM_ATTR_THRS_FAN_BOOST:
		priv->bios_sensor.thrs_fan_boost.temp = value;
		priv->sensor.program_alarms(therm);
@@ -263,7 +268,7 @@ _nouveau_therm_init(struct nouveau_object *object)
		return ret;

	if (priv->suspend >= 0)
		nouveau_therm_mode(therm, priv->mode);
		nouveau_therm_fan_mode(therm, priv->mode);
	priv->sensor.program_alarms(therm);
	return 0;
}
@@ -313,11 +318,12 @@ nouveau_therm_create_(struct nouveau_object *parent,
int
nouveau_therm_preinit(struct nouveau_therm *therm)
{
	nouveau_therm_ic_ctor(therm);
	nouveau_therm_sensor_ctor(therm);
	nouveau_therm_ic_ctor(therm);
	nouveau_therm_fan_ctor(therm);

	nouveau_therm_mode(therm, NOUVEAU_THERM_CTRL_NONE);
	nouveau_therm_fan_mode(therm, NOUVEAU_THERM_CTRL_NONE);
	nouveau_therm_sensor_preinit(therm);
	return 0;
}
|
||||
struct i2c_board_info *info)
|
||||
{
|
||||
struct nouveau_therm_priv *priv = (void *)nouveau_therm(i2c);
|
||||
struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
|
||||
struct i2c_client *client;
|
||||
|
||||
request_module("%s%s", I2C_MODULE_PREFIX, info->type);
|
||||
@ -46,8 +47,9 @@ probe_monitoring_device(struct nouveau_i2c_port *i2c,
|
||||
}
|
||||
|
||||
nv_info(priv,
|
||||
"Found an %s at address 0x%x (controlled by lm_sensors)\n",
|
||||
info->type, info->addr);
|
||||
"Found an %s at address 0x%x (controlled by lm_sensors, "
|
||||
"temp offset %+i C)\n",
|
||||
info->type, info->addr, sensor->offset_constant);
|
||||
priv->ic = client;
|
||||
|
||||
return true;
|
||||
|
@@ -29,54 +29,83 @@ struct nv40_therm_priv {
	struct nouveau_therm_priv base;
};

static int
nv40_sensor_setup(struct nouveau_therm *therm)
enum nv40_sensor_style { INVALID_STYLE = -1, OLD_STYLE = 0, NEW_STYLE = 1 };

static enum nv40_sensor_style
nv40_sensor_style(struct nouveau_therm *therm)
{
	struct nouveau_device *device = nv_device(therm);

	switch (device->chipset) {
	case 0x43:
	case 0x44:
	case 0x4a:
	case 0x47:
		return OLD_STYLE;

	case 0x46:
	case 0x49:
	case 0x4b:
	case 0x4e:
	case 0x4c:
	case 0x67:
	case 0x68:
	case 0x63:
		return NEW_STYLE;
	default:
		return INVALID_STYLE;
	}
}

static int
nv40_sensor_setup(struct nouveau_therm *therm)
{
	enum nv40_sensor_style style = nv40_sensor_style(therm);

	/* enable ADC readout and disable the ALARM threshold */
	if (device->chipset >= 0x46) {
	if (style == NEW_STYLE) {
		nv_mask(therm, 0x15b8, 0x80000000, 0);
		nv_wr32(therm, 0x15b0, 0x80003fff);
		mdelay(10); /* wait for the temperature to stabilize */
		mdelay(20); /* wait for the temperature to stabilize */
		return nv_rd32(therm, 0x15b4) & 0x3fff;
	} else {
	} else if (style == OLD_STYLE) {
		nv_wr32(therm, 0x15b0, 0xff);
		mdelay(20); /* wait for the temperature to stabilize */
		return nv_rd32(therm, 0x15b4) & 0xff;
	}
	} else
		return -ENODEV;
}

static int
nv40_temp_get(struct nouveau_therm *therm)
{
	struct nouveau_therm_priv *priv = (void *)therm;
	struct nouveau_device *device = nv_device(therm);
	struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
	enum nv40_sensor_style style = nv40_sensor_style(therm);
	int core_temp;

	if (device->chipset >= 0x46) {
	if (style == NEW_STYLE) {
		nv_wr32(therm, 0x15b0, 0x80003fff);
		core_temp = nv_rd32(therm, 0x15b4) & 0x3fff;
	} else {
	} else if (style == OLD_STYLE) {
		nv_wr32(therm, 0x15b0, 0xff);
		core_temp = nv_rd32(therm, 0x15b4) & 0xff;
	}
	} else
		return -ENODEV;

	/* Setup the sensor if the temperature is 0 */
	if (core_temp == 0)
		core_temp = nv40_sensor_setup(therm);

	if (sensor->slope_div == 0)
		sensor->slope_div = 1;
	if (sensor->offset_den == 0)
		sensor->offset_den = 1;
	if (sensor->slope_mult < 1)
		sensor->slope_mult = 1;
	/* if the slope or the offset is unset, do not use the sensor */
	if (!sensor->slope_div || !sensor->slope_mult ||
	    !sensor->offset_num || !sensor->offset_den)
		return -ENODEV;

	core_temp = core_temp * sensor->slope_mult / sensor->slope_div;
	core_temp = core_temp + sensor->offset_num / sensor->offset_den;
	core_temp = core_temp + sensor->offset_constant - 8;

	/* reserve negative temperatures for errors */
	if (core_temp < 0)
		core_temp = 0;

	return core_temp;
}
@@ -102,7 +102,7 @@ struct nouveau_therm_priv {
	struct i2c_client *ic;
};

int nouveau_therm_mode(struct nouveau_therm *therm, int mode);
int nouveau_therm_fan_mode(struct nouveau_therm *therm, int mode);
int nouveau_therm_attr_get(struct nouveau_therm *therm,
			   enum nouveau_therm_attr_type type);
int nouveau_therm_attr_set(struct nouveau_therm *therm,
@@ -122,6 +122,7 @@ int nouveau_therm_fan_sense(struct nouveau_therm *therm);

int nouveau_therm_preinit(struct nouveau_therm *);

void nouveau_therm_sensor_preinit(struct nouveau_therm *);
void nouveau_therm_sensor_set_threshold_state(struct nouveau_therm *therm,
					      enum nouveau_therm_thrs thrs,
					      enum nouveau_therm_thrs_state st);
@@ -34,10 +34,6 @@ nouveau_therm_temp_set_defaults(struct nouveau_therm *therm)
{
	struct nouveau_therm_priv *priv = (void *)therm;

	priv->bios_sensor.slope_mult = 1;
	priv->bios_sensor.slope_div = 1;
	priv->bios_sensor.offset_num = 0;
	priv->bios_sensor.offset_den = 1;
	priv->bios_sensor.offset_constant = 0;

	priv->bios_sensor.thrs_fan_boost.temp = 90;
@@ -60,11 +56,6 @@ nouveau_therm_temp_safety_checks(struct nouveau_therm *therm)
	struct nouveau_therm_priv *priv = (void *)therm;
	struct nvbios_therm_sensor *s = &priv->bios_sensor;

	if (!priv->bios_sensor.slope_div)
		priv->bios_sensor.slope_div = 1;
	if (!priv->bios_sensor.offset_den)
		priv->bios_sensor.offset_den = 1;

	/* enforce a minimum hysteresis on thresholds */
	s->thrs_fan_boost.hysteresis = max_t(u8, s->thrs_fan_boost.hysteresis, 2);
	s->thrs_down_clock.hysteresis = max_t(u8, s->thrs_down_clock.hysteresis, 2);
@@ -106,16 +97,16 @@ void nouveau_therm_sensor_event(struct nouveau_therm *therm,
	const char *thresolds[] = {
		"fanboost", "downclock", "critical", "shutdown"
	};
	uint8_t temperature = therm->temp_get(therm);
	int temperature = therm->temp_get(therm);

	if (thrs < 0 || thrs > 3)
		return;

	if (dir == NOUVEAU_THERM_THRS_FALLING)
		nv_info(therm, "temperature (%u C) went below the '%s' threshold\n",
		nv_info(therm, "temperature (%i C) went below the '%s' threshold\n",
			temperature, thresolds[thrs]);
	else
		nv_info(therm, "temperature (%u C) hit the '%s' threshold\n",
		nv_info(therm, "temperature (%i C) hit the '%s' threshold\n",
			temperature, thresolds[thrs]);

	active = (dir == NOUVEAU_THERM_THRS_RISING);
@@ -123,7 +114,7 @@ void nouveau_therm_sensor_event(struct nouveau_therm *therm,
	case NOUVEAU_THERM_THRS_FANBOOST:
		if (active) {
			nouveau_therm_fan_set(therm, true, 100);
			nouveau_therm_mode(therm, NOUVEAU_THERM_CTRL_AUTO);
			nouveau_therm_fan_mode(therm, NOUVEAU_THERM_CTRL_AUTO);
		}
		break;
	case NOUVEAU_THERM_THRS_DOWNCLOCK:
@@ -202,7 +193,7 @@ alarm_timer_callback(struct nouveau_alarm *alarm)
					       NOUVEAU_THERM_THRS_SHUTDOWN);

	/* schedule the next poll in one second */
	if (list_empty(&alarm->head))
	if (therm->temp_get(therm) >= 0 && list_empty(&alarm->head))
		ptimer->alarm(ptimer, 1000 * 1000 * 1000, alarm);

	spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags);
@@ -225,6 +216,17 @@ nouveau_therm_program_alarms_polling(struct nouveau_therm *therm)
	alarm_timer_callback(&priv->sensor.therm_poll_alarm);
}

void
nouveau_therm_sensor_preinit(struct nouveau_therm *therm)
{
	const char *sensor_avail = "yes";

	if (therm->temp_get(therm) < 0)
		sensor_avail = "no";

	nv_info(therm, "internal sensor: %s\n", sensor_avail);
}

int
nouveau_therm_sensor_ctor(struct nouveau_therm *therm)
{
@@ -402,8 +402,12 @@ nouveau_hwmon_show_temp(struct device *d, struct device_attribute *a, char *buf)
	struct drm_device *dev = dev_get_drvdata(d);
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_therm *therm = nouveau_therm(drm->device);
	int temp = therm->temp_get(therm);

	return snprintf(buf, PAGE_SIZE, "%d\n", therm->temp_get(therm) * 1000);
	if (temp < 0)
		return temp;

	return snprintf(buf, PAGE_SIZE, "%d\n", temp * 1000);
}
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, nouveau_hwmon_show_temp,
			  NULL, 0);
@@ -871,7 +875,12 @@ static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO | S_IWUSR,
			  nouveau_hwmon_get_pwm1_max,
			  nouveau_hwmon_set_pwm1_max, 0);

static struct attribute *hwmon_attributes[] = {
static struct attribute *hwmon_default_attributes[] = {
	&sensor_dev_attr_name.dev_attr.attr,
	&sensor_dev_attr_update_rate.dev_attr.attr,
	NULL
};
static struct attribute *hwmon_temp_attributes[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_auto_point1_pwm.dev_attr.attr,
	&sensor_dev_attr_temp1_auto_point1_temp.dev_attr.attr,
@@ -882,8 +891,6 @@ static struct attribute *hwmon_attributes[] = {
	&sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
	&sensor_dev_attr_temp1_emergency.dev_attr.attr,
	&sensor_dev_attr_temp1_emergency_hyst.dev_attr.attr,
	&sensor_dev_attr_name.dev_attr.attr,
	&sensor_dev_attr_update_rate.dev_attr.attr,
	NULL
};
static struct attribute *hwmon_fan_rpm_attributes[] = {
@@ -898,8 +905,11 @@ static struct attribute *hwmon_pwm_fan_attributes[] = {
	NULL
};

static const struct attribute_group hwmon_attrgroup = {
	.attrs = hwmon_attributes,
static const struct attribute_group hwmon_default_attrgroup = {
	.attrs = hwmon_default_attributes,
};
static const struct attribute_group hwmon_temp_attrgroup = {
	.attrs = hwmon_temp_attributes,
};
static const struct attribute_group hwmon_fan_rpm_attrgroup = {
	.attrs = hwmon_fan_rpm_attributes,
@@ -931,13 +941,22 @@ nouveau_hwmon_init(struct drm_device *dev)
	}
	dev_set_drvdata(hwmon_dev, dev);

	/* default sysfs entries */
	ret = sysfs_create_group(&hwmon_dev->kobj, &hwmon_attrgroup);
	/* set the default attributes */
	ret = sysfs_create_group(&hwmon_dev->kobj, &hwmon_default_attrgroup);
	if (ret) {
	if (ret)
		goto error;
	}

	/* if the card has a working thermal sensor */
	if (therm->temp_get(therm) >= 0) {
		ret = sysfs_create_group(&hwmon_dev->kobj, &hwmon_temp_attrgroup);
		if (ret) {
		if (ret)
			goto error;
		}
	}

	/* if the card has a pwm fan */
	/*XXX: incorrect, need better detection for this, some boards have
	 * the gpio entries for pwm fan control even when there's no
@@ -979,11 +998,10 @@ nouveau_hwmon_fini(struct drm_device *dev)
	struct nouveau_pm *pm = nouveau_pm(dev);

	if (pm->hwmon) {
		sysfs_remove_group(&pm->hwmon->kobj, &hwmon_attrgroup);
		sysfs_remove_group(&pm->hwmon->kobj,
				   &hwmon_pwm_fan_attrgroup);
		sysfs_remove_group(&pm->hwmon->kobj,
				   &hwmon_fan_rpm_attrgroup);
		sysfs_remove_group(&pm->hwmon->kobj, &hwmon_default_attrgroup);
		sysfs_remove_group(&pm->hwmon->kobj, &hwmon_temp_attrgroup);
		sysfs_remove_group(&pm->hwmon->kobj, &hwmon_pwm_fan_attrgroup);
		sysfs_remove_group(&pm->hwmon->kobj, &hwmon_fan_rpm_attrgroup);

		hwmon_device_unregister(pm->hwmon);
	}
|
||||
swap_interval <<= 4;
|
||||
if (swap_interval == 0)
|
||||
swap_interval |= 0x100;
|
||||
if (chan == NULL)
|
||||
evo_sync(crtc->dev);
|
||||
|
||||
push = evo_wait(sync, 128);
|
||||
if (unlikely(push == NULL))
|
||||
@ -586,8 +588,6 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
|
||||
sync->addr ^= 0x10;
|
||||
sync->data++;
|
||||
FIRE_RING (chan);
|
||||
} else {
|
||||
evo_sync(crtc->dev);
|
||||
}
|
||||
|
||||
/* queue the flip */
|
||||
|
@ -468,13 +468,19 @@ static void cayman_gpu_init(struct radeon_device *rdev)
|
||||
(rdev->pdev->device == 0x9907) ||
|
||||
(rdev->pdev->device == 0x9908) ||
|
||||
(rdev->pdev->device == 0x9909) ||
|
||||
(rdev->pdev->device == 0x990B) ||
|
||||
(rdev->pdev->device == 0x990C) ||
|
||||
(rdev->pdev->device == 0x990F) ||
|
||||
(rdev->pdev->device == 0x9910) ||
|
||||
(rdev->pdev->device == 0x9917)) {
|
||||
(rdev->pdev->device == 0x9917) ||
|
||||
(rdev->pdev->device == 0x9999)) {
|
||||
rdev->config.cayman.max_simds_per_se = 6;
|
||||
rdev->config.cayman.max_backends_per_se = 2;
|
||||
} else if ((rdev->pdev->device == 0x9903) ||
|
||||
(rdev->pdev->device == 0x9904) ||
|
||||
(rdev->pdev->device == 0x990A) ||
|
||||
(rdev->pdev->device == 0x990D) ||
|
||||
(rdev->pdev->device == 0x990E) ||
|
||||
(rdev->pdev->device == 0x9913) ||
|
||||
(rdev->pdev->device == 0x9918)) {
|
||||
rdev->config.cayman.max_simds_per_se = 4;
|
||||
@ -483,6 +489,9 @@ static void cayman_gpu_init(struct radeon_device *rdev)
|
||||
(rdev->pdev->device == 0x9990) ||
|
||||
(rdev->pdev->device == 0x9991) ||
|
||||
(rdev->pdev->device == 0x9994) ||
|
||||
(rdev->pdev->device == 0x9995) ||
|
||||
(rdev->pdev->device == 0x9996) ||
|
||||
(rdev->pdev->device == 0x999A) ||
|
||||
(rdev->pdev->device == 0x99A0)) {
|
||||
rdev->config.cayman.max_simds_per_se = 3;
|
||||
rdev->config.cayman.max_backends_per_se = 1;
|
||||
@ -616,11 +625,22 @@ static void cayman_gpu_init(struct radeon_device *rdev)
|
||||
WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
|
||||
WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
|
||||
|
||||
if ((rdev->config.cayman.max_backends_per_se == 1) &&
|
||||
(rdev->flags & RADEON_IS_IGP)) {
|
||||
if ((disabled_rb_mask & 3) == 1) {
|
||||
/* RB0 disabled, RB1 enabled */
|
||||
tmp = 0x11111111;
|
||||
} else {
|
||||
/* RB1 disabled, RB0 enabled */
|
||||
tmp = 0x00000000;
|
||||
}
|
||||
} else {
|
||||
tmp = gb_addr_config & NUM_PIPES_MASK;
|
||||
tmp = r6xx_remap_render_backend(rdev, tmp,
|
||||
rdev->config.cayman.max_backends_per_se *
|
||||
rdev->config.cayman.max_shader_engines,
|
||||
CAYMAN_MAX_BACKENDS, disabled_rb_mask);
|
||||
}
|
||||
WREG32(GB_BACKEND_MAP, tmp);
|
||||
|
||||
cgts_tcc_disable = 0xffff0000;
|
||||
@ -1771,6 +1791,7 @@ int cayman_resume(struct radeon_device *rdev)
|
||||
int cayman_suspend(struct radeon_device *rdev)
|
||||
{
|
||||
r600_audio_fini(rdev);
|
||||
radeon_vm_manager_fini(rdev);
|
||||
cayman_cp_enable(rdev, false);
|
||||
cayman_dma_stop(rdev);
|
||||
evergreen_irq_suspend(rdev);
|
||||
|
@ -122,10 +122,7 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size,
|
||||
goto out_cleanup;
|
||||
}
|
||||
|
||||
/* r100 doesn't have dma engine so skip the test */
|
||||
/* also, VRAM-to-VRAM test doesn't make much sense for DMA */
|
||||
/* skip it as well if domains are the same */
|
||||
if ((rdev->asic->copy.dma) && (sdomain != ddomain)) {
|
||||
if (rdev->asic->copy.dma) {
|
||||
time = radeon_benchmark_do_move(rdev, size, saddr, daddr,
|
||||
RADEON_BENCHMARK_COPY_DMA, n);
|
||||
if (time < 0)
|
||||
@ -135,6 +132,7 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size,
|
||||
sdomain, ddomain, "dma");
|
||||
}
|
||||
|
||||
if (rdev->asic->copy.blit) {
|
||||
time = radeon_benchmark_do_move(rdev, size, saddr, daddr,
|
||||
RADEON_BENCHMARK_COPY_BLIT, n);
|
||||
if (time < 0)
|
||||
@ -142,6 +140,7 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size,
|
||||
if (time > 0)
|
||||
radeon_benchmark_log_results(n, size, time,
|
||||
sdomain, ddomain, "blit");
|
||||
}
|
||||
|
||||
out_cleanup:
|
||||
if (sobj) {
|
||||
|
@ -4469,6 +4469,7 @@ int si_resume(struct radeon_device *rdev)
|
||||
|
||||
int si_suspend(struct radeon_device *rdev)
|
||||
{
|
||||
radeon_vm_manager_fini(rdev);
|
||||
si_cp_enable(rdev, false);
|
||||
cayman_dma_stop(rdev);
|
||||
si_irq_suspend(rdev);
|
||||
|
@ -80,6 +80,7 @@
|
||||
/* PCI DIDs for the Intel SMBus Message Transport (SMT) Devices */
|
||||
#define PCI_DEVICE_ID_INTEL_S1200_SMT0 0x0c59
|
||||
#define PCI_DEVICE_ID_INTEL_S1200_SMT1 0x0c5a
|
||||
#define PCI_DEVICE_ID_INTEL_AVOTON_SMT 0x1f15
|
||||
|
||||
#define ISMT_DESC_ENTRIES 32 /* number of descriptor entries */
|
||||
#define ISMT_MAX_RETRIES 3 /* number of SMBus retries to attempt */
|
||||
@ -185,6 +186,7 @@ struct ismt_priv {
|
||||
static const DEFINE_PCI_DEVICE_TABLE(ismt_ids) = {
|
||||
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_S1200_SMT0) },
|
||||
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_S1200_SMT1) },
|
||||
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_AVOTON_SMT) },
|
||||
{ 0, }
|
||||
};
|
||||
|
||||
|
@ -411,7 +411,11 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev)
|
||||
int clk_multiplier = I2C_CLK_MULTIPLIER_STD_FAST_MODE;
|
||||
u32 clk_divisor;
|
||||
|
||||
tegra_i2c_clock_enable(i2c_dev);
|
||||
err = tegra_i2c_clock_enable(i2c_dev);
|
||||
if (err < 0) {
|
||||
dev_err(i2c_dev->dev, "Clock enable failed %d\n", err);
|
||||
return err;
|
||||
}
|
||||
|
||||
tegra_periph_reset_assert(i2c_dev->div_clk);
|
||||
udelay(2);
|
||||
@ -628,7 +632,12 @@ static int tegra_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
|
||||
if (i2c_dev->is_suspended)
|
||||
return -EBUSY;
|
||||
|
||||
tegra_i2c_clock_enable(i2c_dev);
|
||||
ret = tegra_i2c_clock_enable(i2c_dev);
|
||||
if (ret < 0) {
|
||||
dev_err(i2c_dev->dev, "Clock enable failed %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
for (i = 0; i < num; i++) {
|
||||
enum msg_end_type end_type = MSG_END_STOP;
|
||||
if (i < (num - 1)) {
|
||||
|
@ -3,7 +3,7 @@
|
||||
*
|
||||
* Copyright (c) 2010 Ericsson AB.
|
||||
*
|
||||
* Author: Guenter Roeck <guenter.roeck@ericsson.com>
|
||||
* Author: Guenter Roeck <linux@roeck-us.net>
|
||||
*
|
||||
* Derived from:
|
||||
* pca954x.c
|
||||
|
@ -194,8 +194,10 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
|
||||
wq->rq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev),
|
||||
wq->rq.memsize, &(wq->rq.dma_addr),
|
||||
GFP_KERNEL);
|
||||
if (!wq->rq.queue)
|
||||
if (!wq->rq.queue) {
|
||||
ret = -ENOMEM;
|
||||
goto free_sq;
|
||||
}
|
||||
PDBG("%s sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
|
||||
__func__, wq->sq.queue,
|
||||
(unsigned long long)virt_to_phys(wq->sq.queue),
|
||||
|
@ -620,7 +620,7 @@ void ipath_ib_rcv(struct ipath_ibdev *dev, void *rhdr, void *data,
|
||||
goto bail;
|
||||
}
|
||||
|
||||
opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
|
||||
opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0x7f;
|
||||
dev->opstats[opcode].n_bytes += tlen;
|
||||
dev->opstats[opcode].n_packets++;
|
||||
|
||||
|
@ -1,7 +1,7 @@
|
||||
config INFINIBAND_QIB
|
||||
tristate "QLogic PCIe HCA support"
|
||||
tristate "Intel PCIe HCA support"
|
||||
depends on 64BIT
|
||||
---help---
|
||||
This is a low-level driver for QLogic PCIe QLE InfiniBand host
|
||||
channel adapters. This driver does not support the QLogic
|
||||
This is a low-level driver for Intel PCIe QLE InfiniBand host
|
||||
channel adapters. This driver does not support the Intel
|
||||
HyperTransport card (model QHT7140).
|
||||
|
@ -1,4 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2013 Intel Corporation. All rights reserved.
|
||||
* Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
|
||||
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
|
||||
*
|
||||
@ -63,8 +64,8 @@ MODULE_PARM_DESC(compat_ddr_negotiate,
|
||||
"Attempt pre-IBTA 1.2 DDR speed negotiation");
|
||||
|
||||
MODULE_LICENSE("Dual BSD/GPL");
|
||||
MODULE_AUTHOR("QLogic <support@qlogic.com>");
|
||||
MODULE_DESCRIPTION("QLogic IB driver");
|
||||
MODULE_AUTHOR("Intel <ibsupport@intel.com>");
|
||||
MODULE_DESCRIPTION("Intel IB driver");
|
||||
MODULE_VERSION(QIB_DRIVER_VERSION);
|
||||
|
||||
/*
|
||||
|
@ -1,4 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2013 Intel Corporation. All rights reserved.
|
||||
* Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
|
||||
* All rights reserved.
|
||||
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
|
||||
@ -51,7 +52,7 @@ static u32 qib_6120_iblink_state(u64);
|
||||
|
||||
/*
|
||||
* This file contains all the chip-specific register information and
|
||||
* access functions for the QLogic QLogic_IB PCI-Express chip.
|
||||
* access functions for the Intel Intel_IB PCI-Express chip.
|
||||
*
|
||||
*/
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2012 Intel Corporation. All rights reserved.
|
||||
* Copyright (c) 2012, 2013 Intel Corporation. All rights reserved.
|
||||
* Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
|
||||
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
|
||||
*
|
||||
@ -1138,7 +1138,7 @@ void qib_disable_after_error(struct qib_devdata *dd)
|
||||
static void qib_remove_one(struct pci_dev *);
|
||||
static int qib_init_one(struct pci_dev *, const struct pci_device_id *);
|
||||
|
||||
#define DRIVER_LOAD_MSG "QLogic " QIB_DRV_NAME " loaded: "
|
||||
#define DRIVER_LOAD_MSG "Intel " QIB_DRV_NAME " loaded: "
|
||||
#define PFX QIB_DRV_NAME ": "
|
||||
|
||||
static DEFINE_PCI_DEVICE_TABLE(qib_pci_tbl) = {
|
||||
@ -1355,7 +1355,7 @@ static int qib_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
|
||||
dd = qib_init_iba6120_funcs(pdev, ent);
|
||||
#else
|
||||
qib_early_err(&pdev->dev,
|
||||
"QLogic PCIE device 0x%x cannot work if CONFIG_PCI_MSI is not enabled\n",
|
||||
"Intel PCIE device 0x%x cannot work if CONFIG_PCI_MSI is not enabled\n",
|
||||
ent->device);
|
||||
dd = ERR_PTR(-ENODEV);
|
||||
#endif
|
||||
@ -1371,7 +1371,7 @@ static int qib_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
|
||||
|
||||
default:
|
||||
qib_early_err(&pdev->dev,
|
||||
"Failing on unknown QLogic deviceid 0x%x\n",
|
||||
"Failing on unknown Intel deviceid 0x%x\n",
|
||||
ent->device);
|
||||
ret = -ENODEV;
|
||||
}
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2012 Intel Corporation. All rights reserved.
|
||||
* Copyright (c) 2013 Intel Corporation. All rights reserved.
|
||||
* Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
|
||||
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
|
||||
*
|
||||
@ -44,7 +44,7 @@
|
||||
#include "qib.h"
|
||||
#include "qib_7220.h"
|
||||
|
||||
#define SD7220_FW_NAME "qlogic/sd7220.fw"
|
||||
#define SD7220_FW_NAME "intel/sd7220.fw"
|
||||
MODULE_FIRMWARE(SD7220_FW_NAME);
|
||||
|
||||
/*
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2012 Intel Corporation. All rights reserved.
|
||||
* Copyright (c) 2012, 2013 Intel Corporation. All rights reserved.
|
||||
* Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
|
||||
* Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
|
||||
*
|
||||
@ -2224,7 +2224,7 @@ int qib_register_ib_device(struct qib_devdata *dd)
|
||||
ibdev->dma_ops = &qib_dma_mapping_ops;
|
||||
|
||||
snprintf(ibdev->node_desc, sizeof(ibdev->node_desc),
|
||||
"QLogic Infiniband HCA %s", init_utsname()->nodename);
|
||||
"Intel Infiniband HCA %s", init_utsname()->nodename);
|
||||
|
||||
ret = ib_register_device(ibdev, qib_create_port_files);
|
||||
if (ret)
|
||||
|
@ -758,9 +758,13 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
|
||||
if (++priv->tx_outstanding == ipoib_sendq_size) {
|
||||
ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
|
||||
tx->qp->qp_num);
|
||||
if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP))
|
||||
ipoib_warn(priv, "request notify on send CQ failed\n");
|
||||
netif_stop_queue(dev);
|
||||
rc = ib_req_notify_cq(priv->send_cq,
|
||||
IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
|
||||
if (rc < 0)
|
||||
ipoib_warn(priv, "request notify on send CQ failed\n");
|
||||
else if (rc)
|
||||
ipoib_send_comp_handler(priv->send_cq, dev);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -130,7 +130,7 @@ config IRQ_REMAP
|
||||
# OMAP IOMMU support
|
||||
config OMAP_IOMMU
|
||||
bool "OMAP IOMMU Support"
|
||||
depends on ARCH_OMAP
|
||||
depends on ARCH_OMAP2PLUS
|
||||
select IOMMU_API
|
||||
|
||||
config OMAP_IOVMM
|
||||
|
@ -2466,8 +2466,7 @@ static int device_change_notifier(struct notifier_block *nb,
|
||||
|
||||
/* allocate a protection domain if a device is added */
|
||||
dma_domain = find_protection_domain(devid);
|
||||
if (dma_domain)
|
||||
goto out;
|
||||
if (!dma_domain) {
|
||||
dma_domain = dma_ops_domain_alloc();
|
||||
if (!dma_domain)
|
||||
goto out;
|
||||
@ -2476,8 +2475,7 @@ static int device_change_notifier(struct notifier_block *nb,
|
||||
spin_lock_irqsave(&iommu_pd_list_lock, flags);
|
||||
list_add_tail(&dma_domain->list, &iommu_pd_list);
|
||||
spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
|
||||
|
||||
dev_data = get_dev_data(dev);
|
||||
}
|
||||
|
||||
dev->archdata.dma_ops = &amd_iommu_dma_ops;
|
||||
|
||||
|
@ -980,7 +980,7 @@ static void __init free_iommu_all(void)
|
||||
* BIOS should disable L2B micellaneous clock gating by setting
|
||||
* L2_L2B_CK_GATE_CONTROL[CKGateL2BMiscDisable](D0F2xF4_x90[2]) = 1b
|
||||
*/
|
||||
static void __init amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
|
||||
static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
|
||||
{
|
||||
u32 value;
|
||||
|
||||
|
@ -2,7 +2,6 @@
|
||||
#include <linux/cpumask.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/string.h>
|
||||
#include <linux/cpumask.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/msi.h>
|
||||
#include <linux/irq.h>
|
||||
|
@ -1025,6 +1025,8 @@ void dm_bufio_prefetch(struct dm_bufio_client *c,
|
||||
{
|
||||
struct blk_plug plug;
|
||||
|
||||
BUG_ON(dm_bufio_in_request());
|
||||
|
||||
blk_start_plug(&plug);
|
||||
dm_bufio_lock(c);
|
||||
|
||||
|
@ -83,6 +83,8 @@ struct cache_disk_superblock {
|
||||
__le32 read_misses;
|
||||
__le32 write_hits;
|
||||
__le32 write_misses;
|
||||
|
||||
__le32 policy_version[CACHE_POLICY_VERSION_SIZE];
|
||||
} __packed;
|
||||
|
||||
struct dm_cache_metadata {
|
||||
@ -109,6 +111,7 @@ struct dm_cache_metadata {
|
||||
bool clean_when_opened:1;
|
||||
|
||||
char policy_name[CACHE_POLICY_NAME_SIZE];
|
||||
unsigned policy_version[CACHE_POLICY_VERSION_SIZE];
|
||||
size_t policy_hint_size;
|
||||
struct dm_cache_statistics stats;
|
||||
};
|
||||
@ -268,7 +271,8 @@ static int __write_initial_superblock(struct dm_cache_metadata *cmd)
|
||||
memset(disk_super->uuid, 0, sizeof(disk_super->uuid));
|
||||
disk_super->magic = cpu_to_le64(CACHE_SUPERBLOCK_MAGIC);
|
||||
disk_super->version = cpu_to_le32(CACHE_VERSION);
|
||||
memset(disk_super->policy_name, 0, CACHE_POLICY_NAME_SIZE);
|
||||
memset(disk_super->policy_name, 0, sizeof(disk_super->policy_name));
|
||||
memset(disk_super->policy_version, 0, sizeof(disk_super->policy_version));
|
||||
disk_super->policy_hint_size = 0;
|
||||
|
||||
r = dm_sm_copy_root(cmd->metadata_sm, &disk_super->metadata_space_map_root,
|
||||
@ -284,7 +288,6 @@ static int __write_initial_superblock(struct dm_cache_metadata *cmd)
|
||||
disk_super->metadata_block_size = cpu_to_le32(DM_CACHE_METADATA_BLOCK_SIZE >> SECTOR_SHIFT);
|
||||
disk_super->data_block_size = cpu_to_le32(cmd->data_block_size);
|
||||
disk_super->cache_blocks = cpu_to_le32(0);
|
||||
memset(disk_super->policy_name, 0, sizeof(disk_super->policy_name));
|
||||
|
||||
disk_super->read_hits = cpu_to_le32(0);
|
||||
disk_super->read_misses = cpu_to_le32(0);
|
||||
@ -478,6 +481,9 @@ static void read_superblock_fields(struct dm_cache_metadata *cmd,
|
||||
cmd->data_block_size = le32_to_cpu(disk_super->data_block_size);
|
||||
cmd->cache_blocks = to_cblock(le32_to_cpu(disk_super->cache_blocks));
|
||||
strncpy(cmd->policy_name, disk_super->policy_name, sizeof(cmd->policy_name));
|
||||
cmd->policy_version[0] = le32_to_cpu(disk_super->policy_version[0]);
|
||||
cmd->policy_version[1] = le32_to_cpu(disk_super->policy_version[1]);
|
||||
cmd->policy_version[2] = le32_to_cpu(disk_super->policy_version[2]);
|
||||
cmd->policy_hint_size = le32_to_cpu(disk_super->policy_hint_size);
|
||||
|
||||
cmd->stats.read_hits = le32_to_cpu(disk_super->read_hits);
|
||||
@ -572,6 +578,9 @@ static int __commit_transaction(struct dm_cache_metadata *cmd,
|
||||
disk_super->discard_nr_blocks = cpu_to_le64(from_dblock(cmd->discard_nr_blocks));
|
||||
disk_super->cache_blocks = cpu_to_le32(from_cblock(cmd->cache_blocks));
|
||||
strncpy(disk_super->policy_name, cmd->policy_name, sizeof(disk_super->policy_name));
|
||||
disk_super->policy_version[0] = cpu_to_le32(cmd->policy_version[0]);
|
||||
disk_super->policy_version[1] = cpu_to_le32(cmd->policy_version[1]);
|
||||
disk_super->policy_version[2] = cpu_to_le32(cmd->policy_version[2]);
|
||||
|
||||
disk_super->read_hits = cpu_to_le32(cmd->stats.read_hits);
|
||||
disk_super->read_misses = cpu_to_le32(cmd->stats.read_misses);
|
||||
@ -854,18 +863,43 @@ struct thunk {
|
||||
bool hints_valid;
|
||||
};
|
||||
|
||||
static bool policy_unchanged(struct dm_cache_metadata *cmd,
|
||||
struct dm_cache_policy *policy)
|
||||
{
|
||||
const char *policy_name = dm_cache_policy_get_name(policy);
|
||||
const unsigned *policy_version = dm_cache_policy_get_version(policy);
|
||||
size_t policy_hint_size = dm_cache_policy_get_hint_size(policy);
|
||||
|
||||
/*
|
||||
* Ensure policy names match.
|
||||
*/
|
||||
if (strncmp(cmd->policy_name, policy_name, sizeof(cmd->policy_name)))
|
||||
return false;
|
||||
|
||||
/*
|
||||
* Ensure policy major versions match.
|
||||
*/
|
||||
if (cmd->policy_version[0] != policy_version[0])
|
||||
return false;
|
||||
|
||||
/*
|
||||
* Ensure policy hint sizes match.
|
||||
*/
|
||||
if (cmd->policy_hint_size != policy_hint_size)
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool hints_array_initialized(struct dm_cache_metadata *cmd)
|
||||
{
|
||||
return cmd->hint_root && cmd->policy_hint_size;
|
||||
}
|
||||
|
||||
static bool hints_array_available(struct dm_cache_metadata *cmd,
|
||||
const char *policy_name)
|
||||
struct dm_cache_policy *policy)
|
||||
{
|
||||
bool policy_names_match = !strncmp(cmd->policy_name, policy_name,
|
||||
sizeof(cmd->policy_name));
|
||||
|
||||
return cmd->clean_when_opened && policy_names_match &&
|
||||
return cmd->clean_when_opened && policy_unchanged(cmd, policy) &&
|
||||
hints_array_initialized(cmd);
|
||||
}
|
||||
|
||||
@ -899,7 +933,8 @@ static int __load_mapping(void *context, uint64_t cblock, void *leaf)
|
||||
return r;
|
||||
}
|
||||
|
||||
static int __load_mappings(struct dm_cache_metadata *cmd, const char *policy_name,
|
||||
static int __load_mappings(struct dm_cache_metadata *cmd,
|
||||
struct dm_cache_policy *policy,
|
||||
load_mapping_fn fn, void *context)
|
||||
{
|
||||
struct thunk thunk;
|
||||
@ -909,18 +944,19 @@ static int __load_mappings(struct dm_cache_metadata *cmd, const char *policy_nam
|
||||
|
||||
thunk.cmd = cmd;
|
||||
thunk.respect_dirty_flags = cmd->clean_when_opened;
|
||||
thunk.hints_valid = hints_array_available(cmd, policy_name);
|
||||
thunk.hints_valid = hints_array_available(cmd, policy);
|
||||
|
||||
return dm_array_walk(&cmd->info, cmd->root, __load_mapping, &thunk);
|
||||
}
|
||||
|
||||
int dm_cache_load_mappings(struct dm_cache_metadata *cmd, const char *policy_name,
|
||||
int dm_cache_load_mappings(struct dm_cache_metadata *cmd,
|
||||
struct dm_cache_policy *policy,
|
||||
load_mapping_fn fn, void *context)
|
||||
{
|
||||
int r;
|
||||
|
||||
down_read(&cmd->root_lock);
|
||||
r = __load_mappings(cmd, policy_name, fn, context);
|
||||
r = __load_mappings(cmd, policy, fn, context);
|
||||
up_read(&cmd->root_lock);
|
||||
|
||||
return r;
|
||||
@ -979,7 +1015,7 @@ static int __dirty(struct dm_cache_metadata *cmd, dm_cblock_t cblock, bool dirty
|
||||
/* nothing to be done */
|
||||
return 0;
|
||||
|
||||
value = pack_value(oblock, flags | (dirty ? M_DIRTY : 0));
|
||||
value = pack_value(oblock, (flags & ~M_DIRTY) | (dirty ? M_DIRTY : 0));
|
||||
__dm_bless_for_disk(&value);
|
||||
|
||||
r = dm_array_set_value(&cmd->info, cmd->root, from_cblock(cblock),
|
||||
@ -1070,13 +1106,15 @@ static int begin_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *po
|
||||
__le32 value;
|
||||
size_t hint_size;
|
||||
const char *policy_name = dm_cache_policy_get_name(policy);
|
||||
const unsigned *policy_version = dm_cache_policy_get_version(policy);
|
||||
|
||||
if (!policy_name[0] ||
|
||||
(strlen(policy_name) > sizeof(cmd->policy_name) - 1))
|
||||
return -EINVAL;
|
||||
|
||||
if (strcmp(cmd->policy_name, policy_name)) {
|
||||
if (!policy_unchanged(cmd, policy)) {
|
||||
strncpy(cmd->policy_name, policy_name, sizeof(cmd->policy_name));
|
||||
memcpy(cmd->policy_version, policy_version, sizeof(cmd->policy_version));
|
||||
|
||||
hint_size = dm_cache_policy_get_hint_size(policy);
|
||||
if (!hint_size)
|
||||
|
@ -89,7 +89,7 @@ typedef int (*load_mapping_fn)(void *context, dm_oblock_t oblock,
|
||||
dm_cblock_t cblock, bool dirty,
|
||||
uint32_t hint, bool hint_valid);
|
||||
int dm_cache_load_mappings(struct dm_cache_metadata *cmd,
|
||||
const char *policy_name,
|
||||
struct dm_cache_policy *policy,
|
||||
load_mapping_fn fn,
|
||||
void *context);
|
||||
|
||||
|
@ -17,7 +17,6 @@
|
||||
/*----------------------------------------------------------------*/
|
||||
|
||||
#define DM_MSG_PREFIX "cache cleaner"
|
||||
#define CLEANER_VERSION "1.0.0"
|
||||
|
||||
/* Cache entry struct. */
|
||||
struct wb_cache_entry {
|
||||
@ -434,6 +433,7 @@ static struct dm_cache_policy *wb_create(dm_cblock_t cache_size,
|
||||
|
||||
static struct dm_cache_policy_type wb_policy_type = {
|
||||
.name = "cleaner",
|
||||
.version = {1, 0, 0},
|
||||
.hint_size = 0,
|
||||
.owner = THIS_MODULE,
|
||||
.create = wb_create
|
||||
@ -446,7 +446,10 @@ static int __init wb_init(void)
|
||||
if (r < 0)
|
||||
DMERR("register failed %d", r);
|
||||
else
|
||||
DMINFO("version " CLEANER_VERSION " loaded");
|
||||
DMINFO("version %u.%u.%u loaded",
|
||||
wb_policy_type.version[0],
|
||||
wb_policy_type.version[1],
|
||||
wb_policy_type.version[2]);
|
||||
|
||||
return r;
|
||||
}
|
||||
|
@ -117,6 +117,8 @@ void dm_cache_policy_destroy(struct dm_cache_policy *p);
|
||||
*/
|
||||
const char *dm_cache_policy_get_name(struct dm_cache_policy *p);
|
||||
|
||||
const unsigned *dm_cache_policy_get_version(struct dm_cache_policy *p);
|
||||
|
||||
size_t dm_cache_policy_get_hint_size(struct dm_cache_policy *p);
|
||||
|
||||
/*----------------------------------------------------------------*/
|
||||
|
@ -14,7 +14,6 @@
|
||||
#include <linux/vmalloc.h>
|
||||
|
||||
#define DM_MSG_PREFIX "cache-policy-mq"
|
||||
#define MQ_VERSION "1.0.0"
|
||||
|
||||
static struct kmem_cache *mq_entry_cache;
|
||||
|
||||
@ -1133,6 +1132,7 @@ bad_cache_alloc:
|
||||
|
||||
static struct dm_cache_policy_type mq_policy_type = {
|
||||
.name = "mq",
|
||||
.version = {1, 0, 0},
|
||||
.hint_size = 4,
|
||||
.owner = THIS_MODULE,
|
||||
.create = mq_create
|
||||
@ -1140,6 +1140,7 @@ static struct dm_cache_policy_type mq_policy_type = {
|
||||
|
||||
static struct dm_cache_policy_type default_policy_type = {
|
||||
.name = "default",
|
||||
.version = {1, 0, 0},
|
||||
.hint_size = 4,
|
||||
.owner = THIS_MODULE,
|
||||
.create = mq_create
|
||||
@ -1164,7 +1165,10 @@ static int __init mq_init(void)
|
||||
|
||||
r = dm_cache_policy_register(&default_policy_type);
|
||||
if (!r) {
|
||||
DMINFO("version " MQ_VERSION " loaded");
|
||||
DMINFO("version %u.%u.%u loaded",
|
||||
mq_policy_type.version[0],
|
||||
mq_policy_type.version[1],
|
||||
mq_policy_type.version[2]);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -150,6 +150,14 @@ const char *dm_cache_policy_get_name(struct dm_cache_policy *p)
}
EXPORT_SYMBOL_GPL(dm_cache_policy_get_name);

const unsigned *dm_cache_policy_get_version(struct dm_cache_policy *p)
{
struct dm_cache_policy_type *t = p->private;

return t->version;
}
EXPORT_SYMBOL_GPL(dm_cache_policy_get_version);

size_t dm_cache_policy_get_hint_size(struct dm_cache_policy *p)
{
struct dm_cache_policy_type *t = p->private;

@ -196,6 +196,7 @@ struct dm_cache_policy {
* We maintain a little register of the different policy types.
*/
#define CACHE_POLICY_NAME_SIZE 16
#define CACHE_POLICY_VERSION_SIZE 3

struct dm_cache_policy_type {
/* For use by the register code only. */

@ -206,6 +207,7 @@ struct dm_cache_policy_type {
* what gets passed on the target line to select your policy.
*/
char name[CACHE_POLICY_NAME_SIZE];
unsigned version[CACHE_POLICY_VERSION_SIZE];

/*
* Policies may store a hint for each cache block.
@ -142,6 +142,7 @@ struct cache {
spinlock_t lock;
struct bio_list deferred_bios;
struct bio_list deferred_flush_bios;
struct bio_list deferred_writethrough_bios;
struct list_head quiesced_migrations;
struct list_head completed_migrations;
struct list_head need_commit_migrations;

@ -158,7 +159,7 @@ struct cache {
/*
* origin_blocks entries, discarded if set.
*/
sector_t discard_block_size; /* a power of 2 times sectors per block */
uint32_t discard_block_size; /* a power of 2 times sectors per block */
dm_dblock_t discard_nr_blocks;
unsigned long *discard_bitset;

@ -199,6 +200,11 @@ struct per_bio_data {
bool tick:1;
unsigned req_nr:2;
struct dm_deferred_entry *all_io_entry;

/* writethrough fields */
struct cache *cache;
dm_cblock_t cblock;
bio_end_io_t *saved_bi_end_io;
};

struct dm_cache_migration {

@ -412,17 +418,24 @@ static bool block_size_is_power_of_two(struct cache *cache)
return cache->sectors_per_block_shift >= 0;
}

static dm_block_t block_div(dm_block_t b, uint32_t n)
{
do_div(b, n);

return b;
}

static dm_dblock_t oblock_to_dblock(struct cache *cache, dm_oblock_t oblock)
{
sector_t discard_blocks = cache->discard_block_size;
uint32_t discard_blocks = cache->discard_block_size;
dm_block_t b = from_oblock(oblock);

if (!block_size_is_power_of_two(cache))
(void) sector_div(discard_blocks, cache->sectors_per_block);
discard_blocks = discard_blocks / cache->sectors_per_block;
else
discard_blocks >>= cache->sectors_per_block_shift;

(void) sector_div(b, discard_blocks);
b = block_div(b, discard_blocks);

return to_dblock(b);
}
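The new block_div() helper exists because the kernel's do_div(x, n) divides the 64-bit x in place and returns the remainder, which makes the old "(void) sector_div(...)" call sites easy to misread. Wrapping it so the quotient is returned by value keeps call sites like "b = block_div(b, n)" obvious. A userspace stand-in for do_div (the statement-expression is a GCC extension, as in the kernel):

#include <stdint.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's do_div(): divides x in place
 * and evaluates to the remainder.
 */
#define do_div(x, n) ({			\
	uint32_t _rem = (uint32_t)((x) % (n));	\
	(x) /= (n);			\
	_rem;				\
})

static uint64_t block_div(uint64_t b, uint32_t n)
{
	do_div(b, n);	/* b now holds the quotient; remainder discarded */

	return b;
}

int main(void)
{
	printf("%llu\n", (unsigned long long)block_div(1000, 64)); /* 15 */
	return 0;
}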
@ -609,6 +622,56 @@ static void issue(struct cache *cache, struct bio *bio)
spin_unlock_irqrestore(&cache->lock, flags);
}

static void defer_writethrough_bio(struct cache *cache, struct bio *bio)
{
unsigned long flags;

spin_lock_irqsave(&cache->lock, flags);
bio_list_add(&cache->deferred_writethrough_bios, bio);
spin_unlock_irqrestore(&cache->lock, flags);

wake_worker(cache);
}

static void writethrough_endio(struct bio *bio, int err)
{
struct per_bio_data *pb = get_per_bio_data(bio);
bio->bi_end_io = pb->saved_bi_end_io;

if (err) {
bio_endio(bio, err);
return;
}

remap_to_cache(pb->cache, bio, pb->cblock);

/*
* We can't issue this bio directly, since we're in interrupt
* context. So it gets put on a bio list for processing by the
* worker thread.
*/
defer_writethrough_bio(pb->cache, bio);
}

/*
* When running in writethrough mode we need to send writes to clean blocks
* to both the cache and origin devices. In future we'd like to clone the
* bio and send them in parallel, but for now we're doing them in
* series as this is easier.
*/
static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio,
dm_oblock_t oblock, dm_cblock_t cblock)
{
struct per_bio_data *pb = get_per_bio_data(bio);

pb->cache = cache;
pb->cblock = cblock;
pb->saved_bi_end_io = bio->bi_end_io;
bio->bi_end_io = writethrough_endio;

remap_to_origin_clear_discard(pb->cache, bio, oblock);
}
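The core trick here is end_io chaining: stash the bio's original completion callback, substitute your own, and restore the original before completing. A userspace sketch with an invented request struct (the dm code does this on struct bio):

#include <stdio.h>

struct request;
typedef void (*end_io_fn)(struct request *, int err);

struct request {
	end_io_fn end_io;	/* called when the request completes */
	end_io_fn saved_end_io;	/* original callback, restored later */
};

static void original_end_io(struct request *rq, int err)
{
	printf("original completion, err=%d\n", err);
}

static void intercept_end_io(struct request *rq, int err)
{
	rq->end_io = rq->saved_end_io;	/* restore before completing */
	if (err) {
		rq->end_io(rq, err);	/* propagate failures unchanged */
		return;
	}
	/* Success: hand the request to the next stage.  The dm code
	 * remaps to the cache device and defers to a worker here.
	 */
	rq->end_io(rq, 0);
}

int main(void)
{
	struct request rq = { .end_io = original_end_io };

	rq.saved_end_io = rq.end_io;	/* remap_to_origin_then_cache() */
	rq.end_io = intercept_end_io;

	rq.end_io(&rq, 0);		/* simulate completion */
	return 0;
}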
/*----------------------------------------------------------------
* Migration processing
*

@ -1002,7 +1065,7 @@ static void process_discard_bio(struct cache *cache, struct bio *bio)
dm_block_t end_block = bio->bi_sector + bio_sectors(bio);
dm_block_t b;

(void) sector_div(end_block, cache->discard_block_size);
end_block = block_div(end_block, cache->discard_block_size);

for (b = start_block; b < end_block; b++)
set_discard(cache, to_dblock(b));

@ -1070,14 +1133,9 @@ static void process_bio(struct cache *cache, struct prealloc *structs,
inc_hit_counter(cache, bio);
pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);

if (is_writethrough_io(cache, bio, lookup_result.cblock)) {
/*
* No need to mark anything dirty in write through mode.
*/
pb->req_nr == 0 ?
remap_to_cache(cache, bio, lookup_result.cblock) :
remap_to_origin_clear_discard(cache, bio, block);
} else
if (is_writethrough_io(cache, bio, lookup_result.cblock))
remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock);
else
remap_to_cache_dirty(cache, bio, block, lookup_result.cblock);

issue(cache, bio);

@ -1086,17 +1144,8 @@ static void process_bio(struct cache *cache, struct prealloc *structs,
case POLICY_MISS:
inc_miss_counter(cache, bio);
pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);

if (pb->req_nr != 0) {
/*
* This is a duplicate writethrough io that is no
* longer needed because the block has been demoted.
*/
bio_endio(bio, 0);
} else {
remap_to_origin_clear_discard(cache, bio, block);
issue(cache, bio);
}
break;

case POLICY_NEW:
@ -1217,6 +1266,23 @@ static void process_deferred_flush_bios(struct cache *cache, bool submit_bios)
submit_bios ? generic_make_request(bio) : bio_io_error(bio);
}

static void process_deferred_writethrough_bios(struct cache *cache)
{
unsigned long flags;
struct bio_list bios;
struct bio *bio;

bio_list_init(&bios);

spin_lock_irqsave(&cache->lock, flags);
bio_list_merge(&bios, &cache->deferred_writethrough_bios);
bio_list_init(&cache->deferred_writethrough_bios);
spin_unlock_irqrestore(&cache->lock, flags);

while ((bio = bio_list_pop(&bios)))
generic_make_request(bio);
}

static void writeback_some_dirty_blocks(struct cache *cache)
{
int r = 0;

@ -1313,6 +1379,7 @@ static int more_work(struct cache *cache)
else
return !bio_list_empty(&cache->deferred_bios) ||
!bio_list_empty(&cache->deferred_flush_bios) ||
!bio_list_empty(&cache->deferred_writethrough_bios) ||
!list_empty(&cache->quiesced_migrations) ||
!list_empty(&cache->completed_migrations) ||
!list_empty(&cache->need_commit_migrations);

@ -1331,6 +1398,8 @@ static void do_worker(struct work_struct *ws)

writeback_some_dirty_blocks(cache);

process_deferred_writethrough_bios(cache);

if (commit_if_needed(cache)) {
process_deferred_flush_bios(cache, false);
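process_deferred_writethrough_bios() uses the classic drain pattern: splice the shared list into a local one under the lock, then process entries with the lock dropped. A POSIX-threads stand-in with invented types:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *next;
	int payload;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *deferred;	/* shared, lock-protected list head */

static void process_deferred(void)
{
	struct node *local, *n;

	pthread_mutex_lock(&lock);
	local = deferred;	/* bio_list_merge() + bio_list_init() */
	deferred = NULL;
	pthread_mutex_unlock(&lock);

	while ((n = local)) {	/* bio_list_pop() loop, no lock held */
		local = n->next;
		printf("processing %d\n", n->payload);
		free(n);
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));

		n->payload = i;
		pthread_mutex_lock(&lock);
		n->next = deferred;
		deferred = n;
		pthread_mutex_unlock(&lock);
	}
	process_deferred();
	return 0;
}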
@ -1756,8 +1825,11 @@ static int create_cache_policy(struct cache *cache, struct cache_args *ca,
}

r = set_config_values(cache->policy, ca->policy_argc, ca->policy_argv);
if (r)
if (r) {
*error = "Error setting cache policy's config values";
dm_cache_policy_destroy(cache->policy);
cache->policy = NULL;
}

return r;
}

@ -1793,8 +1865,6 @@ static sector_t calculate_discard_block_size(sector_t cache_block_size,

#define DEFAULT_MIGRATION_THRESHOLD (2048 * 100)

static unsigned cache_num_write_bios(struct dm_target *ti, struct bio *bio);

static int cache_create(struct cache_args *ca, struct cache **result)
{
int r = 0;

@ -1821,9 +1891,6 @@ static int cache_create(struct cache_args *ca, struct cache **result)

memcpy(&cache->features, &ca->features, sizeof(cache->features));

if (cache->features.write_through)
ti->num_write_bios = cache_num_write_bios;

cache->callbacks.congested_fn = cache_is_congested;
dm_table_add_target_callbacks(ti->table, &cache->callbacks);

@ -1835,7 +1902,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)

/* FIXME: factor out this whole section */
origin_blocks = cache->origin_sectors = ca->origin_sectors;
(void) sector_div(origin_blocks, ca->block_size);
origin_blocks = block_div(origin_blocks, ca->block_size);
cache->origin_blocks = to_oblock(origin_blocks);

cache->sectors_per_block = ca->block_size;

@ -1848,7 +1915,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
dm_block_t cache_size = ca->cache_sectors;

cache->sectors_per_block_shift = -1;
(void) sector_div(cache_size, ca->block_size);
cache_size = block_div(cache_size, ca->block_size);
cache->cache_size = to_cblock(cache_size);
} else {
cache->sectors_per_block_shift = __ffs(ca->block_size);

@ -1873,6 +1940,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
spin_lock_init(&cache->lock);
bio_list_init(&cache->deferred_bios);
bio_list_init(&cache->deferred_flush_bios);
bio_list_init(&cache->deferred_writethrough_bios);
INIT_LIST_HEAD(&cache->quiesced_migrations);
INIT_LIST_HEAD(&cache->completed_migrations);
INIT_LIST_HEAD(&cache->need_commit_migrations);

@ -2002,6 +2070,8 @@ static int cache_ctr(struct dm_target *ti, unsigned argc, char **argv)
goto out;

r = cache_create(ca, &cache);
if (r)
goto out;

r = copy_ctr_args(cache, argc - 3, (const char **)argv + 3);
if (r) {

@ -2016,20 +2086,6 @@ out:
return r;
}

static unsigned cache_num_write_bios(struct dm_target *ti, struct bio *bio)
{
int r;
struct cache *cache = ti->private;
dm_oblock_t block = get_bio_block(cache, bio);
dm_cblock_t cblock;

r = policy_lookup(cache->policy, block, &cblock);
if (r < 0)
return 2; /* assume the worst */

return (!r && !is_dirty(cache, cblock)) ? 2 : 1;
}

static int cache_map(struct dm_target *ti, struct bio *bio)
{
struct cache *cache = ti->private;

@ -2097,18 +2153,12 @@ static int cache_map(struct dm_target *ti, struct bio *bio)
inc_hit_counter(cache, bio);
pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);

if (is_writethrough_io(cache, bio, lookup_result.cblock)) {
/*
* No need to mark anything dirty in write through mode.
*/
pb->req_nr == 0 ?
remap_to_cache(cache, bio, lookup_result.cblock) :
remap_to_origin_clear_discard(cache, bio, block);
cell_defer(cache, cell, false);
} else {
if (is_writethrough_io(cache, bio, lookup_result.cblock))
remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock);
else
remap_to_cache_dirty(cache, bio, block, lookup_result.cblock);

cell_defer(cache, cell, false);
}
break;

case POLICY_MISS:

@ -2319,8 +2369,7 @@ static int cache_preresume(struct dm_target *ti)
}

if (!cache->loaded_mappings) {
r = dm_cache_load_mappings(cache->cmd,
dm_cache_policy_get_name(cache->policy),
r = dm_cache_load_mappings(cache->cmd, cache->policy,
load_mapping, cache);
if (r) {
DMERR("could not load cache mappings");

@ -2535,7 +2584,7 @@ static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)

static struct target_type cache_target = {
.name = "cache",
.version = {1, 0, 0},
.version = {1, 1, 0},
.module = THIS_MODULE,
.ctr = cache_ctr,
.dtr = cache_dtr,
@ -1577,6 +1577,11 @@ static bool data_dev_supports_discard(struct pool_c *pt)
return q && blk_queue_discard(q);
}

static bool is_factor(sector_t block_size, uint32_t n)
{
return !sector_div(block_size, n);
}

/*
* If discard_passdown was enabled verify that the data device
* supports discards. Disable discard_passdown if not.

@ -1602,7 +1607,7 @@ static void disable_passdown_if_not_supported(struct pool_c *pt)
else if (data_limits->discard_granularity > block_size)
reason = "discard granularity larger than a block";

else if (block_size & (data_limits->discard_granularity - 1))
else if (!is_factor(block_size, data_limits->discard_granularity))
reason = "discard granularity not a factor of block size";

if (reason) {

@ -2544,7 +2549,7 @@ static struct target_type pool_target = {
.name = "thin-pool",
.features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
DM_TARGET_IMMUTABLE,
.version = {1, 6, 1},
.version = {1, 7, 0},
.module = THIS_MODULE,
.ctr = pool_ctr,
.dtr = pool_dtr,

@ -2831,7 +2836,7 @@ static int thin_iterate_devices(struct dm_target *ti,

static struct target_type thin_target = {
.name = "thin",
.version = {1, 7, 1},
.version = {1, 8, 0},
.module = THIS_MODULE,
.ctr = thin_ctr,
.dtr = thin_dtr,
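The is_factor() change fixes a real arithmetic bug: "x & (g - 1)" only computes x % g when g is a power of two, and discard granularity need not be one. A userspace demo (sector_div() in the kernel returns the remainder, as the modulo does here):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool is_factor(uint64_t block_size, uint32_t n)
{
	return block_size % n == 0;	/* !sector_div(block_size, n) */
}

int main(void)
{
	uint64_t block_size = 48;
	uint32_t g = 24;		/* not a power of two */

	/* Bitmask test reports a bogus remainder of 16. */
	printf("bitmask remainder: %llu\n",
	       (unsigned long long)(block_size & (g - 1)));
	/* Real modulo correctly says 24 divides 48. */
	printf("is_factor: %d\n", is_factor(block_size, g));
	return 0;
}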
@ -93,6 +93,13 @@ struct dm_verity_io {
*/
};

struct dm_verity_prefetch_work {
struct work_struct work;
struct dm_verity *v;
sector_t block;
unsigned n_blocks;
};

static struct shash_desc *io_hash_desc(struct dm_verity *v, struct dm_verity_io *io)
{
return (struct shash_desc *)(io + 1);

@ -424,15 +431,18 @@ static void verity_end_io(struct bio *bio, int error)
* The root buffer is not prefetched, it is assumed that it will be cached
* all the time.
*/
static void verity_prefetch_io(struct dm_verity *v, struct dm_verity_io *io)
static void verity_prefetch_io(struct work_struct *work)
{
struct dm_verity_prefetch_work *pw =
container_of(work, struct dm_verity_prefetch_work, work);
struct dm_verity *v = pw->v;
int i;

for (i = v->levels - 2; i >= 0; i--) {
sector_t hash_block_start;
sector_t hash_block_end;
verity_hash_at_level(v, io->block, i, &hash_block_start, NULL);
verity_hash_at_level(v, io->block + io->n_blocks - 1, i, &hash_block_end, NULL);
verity_hash_at_level(v, pw->block, i, &hash_block_start, NULL);
verity_hash_at_level(v, pw->block + pw->n_blocks - 1, i, &hash_block_end, NULL);
if (!i) {
unsigned cluster = ACCESS_ONCE(dm_verity_prefetch_cluster);

@ -452,6 +462,25 @@ no_prefetch_cluster:
dm_bufio_prefetch(v->bufio, hash_block_start,
hash_block_end - hash_block_start + 1);
}

kfree(pw);
}

static void verity_submit_prefetch(struct dm_verity *v, struct dm_verity_io *io)
{
struct dm_verity_prefetch_work *pw;

pw = kmalloc(sizeof(struct dm_verity_prefetch_work),
GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);

if (!pw)
return;

INIT_WORK(&pw->work, verity_prefetch_io);
pw->v = v;
pw->block = io->block;
pw->n_blocks = io->n_blocks;
queue_work(v->verify_wq, &pw->work);
}

/*

@ -498,7 +527,7 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
memcpy(io->io_vec, bio_iovec(bio),
io->io_vec_size * sizeof(struct bio_vec));

verity_prefetch_io(v, io);
verity_submit_prefetch(v, io);

generic_make_request(bio);

@ -858,7 +887,7 @@ bad:

static struct target_type verity_target = {
.name = "verity",
.version = {1, 1, 1},
.version = {1, 2, 0},
.module = THIS_MODULE,
.ctr = verity_ctr,
.dtr = verity_dtr,
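verity_submit_prefetch() is a best-effort deferral: it snapshots the request parameters into a small heap object and hands that to a worker, and if the allocation fails it simply skips the optimisation, since prefetch is advisory. A userspace stand-in with invented names (the worker runs inline here where the kernel would queue_work()):

#include <stdio.h>
#include <stdlib.h>

struct prefetch_work {
	unsigned long block;
	unsigned n_blocks;
};

static void prefetch_worker(struct prefetch_work *pw)
{
	printf("prefetching %u blocks from %lu\n", pw->n_blocks, pw->block);
	free(pw);		/* worker owns and frees the work item */
}

static void submit_prefetch(unsigned long block, unsigned n_blocks)
{
	struct prefetch_work *pw = malloc(sizeof(*pw));

	if (!pw)
		return;		/* advisory work: failure is not an error */

	/* Snapshot the arguments; the originating io may be gone by
	 * the time the worker runs.
	 */
	pw->block = block;
	pw->n_blocks = n_blocks;
	prefetch_worker(pw);
}

int main(void)
{
	submit_prefetch(42, 8);
	return 0;
}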
@ -7663,10 +7663,8 @@ static int remove_and_add_spares(struct mddev *mddev)
removed++;
}
}
if (removed)
sysfs_notify(&mddev->kobj, NULL,
"degraded");

if (removed && mddev->kobj.sd)
sysfs_notify(&mddev->kobj, NULL, "degraded");

rdev_for_each(rdev, mddev) {
if (rdev->raid_disk >= 0 &&

@ -506,7 +506,7 @@ static inline char * mdname (struct mddev * mddev)
static inline int sysfs_link_rdev(struct mddev *mddev, struct md_rdev *rdev)
{
char nm[20];
if (!test_bit(Replacement, &rdev->flags)) {
if (!test_bit(Replacement, &rdev->flags) && mddev->kobj.sd) {
sprintf(nm, "rd%d", rdev->raid_disk);
return sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
} else

@ -516,7 +516,7 @@ static inline int sysfs_link_rdev(struct mddev *mddev, struct md_rdev *rdev)
static inline void sysfs_unlink_rdev(struct mddev *mddev, struct md_rdev *rdev)
{
char nm[20];
if (!test_bit(Replacement, &rdev->flags)) {
if (!test_bit(Replacement, &rdev->flags) && mddev->kobj.sd) {
sprintf(nm, "rd%d", rdev->raid_disk);
sysfs_remove_link(&mddev->kobj, nm);
}
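All three md hunks apply the same guard: only touch sysfs when the kobject has actually been registered, which the kernel signals with a non-NULL kobj.sd. A userspace stand-in that models the "registered?" marker as a pointer (names assumed for illustration):

#include <stdio.h>

struct kobject {
	void *sd;	/* non-NULL once registered with sysfs */
};

static void notify_degraded(struct kobject *kobj, int removed)
{
	/* These paths can now run before registration completes,
	 * so skip the notification when there is nothing to notify.
	 */
	if (removed && kobj->sd)
		printf("sysfs_notify(degraded)\n");
}

int main(void)
{
	struct kobject unregistered = { .sd = (void *)0 };
	struct kobject registered = { .sd = (void *)1 };

	notify_degraded(&unregistered, 1);	/* silently skipped */
	notify_degraded(&registered, 1);	/* notifies */
	return 0;
}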
@ -139,15 +139,8 @@ struct child {
struct btree_node *n;
};

static struct dm_btree_value_type le64_type = {
.context = NULL,
.size = sizeof(__le64),
.inc = NULL,
.dec = NULL,
.equal = NULL
};

static int init_child(struct dm_btree_info *info, struct btree_node *parent,
static int init_child(struct dm_btree_info *info, struct dm_btree_value_type *vt,
struct btree_node *parent,
unsigned index, struct child *result)
{
int r, inc;

@ -164,7 +157,7 @@ static int init_child(struct dm_btree_info *info, struct btree_node *parent,
result->n = dm_block_data(result->block);

if (inc)
inc_children(info->tm, result->n, &le64_type);
inc_children(info->tm, result->n, vt);

*((__le64 *) value_ptr(parent, index)) =
cpu_to_le64(dm_block_location(result->block));

@ -236,7 +229,7 @@ static void __rebalance2(struct dm_btree_info *info, struct btree_node *parent,
}

static int rebalance2(struct shadow_spine *s, struct dm_btree_info *info,
unsigned left_index)
struct dm_btree_value_type *vt, unsigned left_index)
{
int r;
struct btree_node *parent;

@ -244,11 +237,11 @@ static int rebalance2(struct shadow_spine *s, struct dm_btree_info *info,

parent = dm_block_data(shadow_current(s));

r = init_child(info, parent, left_index, &left);
r = init_child(info, vt, parent, left_index, &left);
if (r)
return r;

r = init_child(info, parent, left_index + 1, &right);
r = init_child(info, vt, parent, left_index + 1, &right);
if (r) {
exit_child(info, &left);
return r;

@ -368,7 +361,7 @@ static void __rebalance3(struct dm_btree_info *info, struct btree_node *parent,
}

static int rebalance3(struct shadow_spine *s, struct dm_btree_info *info,
unsigned left_index)
struct dm_btree_value_type *vt, unsigned left_index)
{
int r;
struct btree_node *parent = dm_block_data(shadow_current(s));

@ -377,17 +370,17 @@ static int rebalance3(struct shadow_spine *s, struct dm_btree_info *info,
/*
* FIXME: fill out an array?
*/
r = init_child(info, parent, left_index, &left);
r = init_child(info, vt, parent, left_index, &left);
if (r)
return r;

r = init_child(info, parent, left_index + 1, &center);
r = init_child(info, vt, parent, left_index + 1, &center);
if (r) {
exit_child(info, &left);
return r;
}

r = init_child(info, parent, left_index + 2, &right);
r = init_child(info, vt, parent, left_index + 2, &right);
if (r) {
exit_child(info, &left);
exit_child(info, &center);

@ -434,7 +427,8 @@ static int get_nr_entries(struct dm_transaction_manager *tm,
}

static int rebalance_children(struct shadow_spine *s,
struct dm_btree_info *info, uint64_t key)
struct dm_btree_info *info,
struct dm_btree_value_type *vt, uint64_t key)
{
int i, r, has_left_sibling, has_right_sibling;
uint32_t child_entries;

@ -472,13 +466,13 @@ static int rebalance_children(struct shadow_spine *s,
has_right_sibling = i < (le32_to_cpu(n->header.nr_entries) - 1);

if (!has_left_sibling)
r = rebalance2(s, info, i);
r = rebalance2(s, info, vt, i);

else if (!has_right_sibling)
r = rebalance2(s, info, i - 1);
r = rebalance2(s, info, vt, i - 1);

else
r = rebalance3(s, info, i - 1);
r = rebalance3(s, info, vt, i - 1);

return r;
}

@ -529,7 +523,7 @@ static int remove_raw(struct shadow_spine *s, struct dm_btree_info *info,
if (le32_to_cpu(n->header.flags) & LEAF_NODE)
return do_leaf(n, key, index);

r = rebalance_children(s, info, key);
r = rebalance_children(s, info, vt, key);
if (r)
break;

@ -550,6 +544,14 @@ static int remove_raw(struct shadow_spine *s, struct dm_btree_info *info,
return r;
}

static struct dm_btree_value_type le64_type = {
.context = NULL,
.size = sizeof(__le64),
.inc = NULL,
.dec = NULL,
.equal = NULL
};

int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
uint64_t *keys, dm_block_t *new_root)
{
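The shape of this dm-btree fix: stop hardcoding &le64_type inside the helpers and instead thread the caller's value-type ops down the whole rebalance path, so reference counting uses the tree's real value semantics; the le64 default survives only at the top-level entry point. A compact stand-in for the "pass the ops struct down instead of using a file-scope default" refactor, with invented names:

#include <stdio.h>

struct value_type {
	const char *name;
	unsigned size;
};

/* The leaf helper now receives the value type from its caller ... */
static void init_child(const struct value_type *vt, int index)
{
	printf("child %d: %u-byte %s values\n", index, vt->size, vt->name);
}

static void rebalance2(const struct value_type *vt, int left)
{
	init_child(vt, left);
	init_child(vt, left + 1);
}

/* ... and only the top-level entry point supplies a default. */
static const struct value_type le64_type = { "le64", 8 };

int main(void)
{
	rebalance2(&le64_type, 0);
	return 0;
}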
@ -671,6 +671,8 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
bi->bi_next = NULL;
if (rrdev)
set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags);

if (conf->mddev->gendisk)
trace_block_bio_remap(bdev_get_queue(bi->bi_bdev),
bi, disk_devt(conf->mddev->gendisk),
sh->dev[i].sector);

@ -701,6 +703,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
rbi->bi_io_vec[0].bv_offset = 0;
rbi->bi_size = STRIPE_SIZE;
rbi->bi_next = NULL;
if (conf->mddev->gendisk)
trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev),
rbi, disk_devt(conf->mddev->gendisk),
sh->dev[i].sector);

@ -2280,17 +2283,6 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
int level = conf->level;

if (rcw) {
/* if we are not expanding this is a proper write request, and
* there will be bios with new data to be drained into the
* stripe cache
*/
if (!expand) {
sh->reconstruct_state = reconstruct_state_drain_run;
set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
} else
sh->reconstruct_state = reconstruct_state_run;

set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);

for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];

@ -2303,6 +2295,21 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
s->locked++;
}
}
/* if we are not expanding this is a proper write request, and
* there will be bios with new data to be drained into the
* stripe cache
*/
if (!expand) {
if (!s->locked)
/* False alarm, nothing to do */
return;
sh->reconstruct_state = reconstruct_state_drain_run;
set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
} else
sh->reconstruct_state = reconstruct_state_run;

set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);

if (s->locked + conf->max_degraded == disks)
if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state))
atomic_inc(&conf->pending_full_writes);

@ -2311,11 +2318,6 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) ||
test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags)));

sh->reconstruct_state = reconstruct_state_prexor_drain_run;
set_bit(STRIPE_OP_PREXOR, &s->ops_request);
set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);

for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
if (i == pd_idx)

@ -2330,6 +2332,13 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
s->locked++;
}
}
if (!s->locked)
/* False alarm - nothing to do */
return;
sh->reconstruct_state = reconstruct_state_prexor_drain_run;
set_bit(STRIPE_OP_PREXOR, &s->ops_request);
set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);
}

/* keep the parity disk(s) locked while asynchronous operations

@ -2564,6 +2573,8 @@ handle_failed_sync(struct r5conf *conf, struct stripe_head *sh,
int i;

clear_bit(STRIPE_SYNCING, &sh->state);
if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags))
wake_up(&conf->wait_for_overlap);
s->syncing = 0;
s->replacing = 0;
/* There is nothing more to do for sync/check/repair.

@ -2737,6 +2748,7 @@ static void handle_stripe_clean_event(struct r5conf *conf,
{
int i;
struct r5dev *dev;
int discard_pending = 0;

for (i = disks; i--; )
if (sh->dev[i].written) {

@ -2765,9 +2777,23 @@ static void handle_stripe_clean_event(struct r5conf *conf,
STRIPE_SECTORS,
!test_bit(STRIPE_DEGRADED, &sh->state),
0);
} else if (test_bit(R5_Discard, &dev->flags))
discard_pending = 1;
}
if (!discard_pending &&
test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags)) {
clear_bit(R5_Discard, &sh->dev[sh->pd_idx].flags);
clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags);
if (sh->qd_idx >= 0) {
clear_bit(R5_Discard, &sh->dev[sh->qd_idx].flags);
clear_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags);
}
/* now that discard is done we can proceed with any sync */
clear_bit(STRIPE_DISCARD, &sh->state);
if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state))
set_bit(STRIPE_HANDLE, &sh->state);

}
} else if (test_bit(R5_Discard, &sh->dev[i].flags))
clear_bit(R5_Discard, &sh->dev[i].flags);

if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
if (atomic_dec_and_test(&conf->pending_full_writes))

@ -2826,7 +2852,9 @@ static void handle_stripe_dirtying(struct r5conf *conf,
set_bit(STRIPE_HANDLE, &sh->state);
if (rmw < rcw && rmw > 0) {
/* prefer read-modify-write, but need to get some data */
blk_add_trace_msg(conf->mddev->queue, "raid5 rmw %llu %d",
if (conf->mddev->queue)
blk_add_trace_msg(conf->mddev->queue,
"raid5 rmw %llu %d",
(unsigned long long)sh->sector, rmw);
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];

@ -2877,7 +2905,7 @@ static void handle_stripe_dirtying(struct r5conf *conf,
}
}
}
if (rcw)
if (rcw && conf->mddev->queue)
blk_add_trace_msg(conf->mddev->queue, "raid5 rcw %llu %d %d %d",
(unsigned long long)sh->sector,
rcw, qread, test_bit(STRIPE_DELAYED, &sh->state));

@ -3417,10 +3445,16 @@ static void handle_stripe(struct stripe_head *sh)
return;
}

if (test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) {
if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state)) {
spin_lock(&sh->stripe_lock);
/* Cannot process 'sync' concurrently with 'discard' */
if (!test_bit(STRIPE_DISCARD, &sh->state) &&
test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) {
set_bit(STRIPE_SYNCING, &sh->state);
clear_bit(STRIPE_INSYNC, &sh->state);
}
spin_unlock(&sh->stripe_lock);
}
clear_bit(STRIPE_DELAYED, &sh->state);

pr_debug("handling stripe %llu, state=%#lx cnt=%d, "

@ -3579,6 +3613,8 @@ static void handle_stripe(struct stripe_head *sh)
test_bit(STRIPE_INSYNC, &sh->state)) {
md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
clear_bit(STRIPE_SYNCING, &sh->state);
if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags))
wake_up(&conf->wait_for_overlap);
}

/* If the failed drives are just a ReadError, then we might need

@ -3982,6 +4018,7 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
atomic_inc(&conf->active_aligned_reads);
spin_unlock_irq(&conf->device_lock);

if (mddev->gendisk)
trace_block_bio_remap(bdev_get_queue(align_bi->bi_bdev),
align_bi, disk_devt(mddev->gendisk),
raid_bio->bi_sector);

@ -4078,6 +4115,7 @@ static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule)
}
spin_unlock_irq(&conf->device_lock);
}
if (mddev->queue)
trace_block_unplug(mddev->queue, cnt, !from_schedule);
kfree(cb);
}

@ -4141,6 +4179,13 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
sh = get_active_stripe(conf, logical_sector, 0, 0, 0);
prepare_to_wait(&conf->wait_for_overlap, &w,
TASK_UNINTERRUPTIBLE);
set_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags);
if (test_bit(STRIPE_SYNCING, &sh->state)) {
release_stripe(sh);
schedule();
goto again;
}
clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags);
spin_lock_irq(&sh->stripe_lock);
for (d = 0; d < conf->raid_disks; d++) {
if (d == sh->pd_idx || d == sh->qd_idx)

@ -4153,6 +4198,7 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
goto again;
}
}
set_bit(STRIPE_DISCARD, &sh->state);
finish_wait(&conf->wait_for_overlap, &w);
for (d = 0; d < conf->raid_disks; d++) {
if (d == sh->pd_idx || d == sh->qd_idx)
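The handle_stripe() hunk replaces a lone test_and_clear_bit() with a decide-under-lock sequence so that sync can never start while a discard is in flight on the same stripe. The essence, in a POSIX-threads stand-in with invented state flags:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t stripe_lock = PTHREAD_MUTEX_INITIALIZER;
static bool sync_requested = true;
static bool discard_pending;
static bool syncing;

static void maybe_start_sync(void)
{
	/* Test and flip the state under one lock, so a concurrent
	 * discard cannot slip in between the test and the set.
	 */
	pthread_mutex_lock(&stripe_lock);
	if (!discard_pending && sync_requested) {
		sync_requested = false;
		syncing = true;
	}
	pthread_mutex_unlock(&stripe_lock);

	printf("syncing=%d\n", syncing);
}

int main(void)
{
	discard_pending = true;
	maybe_start_sync();	/* deferred: discard in flight */

	discard_pending = false;
	maybe_start_sync();	/* now the sync can start */
	return 0;
}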
|
@ -221,10 +221,6 @@ struct stripe_head {
|
||||
struct stripe_operations {
|
||||
int target, target2;
|
||||
enum sum_check_flags zero_sum_result;
|
||||
#ifdef CONFIG_MULTICORE_RAID456
|
||||
unsigned long request;
|
||||
wait_queue_head_t wait_for_ops;
|
||||
#endif
|
||||
} ops;
|
||||
struct r5dev {
|
||||
/* rreq and rvec are used for the replacement device when
|
||||
@ -323,6 +319,7 @@ enum {
|
||||
STRIPE_COMPUTE_RUN,
|
||||
STRIPE_OPS_REQ_PENDING,
|
||||
STRIPE_ON_UNPLUG_LIST,
|
||||
STRIPE_DISCARD,
|
||||
};
|
||||
|
||||
/*
|
||||
|
@ -183,6 +183,11 @@ int bond_create_slave_symlinks(struct net_device *master,
|
||||
sprintf(linkname, "slave_%s", slave->name);
|
||||
ret = sysfs_create_link(&(master->dev.kobj), &(slave->dev.kobj),
|
||||
linkname);
|
||||
|
||||
/* free the master link created earlier in case of error */
|
||||
if (ret)
|
||||
sysfs_remove_link(&(slave->dev.kobj), "master");
|
||||
|
||||
return ret;
|
||||
|
||||
}
|
||||
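The bonding hunk is a small rollback-on-error fix: if creating the second symlink fails, the first one must be undone so no half-initialised sysfs state is left behind. A trivial userspace stand-in with invented names:

#include <stdio.h>

static int create_link(const char *name, int fail)
{
	printf("creating %s: %s\n", name, fail ? "failed" : "ok");
	return fail ? -1 : 0;
}

static int create_slave_symlinks(int fail_second)
{
	int ret = create_link("master", 0);

	if (ret)
		return ret;

	ret = create_link("slave_eth0", fail_second);
	/* free the master link created earlier in case of error */
	if (ret)
		printf("removing master\n");

	return ret;
}

int main(void)
{
	return create_slave_symlinks(1) ? 1 : 0;
}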
|
@ -2139,12 +2139,12 @@ static u8 bnx2x_dcbnl_get_cap(struct net_device *netdev, int capid, u8 *cap)
|
||||
break;
|
||||
default:
|
||||
BNX2X_ERR("Non valid capability ID\n");
|
||||
rval = -EINVAL;
|
||||
rval = 1;
|
||||
break;
|
||||
}
|
||||
} else {
|
||||
DP(BNX2X_MSG_DCB, "DCB disabled\n");
|
||||
rval = -EINVAL;
|
||||
rval = 1;
|
||||
}
|
||||
|
||||
DP(BNX2X_MSG_DCB, "capid %d:%x\n", capid, *cap);
|
||||
@ -2170,12 +2170,12 @@ static int bnx2x_dcbnl_get_numtcs(struct net_device *netdev, int tcid, u8 *num)
|
||||
break;
|
||||
default:
|
||||
BNX2X_ERR("Non valid TC-ID\n");
|
||||
rval = -EINVAL;
|
||||
rval = 1;
|
||||
break;
|
||||
}
|
||||
} else {
|
||||
DP(BNX2X_MSG_DCB, "DCB disabled\n");
|
||||
rval = -EINVAL;
|
||||
rval = 1;
|
||||
}
|
||||
|
||||
return rval;
|
||||
@ -2390,12 +2390,12 @@ static u8 bnx2x_dcbnl_get_featcfg(struct net_device *netdev, int featid,
|
||||
break;
|
||||
default:
|
||||
BNX2X_ERR("Non valid featrue-ID\n");
|
||||
rval = -EINVAL;
|
||||
rval = 1;
|
||||
break;
|
||||
}
|
||||
} else {
|
||||
DP(BNX2X_MSG_DCB, "DCB disabled\n");
|
||||
rval = -EINVAL;
|
||||
rval = 1;
|
||||
}
|
||||
|
||||
return rval;
|
||||
@ -2431,12 +2431,12 @@ static u8 bnx2x_dcbnl_set_featcfg(struct net_device *netdev, int featid,
|
||||
break;
|
||||
default:
|
||||
BNX2X_ERR("Non valid featrue-ID\n");
|
||||
rval = -EINVAL;
|
||||
rval = 1;
|
||||
break;
|
||||
}
|
||||
} else {
|
||||
DP(BNX2X_MSG_DCB, "dcbnl call not valid\n");
|
||||
rval = -EINVAL;
|
||||
rval = 1;
|
||||
}
|
||||
|
||||
return rval;
|
||||
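As I read these bnx2x hunks, the point of swapping -EINVAL for 1 is that several dcbnl callbacks return u8, so a negative errno is silently truncated into a meaningless positive value; 1 is the convention these getters use for failure. A userspace demo of the truncation:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int rval = -EINVAL;		/* what the code used to return */
	uint8_t as_u8 = (uint8_t)rval;	/* what a u8 return type keeps */

	/* Prints 234 on Linux (EINVAL == 22): not an errno at all. */
	printf("-EINVAL as u8: %u\n", as_u8);
	return 0;
}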
|
@ -1818,27 +1818,32 @@ out:
|
||||
**/
|
||||
void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf)
|
||||
{
|
||||
u32 dtxswc;
|
||||
u32 reg_val, reg_offset;
|
||||
|
||||
switch (hw->mac.type) {
|
||||
case e1000_82576:
|
||||
reg_offset = E1000_DTXSWC;
|
||||
break;
|
||||
case e1000_i350:
|
||||
dtxswc = rd32(E1000_DTXSWC);
|
||||
if (enable) {
|
||||
dtxswc |= (E1000_DTXSWC_MAC_SPOOF_MASK |
|
||||
E1000_DTXSWC_VLAN_SPOOF_MASK);
|
||||
/* The PF can spoof - it has to in order to
|
||||
* support emulation mode NICs */
|
||||
dtxswc ^= (1 << pf | 1 << (pf + MAX_NUM_VFS));
|
||||
} else {
|
||||
dtxswc &= ~(E1000_DTXSWC_MAC_SPOOF_MASK |
|
||||
E1000_DTXSWC_VLAN_SPOOF_MASK);
|
||||
}
|
||||
wr32(E1000_DTXSWC, dtxswc);
|
||||
reg_offset = E1000_TXSWC;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
return;
|
||||
}
|
||||
|
||||
reg_val = rd32(reg_offset);
|
||||
if (enable) {
|
||||
reg_val |= (E1000_DTXSWC_MAC_SPOOF_MASK |
|
||||
E1000_DTXSWC_VLAN_SPOOF_MASK);
|
||||
/* The PF can spoof - it has to in order to
|
||||
* support emulation mode NICs
|
||||
*/
|
||||
reg_val ^= (1 << pf | 1 << (pf + MAX_NUM_VFS));
|
||||
} else {
|
||||
reg_val &= ~(E1000_DTXSWC_MAC_SPOOF_MASK |
|
||||
E1000_DTXSWC_VLAN_SPOOF_MASK);
|
||||
}
|
||||
wr32(reg_offset, reg_val);
|
||||
}
|
||||
|
||||
/**
|
||||
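The e1000 hunk deduplicates per-chip spoof-check code: the switch now only selects the register offset (returning early for unknown MAC types), and one shared read-modify-write tail does the actual work. A stand-in with a fake register file; the offsets and mask are invented placeholders:

#include <stdint.h>
#include <stdio.h>

enum mac_type { MAC_82576, MAC_I350 };

#define REG_DTXSWC	0	/* 82576 */
#define REG_TXSWC	1	/* i350 */
#define SPOOF_MASK	0xffu

static uint32_t regs[2];

static void set_anti_spoofing(enum mac_type mac, int enable)
{
	uint32_t reg_offset;

	switch (mac) {		/* per-chip part: pick the register */
	case MAC_82576:
		reg_offset = REG_DTXSWC;
		break;
	case MAC_I350:
		reg_offset = REG_TXSWC;
		break;
	default:
		return;		/* unknown chip: nothing to do */
	}

	/* shared part: one read-modify-write for every chip */
	if (enable)
		regs[reg_offset] |= SPOOF_MASK;
	else
		regs[reg_offset] &= ~SPOOF_MASK;
}

int main(void)
{
	set_anti_spoofing(MAC_I350, 1);
	printf("TXSWC = %#x\n", regs[REG_TXSWC]);
	return 0;
}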
|
@ -39,7 +39,7 @@
|
||||
#include <linux/pci.h>
|
||||
|
||||
#ifdef CONFIG_IGB_HWMON
|
||||
struct i2c_board_info i350_sensor_info = {
|
||||
static struct i2c_board_info i350_sensor_info = {
|
||||
I2C_BOARD_INFO("i350bb", (0Xf8 >> 1)),
|
||||
};
|
||||
|
||||
|
@ -2542,8 +2542,8 @@ static void igb_probe_vfs(struct igb_adapter *adapter)
|
||||
if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211))
|
||||
return;
|
||||
|
||||
igb_enable_sriov(pdev, max_vfs);
|
||||
pci_sriov_set_totalvfs(pdev, 7);
|
||||
igb_enable_sriov(pdev, max_vfs);
|
||||
|
||||
#endif /* CONFIG_PCI_IOV */
|
||||
}
|
||||
@ -2652,7 +2652,7 @@ static int igb_sw_init(struct igb_adapter *adapter)
|
||||
if (max_vfs > 7) {
|
||||
dev_warn(&pdev->dev,
|
||||
"Maximum of 7 VFs per PF, using max\n");
|
||||
adapter->vfs_allocated_count = 7;
|
||||
max_vfs = adapter->vfs_allocated_count = 7;
|
||||
} else
|
||||
adapter->vfs_allocated_count = max_vfs;
|
||||
if (adapter->vfs_allocated_count)
|
||||
|
@ -740,7 +740,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
|
||||
case e1000_82576:
|
||||
snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
|
||||
adapter->ptp_caps.owner = THIS_MODULE;
|
||||
adapter->ptp_caps.max_adj = 1000000000;
|
||||
adapter->ptp_caps.max_adj = 999999881;
|
||||
adapter->ptp_caps.n_ext_ts = 0;
|
||||
adapter->ptp_caps.pps = 0;
|
||||
adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82576;
|
||||
|
@ -950,9 +950,17 @@ free_queue_irqs:
|
||||
free_irq(adapter->msix_entries[vector].vector,
|
||||
adapter->q_vector[vector]);
|
||||
}
|
||||
pci_disable_msix(adapter->pdev);
|
||||
kfree(adapter->msix_entries);
|
||||
adapter->msix_entries = NULL;
|
||||
/* This failure is non-recoverable - it indicates the system is
|
||||
* out of MSIX vector resources and the VF driver cannot run
|
||||
* without them. Set the number of msix vectors to zero
|
||||
* indicating that not enough can be allocated. The error
|
||||
* will be returned to the user indicating device open failed.
|
||||
* Any further attempts to force the driver to open will also
|
||||
* fail. The only way to recover is to unload the driver and
|
||||
* reload it again. If the system has recovered some MSIX
|
||||
* vectors then it may succeed.
|
||||
*/
|
||||
adapter->num_msix_vectors = 0;
|
||||
return err;
|
||||
}
|
||||
|
||||
@ -2575,6 +2583,15 @@ static int ixgbevf_open(struct net_device *netdev)
|
||||
struct ixgbe_hw *hw = &adapter->hw;
|
||||
int err;
|
||||
|
||||
/* A previous failure to open the device because of a lack of
|
||||
* available MSIX vector resources may have reset the number
|
||||
* of msix vectors variable to zero. The only way to recover
|
||||
* is to unload/reload the driver and hope that the system has
|
||||
* been able to recover some MSIX vector resources.
|
||||
*/
|
||||
if (!adapter->num_msix_vectors)
|
||||
return -ENOMEM;
|
||||
|
||||
/* disallow open during test */
|
||||
if (test_bit(__IXGBEVF_TESTING, &adapter->state))
|
||||
return -EBUSY;
|
||||
@ -2631,7 +2648,6 @@ static int ixgbevf_open(struct net_device *netdev)
|
||||
|
||||
err_req_irq:
|
||||
ixgbevf_down(adapter);
|
||||
ixgbevf_free_irq(adapter);
|
||||
err_setup_rx:
|
||||
ixgbevf_free_all_rx_resources(adapter);
|
||||
err_setup_tx:
|
||||
|
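The recovery contract these ixgbevf hunks set up: the MSI-X setup path records "no vectors" instead of leaving half-torn-down state, and open() checks that marker and refuses early. A userspace stand-in with assumed names:

#include <errno.h>
#include <stdio.h>

struct adapter {
	int num_msix_vectors;
};

static int request_msix_irqs(struct adapter *a)
{
	/* ... on failure, free everything and record the fact: */
	a->num_msix_vectors = 0;
	return -1;
}

static int open_device(struct adapter *a)
{
	/* A previous vector-allocation failure is non-recoverable
	 * until the driver is reloaded, so fail open() immediately.
	 */
	if (!a->num_msix_vectors)
		return -ENOMEM;
	return 0;
}

int main(void)
{
	struct adapter a = { .num_msix_vectors = 4 };

	request_msix_irqs(&a);
	printf("open: %d\n", open_device(&a));	/* fails with -ENOMEM */
	return 0;
}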