Merge branch 'for-rmk' of git://gitorious.org/linux-gemini/mainline
commit 2b4f017579

@@ -199,6 +199,10 @@ and is between 256 and 4096 characters. It is defined in the file
 			acpi_display_output=video
 			See above.
 
+	acpi_early_pdc_eval	[HW,ACPI] Evaluate processor _PDC methods
+			early. Needed on some platforms to properly
+			initialize the EC.
+
 	acpi_irq_balance [HW,ACPI]
 			ACPI will balance active IRQs
 			default in APIC mode

@@ -616,10 +616,10 @@ M:	Richard Purdie <rpurdie@rpsys.net>
 S:	Maintained
 
 ARM/CORTINA SYSTEMS GEMINI ARM ARCHITECTURE
-M:	Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
+M:	Paulius Zaleckas <paulius.zaleckas@gmail.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 T:	git git://gitorious.org/linux-gemini/mainline.git
-S:	Maintained
+S:	Odd Fixes
 F:	arch/arm/mach-gemini/
 
 ARM/EBSA110 MACHINE SUPPORT

@@ -641,9 +641,9 @@ T:	topgit git://git.openezx.org/openezx.git
 F:	arch/arm/mach-pxa/ezx.c
 
 ARM/FARADAY FA526 PORT
-M:	Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
+M:	Paulius Zaleckas <paulius.zaleckas@gmail.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S:	Maintained
+S:	Odd Fixes
 F:	arch/arm/mm/*-fa*
 
 ARM/FOOTBRIDGE ARCHITECTURE

@@ -86,7 +86,7 @@ static int gpio_set_irq_type(unsigned int irq, unsigned int type)
 	unsigned int reg_both, reg_level, reg_type;
 
 	reg_type = __raw_readl(base + GPIO_INT_TYPE);
-	reg_level = __raw_readl(base + GPIO_INT_BOTH_EDGE);
+	reg_level = __raw_readl(base + GPIO_INT_LEVEL);
 	reg_both = __raw_readl(base + GPIO_INT_BOTH_EDGE);
 
 	switch (type) {

@@ -117,7 +117,7 @@ static int gpio_set_irq_type(unsigned int irq, unsigned int type)
 	}
 
 	__raw_writel(reg_type, base + GPIO_INT_TYPE);
-	__raw_writel(reg_level, base + GPIO_INT_BOTH_EDGE);
+	__raw_writel(reg_level, base + GPIO_INT_LEVEL);
 	__raw_writel(reg_both, base + GPIO_INT_BOTH_EDGE);
 
 	gpio_ack_irq(irq);

@@ -94,6 +94,7 @@ ia64_acpi_release_global_lock (unsigned int *lock)
 #define acpi_noirq 0	/* ACPI always enabled on IA64 */
 #define acpi_pci_disabled 0 /* ACPI PCI always enabled on IA64 */
 #define acpi_strict 1	/* no ACPI spec workarounds on IA64 */
+#define acpi_ht 0	/* no HT-only mode on IA64 */
 #endif
 #define acpi_processor_cstate_check(x) (x) /* no idle limits on IA64 :) */
 static inline void disable_acpi(void) { }

@@ -338,7 +338,8 @@ static void __init mpc85xx_mds_pic_init(void)
 	}
 
 	mpic = mpic_alloc(np, r.start,
-			MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
+			MPIC_PRIMARY | MPIC_WANTS_RESET | MPIC_BIG_ENDIAN |
+			MPIC_BROKEN_FRR_NIRQS,
 			0, 256, " OpenPIC  ");
 	BUG_ON(mpic == NULL);
 	of_node_put(np);

@@ -46,6 +46,7 @@ smp_85xx_kick_cpu(int nr)
 	__iomem u32 *bptr_vaddr;
 	struct device_node *np;
 	int n = 0;
+	int ioremappable;
 
 	WARN_ON (nr < 0 || nr >= NR_CPUS);
 

@@ -59,20 +60,36 @@ smp_85xx_kick_cpu(int nr)
 		return;
 	}
 
+	/*
+	 * A secondary core could be in a spinloop in the bootpage
+	 * (0xfffff000), somewhere in highmem, or somewhere in lowmem.
+	 * The bootpage and highmem can be accessed via ioremap(), but
+	 * we need to directly access the spinloop if its in lowmem.
+	 */
+	ioremappable = *cpu_rel_addr > virt_to_phys(high_memory);
+
 	/* Map the spin table */
-	bptr_vaddr = ioremap(*cpu_rel_addr, SIZE_BOOT_ENTRY);
+	if (ioremappable)
+		bptr_vaddr = ioremap(*cpu_rel_addr, SIZE_BOOT_ENTRY);
+	else
+		bptr_vaddr = phys_to_virt(*cpu_rel_addr);
 
 	local_irq_save(flags);
 
 	out_be32(bptr_vaddr + BOOT_ENTRY_PIR, nr);
 	out_be32(bptr_vaddr + BOOT_ENTRY_ADDR_LOWER, __pa(__early_start));
 
+	if (!ioremappable)
+		flush_dcache_range((ulong)bptr_vaddr,
+				(ulong)(bptr_vaddr + SIZE_BOOT_ENTRY));
+
 	/* Wait a bit for the CPU to ack. */
 	while ((__secondary_hold_acknowledge != nr) && (++n < 1000))
 		mdelay(1);
 
 	local_irq_restore(flags);
 
-	iounmap(bptr_vaddr);
+	if (ioremappable)
+		iounmap(bptr_vaddr);
 
 	pr_debug("waited %d msecs for CPU #%d.\n", n, nr);

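The comment in the hunk above carries the design point: only a physical address outside the kernel's linear mapping needs ioremap(); a lowmem address is already mapped and can be reached with phys_to_virt(), but that alias is cacheable, which is why the hunk also adds the flush_dcache_range() call. A minimal sketch of just the mapping decision, assuming the same kernel helpers used in the hunk (the function name is illustrative, not from this commit):

	static void __iomem *map_spin_table(phys_addr_t phys, size_t size,
					    int *ioremappable)
	{
		/* above the end of lowmem: not covered by the linear map */
		*ioremappable = phys > virt_to_phys(high_memory);

		if (*ioremappable)
			return ioremap(phys, size);
		/* lowmem: already mapped, cacheable - caller must flush_dcache_range() */
		return (void __iomem *)phys_to_virt(phys);
	}
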
@@ -1342,14 +1342,6 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "Workstation W8000"),
 		},
 	},
-	{
-		.callback = force_acpi_ht,
-		.ident = "ASUS P2B-DS",
-		.matches = {
-			DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
-			DMI_MATCH(DMI_BOARD_NAME, "P2B-DS"),
-		},
-	},
 	{
 		.callback = force_acpi_ht,
 		.ident = "ASUS CUR-DLS",

@@ -935,6 +935,7 @@ static int dock_add(acpi_handle handle)
 	struct platform_device *dd;
 
 	id = dock_station_count;
+	memset(&ds, 0, sizeof(ds));
 	dd = platform_device_register_data(NULL, "dock", id, &ds, sizeof(ds));
 	if (IS_ERR(dd))
 		return PTR_ERR(dd);

@@ -110,6 +110,14 @@ static struct dmi_system_id __cpuinitdata processor_power_dmi_table[] = {
 	 DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
 	 DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")},
 	 (void *)2},
+	{ set_max_cstate, "Pavilion zv5000", {
+	 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+	 DMI_MATCH(DMI_PRODUCT_NAME,"Pavilion zv5000 (DS502A#ABA)")},
+	 (void *)1},
+	{ set_max_cstate, "Asus L8400B", {
+	 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
+	 DMI_MATCH(DMI_PRODUCT_NAME,"L8400B series Notebook PC")},
+	 (void *)1},
 	{},
 };
 

@@ -125,6 +125,8 @@ acpi_processor_eval_pdc(acpi_handle handle, struct acpi_object_list *pdc_in)
 	return status;
 }
 
+static int early_pdc_done;
+
 void acpi_processor_set_pdc(acpi_handle handle)
 {
 	struct acpi_object_list *obj_list;

@@ -132,6 +134,9 @@ void acpi_processor_set_pdc(acpi_handle handle)
 	if (arch_has_acpi_pdc() == false)
 		return;
 
+	if (early_pdc_done)
+		return;
+
 	obj_list = acpi_processor_alloc_pdc();
 	if (!obj_list)
 		return;

@@ -151,6 +156,13 @@ static int set_early_pdc_optin(const struct dmi_system_id *id)
 	return 0;
 }
 
+static int param_early_pdc_optin(char *s)
+{
+	early_pdc_optin = 1;
+	return 1;
+}
+__setup("acpi_early_pdc_eval", param_early_pdc_optin);
+
 static struct dmi_system_id __cpuinitdata early_pdc_optin_table[] = {
 	{
 	set_early_pdc_optin, "HP Envy", {

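The hunk above registers the new acpi_early_pdc_eval boot parameter with __setup(). A minimal sketch of the same pattern, with illustrative names rather than the ones from this commit: the handler runs while the kernel command line is parsed, and returning 1 tells the early-param code the token was consumed.

	static int my_optin;

	static int param_my_optin(char *s)
	{
		my_optin = 1;
		return 1;
	}
	__setup("my_early_eval", param_my_optin);
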
@@ -192,4 +204,6 @@ void __init acpi_early_processor_set_pdc(void)
 	acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
 			    ACPI_UINT32_MAX,
 			    early_init_pdc, NULL, NULL, NULL);
+
+	early_pdc_done = 1;
 }

@@ -1336,9 +1336,25 @@ static int acpi_bus_scan(acpi_handle handle, struct acpi_bus_ops *ops,
 
 	if (child)
 		*child = device;
-	return 0;
+
+	if (device)
+		return 0;
+	else
+		return -ENODEV;
 }
 
+/*
+ * acpi_bus_add and acpi_bus_start
+ *
+ * scan a given ACPI tree and (probably recently hot-plugged)
+ * create and add or starts found devices.
+ *
+ * If no devices were found -ENODEV is returned which does not
+ * mean that this is a real error, there just have been no suitable
+ * ACPI objects in the table trunk from which the kernel could create
+ * a device and add/start an appropriate driver.
+ */
+
 int
 acpi_bus_add(struct acpi_device **child,
 	     struct acpi_device *parent, acpi_handle handle, int type)

@@ -1348,8 +1364,7 @@ acpi_bus_add(struct acpi_device **child,
 	memset(&ops, 0, sizeof(ops));
 	ops.acpi_op_add = 1;
 
-	acpi_bus_scan(handle, &ops, child);
-	return 0;
+	return acpi_bus_scan(handle, &ops, child);
 }
 EXPORT_SYMBOL(acpi_bus_add);
 

@@ -1357,11 +1372,13 @@ int acpi_bus_start(struct acpi_device *device)
 {
 	struct acpi_bus_ops ops;
 
+	if (!device)
+		return -EINVAL;
+
 	memset(&ops, 0, sizeof(ops));
 	ops.acpi_op_start = 1;
 
-	acpi_bus_scan(device->handle, &ops, NULL);
-	return 0;
+	return acpi_bus_scan(device->handle, &ops, NULL);
 }
 EXPORT_SYMBOL(acpi_bus_start);
 

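The two hunks above stop discarding acpi_bus_scan()'s status; together with the new comment block, the contract is that -ENODEV only means "nothing suitable found", not a hard failure. A hypothetical caller relying on the propagated value (not from this commit):

	int ret = acpi_bus_start(device);

	if (ret && ret != -ENODEV)	/* -ENODEV: nothing to start, not an error */
		printk(KERN_WARNING "acpi_bus_start() failed: %d\n", ret);
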
@@ -213,7 +213,7 @@ acpi_table_parse_entries(char *id,
 	unsigned long table_end;
 	acpi_size tbl_size;
 
-	if (acpi_disabled)
+	if (acpi_disabled && !acpi_ht)
 		return -ENODEV;
 
 	if (!handler)

@@ -280,7 +280,7 @@ int __init acpi_table_parse(char *id, acpi_table_handler handler)
 	struct acpi_table_header *table = NULL;
 	acpi_size tbl_size;
 
-	if (acpi_disabled)
+	if (acpi_disabled && !acpi_ht)
 		return -ENODEV;
 
 	if (!handler)

@@ -598,6 +598,50 @@ struct drm_display_mode *drm_mode_std(struct drm_device *dev,
 	return mode;
 }
 
+/*
+ * EDID is delightfully ambiguous about how interlaced modes are to be
+ * encoded.  Our internal representation is of frame height, but some
+ * HDTV detailed timings are encoded as field height.
+ *
+ * The format list here is from CEA, in frame size.  Technically we
+ * should be checking refresh rate too.  Whatever.
+ */
+static void
+drm_mode_do_interlace_quirk(struct drm_display_mode *mode,
+			    struct detailed_pixel_timing *pt)
+{
+	int i;
+	static const struct {
+		int w, h;
+	} cea_interlaced[] = {
+		{ 1920, 1080 },
+		{  720,  480 },
+		{ 1440,  480 },
+		{ 2880,  480 },
+		{  720,  576 },
+		{ 1440,  576 },
+		{ 2880,  576 },
+	};
+	static const int n_sizes =
+		sizeof(cea_interlaced)/sizeof(cea_interlaced[0]);
+
+	if (!(pt->misc & DRM_EDID_PT_INTERLACED))
+		return;
+
+	for (i = 0; i < n_sizes; i++) {
+		if ((mode->hdisplay == cea_interlaced[i].w) &&
+		    (mode->vdisplay == cea_interlaced[i].h / 2)) {
+			mode->vdisplay *= 2;
+			mode->vsync_start *= 2;
+			mode->vsync_end *= 2;
+			mode->vtotal *= 2;
+			mode->vtotal |= 1;
+		}
+	}
+
+	mode->flags |= DRM_MODE_FLAG_INTERLACE;
+}
+
 /**
  * drm_mode_detailed - create a new mode from an EDID detailed timing section
  * @dev: DRM device (needed to create new mode)

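A worked instance of the quirk above, assuming a 1080i detailed timing advertised in field height (the numbers are illustrative of the real CEA timing):

	/* EDID detailed timing in fields: 1920x540, 562 lines per field */
	mode->vdisplay = 540 * 2;	/* 1080 visible lines per frame */
	mode->vtotal = 562 * 2;		/* 1124 */
	mode->vtotal |= 1;		/* 1125 total: interlace carries an odd half-line */
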
@@ -680,8 +724,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
 
 	drm_mode_set_name(mode);
 
-	if (pt->misc & DRM_EDID_PT_INTERLACED)
-		mode->flags |= DRM_MODE_FLAG_INTERLACE;
+	drm_mode_do_interlace_quirk(mode, pt);
 
 	if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
 		pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE;

|
@ -636,6 +636,13 @@ static const struct dmi_system_id bad_lid_status[] = {
|
|||||||
DMI_MATCH(DMI_PRODUCT_NAME, "PC-81005"),
|
DMI_MATCH(DMI_PRODUCT_NAME, "PC-81005"),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
.ident = "Clevo M5x0N",
|
||||||
|
.matches = {
|
||||||
|
DMI_MATCH(DMI_SYS_VENDOR, "CLEVO Co."),
|
||||||
|
DMI_MATCH(DMI_BOARD_NAME, "M5x0N"),
|
||||||
|
},
|
||||||
|
},
|
||||||
{ }
|
{ }
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@@ -5861,13 +5861,12 @@ nouveau_bios_run_init_table(struct drm_device *dev, uint16_t table,
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nvbios *bios = &dev_priv->VBIOS;
 	struct init_exec iexec = { true, false };
-	unsigned long flags;
 
-	spin_lock_irqsave(&bios->lock, flags);
+	mutex_lock(&bios->lock);
 	bios->display.output = dcbent;
 	parse_init_table(bios, table, &iexec);
 	bios->display.output = NULL;
-	spin_unlock_irqrestore(&bios->lock, flags);
+	mutex_unlock(&bios->lock);
 }
 
 static bool NVInitVBIOS(struct drm_device *dev)

@@ -5876,7 +5875,7 @@ static bool NVInitVBIOS(struct drm_device *dev)
 	struct nvbios *bios = &dev_priv->VBIOS;
 
 	memset(bios, 0, sizeof(struct nvbios));
-	spin_lock_init(&bios->lock);
+	mutex_init(&bios->lock);
 	bios->dev = dev;
 
 	if (!NVShadowVBIOS(dev, bios->data))

@@ -205,7 +205,7 @@ struct nvbios {
 	struct drm_device *dev;
 	struct nouveau_bios_info pub;
 
-	spinlock_t lock;
+	struct mutex lock;
 
 	uint8_t data[NV_PROM_SIZE];
 	unsigned int length;

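The three hunks above convert the VBIOS lock from a spinlock to a mutex: running an init table can sleep (delays, I2C traffic), and sleeping while holding a spinlock is illegal since spinlocked sections must stay atomic. A minimal sketch of the rule, with illustrative names:

	struct mutex tbl_lock;		/* mutex_init(&tbl_lock) at setup */

	static void run_table(void)
	{
		mutex_lock(&tbl_lock);	/* holder may sleep: allowed */
		msleep(10);		/* would be a bug under spin_lock() */
		mutex_unlock(&tbl_lock);
	}
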
@@ -579,6 +579,8 @@ static void nv17_tv_restore(struct drm_encoder *encoder)
 			       nouveau_encoder(encoder)->restore.output);
 
 	nv17_tv_state_load(dev, &to_tv_enc(encoder)->saved_state);
+
+	nouveau_encoder(encoder)->last_dpms = NV_DPMS_CLEARED;
 }
 
 static int nv17_tv_create_resources(struct drm_encoder *encoder,

@@ -643,7 +643,7 @@ static void atom_op_delay(atom_exec_context *ctx, int *ptr, int arg)
 	uint8_t count = U8((*ptr)++);
 	SDEBUG("   count: %d\n", count);
 	if (arg == ATOM_UNIT_MICROSEC)
-		schedule_timeout_uninterruptible(usecs_to_jiffies(count));
+		udelay(count);
 	else
 		schedule_timeout_uninterruptible(msecs_to_jiffies(count));
 }

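The fix above matters because sleeps are quantized to jiffies, which are milliseconds long; a sub-millisecond request rounds up to at least one full tick. A worked comparison, assuming HZ=250 (one jiffy = 4 ms):

	usecs_to_jiffies(10);	/* == 1, so the "10 us" sleep lasts at least 4 ms */
	udelay(10);		/* busy-spins ~10 us; count here is at most 255 */
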
@@ -543,9 +543,6 @@ int r600_vb_ib_get(struct radeon_device *rdev)
 void r600_vb_ib_put(struct radeon_device *rdev)
 {
 	radeon_fence_emit(rdev, rdev->r600_blit.vb_ib->fence);
-	mutex_lock(&rdev->ib_pool.mutex);
-	list_add_tail(&rdev->r600_blit.vb_ib->list, &rdev->ib_pool.scheduled_ibs);
-	mutex_unlock(&rdev->ib_pool.mutex);
 	radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
 }
 

@@ -96,6 +96,7 @@ extern int radeon_audio;
  * symbol;
  */
 #define RADEON_MAX_USEC_TIMEOUT		100000	/* 100 ms */
+/* RADEON_IB_POOL_SIZE must be a power of 2 */
 #define RADEON_IB_POOL_SIZE		16
 #define RADEON_DEBUGFS_MAX_NUM_FILES	32
 #define RADEONFB_CONN_LIMIT		4

@@ -363,11 +364,12 @@ void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev);
  */
 struct radeon_ib {
 	struct list_head	list;
-	unsigned long		idx;
+	unsigned		idx;
 	uint64_t		gpu_addr;
 	struct radeon_fence	*fence;
 	uint32_t		*ptr;
 	uint32_t		length_dw;
+	bool			free;
 };
 
 /*

@@ -377,10 +379,9 @@ struct radeon_ib {
 struct radeon_ib_pool {
 	struct mutex		mutex;
 	struct radeon_bo	*robj;
-	struct list_head	scheduled_ibs;
 	struct radeon_ib	ibs[RADEON_IB_POOL_SIZE];
 	bool			ready;
-	DECLARE_BITMAP(alloc_bm, RADEON_IB_POOL_SIZE);
+	unsigned		head_id;
 };
 
 struct radeon_cp {

@@ -86,7 +86,7 @@ int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
 					      &p->validated);
 		}
 	}
-	return radeon_bo_list_validate(&p->validated, p->ib->fence);
+	return radeon_bo_list_validate(&p->validated);
 }
 
 int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)

@@ -189,12 +189,10 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
 {
 	unsigned i;
 
-	if (error && parser->ib) {
-		radeon_bo_list_unvalidate(&parser->validated,
-					  parser->ib->fence);
-	} else {
-		radeon_bo_list_unreserve(&parser->validated);
+	if (!error && parser->ib) {
+		radeon_bo_list_fence(&parser->validated, parser->ib->fence);
 	}
+	radeon_bo_list_unreserve(&parser->validated);
 	for (i = 0; i < parser->nrelocs; i++) {
 		if (parser->relocs[i].gobj) {
 			mutex_lock(&parser->rdev->ddev->struct_mutex);

@@ -306,11 +306,10 @@ void radeon_bo_list_unreserve(struct list_head *head)
 	}
 }
 
-int radeon_bo_list_validate(struct list_head *head, void *fence)
+int radeon_bo_list_validate(struct list_head *head)
 {
 	struct radeon_bo_list *lobj;
 	struct radeon_bo *bo;
-	struct radeon_fence *old_fence = NULL;
 	int r;
 
 	r = radeon_bo_list_reserve(head);

@@ -334,32 +333,27 @@ int radeon_bo_list_validate(struct list_head *head, void *fence)
 		}
 		lobj->gpu_offset = radeon_bo_gpu_offset(bo);
 		lobj->tiling_flags = bo->tiling_flags;
-		if (fence) {
-			old_fence = (struct radeon_fence *)bo->tbo.sync_obj;
-			bo->tbo.sync_obj = radeon_fence_ref(fence);
-			bo->tbo.sync_obj_arg = NULL;
-		}
-		if (old_fence) {
-			radeon_fence_unref(&old_fence);
-		}
 	}
 	return 0;
 }
 
-void radeon_bo_list_unvalidate(struct list_head *head, void *fence)
+void radeon_bo_list_fence(struct list_head *head, void *fence)
 {
 	struct radeon_bo_list *lobj;
-	struct radeon_fence *old_fence;
+	struct radeon_bo *bo;
+	struct radeon_fence *old_fence = NULL;
 
-	if (fence)
 	list_for_each_entry(lobj, head, list) {
-		old_fence = to_radeon_fence(lobj->bo->tbo.sync_obj);
-		if (old_fence == fence) {
-			lobj->bo->tbo.sync_obj = NULL;
+		bo = lobj->bo;
+		spin_lock(&bo->tbo.lock);
+		old_fence = (struct radeon_fence *)bo->tbo.sync_obj;
+		bo->tbo.sync_obj = radeon_fence_ref(fence);
+		bo->tbo.sync_obj_arg = NULL;
+		spin_unlock(&bo->tbo.lock);
+		if (old_fence) {
 			radeon_fence_unref(&old_fence);
 		}
 	}
-	radeon_bo_list_unreserve(head);
 }
 
 int radeon_bo_fbdev_mmap(struct radeon_bo *bo,

@@ -156,8 +156,8 @@ extern void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
 				      struct list_head *head);
 extern int radeon_bo_list_reserve(struct list_head *head);
 extern void radeon_bo_list_unreserve(struct list_head *head);
-extern int radeon_bo_list_validate(struct list_head *head, void *fence);
-extern void radeon_bo_list_unvalidate(struct list_head *head, void *fence);
+extern int radeon_bo_list_validate(struct list_head *head);
+extern void radeon_bo_list_fence(struct list_head *head, void *fence);
 extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
 				struct vm_area_struct *vma);
 extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo,

@@ -41,68 +41,55 @@ int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib)
 {
 	struct radeon_fence *fence;
 	struct radeon_ib *nib;
-	unsigned long i;
-	int r = 0;
+	int r = 0, i, c;
 
 	*ib = NULL;
 	r = radeon_fence_create(rdev, &fence);
 	if (r) {
-		DRM_ERROR("failed to create fence for new IB\n");
+		dev_err(rdev->dev, "failed to create fence for new IB\n");
 		return r;
 	}
 	mutex_lock(&rdev->ib_pool.mutex);
-	i = find_first_zero_bit(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
-	if (i < RADEON_IB_POOL_SIZE) {
-		set_bit(i, rdev->ib_pool.alloc_bm);
-		rdev->ib_pool.ibs[i].length_dw = 0;
-		*ib = &rdev->ib_pool.ibs[i];
-		mutex_unlock(&rdev->ib_pool.mutex);
-		goto out;
+	for (i = rdev->ib_pool.head_id, c = 0, nib = NULL; c < RADEON_IB_POOL_SIZE; c++, i++) {
+		i &= (RADEON_IB_POOL_SIZE - 1);
+		if (rdev->ib_pool.ibs[i].free) {
+			nib = &rdev->ib_pool.ibs[i];
+			break;
 		}
-	if (list_empty(&rdev->ib_pool.scheduled_ibs)) {
-		/* we go do nothings here */
-		mutex_unlock(&rdev->ib_pool.mutex);
-		DRM_ERROR("all IB allocated none scheduled.\n");
-		r = -EINVAL;
-		goto out;
 	}
-	/* get the first ib on the scheduled list */
-	nib = list_entry(rdev->ib_pool.scheduled_ibs.next,
-			 struct radeon_ib, list);
-	if (nib->fence == NULL) {
-		/* we go do nothings here */
+	if (nib == NULL) {
+		/* This should never happen, it means we allocated all
+		 * IB and haven't scheduled one yet, return EBUSY to
+		 * userspace hoping that on ioctl recall we get better
+		 * luck
+		 */
+		dev_err(rdev->dev, "no free indirect buffer !\n");
 		mutex_unlock(&rdev->ib_pool.mutex);
-		DRM_ERROR("IB %lu scheduled without a fence.\n", nib->idx);
-		r = -EINVAL;
-		goto out;
+		radeon_fence_unref(&fence);
+		return -EBUSY;
 	}
+	rdev->ib_pool.head_id = (nib->idx + 1) & (RADEON_IB_POOL_SIZE - 1);
+	nib->free = false;
+	if (nib->fence) {
 		mutex_unlock(&rdev->ib_pool.mutex);
 
 		r = radeon_fence_wait(nib->fence, false);
 		if (r) {
-			DRM_ERROR("radeon: IB(%lu:0x%016lX:%u)\n", nib->idx,
-				  (unsigned long)nib->gpu_addr, nib->length_dw);
-			DRM_ERROR("radeon: GPU lockup detected, fail to get a IB\n");
-			goto out;
+			dev_err(rdev->dev, "error waiting fence of IB(%u:0x%016lX:%u)\n",
+				nib->idx, (unsigned long)nib->gpu_addr, nib->length_dw);
+			mutex_lock(&rdev->ib_pool.mutex);
+			nib->free = true;
+			mutex_unlock(&rdev->ib_pool.mutex);
+			radeon_fence_unref(&fence);
+			return r;
+		}
+		mutex_lock(&rdev->ib_pool.mutex);
 	}
 	radeon_fence_unref(&nib->fence);
+	nib->fence = fence;
 	nib->length_dw = 0;
 
-	/* scheduled list is accessed here */
-	mutex_lock(&rdev->ib_pool.mutex);
-	list_del(&nib->list);
-	INIT_LIST_HEAD(&nib->list);
 	mutex_unlock(&rdev->ib_pool.mutex);
 
 	*ib = nib;
-out:
-	if (r) {
-		radeon_fence_unref(&fence);
-	} else {
-		(*ib)->fence = fence;
-	}
-	return r;
+	return 0;
 }
 
 void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)

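Distilled to its core, the new allocator in radeon_ib_get() is a circular first-fit scan over a fixed pool; starting at head_id (one past the previous allocation) makes it prefer the oldest slots, whose fences are most likely already signaled. A minimal userspace sketch of just that scan, with illustrative names and no locking or fences:

	#include <stdbool.h>

	#define POOL_SIZE 16			/* must be a power of two */

	struct slot { bool free; };
	static struct slot pool[POOL_SIZE];
	static unsigned head_id;		/* slot after the last one handed out */

	/* Returns a slot index, or -1 when every slot is still in flight. */
	static int alloc_slot(void)
	{
		unsigned i = head_id;
		int c;

		for (c = 0; c < POOL_SIZE; c++, i++) {
			i &= POOL_SIZE - 1;	/* cheap wrap-around, hence power of two */
			if (pool[i].free) {
				pool[i].free = false;
				head_id = (i + 1) & (POOL_SIZE - 1);
				return (int)i;
			}
		}
		return -1;
	}
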
@@ -114,18 +101,7 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
 		return;
 	}
 	mutex_lock(&rdev->ib_pool.mutex);
-	if (!list_empty(&tmp->list) && !radeon_fence_signaled(tmp->fence)) {
-		/* IB is scheduled & not signaled don't do anythings */
-		mutex_unlock(&rdev->ib_pool.mutex);
-		return;
-	}
-	list_del(&tmp->list);
-	INIT_LIST_HEAD(&tmp->list);
-	if (tmp->fence)
-		radeon_fence_unref(&tmp->fence);
-
-	tmp->length_dw = 0;
-	clear_bit(tmp->idx, rdev->ib_pool.alloc_bm);
+	tmp->free = true;
 	mutex_unlock(&rdev->ib_pool.mutex);
 }
 

@@ -135,7 +111,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
 
 	if (!ib->length_dw || !rdev->cp.ready) {
 		/* TODO: Nothings in the ib we should report. */
-		DRM_ERROR("radeon: couldn't schedule IB(%lu).\n", ib->idx);
+		DRM_ERROR("radeon: couldn't schedule IB(%u).\n", ib->idx);
 		return -EINVAL;
 	}
 

@@ -148,7 +124,8 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
 	radeon_ring_ib_execute(rdev, ib);
 	radeon_fence_emit(rdev, ib->fence);
 	mutex_lock(&rdev->ib_pool.mutex);
-	list_add_tail(&ib->list, &rdev->ib_pool.scheduled_ibs);
+	/* once scheduled IB is considered free and protected by the fence */
+	ib->free = true;
 	mutex_unlock(&rdev->ib_pool.mutex);
 	radeon_ring_unlock_commit(rdev);
 	return 0;

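The comment above is the heart of the new scheme: a slot may be marked free the moment its work is queued, because the fence emitted just before still guards the memory; radeon_ib_get() (earlier hunk) waits on nib->fence before recycling a slot. The protocol reduced to a sketch, with illustrative types and stand-in helpers (emit_fence/fence_wait are not real kernel calls):

	struct ib_slot {
		bool free;			/* may be handed out again */
		struct fence *fence;		/* signals when the GPU is done */
	};

	static void schedule_ib(struct ib_slot *s)
	{
		emit_fence(s->fence);		/* GPU signals this after consuming s */
		s->free = true;			/* reusable, but only behind the fence */
	}

	static void recycle_ib(struct ib_slot *s)
	{
		if (s->fence)
			fence_wait(s->fence);	/* don't touch s until the GPU is done */
		s->free = false;
	}
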
@@ -164,7 +141,6 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
 	if (rdev->ib_pool.robj)
 		return 0;
 	/* Allocate 1M object buffer */
-	INIT_LIST_HEAD(&rdev->ib_pool.scheduled_ibs);
 	r = radeon_bo_create(rdev, NULL,  RADEON_IB_POOL_SIZE*64*1024,
 			     true, RADEON_GEM_DOMAIN_GTT,
 			     &rdev->ib_pool.robj);

@@ -195,9 +171,9 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
 		rdev->ib_pool.ibs[i].ptr = ptr + offset;
 		rdev->ib_pool.ibs[i].idx = i;
 		rdev->ib_pool.ibs[i].length_dw = 0;
-		INIT_LIST_HEAD(&rdev->ib_pool.ibs[i].list);
+		rdev->ib_pool.ibs[i].free = true;
 	}
-	bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
+	rdev->ib_pool.head_id = 0;
 	rdev->ib_pool.ready = true;
 	DRM_INFO("radeon: ib pool ready.\n");
 	if (radeon_debugfs_ib_init(rdev)) {

@@ -214,7 +190,6 @@ void radeon_ib_pool_fini(struct radeon_device *rdev)
 		return;
 	}
 	mutex_lock(&rdev->ib_pool.mutex);
-	bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
 	if (rdev->ib_pool.robj) {
 		r = radeon_bo_reserve(rdev->ib_pool.robj, false);
 		if (likely(r == 0)) {

@@ -363,7 +338,7 @@ static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
 	if (ib == NULL) {
 		return 0;
 	}
-	seq_printf(m, "IB %04lu\n", ib->idx);
+	seq_printf(m, "IB %04u\n", ib->idx);
 	seq_printf(m, "IB fence %p\n", ib->fence);
 	seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
 	for (i = 0; i < ib->length_dw; i++) {

@@ -348,22 +348,19 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 		 */
 
 		DRM_INFO("It appears like vesafb is loaded. "
-			 "Ignore above error if any. Entering stealth mode.\n");
+			 "Ignore above error if any.\n");
 		ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
 		if (unlikely(ret != 0)) {
 			DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
 			goto out_no_device;
 		}
-		vmw_kms_init(dev_priv);
-		vmw_overlay_init(dev_priv);
-	} else {
+	}
 	ret = vmw_request_device(dev_priv);
 	if (unlikely(ret != 0))
 		goto out_no_device;
 	vmw_kms_init(dev_priv);
 	vmw_overlay_init(dev_priv);
 	vmw_fb_init(dev_priv);
-	}
 
 	dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
 	register_pm_notifier(&dev_priv->pm_nb);

@@ -406,17 +403,15 @@ static int vmw_driver_unload(struct drm_device *dev)
 
 	unregister_pm_notifier(&dev_priv->pm_nb);
 
-	if (!dev_priv->stealth) {
 	vmw_fb_close(dev_priv);
 	vmw_kms_close(dev_priv);
 	vmw_overlay_close(dev_priv);
 	vmw_release_device(dev_priv);
-		pci_release_regions(dev->pdev);
-	} else {
-		vmw_kms_close(dev_priv);
-		vmw_overlay_close(dev_priv);
+	if (dev_priv->stealth)
 		pci_release_region(dev->pdev, 2);
-	}
+	else
+		pci_release_regions(dev->pdev);
 
 	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
 		drm_irq_uninstall(dev_priv->dev);
 	if (dev->devname == vmw_devname)

@@ -585,11 +580,6 @@ static int vmw_master_set(struct drm_device *dev,
 	int ret = 0;
 
 	DRM_INFO("Master set.\n");
-	if (dev_priv->stealth) {
-		ret = vmw_request_device(dev_priv);
-		if (unlikely(ret != 0))
-			return ret;
-	}
 
 	if (active) {
 		BUG_ON(active != &dev_priv->fbdev_master);

@@ -649,17 +639,10 @@ static void vmw_master_drop(struct drm_device *dev,
 
 	ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
 
-	if (dev_priv->stealth) {
-		ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
-		if (unlikely(ret != 0))
-			DRM_ERROR("Unable to clean VRAM on master drop.\n");
-		vmw_release_device(dev_priv);
-	}
 	dev_priv->active_master = &dev_priv->fbdev_master;
 	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
 	ttm_vt_unlock(&dev_priv->fbdev_master.lock);
 
-	if (!dev_priv->stealth)
 	vmw_fb_on(dev_priv);
 }
 

@@ -559,6 +559,9 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
 	info->pixmap.scan_align = 1;
 #endif
 
+	info->aperture_base = vmw_priv->vram_start;
+	info->aperture_size = vmw_priv->vram_size;
+
 	/*
 	 * Dirty & Deferred IO
 	 */

@@ -1161,9 +1161,17 @@ static int i8042_pm_restore(struct device *dev)
 	return 0;
 }
 
+static int i8042_pm_thaw(struct device *dev)
+{
+	i8042_interrupt(0, NULL);
+
+	return 0;
+}
+
 static const struct dev_pm_ops i8042_pm_ops = {
 	.suspend	= i8042_pm_reset,
 	.resume		= i8042_pm_restore,
+	.thaw		= i8042_pm_thaw,
 	.poweroff	= i8042_pm_reset,
 	.restore	= i8042_pm_restore,
 };

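For context, .thaw in dev_pm_ops runs after the hibernation image has been created (or its creation failed), letting a device resume work that was blocked during the freeze; calling the interrupt handler by hand, as above, drains any event that arrived while interrupts were off. A minimal sketch of wiring such a callback, with illustrative driver names:

	static int mydrv_thaw(struct device *dev)
	{
		/* re-poll the hardware once; an edge IRQ may have been missed */
		mydrv_interrupt(0, NULL);	/* hypothetical handler, as in the hunk */
		return 0;
	}

	static const struct dev_pm_ops mydrv_pm_ops = {
		.thaw = mydrv_thaw,
		/* .suspend/.resume etc. as the driver requires */
	};
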
|
@ -720,12 +720,6 @@ static int acpiphp_bus_add(struct acpiphp_func *func)
|
|||||||
-ret_val);
|
-ret_val);
|
||||||
goto acpiphp_bus_add_out;
|
goto acpiphp_bus_add_out;
|
||||||
}
|
}
|
||||||
/*
|
|
||||||
* try to start anyway. We could have failed to add
|
|
||||||
* simply because this bus had previously been added
|
|
||||||
* on another add. Don't bother with the return value
|
|
||||||
* we just keep going.
|
|
||||||
*/
|
|
||||||
ret_val = acpi_bus_start(device);
|
ret_val = acpi_bus_start(device);
|
||||||
|
|
||||||
acpiphp_bus_add_out:
|
acpiphp_bus_add_out:
|
||||||
|
@@ -5771,7 +5771,7 @@ static void thermal_exit(void)
 	case TPACPI_THERMAL_ACPI_TMP07:
 	case TPACPI_THERMAL_ACPI_UPDT:
 		sysfs_remove_group(&tpacpi_sensors_pdev->dev.kobj,
-				   &thermal_temp_input16_group);
+				   &thermal_temp_input8_group);
 		break;
 	case TPACPI_THERMAL_NONE:
 	default:

@@ -376,6 +376,7 @@ struct input_absinfo {
 #define KEY_DISPLAY_OFF		245	/* display device to off state */
 
 #define KEY_WIMAX		246
+#define KEY_RFKILL		247	/* Key that controls all radios */
 
 /* Range 248 - 255 is reserved for special needs of AT keyboard driver */