mirror of https://github.com/edk2-porting/linux-next.git (synced 2024-12-19 02:34:01 +08:00)
Merge tag 'devel-omap-device-for-v3.7' of git://git.kernel.org/pub/scm/linux/kernel/git/tmlind/linux-omap into next/soc

Kevin Hilman <khilman@ti.com>:

Updates for omap_device layer for v3.7. Allows omap_device layer to keep
track of driver bound status in order to make more intelligent decisions
about idling unused devices.

* tag 'devel-omap-device-for-v3.7' of git://git.kernel.org/pub/scm/linux/kernel/git/tmlind/linux-omap:
  ARM: OMAP: omap_device: idle devices with no driver bound
  ARM: OMAP: omap_device: don't attempt late suspend if no driver bound
  ARM: OMAP: omap_device: keep track of driver bound status

+ sync to 3.6-rc5
commit d1226e8f98
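The core idea this merge carries can be read off the omap_device hunks below: a bus notifier records the last BUS_NOTIFY_* event per device, and the late-suspend and late-init paths skip or idle any device whose status is not BUS_NOTIFY_BOUND_DRIVER. A minimal userspace model of that bookkeeping, with illustrative names and values rather than the kernel's own:

#include <stdio.h>

/* Illustrative stand-ins for the BUS_NOTIFY_* / OMAP_DEVICE_STATE_* values. */
enum { BOUND_DRIVER = 1, UNBOUND_DRIVER = 2 };
enum { STATE_ENABLED = 1, STATE_IDLE = 2 };

struct od { unsigned long driver_status; int state; };

/* The notifier just records the last bus event seen for the device... */
static void notifier(struct od *od, unsigned long event)
{
    od->driver_status = event;
}

/* ...and a late pass idles anything still enabled with no driver bound. */
static void late_idle(struct od *od)
{
    if (od->driver_status != BOUND_DRIVER && od->state == STATE_ENABLED) {
        printf("enabled but no driver; idling\n");
        od->state = STATE_IDLE;
    }
}

int main(void)
{
    struct od od = { .driver_status = UNBOUND_DRIVER, .state = STATE_ENABLED };
    notifier(&od, UNBOUND_DRIVER);
    late_idle(&od);
    return 0;
}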
@@ -210,3 +210,15 @@ Users:
        firmware assigned instance number of the PCI
        device that can help in understanding the firmware
        intended order of the PCI device.

What:       /sys/bus/pci/devices/.../d3cold_allowed
Date:       July 2012
Contact:    Huang Ying <ying.huang@intel.com>
Description:
        d3cold_allowed is bit to control whether the corresponding PCI
        device can be put into D3Cold state. If it is cleared, the
        device will never be put into D3Cold state. If it is set, the
        device may be put into D3Cold state if other requirements are
        satisfied too. Reading this attribute will show the current
        value of d3cold_allowed bit. Writing this attribute will set
        the value of d3cold_allowed bit.

@@ -579,7 +579,7 @@ Why: KVM tracepoints provide mostly equivalent information in a much more

----------------------------

What:   at91-mci driver ("CONFIG_MMC_AT91")
When:   3.7
When:   3.8
Why:    There are two mci drivers: at91-mci and atmel-mci. The PDC support
        was added to atmel-mci as a first step to support more chips.
        Then at91-mci was kept only for old IP versions (on at91rm9200 and

Makefile

@@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 6
SUBLEVEL = 0
EXTRAVERSION = -rc4
EXTRAVERSION = -rc5
NAME = Saber-toothed Squirrel

# *DOCUMENTATION*

@@ -6,7 +6,7 @@ config ARM
    select HAVE_DMA_API_DEBUG
    select HAVE_IDE if PCI || ISA || PCMCIA
    select HAVE_DMA_ATTRS
    select HAVE_DMA_CONTIGUOUS if (CPU_V6 || CPU_V6K || CPU_V7)
    select HAVE_DMA_CONTIGUOUS if MMU
    select HAVE_MEMBLOCK
    select RTC_LIB
    select SYS_SUPPORTS_APM_EMULATION

@@ -15,7 +15,7 @@
    compatible = "atmel,at91sam9g25ek", "atmel,at91sam9x5ek", "atmel,at91sam9x5", "atmel,at91sam9";

    chosen {
        bootargs = "128M console=ttyS0,115200 root=/dev/mtdblock1 rw rootfstype=ubifs ubi.mtd=1 root=ubi0:rootfs";
        bootargs = "console=ttyS0,115200 root=/dev/mtdblock1 rw rootfstype=ubifs ubi.mtd=1 root=ubi0:rootfs";
    };

    ahb {

@@ -33,7 +33,7 @@ CONFIG_AEABI=y
CONFIG_FORCE_MAX_ZONEORDER=13
CONFIG_ZBOOT_ROM_TEXT=0x0
CONFIG_ZBOOT_ROM_BSS=0x0
CONFIG_CMDLINE="console=tty0 console=ttySC1,115200 earlyprintk=sh-sci.1,115200 ignore_loglevel root=/dev/nfs ip=dhcp nfsroot=,rsize=4096,wsize=4096"
CONFIG_CMDLINE="console=tty0 console=ttySC1,115200 earlyprintk=sh-sci.1,115200 ignore_loglevel root=/dev/nfs ip=dhcp nfsroot=,rsize=4096,wsize=4096 rw"
CONFIG_CMDLINE_FORCE=y
CONFIG_KEXEC=y
CONFIG_VFP=y

@@ -202,6 +202,13 @@ static inline void dma_free_writecombine(struct device *dev, size_t size,
    return dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
}

/*
 * This can be called during early boot to increase the size of the atomic
 * coherent DMA pool above the default value of 256KiB. It must be called
 * before postcore_initcall.
 */
extern void __init init_dma_coherent_pool_size(unsigned long size);

/*
 * This can be called during boot to increase the size of the consistent
 * DMA region above it's default value of 2MB. It must be called before the
 */
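As a usage sketch for the hook declared above: a platform that needs a larger atomic pool calls it from its early init code, before postcore_initcall. The machine name below is hypothetical; the Kirkwood hunk further down does exactly this with SZ_1M.

/* Hypothetical machine code, assuming a platform whose drivers do atomic
 * coherent allocations; must run before postcore_initcall, e.g. from the
 * machine's .init_early hook, as the comment above requires. */
static void __init foo_init_early(void)
{
    /* grow the atomic coherent pool from the 256 KiB default to 1 MiB */
    init_dma_coherent_pool_size(SZ_1M);
}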
@@ -197,7 +197,7 @@ void __init at91rm9200_timer_init(void)
    at91_st_read(AT91_ST_SR);

    /* Make IRQs happen for the system timer */
    setup_irq(AT91_ID_SYS, &at91rm9200_timer_irq);
    setup_irq(NR_IRQS_LEGACY + AT91_ID_SYS, &at91rm9200_timer_irq);

    /* The 32KiHz "Slow Clock" (tick every 30517.58 nanoseconds) is used
     * directly for the clocksource and all clockevents, after adjusting

@@ -726,6 +726,8 @@ static struct resource rtt_resources[] = {
        .flags = IORESOURCE_MEM,
    }, {
        .flags = IORESOURCE_MEM,
    }, {
        .flags = IORESOURCE_IRQ,
    },
};

@@ -744,10 +746,12 @@ static void __init at91_add_device_rtt_rtc(void)
     * The second resource is needed:
     * GPBR will serve as the storage for RTC time offset
     */
    at91sam9260_rtt_device.num_resources = 2;
    at91sam9260_rtt_device.num_resources = 3;
    rtt_resources[1].start = AT91SAM9260_BASE_GPBR +
                 4 * CONFIG_RTC_DRV_AT91SAM9_GPBR;
    rtt_resources[1].end = rtt_resources[1].start + 3;
    rtt_resources[2].start = NR_IRQS_LEGACY + AT91_ID_SYS;
    rtt_resources[2].end = NR_IRQS_LEGACY + AT91_ID_SYS;
}
#else
static void __init at91_add_device_rtt_rtc(void)

@@ -609,6 +609,8 @@ static struct resource rtt_resources[] = {
        .flags = IORESOURCE_MEM,
    }, {
        .flags = IORESOURCE_MEM,
    }, {
        .flags = IORESOURCE_IRQ,
    }
};

@@ -626,10 +628,12 @@ static void __init at91_add_device_rtt_rtc(void)
     * The second resource is needed:
     * GPBR will serve as the storage for RTC time offset
     */
    at91sam9261_rtt_device.num_resources = 2;
    at91sam9261_rtt_device.num_resources = 3;
    rtt_resources[1].start = AT91SAM9261_BASE_GPBR +
                 4 * CONFIG_RTC_DRV_AT91SAM9_GPBR;
    rtt_resources[1].end = rtt_resources[1].start + 3;
    rtt_resources[2].start = NR_IRQS_LEGACY + AT91_ID_SYS;
    rtt_resources[2].end = NR_IRQS_LEGACY + AT91_ID_SYS;
}
#else
static void __init at91_add_device_rtt_rtc(void)

@@ -990,6 +990,8 @@ static struct resource rtt0_resources[] = {
        .flags = IORESOURCE_MEM,
    }, {
        .flags = IORESOURCE_MEM,
    }, {
        .flags = IORESOURCE_IRQ,
    }
};

@@ -1006,6 +1008,8 @@ static struct resource rtt1_resources[] = {
        .flags = IORESOURCE_MEM,
    }, {
        .flags = IORESOURCE_MEM,
    }, {
        .flags = IORESOURCE_IRQ,
    }
};

@@ -1027,14 +1031,14 @@ static void __init at91_add_device_rtt_rtc(void)
         * The second resource is needed only for the chosen RTT:
         * GPBR will serve as the storage for RTC time offset
         */
        at91sam9263_rtt0_device.num_resources = 2;
        at91sam9263_rtt0_device.num_resources = 3;
        at91sam9263_rtt1_device.num_resources = 1;
        pdev = &at91sam9263_rtt0_device;
        r = rtt0_resources;
        break;
    case 1:
        at91sam9263_rtt0_device.num_resources = 1;
        at91sam9263_rtt1_device.num_resources = 2;
        at91sam9263_rtt1_device.num_resources = 3;
        pdev = &at91sam9263_rtt1_device;
        r = rtt1_resources;
        break;

@@ -1047,6 +1051,8 @@ static void __init at91_add_device_rtt_rtc(void)
    pdev->name = "rtc-at91sam9";
    r[1].start = AT91SAM9263_BASE_GPBR + 4 * CONFIG_RTC_DRV_AT91SAM9_GPBR;
    r[1].end = r[1].start + 3;
    r[2].start = NR_IRQS_LEGACY + AT91_ID_SYS;
    r[2].end = NR_IRQS_LEGACY + AT91_ID_SYS;
}
#else
static void __init at91_add_device_rtt_rtc(void)

@@ -1293,6 +1293,8 @@ static struct resource rtt_resources[] = {
        .flags = IORESOURCE_MEM,
    }, {
        .flags = IORESOURCE_MEM,
    }, {
        .flags = IORESOURCE_IRQ,
    }
};

@@ -1310,10 +1312,12 @@ static void __init at91_add_device_rtt_rtc(void)
     * The second resource is needed:
     * GPBR will serve as the storage for RTC time offset
     */
    at91sam9g45_rtt_device.num_resources = 2;
    at91sam9g45_rtt_device.num_resources = 3;
    rtt_resources[1].start = AT91SAM9G45_BASE_GPBR +
                 4 * CONFIG_RTC_DRV_AT91SAM9_GPBR;
    rtt_resources[1].end = rtt_resources[1].start + 3;
    rtt_resources[2].start = NR_IRQS_LEGACY + AT91_ID_SYS;
    rtt_resources[2].end = NR_IRQS_LEGACY + AT91_ID_SYS;
}
#else
static void __init at91_add_device_rtt_rtc(void)

@@ -688,6 +688,8 @@ static struct resource rtt_resources[] = {
        .flags = IORESOURCE_MEM,
    }, {
        .flags = IORESOURCE_MEM,
    }, {
        .flags = IORESOURCE_IRQ,
    }
};

@@ -705,10 +707,12 @@ static void __init at91_add_device_rtt_rtc(void)
     * The second resource is needed:
     * GPBR will serve as the storage for RTC time offset
     */
    at91sam9rl_rtt_device.num_resources = 2;
    at91sam9rl_rtt_device.num_resources = 3;
    rtt_resources[1].start = AT91SAM9RL_BASE_GPBR +
                 4 * CONFIG_RTC_DRV_AT91SAM9_GPBR;
    rtt_resources[1].end = rtt_resources[1].start + 3;
    rtt_resources[2].start = NR_IRQS_LEGACY + AT91_ID_SYS;
    rtt_resources[2].end = NR_IRQS_LEGACY + AT91_ID_SYS;
}
#else
static void __init at91_add_device_rtt_rtc(void)

@@ -63,6 +63,12 @@ EXPORT_SYMBOL_GPL(at91_pmc_base);

#define cpu_has_300M_plla()    (cpu_is_at91sam9g10())

#define cpu_has_240M_plla()    (cpu_is_at91sam9261() \
                || cpu_is_at91sam9263() \
                || cpu_is_at91sam9rl())

#define cpu_has_210M_plla()    (cpu_is_at91sam9260())

#define cpu_has_pllb()         (!(cpu_is_at91sam9rl() \
                || cpu_is_at91sam9g45() \
                || cpu_is_at91sam9x5() \

@@ -706,6 +712,12 @@ static int __init at91_pmc_init(unsigned long main_clock)
    } else if (cpu_has_800M_plla()) {
        if (plla.rate_hz > 800000000)
            pll_overclock = true;
    } else if (cpu_has_240M_plla()) {
        if (plla.rate_hz > 240000000)
            pll_overclock = true;
    } else if (cpu_has_210M_plla()) {
        if (plla.rate_hz > 210000000)
            pll_overclock = true;
    } else {
        if (plla.rate_hz > 209000000)
            pll_overclock = true;

@@ -17,6 +17,7 @@
#include <linux/sched.h>
#include <asm/irq.h>
#include <asm/mach/irq.h>
#include <asm/system_misc.h>
#include <mach/hardware.h>

#define IRQ_SOURCE(base_addr)   (base_addr + 0x00)

@@ -517,6 +517,13 @@ void __init kirkwood_wdt_init(void)
void __init kirkwood_init_early(void)
{
    orion_time_set_base(TIMER_VIRT_BASE);

    /*
     * Some Kirkwood devices allocate their coherent buffers from atomic
     * context. Increase size of atomic coherent pool to make sure such
     * the allocations won't fail.
     */
    init_dma_coherent_pool_size(SZ_1M);
}

int kirkwood_tclk;

@@ -10,6 +10,7 @@

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sizes.h>
#include <linux/platform_device.h>
#include <linux/mtd/partitions.h>
#include <linux/ata_platform.h>

@@ -520,13 +520,14 @@ static struct platform_device hdmi_lcdc_device = {
};

/* GPIO KEY */
#define GPIO_KEY(c, g, d) { .code = c, .gpio = g, .desc = d, .active_low = 1 }
#define GPIO_KEY(c, g, d, ...) \
    { .code = c, .gpio = g, .desc = d, .active_low = 1, __VA_ARGS__ }

static struct gpio_keys_button gpio_buttons[] = {
    GPIO_KEY(KEY_POWER, GPIO_PORT99,  "SW1"),
    GPIO_KEY(KEY_BACK,  GPIO_PORT100, "SW2"),
    GPIO_KEY(KEY_MENU,  GPIO_PORT97,  "SW3"),
    GPIO_KEY(KEY_HOME,  GPIO_PORT98,  "SW4"),
    GPIO_KEY(KEY_POWER, GPIO_PORT99,  "SW3", .wakeup = 1),
    GPIO_KEY(KEY_BACK,  GPIO_PORT100, "SW4"),
    GPIO_KEY(KEY_MENU,  GPIO_PORT97,  "SW5"),
    GPIO_KEY(KEY_HOME,  GPIO_PORT98,  "SW6"),
};

static struct gpio_keys_platform_data gpio_key_info = {

@@ -901,8 +902,8 @@ static struct platform_device *eva_devices[] __initdata = {
    &camera_device,
    &ceu0_device,
    &fsi_device,
    &fsi_hdmi_device,
    &fsi_wm8978_device,
    &fsi_hdmi_device,
};

static void __init eva_clock_init(void)

@@ -695,6 +695,7 @@ static struct platform_device usbhs0_device = {
 *  - J30 "open"
 *  - modify usbhs1_get_id() USBHS_HOST -> USBHS_GADGET
 *  - add .get_vbus = usbhs_get_vbus in usbhs1_private
 *  - check usbhs0_device(pio)/usbhs1_device(irq) order in mackerel_devices.
 */
#define IRQ8 evt2irq(0x0300)
#define USB_PHY_MODE    (1 << 4)

@@ -1325,8 +1326,8 @@ static struct platform_device *mackerel_devices[] __initdata = {
    &nor_flash_device,
    &smc911x_device,
    &lcdc_device,
    &usbhs1_device,
    &usbhs0_device,
    &usbhs1_device,
    &leds_device,
    &fsi_device,
    &fsi_ak4643_device,

@@ -67,7 +67,7 @@ static struct smsc911x_platform_config smsc911x_platdata = {

static struct platform_device eth_device = {
    .name = "smsc911x",
    .id = 0,
    .id = -1,
    .dev = {
        .platform_data = &smsc911x_platdata,
    },

@@ -259,9 +259,9 @@ static int sh73a0_set_wake(struct irq_data *data, unsigned int on)
    return 0; /* always allow wakeup */
}

#define RELOC_BASE 0x1000
#define RELOC_BASE 0x1200

/* INTCA IRQ pins at INTCS + 0x1000 to make space for GIC+INTC handling */
/* INTCA IRQ pins at INTCS + RELOC_BASE to make space for GIC+INTC handling */
#define INTCS_VECT_RELOC(n, vect)   INTCS_VECT((n), (vect) + RELOC_BASE)

INTC_IRQ_PINS_32(intca_irq_pins, 0xe6900000,
@@ -267,17 +267,19 @@ static void __dma_free_remap(void *cpu_addr, size_t size)
    vunmap(cpu_addr);
}

#define DEFAULT_DMA_COHERENT_POOL_SIZE  SZ_256K

struct dma_pool {
    size_t size;
    spinlock_t lock;
    unsigned long *bitmap;
    unsigned long nr_pages;
    void *vaddr;
    struct page *page;
    struct page **pages;
};

static struct dma_pool atomic_pool = {
    .size = SZ_256K,
    .size = DEFAULT_DMA_COHERENT_POOL_SIZE,
};

static int __init early_coherent_pool(char *p)

@@ -287,6 +289,21 @@ static int __init early_coherent_pool(char *p)
}
early_param("coherent_pool", early_coherent_pool);

void __init init_dma_coherent_pool_size(unsigned long size)
{
    /*
     * Catch any attempt to set the pool size too late.
     */
    BUG_ON(atomic_pool.vaddr);

    /*
     * Set architecture specific coherent pool size only if
     * it has not been changed by kernel command line parameter.
     */
    if (atomic_pool.size == DEFAULT_DMA_COHERENT_POOL_SIZE)
        atomic_pool.size = size;
}

/*
 * Initialise the coherent pool for atomic allocations.
 */

@@ -297,6 +314,7 @@ static int __init atomic_pool_init(void)
    unsigned long nr_pages = pool->size >> PAGE_SHIFT;
    unsigned long *bitmap;
    struct page *page;
    struct page **pages;
    void *ptr;
    int bitmap_size = BITS_TO_LONGS(nr_pages) * sizeof(long);

@@ -304,21 +322,31 @@ static int __init atomic_pool_init(void)
    if (!bitmap)
        goto no_bitmap;

    pages = kzalloc(nr_pages * sizeof(struct page *), GFP_KERNEL);
    if (!pages)
        goto no_pages;

    if (IS_ENABLED(CONFIG_CMA))
        ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page);
    else
        ptr = __alloc_remap_buffer(NULL, pool->size, GFP_KERNEL, prot,
                       &page, NULL);
    if (ptr) {
        int i;

        for (i = 0; i < nr_pages; i++)
            pages[i] = page + i;

        spin_lock_init(&pool->lock);
        pool->vaddr = ptr;
        pool->page = page;
        pool->pages = pages;
        pool->bitmap = bitmap;
        pool->nr_pages = nr_pages;
        pr_info("DMA: preallocated %u KiB pool for atomic coherent allocations\n",
            (unsigned)pool->size / 1024);
        return 0;
    }
no_pages:
    kfree(bitmap);
no_bitmap:
    pr_err("DMA: failed to allocate %u KiB pool for atomic coherent allocation\n",

@@ -443,27 +471,45 @@ static void *__alloc_from_pool(size_t size, struct page **ret_page)
    if (pageno < pool->nr_pages) {
        bitmap_set(pool->bitmap, pageno, count);
        ptr = pool->vaddr + PAGE_SIZE * pageno;
        *ret_page = pool->page + pageno;
        *ret_page = pool->pages[pageno];
    } else {
        pr_err_once("ERROR: %u KiB atomic DMA coherent pool is too small!\n"
                "Please increase it with coherent_pool= kernel parameter!\n",
                (unsigned)pool->size / 1024);
    }
    spin_unlock_irqrestore(&pool->lock, flags);

    return ptr;
}

static bool __in_atomic_pool(void *start, size_t size)
{
    struct dma_pool *pool = &atomic_pool;
    void *end = start + size;
    void *pool_start = pool->vaddr;
    void *pool_end = pool->vaddr + pool->size;

    if (start < pool_start || start > pool_end)
        return false;

    if (end <= pool_end)
        return true;

    WARN(1, "Wrong coherent size(%p-%p) from atomic pool(%p-%p)\n",
         start, end - 1, pool_start, pool_end - 1);

    return false;
}

static int __free_from_pool(void *start, size_t size)
{
    struct dma_pool *pool = &atomic_pool;
    unsigned long pageno, count;
    unsigned long flags;

    if (start < pool->vaddr || start > pool->vaddr + pool->size)
    if (!__in_atomic_pool(start, size))
        return 0;

    if (start + size > pool->vaddr + pool->size) {
        WARN(1, "freeing wrong coherent size from pool\n");
        return 0;
    }

    pageno = (start - pool->vaddr) >> PAGE_SHIFT;
    count = size >> PAGE_SHIFT;

@@ -1090,10 +1136,22 @@ static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t si
    return 0;
}

static struct page **__atomic_get_pages(void *addr)
{
    struct dma_pool *pool = &atomic_pool;
    struct page **pages = pool->pages;
    int offs = (addr - pool->vaddr) >> PAGE_SHIFT;

    return pages + offs;
}

static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs)
{
    struct vm_struct *area;

    if (__in_atomic_pool(cpu_addr, PAGE_SIZE))
        return __atomic_get_pages(cpu_addr);

    if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
        return cpu_addr;

@@ -1103,6 +1161,34 @@ static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs)
    return NULL;
}

static void *__iommu_alloc_atomic(struct device *dev, size_t size,
                  dma_addr_t *handle)
{
    struct page *page;
    void *addr;

    addr = __alloc_from_pool(size, &page);
    if (!addr)
        return NULL;

    *handle = __iommu_create_mapping(dev, &page, size);
    if (*handle == DMA_ERROR_CODE)
        goto err_mapping;

    return addr;

err_mapping:
    __free_from_pool(addr, size);
    return NULL;
}

static void __iommu_free_atomic(struct device *dev, struct page **pages,
                dma_addr_t handle, size_t size)
{
    __iommu_remove_mapping(dev, handle, size);
    __free_from_pool(page_address(pages[0]), size);
}

static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
        dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
{

@@ -1113,6 +1199,9 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
    *handle = DMA_ERROR_CODE;
    size = PAGE_ALIGN(size);

    if (gfp & GFP_ATOMIC)
        return __iommu_alloc_atomic(dev, size, handle);

    pages = __iommu_alloc_buffer(dev, size, gfp);
    if (!pages)
        return NULL;

@@ -1179,6 +1268,11 @@ void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
        return;
    }

    if (__in_atomic_pool(cpu_addr, size)) {
        __iommu_free_atomic(dev, pages, handle, size);
        return;
    }

    if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) {
        unmap_kernel_range((unsigned long)cpu_addr, size);
        vunmap(cpu_addr);
@@ -60,6 +60,7 @@ extern struct dev_pm_domain omap_device_pm_domain;
 * @_dev_wakeup_lat_limit: dev wakeup latency limit in nsec - set by OMAP PM
 * @_state: one of OMAP_DEVICE_STATE_* (see above)
 * @flags: device flags
 * @_driver_status: one of BUS_NOTIFY_*_DRIVER from <linux/device.h>
 *
 * Integrates omap_hwmod data into Linux platform_device.
 *

@@ -73,6 +74,7 @@ struct omap_device {
    struct omap_device_pm_latency *pm_lats;
    u32 dev_wakeup_lat;
    u32 _dev_wakeup_lat_limit;
    unsigned long _driver_status;
    u8 pm_lats_cnt;
    s8 pm_lat_level;
    u8 hwmods_cnt;

@@ -385,17 +385,21 @@ static int _omap_device_notifier_call(struct notifier_block *nb,
                      unsigned long event, void *dev)
{
    struct platform_device *pdev = to_platform_device(dev);
    struct omap_device *od;

    switch (event) {
    case BUS_NOTIFY_ADD_DEVICE:
        if (pdev->dev.of_node)
            omap_device_build_from_dt(pdev);
        break;

    case BUS_NOTIFY_DEL_DEVICE:
        if (pdev->archdata.od)
            omap_device_delete(pdev->archdata.od);
        break;
    case BUS_NOTIFY_ADD_DEVICE:
        if (pdev->dev.of_node)
            omap_device_build_from_dt(pdev);
        /* fall through */
    default:
        od = to_omap_device(pdev);
        if (od)
            od->_driver_status = event;
    }

    return NOTIFY_DONE;

@@ -752,6 +756,10 @@ static int _od_suspend_noirq(struct device *dev)
    struct omap_device *od = to_omap_device(pdev);
    int ret;

    /* Don't attempt late suspend on a driver that is not bound */
    if (od->_driver_status != BUS_NOTIFY_BOUND_DRIVER)
        return 0;

    ret = pm_generic_suspend_noirq(dev);

    if (!ret && !pm_runtime_status_suspended(dev)) {

@@ -1125,3 +1133,41 @@ static int __init omap_device_init(void)
    return 0;
}
core_initcall(omap_device_init);

/**
 * omap_device_late_idle - idle devices without drivers
 * @dev: struct device * associated with omap_device
 * @data: unused
 *
 * Check the driver bound status of this device, and idle it
 * if there is no driver attached.
 */
static int __init omap_device_late_idle(struct device *dev, void *data)
{
    struct platform_device *pdev = to_platform_device(dev);
    struct omap_device *od = to_omap_device(pdev);

    if (!od)
        return 0;

    /*
     * If omap_device state is enabled, but has no driver bound,
     * idle it.
     */
    if (od->_driver_status != BUS_NOTIFY_BOUND_DRIVER) {
        if (od->_state == OMAP_DEVICE_STATE_ENABLED) {
            dev_warn(dev, "%s: enabled but no driver. Idling\n",
                 __func__);
            omap_device_idle(pdev);
        }
    }

    return 0;
}

static int __init omap_device_late_init(void)
{
    bus_for_each_dev(&platform_bus_type, NULL, NULL, omap_device_late_idle);
    return 0;
}
late_initcall(omap_device_late_init);
@@ -386,6 +386,7 @@ extern unsigned long cpuidle_disable;
enum idle_boot_override {IDLE_NO_OVERRIDE = 0, IDLE_POWERSAVE_OFF};

extern int powersave_nap;   /* set if nap mode can be used in idle loop */
extern void power7_nap(void);

#ifdef CONFIG_PSERIES_IDLE
extern void update_smt_snooze_delay(int snooze);

@@ -76,6 +76,7 @@ int main(void)
    DEFINE(SIGSEGV, SIGSEGV);
    DEFINE(NMI_MASK, NMI_MASK);
    DEFINE(THREAD_DSCR, offsetof(struct thread_struct, dscr));
    DEFINE(THREAD_DSCR_INHERIT, offsetof(struct thread_struct, dscr_inherit));
#else
    DEFINE(THREAD_INFO, offsetof(struct task_struct, stack));
#endif /* CONFIG_PPC64 */

@@ -28,6 +28,8 @@ void doorbell_setup_this_cpu(void)

void doorbell_cause_ipi(int cpu, unsigned long data)
{
    /* Order previous accesses vs. msgsnd, which is treated as a store */
    mb();
    ppc_msgsnd(PPC_DBELL, 0, data);
}

@@ -370,6 +370,12 @@ _GLOBAL(ret_from_fork)
    li  r3,0
    b   syscall_exit

    .section ".toc","aw"
DSCR_DEFAULT:
    .tc dscr_default[TC],dscr_default

    .section ".text"

/*
 * This routine switches between two different tasks. The process
 * state of one is saved on its kernel stack. Then the state

@@ -509,9 +515,6 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
    mr  r1,r8       /* start using new stack pointer */
    std r7,PACAKSAVE(r13)

    ld  r6,_CCR(r1)
    mtcrf   0xFF,r6

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
    ld  r0,THREAD_VRSAVE(r4)

@@ -520,14 +523,22 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_PPC64
BEGIN_FTR_SECTION
    lwz r6,THREAD_DSCR_INHERIT(r4)
    ld  r7,DSCR_DEFAULT@toc(2)
    ld  r0,THREAD_DSCR(r4)
    cmpd    r0,r25
    beq 1f
    cmpwi   r6,0
    bne 1f
    ld  r0,0(r7)
1:  cmpd    r0,r25
    beq 2f
    mtspr   SPRN_DSCR,r0
1:
2:
END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
#endif

    ld  r6,_CCR(r1)
    mtcrf   0xFF,r6

    /* r3-r13 are destroyed -- Cort */
    REST_8GPRS(14, r1)
    REST_10GPRS(22, r1)

@@ -186,7 +186,7 @@ hardware_interrupt_hv:
    KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x800)

    MASKABLE_EXCEPTION_PSERIES(0x900, 0x900, decrementer)
    MASKABLE_EXCEPTION_HV(0x980, 0x982, decrementer)
    STD_EXCEPTION_HV(0x980, 0x982, hdecrementer)

    STD_EXCEPTION_PSERIES(0xa00, 0xa00, trap_0a)
    KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xa00)

@@ -486,6 +486,7 @@ machine_check_common:

    STD_EXCEPTION_COMMON_ASYNC(0x500, hardware_interrupt, do_IRQ)
    STD_EXCEPTION_COMMON_ASYNC(0x900, decrementer, .timer_interrupt)
    STD_EXCEPTION_COMMON(0x980, hdecrementer, .hdec_interrupt)
    STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception)
    STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
    STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)

@@ -28,7 +28,9 @@ _GLOBAL(power7_idle)
    lwz r4,ADDROFF(powersave_nap)(r3)
    cmpwi   0,r4,0
    beqlr
    /* fall through */

_GLOBAL(power7_nap)
    /* NAP is a state loss, we create a regs frame on the
     * stack, fill it up with the state we care about and
     * stick a pointer to it in PACAR1. We really only

@@ -802,16 +802,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
#endif /* CONFIG_PPC_STD_MMU_64 */
#ifdef CONFIG_PPC64
    if (cpu_has_feature(CPU_FTR_DSCR)) {
        if (current->thread.dscr_inherit) {
            p->thread.dscr_inherit = 1;
            p->thread.dscr = current->thread.dscr;
        } else if (0 != dscr_default) {
            p->thread.dscr_inherit = 1;
            p->thread.dscr = dscr_default;
        } else {
            p->thread.dscr_inherit = 0;
            p->thread.dscr = 0;
        }
        p->thread.dscr_inherit = current->thread.dscr_inherit;
        p->thread.dscr = current->thread.dscr;
    }
#endif

@@ -198,8 +198,15 @@ void smp_muxed_ipi_message_pass(int cpu, int msg)
    struct cpu_messages *info = &per_cpu(ipi_message, cpu);
    char *message = (char *)&info->messages;

    /*
     * Order previous accesses before accesses in the IPI handler.
     */
    smp_mb();
    message[msg] = 1;
    mb();
    /*
     * cause_ipi functions are required to include a full barrier
     * before doing whatever causes the IPI.
     */
    smp_ops->cause_ipi(cpu, info->data);
}

@@ -211,7 +218,7 @@ irqreturn_t smp_ipi_demux(void)
    mb();   /* order any irq clear */

    do {
        all = xchg_local(&info->messages, 0);
        all = xchg(&info->messages, 0);

#ifdef __BIG_ENDIAN
        if (all & (1 << (24 - 8 * PPC_MSG_CALL_FUNCTION)))
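The barrier pairing introduced above can be modeled with plain C11 atomics. This is an illustrative userspace analogue, not kernel code: the sender's fence plays the role of smp_mb() ordering data writes before the message flag, and the receiver's full-barrier exchange plays the role of xchg() ordering the flag read before the handler's data reads.

#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>

static int payload;             /* data the "IPI handler" must observe */
static atomic_int message;      /* the message flag */

static void *sender(void *arg)
{
    payload = 42;                                   /* write the data first */
    atomic_thread_fence(memory_order_seq_cst);      /* like smp_mb() */
    atomic_store(&message, 1);                      /* then raise the flag */
    return NULL;
}

static void *receiver(void *arg)
{
    while (!atomic_exchange(&message, 0))           /* like xchg(&info->messages, 0) */
        ;
    printf("payload = %d\n", payload);              /* guaranteed to print 42 */
    return NULL;
}

int main(void)
{
    pthread_t s, r;
    pthread_create(&r, NULL, receiver, NULL);
    pthread_create(&s, NULL, sender, NULL);
    pthread_join(s, NULL);
    pthread_join(r, NULL);
    return 0;
}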
@@ -194,6 +194,14 @@ static ssize_t show_dscr_default(struct device *dev,
    return sprintf(buf, "%lx\n", dscr_default);
}

static void update_dscr(void *dummy)
{
    if (!current->thread.dscr_inherit) {
        current->thread.dscr = dscr_default;
        mtspr(SPRN_DSCR, dscr_default);
    }
}

static ssize_t __used store_dscr_default(struct device *dev,
        struct device_attribute *attr, const char *buf,
        size_t count)

@@ -206,6 +214,8 @@ static ssize_t __used store_dscr_default(struct device *dev,
        return -EINVAL;
    dscr_default = val;

    on_each_cpu(update_dscr, NULL, 1);

    return count;
}

@@ -535,6 +535,15 @@ void timer_interrupt(struct pt_regs * regs)
    trace_timer_interrupt_exit(regs);
}

/*
 * Hypervisor decrementer interrupts shouldn't occur but are sometimes
 * left pending on exit from a KVM guest. We don't need to do anything
 * to clear them, as they are edge-triggered.
 */
void hdec_interrupt(struct pt_regs *regs)
{
}

#ifdef CONFIG_SUSPEND
static void generic_suspend_disable_irqs(void)
{

@@ -972,8 +972,9 @@ static int emulate_instruction(struct pt_regs *regs)
            cpu_has_feature(CPU_FTR_DSCR)) {
        PPC_WARN_EMULATED(mtdscr, regs);
        rd = (instword >> 21) & 0x1f;
        mtspr(SPRN_DSCR, regs->gpr[rd]);
        current->thread.dscr = regs->gpr[rd];
        current->thread.dscr_inherit = 1;
        mtspr(SPRN_DSCR, current->thread.dscr);
        return 0;
    }
#endif

@@ -20,7 +20,7 @@ int patch_instruction(unsigned int *addr, unsigned int instr)
{
    int err;

    err = __put_user(instr, addr);
    __put_user_size(instr, addr, 4, err);
    if (err)
        return err;
    asm ("dcbst 0, %0; sync; icbi 0,%0; sync; isync" : : "r" (addr));

@@ -1436,11 +1436,11 @@ static long vphn_get_associativity(unsigned long cpu,

/*
 * Update the node maps and sysfs entries for each cpu whose home node
 * has changed.
 * has changed. Returns 1 when the topology has changed, and 0 otherwise.
 */
int arch_update_cpu_topology(void)
{
    int cpu, nid, old_nid;
    int cpu, nid, old_nid, changed = 0;
    unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0};
    struct device *dev;

@@ -1466,9 +1466,10 @@ int arch_update_cpu_topology(void)
        dev = get_cpu_device(cpu);
        if (dev)
            kobject_uevent(&dev->kobj, KOBJ_CHANGE);
        changed = 1;
    }

    return 1;
    return changed;
}

static void topology_work_fn(struct work_struct *work)

@@ -106,14 +106,6 @@ static void pnv_smp_cpu_kill_self(void)
{
    unsigned int cpu;

    /* If powersave_nap is enabled, use NAP mode, else just
     * spin aimlessly
     */
    if (!powersave_nap) {
        generic_mach_cpu_die();
        return;
    }

    /* Standard hot unplug procedure */
    local_irq_disable();
    idle_task_exit();

@@ -128,7 +120,7 @@ static void pnv_smp_cpu_kill_self(void)
     */
    mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1);
    while (!generic_check_cpu_restart(cpu)) {
        power7_idle();
        power7_nap();
        if (!generic_check_cpu_restart(cpu)) {
            DBG("CPU%d Unexpected exit while offline !\n", cpu);
            /* We may be getting an IPI, so we re-enable

@@ -65,7 +65,11 @@ static inline void icp_hv_set_xirr(unsigned int value)
static inline void icp_hv_set_qirr(int n_cpu , u8 value)
{
    int hw_cpu = get_hard_smp_processor_id(n_cpu);
    long rc = plpar_hcall_norets(H_IPI, hw_cpu, value);
    long rc;

    /* Make sure all previous accesses are ordered before IPI sending */
    mb();
    rc = plpar_hcall_norets(H_IPI, hw_cpu, value);
    if (rc != H_SUCCESS) {
        pr_err("%s: bad return code qirr cpu=%d hw_cpu=%d mfrr=0x%x "
            "returned %ld\n", __func__, n_cpu, hw_cpu, value, rc);

@@ -114,7 +114,7 @@ static void deliver_alarm(void)
    skew += this_tick - last_tick;

    while (skew >= one_tick) {
        alarm_handler(SIGVTALRM, NULL);
        alarm_handler(SIGVTALRM, NULL, NULL);
        skew -= one_tick;
    }

@@ -1283,7 +1283,7 @@ static void xen_flush_tlb_others(const struct cpumask *cpus,
    cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask));

    args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
    if (start != TLB_FLUSH_ALL && (end - start) <= PAGE_SIZE) {
    if (end != TLB_FLUSH_ALL && (end - start) <= PAGE_SIZE) {
        args->op.cmd = MMUEXT_INVLPG_MULTI;
        args->op.arg1.linear_addr = start;
    }

@@ -599,7 +599,7 @@ bool __init early_can_reuse_p2m_middle(unsigned long set_pfn, unsigned long set_
    if (p2m_index(set_pfn))
        return false;

    for (pfn = 0; pfn <= MAX_DOMAIN_PAGES; pfn += P2M_PER_PAGE) {
    for (pfn = 0; pfn < MAX_DOMAIN_PAGES; pfn += P2M_PER_PAGE) {
        topidx = p2m_top_index(pfn);

        if (!p2m_top[topidx])

@@ -250,7 +250,7 @@ int __init dma_declare_contiguous(struct device *dev, unsigned long size,
        return -EINVAL;

    /* Sanitise input arguments */
    alignment = PAGE_SIZE << max(MAX_ORDER, pageblock_order);
    alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
    base = ALIGN(base, alignment);
    size = ALIGN(size, alignment);
    limit &= ~(alignment - 1);

@@ -294,7 +294,7 @@ config GPIO_MAX732X_IRQ

config GPIO_MC9S08DZ60
    bool "MX35 3DS BOARD MC9S08DZ60 GPIO functions"
    depends on I2C && MACH_MX35_3DS
    depends on I2C=y && MACH_MX35_3DS
    help
      Select this to enable the MC9S08DZ60 GPIO driver

@@ -247,9 +247,9 @@ static int __devinit em_gio_irq_domain_init(struct em_gio_priv *p)

    p->irq_base = irq_alloc_descs(pdata->irq_base, 0,
                      pdata->number_of_pins, numa_node_id());
    if (IS_ERR_VALUE(p->irq_base)) {
    if (p->irq_base < 0) {
        dev_err(&pdev->dev, "cannot get irq_desc\n");
        return -ENXIO;
        return p->irq_base;
    }
    pr_debug("gio: hw base = %d, nr = %d, sw base = %d\n",
        pdata->gpio_base, pdata->number_of_pins, p->irq_base);

@@ -170,6 +170,7 @@ static int __devinit rdc321x_gpio_probe(struct platform_device *pdev)
    rdc321x_gpio_dev->reg2_data_base = r->start + 0x4;

    rdc321x_gpio_dev->chip.label = "rdc321x-gpio";
    rdc321x_gpio_dev->chip.owner = THIS_MODULE;
    rdc321x_gpio_dev->chip.direction_input = rdc_gpio_direction_input;
    rdc321x_gpio_dev->chip.direction_output = rdc_gpio_config;
    rdc321x_gpio_dev->chip.get = rdc_gpio_get_value;

@@ -82,7 +82,7 @@ int of_get_named_gpio_flags(struct device_node *np, const char *propname,
    gpiochip_find(&gg_data, of_gpiochip_find_and_xlate);

    of_node_put(gg_data.gpiospec.np);
    pr_debug("%s exited with status %d\n", __func__, ret);
    pr_debug("%s exited with status %d\n", __func__, gg_data.out_gpio);
    return gg_data.out_gpio;
}
EXPORT_SYMBOL(of_get_named_gpio_flags);

@@ -996,7 +996,8 @@ static void hid_process_event(struct hid_device *hid, struct hid_field *field,
    struct hid_driver *hdrv = hid->driver;
    int ret;

    hid_dump_input(hid, usage, value);
    if (!list_empty(&hid->debug_list))
        hid_dump_input(hid, usage, value);

    if (hdrv && hdrv->event && hid_match_usage(hid, usage)) {
        ret = hdrv->event(hid, field, usage, value);

@@ -1558,7 +1559,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
    { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X) },
    { HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) },
    { HID_USB_DEVICE(USB_VENDOR_ID_LCPOWER, USB_DEVICE_ID_LCPOWER_LC1000 ) },
    { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPKBD) },
#if IS_ENABLED(CONFIG_HID_LENOVO_TPKBD)
    { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPKBD) },
#endif
    { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER) },
    { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER) },
    { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER_2) },

@@ -439,7 +439,7 @@ static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev)
    struct dj_report *dj_report;
    int retval;

    dj_report = kzalloc(sizeof(dj_report), GFP_KERNEL);
    dj_report = kzalloc(sizeof(struct dj_report), GFP_KERNEL);
    if (!dj_report)
        return -ENOMEM;
    dj_report->report_id = REPORT_ID_DJ_SHORT;

@@ -456,7 +456,7 @@ static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev,
    struct dj_report *dj_report;
    int retval;

    dj_report = kzalloc(sizeof(dj_report), GFP_KERNEL);
    dj_report = kzalloc(sizeof(struct dj_report), GFP_KERNEL);
    if (!dj_report)
        return -ENOMEM;
    dj_report->report_id = REPORT_ID_DJ_SHORT;
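The two hid-logitech-dj hunks above fix the same classic bug: sizeof(dj_report) measures the pointer variable (4 or 8 bytes), not the structure, so the allocation was far too small. A small standalone demonstration; the struct layout below is made up for illustration, not the driver's real definition:

#include <stdio.h>

struct dj_report { unsigned char report_id; unsigned char data[14]; };

int main(void)
{
    struct dj_report *dj_report = NULL;     /* variable shadows the tag, as in the driver */

    /* On an LP64 system this prints 8 vs 15: the old kzalloc() sized the
     * buffer from the pointer, not from the structure being allocated. */
    printf("sizeof(dj_report)        = %zu\n", sizeof(dj_report));
    printf("sizeof(struct dj_report) = %zu\n", sizeof(struct dj_report));
    return 0;
}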
@@ -70,6 +70,7 @@ static const struct hid_blacklist {
    { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET },
    { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
    { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
    { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET },
    { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NO_INIT_REPORTS },
    { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1, HID_QUIRK_NO_INIT_REPORTS },
    { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN2, HID_QUIRK_NO_INIT_REPORTS },

@@ -358,6 +358,7 @@ static void imx_keypad_inhibit(struct imx_keypad *keypad)
    /* Inhibit KDI and KRI interrupts. */
    reg_val = readw(keypad->mmio_base + KPSR);
    reg_val &= ~(KBD_STAT_KRIE | KBD_STAT_KDIE);
    reg_val |= KBD_STAT_KPKR | KBD_STAT_KPKD;
    writew(reg_val, keypad->mmio_base + KPSR);

    /* Colums as open drain and disable all rows */

@@ -515,7 +516,9 @@ static int __devinit imx_keypad_probe(struct platform_device *pdev)
    input_set_drvdata(input_dev, keypad);

    /* Ensure that the keypad will stay dormant until opened */
    clk_enable(keypad->clk);
    imx_keypad_inhibit(keypad);
    clk_disable(keypad->clk);

    error = request_irq(irq, imx_keypad_irq_handler, 0,
                pdev->name, keypad);

@@ -176,6 +176,20 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
            DMI_MATCH(DMI_PRODUCT_NAME, "Spring Peak"),
        },
    },
    {
        /* Gigabyte T1005 - defines wrong chassis type ("Other") */
        .matches = {
            DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
            DMI_MATCH(DMI_PRODUCT_NAME, "T1005"),
        },
    },
    {
        /* Gigabyte T1005M/P - defines wrong chassis type ("Other") */
        .matches = {
            DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
            DMI_MATCH(DMI_PRODUCT_NAME, "T1005M/P"),
        },
    },
    {
        .matches = {
            DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),

@@ -1848,7 +1848,10 @@ static const struct wacom_features wacom_features_0x2A =
    { "Wacom Intuos5 M", WACOM_PKGLEN_INTUOS, 44704, 27940, 2047,
      63, INTUOS5, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
static const struct wacom_features wacom_features_0xF4 =
    { "Wacom Cintiq 24HD", WACOM_PKGLEN_INTUOS, 104480, 65600, 2047,
    { "Wacom Cintiq 24HD", WACOM_PKGLEN_INTUOS, 104480, 65600, 2047,
      63, WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
static const struct wacom_features wacom_features_0xF8 =
    { "Wacom Cintiq 24HD touch", WACOM_PKGLEN_INTUOS, 104480, 65600, 2047,
      63, WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
static const struct wacom_features wacom_features_0x3F =
    { "Wacom Cintiq 21UX", WACOM_PKGLEN_INTUOS, 87200, 65600, 1023,

@@ -2091,6 +2094,7 @@ const struct usb_device_id wacom_ids[] = {
    { USB_DEVICE_WACOM(0xEF) },
    { USB_DEVICE_WACOM(0x47) },
    { USB_DEVICE_WACOM(0xF4) },
    { USB_DEVICE_WACOM(0xF8) },
    { USB_DEVICE_WACOM(0xFA) },
    { USB_DEVICE_LENOVO(0x6004) },
    { }

@@ -602,6 +602,7 @@ edt_ft5x06_ts_teardown_debugfs(struct edt_ft5x06_ts_data *tsdata)
{
    if (tsdata->debug_dir)
        debugfs_remove_recursive(tsdata->debug_dir);
    kfree(tsdata->raw_buffer);
}

#else

@@ -843,7 +844,6 @@ static int __devexit edt_ft5x06_ts_remove(struct i2c_client *client)
    if (gpio_is_valid(pdata->reset_pin))
        gpio_free(pdata->reset_pin);

    kfree(tsdata->raw_buffer);
    kfree(tsdata);

    return 0;

@@ -1411,7 +1411,8 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
        /* complete ongoing async transfer before issuing discard */
        if (card->host->areq)
            mmc_blk_issue_rw_rq(mq, NULL);
        if (req->cmd_flags & REQ_SECURE)
        if (req->cmd_flags & REQ_SECURE &&
            !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN))
            ret = mmc_blk_issue_secdiscard_rq(mq, req);
        else
            ret = mmc_blk_issue_discard_rq(mq, req);

@@ -1716,6 +1717,7 @@ force_ro_fail:
#define CID_MANFID_SANDISK  0x2
#define CID_MANFID_TOSHIBA  0x11
#define CID_MANFID_MICRON   0x13
#define CID_MANFID_SAMSUNG  0x15

static const struct mmc_fixup blk_fixups[] =
{

@@ -1752,6 +1754,28 @@ static const struct mmc_fixup blk_fixups[] =
    MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc,
          MMC_QUIRK_LONG_READ_TIME),

    /*
     * On these Samsung MoviNAND parts, performing secure erase or
     * secure trim can result in unrecoverable corruption due to a
     * firmware bug.
     */
    MMC_FIXUP("M8G2FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
          MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
    MMC_FIXUP("MAG4FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
          MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
    MMC_FIXUP("MBG8FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
          MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
    MMC_FIXUP("MCGAFA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
          MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
    MMC_FIXUP("VAL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
          MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
    MMC_FIXUP("VYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
          MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
    MMC_FIXUP("KYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
          MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
    MMC_FIXUP("VZL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
          MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),

    END_FIXUP
};

@@ -81,6 +81,7 @@ struct atmel_mci_caps {
    bool has_bad_data_ordering;
    bool need_reset_after_xfer;
    bool need_blksz_mul_4;
    bool need_notbusy_for_read_ops;
};

struct atmel_mci_dma {

@@ -1625,7 +1626,8 @@ static void atmci_tasklet_func(unsigned long priv)
                __func__);
            atmci_set_completed(host, EVENT_XFER_COMPLETE);

            if (host->data->flags & MMC_DATA_WRITE) {
            if (host->caps.need_notbusy_for_read_ops ||
                (host->data->flags & MMC_DATA_WRITE)) {
                atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
                state = STATE_WAITING_NOTBUSY;
            } else if (host->mrq->stop) {

@@ -2218,6 +2220,7 @@ static void __init atmci_get_cap(struct atmel_mci *host)
    host->caps.has_bad_data_ordering = 1;
    host->caps.need_reset_after_xfer = 1;
    host->caps.need_blksz_mul_4 = 1;
    host->caps.need_notbusy_for_read_ops = 0;

    /* keep only major version number */
    switch (version & 0xf00) {

@@ -2238,6 +2241,7 @@ static void __init atmci_get_cap(struct atmel_mci *host)
    case 0x200:
        host->caps.has_rwproof = 1;
        host->caps.need_blksz_mul_4 = 0;
        host->caps.need_notbusy_for_read_ops = 1;
    case 0x100:
        host->caps.has_bad_data_ordering = 0;
        host->caps.need_reset_after_xfer = 0;

@@ -49,13 +49,6 @@
#define bfin_write_SDH_CFG  bfin_write_RSI_CFG
#endif

struct dma_desc_array {
    unsigned long   start_addr;
    unsigned short  cfg;
    unsigned short  x_count;
    short           x_modify;
} __packed;

struct sdh_host {
    struct mmc_host *mmc;
    spinlock_t      lock;

@@ -627,6 +627,7 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot)
{
    struct dw_mci *host = slot->host;
    u32 div;
    u32 clk_en_a;

    if (slot->clock != host->current_speed) {
        div = host->bus_hz / slot->clock;

@@ -659,9 +660,11 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot)
        mci_send_cmd(slot,
                 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);

        /* enable clock */
        mci_writel(host, CLKENA, ((SDMMC_CLKEN_ENABLE |
               SDMMC_CLKEN_LOW_PWR) << slot->id));
        /* enable clock; only low power if no SDIO */
        clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
        if (!(mci_readl(host, INTMASK) & SDMMC_INT_SDIO(slot->id)))
            clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
        mci_writel(host, CLKENA, clk_en_a);

        /* inform CIU */
        mci_send_cmd(slot,

@@ -862,6 +865,30 @@ static int dw_mci_get_cd(struct mmc_host *mmc)
    return present;
}

/*
 * Disable lower power mode.
 *
 * Low power mode will stop the card clock when idle. According to the
 * description of the CLKENA register we should disable low power mode
 * for SDIO cards if we need SDIO interrupts to work.
 *
 * This function is fast if low power mode is already disabled.
 */
static void dw_mci_disable_low_power(struct dw_mci_slot *slot)
{
    struct dw_mci *host = slot->host;
    u32 clk_en_a;
    const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;

    clk_en_a = mci_readl(host, CLKENA);

    if (clk_en_a & clken_low_pwr) {
        mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr);
        mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
                 SDMMC_CMD_PRV_DAT_WAIT, 0);
    }
}

static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
{
    struct dw_mci_slot *slot = mmc_priv(mmc);

@@ -871,6 +898,14 @@ static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
    /* Enable/disable Slot Specific SDIO interrupt */
    int_mask = mci_readl(host, INTMASK);
    if (enb) {
        /*
         * Turn off low power mode if it was enabled. This is a bit of
         * a heavy operation and we disable / enable IRQs a lot, so
         * we'll leave low power mode disabled and it will get
         * re-enabled again in dw_mci_setup_bus().
         */
        dw_mci_disable_low_power(slot);

        mci_writel(host, INTMASK,
               (int_mask | SDMMC_INT_SDIO(slot->id)));
    } else {

@@ -1429,22 +1464,10 @@ static void dw_mci_read_data_pio(struct dw_mci *host)
            nbytes += len;
            remain -= len;
        } while (remain);
        sg_miter->consumed = offset;

        sg_miter->consumed = offset;
        status = mci_readl(host, MINTSTS);
        mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
        if (status & DW_MCI_DATA_ERROR_FLAGS) {
            host->data_status = status;
            data->bytes_xfered += nbytes;
            sg_miter_stop(sg_miter);
            host->sg = NULL;
            smp_wmb();

            set_bit(EVENT_DATA_ERROR, &host->pending_events);

            tasklet_schedule(&host->tasklet);
            return;
        }
    } while (status & SDMMC_INT_RXDR); /*if the RXDR is ready read again*/
    data->bytes_xfered += nbytes;

@@ -1497,23 +1520,10 @@ static void dw_mci_write_data_pio(struct dw_mci *host)
            nbytes += len;
            remain -= len;
        } while (remain);
        sg_miter->consumed = offset;

        sg_miter->consumed = offset;
        status = mci_readl(host, MINTSTS);
        mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
        if (status & DW_MCI_DATA_ERROR_FLAGS) {
            host->data_status = status;
            data->bytes_xfered += nbytes;
            sg_miter_stop(sg_miter);
            host->sg = NULL;

            smp_wmb();

            set_bit(EVENT_DATA_ERROR, &host->pending_events);

            tasklet_schedule(&host->tasklet);
            return;
        }
    } while (status & SDMMC_INT_TXDR); /* if TXDR write again */
    data->bytes_xfered += nbytes;

@@ -1547,12 +1557,11 @@ static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
{
    struct dw_mci *host = dev_id;
    u32 status, pending;
    u32 pending;
    unsigned int pass_count = 0;
    int i;

    do {
        status = mci_readl(host, RINTSTS);
        pending = mci_readl(host, MINTSTS); /* read-only mask reg */

        /*

@@ -1570,7 +1579,7 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)

        if (pending & DW_MCI_CMD_ERROR_FLAGS) {
            mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
            host->cmd_status = status;
            host->cmd_status = pending;
            smp_wmb();
            set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
        }

@@ -1578,18 +1587,16 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
        if (pending & DW_MCI_DATA_ERROR_FLAGS) {
            /* if there is an error report DATA_ERROR */
            mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
            host->data_status = status;
            host->data_status = pending;
            smp_wmb();
            set_bit(EVENT_DATA_ERROR, &host->pending_events);
            if (!(pending & (SDMMC_INT_DTO | SDMMC_INT_DCRC |
                     SDMMC_INT_SBE | SDMMC_INT_EBE)))
                tasklet_schedule(&host->tasklet);
            tasklet_schedule(&host->tasklet);
        }

        if (pending & SDMMC_INT_DATA_OVER) {
            mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
            if (!host->data_status)
                host->data_status = status;
                host->data_status = pending;
            smp_wmb();
            if (host->dir_status == DW_MCI_RECV_STATUS) {
                if (host->sg != NULL)

@@ -1613,7 +1620,7 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)

        if (pending & SDMMC_INT_CMD_DONE) {
            mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
            dw_mci_cmd_interrupt(host, status);
            dw_mci_cmd_interrupt(host, pending);
        }

        if (pending & SDMMC_INT_CD) {

@@ -285,11 +285,11 @@ static irqreturn_t mxs_mmc_irq_handler(int irq, void *dev_id)
    writel(stat & MXS_MMC_IRQ_BITS,
           host->base + HW_SSP_CTRL1(host) + STMP_OFFSET_REG_CLR);

    spin_unlock(&host->lock);

    if ((stat & BM_SSP_CTRL1_SDIO_IRQ) && (stat & BM_SSP_CTRL1_SDIO_IRQ_EN))
        mmc_signal_sdio_irq(host->mmc);

    spin_unlock(&host->lock);

    if (stat & BM_SSP_CTRL1_RESP_TIMEOUT_IRQ)
        cmd->error = -ETIMEDOUT;
    else if (stat & BM_SSP_CTRL1_RESP_ERR_IRQ)

@@ -644,11 +644,6 @@ static void mxs_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
               host->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
        writel(BM_SSP_CTRL1_SDIO_IRQ_EN,
               host->base + HW_SSP_CTRL1(host) + STMP_OFFSET_REG_SET);

        if (readl(host->base + HW_SSP_STATUS(host)) &
            BM_SSP_STATUS_SDIO_IRQ)
            mmc_signal_sdio_irq(host->mmc);

    } else {
        writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK,
               host->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);

@@ -657,6 +652,11 @@ static void mxs_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
    }

    spin_unlock_irqrestore(&host->lock, flags);

    if (enable && readl(host->base + HW_SSP_STATUS(host)) &
        BM_SSP_STATUS_SDIO_IRQ)
        mmc_signal_sdio_irq(host->mmc);

}

static const struct mmc_host_ops mxs_mmc_ops = {

@@ -668,7 +668,7 @@ mmc_omap_clk_timer(unsigned long data)
static void
mmc_omap_xfer_data(struct mmc_omap_host *host, int write)
{
    int n;
    int n, nwords;

    if (host->buffer_bytes_left == 0) {
        host->sg_idx++;

@@ -678,15 +678,23 @@ mmc_omap_xfer_data(struct mmc_omap_host *host, int write)
        n = 64;
    if (n > host->buffer_bytes_left)
        n = host->buffer_bytes_left;

    nwords = n / 2;
    nwords += n & 1; /* handle odd number of bytes to transfer */

    host->buffer_bytes_left -= n;
    host->total_bytes_left -= n;
    host->data->bytes_xfered += n;

    if (write) {
        __raw_writesw(host->virt_base + OMAP_MMC_REG(host, DATA), host->buffer, n);
        __raw_writesw(host->virt_base + OMAP_MMC_REG(host, DATA),
                  host->buffer, nwords);
    } else {
        __raw_readsw(host->virt_base + OMAP_MMC_REG(host, DATA), host->buffer, n);
        __raw_readsw(host->virt_base + OMAP_MMC_REG(host, DATA),
                 host->buffer, nwords);
    }

    host->buffer += nwords;
}

static inline void mmc_omap_report_irq(u16 status)
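The nwords arithmetic above converts a byte count into a count of 16-bit FIFO words for __raw_writesw()/__raw_readsw(), rounding up so an odd trailing byte is still transferred. A quick standalone check of that arithmetic:

#include <stdio.h>

int main(void)
{
    /* Same computation as the hunk above: nwords = n / 2; nwords += n & 1;
     * which is equivalent to (n + 1) / 2. */
    for (int n = 63; n <= 65; n++) {
        int nwords = n / 2 + (n & 1);
        printf("n=%d bytes -> %d words\n", n, nwords);  /* 63->32, 64->32, 65->33 */
    }
    return 0;
}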
@ -48,14 +48,14 @@ static inline void esdhc_set_clock(struct sdhci_host *host, unsigned int clock)
|
||||
int div = 1;
|
||||
u32 temp;
|
||||
|
||||
if (clock == 0)
|
||||
goto out;
|
||||
|
||||
temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
|
||||
temp &= ~(ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN
|
||||
| ESDHC_CLOCK_MASK);
|
||||
sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
|
||||
|
||||
if (clock == 0)
|
||||
goto out;
|
||||
|
||||
while (host->max_clk / pre_div / 16 > clock && pre_div < 256)
|
||||
pre_div *= 2;
|
||||
|
||||
|
@@ -340,7 +340,7 @@ retry:
 	 * of this LEB as it will be deleted and freed in 'ubi_add_to_av()'.
 	 */
 	err = ubi_add_to_av(ubi, ai, new_aeb->pnum, new_aeb->ec, vid_hdr, 0);
-	kfree(new_aeb);
+	kmem_cache_free(ai->aeb_slab_cache, new_aeb);
 	ubi_free_vid_hdr(ubi, vid_hdr);
 	return err;

@@ -353,7 +353,7 @@ write_error:
 		list_add(&new_aeb->u.list, &ai->erase);
 		goto retry;
 	}
-	kfree(new_aeb);
+	kmem_cache_free(ai->aeb_slab_cache, new_aeb);
 out_free:
 	ubi_free_vid_hdr(ubi, vid_hdr);
 	return err;
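new_aeb is allocated from ai->aeb_slab_cache, so it must be returned with kmem_cache_free() rather than kfree(): memory must always go back through the allocator that handed it out. A userspace sketch of the same pairing rule with a toy pool allocator (all names invented):

#include <stdio.h>
#include <stdlib.h>

/* Toy fixed-size pool standing in for a kmem_cache. */
static char pool[16][64];
static int next_free;

static void *pool_alloc(void)
{
	return next_free < 16 ? pool[next_free++] : NULL;
}

static void pool_free(void *p)
{
	/* a real pool recycles the slot; a no-op is enough here */
	(void)p;
}

int main(void)
{
	void *obj = pool_alloc();

	/* free(obj) would be undefined behaviour: obj never came
	 * from malloc(). Release it through the pool instead. */
	pool_free(obj);

	void *buf = malloc(64);
	free(buf); /* malloc() pairs with free() */
	return 0;
}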
@@ -109,7 +109,9 @@ static int sp_probe(struct platform_device *pdev)
 	priv = netdev_priv(dev);
 
 	dev->irq = res_irq->start;
-	priv->irq_flags = res_irq->flags & (IRQF_TRIGGER_MASK | IRQF_SHARED);
+	priv->irq_flags = res_irq->flags & IRQF_TRIGGER_MASK;
+	if (res_irq->flags & IORESOURCE_IRQ_SHAREABLE)
+		priv->irq_flags |= IRQF_SHARED;
 	priv->reg_base = addr;
 	/* The CAN clock frequency is half the oscillator clock frequency */
 	priv->can.clock.freq = pdata->osc_freq / 2;
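The sp_probe() fix stops masking IORESOURCE_IRQ_* resource flags with IRQF_* request flags; the two are distinct bit namespaces, and only an explicit translation of the shareable bit is correct. A sketch of translating between two flag spaces instead of mixing them (constants invented for the example):

#include <stdio.h>

/* Two unrelated flag namespaces that must not be mixed. */
#define RES_TRIGGER_MASK 0x0f
#define RES_SHAREABLE    0x10

#define REQ_TRIGGER_MASK 0x0f
#define REQ_SHARED       0x80

static unsigned int res_to_req(unsigned int res)
{
	/* trigger bits happen to share a layout in this example */
	unsigned int req = res & RES_TRIGGER_MASK;

	if (res & RES_SHAREABLE) /* translate, don't mask */
		req |= REQ_SHARED;
	return req;
}

int main(void)
{
	printf("0x%x\n", res_to_req(RES_SHAREABLE | 0x3)); /* 0x83 */
	return 0;
}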
@@ -150,7 +150,7 @@ int softing_load_fw(const char *file, struct softing *card,
 	const uint8_t *mem, *end, *dat;
 	uint16_t type, len;
 	uint32_t addr;
-	uint8_t *buf = NULL;
+	uint8_t *buf = NULL, *new_buf;
 	int buflen = 0;
 	int8_t type_end = 0;

@@ -199,11 +199,12 @@ int softing_load_fw(const char *file, struct softing *card,
 		if (len > buflen) {
 			/* align buflen */
 			buflen = (len + (1024-1)) & ~(1024-1);
-			buf = krealloc(buf, buflen, GFP_KERNEL);
-			if (!buf) {
+			new_buf = krealloc(buf, buflen, GFP_KERNEL);
+			if (!new_buf) {
 				ret = -ENOMEM;
 				goto failed;
 			}
+			buf = new_buf;
 		}
 		/* verify record data */
 		memcpy_fromio(buf, &dpram[addr + offset], len);
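The softing change fixes the classic realloc leak: writing krealloc()'s result straight back into buf discards the only pointer to the old allocation when the call fails and returns NULL. The userspace idiom is the same, a temporary pointer, as in this minimal sketch:

#include <stdlib.h>
#include <string.h>

/* Grow *bufp to at least len bytes; on failure the original
 * buffer stays intact and is still owned by the caller. */
static int grow(char **bufp, size_t len)
{
	char *new_buf = realloc(*bufp, len);

	if (!new_buf)
		return -1; /* *bufp is still valid, not leaked */
	*bufp = new_buf;
	return 0;
}

int main(void)
{
	char *buf = NULL;

	if (grow(&buf, 1024) == 0)
		memset(buf, 0, 1024);
	free(buf);
	return 0;
}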
@@ -1708,9 +1708,6 @@ struct bnx2x_func_init_params {
 			continue;		\
 		else
 
-#define for_each_napi_rx_queue(bp, var) \
-	for ((var) = 0; (var) < bp->num_napi_queues; (var)++)
-
 /* Skip OOO FP */
 #define for_each_tx_queue(bp, var) \
 	for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \

@@ -2046,6 +2046,8 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 	 */
 	bnx2x_setup_tc(bp->dev, bp->max_cos);
 
+	/* Add all NAPI objects */
+	bnx2x_add_all_napi(bp);
 	bnx2x_napi_enable(bp);
 
 	/* set pf load just before approaching the MCP */

@@ -2408,6 +2410,8 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
 
 	/* Disable HW interrupts, NAPI */
 	bnx2x_netif_stop(bp, 1);
+	/* Delete all NAPI objects */
+	bnx2x_del_all_napi(bp);
 
 	/* Release IRQs */
 	bnx2x_free_irq(bp);

@@ -792,7 +792,7 @@ static inline void bnx2x_add_all_napi(struct bnx2x *bp)
 	bp->num_napi_queues = bp->num_queues;
 
 	/* Add NAPI objects */
-	for_each_napi_rx_queue(bp, i)
+	for_each_rx_queue(bp, i)
 		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
 			       bnx2x_poll, BNX2X_NAPI_WEIGHT);
 }

@@ -801,7 +801,7 @@ static inline void bnx2x_del_all_napi(struct bnx2x *bp)
 {
 	int i;
 
-	for_each_napi_rx_queue(bp, i)
+	for_each_rx_queue(bp, i)
 		netif_napi_del(&bnx2x_fp(bp, i, napi));
 }
 

@@ -2888,11 +2888,9 @@ static void bnx2x_get_channels(struct net_device *dev,
  */
 static void bnx2x_change_num_queues(struct bnx2x *bp, int num_rss)
 {
-	bnx2x_del_all_napi(bp);
 	bnx2x_disable_msi(bp);
 	BNX2X_NUM_QUEUES(bp) = num_rss + NON_ETH_CONTEXT_USE;
 	bnx2x_set_int_mode(bp);
-	bnx2x_add_all_napi(bp);
 }
 
 /**

@@ -8427,6 +8427,8 @@ unload_error:
 
 	/* Disable HW interrupts, NAPI */
 	bnx2x_netif_stop(bp, 1);
+	/* Delete all NAPI objects */
+	bnx2x_del_all_napi(bp);
 
 	/* Release IRQs */
 	bnx2x_free_irq(bp);

@@ -11229,10 +11231,12 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 static void poll_bnx2x(struct net_device *dev)
 {
 	struct bnx2x *bp = netdev_priv(dev);
+	int i;
 
-	disable_irq(bp->pdev->irq);
-	bnx2x_interrupt(bp->pdev->irq, dev);
-	enable_irq(bp->pdev->irq);
+	for_each_eth_queue(bp, i) {
+		struct bnx2x_fastpath *fp = &bp->fp[i];
+		napi_schedule(&bnx2x_fp(bp, fp->index, napi));
+	}
 }
 #endif

@@ -11899,9 +11903,6 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
 	 */
 	bnx2x_set_int_mode(bp);
 
-	/* Add all NAPI objects */
-	bnx2x_add_all_napi(bp);
-
 	rc = register_netdev(dev);
 	if (rc) {
 		dev_err(&pdev->dev, "Cannot register net device\n");

@@ -11976,9 +11977,6 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
 
 	unregister_netdev(dev);
 
-	/* Delete all NAPI objects */
-	bnx2x_del_all_napi(bp);
-
 	/* Power on: we can't let PCI layer write to us while we are in D3 */
 	bnx2x_set_power_state(bp, PCI_D0);
 

@@ -12025,6 +12023,8 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
 	bnx2x_tx_disable(bp);
 
 	bnx2x_netif_stop(bp, 0);
+	/* Delete all NAPI objects */
+	bnx2x_del_all_napi(bp);
 
 	del_timer_sync(&bp->timer);
 
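Taken together, the bnx2x hunks move NAPI object creation and deletion from PCI probe/remove into nic_load/nic_unload, so the objects exist only while the interface is up and can be rebuilt when the queue count changes; poll_bnx2x() likewise schedules the per-queue NAPI contexts instead of invoking the interrupt handler directly. A rough userspace sketch of that lifecycle shift, with entirely invented names:

#include <stdio.h>

struct queue { int polling; };
struct nic { struct queue q[4]; int nqueues; };

/* netif_napi_add() analogue: create polling state at open time */
static void nic_open(struct nic *n)
{
	for (int i = 0; i < n->nqueues; i++)
		n->q[i].polling = 1;
}

/* netif_napi_del() analogue: tear it down at close time */
static void nic_close(struct nic *n)
{
	for (int i = 0; i < n->nqueues; i++)
		n->q[i].polling = 0;
}

int main(void)
{
	struct nic n = { .nqueues = 4 };

	nic_open(&n);	/* state exists only while the device is up */
	/* ... queue count may change while closed, then reopen ... */
	nic_close(&n);
	printf("q0 polling: %d\n", n.q[0].polling);
	return 0;
}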
@@ -1243,6 +1243,7 @@ static void set_multicast_list(struct net_device *dev)
 {
 	struct net_local *lp = netdev_priv(dev);
 	unsigned long flags;
+	u16 cfg;
 
 	spin_lock_irqsave(&lp->lock, flags);
 	if (dev->flags & IFF_PROMISC)

@@ -1260,11 +1261,10 @@ static void set_multicast_list(struct net_device *dev)
 	/* in promiscuous mode, we accept errored packets,
 	 * so we have to enable interrupts on them also
 	 */
-	writereg(dev, PP_RxCFG,
-		 (lp->curr_rx_cfg |
-		  (lp->rx_mode == RX_ALL_ACCEPT)
-		  ? (RX_CRC_ERROR_ENBL | RX_RUNT_ENBL | RX_EXTRA_DATA_ENBL)
-		  : 0));
+	cfg = lp->curr_rx_cfg;
+	if (lp->rx_mode == RX_ALL_ACCEPT)
+		cfg |= RX_CRC_ERROR_ENBL | RX_RUNT_ENBL | RX_EXTRA_DATA_ENBL;
+	writereg(dev, PP_RxCFG, cfg);
 	spin_unlock_irqrestore(&lp->lock, flags);
 }
@@ -259,7 +259,7 @@ int be_process_mcc(struct be_adapter *adapter)
 	int num = 0, status = 0;
 	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
 
-	spin_lock_bh(&adapter->mcc_cq_lock);
+	spin_lock(&adapter->mcc_cq_lock);
 	while ((compl = be_mcc_compl_get(adapter))) {
 		if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
 			/* Interpret flags as an async trailer */

@@ -280,7 +280,7 @@ int be_process_mcc(struct be_adapter *adapter)
 	if (num)
 		be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num);
 
-	spin_unlock_bh(&adapter->mcc_cq_lock);
+	spin_unlock(&adapter->mcc_cq_lock);
 	return status;
 }

@@ -295,7 +295,9 @@ static int be_mcc_wait_compl(struct be_adapter *adapter)
 		if (be_error(adapter))
 			return -EIO;
 
+		local_bh_disable();
 		status = be_process_mcc(adapter);
+		local_bh_enable();
 
 		if (atomic_read(&mcc_obj->q.used) == 0)
 			break;

@@ -3763,7 +3763,9 @@ static void be_worker(struct work_struct *work)
 	/* when interrupts are not yet enabled, just reap any pending
 	 * mcc completions */
 	if (!netif_running(adapter->netdev)) {
+		local_bh_disable();
 		be_process_mcc(adapter);
+		local_bh_enable();
 		goto reschedule;
 	}
 
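be_process_mcc() previously took the lock with spin_lock_bh() itself; the hunks switch it to a plain spin_lock() and make each process-context caller bracket the call with local_bh_disable()/local_bh_enable(). The general shape, a core routine that assumes its caller establishes the execution context, can be sketched in userspace terms (names invented):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;

/* Core worker: takes only its own fine-grained lock and assumes the
 * caller has already established the right execution context. */
static int process_queue(void)
{
	pthread_mutex_lock(&q_lock);
	/* ... drain completion entries ... */
	pthread_mutex_unlock(&q_lock);
	return 0;
}

/* Stand-ins for local_bh_disable()/local_bh_enable(). */
static void enter_restricted_context(void) { puts("context on"); }
static void leave_restricted_context(void) { puts("context off"); }

int main(void)
{
	/* A process-context caller brackets the call itself. */
	enter_restricted_context();
	process_queue();
	leave_restricted_context();
	return 0;
}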
@@ -1041,7 +1041,7 @@ static int gfar_probe(struct platform_device *ofdev)
 
 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
 		dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
-		dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+		dev->features |= NETIF_F_HW_VLAN_RX;
 	}
 
 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
@@ -310,6 +310,7 @@ struct e1000_adapter {
 	 */
 	struct e1000_ring *tx_ring /* One per active queue */
 						____cacheline_aligned_in_smp;
+	u32 tx_fifo_limit;
 
 	struct napi_struct napi;
 

@@ -3516,6 +3516,15 @@ void e1000e_reset(struct e1000_adapter *adapter)
 		break;
 	}
 
+	/*
+	 * Alignment of Tx data is on an arbitrary byte boundary with the
+	 * maximum size per Tx descriptor limited only to the transmit
+	 * allocation of the packet buffer minus 96 bytes with an upper
+	 * limit of 24KB due to receive synchronization limitations.
+	 */
+	adapter->tx_fifo_limit = min_t(u32, ((er32(PBA) >> 16) << 10) - 96,
+				       24 << 10);
+
 	/*
 	 * Disable Adaptive Interrupt Moderation if 2 full packets cannot
 	 * fit in receive buffer.

@@ -4785,12 +4794,9 @@ static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb)
 	return 1;
 }
 
-#define E1000_MAX_PER_TXD	8192
-#define E1000_MAX_TXD_PWR	12
-
 static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb,
 			unsigned int first, unsigned int max_per_txd,
-			unsigned int nr_frags, unsigned int mss)
+			unsigned int nr_frags)
 {
 	struct e1000_adapter *adapter = tx_ring->adapter;
 	struct pci_dev *pdev = adapter->pdev;

@@ -5023,20 +5029,19 @@ static int __e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size)
 
 static int e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size)
 {
+	BUG_ON(size > tx_ring->count);
+
 	if (e1000_desc_unused(tx_ring) >= size)
 		return 0;
 	return __e1000_maybe_stop_tx(tx_ring, size);
 }
 
-#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1)
 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 				    struct net_device *netdev)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct e1000_ring *tx_ring = adapter->tx_ring;
 	unsigned int first;
-	unsigned int max_per_txd = E1000_MAX_PER_TXD;
-	unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
 	unsigned int tx_flags = 0;
 	unsigned int len = skb_headlen(skb);
 	unsigned int nr_frags;

@@ -5056,18 +5061,8 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 	}
 
 	mss = skb_shinfo(skb)->gso_size;
-	/*
-	 * The controller does a simple calculation to
-	 * make sure there is enough room in the FIFO before
-	 * initiating the DMA for each buffer.  The calc is:
-	 * 4 = ceil(buffer len/mss).  To make sure we don't
-	 * overrun the FIFO, adjust the max buffer len if mss
-	 * drops.
-	 */
 	if (mss) {
 		u8 hdr_len;
-		max_per_txd = min(mss << 2, max_per_txd);
-		max_txd_pwr = fls(max_per_txd) - 1;
 
 		/*
 		 * TSO Workaround for 82571/2/3 Controllers -- if skb->data

@@ -5097,12 +5092,12 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 		count++;
 	count++;
 
-	count += TXD_USE_COUNT(len, max_txd_pwr);
+	count += DIV_ROUND_UP(len, adapter->tx_fifo_limit);
 
 	nr_frags = skb_shinfo(skb)->nr_frags;
 	for (f = 0; f < nr_frags; f++)
-		count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]),
-				       max_txd_pwr);
+		count += DIV_ROUND_UP(skb_frag_size(&skb_shinfo(skb)->frags[f]),
+				      adapter->tx_fifo_limit);
 
 	if (adapter->hw.mac.tx_pkt_filtering)
 		e1000_transfer_dhcp_info(adapter, skb);

@@ -5144,15 +5139,18 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 		tx_flags |= E1000_TX_FLAGS_NO_FCS;
 
 	/* if count is 0 then mapping error has occurred */
-	count = e1000_tx_map(tx_ring, skb, first, max_per_txd, nr_frags, mss);
+	count = e1000_tx_map(tx_ring, skb, first, adapter->tx_fifo_limit,
+			     nr_frags);
 	if (count) {
 		skb_tx_timestamp(skb);
 
 		netdev_sent_queue(netdev, skb->len);
 		e1000_tx_queue(tx_ring, tx_flags, count);
 		/* Make sure there is space in the ring for the next send. */
-		e1000_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 2);
-
+		e1000_maybe_stop_tx(tx_ring,
+				    (MAX_SKB_FRAGS *
+				     DIV_ROUND_UP(PAGE_SIZE,
+						  adapter->tx_fifo_limit) + 2));
 	} else {
 		dev_kfree_skb_any(skb);
 		tx_ring->buffer_info[first].time_stamp = 0;

@@ -6327,8 +6325,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 		adapter->hw.phy.autoneg_advertised = 0x2f;
 
 	/* ring size defaults */
-	adapter->rx_ring->count = 256;
-	adapter->tx_ring->count = 256;
+	adapter->rx_ring->count = E1000_DEFAULT_RXD;
+	adapter->tx_ring->count = E1000_DEFAULT_TXD;
 
 	/*
 	 * Initial Wake on LAN setting - If APM wake is enabled in
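The e1000e series replaces the compile-time TXD_USE_COUNT power-of-two estimate with DIV_ROUND_UP over the per-adapter tx_fifo_limit computed in e1000e_reset(). A standalone check of the arithmetic (the PBA-derived limit here is an invented example value):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* e.g. a 20KB packet-buffer allocation minus 96, capped at 24KB */
	unsigned int tx_fifo_limit = (20u << 10) - 96;
	unsigned int seg_len = 60000;

	/* descriptors needed so no single descriptor exceeds the limit */
	printf("%u descriptors\n", DIV_ROUND_UP(seg_len, tx_fifo_limit)); /* 3 */
	return 0;
}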
@@ -863,8 +863,8 @@ static int efx_ethtool_get_class_rule(struct efx_nic *efx,
 				   &ip_entry->ip4dst, &ip_entry->pdst);
 	if (rc != 0) {
 		rc = efx_filter_get_ipv4_full(
-			&spec, &proto, &ip_entry->ip4src, &ip_entry->psrc,
-			&ip_entry->ip4dst, &ip_entry->pdst);
+			&spec, &proto, &ip_entry->ip4dst, &ip_entry->pdst,
+			&ip_entry->ip4src, &ip_entry->psrc);
 		EFX_WARN_ON_PARANOID(rc);
 		ip_mask->ip4src = ~0;
 		ip_mask->psrc = ~0;
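The sfc fix swaps two argument pairs that were passed in the wrong order; with four consecutive parameters of identical pointer types the compiler cannot flag the transposition. One defensive alternative, not what sfc does, is to pass a small struct so every endpoint is named at the call site:

#include <stdio.h>

struct flow_endpoints {
	unsigned int saddr, daddr;
	unsigned short sport, dport;
};

/* Named fields make src/dst transpositions visible where they happen. */
static void get_filter(const struct flow_endpoints *ep)
{
	printf("src %u:%u -> dst %u:%u\n",
	       ep->saddr, ep->sport, ep->daddr, ep->dport);
}

int main(void)
{
	struct flow_endpoints ep = {
		.saddr = 1, .sport = 80,
		.daddr = 2, .dport = 8080,
	};
	get_filter(&ep);
	return 0;
}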
@@ -22,6 +22,9 @@
   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
 *******************************************************************************/
 
+#ifndef __COMMON_H__
+#define __COMMON_H__
+
 #include <linux/etherdevice.h>
 #include <linux/netdevice.h>
 #include <linux/phy.h>

@@ -366,3 +369,5 @@ extern void stmmac_set_mac(void __iomem *ioaddr, bool enable);
 
 extern void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr);
 extern const struct stmmac_ring_mode_ops ring_mode_ops;
+
+#endif /* __COMMON_H__ */

@@ -20,6 +20,10 @@
 
   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
 *******************************************************************************/
+
+#ifndef __DESCS_H__
+#define __DESCS_H__
+
 struct dma_desc {
 	/* Receive descriptor */
 	union {

@@ -166,3 +170,5 @@ enum tdes_csum_insertion {
 					 * is not calculated */
 	cic_full = 3,		/* IP header and pseudoheader */
 };
+
+#endif /* __DESCS_H__ */

@@ -27,6 +27,9 @@
   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
 *******************************************************************************/
 
+#ifndef __DESC_COM_H__
+#define __DESC_COM_H__
+
 #if defined(CONFIG_STMMAC_RING)
 static inline void ehn_desc_rx_set_on_ring_chain(struct dma_desc *p, int end)
 {

@@ -124,3 +127,5 @@ static inline void norm_set_tx_desc_len(struct dma_desc *p, int len)
 	p->des01.tx.buffer1_size = len;
 }
 #endif
+
+#endif /* __DESC_COM_H__ */

@@ -22,6 +22,9 @@
   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
 *******************************************************************************/
 
+#ifndef __DWMAC100_H__
+#define __DWMAC100_H__
+
 #include <linux/phy.h>
 #include "common.h"
 

@@ -119,3 +122,5 @@ enum ttc_control {
 #define DMA_MISSED_FRAME_M_CNTR	0x0000ffff	/* Missed Frame Couinter */
 
 extern const struct stmmac_dma_ops dwmac100_dma_ops;
+
+#endif /* __DWMAC100_H__ */

@@ -19,6 +19,8 @@
 
   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
 *******************************************************************************/
+#ifndef __DWMAC1000_H__
+#define __DWMAC1000_H__
 
 #include <linux/phy.h>
 #include "common.h"

@@ -229,6 +231,7 @@ enum rtc_control {
 #define GMAC_MMC_RX_CSUM_OFFLOAD	0x208
 
 /* Synopsys Core versions */
-#define	DWMAC_CORE_3_40	34
+#define	DWMAC_CORE_3_40	0x34
 
 extern const struct stmmac_dma_ops dwmac1000_dma_ops;
+#endif /* __DWMAC1000_H__ */

@@ -22,6 +22,9 @@
   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
 *******************************************************************************/
 
+#ifndef __DWMAC_DMA_H__
+#define __DWMAC_DMA_H__
+
 /* DMA CRS Control and Status Register Mapping */
 #define DMA_BUS_MODE		0x00001000	/* Bus Mode */
 #define DMA_XMT_POLL_DEMAND	0x00001004	/* Transmit Poll Demand */

@@ -109,3 +112,5 @@ extern void dwmac_dma_start_rx(void __iomem *ioaddr);
 extern void dwmac_dma_stop_rx(void __iomem *ioaddr);
 extern int dwmac_dma_interrupt(void __iomem *ioaddr,
 				struct stmmac_extra_stats *x);
+
+#endif /* __DWMAC_DMA_H__ */

@@ -22,6 +22,9 @@
   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
 *******************************************************************************/
 
+#ifndef __MMC_H__
+#define __MMC_H__
+
 /* MMC control register */
 /* When set, all counter are reset */
 #define MMC_CNTRL_COUNTER_RESET		0x1

@@ -129,3 +132,5 @@ struct stmmac_counters {
 extern void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode);
 extern void dwmac_mmc_intr_all_mask(void __iomem *ioaddr);
 extern void dwmac_mmc_read(void __iomem *ioaddr, struct stmmac_counters *mmc);
+
+#endif /* __MMC_H__ */

@@ -33,7 +33,7 @@
 #define MMC_TX_INTR		0x00000108	/* MMC TX Interrupt */
 #define MMC_RX_INTR_MASK	0x0000010c	/* MMC Interrupt Mask */
 #define MMC_TX_INTR_MASK	0x00000110	/* MMC Interrupt Mask */
-#define MMC_DEFAUL_MASK		0xffffffff
+#define MMC_DEFAULT_MASK	0xffffffff
 
 /* MMC TX counter registers */

@@ -147,8 +147,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
 /* To mask all all interrupts.*/
 void dwmac_mmc_intr_all_mask(void __iomem *ioaddr)
 {
-	writel(MMC_DEFAUL_MASK, ioaddr + MMC_RX_INTR_MASK);
-	writel(MMC_DEFAUL_MASK, ioaddr + MMC_TX_INTR_MASK);
+	writel(MMC_DEFAULT_MASK, ioaddr + MMC_RX_INTR_MASK);
+	writel(MMC_DEFAULT_MASK, ioaddr + MMC_TX_INTR_MASK);
 }
 
 /* This reads the MAC core counters (if actaully supported).

@@ -20,6 +20,9 @@
   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
 *******************************************************************************/
 
+#ifndef __STMMAC_H__
+#define __STMMAC_H__
+
 #define STMMAC_RESOURCE_NAME   "stmmaceth"
 #define DRV_MODULE_VERSION	"March_2012"
 

@@ -166,3 +169,5 @@ static inline void stmmac_unregister_pci(void)
 {
 }
 #endif /* CONFIG_STMMAC_PCI */
+
+#endif /* __STMMAC_H__ */

@@ -21,6 +21,8 @@
 
   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
 *******************************************************************************/
+#ifndef __STMMAC_TIMER_H__
+#define __STMMAC_TIMER_H__
 
 struct stmmac_timer {
 	void (*timer_start) (unsigned int new_freq);

@@ -40,3 +42,5 @@ void stmmac_schedule(struct net_device *dev);
 extern int tmu2_register_user(void *fnt, void *data);
 extern void tmu2_unregister_user(void);
 #endif
+
+#endif /* __STMMAC_TIMER_H__ */
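Every stmmac hunk above adds the same thing: conventional include guards, so each header can be included more than once without redefinition errors. The pattern for a hypothetical header foo.h:

/* foo.h -- the guard macro must be unique per header */
#ifndef __FOO_H__
#define __FOO_H__

struct foo {
	int bar;
};

#endif /* __FOO_H__ */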
@@ -394,8 +394,10 @@ static int __devexit davinci_mdio_remove(struct platform_device *pdev)
 	struct device *dev = &pdev->dev;
 	struct davinci_mdio_data *data = dev_get_drvdata(dev);
 
-	if (data->bus)
+	if (data->bus) {
 		mdiobus_unregister(data->bus);
+		mdiobus_free(data->bus);
+	}
 
 	if (data->clk)
 		clk_put(data->clk);
@@ -673,7 +673,7 @@ void smt_add_para(struct s_smc *smc, struct s_pcon *pcon, u_short para,
 			sm_pm_get_ls(smc,port_to_mib(smc,port))) ;
 		break ;
 	case SMT_P_REASON :
-		* (u_long *) to = 0 ;
+		*(u32 *)to = 0 ;
 		sp_len = 4 ;
 		goto sp_done ;
 	case SMT_P1033 :			/* time stamp */
@@ -413,7 +413,9 @@ static const struct usb_device_id products[] = {
 
 	/* 5. Gobi 2000 and 3000 devices */
 	{QMI_GOBI_DEVICE(0x413c, 0x8186)},	/* Dell Gobi 2000 Modem device (N0218, VU936) */
+	{QMI_GOBI_DEVICE(0x413c, 0x8194)},	/* Dell Gobi 3000 Composite */
 	{QMI_GOBI_DEVICE(0x05c6, 0x920b)},	/* Generic Gobi 2000 Modem device */
+	{QMI_GOBI_DEVICE(0x05c6, 0x920d)},	/* Gobi 3000 Composite */
 	{QMI_GOBI_DEVICE(0x05c6, 0x9225)},	/* Sony Gobi 2000 Modem device (N0279, VU730) */
 	{QMI_GOBI_DEVICE(0x05c6, 0x9245)},	/* Samsung Gobi 2000 Modem device (VL176) */
 	{QMI_GOBI_DEVICE(0x03f0, 0x251d)},	/* HP Gobi 2000 Modem device (VP412) */

@@ -441,6 +443,8 @@ static const struct usb_device_id products[] = {
 	{QMI_GOBI_DEVICE(0x1199, 0x9015)},	/* Sierra Wireless Gobi 3000 Modem device */
 	{QMI_GOBI_DEVICE(0x1199, 0x9019)},	/* Sierra Wireless Gobi 3000 Modem device */
 	{QMI_GOBI_DEVICE(0x1199, 0x901b)},	/* Sierra Wireless MC7770 */
+	{QMI_GOBI_DEVICE(0x12d1, 0x14f1)},	/* Sony Gobi 3000 Composite */
+	{QMI_GOBI_DEVICE(0x1410, 0xa021)},	/* Foxconn Gobi 3000 Modem device (Novatel E396) */
 
 	{ }					/* END */
 };
@@ -1573,7 +1573,7 @@ int usbnet_resume (struct usb_interface *intf)
 				netif_device_present(dev->net) &&
 				!timer_pending(&dev->delay) &&
 				!test_bit(EVENT_RX_HALT, &dev->flags))
-					rx_alloc_submit(dev, GFP_KERNEL);
+					rx_alloc_submit(dev, GFP_NOIO);
 
 		if (!(dev->txq.qlen >= TX_QLEN(dev)))
 			netif_tx_wake_all_queues(dev->net);
@@ -1482,7 +1482,7 @@ ath5k_eeprom_read_target_rate_pwr_info(struct ath5k_hw *ah, unsigned int mode)
 	case AR5K_EEPROM_MODE_11A:
 		offset += AR5K_EEPROM_TARGET_PWR_OFF_11A(ee->ee_version);
 		rate_pcal_info = ee->ee_rate_tpwr_a;
-		ee->ee_rate_target_pwr_num[mode] = AR5K_EEPROM_N_5GHZ_CHAN;
+		ee->ee_rate_target_pwr_num[mode] = AR5K_EEPROM_N_5GHZ_RATE_CHAN;
 		break;
 	case AR5K_EEPROM_MODE_11B:
 		offset += AR5K_EEPROM_TARGET_PWR_OFF_11B(ee->ee_version);

@@ -182,6 +182,7 @@
 #define AR5K_EEPROM_EEP_DELTA		10
 #define AR5K_EEPROM_N_MODES		3
 #define AR5K_EEPROM_N_5GHZ_CHAN		10
+#define AR5K_EEPROM_N_5GHZ_RATE_CHAN	8
 #define AR5K_EEPROM_N_2GHZ_CHAN		3
 #define AR5K_EEPROM_N_2GHZ_CHAN_2413	4
 #define	AR5K_EEPROM_N_2GHZ_CHAN_MAX	4
@@ -1233,6 +1233,9 @@ uint brcms_reset(struct brcms_info *wl)
 	/* dpc will not be rescheduled */
 	wl->resched = false;
 
+	/* inform publicly that interface is down */
+	wl->pub->up = false;
+
 	return 0;
 }
 
@@ -2042,7 +2042,8 @@ static void isr_indicate_associated(struct ipw2100_priv *priv, u32 status)
 		return;
 	}
 	len = ETH_ALEN;
-	ipw2100_get_ordinal(priv, IPW_ORD_STAT_ASSN_AP_BSSID, &bssid, &len);
+	ret = ipw2100_get_ordinal(priv, IPW_ORD_STAT_ASSN_AP_BSSID, bssid,
+				  &len);
 	if (ret) {
 		IPW_DEBUG_INFO("failed querying ordinals at line %d\n",
 			       __LINE__);
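Two fixes land in this ipw2100 hunk: the return value of ipw2100_get_ordinal() is actually assigned to ret before the existing error check reads it, and the buffer is passed as bssid rather than &bssid. For an array, those two expressions yield the same address but different pointer types, which hides real type errors. A small demonstration:

#include <stdio.h>

static void fill(unsigned char *buf, unsigned int len)
{
	for (unsigned int i = 0; i < len; i++)
		buf[i] = (unsigned char)i;
}

int main(void)
{
	unsigned char bssid[6];

	/* bssid decays to unsigned char *; &bssid has the distinct type
	 * unsigned char (*)[6] -- same address, wrong type. */
	fill(bssid, sizeof(bssid));	/* correct */
	/* fill(&bssid, sizeof(bssid)); draws a pointer-type diagnostic,
	 * exactly the warning the old code was tripping. */

	printf("%u\n", bssid[5]);
	return 0;
}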
@@ -124,6 +124,9 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file,
 	const struct fw_img *img;
 	size_t bufsz;
 
+	if (!iwl_is_ready_rf(priv))
+		return -EAGAIN;
+
 	/* default is to dump the entire data segment */
 	if (!priv->dbgfs_sram_offset && !priv->dbgfs_sram_len) {
 		priv->dbgfs_sram_offset = 0x800000;

@@ -350,7 +350,7 @@ int iwl_queue_space(const struct iwl_queue *q);
 /*****************************************************
 * Error handling
 ******************************************************/
-int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display);
+int iwl_dump_fh(struct iwl_trans *trans, char **buf);
 void iwl_dump_csr(struct iwl_trans *trans);
 
 /*****************************************************

@@ -555,7 +555,7 @@ static void iwl_irq_handle_error(struct iwl_trans *trans)
 	}
 
 	iwl_dump_csr(trans);
-	iwl_dump_fh(trans, NULL, false);
+	iwl_dump_fh(trans, NULL);
 
 	iwl_op_mode_nic_error(trans->op_mode);
 }

@@ -1649,13 +1649,9 @@ static const char *get_fh_string(int cmd)
 #undef IWL_CMD
 }
 
-int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display)
+int iwl_dump_fh(struct iwl_trans *trans, char **buf)
 {
 	int i;
-#ifdef CONFIG_IWLWIFI_DEBUG
-	int pos = 0;
-	size_t bufsz = 0;
-#endif
 	static const u32 fh_tbl[] = {
 		FH_RSCSR_CHNL0_STTS_WPTR_REG,
 		FH_RSCSR_CHNL0_RBDCB_BASE_REG,

@@ -1667,29 +1663,35 @@ int iwl_dump_fh(struct iwl_trans *trans, char **buf)
 		FH_TSSR_TX_STATUS_REG,
 		FH_TSSR_TX_ERROR_REG
 	};
-#ifdef CONFIG_IWLWIFI_DEBUG
-	if (display) {
-		bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+	if (buf) {
+		int pos = 0;
+		size_t bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
+
 		*buf = kmalloc(bufsz, GFP_KERNEL);
 		if (!*buf)
 			return -ENOMEM;
+
 		pos += scnprintf(*buf + pos, bufsz - pos,
 				"FH register values:\n");
-		for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
+
+		for (i = 0; i < ARRAY_SIZE(fh_tbl); i++)
 			pos += scnprintf(*buf + pos, bufsz - pos,
 				"  %34s: 0X%08x\n",
 				get_fh_string(fh_tbl[i]),
 				iwl_read_direct32(trans, fh_tbl[i]));
-		}
+
 		return pos;
 	}
 #endif
+
 	IWL_ERR(trans, "FH register values:\n");
-	for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
+	for (i = 0; i < ARRAY_SIZE(fh_tbl); i++)
 		IWL_ERR(trans, "  %34s: 0X%08x\n",
 			get_fh_string(fh_tbl[i]),
 			iwl_read_direct32(trans, fh_tbl[i]));
-	}
+
 	return 0;
 }

@@ -1982,11 +1984,11 @@ static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
 					 size_t count, loff_t *ppos)
 {
 	struct iwl_trans *trans = file->private_data;
-	char *buf;
+	char *buf = NULL;
 	int pos = 0;
 	ssize_t ret = -EFAULT;
 
-	ret = pos = iwl_dump_fh(trans, &buf, true);
+	ret = pos = iwl_dump_fh(trans, &buf);
 	if (buf) {
 		ret = simple_read_from_buffer(user_buf,
 					      count, ppos, buf, pos);
@@ -57,8 +57,7 @@
 static const struct ethtool_ops xennet_ethtool_ops;
 
 struct netfront_cb {
-	struct page *page;
-	unsigned offset;
+	int pull_to;
 };
 
 #define NETFRONT_SKB_CB(skb)	((struct netfront_cb *)((skb)->cb))

@@ -867,15 +866,9 @@ static int handle_incoming_queue(struct net_device *dev,
 	struct sk_buff *skb;
 
 	while ((skb = __skb_dequeue(rxq)) != NULL) {
-		struct page *page = NETFRONT_SKB_CB(skb)->page;
-		void *vaddr = page_address(page);
-		unsigned offset = NETFRONT_SKB_CB(skb)->offset;
+		int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
 
-		memcpy(skb->data, vaddr + offset,
-		       skb_headlen(skb));
-
-		if (page != skb_frag_page(&skb_shinfo(skb)->frags[0]))
-			__free_page(page);
+		__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
 
 		/* Ethernet work: Delayed to here as it peeks the header. */
 		skb->protocol = eth_type_trans(skb, dev);

@@ -913,7 +906,6 @@ static int xennet_poll(struct napi_struct *napi, int budget)
 	struct sk_buff_head errq;
 	struct sk_buff_head tmpq;
 	unsigned long flags;
-	unsigned int len;
 	int err;
 
 	spin_lock(&np->rx_lock);

@@ -955,24 +947,13 @@ err:
 			}
 		}
 
-		NETFRONT_SKB_CB(skb)->page =
-			skb_frag_page(&skb_shinfo(skb)->frags[0]);
-		NETFRONT_SKB_CB(skb)->offset = rx->offset;
+		NETFRONT_SKB_CB(skb)->pull_to = rx->status;
+		if (NETFRONT_SKB_CB(skb)->pull_to > RX_COPY_THRESHOLD)
+			NETFRONT_SKB_CB(skb)->pull_to = RX_COPY_THRESHOLD;
 
-		len = rx->status;
-		if (len > RX_COPY_THRESHOLD)
-			len = RX_COPY_THRESHOLD;
-		skb_put(skb, len);
-
-		if (rx->status > len) {
-			skb_shinfo(skb)->frags[0].page_offset =
-				rx->offset + len;
-			skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status - len);
-			skb->data_len = rx->status - len;
-		} else {
-			__skb_fill_page_desc(skb, 0, NULL, 0, 0);
-			skb_shinfo(skb)->nr_frags = 0;
-		}
+		skb_shinfo(skb)->frags[0].page_offset = rx->offset;
+		skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status);
+		skb->data_len = rx->status;
 
 		i = xennet_fill_frags(np, skb, &tmpq);
 

@@ -999,7 +980,7 @@ err:
 		 * receive throughout using the standard receive
 		 * buffer size was cut by 25%(!!!).
 		 */
-		skb->truesize += skb->data_len - (RX_COPY_THRESHOLD - len);
+		skb->truesize += skb->data_len - RX_COPY_THRESHOLD;
 		skb->len += skb->data_len;
 
 		if (rx->flags & XEN_NETRXF_csum_blank)
@@ -280,8 +280,12 @@ static long local_pci_probe(void *_ddi)
 {
 	struct drv_dev_and_id *ddi = _ddi;
 	struct device *dev = &ddi->dev->dev;
+	struct device *parent = dev->parent;
 	int rc;
 
+	/* The parent bridge must be in active state when probing */
+	if (parent)
+		pm_runtime_get_sync(parent);
 	/* Unbound PCI devices are always set to disabled and suspended.
 	 * During probe, the device is set to enabled and active and the
 	 * usage count is incremented.  If the driver supports runtime PM,

@@ -298,6 +302,8 @@ static long local_pci_probe(void *_ddi)
 		pm_runtime_set_suspended(dev);
 		pm_runtime_put_noidle(dev);
 	}
+	if (parent)
+		pm_runtime_put(parent);
 	return rc;
 }
 
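local_pci_probe() now pins the parent bridge active for the duration of the probe via pm_runtime_get_sync()/pm_runtime_put(), balanced on every exit path. The underlying get/put bracketing pattern, reduced to a userspace sketch with invented names:

#include <stdio.h>

struct node { int usage; };

static void node_get(struct node *n) { if (n) n->usage++; }
static void node_put(struct node *n) { if (n) n->usage--; }

/* Keep the parent active across an operation on the child; the
 * reference is dropped on every return path, success or failure. */
static int probe_child(struct node *parent)
{
	int rc;

	node_get(parent);
	rc = 0;	/* ... actual probing work ... */
	node_put(parent);
	return rc;
}

int main(void)
{
	struct node bridge = { 0 };

	probe_child(&bridge);
	printf("usage after probe: %d\n", bridge.usage);	/* 0 */
	return 0;
}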
Some files were not shown because too many files have changed in this diff.