mirror of https://github.com/edk2-porting/linux-next.git (synced 2024-12-17 01:34:00 +08:00)

Merge 3.5-rc3 into usb-next

This lets us catch the USB fixes that went into 3.5-rc3 into this branch, as we want them here as well.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

commit ff446f2001
@@ -10,8 +10,8 @@ Currently this network device driver is for all STM embedded MAC/GMAC
(i.e. 7xxx/5xxx SoCs), SPEAr (arm), Loongson1B (mips) and XLINX XC2V3000
FF1152AMT0221 D1215994A VIRTEX FPGA board.

DWC Ether MAC 10/100/1000 Universal version 3.60a (and older) and DWC Ether MAC 10/100
Universal version 4.0 have been used for developing this driver.
DWC Ether MAC 10/100/1000 Universal version 3.60a (and older) and DWC Ether
MAC 10/100 Universal version 4.0 have been used for developing this driver.

This driver supports both the platform bus and PCI.

@@ -54,27 +54,27 @@ net_device structure enabling the scatter/gather feature.
When one or more packets are received, an interrupt happens. The interrupts
are not queued so the driver has to scan all the descriptors in the ring during
the receive process.
This is based on NAPI so the interrupt handler signals only if there is work to be
done, and it exits.
This is based on NAPI so the interrupt handler signals only if there is work
to be done, and it exits.
Then the poll method will be scheduled at some future point.
The incoming packets are stored, by the DMA, in a list of pre-allocated socket
buffers in order to avoid the memcpy (Zero-copy).
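
The receive path described above is the standard NAPI split between a small interrupt handler and a deferred poll method. The sketch below is added for illustration only; it is not part of this diff or of the stmmac sources, and the struct and function names are placeholders.

```c
#include <linux/interrupt.h>
#include <linux/netdevice.h>

/* Placeholder private data; a real driver keeps far more state here. */
struct napi_sketch_priv {
	struct napi_struct napi;
};

static irqreturn_t napi_sketch_interrupt(int irq, void *dev_id)
{
	struct napi_sketch_priv *priv = dev_id;

	/* Only signal that there is work to do, then exit: mask further RX
	 * interrupts (device-specific, omitted here) and let the NAPI core
	 * schedule the poll method. */
	if (napi_schedule_prep(&priv->napi))
		__napi_schedule(&priv->napi);

	return IRQ_HANDLED;
}

static int napi_sketch_poll(struct napi_struct *napi, int budget)
{
	struct napi_sketch_priv *priv =
		container_of(napi, struct napi_sketch_priv, napi);
	int work_done = 0;

	/* In a real driver, priv is used here to scan the RX descriptor
	 * ring, handing at most "budget" pre-allocated socket buffers to
	 * the stack (zero-copy of the payload). */

	if (work_done < budget) {
		napi_complete(napi);
		/* re-enable RX interrupts on the device here */
	}
	return work_done;
}
```

Because the handler never walks the ring itself, the whole descriptor scan happens in the poll method under the budget limit, which is the behaviour the paragraph above describes.
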
4.3) Timer-Driver Interrupt
Instead of having the device that asynchronously notifies the frame receptions, the
driver configures a timer to generate an interrupt at regular intervals.
Based on the granularity of the timer, the frames that are received by the device
will experience different levels of latency. Some NICs have dedicated timer
device to perform this task. STMMAC can use either the RTC device or the TMU
channel 2 on STLinux platforms.
Instead of having the device that asynchronously notifies the frame receptions,
the driver configures a timer to generate an interrupt at regular intervals.
Based on the granularity of the timer, the frames that are received by the
device will experience different levels of latency. Some NICs have dedicated
timer device to perform this task. STMMAC can use either the RTC device or the
TMU channel 2 on STLinux platforms.
The timers frequency can be passed to the driver as parameter; when change it,
take care of both hardware capability and network stability/performance impact.
Several performance tests on STM platforms showed this optimisation allows to spare
the CPU while having the maximum throughput.
Several performance tests on STM platforms showed this optimisation allows to
spare the CPU while having the maximum throughput.

4.4) WOL
Wake up on Lan feature through Magic and Unicast frames are supported for the GMAC
core.
Wake up on Lan feature through Magic and Unicast frames are supported for the
GMAC core.

4.5) DMA descriptors
Driver handles both normal and enhanced descriptors. The latter has been only
@@ -106,7 +106,8 @@ Several driver's information can be passed through the platform
These are included in the include/linux/stmmac.h header file
and detailed below as well:

struct plat_stmmacenet_data {
struct plat_stmmacenet_data {
	char *phy_bus_name;
	int bus_id;
	int phy_addr;
	int interface;
@@ -124,19 +125,24 @@ and detailed below as well:
	void (*bus_setup)(void __iomem *ioaddr);
	int (*init)(struct platform_device *pdev);
	void (*exit)(struct platform_device *pdev);
	void *custom_cfg;
	void *custom_data;
	void *bsp_priv;
};

Where:
o phy_bus_name: phy bus name to attach to the stmmac.
o bus_id: bus identifier.
o phy_addr: the physical address can be passed from the platform.
	    If it is set to -1 the driver will automatically
	    detect it at run-time by probing all the 32 addresses.
o interface: PHY device's interface.
o mdio_bus_data: specific platform fields for the MDIO bus.
o pbl: the Programmable Burst Length is maximum number of beats to
o dma_cfg: internal DMA parameters
o pbl: the Programmable Burst Length is maximum number of beats to
       be transferred in one DMA transaction.
       GMAC also enables the 4xPBL by default.
o fixed_burst/mixed_burst/burst_len
o clk_csr: fixed CSR Clock range selection.
o has_gmac: uses the GMAC core.
o enh_desc: if sets the MAC will use the enhanced descriptor structure.
@@ -160,8 +166,9 @@ Where:
	     this is sometime necessary on some platforms (e.g. ST boxes)
	     where the HW needs to have set some PIO lines or system cfg
	     registers.
o custom_cfg: this is a custom configuration that can be passed while
	      initialising the resources.
o custom_cfg/custom_data: this is a custom configuration that can be passed
	      while initialising the resources.
o bsp_priv: another private poiter.

For MDIO bus The we have:

@@ -180,7 +187,6 @@ Where:
o irqs: list of IRQs, one per PHY.
o probed_phy_irq: if irqs is NULL, use this for probed PHY.

For DMA engine we have the following internal fields that should be
tuned according to the HW capabilities.

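As a companion to the field descriptions in the documentation hunk above, a board file would typically populate only a few members of this structure and leave the rest zeroed. The following is a hypothetical sketch added for illustration and is not part of the diff; the variable names and the chosen values are invented, and only fields documented above are used.

```c
#include <linux/phy.h>
#include <linux/stmmac.h>

/* Hypothetical MDIO bus data: phy_mask of 0 leaves every address probeable. */
static struct stmmac_mdio_bus_data example_mdio_bus_data = {
	.phy_mask	= 0,
};

/* Hypothetical platform data for one stmmac instance. */
static struct plat_stmmacenet_data example_stmmac_pdata = {
	.bus_id		= 0,
	.phy_addr	= -1,		/* let the driver probe all 32 addresses */
	.interface	= PHY_INTERFACE_MODE_MII,
	.mdio_bus_data	= &example_mdio_bus_data,
	.pbl		= 32,		/* burst length in beats; GMAC enables 4xPBL */
	.has_gmac	= 1,		/* GMAC core */
	.enh_desc	= 1,		/* use the enhanced descriptor structure */
};
```

Such a structure would then be handed to the stmmac platform device as its platform data, which is where the init/exit callbacks and the bsp_priv pointer described above come into play.
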
MAINTAINERS
@@ -1646,11 +1646,11 @@ S: Maintained
F: drivers/gpio/gpio-bt8xx.c

BTRFS FILE SYSTEM
M: Chris Mason <chris.mason@oracle.com>
M: Chris Mason <chris.mason@fusionio.com>
L: linux-btrfs@vger.kernel.org
W: http://btrfs.wiki.kernel.org/
Q: http://patchwork.kernel.org/project/linux-btrfs/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs.git
S: Maintained
F: Documentation/filesystems/btrfs.txt
F: fs/btrfs/
@@ -1800,6 +1800,9 @@ F: include/linux/cfag12864b.h
CFG80211 and NL80211
M: Johannes Berg <johannes@sipsolutions.net>
L: linux-wireless@vger.kernel.org
W: http://wireless.kernel.org/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211-next.git
S: Maintained
F: include/linux/nl80211.h
F: include/net/cfg80211.h
@@ -4349,7 +4352,8 @@ MAC80211
M: Johannes Berg <johannes@sipsolutions.net>
L: linux-wireless@vger.kernel.org
W: http://linuxwireless.org/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211-next.git
S: Maintained
F: Documentation/networking/mac80211-injection.txt
F: include/net/mac80211.h
@@ -4360,7 +4364,8 @@ M: Stefano Brivio <stefano.brivio@polimi.it>
M: Mattias Nissler <mattias.nissler@gmx.de>
L: linux-wireless@vger.kernel.org
W: http://linuxwireless.org/en/developers/Documentation/mac80211/RateControl/PID
T: git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211-next.git
S: Maintained
F: net/mac80211/rc80211_pid*

@@ -5711,6 +5716,9 @@ F: include/linux/remoteproc.h
RFKILL
M: Johannes Berg <johannes@sipsolutions.net>
L: linux-wireless@vger.kernel.org
W: http://wireless.kernel.org/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211-next.git
S: Maintained
F: Documentation/rfkill.txt
F: net/rfkill/

Makefile
@@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 5
SUBLEVEL = 0
EXTRAVERSION = -rc2
EXTRAVERSION = -rc3
NAME = Saber-toothed Squirrel

# *DOCUMENTATION*

@@ -366,8 +366,8 @@ static int __dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
	struct safe_buffer *buf;
	unsigned long off;

	dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
		__func__, addr, off, sz, dir);
	dev_dbg(dev, "%s(dma=%#x,sz=%zx,dir=%x)\n",
		__func__, addr, sz, dir);

	buf = find_safe_buffer_dev(dev, addr, __func__);
	if (!buf)
@@ -377,8 +377,8 @@ static int __dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,

	BUG_ON(buf->direction != dir);

	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x off=%#lx) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr), off,
		buf->safe, buf->safe_dma_addr);

	DO_STATS(dev->archdata.dmabounce->bounce_count++);
@@ -406,8 +406,8 @@ static int __dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
	struct safe_buffer *buf;
	unsigned long off;

	dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
		__func__, addr, off, sz, dir);
	dev_dbg(dev, "%s(dma=%#x,sz=%zx,dir=%x)\n",
		__func__, addr, sz, dir);

	buf = find_safe_buffer_dev(dev, addr, __func__);
	if (!buf)
@@ -417,8 +417,8 @@ static int __dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,

	BUG_ON(buf->direction != dir);

	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x off=%#lx) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr), off,
		buf->safe, buf->safe_dma_addr);

	DO_STATS(dev->archdata.dmabounce->bounce_count++);

@@ -271,9 +271,9 @@ static struct platform_device *create_simple_dss_pdev(const char *pdev_name,
		goto err;
	}

	r = omap_device_register(pdev);
	r = platform_device_add(pdev);
	if (r) {
		pr_err("Could not register omap_device for %s\n", pdev_name);
		pr_err("Could not register platform_device for %s\n", pdev_name);
		goto err;
	}

@@ -228,7 +228,7 @@ static pte_t **consistent_pte;

#define DEFAULT_CONSISTENT_DMA_SIZE SZ_2M

unsigned long consistent_base = CONSISTENT_END - DEFAULT_CONSISTENT_DMA_SIZE;
static unsigned long consistent_base = CONSISTENT_END - DEFAULT_CONSISTENT_DMA_SIZE;

void __init init_consistent_dma_size(unsigned long size)
{
@@ -321,7 +321,7 @@ static struct arm_vmregion_head coherent_head = {
	.vm_list = LIST_HEAD_INIT(coherent_head.vm_list),
};

size_t coherent_pool_size = DEFAULT_CONSISTENT_DMA_SIZE / 8;
static size_t coherent_pool_size = DEFAULT_CONSISTENT_DMA_SIZE / 8;

static int __init early_coherent_pool(char *p)
{

@@ -212,7 +212,7 @@ EXPORT_SYMBOL(arm_dma_zone_size);
 * allocations. This must be the smallest DMA mask in the system,
 * so a successful GFP_DMA allocation will always satisfy this.
 */
u32 arm_dma_limit;
phys_addr_t arm_dma_limit;

static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
	unsigned long dma_size)

@@ -62,7 +62,7 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page
#endif

#ifdef CONFIG_ZONE_DMA
extern u32 arm_dma_limit;
extern phys_addr_t arm_dma_limit;
#else
#define arm_dma_limit ((u32)~0)
#endif

@@ -7,6 +7,8 @@ config M68K
	select GENERIC_IRQ_SHOW
	select ARCH_HAVE_NMI_SAFE_CMPXCHG if RMW_INSNS
	select GENERIC_CPU_DEVICES
	select GENERIC_STRNCPY_FROM_USER if MMU
	select GENERIC_STRNLEN_USER if MMU
	select FPU if MMU
	select ARCH_USES_GETTIMEOFFSET if MMU && !COLDFIRE

@@ -1,2 +1,4 @@
include include/asm-generic/Kbuild.asm
header-y += cachectl.h

generic-y += word-at-a-time.h

@@ -86,7 +86,7 @@
/*
 * QSPI module.
 */
#define MCFQSPI_IOBASE (MCF_IPSBAR + 0x340)
#define MCFQSPI_BASE (MCF_IPSBAR + 0x340)
#define MCFQSPI_SIZE 0x40

#define MCFQSPI_CS0 147

@@ -379,12 +379,15 @@ __constant_copy_to_user(void __user *to, const void *from, unsigned long n)
#define copy_from_user(to, from, n) __copy_from_user(to, from, n)
#define copy_to_user(to, from, n) __copy_to_user(to, from, n)

long strncpy_from_user(char *dst, const char __user *src, long count);
long strnlen_user(const char __user *src, long n);
#define user_addr_max() \
	(segment_eq(get_fs(), USER_DS) ? TASK_SIZE : ~0UL)

extern long strncpy_from_user(char *dst, const char __user *src, long count);
extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);

unsigned long __clear_user(void __user *to, unsigned long n);

#define clear_user __clear_user

#define strlen_user(str) strnlen_user(str, 32767)

#endif /* _M68K_UACCESS_H */

@ -286,7 +286,7 @@ asmlinkage void syscall_trace(void)
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef CONFIG_COLDFIRE
|
||||
#if defined(CONFIG_COLDFIRE) || !defined(CONFIG_MMU)
|
||||
asmlinkage int syscall_trace_enter(void)
|
||||
{
|
||||
int ret = 0;
|
||||
|
@ -85,7 +85,7 @@ void __init time_init(void)
|
||||
mach_sched_init(timer_interrupt);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_M68KCLASSIC
|
||||
#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
|
||||
|
||||
u32 arch_gettimeoffset(void)
|
||||
{
|
||||
@ -108,4 +108,4 @@ static int __init rtc_init(void)
|
||||
|
||||
module_init(rtc_init);
|
||||
|
||||
#endif /* CONFIG_M68KCLASSIC */
|
||||
#endif /* CONFIG_ARCH_USES_GETTIMEOFFSET */
|
||||
|
@ -103,80 +103,6 @@ unsigned long __generic_copy_to_user(void __user *to, const void *from,
|
||||
}
|
||||
EXPORT_SYMBOL(__generic_copy_to_user);
|
||||
|
||||
/*
|
||||
* Copy a null terminated string from userspace.
|
||||
*/
|
||||
long strncpy_from_user(char *dst, const char __user *src, long count)
|
||||
{
|
||||
long res;
|
||||
char c;
|
||||
|
||||
if (count <= 0)
|
||||
return count;
|
||||
|
||||
asm volatile ("\n"
|
||||
"1: "MOVES".b (%2)+,%4\n"
|
||||
" move.b %4,(%1)+\n"
|
||||
" jeq 2f\n"
|
||||
" subq.l #1,%3\n"
|
||||
" jne 1b\n"
|
||||
"2: sub.l %3,%0\n"
|
||||
"3:\n"
|
||||
" .section .fixup,\"ax\"\n"
|
||||
" .even\n"
|
||||
"10: move.l %5,%0\n"
|
||||
" jra 3b\n"
|
||||
" .previous\n"
|
||||
"\n"
|
||||
" .section __ex_table,\"a\"\n"
|
||||
" .align 4\n"
|
||||
" .long 1b,10b\n"
|
||||
" .previous"
|
||||
: "=d" (res), "+a" (dst), "+a" (src), "+r" (count), "=&d" (c)
|
||||
: "i" (-EFAULT), "0" (count));
|
||||
|
||||
return res;
|
||||
}
|
||||
EXPORT_SYMBOL(strncpy_from_user);
|
||||
|
||||
/*
|
||||
* Return the size of a string (including the ending 0)
|
||||
*
|
||||
* Return 0 on exception, a value greater than N if too long
|
||||
*/
|
||||
long strnlen_user(const char __user *src, long n)
|
||||
{
|
||||
char c;
|
||||
long res;
|
||||
|
||||
asm volatile ("\n"
|
||||
"1: subq.l #1,%1\n"
|
||||
" jmi 3f\n"
|
||||
"2: "MOVES".b (%0)+,%2\n"
|
||||
" tst.b %2\n"
|
||||
" jne 1b\n"
|
||||
" jra 4f\n"
|
||||
"\n"
|
||||
"3: addq.l #1,%0\n"
|
||||
"4: sub.l %4,%0\n"
|
||||
"5:\n"
|
||||
" .section .fixup,\"ax\"\n"
|
||||
" .even\n"
|
||||
"20: sub.l %0,%0\n"
|
||||
" jra 5b\n"
|
||||
" .previous\n"
|
||||
"\n"
|
||||
" .section __ex_table,\"a\"\n"
|
||||
" .align 4\n"
|
||||
" .long 2b,20b\n"
|
||||
" .previous\n"
|
||||
: "=&a" (res), "+d" (n), "=&d" (c)
|
||||
: "0" (src), "r" (src));
|
||||
|
||||
return res;
|
||||
}
|
||||
EXPORT_SYMBOL(strnlen_user);
|
||||
|
||||
/*
|
||||
* Zero Userspace
|
||||
*/
|
||||
|
@ -53,6 +53,7 @@
|
||||
#endif
|
||||
|
||||
static u32 m68328_tick_cnt;
|
||||
static irq_handler_t timer_interrupt;
|
||||
|
||||
/***************************************************************************/
|
||||
|
||||
@ -62,7 +63,7 @@ static irqreturn_t hw_tick(int irq, void *dummy)
|
||||
TSTAT &= 0;
|
||||
|
||||
m68328_tick_cnt += TICKS_PER_JIFFY;
|
||||
return arch_timer_interrupt(irq, dummy);
|
||||
return timer_interrupt(irq, dummy);
|
||||
}
|
||||
|
||||
/***************************************************************************/
|
||||
@ -99,7 +100,7 @@ static struct clocksource m68328_clk = {
|
||||
|
||||
/***************************************************************************/
|
||||
|
||||
void hw_timer_init(void)
|
||||
void hw_timer_init(irq_handler_t handler)
|
||||
{
|
||||
/* disable timer 1 */
|
||||
TCTL = 0;
|
||||
@ -115,6 +116,7 @@ void hw_timer_init(void)
|
||||
/* Enable timer 1 */
|
||||
TCTL |= TCTL_TEN;
|
||||
clocksource_register_hz(&m68328_clk, TICKS_PER_JIFFY*HZ);
|
||||
timer_interrupt = handler;
|
||||
}
|
||||
|
||||
/***************************************************************************/
|
||||
|
@ -35,6 +35,7 @@ extern void m360_cpm_reset(void);
|
||||
#define OSCILLATOR (unsigned long int)33000000
|
||||
#endif
|
||||
|
||||
static irq_handler_t timer_interrupt;
|
||||
unsigned long int system_clock;
|
||||
|
||||
extern QUICC *pquicc;
|
||||
@ -52,7 +53,7 @@ static irqreturn_t hw_tick(int irq, void *dummy)
|
||||
|
||||
pquicc->timer_ter1 = 0x0002; /* clear timer event */
|
||||
|
||||
return arch_timer_interrupt(irq, dummy);
|
||||
return timer_interrupt(irq, dummy);
|
||||
}
|
||||
|
||||
static struct irqaction m68360_timer_irq = {
|
||||
@ -61,7 +62,7 @@ static struct irqaction m68360_timer_irq = {
|
||||
.handler = hw_tick,
|
||||
};
|
||||
|
||||
void hw_timer_init(void)
|
||||
void hw_timer_init(irq_handler_t handler)
|
||||
{
|
||||
unsigned char prescaler;
|
||||
unsigned short tgcr_save;
|
||||
@ -94,6 +95,8 @@ void hw_timer_init(void)
|
||||
|
||||
pquicc->timer_ter1 = 0x0003; /* clear timer events */
|
||||
|
||||
timer_interrupt = handler;
|
||||
|
||||
/* enable timer 1 interrupt in CIMR */
|
||||
setup_irq(CPMVEC_TIMER1, &m68360_timer_irq);
|
||||
|
||||
|
@ -100,6 +100,9 @@ static inline void hard_irq_disable(void)
|
||||
get_paca()->irq_happened |= PACA_IRQ_HARD_DIS;
|
||||
}
|
||||
|
||||
/* include/linux/interrupt.h needs hard_irq_disable to be a macro */
|
||||
#define hard_irq_disable hard_irq_disable
|
||||
|
||||
/*
|
||||
* This is called by asynchronous interrupts to conditionally
|
||||
* re-enable hard interrupts when soft-disabled after having
|
||||
|
@ -32,6 +32,8 @@ config SUPERH
|
||||
select GENERIC_SMP_IDLE_THREAD
|
||||
select GENERIC_CLOCKEVENTS
|
||||
select GENERIC_CMOS_UPDATE if SH_SH03 || SH_DREAMCAST
|
||||
select GENERIC_STRNCPY_FROM_USER
|
||||
select GENERIC_STRNLEN_USER
|
||||
help
|
||||
The SuperH is a RISC processor targeted for use in embedded systems
|
||||
and consumer electronics; it was also used in the Sega Dreamcast
|
||||
|
@ -9,6 +9,12 @@
|
||||
# License. See the file "COPYING" in the main directory of this archive
|
||||
# for more details.
|
||||
#
|
||||
ifneq ($(SUBARCH),$(ARCH))
|
||||
ifeq ($(CROSS_COMPILE),)
|
||||
CROSS_COMPILE := $(call cc-cross-prefix, $(UTS_MACHINE)-linux- $(UTS_MACHINE)-linux-gnu- $(UTS_MACHINE)-unknown-linux-gnu-)
|
||||
endif
|
||||
endif
|
||||
|
||||
isa-y := any
|
||||
isa-$(CONFIG_SH_DSP) := sh
|
||||
isa-$(CONFIG_CPU_SH2) := sh2
|
||||
@ -106,19 +112,13 @@ LDFLAGS_vmlinux += --defsym phys_stext=_stext-$(CONFIG_PAGE_OFFSET) \
|
||||
KBUILD_DEFCONFIG := cayman_defconfig
|
||||
endif
|
||||
|
||||
ifneq ($(SUBARCH),$(ARCH))
|
||||
ifeq ($(CROSS_COMPILE),)
|
||||
CROSS_COMPILE := $(call cc-cross-prefix, $(UTS_MACHINE)-linux- $(UTS_MACHINE)-linux-gnu- $(UTS_MACHINE)-unknown-linux-gnu-)
|
||||
endif
|
||||
endif
|
||||
|
||||
ifdef CONFIG_CPU_LITTLE_ENDIAN
|
||||
ld-bfd := elf32-$(UTS_MACHINE)-linux
|
||||
LDFLAGS_vmlinux += --defsym 'jiffies=jiffies_64' --oformat $(ld-bfd)
|
||||
LDFLAGS_vmlinux += --defsym jiffies=jiffies_64 --oformat $(ld-bfd)
|
||||
LDFLAGS += -EL
|
||||
else
|
||||
ld-bfd := elf32-$(UTS_MACHINE)big-linux
|
||||
LDFLAGS_vmlinux += --defsym 'jiffies=jiffies_64+4' --oformat $(ld-bfd)
|
||||
LDFLAGS_vmlinux += --defsym jiffies=jiffies_64+4 --oformat $(ld-bfd)
|
||||
LDFLAGS += -EB
|
||||
endif
|
||||
|
||||
|
@ -1,5 +1,39 @@
|
||||
include include/asm-generic/Kbuild.asm
|
||||
|
||||
generic-y += bitsperlong.h
|
||||
generic-y += cputime.h
|
||||
generic-y += current.h
|
||||
generic-y += delay.h
|
||||
generic-y += div64.h
|
||||
generic-y += emergency-restart.h
|
||||
generic-y += errno.h
|
||||
generic-y += fcntl.h
|
||||
generic-y += ioctl.h
|
||||
generic-y += ipcbuf.h
|
||||
generic-y += irq_regs.h
|
||||
generic-y += kvm_para.h
|
||||
generic-y += local.h
|
||||
generic-y += local64.h
|
||||
generic-y += param.h
|
||||
generic-y += parport.h
|
||||
generic-y += percpu.h
|
||||
generic-y += poll.h
|
||||
generic-y += mman.h
|
||||
generic-y += msgbuf.h
|
||||
generic-y += resource.h
|
||||
generic-y += scatterlist.h
|
||||
generic-y += sembuf.h
|
||||
generic-y += serial.h
|
||||
generic-y += shmbuf.h
|
||||
generic-y += siginfo.h
|
||||
generic-y += sizes.h
|
||||
generic-y += socket.h
|
||||
generic-y += statfs.h
|
||||
generic-y += termbits.h
|
||||
generic-y += termios.h
|
||||
generic-y += ucontext.h
|
||||
generic-y += xor.h
|
||||
|
||||
header-y += cachectl.h
|
||||
header-y += cpu-features.h
|
||||
header-y += hw_breakpoint.h
|
||||
|
@ -1 +0,0 @@
|
||||
#include <asm-generic/bitsperlong.h>
|
@ -1,6 +0,0 @@
|
||||
#ifndef __SH_CPUTIME_H
|
||||
#define __SH_CPUTIME_H
|
||||
|
||||
#include <asm-generic/cputime.h>
|
||||
|
||||
#endif /* __SH_CPUTIME_H */
|
@ -1 +0,0 @@
|
||||
#include <asm-generic/current.h>
|
@ -1 +0,0 @@
|
||||
#include <asm-generic/delay.h>
|
@ -1 +0,0 @@
|
||||
#include <asm-generic/div64.h>
|
@ -1,6 +0,0 @@
|
||||
#ifndef _ASM_EMERGENCY_RESTART_H
|
||||
#define _ASM_EMERGENCY_RESTART_H
|
||||
|
||||
#include <asm-generic/emergency-restart.h>
|
||||
|
||||
#endif /* _ASM_EMERGENCY_RESTART_H */
|
@ -1,6 +0,0 @@
|
||||
#ifndef __ASM_SH_ERRNO_H
|
||||
#define __ASM_SH_ERRNO_H
|
||||
|
||||
#include <asm-generic/errno.h>
|
||||
|
||||
#endif /* __ASM_SH_ERRNO_H */
|
@ -1 +0,0 @@
|
||||
#include <asm-generic/fcntl.h>
|
@ -1 +0,0 @@
|
||||
#include <asm-generic/ioctl.h>
|
@ -1 +0,0 @@
|
||||
#include <asm-generic/ipcbuf.h>
|
@ -1 +0,0 @@
|
||||
#include <asm-generic/irq_regs.h>
|
@ -1 +0,0 @@
|
||||
#include <asm-generic/kvm_para.h>
|
@ -1,7 +0,0 @@
|
||||
#ifndef __ASM_SH_LOCAL_H
|
||||
#define __ASM_SH_LOCAL_H
|
||||
|
||||
#include <asm-generic/local.h>
|
||||
|
||||
#endif /* __ASM_SH_LOCAL_H */
|
||||
|
@ -1 +0,0 @@
|
||||
#include <asm-generic/local64.h>
|
@ -1 +0,0 @@
|
||||
#include <asm-generic/mman.h>
|
@ -1 +0,0 @@
|
||||
#include <asm-generic/msgbuf.h>
|
@ -1 +0,0 @@
|
||||
#include <asm-generic/param.h>
|
@ -1 +0,0 @@
|
||||
#include <asm-generic/parport.h>
|
@ -1,6 +0,0 @@
|
||||
#ifndef __ARCH_SH_PERCPU
|
||||
#define __ARCH_SH_PERCPU
|
||||
|
||||
#include <asm-generic/percpu.h>
|
||||
|
||||
#endif /* __ARCH_SH_PERCPU */
|
@ -1 +0,0 @@
|
||||
#include <asm-generic/poll.h>
|
@ -1,6 +0,0 @@
|
||||
#ifndef __ASM_SH_RESOURCE_H
|
||||
#define __ASM_SH_RESOURCE_H
|
||||
|
||||
#include <asm-generic/resource.h>
|
||||
|
||||
#endif /* __ASM_SH_RESOURCE_H */
|
@ -1,6 +0,0 @@
|
||||
#ifndef __ASM_SH_SCATTERLIST_H
|
||||
#define __ASM_SH_SCATTERLIST_H
|
||||
|
||||
#include <asm-generic/scatterlist.h>
|
||||
|
||||
#endif /* __ASM_SH_SCATTERLIST_H */
|
@ -1 +0,0 @@
|
||||
#include <asm-generic/sembuf.h>
|
@ -1 +0,0 @@
|
||||
#include <asm-generic/serial.h>
|
@ -1 +0,0 @@
|
||||
#include <asm-generic/shmbuf.h>
|
@ -1,6 +0,0 @@
|
||||
#ifndef __ASM_SH_SIGINFO_H
|
||||
#define __ASM_SH_SIGINFO_H
|
||||
|
||||
#include <asm-generic/siginfo.h>
|
||||
|
||||
#endif /* __ASM_SH_SIGINFO_H */
|
@ -1 +0,0 @@
|
||||
#include <asm-generic/sizes.h>
|
@ -1 +0,0 @@
|
||||
#include <asm-generic/socket.h>
|
@ -1,6 +0,0 @@
|
||||
#ifndef __ASM_SH_STATFS_H
|
||||
#define __ASM_SH_STATFS_H
|
||||
|
||||
#include <asm-generic/statfs.h>
|
||||
|
||||
#endif /* __ASM_SH_STATFS_H */
|
@ -1 +0,0 @@
|
||||
#include <asm-generic/termbits.h>
|
@ -1 +0,0 @@
|
||||
#include <asm-generic/termios.h>
|
@ -25,6 +25,8 @@
|
||||
(__chk_user_ptr(addr), \
|
||||
__access_ok((unsigned long __force)(addr), (size)))
|
||||
|
||||
#define user_addr_max() (current_thread_info()->addr_limit.seg)
|
||||
|
||||
/*
|
||||
* Uh, these should become the main single-value transfer routines ...
|
||||
* They automatically use the right size if we just have the right
|
||||
@ -100,6 +102,11 @@ struct __large_struct { unsigned long buf[100]; };
|
||||
# include "uaccess_64.h"
|
||||
#endif
|
||||
|
||||
extern long strncpy_from_user(char *dest, const char __user *src, long count);
|
||||
|
||||
extern __must_check long strlen_user(const char __user *str);
|
||||
extern __must_check long strnlen_user(const char __user *str, long n);
|
||||
|
||||
/* Generic arbitrary sized copy. */
|
||||
/* Return the number of bytes NOT copied */
|
||||
__kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n);
|
||||
@ -137,37 +144,6 @@ __kernel_size_t __clear_user(void *addr, __kernel_size_t size);
|
||||
__cl_size; \
|
||||
})
|
||||
|
||||
/**
|
||||
* strncpy_from_user: - Copy a NUL terminated string from userspace.
|
||||
* @dst: Destination address, in kernel space. This buffer must be at
|
||||
* least @count bytes long.
|
||||
* @src: Source address, in user space.
|
||||
* @count: Maximum number of bytes to copy, including the trailing NUL.
|
||||
*
|
||||
* Copies a NUL-terminated string from userspace to kernel space.
|
||||
*
|
||||
* On success, returns the length of the string (not including the trailing
|
||||
* NUL).
|
||||
*
|
||||
* If access to userspace fails, returns -EFAULT (some data may have been
|
||||
* copied).
|
||||
*
|
||||
* If @count is smaller than the length of the string, copies @count bytes
|
||||
* and returns @count.
|
||||
*/
|
||||
#define strncpy_from_user(dest,src,count) \
|
||||
({ \
|
||||
unsigned long __sfu_src = (unsigned long)(src); \
|
||||
int __sfu_count = (int)(count); \
|
||||
long __sfu_res = -EFAULT; \
|
||||
\
|
||||
if (__access_ok(__sfu_src, __sfu_count)) \
|
||||
__sfu_res = __strncpy_from_user((unsigned long)(dest), \
|
||||
__sfu_src, __sfu_count); \
|
||||
\
|
||||
__sfu_res; \
|
||||
})
|
||||
|
||||
static inline unsigned long
|
||||
copy_from_user(void *to, const void __user *from, unsigned long n)
|
||||
{
|
||||
@ -192,43 +168,6 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
|
||||
return __copy_size;
|
||||
}
|
||||
|
||||
/**
|
||||
* strnlen_user: - Get the size of a string in user space.
|
||||
* @s: The string to measure.
|
||||
* @n: The maximum valid length
|
||||
*
|
||||
* Context: User context only. This function may sleep.
|
||||
*
|
||||
* Get the size of a NUL-terminated string in user space.
|
||||
*
|
||||
* Returns the size of the string INCLUDING the terminating NUL.
|
||||
* On exception, returns 0.
|
||||
* If the string is too long, returns a value greater than @n.
|
||||
*/
|
||||
static inline long strnlen_user(const char __user *s, long n)
|
||||
{
|
||||
if (!__addr_ok(s))
|
||||
return 0;
|
||||
else
|
||||
return __strnlen_user(s, n);
|
||||
}
|
||||
|
||||
/**
|
||||
* strlen_user: - Get the size of a string in user space.
|
||||
* @str: The string to measure.
|
||||
*
|
||||
* Context: User context only. This function may sleep.
|
||||
*
|
||||
* Get the size of a NUL-terminated string in user space.
|
||||
*
|
||||
* Returns the size of the string INCLUDING the terminating NUL.
|
||||
* On exception, returns 0.
|
||||
*
|
||||
* If there is a limit on the length of a valid string, you may wish to
|
||||
* consider using strnlen_user() instead.
|
||||
*/
|
||||
#define strlen_user(str) strnlen_user(str, ~0UL >> 1)
|
||||
|
||||
/*
|
||||
* The exception table consists of pairs of addresses: the first is the
|
||||
* address of an instruction that is allowed to fault, and the second is
|
||||
|
@ -170,79 +170,4 @@ __asm__ __volatile__( \
|
||||
|
||||
extern void __put_user_unknown(void);
|
||||
|
||||
static inline int
|
||||
__strncpy_from_user(unsigned long __dest, unsigned long __user __src, int __count)
|
||||
{
|
||||
__kernel_size_t res;
|
||||
unsigned long __dummy, _d, _s, _c;
|
||||
|
||||
__asm__ __volatile__(
|
||||
"9:\n"
|
||||
"mov.b @%2+, %1\n\t"
|
||||
"cmp/eq #0, %1\n\t"
|
||||
"bt/s 2f\n"
|
||||
"1:\n"
|
||||
"mov.b %1, @%3\n\t"
|
||||
"dt %4\n\t"
|
||||
"bf/s 9b\n\t"
|
||||
" add #1, %3\n\t"
|
||||
"2:\n\t"
|
||||
"sub %4, %0\n"
|
||||
"3:\n"
|
||||
".section .fixup,\"ax\"\n"
|
||||
"4:\n\t"
|
||||
"mov.l 5f, %1\n\t"
|
||||
"jmp @%1\n\t"
|
||||
" mov %9, %0\n\t"
|
||||
".balign 4\n"
|
||||
"5: .long 3b\n"
|
||||
".previous\n"
|
||||
".section __ex_table,\"a\"\n"
|
||||
" .balign 4\n"
|
||||
" .long 9b,4b\n"
|
||||
".previous"
|
||||
: "=r" (res), "=&z" (__dummy), "=r" (_s), "=r" (_d), "=r"(_c)
|
||||
: "0" (__count), "2" (__src), "3" (__dest), "4" (__count),
|
||||
"i" (-EFAULT)
|
||||
: "memory", "t");
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
/*
|
||||
* Return the size of a string (including the ending 0 even when we have
|
||||
* exceeded the maximum string length).
|
||||
*/
|
||||
static inline long __strnlen_user(const char __user *__s, long __n)
|
||||
{
|
||||
unsigned long res;
|
||||
unsigned long __dummy;
|
||||
|
||||
__asm__ __volatile__(
|
||||
"1:\t"
|
||||
"mov.b @(%0,%3), %1\n\t"
|
||||
"cmp/eq %4, %0\n\t"
|
||||
"bt/s 2f\n\t"
|
||||
" add #1, %0\n\t"
|
||||
"tst %1, %1\n\t"
|
||||
"bf 1b\n\t"
|
||||
"2:\n"
|
||||
".section .fixup,\"ax\"\n"
|
||||
"3:\n\t"
|
||||
"mov.l 4f, %1\n\t"
|
||||
"jmp @%1\n\t"
|
||||
" mov #0, %0\n"
|
||||
".balign 4\n"
|
||||
"4: .long 2b\n"
|
||||
".previous\n"
|
||||
".section __ex_table,\"a\"\n"
|
||||
" .balign 4\n"
|
||||
" .long 1b,3b\n"
|
||||
".previous"
|
||||
: "=z" (res), "=&r" (__dummy)
|
||||
: "0" (0), "r" (__s), "r" (__n)
|
||||
: "t");
|
||||
return res;
|
||||
}
|
||||
|
||||
#endif /* __ASM_SH_UACCESS_32_H */
|
||||
|
@ -84,8 +84,4 @@ extern long __put_user_asm_l(void *, long);
|
||||
extern long __put_user_asm_q(void *, long);
|
||||
extern void __put_user_unknown(void);
|
||||
|
||||
extern long __strnlen_user(const char *__s, long __n);
|
||||
extern int __strncpy_from_user(unsigned long __dest,
|
||||
unsigned long __user __src, int __count);
|
||||
|
||||
#endif /* __ASM_SH_UACCESS_64_H */
|
||||
|
@ -1 +0,0 @@
|
||||
#include <asm-generic/ucontext.h>
|
53
arch/sh/include/asm/word-at-a-time.h
Normal file
53
arch/sh/include/asm/word-at-a-time.h
Normal file
@ -0,0 +1,53 @@
|
||||
#ifndef __ASM_SH_WORD_AT_A_TIME_H
|
||||
#define __ASM_SH_WORD_AT_A_TIME_H
|
||||
|
||||
#ifdef CONFIG_CPU_BIG_ENDIAN
|
||||
# include <asm-generic/word-at-a-time.h>
|
||||
#else
|
||||
/*
|
||||
* Little-endian version cribbed from x86.
|
||||
*/
|
||||
struct word_at_a_time {
|
||||
const unsigned long one_bits, high_bits;
|
||||
};
|
||||
|
||||
#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
|
||||
|
||||
/* Carl Chatfield / Jan Achrenius G+ version for 32-bit */
|
||||
static inline long count_masked_bytes(long mask)
|
||||
{
|
||||
/* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */
|
||||
long a = (0x0ff0001+mask) >> 23;
|
||||
/* Fix the 1 for 00 case */
|
||||
return a & mask;
|
||||
}
|
||||
|
||||
/* Return nonzero if it has a zero */
|
||||
static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c)
|
||||
{
|
||||
unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits;
|
||||
*bits = mask;
|
||||
return mask;
|
||||
}
|
||||
|
||||
static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, const struct word_at_a_time *c)
|
||||
{
|
||||
return bits;
|
||||
}
|
||||
|
||||
static inline unsigned long create_zero_mask(unsigned long bits)
|
||||
{
|
||||
bits = (bits - 1) & ~bits;
|
||||
return bits >> 7;
|
||||
}
|
||||
|
||||
/* The mask we created is directly usable as a bytemask */
|
||||
#define zero_bytemask(mask) (mask)
|
||||
|
||||
static inline unsigned long find_zero(unsigned long mask)
|
||||
{
|
||||
return count_masked_bytes(mask);
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif
|
@ -1 +0,0 @@
|
||||
#include <asm-generic/xor.h>
|
@ -1,28 +0,0 @@
|
||||
/*
|
||||
* SH-2A UBC definitions
|
||||
*
|
||||
* Copyright (C) 2008 Kieran Bingham
|
||||
*
|
||||
* This file is subject to the terms and conditions of the GNU General Public
|
||||
* License. See the file "COPYING" in the main directory of this archive
|
||||
* for more details.
|
||||
*/
|
||||
|
||||
#ifndef __ASM_CPU_SH2A_UBC_H
|
||||
#define __ASM_CPU_SH2A_UBC_H
|
||||
|
||||
#define UBC_BARA 0xfffc0400
|
||||
#define UBC_BAMRA 0xfffc0404
|
||||
#define UBC_BBRA 0xfffc04a0 /* 16 bit access */
|
||||
#define UBC_BDRA 0xfffc0408
|
||||
#define UBC_BDMRA 0xfffc040c
|
||||
|
||||
#define UBC_BARB 0xfffc0410
|
||||
#define UBC_BAMRB 0xfffc0414
|
||||
#define UBC_BBRB 0xfffc04b0 /* 16 bit access */
|
||||
#define UBC_BDRB 0xfffc0418
|
||||
#define UBC_BDMRB 0xfffc041c
|
||||
|
||||
#define UBC_BRCR 0xfffc04c0
|
||||
|
||||
#endif /* __ASM_CPU_SH2A_UBC_H */
|
@ -1568,86 +1568,6 @@ ___clear_user_exit:
|
||||
|
||||
#endif /* CONFIG_MMU */
|
||||
|
||||
/*
|
||||
* int __strncpy_from_user(unsigned long __dest, unsigned long __src,
|
||||
* int __count)
|
||||
*
|
||||
* Inputs:
|
||||
* (r2) target address
|
||||
* (r3) source address
|
||||
* (r4) maximum size in bytes
|
||||
*
|
||||
* Ouputs:
|
||||
* (*r2) copied data
|
||||
* (r2) -EFAULT (in case of faulting)
|
||||
* copied data (otherwise)
|
||||
*/
|
||||
.global __strncpy_from_user
|
||||
__strncpy_from_user:
|
||||
pta ___strncpy_from_user1, tr0
|
||||
pta ___strncpy_from_user_done, tr1
|
||||
or r4, ZERO, r5 /* r5 = original count */
|
||||
beq/u r4, r63, tr1 /* early exit if r4==0 */
|
||||
movi -(EFAULT), r6 /* r6 = reply, no real fixup */
|
||||
or ZERO, ZERO, r7 /* r7 = data, clear top byte of data */
|
||||
|
||||
___strncpy_from_user1:
|
||||
ld.b r3, 0, r7 /* Fault address: only in reading */
|
||||
st.b r2, 0, r7
|
||||
addi r2, 1, r2
|
||||
addi r3, 1, r3
|
||||
beq/u ZERO, r7, tr1
|
||||
addi r4, -1, r4 /* return real number of copied bytes */
|
||||
bne/l ZERO, r4, tr0
|
||||
|
||||
___strncpy_from_user_done:
|
||||
sub r5, r4, r6 /* If done, return copied */
|
||||
|
||||
___strncpy_from_user_exit:
|
||||
or r6, ZERO, r2
|
||||
ptabs LINK, tr0
|
||||
blink tr0, ZERO
|
||||
|
||||
/*
|
||||
* extern long __strnlen_user(const char *__s, long __n)
|
||||
*
|
||||
* Inputs:
|
||||
* (r2) source address
|
||||
* (r3) source size in bytes
|
||||
*
|
||||
* Ouputs:
|
||||
* (r2) -EFAULT (in case of faulting)
|
||||
* string length (otherwise)
|
||||
*/
|
||||
.global __strnlen_user
|
||||
__strnlen_user:
|
||||
pta ___strnlen_user_set_reply, tr0
|
||||
pta ___strnlen_user1, tr1
|
||||
or ZERO, ZERO, r5 /* r5 = counter */
|
||||
movi -(EFAULT), r6 /* r6 = reply, no real fixup */
|
||||
or ZERO, ZERO, r7 /* r7 = data, clear top byte of data */
|
||||
beq r3, ZERO, tr0
|
||||
|
||||
___strnlen_user1:
|
||||
ldx.b r2, r5, r7 /* Fault address: only in reading */
|
||||
addi r3, -1, r3 /* No real fixup */
|
||||
addi r5, 1, r5
|
||||
beq r3, ZERO, tr0
|
||||
bne r7, ZERO, tr1
|
||||
! The line below used to be active. This meant led to a junk byte lying between each pair
|
||||
! of entries in the argv & envp structures in memory. Whilst the program saw the right data
|
||||
! via the argv and envp arguments to main, it meant the 'flat' representation visible through
|
||||
! /proc/$pid/cmdline was corrupt, causing trouble with ps, for example.
|
||||
! addi r5, 1, r5 /* Include '\0' */
|
||||
|
||||
___strnlen_user_set_reply:
|
||||
or r5, ZERO, r6 /* If done, return counter */
|
||||
|
||||
___strnlen_user_exit:
|
||||
or r6, ZERO, r2
|
||||
ptabs LINK, tr0
|
||||
blink tr0, ZERO
|
||||
|
||||
/*
|
||||
* extern long __get_user_asm_?(void *val, long addr)
|
||||
*
|
||||
@ -1982,8 +1902,6 @@ asm_uaccess_start:
|
||||
.long ___copy_user2, ___copy_user_exit
|
||||
.long ___clear_user1, ___clear_user_exit
|
||||
#endif
|
||||
.long ___strncpy_from_user1, ___strncpy_from_user_exit
|
||||
.long ___strnlen_user1, ___strnlen_user_exit
|
||||
.long ___get_user_asm_b1, ___get_user_asm_b_exit
|
||||
.long ___get_user_asm_w1, ___get_user_asm_w_exit
|
||||
.long ___get_user_asm_l1, ___get_user_asm_l_exit
|
||||
|
@ -4,6 +4,7 @@
|
||||
#include <linux/sched.h>
|
||||
#include <linux/export.h>
|
||||
#include <linux/stackprotector.h>
|
||||
#include <asm/fpu.h>
|
||||
|
||||
struct kmem_cache *task_xstate_cachep = NULL;
|
||||
unsigned int xstate_size;
|
||||
|
@ -33,6 +33,7 @@
|
||||
#include <asm/switch_to.h>
|
||||
|
||||
struct task_struct *last_task_used_math = NULL;
|
||||
struct pt_regs fake_swapper_regs = { 0, };
|
||||
|
||||
void show_regs(struct pt_regs *regs)
|
||||
{
|
||||
|
@ -32,8 +32,6 @@ EXPORT_SYMBOL(__get_user_asm_b);
|
||||
EXPORT_SYMBOL(__get_user_asm_w);
|
||||
EXPORT_SYMBOL(__get_user_asm_l);
|
||||
EXPORT_SYMBOL(__get_user_asm_q);
|
||||
EXPORT_SYMBOL(__strnlen_user);
|
||||
EXPORT_SYMBOL(__strncpy_from_user);
|
||||
EXPORT_SYMBOL(__clear_user);
|
||||
EXPORT_SYMBOL(copy_page);
|
||||
EXPORT_SYMBOL(__copy_user);
|
||||
|
@ -1,59 +0,0 @@
|
||||
#ifndef _SPARC64_CMT_H
|
||||
#define _SPARC64_CMT_H
|
||||
|
||||
/* cmt.h: Chip Multi-Threading register definitions
|
||||
*
|
||||
* Copyright (C) 2004 David S. Miller (davem@redhat.com)
|
||||
*/
|
||||
|
||||
/* ASI_CORE_ID - private */
|
||||
#define LP_ID 0x0000000000000010UL
|
||||
#define LP_ID_MAX 0x00000000003f0000UL
|
||||
#define LP_ID_ID 0x000000000000003fUL
|
||||
|
||||
/* ASI_INTR_ID - private */
|
||||
#define LP_INTR_ID 0x0000000000000000UL
|
||||
#define LP_INTR_ID_ID 0x00000000000003ffUL
|
||||
|
||||
/* ASI_CESR_ID - private */
|
||||
#define CESR_ID 0x0000000000000040UL
|
||||
#define CESR_ID_ID 0x00000000000000ffUL
|
||||
|
||||
/* ASI_CORE_AVAILABLE - shared */
|
||||
#define LP_AVAIL 0x0000000000000000UL
|
||||
#define LP_AVAIL_1 0x0000000000000002UL
|
||||
#define LP_AVAIL_0 0x0000000000000001UL
|
||||
|
||||
/* ASI_CORE_ENABLE_STATUS - shared */
|
||||
#define LP_ENAB_STAT 0x0000000000000010UL
|
||||
#define LP_ENAB_STAT_1 0x0000000000000002UL
|
||||
#define LP_ENAB_STAT_0 0x0000000000000001UL
|
||||
|
||||
/* ASI_CORE_ENABLE - shared */
|
||||
#define LP_ENAB 0x0000000000000020UL
|
||||
#define LP_ENAB_1 0x0000000000000002UL
|
||||
#define LP_ENAB_0 0x0000000000000001UL
|
||||
|
||||
/* ASI_CORE_RUNNING - shared */
|
||||
#define LP_RUNNING_RW 0x0000000000000050UL
|
||||
#define LP_RUNNING_W1S 0x0000000000000060UL
|
||||
#define LP_RUNNING_W1C 0x0000000000000068UL
|
||||
#define LP_RUNNING_1 0x0000000000000002UL
|
||||
#define LP_RUNNING_0 0x0000000000000001UL
|
||||
|
||||
/* ASI_CORE_RUNNING_STAT - shared */
|
||||
#define LP_RUN_STAT 0x0000000000000058UL
|
||||
#define LP_RUN_STAT_1 0x0000000000000002UL
|
||||
#define LP_RUN_STAT_0 0x0000000000000001UL
|
||||
|
||||
/* ASI_XIR_STEERING - shared */
|
||||
#define LP_XIR_STEER 0x0000000000000030UL
|
||||
#define LP_XIR_STEER_1 0x0000000000000002UL
|
||||
#define LP_XIR_STEER_0 0x0000000000000001UL
|
||||
|
||||
/* ASI_CMT_ERROR_STEERING - shared */
|
||||
#define CMT_ER_STEER 0x0000000000000040UL
|
||||
#define CMT_ER_STEER_1 0x0000000000000002UL
|
||||
#define CMT_ER_STEER_0 0x0000000000000001UL
|
||||
|
||||
#endif /* _SPARC64_CMT_H */
|
@ -1,67 +0,0 @@
|
||||
/*
|
||||
* mpmbox.h: Interface and defines for the OpenProm mailbox
|
||||
* facilities for MP machines under Linux.
|
||||
*
|
||||
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
|
||||
*/
|
||||
|
||||
#ifndef _SPARC_MPMBOX_H
|
||||
#define _SPARC_MPMBOX_H
|
||||
|
||||
/* The prom allocates, for each CPU on the machine an unsigned
|
||||
* byte in physical ram. You probe the device tree prom nodes
|
||||
* for these values. The purpose of this byte is to be able to
|
||||
* pass messages from one cpu to another.
|
||||
*/
|
||||
|
||||
/* These are the main message types we have to look for in our
|
||||
* Cpu mailboxes, based upon these values we decide what course
|
||||
* of action to take.
|
||||
*/
|
||||
|
||||
/* The CPU is executing code in the kernel. */
|
||||
#define MAILBOX_ISRUNNING 0xf0
|
||||
|
||||
/* Another CPU called romvec->pv_exit(), you should call
|
||||
* prom_stopcpu() when you see this in your mailbox.
|
||||
*/
|
||||
#define MAILBOX_EXIT 0xfb
|
||||
|
||||
/* Another CPU called romvec->pv_enter(), you should call
|
||||
* prom_cpuidle() when this is seen.
|
||||
*/
|
||||
#define MAILBOX_GOSPIN 0xfc
|
||||
|
||||
/* Another CPU has hit a breakpoint either into kadb or the prom
|
||||
* itself. Just like MAILBOX_GOSPIN, you should call prom_cpuidle()
|
||||
* at this point.
|
||||
*/
|
||||
#define MAILBOX_BPT_SPIN 0xfd
|
||||
|
||||
/* Oh geese, some other nitwit got a damn watchdog reset. The party's
|
||||
* over so go call prom_stopcpu().
|
||||
*/
|
||||
#define MAILBOX_WDOG_STOP 0xfe
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
/* Handy macro's to determine a cpu's state. */
|
||||
|
||||
/* Is the cpu still in Power On Self Test? */
|
||||
#define MBOX_POST_P(letter) ((letter) >= 0x00 && (letter) <= 0x7f)
|
||||
|
||||
/* Is the cpu at the 'ok' prompt of the PROM? */
|
||||
#define MBOX_PROMPROMPT_P(letter) ((letter) >= 0x80 && (letter) <= 0x8f)
|
||||
|
||||
/* Is the cpu spinning in the PROM? */
|
||||
#define MBOX_PROMSPIN_P(letter) ((letter) >= 0x90 && (letter) <= 0xef)
|
||||
|
||||
/* Sanity check... This is junk mail, throw it out. */
|
||||
#define MBOX_BOGON_P(letter) ((letter) >= 0xf1 && (letter) <= 0xfa)
|
||||
|
||||
/* Is the cpu actively running an application/kernel-code? */
|
||||
#define MBOX_RUNNING_P(letter) ((letter) == MAILBOX_ISRUNNING)
|
||||
|
||||
#endif /* !(__ASSEMBLY__) */
|
||||
|
||||
#endif /* !(_SPARC_MPMBOX_H) */
|
@ -146,7 +146,7 @@ extern int fixup_exception(struct pt_regs *regs);
|
||||
#ifdef __tilegx__
|
||||
#define __get_user_1(x, ptr, ret) __get_user_asm(ld1u, x, ptr, ret)
|
||||
#define __get_user_2(x, ptr, ret) __get_user_asm(ld2u, x, ptr, ret)
|
||||
#define __get_user_4(x, ptr, ret) __get_user_asm(ld4u, x, ptr, ret)
|
||||
#define __get_user_4(x, ptr, ret) __get_user_asm(ld4s, x, ptr, ret)
|
||||
#define __get_user_8(x, ptr, ret) __get_user_asm(ld, x, ptr, ret)
|
||||
#else
|
||||
#define __get_user_1(x, ptr, ret) __get_user_asm(lb_u, x, ptr, ret)
|
||||
|
@ -2460,10 +2460,12 @@ ENTRY(aesni_cbc_dec)
|
||||
pxor IN3, STATE4
|
||||
movaps IN4, IV
|
||||
#else
|
||||
pxor (INP), STATE2
|
||||
pxor 0x10(INP), STATE3
|
||||
pxor IN1, STATE4
|
||||
movaps IN2, IV
|
||||
movups (INP), IN1
|
||||
pxor IN1, STATE2
|
||||
movups 0x10(INP), IN2
|
||||
pxor IN2, STATE3
|
||||
#endif
|
||||
movups STATE1, (OUTP)
|
||||
movups STATE2, 0x10(OUTP)
|
||||
|
@ -120,11 +120,6 @@ bool kvm_check_and_clear_guest_paused(void)
|
||||
bool ret = false;
|
||||
struct pvclock_vcpu_time_info *src;
|
||||
|
||||
/*
|
||||
* per_cpu() is safe here because this function is only called from
|
||||
* timer functions where preemption is already disabled.
|
||||
*/
|
||||
WARN_ON(!in_atomic());
|
||||
src = &__get_cpu_var(hv_clock);
|
||||
if ((src->flags & PVCLOCK_GUEST_STOPPED) != 0) {
|
||||
__this_cpu_and(hv_clock.flags, ~PVCLOCK_GUEST_STOPPED);
|
||||
|
@ -100,7 +100,7 @@ void *dma_generic_alloc_coherent(struct device *dev, size_t size,
|
||||
struct dma_attrs *attrs)
|
||||
{
|
||||
unsigned long dma_mask;
|
||||
struct page *page = NULL;
|
||||
struct page *page;
|
||||
unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
|
||||
dma_addr_t addr;
|
||||
|
||||
@ -108,6 +108,7 @@ void *dma_generic_alloc_coherent(struct device *dev, size_t size,
|
||||
|
||||
flag |= __GFP_ZERO;
|
||||
again:
|
||||
page = NULL;
|
||||
if (!(flag & GFP_ATOMIC))
|
||||
page = dma_alloc_from_contiguous(dev, count, get_order(size));
|
||||
if (!page)
|
||||
|
@ -349,9 +349,12 @@ static bool __cpuinit match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
|
||||
|
||||
static bool __cpuinit match_mc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
|
||||
{
|
||||
if (c->phys_proc_id == o->phys_proc_id)
|
||||
return topology_sane(c, o, "mc");
|
||||
if (c->phys_proc_id == o->phys_proc_id) {
|
||||
if (cpu_has(c, X86_FEATURE_AMD_DCM))
|
||||
return true;
|
||||
|
||||
return topology_sane(c, o, "mc");
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
|
@ -22,7 +22,7 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
|
||||
void *map;
|
||||
int ret;
|
||||
|
||||
if (__range_not_ok(from, n, TASK_SIZE) == 0)
|
||||
if (__range_not_ok(from, n, TASK_SIZE))
|
||||
return len;
|
||||
|
||||
do {
|
||||
|
@ -180,7 +180,7 @@ err_free_memtype:
|
||||
|
||||
/**
|
||||
* ioremap_nocache - map bus memory into CPU space
|
||||
* @offset: bus address of the memory
|
||||
* @phys_addr: bus address of the memory
|
||||
* @size: size of the resource to map
|
||||
*
|
||||
* ioremap_nocache performs a platform specific sequence of operations to
|
||||
@ -217,7 +217,7 @@ EXPORT_SYMBOL(ioremap_nocache);
|
||||
|
||||
/**
|
||||
* ioremap_wc - map memory into CPU space write combined
|
||||
* @offset: bus address of the memory
|
||||
* @phys_addr: bus address of the memory
|
||||
* @size: size of the resource to map
|
||||
*
|
||||
* This version of ioremap ensures that the memory is marked write combining.
|
||||
|
@ -122,7 +122,7 @@ within(unsigned long addr, unsigned long start, unsigned long end)
|
||||
|
||||
/**
|
||||
* clflush_cache_range - flush a cache range with clflush
|
||||
* @addr: virtual start address
|
||||
* @vaddr: virtual start address
|
||||
* @size: number of bytes to flush
|
||||
*
|
||||
* clflush is an unordered instruction which needs fencing with mfence
|
||||
|
@ -39,9 +39,9 @@
|
||||
#undef __SYSCALL_I386
|
||||
#define __SYSCALL_I386(nr, sym, compat) [ nr ] = sym,
|
||||
|
||||
typedef void (*sys_call_ptr_t)(void);
|
||||
typedef asmlinkage void (*sys_call_ptr_t)(void);
|
||||
|
||||
extern void sys_ni_syscall(void);
|
||||
extern asmlinkage void sys_ni_syscall(void);
|
||||
|
||||
const sys_call_ptr_t sys_call_table[] __cacheline_aligned = {
|
||||
/*
|
||||
|
@ -209,6 +209,9 @@ static void __init xen_banner(void)
|
||||
xen_feature(XENFEAT_mmu_pt_update_preserve_ad) ? " (preserve-AD)" : "");
|
||||
}
|
||||
|
||||
#define CPUID_THERM_POWER_LEAF 6
|
||||
#define APERFMPERF_PRESENT 0
|
||||
|
||||
static __read_mostly unsigned int cpuid_leaf1_edx_mask = ~0;
|
||||
static __read_mostly unsigned int cpuid_leaf1_ecx_mask = ~0;
|
||||
|
||||
@ -242,6 +245,11 @@ static void xen_cpuid(unsigned int *ax, unsigned int *bx,
|
||||
*dx = cpuid_leaf5_edx_val;
|
||||
return;
|
||||
|
||||
case CPUID_THERM_POWER_LEAF:
|
||||
/* Disabling APERFMPERF for kernel usage */
|
||||
maskecx = ~(1 << APERFMPERF_PRESENT);
|
||||
break;
|
||||
|
||||
case 0xb:
|
||||
/* Suppress extended topology stuff */
|
||||
maskebx = 0;
|
||||
|
@ -706,6 +706,7 @@ int m2p_add_override(unsigned long mfn, struct page *page,
|
||||
unsigned long uninitialized_var(address);
|
||||
unsigned level;
|
||||
pte_t *ptep = NULL;
|
||||
int ret = 0;
|
||||
|
||||
pfn = page_to_pfn(page);
|
||||
if (!PageHighMem(page)) {
|
||||
@ -741,6 +742,24 @@ int m2p_add_override(unsigned long mfn, struct page *page,
|
||||
list_add(&page->lru, &m2p_overrides[mfn_hash(mfn)]);
|
||||
spin_unlock_irqrestore(&m2p_override_lock, flags);
|
||||
|
||||
/* p2m(m2p(mfn)) == mfn: the mfn is already present somewhere in
|
||||
* this domain. Set the FOREIGN_FRAME_BIT in the p2m for the other
|
||||
* pfn so that the following mfn_to_pfn(mfn) calls will return the
|
||||
* pfn from the m2p_override (the backend pfn) instead.
|
||||
* We need to do this because the pages shared by the frontend
|
||||
* (xen-blkfront) can be already locked (lock_page, called by
|
||||
* do_read_cache_page); when the userspace backend tries to use them
|
||||
* with direct_IO, mfn_to_pfn returns the pfn of the frontend, so
|
||||
* do_blockdev_direct_IO is going to try to lock the same pages
|
||||
* again resulting in a deadlock.
|
||||
* As a side effect get_user_pages_fast might not be safe on the
|
||||
* frontend pages while they are being shared with the backend,
|
||||
* because mfn_to_pfn (that ends up being called by GUPF) will
|
||||
* return the backend pfn rather than the frontend pfn. */
|
||||
ret = __get_user(pfn, &machine_to_phys_mapping[mfn]);
|
||||
if (ret == 0 && get_phys_to_machine(pfn) == mfn)
|
||||
set_phys_to_machine(pfn, FOREIGN_FRAME(mfn));
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(m2p_add_override);
|
||||
@ -752,6 +771,7 @@ int m2p_remove_override(struct page *page, bool clear_pte)
|
||||
unsigned long uninitialized_var(address);
|
||||
unsigned level;
|
||||
pte_t *ptep = NULL;
|
||||
int ret = 0;
|
||||
|
||||
pfn = page_to_pfn(page);
|
||||
mfn = get_phys_to_machine(pfn);
|
||||
@ -821,6 +841,22 @@ int m2p_remove_override(struct page *page, bool clear_pte)
|
||||
} else
|
||||
set_phys_to_machine(pfn, page->index);
|
||||
|
||||
/* p2m(m2p(mfn)) == FOREIGN_FRAME(mfn): the mfn is already present
|
||||
* somewhere in this domain, even before being added to the
|
||||
* m2p_override (see comment above in m2p_add_override).
|
||||
* If there are no other entries in the m2p_override corresponding
|
||||
* to this mfn, then remove the FOREIGN_FRAME_BIT from the p2m for
|
||||
* the original pfn (the one shared by the frontend): the backend
|
||||
* cannot do any IO on this page anymore because it has been
|
||||
* unshared. Removing the FOREIGN_FRAME_BIT from the p2m entry of
|
||||
* the original pfn causes mfn_to_pfn(mfn) to return the frontend
|
||||
* pfn again. */
|
||||
mfn &= ~FOREIGN_FRAME_BIT;
|
||||
ret = __get_user(pfn, &machine_to_phys_mapping[mfn]);
|
||||
if (ret == 0 && get_phys_to_machine(pfn) == FOREIGN_FRAME(mfn) &&
|
||||
m2p_find_override(mfn) == NULL)
|
||||
set_phys_to_machine(pfn, mfn);
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(m2p_remove_override);
|
||||
|
@ -371,7 +371,8 @@ char * __init xen_memory_setup(void)
|
||||
populated = xen_populate_chunk(map, memmap.nr_entries,
|
||||
max_pfn, &last_pfn, xen_released_pages);
|
||||
|
||||
extra_pages += (xen_released_pages - populated);
|
||||
xen_released_pages -= populated;
|
||||
extra_pages += xen_released_pages;
|
||||
|
||||
if (last_pfn > max_pfn) {
|
||||
max_pfn = min(MAX_DOMAIN_PAGES, last_pfn);
|
||||
|
@ -246,11 +246,11 @@ struct regmap *regmap_init(struct device *dev,
|
||||
map->lock = regmap_lock_mutex;
|
||||
map->unlock = regmap_unlock_mutex;
|
||||
}
|
||||
map->format.buf_size = (config->reg_bits + config->val_bits) / 8;
|
||||
map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
|
||||
map->format.pad_bytes = config->pad_bits / 8;
|
||||
map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
|
||||
map->format.buf_size += map->format.pad_bytes;
|
||||
map->format.buf_size = DIV_ROUND_UP(config->reg_bits +
|
||||
config->val_bits + config->pad_bits, 8);
|
||||
map->reg_shift = config->pad_bits % 8;
|
||||
if (config->reg_stride)
|
||||
map->reg_stride = config->reg_stride;
|
||||
@ -368,7 +368,7 @@ struct regmap *regmap_init(struct device *dev,
|
||||
|
||||
ret = regcache_init(map, config);
|
||||
if (ret < 0)
|
||||
goto err_free_workbuf;
|
||||
goto err_debugfs;
|
||||
|
||||
/* Add a devres resource for dev_get_regmap() */
|
||||
m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
|
||||
@ -383,7 +383,8 @@ struct regmap *regmap_init(struct device *dev,
|
||||
|
||||
err_cache:
|
||||
regcache_exit(map);
|
||||
err_free_workbuf:
|
||||
err_debugfs:
|
||||
regmap_debugfs_exit(map);
|
||||
kfree(map->work_buf);
|
||||
err_map:
|
||||
kfree(map);
|
||||
@ -471,6 +472,7 @@ int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
|
||||
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(regmap_reinit_cache);
|
||||
|
||||
/**
|
||||
* regmap_exit(): Free a previously allocated register map
|
||||
|
@ -139,7 +139,9 @@ void bcma_pmu_workarounds(struct bcma_drv_cc *cc)
|
||||
bcma_chipco_chipctl_maskset(cc, 0, ~0, 0x7);
|
||||
break;
|
||||
case 0x4331:
|
||||
/* BCM4331 workaround is SPROM-related, we put it in sprom.c */
|
||||
case 43431:
|
||||
/* Ext PA lines must be enabled for tx on BCM4331 */
|
||||
bcma_chipco_bcm4331_ext_pa_lines_ctl(cc, true);
|
||||
break;
|
||||
case 43224:
|
||||
if (bus->chipinfo.rev == 0) {
|
||||
|
@ -232,17 +232,19 @@ void __devinit bcma_core_pci_init(struct bcma_drv_pci *pc)
|
||||
int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc, struct bcma_device *core,
|
||||
bool enable)
|
||||
{
|
||||
struct pci_dev *pdev = pc->core->bus->host_pci;
|
||||
struct pci_dev *pdev;
|
||||
u32 coremask, tmp;
|
||||
int err = 0;
|
||||
|
||||
if (core->bus->hosttype != BCMA_HOSTTYPE_PCI) {
|
||||
if (!pc || core->bus->hosttype != BCMA_HOSTTYPE_PCI) {
|
||||
/* This bcma device is not on a PCI host-bus. So the IRQs are
|
||||
* not routed through the PCI core.
|
||||
* So we must not enable routing through the PCI core. */
|
||||
goto out;
|
||||
}
|
||||
|
||||
pdev = pc->core->bus->host_pci;
|
||||
|
||||
err = pci_read_config_dword(pdev, BCMA_PCI_IRQMASK, &tmp);
|
||||
if (err)
|
||||
goto out;
|
||||
|
@ -579,13 +579,13 @@ int bcma_sprom_get(struct bcma_bus *bus)
|
||||
if (!sprom)
|
||||
return -ENOMEM;
|
||||
|
||||
if (bus->chipinfo.id == 0x4331)
|
||||
if (bus->chipinfo.id == 0x4331 || bus->chipinfo.id == 43431)
|
||||
bcma_chipco_bcm4331_ext_pa_lines_ctl(&bus->drv_cc, false);
|
||||
|
||||
pr_debug("SPROM offset 0x%x\n", offset);
|
||||
bcma_sprom_read(bus, offset, sprom);
|
||||
|
||||
if (bus->chipinfo.id == 0x4331)
|
||||
if (bus->chipinfo.id == 0x4331 || bus->chipinfo.id == 43431)
|
||||
bcma_chipco_bcm4331_ext_pa_lines_ctl(&bus->drv_cc, true);
|
||||
|
||||
err = bcma_sprom_valid(sprom);
|
||||
|
@ -36,6 +36,13 @@ static int atmel_trng_read(struct hwrng *rng, void *buf, size_t max,
|
||||
/* data ready? */
|
||||
if (readl(trng->base + TRNG_ODATA) & 1) {
|
||||
*data = readl(trng->base + TRNG_ODATA);
|
||||
/*
|
||||
ensure data ready is only set again AFTER the next data
|
||||
word is ready in case it got set between checking ISR
|
||||
and reading ODATA, so we don't risk re-reading the
|
||||
same word
|
||||
*/
|
||||
readl(trng->base + TRNG_ISR);
|
||||
return 4;
|
||||
} else
|
||||
return 0;
|
||||
|
@ -48,13 +48,13 @@ struct sh_cmt_priv {
unsigned long next_match_value;
unsigned long max_match_value;
unsigned long rate;
spinlock_t lock;
raw_spinlock_t lock;
struct clock_event_device ced;
struct clocksource cs;
unsigned long total_cycles;
};

static DEFINE_SPINLOCK(sh_cmt_lock);
static DEFINE_RAW_SPINLOCK(sh_cmt_lock);

#define CMSTR -1 /* shared register */
#define CMCSR 0 /* channel register */
@ -139,7 +139,7 @@ static void sh_cmt_start_stop_ch(struct sh_cmt_priv *p, int start)
unsigned long flags, value;

/* start stop register shared by multiple timer channels */
spin_lock_irqsave(&sh_cmt_lock, flags);
raw_spin_lock_irqsave(&sh_cmt_lock, flags);
value = sh_cmt_read(p, CMSTR);

if (start)
@ -148,7 +148,7 @@ static void sh_cmt_start_stop_ch(struct sh_cmt_priv *p, int start)
value &= ~(1 << cfg->timer_bit);

sh_cmt_write(p, CMSTR, value);
spin_unlock_irqrestore(&sh_cmt_lock, flags);
raw_spin_unlock_irqrestore(&sh_cmt_lock, flags);
}

static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate)
@ -328,9 +328,9 @@ static void sh_cmt_set_next(struct sh_cmt_priv *p, unsigned long delta)
{
unsigned long flags;

spin_lock_irqsave(&p->lock, flags);
raw_spin_lock_irqsave(&p->lock, flags);
__sh_cmt_set_next(p, delta);
spin_unlock_irqrestore(&p->lock, flags);
raw_spin_unlock_irqrestore(&p->lock, flags);
}

static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id)
@ -385,7 +385,7 @@ static int sh_cmt_start(struct sh_cmt_priv *p, unsigned long flag)
int ret = 0;
unsigned long flags;

spin_lock_irqsave(&p->lock, flags);
raw_spin_lock_irqsave(&p->lock, flags);

if (!(p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
ret = sh_cmt_enable(p, &p->rate);
@ -398,7 +398,7 @@ static int sh_cmt_start(struct sh_cmt_priv *p, unsigned long flag)
if ((flag == FLAG_CLOCKSOURCE) && (!(p->flags & FLAG_CLOCKEVENT)))
__sh_cmt_set_next(p, p->max_match_value);
out:
spin_unlock_irqrestore(&p->lock, flags);
raw_spin_unlock_irqrestore(&p->lock, flags);

return ret;
}
@ -408,7 +408,7 @@ static void sh_cmt_stop(struct sh_cmt_priv *p, unsigned long flag)
unsigned long flags;
unsigned long f;

spin_lock_irqsave(&p->lock, flags);
raw_spin_lock_irqsave(&p->lock, flags);

f = p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE);
p->flags &= ~flag;
@ -420,7 +420,7 @@ static void sh_cmt_stop(struct sh_cmt_priv *p, unsigned long flag)
if ((flag == FLAG_CLOCKEVENT) && (p->flags & FLAG_CLOCKSOURCE))
__sh_cmt_set_next(p, p->max_match_value);

spin_unlock_irqrestore(&p->lock, flags);
raw_spin_unlock_irqrestore(&p->lock, flags);
}

static struct sh_cmt_priv *cs_to_sh_cmt(struct clocksource *cs)
@ -435,13 +435,13 @@ static cycle_t sh_cmt_clocksource_read(struct clocksource *cs)
unsigned long value;
int has_wrapped;

spin_lock_irqsave(&p->lock, flags);
raw_spin_lock_irqsave(&p->lock, flags);
value = p->total_cycles;
raw = sh_cmt_get_counter(p, &has_wrapped);

if (unlikely(has_wrapped))
raw += p->match_value + 1;
spin_unlock_irqrestore(&p->lock, flags);
raw_spin_unlock_irqrestore(&p->lock, flags);

return value + raw;
}
@ -591,7 +591,7 @@ static int sh_cmt_register(struct sh_cmt_priv *p, char *name,
p->max_match_value = (1 << p->width) - 1;

p->match_value = p->max_match_value;
spin_lock_init(&p->lock);
raw_spin_lock_init(&p->lock);

if (clockevent_rating)
sh_cmt_register_clockevent(p, name, clockevent_rating);

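The sh_cmt changes above, and the sh_mtu2/sh_tmu hunks that follow, convert both the shared start/stop lock and the per-channel lock from spinlock_t to raw_spinlock_t, so these timer paths keep spinning even on kernels where ordinary spinlocks can sleep (PREEMPT_RT). A minimal sketch of the conversion pattern, with placeholder names:

#include <linux/spinlock.h>

/* lock protecting a start/stop register shared by all channels */
static DEFINE_RAW_SPINLOCK(timer_lock);

struct timer_chan {
        raw_spinlock_t lock;            /* per-channel state lock */
        unsigned long shadow;
};

static void timer_chan_init(struct timer_chan *ch)
{
        raw_spin_lock_init(&ch->lock);
}

static void timer_start_stop(struct timer_chan *ch, unsigned long bit, int start)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&timer_lock, flags);
        if (start)                      /* stands in for the CMSTR/TSTR update */
                ch->shadow |= bit;
        else
                ch->shadow &= ~bit;
        raw_spin_unlock_irqrestore(&timer_lock, flags);
}
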
@ -43,7 +43,7 @@ struct sh_mtu2_priv {
struct clock_event_device ced;
};

static DEFINE_SPINLOCK(sh_mtu2_lock);
static DEFINE_RAW_SPINLOCK(sh_mtu2_lock);

#define TSTR -1 /* shared register */
#define TCR 0 /* channel register */
@ -107,7 +107,7 @@ static void sh_mtu2_start_stop_ch(struct sh_mtu2_priv *p, int start)
unsigned long flags, value;

/* start stop register shared by multiple timer channels */
spin_lock_irqsave(&sh_mtu2_lock, flags);
raw_spin_lock_irqsave(&sh_mtu2_lock, flags);
value = sh_mtu2_read(p, TSTR);

if (start)
@ -116,7 +116,7 @@ static void sh_mtu2_start_stop_ch(struct sh_mtu2_priv *p, int start)
value &= ~(1 << cfg->timer_bit);

sh_mtu2_write(p, TSTR, value);
spin_unlock_irqrestore(&sh_mtu2_lock, flags);
raw_spin_unlock_irqrestore(&sh_mtu2_lock, flags);
}

static int sh_mtu2_enable(struct sh_mtu2_priv *p)

@ -45,7 +45,7 @@ struct sh_tmu_priv {
struct clocksource cs;
};

static DEFINE_SPINLOCK(sh_tmu_lock);
static DEFINE_RAW_SPINLOCK(sh_tmu_lock);

#define TSTR -1 /* shared register */
#define TCOR 0 /* channel register */
@ -95,7 +95,7 @@ static void sh_tmu_start_stop_ch(struct sh_tmu_priv *p, int start)
unsigned long flags, value;

/* start stop register shared by multiple timer channels */
spin_lock_irqsave(&sh_tmu_lock, flags);
raw_spin_lock_irqsave(&sh_tmu_lock, flags);
value = sh_tmu_read(p, TSTR);

if (start)
@ -104,7 +104,7 @@ static void sh_tmu_start_stop_ch(struct sh_tmu_priv *p, int start)
value &= ~(1 << cfg->timer_bit);

sh_tmu_write(p, TSTR, value);
spin_unlock_irqrestore(&sh_tmu_lock, flags);
raw_spin_unlock_irqrestore(&sh_tmu_lock, flags);
}

static int sh_tmu_enable(struct sh_tmu_priv *p)
@ -245,12 +245,7 @@ static void sh_tmu_clock_event_start(struct sh_tmu_priv *p, int periodic)

sh_tmu_enable(p);

/* TODO: calculate good shift from rate and counter bit width */

ced->shift = 32;
ced->mult = div_sc(p->rate, NSEC_PER_SEC, ced->shift);
ced->max_delta_ns = clockevent_delta2ns(0xffffffff, ced);
ced->min_delta_ns = 5000;
clockevents_config(ced, p->rate);

if (periodic) {
p->periodic = (p->rate + HZ/2) / HZ;
@ -323,7 +318,8 @@ static void sh_tmu_register_clockevent(struct sh_tmu_priv *p,
ced->set_mode = sh_tmu_clock_event_mode;

dev_info(&p->pdev->dev, "used for clock events\n");
clockevents_register_device(ced);

clockevents_config_and_register(ced, 1, 0x300, 0xffffffff);

ret = setup_irq(p->irqaction.irq, &p->irqaction);
if (ret) {

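Besides the lock conversion, the sh_tmu hunks drop the hand-rolled shift/mult/min/max setup in favour of clockevents_config(), and register the device through clockevents_config_and_register(). A hedged sketch of registering a clock_event_device that way; the callbacks, rating and delta limits here are placeholders, not the sh_tmu values:

#include <linux/clockchips.h>

static int example_set_next_event(unsigned long delta,
                                  struct clock_event_device *ced)
{
        /* program the hardware comparator 'delta' ticks into the future */
        return 0;
}

static void example_set_mode(enum clock_event_mode mode,
                             struct clock_event_device *ced)
{
        /* switch between periodic/oneshot/shutdown as requested */
}

static struct clock_event_device example_ced = {
        .name           = "example-timer",
        .features       = CLOCK_EVT_FEAT_ONESHOT,
        .rating         = 200,
        .set_next_event = example_set_next_event,
        .set_mode       = example_set_mode,
};

static void example_timer_register(unsigned long rate_hz)
{
        /* the core derives mult/shift and the ns delta limits from the rate */
        clockevents_config_and_register(&example_ced, rate_hz, 0x300, 0xffffffff);
}
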
@ -52,6 +52,7 @@ struct evergreen_cs_track {
u32 cb_color_view[12];
u32 cb_color_pitch[12];
u32 cb_color_slice[12];
u32 cb_color_slice_idx[12];
u32 cb_color_attrib[12];
u32 cb_color_cmask_slice[8];/* unused */
u32 cb_color_fmask_slice[8];/* unused */
@ -127,12 +128,14 @@ static void evergreen_cs_track_init(struct evergreen_cs_track *track)
track->cb_color_info[i] = 0;
track->cb_color_view[i] = 0xFFFFFFFF;
track->cb_color_pitch[i] = 0;
track->cb_color_slice[i] = 0;
track->cb_color_slice[i] = 0xfffffff;
track->cb_color_slice_idx[i] = 0;
}
track->cb_target_mask = 0xFFFFFFFF;
track->cb_shader_mask = 0xFFFFFFFF;
track->cb_dirty = true;

track->db_depth_slice = 0xffffffff;
track->db_depth_view = 0xFFFFC000;
track->db_depth_size = 0xFFFFFFFF;
track->db_depth_control = 0xFFFFFFFF;
@ -250,10 +253,9 @@ static int evergreen_surface_check_2d(struct radeon_cs_parser *p,
{
struct evergreen_cs_track *track = p->track;
unsigned palign, halign, tileb, slice_pt;
unsigned mtile_pr, mtile_ps, mtileb;

tileb = 64 * surf->bpe * surf->nsamples;
palign = track->group_size / (8 * surf->bpe * surf->nsamples);
palign = MAX(8, palign);
slice_pt = 1;
if (tileb > surf->tsplit) {
slice_pt = tileb / surf->tsplit;
@ -262,7 +264,10 @@ static int evergreen_surface_check_2d(struct radeon_cs_parser *p,
/* macro tile width & height */
palign = (8 * surf->bankw * track->npipes) * surf->mtilea;
halign = (8 * surf->bankh * surf->nbanks) / surf->mtilea;
surf->layer_size = surf->nbx * surf->nby * surf->bpe * slice_pt;
mtileb = (palign / 8) * (halign / 8) * tileb;;
mtile_pr = surf->nbx / palign;
mtile_ps = (mtile_pr * surf->nby) / halign;
surf->layer_size = mtile_ps * mtileb * slice_pt;
surf->base_align = (palign / 8) * (halign / 8) * tileb;
surf->palign = palign;
surf->halign = halign;
@ -434,6 +439,39 @@ static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned i

offset += surf.layer_size * mslice;
if (offset > radeon_bo_size(track->cb_color_bo[id])) {
/* old ddx are broken they allocate bo with w*h*bpp but
* program slice with ALIGN(h, 8), catch this and patch
* command stream.
*/
if (!surf.mode) {
volatile u32 *ib = p->ib.ptr;
unsigned long tmp, nby, bsize, size, min = 0;

/* find the height the ddx wants */
if (surf.nby > 8) {
min = surf.nby - 8;
}
bsize = radeon_bo_size(track->cb_color_bo[id]);
tmp = track->cb_color_bo_offset[id] << 8;
for (nby = surf.nby; nby > min; nby--) {
size = nby * surf.nbx * surf.bpe * surf.nsamples;
if ((tmp + size * mslice) <= bsize) {
break;
}
}
if (nby > min) {
surf.nby = nby;
slice = ((nby * surf.nbx) / 64) - 1;
if (!evergreen_surface_check(p, &surf, "cb")) {
/* check if this one works */
tmp += surf.layer_size * mslice;
if (tmp <= bsize) {
ib[track->cb_color_slice_idx[id]] = slice;
goto old_ddx_ok;
}
}
}
}
dev_warn(p->dev, "%s:%d cb[%d] bo too small (layer size %d, "
"offset %d, max layer %d, bo size %ld, slice %d)\n",
__func__, __LINE__, id, surf.layer_size,
@ -446,6 +484,7 @@ static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned i
surf.tsplit, surf.mtilea);
return -EINVAL;
}
old_ddx_ok:

return 0;
}
@ -1532,6 +1571,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR7_SLICE:
tmp = (reg - CB_COLOR0_SLICE) / 0x3c;
track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx);
track->cb_color_slice_idx[tmp] = idx;
track->cb_dirty = true;
break;
case CB_COLOR8_SLICE:
@ -1540,6 +1580,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR11_SLICE:
tmp = ((reg - CB_COLOR8_SLICE) / 0x1c) + 8;
track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx);
track->cb_color_slice_idx[tmp] = idx;
track->cb_dirty = true;
break;
case CB_COLOR0_ATTRIB:

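The validate_cb hunk above works around old userspace that programs CB_COLOR*_SLICE for a buffer smaller than the checker expects: it walks the surface height down a few rows until all layers fit the BO, then patches the slice value directly in the indirect buffer via the recorded cb_color_slice_idx. A condensed sketch of just the downward search; all names are local to this sketch:

/*
 * Find the largest height 'nby' within 8 rows of the requested one whose
 * layers still fit in a buffer of 'bo_size' bytes starting at 'offset'.
 * Returns 0 if nothing within that tolerance fits.
 */
static unsigned long fit_height(unsigned long req_nby, unsigned long nbx,
                                unsigned long bpe, unsigned long nsamples,
                                unsigned long mslice, unsigned long offset,
                                unsigned long bo_size)
{
        unsigned long nby, size;
        unsigned long min = req_nby > 8 ? req_nby - 8 : 0;

        for (nby = req_nby; nby > min; nby--) {
                size = nby * nbx * bpe * nsamples;
                if (offset + size * mslice <= bo_size)
                        return nby;     /* first (largest) height that fits */
        }
        return 0;
}
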
@ -57,9 +57,10 @@
* 2.13.0 - virtual memory support, streamout
* 2.14.0 - add evergreen tiling informations
* 2.15.0 - add max_pipes query
* 2.16.0 - fix evergreen 2D tiled surface calculation
*/
#define KMS_DRIVER_MAJOR 2
#define KMS_DRIVER_MINOR 15
#define KMS_DRIVER_MINOR 16
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);

@ -1204,6 +1204,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
(*destroy)(bo);
else
kfree(bo);
ttm_mem_global_free(mem_glob, acc_size);
return -EINVAL;
}
bo->destroy = destroy;
@ -1307,22 +1308,14 @@ int ttm_bo_create(struct ttm_bo_device *bdev,
struct ttm_buffer_object **p_bo)
{
struct ttm_buffer_object *bo;
struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
size_t acc_size;
int ret;

acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
if (unlikely(ret != 0))
return ret;

bo = kzalloc(sizeof(*bo), GFP_KERNEL);

if (unlikely(bo == NULL)) {
ttm_mem_global_free(mem_glob, acc_size);
if (unlikely(bo == NULL))
return -ENOMEM;
}

acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
buffer_start, interruptible,
persistent_swap_storage, acc_size, NULL, NULL);

@ -190,6 +190,19 @@ find_active_client(struct list_head *head)
return NULL;
}

int vga_switcheroo_get_client_state(struct pci_dev *pdev)
{
struct vga_switcheroo_client *client;

client = find_client_from_pci(&vgasr_priv.clients, pdev);
if (!client)
return VGA_SWITCHEROO_NOT_FOUND;
if (!vgasr_priv.active)
return VGA_SWITCHEROO_INIT;
return client->pwr_state;
}
EXPORT_SYMBOL(vga_switcheroo_get_client_state);

void vga_switcheroo_unregister_client(struct pci_dev *pdev)
{
struct vga_switcheroo_client *client;
@ -291,8 +304,6 @@ static int vga_switchto_stage1(struct vga_switcheroo_client *new_client)
vga_switchon(new_client);

vga_set_default_device(new_client->pdev);
set_audio_state(new_client->id, VGA_SWITCHEROO_ON);

return 0;
}

@ -308,6 +319,8 @@ static int vga_switchto_stage2(struct vga_switcheroo_client *new_client)

active->active = false;

set_audio_state(active->id, VGA_SWITCHEROO_OFF);

if (new_client->fb_info) {
struct fb_event event;
event.info = new_client->fb_info;
@ -321,11 +334,11 @@ static int vga_switchto_stage2(struct vga_switcheroo_client *new_client)
if (new_client->ops->reprobe)
new_client->ops->reprobe(new_client->pdev);

set_audio_state(active->id, VGA_SWITCHEROO_OFF);

if (active->pwr_state == VGA_SWITCHEROO_ON)
vga_switchoff(active);

set_audio_state(new_client->id, VGA_SWITCHEROO_ON);

new_client->active = true;
return 0;
}
@ -371,8 +384,9 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
/* pwr off the device not in use */
if (strncmp(usercmd, "OFF", 3) == 0) {
list_for_each_entry(client, &vgasr_priv.clients, list) {
if (client->active)
if (client->active || client_is_audio(client))
continue;
set_audio_state(client->id, VGA_SWITCHEROO_OFF);
if (client->pwr_state == VGA_SWITCHEROO_ON)
vga_switchoff(client);
}
@ -381,10 +395,11 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
/* pwr on the device not in use */
if (strncmp(usercmd, "ON", 2) == 0) {
list_for_each_entry(client, &vgasr_priv.clients, list) {
if (client->active)
if (client->active || client_is_audio(client))
continue;
if (client->pwr_state == VGA_SWITCHEROO_OFF)
vga_switchon(client);
set_audio_state(client->id, VGA_SWITCHEROO_ON);
}
goto out;
}

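vga_switcheroo_get_client_state(), added above, lets a GPU driver ask whether its PCI device is currently powered, already switched off, or not registered with the switcheroo at all; besides the client's pwr_state it can return VGA_SWITCHEROO_NOT_FOUND or VGA_SWITCHEROO_INIT, as the function body shows. A speculative usage sketch (the wrapper is invented; only the call and the return values come from the hunk):

#include <linux/pci.h>
#include <linux/vga_switcheroo.h>

/* Skip hardware setup when the switcheroo has already powered the GPU down. */
static bool example_gpu_is_powered(struct pci_dev *pdev)
{
        return vga_switcheroo_get_client_state(pdev) != VGA_SWITCHEROO_OFF;
}
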
@ -236,7 +236,7 @@ static const struct ide_port_ops icside_v6_no_dma_port_ops = {
*/
static void icside_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
unsigned long cycle_time;
unsigned long cycle_time = 0;
int use_dma_info = 0;
const u8 xfer_mode = drive->dma_mode;

@ -271,9 +271,9 @@ static void icside_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)

ide_set_drivedata(drive, (void *)cycle_time);

printk("%s: %s selected (peak %dMB/s)\n", drive->name,
ide_xfer_verbose(xfer_mode),
2000 / (unsigned long)ide_get_drivedata(drive));
printk(KERN_INFO "%s: %s selected (peak %luMB/s)\n",
drive->name, ide_xfer_verbose(xfer_mode),
2000 / (cycle_time ? cycle_time : (unsigned long) -1));
}

static const struct ide_port_ops icside_v6_port_ops = {
@ -375,8 +375,6 @@ static const struct ide_dma_ops icside_v6_dma_ops = {
.dma_test_irq = icside_dma_test_irq,
.dma_lost_irq = ide_dma_lost_irq,
};
#else
#define icside_v6_dma_ops NULL
#endif

static int icside_dma_off_init(ide_hwif_t *hwif, const struct ide_port_info *d)
@ -456,7 +454,6 @@ err_free:
static const struct ide_port_info icside_v6_port_info __initdata = {
.init_dma = icside_dma_off_init,
.port_ops = &icside_v6_no_dma_port_ops,
.dma_ops = &icside_v6_dma_ops,
.host_flags = IDE_HFLAG_SERIALIZE | IDE_HFLAG_MMIO,
.mwdma_mask = ATA_MWDMA2,
.swdma_mask = ATA_SWDMA2,
@ -518,11 +515,13 @@ icside_register_v6(struct icside_state *state, struct expansion_card *ec)

ecard_set_drvdata(ec, state);

#ifdef CONFIG_BLK_DEV_IDEDMA_ICS
if (ec->dma != NO_DMA && !request_dma(ec->dma, DRV_NAME)) {
d.init_dma = icside_dma_init;
d.port_ops = &icside_v6_port_ops;
} else
d.dma_ops = NULL;
d.dma_ops = &icside_v6_dma_ops;
}
#endif

ret = ide_host_register(host, &d, hws);
if (ret)

@ -167,7 +167,8 @@ static int pcmcia_check_one_config(struct pcmcia_device *pdev, void *priv_data)
{
int *is_kme = priv_data;

if (!(pdev->resource[0]->flags & IO_DATA_PATH_WIDTH_8)) {
if ((pdev->resource[0]->flags & IO_DATA_PATH_WIDTH)
!= IO_DATA_PATH_WIDTH_8) {
pdev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
pdev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO;
}

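The pcmcia hunk above replaces a one-bit test with a masked comparison: IO_DATA_PATH_WIDTH is a multi-bit field, so a plain & against one of its named values does not reliably tell the widths apart. A tiny illustration with made-up constants:

#define FIELD_MASK      0x3     /* hypothetical two-bit field */
#define FIELD_WIDTH_8   0x0     /* a legal value that happens to be zero */
#define FIELD_WIDTH_16  0x1

static int width_is_not_8(unsigned long flags)
{
        /* Wrong: with FIELD_WIDTH_8 == 0 this test is always true. */
        /* return !(flags & FIELD_WIDTH_8); */

        /* Right: isolate the whole field, then compare the value. */
        return (flags & FIELD_MASK) != FIELD_WIDTH_8;
}
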
@ -379,7 +379,7 @@ config LEDS_NETXBIG

config LEDS_ASIC3
bool "LED support for the HTC ASIC3"
depends on LEDS_CLASS
depends on LEDS_CLASS=y
depends on MFD_ASIC3
default y
help
@ -390,7 +390,7 @@ config LEDS_ASIC3

config LEDS_RENESAS_TPU
bool "LED support for Renesas TPU"
depends on LEDS_CLASS && HAVE_CLK && GENERIC_GPIO
depends on LEDS_CLASS=y && HAVE_CLK && GENERIC_GPIO
help
This option enables build of the LED TPU platform driver,
suitable to drive any TPU channel on newer Renesas SoCs.

@ -29,7 +29,7 @@ static void led_update_brightness(struct led_classdev *led_cdev)
led_cdev->brightness = led_cdev->brightness_get(led_cdev);
}

static ssize_t led_brightness_show(struct device *dev,
static ssize_t led_brightness_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);

@ -44,13 +44,6 @@ static void led_set_software_blink(struct led_classdev *led_cdev,
if (!led_cdev->blink_brightness)
led_cdev->blink_brightness = led_cdev->max_brightness;

if (led_get_trigger_data(led_cdev) &&
delay_on == led_cdev->blink_delay_on &&
delay_off == led_cdev->blink_delay_off)
return;

led_stop_software_blink(led_cdev);

led_cdev->blink_delay_on = delay_on;
led_cdev->blink_delay_off = delay_off;

@ -76,6 +76,7 @@
#include <net/route.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/pkt_sched.h>
#include "bonding.h"
#include "bond_3ad.h"
#include "bond_alb.h"
@ -381,8 +382,6 @@ struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr)
return next;
}

#define bond_queue_mapping(skb) (*(u16 *)((skb)->cb))

/**
* bond_dev_queue_xmit - Prepare skb for xmit.
*
@ -395,7 +394,9 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
{
skb->dev = slave_dev;

skb->queue_mapping = bond_queue_mapping(skb);
BUILD_BUG_ON(sizeof(skb->queue_mapping) !=
sizeof(qdisc_skb_cb(skb)->bond_queue_mapping));
skb->queue_mapping = qdisc_skb_cb(skb)->bond_queue_mapping;

if (unlikely(netpoll_tx_running(slave_dev)))
bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb);
@ -4171,7 +4172,7 @@ static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb)
/*
* Save the original txq to restore before passing to the driver
*/
bond_queue_mapping(skb) = skb->queue_mapping;
qdisc_skb_cb(skb)->bond_queue_mapping = skb->queue_mapping;

if (unlikely(txq >= dev->real_num_tx_queues)) {
do {

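The bonding hunks above stop overlaying a raw u16 on skb->cb and instead stash the queue index in a named qdisc_skb_cb field, with a BUILD_BUG_ON guarding the size assumption. A condensed sketch of the save/restore pair; the bond_queue_mapping field name mirrors the diff (it lives in struct qdisc_skb_cb in this kernel series), while the function names are invented:

#include <linux/bug.h>
#include <linux/skbuff.h>
#include <net/sch_generic.h>

/* Remember the queue chosen by the select_queue hook... */
static u16 example_select_queue(struct sk_buff *skb, u16 txq)
{
        qdisc_skb_cb(skb)->bond_queue_mapping = skb->queue_mapping;
        return txq;
}

/* ...and restore it just before handing the skb to the slave device. */
static void example_restore_queue(struct sk_buff *skb)
{
        /* fails the build if either side of the copy ever changes size */
        BUILD_BUG_ON(sizeof(skb->queue_mapping) !=
                     sizeof(qdisc_skb_cb(skb)->bond_queue_mapping));
        skb->queue_mapping = qdisc_skb_cb(skb)->bond_queue_mapping;
}
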
@ -1082,8 +1082,12 @@ static ssize_t bonding_store_primary(struct device *d,
}
}

pr_info("%s: Unable to set %.*s as primary slave.\n",
bond->dev->name, (int)strlen(buf) - 1, buf);
strncpy(bond->params.primary, ifname, IFNAMSIZ);
bond->params.primary[IFNAMSIZ - 1] = 0;

pr_info("%s: Recording %s as primary, "
"but it has not been enslaved to %s yet.\n",
bond->dev->name, ifname, bond->dev->name);
out:
write_unlock_bh(&bond->curr_slave_lock);
read_unlock(&bond->lock);

@ -686,7 +686,7 @@ static int c_can_get_berr_counter(const struct net_device *dev,
*
* We iterate from priv->tx_echo to priv->tx_next and check if the
* packet has been transmitted, echo it back to the CAN framework.
* If we discover a not yet transmitted package, stop looking for more.
* If we discover a not yet transmitted packet, stop looking for more.
*/
static void c_can_do_tx(struct net_device *dev)
{
@ -698,7 +698,7 @@ static void c_can_do_tx(struct net_device *dev)
for (/* nix */; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) {
msg_obj_no = get_tx_echo_msg_obj(priv);
val = c_can_read_reg32(priv, &priv->regs->txrqst1);
if (!(val & (1 << msg_obj_no))) {
if (!(val & (1 << (msg_obj_no - 1)))) {
can_get_echo_skb(dev,
msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST);
stats->tx_bytes += priv->read_reg(priv,
@ -706,6 +706,8 @@ static void c_can_do_tx(struct net_device *dev)
& IF_MCONT_DLC_MASK;
stats->tx_packets++;
c_can_inval_msg_object(dev, 0, msg_obj_no);
} else {
break;
}
}

@ -950,7 +952,7 @@ static int c_can_poll(struct napi_struct *napi, int quota)
struct net_device *dev = napi->dev;
struct c_can_priv *priv = netdev_priv(dev);

irqstatus = priv->read_reg(priv, &priv->regs->interrupt);
irqstatus = priv->irqstatus;
if (!irqstatus)
goto end;

@ -1028,12 +1030,11 @@ end:

static irqreturn_t c_can_isr(int irq, void *dev_id)
{
u16 irqstatus;
struct net_device *dev = (struct net_device *)dev_id;
struct c_can_priv *priv = netdev_priv(dev);

irqstatus = priv->read_reg(priv, &priv->regs->interrupt);
if (!irqstatus)
priv->irqstatus = priv->read_reg(priv, &priv->regs->interrupt);
if (!priv->irqstatus)
return IRQ_NONE;

/* disable all interrupts and schedule the NAPI */
@ -1063,10 +1064,11 @@ static int c_can_open(struct net_device *dev)
goto exit_irq_fail;
}

napi_enable(&priv->napi);

/* start the c_can controller */
c_can_start(dev);

napi_enable(&priv->napi);
netif_start_queue(dev);

return 0;

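The c_can change above latches the interrupt register into priv->irqstatus in the hard interrupt handler and lets the NAPI poll routine consume that cached value, instead of reading the register a second time from poll where it may already have cleared. A stripped-down sketch of that split with invented names (NAPI setup via netif_napi_add() is omitted):

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/netdevice.h>

struct example_priv {
        struct napi_struct napi;
        void __iomem *regs;
        u16 irqstatus;                  /* latched by the ISR, consumed by poll */
};

static irqreturn_t example_isr(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct example_priv *priv = netdev_priv(dev);

        priv->irqstatus = readw(priv->regs + 0x08);     /* hypothetical offset */
        if (!priv->irqstatus)
                return IRQ_NONE;

        /* mask the device interrupts here, then let NAPI do the real work */
        napi_schedule(&priv->napi);
        return IRQ_HANDLED;
}

static int example_poll(struct napi_struct *napi, int budget)
{
        struct example_priv *priv = container_of(napi, struct example_priv, napi);
        u16 status = priv->irqstatus;   /* snapshot taken in hard-IRQ context */

        /* ... service rx/tx/error causes according to 'status' ... */
        napi_complete(napi);
        return 0;
}
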
@ -76,6 +76,7 @@ struct c_can_priv {
unsigned int tx_next;
unsigned int tx_echo;
void *priv; /* for board-specific data */
u16 irqstatus;
};

struct net_device *alloc_c_can_dev(void);

@ -154,7 +154,7 @@ static int __devinit cc770_get_platform_data(struct platform_device *pdev,
struct cc770_platform_data *pdata = pdev->dev.platform_data;

priv->can.clock.freq = pdata->osc_freq;
if (priv->cpu_interface | CPUIF_DSC)
if (priv->cpu_interface & CPUIF_DSC)
priv->can.clock.freq /= 2;
priv->clkout = pdata->cor;
priv->bus_config = pdata->bcr;

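The cc770 fix above is the classic | versus & slip: with a bitwise OR the condition is true whenever the flag constant is non-zero, so the clock was unconditionally halved. A two-branch illustration with a hypothetical flag bit:

#define IFACE_FLAG_DIVIDE       0x04    /* hypothetical flag bit */

static unsigned long example_clock(unsigned long freq, unsigned int iface)
{
        /* if (iface | IFACE_FLAG_DIVIDE)  -- always true, the flag is non-zero */
        if (iface & IFACE_FLAG_DIVIDE)  /* true only when the bit is actually set */
                freq /= 2;
        return freq;
}
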
@ -187,8 +187,10 @@ static int __init dummy_init_module(void)
rtnl_lock();
err = __rtnl_link_register(&dummy_link_ops);

for (i = 0; i < numdummies && !err; i++)
for (i = 0; i < numdummies && !err; i++) {
err = dummy_init_one();
cond_resched();
}
if (err < 0)
__rtnl_link_unregister(&dummy_link_ops);
rtnl_unlock();

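The dummy driver change wraps the per-device initialisation in braces and adds cond_resched(), so loading the module with a huge numdummies no longer monopolises the CPU for the whole loop. The same shape works for any long module-init loop; a hedged sketch:

#include <linux/sched.h>

static int example_create_many(int count, int (*create_one)(void))
{
        int i, err = 0;

        for (i = 0; i < count && !err; i++) {
                err = create_one();
                cond_resched();         /* let other tasks run between items */
        }
        return err;
}
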
Some files were not shown because too many files have changed in this diff.