Merge branch 'for-linus' into for-next

Takashi Iwai 2012-11-22 21:22:39 +01:00
commit 2ba509a6ba
178 changed files with 1978 additions and 1011 deletions

View File

@ -466,6 +466,10 @@ Note:
5.3 swappiness
Similar to /proc/sys/vm/swappiness, but affecting a hierarchy of groups only.
Please note that, unlike the global swappiness, a memcg knob set to 0
really prevents any swapping even if there is swap storage
available. This might lead to the memcg OOM killer being invoked if there
are no file pages to reclaim.
The following cgroups' swappiness can't be changed.
- root cgroup (uses /proc/sys/vm/swappiness).
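A minimal userspace sketch of poking this knob, assuming a memory cgroup mounted at /sys/fs/cgroup/memory and a group named "mygroup" (both the mount point and the group name are assumptions for illustration, not part of this commit):

#include <stdio.h>

int main(void)
{
	/* Path assumes the memory controller is mounted at
	 * /sys/fs/cgroup/memory and that "mygroup" already exists. */
	const char *path = "/sys/fs/cgroup/memory/mygroup/memory.swappiness";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return 1;
	}
	/* Writing 0 really disables swapping for this hierarchy; with no
	 * file pages left to reclaim this can invoke the memcg OOM killer. */
	fprintf(f, "0\n");
	return fclose(f) ? 1 : 0;
}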

View File

@ -33,7 +33,7 @@ Table of Contents
2 Modifying System Parameters
3 Per-Process Parameters
3.1 /proc/<pid>/oom_score_adj - Adjust the oom-killer
3.1 /proc/<pid>/oom_adj & /proc/<pid>/oom_score_adj - Adjust the oom-killer
score
3.2 /proc/<pid>/oom_score - Display current oom-killer score
3.3 /proc/<pid>/io - Display the IO accounting fields
@ -1320,10 +1320,10 @@ of the kernel.
CHAPTER 3: PER-PROCESS PARAMETERS
------------------------------------------------------------------------------
3.1 /proc/<pid>/oom_score_adj- Adjust the oom-killer score
3.1 /proc/<pid>/oom_adj & /proc/<pid>/oom_score_adj- Adjust the oom-killer score
--------------------------------------------------------------------------------
This file can be used to adjust the badness heuristic used to select which
These files can be used to adjust the badness heuristic used to select which
process gets killed in out of memory conditions.
The badness heuristic assigns a value to each candidate task ranging from 0
@ -1361,6 +1361,12 @@ same system, cpuset, mempolicy, or memory controller resources to use at least
equivalent to discounting 50% of the task's allowed memory from being considered
as scoring against the task.
For backwards compatibility with previous kernels, /proc/<pid>/oom_adj may also
be used to tune the badness score. Its acceptable values range from -16
(OOM_ADJUST_MIN) to +15 (OOM_ADJUST_MAX) and a special value of -17
(OOM_DISABLE) to disable oom killing entirely for that task. Its value is
scaled linearly with /proc/<pid>/oom_score_adj.
The value of /proc/<pid>/oom_score_adj may be reduced no lower than the last
value set by a CAP_SYS_RESOURCE process. To reduce the value any lower
requires CAP_SYS_RESOURCE.
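For illustration of the "scaled linearly" relationship above, a small sketch of the legacy mapping. It assumes the conventional oom_score_adj range of -1000 (OOM_SCORE_ADJ_MIN) to +1000 (OOM_SCORE_ADJ_MAX); the exact rounding performed by the kernel may differ, so treat this as an approximation rather than the in-kernel implementation:

/* Approximate the legacy oom_adj -> oom_score_adj scaling described above:
 * -17 (OOM_DISABLE) maps to -1000, +15 (OOM_ADJUST_MAX) maps to +1000,
 * and intermediate values scale linearly. */
static int oom_adj_to_score_adj(int oom_adj)
{
	if (oom_adj == -17)		/* OOM_DISABLE */
		return -1000;		/* assumed OOM_SCORE_ADJ_MIN */
	if (oom_adj == 15)		/* OOM_ADJUST_MAX */
		return 1000;		/* assumed OOM_SCORE_ADJ_MAX */
	return oom_adj * 1000 / 17;	/* e.g. oom_adj 8 -> roughly 470 */
}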
@ -1375,7 +1381,9 @@ minimal amount of work.
-------------------------------------------------------------
This file can be used to check the current score used by the oom-killer for
any given <pid>.
any given <pid>. Use it together with /proc/<pid>/oom_score_adj to tune which
process should be killed in an out-of-memory situation.
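A minimal sketch of using the two files together, as suggested above; the PID and the adjustment value are arbitrary examples, and error handling is kept to the bare minimum:

#include <stdio.h>

int main(void)
{
	int pid = 1234;		/* arbitrary example PID */
	char path[64];
	char buf[32];
	FILE *f;

	/* Read the current badness score the oom-killer would use. */
	snprintf(path, sizeof(path), "/proc/%d/oom_score", pid);
	f = fopen(path, "r");
	if (f) {
		if (fgets(buf, sizeof(buf), f))
			printf("oom_score: %s", buf);
		fclose(f);
	}

	/* Bias the task: positive values make it a more likely victim,
	 * negative values less likely; -1000 disables oom killing for it. */
	snprintf(path, sizeof(path), "/proc/%d/oom_score_adj", pid);
	f = fopen(path, "w");
	if (f) {
		fprintf(f, "%d\n", 500);
		fclose(f);
	}
	return 0;
}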
3.3 /proc/<pid>/io - Display the IO accounting fields
-------------------------------------------------------

View File

@ -164,4 +164,4 @@ read the CRC recorded by the NIC on receipt of the packet.
This requests that the NIC receive all possible frames, including errored
frames (such as bad FCS, etc). This can be helpful when sniffing a link with
bad packets on it. Some NICs may receive more packets if also put into normal
PROMISC mdoe.
PROMISC mode.

View File

@ -3598,6 +3598,49 @@ F: drivers/hid/hid-hyperv.c
F: drivers/net/hyperv/
F: drivers/staging/hv/
I2C OVER PARALLEL PORT
M: Jean Delvare <khali@linux-fr.org>
L: linux-i2c@vger.kernel.org
S: Maintained
F: Documentation/i2c/busses/i2c-parport
F: Documentation/i2c/busses/i2c-parport-light
F: drivers/i2c/busses/i2c-parport.c
F: drivers/i2c/busses/i2c-parport-light.c
I2C/SMBUS CONTROLLER DRIVERS FOR PC
M: Jean Delvare <khali@linux-fr.org>
L: linux-i2c@vger.kernel.org
S: Maintained
F: Documentation/i2c/busses/i2c-ali1535
F: Documentation/i2c/busses/i2c-ali1563
F: Documentation/i2c/busses/i2c-ali15x3
F: Documentation/i2c/busses/i2c-amd756
F: Documentation/i2c/busses/i2c-amd8111
F: Documentation/i2c/busses/i2c-i801
F: Documentation/i2c/busses/i2c-nforce2
F: Documentation/i2c/busses/i2c-piix4
F: Documentation/i2c/busses/i2c-sis5595
F: Documentation/i2c/busses/i2c-sis630
F: Documentation/i2c/busses/i2c-sis96x
F: Documentation/i2c/busses/i2c-via
F: Documentation/i2c/busses/i2c-viapro
F: drivers/i2c/busses/i2c-ali1535.c
F: drivers/i2c/busses/i2c-ali1563.c
F: drivers/i2c/busses/i2c-ali15x3.c
F: drivers/i2c/busses/i2c-amd756.c
F: drivers/i2c/busses/i2c-amd756-s4882.c
F: drivers/i2c/busses/i2c-amd8111.c
F: drivers/i2c/busses/i2c-i801.c
F: drivers/i2c/busses/i2c-isch.c
F: drivers/i2c/busses/i2c-nforce2.c
F: drivers/i2c/busses/i2c-nforce2-s4985.c
F: drivers/i2c/busses/i2c-piix4.c
F: drivers/i2c/busses/i2c-sis5595.c
F: drivers/i2c/busses/i2c-sis630.c
F: drivers/i2c/busses/i2c-sis96x.c
F: drivers/i2c/busses/i2c-via.c
F: drivers/i2c/busses/i2c-viapro.c
I2C/SMBUS STUB DRIVER
M: "Mark M. Hoffman" <mhoffman@lightlink.com>
L: linux-i2c@vger.kernel.org
@ -3605,9 +3648,8 @@ S: Maintained
F: drivers/i2c/busses/i2c-stub.c
I2C SUBSYSTEM
M: "Jean Delvare (PC drivers, core)" <khali@linux-fr.org>
M: Wolfram Sang <w.sang@pengutronix.de>
M: "Ben Dooks (embedded platforms)" <ben-linux@fluff.org>
M: "Wolfram Sang (embedded platforms)" <w.sang@pengutronix.de>
L: linux-i2c@vger.kernel.org
W: http://i2c.wiki.kernel.org/
T: quilt kernel.org/pub/linux/kernel/people/jdelvare/linux-2.6/jdelvare-i2c/
@ -3618,6 +3660,13 @@ F: drivers/i2c/
F: include/linux/i2c.h
F: include/linux/i2c-*.h
I2C-TAOS-EVM DRIVER
M: Jean Delvare <khali@linux-fr.org>
L: linux-i2c@vger.kernel.org
S: Maintained
F: Documentation/i2c/busses/i2c-taos-evm
F: drivers/i2c/busses/i2c-taos-evm.c
I2C-TINY-USB DRIVER
M: Till Harbaum <till@harbaum.org>
L: linux-i2c@vger.kernel.org
@ -7210,6 +7259,14 @@ L: linux-xtensa@linux-xtensa.org
S: Maintained
F: arch/xtensa/
THERMAL
M: Zhang Rui <rui.zhang@intel.com>
L: linux-pm@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/rzhang/linux.git
S: Supported
F: drivers/thermal/
F: include/linux/thermal.h
THINKPAD ACPI EXTRAS DRIVER
M: Henrique de Moraes Holschuh <ibm-acpi@hmh.eng.br>
L: ibm-acpi-devel@lists.sourceforge.net
@ -7887,13 +7944,6 @@ M: Roger Luethi <rl@hellgate.ch>
S: Maintained
F: drivers/net/ethernet/via/via-rhine.c
VIAPRO SMBUS DRIVER
M: Jean Delvare <khali@linux-fr.org>
L: linux-i2c@vger.kernel.org
S: Maintained
F: Documentation/i2c/busses/i2c-viapro
F: drivers/i2c/busses/i2c-viapro.c
VIA SD/MMC CARD CONTROLLER DRIVER
M: Bruce Chang <brucechang@via.com.tw>
M: Harald Welte <HaraldWelte@viatech.com>

View File

@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 7
SUBLEVEL = 0
EXTRAVERSION = -rc5
EXTRAVERSION = -rc6
NAME = Terrified Chipmunk
# *DOCUMENTATION*

View File

@ -33,7 +33,7 @@ ifeq ($(CONFIG_XIP_KERNEL),y)
$(obj)/xipImage: vmlinux FORCE
$(call if_changed,objcopy)
$(kecho) ' Kernel: $@ is ready (physical address: $(CONFIG_XIP_PHYS_ADDR))'
@$(kecho) ' Kernel: $@ is ready (physical address: $(CONFIG_XIP_PHYS_ADDR))'
$(obj)/Image $(obj)/zImage: FORCE
@echo 'Kernel configured for XIP (CONFIG_XIP_KERNEL=y)'
@ -48,14 +48,14 @@ $(obj)/xipImage: FORCE
$(obj)/Image: vmlinux FORCE
$(call if_changed,objcopy)
$(kecho) ' Kernel: $@ is ready'
@$(kecho) ' Kernel: $@ is ready'
$(obj)/compressed/vmlinux: $(obj)/Image FORCE
$(Q)$(MAKE) $(build)=$(obj)/compressed $@
$(obj)/zImage: $(obj)/compressed/vmlinux FORCE
$(call if_changed,objcopy)
$(kecho) ' Kernel: $@ is ready'
@$(kecho) ' Kernel: $@ is ready'
endif
@ -90,7 +90,7 @@ fi
$(obj)/uImage: $(obj)/zImage FORCE
@$(check_for_multiple_loadaddr)
$(call if_changed,uimage)
$(kecho) ' Image $@ is ready'
@$(kecho) ' Image $@ is ready'
$(obj)/bootp/bootp: $(obj)/zImage initrd FORCE
$(Q)$(MAKE) $(build)=$(obj)/bootp $@
@ -98,7 +98,7 @@ $(obj)/bootp/bootp: $(obj)/zImage initrd FORCE
$(obj)/bootpImage: $(obj)/bootp/bootp FORCE
$(call if_changed,objcopy)
$(kecho) ' Kernel: $@ is ready'
@$(kecho) ' Kernel: $@ is ready'
PHONY += initrd FORCE
initrd:

View File

@ -73,8 +73,8 @@
pinmux: pinmux {
compatible = "nvidia,tegra30-pinmux";
reg = <0x70000868 0xd0 /* Pad control registers */
0x70003000 0x3e0>; /* Mux registers */
reg = <0x70000868 0xd4 /* Pad control registers */
0x70003000 0x3e4>; /* Mux registers */
};
serial@70006000 {

View File

@ -68,7 +68,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data)
/* Enable overcurrent notification */
for (i = 0; i < data->ports; i++) {
if (data->overcurrent_pin[i])
if (gpio_is_valid(data->overcurrent_pin[i]))
at91_set_gpio_input(data->overcurrent_pin[i], 1);
}

View File

@ -72,7 +72,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data)
/* Enable overcurrent notification */
for (i = 0; i < data->ports; i++) {
if (data->overcurrent_pin[i])
if (gpio_is_valid(data->overcurrent_pin[i]))
at91_set_gpio_input(data->overcurrent_pin[i], 1);
}

View File

@ -72,7 +72,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data)
/* Enable overcurrent notification */
for (i = 0; i < data->ports; i++) {
if (data->overcurrent_pin[i])
if (gpio_is_valid(data->overcurrent_pin[i]))
at91_set_gpio_input(data->overcurrent_pin[i], 1);
}

View File

@ -78,7 +78,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data)
/* Enable overcurrent notification */
for (i = 0; i < data->ports; i++) {
if (data->overcurrent_pin[i])
if (gpio_is_valid(data->overcurrent_pin[i]))
at91_set_gpio_input(data->overcurrent_pin[i], 1);
}

View File

@ -1841,8 +1841,8 @@ static struct resource sha_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
.start = AT91SAM9G45_ID_AESTDESSHA,
.end = AT91SAM9G45_ID_AESTDESSHA,
.start = NR_IRQS_LEGACY + AT91SAM9G45_ID_AESTDESSHA,
.end = NR_IRQS_LEGACY + AT91SAM9G45_ID_AESTDESSHA,
.flags = IORESOURCE_IRQ,
},
};
@ -1874,8 +1874,8 @@ static struct resource tdes_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
.start = AT91SAM9G45_ID_AESTDESSHA,
.end = AT91SAM9G45_ID_AESTDESSHA,
.start = NR_IRQS_LEGACY + AT91SAM9G45_ID_AESTDESSHA,
.end = NR_IRQS_LEGACY + AT91SAM9G45_ID_AESTDESSHA,
.flags = IORESOURCE_IRQ,
},
};
@ -1910,8 +1910,8 @@ static struct resource aes_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
.start = AT91SAM9G45_ID_AESTDESSHA,
.end = AT91SAM9G45_ID_AESTDESSHA,
.start = NR_IRQS_LEGACY + AT91SAM9G45_ID_AESTDESSHA,
.end = NR_IRQS_LEGACY + AT91SAM9G45_ID_AESTDESSHA,
.flags = IORESOURCE_IRQ,
},
};

View File

@ -28,6 +28,7 @@ void highbank_restart(char mode, const char *cmd)
hignbank_set_pwr_soft_reset();
scu_power_mode(scu_base_addr, SCU_PM_POWEROFF);
cpu_do_idle();
while (1)
cpu_do_idle();
}

View File

@ -112,7 +112,7 @@ struct clk *clk_register_gate2(struct device *dev, const char *name,
clk = clk_register(dev, &gate->hw);
if (IS_ERR(clk))
kfree(clk);
kfree(gate);
return clk;
}

View File

@ -30,7 +30,7 @@
#define MX25_H1_SIC_SHIFT 21
#define MX25_H1_SIC_MASK (0x3 << MX25_H1_SIC_SHIFT)
#define MX25_H1_PP_BIT (1 << 18)
#define MX25_H1_PM_BIT (1 << 8)
#define MX25_H1_PM_BIT (1 << 16)
#define MX25_H1_IPPUE_UP_BIT (1 << 7)
#define MX25_H1_IPPUE_DOWN_BIT (1 << 6)
#define MX25_H1_TLL_BIT (1 << 5)

View File

@ -30,7 +30,7 @@
#define MX35_H1_SIC_SHIFT 21
#define MX35_H1_SIC_MASK (0x3 << MX35_H1_SIC_SHIFT)
#define MX35_H1_PP_BIT (1 << 18)
#define MX35_H1_PM_BIT (1 << 8)
#define MX35_H1_PM_BIT (1 << 16)
#define MX35_H1_IPPUE_UP_BIT (1 << 7)
#define MX35_H1_IPPUE_DOWN_BIT (1 << 6)
#define MX35_H1_TLL_BIT (1 << 5)

View File

@ -359,7 +359,7 @@ static struct clockdomain iss_44xx_clkdm = {
.clkdm_offs = OMAP4430_CM2_CAM_CAM_CDOFFS,
.wkdep_srcs = iss_wkup_sleep_deps,
.sleepdep_srcs = iss_wkup_sleep_deps,
.flags = CLKDM_CAN_HWSUP_SWSUP,
.flags = CLKDM_CAN_SWSUP,
};
static struct clockdomain l3_dss_44xx_clkdm = {

View File

@ -19,6 +19,7 @@
#include <linux/of.h>
#include <linux/pinctrl/machine.h>
#include <linux/platform_data/omap4-keypad.h>
#include <linux/platform_data/omap_ocp2scp.h>
#include <asm/mach-types.h>
#include <asm/mach/map.h>
@ -613,6 +614,83 @@ static void omap_init_vout(void)
static inline void omap_init_vout(void) {}
#endif
#if defined(CONFIG_OMAP_OCP2SCP) || defined(CONFIG_OMAP_OCP2SCP_MODULE)
static int count_ocp2scp_devices(struct omap_ocp2scp_dev *ocp2scp_dev)
{
int cnt = 0;
while (ocp2scp_dev->drv_name != NULL) {
cnt++;
ocp2scp_dev++;
}
return cnt;
}
static void omap_init_ocp2scp(void)
{
struct omap_hwmod *oh;
struct platform_device *pdev;
int bus_id = -1, dev_cnt = 0, i;
struct omap_ocp2scp_dev *ocp2scp_dev;
const char *oh_name, *name;
struct omap_ocp2scp_platform_data *pdata;
if (!cpu_is_omap44xx())
return;
oh_name = "ocp2scp_usb_phy";
name = "omap-ocp2scp";
oh = omap_hwmod_lookup(oh_name);
if (!oh) {
pr_err("%s: could not find omap_hwmod for %s\n", __func__,
oh_name);
return;
}
pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
if (!pdata) {
pr_err("%s: No memory for ocp2scp pdata\n", __func__);
return;
}
ocp2scp_dev = oh->dev_attr;
dev_cnt = count_ocp2scp_devices(ocp2scp_dev);
if (!dev_cnt) {
pr_err("%s: No devices connected to ocp2scp\n", __func__);
kfree(pdata);
return;
}
pdata->devices = kzalloc(sizeof(struct omap_ocp2scp_dev *)
* dev_cnt, GFP_KERNEL);
if (!pdata->devices) {
pr_err("%s: No memory for ocp2scp pdata devices\n", __func__);
kfree(pdata);
return;
}
for (i = 0; i < dev_cnt; i++, ocp2scp_dev++)
pdata->devices[i] = ocp2scp_dev;
pdata->dev_cnt = dev_cnt;
pdev = omap_device_build(name, bus_id, oh, pdata, sizeof(*pdata), NULL,
0, false);
if (IS_ERR(pdev)) {
pr_err("Could not build omap_device for %s %s\n",
name, oh_name);
kfree(pdata->devices);
kfree(pdata);
return;
}
}
#else
static inline void omap_init_ocp2scp(void) { }
#endif
/*-------------------------------------------------------------------------*/
static int __init omap2_init_devices(void)
@ -640,6 +718,7 @@ static int __init omap2_init_devices(void)
omap_init_sham();
omap_init_aes();
omap_init_vout();
omap_init_ocp2scp();
return 0;
}

View File

@ -421,6 +421,38 @@ static int _set_softreset(struct omap_hwmod *oh, u32 *v)
return 0;
}
/**
* _wait_softreset_complete - wait for an OCP softreset to complete
* @oh: struct omap_hwmod * to wait on
*
* Wait until the IP block represented by @oh reports that its OCP
* softreset is complete. This can be triggered by software (see
* _ocp_softreset()) or by hardware upon returning from off-mode (one
* example is HSMMC). Waits for up to MAX_MODULE_SOFTRESET_WAIT
* microseconds. Returns the number of microseconds waited.
*/
static int _wait_softreset_complete(struct omap_hwmod *oh)
{
struct omap_hwmod_class_sysconfig *sysc;
u32 softrst_mask;
int c = 0;
sysc = oh->class->sysc;
if (sysc->sysc_flags & SYSS_HAS_RESET_STATUS)
omap_test_timeout((omap_hwmod_read(oh, sysc->syss_offs)
& SYSS_RESETDONE_MASK),
MAX_MODULE_SOFTRESET_WAIT, c);
else if (sysc->sysc_flags & SYSC_HAS_RESET_STATUS) {
softrst_mask = (0x1 << sysc->sysc_fields->srst_shift);
omap_test_timeout(!(omap_hwmod_read(oh, sysc->sysc_offs)
& softrst_mask),
MAX_MODULE_SOFTRESET_WAIT, c);
}
return c;
}
/**
* _set_dmadisable: set OCP_SYSCONFIG.DMADISABLE bit in @v
* @oh: struct omap_hwmod *
@ -1282,6 +1314,18 @@ static void _enable_sysc(struct omap_hwmod *oh)
if (!oh->class->sysc)
return;
/*
* Wait until reset has completed; this is needed because the IP
* block is reset automatically by hardware in some cases
* (off-mode for example), and the drivers require the
* IP to be ready when they access it
*/
if (oh->flags & HWMOD_CONTROL_OPT_CLKS_IN_RESET)
_enable_optional_clocks(oh);
_wait_softreset_complete(oh);
if (oh->flags & HWMOD_CONTROL_OPT_CLKS_IN_RESET)
_disable_optional_clocks(oh);
v = oh->_sysc_cache;
sf = oh->class->sysc->sysc_flags;
@ -1804,7 +1848,7 @@ static int _am33xx_disable_module(struct omap_hwmod *oh)
*/
static int _ocp_softreset(struct omap_hwmod *oh)
{
u32 v, softrst_mask;
u32 v;
int c = 0;
int ret = 0;
@ -1834,19 +1878,7 @@ static int _ocp_softreset(struct omap_hwmod *oh)
if (oh->class->sysc->srst_udelay)
udelay(oh->class->sysc->srst_udelay);
if (oh->class->sysc->sysc_flags & SYSS_HAS_RESET_STATUS)
omap_test_timeout((omap_hwmod_read(oh,
oh->class->sysc->syss_offs)
& SYSS_RESETDONE_MASK),
MAX_MODULE_SOFTRESET_WAIT, c);
else if (oh->class->sysc->sysc_flags & SYSC_HAS_RESET_STATUS) {
softrst_mask = (0x1 << oh->class->sysc->sysc_fields->srst_shift);
omap_test_timeout(!(omap_hwmod_read(oh,
oh->class->sysc->sysc_offs)
& softrst_mask),
MAX_MODULE_SOFTRESET_WAIT, c);
}
c = _wait_softreset_complete(oh);
if (c == MAX_MODULE_SOFTRESET_WAIT)
pr_warning("omap_hwmod: %s: softreset failed (waited %d usec)\n",
oh->name, MAX_MODULE_SOFTRESET_WAIT);
@ -2352,6 +2384,9 @@ static int __init _setup_reset(struct omap_hwmod *oh)
if (oh->_state != _HWMOD_STATE_INITIALIZED)
return -EINVAL;
if (oh->flags & HWMOD_EXT_OPT_MAIN_CLK)
return -EPERM;
if (oh->rst_lines_cnt == 0) {
r = _enable(oh);
if (r) {

View File

@ -21,6 +21,7 @@
#include <linux/io.h>
#include <linux/platform_data/gpio-omap.h>
#include <linux/power/smartreflex.h>
#include <linux/platform_data/omap_ocp2scp.h>
#include <plat/omap_hwmod.h>
#include <plat/i2c.h>
@ -2125,6 +2126,14 @@ static struct omap_hwmod omap44xx_mcpdm_hwmod = {
.name = "mcpdm",
.class = &omap44xx_mcpdm_hwmod_class,
.clkdm_name = "abe_clkdm",
/*
* It's suspected that the McPDM requires an off-chip main
* functional clock, controlled via I2C. This IP block is
* currently reset very early during boot, before I2C is
* available, so it doesn't seem that we have any choice in
* the kernel other than to avoid resetting it.
*/
.flags = HWMOD_EXT_OPT_MAIN_CLK,
.mpu_irqs = omap44xx_mcpdm_irqs,
.sdma_reqs = omap44xx_mcpdm_sdma_reqs,
.main_clk = "mcpdm_fck",
@ -2681,6 +2690,32 @@ static struct omap_hwmod_class omap44xx_ocp2scp_hwmod_class = {
.sysc = &omap44xx_ocp2scp_sysc,
};
/* ocp2scp dev_attr */
static struct resource omap44xx_usb_phy_and_pll_addrs[] = {
{
.name = "usb_phy",
.start = 0x4a0ad080,
.end = 0x4a0ae000,
.flags = IORESOURCE_MEM,
},
{
/* XXX: Remove this once control module driver is in place */
.name = "ctrl_dev",
.start = 0x4a002300,
.end = 0x4a002303,
.flags = IORESOURCE_MEM,
},
{ }
};
static struct omap_ocp2scp_dev ocp2scp_dev_attr[] = {
{
.drv_name = "omap-usb2",
.res = omap44xx_usb_phy_and_pll_addrs,
},
{ }
};
/* ocp2scp_usb_phy */
static struct omap_hwmod omap44xx_ocp2scp_usb_phy_hwmod = {
.name = "ocp2scp_usb_phy",
@ -2694,6 +2729,7 @@ static struct omap_hwmod omap44xx_ocp2scp_usb_phy_hwmod = {
.modulemode = MODULEMODE_HWCTRL,
},
},
.dev_attr = ocp2scp_dev_attr,
};
/*

View File

@ -366,7 +366,7 @@ static struct regulator_init_data omap4_clk32kg_idata = {
};
static struct regulator_consumer_supply omap4_vdd1_supply[] = {
REGULATOR_SUPPLY("vcc", "mpu.0"),
REGULATOR_SUPPLY("vcc", "cpu0"),
};
static struct regulator_consumer_supply omap4_vdd2_supply[] = {

View File

@ -264,7 +264,7 @@ static void __init omap_vc_i2c_init(struct voltagedomain *voltdm)
if (initialized) {
if (voltdm->pmic->i2c_high_speed != i2c_high_speed)
pr_warn("%s: I2C config for vdd_%s does not match other channels (%u).",
pr_warn("%s: I2C config for vdd_%s does not match other channels (%u).\n",
__func__, voltdm->name, i2c_high_speed);
return;
}

View File

@ -28,6 +28,7 @@
#include <linux/mfd/asic3.h>
#include <linux/mtd/physmap.h>
#include <linux/pda_power.h>
#include <linux/pwm.h>
#include <linux/pwm_backlight.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/gpio-regulator.h>
@ -556,7 +557,7 @@ static struct platform_device hx4700_lcd = {
*/
static struct platform_pwm_backlight_data backlight_data = {
.pwm_id = 1,
.pwm_id = -1, /* Superseded by pwm_lookup */
.max_brightness = 200,
.dft_brightness = 100,
.pwm_period_ns = 30923,
@ -571,6 +572,10 @@ static struct platform_device backlight = {
},
};
static struct pwm_lookup hx4700_pwm_lookup[] = {
PWM_LOOKUP("pxa27x-pwm.1", 0, "pwm-backlight", NULL),
};
/*
* USB "Transceiver"
*/
@ -872,6 +877,7 @@ static void __init hx4700_init(void)
pxa_set_stuart_info(NULL);
platform_add_devices(devices, ARRAY_SIZE(devices));
pwm_add_table(hx4700_pwm_lookup, ARRAY_SIZE(hx4700_pwm_lookup));
pxa_set_ficp_info(&ficp_info);
pxa27x_set_i2c_power_info(NULL);

View File

@ -86,10 +86,7 @@ static void spitz_discharge1(int on)
gpio_set_value(SPITZ_GPIO_LED_GREEN, on);
}
static unsigned long gpio18_config[] = {
GPIO18_RDY,
GPIO18_GPIO,
};
static unsigned long gpio18_config = GPIO18_GPIO;
static void spitz_presuspend(void)
{
@ -112,7 +109,7 @@ static void spitz_presuspend(void)
PGSR3 &= ~SPITZ_GPIO_G3_STROBE_BIT;
PGSR2 |= GPIO_bit(SPITZ_GPIO_KEY_STROBE0);
pxa2xx_mfp_config(&gpio18_config[0], 1);
pxa2xx_mfp_config(&gpio18_config, 1);
gpio_request_one(18, GPIOF_OUT_INIT_HIGH, "Unknown");
gpio_free(18);
@ -131,7 +128,6 @@ static void spitz_presuspend(void)
static void spitz_postsuspend(void)
{
pxa2xx_mfp_config(&gpio18_config[1], 1);
}
static int spitz_should_wakeup(unsigned int resume_on_alarm)

View File

@ -443,6 +443,11 @@ struct omap_hwmod_omap4_prcm {
* in order to complete the reset. Optional clocks will be disabled
* again after the reset.
* HWMOD_16BIT_REG: Module has 16bit registers
* HWMOD_EXT_OPT_MAIN_CLK: The only main functional clock source for
* this IP block comes from an off-chip source and is not always
* enabled. This prevents the hwmod code from being able to
* enable and reset the IP block early. XXX Eventually it should
* be possible to query the clock framework for this information.
*/
#define HWMOD_SWSUP_SIDLE (1 << 0)
#define HWMOD_SWSUP_MSTANDBY (1 << 1)
@ -453,6 +458,7 @@ struct omap_hwmod_omap4_prcm {
#define HWMOD_NO_IDLEST (1 << 6)
#define HWMOD_CONTROL_OPT_CLKS_IN_RESET (1 << 7)
#define HWMOD_16BIT_REG (1 << 8)
#define HWMOD_EXT_OPT_MAIN_CLK (1 << 9)
/*
* omap_hwmod._int_flags definitions

View File

@ -5,6 +5,6 @@
#
include/generated/mach-types.h: $(src)/gen-mach-types $(src)/mach-types
$(kecho) ' Generating $@'
@$(kecho) ' Generating $@'
@mkdir -p $(dir $@)
$(Q)$(AWK) -f $^ > $@ || { rm -f $@; /bin/false; }

View File

@ -222,7 +222,7 @@ extern void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot
extern void __iounmap(volatile void __iomem *addr);
#define PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_DIRTY)
#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_XN | PTE_ATTRINDX(MT_DEVICE_nGnRE))
#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE))
#define PROT_NORMAL_NC (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL_NC))
#define ioremap(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))

View File

@ -38,7 +38,8 @@
#define PMD_SECT_S (_AT(pmdval_t, 3) << 8)
#define PMD_SECT_AF (_AT(pmdval_t, 1) << 10)
#define PMD_SECT_NG (_AT(pmdval_t, 1) << 11)
#define PMD_SECT_XN (_AT(pmdval_t, 1) << 54)
#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 53)
#define PMD_SECT_UXN (_AT(pmdval_t, 1) << 54)
/*
* AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers).
@ -57,7 +58,8 @@
#define PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
#define PTE_AF (_AT(pteval_t, 1) << 10) /* Access Flag */
#define PTE_NG (_AT(pteval_t, 1) << 11) /* nG */
#define PTE_XN (_AT(pteval_t, 1) << 54) /* XN */
#define PTE_PXN (_AT(pteval_t, 1) << 53) /* Privileged XN */
#define PTE_UXN (_AT(pteval_t, 1) << 54) /* User XN */
/*
* AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers).

View File

@ -62,23 +62,23 @@ extern pgprot_t pgprot_default;
#define _MOD_PROT(p, b) __pgprot(pgprot_val(p) | (b))
#define PAGE_NONE _MOD_PROT(pgprot_default, PTE_NG | PTE_XN | PTE_RDONLY)
#define PAGE_SHARED _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_XN)
#define PAGE_SHARED_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG)
#define PAGE_COPY _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_XN | PTE_RDONLY)
#define PAGE_COPY_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_RDONLY)
#define PAGE_READONLY _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_XN | PTE_RDONLY)
#define PAGE_READONLY_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_RDONLY)
#define PAGE_KERNEL _MOD_PROT(pgprot_default, PTE_XN | PTE_DIRTY)
#define PAGE_KERNEL_EXEC _MOD_PROT(pgprot_default, PTE_DIRTY)
#define PAGE_NONE _MOD_PROT(pgprot_default, PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
#define PAGE_SHARED _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
#define PAGE_SHARED_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN)
#define PAGE_COPY _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
#define PAGE_COPY_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)
#define PAGE_READONLY _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
#define PAGE_READONLY_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)
#define PAGE_KERNEL _MOD_PROT(pgprot_default, PTE_PXN | PTE_UXN | PTE_DIRTY)
#define PAGE_KERNEL_EXEC _MOD_PROT(pgprot_default, PTE_UXN | PTE_DIRTY)
#define __PAGE_NONE __pgprot(_PAGE_DEFAULT | PTE_NG | PTE_XN | PTE_RDONLY)
#define __PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_XN)
#define __PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG)
#define __PAGE_COPY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_XN | PTE_RDONLY)
#define __PAGE_COPY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_RDONLY)
#define __PAGE_READONLY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_XN | PTE_RDONLY)
#define __PAGE_READONLY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_RDONLY)
#define __PAGE_NONE __pgprot(_PAGE_DEFAULT | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
#define __PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
#define __PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
#define __PAGE_COPY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
#define __PAGE_COPY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)
#define __PAGE_READONLY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
#define __PAGE_READONLY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)
#endif /* __ASSEMBLY__ */
@ -130,10 +130,10 @@ extern struct page *empty_zero_page;
#define pte_young(pte) (pte_val(pte) & PTE_AF)
#define pte_special(pte) (pte_val(pte) & PTE_SPECIAL)
#define pte_write(pte) (!(pte_val(pte) & PTE_RDONLY))
#define pte_exec(pte) (!(pte_val(pte) & PTE_XN))
#define pte_exec(pte) (!(pte_val(pte) & PTE_UXN))
#define pte_present_exec_user(pte) \
((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_XN)) == \
((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == \
(PTE_VALID | PTE_USER))
#define PTE_BIT_FUNC(fn,op) \
@ -262,7 +262,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
const pteval_t mask = PTE_USER | PTE_XN | PTE_RDONLY;
const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY;
pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
return pte;
}

View File

@ -637,7 +637,6 @@ mem_init (void)
high_memory = __va(max_low_pfn * PAGE_SIZE);
reset_zone_present_pages();
for_each_online_pgdat(pgdat)
if (pgdat->bdata->node_bootmem_map)
totalram_pages += free_all_bootmem_node(pgdat);

View File

@ -30,6 +30,7 @@
* measurement, and debugging facilities.
*/
#include <linux/irqflags.h>
#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-l2c.h>
#include <asm/octeon/cvmx-spinlock.h>

View File

@ -11,6 +11,7 @@
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/irqflags.h>
#include <asm/bcache.h>

View File

@ -14,7 +14,6 @@
#endif
#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/byteorder.h> /* sigh ... */
@ -44,6 +43,24 @@
#define smp_mb__before_clear_bit() smp_mb__before_llsc()
#define smp_mb__after_clear_bit() smp_llsc_mb()
/*
* These are the "slower" versions of the functions and are in bitops.c.
* These functions call raw_local_irq_{save,restore}().
*/
void __mips_set_bit(unsigned long nr, volatile unsigned long *addr);
void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr);
void __mips_change_bit(unsigned long nr, volatile unsigned long *addr);
int __mips_test_and_set_bit(unsigned long nr,
volatile unsigned long *addr);
int __mips_test_and_set_bit_lock(unsigned long nr,
volatile unsigned long *addr);
int __mips_test_and_clear_bit(unsigned long nr,
volatile unsigned long *addr);
int __mips_test_and_change_bit(unsigned long nr,
volatile unsigned long *addr);
/*
* set_bit - Atomically set a bit in memory
* @nr: the bit to set
@ -57,7 +74,7 @@
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
unsigned short bit = nr & SZLONG_MASK;
int bit = nr & SZLONG_MASK;
unsigned long temp;
if (kernel_uses_llsc && R10000_LLSC_WAR) {
@ -92,17 +109,8 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
: "=&r" (temp), "+m" (*m)
: "ir" (1UL << bit));
} while (unlikely(!temp));
} else {
volatile unsigned long *a = addr;
unsigned long mask;
unsigned long flags;
a += nr >> SZLONG_LOG;
mask = 1UL << bit;
raw_local_irq_save(flags);
*a |= mask;
raw_local_irq_restore(flags);
}
} else
__mips_set_bit(nr, addr);
}
/*
@ -118,7 +126,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
unsigned short bit = nr & SZLONG_MASK;
int bit = nr & SZLONG_MASK;
unsigned long temp;
if (kernel_uses_llsc && R10000_LLSC_WAR) {
@ -153,17 +161,8 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
: "=&r" (temp), "+m" (*m)
: "ir" (~(1UL << bit)));
} while (unlikely(!temp));
} else {
volatile unsigned long *a = addr;
unsigned long mask;
unsigned long flags;
a += nr >> SZLONG_LOG;
mask = 1UL << bit;
raw_local_irq_save(flags);
*a &= ~mask;
raw_local_irq_restore(flags);
}
} else
__mips_clear_bit(nr, addr);
}
/*
@ -191,7 +190,7 @@ static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *ad
*/
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned short bit = nr & SZLONG_MASK;
int bit = nr & SZLONG_MASK;
if (kernel_uses_llsc && R10000_LLSC_WAR) {
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
@ -220,17 +219,8 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
: "=&r" (temp), "+m" (*m)
: "ir" (1UL << bit));
} while (unlikely(!temp));
} else {
volatile unsigned long *a = addr;
unsigned long mask;
unsigned long flags;
a += nr >> SZLONG_LOG;
mask = 1UL << bit;
raw_local_irq_save(flags);
*a ^= mask;
raw_local_irq_restore(flags);
}
} else
__mips_change_bit(nr, addr);
}
/*
@ -244,7 +234,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
static inline int test_and_set_bit(unsigned long nr,
volatile unsigned long *addr)
{
unsigned short bit = nr & SZLONG_MASK;
int bit = nr & SZLONG_MASK;
unsigned long res;
smp_mb__before_llsc();
@ -281,18 +271,8 @@ static inline int test_and_set_bit(unsigned long nr,
} while (unlikely(!res));
res = temp & (1UL << bit);
} else {
volatile unsigned long *a = addr;
unsigned long mask;
unsigned long flags;
a += nr >> SZLONG_LOG;
mask = 1UL << bit;
raw_local_irq_save(flags);
res = (mask & *a);
*a |= mask;
raw_local_irq_restore(flags);
}
} else
res = __mips_test_and_set_bit(nr, addr);
smp_llsc_mb();
@ -310,7 +290,7 @@ static inline int test_and_set_bit(unsigned long nr,
static inline int test_and_set_bit_lock(unsigned long nr,
volatile unsigned long *addr)
{
unsigned short bit = nr & SZLONG_MASK;
int bit = nr & SZLONG_MASK;
unsigned long res;
if (kernel_uses_llsc && R10000_LLSC_WAR) {
@ -345,18 +325,8 @@ static inline int test_and_set_bit_lock(unsigned long nr,
} while (unlikely(!res));
res = temp & (1UL << bit);
} else {
volatile unsigned long *a = addr;
unsigned long mask;
unsigned long flags;
a += nr >> SZLONG_LOG;
mask = 1UL << bit;
raw_local_irq_save(flags);
res = (mask & *a);
*a |= mask;
raw_local_irq_restore(flags);
}
} else
res = __mips_test_and_set_bit_lock(nr, addr);
smp_llsc_mb();
@ -373,7 +343,7 @@ static inline int test_and_set_bit_lock(unsigned long nr,
static inline int test_and_clear_bit(unsigned long nr,
volatile unsigned long *addr)
{
unsigned short bit = nr & SZLONG_MASK;
int bit = nr & SZLONG_MASK;
unsigned long res;
smp_mb__before_llsc();
@ -428,18 +398,8 @@ static inline int test_and_clear_bit(unsigned long nr,
} while (unlikely(!res));
res = temp & (1UL << bit);
} else {
volatile unsigned long *a = addr;
unsigned long mask;
unsigned long flags;
a += nr >> SZLONG_LOG;
mask = 1UL << bit;
raw_local_irq_save(flags);
res = (mask & *a);
*a &= ~mask;
raw_local_irq_restore(flags);
}
} else
res = __mips_test_and_clear_bit(nr, addr);
smp_llsc_mb();
@ -457,7 +417,7 @@ static inline int test_and_clear_bit(unsigned long nr,
static inline int test_and_change_bit(unsigned long nr,
volatile unsigned long *addr)
{
unsigned short bit = nr & SZLONG_MASK;
int bit = nr & SZLONG_MASK;
unsigned long res;
smp_mb__before_llsc();
@ -494,18 +454,8 @@ static inline int test_and_change_bit(unsigned long nr,
} while (unlikely(!res));
res = temp & (1UL << bit);
} else {
volatile unsigned long *a = addr;
unsigned long mask;
unsigned long flags;
a += nr >> SZLONG_LOG;
mask = 1UL << bit;
raw_local_irq_save(flags);
res = (mask & *a);
*a ^= mask;
raw_local_irq_restore(flags);
}
} else
res = __mips_test_and_change_bit(nr, addr);
smp_llsc_mb();

View File

@ -290,7 +290,7 @@ struct compat_shmid64_ds {
static inline int is_compat_task(void)
{
return test_thread_flag(TIF_32BIT);
return test_thread_flag(TIF_32BIT_ADDR);
}
#endif /* _ASM_COMPAT_H */

View File

@ -15,6 +15,7 @@
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/addrspace.h>
#include <asm/bug.h>

View File

@ -16,6 +16,105 @@
#include <linux/compiler.h>
#include <asm/hazards.h>
#if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC)
__asm__(
" .macro arch_local_irq_disable\n"
" .set push \n"
" .set noat \n"
" di \n"
" irq_disable_hazard \n"
" .set pop \n"
" .endm \n");
static inline void arch_local_irq_disable(void)
{
__asm__ __volatile__(
"arch_local_irq_disable"
: /* no outputs */
: /* no inputs */
: "memory");
}
__asm__(
" .macro arch_local_irq_save result \n"
" .set push \n"
" .set reorder \n"
" .set noat \n"
" di \\result \n"
" andi \\result, 1 \n"
" irq_disable_hazard \n"
" .set pop \n"
" .endm \n");
static inline unsigned long arch_local_irq_save(void)
{
unsigned long flags;
asm volatile("arch_local_irq_save\t%0"
: "=r" (flags)
: /* no inputs */
: "memory");
return flags;
}
__asm__(
" .macro arch_local_irq_restore flags \n"
" .set push \n"
" .set noreorder \n"
" .set noat \n"
#if defined(CONFIG_IRQ_CPU)
/*
* Slow, but doesn't suffer from a relatively unlikely race
* condition we've had since day 1.
*/
" beqz \\flags, 1f \n"
" di \n"
" ei \n"
"1: \n"
#else
/*
* Fast, dangerous. Life is fun, life is good.
*/
" mfc0 $1, $12 \n"
" ins $1, \\flags, 0, 1 \n"
" mtc0 $1, $12 \n"
#endif
" irq_disable_hazard \n"
" .set pop \n"
" .endm \n");
static inline void arch_local_irq_restore(unsigned long flags)
{
unsigned long __tmp1;
__asm__ __volatile__(
"arch_local_irq_restore\t%0"
: "=r" (__tmp1)
: "0" (flags)
: "memory");
}
static inline void __arch_local_irq_restore(unsigned long flags)
{
unsigned long __tmp1;
__asm__ __volatile__(
"arch_local_irq_restore\t%0"
: "=r" (__tmp1)
: "0" (flags)
: "memory");
}
#else
/* Functions that require preempt_{dis,en}able() are in mips-atomic.c */
void arch_local_irq_disable(void);
unsigned long arch_local_irq_save(void);
void arch_local_irq_restore(unsigned long flags);
void __arch_local_irq_restore(unsigned long flags);
#endif /* if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC) */
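Whichever variant ends up being built (the MIPSR2 inlines above or the out-of-line functions in mips-atomic.c), callers pair them the same way. A kernel-context sketch of the contract, with a hypothetical critical section; real code would normally go through local_irq_save()/local_irq_restore() rather than calling the arch_ primitives directly:

/* Sketch only: demonstrates the save/restore pairing these primitives
 * provide.  hypothetical_counter and hypothetical_update() are made up
 * for illustration. */
static int hypothetical_counter;

static void hypothetical_update(void)
{
	unsigned long flags;

	flags = arch_local_irq_save();	/* interrupts off, old state in flags */
	hypothetical_counter++;		/* critical section */
	arch_local_irq_restore(flags);	/* restore the previous IRQ state */
}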
__asm__(
" .macro arch_local_irq_enable \n"
" .set push \n"
@ -57,55 +156,6 @@ static inline void arch_local_irq_enable(void)
}
/*
* For cli() we have to insert nops to make sure that the new value
* has actually arrived in the status register before the end of this
* macro.
* R4000/R4400 need three nops, the R4600 two nops and the R10000 needs
* no nops at all.
*/
/*
* For TX49, operating only IE bit is not enough.
*
* If mfc0 $12 follows store and the mfc0 is last instruction of a
* page and fetching the next instruction causes TLB miss, the result
* of the mfc0 might wrongly contain EXL bit.
*
* ERT-TX49H2-027, ERT-TX49H3-012, ERT-TX49HL3-006, ERT-TX49H4-008
*
* Workaround: mask EXL bit of the result or place a nop before mfc0.
*/
__asm__(
" .macro arch_local_irq_disable\n"
" .set push \n"
" .set noat \n"
#ifdef CONFIG_MIPS_MT_SMTC
" mfc0 $1, $2, 1 \n"
" ori $1, 0x400 \n"
" .set noreorder \n"
" mtc0 $1, $2, 1 \n"
#elif defined(CONFIG_CPU_MIPSR2)
" di \n"
#else
" mfc0 $1,$12 \n"
" ori $1,0x1f \n"
" xori $1,0x1f \n"
" .set noreorder \n"
" mtc0 $1,$12 \n"
#endif
" irq_disable_hazard \n"
" .set pop \n"
" .endm \n");
static inline void arch_local_irq_disable(void)
{
__asm__ __volatile__(
"arch_local_irq_disable"
: /* no outputs */
: /* no inputs */
: "memory");
}
__asm__(
" .macro arch_local_save_flags flags \n"
" .set push \n"
@ -125,113 +175,6 @@ static inline unsigned long arch_local_save_flags(void)
return flags;
}
__asm__(
" .macro arch_local_irq_save result \n"
" .set push \n"
" .set reorder \n"
" .set noat \n"
#ifdef CONFIG_MIPS_MT_SMTC
" mfc0 \\result, $2, 1 \n"
" ori $1, \\result, 0x400 \n"
" .set noreorder \n"
" mtc0 $1, $2, 1 \n"
" andi \\result, \\result, 0x400 \n"
#elif defined(CONFIG_CPU_MIPSR2)
" di \\result \n"
" andi \\result, 1 \n"
#else
" mfc0 \\result, $12 \n"
" ori $1, \\result, 0x1f \n"
" xori $1, 0x1f \n"
" .set noreorder \n"
" mtc0 $1, $12 \n"
#endif
" irq_disable_hazard \n"
" .set pop \n"
" .endm \n");
static inline unsigned long arch_local_irq_save(void)
{
unsigned long flags;
asm volatile("arch_local_irq_save\t%0"
: "=r" (flags)
: /* no inputs */
: "memory");
return flags;
}
__asm__(
" .macro arch_local_irq_restore flags \n"
" .set push \n"
" .set noreorder \n"
" .set noat \n"
#ifdef CONFIG_MIPS_MT_SMTC
"mfc0 $1, $2, 1 \n"
"andi \\flags, 0x400 \n"
"ori $1, 0x400 \n"
"xori $1, 0x400 \n"
"or \\flags, $1 \n"
"mtc0 \\flags, $2, 1 \n"
#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
/*
* Slow, but doesn't suffer from a relatively unlikely race
* condition we've had since day 1.
*/
" beqz \\flags, 1f \n"
" di \n"
" ei \n"
"1: \n"
#elif defined(CONFIG_CPU_MIPSR2)
/*
* Fast, dangerous. Life is fun, life is good.
*/
" mfc0 $1, $12 \n"
" ins $1, \\flags, 0, 1 \n"
" mtc0 $1, $12 \n"
#else
" mfc0 $1, $12 \n"
" andi \\flags, 1 \n"
" ori $1, 0x1f \n"
" xori $1, 0x1f \n"
" or \\flags, $1 \n"
" mtc0 \\flags, $12 \n"
#endif
" irq_disable_hazard \n"
" .set pop \n"
" .endm \n");
static inline void arch_local_irq_restore(unsigned long flags)
{
unsigned long __tmp1;
#ifdef CONFIG_MIPS_MT_SMTC
/*
* SMTC kernel needs to do a software replay of queued
* IPIs, at the cost of branch and call overhead on each
* local_irq_restore()
*/
if (unlikely(!(flags & 0x0400)))
smtc_ipi_replay();
#endif
__asm__ __volatile__(
"arch_local_irq_restore\t%0"
: "=r" (__tmp1)
: "0" (flags)
: "memory");
}
static inline void __arch_local_irq_restore(unsigned long flags)
{
unsigned long __tmp1;
__asm__ __volatile__(
"arch_local_irq_restore\t%0"
: "=r" (__tmp1)
: "0" (flags)
: "memory");
}
static inline int arch_irqs_disabled_flags(unsigned long flags)
{
@ -245,7 +188,7 @@ static inline int arch_irqs_disabled_flags(unsigned long flags)
#endif
}
#endif
#endif /* #ifndef __ASSEMBLY__ */
/*
* Do the CPU's IRQ-state tracing from assembly code.

View File

@ -112,12 +112,6 @@ register struct thread_info *__current_thread_info __asm__("$28");
#define TIF_LOAD_WATCH 25 /* If set, load watch registers */
#define TIF_SYSCALL_TRACE 31 /* syscall trace active */
#ifdef CONFIG_MIPS32_O32
#define TIF_32BIT TIF_32BIT_REGS
#elif defined(CONFIG_MIPS32_N32)
#define TIF_32BIT _TIF_32BIT_ADDR
#endif /* CONFIG_MIPS32_O32 */
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)

View File

@ -2,8 +2,9 @@
# Makefile for MIPS-specific library files..
#
lib-y += csum_partial.o delay.o memcpy.o memset.o \
strlen_user.o strncpy_user.o strnlen_user.o uncached.o
lib-y += bitops.o csum_partial.o delay.o memcpy.o memset.o \
mips-atomic.o strlen_user.o strncpy_user.o \
strnlen_user.o uncached.o
obj-y += iomap.o
obj-$(CONFIG_PCI) += iomap-pci.o

arch/mips/lib/bitops.c (new file, 179 lines)
View File

@ -0,0 +1,179 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 1994-1997, 99, 2000, 06, 07 Ralf Baechle (ralf@linux-mips.org)
* Copyright (c) 1999, 2000 Silicon Graphics, Inc.
*/
#include <linux/bitops.h>
#include <linux/irqflags.h>
#include <linux/export.h>
/**
* __mips_set_bit - Atomically set a bit in memory. This is called by
* set_bit() if it cannot find a faster solution.
* @nr: the bit to set
* @addr: the address to start counting from
*/
void __mips_set_bit(unsigned long nr, volatile unsigned long *addr)
{
volatile unsigned long *a = addr;
unsigned bit = nr & SZLONG_MASK;
unsigned long mask;
unsigned long flags;
a += nr >> SZLONG_LOG;
mask = 1UL << bit;
raw_local_irq_save(flags);
*a |= mask;
raw_local_irq_restore(flags);
}
EXPORT_SYMBOL(__mips_set_bit);
/**
* __mips_clear_bit - Clears a bit in memory. This is called by clear_bit() if
* it cannot find a faster solution.
* @nr: Bit to clear
* @addr: Address to start counting from
*/
void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr)
{
volatile unsigned long *a = addr;
unsigned bit = nr & SZLONG_MASK;
unsigned long mask;
unsigned long flags;
a += nr >> SZLONG_LOG;
mask = 1UL << bit;
raw_local_irq_save(flags);
*a &= ~mask;
raw_local_irq_restore(flags);
}
EXPORT_SYMBOL(__mips_clear_bit);
/**
* __mips_change_bit - Toggle a bit in memory. This is called by change_bit()
* if it cannot find a faster solution.
* @nr: Bit to change
* @addr: Address to start counting from
*/
void __mips_change_bit(unsigned long nr, volatile unsigned long *addr)
{
volatile unsigned long *a = addr;
unsigned bit = nr & SZLONG_MASK;
unsigned long mask;
unsigned long flags;
a += nr >> SZLONG_LOG;
mask = 1UL << bit;
raw_local_irq_save(flags);
*a ^= mask;
raw_local_irq_restore(flags);
}
EXPORT_SYMBOL(__mips_change_bit);
/**
* __mips_test_and_set_bit - Set a bit and return its old value. This is
* called by test_and_set_bit() if it cannot find a faster solution.
* @nr: Bit to set
* @addr: Address to count from
*/
int __mips_test_and_set_bit(unsigned long nr,
volatile unsigned long *addr)
{
volatile unsigned long *a = addr;
unsigned bit = nr & SZLONG_MASK;
unsigned long mask;
unsigned long flags;
unsigned long res;
a += nr >> SZLONG_LOG;
mask = 1UL << bit;
raw_local_irq_save(flags);
res = (mask & *a);
*a |= mask;
raw_local_irq_restore(flags);
return res;
}
EXPORT_SYMBOL(__mips_test_and_set_bit);
/**
* __mips_test_and_set_bit_lock - Set a bit and return its old value. This is
* called by test_and_set_bit_lock() if it cannot find a faster solution.
* @nr: Bit to set
* @addr: Address to count from
*/
int __mips_test_and_set_bit_lock(unsigned long nr,
volatile unsigned long *addr)
{
volatile unsigned long *a = addr;
unsigned bit = nr & SZLONG_MASK;
unsigned long mask;
unsigned long flags;
unsigned long res;
a += nr >> SZLONG_LOG;
mask = 1UL << bit;
raw_local_irq_save(flags);
res = (mask & *a);
*a |= mask;
raw_local_irq_restore(flags);
return res;
}
EXPORT_SYMBOL(__mips_test_and_set_bit_lock);
/**
* __mips_test_and_clear_bit - Clear a bit and return its old value. This is
* called by test_and_clear_bit() if it cannot find a faster solution.
* @nr: Bit to clear
* @addr: Address to count from
*/
int __mips_test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
{
volatile unsigned long *a = addr;
unsigned bit = nr & SZLONG_MASK;
unsigned long mask;
unsigned long flags;
unsigned long res;
a += nr >> SZLONG_LOG;
mask = 1UL << bit;
raw_local_irq_save(flags);
res = (mask & *a);
*a &= ~mask;
raw_local_irq_restore(flags);
return res;
}
EXPORT_SYMBOL(__mips_test_and_clear_bit);
/**
* __mips_test_and_change_bit - Change a bit and return its old value. This is
* called by test_and_change_bit() if it cannot find a faster solution.
* @nr: Bit to change
* @addr: Address to count from
*/
int __mips_test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
{
volatile unsigned long *a = addr;
unsigned bit = nr & SZLONG_MASK;
unsigned long mask;
unsigned long flags;
unsigned long res;
a += nr >> SZLONG_LOG;
mask = 1UL << bit;
raw_local_irq_save(flags);
res = (mask & *a);
*a ^= mask;
raw_local_irq_restore(flags);
return res;
}
EXPORT_SYMBOL(__mips_test_and_change_bit);

arch/mips/lib/mips-atomic.c (new file, 176 lines)
View File

@ -0,0 +1,176 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1994, 95, 96, 97, 98, 99, 2003 by Ralf Baechle
* Copyright (C) 1996 by Paul M. Antoine
* Copyright (C) 1999 Silicon Graphics
* Copyright (C) 2000 MIPS Technologies, Inc.
*/
#include <asm/irqflags.h>
#include <asm/hazards.h>
#include <linux/compiler.h>
#include <linux/preempt.h>
#include <linux/export.h>
#if !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC)
/*
* For cli() we have to insert nops to make sure that the new value
* has actually arrived in the status register before the end of this
* macro.
* R4000/R4400 need three nops, the R4600 two nops and the R10000 needs
* no nops at all.
*/
/*
* For TX49, operating only IE bit is not enough.
*
* If mfc0 $12 follows store and the mfc0 is last instruction of a
* page and fetching the next instruction causes TLB miss, the result
* of the mfc0 might wrongly contain EXL bit.
*
* ERT-TX49H2-027, ERT-TX49H3-012, ERT-TX49HL3-006, ERT-TX49H4-008
*
* Workaround: mask EXL bit of the result or place a nop before mfc0.
*/
__asm__(
" .macro arch_local_irq_disable\n"
" .set push \n"
" .set noat \n"
#ifdef CONFIG_MIPS_MT_SMTC
" mfc0 $1, $2, 1 \n"
" ori $1, 0x400 \n"
" .set noreorder \n"
" mtc0 $1, $2, 1 \n"
#elif defined(CONFIG_CPU_MIPSR2)
/* see irqflags.h for inline function */
#else
" mfc0 $1,$12 \n"
" ori $1,0x1f \n"
" xori $1,0x1f \n"
" .set noreorder \n"
" mtc0 $1,$12 \n"
#endif
" irq_disable_hazard \n"
" .set pop \n"
" .endm \n");
void arch_local_irq_disable(void)
{
preempt_disable();
__asm__ __volatile__(
"arch_local_irq_disable"
: /* no outputs */
: /* no inputs */
: "memory");
preempt_enable();
}
EXPORT_SYMBOL(arch_local_irq_disable);
__asm__(
" .macro arch_local_irq_save result \n"
" .set push \n"
" .set reorder \n"
" .set noat \n"
#ifdef CONFIG_MIPS_MT_SMTC
" mfc0 \\result, $2, 1 \n"
" ori $1, \\result, 0x400 \n"
" .set noreorder \n"
" mtc0 $1, $2, 1 \n"
" andi \\result, \\result, 0x400 \n"
#elif defined(CONFIG_CPU_MIPSR2)
/* see irqflags.h for inline function */
#else
" mfc0 \\result, $12 \n"
" ori $1, \\result, 0x1f \n"
" xori $1, 0x1f \n"
" .set noreorder \n"
" mtc0 $1, $12 \n"
#endif
" irq_disable_hazard \n"
" .set pop \n"
" .endm \n");
unsigned long arch_local_irq_save(void)
{
unsigned long flags;
preempt_disable();
asm volatile("arch_local_irq_save\t%0"
: "=r" (flags)
: /* no inputs */
: "memory");
preempt_enable();
return flags;
}
EXPORT_SYMBOL(arch_local_irq_save);
__asm__(
" .macro arch_local_irq_restore flags \n"
" .set push \n"
" .set noreorder \n"
" .set noat \n"
#ifdef CONFIG_MIPS_MT_SMTC
"mfc0 $1, $2, 1 \n"
"andi \\flags, 0x400 \n"
"ori $1, 0x400 \n"
"xori $1, 0x400 \n"
"or \\flags, $1 \n"
"mtc0 \\flags, $2, 1 \n"
#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
/* see irqflags.h for inline function */
#elif defined(CONFIG_CPU_MIPSR2)
/* see irqflags.h for inline function */
#else
" mfc0 $1, $12 \n"
" andi \\flags, 1 \n"
" ori $1, 0x1f \n"
" xori $1, 0x1f \n"
" or \\flags, $1 \n"
" mtc0 \\flags, $12 \n"
#endif
" irq_disable_hazard \n"
" .set pop \n"
" .endm \n");
void arch_local_irq_restore(unsigned long flags)
{
unsigned long __tmp1;
#ifdef CONFIG_MIPS_MT_SMTC
/*
* SMTC kernel needs to do a software replay of queued
* IPIs, at the cost of branch and call overhead on each
* local_irq_restore()
*/
if (unlikely(!(flags & 0x0400)))
smtc_ipi_replay();
#endif
preempt_disable();
__asm__ __volatile__(
"arch_local_irq_restore\t%0"
: "=r" (__tmp1)
: "0" (flags)
: "memory");
preempt_enable();
}
EXPORT_SYMBOL(arch_local_irq_restore);
void __arch_local_irq_restore(unsigned long flags)
{
unsigned long __tmp1;
preempt_disable();
__asm__ __volatile__(
"arch_local_irq_restore\t%0"
: "=r" (__tmp1)
: "0" (flags)
: "memory");
preempt_enable();
}
EXPORT_SYMBOL(__arch_local_irq_restore);
#endif /* !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC) */

View File

@ -29,6 +29,7 @@
#include <linux/mtd/partitions.h>
#include <linux/mtd/physmap.h>
#include <linux/platform_device.h>
#include <asm/mips-boards/maltaint.h>
#include <mtd/mtd-abi.h>
#define SMC_PORT(base, int) \
@ -48,7 +49,7 @@ static struct plat_serial8250_port uart8250_data[] = {
SMC_PORT(0x2F8, 3),
{
.mapbase = 0x1f000900, /* The CBUS UART */
.irq = MIPS_CPU_IRQ_BASE + 2,
.irq = MIPS_CPU_IRQ_BASE + MIPSCPU_INT_MB2,
.uartclk = 3686400, /* Twice the usual clk! */
.iotype = UPIO_MEM32,
.flags = CBUS_UART_FLAGS,

View File

@ -96,6 +96,7 @@ config S390
select HAVE_MEMBLOCK_NODE_MAP
select HAVE_CMPXCHG_LOCAL
select HAVE_CMPXCHG_DOUBLE
select HAVE_ALIGNED_STRUCT_PAGE if SLUB
select HAVE_VIRT_CPU_ACCOUNTING
select VIRT_CPU_ACCOUNTING
select ARCH_DISCARD_MEMBLOCK

View File

@ -20,7 +20,7 @@
#define PSW32_MASK_CC 0x00003000UL
#define PSW32_MASK_PM 0x00000f00UL
#define PSW32_MASK_USER 0x00003F00UL
#define PSW32_MASK_USER 0x0000FF00UL
#define PSW32_ADDR_AMODE 0x80000000UL
#define PSW32_ADDR_INSN 0x7FFFFFFFUL

View File

@ -8,6 +8,9 @@ struct cpu;
#ifdef CONFIG_SCHED_BOOK
extern unsigned char cpu_socket_id[NR_CPUS];
#define topology_physical_package_id(cpu) (cpu_socket_id[cpu])
extern unsigned char cpu_core_id[NR_CPUS];
extern cpumask_t cpu_core_map[NR_CPUS];

View File

@ -239,7 +239,7 @@ typedef struct
#define PSW_MASK_EA 0x00000000UL
#define PSW_MASK_BA 0x00000000UL
#define PSW_MASK_USER 0x00003F00UL
#define PSW_MASK_USER 0x0000FF00UL
#define PSW_ADDR_AMODE 0x80000000UL
#define PSW_ADDR_INSN 0x7FFFFFFFUL
@ -269,7 +269,7 @@ typedef struct
#define PSW_MASK_EA 0x0000000100000000UL
#define PSW_MASK_BA 0x0000000080000000UL
#define PSW_MASK_USER 0x00003F8180000000UL
#define PSW_MASK_USER 0x0000FF8180000000UL
#define PSW_ADDR_AMODE 0x0000000000000000UL
#define PSW_ADDR_INSN 0xFFFFFFFFFFFFFFFFUL

View File

@ -309,6 +309,10 @@ static int restore_sigregs32(struct pt_regs *regs,_sigregs32 __user *sregs)
regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
(__u64)(regs32.psw.mask & PSW32_MASK_USER) << 32 |
(__u64)(regs32.psw.addr & PSW32_ADDR_AMODE);
/* Check for invalid user address space control. */
if ((regs->psw.mask & PSW_MASK_ASC) >= (psw_kernel_bits & PSW_MASK_ASC))
regs->psw.mask = (psw_user_bits & PSW_MASK_ASC) |
(regs->psw.mask & ~PSW_MASK_ASC);
regs->psw.addr = (__u64)(regs32.psw.addr & PSW32_ADDR_INSN);
for (i = 0; i < NUM_GPRS; i++)
regs->gprs[i] = (__u64) regs32.gprs[i];
@ -481,7 +485,10 @@ static int setup_frame32(int sig, struct k_sigaction *ka,
/* Set up registers for signal handler */
regs->gprs[15] = (__force __u64) frame;
regs->psw.mask |= PSW_MASK_BA; /* force amode 31 */
/* Force 31 bit amode and default user address space control. */
regs->psw.mask = PSW_MASK_BA |
(psw_user_bits & PSW_MASK_ASC) |
(regs->psw.mask & ~PSW_MASK_ASC);
regs->psw.addr = (__force __u64) ka->sa.sa_handler;
regs->gprs[2] = map_signal(sig);
@ -549,7 +556,10 @@ static int setup_rt_frame32(int sig, struct k_sigaction *ka, siginfo_t *info,
/* Set up registers for signal handler */
regs->gprs[15] = (__force __u64) frame;
regs->psw.mask |= PSW_MASK_BA; /* force amode 31 */
/* Force 31 bit amode and default user address space control. */
regs->psw.mask = PSW_MASK_BA |
(psw_user_bits & PSW_MASK_ASC) |
(regs->psw.mask & ~PSW_MASK_ASC);
regs->psw.addr = (__u64) ka->sa.sa_handler;
regs->gprs[2] = map_signal(sig);

View File

@ -136,6 +136,10 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
/* Use regs->psw.mask instead of psw_user_bits to preserve PER bit. */
regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
(user_sregs.regs.psw.mask & PSW_MASK_USER);
/* Check for invalid user address space control. */
if ((regs->psw.mask & PSW_MASK_ASC) >= (psw_kernel_bits & PSW_MASK_ASC))
regs->psw.mask = (psw_user_bits & PSW_MASK_ASC) |
(regs->psw.mask & ~PSW_MASK_ASC);
/* Check for invalid amode */
if (regs->psw.mask & PSW_MASK_EA)
regs->psw.mask |= PSW_MASK_BA;
@ -273,7 +277,10 @@ static int setup_frame(int sig, struct k_sigaction *ka,
/* Set up registers for signal handler */
regs->gprs[15] = (unsigned long) frame;
regs->psw.mask |= PSW_MASK_EA | PSW_MASK_BA; /* 64 bit amode */
/* Force default amode and default user address space control. */
regs->psw.mask = PSW_MASK_EA | PSW_MASK_BA |
(psw_user_bits & PSW_MASK_ASC) |
(regs->psw.mask & ~PSW_MASK_ASC);
regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE;
regs->gprs[2] = map_signal(sig);
@ -346,7 +353,10 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
/* Set up registers for signal handler */
regs->gprs[15] = (unsigned long) frame;
regs->psw.mask |= PSW_MASK_EA | PSW_MASK_BA; /* 64 bit amode */
/* Force default amode and default user address space control. */
regs->psw.mask = PSW_MASK_EA | PSW_MASK_BA |
(psw_user_bits & PSW_MASK_ASC) |
(regs->psw.mask & ~PSW_MASK_ASC);
regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE;
regs->gprs[2] = map_signal(sig);

View File

@ -40,6 +40,7 @@ static DEFINE_SPINLOCK(topology_lock);
static struct mask_info core_info;
cpumask_t cpu_core_map[NR_CPUS];
unsigned char cpu_core_id[NR_CPUS];
unsigned char cpu_socket_id[NR_CPUS];
static struct mask_info book_info;
cpumask_t cpu_book_map[NR_CPUS];
@ -83,11 +84,12 @@ static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu,
cpumask_set_cpu(lcpu, &book->mask);
cpu_book_id[lcpu] = book->id;
cpumask_set_cpu(lcpu, &core->mask);
cpu_core_id[lcpu] = rcpu;
if (one_core_per_cpu) {
cpu_core_id[lcpu] = rcpu;
cpu_socket_id[lcpu] = rcpu;
core = core->next;
} else {
cpu_core_id[lcpu] = core->id;
cpu_socket_id[lcpu] = core->id;
}
smp_cpu_set_polarization(lcpu, tl_cpu->pp);
}

View File

@ -180,8 +180,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
addr = start;
len = (unsigned long) nr_pages << PAGE_SHIFT;
end = start + len;
if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
(void __user *)start, len)))
if ((end < start) || (end > TASK_SIZE))
return 0;
local_irq_save(flags);
@ -229,7 +228,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
addr = start;
len = (unsigned long) nr_pages << PAGE_SHIFT;
end = start + len;
if (end < start)
if ((end < start) || (end > TASK_SIZE))
goto slow_irqon;
/*

View File

@ -16,6 +16,8 @@ config UNICORE32
select ARCH_WANT_FRAME_POINTERS
select GENERIC_IOMAP
select MODULES_USE_ELF_REL
select GENERIC_KERNEL_THREAD
select GENERIC_KERNEL_EXECVE
help
UniCore-32 is a 32-bit Instruction Set Architecture,
including a series of low-power-consumption RISC chip
@ -64,6 +66,9 @@ config GENERIC_CALIBRATE_DELAY
config ARCH_MAY_HAVE_PC_FDC
bool
config ZONE_DMA
def_bool y
config NEED_DMA_MAP_STATE
def_bool y
@ -216,7 +221,7 @@ config PUV3_GPIO
bool
depends on !ARCH_FPGA
select GENERIC_GPIO
select GPIO_SYSFS if EXPERIMENTAL
select GPIO_SYSFS
default y
if PUV3_NB0916

View File

@ -1,4 +1,3 @@
include include/asm-generic/Kbuild.asm
generic-y += atomic.h
generic-y += auxvec.h

View File

@ -19,9 +19,4 @@ extern void die(const char *msg, struct pt_regs *regs, int err);
extern void uc32_notify_die(const char *str, struct pt_regs *regs,
struct siginfo *info, unsigned long err, unsigned long trap);
extern asmlinkage void __backtrace(void);
extern asmlinkage void c_backtrace(unsigned long fp, int pmode);
extern void __show_regs(struct pt_regs *);
#endif /* __UNICORE_BUG_H__ */

View File

@ -35,7 +35,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
: "memory", "cc");
break;
default:
ret = __xchg_bad_pointer();
__xchg_bad_pointer();
}
return ret;

View File

@ -1 +0,0 @@
#include <asm-generic/kvm_para.h>

View File

@ -72,11 +72,6 @@ unsigned long get_wchan(struct task_struct *p);
#define cpu_relax() barrier()
/*
* Create a new kernel thread
*/
extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
#define task_pt_regs(p) \
((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)

View File

@ -12,80 +12,10 @@
#ifndef __UNICORE_PTRACE_H__
#define __UNICORE_PTRACE_H__
#define PTRACE_GET_THREAD_AREA 22
/*
* PSR bits
*/
#define USER_MODE 0x00000010
#define REAL_MODE 0x00000011
#define INTR_MODE 0x00000012
#define PRIV_MODE 0x00000013
#define ABRT_MODE 0x00000017
#define EXTN_MODE 0x0000001b
#define SUSR_MODE 0x0000001f
#define MODE_MASK 0x0000001f
#define PSR_R_BIT 0x00000040
#define PSR_I_BIT 0x00000080
#define PSR_V_BIT 0x10000000
#define PSR_C_BIT 0x20000000
#define PSR_Z_BIT 0x40000000
#define PSR_S_BIT 0x80000000
/*
* Groups of PSR bits
*/
#define PSR_f 0xff000000 /* Flags */
#define PSR_c 0x000000ff /* Control */
#include <uapi/asm/ptrace.h>
#ifndef __ASSEMBLY__
/*
* This struct defines the way the registers are stored on the
* stack during a system call. Note that sizeof(struct pt_regs)
* has to be a multiple of 8.
*/
struct pt_regs {
unsigned long uregs[34];
};
#define UCreg_asr uregs[32]
#define UCreg_pc uregs[31]
#define UCreg_lr uregs[30]
#define UCreg_sp uregs[29]
#define UCreg_ip uregs[28]
#define UCreg_fp uregs[27]
#define UCreg_26 uregs[26]
#define UCreg_25 uregs[25]
#define UCreg_24 uregs[24]
#define UCreg_23 uregs[23]
#define UCreg_22 uregs[22]
#define UCreg_21 uregs[21]
#define UCreg_20 uregs[20]
#define UCreg_19 uregs[19]
#define UCreg_18 uregs[18]
#define UCreg_17 uregs[17]
#define UCreg_16 uregs[16]
#define UCreg_15 uregs[15]
#define UCreg_14 uregs[14]
#define UCreg_13 uregs[13]
#define UCreg_12 uregs[12]
#define UCreg_11 uregs[11]
#define UCreg_10 uregs[10]
#define UCreg_09 uregs[9]
#define UCreg_08 uregs[8]
#define UCreg_07 uregs[7]
#define UCreg_06 uregs[6]
#define UCreg_05 uregs[5]
#define UCreg_04 uregs[4]
#define UCreg_03 uregs[3]
#define UCreg_02 uregs[2]
#define UCreg_01 uregs[1]
#define UCreg_00 uregs[0]
#define UCreg_ORIG_00 uregs[33]
#ifdef __KERNEL__
#define user_mode(regs) \
(processor_mode(regs) == USER_MODE)
@ -125,9 +55,5 @@ static inline int valid_user_regs(struct pt_regs *regs)
#define instruction_pointer(regs) ((regs)->UCreg_pc)
#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
#endif

View File

@ -1,3 +1,10 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
header-y += byteorder.h
header-y += kvm_para.h
header-y += ptrace.h
header-y += sigcontext.h
header-y += unistd.h
generic-y += kvm_para.h

View File

@ -0,0 +1,90 @@
/*
* linux/arch/unicore32/include/asm/ptrace.h
*
* Code specific to PKUnity SoC and UniCore ISA
*
* Copyright (C) 2001-2010 GUAN Xue-tao
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef _UAPI__UNICORE_PTRACE_H__
#define _UAPI__UNICORE_PTRACE_H__
#define PTRACE_GET_THREAD_AREA 22
/*
* PSR bits
*/
#define USER_MODE 0x00000010
#define REAL_MODE 0x00000011
#define INTR_MODE 0x00000012
#define PRIV_MODE 0x00000013
#define ABRT_MODE 0x00000017
#define EXTN_MODE 0x0000001b
#define SUSR_MODE 0x0000001f
#define MODE_MASK 0x0000001f
#define PSR_R_BIT 0x00000040
#define PSR_I_BIT 0x00000080
#define PSR_V_BIT 0x10000000
#define PSR_C_BIT 0x20000000
#define PSR_Z_BIT 0x40000000
#define PSR_S_BIT 0x80000000
/*
* Groups of PSR bits
*/
#define PSR_f 0xff000000 /* Flags */
#define PSR_c 0x000000ff /* Control */
#ifndef __ASSEMBLY__
/*
* This struct defines the way the registers are stored on the
* stack during a system call. Note that sizeof(struct pt_regs)
* has to be a multiple of 8.
*/
struct pt_regs {
unsigned long uregs[34];
};
#define UCreg_asr uregs[32]
#define UCreg_pc uregs[31]
#define UCreg_lr uregs[30]
#define UCreg_sp uregs[29]
#define UCreg_ip uregs[28]
#define UCreg_fp uregs[27]
#define UCreg_26 uregs[26]
#define UCreg_25 uregs[25]
#define UCreg_24 uregs[24]
#define UCreg_23 uregs[23]
#define UCreg_22 uregs[22]
#define UCreg_21 uregs[21]
#define UCreg_20 uregs[20]
#define UCreg_19 uregs[19]
#define UCreg_18 uregs[18]
#define UCreg_17 uregs[17]
#define UCreg_16 uregs[16]
#define UCreg_15 uregs[15]
#define UCreg_14 uregs[14]
#define UCreg_13 uregs[13]
#define UCreg_12 uregs[12]
#define UCreg_11 uregs[11]
#define UCreg_10 uregs[10]
#define UCreg_09 uregs[9]
#define UCreg_08 uregs[8]
#define UCreg_07 uregs[7]
#define UCreg_06 uregs[6]
#define UCreg_05 uregs[5]
#define UCreg_04 uregs[4]
#define UCreg_03 uregs[3]
#define UCreg_02 uregs[2]
#define UCreg_01 uregs[1]
#define UCreg_00 uregs[0]
#define UCreg_ORIG_00 uregs[33]
#endif /* __ASSEMBLY__ */
#endif /* _UAPI__UNICORE_PTRACE_H__ */

View File

@ -12,3 +12,4 @@
/* Use the standard ABI for syscalls. */
#include <asm-generic/unistd.h>
#define __ARCH_WANT_SYS_EXECVE

View File

@ -573,17 +573,16 @@ ENDPROC(ret_to_user)
*/
ENTRY(ret_from_fork)
b.l schedule_tail
get_thread_info tsk
ldw r1, [tsk+], #TI_FLAGS @ check for syscall tracing
mov why, #1
cand.a r1, #_TIF_SYSCALL_TRACE @ are we tracing syscalls?
beq ret_slow_syscall
mov r1, sp
mov r0, #1 @ trace exit [IP = 1]
b.l syscall_trace
b ret_slow_syscall
ENDPROC(ret_from_fork)
ENTRY(ret_from_kernel_thread)
b.l schedule_tail
mov r0, r5
adr lr, ret_slow_syscall
mov pc, r4
ENDPROC(ret_from_kernel_thread)
/*=============================================================================
* SWI handler
*-----------------------------------------------------------------------------
@ -669,11 +668,6 @@ __cr_alignment:
#endif
.ltorg
ENTRY(sys_execve)
add r3, sp, #S_OFF
b __sys_execve
ENDPROC(sys_execve)
ENTRY(sys_clone)
add ip, sp, #S_OFF
stw ip, [sp+], #4

View File

@ -258,6 +258,7 @@ void release_thread(struct task_struct *dead_task)
}
asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread");
int
copy_thread(unsigned long clone_flags, unsigned long stack_start,
@ -266,17 +267,22 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start,
struct thread_info *thread = task_thread_info(p);
struct pt_regs *childregs = task_pt_regs(p);
*childregs = *regs;
childregs->UCreg_00 = 0;
childregs->UCreg_sp = stack_start;
memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save));
thread->cpu_context.sp = (unsigned long)childregs;
thread->cpu_context.pc = (unsigned long)ret_from_fork;
if (clone_flags & CLONE_SETTLS)
childregs->UCreg_16 = regs->UCreg_03;
if (unlikely(!regs)) {
thread->cpu_context.pc = (unsigned long)ret_from_kernel_thread;
thread->cpu_context.r4 = stack_start;
thread->cpu_context.r5 = stk_sz;
memset(childregs, 0, sizeof(struct pt_regs));
} else {
thread->cpu_context.pc = (unsigned long)ret_from_fork;
*childregs = *regs;
childregs->UCreg_00 = 0;
childregs->UCreg_sp = stack_start;
if (clone_flags & CLONE_SETTLS)
childregs->UCreg_16 = regs->UCreg_03;
}
return 0;
}
@ -305,42 +311,6 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fp)
}
EXPORT_SYMBOL(dump_fpu);
/*
* Shuffle the argument into the correct register before calling the
* thread function. r1 is the thread argument, r2 is the pointer to
* the thread function, and r3 points to the exit function.
*/
asm(".pushsection .text\n"
" .align\n"
" .type kernel_thread_helper, #function\n"
"kernel_thread_helper:\n"
" mov.a asr, r7\n"
" mov r0, r4\n"
" mov lr, r6\n"
" mov pc, r5\n"
" .size kernel_thread_helper, . - kernel_thread_helper\n"
" .popsection");
/*
* Create a kernel thread.
*/
pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
struct pt_regs regs;
memset(&regs, 0, sizeof(regs));
regs.UCreg_04 = (unsigned long)arg;
regs.UCreg_05 = (unsigned long)fn;
regs.UCreg_06 = (unsigned long)do_exit;
regs.UCreg_07 = PRIV_MODE;
regs.UCreg_pc = (unsigned long)kernel_thread_helper;
regs.UCreg_asr = regs.UCreg_07 | PSR_I_BIT;
return do_fork(flags|CLONE_VM|CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
}
EXPORT_SYMBOL(kernel_thread);
unsigned long get_wchan(struct task_struct *p)
{
struct stackframe frame;

View File

@ -30,4 +30,10 @@ extern char __vectors_start[], __vectors_end[];
extern void kernel_thread_helper(void);
extern void __init early_signal_init(void);
extern asmlinkage void __backtrace(void);
extern asmlinkage void c_backtrace(unsigned long fp, int pmode);
extern void __show_regs(struct pt_regs *);
#endif

View File

@ -42,69 +42,6 @@ asmlinkage long __sys_clone(unsigned long clone_flags, unsigned long newsp,
parent_tid, child_tid);
}
/* sys_execve() executes a new program.
* This is called indirectly via a small wrapper
*/
asmlinkage long __sys_execve(const char __user *filename,
const char __user *const __user *argv,
const char __user *const __user *envp,
struct pt_regs *regs)
{
int error;
struct filename *fn;
fn = getname(filename);
error = PTR_ERR(fn);
if (IS_ERR(fn))
goto out;
error = do_execve(fn->name, argv, envp, regs);
putname(fn);
out:
return error;
}
int kernel_execve(const char *filename,
const char *const argv[],
const char *const envp[])
{
struct pt_regs regs;
int ret;
memset(&regs, 0, sizeof(struct pt_regs));
ret = do_execve(filename,
(const char __user *const __user *)argv,
(const char __user *const __user *)envp, &regs);
if (ret < 0)
goto out;
/*
* Save argc to the register structure for userspace.
*/
regs.UCreg_00 = ret;
/*
* We were successful. We won't be returning to our caller, but
* instead to user space by manipulating the kernel stack.
*/
asm("add r0, %0, %1\n\t"
"mov r1, %2\n\t"
"mov r2, %3\n\t"
"mov r22, #0\n\t" /* not a syscall */
"mov r23, %0\n\t" /* thread structure */
"b.l memmove\n\t" /* copy regs to top of stack */
"mov sp, r0\n\t" /* reposition stack pointer */
"b ret_to_user"
:
: "r" (current_thread_info()),
"Ir" (THREAD_START_SP - sizeof(regs)),
"r" (&regs),
"Ir" (sizeof(regs))
: "r0", "r1", "r2", "r3", "ip", "lr", "memory");
out:
return ret;
}
/* Note: used by the compat code even in 64-bit Linux. */
SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len,
unsigned long, prot, unsigned long, flags,

View File

@ -168,7 +168,7 @@ static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma)
}
static int __do_pf(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
struct task_struct *tsk)
unsigned int flags, struct task_struct *tsk)
{
struct vm_area_struct *vma;
int fault;
@ -194,14 +194,7 @@ good_area:
* If for any reason at all we couldn't handle the fault, make
* sure we exit gracefully rather than endlessly redo the fault.
*/
fault = handle_mm_fault(mm, vma, addr & PAGE_MASK,
(!(fsr ^ 0x12)) ? FAULT_FLAG_WRITE : 0);
if (unlikely(fault & VM_FAULT_ERROR))
return fault;
if (fault & VM_FAULT_MAJOR)
tsk->maj_flt++;
else
tsk->min_flt++;
fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, flags);
return fault;
check_stack:
@ -216,6 +209,8 @@ static int do_pf(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
struct task_struct *tsk;
struct mm_struct *mm;
int fault, sig, code;
unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
((!(fsr ^ 0x12)) ? FAULT_FLAG_WRITE : 0);
tsk = current;
mm = tsk->mm;
@ -236,6 +231,7 @@ static int do_pf(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
if (!user_mode(regs)
&& !search_exception_tables(regs->UCreg_pc))
goto no_context;
retry:
down_read(&mm->mmap_sem);
} else {
/*
@ -251,7 +247,28 @@ static int do_pf(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
#endif
}
fault = __do_pf(mm, addr, fsr, tsk);
fault = __do_pf(mm, addr, fsr, flags, tsk);
/* If we need to retry but a fatal signal is pending, handle the
* signal first. We do not need to release the mmap_sem because
* it would already be released in __lock_page_or_retry in
* mm/filemap.c. */
if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
return 0;
if (!(fault & VM_FAULT_ERROR) && (flags & FAULT_FLAG_ALLOW_RETRY)) {
if (fault & VM_FAULT_MAJOR)
tsk->maj_flt++;
else
tsk->min_flt++;
if (fault & VM_FAULT_RETRY) {
/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
* of starvation. */
flags &= ~FAULT_FLAG_ALLOW_RETRY;
goto retry;
}
}
up_read(&mm->mmap_sem);
/*

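The retry logic added above follows the usual pattern: attempt the fault with FAULT_FLAG_ALLOW_RETRY set, and if the handler asks for a retry, clear that flag before trying again so the loop cannot run forever. A small self-contained sketch of just that control flow; the flag values and the fake fault handler are stand-ins, not the kernel's:

#include <stdio.h>

/* Stand-in flag/return values; the real ones live in <linux/mm.h>. */
#define FAULT_FLAG_ALLOW_RETRY  0x1
#define FAULT_FLAG_WRITE        0x2
#define VM_FAULT_RETRY          0x4

static int calls;

/* Pretend handler: asks for one retry, then reports success. */
static int fake_handle_mm_fault(unsigned int flags)
{
        calls++;
        if ((flags & FAULT_FLAG_ALLOW_RETRY) && calls == 1)
                return VM_FAULT_RETRY;
        return 0;
}

int main(void)
{
        unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_WRITE;
        int fault;

retry:
        fault = fake_handle_mm_fault(flags);
        if (fault & VM_FAULT_RETRY) {
                /* Clear ALLOW_RETRY so a second retry cannot starve us,
                 * just as the hunk above does. */
                flags &= ~FAULT_FLAG_ALLOW_RETRY;
                goto retry;
        }
        printf("fault handled after %d call(s)\n", calls);
        return 0;
}
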
View File

@ -24,6 +24,9 @@ static inline bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *best;
if (!static_cpu_has(X86_FEATURE_XSAVE))
return 0;
best = kvm_find_cpuid_entry(vcpu, 1, 0);
return best && (best->ecx & bit(X86_FEATURE_XSAVE));
}

View File

@ -6549,19 +6549,22 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
}
}
exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
/* Exposing INVPCID only when PCID is exposed */
best = kvm_find_cpuid_entry(vcpu, 0x7, 0);
if (vmx_invpcid_supported() &&
best && (best->ebx & bit(X86_FEATURE_INVPCID)) &&
guest_cpuid_has_pcid(vcpu)) {
exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
exec_control |= SECONDARY_EXEC_ENABLE_INVPCID;
vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
exec_control);
} else {
exec_control &= ~SECONDARY_EXEC_ENABLE_INVPCID;
vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
exec_control);
if (cpu_has_secondary_exec_ctrls()) {
exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
exec_control &= ~SECONDARY_EXEC_ENABLE_INVPCID;
vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
exec_control);
}
if (best)
best->ebx &= ~bit(X86_FEATURE_INVPCID);
}

View File

@ -5781,6 +5781,9 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
int pending_vec, max_bits, idx;
struct desc_ptr dt;
if (!guest_cpuid_has_xsave(vcpu) && (sregs->cr4 & X86_CR4_OSXSAVE))
return -EINVAL;
dt.size = sregs->idt.limit;
dt.address = sregs->idt.base;
kvm_x86_ops->set_idt(vcpu, &dt);

View File

@ -67,6 +67,7 @@ static struct usb_device_id ath3k_table[] = {
{ USB_DEVICE(0x13d3, 0x3304) },
{ USB_DEVICE(0x0930, 0x0215) },
{ USB_DEVICE(0x0489, 0xE03D) },
{ USB_DEVICE(0x0489, 0xE027) },
/* Atheros AR9285 Malbec with sflash firmware */
{ USB_DEVICE(0x03F0, 0x311D) },

View File

@ -124,6 +124,7 @@ static struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x13d3, 0x3304), .driver_info = BTUSB_IGNORE },
{ USB_DEVICE(0x0930, 0x0215), .driver_info = BTUSB_IGNORE },
{ USB_DEVICE(0x0489, 0xe03d), .driver_info = BTUSB_IGNORE },
{ USB_DEVICE(0x0489, 0xe027), .driver_info = BTUSB_IGNORE },
/* Atheros AR9285 Malbec with sflash firmware */
{ USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE },

View File

@ -22,6 +22,26 @@
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_data/omap_ocp2scp.h>
/**
* _count_resources - count for the number of resources
* @res: struct resource *
*
* Count and return the number of resources populated for the device that is
* connected to ocp2scp.
*/
static unsigned _count_resources(struct resource *res)
{
int cnt = 0;
while (res->start != res->end) {
cnt++;
res++;
}
return cnt;
}
static int ocp2scp_remove_devices(struct device *dev, void *c)
{
@ -34,20 +54,62 @@ static int ocp2scp_remove_devices(struct device *dev, void *c)
static int __devinit omap_ocp2scp_probe(struct platform_device *pdev)
{
int ret;
struct device_node *np = pdev->dev.of_node;
int ret;
unsigned res_cnt, i;
struct device_node *np = pdev->dev.of_node;
struct platform_device *pdev_child;
struct omap_ocp2scp_platform_data *pdata = pdev->dev.platform_data;
struct omap_ocp2scp_dev *dev;
if (np) {
ret = of_platform_populate(np, NULL, NULL, &pdev->dev);
if (ret) {
dev_err(&pdev->dev, "failed to add resources for ocp2scp child\n");
dev_err(&pdev->dev,
"failed to add resources for ocp2scp child\n");
goto err0;
}
} else if (pdata) {
for (i = 0, dev = *pdata->devices; i < pdata->dev_cnt; i++,
dev++) {
res_cnt = _count_resources(dev->res);
pdev_child = platform_device_alloc(dev->drv_name,
PLATFORM_DEVID_AUTO);
if (!pdev_child) {
dev_err(&pdev->dev,
"failed to allocate mem for ocp2scp child\n");
goto err0;
}
ret = platform_device_add_resources(pdev_child,
dev->res, res_cnt);
if (ret) {
dev_err(&pdev->dev,
"failed to add resources for ocp2scp child\n");
goto err1;
}
pdev_child->dev.parent = &pdev->dev;
ret = platform_device_add(pdev_child);
if (ret) {
dev_err(&pdev->dev,
"failed to register ocp2scp child device\n");
goto err1;
}
}
} else {
dev_err(&pdev->dev, "OCP2SCP initialized without plat data\n");
return -EINVAL;
}
pm_runtime_enable(&pdev->dev);
return 0;
err1:
platform_device_put(pdev_child);
err0:
device_for_each_child(&pdev->dev, NULL, ocp2scp_remove_devices);

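_count_resources() above relies on each resource array in the platform data ending with an entry whose start equals its end. A standalone sketch of the same walk, with struct resource reduced to the two fields that matter here and hypothetical register windows as input:

#include <stdio.h>

/* Cut-down stand-in for the kernel's struct resource. */
struct resource {
        unsigned long start;
        unsigned long end;
};

/* Same walk as _count_resources(): stop at the first entry whose
 * start equals its end, i.e. the terminator. */
static unsigned int count_resources(const struct resource *res)
{
        unsigned int cnt = 0;

        while (res->start != res->end) {
                cnt++;
                res++;
        }
        return cnt;
}

int main(void)
{
        struct resource res[] = {
                { 0x4a0ad080, 0x4a0ad0ff },     /* hypothetical register window */
                { 0x4a0ae000, 0x4a0ae07f },     /* hypothetical register window */
                { 0, 0 },                       /* terminator */
        };

        printf("%u resources\n", count_resources(res));         /* 2 */
        return 0;
}
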
View File

@ -40,7 +40,7 @@ void u8500_clk_init(void)
CLK_IS_ROOT|CLK_IGNORE_UNUSED,
32768);
clk_register_clkdev(clk, "clk32k", NULL);
clk_register_clkdev(clk, NULL, "rtc-pl031");
clk_register_clkdev(clk, "apb_pclk", "rtc-pl031");
/* PRCMU clocks */
fw_version = prcmu_get_fw_version();
@ -228,10 +228,17 @@ void u8500_clk_init(void)
clk = clk_reg_prcc_pclk("p1_pclk2", "per1clk", U8500_CLKRST1_BASE,
BIT(2), 0);
clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.1");
clk = clk_reg_prcc_pclk("p1_pclk3", "per1clk", U8500_CLKRST1_BASE,
BIT(3), 0);
clk_register_clkdev(clk, "apb_pclk", "msp0");
clk_register_clkdev(clk, "apb_pclk", "ux500-msp-i2s.0");
clk = clk_reg_prcc_pclk("p1_pclk4", "per1clk", U8500_CLKRST1_BASE,
BIT(4), 0);
clk_register_clkdev(clk, "apb_pclk", "msp1");
clk_register_clkdev(clk, "apb_pclk", "ux500-msp-i2s.1");
clk = clk_reg_prcc_pclk("p1_pclk5", "per1clk", U8500_CLKRST1_BASE,
BIT(5), 0);
@ -239,6 +246,7 @@ void u8500_clk_init(void)
clk = clk_reg_prcc_pclk("p1_pclk6", "per1clk", U8500_CLKRST1_BASE,
BIT(6), 0);
clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.2");
clk = clk_reg_prcc_pclk("p1_pclk7", "per1clk", U8500_CLKRST1_BASE,
BIT(7), 0);
@ -246,6 +254,7 @@ void u8500_clk_init(void)
clk = clk_reg_prcc_pclk("p1_pclk8", "per1clk", U8500_CLKRST1_BASE,
BIT(8), 0);
clk_register_clkdev(clk, "apb_pclk", "slimbus0");
clk = clk_reg_prcc_pclk("p1_pclk9", "per1clk", U8500_CLKRST1_BASE,
BIT(9), 0);
@ -255,11 +264,16 @@ void u8500_clk_init(void)
clk = clk_reg_prcc_pclk("p1_pclk10", "per1clk", U8500_CLKRST1_BASE,
BIT(10), 0);
clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.4");
clk = clk_reg_prcc_pclk("p1_pclk11", "per1clk", U8500_CLKRST1_BASE,
BIT(11), 0);
clk_register_clkdev(clk, "apb_pclk", "msp3");
clk_register_clkdev(clk, "apb_pclk", "ux500-msp-i2s.3");
clk = clk_reg_prcc_pclk("p2_pclk0", "per2clk", U8500_CLKRST2_BASE,
BIT(0), 0);
clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.3");
clk = clk_reg_prcc_pclk("p2_pclk1", "per2clk", U8500_CLKRST2_BASE,
BIT(1), 0);
@ -279,12 +293,13 @@ void u8500_clk_init(void)
clk = clk_reg_prcc_pclk("p2_pclk5", "per2clk", U8500_CLKRST2_BASE,
BIT(5), 0);
clk_register_clkdev(clk, "apb_pclk", "msp2");
clk_register_clkdev(clk, "apb_pclk", "ux500-msp-i2s.2");
clk = clk_reg_prcc_pclk("p2_pclk6", "per2clk", U8500_CLKRST2_BASE,
BIT(6), 0);
clk_register_clkdev(clk, "apb_pclk", "sdi1");
clk = clk_reg_prcc_pclk("p2_pclk7", "per2clk", U8500_CLKRST2_BASE,
BIT(7), 0);
clk_register_clkdev(clk, "apb_pclk", "sdi3");
@ -316,10 +331,15 @@ void u8500_clk_init(void)
clk = clk_reg_prcc_pclk("p3_pclk1", "per3clk", U8500_CLKRST3_BASE,
BIT(1), 0);
clk_register_clkdev(clk, "apb_pclk", "ssp0");
clk = clk_reg_prcc_pclk("p3_pclk2", "per3clk", U8500_CLKRST3_BASE,
BIT(2), 0);
clk_register_clkdev(clk, "apb_pclk", "ssp1");
clk = clk_reg_prcc_pclk("p3_pclk3", "per3clk", U8500_CLKRST3_BASE,
BIT(3), 0);
clk_register_clkdev(clk, "apb_pclk", "nmk-i2c.0");
clk = clk_reg_prcc_pclk("p3_pclk4", "per3clk", U8500_CLKRST3_BASE,
BIT(4), 0);
@ -401,10 +421,17 @@ void u8500_clk_init(void)
clk = clk_reg_prcc_kclk("p1_i2c1_kclk", "i2cclk",
U8500_CLKRST1_BASE, BIT(2), CLK_SET_RATE_GATE);
clk_register_clkdev(clk, NULL, "nmk-i2c.1");
clk = clk_reg_prcc_kclk("p1_msp0_kclk", "msp02clk",
U8500_CLKRST1_BASE, BIT(3), CLK_SET_RATE_GATE);
clk_register_clkdev(clk, NULL, "msp0");
clk_register_clkdev(clk, NULL, "ux500-msp-i2s.0");
clk = clk_reg_prcc_kclk("p1_msp1_kclk", "msp1clk",
U8500_CLKRST1_BASE, BIT(4), CLK_SET_RATE_GATE);
clk_register_clkdev(clk, NULL, "msp1");
clk_register_clkdev(clk, NULL, "ux500-msp-i2s.1");
clk = clk_reg_prcc_kclk("p1_sdi0_kclk", "sdmmcclk",
U8500_CLKRST1_BASE, BIT(5), CLK_SET_RATE_GATE);
@ -412,17 +439,25 @@ void u8500_clk_init(void)
clk = clk_reg_prcc_kclk("p1_i2c2_kclk", "i2cclk",
U8500_CLKRST1_BASE, BIT(6), CLK_SET_RATE_GATE);
clk_register_clkdev(clk, NULL, "nmk-i2c.2");
clk = clk_reg_prcc_kclk("p1_slimbus0_kclk", "slimclk",
U8500_CLKRST1_BASE, BIT(3), CLK_SET_RATE_GATE);
/* FIXME: Redefinition of BIT(3). */
U8500_CLKRST1_BASE, BIT(8), CLK_SET_RATE_GATE);
clk_register_clkdev(clk, NULL, "slimbus0");
clk = clk_reg_prcc_kclk("p1_i2c4_kclk", "i2cclk",
U8500_CLKRST1_BASE, BIT(9), CLK_SET_RATE_GATE);
clk_register_clkdev(clk, NULL, "nmk-i2c.4");
clk = clk_reg_prcc_kclk("p1_msp3_kclk", "msp1clk",
U8500_CLKRST1_BASE, BIT(10), CLK_SET_RATE_GATE);
clk_register_clkdev(clk, NULL, "msp3");
clk_register_clkdev(clk, NULL, "ux500-msp-i2s.3");
/* Periph2 */
clk = clk_reg_prcc_kclk("p2_i2c3_kclk", "i2cclk",
U8500_CLKRST2_BASE, BIT(0), CLK_SET_RATE_GATE);
clk_register_clkdev(clk, NULL, "nmk-i2c.3");
clk = clk_reg_prcc_kclk("p2_sdi4_kclk", "sdmmcclk",
U8500_CLKRST2_BASE, BIT(2), CLK_SET_RATE_GATE);
@ -430,6 +465,8 @@ void u8500_clk_init(void)
clk = clk_reg_prcc_kclk("p2_msp2_kclk", "msp02clk",
U8500_CLKRST2_BASE, BIT(3), CLK_SET_RATE_GATE);
clk_register_clkdev(clk, NULL, "msp2");
clk_register_clkdev(clk, NULL, "ux500-msp-i2s.2");
clk = clk_reg_prcc_kclk("p2_sdi1_kclk", "sdmmcclk",
U8500_CLKRST2_BASE, BIT(4), CLK_SET_RATE_GATE);
@ -450,10 +487,15 @@ void u8500_clk_init(void)
/* Periph3 */
clk = clk_reg_prcc_kclk("p3_ssp0_kclk", "sspclk",
U8500_CLKRST3_BASE, BIT(1), CLK_SET_RATE_GATE);
clk_register_clkdev(clk, NULL, "ssp0");
clk = clk_reg_prcc_kclk("p3_ssp1_kclk", "sspclk",
U8500_CLKRST3_BASE, BIT(2), CLK_SET_RATE_GATE);
clk_register_clkdev(clk, NULL, "ssp1");
clk = clk_reg_prcc_kclk("p3_i2c0_kclk", "i2cclk",
U8500_CLKRST3_BASE, BIT(3), CLK_SET_RATE_GATE);
clk_register_clkdev(clk, NULL, "nmk-i2c.0");
clk = clk_reg_prcc_kclk("p3_sdi2_kclk", "sdmmcclk",
U8500_CLKRST3_BASE, BIT(4), CLK_SET_RATE_GATE);

View File

@ -143,7 +143,7 @@ static void intel_crt_dpms(struct drm_connector *connector, int mode)
int old_dpms;
/* PCH platforms and VLV only support on/off. */
if (INTEL_INFO(dev)->gen < 5 && mode != DRM_MODE_DPMS_ON)
if (INTEL_INFO(dev)->gen >= 5 && mode != DRM_MODE_DPMS_ON)
mode = DRM_MODE_DPMS_OFF;
if (mode == connector->dpms)

View File

@ -3841,6 +3841,17 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
}
}
if (intel_encoder->type == INTEL_OUTPUT_EDP) {
/* Use VBT settings if we have an eDP panel */
unsigned int edp_bpc = dev_priv->edp.bpp / 3;
if (edp_bpc < display_bpc) {
DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
display_bpc = edp_bpc;
}
continue;
}
/*
* HDMI is either 12 or 8, so if the display lets 10bpc sneak
* through, clamp it down. (Note: >12bpc will be caught below.)

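The eDP branch above derives a bits-per-component figure from the VBT's bits-per-pixel value (bpp / 3, one component each for R, G and B) and clamps the pipe to it. The arithmetic in isolation, with made-up input values:

#include <stdio.h>

int main(void)
{
        unsigned int edp_bpp = 18;      /* hypothetical VBT value: 6 bpc x 3 components */
        unsigned int display_bpc = 8;   /* what the pipe would otherwise use */

        unsigned int edp_bpc = edp_bpp / 3;
        if (edp_bpc < display_bpc)
                display_bpc = edp_bpc;  /* clamp to what the panel supports */

        printf("display_bpc = %u\n", display_bpc);      /* 6 */
        return 0;
}
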
View File

@ -2382,6 +2382,18 @@ intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags)
return true;
}
static void intel_sdvo_output_cleanup(struct intel_sdvo *intel_sdvo)
{
struct drm_device *dev = intel_sdvo->base.base.dev;
struct drm_connector *connector, *tmp;
list_for_each_entry_safe(connector, tmp,
&dev->mode_config.connector_list, head) {
if (intel_attached_encoder(connector) == &intel_sdvo->base)
intel_sdvo_destroy(connector);
}
}
static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
struct intel_sdvo_connector *intel_sdvo_connector,
int type)
@ -2705,7 +2717,8 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
intel_sdvo->caps.output_flags) != true) {
DRM_DEBUG_KMS("SDVO output failed to setup on %s\n",
SDVO_NAME(intel_sdvo));
goto err;
/* Output_setup can leave behind connectors! */
goto err_output;
}
/* Only enable the hotplug irq if we need it, to work around noisy
@ -2718,12 +2731,12 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
/* Set the input timing to the screen. Assume always input 0. */
if (!intel_sdvo_set_target_input(intel_sdvo))
goto err;
goto err_output;
if (!intel_sdvo_get_input_pixel_clock_range(intel_sdvo,
&intel_sdvo->pixel_clock_min,
&intel_sdvo->pixel_clock_max))
goto err;
goto err_output;
DRM_DEBUG_KMS("%s device VID/DID: %02X:%02X.%02X, "
"clock range %dMHz - %dMHz, "
@ -2743,6 +2756,9 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
(SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N');
return true;
err_output:
intel_sdvo_output_cleanup(intel_sdvo);
err:
drm_encoder_cleanup(&intel_encoder->base);
i2c_del_adapter(&intel_sdvo->ddc);

View File

@ -1625,7 +1625,7 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
/* some early dce3.2 boards have a bug in their transmitter control table */
if ((rdev->family != CHIP_RV710) || (rdev->family != CHIP_RV730))
if ((rdev->family != CHIP_RV710) && (rdev->family != CHIP_RV730))
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
}
if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {

View File

@ -749,7 +749,10 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
/* clear the pages coming from the pool if requested */
if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
list_for_each_entry(p, &plist, lru) {
clear_page(page_address(p));
if (PageHighMem(p))
clear_highpage(p);
else
clear_page(page_address(p));
}
}

View File

@ -308,9 +308,7 @@ int ttm_tt_swapin(struct ttm_tt *ttm)
if (unlikely(to_page == NULL))
goto out_err;
preempt_disable();
copy_highpage(to_page, from_page);
preempt_enable();
page_cache_release(from_page);
}
@ -358,9 +356,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
ret = PTR_ERR(to_page);
goto out_err;
}
preempt_disable();
copy_highpage(to_page, from_page);
preempt_enable();
set_page_dirty(to_page);
mark_page_accessed(to_page);
page_cache_release(to_page);

View File

@ -110,6 +110,8 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
memcpy_fromio(bounce, &fifo_mem[SVGA_FIFO_3D_CAPS], size);
ret = copy_to_user(buffer, bounce, size);
if (ret)
ret = -EFAULT;
vfree(bounce);
if (unlikely(ret != 0))

View File

@ -46,9 +46,9 @@ static __u8 *ms_report_fixup(struct hid_device *hdev, __u8 *rdesc,
rdesc[559] = 0x45;
}
/* the same as above (s/usage/physical/) */
if ((quirks & MS_RDESC_3K) && *rsize == 106 &&
!memcmp((char []){ 0x19, 0x00, 0x29, 0xff },
&rdesc[94], 4)) {
if ((quirks & MS_RDESC_3K) && *rsize == 106 && rdesc[94] == 0x19 &&
rdesc[95] == 0x00 && rdesc[96] == 0x29 &&
rdesc[97] == 0xff) {
rdesc[94] = 0x35;
rdesc[96] = 0x45;
}

View File

@ -169,7 +169,7 @@ static int __devinit i2c_mux_pinctrl_probe(struct platform_device *pdev)
mux->busses = devm_kzalloc(&pdev->dev,
sizeof(mux->busses) * mux->pdata->bus_count,
GFP_KERNEL);
if (!mux->states) {
if (!mux->busses) {
dev_err(&pdev->dev, "Cannot allocate busses\n");
ret = -ENOMEM;
goto err;

View File

@ -168,7 +168,8 @@ static int __init armctrl_of_init(struct device_node *node,
}
static struct of_device_id irq_of_match[] __initconst = {
{ .compatible = "brcm,bcm2835-armctrl-ic", .data = armctrl_of_init }
{ .compatible = "brcm,bcm2835-armctrl-ic", .data = armctrl_of_init },
{ }
};
void __init bcm2835_init_irq(void)

View File

@ -33,8 +33,6 @@
struct led_trigger_cpu {
char name[MAX_NAME_LEN];
struct led_trigger *_trig;
struct mutex lock;
int lock_is_inited;
};
static DEFINE_PER_CPU(struct led_trigger_cpu, cpu_trig);
@ -50,12 +48,6 @@ void ledtrig_cpu(enum cpu_led_event ledevt)
{
struct led_trigger_cpu *trig = &__get_cpu_var(cpu_trig);
/* mutex lock should be initialized before calling mutex_call() */
if (!trig->lock_is_inited)
return;
mutex_lock(&trig->lock);
/* Locate the correct CPU LED */
switch (ledevt) {
case CPU_LED_IDLE_END:
@ -75,8 +67,6 @@ void ledtrig_cpu(enum cpu_led_event ledevt)
/* Will leave the LED as it is */
break;
}
mutex_unlock(&trig->lock);
}
EXPORT_SYMBOL(ledtrig_cpu);
@ -117,14 +107,9 @@ static int __init ledtrig_cpu_init(void)
for_each_possible_cpu(cpu) {
struct led_trigger_cpu *trig = &per_cpu(cpu_trig, cpu);
mutex_init(&trig->lock);
snprintf(trig->name, MAX_NAME_LEN, "cpu%d", cpu);
mutex_lock(&trig->lock);
led_trigger_register_simple(trig->name, &trig->_trig);
trig->lock_is_inited = 1;
mutex_unlock(&trig->lock);
}
register_syscore_ops(&ledtrig_cpu_syscore_ops);
@ -142,15 +127,9 @@ static void __exit ledtrig_cpu_exit(void)
for_each_possible_cpu(cpu) {
struct led_trigger_cpu *trig = &per_cpu(cpu_trig, cpu);
mutex_lock(&trig->lock);
led_trigger_unregister_simple(trig->_trig);
trig->_trig = NULL;
memset(trig->name, 0, MAX_NAME_LEN);
trig->lock_is_inited = 0;
mutex_unlock(&trig->lock);
mutex_destroy(&trig->lock);
}
unregister_syscore_ops(&ledtrig_cpu_syscore_ops);

View File

@ -1860,10 +1860,14 @@ jme_open(struct net_device *netdev)
jme_clear_pm(jme);
JME_NAPI_ENABLE(jme);
tasklet_enable(&jme->linkch_task);
tasklet_enable(&jme->txclean_task);
tasklet_hi_enable(&jme->rxclean_task);
tasklet_hi_enable(&jme->rxempty_task);
tasklet_init(&jme->linkch_task, jme_link_change_tasklet,
(unsigned long) jme);
tasklet_init(&jme->txclean_task, jme_tx_clean_tasklet,
(unsigned long) jme);
tasklet_init(&jme->rxclean_task, jme_rx_clean_tasklet,
(unsigned long) jme);
tasklet_init(&jme->rxempty_task, jme_rx_empty_tasklet,
(unsigned long) jme);
rc = jme_request_irq(jme);
if (rc)
@ -3079,22 +3083,6 @@ jme_init_one(struct pci_dev *pdev,
tasklet_init(&jme->pcc_task,
jme_pcc_tasklet,
(unsigned long) jme);
tasklet_init(&jme->linkch_task,
jme_link_change_tasklet,
(unsigned long) jme);
tasklet_init(&jme->txclean_task,
jme_tx_clean_tasklet,
(unsigned long) jme);
tasklet_init(&jme->rxclean_task,
jme_rx_clean_tasklet,
(unsigned long) jme);
tasklet_init(&jme->rxempty_task,
jme_rx_empty_tasklet,
(unsigned long) jme);
tasklet_disable_nosync(&jme->linkch_task);
tasklet_disable_nosync(&jme->txclean_task);
tasklet_disable_nosync(&jme->rxclean_task);
tasklet_disable_nosync(&jme->rxempty_task);
jme->dpi.cur = PCC_P1;
jme->reg_ghc = 0;

View File

@ -5459,8 +5459,10 @@ static int prepare_hardware(struct net_device *dev)
rc = request_irq(dev->irq, netdev_intr, IRQF_SHARED, dev->name, dev);
if (rc)
return rc;
tasklet_enable(&hw_priv->rx_tasklet);
tasklet_enable(&hw_priv->tx_tasklet);
tasklet_init(&hw_priv->rx_tasklet, rx_proc_task,
(unsigned long) hw_priv);
tasklet_init(&hw_priv->tx_tasklet, tx_proc_task,
(unsigned long) hw_priv);
hw->promiscuous = 0;
hw->all_multi = 0;
@ -7033,16 +7035,6 @@ static int __devinit pcidev_init(struct pci_dev *pdev,
spin_lock_init(&hw_priv->hwlock);
mutex_init(&hw_priv->lock);
/* tasklet is enabled. */
tasklet_init(&hw_priv->rx_tasklet, rx_proc_task,
(unsigned long) hw_priv);
tasklet_init(&hw_priv->tx_tasklet, tx_proc_task,
(unsigned long) hw_priv);
/* tasklet_enable will decrement the atomic counter. */
tasklet_disable(&hw_priv->rx_tasklet);
tasklet_disable(&hw_priv->tx_tasklet);
for (i = 0; i < TOTAL_PORT_NUM; i++)
init_waitqueue_head(&hw_priv->counter[i].counter);

View File

@ -2110,7 +2110,7 @@ static void __devinit smsc911x_read_mac_address(struct net_device *dev)
static int __devinit smsc911x_init(struct net_device *dev)
{
struct smsc911x_data *pdata = netdev_priv(dev);
unsigned int byte_test;
unsigned int byte_test, mask;
unsigned int to = 100;
SMSC_TRACE(pdata, probe, "Driver Parameters:");
@ -2130,9 +2130,22 @@ static int __devinit smsc911x_init(struct net_device *dev)
/*
* poll the READY bit in PMT_CTRL. Any other access to the device is
* forbidden while this bit isn't set. Try for 100ms
*
* Note that this test is done before the WORD_SWAP register is
* programmed. So in some configurations the READY bit is at 16 before
* WORD_SWAP is written to. This issue is worked around by waiting
* until either bit 0 or bit 16 gets set in PMT_CTRL.
*
* SMSC has confirmed that checking bit 16 (marked as reserved in
* the datasheet) is fine since these bits "will either never be set
* or can only go high after READY does (so also indicate the device
* is ready)".
*/
while (!(smsc911x_reg_read(pdata, PMT_CTRL) & PMT_CTRL_READY_) && --to)
mask = PMT_CTRL_READY_ | swahw32(PMT_CTRL_READY_);
while (!(smsc911x_reg_read(pdata, PMT_CTRL) & mask) && --to)
udelay(1000);
if (to == 0) {
pr_err("Device not READY in 100ms aborting\n");
return -ENODEV;

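The workaround above builds a mask that accepts the READY bit in either halfword of PMT_CTRL, since WORD_SWAP may not have been programmed yet. A userspace sketch of the mask construction; swahw32() here is a local stand-in for the kernel helper that swaps the two 16-bit halfwords of a 32-bit word:

#include <stdio.h>
#include <stdint.h>

#define PMT_CTRL_READY_ 0x00000001u

/* Local stand-in for the kernel's swahw32(): swap 16-bit halfwords. */
static uint32_t swahw32(uint32_t x)
{
        return (x << 16) | (x >> 16);
}

int main(void)
{
        /* Accept READY at bit 0 or, pre-WORD_SWAP, at bit 16. */
        uint32_t mask = PMT_CTRL_READY_ | swahw32(PMT_CTRL_READY_);

        printf("mask = 0x%08x\n", mask);        /* 0x00010001 */
        return 0;
}
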
View File

@ -917,7 +917,7 @@ static int tile_net_setup_interrupts(struct net_device *dev)
ingress_irq = rc;
tile_irq_activate(ingress_irq, TILE_IRQ_PERCPU);
rc = request_irq(ingress_irq, tile_net_handle_ingress_irq,
0, NULL, NULL);
0, "tile_net", NULL);
if (rc != 0) {
netdev_err(dev, "request_irq failed: %d\n", rc);
destroy_irq(ingress_irq);

View File

@ -942,6 +942,10 @@ static int axienet_open(struct net_device *ndev)
phy_start(lp->phy_dev);
}
/* Enable tasklets for Axi DMA error handling */
tasklet_init(&lp->dma_err_tasklet, axienet_dma_err_handler,
(unsigned long) lp);
/* Enable interrupts for Axi DMA Tx */
ret = request_irq(lp->tx_irq, axienet_tx_irq, 0, ndev->name, ndev);
if (ret)
@ -950,8 +954,7 @@ static int axienet_open(struct net_device *ndev)
ret = request_irq(lp->rx_irq, axienet_rx_irq, 0, ndev->name, ndev);
if (ret)
goto err_rx_irq;
/* Enable tasklets for Axi DMA error handling */
tasklet_enable(&lp->dma_err_tasklet);
return 0;
err_rx_irq:
@ -960,6 +963,7 @@ err_tx_irq:
if (lp->phy_dev)
phy_disconnect(lp->phy_dev);
lp->phy_dev = NULL;
tasklet_kill(&lp->dma_err_tasklet);
dev_err(lp->dev, "request_irq() failed\n");
return ret;
}
@ -1613,10 +1617,6 @@ static int __devinit axienet_of_probe(struct platform_device *op)
goto err_iounmap_2;
}
tasklet_init(&lp->dma_err_tasklet, axienet_dma_err_handler,
(unsigned long) lp);
tasklet_disable(&lp->dma_err_tasklet);
return 0;
err_iounmap_2:

View File

@ -234,7 +234,6 @@ void free_mdio_bitbang(struct mii_bus *bus)
struct mdiobb_ctrl *ctrl = bus->priv;
module_put(ctrl->ops->owner);
mdiobus_unregister(bus);
mdiobus_free(bus);
}
EXPORT_SYMBOL(free_mdio_bitbang);

View File

@ -540,10 +540,12 @@ advance:
(ctx->ether_desc == NULL) || (ctx->control != intf))
goto error;
/* claim interfaces, if any */
temp = usb_driver_claim_interface(driver, ctx->data, dev);
if (temp)
goto error;
/* claim data interface, if different from control */
if (ctx->data != ctx->control) {
temp = usb_driver_claim_interface(driver, ctx->data, dev);
if (temp)
goto error;
}
iface_no = ctx->data->cur_altsetting->desc.bInterfaceNumber;
@ -623,6 +625,10 @@ static void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf)
tasklet_kill(&ctx->bh);
/* handle devices with combined control and data interface */
if (ctx->control == ctx->data)
ctx->data = NULL;
/* disconnect master --> disconnect slave */
if (intf == ctx->control && ctx->data) {
usb_set_intfdata(ctx->data, NULL);
@ -1245,6 +1251,14 @@ static const struct usb_device_id cdc_devs[] = {
.driver_info = (unsigned long) &wwan_info,
},
/* Huawei NCM devices disguised as vendor specific */
{ USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x16),
.driver_info = (unsigned long)&wwan_info,
},
{ USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x46),
.driver_info = (unsigned long)&wwan_info,
},
/* Generic CDC-NCM devices */
{ USB_INTERFACE_INFO(USB_CLASS_COMM,
USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),

View File

@ -184,7 +184,7 @@ static int smsc95xx_mdio_read(struct net_device *netdev, int phy_id, int idx)
/* set the address, index & direction (read from PHY) */
phy_id &= dev->mii.phy_id_mask;
idx &= dev->mii.reg_num_mask;
addr = (phy_id << 11) | (idx << 6) | MII_READ_;
addr = (phy_id << 11) | (idx << 6) | MII_READ_ | MII_BUSY_;
ret = smsc95xx_write_reg(dev, MII_ADDR, addr);
check_warn_goto_done(ret, "Error writing MII_ADDR");
@ -221,7 +221,7 @@ static void smsc95xx_mdio_write(struct net_device *netdev, int phy_id, int idx,
/* set the address, index & direction (write to PHY) */
phy_id &= dev->mii.phy_id_mask;
idx &= dev->mii.reg_num_mask;
addr = (phy_id << 11) | (idx << 6) | MII_WRITE_;
addr = (phy_id << 11) | (idx << 6) | MII_WRITE_ | MII_BUSY_;
ret = smsc95xx_write_reg(dev, MII_ADDR, addr);
check_warn_goto_done(ret, "Error writing MII_ADDR");

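The fix above ORs MII_BUSY_ into the value written to MII_ADDR when starting a PHY read or write. That register value is just a packed bitfield; a sketch of its composition with illustrative bit values (the real definitions live in the driver's header):

#include <stdio.h>
#include <stdint.h>

/* Illustrative bit layout; see the driver header for the real values. */
#define MII_BUSY_       0x00000001u
#define MII_WRITE_      0x00000002u
#define MII_READ_       0x00000000u

int main(void)
{
        uint32_t phy_id = 1, idx = 2;

        /* PHY address, register index, direction and the busy/start bit. */
        uint32_t addr = (phy_id << 11) | (idx << 6) | MII_READ_ | MII_BUSY_;

        printf("MII_ADDR = 0x%08x\n", addr);    /* 0x00000881 */
        return 0;
}
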
View File

@ -1,5 +1,5 @@
/*
* VXLAN: Virtual eXtensiable Local Area Network
* VXLAN: Virtual eXtensible Local Area Network
*
* Copyright (c) 2012 Vyatta Inc.
*
@ -50,8 +50,8 @@
#define VXLAN_N_VID (1u << 24)
#define VXLAN_VID_MASK (VXLAN_N_VID - 1)
/* VLAN + IP header + UDP + VXLAN */
#define VXLAN_HEADROOM (4 + 20 + 8 + 8)
/* IP header + UDP + VXLAN + Ethernet header */
#define VXLAN_HEADROOM (20 + 8 + 8 + 14)
#define VXLAN_FLAGS 0x08000000 /* struct vxlanhdr.vx_flags required value. */
@ -1102,6 +1102,10 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
if (!tb[IFLA_MTU])
dev->mtu = lowerdev->mtu - VXLAN_HEADROOM;
/* update header length based on lower device */
dev->hard_header_len = lowerdev->hard_header_len +
VXLAN_HEADROOM;
}
if (data[IFLA_VXLAN_TOS])

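The updated VXLAN_HEADROOM above reserves room for the outer IP (20), UDP (8) and VXLAN (8) headers plus the inner Ethernet header (14), and the new hunk also grows hard_header_len by the same amount. The arithmetic, with illustrative lower-device values:

#include <stdio.h>

/* Outer IP + UDP + VXLAN + inner Ethernet header. */
#define VXLAN_HEADROOM  (20 + 8 + 8 + 14)

int main(void)
{
        unsigned int lower_mtu = 1500;                  /* hypothetical lower device MTU */
        unsigned int lower_hard_header_len = 14;        /* hypothetical Ethernet header */

        printf("vxlan mtu             = %u\n", lower_mtu - VXLAN_HEADROOM);             /* 1450 */
        printf("vxlan hard_header_len = %u\n", lower_hard_header_len + VXLAN_HEADROOM); /* 64 */
        return 0;
}
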
View File

@ -4401,7 +4401,7 @@ static s32 brcmf_mode_to_nl80211_iftype(s32 mode)
static void brcmf_wiphy_pno_params(struct wiphy *wiphy)
{
#ifndef CONFIG_BRCMFISCAN
#ifndef CONFIG_BRCMISCAN
/* scheduled scan settings */
wiphy->max_sched_scan_ssids = BRCMF_PNO_MAX_PFN_COUNT;
wiphy->max_match_sets = BRCMF_PNO_MAX_PFN_COUNT;

View File

@ -521,7 +521,7 @@ static void iwlagn_mac_tx(struct ieee80211_hw *hw,
ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
if (iwlagn_tx_skb(priv, control->sta, skb))
dev_kfree_skb_any(skb);
ieee80211_free_txskb(hw, skb);
}
static void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw,

View File

@ -2114,7 +2114,7 @@ static void iwl_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
info = IEEE80211_SKB_CB(skb);
iwl_trans_free_tx_cmd(priv->trans, info->driver_data[1]);
dev_kfree_skb_any(skb);
ieee80211_free_txskb(priv->hw, skb);
}
static void iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)

View File

@ -321,6 +321,14 @@ static void iwl_rx_allocate(struct iwl_trans *trans, gfp_t priority)
dma_map_page(trans->dev, page, 0,
PAGE_SIZE << trans_pcie->rx_page_order,
DMA_FROM_DEVICE);
if (dma_mapping_error(trans->dev, rxb->page_dma)) {
rxb->page = NULL;
spin_lock_irqsave(&rxq->lock, flags);
list_add(&rxb->list, &rxq->rx_used);
spin_unlock_irqrestore(&rxq->lock, flags);
__free_pages(page, trans_pcie->rx_page_order);
return;
}
/* dma address must be no more than 36 bits */
BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
/* and also 256 byte aligned! */
@ -488,8 +496,19 @@ static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
dma_map_page(trans->dev, rxb->page, 0,
PAGE_SIZE << trans_pcie->rx_page_order,
DMA_FROM_DEVICE);
list_add_tail(&rxb->list, &rxq->rx_free);
rxq->free_count++;
if (dma_mapping_error(trans->dev, rxb->page_dma)) {
/*
* free the page(s) as well to not break
* the invariant that the items on the used
* list have no page(s)
*/
__free_pages(rxb->page, trans_pcie->rx_page_order);
rxb->page = NULL;
list_add_tail(&rxb->list, &rxq->rx_used);
} else {
list_add_tail(&rxb->list, &rxq->rx_free);
rxq->free_count++;
}
} else
list_add_tail(&rxb->list, &rxq->rx_used);
spin_unlock_irqrestore(&rxq->lock, flags);

View File

@ -401,7 +401,7 @@ EXPORT_SYMBOL_GPL(rio_release_inb_pwrite);
/**
* rio_map_inb_region -- Map inbound memory region.
* @mport: Master port.
* @lstart: physical address of memory region to be mapped
* @local: physical address of memory region to be mapped
* @rbase: RIO base address assigned to this window
* @size: Size of the memory region
* @rflags: Flags for mapping.

View File

@ -1381,22 +1381,14 @@ struct regulator *regulator_get_exclusive(struct device *dev, const char *id)
}
EXPORT_SYMBOL_GPL(regulator_get_exclusive);
/**
* regulator_put - "free" the regulator source
* @regulator: regulator source
*
* Note: drivers must ensure that all regulator_enable calls made on this
* regulator source are balanced by regulator_disable calls prior to calling
* this function.
*/
void regulator_put(struct regulator *regulator)
/* Locks held by regulator_put() */
static void _regulator_put(struct regulator *regulator)
{
struct regulator_dev *rdev;
if (regulator == NULL || IS_ERR(regulator))
return;
mutex_lock(&regulator_list_mutex);
rdev = regulator->rdev;
debugfs_remove_recursive(regulator->debugfs);
@ -1412,6 +1404,20 @@ void regulator_put(struct regulator *regulator)
rdev->exclusive = 0;
module_put(rdev->owner);
}
/**
* regulator_put - "free" the regulator source
* @regulator: regulator source
*
* Note: drivers must ensure that all regulator_enable calls made on this
* regulator source are balanced by regulator_disable calls prior to calling
* this function.
*/
void regulator_put(struct regulator *regulator)
{
mutex_lock(&regulator_list_mutex);
_regulator_put(regulator);
mutex_unlock(&regulator_list_mutex);
}
EXPORT_SYMBOL_GPL(regulator_put);
@ -1974,7 +1980,7 @@ int regulator_is_supported_voltage(struct regulator *regulator,
if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_VOLTAGE)) {
ret = regulator_get_voltage(regulator);
if (ret >= 0)
return (min_uV >= ret && ret <= max_uV);
return (min_uV <= ret && ret <= max_uV);
else
return ret;
}
@ -3365,7 +3371,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
if (ret != 0) {
rdev_err(rdev, "Failed to request enable GPIO%d: %d\n",
config->ena_gpio, ret);
goto clean;
goto wash;
}
rdev->ena_gpio = config->ena_gpio;
@ -3445,10 +3451,11 @@ unset_supplies:
scrub:
if (rdev->supply)
regulator_put(rdev->supply);
_regulator_put(rdev->supply);
if (rdev->ena_gpio)
gpio_free(rdev->ena_gpio);
kfree(rdev->constraints);
wash:
device_unregister(&rdev->dev);
/* device core frees rdev */
rdev = ERR_PTR(ret);

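The regulator hunk above splits the body of regulator_put() into an internal _regulator_put() that expects regulator_list_mutex to be held, so error paths that already hold the lock (the scrub path) can release a supply without taking the mutex twice. A minimal userspace sketch of that locked/unlocked split, using a hypothetical resource_put() pair; compile with -pthread:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static int refcount = 2;

/* Internal variant: caller must already hold list_lock. */
static void _resource_put(void)
{
        refcount--;
}

/* Public variant: takes the lock itself, like regulator_put(). */
static void resource_put(void)
{
        pthread_mutex_lock(&list_lock);
        _resource_put();
        pthread_mutex_unlock(&list_lock);
}

int main(void)
{
        resource_put();                 /* ordinary caller */

        pthread_mutex_lock(&list_lock); /* an error path that already holds the lock */
        _resource_put();
        pthread_mutex_unlock(&list_lock);

        printf("refcount = %d\n", refcount);    /* 0 */
        return 0;
}
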
View File

@ -44,7 +44,6 @@
#define RAW3215_NR_CCWS 3
#define RAW3215_TIMEOUT HZ/10 /* time for delayed output */
#define RAW3215_FIXED 1 /* 3215 console device is not be freed */
#define RAW3215_WORKING 4 /* set if a request is being worked on */
#define RAW3215_THROTTLED 8 /* set if reading is disabled */
#define RAW3215_STOPPED 16 /* set if writing is disabled */
@ -339,8 +338,10 @@ static void raw3215_wakeup(unsigned long data)
struct tty_struct *tty;
tty = tty_port_tty_get(&raw->port);
tty_wakeup(tty);
tty_kref_put(tty);
if (tty) {
tty_wakeup(tty);
tty_kref_put(tty);
}
}
/*
@ -629,8 +630,7 @@ static void raw3215_shutdown(struct raw3215_info *raw)
DECLARE_WAITQUEUE(wait, current);
unsigned long flags;
if (!(raw->port.flags & ASYNC_INITIALIZED) ||
(raw->flags & RAW3215_FIXED))
if (!(raw->port.flags & ASYNC_INITIALIZED))
return;
/* Wait for outstanding requests, then free irq */
spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
@ -926,8 +926,6 @@ static int __init con3215_init(void)
dev_set_drvdata(&cdev->dev, raw);
cdev->handler = raw3215_irq;
raw->flags |= RAW3215_FIXED;
/* Request the console irq */
if (raw3215_startup(raw) != 0) {
raw3215_free_info(raw);

Some files were not shown because too many files have changed in this diff.