Merge branch 'l2x0' of rmk tree into prima2-l2x0
commit 1e11bec9b0
Documentation/devicetree/bindings/arm/l2cc.txt | 44 (new file)

@@ -0,0 +1,44 @@
* ARM L2 Cache Controller

ARM cores often have a separate level 2 cache controller. There are various
implementations of the L2 cache controller with compatible programming models.
The ARM L2 cache representation in the device tree should be done as follows:

Required properties:

- compatible : should be one of:
  "arm,pl310-cache"
  "arm,l220-cache"
  "arm,l210-cache"
- cache-unified : Specifies the cache is a unified cache.
- cache-level : Should be set to 2 for a level 2 cache.
- reg : Physical base address and size of cache controller's memory mapped
  registers.

Optional properties:

- arm,data-latency : Cycles of latency for Data RAM accesses. Specifies 3 cells of
  read, write and setup latencies. Minimum valid values are 1. Controllers
  without setup latency control should use a value of 0.
- arm,tag-latency : Cycles of latency for Tag RAM accesses. Specifies 3 cells of
  read, write and setup latencies. Controllers without setup latency control
  should use 0. Controllers without separate read and write Tag RAM latency
  values should only use the first cell.
- arm,dirty-latency : Cycles of latency for Dirty RAMs. This is a single cell.
- arm,filter-ranges : <start length> Starting address and length of window to
  filter. Addresses in the filter window are directed to the M1 port. Other
  addresses will go to the M0 port.
- interrupts : 1 combined interrupt.

Example:

L2: cache-controller {
        compatible = "arm,pl310-cache";
        reg = <0xfff12000 0x1000>;
        arm,data-latency = <1 1 1>;
        arm,tag-latency = <2 2 2>;
        arm,filter-ranges = <0x80000000 0x8000000>;
        cache-unified;
        cache-level = <2>;
        interrupts = <45>;
};
@@ -62,6 +62,13 @@ can be safely used to identify the chip. You will have to instantiate
the devices explicitly. Please see Documentation/i2c/instantiating-devices for
details.

WARNING: Do not access chip registers using the i2cdump command, and do not use
any of the i2ctools commands on a command register (0xa5 to 0xac). The chips
supported by this driver interpret any access to a command register (including
read commands) as a request to execute the command in question. This may result in
power loss, board resets, and/or Flash corruption. Worst case, your board may
turn into a brick.


Sysfs entries
-------------
@@ -45,8 +45,15 @@
#define L2X0_CLEAN_INV_LINE_PA 0x7F0
#define L2X0_CLEAN_INV_LINE_IDX 0x7F8
#define L2X0_CLEAN_INV_WAY 0x7FC
#define L2X0_LOCKDOWN_WAY_D 0x900
#define L2X0_LOCKDOWN_WAY_I 0x904
/*
 * The lockdown registers repeat 8 times for L310, the L210 has only one
 * D and one I lockdown register at 0x0900 and 0x0904.
 */
#define L2X0_LOCKDOWN_WAY_D_BASE 0x900
#define L2X0_LOCKDOWN_WAY_I_BASE 0x904
#define L2X0_LOCKDOWN_STRIDE 0x08
#define L2X0_ADDR_FILTER_START 0xC00
#define L2X0_ADDR_FILTER_END 0xC04
#define L2X0_TEST_OPERATION 0xF00
#define L2X0_LINE_DATA 0xF10
#define L2X0_LINE_TAG 0xF30
@@ -60,8 +67,23 @@
#define L2X0_CACHE_ID_PART_MASK (0xf << 6)
#define L2X0_CACHE_ID_PART_L210 (1 << 6)
#define L2X0_CACHE_ID_PART_L310 (3 << 6)
#define L2X0_CACHE_ID_RTL_MASK 0x3f
#define L2X0_CACHE_ID_RTL_R0P0 0x0
#define L2X0_CACHE_ID_RTL_R1P0 0x2
#define L2X0_CACHE_ID_RTL_R2P0 0x4
#define L2X0_CACHE_ID_RTL_R3P0 0x5
#define L2X0_CACHE_ID_RTL_R3P1 0x6
#define L2X0_CACHE_ID_RTL_R3P2 0x8

#define L2X0_AUX_CTRL_MASK 0xc0000fff
#define L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT 0
#define L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK 0x7
#define L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT 3
#define L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK (0x7 << 3)
#define L2X0_AUX_CTRL_TAG_LATENCY_SHIFT 6
#define L2X0_AUX_CTRL_TAG_LATENCY_MASK (0x7 << 6)
#define L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT 9
#define L2X0_AUX_CTRL_DIRTY_LATENCY_MASK (0x7 << 9)
#define L2X0_AUX_CTRL_ASSOCIATIVITY_SHIFT 16
#define L2X0_AUX_CTRL_WAY_SIZE_SHIFT 17
#define L2X0_AUX_CTRL_WAY_SIZE_MASK (0x7 << 17)
@@ -72,8 +94,33 @@
#define L2X0_AUX_CTRL_INSTR_PREFETCH_SHIFT 29
#define L2X0_AUX_CTRL_EARLY_BRESP_SHIFT 30

#define L2X0_LATENCY_CTRL_SETUP_SHIFT 0
#define L2X0_LATENCY_CTRL_RD_SHIFT 4
#define L2X0_LATENCY_CTRL_WR_SHIFT 8

#define L2X0_ADDR_FILTER_EN 1

#ifndef __ASSEMBLY__
extern void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask);
extern int l2x0_of_init(__u32 aux_val, __u32 aux_mask);

struct l2x0_regs {
    unsigned long phy_base;
    unsigned long aux_ctrl;
    /*
     * Whether the following registers need to be saved/restored
     * depends on platform
     */
    unsigned long tag_latency;
    unsigned long data_latency;
    unsigned long filter_start;
    unsigned long filter_end;
    unsigned long prefetch_ctrl;
    unsigned long pwr_ctrl;
};

extern struct l2x0_regs l2x0_saved_regs;

#endif

#endif
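Editor's note: a minimal sketch of how board support code might consume the
l2x0_of_init() API declared above. The machine-init function name and the aux
values are hypothetical illustrations, not part of this merge:

/* Hypothetical board file -- illustrative only, not from this merge. */
#include <asm/hardware/cache-l2x0.h>

static void __init myboard_init_machine(void)
{
    /*
     * aux_val = 0, aux_mask = ~0: take AUX_CTRL as-is and let
     * l2x0_of_init() fill in latencies/filter settings from the
     * device tree node matched via the "arm,pl310-cache",
     * "arm,l220-cache" or "arm,l210-cache" compatible strings.
     */
    if (l2x0_of_init(0, ~0U))
        pr_warn("myboard: no L2 cache controller initialized\n");
}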
@@ -34,6 +34,7 @@ struct outer_cache_fns {
    void (*sync)(void);
#endif
    void (*set_debug)(unsigned long);
    void (*resume)(void);
};

#ifdef CONFIG_OUTER_CACHE
@@ -74,6 +75,12 @@ static inline void outer_disable(void)
    outer_cache.disable();
}

static inline void outer_resume(void)
{
    if (outer_cache.resume)
        outer_cache.resume();
}

#else

static inline void outer_inv_range(phys_addr_t start, phys_addr_t end)
@@ -20,6 +20,7 @@
#include <asm/thread_info.h>
#include <asm/memory.h>
#include <asm/procinfo.h>
#include <asm/hardware/cache-l2x0.h>
#include <linux/kbuild.h>

/*
@@ -92,6 +93,17 @@ int main(void)
    DEFINE(S_OLD_R0, offsetof(struct pt_regs, ARM_ORIG_r0));
    DEFINE(S_FRAME_SIZE, sizeof(struct pt_regs));
    BLANK();
#ifdef CONFIG_CACHE_L2X0
    DEFINE(L2X0_R_PHY_BASE, offsetof(struct l2x0_regs, phy_base));
    DEFINE(L2X0_R_AUX_CTRL, offsetof(struct l2x0_regs, aux_ctrl));
    DEFINE(L2X0_R_TAG_LATENCY, offsetof(struct l2x0_regs, tag_latency));
    DEFINE(L2X0_R_DATA_LATENCY, offsetof(struct l2x0_regs, data_latency));
    DEFINE(L2X0_R_FILTER_START, offsetof(struct l2x0_regs, filter_start));
    DEFINE(L2X0_R_FILTER_END, offsetof(struct l2x0_regs, filter_end));
    DEFINE(L2X0_R_PREFETCH_CTRL, offsetof(struct l2x0_regs, prefetch_ctrl));
    DEFINE(L2X0_R_PWR_CTRL, offsetof(struct l2x0_regs, pwr_ctrl));
    BLANK();
#endif
#ifdef CONFIG_CPU_HAS_ASID
    DEFINE(MM_CONTEXT_ID, offsetof(struct mm_struct, context.id));
    BLANK();
@@ -337,15 +337,15 @@ static unsigned long timer_reload;
static void integrator_clocksource_init(u32 khz)
{
    void __iomem *base = (void __iomem *)TIMER2_VA_BASE;
    u32 ctrl = TIMER_CTRL_ENABLE;
    u32 ctrl = TIMER_CTRL_ENABLE | TIMER_CTRL_PERIODIC;

    if (khz >= 1500) {
        khz /= 16;
        ctrl = TIMER_CTRL_DIV16;
        ctrl |= TIMER_CTRL_DIV16;
    }

    writel(ctrl, base + TIMER_CTRL);
    writel(0xffff, base + TIMER_LOAD);
    writel(ctrl, base + TIMER_CTRL);

    clocksource_mmio_init(base + TIMER_VALUE, "timer2",
        khz * 1000, 200, 16, clocksource_mmio_readl_down);
@@ -17,7 +17,7 @@
    cmp \tmp, # 0x5600 @ Is it ldrsb?
    orreq \tmp, \tmp, #1 << 11 @ Set L-bit if yes
    tst \tmp, #1 << 11 @ L = 0 -> write
    orreq \psr, \psr, #1 << 11 @ yes.
    orreq \fsr, \fsr, #1 << 11 @ yes.
    b do_DataAbort
not_thumb:
    .endm
@@ -16,9 +16,12 @@
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/err.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>

#include <asm/cacheflush.h>
#include <asm/hardware/cache-l2x0.h>
|
||||
static uint32_t l2x0_way_mask; /* Bitmask of active ways */
|
||||
static uint32_t l2x0_size;
|
||||
|
||||
struct l2x0_regs l2x0_saved_regs;
|
||||
|
||||
struct l2x0_of_data {
|
||||
void (*setup)(const struct device_node *, __u32 *, __u32 *);
|
||||
void (*save)(void);
|
||||
void (*resume)(void);
|
||||
};
|
||||
|
||||
static inline void cache_wait_way(void __iomem *reg, unsigned long mask)
|
||||
{
|
||||
/* wait for cache operation by line or way to complete */
|
||||
while (readl_relaxed(reg) & mask)
|
||||
;
|
||||
cpu_relax();
|
||||
}
|
||||
|
||||
#ifdef CONFIG_CACHE_PL310
|
||||
@ -277,6 +288,25 @@ static void l2x0_disable(void)
|
||||
spin_unlock_irqrestore(&l2x0_lock, flags);
|
||||
}
|
||||
|
||||
static void l2x0_unlock(__u32 cache_id)
|
||||
{
|
||||
int lockregs;
|
||||
int i;
|
||||
|
||||
if (cache_id == L2X0_CACHE_ID_PART_L310)
|
||||
lockregs = 8;
|
||||
else
|
||||
/* L210 and unknown types */
|
||||
lockregs = 1;
|
||||
|
||||
for (i = 0; i < lockregs; i++) {
|
||||
writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_D_BASE +
|
||||
i * L2X0_LOCKDOWN_STRIDE);
|
||||
writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_I_BASE +
|
||||
i * L2X0_LOCKDOWN_STRIDE);
|
||||
}
|
||||
}
|
||||
|
||||
void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
|
||||
{
|
||||
__u32 aux;
|
||||
@ -328,10 +358,14 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
|
||||
* accessing the below registers will fault.
|
||||
*/
|
||||
if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
|
||||
/* Make sure that I&D is not locked down when starting */
|
||||
l2x0_unlock(cache_id);
|
||||
|
||||
/* l2x0 controller is disabled */
|
||||
writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL);
|
||||
|
||||
l2x0_saved_regs.aux_ctrl = aux;
|
||||
|
||||
l2x0_inv_all();
|
||||
|
||||
/* enable L2X0 */
|
||||
@ -351,3 +385,202 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
|
||||
printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n",
|
||||
ways, cache_id, aux, l2x0_size);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_OF
|
||||
static void __init l2x0_of_setup(const struct device_node *np,
|
||||
__u32 *aux_val, __u32 *aux_mask)
|
||||
{
|
||||
u32 data[2] = { 0, 0 };
|
||||
u32 tag = 0;
|
||||
u32 dirty = 0;
|
||||
u32 val = 0, mask = 0;
|
||||
|
||||
of_property_read_u32(np, "arm,tag-latency", &tag);
|
||||
if (tag) {
|
||||
mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK;
|
||||
val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT;
|
||||
}
|
||||
|
||||
of_property_read_u32_array(np, "arm,data-latency",
|
||||
data, ARRAY_SIZE(data));
|
||||
if (data[0] && data[1]) {
|
||||
mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK |
|
||||
L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK;
|
||||
val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) |
|
||||
((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT);
|
||||
}
|
||||
|
||||
of_property_read_u32(np, "arm,dirty-latency", &dirty);
|
||||
if (dirty) {
|
||||
mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK;
|
||||
val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
|
||||
}
|
||||
|
||||
*aux_val &= ~mask;
|
||||
*aux_val |= val;
|
||||
*aux_mask &= ~mask;
|
||||
}
|
||||
|
||||
static void __init pl310_of_setup(const struct device_node *np,
|
||||
__u32 *aux_val, __u32 *aux_mask)
|
||||
{
|
||||
u32 data[3] = { 0, 0, 0 };
|
||||
u32 tag[3] = { 0, 0, 0 };
|
||||
u32 filter[2] = { 0, 0 };
|
||||
|
||||
of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
|
||||
if (tag[0] && tag[1] && tag[2])
|
||||
writel_relaxed(
|
||||
((tag[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
|
||||
((tag[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
|
||||
((tag[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
|
||||
l2x0_base + L2X0_TAG_LATENCY_CTRL);
|
||||
|
||||
of_property_read_u32_array(np, "arm,data-latency",
|
||||
data, ARRAY_SIZE(data));
|
||||
if (data[0] && data[1] && data[2])
|
||||
writel_relaxed(
|
||||
((data[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
|
||||
((data[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
|
||||
((data[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
|
||||
l2x0_base + L2X0_DATA_LATENCY_CTRL);
|
||||
|
||||
of_property_read_u32_array(np, "arm,filter-ranges",
|
||||
filter, ARRAY_SIZE(filter));
|
||||
if (filter[1]) {
|
||||
writel_relaxed(ALIGN(filter[0] + filter[1], SZ_1M),
|
||||
l2x0_base + L2X0_ADDR_FILTER_END);
|
||||
writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L2X0_ADDR_FILTER_EN,
|
||||
l2x0_base + L2X0_ADDR_FILTER_START);
|
||||
}
|
||||
}
|
||||
|
||||
static void __init pl310_save(void)
|
||||
{
|
||||
u32 l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) &
|
||||
L2X0_CACHE_ID_RTL_MASK;
|
||||
|
||||
l2x0_saved_regs.tag_latency = readl_relaxed(l2x0_base +
|
||||
L2X0_TAG_LATENCY_CTRL);
|
||||
l2x0_saved_regs.data_latency = readl_relaxed(l2x0_base +
|
||||
L2X0_DATA_LATENCY_CTRL);
|
||||
l2x0_saved_regs.filter_end = readl_relaxed(l2x0_base +
|
||||
L2X0_ADDR_FILTER_END);
|
||||
l2x0_saved_regs.filter_start = readl_relaxed(l2x0_base +
|
||||
L2X0_ADDR_FILTER_START);
|
||||
|
||||
if (l2x0_revision >= L2X0_CACHE_ID_RTL_R2P0) {
|
||||
/*
|
||||
* From r2p0, there is Prefetch offset/control register
|
||||
*/
|
||||
l2x0_saved_regs.prefetch_ctrl = readl_relaxed(l2x0_base +
|
||||
L2X0_PREFETCH_CTRL);
|
||||
/*
|
||||
* From r3p0, there is Power control register
|
||||
*/
|
||||
if (l2x0_revision >= L2X0_CACHE_ID_RTL_R3P0)
|
||||
l2x0_saved_regs.pwr_ctrl = readl_relaxed(l2x0_base +
|
||||
L2X0_POWER_CTRL);
|
||||
}
|
||||
}
|
||||
|
||||
static void l2x0_resume(void)
|
||||
{
|
||||
if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
|
||||
/* restore aux ctrl and enable l2 */
|
||||
l2x0_unlock(readl_relaxed(l2x0_base + L2X0_CACHE_ID));
|
||||
|
||||
writel_relaxed(l2x0_saved_regs.aux_ctrl, l2x0_base +
|
||||
L2X0_AUX_CTRL);
|
||||
|
||||
l2x0_inv_all();
|
||||
|
||||
writel_relaxed(1, l2x0_base + L2X0_CTRL);
|
||||
}
|
||||
}
|
||||
|
||||
static void pl310_resume(void)
|
||||
{
|
||||
u32 l2x0_revision;
|
||||
|
||||
if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
|
||||
/* restore pl310 setup */
|
||||
writel_relaxed(l2x0_saved_regs.tag_latency,
|
||||
l2x0_base + L2X0_TAG_LATENCY_CTRL);
|
||||
writel_relaxed(l2x0_saved_regs.data_latency,
|
||||
l2x0_base + L2X0_DATA_LATENCY_CTRL);
|
||||
writel_relaxed(l2x0_saved_regs.filter_end,
|
||||
l2x0_base + L2X0_ADDR_FILTER_END);
|
||||
writel_relaxed(l2x0_saved_regs.filter_start,
|
||||
l2x0_base + L2X0_ADDR_FILTER_START);
|
||||
|
||||
l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) &
|
||||
L2X0_CACHE_ID_RTL_MASK;
|
||||
|
||||
if (l2x0_revision >= L2X0_CACHE_ID_RTL_R2P0) {
|
||||
writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
|
||||
l2x0_base + L2X0_PREFETCH_CTRL);
|
||||
if (l2x0_revision >= L2X0_CACHE_ID_RTL_R3P0)
|
||||
writel_relaxed(l2x0_saved_regs.pwr_ctrl,
|
||||
l2x0_base + L2X0_POWER_CTRL);
|
||||
}
|
||||
}
|
||||
|
||||
l2x0_resume();
|
||||
}
|
||||
|
||||
static const struct l2x0_of_data pl310_data = {
|
||||
pl310_of_setup,
|
||||
pl310_save,
|
||||
pl310_resume,
|
||||
};
|
||||
|
||||
static const struct l2x0_of_data l2x0_data = {
|
||||
l2x0_of_setup,
|
||||
NULL,
|
||||
l2x0_resume,
|
||||
};
|
||||
|
||||
static const struct of_device_id l2x0_ids[] __initconst = {
|
||||
{ .compatible = "arm,pl310-cache", .data = (void *)&pl310_data },
|
||||
{ .compatible = "arm,l220-cache", .data = (void *)&l2x0_data },
|
||||
{ .compatible = "arm,l210-cache", .data = (void *)&l2x0_data },
|
||||
{}
|
||||
};
|
||||
|
||||
int __init l2x0_of_init(__u32 aux_val, __u32 aux_mask)
|
||||
{
|
||||
struct device_node *np;
|
||||
struct l2x0_of_data *data;
|
||||
struct resource res;
|
||||
|
||||
np = of_find_matching_node(NULL, l2x0_ids);
|
||||
if (!np)
|
||||
return -ENODEV;
|
||||
|
||||
if (of_address_to_resource(np, 0, &res))
|
||||
return -ENODEV;
|
||||
|
||||
l2x0_base = ioremap(res.start, resource_size(&res));
|
||||
if (!l2x0_base)
|
||||
return -ENOMEM;
|
||||
|
||||
l2x0_saved_regs.phy_base = res.start;
|
||||
|
||||
data = of_match_node(l2x0_ids, np)->data;
|
||||
|
||||
/* L2 configuration can only be changed if the cache is disabled */
|
||||
if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
|
||||
if (data->setup)
|
||||
data->setup(np, &aux_val, &aux_mask);
|
||||
}
|
||||
|
||||
if (data->save)
|
||||
data->save();
|
||||
|
||||
l2x0_init(l2x0_base, aux_val, aux_mask);
|
||||
|
||||
outer_cache.resume = data->resume;
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
@@ -298,7 +298,7 @@ static void __init arm_bootmem_free(unsigned long min, unsigned long max_low,
#ifdef CONFIG_HAVE_ARCH_PFN_VALID
int pfn_valid(unsigned long pfn)
{
    return memblock_is_memory(pfn << PAGE_SHIFT);
    return memblock_is_memory(__pfn_to_phys(pfn));
}
EXPORT_SYMBOL(pfn_valid);
#endif
@@ -44,7 +44,7 @@ static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift)
        : "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (mul_frac) );
#elif defined(__x86_64__)
    __asm__ (
        "mul %[mul_frac] ; shrd $32, %[hi], %[lo]"
        "mulq %[mul_frac] ; shrd $32, %[hi], %[lo]"
        : [lo]"=a"(product),
          [hi]"=d"(tmp)
        : "0"(delta),
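Editor's note: the hunk above swaps the ambiguous mul mnemonic for an explicit
64-bit mulq. The instruction pair computes (delta * mul_frac) >> 32; a portable
reference version, purely my illustration and not kernel code:

#include <stdint.h>

/* Reference for the asm above: 64x32-bit multiply, keep bits [95:32].
 * Uses the GCC/Clang __int128 extension; the kernel avoids it via asm. */
static uint64_t pvclock_scale_ref(uint64_t delta, uint32_t mul_frac)
{
    return (uint64_t)(((unsigned __int128)delta * mul_frac) >> 32);
}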
@@ -1900,6 +1900,9 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)

    perf_callchain_store(entry, regs->ip);

    if (!current->mm)
        return;

    if (perf_callchain_user32(regs, entry))
        return;
@@ -365,8 +365,13 @@ struct pci_bus * __devinit pci_acpi_scan_root(struct acpi_pci_root *root)
     */
    if (bus) {
        struct pci_bus *child;
        list_for_each_entry(child, &bus->children, node)
            pcie_bus_configure_settings(child, child->self->pcie_mpss);
        list_for_each_entry(child, &bus->children, node) {
            struct pci_dev *self = child->self;
            if (!self)
                continue;

            pcie_bus_configure_settings(child, self->pcie_mpss);
        }
    }

    if (!bus)
@@ -184,6 +184,19 @@ static unsigned long __init xen_set_identity(const struct e820entry *list,
            PFN_UP(start_pci), PFN_DOWN(last));
    return identity;
}

static unsigned long __init xen_get_max_pages(void)
{
    unsigned long max_pages = MAX_DOMAIN_PAGES;
    domid_t domid = DOMID_SELF;
    int ret;

    ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
    if (ret > 0)
        max_pages = ret;
    return min(max_pages, MAX_DOMAIN_PAGES);
}

/**
 * machine_specific_memory_setup - Hook for machine specific memory setup.
 **/
@@ -292,6 +305,12 @@ char * __init xen_memory_setup(void)

    sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);

    extra_limit = xen_get_max_pages();
    if (extra_limit >= max_pfn)
        extra_pages = extra_limit - max_pfn;
    else
        extra_pages = 0;

    extra_pages += xen_return_unused_memory(xen_start_info->nr_pages, &e820);

    /*
@@ -32,6 +32,7 @@
#include <xen/page.h>
#include <xen/events.h>

#include <xen/hvc-console.h>
#include "xen-ops.h"
#include "mmu.h"
@@ -207,6 +208,15 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
    unsigned cpu;
    unsigned int i;

    if (skip_ioapic_setup) {
        char *m = (max_cpus == 0) ?
            "The nosmp parameter is incompatible with Xen; " \
            "use Xen dom0_max_vcpus=1 parameter" :
            "The noapic parameter is incompatible with Xen";

        xen_raw_printk(m);
        panic(m);
    }
    xen_init_lock_cpu(0);

    smp_store_cpu_info(0);
@@ -113,11 +113,13 @@ xen_iret_start_crit:

    /*
     * If there's something pending, mask events again so we can
     * jump back into xen_hypervisor_callback
     * jump back into xen_hypervisor_callback. Otherwise do not
     * touch XEN_vcpu_info_mask.
     */
    sete XEN_vcpu_info_mask(%eax)
    jne 1f
    movb $1, XEN_vcpu_info_mask(%eax)

    popl %eax
1:  popl %eax

    /*
     * From this point on the registers are restored and the stack
@@ -168,13 +168,11 @@ struct regmap *regmap_init(struct device *dev,
    map->work_buf = kmalloc(map->format.buf_size, GFP_KERNEL);
    if (map->work_buf == NULL) {
        ret = -ENOMEM;
        goto err_bus;
        goto err_map;
    }

    return map;

err_bus:
    module_put(map->bus->owner);
err_map:
    kfree(map);
err:
@@ -188,7 +186,6 @@ EXPORT_SYMBOL_GPL(regmap_init);
void regmap_exit(struct regmap *map)
{
    kfree(map->work_buf);
    module_put(map->bus->owner);
    kfree(map);
}
EXPORT_SYMBOL_GPL(regmap_exit);
@@ -174,8 +174,10 @@ struct d40_base;
 * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
 * transfer and call client callback.
 * @client: Client owned descriptor list.
 * @pending_queue: Submitted jobs, to be issued by issue_pending()
 * @active: Active descriptor.
 * @queue: Queued jobs.
 * @prepare_queue: Prepared jobs.
 * @dma_cfg: The client configuration of this dma channel.
 * @configured: whether the dma_cfg configuration is valid
 * @base: Pointer to the device instance struct.
@@ -203,6 +205,7 @@ struct d40_chan {
    struct list_head pending_queue;
    struct list_head active;
    struct list_head queue;
    struct list_head prepare_queue;
    struct stedma40_chan_cfg dma_cfg;
    bool configured;
    struct d40_base *base;
@@ -477,7 +480,6 @@ static struct d40_desc *d40_desc_get(struct d40_chan *d40c)

    list_for_each_entry_safe(d, _d, &d40c->client, node)
        if (async_tx_test_ack(&d->txd)) {
            d40_pool_lli_free(d40c, d);
            d40_desc_remove(d);
            desc = d;
            memset(desc, 0, sizeof(*desc));
@@ -644,8 +646,11 @@ static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
    return d;
}

/* remove desc from current queue and add it to the pending_queue */
static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
{
    d40_desc_remove(desc);
    desc->is_in_client_list = false;
    list_add_tail(&desc->node, &d40c->pending_queue);
}
@@ -803,6 +808,7 @@ done:
static void d40_term_all(struct d40_chan *d40c)
{
    struct d40_desc *d40d;
    struct d40_desc *_d;

    /* Release active descriptors */
    while ((d40d = d40_first_active_get(d40c))) {
@@ -822,6 +828,21 @@ static void d40_term_all(struct d40_chan *d40c)
        d40_desc_free(d40c, d40d);
    }

    /* Release client owned descriptors */
    if (!list_empty(&d40c->client))
        list_for_each_entry_safe(d40d, _d, &d40c->client, node) {
            d40_desc_remove(d40d);
            d40_desc_free(d40c, d40d);
        }

    /* Release descriptors in prepare queue */
    if (!list_empty(&d40c->prepare_queue))
        list_for_each_entry_safe(d40d, _d,
                                 &d40c->prepare_queue, node) {
            d40_desc_remove(d40d);
            d40_desc_free(d40c, d40d);
        }

    d40c->pending_tx = 0;
    d40c->busy = false;
}
@@ -1208,7 +1229,6 @@ static void dma_tasklet(unsigned long data)

    if (!d40d->cyclic) {
        if (async_tx_test_ack(&d40d->txd)) {
            d40_pool_lli_free(d40c, d40d);
            d40_desc_remove(d40d);
            d40_desc_free(d40c, d40d);
        } else {
@@ -1595,21 +1615,10 @@ static int d40_free_dma(struct d40_chan *d40c)
    u32 event;
    struct d40_phy_res *phy = d40c->phy_chan;
    bool is_src;
    struct d40_desc *d;
    struct d40_desc *_d;

    /* Terminate all queued and active transfers */
    d40_term_all(d40c);

    /* Release client owned descriptors */
    if (!list_empty(&d40c->client))
        list_for_each_entry_safe(d, _d, &d40c->client, node) {
            d40_pool_lli_free(d40c, d);
            d40_desc_remove(d);
            d40_desc_free(d40c, d);
        }

    if (phy == NULL) {
        chan_err(d40c, "phy == null\n");
        return -EINVAL;
@@ -1911,6 +1920,12 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
        goto err;
    }

    /*
     * add descriptor to the prepare queue in order to be able
     * to free them later in terminate_all
     */
    list_add_tail(&desc->node, &chan->prepare_queue);

    spin_unlock_irqrestore(&chan->lock, flags);

    return &desc->txd;
@@ -2400,6 +2415,7 @@ static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
        INIT_LIST_HEAD(&d40c->queue);
        INIT_LIST_HEAD(&d40c->pending_queue);
        INIT_LIST_HEAD(&d40c->client);
        INIT_LIST_HEAD(&d40c->prepare_queue);

        tasklet_init(&d40c->tasklet, dma_tasklet,
                     (unsigned long) d40c);
@@ -277,6 +277,7 @@
#define USB_DEVICE_ID_PENPOWER 0x00f4

#define USB_VENDOR_ID_GREENASIA 0x0e8f
#define USB_DEVICE_ID_GREENASIA_DUAL_USB_JOYPAD 0x3013

#define USB_VENDOR_ID_GRETAGMACBETH 0x0971
#define USB_DEVICE_ID_GRETAGMACBETH_HUEY 0x2005
@@ -81,6 +81,28 @@ MODULE_PARM_DESC(report_undeciphered, "Report undeciphered multi-touch state fie
#define NO_TOUCHES -1
#define SINGLE_TOUCH_UP -2

/* Touch surface information. Dimension is in hundredths of a mm, min and max
 * are in units. */
#define MOUSE_DIMENSION_X (float)9056
#define MOUSE_MIN_X -1100
#define MOUSE_MAX_X 1258
#define MOUSE_RES_X ((MOUSE_MAX_X - MOUSE_MIN_X) / (MOUSE_DIMENSION_X / 100))
#define MOUSE_DIMENSION_Y (float)5152
#define MOUSE_MIN_Y -1589
#define MOUSE_MAX_Y 2047
#define MOUSE_RES_Y ((MOUSE_MAX_Y - MOUSE_MIN_Y) / (MOUSE_DIMENSION_Y / 100))

#define TRACKPAD_DIMENSION_X (float)13000
#define TRACKPAD_MIN_X -2909
#define TRACKPAD_MAX_X 3167
#define TRACKPAD_RES_X \
    ((TRACKPAD_MAX_X - TRACKPAD_MIN_X) / (TRACKPAD_DIMENSION_X / 100))
#define TRACKPAD_DIMENSION_Y (float)11000
#define TRACKPAD_MIN_Y -2456
#define TRACKPAD_MAX_Y 2565
#define TRACKPAD_RES_Y \
    ((TRACKPAD_MAX_Y - TRACKPAD_MIN_Y) / (TRACKPAD_DIMENSION_Y / 100))

/**
 * struct magicmouse_sc - Tracks Magic Mouse-specific data.
 * @input: Input device through which we report events.
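Editor's note: checking the resolution macros above by hand (my arithmetic, not
from the patch). Dimensions are hundredths of a mm, so dividing by 100 gives mm,
and the result is in device units per mm:

/* MOUSE_RES_X    = (1258 - (-1100)) / (9056 / 100)  = 2358 / 90.56 ~= 26 units/mm
 * TRACKPAD_RES_X = (3167 - (-2909)) / (13000 / 100) = 6076 / 130   ~= 47 units/mm
 */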
@@ -406,17 +428,31 @@ static void magicmouse_setup_input(struct input_dev *input, struct hid_device *h
     * inverse of the reported Y.
     */
    if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE) {
        input_set_abs_params(input, ABS_MT_POSITION_X, -1100,
                             1358, 4, 0);
        input_set_abs_params(input, ABS_MT_POSITION_Y, -1589,
                             2047, 4, 0);
        input_set_abs_params(input, ABS_MT_POSITION_X,
                             MOUSE_MIN_X, MOUSE_MAX_X, 4, 0);
        input_set_abs_params(input, ABS_MT_POSITION_Y,
                             MOUSE_MIN_Y, MOUSE_MAX_Y, 4, 0);

        input_abs_set_res(input, ABS_MT_POSITION_X,
                          MOUSE_RES_X);
        input_abs_set_res(input, ABS_MT_POSITION_Y,
                          MOUSE_RES_Y);
    } else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
        input_set_abs_params(input, ABS_X, -2909, 3167, 4, 0);
        input_set_abs_params(input, ABS_Y, -2456, 2565, 4, 0);
        input_set_abs_params(input, ABS_MT_POSITION_X, -2909,
                             3167, 4, 0);
        input_set_abs_params(input, ABS_MT_POSITION_Y, -2456,
                             2565, 4, 0);
        input_set_abs_params(input, ABS_X, TRACKPAD_MIN_X,
                             TRACKPAD_MAX_X, 4, 0);
        input_set_abs_params(input, ABS_Y, TRACKPAD_MIN_Y,
                             TRACKPAD_MAX_Y, 4, 0);
        input_set_abs_params(input, ABS_MT_POSITION_X,
                             TRACKPAD_MIN_X, TRACKPAD_MAX_X, 4, 0);
        input_set_abs_params(input, ABS_MT_POSITION_Y,
                             TRACKPAD_MIN_Y, TRACKPAD_MAX_Y, 4, 0);

        input_abs_set_res(input, ABS_X, TRACKPAD_RES_X);
        input_abs_set_res(input, ABS_Y, TRACKPAD_RES_Y);
        input_abs_set_res(input, ABS_MT_POSITION_X,
                          TRACKPAD_RES_X);
        input_abs_set_res(input, ABS_MT_POSITION_Y,
                          TRACKPAD_RES_Y);
    }

    input_set_events_per_packet(input, 60);
@@ -501,9 +537,17 @@ static int magicmouse_probe(struct hid_device *hdev,
    }
    report->size = 6;

    /*
     * Some devices respond with 'invalid report id' when feature
     * report switching it into multitouch mode is sent to it.
     *
     * This results in -EIO from the _raw low-level transport callback,
     * but there seems to be no other way of switching the mode.
     * Thus the super-ugly hacky success check below.
     */
    ret = hdev->hid_output_raw_report(hdev, feature, sizeof(feature),
            HID_FEATURE_REPORT);
    if (ret != sizeof(feature)) {
    if (ret != -EIO && ret != sizeof(feature)) {
        hid_err(hdev, "unable to request touch data (%d)\n", ret);
        goto err_stop_hw;
    }
@@ -353,11 +353,7 @@ static int wacom_probe(struct hid_device *hdev,
    if (ret) {
        hid_warn(hdev, "can't create sysfs battery attribute, err: %d\n",
                 ret);
        /*
         * battery attribute is not critical for the tablet, but if it
         * failed then there is no need to create ac attribute
         */
        goto move_on;
        goto err_battery;
    }

    wdata->ac.properties = wacom_ac_props;
@@ -371,14 +367,8 @@ static int wacom_probe(struct hid_device *hdev,
    if (ret) {
        hid_warn(hdev,
                 "can't create ac battery attribute, err: %d\n", ret);
        /*
         * ac attribute is not critical for the tablet, but if it
         * failed then we don't want to battery attribute to exist
         */
        power_supply_unregister(&wdata->battery);
        goto err_ac;
    }

move_on:
#endif
    hidinput = list_entry(hdev->inputs.next, struct hid_input, list);
    input = hidinput->input;
@@ -416,6 +406,13 @@ move_on:

    return 0;

#ifdef CONFIG_HID_WACOM_POWER_SUPPLY
err_ac:
    power_supply_unregister(&wdata->battery);
err_battery:
    device_remove_file(&hdev->dev, &dev_attr_speed);
    hid_hw_stop(hdev);
#endif
err_free:
    kfree(wdata);
    return ret;
@@ -426,6 +423,7 @@ static void wacom_remove(struct hid_device *hdev)
#ifdef CONFIG_HID_WACOM_POWER_SUPPLY
    struct wacom_data *wdata = hid_get_drvdata(hdev);
#endif
    device_remove_file(&hdev->dev, &dev_attr_speed);
    hid_hw_stop(hdev);

#ifdef CONFIG_HID_WACOM_POWER_SUPPLY
@@ -47,6 +47,7 @@ static const struct hid_blacklist {
    { USB_VENDOR_ID_AFATECH, USB_DEVICE_ID_AFATECH_AF9016, HID_QUIRK_FULLSPEED_INTERVAL },

    { USB_VENDOR_ID_ETURBOTOUCH, USB_DEVICE_ID_ETURBOTOUCH, HID_QUIRK_MULTI_INPUT },
    { USB_VENDOR_ID_GREENASIA, USB_DEVICE_ID_GREENASIA_DUAL_USB_JOYPAD, HID_QUIRK_MULTI_INPUT },
    { USB_VENDOR_ID_PANTHERLORD, USB_DEVICE_ID_PANTHERLORD_TWIN_USB_JOYSTICK, HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS },
    { USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII, HID_QUIRK_MULTI_INPUT },
    { USB_VENDOR_ID_TOUCHPACK, USB_DEVICE_ID_TOUCHPACK_RTS, HID_QUIRK_MULTI_INPUT },
@@ -124,7 +124,7 @@ static inline int MV_TO_LIMIT(int mv, int range)

static inline int ADC_TO_CURR(int adc, int gain)
{
    return adc * 1400000 / gain * 255;
    return adc * 1400000 / (gain * 255);
}
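Editor's note: the fix above is pure operator precedence -- C's / and * associate
left to right, so the old form divided by gain and then multiplied by 255. A
standalone check with made-up sample values (my sketch, not driver code):

#include <stdio.h>

static int adc_to_curr_buggy(int adc, int gain) { return adc * 1400000 / gain * 255; }
static int adc_to_curr_fixed(int adc, int gain) { return adc * 1400000 / (gain * 255); }

int main(void)
{
    /* adc = 10, gain = 20:
     * buggy: ((10 * 1400000) / 20) * 255 = 178500000
     * fixed:  (10 * 1400000) / (20 * 255) = 2745
     */
    printf("buggy=%d fixed=%d\n",
           adc_to_curr_buggy(10, 20), adc_to_curr_fixed(10, 20));
    return 0;
}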

/*
@@ -141,13 +141,11 @@ static int ucd9000_probe(struct i2c_client *client,
    block_buffer[ret] = '\0';
    dev_info(&client->dev, "Device ID %s\n", block_buffer);

    mid = NULL;
    for (i = 0; i < ARRAY_SIZE(ucd9000_id); i++) {
        mid = &ucd9000_id[i];
    for (mid = ucd9000_id; mid->name[0]; mid++) {
        if (!strncasecmp(mid->name, block_buffer, strlen(mid->name)))
            break;
    }
    if (!mid || !strlen(mid->name)) {
    if (!mid->name[0]) {
        dev_err(&client->dev, "Unsupported device\n");
        return -ENODEV;
    }
@@ -68,13 +68,11 @@ static int ucd9200_probe(struct i2c_client *client,
    block_buffer[ret] = '\0';
    dev_info(&client->dev, "Device ID %s\n", block_buffer);

    mid = NULL;
    for (i = 0; i < ARRAY_SIZE(ucd9200_id); i++) {
        mid = &ucd9200_id[i];
    for (mid = ucd9200_id; mid->name[0]; mid++) {
        if (!strncasecmp(mid->name, block_buffer, strlen(mid->name)))
            break;
    }
    if (!mid || !strlen(mid->name)) {
    if (!mid->name[0]) {
        dev_err(&client->dev, "Unsupported device\n");
        return -ENODEV;
    }
@@ -109,12 +109,15 @@ static int __devinit ce4100_i2c_probe(struct pci_dev *dev,
        return -EINVAL;
    }
    sds = kzalloc(sizeof(*sds), GFP_KERNEL);
    if (!sds)
    if (!sds) {
        ret = -ENOMEM;
        goto err_mem;
    }

    for (i = 0; i < ARRAY_SIZE(sds->pdev); i++) {
        sds->pdev[i] = add_i2c_device(dev, i);
        if (IS_ERR(sds->pdev[i])) {
            ret = PTR_ERR(sds->pdev[i]);
            while (--i >= 0)
                platform_device_unregister(sds->pdev[i]);
            goto err_dev_add;
@@ -270,14 +270,30 @@ static int tegra_i2c_fill_tx_fifo(struct tegra_i2c_dev *i2c_dev)

    /* Rounds down to not include partial word at the end of buf */
    words_to_transfer = buf_remaining / BYTES_PER_FIFO_WORD;
    if (words_to_transfer > tx_fifo_avail)
        words_to_transfer = tx_fifo_avail;

    i2c_writesl(i2c_dev, buf, I2C_TX_FIFO, words_to_transfer);
    /* It's very common to have < 4 bytes, so optimize that case. */
    if (words_to_transfer) {
        if (words_to_transfer > tx_fifo_avail)
            words_to_transfer = tx_fifo_avail;

        buf += words_to_transfer * BYTES_PER_FIFO_WORD;
        buf_remaining -= words_to_transfer * BYTES_PER_FIFO_WORD;
        tx_fifo_avail -= words_to_transfer;
        /*
         * Update state before writing to FIFO. If this causes us
         * to finish writing all bytes (AKA buf_remaining goes to 0) we
         * have a potential for an interrupt (PACKET_XFER_COMPLETE is
         * not maskable). We need to make sure that the isr sees
         * buf_remaining as 0 and doesn't call us back re-entrantly.
         */
        buf_remaining -= words_to_transfer * BYTES_PER_FIFO_WORD;
        tx_fifo_avail -= words_to_transfer;
        i2c_dev->msg_buf_remaining = buf_remaining;
        i2c_dev->msg_buf = buf +
            words_to_transfer * BYTES_PER_FIFO_WORD;
        barrier();

        i2c_writesl(i2c_dev, buf, I2C_TX_FIFO, words_to_transfer);

        buf += words_to_transfer * BYTES_PER_FIFO_WORD;
    }

    /*
     * If there is a partial word at the end of buf, handle it manually to
@@ -287,14 +303,15 @@ static int tegra_i2c_fill_tx_fifo(struct tegra_i2c_dev *i2c_dev)
    if (tx_fifo_avail > 0 && buf_remaining > 0) {
        BUG_ON(buf_remaining > 3);
        memcpy(&val, buf, buf_remaining);

        /* Again update before writing to FIFO to make sure isr sees. */
        i2c_dev->msg_buf_remaining = 0;
        i2c_dev->msg_buf = NULL;
        barrier();

        i2c_writel(i2c_dev, val, I2C_TX_FIFO);
        buf_remaining = 0;
        tx_fifo_avail--;
    }

    BUG_ON(tx_fifo_avail > 0 && buf_remaining > 0);
    i2c_dev->msg_buf_remaining = buf_remaining;
    i2c_dev->msg_buf = buf;
    return 0;
}
@@ -411,9 +428,10 @@ static irqreturn_t tegra_i2c_isr(int irq, void *dev_id)
        tegra_i2c_mask_irq(i2c_dev, I2C_INT_TX_FIFO_DATA_REQ);
    }

    if ((status & I2C_INT_PACKET_XFER_COMPLETE) &&
            !i2c_dev->msg_buf_remaining)
    if (status & I2C_INT_PACKET_XFER_COMPLETE) {
        BUG_ON(i2c_dev->msg_buf_remaining);
        complete(&i2c_dev->msg_complete);
    }

    i2c_writel(i2c_dev, status, I2C_INT_STATUS);
    if (i2c_dev->is_dvc)
@@ -531,7 +549,7 @@ static int tegra_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],

static u32 tegra_i2c_func(struct i2c_adapter *adap)
{
    return I2C_FUNC_I2C;
    return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}

static const struct i2c_algorithm tegra_i2c_algo = {
@@ -719,6 +737,17 @@ static int tegra_i2c_resume(struct platform_device *pdev)
}
#endif

#if defined(CONFIG_OF)
/* Match table for of_platform binding */
static const struct of_device_id tegra_i2c_of_match[] __devinitconst = {
    { .compatible = "nvidia,tegra20-i2c", },
    {},
};
MODULE_DEVICE_TABLE(of, tegra_i2c_of_match);
#else
#define tegra_i2c_of_match NULL
#endif

static struct platform_driver tegra_i2c_driver = {
    .probe = tegra_i2c_probe,
    .remove = tegra_i2c_remove,
@@ -729,6 +758,7 @@ static struct platform_driver tegra_i2c_driver = {
    .driver = {
        .name = "tegra-i2c",
        .owner = THIS_MODULE,
        .of_match_table = tegra_i2c_of_match,
    },
};

@@ -605,7 +605,9 @@ static void build_inv_all(struct iommu_cmd *cmd)
 * Writes the command to the IOMMUs command buffer and informs the
 * hardware about the new command.
 */
static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
static int iommu_queue_command_sync(struct amd_iommu *iommu,
                                    struct iommu_cmd *cmd,
                                    bool sync)
{
    u32 left, tail, head, next_tail;
    unsigned long flags;
|
||||
copy_cmd_to_buffer(iommu, cmd, tail);
|
||||
|
||||
/* We need to sync now to make sure all commands are processed */
|
||||
iommu->need_sync = true;
|
||||
iommu->need_sync = sync;
|
||||
|
||||
spin_unlock_irqrestore(&iommu->lock, flags);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
|
||||
{
|
||||
return iommu_queue_command_sync(iommu, cmd, true);
|
||||
}
|
||||
|
||||
/*
|
||||
* This function queues a completion wait command into the command
|
||||
* buffer of an IOMMU
|
||||
@ -661,7 +668,7 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
|
||||
|
||||
build_completion_wait(&cmd, (u64)&sem);
|
||||
|
||||
ret = iommu_queue_command(iommu, &cmd);
|
||||
ret = iommu_queue_command_sync(iommu, &cmd, false);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
@@ -840,14 +847,9 @@ static void domain_flush_complete(struct protection_domain *domain)
static void domain_flush_devices(struct protection_domain *domain)
{
    struct iommu_dev_data *dev_data;
    unsigned long flags;

    spin_lock_irqsave(&domain->lock, flags);

    list_for_each_entry(dev_data, &domain->dev_list, list)
        device_flush_dte(dev_data);

    spin_unlock_irqrestore(&domain->lock, flags);
}

/****************************************************************************
@@ -1138,8 +1138,11 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version
        ret = 0;
    }
    rdev->sectors = rdev->sb_start;
    /* Limit to 4TB as metadata cannot record more than that */
    if (rdev->sectors >= (2ULL << 32))
        rdev->sectors = (2ULL << 32) - 2;

    if (rdev->sectors < sb->size * 2 && sb->level > 1)
    if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1)
        /* "this cannot possibly happen" ... */
        ret = -EINVAL;

@@ -1173,7 +1176,7 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
        mddev->clevel[0] = 0;
        mddev->layout = sb->layout;
        mddev->raid_disks = sb->raid_disks;
        mddev->dev_sectors = sb->size * 2;
        mddev->dev_sectors = ((sector_t)sb->size) * 2;
        mddev->events = ev1;
        mddev->bitmap_info.offset = 0;
        mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;
@@ -1415,6 +1418,11 @@ super_90_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
    rdev->sb_start = calc_dev_sboffset(rdev);
    if (!num_sectors || num_sectors > rdev->sb_start)
        num_sectors = rdev->sb_start;
    /* Limit to 4TB as metadata cannot record more than that.
     * 4TB == 2^32 KB, or 2*2^32 sectors.
     */
    if (num_sectors >= (2ULL << 32))
        num_sectors = (2ULL << 32) - 2;
    md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
                   rdev->sb_page);
    md_super_wait(rdev->mddev);
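Editor's note: a quick check of the 4TB arithmetic in the comment above (my
numbers, assuming 512-byte sectors and a 32-bit size-in-KB superblock field):

/* 2ULL << 32 = 2^33 sectors; 2^33 sectors * 512 B = 2^42 B = 4 TB.
 * Equivalently 4 TB = 2^32 KB, the limit of a 32-bit KB-granularity size
 * field -- hence the cap at (2ULL << 32) - 2 sectors. */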
@@ -1099,12 +1099,11 @@ read_again:
            bio_list_add(&conf->pending_bio_list, mbio);
            spin_unlock_irqrestore(&conf->device_lock, flags);
        }
        r1_bio_write_done(r1_bio);

        /* In case raid1d snuck in to freeze_array */
        wake_up(&conf->wait_barrier);

        /* Mustn't call r1_bio_write_done before this next test,
         * as it could result in the bio being freed.
         */
        if (sectors_handled < (bio->bi_size >> 9)) {
            r1_bio_write_done(r1_bio);
            /* We need another r1_bio. It has already been counted
             * in bio->bi_phys_segments
             */
@@ -1117,6 +1116,11 @@ read_again:
        goto retry_write;
    }

    r1_bio_write_done(r1_bio);

    /* In case raid1d snuck in to freeze_array */
    wake_up(&conf->wait_barrier);

    if (do_sync || !bitmap || !plugged)
        md_wakeup_thread(mddev->thread);

@@ -337,6 +337,21 @@ static void close_write(r10bio_t *r10_bio)
    md_write_end(r10_bio->mddev);
}

static void one_write_done(r10bio_t *r10_bio)
{
    if (atomic_dec_and_test(&r10_bio->remaining)) {
        if (test_bit(R10BIO_WriteError, &r10_bio->state))
            reschedule_retry(r10_bio);
        else {
            close_write(r10_bio);
            if (test_bit(R10BIO_MadeGood, &r10_bio->state))
                reschedule_retry(r10_bio);
            else
                raid_end_bio_io(r10_bio);
        }
    }
}

static void raid10_end_write_request(struct bio *bio, int error)
{
    int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
@@ -387,17 +402,7 @@ static void raid10_end_write_request(struct bio *bio, int error)
     * Let's see if all mirrored write operations have finished
     * already.
     */
    if (atomic_dec_and_test(&r10_bio->remaining)) {
        if (test_bit(R10BIO_WriteError, &r10_bio->state))
            reschedule_retry(r10_bio);
        else {
            close_write(r10_bio);
            if (test_bit(R10BIO_MadeGood, &r10_bio->state))
                reschedule_retry(r10_bio);
            else
                raid_end_bio_io(r10_bio);
        }
    }
    one_write_done(r10_bio);
    if (dec_rdev)
        rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
}
@@ -1127,20 +1132,12 @@ retry_write:
        spin_unlock_irqrestore(&conf->device_lock, flags);
    }

    if (atomic_dec_and_test(&r10_bio->remaining)) {
        /* This matches the end of raid10_end_write_request() */
        bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
                        r10_bio->sectors,
                        !test_bit(R10BIO_Degraded, &r10_bio->state),
                        0);
        md_write_end(mddev);
        raid_end_bio_io(r10_bio);
    }

    /* In case raid10d snuck in to freeze_array */
    wake_up(&conf->wait_barrier);
    /* Don't remove the bias on 'remaining' (one_write_done) until
     * after checking if we need to go around again.
     */

    if (sectors_handled < (bio->bi_size >> 9)) {
        one_write_done(r10_bio);
        /* We need another r10_bio. It has already been counted
         * in bio->bi_phys_segments.
         */
@@ -1154,6 +1151,10 @@ retry_write:
        r10_bio->state = 0;
        goto retry_write;
    }
    one_write_done(r10_bio);

    /* In case raid10d snuck in to freeze_array */
    wake_up(&conf->wait_barrier);

    if (do_sync || !mddev->bitmap || !plugged)
        md_wakeup_thread(mddev->thread);

@@ -133,7 +133,7 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
        if (mrq->done)
            mrq->done(mrq);

        mmc_host_clk_gate(host);
        mmc_host_clk_release(host);
    }
}

@@ -192,7 +192,7 @@ mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
            mrq->stop->mrq = mrq;
        }
    }
    mmc_host_clk_ungate(host);
    mmc_host_clk_hold(host);
    led_trigger_event(host->led, LED_FULL);
    host->ops->request(host, mrq);
}
@@ -728,15 +728,17 @@ static inline void mmc_set_ios(struct mmc_host *host)
 */
void mmc_set_chip_select(struct mmc_host *host, int mode)
{
    mmc_host_clk_hold(host);
    host->ios.chip_select = mode;
    mmc_set_ios(host);
    mmc_host_clk_release(host);
}

/*
 * Sets the host clock to the highest possible frequency that
 * is below "hz".
 */
void mmc_set_clock(struct mmc_host *host, unsigned int hz)
static void __mmc_set_clock(struct mmc_host *host, unsigned int hz)
{
    WARN_ON(hz < host->f_min);

@@ -747,6 +749,13 @@ void mmc_set_clock(struct mmc_host *host, unsigned int hz)
    mmc_set_ios(host);
}

void mmc_set_clock(struct mmc_host *host, unsigned int hz)
{
    mmc_host_clk_hold(host);
    __mmc_set_clock(host, hz);
    mmc_host_clk_release(host);
}

#ifdef CONFIG_MMC_CLKGATE
/*
 * This gates the clock by setting it to 0 Hz.
@@ -779,7 +788,7 @@ void mmc_ungate_clock(struct mmc_host *host)
    if (host->clk_old) {
        BUG_ON(host->ios.clock);
        /* This call will also set host->clk_gated to false */
        mmc_set_clock(host, host->clk_old);
        __mmc_set_clock(host, host->clk_old);
    }
}

@@ -807,8 +816,10 @@ void mmc_set_ungated(struct mmc_host *host)
 */
void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
{
    mmc_host_clk_hold(host);
    host->ios.bus_mode = mode;
    mmc_set_ios(host);
    mmc_host_clk_release(host);
}

/*
@@ -816,8 +827,10 @@ void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
 */
void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
{
    mmc_host_clk_hold(host);
    host->ios.bus_width = width;
    mmc_set_ios(host);
    mmc_host_clk_release(host);
}

/**
@@ -1015,8 +1028,10 @@ u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)

        ocr &= 3 << bit;

        mmc_host_clk_hold(host);
        host->ios.vdd = bit;
        mmc_set_ios(host);
        mmc_host_clk_release(host);
    } else {
        pr_warning("%s: host doesn't support card's voltages\n",
            mmc_hostname(host));
@@ -1063,8 +1078,10 @@ int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, bool cmd11
 */
void mmc_set_timing(struct mmc_host *host, unsigned int timing)
{
    mmc_host_clk_hold(host);
    host->ios.timing = timing;
    mmc_set_ios(host);
    mmc_host_clk_release(host);
}

/*
@@ -1072,8 +1089,10 @@ void mmc_set_timing(struct mmc_host *host, unsigned int timing)
 */
void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
{
    mmc_host_clk_hold(host);
    host->ios.drv_type = drv_type;
    mmc_set_ios(host);
    mmc_host_clk_release(host);
}

/*
@@ -1091,6 +1110,8 @@ static void mmc_power_up(struct mmc_host *host)
{
    int bit;

    mmc_host_clk_hold(host);

    /* If ocr is set, we use it */
    if (host->ocr)
        bit = ffs(host->ocr) - 1;
|
||||
* time required to reach a stable voltage.
|
||||
*/
|
||||
mmc_delay(10);
|
||||
|
||||
mmc_host_clk_release(host);
|
||||
}
|
||||
|
||||
static void mmc_power_off(struct mmc_host *host)
|
||||
{
|
||||
mmc_host_clk_hold(host);
|
||||
|
||||
host->ios.clock = 0;
|
||||
host->ios.vdd = 0;
|
||||
|
||||
@@ -1147,6 +1172,8 @@ static void mmc_power_off(struct mmc_host *host)
    host->ios.bus_width = MMC_BUS_WIDTH_1;
    host->ios.timing = MMC_TIMING_LEGACY;
    mmc_set_ios(host);

    mmc_host_clk_release(host);
}

/*
@@ -119,14 +119,14 @@ static void mmc_host_clk_gate_work(struct work_struct *work)
}

/**
 * mmc_host_clk_ungate - ungate hardware MCI clocks
 * mmc_host_clk_hold - ungate hardware MCI clocks
 * @host: host to ungate.
 *
 * Makes sure the host ios.clock is restored to a non-zero value
 * past this call. Increase clock reference count and ungate clock
 * if we're the first user.
 */
void mmc_host_clk_ungate(struct mmc_host *host)
void mmc_host_clk_hold(struct mmc_host *host)
{
    unsigned long flags;

@@ -164,14 +164,14 @@ static bool mmc_host_may_gate_card(struct mmc_card *card)
}

/**
 * mmc_host_clk_gate - gate off hardware MCI clocks
 * mmc_host_clk_release - gate off hardware MCI clocks
 * @host: host to gate.
 *
 * Calls the host driver with ios.clock set to zero as often as possible
 * in order to gate off hardware MCI clocks. Decrease clock reference
 * count and schedule disabling of clock.
 */
void mmc_host_clk_gate(struct mmc_host *host)
void mmc_host_clk_release(struct mmc_host *host)
{
    unsigned long flags;

@@ -179,7 +179,7 @@ void mmc_host_clk_release(struct mmc_host *host)
    host->clk_requests--;
    if (mmc_host_may_gate_card(host->card) &&
            !host->clk_requests)
        schedule_work(&host->clk_gate_work);
        queue_work(system_nrt_wq, &host->clk_gate_work);
    spin_unlock_irqrestore(&host->clk_lock, flags);
}

@@ -231,7 +231,7 @@ static inline void mmc_host_clk_exit(struct mmc_host *host)
    if (cancel_work_sync(&host->clk_gate_work))
        mmc_host_clk_gate_delayed(host);
    if (host->clk_gated)
        mmc_host_clk_ungate(host);
        mmc_host_clk_hold(host);
    /* There should be only one user now */
    WARN_ON(host->clk_requests > 1);
}
@@ -16,16 +16,16 @@ int mmc_register_host_class(void);
void mmc_unregister_host_class(void);

#ifdef CONFIG_MMC_CLKGATE
void mmc_host_clk_ungate(struct mmc_host *host);
void mmc_host_clk_gate(struct mmc_host *host);
void mmc_host_clk_hold(struct mmc_host *host);
void mmc_host_clk_release(struct mmc_host *host);
unsigned int mmc_host_clk_rate(struct mmc_host *host);

#else
static inline void mmc_host_clk_ungate(struct mmc_host *host)
static inline void mmc_host_clk_hold(struct mmc_host *host)
{
}

static inline void mmc_host_clk_gate(struct mmc_host *host)
static inline void mmc_host_clk_release(struct mmc_host *host)
{
}

@@ -469,56 +469,75 @@ static int sd_select_driver_type(struct mmc_card *card, u8 *status)
    return 0;
}

static int sd_set_bus_speed_mode(struct mmc_card *card, u8 *status)
static void sd_update_bus_speed_mode(struct mmc_card *card)
{
    unsigned int bus_speed = 0, timing = 0;
    int err;

    /*
     * If the host doesn't support any of the UHS-I modes, fallback on
     * default speed.
     */
    if (!(card->host->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
        MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50)))
        return 0;
        MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50))) {
        card->sd_bus_speed = 0;
        return;
    }

    if ((card->host->caps & MMC_CAP_UHS_SDR104) &&
        (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR104)) {
        bus_speed = UHS_SDR104_BUS_SPEED;
        timing = MMC_TIMING_UHS_SDR104;
        card->sw_caps.uhs_max_dtr = UHS_SDR104_MAX_DTR;
        card->sd_bus_speed = UHS_SDR104_BUS_SPEED;
    } else if ((card->host->caps & MMC_CAP_UHS_DDR50) &&
        (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_DDR50)) {
        bus_speed = UHS_DDR50_BUS_SPEED;
        timing = MMC_TIMING_UHS_DDR50;
        card->sw_caps.uhs_max_dtr = UHS_DDR50_MAX_DTR;
        card->sd_bus_speed = UHS_DDR50_BUS_SPEED;
    } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
        MMC_CAP_UHS_SDR50)) && (card->sw_caps.sd3_bus_mode &
        SD_MODE_UHS_SDR50)) {
        bus_speed = UHS_SDR50_BUS_SPEED;
        timing = MMC_TIMING_UHS_SDR50;
        card->sw_caps.uhs_max_dtr = UHS_SDR50_MAX_DTR;
        card->sd_bus_speed = UHS_SDR50_BUS_SPEED;
    } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
        MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25)) &&
        (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR25)) {
        bus_speed = UHS_SDR25_BUS_SPEED;
        timing = MMC_TIMING_UHS_SDR25;
        card->sw_caps.uhs_max_dtr = UHS_SDR25_MAX_DTR;
        card->sd_bus_speed = UHS_SDR25_BUS_SPEED;
    } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
        MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25 |
        MMC_CAP_UHS_SDR12)) && (card->sw_caps.sd3_bus_mode &
        SD_MODE_UHS_SDR12)) {
        bus_speed = UHS_SDR12_BUS_SPEED;
        timing = MMC_TIMING_UHS_SDR12;
        card->sw_caps.uhs_max_dtr = UHS_SDR12_MAX_DTR;
        card->sd_bus_speed = UHS_SDR12_BUS_SPEED;
    }
}

static int sd_set_bus_speed_mode(struct mmc_card *card, u8 *status)
{
    int err;
    unsigned int timing = 0;

    switch (card->sd_bus_speed) {
    case UHS_SDR104_BUS_SPEED:
        timing = MMC_TIMING_UHS_SDR104;
        card->sw_caps.uhs_max_dtr = UHS_SDR104_MAX_DTR;
        break;
    case UHS_DDR50_BUS_SPEED:
        timing = MMC_TIMING_UHS_DDR50;
        card->sw_caps.uhs_max_dtr = UHS_DDR50_MAX_DTR;
        break;
    case UHS_SDR50_BUS_SPEED:
        timing = MMC_TIMING_UHS_SDR50;
        card->sw_caps.uhs_max_dtr = UHS_SDR50_MAX_DTR;
        break;
    case UHS_SDR25_BUS_SPEED:
        timing = MMC_TIMING_UHS_SDR25;
        card->sw_caps.uhs_max_dtr = UHS_SDR25_MAX_DTR;
        break;
    case UHS_SDR12_BUS_SPEED:
        timing = MMC_TIMING_UHS_SDR12;
        card->sw_caps.uhs_max_dtr = UHS_SDR12_MAX_DTR;
        break;
    default:
        return 0;
    }

    card->sd_bus_speed = bus_speed;
    err = mmc_sd_switch(card, 1, 0, bus_speed, status);
    err = mmc_sd_switch(card, 1, 0, card->sd_bus_speed, status);
    if (err)
        return err;

    if ((status[16] & 0xF) != bus_speed)
    if ((status[16] & 0xF) != card->sd_bus_speed)
        printk(KERN_WARNING "%s: Problem setting bus speed mode!\n",
            mmc_hostname(card->host));
    else {
@@ -618,18 +637,24 @@ static int mmc_sd_init_uhs_card(struct mmc_card *card)
        mmc_set_bus_width(card->host, MMC_BUS_WIDTH_4);
    }

    /*
     * Select the bus speed mode depending on host
     * and card capability.
     */
    sd_update_bus_speed_mode(card);

    /* Set the driver strength for the card */
    err = sd_select_driver_type(card, status);
    if (err)
        goto out;

    /* Set bus speed mode of the card */
    err = sd_set_bus_speed_mode(card, status);
    /* Set current limit for the card */
    err = sd_set_current_limit(card, status);
    if (err)
        goto out;

    /* Set current limit for the card */
    err = sd_set_current_limit(card, status);
    /* Set bus speed mode of the card */
    err = sd_set_bus_speed_mode(card, status);
    if (err)
        goto out;

@@ -16,6 +16,7 @@
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/gpio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
@@ -302,6 +302,8 @@ static int sdhci_s3c_platform_8bit_width(struct sdhci_host *host, int width)
		ctrl &= ~SDHCI_CTRL_8BITBUS;
		break;
	default:
		ctrl &= ~SDHCI_CTRL_4BITBUS;
		ctrl &= ~SDHCI_CTRL_8BITBUS;
		break;
	}
@@ -120,11 +120,11 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev)
	mmc_data->hclk = clk_get_rate(priv->clk);
	mmc_data->set_pwr = sh_mobile_sdhi_set_pwr;
	mmc_data->get_cd = sh_mobile_sdhi_get_cd;
	if (mmc_data->flags & TMIO_MMC_HAS_IDLE_WAIT)
		mmc_data->write16_hook = sh_mobile_sdhi_write16_hook;
	mmc_data->capabilities = MMC_CAP_MMC_HIGHSPEED;
	if (p) {
		mmc_data->flags = p->tmio_flags;
		if (mmc_data->flags & TMIO_MMC_HAS_IDLE_WAIT)
			mmc_data->write16_hook = sh_mobile_sdhi_write16_hook;
		mmc_data->ocr_mask = p->tmio_ocr_mask;
		mmc_data->capabilities |= p->tmio_caps;
@@ -181,7 +181,7 @@ static inline int ubi_dbg_is_erase_failure(const struct ubi_device *ubi)

#define ubi_dbg_msg(fmt, ...) do {                                   \
	if (0)                                                       \
		pr_debug(fmt "\n", ##__VA_ARGS__);                   \
	printk(KERN_DEBUG fmt "\n", ##__VA_ARGS__);                  \
} while (0)

#define dbg_msg(fmt, ...)  ubi_dbg_msg(fmt, ##__VA_ARGS__)
@@ -308,8 +311,8 @@ static void am79c961_timer(unsigned long data)
	struct net_device *dev = (struct net_device *)data;
	struct dev_priv *priv = netdev_priv(dev);
	unsigned int lnkstat, carrier;
	unsigned long flags;

	spin_lock_irqsave(&priv->chip_lock, flags);
	lnkstat = read_ireg(dev->base_addr, ISALED0) & ISALED0_LNKST;
	spin_unlock_irqrestore(&priv->chip_lock, flags);
	carrier = netif_carrier_ok(dev);

	if (lnkstat && !carrier) {
@@ -169,7 +169,9 @@ void pci_configure_slot(struct pci_dev *dev)
	     (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI)))
		return;

	pcie_bus_configure_settings(dev->bus, dev->bus->self->pcie_mpss);
	if (dev->bus && dev->bus->self)
		pcie_bus_configure_settings(dev->bus,
					    dev->bus->self->pcie_mpss);

	memset(&hpp, 0, sizeof(hpp));
	ret = pci_get_hp_params(dev, &hpp);
@@ -77,7 +77,7 @@ unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;
unsigned long pci_hotplug_io_size  = DEFAULT_HOTPLUG_IO_SIZE;
unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;

enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PERFORMANCE;
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_SAFE;

/*
 * The default CLS is used if arch didn't set CLS explicitly and not
@@ -1396,34 +1396,37 @@ static void pcie_write_mps(struct pci_dev *dev, int mps)

static void pcie_write_mrrs(struct pci_dev *dev, int mps)
{
	int rc, mrrs;
	int rc, mrrs, dev_mpss;

	if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
		int dev_mpss = 128 << dev->pcie_mpss;
	/* In the "safe" case, do not configure the MRRS. There appear to be
	 * issues with setting MRRS to 0 on a number of devices.
	 */

		/* For Max performance, the MRRS must be set to the largest
		 * supported value. However, it cannot be configured larger
		 * than the MPS the device or the bus can support. This assumes
		 * that the largest MRRS available on the device cannot be
		 * smaller than the device MPSS.
		 */
		mrrs = mps < dev_mpss ? mps : dev_mpss;
	} else
		/* In the "safe" case, configure the MRRS for fairness on the
		 * bus by making all devices have the same size
		 */
		mrrs = mps;
	if (pcie_bus_config != PCIE_BUS_PERFORMANCE)
		return;

	dev_mpss = 128 << dev->pcie_mpss;

	/* For Max performance, the MRRS must be set to the largest supported
	 * value. However, it cannot be configured larger than the MPS the
	 * device or the bus can support. This assumes that the largest MRRS
	 * available on the device cannot be smaller than the device MPSS.
	 */
	mrrs = min(mps, dev_mpss);

	/* MRRS is a R/W register. Invalid values can be written, but a
	 * subsiquent read will verify if the value is acceptable or not.
	 * subsequent read will verify if the value is acceptable or not.
	 * If the MRRS value provided is not acceptable (e.g., too large),
	 * shrink the value until it is acceptable to the HW.
	 */
	while (mrrs != pcie_get_readrq(dev) && mrrs >= 128) {
		dev_warn(&dev->dev, "Attempting to modify the PCI-E MRRS value"
			 " to %d. If any issues are encountered, please try "
			 "running with pci=pcie_bus_safe\n", mrrs);
		rc = pcie_set_readrq(dev, mrrs);
		if (rc)
			dev_err(&dev->dev, "Failed attempting to set the MRRS\n");
			dev_err(&dev->dev,
				"Failed attempting to set the MRRS\n");

		mrrs /= 2;
	}
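The loop above works because MRRS is a read/write register whose accepted value can only be confirmed by reading it back. As a rough, hedged illustration of that negotiate-by-halving pattern outside the kernel, here is a self-contained sketch; `try_set_readrq()` is a made-up stand-in for the pcie_set_readrq()/pcie_get_readrq() pair, with `hw_cap` simulating what the hardware will accept:

```c
#include <stdio.h>

/* Hypothetical device model: a write is only honoured when the value
 * does not exceed the simulated hardware capability. */
static int hw_cap = 512;
static int current_readrq = 128;   /* power-on default */

static void try_set_readrq(int value)
{
	if (value <= hw_cap)
		current_readrq = value;   /* accepted */
	/* otherwise the write is silently ignored, as the comment in
	 * pcie_write_mrrs() describes for real MRRS hardware */
}

int main(void)
{
	int mrrs = 4096;   /* start from the largest value we want */

	/* Same shape as the kernel loop: write, read back, and halve
	 * until the device accepts the value or we hit the floor. */
	while (mrrs != current_readrq && mrrs >= 128) {
		try_set_readrq(mrrs);
		if (mrrs != current_readrq)
			mrrs /= 2;
	}
	printf("negotiated MRRS: %d\n", current_readrq);
	return 0;
}
```

With the values above, 4096 and the intermediate sizes are refused and the loop settles on 512, mirroring how the kernel shrinks toward a value the hardware will actually take.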
@@ -1436,13 +1439,13 @@ static int pcie_bus_configure_set(struct pci_dev *dev, void *data)
	if (!pci_is_pcie(dev))
		return 0;

	dev_info(&dev->dev, "Dev MPS %d MPSS %d MRRS %d\n",
	dev_dbg(&dev->dev, "Dev MPS %d MPSS %d MRRS %d\n",
		pcie_get_mps(dev), 128<<dev->pcie_mpss, pcie_get_readrq(dev));

	pcie_write_mps(dev, mps);
	pcie_write_mrrs(dev, mps);

	dev_info(&dev->dev, "Dev MPS %d MPSS %d MRRS %d\n",
	dev_dbg(&dev->dev, "Dev MPS %d MPSS %d MRRS %d\n",
		pcie_get_mps(dev), 128<<dev->pcie_mpss, pcie_get_readrq(dev));

	return 0;
@@ -1456,9 +1459,6 @@ void pcie_bus_configure_settings(struct pci_bus *bus, u8 mpss)
{
	u8 smpss = mpss;

	if (!bus->self)
		return;

	if (!pci_is_pcie(bus->self))
		return;
@@ -36,6 +36,7 @@
 */
struct ep93xx_rtc {
	void __iomem	*mmio_base;
	struct rtc_device *rtc;
};

static int ep93xx_rtc_get_swcomp(struct device *dev, unsigned short *preload,
@@ -130,7 +131,6 @@ static int __init ep93xx_rtc_probe(struct platform_device *pdev)
{
	struct ep93xx_rtc *ep93xx_rtc;
	struct resource *res;
	struct rtc_device *rtc;
	int err;

	ep93xx_rtc = devm_kzalloc(&pdev->dev, sizeof(*ep93xx_rtc), GFP_KERNEL);
@@ -151,12 +151,12 @@ static int __init ep93xx_rtc_probe(struct platform_device *pdev)
		return -ENXIO;

	pdev->dev.platform_data = ep93xx_rtc;
	platform_set_drvdata(pdev, rtc);
	platform_set_drvdata(pdev, ep93xx_rtc);

	rtc = rtc_device_register(pdev->name,
	ep93xx_rtc->rtc = rtc_device_register(pdev->name,
				&pdev->dev, &ep93xx_rtc_ops, THIS_MODULE);
	if (IS_ERR(rtc)) {
		err = PTR_ERR(rtc);
	if (IS_ERR(ep93xx_rtc->rtc)) {
		err = PTR_ERR(ep93xx_rtc->rtc);
		goto exit;
	}

@@ -167,7 +167,7 @@ static int __init ep93xx_rtc_probe(struct platform_device *pdev)
	return 0;

fail:
	rtc_device_unregister(rtc);
	rtc_device_unregister(ep93xx_rtc->rtc);
exit:
	platform_set_drvdata(pdev, NULL);
	pdev->dev.platform_data = NULL;
@@ -176,11 +176,11 @@ exit:

static int __exit ep93xx_rtc_remove(struct platform_device *pdev)
{
	struct rtc_device *rtc = platform_get_drvdata(pdev);
	struct ep93xx_rtc *ep93xx_rtc = platform_get_drvdata(pdev);

	sysfs_remove_group(&pdev->dev.kobj, &ep93xx_rtc_sysfs_files);
	platform_set_drvdata(pdev, NULL);
	rtc_device_unregister(rtc);
	rtc_device_unregister(ep93xx_rtc->rtc);
	pdev->dev.platform_data = NULL;

	return 0;
@@ -85,6 +85,8 @@ void rtc_time_to_tm(unsigned long time, struct rtc_time *tm)
	time -= tm->tm_hour * 3600;
	tm->tm_min = time / 60;
	tm->tm_sec = time - tm->tm_min * 60;

	tm->tm_isdst = 0;
}
EXPORT_SYMBOL(rtc_time_to_tm);
@@ -362,14 +362,6 @@ static irqreturn_t twl_rtc_interrupt(int irq, void *rtc)
	int res;
	u8 rd_reg;

#ifdef CONFIG_LOCKDEP
	/* WORKAROUND for lockdep forcing IRQF_DISABLED on us, which
	 * we don't want and can't tolerate.  Although it might be
	 * friendlier not to borrow this thread context...
	 */
	local_irq_enable();
#endif

	res = twl_rtc_read_u8(&rd_reg, REG_RTC_STATUS_REG);
	if (res)
		goto out;
@@ -428,24 +420,12 @@ static struct rtc_class_ops twl_rtc_ops = {
static int __devinit twl_rtc_probe(struct platform_device *pdev)
{
	struct rtc_device *rtc;
	int ret = 0;
	int ret = -EINVAL;
	int irq = platform_get_irq(pdev, 0);
	u8 rd_reg;

	if (irq <= 0)
		return -EINVAL;

	rtc = rtc_device_register(pdev->name,
				  &pdev->dev, &twl_rtc_ops, THIS_MODULE);
	if (IS_ERR(rtc)) {
		ret = PTR_ERR(rtc);
		dev_err(&pdev->dev, "can't register RTC device, err %ld\n",
			PTR_ERR(rtc));
		goto out0;

	}

	platform_set_drvdata(pdev, rtc);
		goto out1;

	ret = twl_rtc_read_u8(&rd_reg, REG_RTC_STATUS_REG);
	if (ret < 0)
@@ -462,14 +442,6 @@ static int __devinit twl_rtc_probe(struct platform_device *pdev)
	if (ret < 0)
		goto out1;

	ret = request_irq(irq, twl_rtc_interrupt,
			  IRQF_TRIGGER_RISING,
			  dev_name(&rtc->dev), rtc);
	if (ret < 0) {
		dev_err(&pdev->dev, "IRQ is not free.\n");
		goto out1;
	}

	if (twl_class_is_6030()) {
		twl6030_interrupt_unmask(TWL6030_RTC_INT_MASK,
					 REG_INT_MSK_LINE_A);
@@ -480,28 +452,44 @@ static int __devinit twl_rtc_probe(struct platform_device *pdev)
	/* Check RTC module status, Enable if it is off */
	ret = twl_rtc_read_u8(&rd_reg, REG_RTC_CTRL_REG);
	if (ret < 0)
		goto out2;
		goto out1;

	if (!(rd_reg & BIT_RTC_CTRL_REG_STOP_RTC_M)) {
		dev_info(&pdev->dev, "Enabling TWL-RTC.\n");
		rd_reg = BIT_RTC_CTRL_REG_STOP_RTC_M;
		ret = twl_rtc_write_u8(rd_reg, REG_RTC_CTRL_REG);
		if (ret < 0)
			goto out2;
			goto out1;
	}

	/* init cached IRQ enable bits */
	ret = twl_rtc_read_u8(&rtc_irq_bits, REG_RTC_INTERRUPTS_REG);
	if (ret < 0)
		goto out2;
		goto out1;

	return ret;
	rtc = rtc_device_register(pdev->name,
				  &pdev->dev, &twl_rtc_ops, THIS_MODULE);
	if (IS_ERR(rtc)) {
		ret = PTR_ERR(rtc);
		dev_err(&pdev->dev, "can't register RTC device, err %ld\n",
			PTR_ERR(rtc));
		goto out1;
	}

	ret = request_threaded_irq(irq, NULL, twl_rtc_interrupt,
				   IRQF_TRIGGER_RISING,
				   dev_name(&rtc->dev), rtc);
	if (ret < 0) {
		dev_err(&pdev->dev, "IRQ is not free.\n");
		goto out2;
	}

	platform_set_drvdata(pdev, rtc);
	return 0;

out2:
	free_irq(irq, rtc);
out1:
	rtc_device_unregister(rtc);
out0:
out1:
	return ret;
}
@@ -19,7 +19,7 @@
#include <asm/backlight.h>
#endif

static const char const *backlight_types[] = {
static const char *const backlight_types[] = {
	[BACKLIGHT_RAW] = "raw",
	[BACKLIGHT_PLATFORM] = "platform",
	[BACKLIGHT_FIRMWARE] = "firmware",
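For readers puzzled by the one-character fix above: `const char const *` merely duplicates the qualifier on the pointed-to characters (compilers typically only warn), while the intended `const char *const` also makes the array elements themselves unmodifiable pointers. A small stand-alone illustration (the array name here is made up):

```c
#include <stdio.h>

/* pointers to const chars, and the pointers themselves are const */
static const char *const fruit[] = { "apple", "pear" };

int main(void)
{
	/* fruit[0] = "plum";      error: the pointer slots are const */
	/* fruit[0][0] = 'A';      error: the characters are const    */
	printf("%s\n", fruit[0]);  /* reading is fine */
	return 0;
}
```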
@@ -54,9 +54,9 @@ extern struct kmem_cache *v9fs_inode_cache;

struct inode *v9fs_alloc_inode(struct super_block *sb);
void v9fs_destroy_inode(struct inode *inode);
struct inode *v9fs_get_inode(struct super_block *sb, int mode);
struct inode *v9fs_get_inode(struct super_block *sb, int mode, dev_t);
int v9fs_init_inode(struct v9fs_session_info *v9ses,
		    struct inode *inode, int mode);
		    struct inode *inode, int mode, dev_t);
void v9fs_evict_inode(struct inode *inode);
ino_t v9fs_qid2ino(struct p9_qid *qid);
void v9fs_stat2inode(struct p9_wstat *, struct inode *, struct super_block *);
@@ -83,4 +83,6 @@ static inline void v9fs_invalidate_inode_attr(struct inode *inode)
	v9inode->cache_validity |= V9FS_INO_INVALID_ATTR;
	return;
}

int v9fs_open_to_dotl_flags(int flags);
#endif
@@ -65,7 +65,7 @@ int v9fs_file_open(struct inode *inode, struct file *file)
	v9inode = V9FS_I(inode);
	v9ses = v9fs_inode2v9ses(inode);
	if (v9fs_proto_dotl(v9ses))
		omode = file->f_flags;
		omode = v9fs_open_to_dotl_flags(file->f_flags);
	else
		omode = v9fs_uflags2omode(file->f_flags,
					  v9fs_proto_dotu(v9ses));
@@ -169,7 +169,18 @@ static int v9fs_file_do_lock(struct file *filp, int cmd, struct file_lock *fl)

	/* convert posix lock to p9 tlock args */
	memset(&flock, 0, sizeof(flock));
	flock.type = fl->fl_type;
	/* map the lock type */
	switch (fl->fl_type) {
	case F_RDLCK:
		flock.type = P9_LOCK_TYPE_RDLCK;
		break;
	case F_WRLCK:
		flock.type = P9_LOCK_TYPE_WRLCK;
		break;
	case F_UNLCK:
		flock.type = P9_LOCK_TYPE_UNLCK;
		break;
	}
	flock.start = fl->fl_start;
	if (fl->fl_end == OFFSET_MAX)
		flock.length = 0;
@@ -245,7 +256,7 @@ static int v9fs_file_getlock(struct file *filp, struct file_lock *fl)

	/* convert posix lock to p9 tgetlock args */
	memset(&glock, 0, sizeof(glock));
	glock.type = fl->fl_type;
	glock.type = P9_LOCK_TYPE_UNLCK;
	glock.start = fl->fl_start;
	if (fl->fl_end == OFFSET_MAX)
		glock.length = 0;
@@ -257,17 +268,26 @@ static int v9fs_file_getlock(struct file *filp, struct file_lock *fl)
	res = p9_client_getlock_dotl(fid, &glock);
	if (res < 0)
		return res;
	if (glock.type != F_UNLCK) {
		fl->fl_type = glock.type;
	/* map 9p lock type to os lock type */
	switch (glock.type) {
	case P9_LOCK_TYPE_RDLCK:
		fl->fl_type = F_RDLCK;
		break;
	case P9_LOCK_TYPE_WRLCK:
		fl->fl_type = F_WRLCK;
		break;
	case P9_LOCK_TYPE_UNLCK:
		fl->fl_type = F_UNLCK;
		break;
	}
	if (glock.type != P9_LOCK_TYPE_UNLCK) {
		fl->fl_start = glock.start;
		if (glock.length == 0)
			fl->fl_end = OFFSET_MAX;
		else
			fl->fl_end = glock.start + glock.length - 1;
		fl->fl_pid = glock.proc_id;
	} else
		fl->fl_type = F_UNLCK;

	}
	return res;
}
@@ -95,15 +95,18 @@ static int unixmode2p9mode(struct v9fs_session_info *v9ses, int mode)
/**
 * p9mode2unixmode- convert plan9 mode bits to unix mode bits
 * @v9ses: v9fs session information
 * @mode: mode to convert
 * @stat: p9_wstat from which mode need to be derived
 * @rdev: major number, minor number in case of device files.
 *
 */

static int p9mode2unixmode(struct v9fs_session_info *v9ses, int mode)
static int p9mode2unixmode(struct v9fs_session_info *v9ses,
			   struct p9_wstat *stat, dev_t *rdev)
{
	int res;
	int mode = stat->mode;

	res = mode & 0777;
	res = mode & S_IALLUGO;
	*rdev = 0;

	if ((mode & P9_DMDIR) == P9_DMDIR)
		res |= S_IFDIR;
@@ -116,9 +119,26 @@ static int p9mode2unixmode(struct v9fs_session_info *v9ses, int mode)
		 && (v9ses->nodev == 0))
		res |= S_IFIFO;
	else if ((mode & P9_DMDEVICE) && (v9fs_proto_dotu(v9ses))
		 && (v9ses->nodev == 0))
		res |= S_IFBLK;
	else
		 && (v9ses->nodev == 0)) {
		char type = 0, ext[32];
		int major = -1, minor = -1;

		strncpy(ext, stat->extension, sizeof(ext));
		sscanf(ext, "%c %u %u", &type, &major, &minor);
		switch (type) {
		case 'c':
			res |= S_IFCHR;
			break;
		case 'b':
			res |= S_IFBLK;
			break;
		default:
			P9_DPRINTK(P9_DEBUG_ERROR,
				   "Unknown special type %c %s\n", type,
				   stat->extension);
		};
		*rdev = MKDEV(major, minor);
	} else
		res |= S_IFREG;

	if (v9fs_proto_dotu(v9ses)) {
@@ -131,7 +151,6 @@ static int p9mode2unixmode(struct v9fs_session_info *v9ses, int mode)
		if ((mode & P9_DMSETVTX) == P9_DMSETVTX)
			res |= S_ISVTX;
	}

	return res;
}
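For context on the parsing the new p9mode2unixmode() performs: the legacy 9P2000.u protocol encodes a device node in the stat "extension" string as a type character followed by major and minor numbers, e.g. "b 8 0" for a block device. A hedged user-space sketch of that parse (MKDEV is mimicked with an illustrative macro, not the kernel's definition; the extension value is an assumed example):

```c
#include <stdio.h>
#include <string.h>

/* illustrative stand-in for the kernel's MKDEV() */
#define FAKE_MKDEV(ma, mi) (((ma) << 20) | (mi))

int main(void)
{
	const char *extension = "b 8 0";  /* as a 9p server might send */
	char ext[32], type = 0;
	unsigned int major = 0, minor = 0;

	/* copy into a bounded buffer before scanning, as the hunk does */
	strncpy(ext, extension, sizeof(ext) - 1);
	ext[sizeof(ext) - 1] = '\0';
	sscanf(ext, "%c %u %u", &type, &major, &minor);

	printf("type=%c rdev=%#x\n", type, FAKE_MKDEV(major, minor));
	return 0;
}
```

The kernel version then folds the result into the inode's mode bits (S_IFCHR or S_IFBLK) and returns the device number through the new *rdev out-parameter instead of patching it up later in v9fs_stat2inode().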
@@ -242,13 +261,13 @@ void v9fs_destroy_inode(struct inode *inode)
}

int v9fs_init_inode(struct v9fs_session_info *v9ses,
		    struct inode *inode, int mode)
		    struct inode *inode, int mode, dev_t rdev)
{
	int err = 0;

	inode_init_owner(inode, NULL, mode);
	inode->i_blocks = 0;
	inode->i_rdev = 0;
	inode->i_rdev = rdev;
	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	inode->i_mapping->a_ops = &v9fs_addr_operations;

@@ -335,7 +354,7 @@ error:
 *
 */

struct inode *v9fs_get_inode(struct super_block *sb, int mode)
struct inode *v9fs_get_inode(struct super_block *sb, int mode, dev_t rdev)
{
	int err;
	struct inode *inode;
@@ -348,7 +367,7 @@ struct inode *v9fs_get_inode(struct super_block *sb, int mode)
		P9_EPRINTK(KERN_WARNING, "Problem allocating inode\n");
		return ERR_PTR(-ENOMEM);
	}
	err = v9fs_init_inode(v9ses, inode, mode);
	err = v9fs_init_inode(v9ses, inode, mode, rdev);
	if (err) {
		iput(inode);
		return ERR_PTR(err);
@@ -435,11 +454,12 @@ void v9fs_evict_inode(struct inode *inode)
static int v9fs_test_inode(struct inode *inode, void *data)
{
	int umode;
	dev_t rdev;
	struct v9fs_inode *v9inode = V9FS_I(inode);
	struct p9_wstat *st = (struct p9_wstat *)data;
	struct v9fs_session_info *v9ses = v9fs_inode2v9ses(inode);

	umode = p9mode2unixmode(v9ses, st->mode);
	umode = p9mode2unixmode(v9ses, st, &rdev);
	/* don't match inode of different type */
	if ((inode->i_mode & S_IFMT) != (umode & S_IFMT))
		return 0;
@@ -473,6 +493,7 @@ static struct inode *v9fs_qid_iget(struct super_block *sb,
				   struct p9_wstat *st,
				   int new)
{
	dev_t rdev;
	int retval, umode;
	unsigned long i_ino;
	struct inode *inode;
@@ -496,8 +517,8 @@ static struct inode *v9fs_qid_iget(struct super_block *sb,
	 * later.
	 */
	inode->i_ino = i_ino;
	umode = p9mode2unixmode(v9ses, st->mode);
	retval = v9fs_init_inode(v9ses, inode, umode);
	umode = p9mode2unixmode(v9ses, st, &rdev);
	retval = v9fs_init_inode(v9ses, inode, umode, rdev);
	if (retval)
		goto error;

@@ -531,6 +552,19 @@ v9fs_inode_from_fid(struct v9fs_session_info *v9ses, struct p9_fid *fid,
	return inode;
}

/**
 * v9fs_at_to_dotl_flags- convert Linux specific AT flags to
 * plan 9 AT flag.
 * @flags: flags to convert
 */
static int v9fs_at_to_dotl_flags(int flags)
{
	int rflags = 0;
	if (flags & AT_REMOVEDIR)
		rflags |= P9_DOTL_AT_REMOVEDIR;
	return rflags;
}

/**
 * v9fs_remove - helper function to remove files and directories
 * @dir: directory inode that is being deleted
@@ -558,7 +592,8 @@ static int v9fs_remove(struct inode *dir, struct dentry *dentry, int flags)
		return retval;
	}
	if (v9fs_proto_dotl(v9ses))
		retval = p9_client_unlinkat(dfid, dentry->d_name.name, flags);
		retval = p9_client_unlinkat(dfid, dentry->d_name.name,
					    v9fs_at_to_dotl_flags(flags));
	if (retval == -EOPNOTSUPP) {
		/* Try the one based on path */
		v9fid = v9fs_fid_clone(dentry);
@@ -645,13 +680,11 @@ v9fs_create(struct v9fs_session_info *v9ses, struct inode *dir,
		P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n", err);
		goto error;
	}
	d_instantiate(dentry, inode);
	err = v9fs_fid_add(dentry, fid);
	if (err < 0)
		goto error;

	d_instantiate(dentry, inode);
	return ofid;

error:
	if (ofid)
		p9_client_clunk(ofid);
@@ -792,6 +825,7 @@ static int v9fs_vfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
			       struct nameidata *nameidata)
{
	struct dentry *res;
	struct super_block *sb;
	struct v9fs_session_info *v9ses;
	struct p9_fid *dfid, *fid;
@@ -823,22 +857,35 @@ struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,

		return ERR_PTR(result);
	}

	inode = v9fs_get_inode_from_fid(v9ses, fid, dir->i_sb);
	/*
	 * Make sure we don't use a wrong inode due to parallel
	 * unlink. For cached mode create calls request for new
	 * inode. But with cache disabled, lookup should do this.
	 */
	if (v9ses->cache)
		inode = v9fs_get_inode_from_fid(v9ses, fid, dir->i_sb);
	else
		inode = v9fs_get_new_inode_from_fid(v9ses, fid, dir->i_sb);
	if (IS_ERR(inode)) {
		result = PTR_ERR(inode);
		inode = NULL;
		goto error;
	}

	result = v9fs_fid_add(dentry, fid);
	if (result < 0)
		goto error_iput;

inst_out:
	d_add(dentry, inode);
	return NULL;

	/*
	 * If we had a rename on the server and a parallel lookup
	 * for the new name, then make sure we instantiate with
	 * the new name. ie look up for a/b, while on server somebody
	 * moved b under k and client parallely did a lookup for
	 * k/b.
	 */
	res = d_materialise_unique(dentry, inode);
	if (!IS_ERR(res))
		return res;
	result = PTR_ERR(res);
error_iput:
	iput(inode);
error:
@@ -1002,7 +1049,7 @@ v9fs_vfs_getattr(struct vfsmount *mnt, struct dentry *dentry,
		return PTR_ERR(st);

	v9fs_stat2inode(st, dentry->d_inode, dentry->d_inode->i_sb);
		generic_fillattr(dentry->d_inode, stat);
	generic_fillattr(dentry->d_inode, stat);

	p9stat_free(st);
	kfree(st);
@@ -1086,6 +1133,7 @@ void
v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode,
		 struct super_block *sb)
{
	mode_t mode;
	char ext[32];
	char tag_name[14];
	unsigned int i_nlink;
@@ -1121,31 +1169,9 @@ v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode,
			inode->i_nlink = i_nlink;
		}
	}
	inode->i_mode = p9mode2unixmode(v9ses, stat->mode);
	if ((S_ISBLK(inode->i_mode)) || (S_ISCHR(inode->i_mode))) {
		char type = 0;
		int major = -1;
		int minor = -1;

		strncpy(ext, stat->extension, sizeof(ext));
		sscanf(ext, "%c %u %u", &type, &major, &minor);
		switch (type) {
		case 'c':
			inode->i_mode &= ~S_IFBLK;
			inode->i_mode |= S_IFCHR;
			break;
		case 'b':
			break;
		default:
			P9_DPRINTK(P9_DEBUG_ERROR,
				   "Unknown special type %c %s\n", type,
				   stat->extension);
		};
		inode->i_rdev = MKDEV(major, minor);
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
	} else
		inode->i_rdev = 0;

	mode = stat->mode & S_IALLUGO;
	mode |= inode->i_mode & ~S_IALLUGO;
	inode->i_mode = mode;
	i_size_write(inode, stat->length);

	/* not real number of blocks, but 512 byte ones ... */
@@ -1411,6 +1437,8 @@ v9fs_vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t rdev)

int v9fs_refresh_inode(struct p9_fid *fid, struct inode *inode)
{
	int umode;
	dev_t rdev;
	loff_t i_size;
	struct p9_wstat *st;
	struct v9fs_session_info *v9ses;
@@ -1419,6 +1447,12 @@ int v9fs_refresh_inode(struct p9_fid *fid, struct inode *inode)
	st = p9_client_stat(fid);
	if (IS_ERR(st))
		return PTR_ERR(st);
	/*
	 * Don't update inode if the file type is different
	 */
	umode = p9mode2unixmode(v9ses, st, &rdev);
	if ((inode->i_mode & S_IFMT) != (umode & S_IFMT))
		goto out;

	spin_lock(&inode->i_lock);
	/*
@@ -1430,6 +1464,7 @@ int v9fs_refresh_inode(struct p9_fid *fid, struct inode *inode)
	if (v9ses->cache)
		inode->i_size = i_size;
	spin_unlock(&inode->i_lock);
out:
	p9stat_free(st);
	kfree(st);
	return 0;
@@ -153,7 +153,8 @@ static struct inode *v9fs_qid_iget_dotl(struct super_block *sb,
	 * later.
	 */
	inode->i_ino = i_ino;
	retval = v9fs_init_inode(v9ses, inode, st->st_mode);
	retval = v9fs_init_inode(v9ses, inode,
				 st->st_mode, new_decode_dev(st->st_rdev));
	if (retval)
		goto error;

@@ -190,6 +191,58 @@ v9fs_inode_from_fid_dotl(struct v9fs_session_info *v9ses, struct p9_fid *fid,
	return inode;
}

struct dotl_openflag_map {
	int open_flag;
	int dotl_flag;
};

static int v9fs_mapped_dotl_flags(int flags)
{
	int i;
	int rflags = 0;
	struct dotl_openflag_map dotl_oflag_map[] = {
		{ O_CREAT,	P9_DOTL_CREATE },
		{ O_EXCL,	P9_DOTL_EXCL },
		{ O_NOCTTY,	P9_DOTL_NOCTTY },
		{ O_TRUNC,	P9_DOTL_TRUNC },
		{ O_APPEND,	P9_DOTL_APPEND },
		{ O_NONBLOCK,	P9_DOTL_NONBLOCK },
		{ O_DSYNC,	P9_DOTL_DSYNC },
		{ FASYNC,	P9_DOTL_FASYNC },
		{ O_DIRECT,	P9_DOTL_DIRECT },
		{ O_LARGEFILE,	P9_DOTL_LARGEFILE },
		{ O_DIRECTORY,	P9_DOTL_DIRECTORY },
		{ O_NOFOLLOW,	P9_DOTL_NOFOLLOW },
		{ O_NOATIME,	P9_DOTL_NOATIME },
		{ O_CLOEXEC,	P9_DOTL_CLOEXEC },
		{ O_SYNC,	P9_DOTL_SYNC},
	};
	for (i = 0; i < ARRAY_SIZE(dotl_oflag_map); i++) {
		if (flags & dotl_oflag_map[i].open_flag)
			rflags |= dotl_oflag_map[i].dotl_flag;
	}
	return rflags;
}

/**
 * v9fs_open_to_dotl_flags- convert Linux specific open flags to
 * plan 9 open flag.
 * @flags: flags to convert
 */
int v9fs_open_to_dotl_flags(int flags)
{
	int rflags = 0;

	/*
	 * We have same bits for P9_DOTL_READONLY, P9_DOTL_WRONLY
	 * and P9_DOTL_NOACCESS
	 */
	rflags |= flags & O_ACCMODE;
	rflags |= v9fs_mapped_dotl_flags(flags);

	return rflags;
}
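The point of this table-driven translation is that Linux O_* values are not part of the 9P2000.L wire protocol, so flags must be re-encoded before they reach the server; only the O_ACCMODE bits happen to share an encoding and can be copied through. A hedged sketch of the same idea, using made-up stand-in constants rather than the real P9_DOTL_* values:

```c
#include <fcntl.h>
#include <stdio.h>

/* Stand-in wire values; the real ones are the P9_DOTL_* octal
 * constants added to include/net/9p/9p.h in this series. */
#define X9_CREATE 00000100
#define X9_TRUNC  00001000

struct oflag_map { int open_flag; int wire_flag; };

static int to_wire_flags(int flags)
{
	static const struct oflag_map map[] = {
		{ O_CREAT, X9_CREATE },
		{ O_TRUNC, X9_TRUNC },
	};
	unsigned int i;
	int rflags = flags & O_ACCMODE;  /* access-mode bits pass through */

	for (i = 0; i < sizeof(map) / sizeof(map[0]); i++)
		if (flags & map[i].open_flag)
			rflags |= map[i].wire_flag;
	return rflags;
}

int main(void)
{
	/* e.g. a typical create-for-write open */
	printf("%#o\n", to_wire_flags(O_WRONLY | O_CREAT | O_TRUNC));
	return 0;
}
```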
/**
 * v9fs_vfs_create_dotl - VFS hook to create files for 9P2000.L protocol.
 * @dir: directory inode that is being created
@@ -258,7 +311,8 @@ v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, int omode,
			   "Failed to get acl values in creat %d\n", err);
		goto error;
	}
	err = p9_client_create_dotl(ofid, name, flags, mode, gid, &qid);
	err = p9_client_create_dotl(ofid, name, v9fs_open_to_dotl_flags(flags),
				    mode, gid, &qid);
	if (err < 0) {
		P9_DPRINTK(P9_DEBUG_VFS,
			   "p9_client_open_dotl failed in creat %d\n",
@@ -281,10 +335,10 @@ v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, int omode,
		P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n", err);
		goto error;
	}
	d_instantiate(dentry, inode);
	err = v9fs_fid_add(dentry, fid);
	if (err < 0)
		goto error;
	d_instantiate(dentry, inode);

	/* Now set the ACL based on the default value */
	v9fs_set_create_acl(dentry, &dacl, &pacl);
@@ -403,10 +457,10 @@ static int v9fs_vfs_mkdir_dotl(struct inode *dir,
			   err);
		goto error;
	}
	d_instantiate(dentry, inode);
	err = v9fs_fid_add(dentry, fid);
	if (err < 0)
		goto error;
	d_instantiate(dentry, inode);
	fid = NULL;
} else {
	/*
@@ -414,7 +468,7 @@ static int v9fs_vfs_mkdir_dotl(struct inode *dir,
	 * inode with stat. We need to get an inode
	 * so that we can set the acl with dentry
	 */
	inode = v9fs_get_inode(dir->i_sb, mode);
	inode = v9fs_get_inode(dir->i_sb, mode, 0);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		goto error;
@@ -540,6 +594,7 @@ int v9fs_vfs_setattr_dotl(struct dentry *dentry, struct iattr *iattr)
void
v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode)
{
	mode_t mode;
	struct v9fs_inode *v9inode = V9FS_I(inode);

	if ((stat->st_result_mask & P9_STATS_BASIC) == P9_STATS_BASIC) {
@@ -552,11 +607,10 @@ v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode)
		inode->i_uid = stat->st_uid;
		inode->i_gid = stat->st_gid;
		inode->i_nlink = stat->st_nlink;
		inode->i_mode = stat->st_mode;
		inode->i_rdev = new_decode_dev(stat->st_rdev);

		if ((S_ISBLK(inode->i_mode)) || (S_ISCHR(inode->i_mode)))
			init_special_inode(inode, inode->i_mode, inode->i_rdev);
		mode = stat->st_mode & S_IALLUGO;
		mode |= inode->i_mode & ~S_IALLUGO;
		inode->i_mode = mode;

		i_size_write(inode, stat->st_size);
		inode->i_blocks = stat->st_blocks;
@@ -657,14 +711,14 @@ v9fs_vfs_symlink_dotl(struct inode *dir, struct dentry *dentry,
			   err);
		goto error;
	}
	d_instantiate(dentry, inode);
	err = v9fs_fid_add(dentry, fid);
	if (err < 0)
		goto error;
	d_instantiate(dentry, inode);
	fid = NULL;
} else {
	/* Not in cached mode. No need to populate inode with stat */
	inode = v9fs_get_inode(dir->i_sb, S_IFLNK);
	inode = v9fs_get_inode(dir->i_sb, S_IFLNK, 0);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		goto error;
@@ -810,17 +864,17 @@ v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, int omode,
			   err);
		goto error;
	}
	d_instantiate(dentry, inode);
	err = v9fs_fid_add(dentry, fid);
	if (err < 0)
		goto error;
	d_instantiate(dentry, inode);
	fid = NULL;
} else {
	/*
	 * Not in cached mode. No need to populate inode with stat.
	 * socket syscall returns a fd, so we need instantiate
	 */
	inode = v9fs_get_inode(dir->i_sb, mode);
	inode = v9fs_get_inode(dir->i_sb, mode, rdev);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		goto error;
@@ -886,6 +940,11 @@ int v9fs_refresh_inode_dotl(struct p9_fid *fid, struct inode *inode)
	st = p9_client_getattr_dotl(fid, P9_STATS_ALL);
	if (IS_ERR(st))
		return PTR_ERR(st);
	/*
	 * Don't update inode if the file type is different
	 */
	if ((inode->i_mode & S_IFMT) != (st->st_mode & S_IFMT))
		goto out;

	spin_lock(&inode->i_lock);
	/*
@@ -897,6 +956,7 @@ int v9fs_refresh_inode_dotl(struct p9_fid *fid, struct inode *inode)
	if (v9ses->cache)
		inode->i_size = i_size;
	spin_unlock(&inode->i_lock);
out:
	kfree(st);
	return 0;
}
@@ -149,7 +149,7 @@ static struct dentry *v9fs_mount(struct file_system_type *fs_type, int flags,
	else
		sb->s_d_op = &v9fs_dentry_operations;

	inode = v9fs_get_inode(sb, S_IFDIR | mode);
	inode = v9fs_get_inode(sb, S_IFDIR | mode, 0);
	if (IS_ERR(inode)) {
		retval = PTR_ERR(inode);
		goto release_sb;
@@ -1429,6 +1429,11 @@ static int __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part)
		WARN_ON_ONCE(bdev->bd_holders);
		sync_blockdev(bdev);
		kill_bdev(bdev);
		/* ->release can cause the old bdi to disappear,
		 * so must switch it out first
		 */
		bdev_inode_switch_bdi(bdev->bd_inode,
					&default_backing_dev_info);
	}
	if (bdev->bd_contains == bdev) {
		if (disk->fops->release)
@@ -1442,8 +1447,6 @@ static int __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part)
		disk_put_part(bdev->bd_part);
		bdev->bd_part = NULL;
		bdev->bd_disk = NULL;
		bdev_inode_switch_bdi(bdev->bd_inode,
					&default_backing_dev_info);
		if (bdev != bdev->bd_contains)
			victim = bdev->bd_contains;
		bdev->bd_contains = NULL;
@@ -1595,7 +1595,7 @@ static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry,
		r = build_dentry_path(rdentry, ppath, pathlen, ino, freepath);
		dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen,
		     *ppath);
	} else if (rpath) {
	} else if (rpath || rino) {
		*ino = rino;
		*ppath = rpath;
		*pathlen = strlen(rpath);
@@ -813,8 +813,8 @@ static struct dentry *ceph_mount(struct file_system_type *fs_type,
	fsc = create_fs_client(fsopt, opt);
	if (IS_ERR(fsc)) {
		res = ERR_CAST(fsc);
		kfree(fsopt);
		kfree(opt);
		destroy_mount_options(fsopt);
		ceph_destroy_options(opt);
		goto out_final;
	}

33
fs/namei.c
@@ -727,25 +727,22 @@ static int follow_automount(struct path *path, unsigned flags,
	if ((flags & LOOKUP_NO_AUTOMOUNT) && !(flags & LOOKUP_PARENT))
		return -EISDIR; /* we actually want to stop here */

	/*
	 * We don't want to mount if someone's just doing a stat and they've
	 * set AT_SYMLINK_NOFOLLOW - unless they're stat'ing a directory and
	 * appended a '/' to the name.
	/* We don't want to mount if someone's just doing a stat -
	 * unless they're stat'ing a directory and appended a '/' to
	 * the name.
	 *
	 * We do, however, want to mount if someone wants to open or
	 * create a file of any type under the mountpoint, wants to
	 * traverse through the mountpoint or wants to open the
	 * mounted directory.  Also, autofs may mark negative dentries
	 * as being automount points.  These will need the attentions
	 * of the daemon to instantiate them before they can be used.
	 */
	if (!(flags & LOOKUP_FOLLOW)) {
		/* We do, however, want to mount if someone wants to open or
		 * create a file of any type under the mountpoint, wants to
		 * traverse through the mountpoint or wants to open the mounted
		 * directory.
		 * Also, autofs may mark negative dentries as being automount
		 * points.  These will need the attentions of the daemon to
		 * instantiate them before they can be used.
		 */
		if (!(flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY |
			       LOOKUP_OPEN | LOOKUP_CREATE)) &&
		    path->dentry->d_inode)
			return -EISDIR;
	}
	if (!(flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY |
		       LOOKUP_OPEN | LOOKUP_CREATE)) &&
	    path->dentry->d_inode)
		return -EISDIR;

	current->total_link_count++;
	if (current->total_link_count >= 40)
		return -ELOOP;
@@ -335,9 +335,9 @@ void dbg_debugfs_exit_fs(struct ubifs_info *c);
#define DBGKEY(key)  ((char *)(key))
#define DBGKEY1(key) ((char *)(key))

#define ubifs_dbg_msg(fmt, ...) do {                        \
	if (0)                                              \
		pr_debug(fmt "\n", ##__VA_ARGS__);          \
#define ubifs_dbg_msg(fmt, ...) do {                        \
	if (0)                                              \
		printk(KERN_DEBUG fmt "\n", ##__VA_ARGS__); \
} while (0)

#define dbg_dump_stack()
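Note that the ubifs hunk keeps the `if (0)` wrapper while swapping pr_debug() for printk(): the branch is compiled away, but the compiler still type-checks the format string against its arguments. A minimal hedged illustration of that idiom (using printf and the GNU `##__VA_ARGS__` extension the kernel also relies on):

```c
#include <stdio.h>

/* The if (0) keeps format/argument checking without emitting code. */
#define dbg_msg(fmt, ...) do {                              \
	if (0)                                              \
		printf(fmt "\n", ##__VA_ARGS__);            \
} while (0)

int main(void)
{
	int count = 3;

	dbg_msg("count is %d", count);     /* compiles to nothing */
	/* dbg_msg("count is %s", count);     would still draw a
	 *                                    format-mismatch warning */
	return 0;
}
```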
@@ -944,8 +944,10 @@ extern void perf_pmu_unregister(struct pmu *pmu);

extern int perf_num_counters(void);
extern const char *perf_pmu_name(void);
extern void __perf_event_task_sched_in(struct task_struct *task);
extern void __perf_event_task_sched_out(struct task_struct *task, struct task_struct *next);
extern void __perf_event_task_sched_in(struct task_struct *prev,
				       struct task_struct *task);
extern void __perf_event_task_sched_out(struct task_struct *prev,
					struct task_struct *next);
extern int perf_event_init_task(struct task_struct *child);
extern void perf_event_exit_task(struct task_struct *child);
extern void perf_event_free_task(struct task_struct *task);
@@ -1059,17 +1061,20 @@ perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)

extern struct jump_label_key perf_sched_events;

static inline void perf_event_task_sched_in(struct task_struct *task)
static inline void perf_event_task_sched_in(struct task_struct *prev,
					    struct task_struct *task)
{
	if (static_branch(&perf_sched_events))
		__perf_event_task_sched_in(task);
		__perf_event_task_sched_in(prev, task);
}

static inline void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next)
static inline void perf_event_task_sched_out(struct task_struct *prev,
					     struct task_struct *next)
{
	perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0);

	__perf_event_task_sched_out(task, next);
	if (static_branch(&perf_sched_events))
		__perf_event_task_sched_out(prev, next);
}

extern void perf_event_mmap(struct vm_area_struct *vma);
@@ -1139,10 +1144,11 @@ extern void perf_event_disable(struct perf_event *event);
extern void perf_event_task_tick(void);
#else
static inline void
perf_event_task_sched_in(struct task_struct *task)			{ }
perf_event_task_sched_in(struct task_struct *prev,
			 struct task_struct *task)			{ }
static inline void
perf_event_task_sched_out(struct task_struct *task,
			  struct task_struct *next)			{ }
perf_event_task_sched_out(struct task_struct *prev,
			  struct task_struct *next)			{ }
static inline int perf_event_init_task(struct task_struct *child)	{ return 0; }
static inline void perf_event_exit_task(struct task_struct *child)	{ }
static inline void perf_event_free_task(struct task_struct *task)	{ }
@@ -123,7 +123,7 @@ struct regulator_bulk_data {
	const char *supply;
	struct regulator *consumer;

	/* Internal use */
	/* private: Internal use */
	int ret;
};

@@ -288,6 +288,35 @@ enum p9_perm_t {
	P9_DMSETVTX = 0x00010000,
};

/* 9p2000.L open flags */
#define P9_DOTL_RDONLY        00000000
#define P9_DOTL_WRONLY        00000001
#define P9_DOTL_RDWR          00000002
#define P9_DOTL_NOACCESS      00000003
#define P9_DOTL_CREATE        00000100
#define P9_DOTL_EXCL          00000200
#define P9_DOTL_NOCTTY        00000400
#define P9_DOTL_TRUNC         00001000
#define P9_DOTL_APPEND        00002000
#define P9_DOTL_NONBLOCK      00004000
#define P9_DOTL_DSYNC         00010000
#define P9_DOTL_FASYNC        00020000
#define P9_DOTL_DIRECT        00040000
#define P9_DOTL_LARGEFILE     00100000
#define P9_DOTL_DIRECTORY     00200000
#define P9_DOTL_NOFOLLOW      00400000
#define P9_DOTL_NOATIME       01000000
#define P9_DOTL_CLOEXEC       02000000
#define P9_DOTL_SYNC          04000000

/* 9p2000.L at flags */
#define P9_DOTL_AT_REMOVEDIR		0x200

/* 9p2000.L lock type */
#define P9_LOCK_TYPE_RDLCK 0
#define P9_LOCK_TYPE_WRLCK 1
#define P9_LOCK_TYPE_UNLCK 2

/**
 * enum p9_qid_t - QID types
 * @P9_QTDIR: directory
@@ -1744,6 +1744,8 @@ struct wiphy_wowlan_support {
 *	by default for perm_addr. In this case, the mask should be set to
 *	all-zeroes. In this case it is assumed that the device can handle
 *	the same number of arbitrary MAC addresses.
 * @registered: protects ->resume and ->suspend sysfs callbacks against
 *	unregister hardware
 * @debugfsdir: debugfs directory used for this wiphy, will be renamed
 *	automatically on wiphy renames
 * @dev: (virtual) struct device for this wiphy
@@ -399,14 +399,54 @@ void perf_cgroup_switch(struct task_struct *task, int mode)
	local_irq_restore(flags);
}

static inline void perf_cgroup_sched_out(struct task_struct *task)
static inline void perf_cgroup_sched_out(struct task_struct *task,
					 struct task_struct *next)
{
	perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
	struct perf_cgroup *cgrp1;
	struct perf_cgroup *cgrp2 = NULL;

	/*
	 * we come here when we know perf_cgroup_events > 0
	 */
	cgrp1 = perf_cgroup_from_task(task);

	/*
	 * next is NULL when called from perf_event_enable_on_exec()
	 * that will systematically cause a cgroup_switch()
	 */
	if (next)
		cgrp2 = perf_cgroup_from_task(next);

	/*
	 * only schedule out current cgroup events if we know
	 * that we are switching to a different cgroup. Otherwise,
	 * do no touch the cgroup events.
	 */
	if (cgrp1 != cgrp2)
		perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
}

static inline void perf_cgroup_sched_in(struct task_struct *task)
static inline void perf_cgroup_sched_in(struct task_struct *prev,
					struct task_struct *task)
{
	perf_cgroup_switch(task, PERF_CGROUP_SWIN);
	struct perf_cgroup *cgrp1;
	struct perf_cgroup *cgrp2 = NULL;

	/*
	 * we come here when we know perf_cgroup_events > 0
	 */
	cgrp1 = perf_cgroup_from_task(task);

	/* prev can never be NULL */
	cgrp2 = perf_cgroup_from_task(prev);

	/*
	 * only need to schedule in cgroup events if we are changing
	 * cgroup during ctxsw. Cgroup events were not scheduled
	 * out of ctxsw out if that was not the case.
	 */
	if (cgrp1 != cgrp2)
		perf_cgroup_switch(task, PERF_CGROUP_SWIN);
}

static inline int perf_cgroup_connect(int fd, struct perf_event *event,
@@ -518,11 +558,13 @@ static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
{
}

static inline void perf_cgroup_sched_out(struct task_struct *task)
static inline void perf_cgroup_sched_out(struct task_struct *task,
					 struct task_struct *next)
{
}

static inline void perf_cgroup_sched_in(struct task_struct *task)
static inline void perf_cgroup_sched_in(struct task_struct *prev,
					struct task_struct *task)
{
}

@@ -1988,7 +2030,7 @@ void __perf_event_task_sched_out(struct task_struct *task,
	 * cgroup event are system-wide mode only
	 */
	if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
		perf_cgroup_sched_out(task);
		perf_cgroup_sched_out(task, next);
}

static void task_ctx_sched_out(struct perf_event_context *ctx)
@@ -2153,7 +2195,8 @@ static void perf_event_context_sched_in(struct perf_event_context *ctx,
 * accessing the event control register. If a NMI hits, then it will
 * keep the event running.
 */
void __perf_event_task_sched_in(struct task_struct *task)
void __perf_event_task_sched_in(struct task_struct *prev,
				struct task_struct *task)
{
	struct perf_event_context *ctx;
	int ctxn;
@@ -2171,7 +2214,7 @@ void __perf_event_task_sched_in(struct task_struct *task)
	 * cgroup event are system-wide mode only
	 */
	if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
		perf_cgroup_sched_in(task);
		perf_cgroup_sched_in(prev, task);
}

static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
@@ -2427,7 +2470,7 @@ static void perf_event_enable_on_exec(struct perf_event_context *ctx)
	 * ctxswin cgroup events which are already scheduled
	 * in.
	 */
	perf_cgroup_sched_out(current);
	perf_cgroup_sched_out(current, NULL);

	raw_spin_lock(&ctx->lock);
	task_ctx_sched_out(ctx);
@@ -3353,8 +3396,8 @@ static int perf_event_index(struct perf_event *event)
}

static void calc_timer_values(struct perf_event *event,
				u64 *running,
				u64 *enabled)
				u64 *enabled,
				u64 *running)
{
	u64 now, ctx_time;

@@ -3065,7 +3065,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
	local_irq_disable();
#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
	perf_event_task_sched_in(current);
	perf_event_task_sched_in(prev, current);
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
	local_irq_enable();
#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
@@ -4279,9 +4279,9 @@ pick_next_task(struct rq *rq)
}

/*
 * schedule() is the main scheduler function.
 * __schedule() is the main scheduler function.
 */
asmlinkage void __sched schedule(void)
static void __sched __schedule(void)
{
	struct task_struct *prev, *next;
	unsigned long *switch_count;
@@ -4322,16 +4322,6 @@ need_resched:
				if (to_wakeup)
					try_to_wake_up_local(to_wakeup);
			}

			/*
			 * If we are going to sleep and we have plugged IO
			 * queued, make sure to submit it to avoid deadlocks.
			 */
			if (blk_needs_flush_plug(prev)) {
				raw_spin_unlock(&rq->lock);
				blk_schedule_flush_plug(prev);
				raw_spin_lock(&rq->lock);
			}
		}
		switch_count = &prev->nvcsw;
	}
@@ -4369,6 +4359,26 @@ need_resched:
	if (need_resched())
		goto need_resched;
}

static inline void sched_submit_work(struct task_struct *tsk)
{
	if (!tsk->state)
		return;
	/*
	 * If we are going to sleep and we have plugged IO queued,
	 * make sure to submit it to avoid deadlocks.
	 */
	if (blk_needs_flush_plug(tsk))
		blk_schedule_flush_plug(tsk);
}

asmlinkage void schedule(void)
{
	struct task_struct *tsk = current;

	sched_submit_work(tsk);
	__schedule();
}
EXPORT_SYMBOL(schedule);

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
@@ -4435,7 +4445,7 @@ asmlinkage void __sched notrace preempt_schedule(void)

	do {
		add_preempt_count_notrace(PREEMPT_ACTIVE);
		schedule();
		__schedule();
		sub_preempt_count_notrace(PREEMPT_ACTIVE);

	/*
@@ -4463,7 +4473,7 @@ asmlinkage void __sched preempt_schedule_irq(void)
	do {
		add_preempt_count(PREEMPT_ACTIVE);
		local_irq_enable();
		schedule();
		__schedule();
		local_irq_disable();
		sub_preempt_count(PREEMPT_ACTIVE);

@@ -5588,7 +5598,7 @@ static inline int should_resched(void)
static void __cond_resched(void)
{
	add_preempt_count(PREEMPT_ACTIVE);
	schedule();
	__schedule();
	sub_preempt_count(PREEMPT_ACTIVE);
}

@@ -7443,6 +7453,7 @@ static void __sdt_free(const struct cpumask *cpu_map)
		struct sched_domain *sd = *per_cpu_ptr(sdd->sd, j);
		if (sd && (sd->flags & SD_OVERLAP))
			free_sched_groups(sd->groups, 0);
		kfree(*per_cpu_ptr(sdd->sd, j));
		kfree(*per_cpu_ptr(sdd->sg, j));
		kfree(*per_cpu_ptr(sdd->sgp, j));
	}
@@ -441,6 +441,8 @@ static int alarm_timer_create(struct k_itimer *new_timer)
static void alarm_timer_get(struct k_itimer *timr,
				struct itimerspec *cur_setting)
{
	memset(cur_setting, 0, sizeof(struct itimerspec));

	cur_setting->it_interval =
			ktime_to_timespec(timr->it.alarmtimer.period);
	cur_setting->it_value =
@@ -479,11 +481,17 @@ static int alarm_timer_set(struct k_itimer *timr, int flags,
	if (!rtcdev)
		return -ENOTSUPP;

	/* Save old values */
	old_setting->it_interval =
			ktime_to_timespec(timr->it.alarmtimer.period);
	old_setting->it_value =
			ktime_to_timespec(timr->it.alarmtimer.node.expires);
	/*
	 * XXX HACK! Currently we can DOS a system if the interval
	 * period on alarmtimers is too small. Cap the interval here
	 * to 100us and solve this properly in a future patch! -jstultz
	 */
	if ((new_setting->it_interval.tv_sec == 0) &&
			(new_setting->it_interval.tv_nsec < 100000))
		new_setting->it_interval.tv_nsec = 100000;

	if (old_setting)
		alarm_timer_get(timr, old_setting);

	/* If the timer was already set, cancel it */
	alarm_cancel(&timr->it.alarmtimer);
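The 100us cap above is a plain clamp on a timespec interval: sub-100us periods are raised, and anything with a non-zero seconds part is already large enough. A hedged stand-alone rendering of the same check, with the threshold kept as a named constant (the hunk hard-codes 100000 ns):

```c
#include <stdio.h>
#include <time.h>

#define MIN_INTERVAL_NSEC 100000L   /* 100us, as in the hunk above */

static void clamp_interval(struct timespec *interval)
{
	if (interval->tv_sec == 0 &&
	    interval->tv_nsec < MIN_INTERVAL_NSEC)
		interval->tv_nsec = MIN_INTERVAL_NSEC;
}

int main(void)
{
	struct timespec it = { .tv_sec = 0, .tv_nsec = 50 };

	clamp_interval(&it);
	printf("interval: %ld.%09ld s\n", (long)it.tv_sec, it.tv_nsec);
	return 0;
}
```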
@@ -263,7 +263,6 @@ p9_virtio_request(struct p9_client *client, struct p9_req_t *req)
{
	int in, out, inp, outp;
	struct virtio_chan *chan = client->trans;
	char *rdata = (char *)req->rc+sizeof(struct p9_fcall);
	unsigned long flags;
	size_t pdata_off = 0;
	struct trans_rpage_info *rpinfo = NULL;
@@ -346,7 +345,8 @@ req_retry_pinned:
		 * Arrange in such a way that server places header in the
		 * alloced memory and payload onto the user buffer.
		 */
		inp = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, rdata, 11);
		inp = pack_sg_list(chan->sg, out,
				   VIRTQUEUE_NUM, req->rc->sdata, 11);
		/*
		 * Running executables in the filesystem may result in
		 * a read request with kernel buffer as opposed to user buffer.
@@ -366,8 +366,8 @@ req_retry_pinned:
		}
		in += inp;
	} else {
		in = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, rdata,
				  req->rc->capacity);
		in = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM,
				  req->rc->sdata, req->rc->capacity);
	}

	err = virtqueue_add_buf(chan->vq, chan->sg, out, in, req->tc);
@@ -592,7 +592,14 @@ static struct p9_trans_module p9_virtio_trans = {
	.close = p9_virtio_close,
	.request = p9_virtio_request,
	.cancel = p9_virtio_cancel,
	.maxsize = PAGE_SIZE*VIRTQUEUE_NUM,

	/*
	 * We leave one entry for input and one entry for response
	 * headers. We also skip one more entry to accomodate, address
	 * that are not at page boundary, that can result in an extra
	 * page in zero copy.
	 */
	.maxsize = PAGE_SIZE * (VIRTQUEUE_NUM - 3),
	.pref = P9_TRANS_PREF_PAYLOAD_SEP,
	.def = 0,
	.owner = THIS_MODULE,
@@ -7,27 +7,37 @@

#include <linux/ceph/msgpool.h>

static void *alloc_fn(gfp_t gfp_mask, void *arg)
static void *msgpool_alloc(gfp_t gfp_mask, void *arg)
{
	struct ceph_msgpool *pool = arg;
	void *p;
	struct ceph_msg *msg;

	p = ceph_msg_new(0, pool->front_len, gfp_mask);
	if (!p)
		pr_err("msgpool %s alloc failed\n", pool->name);
	return p;
	msg = ceph_msg_new(0, pool->front_len, gfp_mask);
	if (!msg) {
		dout("msgpool_alloc %s failed\n", pool->name);
	} else {
		dout("msgpool_alloc %s %p\n", pool->name, msg);
		msg->pool = pool;
	}
	return msg;
}

static void free_fn(void *element, void *arg)
static void msgpool_free(void *element, void *arg)
{
	ceph_msg_put(element);
	struct ceph_msgpool *pool = arg;
	struct ceph_msg *msg = element;

	dout("msgpool_release %s %p\n", pool->name, msg);
	msg->pool = NULL;
	ceph_msg_put(msg);
}

int ceph_msgpool_init(struct ceph_msgpool *pool,
		      int front_len, int size, bool blocking, const char *name)
{
	dout("msgpool %s init\n", name);
	pool->front_len = front_len;
	pool->pool = mempool_create(size, alloc_fn, free_fn, pool);
	pool->pool = mempool_create(size, msgpool_alloc, msgpool_free, pool);
	if (!pool->pool)
		return -ENOMEM;
	pool->name = name;
@@ -36,14 +46,17 @@ int ceph_msgpool_init(struct ceph_msgpool *pool,

void ceph_msgpool_destroy(struct ceph_msgpool *pool)
{
	dout("msgpool %s destroy\n", pool->name);
	mempool_destroy(pool->pool);
}

struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *pool,
				  int front_len)
{
	struct ceph_msg *msg;

	if (front_len > pool->front_len) {
		pr_err("msgpool_get pool %s need front %d, pool size is %d\n",
		dout("msgpool_get %s need front %d, pool size is %d\n",
		       pool->name, front_len, pool->front_len);
		WARN_ON(1);

@@ -51,14 +64,19 @@ struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *pool,
		return ceph_msg_new(0, front_len, GFP_NOFS);
	}

	return mempool_alloc(pool->pool, GFP_NOFS);
	msg = mempool_alloc(pool->pool, GFP_NOFS);
	dout("msgpool_get %s %p\n", pool->name, msg);
	return msg;
}

void ceph_msgpool_put(struct ceph_msgpool *pool, struct ceph_msg *msg)
{
	dout("msgpool_put %s %p\n", pool->name, msg);

	/* reset msg front_len; user may have changed it */
	msg->front.iov_len = pool->front_len;
	msg->hdr.front_len = cpu_to_le32(pool->front_len);

	kref_init(&msg->kref);  /* retake single ref */
	mempool_free(msg, pool->pool);
}
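The msgpool rework above follows the standard mempool lifecycle: mempool_create() is given element constructors/destructors, mempool_alloc() dips into the pre-allocated reserve under memory pressure instead of failing, and mempool_free() refills the reserve first. A hedged sketch of that lifecycle in kernel-module form, trimmed to the essentials (the `demo_*` names are made up, and mempool_create_kmalloc_pool() is used in place of custom alloc/free callbacks):

```c
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>

static mempool_t *demo_pool;

static int __init demo_init(void)
{
	/* Reserve 4 elements of 128 bytes each; this reserve is what
	 * lets mempool_alloc() succeed when kmalloc() would not. */
	demo_pool = mempool_create_kmalloc_pool(4, 128);
	if (!demo_pool)
		return -ENOMEM;
	return 0;
}

static void __exit demo_exit(void)
{
	void *elem = mempool_alloc(demo_pool, GFP_KERNEL);

	if (elem)
		mempool_free(elem, demo_pool);  /* refills the reserve */
	mempool_destroy(demo_pool);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
```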
@@ -685,6 +685,18 @@ static void __remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
	put_osd(osd);
}

static void remove_all_osds(struct ceph_osd_client *osdc)
{
	dout("__remove_old_osds %p\n", osdc);
	mutex_lock(&osdc->request_mutex);
	while (!RB_EMPTY_ROOT(&osdc->osds)) {
		struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds),
						struct ceph_osd, o_node);
		__remove_osd(osdc, osd);
	}
	mutex_unlock(&osdc->request_mutex);
}

static void __move_osd_to_lru(struct ceph_osd_client *osdc,
			      struct ceph_osd *osd)
{
@@ -701,14 +713,14 @@ static void __remove_osd_from_lru(struct ceph_osd *osd)
		list_del_init(&osd->o_osd_lru);
}

static void remove_old_osds(struct ceph_osd_client *osdc, int remove_all)
static void remove_old_osds(struct ceph_osd_client *osdc)
{
	struct ceph_osd *osd, *nosd;

	dout("__remove_old_osds %p\n", osdc);
	mutex_lock(&osdc->request_mutex);
	list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) {
		if (!remove_all && time_before(jiffies, osd->lru_ttl))
		if (time_before(jiffies, osd->lru_ttl))
			break;
		__remove_osd(osdc, osd);
	}
@@ -751,6 +763,7 @@ static void __insert_osd(struct ceph_osd_client *osdc, struct ceph_osd *new)
	struct rb_node *parent = NULL;
	struct ceph_osd *osd = NULL;

	dout("__insert_osd %p osd%d\n", new, new->o_osd);
	while (*p) {
		parent = *p;
		osd = rb_entry(parent, struct ceph_osd, o_node);
@@ -1144,7 +1157,7 @@ static void handle_osds_timeout(struct work_struct *work)

	dout("osds timeout\n");
	down_read(&osdc->map_sem);
	remove_old_osds(osdc, 0);
	remove_old_osds(osdc);
	up_read(&osdc->map_sem);

	schedule_delayed_work(&osdc->osds_timeout_work,
@@ -1862,8 +1875,7 @@ void ceph_osdc_stop(struct ceph_osd_client *osdc)
		ceph_osdmap_destroy(osdc->osdmap);
		osdc->osdmap = NULL;
	}
	remove_old_osds(osdc, 1);
	WARN_ON(!RB_EMPTY_ROOT(&osdc->osds));
	remove_all_osds(osdc);
	mempool_destroy(osdc->req_mempool);
	ceph_msgpool_destroy(&osdc->msgpool_op);
	ceph_msgpool_destroy(&osdc->msgpool_op_reply);