Linux 4.0-rc7

-----BEGIN PGP SIGNATURE-----
 Version: GnuPG v1
 
 iQEcBAABAgAGBQJVIws/AAoJEHm+PkMAQRiGwEcH/1GCBqrBzXaKwDdCPMRcYVUb
 MYkXmGkCGRYWe5MXI8QNAaa/CdG6mAFMHWN6CaMMpLTxnM1m87uBg01fQMsh73BO
 mRVLKE/soiJDnR1gYzBBDBYV/AUvytN5PhgeNaA95YIJvU3T1f3iTnV8vs30Dp0L
 YpxSqwr3C0k7C9IE0VcgfzvWJPCnQ9IWHuX3jn5s1XjGKVNbBYHMt6FusHdyXMfT
 dp8ksuGHwm30mTFI5xJpKOrRzfi+P5EsEUrsnFRPRM/iFTVrM5R7eaUhsRZb2+Wo
 YApnbYhUYz7om1AuQ+UZ/+S6y7ZLlGWegI1lWI754GIsczG5vPHEYhhgkzMhTsc=
 =kR1V
 -----END PGP SIGNATURE-----

Merge tag 'v4.0-rc7' into drm-next

Linux 4.0-rc7

Requested by Alex for fixes -next needs.

Conflicts:
	drivers/gpu/drm/i915/intel_sprite.c
Author: Dave Airlie
Date:   2015-04-09 07:48:27 +10:00
commit 1d8ac08d49
176 changed files with 1510 additions and 723 deletions

View File

@ -19,7 +19,9 @@ the parent DSA node. The maximum number of allowed child nodes is 4
(DSA_MAX_SWITCHES).
Each of these switch child nodes should have the following required properties:
- reg : Describes the switch address on the MII bus
- reg : Contains two fields. The first one describes the
address on the MII bus. The second is the switch
number that must be unique in cascaded configurations
- #address-cells : Must be 1
- #size-cells : Must be 0

View File

@ -114,6 +114,9 @@ ALPS Absolute Mode - Protocol Version 2
byte 4: 0 y6 y5 y4 y3 y2 y1 y0
byte 5: 0 z6 z5 z4 z3 z2 z1 z0
Protocol Version 2 DualPoint devices send standard PS/2 mouse packets for
the DualPoint Stick.
DualPoint device -- interleaved packet format
---------------------------------------------
@ -127,6 +130,11 @@ Dualpoint device -- interleaved packet format
byte 7: 0 y6 y5 y4 y3 y2 y1 y0
byte 8: 0 z6 z5 z4 z3 z2 z1 z0
Devices which use the interleaving format normally send standard PS/2 mouse
packets for the DualPoint Stick + ALPS Absolute Mode packets for the
touchpad, switching to the interleaved packet format when both the stick and
the touchpad are used at the same time.
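
The bit maps above translate directly into mask-and-shift decoding. As a purely
illustrative sketch (not code from the driver), the y and z values of a
Protocol Version 2 absolute packet could be recovered like this, assuming
"packet" points at the received bytes:

   /* Hypothetical helper, for illustration only. */
   static void alps_v2_decode_yz(const unsigned char *packet, int *y, int *z)
   {
           *y = packet[4] & 0x7f;  /* byte 4: 0 y6 y5 y4 y3 y2 y1 y0 */
           *z = packet[5] & 0x7f;  /* byte 5: 0 z6 z5 z4 z3 z2 z1 z0 */
   }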
ALPS Absolute Mode - Protocol Version 3
---------------------------------------

View File

@ -294,6 +294,12 @@ accordingly. This property does not affect kernel behavior.
The kernel does not provide button emulation for such devices but treats
them as any other INPUT_PROP_BUTTONPAD device.
INPUT_PROP_ACCELEROMETER
-------------------------
Directional axes on this device (absolute and/or relative x, y, z) represent
accelerometer data. All other axes retain their meaning. A device must not mix
regular directional axes and accelerometer axes on the same event node.
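
As a hedged illustration of this rule (not taken from any in-tree driver), an
accelerometer driver would flag its event node and register only accelerometer
axes on it; the name "input" and the axis ranges below are arbitrary:

   /* 'input' is the driver's struct input_dev; ranges are illustrative. */
   __set_bit(INPUT_PROP_ACCELEROMETER, input->propbit);
   input_set_abs_params(input, ABS_X, -16384, 16383, 0, 0);
   input_set_abs_params(input, ABS_Y, -16384, 16383, 0, 0);
   input_set_abs_params(input, ABS_Z, -16384, 16383, 0, 0);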
Guidelines:
==========
The guidelines below ensure proper single-touch and multi-finger functionality.

View File

@ -312,9 +312,12 @@ ABS_MT_TOOL_TYPE
The type of approaching tool. A lot of kernel drivers cannot distinguish
between different tool types, such as a finger or a pen. In such cases, the
event should be omitted. The protocol currently supports MT_TOOL_FINGER and
MT_TOOL_PEN [2]. For type B devices, this event is handled by input core;
drivers should instead use input_mt_report_slot_state().
event should be omitted. The protocol currently supports MT_TOOL_FINGER,
MT_TOOL_PEN, and MT_TOOL_PALM [2]. For type B devices, this event is handled
by input core; drivers should instead use input_mt_report_slot_state().
A contact's ABS_MT_TOOL_TYPE may change over time while still touching the
device, because the firmware may not be able to determine which tool is being
used when it first appears.
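
For a type B driver this means the tool type is never emitted as a raw
ABS_MT_TOOL_TYPE event; it is passed to input_mt_report_slot_state() instead.
A minimal sketch, assuming slot, active, is_palm, x and y come from the
driver's own report parsing:

   input_mt_slot(dev, slot);
   input_mt_report_slot_state(dev, is_palm ? MT_TOOL_PALM : MT_TOOL_FINGER,
                              active);
   if (active) {
           input_report_abs(dev, ABS_MT_POSITION_X, x);
           input_report_abs(dev, ABS_MT_POSITION_Y, y);
   }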
ABS_MT_BLOB_ID

View File

@ -637,8 +637,7 @@ F: drivers/gpu/drm/radeon/radeon_kfd.h
F: include/uapi/linux/kfd_ioctl.h
AMD MICROCODE UPDATE SUPPORT
M: Andreas Herrmann <herrmann.der.user@googlemail.com>
L: amd64-microcode@amd64.org
M: Borislav Petkov <bp@alien8.de>
S: Maintained
F: arch/x86/kernel/cpu/microcode/amd*
@ -5094,7 +5093,7 @@ S: Supported
F: drivers/platform/x86/intel_menlow.c
INTEL IA32 MICROCODE UPDATE SUPPORT
M: Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
M: Borislav Petkov <bp@alien8.de>
S: Maintained
F: arch/x86/kernel/cpu/microcode/core*
F: arch/x86/kernel/cpu/microcode/intel*
@ -5135,22 +5134,21 @@ M: Deepak Saxena <dsaxena@plexity.net>
S: Maintained
F: drivers/char/hw_random/ixp4xx-rng.c
INTEL ETHERNET DRIVERS (e100/e1000/e1000e/fm10k/igb/igbvf/ixgb/ixgbe/ixgbevf/i40e/i40evf)
INTEL ETHERNET DRIVERS
M: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
M: Jesse Brandeburg <jesse.brandeburg@intel.com>
M: Bruce Allan <bruce.w.allan@intel.com>
M: Carolyn Wyborny <carolyn.wyborny@intel.com>
M: Don Skidmore <donald.c.skidmore@intel.com>
M: Greg Rose <gregory.v.rose@intel.com>
M: Matthew Vick <matthew.vick@intel.com>
M: John Ronciak <john.ronciak@intel.com>
M: Mitch Williams <mitch.a.williams@intel.com>
M: Linux NICS <linux.nics@intel.com>
L: e1000-devel@lists.sourceforge.net
R: Jesse Brandeburg <jesse.brandeburg@intel.com>
R: Shannon Nelson <shannon.nelson@intel.com>
R: Carolyn Wyborny <carolyn.wyborny@intel.com>
R: Don Skidmore <donald.c.skidmore@intel.com>
R: Matthew Vick <matthew.vick@intel.com>
R: John Ronciak <john.ronciak@intel.com>
R: Mitch Williams <mitch.a.williams@intel.com>
L: intel-wired-lan@lists.osuosl.org
W: http://www.intel.com/support/feedback.htm
W: http://e1000.sourceforge.net/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next.git
Q: http://patchwork.ozlabs.org/project/intel-wired-lan/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-queue.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue.git
S: Supported
F: Documentation/networking/e100.txt
F: Documentation/networking/e1000.txt

View File

@ -1,7 +1,7 @@
VERSION = 4
PATCHLEVEL = 0
SUBLEVEL = 0
EXTRAVERSION = -rc6
EXTRAVERSION = -rc7
NAME = Hurr durr I'ma sheep
# *DOCUMENTATION*

View File

@ -55,7 +55,7 @@ static inline cpumask_t cpu_thread_mask_to_cores(const struct cpumask *threads)
static inline int cpu_nr_cores(void)
{
return NR_CPUS >> threads_shift;
return nr_cpu_ids >> threads_shift;
}
static inline cpumask_t cpu_online_cores_map(void)

View File

@ -212,11 +212,11 @@ static struct event_constraint intel_hsw_event_constraints[] = {
INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
/* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
INTEL_EVENT_CONSTRAINT(0x08a3, 0x4),
INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4),
/* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
INTEL_EVENT_CONSTRAINT(0x0ca3, 0x4),
INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4),
/* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
INTEL_EVENT_CONSTRAINT(0x04a3, 0xf),
INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf),
EVENT_CONSTRAINT_END
};
@ -1649,11 +1649,11 @@ intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event
if (c)
return c;
c = intel_pebs_constraints(event);
c = intel_shared_regs_constraints(cpuc, event);
if (c)
return c;
c = intel_shared_regs_constraints(cpuc, event);
c = intel_pebs_constraints(event);
if (c)
return c;

View File

@ -799,7 +799,21 @@ retint_swapgs: /* return to user-space */
cmpq %r11,(EFLAGS-ARGOFFSET)(%rsp) /* R11 == RFLAGS */
jne opportunistic_sysret_failed
testq $X86_EFLAGS_RF,%r11 /* sysret can't restore RF */
/*
* SYSRET can't restore RF. SYSRET can restore TF, but unlike IRET,
* restoring TF results in a trap from userspace immediately after
* SYSRET. This would cause an infinite loop whenever #DB happens
* with register state that satisfies the opportunistic SYSRET
* conditions. For example, single-stepping this user code:
*
* movq $stuck_here,%rcx
* pushfq
* popq %r11
* stuck_here:
*
* would never get past 'stuck_here'.
*/
testq $(X86_EFLAGS_RF|X86_EFLAGS_TF), %r11
jnz opportunistic_sysret_failed
/* nothing to check for RSP */

View File

@ -72,7 +72,7 @@ struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] =
{ "bx", 8, offsetof(struct pt_regs, bx) },
{ "cx", 8, offsetof(struct pt_regs, cx) },
{ "dx", 8, offsetof(struct pt_regs, dx) },
{ "si", 8, offsetof(struct pt_regs, dx) },
{ "si", 8, offsetof(struct pt_regs, si) },
{ "di", 8, offsetof(struct pt_regs, di) },
{ "bp", 8, offsetof(struct pt_regs, bp) },
{ "sp", 8, offsetof(struct pt_regs, sp) },

View File

@ -183,6 +183,16 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
},
},
/* ASRock */
{ /* Handle problems with rebooting on ASRock Q1900DC-ITX */
.callback = set_pci_reboot,
.ident = "ASRock Q1900DC-ITX",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "ASRock"),
DMI_MATCH(DMI_BOARD_NAME, "Q1900DC-ITX"),
},
},
/* ASUS */
{ /* Handle problems with rebooting on ASUS P4S800 */
.callback = set_bios_reboot,

View File

@ -91,6 +91,12 @@ EXPORT_SYMBOL_GPL(xen_p2m_size);
unsigned long xen_max_p2m_pfn __read_mostly;
EXPORT_SYMBOL_GPL(xen_max_p2m_pfn);
#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG_LIMIT
#define P2M_LIMIT CONFIG_XEN_BALLOON_MEMORY_HOTPLUG_LIMIT
#else
#define P2M_LIMIT 0
#endif
static DEFINE_SPINLOCK(p2m_update_lock);
static unsigned long *p2m_mid_missing_mfn;
@ -385,9 +391,11 @@ static void __init xen_rebuild_p2m_list(unsigned long *p2m)
void __init xen_vmalloc_p2m_tree(void)
{
static struct vm_struct vm;
unsigned long p2m_limit;
p2m_limit = (phys_addr_t)P2M_LIMIT * 1024 * 1024 * 1024 / PAGE_SIZE;
vm.flags = VM_ALLOC;
vm.size = ALIGN(sizeof(unsigned long) * xen_max_p2m_pfn,
vm.size = ALIGN(sizeof(unsigned long) * max(xen_max_p2m_pfn, p2m_limit),
PMD_SIZE * PMDS_PER_MID_PAGE);
vm_area_register_early(&vm, PMD_SIZE * PMDS_PER_MID_PAGE);
pr_notice("p2m virtual area at %p, size is %lx\n", vm.addr, vm.size);

View File

@ -585,7 +585,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
b->physical_block_size);
t->io_min = max(t->io_min, b->io_min);
t->io_opt = lcm(t->io_opt, b->io_opt);
t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);
t->cluster &= b->cluster;
t->discard_zeroes_data &= b->discard_zeroes_data;
@ -616,7 +616,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
b->raid_partial_stripes_expensive);
/* Find lowest common alignment_offset */
t->alignment_offset = lcm(t->alignment_offset, alignment)
t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
% max(t->physical_block_size, t->io_min);
/* Verify that new alignment_offset is on a logical block boundary */
@ -643,7 +643,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
b->max_discard_sectors);
t->discard_granularity = max(t->discard_granularity,
b->discard_granularity);
t->discard_alignment = lcm(t->discard_alignment, alignment) %
t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) %
t->discard_granularity;
}

View File

@ -4204,9 +4204,18 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER },
/* devices that don't properly handle queued TRIM commands */
{ "Micron_M[56]*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
{ "Micron_M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM, },
{ "Crucial_CT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM, },
{ "Micron_M5[15]0*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM, },
{ "Crucial_CT*M550*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM, },
{ "Crucial_CT*MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM, },
{ "Samsung SSD 850 PRO*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM, },
{ "Crucial_CT*SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
/*
* As defined, the DRAT (Deterministic Read After Trim) and RZAT
@ -4226,6 +4235,8 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
*/
{ "INTEL*SSDSC2MH*", NULL, 0, },
{ "Micron*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
{ "Crucial*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
{ "INTEL*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
{ "SSD*INTEL*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
{ "Samsung*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },

View File

@ -475,6 +475,7 @@ static int bcm2835_dma_terminate_all(struct dma_chan *chan)
* c->desc is NULL and exit.)
*/
if (c->desc) {
bcm2835_dma_desc_free(&c->desc->vd);
c->desc = NULL;
bcm2835_dma_abort(c->chan_base);

View File

@ -511,6 +511,9 @@ static void jz4740_dma_desc_free(struct virt_dma_desc *vdesc)
kfree(container_of(vdesc, struct jz4740_dma_desc, vdesc));
}
#define JZ4740_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
static int jz4740_dma_probe(struct platform_device *pdev)
{
struct jz4740_dmaengine_chan *chan;
@ -548,6 +551,10 @@ static int jz4740_dma_probe(struct platform_device *pdev)
dd->device_prep_dma_cyclic = jz4740_dma_prep_dma_cyclic;
dd->device_config = jz4740_dma_slave_config;
dd->device_terminate_all = jz4740_dma_terminate_all;
dd->src_addr_widths = JZ4740_DMA_BUSWIDTHS;
dd->dst_addr_widths = JZ4740_DMA_BUSWIDTHS;
dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
dd->dev = &pdev->dev;
INIT_LIST_HEAD(&dd->channels);

View File

@ -260,6 +260,13 @@ static int edma_terminate_all(struct dma_chan *chan)
*/
if (echan->edesc) {
int cyclic = echan->edesc->cyclic;
/*
* free the running request descriptor
* since it is not in any of the vdesc lists
*/
edma_desc_free(&echan->edesc->vdesc);
echan->edesc = NULL;
edma_stop(echan->ch_num);
/* Move the cyclic channel back to default queue */

View File

@ -193,8 +193,10 @@ static int moxart_terminate_all(struct dma_chan *chan)
spin_lock_irqsave(&ch->vc.lock, flags);
if (ch->desc)
if (ch->desc) {
moxart_dma_desc_free(&ch->desc->vd);
ch->desc = NULL;
}
ctrl = readl(ch->base + REG_OFF_CTRL);
ctrl &= ~(APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);

View File

@ -981,6 +981,7 @@ static int omap_dma_terminate_all(struct dma_chan *chan)
* c->desc is NULL and exit.)
*/
if (c->desc) {
omap_dma_desc_free(&c->desc->vd);
c->desc = NULL;
/* Avoid stopping the dma twice */
if (!c->paused)

View File

@ -86,10 +86,13 @@ static void dmi_table(u8 *buf, u32 len, int num,
int i = 0;
/*
* Stop when we see all the items the table claimed to have
* OR we run off the end of the table (also happens)
* Stop when we have seen all the items the table claimed to have
* (SMBIOS < 3.0 only) OR we reach an end-of-table marker OR we run
* off the end of the table (should never happen but sometimes does
* on bogus implementations.)
*/
while ((i < num) && (data - buf + sizeof(struct dmi_header)) <= len) {
while ((!num || i < num) &&
(data - buf + sizeof(struct dmi_header)) <= len) {
const struct dmi_header *dm = (const struct dmi_header *)data;
/*
@ -529,21 +532,10 @@ static int __init dmi_smbios3_present(const u8 *buf)
if (memcmp(buf, "_SM3_", 5) == 0 &&
buf[6] < 32 && dmi_checksum(buf, buf[6])) {
dmi_ver = get_unaligned_be16(buf + 7);
dmi_num = 0; /* No longer specified */
dmi_len = get_unaligned_le32(buf + 12);
dmi_base = get_unaligned_le64(buf + 16);
/*
* The 64-bit SMBIOS 3.0 entry point no longer has a field
* containing the number of structures present in the table.
* Instead, it defines the table size as a maximum size, and
* relies on the end-of-table structure type (#127) to be used
* to signal the end of the table.
* So let's define dmi_num as an upper bound as well: each
* structure has a 4 byte header, so dmi_len / 4 is an upper
* bound for the number of structures in the table.
*/
dmi_num = dmi_len / 4;
if (dmi_walk_early(dmi_decode) == 0) {
pr_info("SMBIOS %d.%d present.\n",
dmi_ver >> 8, dmi_ver & 0xFF);

View File

@ -334,7 +334,7 @@ static struct irq_domain_ops mpc8xxx_gpio_irq_ops = {
.xlate = irq_domain_xlate_twocell,
};
static struct of_device_id mpc8xxx_gpio_ids[] __initdata = {
static struct of_device_id mpc8xxx_gpio_ids[] = {
{ .compatible = "fsl,mpc8349-gpio", },
{ .compatible = "fsl,mpc8572-gpio", },
{ .compatible = "fsl,mpc8610-gpio", },

View File

@ -219,7 +219,7 @@ static int syscon_gpio_probe(struct platform_device *pdev)
ret = of_property_read_u32_index(np, "gpio,syscon-dev", 2,
&priv->dir_reg_offset);
if (ret)
dev_err(dev, "can't read the dir register offset!\n");
dev_dbg(dev, "can't read the dir register offset!\n");
priv->dir_reg_offset <<= 3;
}

View File

@ -201,6 +201,10 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
if (!handler)
return AE_BAD_PARAMETER;
pin = acpi_gpiochip_pin_to_gpio_offset(chip, pin);
if (pin < 0)
return AE_BAD_PARAMETER;
desc = gpiochip_request_own_desc(chip, pin, "ACPI:Event");
if (IS_ERR(desc)) {
dev_err(chip->dev, "Failed to request GPIO\n");
@ -551,6 +555,12 @@ acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address,
struct gpio_desc *desc;
bool found;
pin = acpi_gpiochip_pin_to_gpio_offset(chip, pin);
if (pin < 0) {
status = AE_BAD_PARAMETER;
goto out;
}
mutex_lock(&achip->conn_lock);
found = false;

View File

@ -287,6 +287,7 @@ int drm_load_edid_firmware(struct drm_connector *connector)
drm_mode_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
drm_edid_to_eld(connector, edid);
kfree(edid);
return ret;

View File

@ -174,6 +174,7 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect
struct edid *edid = (struct edid *) connector->edid_blob_ptr->data;
count = drm_add_edid_modes(connector, edid);
drm_edid_to_eld(connector, edid);
} else
count = (*connector_funcs->get_modes)(connector);
}

View File

@ -147,6 +147,7 @@ struct fimd_win_data {
unsigned int ovl_height;
unsigned int fb_width;
unsigned int fb_height;
unsigned int fb_pitch;
unsigned int bpp;
unsigned int pixel_format;
dma_addr_t dma_addr;
@ -532,13 +533,14 @@ static void fimd_win_mode_set(struct exynos_drm_crtc *crtc,
win_data->offset_y = plane->crtc_y;
win_data->ovl_width = plane->crtc_width;
win_data->ovl_height = plane->crtc_height;
win_data->fb_pitch = plane->pitch;
win_data->fb_width = plane->fb_width;
win_data->fb_height = plane->fb_height;
win_data->dma_addr = plane->dma_addr[0] + offset;
win_data->bpp = plane->bpp;
win_data->pixel_format = plane->pixel_format;
win_data->buf_offsize = (plane->fb_width - plane->crtc_width) *
(plane->bpp >> 3);
win_data->buf_offsize =
plane->pitch - (plane->crtc_width * (plane->bpp >> 3));
win_data->line_size = plane->crtc_width * (plane->bpp >> 3);
DRM_DEBUG_KMS("offset_x = %d, offset_y = %d\n",
@ -704,7 +706,7 @@ static void fimd_win_commit(struct exynos_drm_crtc *crtc, int zpos)
writel(val, ctx->regs + VIDWx_BUF_START(win, 0));
/* buffer end address */
size = win_data->fb_width * win_data->ovl_height * (win_data->bpp >> 3);
size = win_data->fb_pitch * win_data->ovl_height * (win_data->bpp >> 3);
val = (unsigned long)(win_data->dma_addr + size);
writel(val, ctx->regs + VIDWx_BUF_END(win, 0));

View File

@ -55,6 +55,7 @@ struct hdmi_win_data {
unsigned int fb_x;
unsigned int fb_y;
unsigned int fb_width;
unsigned int fb_pitch;
unsigned int fb_height;
unsigned int src_width;
unsigned int src_height;
@ -438,7 +439,7 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
} else {
luma_addr[0] = win_data->dma_addr;
chroma_addr[0] = win_data->dma_addr
+ (win_data->fb_width * win_data->fb_height);
+ (win_data->fb_pitch * win_data->fb_height);
}
if (win_data->scan_flags & DRM_MODE_FLAG_INTERLACE) {
@ -447,8 +448,8 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
luma_addr[1] = luma_addr[0] + 0x40;
chroma_addr[1] = chroma_addr[0] + 0x40;
} else {
luma_addr[1] = luma_addr[0] + win_data->fb_width;
chroma_addr[1] = chroma_addr[0] + win_data->fb_width;
luma_addr[1] = luma_addr[0] + win_data->fb_pitch;
chroma_addr[1] = chroma_addr[0] + win_data->fb_pitch;
}
} else {
ctx->interlace = false;
@ -469,10 +470,10 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
vp_reg_writemask(res, VP_MODE, val, VP_MODE_FMT_MASK);
/* setting size of input image */
vp_reg_write(res, VP_IMG_SIZE_Y, VP_IMG_HSIZE(win_data->fb_width) |
vp_reg_write(res, VP_IMG_SIZE_Y, VP_IMG_HSIZE(win_data->fb_pitch) |
VP_IMG_VSIZE(win_data->fb_height));
/* chroma height has to be reduced by 2 to avoid chroma distortions */
vp_reg_write(res, VP_IMG_SIZE_C, VP_IMG_HSIZE(win_data->fb_width) |
vp_reg_write(res, VP_IMG_SIZE_C, VP_IMG_HSIZE(win_data->fb_pitch) |
VP_IMG_VSIZE(win_data->fb_height / 2));
vp_reg_write(res, VP_SRC_WIDTH, win_data->src_width);
@ -559,7 +560,7 @@ static void mixer_graph_buffer(struct mixer_context *ctx, int win)
/* converting dma address base and source offset */
dma_addr = win_data->dma_addr
+ (win_data->fb_x * win_data->bpp >> 3)
+ (win_data->fb_y * win_data->fb_width * win_data->bpp >> 3);
+ (win_data->fb_y * win_data->fb_pitch);
src_x_offset = 0;
src_y_offset = 0;
@ -576,7 +577,8 @@ static void mixer_graph_buffer(struct mixer_context *ctx, int win)
MXR_GRP_CFG_FORMAT_VAL(fmt), MXR_GRP_CFG_FORMAT_MASK);
/* setup geometry */
mixer_reg_write(res, MXR_GRAPHIC_SPAN(win), win_data->fb_width);
mixer_reg_write(res, MXR_GRAPHIC_SPAN(win),
win_data->fb_pitch / (win_data->bpp >> 3));
/* setup display size */
if (ctx->mxr_ver == MXR_VER_128_0_0_184 &&
@ -961,6 +963,7 @@ static void mixer_win_mode_set(struct exynos_drm_crtc *crtc,
win_data->fb_y = plane->fb_y;
win_data->fb_width = plane->fb_width;
win_data->fb_height = plane->fb_height;
win_data->fb_pitch = plane->pitch;
win_data->src_width = plane->src_width;
win_data->src_height = plane->src_height;

View File

@ -1113,7 +1113,7 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
drm_modeset_lock_all(dev);
plane = drm_plane_find(dev, set->plane_id);
if (!plane) {
if (!plane || plane->type != DRM_PLANE_TYPE_OVERLAY) {
ret = -ENOENT;
goto out_unlock;
}

View File

@ -122,7 +122,6 @@ static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
it = interval_tree_iter_first(&rmn->objects, start, end);
while (it) {
struct radeon_bo *bo;
struct fence *fence;
int r;
bo = container_of(it, struct radeon_bo, mn_it);
@ -134,12 +133,10 @@ static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
continue;
}
fence = reservation_object_get_excl(bo->tbo.resv);
if (fence) {
r = radeon_fence_wait((struct radeon_fence *)fence, false);
if (r)
DRM_ERROR("(%d) failed to wait for user bo\n", r);
}
r = reservation_object_wait_timeout_rcu(bo->tbo.resv, true,
false, MAX_SCHEDULE_TIMEOUT);
if (r)
DRM_ERROR("(%d) failed to wait for user bo\n", r);
radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU);
r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);

View File

@ -598,6 +598,10 @@ static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
enum dma_data_direction direction = write ?
DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
/* double check that we don't free the table twice */
if (!ttm->sg->sgl)
return;
/* free the sg table and pages again */
dma_unmap_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction);

View File

@ -659,7 +659,7 @@ static irqreturn_t bma180_trigger_handler(int irq, void *p)
mutex_lock(&data->mutex);
for_each_set_bit(bit, indio_dev->buffer->scan_mask,
for_each_set_bit(bit, indio_dev->active_scan_mask,
indio_dev->masklength) {
ret = bma180_get_data_reg(data, bit);
if (ret < 0) {

View File

@ -168,14 +168,14 @@ static const struct {
int val;
int val2;
u8 bw_bits;
} bmc150_accel_samp_freq_table[] = { {7, 810000, 0x08},
{15, 630000, 0x09},
{31, 250000, 0x0A},
{62, 500000, 0x0B},
{125, 0, 0x0C},
{250, 0, 0x0D},
{500, 0, 0x0E},
{1000, 0, 0x0F} };
} bmc150_accel_samp_freq_table[] = { {15, 620000, 0x08},
{31, 260000, 0x09},
{62, 500000, 0x0A},
{125, 0, 0x0B},
{250, 0, 0x0C},
{500, 0, 0x0D},
{1000, 0, 0x0E},
{2000, 0, 0x0F} };
static const struct {
int bw_bits;
@ -840,7 +840,7 @@ static int bmc150_accel_validate_trigger(struct iio_dev *indio_dev,
}
static IIO_CONST_ATTR_SAMP_FREQ_AVAIL(
"7.810000 15.630000 31.250000 62.500000 125 250 500 1000");
"15.620000 31.260000 62.50000 125 250 500 1000 2000");
static struct attribute *bmc150_accel_attributes[] = {
&iio_const_attr_sampling_frequency_available.dev_attr.attr,
@ -986,7 +986,7 @@ static irqreturn_t bmc150_accel_trigger_handler(int irq, void *p)
int bit, ret, i = 0;
mutex_lock(&data->mutex);
for_each_set_bit(bit, indio_dev->buffer->scan_mask,
for_each_set_bit(bit, indio_dev->active_scan_mask,
indio_dev->masklength) {
ret = i2c_smbus_read_word_data(data->client,
BMC150_ACCEL_AXIS_TO_REG(bit));

View File

@ -956,7 +956,7 @@ static irqreturn_t kxcjk1013_trigger_handler(int irq, void *p)
mutex_lock(&data->mutex);
for_each_set_bit(bit, indio_dev->buffer->scan_mask,
for_each_set_bit(bit, indio_dev->active_scan_mask,
indio_dev->masklength) {
ret = kxcjk1013_get_acc_reg(data, bit);
if (ret < 0) {

View File

@ -137,7 +137,8 @@ config AXP288_ADC
config CC10001_ADC
tristate "Cosmic Circuits 10001 ADC driver"
depends on HAS_IOMEM || HAVE_CLK || REGULATOR
depends on HAVE_CLK || REGULATOR
depends on HAS_IOMEM
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
help

View File

@ -544,7 +544,6 @@ static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
{
struct iio_dev *idev = iio_trigger_get_drvdata(trig);
struct at91_adc_state *st = iio_priv(idev);
struct iio_buffer *buffer = idev->buffer;
struct at91_adc_reg_desc *reg = st->registers;
u32 status = at91_adc_readl(st, reg->trigger_register);
int value;
@ -564,7 +563,7 @@ static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
at91_adc_writel(st, reg->trigger_register,
status | value);
for_each_set_bit(bit, buffer->scan_mask,
for_each_set_bit(bit, idev->active_scan_mask,
st->num_channels) {
struct iio_chan_spec const *chan = idev->channels + bit;
at91_adc_writel(st, AT91_ADC_CHER,
@ -579,7 +578,7 @@ static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
at91_adc_writel(st, reg->trigger_register,
status & ~value);
for_each_set_bit(bit, buffer->scan_mask,
for_each_set_bit(bit, idev->active_scan_mask,
st->num_channels) {
struct iio_chan_spec const *chan = idev->channels + bit;
at91_adc_writel(st, AT91_ADC_CHDR,

View File

@ -188,12 +188,11 @@ static int tiadc_buffer_preenable(struct iio_dev *indio_dev)
static int tiadc_buffer_postenable(struct iio_dev *indio_dev)
{
struct tiadc_device *adc_dev = iio_priv(indio_dev);
struct iio_buffer *buffer = indio_dev->buffer;
unsigned int enb = 0;
u8 bit;
tiadc_step_config(indio_dev);
for_each_set_bit(bit, buffer->scan_mask, adc_dev->channels)
for_each_set_bit(bit, indio_dev->active_scan_mask, adc_dev->channels)
enb |= (get_adc_step_bit(adc_dev, bit) << 1);
adc_dev->buffer_en_ch_steps = enb;

View File

@ -141,9 +141,13 @@ struct vf610_adc {
struct regulator *vref;
struct vf610_adc_feature adc_feature;
u32 sample_freq_avail[5];
struct completion completion;
};
static const u32 vf610_hw_avgs[] = { 1, 4, 8, 16, 32 };
#define VF610_ADC_CHAN(_idx, _chan_type) { \
.type = (_chan_type), \
.indexed = 1, \
@ -180,35 +184,47 @@ static const struct iio_chan_spec vf610_adc_iio_channels[] = {
/* sentinel */
};
/*
* ADC sample frequency, unit is ADCK cycles.
* ADC clk source is ipg clock, which is the same as bus clock.
*
* ADC conversion time = SFCAdder + AverageNum x (BCT + LSTAdder)
* SFCAdder: fixed to 6 ADCK cycles
* AverageNum: 1, 4, 8, 16, 32 samples for hardware average.
* BCT (Base Conversion Time): fixed to 25 ADCK cycles for 12 bit mode
* LSTAdder(Long Sample Time): fixed to 3 ADCK cycles
*
* By default, enable 12 bit resolution mode, clock source
* set to ipg clock, So get below frequency group:
*/
static const u32 vf610_sample_freq_avail[5] =
{1941176, 559332, 286957, 145374, 73171};
static inline void vf610_adc_calculate_rates(struct vf610_adc *info)
{
unsigned long adck_rate, ipg_rate = clk_get_rate(info->clk);
int i;
/*
* Calculate ADC sample frequencies
* Sample time unit is ADCK cycles. ADCK clk source is ipg clock,
* which is the same as bus clock.
*
* ADC conversion time = SFCAdder + AverageNum x (BCT + LSTAdder)
* SFCAdder: fixed to 6 ADCK cycles
* AverageNum: 1, 4, 8, 16, 32 samples for hardware average.
* BCT (Base Conversion Time): fixed to 25 ADCK cycles for 12 bit mode
* LSTAdder(Long Sample Time): fixed to 3 ADCK cycles
*/
adck_rate = ipg_rate / info->adc_feature.clk_div;
for (i = 0; i < ARRAY_SIZE(vf610_hw_avgs); i++)
info->sample_freq_avail[i] =
adck_rate / (6 + vf610_hw_avgs[i] * (25 + 3));
}
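/*
 * Worked example (illustrative numbers only): assuming a 64 MHz ipg clock and
 * the clk_div of 8 set in vf610_adc_cfg_init() below, ADCK runs at 8 MHz, so
 *   1-sample average:  8000000 / (6 +  1 * (25 + 3)) = 8000000 / 34  ~= 235294 Hz
 *   32-sample average: 8000000 / (6 + 32 * (25 + 3)) = 8000000 / 902 ~=   8869 Hz
 */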
static inline void vf610_adc_cfg_init(struct vf610_adc *info)
{
struct vf610_adc_feature *adc_feature = &info->adc_feature;
/* set default Configuration for ADC controller */
info->adc_feature.clk_sel = VF610_ADCIOC_BUSCLK_SET;
info->adc_feature.vol_ref = VF610_ADCIOC_VR_VREF_SET;
adc_feature->clk_sel = VF610_ADCIOC_BUSCLK_SET;
adc_feature->vol_ref = VF610_ADCIOC_VR_VREF_SET;
info->adc_feature.calibration = true;
info->adc_feature.ovwren = true;
adc_feature->calibration = true;
adc_feature->ovwren = true;
info->adc_feature.clk_div = 1;
info->adc_feature.res_mode = 12;
info->adc_feature.sample_rate = 1;
info->adc_feature.lpm = true;
adc_feature->res_mode = 12;
adc_feature->sample_rate = 1;
adc_feature->lpm = true;
/* Use a safe ADCK which is below 20MHz on all devices */
adc_feature->clk_div = 8;
vf610_adc_calculate_rates(info);
}
static void vf610_adc_cfg_post_set(struct vf610_adc *info)
@ -290,12 +306,10 @@ static void vf610_adc_cfg_set(struct vf610_adc *info)
cfg_data = readl(info->regs + VF610_REG_ADC_CFG);
/* low power configuration */
cfg_data &= ~VF610_ADC_ADLPC_EN;
if (adc_feature->lpm)
cfg_data |= VF610_ADC_ADLPC_EN;
/* disable high speed */
cfg_data &= ~VF610_ADC_ADHSC_EN;
writel(cfg_data, info->regs + VF610_REG_ADC_CFG);
@ -435,10 +449,27 @@ static irqreturn_t vf610_adc_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("1941176, 559332, 286957, 145374, 73171");
static ssize_t vf610_show_samp_freq_avail(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct vf610_adc *info = iio_priv(dev_to_iio_dev(dev));
size_t len = 0;
int i;
for (i = 0; i < ARRAY_SIZE(info->sample_freq_avail); i++)
len += scnprintf(buf + len, PAGE_SIZE - len,
"%u ", info->sample_freq_avail[i]);
/* replace trailing space by newline */
buf[len - 1] = '\n';
return len;
}
static IIO_DEV_ATTR_SAMP_FREQ_AVAIL(vf610_show_samp_freq_avail);
static struct attribute *vf610_attributes[] = {
&iio_const_attr_sampling_frequency_available.dev_attr.attr,
&iio_dev_attr_sampling_frequency_available.dev_attr.attr,
NULL
};
@ -502,7 +533,7 @@ static int vf610_read_raw(struct iio_dev *indio_dev,
return IIO_VAL_FRACTIONAL_LOG2;
case IIO_CHAN_INFO_SAMP_FREQ:
*val = vf610_sample_freq_avail[info->adc_feature.sample_rate];
*val = info->sample_freq_avail[info->adc_feature.sample_rate];
*val2 = 0;
return IIO_VAL_INT;
@ -525,9 +556,9 @@ static int vf610_write_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_SAMP_FREQ:
for (i = 0;
i < ARRAY_SIZE(vf610_sample_freq_avail);
i < ARRAY_SIZE(info->sample_freq_avail);
i++)
if (val == vf610_sample_freq_avail[i]) {
if (val == info->sample_freq_avail[i]) {
info->adc_feature.sample_rate = i;
vf610_adc_sample_set(info);
return 0;

View File

@ -822,7 +822,7 @@ static irqreturn_t bmg160_trigger_handler(int irq, void *p)
int bit, ret, i = 0;
mutex_lock(&data->mutex);
for_each_set_bit(bit, indio_dev->buffer->scan_mask,
for_each_set_bit(bit, indio_dev->active_scan_mask,
indio_dev->masklength) {
ret = i2c_smbus_read_word_data(data->client,
BMG160_AXIS_TO_REG(bit));

View File

@ -60,7 +60,7 @@ int adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev)
iio_trigger_set_drvdata(adis->trig, adis);
ret = iio_trigger_register(adis->trig);
indio_dev->trig = adis->trig;
indio_dev->trig = iio_trigger_get(adis->trig);
if (ret)
goto error_free_irq;

View File

@ -410,42 +410,46 @@ error_read_raw:
}
}
static int inv_mpu6050_write_fsr(struct inv_mpu6050_state *st, int fsr)
static int inv_mpu6050_write_gyro_scale(struct inv_mpu6050_state *st, int val)
{
int result;
int result, i;
u8 d;
if (fsr < 0 || fsr > INV_MPU6050_MAX_GYRO_FS_PARAM)
return -EINVAL;
if (fsr == st->chip_config.fsr)
return 0;
for (i = 0; i < ARRAY_SIZE(gyro_scale_6050); ++i) {
if (gyro_scale_6050[i] == val) {
d = (i << INV_MPU6050_GYRO_CONFIG_FSR_SHIFT);
result = inv_mpu6050_write_reg(st,
st->reg->gyro_config, d);
if (result)
return result;
d = (fsr << INV_MPU6050_GYRO_CONFIG_FSR_SHIFT);
result = inv_mpu6050_write_reg(st, st->reg->gyro_config, d);
if (result)
return result;
st->chip_config.fsr = fsr;
st->chip_config.fsr = i;
return 0;
}
}
return 0;
return -EINVAL;
}
static int inv_mpu6050_write_accel_fs(struct inv_mpu6050_state *st, int fs)
static int inv_mpu6050_write_accel_scale(struct inv_mpu6050_state *st, int val)
{
int result;
int result, i;
u8 d;
if (fs < 0 || fs > INV_MPU6050_MAX_ACCL_FS_PARAM)
return -EINVAL;
if (fs == st->chip_config.accl_fs)
return 0;
for (i = 0; i < ARRAY_SIZE(accel_scale); ++i) {
if (accel_scale[i] == val) {
d = (i << INV_MPU6050_ACCL_CONFIG_FSR_SHIFT);
result = inv_mpu6050_write_reg(st,
st->reg->accl_config, d);
if (result)
return result;
d = (fs << INV_MPU6050_ACCL_CONFIG_FSR_SHIFT);
result = inv_mpu6050_write_reg(st, st->reg->accl_config, d);
if (result)
return result;
st->chip_config.accl_fs = fs;
st->chip_config.accl_fs = i;
return 0;
}
}
return 0;
return -EINVAL;
}
static int inv_mpu6050_write_raw(struct iio_dev *indio_dev,
@ -471,10 +475,10 @@ static int inv_mpu6050_write_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_SCALE:
switch (chan->type) {
case IIO_ANGL_VEL:
result = inv_mpu6050_write_fsr(st, val);
result = inv_mpu6050_write_gyro_scale(st, val2);
break;
case IIO_ACCEL:
result = inv_mpu6050_write_accel_fs(st, val);
result = inv_mpu6050_write_accel_scale(st, val2);
break;
default:
result = -EINVAL;

View File

@ -24,6 +24,16 @@
#include <linux/poll.h>
#include "inv_mpu_iio.h"
static void inv_clear_kfifo(struct inv_mpu6050_state *st)
{
unsigned long flags;
/* take the spin lock sem to avoid interrupt kick in */
spin_lock_irqsave(&st->time_stamp_lock, flags);
kfifo_reset(&st->timestamps);
spin_unlock_irqrestore(&st->time_stamp_lock, flags);
}
int inv_reset_fifo(struct iio_dev *indio_dev)
{
int result;
@ -50,6 +60,10 @@ int inv_reset_fifo(struct iio_dev *indio_dev)
INV_MPU6050_BIT_FIFO_RST);
if (result)
goto reset_fifo_fail;
/* clear timestamps fifo */
inv_clear_kfifo(st);
/* enable interrupt */
if (st->chip_config.accl_fifo_enable ||
st->chip_config.gyro_fifo_enable) {
@ -83,16 +97,6 @@ reset_fifo_fail:
return result;
}
static void inv_clear_kfifo(struct inv_mpu6050_state *st)
{
unsigned long flags;
/* take the spin lock sem to avoid interrupt kick in */
spin_lock_irqsave(&st->time_stamp_lock, flags);
kfifo_reset(&st->timestamps);
spin_unlock_irqrestore(&st->time_stamp_lock, flags);
}
/**
* inv_mpu6050_irq_handler() - Cache a timestamp at each data ready interrupt.
*/
@ -184,7 +188,6 @@ end_session:
flush_fifo:
/* Flush HW and SW FIFOs. */
inv_reset_fifo(indio_dev);
inv_clear_kfifo(st);
mutex_unlock(&indio_dev->mlock);
iio_trigger_notify_done(indio_dev->trig);

View File

@ -1227,7 +1227,7 @@ static irqreturn_t kmx61_trigger_handler(int irq, void *p)
base = KMX61_MAG_XOUT_L;
mutex_lock(&data->lock);
for_each_set_bit(bit, indio_dev->buffer->scan_mask,
for_each_set_bit(bit, indio_dev->active_scan_mask,
indio_dev->masklength) {
ret = kmx61_read_measurement(data, base, bit);
if (ret < 0) {

View File

@ -847,8 +847,7 @@ static int iio_device_add_channel_sysfs(struct iio_dev *indio_dev,
* @attr_list: List of IIO device attributes
*
* This function frees the memory allocated for each of the IIO device
* attributes in the list. Note: if you want to reuse the list after calling
* this function you have to reinitialize it using INIT_LIST_HEAD().
* attributes in the list.
*/
void iio_free_chan_devattr_list(struct list_head *attr_list)
{
@ -856,6 +855,7 @@ void iio_free_chan_devattr_list(struct list_head *attr_list)
list_for_each_entry_safe(p, n, attr_list, l) {
kfree(p->dev_attr.attr.name);
list_del(&p->l);
kfree(p);
}
}
@ -936,6 +936,7 @@ static void iio_device_unregister_sysfs(struct iio_dev *indio_dev)
iio_free_chan_devattr_list(&indio_dev->channel_attr_list);
kfree(indio_dev->chan_attr_group.attrs);
indio_dev->chan_attr_group.attrs = NULL;
}
static void iio_dev_release(struct device *device)

View File

@ -500,6 +500,7 @@ int iio_device_register_eventset(struct iio_dev *indio_dev)
error_free_setup_event_lines:
iio_free_chan_devattr_list(&indio_dev->event_interface->dev_attr_list);
kfree(indio_dev->event_interface);
indio_dev->event_interface = NULL;
return ret;
}

View File

@ -494,7 +494,7 @@ static irqreturn_t sx9500_trigger_handler(int irq, void *private)
mutex_lock(&data->mutex);
for_each_set_bit(bit, indio_dev->buffer->scan_mask,
for_each_set_bit(bit, indio_dev->active_scan_mask,
indio_dev->masklength) {
ret = sx9500_read_proximity(data, &indio_dev->channels[bit],
&val);

View File

@ -99,6 +99,14 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
if (dmasync)
dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
/*
* If the combination of the addr and size requested for this memory
* region causes an integer overflow, return error.
*/
if ((PAGE_ALIGN(addr + size) <= size) ||
(PAGE_ALIGN(addr + size) <= addr))
return ERR_PTR(-EINVAL);
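/*
 * Worked example of the overflow being caught (illustrative values, 64-bit
 * build with 4 KiB pages): addr = 0xfffffffffffff000 and size = 0x2000 wrap
 * to addr + size = 0x1000, so PAGE_ALIGN(addr + size) is 0x1000, which is
 * <= size, and the request is rejected with -EINVAL.
 */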
if (!can_do_mlock())
return ERR_PTR(-EPERM);

View File

@ -1154,10 +1154,28 @@ out:
mutex_unlock(&alps_mutex);
}
static void alps_report_bare_ps2_packet(struct input_dev *dev,
static void alps_report_bare_ps2_packet(struct psmouse *psmouse,
unsigned char packet[],
bool report_buttons)
{
struct alps_data *priv = psmouse->private;
struct input_dev *dev;
/* Figure out which device to use to report the bare packet */
if (priv->proto_version == ALPS_PROTO_V2 &&
(priv->flags & ALPS_DUALPOINT)) {
/* On V2 devices the DualPoint Stick reports bare packets */
dev = priv->dev2;
} else if (unlikely(IS_ERR_OR_NULL(priv->dev3))) {
/* Register dev3 mouse if we received PS/2 packet first time */
if (!IS_ERR(priv->dev3))
psmouse_queue_work(psmouse, &priv->dev3_register_work,
0);
return;
} else {
dev = priv->dev3;
}
if (report_buttons)
alps_report_buttons(dev, NULL,
packet[0] & 1, packet[0] & 2, packet[0] & 4);
@ -1232,8 +1250,8 @@ static psmouse_ret_t alps_handle_interleaved_ps2(struct psmouse *psmouse)
* de-synchronization.
*/
alps_report_bare_ps2_packet(priv->dev2,
&psmouse->packet[3], false);
alps_report_bare_ps2_packet(psmouse, &psmouse->packet[3],
false);
/*
* Continue with the standard ALPS protocol handling,
@ -1289,18 +1307,9 @@ static psmouse_ret_t alps_process_byte(struct psmouse *psmouse)
* properly we only do this if the device is fully synchronized.
*/
if (!psmouse->out_of_sync_cnt && (psmouse->packet[0] & 0xc8) == 0x08) {
/* Register dev3 mouse if we received PS/2 packet first time */
if (unlikely(!priv->dev3))
psmouse_queue_work(psmouse,
&priv->dev3_register_work, 0);
if (psmouse->pktcnt == 3) {
/* Once dev3 mouse device is registered report data */
if (likely(!IS_ERR_OR_NULL(priv->dev3)))
alps_report_bare_ps2_packet(priv->dev3,
psmouse->packet,
true);
alps_report_bare_ps2_packet(psmouse, psmouse->packet,
true);
return PSMOUSE_FULL_PACKET;
}
return PSMOUSE_GOOD_DATA;
@ -2281,10 +2290,12 @@ static int alps_set_protocol(struct psmouse *psmouse,
priv->set_abs_params = alps_set_abs_params_mt;
priv->nibble_commands = alps_v3_nibble_commands;
priv->addr_command = PSMOUSE_CMD_RESET_WRAP;
priv->x_max = 1360;
priv->y_max = 660;
priv->x_bits = 23;
priv->y_bits = 12;
if (alps_dolphin_get_device_area(psmouse, priv))
return -EIO;
break;
case ALPS_PROTO_V6:
@ -2303,9 +2314,8 @@ static int alps_set_protocol(struct psmouse *psmouse,
priv->set_abs_params = alps_set_abs_params_mt;
priv->nibble_commands = alps_v3_nibble_commands;
priv->addr_command = PSMOUSE_CMD_RESET_WRAP;
if (alps_dolphin_get_device_area(psmouse, priv))
return -EIO;
priv->x_max = 0xfff;
priv->y_max = 0x7ff;
if (priv->fw_ver[1] != 0xba)
priv->flags |= ALPS_BUTTONPAD;

View File

@ -152,6 +152,11 @@ static const struct min_max_quirk min_max_pnpid_table[] = {
{ANY_BOARD_ID, ANY_BOARD_ID},
1024, 5022, 2508, 4832
},
{
(const char * const []){"LEN2006", NULL},
{2691, 2691},
1024, 5045, 2457, 4832
},
{
(const char * const []){"LEN2006", NULL},
{ANY_BOARD_ID, ANY_BOARD_ID},
@ -189,7 +194,7 @@ static const char * const topbuttonpad_pnp_ids[] = {
"LEN2003",
"LEN2004", /* L440 */
"LEN2005",
"LEN2006",
"LEN2006", /* Edge E440/E540 */
"LEN2007",
"LEN2008",
"LEN2009",

View File

@ -1288,10 +1288,13 @@ static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
return 0;
spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS)
if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
ret = arm_smmu_iova_to_phys_hard(domain, iova);
else
} else {
ret = ops->iova_to_phys(ops, iova);
}
spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
return ret;
@ -1556,7 +1559,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
return -ENODEV;
}
if (smmu->version == 1 || (!(id & ID0_ATOSNS) && (id & ID0_S1TS))) {
if ((id & ID0_S1TS) && ((smmu->version == 1) || (id & ID0_ATOSNS))) {
smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
dev_notice(smmu->dev, "\taddress translation ops\n");
}

View File

@ -1742,9 +1742,8 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
static void domain_exit(struct dmar_domain *domain)
{
struct dmar_drhd_unit *drhd;
struct intel_iommu *iommu;
struct page *freelist = NULL;
int i;
/* Domain 0 is reserved, so dont process it */
if (!domain)
@ -1764,8 +1763,8 @@ static void domain_exit(struct dmar_domain *domain)
/* clear attached or cached domains */
rcu_read_lock();
for_each_active_iommu(iommu, drhd)
iommu_detach_domain(domain, iommu);
for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus)
iommu_detach_domain(domain, g_iommus[i]);
rcu_read_unlock();
dma_free_pagelist(freelist);

View File

@ -851,6 +851,7 @@ static int ipmmu_remove(struct platform_device *pdev)
static const struct of_device_id ipmmu_of_ids[] = {
{ .compatible = "renesas,ipmmu-vmsa", },
{ }
};
static struct platform_driver ipmmu_driver = {

View File

@ -169,7 +169,7 @@ static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr)
static void its_encode_devid(struct its_cmd_block *cmd, u32 devid)
{
cmd->raw_cmd[0] &= ~(0xffffUL << 32);
cmd->raw_cmd[0] &= BIT_ULL(32) - 1;
cmd->raw_cmd[0] |= ((u64)devid) << 32;
}
@ -802,6 +802,7 @@ static int its_alloc_tables(struct its_node *its)
int i;
int psz = SZ_64K;
u64 shr = GITS_BASER_InnerShareable;
u64 cache = GITS_BASER_WaWb;
for (i = 0; i < GITS_BASER_NR_REGS; i++) {
u64 val = readq_relaxed(its->base + GITS_BASER + i * 8);
@ -848,7 +849,7 @@ retry_baser:
val = (virt_to_phys(base) |
(type << GITS_BASER_TYPE_SHIFT) |
((entry_size - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) |
GITS_BASER_WaWb |
cache |
shr |
GITS_BASER_VALID);
@ -874,9 +875,12 @@ retry_baser:
* Shareability didn't stick. Just use
* whatever the read reported, which is likely
* to be the only thing this redistributor
* supports.
* supports. If that's zero, make it
* non-cacheable as well.
*/
shr = tmp & GITS_BASER_SHAREABILITY_MASK;
if (!shr)
cache = GITS_BASER_nC;
goto retry_baser;
}
@ -980,16 +984,39 @@ static void its_cpu_init_lpis(void)
tmp = readq_relaxed(rbase + GICR_PROPBASER);
if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) {
if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) {
/*
* The HW reports non-shareable, we must
* remove the cacheability attributes as
* well.
*/
val &= ~(GICR_PROPBASER_SHAREABILITY_MASK |
GICR_PROPBASER_CACHEABILITY_MASK);
val |= GICR_PROPBASER_nC;
writeq_relaxed(val, rbase + GICR_PROPBASER);
}
pr_info_once("GIC: using cache flushing for LPI property table\n");
gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING;
}
/* set PENDBASE */
val = (page_to_phys(pend_page) |
GICR_PROPBASER_InnerShareable |
GICR_PROPBASER_WaWb);
GICR_PENDBASER_InnerShareable |
GICR_PENDBASER_WaWb);
writeq_relaxed(val, rbase + GICR_PENDBASER);
tmp = readq_relaxed(rbase + GICR_PENDBASER);
if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) {
/*
* The HW reports non-shareable, we must remove the
* cacheability attributes as well.
*/
val &= ~(GICR_PENDBASER_SHAREABILITY_MASK |
GICR_PENDBASER_CACHEABILITY_MASK);
val |= GICR_PENDBASER_nC;
writeq_relaxed(val, rbase + GICR_PENDBASER);
}
/* Enable LPIs */
val = readl_relaxed(rbase + GICR_CTLR);
@ -1026,7 +1053,7 @@ static void its_cpu_init_collection(void)
* This ITS wants a linear CPU number.
*/
target = readq_relaxed(gic_data_rdist_rd_base() + GICR_TYPER);
target = GICR_TYPER_CPU_NUMBER(target);
target = GICR_TYPER_CPU_NUMBER(target) << 16;
}
/* Perform collection mapping */
@ -1422,14 +1449,26 @@ static int its_probe(struct device_node *node, struct irq_domain *parent)
writeq_relaxed(baser, its->base + GITS_CBASER);
tmp = readq_relaxed(its->base + GITS_CBASER);
writeq_relaxed(0, its->base + GITS_CWRITER);
writel_relaxed(GITS_CTLR_ENABLE, its->base + GITS_CTLR);
if ((tmp ^ baser) & GITS_BASER_SHAREABILITY_MASK) {
if ((tmp ^ baser) & GITS_CBASER_SHAREABILITY_MASK) {
if (!(tmp & GITS_CBASER_SHAREABILITY_MASK)) {
/*
* The HW reports non-shareable, we must
* remove the cacheability attributes as
* well.
*/
baser &= ~(GITS_CBASER_SHAREABILITY_MASK |
GITS_CBASER_CACHEABILITY_MASK);
baser |= GITS_CBASER_nC;
writeq_relaxed(baser, its->base + GITS_CBASER);
}
pr_info("ITS: using cache flushing for cmd queue\n");
its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING;
}
writeq_relaxed(0, its->base + GITS_CWRITER);
writel_relaxed(GITS_CTLR_ENABLE, its->base + GITS_CTLR);
if (of_property_read_bool(its->msi_chip.of_node, "msi-controller")) {
its->domain = irq_domain_add_tree(NULL, &its_domain_ops, its);
if (!its->domain) {

View File

@ -1,6 +1,6 @@
config LGUEST
tristate "Linux hypervisor example code"
depends on X86_32 && EVENTFD && TTY
depends on X86_32 && EVENTFD && TTY && PCI_DIRECT
select HVC_DRIVER
---help---
This is a very simple module which allows you to run

View File

@ -3850,7 +3850,8 @@ static inline int bond_slave_override(struct bonding *bond,
/* Find out if any slaves have the same mapping as this skb. */
bond_for_each_slave_rcu(bond, slave, iter) {
if (slave->queue_id == skb->queue_mapping) {
if (bond_slave_can_tx(slave)) {
if (bond_slave_is_up(slave) &&
slave->link == BOND_LINK_UP) {
bond_dev_queue_xmit(bond, skb, slave->dev);
return 0;
}

View File

@ -592,13 +592,12 @@ static int flexcan_poll_state(struct net_device *dev, u32 reg_esr)
rx_state = unlikely(reg_esr & FLEXCAN_ESR_RX_WRN) ?
CAN_STATE_ERROR_WARNING : CAN_STATE_ERROR_ACTIVE;
new_state = max(tx_state, rx_state);
} else if (unlikely(flt == FLEXCAN_ESR_FLT_CONF_PASSIVE)) {
} else {
__flexcan_get_berr_counter(dev, &bec);
new_state = CAN_STATE_ERROR_PASSIVE;
new_state = flt == FLEXCAN_ESR_FLT_CONF_PASSIVE ?
CAN_STATE_ERROR_PASSIVE : CAN_STATE_BUS_OFF;
rx_state = bec.rxerr >= bec.txerr ? new_state : 0;
tx_state = bec.rxerr <= bec.txerr ? new_state : 0;
} else {
new_state = CAN_STATE_BUS_OFF;
}
/* state hasn't changed */
@ -1158,12 +1157,19 @@ static int flexcan_probe(struct platform_device *pdev)
const struct flexcan_devtype_data *devtype_data;
struct net_device *dev;
struct flexcan_priv *priv;
struct regulator *reg_xceiver;
struct resource *mem;
struct clk *clk_ipg = NULL, *clk_per = NULL;
void __iomem *base;
int err, irq;
u32 clock_freq = 0;
reg_xceiver = devm_regulator_get(&pdev->dev, "xceiver");
if (PTR_ERR(reg_xceiver) == -EPROBE_DEFER)
return -EPROBE_DEFER;
else if (IS_ERR(reg_xceiver))
reg_xceiver = NULL;
if (pdev->dev.of_node)
of_property_read_u32(pdev->dev.of_node,
"clock-frequency", &clock_freq);
@ -1224,9 +1230,7 @@ static int flexcan_probe(struct platform_device *pdev)
priv->pdata = dev_get_platdata(&pdev->dev);
priv->devtype_data = devtype_data;
priv->reg_xceiver = devm_regulator_get(&pdev->dev, "xceiver");
if (IS_ERR(priv->reg_xceiver))
priv->reg_xceiver = NULL;
priv->reg_xceiver = reg_xceiver;
netif_napi_add(dev, &priv->napi, flexcan_poll, FLEXCAN_NAPI_WEIGHT);

View File

@ -901,6 +901,8 @@ static int gs_usb_probe(struct usb_interface *intf, const struct usb_device_id *
}
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
return -ENOMEM;
init_usb_anchor(&dev->rx_submitted);
atomic_set(&dev->active_channels, 0);

View File

@ -25,7 +25,6 @@
#include <linux/can/dev.h>
#include <linux/can/error.h>
#define MAX_TX_URBS 16
#define MAX_RX_URBS 4
#define START_TIMEOUT 1000 /* msecs */
#define STOP_TIMEOUT 1000 /* msecs */
@ -443,6 +442,7 @@ struct kvaser_usb_error_summary {
};
};
/* Context for an outstanding, not yet ACKed, transmission */
struct kvaser_usb_tx_urb_context {
struct kvaser_usb_net_priv *priv;
u32 echo_index;
@ -456,8 +456,13 @@ struct kvaser_usb {
struct usb_endpoint_descriptor *bulk_in, *bulk_out;
struct usb_anchor rx_submitted;
/* @max_tx_urbs: Firmware-reported maximum number of outstanding,
* not yet ACKed, transmissions on this device. This value is
* also used as a sentinel for marking free tx contexts.
*/
u32 fw_version;
unsigned int nchannels;
unsigned int max_tx_urbs;
enum kvaser_usb_family family;
bool rxinitdone;
@ -467,19 +472,18 @@ struct kvaser_usb {
struct kvaser_usb_net_priv {
struct can_priv can;
spinlock_t tx_contexts_lock;
int active_tx_contexts;
struct kvaser_usb_tx_urb_context tx_contexts[MAX_TX_URBS];
struct usb_anchor tx_submitted;
struct completion start_comp, stop_comp;
struct can_berr_counter bec;
struct kvaser_usb *dev;
struct net_device *netdev;
int channel;
struct can_berr_counter bec;
struct completion start_comp, stop_comp;
struct usb_anchor tx_submitted;
spinlock_t tx_contexts_lock;
int active_tx_contexts;
struct kvaser_usb_tx_urb_context tx_contexts[];
};
static const struct usb_device_id kvaser_usb_table[] = {
@ -592,8 +596,8 @@ static int kvaser_usb_wait_msg(const struct kvaser_usb *dev, u8 id,
* for further details.
*/
if (tmp->len == 0) {
pos = round_up(pos,
dev->bulk_in->wMaxPacketSize);
pos = round_up(pos, le16_to_cpu(dev->bulk_in->
wMaxPacketSize));
continue;
}
@ -657,9 +661,13 @@ static int kvaser_usb_get_software_info(struct kvaser_usb *dev)
switch (dev->family) {
case KVASER_LEAF:
dev->fw_version = le32_to_cpu(msg.u.leaf.softinfo.fw_version);
dev->max_tx_urbs =
le16_to_cpu(msg.u.leaf.softinfo.max_outstanding_tx);
break;
case KVASER_USBCAN:
dev->fw_version = le32_to_cpu(msg.u.usbcan.softinfo.fw_version);
dev->max_tx_urbs =
le16_to_cpu(msg.u.usbcan.softinfo.max_outstanding_tx);
break;
}
@ -715,7 +723,7 @@ static void kvaser_usb_tx_acknowledge(const struct kvaser_usb *dev,
stats = &priv->netdev->stats;
context = &priv->tx_contexts[tid % MAX_TX_URBS];
context = &priv->tx_contexts[tid % dev->max_tx_urbs];
/* Sometimes the state change doesn't come after a bus-off event */
if (priv->can.restart_ms &&
@ -744,7 +752,7 @@ static void kvaser_usb_tx_acknowledge(const struct kvaser_usb *dev,
spin_lock_irqsave(&priv->tx_contexts_lock, flags);
can_get_echo_skb(priv->netdev, context->echo_index);
context->echo_index = MAX_TX_URBS;
context->echo_index = dev->max_tx_urbs;
--priv->active_tx_contexts;
netif_wake_queue(priv->netdev);
@ -1329,7 +1337,8 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb)
* number of events in case of a heavy rx load on the bus.
*/
if (msg->len == 0) {
pos = round_up(pos, dev->bulk_in->wMaxPacketSize);
pos = round_up(pos, le16_to_cpu(dev->bulk_in->
wMaxPacketSize));
continue;
}
@ -1512,11 +1521,13 @@ error:
static void kvaser_usb_reset_tx_urb_contexts(struct kvaser_usb_net_priv *priv)
{
int i;
int i, max_tx_urbs;
max_tx_urbs = priv->dev->max_tx_urbs;
priv->active_tx_contexts = 0;
for (i = 0; i < MAX_TX_URBS; i++)
priv->tx_contexts[i].echo_index = MAX_TX_URBS;
for (i = 0; i < max_tx_urbs; i++)
priv->tx_contexts[i].echo_index = max_tx_urbs;
}
/* This method might sleep. Do not call it in the atomic context
@ -1702,14 +1713,14 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
*msg_tx_can_flags |= MSG_FLAG_REMOTE_FRAME;
spin_lock_irqsave(&priv->tx_contexts_lock, flags);
for (i = 0; i < ARRAY_SIZE(priv->tx_contexts); i++) {
if (priv->tx_contexts[i].echo_index == MAX_TX_URBS) {
for (i = 0; i < dev->max_tx_urbs; i++) {
if (priv->tx_contexts[i].echo_index == dev->max_tx_urbs) {
context = &priv->tx_contexts[i];
context->echo_index = i;
can_put_echo_skb(skb, netdev, context->echo_index);
++priv->active_tx_contexts;
if (priv->active_tx_contexts >= MAX_TX_URBS)
if (priv->active_tx_contexts >= dev->max_tx_urbs)
netif_stop_queue(netdev);
break;
@ -1743,7 +1754,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
spin_lock_irqsave(&priv->tx_contexts_lock, flags);
can_free_echo_skb(netdev, context->echo_index);
context->echo_index = MAX_TX_URBS;
context->echo_index = dev->max_tx_urbs;
--priv->active_tx_contexts;
netif_wake_queue(netdev);
@ -1881,7 +1892,9 @@ static int kvaser_usb_init_one(struct usb_interface *intf,
if (err)
return err;
netdev = alloc_candev(sizeof(*priv), MAX_TX_URBS);
netdev = alloc_candev(sizeof(*priv) +
dev->max_tx_urbs * sizeof(*priv->tx_contexts),
dev->max_tx_urbs);
if (!netdev) {
dev_err(&intf->dev, "Cannot alloc candev\n");
return -ENOMEM;
@ -2009,6 +2022,13 @@ static int kvaser_usb_probe(struct usb_interface *intf,
return err;
}
dev_dbg(&intf->dev, "Firmware version: %d.%d.%d\n",
((dev->fw_version >> 24) & 0xff),
((dev->fw_version >> 16) & 0xff),
(dev->fw_version & 0xffff));
dev_dbg(&intf->dev, "Max oustanding tx = %d URBs\n", dev->max_tx_urbs);
err = kvaser_usb_get_card_info(dev);
if (err) {
dev_err(&intf->dev,
@ -2016,11 +2036,6 @@ static int kvaser_usb_probe(struct usb_interface *intf,
return err;
}
dev_dbg(&intf->dev, "Firmware version: %d.%d.%d\n",
((dev->fw_version >> 24) & 0xff),
((dev->fw_version >> 16) & 0xff),
(dev->fw_version & 0xffff));
for (i = 0; i < dev->nchannels; i++) {
err = kvaser_usb_init_one(intf, id, i);
if (err) {

View File

@ -26,8 +26,8 @@
#define PUCAN_CMD_FILTER_STD 0x008
#define PUCAN_CMD_TX_ABORT 0x009
#define PUCAN_CMD_WR_ERR_CNT 0x00a
#define PUCAN_CMD_RX_FRAME_ENABLE 0x00b
#define PUCAN_CMD_RX_FRAME_DISABLE 0x00c
#define PUCAN_CMD_SET_EN_OPTION 0x00b
#define PUCAN_CMD_CLR_DIS_OPTION 0x00c
#define PUCAN_CMD_END_OF_COLLECTION 0x3ff
/* uCAN received messages list */
@ -101,14 +101,15 @@ struct __packed pucan_wr_err_cnt {
u16 unused;
};
/* uCAN RX_FRAME_ENABLE command fields */
#define PUCAN_FLTEXT_ERROR 0x0001
#define PUCAN_FLTEXT_BUSLOAD 0x0002
/* uCAN SET_EN/CLR_DIS _OPTION command fields */
#define PUCAN_OPTION_ERROR 0x0001
#define PUCAN_OPTION_BUSLOAD 0x0002
#define PUCAN_OPTION_CANDFDISO 0x0004
struct __packed pucan_filter_ext {
struct __packed pucan_options {
__le16 opcode_channel;
__le16 ext_mask;
__le16 options;
u32 unused;
};

View File

@ -110,13 +110,13 @@ struct __packed pcan_ufd_led {
u8 unused[5];
};
/* Extended usage of uCAN commands CMD_RX_FRAME_xxxABLE for PCAN-USB Pro FD */
/* Extended usage of uCAN commands CMD_xxx_xx_OPTION for PCAN-USB Pro FD */
#define PCAN_UFD_FLTEXT_CALIBRATION 0x8000
struct __packed pcan_ufd_filter_ext {
struct __packed pcan_ufd_options {
__le16 opcode_channel;
__le16 ext_mask;
__le16 ucan_mask;
u16 unused;
__le16 usb_mask;
};
@ -251,6 +251,27 @@ static int pcan_usb_fd_build_restart_cmd(struct peak_usb_device *dev, u8 *buf)
/* moves the pointer forward */
pc += sizeof(struct pucan_wr_err_cnt);
/* add command to switch from ISO to non-ISO mode, if fw allows it */
if (dev->can.ctrlmode_supported & CAN_CTRLMODE_FD_NON_ISO) {
struct pucan_options *puo = (struct pucan_options *)pc;
puo->opcode_channel =
(dev->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO) ?
pucan_cmd_opcode_channel(dev,
PUCAN_CMD_CLR_DIS_OPTION) :
pucan_cmd_opcode_channel(dev, PUCAN_CMD_SET_EN_OPTION);
puo->options = cpu_to_le16(PUCAN_OPTION_CANDFDISO);
/* to be sure that no other extended bits will be taken into
* account
*/
puo->unused = 0;
/* moves the pointer forward */
pc += sizeof(struct pucan_options);
}
/* next, go back to operational mode */
cmd = (struct pucan_command *)pc;
cmd->opcode_channel = pucan_cmd_opcode_channel(dev,
@ -321,21 +342,21 @@ static int pcan_usb_fd_set_filter_std(struct peak_usb_device *dev, int idx,
return pcan_usb_fd_send_cmd(dev, cmd);
}
/* set/unset notifications filter:
/* set/unset options
*
* onoff sets(1)/unset(0) notifications
* mask each bit defines a kind of notification to set/unset
* onoff set(1)/unset(0) options
* mask each bit defines a kind of options to set/unset
*/
static int pcan_usb_fd_set_filter_ext(struct peak_usb_device *dev,
bool onoff, u16 ext_mask, u16 usb_mask)
static int pcan_usb_fd_set_options(struct peak_usb_device *dev,
bool onoff, u16 ucan_mask, u16 usb_mask)
{
struct pcan_ufd_filter_ext *cmd = pcan_usb_fd_cmd_buffer(dev);
struct pcan_ufd_options *cmd = pcan_usb_fd_cmd_buffer(dev);
cmd->opcode_channel = pucan_cmd_opcode_channel(dev,
(onoff) ? PUCAN_CMD_RX_FRAME_ENABLE :
PUCAN_CMD_RX_FRAME_DISABLE);
(onoff) ? PUCAN_CMD_SET_EN_OPTION :
PUCAN_CMD_CLR_DIS_OPTION);
cmd->ext_mask = cpu_to_le16(ext_mask);
cmd->ucan_mask = cpu_to_le16(ucan_mask);
cmd->usb_mask = cpu_to_le16(usb_mask);
/* send the command */
@ -770,9 +791,9 @@ static int pcan_usb_fd_start(struct peak_usb_device *dev)
&pcan_usb_pro_fd);
/* enable USB calibration messages */
err = pcan_usb_fd_set_filter_ext(dev, 1,
PUCAN_FLTEXT_ERROR,
PCAN_UFD_FLTEXT_CALIBRATION);
err = pcan_usb_fd_set_options(dev, 1,
PUCAN_OPTION_ERROR,
PCAN_UFD_FLTEXT_CALIBRATION);
}
pdev->usb_if->dev_opened_count++;
@ -806,9 +827,9 @@ static int pcan_usb_fd_stop(struct peak_usb_device *dev)
/* turn off special msgs for that interface if no other dev opened */
if (pdev->usb_if->dev_opened_count == 1)
pcan_usb_fd_set_filter_ext(dev, 0,
PUCAN_FLTEXT_ERROR,
PCAN_UFD_FLTEXT_CALIBRATION);
pcan_usb_fd_set_options(dev, 0,
PUCAN_OPTION_ERROR,
PCAN_UFD_FLTEXT_CALIBRATION);
pdev->usb_if->dev_opened_count--;
return 0;
@ -860,8 +881,14 @@ static int pcan_usb_fd_init(struct peak_usb_device *dev)
pdev->usb_if->fw_info.fw_version[2],
dev->adapter->ctrl_count);
/* the currently supported hw is non-ISO */
dev->can.ctrlmode = CAN_CTRLMODE_FD_NON_ISO;
/* check for ability to switch between ISO/non-ISO modes */
if (pdev->usb_if->fw_info.fw_version[0] >= 2) {
/* firmware >= 2.x supports ISO/non-ISO switching */
dev->can.ctrlmode_supported |= CAN_CTRLMODE_FD_NON_ISO;
} else {
/* firmware < 2.x only supports fixed(!) non-ISO */
dev->can.ctrlmode |= CAN_CTRLMODE_FD_NON_ISO;
}
/* tell the hardware the can driver is running */
err = pcan_usb_fd_drv_loaded(dev, 1);
@ -937,9 +964,9 @@ static void pcan_usb_fd_exit(struct peak_usb_device *dev)
if (dev->ctrl_idx == 0) {
/* turn off calibration message if any device were opened */
if (pdev->usb_if->dev_opened_count > 0)
pcan_usb_fd_set_filter_ext(dev, 0,
PUCAN_FLTEXT_ERROR,
PCAN_UFD_FLTEXT_CALIBRATION);
pcan_usb_fd_set_options(dev, 0,
PUCAN_OPTION_ERROR,
PCAN_UFD_FLTEXT_CALIBRATION);
/* tell USB adapter that the driver is being unloaded */
pcan_usb_fd_drv_loaded(dev, 0);

View File

@ -1811,7 +1811,7 @@ struct bnx2x {
int stats_state;
/* used for synchronization of concurrent threads statistics handling */
spinlock_t stats_lock;
struct mutex stats_lock;
/* used by dmae command loader */
struct dmae_command stats_dmae;
@ -1935,8 +1935,6 @@ struct bnx2x {
int fp_array_size;
u32 dump_preset_idx;
bool stats_started;
struct semaphore stats_sema;
u8 phys_port_id[ETH_ALEN];

View File

@ -129,8 +129,8 @@ struct bnx2x_mac_vals {
u32 xmac_val;
u32 emac_addr;
u32 emac_val;
u32 umac_addr;
u32 umac_val;
u32 umac_addr[2];
u32 umac_val[2];
u32 bmac_addr;
u32 bmac_val[2];
};
@ -7866,6 +7866,20 @@ int bnx2x_init_hw_func_cnic(struct bnx2x *bp)
return 0;
}
/* previous driver DMAE transaction may have occurred when pre-boot stage ended
* and boot began, or when kdump kernel was loaded. Either case would invalidate
* the addresses of the transaction, resulting in was-error bit set in the pci
* causing all hw-to-host pcie transactions to timeout. If this happened we want
* to clear the interrupt which detected this from the pglueb and the was done
* bit
*/
static void bnx2x_clean_pglue_errors(struct bnx2x *bp)
{
if (!CHIP_IS_E1x(bp))
REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR,
1 << BP_ABS_FUNC(bp));
}
static int bnx2x_init_hw_func(struct bnx2x *bp)
{
int port = BP_PORT(bp);
@ -7958,8 +7972,7 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase);
if (!CHIP_IS_E1x(bp))
REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);
bnx2x_clean_pglue_errors(bp);
bnx2x_init_block(bp, BLOCK_ATC, init_phase);
bnx2x_init_block(bp, BLOCK_DMAE, init_phase);
@ -10141,6 +10154,25 @@ static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
return base + (BP_ABS_FUNC(bp)) * stride;
}
static bool bnx2x_prev_unload_close_umac(struct bnx2x *bp,
u8 port, u32 reset_reg,
struct bnx2x_mac_vals *vals)
{
u32 mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port;
u32 base_addr;
if (!(mask & reset_reg))
return false;
BNX2X_DEV_INFO("Disable umac Rx %02x\n", port);
base_addr = port ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
vals->umac_addr[port] = base_addr + UMAC_REG_COMMAND_CONFIG;
vals->umac_val[port] = REG_RD(bp, vals->umac_addr[port]);
REG_WR(bp, vals->umac_addr[port], 0);
return true;
}
static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
struct bnx2x_mac_vals *vals)
{
@ -10149,10 +10181,7 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
u8 port = BP_PORT(bp);
/* reset addresses as they also mark which values were changed */
vals->bmac_addr = 0;
vals->umac_addr = 0;
vals->xmac_addr = 0;
vals->emac_addr = 0;
memset(vals, 0, sizeof(*vals));
reset_reg = REG_RD(bp, MISC_REG_RESET_REG_2);
@ -10201,15 +10230,11 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
REG_WR(bp, vals->xmac_addr, 0);
mac_stopped = true;
}
mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port;
if (mask & reset_reg) {
BNX2X_DEV_INFO("Disable umac Rx\n");
base_addr = BP_PORT(bp) ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
vals->umac_addr = base_addr + UMAC_REG_COMMAND_CONFIG;
vals->umac_val = REG_RD(bp, vals->umac_addr);
REG_WR(bp, vals->umac_addr, 0);
mac_stopped = true;
}
mac_stopped |= bnx2x_prev_unload_close_umac(bp, 0,
reset_reg, vals);
mac_stopped |= bnx2x_prev_unload_close_umac(bp, 1,
reset_reg, vals);
}
if (mac_stopped)
@ -10505,8 +10530,11 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
/* Close the MAC Rx to prevent BRB from filling up */
bnx2x_prev_unload_close_mac(bp, &mac_vals);
/* close LLH filters towards the BRB */
/* close LLH filters for both ports towards the BRB */
bnx2x_set_rx_filter(&bp->link_params, 0);
bp->link_params.port ^= 1;
bnx2x_set_rx_filter(&bp->link_params, 0);
bp->link_params.port ^= 1;
/* Check if the UNDI driver was previously loaded */
if (bnx2x_prev_is_after_undi(bp)) {
@ -10553,8 +10581,10 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
if (mac_vals.xmac_addr)
REG_WR(bp, mac_vals.xmac_addr, mac_vals.xmac_val);
if (mac_vals.umac_addr)
REG_WR(bp, mac_vals.umac_addr, mac_vals.umac_val);
if (mac_vals.umac_addr[0])
REG_WR(bp, mac_vals.umac_addr[0], mac_vals.umac_val[0]);
if (mac_vals.umac_addr[1])
REG_WR(bp, mac_vals.umac_addr[1], mac_vals.umac_val[1]);
if (mac_vals.emac_addr)
REG_WR(bp, mac_vals.emac_addr, mac_vals.emac_val);
if (mac_vals.bmac_addr) {
@ -10571,26 +10601,6 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
return bnx2x_prev_mcp_done(bp);
}
/* previous driver DMAE transaction may have occurred when pre-boot stage ended
* and boot began, or when kdump kernel was loaded. Either case would invalidate
* the addresses of the transaction, resulting in was-error bit set in the pci
* causing all hw-to-host pcie transactions to timeout. If this happened we want
* to clear the interrupt which detected this from the pglueb and the was done
* bit
*/
static void bnx2x_prev_interrupted_dmae(struct bnx2x *bp)
{
if (!CHIP_IS_E1x(bp)) {
u32 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS);
if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) {
DP(BNX2X_MSG_SP,
"'was error' bit was found to be set in pglueb upon startup. Clearing\n");
REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR,
1 << BP_FUNC(bp));
}
}
}
static int bnx2x_prev_unload(struct bnx2x *bp)
{
int time_counter = 10;
@ -10600,7 +10610,7 @@ static int bnx2x_prev_unload(struct bnx2x *bp)
/* clear hw from errors which may have resulted from an interrupted
* dmae transaction.
*/
bnx2x_prev_interrupted_dmae(bp);
bnx2x_clean_pglue_errors(bp);
/* Release previously held locks */
hw_lock_reg = (BP_FUNC(bp) <= 5) ?
@ -12037,9 +12047,8 @@ static int bnx2x_init_bp(struct bnx2x *bp)
mutex_init(&bp->port.phy_mutex);
mutex_init(&bp->fw_mb_mutex);
mutex_init(&bp->drv_info_mutex);
mutex_init(&bp->stats_lock);
bp->drv_info_mng_owner = false;
spin_lock_init(&bp->stats_lock);
sema_init(&bp->stats_sema, 1);
INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
@ -13668,9 +13677,9 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
cancel_delayed_work_sync(&bp->sp_task);
cancel_delayed_work_sync(&bp->period_task);
spin_lock_bh(&bp->stats_lock);
mutex_lock(&bp->stats_lock);
bp->stats_state = STATS_STATE_DISABLED;
spin_unlock_bh(&bp->stats_lock);
mutex_unlock(&bp->stats_lock);
bnx2x_save_statistics(bp);

View File

@ -2238,7 +2238,9 @@ int bnx2x_vf_close(struct bnx2x *bp, struct bnx2x_virtf *vf)
cookie.vf = vf;
cookie.state = VF_ACQUIRED;
bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie);
rc = bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie);
if (rc)
goto op_err;
}
DP(BNX2X_MSG_IOV, "set state to acquired\n");

View File

@ -123,36 +123,28 @@ static void bnx2x_dp_stats(struct bnx2x *bp)
*/
static void bnx2x_storm_stats_post(struct bnx2x *bp)
{
if (!bp->stats_pending) {
int rc;
int rc;
spin_lock_bh(&bp->stats_lock);
if (bp->stats_pending)
return;
if (bp->stats_pending) {
spin_unlock_bh(&bp->stats_lock);
return;
}
bp->fw_stats_req->hdr.drv_stats_counter =
cpu_to_le16(bp->stats_counter++);
bp->fw_stats_req->hdr.drv_stats_counter =
cpu_to_le16(bp->stats_counter++);
DP(BNX2X_MSG_STATS, "Sending statistics ramrod %d\n",
le16_to_cpu(bp->fw_stats_req->hdr.drv_stats_counter));
DP(BNX2X_MSG_STATS, "Sending statistics ramrod %d\n",
le16_to_cpu(bp->fw_stats_req->hdr.drv_stats_counter));
/* adjust the ramrod to include VF queues statistics */
bnx2x_iov_adjust_stats_req(bp);
bnx2x_dp_stats(bp);
/* adjust the ramrod to include VF queues statistics */
bnx2x_iov_adjust_stats_req(bp);
bnx2x_dp_stats(bp);
/* send FW stats ramrod */
rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0,
U64_HI(bp->fw_stats_req_mapping),
U64_LO(bp->fw_stats_req_mapping),
NONE_CONNECTION_TYPE);
if (rc == 0)
bp->stats_pending = 1;
spin_unlock_bh(&bp->stats_lock);
}
/* send FW stats ramrod */
rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0,
U64_HI(bp->fw_stats_req_mapping),
U64_LO(bp->fw_stats_req_mapping),
NONE_CONNECTION_TYPE);
if (rc == 0)
bp->stats_pending = 1;
}
static void bnx2x_hw_stats_post(struct bnx2x *bp)
@ -221,7 +213,7 @@ static void bnx2x_stats_comp(struct bnx2x *bp)
*/
/* should be called under stats_sema */
static void __bnx2x_stats_pmf_update(struct bnx2x *bp)
static void bnx2x_stats_pmf_update(struct bnx2x *bp)
{
struct dmae_command *dmae;
u32 opcode;
@ -519,7 +511,7 @@ static void bnx2x_func_stats_init(struct bnx2x *bp)
}
/* should be called under stats_sema */
static void __bnx2x_stats_start(struct bnx2x *bp)
static void bnx2x_stats_start(struct bnx2x *bp)
{
if (IS_PF(bp)) {
if (bp->port.pmf)
@ -531,34 +523,13 @@ static void __bnx2x_stats_start(struct bnx2x *bp)
bnx2x_hw_stats_post(bp);
bnx2x_storm_stats_post(bp);
}
bp->stats_started = true;
}
static void bnx2x_stats_start(struct bnx2x *bp)
{
if (down_timeout(&bp->stats_sema, HZ/10))
BNX2X_ERR("Unable to acquire stats lock\n");
__bnx2x_stats_start(bp);
up(&bp->stats_sema);
}
static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
if (down_timeout(&bp->stats_sema, HZ/10))
BNX2X_ERR("Unable to acquire stats lock\n");
bnx2x_stats_comp(bp);
__bnx2x_stats_pmf_update(bp);
__bnx2x_stats_start(bp);
up(&bp->stats_sema);
}
static void bnx2x_stats_pmf_update(struct bnx2x *bp)
{
if (down_timeout(&bp->stats_sema, HZ/10))
BNX2X_ERR("Unable to acquire stats lock\n");
__bnx2x_stats_pmf_update(bp);
up(&bp->stats_sema);
bnx2x_stats_pmf_update(bp);
bnx2x_stats_start(bp);
}
static void bnx2x_stats_restart(struct bnx2x *bp)
@ -568,11 +539,9 @@ static void bnx2x_stats_restart(struct bnx2x *bp)
*/
if (IS_VF(bp))
return;
if (down_timeout(&bp->stats_sema, HZ/10))
BNX2X_ERR("Unable to acquire stats lock\n");
bnx2x_stats_comp(bp);
__bnx2x_stats_start(bp);
up(&bp->stats_sema);
bnx2x_stats_start(bp);
}
static void bnx2x_bmac_stats_update(struct bnx2x *bp)
@ -1246,18 +1215,12 @@ static void bnx2x_stats_update(struct bnx2x *bp)
{
u32 *stats_comp = bnx2x_sp(bp, stats_comp);
/* we run update from timer context, so give up
* if somebody is in the middle of transition
*/
if (down_trylock(&bp->stats_sema))
if (bnx2x_edebug_stats_stopped(bp))
return;
if (bnx2x_edebug_stats_stopped(bp) || !bp->stats_started)
goto out;
if (IS_PF(bp)) {
if (*stats_comp != DMAE_COMP_VAL)
goto out;
return;
if (bp->port.pmf)
bnx2x_hw_stats_update(bp);
@ -1267,7 +1230,7 @@ static void bnx2x_stats_update(struct bnx2x *bp)
BNX2X_ERR("storm stats were not updated for 3 times\n");
bnx2x_panic();
}
goto out;
return;
}
} else {
/* vf doesn't collect HW statistics, and doesn't get completions
@ -1281,7 +1244,7 @@ static void bnx2x_stats_update(struct bnx2x *bp)
/* vf is done */
if (IS_VF(bp))
goto out;
return;
if (netif_msg_timer(bp)) {
struct bnx2x_eth_stats *estats = &bp->eth_stats;
@ -1292,9 +1255,6 @@ static void bnx2x_stats_update(struct bnx2x *bp)
bnx2x_hw_stats_post(bp);
bnx2x_storm_stats_post(bp);
out:
up(&bp->stats_sema);
}
static void bnx2x_port_stats_stop(struct bnx2x *bp)
@ -1358,12 +1318,7 @@ static void bnx2x_port_stats_stop(struct bnx2x *bp)
static void bnx2x_stats_stop(struct bnx2x *bp)
{
int update = 0;
if (down_timeout(&bp->stats_sema, HZ/10))
BNX2X_ERR("Unable to acquire stats lock\n");
bp->stats_started = false;
bool update = false;
bnx2x_stats_comp(bp);
@ -1381,8 +1336,6 @@ static void bnx2x_stats_stop(struct bnx2x *bp)
bnx2x_hw_stats_post(bp);
bnx2x_stats_comp(bp);
}
up(&bp->stats_sema);
}
static void bnx2x_stats_do_nothing(struct bnx2x *bp)
@ -1410,18 +1363,28 @@ static const struct {
void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
enum bnx2x_stats_state state;
void (*action)(struct bnx2x *bp);
enum bnx2x_stats_state state = bp->stats_state;
if (unlikely(bp->panic))
return;
spin_lock_bh(&bp->stats_lock);
state = bp->stats_state;
bp->stats_state = bnx2x_stats_stm[state][event].next_state;
action = bnx2x_stats_stm[state][event].action;
spin_unlock_bh(&bp->stats_lock);
/* Statistics updates run from timer context, and we don't want to stop
* that context in case someone is in the middle of a transition.
* For other events, wait a bit until the lock is taken.
*/
if (!mutex_trylock(&bp->stats_lock)) {
if (event == STATS_EVENT_UPDATE)
return;
action(bp);
DP(BNX2X_MSG_STATS,
"Unlikely stats' lock contention [event %d]\n", event);
mutex_lock(&bp->stats_lock);
}
bnx2x_stats_stm[state][event].action(bp);
bp->stats_state = bnx2x_stats_stm[state][event].next_state;
mutex_unlock(&bp->stats_lock);
if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
@ -1998,13 +1961,34 @@ void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats,
}
}
void bnx2x_stats_safe_exec(struct bnx2x *bp,
void (func_to_exec)(void *cookie),
void *cookie){
if (down_timeout(&bp->stats_sema, HZ/10))
BNX2X_ERR("Unable to acquire stats lock\n");
int bnx2x_stats_safe_exec(struct bnx2x *bp,
void (func_to_exec)(void *cookie),
void *cookie)
{
int cnt = 10, rc = 0;
/* Wait for statistics to end [while blocking further requests],
* then run supplied function 'safely'.
*/
mutex_lock(&bp->stats_lock);
bnx2x_stats_comp(bp);
while (bp->stats_pending && cnt--)
if (bnx2x_storm_stats_update(bp))
usleep_range(1000, 2000);
if (bp->stats_pending) {
BNX2X_ERR("Failed to wait for stats pending to clear [possibly FW is stuck]\n");
rc = -EBUSY;
goto out;
}
func_to_exec(cookie);
__bnx2x_stats_start(bp);
up(&bp->stats_sema);
out:
/* No need to restart statistics - if they're enabled, the timer
* will restart the statistics.
*/
mutex_unlock(&bp->stats_lock);
return rc;
}
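
The reworked bnx2x_stats_handle() above swaps the stats semaphore for a mutex and treats the timer-driven STATS_EVENT_UPDATE specially: the timer gives up on contention, every other event waits. A minimal sketch of that locking pattern, using illustrative names rather than the driver's own:

/* Sketch only: run a state-machine action under a mutex without ever
 * blocking a timer-context caller on contention.
 */
static void stats_event_sketch(struct mutex *lock, bool from_timer,
			       void (*action)(void *ctx), void *ctx)
{
	if (!mutex_trylock(lock)) {
		if (from_timer)
			return;		/* drop this update, retry next tick */
		mutex_lock(lock);	/* rare events may wait briefly */
	}
	action(ctx);			/* state transition happens under the lock */
	mutex_unlock(lock);
}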

View File

@ -539,9 +539,9 @@ struct bnx2x;
void bnx2x_memset_stats(struct bnx2x *bp);
void bnx2x_stats_init(struct bnx2x *bp);
void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
void bnx2x_stats_safe_exec(struct bnx2x *bp,
void (func_to_exec)(void *cookie),
void *cookie);
int bnx2x_stats_safe_exec(struct bnx2x *bp,
void (func_to_exec)(void *cookie),
void *cookie);
/**
* bnx2x_save_statistics - save statistics when unloading.

View File

@ -376,8 +376,6 @@ enum {
enum {
INGQ_EXTRAS = 2, /* firmware event queue and */
/* forwarded interrupts */
MAX_EGRQ = MAX_ETH_QSETS*2 + MAX_OFLD_QSETS*2
+ MAX_CTRL_QUEUES + MAX_RDMA_QUEUES + MAX_ISCSI_QUEUES,
MAX_INGQ = MAX_ETH_QSETS + MAX_OFLD_QSETS + MAX_RDMA_QUEUES
+ MAX_RDMA_CIQS + MAX_ISCSI_QUEUES + INGQ_EXTRAS,
};
@ -616,11 +614,13 @@ struct sge {
unsigned int idma_qid[2]; /* SGE IDMA Hung Ingress Queue ID */
unsigned int egr_start;
unsigned int egr_sz;
unsigned int ingr_start;
void *egr_map[MAX_EGRQ]; /* qid->queue egress queue map */
struct sge_rspq *ingr_map[MAX_INGQ]; /* qid->queue ingress queue map */
DECLARE_BITMAP(starving_fl, MAX_EGRQ);
DECLARE_BITMAP(txq_maperr, MAX_EGRQ);
unsigned int ingr_sz;
void **egr_map; /* qid->queue egress queue map */
struct sge_rspq **ingr_map; /* qid->queue ingress queue map */
unsigned long *starving_fl;
unsigned long *txq_maperr;
struct timer_list rx_timer; /* refills starving FLs */
struct timer_list tx_timer; /* checks Tx queues */
};
@ -1136,6 +1136,8 @@ int cxgb4_t4_bar2_sge_qregs(struct adapter *adapter,
unsigned int qtimer_val(const struct adapter *adap,
const struct sge_rspq *q);
int t4_init_devlog_params(struct adapter *adapter);
int t4_init_sge_params(struct adapter *adapter);
int t4_init_tp_params(struct adapter *adap);
int t4_filter_field_shift(const struct adapter *adap, int filter_sel);

View File

@ -670,9 +670,13 @@ static int cctrl_tbl_show(struct seq_file *seq, void *v)
"0.9375" };
int i;
u16 incr[NMTUS][NCCTRL_WIN];
u16 (*incr)[NCCTRL_WIN];
struct adapter *adap = seq->private;
incr = kmalloc(sizeof(*incr) * NMTUS, GFP_KERNEL);
if (!incr)
return -ENOMEM;
t4_read_cong_tbl(adap, incr);
for (i = 0; i < NCCTRL_WIN; ++i) {
@ -685,6 +689,8 @@ static int cctrl_tbl_show(struct seq_file *seq, void *v)
adap->params.a_wnd[i],
dec_fac[adap->params.b_wnd[i]]);
}
kfree(incr);
return 0;
}

View File

@ -920,7 +920,7 @@ static void quiesce_rx(struct adapter *adap)
{
int i;
for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
for (i = 0; i < adap->sge.ingr_sz; i++) {
struct sge_rspq *q = adap->sge.ingr_map[i];
if (q && q->handler) {
@ -934,6 +934,21 @@ static void quiesce_rx(struct adapter *adap)
}
}
/* Disable interrupt and napi handler */
static void disable_interrupts(struct adapter *adap)
{
if (adap->flags & FULL_INIT_DONE) {
t4_intr_disable(adap);
if (adap->flags & USING_MSIX) {
free_msix_queue_irqs(adap);
free_irq(adap->msix_info[0].vec, adap);
} else {
free_irq(adap->pdev->irq, adap);
}
quiesce_rx(adap);
}
}
/*
* Enable NAPI scheduling and interrupt generation for all Rx queues.
*/
@ -941,7 +956,7 @@ static void enable_rx(struct adapter *adap)
{
int i;
for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
for (i = 0; i < adap->sge.ingr_sz; i++) {
struct sge_rspq *q = adap->sge.ingr_map[i];
if (!q)
@ -970,8 +985,8 @@ static int setup_sge_queues(struct adapter *adap)
int err, msi_idx, i, j;
struct sge *s = &adap->sge;
bitmap_zero(s->starving_fl, MAX_EGRQ);
bitmap_zero(s->txq_maperr, MAX_EGRQ);
bitmap_zero(s->starving_fl, s->egr_sz);
bitmap_zero(s->txq_maperr, s->egr_sz);
if (adap->flags & USING_MSIX)
msi_idx = 1; /* vector 0 is for non-queue interrupts */
@ -983,6 +998,19 @@ static int setup_sge_queues(struct adapter *adap)
msi_idx = -((int)s->intrq.abs_id + 1);
}
/* NOTE: If you add/delete any Ingress/Egress Queue allocations in here,
* don't forget to update the following, which need to be
* kept in sync with any changes here.
*
* 1. The calculations of MAX_INGQ in cxgb4.h.
*
* 2. Update enable_msix/name_msix_vecs/request_msix_queue_irqs
* to accommodate any new/deleted Ingress Queues
* which need MSI-X Vectors.
*
* 3. Update sge_qinfo_show() to include information on the
* new/deleted queues.
*/
err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
msi_idx, NULL, fwevtq_handler);
if (err) {
@ -4244,19 +4272,12 @@ static int cxgb_up(struct adapter *adap)
static void cxgb_down(struct adapter *adapter)
{
t4_intr_disable(adapter);
cancel_work_sync(&adapter->tid_release_task);
cancel_work_sync(&adapter->db_full_task);
cancel_work_sync(&adapter->db_drop_task);
adapter->tid_release_task_busy = false;
adapter->tid_release_head = NULL;
if (adapter->flags & USING_MSIX) {
free_msix_queue_irqs(adapter);
free_irq(adapter->msix_info[0].vec, adapter);
} else
free_irq(adapter->pdev->irq, adapter);
quiesce_rx(adapter);
t4_sge_stop(adapter);
t4_free_sge_resources(adapter);
adapter->flags &= ~FULL_INIT_DONE;
@ -4733,8 +4754,9 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
if (ret < 0)
return ret;
ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, MAX_EGRQ, 64, MAX_INGQ,
0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF);
ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, adap->sge.egr_sz, 64,
MAX_INGQ, 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF,
FW_CMD_CAP_PF);
if (ret < 0)
return ret;
@ -5088,10 +5110,15 @@ static int adap_init0(struct adapter *adap)
enum dev_state state;
u32 params[7], val[7];
struct fw_caps_config_cmd caps_cmd;
struct fw_devlog_cmd devlog_cmd;
u32 devlog_meminfo;
int reset = 1;
/* Grab Firmware Device Log parameters as early as possible so we have
* access to it for debugging, etc.
*/
ret = t4_init_devlog_params(adap);
if (ret < 0)
return ret;
/* Contact FW, advertising Master capability */
ret = t4_fw_hello(adap, adap->mbox, adap->mbox, MASTER_MAY, &state);
if (ret < 0) {
@ -5169,30 +5196,6 @@ static int adap_init0(struct adapter *adap)
if (ret < 0)
goto bye;
/* Read firmware device log parameters. We really need to find a way
* to get these parameters initialized with some default values (which
* are likely to be correct) for the case where we either don't
* attache to the firmware or it's crashed when we probe the adapter.
* That way we'll still be able to perform early firmware startup
* debugging ... If the request to get the Firmware's Device Log
* parameters fails, we'll live so we don't make that a fatal error.
*/
memset(&devlog_cmd, 0, sizeof(devlog_cmd));
devlog_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_DEVLOG_CMD) |
FW_CMD_REQUEST_F | FW_CMD_READ_F);
devlog_cmd.retval_len16 = htonl(FW_LEN16(devlog_cmd));
ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
&devlog_cmd);
if (ret == 0) {
devlog_meminfo =
ntohl(devlog_cmd.memtype_devlog_memaddr16_devlog);
adap->params.devlog.memtype =
FW_DEVLOG_CMD_MEMTYPE_DEVLOG_G(devlog_meminfo);
adap->params.devlog.start =
FW_DEVLOG_CMD_MEMADDR16_DEVLOG_G(devlog_meminfo) << 4;
adap->params.devlog.size = ntohl(devlog_cmd.memsize_devlog);
}
/*
* Find out what ports are available to us. Note that we need to do
* this before calling adap_init0_no_config() since it needs nports
@ -5293,6 +5296,51 @@ static int adap_init0(struct adapter *adap)
adap->tids.nftids = val[4] - val[3] + 1;
adap->sge.ingr_start = val[5];
/* qids (ingress/egress) returned from firmware can be anywhere
* in the range from EQ(IQFLINT)_START to EQ(IQFLINT)_END.
* Hence driver needs to allocate memory for this range to
* store the queue info. Get the highest IQFLINT/EQ index returned
* in FW_EQ_*_CMD.alloc command.
*/
params[0] = FW_PARAM_PFVF(EQ_END);
params[1] = FW_PARAM_PFVF(IQFLINT_END);
ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
if (ret < 0)
goto bye;
adap->sge.egr_sz = val[0] - adap->sge.egr_start + 1;
adap->sge.ingr_sz = val[1] - adap->sge.ingr_start + 1;
adap->sge.egr_map = kcalloc(adap->sge.egr_sz,
sizeof(*adap->sge.egr_map), GFP_KERNEL);
if (!adap->sge.egr_map) {
ret = -ENOMEM;
goto bye;
}
adap->sge.ingr_map = kcalloc(adap->sge.ingr_sz,
sizeof(*adap->sge.ingr_map), GFP_KERNEL);
if (!adap->sge.ingr_map) {
ret = -ENOMEM;
goto bye;
}
/* Allocate the memory for the various egress queue bitmaps,
* i.e. starving_fl and txq_maperr.
*/
adap->sge.starving_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
sizeof(long), GFP_KERNEL);
if (!adap->sge.starving_fl) {
ret = -ENOMEM;
goto bye;
}
adap->sge.txq_maperr = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
sizeof(long), GFP_KERNEL);
if (!adap->sge.txq_maperr) {
ret = -ENOMEM;
goto bye;
}
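
For a sense of scale (example numbers only, not values from the driver): on a 64-bit kernel an egr_sz of 1024 queues needs BITS_TO_LONGS(1024) = 16 longs per bitmap, i.e. 128 bytes each, so sizing these maps from the firmware-reported range instead of the old compile-time MAX_EGRQ costs very little memory. A tiny sketch of the allocation pattern (nqueues is a made-up parameter name):

/* Illustrative only: a queue-id bitmap sized from a firmware-reported
 * count rather than a compile-time maximum.
 */
static unsigned long *alloc_qid_bitmap(unsigned int nqueues)
{
	return kcalloc(BITS_TO_LONGS(nqueues), sizeof(long), GFP_KERNEL);
}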
params[0] = FW_PARAM_PFVF(CLIP_START);
params[1] = FW_PARAM_PFVF(CLIP_END);
ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
@ -5501,6 +5549,10 @@ static int adap_init0(struct adapter *adap)
* happened to HW/FW, stop issuing commands.
*/
bye:
kfree(adap->sge.egr_map);
kfree(adap->sge.ingr_map);
kfree(adap->sge.starving_fl);
kfree(adap->sge.txq_maperr);
if (ret != -ETIMEDOUT && ret != -EIO)
t4_fw_bye(adap, adap->mbox);
return ret;
@ -5528,6 +5580,7 @@ static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
netif_carrier_off(dev);
}
spin_unlock(&adap->stats_lock);
disable_interrupts(adap);
if (adap->flags & FULL_INIT_DONE)
cxgb_down(adap);
rtnl_unlock();
@ -5912,6 +5965,10 @@ static void free_some_resources(struct adapter *adapter)
t4_free_mem(adapter->l2t);
t4_free_mem(adapter->tids.tid_tab);
kfree(adapter->sge.egr_map);
kfree(adapter->sge.ingr_map);
kfree(adapter->sge.starving_fl);
kfree(adapter->sge.txq_maperr);
disable_msi(adapter);
for_each_port(adapter, i)
@ -6237,6 +6294,8 @@ static void remove_one(struct pci_dev *pdev)
if (is_offload(adapter))
detach_ulds(adapter);
disable_interrupts(adapter);
for_each_port(adapter, i)
if (adapter->port[i]->reg_state == NETREG_REGISTERED)
unregister_netdev(adapter->port[i]);

View File

@ -2171,7 +2171,7 @@ static void sge_rx_timer_cb(unsigned long data)
struct adapter *adap = (struct adapter *)data;
struct sge *s = &adap->sge;
for (i = 0; i < ARRAY_SIZE(s->starving_fl); i++)
for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++)
for (m = s->starving_fl[i]; m; m &= m - 1) {
struct sge_eth_rxq *rxq;
unsigned int id = __ffs(m) + i * BITS_PER_LONG;
@ -2259,7 +2259,7 @@ static void sge_tx_timer_cb(unsigned long data)
struct adapter *adap = (struct adapter *)data;
struct sge *s = &adap->sge;
for (i = 0; i < ARRAY_SIZE(s->txq_maperr); i++)
for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++)
for (m = s->txq_maperr[i]; m; m &= m - 1) {
unsigned long id = __ffs(m) + i * BITS_PER_LONG;
struct sge_ofld_txq *txq = s->egr_map[id];
@ -2741,7 +2741,8 @@ void t4_free_sge_resources(struct adapter *adap)
free_rspq_fl(adap, &adap->sge.intrq, NULL);
/* clear the reverse egress queue map */
memset(adap->sge.egr_map, 0, sizeof(adap->sge.egr_map));
memset(adap->sge.egr_map, 0,
adap->sge.egr_sz * sizeof(*adap->sge.egr_map));
}
void t4_sge_start(struct adapter *adap)

View File

@ -4458,6 +4458,59 @@ int cxgb4_t4_bar2_sge_qregs(struct adapter *adapter,
return 0;
}
/**
* t4_init_devlog_params - initialize adapter->params.devlog
* @adap: the adapter
*
* Initialize various fields of the adapter's Firmware Device Log
* Parameters structure.
*/
int t4_init_devlog_params(struct adapter *adap)
{
struct devlog_params *dparams = &adap->params.devlog;
u32 pf_dparams;
unsigned int devlog_meminfo;
struct fw_devlog_cmd devlog_cmd;
int ret;
/* If we're dealing with newer firmware, the Device Log Parameters
* are stored in a designated register which allows us to access the
* Device Log even if we can't talk to the firmware.
*/
pf_dparams =
t4_read_reg(adap, PCIE_FW_REG(PCIE_FW_PF_A, PCIE_FW_PF_DEVLOG));
if (pf_dparams) {
unsigned int nentries, nentries128;
dparams->memtype = PCIE_FW_PF_DEVLOG_MEMTYPE_G(pf_dparams);
dparams->start = PCIE_FW_PF_DEVLOG_ADDR16_G(pf_dparams) << 4;
nentries128 = PCIE_FW_PF_DEVLOG_NENTRIES128_G(pf_dparams);
nentries = (nentries128 + 1) * 128;
dparams->size = nentries * sizeof(struct fw_devlog_e);
return 0;
}
/* Otherwise, ask the firmware for its Device Log Parameters.
*/
memset(&devlog_cmd, 0, sizeof(devlog_cmd));
devlog_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_DEVLOG_CMD) |
FW_CMD_REQUEST_F | FW_CMD_READ_F);
devlog_cmd.retval_len16 = htonl(FW_LEN16(devlog_cmd));
ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
&devlog_cmd);
if (ret)
return ret;
devlog_meminfo = ntohl(devlog_cmd.memtype_devlog_memaddr16_devlog);
dparams->memtype = FW_DEVLOG_CMD_MEMTYPE_DEVLOG_G(devlog_meminfo);
dparams->start = FW_DEVLOG_CMD_MEMADDR16_DEVLOG_G(devlog_meminfo) << 4;
dparams->size = ntohl(devlog_cmd.memsize_devlog);
return 0;
}
/**
* t4_init_sge_params - initialize adap->params.sge
* @adapter: the adapter

View File

@ -63,6 +63,8 @@
#define MC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
#define EDC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
#define PCIE_FW_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
#define SGE_PF_KDOORBELL_A 0x0
#define QID_S 15
@ -707,6 +709,7 @@
#define PFNUM_V(x) ((x) << PFNUM_S)
#define PCIE_FW_A 0x30b8
#define PCIE_FW_PF_A 0x30bc
#define PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS_A 0x5908

View File

@ -101,7 +101,7 @@ enum fw_wr_opcodes {
FW_RI_BIND_MW_WR = 0x18,
FW_RI_FR_NSMR_WR = 0x19,
FW_RI_INV_LSTAG_WR = 0x1a,
FW_LASTC2E_WR = 0x40
FW_LASTC2E_WR = 0x70
};
struct fw_wr_hdr {
@ -993,6 +993,7 @@ enum fw_memtype_cf {
FW_MEMTYPE_CF_EXTMEM = 0x2,
FW_MEMTYPE_CF_FLASH = 0x4,
FW_MEMTYPE_CF_INTERNAL = 0x5,
FW_MEMTYPE_CF_EXTMEM1 = 0x6,
};
struct fw_caps_config_cmd {
@ -1035,6 +1036,7 @@ enum fw_params_mnem {
FW_PARAMS_MNEM_PFVF = 2, /* function params */
FW_PARAMS_MNEM_REG = 3, /* limited register access */
FW_PARAMS_MNEM_DMAQ = 4, /* dma queue params */
FW_PARAMS_MNEM_CHNET = 5, /* chnet params */
FW_PARAMS_MNEM_LAST
};
@ -3102,7 +3104,8 @@ enum fw_devlog_facility {
FW_DEVLOG_FACILITY_FCOE = 0x2E,
FW_DEVLOG_FACILITY_FOISCSI = 0x30,
FW_DEVLOG_FACILITY_FOFCOE = 0x32,
FW_DEVLOG_FACILITY_MAX = 0x32,
FW_DEVLOG_FACILITY_CHNET = 0x34,
FW_DEVLOG_FACILITY_MAX = 0x34,
};
/* log message format */
@ -3139,4 +3142,36 @@ struct fw_devlog_cmd {
(((x) >> FW_DEVLOG_CMD_MEMADDR16_DEVLOG_S) & \
FW_DEVLOG_CMD_MEMADDR16_DEVLOG_M)
/* P C I E F W P F 7 R E G I S T E R */
/* PF7 stores the Firmware Device Log parameters which allow Host Drivers to
* access the "devlog" without needing to contact firmware. The encoding is
* mostly the same as that returned by the DEVLOG command, except that the
* size is encoded here as the number of 128-entry blocks minus 1 rather than
* as a memory size as in the DEVLOG command. Thus, 0 means 128 entries and
* 15 means 2048 entries, which in turn constrains the allowed values for the
* devlog size.
*/
#define PCIE_FW_PF_DEVLOG 7
#define PCIE_FW_PF_DEVLOG_NENTRIES128_S 28
#define PCIE_FW_PF_DEVLOG_NENTRIES128_M 0xf
#define PCIE_FW_PF_DEVLOG_NENTRIES128_V(x) \
((x) << PCIE_FW_PF_DEVLOG_NENTRIES128_S)
#define PCIE_FW_PF_DEVLOG_NENTRIES128_G(x) \
(((x) >> PCIE_FW_PF_DEVLOG_NENTRIES128_S) & \
PCIE_FW_PF_DEVLOG_NENTRIES128_M)
#define PCIE_FW_PF_DEVLOG_ADDR16_S 4
#define PCIE_FW_PF_DEVLOG_ADDR16_M 0xffffff
#define PCIE_FW_PF_DEVLOG_ADDR16_V(x) ((x) << PCIE_FW_PF_DEVLOG_ADDR16_S)
#define PCIE_FW_PF_DEVLOG_ADDR16_G(x) \
(((x) >> PCIE_FW_PF_DEVLOG_ADDR16_S) & PCIE_FW_PF_DEVLOG_ADDR16_M)
#define PCIE_FW_PF_DEVLOG_MEMTYPE_S 0
#define PCIE_FW_PF_DEVLOG_MEMTYPE_M 0xf
#define PCIE_FW_PF_DEVLOG_MEMTYPE_V(x) ((x) << PCIE_FW_PF_DEVLOG_MEMTYPE_S)
#define PCIE_FW_PF_DEVLOG_MEMTYPE_G(x) \
(((x) >> PCIE_FW_PF_DEVLOG_MEMTYPE_S) & PCIE_FW_PF_DEVLOG_MEMTYPE_M)
#endif /* _T4FW_INTERFACE_H_ */
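
As a quick illustration of the field layout described in the comment above, a hypothetical helper (not part of the driver) could decode a raw PCIE_FW_PF_DEVLOG word into the same devlog_params fields that t4_init_devlog_params() fills in earlier in this diff; devlog_decode_example and its arguments are assumptions made for this sketch:

/* Sketch: decode the packed devlog parameters register.  A NENTRIES128
 * value of 3, for example, means (3 + 1) * 128 = 512 log entries.
 */
static void devlog_decode_example(u32 pf_dparams, struct devlog_params *dparams)
{
	unsigned int nentries128 = PCIE_FW_PF_DEVLOG_NENTRIES128_G(pf_dparams);

	dparams->memtype = PCIE_FW_PF_DEVLOG_MEMTYPE_G(pf_dparams);
	dparams->start   = PCIE_FW_PF_DEVLOG_ADDR16_G(pf_dparams) << 4;
	dparams->size    = (nentries128 + 1) * 128 * sizeof(struct fw_devlog_e);
}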

View File

@ -36,13 +36,13 @@
#define __T4FW_VERSION_H__
#define T4FW_VERSION_MAJOR 0x01
#define T4FW_VERSION_MINOR 0x0C
#define T4FW_VERSION_MICRO 0x19
#define T4FW_VERSION_MINOR 0x0D
#define T4FW_VERSION_MICRO 0x20
#define T4FW_VERSION_BUILD 0x00
#define T5FW_VERSION_MAJOR 0x01
#define T5FW_VERSION_MINOR 0x0C
#define T5FW_VERSION_MICRO 0x19
#define T5FW_VERSION_MINOR 0x0D
#define T5FW_VERSION_MICRO 0x20
#define T5FW_VERSION_BUILD 0x00
#endif

View File

@ -1004,7 +1004,7 @@ static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq,
? (tq->pidx - 1)
: (tq->size - 1));
__be64 *src = (__be64 *)&tq->desc[index];
__be64 __iomem *dst = (__be64 *)(tq->bar2_addr +
__be64 __iomem *dst = (__be64 __iomem *)(tq->bar2_addr +
SGE_UDB_WCDOORBELL);
unsigned int count = EQ_UNIT / sizeof(__be64);
@ -1018,7 +1018,11 @@ static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq,
* DMA.
*/
while (count) {
writeq(*src, dst);
/* the (__force u64) is because the compiler
* doesn't understand the endian swizzling
* going on
*/
writeq((__force u64)*src, dst);
src++;
dst++;
count--;
@ -1252,8 +1256,8 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
BUG_ON(DIV_ROUND_UP(ETHTXQ_MAX_HDR, TXD_PER_EQ_UNIT) > 1);
wr = (void *)&txq->q.desc[txq->q.pidx];
wr->equiq_to_len16 = cpu_to_be32(wr_mid);
wr->r3[0] = cpu_to_be64(0);
wr->r3[1] = cpu_to_be64(0);
wr->r3[0] = cpu_to_be32(0);
wr->r3[1] = cpu_to_be32(0);
skb_copy_from_linear_data(skb, (void *)wr->ethmacdst, fw_hdr_copy_len);
end = (u64 *)wr + flits;

View File

@ -210,10 +210,10 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
if (rpl) {
/* request bit in high-order BE word */
WARN_ON((be32_to_cpu(*(const u32 *)cmd)
WARN_ON((be32_to_cpu(*(const __be32 *)cmd)
& FW_CMD_REQUEST_F) == 0);
get_mbox_rpl(adapter, rpl, size, mbox_data);
WARN_ON((be32_to_cpu(*(u32 *)rpl)
WARN_ON((be32_to_cpu(*(__be32 *)rpl)
& FW_CMD_REQUEST_F) != 0);
}
t4_write_reg(adapter, mbox_ctl,
@ -484,7 +484,7 @@ int t4_bar2_sge_qregs(struct adapter *adapter,
* o The BAR2 Queue ID.
* o The BAR2 Queue ID Offset into the BAR2 page.
*/
bar2_page_offset = ((qid >> qpp_shift) << page_shift);
bar2_page_offset = ((u64)(qid >> qpp_shift) << page_shift);
bar2_qid = qid & qpp_mask;
bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;

View File

@ -1954,6 +1954,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
struct fec_enet_private *fep = netdev_priv(ndev);
struct device_node *node;
int err = -ENXIO, i;
u32 mii_speed, holdtime;
/*
* The i.MX28 dual fec interfaces are not equal.
@ -1991,10 +1992,33 @@ static int fec_enet_mii_init(struct platform_device *pdev)
* Reference Manual has an error on this, and gets fixed on i.MX6Q
* document.
*/
fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 5000000);
mii_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 5000000);
if (fep->quirks & FEC_QUIRK_ENET_MAC)
fep->phy_speed--;
fep->phy_speed <<= 1;
mii_speed--;
if (mii_speed > 63) {
dev_err(&pdev->dev,
"fec clock (%lu) to fast to get right mii speed\n",
clk_get_rate(fep->clk_ipg));
err = -EINVAL;
goto err_out;
}
/*
* The i.MX28 and i.MX6 types have another field in the MSCR (aka
* MII_SPEED) register that defines the MDIO output hold time. Earlier
* versions are RAZ there, so just ignore the difference and write the
* register always.
* The minimal hold time according to IEEE 802.3 (clause 22) is 10 ns.
* HOLDTIME + 1 is the number of clk cycles the fec is holding the
* output.
* The HOLDTIME bitfield takes values between 0 and 7 (inclusive).
* Given that ceil(clkrate / 5000000) <= 64, the calculation for
* holdtime cannot result in a value greater than 3.
*/
holdtime = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 100000000) - 1;
fep->phy_speed = mii_speed << 1 | holdtime << 8;
writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
fep->mii_bus = mdiobus_alloc();
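
To make the arithmetic in the comment above concrete, here is a hypothetical helper (not driver code) that recomputes the MSCR value; the 66 MHz clock rate and the function name are only illustrative:

/* Example: clk_rate = 66 MHz with FEC_QUIRK_ENET_MAC gives
 * mii_speed = ceil(66000000 / 5000000) - 1 = 13 and
 * holdtime  = ceil(66000000 / 100000000) - 1 = 0,
 * so the MSCR value is (13 << 1) | (0 << 8) = 0x1a.
 */
static u32 fec_mscr_example(unsigned long clk_rate, bool enet_mac_quirk)
{
	u32 mii_speed = DIV_ROUND_UP(clk_rate, 5000000);
	u32 holdtime  = DIV_ROUND_UP(clk_rate, 100000000) - 1;

	if (enet_mac_quirk)
		mii_speed--;

	return mii_speed << 1 | holdtime << 8;
}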

View File

@ -3893,6 +3893,9 @@ static int ucc_geth_probe(struct platform_device* ofdev)
ugeth->phy_interface = phy_interface;
ugeth->max_speed = max_speed;
/* Carrier starts down, phylib will bring it up */
netif_carrier_off(dev);
err = register_netdev(dev);
if (err) {
if (netif_msg_probe(ugeth))

View File

@ -2658,16 +2658,11 @@ static int mvneta_stop(struct net_device *dev)
static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct mvneta_port *pp = netdev_priv(dev);
int ret;
if (!pp->phy_dev)
return -ENOTSUPP;
ret = phy_mii_ioctl(pp->phy_dev, ifr, cmd);
if (!ret)
mvneta_adjust_link(dev);
return ret;
return phy_mii_ioctl(pp->phy_dev, ifr, cmd);
}
/* Ethtool methods */

View File

@ -724,7 +724,8 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
* on the host, we deprecate the error message for this
* specific command/input_mod/opcode_mod/fw-status to be debug.
*/
if (op == MLX4_CMD_SET_PORT && in_modifier == 1 &&
if (op == MLX4_CMD_SET_PORT &&
(in_modifier == 1 || in_modifier == 2) &&
op_modifier == 0 && context->fw_status == CMD_STAT_BAD_SIZE)
mlx4_dbg(dev, "command 0x%x failed: fw status = 0x%x\n",
op, context->fw_status);
@ -1993,7 +1994,6 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
goto reset_slave;
slave_state[slave].vhcr_dma = ((u64) param) << 48;
priv->mfunc.master.slave_state[slave].cookie = 0;
mutex_init(&priv->mfunc.master.gen_eqe_mutex[slave]);
break;
case MLX4_COMM_CMD_VHCR1:
if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR0)
@ -2225,6 +2225,7 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
for (i = 0; i < dev->num_slaves; ++i) {
s_state = &priv->mfunc.master.slave_state[i];
s_state->last_cmd = MLX4_COMM_CMD_RESET;
mutex_init(&priv->mfunc.master.gen_eqe_mutex[i]);
for (j = 0; j < MLX4_EVENT_TYPES_NUM; ++j)
s_state->event_eq[j].eqn = -1;
__raw_writel((__force u32) 0,

View File

@ -2805,13 +2805,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
netif_carrier_off(dev);
mlx4_en_set_default_moderation(priv);
err = register_netdev(dev);
if (err) {
en_err(priv, "Netdev registration failed for port %d\n", port);
goto out;
}
priv->registered = 1;
en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);
@ -2853,6 +2846,14 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap);
err = register_netdev(dev);
if (err) {
en_err(priv, "Netdev registration failed for port %d\n", port);
goto out;
}
priv->registered = 1;
return 0;
out:

View File

@ -153,12 +153,10 @@ void mlx4_gen_slave_eqe(struct work_struct *work)
/* All active slaves need to receive the event */
if (slave == ALL_SLAVES) {
for (i = 0; i < dev->num_slaves; i++) {
if (i != dev->caps.function &&
master->slave_state[i].active)
if (mlx4_GEN_EQE(dev, i, eqe))
mlx4_warn(dev, "Failed to generate event for slave %d\n",
i);
for (i = 0; i <= dev->persist->num_vfs; i++) {
if (mlx4_GEN_EQE(dev, i, eqe))
mlx4_warn(dev, "Failed to generate event for slave %d\n",
i);
}
} else {
if (mlx4_GEN_EQE(dev, slave, eqe))
@ -203,13 +201,11 @@ static void mlx4_slave_event(struct mlx4_dev *dev, int slave,
struct mlx4_eqe *eqe)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_slave_state *s_slave =
&priv->mfunc.master.slave_state[slave];
if (!s_slave->active) {
/*mlx4_warn(dev, "Trying to pass event to inactive slave\n");*/
if (slave < 0 || slave > dev->persist->num_vfs ||
slave == dev->caps.function ||
!priv->mfunc.master.slave_state[slave].active)
return;
}
slave_event(dev, slave, eqe);
}

View File

@ -3095,6 +3095,12 @@ int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
if (!priv->mfunc.master.slave_state)
return -EINVAL;
/* check for slave valid, slave not PF, and slave active */
if (slave < 0 || slave > dev->persist->num_vfs ||
slave == dev->caps.function ||
!priv->mfunc.master.slave_state[slave].active)
return 0;
event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
/* Create the event only if the slave is registered */

View File

@ -4468,10 +4468,16 @@ static int rocker_port_master_changed(struct net_device *dev)
struct net_device *master = netdev_master_upper_dev_get(dev);
int err = 0;
/* There are currently three cases handled here:
* 1. Joining a bridge
* 2. Leaving a previously joined bridge
* 3. Other, e.g. being added to or removed from a bond or openvswitch,
* in which case nothing is done
*/
if (master && master->rtnl_link_ops &&
!strcmp(master->rtnl_link_ops->kind, "bridge"))
err = rocker_port_bridge_join(rocker_port, master);
else
else if (rocker_port_is_bridged(rocker_port))
err = rocker_port_bridge_leave(rocker_port);
return err;

View File

@ -114,7 +114,9 @@ unsigned int ipvlan_mac_hash(const unsigned char *addr);
rx_handler_result_t ipvlan_handle_frame(struct sk_buff **pskb);
int ipvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev);
void ipvlan_ht_addr_add(struct ipvl_dev *ipvlan, struct ipvl_addr *addr);
bool ipvlan_addr_busy(struct ipvl_dev *ipvlan, void *iaddr, bool is_v6);
struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan,
const void *iaddr, bool is_v6);
bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6);
struct ipvl_addr *ipvlan_ht_addr_lookup(const struct ipvl_port *port,
const void *iaddr, bool is_v6);
void ipvlan_ht_addr_del(struct ipvl_addr *addr, bool sync);

View File

@ -81,19 +81,20 @@ void ipvlan_ht_addr_add(struct ipvl_dev *ipvlan, struct ipvl_addr *addr)
hash = (addr->atype == IPVL_IPV6) ?
ipvlan_get_v6_hash(&addr->ip6addr) :
ipvlan_get_v4_hash(&addr->ip4addr);
hlist_add_head_rcu(&addr->hlnode, &port->hlhead[hash]);
if (hlist_unhashed(&addr->hlnode))
hlist_add_head_rcu(&addr->hlnode, &port->hlhead[hash]);
}
void ipvlan_ht_addr_del(struct ipvl_addr *addr, bool sync)
{
hlist_del_rcu(&addr->hlnode);
hlist_del_init_rcu(&addr->hlnode);
if (sync)
synchronize_rcu();
}
bool ipvlan_addr_busy(struct ipvl_dev *ipvlan, void *iaddr, bool is_v6)
struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan,
const void *iaddr, bool is_v6)
{
struct ipvl_port *port = ipvlan->port;
struct ipvl_addr *addr;
list_for_each_entry(addr, &ipvlan->addrs, anode) {
@ -101,12 +102,21 @@ bool ipvlan_addr_busy(struct ipvl_dev *ipvlan, void *iaddr, bool is_v6)
ipv6_addr_equal(&addr->ip6addr, iaddr)) ||
(!is_v6 && addr->atype == IPVL_IPV4 &&
addr->ip4addr.s_addr == ((struct in_addr *)iaddr)->s_addr))
return addr;
}
return NULL;
}
bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6)
{
struct ipvl_dev *ipvlan;
ASSERT_RTNL();
list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
if (ipvlan_find_addr(ipvlan, iaddr, is_v6))
return true;
}
if (ipvlan_ht_addr_lookup(port, iaddr, is_v6))
return true;
return false;
}
@ -192,7 +202,8 @@ static void ipvlan_multicast_frame(struct ipvl_port *port, struct sk_buff *skb,
if (skb->protocol == htons(ETH_P_PAUSE))
return;
list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
rcu_read_lock();
list_for_each_entry_rcu(ipvlan, &port->ipvlans, pnode) {
if (local && (ipvlan == in_dev))
continue;
@ -219,6 +230,7 @@ static void ipvlan_multicast_frame(struct ipvl_port *port, struct sk_buff *skb,
mcast_acct:
ipvlan_count_rx(ipvlan, len, ret == NET_RX_SUCCESS, true);
}
rcu_read_unlock();
/* Locally generated? ...Forward a copy to the main-device as
* well. On the RX side we'll ignore it (won't give it to any

View File

@ -505,7 +505,7 @@ static void ipvlan_link_delete(struct net_device *dev, struct list_head *head)
if (ipvlan->ipv6cnt > 0 || ipvlan->ipv4cnt > 0) {
list_for_each_entry_safe(addr, next, &ipvlan->addrs, anode) {
ipvlan_ht_addr_del(addr, !dev->dismantle);
list_del_rcu(&addr->anode);
list_del(&addr->anode);
}
}
list_del_rcu(&ipvlan->pnode);
@ -607,7 +607,7 @@ static int ipvlan_add_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
{
struct ipvl_addr *addr;
if (ipvlan_addr_busy(ipvlan, ip6_addr, true)) {
if (ipvlan_addr_busy(ipvlan->port, ip6_addr, true)) {
netif_err(ipvlan, ifup, ipvlan->dev,
"Failed to add IPv6=%pI6c addr for %s intf\n",
ip6_addr, ipvlan->dev->name);
@ -620,9 +620,13 @@ static int ipvlan_add_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
addr->master = ipvlan;
memcpy(&addr->ip6addr, ip6_addr, sizeof(struct in6_addr));
addr->atype = IPVL_IPV6;
list_add_tail_rcu(&addr->anode, &ipvlan->addrs);
list_add_tail(&addr->anode, &ipvlan->addrs);
ipvlan->ipv6cnt++;
ipvlan_ht_addr_add(ipvlan, addr);
/* If the interface is not up, the address will be added to the hash
* list by ipvlan_open.
*/
if (netif_running(ipvlan->dev))
ipvlan_ht_addr_add(ipvlan, addr);
return 0;
}
@ -631,12 +635,12 @@ static void ipvlan_del_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
{
struct ipvl_addr *addr;
addr = ipvlan_ht_addr_lookup(ipvlan->port, ip6_addr, true);
addr = ipvlan_find_addr(ipvlan, ip6_addr, true);
if (!addr)
return;
ipvlan_ht_addr_del(addr, true);
list_del_rcu(&addr->anode);
list_del(&addr->anode);
ipvlan->ipv6cnt--;
WARN_ON(ipvlan->ipv6cnt < 0);
kfree_rcu(addr, rcu);
@ -675,7 +679,7 @@ static int ipvlan_add_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr)
{
struct ipvl_addr *addr;
if (ipvlan_addr_busy(ipvlan, ip4_addr, false)) {
if (ipvlan_addr_busy(ipvlan->port, ip4_addr, false)) {
netif_err(ipvlan, ifup, ipvlan->dev,
"Failed to add IPv4=%pI4 on %s intf.\n",
ip4_addr, ipvlan->dev->name);
@ -688,9 +692,13 @@ static int ipvlan_add_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr)
addr->master = ipvlan;
memcpy(&addr->ip4addr, ip4_addr, sizeof(struct in_addr));
addr->atype = IPVL_IPV4;
list_add_tail_rcu(&addr->anode, &ipvlan->addrs);
list_add_tail(&addr->anode, &ipvlan->addrs);
ipvlan->ipv4cnt++;
ipvlan_ht_addr_add(ipvlan, addr);
/* If the interface is not up, the address will be added to the hash
* list by ipvlan_open.
*/
if (netif_running(ipvlan->dev))
ipvlan_ht_addr_add(ipvlan, addr);
ipvlan_set_broadcast_mac_filter(ipvlan, true);
return 0;
@ -700,12 +708,12 @@ static void ipvlan_del_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr)
{
struct ipvl_addr *addr;
addr = ipvlan_ht_addr_lookup(ipvlan->port, ip4_addr, false);
addr = ipvlan_find_addr(ipvlan, ip4_addr, false);
if (!addr)
return;
ipvlan_ht_addr_del(addr, true);
list_del_rcu(&addr->anode);
list_del(&addr->anode);
ipvlan->ipv4cnt--;
WARN_ON(ipvlan->ipv4cnt < 0);
if (!ipvlan->ipv4cnt)

View File

@ -188,6 +188,8 @@ struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
memcpy(skb_tail_pointer(skb), &padbytes, sizeof(padbytes));
skb_put(skb, sizeof(padbytes));
}
usbnet_set_skb_tx_stats(skb, 1, 0);
return skb;
}

View File

@ -522,6 +522,7 @@ static const struct driver_info wwan_info = {
#define DELL_VENDOR_ID 0x413C
#define REALTEK_VENDOR_ID 0x0bda
#define SAMSUNG_VENDOR_ID 0x04e8
#define LENOVO_VENDOR_ID 0x17ef
static const struct usb_device_id products[] = {
/* BLACKLIST !!
@ -702,6 +703,13 @@ static const struct usb_device_id products[] = {
.driver_info = 0,
},
/* Lenovo Thinkpad USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */
{
USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x7205, USB_CLASS_COMM,
USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
.driver_info = 0,
},
/* WHITELIST!!!
*
* CDC Ether uses two interfaces, not necessarily consecutive.

View File

@ -1172,17 +1172,17 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
/* return skb */
ctx->tx_curr_skb = NULL;
dev->net->stats.tx_packets += ctx->tx_curr_frame_num;
/* keep private stats: framing overhead and number of NTBs */
ctx->tx_overhead += skb_out->len - ctx->tx_curr_frame_payload;
ctx->tx_ntbs++;
/* usbnet has already counted all the framing overhead.
/* usbnet will count all the framing overhead by default.
* Adjust the stats so that the tx_bytes counter show real
* payload data instead.
*/
dev->net->stats.tx_bytes -= skb_out->len - ctx->tx_curr_frame_payload;
usbnet_set_skb_tx_stats(skb_out, n,
ctx->tx_curr_frame_payload - skb_out->len);
return skb_out;

View File

@ -492,6 +492,7 @@ enum rtl8152_flags {
/* Define these values to match your device */
#define VENDOR_ID_REALTEK 0x0bda
#define VENDOR_ID_SAMSUNG 0x04e8
#define VENDOR_ID_LENOVO 0x17ef
#define MCU_TYPE_PLA 0x0100
#define MCU_TYPE_USB 0x0000
@ -4037,6 +4038,7 @@ static struct usb_device_id rtl8152_table[] = {
{REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8152)},
{REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8153)},
{REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7205)},
{}
};

View File

@ -144,6 +144,7 @@ static struct sk_buff *sr_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
skb_put(skb, sizeof(padbytes));
}
usbnet_set_skb_tx_stats(skb, 1, 0);
return skb;
}

View File

@ -1188,8 +1188,7 @@ static void tx_complete (struct urb *urb)
struct usbnet *dev = entry->dev;
if (urb->status == 0) {
if (!(dev->driver_info->flags & FLAG_MULTI_PACKET))
dev->net->stats.tx_packets++;
dev->net->stats.tx_packets += entry->packets;
dev->net->stats.tx_bytes += entry->length;
} else {
dev->net->stats.tx_errors++;
@ -1347,7 +1346,19 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
} else
urb->transfer_flags |= URB_ZERO_PACKET;
}
entry->length = urb->transfer_buffer_length = length;
urb->transfer_buffer_length = length;
if (info->flags & FLAG_MULTI_PACKET) {
/* Driver has set number of packets and a length delta.
* Calculate the complete length and ensure that it's
* positive.
*/
entry->length += length;
if (WARN_ON_ONCE(entry->length <= 0))
entry->length = length;
} else {
usbnet_set_skb_tx_stats(skb, 1, length);
}
spin_lock_irqsave(&dev->txq.lock, flags);
retval = usb_autopm_get_interface_async(dev->intf);
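
Putting the asix, sr9800, cdc_ncm and usbnet hunks together: a FLAG_MULTI_PACKET minidriver now reports, per aggregated URB, the number of packets plus a byte delta relative to the final URB length, and tx_complete() credits entry->packets / entry->length instead of assuming one packet per URB. A hedged sketch of what such a minidriver's tx_fixup would do, assuming (as the cdc_ncm call above implies) that the third argument of usbnet_set_skb_tx_stats() is a signed byte delta; the names below are illustrative, not from any real driver:

/* Sketch: report n_packets and payload-only bytes for an aggregated URB.
 * usbnet_start_xmit() later adds the final URB length to this delta, so
 * tx_bytes ends up counting payload rather than framing overhead.
 */
static struct sk_buff *example_multi_tx_fixup(struct sk_buff *skb_out,
					      unsigned int n_packets,
					      unsigned int payload_len)
{
	usbnet_set_skb_tx_stats(skb_out, n_packets,
				(long)payload_len - (long)skb_out->len);
	return skb_out;
}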

View File

@ -219,12 +219,15 @@ void ath9k_beacon_remove_slot(struct ath_softc *sc, struct ieee80211_vif *vif)
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_vif *avp = (void *)vif->drv_priv;
struct ath_buf *bf = avp->av_bcbuf;
struct ath_beacon_config *cur_conf = &sc->cur_chan->beacon;
ath_dbg(common, CONFIG, "Removing interface at beacon slot: %d\n",
avp->av_bslot);
tasklet_disable(&sc->bcon_tasklet);
cur_conf->enable_beacon &= ~BIT(avp->av_bslot);
if (bf && bf->bf_mpdu) {
struct sk_buff *skb = bf->bf_mpdu;
dma_unmap_single(sc->dev, bf->bf_buf_addr,
@ -521,8 +524,7 @@ static bool ath9k_allow_beacon_config(struct ath_softc *sc,
}
if (sc->sc_ah->opmode == NL80211_IFTYPE_AP) {
if ((vif->type != NL80211_IFTYPE_AP) ||
(sc->nbcnvifs > 1)) {
if (vif->type != NL80211_IFTYPE_AP) {
ath_dbg(common, CONFIG,
"An AP interface is already present !\n");
return false;
@ -616,12 +618,14 @@ void ath9k_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif,
* enabling/disabling SWBA.
*/
if (changed & BSS_CHANGED_BEACON_ENABLED) {
if (!bss_conf->enable_beacon &&
(sc->nbcnvifs <= 1)) {
cur_conf->enable_beacon = false;
} else if (bss_conf->enable_beacon) {
cur_conf->enable_beacon = true;
ath9k_cache_beacon_config(sc, ctx, bss_conf);
bool enabled = cur_conf->enable_beacon;
if (!bss_conf->enable_beacon) {
cur_conf->enable_beacon &= ~BIT(avp->av_bslot);
} else {
cur_conf->enable_beacon |= BIT(avp->av_bslot);
if (!enabled)
ath9k_cache_beacon_config(sc, ctx, bss_conf);
}
}

View File

@ -54,7 +54,7 @@ struct ath_beacon_config {
u16 dtim_period;
u16 bmiss_timeout;
u8 dtim_count;
bool enable_beacon;
u8 enable_beacon;
bool ibss_creator;
u32 nexttbtt;
u32 intval;

View File

@ -424,7 +424,7 @@ static void ath9k_hw_init_defaults(struct ath_hw *ah)
ah->power_mode = ATH9K_PM_UNDEFINED;
ah->htc_reset_init = true;
ah->tpc_enabled = true;
ah->tpc_enabled = false;
ah->ani_function = ATH9K_ANI_ALL;
if (!AR_SREV_9300_20_OR_LATER(ah))

View File

@ -126,7 +126,8 @@ void brcmf_feat_attach(struct brcmf_pub *drvr)
brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_MCHAN, "mchan");
if (drvr->bus_if->wowl_supported)
brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_WOWL, "wowl");
brcmf_feat_iovar_int_set(ifp, BRCMF_FEAT_MBSS, "mbss", 0);
if (drvr->bus_if->chip != BRCM_CC_43362_CHIP_ID)
brcmf_feat_iovar_int_set(ifp, BRCMF_FEAT_MBSS, "mbss", 0);
/* set chip related quirks */
switch (drvr->bus_if->chip) {

View File

@ -708,7 +708,6 @@ struct iwl_priv {
unsigned long reload_jiffies;
int reload_count;
bool ucode_loaded;
bool init_ucode_run; /* Don't run init uCode again */
u8 plcp_delta_threshold;

View File

@ -1114,16 +1114,17 @@ static void iwlagn_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
scd_queues &= ~(BIT(IWL_IPAN_CMD_QUEUE_NUM) |
BIT(IWL_DEFAULT_CMD_QUEUE_NUM));
if (vif)
scd_queues &= ~BIT(vif->hw_queue[IEEE80211_AC_VO]);
IWL_DEBUG_TX_QUEUES(priv, "Flushing SCD queues: 0x%x\n", scd_queues);
if (iwlagn_txfifo_flush(priv, scd_queues)) {
IWL_ERR(priv, "flush request fail\n");
goto done;
if (drop) {
IWL_DEBUG_TX_QUEUES(priv, "Flushing SCD queues: 0x%x\n",
scd_queues);
if (iwlagn_txfifo_flush(priv, scd_queues)) {
IWL_ERR(priv, "flush request fail\n");
goto done;
}
}
IWL_DEBUG_TX_QUEUES(priv, "wait transmit/flush all frames\n");
iwl_trans_wait_tx_queue_empty(priv->trans, 0xffffffff);
iwl_trans_wait_tx_queue_empty(priv->trans, scd_queues);
done:
mutex_unlock(&priv->mutex);
IWL_DEBUG_MAC80211(priv, "leave\n");

View File

@ -418,9 +418,6 @@ int iwl_run_init_ucode(struct iwl_priv *priv)
if (!priv->fw->img[IWL_UCODE_INIT].sec[0].len)
return 0;
if (priv->init_ucode_run)
return 0;
iwl_init_notification_wait(&priv->notif_wait, &calib_wait,
calib_complete, ARRAY_SIZE(calib_complete),
iwlagn_wait_calib, priv);
@ -440,8 +437,6 @@ int iwl_run_init_ucode(struct iwl_priv *priv)
*/
ret = iwl_wait_notification(&priv->notif_wait, &calib_wait,
UCODE_CALIB_TIMEOUT);
if (!ret)
priv->init_ucode_run = true;
goto out;

View File

@ -1257,6 +1257,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
op->name, err);
#endif
}
kfree(pieces);
return;
try_again:

View File

@ -1278,6 +1278,9 @@ static void rs_mac80211_tx_status(void *mvm_r,
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
if (!iwl_mvm_sta_from_mac80211(sta)->vif)
return;
if (!ieee80211_is_data(hdr->frame_control) ||
info->flags & IEEE80211_TX_CTL_NO_ACK)
return;
@ -2511,6 +2514,14 @@ static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta,
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct iwl_lq_sta *lq_sta = mvm_sta;
if (sta && !iwl_mvm_sta_from_mac80211(sta)->vif) {
/* if vif isn't initialized, mvm doesn't know about
* this station, so don't do anything with it
*/
sta = NULL;
mvm_sta = NULL;
}
/* TODO: handle rate_idx_mask and rate_idx_mcs_mask */
/* Treat uninitialized rate scaling data same as non-existing. */
@ -2827,6 +2838,9 @@ static void rs_rate_update(void *mvm_r,
(struct iwl_op_mode *)mvm_r;
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
if (!iwl_mvm_sta_from_mac80211(sta)->vif)
return;
/* Stop any ongoing aggregations as rs starts off assuming no agg */
for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++)
ieee80211_stop_tx_ba_session(sta, tid);
@ -3587,9 +3601,15 @@ static ssize_t iwl_dbgfs_ss_force_write(struct iwl_lq_sta *lq_sta, char *buf,
MVM_DEBUGFS_READ_WRITE_FILE_OPS(ss_force, 32);
static void rs_add_debugfs(void *mvm, void *mvm_sta, struct dentry *dir)
static void rs_add_debugfs(void *mvm, void *priv_sta, struct dentry *dir)
{
struct iwl_lq_sta *lq_sta = mvm_sta;
struct iwl_lq_sta *lq_sta = priv_sta;
struct iwl_mvm_sta *mvmsta;
mvmsta = container_of(lq_sta, struct iwl_mvm_sta, lq_sta);
if (!mvmsta->vif)
return;
debugfs_create_file("rate_scale_table", S_IRUSR | S_IWUSR, dir,
lq_sta, &rs_sta_dbgfs_scale_table_ops);

Some files were not shown because too many files have changed in this diff.