Merge branch 'fortglx/3.10/time' of git://git.linaro.org/people/jstultz/linux into timers/core

commit 0ed2aef9b3
@@ -3,10 +3,26 @@ ALPS Touchpad Protocol

Introduction
------------

Currently the ALPS touchpad driver supports five protocol versions in use by
ALPS touchpads, called versions 1, 2, 3, 4 and 5.

Currently the ALPS touchpad driver supports four protocol versions in use by
ALPS touchpads, called versions 1, 2, 3, and 4. Information about the various
protocol versions is contained in the following sections.

Since roughly mid-2010 several new ALPS touchpads have been released and
integrated into a variety of laptops and netbooks. These new touchpads
have enough behavior differences that the alps_model_data definition
table, describing the properties of the different versions, is no longer
adequate. The design choices were to re-define the alps_model_data
table, with the risk of regression testing existing devices, or isolate
the new devices outside of the alps_model_data table. The latter design
choice was made. The new touchpad signatures are named: "Rushmore",
"Pinnacle", and "Dolphin", which you will see in the alps.c code.
For the purposes of this document, this group of ALPS touchpads will
generically be called "new ALPS touchpads".

We experimented with probing the ACPI interface _HID (Hardware ID)/_CID
(Compatibility ID) definition as a way to uniquely identify the
different ALPS variants but there did not appear to be a 1:1 mapping.
In fact, it appeared to be an m:n mapping between the _HID and actual
hardware type.

Detection
---------

@@ -20,9 +36,13 @@ If the E6 report is successful, the touchpad model is identified using the "E7
report" sequence: E8-E7-E7-E7-E9. The response is the model signature and is
matched against known models in the alps_model_data_array.

With protocol versions 3 and 4, the E7 report model signature is always
73-02-64. To differentiate between these versions, the response from the
"Enter Command Mode" sequence must be inspected as described below.
For older touchpads supporting protocol versions 3 and 4, the E7 report
model signature is always 73-02-64. To differentiate between these
versions, the response from the "Enter Command Mode" sequence must be
inspected as described below.

The new ALPS touchpads have an E7 signature of 73-03-50 or 73-03-0A but
seem to be better differentiated by the EC Command Mode response.

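As an illustration, a minimal sketch of the E7 report follows. This is not
a verbatim copy of alps.c (the driver wraps the sequence in its own
alps_rpt_cmd() helper); the function name here is hypothetical and the
PSMOUSE_CMD_* values are assumed to be the standard PS/2 command constants
for E8, E7 and E9 from psmouse.h:

    /* Hypothetical sketch: issue E8-E7-E7-E7-E9 and read back the
     * 3-byte model signature (e.g. 73-02-64).
     */
    static int e7_report_sketch(struct psmouse *psmouse, unsigned char *param)
    {
        struct ps2dev *ps2dev = &psmouse->ps2dev;

        param[0] = 0;
        if (ps2_command(ps2dev, param, PSMOUSE_CMD_SETRES) ||    /* E8 */
            ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE21) || /* E7 */
            ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE21) || /* E7 */
            ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE21) || /* E7 */
            ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO))     /* E9 */
                return -EIO;

        return 0;   /* param[0..2] now holds the model signature */
    }
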
Command Mode
------------

@@ -47,6 +67,14 @@ address of the register being read, and the third contains the value of the
register. Registers are written by writing the value one nibble at a time
using the same encoding used for addresses.

For the new ALPS touchpads, the EC command is used to enter command
mode. The response in the new ALPS touchpads is significantly different,
and more important in determining the behavior. This code has been
separated from the original alps_model_data table and put in the
alps_identify function. For example, there seem to be two hardware init
sequences for the "Dolphin" touchpads as determined by the second byte
of the EC response.

Packet Format
-------------

@@ -187,3 +215,28 @@ There are several things worth noting here.
well.

So far no v4 devices with tracksticks have been encountered.

ALPS Absolute Mode - Protocol Version 5
---------------------------------------
This is basically Protocol Version 3 but with different logic for packet
decode. It uses the same alps_process_touchpad_packet_v3 call with a
specialized decode_fields function pointer to correctly interpret the
packets. This appears to only be used by the Dolphin devices.

For single-touch, the 6-byte packet format is:

 byte 0:    1    1    0    0    1    0    0    0
 byte 1:    0   x6   x5   x4   x3   x2   x1   x0
 byte 2:    0   y6   y5   y4   y3   y2   y1   y0
 byte 3:    0    M    R    L    1    m    r    l
 byte 4:  y10   y9   y8   y7  x10   x9   x8   x7
 byte 5:    0   z6   z5   z4   z3   z2   z1   z0

For mt, the format is:

 byte 0:    1    1    1   n3    1   n2   n1  x24
 byte 1:    1   y7   y6   y5   y4   y3   y2   y1
 byte 2:    ?   x2   x1  y12  y11  y10   y9   y8
 byte 3:    0  x23  x22  x21  x20  x19  x18  x17
 byte 4:    0   x9   x8   x7   x6   x5   x4   x3
 byte 5:    0  x16  x15  x14  x13  x12  x11  x10

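To make the bit layout concrete, here is a hedged decode sketch for the
single-touch packet above. The field extraction mirrors the
alps_decode_dolphin() function added by this change; the struct and
function names are illustrative only:

    struct v5_st { int x, y, z, left, right, middle; };

    static void v5_decode_st_sketch(const unsigned char *p, struct v5_st *f)
    {
        f->x = (p[1] & 0x7f) | ((p[4] & 0x0f) << 7); /* x10..x0 */
        f->y = (p[2] & 0x7f) | ((p[4] & 0xf0) << 3); /* y10..y0 */
        f->z = p[5] & 0x7f;  /* pressure; the real decoder also forces
                              * z = 0 when bit 2 of p[0] is set */
        f->left   = !!(p[3] & 0x01);                 /* l */
        f->right  = !!(p[3] & 0x02);                 /* r */
        f->middle = !!(p[3] & 0x04);                 /* m */
    }
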
@@ -105,6 +105,83 @@ Copyright (C) 1999-2000 Maxim Krasnyansky <max_mk@yahoo.com>
   Proto [2 bytes]
   Raw protocol(IP, IPv6, etc) frame.

3.3 Multiqueue tuntap interface:

Since version 3.8, Linux supports multiqueue tuntap, which uses multiple
file descriptors (queues) to parallelize packet sending and receiving. The
device allocation is the same as before; to create multiple queues,
TUNSETIFF must be called multiple times with the same device name and the
IFF_MULTI_QUEUE flag set.

char *dev is the name of the device, queues is the number of queues to be
created, and fds is used to return the created file descriptors (queues)
to the caller. Each file descriptor serves as the interface to one queue
and can be accessed from userspace.

  #include <linux/if.h>
  #include <linux/if_tun.h>

  int tun_alloc_mq(char *dev, int queues, int *fds)
  {
      struct ifreq ifr;
      int fd, err, i;

      if (!dev)
          return -1;

      memset(&ifr, 0, sizeof(ifr));
      /* Flags: IFF_TUN   - TUN device (no Ethernet headers)
       *        IFF_TAP   - TAP device
       *
       *        IFF_NO_PI - Do not provide packet information
       *        IFF_MULTI_QUEUE - Create a queue of multiqueue device
       */
      ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_MULTI_QUEUE;
      strcpy(ifr.ifr_name, dev);

      for (i = 0; i < queues; i++) {
          if ((fd = open("/dev/net/tun", O_RDWR)) < 0)
              goto err;
          err = ioctl(fd, TUNSETIFF, (void *)&ifr);
          if (err) {
              close(fd);
              goto err;
          }
          fds[i] = fd;
      }

      return 0;
  err:
      for (--i; i >= 0; i--)
          close(fds[i]);
      return err;
  }

A new ioctl (TUNSETQUEUE) was introduced to enable or disable a queue.
When called with the IFF_DETACH_QUEUE flag, the queue is disabled. When
called with the IFF_ATTACH_QUEUE flag, the queue is enabled. A queue is
enabled by default after it is created through TUNSETIFF.

fd is the file descriptor (queue) that we want to enable or disable;
when enable is true we enable it, otherwise we disable it:

  #include <linux/if.h>
  #include <linux/if_tun.h>

  int tun_set_queue(int fd, int enable)
  {
      struct ifreq ifr;

      memset(&ifr, 0, sizeof(ifr));

      if (enable)
          ifr.ifr_flags = IFF_ATTACH_QUEUE;
      else
          ifr.ifr_flags = IFF_DETACH_QUEUE;

      return ioctl(fd, TUNSETQUEUE, (void *)&ifr);
  }

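A brief usage sketch tying the two helpers together (this assumes the
tun_alloc_mq() and tun_set_queue() examples above; the device name and
queue count are arbitrary):

  int fds[4];

  if (tun_alloc_mq("tap0", 4, fds) < 0)
      exit(1);

  /* Temporarily stop dispatching packets to queue 2 ... */
  tun_set_queue(fds[2], 0);

  /* ... and attach it again later. */
  tun_set_queue(fds[2], 1);
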
Universal TUN/TAP device driver Frequently Asked Question.

1. What platforms are supported by TUN/TAP driver ?
@@ -1873,7 +1873,7 @@ feature:

 status\input  |      0     |      1     |    else    |
 --------------+------------+------------+------------+
 not allocated |(do nothing)| alloc+swap |   EINVAL   |
 not allocated |(do nothing)| alloc+swap |(do nothing)|
 --------------+------------+------------+------------+
 allocated     |    free    |    swap    |    clear   |
 --------------+------------+------------+------------+
@@ -6412,6 +6412,8 @@ F: Documentation/networking/LICENSE.qla3xxx
F:	drivers/net/ethernet/qlogic/qla3xxx.*

QLOGIC QLCNIC (1/10)Gb ETHERNET DRIVER
M:	Rajesh Borundia <rajesh.borundia@qlogic.com>
M:	Shahed Shaikh <shahed.shaikh@qlogic.com>
M:	Jitendra Kalsaria <jitendra.kalsaria@qlogic.com>
M:	Sony Chacko <sony.chacko@qlogic.com>
M:	linux-driver@qlogic.com
@@ -6,6 +6,7 @@ config ARCH_BCM
	select ARM_ERRATA_764369 if SMP
	select ARM_GIC
	select CPU_V7
	select CLKSRC_OF
	select GENERIC_CLOCKEVENTS
	select GENERIC_TIME
	select GPIO_BCM
@@ -16,14 +16,11 @@
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/irqchip.h>
#include <linux/clocksource.h>

#include <asm/mach/arch.h>
#include <asm/mach/time.h>

static void timer_init(void)
{
}


static void __init board_init(void)
{
@@ -35,7 +32,7 @@ static const char * const bcm11351_dt_compat[] = { "bcm,bcm11351", NULL, };

DT_MACHINE_START(BCM11351_DT, "Broadcom Application Processor")
	.init_irq = irqchip_init,
	.init_time = timer_init,
	.init_time = clocksource_of_init,
	.init_machine = board_init,
	.dt_compat = bcm11351_dt_compat,
MACHINE_END
@@ -12,6 +12,7 @@
#ifndef _ASM_S390_CPU_MF_H
#define _ASM_S390_CPU_MF_H

#include <linux/errno.h>
#include <asm/facility.h>

#define CPU_MF_INT_SF_IAE	(1 << 31)	/* invalid entry address */
@@ -37,7 +37,7 @@ extern int console_write_chan(struct chan *chan, const char *buf,
extern int console_open_chan(struct line *line, struct console *co);
extern void deactivate_chan(struct chan *chan, int irq);
extern void reactivate_chan(struct chan *chan, int irq);
extern void chan_enable_winch(struct chan *chan, struct tty_struct *tty);
extern void chan_enable_winch(struct chan *chan, struct tty_port *port);
extern int enable_chan(struct line *line);
extern void close_chan(struct line *line);
extern int chan_window_size(struct line *line,

@@ -122,10 +122,10 @@ static int open_chan(struct list_head *chans)
	return err;
}

void chan_enable_winch(struct chan *chan, struct tty_struct *tty)
void chan_enable_winch(struct chan *chan, struct tty_port *port)
{
	if (chan && chan->primary && chan->ops->winch)
		register_winch(chan->fd, tty);
		register_winch(chan->fd, port);
}

static void line_timer_cb(struct work_struct *work)

@@ -216,7 +216,7 @@ static int winch_thread(void *arg)
	}
}

static int winch_tramp(int fd, struct tty_struct *tty, int *fd_out,
static int winch_tramp(int fd, struct tty_port *port, int *fd_out,
		       unsigned long *stack_out)
{
	struct winch_data data;

@@ -271,7 +271,7 @@ static int winch_tramp(int fd, struct tty_struct *tty, int *fd_out,
	return err;
}

void register_winch(int fd, struct tty_struct *tty)
void register_winch(int fd, struct tty_port *port)
{
	unsigned long stack;
	int pid, thread, count, thread_fd = -1;

@@ -281,17 +281,17 @@ void register_winch(int fd, struct tty_struct *tty)
		return;

	pid = tcgetpgrp(fd);
	if (is_skas_winch(pid, fd, tty)) {
		register_winch_irq(-1, fd, -1, tty, 0);
	if (is_skas_winch(pid, fd, port)) {
		register_winch_irq(-1, fd, -1, port, 0);
		return;
	}

	if (pid == -1) {
		thread = winch_tramp(fd, tty, &thread_fd, &stack);
		thread = winch_tramp(fd, port, &thread_fd, &stack);
		if (thread < 0)
			return;

		register_winch_irq(thread_fd, fd, thread, tty, stack);
		register_winch_irq(thread_fd, fd, thread, port, stack);

		count = write(thread_fd, &c, sizeof(c));
		if (count != sizeof(c))

@@ -38,10 +38,10 @@ extern int generic_window_size(int fd, void *unused, unsigned short *rows_out,
			       unsigned short *cols_out);
extern void generic_free(void *data);

struct tty_struct;
extern void register_winch(int fd, struct tty_struct *tty);
struct tty_port;
extern void register_winch(int fd, struct tty_port *port);
extern void register_winch_irq(int fd, int tty_fd, int pid,
			       struct tty_struct *tty, unsigned long stack);
			       struct tty_port *port, unsigned long stack);

#define __channel_help(fn, prefix) \
__uml_help(fn, prefix "[0-9]*=<channel description>\n" \

@@ -305,7 +305,7 @@ static int line_activate(struct tty_port *port, struct tty_struct *tty)
		return ret;

	if (!line->sigio) {
		chan_enable_winch(line->chan_out, tty);
		chan_enable_winch(line->chan_out, port);
		line->sigio = 1;
	}

@@ -315,8 +315,22 @@ static int line_activate(struct tty_port *port, struct tty_struct *tty)
	return 0;
}

static void unregister_winch(struct tty_struct *tty);

static void line_destruct(struct tty_port *port)
{
	struct tty_struct *tty = tty_port_tty_get(port);
	struct line *line = tty->driver_data;

	if (line->sigio) {
		unregister_winch(tty);
		line->sigio = 0;
	}
}

static const struct tty_port_operations line_port_ops = {
	.activate = line_activate,
	.destruct = line_destruct,
};

int line_open(struct tty_struct *tty, struct file *filp)

@@ -340,18 +354,6 @@ int line_install(struct tty_driver *driver, struct tty_struct *tty,
	return 0;
}

static void unregister_winch(struct tty_struct *tty);

void line_cleanup(struct tty_struct *tty)
{
	struct line *line = tty->driver_data;

	if (line->sigio) {
		unregister_winch(tty);
		line->sigio = 0;
	}
}

void line_close(struct tty_struct *tty, struct file * filp)
{
	struct line *line = tty->driver_data;

@@ -601,7 +603,7 @@ struct winch {
	int fd;
	int tty_fd;
	int pid;
	struct tty_struct *tty;
	struct tty_port *port;
	unsigned long stack;
	struct work_struct work;
};

@@ -655,7 +657,7 @@ static irqreturn_t winch_interrupt(int irq, void *data)
			goto out;
		}
	}
	tty = winch->tty;
	tty = tty_port_tty_get(winch->port);
	if (tty != NULL) {
		line = tty->driver_data;
		if (line != NULL) {

@@ -663,6 +665,7 @@ static irqreturn_t winch_interrupt(int irq, void *data)
				   &tty->winsize.ws_col);
			kill_pgrp(tty->pgrp, SIGWINCH, 1);
		}
		tty_kref_put(tty);
	}
out:
	if (winch->fd != -1)

@@ -670,7 +673,7 @@ static irqreturn_t winch_interrupt(int irq, void *data)
	return IRQ_HANDLED;
}

void register_winch_irq(int fd, int tty_fd, int pid, struct tty_struct *tty,
void register_winch_irq(int fd, int tty_fd, int pid, struct tty_port *port,
			unsigned long stack)
{
	struct winch *winch;

@@ -685,7 +688,7 @@ void register_winch_irq(int fd, int tty_fd, int pid, struct tty_struct *tty,
		.fd = fd,
		.tty_fd = tty_fd,
		.pid = pid,
		.tty = tty,
		.port = port,
		.stack = stack });

	if (um_request_irq(WINCH_IRQ, fd, IRQ_READ, winch_interrupt,

@@ -714,15 +717,18 @@ static void unregister_winch(struct tty_struct *tty)
{
	struct list_head *ele, *next;
	struct winch *winch;
	struct tty_struct *wtty;

	spin_lock(&winch_handler_lock);

	list_for_each_safe(ele, next, &winch_handlers) {
		winch = list_entry(ele, struct winch, list);
		if (winch->tty == tty) {
		wtty = tty_port_tty_get(winch->port);
		if (wtty == tty) {
			free_winch(winch);
			break;
		}
		tty_kref_put(wtty);
	}
	spin_unlock(&winch_handler_lock);
}
@@ -218,6 +218,7 @@ static int uml_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
	spin_lock_irqsave(&lp->lock, flags);

	len = (*lp->write)(lp->fd, skb, lp);
	skb_tx_timestamp(skb);

	if (len == skb->len) {
		dev->stats.tx_packets++;

@@ -281,6 +282,7 @@ static void uml_net_get_drvinfo(struct net_device *dev,
static const struct ethtool_ops uml_net_ethtool_ops = {
	.get_drvinfo = uml_net_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_ts_info = ethtool_op_get_ts_info,
};

static void uml_net_user_timer_expire(unsigned long _conn)
@@ -105,7 +105,6 @@ static const struct tty_operations ssl_ops = {
	.throttle = line_throttle,
	.unthrottle = line_unthrottle,
	.install = ssl_install,
	.cleanup = line_cleanup,
	.hangup = line_hangup,
};

@@ -110,7 +110,6 @@ static const struct tty_operations console_ops = {
	.set_termios = line_set_termios,
	.throttle = line_throttle,
	.unthrottle = line_unthrottle,
	.cleanup = line_cleanup,
	.hangup = line_hangup,
};
@@ -15,7 +15,7 @@
#include <sysdep/mcontext.h>
#include "internal.h"

void (*sig_info[NSIG])(int, siginfo_t *, struct uml_pt_regs *) = {
void (*sig_info[NSIG])(int, struct siginfo *, struct uml_pt_regs *) = {
	[SIGTRAP] = relay_signal,
	[SIGFPE] = relay_signal,
	[SIGILL] = relay_signal,

@@ -15,6 +15,8 @@
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/wait.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <asm/unistd.h>
#include <init.h>
#include <os.h>
@@ -120,6 +120,7 @@ config X86
	select OLD_SIGSUSPEND3 if X86_32 || IA32_EMULATION
	select OLD_SIGACTION if X86_32
	select COMPAT_OLD_SIGACTION if IA32_EMULATION
	select RTC_LIB

config INSTRUCTION_DECODER
	def_bool y

@@ -100,6 +100,7 @@
#define X86_FEATURE_AMD_DCM		(3*32+27) /* multi-node processor */
#define X86_FEATURE_APERFMPERF		(3*32+28) /* APERFMPERF */
#define X86_FEATURE_EAGER_FPU		(3*32+29) /* "eagerfpu" Non lazy FPU restore */
#define X86_FEATURE_NONSTOP_TSC_S3	(3*32+30) /* TSC doesn't stop in S3 state */

/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
#define X86_FEATURE_XMM3		(4*32+ 0) /* "pni" SSE-3 */
@@ -96,6 +96,18 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
		sched_clock_stable = 1;
	}

	/* Penwell and Cloverview have the TSC which doesn't sleep on S3 */
	if (c->x86 == 6) {
		switch (c->x86_model) {
		case 0x27:	/* Penwell */
		case 0x35:	/* Cloverview */
			set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC_S3);
			break;
		default:
			break;
		}
	}

	/*
	 * There is a known erratum on Pentium III and Core Solo
	 * and Core Duo CPUs.
@@ -13,6 +13,7 @@
#include <asm/x86_init.h>
#include <asm/time.h>
#include <asm/mrst.h>
#include <asm/rtc.h>

#ifdef CONFIG_X86_32
/*
@@ -36,70 +37,24 @@ EXPORT_SYMBOL(rtc_lock);
 * nowtime is written into the registers of the CMOS clock, it will
 * jump to the next second precisely 500 ms later. Check the Motorola
 * MC146818A or Dallas DS12887 data sheet for details.
 *
 * BUG: This routine does not handle hour overflow properly; it just
 *      sets the minutes. Usually you'll only notice that after reboot!
 */
int mach_set_rtc_mmss(unsigned long nowtime)
{
	int real_seconds, real_minutes, cmos_minutes;
	unsigned char save_control, save_freq_select;
	unsigned long flags;
	struct rtc_time tm;
	int retval = 0;

	spin_lock_irqsave(&rtc_lock, flags);

	/* tell the clock it's being set */
	save_control = CMOS_READ(RTC_CONTROL);
	CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);

	/* stop and reset prescaler */
	save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
	CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);

	cmos_minutes = CMOS_READ(RTC_MINUTES);
	if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
		cmos_minutes = bcd2bin(cmos_minutes);

	/*
	 * since we're only adjusting minutes and seconds,
	 * don't interfere with hour overflow. This avoids
	 * messing with unknown time zones but requires your
	 * RTC not to be off by more than 15 minutes
	 */
	real_seconds = nowtime % 60;
	real_minutes = nowtime / 60;
	/* correct for half hour time zone */
	if (((abs(real_minutes - cmos_minutes) + 15)/30) & 1)
		real_minutes += 30;
	real_minutes %= 60;

	if (abs(real_minutes - cmos_minutes) < 30) {
		if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
			real_seconds = bin2bcd(real_seconds);
			real_minutes = bin2bcd(real_minutes);
		}
		CMOS_WRITE(real_seconds, RTC_SECONDS);
		CMOS_WRITE(real_minutes, RTC_MINUTES);
	rtc_time_to_tm(nowtime, &tm);
	if (!rtc_valid_tm(&tm)) {
		retval = set_rtc_time(&tm);
		if (retval)
			printk(KERN_ERR "%s: RTC write failed with error %d\n",
			       __FUNCTION__, retval);
	} else {
		printk_once(KERN_NOTICE
			    "set_rtc_mmss: can't update from %d to %d\n",
			    cmos_minutes, real_minutes);
		retval = -1;
		printk(KERN_ERR
		       "%s: Invalid RTC value: write of %lx to RTC failed\n",
		       __FUNCTION__, nowtime);
		retval = -EINVAL;
	}

	/* The following flags have to be released exactly in this order,
	 * otherwise the DS12887 (popular MC146818A clone with integrated
	 * battery and quartz) will not reset the oscillator and will not
	 * update precisely 500 ms later. You won't find this mentioned in
	 * the Dallas Semiconductor data sheets, but who believes data
	 * sheets anyway ... -- Markus Kuhn
	 */
	CMOS_WRITE(save_control, RTC_CONTROL);
	CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);

	spin_unlock_irqrestore(&rtc_lock, flags);

	return retval;
}
@@ -768,6 +768,7 @@ static cycle_t read_tsc(struct clocksource *cs)

static void resume_tsc(struct clocksource *cs)
{
	if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC_S3))
		clocksource_tsc.cycle_last = 0;
}

@@ -939,6 +940,9 @@ static int __init init_tsc_clocksource(void)
		clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS;
	}

	if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC_S3))
		clocksource_tsc.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;

	/*
	 * Trust the results of the earlier calibration on systems
	 * exporting a reliable TSC.
@@ -48,6 +48,7 @@
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/x86_init.h>
#include <asm/rtc.h>

#define EFI_DEBUG	1

@@ -258,10 +259,10 @@ static efi_status_t __init phys_efi_get_time(efi_time_t *tm,

int efi_set_rtc_mmss(unsigned long nowtime)
{
	int real_seconds, real_minutes;
	efi_status_t status;
	efi_time_t eft;
	efi_time_cap_t cap;
	struct rtc_time tm;

	status = efi.get_time(&eft, &cap);
	if (status != EFI_SUCCESS) {

@@ -269,13 +270,20 @@ int efi_set_rtc_mmss(unsigned long nowtime)
		return -1;
	}

	real_seconds = nowtime % 60;
	real_minutes = nowtime / 60;
	if (((abs(real_minutes - eft.minute) + 15)/30) & 1)
		real_minutes += 30;
	real_minutes %= 60;
	eft.minute = real_minutes;
	eft.second = real_seconds;
	rtc_time_to_tm(nowtime, &tm);
	if (!rtc_valid_tm(&tm)) {
		eft.year = tm.tm_year + 1900;
		eft.month = tm.tm_mon + 1;
		eft.day = tm.tm_mday;
		eft.minute = tm.tm_min;
		eft.second = tm.tm_sec;
		eft.nanosecond = 0;
	} else {
		printk(KERN_ERR
		       "%s: Invalid EFI RTC value: write of %lx to EFI RTC failed\n",
		       __FUNCTION__, nowtime);
		return -1;
	}

	status = efi.set_time(&eft);
	if (status != EFI_SUCCESS) {
@@ -85,27 +85,35 @@ unsigned long vrtc_get_time(void)
	return mktime(year, mon, mday, hour, min, sec);
}

/* Only care about the minutes and seconds */
int vrtc_set_mmss(unsigned long nowtime)
{
	int real_sec, real_min;
	unsigned long flags;
	int vrtc_min;
	struct rtc_time tm;
	int year;
	int retval = 0;

	rtc_time_to_tm(nowtime, &tm);
	if (!rtc_valid_tm(&tm) && tm.tm_year >= 72) {
		/*
		 * tm.year is the number of years since 1900, and the
		 * vrtc need the years since 1972.
		 */
		year = tm.tm_year - 72;
		spin_lock_irqsave(&rtc_lock, flags);
		vrtc_min = vrtc_cmos_read(RTC_MINUTES);

		real_sec = nowtime % 60;
		real_min = nowtime / 60;
		if (((abs(real_min - vrtc_min) + 15)/30) & 1)
			real_min += 30;
		real_min %= 60;

		vrtc_cmos_write(real_sec, RTC_SECONDS);
		vrtc_cmos_write(real_min, RTC_MINUTES);
		vrtc_cmos_write(year, RTC_YEAR);
		vrtc_cmos_write(tm.tm_mon, RTC_MONTH);
		vrtc_cmos_write(tm.tm_mday, RTC_DAY_OF_MONTH);
		vrtc_cmos_write(tm.tm_hour, RTC_HOURS);
		vrtc_cmos_write(tm.tm_min, RTC_MINUTES);
		vrtc_cmos_write(tm.tm_sec, RTC_SECONDS);
		spin_unlock_irqrestore(&rtc_lock, flags);

		return 0;
	} else {
		printk(KERN_ERR
		       "%s: Invalid vRTC value: write of %lx to vRTC failed\n",
		       __FUNCTION__, nowtime);
		retval = -EINVAL;
	}
	return retval;
}

void __init mrst_rtc_init(void)
@@ -19,6 +19,7 @@ obj-$(CONFIG_ARCH_BCM2835)	+= bcm2835_timer.o
obj-$(CONFIG_SUNXI_TIMER)	+= sunxi_timer.o
obj-$(CONFIG_ARCH_TEGRA)	+= tegra20_timer.o
obj-$(CONFIG_VT8500_TIMER)	+= vt8500_timer.o
obj-$(CONFIG_ARCH_BCM)		+= bcm_kona_timer.o

obj-$(CONFIG_ARM_ARCH_TIMER)		+= arm_arch_timer.o
obj-$(CONFIG_CLKSRC_METAG_GENERIC)	+= metag_generic.o
drivers/clocksource/bcm_kona_timer.c (new file, 211 lines)
@@ -0,0 +1,211 @@
/*
 * Copyright (C) 2012 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/init.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/clockchips.h>
#include <linux/types.h>

#include <linux/io.h>
#include <asm/mach/time.h>

#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>


#define KONA_GPTIMER_STCS_OFFSET			0x00000000
#define KONA_GPTIMER_STCLO_OFFSET			0x00000004
#define KONA_GPTIMER_STCHI_OFFSET			0x00000008
#define KONA_GPTIMER_STCM0_OFFSET			0x0000000C

#define KONA_GPTIMER_STCS_TIMER_MATCH_SHIFT		0
#define KONA_GPTIMER_STCS_COMPARE_ENABLE_SHIFT		4

struct kona_bcm_timers {
	int tmr_irq;
	void __iomem *tmr_regs;
};

static struct kona_bcm_timers timers;

static u32 arch_timer_rate;

/*
 * We use the peripheral timers for system tick, the cpu global timer for
 * profile tick
 */
static void kona_timer_disable_and_clear(void __iomem *base)
{
	uint32_t reg;

	/*
	 * clear and disable interrupts
	 * We are using compare/match register 0 for our system interrupts
	 */
	reg = readl(base + KONA_GPTIMER_STCS_OFFSET);

	/* Clear compare (0) interrupt */
	reg |= 1 << KONA_GPTIMER_STCS_TIMER_MATCH_SHIFT;
	/* disable compare */
	reg &= ~(1 << KONA_GPTIMER_STCS_COMPARE_ENABLE_SHIFT);

	writel(reg, base + KONA_GPTIMER_STCS_OFFSET);

}

static void
kona_timer_get_counter(void *timer_base, uint32_t *msw, uint32_t *lsw)
{
	void __iomem *base = IOMEM(timer_base);
	int loop_limit = 4;

	/*
	 * Read 64-bit free running counter
	 * 1. Read hi-word
	 * 2. Read low-word
	 * 3. Read hi-word again
	 * 4.1
	 *      if new hi-word is not equal to previously read hi-word, then
	 *      start from #1
	 * 4.2
	 *      if new hi-word is equal to previously read hi-word then stop.
	 */

	while (--loop_limit) {
		*msw = readl(base + KONA_GPTIMER_STCHI_OFFSET);
		*lsw = readl(base + KONA_GPTIMER_STCLO_OFFSET);
		if (*msw == readl(base + KONA_GPTIMER_STCHI_OFFSET))
			break;
	}
	if (!loop_limit) {
		pr_err("bcm_kona_timer: getting counter failed.\n");
		pr_err(" Timer will be impacted\n");
	}

	return;
}

static const struct of_device_id bcm_timer_ids[] __initconst = {
	{.compatible = "bcm,kona-timer"},
	{},
};

static void __init kona_timers_init(void)
{
	struct device_node *node;
	u32 freq;

	node = of_find_matching_node(NULL, bcm_timer_ids);

	if (!node)
		panic("No timer");

	if (!of_property_read_u32(node, "clock-frequency", &freq))
		arch_timer_rate = freq;
	else
		panic("clock-frequency not set in the .dts file");

	/* Setup IRQ numbers */
	timers.tmr_irq = irq_of_parse_and_map(node, 0);

	/* Setup IO addresses */
	timers.tmr_regs = of_iomap(node, 0);

	kona_timer_disable_and_clear(timers.tmr_regs);
}

static int kona_timer_set_next_event(unsigned long clc,
				  struct clock_event_device *unused)
{
	/*
	 * timer (0) is disabled by the timer interrupt already
	 * so, here we reload the next event value and re-enable
	 * the timer.
	 *
	 * This way, we are potentially losing the time between
	 * timer-interrupt->set_next_event. CPU local timers, when
	 * they come in should get rid of skew.
	 */

	uint32_t lsw, msw;
	uint32_t reg;

	kona_timer_get_counter(timers.tmr_regs, &msw, &lsw);

	/* Load the "next" event tick value */
	writel(lsw + clc, timers.tmr_regs + KONA_GPTIMER_STCM0_OFFSET);

	/* Enable compare */
	reg = readl(timers.tmr_regs + KONA_GPTIMER_STCS_OFFSET);
	reg |= (1 << KONA_GPTIMER_STCS_COMPARE_ENABLE_SHIFT);
	writel(reg, timers.tmr_regs + KONA_GPTIMER_STCS_OFFSET);

	return 0;
}

static void kona_timer_set_mode(enum clock_event_mode mode,
			     struct clock_event_device *unused)
{
	switch (mode) {
	case CLOCK_EVT_MODE_ONESHOT:
		/* by default mode is one shot don't do any thing */
		break;
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
	default:
		kona_timer_disable_and_clear(timers.tmr_regs);
	}
}

static struct clock_event_device kona_clockevent_timer = {
	.name = "timer 1",
	.features = CLOCK_EVT_FEAT_ONESHOT,
	.set_next_event = kona_timer_set_next_event,
	.set_mode = kona_timer_set_mode
};

static void __init kona_timer_clockevents_init(void)
{
	kona_clockevent_timer.cpumask = cpumask_of(0);
	clockevents_config_and_register(&kona_clockevent_timer,
		arch_timer_rate, 6, 0xffffffff);
}

static irqreturn_t kona_timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *evt = &kona_clockevent_timer;

	kona_timer_disable_and_clear(timers.tmr_regs);
	evt->event_handler(evt);
	return IRQ_HANDLED;
}

static struct irqaction kona_timer_irq = {
	.name = "Kona Timer Tick",
	.flags = IRQF_TIMER,
	.handler = kona_timer_interrupt,
};

static void __init kona_timer_init(void)
{
	kona_timers_init();
	kona_timer_clockevents_init();
	setup_irq(timers.tmr_irq, &kona_timer_irq);
	kona_timer_set_next_event((arch_timer_rate / HZ), NULL);
}

CLOCKSOURCE_OF_DECLARE(bcm_kona, "bcm,kona-timer",
	kona_timer_init);
@@ -70,8 +70,6 @@
#define TC3589x_EVT_INT_CLR	0x2
#define TC3589x_KBD_INT_CLR	0x1

#define TC3589x_KBD_KEYMAP_SIZE	64

/**
 * struct tc_keypad - data structure used by keypad driver
 * @tc3589x:    pointer to tc35893

@@ -88,7 +86,7 @@ struct tc_keypad {
	const struct tc3589x_keypad_platform_data *board;
	unsigned int krow;
	unsigned int kcol;
	unsigned short keymap[TC3589x_KBD_KEYMAP_SIZE];
	unsigned short *keymap;
	bool keypad_stopped;
};

@@ -338,12 +336,14 @@ static int tc3589x_keypad_probe(struct platform_device *pdev)

	error = matrix_keypad_build_keymap(plat->keymap_data, NULL,
					   TC3589x_MAX_KPROW, TC3589x_MAX_KPCOL,
					   keypad->keymap, input);
					   NULL, input);
	if (error) {
		dev_err(&pdev->dev, "Failed to build keymap\n");
		goto err_free_mem;
	}

	keypad->keymap = input->keycode;

	input_set_capability(input, EV_MSC, MSC_SCAN);
	if (!plat->no_autorepeat)
		__set_bit(EV_REP, input->evbit);
@@ -490,6 +490,29 @@ static void alps_decode_rushmore(struct alps_fields *f, unsigned char *p)
	f->y_map |= (p[5] & 0x20) << 6;
}

static void alps_decode_dolphin(struct alps_fields *f, unsigned char *p)
{
	f->first_mp = !!(p[0] & 0x02);
	f->is_mp = !!(p[0] & 0x20);

	f->fingers = ((p[0] & 0x6) >> 1 |
		     (p[0] & 0x10) >> 2);
	f->x_map = ((p[2] & 0x60) >> 5) |
		   ((p[4] & 0x7f) << 2) |
		   ((p[5] & 0x7f) << 9) |
		   ((p[3] & 0x07) << 16) |
		   ((p[3] & 0x70) << 15) |
		   ((p[0] & 0x01) << 22);
	f->y_map = (p[1] & 0x7f) |
		   ((p[2] & 0x1f) << 7);

	f->x = ((p[1] & 0x7f) | ((p[4] & 0x0f) << 7));
	f->y = ((p[2] & 0x7f) | ((p[4] & 0xf0) << 3));
	f->z = (p[0] & 4) ? 0 : p[5] & 0x7f;

	alps_decode_buttons_v3(f, p);
}

static void alps_process_touchpad_packet_v3(struct psmouse *psmouse)
{
	struct alps_data *priv = psmouse->private;

@@ -874,7 +897,8 @@ static psmouse_ret_t alps_process_byte(struct psmouse *psmouse)
	}

	/* Bytes 2 - pktsize should have 0 in the highest bit */
	if (psmouse->pktcnt >= 2 && psmouse->pktcnt <= psmouse->pktsize &&
	if (priv->proto_version != ALPS_PROTO_V5 &&
	    psmouse->pktcnt >= 2 && psmouse->pktcnt <= psmouse->pktsize &&
	    (psmouse->packet[psmouse->pktcnt - 1] & 0x80)) {
		psmouse_dbg(psmouse, "refusing packet[%i] = %x\n",
			    psmouse->pktcnt - 1,

@@ -994,8 +1018,7 @@ static int alps_rpt_cmd(struct psmouse *psmouse, int init_command,
	return 0;
}

static int alps_enter_command_mode(struct psmouse *psmouse,
				   unsigned char *resp)
static int alps_enter_command_mode(struct psmouse *psmouse)
{
	unsigned char param[4];

@@ -1004,14 +1027,12 @@ static int alps_enter_command_mode(struct psmouse *psmouse,
		return -1;
	}

	if (param[0] != 0x88 || (param[1] != 0x07 && param[1] != 0x08)) {
	if ((param[0] != 0x88 || (param[1] != 0x07 && param[1] != 0x08)) &&
	    param[0] != 0x73) {
		psmouse_dbg(psmouse,
			    "unknown response while entering command mode\n");
		return -1;
	}

	if (resp)
		*resp = param[2];
	return 0;
}

@@ -1176,7 +1197,7 @@ static int alps_passthrough_mode_v3(struct psmouse *psmouse,
{
	int reg_val, ret = -1;

	if (alps_enter_command_mode(psmouse, NULL))
	if (alps_enter_command_mode(psmouse))
		return -1;

	reg_val = alps_command_mode_read_reg(psmouse, reg_base + 0x0008);

@@ -1216,7 +1237,7 @@ static int alps_probe_trackstick_v3(struct psmouse *psmouse, int reg_base)
{
	int ret = -EIO, reg_val;

	if (alps_enter_command_mode(psmouse, NULL))
	if (alps_enter_command_mode(psmouse))
		goto error;

	reg_val = alps_command_mode_read_reg(psmouse, reg_base + 0x08);

@@ -1279,7 +1300,7 @@ static int alps_setup_trackstick_v3(struct psmouse *psmouse, int reg_base)
	 * supported by this driver. If bit 1 isn't set the packet
	 * format is different.
	 */
	if (alps_enter_command_mode(psmouse, NULL) ||
	if (alps_enter_command_mode(psmouse) ||
	    alps_command_mode_write_reg(psmouse,
					reg_base + 0x08, 0x82) ||
	    alps_exit_command_mode(psmouse))

@@ -1306,7 +1327,7 @@ static int alps_hw_init_v3(struct psmouse *psmouse)
	    alps_setup_trackstick_v3(psmouse, ALPS_REG_BASE_PINNACLE) == -EIO)
		goto error;

	if (alps_enter_command_mode(psmouse, NULL) ||
	if (alps_enter_command_mode(psmouse) ||
	    alps_absolute_mode_v3(psmouse)) {
		psmouse_err(psmouse, "Failed to enter absolute mode\n");
		goto error;

@@ -1381,7 +1402,7 @@ static int alps_hw_init_rushmore_v3(struct psmouse *psmouse)
		priv->flags &= ~ALPS_DUALPOINT;
	}

	if (alps_enter_command_mode(psmouse, NULL) ||
	if (alps_enter_command_mode(psmouse) ||
	    alps_command_mode_read_reg(psmouse, 0xc2d9) == -1 ||
	    alps_command_mode_write_reg(psmouse, 0xc2cb, 0x00))
		goto error;

@@ -1431,7 +1452,7 @@ static int alps_hw_init_v4(struct psmouse *psmouse)
	struct ps2dev *ps2dev = &psmouse->ps2dev;
	unsigned char param[4];

	if (alps_enter_command_mode(psmouse, NULL))
	if (alps_enter_command_mode(psmouse))
		goto error;

	if (alps_absolute_mode_v4(psmouse)) {

@@ -1499,6 +1520,23 @@ error:
	return -1;
}

static int alps_hw_init_dolphin_v1(struct psmouse *psmouse)
{
	struct ps2dev *ps2dev = &psmouse->ps2dev;
	unsigned char param[2];

	/* This is dolphin "v1" as empirically defined by florin9doi */
	param[0] = 0x64;
	param[1] = 0x28;

	if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSTREAM) ||
	    ps2_command(ps2dev, &param[0], PSMOUSE_CMD_SETRATE) ||
	    ps2_command(ps2dev, &param[1], PSMOUSE_CMD_SETRATE))
		return -1;

	return 0;
}

static void alps_set_defaults(struct alps_data *priv)
{
	priv->byte0 = 0x8f;

@@ -1532,6 +1570,21 @@ static void alps_set_defaults(struct alps_data *priv)
		priv->nibble_commands = alps_v4_nibble_commands;
		priv->addr_command = PSMOUSE_CMD_DISABLE;
		break;
	case ALPS_PROTO_V5:
		priv->hw_init = alps_hw_init_dolphin_v1;
		priv->process_packet = alps_process_packet_v3;
		priv->decode_fields = alps_decode_dolphin;
		priv->set_abs_params = alps_set_abs_params_mt;
		priv->nibble_commands = alps_v3_nibble_commands;
		priv->addr_command = PSMOUSE_CMD_RESET_WRAP;
		priv->byte0 = 0xc8;
		priv->mask0 = 0xc8;
		priv->flags = 0;
		priv->x_max = 1360;
		priv->y_max = 660;
		priv->x_bits = 23;
		priv->y_bits = 12;
		break;
	}
}

@@ -1591,6 +1644,12 @@ static int alps_identify(struct psmouse *psmouse, struct alps_data *priv)
		return -EIO;

	if (alps_match_table(psmouse, priv, e7, ec) == 0) {
		return 0;
	} else if (e7[0] == 0x73 && e7[1] == 0x03 && e7[2] == 0x50 &&
		   ec[0] == 0x73 && ec[1] == 0x01) {
		priv->proto_version = ALPS_PROTO_V5;
		alps_set_defaults(priv);

		return 0;
	} else if (ec[0] == 0x88 && ec[1] == 0x08) {
		priv->proto_version = ALPS_PROTO_V3;
@@ -16,6 +16,7 @@
#define ALPS_PROTO_V2	2
#define ALPS_PROTO_V3	3
#define ALPS_PROTO_V4	4
#define ALPS_PROTO_V5	5

/**
 * struct alps_model_info - touchpad ID table
@@ -236,6 +236,13 @@ static int cypress_read_fw_version(struct psmouse *psmouse)
	cytp->fw_version = param[2] & FW_VERSION_MASX;
	cytp->tp_metrics_supported = (param[2] & TP_METRICS_MASK) ? 1 : 0;

	/*
	 * Trackpad fw_version 11 (in Dell XPS12) yields a bogus response to
	 * CYTP_CMD_READ_TP_METRICS so do not try to use it. LP: #1103594.
	 */
	if (cytp->fw_version >= 11)
		cytp->tp_metrics_supported = 0;

	psmouse_dbg(psmouse, "cytp->fw_version = %d\n", cytp->fw_version);
	psmouse_dbg(psmouse, "cytp->tp_metrics_supported = %d\n",
		    cytp->tp_metrics_supported);

@@ -258,6 +265,9 @@ static int cypress_read_tp_metrics(struct psmouse *psmouse)
	cytp->tp_res_x = cytp->tp_max_abs_x / cytp->tp_width;
	cytp->tp_res_y = cytp->tp_max_abs_y / cytp->tp_high;

	if (!cytp->tp_metrics_supported)
		return 0;

	memset(param, 0, sizeof(param));
	if (cypress_send_ext_cmd(psmouse, CYTP_CMD_READ_TP_METRICS, param) == 0) {
		/* Update trackpad parameters. */

@@ -315,18 +325,15 @@ static int cypress_read_tp_metrics(struct psmouse *psmouse)

static int cypress_query_hardware(struct psmouse *psmouse)
{
	struct cytp_data *cytp = psmouse->private;
	int ret;

	ret = cypress_read_fw_version(psmouse);
	if (ret)
		return ret;

	if (cytp->tp_metrics_supported) {
		ret = cypress_read_tp_metrics(psmouse);
		if (ret)
			return ret;
	}

	return 0;
}
@@ -2017,6 +2017,9 @@ static const struct wacom_features wacom_features_0x100 =
static const struct wacom_features wacom_features_0x101 =
	{ "Wacom ISDv4 101", WACOM_PKGLEN_MTTPC, 26202, 16325, 255,
	  0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0x10D =
	{ "Wacom ISDv4 10D", WACOM_PKGLEN_MTTPC, 26202, 16325, 255,
	  0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0x4001 =
	{ "Wacom ISDv4 4001", WACOM_PKGLEN_MTTPC, 26202, 16325, 255,
	  0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };

@@ -2201,6 +2204,7 @@ const struct usb_device_id wacom_ids[] = {
	{ USB_DEVICE_WACOM(0xEF) },
	{ USB_DEVICE_WACOM(0x100) },
	{ USB_DEVICE_WACOM(0x101) },
	{ USB_DEVICE_WACOM(0x10D) },
	{ USB_DEVICE_WACOM(0x4001) },
	{ USB_DEVICE_WACOM(0x47) },
	{ USB_DEVICE_WACOM(0xF4) },
@@ -236,7 +236,12 @@ static void __ads7846_disable(struct ads7846 *ts)
/* Must be called with ts->lock held */
static void __ads7846_enable(struct ads7846 *ts)
{
	regulator_enable(ts->reg);
	int error;

	error = regulator_enable(ts->reg);
	if (error != 0)
		dev_err(&ts->spi->dev, "Failed to enable supply: %d\n", error);

	ads7846_restart(ts);
}
@@ -314,15 +314,27 @@ static int mms114_start(struct mms114_data *data)
	struct i2c_client *client = data->client;
	int error;

	if (data->core_reg)
		regulator_enable(data->core_reg);
	if (data->io_reg)
		regulator_enable(data->io_reg);
	error = regulator_enable(data->core_reg);
	if (error) {
		dev_err(&client->dev, "Failed to enable avdd: %d\n", error);
		return error;
	}

	error = regulator_enable(data->io_reg);
	if (error) {
		dev_err(&client->dev, "Failed to enable vdd: %d\n", error);
		regulator_disable(data->core_reg);
		return error;
	}

	mdelay(MMS114_POWERON_DELAY);

	error = mms114_setup_regs(data);
	if (error < 0)
	if (error < 0) {
		regulator_disable(data->io_reg);
		regulator_disable(data->core_reg);
		return error;
	}

	if (data->pdata->cfg_pin)
		data->pdata->cfg_pin(true);

@@ -335,16 +347,20 @@ static int mms114_start(struct mms114_data *data)
static void mms114_stop(struct mms114_data *data)
{
	struct i2c_client *client = data->client;
	int error;

	disable_irq(client->irq);

	if (data->pdata->cfg_pin)
		data->pdata->cfg_pin(false);

	if (data->io_reg)
		regulator_disable(data->io_reg);
	if (data->core_reg)
		regulator_disable(data->core_reg);
	error = regulator_disable(data->io_reg);
	if (error)
		dev_warn(&client->dev, "Failed to disable vdd: %d\n", error);

	error = regulator_disable(data->core_reg);
	if (error)
		dev_warn(&client->dev, "Failed to disable avdd: %d\n", error);
}

static int mms114_input_open(struct input_dev *dev)
|
||||
int j;
|
||||
int l;
|
||||
|
||||
l = strlen(msg);
|
||||
l = min(strlen(msg), sizeof(cmd.parm) - sizeof(cmd.parm.cmsg)
|
||||
+ sizeof(cmd.parm.cmsg.para) - 2);
|
||||
|
||||
if (!l) {
|
||||
isdn_tty_modem_result(RESULT_ERROR, info);
|
||||
return;
|
||||
|
@ -1964,7 +1964,6 @@ static int __bond_release_one(struct net_device *bond_dev,
|
||||
}
|
||||
|
||||
block_netpoll_tx();
|
||||
call_netdevice_notifiers(NETDEV_RELEASE, bond_dev);
|
||||
write_lock_bh(&bond->lock);
|
||||
|
||||
slave = bond_get_slave_by_dev(bond, slave_dev);
|
||||
@ -2066,8 +2065,10 @@ static int __bond_release_one(struct net_device *bond_dev,
|
||||
write_unlock_bh(&bond->lock);
|
||||
unblock_netpoll_tx();
|
||||
|
||||
if (bond->slave_cnt == 0)
|
||||
if (bond->slave_cnt == 0) {
|
||||
call_netdevice_notifiers(NETDEV_CHANGEADDR, bond->dev);
|
||||
call_netdevice_notifiers(NETDEV_RELEASE, bond->dev);
|
||||
}
|
||||
|
||||
bond_compute_features(bond);
|
||||
if (!(bond_dev->features & NETIF_F_VLAN_CHALLENGED) &&
|
||||
|
@ -8647,7 +8647,9 @@ void bnx2x_handle_module_detect_int(struct link_params *params)
|
||||
MDIO_WC_DEVAD,
|
||||
MDIO_WC_REG_DIGITAL5_MISC6,
|
||||
&rx_tx_in_reset);
|
||||
if (!rx_tx_in_reset) {
|
||||
if ((!rx_tx_in_reset) &&
|
||||
(params->link_flags &
|
||||
PHY_INITIALIZED)) {
|
||||
bnx2x_warpcore_reset_lane(bp, phy, 1);
|
||||
bnx2x_warpcore_config_sfi(phy, params);
|
||||
bnx2x_warpcore_reset_lane(bp, phy, 0);
|
||||
@ -12527,6 +12529,8 @@ int bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
|
||||
vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
|
||||
vars->mac_type = MAC_TYPE_NONE;
|
||||
vars->phy_flags = 0;
|
||||
vars->check_kr2_recovery_cnt = 0;
|
||||
params->link_flags = PHY_INITIALIZED;
|
||||
/* Driver opens NIG-BRB filters */
|
||||
bnx2x_set_rx_filter(params, 1);
|
||||
/* Check if link flap can be avoided */
|
||||
@ -12691,6 +12695,7 @@ int bnx2x_lfa_reset(struct link_params *params,
|
||||
struct bnx2x *bp = params->bp;
|
||||
vars->link_up = 0;
|
||||
vars->phy_flags = 0;
|
||||
params->link_flags &= ~PHY_INITIALIZED;
|
||||
if (!params->lfa_base)
|
||||
return bnx2x_link_reset(params, vars, 1);
|
||||
/*
|
||||
@ -13411,6 +13416,7 @@ static void bnx2x_disable_kr2(struct link_params *params,
|
||||
vars->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE;
|
||||
bnx2x_update_link_attr(params, vars->link_attr_sync);
|
||||
|
||||
vars->check_kr2_recovery_cnt = CHECK_KR2_RECOVERY_CNT;
|
||||
/* Restart AN on leading lane */
|
||||
bnx2x_warpcore_restart_AN_KR(phy, params);
|
||||
}
|
||||
@ -13439,6 +13445,15 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
|
||||
return;
|
||||
}
|
||||
|
||||
/* Once KR2 was disabled, wait 5 seconds before checking KR2 recovery
|
||||
* since some switches tend to reinit the AN process and clear the
|
||||
* advertised BP/NP after ~2 seconds causing the KR2 to be disabled
|
||||
* and recovered many times
|
||||
*/
|
||||
if (vars->check_kr2_recovery_cnt > 0) {
|
||||
vars->check_kr2_recovery_cnt--;
|
||||
return;
|
||||
}
|
||||
lane = bnx2x_get_warpcore_lane(phy, params);
|
||||
CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
|
||||
MDIO_AER_BLOCK_AER_REG, lane);
|
||||
|
@ -309,6 +309,7 @@ struct link_params {
|
||||
req_flow_ctrl is set to AUTO */
|
||||
u16 link_flags;
|
||||
#define LINK_FLAGS_INT_DISABLED (1<<0)
|
||||
#define PHY_INITIALIZED (1<<1)
|
||||
u32 lfa_base;
|
||||
};
|
||||
|
||||
@ -342,7 +343,8 @@ struct link_vars {
|
||||
u32 link_status;
|
||||
u32 eee_status;
|
||||
u8 fault_detected;
|
||||
u8 rsrv1;
|
||||
u8 check_kr2_recovery_cnt;
|
||||
#define CHECK_KR2_RECOVERY_CNT 5
|
||||
u16 periodic_flags;
|
||||
#define PERIODIC_FLAGS_LINK_EVENT 0x0001
|
||||
|
||||
|
@ -1869,6 +1869,8 @@ static void tg3_link_report(struct tg3 *tp)
|
||||
|
||||
tg3_ump_link_report(tp);
|
||||
}
|
||||
|
||||
tp->link_up = netif_carrier_ok(tp->dev);
|
||||
}
|
||||
|
||||
static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
|
||||
@ -2522,12 +2524,6 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
|
||||
return err;
|
||||
}
|
||||
|
||||
static void tg3_carrier_on(struct tg3 *tp)
|
||||
{
|
||||
netif_carrier_on(tp->dev);
|
||||
tp->link_up = true;
|
||||
}
|
||||
|
||||
static void tg3_carrier_off(struct tg3 *tp)
|
||||
{
|
||||
netif_carrier_off(tp->dev);
|
||||
@ -2553,7 +2549,7 @@ static int tg3_phy_reset(struct tg3 *tp)
|
||||
return -EBUSY;
|
||||
|
||||
if (netif_running(tp->dev) && tp->link_up) {
|
||||
tg3_carrier_off(tp);
|
||||
netif_carrier_off(tp->dev);
|
||||
tg3_link_report(tp);
|
||||
}
|
||||
|
||||
@ -4262,9 +4258,9 @@ static bool tg3_test_and_report_link_chg(struct tg3 *tp, int curr_link_up)
|
||||
{
|
||||
if (curr_link_up != tp->link_up) {
|
||||
if (curr_link_up) {
|
||||
tg3_carrier_on(tp);
|
||||
netif_carrier_on(tp->dev);
|
||||
} else {
|
||||
tg3_carrier_off(tp);
|
||||
netif_carrier_off(tp->dev);
|
||||
if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
|
||||
tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
|
||||
}
|
||||
|
@ -349,6 +349,7 @@ struct be_adapter {
|
||||
struct pci_dev *pdev;
|
||||
struct net_device *netdev;
|
||||
|
||||
u8 __iomem *csr; /* CSR BAR used only for BE2/3 */
|
||||
u8 __iomem *db; /* Door Bell */
|
||||
|
||||
struct mutex mbox_lock; /* For serializing mbox cmds to BE card */
|
||||
|
@ -473,19 +473,17 @@ static int be_mbox_notify_wait(struct be_adapter *adapter)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
|
||||
static u16 be_POST_stage_get(struct be_adapter *adapter)
|
||||
{
|
||||
u32 sem;
|
||||
u32 reg = skyhawk_chip(adapter) ? SLIPORT_SEMAPHORE_OFFSET_SH :
|
||||
SLIPORT_SEMAPHORE_OFFSET_BE;
|
||||
|
||||
pci_read_config_dword(adapter->pdev, reg, &sem);
|
||||
*stage = sem & POST_STAGE_MASK;
|
||||
|
||||
if ((sem >> POST_ERR_SHIFT) & POST_ERR_MASK)
|
||||
return -1;
|
||||
if (BEx_chip(adapter))
|
||||
sem = ioread32(adapter->csr + SLIPORT_SEMAPHORE_OFFSET_BEx);
|
||||
else
|
||||
return 0;
|
||||
pci_read_config_dword(adapter->pdev,
|
||||
SLIPORT_SEMAPHORE_OFFSET_SH, &sem);
|
||||
|
||||
return sem & POST_STAGE_MASK;
|
||||
}
|
||||
|
||||
int lancer_wait_ready(struct be_adapter *adapter)
|
||||
@ -579,19 +577,17 @@ int be_fw_wait_ready(struct be_adapter *adapter)
|
||||
}
|
||||
|
||||
do {
|
||||
status = be_POST_stage_get(adapter, &stage);
|
||||
if (status) {
|
||||
dev_err(dev, "POST error; stage=0x%x\n", stage);
|
||||
return -1;
|
||||
} else if (stage != POST_STAGE_ARMFW_RDY) {
|
||||
stage = be_POST_stage_get(adapter);
|
||||
if (stage == POST_STAGE_ARMFW_RDY)
|
||||
return 0;
|
||||
|
||||
dev_info(dev, "Waiting for POST, %ds elapsed\n",
|
||||
timeout);
|
||||
if (msleep_interruptible(2000)) {
|
||||
dev_err(dev, "Waiting for POST aborted\n");
|
||||
return -EINTR;
|
||||
}
|
||||
timeout += 2;
|
||||
} else {
|
||||
return 0;
|
||||
}
|
||||
} while (timeout < 60);
|
||||
|
||||
dev_err(dev, "POST timeout; stage=0x%x\n", stage);
|
||||
|
@ -32,8 +32,8 @@
|
||||
#define MPU_EP_CONTROL 0
|
||||
|
||||
/********** MPU semphore: used for SH & BE *************/
|
||||
#define SLIPORT_SEMAPHORE_OFFSET_BE 0x7c
|
||||
#define SLIPORT_SEMAPHORE_OFFSET_SH 0x94
|
||||
#define SLIPORT_SEMAPHORE_OFFSET_BEx 0xac /* CSR BAR offset */
|
||||
#define SLIPORT_SEMAPHORE_OFFSET_SH 0x94 /* PCI-CFG offset */
|
||||
#define POST_STAGE_MASK 0x0000FFFF
|
||||
#define POST_ERR_MASK 0x1
|
||||
#define POST_ERR_SHIFT 31
|
||||
|
@ -3688,6 +3688,8 @@ static void be_netdev_init(struct net_device *netdev)
|
||||
|
||||
static void be_unmap_pci_bars(struct be_adapter *adapter)
|
||||
{
|
||||
if (adapter->csr)
|
||||
pci_iounmap(adapter->pdev, adapter->csr);
|
||||
if (adapter->db)
|
||||
pci_iounmap(adapter->pdev, adapter->db);
|
||||
}
|
||||
@ -3721,6 +3723,12 @@ static int be_map_pci_bars(struct be_adapter *adapter)
|
||||
adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
|
||||
SLI_INTF_IF_TYPE_SHIFT;
|
||||
|
||||
if (BEx_chip(adapter) && be_physfn(adapter)) {
|
||||
adapter->csr = pci_iomap(adapter->pdev, 2, 0);
|
||||
if (adapter->csr == NULL)
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
|
||||
if (addr == NULL)
|
||||
goto pci_map_err;
|
||||
@ -4329,6 +4337,8 @@ static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
|
||||
pci_restore_state(pdev);
|
||||
|
||||
/* Check if card is ok and fw is ready */
|
||||
dev_info(&adapter->pdev->dev,
|
||||
"Waiting for FW to be ready after EEH reset\n");
|
||||
status = be_fw_wait_ready(adapter);
|
||||
if (status)
|
||||
return PCI_ERS_RESULT_DISCONNECT;
|
||||
|
@ -36,6 +36,7 @@
|
||||
#include <linux/delay.h>
|
||||
#include <linux/vmalloc.h>
|
||||
#include <linux/mdio.h>
|
||||
#include <linux/pm_runtime.h>
|
||||
|
||||
#include "e1000.h"
|
||||
|
||||
@ -2229,7 +2230,19 @@ static int e1000e_get_ts_info(struct net_device *netdev,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int e1000e_ethtool_begin(struct net_device *netdev)
|
||||
{
|
||||
return pm_runtime_get_sync(netdev->dev.parent);
|
||||
}
|
||||
|
||||
static void e1000e_ethtool_complete(struct net_device *netdev)
|
||||
{
|
||||
pm_runtime_put_sync(netdev->dev.parent);
|
||||
}
|
||||
|
||||
static const struct ethtool_ops e1000_ethtool_ops = {
|
||||
.begin = e1000e_ethtool_begin,
|
||||
.complete = e1000e_ethtool_complete,
|
||||
.get_settings = e1000_get_settings,
|
||||
.set_settings = e1000_set_settings,
|
||||
.get_drvinfo = e1000_get_drvinfo,
|
||||
|
@@ -781,6 +781,59 @@ release:
	return ret_val;
}

/**
 * e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
 * @hw: pointer to the HW structure
 * @link: link up bool flag
 *
 * When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
 * preventing further DMA write requests. Workaround the issue by disabling
 * the de-assertion of the clock request when in 1Gbps mode.
 **/
static s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link)
{
	u32 fextnvm6 = er32(FEXTNVM6);
	s32 ret_val = 0;

	if (link && (er32(STATUS) & E1000_STATUS_SPEED_1000)) {
		u16 kmrn_reg;

		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val)
			return ret_val;

		ret_val =
		    e1000e_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
						&kmrn_reg);
		if (ret_val)
			goto release;

		ret_val =
		    e1000e_write_kmrn_reg_locked(hw,
						 E1000_KMRNCTRLSTA_K1_CONFIG,
						 kmrn_reg &
						 ~E1000_KMRNCTRLSTA_K1_ENABLE);
		if (ret_val)
			goto release;

		usleep_range(10, 20);

		ew32(FEXTNVM6, fextnvm6 | E1000_FEXTNVM6_REQ_PLL_CLK);

		ret_val =
		    e1000e_write_kmrn_reg_locked(hw,
						 E1000_KMRNCTRLSTA_K1_CONFIG,
						 kmrn_reg);
release:
		hw->phy.ops.release(hw);
	} else {
		/* clear FEXTNVM6 bit 8 on link down or 10/100 */
		ew32(FEXTNVM6, fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
	}

	return ret_val;
}

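A quick cross-check on the "clear FEXTNVM6 bit 8" comment in the else branch above: E1000_FEXTNVM6_REQ_PLL_CLK is defined later in this series as 0x00000100, which is exactly bit 8. A one-line sketch (plain C11, not part of the patch):

	_Static_assert(0x00000100 == (1u << 8), "REQ_PLL_CLK is FEXTNVM6 bit 8");
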
/**
 * e1000_check_for_copper_link_ich8lan - Check for link (Copper)
 * @hw: pointer to the HW structure
@@ -818,6 +871,14 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
		return ret_val;
	}

	/* Work-around I218 hang issue */
	if ((hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
	    (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_V)) {
		ret_val = e1000_k1_workaround_lpt_lp(hw, link);
		if (ret_val)
			return ret_val;
	}

	/* Clear link partner's EEE ability */
	hw->dev_spec.ich8lan.eee_lp_ability = 0;

@@ -3954,8 +4015,16 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)

	phy_ctrl = er32(PHY_CTRL);
	phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE;

	if (hw->phy.type == e1000_phy_i217) {
		u16 phy_reg;
		u16 phy_reg, device_id = hw->adapter->pdev->device;

		if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
		    (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V)) {
			u32 fextnvm6 = er32(FEXTNVM6);

			ew32(FEXTNVM6, fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
		}

		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val)

@@ -92,6 +92,8 @@
#define E1000_FEXTNVM4_BEACON_DURATION_8USEC 0x7
#define E1000_FEXTNVM4_BEACON_DURATION_16USEC 0x3

#define E1000_FEXTNVM6_REQ_PLL_CLK 0x00000100

#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL

#define E1000_ICH_RAR_ENTRIES 7

@@ -4303,6 +4303,7 @@ static int e1000_open(struct net_device *netdev)
	netif_start_queue(netdev);

	adapter->idle_check = true;
	hw->mac.get_link_status = true;
	pm_runtime_put(&pdev->dev);

	/* fire a link status change interrupt to start the watchdog */
@@ -4662,6 +4663,7 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter)
	    (adapter->hw.phy.media_type == e1000_media_type_copper)) {
		int ret_val;

		pm_runtime_get_sync(&adapter->pdev->dev);
		ret_val = e1e_rphy(hw, MII_BMCR, &phy->bmcr);
		ret_val |= e1e_rphy(hw, MII_BMSR, &phy->bmsr);
		ret_val |= e1e_rphy(hw, MII_ADVERTISE, &phy->advertise);
@@ -4672,6 +4674,7 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter)
		ret_val |= e1e_rphy(hw, MII_ESTATUS, &phy->estatus);
		if (ret_val)
			e_warn("Error reading PHY register\n");
		pm_runtime_put_sync(&adapter->pdev->dev);
	} else {
		/* Do not read PHY registers if link is not up
		 * Set values to typical power-on defaults
@@ -5887,8 +5890,7 @@ release:
	return retval;
}

static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
			    bool runtime)
static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -5912,10 +5914,6 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
	}
	e1000e_reset_interrupt_capability(adapter);

	retval = pci_save_state(pdev);
	if (retval)
		return retval;

	status = er32(STATUS);
	if (status & E1000_STATUS_LU)
		wufc &= ~E1000_WUFC_LNKC;
@@ -5971,13 +5969,6 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
		ew32(WUFC, 0);
	}

	*enable_wake = !!wufc;

	/* make sure adapter isn't asleep if manageability is enabled */
	if ((adapter->flags & FLAG_MNG_PT_ENABLED) ||
	    (hw->mac.ops.check_mng_mode(hw)))
		*enable_wake = true;

	if (adapter->hw.phy.type == e1000_phy_igp_3)
		e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);

@@ -5986,27 +5977,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
	 */
	e1000e_release_hw_control(adapter);

	pci_disable_device(pdev);

	return 0;
}

static void e1000_power_off(struct pci_dev *pdev, bool sleep, bool wake)
{
	if (sleep && wake) {
		pci_prepare_to_sleep(pdev);
		return;
	}

	pci_wake_from_d3(pdev, wake);
	pci_set_power_state(pdev, PCI_D3hot);
}

static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep,
				    bool wake)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	pci_clear_master(pdev);

	/* The pci-e switch on some quad port adapters will report a
	 * correctable error when the MAC transitions from D0 to D3. To
@@ -6021,12 +5992,13 @@ static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep,
		pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL,
					   (devctl & ~PCI_EXP_DEVCTL_CERE));

		e1000_power_off(pdev, sleep, wake);
		pci_save_state(pdev);
		pci_prepare_to_sleep(pdev);

		pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL, devctl);
	} else {
		e1000_power_off(pdev, sleep, wake);
	}

	return 0;
}

#ifdef CONFIG_PCIEASPM
@@ -6084,9 +6056,7 @@ static int __e1000_resume(struct pci_dev *pdev)
	if (aspm_disable_flag)
		e1000e_disable_aspm(pdev, aspm_disable_flag);

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_save_state(pdev);
	pci_set_master(pdev);

	e1000e_set_interrupt_capability(adapter);
	if (netif_running(netdev)) {
@@ -6152,14 +6122,8 @@ static int __e1000_resume(struct pci_dev *pdev)
static int e1000_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int retval;
	bool wake;

	retval = __e1000_shutdown(pdev, &wake, false);
	if (!retval)
		e1000_complete_shutdown(pdev, true, wake);

	return retval;
	return __e1000_shutdown(pdev, false);
}

static int e1000_resume(struct device *dev)
@@ -6182,13 +6146,10 @@ static int e1000_runtime_suspend(struct device *dev)
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	if (e1000e_pm_ready(adapter)) {
		bool wake;

		__e1000_shutdown(pdev, &wake, true);
	}

	if (!e1000e_pm_ready(adapter))
		return 0;

	return __e1000_shutdown(pdev, true);
}

static int e1000_idle(struct device *dev)
@@ -6226,12 +6187,7 @@ static int e1000_runtime_resume(struct device *dev)

static void e1000_shutdown(struct pci_dev *pdev)
{
	bool wake = false;

	__e1000_shutdown(pdev, &wake, false);

	if (system_state == SYSTEM_POWER_OFF)
		e1000_complete_shutdown(pdev, false, wake);
	__e1000_shutdown(pdev, false);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -6352,9 +6308,9 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
			"Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pdev->state_saved = true;
		pci_restore_state(pdev);
		pci_set_master(pdev);

		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_enable_wake(pdev, PCI_D3cold, 0);
@@ -6783,7 +6739,11 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)

	/* initialize the wol settings based on the eeprom settings */
	adapter->wol = adapter->eeprom_wol;
	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	/* make sure adapter isn't asleep if manageability is enabled */
	if (adapter->wol || (adapter->flags & FLAG_MNG_PT_ENABLED) ||
	    (hw->mac.ops.check_mng_mode(hw)))
		device_wakeup_enable(&pdev->dev);

	/* save off EEPROM version number */
	e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers);

@@ -42,6 +42,7 @@
#define E1000_FEXTNVM 0x00028 /* Future Extended NVM - RW */
#define E1000_FEXTNVM3 0x0003C /* Future Extended NVM 3 - RW */
#define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */
#define E1000_FEXTNVM6 0x00010 /* Future Extended NVM 6 - RW */
#define E1000_FEXTNVM7 0x000E4 /* Future Extended NVM 7 - RW */
#define E1000_FCT 0x00030 /* Flow Control Type - RW */
#define E1000_VET 0x00038 /* VLAN Ether Type - RW */

@@ -1361,12 +1361,17 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
	switch (hw->phy.type) {
	case e1000_phy_i210:
	case e1000_phy_m88:
		if (hw->phy.id == I347AT4_E_PHY_ID ||
		    hw->phy.id == M88E1112_E_PHY_ID)
		switch (hw->phy.id) {
		case I347AT4_E_PHY_ID:
		case M88E1112_E_PHY_ID:
		case I210_I_PHY_ID:
			ret_val = igb_copper_link_setup_m88_gen2(hw);
		else
			break;
		default:
			ret_val = igb_copper_link_setup_m88(hw);
			break;
		}
		break;
	case e1000_phy_igp_3:
		ret_val = igb_copper_link_setup_igp(hw);
		break;

@@ -447,7 +447,7 @@ struct igb_adapter {
#endif
	struct i2c_algo_bit_data i2c_algo;
	struct i2c_adapter i2c_adap;
	struct igb_i2c_client_list *i2c_clients;
	struct i2c_client *i2c_client;
};

#define IGB_FLAG_HAS_MSI (1 << 0)

@@ -39,6 +39,10 @@
#include <linux/pci.h>

#ifdef CONFIG_IGB_HWMON
struct i2c_board_info i350_sensor_info = {
	I2C_BOARD_INFO("i350bb", (0Xf8 >> 1)),
};

/* hwmon callback functions */
static ssize_t igb_hwmon_show_location(struct device *dev,
				       struct device_attribute *attr,
@@ -188,6 +192,7 @@ int igb_sysfs_init(struct igb_adapter *adapter)
	unsigned int i;
	int n_attrs;
	int rc = 0;
	struct i2c_client *client = NULL;

	/* If this method isn't defined we don't support thermals */
	if (adapter->hw.mac.ops.init_thermal_sensor_thresh == NULL)
@@ -198,6 +203,15 @@ int igb_sysfs_init(struct igb_adapter *adapter)
	if (rc)
		goto exit;

	/* init i2c_client */
	client = i2c_new_device(&adapter->i2c_adap, &i350_sensor_info);
	if (client == NULL) {
		dev_info(&adapter->pdev->dev,
			 "Failed to create new i2c device..\n");
		goto exit;
	}
	adapter->i2c_client = client;

	/* Allocation space for max attributes
	 * max num sensors * values (loc, temp, max, caution)
	 */

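One detail worth noting in the hwmon hunk above: I2C_BOARD_INFO() takes a 7-bit client address, while the i350 thermal sensor's address is quoted in 8-bit form with the R/W bit in bit 0, hence the (0Xf8 >> 1). A stand-alone illustration (plain C, invented variable names):

	#include <stdio.h>

	int main(void)
	{
		unsigned int addr_8bit = 0xF8;		/* datasheet form, R/W bit in bit 0 */
		unsigned int addr_7bit = addr_8bit >> 1; /* what the I2C core expects */

		printf("0x%02X\n", addr_7bit);		/* prints 0x7C */
		return 0;
	}

The igb_get_i2c_client() removed below makes the same point in its comment: "dev_addr passed to us is left-shifted by 1 bit".
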
@@ -1923,10 +1923,6 @@ void igb_set_fw_version(struct igb_adapter *adapter)
	return;
}

static const struct i2c_board_info i350_sensor_info = {
	I2C_BOARD_INFO("i350bb", 0Xf8),
};

/* igb_init_i2c - Init I2C interface
 * @adapter: pointer to adapter structure
 *
@@ -6227,13 +6223,6 @@ static struct sk_buff *igb_build_rx_buffer(struct igb_ring *rx_ring,
	/* If we spanned a buffer we have a huge mess so test for it */
	BUG_ON(unlikely(!igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)));

	/* Guarantee this function can be used by verifying buffer sizes */
	BUILD_BUG_ON(SKB_WITH_OVERHEAD(IGB_RX_BUFSZ) < (NET_SKB_PAD +
							NET_IP_ALIGN +
							IGB_TS_HDR_LEN +
							ETH_FRAME_LEN +
							ETH_FCS_LEN));

	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
	page = rx_buffer->page;
	prefetchw(page);
@@ -7724,67 +7713,6 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
	}
}

static DEFINE_SPINLOCK(i2c_clients_lock);

/* igb_get_i2c_client - returns matching client
 * in adapter's client list.
 * @adapter: adapter struct
 * @dev_addr: device address of i2c needed.
 */
static struct i2c_client *
igb_get_i2c_client(struct igb_adapter *adapter, u8 dev_addr)
{
	ulong flags;
	struct igb_i2c_client_list *client_list;
	struct i2c_client *client = NULL;
	struct i2c_board_info client_info = {
		I2C_BOARD_INFO("igb", 0x00),
	};

	spin_lock_irqsave(&i2c_clients_lock, flags);
	client_list = adapter->i2c_clients;

	/* See if we already have an i2c_client */
	while (client_list) {
		if (client_list->client->addr == (dev_addr >> 1)) {
			client = client_list->client;
			goto exit;
		} else {
			client_list = client_list->next;
		}
	}

	/* no client_list found, create a new one */
	client_list = kzalloc(sizeof(*client_list), GFP_ATOMIC);
	if (client_list == NULL)
		goto exit;

	/* dev_addr passed to us is left-shifted by 1 bit
	 * i2c_new_device call expects it to be flush to the right.
	 */
	client_info.addr = dev_addr >> 1;
	client_info.platform_data = adapter;
	client_list->client = i2c_new_device(&adapter->i2c_adap, &client_info);
	if (client_list->client == NULL) {
		dev_info(&adapter->pdev->dev,
			 "Failed to create new i2c device..\n");
		goto err_no_client;
	}

	/* insert new client at head of list */
	client_list->next = adapter->i2c_clients;
	adapter->i2c_clients = client_list;

	client = client_list->client;
	goto exit;

err_no_client:
	kfree(client_list);
exit:
	spin_unlock_irqrestore(&i2c_clients_lock, flags);
	return client;
}

/* igb_read_i2c_byte - Reads 8 bit word over I2C
 * @hw: pointer to hardware structure
 * @byte_offset: byte offset to read
@@ -7798,7 +7726,7 @@ s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
		      u8 dev_addr, u8 *data)
{
	struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
	struct i2c_client *this_client = igb_get_i2c_client(adapter, dev_addr);
	struct i2c_client *this_client = adapter->i2c_client;
	s32 status;
	u16 swfw_mask = 0;

@@ -7835,7 +7763,7 @@ s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
		       u8 dev_addr, u8 data)
{
	struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
	struct i2c_client *this_client = igb_get_i2c_client(adapter, dev_addr);
	struct i2c_client *this_client = adapter->i2c_client;
	s32 status;
	u16 swfw_mask = E1000_SWFW_PHY0_SM;

@@ -1081,6 +1081,45 @@ static void txq_set_fixed_prio_mode(struct tx_queue *txq)


/* mii management interface *************************************************/
static void mv643xx_adjust_pscr(struct mv643xx_eth_private *mp)
{
	u32 pscr = rdlp(mp, PORT_SERIAL_CONTROL);
	u32 autoneg_disable = FORCE_LINK_PASS |
			      DISABLE_AUTO_NEG_SPEED_GMII |
			      DISABLE_AUTO_NEG_FOR_FLOW_CTRL |
			      DISABLE_AUTO_NEG_FOR_DUPLEX;

	if (mp->phy->autoneg == AUTONEG_ENABLE) {
		/* enable auto negotiation */
		pscr &= ~autoneg_disable;
		goto out_write;
	}

	pscr |= autoneg_disable;

	if (mp->phy->speed == SPEED_1000) {
		/* force gigabit, half duplex not supported */
		pscr |= SET_GMII_SPEED_TO_1000;
		pscr |= SET_FULL_DUPLEX_MODE;
		goto out_write;
	}

	pscr &= ~SET_GMII_SPEED_TO_1000;

	if (mp->phy->speed == SPEED_100)
		pscr |= SET_MII_SPEED_TO_100;
	else
		pscr &= ~SET_MII_SPEED_TO_100;

	if (mp->phy->duplex == DUPLEX_FULL)
		pscr |= SET_FULL_DUPLEX_MODE;
	else
		pscr &= ~SET_FULL_DUPLEX_MODE;

out_write:
	wrlp(mp, PORT_SERIAL_CONTROL, pscr);
}

static irqreturn_t mv643xx_eth_err_irq(int irq, void *dev_id)
{
	struct mv643xx_eth_shared_private *msp = dev_id;
@@ -1499,6 +1538,7 @@ static int
mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int ret;

	if (mp->phy == NULL)
		return -EINVAL;
@@ -1508,7 +1548,10 @@ mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
	 */
	cmd->advertising &= ~ADVERTISED_1000baseT_Half;

	return phy_ethtool_sset(mp->phy, cmd);
	ret = phy_ethtool_sset(mp->phy, cmd);
	if (!ret)
		mv643xx_adjust_pscr(mp);
	return ret;
}

static void mv643xx_eth_get_drvinfo(struct net_device *dev,
@@ -2442,11 +2485,15 @@ static int mv643xx_eth_stop(struct net_device *dev)
static int mv643xx_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mv643xx_eth_private *mp = netdev_priv(dev);
	int ret;

	if (mp->phy != NULL)
		return phy_mii_ioctl(mp->phy, ifr, cmd);
	if (mp->phy == NULL)
		return -ENOTSUPP;

	return -EOPNOTSUPP;
	ret = phy_mii_ioctl(mp->phy, ifr, cmd);
	if (!ret)
		mv643xx_adjust_pscr(mp);
	return ret;
}

static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)

@@ -226,7 +226,7 @@ void __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)

static void mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)
{
	u64 in_param;
	u64 in_param = 0;
	int err;

	if (mlx4_is_mfunc(dev)) {

@@ -565,34 +565,38 @@ static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int qpn = priv->base_qpn;
	u64 mac = mlx4_en_mac_to_u64(priv->dev->dev_addr);
	u64 mac;

	if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
		mac = mlx4_en_mac_to_u64(priv->dev->dev_addr);
		en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
		       priv->dev->dev_addr);
		mlx4_unregister_mac(dev, priv->port, mac);

	if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
	} else {
		struct mlx4_mac_entry *entry;
		struct hlist_node *tmp;
		struct hlist_head *bucket;
		unsigned int mac_hash;
		unsigned int i;

		mac_hash = priv->dev->dev_addr[MLX4_EN_MAC_HASH_IDX];
		bucket = &priv->mac_hash[mac_hash];
		for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
			bucket = &priv->mac_hash[i];
			hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
				if (ether_addr_equal_64bits(entry->mac,
							    priv->dev->dev_addr)) {
					en_dbg(DRV, priv, "Releasing qp: port %d, MAC %pM, qpn %d\n",
					       priv->port, priv->dev->dev_addr, qpn);
					mac = mlx4_en_mac_to_u64(entry->mac);
					en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
					       entry->mac);
					mlx4_en_uc_steer_release(priv, entry->mac,
								 qpn, entry->reg_id);
					mlx4_qp_release_range(dev, qpn, 1);

					mlx4_unregister_mac(dev, priv->port, mac);
					hlist_del_rcu(&entry->hlist);
					kfree_rcu(entry, rcu);
					break;
				}
			}

		en_dbg(DRV, priv, "Releasing qp: port %d, qpn %d\n",
		       priv->port, qpn);
		mlx4_qp_release_range(dev, qpn, 1);
		priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC;
	}
}

@@ -650,28 +654,10 @@ u64 mlx4_en_mac_to_u64(u8 *addr)
	return mac;
}

static int mlx4_en_set_mac(struct net_device *dev, void *addr)
static int mlx4_en_do_set_mac(struct mlx4_en_priv *priv)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct sockaddr *saddr = addr;

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
	queue_work(mdev->workqueue, &priv->mac_task);
	return 0;
}

static void mlx4_en_do_set_mac(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 mac_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;

	mutex_lock(&mdev->state_lock);
	if (priv->port_up) {
		/* Remove old MAC and insert the new one */
		err = mlx4_en_replace_mac(priv, priv->base_qpn,
@@ -683,7 +669,26 @@ static void mlx4_en_do_set_mac(struct work_struct *work)
	} else
		en_dbg(HW, priv, "Port is down while registering mac, exiting...\n");

	return err;
}

static int mlx4_en_set_mac(struct net_device *dev, void *addr)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct sockaddr *saddr = addr;
	int err;

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);

	mutex_lock(&mdev->state_lock);
	err = mlx4_en_do_set_mac(priv);
	mutex_unlock(&mdev->state_lock);

	return err;
}

static void mlx4_en_clear_list(struct net_device *dev)
@@ -1348,7 +1353,7 @@ static void mlx4_en_do_get_stats(struct work_struct *work)
		queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
	}
	if (mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port]) {
		queue_work(mdev->workqueue, &priv->mac_task);
		mlx4_en_do_set_mac(priv);
		mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port] = 0;
	}
	mutex_unlock(&mdev->state_lock);
@@ -1828,9 +1833,11 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
	}

#ifdef CONFIG_RFS_ACCEL
	if (priv->mdev->dev->caps.comp_pool) {
		priv->dev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->mdev->dev->caps.comp_pool);
		if (!priv->dev->rx_cpu_rmap)
			goto err;
	}
#endif

	return 0;
@@ -2078,7 +2085,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
	priv->msg_enable = MLX4_EN_MSG_LEVEL;
	spin_lock_init(&priv->stats_lock);
	INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode);
	INIT_WORK(&priv->mac_task, mlx4_en_do_set_mac);
	INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
	INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
	INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);

@@ -787,6 +787,14 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
	bmme_flags &= ~MLX4_BMME_FLAG_TYPE_2_WIN;
	MLX4_PUT(outbox->buf, bmme_flags, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);

	/* turn off device-managed steering capability if not enabled */
	if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) {
		MLX4_GET(field, outbox->buf,
			 QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
		field &= 0x7f;
		MLX4_PUT(outbox->buf, field,
			 QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
	}
	return 0;
}

@@ -1555,7 +1555,7 @@ void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx)

void mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
{
	u64 in_param;
	u64 in_param = 0;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, idx);

@@ -1235,7 +1235,7 @@ int mlx4_get_qp_per_mgm(struct mlx4_dev *dev);

static inline void set_param_l(u64 *arg, u32 val)
{
	*((u32 *)arg) = val;
	*arg = (*arg & 0xffffffff00000000ULL) | (u64) val;
}

static inline void set_param_h(u64 *arg, u32 val)

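The set_param_l() change above explains the many "u64 in_param = 0;" and "u64 out_param = 0;" initializations scattered through the mlx4 hunks in this series: the helper now reads *arg in order to preserve its upper 32 bits, so callers must initialize the variable before first use. The old version also wrote the value into the wrong half of the u64 on big-endian machines. A stand-alone sketch of the two behaviours, using stdint names rather than kernel types:

	#include <stdint.h>

	/* old: type-puns through a u32 pointer; on a big-endian CPU this
	 * stores val into the HIGH 32 bits of *arg, not the low ones */
	static inline void set_param_l_old(uint64_t *arg, uint32_t val)
	{
		*((uint32_t *)arg) = val;
	}

	/* new: endian-independent, and preserves whatever set_param_h()
	 * already put in the upper half, but it reads *arg first */
	static inline void set_param_l_new(uint64_t *arg, uint32_t val)
	{
		*arg = (*arg & 0xffffffff00000000ULL) | (uint64_t)val;
	}
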
@@ -509,7 +509,6 @@ struct mlx4_en_priv {
	struct mlx4_en_cq rx_cq[MAX_RX_RINGS];
	struct mlx4_qp drop_qp;
	struct work_struct rx_mode_task;
	struct work_struct mac_task;
	struct work_struct watchdog_task;
	struct work_struct linkstate_task;
	struct delayed_work stats_task;

@@ -183,7 +183,7 @@ u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)

static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
{
	u64 in_param;
	u64 in_param = 0;
	u64 out_param;
	int err;

@@ -240,7 +240,7 @@ void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)

static void mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
{
	u64 in_param;
	u64 in_param = 0;
	int err;

	if (mlx4_is_mfunc(dev)) {
@@ -351,7 +351,7 @@ void __mlx4_mpt_release(struct mlx4_dev *dev, u32 index)

static void mlx4_mpt_release(struct mlx4_dev *dev, u32 index)
{
	u64 in_param;
	u64 in_param = 0;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, index);
@@ -374,7 +374,7 @@ int __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index)

static int mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index)
{
	u64 param;
	u64 param = 0;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&param, index);
@@ -395,7 +395,7 @@ void __mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index)

static void mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index)
{
	u64 in_param;
	u64 in_param = 0;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, index);

@@ -101,7 +101,7 @@ void __mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn)

void mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn)
{
	u64 in_param;
	u64 in_param = 0;
	int err;

	if (mlx4_is_mfunc(dev)) {

@@ -175,7 +175,7 @@ EXPORT_SYMBOL_GPL(__mlx4_register_mac);

int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
{
	u64 out_param;
	u64 out_param = 0;
	int err;

	if (mlx4_is_mfunc(dev)) {
@@ -222,7 +222,7 @@ EXPORT_SYMBOL_GPL(__mlx4_unregister_mac);

void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
{
	u64 out_param;
	u64 out_param = 0;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&out_param, port);
@@ -361,7 +361,7 @@ out:

int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
{
	u64 out_param;
	u64 out_param = 0;
	int err;

	if (mlx4_is_mfunc(dev)) {
@@ -406,7 +406,7 @@ out:

void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index)
{
	u64 in_param;
	u64 in_param = 0;
	int err;

	if (mlx4_is_mfunc(dev)) {

@@ -222,7 +222,7 @@ int __mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,

int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base)
{
	u64 in_param;
	u64 in_param = 0;
	u64 out_param;
	int err;

@@ -255,7 +255,7 @@ void __mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)

void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
{
	u64 in_param;
	u64 in_param = 0;
	int err;

	if (mlx4_is_mfunc(dev)) {
@@ -319,7 +319,7 @@ err_out:

static int mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn)
{
	u64 param;
	u64 param = 0;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&param, qpn);
@@ -344,7 +344,7 @@ void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)

static void mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
{
	u64 in_param;
	u64 in_param = 0;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, qpn);

@@ -2990,6 +2990,9 @@ int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
	u8 steer_type_mask = 2;
	enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;

	if (dev->caps.steering_mode != MLX4_STEERING_MODE_B0)
		return -EINVAL;

	qpn = vhcr->in_modifier & 0xffffff;
	err = get_res(dev, slave, qpn, RES_QP, &rqp);
	if (err)

@@ -149,7 +149,7 @@ void __mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn)

static void mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn)
{
	u64 in_param;
	u64 in_param = 0;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, srqn);

@@ -171,9 +171,9 @@ static inline void efx_device_detach_sync(struct efx_nic *efx)
	 * TX scheduler is stopped when we're done and before
	 * netif_device_present() becomes false.
	 */
	netif_tx_lock(dev);
	netif_tx_lock_bh(dev);
	netif_device_detach(dev);
	netif_tx_unlock(dev);
	netif_tx_unlock_bh(dev);
}

#endif /* EFX_EFX_H */

@@ -215,7 +215,7 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
		rx_buf = efx_rx_buffer(rx_queue, index);
		rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
		rx_buf->u.page = page;
		rx_buf->page_offset = page_offset;
		rx_buf->page_offset = page_offset + EFX_PAGE_IP_ALIGN;
		rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
		rx_buf->flags = EFX_RX_BUF_PAGE;
		++rx_queue->added_count;

@@ -202,6 +202,9 @@ static int rr_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
	return 0;

out:
	if (rrpriv->evt_ring)
		pci_free_consistent(pdev, EVT_RING_SIZE, rrpriv->evt_ring,
				    rrpriv->evt_ring_dma);
	if (rrpriv->rx_ring)
		pci_free_consistent(pdev, RX_TOTAL_SIZE, rrpriv->rx_ring,
				    rrpriv->rx_ring_dma);

@@ -660,6 +660,7 @@ void macvlan_common_setup(struct net_device *dev)
	ether_setup(dev);

	dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
	dev->priv_flags |= IFF_UNICAST_FLT;
	dev->netdev_ops = &macvlan_netdev_ops;
	dev->destructor = free_netdev;
	dev->header_ops = &macvlan_hard_header_ops,

@@ -1138,6 +1138,8 @@ static int team_port_del(struct team *team, struct net_device *port_dev)
	netdev_upper_dev_unlink(port_dev, dev);
	team_port_disable_netpoll(port);
	vlan_vids_del_by_dev(port_dev, dev);
	dev_uc_unsync(port_dev, dev);
	dev_mc_unsync(port_dev, dev);
	dev_close(port_dev);
	team_port_leave(team, port);

@@ -747,6 +747,8 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
		goto drop;
	skb_orphan(skb);

	nf_reset(skb);

	/* Enqueue packet */
	skb_queue_tail(&tfile->socket.sk->sk_receive_queue, skb);

@@ -2958,6 +2958,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,

	adapter->num_rx_queues = num_rx_queues;
	adapter->num_tx_queues = num_tx_queues;
	adapter->rx_buf_per_pkt = 1;

	size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
	size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues;

@@ -472,6 +472,12 @@ vmxnet3_set_ringparam(struct net_device *netdev,
	    VMXNET3_RX_RING_MAX_SIZE)
		return -EINVAL;

	/* if adapter not yet initialized, do nothing */
	if (adapter->rx_buf_per_pkt == 0) {
		netdev_err(netdev, "adapter not completely initialized, "
			   "ring size cannot be changed yet\n");
		return -EOPNOTSUPP;
	}

	/* round it up to a multiple of VMXNET3_RING_SIZE_ALIGN */
	new_tx_ring_size = (param->tx_pending + VMXNET3_RING_SIZE_MASK) &

@@ -70,10 +70,10 @@
/*
 * Version numbers
 */
#define VMXNET3_DRIVER_VERSION_STRING "1.1.29.0-k"
#define VMXNET3_DRIVER_VERSION_STRING "1.1.30.0-k"

/* a 32-bit int, each byte encode a version number in VMXNET3_DRIVER_VERSION */
#define VMXNET3_DRIVER_VERSION_NUM 0x01011D00
#define VMXNET3_DRIVER_VERSION_NUM 0x01011E00

#if defined(CONFIG_PCI_MSI)
/* RSS only makes sense if MSI-X is supported. */

|
||||
iph->ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
|
||||
tunnel_ip_select_ident(skb, old_iph, &rt->dst);
|
||||
|
||||
nf_reset(skb);
|
||||
|
||||
vxlan_set_owner(dev, skb);
|
||||
|
||||
/* See iptunnel_xmit() */
|
||||
@ -1504,6 +1506,14 @@ static __net_init int vxlan_init_net(struct net *net)
|
||||
static __net_exit void vxlan_exit_net(struct net *net)
|
||||
{
|
||||
struct vxlan_net *vn = net_generic(net, vxlan_net_id);
|
||||
struct vxlan_dev *vxlan;
|
||||
unsigned h;
|
||||
|
||||
rtnl_lock();
|
||||
for (h = 0; h < VNI_HASH_SIZE; ++h)
|
||||
hlist_for_each_entry(vxlan, &vn->vni_list[h], hlist)
|
||||
dev_close(vxlan->dev);
|
||||
rtnl_unlock();
|
||||
|
||||
if (vn->sock) {
|
||||
sk_release_kernel(vn->sock->sk);
|
||||
|
@ -151,7 +151,7 @@ int iwl_send_add_sta(struct iwl_priv *priv,
|
||||
sta_id, sta->sta.addr, flags & CMD_ASYNC ? "a" : "");
|
||||
|
||||
if (!(flags & CMD_ASYNC)) {
|
||||
cmd.flags |= CMD_WANT_SKB | CMD_WANT_HCMD;
|
||||
cmd.flags |= CMD_WANT_SKB;
|
||||
might_sleep();
|
||||
}
|
||||
|
||||
|
@ -363,7 +363,7 @@ TRACE_EVENT(iwlwifi_dev_hcmd,
|
||||
__entry->flags = cmd->flags;
|
||||
memcpy(__get_dynamic_array(hcmd), hdr, sizeof(*hdr));
|
||||
|
||||
for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
|
||||
for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
|
||||
if (!cmd->len[i])
|
||||
continue;
|
||||
memcpy((u8 *)__get_dynamic_array(hcmd) + offset,
|
||||
|
@ -1102,7 +1102,6 @@ void iwl_drv_stop(struct iwl_drv *drv)
|
||||
|
||||
/* shared module parameters */
|
||||
struct iwl_mod_params iwlwifi_mod_params = {
|
||||
.amsdu_size_8K = 1,
|
||||
.restart_fw = 1,
|
||||
.plcp_check = true,
|
||||
.bt_coex_active = true,
|
||||
@ -1207,7 +1206,7 @@ MODULE_PARM_DESC(11n_disable,
|
||||
"disable 11n functionality, bitmap: 1: full, 2: agg TX, 4: agg RX");
|
||||
module_param_named(amsdu_size_8K, iwlwifi_mod_params.amsdu_size_8K,
|
||||
int, S_IRUGO);
|
||||
MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
|
||||
MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size (default 0)");
|
||||
module_param_named(fw_restart, iwlwifi_mod_params.restart_fw, int, S_IRUGO);
|
||||
MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
|
||||
|
||||
|
@ -91,7 +91,7 @@ enum iwl_power_level {
|
||||
* @sw_crypto: using hardware encryption, default = 0
|
||||
* @disable_11n: disable 11n capabilities, default = 0,
|
||||
* use IWL_DISABLE_HT_* constants
|
||||
* @amsdu_size_8K: enable 8K amsdu size, default = 1
|
||||
* @amsdu_size_8K: enable 8K amsdu size, default = 0
|
||||
* @restart_fw: restart firmware, default = 1
|
||||
* @plcp_check: enable plcp health check, default = true
|
||||
* @wd_disable: enable stuck queue check, default = 0
|
||||
|
@@ -186,19 +186,13 @@ struct iwl_rx_packet {
 * @CMD_ASYNC: Return right away and don't wait for the response
 * @CMD_WANT_SKB: valid only with CMD_SYNC. The caller needs the buffer of the
 *	response. The caller needs to call iwl_free_resp when done.
 * @CMD_WANT_HCMD: The caller needs to get the HCMD that was sent in the
 *	response handler. Chunks flagged by %IWL_HCMD_DFL_NOCOPY won't be
 *	copied. The pointer passed to the response handler is in the transport
 *	ownership and don't need to be freed by the op_mode. This also means
 *	that the pointer is invalidated after the op_mode's handler returns.
 * @CMD_ON_DEMAND: This command is sent by the test mode pipe.
 */
enum CMD_MODE {
	CMD_SYNC = 0,
	CMD_ASYNC = BIT(0),
	CMD_WANT_SKB = BIT(1),
	CMD_WANT_HCMD = BIT(2),
	CMD_ON_DEMAND = BIT(3),
	CMD_ON_DEMAND = BIT(2),
};

#define DEF_CMD_PAYLOAD_SIZE 320
@@ -217,7 +211,11 @@ struct iwl_device_cmd {

#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))

#define IWL_MAX_CMD_TFDS 2
/*
 * number of transfer buffers (fragments) per transmit frame descriptor;
 * this is just the driver's idea, the hardware supports 20
 */
#define IWL_MAX_CMD_TBS_PER_TFD 2

/**
 * struct iwl_hcmd_dataflag - flag for each one of the chunks of the command
@@ -254,15 +252,15 @@ enum iwl_hcmd_dataflag {
 * @id: id of the host command
 */
struct iwl_host_cmd {
	const void *data[IWL_MAX_CMD_TFDS];
	const void *data[IWL_MAX_CMD_TBS_PER_TFD];
	struct iwl_rx_packet *resp_pkt;
	unsigned long _rx_page_addr;
	u32 _rx_page_order;
	int handler_status;

	u32 flags;
	u16 len[IWL_MAX_CMD_TFDS];
	u8 dataflags[IWL_MAX_CMD_TFDS];
	u16 len[IWL_MAX_CMD_TBS_PER_TFD];
	u8 dataflags[IWL_MAX_CMD_TBS_PER_TFD];
	u8 id;
};

@@ -762,18 +762,20 @@ struct iwl_phy_context_cmd {
#define IWL_RX_INFO_PHY_CNT 8
#define IWL_RX_INFO_AGC_IDX 1
#define IWL_RX_INFO_RSSI_AB_IDX 2
#define IWL_RX_INFO_RSSI_C_IDX 3
#define IWL_OFDM_AGC_DB_MSK 0xfe00
#define IWL_OFDM_AGC_DB_POS 9
#define IWL_OFDM_AGC_A_MSK 0x0000007f
#define IWL_OFDM_AGC_A_POS 0
#define IWL_OFDM_AGC_B_MSK 0x00003f80
#define IWL_OFDM_AGC_B_POS 7
#define IWL_OFDM_AGC_CODE_MSK 0x3fe00000
#define IWL_OFDM_AGC_CODE_POS 20
#define IWL_OFDM_RSSI_INBAND_A_MSK 0x00ff
#define IWL_OFDM_RSSI_ALLBAND_A_MSK 0xff00
#define IWL_OFDM_RSSI_A_POS 0
#define IWL_OFDM_RSSI_ALLBAND_A_MSK 0xff00
#define IWL_OFDM_RSSI_ALLBAND_A_POS 8
#define IWL_OFDM_RSSI_INBAND_B_MSK 0xff0000
#define IWL_OFDM_RSSI_ALLBAND_B_MSK 0xff000000
#define IWL_OFDM_RSSI_B_POS 16
#define IWL_OFDM_RSSI_INBAND_C_MSK 0x00ff
#define IWL_OFDM_RSSI_ALLBAND_C_MSK 0xff00
#define IWL_OFDM_RSSI_C_POS 0
#define IWL_OFDM_RSSI_ALLBAND_B_MSK 0xff000000
#define IWL_OFDM_RSSI_ALLBAND_B_POS 24

/**
 * struct iwl_rx_phy_info - phy info

@@ -79,17 +79,8 @@
#define UCODE_VALID_OK cpu_to_le32(0x1)

/* Default calibration values for WkP - set to INIT image w/o running */
static const u8 wkp_calib_values_bb_filter[] = { 0xbf, 0x00, 0x5f, 0x00, 0x2f,
						 0x00, 0x18, 0x00 };
static const u8 wkp_calib_values_rx_dc[] = { 0x7f, 0x7f, 0x7f, 0x7f, 0x7f,
					     0x7f, 0x7f, 0x7f };
static const u8 wkp_calib_values_tx_lo[] = { 0x00, 0x00, 0x00, 0x00 };
static const u8 wkp_calib_values_tx_iq[] = { 0xff, 0x00, 0xff, 0x00, 0x00,
					     0x00 };
static const u8 wkp_calib_values_rx_iq[] = { 0xff, 0x00, 0x00, 0x00 };
static const u8 wkp_calib_values_rx_iq_skew[] = { 0x00, 0x00, 0x01, 0x00 };
static const u8 wkp_calib_values_tx_iq_skew[] = { 0x01, 0x00, 0x00, 0x00 };
static const u8 wkp_calib_values_xtal[] = { 0xd2, 0xd2 };

struct iwl_calib_default_data {
	u16 size;
@@ -99,12 +90,7 @@ struct iwl_calib_default_data {
#define CALIB_SIZE_N_DATA(_buf) {.size = sizeof(_buf), .data = &_buf}

static const struct iwl_calib_default_data wkp_calib_default_data[12] = {
	[5] = CALIB_SIZE_N_DATA(wkp_calib_values_rx_dc),
	[6] = CALIB_SIZE_N_DATA(wkp_calib_values_bb_filter),
	[7] = CALIB_SIZE_N_DATA(wkp_calib_values_tx_lo),
	[8] = CALIB_SIZE_N_DATA(wkp_calib_values_tx_iq),
	[9] = CALIB_SIZE_N_DATA(wkp_calib_values_tx_iq_skew),
	[10] = CALIB_SIZE_N_DATA(wkp_calib_values_rx_iq),
	[11] = CALIB_SIZE_N_DATA(wkp_calib_values_rx_iq_skew),
};

@@ -241,20 +227,6 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,

	return 0;
}
#define IWL_HW_REV_ID_RAINBOW	0x2
#define IWL_PROJ_TYPE_LHP	0x5

static u32 iwl_mvm_build_phy_cfg(struct iwl_mvm *mvm)
{
	struct iwl_nvm_data *data = mvm->nvm_data;
	/* Temp calls to static definitions, will be changed to CSR calls */
	u8 hw_rev_id = IWL_HW_REV_ID_RAINBOW;
	u8 project_type = IWL_PROJ_TYPE_LHP;

	return data->radio_cfg_dash | (data->radio_cfg_step << 2) |
		(hw_rev_id << 4) | ((project_type & 0x7f) << 6) |
		(data->valid_tx_ant << 16) | (data->valid_rx_ant << 20);
}

static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
{
@@ -262,7 +234,7 @@ static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
	enum iwl_ucode_type ucode_type = mvm->cur_ucode;

	/* Set parameters */
	phy_cfg_cmd.phy_cfg = cpu_to_le32(iwl_mvm_build_phy_cfg(mvm));
	phy_cfg_cmd.phy_cfg = cpu_to_le32(mvm->fw->phy_config);
	phy_cfg_cmd.calib_control.event_trigger =
		mvm->fw->default_calib[ucode_type].event_trigger;
	phy_cfg_cmd.calib_control.flow_trigger =
@@ -275,103 +247,6 @@ static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
			    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
}

/* Starting with the new PHY DB implementation - New calibs are enabled */
/* Value - 0x405e7 */
#define IWL_CALIB_DEFAULT_FLOW_INIT	(IWL_CALIB_CFG_XTAL_IDX		|\
					 IWL_CALIB_CFG_TEMPERATURE_IDX	|\
					 IWL_CALIB_CFG_VOLTAGE_READ_IDX	|\
					 IWL_CALIB_CFG_DC_IDX		|\
					 IWL_CALIB_CFG_BB_FILTER_IDX	|\
					 IWL_CALIB_CFG_LO_LEAKAGE_IDX	|\
					 IWL_CALIB_CFG_TX_IQ_IDX	|\
					 IWL_CALIB_CFG_RX_IQ_IDX	|\
					 IWL_CALIB_CFG_AGC_IDX)

#define IWL_CALIB_DEFAULT_EVENT_INIT	0x0

/* Value 0x41567 */
#define IWL_CALIB_DEFAULT_FLOW_RUN	(IWL_CALIB_CFG_XTAL_IDX		|\
					 IWL_CALIB_CFG_TEMPERATURE_IDX	|\
					 IWL_CALIB_CFG_VOLTAGE_READ_IDX	|\
					 IWL_CALIB_CFG_BB_FILTER_IDX	|\
					 IWL_CALIB_CFG_DC_IDX		|\
					 IWL_CALIB_CFG_TX_IQ_IDX	|\
					 IWL_CALIB_CFG_RX_IQ_IDX	|\
					 IWL_CALIB_CFG_SENSITIVITY_IDX	|\
					 IWL_CALIB_CFG_AGC_IDX)

#define IWL_CALIB_DEFAULT_EVENT_RUN	(IWL_CALIB_CFG_XTAL_IDX		|\
					 IWL_CALIB_CFG_TEMPERATURE_IDX	|\
					 IWL_CALIB_CFG_VOLTAGE_READ_IDX	|\
					 IWL_CALIB_CFG_TX_PWR_IDX	|\
					 IWL_CALIB_CFG_DC_IDX		|\
					 IWL_CALIB_CFG_TX_IQ_IDX	|\
					 IWL_CALIB_CFG_SENSITIVITY_IDX)

/*
 * Sets the calibrations trigger values that will be sent to the FW for runtime
 * and init calibrations.
 * The ones given in the FW TLV are not correct.
 */
static void iwl_set_default_calib_trigger(struct iwl_mvm *mvm)
{
	struct iwl_tlv_calib_ctrl default_calib;

	/*
	 * WkP FW TLV calib bits are wrong, overwrite them.
	 * This defines the dynamic calibrations which are implemented in the
	 * uCode both for init(flow) calculation and event driven calibs.
	 */

	/* Init Image */
	default_calib.event_trigger = cpu_to_le32(IWL_CALIB_DEFAULT_EVENT_INIT);
	default_calib.flow_trigger = cpu_to_le32(IWL_CALIB_DEFAULT_FLOW_INIT);

	if (default_calib.event_trigger !=
	    mvm->fw->default_calib[IWL_UCODE_INIT].event_trigger)
		IWL_ERR(mvm,
			"Updating the event calib for INIT image: 0x%x -> 0x%x\n",
			mvm->fw->default_calib[IWL_UCODE_INIT].event_trigger,
			default_calib.event_trigger);
	if (default_calib.flow_trigger !=
	    mvm->fw->default_calib[IWL_UCODE_INIT].flow_trigger)
		IWL_ERR(mvm,
			"Updating the flow calib for INIT image: 0x%x -> 0x%x\n",
			mvm->fw->default_calib[IWL_UCODE_INIT].flow_trigger,
			default_calib.flow_trigger);

	memcpy((void *)&mvm->fw->default_calib[IWL_UCODE_INIT],
	       &default_calib, sizeof(struct iwl_tlv_calib_ctrl));
	IWL_ERR(mvm,
		"Setting uCode init calibrations event 0x%x, trigger 0x%x\n",
		default_calib.event_trigger,
		default_calib.flow_trigger);

	/* Run time image */
	default_calib.event_trigger = cpu_to_le32(IWL_CALIB_DEFAULT_EVENT_RUN);
	default_calib.flow_trigger = cpu_to_le32(IWL_CALIB_DEFAULT_FLOW_RUN);

	if (default_calib.event_trigger !=
	    mvm->fw->default_calib[IWL_UCODE_REGULAR].event_trigger)
		IWL_ERR(mvm,
			"Updating the event calib for RT image: 0x%x -> 0x%x\n",
			mvm->fw->default_calib[IWL_UCODE_REGULAR].event_trigger,
			default_calib.event_trigger);
	if (default_calib.flow_trigger !=
	    mvm->fw->default_calib[IWL_UCODE_REGULAR].flow_trigger)
		IWL_ERR(mvm,
			"Updating the flow calib for RT image: 0x%x -> 0x%x\n",
			mvm->fw->default_calib[IWL_UCODE_REGULAR].flow_trigger,
			default_calib.flow_trigger);

	memcpy((void *)&mvm->fw->default_calib[IWL_UCODE_REGULAR],
	       &default_calib, sizeof(struct iwl_tlv_calib_ctrl));
	IWL_ERR(mvm,
		"Setting uCode runtime calibs event 0x%x, trigger 0x%x\n",
		default_calib.event_trigger,
		default_calib.flow_trigger);
}

static int iwl_set_default_calibrations(struct iwl_mvm *mvm)
{
	u8 cmd_raw[16]; /* holds the variable size commands */
@@ -446,8 +321,10 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
	ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans);
	WARN_ON(ret);

	/* Override the calibrations from TLV and the const of fw */
	iwl_set_default_calib_trigger(mvm);
	/* Send TX valid antennas before triggering calibrations */
	ret = iwl_send_tx_ant_cfg(mvm, mvm->nvm_data->valid_tx_ant);
	if (ret)
		goto error;

	/* WkP doesn't have all calibrations, need to set default values */
	if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) {

@@ -80,7 +80,8 @@

#define IWL_INVALID_MAC80211_QUEUE 0xff
#define IWL_MVM_MAX_ADDRESSES 2
#define IWL_RSSI_OFFSET 44
/* RSSI offset for WkP */
#define IWL_RSSI_OFFSET 50

enum iwl_mvm_tx_fifo {
	IWL_MVM_TX_FIFO_BK = 0,

@@ -624,12 +624,8 @@ static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
	ieee80211_free_txskb(mvm->hw, skb);
}

static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode)
static void iwl_mvm_nic_restart(struct iwl_mvm *mvm)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);

	iwl_mvm_dump_nic_error_log(mvm);

	iwl_abort_notification_waits(&mvm->notif_wait);

	/*
@@ -663,9 +659,21 @@ static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode)
	}
}

static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);

	iwl_mvm_dump_nic_error_log(mvm);

	iwl_mvm_nic_restart(mvm);
}

static void iwl_mvm_cmd_queue_full(struct iwl_op_mode *op_mode)
{
	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);

	WARN_ON(1);
	iwl_mvm_nic_restart(mvm);
}

static const struct iwl_op_mode_ops iwl_mvm_ops = {

@@ -131,33 +131,42 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
static int iwl_mvm_calc_rssi(struct iwl_mvm *mvm,
			     struct iwl_rx_phy_info *phy_info)
{
	u32 rssi_a, rssi_b, rssi_c, max_rssi, agc_db;
	int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
	int rssi_all_band_a, rssi_all_band_b;
	u32 agc_a, agc_b, max_agc;
	u32 val;

	/* Find max rssi among 3 possible receivers.
	/* Find max rssi among 2 possible receivers.
	 * These values are measured by the Digital Signal Processor (DSP).
	 * They should stay fairly constant even as the signal strength varies,
	 * if the radio's Automatic Gain Control (AGC) is working right.
	 * AGC value (see below) will provide the "interesting" info.
	 */
	val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_AGC_IDX]);
	agc_a = (val & IWL_OFDM_AGC_A_MSK) >> IWL_OFDM_AGC_A_POS;
	agc_b = (val & IWL_OFDM_AGC_B_MSK) >> IWL_OFDM_AGC_B_POS;
	max_agc = max_t(u32, agc_a, agc_b);

	val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_RSSI_AB_IDX]);
	rssi_a = (val & IWL_OFDM_RSSI_INBAND_A_MSK) >> IWL_OFDM_RSSI_A_POS;
	rssi_b = (val & IWL_OFDM_RSSI_INBAND_B_MSK) >> IWL_OFDM_RSSI_B_POS;
	val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_RSSI_C_IDX]);
	rssi_c = (val & IWL_OFDM_RSSI_INBAND_C_MSK) >> IWL_OFDM_RSSI_C_POS;
	rssi_all_band_a = (val & IWL_OFDM_RSSI_ALLBAND_A_MSK) >>
				IWL_OFDM_RSSI_ALLBAND_A_POS;
	rssi_all_band_b = (val & IWL_OFDM_RSSI_ALLBAND_B_MSK) >>
				IWL_OFDM_RSSI_ALLBAND_B_POS;

	val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_AGC_IDX]);
	agc_db = (val & IWL_OFDM_AGC_DB_MSK) >> IWL_OFDM_AGC_DB_POS;
	/*
	 * dBm = rssi dB - agc dB - constant.
	 * Higher AGC (higher radio gain) means lower signal.
	 */
	rssi_a_dbm = rssi_a - IWL_RSSI_OFFSET - agc_a;
	rssi_b_dbm = rssi_b - IWL_RSSI_OFFSET - agc_b;
	max_rssi_dbm = max_t(int, rssi_a_dbm, rssi_b_dbm);

	max_rssi = max_t(u32, rssi_a, rssi_b);
	max_rssi = max_t(u32, max_rssi, rssi_c);
	IWL_DEBUG_STATS(mvm, "Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
			rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b);

	IWL_DEBUG_STATS(mvm, "Rssi In A %d B %d C %d Max %d AGC dB %d\n",
			rssi_a, rssi_b, rssi_c, max_rssi, agc_db);

	/* dBm = max_rssi dB - agc dB - constant.
	 * Higher AGC (higher radio gain) means lower signal. */
	return max_rssi - agc_db - IWL_RSSI_OFFSET;
	return max_rssi_dbm;
}

/*

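A worked example of the per-chain conversion above, with invented register values: if chain A reports an in-band RSSI of 30 dB and an AGC gain of 40 dB, then with IWL_RSSI_OFFSET at the new WkP value of 50:

	int rssi_a = 30;	/* invented: in-band RSSI, dB */
	int agc_a = 40;		/* invented: AGC gain, dB */
	int rssi_a_dbm = rssi_a - IWL_RSSI_OFFSET - agc_a;	/* 30 - 50 - 40 = -60 dBm */

Higher AGC gain means the radio had to amplify more, so a larger agc_a maps to a weaker (more negative) signal, which is why it is subtracted.
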
@@ -770,6 +770,16 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
	u16 txq_id;
	int err;

	/*
	 * If mac80211 is cleaning its state, then say that we finished since
	 * our state has been cleared anyway.
	 */
	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		return 0;
	}

	spin_lock_bh(&mvmsta->lock);

	txq_id = tid_data->txq_id;

@@ -607,12 +607,8 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,

		/* Single frame failure in an AMPDU queue => send BAR */
		if (txq_id >= IWL_FIRST_AMPDU_QUEUE &&
		    !(info->flags & IEEE80211_TX_STAT_ACK)) {
			/* there must be only one skb in the skb_list */
			WARN_ON_ONCE(skb_freed > 1 ||
				     !skb_queue_empty(&skbs));
		    !(info->flags & IEEE80211_TX_STAT_ACK))
			info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
		}

		/* W/A FW bug: seq_ctl is wrong when the queue is flushed */
		if (status == TX_STATUS_FAIL_FIFO_FLUSHED) {

@@ -137,10 +137,6 @@ static inline int iwl_queue_dec_wrap(int index, int n_bd)
struct iwl_cmd_meta {
	/* only for SYNC commands, iff the reply skb is wanted */
	struct iwl_host_cmd *source;

	DEFINE_DMA_UNMAP_ADDR(mapping);
	DEFINE_DMA_UNMAP_LEN(len);

	u32 flags;
};

@@ -185,25 +181,36 @@ struct iwl_queue {
/*
 * The FH will write back to the first TB only, so we need
 * to copy some data into the buffer regardless of whether
 * it should be mapped or not. This indicates how much to
 * copy, even for HCMDs it must be big enough to fit the
 * DRAM scratch from the TX cmd, at least 16 bytes.
 * it should be mapped or not. This indicates how big the
 * first TB must be to include the scratch buffer. Since
 * the scratch is 4 bytes at offset 12, it's 16 now. If we
 * make it bigger then allocations will be bigger and copy
 * slower, so that's probably not useful.
 */
#define IWL_HCMD_MIN_COPY_SIZE 16
#define IWL_HCMD_SCRATCHBUF_SIZE 16

struct iwl_pcie_txq_entry {
	struct iwl_device_cmd *cmd;
	struct iwl_device_cmd *copy_cmd;
	struct sk_buff *skb;
	/* buffer to free after command completes */
	const void *free_buf;
	struct iwl_cmd_meta meta;
};

struct iwl_pcie_txq_scratch_buf {
	struct iwl_cmd_header hdr;
	u8 buf[8];
	__le32 scratch;
};

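The 16-byte figure in the comment above can be read off this struct directly: a 4-byte command header, 8 bytes of buffer, then the 4-byte scratch word at offset 12. A stand-alone layout check with stand-in types of the same sizes (iwl_cmd_header is 4 bytes in the real driver; the names here are illustrative):

	#include <assert.h>
	#include <stddef.h>
	#include <stdint.h>

	struct cmd_header { uint8_t cmd, flags; uint16_t sequence; }; /* stand-in, 4 bytes */

	struct scratch_buf {
		struct cmd_header hdr;	/* offset 0 */
		uint8_t buf[8];		/* offset 4 */
		uint32_t scratch;	/* offset 12 */
	};

	int main(void)
	{
		assert(offsetof(struct scratch_buf, scratch) == 12);
		assert(sizeof(struct scratch_buf) == 16); /* IWL_HCMD_SCRATCHBUF_SIZE */
		return 0;
	}
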
/**
 * struct iwl_txq - Tx Queue for DMA
 * @q: generic Rx/Tx queue descriptor
 * @tfds: transmit frame descriptors (DMA memory)
 * @scratchbufs: start of command headers, including scratch buffers, for
 *	the writeback -- this is DMA memory and an array holding one buffer
 *	for each command on the queue
 * @scratchbufs_dma: DMA address for the scratchbufs start
 * @entries: transmit entries (driver state)
 * @lock: queue lock
 * @stuck_timer: timer that fires if queue gets stuck
@@ -217,6 +224,8 @@ struct iwl_pcie_txq_entry {
struct iwl_txq {
	struct iwl_queue q;
	struct iwl_tfd *tfds;
	struct iwl_pcie_txq_scratch_buf *scratchbufs;
	dma_addr_t scratchbufs_dma;
	struct iwl_pcie_txq_entry *entries;
	spinlock_t lock;
	struct timer_list stuck_timer;
@@ -225,6 +234,13 @@ struct iwl_txq {
	u8 active;
};

static inline dma_addr_t
iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx)
{
	return txq->scratchbufs_dma +
	       sizeof(struct iwl_pcie_txq_scratch_buf) * idx;
}

/**
 * struct iwl_trans_pcie - PCIe transport specific data
 * @rxq: all the RX queue data

@@ -637,22 +637,14 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
		index = SEQ_TO_INDEX(sequence);
		cmd_index = get_cmd_index(&txq->q, index);

		if (reclaim) {
			struct iwl_pcie_txq_entry *ent;
			ent = &txq->entries[cmd_index];
			cmd = ent->copy_cmd;
			WARN_ON_ONCE(!cmd && ent->meta.flags & CMD_WANT_HCMD);
		} else {
		if (reclaim)
			cmd = txq->entries[cmd_index].cmd;
		else
			cmd = NULL;
		}

		err = iwl_op_mode_rx(trans->op_mode, &rxcb, cmd);

		if (reclaim) {
			/* The original command isn't needed any more */
			kfree(txq->entries[cmd_index].copy_cmd);
			txq->entries[cmd_index].copy_cmd = NULL;
			/* nor is the duplicated part of the command */
			kfree(txq->entries[cmd_index].free_buf);
			txq->entries[cmd_index].free_buf = NULL;
		}

@@ -191,12 +191,9 @@ static void iwl_pcie_txq_stuck_timer(unsigned long data)
	}

	for (i = q->read_ptr; i != q->write_ptr;
	     i = iwl_queue_inc_wrap(i, q->n_bd)) {
		struct iwl_tx_cmd *tx_cmd =
			(struct iwl_tx_cmd *)txq->entries[i].cmd->payload;
	     i = iwl_queue_inc_wrap(i, q->n_bd))
		IWL_ERR(trans, "scratch %d = 0x%08x\n", i,
			get_unaligned_le32(&tx_cmd->scratch));
	}
			le32_to_cpu(txq->scratchbufs[i].scratch));

	iwl_op_mode_nic_error(trans->op_mode);
}
@@ -367,8 +364,8 @@ static inline u8 iwl_pcie_tfd_get_num_tbs(struct iwl_tfd *tfd)
}

static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
			       struct iwl_cmd_meta *meta, struct iwl_tfd *tfd,
			       enum dma_data_direction dma_dir)
			       struct iwl_cmd_meta *meta,
			       struct iwl_tfd *tfd)
{
	int i;
	int num_tbs;
@@ -382,17 +379,12 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
		return;
	}

	/* Unmap tx_cmd */
	if (num_tbs)
		dma_unmap_single(trans->dev,
				 dma_unmap_addr(meta, mapping),
				 dma_unmap_len(meta, len),
				 DMA_BIDIRECTIONAL);
	/* first TB is never freed - it's the scratchbuf data */

	/* Unmap chunks, if any. */
	for (i = 1; i < num_tbs; i++)
		dma_unmap_single(trans->dev, iwl_pcie_tfd_tb_get_addr(tfd, i),
				 iwl_pcie_tfd_tb_get_len(tfd, i), dma_dir);
				 iwl_pcie_tfd_tb_get_len(tfd, i),
				 DMA_TO_DEVICE);

	tfd->num_tbs = 0;
}
@@ -406,8 +398,7 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
 * Does NOT advance any TFD circular buffer read/write indexes
 * Does NOT free the TFD itself (which is within circular buffer)
 */
static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
				  enum dma_data_direction dma_dir)
static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
{
	struct iwl_tfd *tfd_tmp = txq->tfds;

@@ -418,8 +409,7 @@ static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
	lockdep_assert_held(&txq->lock);

	/* We have only q->n_window txq->entries, but we use q->n_bd tfds */
	iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr],
			   dma_dir);
	iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr]);

	/* free SKB */
	if (txq->entries) {
@ -479,6 +469,7 @@ static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
|
||||
{
|
||||
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
|
||||
size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
|
||||
size_t scratchbuf_sz;
|
||||
int i;
|
||||
|
||||
if (WARN_ON(txq->entries || txq->tfds))
|
||||
@ -514,9 +505,25 @@ static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
|
||||
IWL_ERR(trans, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
|
||||
goto error;
|
||||
}
|
||||
|
||||
BUILD_BUG_ON(IWL_HCMD_SCRATCHBUF_SIZE != sizeof(*txq->scratchbufs));
|
||||
BUILD_BUG_ON(offsetof(struct iwl_pcie_txq_scratch_buf, scratch) !=
|
||||
sizeof(struct iwl_cmd_header) +
|
||||
offsetof(struct iwl_tx_cmd, scratch));
|
||||
|
||||
scratchbuf_sz = sizeof(*txq->scratchbufs) * slots_num;
|
||||
|
||||
txq->scratchbufs = dma_alloc_coherent(trans->dev, scratchbuf_sz,
|
||||
&txq->scratchbufs_dma,
|
||||
GFP_KERNEL);
|
||||
if (!txq->scratchbufs)
|
||||
goto err_free_tfds;
|
||||
|
||||
txq->q.id = txq_id;
|
||||
|
||||
return 0;
|
||||
err_free_tfds:
|
||||
dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->q.dma_addr);
|
||||
error:
|
||||
if (txq->entries && txq_id == trans_pcie->cmd_queue)
|
||||
for (i = 0; i < slots_num; i++)
|
||||
@ -565,22 +572,13 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
|
||||
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
|
||||
struct iwl_txq *txq = &trans_pcie->txq[txq_id];
|
||||
struct iwl_queue *q = &txq->q;
|
||||
enum dma_data_direction dma_dir;
|
||||
|
||||
if (!q->n_bd)
|
||||
return;
|
||||
|
||||
/* In the command queue, all the TBs are mapped as BIDI
|
||||
* so unmap them as such.
|
||||
*/
|
||||
if (txq_id == trans_pcie->cmd_queue)
|
||||
dma_dir = DMA_BIDIRECTIONAL;
|
||||
else
|
||||
dma_dir = DMA_TO_DEVICE;
|
||||
|
||||
spin_lock_bh(&txq->lock);
|
||||
while (q->write_ptr != q->read_ptr) {
|
||||
iwl_pcie_txq_free_tfd(trans, txq, dma_dir);
|
||||
iwl_pcie_txq_free_tfd(trans, txq);
|
||||
q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
|
||||
}
|
||||
spin_unlock_bh(&txq->lock);
|
||||
@ -610,7 +608,6 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
|
||||
if (txq_id == trans_pcie->cmd_queue)
|
||||
for (i = 0; i < txq->q.n_window; i++) {
|
||||
kfree(txq->entries[i].cmd);
|
||||
kfree(txq->entries[i].copy_cmd);
|
||||
kfree(txq->entries[i].free_buf);
|
||||
}
|
||||
|
||||
@ -619,6 +616,10 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
|
||||
dma_free_coherent(dev, sizeof(struct iwl_tfd) *
|
||||
txq->q.n_bd, txq->tfds, txq->q.dma_addr);
|
||||
txq->q.dma_addr = 0;
|
||||
|
||||
dma_free_coherent(dev,
|
||||
sizeof(*txq->scratchbufs) * txq->q.n_window,
|
||||
txq->scratchbufs, txq->scratchbufs_dma);
|
||||
}
|
||||
|
||||
kfree(txq->entries);
|
||||
@ -962,7 +963,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
|
||||
|
||||
iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq);
|
||||
|
||||
iwl_pcie_txq_free_tfd(trans, txq, DMA_TO_DEVICE);
|
||||
iwl_pcie_txq_free_tfd(trans, txq);
|
||||
}
|
||||
|
||||
iwl_pcie_txq_progress(trans_pcie, txq);
|
||||
@ -1152,29 +1153,29 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
	void *dup_buf = NULL;
	dma_addr_t phys_addr;
	int idx;
	u16 copy_size, cmd_size, dma_size;
	u16 copy_size, cmd_size, scratch_size;
	bool had_nocopy = false;
	int i;
	u32 cmd_pos;
	const u8 *cmddata[IWL_MAX_CMD_TFDS];
	u16 cmdlen[IWL_MAX_CMD_TFDS];
	const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
	u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];

	copy_size = sizeof(out_cmd->hdr);
	cmd_size = sizeof(out_cmd->hdr);

	/* need one for the header if the first is NOCOPY */
	BUILD_BUG_ON(IWL_MAX_CMD_TFDS > IWL_NUM_OF_TBS - 1);
	BUILD_BUG_ON(IWL_MAX_CMD_TBS_PER_TFD > IWL_NUM_OF_TBS - 1);

	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		cmddata[i] = cmd->data[i];
		cmdlen[i] = cmd->len[i];

		if (!cmd->len[i])
			continue;

		/* need at least IWL_HCMD_MIN_COPY_SIZE copied */
		if (copy_size < IWL_HCMD_MIN_COPY_SIZE) {
			int copy = IWL_HCMD_MIN_COPY_SIZE - copy_size;
		/* need at least IWL_HCMD_SCRATCHBUF_SIZE copied */
		if (copy_size < IWL_HCMD_SCRATCHBUF_SIZE) {
			int copy = IWL_HCMD_SCRATCHBUF_SIZE - copy_size;

			if (copy > cmdlen[i])
				copy = cmdlen[i];
@ -1260,15 +1261,15 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
	/* and copy the data that needs to be copied */
	cmd_pos = offsetof(struct iwl_device_cmd, payload);
	copy_size = sizeof(out_cmd->hdr);
	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		int copy = 0;

		if (!cmd->len)
			continue;

		/* need at least IWL_HCMD_MIN_COPY_SIZE copied */
		if (copy_size < IWL_HCMD_MIN_COPY_SIZE) {
			copy = IWL_HCMD_MIN_COPY_SIZE - copy_size;
		/* need at least IWL_HCMD_SCRATCHBUF_SIZE copied */
		if (copy_size < IWL_HCMD_SCRATCHBUF_SIZE) {
			copy = IWL_HCMD_SCRATCHBUF_SIZE - copy_size;

			if (copy > cmd->len[i])
				copy = cmd->len[i];
@ -1286,50 +1287,38 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
		}
	}

	WARN_ON_ONCE(txq->entries[idx].copy_cmd);

	/*
	 * since out_cmd will be the source address of the FH, it will write
	 * the retry count there. So when the user needs to receive the HCMD
	 * that corresponds to the response in the response handler, it needs
	 * to set CMD_WANT_HCMD.
	 */
	if (cmd->flags & CMD_WANT_HCMD) {
		txq->entries[idx].copy_cmd =
			kmemdup(out_cmd, cmd_pos, GFP_ATOMIC);
		if (unlikely(!txq->entries[idx].copy_cmd)) {
			idx = -ENOMEM;
			goto out;
		}
	}

	IWL_DEBUG_HC(trans,
		     "Sending command %s (#%x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
		     get_cmd_string(trans_pcie, out_cmd->hdr.cmd),
		     out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
		     cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue);

	/*
	 * If the entire command is smaller than IWL_HCMD_MIN_COPY_SIZE, we must
	 * still map at least that many bytes for the hardware to write back to.
	 * We have enough space, so that's not a problem.
	 */
	dma_size = max_t(u16, copy_size, IWL_HCMD_MIN_COPY_SIZE);
	/* start the TFD with the scratchbuf */
	scratch_size = min_t(int, copy_size, IWL_HCMD_SCRATCHBUF_SIZE);
	memcpy(&txq->scratchbufs[q->write_ptr], &out_cmd->hdr, scratch_size);
	iwl_pcie_txq_build_tfd(trans, txq,
			       iwl_pcie_get_scratchbuf_dma(txq, q->write_ptr),
			       scratch_size, 1);

	phys_addr = dma_map_single(trans->dev, &out_cmd->hdr, dma_size,
				   DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
	/* map first command fragment, if any remains */
	if (copy_size > scratch_size) {
		phys_addr = dma_map_single(trans->dev,
					   ((u8 *)&out_cmd->hdr) + scratch_size,
					   copy_size - scratch_size,
					   DMA_TO_DEVICE);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			iwl_pcie_tfd_unmap(trans, out_meta,
					   &txq->tfds[q->write_ptr]);
			idx = -ENOMEM;
			goto out;
		}

	dma_unmap_addr_set(out_meta, mapping, phys_addr);
	dma_unmap_len_set(out_meta, len, dma_size);

	iwl_pcie_txq_build_tfd(trans, txq, phys_addr, copy_size, 1);
		iwl_pcie_txq_build_tfd(trans, txq, phys_addr,
				       copy_size - scratch_size, 0);
	}

	/* map the remaining (adjusted) nocopy/dup fragments */
	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		const void *data = cmddata[i];

		if (!cmdlen[i])
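The reworked mapping above splits the copied command in two: the first min(copy_size, IWL_HCMD_SCRATCHBUF_SIZE) bytes ride in the coherent scratch buffer as the first TB, and only the remainder is streaming-mapped. A toy sketch of that split, with an assumed 16-byte scratch size and made-up lengths:

#include <stdio.h>

#define SCRATCHBUF_SIZE 16	/* stand-in for IWL_HCMD_SCRATCHBUF_SIZE */

int main(void)
{
	unsigned int copy_size = 40;	/* hypothetical copied-command size */
	unsigned int scratch_size =
		copy_size < SCRATCHBUF_SIZE ? copy_size : SCRATCHBUF_SIZE;

	/* TB0 always comes from the coherent scratch buffer; a second,
	 * streaming-mapped TB carries whatever copied bytes remain */
	printf("TB0 (scratchbuf): %u bytes\n", scratch_size);
	printf("TB1 (dma_map_single): %u bytes\n", copy_size - scratch_size);
	return 0;
}
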
@ -1340,11 +1329,10 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
		if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
			data = dup_buf;
		phys_addr = dma_map_single(trans->dev, (void *)data,
					   cmdlen[i], DMA_BIDIRECTIONAL);
					   cmdlen[i], DMA_TO_DEVICE);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			iwl_pcie_tfd_unmap(trans, out_meta,
					   &txq->tfds[q->write_ptr],
					   DMA_BIDIRECTIONAL);
					   &txq->tfds[q->write_ptr]);
			idx = -ENOMEM;
			goto out;
		}
@ -1418,7 +1406,7 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
	cmd = txq->entries[cmd_index].cmd;
	meta = &txq->entries[cmd_index].meta;

	iwl_pcie_tfd_unmap(trans, meta, &txq->tfds[index], DMA_BIDIRECTIONAL);
	iwl_pcie_tfd_unmap(trans, meta, &txq->tfds[index]);

	/* Input error checking is done when commands are added to queue. */
	if (meta->flags & CMD_WANT_SKB) {
@ -1597,10 +1585,9 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
	struct iwl_cmd_meta *out_meta;
	struct iwl_txq *txq;
	struct iwl_queue *q;
	dma_addr_t phys_addr = 0;
	dma_addr_t txcmd_phys;
	dma_addr_t scratch_phys;
	u16 len, firstlen, secondlen;
	dma_addr_t tb0_phys, tb1_phys, scratch_phys;
	void *tb1_addr;
	u16 len, tb1_len, tb2_len;
	u8 wait_write_ptr = 0;
	__le16 fc = hdr->frame_control;
	u8 hdr_len = ieee80211_hdrlen(fc);
@ -1638,35 +1625,73 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
			cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
				    INDEX_TO_SEQ(q->write_ptr)));

	tb0_phys = iwl_pcie_get_scratchbuf_dma(txq, q->write_ptr);
	scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) +
		       offsetof(struct iwl_tx_cmd, scratch);

	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_meta = &txq->entries[q->write_ptr].meta;

	/*
	 * Use the first empty entry in this queue's command buffer array
	 * to contain the Tx command and MAC header concatenated together
	 * (payload data will be in another buffer).
	 * Size of this varies, due to varying MAC header length.
	 * If end is not dword aligned, we'll have 2 extra bytes at the end
	 * of the MAC header (device reads on dword boundaries).
	 * We'll tell device about this padding later.
	 * The second TB (tb1) points to the remainder of the TX command
	 * and the 802.11 header - dword aligned size
	 * (This calculation modifies the TX command, so do it before the
	 * setup of the first TB)
	 */
	len = sizeof(struct iwl_tx_cmd) +
		sizeof(struct iwl_cmd_header) + hdr_len;
	firstlen = (len + 3) & ~3;
	len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) +
	      hdr_len - IWL_HCMD_SCRATCHBUF_SIZE;
	tb1_len = (len + 3) & ~3;

	/* Tell NIC about any 2-byte padding after MAC header */
	if (firstlen != len)
	if (tb1_len != len)
		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;

	/* Physical address of this Tx command's header (not MAC header!),
	 * within command buffer array. */
	txcmd_phys = dma_map_single(trans->dev,
				    &dev_cmd->hdr, firstlen,
				    DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(trans->dev, txcmd_phys)))
	/* The first TB points to the scratchbuf data - min_copy bytes */
	memcpy(&txq->scratchbufs[q->write_ptr], &dev_cmd->hdr,
	       IWL_HCMD_SCRATCHBUF_SIZE);
	iwl_pcie_txq_build_tfd(trans, txq, tb0_phys,
			       IWL_HCMD_SCRATCHBUF_SIZE, 1);

	/* there must be data left over for TB1 or this code must be changed */
	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_HCMD_SCRATCHBUF_SIZE);

	/* map the data for TB1 */
	tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_HCMD_SCRATCHBUF_SIZE;
	tb1_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(trans->dev, tb1_phys)))
		goto out_err;
	dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
	dma_unmap_len_set(out_meta, len, firstlen);
	iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, 0);

	/*
	 * Set up TFD's third entry to point directly to remainder
	 * of skb, if any (802.11 null frames have no payload).
	 */
	tb2_len = skb->len - hdr_len;
	if (tb2_len > 0) {
		dma_addr_t tb2_phys = dma_map_single(trans->dev,
						     skb->data + hdr_len,
						     tb2_len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, tb2_phys))) {
			iwl_pcie_tfd_unmap(trans, out_meta,
					   &txq->tfds[q->write_ptr]);
			goto out_err;
		}
		iwl_pcie_txq_build_tfd(trans, txq, tb2_phys, tb2_len, 0);
	}

	/* Set up entry for this TFD in Tx byte-count array */
	iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));

	trace_iwlwifi_dev_tx(trans->dev, skb,
			     &txq->tfds[txq->q.write_ptr],
			     sizeof(struct iwl_tfd),
			     &dev_cmd->hdr, IWL_HCMD_SCRATCHBUF_SIZE + tb1_len,
			     skb->data + hdr_len, tb2_len);
	trace_iwlwifi_dev_tx_data(trans->dev, skb,
				  skb->data + hdr_len, tb2_len);

	if (!ieee80211_has_morefrags(fc)) {
		txq->need_update = 1;
@ -1675,49 +1700,6 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
		txq->need_update = 0;
	}

	/* Set up TFD's 2nd entry to point directly to remainder of skb,
	 * if any (802.11 null frames have no payload). */
	secondlen = skb->len - hdr_len;
	if (secondlen > 0) {
		phys_addr = dma_map_single(trans->dev, skb->data + hdr_len,
					   secondlen, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
			dma_unmap_single(trans->dev,
					 dma_unmap_addr(out_meta, mapping),
					 dma_unmap_len(out_meta, len),
					 DMA_BIDIRECTIONAL);
			goto out_err;
		}
	}

	/* Attach buffers to TFD */
	iwl_pcie_txq_build_tfd(trans, txq, txcmd_phys, firstlen, 1);
	if (secondlen > 0)
		iwl_pcie_txq_build_tfd(trans, txq, phys_addr, secondlen, 0);

	scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
		       offsetof(struct iwl_tx_cmd, scratch);

	/* take back ownership of DMA buffer to enable update */
	dma_sync_single_for_cpu(trans->dev, txcmd_phys, firstlen,
				DMA_BIDIRECTIONAL);
	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);

	/* Set up entry for this TFD in Tx byte-count array */
	iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));

	dma_sync_single_for_device(trans->dev, txcmd_phys, firstlen,
				   DMA_BIDIRECTIONAL);

	trace_iwlwifi_dev_tx(trans->dev, skb,
			     &txq->tfds[txq->q.write_ptr],
			     sizeof(struct iwl_tfd),
			     &dev_cmd->hdr, firstlen,
			     skb->data + hdr_len, secondlen);
	trace_iwlwifi_dev_tx_data(trans->dev, skb,
				  skb->data + hdr_len, secondlen);

	/* start timer if queue currently empty */
	if (txq->need_update && q->read_ptr == q->write_ptr &&
	    trans_pcie->wd_timeout)

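The tb1_len computation above rounds the leftover command-plus-header bytes up to a 4-byte boundary because the device reads dwords, and TX_CMD_FLG_MH_PAD_MSK reports any padding. The rounding in isolation, as a small sketch:

#include <stdio.h>

/* Round up to the next dword boundary, as the diff computes tb1_len. */
static unsigned int dword_align(unsigned int len)
{
	return (len + 3) & ~3u;
}

int main(void)
{
	/* 26 -> 28: two padding bytes were added, so the driver would also
	 * set TX_CMD_FLG_MH_PAD_MSK to tell the device about them */
	printf("26 -> %u\n", dword_align(26));
	/* 28 -> 28: already aligned, no padding, no flag */
	printf("28 -> %u\n", dword_align(28));
	return 0;
}
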
@ -845,15 +845,8 @@ int hostfs_setattr(struct dentry *dentry, struct iattr *attr)
		return err;

	if ((attr->ia_valid & ATTR_SIZE) &&
	    attr->ia_size != i_size_read(inode)) {
		int error;

		error = inode_newsize_ok(inode, attr->ia_size);
		if (error)
			return error;

	    attr->ia_size != i_size_read(inode))
		truncate_setsize(inode, attr->ia_size);
	}

	setattr_copy(inode, attr);
	mark_inode_dirty(inode);

@ -206,6 +206,7 @@ struct clocksource {
#define CLOCK_SOURCE_WATCHDOG			0x10
#define CLOCK_SOURCE_VALID_FOR_HRES		0x20
#define CLOCK_SOURCE_UNSTABLE			0x40
#define CLOCK_SOURCE_SUSPEND_NONSTOP		0x80

/* simplify initialization of mask field */
#define CLOCKSOURCE_MASK(bits) (cycle_t)((bits) < 64 ? ((1ULL<<(bits))-1) : -1)

@ -157,6 +157,7 @@ enum hrtimer_base_type {
	HRTIMER_BASE_MONOTONIC,
	HRTIMER_BASE_REALTIME,
	HRTIMER_BASE_BOOTTIME,
	HRTIMER_BASE_TAI,
	HRTIMER_MAX_CLOCK_BASES,
};

@ -327,7 +328,9 @@ extern ktime_t ktime_get(void);
extern ktime_t ktime_get_real(void);
extern ktime_t ktime_get_boottime(void);
extern ktime_t ktime_get_monotonic_offset(void);
extern ktime_t ktime_get_update_offsets(ktime_t *offs_real, ktime_t *offs_boot);
extern ktime_t ktime_get_clocktai(void);
extern ktime_t ktime_get_update_offsets(ktime_t *offs_real, ktime_t *offs_boot,
					ktime_t *offs_tai);

DECLARE_PER_CPU(struct tick_device, tick_cpu_device);

@ -75,7 +75,6 @@ extern int register_refined_jiffies(long clock_tick_rate);
 */
extern u64 __jiffy_data jiffies_64;
extern unsigned long volatile __jiffy_data jiffies;
extern seqlock_t jiffies_lock;

#if (BITS_PER_LONG < 64)
u64 get_jiffies_64(void);

@ -181,6 +181,9 @@ extern struct timespec timespec_trunc(struct timespec t, unsigned gran);
extern int timekeeping_valid_for_hres(void);
extern u64 timekeeping_max_deferment(void);
extern int timekeeping_inject_offset(struct timespec *ts);
extern s32 timekeeping_get_tai_offset(void);
extern void timekeeping_set_tai_offset(s32 tai_offset);
extern void timekeeping_clocktai(struct timespec *ts);

struct tms;
extern void do_sys_times(struct tms *);

@ -62,8 +62,11 @@ struct timekeeper {
	ktime_t			offs_boot;
	/* The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock. */
	struct timespec		raw_time;
	/* Seqlock for all timekeeper values */
	seqlock_t		lock;
	/* The current UTC to TAI offset in seconds */
	s32			tai_offset;
	/* Offset clock monotonic -> clock tai */
	ktime_t			offs_tai;

};

static inline struct timespec tk_xtime(struct timekeeper *tk)

@ -54,11 +54,9 @@ struct itimerval {
#define CLOCK_BOOTTIME			7
#define CLOCK_REALTIME_ALARM		8
#define CLOCK_BOOTTIME_ALARM		9
#define CLOCK_SGI_CYCLE			10	/* Hardware specific */
#define CLOCK_TAI			11

/*
 * The IDs of various hardware clocks:
 */
#define CLOCK_SGI_CYCLE			10
#define MAX_CLOCKS			16
#define CLOCKS_MASK			(CLOCK_REALTIME | CLOCK_MONOTONIC)
#define CLOCKS_MONO			CLOCK_MONOTONIC

@ -83,6 +83,12 @@ DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
		.get_time = &ktime_get_boottime,
		.resolution = KTIME_LOW_RES,
	},
	{
		.index = HRTIMER_BASE_TAI,
		.clockid = CLOCK_TAI,
		.get_time = &ktime_get_clocktai,
		.resolution = KTIME_LOW_RES,
	},
	}
};

@ -90,6 +96,7 @@ static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = {
	[CLOCK_REALTIME]	= HRTIMER_BASE_REALTIME,
	[CLOCK_MONOTONIC]	= HRTIMER_BASE_MONOTONIC,
	[CLOCK_BOOTTIME]	= HRTIMER_BASE_BOOTTIME,
	[CLOCK_TAI]		= HRTIMER_BASE_TAI,
};

static inline int hrtimer_clockid_to_base(clockid_t clock_id)
@ -106,8 +113,10 @@ static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base)
{
	ktime_t xtim, mono, boot;
	struct timespec xts, tom, slp;
	s32 tai_offset;

	get_xtime_and_monotonic_and_sleep_offset(&xts, &tom, &slp);
	tai_offset = timekeeping_get_tai_offset();

	xtim = timespec_to_ktime(xts);
	mono = ktime_add(xtim, timespec_to_ktime(tom));
@ -115,6 +124,8 @@ static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base)
	base->clock_base[HRTIMER_BASE_REALTIME].softirq_time = xtim;
	base->clock_base[HRTIMER_BASE_MONOTONIC].softirq_time = mono;
	base->clock_base[HRTIMER_BASE_BOOTTIME].softirq_time = boot;
	base->clock_base[HRTIMER_BASE_TAI].softirq_time =
		ktime_add(xtim, ktime_set(tai_offset, 0));
}

/*
@ -651,8 +662,9 @@ static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
{
	ktime_t *offs_real = &base->clock_base[HRTIMER_BASE_REALTIME].offset;
	ktime_t *offs_boot = &base->clock_base[HRTIMER_BASE_BOOTTIME].offset;
	ktime_t *offs_tai = &base->clock_base[HRTIMER_BASE_TAI].offset;

	return ktime_get_update_offsets(offs_real, offs_boot);
	return ktime_get_update_offsets(offs_real, offs_boot, offs_tai);
}

/*

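On the hrtimer side, wiring up the new base is a one-line addition to a flat clockid-to-base lookup table. A stand-alone illustration of that table shape, with generic base names (only the clockid values come from the uapi header above):

#include <stdio.h>

enum { BASE_MONOTONIC, BASE_REALTIME, BASE_BOOTTIME, BASE_TAI, MAX_BASES };

/* Sparse clockid -> base map, same shape as hrtimer_clock_to_base_table;
 * the base names here are ours, not the kernel's. */
static const int clock_to_base[16] = {
	[0]  = BASE_REALTIME,	/* CLOCK_REALTIME */
	[1]  = BASE_MONOTONIC,	/* CLOCK_MONOTONIC */
	[7]  = BASE_BOOTTIME,	/* CLOCK_BOOTTIME */
	[11] = BASE_TAI,	/* CLOCK_TAI */
};

int main(void)
{
	printf("CLOCK_TAI (11) -> base %d\n", clock_to_base[11]);
	return 0;
}
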
@ -221,6 +221,11 @@ static int posix_get_boottime(const clockid_t which_clock, struct timespec *tp)
	return 0;
}

static int posix_get_tai(clockid_t which_clock, struct timespec *tp)
{
	timekeeping_clocktai(tp);
	return 0;
}

/*
 * Initialize everything, well, just everything in Posix clocks/timers ;)
@ -261,6 +266,16 @@ static __init int init_posix_timers(void)
		.clock_getres	= posix_get_coarse_res,
		.clock_get	= posix_get_monotonic_coarse,
	};
	struct k_clock clock_tai = {
		.clock_getres	= hrtimer_get_res,
		.clock_get	= posix_get_tai,
		.nsleep		= common_nsleep,
		.nsleep_restart	= hrtimer_nanosleep_restart,
		.timer_create	= common_timer_create,
		.timer_set	= common_timer_set,
		.timer_get	= common_timer_get,
		.timer_del	= common_timer_del,
	};
	struct k_clock clock_boottime = {
		.clock_getres	= hrtimer_get_res,
		.clock_get	= posix_get_boottime,
@ -278,6 +293,7 @@ static __init int init_posix_timers(void)
	posix_timers_register_clock(CLOCK_REALTIME_COARSE, &clock_realtime_coarse);
	posix_timers_register_clock(CLOCK_MONOTONIC_COARSE, &clock_monotonic_coarse);
	posix_timers_register_clock(CLOCK_BOOTTIME, &clock_boottime);
	posix_timers_register_clock(CLOCK_TAI, &clock_tai);

	posix_timers_cache = kmem_cache_create("posix_timers_cache",
					sizeof (struct k_itimer), 0, SLAB_PANIC,

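Once init_posix_timers() registers clock_tai as above, userspace can read the new clock directly. A minimal sketch of what that looks like from the other side of the syscall boundary:

#include <stdio.h>
#include <time.h>

#ifndef CLOCK_TAI
#define CLOCK_TAI 11	/* matches the new uapi definition in this diff */
#endif

int main(void)
{
	struct timespec tai, real;

	if (clock_gettime(CLOCK_TAI, &tai) != 0) {
		perror("clock_gettime(CLOCK_TAI)");	/* pre-TAI kernel */
		return 1;
	}
	clock_gettime(CLOCK_REALTIME, &real);

	/* on a kernel with a TAI offset set, the two differ by that offset */
	printf("TAI-UTC: %ld s\n", (long)(tai.tv_sec - real.tv_sec));
	return 0;
}
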
@ -138,13 +138,14 @@ int persistent_clock_is_local;
 */
static inline void warp_clock(void)
{
	if (sys_tz.tz_minuteswest != 0) {
		struct timespec adjust;

		adjust = current_kernel_time();
	if (sys_tz.tz_minuteswest != 0)
		persistent_clock_is_local = 1;
		adjust.tv_sec += sys_tz.tz_minuteswest * 60;
		do_settimeofday(&adjust);
		adjust.tv_sec = sys_tz.tz_minuteswest * 60;
		adjust.tv_nsec = 0;
		timekeeping_inject_offset(&adjust);
	}
}

/*

@ -53,9 +53,6 @@ static int time_state = TIME_OK;
/* clock status bits: */
static int time_status = STA_UNSYNC;

/* TAI offset (secs): */
static long time_tai;

/* time adjustment (nsecs): */
static s64 time_offset;

@ -415,7 +412,6 @@ int second_overflow(unsigned long secs)
		else if (secs % 86400 == 0) {
			leap = -1;
			time_state = TIME_OOP;
			time_tai++;
			printk(KERN_NOTICE
				"Clock: inserting leap second 23:59:60 UTC\n");
		}
@ -425,7 +421,6 @@ int second_overflow(unsigned long secs)
			time_state = TIME_OK;
		else if ((secs + 1) % 86400 == 0) {
			leap = 1;
			time_tai--;
			time_state = TIME_WAIT;
			printk(KERN_NOTICE
				"Clock: deleting leap second 23:59:59 UTC\n");
@ -579,7 +574,9 @@ static inline void process_adj_status(struct timex *txc, struct timespec *ts)
 * Called with ntp_lock held, so we can access and modify
 * all the global NTP state:
 */
static inline void process_adjtimex_modes(struct timex *txc, struct timespec *ts)
static inline void process_adjtimex_modes(struct timex *txc,
					  struct timespec *ts,
					  s32 *time_tai)
{
	if (txc->modes & ADJ_STATUS)
		process_adj_status(txc, ts);
@ -613,7 +610,7 @@ static inline void process_adjtimex_modes(struct timex *txc, struct timespec *ts
	}

	if (txc->modes & ADJ_TAI && txc->constant > 0)
		time_tai = txc->constant;
		*time_tai = txc->constant;

	if (txc->modes & ADJ_OFFSET)
		ntp_update_offset(txc->offset);
@ -632,6 +629,7 @@ static inline void process_adjtimex_modes(struct timex *txc, struct timespec *ts
int do_adjtimex(struct timex *txc)
{
	struct timespec ts;
	u32 time_tai, orig_tai;
	int result;

	/* Validate the data before disabling interrupts */
@ -671,6 +669,7 @@ int do_adjtimex(struct timex *txc)
	}

	getnstimeofday(&ts);
	orig_tai = time_tai = timekeeping_get_tai_offset();

	raw_spin_lock_irq(&ntp_lock);

@ -687,7 +686,7 @@ int do_adjtimex(struct timex *txc)

	/* If there are input parameters, then process them: */
	if (txc->modes)
		process_adjtimex_modes(txc, &ts);
		process_adjtimex_modes(txc, &ts, &time_tai);

	txc->offset = shift_right(time_offset * NTP_INTERVAL_FREQ,
				  NTP_SCALE_SHIFT);
@ -716,6 +715,9 @@ int do_adjtimex(struct timex *txc)

	raw_spin_unlock_irq(&ntp_lock);

	if (time_tai != orig_tai)
		timekeeping_set_tai_offset(time_tai);

	txc->time.tv_sec = ts.tv_sec;
	txc->time.tv_usec = ts.tv_nsec;
	if (!(time_status & STA_NANO))

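With the NTP changes above, the TAI offset lives in the timekeeper rather than a private ntp.c variable, but userspace still reads and sets it through adjtimex(). A small query-only sketch:

#include <stdio.h>
#include <sys/timex.h>

int main(void)
{
	struct timex tx = { .modes = 0 };	/* query only, no changes */

	if (adjtimex(&tx) == -1) {
		perror("adjtimex");
		return 1;
	}
	/* tx.tai is the offset this series moves into the timekeeper;
	 * setting it takes modes = ADJ_TAI, constant = <offset>, and
	 * CAP_SYS_TIME */
	printf("kernel TAI offset: %d s\n", tx.tai);
	return 0;
}
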
Some files were not shown because too many files have changed in this diff.