Merge tag 'omap-for-v4.2/wakeirq-drivers-v2' of git://git.kernel.org/pub/scm/linux/kernel/git/tmlind/linux-omap into next/late

Merge "omap generic wakeirq for v4.2 merge window" from Tony Lindgren:

Omap driver changes for v4.2 to switch the omap_hsmmc, 8250_omap and
omap-serial drivers over to the Linux generic wake IRQ events.

The generic wake IRQs also fix potential IRQ re-entrancy issues in these
drivers, at least for serial-omap.

Note that because of dependencies and merge conflicts these are
based on Rafael's pm-wakeirq and Greg's tty-next branches.

* tag 'omap-for-v4.2/wakeirq-drivers-v2' of git://git.kernel.org/pub/scm/linux/kernel/git/tmlind/linux-omap: (148 commits)
  serial: 8250_omap: Move wake-up interrupt to generic wakeirq
  serial: omap: Switch wake-up interrupt to generic wakeirq
  tty: move linux/gsmmux.h to uapi
  doc: dt: add documentation for nxp,lpc1850-uart
  serial: 8250: add LPC18xx/43xx UART driver
  serial: 8250_uniphier: add UniPhier serial driver
  serial: 8250_dw: support ACPI platforms with integrated DMA engine
  serial: of_serial: check the return value of clk_prepare_enable()
  serial: of_serial: use devm_clk_get() instead of clk_get()
  serial: earlycon: Add support for big-endian MMIO accesses
  serial: sirf: use hrtimer for data rx
  serial: sirf: correct the fifo empty_bit
  serial: sirf: fix system hung on console log output
  serial: 8250: remove return statements from void function
  sc16is7xx: use kworker for RS-485 configuration
  sc16is7xx: use kworker to update ier bits
  sc16is7xx: use kworker for md_proc
  sc16is7xx: move RTS delay to workqueue
  sc16is7xx: use kthread_worker for tx_work and irq
  sc16is7xx: use LSR_TEMT_BIT in .tx_empty()
  ...
Merged by Kevin Hilman, 2015-07-01 12:25:13 -07:00
commit 593aae2142
101 changed files with 2894 additions and 1892 deletions
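Before the per-file diffs, a hedged sketch of the driver-side pattern this series converges on: device_init_wakeup() plus dev_pm_set_dedicated_wake_irq() in probe, dev_pm_clear_wake_irq() on removal, with the PM core arming, disarming and enabling the interrupt as the drivers/base/power changes below show. The driver name, IRQ index and error handling here are illustrative assumptions, not code taken from this merge.

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>
#include <linux/pm_wakeup.h>

static int foo_probe(struct platform_device *pdev)
{
	int wakeirq, ret;

	/* Dedicated wake-up interrupt, assumed to be the second IRQ resource */
	wakeirq = platform_get_irq(pdev, 1);

	device_init_wakeup(&pdev->dev, true);
	pm_runtime_enable(&pdev->dev);

	if (wakeirq > 0) {
		/* The PM core now enables/disables and arms/disarms this IRQ */
		ret = dev_pm_set_dedicated_wake_irq(&pdev->dev, wakeirq);
		if (ret)
			dev_warn(&pdev->dev, "no wake IRQ: %d\n", ret);
	}

	return 0;
}

static int foo_remove(struct platform_device *pdev)
{
	dev_pm_clear_wake_irq(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	device_init_wakeup(&pdev->dev, false);
	return 0;
}

static struct platform_driver foo_driver = {
	.probe	= foo_probe,
	.remove	= foo_remove,
	.driver	= {
		.name	= "foo-uart",
	},
};
module_platform_driver(foo_driver);

MODULE_LICENSE("GPL v2");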

View File

@ -0,0 +1,10 @@
* ARM SBSA defined generic UART
This UART uses a subset of the PL011 registers and consequently lives
in the PL011 driver. Its baudrate and other communication parameters
cannot be adjusted at runtime, so it lacks a clock specifier here.
Required properties:
- compatible: must be "arm,sbsa-uart"
- reg: exactly one register range
- interrupts: exactly one interrupt specifier
- current-speed: the (fixed) baud rate set by the firmware

View File

@ -14,7 +14,14 @@ Required properties:
- interrupts: A single interrupt specifier.
- clocks: Clock driving the hardware.
- clocks : Must contain an entry for each entry in clock-names.
See ../clocks/clock-bindings.txt for details.
- clock-names:
- "baud": The clock the baudrate is derived from
- "bus": The bus clock for register accesses (optional)
For compatibility with older device trees an unnamed clock is used for the
baud clock if the baudclk does not exist. Do not use this for new designs.
Example:
@ -22,5 +29,6 @@ Example:
compatible = "mediatek,mt6589-uart", "mediatek,mt6577-uart";
reg = <0x11006000 0x400>;
interrupts = <GIC_SPI 51 IRQ_TYPE_LEVEL_LOW>;
clocks = <&uart_clk>;
clocks = <&uart_clk>, <&bus_clk>;
clock-names = "baud", "bus";
};

View File

@ -0,0 +1,28 @@
* NXP LPC1850 UART
Required properties:
- compatible : "nxp,lpc1850-uart", "ns16550a".
- reg : offset and length of the register set for the device.
- interrupts : should contain uart interrupt.
- clocks : phandle to the input clocks.
- clock-names : required elements: "uartclk", "reg".
Optional properties:
- dmas : Two or more DMA channel specifiers following the
convention outlined in bindings/dma/dma.txt
- dma-names : Names for the dma channels, if present. There must
be at least one channel named "tx" for transmit
and named "rx" for receive.
Since it's also possible to use the of_serial.c driver, all
parameters from 8250.txt apply as well but are optional.
Example:
uart0: serial@40081000 {
compatible = "nxp,lpc1850-uart", "ns16550a";
reg = <0x40081000 0x1000>;
reg-shift = <2>;
interrupts = <24>;
clocks = <&ccu2 CLK_APB0_UART0>, <&ccu1 CLK_CPU_UART0>;
clock-names = "uartclk", "reg";
};

View File

@ -1,4 +1,5 @@
* NXP SC16IS7xx advanced Universal Asynchronous Receiver-Transmitter (UART)
* i2c as bus
Required properties:
- compatible: Should be one of the following:
@ -31,3 +32,39 @@ Example:
gpio-controller;
#gpio-cells = <2>;
};
* spi as bus
Required properties:
- compatible: Should be one of the following:
- "nxp,sc16is740" for NXP SC16IS740,
- "nxp,sc16is741" for NXP SC16IS741,
- "nxp,sc16is750" for NXP SC16IS750,
- "nxp,sc16is752" for NXP SC16IS752,
- "nxp,sc16is760" for NXP SC16IS760,
- "nxp,sc16is762" for NXP SC16IS762.
- reg: SPI chip select number.
- interrupt-parent: The phandle for the interrupt controller that
services interrupts for this IC.
- interrupts: Specifies the interrupt source of the parent interrupt
controller. The format of the interrupt specifier depends on the
parent interrupt controller.
- clocks: phandle to the IC source clock.
Optional properties:
- gpio-controller: Marks the device node as a GPIO controller.
- #gpio-cells: Should be two. The first cell is the GPIO number and
the second cell is used to specify the GPIO polarity:
0 = active high,
1 = active low.
Example:
sc16is750: sc16is750@0 {
compatible = "nxp,sc16is750";
reg = <0>;
clocks = <&clk20m>;
interrupt-parent = <&gpio3>;
interrupts = <7 IRQ_TYPE_EDGE_FALLING>;
gpio-controller;
#gpio-cells = <2>;
};

View File

@ -44,6 +44,11 @@ Required properties:
Note: Each enabled SCIx UART should have an alias correctly numbered in the
"aliases" node.
Optional properties:
- dmas: Must contain a list of two references to DMA specifiers, one for
transmission, and one for reception.
- dma-names: Must contain a list of two DMA names, "tx" and "rx".
Example:
aliases {
serial0 = &scifa0;
@ -56,4 +61,6 @@ Example:
interrupts = <0 144 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp2_clks R8A7790_CLK_SCIFA0>;
clock-names = "sci_ick";
dmas = <&dmac0 0x21>, <&dmac0 0x22>;
dma-names = "tx", "rx";
};

View File

@ -2,8 +2,7 @@
Required properties:
- compatible : Should be "sirf,prima2-uart", "sirf, prima2-usp-uart",
"sirf,atlas7-uart" or "sirf,atlas7-bt-uart" which means
uart located in BT module and used for BT.
"sirf,atlas7-uart" or "sirf,atlas7-usp-uart".
- reg : Offset and length of the register set for the device
- interrupts : Should contain uart interrupt
- fifosize : Should define hardware rx/tx fifo size
@ -33,15 +32,3 @@ usp@b0090000 {
rts-gpios = <&gpio 15 0>;
cts-gpios = <&gpio 46 0>;
};
for uart use in BT module,
uart6: uart@11000000 {
cell-index = <6>;
compatible = "sirf,atlas7-bt-uart", "sirf,atlas7-uart";
reg = <0x11000000 0x1000>;
interrupts = <0 100 0>;
clocks = <&clks 138>, <&clks 140>, <&clks 141>;
clock-names = "uart", "general", "noc";
fifosize = <128>;
status = "disabled";
}

View File

@ -959,14 +959,15 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
uart[8250],io,<addr>[,options]
uart[8250],mmio,<addr>[,options]
uart[8250],mmio32,<addr>[,options]
uart[8250],mmio32be,<addr>[,options]
uart[8250],0x<addr>[,options]
Start an early, polled-mode console on the 8250/16550
UART at the specified I/O port or MMIO address.
MMIO inter-register address stride is either 8-bit
(mmio) or 32-bit (mmio32).
If none of [io|mmio|mmio32], <addr> is assumed to be
equivalent to 'mmio'. 'options' are specified in the
same format described for "console=ttyS<n>"; if
(mmio) or 32-bit (mmio32 or mmio32be).
If none of [io|mmio|mmio32|mmio32be], <addr> is assumed
to be equivalent to 'mmio'. 'options' are specified
in the same format described for "console=ttyS<n>"; if
unspecified, the h/w is not initialized.
pl011,<addr>

View File

@ -556,6 +556,12 @@ helper functions described in Section 4. In that case, pm_runtime_resume()
should be used. Of course, for this purpose the device's runtime PM has to be
enabled earlier by calling pm_runtime_enable().
Note, if the device may execute pm_runtime calls during the probe (such as
if it registers with a subsystem that may call back in), then a
pm_runtime_get_sync() call paired with a pm_runtime_put() call will be
appropriate to ensure that the device is not put back to sleep during the
probe. This can happen with systems such as the network device layer.
It may be desirable to suspend the device once ->probe() has finished.
Therefore the driver core uses the asynchronous pm_request_idle() to submit a
request to execute the subsystem-level idle callback for the device at that
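A minimal sketch of the pattern described in the note above, assuming a hypothetical platform driver and a hypothetical foo_register_with_subsystem() helper that may call back into runtime PM:

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

/* Hypothetical subsystem registration that may invoke runtime PM callbacks */
static int foo_register_with_subsystem(struct platform_device *pdev)
{
	return 0;
}

static int foo_probe(struct platform_device *pdev)
{
	int ret;

	pm_runtime_enable(&pdev->dev);
	/* Keep the device active while the subsystem may call back into it */
	pm_runtime_get_sync(&pdev->dev);

	ret = foo_register_with_subsystem(pdev);

	/* Drop the reference; the device may runtime suspend again */
	pm_runtime_put(&pdev->dev);
	if (ret)
		pm_runtime_disable(&pdev->dev);

	return ret;
}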

View File

@ -13,7 +13,7 @@
#define BASE_BAUD ( 1843200 / 16 )
/* Standard COM flags (except for COM4, because of the 8514 problem) */
#ifdef CONFIG_SERIAL_DETECT_IRQ
#ifdef CONFIG_SERIAL_8250_DETECT_IRQ
#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST | ASYNC_AUTO_IRQ)
#define STD_COM4_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_AUTO_IRQ)
#else

View File

@ -1350,6 +1350,9 @@ void edma_stop(unsigned channel)
edma_shadow0_write_array(ctlr, SH_SECR, j, mask);
edma_write_array(ctlr, EDMA_EMCR, j, mask);
/* clear possibly pending completion interrupt */
edma_shadow0_write_array(ctlr, SH_ICR, j, mask);
pr_debug("EDMA: EER%d %08x\n", j,
edma_shadow0_read_array(ctlr, SH_EER, j));

View File

@ -22,9 +22,9 @@
defined(CONFIG_BFIN_UART2_CTSRTS) || \
defined(CONFIG_BFIN_UART3_CTSRTS)
# if defined(BFIN_UART_BF54X_STYLE) || defined(BFIN_UART_BF60X_STYLE)
# define CONFIG_SERIAL_BFIN_HARD_CTSRTS
# define SERIAL_BFIN_HARD_CTSRTS
# else
# define CONFIG_SERIAL_BFIN_CTSRTS
# define SERIAL_BFIN_CTSRTS
# endif
#endif
@ -50,8 +50,8 @@ struct bfin_serial_port {
#elif ANOMALY_05000363
unsigned int anomaly_threshold;
#endif
#if defined(CONFIG_SERIAL_BFIN_CTSRTS) || \
defined(CONFIG_SERIAL_BFIN_HARD_CTSRTS)
#if defined(SERIAL_BFIN_CTSRTS) || \
defined(SERIAL_BFIN_HARD_CTSRTS)
int cts_pin;
int rts_pin;
#endif

View File

@ -17,7 +17,7 @@
#define BASE_BAUD ( 1843200 / 16 )
/* Standard COM flags (except for COM4, because of the 8514 problem) */
#ifdef CONFIG_SERIAL_DETECT_IRQ
#ifdef CONFIG_SERIAL_8250_DETECT_IRQ
#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST | ASYNC_AUTO_IRQ)
#define STD_COM4_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_AUTO_IRQ)
#else

View File

@ -13,7 +13,7 @@
#define _ASM_SERIAL_H
/* Standard COM flags (except for COM4, because of the 8514 problem) */
#ifdef CONFIG_SERIAL_DETECT_IRQ
#ifdef CONFIG_SERIAL_8250_DETECT_IRQ
#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST | ASYNC_AUTO_IRQ)
#define STD_COM4_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_AUTO_IRQ)
#else
@ -21,7 +21,7 @@
#define STD_COM4_FLAGS ASYNC_BOOT_AUTOCONF
#endif
#ifdef CONFIG_SERIAL_MANY_PORTS
#ifdef CONFIG_SERIAL_8250_MANY_PORTS
#define FOURPORT_FLAGS ASYNC_FOURPORT
#define ACCENT_FLAGS 0
#define BOCA_FLAGS 0

View File

@ -11,7 +11,7 @@
#define BASE_BAUD (1843200/16)
/* Standard COM flags (except for COM4, because of the 8514 problem) */
#ifdef CONFIG_SERIAL_DETECT_IRQ
#ifdef CONFIG_SERIAL_8250_DETECT_IRQ
# define STD_COMX_FLAGS (UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_AUTO_IRQ)
# define STD_COM4_FLAGS (UPF_BOOT_AUTOCONF | 0 | UPF_AUTO_IRQ)
#else

View File

@ -1,4 +1,4 @@
obj-$(CONFIG_PM) += sysfs.o generic_ops.o common.o qos.o runtime.o
obj-$(CONFIG_PM) += sysfs.o generic_ops.o common.o qos.o runtime.o wakeirq.o
obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o
obj-$(CONFIG_PM_TRACE_RTC) += trace.o
obj-$(CONFIG_PM_OPP) += opp.o

View File

@ -24,6 +24,7 @@
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pm-trace.h>
#include <linux/pm_wakeirq.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
@ -587,6 +588,7 @@ void dpm_resume_noirq(pm_message_t state)
async_synchronize_full();
dpm_show_time(starttime, state, "noirq");
resume_device_irqs();
device_wakeup_disarm_wake_irqs();
cpuidle_resume();
trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
}
@ -1104,6 +1106,7 @@ int dpm_suspend_noirq(pm_message_t state)
trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
cpuidle_pause();
device_wakeup_arm_wake_irqs();
suspend_device_irqs();
mutex_lock(&dpm_list_mtx);
pm_transition = state;

View File

@ -20,6 +20,46 @@ static inline void pm_runtime_early_init(struct device *dev)
extern void pm_runtime_init(struct device *dev);
extern void pm_runtime_remove(struct device *dev);
struct wake_irq {
struct device *dev;
int irq;
bool dedicated_irq:1;
};
extern void dev_pm_arm_wake_irq(struct wake_irq *wirq);
extern void dev_pm_disarm_wake_irq(struct wake_irq *wirq);
#ifdef CONFIG_PM_SLEEP
extern int device_wakeup_attach_irq(struct device *dev,
struct wake_irq *wakeirq);
extern void device_wakeup_detach_irq(struct device *dev);
extern void device_wakeup_arm_wake_irqs(void);
extern void device_wakeup_disarm_wake_irqs(void);
#else
static inline int
device_wakeup_attach_irq(struct device *dev,
struct wake_irq *wakeirq)
{
return 0;
}
static inline void device_wakeup_detach_irq(struct device *dev)
{
}
static inline void device_wakeup_arm_wake_irqs(void)
{
}
static inline void device_wakeup_disarm_wake_irqs(void)
{
}
#endif /* CONFIG_PM_SLEEP */
/*
* sysfs.c
*/
@ -52,6 +92,14 @@ static inline void wakeup_sysfs_remove(struct device *dev) {}
static inline int pm_qos_sysfs_add(struct device *dev) { return 0; }
static inline void pm_qos_sysfs_remove(struct device *dev) {}
static inline void dev_pm_arm_wake_irq(struct wake_irq *wirq)
{
}
static inline void dev_pm_disarm_wake_irq(struct wake_irq *wirq)
{
}
#endif
#ifdef CONFIG_PM_SLEEP

View File

@ -10,6 +10,7 @@
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>
#include <trace/events/rpm.h>
#include "power.h"
@ -514,6 +515,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
callback = RPM_GET_CALLBACK(dev, runtime_suspend);
dev_pm_enable_wake_irq(dev);
retval = rpm_callback(callback, dev);
if (retval)
goto fail;
@ -552,6 +554,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
return retval;
fail:
dev_pm_disable_wake_irq(dev);
__update_runtime_status(dev, RPM_ACTIVE);
dev->power.deferred_resume = false;
wake_up_all(&dev->power.wait_queue);
@ -734,13 +737,16 @@ static int rpm_resume(struct device *dev, int rpmflags)
callback = RPM_GET_CALLBACK(dev, runtime_resume);
dev_pm_disable_wake_irq(dev);
retval = rpm_callback(callback, dev);
if (retval) {
__update_runtime_status(dev, RPM_SUSPENDED);
pm_runtime_cancel_pending(dev);
dev_pm_enable_wake_irq(dev);
} else {
no_callback:
__update_runtime_status(dev, RPM_ACTIVE);
pm_runtime_mark_last_busy(dev);
if (parent)
atomic_inc(&parent->power.child_count);
}

View File

@ -0,0 +1,273 @@
/*
* wakeirq.c - Device wakeirq helper functions
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed "as is" WITHOUT ANY WARRANTY of any
* kind, whether express or implied; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>
#include "power.h"
/**
* dev_pm_attach_wake_irq - Attach device interrupt as a wake IRQ
* @dev: Device entry
* @irq: Device wake-up capable interrupt
* @wirq: Wake irq specific data
*
* Internal function to attach either a device IO interrupt or a
* dedicated wake-up interrupt as a wake IRQ.
*/
static int dev_pm_attach_wake_irq(struct device *dev, int irq,
struct wake_irq *wirq)
{
unsigned long flags;
int err;
if (!dev || !wirq)
return -EINVAL;
spin_lock_irqsave(&dev->power.lock, flags);
if (dev_WARN_ONCE(dev, dev->power.wakeirq,
"wake irq already initialized\n")) {
spin_unlock_irqrestore(&dev->power.lock, flags);
return -EEXIST;
}
dev->power.wakeirq = wirq;
spin_unlock_irqrestore(&dev->power.lock, flags);
err = device_wakeup_attach_irq(dev, wirq);
if (err)
return err;
return 0;
}
/**
* dev_pm_set_wake_irq - Attach device IO interrupt as wake IRQ
* @dev: Device entry
* @irq: Device IO interrupt
*
* Attach a device IO interrupt as a wake IRQ. The wake IRQ gets
* automatically configured for wake-up from suspend based
* on the device specific sysfs wakeup entry. Typically called
* during driver probe after calling device_init_wakeup().
*/
int dev_pm_set_wake_irq(struct device *dev, int irq)
{
struct wake_irq *wirq;
int err;
wirq = kzalloc(sizeof(*wirq), GFP_KERNEL);
if (!wirq)
return -ENOMEM;
wirq->dev = dev;
wirq->irq = irq;
err = dev_pm_attach_wake_irq(dev, irq, wirq);
if (err)
kfree(wirq);
return err;
}
EXPORT_SYMBOL_GPL(dev_pm_set_wake_irq);
/**
* dev_pm_clear_wake_irq - Detach a device IO interrupt wake IRQ
* @dev: Device entry
*
* Detach a device wake IRQ and free resources.
*
* Note that it's OK for drivers to call this without calling
dev_pm_set_wake_irq() as not all driver instances may have
a wake IRQ configured. This avoids adding wake IRQ specific
checks into the drivers.
*/
void dev_pm_clear_wake_irq(struct device *dev)
{
struct wake_irq *wirq = dev->power.wakeirq;
unsigned long flags;
if (!wirq)
return;
spin_lock_irqsave(&dev->power.lock, flags);
dev->power.wakeirq = NULL;
spin_unlock_irqrestore(&dev->power.lock, flags);
device_wakeup_detach_irq(dev);
if (wirq->dedicated_irq)
free_irq(wirq->irq, wirq);
kfree(wirq);
}
EXPORT_SYMBOL_GPL(dev_pm_clear_wake_irq);
/**
* handle_threaded_wake_irq - Handler for dedicated wake-up interrupts
* @irq: Device specific dedicated wake-up interrupt
* @_wirq: Wake IRQ data
*
* Some devices have a separate wake-up interrupt in addition to the
* device IO interrupt. The wake-up interrupt signals that a device
should be woken up from its idle state. This handler uses device
* specific pm_runtime functions to wake the device, and then it's
* up to the device to do whatever it needs to. Note that as the
* device may need to restore context and start up regulators, we
* use a threaded IRQ.
*
* Also note that we are not resending the lost device interrupts.
We assume that the wake-up interrupt just needs to wake up the
device, and then the device's pm_runtime_resume() can deal with the
* situation.
*/
static irqreturn_t handle_threaded_wake_irq(int irq, void *_wirq)
{
struct wake_irq *wirq = _wirq;
int res;
/* We don't want RPM_ASYNC or RPM_NOWAIT here */
res = pm_runtime_resume(wirq->dev);
if (res < 0)
dev_warn(wirq->dev,
"wake IRQ with no resume: %i\n", res);
return IRQ_HANDLED;
}
/**
* dev_pm_set_dedicated_wake_irq - Request a dedicated wake-up interrupt
* @dev: Device entry
* @irq: Device wake-up interrupt
*
* Unless your hardware has separate wake-up interrupts in addition
* to the device IO interrupts, you don't need this.
*
* Sets up a threaded interrupt handler for a device that has
* a dedicated wake-up interrupt in addition to the device IO
* interrupt.
*
* The interrupt starts disabled, and needs to be managed for
* the device by the bus code or the device driver using
* dev_pm_enable_wake_irq() and dev_pm_disable_wake_irq()
* functions.
*/
int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
{
struct wake_irq *wirq;
int err;
wirq = kzalloc(sizeof(*wirq), GFP_KERNEL);
if (!wirq)
return -ENOMEM;
wirq->dev = dev;
wirq->irq = irq;
wirq->dedicated_irq = true;
irq_set_status_flags(irq, IRQ_NOAUTOEN);
/*
* Consumer device may need to power up and restore state
* so we use a threaded irq.
*/
err = request_threaded_irq(irq, NULL, handle_threaded_wake_irq,
IRQF_ONESHOT, dev_name(dev), wirq);
if (err)
goto err_free;
err = dev_pm_attach_wake_irq(dev, irq, wirq);
if (err)
goto err_free_irq;
return err;
err_free_irq:
free_irq(irq, wirq);
err_free:
kfree(wirq);
return err;
}
EXPORT_SYMBOL_GPL(dev_pm_set_dedicated_wake_irq);
/**
* dev_pm_enable_wake_irq - Enable device wake-up interrupt
* @dev: Device
*
* Called from the bus code or the device driver for
* runtime_suspend() to enable the wake-up interrupt while
* the device is running.
*
Note that for runtime_suspend() the wake-up interrupts
should be unconditionally enabled, unlike for suspend(),
where enabling is conditional.
*/
void dev_pm_enable_wake_irq(struct device *dev)
{
struct wake_irq *wirq = dev->power.wakeirq;
if (wirq && wirq->dedicated_irq)
enable_irq(wirq->irq);
}
EXPORT_SYMBOL_GPL(dev_pm_enable_wake_irq);
/**
* dev_pm_disable_wake_irq - Disable device wake-up interrupt
* @dev: Device
*
* Called from the bus code or the device driver for
* runtime_resume() to disable the wake-up interrupt while
* the device is running.
*/
void dev_pm_disable_wake_irq(struct device *dev)
{
struct wake_irq *wirq = dev->power.wakeirq;
if (wirq && wirq->dedicated_irq)
disable_irq_nosync(wirq->irq);
}
EXPORT_SYMBOL_GPL(dev_pm_disable_wake_irq);
/**
* dev_pm_arm_wake_irq - Arm device wake-up
* @wirq: Device wake-up interrupt
*
Sets up the wake-up event conditionally, based on
device_may_wakeup().
*/
void dev_pm_arm_wake_irq(struct wake_irq *wirq)
{
if (!wirq)
return;
if (device_may_wakeup(wirq->dev))
enable_irq_wake(wirq->irq);
}
/**
* dev_pm_disarm_wake_irq - Disarm device wake-up
* @wirq: Device wake-up interrupt
*
Clears the wake-up event conditionally, based on
device_may_wakeup().
*/
void dev_pm_disarm_wake_irq(struct wake_irq *wirq)
{
if (!wirq)
return;
if (device_may_wakeup(wirq->dev))
disable_irq_wake(wirq->irq);
}
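The kernel-doc above covers two models: reusing the device's normal IO interrupt as the wake IRQ with dev_pm_set_wake_irq(), or requesting a dedicated wake-up interrupt with dev_pm_set_dedicated_wake_irq() as the omap drivers further below do. A short hedged sketch of the first model (driver name and IRQ index are illustrative assumptions, not from this series):

#include <linux/platform_device.h>
#include <linux/pm_wakeirq.h>
#include <linux/pm_wakeup.h>

static int bar_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);	/* the normal device IO IRQ */
	int ret;

	if (irq < 0)
		return irq;

	device_init_wakeup(&pdev->dev, true);

	/*
	 * Reuse the IO interrupt as the wake IRQ; it is armed for wake-up
	 * only when device_may_wakeup() allows it, as dev_pm_arm_wake_irq()
	 * above shows.
	 */
	ret = dev_pm_set_wake_irq(&pdev->dev, irq);
	if (ret)
		dev_warn(&pdev->dev, "wake IRQ setup failed: %d\n", ret);

	return 0;
}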

View File

@ -14,6 +14,7 @@
#include <linux/suspend.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/pm_wakeirq.h>
#include <trace/events/power.h>
#include "power.h"
@ -238,6 +239,97 @@ int device_wakeup_enable(struct device *dev)
}
EXPORT_SYMBOL_GPL(device_wakeup_enable);
/**
* device_wakeup_attach_irq - Attach a wakeirq to a wakeup source
* @dev: Device to handle
* @wakeirq: Device specific wakeirq entry
*
* Attach a device wakeirq to the wakeup source so the device
* wake IRQ can be configured automatically for suspend and
* resume.
*/
int device_wakeup_attach_irq(struct device *dev,
struct wake_irq *wakeirq)
{
struct wakeup_source *ws;
int ret = 0;
spin_lock_irq(&dev->power.lock);
ws = dev->power.wakeup;
if (!ws) {
dev_err(dev, "forgot to call device_init_wakeup?\n");
ret = -EINVAL;
goto unlock;
}
if (ws->wakeirq) {
ret = -EEXIST;
goto unlock;
}
ws->wakeirq = wakeirq;
unlock:
spin_unlock_irq(&dev->power.lock);
return ret;
}
/**
* device_wakeup_detach_irq - Detach a wakeirq from a wakeup source
* @dev: Device to handle
*
* Removes a device wakeirq from the wakeup source.
*/
void device_wakeup_detach_irq(struct device *dev)
{
struct wakeup_source *ws;
spin_lock_irq(&dev->power.lock);
ws = dev->power.wakeup;
if (!ws)
goto unlock;
ws->wakeirq = NULL;
unlock:
spin_unlock_irq(&dev->power.lock);
}
/**
* device_wakeup_arm_wake_irqs(void)
*
Iterates over the list of device wakeirqs to arm them.
*/
void device_wakeup_arm_wake_irqs(void)
{
struct wakeup_source *ws;
rcu_read_lock();
list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
if (ws->wakeirq)
dev_pm_arm_wake_irq(ws->wakeirq);
}
rcu_read_unlock();
}
/**
* device_wakeup_disarm_wake_irqs(void)
*
Iterates over the list of device wakeirqs to disarm them.
*/
void device_wakeup_disarm_wake_irqs(void)
{
struct wakeup_source *ws;
rcu_read_lock();
list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
if (ws->wakeirq)
dev_pm_disarm_wake_irq(ws->wakeirq);
}
rcu_read_unlock();
}
/**
* device_wakeup_detach - Detach a device's wakeup source object from it.
* @dev: Device to detach the wakeup source object from.

View File

@ -300,8 +300,7 @@ static int edma_dma_pause(struct dma_chan *chan)
{
struct edma_chan *echan = to_edma_chan(chan);
/* Pause/Resume only allowed with cyclic mode */
if (!echan->edesc || !echan->edesc->cyclic)
if (!echan->edesc)
return -EINVAL;
edma_pause(echan->ch_num);
@ -312,10 +311,6 @@ static int edma_dma_resume(struct dma_chan *chan)
{
struct edma_chan *echan = to_edma_chan(chan);
/* Pause/Resume only allowed with cyclic mode */
if (!echan->edesc->cyclic)
return -EINVAL;
edma_resume(echan->ch_num);
return 0;
}

View File

@ -167,7 +167,6 @@ static ssize_t serport_ldisc_read(struct tty_struct * tty, struct file * file, u
{
struct serport *serport = (struct serport*) tty->disc_data;
struct serio *serio;
char name[64];
if (test_and_set_bit(SERPORT_BUSY, &serport->flags))
return -EBUSY;
@ -177,7 +176,7 @@ static ssize_t serport_ldisc_read(struct tty_struct * tty, struct file * file, u
return -ENOMEM;
strlcpy(serio->name, "Serial port", sizeof(serio->name));
snprintf(serio->phys, sizeof(serio->phys), "%s/serio0", tty_name(tty, name));
snprintf(serio->phys, sizeof(serio->phys), "%s/serio0", tty_name(tty));
serio->id = serport->id;
serio->id.type = SERIO_RS232;
serio->write = serport_serio_write;
@ -187,7 +186,7 @@ static ssize_t serport_ldisc_read(struct tty_struct * tty, struct file * file, u
serio->dev.parent = tty->dev;
serio_register_port(serport->serio);
printk(KERN_INFO "serio: Serial port %s\n", tty_name(tty, name));
printk(KERN_INFO "serio: Serial port %s\n", tty_name(tty));
wait_event_interruptible(serport->wait, test_bit(SERPORT_DEAD, &serport->flags));
serio_unregister_port(serport->serio);

View File

@ -43,6 +43,7 @@
#include <linux/regulator/consumer.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>
#include <linux/platform_data/hsmmc-omap.h>
/* OMAP HSMMC Host Controller Registers */
@ -218,7 +219,6 @@ struct omap_hsmmc_host {
unsigned int flags;
#define AUTO_CMD23 (1 << 0) /* Auto CMD23 support */
#define HSMMC_SDIO_IRQ_ENABLED (1 << 1) /* SDIO irq enabled */
#define HSMMC_WAKE_IRQ_ENABLED (1 << 2)
struct omap_hsmmc_next next_data;
struct omap_hsmmc_platform_data *pdata;
@ -1117,22 +1117,6 @@ static irqreturn_t omap_hsmmc_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
static irqreturn_t omap_hsmmc_wake_irq(int irq, void *dev_id)
{
struct omap_hsmmc_host *host = dev_id;
/* cirq is level triggered, disable to avoid infinite loop */
spin_lock(&host->irq_lock);
if (host->flags & HSMMC_WAKE_IRQ_ENABLED) {
disable_irq_nosync(host->wake_irq);
host->flags &= ~HSMMC_WAKE_IRQ_ENABLED;
}
spin_unlock(&host->irq_lock);
pm_request_resume(host->dev); /* no use counter */
return IRQ_HANDLED;
}
static void set_sd_bus_power(struct omap_hsmmc_host *host)
{
unsigned long i;
@ -1665,7 +1649,6 @@ static void omap_hsmmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
static int omap_hsmmc_configure_wake_irq(struct omap_hsmmc_host *host)
{
struct mmc_host *mmc = host->mmc;
int ret;
/*
@ -1677,11 +1660,7 @@ static int omap_hsmmc_configure_wake_irq(struct omap_hsmmc_host *host)
if (!host->dev->of_node || !host->wake_irq)
return -ENODEV;
/* Prevent auto-enabling of IRQ */
irq_set_status_flags(host->wake_irq, IRQ_NOAUTOEN);
ret = devm_request_irq(host->dev, host->wake_irq, omap_hsmmc_wake_irq,
IRQF_TRIGGER_LOW | IRQF_ONESHOT,
mmc_hostname(mmc), host);
ret = dev_pm_set_dedicated_wake_irq(host->dev, host->wake_irq);
if (ret) {
dev_err(mmc_dev(host->mmc), "Unable to request wake IRQ\n");
goto err;
@ -1718,7 +1697,7 @@ static int omap_hsmmc_configure_wake_irq(struct omap_hsmmc_host *host)
return 0;
err_free_irq:
devm_free_irq(host->dev, host->wake_irq, host);
dev_pm_clear_wake_irq(host->dev);
err:
dev_warn(host->dev, "no SDIO IRQ support, falling back to polling\n");
host->wake_irq = 0;
@ -2007,6 +1986,7 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
omap_hsmmc_ops.multi_io_quirk = omap_hsmmc_multi_io_quirk;
}
device_init_wakeup(&pdev->dev, true);
pm_runtime_enable(host->dev);
pm_runtime_get_sync(host->dev);
pm_runtime_set_autosuspend_delay(host->dev, MMC_AUTOSUSPEND_DELAY);
@ -2147,6 +2127,7 @@ err_slot_name:
if (host->use_reg)
omap_hsmmc_reg_put(host);
err_irq:
device_init_wakeup(&pdev->dev, false);
if (host->tx_chan)
dma_release_channel(host->tx_chan);
if (host->rx_chan)
@ -2178,6 +2159,7 @@ static int omap_hsmmc_remove(struct platform_device *pdev)
pm_runtime_put_sync(host->dev);
pm_runtime_disable(host->dev);
device_init_wakeup(&pdev->dev, false);
if (host->dbclk)
clk_disable_unprepare(host->dbclk);
@ -2204,11 +2186,6 @@ static int omap_hsmmc_suspend(struct device *dev)
OMAP_HSMMC_READ(host->base, HCTL) & ~SDBP);
}
/* do not wake up due to sdio irq */
if ((host->mmc->caps & MMC_CAP_SDIO_IRQ) &&
!(host->mmc->pm_flags & MMC_PM_WAKE_SDIO_IRQ))
disable_irq(host->wake_irq);
if (host->dbclk)
clk_disable_unprepare(host->dbclk);
@ -2233,11 +2210,6 @@ static int omap_hsmmc_resume(struct device *dev)
omap_hsmmc_conf_bus_power(host);
omap_hsmmc_protect_card(host);
if ((host->mmc->caps & MMC_CAP_SDIO_IRQ) &&
!(host->mmc->pm_flags & MMC_PM_WAKE_SDIO_IRQ))
enable_irq(host->wake_irq);
pm_runtime_mark_last_busy(host->dev);
pm_runtime_put_autosuspend(host->dev);
return 0;
@ -2277,10 +2249,6 @@ static int omap_hsmmc_runtime_suspend(struct device *dev)
}
pinctrl_pm_select_idle_state(dev);
WARN_ON(host->flags & HSMMC_WAKE_IRQ_ENABLED);
enable_irq(host->wake_irq);
host->flags |= HSMMC_WAKE_IRQ_ENABLED;
} else {
pinctrl_pm_select_idle_state(dev);
}
@ -2302,11 +2270,6 @@ static int omap_hsmmc_runtime_resume(struct device *dev)
spin_lock_irqsave(&host->irq_lock, flags);
if ((host->mmc->caps & MMC_CAP_SDIO_IRQ) &&
(host->flags & HSMMC_SDIO_IRQ_ENABLED)) {
/* sdio irq flag can't change while in runtime suspend */
if (host->flags & HSMMC_WAKE_IRQ_ENABLED) {
disable_irq_nosync(host->wake_irq);
host->flags &= ~HSMMC_WAKE_IRQ_ENABLED;
}
pinctrl_pm_select_default_state(host->dev);

View File

@ -966,9 +966,7 @@ static void rs_throttle(struct tty_struct * tty)
struct serial_state *info = tty->driver_data;
unsigned long flags;
#ifdef SERIAL_DEBUG_THROTTLE
char buf[64];
printk("throttle %s: %d....\n", tty_name(tty, buf),
printk("throttle %s: %d....\n", tty_name(tty),
tty->ldisc.chars_in_buffer(tty));
#endif
@ -991,9 +989,7 @@ static void rs_unthrottle(struct tty_struct * tty)
struct serial_state *info = tty->driver_data;
unsigned long flags;
#ifdef SERIAL_DEBUG_THROTTLE
char buf[64];
printk("unthrottle %s: %d....\n", tty_name(tty, buf),
printk("unthrottle %s: %d....\n", tty_name(tty),
tty->ldisc.chars_in_buffer(tty));
#endif
@ -1786,7 +1782,8 @@ static int __exit amiga_serial_remove(struct platform_device *pdev)
struct serial_state *state = platform_get_drvdata(pdev);
/* printk("Unloading %s: version %s\n", serial_name, serial_version); */
if ((error = tty_unregister_driver(serial_driver)))
error = tty_unregister_driver(serial_driver);
if (error)
printk("SERIAL: failed to unregister serial driver (%d)\n",
error);
put_tty_driver(serial_driver);

View File

@ -2861,9 +2861,7 @@ static void cy_throttle(struct tty_struct *tty)
unsigned long flags;
#ifdef CY_DEBUG_THROTTLE
char buf[64];
printk(KERN_DEBUG "cyc:throttle %s: %ld...ttyC%d\n", tty_name(tty, buf),
printk(KERN_DEBUG "cyc:throttle %s: %ld...ttyC%d\n", tty_name(tty),
tty->ldisc.chars_in_buffer(tty), info->line);
#endif
@ -2902,10 +2900,8 @@ static void cy_unthrottle(struct tty_struct *tty)
unsigned long flags;
#ifdef CY_DEBUG_THROTTLE
char buf[64];
printk(KERN_DEBUG "cyc:unthrottle %s: %ld...ttyC%d\n",
tty_name(tty, buf), tty_chars_in_buffer(tty), info->line);
tty_name(tty), tty_chars_in_buffer(tty), info->line);
#endif
if (serial_paranoia_check(info, tty->name, "cy_unthrottle"))

View File

@ -42,13 +42,6 @@ config HVC_RTAS
help
IBM Console device driver which makes use of RTAS
config HVC_BEAT
bool "Toshiba's Beat Hypervisor Console support"
depends on PPC_CELLEB
select HVC_DRIVER
help
Toshiba's Cell Reference Set Beat Console device driver
config HVC_IUCV
bool "z/VM IUCV Hypervisor console support (VM only)"
depends on S390

View File

@ -4,7 +4,6 @@ obj-$(CONFIG_HVC_OLD_HVSI) += hvsi.o
obj-$(CONFIG_HVC_RTAS) += hvc_rtas.o
obj-$(CONFIG_HVC_TILE) += hvc_tile.o
obj-$(CONFIG_HVC_DCC) += hvc_dcc.o
obj-$(CONFIG_HVC_BEAT) += hvc_beat.o
obj-$(CONFIG_HVC_DRIVER) += hvc_console.o
obj-$(CONFIG_HVC_IRQ) += hvc_irq.o
obj-$(CONFIG_HVC_XEN) += hvc_xen.o

View File

@ -1,134 +0,0 @@
/*
* Beat hypervisor console driver
*
* (C) Copyright 2006 TOSHIBA CORPORATION
*
* This code is based on drivers/char/hvc_rtas.c:
* (C) Copyright IBM Corporation 2001-2005
* (C) Copyright Red Hat, Inc. 2005
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/console.h>
#include <asm/prom.h>
#include <asm/hvconsole.h>
#include <asm/firmware.h>
#include "hvc_console.h"
extern int64_t beat_get_term_char(uint64_t, uint64_t *, uint64_t *, uint64_t *);
extern int64_t beat_put_term_char(uint64_t, uint64_t, uint64_t, uint64_t);
struct hvc_struct *hvc_beat_dev = NULL;
/* bug: only one queue is available regardless of vtermno */
static int hvc_beat_get_chars(uint32_t vtermno, char *buf, int cnt)
{
static unsigned char q[sizeof(unsigned long) * 2]
__attribute__((aligned(sizeof(unsigned long))));
static int qlen = 0;
u64 got;
again:
if (qlen) {
if (qlen > cnt) {
memcpy(buf, q, cnt);
qlen -= cnt;
memmove(q + cnt, q, qlen);
return cnt;
} else { /* qlen <= cnt */
int r;
memcpy(buf, q, qlen);
r = qlen;
qlen = 0;
return r;
}
}
if (beat_get_term_char(vtermno, &got,
((u64 *)q), ((u64 *)q) + 1) == 0) {
qlen = got;
goto again;
}
return 0;
}
static int hvc_beat_put_chars(uint32_t vtermno, const char *buf, int cnt)
{
unsigned long kb[2];
int rest, nlen;
for (rest = cnt; rest > 0; rest -= nlen) {
nlen = (rest > 16) ? 16 : rest;
memcpy(kb, buf, nlen);
beat_put_term_char(vtermno, nlen, kb[0], kb[1]);
buf += nlen;
}
return cnt;
}
static const struct hv_ops hvc_beat_get_put_ops = {
.get_chars = hvc_beat_get_chars,
.put_chars = hvc_beat_put_chars,
};
static int hvc_beat_useit = 1;
static int hvc_beat_config(char *p)
{
hvc_beat_useit = simple_strtoul(p, NULL, 0);
return 0;
}
static int __init hvc_beat_console_init(void)
{
if (hvc_beat_useit && of_machine_is_compatible("Beat")) {
hvc_instantiate(0, 0, &hvc_beat_get_put_ops);
}
return 0;
}
/* temp */
static int __init hvc_beat_init(void)
{
struct hvc_struct *hp;
if (!firmware_has_feature(FW_FEATURE_BEAT))
return -ENODEV;
hp = hvc_alloc(0, 0, &hvc_beat_get_put_ops, 16);
if (IS_ERR(hp))
return PTR_ERR(hp);
hvc_beat_dev = hp;
return 0;
}
static void __exit hvc_beat_exit(void)
{
if (hvc_beat_dev)
hvc_remove(hvc_beat_dev);
}
module_init(hvc_beat_init);
module_exit(hvc_beat_exit);
__setup("hvc_beat=", hvc_beat_config);
console_initcall(hvc_beat_console_init);

View File

@ -319,7 +319,8 @@ static int hvc_install(struct tty_driver *driver, struct tty_struct *tty)
int rc;
/* Auto increments kref reference if found. */
if (!(hp = hvc_get_by_index(tty->index)))
hp = hvc_get_by_index(tty->index);
if (!hp)
return -ENODEV;
tty->driver_data = hp;

View File

@ -1044,8 +1044,8 @@ static int hvcs_enable_device(struct hvcs_struct *hvcsd, uint32_t unit_address,
* It is possible that the vty-server was removed between the time that
* the conn was registered and now.
*/
if (!(rc = request_irq(irq, &hvcs_handle_interrupt,
0, "ibmhvcs", hvcsd))) {
rc = request_irq(irq, &hvcs_handle_interrupt, 0, "ibmhvcs", hvcsd);
if (!rc) {
/*
* It is possible the vty-server was removed after the irq was
* requested but before we have time to enable interrupts.

View File

@ -161,7 +161,7 @@ struct gsm_dlci {
struct net_device *net; /* network interface, if created */
};
/* DLCI 0, 62/63 are special or reseved see gsmtty_open */
/* DLCI 0, 62/63 are special or reserved see gsmtty_open */
#define NUM_DLCI 64
@ -2274,7 +2274,6 @@ static void gsmld_receive_buf(struct tty_struct *tty, const unsigned char *cp,
const unsigned char *dp;
char *f;
int i;
char buf[64];
char flags = TTY_NORMAL;
if (debug & 4)
@ -2296,7 +2295,7 @@ static void gsmld_receive_buf(struct tty_struct *tty, const unsigned char *cp,
break;
default:
WARN_ONCE(1, "%s: unknown flag %d\n",
tty_name(tty, buf), flags);
tty_name(tty), flags);
break;
}
}

View File

@ -1190,13 +1190,12 @@ static void n_tty_receive_break(struct tty_struct *tty)
static void n_tty_receive_overrun(struct tty_struct *tty)
{
struct n_tty_data *ldata = tty->disc_data;
char buf[64];
ldata->num_overrun++;
if (time_after(jiffies, ldata->overrun_time + HZ) ||
time_after(ldata->overrun_time, jiffies)) {
printk(KERN_WARNING "%s: %d input overrun(s)\n",
tty_name(tty, buf),
tty_name(tty),
ldata->num_overrun);
ldata->overrun_time = jiffies;
ldata->num_overrun = 0;
@ -1471,8 +1470,6 @@ static void n_tty_receive_char_closing(struct tty_struct *tty, unsigned char c)
static void
n_tty_receive_char_flagged(struct tty_struct *tty, unsigned char c, char flag)
{
char buf[64];
switch (flag) {
case TTY_BREAK:
n_tty_receive_break(tty);
@ -1486,7 +1483,7 @@ n_tty_receive_char_flagged(struct tty_struct *tty, unsigned char c, char flag)
break;
default:
printk(KERN_ERR "%s: unknown flag %d\n",
tty_name(tty, buf), flag);
tty_name(tty), flag);
break;
}
}

View File

@ -140,7 +140,7 @@ static int debug;
#define R_FCR 0x0000 /* Flow Control Register */
#define R_IER 0x0004 /* Interrupt Enable Register */
#define CONFIG_MAGIC 0xEFEFFEFE
#define NOZOMI_CONFIG_MAGIC 0xEFEFFEFE
#define TOGGLE_VALID 0x0000
/* Definition of interrupt tokens */
@ -660,9 +660,9 @@ static int nozomi_read_config_table(struct nozomi *dc)
read_mem32((u32 *) &dc->config_table, dc->base_addr + 0,
sizeof(struct config_table));
if (dc->config_table.signature != CONFIG_MAGIC) {
if (dc->config_table.signature != NOZOMI_CONFIG_MAGIC) {
dev_err(&dc->pdev->dev, "ConfigTable Bad! 0x%08X != 0x%08X\n",
dc->config_table.signature, CONFIG_MAGIC);
dc->config_table.signature, NOZOMI_CONFIG_MAGIC);
return 0;
}

View File

@ -44,7 +44,7 @@ struct rocket_version {
#define ROCKET_HUP_NOTIFY 0x00000004
#define ROCKET_SPLIT_TERMIOS 0x00000008
#define ROCKET_SPD_MASK 0x00000070
#define ROCKET_SPD_HI 0x00000010 /* Use 56000 instead of 38400 bps */
#define ROCKET_SPD_HI 0x00000010 /* Use 57600 instead of 38400 bps */
#define ROCKET_SPD_VHI 0x00000020 /* Use 115200 instead of 38400 bps */
#define ROCKET_SPD_SHI 0x00000030 /* Use 230400 instead of 38400 bps */
#define ROCKET_SPD_WARP 0x00000040 /* Use 460800 instead of 38400 bps */

View File

@ -508,7 +508,8 @@ static void change_speed(struct m68k_serial *info, struct tty_struct *tty)
int i;
cflag = tty->termios.c_cflag;
if (!(port = info->port))
port = info->port;
if (!port)
return;
ustcnt = uart->ustcnt;

View File

@ -85,19 +85,6 @@ static unsigned int skip_txen_test; /* force skip of txen test at init time */
#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
#ifdef CONFIG_SERIAL_8250_DETECT_IRQ
#define CONFIG_SERIAL_DETECT_IRQ 1
#endif
#ifdef CONFIG_SERIAL_8250_MANY_PORTS
#define CONFIG_SERIAL_MANY_PORTS 1
#endif
/*
* HUB6 is always on. This will be removed once the header
* files have been cleaned.
*/
#define CONFIG_HUB6 1
#include <asm/serial.h>
/*
* SERIAL_PORT_DFNS tells us about built-in ports that have no
@ -2019,8 +2006,9 @@ EXPORT_SYMBOL_GPL(serial8250_do_set_mctrl);
static void serial8250_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
if (port->set_mctrl)
return port->set_mctrl(port, mctrl);
return serial8250_do_set_mctrl(port, mctrl);
port->set_mctrl(port, mctrl);
else
serial8250_do_set_mctrl(port, mctrl);
}
static void serial8250_break_ctl(struct uart_port *port, int break_state)
@ -3548,6 +3536,9 @@ static struct console univ8250_console = {
static int __init univ8250_console_init(void)
{
if (nr_uarts == 0)
return -ENODEV;
serial8250_isa_init_ports();
register_console(&univ8250_console);
return 0;
@ -3578,7 +3569,7 @@ int __init early_serial_setup(struct uart_port *port)
{
struct uart_port *p;
if (port->line >= ARRAY_SIZE(serial8250_ports))
if (port->line >= ARRAY_SIZE(serial8250_ports) || nr_uarts == 0)
return -ENODEV;
serial8250_isa_init_ports();
@ -3850,7 +3841,6 @@ int serial8250_register_8250_port(struct uart_8250_port *up)
uart->port.mapbase = up->port.mapbase;
uart->port.mapsize = up->port.mapsize;
uart->port.private_data = up->port.private_data;
uart->port.fifosize = up->port.fifosize;
uart->tx_loadsz = up->tx_loadsz;
uart->capabilities = up->capabilities;
uart->port.throttle = up->port.throttle;
@ -3945,6 +3935,9 @@ static int __init serial8250_init(void)
{
int ret;
if (nr_uarts == 0)
return -ENODEV;
serial8250_isa_init_ports();
printk(KERN_INFO "Serial: 8250/16550 driver, "

View File

@ -377,6 +377,16 @@ static int dw8250_probe_of(struct uart_port *p,
return 0;
}
static bool dw8250_idma_filter(struct dma_chan *chan, void *param)
{
struct device *dev = param;
if (dev != chan->device->dev->parent)
return false;
return true;
}
static int dw8250_probe_acpi(struct uart_8250_port *up,
struct dw8250_data *data)
{
@ -389,8 +399,15 @@ static int dw8250_probe_acpi(struct uart_8250_port *up,
p->serial_out = dw8250_serial_out32;
p->regshift = 2;
up->dma = &data->dma;
/* Platforms with iDMA */
if (platform_get_resource_byname(to_platform_device(up->port.dev),
IORESOURCE_MEM, "lpss_priv")) {
data->dma.rx_param = up->port.dev->parent;
data->dma.tx_param = up->port.dev->parent;
data->dma.fn = dw8250_idma_filter;
}
up->dma = &data->dma;
up->dma->rxconf.src_maxburst = p->fifosize / 4;
up->dma->txconf.dst_maxburst = p->fifosize / 4;

View File

@ -131,7 +131,7 @@ static void __init init_port(struct earlycon_device *device)
serial8250_early_out(port, UART_LCR, c & ~UART_LCR_DLAB);
}
static int __init early_serial8250_setup(struct earlycon_device *device,
int __init early_serial8250_setup(struct earlycon_device *device,
const char *options)
{
if (!(device->port.membase || device->port.iobase))

View File

@ -0,0 +1,230 @@
/*
* Serial port driver for NXP LPC18xx/43xx UART
*
* Copyright (C) 2015 Joachim Eastwood <manabian@gmail.com>
*
* Based on 8250_mtk.c:
* Copyright (c) 2014 MundoReader S.L.
* Matthias Brugger <matthias.bgg@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include "8250.h"
/* Additional LPC18xx/43xx 8250 registers and bits */
#define LPC18XX_UART_RS485CTRL (0x04c / sizeof(u32))
#define LPC18XX_UART_RS485CTRL_NMMEN BIT(0)
#define LPC18XX_UART_RS485CTRL_DCTRL BIT(4)
#define LPC18XX_UART_RS485CTRL_OINV BIT(5)
#define LPC18XX_UART_RS485DLY (0x054 / sizeof(u32))
#define LPC18XX_UART_RS485DLY_MAX 255
struct lpc18xx_uart_data {
struct uart_8250_dma dma;
struct clk *clk_uart;
struct clk *clk_reg;
int line;
};
static int lpc18xx_rs485_config(struct uart_port *port,
struct serial_rs485 *rs485)
{
struct uart_8250_port *up = up_to_u8250p(port);
u32 rs485_ctrl_reg = 0;
u32 rs485_dly_reg = 0;
unsigned baud_clk;
if (rs485->flags & SER_RS485_ENABLED)
memset(rs485->padding, 0, sizeof(rs485->padding));
else
memset(rs485, 0, sizeof(*rs485));
rs485->flags &= SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND |
SER_RS485_RTS_AFTER_SEND;
if (rs485->flags & SER_RS485_ENABLED) {
rs485_ctrl_reg |= LPC18XX_UART_RS485CTRL_NMMEN |
LPC18XX_UART_RS485CTRL_DCTRL;
if (rs485->flags & SER_RS485_RTS_ON_SEND) {
rs485_ctrl_reg |= LPC18XX_UART_RS485CTRL_OINV;
rs485->flags &= ~SER_RS485_RTS_AFTER_SEND;
} else {
rs485->flags |= SER_RS485_RTS_AFTER_SEND;
}
}
if (rs485->delay_rts_after_send) {
baud_clk = port->uartclk / up->dl_read(up);
rs485_dly_reg = DIV_ROUND_UP(rs485->delay_rts_after_send
* baud_clk, MSEC_PER_SEC);
if (rs485_dly_reg > LPC18XX_UART_RS485DLY_MAX)
rs485_dly_reg = LPC18XX_UART_RS485DLY_MAX;
/* Calculate the resulting delay in ms */
rs485->delay_rts_after_send = (rs485_dly_reg * MSEC_PER_SEC)
/ baud_clk;
}
/* Delay RTS before send not supported */
rs485->delay_rts_before_send = 0;
serial_out(up, LPC18XX_UART_RS485CTRL, rs485_ctrl_reg);
serial_out(up, LPC18XX_UART_RS485DLY, rs485_dly_reg);
port->rs485 = *rs485;
return 0;
}
static void lpc18xx_uart_serial_out(struct uart_port *p, int offset, int value)
{
/*
* For DMA mode one must ensure that the UART_FCR_DMA_SELECT
* bit is set when FIFO is enabled. Even if DMA is not used
* setting this bit doesn't seem to affect anything.
*/
if (offset == UART_FCR && (value & UART_FCR_ENABLE_FIFO))
value |= UART_FCR_DMA_SELECT;
offset = offset << p->regshift;
writel(value, p->membase + offset);
}
static int lpc18xx_serial_probe(struct platform_device *pdev)
{
struct lpc18xx_uart_data *data;
struct uart_8250_port uart;
struct resource *res;
int irq, ret;
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(&pdev->dev, "irq not found");
return irq;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(&pdev->dev, "memory resource not found");
return -EINVAL;
}
memset(&uart, 0, sizeof(uart));
uart.port.membase = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (!uart.port.membase)
return -ENOMEM;
data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
data->clk_uart = devm_clk_get(&pdev->dev, "uartclk");
if (IS_ERR(data->clk_uart)) {
dev_err(&pdev->dev, "uart clock not found\n");
return PTR_ERR(data->clk_uart);
}
data->clk_reg = devm_clk_get(&pdev->dev, "reg");
if (IS_ERR(data->clk_reg)) {
dev_err(&pdev->dev, "reg clock not found\n");
return PTR_ERR(data->clk_reg);
}
ret = clk_prepare_enable(data->clk_reg);
if (ret) {
dev_err(&pdev->dev, "unable to enable reg clock\n");
return ret;
}
ret = clk_prepare_enable(data->clk_uart);
if (ret) {
dev_err(&pdev->dev, "unable to enable uart clock\n");
goto dis_clk_reg;
}
ret = of_alias_get_id(pdev->dev.of_node, "serial");
if (ret >= 0)
uart.port.line = ret;
data->dma.rx_param = data;
data->dma.tx_param = data;
spin_lock_init(&uart.port.lock);
uart.port.dev = &pdev->dev;
uart.port.irq = irq;
uart.port.iotype = UPIO_MEM32;
uart.port.mapbase = res->start;
uart.port.regshift = 2;
uart.port.type = PORT_16550A;
uart.port.flags = UPF_FIXED_PORT | UPF_FIXED_TYPE | UPF_SKIP_TEST;
uart.port.uartclk = clk_get_rate(data->clk_uart);
uart.port.private_data = data;
uart.port.rs485_config = lpc18xx_rs485_config;
uart.port.serial_out = lpc18xx_uart_serial_out;
uart.dma = &data->dma;
uart.dma->rxconf.src_maxburst = 1;
uart.dma->txconf.dst_maxburst = 1;
ret = serial8250_register_8250_port(&uart);
if (ret < 0) {
dev_err(&pdev->dev, "unable to register 8250 port\n");
goto dis_uart_clk;
}
data->line = ret;
platform_set_drvdata(pdev, data);
return 0;
dis_uart_clk:
clk_disable_unprepare(data->clk_uart);
dis_clk_reg:
clk_disable_unprepare(data->clk_reg);
return ret;
}
static int lpc18xx_serial_remove(struct platform_device *pdev)
{
struct lpc18xx_uart_data *data = platform_get_drvdata(pdev);
serial8250_unregister_port(data->line);
clk_disable_unprepare(data->clk_uart);
clk_disable_unprepare(data->clk_reg);
return 0;
}
static const struct of_device_id lpc18xx_serial_match[] = {
{ .compatible = "nxp,lpc1850-uart" },
{ },
};
MODULE_DEVICE_TABLE(of, lpc18xx_serial_match);
static struct platform_driver lpc18xx_serial_driver = {
.probe = lpc18xx_serial_probe,
.remove = lpc18xx_serial_remove,
.driver = {
.name = "lpc18xx-uart",
.of_match_table = lpc18xx_serial_match,
},
};
module_platform_driver(lpc18xx_serial_driver);
MODULE_AUTHOR("Joachim Eastwood <manabian@gmail.com>");
MODULE_DESCRIPTION("Serial port driver NXP LPC18xx/43xx devices");
MODULE_LICENSE("GPL v2");
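The delay_rts_after_send handling in lpc18xx_rs485_config() above converts a requested millisecond delay into RS485DLY ticks of the baud clock (uartclk / divisor), clamps it to 255 and reports back the delay actually programmed. A small worked example of that arithmetic as a standalone program; the clock, divisor and delay values are made-up assumptions, not taken from any board:

#include <stdio.h>

#define LPC18XX_UART_RS485DLY_MAX	255U
#define MSEC_PER_SEC			1000U
#define DIV_ROUND_UP(n, d)		(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int uartclk = 12000000;	/* assumed UART input clock, Hz */
	unsigned int divisor = 78;		/* assumed divisor, roughly 9600 baud */
	unsigned int delay_ms = 1;		/* requested delay_rts_after_send */

	unsigned int baud_clk = uartclk / divisor;	/* 153846 */
	unsigned int dly = DIV_ROUND_UP(delay_ms * baud_clk, MSEC_PER_SEC);

	if (dly > LPC18XX_UART_RS485DLY_MAX)
		dly = LPC18XX_UART_RS485DLY_MAX;

	/* Prints 154 ticks, reported back as 1 ms, mirroring the driver's math */
	printf("RS485DLY = %u ticks, effective delay = %u ms\n",
	       dly, dly * MSEC_PER_SEC / baud_clk);

	return 0;
}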

View File

@ -34,6 +34,7 @@
struct mtk8250_data {
int line;
struct clk *uart_clk;
struct clk *bus_clk;
};
static void
@ -115,6 +116,36 @@ mtk8250_set_termios(struct uart_port *port, struct ktermios *termios,
tty_termios_encode_baud_rate(termios, baud, baud);
}
static int mtk8250_runtime_suspend(struct device *dev)
{
struct mtk8250_data *data = dev_get_drvdata(dev);
clk_disable_unprepare(data->uart_clk);
clk_disable_unprepare(data->bus_clk);
return 0;
}
static int mtk8250_runtime_resume(struct device *dev)
{
struct mtk8250_data *data = dev_get_drvdata(dev);
int err;
err = clk_prepare_enable(data->uart_clk);
if (err) {
dev_warn(dev, "Can't enable clock\n");
return err;
}
err = clk_prepare_enable(data->bus_clk);
if (err) {
dev_warn(dev, "Can't enable bus clock\n");
return err;
}
return 0;
}
static void
mtk8250_do_pm(struct uart_port *port, unsigned int state, unsigned int old)
{
@ -130,22 +161,24 @@ mtk8250_do_pm(struct uart_port *port, unsigned int state, unsigned int old)
static int mtk8250_probe_of(struct platform_device *pdev, struct uart_port *p,
struct mtk8250_data *data)
{
int err;
struct device_node *np = pdev->dev.of_node;
data->uart_clk = of_clk_get(np, 0);
data->uart_clk = devm_clk_get(&pdev->dev, "baud");
if (IS_ERR(data->uart_clk)) {
dev_warn(&pdev->dev, "Can't get timer clock\n");
/*
* For compatibility with older device trees try unnamed
* clk when no baud clk can be found.
*/
data->uart_clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(data->uart_clk)) {
dev_warn(&pdev->dev, "Can't get uart clock\n");
return PTR_ERR(data->uart_clk);
}
err = clk_prepare_enable(data->uart_clk);
if (err) {
dev_warn(&pdev->dev, "Can't prepare clock\n");
clk_put(data->uart_clk);
return err;
return 0;
}
p->uartclk = clk_get_rate(data->uart_clk);
data->bus_clk = devm_clk_get(&pdev->dev, "bus");
if (IS_ERR(data->bus_clk))
return PTR_ERR(data->bus_clk);
return 0;
}
@ -190,20 +223,25 @@ static int mtk8250_probe(struct platform_device *pdev)
uart.port.regshift = 2;
uart.port.private_data = data;
uart.port.set_termios = mtk8250_set_termios;
uart.port.uartclk = clk_get_rate(data->uart_clk);
/* Disable Rate Fix function */
writel(0x0, uart.port.membase +
(MTK_UART_RATE_FIX << uart.port.regshift));
platform_set_drvdata(pdev, data);
pm_runtime_enable(&pdev->dev);
if (!pm_runtime_enabled(&pdev->dev)) {
err = mtk8250_runtime_resume(&pdev->dev);
if (err)
return err;
}
data->line = serial8250_register_8250_port(&uart);
if (data->line < 0)
return data->line;
platform_set_drvdata(pdev, data);
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
return 0;
}
@ -214,13 +252,13 @@ static int mtk8250_remove(struct platform_device *pdev)
pm_runtime_get_sync(&pdev->dev);
serial8250_unregister_port(data->line);
if (!IS_ERR(data->uart_clk)) {
clk_disable_unprepare(data->uart_clk);
clk_put(data->uart_clk);
}
pm_runtime_disable(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
if (!pm_runtime_status_suspended(&pdev->dev))
mtk8250_runtime_suspend(&pdev->dev);
return 0;
}
@ -244,28 +282,6 @@ static int mtk8250_resume(struct device *dev)
}
#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_PM
static int mtk8250_runtime_suspend(struct device *dev)
{
struct mtk8250_data *data = dev_get_drvdata(dev);
if (!IS_ERR(data->uart_clk))
clk_disable_unprepare(data->uart_clk);
return 0;
}
static int mtk8250_runtime_resume(struct device *dev)
{
struct mtk8250_data *data = dev_get_drvdata(dev);
if (!IS_ERR(data->uart_clk))
clk_prepare_enable(data->uart_clk);
return 0;
}
#endif
static const struct dev_pm_ops mtk8250_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(mtk8250_suspend, mtk8250_resume)
SET_RUNTIME_PM_OPS(mtk8250_runtime_suspend, mtk8250_runtime_resume,
@ -289,6 +305,21 @@ static struct platform_driver mtk8250_platform_driver = {
};
module_platform_driver(mtk8250_platform_driver);
#ifdef CONFIG_SERIAL_8250_CONSOLE
static int __init early_mtk8250_setup(struct earlycon_device *device,
const char *options)
{
if (!device->port.membase)
return -ENODEV;
device->port.iotype = UPIO_MEM32;
return early_serial8250_setup(device, NULL);
}
OF_EARLYCON_DECLARE(mtk8250, "mediatek,mt6577-uart", early_mtk8250_setup);
#endif
MODULE_AUTHOR("Matthias Brugger");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Mediatek 8250 serial port driver");

View File

@ -22,6 +22,7 @@
#include <linux/pm_runtime.h>
#include <linux/console.h>
#include <linux/pm_qos.h>
#include <linux/pm_wakeirq.h>
#include <linux/dma-mapping.h>
#include "8250.h"
@ -98,6 +99,7 @@ struct omap8250_priv {
struct pm_qos_request pm_qos_request;
struct work_struct qos_work;
struct uart_8250_dma omap8250_dma;
spinlock_t rx_dma_lock;
};
static u32 uart_read(struct uart_8250_port *up, u32 reg)
@ -551,17 +553,6 @@ static void omap8250_uart_qos_work(struct work_struct *work)
pm_qos_update_request(&priv->pm_qos_request, priv->latency);
}
static irqreturn_t omap_wake_irq(int irq, void *dev_id)
{
struct uart_port *port = dev_id;
int ret;
ret = port->handle_irq(port);
if (ret)
return IRQ_HANDLED;
return IRQ_NONE;
}
#ifdef CONFIG_SERIAL_8250_DMA
static int omap_8250_dma_handle_irq(struct uart_port *port);
#endif
@ -595,11 +586,9 @@ static int omap_8250_startup(struct uart_port *port)
int ret;
if (priv->wakeirq) {
ret = request_irq(priv->wakeirq, omap_wake_irq,
port->irqflags, "uart wakeup irq", port);
ret = dev_pm_set_dedicated_wake_irq(port->dev, priv->wakeirq);
if (ret)
return ret;
disable_irq(priv->wakeirq);
}
pm_runtime_get_sync(port->dev);
@ -648,8 +637,7 @@ static int omap_8250_startup(struct uart_port *port)
err:
pm_runtime_mark_last_busy(port->dev);
pm_runtime_put_autosuspend(port->dev);
if (priv->wakeirq)
free_irq(priv->wakeirq, port);
dev_pm_clear_wake_irq(port->dev);
return ret;
}
@ -681,10 +669,8 @@ static void omap_8250_shutdown(struct uart_port *port)
pm_runtime_mark_last_busy(port->dev);
pm_runtime_put_autosuspend(port->dev);
free_irq(port->irq, port);
if (priv->wakeirq)
free_irq(priv->wakeirq, port);
dev_pm_clear_wake_irq(port->dev);
}
static void omap_8250_throttle(struct uart_port *port)
@ -726,14 +712,21 @@ static int omap_8250_rx_dma(struct uart_8250_port *p, unsigned int iir);
static void __dma_rx_do_complete(struct uart_8250_port *p, bool error)
{
struct omap8250_priv *priv = p->port.private_data;
struct uart_8250_dma *dma = p->dma;
struct tty_port *tty_port = &p->port.state->port;
struct dma_tx_state state;
int count;
unsigned long flags;
dma_sync_single_for_cpu(dma->rxchan->device->dev, dma->rx_addr,
dma->rx_size, DMA_FROM_DEVICE);
spin_lock_irqsave(&priv->rx_dma_lock, flags);
if (!dma->rx_running)
goto unlock;
dma->rx_running = 0;
dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);
dmaengine_terminate_all(dma->rxchan);
@ -742,6 +735,9 @@ static void __dma_rx_do_complete(struct uart_8250_port *p, bool error)
tty_insert_flip_string(tty_port, dma->rx_buf, count);
p->port.icount.rx += count;
unlock:
spin_unlock_irqrestore(&priv->rx_dma_lock, flags);
if (!error)
omap_8250_rx_dma(p, 0);
@ -753,28 +749,45 @@ static void __dma_rx_complete(void *param)
__dma_rx_do_complete(param, false);
}
static void omap_8250_rx_dma_flush(struct uart_8250_port *p)
{
struct omap8250_priv *priv = p->port.private_data;
struct uart_8250_dma *dma = p->dma;
unsigned long flags;
spin_lock_irqsave(&priv->rx_dma_lock, flags);
if (!dma->rx_running) {
spin_unlock_irqrestore(&priv->rx_dma_lock, flags);
return;
}
dmaengine_pause(dma->rxchan);
spin_unlock_irqrestore(&priv->rx_dma_lock, flags);
__dma_rx_do_complete(p, true);
}
static int omap_8250_rx_dma(struct uart_8250_port *p, unsigned int iir)
{
struct omap8250_priv *priv = p->port.private_data;
struct uart_8250_dma *dma = p->dma;
int err = 0;
struct dma_async_tx_descriptor *desc;
unsigned long flags;
switch (iir & 0x3f) {
case UART_IIR_RLSI:
/* 8250_core handles errors and break interrupts */
if (dma->rx_running) {
dmaengine_pause(dma->rxchan);
__dma_rx_do_complete(p, true);
}
omap_8250_rx_dma_flush(p);
return -EIO;
case UART_IIR_RX_TIMEOUT:
/*
* If RCVR FIFO trigger level was not reached, complete the
* transfer and let 8250_core copy the remaining data.
*/
if (dma->rx_running) {
dmaengine_pause(dma->rxchan);
__dma_rx_do_complete(p, true);
}
omap_8250_rx_dma_flush(p);
return -ETIMEDOUT;
case UART_IIR_RDI:
/*
@ -786,24 +799,25 @@ static int omap_8250_rx_dma(struct uart_8250_port *p, unsigned int iir)
* the DMA won't do anything soon so we have to cancel the DMA
* transfer and purge the FIFO manually.
*/
if (dma->rx_running) {
dmaengine_pause(dma->rxchan);
__dma_rx_do_complete(p, true);
}
omap_8250_rx_dma_flush(p);
return -ETIMEDOUT;
default:
break;
}
spin_lock_irqsave(&priv->rx_dma_lock, flags);
if (dma->rx_running)
return 0;
goto out;
desc = dmaengine_prep_slave_single(dma->rxchan, dma->rx_addr,
dma->rx_size, DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc)
return -EBUSY;
if (!desc) {
err = -EBUSY;
goto out;
}
dma->rx_running = 1;
desc->callback = __dma_rx_complete;
@ -815,7 +829,9 @@ static int omap_8250_rx_dma(struct uart_8250_port *p, unsigned int iir)
dma->rx_size, DMA_FROM_DEVICE);
dma_async_issue_pending(dma->rxchan);
return 0;
out:
spin_unlock_irqrestore(&priv->rx_dma_lock, flags);
return err;
}
static int omap_8250_tx_dma(struct uart_8250_port *p);
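The new rx_dma_lock serializes checks and updates of dma->rx_running between the DMA completion callback and the interrupt path, so a completion racing with omap_8250_rx_dma_flush() cannot tear down the same transfer twice. The core of that pattern, reduced to a hedged sketch (structure and function names are illustrative only):

#include <linux/spinlock.h>
#include <linux/types.h>

struct example_rx_state {
        spinlock_t lock;        /* protects rx_running */
        bool rx_running;
};

/* Claim the "stop" side: returns true only for the caller that wins. */
static bool example_rx_take_down(struct example_rx_state *st)
{
        unsigned long flags;
        bool was_running;

        spin_lock_irqsave(&st->lock, flags);
        was_running = st->rx_running;
        st->rx_running = false;
        spin_unlock_irqrestore(&st->lock, flags);

        return was_running;
}

/* Claim the "start" side: returns true only if no transfer was pending. */
static bool example_rx_start(struct example_rx_state *st)
{
        unsigned long flags;
        bool started = false;

        spin_lock_irqsave(&st->lock, flags);
        if (!st->rx_running) {
                st->rx_running = true;
                started = true;
        }
        spin_unlock_irqrestore(&st->lock, flags);

        return started;
}

Only the winner of example_rx_take_down() goes on to pause and terminate the dmaengine channel; the loser simply returns, which matches the early "goto unlock" added to __dma_rx_do_complete() above.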
@ -1129,6 +1145,8 @@ static int omap8250_probe(struct platform_device *pdev)
priv->latency);
INIT_WORK(&priv->qos_work, omap8250_uart_qos_work);
spin_lock_init(&priv->rx_dma_lock);
device_init_wakeup(&pdev->dev, true);
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_set_autosuspend_delay(&pdev->dev, -1);
@ -1193,31 +1211,6 @@ static int omap8250_remove(struct platform_device *pdev)
return 0;
}
#ifdef CONFIG_PM
static inline void omap8250_enable_wakeirq(struct omap8250_priv *priv,
bool enable)
{
if (!priv->wakeirq)
return;
if (enable)
enable_irq(priv->wakeirq);
else
disable_irq_nosync(priv->wakeirq);
}
static void omap8250_enable_wakeup(struct omap8250_priv *priv,
bool enable)
{
if (enable == priv->wakeups_enabled)
return;
omap8250_enable_wakeirq(priv, enable);
priv->wakeups_enabled = enable;
}
#endif
#ifdef CONFIG_PM_SLEEP
static int omap8250_prepare(struct device *dev)
{
@ -1244,11 +1237,6 @@ static int omap8250_suspend(struct device *dev)
serial8250_suspend_port(priv->line);
flush_work(&priv->qos_work);
if (device_may_wakeup(dev))
omap8250_enable_wakeup(priv, true);
else
omap8250_enable_wakeup(priv, false);
return 0;
}
@ -1256,9 +1244,6 @@ static int omap8250_resume(struct device *dev)
{
struct omap8250_priv *priv = dev_get_drvdata(dev);
if (device_may_wakeup(dev))
omap8250_enable_wakeup(priv, false);
serial8250_resume_port(priv->line);
return 0;
}
@ -1300,7 +1285,6 @@ static int omap8250_runtime_suspend(struct device *dev)
return -EBUSY;
}
omap8250_enable_wakeup(priv, true);
if (up->dma)
omap_8250_rx_dma(up, UART_IIR_RX_TIMEOUT);
@ -1321,7 +1305,6 @@ static int omap8250_runtime_resume(struct device *dev)
return 0;
up = serial8250_get_port(priv->line);
omap8250_enable_wakeup(priv, false);
loss_cntx = omap8250_lost_context(up);
if (loss_cntx)

@ -0,0 +1,257 @@
/*
* Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include "8250.h"
/* Most (but not all) UniPhier UART devices have a 64-depth FIFO. */
#define UNIPHIER_UART_DEFAULT_FIFO_SIZE 64
#define UNIPHIER_UART_CHAR_FCR 3 /* Character / FIFO Control Register */
#define UNIPHIER_UART_LCR_MCR 4 /* Line/Modem Control Register */
#define UNIPHIER_UART_LCR_SHIFT 8
#define UNIPHIER_UART_DLR 9 /* Divisor Latch Register */
struct uniphier8250_priv {
int line;
struct clk *clk;
spinlock_t atomic_write_lock;
};
/*
* The register map is slightly different from that of 8250.
* IO callbacks must be overridden for correct access to FCR, LCR, and MCR.
*/
static unsigned int uniphier_serial_in(struct uart_port *p, int offset)
{
unsigned int valshift = 0;
switch (offset) {
case UART_LCR:
valshift = UNIPHIER_UART_LCR_SHIFT;
/* fall through */
case UART_MCR:
offset = UNIPHIER_UART_LCR_MCR;
break;
default:
break;
}
offset <<= p->regshift;
/*
* The return value must be masked with 0xff because LCR and MCR reside
* in the same register that must be accessed by 32-bit write/read.
* 8- or 16-bit access to this hardware results in unexpected behavior.
*/
return (readl(p->membase + offset) >> valshift) & 0xff;
}
static void uniphier_serial_out(struct uart_port *p, int offset, int value)
{
unsigned int valshift = 0;
bool normal = false;
switch (offset) {
case UART_FCR:
offset = UNIPHIER_UART_CHAR_FCR;
break;
case UART_LCR:
valshift = UNIPHIER_UART_LCR_SHIFT;
/* Divisor latch access bit does not exist. */
value &= ~(UART_LCR_DLAB << valshift);
/* fall through */
case UART_MCR:
offset = UNIPHIER_UART_LCR_MCR;
break;
default:
normal = true;
break;
}
offset <<= p->regshift;
if (normal) {
writel(value, p->membase + offset);
} else {
/*
* Special case: two registers share the same address that
* must be 32-bit accessed. As this is no longer atomic-safe,
* take a lock just in case.
*/
struct uniphier8250_priv *priv = p->private_data;
unsigned long flags;
u32 tmp;
spin_lock_irqsave(&priv->atomic_write_lock, flags);
tmp = readl(p->membase + offset);
tmp &= ~(0xff << valshift);
tmp |= value << valshift;
writel(tmp, p->membase + offset);
spin_unlock_irqrestore(&priv->atomic_write_lock, flags);
}
}
/*
* This hardware does not have the divisor latch access bit.
* The divisor latch register exists at a different address.
* Override dl_read/write callbacks.
*/
static int uniphier_serial_dl_read(struct uart_8250_port *up)
{
return readl(up->port.membase + UNIPHIER_UART_DLR);
}
static void uniphier_serial_dl_write(struct uart_8250_port *up, int value)
{
writel(value, up->port.membase + UNIPHIER_UART_DLR);
}
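For background (not part of this patch): the 8250 core derives the value it hands to the dl_write() hook from the UART input clock using the conventional 16x-oversampling divisor, so overriding dl_read/dl_write with accesses to the single 32-bit DLR register is all this driver needs here. A rough sketch of that relationship, with illustrative numbers only:

#include <linux/kernel.h>

/*
 * Classic 16550-style baud divisor: divisor = uartclk / (16 * baud).
 * e.g. an (assumed) 58,820,000 Hz uartclk at 115200 baud gives ~32.
 */
static unsigned int example_baud_divisor(unsigned int uartclk,
                                         unsigned int baud)
{
        return DIV_ROUND_CLOSEST(uartclk, 16 * baud);
}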
static int uniphier_of_serial_setup(struct device *dev, struct uart_port *port,
struct uniphier8250_priv *priv)
{
int ret;
u32 prop;
struct device_node *np = dev->of_node;
ret = of_alias_get_id(np, "serial");
if (ret < 0) {
dev_err(dev, "failed to get alias id\n");
return ret;
}
port->line = priv->line = ret;
/* Get clk rate through clk driver */
priv->clk = devm_clk_get(dev, NULL);
if (IS_ERR(priv->clk)) {
dev_err(dev, "failed to get clock\n");
return PTR_ERR(priv->clk);
}
ret = clk_prepare_enable(priv->clk);
if (ret < 0)
return ret;
port->uartclk = clk_get_rate(priv->clk);
/* Check for fifo size */
if (of_property_read_u32(np, "fifo-size", &prop) == 0)
port->fifosize = prop;
else
port->fifosize = UNIPHIER_UART_DEFAULT_FIFO_SIZE;
return 0;
}
static int uniphier_uart_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct uart_8250_port up;
struct uniphier8250_priv *priv;
struct resource *regs;
void __iomem *membase;
int irq;
int ret;
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!regs) {
dev_err(dev, "failed to get memory resource");
return -EINVAL;
}
membase = devm_ioremap(dev, regs->start, resource_size(regs));
if (!membase)
return -ENOMEM;
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(dev, "failed to get IRQ number");
return irq;
}
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
memset(&up, 0, sizeof(up));
ret = uniphier_of_serial_setup(dev, &up.port, priv);
if (ret < 0)
return ret;
spin_lock_init(&priv->atomic_write_lock);
up.port.dev = dev;
up.port.private_data = priv;
up.port.mapbase = regs->start;
up.port.mapsize = resource_size(regs);
up.port.membase = membase;
up.port.irq = irq;
up.port.type = PORT_16550A;
up.port.iotype = UPIO_MEM32;
up.port.regshift = 2;
up.port.flags = UPF_FIXED_PORT | UPF_FIXED_TYPE;
up.capabilities = UART_CAP_FIFO;
up.port.serial_in = uniphier_serial_in;
up.port.serial_out = uniphier_serial_out;
up.dl_read = uniphier_serial_dl_read;
up.dl_write = uniphier_serial_dl_write;
ret = serial8250_register_8250_port(&up);
if (ret < 0) {
dev_err(dev, "failed to register 8250 port\n");
return ret;
}
platform_set_drvdata(pdev, priv);
return 0;
}
static int uniphier_uart_remove(struct platform_device *pdev)
{
struct uniphier8250_priv *priv = platform_get_drvdata(pdev);
serial8250_unregister_port(priv->line);
clk_disable_unprepare(priv->clk);
return 0;
}
static const struct of_device_id uniphier_uart_match[] = {
{ .compatible = "socionext,uniphier-uart" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, uniphier_uart_match);
static struct platform_driver uniphier_uart_platform_driver = {
.probe = uniphier_uart_probe,
.remove = uniphier_uart_remove,
.driver = {
.name = "uniphier-uart",
.of_match_table = uniphier_uart_match,
},
};
module_platform_driver(uniphier_uart_platform_driver);
MODULE_AUTHOR("Masahiro Yamada <yamada.masahiro@socionext.com>");
MODULE_DESCRIPTION("UniPhier UART driver");
MODULE_LICENSE("GPL");

@ -336,9 +336,24 @@ config SERIAL_8250_FINTEK
LPC to 4 UART. This device has some RS485 functionality not available
through the PNP driver. If unsure, say N.
config SERIAL_8250_LPC18XX
bool "NXP LPC18xx/43xx serial port support"
depends on SERIAL_8250 && OF && (ARCH_LPC18XX || COMPILE_TEST)
default ARCH_LPC18XX
help
If you have a LPC18xx/43xx based board and want to use the
serial port, say Y to this option. If unsure, say Y.
config SERIAL_8250_MT6577
bool "Mediatek serial port support"
depends on SERIAL_8250 && ARCH_MEDIATEK
help
If you have a Mediatek based board and want to use the
serial port, say Y to this option. If unsure, say N.
config SERIAL_8250_UNIPHIER
tristate "Support for UniPhier on-chip UART"
depends on SERIAL_8250 && ARCH_UNIPHIER
help
If you have a UniPhier based board and want to use the on-chip
serial ports, say Y to this option. If unsure, say N.

@ -22,4 +22,6 @@ obj-$(CONFIG_SERIAL_8250_DW) += 8250_dw.o
obj-$(CONFIG_SERIAL_8250_EM) += 8250_em.o
obj-$(CONFIG_SERIAL_8250_OMAP) += 8250_omap.o
obj-$(CONFIG_SERIAL_8250_FINTEK) += 8250_fintek.o
obj-$(CONFIG_SERIAL_8250_LPC18XX) += 8250_lpc18xx.o
obj-$(CONFIG_SERIAL_8250_MT6577) += 8250_mtk.o
obj-$(CONFIG_SERIAL_8250_UNIPHIER) += 8250_uniphier.o

@ -241,7 +241,6 @@ config SERIAL_SAMSUNG
tristate "Samsung SoC serial support"
depends on PLAT_SAMSUNG || ARCH_EXYNOS
select SERIAL_CORE
select SERIAL_EARLYCON
help
Support for the on-chip UARTs on the Samsung S3C24XX series CPUs,
providing /dev/ttySAC0, 1 and 2 (note, some machines may not
@ -277,6 +276,7 @@ config SERIAL_SAMSUNG_CONSOLE
bool "Support for console on Samsung SoC serial port"
depends on SERIAL_SAMSUNG=y
select SERIAL_CORE_CONSOLE
select SERIAL_EARLYCON
help
Allow selection of the S3C24XX on-board serial ports for use as
a virtual console.
@ -1179,15 +1179,42 @@ config SERIAL_SCCNXP_CONSOLE
help
Support for console on SCCNXP serial ports.
config SERIAL_SC16IS7XX_CORE
tristate
config SERIAL_SC16IS7XX
tristate "SC16IS7xx serial support"
depends on I2C
select SERIAL_CORE
select REGMAP_I2C if I2C
depends on I2C || SPI_MASTER
help
This selects support for SC16IS7xx serial ports.
Supported ICs are SC16IS740, SC16IS741, SC16IS750, SC16IS752,
SC16IS760 and SC16IS762.
SC16IS760 and SC16IS762. Select supported buses using options below.
config SERIAL_SC16IS7XX_I2C
bool "SC16IS7xx for I2C interface"
depends on SERIAL_SC16IS7XX
depends on I2C
select SERIAL_SC16IS7XX_CORE if SERIAL_SC16IS7XX
select REGMAP_I2C if I2C
default y
help
Enable the SC16IS7xx driver on the I2C bus.
Say y if the device sits on I2C, otherwise say n.
Enabled by default so that oldconfig keeps working.
You must select at least one bus for the driver to be built.
config SERIAL_SC16IS7XX_SPI
bool "SC16IS7xx for spi interface"
depends on SERIAL_SC16IS7XX
depends on SPI_MASTER
select SERIAL_SC16IS7XX_CORE if SERIAL_SC16IS7XX
select REGMAP_SPI if SPI_MASTER
help
Enable the SC16IS7xx driver on the SPI bus.
Say y if the device sits on SPI, otherwise say n.
This adds SPI support to the existing driver.
You must select at least one bus for the driver to be built.
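Splitting the symbols this way lets the core stay a single tristate module while each bus back end is compiled in only when its bool option is set. A hedged sketch of how such bus glue is commonly guarded (the function names and structure are illustrative, not taken from sc16is7xx.c):

#include <linux/regmap.h>

#ifdef CONFIG_SERIAL_SC16IS7XX_I2C
#include <linux/i2c.h>

/* Built only when the I2C bus option is enabled. */
static struct regmap *example_regmap_i2c(struct i2c_client *client,
                                         const struct regmap_config *cfg)
{
        return devm_regmap_init_i2c(client, cfg);
}
#endif

#ifdef CONFIG_SERIAL_SC16IS7XX_SPI
#include <linux/spi/spi.h>

/* Built only when the SPI bus option is enabled. */
static struct regmap *example_regmap_spi(struct spi_device *spi,
                                         const struct regmap_config *cfg)
{
        return devm_regmap_init_spi(spi, cfg);
}
#endif

This mirrors the Makefile change further down, where the object is now built from CONFIG_SERIAL_SC16IS7XX_CORE rather than the user-visible SERIAL_SC16IS7XX option.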
config SERIAL_BFIN_SPORT
tristate "Blackfin SPORT emulate UART"
@ -1349,7 +1376,7 @@ config SERIAL_ALTERA_UART_CONSOLE
config SERIAL_IFX6X60
tristate "SPI protocol driver for Infineon 6x60 modem (EXPERIMENTAL)"
depends on GPIOLIB && SPI
depends on GPIOLIB && SPI && HAS_DMA
help
Support for the IFX6x60 modem devices on Intel MID platforms.
@ -1378,14 +1405,6 @@ config SERIAL_PCH_UART_CONSOLE
(the system console is the device which receives all kernel messages and
warnings and which allows logins in single user mode).
config SERIAL_MSM_SMD
bool "Enable tty device interface for some SMD ports"
default n
depends on MSM_SMD
help
Enables userspace clients to read and write to some streaming SMD
ports via tty device interface for MSM chipset.
config SERIAL_MXS_AUART
depends on ARCH_MXS
tristate "MXS AUART support"

@ -53,7 +53,7 @@ obj-$(CONFIG_SERIAL_SB1250_DUART) += sb1250-duart.o
obj-$(CONFIG_ETRAX_SERIAL) += crisv10.o
obj-$(CONFIG_SERIAL_ETRAXFS) += etraxfs-uart.o
obj-$(CONFIG_SERIAL_SCCNXP) += sccnxp.o
obj-$(CONFIG_SERIAL_SC16IS7XX) += sc16is7xx.o
obj-$(CONFIG_SERIAL_SC16IS7XX_CORE) += sc16is7xx.o
obj-$(CONFIG_SERIAL_JSM) += jsm/
obj-$(CONFIG_SERIAL_TXX9) += serial_txx9.o
obj-$(CONFIG_SERIAL_VR41XX) += vr41xx_siu.o
@ -79,7 +79,6 @@ obj-$(CONFIG_SERIAL_ALTERA_JTAGUART) += altera_jtaguart.o
obj-$(CONFIG_SERIAL_VT8500) += vt8500_serial.o
obj-$(CONFIG_SERIAL_IFX6X60) += ifx6x60.o
obj-$(CONFIG_SERIAL_PCH_UART) += pch_uart.o
obj-$(CONFIG_SERIAL_MSM_SMD) += msm_smd_tty.o
obj-$(CONFIG_SERIAL_MXS_AUART) += mxs-auart.o
obj-$(CONFIG_SERIAL_LANTIQ) += lantiq.o
obj-$(CONFIG_SERIAL_XILINX_PS_UART) += xilinx_uartps.o

@ -387,7 +387,7 @@ console_initcall(altera_jtaguart_console_init);
#define ALTERA_JTAGUART_CONSOLE NULL
#endif /* CONFIG_ALTERA_JTAGUART_CONSOLE */
#endif /* CONFIG_SERIAL_ALTERA_JTAGUART_CONSOLE */
static struct uart_driver altera_jtaguart_driver = {
.owner = THIS_MODULE,

@ -493,7 +493,7 @@ console_initcall(altera_uart_console_init);
#define ALTERA_UART_CONSOLE NULL
#endif /* CONFIG_ALTERA_UART_CONSOLE */
#endif /* CONFIG_SERIAL_ALTERA_UART_CONSOLE */
/*
* Define the altera_uart UART driver structure.

@ -58,7 +58,7 @@
#include <linux/pinctrl/consumer.h>
#include <linux/sizes.h>
#include <linux/io.h>
#include <linux/workqueue.h>
#include <linux/acpi.h>
#define UART_NR 14
@ -79,6 +79,8 @@ struct vendor_data {
bool oversampling;
bool dma_threshold;
bool cts_event_workaround;
bool always_enabled;
bool fixed_options;
unsigned int (*get_fifosize)(struct amba_device *dev);
};
@ -95,9 +97,19 @@ static struct vendor_data vendor_arm = {
.oversampling = false,
.dma_threshold = false,
.cts_event_workaround = false,
.always_enabled = false,
.fixed_options = false,
.get_fifosize = get_fifosize_arm,
};
static struct vendor_data vendor_sbsa = {
.oversampling = false,
.dma_threshold = false,
.cts_event_workaround = false,
.always_enabled = true,
.fixed_options = true,
};
static unsigned int get_fifosize_st(struct amba_device *dev)
{
return 64;
@ -110,6 +122,8 @@ static struct vendor_data vendor_st = {
.oversampling = true,
.dma_threshold = true,
.cts_event_workaround = true,
.always_enabled = false,
.fixed_options = false,
.get_fifosize = get_fifosize_st,
};
@ -157,9 +171,8 @@ struct uart_amba_port {
unsigned int lcrh_tx; /* vendor-specific */
unsigned int lcrh_rx; /* vendor-specific */
unsigned int old_cr; /* state during shutdown */
struct delayed_work tx_softirq_work;
bool autorts;
unsigned int tx_irq_seen; /* 0=none, 1=1, 2=2 or more */
unsigned int fixed_baud; /* vendor-set fixed baud rate */
char type[12];
#ifdef CONFIG_DMA_ENGINE
/* DMA stuff */
@ -1172,15 +1185,14 @@ static void pl011_stop_tx(struct uart_port *port)
pl011_dma_tx_stop(uap);
}
static bool pl011_tx_chars(struct uart_amba_port *uap);
static void pl011_tx_chars(struct uart_amba_port *uap, bool from_irq);
/* Start TX with programmed I/O only (no DMA) */
static void pl011_start_tx_pio(struct uart_amba_port *uap)
{
uap->im |= UART011_TXIM;
writew(uap->im, uap->port.membase + UART011_IMSC);
if (!uap->tx_irq_seen)
pl011_tx_chars(uap);
pl011_tx_chars(uap, false);
}
static void pl011_start_tx(struct uart_port *port)
@ -1247,15 +1259,11 @@ __acquires(&uap->port.lock)
spin_lock(&uap->port.lock);
}
/*
* Transmit a character
*
* Returns true if the character was successfully queued to the FIFO.
* Returns false otherwise.
*/
static bool pl011_tx_char(struct uart_amba_port *uap, unsigned char c)
static bool pl011_tx_char(struct uart_amba_port *uap, unsigned char c,
bool from_irq)
{
if (readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF)
if (unlikely(!from_irq) &&
readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF)
return false; /* unable to transmit character */
writew(c, uap->port.membase + UART01x_DR);
@ -1264,70 +1272,41 @@ static bool pl011_tx_char(struct uart_amba_port *uap, unsigned char c)
return true;
}
static bool pl011_tx_chars(struct uart_amba_port *uap)
static void pl011_tx_chars(struct uart_amba_port *uap, bool from_irq)
{
struct circ_buf *xmit = &uap->port.state->xmit;
int count;
if (unlikely(uap->tx_irq_seen < 2))
/*
* Initial FIFO fill level unknown: we must check TXFF
* after each write, so just try to fill up the FIFO.
*/
count = uap->fifosize;
else /* tx_irq_seen >= 2 */
/*
* FIFO initially at least half-empty, so we can simply
* write half the FIFO without polling TXFF.
* Note: the *first* TX IRQ can still race with
* pl011_start_tx_pio(), which can result in the FIFO
* being fuller than expected in that case.
*/
count = uap->fifosize >> 1;
/*
* If the FIFO is full we're guaranteed a TX IRQ at some later point,
* and can't transmit immediately in any case:
*/
if (unlikely(uap->tx_irq_seen < 2 &&
readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF))
return false;
int count = uap->fifosize >> 1;
if (uap->port.x_char) {
if (!pl011_tx_char(uap, uap->port.x_char))
goto done;
if (!pl011_tx_char(uap, uap->port.x_char, from_irq))
return;
uap->port.x_char = 0;
--count;
}
if (uart_circ_empty(xmit) || uart_tx_stopped(&uap->port)) {
pl011_stop_tx(&uap->port);
goto done;
return;
}
/* If we are using DMA mode, try to send some characters. */
if (pl011_dma_tx_irq(uap))
goto done;
return;
while (count-- > 0 && pl011_tx_char(uap, xmit->buf[xmit->tail])) {
xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
if (uart_circ_empty(xmit))
do {
if (likely(from_irq) && count-- == 0)
break;
}
if (!pl011_tx_char(uap, xmit->buf[xmit->tail], from_irq))
break;
xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
} while (!uart_circ_empty(xmit));
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(&uap->port);
if (uart_circ_empty(xmit)) {
if (uart_circ_empty(xmit))
pl011_stop_tx(&uap->port);
goto done;
}
if (unlikely(!uap->tx_irq_seen))
schedule_delayed_work(&uap->tx_softirq_work, uap->port.timeout);
done:
return false;
}
static void pl011_modem_status(struct uart_amba_port *uap)
@ -1354,41 +1333,13 @@ static void pl011_modem_status(struct uart_amba_port *uap)
wake_up_interruptible(&uap->port.state->port.delta_msr_wait);
}
static void pl011_tx_softirq(struct work_struct *work)
static void check_apply_cts_event_workaround(struct uart_amba_port *uap)
{
struct delayed_work *dwork = to_delayed_work(work);
struct uart_amba_port *uap =
container_of(dwork, struct uart_amba_port, tx_softirq_work);
spin_lock(&uap->port.lock);
while (pl011_tx_chars(uap)) ;
spin_unlock(&uap->port.lock);
}
static void pl011_tx_irq_seen(struct uart_amba_port *uap)
{
if (likely(uap->tx_irq_seen > 1))
return;
uap->tx_irq_seen++;
if (uap->tx_irq_seen < 2)
/* first TX IRQ */
cancel_delayed_work(&uap->tx_softirq_work);
}
static irqreturn_t pl011_int(int irq, void *dev_id)
{
struct uart_amba_port *uap = dev_id;
unsigned long flags;
unsigned int status, pass_counter = AMBA_ISR_PASS_LIMIT;
int handled = 0;
unsigned int dummy_read;
spin_lock_irqsave(&uap->port.lock, flags);
status = readw(uap->port.membase + UART011_MIS);
if (status) {
do {
if (uap->vendor->cts_event_workaround) {
if (!uap->vendor->cts_event_workaround)
return;
/* workaround to make sure that all bits are unlocked.. */
writew(0x00, uap->port.membase + UART011_ICR);
@ -1399,7 +1350,22 @@ static irqreturn_t pl011_int(int irq, void *dev_id)
*/
dummy_read = readw(uap->port.membase + UART011_ICR);
dummy_read = readw(uap->port.membase + UART011_ICR);
}
}
static irqreturn_t pl011_int(int irq, void *dev_id)
{
struct uart_amba_port *uap = dev_id;
unsigned long flags;
unsigned int status, pass_counter = AMBA_ISR_PASS_LIMIT;
u16 imsc;
int handled = 0;
spin_lock_irqsave(&uap->port.lock, flags);
imsc = readw(uap->port.membase + UART011_IMSC);
status = readw(uap->port.membase + UART011_RIS) & imsc;
if (status) {
do {
check_apply_cts_event_workaround(uap);
writew(status & ~(UART011_TXIS|UART011_RTIS|
UART011_RXIS),
@ -1414,15 +1380,13 @@ static irqreturn_t pl011_int(int irq, void *dev_id)
if (status & (UART011_DSRMIS|UART011_DCDMIS|
UART011_CTSMIS|UART011_RIMIS))
pl011_modem_status(uap);
if (status & UART011_TXIS) {
pl011_tx_irq_seen(uap);
pl011_tx_chars(uap);
}
if (status & UART011_TXIS)
pl011_tx_chars(uap, true);
if (pass_counter-- == 0)
break;
status = readw(uap->port.membase + UART011_MIS);
status = readw(uap->port.membase + UART011_RIS) & imsc;
} while (status != 0);
handled = 1;
}
@ -1617,6 +1581,32 @@ static void pl011_write_lcr_h(struct uart_amba_port *uap, unsigned int lcr_h)
}
}
static int pl011_allocate_irq(struct uart_amba_port *uap)
{
writew(uap->im, uap->port.membase + UART011_IMSC);
return request_irq(uap->port.irq, pl011_int, 0, "uart-pl011", uap);
}
/*
* Enable interrupts; only the RX timeout interrupt when using DMA.
* If the initial RX DMA job failed, start in interrupt mode
* as well.
*/
static void pl011_enable_interrupts(struct uart_amba_port *uap)
{
spin_lock_irq(&uap->port.lock);
/* Clear out any spuriously appearing RX interrupts */
writew(UART011_RTIS | UART011_RXIS,
uap->port.membase + UART011_ICR);
uap->im = UART011_RTIM;
if (!pl011_dma_rx_running(uap))
uap->im |= UART011_RXIM;
writew(uap->im, uap->port.membase + UART011_IMSC);
spin_unlock_irq(&uap->port.lock);
}
static int pl011_startup(struct uart_port *port)
{
struct uart_amba_port *uap =
@ -1628,20 +1618,12 @@ static int pl011_startup(struct uart_port *port)
if (retval)
goto clk_dis;
writew(uap->im, uap->port.membase + UART011_IMSC);
/*
* Allocate the IRQ
*/
retval = request_irq(uap->port.irq, pl011_int, 0, "uart-pl011", uap);
retval = pl011_allocate_irq(uap);
if (retval)
goto clk_dis;
writew(uap->vendor->ifls, uap->port.membase + UART011_IFLS);
/* Assume that TX IRQ doesn't work until we see one: */
uap->tx_irq_seen = 0;
spin_lock_irq(&uap->port.lock);
/* restore RTS and DTR */
@ -1659,20 +1641,7 @@ static int pl011_startup(struct uart_port *port)
/* Startup DMA */
pl011_dma_startup(uap);
/*
* Finally, enable interrupts, only timeouts when using DMA
* if initial RX DMA job failed, start in interrupt mode
* as well.
*/
spin_lock_irq(&uap->port.lock);
/* Clear out any spuriously appearing RX interrupts */
writew(UART011_RTIS | UART011_RXIS,
uap->port.membase + UART011_ICR);
uap->im = UART011_RTIM;
if (!pl011_dma_rx_running(uap))
uap->im |= UART011_RXIM;
writew(uap->im, uap->port.membase + UART011_IMSC);
spin_unlock_irq(&uap->port.lock);
pl011_enable_interrupts(uap);
return 0;
@ -1681,6 +1650,28 @@ static int pl011_startup(struct uart_port *port)
return retval;
}
static int sbsa_uart_startup(struct uart_port *port)
{
struct uart_amba_port *uap =
container_of(port, struct uart_amba_port, port);
int retval;
retval = pl011_hwinit(port);
if (retval)
return retval;
retval = pl011_allocate_irq(uap);
if (retval)
return retval;
/* The SBSA UART does not support any modem status lines. */
uap->old_status = 0;
pl011_enable_interrupts(uap);
return 0;
}
static void pl011_shutdown_channel(struct uart_amba_port *uap,
unsigned int lcrh)
{
@ -1691,36 +1682,15 @@ static void pl011_shutdown_channel(struct uart_amba_port *uap,
writew(val, uap->port.membase + lcrh);
}
static void pl011_shutdown(struct uart_port *port)
{
struct uart_amba_port *uap =
container_of(port, struct uart_amba_port, port);
unsigned int cr;
cancel_delayed_work_sync(&uap->tx_softirq_work);
/*
* disable all interrupts
*/
spin_lock_irq(&uap->port.lock);
uap->im = 0;
writew(uap->im, uap->port.membase + UART011_IMSC);
writew(0xffff, uap->port.membase + UART011_ICR);
spin_unlock_irq(&uap->port.lock);
pl011_dma_shutdown(uap);
/*
* Free the interrupt
*/
free_irq(uap->port.irq, uap);
/*
* disable the port
/*
* disable the port. It should not disable RTS and DTR.
* Also RTS and DTR state should be preserved to restore
* it during startup().
*/
static void pl011_disable_uart(struct uart_amba_port *uap)
{
unsigned int cr;
uap->autorts = false;
spin_lock_irq(&uap->port.lock);
cr = readw(uap->port.membase + UART011_CR);
@ -1736,6 +1706,32 @@ static void pl011_shutdown(struct uart_port *port)
pl011_shutdown_channel(uap, uap->lcrh_rx);
if (uap->lcrh_rx != uap->lcrh_tx)
pl011_shutdown_channel(uap, uap->lcrh_tx);
}
static void pl011_disable_interrupts(struct uart_amba_port *uap)
{
spin_lock_irq(&uap->port.lock);
/* mask all interrupts and clear all pending ones */
uap->im = 0;
writew(uap->im, uap->port.membase + UART011_IMSC);
writew(0xffff, uap->port.membase + UART011_ICR);
spin_unlock_irq(&uap->port.lock);
}
static void pl011_shutdown(struct uart_port *port)
{
struct uart_amba_port *uap =
container_of(port, struct uart_amba_port, port);
pl011_disable_interrupts(uap);
pl011_dma_shutdown(uap);
free_irq(uap->port.irq, uap);
pl011_disable_uart(uap);
/*
* Shut down the clock producer
@ -1756,6 +1752,51 @@ static void pl011_shutdown(struct uart_port *port)
uap->port.ops->flush_buffer(port);
}
static void sbsa_uart_shutdown(struct uart_port *port)
{
struct uart_amba_port *uap =
container_of(port, struct uart_amba_port, port);
pl011_disable_interrupts(uap);
free_irq(uap->port.irq, uap);
if (uap->port.ops->flush_buffer)
uap->port.ops->flush_buffer(port);
}
static void
pl011_setup_status_masks(struct uart_port *port, struct ktermios *termios)
{
port->read_status_mask = UART011_DR_OE | 255;
if (termios->c_iflag & INPCK)
port->read_status_mask |= UART011_DR_FE | UART011_DR_PE;
if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
port->read_status_mask |= UART011_DR_BE;
/*
* Characters to ignore
*/
port->ignore_status_mask = 0;
if (termios->c_iflag & IGNPAR)
port->ignore_status_mask |= UART011_DR_FE | UART011_DR_PE;
if (termios->c_iflag & IGNBRK) {
port->ignore_status_mask |= UART011_DR_BE;
/*
* If we're ignoring parity and break indicators,
* ignore overruns too (for real raw support).
*/
if (termios->c_iflag & IGNPAR)
port->ignore_status_mask |= UART011_DR_OE;
}
/*
* Ignore all characters if CREAD is not set.
*/
if ((termios->c_cflag & CREAD) == 0)
port->ignore_status_mask |= UART_DUMMY_DR_RX;
}
static void
pl011_set_termios(struct uart_port *port, struct ktermios *termios,
struct ktermios *old)
@ -1820,33 +1861,7 @@ pl011_set_termios(struct uart_port *port, struct ktermios *termios,
*/
uart_update_timeout(port, termios->c_cflag, baud);
port->read_status_mask = UART011_DR_OE | 255;
if (termios->c_iflag & INPCK)
port->read_status_mask |= UART011_DR_FE | UART011_DR_PE;
if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
port->read_status_mask |= UART011_DR_BE;
/*
* Characters to ignore
*/
port->ignore_status_mask = 0;
if (termios->c_iflag & IGNPAR)
port->ignore_status_mask |= UART011_DR_FE | UART011_DR_PE;
if (termios->c_iflag & IGNBRK) {
port->ignore_status_mask |= UART011_DR_BE;
/*
* If we're ignoring parity and break indicators,
* ignore overruns too (for real raw support).
*/
if (termios->c_iflag & IGNPAR)
port->ignore_status_mask |= UART011_DR_OE;
}
/*
* Ignore all characters if CREAD is not set.
*/
if ((termios->c_cflag & CREAD) == 0)
port->ignore_status_mask |= UART_DUMMY_DR_RX;
pl011_setup_status_masks(port, termios);
if (UART_ENABLE_MS(port, termios->c_cflag))
pl011_enable_ms(port);
@ -1901,6 +1916,27 @@ pl011_set_termios(struct uart_port *port, struct ktermios *termios,
spin_unlock_irqrestore(&port->lock, flags);
}
static void
sbsa_uart_set_termios(struct uart_port *port, struct ktermios *termios,
struct ktermios *old)
{
struct uart_amba_port *uap =
container_of(port, struct uart_amba_port, port);
unsigned long flags;
tty_termios_encode_baud_rate(termios, uap->fixed_baud, uap->fixed_baud);
/* The SBSA UART only supports 8n1 without hardware flow control. */
termios->c_cflag &= ~(CSIZE | CSTOPB | PARENB | PARODD);
termios->c_cflag &= ~(CMSPAR | CRTSCTS);
termios->c_cflag |= CS8 | CLOCAL;
spin_lock_irqsave(&port->lock, flags);
uart_update_timeout(port, CS8, uap->fixed_baud);
pl011_setup_status_masks(port, termios);
spin_unlock_irqrestore(&port->lock, flags);
}
static const char *pl011_type(struct uart_port *port)
{
struct uart_amba_port *uap =
@ -1976,6 +2012,37 @@ static struct uart_ops amba_pl011_pops = {
#endif
};
static void sbsa_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
}
static unsigned int sbsa_uart_get_mctrl(struct uart_port *port)
{
return 0;
}
static const struct uart_ops sbsa_uart_pops = {
.tx_empty = pl011_tx_empty,
.set_mctrl = sbsa_uart_set_mctrl,
.get_mctrl = sbsa_uart_get_mctrl,
.stop_tx = pl011_stop_tx,
.start_tx = pl011_start_tx,
.stop_rx = pl011_stop_rx,
.startup = sbsa_uart_startup,
.shutdown = sbsa_uart_shutdown,
.set_termios = sbsa_uart_set_termios,
.type = pl011_type,
.release_port = pl011_release_port,
.request_port = pl011_request_port,
.config_port = pl011_config_port,
.verify_port = pl011_verify_port,
#ifdef CONFIG_CONSOLE_POLL
.poll_init = pl011_hwinit,
.poll_get_char = pl011_get_poll_char,
.poll_put_char = pl011_put_poll_char,
#endif
};
static struct uart_amba_port *amba_ports[UART_NR];
#ifdef CONFIG_SERIAL_AMBA_PL011_CONSOLE
@ -1994,7 +2061,7 @@ static void
pl011_console_write(struct console *co, const char *s, unsigned int count)
{
struct uart_amba_port *uap = amba_ports[co->index];
unsigned int status, old_cr, new_cr;
unsigned int status, old_cr = 0, new_cr;
unsigned long flags;
int locked = 1;
@ -2011,10 +2078,12 @@ pl011_console_write(struct console *co, const char *s, unsigned int count)
/*
* First save the CR then disable the interrupts
*/
if (!uap->vendor->always_enabled) {
old_cr = readw(uap->port.membase + UART011_CR);
new_cr = old_cr & ~UART011_CR_CTSEN;
new_cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
writew(new_cr, uap->port.membase + UART011_CR);
}
uart_console_write(&uap->port, s, count, pl011_console_putchar);
@ -2025,6 +2094,7 @@ pl011_console_write(struct console *co, const char *s, unsigned int count)
do {
status = readw(uap->port.membase + UART01x_FR);
} while (status & UART01x_FR_BUSY);
if (!uap->vendor->always_enabled)
writew(old_cr, uap->port.membase + UART011_CR);
if (locked)
@ -2106,10 +2176,15 @@ static int __init pl011_console_setup(struct console *co, char *options)
uap->port.uartclk = clk_get_rate(uap->clk);
if (uap->vendor->fixed_options) {
baud = uap->fixed_baud;
} else {
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
uart_parse_options(options,
&baud, &parity, &bits, &flow);
else
pl011_console_get_options(uap, &baud, &parity, &bits);
}
return uart_set_options(&uap->port, co, baud, parity, bits, flow);
}
@ -2201,30 +2276,96 @@ static int pl011_probe_dt_alias(int index, struct device *dev)
return ret;
}
/* unregisters the driver also if no more ports are left */
static void pl011_unregister_port(struct uart_amba_port *uap)
{
int i;
bool busy = false;
for (i = 0; i < ARRAY_SIZE(amba_ports); i++) {
if (amba_ports[i] == uap)
amba_ports[i] = NULL;
else if (amba_ports[i])
busy = true;
}
pl011_dma_remove(uap);
if (!busy)
uart_unregister_driver(&amba_reg);
}
static int pl011_find_free_port(void)
{
int i;
for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
if (amba_ports[i] == NULL)
return i;
return -EBUSY;
}
static int pl011_setup_port(struct device *dev, struct uart_amba_port *uap,
struct resource *mmiobase, int index)
{
void __iomem *base;
base = devm_ioremap_resource(dev, mmiobase);
if (!base)
return -ENOMEM;
index = pl011_probe_dt_alias(index, dev);
uap->old_cr = 0;
uap->port.dev = dev;
uap->port.mapbase = mmiobase->start;
uap->port.membase = base;
uap->port.iotype = UPIO_MEM;
uap->port.fifosize = uap->fifosize;
uap->port.flags = UPF_BOOT_AUTOCONF;
uap->port.line = index;
amba_ports[index] = uap;
return 0;
}
static int pl011_register_port(struct uart_amba_port *uap)
{
int ret;
/* Ensure interrupts from this UART are masked and cleared */
writew(0, uap->port.membase + UART011_IMSC);
writew(0xffff, uap->port.membase + UART011_ICR);
if (!amba_reg.state) {
ret = uart_register_driver(&amba_reg);
if (ret < 0) {
dev_err(uap->port.dev,
"Failed to register AMBA-PL011 driver\n");
return ret;
}
}
ret = uart_add_one_port(&amba_reg, &uap->port);
if (ret)
pl011_unregister_port(uap);
return ret;
}
static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
{
struct uart_amba_port *uap;
struct vendor_data *vendor = id->data;
void __iomem *base;
int i, ret;
int portnr, ret;
for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
if (amba_ports[i] == NULL)
break;
if (i == ARRAY_SIZE(amba_ports))
return -EBUSY;
portnr = pl011_find_free_port();
if (portnr < 0)
return portnr;
uap = devm_kzalloc(&dev->dev, sizeof(struct uart_amba_port),
GFP_KERNEL);
if (uap == NULL)
return -ENOMEM;
i = pl011_probe_dt_alias(i, &dev->dev);
base = devm_ioremap(&dev->dev, dev->res.start,
resource_size(&dev->res));
if (!base)
if (!uap)
return -ENOMEM;
uap->clk = devm_clk_get(&dev->dev, NULL);
@ -2234,64 +2375,27 @@ static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
uap->vendor = vendor;
uap->lcrh_rx = vendor->lcrh_rx;
uap->lcrh_tx = vendor->lcrh_tx;
uap->old_cr = 0;
uap->fifosize = vendor->get_fifosize(dev);
uap->port.dev = &dev->dev;
uap->port.mapbase = dev->res.start;
uap->port.membase = base;
uap->port.iotype = UPIO_MEM;
uap->port.irq = dev->irq[0];
uap->port.fifosize = uap->fifosize;
uap->port.ops = &amba_pl011_pops;
uap->port.flags = UPF_BOOT_AUTOCONF;
uap->port.line = i;
INIT_DELAYED_WORK(&uap->tx_softirq_work, pl011_tx_softirq);
/* Ensure interrupts from this UART are masked and cleared */
writew(0, uap->port.membase + UART011_IMSC);
writew(0xffff, uap->port.membase + UART011_ICR);
snprintf(uap->type, sizeof(uap->type), "PL011 rev%u", amba_rev(dev));
amba_ports[i] = uap;
ret = pl011_setup_port(&dev->dev, uap, &dev->res, portnr);
if (ret)
return ret;
amba_set_drvdata(dev, uap);
if (!amba_reg.state) {
ret = uart_register_driver(&amba_reg);
if (ret < 0) {
dev_err(&dev->dev,
"Failed to register AMBA-PL011 driver\n");
return ret;
}
}
ret = uart_add_one_port(&amba_reg, &uap->port);
if (ret) {
amba_ports[i] = NULL;
uart_unregister_driver(&amba_reg);
}
return ret;
return pl011_register_port(uap);
}
static int pl011_remove(struct amba_device *dev)
{
struct uart_amba_port *uap = amba_get_drvdata(dev);
bool busy = false;
int i;
uart_remove_one_port(&amba_reg, &uap->port);
for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
if (amba_ports[i] == uap)
amba_ports[i] = NULL;
else if (amba_ports[i])
busy = true;
pl011_dma_remove(uap);
if (!busy)
uart_unregister_driver(&amba_reg);
pl011_unregister_port(uap);
return 0;
}
@ -2319,6 +2423,86 @@ static int pl011_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(pl011_dev_pm_ops, pl011_suspend, pl011_resume);
static int sbsa_uart_probe(struct platform_device *pdev)
{
struct uart_amba_port *uap;
struct resource *r;
int portnr, ret;
int baudrate;
/*
* Check the mandatory baud rate parameter in the DT node early
* so that we can easily exit with the error.
*/
if (pdev->dev.of_node) {
struct device_node *np = pdev->dev.of_node;
ret = of_property_read_u32(np, "current-speed", &baudrate);
if (ret)
return ret;
} else {
baudrate = 115200;
}
portnr = pl011_find_free_port();
if (portnr < 0)
return portnr;
uap = devm_kzalloc(&pdev->dev, sizeof(struct uart_amba_port),
GFP_KERNEL);
if (!uap)
return -ENOMEM;
uap->vendor = &vendor_sbsa;
uap->fifosize = 32;
uap->port.irq = platform_get_irq(pdev, 0);
uap->port.ops = &sbsa_uart_pops;
uap->fixed_baud = baudrate;
snprintf(uap->type, sizeof(uap->type), "SBSA");
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
ret = pl011_setup_port(&pdev->dev, uap, r, portnr);
if (ret)
return ret;
platform_set_drvdata(pdev, uap);
return pl011_register_port(uap);
}
static int sbsa_uart_remove(struct platform_device *pdev)
{
struct uart_amba_port *uap = platform_get_drvdata(pdev);
uart_remove_one_port(&amba_reg, &uap->port);
pl011_unregister_port(uap);
return 0;
}
static const struct of_device_id sbsa_uart_of_match[] = {
{ .compatible = "arm,sbsa-uart", },
{},
};
MODULE_DEVICE_TABLE(of, sbsa_uart_of_match);
static const struct acpi_device_id sbsa_uart_acpi_match[] = {
{ "ARMH0011", 0 },
{},
};
MODULE_DEVICE_TABLE(acpi, sbsa_uart_acpi_match);
static struct platform_driver arm_sbsa_uart_platform_driver = {
.probe = sbsa_uart_probe,
.remove = sbsa_uart_remove,
.driver = {
.name = "sbsa-uart",
.of_match_table = of_match_ptr(sbsa_uart_of_match),
.acpi_match_table = ACPI_PTR(sbsa_uart_acpi_match),
},
};
static struct amba_id pl011_ids[] = {
{
.id = 0x00041011,
@ -2349,11 +2533,14 @@ static int __init pl011_init(void)
{
printk(KERN_INFO "Serial: AMBA PL011 UART driver\n");
if (platform_driver_register(&arm_sbsa_uart_platform_driver))
pr_warn("could not register SBSA UART platform driver\n");
return amba_driver_register(&pl011_driver);
}
static void __exit pl011_exit(void)
{
platform_driver_unregister(&arm_sbsa_uart_platform_driver);
amba_driver_unregister(&pl011_driver);
}

@ -165,6 +165,7 @@ struct atmel_uart_port {
struct tasklet_struct tasklet;
unsigned int irq_status;
unsigned int irq_status_prev;
unsigned int status_change;
struct circ_buf rx_ring;
@ -315,7 +316,6 @@ static int atmel_config_rs485(struct uart_port *port,
if (rs485conf->flags & SER_RS485_ENABLED) {
dev_dbg(port->dev, "Setting UART to RS485\n");
atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
if ((rs485conf->delay_rts_after_send) > 0)
UART_PUT_TTGR(port, rs485conf->delay_rts_after_send);
mode |= ATMEL_US_USMODE_RS485;
} else {
@ -354,7 +354,6 @@ static void atmel_set_mctrl(struct uart_port *port, u_int mctrl)
/* override mode to RS485 if needed, otherwise keep the current mode */
if (port->rs485.flags & SER_RS485_ENABLED) {
if ((port->rs485.delay_rts_after_send) > 0)
UART_PUT_TTGR(port, port->rs485.delay_rts_after_send);
mode &= ~ATMEL_US_USMODE;
mode |= ATMEL_US_USMODE_RS485;
@ -1177,6 +1176,9 @@ atmel_handle_status(struct uart_port *port, unsigned int pending,
if (pending & (ATMEL_US_RIIC | ATMEL_US_DSRIC | ATMEL_US_DCDIC
| ATMEL_US_CTSIC)) {
atmel_port->irq_status = status;
atmel_port->status_change = atmel_port->irq_status ^
atmel_port->irq_status_prev;
atmel_port->irq_status_prev = status;
tasklet_schedule(&atmel_port->tasklet);
}
}
@ -1523,17 +1525,14 @@ static void atmel_tasklet_func(unsigned long data)
{
struct uart_port *port = (struct uart_port *)data;
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
unsigned int status;
unsigned int status_change;
unsigned int status = atmel_port->irq_status;
unsigned int status_change = atmel_port->status_change;
/* The interrupt handler does not take the lock */
spin_lock(&port->lock);
atmel_port->schedule_tx(port);
status = atmel_port->irq_status;
status_change = status ^ atmel_port->irq_status_prev;
if (status_change & (ATMEL_US_RI | ATMEL_US_DSR
| ATMEL_US_DCD | ATMEL_US_CTS)) {
/* TODO: All reads to CSR will clear these interrupts! */
@ -1548,7 +1547,7 @@ static void atmel_tasklet_func(unsigned long data)
wake_up_interruptible(&port->state->port.delta_msr_wait);
atmel_port->irq_status_prev = status;
atmel_port->status_change = 0;
}
atmel_port->schedule_rx(port);
@ -2061,7 +2060,6 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
/* mode */
if (port->rs485.flags & SER_RS485_ENABLED) {
if ((port->rs485.delay_rts_after_send) > 0)
UART_PUT_TTGR(port, port->rs485.delay_rts_after_send);
mode |= ATMEL_US_USMODE_RS485;
} else if (termios->c_cflag & CRTSCTS) {

@ -74,8 +74,8 @@ static void bfin_serial_tx_chars(struct bfin_serial_port *uart);
static void bfin_serial_reset_irda(struct uart_port *port);
#if defined(CONFIG_SERIAL_BFIN_CTSRTS) || \
defined(CONFIG_SERIAL_BFIN_HARD_CTSRTS)
#if defined(SERIAL_BFIN_CTSRTS) || \
defined(SERIAL_BFIN_HARD_CTSRTS)
static unsigned int bfin_serial_get_mctrl(struct uart_port *port)
{
struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
@ -110,7 +110,7 @@ static irqreturn_t bfin_serial_mctrl_cts_int(int irq, void *dev_id)
struct bfin_serial_port *uart = dev_id;
struct uart_port *uport = &uart->port;
unsigned int status = bfin_serial_get_mctrl(uport);
#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
#ifdef SERIAL_BFIN_HARD_CTSRTS
UART_CLEAR_SCTS(uart);
if (uport->hw_stopped) {
@ -700,7 +700,7 @@ static int bfin_serial_startup(struct uart_port *port)
# endif
#endif
#ifdef CONFIG_SERIAL_BFIN_CTSRTS
#ifdef SERIAL_BFIN_CTSRTS
if (uart->cts_pin >= 0) {
if (request_irq(gpio_to_irq(uart->cts_pin),
bfin_serial_mctrl_cts_int,
@ -718,7 +718,7 @@ static int bfin_serial_startup(struct uart_port *port)
gpio_direction_output(uart->rts_pin, 0);
}
#endif
#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
#ifdef SERIAL_BFIN_HARD_CTSRTS
if (uart->cts_pin >= 0) {
if (request_irq(uart->status_irq, bfin_serial_mctrl_cts_int,
0, "BFIN_UART_MODEM_STATUS", uart)) {
@ -766,13 +766,13 @@ static void bfin_serial_shutdown(struct uart_port *port)
free_irq(uart->tx_irq, uart);
#endif
#ifdef CONFIG_SERIAL_BFIN_CTSRTS
#ifdef SERIAL_BFIN_CTSRTS
if (uart->cts_pin >= 0)
free_irq(gpio_to_irq(uart->cts_pin), uart);
if (uart->rts_pin >= 0)
gpio_free(uart->rts_pin);
#endif
#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
#ifdef SERIAL_BFIN_HARD_CTSRTS
if (uart->cts_pin >= 0)
free_irq(uart->status_irq, uart);
#endif
@ -788,7 +788,7 @@ bfin_serial_set_termios(struct uart_port *port, struct ktermios *termios,
unsigned int ier, lcr = 0;
unsigned long timeout;
#ifdef CONFIG_SERIAL_BFIN_CTSRTS
#ifdef SERIAL_BFIN_CTSRTS
if (old == NULL && uart->cts_pin != -1)
termios->c_cflag |= CRTSCTS;
else if (uart->cts_pin == -1)
@ -1110,8 +1110,8 @@ bfin_serial_console_setup(struct console *co, char *options)
int baud = 57600;
int bits = 8;
int parity = 'n';
# if defined(CONFIG_SERIAL_BFIN_CTSRTS) || \
defined(CONFIG_SERIAL_BFIN_HARD_CTSRTS)
# if defined(SERIAL_BFIN_CTSRTS) || \
defined(SERIAL_BFIN_HARD_CTSRTS)
int flow = 'r';
# else
int flow = 'n';
@ -1322,8 +1322,8 @@ static int bfin_serial_probe(struct platform_device *pdev)
init_timer(&(uart->rx_dma_timer));
#endif
#if defined(CONFIG_SERIAL_BFIN_CTSRTS) || \
defined(CONFIG_SERIAL_BFIN_HARD_CTSRTS)
#if defined(SERIAL_BFIN_CTSRTS) || \
defined(SERIAL_BFIN_HARD_CTSRTS)
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
if (res == NULL)
uart->cts_pin = -1;

@ -56,10 +56,6 @@ static char *serial_version = "$Revision: 1.25 $";
#error "RX_TIMEOUT_TICKS == 0 not allowed, use 1"
#endif
#if defined(CONFIG_ETRAX_RS485_ON_PA) && defined(CONFIG_ETRAX_RS485_ON_PORT_G)
#error "Disable either CONFIG_ETRAX_RS485_ON_PA or CONFIG_ETRAX_RS485_ON_PORT_G"
#endif
/*
* All of the compatibility code so we can compile serial.c against
* older kernels is hidden in serial_compat.h
@ -455,30 +451,6 @@ static struct e100_serial rs_table[] = {
static struct fast_timer fast_timers[NR_PORTS];
#endif
#ifdef CONFIG_ETRAX_SERIAL_PROC_ENTRY
#define PROCSTAT(x) x
struct ser_statistics_type {
int overrun_cnt;
int early_errors_cnt;
int ser_ints_ok_cnt;
int errors_cnt;
unsigned long int processing_flip;
unsigned long processing_flip_still_room;
unsigned long int timeout_flush_cnt;
int rx_dma_ints;
int tx_dma_ints;
int rx_tot;
int tx_tot;
};
static struct ser_statistics_type ser_stat[NR_PORTS];
#else
#define PROCSTAT(x)
#endif /* CONFIG_ETRAX_SERIAL_PROC_ENTRY */
/* RS-485 */
#if defined(CONFIG_ETRAX_RS485)
#ifdef CONFIG_ETRAX_FAST_TIMER
@ -487,9 +459,6 @@ static struct fast_timer fast_timers_rs485[NR_PORTS];
#if defined(CONFIG_ETRAX_RS485_ON_PA)
static int rs485_pa_bit = CONFIG_ETRAX_RS485_ON_PA_BIT;
#endif
#if defined(CONFIG_ETRAX_RS485_ON_PORT_G)
static int rs485_port_g_bit = CONFIG_ETRAX_RS485_ON_PORT_G_BIT;
#endif
#endif
/* Info and macros needed for each port's extra control/status signals. */
@ -739,10 +708,10 @@ static unsigned char dummy_ser[NR_PORTS] = {0xFF, 0xFF, 0xFF,0xFF};
defined(CONFIG_ETRAX_SER1_DTR_RI_DSR_CD_MIXED) || \
defined(CONFIG_ETRAX_SER2_DTR_RI_DSR_CD_MIXED) || \
defined(CONFIG_ETRAX_SER3_DTR_RI_DSR_CD_MIXED)
#define CONFIG_ETRAX_SERX_DTR_RI_DSR_CD_MIXED
#define ETRAX_SERX_DTR_RI_DSR_CD_MIXED
#endif
#ifdef CONFIG_ETRAX_SERX_DTR_RI_DSR_CD_MIXED
#ifdef ETRAX_SERX_DTR_RI_DSR_CD_MIXED
/* The pins can be mixed on PA and PB */
#define CONTROL_PINS_PORT_NOT_USED(line) \
&dummy_ser[line], &dummy_ser[line], \
@ -835,7 +804,7 @@ static const struct control_pins e100_modem_pins[NR_PORTS] =
#endif
}
};
#else /* CONFIG_ETRAX_SERX_DTR_RI_DSR_CD_MIXED */
#else /* ETRAX_SERX_DTR_RI_DSR_CD_MIXED */
/* All pins are on either PA or PB for each serial port */
#define CONTROL_PINS_PORT_NOT_USED(line) \
@ -917,7 +886,7 @@ static const struct control_pins e100_modem_pins[NR_PORTS] =
#endif
}
};
#endif /* !CONFIG_ETRAX_SERX_DTR_RI_DSR_CD_MIXED */
#endif /* !ETRAX_SERX_DTR_RI_DSR_CD_MIXED */
#define E100_RTS_MASK 0x20
#define E100_CTS_MASK 0x40
@ -1367,16 +1336,6 @@ e100_enable_rs485(struct tty_struct *tty, struct serial_rs485 *r)
#if defined(CONFIG_ETRAX_RS485_ON_PA)
*R_PORT_PA_DATA = port_pa_data_shadow |= (1 << rs485_pa_bit);
#endif
#if defined(CONFIG_ETRAX_RS485_ON_PORT_G)
REG_SHADOW_SET(R_PORT_G_DATA, port_g_data_shadow,
rs485_port_g_bit, 1);
#endif
#if defined(CONFIG_ETRAX_RS485_LTC1387)
REG_SHADOW_SET(R_PORT_G_DATA, port_g_data_shadow,
CONFIG_ETRAX_RS485_LTC1387_DXEN_PORT_G_BIT, 1);
REG_SHADOW_SET(R_PORT_G_DATA, port_g_data_shadow,
CONFIG_ETRAX_RS485_LTC1387_RXEN_PORT_G_BIT, 1);
#endif
info->rs485 = *r;
@ -1676,7 +1635,8 @@ alloc_recv_buffer(unsigned int size)
{
struct etrax_recv_buffer *buffer;
if (!(buffer = kmalloc(sizeof *buffer + size, GFP_ATOMIC)))
buffer = kmalloc(sizeof *buffer + size, GFP_ATOMIC);
if (!buffer)
return NULL;
buffer->next = NULL;
@ -1712,7 +1672,8 @@ add_char_and_flag(struct e100_serial *info, unsigned char data, unsigned char fl
{
struct etrax_recv_buffer *buffer;
if (info->uses_dma_in) {
if (!(buffer = alloc_recv_buffer(4)))
buffer = alloc_recv_buffer(4);
if (!buffer)
return 0;
buffer->length = 1;
@ -1750,7 +1711,8 @@ static unsigned int handle_descr_data(struct e100_serial *info,
append_recv_buffer(info, buffer);
if (!(buffer = alloc_recv_buffer(SERIAL_DESCR_BUF_SIZE)))
buffer = alloc_recv_buffer(SERIAL_DESCR_BUF_SIZE);
if (!buffer)
panic("%s: Failed to allocate memory for receive buffer!\n", __func__);
descr->buf = virt_to_phys(buffer->buffer);
@ -1841,7 +1803,6 @@ static void receive_chars_dma(struct e100_serial *info)
*/
unsigned char data = info->ioport[REG_DATA];
PROCSTAT(ser_stat[info->line].errors_cnt++);
DEBUG_LOG(info->line, "#dERR: s d 0x%04X\n",
((rstat & SER_ERROR_MASK) << 8) | data);
@ -1867,7 +1828,8 @@ static int start_recv_dma(struct e100_serial *info)
/* Set up the receiving descriptors */
for (i = 0; i < SERIAL_RECV_DESCRIPTORS; i++) {
if (!(buffer = alloc_recv_buffer(SERIAL_DESCR_BUF_SIZE)))
buffer = alloc_recv_buffer(SERIAL_DESCR_BUF_SIZE);
if (!buffer)
panic("%s: Failed to allocate memory for receive buffer!\n", __func__);
descr[i].ctrl = d_int;
@ -1943,7 +1905,6 @@ tr_interrupt(int irq, void *dev_id)
/* Read jiffies_usec first,
* we want this time to be as late as possible
*/
PROCSTAT(ser_stat[info->line].tx_dma_ints++);
info->last_tx_active_usec = GET_JIFFIES_USEC();
info->last_tx_active = jiffies;
transmit_chars_dma(info);
@ -2022,7 +1983,6 @@ static int force_eop_if_needed(struct e100_serial *info)
*/
if (!info->forced_eop) {
info->forced_eop = 1;
PROCSTAT(ser_stat[info->line].timeout_flush_cnt++);
TIMERD(DEBUG_LOG(info->line, "timeout EOP %i\n", info->line));
FORCE_EOP(info);
}
@ -2374,7 +2334,6 @@ static void handle_ser_rx_interrupt(struct e100_serial *info)
DEBUG_LOG(info->line, "#iERR s d %04X\n",
((rstat & SER_ERROR_MASK) << 8) | data);
}
PROCSTAT(ser_stat[info->line].early_errors_cnt++);
} else { /* It was a valid byte, now let the DMA do the rest */
unsigned long curr_time_u = GET_JIFFIES_USEC();
unsigned long curr_time = jiffies;
@ -2407,7 +2366,6 @@ static void handle_ser_rx_interrupt(struct e100_serial *info)
DINTR2(DEBUG_LOG(info->line, "ser_rx OK %d\n", info->line));
info->break_detected_cnt = 0;
PROCSTAT(ser_stat[info->line].ser_ints_ok_cnt++);
}
/* Restarting the DMA never hurts */
*info->icmdadr = IO_STATE(R_DMA_CH6_CMD, cmd, restart);
@ -2867,19 +2825,6 @@ change_speed(struct e100_serial *info)
*R_SERIAL_PRESCALE = divisor;
info->baud = SERIAL_PRESCALE_BASE/divisor;
}
#ifdef CONFIG_ETRAX_EXTERN_PB6CLK_ENABLED
else if ((info->baud_base==CONFIG_ETRAX_EXTERN_PB6CLK_FREQ/8 &&
info->custom_divisor == 1) ||
(info->baud_base==CONFIG_ETRAX_EXTERN_PB6CLK_FREQ &&
info->custom_divisor == 8)) {
/* ext_clk selected */
alt_source =
IO_STATE(R_ALT_SER_BAUDRATE, ser0_rec, extern) |
IO_STATE(R_ALT_SER_BAUDRATE, ser0_tr, extern);
DBAUD(printk("using external baudrate: %lu\n", CONFIG_ETRAX_EXTERN_PB6CLK_FREQ/8));
info->baud = CONFIG_ETRAX_EXTERN_PB6CLK_FREQ/8;
}
#endif
else
{
/* Bad baudbase, we don't support using timer0
@ -3216,9 +3161,7 @@ rs_throttle(struct tty_struct * tty)
{
struct e100_serial *info = (struct e100_serial *)tty->driver_data;
#ifdef SERIAL_DEBUG_THROTTLE
char buf[64];
printk("throttle %s: %lu....\n", tty_name(tty, buf),
printk("throttle %s: %lu....\n", tty_name(tty),
(unsigned long)tty->ldisc.chars_in_buffer(tty));
#endif
DFLOW(DEBUG_LOG(info->line,"rs_throttle %lu\n", tty->ldisc.chars_in_buffer(tty)));
@ -3238,9 +3181,7 @@ rs_unthrottle(struct tty_struct * tty)
{
struct e100_serial *info = (struct e100_serial *)tty->driver_data;
#ifdef SERIAL_DEBUG_THROTTLE
char buf[64];
printk("unthrottle %s: %lu....\n", tty_name(tty, buf),
printk("unthrottle %s: %lu....\n", tty_name(tty),
(unsigned long)tty->ldisc.chars_in_buffer(tty));
#endif
DFLOW(DEBUG_LOG(info->line,"rs_unthrottle ldisc %d\n", tty->ldisc.chars_in_buffer(tty)));
@ -3724,16 +3665,6 @@ rs_close(struct tty_struct *tty, struct file * filp)
info->rs485.flags &= ~(SER_RS485_ENABLED);
#if defined(CONFIG_ETRAX_RS485_ON_PA)
*R_PORT_PA_DATA = port_pa_data_shadow &= ~(1 << rs485_pa_bit);
#endif
#if defined(CONFIG_ETRAX_RS485_ON_PORT_G)
REG_SHADOW_SET(R_PORT_G_DATA, port_g_data_shadow,
rs485_port_g_bit, 0);
#endif
#if defined(CONFIG_ETRAX_RS485_LTC1387)
REG_SHADOW_SET(R_PORT_G_DATA, port_g_data_shadow,
CONFIG_ETRAX_RS485_LTC1387_DXEN_PORT_G_BIT, 0);
REG_SHADOW_SET(R_PORT_G_DATA, port_g_data_shadow,
CONFIG_ETRAX_RS485_LTC1387_RXEN_PORT_G_BIT, 0);
#endif
}
#endif
@ -4263,15 +4194,6 @@ static int __init rs_init(void)
return -EBUSY;
}
#endif
#if defined(CONFIG_ETRAX_RS485_ON_PORT_G)
if (cris_io_interface_allocate_pins(if_serial_0, 'g', rs485_pa_bit,
rs485_port_g_bit)) {
printk(KERN_ERR "ETRAX100LX serial: Could not allocate "
"RS485 pin\n");
put_tty_driver(driver);
return -EBUSY;
}
#endif
#endif
/* Initialize the tty_driver structure */

@ -72,6 +72,7 @@ static int __init parse_options(struct earlycon_device *device, char *options)
switch (port->iotype) {
case UPIO_MEM32:
case UPIO_MEM32BE:
port->regshift = 2; /* fall-through */
case UPIO_MEM:
port->mapbase = addr;
@ -90,9 +91,11 @@ static int __init parse_options(struct earlycon_device *device, char *options)
strlcpy(device->options, options, length);
}
if (port->iotype == UPIO_MEM || port->iotype == UPIO_MEM32)
if (port->iotype == UPIO_MEM || port->iotype == UPIO_MEM32 ||
port->iotype == UPIO_MEM32BE)
pr_info("Early serial console at MMIO%s 0x%llx (options '%s')\n",
(port->iotype == UPIO_MEM32) ? "32" : "",
(port->iotype == UPIO_MEM) ? "" :
(port->iotype == UPIO_MEM32) ? "32" : "32be",
(unsigned long long)port->mapbase,
device->options);
else
@ -133,7 +136,7 @@ static int __init register_earlycon(char *buf, const struct earlycon_id *match)
*
* Registers the earlycon console matching the earlycon specified
* in the param string @buf. Acceptable param strings are of the form
* <name>,io|mmio|mmio32,<addr>,<options>
* <name>,io|mmio|mmio32|mmio32be,<addr>,<options>
* <name>,0x<addr>,<options>
* <name>,<options>
* <name>
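So, with the new big-endian accessor, an early console on a big-endian-mapped MMIO UART could be requested from the kernel command line with something like the line below (the address is a placeholder, not taken from this patch):

        earlycon=uart8250,mmio32be,0xfe001000,115200n8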

@ -1504,7 +1504,8 @@ static int icom_probe(struct pci_dev *dev,
return retval;
}
if ( (retval = pci_request_regions(dev, "icom"))) {
retval = pci_request_regions(dev, "icom");
if (retval) {
dev_err(&dev->dev, "pci_request_regions FAILED\n");
pci_disable_device(dev);
return retval;
@ -1512,7 +1513,8 @@ static int icom_probe(struct pci_dev *dev,
pci_set_master(dev);
if ( (retval = pci_read_config_dword(dev, PCI_COMMAND, &command_reg))) {
retval = pci_read_config_dword(dev, PCI_COMMAND, &command_reg);
if (retval) {
dev_err(&dev->dev, "PCI Config read FAILED\n");
return retval;
}
@ -1556,9 +1558,8 @@ static int icom_probe(struct pci_dev *dev,
}
/* save off irq and request irq line */
if ( (retval = request_irq(dev->irq, icom_interrupt,
IRQF_SHARED, ICOM_DRIVER_NAME,
(void *) icom_adapter))) {
retval = request_irq(dev->irq, icom_interrupt, IRQF_SHARED, ICOM_DRIVER_NAME, (void *)icom_adapter);
if (retval) {
goto probe_exit2;
}

@ -1175,7 +1175,7 @@ static int ifx_spi_spi_probe(struct spi_device *spi)
ret = request_irq(gpio_to_irq(ifx_dev->gpio.reset_out),
ifx_spi_reset_interrupt,
IRQF_TRIGGER_RISING|IRQF_TRIGGER_FALLING, DRVNAME,
(void *)ifx_dev);
ifx_dev);
if (ret) {
dev_err(&spi->dev, "Unable to get irq %x\n",
gpio_to_irq(ifx_dev->gpio.reset_out));
@ -1185,9 +1185,8 @@ static int ifx_spi_spi_probe(struct spi_device *spi)
ret = ifx_spi_reset(ifx_dev);
ret = request_irq(gpio_to_irq(ifx_dev->gpio.srdy),
ifx_spi_srdy_interrupt,
IRQF_TRIGGER_RISING, DRVNAME,
(void *)ifx_dev);
ifx_spi_srdy_interrupt, IRQF_TRIGGER_RISING, DRVNAME,
ifx_dev);
if (ret) {
dev_err(&spi->dev, "Unable to get irq %x",
gpio_to_irq(ifx_dev->gpio.srdy));
@ -1212,7 +1211,7 @@ static int ifx_spi_spi_probe(struct spi_device *spi)
return 0;
error_ret7:
free_irq(gpio_to_irq(ifx_dev->gpio.reset_out), (void *)ifx_dev);
free_irq(gpio_to_irq(ifx_dev->gpio.reset_out), ifx_dev);
error_ret6:
gpio_free(ifx_dev->gpio.srdy);
error_ret5:
@ -1243,8 +1242,8 @@ static int ifx_spi_spi_remove(struct spi_device *spi)
/* stop activity */
tasklet_kill(&ifx_dev->io_work_tasklet);
/* free irq */
free_irq(gpio_to_irq(ifx_dev->gpio.reset_out), (void *)ifx_dev);
free_irq(gpio_to_irq(ifx_dev->gpio.srdy), (void *)ifx_dev);
free_irq(gpio_to_irq(ifx_dev->gpio.reset_out), ifx_dev);
free_irq(gpio_to_irq(ifx_dev->gpio.srdy), ifx_dev);
gpio_free(ifx_dev->gpio.srdy);
gpio_free(ifx_dev->gpio.mrdy);
@ -1381,7 +1380,7 @@ static void __exit ifx_spi_exit(void)
/* unregister */
tty_unregister_driver(tty_drv);
put_tty_driver(tty_drv);
spi_unregister_driver((void *)&ifx_spi_driver);
spi_unregister_driver(&ifx_spi_driver);
unregister_reboot_notifier(&ifx_modem_reboot_notifier_block);
}
@ -1420,7 +1419,7 @@ static int __init ifx_spi_init(void)
goto err_free_tty;
}
result = spi_register_driver((void *)&ifx_spi_driver);
result = spi_register_driver(&ifx_spi_driver);
if (result) {
pr_err("%s: spi_register_driver failed(%d)",
DRVNAME, result);
@ -1436,7 +1435,7 @@ static int __init ifx_spi_init(void)
return 0;
err_unreg_spi:
spi_unregister_driver((void *)&ifx_spi_driver);
spi_unregister_driver(&ifx_spi_driver);
err_unreg_tty:
tty_unregister_driver(tty_drv);
err_free_tty:

@ -239,7 +239,7 @@ static struct imx_uart_data imx_uart_devdata[] = {
},
};
static struct platform_device_id imx_uart_devtype[] = {
static const struct platform_device_id imx_uart_devtype[] = {
{
.name = "imx1-uart",
.driver_data = (kernel_ulong_t) &imx_uart_devdata[IMX1_UART],
@ -853,7 +853,7 @@ static void imx_break_ctl(struct uart_port *port, int break_state)
#define TXTL 2 /* reset default */
#define RXTL 1 /* reset default */
static int imx_setup_ufcr(struct imx_port *sport, unsigned int mode)
static void imx_setup_ufcr(struct imx_port *sport, unsigned int mode)
{
unsigned int val;
@ -861,7 +861,6 @@ static int imx_setup_ufcr(struct imx_port *sport, unsigned int mode)
val = readl(sport->port.membase + UFCR) & (UFCR_RFDIV | UFCR_DCEDTE);
val |= TXTL << UFCR_TXTL_SHF | RXTL;
writel(val, sport->port.membase + UFCR);
return 0;
}
#define RX_BUF_SIZE (PAGE_SIZE)
@ -1122,6 +1121,12 @@ static int imx_startup(struct uart_port *port)
writel(temp & ~UCR4_DREN, sport->port.membase + UCR4);
/* Can we enable the DMA support? */
if (is_imx6q_uart(sport) && !uart_console(port) &&
!sport->dma_is_inited)
imx_uart_dma_init(sport);
spin_lock_irqsave(&sport->port.lock, flags);
/* Reset fifo's and state machines */
i = 100;
@ -1132,13 +1137,6 @@ static int imx_startup(struct uart_port *port)
while (!(readl(sport->port.membase + UCR2) & UCR2_SRST) && (--i > 0))
udelay(1);
/* Can we enable the DMA support? */
if (is_imx6q_uart(sport) && !uart_console(port) &&
!sport->dma_is_inited)
imx_uart_dma_init(sport);
spin_lock_irqsave(&sport->port.lock, flags);
/*
* Finally, clear and enable interrupts
*/


@ -2137,7 +2137,8 @@ ioc3uart_probe(struct ioc3_submodule *is, struct ioc3_driver_data *idd)
/* register port with the serial core */
if ((ret = ioc3_serial_core_attach(is, idd)))
ret = ioc3_serial_core_attach(is, idd);
if (ret)
goto out4;
Num_of_ioc3_cards++;


@ -1011,7 +1011,8 @@ static irqreturn_t ioc4_intr(int irq, void *arg)
*/
for (xx = 0; xx < num_intrs; xx++) {
intr_info = &soft->is_intr_type[intr_type].is_intr_info[xx];
if ((this_mir = this_ir & intr_info->sd_bits)) {
this_mir = this_ir & intr_info->sd_bits;
if (this_mir) {
/* Disable owned interrupts, call handler */
handled++;
write_ireg(soft, intr_info->sd_bits, IOC4_W_IEC,
@ -2865,10 +2866,12 @@ ioc4_serial_attach_one(struct ioc4_driver_data *idd)
/* register port with the serial core - 1 rs232, 1 rs422 */
if ((ret = ioc4_serial_core_attach(idd->idd_pdev, PROTO_RS232)))
ret = ioc4_serial_core_attach(idd->idd_pdev, PROTO_RS232);
if (ret)
goto out4;
if ((ret = ioc4_serial_core_attach(idd->idd_pdev, PROTO_RS422)))
ret = ioc4_serial_core_attach(idd->idd_pdev, PROTO_RS422);
if (ret)
goto out5;
Num_of_ioc4_cards++;


@ -173,18 +173,18 @@ static int kgdb_nmi_poll_one_knock(void)
bool kgdb_nmi_poll_knock(void)
{
if (kgdb_nmi_knock < 0)
return 1;
return true;
while (1) {
int ret;
ret = kgdb_nmi_poll_one_knock();
if (ret == NO_POLL_CHAR)
return 0;
return false;
else if (ret == 1)
break;
}
return 1;
return true;
}
/*


@ -597,7 +597,7 @@ console_initcall(mcf_console_init);
#define MCF_CONSOLE NULL
/****************************************************************************/
#endif /* CONFIG_MCF_CONSOLE */
#endif /* CONFIG_SERIAL_MCF_CONSOLE */
/****************************************************************************/
/*


@ -370,7 +370,7 @@ static int meson_uart_verify_port(struct uart_port *port,
static void meson_uart_release_port(struct uart_port *port)
{
if (port->flags & UPF_IOREMAP) {
iounmap(port->membase);
devm_iounmap(port->dev, port->membase);
port->membase = NULL;
}
}


@ -405,7 +405,7 @@ static struct psc_ops mpc5200b_psc_ops = {
.get_mr1 = mpc52xx_psc_get_mr1,
};
#endif /* CONFIG_MPC52xx */
#endif /* CONFIG_PPC_MPC52xx */
#ifdef CONFIG_PPC_MPC512x
#define FIFO_512x(port) ((struct mpc512x_psc_fifo __iomem *)(PSC(port)+1))


@ -913,7 +913,8 @@ static int mpsc_make_ready(struct mpsc_port_info *pi)
if (!pi->ready) {
mpsc_init_hw(pi);
if ((rc = mpsc_alloc_ring_mem(pi)))
rc = mpsc_alloc_ring_mem(pi);
if (rc)
return rc;
mpsc_init_rings(pi);
pi->ready = 1;
@ -1895,7 +1896,8 @@ static int mpsc_shared_drv_probe(struct platform_device *dev)
int rc = -ENODEV;
if (dev->id == 0) {
if (!(rc = mpsc_shared_map_regs(dev))) {
rc = mpsc_shared_map_regs(dev);
if (!rc) {
pdata = (struct mpsc_shared_pdata *)
dev_get_platdata(&dev->dev);
@ -2081,14 +2083,16 @@ static int mpsc_drv_probe(struct platform_device *dev)
if (dev->id < MPSC_NUM_CTLRS) {
pi = &mpsc_ports[dev->id];
if (!(rc = mpsc_drv_map_regs(pi, dev))) {
rc = mpsc_drv_map_regs(pi, dev);
if (!rc) {
mpsc_drv_get_platform_data(pi, dev, dev->id);
pi->port.dev = &dev->dev;
if (!(rc = mpsc_make_ready(pi))) {
rc = mpsc_make_ready(pi);
if (!rc) {
spin_lock_init(&pi->tx_lock);
if (!(rc = uart_add_one_port(&mpsc_reg,
&pi->port))) {
rc = uart_add_one_port(&mpsc_reg, &pi->port);
if (!rc) {
rc = 0;
} else {
mpsc_release_port((struct uart_port *)
@ -2136,9 +2140,12 @@ static int __init mpsc_drv_init(void)
memset(mpsc_ports, 0, sizeof(mpsc_ports));
memset(&mpsc_shared_regs, 0, sizeof(mpsc_shared_regs));
if (!(rc = uart_register_driver(&mpsc_reg))) {
if (!(rc = platform_driver_register(&mpsc_shared_driver))) {
if ((rc = platform_driver_register(&mpsc_driver))) {
rc = uart_register_driver(&mpsc_reg);
if (!rc) {
rc = platform_driver_register(&mpsc_shared_driver);
if (!rc) {
rc = platform_driver_register(&mpsc_driver);
if (rc) {
platform_driver_unregister(&mpsc_shared_driver);
uart_unregister_driver(&mpsc_reg);
}


@ -1,232 +0,0 @@
/*
* Copyright (C) 2007 Google, Inc.
* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
* Author: Brian Swetland <swetland@google.com>
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/wait.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <mach/msm_smd.h>
#define MAX_SMD_TTYS 32
struct smd_tty_info {
struct tty_port port;
smd_channel_t *ch;
};
struct smd_tty_channel_desc {
int id;
const char *name;
};
static struct smd_tty_info smd_tty[MAX_SMD_TTYS];
static const struct smd_tty_channel_desc smd_default_tty_channels[] = {
{ .id = 0, .name = "SMD_DS" },
{ .id = 27, .name = "SMD_GPSNMEA" },
};
static const struct smd_tty_channel_desc *smd_tty_channels =
smd_default_tty_channels;
static int smd_tty_channels_len = ARRAY_SIZE(smd_default_tty_channels);
static void smd_tty_notify(void *priv, unsigned event)
{
unsigned char *ptr;
int avail;
struct smd_tty_info *info = priv;
struct tty_struct *tty;
if (event != SMD_EVENT_DATA)
return;
tty = tty_port_tty_get(&info->port);
if (!tty)
return;
for (;;) {
if (test_bit(TTY_THROTTLED, &tty->flags))
break;
avail = smd_read_avail(info->ch);
if (avail == 0)
break;
avail = tty_prepare_flip_string(&info->port, &ptr, avail);
if (smd_read(info->ch, ptr, avail) != avail) {
/* shouldn't be possible since we're in interrupt
** context here and nobody else could 'steal' our
** characters.
*/
pr_err("OOPS - smd_tty_buffer mismatch?!");
}
tty_flip_buffer_push(&info->port);
}
/* XXX only when writable and necessary */
tty_wakeup(tty);
tty_kref_put(tty);
}
static int smd_tty_port_activate(struct tty_port *tport, struct tty_struct *tty)
{
struct smd_tty_info *info = container_of(tport, struct smd_tty_info,
port);
int i, res = 0;
const char *name = NULL;
for (i = 0; i < smd_tty_channels_len; i++) {
if (smd_tty_channels[i].id == tty->index) {
name = smd_tty_channels[i].name;
break;
}
}
if (!name)
return -ENODEV;
if (info->ch)
smd_kick(info->ch);
else
res = smd_open(name, &info->ch, info, smd_tty_notify);
if (!res)
tty->driver_data = info;
return res;
}
static void smd_tty_port_shutdown(struct tty_port *tport)
{
struct smd_tty_info *info = container_of(tport, struct smd_tty_info,
port);
if (info->ch) {
smd_close(info->ch);
info->ch = 0;
}
}
static int smd_tty_open(struct tty_struct *tty, struct file *f)
{
struct smd_tty_info *info = smd_tty + tty->index;
return tty_port_open(&info->port, tty, f);
}
static void smd_tty_close(struct tty_struct *tty, struct file *f)
{
struct smd_tty_info *info = tty->driver_data;
tty_port_close(&info->port, tty, f);
}
static int smd_tty_write(struct tty_struct *tty,
const unsigned char *buf, int len)
{
struct smd_tty_info *info = tty->driver_data;
int avail;
/* if we're writing to a packet channel we will
** never be able to write more data than there
** is currently space for
*/
avail = smd_write_avail(info->ch);
if (len > avail)
len = avail;
return smd_write(info->ch, buf, len);
}
static int smd_tty_write_room(struct tty_struct *tty)
{
struct smd_tty_info *info = tty->driver_data;
return smd_write_avail(info->ch);
}
static int smd_tty_chars_in_buffer(struct tty_struct *tty)
{
struct smd_tty_info *info = tty->driver_data;
return smd_read_avail(info->ch);
}
static void smd_tty_unthrottle(struct tty_struct *tty)
{
struct smd_tty_info *info = tty->driver_data;
smd_kick(info->ch);
}
static const struct tty_port_operations smd_tty_port_ops = {
.shutdown = smd_tty_port_shutdown,
.activate = smd_tty_port_activate,
};
static const struct tty_operations smd_tty_ops = {
.open = smd_tty_open,
.close = smd_tty_close,
.write = smd_tty_write,
.write_room = smd_tty_write_room,
.chars_in_buffer = smd_tty_chars_in_buffer,
.unthrottle = smd_tty_unthrottle,
};
static struct tty_driver *smd_tty_driver;
static int __init smd_tty_init(void)
{
int ret, i;
smd_tty_driver = alloc_tty_driver(MAX_SMD_TTYS);
if (smd_tty_driver == 0)
return -ENOMEM;
smd_tty_driver->driver_name = "smd_tty_driver";
smd_tty_driver->name = "smd";
smd_tty_driver->major = 0;
smd_tty_driver->minor_start = 0;
smd_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
smd_tty_driver->subtype = SERIAL_TYPE_NORMAL;
smd_tty_driver->init_termios = tty_std_termios;
smd_tty_driver->init_termios.c_iflag = 0;
smd_tty_driver->init_termios.c_oflag = 0;
smd_tty_driver->init_termios.c_cflag = B38400 | CS8 | CREAD;
smd_tty_driver->init_termios.c_lflag = 0;
smd_tty_driver->flags = TTY_DRIVER_RESET_TERMIOS |
TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
tty_set_operations(smd_tty_driver, &smd_tty_ops);
ret = tty_register_driver(smd_tty_driver);
if (ret)
return ret;
for (i = 0; i < smd_tty_channels_len; i++) {
struct tty_port *port = &smd_tty[smd_tty_channels[i].id].port;
tty_port_init(port);
port->ops = &smd_tty_port_ops;
tty_port_register_device(port, smd_tty_driver,
smd_tty_channels[i].id, NULL);
}
return 0;
}
module_init(smd_tty_init);


@ -169,7 +169,7 @@ struct mxs_auart_port {
bool ms_irq_enabled;
};
static struct platform_device_id mxs_auart_devtype[] = {
static const struct platform_device_id mxs_auart_devtype[] = {
{ .name = "mxs-auart-imx23", .driver_data = IMX23_AUART },
{ .name = "mxs-auart-imx28", .driver_data = IMX28_AUART },
{ /* sentinel */ }


@ -67,14 +67,17 @@ static int of_platform_serial_setup(struct platform_device *ofdev,
if (of_property_read_u32(np, "clock-frequency", &clk)) {
/* Get clk rate through clk driver if present */
info->clk = clk_get(&ofdev->dev, NULL);
info->clk = devm_clk_get(&ofdev->dev, NULL);
if (IS_ERR(info->clk)) {
dev_warn(&ofdev->dev,
"clk or clock-frequency not defined\n");
return PTR_ERR(info->clk);
}
clk_prepare_enable(info->clk);
ret = clk_prepare_enable(info->clk);
if (ret < 0)
return ret;
clk = clk_get_rate(info->clk);
}
/* If current-speed was set, then try not to change it. */
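Taken together, the hunk above switches the clock lookup to the managed devm_clk_get() call and stops ignoring the result of clk_prepare_enable(). A condensed sketch of that pattern, with an invented helper name and no driver specifics:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/* Sketch: resolve a device's clock and report its rate (illustrative only). */
static int demo_get_uartclk(struct device *dev, unsigned long *rate)
{
	struct clk *clk;
	int ret;

	clk = devm_clk_get(dev, NULL);	/* released automatically on unbind */
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare_enable(clk);	/* may fail, so the result is checked */
	if (ret < 0)
		return ret;

	*rate = clk_get_rate(clk);
	return 0;
}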
@ -188,7 +191,6 @@ static int of_platform_serial_probe(struct platform_device *ofdev)
{
struct uart_8250_port port8250;
memset(&port8250, 0, sizeof(port8250));
port.type = port_type;
port8250.port = port;
if (port.fifosize)


@ -38,6 +38,7 @@
#include <linux/serial_core.h>
#include <linux/irq.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/gpio.h>
@ -160,7 +161,6 @@ struct uart_omap_port {
unsigned long port_activity;
int context_loss_cnt;
u32 errata;
u8 wakeups_enabled;
u32 features;
int rts_gpio;
@ -209,28 +209,11 @@ static int serial_omap_get_context_loss_count(struct uart_omap_port *up)
return pdata->get_context_loss_count(up->dev);
}
static inline void serial_omap_enable_wakeirq(struct uart_omap_port *up,
bool enable)
{
if (!up->wakeirq)
return;
if (enable)
enable_irq(up->wakeirq);
else
disable_irq_nosync(up->wakeirq);
}
/* REVISIT: Remove this when omap3 boots in device tree only mode */
static void serial_omap_enable_wakeup(struct uart_omap_port *up, bool enable)
{
struct omap_uart_port_info *pdata = dev_get_platdata(up->dev);
if (enable == up->wakeups_enabled)
return;
serial_omap_enable_wakeirq(up, enable);
up->wakeups_enabled = enable;
if (!pdata || !pdata->enable_wakeup)
return;
@ -750,13 +733,11 @@ static int serial_omap_startup(struct uart_port *port)
/* Optional wake-up IRQ */
if (up->wakeirq) {
retval = request_irq(up->wakeirq, serial_omap_irq,
up->port.irqflags, up->name, up);
retval = dev_pm_set_dedicated_wake_irq(up->dev, up->wakeirq);
if (retval) {
free_irq(up->port.irq, up);
return retval;
}
disable_irq(up->wakeirq);
}
dev_dbg(up->port.dev, "serial_omap_startup+%d\n", up->port.line);
@ -845,8 +826,7 @@ static void serial_omap_shutdown(struct uart_port *port)
pm_runtime_mark_last_busy(up->dev);
pm_runtime_put_autosuspend(up->dev);
free_irq(up->port.irq, up);
if (up->wakeirq)
free_irq(up->wakeirq, up);
dev_pm_clear_wake_irq(up->dev);
}
static void serial_omap_uart_qos_work(struct work_struct *work)
@ -1139,13 +1119,6 @@ serial_omap_pm(struct uart_port *port, unsigned int state,
serial_out(up, UART_EFR, efr);
serial_out(up, UART_LCR, 0);
if (!device_may_wakeup(up->dev)) {
if (!state)
pm_runtime_forbid(up->dev);
else
pm_runtime_allow(up->dev);
}
pm_runtime_mark_last_busy(up->dev);
pm_runtime_put_autosuspend(up->dev);
}
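The startup and shutdown hunks above drop the driver's private request_irq()/free_irq() handling of the wake-up interrupt (and the wakeups_enabled bookkeeping) in favour of the dev_pm_set_dedicated_wake_irq()/dev_pm_clear_wake_irq() pair from linux/pm_wakeirq.h. A minimal sketch of how the two calls pair up; the device and IRQ arguments are placeholders:

#include <linux/pm_wakeirq.h>

/* Startup path: hand the dedicated wake-up IRQ to the PM core. */
static int demo_hook_wakeirq(struct device *dev, int wakeirq)
{
	return dev_pm_set_dedicated_wake_irq(dev, wakeirq);
}

/* Shutdown path: give it back; no manual enable/disable tracking needed. */
static void demo_unhook_wakeirq(struct device *dev)
{
	dev_pm_clear_wake_irq(dev);
}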


@ -348,7 +348,7 @@ static void s3c24xx_serial_start_next_tx(struct s3c24xx_uart_port *ourport)
s3c24xx_serial_start_tx_dma(ourport, count);
}
void s3c24xx_serial_start_tx(struct uart_port *port)
static void s3c24xx_serial_start_tx(struct uart_port *port)
{
struct s3c24xx_uart_port *ourport = to_ourport(port);
struct circ_buf *xmit = &port->state->xmit;
@ -2337,7 +2337,7 @@ static struct s3c24xx_serial_drv_data exynos5433_serial_drv_data = {
#define EXYNOS5433_SERIAL_DRV_DATA (kernel_ulong_t)NULL
#endif
static struct platform_device_id s3c24xx_serial_driver_ids[] = {
static const struct platform_device_id s3c24xx_serial_driver_ids[] = {
{
.name = "s3c2410-uart",
.driver_data = S3C2410_SERIAL_DRV_DATA,


@ -25,6 +25,7 @@
#include <linux/serial.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/spi/spi.h>
#include <linux/uaccess.h>
#define SC16IS7XX_NAME "sc16is7xx"
@ -300,25 +301,38 @@ struct sc16is7xx_devtype {
int nr_uart;
};
#define SC16IS7XX_RECONF_MD (1 << 0)
#define SC16IS7XX_RECONF_IER (1 << 1)
#define SC16IS7XX_RECONF_RS485 (1 << 2)
struct sc16is7xx_one_config {
unsigned int flags;
u8 ier_clear;
};
struct sc16is7xx_one {
struct uart_port port;
struct work_struct tx_work;
struct work_struct md_work;
struct kthread_work tx_work;
struct kthread_work reg_work;
struct sc16is7xx_one_config config;
};
struct sc16is7xx_port {
struct uart_driver uart;
struct sc16is7xx_devtype *devtype;
struct regmap *regmap;
struct mutex mutex;
struct clk *clk;
#ifdef CONFIG_GPIOLIB
struct gpio_chip gpio;
#endif
unsigned char buf[SC16IS7XX_FIFO_SIZE];
struct kthread_worker kworker;
struct task_struct *kworker_task;
struct kthread_work irq_work;
struct sc16is7xx_one p[0];
};
#define to_sc16is7xx_port(p,e) ((container_of((p), struct sc16is7xx_port, e)))
#define to_sc16is7xx_one(p,e) ((container_of((p), struct sc16is7xx_one, e)))
static u8 sc16is7xx_port_read(struct uart_port *port, u8 reg)
@ -615,9 +629,7 @@ static void sc16is7xx_port_irq(struct sc16is7xx_port *s, int portno)
!!(msr & SC16IS7XX_MSR_CTS_BIT));
break;
case SC16IS7XX_IIR_THRI_SRC:
mutex_lock(&s->mutex);
sc16is7xx_handle_tx(port);
mutex_unlock(&s->mutex);
break;
default:
dev_err_ratelimited(port->dev,
@ -628,81 +640,115 @@ static void sc16is7xx_port_irq(struct sc16is7xx_port *s, int portno)
} while (1);
}
static irqreturn_t sc16is7xx_ist(int irq, void *dev_id)
static void sc16is7xx_ist(struct kthread_work *ws)
{
struct sc16is7xx_port *s = (struct sc16is7xx_port *)dev_id;
struct sc16is7xx_port *s = to_sc16is7xx_port(ws, irq_work);
int i;
for (i = 0; i < s->uart.nr; ++i)
sc16is7xx_port_irq(s, i);
}
static irqreturn_t sc16is7xx_irq(int irq, void *dev_id)
{
struct sc16is7xx_port *s = (struct sc16is7xx_port *)dev_id;
queue_kthread_work(&s->kworker, &s->irq_work);
return IRQ_HANDLED;
}
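The hunks above and below convert the driver from a threaded IRQ plus workqueue items to a hard IRQ handler that only queues kthread work, so the sleeping regmap I/O runs in one dedicated worker thread. A self-contained sketch of that pattern using the same v4.2-era kthread_worker calls; every demo_* name is made up for illustration:

#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>

static struct kthread_worker demo_worker;
static struct task_struct *demo_task;
static struct kthread_work demo_work;

/* Worker context: sleeping bus access (I2C/SPI regmap) is allowed here. */
static void demo_work_fn(struct kthread_work *work)
{
	/* ... talk to the chip ... */
}

/* Hard IRQ context: do nothing but hand off to the worker. */
static irqreturn_t demo_irq(int irq, void *dev_id)
{
	queue_kthread_work(&demo_worker, &demo_work);
	return IRQ_HANDLED;
}

static int demo_setup(void)
{
	init_kthread_worker(&demo_worker);
	init_kthread_work(&demo_work, demo_work_fn);
	demo_task = kthread_run(kthread_worker_fn, &demo_worker, "demo-worker");
	return IS_ERR(demo_task) ? PTR_ERR(demo_task) : 0;
}

/* Teardown mirrors the remove path shown further down:
 * flush_kthread_worker(&demo_worker); kthread_stop(demo_task);
 */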
static void sc16is7xx_wq_proc(struct work_struct *ws)
static void sc16is7xx_tx_proc(struct kthread_work *ws)
{
struct sc16is7xx_one *one = to_sc16is7xx_one(ws, tx_work);
struct sc16is7xx_port *s = dev_get_drvdata(one->port.dev);
struct uart_port *port = &(to_sc16is7xx_one(ws, tx_work)->port);
mutex_lock(&s->mutex);
sc16is7xx_handle_tx(&one->port);
mutex_unlock(&s->mutex);
if ((port->rs485.flags & SER_RS485_ENABLED) &&
(port->rs485.delay_rts_before_send > 0))
msleep(port->rs485.delay_rts_before_send);
sc16is7xx_handle_tx(port);
}
static void sc16is7xx_stop_tx(struct uart_port* port)
static void sc16is7xx_reconf_rs485(struct uart_port *port)
{
struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
struct circ_buf *xmit = &one->port.state->xmit;
const u32 mask = SC16IS7XX_EFCR_AUTO_RS485_BIT |
SC16IS7XX_EFCR_RTS_INVERT_BIT;
u32 efcr = 0;
struct serial_rs485 *rs485 = &port->rs485;
unsigned long irqflags;
/* handle rs485 */
if (port->rs485.flags & SER_RS485_ENABLED) {
/* do nothing if current tx not yet completed */
int lsr = sc16is7xx_port_read(port, SC16IS7XX_LSR_REG);
if (!(lsr & SC16IS7XX_LSR_TEMT_BIT))
return;
spin_lock_irqsave(&port->lock, irqflags);
if (rs485->flags & SER_RS485_ENABLED) {
efcr |= SC16IS7XX_EFCR_AUTO_RS485_BIT;
if (uart_circ_empty(xmit) &&
(port->rs485.delay_rts_after_send > 0))
mdelay(port->rs485.delay_rts_after_send);
if (rs485->flags & SER_RS485_RTS_AFTER_SEND)
efcr |= SC16IS7XX_EFCR_RTS_INVERT_BIT;
}
spin_unlock_irqrestore(&port->lock, irqflags);
sc16is7xx_port_update(port, SC16IS7XX_IER_REG,
SC16IS7XX_IER_THRI_BIT,
0);
sc16is7xx_port_update(port, SC16IS7XX_EFCR_REG, mask, efcr);
}
static void sc16is7xx_stop_rx(struct uart_port* port)
static void sc16is7xx_reg_proc(struct kthread_work *ws)
{
struct sc16is7xx_one *one = to_sc16is7xx_one(ws, reg_work);
struct sc16is7xx_one_config config;
unsigned long irqflags;
spin_lock_irqsave(&one->port.lock, irqflags);
config = one->config;
memset(&one->config, 0, sizeof(one->config));
spin_unlock_irqrestore(&one->port.lock, irqflags);
if (config.flags & SC16IS7XX_RECONF_MD)
sc16is7xx_port_update(&one->port, SC16IS7XX_MCR_REG,
SC16IS7XX_MCR_LOOP_BIT,
(one->port.mctrl & TIOCM_LOOP) ?
SC16IS7XX_MCR_LOOP_BIT : 0);
if (config.flags & SC16IS7XX_RECONF_IER)
sc16is7xx_port_update(&one->port, SC16IS7XX_IER_REG,
config.ier_clear, 0);
if (config.flags & SC16IS7XX_RECONF_RS485)
sc16is7xx_reconf_rs485(&one->port);
}
static void sc16is7xx_ier_clear(struct uart_port *port, u8 bit)
{
struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
one->port.read_status_mask &= ~SC16IS7XX_LSR_DR_BIT;
sc16is7xx_port_update(port, SC16IS7XX_IER_REG,
SC16IS7XX_LSR_DR_BIT,
0);
one->config.flags |= SC16IS7XX_RECONF_IER;
one->config.ier_clear |= bit;
queue_kthread_work(&s->kworker, &one->reg_work);
}
static void sc16is7xx_stop_tx(struct uart_port *port)
{
sc16is7xx_ier_clear(port, SC16IS7XX_IER_THRI_BIT);
}
static void sc16is7xx_stop_rx(struct uart_port *port)
{
sc16is7xx_ier_clear(port, SC16IS7XX_IER_RDI_BIT);
}
static void sc16is7xx_start_tx(struct uart_port *port)
{
struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
/* handle rs485 */
if ((port->rs485.flags & SER_RS485_ENABLED) &&
(port->rs485.delay_rts_before_send > 0)) {
mdelay(port->rs485.delay_rts_before_send);
}
if (!work_pending(&one->tx_work))
schedule_work(&one->tx_work);
queue_kthread_work(&s->kworker, &one->tx_work);
}
static unsigned int sc16is7xx_tx_empty(struct uart_port *port)
{
unsigned int lvl, lsr;
unsigned int lsr;
lvl = sc16is7xx_port_read(port, SC16IS7XX_TXLVL_REG);
lsr = sc16is7xx_port_read(port, SC16IS7XX_LSR_REG);
return ((lsr & SC16IS7XX_LSR_THRE_BIT) && !lvl) ? TIOCSER_TEMT : 0;
return (lsr & SC16IS7XX_LSR_TEMT_BIT) ? TIOCSER_TEMT : 0;
}
static unsigned int sc16is7xx_get_mctrl(struct uart_port *port)
@ -713,21 +759,13 @@ static unsigned int sc16is7xx_get_mctrl(struct uart_port *port)
return TIOCM_DSR | TIOCM_CAR;
}
static void sc16is7xx_md_proc(struct work_struct *ws)
{
struct sc16is7xx_one *one = to_sc16is7xx_one(ws, md_work);
sc16is7xx_port_update(&one->port, SC16IS7XX_MCR_REG,
SC16IS7XX_MCR_LOOP_BIT,
(one->port.mctrl & TIOCM_LOOP) ?
SC16IS7XX_MCR_LOOP_BIT : 0);
}
static void sc16is7xx_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
schedule_work(&one->md_work);
one->config.flags |= SC16IS7XX_RECONF_MD;
queue_kthread_work(&s->kworker, &one->reg_work);
}
static void sc16is7xx_break_ctl(struct uart_port *port, int break_state)
@ -831,9 +869,8 @@ static void sc16is7xx_set_termios(struct uart_port *port,
static int sc16is7xx_config_rs485(struct uart_port *port,
struct serial_rs485 *rs485)
{
const u32 mask = SC16IS7XX_EFCR_AUTO_RS485_BIT |
SC16IS7XX_EFCR_RTS_INVERT_BIT;
u32 efcr = 0;
struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
struct sc16is7xx_one *one = to_sc16is7xx_one(port, port);
if (rs485->flags & SER_RS485_ENABLED) {
bool rts_during_rx, rts_during_tx;
@ -841,21 +878,23 @@ static int sc16is7xx_config_rs485(struct uart_port *port,
rts_during_rx = rs485->flags & SER_RS485_RTS_AFTER_SEND;
rts_during_tx = rs485->flags & SER_RS485_RTS_ON_SEND;
efcr |= SC16IS7XX_EFCR_AUTO_RS485_BIT;
if (!rts_during_rx && rts_during_tx)
/* default */;
else if (rts_during_rx && !rts_during_tx)
efcr |= SC16IS7XX_EFCR_RTS_INVERT_BIT;
else
if (rts_during_rx == rts_during_tx)
dev_err(port->dev,
"unsupported RTS signalling on_send:%d after_send:%d - exactly one of RS485 RTS flags should be set\n",
rts_during_tx, rts_during_rx);
/*
* RTS signal is handled by HW, it's timing can't be influenced.
* However, it's sometimes useful to delay TX even without RTS
* control therefore we try to handle .delay_rts_before_send.
*/
if (rs485->delay_rts_after_send)
return -EINVAL;
}
sc16is7xx_port_update(port, SC16IS7XX_EFCR_REG, mask, efcr);
port->rs485 = *rs485;
one->config.flags |= SC16IS7XX_RECONF_RS485;
queue_kthread_work(&s->kworker, &one->reg_work);
return 0;
}
@ -916,6 +955,8 @@ static int sc16is7xx_startup(struct uart_port *port)
static void sc16is7xx_shutdown(struct uart_port *port)
{
struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
/* Disable all interrupts */
sc16is7xx_port_write(port, SC16IS7XX_IER_REG, 0);
/* Disable TX/RX */
@ -926,6 +967,8 @@ static void sc16is7xx_shutdown(struct uart_port *port)
SC16IS7XX_EFCR_TXDISABLE_BIT);
sc16is7xx_power(port, 0);
flush_kthread_worker(&s->kworker);
}
static const char *sc16is7xx_type(struct uart_port *port)
@ -1043,6 +1086,7 @@ static int sc16is7xx_probe(struct device *dev,
struct sc16is7xx_devtype *devtype,
struct regmap *regmap, int irq, unsigned long flags)
{
struct sched_param sched_param = { .sched_priority = MAX_RT_PRIO / 2 };
unsigned long freq, *pfreq = dev_get_platdata(dev);
int i, ret;
struct sc16is7xx_port *s;
@ -1084,6 +1128,16 @@ static int sc16is7xx_probe(struct device *dev,
goto out_clk;
}
init_kthread_worker(&s->kworker);
init_kthread_work(&s->irq_work, sc16is7xx_ist);
s->kworker_task = kthread_run(kthread_worker_fn, &s->kworker,
"sc16is7xx");
if (IS_ERR(s->kworker_task)) {
ret = PTR_ERR(s->kworker_task);
goto out_uart;
}
sched_setscheduler(s->kworker_task, SCHED_FIFO, &sched_param);
#ifdef CONFIG_GPIOLIB
if (devtype->nr_gpio) {
/* Setup GPIO cotroller */
@ -1099,12 +1153,10 @@ static int sc16is7xx_probe(struct device *dev,
s->gpio.can_sleep = 1;
ret = gpiochip_add(&s->gpio);
if (ret)
goto out_uart;
goto out_thread;
}
#endif
mutex_init(&s->mutex);
for (i = 0; i < devtype->nr_uart; ++i) {
/* Initialize port data */
s->p[i].port.line = i;
@ -1123,10 +1175,9 @@ static int sc16is7xx_probe(struct device *dev,
sc16is7xx_port_write(&s->p[i].port, SC16IS7XX_EFCR_REG,
SC16IS7XX_EFCR_RXDISABLE_BIT |
SC16IS7XX_EFCR_TXDISABLE_BIT);
/* Initialize queue for start TX */
INIT_WORK(&s->p[i].tx_work, sc16is7xx_wq_proc);
/* Initialize queue for changing mode */
INIT_WORK(&s->p[i].md_work, sc16is7xx_md_proc);
/* Initialize kthread work structs */
init_kthread_work(&s->p[i].tx_work, sc16is7xx_tx_proc);
init_kthread_work(&s->p[i].reg_work, sc16is7xx_reg_proc);
/* Register port */
uart_add_one_port(&s->uart, &s->p[i].port);
/* Go to suspend mode */
@ -1134,7 +1185,7 @@ static int sc16is7xx_probe(struct device *dev,
}
/* Setup interrupt */
ret = devm_request_threaded_irq(dev, irq, NULL, sc16is7xx_ist,
ret = devm_request_irq(dev, irq, sc16is7xx_irq,
IRQF_ONESHOT | flags, dev_name(dev), s);
if (!ret)
return 0;
@ -1142,14 +1193,15 @@ static int sc16is7xx_probe(struct device *dev,
for (i = 0; i < s->uart.nr; i++)
uart_remove_one_port(&s->uart, &s->p[i].port);
mutex_destroy(&s->mutex);
#ifdef CONFIG_GPIOLIB
if (devtype->nr_gpio)
gpiochip_remove(&s->gpio);
out_uart:
out_thread:
#endif
kthread_stop(s->kworker_task);
out_uart:
uart_unregister_driver(&s->uart);
out_clk:
@ -1170,13 +1222,13 @@ static int sc16is7xx_remove(struct device *dev)
#endif
for (i = 0; i < s->uart.nr; i++) {
cancel_work_sync(&s->p[i].tx_work);
cancel_work_sync(&s->p[i].md_work);
uart_remove_one_port(&s->uart, &s->p[i].port);
sc16is7xx_power(&s->p[i].port, 0);
}
mutex_destroy(&s->mutex);
flush_kthread_worker(&s->kworker);
kthread_stop(s->kworker_task);
uart_unregister_driver(&s->uart);
if (!IS_ERR(s->clk))
clk_disable_unprepare(s->clk);
@ -1204,6 +1256,75 @@ static struct regmap_config regcfg = {
.precious_reg = sc16is7xx_regmap_precious,
};
#ifdef CONFIG_SERIAL_SC16IS7XX_SPI
static int sc16is7xx_spi_probe(struct spi_device *spi)
{
struct sc16is7xx_devtype *devtype;
unsigned long flags = 0;
struct regmap *regmap;
int ret;
/* Setup SPI bus */
spi->bits_per_word = 8;
/* only supports mode 0 on SC16IS762 */
spi->mode = spi->mode ? : SPI_MODE_0;
spi->max_speed_hz = spi->max_speed_hz ? : 15000000;
ret = spi_setup(spi);
if (ret)
return ret;
if (spi->dev.of_node) {
const struct of_device_id *of_id =
of_match_device(sc16is7xx_dt_ids, &spi->dev);
devtype = (struct sc16is7xx_devtype *)of_id->data;
} else {
const struct spi_device_id *id_entry = spi_get_device_id(spi);
devtype = (struct sc16is7xx_devtype *)id_entry->driver_data;
flags = IRQF_TRIGGER_FALLING;
}
regcfg.max_register = (0xf << SC16IS7XX_REG_SHIFT) |
(devtype->nr_uart - 1);
regmap = devm_regmap_init_spi(spi, &regcfg);
return sc16is7xx_probe(&spi->dev, devtype, regmap, spi->irq, flags);
}
static int sc16is7xx_spi_remove(struct spi_device *spi)
{
return sc16is7xx_remove(&spi->dev);
}
static const struct spi_device_id sc16is7xx_spi_id_table[] = {
{ "sc16is74x", (kernel_ulong_t)&sc16is74x_devtype, },
{ "sc16is740", (kernel_ulong_t)&sc16is74x_devtype, },
{ "sc16is741", (kernel_ulong_t)&sc16is74x_devtype, },
{ "sc16is750", (kernel_ulong_t)&sc16is750_devtype, },
{ "sc16is752", (kernel_ulong_t)&sc16is752_devtype, },
{ "sc16is760", (kernel_ulong_t)&sc16is760_devtype, },
{ "sc16is762", (kernel_ulong_t)&sc16is762_devtype, },
{ }
};
MODULE_DEVICE_TABLE(spi, sc16is7xx_spi_id_table);
static struct spi_driver sc16is7xx_spi_uart_driver = {
.driver = {
.name = SC16IS7XX_NAME,
.owner = THIS_MODULE,
.of_match_table = of_match_ptr(sc16is7xx_dt_ids),
},
.probe = sc16is7xx_spi_probe,
.remove = sc16is7xx_spi_remove,
.id_table = sc16is7xx_spi_id_table,
};
MODULE_ALIAS("spi:sc16is7xx");
#endif
#ifdef CONFIG_SERIAL_SC16IS7XX_I2C
static int sc16is7xx_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
@ -1235,6 +1356,8 @@ static int sc16is7xx_i2c_remove(struct i2c_client *client)
static const struct i2c_device_id sc16is7xx_i2c_id_table[] = {
{ "sc16is74x", (kernel_ulong_t)&sc16is74x_devtype, },
{ "sc16is740", (kernel_ulong_t)&sc16is74x_devtype, },
{ "sc16is741", (kernel_ulong_t)&sc16is74x_devtype, },
{ "sc16is750", (kernel_ulong_t)&sc16is750_devtype, },
{ "sc16is752", (kernel_ulong_t)&sc16is752_devtype, },
{ "sc16is760", (kernel_ulong_t)&sc16is760_devtype, },
@ -1253,8 +1376,43 @@ static struct i2c_driver sc16is7xx_i2c_uart_driver = {
.remove = sc16is7xx_i2c_remove,
.id_table = sc16is7xx_i2c_id_table,
};
module_i2c_driver(sc16is7xx_i2c_uart_driver);
MODULE_ALIAS("i2c:sc16is7xx");
#endif
static int __init sc16is7xx_init(void)
{
int ret = 0;
#ifdef CONFIG_SERIAL_SC16IS7XX_I2C
ret = i2c_add_driver(&sc16is7xx_i2c_uart_driver);
if (ret < 0) {
pr_err("failed to init sc16is7xx i2c --> %d\n", ret);
return ret;
}
#endif
#ifdef CONFIG_SERIAL_SC16IS7XX_SPI
ret = spi_register_driver(&sc16is7xx_spi_uart_driver);
if (ret < 0) {
pr_err("failed to init sc16is7xx spi --> %d\n", ret);
return ret;
}
#endif
return ret;
}
module_init(sc16is7xx_init);
static void __exit sc16is7xx_exit(void)
{
#ifdef CONFIG_SERIAL_SC16IS7XX_I2C
i2c_del_driver(&sc16is7xx_i2c_uart_driver);
#endif
#ifdef CONFIG_SERIAL_SC16IS7XX_SPI
spi_unregister_driver(&sc16is7xx_spi_uart_driver);
#endif
}
module_exit(sc16is7xx_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jon Ringle <jringle@gridpoint.com>");


@ -131,8 +131,8 @@ struct tegra_uart_port {
struct dma_async_tx_descriptor *rx_dma_desc;
dma_cookie_t tx_cookie;
dma_cookie_t rx_cookie;
int tx_bytes_requested;
int rx_bytes_requested;
unsigned int tx_bytes_requested;
unsigned int rx_bytes_requested;
};
static void tegra_uart_start_next_tx(struct tegra_uart_port *tup);
@ -234,6 +234,22 @@ static void tegra_uart_break_ctl(struct uart_port *u, int break_ctl)
tup->lcr_shadow = lcr;
}
/**
* tegra_uart_wait_cycle_time: Wait for N UART clock periods
*
* @tup: Tegra serial port data structure.
* @cycles: Number of clock periods to wait.
*
* Tegra UARTs are clocked at 16X the baud/bit rate and hence the UART
* clock speed is 16X the current baud rate.
*/
static void tegra_uart_wait_cycle_time(struct tegra_uart_port *tup,
unsigned int cycles)
{
if (tup->current_baud)
udelay(DIV_ROUND_UP(cycles * 1000000, tup->current_baud * 16));
}
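As a worked example (the baud rate here is picked purely for illustration): at 115200 baud the UART clock is 16 * 115200 = 1843200 Hz, so the 32-cycle wait used for the FIFO flush below evaluates to udelay(DIV_ROUND_UP(32 * 1000000, 1843200)) = udelay(18), roughly an 18 microsecond delay, and the 3-cycle wait after enabling the TX FIFO rounds up to udelay(2).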
/* Wait for a symbol-time. */
static void tegra_uart_wait_sym_time(struct tegra_uart_port *tup,
unsigned int syms)
@ -263,8 +279,12 @@ static void tegra_uart_fifo_reset(struct tegra_uart_port *tup, u8 fcr_bits)
/* Dummy read to ensure the write is posted */
tegra_uart_read(tup, UART_SCR);
/* Wait for the flush to propagate. */
tegra_uart_wait_sym_time(tup, 1);
/*
* For all tegra devices (up to t210), there is a hardware issue that
* requires software to wait for 32 UART clock periods for the flush
* to propagate, otherwise data could be lost.
*/
tegra_uart_wait_cycle_time(tup, 32);
}
static int tegra_set_baudrate(struct tegra_uart_port *tup, unsigned int baud)
@ -388,9 +408,9 @@ static void tegra_uart_tx_dma_complete(void *args)
struct circ_buf *xmit = &tup->uport.state->xmit;
struct dma_tx_state state;
unsigned long flags;
int count;
unsigned int count;
dmaengine_tx_status(tup->tx_dma_chan, tup->rx_cookie, &state);
dmaengine_tx_status(tup->tx_dma_chan, tup->tx_cookie, &state);
count = tup->tx_bytes_requested - state.residue;
async_tx_ack(tup->tx_dma_desc);
spin_lock_irqsave(&tup->uport.lock, flags);
@ -480,7 +500,7 @@ static void tegra_uart_stop_tx(struct uart_port *u)
struct tegra_uart_port *tup = to_tegra_uport(u);
struct circ_buf *xmit = &tup->uport.state->xmit;
struct dma_tx_state state;
int count;
unsigned int count;
if (tup->tx_in_progress != TEGRA_UART_TX_DMA)
return;
@ -530,10 +550,15 @@ static void tegra_uart_handle_rx_pio(struct tegra_uart_port *tup,
}
static void tegra_uart_copy_rx_to_tty(struct tegra_uart_port *tup,
struct tty_port *tty, int count)
struct tty_port *tty,
unsigned int count)
{
int copied;
/* If count is zero, then there is no data to be copied */
if (!count)
return;
tup->uport.icount.rx += count;
if (!tty) {
dev_err(tup->uport.dev, "No tty port\n");
@ -555,20 +580,29 @@ static void tegra_uart_rx_dma_complete(void *args)
{
struct tegra_uart_port *tup = args;
struct uart_port *u = &tup->uport;
int count = tup->rx_bytes_requested;
unsigned int count = tup->rx_bytes_requested;
struct tty_struct *tty = tty_port_tty_get(&tup->uport.state->port);
struct tty_port *port = &u->state->port;
unsigned long flags;
struct dma_tx_state state;
enum dma_status status;
spin_lock_irqsave(&u->lock, flags);
status = dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state);
if (status == DMA_IN_PROGRESS) {
dev_dbg(tup->uport.dev, "RX DMA is in progress\n");
goto done;
}
async_tx_ack(tup->rx_dma_desc);
spin_lock_irqsave(&u->lock, flags);
/* Deactivate flow control to stop sender */
if (tup->rts_active)
set_rts(tup, false);
/* If we are here, DMA is stopped */
if (count)
tegra_uart_copy_rx_to_tty(tup, port, count);
tegra_uart_handle_rx_pio(tup, port);
@ -584,6 +618,7 @@ static void tegra_uart_rx_dma_complete(void *args)
if (tup->rts_active)
set_rts(tup, true);
done:
spin_unlock_irqrestore(&u->lock, flags);
}
@ -594,7 +629,7 @@ static void tegra_uart_handle_rx_dma(struct tegra_uart_port *tup,
struct tty_struct *tty = tty_port_tty_get(&tup->uport.state->port);
struct tty_port *port = &tup->uport.state->port;
struct uart_port *u = &tup->uport;
int count;
unsigned int count;
/* Deactivate flow control to stop sender */
if (tup->rts_active)
@ -606,7 +641,6 @@ static void tegra_uart_handle_rx_dma(struct tegra_uart_port *tup,
count = tup->rx_bytes_requested - state.residue;
/* If we are here, DMA is stopped */
if (count)
tegra_uart_copy_rx_to_tty(tup, port, count);
tegra_uart_handle_rx_pio(tup, port);
@ -865,6 +899,16 @@ static int tegra_uart_hw_init(struct tegra_uart_port *tup)
tup->fcr_shadow |= TEGRA_UART_TX_TRIG_16B;
tegra_uart_write(tup, tup->fcr_shadow, UART_FCR);
/* Dummy read to ensure the write is posted */
tegra_uart_read(tup, UART_SCR);
/*
* For all tegra devices (up to t210), there is a hardware issue that
* requires software to wait for 3 UART clock periods after enabling
* the TX fifo, otherwise data could be lost.
*/
tegra_uart_wait_cycle_time(tup, 3);
/*
* Initialize the UART with default configuration
* (115200, N, 8, 1) so that the receive DMA buffer may be
@ -905,6 +949,28 @@ static int tegra_uart_hw_init(struct tegra_uart_port *tup)
return 0;
}
static void tegra_uart_dma_channel_free(struct tegra_uart_port *tup,
bool dma_to_memory)
{
if (dma_to_memory) {
dmaengine_terminate_all(tup->rx_dma_chan);
dma_release_channel(tup->rx_dma_chan);
dma_free_coherent(tup->uport.dev, TEGRA_UART_RX_DMA_BUFFER_SIZE,
tup->rx_dma_buf_virt, tup->rx_dma_buf_phys);
tup->rx_dma_chan = NULL;
tup->rx_dma_buf_phys = 0;
tup->rx_dma_buf_virt = NULL;
} else {
dmaengine_terminate_all(tup->tx_dma_chan);
dma_release_channel(tup->tx_dma_chan);
dma_unmap_single(tup->uport.dev, tup->tx_dma_buf_phys,
UART_XMIT_SIZE, DMA_TO_DEVICE);
tup->tx_dma_chan = NULL;
tup->tx_dma_buf_phys = 0;
tup->tx_dma_buf_virt = NULL;
}
}
static int tegra_uart_dma_channel_allocate(struct tegra_uart_port *tup,
bool dma_to_memory)
{
@ -933,67 +999,39 @@ static int tegra_uart_dma_channel_allocate(struct tegra_uart_port *tup,
dma_release_channel(dma_chan);
return -ENOMEM;
}
dma_sconfig.src_addr = tup->uport.mapbase;
dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
dma_sconfig.src_maxburst = 4;
tup->rx_dma_chan = dma_chan;
tup->rx_dma_buf_virt = dma_buf;
tup->rx_dma_buf_phys = dma_phys;
} else {
dma_phys = dma_map_single(tup->uport.dev,
tup->uport.state->xmit.buf, UART_XMIT_SIZE,
DMA_TO_DEVICE);
dma_buf = tup->uport.state->xmit.buf;
if (dma_mapping_error(tup->uport.dev, dma_phys)) {
dev_err(tup->uport.dev, "dma_map_single tx failed\n");
dma_release_channel(dma_chan);
return -ENOMEM;
}
if (dma_to_memory) {
dma_sconfig.src_addr = tup->uport.mapbase;
dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
dma_sconfig.src_maxburst = 4;
} else {
dma_buf = tup->uport.state->xmit.buf;
dma_sconfig.dst_addr = tup->uport.mapbase;
dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
dma_sconfig.dst_maxburst = 16;
tup->tx_dma_chan = dma_chan;
tup->tx_dma_buf_virt = dma_buf;
tup->tx_dma_buf_phys = dma_phys;
}
ret = dmaengine_slave_config(dma_chan, &dma_sconfig);
if (ret < 0) {
dev_err(tup->uport.dev,
"Dma slave config failed, err = %d\n", ret);
goto scrub;
}
if (dma_to_memory) {
tup->rx_dma_chan = dma_chan;
tup->rx_dma_buf_virt = dma_buf;
tup->rx_dma_buf_phys = dma_phys;
} else {
tup->tx_dma_chan = dma_chan;
tup->tx_dma_buf_virt = dma_buf;
tup->tx_dma_buf_phys = dma_phys;
}
return 0;
scrub:
dma_release_channel(dma_chan);
tegra_uart_dma_channel_free(tup, dma_to_memory);
return ret;
}
static void tegra_uart_dma_channel_free(struct tegra_uart_port *tup,
bool dma_to_memory)
{
struct dma_chan *dma_chan;
if (dma_to_memory) {
dma_free_coherent(tup->uport.dev, TEGRA_UART_RX_DMA_BUFFER_SIZE,
tup->rx_dma_buf_virt, tup->rx_dma_buf_phys);
dma_chan = tup->rx_dma_chan;
tup->rx_dma_chan = NULL;
tup->rx_dma_buf_phys = 0;
tup->rx_dma_buf_virt = NULL;
} else {
dma_unmap_single(tup->uport.dev, tup->tx_dma_buf_phys,
UART_XMIT_SIZE, DMA_TO_DEVICE);
dma_chan = tup->tx_dma_chan;
tup->tx_dma_chan = NULL;
tup->tx_dma_buf_phys = 0;
tup->tx_dma_buf_virt = NULL;
}
dma_release_channel(dma_chan);
return 0;
}
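Since the rework funnels both directions through one dma_slave_config setup, here is that step condensed into a stand-alone sketch; the helper name and signature are invented, and the burst sizes mirror the values shown in the hunk:

#include <linux/dmaengine.h>
#include <linux/string.h>

/* Sketch: program one direction of a UART DMA channel (illustrative only). */
static int demo_uart_slave_config(struct dma_chan *chan, dma_addr_t fifo_addr,
				  bool dma_to_memory)
{
	struct dma_slave_config cfg;

	memset(&cfg, 0, sizeof(cfg));
	if (dma_to_memory) {			/* RX: device to memory */
		cfg.src_addr = fifo_addr;
		cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		cfg.src_maxburst = 4;
	} else {				/* TX: memory to device */
		cfg.dst_addr = fifo_addr;
		cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		cfg.dst_maxburst = 16;
	}
	return dmaengine_slave_config(chan, &cfg);
}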
static int tegra_uart_startup(struct uart_port *u)
@ -1060,8 +1098,6 @@ static void tegra_uart_shutdown(struct uart_port *u)
tegra_uart_dma_channel_free(tup, true);
tegra_uart_dma_channel_free(tup, false);
free_irq(u->irq, tup);
tegra_uart_flush_buffer(u);
}
static void tegra_uart_enable_ms(struct uart_port *u)


@ -894,12 +894,10 @@ static int uart_set_info(struct tty_struct *tty, struct tty_port *port,
* need to rate-limit; it's CAP_SYS_ADMIN only.
*/
if (uport->flags & UPF_SPD_MASK) {
char buf[64];
dev_notice(uport->dev,
"%s sets custom speed on %s. This is deprecated.\n",
current->comm,
tty_name(port->tty, buf));
tty_name(port->tty));
}
uart_change_speed(tty, state, NULL);
}
@ -1816,8 +1814,8 @@ uart_get_console(struct uart_port *ports, int nr, struct console *co)
* @options: ptr for <options> field; NULL if not present (out)
*
* Decodes earlycon kernel command line parameters of the form
* earlycon=<name>,io|mmio|mmio32,<addr>,<options>
* console=<name>,io|mmio|mmio32,<addr>,<options>
* earlycon=<name>,io|mmio|mmio32|mmio32be,<addr>,<options>
* console=<name>,io|mmio|mmio32|mmio32be,<addr>,<options>
*
* The optional form
* earlycon=<name>,0x<addr>,<options>
@ -1835,6 +1833,9 @@ int uart_parse_earlycon(char *p, unsigned char *iotype, unsigned long *addr,
} else if (strncmp(p, "mmio32,", 7) == 0) {
*iotype = UPIO_MEM32;
p += 7;
} else if (strncmp(p, "mmio32be,", 9) == 0) {
*iotype = UPIO_MEM32BE;
p += 9;
} else if (strncmp(p, "io,", 3) == 0) {
*iotype = UPIO_PORT;
p += 3;
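With the new token accepted above, a big-endian early console can be requested on the kernel command line in the documented form, for example (the driver name and address below are placeholders, not taken from this patch):

    earlycon=myuart,mmio32be,0x1c020000,115200n8

The parser maps the mmio32be keyword to UPIO_MEM32BE and hands the remaining <addr> and <options> fields back to the caller.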


@ -49,8 +49,7 @@ void mctrl_gpio_set(struct mctrl_gpios *gpios, unsigned int mctrl)
unsigned int count = 0;
for (i = 0; i < UART_GPIO_MAX; i++)
if (!IS_ERR_OR_NULL(gpios->gpio[i]) &&
mctrl_gpios_desc[i].dir_out) {
if (gpios->gpio[i] && mctrl_gpios_desc[i].dir_out) {
desc_array[count] = gpios->gpio[i];
value_array[count] = !!(mctrl & mctrl_gpios_desc[i].mctrl);
count++;
@ -118,7 +117,7 @@ void mctrl_gpio_free(struct device *dev, struct mctrl_gpios *gpios)
enum mctrl_gpio_idx i;
for (i = 0; i < UART_GPIO_MAX; i++)
if (!IS_ERR_OR_NULL(gpios->gpio[i]))
if (gpios->gpio[i])
devm_gpiod_put(dev, gpios->gpio[i]);
devm_kfree(dev, gpios);
}


@ -81,7 +81,8 @@ struct sci_port {
/* Platform configuration */
struct plat_sci_port *cfg;
int overrun_bit;
unsigned int overrun_reg;
unsigned int overrun_mask;
unsigned int error_mask;
unsigned int sampling_rate;
@ -168,6 +169,8 @@ static struct plat_sci_reg sci_regmap[SCIx_NR_REGTYPES][SCIx_NR_REGS] = {
[SCSPTR] = sci_reg_invalid,
[SCLSR] = sci_reg_invalid,
[HSSRR] = sci_reg_invalid,
[SCPCR] = sci_reg_invalid,
[SCPDR] = sci_reg_invalid,
},
/*
@ -188,6 +191,8 @@ static struct plat_sci_reg sci_regmap[SCIx_NR_REGTYPES][SCIx_NR_REGS] = {
[SCSPTR] = sci_reg_invalid,
[SCLSR] = sci_reg_invalid,
[HSSRR] = sci_reg_invalid,
[SCPCR] = sci_reg_invalid,
[SCPDR] = sci_reg_invalid,
},
/*
@ -207,6 +212,8 @@ static struct plat_sci_reg sci_regmap[SCIx_NR_REGTYPES][SCIx_NR_REGS] = {
[SCSPTR] = sci_reg_invalid,
[SCLSR] = sci_reg_invalid,
[HSSRR] = sci_reg_invalid,
[SCPCR] = { 0x30, 16 },
[SCPDR] = { 0x34, 16 },
},
/*
@ -226,6 +233,8 @@ static struct plat_sci_reg sci_regmap[SCIx_NR_REGTYPES][SCIx_NR_REGS] = {
[SCSPTR] = sci_reg_invalid,
[SCLSR] = sci_reg_invalid,
[HSSRR] = sci_reg_invalid,
[SCPCR] = { 0x30, 16 },
[SCPDR] = { 0x34, 16 },
},
/*
@ -246,6 +255,8 @@ static struct plat_sci_reg sci_regmap[SCIx_NR_REGTYPES][SCIx_NR_REGS] = {
[SCSPTR] = { 0x20, 16 },
[SCLSR] = { 0x24, 16 },
[HSSRR] = sci_reg_invalid,
[SCPCR] = sci_reg_invalid,
[SCPDR] = sci_reg_invalid,
},
/*
@ -265,6 +276,8 @@ static struct plat_sci_reg sci_regmap[SCIx_NR_REGTYPES][SCIx_NR_REGS] = {
[SCSPTR] = sci_reg_invalid,
[SCLSR] = sci_reg_invalid,
[HSSRR] = sci_reg_invalid,
[SCPCR] = sci_reg_invalid,
[SCPDR] = sci_reg_invalid,
},
/*
@ -284,6 +297,8 @@ static struct plat_sci_reg sci_regmap[SCIx_NR_REGTYPES][SCIx_NR_REGS] = {
[SCSPTR] = { 0x20, 16 },
[SCLSR] = { 0x24, 16 },
[HSSRR] = sci_reg_invalid,
[SCPCR] = sci_reg_invalid,
[SCPDR] = sci_reg_invalid,
},
/*
@ -303,6 +318,8 @@ static struct plat_sci_reg sci_regmap[SCIx_NR_REGTYPES][SCIx_NR_REGS] = {
[SCSPTR] = { 0x20, 16 },
[SCLSR] = { 0x24, 16 },
[HSSRR] = { 0x40, 16 },
[SCPCR] = sci_reg_invalid,
[SCPDR] = sci_reg_invalid,
},
/*
@ -323,6 +340,8 @@ static struct plat_sci_reg sci_regmap[SCIx_NR_REGTYPES][SCIx_NR_REGS] = {
[SCSPTR] = sci_reg_invalid,
[SCLSR] = { 0x24, 16 },
[HSSRR] = sci_reg_invalid,
[SCPCR] = sci_reg_invalid,
[SCPDR] = sci_reg_invalid,
},
/*
@ -343,6 +362,8 @@ static struct plat_sci_reg sci_regmap[SCIx_NR_REGTYPES][SCIx_NR_REGS] = {
[SCSPTR] = { 0x24, 16 },
[SCLSR] = { 0x28, 16 },
[HSSRR] = sci_reg_invalid,
[SCPCR] = sci_reg_invalid,
[SCPDR] = sci_reg_invalid,
},
/*
@ -363,6 +384,8 @@ static struct plat_sci_reg sci_regmap[SCIx_NR_REGTYPES][SCIx_NR_REGS] = {
[SCSPTR] = sci_reg_invalid,
[SCLSR] = sci_reg_invalid,
[HSSRR] = sci_reg_invalid,
[SCPCR] = sci_reg_invalid,
[SCPDR] = sci_reg_invalid,
},
};
@ -781,7 +804,7 @@ static int sci_handle_errors(struct uart_port *port)
struct sci_port *s = to_sci_port(port);
/* Handle overruns */
if (status & (1 << s->overrun_bit)) {
if (status & s->overrun_mask) {
port->icount.overrun++;
/* overrun error */
@ -844,32 +867,17 @@ static int sci_handle_fifo_overrun(struct uart_port *port)
struct tty_port *tport = &port->state->port;
struct sci_port *s = to_sci_port(port);
struct plat_sci_reg *reg;
int copied = 0, offset;
u16 status, bit;
int copied = 0;
u16 status;
switch (port->type) {
case PORT_SCIF:
case PORT_HSCIF:
offset = SCLSR;
break;
case PORT_SCIFA:
case PORT_SCIFB:
offset = SCxSR;
break;
default:
return 0;
}
reg = sci_getreg(port, offset);
reg = sci_getreg(port, s->overrun_reg);
if (!reg->size)
return 0;
status = serial_port_in(port, offset);
bit = 1 << s->overrun_bit;
if (status & bit) {
status &= ~bit;
serial_port_out(port, offset, status);
status = serial_port_in(port, s->overrun_reg);
if (status & s->overrun_mask) {
status &= ~s->overrun_mask;
serial_port_out(port, s->overrun_reg, status);
port->icount.overrun++;
@ -1021,15 +1029,11 @@ static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr)
ssr_status = serial_port_in(port, SCxSR);
scr_status = serial_port_in(port, SCSCR);
switch (port->type) {
case PORT_SCIF:
case PORT_HSCIF:
orer_status = serial_port_in(port, SCLSR);
break;
case PORT_SCIFA:
case PORT_SCIFB:
if (s->overrun_reg == SCxSR)
orer_status = ssr_status;
break;
else {
if (sci_getreg(port, s->overrun_reg)->size)
orer_status = serial_port_in(port, s->overrun_reg);
}
err_enabled = scr_status & port_rx_irq_mask(port);
@ -1059,7 +1063,7 @@ static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr)
ret = sci_br_interrupt(irq, ptr);
/* Overrun Interrupt */
if (orer_status & (1 << s->overrun_bit))
if (orer_status & s->overrun_mask)
sci_handle_fifo_overrun(port);
return ret;
@ -2234,32 +2238,38 @@ static int sci_init_single(struct platform_device *dev,
switch (p->type) {
case PORT_SCIFB:
port->fifosize = 256;
sci_port->overrun_bit = 9;
sci_port->overrun_reg = SCxSR;
sci_port->overrun_mask = SCIFA_ORER;
sampling_rate = 16;
break;
case PORT_HSCIF:
port->fifosize = 128;
sampling_rate = 0;
sci_port->overrun_bit = 0;
sci_port->overrun_reg = SCLSR;
sci_port->overrun_mask = SCLSR_ORER;
break;
case PORT_SCIFA:
port->fifosize = 64;
sci_port->overrun_bit = 9;
sci_port->overrun_reg = SCxSR;
sci_port->overrun_mask = SCIFA_ORER;
sampling_rate = 16;
break;
case PORT_SCIF:
port->fifosize = 16;
if (p->regtype == SCIx_SH7705_SCIF_REGTYPE) {
sci_port->overrun_bit = 9;
sci_port->overrun_reg = SCxSR;
sci_port->overrun_mask = SCIFA_ORER;
sampling_rate = 16;
} else {
sci_port->overrun_bit = 0;
sci_port->overrun_reg = SCLSR;
sci_port->overrun_mask = SCLSR_ORER;
sampling_rate = 32;
}
break;
default:
port->fifosize = 1;
sci_port->overrun_bit = 5;
sci_port->overrun_reg = SCxSR;
sci_port->overrun_mask = SCI_ORER;
sampling_rate = 32;
break;
}
@ -2304,16 +2314,12 @@ static int sci_init_single(struct platform_device *dev,
sci_port->error_mask = (p->type == PORT_SCI) ?
SCI_DEFAULT_ERROR_MASK : SCIF_DEFAULT_ERROR_MASK;
/*
* Establish sensible defaults for the overrun detection, unless
* the part has explicitly disabled support for it.
*/
/*
* Make the error mask inclusive of overrun detection, if
* supported.
*/
sci_port->error_mask |= 1 << sci_port->overrun_bit;
if (sci_port->overrun_reg == SCxSR)
sci_port->error_mask |= sci_port->overrun_mask;
port->type = p->type;
port->flags = UPF_FIXED_PORT | p->flags;


@ -1,7 +1,115 @@
#include <linux/bitops.h>
#include <linux/serial_core.h>
#include <linux/io.h>
#include <linux/gpio.h>
#define SCI_MAJOR 204
#define SCI_MINOR_START 8
/*
* SCI register subset common for all port types.
* Not all registers will exist on all parts.
*/
enum {
SCSMR, /* Serial Mode Register */
SCBRR, /* Bit Rate Register */
SCSCR, /* Serial Control Register */
SCxSR, /* Serial Status Register */
SCFCR, /* FIFO Control Register */
SCFDR, /* FIFO Data Count Register */
SCxTDR, /* Transmit (FIFO) Data Register */
SCxRDR, /* Receive (FIFO) Data Register */
SCLSR, /* Line Status Register */
SCTFDR, /* Transmit FIFO Data Count Register */
SCRFDR, /* Receive FIFO Data Count Register */
SCSPTR, /* Serial Port Register */
HSSRR, /* Sampling Rate Register */
SCPCR, /* Serial Port Control Register */
SCPDR, /* Serial Port Data Register */
SCIx_NR_REGS,
};
/* SCSMR (Serial Mode Register) */
#define SCSMR_CHR BIT(6) /* 7-bit Character Length */
#define SCSMR_PE BIT(5) /* Parity Enable */
#define SCSMR_ODD BIT(4) /* Odd Parity */
#define SCSMR_STOP BIT(3) /* Stop Bit Length */
#define SCSMR_CKS 0x0003 /* Clock Select */
/* Serial Control Register, SCIFA/SCIFB only bits */
#define SCSCR_TDRQE BIT(15) /* Tx Data Transfer Request Enable */
#define SCSCR_RDRQE BIT(14) /* Rx Data Transfer Request Enable */
/* SCxSR (Serial Status Register) on SCI */
#define SCI_TDRE BIT(7) /* Transmit Data Register Empty */
#define SCI_RDRF BIT(6) /* Receive Data Register Full */
#define SCI_ORER BIT(5) /* Overrun Error */
#define SCI_FER BIT(4) /* Framing Error */
#define SCI_PER BIT(3) /* Parity Error */
#define SCI_TEND BIT(2) /* Transmit End */
#define SCI_RESERVED 0x03 /* All reserved bits */
#define SCI_DEFAULT_ERROR_MASK (SCI_PER | SCI_FER)
#define SCI_RDxF_CLEAR ~(SCI_RESERVED | SCI_RDRF)
#define SCI_ERROR_CLEAR ~(SCI_RESERVED | SCI_PER | SCI_FER | SCI_ORER)
#define SCI_TDxE_CLEAR ~(SCI_RESERVED | SCI_TEND | SCI_TDRE)
#define SCI_BREAK_CLEAR ~(SCI_RESERVED | SCI_PER | SCI_FER | SCI_ORER)
/* SCxSR (Serial Status Register) on SCIF, SCIFA, SCIFB, HSCIF */
#define SCIF_ER BIT(7) /* Receive Error */
#define SCIF_TEND BIT(6) /* Transmission End */
#define SCIF_TDFE BIT(5) /* Transmit FIFO Data Empty */
#define SCIF_BRK BIT(4) /* Break Detect */
#define SCIF_FER BIT(3) /* Framing Error */
#define SCIF_PER BIT(2) /* Parity Error */
#define SCIF_RDF BIT(1) /* Receive FIFO Data Full */
#define SCIF_DR BIT(0) /* Receive Data Ready */
/* SCIF only (optional) */
#define SCIF_PERC 0xf000 /* Number of Parity Errors */
#define SCIF_FERC 0x0f00 /* Number of Framing Errors */
/*SCIFA/SCIFB and SCIF on SH7705/SH7720/SH7721 only */
#define SCIFA_ORER BIT(9) /* Overrun Error */
#define SCIF_DEFAULT_ERROR_MASK (SCIF_PER | SCIF_FER | SCIF_BRK | SCIF_ER)
#define SCIF_RDxF_CLEAR ~(SCIF_DR | SCIF_RDF)
#define SCIF_ERROR_CLEAR ~(SCIFA_ORER | SCIF_PER | SCIF_FER | SCIF_ER)
#define SCIF_TDxE_CLEAR ~(SCIF_TDFE)
#define SCIF_BREAK_CLEAR ~(SCIF_PER | SCIF_FER | SCIF_BRK)
/* SCFCR (FIFO Control Register) */
#define SCFCR_MCE BIT(3) /* Modem Control Enable */
#define SCFCR_TFRST BIT(2) /* Transmit FIFO Data Register Reset */
#define SCFCR_RFRST BIT(1) /* Receive FIFO Data Register Reset */
#define SCFCR_LOOP BIT(0) /* Loopback Test */
/* SCLSR (Line Status Register) on (H)SCIF */
#define SCLSR_ORER BIT(0) /* Overrun Error */
/* SCSPTR (Serial Port Register), optional */
#define SCSPTR_RTSIO BIT(7) /* Serial Port RTS Pin Input/Output */
#define SCSPTR_RTSDT BIT(6) /* Serial Port RTS Pin Data */
#define SCSPTR_CTSIO BIT(5) /* Serial Port CTS Pin Input/Output */
#define SCSPTR_CTSDT BIT(4) /* Serial Port CTS Pin Data */
#define SCSPTR_SPB2IO BIT(1) /* Serial Port Break Input/Output */
#define SCSPTR_SPB2DT BIT(0) /* Serial Port Break Data */
/* HSSRR HSCIF */
#define HSCIF_SRE BIT(15) /* Sampling Rate Register Enable */
/* SCPCR (Serial Port Control Register), SCIFA/SCIFB only */
#define SCPCR_RTSC BIT(4) /* Serial Port RTS Pin / Output Pin */
#define SCPCR_CTSC BIT(3) /* Serial Port CTS Pin / Input Pin */
/* SCPDR (Serial Port Data Register), SCIFA/SCIFB only */
#define SCPDR_RTSD BIT(4) /* Serial Port RTS Output Pin Data */
#define SCPDR_CTSD BIT(3) /* Serial Port CTS Input Pin Data */
#define SCxSR_TEND(port) (((port)->type == PORT_SCI) ? SCI_TEND : SCIF_TEND)
#define SCxSR_RDxF(port) (((port)->type == PORT_SCI) ? SCI_RDRF : SCIF_RDF)
#define SCxSR_TDxE(port) (((port)->type == PORT_SCI) ? SCI_TDRE : SCIF_TDFE)
@ -15,24 +123,24 @@
defined(CONFIG_CPU_SUBTYPE_SH7720) || \
defined(CONFIG_CPU_SUBTYPE_SH7721) || \
defined(CONFIG_ARCH_SH73A0) || \
defined(CONFIG_ARCH_SH7372) || \
defined(CONFIG_ARCH_R8A7740)
# define SCxSR_RDxF_CLEAR(port) (serial_port_in(port, SCxSR) & 0xfffc)
# define SCxSR_ERROR_CLEAR(port) (serial_port_in(port, SCxSR) & 0xfd73)
# define SCxSR_TDxE_CLEAR(port) (serial_port_in(port, SCxSR) & 0xffdf)
# define SCxSR_BREAK_CLEAR(port) (serial_port_in(port, SCxSR) & 0xffe3)
# define SCxSR_RDxF_CLEAR(port) \
(serial_port_in(port, SCxSR) & SCIF_RDxF_CLEAR)
# define SCxSR_ERROR_CLEAR(port) \
(serial_port_in(port, SCxSR) & SCIF_ERROR_CLEAR)
# define SCxSR_TDxE_CLEAR(port) \
(serial_port_in(port, SCxSR) & SCIF_TDxE_CLEAR)
# define SCxSR_BREAK_CLEAR(port) \
(serial_port_in(port, SCxSR) & SCIF_BREAK_CLEAR)
#else
# define SCxSR_RDxF_CLEAR(port) (((port)->type == PORT_SCI) ? 0xbc : 0x00fc)
# define SCxSR_ERROR_CLEAR(port) (((port)->type == PORT_SCI) ? 0xc4 : 0x0073)
# define SCxSR_TDxE_CLEAR(port) (((port)->type == PORT_SCI) ? 0x78 : 0x00df)
# define SCxSR_BREAK_CLEAR(port) (((port)->type == PORT_SCI) ? 0xc4 : 0x00e3)
# define SCxSR_RDxF_CLEAR(port) \
((((port)->type == PORT_SCI) ? SCI_RDxF_CLEAR : SCIF_RDxF_CLEAR) & 0xff)
# define SCxSR_ERROR_CLEAR(port) \
((((port)->type == PORT_SCI) ? SCI_ERROR_CLEAR : SCIF_ERROR_CLEAR) & 0xff)
# define SCxSR_TDxE_CLEAR(port) \
((((port)->type == PORT_SCI) ? SCI_TDxE_CLEAR : SCIF_TDxE_CLEAR) & 0xff)
# define SCxSR_BREAK_CLEAR(port) \
((((port)->type == PORT_SCI) ? SCI_BREAK_CLEAR : SCIF_BREAK_CLEAR) & 0xff)
#endif
/* SCFCR */
#define SCFCR_RFRST 0x0002
#define SCFCR_TFRST 0x0004
#define SCFCR_MCE 0x0008
#define SCI_MAJOR 204
#define SCI_MINOR_START 8


@ -36,8 +36,6 @@ sirfsoc_uart_pio_rx_chars(struct uart_port *port, unsigned int max_rx_count);
static struct uart_driver sirfsoc_uart_drv;
static void sirfsoc_uart_tx_dma_complete_callback(void *param);
static void sirfsoc_uart_start_next_rx_dma(struct uart_port *port);
static void sirfsoc_uart_rx_dma_complete_callback(void *param);
static const struct sirfsoc_baudrate_to_regv baudrate_to_regv[] = {
{4000000, 2359296},
{3500000, 1310721},
@ -59,50 +57,7 @@ static const struct sirfsoc_baudrate_to_regv baudrate_to_regv[] = {
{9600, 1114979},
};
static struct sirfsoc_uart_port sirfsoc_uart_ports[SIRFSOC_UART_NR] = {
[0] = {
.port = {
.iotype = UPIO_MEM,
.flags = UPF_BOOT_AUTOCONF,
.line = 0,
},
},
[1] = {
.port = {
.iotype = UPIO_MEM,
.flags = UPF_BOOT_AUTOCONF,
.line = 1,
},
},
[2] = {
.port = {
.iotype = UPIO_MEM,
.flags = UPF_BOOT_AUTOCONF,
.line = 2,
},
},
[3] = {
.port = {
.iotype = UPIO_MEM,
.flags = UPF_BOOT_AUTOCONF,
.line = 3,
},
},
[4] = {
.port = {
.iotype = UPIO_MEM,
.flags = UPF_BOOT_AUTOCONF,
.line = 4,
},
},
[5] = {
.port = {
.iotype = UPIO_MEM,
.flags = UPF_BOOT_AUTOCONF,
.line = 5,
},
},
};
static struct sirfsoc_uart_port *sirf_ports[SIRFSOC_UART_NR];
static inline struct sirfsoc_uart_port *to_sirfport(struct uart_port *port)
{
@ -116,8 +71,7 @@ static inline unsigned int sirfsoc_uart_tx_empty(struct uart_port *port)
struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
struct sirfsoc_fifo_status *ufifo_st = &sirfport->uart_reg->fifo_status;
reg = rd_regl(port, ureg->sirfsoc_tx_fifo_status);
return (reg & ufifo_st->ff_empty(port->line)) ? TIOCSER_TEMT : 0;
return (reg & ufifo_st->ff_empty(port)) ? TIOCSER_TEMT : 0;
}
static unsigned int sirfsoc_uart_get_mctrl(struct uart_port *port)
@ -152,6 +106,26 @@ static void sirfsoc_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
unsigned int val = assert ? SIRFUART_AFC_CTRL_RX_THD : 0x0;
unsigned int current_val;
if (mctrl & TIOCM_LOOP) {
if (sirfport->uart_reg->uart_type == SIRF_REAL_UART)
wr_regl(port, ureg->sirfsoc_line_ctrl,
rd_regl(port, ureg->sirfsoc_line_ctrl) |
SIRFUART_LOOP_BACK);
else
wr_regl(port, ureg->sirfsoc_mode1,
rd_regl(port, ureg->sirfsoc_mode1) |
SIRFSOC_USP_LOOP_BACK_CTRL);
} else {
if (sirfport->uart_reg->uart_type == SIRF_REAL_UART)
wr_regl(port, ureg->sirfsoc_line_ctrl,
rd_regl(port, ureg->sirfsoc_line_ctrl) &
~SIRFUART_LOOP_BACK);
else
wr_regl(port, ureg->sirfsoc_mode1,
rd_regl(port, ureg->sirfsoc_mode1) &
~SIRFSOC_USP_LOOP_BACK_CTRL);
}
if (!sirfport->hw_flow_ctrl || !sirfport->ms_enabled)
return;
if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
@ -182,16 +156,19 @@ static void sirfsoc_uart_stop_tx(struct uart_port *port)
rd_regl(port, ureg->sirfsoc_int_en_reg) &
~uint_en->sirfsoc_txfifo_empty_en);
else
wr_regl(port, SIRFUART_INT_EN_CLR,
wr_regl(port, ureg->sirfsoc_int_en_clr_reg,
uint_en->sirfsoc_txfifo_empty_en);
}
} else {
if (sirfport->uart_reg->uart_type == SIRF_USP_UART)
wr_regl(port, ureg->sirfsoc_tx_rx_en, rd_regl(port,
ureg->sirfsoc_tx_rx_en) & ~SIRFUART_TX_EN);
if (!sirfport->is_atlas7)
wr_regl(port, ureg->sirfsoc_int_en_reg,
rd_regl(port, ureg->sirfsoc_int_en_reg) &
~uint_en->sirfsoc_txfifo_empty_en);
else
wr_regl(port, SIRFUART_INT_EN_CLR,
wr_regl(port, ureg->sirfsoc_int_en_clr_reg,
uint_en->sirfsoc_txfifo_empty_en);
}
}
@ -222,7 +199,7 @@ static void sirfsoc_uart_tx_with_dma(struct sirfsoc_uart_port *sirfport)
rd_regl(port, ureg->sirfsoc_int_en_reg)&
~(uint_en->sirfsoc_txfifo_empty_en));
else
wr_regl(port, SIRFUART_INT_EN_CLR,
wr_regl(port, ureg->sirfsoc_int_en_clr_reg,
uint_en->sirfsoc_txfifo_empty_en);
/*
* DMA requires buffer address and buffer length are both aligned with
@ -290,8 +267,11 @@ static void sirfsoc_uart_start_tx(struct uart_port *port)
if (sirfport->tx_dma_chan)
sirfsoc_uart_tx_with_dma(sirfport);
else {
sirfsoc_uart_pio_tx_chars(sirfport,
SIRFSOC_UART_IO_TX_REASONABLE_CNT);
if (sirfport->uart_reg->uart_type == SIRF_USP_UART)
wr_regl(port, ureg->sirfsoc_tx_rx_en, rd_regl(port,
ureg->sirfsoc_tx_rx_en) | SIRFUART_TX_EN);
wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_STOP);
sirfsoc_uart_pio_tx_chars(sirfport, port->fifosize);
wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_START);
if (!sirfport->is_atlas7)
wr_regl(port, ureg->sirfsoc_int_en_reg,
@ -314,21 +294,25 @@ static void sirfsoc_uart_stop_rx(struct uart_port *port)
if (!sirfport->is_atlas7)
wr_regl(port, ureg->sirfsoc_int_en_reg,
rd_regl(port, ureg->sirfsoc_int_en_reg) &
~(SIRFUART_RX_DMA_INT_EN(port, uint_en) |
~(SIRFUART_RX_DMA_INT_EN(uint_en,
sirfport->uart_reg->uart_type) |
uint_en->sirfsoc_rx_done_en));
else
wr_regl(port, SIRFUART_INT_EN_CLR,
SIRFUART_RX_DMA_INT_EN(port, uint_en)|
wr_regl(port, ureg->sirfsoc_int_en_clr_reg,
SIRFUART_RX_DMA_INT_EN(uint_en,
sirfport->uart_reg->uart_type)|
uint_en->sirfsoc_rx_done_en);
dmaengine_terminate_all(sirfport->rx_dma_chan);
} else {
if (!sirfport->is_atlas7)
wr_regl(port, ureg->sirfsoc_int_en_reg,
rd_regl(port, ureg->sirfsoc_int_en_reg)&
~(SIRFUART_RX_IO_INT_EN(port, uint_en)));
~(SIRFUART_RX_IO_INT_EN(uint_en,
sirfport->uart_reg->uart_type)));
else
wr_regl(port, SIRFUART_INT_EN_CLR,
SIRFUART_RX_IO_INT_EN(port, uint_en));
wr_regl(port, ureg->sirfsoc_int_en_clr_reg,
SIRFUART_RX_IO_INT_EN(uint_en,
sirfport->uart_reg->uart_type));
}
}
@ -349,7 +333,7 @@ static void sirfsoc_uart_disable_ms(struct uart_port *port)
rd_regl(port, ureg->sirfsoc_int_en_reg)&
~uint_en->sirfsoc_cts_en);
else
wr_regl(port, SIRFUART_INT_EN_CLR,
wr_regl(port, ureg->sirfsoc_int_en_clr_reg,
uint_en->sirfsoc_cts_en);
} else
disable_irq(gpio_to_irq(sirfport->cts_gpio));
@ -379,7 +363,8 @@ static void sirfsoc_uart_enable_ms(struct uart_port *port)
if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
wr_regl(port, ureg->sirfsoc_afc_ctrl,
rd_regl(port, ureg->sirfsoc_afc_ctrl) |
SIRFUART_AFC_TX_EN | SIRFUART_AFC_RX_EN);
SIRFUART_AFC_TX_EN | SIRFUART_AFC_RX_EN |
SIRFUART_AFC_CTRL_RX_THD);
if (!sirfport->is_atlas7)
wr_regl(port, ureg->sirfsoc_int_en_reg,
rd_regl(port, ureg->sirfsoc_int_en_reg)
@ -417,7 +402,7 @@ sirfsoc_uart_pio_rx_chars(struct uart_port *port, unsigned int max_rx_count)
if (!tty)
return -ENODEV;
while (!(rd_regl(port, ureg->sirfsoc_rx_fifo_status) &
ufifo_st->ff_empty(port->line))) {
ufifo_st->ff_empty(port))) {
ch = rd_regl(port, ureg->sirfsoc_rx_fifo_data) |
SIRFUART_DUMMY_READ;
if (unlikely(uart_handle_sysrq_char(port, ch)))
@ -444,7 +429,7 @@ sirfsoc_uart_pio_tx_chars(struct sirfsoc_uart_port *sirfport, int count)
unsigned int num_tx = 0;
while (!uart_circ_empty(xmit) &&
!(rd_regl(port, ureg->sirfsoc_tx_fifo_status) &
ufifo_st->ff_full(port->line)) &&
ufifo_st->ff_full(port)) &&
count--) {
wr_regl(port, ureg->sirfsoc_tx_fifo_data,
xmit->buf[xmit->tail]);
@ -478,139 +463,6 @@ static void sirfsoc_uart_tx_dma_complete_callback(void *param)
spin_unlock_irqrestore(&port->lock, flags);
}
static void sirfsoc_uart_insert_rx_buf_to_tty(
struct sirfsoc_uart_port *sirfport, int count)
{
struct uart_port *port = &sirfport->port;
struct tty_port *tport = &port->state->port;
int inserted;
inserted = tty_insert_flip_string(tport,
sirfport->rx_dma_items[sirfport->rx_completed].xmit.buf, count);
port->icount.rx += inserted;
}
static void sirfsoc_rx_submit_one_dma_desc(struct uart_port *port, int index)
{
struct sirfsoc_uart_port *sirfport = to_sirfport(port);
sirfport->rx_dma_items[index].xmit.tail =
sirfport->rx_dma_items[index].xmit.head = 0;
sirfport->rx_dma_items[index].desc =
dmaengine_prep_slave_single(sirfport->rx_dma_chan,
sirfport->rx_dma_items[index].dma_addr, SIRFSOC_RX_DMA_BUF_SIZE,
DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
if (!sirfport->rx_dma_items[index].desc) {
dev_err(port->dev, "DMA slave single fail\n");
return;
}
sirfport->rx_dma_items[index].desc->callback =
sirfsoc_uart_rx_dma_complete_callback;
sirfport->rx_dma_items[index].desc->callback_param = sirfport;
sirfport->rx_dma_items[index].cookie =
dmaengine_submit(sirfport->rx_dma_items[index].desc);
dma_async_issue_pending(sirfport->rx_dma_chan);
}
static void sirfsoc_rx_tmo_process_tl(unsigned long param)
{
struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)param;
struct uart_port *port = &sirfport->port;
struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
struct sirfsoc_int_status *uint_st = &sirfport->uart_reg->uart_int_st;
unsigned int count;
unsigned long flags;
struct dma_tx_state tx_state;
spin_lock_irqsave(&port->lock, flags);
while (DMA_COMPLETE == dmaengine_tx_status(sirfport->rx_dma_chan,
sirfport->rx_dma_items[sirfport->rx_completed].cookie, &tx_state)) {
sirfsoc_uart_insert_rx_buf_to_tty(sirfport,
SIRFSOC_RX_DMA_BUF_SIZE);
sirfport->rx_completed++;
sirfport->rx_completed %= SIRFSOC_RX_LOOP_BUF_CNT;
}
count = CIRC_CNT(sirfport->rx_dma_items[sirfport->rx_issued].xmit.head,
sirfport->rx_dma_items[sirfport->rx_issued].xmit.tail,
SIRFSOC_RX_DMA_BUF_SIZE);
if (count > 0)
sirfsoc_uart_insert_rx_buf_to_tty(sirfport, count);
wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) |
SIRFUART_IO_MODE);
sirfsoc_uart_pio_rx_chars(port, 4 - sirfport->rx_io_count);
if (sirfport->rx_io_count == 4) {
sirfport->rx_io_count = 0;
wr_regl(port, ureg->sirfsoc_int_st_reg,
uint_st->sirfsoc_rx_done);
if (!sirfport->is_atlas7)
wr_regl(port, ureg->sirfsoc_int_en_reg,
rd_regl(port, ureg->sirfsoc_int_en_reg) &
~(uint_en->sirfsoc_rx_done_en));
else
wr_regl(port, SIRFUART_INT_EN_CLR,
uint_en->sirfsoc_rx_done_en);
sirfsoc_uart_start_next_rx_dma(port);
} else {
wr_regl(port, ureg->sirfsoc_int_st_reg,
uint_st->sirfsoc_rx_done);
if (!sirfport->is_atlas7)
wr_regl(port, ureg->sirfsoc_int_en_reg,
rd_regl(port, ureg->sirfsoc_int_en_reg) |
(uint_en->sirfsoc_rx_done_en));
else
wr_regl(port, ureg->sirfsoc_int_en_reg,
uint_en->sirfsoc_rx_done_en);
}
spin_unlock_irqrestore(&port->lock, flags);
tty_flip_buffer_push(&port->state->port);
}
static void sirfsoc_uart_handle_rx_tmo(struct sirfsoc_uart_port *sirfport)
{
struct uart_port *port = &sirfport->port;
struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
struct dma_tx_state tx_state;
dmaengine_tx_status(sirfport->rx_dma_chan,
sirfport->rx_dma_items[sirfport->rx_issued].cookie, &tx_state);
dmaengine_terminate_all(sirfport->rx_dma_chan);
sirfport->rx_dma_items[sirfport->rx_issued].xmit.head =
SIRFSOC_RX_DMA_BUF_SIZE - tx_state.residue;
if (!sirfport->is_atlas7)
wr_regl(port, ureg->sirfsoc_int_en_reg,
rd_regl(port, ureg->sirfsoc_int_en_reg) &
~(uint_en->sirfsoc_rx_timeout_en));
else
wr_regl(port, SIRFUART_INT_EN_CLR,
uint_en->sirfsoc_rx_timeout_en);
tasklet_schedule(&sirfport->rx_tmo_process_tasklet);
}
static void sirfsoc_uart_handle_rx_done(struct sirfsoc_uart_port *sirfport)
{
struct uart_port *port = &sirfport->port;
struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
struct sirfsoc_int_status *uint_st = &sirfport->uart_reg->uart_int_st;
sirfsoc_uart_pio_rx_chars(port, 4 - sirfport->rx_io_count);
if (sirfport->rx_io_count == 4) {
sirfport->rx_io_count = 0;
if (!sirfport->is_atlas7)
wr_regl(port, ureg->sirfsoc_int_en_reg,
rd_regl(port, ureg->sirfsoc_int_en_reg) &
~(uint_en->sirfsoc_rx_done_en));
else
wr_regl(port, SIRFUART_INT_EN_CLR,
uint_en->sirfsoc_rx_done_en);
wr_regl(port, ureg->sirfsoc_int_st_reg,
uint_st->sirfsoc_rx_timeout);
sirfsoc_uart_start_next_rx_dma(port);
}
}
static irqreturn_t sirfsoc_uart_isr(int irq, void *dev_id)
{
unsigned long intr_status;
@ -628,20 +480,25 @@ static irqreturn_t sirfsoc_uart_isr(int irq, void *dev_id)
intr_status = rd_regl(port, ureg->sirfsoc_int_st_reg);
wr_regl(port, ureg->sirfsoc_int_st_reg, intr_status);
intr_status &= rd_regl(port, ureg->sirfsoc_int_en_reg);
if (unlikely(intr_status & (SIRFUART_ERR_INT_STAT(port, uint_st)))) {
if (unlikely(intr_status & (SIRFUART_ERR_INT_STAT(uint_st,
sirfport->uart_reg->uart_type)))) {
if (intr_status & uint_st->sirfsoc_rxd_brk) {
port->icount.brk++;
if (uart_handle_break(port))
goto recv_char;
}
if (intr_status & uint_st->sirfsoc_rx_oflow)
if (intr_status & uint_st->sirfsoc_rx_oflow) {
port->icount.overrun++;
flag = TTY_OVERRUN;
}
if (intr_status & uint_st->sirfsoc_frm_err) {
port->icount.frame++;
flag = TTY_FRAME;
}
if (intr_status & uint_st->sirfsoc_parity_err)
if (intr_status & uint_st->sirfsoc_parity_err) {
port->icount.parity++;
flag = TTY_PARITY;
}
wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_RESET);
wr_regl(port, ureg->sirfsoc_rx_fifo_op, 0);
wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_START);
@ -662,15 +519,51 @@ recv_char:
uart_handle_cts_change(port, cts_status);
wake_up_interruptible(&state->port.delta_msr_wait);
}
if (sirfport->rx_dma_chan) {
if (intr_status & uint_st->sirfsoc_rx_timeout)
sirfsoc_uart_handle_rx_tmo(sirfport);
if (intr_status & uint_st->sirfsoc_rx_done)
sirfsoc_uart_handle_rx_done(sirfport);
if (!sirfport->rx_dma_chan &&
(intr_status & SIRFUART_RX_IO_INT_ST(uint_st))) {
/*
 * The chip raises RX_TIMEOUT continuously while the RX FIFO is
 * empty, and not when the RX FIFO receives data within the timeout
 * window, so keying off RX_TIMEOUT alone produces a flood of
 * useless interrupts on an idle line. RX_DONE fires as soon as the
 * RX FIFO receives one byte, so use RX_DONE to wait for incoming
 * data, RX_THD/RX_FULL for bulk reception, and RX_TIMEOUT only to
 * drain the last remaining bytes.
 */
if (intr_status & uint_st->sirfsoc_rx_done) {
if (!sirfport->is_atlas7) {
wr_regl(port, ureg->sirfsoc_int_en_reg,
rd_regl(port, ureg->sirfsoc_int_en_reg)
& ~(uint_en->sirfsoc_rx_done_en));
wr_regl(port, ureg->sirfsoc_int_en_reg,
rd_regl(port, ureg->sirfsoc_int_en_reg)
| (uint_en->sirfsoc_rx_timeout_en));
} else {
if (intr_status & SIRFUART_RX_IO_INT_ST(uint_st))
sirfsoc_uart_pio_rx_chars(port,
SIRFSOC_UART_IO_RX_MAX_CNT);
wr_regl(port, ureg->sirfsoc_int_en_clr_reg,
uint_en->sirfsoc_rx_done_en);
wr_regl(port, ureg->sirfsoc_int_en_reg,
uint_en->sirfsoc_rx_timeout_en);
}
} else {
if (intr_status & uint_st->sirfsoc_rx_timeout) {
if (!sirfport->is_atlas7) {
wr_regl(port, ureg->sirfsoc_int_en_reg,
rd_regl(port, ureg->sirfsoc_int_en_reg)
& ~(uint_en->sirfsoc_rx_timeout_en));
wr_regl(port, ureg->sirfsoc_int_en_reg,
rd_regl(port, ureg->sirfsoc_int_en_reg)
| (uint_en->sirfsoc_rx_done_en));
} else {
wr_regl(port,
ureg->sirfsoc_int_en_clr_reg,
uint_en->sirfsoc_rx_timeout_en);
wr_regl(port, ureg->sirfsoc_int_en_reg,
uint_en->sirfsoc_rx_done_en);
}
}
sirfsoc_uart_pio_rx_chars(port, port->fifosize);
}
}
spin_unlock(&port->lock);
tty_flip_buffer_push(&state->port);
@ -684,10 +577,10 @@ recv_char:
return IRQ_HANDLED;
} else {
sirfsoc_uart_pio_tx_chars(sirfport,
SIRFSOC_UART_IO_TX_REASONABLE_CNT);
port->fifosize);
if ((uart_circ_empty(xmit)) &&
(rd_regl(port, ureg->sirfsoc_tx_fifo_status) &
ufifo_st->ff_empty(port->line)))
ufifo_st->ff_empty(port)))
sirfsoc_uart_stop_tx(port);
}
}
@ -697,41 +590,8 @@ recv_char:
return IRQ_HANDLED;
}
static void sirfsoc_uart_rx_dma_complete_tl(unsigned long param)
{
struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)param;
struct uart_port *port = &sirfport->port;
struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
unsigned long flags;
struct dma_tx_state tx_state;
spin_lock_irqsave(&port->lock, flags);
while (DMA_COMPLETE == dmaengine_tx_status(sirfport->rx_dma_chan,
sirfport->rx_dma_items[sirfport->rx_completed].cookie, &tx_state)) {
sirfsoc_uart_insert_rx_buf_to_tty(sirfport,
SIRFSOC_RX_DMA_BUF_SIZE);
if (rd_regl(port, ureg->sirfsoc_int_en_reg) &
uint_en->sirfsoc_rx_timeout_en)
sirfsoc_rx_submit_one_dma_desc(port,
sirfport->rx_completed++);
else
sirfport->rx_completed++;
sirfport->rx_completed %= SIRFSOC_RX_LOOP_BUF_CNT;
}
spin_unlock_irqrestore(&port->lock, flags);
tty_flip_buffer_push(&port->state->port);
}
static void sirfsoc_uart_rx_dma_complete_callback(void *param)
{
struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)param;
unsigned long flags;
spin_lock_irqsave(&sirfport->port.lock, flags);
sirfport->rx_issued++;
sirfport->rx_issued %= SIRFSOC_RX_LOOP_BUF_CNT;
tasklet_schedule(&sirfport->rx_dma_complete_tasklet);
spin_unlock_irqrestore(&sirfport->port.lock, flags);
}
/* submit rx dma task into dmaengine */
@ -740,21 +600,36 @@ static void sirfsoc_uart_start_next_rx_dma(struct uart_port *port)
struct sirfsoc_uart_port *sirfport = to_sirfport(port);
struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
int i;
sirfport->rx_io_count = 0;
wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) &
~SIRFUART_IO_MODE);
for (i = 0; i < SIRFSOC_RX_LOOP_BUF_CNT; i++)
sirfsoc_rx_submit_one_dma_desc(port, i);
sirfport->rx_completed = sirfport->rx_issued = 0;
sirfport->rx_dma_items.xmit.tail =
sirfport->rx_dma_items.xmit.head = 0;
sirfport->rx_dma_items.desc =
dmaengine_prep_dma_cyclic(sirfport->rx_dma_chan,
sirfport->rx_dma_items.dma_addr, SIRFSOC_RX_DMA_BUF_SIZE,
SIRFSOC_RX_DMA_BUF_SIZE / 2,
DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
if (IS_ERR_OR_NULL(sirfport->rx_dma_items.desc)) {
dev_err(port->dev, "DMA slave single fail\n");
return;
}
sirfport->rx_dma_items.desc->callback =
sirfsoc_uart_rx_dma_complete_callback;
sirfport->rx_dma_items.desc->callback_param = sirfport;
sirfport->rx_dma_items.cookie =
dmaengine_submit(sirfport->rx_dma_items.desc);
dma_async_issue_pending(sirfport->rx_dma_chan);
if (!sirfport->is_atlas7)
wr_regl(port, ureg->sirfsoc_int_en_reg,
rd_regl(port, ureg->sirfsoc_int_en_reg) |
SIRFUART_RX_DMA_INT_EN(port, uint_en));
SIRFUART_RX_DMA_INT_EN(uint_en,
sirfport->uart_reg->uart_type));
else
wr_regl(port, ureg->sirfsoc_int_en_reg,
SIRFUART_RX_DMA_INT_EN(port, uint_en));
SIRFUART_RX_DMA_INT_EN(uint_en,
sirfport->uart_reg->uart_type));
}
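Reduced to its essentials, the receive path above now programs a single cyclic ring on the dmaengine slave channel instead of juggling several one-shot descriptors. A minimal sketch of that pattern, assuming the channel and the coherent buffer are already set up (the helper name and constants are illustrative, mirroring SIRFSOC_RX_DMA_BUF_SIZE and its half-size period):

	#include <linux/dmaengine.h>
	#include <linux/dma-mapping.h>

	#define RX_BUF_SIZE	(1024 * 32)
	#define RX_PERIOD	(RX_BUF_SIZE / 2)	/* callback fires twice per ring */

	static int start_cyclic_rx(struct dma_chan *chan, dma_addr_t dma_addr,
				   void (*cb)(void *), void *cb_arg)
	{
		struct dma_async_tx_descriptor *desc;

		desc = dmaengine_prep_dma_cyclic(chan, dma_addr, RX_BUF_SIZE,
						 RX_PERIOD, DMA_DEV_TO_MEM,
						 DMA_PREP_INTERRUPT);
		if (!desc)
			return -ENOMEM;

		desc->callback = cb;		/* invoked once per completed period */
		desc->callback_param = cb_arg;
		dmaengine_submit(desc);		/* cookie can later feed dmaengine_tx_status() */
		dma_async_issue_pending(chan);	/* start the transfer */
		return 0;
	}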
static void sirfsoc_uart_start_rx(struct uart_port *port)
@ -773,10 +648,12 @@ static void sirfsoc_uart_start_rx(struct uart_port *port)
if (!sirfport->is_atlas7)
wr_regl(port, ureg->sirfsoc_int_en_reg,
rd_regl(port, ureg->sirfsoc_int_en_reg) |
SIRFUART_RX_IO_INT_EN(port, uint_en));
SIRFUART_RX_IO_INT_EN(uint_en,
sirfport->uart_reg->uart_type));
else
wr_regl(port, ureg->sirfsoc_int_en_reg,
SIRFUART_RX_IO_INT_EN(port, uint_en));
SIRFUART_RX_IO_INT_EN(uint_en,
sirfport->uart_reg->uart_type));
}
}
@ -789,7 +666,7 @@ sirfsoc_usp_calc_sample_div(unsigned long set_rate,
unsigned long ioclk_div = 0;
unsigned long temp_delta;
for (sample_div = SIRF_MIN_SAMPLE_DIV;
for (sample_div = SIRF_USP_MIN_SAMPLE_DIV;
sample_div <= SIRF_MAX_SAMPLE_DIV; sample_div++) {
temp_delta = ioclk_rate -
(ioclk_rate + (set_rate * sample_div) / 2)
@ -910,9 +787,10 @@ static void sirfsoc_uart_set_termios(struct uart_port *port,
config_reg |= SIRFUART_STICK_BIT_MARK;
else
config_reg |= SIRFUART_STICK_BIT_SPACE;
} else if (termios->c_cflag & PARODD) {
config_reg |= SIRFUART_STICK_BIT_ODD;
} else {
if (termios->c_cflag & PARODD)
config_reg |= SIRFUART_STICK_BIT_ODD;
else
config_reg |= SIRFUART_STICK_BIT_EVEN;
}
}
@ -976,7 +854,7 @@ static void sirfsoc_uart_set_termios(struct uart_port *port,
wr_regl(port, ureg->sirfsoc_tx_fifo_op,
(txfifo_op_reg & ~SIRFUART_FIFO_START));
if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
config_reg |= SIRFUART_RECV_TIMEOUT(port, rx_time_out);
config_reg |= SIRFUART_UART_RECV_TIMEOUT(rx_time_out);
wr_regl(port, ureg->sirfsoc_line_ctrl, config_reg);
} else {
/*tx frame ctrl*/
@ -999,7 +877,7 @@ static void sirfsoc_uart_set_termios(struct uart_port *port,
wr_regl(port, ureg->sirfsoc_rx_frame_ctrl, len_val);
/*async param*/
wr_regl(port, ureg->sirfsoc_async_param_reg,
(SIRFUART_RECV_TIMEOUT(port, rx_time_out)) |
(SIRFUART_USP_RECV_TIMEOUT(rx_time_out)) |
(sample_div_reg & SIRFSOC_USP_ASYNC_DIV2_MASK) <<
SIRFSOC_USP_ASYNC_DIV2_OFFSET);
}
@ -1011,6 +889,7 @@ static void sirfsoc_uart_set_termios(struct uart_port *port,
wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl, SIRFUART_DMA_MODE);
else
wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl, SIRFUART_IO_MODE);
sirfport->rx_period_time = 20000000;
/* Reset Rx/Tx FIFO Threshold level for proper baudrate */
if (set_baud < 1000000)
threshold_div = 1;
@ -1032,19 +911,10 @@ static void sirfsoc_uart_pm(struct uart_port *port, unsigned int state,
unsigned int oldstate)
{
struct sirfsoc_uart_port *sirfport = to_sirfport(port);
if (!state) {
if (sirfport->is_bt_uart) {
clk_prepare_enable(sirfport->clk_noc);
clk_prepare_enable(sirfport->clk_general);
}
if (!state)
clk_prepare_enable(sirfport->clk);
} else {
else
clk_disable_unprepare(sirfport->clk);
if (sirfport->is_bt_uart) {
clk_disable_unprepare(sirfport->clk_general);
clk_disable_unprepare(sirfport->clk_noc);
}
}
}
static int sirfsoc_uart_startup(struct uart_port *port)
@ -1064,7 +934,6 @@ static int sirfsoc_uart_startup(struct uart_port *port)
index, port->irq);
goto irq_err;
}
/* initial hardware settings */
wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl,
rd_regl(port, ureg->sirfsoc_tx_dma_io_ctrl) |
@ -1072,6 +941,9 @@ static int sirfsoc_uart_startup(struct uart_port *port)
wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) |
SIRFUART_IO_MODE);
wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) &
~SIRFUART_RX_DMA_FLUSH);
wr_regl(port, ureg->sirfsoc_tx_dma_io_len, 0);
wr_regl(port, ureg->sirfsoc_rx_dma_io_len, 0);
wr_regl(port, ureg->sirfsoc_tx_rx_en, SIRFUART_RX_EN | SIRFUART_TX_EN);
@ -1080,7 +952,6 @@ static int sirfsoc_uart_startup(struct uart_port *port)
SIRFSOC_USP_ENDIAN_CTRL_LSBF |
SIRFSOC_USP_EN);
wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_RESET);
wr_regl(port, ureg->sirfsoc_tx_fifo_op, 0);
wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_RESET);
wr_regl(port, ureg->sirfsoc_rx_fifo_op, 0);
wr_regl(port, ureg->sirfsoc_tx_fifo_ctrl, SIRFUART_FIFO_THD(port));
@ -1110,8 +981,16 @@ static int sirfsoc_uart_startup(struct uart_port *port)
goto init_rx_err;
}
}
enable_irq(port->irq);
if (sirfport->rx_dma_chan && !sirfport->is_hrt_enabled) {
sirfport->is_hrt_enabled = true;
sirfport->rx_period_time = 20000000;
sirfport->rx_dma_items.xmit.tail =
sirfport->rx_dma_items.xmit.head = 0;
hrtimer_start(&sirfport->hrt,
ns_to_ktime(sirfport->rx_period_time),
HRTIMER_MODE_REL);
}
return 0;
init_rx_err:
@ -1127,7 +1006,7 @@ static void sirfsoc_uart_shutdown(struct uart_port *port)
if (!sirfport->is_atlas7)
wr_regl(port, ureg->sirfsoc_int_en_reg, 0);
else
wr_regl(port, SIRFUART_INT_EN_CLR, ~0UL);
wr_regl(port, ureg->sirfsoc_int_en_clr_reg, ~0UL);
free_irq(port->irq, sirfport);
if (sirfport->ms_enabled)
@ -1139,6 +1018,13 @@ static void sirfsoc_uart_shutdown(struct uart_port *port)
}
if (sirfport->tx_dma_chan)
sirfport->tx_dma_state = TX_DMA_IDLE;
if (sirfport->rx_dma_chan && sirfport->is_hrt_enabled) {
while ((rd_regl(port, ureg->sirfsoc_rx_fifo_status) &
SIRFUART_RX_FIFO_MASK) > 0)
;
sirfport->is_hrt_enabled = false;
hrtimer_cancel(&sirfport->hrt);
}
}
static const char *sirfsoc_uart_type(struct uart_port *port)
@ -1196,27 +1082,29 @@ sirfsoc_uart_console_setup(struct console *co, char *options)
unsigned int bits = 8;
unsigned int parity = 'n';
unsigned int flow = 'n';
struct uart_port *port = &sirfsoc_uart_ports[co->index].port;
struct sirfsoc_uart_port *sirfport = to_sirfport(port);
struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
struct sirfsoc_uart_port *sirfport;
struct sirfsoc_register *ureg;
if (co->index < 0 || co->index >= SIRFSOC_UART_NR)
return -EINVAL;
if (!port->mapbase)
co->index = 1;
sirfport = sirf_ports[co->index];
if (!sirfport)
return -ENODEV;
ureg = &sirfport->uart_reg->uart_reg;
if (!sirfport->port.mapbase)
return -ENODEV;
/* enable usp in mode1 register */
if (sirfport->uart_reg->uart_type == SIRF_USP_UART)
wr_regl(port, ureg->sirfsoc_mode1, SIRFSOC_USP_EN |
wr_regl(&sirfport->port, ureg->sirfsoc_mode1, SIRFSOC_USP_EN |
SIRFSOC_USP_ENDIAN_CTRL_LSBF);
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
port->cons = co;
sirfport->port.cons = co;
/* default console tx/rx transfer using io mode */
sirfport->rx_dma_chan = NULL;
sirfport->tx_dma_chan = NULL;
return uart_set_options(port, co, baud, parity, bits, flow);
return uart_set_options(&sirfport->port, co, baud, parity, bits, flow);
}
static void sirfsoc_uart_console_putchar(struct uart_port *port, int ch)
@ -1224,8 +1112,8 @@ static void sirfsoc_uart_console_putchar(struct uart_port *port, int ch)
struct sirfsoc_uart_port *sirfport = to_sirfport(port);
struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
struct sirfsoc_fifo_status *ufifo_st = &sirfport->uart_reg->fifo_status;
while (rd_regl(port,
ureg->sirfsoc_tx_fifo_status) & ufifo_st->ff_full(port->line))
while (rd_regl(port, ureg->sirfsoc_tx_fifo_status) &
ufifo_st->ff_full(port))
cpu_relax();
wr_regl(port, ureg->sirfsoc_tx_fifo_data, ch);
}
@ -1233,8 +1121,10 @@ static void sirfsoc_uart_console_putchar(struct uart_port *port, int ch)
static void sirfsoc_uart_console_write(struct console *co, const char *s,
unsigned int count)
{
struct uart_port *port = &sirfsoc_uart_ports[co->index].port;
uart_console_write(port, s, count, sirfsoc_uart_console_putchar);
struct sirfsoc_uart_port *sirfport = sirf_ports[co->index];
uart_console_write(&sirfport->port, s, count,
sirfsoc_uart_console_putchar);
}
static struct console sirfsoc_uart_console = {
@ -1269,10 +1159,75 @@ static struct uart_driver sirfsoc_uart_drv = {
#endif
};
static const struct of_device_id sirfsoc_uart_ids[] = {
static enum hrtimer_restart
sirfsoc_uart_rx_dma_hrtimer_callback(struct hrtimer *hrt)
{
struct sirfsoc_uart_port *sirfport;
struct uart_port *port;
int count, inserted;
struct dma_tx_state tx_state;
struct tty_struct *tty;
struct sirfsoc_register *ureg;
struct circ_buf *xmit;
sirfport = container_of(hrt, struct sirfsoc_uart_port, hrt);
port = &sirfport->port;
inserted = 0;
tty = port->state->port.tty;
ureg = &sirfport->uart_reg->uart_reg;
xmit = &sirfport->rx_dma_items.xmit;
dmaengine_tx_status(sirfport->rx_dma_chan,
sirfport->rx_dma_items.cookie, &tx_state);
xmit->head = SIRFSOC_RX_DMA_BUF_SIZE - tx_state.residue;
count = CIRC_CNT_TO_END(xmit->head, xmit->tail,
SIRFSOC_RX_DMA_BUF_SIZE);
while (count > 0) {
inserted = tty_insert_flip_string(tty->port,
(const unsigned char *)&xmit->buf[xmit->tail], count);
if (!inserted)
goto next_hrt;
port->icount.rx += inserted;
xmit->tail = (xmit->tail + inserted) &
(SIRFSOC_RX_DMA_BUF_SIZE - 1);
count = CIRC_CNT_TO_END(xmit->head, xmit->tail,
SIRFSOC_RX_DMA_BUF_SIZE);
tty_flip_buffer_push(tty->port);
}
/*
 * If all of the RX DMA buffer has been pushed into the tty buffer
 * and only a little data (less than one DMA transfer unit) is left
 * in the RX FIFO, fetch it in PIO mode and switch back to DMA
 * immediately.
 */
if (!inserted && !count &&
((rd_regl(port, ureg->sirfsoc_rx_fifo_status) &
SIRFUART_RX_FIFO_MASK) > 0)) {
/* switch to pio mode */
wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) |
SIRFUART_IO_MODE);
while ((rd_regl(port, ureg->sirfsoc_rx_fifo_status) &
SIRFUART_RX_FIFO_MASK) > 0) {
if (sirfsoc_uart_pio_rx_chars(port, 16) > 0)
tty_flip_buffer_push(tty->port);
}
wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_RESET);
wr_regl(port, ureg->sirfsoc_rx_fifo_op, 0);
wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_START);
/* switch back to dma mode */
wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) &
~SIRFUART_IO_MODE);
}
next_hrt:
hrtimer_forward_now(hrt, ns_to_ktime(sirfport->rx_period_time));
return HRTIMER_RESTART;
}
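Stripped of the UART specifics, the callback above is the standard self-rearming hrtimer pattern; a minimal sketch with an illustrative 20 ms period (the value rx_period_time is set to elsewhere in this patch):

	#include <linux/hrtimer.h>
	#include <linux/ktime.h>

	static struct hrtimer poll_timer;
	static unsigned long poll_period_ns = 20 * 1000 * 1000;	/* 20 ms */

	static enum hrtimer_restart poll_cb(struct hrtimer *hrt)
	{
		/* ... periodic work goes here ... */
		hrtimer_forward_now(hrt, ns_to_ktime(poll_period_ns));
		return HRTIMER_RESTART;			/* keep firing */
	}

	static void poll_start(void)
	{
		hrtimer_init(&poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		poll_timer.function = poll_cb;
		hrtimer_start(&poll_timer, ns_to_ktime(poll_period_ns),
			      HRTIMER_MODE_REL);
	}

	static void poll_stop(void)
	{
		hrtimer_cancel(&poll_timer);		/* waits for a running callback */
	}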
static struct of_device_id sirfsoc_uart_ids[] = {
{ .compatible = "sirf,prima2-uart", .data = &sirfsoc_uart,},
{ .compatible = "sirf,atlas7-uart", .data = &sirfsoc_uart},
{ .compatible = "sirf,prima2-usp-uart", .data = &sirfsoc_usp},
{ .compatible = "sirf,atlas7-usp-uart", .data = &sirfsoc_usp},
{}
};
MODULE_DEVICE_TABLE(of, sirfsoc_uart_ids);
@ -1283,7 +1238,6 @@ static int sirfsoc_uart_probe(struct platform_device *pdev)
struct uart_port *port;
struct resource *res;
int ret;
int i, j;
struct dma_slave_config slv_cfg = {
.src_maxburst = 2,
};
@ -1293,16 +1247,15 @@ static int sirfsoc_uart_probe(struct platform_device *pdev)
const struct of_device_id *match;
match = of_match_node(sirfsoc_uart_ids, pdev->dev.of_node);
if (of_property_read_u32(pdev->dev.of_node, "cell-index", &pdev->id)) {
dev_err(&pdev->dev,
"Unable to find cell-index in uart node.\n");
ret = -EFAULT;
sirfport = devm_kzalloc(&pdev->dev, sizeof(*sirfport), GFP_KERNEL);
if (!sirfport) {
ret = -ENOMEM;
goto err;
}
if (of_device_is_compatible(pdev->dev.of_node, "sirf,prima2-usp-uart"))
pdev->id += ((struct sirfsoc_uart_register *)
match->data)->uart_param.register_uart_nr;
sirfport = &sirfsoc_uart_ports[pdev->id];
sirfport->port.line = of_alias_get_id(pdev->dev.of_node, "serial");
sirf_ports[sirfport->port.line] = sirfport;
sirfport->port.iotype = UPIO_MEM;
sirfport->port.flags = UPF_BOOT_AUTOCONF;
port = &sirfport->port;
port->dev = &pdev->dev;
port->private_data = sirfport;
@ -1310,9 +1263,12 @@ static int sirfsoc_uart_probe(struct platform_device *pdev)
sirfport->hw_flow_ctrl = of_property_read_bool(pdev->dev.of_node,
"sirf,uart-has-rtscts");
if (of_device_is_compatible(pdev->dev.of_node, "sirf,prima2-uart"))
if (of_device_is_compatible(pdev->dev.of_node, "sirf,prima2-uart") ||
of_device_is_compatible(pdev->dev.of_node, "sirf,atlas7-uart"))
sirfport->uart_reg->uart_type = SIRF_REAL_UART;
if (of_device_is_compatible(pdev->dev.of_node, "sirf,prima2-usp-uart")) {
if (of_device_is_compatible(pdev->dev.of_node,
"sirf,prima2-usp-uart") || of_device_is_compatible(
pdev->dev.of_node, "sirf,atlas7-usp-uart")) {
sirfport->uart_reg->uart_type = SIRF_USP_UART;
if (!sirfport->hw_flow_ctrl)
goto usp_no_flow_control;
@ -1350,7 +1306,8 @@ static int sirfsoc_uart_probe(struct platform_device *pdev)
gpio_direction_output(sirfport->rts_gpio, 1);
}
usp_no_flow_control:
if (of_device_is_compatible(pdev->dev.of_node, "sirf,atlas7-uart"))
if (of_device_is_compatible(pdev->dev.of_node, "sirf,atlas7-uart") ||
of_device_is_compatible(pdev->dev.of_node, "sirf,atlas7-usp-uart"))
sirfport->is_atlas7 = true;
if (of_property_read_u32(pdev->dev.of_node,
@ -1368,12 +1325,9 @@ usp_no_flow_control:
ret = -EFAULT;
goto err;
}
tasklet_init(&sirfport->rx_dma_complete_tasklet,
sirfsoc_uart_rx_dma_complete_tl, (unsigned long)sirfport);
tasklet_init(&sirfport->rx_tmo_process_tasklet,
sirfsoc_rx_tmo_process_tl, (unsigned long)sirfport);
port->mapbase = res->start;
port->membase = devm_ioremap(&pdev->dev, res->start, resource_size(res));
port->membase = devm_ioremap(&pdev->dev,
res->start, resource_size(res));
if (!port->membase) {
dev_err(&pdev->dev, "Cannot remap resource.\n");
ret = -ENOMEM;
@ -1393,20 +1347,6 @@ usp_no_flow_control:
goto err;
}
port->uartclk = clk_get_rate(sirfport->clk);
if (of_device_is_compatible(pdev->dev.of_node, "sirf,atlas7-bt-uart")) {
sirfport->clk_general = devm_clk_get(&pdev->dev, "general");
if (IS_ERR(sirfport->clk_general)) {
ret = PTR_ERR(sirfport->clk_general);
goto err;
}
sirfport->clk_noc = devm_clk_get(&pdev->dev, "noc");
if (IS_ERR(sirfport->clk_noc)) {
ret = PTR_ERR(sirfport->clk_noc);
goto err;
}
sirfport->is_bt_uart = true;
} else
sirfport->is_bt_uart = false;
port->ops = &sirfsoc_uart_ops;
spin_lock_init(&port->lock);
@ -1419,30 +1359,32 @@ usp_no_flow_control:
}
sirfport->rx_dma_chan = dma_request_slave_channel(port->dev, "rx");
for (i = 0; sirfport->rx_dma_chan && i < SIRFSOC_RX_LOOP_BUF_CNT; i++) {
sirfport->rx_dma_items[i].xmit.buf =
sirfport->rx_dma_items.xmit.buf =
dma_alloc_coherent(port->dev, SIRFSOC_RX_DMA_BUF_SIZE,
&sirfport->rx_dma_items[i].dma_addr, GFP_KERNEL);
if (!sirfport->rx_dma_items[i].xmit.buf) {
&sirfport->rx_dma_items.dma_addr, GFP_KERNEL);
if (!sirfport->rx_dma_items.xmit.buf) {
dev_err(port->dev, "Uart alloc bufa failed\n");
ret = -ENOMEM;
goto alloc_coherent_err;
}
sirfport->rx_dma_items[i].xmit.head =
sirfport->rx_dma_items[i].xmit.tail = 0;
}
sirfport->rx_dma_items.xmit.head =
sirfport->rx_dma_items.xmit.tail = 0;
if (sirfport->rx_dma_chan)
dmaengine_slave_config(sirfport->rx_dma_chan, &slv_cfg);
sirfport->tx_dma_chan = dma_request_slave_channel(port->dev, "tx");
if (sirfport->tx_dma_chan)
dmaengine_slave_config(sirfport->tx_dma_chan, &tx_slv_cfg);
if (sirfport->rx_dma_chan) {
hrtimer_init(&sirfport->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
sirfport->hrt.function = sirfsoc_uart_rx_dma_hrtimer_callback;
sirfport->is_hrt_enabled = false;
}
return 0;
alloc_coherent_err:
for (j = 0; j < i; j++)
dma_free_coherent(port->dev, SIRFSOC_RX_DMA_BUF_SIZE,
sirfport->rx_dma_items[j].xmit.buf,
sirfport->rx_dma_items[j].dma_addr);
sirfport->rx_dma_items.xmit.buf,
sirfport->rx_dma_items.dma_addr);
dma_release_channel(sirfport->rx_dma_chan);
err:
return ret;
@ -1454,13 +1396,11 @@ static int sirfsoc_uart_remove(struct platform_device *pdev)
struct uart_port *port = &sirfport->port;
uart_remove_one_port(&sirfsoc_uart_drv, port);
if (sirfport->rx_dma_chan) {
int i;
dmaengine_terminate_all(sirfport->rx_dma_chan);
dma_release_channel(sirfport->rx_dma_chan);
for (i = 0; i < SIRFSOC_RX_LOOP_BUF_CNT; i++)
dma_free_coherent(port->dev, SIRFSOC_RX_DMA_BUF_SIZE,
sirfport->rx_dma_items[i].xmit.buf,
sirfport->rx_dma_items[i].dma_addr);
sirfport->rx_dma_items.xmit.buf,
sirfport->rx_dma_items.dma_addr);
}
if (sirfport->tx_dma_chan) {
dmaengine_terminate_all(sirfport->tx_dma_chan);

View File

@ -6,11 +6,11 @@
* Licensed under GPLv2 or later.
*/
#include <linux/bitops.h>
#include <linux/log2.h>
#include <linux/hrtimer.h>
struct sirfsoc_uart_param {
const char *uart_name;
const char *port_name;
u32 uart_nr;
u32 register_uart_nr;
};
struct sirfsoc_register {
@ -21,6 +21,7 @@ struct sirfsoc_register {
u32 sirfsoc_tx_rx_en;
u32 sirfsoc_int_en_reg;
u32 sirfsoc_int_st_reg;
u32 sirfsoc_int_en_clr_reg;
u32 sirfsoc_tx_dma_io_ctrl;
u32 sirfsoc_tx_dma_io_len;
u32 sirfsoc_tx_fifo_ctrl;
@ -45,8 +46,8 @@ struct sirfsoc_register {
u32 sirfsoc_async_param_reg;
};
typedef u32 (*fifo_full_mask)(int line);
typedef u32 (*fifo_empty_mask)(int line);
typedef u32 (*fifo_full_mask)(struct uart_port *port);
typedef u32 (*fifo_empty_mask)(struct uart_port *port);
struct sirfsoc_fifo_status {
fifo_full_mask ff_full;
@ -105,21 +106,20 @@ struct sirfsoc_uart_register {
enum sirfsoc_uart_type uart_type;
};
u32 usp_ff_full(int line)
u32 uart_usp_ff_full_mask(struct uart_port *port)
{
return 0x80;
u32 full_bit;
full_bit = ilog2(port->fifosize);
return (1 << full_bit);
}
u32 usp_ff_empty(int line)
u32 uart_usp_ff_empty_mask(struct uart_port *port)
{
return 0x100;
}
u32 uart_ff_full(int line)
{
return (line == 1) ? (0x20) : (0x80);
}
u32 uart_ff_empty(int line)
{
return (line == 1) ? (0x40) : (0x100);
u32 empty_bit;
empty_bit = ilog2(port->fifosize) + 1;
return (1 << empty_bit);
}
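The fixed masks these helpers replace fall straight out of the FIFO size; for example (sizes illustrative):

	/*
	 * port->fifosize == 128:  ilog2(128) = 7 -> full mask  = 1 << 7 = 0x080
	 *                                           empty mask = 1 << 8 = 0x100
	 * port->fifosize == 32:   ilog2(32)  = 5 -> full mask  = 1 << 5 = 0x020
	 *                                           empty mask = 1 << 6 = 0x040
	 */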
struct sirfsoc_uart_register sirfsoc_usp = {
.uart_reg = {
@ -145,6 +145,7 @@ struct sirfsoc_uart_register sirfsoc_usp = {
.sirfsoc_rx_fifo_op = 0x0130,
.sirfsoc_rx_fifo_status = 0x0134,
.sirfsoc_rx_fifo_data = 0x0138,
.sirfsoc_int_en_clr_reg = 0x140,
},
.uart_int_en = {
.sirfsoc_rx_done_en = BIT(0),
@ -177,14 +178,12 @@ struct sirfsoc_uart_register sirfsoc_usp = {
.sirfsoc_rxd_brk = BIT(15),
},
.fifo_status = {
.ff_full = usp_ff_full,
.ff_empty = usp_ff_empty,
.ff_full = uart_usp_ff_full_mask,
.ff_empty = uart_usp_ff_empty_mask,
},
.uart_param = {
.uart_name = "ttySiRF",
.port_name = "sirfsoc-uart",
.uart_nr = 2,
.register_uart_nr = 3,
},
};
@ -195,6 +194,7 @@ struct sirfsoc_uart_register sirfsoc_uart = {
.sirfsoc_divisor = 0x0050,
.sirfsoc_int_en_reg = 0x0054,
.sirfsoc_int_st_reg = 0x0058,
.sirfsoc_int_en_clr_reg = 0x0060,
.sirfsoc_tx_dma_io_ctrl = 0x0100,
.sirfsoc_tx_dma_io_len = 0x0104,
.sirfsoc_tx_fifo_ctrl = 0x0108,
@ -249,14 +249,12 @@ struct sirfsoc_uart_register sirfsoc_uart = {
.sirfsoc_rts = BIT(15),
},
.fifo_status = {
.ff_full = uart_ff_full,
.ff_empty = uart_ff_empty,
.ff_full = uart_usp_ff_full_mask,
.ff_empty = uart_usp_ff_empty_mask,
},
.uart_param = {
.uart_name = "ttySiRF",
.port_name = "sirfsoc_uart",
.uart_nr = 3,
.register_uart_nr = 0,
},
};
/* uart io ctrl */
@ -296,10 +294,10 @@ struct sirfsoc_uart_register sirfsoc_uart = {
#define SIRFUART_IO_MODE BIT(0)
#define SIRFUART_DMA_MODE 0x0
#define SIRFUART_RX_DMA_FLUSH 0x4
/* Macro Specific*/
#define SIRFUART_INT_EN_CLR 0x0060
/* Baud Rate Calculation */
#define SIRF_USP_MIN_SAMPLE_DIV 0x1
#define SIRF_MIN_SAMPLE_DIV 0xf
#define SIRF_MAX_SAMPLE_DIV 0x3f
#define SIRF_IOCLK_DIV_MAX 0xffff
@ -326,55 +324,54 @@ struct sirfsoc_uart_register sirfsoc_uart = {
#define SIRFSOC_USP_RX_CLK_DIVISOR_OFFSET 24
#define SIRFSOC_USP_ASYNC_DIV2_MASK 0x3f
#define SIRFSOC_USP_ASYNC_DIV2_OFFSET 16
#define SIRFSOC_USP_LOOP_BACK_CTRL BIT(2)
/* USP-UART Common */
#define SIRFSOC_UART_RX_TIMEOUT(br, to) (((br) * (((to) + 999) / 1000)) / 1000)
#define SIRFUART_RECV_TIMEOUT_VALUE(x) \
(((x) > 0xFFFF) ? 0xFFFF : ((x) & 0xFFFF))
#define SIRFUART_RECV_TIMEOUT(port, x) \
(((port)->line > 2) ? (x & 0xFFFF) : ((x) & 0xFFFF) << 16)
#define SIRFUART_USP_RECV_TIMEOUT(x) (x & 0xFFFF)
#define SIRFUART_UART_RECV_TIMEOUT(x) ((x & 0xFFFF) << 16)
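Worked through with illustrative numbers (neither value is taken from this hunk):

	/*
	 * SIRFSOC_UART_RX_TIMEOUT(115200, 20000)
	 *   = (115200 * ((20000 + 999) / 1000)) / 1000
	 *   = (115200 * 20) / 1000
	 *   = 2304
	 * SIRFUART_UART_RECV_TIMEOUT() then masks the result to 16 bits and
	 * shifts it into bits 31:16; SIRFUART_USP_RECV_TIMEOUT() keeps it in
	 * bits 15:0.
	 */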
#define SIRFUART_FIFO_THD(port) ((port->line) == 1 ? 16 : 64)
#define SIRFUART_ERR_INT_STAT(port, unit_st) \
#define SIRFUART_FIFO_THD(port) (port->fifosize >> 1)
#define SIRFUART_ERR_INT_STAT(unit_st, uart_type) \
(uint_st->sirfsoc_rx_oflow | \
uint_st->sirfsoc_frm_err | \
uint_st->sirfsoc_rxd_brk | \
((port->line > 2) ? 0 : uint_st->sirfsoc_parity_err))
#define SIRFUART_RX_IO_INT_EN(port, uint_en) \
(uint_en->sirfsoc_rx_timeout_en |\
((uart_type != SIRF_REAL_UART) ? \
0 : uint_st->sirfsoc_parity_err))
#define SIRFUART_RX_IO_INT_EN(uint_en, uart_type) \
(uint_en->sirfsoc_rx_done_en |\
uint_en->sirfsoc_rxfifo_thd_en |\
uint_en->sirfsoc_rxfifo_full_en |\
uint_en->sirfsoc_frm_err_en |\
uint_en->sirfsoc_rx_oflow_en |\
uint_en->sirfsoc_rxd_brk_en |\
((port->line > 2) ? 0 : uint_en->sirfsoc_parity_err_en))
((uart_type != SIRF_REAL_UART) ? \
0 : uint_en->sirfsoc_parity_err_en))
#define SIRFUART_RX_IO_INT_ST(uint_st) \
(uint_st->sirfsoc_rx_timeout |\
uint_st->sirfsoc_rxfifo_thd |\
uint_st->sirfsoc_rxfifo_full)
(uint_st->sirfsoc_rxfifo_thd |\
uint_st->sirfsoc_rxfifo_full|\
uint_st->sirfsoc_rx_done |\
uint_st->sirfsoc_rx_timeout)
#define SIRFUART_CTS_INT_ST(uint_st) (uint_st->sirfsoc_cts)
#define SIRFUART_RX_DMA_INT_EN(port, uint_en) \
(uint_en->sirfsoc_rx_timeout_en |\
uint_en->sirfsoc_frm_err_en |\
#define SIRFUART_RX_DMA_INT_EN(uint_en, uart_type) \
(uint_en->sirfsoc_frm_err_en |\
uint_en->sirfsoc_rx_oflow_en |\
uint_en->sirfsoc_rxd_brk_en |\
((port->line > 2) ? 0 : uint_en->sirfsoc_parity_err_en))
((uart_type != SIRF_REAL_UART) ? \
0 : uint_en->sirfsoc_parity_err_en))
/* Generic Definitions */
#define SIRFSOC_UART_NAME "ttySiRF"
#define SIRFSOC_UART_MAJOR 0
#define SIRFSOC_UART_MINOR 0
#define SIRFUART_PORT_NAME "sirfsoc-uart"
#define SIRFUART_MAP_SIZE 0x200
#define SIRFSOC_UART_NR 6
#define SIRFSOC_UART_NR 11
#define SIRFSOC_PORT_TYPE 0xa5
/* Uart Common Use Macro*/
#define SIRFSOC_RX_DMA_BUF_SIZE 256
#define SIRFSOC_RX_DMA_BUF_SIZE (1024 * 32)
#define BYTES_TO_ALIGN(dma_addr) ((unsigned long)(dma_addr) & 0x3)
#define LOOP_DMA_BUFA_FILL 1
#define LOOP_DMA_BUFB_FILL 2
#define TX_TRAN_PIO 1
#define TX_TRAN_DMA 2
/* Uart Fifo Level Chk */
#define SIRFUART_TX_FIFO_SC_OFFSET 0
#define SIRFUART_TX_FIFO_LC_OFFSET 10
@ -389,8 +386,8 @@ struct sirfsoc_uart_register sirfsoc_uart = {
#define SIRFUART_RX_FIFO_CHK_SC SIRFUART_TX_FIFO_CHK_SC
#define SIRFUART_RX_FIFO_CHK_LC SIRFUART_TX_FIFO_CHK_LC
#define SIRFUART_RX_FIFO_CHK_HC SIRFUART_TX_FIFO_CHK_HC
#define SIRFUART_RX_FIFO_MASK 0x7f
/* Indicate how many buffers used */
#define SIRFSOC_RX_LOOP_BUF_CNT 2
/* For Fast Baud Rate Calculation */
struct sirfsoc_baudrate_to_regv {
@ -404,7 +401,7 @@ enum sirfsoc_tx_state {
TX_DMA_PAUSE,
};
struct sirfsoc_loop_buffer {
struct sirfsoc_rx_buffer {
struct circ_buf xmit;
dma_cookie_t cookie;
struct dma_async_tx_descriptor *desc;
@ -417,10 +414,6 @@ struct sirfsoc_uart_port {
struct uart_port port;
struct clk *clk;
/* UART6 for BT usage in A7DA platform need multi-clock source */
bool is_bt_uart;
struct clk *clk_general;
struct clk *clk_noc;
/* for SiRFatlas7, there are SET/CLR for UART_INT_EN */
bool is_atlas7;
struct sirfsoc_uart_register *uart_reg;
@ -428,17 +421,16 @@ struct sirfsoc_uart_port {
struct dma_chan *tx_dma_chan;
dma_addr_t tx_dma_addr;
struct dma_async_tx_descriptor *tx_dma_desc;
struct tasklet_struct rx_dma_complete_tasklet;
struct tasklet_struct rx_tmo_process_tasklet;
unsigned int rx_io_count;
unsigned long transfer_size;
enum sirfsoc_tx_state tx_dma_state;
unsigned int cts_gpio;
unsigned int rts_gpio;
struct sirfsoc_loop_buffer rx_dma_items[SIRFSOC_RX_LOOP_BUF_CNT];
int rx_completed;
int rx_issued;
struct sirfsoc_rx_buffer rx_dma_items;
struct hrtimer hrt;
bool is_hrt_enabled;
unsigned long rx_period_time;
};
/* Register Access Control */
@ -447,10 +439,6 @@ struct sirfsoc_uart_port {
#define wr_regl(port, reg, val) __raw_writel(val, portaddr(port, reg))
/* UART Port Mask */
#define SIRFUART_FIFOLEVEL_MASK(port) ((port->line == 1) ? (0x1f) : (0x7f))
#define SIRFUART_FIFOFULL_MASK(port) ((port->line == 1) ? (0x20) : (0x80))
#define SIRFUART_FIFOEMPTY_MASK(port) ((port->line == 1) ? (0x40) : (0x100))
/* I/O Mode */
#define SIRFSOC_UART_IO_RX_MAX_CNT 256
#define SIRFSOC_UART_IO_TX_REASONABLE_CNT 256
#define SIRFUART_FIFOLEVEL_MASK(port) ((port->fifosize - 1) & 0xFFF)
#define SIRFUART_FIFOFULL_MASK(port) (port->fifosize & 0xFFF)
#define SIRFUART_FIFOEMPTY_MASK(port) ((port->fifosize & 0xFFF) << 1)

View File

@ -1075,7 +1075,8 @@ static void cdns_uart_console_putchar(struct uart_port *port, int ch)
writel(ch, port->membase + CDNS_UART_FIFO_OFFSET);
}
static void cdns_early_write(struct console *con, const char *s, unsigned n)
static void __init cdns_early_write(struct console *con, const char *s,
unsigned n)
{
struct earlycon_device *dev = con->data;

View File

@ -4410,7 +4410,8 @@ static void synclink_cleanup(void)
printk("Unloading %s: %s\n", driver_name, driver_version);
if (serial_driver) {
if ((rc = tty_unregister_driver(serial_driver)))
rc = tty_unregister_driver(serial_driver);
if (rc)
printk("%s(%d) failed to unregister tty driver err=%d\n",
__FILE__,__LINE__,rc);
put_tty_driver(serial_driver);
@ -7751,7 +7752,8 @@ static int hdlcdev_open(struct net_device *dev)
printk("%s:hdlcdev_open(%s)\n",__FILE__,dev->name);
/* generic HDLC layer open processing */
if ((rc = hdlc_open(dev)))
rc = hdlc_open(dev);
if (rc)
return rc;
/* arbitrate between network and tty opens */
@ -8018,7 +8020,8 @@ static int hdlcdev_init(struct mgsl_struct *info)
/* allocate and initialize network and HDLC layer objects */
if (!(dev = alloc_hdlcdev(info))) {
dev = alloc_hdlcdev(info);
if (!dev) {
printk(KERN_ERR "%s:hdlc device allocation failure\n",__FILE__);
return -ENOMEM;
}
@ -8039,7 +8042,8 @@ static int hdlcdev_init(struct mgsl_struct *info)
hdlc->xmit = hdlcdev_xmit;
/* register objects with HDLC layer */
if ((rc = register_hdlc_device(dev))) {
rc = register_hdlc_device(dev);
if (rc) {
printk(KERN_WARNING "%s:unable to register hdlc device\n",__FILE__);
free_netdev(dev);
return rc;
@ -8075,7 +8079,8 @@ static int synclink_init_one (struct pci_dev *dev,
return -EIO;
}
if (!(info = mgsl_allocate_device())) {
info = mgsl_allocate_device();
if (!info) {
printk("can't allocate device instance data.\n");
return -EIO;
}

View File

@ -1539,7 +1539,8 @@ static int hdlcdev_open(struct net_device *dev)
DBGINFO(("%s hdlcdev_open\n", dev->name));
/* generic HDLC layer open processing */
if ((rc = hdlc_open(dev)))
rc = hdlc_open(dev);
if (rc)
return rc;
/* arbitrate between network and tty opens */
@ -1803,7 +1804,8 @@ static int hdlcdev_init(struct slgt_info *info)
/* allocate and initialize network and HDLC layer objects */
if (!(dev = alloc_hdlcdev(info))) {
dev = alloc_hdlcdev(info);
if (!dev) {
printk(KERN_ERR "%s hdlc device alloc failure\n", info->device_name);
return -ENOMEM;
}
@ -1824,7 +1826,8 @@ static int hdlcdev_init(struct slgt_info *info)
hdlc->xmit = hdlcdev_xmit;
/* register objects with HDLC layer */
if ((rc = register_hdlc_device(dev))) {
rc = register_hdlc_device(dev);
if (rc) {
printk(KERN_WARNING "%s:unable to register hdlc device\n",__FILE__);
free_netdev(dev);
return rc;
@ -1879,7 +1882,8 @@ static void rx_async(struct slgt_info *info)
stat = 0;
if ((status = *(p+1) & (BIT1 + BIT0))) {
status = *(p + 1) & (BIT1 + BIT0);
if (status) {
if (status & BIT1)
icount->parity++;
else if (status & BIT0)
@ -3755,7 +3759,8 @@ static void slgt_cleanup(void)
if (serial_driver) {
for (info=slgt_device_list ; info != NULL ; info=info->next_device)
tty_unregister_device(serial_driver, info->line);
if ((rc = tty_unregister_driver(serial_driver)))
rc = tty_unregister_driver(serial_driver);
if (rc)
DBGERR(("tty_unregister_driver error=%d\n", rc));
put_tty_driver(serial_driver);
}

View File

@ -1655,7 +1655,8 @@ static int hdlcdev_open(struct net_device *dev)
printk("%s:hdlcdev_open(%s)\n",__FILE__,dev->name);
/* generic HDLC layer open processing */
if ((rc = hdlc_open(dev)))
rc = hdlc_open(dev);
if (rc)
return rc;
/* arbitrate between network and tty opens */
@ -1922,7 +1923,8 @@ static int hdlcdev_init(SLMP_INFO *info)
/* allocate and initialize network and HDLC layer objects */
if (!(dev = alloc_hdlcdev(info))) {
dev = alloc_hdlcdev(info);
if (!dev) {
printk(KERN_ERR "%s:hdlc device allocation failure\n",__FILE__);
return -ENOMEM;
}
@ -1943,7 +1945,8 @@ static int hdlcdev_init(SLMP_INFO *info)
hdlc->xmit = hdlcdev_xmit;
/* register objects with HDLC layer */
if ((rc = register_hdlc_device(dev))) {
rc = register_hdlc_device(dev);
if (rc) {
printk(KERN_WARNING "%s:unable to register hdlc device\n",__FILE__);
free_netdev(dev);
return rc;
@ -3920,7 +3923,8 @@ static void synclinkmp_cleanup(void)
printk("Unloading %s %s\n", driver_name, driver_version);
if (serial_driver) {
if ((rc = tty_unregister_driver(serial_driver)))
rc = tty_unregister_driver(serial_driver);
if (rc)
printk("%s(%d) failed to unregister tty driver err=%d\n",
__FILE__,__LINE__,rc);
put_tty_driver(serial_driver);

View File

@ -55,9 +55,6 @@
static int __read_mostly sysrq_enabled = CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE;
static bool __read_mostly sysrq_always_enabled;
unsigned short platform_sysrq_reset_seq[] __weak = { KEY_RESERVED };
int sysrq_reset_downtime_ms __weak;
static bool sysrq_on(void)
{
return sysrq_enabled || sysrq_always_enabled;
@ -569,6 +566,7 @@ void handle_sysrq(int key)
EXPORT_SYMBOL(handle_sysrq);
#ifdef CONFIG_INPUT
static int sysrq_reset_downtime_ms;
/* Simple translation table for the SysRq keys */
static const unsigned char sysrq_xlate[KEY_CNT] =
@ -949,23 +947,8 @@ static bool sysrq_handler_registered;
static inline void sysrq_register_handler(void)
{
unsigned short key;
int error;
int i;
/* First check if a __weak interface was instantiated. */
for (i = 0; i < ARRAY_SIZE(sysrq_reset_seq); i++) {
key = platform_sysrq_reset_seq[i];
if (key == KEY_RESERVED || key > KEY_MAX)
break;
sysrq_reset_seq[sysrq_reset_seq_len++] = key;
}
/*
* DT configuration takes precedence over anything that would
* have been defined via the __weak interface.
*/
sysrq_of_get_keyreset_config();
error = input_register_handler(&sysrq_handler);

View File

@ -286,7 +286,8 @@ static int __tty_buffer_request_room(struct tty_port *port, size_t size,
change = (b->flags & TTYB_NORMAL) && (~flags & TTYB_NORMAL);
if (change || left < size) {
/* This is the slow path - looking for new buffers to use */
if ((n = tty_buffer_alloc(port, size)) != NULL) {
n = tty_buffer_alloc(port, size);
if (n != NULL) {
n->flags = flags;
buf->tail = n;
b->commit = b->used;

View File

@ -235,7 +235,6 @@ static void tty_del_file(struct file *file)
/**
* tty_name - return tty naming
* @tty: tty structure
* @buf: buffer for output
*
* Convert a tty structure into a name. The name reflects the kernel
* naming policy and if udev is in use may not reflect user space
@ -243,13 +242,11 @@ static void tty_del_file(struct file *file)
* Locking: none
*/
char *tty_name(struct tty_struct *tty, char *buf)
const char *tty_name(const struct tty_struct *tty)
{
if (!tty) /* Hmm. NULL pointer. That's fun. */
strcpy(buf, "NULL tty");
else
strcpy(buf, tty->name);
return buf;
return "NULL tty";
return tty->name;
}
EXPORT_SYMBOL(tty_name);
@ -770,8 +767,7 @@ static void do_tty_hangup(struct work_struct *work)
void tty_hangup(struct tty_struct *tty)
{
#ifdef TTY_DEBUG_HANGUP
char buf[64];
printk(KERN_DEBUG "%s hangup...\n", tty_name(tty, buf));
printk(KERN_DEBUG "%s hangup...\n", tty_name(tty));
#endif
schedule_work(&tty->hangup_work);
}
@ -790,9 +786,7 @@ EXPORT_SYMBOL(tty_hangup);
void tty_vhangup(struct tty_struct *tty)
{
#ifdef TTY_DEBUG_HANGUP
char buf[64];
printk(KERN_DEBUG "%s vhangup...\n", tty_name(tty, buf));
printk(KERN_DEBUG "%s vhangup...\n", tty_name(tty));
#endif
__tty_hangup(tty, 0);
}
@ -831,9 +825,7 @@ void tty_vhangup_self(void)
static void tty_vhangup_session(struct tty_struct *tty)
{
#ifdef TTY_DEBUG_HANGUP
char buf[64];
printk(KERN_DEBUG "%s vhangup session...\n", tty_name(tty, buf));
printk(KERN_DEBUG "%s vhangup session...\n", tty_name(tty));
#endif
__tty_hangup(tty, 1);
}
@ -1769,7 +1761,6 @@ int tty_release(struct inode *inode, struct file *filp)
struct tty_struct *o_tty = NULL;
int do_sleep, final;
int idx;
char buf[64];
long timeout = 0;
int once = 1;
@ -1793,7 +1784,7 @@ int tty_release(struct inode *inode, struct file *filp)
#ifdef TTY_DEBUG_HANGUP
printk(KERN_DEBUG "%s: %s (tty count=%d)...\n", __func__,
tty_name(tty, buf), tty->count);
tty_name(tty), tty->count);
#endif
if (tty->ops->close)
@ -1844,7 +1835,7 @@ int tty_release(struct inode *inode, struct file *filp)
if (once) {
once = 0;
printk(KERN_WARNING "%s: %s: read/write wait queue active!\n",
__func__, tty_name(tty, buf));
__func__, tty_name(tty));
}
schedule_timeout_killable(timeout);
if (timeout < 120 * HZ)
@ -1856,13 +1847,13 @@ int tty_release(struct inode *inode, struct file *filp)
if (o_tty) {
if (--o_tty->count < 0) {
printk(KERN_WARNING "%s: bad pty slave count (%d) for %s\n",
__func__, o_tty->count, tty_name(o_tty, buf));
__func__, o_tty->count, tty_name(o_tty));
o_tty->count = 0;
}
}
if (--tty->count < 0) {
printk(KERN_WARNING "%s: bad tty->count (%d) for %s\n",
__func__, tty->count, tty_name(tty, buf));
__func__, tty->count, tty_name(tty));
tty->count = 0;
}
@ -1905,7 +1896,7 @@ int tty_release(struct inode *inode, struct file *filp)
return 0;
#ifdef TTY_DEBUG_HANGUP
printk(KERN_DEBUG "%s: %s: final close\n", __func__, tty_name(tty, buf));
printk(KERN_DEBUG "%s: %s: final close\n", __func__, tty_name(tty));
#endif
/*
* Ask the line discipline code to release its structures
@ -1916,7 +1907,8 @@ int tty_release(struct inode *inode, struct file *filp)
tty_flush_works(tty);
#ifdef TTY_DEBUG_HANGUP
printk(KERN_DEBUG "%s: %s: freeing structure...\n", __func__, tty_name(tty, buf));
printk(KERN_DEBUG "%s: %s: freeing structure...\n", __func__,
tty_name(tty));
#endif
/*
* The release_tty function takes care of the details of clearing

View File

@ -211,9 +211,7 @@ int tty_unthrottle_safe(struct tty_struct *tty)
void tty_wait_until_sent(struct tty_struct *tty, long timeout)
{
#ifdef TTY_DEBUG_WAIT_UNTIL_SENT
char buf[64];
printk(KERN_DEBUG "%s wait until sent...\n", tty_name(tty, buf));
printk(KERN_DEBUG "%s wait until sent...\n", tty_name(tty));
#endif
if (!timeout)
timeout = MAX_SCHEDULE_TIMEOUT;

View File

@ -23,8 +23,7 @@
#ifdef LDISC_DEBUG_HANGUP
#define tty_ldisc_debug(tty, f, args...) ({ \
char __b[64]; \
printk(KERN_DEBUG "%s: %s: " f, __func__, tty_name(tty, __b), ##args); \
printk(KERN_DEBUG "%s: %s: " f, __func__, tty_name(tty), ##args); \
})
#else
#define tty_ldisc_debug(tty, f, args...)
@ -483,7 +482,6 @@ static void tty_ldisc_close(struct tty_struct *tty, struct tty_ldisc *ld)
static void tty_ldisc_restore(struct tty_struct *tty, struct tty_ldisc *old)
{
char buf[64];
struct tty_ldisc *new_ldisc;
int r;
@ -504,7 +502,7 @@ static void tty_ldisc_restore(struct tty_struct *tty, struct tty_ldisc *old)
if (r < 0)
panic("Couldn't open N_TTY ldisc for "
"%s --- error %d.",
tty_name(tty, buf), r);
tty_name(tty), r);
}
}

View File

@ -299,7 +299,8 @@ down_write_failed(struct ld_semaphore *sem, long count, long timeout)
timeout = schedule_timeout(timeout);
raw_spin_lock_irq(&sem->wait_lock);
set_task_state(tsk, TASK_UNINTERRUPTIBLE);
if ((locked = writer_trylock(sem)))
locked = writer_trylock(sem);
if (locked)
break;
}

View File

@ -261,7 +261,9 @@ u16 inverse_translate(struct vc_data *conp, int glyph, int use_unicode)
int m;
if (glyph < 0 || glyph >= MAX_GLYPH)
return 0;
else if (!(p = *conp->vc_uni_pagedir_loc))
else {
p = *conp->vc_uni_pagedir_loc;
if (!p)
return glyph;
else if (use_unicode) {
if (!p->inverse_trans_unicode)
@ -275,6 +277,7 @@ u16 inverse_translate(struct vc_data *conp, int glyph, int use_unicode)
else
return p->inverse_translations[m][glyph];
}
}
}
EXPORT_SYMBOL_GPL(inverse_translate);
@ -397,7 +400,8 @@ static void con_release_unimap(struct uni_pagedir *p)
if (p == dflt) dflt = NULL;
for (i = 0; i < 32; i++) {
if ((p1 = p->uni_pgdir[i]) != NULL) {
p1 = p->uni_pgdir[i];
if (p1 != NULL) {
for (j = 0; j < 32; j++)
kfree(p1[j]);
kfree(p1);
@ -473,14 +477,16 @@ con_insert_unipair(struct uni_pagedir *p, u_short unicode, u_short fontpos)
int i, n;
u16 **p1, *p2;
if (!(p1 = p->uni_pgdir[n = unicode >> 11])) {
p1 = p->uni_pgdir[n = unicode >> 11];
if (!p1) {
p1 = p->uni_pgdir[n] = kmalloc(32*sizeof(u16 *), GFP_KERNEL);
if (!p1) return -ENOMEM;
for (i = 0; i < 32; i++)
p1[i] = NULL;
}
if (!(p2 = p1[n = (unicode >> 6) & 0x1f])) {
p2 = p1[n = (unicode >> 6) & 0x1f];
if (!p2) {
p2 = p1[n] = kmalloc(64*sizeof(u16), GFP_KERNEL);
if (!p2) return -ENOMEM;
memset(p2, 0xff, 64*sizeof(u16)); /* No glyphs for the characters (yet) */
@ -569,10 +575,12 @@ int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list)
* entries from "p" (old) to "q" (new).
*/
l = 0; /* unicode value */
for (i = 0; i < 32; i++)
if ((p1 = p->uni_pgdir[i]))
for (j = 0; j < 32; j++)
if ((p2 = p1[j])) {
for (i = 0; i < 32; i++) {
p1 = p->uni_pgdir[i];
if (p1)
for (j = 0; j < 32; j++) {
p2 = p1[j];
if (p2) {
for (k = 0; k < 64; k++, l++)
if (p2[k] != 0xffff) {
/*
@ -593,9 +601,11 @@ int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list)
/* Account for row of 64 empty entries */
l += 64;
}
}
else
/* Account for empty table */
l += 32 * 64;
}
/*
* Finished copying font table, set vc_uni_pagedir to new table
@ -735,10 +745,12 @@ int con_get_unimap(struct vc_data *vc, ushort ct, ushort __user *uct, struct uni
ect = 0;
if (*vc->vc_uni_pagedir_loc) {
p = *vc->vc_uni_pagedir_loc;
for (i = 0; i < 32; i++)
if ((p1 = p->uni_pgdir[i]))
for (j = 0; j < 32; j++)
if ((p2 = *(p1++)))
for (i = 0; i < 32; i++) {
p1 = p->uni_pgdir[i];
if (p1)
for (j = 0; j < 32; j++) {
p2 = *(p1++);
if (p2)
for (k = 0; k < 64; k++) {
if (*p2 < MAX_GLYPH && ect++ < ct) {
__put_user((u_short)((i<<11)+(j<<6)+k),
@ -750,6 +762,8 @@ int con_get_unimap(struct vc_data *vc, ushort ct, ushort __user *uct, struct uni
p2++;
}
}
}
}
__put_user(ect, uct);
console_unlock();
return ((ect <= ct) ? 0 : -ENOMEM);

View File

@ -108,6 +108,7 @@
#define CON_DRIVER_FLAG_MODULE 1
#define CON_DRIVER_FLAG_INIT 2
#define CON_DRIVER_FLAG_ATTR 4
#define CON_DRIVER_FLAG_ZOMBIE 8
struct con_driver {
const struct consw *con;
@ -135,6 +136,7 @@ const struct consw *conswitchp;
*/
#define DEFAULT_BELL_PITCH 750
#define DEFAULT_BELL_DURATION (HZ/8)
#define DEFAULT_CURSOR_BLINK_MS 200
struct vc vc_cons [MAX_NR_CONSOLES];
@ -153,6 +155,7 @@ static int set_vesa_blanking(char __user *p);
static void set_cursor(struct vc_data *vc);
static void hide_cursor(struct vc_data *vc);
static void console_callback(struct work_struct *ignored);
static void con_driver_unregister_callback(struct work_struct *ignored);
static void blank_screen_t(unsigned long dummy);
static void set_palette(struct vc_data *vc);
@ -182,6 +185,7 @@ static int blankinterval = 10*60;
core_param(consoleblank, blankinterval, int, 0444);
static DECLARE_WORK(console_work, console_callback);
static DECLARE_WORK(con_driver_unregister_work, con_driver_unregister_callback);
/*
* fg_console is the current virtual console,
@ -1590,6 +1594,13 @@ static void setterm_command(struct vc_data *vc)
case 15: /* activate the previous console */
set_console(last_console);
break;
case 16: /* set cursor blink duration in msec */
if (vc->vc_npar >= 1 && vc->vc_par[1] >= 50 &&
vc->vc_par[1] <= USHRT_MAX)
vc->vc_cur_blink_ms = vc->vc_par[1];
else
vc->vc_cur_blink_ms = DEFAULT_CURSOR_BLINK_MS;
break;
}
}
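Case 16 can be exercised from user space with the same ESC [ ... ] termination the neighbouring setterm commands (bell pitch, bell duration) use; a minimal sketch, with the exact escape sequence treated as an assumption based on that convention:

	#include <stdio.h>

	int main(void)
	{
		printf("\033[16;500]");	/* request a 500 ms cursor blink period */
		fflush(stdout);
		return 0;
	}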
@ -1717,6 +1728,7 @@ static void reset_terminal(struct vc_data *vc, int do_clear)
vc->vc_bell_pitch = DEFAULT_BELL_PITCH;
vc->vc_bell_duration = DEFAULT_BELL_DURATION;
vc->vc_cur_blink_ms = DEFAULT_CURSOR_BLINK_MS;
gotoxy(vc, 0, 0);
save_cur(vc);
@ -3192,22 +3204,6 @@ err:
#ifdef CONFIG_VT_HW_CONSOLE_BINDING
static int con_is_graphics(const struct consw *csw, int first, int last)
{
int i, retval = 0;
for (i = first; i <= last; i++) {
struct vc_data *vc = vc_cons[i].d;
if (vc && vc->vc_mode == KD_GRAPHICS) {
retval = 1;
break;
}
}
return retval;
}
/* unlocked version of unbind_con_driver() */
int do_unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
{
@ -3293,8 +3289,7 @@ static int vt_bind(struct con_driver *con)
const struct consw *defcsw = NULL, *csw = NULL;
int i, more = 1, first = -1, last = -1, deflt = 0;
if (!con->con || !(con->flag & CON_DRIVER_FLAG_MODULE) ||
con_is_graphics(con->con, con->first, con->last))
if (!con->con || !(con->flag & CON_DRIVER_FLAG_MODULE))
goto err;
csw = con->con;
@ -3345,8 +3340,7 @@ static int vt_unbind(struct con_driver *con)
int i, more = 1, first = -1, last = -1, deflt = 0;
int ret;
if (!con->con || !(con->flag & CON_DRIVER_FLAG_MODULE) ||
con_is_graphics(con->con, con->first, con->last))
if (!con->con || !(con->flag & CON_DRIVER_FLAG_MODULE))
goto err;
csw = con->con;
@ -3596,7 +3590,8 @@ static int do_register_con_driver(const struct consw *csw, int first, int last)
for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
con_driver = &registered_con_driver[i];
if (con_driver->con == NULL) {
if (con_driver->con == NULL &&
!(con_driver->flag & CON_DRIVER_FLAG_ZOMBIE)) {
con_driver->con = csw;
con_driver->desc = desc;
con_driver->node = i;
@ -3658,16 +3653,20 @@ int do_unregister_con_driver(const struct consw *csw)
struct con_driver *con_driver = &registered_con_driver[i];
if (con_driver->con == csw) {
vtconsole_deinit_device(con_driver);
device_destroy(vtconsole_class,
MKDEV(0, con_driver->node));
/*
 * Defer the removal of the sysfs entries since that
 * will acquire the kernfs s_active lock, and we can't
 * take that lock while holding the console lock: the
 * unbind sysfs entry already imposes the opposite
 * order. Reset con here so that any later lookup
 * fails, and mark this slot as a zombie so it won't
 * be reused until we complete the removal in the
 * deferred work.
 */
con_driver->con = NULL;
con_driver->desc = NULL;
con_driver->dev = NULL;
con_driver->node = 0;
con_driver->flag = 0;
con_driver->first = 0;
con_driver->last = 0;
con_driver->flag = CON_DRIVER_FLAG_ZOMBIE;
schedule_work(&con_driver_unregister_work);
return 0;
}
}
@ -3676,6 +3675,39 @@ int do_unregister_con_driver(const struct consw *csw)
}
EXPORT_SYMBOL_GPL(do_unregister_con_driver);
static void con_driver_unregister_callback(struct work_struct *ignored)
{
int i;
console_lock();
for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
struct con_driver *con_driver = &registered_con_driver[i];
if (!(con_driver->flag & CON_DRIVER_FLAG_ZOMBIE))
continue;
console_unlock();
vtconsole_deinit_device(con_driver);
device_destroy(vtconsole_class, MKDEV(0, con_driver->node));
console_lock();
if (WARN_ON_ONCE(con_driver->con))
con_driver->con = NULL;
con_driver->desc = NULL;
con_driver->dev = NULL;
con_driver->node = 0;
WARN_ON_ONCE(con_driver->flag != CON_DRIVER_FLAG_ZOMBIE);
con_driver->flag = 0;
con_driver->first = 0;
con_driver->last = 0;
}
console_unlock();
}
/*
* If we support more console drivers, this function is used
* when a driver wants to take over some existing consoles

View File

@ -402,7 +402,7 @@ static void cursor_timer_handler(unsigned long dev_addr)
struct fbcon_ops *ops = info->fbcon_par;
queue_work(system_power_efficient_wq, &info->queue);
mod_timer(&ops->cursor_timer, jiffies + HZ/5);
mod_timer(&ops->cursor_timer, jiffies + ops->cur_blink_jiffies);
}
static void fbcon_add_cursor_timer(struct fb_info *info)
@ -417,7 +417,7 @@ static void fbcon_add_cursor_timer(struct fb_info *info)
init_timer(&ops->cursor_timer);
ops->cursor_timer.function = cursor_timer_handler;
ops->cursor_timer.expires = jiffies + HZ / 5;
ops->cursor_timer.expires = jiffies + ops->cur_blink_jiffies;
ops->cursor_timer.data = (unsigned long ) info;
add_timer(&ops->cursor_timer);
ops->flags |= FBCON_FLAGS_CURSOR_TIMER;
@ -1309,6 +1309,7 @@ static void fbcon_cursor(struct vc_data *vc, int mode)
if (fbcon_is_inactive(vc, info) || vc->vc_deccm != 1)
return;
ops->cur_blink_jiffies = msecs_to_jiffies(vc->vc_cur_blink_ms);
if (vc->vc_cursor_type & 0x10)
fbcon_del_cursor_timer(info);
else

View File

@ -70,6 +70,7 @@ struct fbcon_ops {
struct fb_cursor cursor_state;
struct display *p;
int currcon; /* Current VC. */
int cur_blink_jiffies;
int cursor_flash;
int cursor_reset;
int blank_state;

View File

@ -104,6 +104,7 @@ struct vc_data {
unsigned int vc_resize_user; /* resize request from user */
unsigned int vc_bell_pitch; /* Console bell pitch */
unsigned int vc_bell_duration; /* Console bell duration */
unsigned short vc_cur_blink_ms; /* Cursor blink duration */
struct vc_data **vc_display_fg; /* [!] Ptr to var holding fg console for this display */
struct uni_pagedir *vc_uni_pagedir;
struct uni_pagedir **vc_uni_pagedir_loc; /* [!] Location of uni_pagedir variable for this console */

View File

@ -529,6 +529,7 @@ enum rpm_request {
};
struct wakeup_source;
struct wake_irq;
struct pm_domain_data;
struct pm_subsys_data {
@ -568,6 +569,7 @@ struct dev_pm_info {
unsigned long timer_expires;
struct work_struct work;
wait_queue_head_t wait_queue;
struct wake_irq *wakeirq;
atomic_t usage_count;
atomic_t child_count;
unsigned int disable_depth:3;

View File

@ -0,0 +1,52 @@
/*
* pm_wakeirq.h - Device wakeirq helper functions
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed "as is" WITHOUT ANY WARRANTY of any
* kind, whether express or implied; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _LINUX_PM_WAKEIRQ_H
#define _LINUX_PM_WAKEIRQ_H
#ifdef CONFIG_PM
extern int dev_pm_set_wake_irq(struct device *dev, int irq);
extern int dev_pm_set_dedicated_wake_irq(struct device *dev,
int irq);
extern void dev_pm_clear_wake_irq(struct device *dev);
extern void dev_pm_enable_wake_irq(struct device *dev);
extern void dev_pm_disable_wake_irq(struct device *dev);
#else /* !CONFIG_PM */
static inline int dev_pm_set_wake_irq(struct device *dev, int irq)
{
return 0;
}
static inline int dev_pm_set_dedicated_wake_irq(struct device *dev,
int irq)
{
return 0;
}
static inline void dev_pm_clear_wake_irq(struct device *dev)
{
}
static inline void dev_pm_enable_wake_irq(struct device *dev)
{
}
static inline void dev_pm_disable_wake_irq(struct device *dev)
{
}
#endif /* CONFIG_PM */
#endif /* _LINUX_PM_WAKEIRQ_H */
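
For context, a minimal sketch of how a platform driver might adopt these helpers, in the spirit of the omap_hsmmc and serial conversions in this series. The second-IRQ-resource convention, driver name and error handling below are illustrative assumptions, not taken from any driver in this merge:

/* Illustrative wakeirq adoption sketch; not from this series. */
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_wakeirq.h>
#include <linux/pm_wakeup.h>

static int example_probe(struct platform_device *pdev)
{
	int wakeirq, ret;

	/* Assume the dedicated wake-up interrupt is the second IRQ resource. */
	wakeirq = platform_get_irq(pdev, 1);
	if (wakeirq < 0)
		return wakeirq;

	/* Mark the device wakeup-capable before handing over the IRQ. */
	device_init_wakeup(&pdev->dev, true);

	/* From here on the PM core arms/disarms this IRQ for the device. */
	ret = dev_pm_set_dedicated_wake_irq(&pdev->dev, wakeirq);
	if (ret) {
		device_init_wakeup(&pdev->dev, false);
		return ret;
	}

	return 0;
}

static int example_remove(struct platform_device *pdev)
{
	dev_pm_clear_wake_irq(&pdev->dev);
	device_init_wakeup(&pdev->dev, false);
	return 0;
}

static struct platform_driver example_driver = {
	.probe	= example_probe,
	.remove	= example_remove,
	.driver	= {
		.name = "example-wakeirq",
	},
};
module_platform_driver(example_driver);
MODULE_LICENSE("GPL");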


@@ -28,9 +28,17 @@
#include <linux/types.h>
struct wake_irq;
/**
* struct wakeup_source - Representation of wakeup sources
*
* @name: Name of the wakeup source
* @entry: Wakeup source list entry
* @lock: Wakeup source lock
* @wakeirq: Optional device specific wakeirq
* @timer: Wakeup timer list
* @timer_expires: Wakeup timer expiration
* @total_time: Total time this wakeup source has been active.
* @max_time: Maximum time this wakeup source has been continuously active.
* @last_time: Monotonic clock when the wakeup source was last touched.
@@ -47,6 +55,7 @@ struct wakeup_source {
const char *name;
struct list_head entry;
spinlock_t lock;
struct wake_irq *wakeirq;
struct timer_list timer;
unsigned long timer_expires;
ktime_t total_time;


@@ -12,6 +12,7 @@
#define _LINUX_SERIAL_8250_H
#include <linux/serial_core.h>
#include <linux/serial_reg.h>
#include <linux/platform_device.h>
/*
@@ -137,6 +138,8 @@ extern int early_serial_setup(struct uart_port *port);
extern unsigned int serial8250_early_in(struct uart_port *port, int offset);
extern void serial8250_early_out(struct uart_port *port, int offset, int value);
extern int early_serial8250_setup(struct earlycon_device *device,
const char *options);
extern void serial8250_do_set_termios(struct uart_port *port,
struct ktermios *termios, struct ktermios *old);
extern int serial8250_do_startup(struct uart_port *port);


@@ -35,7 +35,7 @@
#define uart_console(port) \
((port)->cons && (port)->cons->index == (port)->line)
#else
#define uart_console(port) (0)
#define uart_console(port) ({ (void)port; 0; })
#endif
struct uart_port;
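
The new statement-expression form still evaluates to 0 when serial core console support is not configured, but it also consumes its argument, so callers that only compute a port pointer for this check no longer trip unused-variable warnings. A hypothetical caller (names are illustrative):

#include <linux/serial_core.h>

/* Hypothetical helper: 'uport' is referenced even when uart_console()
 * collapses to a constant, so "set but not used" warnings stay quiet. */
static bool example_skip_console(struct uart_state *state)
{
	struct uart_port *uport = state->uart_port;

	return uart_console(uport);
}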


@@ -1,6 +1,7 @@
#ifndef __LINUX_SERIAL_SCI_H
#define __LINUX_SERIAL_SCI_H
#include <linux/bitops.h>
#include <linux/serial_core.h>
#include <linux/sh_dma.h>
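
The added <linux/bitops.h> include provides the BIT() helper used for the register-bit rewrites below; BIT(n) expands to (1UL << (n)), so the defined values are unchanged. A tiny illustrative check (the function name is made up):

#include <linux/bitops.h>
#include <linux/bug.h>

static inline void scscr_bit_equivalence_check(void)
{
	/* e.g. the rewritten SCSCR_TIE and SCSCR_CKE0 keep their old values */
	BUILD_BUG_ON(BIT(7) != (1 << 7));
	BUILD_BUG_ON(BIT(0) != (1 << 0));
}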
@@ -10,59 +11,16 @@
#define SCIx_NOT_SUPPORTED (-1)
/* SCSMR (Serial Mode Register) */
#define SCSMR_CHR (1 << 6) /* 7-bit Character Length */
#define SCSMR_PE (1 << 5) /* Parity Enable */
#define SCSMR_ODD (1 << 4) /* Odd Parity */
#define SCSMR_STOP (1 << 3) /* Stop Bit Length */
#define SCSMR_CKS 0x0003 /* Clock Select */
/* Serial Control Register (@ = not supported by all parts) */
#define SCSCR_TIE (1 << 7) /* Transmit Interrupt Enable */
#define SCSCR_RIE (1 << 6) /* Receive Interrupt Enable */
#define SCSCR_TE (1 << 5) /* Transmit Enable */
#define SCSCR_RE (1 << 4) /* Receive Enable */
#define SCSCR_REIE (1 << 3) /* Receive Error Interrupt Enable @ */
#define SCSCR_TOIE (1 << 2) /* Timeout Interrupt Enable @ */
#define SCSCR_CKE1 (1 << 1) /* Clock Enable 1 */
#define SCSCR_CKE0 (1 << 0) /* Clock Enable 0 */
/* SCIFA/SCIFB only */
#define SCSCR_TDRQE (1 << 15) /* Tx Data Transfer Request Enable */
#define SCSCR_RDRQE (1 << 14) /* Rx Data Transfer Request Enable */
#define SCSCR_TIE BIT(7) /* Transmit Interrupt Enable */
#define SCSCR_RIE BIT(6) /* Receive Interrupt Enable */
#define SCSCR_TE BIT(5) /* Transmit Enable */
#define SCSCR_RE BIT(4) /* Receive Enable */
#define SCSCR_REIE BIT(3) /* Receive Error Interrupt Enable @ */
#define SCSCR_TOIE BIT(2) /* Timeout Interrupt Enable @ */
#define SCSCR_CKE1 BIT(1) /* Clock Enable 1 */
#define SCSCR_CKE0 BIT(0) /* Clock Enable 0 */
/* SCxSR (Serial Status Register) on SCI */
#define SCI_TDRE 0x80 /* Transmit Data Register Empty */
#define SCI_RDRF 0x40 /* Receive Data Register Full */
#define SCI_ORER 0x20 /* Overrun Error */
#define SCI_FER 0x10 /* Framing Error */
#define SCI_PER 0x08 /* Parity Error */
#define SCI_TEND 0x04 /* Transmit End */
#define SCI_DEFAULT_ERROR_MASK (SCI_PER | SCI_FER)
/* SCxSR (Serial Status Register) on SCIF, HSCIF */
#define SCIF_ER 0x0080 /* Receive Error */
#define SCIF_TEND 0x0040 /* Transmission End */
#define SCIF_TDFE 0x0020 /* Transmit FIFO Data Empty */
#define SCIF_BRK 0x0010 /* Break Detect */
#define SCIF_FER 0x0008 /* Framing Error */
#define SCIF_PER 0x0004 /* Parity Error */
#define SCIF_RDF 0x0002 /* Receive FIFO Data Full */
#define SCIF_DR 0x0001 /* Receive Data Ready */
#define SCIF_DEFAULT_ERROR_MASK (SCIF_PER | SCIF_FER | SCIF_ER | SCIF_BRK)
/* SCFCR (FIFO Control Register) */
#define SCFCR_LOOP (1 << 0) /* Loopback Test */
/* SCSPTR (Serial Port Register), optional */
#define SCSPTR_RTSIO (1 << 7) /* Serial Port RTS Pin Input/Output */
#define SCSPTR_CTSIO (1 << 5) /* Serial Port CTS Pin Input/Output */
#define SCSPTR_SPB2IO (1 << 1) /* Serial Port Break Input/Output */
#define SCSPTR_SPB2DT (1 << 0) /* Serial Port Break Data */
/* HSSRR HSCIF */
#define HSCIF_SRE 0x8000 /* Sampling Rate Register Enable */
enum {
SCIx_PROBE_REGTYPE,
@@ -82,28 +40,6 @@ enum {
SCIx_NR_REGTYPES,
};
/*
* SCI register subset common for all port types.
* Not all registers will exist on all parts.
*/
enum {
SCSMR, /* Serial Mode Register */
SCBRR, /* Bit Rate Register */
SCSCR, /* Serial Control Register */
SCxSR, /* Serial Status Register */
SCFCR, /* FIFO Control Register */
SCFDR, /* FIFO Data Count Register */
SCxTDR, /* Transmit (FIFO) Data Register */
SCxRDR, /* Receive (FIFO) Data Register */
SCLSR, /* Line Status Register */
SCTFDR, /* Transmit FIFO Data Count Register */
SCRFDR, /* Receive FIFO Data Count Register */
SCSPTR, /* Serial Port Register */
HSSRR, /* Sampling Rate Register */
SCIx_NR_REGS,
};
struct device;
struct plat_sci_port_ops {
@@ -113,7 +49,7 @@ struct plat_sci_port_ops {
/*
* Port-specific capabilities
*/
#define SCIx_HAVE_RTSCTS (1 << 0)
#define SCIx_HAVE_RTSCTS BIT(0)
/*
* Platform device specific platform_data struct


@@ -422,7 +422,7 @@ static inline struct tty_struct *tty_kref_get(struct tty_struct *tty)
extern int tty_paranoia_check(struct tty_struct *tty, struct inode *inode,
const char *routine);
extern char *tty_name(struct tty_struct *tty, char *buf);
extern const char *tty_name(const struct tty_struct *tty);
extern void tty_wait_until_sent(struct tty_struct *tty, long timeout);
extern int tty_check_change(struct tty_struct *tty);
extern void __stop_tty(struct tty_struct *tty);
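
The tty_name() change means callers get a const string directly instead of formatting into a caller-supplied buffer. A hypothetical before/after caller (function and message are illustrative):

#include <linux/printk.h>
#include <linux/tty.h>

static void example_report(struct tty_struct *tty)
{
	/* old style: char buf[64]; pr_info("opened %s\n", tty_name(tty, buf)); */
	pr_info("opened %s\n", tty_name(tty));
}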


@@ -138,6 +138,7 @@ header-y += genetlink.h
header-y += gen_stats.h
header-y += gfs2_ondisk.h
header-y += gigaset_dev.h
header-y += gsmmux.h
header-y += hdlcdrv.h
header-y += hdlc.h
header-y += hdreg.h


@@ -1,6 +1,9 @@
#ifndef _LINUX_GSMMUX_H
#define _LINUX_GSMMUX_H
#include <linux/if.h>
#include <linux/ioctl.h>
struct gsm_config
{
unsigned int adaption;

Some files were not shown because too many files have changed in this diff.
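
As an aside on the gsmmux.h move to uapi above: exporting the header lets userspace configure the n_gsm multiplexer directly. A heavily simplified sketch, assuming a GSM modem on /dev/ttyUSB0; the device path and the chosen parameter are illustrative:

/* Userspace sketch: attach the GSM 07.10 mux ldisc and tweak its config. */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/gsmmux.h>
#include <linux/tty.h>		/* N_GSM0710 */

int main(void)
{
	struct gsm_config c;
	int ldisc = N_GSM0710;
	int fd = open("/dev/ttyUSB0", O_RDWR | O_NOCTTY);

	if (fd < 0)
		return 1;
	if (ioctl(fd, TIOCSETD, &ldisc) < 0) {	/* attach the mux line discipline */
		close(fd);
		return 1;
	}
	if (ioctl(fd, GSMIOC_GETCONF, &c) == 0) {
		c.encapsulation = 0;		/* basic option mode */
		ioctl(fd, GSMIOC_SETCONF, &c);
	}
	close(fd);
	return 0;
}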