

Merge tag 'clk-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/clk/linux

Pull clk updates from Stephen Boyd:
 "The core framework has a handful of patches this time around, mostly
  due to the clk rate protection support added by Jerome Brunet.

  This feature will allow consumers to lock in a certain rate on the
  output of a clk so that things like audio playback don't suffer pops
  when the clk frequency changes due to shared parent clks changing
  rates. Currently the clk API doesn't guarantee the rate of a clk stays
  at the rate you request after clk_set_rate() is called, so this new
  API will allow drivers to express that requirement.

  Beyond this, the core got some debugfs pretty printing patches and a
  couple minor non-critical fixes.

  Looking outside of the core framework diff we have some new driver
  additions and the removal of a legacy TI clk driver. Both of these hit
  high in the dirstat. Also, the removal of the asm-generic/clkdev.h
  file causes small one-liners in all the architecture Kbuild files.

  Overall, the driver diff seems to be the normal stuff that comes all
  the time to fix little problems here and there and to support new
  hardware.

  Summary:

  Core:
   - Clk rate protection
   - Symbolic clk flags in debugfs output
   - Clk registration enabled clks while doing bookkeeping updates

  New Drivers:
   - Spreadtrum SC9860
   - HiSilicon hi3660 stub
   - Qualcomm A53 PLL, SPMI clkdiv, and MSM8916 APCS
   - Amlogic Meson-AXG
   - ASPEED BMC

  Removed Drivers:
   - TI OMAP 3xxx legacy clk (non-DT) support
   - asm*/clkdev.h got removed (not really a driver)

  Updates:
   - Renesas FDP1-0 module clock on R-Car M3-W
   - Renesas LVDS module clock on R-Car V3M
   - Misc fixes to pr_err() prints
   - Qualcomm MSM8916 audio fixes
   - Qualcomm IPQ8074 rounded out support for more peripherals
   - Qualcomm Alpha PLL variants
   - Divider code was using container_of() on bad pointers
   - Allwinner DE2 clks on H3
   - Amlogic minor data fixes and dropping of CLK_IGNORE_UNUSED
   - Mediatek clk driver compile test support
   - AT91 PMC clk suspend/resume restoration support
   - PLL issues fixed on si5351
   - Broadcom IProc PLL calculation updates
   - DVFS support for Armada mvebu CPU clks
   - Allwinner fixed post-divider support
   - TI clkctrl fixes and support for newer SoCs"
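
For illustration, a minimal consumer-side sketch of the rate protection
feature described above, assuming the clk_rate_exclusive_get()/
clk_rate_exclusive_put() and clk_set_rate_exclusive() calls this series
introduces (the playback_start()/playback_stop() wrappers are hypothetical):

#include <linux/clk.h>

static int playback_start(struct clk *mclk, unsigned long rate)
{
        int ret;

        /* Set the rate and claim exclusivity over it in one call, so shared
         * parent clks changing rates elsewhere cannot shift this output.
         */
        ret = clk_set_rate_exclusive(mclk, rate);
        if (ret)
                return ret;

        ret = clk_prepare_enable(mclk);
        if (ret)
                clk_rate_exclusive_put(mclk);

        return ret;
}

static void playback_stop(struct clk *mclk)
{
        clk_disable_unprepare(mclk);
        /* Release the protection so other consumers may change the rate */
        clk_rate_exclusive_put(mclk);
}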

* tag 'clk-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/clk/linux: (125 commits)
  clk: aspeed: Handle inverse polarity of USB port 1 clock gate
  clk: aspeed: Fix return value check in aspeed_cc_init()
  clk: aspeed: Add reset controller
  clk: aspeed: Register gated clocks
  clk: aspeed: Add platform driver and register PLLs
  clk: aspeed: Register core clocks
  clk: Add clock driver for ASPEED BMC SoCs
  clk: mediatek: adjust dependency of reset.c to avoid unexpectedly being built
  clk: fix reentrancy of clk_enable() on UP systems
  clk: meson-axg: fix potential NULL dereference in axg_clkc_probe()
  clk: Simplify debugfs registration
  clk: Fix debugfs_create_*() usage
  clk: Show symbolic clock flags in debugfs
  clk: renesas: r8a7796: Add FDP clock
  clk: Move __clk_{get,put}() into private clk.h API
  clk: sunxi: Use CLK_IS_CRITICAL flag for critical clks
  clk: Improve flags doc for of_clk_detect_critical()
  arch: Remove clkdev.h asm-generic from Kbuild
  clk: sunxi-ng: a83t: Add M divider to TCON1 clock
  clk: Prepare to remove asm-generic/clkdev.h
  ...
Committed by Linus Torvalds on 2018-02-01 16:56:07 -08:00 (commit 3879ae653a)
154 changed files with 12667 additions and 5870 deletions

View File

@ -13,12 +13,18 @@ Required Properties:
- "hisilicon,hi3660-pmuctrl"
- "hisilicon,hi3660-sctrl"
- "hisilicon,hi3660-iomcu"
- "hisilicon,hi3660-stub-clk"
- reg: physical base address of the controller and length of memory mapped
region.
- #clock-cells: should be 1.
Optional Properties:
- mboxes: Phandle to the mailbox for sending message to MCU.
(See: ../mailbox/hisilicon,hi3660-mailbox.txt for more info)
Each clock is assigned an identifier and client nodes use this identifier
to specify the clock which they consume.

View File

@ -0,0 +1,22 @@
Qualcomm MSM8916 A53 PLL Binding
--------------------------------
The A53 PLL on MSM8916 platforms is the main CPU PLL used for frequencies
above 1 GHz.
Required properties :
- compatible : Shall contain only one of the following:
"qcom,msm8916-a53pll"
- reg : shall contain base register location and length
- #clock-cells : must be set to <0>
Example:
a53pll: clock@b016000 {
compatible = "qcom,msm8916-a53pll";
reg = <0xb016000 0x40>;
#clock-cells = <0>;
};

View File

@ -0,0 +1,59 @@
Qualcomm Technologies, Inc. SPMI PMIC clock divider (clkdiv)
clkdiv configures the clock frequency of a set of outputs on the PMIC.
These clocks are typically wired through alternate functions on
gpio pins.
=======================
Properties
=======================
- compatible
Usage: required
Value type: <string>
Definition: must be "qcom,spmi-clkdiv".
- reg
Usage: required
Value type: <prop-encoded-array>
Definition: base address of CLKDIV peripherals.
- qcom,num-clkdivs
Usage: required
Value type: <u32>
Definition: number of CLKDIV peripherals.
- clocks:
Usage: required
Value type: <prop-encoded-array>
Definition: reference to the xo clock.
- clock-names:
Usage: required
Value type: <stringlist>
Definition: must be "xo".
- #clock-cells:
Usage: required
Value type: <u32>
Definition: shall contain 1.
=======
Example
=======
pm8998_clk_divs: clock-controller@5b00 {
compatible = "qcom,spmi-clkdiv";
reg = <0x5b00>;
#clock-cells = <1>;
qcom,num-clkdivs = <3>;
clocks = <&xo_board>;
clock-names = "xo";
assigned-clocks = <&pm8998_clk_divs 1>,
<&pm8998_clk_divs 2>,
<&pm8998_clk_divs 3>;
assigned-clock-rates = <9600000>,
<9600000>,
<9600000>;
};

View File

@ -78,6 +78,7 @@ second cell is the clock index for the specified type.
2 hwaccel index (n in CLKCGnHWACSR)
3 fman 0 for fm1, 1 for fm2
4 platform pll 0=pll, 1=pll/2, 2=pll/3, 3=pll/4
4=pll/5, 5=pll/6, 6=pll/7, 7=pll/8
5 coreclk must be 0
3. Example

View File

@ -49,6 +49,7 @@ Optional child node properties:
- silabs,multisynth-source: source pll A(0) or B(1) of corresponding multisynth
divider.
- silabs,pll-master: boolean, multisynth can change pll frequency.
- silabs,pll-reset: boolean, clock output can reset its pll.
- silabs,disable-state : clock output disable state, shall be
0 = clock output is driven LOW when disabled
1 = clock output is driven HIGH when disabled

View File

@ -0,0 +1,63 @@
Spreadtrum Clock Binding
------------------------
Required properties:
- compatible: should contain the following compatible strings:
- "sprd,sc9860-pmu-gate"
- "sprd,sc9860-pll"
- "sprd,sc9860-ap-clk"
- "sprd,sc9860-aon-prediv"
- "sprd,sc9860-apahb-gate"
- "sprd,sc9860-aon-gate"
- "sprd,sc9860-aonsecure-clk"
- "sprd,sc9860-agcp-gate"
- "sprd,sc9860-gpu-clk"
- "sprd,sc9860-vsp-clk"
- "sprd,sc9860-vsp-gate"
- "sprd,sc9860-cam-clk"
- "sprd,sc9860-cam-gate"
- "sprd,sc9860-disp-clk"
- "sprd,sc9860-disp-gate"
- "sprd,sc9860-apapb-gate"
- #clock-cells: must be 1
- clocks : Should be the input parent clock(s) phandle for the clock. This
property simply indicates which clock group the clocks' parents belong to,
since each clk node represents many clocks defined in the driver. The
detailed dependency relationship (i.e. how many parents there are and which
clocks they are) is implemented in the driver code.
Optional properties:
- reg: Contains the register base address and length. It must be configured
only if there is no 'sprd,syscon' property under the node.
- sprd,syscon: phandle to the syscon that covers the same address area as the
clock, so the regmap for the clocks can be obtained from the syscon device.
Example:
pmu_gate: pmu-gate {
compatible = "sprd,sc9860-pmu-gate";
sprd,syscon = <&pmu_regs>;
clocks = <&ext_26m>;
#clock-cells = <1>;
};
pll: pll {
compatible = "sprd,sc9860-pll";
sprd,syscon = <&ana_regs>;
clocks = <&pmu_gate 0>;
#clock-cells = <1>;
};
ap_clk: clock-controller@20000000 {
compatible = "sprd,sc9860-ap-clk";
reg = <0 0x20000000 0 0x400>;
clocks = <&ext_26m>, <&pll 0>,
<&pmu_gate 0>;
#clock-cells = <1>;
};

View File

@ -4,13 +4,14 @@ Allwinner Display Engine 2.0 Clock Control Binding
Required properties :
- compatible: must contain one of the following compatibles:
- "allwinner,sun8i-a83t-de2-clk"
- "allwinner,sun8i-h3-de2-clk"
- "allwinner,sun8i-v3s-de2-clk"
- "allwinner,sun50i-h5-de2-clk"
- reg: Must contain the registers base address and length
- clocks: phandle to the clocks feeding the display engine subsystem.
Three are needed:
- "mod": the display engine module clock
- "mod": the display engine module clock (on A83T it's the DE PLL)
- "bus": the bus clock for the whole display engine subsystem
- clock-names: Must contain the clock names described just above
- resets: phandle to the reset control for the display engine subsystem.
@ -19,7 +20,7 @@ Required properties :
Example:
de2_clocks: clock@1000000 {
compatible = "allwinner,sun8i-a83t-de2-clk";
compatible = "allwinner,sun8i-h3-de2-clk";
reg = <0x01000000 0x100000>;
clocks = <&ccu CLK_BUS_DE>,
<&ccu CLK_DE>;

View File

@ -1,7 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
generic-y += clkdev.h
generic-y += exec.h
generic-y += export.h
generic-y += fb.h

View File

@ -1,6 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
generic-y += bugs.h
generic-y += clkdev.h
generic-y += device.h
generic-y += div64.h
generic-y += emergency-restart.h

View File

@ -1,4 +1,3 @@
generic-y += clkdev.h
generic-y += current.h
generic-y += early_ioremap.h
generic-y += emergency-restart.h

View File

@ -105,6 +105,7 @@ config ARCH_MESON
select PINCTRL_MESON
select COMMON_CLK_AMLOGIC
select COMMON_CLK_GXBB
select COMMON_CLK_AXG
select MESON_IRQ_GPIO
help
This enables support for the Amlogic S905 SoCs.

View File

@ -1,5 +1,4 @@
generic-y += bugs.h
generic-y += clkdev.h
generic-y += delay.h
generic-y += div64.h
generic-y += dma.h

View File

@ -1,17 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_CLKDEV__H_
#define __ASM_CLKDEV__H_
#include <linux/slab.h>
static inline struct clk_lookup_alloc *__clkdev_alloc(size_t size)
{
return kzalloc(size, GFP_KERNEL);
}
#ifndef CONFIG_COMMON_CLK
#define __clk_put(clk)
#define __clk_get(clk) ({ 1; })
#endif
#endif

View File

@ -1,7 +1,6 @@
generic-y += atomic.h
generic-y += barrier.h
generic-y += bugs.h
generic-y += clkdev.h
generic-y += current.h
generic-y += device.h
generic-y += div64.h

View File

@ -1,6 +1,5 @@
generic-y += atomic.h
generic-y += barrier.h
generic-y += clkdev.h
generic-y += cmpxchg.h
generic-y += current.h
generic-y += device.h

View File

@ -1,5 +1,4 @@
generic-y += clkdev.h
generic-y += device.h
generic-y += exec.h
generic-y += extable.h

View File

@ -3,7 +3,6 @@ generic-y += barrier.h
generic-y += bugs.h
generic-y += cacheflush.h
generic-y += checksum.h
generic-y += clkdev.h
generic-y += current.h
generic-y += delay.h
generic-y += device.h

View File

@ -2,7 +2,6 @@
generic-y += barrier.h
generic-y += bug.h
generic-y += bugs.h
generic-y += clkdev.h
generic-y += current.h
generic-y += device.h
generic-y += div64.h

View File

@ -1,4 +1,3 @@
generic-y += clkdev.h
generic-y += exec.h
generic-y += irq_work.h
generic-y += mcs_spinlock.h

View File

@ -1,4 +1,3 @@
generic-y += clkdev.h
generic-y += current.h
generic-y += dma-mapping.h
generic-y += exec.h

View File

@ -1,5 +1,4 @@
generic-y += barrier.h
generic-y += clkdev.h
generic-y += device.h
generic-y += emergency-restart.h
generic-y += exec.h

View File

@ -1,5 +1,4 @@
generic-y += bugs.h
generic-y += clkdev.h
generic-y += current.h
generic-y += device.h
generic-y += dma.h

View File

@ -2,7 +2,6 @@ generic-y += barrier.h
generic-y += bitops.h
generic-y += bug.h
generic-y += bugs.h
generic-y += clkdev.h
generic-y += device.h
generic-y += div64.h
generic-y += emergency-restart.h

View File

@ -1,6 +1,5 @@
# MIPS headers
generic-$(CONFIG_GENERIC_CSUM) += checksum.h
generic-y += clkdev.h
generic-y += current.h
generic-y += dma-contiguous.h
generic-y += emergency-restart.h

View File

@ -1,6 +1,5 @@
generic-y += barrier.h
generic-y += clkdev.h
generic-y += device.h
generic-y += exec.h
generic-y += extable.h

View File

@ -3,7 +3,6 @@ generic-y += barrier.h
generic-y += bitops.h
generic-y += bug.h
generic-y += bugs.h
generic-y += clkdev.h
generic-y += cmpxchg.h
generic-y += current.h
generic-y += device.h

View File

@ -2,7 +2,6 @@ generic-y += barrier.h
generic-y += bug.h
generic-y += bugs.h
generic-y += checksum.h
generic-y += clkdev.h
generic-y += current.h
generic-y += device.h
generic-y += div64.h

View File

@ -1,5 +1,4 @@
generic-y += barrier.h
generic-y += clkdev.h
generic-y += current.h
generic-y += device.h
generic-y += div64.h

View File

@ -1,4 +1,3 @@
generic-y += clkdev.h
generic-y += div64.h
generic-y += export.h
generic-y += irq_regs.h

View File

@ -1,7 +1,6 @@
generic-y += bugs.h
generic-y += cacheflush.h
generic-y += checksum.h
generic-y += clkdev.h
generic-y += cputime.h
generic-y += device.h
generic-y += div64.h

View File

@ -6,7 +6,6 @@ generated-y += unistd_nr.h
generic-y += asm-offsets.h
generic-y += cacheflush.h
generic-y += clkdev.h
generic-y += device.h
generic-y += dma-contiguous.h
generic-y += dma-mapping.h

View File

@ -1,5 +1,4 @@
generic-y += barrier.h
generic-y += clkdev.h
generic-y += current.h
generic-y += extable.h
generic-y += irq_work.h

View File

@ -1,4 +1,3 @@
generic-y += clkdev.h
generic-y += current.h
generic-y += delay.h
generic-y += div64.h

View File

@ -1,7 +1,6 @@
# User exported sparc header files
generic-y += clkdev.h
generic-y += div64.h
generic-y += emergency-restart.h
generic-y += exec.h

View File

@ -1,6 +1,5 @@
generic-y += bug.h
generic-y += bugs.h
generic-y += clkdev.h
generic-y += emergency-restart.h
generic-y += exec.h
generic-y += extable.h

View File

@ -1,7 +1,6 @@
generic-y += barrier.h
generic-y += bpf_perf_event.h
generic-y += bug.h
generic-y += clkdev.h
generic-y += current.h
generic-y += delay.h
generic-y += device.h

View File

@ -1,6 +1,5 @@
generic-y += atomic.h
generic-y += bugs.h
generic-y += clkdev.h
generic-y += current.h
generic-y += device.h
generic-y += div64.h

View File

@ -6,7 +6,6 @@ generated-y += unistd_32_ia32.h
generated-y += unistd_64_x32.h
generated-y += xen-hypercalls.h
generic-y += clkdev.h
generic-y += dma-contiguous.h
generic-y += early_ioremap.h
generic-y += mcs_spinlock.h

View File

@ -1,5 +1,4 @@
generic-y += bug.h
generic-y += clkdev.h
generic-y += device.h
generic-y += div64.h
generic-y += dma-contiguous.h

View File

@ -142,6 +142,18 @@ config COMMON_CLK_GEMINI
This driver supports the SoC clocks on the Cortina Systems Gemini
platform, also known as SL3516 or CS3516.
config COMMON_CLK_ASPEED
bool "Clock driver for Aspeed BMC SoCs"
depends on ARCH_ASPEED || COMPILE_TEST
default ARCH_ASPEED
select MFD_SYSCON
select RESET_CONTROLLER
---help---
This driver supports the SoC clocks on the Aspeed BMC platforms.
The G4 and G5 series, including the ast2400 and ast2500, are supported
by this driver.
config COMMON_CLK_S2MPS11
tristate "Clock driver for S2MPS1X/S5M8767 MFD"
depends on MFD_SEC_CORE || COMPILE_TEST
@ -236,6 +248,7 @@ source "drivers/clk/mvebu/Kconfig"
source "drivers/clk/qcom/Kconfig"
source "drivers/clk/renesas/Kconfig"
source "drivers/clk/samsung/Kconfig"
source "drivers/clk/sprd/Kconfig"
source "drivers/clk/sunxi-ng/Kconfig"
source "drivers/clk/tegra/Kconfig"
source "drivers/clk/ti/Kconfig"

View File

@ -27,6 +27,7 @@ obj-$(CONFIG_ARCH_CLPS711X) += clk-clps711x.o
obj-$(CONFIG_COMMON_CLK_CS2000_CP) += clk-cs2000-cp.o
obj-$(CONFIG_ARCH_EFM32) += clk-efm32gg.o
obj-$(CONFIG_COMMON_CLK_GEMINI) += clk-gemini.o
obj-$(CONFIG_COMMON_CLK_ASPEED) += clk-aspeed.o
obj-$(CONFIG_ARCH_HIGHBANK) += clk-highbank.o
obj-$(CONFIG_CLK_HSDK) += clk-hsdk-pll.o
obj-$(CONFIG_COMMON_CLK_MAX77686) += clk-max77686.o
@ -67,7 +68,7 @@ obj-$(CONFIG_ARCH_MXC) += imx/
obj-$(CONFIG_MACH_INGENIC) += ingenic/
obj-$(CONFIG_ARCH_KEYSTONE) += keystone/
obj-$(CONFIG_MACH_LOONGSON32) += loongson1/
obj-$(CONFIG_ARCH_MEDIATEK) += mediatek/
obj-y += mediatek/
obj-$(CONFIG_COMMON_CLK_AMLOGIC) += meson/
obj-$(CONFIG_MACH_PIC32) += microchip/
ifeq ($(CONFIG_COMMON_CLK), y)
@ -85,6 +86,7 @@ obj-$(CONFIG_COMMON_CLK_SAMSUNG) += samsung/
obj-$(CONFIG_ARCH_SIRF) += sirf/
obj-$(CONFIG_ARCH_SOCFPGA) += socfpga/
obj-$(CONFIG_PLAT_SPEAR) += spear/
obj-$(CONFIG_ARCH_SPRD) += sprd/
obj-$(CONFIG_ARCH_STI) += st/
obj-$(CONFIG_ARCH_SUNXI) += sunxi/
obj-$(CONFIG_ARCH_SUNXI) += sunxi-ng/

View File

@ -204,6 +204,8 @@ at91_clk_register_programmable(struct regmap *regmap,
if (ret) {
kfree(prog);
hw = ERR_PTR(ret);
} else {
pmc_register_pck(id);
}
return hw;

View File

@ -22,6 +22,7 @@
#include "pmc.h"
#define PMC_MAX_IDS 128
#define PMC_MAX_PCKS 8
int of_at91_get_clk_range(struct device_node *np, const char *propname,
struct clk_range *range)
@ -50,6 +51,7 @@ EXPORT_SYMBOL_GPL(of_at91_get_clk_range);
static struct regmap *pmcreg;
static u8 registered_ids[PMC_MAX_IDS];
static u8 registered_pcks[PMC_MAX_PCKS];
static struct
{
@ -66,8 +68,13 @@ static struct
u32 pcr[PMC_MAX_IDS];
u32 audio_pll0;
u32 audio_pll1;
u32 pckr[PMC_MAX_PCKS];
} pmc_cache;
/*
* As Peripheral ID 0 is invalid on AT91 chips, the identifier is stored
* without alteration in the table, and 0 is for unused clocks.
*/
void pmc_register_id(u8 id)
{
int i;
@ -82,11 +89,30 @@ void pmc_register_id(u8 id)
}
}
static int pmc_suspend(void)
/*
* As Programmable Clock 0 is valid on AT91 chips, there is an offset
* of 1 between the stored value and the real clock ID.
*/
void pmc_register_pck(u8 pck)
{
int i;
regmap_read(pmcreg, AT91_PMC_IMR, &pmc_cache.scsr);
for (i = 0; i < PMC_MAX_PCKS; i++) {
if (registered_pcks[i] == 0) {
registered_pcks[i] = pck + 1;
break;
}
if (registered_pcks[i] == (pck + 1))
break;
}
}
static int pmc_suspend(void)
{
int i;
u8 num;
regmap_read(pmcreg, AT91_PMC_SCSR, &pmc_cache.scsr);
regmap_read(pmcreg, AT91_PMC_PCSR, &pmc_cache.pcsr0);
regmap_read(pmcreg, AT91_CKGR_UCKR, &pmc_cache.uckr);
regmap_read(pmcreg, AT91_CKGR_MOR, &pmc_cache.mor);
@ -103,14 +129,29 @@ static int pmc_suspend(void)
regmap_read(pmcreg, AT91_PMC_PCR,
&pmc_cache.pcr[registered_ids[i]]);
}
for (i = 0; registered_pcks[i]; i++) {
num = registered_pcks[i] - 1;
regmap_read(pmcreg, AT91_PMC_PCKR(num), &pmc_cache.pckr[num]);
}
return 0;
}
static bool pmc_ready(unsigned int mask)
{
unsigned int status;
regmap_read(pmcreg, AT91_PMC_SR, &status);
return ((status & mask) == mask) ? 1 : 0;
}
static void pmc_resume(void)
{
int i, ret = 0;
int i;
u8 num;
u32 tmp;
u32 mask = AT91_PMC_MCKRDY | AT91_PMC_LOCKA;
regmap_read(pmcreg, AT91_PMC_MCKR, &tmp);
if (pmc_cache.mckr != tmp)
@ -119,7 +160,7 @@ static void pmc_resume(void)
if (pmc_cache.pllar != tmp)
pr_warn("PLLAR was not configured properly by the firmware\n");
regmap_write(pmcreg, AT91_PMC_IMR, pmc_cache.scsr);
regmap_write(pmcreg, AT91_PMC_SCER, pmc_cache.scsr);
regmap_write(pmcreg, AT91_PMC_PCER, pmc_cache.pcsr0);
regmap_write(pmcreg, AT91_CKGR_UCKR, pmc_cache.uckr);
regmap_write(pmcreg, AT91_CKGR_MOR, pmc_cache.mor);
@ -133,14 +174,16 @@ static void pmc_resume(void)
pmc_cache.pcr[registered_ids[i]] |
AT91_PMC_PCR_CMD);
}
if (pmc_cache.uckr & AT91_PMC_UPLLEN) {
ret = regmap_read_poll_timeout(pmcreg, AT91_PMC_SR, tmp,
!(tmp & AT91_PMC_LOCKU),
10, 5000);
if (ret)
pr_crit("USB PLL didn't lock when resuming\n");
for (i = 0; registered_pcks[i]; i++) {
num = registered_pcks[i] - 1;
regmap_write(pmcreg, AT91_PMC_PCKR(num), pmc_cache.pckr[num]);
}
if (pmc_cache.uckr & AT91_PMC_UPLLEN)
mask |= AT91_PMC_LOCKU;
while (!pmc_ready(mask))
cpu_relax();
}
static struct syscore_ops pmc_syscore_ops = {

View File

@ -31,8 +31,10 @@ int of_at91_get_clk_range(struct device_node *np, const char *propname,
#ifdef CONFIG_PM
void pmc_register_id(u8 id);
void pmc_register_pck(u8 pck);
#else
static inline void pmc_register_id(u8 id) {}
static inline void pmc_register_pck(u8 pck) {}
#endif
#endif /* __PMC_H_ */

View File

@ -269,23 +269,10 @@ static void __init cygnus_asiu_init(struct device_node *node)
}
CLK_OF_DECLARE(cygnus_asiu_clk, "brcm,cygnus-asiu-clk", cygnus_asiu_init);
/*
* AUDIO PLL VCO frequency parameter table
*
* PLL output frequency = ((ndiv_int + ndiv_frac / 2^20) *
* (parent clock rate / pdiv)
*
* On Cygnus, parent is the 25MHz oscillator
*/
static const struct iproc_pll_vco_param audiopll_vco_params[] = {
/* rate (Hz) ndiv_int ndiv_frac pdiv */
{ 1354750204UL, 54, 199238, 1 },
{ 1769470191UL, 70, 816639, 1 },
};
static const struct iproc_pll_ctrl audiopll = {
.flags = IPROC_CLK_PLL_NEEDS_SW_CFG | IPROC_CLK_PLL_HAS_NDIV_FRAC |
IPROC_CLK_PLL_USER_MODE_ON | IPROC_CLK_PLL_RESET_ACTIVE_LOW,
IPROC_CLK_PLL_USER_MODE_ON | IPROC_CLK_PLL_RESET_ACTIVE_LOW |
IPROC_CLK_PLL_CALC_PARAM,
.reset = RESET_VAL(0x5c, 0, 1),
.dig_filter = DF_VAL(0x48, 0, 3, 6, 4, 3, 3),
.sw_ctrl = SW_CTRL_VAL(0x4, 0),
@ -300,8 +287,7 @@ static const struct iproc_pll_ctrl audiopll = {
static const struct iproc_clk_ctrl audiopll_clk[] = {
[BCM_CYGNUS_AUDIOPLL_CH0] = {
.channel = BCM_CYGNUS_AUDIOPLL_CH0,
.flags = IPROC_CLK_AON |
IPROC_CLK_MCLK_DIV_BY_2,
.flags = IPROC_CLK_AON | IPROC_CLK_MCLK_DIV_BY_2,
.enable = ENABLE_VAL(0x14, 8, 10, 9),
.mdiv = REG_VAL(0x14, 0, 8),
},
@ -321,9 +307,8 @@ static const struct iproc_clk_ctrl audiopll_clk[] = {
static void __init cygnus_audiopll_clk_init(struct device_node *node)
{
iproc_pll_clk_setup(node, &audiopll, audiopll_vco_params,
ARRAY_SIZE(audiopll_vco_params), audiopll_clk,
ARRAY_SIZE(audiopll_clk));
iproc_pll_clk_setup(node, &audiopll, NULL, 0,
audiopll_clk, ARRAY_SIZE(audiopll_clk));
}
CLK_OF_DECLARE(cygnus_audiopll, "brcm,cygnus-audiopll",
cygnus_audiopll_clk_init);

View File

@ -69,16 +69,6 @@ enum vco_freq_range {
VCO_MAX = 4000000000U,
};
struct iproc_pll;
struct iproc_clk {
struct clk_hw hw;
const char *name;
struct iproc_pll *pll;
unsigned long rate;
const struct iproc_clk_ctrl *ctrl;
};
struct iproc_pll {
void __iomem *status_base;
void __iomem *control_base;
@ -88,13 +78,49 @@ struct iproc_pll {
const struct iproc_pll_ctrl *ctrl;
const struct iproc_pll_vco_param *vco_param;
unsigned int num_vco_entries;
};
struct clk_hw_onecell_data *clk_data;
struct iproc_clk *clks;
struct iproc_clk {
struct clk_hw hw;
struct iproc_pll *pll;
const struct iproc_clk_ctrl *ctrl;
};
#define to_iproc_clk(hw) container_of(hw, struct iproc_clk, hw)
static int pll_calc_param(unsigned long target_rate,
unsigned long parent_rate,
struct iproc_pll_vco_param *vco_out)
{
u64 ndiv_int, ndiv_frac, residual;
ndiv_int = target_rate / parent_rate;
if (!ndiv_int || (ndiv_int > 255))
return -EINVAL;
residual = target_rate - (ndiv_int * parent_rate);
residual <<= 20;
/*
* Add half of the divisor so the result will be rounded to closest
* instead of rounded down.
*/
residual += (parent_rate / 2);
ndiv_frac = div64_u64((u64)residual, (u64)parent_rate);
vco_out->ndiv_int = ndiv_int;
vco_out->ndiv_frac = ndiv_frac;
vco_out->pdiv = 1;
vco_out->rate = vco_out->ndiv_int * parent_rate;
residual = (u64)vco_out->ndiv_frac * (u64)parent_rate;
residual >>= 20;
vco_out->rate += residual;
return 0;
}
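/*
 * Worked example (illustrative, not part of the patch): with the 25 MHz
 * Cygnus reference clock that the removed audiopll_vco_params table assumed,
 * a target of 1354750204 Hz gives ndiv_int = 1354750204 / 25000000 = 54,
 * residual = 1354750204 - 54 * 25000000 = 4750204 Hz, and
 * ndiv_frac = (4750204 * 2^20 + 12500000) / 25000000 = 199238, reproducing
 * the old { 1354750204UL, 54, 199238, 1 } table entry at runtime.
 */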
/*
* Based on the target frequency, find a match from the VCO frequency parameter
* table and return its index
@ -252,17 +278,51 @@ static void __pll_bring_out_reset(struct iproc_pll *pll, unsigned int kp,
iproc_pll_write(pll, pll->control_base, reset->offset, val);
}
static int pll_set_rate(struct iproc_clk *clk, unsigned int rate_index,
/*
* Determines if the change to be applied to the PLL is minor (just an update
* or the fractional divider). If so, then we can avoid going through a
* disruptive reset and lock sequence.
*/
static bool pll_fractional_change_only(struct iproc_pll *pll,
struct iproc_pll_vco_param *vco)
{
const struct iproc_pll_ctrl *ctrl = pll->ctrl;
u32 val;
u32 ndiv_int;
unsigned int pdiv;
/* PLL needs to be locked */
val = readl(pll->status_base + ctrl->status.offset);
if ((val & (1 << ctrl->status.shift)) == 0)
return false;
val = readl(pll->control_base + ctrl->ndiv_int.offset);
ndiv_int = (val >> ctrl->ndiv_int.shift) &
bit_mask(ctrl->ndiv_int.width);
if (ndiv_int != vco->ndiv_int)
return false;
val = readl(pll->control_base + ctrl->pdiv.offset);
pdiv = (val >> ctrl->pdiv.shift) & bit_mask(ctrl->pdiv.width);
if (pdiv != vco->pdiv)
return false;
return true;
}
static int pll_set_rate(struct iproc_clk *clk, struct iproc_pll_vco_param *vco,
unsigned long parent_rate)
{
struct iproc_pll *pll = clk->pll;
const struct iproc_pll_vco_param *vco = &pll->vco_param[rate_index];
const struct iproc_pll_ctrl *ctrl = pll->ctrl;
int ka = 0, ki, kp, ret;
unsigned long rate = vco->rate;
u32 val;
enum kp_band kp_index;
unsigned long ref_freq;
const char *clk_name = clk_hw_get_name(&clk->hw);
/*
* reference frequency = parent frequency / PDIV
@ -285,22 +345,35 @@ static int pll_set_rate(struct iproc_clk *clk, unsigned int rate_index,
kp_index = KP_BAND_HIGH_HIGH;
} else {
pr_err("%s: pll: %s has invalid rate: %lu\n", __func__,
clk->name, rate);
clk_name, rate);
return -EINVAL;
}
kp = get_kp(ref_freq, kp_index);
if (kp < 0) {
pr_err("%s: pll: %s has invalid kp\n", __func__, clk->name);
pr_err("%s: pll: %s has invalid kp\n", __func__, clk_name);
return kp;
}
ret = __pll_enable(pll);
if (ret) {
pr_err("%s: pll: %s fails to enable\n", __func__, clk->name);
pr_err("%s: pll: %s fails to enable\n", __func__, clk_name);
return ret;
}
if (pll_fractional_change_only(clk->pll, vco)) {
/* program fractional part of NDIV */
if (ctrl->flags & IPROC_CLK_PLL_HAS_NDIV_FRAC) {
val = readl(pll->control_base + ctrl->ndiv_frac.offset);
val &= ~(bit_mask(ctrl->ndiv_frac.width) <<
ctrl->ndiv_frac.shift);
val |= vco->ndiv_frac << ctrl->ndiv_frac.shift;
iproc_pll_write(pll, pll->control_base,
ctrl->ndiv_frac.offset, val);
return 0;
}
}
/* put PLL in reset */
__pll_put_in_reset(pll);
@ -354,7 +427,7 @@ static int pll_set_rate(struct iproc_clk *clk, unsigned int rate_index,
ret = pll_wait_for_lock(pll);
if (ret < 0) {
pr_err("%s: pll: %s failed to lock\n", __func__, clk->name);
pr_err("%s: pll: %s failed to lock\n", __func__, clk_name);
return ret;
}
@ -390,16 +463,15 @@ static unsigned long iproc_pll_recalc_rate(struct clk_hw *hw,
u32 val;
u64 ndiv, ndiv_int, ndiv_frac;
unsigned int pdiv;
unsigned long rate;
if (parent_rate == 0)
return 0;
/* PLL needs to be locked */
val = readl(pll->status_base + ctrl->status.offset);
if ((val & (1 << ctrl->status.shift)) == 0) {
clk->rate = 0;
if ((val & (1 << ctrl->status.shift)) == 0)
return 0;
}
/*
* PLL output frequency =
@ -421,35 +493,60 @@ static unsigned long iproc_pll_recalc_rate(struct clk_hw *hw,
val = readl(pll->control_base + ctrl->pdiv.offset);
pdiv = (val >> ctrl->pdiv.shift) & bit_mask(ctrl->pdiv.width);
clk->rate = (ndiv * parent_rate) >> 20;
rate = (ndiv * parent_rate) >> 20;
if (pdiv == 0)
clk->rate *= 2;
rate *= 2;
else
clk->rate /= pdiv;
rate /= pdiv;
return clk->rate;
return rate;
}
static long iproc_pll_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
static int iproc_pll_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
unsigned i;
unsigned int i;
struct iproc_clk *clk = to_iproc_clk(hw);
struct iproc_pll *pll = clk->pll;
const struct iproc_pll_ctrl *ctrl = pll->ctrl;
unsigned long diff, best_diff;
unsigned int best_idx = 0;
int ret;
if (rate == 0 || *parent_rate == 0 || !pll->vco_param)
if (req->rate == 0 || req->best_parent_rate == 0)
return -EINVAL;
if (ctrl->flags & IPROC_CLK_PLL_CALC_PARAM) {
struct iproc_pll_vco_param vco_param;
ret = pll_calc_param(req->rate, req->best_parent_rate,
&vco_param);
if (ret)
return ret;
req->rate = vco_param.rate;
return 0;
}
if (!pll->vco_param)
return -EINVAL;
best_diff = ULONG_MAX;
for (i = 0; i < pll->num_vco_entries; i++) {
if (rate <= pll->vco_param[i].rate)
diff = abs(req->rate - pll->vco_param[i].rate);
if (diff <= best_diff) {
best_diff = diff;
best_idx = i;
}
/* break now if perfect match */
if (diff == 0)
break;
}
if (i == pll->num_vco_entries)
i--;
req->rate = pll->vco_param[best_idx].rate;
return pll->vco_param[i].rate;
return 0;
}
static int iproc_pll_set_rate(struct clk_hw *hw, unsigned long rate,
@ -457,13 +554,23 @@ static int iproc_pll_set_rate(struct clk_hw *hw, unsigned long rate,
{
struct iproc_clk *clk = to_iproc_clk(hw);
struct iproc_pll *pll = clk->pll;
const struct iproc_pll_ctrl *ctrl = pll->ctrl;
struct iproc_pll_vco_param vco_param;
int rate_index, ret;
rate_index = pll_get_rate_index(pll, rate);
if (rate_index < 0)
return rate_index;
if (ctrl->flags & IPROC_CLK_PLL_CALC_PARAM) {
ret = pll_calc_param(rate, parent_rate, &vco_param);
if (ret)
return ret;
} else {
rate_index = pll_get_rate_index(pll, rate);
if (rate_index < 0)
return rate_index;
ret = pll_set_rate(clk, rate_index, parent_rate);
vco_param = pll->vco_param[rate_index];
}
ret = pll_set_rate(clk, &vco_param, parent_rate);
return ret;
}
@ -471,7 +578,7 @@ static const struct clk_ops iproc_pll_ops = {
.enable = iproc_pll_enable,
.disable = iproc_pll_disable,
.recalc_rate = iproc_pll_recalc_rate,
.round_rate = iproc_pll_round_rate,
.determine_rate = iproc_pll_determine_rate,
.set_rate = iproc_pll_set_rate,
};
@ -518,6 +625,7 @@ static unsigned long iproc_clk_recalc_rate(struct clk_hw *hw,
struct iproc_pll *pll = clk->pll;
u32 val;
unsigned int mdiv;
unsigned long rate;
if (parent_rate == 0)
return 0;
@ -528,32 +636,33 @@ static unsigned long iproc_clk_recalc_rate(struct clk_hw *hw,
mdiv = 256;
if (ctrl->flags & IPROC_CLK_MCLK_DIV_BY_2)
clk->rate = parent_rate / (mdiv * 2);
rate = parent_rate / (mdiv * 2);
else
clk->rate = parent_rate / mdiv;
rate = parent_rate / mdiv;
return clk->rate;
return rate;
}
static long iproc_clk_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
static int iproc_clk_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
unsigned int div;
unsigned int bestdiv;
if (rate == 0 || *parent_rate == 0)
if (req->rate == 0)
return -EINVAL;
if (req->rate == req->best_parent_rate)
return 0;
if (rate == *parent_rate)
return *parent_rate;
bestdiv = DIV_ROUND_CLOSEST(req->best_parent_rate, req->rate);
if (bestdiv < 2)
req->rate = req->best_parent_rate;
div = DIV_ROUND_UP(*parent_rate, rate);
if (div < 2)
return *parent_rate;
if (bestdiv > 256)
bestdiv = 256;
if (div > 256)
div = 256;
req->rate = req->best_parent_rate / bestdiv;
return *parent_rate / div;
return 0;
}
static int iproc_clk_set_rate(struct clk_hw *hw, unsigned long rate,
@ -568,10 +677,10 @@ static int iproc_clk_set_rate(struct clk_hw *hw, unsigned long rate,
if (rate == 0 || parent_rate == 0)
return -EINVAL;
div = DIV_ROUND_CLOSEST(parent_rate, rate);
if (ctrl->flags & IPROC_CLK_MCLK_DIV_BY_2)
div = DIV_ROUND_UP(parent_rate, rate * 2);
else
div = DIV_ROUND_UP(parent_rate, rate);
div /= 2;
if (div > 256)
return -EINVAL;
@ -583,10 +692,6 @@ static int iproc_clk_set_rate(struct clk_hw *hw, unsigned long rate,
val |= div << ctrl->mdiv.shift;
}
iproc_pll_write(pll, pll->control_base, ctrl->mdiv.offset, val);
if (ctrl->flags & IPROC_CLK_MCLK_DIV_BY_2)
clk->rate = parent_rate / (div * 2);
else
clk->rate = parent_rate / div;
return 0;
}
@ -595,7 +700,7 @@ static const struct clk_ops iproc_clk_ops = {
.enable = iproc_clk_enable,
.disable = iproc_clk_disable,
.recalc_rate = iproc_clk_recalc_rate,
.round_rate = iproc_clk_round_rate,
.determine_rate = iproc_clk_determine_rate,
.set_rate = iproc_clk_set_rate,
};
@ -629,6 +734,8 @@ void iproc_pll_clk_setup(struct device_node *node,
struct iproc_clk *iclk;
struct clk_init_data init;
const char *parent_name;
struct iproc_clk *iclk_array;
struct clk_hw_onecell_data *clk_data;
if (WARN_ON(!pll_ctrl) || WARN_ON(!clk_ctrl))
return;
@ -637,14 +744,14 @@ void iproc_pll_clk_setup(struct device_node *node,
if (WARN_ON(!pll))
return;
pll->clk_data = kzalloc(sizeof(*pll->clk_data->hws) * num_clks +
sizeof(*pll->clk_data), GFP_KERNEL);
if (WARN_ON(!pll->clk_data))
clk_data = kzalloc(sizeof(*clk_data->hws) * num_clks +
sizeof(*clk_data), GFP_KERNEL);
if (WARN_ON(!clk_data))
goto err_clk_data;
pll->clk_data->num = num_clks;
clk_data->num = num_clks;
pll->clks = kcalloc(num_clks, sizeof(*pll->clks), GFP_KERNEL);
if (WARN_ON(!pll->clks))
iclk_array = kcalloc(num_clks, sizeof(struct iproc_clk), GFP_KERNEL);
if (WARN_ON(!iclk_array))
goto err_clks;
pll->control_base = of_iomap(node, 0);
@ -674,9 +781,8 @@ void iproc_pll_clk_setup(struct device_node *node,
/* initialize and register the PLL itself */
pll->ctrl = pll_ctrl;
iclk = &pll->clks[0];
iclk = &iclk_array[0];
iclk->pll = pll;
iclk->name = node->name;
init.name = node->name;
init.ops = &iproc_pll_ops;
@ -697,7 +803,7 @@ void iproc_pll_clk_setup(struct device_node *node,
if (WARN_ON(ret))
goto err_pll_register;
pll->clk_data->hws[0] = &iclk->hw;
clk_data->hws[0] = &iclk->hw;
/* now initialize and register all leaf clocks */
for (i = 1; i < num_clks; i++) {
@ -711,8 +817,7 @@ void iproc_pll_clk_setup(struct device_node *node,
if (WARN_ON(ret))
goto err_clk_register;
iclk = &pll->clks[i];
iclk->name = clk_name;
iclk = &iclk_array[i];
iclk->pll = pll;
iclk->ctrl = &clk_ctrl[i];
@ -727,11 +832,10 @@ void iproc_pll_clk_setup(struct device_node *node,
if (WARN_ON(ret))
goto err_clk_register;
pll->clk_data->hws[i] = &iclk->hw;
clk_data->hws[i] = &iclk->hw;
}
ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get,
pll->clk_data);
ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
if (WARN_ON(ret))
goto err_clk_register;
@ -739,7 +843,7 @@ void iproc_pll_clk_setup(struct device_node *node,
err_clk_register:
while (--i >= 0)
clk_hw_unregister(pll->clk_data->hws[i]);
clk_hw_unregister(clk_data->hws[i]);
err_pll_register:
if (pll->status_base != pll->control_base)
@ -756,10 +860,10 @@ err_asiu_iomap:
iounmap(pll->control_base);
err_pll_iomap:
kfree(pll->clks);
kfree(iclk_array);
err_clks:
kfree(pll->clk_data);
kfree(clk_data);
err_clk_data:
kfree(pll);

View File

@ -80,6 +80,11 @@
*/
#define IPROC_CLK_PLL_RESET_ACTIVE_LOW BIT(9)
/*
* Calculate the PLL parameters at runtime, instead of using a table
*/
#define IPROC_CLK_PLL_CALC_PARAM BIT(10)
/*
* Parameters for VCO frequency configuration
*

drivers/clk/clk-aspeed.c (new file, 667 lines)
View File

@ -0,0 +1,667 @@
// SPDX-License-Identifier: GPL-2.0+
#define pr_fmt(fmt) "clk-aspeed: " fmt
#include <linux/clk-provider.h>
#include <linux/mfd/syscon.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/reset-controller.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <dt-bindings/clock/aspeed-clock.h>
#define ASPEED_NUM_CLKS 35
#define ASPEED_RESET_CTRL 0x04
#define ASPEED_CLK_SELECTION 0x08
#define ASPEED_CLK_STOP_CTRL 0x0c
#define ASPEED_MPLL_PARAM 0x20
#define ASPEED_HPLL_PARAM 0x24
#define AST2500_HPLL_BYPASS_EN BIT(20)
#define AST2400_HPLL_STRAPPED BIT(18)
#define AST2400_HPLL_BYPASS_EN BIT(17)
#define ASPEED_MISC_CTRL 0x2c
#define UART_DIV13_EN BIT(12)
#define ASPEED_STRAP 0x70
#define CLKIN_25MHZ_EN BIT(23)
#define AST2400_CLK_SOURCE_SEL BIT(18)
#define ASPEED_CLK_SELECTION_2 0xd8
/* Globally visible clocks */
static DEFINE_SPINLOCK(aspeed_clk_lock);
/* Keeps track of all clocks */
static struct clk_hw_onecell_data *aspeed_clk_data;
static void __iomem *scu_base;
/**
* struct aspeed_gate_data - Aspeed gated clocks
* @clock_idx: bit used to gate this clock in the clock register
* @reset_idx: bit used to reset this IP in the reset register. -1 if no
* reset is required when enabling the clock
* @name: the clock name
* @parent_name: the name of the parent clock
* @flags: standard clock framework flags
*/
struct aspeed_gate_data {
u8 clock_idx;
s8 reset_idx;
const char *name;
const char *parent_name;
unsigned long flags;
};
/**
* struct aspeed_clk_gate - Aspeed specific clk_gate structure
* @hw: handle between common and hardware-specific interfaces
* @reg: register controlling gate
* @clock_idx: bit used to gate this clock in the clock register
* @reset_idx: bit used to reset this IP in the reset register. -1 if no
* reset is required when enabling the clock
* @flags: hardware-specific flags
* @lock: register lock
*
* Some of the clocks in the Aspeed SoC must be put in reset before enabling.
* This modified version of clk_gate allows an optional reset bit to be
* specified.
*/
struct aspeed_clk_gate {
struct clk_hw hw;
struct regmap *map;
u8 clock_idx;
s8 reset_idx;
u8 flags;
spinlock_t *lock;
};
#define to_aspeed_clk_gate(_hw) container_of(_hw, struct aspeed_clk_gate, hw)
/* TODO: ask Aspeed about the actual parent data */
static const struct aspeed_gate_data aspeed_gates[] = {
/* clk rst name parent flags */
[ASPEED_CLK_GATE_ECLK] = { 0, -1, "eclk-gate", "eclk", 0 }, /* Video Engine */
[ASPEED_CLK_GATE_GCLK] = { 1, 7, "gclk-gate", NULL, 0 }, /* 2D engine */
[ASPEED_CLK_GATE_MCLK] = { 2, -1, "mclk-gate", "mpll", CLK_IS_CRITICAL }, /* SDRAM */
[ASPEED_CLK_GATE_VCLK] = { 3, 6, "vclk-gate", NULL, 0 }, /* Video Capture */
[ASPEED_CLK_GATE_BCLK] = { 4, 10, "bclk-gate", "bclk", 0 }, /* PCIe/PCI */
[ASPEED_CLK_GATE_DCLK] = { 5, -1, "dclk-gate", NULL, 0 }, /* DAC */
[ASPEED_CLK_GATE_REFCLK] = { 6, -1, "refclk-gate", "clkin", CLK_IS_CRITICAL },
[ASPEED_CLK_GATE_USBPORT2CLK] = { 7, 3, "usb-port2-gate", NULL, 0 }, /* USB2.0 Host port 2 */
[ASPEED_CLK_GATE_LCLK] = { 8, 5, "lclk-gate", NULL, 0 }, /* LPC */
[ASPEED_CLK_GATE_USBUHCICLK] = { 9, 15, "usb-uhci-gate", NULL, 0 }, /* USB1.1 (requires port 2 enabled) */
[ASPEED_CLK_GATE_D1CLK] = { 10, 13, "d1clk-gate", NULL, 0 }, /* GFX CRT */
[ASPEED_CLK_GATE_YCLK] = { 13, 4, "yclk-gate", NULL, 0 }, /* HAC */
[ASPEED_CLK_GATE_USBPORT1CLK] = { 14, 14, "usb-port1-gate", NULL, 0 }, /* USB2 hub/USB2 host port 1/USB1.1 dev */
[ASPEED_CLK_GATE_UART1CLK] = { 15, -1, "uart1clk-gate", "uart", 0 }, /* UART1 */
[ASPEED_CLK_GATE_UART2CLK] = { 16, -1, "uart2clk-gate", "uart", 0 }, /* UART2 */
[ASPEED_CLK_GATE_UART5CLK] = { 17, -1, "uart5clk-gate", "uart", 0 }, /* UART5 */
[ASPEED_CLK_GATE_ESPICLK] = { 19, -1, "espiclk-gate", NULL, 0 }, /* eSPI */
[ASPEED_CLK_GATE_MAC1CLK] = { 20, 11, "mac1clk-gate", "mac", 0 }, /* MAC1 */
[ASPEED_CLK_GATE_MAC2CLK] = { 21, 12, "mac2clk-gate", "mac", 0 }, /* MAC2 */
[ASPEED_CLK_GATE_RSACLK] = { 24, -1, "rsaclk-gate", NULL, 0 }, /* RSA */
[ASPEED_CLK_GATE_UART3CLK] = { 25, -1, "uart3clk-gate", "uart", 0 }, /* UART3 */
[ASPEED_CLK_GATE_UART4CLK] = { 26, -1, "uart4clk-gate", "uart", 0 }, /* UART4 */
[ASPEED_CLK_GATE_SDCLKCLK] = { 27, 16, "sdclk-gate", NULL, 0 }, /* SDIO/SD */
[ASPEED_CLK_GATE_LHCCLK] = { 28, -1, "lhclk-gate", "lhclk", 0 }, /* LPC master/LPC+ */
};
static const struct clk_div_table ast2500_mac_div_table[] = {
{ 0x0, 4 }, /* Yep, really. Aspeed confirmed this is correct */
{ 0x1, 4 },
{ 0x2, 6 },
{ 0x3, 8 },
{ 0x4, 10 },
{ 0x5, 12 },
{ 0x6, 14 },
{ 0x7, 16 },
{ 0 }
};
static const struct clk_div_table ast2400_div_table[] = {
{ 0x0, 2 },
{ 0x1, 4 },
{ 0x2, 6 },
{ 0x3, 8 },
{ 0x4, 10 },
{ 0x5, 12 },
{ 0x6, 14 },
{ 0x7, 16 },
{ 0 }
};
static const struct clk_div_table ast2500_div_table[] = {
{ 0x0, 4 },
{ 0x1, 8 },
{ 0x2, 12 },
{ 0x3, 16 },
{ 0x4, 20 },
{ 0x5, 24 },
{ 0x6, 28 },
{ 0x7, 32 },
{ 0 }
};
static struct clk_hw *aspeed_ast2400_calc_pll(const char *name, u32 val)
{
unsigned int mult, div;
if (val & AST2400_HPLL_BYPASS_EN) {
/* Pass through mode */
mult = div = 1;
} else {
/* F = 24Mhz * (2-OD) * [(N + 2) / (D + 1)] */
u32 n = (val >> 5) & 0x3f;
u32 od = (val >> 4) & 0x1;
u32 d = val & 0xf;
mult = (2 - od) * (n + 2);
div = d + 1;
}
return clk_hw_register_fixed_factor(NULL, name, "clkin", 0,
mult, div);
};
static struct clk_hw *aspeed_ast2500_calc_pll(const char *name, u32 val)
{
unsigned int mult, div;
if (val & AST2500_HPLL_BYPASS_EN) {
/* Pass through mode */
mult = div = 1;
} else {
/* F = clkin * [(M+1) / (N+1)] / (P + 1) */
u32 p = (val >> 13) & 0x3f;
u32 m = (val >> 5) & 0xff;
u32 n = val & 0x1f;
mult = (m + 1) / (n + 1);
div = p + 1;
}
return clk_hw_register_fixed_factor(NULL, name, "clkin", 0,
mult, div);
}
struct aspeed_clk_soc_data {
const struct clk_div_table *div_table;
const struct clk_div_table *mac_div_table;
struct clk_hw *(*calc_pll)(const char *name, u32 val);
};
static const struct aspeed_clk_soc_data ast2500_data = {
.div_table = ast2500_div_table,
.mac_div_table = ast2500_mac_div_table,
.calc_pll = aspeed_ast2500_calc_pll,
};
static const struct aspeed_clk_soc_data ast2400_data = {
.div_table = ast2400_div_table,
.mac_div_table = ast2400_div_table,
.calc_pll = aspeed_ast2400_calc_pll,
};
static int aspeed_clk_enable(struct clk_hw *hw)
{
struct aspeed_clk_gate *gate = to_aspeed_clk_gate(hw);
unsigned long flags;
u32 clk = BIT(gate->clock_idx);
u32 rst = BIT(gate->reset_idx);
u32 enval;
spin_lock_irqsave(gate->lock, flags);
if (gate->reset_idx >= 0) {
/* Put IP in reset */
regmap_update_bits(gate->map, ASPEED_RESET_CTRL, rst, rst);
/* Delay 100us */
udelay(100);
}
/* Enable clock */
enval = (gate->flags & CLK_GATE_SET_TO_DISABLE) ? 0 : clk;
regmap_update_bits(gate->map, ASPEED_CLK_STOP_CTRL, clk, enval);
if (gate->reset_idx >= 0) {
/* A delay of 10ms is specified by the ASPEED docs */
mdelay(10);
/* Take IP out of reset */
regmap_update_bits(gate->map, ASPEED_RESET_CTRL, rst, 0);
}
spin_unlock_irqrestore(gate->lock, flags);
return 0;
}
static void aspeed_clk_disable(struct clk_hw *hw)
{
struct aspeed_clk_gate *gate = to_aspeed_clk_gate(hw);
unsigned long flags;
u32 clk = BIT(gate->clock_idx);
u32 enval;
spin_lock_irqsave(gate->lock, flags);
enval = (gate->flags & CLK_GATE_SET_TO_DISABLE) ? clk : 0;
regmap_update_bits(gate->map, ASPEED_CLK_STOP_CTRL, clk, enval);
spin_unlock_irqrestore(gate->lock, flags);
}
static int aspeed_clk_is_enabled(struct clk_hw *hw)
{
struct aspeed_clk_gate *gate = to_aspeed_clk_gate(hw);
u32 clk = BIT(gate->clock_idx);
u32 reg;
regmap_read(gate->map, ASPEED_CLK_STOP_CTRL, &reg);
return (reg & clk) ? 0 : 1;
}
static const struct clk_ops aspeed_clk_gate_ops = {
.enable = aspeed_clk_enable,
.disable = aspeed_clk_disable,
.is_enabled = aspeed_clk_is_enabled,
};
/**
* struct aspeed_reset - Aspeed reset controller
* @map: regmap to access the containing system controller
* @rcdev: reset controller device
*/
struct aspeed_reset {
struct regmap *map;
struct reset_controller_dev rcdev;
};
#define to_aspeed_reset(p) container_of((p), struct aspeed_reset, rcdev)
static const u8 aspeed_resets[] = {
[ASPEED_RESET_XDMA] = 25,
[ASPEED_RESET_MCTP] = 24,
[ASPEED_RESET_ADC] = 23,
[ASPEED_RESET_JTAG_MASTER] = 22,
[ASPEED_RESET_MIC] = 18,
[ASPEED_RESET_PWM] = 9,
[ASPEED_RESET_PCIVGA] = 8,
[ASPEED_RESET_I2C] = 2,
[ASPEED_RESET_AHB] = 1,
};
static int aspeed_reset_deassert(struct reset_controller_dev *rcdev,
unsigned long id)
{
struct aspeed_reset *ar = to_aspeed_reset(rcdev);
u32 rst = BIT(aspeed_resets[id]);
return regmap_update_bits(ar->map, ASPEED_RESET_CTRL, rst, 0);
}
static int aspeed_reset_assert(struct reset_controller_dev *rcdev,
unsigned long id)
{
struct aspeed_reset *ar = to_aspeed_reset(rcdev);
u32 rst = BIT(aspeed_resets[id]);
return regmap_update_bits(ar->map, ASPEED_RESET_CTRL, rst, rst);
}
static int aspeed_reset_status(struct reset_controller_dev *rcdev,
unsigned long id)
{
struct aspeed_reset *ar = to_aspeed_reset(rcdev);
u32 val, rst = BIT(aspeed_resets[id]);
int ret;
ret = regmap_read(ar->map, ASPEED_RESET_CTRL, &val);
if (ret)
return ret;
return !!(val & rst);
}
static const struct reset_control_ops aspeed_reset_ops = {
.assert = aspeed_reset_assert,
.deassert = aspeed_reset_deassert,
.status = aspeed_reset_status,
};
static struct clk_hw *aspeed_clk_hw_register_gate(struct device *dev,
const char *name, const char *parent_name, unsigned long flags,
struct regmap *map, u8 clock_idx, u8 reset_idx,
u8 clk_gate_flags, spinlock_t *lock)
{
struct aspeed_clk_gate *gate;
struct clk_init_data init;
struct clk_hw *hw;
int ret;
gate = kzalloc(sizeof(*gate), GFP_KERNEL);
if (!gate)
return ERR_PTR(-ENOMEM);
init.name = name;
init.ops = &aspeed_clk_gate_ops;
init.flags = flags;
init.parent_names = parent_name ? &parent_name : NULL;
init.num_parents = parent_name ? 1 : 0;
gate->map = map;
gate->clock_idx = clock_idx;
gate->reset_idx = reset_idx;
gate->flags = clk_gate_flags;
gate->lock = lock;
gate->hw.init = &init;
hw = &gate->hw;
ret = clk_hw_register(dev, hw);
if (ret) {
kfree(gate);
hw = ERR_PTR(ret);
}
return hw;
}
static int aspeed_clk_probe(struct platform_device *pdev)
{
const struct aspeed_clk_soc_data *soc_data;
struct device *dev = &pdev->dev;
struct aspeed_reset *ar;
struct regmap *map;
struct clk_hw *hw;
u32 val, rate;
int i, ret;
map = syscon_node_to_regmap(dev->of_node);
if (IS_ERR(map)) {
dev_err(dev, "no syscon regmap\n");
return PTR_ERR(map);
}
ar = devm_kzalloc(dev, sizeof(*ar), GFP_KERNEL);
if (!ar)
return -ENOMEM;
ar->map = map;
ar->rcdev.owner = THIS_MODULE;
ar->rcdev.nr_resets = ARRAY_SIZE(aspeed_resets);
ar->rcdev.ops = &aspeed_reset_ops;
ar->rcdev.of_node = dev->of_node;
ret = devm_reset_controller_register(dev, &ar->rcdev);
if (ret) {
dev_err(dev, "could not register reset controller\n");
return ret;
}
/* SoC generations share common layouts but have different divisors */
soc_data = of_device_get_match_data(dev);
if (!soc_data) {
dev_err(dev, "no match data for platform\n");
return -EINVAL;
}
/* UART clock div13 setting */
regmap_read(map, ASPEED_MISC_CTRL, &val);
if (val & UART_DIV13_EN)
rate = 24000000 / 13;
else
rate = 24000000;
/* TODO: Find the parent data for the uart clock */
hw = clk_hw_register_fixed_rate(dev, "uart", NULL, 0, rate);
if (IS_ERR(hw))
return PTR_ERR(hw);
aspeed_clk_data->hws[ASPEED_CLK_UART] = hw;
/*
* Memory controller (M-PLL) PLL. This clock is configured by the
* bootloader, and is exposed to Linux as a read-only clock rate.
*/
regmap_read(map, ASPEED_MPLL_PARAM, &val);
hw = soc_data->calc_pll("mpll", val);
if (IS_ERR(hw))
return PTR_ERR(hw);
aspeed_clk_data->hws[ASPEED_CLK_MPLL] = hw;
/* SD/SDIO clock divider (TODO: There's a gate too) */
hw = clk_hw_register_divider_table(dev, "sdio", "hpll", 0,
scu_base + ASPEED_CLK_SELECTION, 12, 3, 0,
soc_data->div_table,
&aspeed_clk_lock);
if (IS_ERR(hw))
return PTR_ERR(hw);
aspeed_clk_data->hws[ASPEED_CLK_SDIO] = hw;
/* MAC AHB bus clock divider */
hw = clk_hw_register_divider_table(dev, "mac", "hpll", 0,
scu_base + ASPEED_CLK_SELECTION, 16, 3, 0,
soc_data->mac_div_table,
&aspeed_clk_lock);
if (IS_ERR(hw))
return PTR_ERR(hw);
aspeed_clk_data->hws[ASPEED_CLK_MAC] = hw;
/* LPC Host (LHCLK) clock divider */
hw = clk_hw_register_divider_table(dev, "lhclk", "hpll", 0,
scu_base + ASPEED_CLK_SELECTION, 20, 3, 0,
soc_data->div_table,
&aspeed_clk_lock);
if (IS_ERR(hw))
return PTR_ERR(hw);
aspeed_clk_data->hws[ASPEED_CLK_LHCLK] = hw;
/* P-Bus (BCLK) clock divider */
hw = clk_hw_register_divider_table(dev, "bclk", "hpll", 0,
scu_base + ASPEED_CLK_SELECTION_2, 0, 2, 0,
soc_data->div_table,
&aspeed_clk_lock);
if (IS_ERR(hw))
return PTR_ERR(hw);
aspeed_clk_data->hws[ASPEED_CLK_BCLK] = hw;
/*
* TODO: There are a number of clocks that not included in this driver
* as more information is required:
* D2-PLL
* D-PLL
* YCLK
* RGMII
* RMII
* UART[1..5] clock source mux
* Video Engine (ECLK) mux and clock divider
*/
for (i = 0; i < ARRAY_SIZE(aspeed_gates); i++) {
const struct aspeed_gate_data *gd = &aspeed_gates[i];
u32 gate_flags;
/* Special case: the USB port 1 clock (bit 14) is always
* working the opposite way from the other ones.
*/
gate_flags = (gd->clock_idx == 14) ? 0 : CLK_GATE_SET_TO_DISABLE;
hw = aspeed_clk_hw_register_gate(dev,
gd->name,
gd->parent_name,
gd->flags,
map,
gd->clock_idx,
gd->reset_idx,
gate_flags,
&aspeed_clk_lock);
if (IS_ERR(hw))
return PTR_ERR(hw);
aspeed_clk_data->hws[i] = hw;
}
return 0;
};
static const struct of_device_id aspeed_clk_dt_ids[] = {
{ .compatible = "aspeed,ast2400-scu", .data = &ast2400_data },
{ .compatible = "aspeed,ast2500-scu", .data = &ast2500_data },
{ }
};
static struct platform_driver aspeed_clk_driver = {
.probe = aspeed_clk_probe,
.driver = {
.name = "aspeed-clk",
.of_match_table = aspeed_clk_dt_ids,
.suppress_bind_attrs = true,
},
};
builtin_platform_driver(aspeed_clk_driver);
static void __init aspeed_ast2400_cc(struct regmap *map)
{
struct clk_hw *hw;
u32 val, freq, div;
/*
* CLKIN is the crystal oscillator, 24, 48 or 25MHz selected by
* strapping
*/
regmap_read(map, ASPEED_STRAP, &val);
if (val & CLKIN_25MHZ_EN)
freq = 25000000;
else if (val & AST2400_CLK_SOURCE_SEL)
freq = 48000000;
else
freq = 24000000;
hw = clk_hw_register_fixed_rate(NULL, "clkin", NULL, 0, freq);
pr_debug("clkin @%u MHz\n", freq / 1000000);
/*
* High-speed PLL clock derived from the crystal. This is the CPU clock,
* and we assume that it is enabled
*/
regmap_read(map, ASPEED_HPLL_PARAM, &val);
WARN(val & AST2400_HPLL_STRAPPED, "hpll is strapped not configured");
aspeed_clk_data->hws[ASPEED_CLK_HPLL] = aspeed_ast2400_calc_pll("hpll", val);
/*
* Strap bits 11:10 define the CPU/AHB clock frequency ratio (aka HCLK)
* 00: Select CPU:AHB = 1:1
* 01: Select CPU:AHB = 2:1
* 10: Select CPU:AHB = 4:1
* 11: Select CPU:AHB = 3:1
*/
regmap_read(map, ASPEED_STRAP, &val);
val = (val >> 10) & 0x3;
div = val + 1;
if (div == 3)
div = 4;
else if (div == 4)
div = 3;
hw = clk_hw_register_fixed_factor(NULL, "ahb", "hpll", 0, 1, div);
aspeed_clk_data->hws[ASPEED_CLK_AHB] = hw;
/* APB clock: clock selection register SCU08 (aka PCLK) */
hw = clk_hw_register_divider_table(NULL, "apb", "hpll", 0,
scu_base + ASPEED_CLK_SELECTION, 23, 3, 0,
ast2400_div_table,
&aspeed_clk_lock);
aspeed_clk_data->hws[ASPEED_CLK_APB] = hw;
}
static void __init aspeed_ast2500_cc(struct regmap *map)
{
struct clk_hw *hw;
u32 val, freq, div;
/* CLKIN is the crystal oscillator, 24 or 25MHz selected by strapping */
regmap_read(map, ASPEED_STRAP, &val);
if (val & CLKIN_25MHZ_EN)
freq = 25000000;
else
freq = 24000000;
hw = clk_hw_register_fixed_rate(NULL, "clkin", NULL, 0, freq);
pr_debug("clkin @%u MHz\n", freq / 1000000);
/*
* High-speed PLL clock derived from the crystal. This is the CPU clock,
* and we assume that it is enabled
*/
regmap_read(map, ASPEED_HPLL_PARAM, &val);
aspeed_clk_data->hws[ASPEED_CLK_HPLL] = aspeed_ast2500_calc_pll("hpll", val);
/* Strap bits 11:9 define the AXI/AHB clock frequency ratio (aka HCLK)*/
regmap_read(map, ASPEED_STRAP, &val);
val = (val >> 9) & 0x7;
WARN(val == 0, "strapping is zero: cannot determine ahb clock");
div = 2 * (val + 1);
hw = clk_hw_register_fixed_factor(NULL, "ahb", "hpll", 0, 1, div);
aspeed_clk_data->hws[ASPEED_CLK_AHB] = hw;
/* APB clock: clock selection register SCU08 (aka PCLK) */
regmap_read(map, ASPEED_CLK_SELECTION, &val);
val = (val >> 23) & 0x7;
div = 4 * (val + 1);
hw = clk_hw_register_fixed_factor(NULL, "apb", "hpll", 0, 1, div);
aspeed_clk_data->hws[ASPEED_CLK_APB] = hw;
};
static void __init aspeed_cc_init(struct device_node *np)
{
struct regmap *map;
u32 val;
int ret;
int i;
scu_base = of_iomap(np, 0);
if (!scu_base)
return;
aspeed_clk_data = kzalloc(sizeof(*aspeed_clk_data) +
sizeof(*aspeed_clk_data->hws) * ASPEED_NUM_CLKS,
GFP_KERNEL);
if (!aspeed_clk_data)
return;
/*
* This way all clocks fetched before the platform device probes,
* except those we assign here for early use, will be deferred.
*/
for (i = 0; i < ASPEED_NUM_CLKS; i++)
aspeed_clk_data->hws[i] = ERR_PTR(-EPROBE_DEFER);
map = syscon_node_to_regmap(np);
if (IS_ERR(map)) {
pr_err("no syscon regmap\n");
return;
}
/*
* We check that the regmap works on this very first access,
* but as this is an MMIO-backed regmap, subsequent regmap
* access is not going to fail and we skip error checks from
* this point.
*/
ret = regmap_read(map, ASPEED_STRAP, &val);
if (ret) {
pr_err("failed to read strapping register\n");
return;
}
if (of_device_is_compatible(np, "aspeed,ast2400-scu"))
aspeed_ast2400_cc(map);
else if (of_device_is_compatible(np, "aspeed,ast2500-scu"))
aspeed_ast2500_cc(map);
else
pr_err("unknown platform, failed to add clocks\n");
aspeed_clk_data->num = ASPEED_NUM_CLKS;
ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, aspeed_clk_data);
if (ret)
pr_err("failed to add DT provider: %d\n", ret);
};
CLK_OF_DECLARE_DRIVER(aspeed_cc_g5, "aspeed,ast2500-scu", aspeed_cc_init);
CLK_OF_DECLARE_DRIVER(aspeed_cc_g4, "aspeed,ast2400-scu", aspeed_cc_init);

View File

@ -40,6 +40,10 @@
#define MMCM_REG_FILTER1 0x4e
#define MMCM_REG_FILTER2 0x4f
#define MMCM_CLKOUT_NOCOUNT BIT(6)
#define MMCM_CLK_DIV_NOCOUNT BIT(12)
struct axi_clkgen {
void __iomem *base;
struct clk_hw clk_hw;
@ -298,13 +302,17 @@ static long axi_clkgen_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
unsigned int d, m, dout;
unsigned long long tmp;
axi_clkgen_calc_params(*parent_rate, rate, &d, &m, &dout);
if (d == 0 || dout == 0 || m == 0)
return -EINVAL;
return *parent_rate / d * m / dout;
tmp = (unsigned long long)*parent_rate * m;
tmp = DIV_ROUND_CLOSEST_ULL(tmp, dout * d);
return min_t(unsigned long long, tmp, LONG_MAX);
}
static unsigned long axi_clkgen_recalc_rate(struct clk_hw *clk_hw,
@ -315,18 +323,33 @@ static unsigned long axi_clkgen_recalc_rate(struct clk_hw *clk_hw,
unsigned int reg;
unsigned long long tmp;
axi_clkgen_mmcm_read(axi_clkgen, MMCM_REG_CLKOUT0_1, &reg);
dout = (reg & 0x3f) + ((reg >> 6) & 0x3f);
axi_clkgen_mmcm_read(axi_clkgen, MMCM_REG_CLKOUT0_2, &reg);
if (reg & MMCM_CLKOUT_NOCOUNT) {
dout = 1;
} else {
axi_clkgen_mmcm_read(axi_clkgen, MMCM_REG_CLKOUT0_1, &reg);
dout = (reg & 0x3f) + ((reg >> 6) & 0x3f);
}
axi_clkgen_mmcm_read(axi_clkgen, MMCM_REG_CLK_DIV, &reg);
d = (reg & 0x3f) + ((reg >> 6) & 0x3f);
axi_clkgen_mmcm_read(axi_clkgen, MMCM_REG_CLK_FB1, &reg);
m = (reg & 0x3f) + ((reg >> 6) & 0x3f);
if (reg & MMCM_CLK_DIV_NOCOUNT)
d = 1;
else
d = (reg & 0x3f) + ((reg >> 6) & 0x3f);
axi_clkgen_mmcm_read(axi_clkgen, MMCM_REG_CLK_FB2, &reg);
if (reg & MMCM_CLKOUT_NOCOUNT) {
m = 1;
} else {
axi_clkgen_mmcm_read(axi_clkgen, MMCM_REG_CLK_FB1, &reg);
m = (reg & 0x3f) + ((reg >> 6) & 0x3f);
}
if (d == 0 || dout == 0)
return 0;
tmp = (unsigned long long)(parent_rate / d) * m;
do_div(tmp, dout);
tmp = (unsigned long long)parent_rate * m;
tmp = DIV_ROUND_CLOSEST_ULL(tmp, dout * d);
return min_t(unsigned long long, tmp, ULONG_MAX);
}


@ -118,12 +118,11 @@ static unsigned int _get_val(const struct clk_div_table *table,
unsigned long divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate,
unsigned int val,
const struct clk_div_table *table,
unsigned long flags)
unsigned long flags, unsigned long width)
{
struct clk_divider *divider = to_clk_divider(hw);
unsigned int div;
div = _get_div(table, val, flags, divider->width);
div = _get_div(table, val, flags, width);
if (!div) {
WARN(!(flags & CLK_DIVIDER_ALLOW_ZERO),
"%s: Zero divisor and CLK_DIVIDER_ALLOW_ZERO not set\n",
@ -145,7 +144,7 @@ static unsigned long clk_divider_recalc_rate(struct clk_hw *hw,
val &= div_mask(divider->width);
return divider_recalc_rate(hw, parent_rate, val, divider->table,
divider->flags);
divider->flags, divider->width);
}
static bool _is_valid_table_div(const struct clk_div_table *table,


@ -41,7 +41,7 @@ struct clockgen_pll_div {
};
struct clockgen_pll {
struct clockgen_pll_div div[4];
struct clockgen_pll_div div[8];
};
#define CLKSEL_VALID 1
@ -1127,6 +1127,13 @@ static void __init create_one_pll(struct clockgen *cg, int idx)
struct clk *clk;
int ret;
/*
* For platform PLL, there are 8 divider clocks.
* For core PLL, there are 4 divider clocks at most.
*/
if (idx != PLATFORM_PLL && i >= 4)
break;
snprintf(pll->div[i].name, sizeof(pll->div[i].name),
"cg-pll%d-div%d", idx, i + 1);


@ -72,7 +72,7 @@ static const char * const si5351_input_names[] = {
"xtal", "clkin"
};
static const char * const si5351_pll_names[] = {
"plla", "pllb", "vxco"
"si5351_plla", "si5351_pllb", "si5351_vxco"
};
static const char * const si5351_msynth_names[] = {
"ms0", "ms1", "ms2", "ms3", "ms4", "ms5", "ms6", "ms7"
@ -903,13 +903,42 @@ static int _si5351_clkout_set_disable_state(
return 0;
}
static void _si5351_clkout_reset_pll(struct si5351_driver_data *drvdata, int num)
{
u8 val = si5351_reg_read(drvdata, SI5351_CLK0_CTRL + num);
switch (val & SI5351_CLK_INPUT_MASK) {
case SI5351_CLK_INPUT_XTAL:
case SI5351_CLK_INPUT_CLKIN:
return; /* pll not used, no need to reset */
}
si5351_reg_write(drvdata, SI5351_PLL_RESET,
val & SI5351_CLK_PLL_SELECT ? SI5351_PLL_RESET_B :
SI5351_PLL_RESET_A);
dev_dbg(&drvdata->client->dev, "%s - %s: pll = %d\n",
__func__, clk_hw_get_name(&drvdata->clkout[num].hw),
(val & SI5351_CLK_PLL_SELECT) ? 1 : 0);
}
static int si5351_clkout_prepare(struct clk_hw *hw)
{
struct si5351_hw_data *hwdata =
container_of(hw, struct si5351_hw_data, hw);
struct si5351_platform_data *pdata =
hwdata->drvdata->client->dev.platform_data;
si5351_set_bits(hwdata->drvdata, SI5351_CLK0_CTRL + hwdata->num,
SI5351_CLK_POWERDOWN, 0);
/*
* Do a pll soft reset on the parent pll -- needed to get a
* deterministic phase relationship between the output clocks.
*/
if (pdata->clkout[hwdata->num].pll_reset)
_si5351_clkout_reset_pll(hwdata->drvdata, hwdata->num);
si5351_set_bits(hwdata->drvdata, SI5351_OUTPUT_ENABLE_CTRL,
(1 << hwdata->num), 0);
return 0;
@ -1297,6 +1326,9 @@ static int si5351_dt_parse(struct i2c_client *client,
pdata->clkout[num].pll_master =
of_property_read_bool(child, "silabs,pll-master");
pdata->clkout[num].pll_reset =
of_property_read_bool(child, "silabs,pll-reset");
}
client->dev.platform_data = pdata;
@ -1437,11 +1469,6 @@ static int si5351_i2c_probe(struct i2c_client *client,
}
}
if (!IS_ERR(drvdata->pxtal))
clk_prepare_enable(drvdata->pxtal);
if (!IS_ERR(drvdata->pclkin))
clk_prepare_enable(drvdata->pclkin);
/* register xtal input clock gate */
memset(&init, 0, sizeof(init));
init.name = si5351_input_names[0];
@ -1456,7 +1483,7 @@ static int si5351_i2c_probe(struct i2c_client *client,
ret = devm_clk_hw_register(&client->dev, &drvdata->xtal);
if (ret) {
dev_err(&client->dev, "unable to register %s\n", init.name);
goto err_clk;
return ret;
}
/* register clkin input clock gate */
@ -1474,7 +1501,7 @@ static int si5351_i2c_probe(struct i2c_client *client,
if (ret) {
dev_err(&client->dev, "unable to register %s\n",
init.name);
goto err_clk;
return ret;
}
}
@ -1496,7 +1523,7 @@ static int si5351_i2c_probe(struct i2c_client *client,
ret = devm_clk_hw_register(&client->dev, &drvdata->pll[0].hw);
if (ret) {
dev_err(&client->dev, "unable to register %s\n", init.name);
goto err_clk;
return ret;
}
/* register PLLB or VXCO (Si5351B) */
@ -1520,7 +1547,7 @@ static int si5351_i2c_probe(struct i2c_client *client,
ret = devm_clk_hw_register(&client->dev, &drvdata->pll[1].hw);
if (ret) {
dev_err(&client->dev, "unable to register %s\n", init.name);
goto err_clk;
return ret;
}
/* register clk multisync and clk out divider */
@ -1539,7 +1566,7 @@ static int si5351_i2c_probe(struct i2c_client *client,
if (WARN_ON(!drvdata->msynth || !drvdata->clkout)) {
ret = -ENOMEM;
goto err_clk;
return ret;
}
for (n = 0; n < num_clocks; n++) {
@ -1559,7 +1586,7 @@ static int si5351_i2c_probe(struct i2c_client *client,
if (ret) {
dev_err(&client->dev, "unable to register %s\n",
init.name);
goto err_clk;
return ret;
}
}
@ -1587,7 +1614,7 @@ static int si5351_i2c_probe(struct i2c_client *client,
if (ret) {
dev_err(&client->dev, "unable to register %s\n",
init.name);
goto err_clk;
return ret;
}
/* set initial clkout rate */
@ -1606,17 +1633,17 @@ static int si5351_i2c_probe(struct i2c_client *client,
drvdata);
if (ret) {
dev_err(&client->dev, "unable to add clk provider\n");
goto err_clk;
return ret;
}
return 0;
}
err_clk:
if (!IS_ERR(drvdata->pxtal))
clk_disable_unprepare(drvdata->pxtal);
if (!IS_ERR(drvdata->pclkin))
clk_disable_unprepare(drvdata->pclkin);
return ret;
static int si5351_i2c_remove(struct i2c_client *client)
{
of_clk_del_provider(client->dev.of_node);
return 0;
}
static const struct i2c_device_id si5351_i2c_ids[] = {
@ -1634,6 +1661,7 @@ static struct i2c_driver si5351_driver = {
.of_match_table = of_match_ptr(si5351_dt_ids),
},
.probe = si5351_i2c_probe,
.remove = si5351_i2c_remove,
.id_table = si5351_i2c_ids,
};
module_i2c_driver(si5351_driver);


@ -1424,7 +1424,7 @@ static void __init stm32f4_rcc_init(struct device_node *np)
base = of_iomap(np, 0);
if (!base) {
pr_err("%s: unable to map resource", np->name);
pr_err("%s: unable to map resource\n", np->name);
return;
}


@ -1,20 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) Gabriel Fernandez 2017
* Author: Gabriel Fernandez <gabriel.fernandez@st.com>
*
* License terms: GPL V2.0.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
* Copyright (C) STMicroelectronics 2017
* Author: Gabriel Fernandez <gabriel.fernandez@st.com> for STMicroelectronics.
*/
#include <linux/clk.h>


@ -24,6 +24,7 @@
#include <linux/pm_runtime.h>
#include <linux/sched.h>
#include <linux/clkdev.h>
#include <linux/stringify.h>
#include "clk.h"
@ -62,6 +63,7 @@ struct clk_core {
bool orphan;
unsigned int enable_count;
unsigned int prepare_count;
unsigned int protect_count;
unsigned long min_rate;
unsigned long max_rate;
unsigned long accuracy;
@ -86,6 +88,7 @@ struct clk {
const char *con_id;
unsigned long min_rate;
unsigned long max_rate;
unsigned int exclusive_count;
struct hlist_node clks_node;
};
@ -141,10 +144,18 @@ static unsigned long clk_enable_lock(void)
{
unsigned long flags;
if (!spin_trylock_irqsave(&enable_lock, flags)) {
/*
* On UP systems, spin_trylock_irqsave() always returns true, even if
* we already hold the lock. So, in that case, we rely only on
* reference counting.
*/
if (!IS_ENABLED(CONFIG_SMP) ||
!spin_trylock_irqsave(&enable_lock, flags)) {
if (enable_owner == current) {
enable_refcnt++;
__acquire(enable_lock);
if (!IS_ENABLED(CONFIG_SMP))
local_save_flags(flags);
return flags;
}
spin_lock_irqsave(&enable_lock, flags);
@ -170,6 +181,11 @@ static void clk_enable_unlock(unsigned long flags)
spin_unlock_irqrestore(&enable_lock, flags);
}
static bool clk_core_rate_is_protected(struct clk_core *core)
{
return core->protect_count;
}
static bool clk_core_is_prepared(struct clk_core *core)
{
bool ret = false;
@ -382,6 +398,11 @@ bool clk_hw_is_prepared(const struct clk_hw *hw)
return clk_core_is_prepared(hw->core);
}
bool clk_hw_rate_is_protected(const struct clk_hw *hw)
{
return clk_core_rate_is_protected(hw->core);
}
bool clk_hw_is_enabled(const struct clk_hw *hw)
{
return clk_core_is_enabled(hw->core);
@ -520,6 +541,139 @@ EXPORT_SYMBOL_GPL(__clk_mux_determine_rate_closest);
/*** clk api ***/
static void clk_core_rate_unprotect(struct clk_core *core)
{
lockdep_assert_held(&prepare_lock);
if (!core)
return;
if (WARN_ON(core->protect_count == 0))
return;
if (--core->protect_count > 0)
return;
clk_core_rate_unprotect(core->parent);
}
static int clk_core_rate_nuke_protect(struct clk_core *core)
{
int ret;
lockdep_assert_held(&prepare_lock);
if (!core)
return -EINVAL;
if (core->protect_count == 0)
return 0;
ret = core->protect_count;
core->protect_count = 1;
clk_core_rate_unprotect(core);
return ret;
}
/**
* clk_rate_exclusive_put - release exclusivity over clock rate control
* @clk: the clk over which the exclusivity is released
*
* clk_rate_exclusive_put() completes a critical section during which a clock
* consumer cannot tolerate any other consumer making any operation on the
* clock which could result in a rate change or rate glitch. Exclusive clocks
* cannot have their rate changed, either directly or indirectly due to changes
* further up the parent chain of clocks. As a result, clocks up the parent
* chain also come under exclusive control of the calling consumer.
*
* If exclusivity is claimed more than once on a clock, even by the same
* consumer, the rate effectively gets locked as exclusivity can't be preempted.
*
* Calls to clk_rate_exclusive_put() must be balanced with calls to
* clk_rate_exclusive_get(). Calls to this function may sleep, and do not return
* error status.
*/
void clk_rate_exclusive_put(struct clk *clk)
{
if (!clk)
return;
clk_prepare_lock();
/*
* if there is something wrong with this consumer protect count, stop
* here before messing with the provider
*/
if (WARN_ON(clk->exclusive_count <= 0))
goto out;
clk_core_rate_unprotect(clk->core);
clk->exclusive_count--;
out:
clk_prepare_unlock();
}
EXPORT_SYMBOL_GPL(clk_rate_exclusive_put);
static void clk_core_rate_protect(struct clk_core *core)
{
lockdep_assert_held(&prepare_lock);
if (!core)
return;
if (core->protect_count == 0)
clk_core_rate_protect(core->parent);
core->protect_count++;
}
static void clk_core_rate_restore_protect(struct clk_core *core, int count)
{
lockdep_assert_held(&prepare_lock);
if (!core)
return;
if (count == 0)
return;
clk_core_rate_protect(core);
core->protect_count = count;
}
/**
* clk_rate_exclusive_get - get exclusivity over the clk rate control
* @clk: the clk over which the exclusivity of rate control is requested
*
* clk_rate_exclusive_get() begins a critical section during which a clock
* consumer cannot tolerate any other consumer making any operation on the
* clock which could result in a rate change or rate glitch. Exclusive clocks
* cannot have their rate changed, either directly or indirectly due to changes
* further up the parent chain of clocks. As a result, clocks up the parent
* chain also come under exclusive control of the calling consumer.
*
* If exclusivity is claimed more than once on a clock, even by the same
* consumer, the rate effectively gets locked as exclusivity can't be preempted.
*
* Calls to clk_rate_exclusive_get() should be balanced with calls to
* clk_rate_exclusive_put(). Calls to this function may sleep.
* Returns 0 on success, a negative errno otherwise
*/
int clk_rate_exclusive_get(struct clk *clk)
{
if (!clk)
return 0;
clk_prepare_lock();
clk_core_rate_protect(clk->core);
clk->exclusive_count++;
clk_prepare_unlock();
return 0;
}
EXPORT_SYMBOL_GPL(clk_rate_exclusive_get);
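/*
 * Illustrative consumer-side sketch (not part of this patch): how a
 * hypothetical audio driver could pair clk_rate_exclusive_get() and
 * clk_rate_exclusive_put() so that a shared parent clk cannot glitch its
 * bit clock during playback. The names (struct foo_priv, foo_start_playback,
 * the 12.288 MHz rate) are invented for illustration and assume <linux/clk.h>.
 */
struct foo_priv {
	struct clk *bclk;
};

static int foo_start_playback(struct foo_priv *priv)
{
	int ret;

	/* Claim exclusivity so no other consumer may change the rate. */
	ret = clk_rate_exclusive_get(priv->bclk);
	if (ret)
		return ret;

	/*
	 * The exclusive holder may still set the rate itself; clk_set_rate()
	 * temporarily drops the caller's own protection (see the
	 * exclusive_count handling in clk_set_rate() below).
	 */
	ret = clk_set_rate(priv->bclk, 12288000);
	if (ret)
		clk_rate_exclusive_put(priv->bclk);

	return ret;
}

static void foo_stop_playback(struct foo_priv *priv)
{
	/* Must balance the earlier clk_rate_exclusive_get(). */
	clk_rate_exclusive_put(priv->bclk);
}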
static void clk_core_unprepare(struct clk_core *core)
{
lockdep_assert_held(&prepare_lock);
@ -906,10 +1060,9 @@ static int clk_disable_unused(void)
}
late_initcall_sync(clk_disable_unused);
static int clk_core_round_rate_nolock(struct clk_core *core,
struct clk_rate_request *req)
static int clk_core_determine_round_nolock(struct clk_core *core,
struct clk_rate_request *req)
{
struct clk_core *parent;
long rate;
lockdep_assert_held(&prepare_lock);
@ -917,6 +1070,38 @@ static int clk_core_round_rate_nolock(struct clk_core *core,
if (!core)
return 0;
/*
* At this point, core protection will be disabled if:
* - the provider is not protected at all
* - the calling consumer is the only one which has exclusivity
* over the provider
*/
if (clk_core_rate_is_protected(core)) {
req->rate = core->rate;
} else if (core->ops->determine_rate) {
return core->ops->determine_rate(core->hw, req);
} else if (core->ops->round_rate) {
rate = core->ops->round_rate(core->hw, req->rate,
&req->best_parent_rate);
if (rate < 0)
return rate;
req->rate = rate;
} else {
return -EINVAL;
}
return 0;
}
static void clk_core_init_rate_req(struct clk_core * const core,
struct clk_rate_request *req)
{
struct clk_core *parent;
if (WARN_ON(!core || !req))
return;
parent = core->parent;
if (parent) {
req->best_parent_hw = parent->hw;
@ -925,22 +1110,32 @@ static int clk_core_round_rate_nolock(struct clk_core *core,
req->best_parent_hw = NULL;
req->best_parent_rate = 0;
}
}
if (core->ops->determine_rate) {
return core->ops->determine_rate(core->hw, req);
} else if (core->ops->round_rate) {
rate = core->ops->round_rate(core->hw, req->rate,
&req->best_parent_rate);
if (rate < 0)
return rate;
static bool clk_core_can_round(struct clk_core * const core)
{
if (core->ops->determine_rate || core->ops->round_rate)
return true;
req->rate = rate;
} else if (core->flags & CLK_SET_RATE_PARENT) {
return clk_core_round_rate_nolock(parent, req);
} else {
req->rate = core->rate;
}
return false;
}
static int clk_core_round_rate_nolock(struct clk_core *core,
struct clk_rate_request *req)
{
lockdep_assert_held(&prepare_lock);
if (!core)
return 0;
clk_core_init_rate_req(core, req);
if (clk_core_can_round(core))
return clk_core_determine_round_nolock(core, req);
else if (core->flags & CLK_SET_RATE_PARENT)
return clk_core_round_rate_nolock(core->parent, req);
req->rate = core->rate;
return 0;
}
@ -997,10 +1192,17 @@ long clk_round_rate(struct clk *clk, unsigned long rate)
clk_prepare_lock();
if (clk->exclusive_count)
clk_core_rate_unprotect(clk->core);
clk_core_get_boundaries(clk->core, &req.min_rate, &req.max_rate);
req.rate = rate;
ret = clk_core_round_rate_nolock(clk->core, &req);
if (clk->exclusive_count)
clk_core_rate_protect(clk->core);
clk_prepare_unlock();
if (ret)
@ -1433,34 +1635,23 @@ static struct clk_core *clk_calc_new_rates(struct clk_core *core,
clk_core_get_boundaries(core, &min_rate, &max_rate);
/* find the closest rate and parent clk/rate */
if (core->ops->determine_rate) {
if (clk_core_can_round(core)) {
struct clk_rate_request req;
req.rate = rate;
req.min_rate = min_rate;
req.max_rate = max_rate;
if (parent) {
req.best_parent_hw = parent->hw;
req.best_parent_rate = parent->rate;
} else {
req.best_parent_hw = NULL;
req.best_parent_rate = 0;
}
ret = core->ops->determine_rate(core->hw, &req);
clk_core_init_rate_req(core, &req);
ret = clk_core_determine_round_nolock(core, &req);
if (ret < 0)
return NULL;
best_parent_rate = req.best_parent_rate;
new_rate = req.rate;
parent = req.best_parent_hw ? req.best_parent_hw->core : NULL;
} else if (core->ops->round_rate) {
ret = core->ops->round_rate(core->hw, rate,
&best_parent_rate);
if (ret < 0)
return NULL;
new_rate = ret;
if (new_rate < min_rate || new_rate > max_rate)
return NULL;
} else if (!parent || !(core->flags & CLK_SET_RATE_PARENT)) {
@ -1642,25 +1833,58 @@ static void clk_change_rate(struct clk_core *core)
clk_pm_runtime_put(core);
}
static unsigned long clk_core_req_round_rate_nolock(struct clk_core *core,
unsigned long req_rate)
{
int ret, cnt;
struct clk_rate_request req;
lockdep_assert_held(&prepare_lock);
if (!core)
return 0;
/* simulate what the rate would be if it could be freely set */
cnt = clk_core_rate_nuke_protect(core);
if (cnt < 0)
return cnt;
clk_core_get_boundaries(core, &req.min_rate, &req.max_rate);
req.rate = req_rate;
ret = clk_core_round_rate_nolock(core, &req);
/* restore the protection */
clk_core_rate_restore_protect(core, cnt);
return ret ? 0 : req.rate;
}
static int clk_core_set_rate_nolock(struct clk_core *core,
unsigned long req_rate)
{
struct clk_core *top, *fail_clk;
unsigned long rate = req_rate;
unsigned long rate;
int ret = 0;
if (!core)
return 0;
rate = clk_core_req_round_rate_nolock(core, req_rate);
/* bail early if nothing to do */
if (rate == clk_core_get_rate_nolock(core))
return 0;
/* fail on a direct rate set of a protected provider */
if (clk_core_rate_is_protected(core))
return -EBUSY;
if ((core->flags & CLK_SET_RATE_GATE) && core->prepare_count)
return -EBUSY;
/* calculate new rates and get the topmost changed clock */
top = clk_calc_new_rates(core, rate);
top = clk_calc_new_rates(core, req_rate);
if (!top)
return -EINVAL;
@ -1719,14 +1943,67 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
/* prevent racing with updates to the clock topology */
clk_prepare_lock();
if (clk->exclusive_count)
clk_core_rate_unprotect(clk->core);
ret = clk_core_set_rate_nolock(clk->core, rate);
if (clk->exclusive_count)
clk_core_rate_protect(clk->core);
clk_prepare_unlock();
return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate);
/**
* clk_set_rate_exclusive - specify a new rate and get exclusive control
* @clk: the clk whose rate is being changed
* @rate: the new rate for clk
*
* This is a combination of clk_set_rate() and clk_rate_exclusive_get()
* within a critical section
*
* This can be used initially to ensure that at least one consumer is
* satisfied when several consumers are competing for exclusivity over the
* same clock provider.
*
* The exclusivity is not applied if setting the rate failed.
*
* Calls to clk_rate_exclusive_get() should be balanced with calls to
* clk_rate_exclusive_put().
*
* Returns 0 on success, a negative errno otherwise.
*/
int clk_set_rate_exclusive(struct clk *clk, unsigned long rate)
{
int ret;
if (!clk)
return 0;
/* prevent racing with updates to the clock topology */
clk_prepare_lock();
/*
* The temporary protection removal is not done here, on purpose:
* this function is meant to be used instead of clk_rate_protect,
* i.e. before the consumer code path protects the clock provider.
*/
ret = clk_core_set_rate_nolock(clk->core, rate);
if (!ret) {
clk_core_rate_protect(clk->core);
clk->exclusive_count++;
}
clk_prepare_unlock();
return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate_exclusive);
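/*
 * Illustrative sketch (not part of this patch): clk_set_rate_exclusive() as a
 * one-call alternative to the get/set sequence above, for a consumer that
 * wants to program a rate and lock it in the same critical section. The
 * device, the "mclk" clock name and the 24.576 MHz rate are invented for
 * illustration only.
 */
static int foo_clk_setup(struct device *dev)
{
	struct clk *mclk;
	int ret;

	mclk = devm_clk_get(dev, "mclk");
	if (IS_ERR(mclk))
		return PTR_ERR(mclk);

	/* Rate is set and protected under the same prepare_lock section. */
	ret = clk_set_rate_exclusive(mclk, 24576000);
	if (ret)
		return ret;

	/* Balanced later with clk_rate_exclusive_put(mclk) when done. */
	return clk_prepare_enable(mclk);
}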
/**
* clk_set_rate_range - set a rate range for a clock source
* @clk: clock source
@ -1738,6 +2015,7 @@ EXPORT_SYMBOL_GPL(clk_set_rate);
int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
{
int ret = 0;
unsigned long old_min, old_max, rate;
if (!clk)
return 0;
@ -1751,12 +2029,46 @@ int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
clk_prepare_lock();
if (min != clk->min_rate || max != clk->max_rate) {
clk->min_rate = min;
clk->max_rate = max;
ret = clk_core_set_rate_nolock(clk->core, clk->core->req_rate);
if (clk->exclusive_count)
clk_core_rate_unprotect(clk->core);
/* Save the current values in case we need to rollback the change */
old_min = clk->min_rate;
old_max = clk->max_rate;
clk->min_rate = min;
clk->max_rate = max;
rate = clk_core_get_rate_nolock(clk->core);
if (rate < min || rate > max) {
/*
* FIXME:
* We are in a bit of trouble here, the current rate is outside the
* requested range. We are going to try to request an appropriate
* range boundary but there is a catch. It may fail for the
* usual reason (clock broken, clock protected, etc) but also
* because:
* - round_rate() was not favorable and fell on the wrong
* side of the boundary
* - the determine_rate() callback does not really check for
* this corner case when determining the rate
*/
if (rate < min)
rate = min;
else
rate = max;
ret = clk_core_set_rate_nolock(clk->core, rate);
if (ret) {
/* rollback the changes */
clk->min_rate = old_min;
clk->max_rate = old_max;
}
}
if (clk->exclusive_count)
clk_core_rate_protect(clk->core);
clk_prepare_unlock();
return ret;
@ -1877,32 +2189,31 @@ bool clk_has_parent(struct clk *clk, struct clk *parent)
}
EXPORT_SYMBOL_GPL(clk_has_parent);
static int clk_core_set_parent(struct clk_core *core, struct clk_core *parent)
static int clk_core_set_parent_nolock(struct clk_core *core,
struct clk_core *parent)
{
int ret = 0;
int p_index = 0;
unsigned long p_rate = 0;
lockdep_assert_held(&prepare_lock);
if (!core)
return 0;
/* prevent racing with updates to the clock topology */
clk_prepare_lock();
if (core->parent == parent)
goto out;
return 0;
/* verify ops for multi-parent clks */
if ((core->num_parents > 1) && (!core->ops->set_parent)) {
ret = -ENOSYS;
goto out;
}
if (core->num_parents > 1 && !core->ops->set_parent)
return -EPERM;
/* check that we are allowed to re-parent if the clock is in use */
if ((core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) {
ret = -EBUSY;
goto out;
}
if ((core->flags & CLK_SET_PARENT_GATE) && core->prepare_count)
return -EBUSY;
if (clk_core_rate_is_protected(core))
return -EBUSY;
/* try finding the new parent index */
if (parent) {
@ -1910,15 +2221,14 @@ static int clk_core_set_parent(struct clk_core *core, struct clk_core *parent)
if (p_index < 0) {
pr_debug("%s: clk %s can not be parent of clk %s\n",
__func__, parent->name, core->name);
ret = p_index;
goto out;
return p_index;
}
p_rate = parent->rate;
}
ret = clk_pm_runtime_get(core);
if (ret)
goto out;
return ret;
/* propagate PRE_RATE_CHANGE notifications */
ret = __clk_speculate_rates(core, p_rate);
@ -1940,8 +2250,6 @@ static int clk_core_set_parent(struct clk_core *core, struct clk_core *parent)
runtime_put:
clk_pm_runtime_put(core);
out:
clk_prepare_unlock();
return ret;
}
@ -1965,13 +2273,50 @@ out:
*/
int clk_set_parent(struct clk *clk, struct clk *parent)
{
int ret;
if (!clk)
return 0;
return clk_core_set_parent(clk->core, parent ? parent->core : NULL);
clk_prepare_lock();
if (clk->exclusive_count)
clk_core_rate_unprotect(clk->core);
ret = clk_core_set_parent_nolock(clk->core,
parent ? parent->core : NULL);
if (clk->exclusive_count)
clk_core_rate_protect(clk->core);
clk_prepare_unlock();
return ret;
}
EXPORT_SYMBOL_GPL(clk_set_parent);
static int clk_core_set_phase_nolock(struct clk_core *core, int degrees)
{
int ret = -EINVAL;
lockdep_assert_held(&prepare_lock);
if (!core)
return 0;
if (clk_core_rate_is_protected(core))
return -EBUSY;
trace_clk_set_phase(core, degrees);
if (core->ops->set_phase)
ret = core->ops->set_phase(core->hw, degrees);
trace_clk_set_phase_complete(core, degrees);
return ret;
}
/**
* clk_set_phase - adjust the phase shift of a clock signal
* @clk: clock signal source
@ -1994,7 +2339,7 @@ EXPORT_SYMBOL_GPL(clk_set_parent);
*/
int clk_set_phase(struct clk *clk, int degrees)
{
int ret = -EINVAL;
int ret;
if (!clk)
return 0;
@ -2006,15 +2351,13 @@ int clk_set_phase(struct clk *clk, int degrees)
clk_prepare_lock();
trace_clk_set_phase(clk->core, degrees);
if (clk->exclusive_count)
clk_core_rate_unprotect(clk->core);
if (clk->core->ops->set_phase)
ret = clk->core->ops->set_phase(clk->core->hw, degrees);
ret = clk_core_set_phase_nolock(clk->core, degrees);
trace_clk_set_phase_complete(clk->core, degrees);
if (!ret)
clk->core->phase = degrees;
if (clk->exclusive_count)
clk_core_rate_protect(clk->core);
clk_prepare_unlock();
@ -2102,11 +2445,12 @@ static void clk_summary_show_one(struct seq_file *s, struct clk_core *c,
if (!c)
return;
seq_printf(s, "%*s%-*s %11d %12d %11lu %10lu %-3d\n",
seq_printf(s, "%*s%-*s %7d %8d %8d %11lu %10lu %-3d\n",
level * 3 + 1, "",
30 - level * 3, c->name,
c->enable_count, c->prepare_count, clk_core_get_rate(c),
clk_core_get_accuracy(c), clk_core_get_phase(c));
c->enable_count, c->prepare_count, c->protect_count,
clk_core_get_rate(c), clk_core_get_accuracy(c),
clk_core_get_phase(c));
}
static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
@ -2128,7 +2472,8 @@ static int clk_summary_show(struct seq_file *s, void *data)
struct clk_core *c;
struct hlist_head **lists = (struct hlist_head **)s->private;
seq_puts(s, " clock enable_cnt prepare_cnt rate accuracy phase\n");
seq_puts(s, " enable prepare protect \n");
seq_puts(s, " clock count count count rate accuracy phase\n");
seq_puts(s, "----------------------------------------------------------------------------------------\n");
clk_prepare_lock();
@ -2164,6 +2509,7 @@ static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level)
seq_printf(s, "\"%s\": { ", c->name);
seq_printf(s, "\"enable_count\": %d,", c->enable_count);
seq_printf(s, "\"prepare_count\": %d,", c->prepare_count);
seq_printf(s, "\"protect_count\": %d,", c->protect_count);
seq_printf(s, "\"rate\": %lu,", clk_core_get_rate(c));
seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy(c));
seq_printf(s, "\"phase\": %d", clk_core_get_phase(c));
@ -2223,6 +2569,58 @@ static const struct file_operations clk_dump_fops = {
.release = single_release,
};
static const struct {
unsigned long flag;
const char *name;
} clk_flags[] = {
#define ENTRY(f) { f, __stringify(f) }
ENTRY(CLK_SET_RATE_GATE),
ENTRY(CLK_SET_PARENT_GATE),
ENTRY(CLK_SET_RATE_PARENT),
ENTRY(CLK_IGNORE_UNUSED),
ENTRY(CLK_IS_BASIC),
ENTRY(CLK_GET_RATE_NOCACHE),
ENTRY(CLK_SET_RATE_NO_REPARENT),
ENTRY(CLK_GET_ACCURACY_NOCACHE),
ENTRY(CLK_RECALC_NEW_RATES),
ENTRY(CLK_SET_RATE_UNGATE),
ENTRY(CLK_IS_CRITICAL),
ENTRY(CLK_OPS_PARENT_ENABLE),
#undef ENTRY
};
static int clk_flags_dump(struct seq_file *s, void *data)
{
struct clk_core *core = s->private;
unsigned long flags = core->flags;
unsigned int i;
for (i = 0; flags && i < ARRAY_SIZE(clk_flags); i++) {
if (flags & clk_flags[i].flag) {
seq_printf(s, "%s\n", clk_flags[i].name);
flags &= ~clk_flags[i].flag;
}
}
if (flags) {
/* Unknown flags */
seq_printf(s, "0x%lx\n", flags);
}
return 0;
}
static int clk_flags_open(struct inode *inode, struct file *file)
{
return single_open(file, clk_flags_dump, inode->i_private);
}
static const struct file_operations clk_flags_fops = {
.open = clk_flags_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
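/*
 * Example of the new output (hypothetical clock, derived from the dump loop
 * above): reading <debugfs>/clk/<clkname>/clk_flags for a clock registered
 * with CLK_SET_RATE_GATE | CLK_SET_RATE_PARENT now prints the symbolic names
 * one per line:
 *
 *	CLK_SET_RATE_GATE
 *	CLK_SET_RATE_PARENT
 *
 * Any remaining bits without an entry in clk_flags[] are still printed as a
 * raw "0x%lx" mask.
 */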
static int possible_parents_dump(struct seq_file *s, void *data)
{
struct clk_core *core = s->private;
@ -2264,43 +2662,46 @@ static int clk_debug_create_one(struct clk_core *core, struct dentry *pdentry)
core->dentry = d;
d = debugfs_create_u32("clk_rate", S_IRUGO, core->dentry,
(u32 *)&core->rate);
d = debugfs_create_ulong("clk_rate", 0444, core->dentry, &core->rate);
if (!d)
goto err_out;
d = debugfs_create_u32("clk_accuracy", S_IRUGO, core->dentry,
(u32 *)&core->accuracy);
d = debugfs_create_ulong("clk_accuracy", 0444, core->dentry,
&core->accuracy);
if (!d)
goto err_out;
d = debugfs_create_u32("clk_phase", S_IRUGO, core->dentry,
(u32 *)&core->phase);
d = debugfs_create_u32("clk_phase", 0444, core->dentry, &core->phase);
if (!d)
goto err_out;
d = debugfs_create_x32("clk_flags", S_IRUGO, core->dentry,
(u32 *)&core->flags);
d = debugfs_create_file("clk_flags", 0444, core->dentry, core,
&clk_flags_fops);
if (!d)
goto err_out;
d = debugfs_create_u32("clk_prepare_count", S_IRUGO, core->dentry,
(u32 *)&core->prepare_count);
d = debugfs_create_u32("clk_prepare_count", 0444, core->dentry,
&core->prepare_count);
if (!d)
goto err_out;
d = debugfs_create_u32("clk_enable_count", S_IRUGO, core->dentry,
(u32 *)&core->enable_count);
d = debugfs_create_u32("clk_enable_count", 0444, core->dentry,
&core->enable_count);
if (!d)
goto err_out;
d = debugfs_create_u32("clk_notifier_count", S_IRUGO, core->dentry,
(u32 *)&core->notifier_count);
d = debugfs_create_u32("clk_protect_count", 0444, core->dentry,
&core->protect_count);
if (!d)
goto err_out;
d = debugfs_create_u32("clk_notifier_count", 0444, core->dentry,
&core->notifier_count);
if (!d)
goto err_out;
if (core->num_parents > 1) {
d = debugfs_create_file("clk_possible_parents", S_IRUGO,
d = debugfs_create_file("clk_possible_parents", 0444,
core->dentry, core, &possible_parents_fops);
if (!d)
goto err_out;
@ -2336,12 +2737,8 @@ static int clk_debug_register(struct clk_core *core)
mutex_lock(&clk_debug_lock);
hlist_add_head(&core->debug_node, &clk_debug_list);
if (!inited)
goto unlock;
ret = clk_debug_create_one(core, rootdir);
unlock:
if (inited)
ret = clk_debug_create_one(core, rootdir);
mutex_unlock(&clk_debug_lock);
return ret;
@ -2396,22 +2793,22 @@ static int __init clk_debug_init(void)
if (!rootdir)
return -ENOMEM;
d = debugfs_create_file("clk_summary", S_IRUGO, rootdir, &all_lists,
d = debugfs_create_file("clk_summary", 0444, rootdir, &all_lists,
&clk_summary_fops);
if (!d)
return -ENOMEM;
d = debugfs_create_file("clk_dump", S_IRUGO, rootdir, &all_lists,
d = debugfs_create_file("clk_dump", 0444, rootdir, &all_lists,
&clk_dump_fops);
if (!d)
return -ENOMEM;
d = debugfs_create_file("clk_orphan_summary", S_IRUGO, rootdir,
d = debugfs_create_file("clk_orphan_summary", 0444, rootdir,
&orphan_list, &clk_summary_fops);
if (!d)
return -ENOMEM;
d = debugfs_create_file("clk_orphan_dump", S_IRUGO, rootdir,
d = debugfs_create_file("clk_orphan_dump", 0444, rootdir,
&orphan_list, &clk_dump_fops);
if (!d)
return -ENOMEM;
@ -2576,14 +2973,17 @@ static int __clk_core_init(struct clk_core *core)
*/
hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
struct clk_core *parent = __clk_init_parent(orphan);
unsigned long flags;
/*
* we could call __clk_set_parent, but that would result in a
* redundant call to the .set_rate op, if it exists
*/
if (parent) {
__clk_set_parent_before(orphan, parent);
__clk_set_parent_after(orphan, parent, NULL);
/* update the clk tree topology */
flags = clk_enable_lock();
clk_reparent(orphan, parent);
clk_enable_unlock(flags);
__clk_recalc_accuracies(orphan);
__clk_recalc_rates(orphan, 0);
}
@ -2684,7 +3084,13 @@ struct clk *clk_register(struct device *dev, struct clk_hw *hw)
ret = -ENOMEM;
goto fail_name;
}
if (WARN_ON(!hw->init->ops)) {
ret = -EINVAL;
goto fail_ops;
}
core->ops = hw->init->ops;
if (dev && pm_runtime_enabled(dev))
core->dev = dev;
if (dev && dev->driver)
@ -2746,6 +3152,7 @@ fail_parent_names_copy:
kfree_const(core->parent_names[i]);
kfree(core->parent_names);
fail_parent_names:
fail_ops:
kfree_const(core->name);
fail_name:
kfree(core);
@ -2857,7 +3264,7 @@ void clk_unregister(struct clk *clk)
/* Reparent all children to the orphan list. */
hlist_for_each_entry_safe(child, t, &clk->core->children,
child_node)
clk_core_set_parent(child, NULL);
clk_core_set_parent_nolock(child, NULL);
}
hlist_del_init(&clk->core->child_node);
@ -2865,6 +3272,11 @@ void clk_unregister(struct clk *clk)
if (clk->core->prepare_count)
pr_warn("%s: unregistering prepared clock: %s\n",
__func__, clk->core->name);
if (clk->core->protect_count)
pr_warn("%s: unregistering protected clock: %s\n",
__func__, clk->core->name);
kref_put(&clk->core->ref, __clk_release);
unlock:
clk_prepare_unlock();
@ -3023,6 +3435,18 @@ void __clk_put(struct clk *clk)
clk_prepare_lock();
/*
* Before calling clk_put, all calls to clk_rate_exclusive_get() from a
* given user should be balanced with calls to clk_rate_exclusive_put()
* and by that same consumer
*/
if (WARN_ON(clk->exclusive_count)) {
/* We voiced our concern, let's sanitize the situation */
clk->core->protect_count -= (clk->exclusive_count - 1);
clk_core_rate_unprotect(clk->core);
clk->exclusive_count = 0;
}
hlist_del(&clk->clks_node);
if (clk->min_rate > clk->core->req_rate ||
clk->max_rate < clk->core->req_rate)
@ -3559,7 +3983,7 @@ static int parent_ready(struct device_node *np)
* of_clk_detect_critical() - set CLK_IS_CRITICAL flag from Device Tree
* @np: Device node pointer associated with clock provider
* @index: clock index
* @flags: pointer to clk_core->flags
* @flags: pointer to top-level framework flags
*
* Detects if the clock-critical property exists and, if so, sets the
* corresponding CLK_IS_CRITICAL flag.


@ -20,6 +20,8 @@ struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec,
struct clk *__clk_create_clk(struct clk_hw *hw, const char *dev_id,
const char *con_id);
void __clk_free_clk(struct clk *clk);
int __clk_get(struct clk *clk);
void __clk_put(struct clk *clk);
#else
/* All these casts to avoid ifdefs in clkdev... */
static inline struct clk *
@ -32,5 +34,7 @@ static struct clk_hw *__clk_get_hw(struct clk *clk)
{
return (struct clk_hw *)clk;
}
static inline int __clk_get(struct clk *clk) { return 1; }
static inline void __clk_put(struct clk *clk) { }
#endif


@ -256,7 +256,7 @@ vclkdev_alloc(struct clk_hw *hw, const char *con_id, const char *dev_fmt,
{
struct clk_lookup_alloc *cla;
cla = __clkdev_alloc(sizeof(*cla));
cla = kzalloc(sizeof(*cla), GFP_KERNEL);
if (!cla)
return NULL;


@ -24,13 +24,13 @@ static void __init h8300_div_clk_setup(struct device_node *node)
num_parents = of_clk_get_parent_count(node);
if (!num_parents) {
pr_err("%s: no parent found", clk_name);
pr_err("%s: no parent found\n", clk_name);
return;
}
divcr = of_iomap(node, 0);
if (divcr == NULL) {
pr_err("%s: failed to map divide register", clk_name);
pr_err("%s: failed to map divide register\n", clk_name);
goto error;
}
offset = (unsigned long)divcr & 3;


@ -93,7 +93,7 @@ static void __init h8s2678_pll_clk_setup(struct device_node *node)
num_parents = of_clk_get_parent_count(node);
if (!num_parents) {
pr_err("%s: no parent found", clk_name);
pr_err("%s: no parent found\n", clk_name);
return;
}
@ -104,13 +104,13 @@ static void __init h8s2678_pll_clk_setup(struct device_node *node)
pll_clock->sckcr = of_iomap(node, 0);
if (pll_clock->sckcr == NULL) {
pr_err("%s: failed to map divide register", clk_name);
pr_err("%s: failed to map divide register\n", clk_name);
goto free_clock;
}
pll_clock->pllcr = of_iomap(node, 1);
if (pll_clock->pllcr == NULL) {
pr_err("%s: failed to map multiply register", clk_name);
pr_err("%s: failed to map multiply register\n", clk_name);
goto unmap_sckcr;
}


@ -49,3 +49,9 @@ config STUB_CLK_HI6220
default ARCH_HISI
help
Build the Hisilicon Hi6220 stub clock driver.
config STUB_CLK_HI3660
bool "Hi3660 Stub Clock Driver"
depends on COMMON_CLK_HI3660 && MAILBOX
help
Build the Hisilicon Hi3660 stub clock driver.


@ -15,3 +15,4 @@ obj-$(CONFIG_COMMON_CLK_HI3798CV200) += crg-hi3798cv200.o
obj-$(CONFIG_COMMON_CLK_HI6220) += clk-hi6220.o
obj-$(CONFIG_RESET_HISI) += reset.o
obj-$(CONFIG_STUB_CLK_HI6220) += clk-hi6220-stub.o
obj-$(CONFIG_STUB_CLK_HI3660) += clk-hi3660-stub.o


@ -0,0 +1,185 @@
/*
* Hisilicon clock driver
*
* Copyright (c) 2013-2017 Hisilicon Limited.
* Copyright (c) 2017 Linaro Limited.
*
* Author: Kai Zhao <zhaokai1@hisilicon.com>
* Tao Wang <kevin.wangtao@hisilicon.com>
* Leo Yan <leo.yan@linaro.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <dt-bindings/clock/hi3660-clock.h>
#define HI3660_STUB_CLOCK_DATA (0x70)
#define MHZ (1000 * 1000)
#define DEFINE_CLK_STUB(_id, _cmd, _name) \
{ \
.id = (_id), \
.cmd = (_cmd), \
.hw.init = &(struct clk_init_data) { \
.name = #_name, \
.ops = &hi3660_stub_clk_ops, \
.num_parents = 0, \
.flags = CLK_GET_RATE_NOCACHE, \
}, \
},
#define to_stub_clk(_hw) container_of(_hw, struct hi3660_stub_clk, hw)
struct hi3660_stub_clk_chan {
struct mbox_client cl;
struct mbox_chan *mbox;
};
struct hi3660_stub_clk {
unsigned int id;
struct clk_hw hw;
unsigned int cmd;
unsigned int msg[8];
unsigned int rate;
};
static void __iomem *freq_reg;
static struct hi3660_stub_clk_chan stub_clk_chan;
static unsigned long hi3660_stub_clk_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct hi3660_stub_clk *stub_clk = to_stub_clk(hw);
/*
* LPM3 writes back the CPU frequency in shared SRAM so read
* back the frequency.
*/
stub_clk->rate = readl(freq_reg + (stub_clk->id << 2)) * MHZ;
return stub_clk->rate;
}
static long hi3660_stub_clk_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
{
/*
* LPM3 handles rate rounding so just return whatever
* rate is requested.
*/
return rate;
}
static int hi3660_stub_clk_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct hi3660_stub_clk *stub_clk = to_stub_clk(hw);
stub_clk->msg[0] = stub_clk->cmd;
stub_clk->msg[1] = rate / MHZ;
dev_dbg(stub_clk_chan.cl.dev, "set rate msg[0]=0x%x msg[1]=0x%x\n",
stub_clk->msg[0], stub_clk->msg[1]);
mbox_send_message(stub_clk_chan.mbox, stub_clk->msg);
mbox_client_txdone(stub_clk_chan.mbox, 0);
stub_clk->rate = rate;
return 0;
}
static const struct clk_ops hi3660_stub_clk_ops = {
.recalc_rate = hi3660_stub_clk_recalc_rate,
.round_rate = hi3660_stub_clk_round_rate,
.set_rate = hi3660_stub_clk_set_rate,
};
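/*
 * Illustrative walk-through (derived from the ops above, values hypothetical):
 * a request to set "cpu-cluster.0" to 1800 MHz reaches
 * hi3660_stub_clk_set_rate(), which sends msg = { 0x0001030A, 1800 } to LPM3
 * over the mailbox and caches 1800000000 in stub_clk->rate; the frequency
 * actually applied is read back from shared SRAM by
 * hi3660_stub_clk_recalc_rate().
 */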
static struct hi3660_stub_clk hi3660_stub_clks[HI3660_CLK_STUB_NUM] = {
DEFINE_CLK_STUB(HI3660_CLK_STUB_CLUSTER0, 0x0001030A, "cpu-cluster.0")
DEFINE_CLK_STUB(HI3660_CLK_STUB_CLUSTER1, 0x0002030A, "cpu-cluster.1")
DEFINE_CLK_STUB(HI3660_CLK_STUB_GPU, 0x0003030A, "clk-g3d")
DEFINE_CLK_STUB(HI3660_CLK_STUB_DDR, 0x00040309, "clk-ddrc")
};
static struct clk_hw *hi3660_stub_clk_hw_get(struct of_phandle_args *clkspec,
void *data)
{
unsigned int idx = clkspec->args[0];
if (idx >= HI3660_CLK_STUB_NUM) {
pr_err("%s: invalid index %u\n", __func__, idx);
return ERR_PTR(-EINVAL);
}
return &hi3660_stub_clks[idx].hw;
}
static int hi3660_stub_clk_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct resource *res;
unsigned int i;
int ret;
/* Use mailbox client without blocking */
stub_clk_chan.cl.dev = dev;
stub_clk_chan.cl.tx_done = NULL;
stub_clk_chan.cl.tx_block = false;
stub_clk_chan.cl.knows_txdone = false;
/* Allocate mailbox channel */
stub_clk_chan.mbox = mbox_request_channel(&stub_clk_chan.cl, 0);
if (IS_ERR(stub_clk_chan.mbox))
return PTR_ERR(stub_clk_chan.mbox);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
freq_reg = devm_ioremap(dev, res->start, resource_size(res));
if (!freq_reg)
return -ENOMEM;
freq_reg += HI3660_STUB_CLOCK_DATA;
for (i = 0; i < HI3660_CLK_STUB_NUM; i++) {
ret = devm_clk_hw_register(&pdev->dev, &hi3660_stub_clks[i].hw);
if (ret)
return ret;
}
return devm_of_clk_add_hw_provider(&pdev->dev, hi3660_stub_clk_hw_get,
hi3660_stub_clks);
}
static const struct of_device_id hi3660_stub_clk_of_match[] = {
{ .compatible = "hisilicon,hi3660-stub-clk", },
{}
};
static struct platform_driver hi3660_stub_clk_driver = {
.probe = hi3660_stub_clk_probe,
.driver = {
.name = "hi3660-stub-clk",
.of_match_table = hi3660_stub_clk_of_match,
},
};
static int __init hi3660_stub_clk_init(void)
{
return platform_driver_register(&hi3660_stub_clk_driver);
}
subsys_initcall(hi3660_stub_clk_init);


@ -56,7 +56,7 @@ static unsigned long hi6220_clkdiv_recalc_rate(struct clk_hw *hw,
val &= div_mask(dclk->width);
return divider_recalc_rate(hw, parent_rate, val, dclk->table,
CLK_DIVIDER_ROUND_CLOSEST);
CLK_DIVIDER_ROUND_CLOSEST, dclk->width);
}
static long hi6220_clkdiv_round_rate(struct clk_hw *hw, unsigned long rate,


@ -257,10 +257,6 @@ static void __init mx5_clocks_common_init(void __iomem *ccm_base)
clk[IMX5_CLK_VPU_SEL] = imx_clk_mux("vpu_sel", MXC_CCM_CBCMR, 14, 2, vpu_sel, ARRAY_SIZE(vpu_sel));
clk[IMX5_CLK_VPU_GATE] = imx_clk_gate2("vpu_gate", "vpu_sel", MXC_CCM_CCGR5, 6);
clk[IMX5_CLK_VPU_REFERENCE_GATE] = imx_clk_gate2("vpu_reference_gate", "osc", MXC_CCM_CCGR5, 8);
clk[IMX5_CLK_UART4_IPG_GATE] = imx_clk_gate2("uart4_ipg_gate", "ipg", MXC_CCM_CCGR7, 8);
clk[IMX5_CLK_UART4_PER_GATE] = imx_clk_gate2("uart4_per_gate", "uart_root", MXC_CCM_CCGR7, 10);
clk[IMX5_CLK_UART5_IPG_GATE] = imx_clk_gate2("uart5_ipg_gate", "ipg", MXC_CCM_CCGR7, 12);
clk[IMX5_CLK_UART5_PER_GATE] = imx_clk_gate2("uart5_per_gate", "uart_root", MXC_CCM_CCGR7, 14);
clk[IMX5_CLK_GPC_DVFS] = imx_clk_gate2("gpc_dvfs", "dummy", MXC_CCM_CCGR5, 24);
clk[IMX5_CLK_SSI_APM] = imx_clk_mux("ssi_apm", MXC_CCM_CSCMR1, 8, 2, ssi_apm_sels, ARRAY_SIZE(ssi_apm_sels));
@ -361,6 +357,10 @@ static void __init mx50_clocks_init(struct device_node *np)
clk[IMX5_CLK_USB_PHY1_GATE] = imx_clk_gate2("usb_phy1_gate", "usb_phy_sel", MXC_CCM_CCGR4, 10);
clk[IMX5_CLK_USB_PHY2_GATE] = imx_clk_gate2("usb_phy2_gate", "usb_phy_sel", MXC_CCM_CCGR4, 12);
clk[IMX5_CLK_I2C3_GATE] = imx_clk_gate2("i2c3_gate", "per_root", MXC_CCM_CCGR1, 22);
clk[IMX5_CLK_UART4_IPG_GATE] = imx_clk_gate2("uart4_ipg_gate", "ipg", MXC_CCM_CCGR7, 8);
clk[IMX5_CLK_UART4_PER_GATE] = imx_clk_gate2("uart4_per_gate", "uart_root", MXC_CCM_CCGR7, 10);
clk[IMX5_CLK_UART5_IPG_GATE] = imx_clk_gate2("uart5_ipg_gate", "ipg", MXC_CCM_CCGR7, 12);
clk[IMX5_CLK_UART5_PER_GATE] = imx_clk_gate2("uart5_per_gate", "uart_root", MXC_CCM_CCGR7, 14);
clk[IMX5_CLK_CKO1_SEL] = imx_clk_mux("cko1_sel", MXC_CCM_CCOSR, 0, 4,
mx53_cko1_sel, ARRAY_SIZE(mx53_cko1_sel));
@ -562,6 +562,10 @@ static void __init mx53_clocks_init(struct device_node *np)
clk[IMX5_CLK_IEEE1588_PRED] = imx_clk_divider("ieee1588_pred", "ieee1588_sel", MXC_CCM_CSCDR2, 6, 3);
clk[IMX5_CLK_IEEE1588_PODF] = imx_clk_divider("ieee1588_podf", "ieee1588_pred", MXC_CCM_CSCDR2, 0, 6);
clk[IMX5_CLK_IEEE1588_GATE] = imx_clk_gate2("ieee1588_serial_gate", "ieee1588_podf", MXC_CCM_CCGR7, 6);
clk[IMX5_CLK_UART4_IPG_GATE] = imx_clk_gate2("uart4_ipg_gate", "ipg", MXC_CCM_CCGR7, 8);
clk[IMX5_CLK_UART4_PER_GATE] = imx_clk_gate2("uart4_per_gate", "uart_root", MXC_CCM_CCGR7, 10);
clk[IMX5_CLK_UART5_IPG_GATE] = imx_clk_gate2("uart5_ipg_gate", "ipg", MXC_CCM_CCGR7, 12);
clk[IMX5_CLK_UART5_PER_GATE] = imx_clk_gate2("uart5_per_gate", "uart_root", MXC_CCM_CCGR7, 14);
clk[IMX5_CLK_CKO1_SEL] = imx_clk_mux("cko1_sel", MXC_CCM_CCOSR, 0, 4,
mx53_cko1_sel, ARRAY_SIZE(mx53_cko1_sel));


@ -1,136 +1,140 @@
#
# MediaTek SoC drivers
# MediaTek Clock Drivers
#
menu "Clock driver for MediaTek SoC"
depends on ARCH_MEDIATEK || COMPILE_TEST
config COMMON_CLK_MEDIATEK
bool
select RESET_CONTROLLER
---help---
Mediatek SoCs' clock support.
MediaTek SoCs' clock support.
config COMMON_CLK_MT2701
bool "Clock driver for Mediatek MT2701"
bool "Clock driver for MediaTek MT2701"
depends on (ARCH_MEDIATEK && ARM) || COMPILE_TEST
select COMMON_CLK_MEDIATEK
default ARCH_MEDIATEK && ARM
---help---
This driver supports Mediatek MT2701 basic clocks.
This driver supports MediaTek MT2701 basic clocks.
config COMMON_CLK_MT2701_MMSYS
bool "Clock driver for Mediatek MT2701 mmsys"
bool "Clock driver for MediaTek MT2701 mmsys"
depends on COMMON_CLK_MT2701
---help---
This driver supports Mediatek MT2701 mmsys clocks.
This driver supports MediaTek MT2701 mmsys clocks.
config COMMON_CLK_MT2701_IMGSYS
bool "Clock driver for Mediatek MT2701 imgsys"
bool "Clock driver for MediaTek MT2701 imgsys"
depends on COMMON_CLK_MT2701
---help---
This driver supports Mediatek MT2701 imgsys clocks.
This driver supports MediaTek MT2701 imgsys clocks.
config COMMON_CLK_MT2701_VDECSYS
bool "Clock driver for Mediatek MT2701 vdecsys"
bool "Clock driver for MediaTek MT2701 vdecsys"
depends on COMMON_CLK_MT2701
---help---
This driver supports Mediatek MT2701 vdecsys clocks.
This driver supports MediaTek MT2701 vdecsys clocks.
config COMMON_CLK_MT2701_HIFSYS
bool "Clock driver for Mediatek MT2701 hifsys"
bool "Clock driver for MediaTek MT2701 hifsys"
depends on COMMON_CLK_MT2701
---help---
This driver supports Mediatek MT2701 hifsys clocks.
This driver supports MediaTek MT2701 hifsys clocks.
config COMMON_CLK_MT2701_ETHSYS
bool "Clock driver for Mediatek MT2701 ethsys"
bool "Clock driver for MediaTek MT2701 ethsys"
depends on COMMON_CLK_MT2701
---help---
This driver supports Mediatek MT2701 ethsys clocks.
This driver supports MediaTek MT2701 ethsys clocks.
config COMMON_CLK_MT2701_BDPSYS
bool "Clock driver for Mediatek MT2701 bdpsys"
bool "Clock driver for MediaTek MT2701 bdpsys"
depends on COMMON_CLK_MT2701
---help---
This driver supports Mediatek MT2701 bdpsys clocks.
This driver supports MediaTek MT2701 bdpsys clocks.
config COMMON_CLK_MT2712
bool "Clock driver for Mediatek MT2712"
bool "Clock driver for MediaTek MT2712"
depends on (ARCH_MEDIATEK && ARM64) || COMPILE_TEST
select COMMON_CLK_MEDIATEK
default ARCH_MEDIATEK && ARM64
---help---
This driver supports Mediatek MT2712 basic clocks.
This driver supports MediaTek MT2712 basic clocks.
config COMMON_CLK_MT2712_BDPSYS
bool "Clock driver for Mediatek MT2712 bdpsys"
bool "Clock driver for MediaTek MT2712 bdpsys"
depends on COMMON_CLK_MT2712
---help---
This driver supports Mediatek MT2712 bdpsys clocks.
This driver supports MediaTek MT2712 bdpsys clocks.
config COMMON_CLK_MT2712_IMGSYS
bool "Clock driver for Mediatek MT2712 imgsys"
bool "Clock driver for MediaTek MT2712 imgsys"
depends on COMMON_CLK_MT2712
---help---
This driver supports Mediatek MT2712 imgsys clocks.
This driver supports MediaTek MT2712 imgsys clocks.
config COMMON_CLK_MT2712_JPGDECSYS
bool "Clock driver for Mediatek MT2712 jpgdecsys"
bool "Clock driver for MediaTek MT2712 jpgdecsys"
depends on COMMON_CLK_MT2712
---help---
This driver supports Mediatek MT2712 jpgdecsys clocks.
This driver supports MediaTek MT2712 jpgdecsys clocks.
config COMMON_CLK_MT2712_MFGCFG
bool "Clock driver for Mediatek MT2712 mfgcfg"
bool "Clock driver for MediaTek MT2712 mfgcfg"
depends on COMMON_CLK_MT2712
---help---
This driver supports Mediatek MT2712 mfgcfg clocks.
This driver supports MediaTek MT2712 mfgcfg clocks.
config COMMON_CLK_MT2712_MMSYS
bool "Clock driver for Mediatek MT2712 mmsys"
bool "Clock driver for MediaTek MT2712 mmsys"
depends on COMMON_CLK_MT2712
---help---
This driver supports Mediatek MT2712 mmsys clocks.
This driver supports MediaTek MT2712 mmsys clocks.
config COMMON_CLK_MT2712_VDECSYS
bool "Clock driver for Mediatek MT2712 vdecsys"
bool "Clock driver for MediaTek MT2712 vdecsys"
depends on COMMON_CLK_MT2712
---help---
This driver supports Mediatek MT2712 vdecsys clocks.
This driver supports MediaTek MT2712 vdecsys clocks.
config COMMON_CLK_MT2712_VENCSYS
bool "Clock driver for Mediatek MT2712 vencsys"
bool "Clock driver for MediaTek MT2712 vencsys"
depends on COMMON_CLK_MT2712
---help---
This driver supports Mediatek MT2712 vencsys clocks.
This driver supports MediaTek MT2712 vencsys clocks.
config COMMON_CLK_MT6797
bool "Clock driver for Mediatek MT6797"
bool "Clock driver for MediaTek MT6797"
depends on (ARCH_MEDIATEK && ARM64) || COMPILE_TEST
select COMMON_CLK_MEDIATEK
default ARCH_MEDIATEK && ARM64
---help---
This driver supports Mediatek MT6797 basic clocks.
This driver supports MediaTek MT6797 basic clocks.
config COMMON_CLK_MT6797_MMSYS
bool "Clock driver for Mediatek MT6797 mmsys"
bool "Clock driver for MediaTek MT6797 mmsys"
depends on COMMON_CLK_MT6797
---help---
This driver supports Mediatek MT6797 mmsys clocks.
This driver supports MediaTek MT6797 mmsys clocks.
config COMMON_CLK_MT6797_IMGSYS
bool "Clock driver for Mediatek MT6797 imgsys"
bool "Clock driver for MediaTek MT6797 imgsys"
depends on COMMON_CLK_MT6797
---help---
This driver supports Mediatek MT6797 imgsys clocks.
This driver supports MediaTek MT6797 imgsys clocks.
config COMMON_CLK_MT6797_VDECSYS
bool "Clock driver for Mediatek MT6797 vdecsys"
bool "Clock driver for MediaTek MT6797 vdecsys"
depends on COMMON_CLK_MT6797
---help---
This driver supports Mediatek MT6797 vdecsys clocks.
This driver supports MediaTek MT6797 vdecsys clocks.
config COMMON_CLK_MT6797_VENCSYS
bool "Clock driver for Mediatek MT6797 vencsys"
bool "Clock driver for MediaTek MT6797 vencsys"
depends on COMMON_CLK_MT6797
---help---
This driver supports Mediatek MT6797 vencsys clocks.
This driver supports MediaTek MT6797 vencsys clocks.
config COMMON_CLK_MT7622
bool "Clock driver for MediaTek MT7622"
@ -163,17 +167,18 @@ config COMMON_CLK_MT7622_AUDSYS
to audio consumers such as I2S and TDM.
config COMMON_CLK_MT8135
bool "Clock driver for Mediatek MT8135"
bool "Clock driver for MediaTek MT8135"
depends on (ARCH_MEDIATEK && ARM) || COMPILE_TEST
select COMMON_CLK_MEDIATEK
default ARCH_MEDIATEK && ARM
---help---
This driver supports Mediatek MT8135 clocks.
This driver supports MediaTek MT8135 clocks.
config COMMON_CLK_MT8173
bool "Clock driver for Mediatek MT8173"
bool "Clock driver for MediaTek MT8173"
depends on ARCH_MEDIATEK || COMPILE_TEST
select COMMON_CLK_MEDIATEK
default ARCH_MEDIATEK
---help---
This driver supports Mediatek MT8173 clocks.
This driver supports MediaTek MT8173 clocks.
endmenu


@ -1,6 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_COMMON_CLK_MEDIATEK) += clk-mtk.o clk-pll.o clk-gate.o clk-apmixed.o clk-cpumux.o
obj-$(CONFIG_RESET_CONTROLLER) += reset.o
obj-$(CONFIG_COMMON_CLK_MEDIATEK) += clk-mtk.o clk-pll.o clk-gate.o clk-apmixed.o clk-cpumux.o reset.o
obj-$(CONFIG_COMMON_CLK_MT6797) += clk-mt6797.o
obj-$(CONFIG_COMMON_CLK_MT6797_IMGSYS) += clk-mt6797-img.o
obj-$(CONFIG_COMMON_CLK_MT6797_MMSYS) += clk-mt6797-mm.o


@ -20,6 +20,7 @@
#include <linux/clk-provider.h>
struct clk;
struct clk_onecell_data;
#define MAX_MUX_GATE_BIT 31
#define INVALID_MUX_GATE_BIT (MAX_MUX_GATE_BIT + 1)
@ -228,14 +229,7 @@ void mtk_clk_register_plls(struct device_node *node,
struct clk *mtk_clk_register_ref2usb_tx(const char *name,
const char *parent_name, void __iomem *reg);
#ifdef CONFIG_RESET_CONTROLLER
void mtk_register_reset_controller(struct device_node *np,
unsigned int num_regs, int regofs);
#else
static inline void mtk_register_reset_controller(struct device_node *np,
unsigned int num_regs, int regofs)
{
}
#endif
#endif /* __DRV_CLK_MTK_H */


@ -19,3 +19,11 @@ config COMMON_CLK_GXBB
help
Support for the clock controller on AmLogic S905 devices, aka gxbb.
Say Y if you want peripherals and CPU frequency scaling to work.
config COMMON_CLK_AXG
bool
depends on COMMON_CLK_AMLOGIC
select RESET_CONTROLLER
help
Support for the clock controller on AmLogic A113D devices, aka axg.
Say Y if you want peripherals and CPU frequency scaling to work.


@ -5,3 +5,4 @@
obj-$(CONFIG_COMMON_CLK_AMLOGIC) += clk-pll.o clk-cpu.o clk-mpll.o clk-audio-divider.o
obj-$(CONFIG_COMMON_CLK_MESON8B) += meson8b.o
obj-$(CONFIG_COMMON_CLK_GXBB) += gxbb.o gxbb-aoclk.o gxbb-aoclk-regmap.o gxbb-aoclk-32k.o
obj-$(CONFIG_COMMON_CLK_AXG) += axg.o

drivers/clk/meson/axg.c (new file, 938 lines)

@ -0,0 +1,938 @@
// SPDX-License-Identifier: GPL-2.0+
/*
* AmLogic Meson-AXG Clock Controller Driver
*
* Copyright (c) 2016 Baylibre SAS.
* Author: Michael Turquette <mturquette@baylibre.com>
*
* Copyright (c) 2017 Amlogic, inc.
* Author: Qiufang Dai <qiufang.dai@amlogic.com>
*/
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/init.h>
#include "clkc.h"
#include "axg.h"
static DEFINE_SPINLOCK(meson_clk_lock);
static const struct pll_rate_table sys_pll_rate_table[] = {
PLL_RATE(24000000, 56, 1, 2),
PLL_RATE(48000000, 64, 1, 2),
PLL_RATE(72000000, 72, 1, 2),
PLL_RATE(96000000, 64, 1, 2),
PLL_RATE(120000000, 80, 1, 2),
PLL_RATE(144000000, 96, 1, 2),
PLL_RATE(168000000, 56, 1, 1),
PLL_RATE(192000000, 64, 1, 1),
PLL_RATE(216000000, 72, 1, 1),
PLL_RATE(240000000, 80, 1, 1),
PLL_RATE(264000000, 88, 1, 1),
PLL_RATE(288000000, 96, 1, 1),
PLL_RATE(312000000, 52, 1, 2),
PLL_RATE(336000000, 56, 1, 2),
PLL_RATE(360000000, 60, 1, 2),
PLL_RATE(384000000, 64, 1, 2),
PLL_RATE(408000000, 68, 1, 2),
PLL_RATE(432000000, 72, 1, 2),
PLL_RATE(456000000, 76, 1, 2),
PLL_RATE(480000000, 80, 1, 2),
PLL_RATE(504000000, 84, 1, 2),
PLL_RATE(528000000, 88, 1, 2),
PLL_RATE(552000000, 92, 1, 2),
PLL_RATE(576000000, 96, 1, 2),
PLL_RATE(600000000, 50, 1, 1),
PLL_RATE(624000000, 52, 1, 1),
PLL_RATE(648000000, 54, 1, 1),
PLL_RATE(672000000, 56, 1, 1),
PLL_RATE(696000000, 58, 1, 1),
PLL_RATE(720000000, 60, 1, 1),
PLL_RATE(744000000, 62, 1, 1),
PLL_RATE(768000000, 64, 1, 1),
PLL_RATE(792000000, 66, 1, 1),
PLL_RATE(816000000, 68, 1, 1),
PLL_RATE(840000000, 70, 1, 1),
PLL_RATE(864000000, 72, 1, 1),
PLL_RATE(888000000, 74, 1, 1),
PLL_RATE(912000000, 76, 1, 1),
PLL_RATE(936000000, 78, 1, 1),
PLL_RATE(960000000, 80, 1, 1),
PLL_RATE(984000000, 82, 1, 1),
PLL_RATE(1008000000, 84, 1, 1),
PLL_RATE(1032000000, 86, 1, 1),
PLL_RATE(1056000000, 88, 1, 1),
PLL_RATE(1080000000, 90, 1, 1),
PLL_RATE(1104000000, 92, 1, 1),
PLL_RATE(1128000000, 94, 1, 1),
PLL_RATE(1152000000, 96, 1, 1),
PLL_RATE(1176000000, 98, 1, 1),
PLL_RATE(1200000000, 50, 1, 0),
PLL_RATE(1224000000, 51, 1, 0),
PLL_RATE(1248000000, 52, 1, 0),
PLL_RATE(1272000000, 53, 1, 0),
PLL_RATE(1296000000, 54, 1, 0),
PLL_RATE(1320000000, 55, 1, 0),
PLL_RATE(1344000000, 56, 1, 0),
PLL_RATE(1368000000, 57, 1, 0),
PLL_RATE(1392000000, 58, 1, 0),
PLL_RATE(1416000000, 59, 1, 0),
PLL_RATE(1440000000, 60, 1, 0),
PLL_RATE(1464000000, 61, 1, 0),
PLL_RATE(1488000000, 62, 1, 0),
PLL_RATE(1512000000, 63, 1, 0),
PLL_RATE(1536000000, 64, 1, 0),
PLL_RATE(1560000000, 65, 1, 0),
PLL_RATE(1584000000, 66, 1, 0),
PLL_RATE(1608000000, 67, 1, 0),
PLL_RATE(1632000000, 68, 1, 0),
PLL_RATE(1656000000, 68, 1, 0),
PLL_RATE(1680000000, 68, 1, 0),
PLL_RATE(1704000000, 68, 1, 0),
PLL_RATE(1728000000, 69, 1, 0),
PLL_RATE(1752000000, 69, 1, 0),
PLL_RATE(1776000000, 69, 1, 0),
PLL_RATE(1800000000, 69, 1, 0),
PLL_RATE(1824000000, 70, 1, 0),
PLL_RATE(1848000000, 70, 1, 0),
PLL_RATE(1872000000, 70, 1, 0),
PLL_RATE(1896000000, 70, 1, 0),
PLL_RATE(1920000000, 71, 1, 0),
PLL_RATE(1944000000, 71, 1, 0),
PLL_RATE(1968000000, 71, 1, 0),
PLL_RATE(1992000000, 71, 1, 0),
PLL_RATE(2016000000, 72, 1, 0),
PLL_RATE(2040000000, 72, 1, 0),
PLL_RATE(2064000000, 72, 1, 0),
PLL_RATE(2088000000, 72, 1, 0),
PLL_RATE(2112000000, 73, 1, 0),
{ /* sentinel */ },
};
static struct meson_clk_pll axg_fixed_pll = {
.m = {
.reg_off = HHI_MPLL_CNTL,
.shift = 0,
.width = 9,
},
.n = {
.reg_off = HHI_MPLL_CNTL,
.shift = 9,
.width = 5,
},
.od = {
.reg_off = HHI_MPLL_CNTL,
.shift = 16,
.width = 2,
},
.lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
.name = "fixed_pll",
.ops = &meson_clk_pll_ro_ops,
.parent_names = (const char *[]){ "xtal" },
.num_parents = 1,
},
};
static struct meson_clk_pll axg_sys_pll = {
.m = {
.reg_off = HHI_SYS_PLL_CNTL,
.shift = 0,
.width = 9,
},
.n = {
.reg_off = HHI_SYS_PLL_CNTL,
.shift = 9,
.width = 5,
},
.od = {
.reg_off = HHI_SYS_PLL_CNTL,
.shift = 10,
.width = 2,
},
.rate_table = sys_pll_rate_table,
.rate_count = ARRAY_SIZE(sys_pll_rate_table),
.lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
.name = "sys_pll",
.ops = &meson_clk_pll_ro_ops,
.parent_names = (const char *[]){ "xtal" },
.num_parents = 1,
.flags = CLK_GET_RATE_NOCACHE,
},
};
static const struct pll_rate_table axg_gp0_pll_rate_table[] = {
PLL_RATE(240000000, 40, 1, 2),
PLL_RATE(246000000, 41, 1, 2),
PLL_RATE(252000000, 42, 1, 2),
PLL_RATE(258000000, 43, 1, 2),
PLL_RATE(264000000, 44, 1, 2),
PLL_RATE(270000000, 45, 1, 2),
PLL_RATE(276000000, 46, 1, 2),
PLL_RATE(282000000, 47, 1, 2),
PLL_RATE(288000000, 48, 1, 2),
PLL_RATE(294000000, 49, 1, 2),
PLL_RATE(300000000, 50, 1, 2),
PLL_RATE(306000000, 51, 1, 2),
PLL_RATE(312000000, 52, 1, 2),
PLL_RATE(318000000, 53, 1, 2),
PLL_RATE(324000000, 54, 1, 2),
PLL_RATE(330000000, 55, 1, 2),
PLL_RATE(336000000, 56, 1, 2),
PLL_RATE(342000000, 57, 1, 2),
PLL_RATE(348000000, 58, 1, 2),
PLL_RATE(354000000, 59, 1, 2),
PLL_RATE(360000000, 60, 1, 2),
PLL_RATE(366000000, 61, 1, 2),
PLL_RATE(372000000, 62, 1, 2),
PLL_RATE(378000000, 63, 1, 2),
PLL_RATE(384000000, 64, 1, 2),
PLL_RATE(390000000, 65, 1, 3),
PLL_RATE(396000000, 66, 1, 3),
PLL_RATE(402000000, 67, 1, 3),
PLL_RATE(408000000, 68, 1, 3),
PLL_RATE(480000000, 40, 1, 1),
PLL_RATE(492000000, 41, 1, 1),
PLL_RATE(504000000, 42, 1, 1),
PLL_RATE(516000000, 43, 1, 1),
PLL_RATE(528000000, 44, 1, 1),
PLL_RATE(540000000, 45, 1, 1),
PLL_RATE(552000000, 46, 1, 1),
PLL_RATE(564000000, 47, 1, 1),
PLL_RATE(576000000, 48, 1, 1),
PLL_RATE(588000000, 49, 1, 1),
PLL_RATE(600000000, 50, 1, 1),
PLL_RATE(612000000, 51, 1, 1),
PLL_RATE(624000000, 52, 1, 1),
PLL_RATE(636000000, 53, 1, 1),
PLL_RATE(648000000, 54, 1, 1),
PLL_RATE(660000000, 55, 1, 1),
PLL_RATE(672000000, 56, 1, 1),
PLL_RATE(684000000, 57, 1, 1),
PLL_RATE(696000000, 58, 1, 1),
PLL_RATE(708000000, 59, 1, 1),
PLL_RATE(720000000, 60, 1, 1),
PLL_RATE(732000000, 61, 1, 1),
PLL_RATE(744000000, 62, 1, 1),
PLL_RATE(756000000, 63, 1, 1),
PLL_RATE(768000000, 64, 1, 1),
PLL_RATE(780000000, 65, 1, 1),
PLL_RATE(792000000, 66, 1, 1),
PLL_RATE(804000000, 67, 1, 1),
PLL_RATE(816000000, 68, 1, 1),
PLL_RATE(960000000, 40, 1, 0),
PLL_RATE(984000000, 41, 1, 0),
PLL_RATE(1008000000, 42, 1, 0),
PLL_RATE(1032000000, 43, 1, 0),
PLL_RATE(1056000000, 44, 1, 0),
PLL_RATE(1080000000, 45, 1, 0),
PLL_RATE(1104000000, 46, 1, 0),
PLL_RATE(1128000000, 47, 1, 0),
PLL_RATE(1152000000, 48, 1, 0),
PLL_RATE(1176000000, 49, 1, 0),
PLL_RATE(1200000000, 50, 1, 0),
PLL_RATE(1224000000, 51, 1, 0),
PLL_RATE(1248000000, 52, 1, 0),
PLL_RATE(1272000000, 53, 1, 0),
PLL_RATE(1296000000, 54, 1, 0),
PLL_RATE(1320000000, 55, 1, 0),
PLL_RATE(1344000000, 56, 1, 0),
PLL_RATE(1368000000, 57, 1, 0),
PLL_RATE(1392000000, 58, 1, 0),
PLL_RATE(1416000000, 59, 1, 0),
PLL_RATE(1440000000, 60, 1, 0),
PLL_RATE(1464000000, 61, 1, 0),
PLL_RATE(1488000000, 62, 1, 0),
PLL_RATE(1512000000, 63, 1, 0),
PLL_RATE(1536000000, 64, 1, 0),
PLL_RATE(1560000000, 65, 1, 0),
PLL_RATE(1584000000, 66, 1, 0),
PLL_RATE(1608000000, 67, 1, 0),
PLL_RATE(1632000000, 68, 1, 0),
{ /* sentinel */ },
};
static struct pll_params_table axg_gp0_params_table[] = {
PLL_PARAM(HHI_GP0_PLL_CNTL, 0x40010250),
PLL_PARAM(HHI_GP0_PLL_CNTL1, 0xc084a000),
PLL_PARAM(HHI_GP0_PLL_CNTL2, 0xb75020be),
PLL_PARAM(HHI_GP0_PLL_CNTL3, 0x0a59a288),
PLL_PARAM(HHI_GP0_PLL_CNTL4, 0xc000004d),
PLL_PARAM(HHI_GP0_PLL_CNTL5, 0x00078000),
};
static struct meson_clk_pll axg_gp0_pll = {
.m = {
.reg_off = HHI_GP0_PLL_CNTL,
.shift = 0,
.width = 9,
},
.n = {
.reg_off = HHI_GP0_PLL_CNTL,
.shift = 9,
.width = 5,
},
.od = {
.reg_off = HHI_GP0_PLL_CNTL,
.shift = 16,
.width = 2,
},
.params = {
.params_table = axg_gp0_params_table,
.params_count = ARRAY_SIZE(axg_gp0_params_table),
.no_init_reset = true,
.reset_lock_loop = true,
},
.rate_table = axg_gp0_pll_rate_table,
.rate_count = ARRAY_SIZE(axg_gp0_pll_rate_table),
.lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
.name = "gp0_pll",
.ops = &meson_clk_pll_ops,
.parent_names = (const char *[]){ "xtal" },
.num_parents = 1,
},
};
static struct clk_fixed_factor axg_fclk_div2 = {
.mult = 1,
.div = 2,
.hw.init = &(struct clk_init_data){
.name = "fclk_div2",
.ops = &clk_fixed_factor_ops,
.parent_names = (const char *[]){ "fixed_pll" },
.num_parents = 1,
},
};
static struct clk_fixed_factor axg_fclk_div3 = {
.mult = 1,
.div = 3,
.hw.init = &(struct clk_init_data){
.name = "fclk_div3",
.ops = &clk_fixed_factor_ops,
.parent_names = (const char *[]){ "fixed_pll" },
.num_parents = 1,
},
};
static struct clk_fixed_factor axg_fclk_div4 = {
.mult = 1,
.div = 4,
.hw.init = &(struct clk_init_data){
.name = "fclk_div4",
.ops = &clk_fixed_factor_ops,
.parent_names = (const char *[]){ "fixed_pll" },
.num_parents = 1,
},
};
static struct clk_fixed_factor axg_fclk_div5 = {
.mult = 1,
.div = 5,
.hw.init = &(struct clk_init_data){
.name = "fclk_div5",
.ops = &clk_fixed_factor_ops,
.parent_names = (const char *[]){ "fixed_pll" },
.num_parents = 1,
},
};
static struct clk_fixed_factor axg_fclk_div7 = {
.mult = 1,
.div = 7,
.hw.init = &(struct clk_init_data){
.name = "fclk_div7",
.ops = &clk_fixed_factor_ops,
.parent_names = (const char *[]){ "fixed_pll" },
.num_parents = 1,
},
};
static struct meson_clk_mpll axg_mpll0 = {
.sdm = {
.reg_off = HHI_MPLL_CNTL7,
.shift = 0,
.width = 14,
},
.sdm_en = {
.reg_off = HHI_MPLL_CNTL7,
.shift = 15,
.width = 1,
},
.n2 = {
.reg_off = HHI_MPLL_CNTL7,
.shift = 16,
.width = 9,
},
.en = {
.reg_off = HHI_MPLL_CNTL7,
.shift = 14,
.width = 1,
},
.ssen = {
.reg_off = HHI_MPLL_CNTL,
.shift = 25,
.width = 1,
},
.lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
.name = "mpll0",
.ops = &meson_clk_mpll_ops,
.parent_names = (const char *[]){ "fixed_pll" },
.num_parents = 1,
},
};
static struct meson_clk_mpll axg_mpll1 = {
.sdm = {
.reg_off = HHI_MPLL_CNTL8,
.shift = 0,
.width = 14,
},
.sdm_en = {
.reg_off = HHI_MPLL_CNTL8,
.shift = 15,
.width = 1,
},
.n2 = {
.reg_off = HHI_MPLL_CNTL8,
.shift = 16,
.width = 9,
},
.en = {
.reg_off = HHI_MPLL_CNTL8,
.shift = 14,
.width = 1,
},
.lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
.name = "mpll1",
.ops = &meson_clk_mpll_ops,
.parent_names = (const char *[]){ "fixed_pll" },
.num_parents = 1,
},
};
static struct meson_clk_mpll axg_mpll2 = {
.sdm = {
.reg_off = HHI_MPLL_CNTL9,
.shift = 0,
.width = 14,
},
.sdm_en = {
.reg_off = HHI_MPLL_CNTL9,
.shift = 15,
.width = 1,
},
.n2 = {
.reg_off = HHI_MPLL_CNTL9,
.shift = 16,
.width = 9,
},
.en = {
.reg_off = HHI_MPLL_CNTL9,
.shift = 14,
.width = 1,
},
.lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
.name = "mpll2",
.ops = &meson_clk_mpll_ops,
.parent_names = (const char *[]){ "fixed_pll" },
.num_parents = 1,
},
};
static struct meson_clk_mpll axg_mpll3 = {
.sdm = {
.reg_off = HHI_MPLL3_CNTL0,
.shift = 12,
.width = 14,
},
.sdm_en = {
.reg_off = HHI_MPLL3_CNTL0,
.shift = 11,
.width = 1,
},
.n2 = {
.reg_off = HHI_MPLL3_CNTL0,
.shift = 2,
.width = 9,
},
.en = {
.reg_off = HHI_MPLL3_CNTL0,
.shift = 0,
.width = 1,
},
.lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
.name = "mpll3",
.ops = &meson_clk_mpll_ops,
.parent_names = (const char *[]){ "fixed_pll" },
.num_parents = 1,
},
};
/*
* FIXME The legacy composite clocks (e.g. clk81) are both PLL post-dividers
* and should be modeled with their respective PLLs via the forthcoming
* coordinated clock rates feature
*/
static u32 mux_table_clk81[] = { 0, 2, 3, 4, 5, 6, 7 };
static const char * const clk81_parent_names[] = {
"xtal", "fclk_div7", "mpll1", "mpll2", "fclk_div4",
"fclk_div3", "fclk_div5"
};
static struct clk_mux axg_mpeg_clk_sel = {
.reg = (void *)HHI_MPEG_CLK_CNTL,
.mask = 0x7,
.shift = 12,
.flags = CLK_MUX_READ_ONLY,
.table = mux_table_clk81,
.lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
.name = "mpeg_clk_sel",
.ops = &clk_mux_ro_ops,
.parent_names = clk81_parent_names,
.num_parents = ARRAY_SIZE(clk81_parent_names),
},
};
static struct clk_divider axg_mpeg_clk_div = {
.reg = (void *)HHI_MPEG_CLK_CNTL,
.shift = 0,
.width = 7,
.lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
.name = "mpeg_clk_div",
.ops = &clk_divider_ops,
.parent_names = (const char *[]){ "mpeg_clk_sel" },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
static struct clk_gate axg_clk81 = {
.reg = (void *)HHI_MPEG_CLK_CNTL,
.bit_idx = 7,
.lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
.name = "clk81",
.ops = &clk_gate_ops,
.parent_names = (const char *[]){ "mpeg_clk_div" },
.num_parents = 1,
.flags = (CLK_SET_RATE_PARENT | CLK_IS_CRITICAL),
},
};
static const char * const axg_sd_emmc_clk0_parent_names[] = {
"xtal", "fclk_div2", "fclk_div3", "fclk_div5", "fclk_div7",
/*
* Following these parent clocks, we should also have had mpll2, mpll3
* and gp0_pll but these clocks are too precious to be used here. All
* the necessary rates for MMC and NAND operation can be achieved using
* xtal or fclk_div clocks
*/
};
/* SDcard clock */
static struct clk_mux axg_sd_emmc_b_clk0_sel = {
.reg = (void *)HHI_SD_EMMC_CLK_CNTL,
.mask = 0x7,
.shift = 25,
.lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_b_clk0_sel",
.ops = &clk_mux_ops,
.parent_names = axg_sd_emmc_clk0_parent_names,
.num_parents = ARRAY_SIZE(axg_sd_emmc_clk0_parent_names),
.flags = CLK_SET_RATE_PARENT,
},
};
static struct clk_divider axg_sd_emmc_b_clk0_div = {
.reg = (void *)HHI_SD_EMMC_CLK_CNTL,
.shift = 16,
.width = 7,
.lock = &meson_clk_lock,
.flags = CLK_DIVIDER_ROUND_CLOSEST,
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_b_clk0_div",
.ops = &clk_divider_ops,
.parent_names = (const char *[]){ "sd_emmc_b_clk0_sel" },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
static struct clk_gate axg_sd_emmc_b_clk0 = {
.reg = (void *)HHI_SD_EMMC_CLK_CNTL,
.bit_idx = 23,
.lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
.name = "sd_emmc_b_clk0",
.ops = &clk_gate_ops,
.parent_names = (const char *[]){ "sd_emmc_b_clk0_div" },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
/* EMMC/NAND clock */
static struct clk_mux axg_sd_emmc_c_clk0_sel = {
.reg = (void *)HHI_NAND_CLK_CNTL,
.mask = 0x7,
.shift = 9,
.lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_c_clk0_sel",
.ops = &clk_mux_ops,
.parent_names = axg_sd_emmc_clk0_parent_names,
.num_parents = ARRAY_SIZE(axg_sd_emmc_clk0_parent_names),
.flags = CLK_SET_RATE_PARENT,
},
};
static struct clk_divider axg_sd_emmc_c_clk0_div = {
.reg = (void *)HHI_NAND_CLK_CNTL,
.shift = 0,
.width = 7,
.lock = &meson_clk_lock,
.flags = CLK_DIVIDER_ROUND_CLOSEST,
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_c_clk0_div",
.ops = &clk_divider_ops,
.parent_names = (const char *[]){ "sd_emmc_c_clk0_sel" },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
static struct clk_gate axg_sd_emmc_c_clk0 = {
.reg = (void *)HHI_NAND_CLK_CNTL,
.bit_idx = 7,
.lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
.name = "sd_emmc_c_clk0",
.ops = &clk_gate_ops,
.parent_names = (const char *[]){ "sd_emmc_c_clk0_div" },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
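/*
 * Illustrative sketch, not part of this driver: how a consumer such as an
 * MMC host controller might use one of the leaf gates registered above.
 * Because every element in these chains carries CLK_SET_RATE_PARENT, a rate
 * request on e.g. "sd_emmc_b_clk0" is resolved by sd_emmc_b_clk0_div and,
 * if needed, sd_emmc_b_clk0_sel. The device and connection name used here
 * are hypothetical.
 */
static int example_sd_emmc_clk_setup(struct device *dev, unsigned long hz)
{
	struct clk *core = devm_clk_get(dev, "core"); /* -> sd_emmc_b_clk0 via DT */
	int ret;

	if (IS_ERR(core))
		return PTR_ERR(core);

	ret = clk_prepare_enable(core);
	if (ret)
		return ret;

	/* Propagates through the divider (and mux) declared above */
	return clk_set_rate(core, hz);
}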
/* Everything Else (EE) domain gates */
static MESON_GATE(axg_ddr, HHI_GCLK_MPEG0, 0);
static MESON_GATE(axg_audio_locker, HHI_GCLK_MPEG0, 2);
static MESON_GATE(axg_mipi_dsi_host, HHI_GCLK_MPEG0, 3);
static MESON_GATE(axg_isa, HHI_GCLK_MPEG0, 5);
static MESON_GATE(axg_pl301, HHI_GCLK_MPEG0, 6);
static MESON_GATE(axg_periphs, HHI_GCLK_MPEG0, 7);
static MESON_GATE(axg_spicc_0, HHI_GCLK_MPEG0, 8);
static MESON_GATE(axg_i2c, HHI_GCLK_MPEG0, 9);
static MESON_GATE(axg_rng0, HHI_GCLK_MPEG0, 12);
static MESON_GATE(axg_uart0, HHI_GCLK_MPEG0, 13);
static MESON_GATE(axg_mipi_dsi_phy, HHI_GCLK_MPEG0, 14);
static MESON_GATE(axg_spicc_1, HHI_GCLK_MPEG0, 15);
static MESON_GATE(axg_pcie_a, HHI_GCLK_MPEG0, 16);
static MESON_GATE(axg_pcie_b, HHI_GCLK_MPEG0, 17);
static MESON_GATE(axg_hiu_reg, HHI_GCLK_MPEG0, 19);
static MESON_GATE(axg_assist_misc, HHI_GCLK_MPEG0, 23);
static MESON_GATE(axg_emmc_b, HHI_GCLK_MPEG0, 25);
static MESON_GATE(axg_emmc_c, HHI_GCLK_MPEG0, 26);
static MESON_GATE(axg_dma, HHI_GCLK_MPEG0, 27);
static MESON_GATE(axg_spi, HHI_GCLK_MPEG0, 30);
static MESON_GATE(axg_audio, HHI_GCLK_MPEG1, 0);
static MESON_GATE(axg_eth_core, HHI_GCLK_MPEG1, 3);
static MESON_GATE(axg_uart1, HHI_GCLK_MPEG1, 16);
static MESON_GATE(axg_g2d, HHI_GCLK_MPEG1, 20);
static MESON_GATE(axg_usb0, HHI_GCLK_MPEG1, 21);
static MESON_GATE(axg_usb1, HHI_GCLK_MPEG1, 22);
static MESON_GATE(axg_reset, HHI_GCLK_MPEG1, 23);
static MESON_GATE(axg_usb_general, HHI_GCLK_MPEG1, 26);
static MESON_GATE(axg_ahb_arb0, HHI_GCLK_MPEG1, 29);
static MESON_GATE(axg_efuse, HHI_GCLK_MPEG1, 30);
static MESON_GATE(axg_boot_rom, HHI_GCLK_MPEG1, 31);
static MESON_GATE(axg_ahb_data_bus, HHI_GCLK_MPEG2, 1);
static MESON_GATE(axg_ahb_ctrl_bus, HHI_GCLK_MPEG2, 2);
static MESON_GATE(axg_usb1_to_ddr, HHI_GCLK_MPEG2, 8);
static MESON_GATE(axg_usb0_to_ddr, HHI_GCLK_MPEG2, 9);
static MESON_GATE(axg_mmc_pclk, HHI_GCLK_MPEG2, 11);
static MESON_GATE(axg_vpu_intr, HHI_GCLK_MPEG2, 25);
static MESON_GATE(axg_sec_ahb_ahb3_bridge, HHI_GCLK_MPEG2, 26);
static MESON_GATE(axg_gic, HHI_GCLK_MPEG2, 30);
/* Always On (AO) domain gates */
static MESON_GATE(axg_ao_media_cpu, HHI_GCLK_AO, 0);
static MESON_GATE(axg_ao_ahb_sram, HHI_GCLK_AO, 1);
static MESON_GATE(axg_ao_ahb_bus, HHI_GCLK_AO, 2);
static MESON_GATE(axg_ao_iface, HHI_GCLK_AO, 3);
static MESON_GATE(axg_ao_i2c, HHI_GCLK_AO, 4);
/* Array of all clocks provided by this provider */
static struct clk_hw_onecell_data axg_hw_onecell_data = {
.hws = {
[CLKID_SYS_PLL] = &axg_sys_pll.hw,
[CLKID_FIXED_PLL] = &axg_fixed_pll.hw,
[CLKID_FCLK_DIV2] = &axg_fclk_div2.hw,
[CLKID_FCLK_DIV3] = &axg_fclk_div3.hw,
[CLKID_FCLK_DIV4] = &axg_fclk_div4.hw,
[CLKID_FCLK_DIV5] = &axg_fclk_div5.hw,
[CLKID_FCLK_DIV7] = &axg_fclk_div7.hw,
[CLKID_GP0_PLL] = &axg_gp0_pll.hw,
[CLKID_MPEG_SEL] = &axg_mpeg_clk_sel.hw,
[CLKID_MPEG_DIV] = &axg_mpeg_clk_div.hw,
[CLKID_CLK81] = &axg_clk81.hw,
[CLKID_MPLL0] = &axg_mpll0.hw,
[CLKID_MPLL1] = &axg_mpll1.hw,
[CLKID_MPLL2] = &axg_mpll2.hw,
[CLKID_MPLL3] = &axg_mpll3.hw,
[CLKID_DDR] = &axg_ddr.hw,
[CLKID_AUDIO_LOCKER] = &axg_audio_locker.hw,
[CLKID_MIPI_DSI_HOST] = &axg_mipi_dsi_host.hw,
[CLKID_ISA] = &axg_isa.hw,
[CLKID_PL301] = &axg_pl301.hw,
[CLKID_PERIPHS] = &axg_periphs.hw,
[CLKID_SPICC0] = &axg_spicc_0.hw,
[CLKID_I2C] = &axg_i2c.hw,
[CLKID_RNG0] = &axg_rng0.hw,
[CLKID_UART0] = &axg_uart0.hw,
[CLKID_MIPI_DSI_PHY] = &axg_mipi_dsi_phy.hw,
[CLKID_SPICC1] = &axg_spicc_1.hw,
[CLKID_PCIE_A] = &axg_pcie_a.hw,
[CLKID_PCIE_B] = &axg_pcie_b.hw,
[CLKID_HIU_IFACE] = &axg_hiu_reg.hw,
[CLKID_ASSIST_MISC] = &axg_assist_misc.hw,
[CLKID_SD_EMMC_B] = &axg_emmc_b.hw,
[CLKID_SD_EMMC_C] = &axg_emmc_c.hw,
[CLKID_DMA] = &axg_dma.hw,
[CLKID_SPI] = &axg_spi.hw,
[CLKID_AUDIO] = &axg_audio.hw,
[CLKID_ETH] = &axg_eth_core.hw,
[CLKID_UART1] = &axg_uart1.hw,
[CLKID_G2D] = &axg_g2d.hw,
[CLKID_USB0] = &axg_usb0.hw,
[CLKID_USB1] = &axg_usb1.hw,
[CLKID_RESET] = &axg_reset.hw,
[CLKID_USB] = &axg_usb_general.hw,
[CLKID_AHB_ARB0] = &axg_ahb_arb0.hw,
[CLKID_EFUSE] = &axg_efuse.hw,
[CLKID_BOOT_ROM] = &axg_boot_rom.hw,
[CLKID_AHB_DATA_BUS] = &axg_ahb_data_bus.hw,
[CLKID_AHB_CTRL_BUS] = &axg_ahb_ctrl_bus.hw,
[CLKID_USB1_DDR_BRIDGE] = &axg_usb1_to_ddr.hw,
[CLKID_USB0_DDR_BRIDGE] = &axg_usb0_to_ddr.hw,
[CLKID_MMC_PCLK] = &axg_mmc_pclk.hw,
[CLKID_VPU_INTR] = &axg_vpu_intr.hw,
[CLKID_SEC_AHB_AHB3_BRIDGE] = &axg_sec_ahb_ahb3_bridge.hw,
[CLKID_GIC] = &axg_gic.hw,
[CLKID_AO_MEDIA_CPU] = &axg_ao_media_cpu.hw,
[CLKID_AO_AHB_SRAM] = &axg_ao_ahb_sram.hw,
[CLKID_AO_AHB_BUS] = &axg_ao_ahb_bus.hw,
[CLKID_AO_IFACE] = &axg_ao_iface.hw,
[CLKID_AO_I2C] = &axg_ao_i2c.hw,
[CLKID_SD_EMMC_B_CLK0_SEL] = &axg_sd_emmc_b_clk0_sel.hw,
[CLKID_SD_EMMC_B_CLK0_DIV] = &axg_sd_emmc_b_clk0_div.hw,
[CLKID_SD_EMMC_B_CLK0] = &axg_sd_emmc_b_clk0.hw,
[CLKID_SD_EMMC_C_CLK0_SEL] = &axg_sd_emmc_c_clk0_sel.hw,
[CLKID_SD_EMMC_C_CLK0_DIV] = &axg_sd_emmc_c_clk0_div.hw,
[CLKID_SD_EMMC_C_CLK0] = &axg_sd_emmc_c_clk0.hw,
[NR_CLKS] = NULL,
},
.num = NR_CLKS,
};
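/*
 * For reference, a simplified version of what of_clk_hw_onecell_get() does
 * with the table above when a consumer references <&clkc CLKID_xxx>: the
 * single cell indexes straight into hws[], and sparse (NULL) entries simply
 * yield no clock. This is an illustration only, not code used by the driver.
 */
static struct clk_hw *example_onecell_lookup(struct of_phandle_args *clkspec,
					     void *data)
{
	struct clk_hw_onecell_data *hw_data = data;
	unsigned int idx = clkspec->args[0];

	if (idx >= hw_data->num)
		return ERR_PTR(-EINVAL);

	return hw_data->hws[idx];
}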
/* Convenience tables to populate base addresses in .probe */
static struct meson_clk_pll *const axg_clk_plls[] = {
&axg_fixed_pll,
&axg_sys_pll,
&axg_gp0_pll,
};
static struct meson_clk_mpll *const axg_clk_mplls[] = {
&axg_mpll0,
&axg_mpll1,
&axg_mpll2,
&axg_mpll3,
};
static struct clk_gate *const axg_clk_gates[] = {
&axg_clk81,
&axg_ddr,
&axg_audio_locker,
&axg_mipi_dsi_host,
&axg_isa,
&axg_pl301,
&axg_periphs,
&axg_spicc_0,
&axg_i2c,
&axg_rng0,
&axg_uart0,
&axg_mipi_dsi_phy,
&axg_spicc_1,
&axg_pcie_a,
&axg_pcie_b,
&axg_hiu_reg,
&axg_assist_misc,
&axg_emmc_b,
&axg_emmc_c,
&axg_dma,
&axg_spi,
&axg_audio,
&axg_eth_core,
&axg_uart1,
&axg_g2d,
&axg_usb0,
&axg_usb1,
&axg_reset,
&axg_usb_general,
&axg_ahb_arb0,
&axg_efuse,
&axg_boot_rom,
&axg_ahb_data_bus,
&axg_ahb_ctrl_bus,
&axg_usb1_to_ddr,
&axg_usb0_to_ddr,
&axg_mmc_pclk,
&axg_vpu_intr,
&axg_sec_ahb_ahb3_bridge,
&axg_gic,
&axg_ao_media_cpu,
&axg_ao_ahb_sram,
&axg_ao_ahb_bus,
&axg_ao_iface,
&axg_ao_i2c,
&axg_sd_emmc_b_clk0,
&axg_sd_emmc_c_clk0,
};
static struct clk_mux *const axg_clk_muxes[] = {
&axg_mpeg_clk_sel,
&axg_sd_emmc_b_clk0_sel,
&axg_sd_emmc_c_clk0_sel,
};
static struct clk_divider *const axg_clk_dividers[] = {
&axg_mpeg_clk_div,
&axg_sd_emmc_b_clk0_div,
&axg_sd_emmc_c_clk0_div,
};
struct clkc_data {
struct clk_gate *const *clk_gates;
unsigned int clk_gates_count;
struct meson_clk_mpll *const *clk_mplls;
unsigned int clk_mplls_count;
struct meson_clk_pll *const *clk_plls;
unsigned int clk_plls_count;
struct clk_mux *const *clk_muxes;
unsigned int clk_muxes_count;
struct clk_divider *const *clk_dividers;
unsigned int clk_dividers_count;
struct clk_hw_onecell_data *hw_onecell_data;
};
static const struct clkc_data axg_clkc_data = {
.clk_gates = axg_clk_gates,
.clk_gates_count = ARRAY_SIZE(axg_clk_gates),
.clk_mplls = axg_clk_mplls,
.clk_mplls_count = ARRAY_SIZE(axg_clk_mplls),
.clk_plls = axg_clk_plls,
.clk_plls_count = ARRAY_SIZE(axg_clk_plls),
.clk_muxes = axg_clk_muxes,
.clk_muxes_count = ARRAY_SIZE(axg_clk_muxes),
.clk_dividers = axg_clk_dividers,
.clk_dividers_count = ARRAY_SIZE(axg_clk_dividers),
.hw_onecell_data = &axg_hw_onecell_data,
};
static const struct of_device_id clkc_match_table[] = {
{ .compatible = "amlogic,axg-clkc", .data = &axg_clkc_data },
{}
};
static int axg_clkc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct clkc_data *clkc_data;
struct resource *res;
void __iomem *clk_base;
int ret, clkid, i;
clkc_data = of_device_get_match_data(&pdev->dev);
if (!clkc_data)
return -EINVAL;
/* Generic clocks and PLLs */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -EINVAL;
clk_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
if (!clk_base) {
dev_err(&pdev->dev, "Unable to map clk base\n");
return -ENXIO;
}
/* Populate base address for PLLs */
for (i = 0; i < clkc_data->clk_plls_count; i++)
clkc_data->clk_plls[i]->base = clk_base;
/* Populate base address for MPLLs */
for (i = 0; i < clkc_data->clk_mplls_count; i++)
clkc_data->clk_mplls[i]->base = clk_base;
/* Populate base address for gates */
for (i = 0; i < clkc_data->clk_gates_count; i++)
clkc_data->clk_gates[i]->reg = clk_base +
(u64)clkc_data->clk_gates[i]->reg;
/* Populate base address for muxes */
for (i = 0; i < clkc_data->clk_muxes_count; i++)
clkc_data->clk_muxes[i]->reg = clk_base +
(u64)clkc_data->clk_muxes[i]->reg;
/* Populate base address for dividers */
for (i = 0; i < clkc_data->clk_dividers_count; i++)
clkc_data->clk_dividers[i]->reg = clk_base +
(u64)clkc_data->clk_dividers[i]->reg;
for (clkid = 0; clkid < clkc_data->hw_onecell_data->num; clkid++) {
/* array might be sparse */
if (!clkc_data->hw_onecell_data->hws[clkid])
continue;
ret = devm_clk_hw_register(dev,
clkc_data->hw_onecell_data->hws[clkid]);
if (ret) {
dev_err(&pdev->dev, "Clock registration failed\n");
return ret;
}
}
return of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get,
clkc_data->hw_onecell_data);
}
static struct platform_driver axg_driver = {
.probe = axg_clkc_probe,
.driver = {
.name = "axg-clkc",
.of_match_table = clkc_match_table,
},
};
builtin_platform_driver(axg_driver);
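/*
 * Note on the probe above: the gate/mux/divider tables are declared with
 * register *offsets* stashed in their .reg pointer fields, and probe turns
 * them into real MMIO addresses by adding the ioremapped base. A minimal
 * stand-alone illustration of the same idiom (names made up for the example):
 */
struct example_regs {
	void __iomem *reg;		/* holds an offset until probe time */
};

static struct example_regs example = {
	.reg = (void __iomem *)HHI_MPEG_CLK_CNTL,	/* offset, not an address */
};

static void example_fixup(void __iomem *clk_base)
{
	/* same arithmetic as the (u64) casts in axg_clkc_probe() */
	example.reg = clk_base + (uintptr_t)example.reg;
}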

drivers/clk/meson/axg.h

@ -0,0 +1,126 @@
/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
/*
* Copyright (c) 2016 AmLogic, Inc.
* Author: Michael Turquette <mturquette@baylibre.com>
*
* Copyright (c) 2017 Amlogic, inc.
* Author: Qiufang Dai <qiufang.dai@amlogic.com>
*
*/
#ifndef __AXG_H
#define __AXG_H
/*
* Clock controller register offsets
*
* Register offsets from the data sheet must be multiplied by 4 before
* adding them to the base address to get the right value.
*/
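/*
 * For example, a register listed at word offset 0x10 in the data sheet ends
 * up at byte offset 0x10 * 4 = 0x40 here (HHI_GP0_PLL_CNTL), and word offset
 * 0xa0 becomes 0x280 (HHI_MPLL_CNTL).
 */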
#define HHI_GP0_PLL_CNTL 0x40
#define HHI_GP0_PLL_CNTL2 0x44
#define HHI_GP0_PLL_CNTL3 0x48
#define HHI_GP0_PLL_CNTL4 0x4c
#define HHI_GP0_PLL_CNTL5 0x50
#define HHI_GP0_PLL_STS 0x54
#define HHI_GP0_PLL_CNTL1 0x58
#define HHI_HIFI_PLL_CNTL 0x80
#define HHI_HIFI_PLL_CNTL2 0x84
#define HHI_HIFI_PLL_CNTL3 0x88
#define HHI_HIFI_PLL_CNTL4 0x8C
#define HHI_HIFI_PLL_CNTL5 0x90
#define HHI_HIFI_PLL_STS 0x94
#define HHI_HIFI_PLL_CNTL1 0x98
#define HHI_XTAL_DIVN_CNTL 0xbc
#define HHI_GCLK2_MPEG0 0xc0
#define HHI_GCLK2_MPEG1 0xc4
#define HHI_GCLK2_MPEG2 0xc8
#define HHI_GCLK2_OTHER 0xd0
#define HHI_GCLK2_AO 0xd4
#define HHI_PCIE_PLL_CNTL 0xd8
#define HHI_PCIE_PLL_CNTL1 0xdC
#define HHI_PCIE_PLL_CNTL2 0xe0
#define HHI_PCIE_PLL_CNTL3 0xe4
#define HHI_PCIE_PLL_CNTL4 0xe8
#define HHI_PCIE_PLL_CNTL5 0xec
#define HHI_PCIE_PLL_CNTL6 0xf0
#define HHI_PCIE_PLL_STS 0xf4
#define HHI_MEM_PD_REG0 0x100
#define HHI_VPU_MEM_PD_REG0 0x104
#define HHI_VIID_CLK_DIV 0x128
#define HHI_VIID_CLK_CNTL 0x12c
#define HHI_GCLK_MPEG0 0x140
#define HHI_GCLK_MPEG1 0x144
#define HHI_GCLK_MPEG2 0x148
#define HHI_GCLK_OTHER 0x150
#define HHI_GCLK_AO 0x154
#define HHI_SYS_CPU_CLK_CNTL1 0x15c
#define HHI_SYS_CPU_RESET_CNTL 0x160
#define HHI_VID_CLK_DIV 0x164
#define HHI_SPICC_HCLK_CNTL 0x168
#define HHI_MPEG_CLK_CNTL 0x174
#define HHI_VID_CLK_CNTL 0x17c
#define HHI_TS_CLK_CNTL 0x190
#define HHI_VID_CLK_CNTL2 0x194
#define HHI_SYS_CPU_CLK_CNTL0 0x19c
#define HHI_VID_PLL_CLK_DIV 0x1a0
#define HHI_VPU_CLK_CNTL 0x1bC
#define HHI_VAPBCLK_CNTL 0x1F4
#define HHI_GEN_CLK_CNTL 0x228
#define HHI_VDIN_MEAS_CLK_CNTL 0x250
#define HHI_NAND_CLK_CNTL 0x25C
#define HHI_SD_EMMC_CLK_CNTL 0x264
#define HHI_MPLL_CNTL 0x280
#define HHI_MPLL_CNTL2 0x284
#define HHI_MPLL_CNTL3 0x288
#define HHI_MPLL_CNTL4 0x28C
#define HHI_MPLL_CNTL5 0x290
#define HHI_MPLL_CNTL6 0x294
#define HHI_MPLL_CNTL7 0x298
#define HHI_MPLL_CNTL8 0x29C
#define HHI_MPLL_CNTL9 0x2A0
#define HHI_MPLL_CNTL10 0x2A4
#define HHI_MPLL3_CNTL0 0x2E0
#define HHI_MPLL3_CNTL1 0x2E4
#define HHI_PLL_TOP_MISC 0x2E8
#define HHI_SYS_PLL_CNTL1 0x2FC
#define HHI_SYS_PLL_CNTL 0x300
#define HHI_SYS_PLL_CNTL2 0x304
#define HHI_SYS_PLL_CNTL3 0x308
#define HHI_SYS_PLL_CNTL4 0x30c
#define HHI_SYS_PLL_CNTL5 0x310
#define HHI_SYS_PLL_STS 0x314
#define HHI_DPLL_TOP_I 0x318
#define HHI_DPLL_TOP2_I 0x31C
/*
* CLKID index values
*
* These indices are entirely contrived and do not map onto the hardware.
* It has now been decided to expose everything by default in the DT header:
* include/dt-bindings/clock/axg-clkc.h. Only the clock IDs we don't want
* to expose, such as the internal muxes and dividers of composite clocks,
* will remain defined here.
*/
#define CLKID_MPEG_SEL 8
#define CLKID_MPEG_DIV 9
#define CLKID_SD_EMMC_B_CLK0_SEL 61
#define CLKID_SD_EMMC_B_CLK0_DIV 62
#define CLKID_SD_EMMC_C_CLK0_SEL 63
#define CLKID_SD_EMMC_C_CLK0_DIV 64
#define NR_CLKS 65
/* include the CLKIDs that have been made part of the DT binding */
#include <dt-bindings/clock/axg-clkc.h>
#endif /* __AXG_H */


@ -98,7 +98,7 @@ static void params_from_rate(unsigned long requested_rate,
*sdm = SDM_DEN - 1;
} else {
*n2 = div;
*sdm = DIV_ROUND_UP(rem * SDM_DEN, requested_rate);
*sdm = DIV_ROUND_UP_ULL((u64)rem * SDM_DEN, requested_rate);
}
}
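/*
 * Why the DIV_ROUND_UP_ULL change matters: rem can be nearly as large as the
 * parent rate (hundreds of MHz up to ~2 GHz) and SDM_DEN is 2^14 (16384, the
 * sdm field is 14 bits wide), so the 32-bit product rem * SDM_DEN overflows.
 * Promoting to 64 bits first keeps the intermediate exact. A small
 * illustration (not driver code):
 *
 *	u32 rem  = 500000000;
 *	u32 bad  = rem * 16384;		// wraps: the result exceeds 2^32
 *	u64 good = (u64)rem * 16384;	// 8192000000000, as intended
 */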


@ -134,7 +134,7 @@ struct meson_clk_audio_divider {
struct clk_gate _name = { \
.reg = (void __iomem *) _reg, \
.bit_idx = (_bit), \
.lock = &clk_lock, \
.lock = &meson_clk_lock, \
.hw.init = &(struct clk_init_data) { \
.name = #_name, \
.ops = &clk_gate_ops, \


@ -27,7 +27,7 @@
#include "clkc.h"
#include "gxbb.h"
static DEFINE_SPINLOCK(clk_lock);
static DEFINE_SPINLOCK(meson_clk_lock);
static const struct pll_rate_table sys_pll_rate_table[] = {
PLL_RATE(24000000, 56, 1, 2),
@ -294,7 +294,7 @@ static struct meson_clk_pll gxbb_fixed_pll = {
.shift = 16,
.width = 2,
},
.lock = &clk_lock,
.lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
.name = "fixed_pll",
.ops = &meson_clk_pll_ro_ops,
@ -330,7 +330,7 @@ static struct meson_clk_pll gxbb_hdmi_pll = {
.shift = 22,
.width = 2,
},
.lock = &clk_lock,
.lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
.name = "hdmi_pll",
.ops = &meson_clk_pll_ro_ops,
@ -358,7 +358,7 @@ static struct meson_clk_pll gxbb_sys_pll = {
},
.rate_table = sys_pll_rate_table,
.rate_count = ARRAY_SIZE(sys_pll_rate_table),
.lock = &clk_lock,
.lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
.name = "sys_pll",
.ops = &meson_clk_pll_ro_ops,
@ -399,7 +399,7 @@ static struct meson_clk_pll gxbb_gp0_pll = {
},
.rate_table = gxbb_gp0_pll_rate_table,
.rate_count = ARRAY_SIZE(gxbb_gp0_pll_rate_table),
.lock = &clk_lock,
.lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
.name = "gp0_pll",
.ops = &meson_clk_pll_ops,
@ -442,7 +442,7 @@ static struct meson_clk_pll gxl_gp0_pll = {
},
.rate_table = gxl_gp0_pll_rate_table,
.rate_count = ARRAY_SIZE(gxl_gp0_pll_rate_table),
.lock = &clk_lock,
.lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
.name = "gp0_pll",
.ops = &meson_clk_pll_ops,
@ -533,7 +533,7 @@ static struct meson_clk_mpll gxbb_mpll0 = {
.shift = 25,
.width = 1,
},
.lock = &clk_lock,
.lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
.name = "mpll0",
.ops = &meson_clk_mpll_ops,
@ -563,7 +563,7 @@ static struct meson_clk_mpll gxbb_mpll1 = {
.shift = 14,
.width = 1,
},
.lock = &clk_lock,
.lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
.name = "mpll1",
.ops = &meson_clk_mpll_ops,
@ -593,7 +593,7 @@ static struct meson_clk_mpll gxbb_mpll2 = {
.shift = 14,
.width = 1,
},
.lock = &clk_lock,
.lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
.name = "mpll2",
.ops = &meson_clk_mpll_ops,
@ -620,7 +620,7 @@ static struct clk_mux gxbb_mpeg_clk_sel = {
.shift = 12,
.flags = CLK_MUX_READ_ONLY,
.table = mux_table_clk81,
.lock = &clk_lock,
.lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
.name = "mpeg_clk_sel",
.ops = &clk_mux_ro_ops,
@ -639,7 +639,7 @@ static struct clk_divider gxbb_mpeg_clk_div = {
.reg = (void *)HHI_MPEG_CLK_CNTL,
.shift = 0,
.width = 7,
.lock = &clk_lock,
.lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
.name = "mpeg_clk_div",
.ops = &clk_divider_ops,
@ -653,7 +653,7 @@ static struct clk_divider gxbb_mpeg_clk_div = {
static struct clk_gate gxbb_clk81 = {
.reg = (void *)HHI_MPEG_CLK_CNTL,
.bit_idx = 7,
.lock = &clk_lock,
.lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
.name = "clk81",
.ops = &clk_gate_ops,
@ -667,7 +667,7 @@ static struct clk_mux gxbb_sar_adc_clk_sel = {
.reg = (void *)HHI_SAR_CLK_CNTL,
.mask = 0x3,
.shift = 9,
.lock = &clk_lock,
.lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
.name = "sar_adc_clk_sel",
.ops = &clk_mux_ops,
@ -681,7 +681,7 @@ static struct clk_divider gxbb_sar_adc_clk_div = {
.reg = (void *)HHI_SAR_CLK_CNTL,
.shift = 0,
.width = 8,
.lock = &clk_lock,
.lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
.name = "sar_adc_clk_div",
.ops = &clk_divider_ops,
@ -693,7 +693,7 @@ static struct clk_divider gxbb_sar_adc_clk_div = {
static struct clk_gate gxbb_sar_adc_clk = {
.reg = (void *)HHI_SAR_CLK_CNTL,
.bit_idx = 8,
.lock = &clk_lock,
.lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
.name = "sar_adc_clk",
.ops = &clk_gate_ops,
@ -719,7 +719,7 @@ static struct clk_mux gxbb_mali_0_sel = {
.mask = 0x7,
.shift = 9,
.table = mux_table_mali_0_1,
.lock = &clk_lock,
.lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
.name = "mali_0_sel",
.ops = &clk_mux_ops,
@ -738,7 +738,7 @@ static struct clk_divider gxbb_mali_0_div = {
.reg = (void *)HHI_MALI_CLK_CNTL,
.shift = 0,
.width = 7,
.lock = &clk_lock,
.lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
.name = "mali_0_div",
.ops = &clk_divider_ops,
@ -751,7 +751,7 @@ static struct clk_divider gxbb_mali_0_div = {
static struct clk_gate gxbb_mali_0 = {
.reg = (void *)HHI_MALI_CLK_CNTL,
.bit_idx = 8,
.lock = &clk_lock,
.lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
.name = "mali_0",
.ops = &clk_gate_ops,
@ -766,7 +766,7 @@ static struct clk_mux gxbb_mali_1_sel = {
.mask = 0x7,
.shift = 25,
.table = mux_table_mali_0_1,
.lock = &clk_lock,
.lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
.name = "mali_1_sel",
.ops = &clk_mux_ops,
@ -785,7 +785,7 @@ static struct clk_divider gxbb_mali_1_div = {
.reg = (void *)HHI_MALI_CLK_CNTL,
.shift = 16,
.width = 7,
.lock = &clk_lock,
.lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
.name = "mali_1_div",
.ops = &clk_divider_ops,
@ -798,7 +798,7 @@ static struct clk_divider gxbb_mali_1_div = {
static struct clk_gate gxbb_mali_1 = {
.reg = (void *)HHI_MALI_CLK_CNTL,
.bit_idx = 24,
.lock = &clk_lock,
.lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
.name = "mali_1",
.ops = &clk_gate_ops,
@ -818,7 +818,7 @@ static struct clk_mux gxbb_mali = {
.mask = 1,
.shift = 31,
.table = mux_table_mali,
.lock = &clk_lock,
.lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
.name = "mali",
.ops = &clk_mux_ops,
@ -834,7 +834,7 @@ static struct clk_mux gxbb_cts_amclk_sel = {
.shift = 9,
/* Default parent unknown (register reset value: 0) */
.table = (u32[]){ 1, 2, 3 },
.lock = &clk_lock,
.lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
.name = "cts_amclk_sel",
.ops = &clk_mux_ops,
@ -851,7 +851,7 @@ static struct meson_clk_audio_divider gxbb_cts_amclk_div = {
.width = 8,
},
.flags = CLK_DIVIDER_ROUND_CLOSEST,
.lock = &clk_lock,
.lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
.name = "cts_amclk_div",
.ops = &meson_clk_audio_divider_ops,
@ -864,7 +864,7 @@ static struct meson_clk_audio_divider gxbb_cts_amclk_div = {
static struct clk_gate gxbb_cts_amclk = {
.reg = (void *) HHI_AUD_CLK_CNTL,
.bit_idx = 8,
.lock = &clk_lock,
.lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
.name = "cts_amclk",
.ops = &clk_gate_ops,
@ -880,7 +880,7 @@ static struct clk_mux gxbb_cts_mclk_i958_sel = {
.shift = 25,
/* Default parent unknown (register reset value: 0) */
.table = (u32[]){ 1, 2, 3 },
.lock = &clk_lock,
.lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data) {
.name = "cts_mclk_i958_sel",
.ops = &clk_mux_ops,
@ -894,7 +894,7 @@ static struct clk_divider gxbb_cts_mclk_i958_div = {
.reg = (void *)HHI_AUD_CLK_CNTL2,
.shift = 16,
.width = 8,
.lock = &clk_lock,
.lock = &meson_clk_lock,
.flags = CLK_DIVIDER_ROUND_CLOSEST,
.hw.init = &(struct clk_init_data) {
.name = "cts_mclk_i958_div",
@ -908,7 +908,7 @@ static struct clk_divider gxbb_cts_mclk_i958_div = {
static struct clk_gate gxbb_cts_mclk_i958 = {
.reg = (void *)HHI_AUD_CLK_CNTL2,
.bit_idx = 24,
.lock = &clk_lock,
.lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
.name = "cts_mclk_i958",
.ops = &clk_gate_ops,
@ -922,7 +922,7 @@ static struct clk_mux gxbb_cts_i958 = {
.reg = (void *)HHI_AUD_CLK_CNTL2,
.mask = 0x1,
.shift = 27,
.lock = &clk_lock,
.lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
.name = "cts_i958",
.ops = &clk_mux_ops,
@ -940,7 +940,7 @@ static struct clk_divider gxbb_32k_clk_div = {
.reg = (void *)HHI_32K_CLK_CNTL,
.shift = 0,
.width = 14,
.lock = &clk_lock,
.lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
.name = "32k_clk_div",
.ops = &clk_divider_ops,
@ -953,7 +953,7 @@ static struct clk_divider gxbb_32k_clk_div = {
static struct clk_gate gxbb_32k_clk = {
.reg = (void *)HHI_32K_CLK_CNTL,
.bit_idx = 15,
.lock = &clk_lock,
.lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
.name = "32k_clk",
.ops = &clk_gate_ops,
@ -971,7 +971,7 @@ static struct clk_mux gxbb_32k_clk_sel = {
.reg = (void *)HHI_32K_CLK_CNTL,
.mask = 0x3,
.shift = 16,
.lock = &clk_lock,
.lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
.name = "32k_clk_sel",
.ops = &clk_mux_ops,
@ -997,7 +997,7 @@ static struct clk_mux gxbb_sd_emmc_a_clk0_sel = {
.reg = (void *)HHI_SD_EMMC_CLK_CNTL,
.mask = 0x7,
.shift = 9,
.lock = &clk_lock,
.lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_a_clk0_sel",
.ops = &clk_mux_ops,
@ -1011,7 +1011,7 @@ static struct clk_divider gxbb_sd_emmc_a_clk0_div = {
.reg = (void *)HHI_SD_EMMC_CLK_CNTL,
.shift = 0,
.width = 7,
.lock = &clk_lock,
.lock = &meson_clk_lock,
.flags = CLK_DIVIDER_ROUND_CLOSEST,
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_a_clk0_div",
@ -1025,23 +1025,13 @@ static struct clk_divider gxbb_sd_emmc_a_clk0_div = {
static struct clk_gate gxbb_sd_emmc_a_clk0 = {
.reg = (void *)HHI_SD_EMMC_CLK_CNTL,
.bit_idx = 7,
.lock = &clk_lock,
.lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
.name = "sd_emmc_a_clk0",
.ops = &clk_gate_ops,
.parent_names = (const char *[]){ "sd_emmc_a_clk0_div" },
.num_parents = 1,
/*
* FIXME:
* We need CLK_IGNORE_UNUSED because mmc DT node point to xtal
* instead of this clock. CCF would gate this on boot, killing
* the mmc controller. Please remove this flag once DT properly
* point to this clock instead of xtal
*
* Same goes for emmc B and C clocks
*/
.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
.flags = CLK_SET_RATE_PARENT,
},
};
@ -1050,7 +1040,7 @@ static struct clk_mux gxbb_sd_emmc_b_clk0_sel = {
.reg = (void *)HHI_SD_EMMC_CLK_CNTL,
.mask = 0x7,
.shift = 25,
.lock = &clk_lock,
.lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_b_clk0_sel",
.ops = &clk_mux_ops,
@ -1064,7 +1054,7 @@ static struct clk_divider gxbb_sd_emmc_b_clk0_div = {
.reg = (void *)HHI_SD_EMMC_CLK_CNTL,
.shift = 16,
.width = 7,
.lock = &clk_lock,
.lock = &meson_clk_lock,
.flags = CLK_DIVIDER_ROUND_CLOSEST,
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_b_clk0_div",
@ -1078,13 +1068,13 @@ static struct clk_divider gxbb_sd_emmc_b_clk0_div = {
static struct clk_gate gxbb_sd_emmc_b_clk0 = {
.reg = (void *)HHI_SD_EMMC_CLK_CNTL,
.bit_idx = 23,
.lock = &clk_lock,
.lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
.name = "sd_emmc_b_clk0",
.ops = &clk_gate_ops,
.parent_names = (const char *[]){ "sd_emmc_b_clk0_div" },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
.flags = CLK_SET_RATE_PARENT,
},
};
@ -1093,7 +1083,7 @@ static struct clk_mux gxbb_sd_emmc_c_clk0_sel = {
.reg = (void *)HHI_NAND_CLK_CNTL,
.mask = 0x7,
.shift = 9,
.lock = &clk_lock,
.lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_c_clk0_sel",
.ops = &clk_mux_ops,
@ -1107,7 +1097,7 @@ static struct clk_divider gxbb_sd_emmc_c_clk0_div = {
.reg = (void *)HHI_NAND_CLK_CNTL,
.shift = 0,
.width = 7,
.lock = &clk_lock,
.lock = &meson_clk_lock,
.flags = CLK_DIVIDER_ROUND_CLOSEST,
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_c_clk0_div",
@ -1121,13 +1111,13 @@ static struct clk_divider gxbb_sd_emmc_c_clk0_div = {
static struct clk_gate gxbb_sd_emmc_c_clk0 = {
.reg = (void *)HHI_NAND_CLK_CNTL,
.bit_idx = 7,
.lock = &clk_lock,
.lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
.name = "sd_emmc_c_clk0",
.ops = &clk_gate_ops,
.parent_names = (const char *[]){ "sd_emmc_c_clk0_div" },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
.flags = CLK_SET_RATE_PARENT,
},
};
@ -1142,7 +1132,7 @@ static struct clk_mux gxbb_vpu_0_sel = {
.reg = (void *)HHI_VPU_CLK_CNTL,
.mask = 0x3,
.shift = 9,
.lock = &clk_lock,
.lock = &meson_clk_lock,
.table = mux_table_vpu,
.hw.init = &(struct clk_init_data){
.name = "vpu_0_sel",
@ -1161,7 +1151,7 @@ static struct clk_divider gxbb_vpu_0_div = {
.reg = (void *)HHI_VPU_CLK_CNTL,
.shift = 0,
.width = 7,
.lock = &clk_lock,
.lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
.name = "vpu_0_div",
.ops = &clk_divider_ops,
@ -1174,7 +1164,7 @@ static struct clk_divider gxbb_vpu_0_div = {
static struct clk_gate gxbb_vpu_0 = {
.reg = (void *)HHI_VPU_CLK_CNTL,
.bit_idx = 8,
.lock = &clk_lock,
.lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data) {
.name = "vpu_0",
.ops = &clk_gate_ops,
@ -1188,7 +1178,7 @@ static struct clk_mux gxbb_vpu_1_sel = {
.reg = (void *)HHI_VPU_CLK_CNTL,
.mask = 0x3,
.shift = 25,
.lock = &clk_lock,
.lock = &meson_clk_lock,
.table = mux_table_vpu,
.hw.init = &(struct clk_init_data){
.name = "vpu_1_sel",
@ -1207,7 +1197,7 @@ static struct clk_divider gxbb_vpu_1_div = {
.reg = (void *)HHI_VPU_CLK_CNTL,
.shift = 16,
.width = 7,
.lock = &clk_lock,
.lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
.name = "vpu_1_div",
.ops = &clk_divider_ops,
@ -1220,7 +1210,7 @@ static struct clk_divider gxbb_vpu_1_div = {
static struct clk_gate gxbb_vpu_1 = {
.reg = (void *)HHI_VPU_CLK_CNTL,
.bit_idx = 24,
.lock = &clk_lock,
.lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data) {
.name = "vpu_1",
.ops = &clk_gate_ops,
@ -1234,7 +1224,7 @@ static struct clk_mux gxbb_vpu = {
.reg = (void *)HHI_VPU_CLK_CNTL,
.mask = 1,
.shift = 31,
.lock = &clk_lock,
.lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
.name = "vpu",
.ops = &clk_mux_ops,
@ -1259,7 +1249,7 @@ static struct clk_mux gxbb_vapb_0_sel = {
.reg = (void *)HHI_VAPBCLK_CNTL,
.mask = 0x3,
.shift = 9,
.lock = &clk_lock,
.lock = &meson_clk_lock,
.table = mux_table_vapb,
.hw.init = &(struct clk_init_data){
.name = "vapb_0_sel",
@ -1278,7 +1268,7 @@ static struct clk_divider gxbb_vapb_0_div = {
.reg = (void *)HHI_VAPBCLK_CNTL,
.shift = 0,
.width = 7,
.lock = &clk_lock,
.lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
.name = "vapb_0_div",
.ops = &clk_divider_ops,
@ -1291,7 +1281,7 @@ static struct clk_divider gxbb_vapb_0_div = {
static struct clk_gate gxbb_vapb_0 = {
.reg = (void *)HHI_VAPBCLK_CNTL,
.bit_idx = 8,
.lock = &clk_lock,
.lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data) {
.name = "vapb_0",
.ops = &clk_gate_ops,
@ -1305,7 +1295,7 @@ static struct clk_mux gxbb_vapb_1_sel = {
.reg = (void *)HHI_VAPBCLK_CNTL,
.mask = 0x3,
.shift = 25,
.lock = &clk_lock,
.lock = &meson_clk_lock,
.table = mux_table_vapb,
.hw.init = &(struct clk_init_data){
.name = "vapb_1_sel",
@ -1324,7 +1314,7 @@ static struct clk_divider gxbb_vapb_1_div = {
.reg = (void *)HHI_VAPBCLK_CNTL,
.shift = 16,
.width = 7,
.lock = &clk_lock,
.lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
.name = "vapb_1_div",
.ops = &clk_divider_ops,
@ -1337,7 +1327,7 @@ static struct clk_divider gxbb_vapb_1_div = {
static struct clk_gate gxbb_vapb_1 = {
.reg = (void *)HHI_VAPBCLK_CNTL,
.bit_idx = 24,
.lock = &clk_lock,
.lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data) {
.name = "vapb_1",
.ops = &clk_gate_ops,
@ -1351,7 +1341,7 @@ static struct clk_mux gxbb_vapb_sel = {
.reg = (void *)HHI_VAPBCLK_CNTL,
.mask = 1,
.shift = 31,
.lock = &clk_lock,
.lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
.name = "vapb_sel",
.ops = &clk_mux_ops,
@ -1368,7 +1358,7 @@ static struct clk_mux gxbb_vapb_sel = {
static struct clk_gate gxbb_vapb = {
.reg = (void *)HHI_VAPBCLK_CNTL,
.bit_idx = 30,
.lock = &clk_lock,
.lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data) {
.name = "vapb",
.ops = &clk_gate_ops,
@ -1386,7 +1376,7 @@ static MESON_GATE(gxbb_pl301, HHI_GCLK_MPEG0, 6);
static MESON_GATE(gxbb_periphs, HHI_GCLK_MPEG0, 7);
static MESON_GATE(gxbb_spicc, HHI_GCLK_MPEG0, 8);
static MESON_GATE(gxbb_i2c, HHI_GCLK_MPEG0, 9);
static MESON_GATE(gxbb_sar_adc, HHI_GCLK_MPEG0, 10);
static MESON_GATE(gxbb_sana, HHI_GCLK_MPEG0, 10);
static MESON_GATE(gxbb_smart_card, HHI_GCLK_MPEG0, 11);
static MESON_GATE(gxbb_rng0, HHI_GCLK_MPEG0, 12);
static MESON_GATE(gxbb_uart0, HHI_GCLK_MPEG0, 13);
@ -1437,7 +1427,7 @@ static MESON_GATE(gxbb_usb0_ddr_bridge, HHI_GCLK_MPEG2, 9);
static MESON_GATE(gxbb_mmc_pclk, HHI_GCLK_MPEG2, 11);
static MESON_GATE(gxbb_dvin, HHI_GCLK_MPEG2, 12);
static MESON_GATE(gxbb_uart2, HHI_GCLK_MPEG2, 15);
static MESON_GATE(gxbb_sana, HHI_GCLK_MPEG2, 22);
static MESON_GATE(gxbb_sar_adc, HHI_GCLK_MPEG2, 22);
static MESON_GATE(gxbb_vpu_intr, HHI_GCLK_MPEG2, 25);
static MESON_GATE(gxbb_sec_ahb_ahb3_bridge, HHI_GCLK_MPEG2, 26);
static MESON_GATE(gxbb_clk81_a53, HHI_GCLK_MPEG2, 29);


@ -32,7 +32,7 @@
#include "clkc.h"
#include "meson8b.h"
static DEFINE_SPINLOCK(clk_lock);
static DEFINE_SPINLOCK(meson_clk_lock);
static void __iomem *clk_base;
@ -136,7 +136,7 @@ static struct meson_clk_pll meson8b_fixed_pll = {
.shift = 16,
.width = 2,
},
.lock = &clk_lock,
.lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
.name = "fixed_pll",
.ops = &meson_clk_pll_ro_ops,
@ -162,7 +162,7 @@ static struct meson_clk_pll meson8b_vid_pll = {
.shift = 16,
.width = 2,
},
.lock = &clk_lock,
.lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
.name = "vid_pll",
.ops = &meson_clk_pll_ro_ops,
@ -190,7 +190,7 @@ static struct meson_clk_pll meson8b_sys_pll = {
},
.rate_table = sys_pll_rate_table,
.rate_count = ARRAY_SIZE(sys_pll_rate_table),
.lock = &clk_lock,
.lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
.name = "sys_pll",
.ops = &meson_clk_pll_ops,
@ -281,7 +281,7 @@ static struct meson_clk_mpll meson8b_mpll0 = {
.shift = 25,
.width = 1,
},
.lock = &clk_lock,
.lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
.name = "mpll0",
.ops = &meson_clk_mpll_ops,
@ -311,7 +311,7 @@ static struct meson_clk_mpll meson8b_mpll1 = {
.shift = 14,
.width = 1,
},
.lock = &clk_lock,
.lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
.name = "mpll1",
.ops = &meson_clk_mpll_ops,
@ -341,7 +341,7 @@ static struct meson_clk_mpll meson8b_mpll2 = {
.shift = 14,
.width = 1,
},
.lock = &clk_lock,
.lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
.name = "mpll2",
.ops = &meson_clk_mpll_ops,
@ -375,7 +375,7 @@ struct clk_mux meson8b_mpeg_clk_sel = {
.shift = 12,
.flags = CLK_MUX_READ_ONLY,
.table = mux_table_clk81,
.lock = &clk_lock,
.lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
.name = "mpeg_clk_sel",
.ops = &clk_mux_ro_ops,
@ -395,7 +395,7 @@ struct clk_divider meson8b_mpeg_clk_div = {
.reg = (void *)HHI_MPEG_CLK_CNTL,
.shift = 0,
.width = 7,
.lock = &clk_lock,
.lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
.name = "mpeg_clk_div",
.ops = &clk_divider_ops,
@ -408,7 +408,7 @@ struct clk_divider meson8b_mpeg_clk_div = {
struct clk_gate meson8b_clk81 = {
.reg = (void *)HHI_MPEG_CLK_CNTL,
.bit_idx = 7,
.lock = &clk_lock,
.lock = &meson_clk_lock,
.hw.init = &(struct clk_init_data){
.name = "clk81",
.ops = &clk_gate_ops,
@ -773,7 +773,7 @@ static int meson8b_clk_reset_update(struct reset_controller_dev *rcdev,
reset = &meson8b_clk_reset_bits[id];
spin_lock_irqsave(&clk_lock, flags);
spin_lock_irqsave(&meson_clk_lock, flags);
val = readl(meson8b_clk_reset->base + reset->reg);
if (assert)
@ -782,7 +782,7 @@ static int meson8b_clk_reset_update(struct reset_controller_dev *rcdev,
val &= ~BIT(reset->bit_idx);
writel(val, meson8b_clk_reset->base + reset->reg);
spin_unlock_irqrestore(&clk_lock, flags);
spin_unlock_irqrestore(&meson_clk_lock, flags);
return 0;
}


@ -21,9 +21,11 @@
*/
#include <linux/clk-provider.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#define TBG_SEL 0x0
@ -33,6 +35,26 @@
#define CLK_SEL 0x10
#define CLK_DIS 0x14
#define LOAD_LEVEL_NR 4
#define ARMADA_37XX_NB_L0L1 0x18
#define ARMADA_37XX_NB_L2L3 0x1C
#define ARMADA_37XX_NB_TBG_DIV_OFF 13
#define ARMADA_37XX_NB_TBG_DIV_MASK 0x7
#define ARMADA_37XX_NB_CLK_SEL_OFF 11
#define ARMADA_37XX_NB_CLK_SEL_MASK 0x1
#define ARMADA_37XX_NB_TBG_SEL_OFF 9
#define ARMADA_37XX_NB_TBG_SEL_MASK 0x3
#define ARMADA_37XX_NB_CONFIG_SHIFT 16
#define ARMADA_37XX_NB_DYN_MOD 0x24
#define ARMADA_37XX_NB_DFS_EN 31
#define ARMADA_37XX_NB_CPU_LOAD 0x30
#define ARMADA_37XX_NB_CPU_LOAD_MASK 0x3
#define ARMADA_37XX_DVFS_LOAD_0 0
#define ARMADA_37XX_DVFS_LOAD_1 1
#define ARMADA_37XX_DVFS_LOAD_2 2
#define ARMADA_37XX_DVFS_LOAD_3 3
struct clk_periph_driver_data {
struct clk_hw_onecell_data *hw_data;
spinlock_t lock;
@ -46,7 +68,18 @@ struct clk_double_div {
u8 shift2;
};
struct clk_pm_cpu {
struct clk_hw hw;
void __iomem *reg_mux;
u8 shift_mux;
u32 mask_mux;
void __iomem *reg_div;
u8 shift_div;
struct regmap *nb_pm_base;
};
#define to_clk_double_div(_hw) container_of(_hw, struct clk_double_div, hw)
#define to_clk_pm_cpu(_hw) container_of(_hw, struct clk_pm_cpu, hw)
struct clk_periph_data {
const char *name;
@ -55,6 +88,7 @@ struct clk_periph_data {
struct clk_hw *mux_hw;
struct clk_hw *rate_hw;
struct clk_hw *gate_hw;
struct clk_hw *muxrate_hw;
bool is_double_div;
};
@ -79,7 +113,9 @@ static const struct clk_div_table clk_table2[] = {
{ .val = 1, .div = 4, },
{ .val = 0, .div = 0, }, /* last entry */
};
static const struct clk_ops clk_double_div_ops;
static const struct clk_ops clk_pm_cpu_ops;
#define PERIPH_GATE(_name, _bit) \
struct clk_gate gate_##_name = { \
@ -121,6 +157,18 @@ struct clk_divider rate_##_name = { \
} \
};
#define PERIPH_PM_CPU(_name, _shift1, _reg, _shift2) \
struct clk_pm_cpu muxrate_##_name = { \
.reg_mux = (void *)TBG_SEL, \
.mask_mux = 3, \
.shift_mux = _shift1, \
.reg_div = (void *)_reg, \
.shift_div = _shift2, \
.hw.init = &(struct clk_init_data){ \
.ops = &clk_pm_cpu_ops, \
} \
};
#define PERIPH_CLK_FULL_DD(_name, _bit, _shift, _reg1, _reg2, _shift1, _shift2)\
static PERIPH_GATE(_name, _bit); \
static PERIPH_MUX(_name, _shift); \
@ -135,10 +183,6 @@ static PERIPH_DIV(_name, _reg, _shift1, _table);
static PERIPH_GATE(_name, _bit); \
static PERIPH_DIV(_name, _reg, _shift, _table);
#define PERIPH_CLK_MUX_DIV(_name, _shift, _reg, _shift_div, _table) \
static PERIPH_MUX(_name, _shift); \
static PERIPH_DIV(_name, _reg, _shift_div, _table);
#define PERIPH_CLK_MUX_DD(_name, _shift, _reg1, _reg2, _shift1, _shift2)\
static PERIPH_MUX(_name, _shift); \
static PERIPH_DOUBLEDIV(_name, _reg1, _reg2, _shift1, _shift2);
@ -179,13 +223,12 @@ static PERIPH_DOUBLEDIV(_name, _reg1, _reg2, _shift1, _shift2);
.rate_hw = &rate_##_name.hw, \
}
#define REF_CLK_MUX_DIV(_name) \
#define REF_CLK_PM_CPU(_name) \
{ .name = #_name, \
.parent_names = (const char *[]){ "TBG-A-P", \
"TBG-B-P", "TBG-A-S", "TBG-B-S"}, \
.num_parents = 4, \
.mux_hw = &mux_##_name.hw, \
.rate_hw = &rate_##_name.hw, \
.muxrate_hw = &muxrate_##_name.hw, \
}
#define REF_CLK_MUX_DD(_name) \
@ -215,9 +258,9 @@ PERIPH_CLK_FULL_DD(ddr_fclk, 21, 16, DIV_SEL0, DIV_SEL0, 15, 12);
PERIPH_CLK_FULL(trace, 22, 18, DIV_SEL0, 20, clk_table6);
PERIPH_CLK_FULL(counter, 23, 20, DIV_SEL0, 23, clk_table6);
PERIPH_CLK_FULL_DD(eip97, 24, 24, DIV_SEL2, DIV_SEL2, 22, 19);
PERIPH_CLK_MUX_DIV(cpu, 22, DIV_SEL0, 28, clk_table6);
static PERIPH_PM_CPU(cpu, 22, DIV_SEL0, 28);
static struct clk_periph_data data_nb[] ={
static struct clk_periph_data data_nb[] = {
REF_CLK_FULL_DD(mmc),
REF_CLK_FULL_DD(sata_host),
REF_CLK_FULL_DD(sec_at),
@ -234,7 +277,7 @@ static struct clk_periph_data data_nb[] ={
REF_CLK_FULL(trace),
REF_CLK_FULL(counter),
REF_CLK_FULL_DD(eip97),
REF_CLK_MUX_DIV(cpu),
REF_CLK_PM_CPU(cpu),
{ },
};
@ -281,7 +324,7 @@ static unsigned int get_div(void __iomem *reg, int shift)
}
static unsigned long clk_double_div_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
unsigned long parent_rate)
{
struct clk_double_div *double_div = to_clk_double_div(hw);
unsigned int div;
@ -296,6 +339,222 @@ static const struct clk_ops clk_double_div_ops = {
.recalc_rate = clk_double_div_recalc_rate,
};
static void armada_3700_pm_dvfs_update_regs(unsigned int load_level,
unsigned int *reg,
unsigned int *offset)
{
if (load_level <= ARMADA_37XX_DVFS_LOAD_1)
*reg = ARMADA_37XX_NB_L0L1;
else
*reg = ARMADA_37XX_NB_L2L3;
if (load_level == ARMADA_37XX_DVFS_LOAD_0 ||
load_level == ARMADA_37XX_DVFS_LOAD_2)
*offset += ARMADA_37XX_NB_CONFIG_SHIFT;
}
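/*
 * Worked example of the mapping done above: load level 1 stays in
 * ARMADA_37XX_NB_L0L1 with the field in the low 16 bits, while load level 2
 * moves to ARMADA_37XX_NB_L2L3 and the offset is bumped by
 * ARMADA_37XX_NB_CONFIG_SHIFT (16) into the high half of that register.
 */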
static bool armada_3700_pm_dvfs_is_enabled(struct regmap *base)
{
unsigned int val, reg = ARMADA_37XX_NB_DYN_MOD;
if (IS_ERR(base))
return false;
regmap_read(base, reg, &val);
return !!(val & BIT(ARMADA_37XX_NB_DFS_EN));
}
static unsigned int armada_3700_pm_dvfs_get_cpu_div(struct regmap *base)
{
unsigned int reg = ARMADA_37XX_NB_CPU_LOAD;
unsigned int offset = ARMADA_37XX_NB_TBG_DIV_OFF;
unsigned int load_level, div;
/*
* This function is always called after the function
* armada_3700_pm_dvfs_is_enabled, so no need to check again
* if the base is valid.
*/
regmap_read(base, reg, &load_level);
/*
* The register, and the offset within it that holds the current
* divider, depend on the load level
*/
load_level &= ARMADA_37XX_NB_CPU_LOAD_MASK;
armada_3700_pm_dvfs_update_regs(load_level, &reg, &offset);
regmap_read(base, reg, &div);
return (div >> offset) & ARMADA_37XX_NB_TBG_DIV_MASK;
}
static unsigned int armada_3700_pm_dvfs_get_cpu_parent(struct regmap *base)
{
unsigned int reg = ARMADA_37XX_NB_CPU_LOAD;
unsigned int offset = ARMADA_37XX_NB_TBG_SEL_OFF;
unsigned int load_level, sel;
/*
* This function is always called after the function
* armada_3700_pm_dvfs_is_enabled, so no need to check again
* if the base is valid
*/
regmap_read(base, reg, &load_level);
/*
* The register, and the offset within it that holds the current
* parent selection, depend on the load level
*/
load_level &= ARMADA_37XX_NB_CPU_LOAD_MASK;
armada_3700_pm_dvfs_update_regs(load_level, &reg, &offset);
regmap_read(base, reg, &sel);
return (sel >> offset) & ARMADA_37XX_NB_TBG_SEL_MASK;
}
static u8 clk_pm_cpu_get_parent(struct clk_hw *hw)
{
struct clk_pm_cpu *pm_cpu = to_clk_pm_cpu(hw);
int num_parents = clk_hw_get_num_parents(hw);
u32 val;
if (armada_3700_pm_dvfs_is_enabled(pm_cpu->nb_pm_base)) {
val = armada_3700_pm_dvfs_get_cpu_parent(pm_cpu->nb_pm_base);
} else {
val = readl(pm_cpu->reg_mux) >> pm_cpu->shift_mux;
val &= pm_cpu->mask_mux;
}
if (val >= num_parents)
return -EINVAL;
return val;
}
static int clk_pm_cpu_set_parent(struct clk_hw *hw, u8 index)
{
struct clk_pm_cpu *pm_cpu = to_clk_pm_cpu(hw);
struct regmap *base = pm_cpu->nb_pm_base;
int load_level;
/*
* We set the clock parent only if the DVFS is available but
* not enabled.
*/
if (IS_ERR(base) || armada_3700_pm_dvfs_is_enabled(base))
return -EINVAL;
/* Set the parent clock for all the load levels */
for (load_level = 0; load_level < LOAD_LEVEL_NR; load_level++) {
unsigned int reg, mask, val,
offset = ARMADA_37XX_NB_TBG_SEL_OFF;
armada_3700_pm_dvfs_update_regs(load_level, &reg, &offset);
val = index << offset;
mask = ARMADA_37XX_NB_TBG_SEL_MASK << offset;
regmap_update_bits(base, reg, mask, val);
}
return 0;
}
static unsigned long clk_pm_cpu_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct clk_pm_cpu *pm_cpu = to_clk_pm_cpu(hw);
unsigned int div;
if (armada_3700_pm_dvfs_is_enabled(pm_cpu->nb_pm_base))
div = armada_3700_pm_dvfs_get_cpu_div(pm_cpu->nb_pm_base);
else
div = get_div(pm_cpu->reg_div, pm_cpu->shift_div);
return DIV_ROUND_UP_ULL((u64)parent_rate, div);
}
static long clk_pm_cpu_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
struct clk_pm_cpu *pm_cpu = to_clk_pm_cpu(hw);
struct regmap *base = pm_cpu->nb_pm_base;
unsigned int div = *parent_rate / rate;
unsigned int load_level;
/* only available when DVFS is enabled */
if (!armada_3700_pm_dvfs_is_enabled(base))
return -EINVAL;
for (load_level = 0; load_level < LOAD_LEVEL_NR; load_level++) {
unsigned int reg, val, offset = ARMADA_37XX_NB_TBG_DIV_OFF;
armada_3700_pm_dvfs_update_regs(load_level, &reg, &offset);
regmap_read(base, reg, &val);
val >>= offset;
val &= ARMADA_37XX_NB_TBG_DIV_MASK;
if (val == div)
/*
* We found a load level matching the target
* divider, switch to this load level and
* return.
*/
return *parent_rate / div;
}
/* We didn't find any valid divider */
return -EINVAL;
}
static int clk_pm_cpu_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct clk_pm_cpu *pm_cpu = to_clk_pm_cpu(hw);
struct regmap *base = pm_cpu->nb_pm_base;
unsigned int div = parent_rate / rate;
unsigned int load_level;
/* only available when DVFS is enabled */
if (!armada_3700_pm_dvfs_is_enabled(base))
return -EINVAL;
for (load_level = 0; load_level < LOAD_LEVEL_NR; load_level++) {
unsigned int reg, mask, val,
offset = ARMADA_37XX_NB_TBG_DIV_OFF;
armada_3700_pm_dvfs_update_regs(load_level, &reg, &offset);
regmap_read(base, reg, &val);
val >>= offset;
val &= ARMADA_37XX_NB_TBG_DIV_MASK;
if (val == div) {
/*
* We found a load level matching the target
* divider, switch to this load level and
* return.
*/
reg = ARMADA_37XX_NB_CPU_LOAD;
mask = ARMADA_37XX_NB_CPU_LOAD_MASK;
regmap_update_bits(base, reg, mask, load_level);
return rate;
}
}
/* We didn't find any valid divider */
return -EINVAL;
}
static const struct clk_ops clk_pm_cpu_ops = {
.get_parent = clk_pm_cpu_get_parent,
.set_parent = clk_pm_cpu_set_parent,
.round_rate = clk_pm_cpu_round_rate,
.set_rate = clk_pm_cpu_set_rate,
.recalc_rate = clk_pm_cpu_recalc_rate,
};
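/*
 * Example of how these ops behave with DVFS enabled (values illustrative):
 * with a 1 GHz TBG parent, a request for 500 MHz gives div = 2;
 * clk_pm_cpu_set_rate() then scans the four load levels for one whose
 * programmed TBG divider is 2 and, on a match, writes that load level into
 * ARMADA_37XX_NB_CPU_LOAD so the hardware switches to it.
 */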
static const struct of_device_id armada_3700_periph_clock_of_match[] = {
{ .compatible = "marvell,armada-3700-periph-clock-nb",
.data = data_nb, },
@ -303,6 +562,7 @@ static const struct of_device_id armada_3700_periph_clock_of_match[] = {
.data = data_sb, },
{ }
};
static int armada_3700_add_composite_clk(const struct clk_periph_data *data,
void __iomem *reg, spinlock_t *lock,
struct device *dev, struct clk_hw **hw)
@ -354,15 +614,31 @@ static int armada_3700_add_composite_clk(const struct clk_periph_data *data,
}
}
if (data->muxrate_hw) {
struct clk_pm_cpu *pmcpu_clk;
struct clk_hw *muxrate_hw = data->muxrate_hw;
struct regmap *map;
pmcpu_clk = to_clk_pm_cpu(muxrate_hw);
pmcpu_clk->reg_mux = reg + (u64)pmcpu_clk->reg_mux;
pmcpu_clk->reg_div = reg + (u64)pmcpu_clk->reg_div;
mux_hw = muxrate_hw;
rate_hw = muxrate_hw;
mux_ops = muxrate_hw->init->ops;
rate_ops = muxrate_hw->init->ops;
map = syscon_regmap_lookup_by_compatible(
"marvell,armada-3700-nb-pm");
pmcpu_clk->nb_pm_base = map;
}
*hw = clk_hw_register_composite(dev, data->name, data->parent_names,
data->num_parents, mux_hw,
mux_ops, rate_hw, rate_ops,
gate_hw, gate_ops, CLK_IGNORE_UNUSED);
data->num_parents, mux_hw,
mux_ops, rate_hw, rate_ops,
gate_hw, gate_ops, CLK_IGNORE_UNUSED);
if (IS_ERR(*hw))
return PTR_ERR(*hw);
return 0;
return PTR_ERR_OR_ZERO(*hw);
}
static int armada_3700_periph_clock_probe(struct platform_device *pdev)
@ -406,12 +682,11 @@ static int armada_3700_periph_clock_probe(struct platform_device *pdev)
if (armada_3700_add_composite_clk(&data[i], reg,
&driver_data->lock, dev, hw))
dev_err(dev, "Can't register periph clock %s\n",
data[i].name);
data[i].name);
}
ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get,
driver_data->hw_data);
driver_data->hw_data);
if (ret) {
for (i = 0; i < num_periph; i++)
clk_hw_unregister(driver_data->hw_data->hws[i]);


@ -526,7 +526,7 @@ static unsigned long clk_pll_recalc_rate(struct clk_hw *hw,
!(pll_is_valid(parent_rate, 1, 1000000, 20000000)
&& pll_is_valid(cco_rate, 1, 156000000, 320000000)
&& pll_is_valid(ref_rate, 1, 1000000, 27000000)))
pr_err("%s: PLL clocks are not in valid ranges: %lu/%lu/%lu",
pr_err("%s: PLL clocks are not in valid ranges: %lu/%lu/%lu\n",
clk_hw_get_name(hw),
parent_rate, cco_rate, ref_rate);
@ -956,7 +956,7 @@ static unsigned long clk_divider_recalc_rate(struct clk_hw *hw,
val &= div_mask(divider->width);
return divider_recalc_rate(hw, parent_rate, val, divider->table,
divider->flags);
divider->flags, divider->width);
}
static long clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
@ -1505,7 +1505,7 @@ static void __init lpc32xx_clk_init(struct device_node *np)
return;
}
if (clk_get_rate(clk_32k) != 32768) {
pr_err("invalid clock rate of external 32KHz oscillator");
pr_err("invalid clock rate of external 32KHz oscillator\n");
return;
}


@ -329,12 +329,16 @@ static void __init pxa3xx_dummy_clocks_init(void)
static void __init pxa3xx_base_clocks_init(void)
{
struct clk *clk;
pxa3xx_register_plls();
pxa3xx_register_core();
clk_register_clk_pxa3xx_system_bus();
clk_register_clk_pxa3xx_ac97();
clk_register_clk_pxa3xx_smemc();
clk_register_gate(NULL, "CLK_POUT", "osc_13mhz", 0, OSCC, 11, 0, NULL);
clk = clk_register_gate(NULL, "CLK_POUT",
"osc_13mhz", 0, OSCC, 11, 0, NULL);
clk_register_clkdev(clk, "CLK_POUT", NULL);
clkdev_pxa_register(CLK_OSTIMER, "OSTIMER0", NULL,
clk_register_fixed_factor(NULL, "os-timer0",
"osc_13mhz", 0, 1, 4));


@ -12,6 +12,27 @@ config COMMON_CLK_QCOM
select REGMAP_MMIO
select RESET_CONTROLLER
config QCOM_A53PLL
tristate "MSM8916 A53 PLL"
depends on COMMON_CLK_QCOM
default ARCH_QCOM
help
Support for the A53 PLL on MSM8916 devices. It provides
the CPU with frequencies above 1GHz.
Say Y if you want to support higher CPU frequencies on MSM8916
devices.
config QCOM_CLK_APCS_MSM8916
tristate "MSM8916 APCS Clock Controller"
depends on COMMON_CLK_QCOM
depends on QCOM_APCS_IPC || COMPILE_TEST
default ARCH_QCOM
help
Support for the APCS Clock Controller on msm8916 devices. The
APCS manages the mux and divider which feed the CPUs.
Say Y if you want to support CPU frequency scaling on devices
such as msm8916.
config QCOM_CLK_RPM
tristate "RPM based Clock Controller"
depends on COMMON_CLK_QCOM && MFD_QCOM_RPM
@ -196,3 +217,12 @@ config MSM_MMCC_8996
Support for the multimedia clock controller on msm8996 devices.
Say Y if you want to support multimedia devices such as display,
graphics, video encode/decode, camera, etc.
config SPMI_PMIC_CLKDIV
tristate "SPMI PMIC clkdiv Support"
depends on (COMMON_CLK_QCOM && SPMI) || COMPILE_TEST
help
This driver supports the clkdiv functionality on the Qualcomm
Technologies, Inc. SPMI PMIC. It configures the frequency of
clkdiv outputs of the PMIC. These clocks are typically wired
through alternate functions on GPIO pins.


@ -10,6 +10,7 @@ clk-qcom-y += clk-rcg2.o
clk-qcom-y += clk-branch.o
clk-qcom-y += clk-regmap-divider.o
clk-qcom-y += clk-regmap-mux.o
clk-qcom-y += clk-regmap-mux-div.o
clk-qcom-y += reset.o
clk-qcom-$(CONFIG_QCOM_GDSC) += gdsc.o
@ -32,5 +33,8 @@ obj-$(CONFIG_MSM_LCC_8960) += lcc-msm8960.o
obj-$(CONFIG_MSM_MMCC_8960) += mmcc-msm8960.o
obj-$(CONFIG_MSM_MMCC_8974) += mmcc-msm8974.o
obj-$(CONFIG_MSM_MMCC_8996) += mmcc-msm8996.o
obj-$(CONFIG_QCOM_A53PLL) += a53-pll.o
obj-$(CONFIG_QCOM_CLK_APCS_MSM8916) += apcs-msm8916.o
obj-$(CONFIG_QCOM_CLK_RPM) += clk-rpm.o
obj-$(CONFIG_QCOM_CLK_SMD_RPM) += clk-smd-rpm.o
obj-$(CONFIG_SPMI_PMIC_CLKDIV) += clk-spmi-pmic-div.o

drivers/clk/qcom/a53-pll.c (new file, 107 lines)

@ -0,0 +1,107 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Qualcomm A53 PLL driver
*
* Copyright (c) 2017, Linaro Limited
* Author: Georgi Djakov <georgi.djakov@linaro.org>
*/
#include <linux/clk-provider.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/module.h>
#include "clk-pll.h"
#include "clk-regmap.h"
static const struct pll_freq_tbl a53pll_freq[] = {
{ 998400000, 52, 0x0, 0x1, 0 },
{ 1094400000, 57, 0x0, 0x1, 0 },
{ 1152000000, 62, 0x0, 0x1, 0 },
{ 1209600000, 63, 0x0, 0x1, 0 },
{ 1248000000, 65, 0x0, 0x1, 0 },
{ 1363200000, 71, 0x0, 0x1, 0 },
{ 1401600000, 73, 0x0, 0x1, 0 },
{ }
};
static const struct regmap_config a53pll_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
.max_register = 0x40,
.fast_io = true,
};
static int qcom_a53pll_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct regmap *regmap;
struct resource *res;
struct clk_pll *pll;
void __iomem *base;
struct clk_init_data init = { };
int ret;
pll = devm_kzalloc(dev, sizeof(*pll), GFP_KERNEL);
if (!pll)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
base = devm_ioremap_resource(dev, res);
if (IS_ERR(base))
return PTR_ERR(base);
regmap = devm_regmap_init_mmio(dev, base, &a53pll_regmap_config);
if (IS_ERR(regmap))
return PTR_ERR(regmap);
pll->l_reg = 0x04;
pll->m_reg = 0x08;
pll->n_reg = 0x0c;
pll->config_reg = 0x14;
pll->mode_reg = 0x00;
pll->status_reg = 0x1c;
pll->status_bit = 16;
pll->freq_tbl = a53pll_freq;
init.name = "a53pll";
init.parent_names = (const char *[]){ "xo" };
init.num_parents = 1;
init.ops = &clk_pll_sr2_ops;
init.flags = CLK_IS_CRITICAL;
pll->clkr.hw.init = &init;
ret = devm_clk_register_regmap(dev, &pll->clkr);
if (ret) {
dev_err(dev, "failed to register regmap clock: %d\n", ret);
return ret;
}
ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get,
&pll->clkr.hw);
if (ret) {
dev_err(dev, "failed to add clock provider: %d\n", ret);
return ret;
}
return 0;
}
static const struct of_device_id qcom_a53pll_match_table[] = {
{ .compatible = "qcom,msm8916-a53pll" },
{ }
};
static struct platform_driver qcom_a53pll_driver = {
.probe = qcom_a53pll_probe,
.driver = {
.name = "qcom-a53pll",
.of_match_table = qcom_a53pll_match_table,
},
};
module_platform_driver(qcom_a53pll_driver);
MODULE_DESCRIPTION("Qualcomm A53 PLL Driver");
MODULE_LICENSE("GPL v2");


@ -0,0 +1,138 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Qualcomm APCS clock controller driver
*
* Copyright (c) 2017, Linaro Limited
* Author: Georgi Djakov <georgi.djakov@linaro.org>
*/
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include "clk-regmap.h"
#include "clk-regmap-mux-div.h"
static const u32 gpll0_a53cc_map[] = { 4, 5 };
static const char * const gpll0_a53cc[] = {
"gpll0_vote",
"a53pll",
};
/*
* We use the notifier function for switching to a temporary safe configuration
* (mux and divider), while the A53 PLL is reconfigured.
*/
static int a53cc_notifier_cb(struct notifier_block *nb, unsigned long event,
void *data)
{
int ret = 0;
struct clk_regmap_mux_div *md = container_of(nb,
struct clk_regmap_mux_div,
clk_nb);
if (event == PRE_RATE_CHANGE)
/* set the mux and divider to a safe frequency (400 MHz) */
ret = mux_div_set_src_div(md, 4, 3);
return notifier_from_errno(ret);
}
static int qcom_apcs_msm8916_clk_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device *parent = dev->parent;
struct clk_regmap_mux_div *a53cc;
struct regmap *regmap;
struct clk_init_data init = { };
int ret;
regmap = dev_get_regmap(parent, NULL);
if (IS_ERR(regmap)) {
ret = PTR_ERR(regmap);
dev_err(dev, "failed to get regmap: %d\n", ret);
return ret;
}
a53cc = devm_kzalloc(dev, sizeof(*a53cc), GFP_KERNEL);
if (!a53cc)
return -ENOMEM;
init.name = "a53mux";
init.parent_names = gpll0_a53cc;
init.num_parents = ARRAY_SIZE(gpll0_a53cc);
init.ops = &clk_regmap_mux_div_ops;
init.flags = CLK_SET_RATE_PARENT;
a53cc->clkr.hw.init = &init;
a53cc->clkr.regmap = regmap;
a53cc->reg_offset = 0x50;
a53cc->hid_width = 5;
a53cc->hid_shift = 0;
a53cc->src_width = 3;
a53cc->src_shift = 8;
a53cc->parent_map = gpll0_a53cc_map;
a53cc->pclk = devm_clk_get(parent, NULL);
if (IS_ERR(a53cc->pclk)) {
ret = PTR_ERR(a53cc->pclk);
dev_err(dev, "failed to get clk: %d\n", ret);
return ret;
}
a53cc->clk_nb.notifier_call = a53cc_notifier_cb;
ret = clk_notifier_register(a53cc->pclk, &a53cc->clk_nb);
if (ret) {
dev_err(dev, "failed to register clock notifier: %d\n", ret);
return ret;
}
ret = devm_clk_register_regmap(dev, &a53cc->clkr);
if (ret) {
dev_err(dev, "failed to register regmap clock: %d\n", ret);
goto err;
}
ret = of_clk_add_hw_provider(parent->of_node, of_clk_hw_simple_get,
&a53cc->clkr.hw);
if (ret) {
dev_err(dev, "failed to add clock provider: %d\n", ret);
goto err;
}
platform_set_drvdata(pdev, a53cc);
return 0;
err:
clk_notifier_unregister(a53cc->pclk, &a53cc->clk_nb);
return ret;
}
static int qcom_apcs_msm8916_clk_remove(struct platform_device *pdev)
{
struct clk_regmap_mux_div *a53cc = platform_get_drvdata(pdev);
struct device *parent = pdev->dev.parent;
clk_notifier_unregister(a53cc->pclk, &a53cc->clk_nb);
of_clk_del_provider(parent->of_node);
return 0;
}
static struct platform_driver qcom_apcs_msm8916_clk_driver = {
.probe = qcom_apcs_msm8916_clk_probe,
.remove = qcom_apcs_msm8916_clk_remove,
.driver = {
.name = "qcom-apcs-msm8916-clk",
},
};
module_platform_driver(qcom_apcs_msm8916_clk_driver);
MODULE_AUTHOR("Georgi Djakov <georgi.djakov@linaro.org>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Qualcomm MSM8916 APCS clock driver");


@ -20,7 +20,7 @@
#include "clk-alpha-pll.h"
#include "common.h"
#define PLL_MODE 0x00
#define PLL_MODE(p) ((p)->offset + 0x0)
# define PLL_OUTCTRL BIT(0)
# define PLL_BYPASSNL BIT(1)
# define PLL_RESET_N BIT(2)
@ -32,35 +32,87 @@
# define PLL_VOTE_FSM_ENA BIT(20)
# define PLL_FSM_ENA BIT(20)
# define PLL_VOTE_FSM_RESET BIT(21)
# define PLL_UPDATE BIT(22)
# define PLL_UPDATE_BYPASS BIT(23)
# define PLL_OFFLINE_ACK BIT(28)
# define ALPHA_PLL_ACK_LATCH BIT(29)
# define PLL_ACTIVE_FLAG BIT(30)
# define PLL_LOCK_DET BIT(31)
#define PLL_L_VAL 0x04
#define PLL_ALPHA_VAL 0x08
#define PLL_ALPHA_VAL_U 0x0c
#define PLL_L_VAL(p) ((p)->offset + (p)->regs[PLL_OFF_L_VAL])
#define PLL_ALPHA_VAL(p) ((p)->offset + (p)->regs[PLL_OFF_ALPHA_VAL])
#define PLL_ALPHA_VAL_U(p) ((p)->offset + (p)->regs[PLL_OFF_ALPHA_VAL_U])
#define PLL_USER_CTL 0x10
#define PLL_USER_CTL(p) ((p)->offset + (p)->regs[PLL_OFF_USER_CTL])
# define PLL_POST_DIV_SHIFT 8
# define PLL_POST_DIV_MASK 0xf
# define PLL_POST_DIV_MASK(p) GENMASK((p)->width, 0)
# define PLL_ALPHA_EN BIT(24)
# define PLL_ALPHA_MODE BIT(25)
# define PLL_VCO_SHIFT 20
# define PLL_VCO_MASK 0x3
#define PLL_USER_CTL_U 0x14
#define PLL_USER_CTL_U(p) ((p)->offset + (p)->regs[PLL_OFF_USER_CTL_U])
#define PLL_CONFIG_CTL 0x18
#define PLL_CONFIG_CTL_U 0x20
#define PLL_TEST_CTL 0x1c
#define PLL_TEST_CTL_U 0x20
#define PLL_STATUS 0x24
#define PLL_CONFIG_CTL(p) ((p)->offset + (p)->regs[PLL_OFF_CONFIG_CTL])
#define PLL_CONFIG_CTL_U(p) ((p)->offset + (p)->regs[PLL_OFF_CONFIG_CTL_U])
#define PLL_TEST_CTL(p) ((p)->offset + (p)->regs[PLL_OFF_TEST_CTL])
#define PLL_TEST_CTL_U(p) ((p)->offset + (p)->regs[PLL_OFF_TEST_CTL_U])
#define PLL_STATUS(p) ((p)->offset + (p)->regs[PLL_OFF_STATUS])
const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
[CLK_ALPHA_PLL_TYPE_DEFAULT] = {
[PLL_OFF_L_VAL] = 0x04,
[PLL_OFF_ALPHA_VAL] = 0x08,
[PLL_OFF_ALPHA_VAL_U] = 0x0c,
[PLL_OFF_USER_CTL] = 0x10,
[PLL_OFF_USER_CTL_U] = 0x14,
[PLL_OFF_CONFIG_CTL] = 0x18,
[PLL_OFF_TEST_CTL] = 0x1c,
[PLL_OFF_TEST_CTL_U] = 0x20,
[PLL_OFF_STATUS] = 0x24,
},
[CLK_ALPHA_PLL_TYPE_HUAYRA] = {
[PLL_OFF_L_VAL] = 0x04,
[PLL_OFF_ALPHA_VAL] = 0x08,
[PLL_OFF_USER_CTL] = 0x10,
[PLL_OFF_CONFIG_CTL] = 0x14,
[PLL_OFF_CONFIG_CTL_U] = 0x18,
[PLL_OFF_TEST_CTL] = 0x1c,
[PLL_OFF_TEST_CTL_U] = 0x20,
[PLL_OFF_STATUS] = 0x24,
},
[CLK_ALPHA_PLL_TYPE_BRAMMO] = {
[PLL_OFF_L_VAL] = 0x04,
[PLL_OFF_ALPHA_VAL] = 0x08,
[PLL_OFF_ALPHA_VAL_U] = 0x0c,
[PLL_OFF_USER_CTL] = 0x10,
[PLL_OFF_CONFIG_CTL] = 0x18,
[PLL_OFF_TEST_CTL] = 0x1c,
[PLL_OFF_STATUS] = 0x24,
},
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_regs);
/*
* Even though 40 bits are present, use only 32 for ease of calculation.
*/
#define ALPHA_REG_BITWIDTH 40
#define ALPHA_BITWIDTH 32
#define ALPHA_16BIT_MASK 0xffff
#define ALPHA_REG_16BIT_WIDTH 16
#define ALPHA_BITWIDTH 32U
#define ALPHA_SHIFT(w) min(w, ALPHA_BITWIDTH)
#define PLL_HUAYRA_M_WIDTH 8
#define PLL_HUAYRA_M_SHIFT 8
#define PLL_HUAYRA_M_MASK 0xff
#define PLL_HUAYRA_N_SHIFT 0
#define PLL_HUAYRA_N_MASK 0xff
#define PLL_HUAYRA_ALPHA_WIDTH 16
#define pll_alpha_width(p) \
((PLL_ALPHA_VAL_U(p) - PLL_ALPHA_VAL(p) == 4) ? \
ALPHA_REG_BITWIDTH : ALPHA_REG_16BIT_WIDTH)
#define pll_has_64bit_config(p) ((PLL_CONFIG_CTL_U(p) - PLL_CONFIG_CTL(p)) == 4)
#define to_clk_alpha_pll(_hw) container_of(to_clk_regmap(_hw), \
struct clk_alpha_pll, clkr)
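
The pll_alpha_width() macro above picks a 40-bit or 16-bit alpha register layout depending on whether a separate ALPHA_VAL_U register exists, and ALPHA_SHIFT() caps the arithmetic at 32 bits. A minimal sketch of the rate formula used by alpha_pll_calc_rate() further down, with plain C types standing in for the kernel's u64/u32 and purely illustrative values:

/* fout = prate * l + (prate * a >> min(alpha_width, 32)); illustrative only */
static unsigned long long alpha_pll_rate_sketch(unsigned long long prate,
						unsigned int l,
						unsigned long long a,
						unsigned int alpha_width)
{
	unsigned int shift = alpha_width < 32 ? alpha_width : 32;

	return prate * l + ((prate * a) >> shift);
}
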
@ -71,18 +123,17 @@
static int wait_for_pll(struct clk_alpha_pll *pll, u32 mask, bool inverse,
const char *action)
{
u32 val, off;
u32 val;
int count;
int ret;
const char *name = clk_hw_get_name(&pll->clkr.hw);
off = pll->offset;
ret = regmap_read(pll->clkr.regmap, off + PLL_MODE, &val);
ret = regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
if (ret)
return ret;
for (count = 100; count > 0; count--) {
ret = regmap_read(pll->clkr.regmap, off + PLL_MODE, &val);
ret = regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
if (ret)
return ret;
if (inverse && !(val & mask))
@ -109,16 +160,30 @@ static int wait_for_pll(struct clk_alpha_pll *pll, u32 mask, bool inverse,
#define wait_for_pll_offline(pll) \
wait_for_pll(pll, PLL_OFFLINE_ACK, 0, "offline")
#define wait_for_pll_update(pll) \
wait_for_pll(pll, PLL_UPDATE, 1, "update")
#define wait_for_pll_update_ack_set(pll) \
wait_for_pll(pll, ALPHA_PLL_ACK_LATCH, 0, "update_ack_set")
#define wait_for_pll_update_ack_clear(pll) \
wait_for_pll(pll, ALPHA_PLL_ACK_LATCH, 1, "update_ack_clear")
void clk_alpha_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
const struct alpha_pll_config *config)
{
u32 val, mask;
u32 off = pll->offset;
regmap_write(regmap, off + PLL_L_VAL, config->l);
regmap_write(regmap, off + PLL_ALPHA_VAL, config->alpha);
regmap_write(regmap, off + PLL_CONFIG_CTL, config->config_ctl_val);
regmap_write(regmap, off + PLL_CONFIG_CTL_U, config->config_ctl_hi_val);
regmap_write(regmap, PLL_L_VAL(pll), config->l);
regmap_write(regmap, PLL_ALPHA_VAL(pll), config->alpha);
regmap_write(regmap, PLL_CONFIG_CTL(pll), config->config_ctl_val);
if (pll_has_64bit_config(pll))
regmap_write(regmap, PLL_CONFIG_CTL_U(pll),
config->config_ctl_hi_val);
if (pll_alpha_width(pll) > 32)
regmap_write(regmap, PLL_ALPHA_VAL_U(pll), config->alpha_hi);
val = config->main_output_mask;
val |= config->aux_output_mask;
@ -127,6 +192,8 @@ void clk_alpha_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
val |= config->pre_div_val;
val |= config->post_div_val;
val |= config->vco_val;
val |= config->alpha_en_mask;
val |= config->alpha_mode_mask;
mask = config->main_output_mask;
mask |= config->aux_output_mask;
@ -136,20 +203,19 @@ void clk_alpha_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
mask |= config->post_div_mask;
mask |= config->vco_mask;
regmap_update_bits(regmap, off + PLL_USER_CTL, mask, val);
regmap_update_bits(regmap, PLL_USER_CTL(pll), mask, val);
if (pll->flags & SUPPORTS_FSM_MODE)
qcom_pll_set_fsm_mode(regmap, off + PLL_MODE, 6, 0);
qcom_pll_set_fsm_mode(regmap, PLL_MODE(pll), 6, 0);
}
static int clk_alpha_pll_hwfsm_enable(struct clk_hw *hw)
{
int ret;
u32 val, off;
struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
u32 val;
off = pll->offset;
ret = regmap_read(pll->clkr.regmap, off + PLL_MODE, &val);
ret = regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
if (ret)
return ret;
@ -158,7 +224,7 @@ static int clk_alpha_pll_hwfsm_enable(struct clk_hw *hw)
if (pll->flags & SUPPORTS_OFFLINE_REQ)
val &= ~PLL_OFFLINE_REQ;
ret = regmap_write(pll->clkr.regmap, off + PLL_MODE, val);
ret = regmap_write(pll->clkr.regmap, PLL_MODE(pll), val);
if (ret)
return ret;
@ -171,16 +237,15 @@ static int clk_alpha_pll_hwfsm_enable(struct clk_hw *hw)
static void clk_alpha_pll_hwfsm_disable(struct clk_hw *hw)
{
int ret;
u32 val, off;
struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
u32 val;
off = pll->offset;
ret = regmap_read(pll->clkr.regmap, off + PLL_MODE, &val);
ret = regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
if (ret)
return;
if (pll->flags & SUPPORTS_OFFLINE_REQ) {
ret = regmap_update_bits(pll->clkr.regmap, off + PLL_MODE,
ret = regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll),
PLL_OFFLINE_REQ, PLL_OFFLINE_REQ);
if (ret)
return;
@ -191,7 +256,7 @@ static void clk_alpha_pll_hwfsm_disable(struct clk_hw *hw)
}
/* Disable hwfsm */
ret = regmap_update_bits(pll->clkr.regmap, off + PLL_MODE,
ret = regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll),
PLL_FSM_ENA, 0);
if (ret)
return;
@ -202,11 +267,10 @@ static void clk_alpha_pll_hwfsm_disable(struct clk_hw *hw)
static int pll_is_enabled(struct clk_hw *hw, u32 mask)
{
int ret;
u32 val, off;
struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
u32 val;
off = pll->offset;
ret = regmap_read(pll->clkr.regmap, off + PLL_MODE, &val);
ret = regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
if (ret)
return ret;
@ -227,12 +291,10 @@ static int clk_alpha_pll_enable(struct clk_hw *hw)
{
int ret;
struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
u32 val, mask, off;
off = pll->offset;
u32 val, mask;
mask = PLL_OUTCTRL | PLL_RESET_N | PLL_BYPASSNL;
ret = regmap_read(pll->clkr.regmap, off + PLL_MODE, &val);
ret = regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
if (ret)
return ret;
@ -248,7 +310,7 @@ static int clk_alpha_pll_enable(struct clk_hw *hw)
if ((val & mask) == mask)
return 0;
ret = regmap_update_bits(pll->clkr.regmap, off + PLL_MODE,
ret = regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll),
PLL_BYPASSNL, PLL_BYPASSNL);
if (ret)
return ret;
@ -260,7 +322,7 @@ static int clk_alpha_pll_enable(struct clk_hw *hw)
mb();
udelay(5);
ret = regmap_update_bits(pll->clkr.regmap, off + PLL_MODE,
ret = regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll),
PLL_RESET_N, PLL_RESET_N);
if (ret)
return ret;
@ -269,7 +331,7 @@ static int clk_alpha_pll_enable(struct clk_hw *hw)
if (ret)
return ret;
ret = regmap_update_bits(pll->clkr.regmap, off + PLL_MODE,
ret = regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll),
PLL_OUTCTRL, PLL_OUTCTRL);
/* Ensure that the write above goes through before returning. */
@ -281,11 +343,9 @@ static void clk_alpha_pll_disable(struct clk_hw *hw)
{
int ret;
struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
u32 val, mask, off;
u32 val, mask;
off = pll->offset;
ret = regmap_read(pll->clkr.regmap, off + PLL_MODE, &val);
ret = regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
if (ret)
return;
@ -296,23 +356,25 @@ static void clk_alpha_pll_disable(struct clk_hw *hw)
}
mask = PLL_OUTCTRL;
regmap_update_bits(pll->clkr.regmap, off + PLL_MODE, mask, 0);
regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll), mask, 0);
/* Delay of 2 output clock ticks required until output is disabled */
mb();
udelay(1);
mask = PLL_RESET_N | PLL_BYPASSNL;
regmap_update_bits(pll->clkr.regmap, off + PLL_MODE, mask, 0);
}
static unsigned long alpha_pll_calc_rate(u64 prate, u32 l, u32 a)
{
return (prate * l) + ((prate * a) >> ALPHA_BITWIDTH);
regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll), mask, 0);
}
static unsigned long
alpha_pll_round_rate(unsigned long rate, unsigned long prate, u32 *l, u64 *a)
alpha_pll_calc_rate(u64 prate, u32 l, u32 a, u32 alpha_width)
{
return (prate * l) + ((prate * a) >> ALPHA_SHIFT(alpha_width));
}
static unsigned long
alpha_pll_round_rate(unsigned long rate, unsigned long prate, u32 *l, u64 *a,
u32 alpha_width)
{
u64 remainder;
u64 quotient;
@ -327,14 +389,15 @@ alpha_pll_round_rate(unsigned long rate, unsigned long prate, u32 *l, u64 *a)
}
/* Upper ALPHA_BITWIDTH bits of Alpha */
quotient = remainder << ALPHA_BITWIDTH;
quotient = remainder << ALPHA_SHIFT(alpha_width);
remainder = do_div(quotient, prate);
if (remainder)
quotient++;
*a = quotient;
return alpha_pll_calc_rate(prate, *l, *a);
return alpha_pll_calc_rate(prate, *l, *a, alpha_width);
}
static const struct pll_vco *
@ -356,71 +419,138 @@ clk_alpha_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
u32 l, low, high, ctl;
u64 a = 0, prate = parent_rate;
struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
u32 off = pll->offset;
u32 alpha_width = pll_alpha_width(pll);
regmap_read(pll->clkr.regmap, off + PLL_L_VAL, &l);
regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l);
regmap_read(pll->clkr.regmap, off + PLL_USER_CTL, &ctl);
regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &ctl);
if (ctl & PLL_ALPHA_EN) {
regmap_read(pll->clkr.regmap, off + PLL_ALPHA_VAL, &low);
if (pll->flags & SUPPORTS_16BIT_ALPHA) {
a = low & ALPHA_16BIT_MASK;
} else {
regmap_read(pll->clkr.regmap, off + PLL_ALPHA_VAL_U,
regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL(pll), &low);
if (alpha_width > 32) {
regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL_U(pll),
&high);
a = (u64)high << 32 | low;
a >>= ALPHA_REG_BITWIDTH - ALPHA_BITWIDTH;
} else {
a = low & GENMASK(alpha_width - 1, 0);
}
if (alpha_width > ALPHA_BITWIDTH)
a >>= alpha_width - ALPHA_BITWIDTH;
}
return alpha_pll_calc_rate(prate, l, a);
return alpha_pll_calc_rate(prate, l, a, alpha_width);
}
static int clk_alpha_pll_update_latch(struct clk_alpha_pll *pll,
int (*is_enabled)(struct clk_hw *))
{
int ret;
u32 mode;
if (!is_enabled(&pll->clkr.hw) ||
!(pll->flags & SUPPORTS_DYNAMIC_UPDATE))
return 0;
regmap_read(pll->clkr.regmap, PLL_MODE(pll), &mode);
/* Latch the input to the PLL */
regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll), PLL_UPDATE,
PLL_UPDATE);
/* Wait for 2 reference cycle before checking ACK bit */
udelay(1);
/*
* PLL will latch the new L, Alpha and freq control word.
* PLL will respond by raising PLL_ACK_LATCH output when new programming
* has been latched in and PLL is being updated. When
* UPDATE_LOGIC_BYPASS bit is not set, PLL_UPDATE will be cleared
* automatically by hardware when PLL_ACK_LATCH is asserted by PLL.
*/
if (mode & PLL_UPDATE_BYPASS) {
ret = wait_for_pll_update_ack_set(pll);
if (ret)
return ret;
regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll), PLL_UPDATE, 0);
} else {
ret = wait_for_pll_update(pll);
if (ret)
return ret;
}
ret = wait_for_pll_update_ack_clear(pll);
if (ret)
return ret;
/* Wait for PLL output to stabilize */
udelay(10);
return 0;
}
static int __clk_alpha_pll_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long prate,
int (*is_enabled)(struct clk_hw *))
{
struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
const struct pll_vco *vco;
u32 l, alpha_width = pll_alpha_width(pll);
u64 a;
rate = alpha_pll_round_rate(rate, prate, &l, &a, alpha_width);
vco = alpha_pll_find_vco(pll, rate);
if (pll->vco_table && !vco) {
pr_err("alpha pll not in a valid vco range\n");
return -EINVAL;
}
regmap_write(pll->clkr.regmap, PLL_L_VAL(pll), l);
if (alpha_width > ALPHA_BITWIDTH)
a <<= alpha_width - ALPHA_BITWIDTH;
if (alpha_width > 32)
regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL_U(pll), a >> 32);
regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL(pll), a);
if (vco) {
regmap_update_bits(pll->clkr.regmap, PLL_USER_CTL(pll),
PLL_VCO_MASK << PLL_VCO_SHIFT,
vco->val << PLL_VCO_SHIFT);
}
regmap_update_bits(pll->clkr.regmap, PLL_USER_CTL(pll),
PLL_ALPHA_EN, PLL_ALPHA_EN);
return clk_alpha_pll_update_latch(pll, is_enabled);
}
static int clk_alpha_pll_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long prate)
{
struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
const struct pll_vco *vco;
u32 l, off = pll->offset;
u64 a;
return __clk_alpha_pll_set_rate(hw, rate, prate,
clk_alpha_pll_is_enabled);
}
rate = alpha_pll_round_rate(rate, prate, &l, &a);
vco = alpha_pll_find_vco(pll, rate);
if (!vco) {
pr_err("alpha pll not in a valid vco range\n");
return -EINVAL;
}
regmap_write(pll->clkr.regmap, off + PLL_L_VAL, l);
if (pll->flags & SUPPORTS_16BIT_ALPHA) {
regmap_write(pll->clkr.regmap, off + PLL_ALPHA_VAL,
a & ALPHA_16BIT_MASK);
} else {
a <<= (ALPHA_REG_BITWIDTH - ALPHA_BITWIDTH);
regmap_write(pll->clkr.regmap, off + PLL_ALPHA_VAL_U, a >> 32);
}
regmap_update_bits(pll->clkr.regmap, off + PLL_USER_CTL,
PLL_VCO_MASK << PLL_VCO_SHIFT,
vco->val << PLL_VCO_SHIFT);
regmap_update_bits(pll->clkr.regmap, off + PLL_USER_CTL, PLL_ALPHA_EN,
PLL_ALPHA_EN);
return 0;
static int clk_alpha_pll_hwfsm_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long prate)
{
return __clk_alpha_pll_set_rate(hw, rate, prate,
clk_alpha_pll_hwfsm_is_enabled);
}
static long clk_alpha_pll_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
{
struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
u32 l;
u32 l, alpha_width = pll_alpha_width(pll);
u64 a;
unsigned long min_freq, max_freq;
rate = alpha_pll_round_rate(rate, *prate, &l, &a);
if (alpha_pll_find_vco(pll, rate))
rate = alpha_pll_round_rate(rate, *prate, &l, &a, alpha_width);
if (!pll->vco_table || alpha_pll_find_vco(pll, rate))
return rate;
min_freq = pll->vco_table[0].min_freq;
@ -429,6 +559,158 @@ static long clk_alpha_pll_round_rate(struct clk_hw *hw, unsigned long rate,
return clamp(rate, min_freq, max_freq);
}
static unsigned long
alpha_huayra_pll_calc_rate(u64 prate, u32 l, u32 a)
{
/*
* a contains the 16-bit alpha_val as a two's complement number in the range
* of [-0.5, 0.5).
*/
if (a >= BIT(PLL_HUAYRA_ALPHA_WIDTH - 1))
l -= 1;
return (prate * l) + (prate * a >> PLL_HUAYRA_ALPHA_WIDTH);
}
static unsigned long
alpha_huayra_pll_round_rate(unsigned long rate, unsigned long prate,
u32 *l, u32 *a)
{
u64 remainder;
u64 quotient;
quotient = rate;
remainder = do_div(quotient, prate);
*l = quotient;
if (!remainder) {
*a = 0;
return rate;
}
quotient = remainder << PLL_HUAYRA_ALPHA_WIDTH;
remainder = do_div(quotient, prate);
if (remainder)
quotient++;
/*
* alpha_val should be a two's complement number in the range
* of [-0.5, 0.5), so if quotient >= 0.5 then increment the l value
* since the alpha value will be subtracted in this case.
*/
if (quotient >= BIT(PLL_HUAYRA_ALPHA_WIDTH - 1))
*l += 1;
*a = quotient;
return alpha_huayra_pll_calc_rate(prate, *l, *a);
}
static unsigned long
alpha_pll_huayra_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
u64 rate = parent_rate, tmp;
struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
u32 l, alpha = 0, ctl, alpha_m, alpha_n;
regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l);
regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &ctl);
if (ctl & PLL_ALPHA_EN) {
regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL(pll), &alpha);
/*
* Depending upon alpha_mode, it can be treated as an M/N value or
* as a two's complement number. When alpha_mode=1,
* pll_alpha_val<15:8>=M and pll_alpha_val<7:0>=N
*
* Fout=FIN*(L+(M/N))
*
* M is a signed number (-128 to 127) and N is unsigned
* (0 to 255). M/N has to be within +/-0.5.
*
* When alpha_mode=0, it is a two's complement number in the
* range [-0.5, 0.5).
*
* Fout=FIN*(L+(alpha_val)/2^16)
*
* where alpha_val is a two's complement number.
*/
if (!(ctl & PLL_ALPHA_MODE))
return alpha_huayra_pll_calc_rate(rate, l, alpha);
alpha_m = alpha >> PLL_HUAYRA_M_SHIFT & PLL_HUAYRA_M_MASK;
alpha_n = alpha >> PLL_HUAYRA_N_SHIFT & PLL_HUAYRA_N_MASK;
rate *= l;
tmp = parent_rate;
if (alpha_m >= BIT(PLL_HUAYRA_M_WIDTH - 1)) {
alpha_m = BIT(PLL_HUAYRA_M_WIDTH) - alpha_m;
tmp *= alpha_m;
do_div(tmp, alpha_n);
rate -= tmp;
} else {
tmp *= alpha_m;
do_div(tmp, alpha_n);
rate += tmp;
}
return rate;
}
return alpha_huayra_pll_calc_rate(rate, l, alpha);
}
static int alpha_pll_huayra_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long prate)
{
struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
u32 l, a, ctl, cur_alpha = 0;
rate = alpha_huayra_pll_round_rate(rate, prate, &l, &a);
regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &ctl);
if (ctl & PLL_ALPHA_EN)
regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL(pll), &cur_alpha);
/*
* The Huayra PLL supports dynamic programming: the user can change L_VAL
* without having to go through the power-on sequence.
*/
if (clk_alpha_pll_is_enabled(hw)) {
if (cur_alpha != a) {
pr_err("clock needs to be gated %s\n",
clk_hw_get_name(hw));
return -EBUSY;
}
regmap_write(pll->clkr.regmap, PLL_L_VAL(pll), l);
/* Ensure that the write above goes through so the PLL detects the L val change. */
mb();
return wait_for_pll_enable_lock(pll);
}
regmap_write(pll->clkr.regmap, PLL_L_VAL(pll), l);
regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL(pll), a);
if (a == 0)
regmap_update_bits(pll->clkr.regmap, PLL_USER_CTL(pll),
PLL_ALPHA_EN, 0x0);
else
regmap_update_bits(pll->clkr.regmap, PLL_USER_CTL(pll),
PLL_ALPHA_EN | PLL_ALPHA_MODE, PLL_ALPHA_EN);
return 0;
}
static long alpha_pll_huayra_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
{
u32 l, a;
return alpha_huayra_pll_round_rate(rate, *prate, &l, &a);
}
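
The comment in alpha_pll_huayra_recalc_rate() above describes the two Huayra alpha modes. A rough numeric sketch of the M/N mode (alpha_mode=1), with made-up input values and an assumed 19.2 MHz reference:

/* Fout = Fin * (L + M/N); all numbers here are illustrative only */
static unsigned long huayra_mn_rate_example(void)
{
	unsigned long fin = 19200000;		/* assumed reference */
	unsigned int l = 75, m = 1, n = 4;	/* M/N must stay within +/- 0.5 */

	return fin * l + fin * m / n;		/* 1440000000 + 4800000 = 1444800000 */
}
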
const struct clk_ops clk_alpha_pll_ops = {
.enable = clk_alpha_pll_enable,
.disable = clk_alpha_pll_disable,
@ -439,13 +721,23 @@ const struct clk_ops clk_alpha_pll_ops = {
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_ops);
const struct clk_ops clk_alpha_pll_huayra_ops = {
.enable = clk_alpha_pll_enable,
.disable = clk_alpha_pll_disable,
.is_enabled = clk_alpha_pll_is_enabled,
.recalc_rate = alpha_pll_huayra_recalc_rate,
.round_rate = alpha_pll_huayra_round_rate,
.set_rate = alpha_pll_huayra_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_huayra_ops);
const struct clk_ops clk_alpha_pll_hwfsm_ops = {
.enable = clk_alpha_pll_hwfsm_enable,
.disable = clk_alpha_pll_hwfsm_disable,
.is_enabled = clk_alpha_pll_hwfsm_is_enabled,
.recalc_rate = clk_alpha_pll_recalc_rate,
.round_rate = clk_alpha_pll_round_rate,
.set_rate = clk_alpha_pll_set_rate,
.set_rate = clk_alpha_pll_hwfsm_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_hwfsm_ops);
@ -455,10 +747,10 @@ clk_alpha_pll_postdiv_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
struct clk_alpha_pll_postdiv *pll = to_clk_alpha_pll_postdiv(hw);
u32 ctl;
regmap_read(pll->clkr.regmap, pll->offset + PLL_USER_CTL, &ctl);
regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &ctl);
ctl >>= PLL_POST_DIV_SHIFT;
ctl &= PLL_POST_DIV_MASK;
ctl &= PLL_POST_DIV_MASK(pll);
return parent_rate >> fls(ctl);
}
@ -472,16 +764,48 @@ static const struct clk_div_table clk_alpha_div_table[] = {
{ }
};
static const struct clk_div_table clk_alpha_2bit_div_table[] = {
{ 0x0, 1 },
{ 0x1, 2 },
{ 0x3, 4 },
{ }
};
static long
clk_alpha_pll_postdiv_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
{
struct clk_alpha_pll_postdiv *pll = to_clk_alpha_pll_postdiv(hw);
const struct clk_div_table *table;
return divider_round_rate(hw, rate, prate, clk_alpha_div_table,
if (pll->width == 2)
table = clk_alpha_2bit_div_table;
else
table = clk_alpha_div_table;
return divider_round_rate(hw, rate, prate, table,
pll->width, CLK_DIVIDER_POWER_OF_TWO);
}
static long
clk_alpha_pll_postdiv_round_ro_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
{
struct clk_alpha_pll_postdiv *pll = to_clk_alpha_pll_postdiv(hw);
u32 ctl, div;
regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &ctl);
ctl >>= PLL_POST_DIV_SHIFT;
ctl &= BIT(pll->width) - 1;
div = 1 << fls(ctl);
if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)
*prate = clk_hw_round_rate(clk_hw_get_parent(hw), div * rate);
return DIV_ROUND_UP_ULL((u64)*prate, div);
}
static int clk_alpha_pll_postdiv_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
@ -491,8 +815,8 @@ static int clk_alpha_pll_postdiv_set_rate(struct clk_hw *hw, unsigned long rate,
/* 16 -> 0xf, 8 -> 0x7, 4 -> 0x3, 2 -> 0x1, 1 -> 0x0 */
div = DIV_ROUND_UP_ULL((u64)parent_rate, rate) - 1;
return regmap_update_bits(pll->clkr.regmap, pll->offset + PLL_USER_CTL,
PLL_POST_DIV_MASK << PLL_POST_DIV_SHIFT,
return regmap_update_bits(pll->clkr.regmap, PLL_USER_CTL(pll),
PLL_POST_DIV_MASK(pll) << PLL_POST_DIV_SHIFT,
div << PLL_POST_DIV_SHIFT);
}
@ -502,3 +826,9 @@ const struct clk_ops clk_alpha_pll_postdiv_ops = {
.set_rate = clk_alpha_pll_postdiv_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_ops);
const struct clk_ops clk_alpha_pll_postdiv_ro_ops = {
.round_rate = clk_alpha_pll_postdiv_round_ro_rate,
.recalc_rate = clk_alpha_pll_postdiv_recalc_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_ro_ops);


@ -17,6 +17,30 @@
#include <linux/clk-provider.h>
#include "clk-regmap.h"
/* Alpha PLL types */
enum {
CLK_ALPHA_PLL_TYPE_DEFAULT,
CLK_ALPHA_PLL_TYPE_HUAYRA,
CLK_ALPHA_PLL_TYPE_BRAMMO,
CLK_ALPHA_PLL_TYPE_MAX,
};
enum {
PLL_OFF_L_VAL,
PLL_OFF_ALPHA_VAL,
PLL_OFF_ALPHA_VAL_U,
PLL_OFF_USER_CTL,
PLL_OFF_USER_CTL_U,
PLL_OFF_CONFIG_CTL,
PLL_OFF_CONFIG_CTL_U,
PLL_OFF_TEST_CTL,
PLL_OFF_TEST_CTL_U,
PLL_OFF_STATUS,
PLL_OFF_MAX_REGS
};
extern const u8 clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_MAX][PLL_OFF_MAX_REGS];
struct pll_vco {
unsigned long min_freq;
unsigned long max_freq;
@ -27,16 +51,18 @@ struct pll_vco {
* struct clk_alpha_pll - phase locked loop (PLL)
* @offset: base address of registers
* @vco_table: array of VCO settings
* @regs: alpha pll register map (see @clk_alpha_pll_regs)
* @clkr: regmap clock handle
*/
struct clk_alpha_pll {
u32 offset;
const u8 *regs;
const struct pll_vco *vco_table;
size_t num_vco;
#define SUPPORTS_OFFLINE_REQ BIT(0)
#define SUPPORTS_16BIT_ALPHA BIT(1)
#define SUPPORTS_FSM_MODE BIT(2)
#define SUPPORTS_DYNAMIC_UPDATE BIT(3)
u8 flags;
struct clk_regmap clkr;
@ -45,12 +71,14 @@ struct clk_alpha_pll {
/**
* struct clk_alpha_pll_postdiv - phase locked loop (PLL) post-divider
* @offset: base address of registers
* @regs: alpha pll register map (see @clk_alpha_pll_regs)
* @width: width of post-divider
* @clkr: regmap clock handle
*/
struct clk_alpha_pll_postdiv {
u32 offset;
u8 width;
const u8 *regs;
struct clk_regmap clkr;
};
@ -58,12 +86,15 @@ struct clk_alpha_pll_postdiv {
struct alpha_pll_config {
u32 l;
u32 alpha;
u32 alpha_hi;
u32 config_ctl_val;
u32 config_ctl_hi_val;
u32 main_output_mask;
u32 aux_output_mask;
u32 aux2_output_mask;
u32 early_output_mask;
u32 alpha_en_mask;
u32 alpha_mode_mask;
u32 pre_div_val;
u32 pre_div_mask;
u32 post_div_val;
@ -75,6 +106,8 @@ struct alpha_pll_config {
extern const struct clk_ops clk_alpha_pll_ops;
extern const struct clk_ops clk_alpha_pll_hwfsm_ops;
extern const struct clk_ops clk_alpha_pll_postdiv_ops;
extern const struct clk_ops clk_alpha_pll_huayra_ops;
extern const struct clk_ops clk_alpha_pll_postdiv_ro_ops;
void clk_alpha_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
const struct alpha_pll_config *config);


@ -25,16 +25,6 @@ struct freq_tbl {
u16 n;
};
/**
* struct parent_map - map table for PLL source select configuration values
* @src: source PLL
* @cfg: configuration value
*/
struct parent_map {
u8 src;
u8 cfg;
};
/**
* struct mn - M/N:D counter
* @mnctr_en_bit: bit to enable mn counter


@ -23,6 +23,29 @@ static inline struct clk_regmap_div *to_clk_regmap_div(struct clk_hw *hw)
return container_of(to_clk_regmap(hw), struct clk_regmap_div, clkr);
}
static long div_round_ro_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
{
struct clk_regmap_div *divider = to_clk_regmap_div(hw);
struct clk_regmap *clkr = &divider->clkr;
u32 div;
struct clk_hw *hw_parent = clk_hw_get_parent(hw);
regmap_read(clkr->regmap, divider->reg, &div);
div >>= divider->shift;
div &= BIT(divider->width) - 1;
div += 1;
if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) {
if (!hw_parent)
return -EINVAL;
*prate = clk_hw_round_rate(hw_parent, rate * div);
}
return DIV_ROUND_UP_ULL((u64)*prate, div);
}
static long div_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
{
@ -59,7 +82,7 @@ static unsigned long div_recalc_rate(struct clk_hw *hw,
div &= BIT(divider->width) - 1;
return divider_recalc_rate(hw, parent_rate, div, NULL,
CLK_DIVIDER_ROUND_CLOSEST);
CLK_DIVIDER_ROUND_CLOSEST, divider->width);
}
const struct clk_ops clk_regmap_div_ops = {
@ -68,3 +91,9 @@ const struct clk_ops clk_regmap_div_ops = {
.recalc_rate = div_recalc_rate,
};
EXPORT_SYMBOL_GPL(clk_regmap_div_ops);
const struct clk_ops clk_regmap_div_ro_ops = {
.round_rate = div_round_ro_rate,
.recalc_rate = div_recalc_rate,
};
EXPORT_SYMBOL_GPL(clk_regmap_div_ro_ops);
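
The read-only ops above never program the divider: div_round_ro_rate() reads back the fixed field, and with CLK_SET_RATE_PARENT it forwards the request to the parent scaled by that divider. A small sketch with illustrative numbers:

/* Illustrative only: a field value of 3 means a fixed divide-by-4, so a
 * 100 MHz request asks the parent for 400 MHz and reports prate / 4. */
static unsigned long div_ro_example(unsigned long parent_rate)
{
	unsigned int div = 3 + 1;	/* register field + 1, as in div_round_ro_rate() */

	return parent_rate / div;
}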


@ -25,5 +25,6 @@ struct clk_regmap_div {
};
extern const struct clk_ops clk_regmap_div_ops;
extern const struct clk_ops clk_regmap_div_ro_ops;
#endif


@ -0,0 +1,231 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2017, Linaro Limited
* Author: Georgi Djakov <georgi.djakov@linaro.org>
*/
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/regmap.h>
#include "clk-regmap-mux-div.h"
#define CMD_RCGR 0x0
#define CMD_RCGR_UPDATE BIT(0)
#define CMD_RCGR_DIRTY_CFG BIT(4)
#define CMD_RCGR_ROOT_OFF BIT(31)
#define CFG_RCGR 0x4
#define to_clk_regmap_mux_div(_hw) \
container_of(to_clk_regmap(_hw), struct clk_regmap_mux_div, clkr)
int mux_div_set_src_div(struct clk_regmap_mux_div *md, u32 src, u32 div)
{
int ret, count;
u32 val, mask;
const char *name = clk_hw_get_name(&md->clkr.hw);
val = (div << md->hid_shift) | (src << md->src_shift);
mask = ((BIT(md->hid_width) - 1) << md->hid_shift) |
((BIT(md->src_width) - 1) << md->src_shift);
ret = regmap_update_bits(md->clkr.regmap, CFG_RCGR + md->reg_offset,
mask, val);
if (ret)
return ret;
ret = regmap_update_bits(md->clkr.regmap, CMD_RCGR + md->reg_offset,
CMD_RCGR_UPDATE, CMD_RCGR_UPDATE);
if (ret)
return ret;
/* Wait for update to take effect */
for (count = 500; count > 0; count--) {
ret = regmap_read(md->clkr.regmap, CMD_RCGR + md->reg_offset,
&val);
if (ret)
return ret;
if (!(val & CMD_RCGR_UPDATE))
return 0;
udelay(1);
}
pr_err("%s: RCG did not update its configuration", name);
return -EBUSY;
}
EXPORT_SYMBOL_GPL(mux_div_set_src_div);
static void mux_div_get_src_div(struct clk_regmap_mux_div *md, u32 *src,
u32 *div)
{
u32 val, d, s;
const char *name = clk_hw_get_name(&md->clkr.hw);
regmap_read(md->clkr.regmap, CMD_RCGR + md->reg_offset, &val);
if (val & CMD_RCGR_DIRTY_CFG) {
pr_err("%s: RCG configuration is pending\n", name);
return;
}
regmap_read(md->clkr.regmap, CFG_RCGR + md->reg_offset, &val);
s = (val >> md->src_shift);
s &= BIT(md->src_width) - 1;
*src = s;
d = (val >> md->hid_shift);
d &= BIT(md->hid_width) - 1;
*div = d;
}
static inline bool is_better_rate(unsigned long req, unsigned long best,
unsigned long new)
{
return (req <= new && new < best) || (best < req && best < new);
}
static int mux_div_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw);
unsigned int i, div, max_div;
unsigned long actual_rate, best_rate = 0;
unsigned long req_rate = req->rate;
for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
struct clk_hw *parent = clk_hw_get_parent_by_index(hw, i);
unsigned long parent_rate = clk_hw_get_rate(parent);
max_div = BIT(md->hid_width) - 1;
for (div = 1; div < max_div; div++) {
parent_rate = mult_frac(req_rate, div, 2);
parent_rate = clk_hw_round_rate(parent, parent_rate);
actual_rate = mult_frac(parent_rate, 2, div);
if (is_better_rate(req_rate, best_rate, actual_rate)) {
best_rate = actual_rate;
req->rate = best_rate;
req->best_parent_rate = parent_rate;
req->best_parent_hw = parent;
}
if (actual_rate < req_rate || best_rate <= req_rate)
break;
}
}
if (!best_rate)
return -EINVAL;
return 0;
}
static int __mux_div_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
unsigned long prate, u32 src)
{
struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw);
int ret;
u32 div, max_div, best_src = 0, best_div = 0;
unsigned int i;
unsigned long actual_rate, best_rate = 0;
for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
struct clk_hw *parent = clk_hw_get_parent_by_index(hw, i);
unsigned long parent_rate = clk_hw_get_rate(parent);
max_div = BIT(md->hid_width) - 1;
for (div = 1; div < max_div; div++) {
parent_rate = mult_frac(rate, div, 2);
parent_rate = clk_hw_round_rate(parent, parent_rate);
actual_rate = mult_frac(parent_rate, 2, div);
if (is_better_rate(rate, best_rate, actual_rate)) {
best_rate = actual_rate;
best_src = md->parent_map[i];
best_div = div - 1;
}
if (actual_rate < rate || best_rate <= rate)
break;
}
}
ret = mux_div_set_src_div(md, best_src, best_div);
if (!ret) {
md->div = best_div;
md->src = best_src;
}
return ret;
}
static u8 mux_div_get_parent(struct clk_hw *hw)
{
struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw);
const char *name = clk_hw_get_name(hw);
u32 i, div, src = 0;
mux_div_get_src_div(md, &src, &div);
for (i = 0; i < clk_hw_get_num_parents(hw); i++)
if (src == md->parent_map[i])
return i;
pr_err("%s: Can't find parent with src %d\n", name, src);
return 0;
}
static int mux_div_set_parent(struct clk_hw *hw, u8 index)
{
struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw);
return mux_div_set_src_div(md, md->parent_map[index], md->div);
}
static int mux_div_set_rate(struct clk_hw *hw,
unsigned long rate, unsigned long prate)
{
struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw);
return __mux_div_set_rate_and_parent(hw, rate, prate, md->src);
}
static int mux_div_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
unsigned long prate, u8 index)
{
struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw);
return __mux_div_set_rate_and_parent(hw, rate, prate,
md->parent_map[index]);
}
static unsigned long mux_div_recalc_rate(struct clk_hw *hw, unsigned long prate)
{
struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw);
u32 div, src;
int i, num_parents = clk_hw_get_num_parents(hw);
const char *name = clk_hw_get_name(hw);
mux_div_get_src_div(md, &src, &div);
for (i = 0; i < num_parents; i++)
if (src == md->parent_map[i]) {
struct clk_hw *p = clk_hw_get_parent_by_index(hw, i);
unsigned long parent_rate = clk_hw_get_rate(p);
return mult_frac(parent_rate, 2, div + 1);
}
pr_err("%s: Can't find parent %d\n", name, src);
return 0;
}
const struct clk_ops clk_regmap_mux_div_ops = {
.get_parent = mux_div_get_parent,
.set_parent = mux_div_set_parent,
.set_rate = mux_div_set_rate,
.set_rate_and_parent = mux_div_set_rate_and_parent,
.determine_rate = mux_div_determine_rate,
.recalc_rate = mux_div_recalc_rate,
};
EXPORT_SYMBOL_GPL(clk_regmap_mux_div_ops);
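
mux_div_determine_rate() above walks every parent and every half-integer divider candidate, asking the parent for req * div / 2 and scoring the rate it would get back via is_better_rate(). One iteration sketched with made-up numbers:

/* Illustrative only: one candidate from the divider search */
static unsigned long mux_div_candidate_example(void)
{
	unsigned long req = 400000000;
	unsigned int div = 4;			/* loop candidate */
	/* the real code rounds this through clk_hw_round_rate() */
	unsigned long parent = req * div / 2;	/* 800000000 */

	return parent * 2 / div;		/* back to 400000000, an exact match */
}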


@ -0,0 +1,44 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2017, Linaro Limited
* Author: Georgi Djakov <georgi.djakov@linaro.org>
*/
#ifndef __QCOM_CLK_REGMAP_MUX_DIV_H__
#define __QCOM_CLK_REGMAP_MUX_DIV_H__
#include <linux/clk-provider.h>
#include "clk-regmap.h"
/**
* struct clk_regmap_mux_div - combined mux/divider clock
* @reg_offset: offset of the mux/divider register
* @hid_width: number of bits in half integer divider
* @hid_shift: lowest bit of hid value field
* @src_width: number of bits in source select
* @src_shift: lowest bit of source select field
* @div: the divider raw configuration value
* @src: the mux index which will be used if the clock is enabled
* @parent_map: map from parent_names index to src_sel field
* @clkr: handle between common and hardware-specific interfaces
* @pclk: the input PLL clock
* @clk_nb: clock notifier for rate changes of the input PLL
*/
struct clk_regmap_mux_div {
u32 reg_offset;
u32 hid_width;
u32 hid_shift;
u32 src_width;
u32 src_shift;
u32 div;
u32 src;
const u32 *parent_map;
struct clk_regmap clkr;
struct clk *pclk;
struct notifier_block clk_nb;
};
extern const struct clk_ops clk_regmap_mux_div_ops;
extern int mux_div_set_src_div(struct clk_regmap_mux_div *md, u32 src, u32 div);
#endif


@ -35,6 +35,9 @@ static u8 mux_get_parent(struct clk_hw *hw)
val >>= mux->shift;
val &= mask;
if (mux->parent_map)
return qcom_find_src_index(hw, mux->parent_map, val);
return val;
}
@ -45,6 +48,9 @@ static int mux_set_parent(struct clk_hw *hw, u8 index)
unsigned int mask = GENMASK(mux->width + mux->shift - 1, mux->shift);
unsigned int val;
if (mux->parent_map)
index = mux->parent_map[index].cfg;
val = index;
val <<= mux->shift;


@ -16,11 +16,13 @@
#include <linux/clk-provider.h>
#include "clk-regmap.h"
#include "common.h"
struct clk_regmap_mux {
u32 reg;
u32 shift;
u32 width;
const struct parent_map *parent_map;
struct clk_regmap clkr;
};


@ -0,0 +1,302 @@
/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/types.h>
#define REG_DIV_CTL1 0x43
#define DIV_CTL1_DIV_FACTOR_MASK GENMASK(2, 0)
#define REG_EN_CTL 0x46
#define REG_EN_MASK BIT(7)
struct clkdiv {
struct regmap *regmap;
u16 base;
spinlock_t lock;
struct clk_hw hw;
unsigned int cxo_period_ns;
};
static inline struct clkdiv *to_clkdiv(struct clk_hw *hw)
{
return container_of(hw, struct clkdiv, hw);
}
static inline unsigned int div_factor_to_div(unsigned int div_factor)
{
if (!div_factor)
div_factor = 1;
return 1 << (div_factor - 1);
}
static inline unsigned int div_to_div_factor(unsigned int div)
{
return min(ilog2(div) + 1, 7);
}
static bool is_spmi_pmic_clkdiv_enabled(struct clkdiv *clkdiv)
{
unsigned int val = 0;
regmap_read(clkdiv->regmap, clkdiv->base + REG_EN_CTL, &val);
return val & REG_EN_MASK;
}
static int
__spmi_pmic_clkdiv_set_enable_state(struct clkdiv *clkdiv, bool enable,
unsigned int div_factor)
{
int ret;
unsigned int ns = clkdiv->cxo_period_ns;
unsigned int div = div_factor_to_div(div_factor);
ret = regmap_update_bits(clkdiv->regmap, clkdiv->base + REG_EN_CTL,
REG_EN_MASK, enable ? REG_EN_MASK : 0);
if (ret)
return ret;
if (enable)
ndelay((2 + 3 * div) * ns);
else
ndelay(3 * div * ns);
return 0;
}
static int spmi_pmic_clkdiv_set_enable_state(struct clkdiv *clkdiv, bool enable)
{
unsigned int div_factor;
regmap_read(clkdiv->regmap, clkdiv->base + REG_DIV_CTL1, &div_factor);
div_factor &= DIV_CTL1_DIV_FACTOR_MASK;
return __spmi_pmic_clkdiv_set_enable_state(clkdiv, enable, div_factor);
}
static int clk_spmi_pmic_div_enable(struct clk_hw *hw)
{
struct clkdiv *clkdiv = to_clkdiv(hw);
unsigned long flags;
int ret;
spin_lock_irqsave(&clkdiv->lock, flags);
ret = spmi_pmic_clkdiv_set_enable_state(clkdiv, true);
spin_unlock_irqrestore(&clkdiv->lock, flags);
return ret;
}
static void clk_spmi_pmic_div_disable(struct clk_hw *hw)
{
struct clkdiv *clkdiv = to_clkdiv(hw);
unsigned long flags;
spin_lock_irqsave(&clkdiv->lock, flags);
spmi_pmic_clkdiv_set_enable_state(clkdiv, false);
spin_unlock_irqrestore(&clkdiv->lock, flags);
}
static long clk_spmi_pmic_div_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
unsigned int div, div_factor;
div = DIV_ROUND_UP(*parent_rate, rate);
div_factor = div_to_div_factor(div);
div = div_factor_to_div(div_factor);
return *parent_rate / div;
}
static unsigned long
clk_spmi_pmic_div_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
struct clkdiv *clkdiv = to_clkdiv(hw);
unsigned int div_factor;
regmap_read(clkdiv->regmap, clkdiv->base + REG_DIV_CTL1, &div_factor);
div_factor &= DIV_CTL1_DIV_FACTOR_MASK;
return parent_rate / div_factor_to_div(div_factor);
}
static int clk_spmi_pmic_div_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct clkdiv *clkdiv = to_clkdiv(hw);
unsigned int div_factor = div_to_div_factor(parent_rate / rate);
unsigned long flags;
bool enabled;
int ret;
spin_lock_irqsave(&clkdiv->lock, flags);
enabled = is_spmi_pmic_clkdiv_enabled(clkdiv);
if (enabled) {
ret = spmi_pmic_clkdiv_set_enable_state(clkdiv, false);
if (ret)
goto unlock;
}
ret = regmap_update_bits(clkdiv->regmap, clkdiv->base + REG_DIV_CTL1,
DIV_CTL1_DIV_FACTOR_MASK, div_factor);
if (ret)
goto unlock;
if (enabled)
ret = __spmi_pmic_clkdiv_set_enable_state(clkdiv, true,
div_factor);
unlock:
spin_unlock_irqrestore(&clkdiv->lock, flags);
return ret;
}
static const struct clk_ops clk_spmi_pmic_div_ops = {
.enable = clk_spmi_pmic_div_enable,
.disable = clk_spmi_pmic_div_disable,
.set_rate = clk_spmi_pmic_div_set_rate,
.recalc_rate = clk_spmi_pmic_div_recalc_rate,
.round_rate = clk_spmi_pmic_div_round_rate,
};
struct spmi_pmic_div_clk_cc {
int nclks;
struct clkdiv clks[];
};
static struct clk_hw *
spmi_pmic_div_clk_hw_get(struct of_phandle_args *clkspec, void *data)
{
struct spmi_pmic_div_clk_cc *cc = data;
int idx = clkspec->args[0] - 1; /* Start at 1 instead of 0 */
if (idx < 0 || idx >= cc->nclks) {
pr_err("%s: index value %u is invalid; allowed range [1, %d]\n",
__func__, clkspec->args[0], cc->nclks);
return ERR_PTR(-EINVAL);
}
return &cc->clks[idx].hw;
}
static int spmi_pmic_clkdiv_probe(struct platform_device *pdev)
{
struct spmi_pmic_div_clk_cc *cc;
struct clk_init_data init = {};
struct clkdiv *clkdiv;
struct clk *cxo;
struct regmap *regmap;
struct device *dev = &pdev->dev;
struct device_node *of_node = dev->of_node;
const char *parent_name;
int nclks, i, ret, cxo_hz;
char name[20];
u32 start;
ret = of_property_read_u32(of_node, "reg", &start);
if (ret < 0) {
dev_err(dev, "reg property reading failed\n");
return ret;
}
regmap = dev_get_regmap(dev->parent, NULL);
if (!regmap) {
dev_err(dev, "Couldn't get parent's regmap\n");
return -EINVAL;
}
ret = of_property_read_u32(of_node, "qcom,num-clkdivs", &nclks);
if (ret < 0) {
dev_err(dev, "qcom,num-clkdivs property reading failed, ret=%d\n",
ret);
return ret;
}
if (!nclks)
return -EINVAL;
cc = devm_kzalloc(dev, sizeof(*cc) + sizeof(*cc->clks) * nclks,
GFP_KERNEL);
if (!cc)
return -ENOMEM;
cc->nclks = nclks;
cxo = clk_get(dev, "xo");
if (IS_ERR(cxo)) {
ret = PTR_ERR(cxo);
if (ret != -EPROBE_DEFER)
dev_err(dev, "failed to get xo clock\n");
return ret;
}
cxo_hz = clk_get_rate(cxo);
clk_put(cxo);
parent_name = of_clk_get_parent_name(of_node, 0);
if (!parent_name) {
dev_err(dev, "missing parent clock\n");
return -ENODEV;
}
init.name = name;
init.parent_names = &parent_name;
init.num_parents = 1;
init.ops = &clk_spmi_pmic_div_ops;
for (i = 0, clkdiv = cc->clks; i < nclks; i++) {
snprintf(name, sizeof(name), "div_clk%d", i + 1);
spin_lock_init(&clkdiv[i].lock);
clkdiv[i].base = start + i * 0x100;
clkdiv[i].regmap = regmap;
clkdiv[i].cxo_period_ns = NSEC_PER_SEC / cxo_hz;
clkdiv[i].hw.init = &init;
ret = devm_clk_hw_register(dev, &clkdiv[i].hw);
if (ret)
return ret;
}
return devm_of_clk_add_hw_provider(dev, spmi_pmic_div_clk_hw_get, cc);
}
static const struct of_device_id spmi_pmic_clkdiv_match_table[] = {
{ .compatible = "qcom,spmi-clkdiv" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, spmi_pmic_clkdiv_match_table);
static struct platform_driver spmi_pmic_clkdiv_driver = {
.driver = {
.name = "qcom,spmi-pmic-clkdiv",
.of_match_table = spmi_pmic_clkdiv_match_table,
},
.probe = spmi_pmic_clkdiv_probe,
};
module_platform_driver(spmi_pmic_clkdiv_driver);
MODULE_DESCRIPTION("QCOM SPMI PMIC clkdiv driver");
MODULE_LICENSE("GPL v2");


@ -20,7 +20,6 @@ struct qcom_reset_map;
struct regmap;
struct freq_tbl;
struct clk_hw;
struct parent_map;
#define PLL_LOCK_COUNT_SHIFT 8
#define PLL_LOCK_COUNT_MASK 0x3f
@ -39,6 +38,16 @@ struct qcom_cc_desc {
size_t num_gdscs;
};
/**
* struct parent_map - map table for source select configuration values
* @src: source
* @cfg: configuration value
*/
struct parent_map {
u8 src;
u8 cfg;
};
extern const struct freq_tbl *qcom_find_freq(const struct freq_tbl *f,
unsigned long rate);
extern const struct freq_tbl *qcom_find_freq_floor(const struct freq_tbl *f,

File diff suppressed because it is too large


@ -1259,20 +1259,25 @@ static struct clk_branch gcc_ultaudio_ahbfabric_ixfabric_lpm_clk = {
};
static const struct freq_tbl ftbl_gcc_ultaudio_lpaif_i2s_clk[] = {
F(128000, P_XO, 10, 1, 15),
F(256000, P_XO, 5, 1, 15),
F(384000, P_XO, 5, 1, 10),
F(512000, P_XO, 5, 2, 15),
F(576000, P_XO, 5, 3, 20),
F(705600, P_GPLL1, 16, 1, 80),
F(768000, P_XO, 5, 1, 5),
F(800000, P_XO, 5, 5, 24),
F(1024000, P_GPLL1, 14, 1, 63),
F(1024000, P_XO, 5, 4, 15),
F(1152000, P_XO, 1, 3, 50),
F(1411200, P_GPLL1, 16, 1, 40),
F(1536000, P_XO, 1, 2, 25),
F(1600000, P_XO, 12, 0, 0),
F(2048000, P_GPLL1, 9, 1, 49),
F(1728000, P_XO, 5, 9, 20),
F(2048000, P_XO, 5, 8, 15),
F(2304000, P_XO, 5, 3, 5),
F(2400000, P_XO, 8, 0, 0),
F(2822400, P_GPLL1, 16, 1, 20),
F(3072000, P_GPLL1, 14, 1, 21),
F(3072000, P_XO, 5, 4, 5),
F(4096000, P_GPLL1, 9, 2, 49),
F(4800000, P_XO, 4, 0, 0),
F(5644800, P_GPLL1, 16, 1, 10),
@ -1431,6 +1436,7 @@ static struct clk_branch gcc_ultaudio_stc_xo_clk = {
static const struct freq_tbl ftbl_codec_clk[] = {
F(9600000, P_XO, 2, 0, 0),
F(12288000, P_XO, 1, 16, 25),
F(19200000, P_XO, 1, 0, 0),
F(11289600, P_EXT_MCLK, 1, 0, 0),
{ }
@ -1438,6 +1444,7 @@ static const struct freq_tbl ftbl_codec_clk[] = {
static struct clk_rcg2 codec_digcodec_clk_src = {
.cmd_rcgr = 0x1c09c,
.mnd_width = 8,
.hid_width = 5,
.parent_map = gcc_xo_gpll1_emclk_sleep_map,
.freq_tbl = ftbl_codec_clk,


@ -73,6 +73,7 @@ static struct clk_fixed_factor xo = {
static struct clk_alpha_pll gpll0_early = {
.offset = 0x00000,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.clkr = {
.enable_reg = 0x1480,
.enable_mask = BIT(0),
@ -88,6 +89,7 @@ static struct clk_alpha_pll gpll0_early = {
static struct clk_alpha_pll_postdiv gpll0 = {
.offset = 0x00000,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.clkr.hw.init = &(struct clk_init_data)
{
.name = "gpll0",
@ -99,6 +101,7 @@ static struct clk_alpha_pll_postdiv gpll0 = {
static struct clk_alpha_pll gpll4_early = {
.offset = 0x1dc0,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.clkr = {
.enable_reg = 0x1480,
.enable_mask = BIT(4),
@ -114,6 +117,7 @@ static struct clk_alpha_pll gpll4_early = {
static struct clk_alpha_pll_postdiv gpll4 = {
.offset = 0x1dc0,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.clkr.hw.init = &(struct clk_init_data)
{
.name = "gpll4",


@ -227,6 +227,7 @@ static struct clk_fixed_factor xo = {
static struct clk_alpha_pll gpll0_early = {
.offset = 0x00000,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.clkr = {
.enable_reg = 0x52000,
.enable_mask = BIT(0),
@ -252,6 +253,7 @@ static struct clk_fixed_factor gpll0_early_div = {
static struct clk_alpha_pll_postdiv gpll0 = {
.offset = 0x00000,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll0",
.parent_names = (const char *[]){ "gpll0_early" },
@ -262,6 +264,7 @@ static struct clk_alpha_pll_postdiv gpll0 = {
static struct clk_alpha_pll gpll4_early = {
.offset = 0x77000,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.clkr = {
.enable_reg = 0x52000,
.enable_mask = BIT(4),
@ -276,6 +279,7 @@ static struct clk_alpha_pll gpll4_early = {
static struct clk_alpha_pll_postdiv gpll4 = {
.offset = 0x77000,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll4",
.parent_names = (const char *[]){ "gpll4_early" },


@ -267,6 +267,7 @@ static struct pll_vco mmpll_t_vco[] = {
static struct clk_alpha_pll mmpll0_early = {
.offset = 0x0,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.vco_table = mmpll_p_vco,
.num_vco = ARRAY_SIZE(mmpll_p_vco),
.clkr = {
@ -283,6 +284,7 @@ static struct clk_alpha_pll mmpll0_early = {
static struct clk_alpha_pll_postdiv mmpll0 = {
.offset = 0x0,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.width = 4,
.clkr.hw.init = &(struct clk_init_data){
.name = "mmpll0",
@ -295,6 +297,7 @@ static struct clk_alpha_pll_postdiv mmpll0 = {
static struct clk_alpha_pll mmpll1_early = {
.offset = 0x30,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.vco_table = mmpll_p_vco,
.num_vco = ARRAY_SIZE(mmpll_p_vco),
.clkr = {
@ -311,6 +314,7 @@ static struct clk_alpha_pll mmpll1_early = {
static struct clk_alpha_pll_postdiv mmpll1 = {
.offset = 0x30,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.width = 4,
.clkr.hw.init = &(struct clk_init_data){
.name = "mmpll1",
@ -323,6 +327,7 @@ static struct clk_alpha_pll_postdiv mmpll1 = {
static struct clk_alpha_pll mmpll2_early = {
.offset = 0x4100,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.vco_table = mmpll_gfx_vco,
.num_vco = ARRAY_SIZE(mmpll_gfx_vco),
.clkr.hw.init = &(struct clk_init_data){
@ -335,6 +340,7 @@ static struct clk_alpha_pll mmpll2_early = {
static struct clk_alpha_pll_postdiv mmpll2 = {
.offset = 0x4100,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.width = 4,
.clkr.hw.init = &(struct clk_init_data){
.name = "mmpll2",
@ -347,6 +353,7 @@ static struct clk_alpha_pll_postdiv mmpll2 = {
static struct clk_alpha_pll mmpll3_early = {
.offset = 0x60,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.vco_table = mmpll_p_vco,
.num_vco = ARRAY_SIZE(mmpll_p_vco),
.clkr.hw.init = &(struct clk_init_data){
@ -359,6 +366,7 @@ static struct clk_alpha_pll mmpll3_early = {
static struct clk_alpha_pll_postdiv mmpll3 = {
.offset = 0x60,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.width = 4,
.clkr.hw.init = &(struct clk_init_data){
.name = "mmpll3",
@ -371,6 +379,7 @@ static struct clk_alpha_pll_postdiv mmpll3 = {
static struct clk_alpha_pll mmpll4_early = {
.offset = 0x90,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.vco_table = mmpll_t_vco,
.num_vco = ARRAY_SIZE(mmpll_t_vco),
.clkr.hw.init = &(struct clk_init_data){
@ -383,6 +392,7 @@ static struct clk_alpha_pll mmpll4_early = {
static struct clk_alpha_pll_postdiv mmpll4 = {
.offset = 0x90,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.width = 2,
.clkr.hw.init = &(struct clk_init_data){
.name = "mmpll4",
@ -395,6 +405,7 @@ static struct clk_alpha_pll_postdiv mmpll4 = {
static struct clk_alpha_pll mmpll5_early = {
.offset = 0xc0,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.vco_table = mmpll_p_vco,
.num_vco = ARRAY_SIZE(mmpll_p_vco),
.clkr.hw.init = &(struct clk_init_data){
@ -407,6 +418,7 @@ static struct clk_alpha_pll mmpll5_early = {
static struct clk_alpha_pll_postdiv mmpll5 = {
.offset = 0xc0,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.width = 4,
.clkr.hw.init = &(struct clk_init_data){
.name = "mmpll5",
@ -419,6 +431,7 @@ static struct clk_alpha_pll_postdiv mmpll5 = {
static struct clk_alpha_pll mmpll8_early = {
.offset = 0x4130,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.vco_table = mmpll_gfx_vco,
.num_vco = ARRAY_SIZE(mmpll_gfx_vco),
.clkr.hw.init = &(struct clk_init_data){
@ -431,6 +444,7 @@ static struct clk_alpha_pll mmpll8_early = {
static struct clk_alpha_pll_postdiv mmpll8 = {
.offset = 0x4130,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.width = 4,
.clkr.hw.init = &(struct clk_init_data){
.name = "mmpll8",
@ -443,6 +457,7 @@ static struct clk_alpha_pll_postdiv mmpll8 = {
static struct clk_alpha_pll mmpll9_early = {
.offset = 0x4200,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.vco_table = mmpll_t_vco,
.num_vco = ARRAY_SIZE(mmpll_t_vco),
.clkr.hw.init = &(struct clk_init_data){
@ -455,6 +470,7 @@ static struct clk_alpha_pll mmpll9_early = {
static struct clk_alpha_pll_postdiv mmpll9 = {
.offset = 0x4200,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.width = 2,
.clkr.hw.init = &(struct clk_init_data){
.name = "mmpll9",


@ -341,7 +341,7 @@ void __init cpg_mstp_add_clk_domain(struct device_node *np)
return;
pd->name = np->name;
pd->flags = GENPD_FLAG_PM_CLK;
pd->flags = GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP;
pd->attach_dev = cpg_mstp_attach_dev;
pd->detach_dev = cpg_mstp_detach_dev;
pm_genpd_init(pd, &pm_domain_always_on_gov, false);


@ -115,6 +115,7 @@ static const struct cpg_core_clk r8a7796_core_clks[] __initconst = {
};
static const struct mssr_mod_clk r8a7796_mod_clks[] __initconst = {
DEF_MOD("fdp1-0", 119, R8A7796_CLK_S0D1),
DEF_MOD("scif5", 202, R8A7796_CLK_S3D4),
DEF_MOD("scif4", 203, R8A7796_CLK_S3D4),
DEF_MOD("scif3", 204, R8A7796_CLK_S3D4),

Some files were not shown because too many files have changed in this diff.