/*
* Synopsys HSDK SDP Generic PLL clock driver
*
* Copyright (C) 2017 Synopsys
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#define CGU_PLL_CTRL 0x000 /* ARC PLL control register */
#define CGU_PLL_STATUS 0x004 /* ARC PLL status register */
#define CGU_PLL_FMEAS 0x008 /* ARC PLL frequency measurement register */
#define CGU_PLL_MON 0x00C /* ARC PLL monitor register */
#define CGU_PLL_CTRL_ODIV_SHIFT 2
#define CGU_PLL_CTRL_IDIV_SHIFT 4
#define CGU_PLL_CTRL_FBDIV_SHIFT 9
#define CGU_PLL_CTRL_BAND_SHIFT 20
#define CGU_PLL_CTRL_ODIV_MASK GENMASK(3, CGU_PLL_CTRL_ODIV_SHIFT)
#define CGU_PLL_CTRL_IDIV_MASK GENMASK(8, CGU_PLL_CTRL_IDIV_SHIFT)
#define CGU_PLL_CTRL_FBDIV_MASK GENMASK(15, CGU_PLL_CTRL_FBDIV_SHIFT)
#define CGU_PLL_CTRL_PD BIT(0)
#define CGU_PLL_CTRL_BYPASS BIT(1)
#define CGU_PLL_STATUS_LOCK BIT(0)
#define CGU_PLL_STATUS_ERR BIT(1)
#define HSDK_PLL_MAX_LOCK_TIME 100 /* 100 us */
#define CGU_PLL_SOURCE_MAX 1
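/*
 * The core PLL additionally drives a divider for the core interface clock
 * through a separate CREG register (mapped as spec_regs). Once the core clock
 * is programmed above CORE_IF_CLK_THRESHOLD_HZ, the interface clock must be
 * divided by 2; below the threshold it runs undivided. See
 * hsdk_pll_core_update_rate().
 */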
#define CORE_IF_CLK_THRESHOLD_HZ 500000000
#define CREG_CORE_IF_CLK_DIV_1 0x0
#define CREG_CORE_IF_CLK_DIV_2 0x1
struct hsdk_pll_cfg {
u32 rate;
u32 idiv;
u32 fbdiv;
u32 odiv;
u32 band;
};
static const struct hsdk_pll_cfg asdt_pll_cfg[] = {
{ 100000000, 0, 11, 3, 0 },
{ 133000000, 0, 15, 3, 0 },
{ 200000000, 1, 47, 3, 0 },
{ 233000000, 1, 27, 2, 0 },
{ 300000000, 1, 35, 2, 0 },
{ 333000000, 1, 39, 2, 0 },
{ 400000000, 1, 47, 2, 0 },
{ 500000000, 0, 14, 1, 0 },
{ 600000000, 0, 17, 1, 0 },
{ 700000000, 0, 20, 1, 0 },
{ 800000000, 0, 23, 1, 0 },
{ 900000000, 1, 26, 0, 0 },
{ 1000000000, 1, 29, 0, 0 },
{ 1100000000, 1, 32, 0, 0 },
{ 1200000000, 1, 35, 0, 0 },
{ 1300000000, 1, 38, 0, 0 },
{ 1400000000, 1, 41, 0, 0 },
{ 1500000000, 1, 44, 0, 0 },
{ 1600000000, 1, 47, 0, 0 },
{}
};
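/*
 * How a table entry maps to an output rate (the divider encodings are decoded
 * in hsdk_pll_recalc_rate()):
 *
 *	rate = parent * 2 * (fbdiv + 1) / ((idiv + 1) * 2^odiv)
 *
 * For example, the 1000000000 entry above yields parent * 60 / 2 = parent * 30,
 * i.e. 1 GHz assuming the 33.33 MHz reference typically fed to the HSDK PLLs
 * (the actual parent rate comes from the device tree).
 */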
static const struct hsdk_pll_cfg hdmi_pll_cfg[] = {
{ 297000000, 0, 21, 2, 0 },
{ 540000000, 0, 19, 1, 0 },
{ 594000000, 0, 21, 1, 0 },
{}
};
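/*
 * struct hsdk_pll_clk - a single PLL instance
 * @hw:          clk_hw handle registered with the common clock framework
 * @regs:        CGU PLL register block (CTRL/STATUS/FMEAS/MON)
 * @spec_regs:   CREG core interface clock divider register; only mapped for
 *               the core PLL, NULL for the GP and HDMI PLLs
 * @pll_devdata: configuration table and update callback for this PLL type
 * @dev:         owning device; NULL when registered early via CLK_OF_DECLARE()
 */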
struct hsdk_pll_clk {
struct clk_hw hw;
void __iomem *regs;
void __iomem *spec_regs;
const struct hsdk_pll_devdata *pll_devdata;
struct device *dev;
};
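/*
 * struct hsdk_pll_devdata - per-PLL-type data
 * @pll_cfg:     table of supported rates with pre-computed divider settings
 * @update_rate: programs a configuration entry into the hardware; the core
 *               PLL variant also reprograms the interface clock divider
 */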
struct hsdk_pll_devdata {
const struct hsdk_pll_cfg *pll_cfg;
int (*update_rate)(struct hsdk_pll_clk *clk, unsigned long rate,
const struct hsdk_pll_cfg *cfg);
};
static int hsdk_pll_core_update_rate(struct hsdk_pll_clk *, unsigned long,
const struct hsdk_pll_cfg *);
static int hsdk_pll_comm_update_rate(struct hsdk_pll_clk *, unsigned long,
const struct hsdk_pll_cfg *);
static const struct hsdk_pll_devdata core_pll_devdata = {
.pll_cfg = asdt_pll_cfg,
.update_rate = hsdk_pll_core_update_rate,
};
static const struct hsdk_pll_devdata sdt_pll_devdata = {
.pll_cfg = asdt_pll_cfg,
.update_rate = hsdk_pll_comm_update_rate,
};
static const struct hsdk_pll_devdata hdmi_pll_devdata = {
.pll_cfg = hdmi_pll_cfg,
.update_rate = hsdk_pll_comm_update_rate,
};
static inline void hsdk_pll_write(struct hsdk_pll_clk *clk, u32 reg, u32 val)
{
iowrite32(val, clk->regs + reg);
}
static inline u32 hsdk_pll_read(struct hsdk_pll_clk *clk, u32 reg)
{
return ioread32(clk->regs + reg);
}
static inline void hsdk_pll_set_cfg(struct hsdk_pll_clk *clk,
const struct hsdk_pll_cfg *cfg)
{
u32 val = 0;
/* Powerdown and Bypass bits should be cleared */
val |= cfg->idiv << CGU_PLL_CTRL_IDIV_SHIFT;
val |= cfg->fbdiv << CGU_PLL_CTRL_FBDIV_SHIFT;
val |= cfg->odiv << CGU_PLL_CTRL_ODIV_SHIFT;
val |= cfg->band << CGU_PLL_CTRL_BAND_SHIFT;
dev_dbg(clk->dev, "write configuration: %#x\n", val);
hsdk_pll_write(clk, CGU_PLL_CTRL, val);
}
static inline bool hsdk_pll_is_locked(struct hsdk_pll_clk *clk)
{
return !!(hsdk_pll_read(clk, CGU_PLL_STATUS) & CGU_PLL_STATUS_LOCK);
}
static inline bool hsdk_pll_is_err(struct hsdk_pll_clk *clk)
{
return !!(hsdk_pll_read(clk, CGU_PLL_STATUS) & CGU_PLL_STATUS_ERR);
}
static inline struct hsdk_pll_clk *to_hsdk_pll_clk(struct clk_hw *hw)
{
return container_of(hw, struct hsdk_pll_clk, hw);
}
static unsigned long hsdk_pll_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
u32 val;
u64 rate;
u32 idiv, fbdiv, odiv;
struct hsdk_pll_clk *clk = to_hsdk_pll_clk(hw);
val = hsdk_pll_read(clk, CGU_PLL_CTRL);
dev_dbg(clk->dev, "current configuration: %#x\n", val);
/* Check if PLL is disabled */
if (val & CGU_PLL_CTRL_PD)
return 0;
/* Check if PLL is bypassed */
if (val & CGU_PLL_CTRL_BYPASS)
return parent_rate;
/* input divider = reg.idiv + 1 */
idiv = 1 + ((val & CGU_PLL_CTRL_IDIV_MASK) >> CGU_PLL_CTRL_IDIV_SHIFT);
/* fb divider = 2*(reg.fbdiv + 1) */
fbdiv = 2 * (1 + ((val & CGU_PLL_CTRL_FBDIV_MASK) >> CGU_PLL_CTRL_FBDIV_SHIFT));
/* output divider = 2^(reg.odiv) */
odiv = 1 << ((val & CGU_PLL_CTRL_ODIV_MASK) >> CGU_PLL_CTRL_ODIV_SHIFT);
rate = (u64)parent_rate * fbdiv;
do_div(rate, idiv * odiv);
return rate;
}
static long hsdk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
{
int i;
unsigned long best_rate;
struct hsdk_pll_clk *clk = to_hsdk_pll_clk(hw);
const struct hsdk_pll_cfg *pll_cfg = clk->pll_devdata->pll_cfg;
if (pll_cfg[0].rate == 0)
return -EINVAL;
best_rate = pll_cfg[0].rate;
for (i = 1; pll_cfg[i].rate != 0; i++) {
if (abs(rate - pll_cfg[i].rate) < abs(rate - best_rate))
best_rate = pll_cfg[i].rate;
}
dev_dbg(clk->dev, "chosen best rate: %lu\n", best_rate);
return best_rate;
}
static int hsdk_pll_comm_update_rate(struct hsdk_pll_clk *clk,
unsigned long rate,
const struct hsdk_pll_cfg *cfg)
{
hsdk_pll_set_cfg(clk, cfg);
/*
 * Wait for the CGU to relock, then check the error status.
 * If the PLL is still unlocked after the timeout, return an error.
*/
udelay(HSDK_PLL_MAX_LOCK_TIME);
if (!hsdk_pll_is_locked(clk))
return -ETIMEDOUT;
if (hsdk_pll_is_err(clk))
return -EINVAL;
return 0;
}
static int hsdk_pll_core_update_rate(struct hsdk_pll_clk *clk,
unsigned long rate,
const struct hsdk_pll_cfg *cfg)
{
/*
 * When the core clock exceeds 500 MHz, the divider for the interface
 * clock must be programmed to div-by-2.
*/
if (rate > CORE_IF_CLK_THRESHOLD_HZ)
iowrite32(CREG_CORE_IF_CLK_DIV_2, clk->spec_regs);
hsdk_pll_set_cfg(clk, cfg);
/*
 * Wait for the CGU to relock, then check the error status.
 * If the PLL is still unlocked after the timeout, return an error.
*/
udelay(HSDK_PLL_MAX_LOCK_TIME);
if (!hsdk_pll_is_locked(clk))
return -ETIMEDOUT;
if (hsdk_pll_is_err(clk))
return -EINVAL;
/*
 * Program the divider back to div-by-1 if we successfully set the core
 * clock below the 500 MHz threshold.
*/
if (rate <= CORE_IF_CLK_THRESHOLD_HZ)
iowrite32(CREG_CORE_IF_CLK_DIV_1, clk->spec_regs);
return 0;
}
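/*
 * Note: hsdk_pll_set_rate() only accepts rates that exactly match an entry in
 * the configuration table. The clock framework first rounds requests through
 * hsdk_pll_round_rate(), which always returns one of the table rates.
 */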
static int hsdk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
int i;
struct hsdk_pll_clk *clk = to_hsdk_pll_clk(hw);
const struct hsdk_pll_cfg *pll_cfg = clk->pll_devdata->pll_cfg;
for (i = 0; pll_cfg[i].rate != 0; i++) {
if (pll_cfg[i].rate == rate) {
return clk->pll_devdata->update_rate(clk, rate,
&pll_cfg[i]);
}
}
dev_err(clk->dev, "invalid rate=%lu, parent_rate=%lu\n", rate,
parent_rate);
return -EINVAL;
}
static const struct clk_ops hsdk_pll_ops = {
.recalc_rate = hsdk_pll_recalc_rate,
.round_rate = hsdk_pll_round_rate,
.set_rate = hsdk_pll_set_rate,
};
static int hsdk_pll_clk_probe(struct platform_device *pdev)
{
int ret;
struct resource *mem;
const char *parent_name;
unsigned int num_parents;
struct hsdk_pll_clk *pll_clk;
struct clk_init_data init = { };
struct device *dev = &pdev->dev;
pll_clk = devm_kzalloc(dev, sizeof(*pll_clk), GFP_KERNEL);
if (!pll_clk)
return -ENOMEM;
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
pll_clk->regs = devm_ioremap_resource(dev, mem);
if (IS_ERR(pll_clk->regs))
return PTR_ERR(pll_clk->regs);
init.name = dev->of_node->name;
init.ops = &hsdk_pll_ops;
parent_name = of_clk_get_parent_name(dev->of_node, 0);
init.parent_names = &parent_name;
num_parents = of_clk_get_parent_count(dev->of_node);
if (num_parents == 0 || num_parents > CGU_PLL_SOURCE_MAX) {
dev_err(dev, "invalid number of clock parents: %u\n", num_parents);
return -EINVAL;
}
init.num_parents = num_parents;
pll_clk->hw.init = &init;
pll_clk->dev = dev;
pll_clk->pll_devdata = of_device_get_match_data(dev);
if (!pll_clk->pll_devdata) {
dev_err(dev, "No OF match data provided\n");
return -EINVAL;
}
ret = devm_clk_hw_register(dev, &pll_clk->hw);
if (ret) {
dev_err(dev, "failed to register %s clock\n", init.name);
return ret;
}
return of_clk_add_hw_provider(dev->of_node, of_clk_hw_simple_get,
&pll_clk->hw);
}
static int hsdk_pll_clk_remove(struct platform_device *pdev)
{
of_clk_del_provider(pdev->dev.of_node);
return 0;
}
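/*
 * Early registration path: the core PLL is needed before the platform bus is
 * up (the ARC core timers are clocked from it, see CLK_OF_DECLARE() below),
 * so it is set up directly from the device tree and maps both the CGU and
 * CREG register regions itself instead of going through the probe path above.
 */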
static void __init of_hsdk_pll_clk_setup(struct device_node *node)
{
int ret;
const char *parent_name;
unsigned int num_parents;
struct hsdk_pll_clk *pll_clk;
struct clk_init_data init = { };
pll_clk = kzalloc(sizeof(*pll_clk), GFP_KERNEL);
if (!pll_clk)
return;
pll_clk->regs = of_iomap(node, 0);
if (!pll_clk->regs) {
pr_err("failed to map pll registers\n");
goto err_free_pll_clk;
}
pll_clk->spec_regs = of_iomap(node, 1);
if (!pll_clk->spec_regs) {
pr_err("failed to map pll special registers\n");
goto err_unmap_comm_regs;
}
init.name = node->name;
init.ops = &hsdk_pll_ops;
parent_name = of_clk_get_parent_name(node, 0);
init.parent_names = &parent_name;
num_parents = of_clk_get_parent_count(node);
if (num_parents > CGU_PLL_SOURCE_MAX) {
pr_err("too many clock parents: %u\n", num_parents);
goto err_unmap_spec_regs;
}
init.num_parents = num_parents;
pll_clk->hw.init = &init;
pll_clk->pll_devdata = &core_pll_devdata;
ret = clk_hw_register(NULL, &pll_clk->hw);
if (ret) {
pr_err("failed to register %pOFn clock\n", node);
goto err_unmap_spec_regs;
}
ret = of_clk_add_hw_provider(node, of_clk_hw_simple_get, &pll_clk->hw);
if (ret) {
pr_err("failed to add hw provider for %pOFn clock\n", node);
goto err_unmap_spec_regs;
}
return;
err_unmap_spec_regs:
iounmap(pll_clk->spec_regs);
err_unmap_comm_regs:
iounmap(pll_clk->regs);
err_free_pll_clk:
kfree(pll_clk);
}
/* Core PLL needed early for ARC cpus timers */
CLK_OF_DECLARE(hsdk_pll_clock, "snps,hsdk-core-pll-clock",
of_hsdk_pll_clk_setup);
static const struct of_device_id hsdk_pll_clk_id[] = {
{ .compatible = "snps,hsdk-gp-pll-clock", .data = &sdt_pll_devdata},
{ .compatible = "snps,hsdk-hdmi-pll-clock", .data = &hdmi_pll_devdata},
{ }
};
static struct platform_driver hsdk_pll_clk_driver = {
.driver = {
.name = "hsdk-gp-pll-clock",
.of_match_table = hsdk_pll_clk_id,
},
.probe = hsdk_pll_clk_probe,
.remove = hsdk_pll_clk_remove,
};
builtin_platform_driver(hsdk_pll_clk_driver);
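/*
 * Illustrative (not normative) device tree usage; the label, node name and reg
 * values below are placeholders, see the snps,hsdk-*-pll-clock bindings for
 * the authoritative format:
 *
 *	core_clk: core-clk@<addr> {
 *		compatible = "snps,hsdk-core-pll-clock";
 *		reg = <CGU_PLL_BASE 0x10>, <CREG_IF_DIV_BASE 0x4>;
 *		#clock-cells = <0>;
 *		clocks = <&input_clk>;
 *	};
 *
 * The first reg region becomes pll_clk->regs (CGU PLL registers) and the
 * second becomes pll_clk->spec_regs (CREG core interface clock divider).
 * The GP and HDMI PLLs, handled by the platform driver above, only use the
 * first region.
 */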