FROMLIST: clk: qcom: Add support for muxes, dividers, and mux dividers

The Krait CPU clocks are made up of muxes and dividers with a
handful of sources. Add a set of clk_ops that allow us to
configure these clocks so we can support CPU frequency scaling on
Krait CPUs.

Based on code originally written by Saravana Kannan.

Cc: Saravana Kannan <skannan@codeaurora.org>
Signed-off-by: Stephen Boyd <sboyd@codeaurora.org>
BUG=chrome-os-partner:31587
TEST=build/boot on AP-148 and storm P0

Change-Id: If3956736a9821a31d19254e5a9e65e7322a4540e
Signed-off-by: Grant Grundler <grundler@chromium.org>
Reviewed-on: https://chromium-review.googlesource.com/215087
Reviewed-by: Olof Johansson <olofj@chromium.org>
Author:    Stephen Boyd, 2014-03-25 13:37:55 -07:00
Committer: chrome-internal-fetch
Commit:    c4357eff7d (parent 26faee8da7)
3 changed files with 614 additions and 0 deletions
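
For illustration only: a platform clock driver might describe one of these mux
clocks roughly as sketched below. The register layout, the demo_* names, and the
parent clock names are assumptions made for the sketch, not part of this patch;
only clk_ops_gen_mux and the structures come from the code added here.

#include <linux/io.h>
#include <linux/clk-provider.h>
#include <linux/clk/msm-clk-generic.h>

/*
 * Assumed layout: the mux select field sits at bits (mux->mask << mux->shift)
 * of the register at mux->base + mux->offset.
 */
static int demo_set_mux_sel(struct mux_clk *mux, int sel)
{
	u32 val = readl_relaxed(mux->base + mux->offset);

	val &= ~(mux->mask << mux->shift);
	val |= (sel & mux->mask) << mux->shift;
	writel_relaxed(val, mux->base + mux->offset);
	return 0;
}

static int demo_get_mux_sel(struct mux_clk *mux)
{
	return (readl_relaxed(mux->base + mux->offset) >> mux->shift) & mux->mask;
}

static const struct clk_mux_ops demo_mux_ops = {
	.set_mux_sel = demo_set_mux_sel,
	.get_mux_sel = demo_get_mux_sel,
};

/* .base would be filled in from ioremap() at probe time. */
static struct mux_clk demo_cpu_mux = {
	.offset = 0x0,
	.mask = 0x3,
	.shift = 0,
	.parent_map = (u8 []){ 0, 2, 3 },	/* framework index -> hw select value */
	.has_safe_parent = true,
	.safe_sel = 0,				/* park on "aux" while reprogramming sources */
	.ops = &demo_mux_ops,
	.hw.init = &(struct clk_init_data){
		.name = "demo_cpu_mux",
		.parent_names = (const char *[]){ "aux", "pll0", "pll1" },
		.num_parents = 3,
		.ops = &clk_ops_gen_mux,
	},
};

Registration then goes through the common framework, e.g.
devm_clk_register(dev, &demo_cpu_mux.hw), after which clk_set_parent() and
clk_set_rate() on the returned clock use the ops above.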

drivers/clk/qcom/Makefile

@@ -6,6 +6,7 @@ clk-qcom-y += clk-pll.o
 clk-qcom-y += clk-rcg.o
 clk-qcom-y += clk-rcg2.o
 clk-qcom-y += clk-branch.o
+clk-qcom-y += clk-generic.o
 clk-qcom-y += reset.o
 obj-$(CONFIG_APQ_GCC_8084) += gcc-apq8084.o

drivers/clk/qcom/clk-generic.c

@@ -0,0 +1,405 @@
/*
* Copyright (c) 2014, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/clk-provider.h>
#include <linux/clk/msm-clk-generic.h>
/* ==================== Mux clock ==================== */
static int mux_set_parent(struct clk_hw *hw, u8 sel)
{
struct mux_clk *mux = to_mux_clk(hw);
if (mux->parent_map)
sel = mux->parent_map[sel];
return mux->ops->set_mux_sel(mux, sel);
}
static u8 mux_get_parent(struct clk_hw *hw)
{
struct mux_clk *mux = to_mux_clk(hw);
int num_parents = __clk_get_num_parents(hw->clk);
int i;
u8 sel;
sel = mux->ops->get_mux_sel(mux);
if (mux->parent_map) {
for (i = 0; i < num_parents; i++)
if (sel == mux->parent_map[i])
return i;
WARN(1, "Can't find parent\n");
return -EINVAL;
}
return sel;
}
static int mux_enable(struct clk_hw *hw)
{
struct mux_clk *mux = to_mux_clk(hw);
if (mux->ops->enable)
return mux->ops->enable(mux);
return 0;
}
static void mux_disable(struct clk_hw *hw)
{
struct mux_clk *mux = to_mux_clk(hw);
if (mux->ops->disable)
return mux->ops->disable(mux);
}
static struct clk *mux_get_safe_parent(struct clk_hw *hw)
{
int i;
struct mux_clk *mux = to_mux_clk(hw);
int num_parents = __clk_get_num_parents(hw->clk);
if (!mux->has_safe_parent)
return NULL;
i = mux->safe_sel;
if (mux->parent_map)
for (i = 0; i < num_parents; i++)
if (mux->safe_sel == mux->parent_map[i])
break;
return clk_get_parent_by_index(hw->clk, i);
}
const struct clk_ops clk_ops_gen_mux = {
.enable = mux_enable,
.disable = mux_disable,
.set_parent = mux_set_parent,
.get_parent = mux_get_parent,
.determine_rate = __clk_mux_determine_rate,
.get_safe_parent = mux_get_safe_parent,
};
EXPORT_SYMBOL_GPL(clk_ops_gen_mux);
/* ==================== Divider clock ==================== */
static long __div_round_rate(struct div_data *data, unsigned long rate,
struct clk *parent, unsigned int *best_div, unsigned long *best_prate,
bool set_parent)
{
unsigned int div, min_div, max_div, _best_div = 1;
unsigned long prate, _best_prate = 0, rrate = 0, req_prate, actual_rate;
unsigned int numer;
rate = max(rate, 1UL);
min_div = max(data->min_div, 1U);
max_div = min(data->max_div, (unsigned int) (ULONG_MAX / rate));
/*
* div values are doubled for half dividers.
* Adjust for that by picking a numerator (numer) of 2.
*/
numer = data->is_half_divider ? 2 : 1;
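/*
 * Example: with is_half_divider set, a hardware divide-by-2.5 is encoded
 * as div = 5 together with numer = 2, so the resulting rate is
 * parent_rate * 2 / 5.
 */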
if (!set_parent) {
prate = *best_prate * numer;
div = DIV_ROUND_UP(prate, rate);
div = clamp(div, 1U, max_div);
if (best_div)
*best_div = div;
return mult_frac(*best_prate, numer, div);
}
for (div = min_div; div <= max_div; div++) {
req_prate = mult_frac(rate, div, numer);
prate = __clk_round_rate(parent, req_prate);
if (IS_ERR_VALUE(prate))
break;
actual_rate = mult_frac(prate, numer, div);
if (is_better_rate(rate, rrate, actual_rate)) {
rrate = actual_rate;
_best_div = div;
_best_prate = prate;
}
/*
* Trying higher dividers is only going to ask the parent for
* a higher rate. If it can't even output a rate higher than
* the one we request for this divider, the parent is not
* going to be able to output an even higher rate required
* for a higher divider. So, stop trying higher dividers.
*/
if (actual_rate < rate)
break;
if (rrate <= rate)
break;
}
if (!rrate)
return -EINVAL;
if (best_div)
*best_div = _best_div;
if (best_prate)
*best_prate = _best_prate;
return rrate;
}
static long div_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
struct div_clk *d = to_div_clk(hw);
bool set_parent = __clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT;
return __div_round_rate(&d->data, rate, __clk_get_parent(hw->clk),
NULL, parent_rate, set_parent);
}
static int div_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long
parent_rate)
{
struct div_clk *d = to_div_clk(hw);
int div, rc = 0;
struct div_data *data = &d->data;
div = parent_rate / rate;
if (div != data->div)
rc = d->ops->set_div(d, div);
data->div = div;
return rc;
}
static int div_enable(struct clk_hw *hw)
{
struct div_clk *d = to_div_clk(hw);
if (d->ops && d->ops->enable)
return d->ops->enable(d);
return 0;
}
static void div_disable(struct clk_hw *hw)
{
struct div_clk *d = to_div_clk(hw);
if (d->ops && d->ops->disable)
return d->ops->disable(d);
}
static unsigned long div_recalc_rate(struct clk_hw *hw, unsigned long prate)
{
struct div_clk *d = to_div_clk(hw);
unsigned int div = d->data.div;
if (d->ops && d->ops->get_div)
div = max(d->ops->get_div(d), 1);
div = max(div, 1U);
if (!d->ops || !d->ops->set_div)
d->data.min_div = d->data.max_div = div;
d->data.div = div;
return prate / div;
}
const struct clk_ops clk_ops_div = {
.enable = div_enable,
.disable = div_disable,
.round_rate = div_round_rate,
.set_rate = div_set_rate,
.recalc_rate = div_recalc_rate,
};
EXPORT_SYMBOL_GPL(clk_ops_div);
/* ==================== Mux_div clock ==================== */
static int mux_div_clk_enable(struct clk_hw *hw)
{
struct mux_div_clk *md = to_mux_div_clk(hw);
if (md->ops->enable)
return md->ops->enable(md);
return 0;
}
static void mux_div_clk_disable(struct clk_hw *hw)
{
struct mux_div_clk *md = to_mux_div_clk(hw);
if (md->ops->disable)
return md->ops->disable(md);
}
static long __mux_div_round_rate(struct clk_hw *hw, unsigned long rate,
struct clk **best_parent, int *best_div, unsigned long *best_prate)
{
struct mux_div_clk *md = to_mux_div_clk(hw);
unsigned int i;
unsigned long rrate, best = 0, _best_div = 0, _best_prate = 0;
struct clk *_best_parent = NULL;
int num_parents = __clk_get_num_parents(hw->clk);
bool set_parent = __clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT;
for (i = 0; i < num_parents; i++) {
int div;
unsigned long prate;
struct clk *p = clk_get_parent_by_index(hw->clk, i);
rrate = __div_round_rate(&md->data, rate, p, &div, &prate,
set_parent);
if (is_better_rate(rate, best, rrate)) {
best = rrate;
_best_div = div;
_best_prate = prate;
_best_parent = p;
}
if (rate <= rrate)
break;
}
if (best_div)
*best_div = _best_div;
if (best_prate)
*best_prate = _best_prate;
if (best_parent)
*best_parent = _best_parent;
if (best)
return best;
return -EINVAL;
}
static long mux_div_clk_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
return __mux_div_round_rate(hw, rate, NULL, NULL, parent_rate);
}
/* requires enable lock to be held */
static int __set_src_div(struct mux_div_clk *md, u8 src_sel, u32 div)
{
int rc;
rc = md->ops->set_src_div(md, src_sel, div);
if (!rc) {
md->data.div = div;
md->src_sel = src_sel;
}
return rc;
}
/* Must be called after handoff to ensure parent clock rates are initialized */
static int safe_parent_init_once(struct clk_hw *hw)
{
unsigned long rrate;
u32 best_div;
struct clk *best_parent;
struct mux_div_clk *md = to_mux_div_clk(hw);
if (IS_ERR(md->safe_parent))
return -EINVAL;
if (!md->safe_freq || md->safe_parent)
return 0;
rrate = __mux_div_round_rate(hw, md->safe_freq, &best_parent,
&best_div, NULL);
if (rrate == md->safe_freq) {
md->safe_div = best_div;
md->safe_parent = best_parent;
} else {
md->safe_parent = ERR_PTR(-EINVAL);
return -EINVAL;
}
return 0;
}
static int
__mux_div_clk_set_rate_and_parent(struct clk_hw *hw, u8 index, u32 div)
{
struct mux_div_clk *md = to_mux_div_clk(hw);
int rc;
rc = safe_parent_init_once(hw);
if (rc)
return rc;
return __set_src_div(md, index, div);
}
static int mux_div_clk_set_rate_and_parent(struct clk_hw *hw,
unsigned long rate, unsigned long parent_rate, u8 index)
{
return __mux_div_clk_set_rate_and_parent(hw, index, parent_rate / rate);
}
static int mux_div_clk_set_rate(struct clk_hw *hw,
unsigned long rate, unsigned long parent_rate)
{
struct mux_div_clk *md = to_mux_div_clk(hw);
return __mux_div_clk_set_rate_and_parent(hw, md->src_sel,
parent_rate / rate);
}
static int mux_div_clk_set_parent(struct clk_hw *hw, u8 index)
{
struct mux_div_clk *md = to_mux_div_clk(hw);
return __mux_div_clk_set_rate_and_parent(hw, md->parent_map[index],
md->data.div);
}
static u8 mux_div_clk_get_parent(struct clk_hw *hw)
{
struct mux_div_clk *md = to_mux_div_clk(hw);
int num_parents = __clk_get_num_parents(hw->clk);
u32 i, div, sel;
md->ops->get_src_div(md, &sel, &div);
md->src_sel = sel;
for (i = 0; i < num_parents; i++)
if (sel == md->parent_map[i])
return i;
WARN(1, "Can't find parent\n");
return -EINVAL;
}
static unsigned long
mux_div_clk_recalc_rate(struct clk_hw *hw, unsigned long prate)
{
struct mux_div_clk *md = to_mux_div_clk(hw);
u32 div, sel;
md->ops->get_src_div(md, &sel, &div);
return prate / div;
}
const struct clk_ops clk_ops_mux_div_clk = {
.enable = mux_div_clk_enable,
.disable = mux_div_clk_disable,
.set_rate_and_parent = mux_div_clk_set_rate_and_parent,
.set_rate = mux_div_clk_set_rate,
.set_parent = mux_div_clk_set_parent,
.round_rate = mux_div_clk_round_rate,
.get_parent = mux_div_clk_get_parent,
.recalc_rate = mux_div_clk_recalc_rate,
};
EXPORT_SYMBOL_GPL(clk_ops_mux_div_clk);

include/linux/clk/msm-clk-generic.h

@@ -0,0 +1,208 @@
/*
* Copyright (c) 2014, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef __QCOM_CLK_GENERIC_H__
#define __QCOM_CLK_GENERIC_H__
#include <linux/err.h>
#include <linux/clk-provider.h>
static inline bool is_better_rate(unsigned long req, unsigned long best,
unsigned long new)
{
if (IS_ERR_VALUE(new))
return false;
return (req <= new && new < best) || (best < req && best < new);
}
/* ==================== Mux clock ==================== */
struct mux_clk;
struct clk_mux_ops {
int (*set_mux_sel)(struct mux_clk *clk, int sel);
int (*get_mux_sel)(struct mux_clk *clk);
/* Optional */
bool (*is_enabled)(struct mux_clk *clk);
int (*enable)(struct mux_clk *clk);
void (*disable)(struct mux_clk *clk);
};
struct mux_clk {
/* Parents in decreasing order of preference for obtaining rates. */
u8 *parent_map;
bool has_safe_parent;
u8 safe_sel;
const struct clk_mux_ops *ops;
/* Fields not used by the helper functions. */
void __iomem *base;
u32 offset;
u32 en_offset;
int en_reg;
u32 mask;
u32 shift;
u32 en_mask;
void *priv;
struct clk_hw hw;
};
static inline struct mux_clk *to_mux_clk(struct clk_hw *hw)
{
return container_of(hw, struct mux_clk, hw);
}
extern const struct clk_ops clk_ops_gen_mux;
/* ==================== Divider clock ==================== */
struct div_clk;
struct clk_div_ops {
int (*set_div)(struct div_clk *clk, int div);
int (*get_div)(struct div_clk *clk);
bool (*is_enabled)(struct div_clk *clk);
int (*enable)(struct div_clk *clk);
void (*disable)(struct div_clk *clk);
};
struct div_data {
unsigned int div;
unsigned int min_div;
unsigned int max_div;
/*
* Indicates whether this divider clock supports half-integer dividers.
* If so, min_div and max_div are doubled, i.e. they are 2*N.
*/
bool is_half_divider;
};
struct div_clk {
struct div_data data;
/* Optional */
const struct clk_div_ops *ops;
/* Fields not used by the helper functions. */
void __iomem *base;
u32 offset;
u32 mask;
u32 shift;
u32 en_mask;
void *priv;
struct clk_hw hw;
};
static inline struct div_clk *to_div_clk(struct clk_hw *hw)
{
return container_of(hw, struct div_clk, hw);
}
extern const struct clk_ops clk_ops_div;
#define DEFINE_FIXED_DIV_CLK(clk_name, _div, _parent) \
static struct div_clk clk_name = { \
.data = { \
.max_div = _div, \
.min_div = _div, \
.div = _div, \
}, \
.hw.init = &(struct clk_init_data){ \
.parent_names = (const char *[]){ _parent }, \
.num_parents = 1, \
.name = #clk_name, \
.ops = &clk_ops_div, \
.flags = CLK_SET_RATE_PARENT, \
} \
}
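/*
 * Illustrative usage (not part of this patch): a fixed divide-by-2 clock on a
 * hypothetical parent "demo_pll" could be declared with the macro above and
 * registered like any other clk_hw:
 *
 *	DEFINE_FIXED_DIV_CLK(demo_pll_div2, 2, "demo_pll");
 *	...
 *	clk = devm_clk_register(dev, &demo_pll_div2.hw);
 */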
/* ==================== Mux Div clock ==================== */
struct mux_div_clk;
/*
* struct mux_div_ops
* the enable and disable ops are optional.
*/
struct mux_div_ops {
int (*set_src_div)(struct mux_div_clk *, u32 src_sel, u32 div);
void (*get_src_div)(struct mux_div_clk *, u32 *src_sel, u32 *div);
int (*enable)(struct mux_div_clk *);
void (*disable)(struct mux_div_clk *);
bool (*is_enabled)(struct mux_div_clk *);
};
/*
* struct mux_div_clk - combined mux/divider clock
* @priv: parameters needed by ops
* @safe_freq: when switching rates from A to B, the mux div clock will
*	instead switch from A -> safe_freq -> B. This allows the mux_div
*	clock to change rates while enabled, even if this behavior is not
*	supported by the parent clocks. If changing the rate of parent A
*	also causes the rate of parent B to change, then safe_freq must be
*	defined. safe_freq is expected to have a source clock which is
*	always on and runs at only one rate.
* @parent_map: list of parents and mux indices
* @ops: function pointers for hw specific operations
* @src_sel: the mux index which will be used if the clock is enabled
*/
struct mux_div_clk {
/* Required parameters */
const struct mux_div_ops *ops;
struct div_data data;
u8 *parent_map;
struct clk_hw hw;
/* Internal */
u32 src_sel;
/* Optional parameters */
void *priv;
void __iomem *base;
u32 div_mask;
u32 div_offset;
u32 div_shift;
u32 src_mask;
u32 src_offset;
u32 src_shift;
u32 en_mask;
u32 en_offset;
u32 safe_div;
struct clk *safe_parent;
unsigned long safe_freq;
};
static inline struct mux_div_clk *to_mux_div_clk(struct clk_hw *hw)
{
return container_of(hw, struct mux_div_clk, hw);
}
extern const struct clk_ops clk_ops_mux_div_clk;
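/*
 * Illustrative usage (not part of this patch): a combined mux/divider fed by
 * a fixed-rate "safe" source and a PLL might be declared roughly as below.
 * demo_md_ops and all clock names are assumptions made for the sketch:
 *
 *	static struct mux_div_clk demo_cpu_clk = {
 *		.ops = &demo_md_ops,
 *		.data = { .min_div = 1, .max_div = 8, .div = 1 },
 *		.parent_map = (u8 []){ 0, 1 },
 *		.safe_freq = 300000000,
 *		.hw.init = &(struct clk_init_data){
 *			.name = "demo_cpu_clk",
 *			.parent_names = (const char *[]){ "gpll0_vote", "demo_pll" },
 *			.num_parents = 2,
 *			.ops = &clk_ops_mux_div_clk,
 *			.flags = CLK_SET_RATE_PARENT,
 *		},
 *	};
 */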
#endif