The round_rate() clk_ops callback is deprecated, so migrate this driver
from round_rate() to determine_rate() using the Coccinelle semantic patch
included in the cover letter of this series.
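
For reference, the general shape of the conversion: round_rate() returns
the rounded rate directly, while determine_rate() adjusts the rate inside
the struct clk_rate_request and returns 0 on success. A minimal sketch
(the foo_* names and FOO_MIN_RATE/FOO_MAX_RATE limits are placeholders,
not code from this driver):

    #include <linux/clk-provider.h>     /* struct clk_hw, struct clk_rate_request */
    #include <linux/minmax.h>           /* clamp() */

    #define FOO_MIN_RATE    600000000UL  /* placeholder limits for the sketch */
    #define FOO_MAX_RATE    1200000000UL

    /* Deprecated callback: the rounded rate is the return value. */
    static long foo_clk_round_rate(struct clk_hw *hw, unsigned long rate,
                                   unsigned long *parent_rate)
    {
            return clamp(rate, FOO_MIN_RATE, FOO_MAX_RATE);
    }

    /* Replacement callback: clamp req->rate in place and report success. */
    static int foo_clk_determine_rate(struct clk_hw *hw,
                                      struct clk_rate_request *req)
    {
            req->rate = clamp(req->rate, FOO_MIN_RATE, FOO_MAX_RATE);

            return 0;
    }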

Signed-off-by: Brian Masney <bmas...@redhat.com>
---
 drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c | 34 +++++++++++++------------
 1 file changed, 18 insertions(+), 16 deletions(-)

diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
index f3643320ff2f2bae5301bb94f1fe19fa03db584c..8d234685c1aa3d4579e3dc766c000de1909f87f6 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
@@ -231,21 +231,21 @@ static void dsi_pll_28nm_vco_unprepare(struct clk_hw *hw)
        pll_28nm->phy->pll_on = false;
 }
 
-static long dsi_pll_28nm_clk_round_rate(struct clk_hw *hw,
-               unsigned long rate, unsigned long *parent_rate)
+static int dsi_pll_28nm_clk_determine_rate(struct clk_hw *hw,
+                                          struct clk_rate_request *req)
 {
        struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
 
-       if      (rate < pll_28nm->phy->cfg->min_pll_rate)
-               return  pll_28nm->phy->cfg->min_pll_rate;
-       else if (rate > pll_28nm->phy->cfg->max_pll_rate)
-               return  pll_28nm->phy->cfg->max_pll_rate;
-       else
-               return rate;
+       if (req->rate < pll_28nm->phy->cfg->min_pll_rate)
+               req->rate = pll_28nm->phy->cfg->min_pll_rate;
+       else if (req->rate > pll_28nm->phy->cfg->max_pll_rate)
+               req->rate = pll_28nm->phy->cfg->max_pll_rate;
+
+       return 0;
 }
 
 static const struct clk_ops clk_ops_dsi_pll_28nm_vco = {
-       .round_rate = dsi_pll_28nm_clk_round_rate,
+       .determine_rate = dsi_pll_28nm_clk_determine_rate,
        .set_rate = dsi_pll_28nm_clk_set_rate,
        .recalc_rate = dsi_pll_28nm_clk_recalc_rate,
        .prepare = dsi_pll_28nm_vco_prepare,
@@ -296,18 +296,20 @@ static unsigned int get_vco_mul_factor(unsigned long byte_clk_rate)
                return 8;
 }
 
-static long clk_bytediv_round_rate(struct clk_hw *hw, unsigned long rate,
-                                  unsigned long *prate)
+static int clk_bytediv_determine_rate(struct clk_hw *hw,
+                                     struct clk_rate_request *req)
 {
        unsigned long best_parent;
        unsigned int factor;
 
-       factor = get_vco_mul_factor(rate);
+       factor = get_vco_mul_factor(req->rate);
+
+       best_parent = req->rate * factor;
+       req->best_parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw), best_parent);
 
-       best_parent = rate * factor;
-       *prate = clk_hw_round_rate(clk_hw_get_parent(hw), best_parent);
+       req->rate = req->best_parent_rate / factor;
 
-       return *prate / factor;
+       return 0;
 }
 
 static int clk_bytediv_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -328,7 +330,7 @@ static int clk_bytediv_set_rate(struct clk_hw *hw, unsigned long rate,
 
 /* Our special byte clock divider ops */
 static const struct clk_ops clk_bytediv_ops = {
-       .round_rate = clk_bytediv_round_rate,
+       .determine_rate = clk_bytediv_determine_rate,
        .set_rate = clk_bytediv_set_rate,
        .recalc_rate = clk_bytediv_recalc_rate,
 };

-- 
2.50.0
