phy: cadence: Sierra: Add support for PHY multilink configurations
Add support for multilink configuration of the Sierra PHY. Currently,
a maximum of two links is supported.

Signed-off-by: Swapnil Jakhade <[email protected]>
Reviewed-by: Aswath Govindraju <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Vinod Koul <[email protected]>
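
At a glance, probe dispatches to the new multilink path when a PHY node has more than one subnode, and that path enforces the two-link limit before any registers are programmed. A minimal sketch, using the names introduced in the diff below:

	/* probe: more than one subnode means a multilink configuration */
	if (!sp->autoconf && sp->nsubnodes > 1)
		ret = cdns_sierra_phy_configure_multilink(sp);

	/* cdns_sierra_phy_configure_multilink: maximum 2 links (subnodes) */
	if (sp->nsubnodes != 2)
		return -EINVAL;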
sjakhadecdns authored and vinodkoul committed Dec 27, 2021
1 parent da08aab commit 6b81f05
Showing 1 changed file with 190 additions and 8 deletions.
198 changes: 190 additions & 8 deletions drivers/phy/cadence/phy-cadence-sierra.c
@@ -24,7 +24,7 @@
#include <dt-bindings/phy/phy-cadence.h>

#define NUM_SSC_MODE 3
#define NUM_PHY_TYPE 3
#define NUM_PHY_TYPE 4

/* PHY register offsets */
#define SIERRA_COMMON_CDB_OFFSET 0x0
@@ -184,6 +184,13 @@
(0xE000 << (block_offset))
#define SIERRA_PHY_PMA_CMN_CTRL 0x000

/* PHY PMA lane registers */
#define SIERRA_PHY_PMA_LANE_CDB_OFFSET(ln, block_offset, reg_offset) \
((0xF000 << (block_offset)) + \
(((ln) << 8) << (reg_offset)))

#define SIERRA_PHY_PMA_XCVR_CTRL 0x000
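/*
 * Worked example of the new lane offset macro, assuming hypothetical
 * shift values block_offset = 0x2 and reg_offset = 0x2 (the real values
 * come from the per-platform cdns_sierra_data):
 *
 *   SIERRA_PHY_PMA_LANE_CDB_OFFSET(1, 0x2, 0x2)
 *     = (0xF000 << 0x2) + ((1 << 8) << 0x2)
 *     = 0x3C000 + 0x400
 *     = 0x3C400
 */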

#define SIERRA_MACRO_ID 0x00007364
#define SIERRA_MAX_LANES 16
#define PLL_LOCK_TIME 100000
@@ -299,6 +306,8 @@ struct cdns_sierra_data {
u8 reg_offset_shift;
struct cdns_sierra_vals *pcs_cmn_vals[NUM_PHY_TYPE][NUM_PHY_TYPE]
[NUM_SSC_MODE];
struct cdns_sierra_vals *phy_pma_ln_vals[NUM_PHY_TYPE][NUM_PHY_TYPE]
[NUM_SSC_MODE];
struct cdns_sierra_vals *pma_cmn_vals[NUM_PHY_TYPE][NUM_PHY_TYPE]
[NUM_SSC_MODE];
struct cdns_sierra_vals *pma_ln_vals[NUM_PHY_TYPE][NUM_PHY_TYPE]
@@ -322,6 +331,7 @@ struct cdns_sierra_phy {
struct regmap *regmap_phy_pcs_common_cdb;
struct regmap *regmap_phy_pcs_lane_cdb[SIERRA_MAX_LANES];
struct regmap *regmap_phy_pma_common_cdb;
struct regmap *regmap_phy_pma_lane_cdb[SIERRA_MAX_LANES];
struct regmap *regmap_common_cdb;
struct regmap_field *macro_id_type;
struct regmap_field *phy_pll_cfg_1;
@@ -438,6 +448,34 @@ static const struct regmap_config cdns_sierra_phy_pma_cmn_cdb_config = {
.reg_read = cdns_regmap_read,
};

#define SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF(n) \
{ \
.name = "sierra_phy_pma_lane" n "_cdb", \
.reg_stride = 1, \
.fast_io = true, \
.reg_write = cdns_regmap_write, \
.reg_read = cdns_regmap_read, \
}

static const struct regmap_config cdns_sierra_phy_pma_lane_cdb_config[] = {
SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("0"),
SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("1"),
SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("2"),
SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("3"),
SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("4"),
SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("5"),
SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("6"),
SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("7"),
SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("8"),
SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("9"),
SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("10"),
SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("11"),
SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("12"),
SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("13"),
SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("14"),
SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("15"),
};

static int cdns_sierra_phy_init(struct phy *gphy)
{
struct cdns_sierra_inst *ins = phy_get_drvdata(gphy);
@@ -446,14 +484,15 @@ static int cdns_sierra_phy_init(struct phy *gphy)
struct cdns_sierra_vals *pma_cmn_vals, *pma_ln_vals;
enum cdns_sierra_phy_type phy_type = ins->phy_type;
enum cdns_sierra_ssc_mode ssc = ins->ssc_mode;
struct cdns_sierra_vals *phy_pma_ln_vals;
const struct cdns_reg_pairs *reg_pairs;
struct cdns_sierra_vals *pcs_cmn_vals;
struct regmap *regmap;
u32 num_regs;
int i, j;

/* Initialise the PHY registers, unless auto configured */
if (phy->autoconf)
if (phy->autoconf || phy->nsubnodes > 1)
return 0;

clk_set_rate(phy->input_clks[CMN_REFCLK_DIG_DIV], 25000000);
@@ -469,6 +508,18 @@
regmap_write(regmap, reg_pairs[i].off, reg_pairs[i].val);
}

/* PHY PMA lane register configurations */
phy_pma_ln_vals = init_data->phy_pma_ln_vals[phy_type][TYPE_NONE][ssc];
if (phy_pma_ln_vals) {
reg_pairs = phy_pma_ln_vals->reg_pairs;
num_regs = phy_pma_ln_vals->num_regs;
for (i = 0; i < ins->num_lanes; i++) {
regmap = phy->regmap_phy_pma_lane_cdb[i + ins->mlane];
for (j = 0; j < num_regs; j++)
regmap_write(regmap, reg_pairs[j].off, reg_pairs[j].val);
}
}

/* PMA common register configurations */
pma_cmn_vals = init_data->pma_cmn_vals[phy_type][TYPE_NONE][ssc];
if (pma_cmn_vals) {
@@ -502,10 +553,13 @@ static int cdns_sierra_phy_on(struct phy *gphy)
u32 val;
int ret;

ret = reset_control_deassert(sp->phy_rst);
if (ret) {
dev_err(dev, "Failed to take the PHY out of reset\n");
return ret;
if (sp->nsubnodes == 1) {
/* Take the PHY out of reset */
ret = reset_control_deassert(sp->phy_rst);
if (ret) {
dev_err(dev, "Failed to take the PHY out of reset\n");
return ret;
}
}

/* Take the PHY lane group out of reset */
@@ -923,6 +977,19 @@ static int cdns_regmap_init_blocks(struct cdns_sierra_phy *sp,
}
sp->regmap_phy_pma_common_cdb = regmap;

for (i = 0; i < SIERRA_MAX_LANES; i++) {
block_offset = SIERRA_PHY_PMA_LANE_CDB_OFFSET(i, block_offset_shift,
reg_offset_shift);
regmap = cdns_regmap_init(dev, base, block_offset,
reg_offset_shift,
&cdns_sierra_phy_pma_lane_cdb_config[i]);
if (IS_ERR(regmap)) {
dev_err(dev, "Failed to init PHY PMA lane CDB regmap\n");
return PTR_ERR(regmap);
}
sp->regmap_phy_pma_lane_cdb[i] = regmap;
}

return 0;
}

@@ -1030,6 +1097,118 @@ static int cdns_sierra_phy_get_resets(struct cdns_sierra_phy *sp,
return 0;
}

static int cdns_sierra_phy_configure_multilink(struct cdns_sierra_phy *sp)
{
const struct cdns_sierra_data *init_data = sp->init_data;
struct cdns_sierra_vals *pma_cmn_vals, *pma_ln_vals;
enum cdns_sierra_phy_type phy_t1, phy_t2;
struct cdns_sierra_vals *phy_pma_ln_vals;
const struct cdns_reg_pairs *reg_pairs;
struct cdns_sierra_vals *pcs_cmn_vals;
int i, j, node, mlane, num_lanes, ret;
enum cdns_sierra_ssc_mode ssc;
struct regmap *regmap;
u32 num_regs;

/* Maximum 2 links (subnodes) are supported */
if (sp->nsubnodes != 2)
return -EINVAL;

clk_set_rate(sp->input_clks[CMN_REFCLK_DIG_DIV], 25000000);
clk_set_rate(sp->input_clks[CMN_REFCLK1_DIG_DIV], 25000000);

/* PHY configured to use both PLL LC and LC1 */
regmap_field_write(sp->phy_pll_cfg_1, 0x1);

phy_t1 = sp->phys[0].phy_type;
phy_t2 = sp->phys[1].phy_type;

/*
* PHY configuration for multi-link operation is done in two steps.
* e.g. consider a 4-lane PHY with PCIe using 2 lanes and QSGMII using the other 2 lanes.
* The Sierra PHY has 2 PLLs, viz. PLLLC and PLLLC1, so in this case PLLLC is used for PCIe
* and PLLLC1 for QSGMII. The PHY is configured in two steps as described below.
*
* [1] In the first step, phy_t1 = TYPE_PCIE and phy_t2 = TYPE_QSGMII,
* so the register values are selected as [TYPE_PCIE][TYPE_QSGMII][ssc].
* This configures the PHY registers associated with PCIe (i.e. the first protocol),
* involving the PLLLC registers and the registers for the first 2 lanes of the PHY.
* [2] In the second step, phy_t1 and phy_t2 are swapped, so now
* phy_t1 = TYPE_QSGMII and phy_t2 = TYPE_PCIE, and the register values are selected as
* [TYPE_QSGMII][TYPE_PCIE][ssc].
* This configures the PHY registers associated with QSGMII (i.e. the second protocol),
* involving the PLLLC1 registers and the registers for the other 2 lanes of the PHY.
*
* This completes the PHY configuration for multilink operation. This approach
* divides the large number of PHY register configurations into smaller,
* protocol-specific groups.
*/
for (node = 0; node < sp->nsubnodes; node++) {
if (node == 1) {
/*
* Once the first link with phy_t1 is configured, configure the PHY for the
* second link with phy_t2. Get the array values as [phy_t2][phy_t1][ssc].
*/
swap(phy_t1, phy_t2);
}

mlane = sp->phys[node].mlane;
ssc = sp->phys[node].ssc_mode;
num_lanes = sp->phys[node].num_lanes;

/* PHY PCS common register configurations */
pcs_cmn_vals = init_data->pcs_cmn_vals[phy_t1][phy_t2][ssc];
if (pcs_cmn_vals) {
reg_pairs = pcs_cmn_vals->reg_pairs;
num_regs = pcs_cmn_vals->num_regs;
regmap = sp->regmap_phy_pcs_common_cdb;
for (i = 0; i < num_regs; i++)
regmap_write(regmap, reg_pairs[i].off, reg_pairs[i].val);
}

/* PHY PMA lane register configurations */
phy_pma_ln_vals = init_data->phy_pma_ln_vals[phy_t1][phy_t2][ssc];
if (phy_pma_ln_vals) {
reg_pairs = phy_pma_ln_vals->reg_pairs;
num_regs = phy_pma_ln_vals->num_regs;
for (i = 0; i < num_lanes; i++) {
regmap = sp->regmap_phy_pma_lane_cdb[i + mlane];
for (j = 0; j < num_regs; j++)
regmap_write(regmap, reg_pairs[j].off, reg_pairs[j].val);
}
}
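/*
 * Example: for the 4-lane PCIe + QSGMII case described in the comment
 * above, and assuming QSGMII occupies the upper two lanes, the QSGMII
 * node has mlane = 2 and num_lanes = 2, so this loop programs
 * regmap_phy_pma_lane_cdb[2] and regmap_phy_pma_lane_cdb[3].
 */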

/* PMA common register configurations */
pma_cmn_vals = init_data->pma_cmn_vals[phy_t1][phy_t2][ssc];
if (pma_cmn_vals) {
reg_pairs = pma_cmn_vals->reg_pairs;
num_regs = pma_cmn_vals->num_regs;
regmap = sp->regmap_common_cdb;
for (i = 0; i < num_regs; i++)
regmap_write(regmap, reg_pairs[i].off, reg_pairs[i].val);
}

/* PMA lane register configurations */
pma_ln_vals = init_data->pma_ln_vals[phy_t1][phy_t2][ssc];
if (pma_ln_vals) {
reg_pairs = pma_ln_vals->reg_pairs;
num_regs = pma_ln_vals->num_regs;
for (i = 0; i < num_lanes; i++) {
regmap = sp->regmap_lane_cdb[i + mlane];
for (j = 0; j < num_regs; j++)
regmap_write(regmap, reg_pairs[j].off, reg_pairs[j].val);
}
}
}

/* Take the PHY out of reset */
ret = reset_control_deassert(sp->phy_rst);
if (ret)
return ret;

return 0;
}
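
The table-indexing pattern above (configure [phy_t1][phy_t2][ssc] on the first pass, swap the types, then configure [phy_t2][phy_t1][ssc] on the second) can be exercised in isolation. Below is a minimal, compilable sketch with placeholder table contents; the TYPE_* names follow the ones visible in the diff, while the SSC mode names, strings, and lane assignments are illustrative assumptions only:

#include <stdio.h>

/* Simplified stand-ins for the driver's enums and lookup tables */
enum phy_type { TYPE_NONE, TYPE_PCIE, TYPE_QSGMII, NUM_TYPE };
enum ssc_mode { NO_SSC, EXTERNAL_SSC, INTERNAL_SSC, NUM_SSC };

static const char *vals[NUM_TYPE][NUM_TYPE][NUM_SSC];

#define swap(a, b) do { __typeof__(a) _t = (a); (a) = (b); (b) = _t; } while (0)

int main(void)
{
	enum phy_type phy_t1 = TYPE_PCIE, phy_t2 = TYPE_QSGMII;
	enum ssc_mode ssc = NO_SSC;
	int node;

	/* Pass 1 programs the PCIe side (PLLLC), pass 2 the QSGMII side (PLLLC1) */
	vals[TYPE_PCIE][TYPE_QSGMII][NO_SSC] = "PCIe values (PLLLC, lanes 0-1)";
	vals[TYPE_QSGMII][TYPE_PCIE][NO_SSC] = "QSGMII values (PLLLC1, lanes 2-3)";

	for (node = 0; node < 2; node++) {
		if (node == 1)
			swap(phy_t1, phy_t2);	/* second pass: [phy_t2][phy_t1][ssc] */
		printf("node %d -> %s\n", node, vals[phy_t1][phy_t2][ssc]);
	}
	return 0;
}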

static int cdns_sierra_phy_probe(struct platform_device *pdev)
{
struct cdns_sierra_phy *sp;
@@ -1149,8 +1328,11 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
}

/* If more than one subnode, configure the PHY as multilink */
if (!sp->autoconf && sp->nsubnodes > 1)
regmap_field_write(sp->phy_pll_cfg_1, 0x1);
if (!sp->autoconf && sp->nsubnodes > 1) {
ret = cdns_sierra_phy_configure_multilink(sp);
if (ret)
goto put_child2;
}

pm_runtime_enable(dev);
phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);