aboutsummaryrefslogtreecommitdiff
path: root/drivers
diff options
context:
space:
mode:
authorTom Rini <trini@konsulko.com>2022-02-08 12:28:04 -0500
committerTom Rini <trini@konsulko.com>2022-02-08 12:28:04 -0500
commit531c00894577a0a852431adf61ade76925f8b162 (patch)
tree37cde6437d2f64f49d5eec47cfccd36bb92d4146 /drivers
parent8b139f4e1c08c4ffb1a8e739db128ed02cbc637f (diff)
parentf55d4978e130bbe488f031bcad2763ea90c372bd (diff)
Merge branch '2022-02-08-TI-platform-updates'
- J721S2 support, IPU support on DRA7, SIERRA PHY multilink configuration support, Nokia RX-51 DM_KEYBOARD conversion
Diffstat (limited to 'drivers')
-rw-r--r--drivers/clk/ti/clk-k3.c5
-rw-r--r--drivers/dma/ti/Makefile1
-rw-r--r--drivers/dma/ti/k3-psil-j721s2.c167
-rw-r--r--drivers/dma/ti/k3-psil-priv.h1
-rw-r--r--drivers/dma/ti/k3-psil.c2
-rw-r--r--drivers/firmware/ti_sci_static_data.h40
-rw-r--r--drivers/misc/Kconfig9
-rw-r--r--drivers/misc/Makefile2
-rw-r--r--drivers/phy/cadence/phy-cadence-sierra.c1537
-rw-r--r--drivers/phy/ti/phy-j721e-wiz.c2
-rw-r--r--drivers/power/domain/ti-power-domain.c5
-rw-r--r--drivers/ram/Kconfig2
-rw-r--r--drivers/ram/k3-ddrss/k3-ddrss.c296
-rw-r--r--drivers/ram/k3-ddrss/lpddr4_structs_if.h1
-rw-r--r--drivers/remoteproc/Kconfig10
-rw-r--r--drivers/remoteproc/Makefile1
-rw-r--r--drivers/remoteproc/ipu_rproc.c759
-rw-r--r--drivers/remoteproc/k3_system_controller.c20
-rw-r--r--drivers/remoteproc/rproc-uclass.c534
-rw-r--r--drivers/reset/Kconfig6
-rw-r--r--drivers/reset/Makefile1
-rw-r--r--drivers/reset/reset-dra7.c97
-rw-r--r--drivers/soc/soc_ti_k3.c44
23 files changed, 3317 insertions, 225 deletions
diff --git a/drivers/clk/ti/clk-k3.c b/drivers/clk/ti/clk-k3.c
index e04c57eff2..74beb4d8eb 100644
--- a/drivers/clk/ti/clk-k3.c
+++ b/drivers/clk/ti/clk-k3.c
@@ -68,6 +68,11 @@ static const struct soc_attr ti_k3_soc_clk_data[] = {
.family = "J7200",
.data = &j7200_clk_platdata,
},
+#elif CONFIG_SOC_K3_J721S2
+ {
+ .family = "J721S2",
+ .data = &j721s2_clk_platdata,
+ },
#endif
{ /* sentinel */ }
};
diff --git a/drivers/dma/ti/Makefile b/drivers/dma/ti/Makefile
index 0391cd3d80..6a4f4f1365 100644
--- a/drivers/dma/ti/Makefile
+++ b/drivers/dma/ti/Makefile
@@ -5,4 +5,5 @@ obj-$(CONFIG_TI_K3_PSIL) += k3-psil-data.o
k3-psil-data-y += k3-psil.o
k3-psil-data-$(CONFIG_SOC_K3_AM6) += k3-psil-am654.o
k3-psil-data-$(CONFIG_SOC_K3_J721E) += k3-psil-j721e.o
+k3-psil-data-$(CONFIG_SOC_K3_J721S2) += k3-psil-j721s2.o
k3-psil-data-$(CONFIG_SOC_K3_AM642) += k3-psil-am64.o
diff --git a/drivers/dma/ti/k3-psil-j721s2.c b/drivers/dma/ti/k3-psil-j721s2.c
new file mode 100644
index 0000000000..4c4172a4d2
--- /dev/null
+++ b/drivers/dma/ti/k3-psil-j721s2.c
@@ -0,0 +1,167 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Texas Instruments Incorporated - https://www.ti.com
+ */
+
+#include <linux/kernel.h>
+
+#include "k3-psil-priv.h"
+
+#define PSIL_PDMA_XY_TR(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_PDMA_XY, \
+ }, \
+ }
+
+#define PSIL_PDMA_XY_PKT(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_PDMA_XY, \
+ .pkt_mode = 1, \
+ }, \
+ }
+
+#define PSIL_PDMA_MCASP(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_PDMA_XY, \
+ .pdma_acc32 = 1, \
+ .pdma_burst = 1, \
+ }, \
+ }
+
+#define PSIL_ETHERNET(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_NATIVE, \
+ .pkt_mode = 1, \
+ .needs_epib = 1, \
+ .psd_size = 16, \
+ }, \
+ }
+
+#define PSIL_SA2UL(x, tx) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_NATIVE, \
+ .pkt_mode = 1, \
+ .needs_epib = 1, \
+ .psd_size = 64, \
+ .notdpkt = tx, \
+ }, \
+ }
+
+/* PSI-L source thread IDs, used for RX (DMA_DEV_TO_MEM) */
+static struct psil_ep j721s2_src_ep_map[] = {
+ /* PDMA_MCASP - McASP0-4 */
+ PSIL_PDMA_MCASP(0x4400),
+ PSIL_PDMA_MCASP(0x4401),
+ PSIL_PDMA_MCASP(0x4402),
+ PSIL_PDMA_MCASP(0x4403),
+ PSIL_PDMA_MCASP(0x4404),
+ /* PDMA_SPI_G0 - SPI0-3 */
+ PSIL_PDMA_XY_PKT(0x4600),
+ PSIL_PDMA_XY_PKT(0x4601),
+ PSIL_PDMA_XY_PKT(0x4602),
+ PSIL_PDMA_XY_PKT(0x4603),
+ PSIL_PDMA_XY_PKT(0x4604),
+ PSIL_PDMA_XY_PKT(0x4605),
+ PSIL_PDMA_XY_PKT(0x4606),
+ PSIL_PDMA_XY_PKT(0x4607),
+ PSIL_PDMA_XY_PKT(0x4608),
+ PSIL_PDMA_XY_PKT(0x4609),
+ PSIL_PDMA_XY_PKT(0x460a),
+ PSIL_PDMA_XY_PKT(0x460b),
+ PSIL_PDMA_XY_PKT(0x460c),
+ PSIL_PDMA_XY_PKT(0x460d),
+ PSIL_PDMA_XY_PKT(0x460e),
+ PSIL_PDMA_XY_PKT(0x460f),
+ /* PDMA_SPI_G1 - SPI4-7 */
+ PSIL_PDMA_XY_PKT(0x4610),
+ PSIL_PDMA_XY_PKT(0x4611),
+ PSIL_PDMA_XY_PKT(0x4612),
+ PSIL_PDMA_XY_PKT(0x4613),
+ PSIL_PDMA_XY_PKT(0x4614),
+ PSIL_PDMA_XY_PKT(0x4615),
+ PSIL_PDMA_XY_PKT(0x4616),
+ PSIL_PDMA_XY_PKT(0x4617),
+ PSIL_PDMA_XY_PKT(0x4618),
+ PSIL_PDMA_XY_PKT(0x4619),
+ PSIL_PDMA_XY_PKT(0x461a),
+ PSIL_PDMA_XY_PKT(0x461b),
+ PSIL_PDMA_XY_PKT(0x461c),
+ PSIL_PDMA_XY_PKT(0x461d),
+ PSIL_PDMA_XY_PKT(0x461e),
+ PSIL_PDMA_XY_PKT(0x461f),
+ /* PDMA_USART_G0 - UART0-1 */
+ PSIL_PDMA_XY_PKT(0x4700),
+ PSIL_PDMA_XY_PKT(0x4701),
+ /* PDMA_USART_G1 - UART2-3 */
+ PSIL_PDMA_XY_PKT(0x4702),
+ PSIL_PDMA_XY_PKT(0x4703),
+ /* PDMA_USART_G2 - UART4-9 */
+ PSIL_PDMA_XY_PKT(0x4704),
+ PSIL_PDMA_XY_PKT(0x4705),
+ PSIL_PDMA_XY_PKT(0x4706),
+ PSIL_PDMA_XY_PKT(0x4707),
+ PSIL_PDMA_XY_PKT(0x4708),
+ PSIL_PDMA_XY_PKT(0x4709),
+ /* CPSW0 */
+ PSIL_ETHERNET(0x7000),
+ /* MCU_PDMA0 (MCU_PDMA_MISC_G0) - SPI0 */
+ PSIL_PDMA_XY_PKT(0x7100),
+ PSIL_PDMA_XY_PKT(0x7101),
+ PSIL_PDMA_XY_PKT(0x7102),
+ PSIL_PDMA_XY_PKT(0x7103),
+ /* MCU_PDMA1 (MCU_PDMA_MISC_G1) - SPI1-2 */
+ PSIL_PDMA_XY_PKT(0x7200),
+ PSIL_PDMA_XY_PKT(0x7201),
+ PSIL_PDMA_XY_PKT(0x7202),
+ PSIL_PDMA_XY_PKT(0x7203),
+ PSIL_PDMA_XY_PKT(0x7204),
+ PSIL_PDMA_XY_PKT(0x7205),
+ PSIL_PDMA_XY_PKT(0x7206),
+ PSIL_PDMA_XY_PKT(0x7207),
+ /* MCU_PDMA2 (MCU_PDMA_MISC_G2) - UART0 */
+ PSIL_PDMA_XY_PKT(0x7300),
+ /* MCU_PDMA_ADC - ADC0-1 */
+ PSIL_PDMA_XY_TR(0x7400),
+ PSIL_PDMA_XY_TR(0x7401),
+ PSIL_PDMA_XY_TR(0x7402),
+ PSIL_PDMA_XY_TR(0x7403),
+ /* SA2UL */
+ PSIL_SA2UL(0x7500, 0),
+ PSIL_SA2UL(0x7501, 0),
+ PSIL_SA2UL(0x7502, 0),
+ PSIL_SA2UL(0x7503, 0),
+};
+
+/* PSI-L destination thread IDs, used for TX (DMA_MEM_TO_DEV) */
+static struct psil_ep j721s2_dst_ep_map[] = {
+ /* CPSW0 */
+ PSIL_ETHERNET(0xf000),
+ PSIL_ETHERNET(0xf001),
+ PSIL_ETHERNET(0xf002),
+ PSIL_ETHERNET(0xf003),
+ PSIL_ETHERNET(0xf004),
+ PSIL_ETHERNET(0xf005),
+ PSIL_ETHERNET(0xf006),
+ PSIL_ETHERNET(0xf007),
+ /* SA2UL */
+ PSIL_SA2UL(0xf500, 1),
+ PSIL_SA2UL(0xf501, 1),
+};
+
+struct psil_ep_map j721s2_ep_map = {
+ .name = "j721s2",
+ .src = j721s2_src_ep_map,
+ .src_count = ARRAY_SIZE(j721s2_src_ep_map),
+ .dst = j721s2_dst_ep_map,
+ .dst_count = ARRAY_SIZE(j721s2_dst_ep_map),
+};
diff --git a/drivers/dma/ti/k3-psil-priv.h b/drivers/dma/ti/k3-psil-priv.h
index 02d1c201a9..77acaf2139 100644
--- a/drivers/dma/ti/k3-psil-priv.h
+++ b/drivers/dma/ti/k3-psil-priv.h
@@ -39,6 +39,7 @@ struct psil_endpoint_config *psil_get_ep_config(u32 thread_id);
/* SoC PSI-L endpoint maps */
extern struct psil_ep_map am654_ep_map;
extern struct psil_ep_map j721e_ep_map;
+extern struct psil_ep_map j721s2_ep_map;
extern struct psil_ep_map am64_ep_map;
#endif /* K3_PSIL_PRIV_H_ */
diff --git a/drivers/dma/ti/k3-psil.c b/drivers/dma/ti/k3-psil.c
index e82f807541..8b2129d4f5 100644
--- a/drivers/dma/ti/k3-psil.c
+++ b/drivers/dma/ti/k3-psil.c
@@ -20,6 +20,8 @@ struct psil_endpoint_config *psil_get_ep_config(u32 thread_id)
soc_ep_map = &am654_ep_map;
else if (IS_ENABLED(CONFIG_SOC_K3_J721E))
soc_ep_map = &j721e_ep_map;
+ else if (IS_ENABLED(CONFIG_SOC_K3_J721S2))
+ soc_ep_map = &j721s2_ep_map;
else if (IS_ENABLED(CONFIG_SOC_K3_AM642))
soc_ep_map = &am64_ep_map;
}
diff --git a/drivers/firmware/ti_sci_static_data.h b/drivers/firmware/ti_sci_static_data.h
index 3c506e667a..e6a3b66c03 100644
--- a/drivers/firmware/ti_sci_static_data.h
+++ b/drivers/firmware/ti_sci_static_data.h
@@ -56,21 +56,21 @@ static struct ti_sci_resource_static_data rm_static_data[] = {
{
.dev_id = 235,
.subtype = 1,
- .range_start = 144,
+ .range_start = 124,
.range_num = 32,
},
/* TX channels */
{
.dev_id = 236,
.subtype = 13,
- .range_start = 7,
+ .range_start = 6,
.range_num = 2,
},
/* RX channels */
{
.dev_id = 236,
.subtype = 10,
- .range_start = 7,
+ .range_start = 6,
.range_num = 2,
},
/* RX Free flows */
@@ -84,6 +84,40 @@ static struct ti_sci_resource_static_data rm_static_data[] = {
};
#endif /* CONFIG_TARGET_J7200_R5_EVM */
+#if IS_ENABLED(CONFIG_TARGET_J721S2_R5_EVM)
+static struct ti_sci_resource_static_data rm_static_data[] = {
+ /* Free rings */
+ {
+ .dev_id = 272,
+ .subtype = 1,
+ .range_start = 180,
+ .range_num = 32,
+ },
+ /* TX channels */
+ {
+ .dev_id = 273,
+ .subtype = 13,
+ .range_start = 12,
+ .range_num = 2,
+ },
+ /* RX channels */
+ {
+ .dev_id = 273,
+ .subtype = 10,
+ .range_start = 12,
+ .range_num = 2,
+ },
+ /* RX Free flows */
+ {
+ .dev_id = 273,
+ .subtype = 0,
+ .range_start = 80,
+ .range_num = 8,
+ },
+ { },
+};
+#endif /* CONFIG_TARGET_J721S2_R5_EVM */
+
#else
static struct ti_sci_resource_static_data rm_static_data[] = {
{ },
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index a8baaeaf5c..0ade3e32b0 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -453,6 +453,15 @@ config FS_LOADER
The consumer driver would then use this loader to program whatever,
ie. the FPGA device.
+config SPL_FS_LOADER
+ bool "Enable loader driver for file system"
+ help
+ This is file system generic loader which can be used to load
+ the file image from the storage into target such as memory.
+
+ The consumer driver would then use this loader to program whatever,
+ ie. the FPGA device.
+
config GDSYS_SOC
bool "Enable gdsys SOC driver"
depends on MISC
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index f9826d2462..bca7b24e99 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -37,7 +37,7 @@ obj-$(CONFIG_FSL_IFC) += fsl_ifc.o
obj-$(CONFIG_FSL_IIM) += fsl_iim.o
obj-$(CONFIG_FSL_MC9SDZ60) += mc9sdz60.o
obj-$(CONFIG_FSL_SEC_MON) += fsl_sec_mon.o
-obj-$(CONFIG_FS_LOADER) += fs_loader.o
+obj-$(CONFIG_$(SPL_)FS_LOADER) += fs_loader.o
obj-$(CONFIG_GDSYS_IOEP) += gdsys_ioep.o
obj-$(CONFIG_GDSYS_RXAUI_CTRL) += gdsys_rxaui_ctrl.o
obj-$(CONFIG_GDSYS_SOC) += gdsys_soc.o
diff --git a/drivers/phy/cadence/phy-cadence-sierra.c b/drivers/phy/cadence/phy-cadence-sierra.c
index 715def6f17..d95d4b432a 100644
--- a/drivers/phy/cadence/phy-cadence-sierra.c
+++ b/drivers/phy/cadence/phy-cadence-sierra.c
@@ -13,6 +13,8 @@
*/
#include <common.h>
#include <clk.h>
+#include <linux/delay.h>
+#include <linux/clk-provider.h>
#include <generic-phy.h>
#include <reset.h>
#include <dm/device.h>
@@ -24,18 +26,36 @@
#include <dm/devres.h>
#include <linux/io.h>
#include <dt-bindings/phy/phy.h>
+#include <dt-bindings/phy/phy-cadence.h>
#include <regmap.h>
+#define usleep_range(a, b) udelay((b))
+
+#define NUM_SSC_MODE 3
+#define NUM_PHY_TYPE 4
+
/* PHY register offsets */
#define SIERRA_COMMON_CDB_OFFSET 0x0
#define SIERRA_MACRO_ID_REG 0x0
+#define SIERRA_CMN_PLLLC_GEN_PREG 0x42
#define SIERRA_CMN_PLLLC_MODE_PREG 0x48
#define SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG 0x49
#define SIERRA_CMN_PLLLC_LF_COEFF_MODE0_PREG 0x4A
#define SIERRA_CMN_PLLLC_LOCK_CNTSTART_PREG 0x4B
#define SIERRA_CMN_PLLLC_BWCAL_MODE1_PREG 0x4F
#define SIERRA_CMN_PLLLC_BWCAL_MODE0_PREG 0x50
+#define SIERRA_CMN_PLLLC_DSMCORR_PREG 0x51
+#define SIERRA_CMN_PLLLC_SS_PREG 0x52
+#define SIERRA_CMN_PLLLC_SS_AMP_STEP_SIZE_PREG 0x53
+#define SIERRA_CMN_PLLLC_SSTWOPT_PREG 0x54
#define SIERRA_CMN_PLLLC_SS_TIME_STEPSIZE_MODE_PREG 0x62
+#define SIERRA_CMN_REFRCV_PREG 0x98
+#define SIERRA_CMN_REFRCV1_PREG 0xB8
+#define SIERRA_CMN_PLLLC1_GEN_PREG 0xC2
+#define SIERRA_CMN_PLLLC_LOCK_DELAY_CTRL_PREG 0x63
+#define SIERRA_CMN_PLLLC1_LF_COEFF_MODE0_PREG 0xCA
+#define SIERRA_CMN_PLLLC1_BWCAL_MODE0_PREG 0xD0
+#define SIERRA_CMN_PLLLC1_SS_TIME_STEPSIZE_MODE_PREG 0xE2
#define SIERRA_LANE_CDB_OFFSET(ln, offset) \
(0x4000 + ((ln) * (0x800 >> (2 - (offset)))))
@@ -47,7 +67,11 @@
#define SIERRA_DET_STANDEC_E_PREG 0x004
#define SIERRA_PSM_LANECAL_DLY_A1_RESETS_PREG 0x008
#define SIERRA_PSM_A0IN_TMR_PREG 0x009
+#define SIERRA_PSM_A3IN_TMR_PREG 0x00C
#define SIERRA_PSM_DIAG_PREG 0x015
+#define SIERRA_PSC_LN_A3_PREG 0x023
+#define SIERRA_PSC_LN_A4_PREG 0x024
+#define SIERRA_PSC_LN_IDLE_PREG 0x026
#define SIERRA_PSC_TX_A0_PREG 0x028
#define SIERRA_PSC_TX_A1_PREG 0x029
#define SIERRA_PSC_TX_A2_PREG 0x02A
@@ -57,18 +81,22 @@
#define SIERRA_PSC_RX_A2_PREG 0x032
#define SIERRA_PSC_RX_A3_PREG 0x033
#define SIERRA_PLLCTRL_SUBRATE_PREG 0x03A
+#define SIERRA_PLLCTRL_GEN_A_PREG 0x03B
#define SIERRA_PLLCTRL_GEN_D_PREG 0x03E
#define SIERRA_PLLCTRL_CPGAIN_MODE_PREG 0x03F
#define SIERRA_PLLCTRL_STATUS_PREG 0x044
#define SIERRA_CLKPATH_BIASTRIM_PREG 0x04B
#define SIERRA_DFE_BIASTRIM_PREG 0x04C
#define SIERRA_DRVCTRL_ATTEN_PREG 0x06A
+#define SIERRA_DRVCTRL_BOOST_PREG 0x06F
#define SIERRA_CLKPATHCTRL_TMR_PREG 0x081
#define SIERRA_RX_CREQ_FLTR_A_MODE3_PREG 0x085
#define SIERRA_RX_CREQ_FLTR_A_MODE2_PREG 0x086
#define SIERRA_RX_CREQ_FLTR_A_MODE1_PREG 0x087
#define SIERRA_RX_CREQ_FLTR_A_MODE0_PREG 0x088
+#define SIERRA_CREQ_DCBIASATTEN_OVR_PREG 0x08C
#define SIERRA_CREQ_CCLKDET_MODE01_PREG 0x08E
+#define SIERRA_RX_CTLE_CAL_PREG 0x08F
#define SIERRA_RX_CTLE_MAINTENANCE_PREG 0x091
#define SIERRA_CREQ_FSMCLK_SEL_PREG 0x092
#define SIERRA_CREQ_EQ_CTRL_PREG 0x093
@@ -118,15 +146,28 @@
#define SIERRA_DEQ_ALUT12 0x114
#define SIERRA_DEQ_ALUT13 0x115
#define SIERRA_DEQ_DFETAP_CTRL_PREG 0x128
+#define SIERRA_DEQ_DFETAP0 0x129
+#define SIERRA_DEQ_DFETAP1 0x12B
+#define SIERRA_DEQ_DFETAP2 0x12D
+#define SIERRA_DEQ_DFETAP3 0x12F
+#define SIERRA_DEQ_DFETAP4 0x131
#define SIERRA_DFE_EN_1010_IGNORE_PREG 0x134
+#define SIERRA_DEQ_PRECUR_PREG 0x138
+#define SIERRA_DEQ_POSTCUR_PREG 0x140
+#define SIERRA_DEQ_POSTCUR_DECR_PREG 0x142
#define SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG 0x150
#define SIERRA_DEQ_TAU_CTRL2_PREG 0x151
+#define SIERRA_DEQ_TAU_CTRL3_PREG 0x152
+#define SIERRA_DEQ_OPENEYE_CTRL_PREG 0x158
#define SIERRA_DEQ_PICTRL_PREG 0x161
#define SIERRA_CPICAL_TMRVAL_MODE1_PREG 0x170
#define SIERRA_CPICAL_TMRVAL_MODE0_PREG 0x171
#define SIERRA_CPICAL_PICNT_MODE1_PREG 0x174
#define SIERRA_CPI_OUTBUF_RATESEL_PREG 0x17C
+#define SIERRA_CPI_RESBIAS_BIN_PREG 0x17E
+#define SIERRA_CPI_TRIM_PREG 0x17F
#define SIERRA_CPICAL_RES_STARTCODE_MODE23_PREG 0x183
+#define SIERRA_EPI_CTRL_PREG 0x187
#define SIERRA_LFPSDET_SUPPORT_PREG 0x188
#define SIERRA_LFPSFILT_NS_PREG 0x18A
#define SIERRA_LFPSFILT_RD_PREG 0x18B
@@ -140,29 +181,112 @@
#define SIERRA_DEQ_TAU_CTRL1_FAST_MAINT_PREG 0x14F
#define SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG 0x150
-#define SIERRA_PHY_CONFIG_CTRL_OFFSET 0xc000
+#define SIERRA_PHY_PCS_COMMON_OFFSET 0xc000
+#define SIERRA_PHY_PIPE_CMN_CTRL1 0x0
#define SIERRA_PHY_PLL_CFG 0xe
+/* PHY PMA common registers */
+#define SIERRA_PHY_PMA_COMMON_OFFSET 0xe000
+#define SIERRA_PHY_PMA_CMN_CTRL 0x0
+
+/* PHY PCS lane registers */
+#define SIERRA_PHY_PCS_LANE_CDB_OFFSET(ln, offset) \
+ (0xD000 + ((ln) * (0x800 >> (3 - (offset)))))
+#define SIERRA_PHY_ISO_LINK_CTRL 0xB
+
+/* PHY PMA lane registers */
+#define SIERRA_PHY_PMA_LANE_CDB_OFFSET(ln, offset) \
+ (0xF000 + ((ln) * (0x800 >> (3 - (offset)))))
+#define SIERRA_PHY_PMA_XCVR_CTRL 0x000
+
#define SIERRA_MACRO_ID 0x00007364
#define SIERRA_MAX_LANES 16
#define PLL_LOCK_TIME 100
+#define CDNS_SIERRA_INPUT_CLOCKS 5
+enum cdns_sierra_clock_input {
+ PHY_CLK,
+ CMN_REFCLK_DIG_DIV,
+ CMN_REFCLK1_DIG_DIV,
+ PLL0_REFCLK,
+ PLL1_REFCLK,
+};
+
+#define SIERRA_NUM_CMN_PLLC 2
+#define SIERRA_NUM_CMN_PLLC_PARENTS 2
+
static const struct reg_field macro_id_type =
REG_FIELD(SIERRA_MACRO_ID_REG, 0, 15);
static const struct reg_field phy_pll_cfg_1 =
REG_FIELD(SIERRA_PHY_PLL_CFG, 1, 1);
+static const struct reg_field pma_cmn_ready =
+ REG_FIELD(SIERRA_PHY_PMA_CMN_CTRL, 0, 0);
static const struct reg_field pllctrl_lock =
REG_FIELD(SIERRA_PLLCTRL_STATUS_PREG, 0, 0);
+static const struct reg_field phy_iso_link_ctrl_1 =
+ REG_FIELD(SIERRA_PHY_ISO_LINK_CTRL, 1, 1);
+
+static const char * const clk_names[] = {
+ [CDNS_SIERRA_PLL_CMNLC] = "pll_cmnlc",
+ [CDNS_SIERRA_PLL_CMNLC1] = "pll_cmnlc1",
+};
+
+enum cdns_sierra_cmn_plllc {
+ CMN_PLLLC,
+ CMN_PLLLC1,
+};
+
+struct cdns_sierra_pll_mux_reg_fields {
+ struct reg_field pfdclk_sel_preg;
+ struct reg_field plllc1en_field;
+ struct reg_field termen_field;
+};
+
+static const struct cdns_sierra_pll_mux_reg_fields cmn_plllc_pfdclk1_sel_preg[] = {
+ [CMN_PLLLC] = {
+ .pfdclk_sel_preg = REG_FIELD(SIERRA_CMN_PLLLC_GEN_PREG, 1, 1),
+ .plllc1en_field = REG_FIELD(SIERRA_CMN_REFRCV1_PREG, 8, 8),
+ .termen_field = REG_FIELD(SIERRA_CMN_REFRCV1_PREG, 0, 0),
+ },
+ [CMN_PLLLC1] = {
+ .pfdclk_sel_preg = REG_FIELD(SIERRA_CMN_PLLLC1_GEN_PREG, 1, 1),
+ .plllc1en_field = REG_FIELD(SIERRA_CMN_REFRCV_PREG, 8, 8),
+ .termen_field = REG_FIELD(SIERRA_CMN_REFRCV_PREG, 0, 0),
+ },
+};
+
+struct cdns_sierra_pll_mux {
+ struct cdns_sierra_phy *sp;
+ struct clk *clk;
+ struct clk *parent_clks[2];
+ struct regmap_field *pfdclk_sel_preg;
+ struct regmap_field *plllc1en_field;
+ struct regmap_field *termen_field;
+};
#define reset_control_assert(rst) cdns_reset_assert(rst)
#define reset_control_deassert(rst) cdns_reset_deassert(rst)
#define reset_control reset_ctl
+enum cdns_sierra_phy_type {
+ TYPE_NONE,
+ TYPE_PCIE,
+ TYPE_USB,
+ TYPE_QSGMII
+};
+
+enum cdns_sierra_ssc_mode {
+ NO_SSC,
+ EXTERNAL_SSC,
+ INTERNAL_SSC
+};
+
struct cdns_sierra_inst {
- u32 phy_type;
+ enum cdns_sierra_phy_type phy_type;
u32 num_lanes;
u32 mlane;
struct reset_ctl_bulk *lnk_rst;
+ enum cdns_sierra_ssc_mode ssc_mode;
};
struct cdns_reg_pairs {
@@ -170,24 +294,23 @@ struct cdns_reg_pairs {
u32 off;
};
+struct cdns_sierra_vals {
+ const struct cdns_reg_pairs *reg_pairs;
+ u32 num_regs;
+};
+
struct cdns_sierra_data {
u32 id_value;
u8 block_offset_shift;
u8 reg_offset_shift;
- u32 pcie_cmn_regs;
- u32 pcie_ln_regs;
- u32 usb_cmn_regs;
- u32 usb_ln_regs;
- struct cdns_reg_pairs *pcie_cmn_vals;
- struct cdns_reg_pairs *pcie_ln_vals;
- struct cdns_reg_pairs *usb_cmn_vals;
- struct cdns_reg_pairs *usb_ln_vals;
-};
-
-struct cdns_regmap_cdb_context {
- struct udevice *dev;
- void __iomem *base;
- u8 reg_offset_shift;
+ struct cdns_sierra_vals *pcs_cmn_vals[NUM_PHY_TYPE][NUM_PHY_TYPE]
+ [NUM_SSC_MODE];
+ struct cdns_sierra_vals *phy_pma_ln_vals[NUM_PHY_TYPE][NUM_PHY_TYPE]
+ [NUM_SSC_MODE];
+ struct cdns_sierra_vals *pma_cmn_vals[NUM_PHY_TYPE][NUM_PHY_TYPE]
+ [NUM_SSC_MODE];
+ struct cdns_sierra_vals *pma_ln_vals[NUM_PHY_TYPE][NUM_PHY_TYPE]
+ [NUM_SSC_MODE];
};
struct cdns_sierra_phy {
@@ -196,20 +319,27 @@ struct cdns_sierra_phy {
size_t size;
struct regmap *regmap;
struct cdns_sierra_data *init_data;
- struct cdns_sierra_inst phys[SIERRA_MAX_LANES];
+ struct cdns_sierra_inst *phys[SIERRA_MAX_LANES];
struct reset_control *phy_rst;
struct regmap *regmap_lane_cdb[SIERRA_MAX_LANES];
- struct regmap *regmap_phy_config_ctrl;
+ struct regmap *regmap_phy_pcs_common_cdb;
+ struct regmap *regmap_phy_pcs_lane_cdb[SIERRA_MAX_LANES];
+ struct regmap *regmap_phy_pma_common_cdb;
+ struct regmap *regmap_phy_pma_lane_cdb[SIERRA_MAX_LANES];
struct regmap *regmap_common_cdb;
struct regmap_field *macro_id_type;
struct regmap_field *phy_pll_cfg_1;
+ struct regmap_field *pma_cmn_ready;
struct regmap_field *pllctrl_lock[SIERRA_MAX_LANES];
- struct clk *clk;
- struct clk *cmn_refclk;
- struct clk *cmn_refclk1;
+ struct regmap_field *cmn_refrcv_refclk_plllc1en_preg[SIERRA_NUM_CMN_PLLC];
+ struct regmap_field *cmn_refrcv_refclk_termen_preg[SIERRA_NUM_CMN_PLLC];
+ struct regmap_field *cmn_plllc_pfdclk1_sel_preg[SIERRA_NUM_CMN_PLLC];
+ struct clk *input_clks[CDNS_SIERRA_INPUT_CLOCKS];
+ struct regmap_field *phy_iso_link_ctrl_1[SIERRA_MAX_LANES];
int nsubnodes;
u32 num_lanes;
bool autoconf;
+ unsigned int already_configured;
};
static inline int cdns_reset_assert(struct reset_control *rst)
@@ -237,8 +367,8 @@ static inline struct cdns_sierra_inst *phy_get_drvdata(struct phy *phy)
return NULL;
for (index = 0; index < sp->nsubnodes; index++) {
- if (phy->id == sp->phys[index].mlane)
- return &sp->phys[index];
+ if (phy->id == sp->phys[index]->mlane)
+ return sp->phys[index];
}
return NULL;
@@ -248,40 +378,65 @@ static int cdns_sierra_phy_init(struct phy *gphy)
{
struct cdns_sierra_inst *ins = phy_get_drvdata(gphy);
struct cdns_sierra_phy *phy = dev_get_priv(gphy->dev);
+ struct cdns_sierra_data *init_data = phy->init_data;
+ struct cdns_sierra_vals *pma_cmn_vals, *pma_ln_vals;
+ enum cdns_sierra_phy_type phy_type = ins->phy_type;
+ enum cdns_sierra_ssc_mode ssc = ins->ssc_mode;
+ struct cdns_sierra_vals *phy_pma_ln_vals;
+ const struct cdns_reg_pairs *reg_pairs;
+ struct cdns_sierra_vals *pcs_cmn_vals;
struct regmap *regmap = phy->regmap;
+ u32 num_regs;
int i, j;
- struct cdns_reg_pairs *cmn_vals, *ln_vals;
- u32 num_cmn_regs, num_ln_regs;
/* Initialise the PHY registers, unless auto configured */
- if (phy->autoconf)
+ if (phy->autoconf || phy->already_configured || phy->nsubnodes > 1)
return 0;
- clk_set_rate(phy->cmn_refclk, 25000000);
- clk_set_rate(phy->cmn_refclk1, 25000000);
-
- if (ins->phy_type == PHY_TYPE_PCIE) {
- num_cmn_regs = phy->init_data->pcie_cmn_regs;
- num_ln_regs = phy->init_data->pcie_ln_regs;
- cmn_vals = phy->init_data->pcie_cmn_vals;
- ln_vals = phy->init_data->pcie_ln_vals;
- } else if (ins->phy_type == PHY_TYPE_USB3) {
- num_cmn_regs = phy->init_data->usb_cmn_regs;
- num_ln_regs = phy->init_data->usb_ln_regs;
- cmn_vals = phy->init_data->usb_cmn_vals;
- ln_vals = phy->init_data->usb_ln_vals;
- } else {
- return -EINVAL;
+ clk_set_rate(phy->input_clks[CMN_REFCLK_DIG_DIV], 25000000);
+ clk_set_rate(phy->input_clks[CMN_REFCLK1_DIG_DIV], 25000000);
+
+ /* PHY PCS common registers configurations */
+ pcs_cmn_vals = init_data->pcs_cmn_vals[phy_type][TYPE_NONE][ssc];
+ if (pcs_cmn_vals) {
+ reg_pairs = pcs_cmn_vals->reg_pairs;
+ num_regs = pcs_cmn_vals->num_regs;
+ regmap = phy->regmap_phy_pcs_common_cdb;
+ for (i = 0; i < num_regs; i++)
+ regmap_write(regmap, reg_pairs[i].off, reg_pairs[i].val);
}
- regmap = phy->regmap_common_cdb;
- for (j = 0; j < num_cmn_regs ; j++)
- regmap_write(regmap, cmn_vals[j].off, cmn_vals[j].val);
+ /* PHY PMA lane registers configurations */
+ phy_pma_ln_vals = init_data->phy_pma_ln_vals[phy_type][TYPE_NONE][ssc];
+ if (phy_pma_ln_vals) {
+ reg_pairs = phy_pma_ln_vals->reg_pairs;
+ num_regs = phy_pma_ln_vals->num_regs;
+ for (i = 0; i < ins->num_lanes; i++) {
+ regmap = phy->regmap_phy_pma_lane_cdb[i + ins->mlane];
+ for (j = 0; j < num_regs; j++)
+ regmap_write(regmap, reg_pairs[j].off, reg_pairs[j].val);
+ }
+ }
+
+ /* PMA common registers configurations */
+ pma_cmn_vals = init_data->pma_cmn_vals[phy_type][TYPE_NONE][ssc];
+ if (pma_cmn_vals) {
+ reg_pairs = pma_cmn_vals->reg_pairs;
+ num_regs = pma_cmn_vals->num_regs;
+ regmap = phy->regmap_common_cdb;
+ for (i = 0; i < num_regs; i++)
+ regmap_write(regmap, reg_pairs[i].off, reg_pairs[i].val);
+ }
- for (i = 0; i < ins->num_lanes; i++) {
- for (j = 0; j < num_ln_regs ; j++) {
+ /* PMA TX lane registers configurations */
+ pma_ln_vals = init_data->pma_ln_vals[phy_type][TYPE_NONE][ssc];
+ if (pma_ln_vals) {
+ reg_pairs = pma_ln_vals->reg_pairs;
+ num_regs = pma_ln_vals->num_regs;
+ for (i = 0; i < ins->num_lanes; i++) {
regmap = phy->regmap_lane_cdb[i + ins->mlane];
- regmap_write(regmap, ln_vals[j].off, ln_vals[j].val);
+ for (j = 0; j < num_regs; j++)
+ regmap_write(regmap, reg_pairs[j].off, reg_pairs[j].val);
}
}
@@ -296,6 +451,20 @@ static int cdns_sierra_phy_on(struct phy *gphy)
u32 val;
int ret;
+ if (sp->already_configured) {
+ usleep_range(5000, 10000);
+ return 0;
+ }
+
+ if (sp->nsubnodes == 1) {
+ /* Take the PHY out of reset */
+ ret = reset_control_deassert(sp->phy_rst);
+ if (ret) {
+ dev_err(dev, "Failed to take the PHY out of reset\n");
+ return ret;
+ }
+ }
+
/* Take the PHY lane group out of reset */
ret = reset_deassert_bulk(ins->lnk_rst);
if (ret) {
@@ -303,6 +472,26 @@ static int cdns_sierra_phy_on(struct phy *gphy)
return ret;
}
+ if (ins->phy_type == TYPE_PCIE || ins->phy_type == TYPE_USB) {
+ ret = regmap_field_read_poll_timeout(sp->phy_iso_link_ctrl_1[ins->mlane],
+ val, !val, 1000, PLL_LOCK_TIME);
+ if (ret) {
+ dev_err(dev, "Timeout waiting for PHY status ready\n");
+ return ret;
+ }
+ }
+
+ /*
+ * Wait for cmn_ready assertion
+ * PHY_PMA_CMN_CTRL[0] == 1
+ */
+ ret = regmap_field_read_poll_timeout(sp->pma_cmn_ready, val, val,
+ 1000, PLL_LOCK_TIME);
+ if (ret) {
+ dev_err(dev, "Timeout waiting for CMN ready\n");
+ return ret;
+ }
+
ret = regmap_field_read_poll_timeout(sp->pllctrl_lock[ins->mlane],
val, val, 1000, PLL_LOCK_TIME);
if (ret < 0)
@@ -337,17 +526,146 @@ static const struct phy_ops ops = {
.reset = cdns_sierra_phy_reset,
};
+struct cdns_sierra_pll_mux_sel {
+ enum cdns_sierra_cmn_plllc mux_sel;
+ u32 table[2];
+ const char *node_name;
+ u32 num_parents;
+ u32 parents[2];
+};
+
+static struct cdns_sierra_pll_mux_sel pll_clk_mux_sel[] = {
+ {
+ .num_parents = 2,
+ .parents = { PLL0_REFCLK, PLL1_REFCLK },
+ .mux_sel = CMN_PLLLC,
+ .table = { 0, 1 },
+ .node_name = "pll_cmnlc",
+ },
+ {
+ .num_parents = 2,
+ .parents = { PLL1_REFCLK, PLL0_REFCLK },
+ .mux_sel = CMN_PLLLC1,
+ .table = { 1, 0 },
+ .node_name = "pll_cmnlc1",
+ },
+};
+
+static int cdns_sierra_pll_mux_set_parent(struct clk *clk, struct clk *parent)
+{
+ struct udevice *dev = clk->dev;
+ struct cdns_sierra_pll_mux *priv = dev_get_priv(dev);
+ struct cdns_sierra_pll_mux_sel *data = dev_get_plat(dev);
+ struct cdns_sierra_phy *sp = priv->sp;
+ int ret;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(priv->parent_clks); i++) {
+ if (parent->dev == priv->parent_clks[i]->dev)
+ break;
+ }
+
+ if (i == ARRAY_SIZE(priv->parent_clks))
+ return -EINVAL;
+
+ ret = regmap_field_write(sp->cmn_refrcv_refclk_plllc1en_preg[data[clk->id].mux_sel], i);
+ ret |= regmap_field_write(sp->cmn_refrcv_refclk_termen_preg[data[clk->id].mux_sel], i);
+ ret |= regmap_field_write(sp->cmn_plllc_pfdclk1_sel_preg[data[clk->id].mux_sel],
+ data[clk->id].table[i]);
+
+ return ret;
+}
+
+static const struct clk_ops cdns_sierra_pll_mux_ops = {
+ .set_parent = cdns_sierra_pll_mux_set_parent,
+};
+
+int cdns_sierra_pll_mux_probe(struct udevice *dev)
+{
+ struct cdns_sierra_pll_mux *priv = dev_get_priv(dev);
+ struct cdns_sierra_phy *sp = dev_get_priv(dev->parent);
+ struct cdns_sierra_pll_mux_sel *data = dev_get_plat(dev);
+ struct clk *clk;
+ int i, j;
+
+ for (j = 0; j < SIERRA_NUM_CMN_PLLC; j++) {
+ for (i = 0; i < ARRAY_SIZE(priv->parent_clks); i++) {
+ clk = sp->input_clks[data[j].parents[i]];
+ if (IS_ERR_OR_NULL(clk)) {
+ dev_err(dev, "No parent clock for PLL mux clocks\n");
+ return IS_ERR(clk) ? PTR_ERR(clk) : -ENOENT;
+ }
+ priv->parent_clks[i] = clk;
+ }
+ }
+
+ priv->sp = dev_get_priv(dev->parent);
+
+ return 0;
+}
+
+U_BOOT_DRIVER(cdns_sierra_pll_mux_clk) = {
+ .name = "cdns_sierra_mux_clk",
+ .id = UCLASS_CLK,
+ .priv_auto = sizeof(struct cdns_sierra_pll_mux),
+ .ops = &cdns_sierra_pll_mux_ops,
+ .probe = cdns_sierra_pll_mux_probe,
+ .plat_auto = sizeof(struct cdns_sierra_pll_mux_sel) * SIERRA_NUM_CMN_PLLC,
+};
+
+static int cdns_sierra_pll_bind_of_clocks(struct cdns_sierra_phy *sp)
+{
+ struct udevice *dev = sp->dev;
+ struct driver *cdns_sierra_clk_drv;
+ struct cdns_sierra_pll_mux_sel *data = pll_clk_mux_sel;
+ int i, rc;
+
+ cdns_sierra_clk_drv = lists_driver_lookup_name("cdns_sierra_mux_clk");
+ if (!cdns_sierra_clk_drv) {
+ dev_err(dev, "Can not find driver 'cdns_sierra_mux_clk'\n");
+ return -ENOENT;
+ }
+
+ rc = device_bind(dev, cdns_sierra_clk_drv, "pll_mux_clk",
+ data, dev_ofnode(dev), NULL);
+ if (rc) {
+ dev_err(dev, "cannot bind driver for clock %s\n",
+ clk_names[i]);
+ }
+
+ return 0;
+}
+
static int cdns_sierra_get_optional(struct cdns_sierra_inst *inst,
ofnode child)
{
+ u32 phy_type;
+
if (ofnode_read_u32(child, "reg", &inst->mlane))
return -EINVAL;
if (ofnode_read_u32(child, "cdns,num-lanes", &inst->num_lanes))
return -EINVAL;
- if (ofnode_read_u32(child, "cdns,phy-type", &inst->phy_type))
+ if (ofnode_read_u32(child, "cdns,phy-type", &phy_type))
+ return -EINVAL;
+
+ switch (phy_type) {
+ case PHY_TYPE_PCIE:
+ inst->phy_type = TYPE_PCIE;
+ break;
+ case PHY_TYPE_USB3:
+ inst->phy_type = TYPE_USB;
+ break;
+ case PHY_TYPE_QSGMII:
+ inst->phy_type = TYPE_QSGMII;
+ break;
+ default:
return -EINVAL;
+ }
+
+ inst->ssc_mode = EXTERNAL_SSC;
+ ofnode_read_u32(child, "cdns,ssc-mode", &inst->ssc_mode);
return 0;
}
@@ -371,6 +689,7 @@ static int cdns_regfield_init(struct cdns_sierra_phy *sp)
{
struct udevice *dev = sp->dev;
struct regmap_field *field;
+ struct reg_field reg_field;
struct regmap *regmap;
int i;
@@ -382,7 +701,33 @@ static int cdns_regfield_init(struct cdns_sierra_phy *sp)
}
sp->macro_id_type = field;
- regmap = sp->regmap_phy_config_ctrl;
+ for (i = 0; i < SIERRA_NUM_CMN_PLLC; i++) {
+ reg_field = cmn_plllc_pfdclk1_sel_preg[i].pfdclk_sel_preg;
+ field = devm_regmap_field_alloc(dev, regmap, reg_field);
+ if (IS_ERR(field)) {
+ dev_err(dev, "PLLLC%d_PFDCLK1_SEL failed\n", i);
+ return PTR_ERR(field);
+ }
+ sp->cmn_plllc_pfdclk1_sel_preg[i] = field;
+
+ reg_field = cmn_plllc_pfdclk1_sel_preg[i].plllc1en_field;
+ field = devm_regmap_field_alloc(dev, regmap, reg_field);
+ if (IS_ERR(field)) {
+ dev_err(dev, "REFRCV%d_REFCLK_PLLLC1EN failed\n", i);
+ return PTR_ERR(field);
+ }
+ sp->cmn_refrcv_refclk_plllc1en_preg[i] = field;
+
+ reg_field = cmn_plllc_pfdclk1_sel_preg[i].termen_field;
+ field = devm_regmap_field_alloc(dev, regmap, reg_field);
+ if (IS_ERR(field)) {
+ dev_err(dev, "REFRCV%d_REFCLK_TERMEN failed\n", i);
+ return PTR_ERR(field);
+ }
+ sp->cmn_refrcv_refclk_termen_preg[i] = field;
+ }
+
+ regmap = sp->regmap_phy_pcs_common_cdb;
field = devm_regmap_field_alloc(dev, regmap, phy_pll_cfg_1);
if (IS_ERR(field)) {
dev_err(dev, "PHY_PLL_CFG_1 reg field init failed\n");
@@ -390,6 +735,14 @@ static int cdns_regfield_init(struct cdns_sierra_phy *sp)
}
sp->phy_pll_cfg_1 = field;
+ regmap = sp->regmap_phy_pma_common_cdb;
+ field = devm_regmap_field_alloc(dev, regmap, pma_cmn_ready);
+ if (IS_ERR(field)) {
+ dev_err(dev, "PHY_PMA_CMN_CTRL reg field init failed\n");
+ return PTR_ERR(field);
+ }
+ sp->pma_cmn_ready = field;
+
for (i = 0; i < SIERRA_MAX_LANES; i++) {
regmap = sp->regmap_lane_cdb[i];
field = devm_regmap_field_alloc(dev, regmap, pllctrl_lock);
@@ -397,7 +750,17 @@ static int cdns_regfield_init(struct cdns_sierra_phy *sp)
dev_err(dev, "P%d_ENABLE reg field init failed\n", i);
return PTR_ERR(field);
}
- sp->pllctrl_lock[i] = field;
+ sp->pllctrl_lock[i] = field;
+ }
+
+ for (i = 0; i < SIERRA_MAX_LANES; i++) {
+ regmap = sp->regmap_phy_pcs_lane_cdb[i];
+ field = devm_regmap_field_alloc(dev, regmap, phy_iso_link_ctrl_1);
+ if (IS_ERR(field)) {
+ dev_err(dev, "PHY_ISO_LINK_CTRL reg field init for lane %d failed\n", i);
+ return PTR_ERR(field);
+ }
+ sp->phy_iso_link_ctrl_1[i] = field;
}
return 0;
@@ -431,25 +794,300 @@ static int cdns_regmap_init_blocks(struct cdns_sierra_phy *sp,
}
sp->regmap_common_cdb = regmap;
- regmap = cdns_regmap_init(dev, base, SIERRA_PHY_CONFIG_CTRL_OFFSET,
+ regmap = cdns_regmap_init(dev, base, SIERRA_PHY_PCS_COMMON_OFFSET,
block_offset_shift, reg_offset_shift);
if (IS_ERR(regmap)) {
- dev_err(dev, "Failed to init PHY config and control regmap\n");
+ dev_err(dev, "Failed to init PHY PCS common CDB regmap\n");
return PTR_ERR(regmap);
}
- sp->regmap_phy_config_ctrl = regmap;
+ sp->regmap_phy_pcs_common_cdb = regmap;
+
+ for (i = 0; i < SIERRA_MAX_LANES; i++) {
+ block_offset = SIERRA_PHY_PCS_LANE_CDB_OFFSET(i, reg_offset_shift);
+ regmap = cdns_regmap_init(dev, base, block_offset,
+ block_offset_shift, reg_offset_shift);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "Failed to init PHY PCS lane CDB regmap\n");
+ return PTR_ERR(regmap);
+ }
+ sp->regmap_phy_pcs_lane_cdb[i] = regmap;
+ }
+
+ regmap = cdns_regmap_init(dev, base, SIERRA_PHY_PMA_COMMON_OFFSET,
+ block_offset_shift, reg_offset_shift);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "Failed to init PHY PMA common CDB regmap\n");
+ return PTR_ERR(regmap);
+ }
+ sp->regmap_phy_pma_common_cdb = regmap;
+
+ for (i = 0; i < SIERRA_MAX_LANES; i++) {
+ block_offset = SIERRA_PHY_PMA_LANE_CDB_OFFSET(i, reg_offset_shift);
+ regmap = cdns_regmap_init(dev, base, block_offset,
+ block_offset_shift, reg_offset_shift);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "Failed to init PHY PMA lane CDB regmap\n");
+ return PTR_ERR(regmap);
+ }
+ sp->regmap_phy_pma_lane_cdb[i] = regmap;
+ }
+
+ return 0;
+}
+
+static int cdns_sierra_phy_configure_multilink(struct cdns_sierra_phy *sp)
+{
+ const struct cdns_sierra_data *init_data = sp->init_data;
+ enum cdns_sierra_phy_type phy_t1, phy_t2, tmp_phy_type;
+ struct cdns_sierra_vals *pma_cmn_vals, *pma_ln_vals;
+ struct cdns_sierra_vals *phy_pma_ln_vals;
+ const struct cdns_reg_pairs *reg_pairs;
+ struct cdns_sierra_vals *pcs_cmn_vals;
+ int i, j, node, mlane, num_lanes, ret;
+ enum cdns_sierra_ssc_mode ssc;
+ struct regmap *regmap;
+ u32 num_regs;
+
+ /* Maximum 2 links (subnodes) are supported */
+ if (sp->nsubnodes != 2)
+ return -EINVAL;
+
+ clk_set_rate(sp->input_clks[CMN_REFCLK_DIG_DIV], 25000000);
+ clk_set_rate(sp->input_clks[CMN_REFCLK1_DIG_DIV], 25000000);
+
+ /* PHY configured to use both PLL LC and LC1 */
+ regmap_field_write(sp->phy_pll_cfg_1, 0x1);
+
+ phy_t1 = sp->phys[0]->phy_type;
+ phy_t2 = sp->phys[1]->phy_type;
+
+ /*
+ * First configure the PHY for first link with phy_t1. Get the array
+ * values as [phy_t1][phy_t2][ssc].
+ */
+ for (node = 0; node < sp->nsubnodes; node++) {
+ if (node == 1) {
+ /*
+ * If first link with phy_t1 is configured, then
+ * configure the PHY for second link with phy_t2.
+ * Get the array values as [phy_t2][phy_t1][ssc].
+ */
+ tmp_phy_type = phy_t1;
+ phy_t1 = phy_t2;
+ phy_t2 = tmp_phy_type;
+ }
+
+ mlane = sp->phys[node]->mlane;
+ ssc = sp->phys[node]->ssc_mode;
+ num_lanes = sp->phys[node]->num_lanes;
+
+ /* PHY PCS common registers configurations */
+ pcs_cmn_vals = init_data->pcs_cmn_vals[phy_t1][phy_t2][ssc];
+ if (pcs_cmn_vals) {
+ reg_pairs = pcs_cmn_vals->reg_pairs;
+ num_regs = pcs_cmn_vals->num_regs;
+ regmap = sp->regmap_phy_pcs_common_cdb;
+ for (i = 0; i < num_regs; i++)
+ regmap_write(regmap, reg_pairs[i].off, reg_pairs[i].val);
+ }
+
+ /* PHY PMA lane registers configurations */
+ phy_pma_ln_vals = init_data->phy_pma_ln_vals[phy_t1][phy_t2][ssc];
+ if (phy_pma_ln_vals) {
+ reg_pairs = phy_pma_ln_vals->reg_pairs;
+ num_regs = phy_pma_ln_vals->num_regs;
+ for (i = 0; i < num_lanes; i++) {
+ regmap = sp->regmap_phy_pma_lane_cdb[i + mlane];
+ for (j = 0; j < num_regs; j++)
+ regmap_write(regmap, reg_pairs[j].off, reg_pairs[j].val);
+ }
+ }
+
+ /* PMA common registers configurations */
+ pma_cmn_vals = init_data->pma_cmn_vals[phy_t1][phy_t2][ssc];
+ if (pma_cmn_vals) {
+ reg_pairs = pma_cmn_vals->reg_pairs;
+ num_regs = pma_cmn_vals->num_regs;
+ regmap = sp->regmap_common_cdb;
+ for (i = 0; i < num_regs; i++)
+ regmap_write(regmap, reg_pairs[i].off, reg_pairs[i].val);
+ }
+
+ /* PMA TX lane registers configurations */
+ pma_ln_vals = init_data->pma_ln_vals[phy_t1][phy_t2][ssc];
+ if (pma_ln_vals) {
+ reg_pairs = pma_ln_vals->reg_pairs;
+ num_regs = pma_ln_vals->num_regs;
+ for (i = 0; i < num_lanes; i++) {
+ regmap = sp->regmap_lane_cdb[i + mlane];
+ for (j = 0; j < num_regs; j++)
+ regmap_write(regmap, reg_pairs[j].off, reg_pairs[j].val);
+ }
+ }
+
+ if (phy_t1 == TYPE_QSGMII)
+ reset_deassert_bulk(sp->phys[node]->lnk_rst);
+ }
+
+ /* Take the PHY out of reset */
+ ret = reset_control_deassert(sp->phy_rst);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int cdns_sierra_phy_get_clocks(struct cdns_sierra_phy *sp,
+ struct udevice *dev)
+{
+ struct clk *clk;
+ int ret;
+
+ clk = devm_clk_get_optional(dev, "cmn_refclk_dig_div");
+ if (IS_ERR(clk)) {
+ dev_err(dev, "cmn_refclk_dig_div clock not found\n");
+ ret = PTR_ERR(clk);
+ return ret;
+ }
+ sp->input_clks[CMN_REFCLK_DIG_DIV] = clk;
+
+ clk = devm_clk_get_optional(dev, "cmn_refclk1_dig_div");
+ if (IS_ERR(clk)) {
+ dev_err(dev, "cmn_refclk1_dig_div clock not found\n");
+ ret = PTR_ERR(clk);
+ return ret;
+ }
+ sp->input_clks[CMN_REFCLK1_DIG_DIV] = clk;
+
+ clk = devm_clk_get_optional(dev, "pll0_refclk");
+ if (IS_ERR(clk)) {
+ dev_err(dev, "pll0_refclk clock not found\n");
+ ret = PTR_ERR(clk);
+ return ret;
+ }
+ sp->input_clks[PLL0_REFCLK] = clk;
+
+ clk = devm_clk_get_optional(dev, "pll1_refclk");
+ if (IS_ERR(clk)) {
+ dev_err(dev, "pll1_refclk clock not found\n");
+ ret = PTR_ERR(clk);
+ return ret;
+ }
+ sp->input_clks[PLL1_REFCLK] = clk;
return 0;
}
+static int cdns_sierra_phy_clk(struct cdns_sierra_phy *sp)
+{
+ struct udevice *dev = sp->dev;
+ struct clk *clk;
+ int ret;
+
+ clk = devm_clk_get_optional(dev, "phy_clk");
+ if (IS_ERR(clk)) {
+ dev_err(dev, "failed to get clock phy_clk\n");
+ return PTR_ERR(clk);
+ }
+ sp->input_clks[PHY_CLK] = clk;
+
+ ret = clk_prepare_enable(sp->input_clks[PHY_CLK]);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+static int cdns_sierra_phy_get_resets(struct cdns_sierra_phy *sp,
+ struct udevice *dev)
+{
+ struct reset_control *rst;
+
+ rst = devm_reset_control_get(dev, "sierra_reset");
+ if (IS_ERR(rst)) {
+ dev_err(dev, "failed to get reset\n");
+ return PTR_ERR(rst);
+ }
+ sp->phy_rst = rst;
+
+ return 0;
+}
+
+static int cdns_sierra_bind_link_nodes(struct cdns_sierra_phy *sp)
+{
+ struct udevice *dev = sp->dev;
+ struct driver *link_drv;
+ ofnode child;
+ int rc;
+
+ link_drv = lists_driver_lookup_name("sierra_phy_link");
+ if (!link_drv) {
+ dev_err(dev, "Cannot find driver 'sierra_phy_link'\n");
+ return -ENOENT;
+ }
+
+ ofnode_for_each_subnode(child, dev_ofnode(dev)) {
+ if (!(ofnode_name_eq(child, "phy") ||
+ ofnode_name_eq(child, "link")))
+ continue;
+
+ rc = device_bind(dev, link_drv, "link", NULL, child, NULL);
+ if (rc) {
+ dev_err(dev, "cannot bind driver for link\n");
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+static int cdns_sierra_link_probe(struct udevice *dev)
+{
+ struct cdns_sierra_inst *inst = dev_get_priv(dev);
+ struct cdns_sierra_phy *sp = dev_get_priv(dev->parent);
+ struct reset_ctl_bulk *rst;
+ int ret, node;
+
+ rst = devm_reset_bulk_get_by_node(dev, dev_ofnode(dev));
+ if (IS_ERR(rst)) {
+ ret = PTR_ERR(rst);
+ dev_err(dev, "failed to get reset\n");
+ return ret;
+ }
+ inst->lnk_rst = rst;
+
+ ret = cdns_sierra_get_optional(inst, dev_ofnode(dev));
+ if (ret) {
+ dev_err(dev, "missing property in node\n");
+ return ret;
+ }
+ node = sp->nsubnodes;
+ sp->phys[node] = inst;
+ sp->nsubnodes += 1;
+ sp->num_lanes += inst->num_lanes;
+
+ /* If more than one subnode, configure the PHY as multilink */
+ if (!sp->autoconf && !sp->already_configured && sp->nsubnodes > 1) {
+ ret = cdns_sierra_phy_configure_multilink(sp);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+U_BOOT_DRIVER(sierra_phy_link) = {
+ .name = "sierra_phy_link",
+ .id = UCLASS_PHY,
+ .probe = cdns_sierra_link_probe,
+ .priv_auto = sizeof(struct cdns_sierra_inst),
+};
+
static int cdns_sierra_phy_probe(struct udevice *dev)
{
struct cdns_sierra_phy *sp = dev_get_priv(dev);
struct cdns_sierra_data *data;
unsigned int id_value;
- int ret, node = 0;
- struct clk *clk;
- ofnode child;
+ int ret;
sp->dev = dev;
@@ -473,38 +1111,26 @@ static int cdns_sierra_phy_probe(struct udevice *dev)
if (ret)
return ret;
- sp->clk = devm_clk_get_optional(dev, "phy_clk");
- if (IS_ERR(sp->clk)) {
- dev_err(dev, "failed to get clock phy_clk\n");
- return PTR_ERR(sp->clk);
- }
-
- sp->phy_rst = devm_reset_control_get(dev, "sierra_reset");
- if (IS_ERR(sp->phy_rst)) {
- dev_err(dev, "failed to get reset\n");
- return PTR_ERR(sp->phy_rst);
- }
-
- clk = devm_clk_get_optional(dev, "cmn_refclk_dig_div");
- if (IS_ERR(clk)) {
- dev_err(dev, "cmn_refclk clock not found\n");
- ret = PTR_ERR(clk);
- return ret;
- }
- sp->cmn_refclk = clk;
-
- clk = devm_clk_get_optional(dev, "cmn_refclk1_dig_div");
- if (IS_ERR(clk)) {
- dev_err(dev, "cmn_refclk1 clock not found\n");
- ret = PTR_ERR(clk);
+ ret = cdns_sierra_phy_get_clocks(sp, dev);
+ if (ret)
return ret;
- }
- sp->cmn_refclk1 = clk;
- ret = clk_prepare_enable(sp->clk);
+ ret = cdns_sierra_pll_bind_of_clocks(sp);
if (ret)
return ret;
+ regmap_field_read(sp->pma_cmn_ready, &sp->already_configured);
+
+ if (!sp->already_configured) {
+ ret = cdns_sierra_phy_clk(sp);
+ if (ret)
+ return ret;
+
+ ret = cdns_sierra_phy_get_resets(sp, dev);
+ if (ret)
+ return ret;
+ }
+
/* Check that PHY is present */
regmap_field_read(sp->macro_id_type, &id_value);
if (sp->init_data->id_value != id_value) {
@@ -515,45 +1141,17 @@ static int cdns_sierra_phy_probe(struct udevice *dev)
}
sp->autoconf = dev_read_bool(dev, "cdns,autoconf");
+ /* Binding link nodes as children to serdes */
+ ret = cdns_sierra_bind_link_nodes(sp);
+ if (ret)
+ goto clk_disable;
- ofnode_for_each_subnode(child, dev_ofnode(dev)) {
- sp->phys[node].lnk_rst = devm_reset_bulk_get_by_node(dev,
- child);
- if (IS_ERR(sp->phys[node].lnk_rst)) {
- ret = PTR_ERR(sp->phys[node].lnk_rst);
- dev_err(dev, "failed to get reset %s\n",
- ofnode_get_name(child));
- goto put_child2;
- }
-
- if (!sp->autoconf) {
- ret = cdns_sierra_get_optional(&sp->phys[node], child);
- if (ret) {
- dev_err(dev, "missing property in node %s\n",
- ofnode_get_name(child));
- goto put_child;
- }
- }
- sp->num_lanes += sp->phys[node].num_lanes;
-
- node++;
- }
- sp->nsubnodes = node;
-
- /* If more than one subnode, configure the PHY as multilink */
- if (!sp->autoconf && sp->nsubnodes > 1)
- regmap_field_write(sp->phy_pll_cfg_1, 0x1);
-
- reset_control_deassert(sp->phy_rst);
dev_info(dev, "sierra probed\n");
return 0;
-put_child:
- node++;
-put_child2:
-
clk_disable:
- clk_disable_unprepare(sp->clk);
+ if (!sp->already_configured)
+ clk_disable_unprepare(sp->input_clks[PHY_CLK]);
return ret;
}
@@ -569,11 +1167,456 @@ static int cdns_sierra_phy_remove(struct udevice *dev)
* Need to put the subnode resets here though.
*/
for (i = 0; i < phy->nsubnodes; i++)
- reset_assert_bulk(phy->phys[i].lnk_rst);
+ reset_assert_bulk(phy->phys[i]->lnk_rst);
+
+ clk_disable_unprepare(phy->input_clks[PHY_CLK]);
return 0;
}
+/* QSGMII PHY PMA lane configuration */
+static struct cdns_reg_pairs qsgmii_phy_pma_ln_regs[] = {
+ {0x9010, SIERRA_PHY_PMA_XCVR_CTRL}
+};
+
+static struct cdns_sierra_vals qsgmii_phy_pma_ln_vals = {
+ .reg_pairs = qsgmii_phy_pma_ln_regs,
+ .num_regs = ARRAY_SIZE(qsgmii_phy_pma_ln_regs),
+};
+
+/* QSGMII refclk 100MHz, 20b, opt1, No BW cal, no ssc, PLL LC1 */
+static const struct cdns_reg_pairs qsgmii_100_no_ssc_plllc1_cmn_regs[] = {
+ {0x2085, SIERRA_CMN_PLLLC1_LF_COEFF_MODE0_PREG},
+ {0x0000, SIERRA_CMN_PLLLC1_BWCAL_MODE0_PREG},
+ {0x0000, SIERRA_CMN_PLLLC1_SS_TIME_STEPSIZE_MODE_PREG}
+};
+
+static const struct cdns_reg_pairs qsgmii_100_no_ssc_plllc1_ln_regs[] = {
+ {0xFC08, SIERRA_DET_STANDEC_A_PREG},
+ {0x0252, SIERRA_DET_STANDEC_E_PREG},
+ {0x0004, SIERRA_PSC_LN_IDLE_PREG},
+ {0x0FFE, SIERRA_PSC_RX_A0_PREG},
+ {0x0011, SIERRA_PLLCTRL_SUBRATE_PREG},
+ {0x0001, SIERRA_PLLCTRL_GEN_A_PREG},
+ {0x5233, SIERRA_PLLCTRL_CPGAIN_MODE_PREG},
+ {0x0000, SIERRA_DRVCTRL_ATTEN_PREG},
+ {0x0089, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
+ {0x3C3C, SIERRA_CREQ_CCLKDET_MODE01_PREG},
+ {0x3222, SIERRA_CREQ_FSMCLK_SEL_PREG},
+ {0x0000, SIERRA_CREQ_EQ_CTRL_PREG},
+ {0x8422, SIERRA_CTLELUT_CTRL_PREG},
+ {0x4111, SIERRA_DFE_ECMP_RATESEL_PREG},
+ {0x4111, SIERRA_DFE_SMP_RATESEL_PREG},
+ {0x0002, SIERRA_DEQ_PHALIGN_CTRL},
+ {0x9595, SIERRA_DEQ_VGATUNE_CTRL_PREG},
+ {0x0186, SIERRA_DEQ_GLUT0},
+ {0x0186, SIERRA_DEQ_GLUT1},
+ {0x0186, SIERRA_DEQ_GLUT2},
+ {0x0186, SIERRA_DEQ_GLUT3},
+ {0x0186, SIERRA_DEQ_GLUT4},
+ {0x0861, SIERRA_DEQ_ALUT0},
+ {0x07E0, SIERRA_DEQ_ALUT1},
+ {0x079E, SIERRA_DEQ_ALUT2},
+ {0x071D, SIERRA_DEQ_ALUT3},
+ {0x03F5, SIERRA_DEQ_DFETAP_CTRL_PREG},
+ {0x0C01, SIERRA_DEQ_TAU_CTRL1_FAST_MAINT_PREG},
+ {0x3C40, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
+ {0x1C04, SIERRA_DEQ_TAU_CTRL2_PREG},
+ {0x0033, SIERRA_DEQ_PICTRL_PREG},
+ {0x0660, SIERRA_CPICAL_TMRVAL_MODE0_PREG},
+ {0x00D5, SIERRA_CPI_OUTBUF_RATESEL_PREG},
+ {0x0B6D, SIERRA_CPI_RESBIAS_BIN_PREG},
+ {0x0102, SIERRA_RXBUFFER_CTLECTRL_PREG},
+ {0x0002, SIERRA_RXBUFFER_RCDFECTRL_PREG}
+};
+
+static struct cdns_sierra_vals qsgmii_100_no_ssc_plllc1_cmn_vals = {
+ .reg_pairs = qsgmii_100_no_ssc_plllc1_cmn_regs,
+ .num_regs = ARRAY_SIZE(qsgmii_100_no_ssc_plllc1_cmn_regs),
+};
+
+static struct cdns_sierra_vals qsgmii_100_no_ssc_plllc1_ln_vals = {
+ .reg_pairs = qsgmii_100_no_ssc_plllc1_ln_regs,
+ .num_regs = ARRAY_SIZE(qsgmii_100_no_ssc_plllc1_ln_regs),
+};
+
+/* PCIE PHY PCS common configuration */
+static struct cdns_reg_pairs pcie_phy_pcs_cmn_regs[] = {
+ {0x0430, SIERRA_PHY_PIPE_CMN_CTRL1}
+};
+
+static struct cdns_sierra_vals pcie_phy_pcs_cmn_vals = {
+ .reg_pairs = pcie_phy_pcs_cmn_regs,
+ .num_regs = ARRAY_SIZE(pcie_phy_pcs_cmn_regs),
+};
+
+/* refclk100MHz_32b_PCIe_cmn_pll_no_ssc, pcie_links_using_plllc, pipe_bw_3 */
+static const struct cdns_reg_pairs pcie_100_no_ssc_plllc_cmn_regs[] = {
+ {0x2105, SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG},
+ {0x2105, SIERRA_CMN_PLLLC_LF_COEFF_MODE0_PREG},
+ {0x8A06, SIERRA_CMN_PLLLC_BWCAL_MODE1_PREG},
+ {0x8A06, SIERRA_CMN_PLLLC_BWCAL_MODE0_PREG}
+};
+
+/*
+ * refclk100MHz_32b_PCIe_ln_no_ssc, multilink, using_plllc,
+ * cmn_pllcy_anaclk0_1Ghz, xcvr_pllclk_fullrt_500mhz
+ */
+static const struct cdns_reg_pairs ml_pcie_100_no_ssc_ln_regs[] = {
+ {0xFC08, SIERRA_DET_STANDEC_A_PREG},
+ {0x001D, SIERRA_PSM_A3IN_TMR_PREG},
+ {0x0004, SIERRA_PSC_LN_A3_PREG},
+ {0x0004, SIERRA_PSC_LN_A4_PREG},
+ {0x0004, SIERRA_PSC_LN_IDLE_PREG},
+ {0x1555, SIERRA_DFE_BIASTRIM_PREG},
+ {0x9703, SIERRA_DRVCTRL_BOOST_PREG},
+ {0x8055, SIERRA_RX_CREQ_FLTR_A_MODE3_PREG},
+ {0x80BB, SIERRA_RX_CREQ_FLTR_A_MODE2_PREG},
+ {0x8351, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG},
+ {0x8349, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
+ {0x0002, SIERRA_CREQ_DCBIASATTEN_OVR_PREG},
+ {0x9800, SIERRA_RX_CTLE_CAL_PREG},
+ {0x5624, SIERRA_DEQ_CONCUR_CTRL2_PREG},
+ {0x000F, SIERRA_DEQ_EPIPWR_CTRL2_PREG},
+ {0x00FF, SIERRA_DEQ_FAST_MAINT_CYCLES_PREG},
+ {0x4C4C, SIERRA_DEQ_ERRCMP_CTRL_PREG},
+ {0x02FA, SIERRA_DEQ_OFFSET_CTRL_PREG},
+ {0x02FA, SIERRA_DEQ_GAIN_CTRL_PREG},
+ {0x0041, SIERRA_DEQ_GLUT0},
+ {0x0082, SIERRA_DEQ_GLUT1},
+ {0x00C3, SIERRA_DEQ_GLUT2},
+ {0x0145, SIERRA_DEQ_GLUT3},
+ {0x0186, SIERRA_DEQ_GLUT4},
+ {0x09E7, SIERRA_DEQ_ALUT0},
+ {0x09A6, SIERRA_DEQ_ALUT1},
+ {0x0965, SIERRA_DEQ_ALUT2},
+ {0x08E3, SIERRA_DEQ_ALUT3},
+ {0x00FA, SIERRA_DEQ_DFETAP0},
+ {0x00FA, SIERRA_DEQ_DFETAP1},
+ {0x00FA, SIERRA_DEQ_DFETAP2},
+ {0x00FA, SIERRA_DEQ_DFETAP3},
+ {0x00FA, SIERRA_DEQ_DFETAP4},
+ {0x000F, SIERRA_DEQ_PRECUR_PREG},
+ {0x0280, SIERRA_DEQ_POSTCUR_PREG},
+ {0x8F00, SIERRA_DEQ_POSTCUR_DECR_PREG},
+ {0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
+ {0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG},
+ {0x0100, SIERRA_DEQ_TAU_CTRL3_PREG},
+ {0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG},
+ {0x002B, SIERRA_CPI_TRIM_PREG},
+ {0x0003, SIERRA_EPI_CTRL_PREG},
+ {0x803F, SIERRA_SDFILT_H2L_A_PREG},
+ {0x0004, SIERRA_RXBUFFER_CTLECTRL_PREG},
+ {0x2010, SIERRA_RXBUFFER_RCDFECTRL_PREG},
+ {0x4432, SIERRA_RXBUFFER_DFECTRL_PREG}
+};
+
+static struct cdns_sierra_vals pcie_100_no_ssc_plllc_cmn_vals = {
+ .reg_pairs = pcie_100_no_ssc_plllc_cmn_regs,
+ .num_regs = ARRAY_SIZE(pcie_100_no_ssc_plllc_cmn_regs),
+};
+
+static struct cdns_sierra_vals ml_pcie_100_no_ssc_ln_vals = {
+ .reg_pairs = ml_pcie_100_no_ssc_ln_regs,
+ .num_regs = ARRAY_SIZE(ml_pcie_100_no_ssc_ln_regs),
+};
+
+/* refclk100MHz_32b_PCIe_cmn_pll_int_ssc, pcie_links_using_plllc, pipe_bw_3 */
+static const struct cdns_reg_pairs pcie_100_int_ssc_plllc_cmn_regs[] = {
+ {0x000E, SIERRA_CMN_PLLLC_MODE_PREG},
+ {0x4006, SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG},
+ {0x4006, SIERRA_CMN_PLLLC_LF_COEFF_MODE0_PREG},
+ {0x0000, SIERRA_CMN_PLLLC_BWCAL_MODE1_PREG},
+ {0x0000, SIERRA_CMN_PLLLC_BWCAL_MODE0_PREG},
+ {0x0581, SIERRA_CMN_PLLLC_DSMCORR_PREG},
+ {0x7F80, SIERRA_CMN_PLLLC_SS_PREG},
+ {0x0041, SIERRA_CMN_PLLLC_SS_AMP_STEP_SIZE_PREG},
+ {0x0464, SIERRA_CMN_PLLLC_SSTWOPT_PREG},
+ {0x0D0D, SIERRA_CMN_PLLLC_SS_TIME_STEPSIZE_MODE_PREG},
+ {0x0060, SIERRA_CMN_PLLLC_LOCK_DELAY_CTRL_PREG}
+};
+
+/*
+ * refclk100MHz_32b_PCIe_ln_int_ssc, multilink, using_plllc,
+ * cmn_pllcy_anaclk0_1Ghz, xcvr_pllclk_fullrt_500mhz
+ */
+static const struct cdns_reg_pairs ml_pcie_100_int_ssc_ln_regs[] = {
+ {0xFC08, SIERRA_DET_STANDEC_A_PREG},
+ {0x001D, SIERRA_PSM_A3IN_TMR_PREG},
+ {0x0004, SIERRA_PSC_LN_A3_PREG},
+ {0x0004, SIERRA_PSC_LN_A4_PREG},
+ {0x0004, SIERRA_PSC_LN_IDLE_PREG},
+ {0x1555, SIERRA_DFE_BIASTRIM_PREG},
+ {0x9703, SIERRA_DRVCTRL_BOOST_PREG},
+ {0x813E, SIERRA_CLKPATHCTRL_TMR_PREG},
+ {0x8047, SIERRA_RX_CREQ_FLTR_A_MODE3_PREG},
+ {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE2_PREG},
+ {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG},
+ {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
+ {0x0002, SIERRA_CREQ_DCBIASATTEN_OVR_PREG},
+ {0x9800, SIERRA_RX_CTLE_CAL_PREG},
+ {0x033C, SIERRA_RX_CTLE_MAINTENANCE_PREG},
+ {0x44CC, SIERRA_CREQ_EQ_OPEN_EYE_THRESH_PREG},
+ {0x5624, SIERRA_DEQ_CONCUR_CTRL2_PREG},
+ {0x000F, SIERRA_DEQ_EPIPWR_CTRL2_PREG},
+ {0x00FF, SIERRA_DEQ_FAST_MAINT_CYCLES_PREG},
+ {0x4C4C, SIERRA_DEQ_ERRCMP_CTRL_PREG},
+ {0x02FA, SIERRA_DEQ_OFFSET_CTRL_PREG},
+ {0x02FA, SIERRA_DEQ_GAIN_CTRL_PREG},
+ {0x0041, SIERRA_DEQ_GLUT0},
+ {0x0082, SIERRA_DEQ_GLUT1},
+ {0x00C3, SIERRA_DEQ_GLUT2},
+ {0x0145, SIERRA_DEQ_GLUT3},
+ {0x0186, SIERRA_DEQ_GLUT4},
+ {0x09E7, SIERRA_DEQ_ALUT0},
+ {0x09A6, SIERRA_DEQ_ALUT1},
+ {0x0965, SIERRA_DEQ_ALUT2},
+ {0x08E3, SIERRA_DEQ_ALUT3},
+ {0x00FA, SIERRA_DEQ_DFETAP0},
+ {0x00FA, SIERRA_DEQ_DFETAP1},
+ {0x00FA, SIERRA_DEQ_DFETAP2},
+ {0x00FA, SIERRA_DEQ_DFETAP3},
+ {0x00FA, SIERRA_DEQ_DFETAP4},
+ {0x000F, SIERRA_DEQ_PRECUR_PREG},
+ {0x0280, SIERRA_DEQ_POSTCUR_PREG},
+ {0x8F00, SIERRA_DEQ_POSTCUR_DECR_PREG},
+ {0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
+ {0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG},
+ {0x0100, SIERRA_DEQ_TAU_CTRL3_PREG},
+ {0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG},
+ {0x002B, SIERRA_CPI_TRIM_PREG},
+ {0x0003, SIERRA_EPI_CTRL_PREG},
+ {0x803F, SIERRA_SDFILT_H2L_A_PREG},
+ {0x0004, SIERRA_RXBUFFER_CTLECTRL_PREG},
+ {0x2010, SIERRA_RXBUFFER_RCDFECTRL_PREG},
+ {0x4432, SIERRA_RXBUFFER_DFECTRL_PREG}
+};
+
+static struct cdns_sierra_vals pcie_100_int_ssc_plllc_cmn_vals = {
+ .reg_pairs = pcie_100_int_ssc_plllc_cmn_regs,
+ .num_regs = ARRAY_SIZE(pcie_100_int_ssc_plllc_cmn_regs),
+};
+
+static struct cdns_sierra_vals ml_pcie_100_int_ssc_ln_vals = {
+ .reg_pairs = ml_pcie_100_int_ssc_ln_regs,
+ .num_regs = ARRAY_SIZE(ml_pcie_100_int_ssc_ln_regs),
+};
+
+/* refclk100MHz_32b_PCIe_cmn_pll_ext_ssc, pcie_links_using_plllc, pipe_bw_3 */
+static const struct cdns_reg_pairs pcie_100_ext_ssc_plllc_cmn_regs[] = {
+ {0x2106, SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG},
+ {0x2106, SIERRA_CMN_PLLLC_LF_COEFF_MODE0_PREG},
+ {0x8A06, SIERRA_CMN_PLLLC_BWCAL_MODE1_PREG},
+ {0x8A06, SIERRA_CMN_PLLLC_BWCAL_MODE0_PREG},
+ {0x1B1B, SIERRA_CMN_PLLLC_SS_TIME_STEPSIZE_MODE_PREG}
+};
+
+/*
+ * refclk100MHz_32b_PCIe_ln_ext_ssc, multilink, using_plllc,
+ * cmn_pllcy_anaclk0_1Ghz, xcvr_pllclk_fullrt_500mhz
+ */
+static const struct cdns_reg_pairs ml_pcie_100_ext_ssc_ln_regs[] = {
+ {0xFC08, SIERRA_DET_STANDEC_A_PREG},
+ {0x001D, SIERRA_PSM_A3IN_TMR_PREG},
+ {0x0004, SIERRA_PSC_LN_A3_PREG},
+ {0x0004, SIERRA_PSC_LN_A4_PREG},
+ {0x0004, SIERRA_PSC_LN_IDLE_PREG},
+ {0x1555, SIERRA_DFE_BIASTRIM_PREG},
+ {0x9703, SIERRA_DRVCTRL_BOOST_PREG},
+ {0x813E, SIERRA_CLKPATHCTRL_TMR_PREG},
+ {0x8047, SIERRA_RX_CREQ_FLTR_A_MODE3_PREG},
+ {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE2_PREG},
+ {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG},
+ {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
+ {0x0002, SIERRA_CREQ_DCBIASATTEN_OVR_PREG},
+ {0x9800, SIERRA_RX_CTLE_CAL_PREG},
+ {0x033C, SIERRA_RX_CTLE_MAINTENANCE_PREG},
+ {0x44CC, SIERRA_CREQ_EQ_OPEN_EYE_THRESH_PREG},
+ {0x5624, SIERRA_DEQ_CONCUR_CTRL2_PREG},
+ {0x000F, SIERRA_DEQ_EPIPWR_CTRL2_PREG},
+ {0x00FF, SIERRA_DEQ_FAST_MAINT_CYCLES_PREG},
+ {0x4C4C, SIERRA_DEQ_ERRCMP_CTRL_PREG},
+ {0x02FA, SIERRA_DEQ_OFFSET_CTRL_PREG},
+ {0x02FA, SIERRA_DEQ_GAIN_CTRL_PREG},
+ {0x0041, SIERRA_DEQ_GLUT0},
+ {0x0082, SIERRA_DEQ_GLUT1},
+ {0x00C3, SIERRA_DEQ_GLUT2},
+ {0x0145, SIERRA_DEQ_GLUT3},
+ {0x0186, SIERRA_DEQ_GLUT4},
+ {0x09E7, SIERRA_DEQ_ALUT0},
+ {0x09A6, SIERRA_DEQ_ALUT1},
+ {0x0965, SIERRA_DEQ_ALUT2},
+ {0x08E3, SIERRA_DEQ_ALUT3},
+ {0x00FA, SIERRA_DEQ_DFETAP0},
+ {0x00FA, SIERRA_DEQ_DFETAP1},
+ {0x00FA, SIERRA_DEQ_DFETAP2},
+ {0x00FA, SIERRA_DEQ_DFETAP3},
+ {0x00FA, SIERRA_DEQ_DFETAP4},
+ {0x000F, SIERRA_DEQ_PRECUR_PREG},
+ {0x0280, SIERRA_DEQ_POSTCUR_PREG},
+ {0x8F00, SIERRA_DEQ_POSTCUR_DECR_PREG},
+ {0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
+ {0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG},
+ {0x0100, SIERRA_DEQ_TAU_CTRL3_PREG},
+ {0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG},
+ {0x002B, SIERRA_CPI_TRIM_PREG},
+ {0x0003, SIERRA_EPI_CTRL_PREG},
+ {0x803F, SIERRA_SDFILT_H2L_A_PREG},
+ {0x0004, SIERRA_RXBUFFER_CTLECTRL_PREG},
+ {0x2010, SIERRA_RXBUFFER_RCDFECTRL_PREG},
+ {0x4432, SIERRA_RXBUFFER_DFECTRL_PREG}
+};
+
+static struct cdns_sierra_vals pcie_100_ext_ssc_plllc_cmn_vals = {
+ .reg_pairs = pcie_100_ext_ssc_plllc_cmn_regs,
+ .num_regs = ARRAY_SIZE(pcie_100_ext_ssc_plllc_cmn_regs),
+};
+
+static struct cdns_sierra_vals ml_pcie_100_ext_ssc_ln_vals = {
+ .reg_pairs = ml_pcie_100_ext_ssc_ln_regs,
+ .num_regs = ARRAY_SIZE(ml_pcie_100_ext_ssc_ln_regs),
+};
+
+/* refclk100MHz_32b_PCIe_cmn_pll_no_ssc */
+static const struct cdns_reg_pairs cdns_pcie_cmn_regs_no_ssc[] = {
+ {0x2105, SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG},
+ {0x2105, SIERRA_CMN_PLLLC_LF_COEFF_MODE0_PREG},
+ {0x8A06, SIERRA_CMN_PLLLC_BWCAL_MODE1_PREG},
+ {0x8A06, SIERRA_CMN_PLLLC_BWCAL_MODE0_PREG}
+};
+
+/* refclk100MHz_32b_PCIe_ln_no_ssc */
+static const struct cdns_reg_pairs cdns_pcie_ln_regs_no_ssc[] = {
+ {0xFC08, SIERRA_DET_STANDEC_A_PREG},
+ {0x001D, SIERRA_PSM_A3IN_TMR_PREG},
+ {0x1555, SIERRA_DFE_BIASTRIM_PREG},
+ {0x9703, SIERRA_DRVCTRL_BOOST_PREG},
+ {0x8055, SIERRA_RX_CREQ_FLTR_A_MODE3_PREG},
+ {0x80BB, SIERRA_RX_CREQ_FLTR_A_MODE2_PREG},
+ {0x8351, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG},
+ {0x8349, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
+ {0x0002, SIERRA_CREQ_DCBIASATTEN_OVR_PREG},
+ {0x9800, SIERRA_RX_CTLE_CAL_PREG},
+ {0x5624, SIERRA_DEQ_CONCUR_CTRL2_PREG},
+ {0x000F, SIERRA_DEQ_EPIPWR_CTRL2_PREG},
+ {0x00FF, SIERRA_DEQ_FAST_MAINT_CYCLES_PREG},
+ {0x4C4C, SIERRA_DEQ_ERRCMP_CTRL_PREG},
+ {0x02FA, SIERRA_DEQ_OFFSET_CTRL_PREG},
+ {0x02FA, SIERRA_DEQ_GAIN_CTRL_PREG},
+ {0x0041, SIERRA_DEQ_GLUT0},
+ {0x0082, SIERRA_DEQ_GLUT1},
+ {0x00C3, SIERRA_DEQ_GLUT2},
+ {0x0145, SIERRA_DEQ_GLUT3},
+ {0x0186, SIERRA_DEQ_GLUT4},
+ {0x09E7, SIERRA_DEQ_ALUT0},
+ {0x09A6, SIERRA_DEQ_ALUT1},
+ {0x0965, SIERRA_DEQ_ALUT2},
+ {0x08E3, SIERRA_DEQ_ALUT3},
+ {0x00FA, SIERRA_DEQ_DFETAP0},
+ {0x00FA, SIERRA_DEQ_DFETAP1},
+ {0x00FA, SIERRA_DEQ_DFETAP2},
+ {0x00FA, SIERRA_DEQ_DFETAP3},
+ {0x00FA, SIERRA_DEQ_DFETAP4},
+ {0x000F, SIERRA_DEQ_PRECUR_PREG},
+ {0x0280, SIERRA_DEQ_POSTCUR_PREG},
+ {0x8F00, SIERRA_DEQ_POSTCUR_DECR_PREG},
+ {0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
+ {0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG},
+ {0x0100, SIERRA_DEQ_TAU_CTRL3_PREG},
+ {0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG},
+ {0x002B, SIERRA_CPI_TRIM_PREG},
+ {0x0003, SIERRA_EPI_CTRL_PREG},
+ {0x803F, SIERRA_SDFILT_H2L_A_PREG},
+ {0x0004, SIERRA_RXBUFFER_CTLECTRL_PREG},
+ {0x2010, SIERRA_RXBUFFER_RCDFECTRL_PREG},
+ {0x4432, SIERRA_RXBUFFER_DFECTRL_PREG}
+};
+
+static struct cdns_sierra_vals pcie_100_no_ssc_cmn_vals = {
+ .reg_pairs = cdns_pcie_cmn_regs_no_ssc,
+ .num_regs = ARRAY_SIZE(cdns_pcie_cmn_regs_no_ssc),
+};
+
+static struct cdns_sierra_vals pcie_100_no_ssc_ln_vals = {
+ .reg_pairs = cdns_pcie_ln_regs_no_ssc,
+ .num_regs = ARRAY_SIZE(cdns_pcie_ln_regs_no_ssc),
+};
+
+/* refclk100MHz_32b_PCIe_cmn_pll_int_ssc */
+static const struct cdns_reg_pairs cdns_pcie_cmn_regs_int_ssc[] = {
+ {0x000E, SIERRA_CMN_PLLLC_MODE_PREG},
+ {0x4006, SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG},
+ {0x4006, SIERRA_CMN_PLLLC_LF_COEFF_MODE0_PREG},
+ {0x0000, SIERRA_CMN_PLLLC_BWCAL_MODE1_PREG},
+ {0x0000, SIERRA_CMN_PLLLC_BWCAL_MODE0_PREG},
+ {0x0581, SIERRA_CMN_PLLLC_DSMCORR_PREG},
+ {0x7F80, SIERRA_CMN_PLLLC_SS_PREG},
+ {0x0041, SIERRA_CMN_PLLLC_SS_AMP_STEP_SIZE_PREG},
+ {0x0464, SIERRA_CMN_PLLLC_SSTWOPT_PREG},
+ {0x0D0D, SIERRA_CMN_PLLLC_SS_TIME_STEPSIZE_MODE_PREG},
+ {0x0060, SIERRA_CMN_PLLLC_LOCK_DELAY_CTRL_PREG}
+};
+
+/* refclk100MHz_32b_PCIe_ln_int_ssc */
+static const struct cdns_reg_pairs cdns_pcie_ln_regs_int_ssc[] = {
+ {0xFC08, SIERRA_DET_STANDEC_A_PREG},
+ {0x001D, SIERRA_PSM_A3IN_TMR_PREG},
+ {0x1555, SIERRA_DFE_BIASTRIM_PREG},
+ {0x9703, SIERRA_DRVCTRL_BOOST_PREG},
+ {0x813E, SIERRA_CLKPATHCTRL_TMR_PREG},
+ {0x8047, SIERRA_RX_CREQ_FLTR_A_MODE3_PREG},
+ {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE2_PREG},
+ {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG},
+ {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
+ {0x0002, SIERRA_CREQ_DCBIASATTEN_OVR_PREG},
+ {0x9800, SIERRA_RX_CTLE_CAL_PREG},
+ {0x033C, SIERRA_RX_CTLE_MAINTENANCE_PREG},
+ {0x44CC, SIERRA_CREQ_EQ_OPEN_EYE_THRESH_PREG},
+ {0x5624, SIERRA_DEQ_CONCUR_CTRL2_PREG},
+ {0x000F, SIERRA_DEQ_EPIPWR_CTRL2_PREG},
+ {0x00FF, SIERRA_DEQ_FAST_MAINT_CYCLES_PREG},
+ {0x4C4C, SIERRA_DEQ_ERRCMP_CTRL_PREG},
+ {0x02FA, SIERRA_DEQ_OFFSET_CTRL_PREG},
+ {0x02FA, SIERRA_DEQ_GAIN_CTRL_PREG},
+ {0x0041, SIERRA_DEQ_GLUT0},
+ {0x0082, SIERRA_DEQ_GLUT1},
+ {0x00C3, SIERRA_DEQ_GLUT2},
+ {0x0145, SIERRA_DEQ_GLUT3},
+ {0x0186, SIERRA_DEQ_GLUT4},
+ {0x09E7, SIERRA_DEQ_ALUT0},
+ {0x09A6, SIERRA_DEQ_ALUT1},
+ {0x0965, SIERRA_DEQ_ALUT2},
+ {0x08E3, SIERRA_DEQ_ALUT3},
+ {0x00FA, SIERRA_DEQ_DFETAP0},
+ {0x00FA, SIERRA_DEQ_DFETAP1},
+ {0x00FA, SIERRA_DEQ_DFETAP2},
+ {0x00FA, SIERRA_DEQ_DFETAP3},
+ {0x00FA, SIERRA_DEQ_DFETAP4},
+ {0x000F, SIERRA_DEQ_PRECUR_PREG},
+ {0x0280, SIERRA_DEQ_POSTCUR_PREG},
+ {0x8F00, SIERRA_DEQ_POSTCUR_DECR_PREG},
+ {0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
+ {0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG},
+ {0x0100, SIERRA_DEQ_TAU_CTRL3_PREG},
+ {0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG},
+ {0x002B, SIERRA_CPI_TRIM_PREG},
+ {0x0003, SIERRA_EPI_CTRL_PREG},
+ {0x803F, SIERRA_SDFILT_H2L_A_PREG},
+ {0x0004, SIERRA_RXBUFFER_CTLECTRL_PREG},
+ {0x2010, SIERRA_RXBUFFER_RCDFECTRL_PREG},
+ {0x4432, SIERRA_RXBUFFER_DFECTRL_PREG}
+};
+
+static struct cdns_sierra_vals pcie_100_int_ssc_cmn_vals = {
+ .reg_pairs = cdns_pcie_cmn_regs_int_ssc,
+ .num_regs = ARRAY_SIZE(cdns_pcie_cmn_regs_int_ssc),
+};
+
+static struct cdns_sierra_vals pcie_100_int_ssc_ln_vals = {
+ .reg_pairs = cdns_pcie_ln_regs_int_ssc,
+ .num_regs = ARRAY_SIZE(cdns_pcie_ln_regs_int_ssc),
+};
+
/* refclk100MHz_32b_PCIe_cmn_pll_ext_ssc */
static struct cdns_reg_pairs cdns_pcie_cmn_regs_ext_ssc[] = {
{0x2106, SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG},
@@ -585,13 +1628,62 @@ static struct cdns_reg_pairs cdns_pcie_cmn_regs_ext_ssc[] = {
/* refclk100MHz_32b_PCIe_ln_ext_ssc */
static struct cdns_reg_pairs cdns_pcie_ln_regs_ext_ssc[] = {
+ {0xFC08, SIERRA_DET_STANDEC_A_PREG},
+ {0x001D, SIERRA_PSM_A3IN_TMR_PREG},
+ {0x1555, SIERRA_DFE_BIASTRIM_PREG},
+ {0x9703, SIERRA_DRVCTRL_BOOST_PREG},
{0x813E, SIERRA_CLKPATHCTRL_TMR_PREG},
{0x8047, SIERRA_RX_CREQ_FLTR_A_MODE3_PREG},
{0x808F, SIERRA_RX_CREQ_FLTR_A_MODE2_PREG},
{0x808F, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG},
{0x808F, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
+ {0x0002, SIERRA_CREQ_DCBIASATTEN_OVR_PREG},
+ {0x9800, SIERRA_RX_CTLE_CAL_PREG},
{0x033C, SIERRA_RX_CTLE_MAINTENANCE_PREG},
- {0x44CC, SIERRA_CREQ_EQ_OPEN_EYE_THRESH_PREG}
+ {0x44CC, SIERRA_CREQ_EQ_OPEN_EYE_THRESH_PREG},
+ {0x5624, SIERRA_DEQ_CONCUR_CTRL2_PREG},
+ {0x000F, SIERRA_DEQ_EPIPWR_CTRL2_PREG},
+ {0x00FF, SIERRA_DEQ_FAST_MAINT_CYCLES_PREG},
+ {0x4C4C, SIERRA_DEQ_ERRCMP_CTRL_PREG},
+ {0x02FA, SIERRA_DEQ_OFFSET_CTRL_PREG},
+ {0x02FA, SIERRA_DEQ_GAIN_CTRL_PREG},
+ {0x0041, SIERRA_DEQ_GLUT0},
+ {0x0082, SIERRA_DEQ_GLUT1},
+ {0x00C3, SIERRA_DEQ_GLUT2},
+ {0x0145, SIERRA_DEQ_GLUT3},
+ {0x0186, SIERRA_DEQ_GLUT4},
+ {0x09E7, SIERRA_DEQ_ALUT0},
+ {0x09A6, SIERRA_DEQ_ALUT1},
+ {0x0965, SIERRA_DEQ_ALUT2},
+ {0x08E3, SIERRA_DEQ_ALUT3},
+ {0x00FA, SIERRA_DEQ_DFETAP0},
+ {0x00FA, SIERRA_DEQ_DFETAP1},
+ {0x00FA, SIERRA_DEQ_DFETAP2},
+ {0x00FA, SIERRA_DEQ_DFETAP3},
+ {0x00FA, SIERRA_DEQ_DFETAP4},
+ {0x000F, SIERRA_DEQ_PRECUR_PREG},
+ {0x0280, SIERRA_DEQ_POSTCUR_PREG},
+ {0x8F00, SIERRA_DEQ_POSTCUR_DECR_PREG},
+ {0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
+ {0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG},
+ {0x0100, SIERRA_DEQ_TAU_CTRL3_PREG},
+ {0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG},
+ {0x002B, SIERRA_CPI_TRIM_PREG},
+ {0x0003, SIERRA_EPI_CTRL_PREG},
+ {0x803F, SIERRA_SDFILT_H2L_A_PREG},
+ {0x0004, SIERRA_RXBUFFER_CTLECTRL_PREG},
+ {0x2010, SIERRA_RXBUFFER_RCDFECTRL_PREG},
+ {0x4432, SIERRA_RXBUFFER_DFECTRL_PREG}
+};
+
+static struct cdns_sierra_vals pcie_100_ext_ssc_cmn_vals = {
+ .reg_pairs = cdns_pcie_cmn_regs_ext_ssc,
+ .num_regs = ARRAY_SIZE(cdns_pcie_cmn_regs_ext_ssc),
+};
+
+static struct cdns_sierra_vals pcie_100_ext_ssc_ln_vals = {
+ .reg_pairs = cdns_pcie_ln_regs_ext_ssc,
+ .num_regs = ARRAY_SIZE(cdns_pcie_ln_regs_ext_ssc),
};
/* refclk100MHz_20b_USB_cmn_pll_ext_ssc */
@@ -606,10 +1698,10 @@ static struct cdns_reg_pairs cdns_usb_cmn_regs_ext_ssc[] = {
static struct cdns_reg_pairs cdns_usb_ln_regs_ext_ssc[] = {
{0xFE0A, SIERRA_DET_STANDEC_A_PREG},
{0x000F, SIERRA_DET_STANDEC_B_PREG},
- {0x00A5, SIERRA_DET_STANDEC_C_PREG},
+ {0x55A5, SIERRA_DET_STANDEC_C_PREG},
{0x69ad, SIERRA_DET_STANDEC_D_PREG},
{0x0241, SIERRA_DET_STANDEC_E_PREG},
- {0x0010, SIERRA_PSM_LANECAL_DLY_A1_RESETS_PREG},
+ {0x0110, SIERRA_PSM_LANECAL_DLY_A1_RESETS_PREG},
{0x0014, SIERRA_PSM_A0IN_TMR_PREG},
{0xCF00, SIERRA_PSM_DIAG_PREG},
{0x001F, SIERRA_PSC_TX_A0_PREG},
@@ -617,7 +1709,7 @@ static struct cdns_reg_pairs cdns_usb_ln_regs_ext_ssc[] = {
{0x0003, SIERRA_PSC_TX_A2_PREG},
{0x0003, SIERRA_PSC_TX_A3_PREG},
{0x0FFF, SIERRA_PSC_RX_A0_PREG},
- {0x0619, SIERRA_PSC_RX_A1_PREG},
+ {0x0003, SIERRA_PSC_RX_A1_PREG},
{0x0003, SIERRA_PSC_RX_A2_PREG},
{0x0001, SIERRA_PSC_RX_A3_PREG},
{0x0001, SIERRA_PLLCTRL_SUBRATE_PREG},
@@ -626,19 +1718,19 @@ static struct cdns_reg_pairs cdns_usb_ln_regs_ext_ssc[] = {
{0x00CA, SIERRA_CLKPATH_BIASTRIM_PREG},
{0x2512, SIERRA_DFE_BIASTRIM_PREG},
{0x0000, SIERRA_DRVCTRL_ATTEN_PREG},
- {0x873E, SIERRA_CLKPATHCTRL_TMR_PREG},
- {0x03CF, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG},
- {0x01CE, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
+ {0x823E, SIERRA_CLKPATHCTRL_TMR_PREG},
+ {0x078F, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG},
+ {0x078F, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
{0x7B3C, SIERRA_CREQ_CCLKDET_MODE01_PREG},
- {0x033F, SIERRA_RX_CTLE_MAINTENANCE_PREG},
+ {0x023C, SIERRA_RX_CTLE_MAINTENANCE_PREG},
{0x3232, SIERRA_CREQ_FSMCLK_SEL_PREG},
{0x0000, SIERRA_CREQ_EQ_CTRL_PREG},
- {0x8000, SIERRA_CREQ_SPARE_PREG},
+ {0x0000, SIERRA_CREQ_SPARE_PREG},
{0xCC44, SIERRA_CREQ_EQ_OPEN_EYE_THRESH_PREG},
- {0x8453, SIERRA_CTLELUT_CTRL_PREG},
- {0x4110, SIERRA_DFE_ECMP_RATESEL_PREG},
- {0x4110, SIERRA_DFE_SMP_RATESEL_PREG},
- {0x0002, SIERRA_DEQ_PHALIGN_CTRL},
+ {0x8452, SIERRA_CTLELUT_CTRL_PREG},
+ {0x4121, SIERRA_DFE_ECMP_RATESEL_PREG},
+ {0x4121, SIERRA_DFE_SMP_RATESEL_PREG},
+ {0x0003, SIERRA_DEQ_PHALIGN_CTRL},
{0x3200, SIERRA_DEQ_CONCUR_CTRL1_PREG},
{0x5064, SIERRA_DEQ_CONCUR_CTRL2_PREG},
{0x0030, SIERRA_DEQ_EPIPWR_CTRL2_PREG},
@@ -646,7 +1738,7 @@ static struct cdns_reg_pairs cdns_usb_ln_regs_ext_ssc[] = {
{0x5A5A, SIERRA_DEQ_ERRCMP_CTRL_PREG},
{0x02F5, SIERRA_DEQ_OFFSET_CTRL_PREG},
{0x02F5, SIERRA_DEQ_GAIN_CTRL_PREG},
- {0x9A8A, SIERRA_DEQ_VGATUNE_CTRL_PREG},
+ {0x9999, SIERRA_DEQ_VGATUNE_CTRL_PREG},
{0x0014, SIERRA_DEQ_GLUT0},
{0x0014, SIERRA_DEQ_GLUT1},
{0x0014, SIERRA_DEQ_GLUT2},
@@ -693,6 +1785,7 @@ static struct cdns_reg_pairs cdns_usb_ln_regs_ext_ssc[] = {
{0x000F, SIERRA_LFPSFILT_NS_PREG},
{0x0009, SIERRA_LFPSFILT_RD_PREG},
{0x0001, SIERRA_LFPSFILT_MP_PREG},
+ {0x6013, SIERRA_SIGDET_SUPPORT_PREG},
{0x8013, SIERRA_SDFILT_H2L_A_PREG},
{0x8009, SIERRA_SDFILT_L2H_PREG},
{0x0024, SIERRA_RXBUFFER_CTLECTRL_PREG},
@@ -700,32 +1793,168 @@ static struct cdns_reg_pairs cdns_usb_ln_regs_ext_ssc[] = {
{0x4243, SIERRA_RXBUFFER_DFECTRL_PREG}
};
+static struct cdns_sierra_vals usb_100_ext_ssc_cmn_vals = {
+ .reg_pairs = cdns_usb_cmn_regs_ext_ssc,
+ .num_regs = ARRAY_SIZE(cdns_usb_cmn_regs_ext_ssc),
+};
+
+static struct cdns_sierra_vals usb_100_ext_ssc_ln_vals = {
+ .reg_pairs = cdns_usb_ln_regs_ext_ssc,
+ .num_regs = ARRAY_SIZE(cdns_usb_ln_regs_ext_ssc),
+};
+
static const struct cdns_sierra_data cdns_map_sierra = {
- SIERRA_MACRO_ID,
- 0x2,
- 0x2,
- ARRAY_SIZE(cdns_pcie_cmn_regs_ext_ssc),
- ARRAY_SIZE(cdns_pcie_ln_regs_ext_ssc),
- ARRAY_SIZE(cdns_usb_cmn_regs_ext_ssc),
- ARRAY_SIZE(cdns_usb_ln_regs_ext_ssc),
- cdns_pcie_cmn_regs_ext_ssc,
- cdns_pcie_ln_regs_ext_ssc,
- cdns_usb_cmn_regs_ext_ssc,
- cdns_usb_ln_regs_ext_ssc,
+ .id_value = SIERRA_MACRO_ID,
+ .block_offset_shift = 0x2,
+ .reg_offset_shift = 0x2,
+ .pcs_cmn_vals = {
+ [TYPE_PCIE] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &pcie_phy_pcs_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_phy_pcs_cmn_vals,
+ [INTERNAL_SSC] = &pcie_phy_pcs_cmn_vals,
+ },
+ [TYPE_QSGMII] = {
+ [NO_SSC] = &pcie_phy_pcs_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_phy_pcs_cmn_vals,
+ [INTERNAL_SSC] = &pcie_phy_pcs_cmn_vals,
+ },
+ },
+ },
+ .pma_cmn_vals = {
+ [TYPE_PCIE] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &pcie_100_no_ssc_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_100_ext_ssc_cmn_vals,
+ [INTERNAL_SSC] = &pcie_100_int_ssc_cmn_vals
+ },
+ [TYPE_QSGMII] = {
+ [NO_SSC] = &pcie_100_no_ssc_plllc_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_100_ext_ssc_plllc_cmn_vals,
+ [INTERNAL_SSC] = &pcie_100_int_ssc_plllc_cmn_vals,
+ },
+ },
+ [TYPE_USB] = {
+ [TYPE_NONE] = {
+ [EXTERNAL_SSC] = &usb_100_ext_ssc_cmn_vals,
+ },
+ },
+ [TYPE_QSGMII] = {
+ [TYPE_PCIE] = {
+ [NO_SSC] = &qsgmii_100_no_ssc_plllc1_cmn_vals,
+ [EXTERNAL_SSC] = &qsgmii_100_no_ssc_plllc1_cmn_vals,
+ [INTERNAL_SSC] = &qsgmii_100_no_ssc_plllc1_cmn_vals,
+ },
+ },
+ },
+ .pma_ln_vals = {
+ [TYPE_PCIE] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &pcie_100_no_ssc_ln_vals,
+ [EXTERNAL_SSC] = &pcie_100_ext_ssc_ln_vals,
+ [INTERNAL_SSC] = &pcie_100_int_ssc_ln_vals,
+ },
+ [TYPE_QSGMII] = {
+ [NO_SSC] = &ml_pcie_100_no_ssc_ln_vals,
+ [EXTERNAL_SSC] = &ml_pcie_100_ext_ssc_ln_vals,
+ [INTERNAL_SSC] = &ml_pcie_100_int_ssc_ln_vals,
+ },
+ },
+ [TYPE_USB] = {
+ [TYPE_NONE] = {
+ [EXTERNAL_SSC] = &usb_100_ext_ssc_ln_vals,
+ },
+ },
+ [TYPE_QSGMII] = {
+ [TYPE_PCIE] = {
+ [NO_SSC] = &qsgmii_100_no_ssc_plllc1_ln_vals,
+ [EXTERNAL_SSC] = &qsgmii_100_no_ssc_plllc1_ln_vals,
+ [INTERNAL_SSC] = &qsgmii_100_no_ssc_plllc1_ln_vals,
+ },
+ },
+
+ },
};
static const struct cdns_sierra_data cdns_ti_map_sierra = {
- SIERRA_MACRO_ID,
- 0x0,
- 0x1,
- ARRAY_SIZE(cdns_pcie_cmn_regs_ext_ssc),
- ARRAY_SIZE(cdns_pcie_ln_regs_ext_ssc),
- ARRAY_SIZE(cdns_usb_cmn_regs_ext_ssc),
- ARRAY_SIZE(cdns_usb_ln_regs_ext_ssc),
- cdns_pcie_cmn_regs_ext_ssc,
- cdns_pcie_ln_regs_ext_ssc,
- cdns_usb_cmn_regs_ext_ssc,
- cdns_usb_ln_regs_ext_ssc,
+ .id_value = SIERRA_MACRO_ID,
+ .block_offset_shift = 0x0,
+ .reg_offset_shift = 0x1,
+ .pcs_cmn_vals = {
+ [TYPE_PCIE] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &pcie_phy_pcs_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_phy_pcs_cmn_vals,
+ [INTERNAL_SSC] = &pcie_phy_pcs_cmn_vals,
+ },
+ [TYPE_QSGMII] = {
+ [NO_SSC] = &pcie_phy_pcs_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_phy_pcs_cmn_vals,
+ [INTERNAL_SSC] = &pcie_phy_pcs_cmn_vals,
+ },
+ },
+ },
+ .phy_pma_ln_vals = {
+ [TYPE_QSGMII] = {
+ [TYPE_PCIE] = {
+ [NO_SSC] = &qsgmii_phy_pma_ln_vals,
+ [EXTERNAL_SSC] = &qsgmii_phy_pma_ln_vals,
+ [INTERNAL_SSC] = &qsgmii_phy_pma_ln_vals,
+ },
+ },
+ },
+ .pma_cmn_vals = {
+ [TYPE_PCIE] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &pcie_100_no_ssc_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_100_ext_ssc_cmn_vals,
+ [INTERNAL_SSC] = &pcie_100_int_ssc_cmn_vals,
+ },
+ [TYPE_QSGMII] = {
+ [NO_SSC] = &pcie_100_no_ssc_plllc_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_100_ext_ssc_plllc_cmn_vals,
+ [INTERNAL_SSC] = &pcie_100_int_ssc_plllc_cmn_vals,
+ },
+ },
+ [TYPE_USB] = {
+ [TYPE_NONE] = {
+ [EXTERNAL_SSC] = &usb_100_ext_ssc_cmn_vals,
+ },
+ },
+ [TYPE_QSGMII] = {
+ [TYPE_PCIE] = {
+ [NO_SSC] = &qsgmii_100_no_ssc_plllc1_cmn_vals,
+ [EXTERNAL_SSC] = &qsgmii_100_no_ssc_plllc1_cmn_vals,
+ [INTERNAL_SSC] = &qsgmii_100_no_ssc_plllc1_cmn_vals,
+ },
+ },
+ },
+ .pma_ln_vals = {
+ [TYPE_PCIE] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &pcie_100_no_ssc_ln_vals,
+ [EXTERNAL_SSC] = &pcie_100_ext_ssc_ln_vals,
+ [INTERNAL_SSC] = &pcie_100_int_ssc_ln_vals,
+ },
+ [TYPE_QSGMII] = {
+ [NO_SSC] = &ml_pcie_100_no_ssc_ln_vals,
+ [EXTERNAL_SSC] = &ml_pcie_100_ext_ssc_ln_vals,
+ [INTERNAL_SSC] = &ml_pcie_100_int_ssc_ln_vals,
+ },
+ },
+ [TYPE_USB] = {
+ [TYPE_NONE] = {
+ [EXTERNAL_SSC] = &usb_100_ext_ssc_ln_vals,
+ },
+ },
+ [TYPE_QSGMII] = {
+ [TYPE_PCIE] = {
+ [NO_SSC] = &qsgmii_100_no_ssc_plllc1_ln_vals,
+ [EXTERNAL_SSC] = &qsgmii_100_no_ssc_plllc1_ln_vals,
+ [INTERNAL_SSC] = &qsgmii_100_no_ssc_plllc1_ln_vals,
+ },
+ },
+ },
};
static const struct udevice_id cdns_sierra_id_table[] = {
diff --git a/drivers/phy/ti/phy-j721e-wiz.c b/drivers/phy/ti/phy-j721e-wiz.c
index d74efcd212..686cdc6f7c 100644
--- a/drivers/phy/ti/phy-j721e-wiz.c
+++ b/drivers/phy/ti/phy-j721e-wiz.c
@@ -523,7 +523,7 @@ static int wiz_reset_deassert(struct reset_ctl *reset_ctl)
return ret;
}
- if (wiz->lane_phy_type[id - 1] == PHY_TYPE_PCIE)
+ if (wiz->lane_phy_type[id - 1] == PHY_TYPE_DP)
ret = regmap_field_write(wiz->p_enable[id - 1], P_ENABLE);
else
ret = regmap_field_write(wiz->p_enable[id - 1], P_ENABLE_FORCE);
diff --git a/drivers/power/domain/ti-power-domain.c b/drivers/power/domain/ti-power-domain.c
index b45e9b8245..a7dadf2eea 100644
--- a/drivers/power/domain/ti-power-domain.c
+++ b/drivers/power/domain/ti-power-domain.c
@@ -79,6 +79,11 @@ static const struct soc_attr ti_k3_soc_pd_data[] = {
.family = "J7200",
.data = &j7200_pd_platdata,
},
+#elif CONFIG_SOC_K3_J721S2
+ {
+ .family = "J721S2",
+ .data = &j721s2_pd_platdata,
+ },
#endif
{ /* sentinel */ }
};
diff --git a/drivers/ram/Kconfig b/drivers/ram/Kconfig
index a79594d351..709c916a2a 100644
--- a/drivers/ram/Kconfig
+++ b/drivers/ram/Kconfig
@@ -62,7 +62,7 @@ choice
depends on K3_DDRSS
prompt "K3 DDRSS Arch Support"
- default K3_J721E_DDRSS if SOC_K3_J721E
+ default K3_J721E_DDRSS if SOC_K3_J721E || SOC_K3_J721S2
default K3_AM64_DDRSS if SOC_K3_AM642
config K3_J721E_DDRSS
diff --git a/drivers/ram/k3-ddrss/k3-ddrss.c b/drivers/ram/k3-ddrss/k3-ddrss.c
index 95b5cf9128..25e3976e65 100644
--- a/drivers/ram/k3-ddrss/k3-ddrss.c
+++ b/drivers/ram/k3-ddrss/k3-ddrss.c
@@ -30,6 +30,78 @@
#define DDRSS_V2A_R1_MAT_REG 0x0020
#define DDRSS_ECC_CTRL_REG 0x0120
+#define SINGLE_DDR_SUBSYSTEM 0x1
+#define MULTI_DDR_SUBSYSTEM 0x2
+
+#define MULTI_DDR_CFG0 0x00114100
+#define MULTI_DDR_CFG1 0x00114104
+#define DDR_CFG_LOAD 0x00114110
+
+enum intrlv_gran {
+ GRAN_128B,
+ GRAN_512B,
+ GRAN_2KB,
+ GRAN_4KB,
+ GRAN_16KB,
+ GRAN_32KB,
+ GRAN_512KB,
+ GRAN_1GB,
+ GRAN_1_5GB,
+ GRAN_2GB,
+ GRAN_3GB,
+ GRAN_4GB,
+ GRAN_6GB,
+ GRAN_8GB,
+ GRAN_16GB
+};
+
+enum intrlv_size {
+ SIZE_0,
+ SIZE_128MB,
+ SIZE_256MB,
+ SIZE_512MB,
+ SIZE_1GB,
+ SIZE_2GB,
+ SIZE_3GB,
+ SIZE_4GB,
+ SIZE_6GB,
+ SIZE_8GB,
+ SIZE_12GB,
+ SIZE_16GB,
+ SIZE_32GB
+};
+
+struct k3_ddrss_data {
+ u32 flags;
+};
+
+enum ecc_enable {
+ DISABLE_ALL = 0,
+ ENABLE_0,
+ ENABLE_1,
+ ENABLE_ALL
+};
+
+enum emif_config {
+ INTERLEAVE_ALL = 0,
+ SEPR0,
+ SEPR1
+};
+
+enum emif_active {
+ EMIF_0 = 1,
+ EMIF_1,
+ EMIF_ALL
+};
+
+struct k3_msmc {
+ enum intrlv_gran gran;
+ enum intrlv_size size;
+ enum ecc_enable enable;
+ enum emif_config config;
+ enum emif_active active;
+};
+
struct k3_ddrss_desc {
struct udevice *dev;
void __iomem *ddrss_ss_cfg;
@@ -42,14 +114,12 @@ struct k3_ddrss_desc {
u32 ddr_freq2;
u32 ddr_fhs_cnt;
struct udevice *vtt_supply;
+ u32 instance;
+ lpddr4_obj *driverdt;
+ lpddr4_config config;
+ lpddr4_privatedata pd;
};
-static lpddr4_obj *driverdt;
-static lpddr4_config config;
-static lpddr4_privatedata pd;
-
-static struct k3_ddrss_desc *ddrss;
-
struct reginitdata {
u32 ctl_regs[LPDDR4_INTR_CTL_REG_COUNT];
u16 ctl_regs_offs[LPDDR4_INTR_CTL_REG_COUNT];
@@ -83,15 +153,16 @@ struct reginitdata {
offset = offset * 10 + (*i - '0'); } \
} while (0)
-static u32 k3_lpddr4_read_ddr_type(void)
+static u32 k3_lpddr4_read_ddr_type(const lpddr4_privatedata *pd)
{
u32 status = 0U;
u32 offset = 0U;
u32 regval = 0U;
u32 dram_class = 0U;
+ struct k3_ddrss_desc *ddrss = (struct k3_ddrss_desc *)pd->ddr_instance;
TH_OFFSET_FROM_REG(LPDDR4__DRAM_CLASS__REG, CTL_SHIFT, offset);
- status = driverdt->readreg(&pd, LPDDR4_CTL_REGS, offset, &regval);
+ status = ddrss->driverdt->readreg(pd, LPDDR4_CTL_REGS, offset, &regval);
if (status > 0U) {
printf("%s: Failed to read DRAM_CLASS\n", __func__);
hang();
@@ -102,23 +173,23 @@ static u32 k3_lpddr4_read_ddr_type(void)
return dram_class;
}
-static void k3_lpddr4_freq_update(void)
+static void k3_lpddr4_freq_update(struct k3_ddrss_desc *ddrss)
{
unsigned int req_type, counter;
for (counter = 0; counter < ddrss->ddr_fhs_cnt; counter++) {
if (wait_for_bit_le32(ddrss->ddrss_ctrl_mmr +
- CTRLMMR_DDR4_FSP_CLKCHNG_REQ_OFFS, 0x80,
+ CTRLMMR_DDR4_FSP_CLKCHNG_REQ_OFFS + ddrss->instance * 0x10, 0x80,
true, 10000, false)) {
printf("Timeout during frequency handshake\n");
hang();
}
req_type = readl(ddrss->ddrss_ctrl_mmr +
- CTRLMMR_DDR4_FSP_CLKCHNG_REQ_OFFS) & 0x03;
+ CTRLMMR_DDR4_FSP_CLKCHNG_REQ_OFFS + ddrss->instance * 0x10) & 0x03;
- debug("%s: received freq change req: req type = %d, req no. = %d\n",
- __func__, req_type, counter);
+ debug("%s: received freq change req: req type = %d, req no. = %d, instance = %d\n",
+ __func__, req_type, counter, ddrss->instance);
if (req_type == 1)
clk_set_rate(&ddrss->ddr_clk, ddrss->ddr_freq1);
@@ -132,31 +203,32 @@ static void k3_lpddr4_freq_update(void)
printf("%s: Invalid freq request type\n", __func__);
writel(0x1, ddrss->ddrss_ctrl_mmr +
- CTRLMMR_DDR4_FSP_CLKCHNG_ACK_OFFS);
+ CTRLMMR_DDR4_FSP_CLKCHNG_ACK_OFFS + ddrss->instance * 0x10);
if (wait_for_bit_le32(ddrss->ddrss_ctrl_mmr +
- CTRLMMR_DDR4_FSP_CLKCHNG_REQ_OFFS, 0x80,
+ CTRLMMR_DDR4_FSP_CLKCHNG_REQ_OFFS + ddrss->instance * 0x10, 0x80,
false, 10, false)) {
printf("Timeout during frequency handshake\n");
hang();
}
writel(0x0, ddrss->ddrss_ctrl_mmr +
- CTRLMMR_DDR4_FSP_CLKCHNG_ACK_OFFS);
+ CTRLMMR_DDR4_FSP_CLKCHNG_ACK_OFFS + ddrss->instance * 0x10);
}
}
-static void k3_lpddr4_ack_freq_upd_req(void)
+static void k3_lpddr4_ack_freq_upd_req(const lpddr4_privatedata *pd)
{
u32 dram_class;
+ struct k3_ddrss_desc *ddrss = (struct k3_ddrss_desc *)pd->ddr_instance;
debug("--->>> LPDDR4 Initialization is in progress ... <<<---\n");
- dram_class = k3_lpddr4_read_ddr_type();
+ dram_class = k3_lpddr4_read_ddr_type(pd);
switch (dram_class) {
case DENALI_CTL_0_DRAM_CLASS_DDR4:
break;
case DENALI_CTL_0_DRAM_CLASS_LPDDR4:
- k3_lpddr4_freq_update();
+ k3_lpddr4_freq_update(ddrss);
break;
default:
printf("Unrecognized dram_class cannot update frequency!\n");
@@ -167,8 +239,9 @@ static int k3_ddrss_init_freq(struct k3_ddrss_desc *ddrss)
{
u32 dram_class;
int ret;
+ lpddr4_privatedata *pd = &ddrss->pd;
- dram_class = k3_lpddr4_read_ddr_type();
+ dram_class = k3_lpddr4_read_ddr_type(pd);
switch (dram_class) {
case DENALI_CTL_0_DRAM_CLASS_DDR4:
@@ -196,7 +269,7 @@ static void k3_lpddr4_info_handler(const lpddr4_privatedata *pd,
lpddr4_infotype infotype)
{
if (infotype == LPDDR4_DRV_SOC_PLL_UPDATE)
- k3_lpddr4_ack_freq_upd_req();
+ k3_lpddr4_ack_freq_upd_req(pd);
}
static int k3_ddrss_power_on(struct k3_ddrss_desc *ddrss)
@@ -235,6 +308,7 @@ static int k3_ddrss_power_on(struct k3_ddrss_desc *ddrss)
static int k3_ddrss_ofdata_to_priv(struct udevice *dev)
{
struct k3_ddrss_desc *ddrss = dev_get_priv(dev);
+ struct k3_ddrss_data *ddrss_data = (struct k3_ddrss_data *)dev_get_driver_data(dev);
phys_addr_t reg;
int ret;
@@ -274,6 +348,17 @@ static int k3_ddrss_ofdata_to_priv(struct udevice *dev)
if (ret)
dev_err(dev, "clk get failed for osc clk %d\n", ret);
+ /* Reading instance number for multi DDR subsystems */
+ if (ddrss_data->flags & MULTI_DDR_SUBSYSTEM) {
+ ret = dev_read_u32(dev, "instance", &ddrss->instance);
+ if (ret) {
+ dev_err(dev, "missing instance property");
+ return -EINVAL;
+ }
+ } else {
+ ddrss->instance = 0;
+ }
+
ret = dev_read_u32(dev, "ti,ddr-freq1", &ddrss->ddr_freq1);
if (ret)
dev_err(dev, "ddr freq1 not populated %d\n", ret);
@@ -289,12 +374,13 @@ static int k3_ddrss_ofdata_to_priv(struct udevice *dev)
return ret;
}
-void k3_lpddr4_probe(void)
+void k3_lpddr4_probe(struct k3_ddrss_desc *ddrss)
{
u32 status = 0U;
u16 configsize = 0U;
+ lpddr4_config *config = &ddrss->config;
- status = driverdt->probe(&config, &configsize);
+ status = ddrss->driverdt->probe(config, &configsize);
if ((status != 0) || (configsize != sizeof(lpddr4_privatedata))
|| (configsize > SRAM_MAX)) {
@@ -305,25 +391,30 @@ void k3_lpddr4_probe(void)
}
}
-void k3_lpddr4_init(void)
+void k3_lpddr4_init(struct k3_ddrss_desc *ddrss)
{
u32 status = 0U;
+ lpddr4_config *config = &ddrss->config;
+ lpddr4_obj *driverdt = ddrss->driverdt;
+ lpddr4_privatedata *pd = &ddrss->pd;
- if ((sizeof(pd) != sizeof(lpddr4_privatedata))
- || (sizeof(pd) > SRAM_MAX)) {
+ if ((sizeof(*pd) != sizeof(lpddr4_privatedata)) || (sizeof(*pd) > SRAM_MAX)) {
printf("%s: FAIL\n", __func__);
hang();
}
- config.ctlbase = (struct lpddr4_ctlregs_s *)ddrss->ddrss_ss_cfg;
- config.infohandler = (lpddr4_infocallback) k3_lpddr4_info_handler;
+ config->ctlbase = (struct lpddr4_ctlregs_s *)ddrss->ddrss_ss_cfg;
+ config->infohandler = (lpddr4_infocallback) k3_lpddr4_info_handler;
+
+ status = driverdt->init(pd, config);
- status = driverdt->init(&pd, &config);
+ /* linking ddr instance to lpddr4 */
+ pd->ddr_instance = (void *)ddrss;
if ((status > 0U) ||
- (pd.ctlbase != (struct lpddr4_ctlregs_s *)config.ctlbase) ||
- (pd.ctlinterrupthandler != config.ctlinterrupthandler) ||
- (pd.phyindepinterrupthandler != config.phyindepinterrupthandler)) {
+ (pd->ctlbase != (struct lpddr4_ctlregs_s *)config->ctlbase) ||
+ (pd->ctlinterrupthandler != config->ctlinterrupthandler) ||
+ (pd->phyindepinterrupthandler != config->phyindepinterrupthandler)) {
printf("%s: FAIL\n", __func__);
hang();
} else {
@@ -331,7 +422,8 @@ void k3_lpddr4_init(void)
}
}
-void populate_data_array_from_dt(struct reginitdata *reginit_data)
+void populate_data_array_from_dt(struct k3_ddrss_desc *ddrss,
+ struct reginitdata *reginit_data)
{
int ret, i;
@@ -363,22 +455,24 @@ void populate_data_array_from_dt(struct reginitdata *reginit_data)
reginit_data->phy_regs_offs[i] = i;
}
-void k3_lpddr4_hardware_reg_init(void)
+void k3_lpddr4_hardware_reg_init(struct k3_ddrss_desc *ddrss)
{
u32 status = 0U;
struct reginitdata reginitdata;
+ lpddr4_obj *driverdt = ddrss->driverdt;
+ lpddr4_privatedata *pd = &ddrss->pd;
- populate_data_array_from_dt(&reginitdata);
+ populate_data_array_from_dt(ddrss, &reginitdata);
- status = driverdt->writectlconfig(&pd, reginitdata.ctl_regs,
+ status = driverdt->writectlconfig(pd, reginitdata.ctl_regs,
reginitdata.ctl_regs_offs,
LPDDR4_INTR_CTL_REG_COUNT);
if (!status)
- status = driverdt->writephyindepconfig(&pd, reginitdata.pi_regs,
+ status = driverdt->writephyindepconfig(pd, reginitdata.pi_regs,
reginitdata.pi_regs_offs,
LPDDR4_INTR_PHY_INDEP_REG_COUNT);
if (!status)
- status = driverdt->writephyconfig(&pd, reginitdata.phy_regs,
+ status = driverdt->writephyconfig(pd, reginitdata.phy_regs,
reginitdata.phy_regs_offs,
LPDDR4_INTR_PHY_REG_COUNT);
if (status) {
@@ -387,27 +481,29 @@ void k3_lpddr4_hardware_reg_init(void)
}
}
-void k3_lpddr4_start(void)
+void k3_lpddr4_start(struct k3_ddrss_desc *ddrss)
{
u32 status = 0U;
u32 regval = 0U;
u32 offset = 0U;
+ lpddr4_obj *driverdt = ddrss->driverdt;
+ lpddr4_privatedata *pd = &ddrss->pd;
TH_OFFSET_FROM_REG(LPDDR4__START__REG, CTL_SHIFT, offset);
- status = driverdt->readreg(&pd, LPDDR4_CTL_REGS, offset, &regval);
+ status = driverdt->readreg(pd, LPDDR4_CTL_REGS, offset, &regval);
if ((status > 0U) || ((regval & TH_FLD_MASK(LPDDR4__START__FLD)) != 0U)) {
printf("%s: Pre start FAIL\n", __func__);
hang();
}
- status = driverdt->start(&pd);
+ status = driverdt->start(pd);
if (status > 0U) {
printf("%s: FAIL\n", __func__);
hang();
}
- status = driverdt->readreg(&pd, LPDDR4_CTL_REGS, offset, &regval);
+ status = driverdt->readreg(pd, LPDDR4_CTL_REGS, offset, &regval);
if ((status > 0U) || ((regval & TH_FLD_MASK(LPDDR4__START__FLD)) != 1U)) {
printf("%s: Post start FAIL\n", __func__);
hang();
@@ -419,8 +515,7 @@ void k3_lpddr4_start(void)
static int k3_ddrss_probe(struct udevice *dev)
{
int ret;
-
- ddrss = dev_get_priv(dev);
+ struct k3_ddrss_desc *ddrss = dev_get_priv(dev);
debug("%s(dev=%p)\n", __func__, dev);
@@ -439,16 +534,17 @@ static int k3_ddrss_probe(struct udevice *dev)
writel(0x0, ddrss->ddrss_ss_cfg + DDRSS_ECC_CTRL_REG);
#endif
- driverdt = lpddr4_getinstance();
- k3_lpddr4_probe();
- k3_lpddr4_init();
- k3_lpddr4_hardware_reg_init();
+ ddrss->driverdt = lpddr4_getinstance();
+
+ k3_lpddr4_probe(ddrss);
+ k3_lpddr4_init(ddrss);
+ k3_lpddr4_hardware_reg_init(ddrss);
ret = k3_ddrss_init_freq(ddrss);
if (ret)
return ret;
- k3_lpddr4_start();
+ k3_lpddr4_start(ddrss);
return ret;
}
@@ -462,9 +558,18 @@ static struct ram_ops k3_ddrss_ops = {
.get_info = k3_ddrss_get_info,
};
+static const struct k3_ddrss_data k3_data = {
+ .flags = SINGLE_DDR_SUBSYSTEM,
+};
+
+static const struct k3_ddrss_data j721s2_data = {
+ .flags = MULTI_DDR_SUBSYSTEM,
+};
+
static const struct udevice_id k3_ddrss_ids[] = {
- {.compatible = "ti,am64-ddrss"},
- {.compatible = "ti,j721e-ddrss"},
+ {.compatible = "ti,am64-ddrss", .data = (ulong)&k3_data, },
+ {.compatible = "ti,j721e-ddrss", .data = (ulong)&k3_data, },
+ {.compatible = "ti,j721s2-ddrss", .data = (ulong)&j721s2_data, },
{}
};
@@ -476,3 +581,92 @@ U_BOOT_DRIVER(k3_ddrss) = {
.probe = k3_ddrss_probe,
.priv_auto = sizeof(struct k3_ddrss_desc),
};
+
+static int k3_msmc_set_config(struct k3_msmc *msmc)
+{
+ u32 ddr_cfg0 = 0;
+ u32 ddr_cfg1 = 0;
+
+ ddr_cfg0 |= msmc->gran << 24;
+ ddr_cfg0 |= msmc->size << 16;
+ /* heartbeat_per, bit[4:0] setting to 3 is advisable */
+ ddr_cfg0 |= 3;
+
+ /* Program MULTI_DDR_CFG0 */
+ writel(ddr_cfg0, MULTI_DDR_CFG0);
+
+ ddr_cfg1 |= msmc->enable << 16;
+ ddr_cfg1 |= msmc->config << 8;
+ ddr_cfg1 |= msmc->active;
+
+ /* Program MULTI_DDR_CFG1 */
+ writel(ddr_cfg1, MULTI_DDR_CFG1);
+
+ /* Program DDR_CFG_LOAD */
+ writel(0x60000000, DDR_CFG_LOAD);
+
+ return 0;
+}
+
+static int k3_msmc_probe(struct udevice *dev)
+{
+ struct k3_msmc *msmc = dev_get_priv(dev);
+ int ret = 0;
+
+ /* Read the granular size from DT */
+ ret = dev_read_u32(dev, "intrlv-gran", &msmc->gran);
+ if (ret) {
+ dev_err(dev, "missing intrlv-gran property");
+ return -EINVAL;
+ }
+
+ /* Read the interleave region from DT */
+ ret = dev_read_u32(dev, "intrlv-size", &msmc->size);
+ if (ret) {
+ dev_err(dev, "missing intrlv-size property");
+ return -EINVAL;
+ }
+
+ /* Read ECC enable config */
+ ret = dev_read_u32(dev, "ecc-enable", &msmc->enable);
+ if (ret) {
+ dev_err(dev, "missing ecc-enable property");
+ return -EINVAL;
+ }
+
+ /* Read EMIF configuration */
+ ret = dev_read_u32(dev, "emif-config", &msmc->config);
+ if (ret) {
+ dev_err(dev, "missing emif-config property");
+ return -EINVAL;
+ }
+
+ /* Read EMIF active */
+ ret = dev_read_u32(dev, "emif-active", &msmc->active);
+ if (ret) {
+ dev_err(dev, "missing emif-active property");
+ return -EINVAL;
+ }
+
+ ret = k3_msmc_set_config(msmc);
+ if (ret) {
+ dev_err(dev, "error setting msmc config");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct udevice_id k3_msmc_ids[] = {
+ { .compatible = "ti,j721s2-msmc"},
+ {}
+};
+
+U_BOOT_DRIVER(k3_msmc) = {
+ .name = "k3_msmc",
+ .of_match = k3_msmc_ids,
+ .id = UCLASS_MISC,
+ .probe = k3_msmc_probe,
+ .priv_auto = sizeof(struct k3_msmc),
+ .flags = DM_FLAG_DEFAULT_PD_CTRL_OFF,
+};
diff --git a/drivers/ram/k3-ddrss/lpddr4_structs_if.h b/drivers/ram/k3-ddrss/lpddr4_structs_if.h
index e41cbb7ff4..f2f1210c3c 100644
--- a/drivers/ram/k3-ddrss/lpddr4_structs_if.h
+++ b/drivers/ram/k3-ddrss/lpddr4_structs_if.h
@@ -24,6 +24,7 @@ struct lpddr4_privatedata_s {
lpddr4_infocallback infohandler;
lpddr4_ctlcallback ctlinterrupthandler;
lpddr4_phyindepcallback phyindepinterrupthandler;
+ void *ddr_instance;
};
struct lpddr4_debuginfo_s {
diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig
index 24e536463b..27e4a60ff5 100644
--- a/drivers/remoteproc/Kconfig
+++ b/drivers/remoteproc/Kconfig
@@ -92,4 +92,14 @@ config REMOTEPROC_TI_PRU
help
Say 'y' here to add support for TI' K3 remoteproc driver.
+config REMOTEPROC_TI_IPU
+ bool "Support for TI's K3 based IPU remoteproc driver"
+ select REMOTEPROC
+ depends on DM
+ depends on SPL_DRIVERS_MISC
+ depends on SPL_FS_LOADER
+ depends on OF_CONTROL
+ help
+ Say 'y' here to add support for TI's K3 IPU remoteproc driver.
+
endmenu
diff --git a/drivers/remoteproc/Makefile b/drivers/remoteproc/Makefile
index f0e83451d6..fbe9c172bc 100644
--- a/drivers/remoteproc/Makefile
+++ b/drivers/remoteproc/Makefile
@@ -15,3 +15,4 @@ obj-$(CONFIG_REMOTEPROC_TI_K3_DSP) += ti_k3_dsp_rproc.o
obj-$(CONFIG_REMOTEPROC_TI_K3_R5F) += ti_k3_r5f_rproc.o
obj-$(CONFIG_REMOTEPROC_TI_POWER) += ti_power_proc.o
obj-$(CONFIG_REMOTEPROC_TI_PRU) += pru_rproc.o
+obj-$(CONFIG_REMOTEPROC_TI_IPU) += ipu_rproc.o
diff --git a/drivers/remoteproc/ipu_rproc.c b/drivers/remoteproc/ipu_rproc.c
new file mode 100644
index 0000000000..b4a06bc955
--- /dev/null
+++ b/drivers/remoteproc/ipu_rproc.c
@@ -0,0 +1,759 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * IPU remoteproc driver for various SoCs
+ *
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
+ * Angela Stegmaier <angelabaker@ti.com>
+ * Venkateswara Rao Mandela <venkat.mandela@ti.com>
+ * Keerthy <j-keerthy@ti.com>
+ */
+
+#include <common.h>
+#include <hang.h>
+#include <cpu_func.h>
+#include <dm.h>
+#include <dm/device_compat.h>
+#include <elf.h>
+#include <env.h>
+#include <dm/of_access.h>
+#include <fs_loader.h>
+#include <remoteproc.h>
+#include <errno.h>
+#include <clk.h>
+#include <reset.h>
+#include <regmap.h>
+#include <syscon.h>
+#include <asm/io.h>
+#include <misc.h>
+#include <power-domain.h>
+#include <timer.h>
+#include <fs.h>
+#include <spl.h>
+#include <timer.h>
+#include <reset.h>
+#include <linux/bitmap.h>
+
+#define IPU1_LOAD_ADDR (0xa17ff000)
+#define MAX_REMOTECORE_BIN_SIZE (8 * 0x100000)
+
+enum ipu_num {
+ IPU1 = 0,
+ IPU2,
+ RPROC_END_ENUMS,
+};
+
+#define IPU2_LOAD_ADDR (IPU1_LOAD_ADDR + MAX_REMOTECORE_BIN_SIZE)
+
+#define PAGE_SHIFT 12
+#define PAGESIZE_1M 0x0
+#define PAGESIZE_64K 0x1
+#define PAGESIZE_4K 0x2
+#define PAGESIZE_16M 0x3
+#define LE 0
+#define BE 1
+#define ELEMSIZE_8 0x0
+#define ELEMSIZE_16 0x1
+#define ELEMSIZE_32 0x2
+#define MIXED_TLB 0x0
+#define MIXED_CPU 0x1
+
+#define PGT_SMALLPAGE_SIZE 0x00001000
+#define PGT_LARGEPAGE_SIZE 0x00010000
+#define PGT_SECTION_SIZE 0x00100000
+#define PGT_SUPERSECTION_SIZE 0x01000000
+
+#define PGT_L1_DESC_PAGE 0x00001
+#define PGT_L1_DESC_SECTION 0x00002
+#define PGT_L1_DESC_SUPERSECTION 0x40002
+
+#define PGT_L1_DESC_PAGE_MASK 0xfffffC00
+#define PGT_L1_DESC_SECTION_MASK 0xfff00000
+#define PGT_L1_DESC_SUPERSECTION_MASK 0xff000000
+
+#define PGT_L1_DESC_SMALLPAGE_INDEX_SHIFT 12
+#define PGT_L1_DESC_LARGEPAGE_INDEX_SHIFT 16
+#define PGT_L1_DESC_SECTION_INDEX_SHIFT 20
+#define PGT_L1_DESC_SUPERSECTION_INDEX_SHIFT 24
+
+#define PGT_L2_DESC_SMALLPAGE 0x02
+#define PGT_L2_DESC_LARGEPAGE 0x01
+
+#define PGT_L2_DESC_SMALLPAGE_MASK 0xfffff000
+#define PGT_L2_DESC_LARGEPAGE_MASK 0xffff0000
+
+/*
+ * The memory for the page tables (256 KB per IPU) is placed just before
+ * the carveout memories for the remote processors. 16 KB of memory is
+ * needed for the L1 page table (4096 entries * 4 bytes per 1 MB section).
+ * Any smaller page (64 KB or 4 KB) entries are supported through L2 page
+ * tables (1 KB per table). The remaining 240 KB can provide support for
+ * 240 L2 page tables. Any remoteproc firmware image requiring more than
+ * 240 L2 page table entries would need more memory to be reserved.
+ */
+#define PAGE_TABLE_SIZE_L1 (0x00004000)
+#define PAGE_TABLE_SIZE_L2 (0x400)
+#define MAX_NUM_L2_PAGE_TABLES (240)
+#define PAGE_TABLE_SIZE_L2_TOTAL (MAX_NUM_L2_PAGE_TABLES * PAGE_TABLE_SIZE_L2)
+#define PAGE_TABLE_SIZE (PAGE_TABLE_SIZE_L1 + (PAGE_TABLE_SIZE_L2_TOTAL))
+
+/**
+ * struct omap_rproc_mem - internal memory structure
+ * @cpu_addr: MPU virtual address of the memory region
+ * @bus_addr: bus address used to access the memory region
+ * @dev_addr: device address of the memory region from DSP view
+ * @size: size of the memory region
+ */
+struct omap_rproc_mem {
+ void __iomem *cpu_addr;
+ phys_addr_t bus_addr;
+ u32 dev_addr;
+ size_t size;
+};
+
+struct ipu_privdata {
+ struct omap_rproc_mem mem;
+ struct list_head mappings;
+ const char *fw_name;
+ u32 bootaddr;
+ int id;
+ struct udevice *rdev;
+};
+
+typedef int (*handle_resource_t) (void *, int offset, int avail);
+
+unsigned int *page_table_l1 = (unsigned int *)0x0;
+unsigned int *page_table_l2 = (unsigned int *)0x0;
+
+/*
+ * Set maximum carveout size to 96 MB
+ */
+#define DRA7_RPROC_MAX_CO_SIZE (96 * 0x100000)
+
+/*
+ * These global variables are used for deriving the MMU page tables. They
+ * are initialized for each core with the appropriate values. The length
+ * of the array mem_bitmap is set as per a 96 MB carveout, which is the
+ * maximum set aside in the current memory map.
+ */
+unsigned long mem_base;
+unsigned long mem_size;
+unsigned long
+
+mem_bitmap[BITS_TO_LONGS(DRA7_RPROC_MAX_CO_SIZE >> PAGE_SHIFT)];
+unsigned long mem_count;
+
+unsigned int pgtable_l2_map[MAX_NUM_L2_PAGE_TABLES];
+unsigned int pgtable_l2_cnt;
+
+void *ipu_alloc_mem(struct udevice *dev, unsigned long len, unsigned long align)
+{
+ unsigned long mask;
+ unsigned long pageno;
+ int count;
+
+ count = ((len + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1)) >> PAGE_SHIFT;
+ mask = (1 << align) - 1;
+ pageno =
+ bitmap_find_next_zero_area(mem_bitmap, mem_count, 0, count, mask);
+ debug("%s: count %d mask %#lx pageno %#lx\n", __func__, count, mask,
+ pageno);
+
+ if (pageno >= mem_count) {
+ debug("%s: %s Error allocating memory; "
+ "Please check carveout size\n", __FILE__, __func__);
+ return NULL;
+ }
+
+ bitmap_set(mem_bitmap, pageno, count);
+ return (void *)(mem_base + (pageno << PAGE_SHIFT));
+}
+
+int find_pagesz(unsigned int virt, unsigned int phys, unsigned int len)
+{
+ int pg_sz_ind = -1;
+ unsigned int min_align = __ffs(virt);
+
+ if (min_align > __ffs(phys))
+ min_align = __ffs(phys);
+
+ if (min_align >= PGT_L1_DESC_SUPERSECTION_INDEX_SHIFT &&
+ len >= 0x1000000) {
+ pg_sz_ind = PAGESIZE_16M;
+ goto ret_block;
+ }
+ if (min_align >= PGT_L1_DESC_SECTION_INDEX_SHIFT &&
+ len >= 0x100000) {
+ pg_sz_ind = PAGESIZE_1M;
+ goto ret_block;
+ }
+ if (min_align >= PGT_L1_DESC_LARGEPAGE_INDEX_SHIFT &&
+ len >= 0x10000) {
+ pg_sz_ind = PAGESIZE_64K;
+ goto ret_block;
+ }
+ if (min_align >= PGT_L1_DESC_SMALLPAGE_INDEX_SHIFT &&
+ len >= 0x1000) {
+ pg_sz_ind = PAGESIZE_4K;
+ goto ret_block;
+ }
+
+ ret_block:
+ return pg_sz_ind;
+}
+
+int get_l2_pg_tbl_addr(unsigned int virt, unsigned int *pg_tbl_addr)
+{
+ int ret = -1;
+ int i = 0;
+ int match_found = 0;
+ unsigned int tag = (virt & PGT_L1_DESC_SECTION_MASK);
+
+ *pg_tbl_addr = 0;
+ for (i = 0; (i < pgtable_l2_cnt) && (match_found == 0); i++) {
+ if (tag == pgtable_l2_map[i]) {
+ *pg_tbl_addr =
+ ((unsigned int)page_table_l2) +
+ (i * PAGE_TABLE_SIZE_L2);
+ match_found = 1;
+ ret = 0;
+ }
+ }
+
+ if (match_found == 0 && i < MAX_NUM_L2_PAGE_TABLES) {
+ pgtable_l2_map[i] = tag;
+ pgtable_l2_cnt++;
+ *pg_tbl_addr =
+ ((unsigned int)page_table_l2) + (i * PAGE_TABLE_SIZE_L2);
+ ret = 0;
+ }
+
+ return ret;
+}
+
+int
+config_l2_pagetable(unsigned int virt, unsigned int phys,
+ unsigned int pg_sz, unsigned int pg_tbl_addr)
+{
+ int ret = -1;
+ unsigned int desc = 0;
+ int i = 0;
+ unsigned int *pg_tbl = (unsigned int *)pg_tbl_addr;
+
+ /*
+ * Pick bit 19:12 of the virtual address as index
+ */
+ unsigned int index = (virt & (~PGT_L1_DESC_SECTION_MASK)) >> PAGE_SHIFT;
+
+ switch (pg_sz) {
+ case PAGESIZE_64K:
+ desc =
+ (phys & PGT_L2_DESC_LARGEPAGE_MASK) | PGT_L2_DESC_LARGEPAGE;
+ for (i = 0; i < 16; i++)
+ pg_tbl[index + i] = desc;
+ ret = 0;
+ break;
+ case PAGESIZE_4K:
+ desc =
+ (phys & PGT_L2_DESC_SMALLPAGE_MASK) | PGT_L2_DESC_SMALLPAGE;
+ pg_tbl[index] = desc;
+ ret = 0;
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+unsigned int
+ipu_config_pagetable(struct udevice *dev, unsigned int virt, unsigned int phys,
+ unsigned int len)
+{
+ unsigned int index;
+ unsigned int l = len;
+ unsigned int desc;
+ int pg_sz = 0;
+ int i = 0, err = 0;
+ unsigned int pg_tbl_l2_addr = 0;
+ unsigned int tmp_pgsz;
+
+ if ((len & 0x0FFF) != 0)
+ return 0;
+
+ while (l > 0) {
+ pg_sz = find_pagesz(virt, phys, l);
+ index = virt >> PGT_L1_DESC_SECTION_INDEX_SHIFT;
+ switch (pg_sz) {
+ /*
+ * 16 MB super section
+ */
+ case PAGESIZE_16M:
+ /*
+ * Program the next 16 descriptors
+ */
+ desc =
+ (phys & PGT_L1_DESC_SUPERSECTION_MASK) |
+ PGT_L1_DESC_SUPERSECTION;
+ for (i = 0; i < 16; i++)
+ page_table_l1[index + i] = desc;
+ l -= PGT_SUPERSECTION_SIZE;
+ phys += PGT_SUPERSECTION_SIZE;
+ virt += PGT_SUPERSECTION_SIZE;
+ break;
+ /*
+ * 1 MB section
+ */
+ case PAGESIZE_1M:
+ desc =
+ (phys & PGT_L1_DESC_SECTION_MASK) |
+ PGT_L1_DESC_SECTION;
+ page_table_l1[index] = desc;
+ l -= PGT_SECTION_SIZE;
+ phys += PGT_SECTION_SIZE;
+ virt += PGT_SECTION_SIZE;
+ break;
+ /*
+ * 64 KB large page
+ */
+ case PAGESIZE_64K:
+ case PAGESIZE_4K:
+ if (pg_sz == PAGESIZE_64K)
+ tmp_pgsz = 0x10000;
+ else
+ tmp_pgsz = 0x1000;
+
+ err = get_l2_pg_tbl_addr(virt, &pg_tbl_l2_addr);
+ if (err != 0) {
+ debug
+ ("Unable to get level 2 PT address\n");
+ hang();
+ }
+ err =
+ config_l2_pagetable(virt, phys, pg_sz,
+ pg_tbl_l2_addr);
+ desc =
+ (pg_tbl_l2_addr & PGT_L1_DESC_PAGE_MASK) |
+ PGT_L1_DESC_PAGE;
+ page_table_l1[index] = desc;
+ l -= tmp_pgsz;
+ phys += tmp_pgsz;
+ virt += tmp_pgsz;
+ break;
+ case -1:
+ default:
+ return 0;
+ }
+ }
+
+ return len;
+}
+
+int da_to_pa(struct udevice *dev, int da)
+{
+ struct rproc_mem_entry *maps = NULL;
+ struct ipu_privdata *priv = dev_get_priv(dev);
+
+ list_for_each_entry(maps, &priv->mappings, node) {
+ if (da >= maps->da && da < (maps->da + maps->len))
+ return maps->dma + (da - maps->da);
+ }
+
+ return 0;
+}
+
+u32 ipu_config_mmu(u32 core_id, struct rproc *cfg)
+{
+ u32 i = 0;
+ u32 reg = 0;
+
+ /*
+ * Clear the entire pagetable location before programming the
+ * address into the MMU
+ */
+ memset((void *)cfg->page_table_addr, 0x00, PAGE_TABLE_SIZE);
+
+ for (i = 0; i < cfg->num_iommus; i++) {
+ u32 mmu_base = cfg->mmu_base_addr[i];
+
+ __raw_writel((int)cfg->page_table_addr, mmu_base + 0x4c);
+ reg = __raw_readl(mmu_base + 0x88);
+
+ /*
+ * enable bus-error back
+ */
+ __raw_writel(reg | 0x1, mmu_base + 0x88);
+
+ /*
+ * Enable the MMU IRQs during MMU programming for the
+ * late attach case. This is to allow the MMU fault to be
+ * detected by the kernel.
+ *
+ * MULTIHITFAULT|EMMUMISS|TRANSLATIONFAULT|TABLEWALKFAULT
+ */
+ __raw_writel(0x1E, mmu_base + 0x1c);
+
+ /*
+ * emutlbupdate|TWLENABLE|MMUENABLE
+ */
+ __raw_writel(0x6, mmu_base + 0x44);
+ }
+
+ return 0;
+}
+
/**
 * enum ipu_mem - core memory range identifiers
 *
 * NOTE(review): the enumerator names (PRU_MEM_*) look copy-pasted from the
 * PRU remoteproc driver and this enum is not referenced anywhere in the
 * visible code — confirm whether it is needed in the IPU driver at all.
 */
enum ipu_mem {
	PRU_MEM_IRAM = 0,
	PRU_MEM_CTRL,
	PRU_MEM_DEBUG,
	PRU_MEM_MAX,
};
+
+static int ipu_start(struct udevice *dev)
+{
+ struct ipu_privdata *priv;
+ struct reset_ctl reset;
+ struct rproc *cfg = NULL;
+ int ret;
+
+ priv = dev_get_priv(dev);
+
+ cfg = rproc_cfg_arr[priv->id];
+ if (cfg->config_peripherals)
+ cfg->config_peripherals(priv->id, cfg);
+
+ /*
+ * Start running the remote core
+ */
+ ret = reset_get_by_index(dev, 0, &reset);
+ if (ret < 0) {
+ dev_err(dev, "%s: error getting reset index %d\n", __func__, 0);
+ return ret;
+ }
+
+ ret = reset_deassert(&reset);
+ if (ret < 0) {
+ dev_err(dev, "%s: error deasserting reset %d\n", __func__, 0);
+ return ret;
+ }
+
+ ret = reset_get_by_index(dev, 1, &reset);
+ if (ret < 0) {
+ dev_err(dev, "%s: error getting reset index %d\n", __func__, 1);
+ return ret;
+ }
+
+ ret = reset_deassert(&reset);
+ if (ret < 0) {
+ dev_err(dev, "%s: error deasserting reset %d\n", __func__, 1);
+ return ret;
+ }
+
+ return 0;
+}
+
/**
 * ipu_stop() - stop the remote processor
 * @dev: rproc device pointer
 *
 * Not implemented; stopping the IPU is a no-op in this driver.
 *
 * Return: always 0.
 */
static int ipu_stop(struct udevice *dev)
{
	return 0;
}

/**
 * ipu_init() - Initialize the remote processor
 * @dev:	rproc device pointer
 *
 * No init-time work is required; probe does the setup.
 *
 * Return: 0 if all went ok, else return appropriate error
 */
static int ipu_init(struct udevice *dev)
{
	return 0;
}

/**
 * ipu_add_res() - record a parsed resource-table mapping
 * @dev:     rproc device pointer
 * @mapping: mapping to append to the device's list (ownership transfers
 *           to the driver; entries are consumed by da_to_pa())
 *
 * Return: always 0.
 */
static int ipu_add_res(struct udevice *dev, struct rproc_mem_entry *mapping)
{
	struct ipu_privdata *priv = dev_get_priv(dev);

	list_add_tail(&mapping->node, &priv->mappings);
	return 0;
}
+
+static int ipu_load(struct udevice *dev, ulong addr, ulong size)
+{
+ Elf32_Ehdr *ehdr; /* Elf header structure pointer */
+ Elf32_Phdr *phdr; /* Program header structure pointer */
+ Elf32_Phdr proghdr;
+ int va;
+ int pa;
+ int i;
+
+ ehdr = (Elf32_Ehdr *)addr;
+ phdr = (Elf32_Phdr *)(addr + ehdr->e_phoff);
+ /*
+ * Load each program header
+ */
+ for (i = 0; i < ehdr->e_phnum; ++i) {
+ memcpy(&proghdr, phdr, sizeof(Elf32_Phdr));
+
+ if (proghdr.p_type != PT_LOAD) {
+ ++phdr;
+ continue;
+ }
+
+ va = proghdr.p_paddr;
+ pa = da_to_pa(dev, va);
+ if (pa)
+ proghdr.p_paddr = pa;
+
+ void *dst = (void *)(uintptr_t)proghdr.p_paddr;
+ void *src = (void *)addr + proghdr.p_offset;
+
+ debug("Loading phdr %i to 0x%p (%i bytes)\n", i, dst,
+ proghdr.p_filesz);
+ if (proghdr.p_filesz)
+ memcpy(dst, src, proghdr.p_filesz);
+
+ flush_cache((unsigned long)dst, proghdr.p_memsz);
+
+ ++phdr;
+ }
+
+ return 0;
+}
+
/* remoteproc uclass operations for the DRA7 IPU cores */
static const struct dm_rproc_ops ipu_ops = {
	.init = ipu_init,
	.start = ipu_start,
	.stop = ipu_stop,
	.load = ipu_load,
	.add_res = ipu_add_res,
	.config_pagetable = ipu_config_pagetable,
	.alloc_mem = ipu_alloc_mem,
};
+
/*
 * If the remotecore binary expects any peripherals to be setup before it has
 * booted, configure them here.
 *
 * These functions are left empty by default as their operation is usecase
 * specific.
 */

/* Peripheral setup hook for IPU1 — intentionally empty */
u32 ipu1_config_peripherals(u32 core_id, struct rproc *cfg)
{
	return 0;
}

/* Peripheral setup hook for IPU2 — intentionally empty */
u32 ipu2_config_peripherals(u32 core_id, struct rproc *cfg)
{
	return 0;
}
+
/*
 * Internal-memory to L3 address mappings: the core sees its L2 SRAM at a
 * private address, while the host must access it through the L3 view.
 */
struct rproc_intmem_to_l3_mapping ipu1_intmem_to_l3_mapping = {
	.num_entries = 1,
	.mappings = {
		/*
		 * L2 SRAM
		 */
		{
		 .priv_addr = 0x55020000,
		 .l3_addr = 0x58820000,
		 .len = (64 * 1024)},
	}
};

struct rproc_intmem_to_l3_mapping ipu2_intmem_to_l3_mapping = {
	.num_entries = 1,
	.mappings = {
		/*
		 * L2 SRAM — for IPU2 the L3 view matches the private address
		 */
		{
		 .priv_addr = 0x55020000,
		 .l3_addr = 0x55020000,
		 .len = (64 * 1024)},
	}
};

/* Static per-core configuration: IOMMU base, load address, firmware name */
struct rproc ipu1_config = {
	.num_iommus = 1,
	.mmu_base_addr = {0x58882000, 0},
	.load_addr = IPU1_LOAD_ADDR,
	.core_name = "IPU1",
	.firmware_name = "dra7-ipu1-fw.xem4",
	.config_mmu = ipu_config_mmu,
	.config_peripherals = ipu1_config_peripherals,
	.intmem_to_l3_mapping = &ipu1_intmem_to_l3_mapping
};

struct rproc ipu2_config = {
	.num_iommus = 1,
	.mmu_base_addr = {0x55082000, 0},
	.load_addr = IPU2_LOAD_ADDR,
	.core_name = "IPU2",
	.firmware_name = "dra7-ipu2-fw.xem4",
	.config_mmu = ipu_config_mmu,
	.config_peripherals = ipu2_config_peripherals,
	.intmem_to_l3_mapping = &ipu2_intmem_to_l3_mapping
};

/* Indexed by the IPU1/IPU2 core-id enum */
struct rproc *rproc_cfg_arr[2] = {
	[IPU2] = &ipu2_config,
	[IPU1] = &ipu1_config,
};
+
/**
 * spl_pre_boot_core() - load and set up one IPU core before release
 * @dev:	remoteproc device for the core
 * @core_id:	index into rproc_cfg_arr (IPU1/IPU2)
 *
 * Validates the ELF image at the configured load address, programs the
 * MMU when a resource table is present, re-initializes the file-global
 * page-table/allocator state for this core, and processes the resource
 * table (which also loads the image).
 *
 * Return: 0 on success, 1 on any failure (note: not a negative errno).
 */
u32 spl_pre_boot_core(struct udevice *dev, u32 core_id)
{
	struct rproc *cfg = NULL;
	unsigned long load_elf_status = 0;
	int tablesz;

	cfg = rproc_cfg_arr[core_id];
	/*
	 * Check for valid elf image
	 */
	if (!valid_elf_image(cfg->load_addr))
		return 1;

	if (rproc_find_resource_table(dev, cfg->load_addr, &tablesz))
		cfg->has_rsc_table = 1;
	else
		cfg->has_rsc_table = 0;

	/*
	 * Configure the MMU
	 */
	if (cfg->config_mmu && cfg->has_rsc_table)
		cfg->config_mmu(core_id, cfg);

	/*
	 * Load the remote core. Fill the page table of the first(possibly
	 * only) IOMMU during ELF loading. Copy the page table to the second
	 * IOMMU before running the remote core.
	 */

	/* Re-point the shared allocator/page-table globals at this core */
	page_table_l1 = (unsigned int *)cfg->page_table_addr;
	page_table_l2 =
	    (unsigned int *)(cfg->page_table_addr + PAGE_TABLE_SIZE_L1);
	mem_base = cfg->cma_base;
	mem_size = cfg->cma_size;
	memset(mem_bitmap, 0x00, sizeof(mem_bitmap));
	mem_count = (cfg->cma_size >> PAGE_SHIFT);

	/*
	 * Clear variables used for level 2 page table allocation
	 */
	memset(pgtable_l2_map, 0x00, sizeof(pgtable_l2_map));
	pgtable_l2_cnt = 0;

	/* rproc_parse_resource_table() returns 1 on success, 0 on failure */
	load_elf_status = rproc_parse_resource_table(dev, cfg);
	if (load_elf_status == 0) {
		debug("load_elf_image_phdr returned error for core %s\n",
		      cfg->core_name);
		return 1;
	}

	flush_cache(cfg->page_table_addr, PAGE_TABLE_SIZE);

	return 0;
}
+
/**
 * ipu_parse_mem_nodes() - resolve a phandle property to a memory region
 * @dev:	IPU remoteproc device
 * @name:	property holding the phandle (e.g. "memory-region", "pg-tbl")
 * @privid:	core index (currently unused by this function)
 * @sizep:	filled with the referenced region's size on success
 *
 * Return: base address of the referenced node's first "reg" entry.
 * NOTE(review): on failure this returns a negative errno cast to
 * fdt_addr_t, and the callers in ipu_probe() do not check for that —
 * confirm intended error handling.
 */
static fdt_addr_t ipu_parse_mem_nodes(struct udevice *dev, char *name,
				      int privid, fdt_size_t *sizep)
{
	int ret;
	u32 sp;
	ofnode mem_node;

	ret = ofnode_read_u32(dev_ofnode(dev), name, &sp);
	if (ret) {
		dev_err(dev, "memory-region node fetch failed %d\n", ret);
		return ret;
	}

	mem_node = ofnode_get_by_phandle(sp);
	if (!ofnode_valid(mem_node))
		return -EINVAL;

	return ofnode_get_addr_size_index(mem_node, 0, sizep);
}
+
/**
 * ipu_probe() - Basic probe
 * @dev:	corresponding DRA7 IPU remote processor device
 *
 * Maps the core's L2 RAM, releases the L2 reset (index 2), reads the
 * carveout and page-table regions from the device tree, and runs the
 * pre-boot load sequence for the core.
 *
 * Return: 0 if all goes good, else appropriate error message.
 */
static int ipu_probe(struct udevice *dev)
{
	struct ipu_privdata *priv;
	struct rproc *cfg = NULL;
	struct reset_ctl reset;
	static const char *const ipu_mem_names[] = { "l2ram" };
	int ret;
	fdt_size_t sizep;

	priv = dev_get_priv(dev);

	priv->mem.bus_addr =
		devfdt_get_addr_size_name(dev,
					  ipu_mem_names[0],
					  (fdt_addr_t *)&priv->mem.size);

	/* Reset index 2 gates the L2 RAM; release it before touching it */
	ret = reset_get_by_index(dev, 2, &reset);
	if (ret < 0) {
		dev_err(dev, "%s: error getting reset index %d\n", __func__, 2);
		return ret;
	}

	ret = reset_deassert(&reset);
	if (ret < 0) {
		dev_err(dev, "%s: error deasserting reset %d\n", __func__, 2);
		return ret;
	}

	if (priv->mem.bus_addr == FDT_ADDR_T_NONE) {
		dev_err(dev, "%s bus address not found\n", ipu_mem_names[0]);
		return -EINVAL;
	}
	priv->mem.cpu_addr = map_physmem(priv->mem.bus_addr,
					 priv->mem.size, MAP_NOCACHE);

	/* The IPU1 instance sits at 0x58820000; anything else is IPU2 */
	if (devfdt_get_addr(dev) == 0x58820000)
		priv->id = 0;
	else
		priv->id = 1;

	cfg = rproc_cfg_arr[priv->id];
	/* NOTE(review): return values of ipu_parse_mem_nodes() may be
	 * negative errnos cast to fdt_addr_t and are not checked here.
	 */
	cfg->cma_base = ipu_parse_mem_nodes(dev, "memory-region", priv->id,
					    &sizep);
	cfg->cma_size = sizep;

	cfg->page_table_addr = ipu_parse_mem_nodes(dev, "pg-tbl", priv->id,
						   &sizep);

	dev_info(dev,
		 "ID %d memory %8s: bus addr %pa size 0x%zx va %p da 0x%x\n",
		priv->id, ipu_mem_names[0], &priv->mem.bus_addr,
		priv->mem.size, priv->mem.cpu_addr, priv->mem.dev_addr);

	INIT_LIST_HEAD(&priv->mappings);
	if (spl_pre_boot_core(dev, priv->id))
		return -EINVAL;

	return 0;
}
+
/* Device-tree match table for the DRA7 IPU remoteproc driver */
static const struct udevice_id ipu_ids[] = {
	{.compatible = "ti,dra7-ipu"},
	{}
};

U_BOOT_DRIVER(ipu) = {
	.name = "ipu",
	.of_match = ipu_ids,
	.id = UCLASS_REMOTEPROC,
	.ops = &ipu_ops,
	.probe = ipu_probe,
	.priv_auto = sizeof(struct ipu_privdata),
};
diff --git a/drivers/remoteproc/k3_system_controller.c b/drivers/remoteproc/k3_system_controller.c
index 89cb90207d..e2affe69c6 100644
--- a/drivers/remoteproc/k3_system_controller.c
+++ b/drivers/remoteproc/k3_system_controller.c
@@ -77,14 +77,18 @@ struct k3_sysctrler_desc {
* struct k3_sysctrler_privdata - Structure representing System Controller data.
* @chan_tx: Transmit mailbox channel
* @chan_rx: Receive mailbox channel
+ * @chan_boot_notify: Boot notification channel
* @desc: SoC description for this instance
* @seq_nr: Counter for number of messages sent.
+ * @has_boot_notify: Has separate boot notification channel
*/
struct k3_sysctrler_privdata {
struct mbox_chan chan_tx;
struct mbox_chan chan_rx;
+ struct mbox_chan chan_boot_notify;
struct k3_sysctrler_desc *desc;
u32 seq_nr;
+ bool has_boot_notify;
};
static inline
@@ -223,7 +227,8 @@ static int k3_sysctrler_start(struct udevice *dev)
debug("%s(dev=%p)\n", __func__, dev);
/* Receive the boot notification. Note that it is sent only once. */
- ret = mbox_recv(&priv->chan_rx, &msg, priv->desc->max_rx_timeout_us);
+ ret = mbox_recv(priv->has_boot_notify ? &priv->chan_boot_notify :
+ &priv->chan_rx, &msg, priv->desc->max_rx_timeout_us);
if (ret) {
dev_err(dev, "%s: Boot Notification response failed. ret = %d\n",
__func__, ret);
@@ -272,6 +277,19 @@ static int k3_of_to_priv(struct udevice *dev,
return ret;
}
+	/* Some SoCs may have an optional channel for boot notification. */
+ priv->has_boot_notify = 1;
+ ret = mbox_get_by_name(dev, "boot_notify", &priv->chan_boot_notify);
+ if (ret == -ENODATA) {
+ dev_dbg(dev, "%s: Acquiring optional Boot_notify failed. ret = %d. Using Rx\n",
+ __func__, ret);
+ priv->has_boot_notify = 0;
+ } else if (ret) {
+ dev_err(dev, "%s: Acquiring boot_notify channel failed. ret = %d\n",
+ __func__, ret);
+ return ret;
+ }
+
return 0;
}
diff --git a/drivers/remoteproc/rproc-uclass.c b/drivers/remoteproc/rproc-uclass.c
index 87e1ec7ad7..50bcc9030e 100644
--- a/drivers/remoteproc/rproc-uclass.c
+++ b/drivers/remoteproc/rproc-uclass.c
@@ -8,15 +8,31 @@
#define pr_fmt(fmt) "%s: " fmt, __func__
#include <common.h>
+#include <elf.h>
#include <errno.h>
#include <log.h>
#include <malloc.h>
+#include <virtio_ring.h>
#include <remoteproc.h>
#include <asm/io.h>
#include <dm/device-internal.h>
#include <dm.h>
#include <dm/uclass.h>
#include <dm/uclass-internal.h>
+#include <linux/compat.h>
+
+DECLARE_GLOBAL_DATA_PTR;
+
/* Firmware resource table header (v1), matching the Linux remoteproc
 * layout: @num offsets into the table follow the header.
 */
struct resource_table {
	u32 ver;		/* table format version; only v1 supported */
	u32 num;		/* number of resource entries */
	u32 reserved[2];	/* must be zero */
	u32 offset[0];		/* byte offsets of the entries */
} __packed;

/* Per-resource-type handler invoked while walking the table */
typedef int (*handle_resource_t) (struct udevice *, void *, int offset, int avail);

/* Working copy of the firmware's resource table while it is processed */
static struct resource_table *rsc_table;
/**
* for_each_remoteproc_device() - iterate through the list of rproc devices
@@ -196,6 +212,80 @@ static int rproc_post_probe(struct udevice *dev)
return 0;
}
+/**
+ * rproc_add_res() - After parsing the resource table add the mappings
+ * @dev: device we finished probing
+ * @mapping: rproc_mem_entry for the resource
+ *
+ * Return: if the remote proc driver has a add_res routine, invokes it and
+ * hands over the return value. overall, 0 if all went well, else appropriate
+ * error value.
+ */
+static int rproc_add_res(struct udevice *dev, struct rproc_mem_entry *mapping)
+{
+ const struct dm_rproc_ops *ops = rproc_get_ops(dev);
+
+ if (!ops->add_res)
+ return -ENOSYS;
+
+ return ops->add_res(dev, mapping);
+}
+
/**
 * rproc_alloc_mem() - After parsing the resource table allocate mem
 * @dev:	device we finished probing
 * @len:	length of the memory to be allocated
 * @align:	alignment for the allocation
 *
 * Return: if the remote proc driver has an alloc_mem routine, invokes it
 * and hands over the returned address; NULL when the hook is missing or
 * the driver has no ops.
 */
static void *rproc_alloc_mem(struct udevice *dev, unsigned long len,
			     unsigned long align)
{
	const struct dm_rproc_ops *ops;

	ops = rproc_get_ops(dev);
	if (!ops) {
		debug("%s driver has no ops?\n", dev->name);
		return NULL;
	}

	if (ops->alloc_mem)
		return ops->alloc_mem(dev, len, align);

	return NULL;
}
+
/**
 * rproc_config_pagetable() - Configure page table for remote processor
 * @dev:	device we finished probing
 * @virt:	virtual (device) address of the resource
 * @phys:	physical address of the resource
 * @len:	length of the resource
 *
 * Return: if the remote proc driver has a config_pagetable routine,
 * invokes it and hands over the return value; 0 when the hook is not
 * implemented, -EINVAL if the driver has no ops.
 */
static int rproc_config_pagetable(struct udevice *dev, unsigned int virt,
				  unsigned int phys, unsigned int len)
{
	const struct dm_rproc_ops *ops;

	ops = rproc_get_ops(dev);
	if (!ops) {
		debug("%s driver has no ops?\n", dev->name);
		return -EINVAL;
	}

	if (ops->config_pagetable)
		return ops->config_pagetable(dev, virt, phys, len);

	return 0;
}
+
UCLASS_DRIVER(rproc) = {
.id = UCLASS_REMOTEPROC,
.name = "remoteproc",
@@ -426,3 +516,447 @@ int rproc_is_running(int id)
{
return _rproc_ops_wrapper(id, RPROC_RUNNING);
};
+
+
/**
 * handle_trace() - validate a trace resource entry
 * @dev:	remote processor device
 * @rsc:	trace resource entry
 * @offset:	entry offset within the table (unused)
 * @avail:	bytes remaining in the table after the entry header
 *
 * Only validates the entry; no trace buffer is set up in U-Boot.
 *
 * Return: 0 if the entry is well-formed, -EINVAL otherwise.
 */
static int handle_trace(struct udevice *dev, struct fw_rsc_trace *rsc,
			int offset, int avail)
{
	/* The entry must fit in what is left of the table */
	if (sizeof(*rsc) > avail) {
		debug("trace rsc is truncated\n");
		return -EINVAL;
	}

	/*
	 * make sure reserved bytes are zeroes
	 */
	if (rsc->reserved) {
		debug("trace rsc has non zero reserved bytes\n");
		return -EINVAL;
	}

	debug("trace rsc: da 0x%x, len 0x%x\n", rsc->da, rsc->len);

	return 0;
}
+
/**
 * handle_devmem() - map a device-memory resource entry
 * @dev:	remote processor device
 * @rsc:	devmem resource entry (pa, da, len)
 * @offset:	entry offset within the table (unused)
 * @avail:	bytes remaining in the table after the entry header
 *
 * Programs the device page table for the pa->da mapping and records it
 * so da_to_pa() can translate firmware addresses during load.
 *
 * Return: 0 on success, -EINVAL on a malformed entry, -ENOMEM on OOM.
 */
static int handle_devmem(struct udevice *dev, struct fw_rsc_devmem *rsc,
			 int offset, int avail)
{
	struct rproc_mem_entry *mapping;

	if (sizeof(*rsc) > avail) {
		debug("devmem rsc is truncated\n");
		return -EINVAL;
	}

	/*
	 * make sure reserved bytes are zeroes
	 */
	if (rsc->reserved) {
		debug("devmem rsc has non zero reserved bytes\n");
		return -EINVAL;
	}

	debug("devmem rsc: pa 0x%x, da 0x%x, len 0x%x\n",
	      rsc->pa, rsc->da, rsc->len);

	/* NOTE(review): return values of rproc_config_pagetable() and
	 * rproc_add_res() are ignored (add_res is -ENOSYS for drivers
	 * without the hook, so this looks like deliberate best-effort).
	 */
	rproc_config_pagetable(dev, rsc->da, rsc->pa, rsc->len);

	mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
	if (!mapping)
		return -ENOMEM;

	/*
	 * We'll need this info later when we'll want to unmap everything
	 * (e.g. on shutdown).
	 *
	 * We can't trust the remote processor not to change the resource
	 * table, so we must maintain this info independently.
	 */
	mapping->dma = rsc->pa;
	mapping->da = rsc->da;
	mapping->len = rsc->len;
	rproc_add_res(dev, mapping);

	debug("mapped devmem pa 0x%x, da 0x%x, len 0x%x\n",
	      rsc->pa, rsc->da, rsc->len);

	return 0;
}
+
/**
 * handle_carveout() - allocate and map a carveout resource entry
 * @dev:	remote processor device
 * @rsc:	carveout resource entry; @rsc->pa is overwritten with the
 *		address allocated here (written back to the table later)
 * @offset:	entry offset within the table (unused)
 * @avail:	bytes remaining in the table after the entry header
 *
 * Return: 0 on success, -EINVAL on a malformed entry, -ENOMEM on
 * allocation failure.
 */
static int handle_carveout(struct udevice *dev, struct fw_rsc_carveout *rsc,
			   int offset, int avail)
{
	struct rproc_mem_entry *mapping;

	if (sizeof(*rsc) > avail) {
		debug("carveout rsc is truncated\n");
		return -EINVAL;
	}

	/*
	 * make sure reserved bytes are zeroes
	 */
	if (rsc->reserved) {
		debug("carveout rsc has non zero reserved bytes\n");
		return -EINVAL;
	}

	debug("carveout rsc: da %x, pa %x, len %x, flags %x\n",
	      rsc->da, rsc->pa, rsc->len, rsc->flags);

	/* Alignment of 8 is hard-coded here; confirm it suits all users */
	rsc->pa = (uintptr_t)rproc_alloc_mem(dev, rsc->len, 8);
	if (!rsc->pa) {
		debug
		    ("failed to allocate carveout rsc: da %x, pa %x, len %x, flags %x\n",
		     rsc->da, rsc->pa, rsc->len, rsc->flags);
		return -ENOMEM;
	}
	rproc_config_pagetable(dev, rsc->da, rsc->pa, rsc->len);

	/*
	 * Ok, this is non-standard.
	 *
	 * Sometimes we can't rely on the generic iommu-based DMA API
	 * to dynamically allocate the device address and then set the IOMMU
	 * tables accordingly, because some remote processors might
	 * _require_ us to use hard coded device addresses that their
	 * firmware was compiled with.
	 *
	 * In this case, we must use the IOMMU API directly and map
	 * the memory to the device address as expected by the remote
	 * processor.
	 *
	 * Obviously such remote processor devices should not be configured
	 * to use the iommu-based DMA API: we expect 'dma' to contain the
	 * physical address in this case.
	 */
	mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
	if (!mapping)
		return -ENOMEM;

	/*
	 * We'll need this info later when we'll want to unmap
	 * everything (e.g. on shutdown).
	 *
	 * We can't trust the remote processor not to change the
	 * resource table, so we must maintain this info independently.
	 */
	mapping->dma = rsc->pa;
	mapping->da = rsc->da;
	mapping->len = rsc->len;
	rproc_add_res(dev, mapping);

	debug("carveout mapped 0x%x to 0x%x\n", rsc->da, rsc->pa);

	return 0;
}
+
/* 4 KiB page constants used to size/align vrings */
#define RPROC_PAGE_SHIFT 12
#define RPROC_PAGE_SIZE  BIT(RPROC_PAGE_SHIFT)
#define RPROC_PAGE_ALIGN(x) (((x) + (RPROC_PAGE_SIZE - 1)) & ~(RPROC_PAGE_SIZE - 1))

/**
 * alloc_vring() - allocate one vring for a vdev resource
 * @dev:	remote processor device
 * @rsc:	vdev resource entry owning the vring
 * @i:		index of the vring within @rsc
 *
 * Allocates page-aligned storage for the vring and writes the resulting
 * address back into the table's @da field for the firmware to pick up.
 *
 * Return: 0 on success, -EINVAL/-ENOMEM on failure.
 */
static int alloc_vring(struct udevice *dev, struct fw_rsc_vdev *rsc, int i)
{
	struct fw_rsc_vdev_vring *vring = &rsc->vring[i];
	int size;
	int order;
	void *pa;

	debug("vdev rsc: vring%d: da %x, qsz %d, align %d\n",
	      i, vring->da, vring->num, vring->align);

	/*
	 * verify queue size and vring alignment are sane
	 */
	if (!vring->num || !vring->align) {
		debug("invalid qsz (%d) or alignment (%d)\n", vring->num,
		      vring->align);
		return -EINVAL;
	}

	/*
	 * actual size of vring (in bytes)
	 */
	size = RPROC_PAGE_ALIGN(vring_size(vring->num, vring->align));
	/* NOTE(review): "order" (align in pages) is passed to the driver's
	 * alloc hook as its align argument — confirm the hook expects
	 * page-order units rather than bytes.
	 */
	order = vring->align >> RPROC_PAGE_SHIFT;

	pa = rproc_alloc_mem(dev, size, order);
	if (!pa) {
		debug("failed to allocate vring rsc\n");
		return -ENOMEM;
	}
	debug("alloc_mem(%#x, %d): %p\n", size, order, pa);
	vring->da = (uintptr_t)pa;

	/* pa is known non-NULL here, so this is always 0 */
	return !pa;
}
+
/**
 * handle_vdev() - set up a virtio device resource entry
 * @dev:	remote processor device
 * @rsc:	vdev resource entry with trailing vrings and config space
 * @offset:	entry offset within the table (unused)
 * @avail:	bytes remaining in the table after the entry header
 *
 * Allocates the (up to two) vrings and the rpmsg buffer pool.
 *
 * Return: 0 on success, negative error otherwise.
 */
static int handle_vdev(struct udevice *dev, struct fw_rsc_vdev *rsc,
		       int offset, int avail)
{
	int i, ret;
	void *pa;

	/*
	 * make sure resource isn't truncated
	 */
	if (sizeof(*rsc) + rsc->num_of_vrings * sizeof(struct fw_rsc_vdev_vring)
	    + rsc->config_len > avail) {
		debug("vdev rsc is truncated\n");
		return -EINVAL;
	}

	/*
	 * make sure reserved bytes are zeroes
	 */
	if (rsc->reserved[0] || rsc->reserved[1]) {
		debug("vdev rsc has non zero reserved bytes\n");
		return -EINVAL;
	}

	debug("vdev rsc: id %d, dfeatures %x, cfg len %d, %d vrings\n",
	      rsc->id, rsc->dfeatures, rsc->config_len, rsc->num_of_vrings);

	/*
	 * we currently support only two vrings per rvdev
	 */
	if (rsc->num_of_vrings > 2) {
		debug("too many vrings: %d\n", rsc->num_of_vrings);
		return -EINVAL;
	}

	/*
	 * allocate the vrings
	 */
	for (i = 0; i < rsc->num_of_vrings; i++) {
		ret = alloc_vring(dev, rsc, i);
		if (ret)
			goto alloc_error;
	}

	/* Buffer pool for rpmsg messages; "6" is in the same page-order
	 * units alloc_vring() uses for its align argument.
	 */
	pa = rproc_alloc_mem(dev, RPMSG_TOTAL_BUF_SPACE, 6);
	if (!pa) {
		debug("failed to allocate vdev rsc\n");
		return -ENOMEM;
	}
	debug("vring buffer alloc_mem(%#x, 6): %p\n", RPMSG_TOTAL_BUF_SPACE,
	      pa);

	return 0;

	/* NOTE(review): already-allocated vrings are not released here —
	 * the alloc hook has no matching free, so this leaks on failure.
	 */
 alloc_error:
	return ret;
}
+
/*
 * A lookup table for resource handlers. The indices are defined in
 * enum fw_resource_type.
 *
 * NOTE(review): the casts make each typed handler callable through the
 * generic (void *) prototype — calling through a differently-typed
 * function pointer is technically UB in ISO C, though this mirrors the
 * long-standing Linux remoteproc pattern.
 */
static handle_resource_t loading_handlers[RSC_LAST] = {
	[RSC_CARVEOUT] = (handle_resource_t)handle_carveout,
	[RSC_DEVMEM] = (handle_resource_t)handle_devmem,
	[RSC_TRACE] = (handle_resource_t)handle_trace,
	[RSC_VDEV] = (handle_resource_t)handle_vdev,
};
+
/*
 * handle firmware resource entries before booting the remote processor
 */

/**
 * handle_resources() - walk the (already copied) resource table
 * @dev:	remote processor device
 * @len:	total size of the table in bytes, for truncation checks
 * @handlers:	per-type handler table (see loading_handlers)
 *
 * Iterates over the file-global rsc_table set up by
 * rproc_parse_resource_table(); unknown types are skipped, handler
 * failures abort the walk.
 *
 * Return: 0 on success or the first handler/validation error.
 */
static int handle_resources(struct udevice *dev, int len,
			    handle_resource_t handlers[RSC_LAST])
{
	handle_resource_t handler;
	int ret = 0, i;

	for (i = 0; i < rsc_table->num; i++) {
		int offset = rsc_table->offset[i];
		struct fw_rsc_hdr *hdr = (void *)rsc_table + offset;
		int avail = len - offset - sizeof(*hdr);
		void *rsc = (void *)hdr + sizeof(*hdr);

		/*
		 * make sure table isn't truncated
		 */
		if (avail < 0) {
			debug("rsc table is truncated\n");
			return -EINVAL;
		}

		debug("rsc: type %d\n", hdr->type);

		if (hdr->type >= RSC_LAST) {
			debug("unsupported resource %d\n", hdr->type);
			continue;
		}

		handler = handlers[hdr->type];
		if (!handler)
			continue;

		ret = handler(dev, rsc, offset + sizeof(*hdr), avail);
		if (ret)
			break;
	}

	return ret;
}
+
/**
 * handle_intmem_to_l3_mapping() - record internal-memory to L3 mappings
 * @dev:	remote processor device
 * @l3_mapping:	static table of core-private to L3 address ranges
 *
 * Registers each entry with the driver so address translation covers the
 * core's internal memories in addition to resource-table regions.
 *
 * Return: 0 on success, -ENOMEM on allocation failure.
 */
static int
handle_intmem_to_l3_mapping(struct udevice *dev,
			    struct rproc_intmem_to_l3_mapping *l3_mapping)
{
	u32 i = 0;

	for (i = 0; i < l3_mapping->num_entries; i++) {
		struct l3_map *curr_map = &l3_mapping->mappings[i];
		struct rproc_mem_entry *mapping;

		mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
		if (!mapping)
			return -ENOMEM;

		mapping->dma = curr_map->l3_addr;
		mapping->da = curr_map->priv_addr;
		mapping->len = curr_map->len;
		/* NOTE(review): return value ignored; mapping would leak
		 * if the driver's add_res hook failed.
		 */
		rproc_add_res(dev, mapping);
	}

	return 0;
}
+
/**
 * rproc_find_table() - locate the .resource_table section in an ELF image
 * @addr:	address of the ELF image in memory
 *
 * Scans the section headers for ".resource_table" and validates the
 * table header (version 1 only, zero reserved words, offsets array in
 * bounds). Section headers are memcpy'd into a local to avoid unaligned
 * access into the image.
 *
 * Return: pointer to the matching section header within the image, or
 * NULL if absent or malformed.
 */
static Elf32_Shdr *rproc_find_table(unsigned int addr)
{
	Elf32_Ehdr *ehdr;	/* Elf header structure pointer */
	Elf32_Shdr *shdr;	/* Section header structure pointer */
	Elf32_Shdr sectionheader;
	int i;
	u8 *elf_data;
	char *name_table;
	struct resource_table *ptable;

	ehdr = (Elf32_Ehdr *)(uintptr_t)addr;
	elf_data = (u8 *)ehdr;
	shdr = (Elf32_Shdr *)(elf_data + ehdr->e_shoff);
	/* Section-name string table lives in the e_shstrndx section */
	memcpy(&sectionheader, &shdr[ehdr->e_shstrndx], sizeof(sectionheader));
	name_table = (char *)(elf_data + sectionheader.sh_offset);

	for (i = 0; i < ehdr->e_shnum; i++, shdr++) {
		memcpy(&sectionheader, shdr, sizeof(sectionheader));
		u32 size = sectionheader.sh_size;
		u32 offset = sectionheader.sh_offset;

		if (strcmp
		    (name_table + sectionheader.sh_name, ".resource_table"))
			continue;

		ptable = (struct resource_table *)(elf_data + offset);

		/*
		 * make sure table has at least the header
		 */
		if (sizeof(struct resource_table) > size) {
			debug("header-less resource table\n");
			return NULL;
		}

		/*
		 * we don't support any version beyond the first
		 */
		if (ptable->ver != 1) {
			debug("unsupported fw ver: %d\n", ptable->ver);
			return NULL;
		}

		/*
		 * make sure reserved bytes are zeroes
		 */
		if (ptable->reserved[0] || ptable->reserved[1]) {
			debug("non zero reserved bytes\n");
			return NULL;
		}

		/*
		 * make sure the offsets array isn't truncated
		 */
		if (ptable->num * sizeof(ptable->offset[0]) +
		    sizeof(struct resource_table) > size) {
			debug("resource table incomplete\n");
			return NULL;
		}

		return shdr;
	}

	return NULL;
}
+
+struct resource_table *rproc_find_resource_table(struct udevice *dev,
+ unsigned int addr,
+ int *tablesz)
+{
+ Elf32_Shdr *shdr;
+ Elf32_Shdr sectionheader;
+ struct resource_table *ptable;
+ u8 *elf_data = (u8 *)(uintptr_t)addr;
+
+ shdr = rproc_find_table(addr);
+ if (!shdr) {
+ debug("%s: failed to get resource section header\n", __func__);
+ return NULL;
+ }
+
+ memcpy(&sectionheader, shdr, sizeof(sectionheader));
+ ptable = (struct resource_table *)(elf_data + sectionheader.sh_offset);
+ if (tablesz)
+ *tablesz = sectionheader.sh_size;
+
+ return ptable;
+}
+
+unsigned long rproc_parse_resource_table(struct udevice *dev, struct rproc *cfg)
+{
+ struct resource_table *ptable = NULL;
+ int tablesz;
+ int ret;
+ unsigned long addr;
+
+ addr = cfg->load_addr;
+
+ ptable = rproc_find_resource_table(dev, addr, &tablesz);
+ if (!ptable) {
+ debug("%s : failed to find resource table\n", __func__);
+ return 0;
+ }
+
+ debug("%s : found resource table\n", __func__);
+ rsc_table = kzalloc(tablesz, GFP_KERNEL);
+ if (!rsc_table) {
+ debug("resource table alloc failed!\n");
+ return 0;
+ }
+
+ /*
+ * Copy the resource table into a local buffer before handling the
+ * resource table.
+ */
+ memcpy(rsc_table, ptable, tablesz);
+ if (cfg->intmem_to_l3_mapping)
+ handle_intmem_to_l3_mapping(dev, cfg->intmem_to_l3_mapping);
+ ret = handle_resources(dev, tablesz, loading_handlers);
+ if (ret) {
+ debug("handle_resources failed: %d\n", ret);
+ return 0;
+ }
+
+ /*
+ * Instead of trying to mimic the kernel flow of copying the
+ * processed resource table into its post ELF load location in DDR
+ * copying it into its original location.
+ */
+ memcpy(ptable, rsc_table, tablesz);
+ free(rsc_table);
+ rsc_table = NULL;
+
+ return 1;
+}
diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
index d73daf5e31..b57714111b 100644
--- a/drivers/reset/Kconfig
+++ b/drivers/reset/Kconfig
@@ -206,4 +206,10 @@ config RESET_ZYNQMP
passing request via Xilinx firmware interface to TF-A and PMU
firmware.
+config RESET_DRA7
+ bool "Support for TI's DRA7 Reset driver"
+ depends on DM_RESET
+ help
+ Support for TI DRA7-RESET subsystem. Basic Assert/Deassert
+ is supported.
endmenu
diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile
index d69486bdeb..97e3a782c0 100644
--- a/drivers/reset/Makefile
+++ b/drivers/reset/Makefile
@@ -30,3 +30,4 @@ obj-$(CONFIG_RESET_SYSCON) += reset-syscon.o
obj-$(CONFIG_RESET_RASPBERRYPI) += reset-raspberrypi.o
obj-$(CONFIG_RESET_SCMI) += reset-scmi.o
obj-$(CONFIG_RESET_ZYNQMP) += reset-zynqmp.o
+obj-$(CONFIG_RESET_DRA7) += reset-dra7.o
diff --git a/drivers/reset/reset-dra7.c b/drivers/reset/reset-dra7.c
new file mode 100644
index 0000000000..585f8323c5
--- /dev/null
+++ b/drivers/reset/reset-dra7.c
@@ -0,0 +1,97 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Texas Instruments DRA7 reset driver
+ *
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Keerthy <j-keerthy@ti.com>
+ */
+
+#include <asm/io.h>
+#include <common.h>
+#include <dm.h>
+#include <reset-uclass.h>
+#include <dm/device_compat.h>
+
/* Per-device state: RSTCTRL/RSTST register addresses and line count.
 * Addresses held as u32 — fine for 32-bit DRA7.
 */
struct dra7_reset_priv {
	u32 rstctrl;	/* RSTCTRL register address */
	u32 rstst;	/* RSTST (status) register address, rstctrl + 0x4 */
	u8 nreset;	/* number of reset lines behind this register */
};

/* No per-line setup is needed on DRA7 */
static int dra7_reset_request(struct reset_ctl *reset_ctl)
{
	return 0;
}

/* Nothing to release either */
static int dra7_reset_free(struct reset_ctl *reset_ctl)
{
	return 0;
}

/* Read-modify-write: update only the bits selected by @mask with @value */
static inline void dra7_reset_rmw(u32 addr, u32 value, u32 mask)
{
	writel(((readl(addr) & (~mask)) | (value & mask)), addr);
}
+
+static int dra7_reset_deassert(struct reset_ctl *reset_ctl)
+{
+ struct dra7_reset_priv *priv = dev_get_priv(reset_ctl->dev);
+ int mask = 1 << reset_ctl->id;
+
+ if (reset_ctl->id < 0 || reset_ctl->id >= priv->nreset)
+ return -EINVAL;
+
+ dra7_reset_rmw(priv->rstctrl, 0x0, mask);
+
+ while ((readl(priv->rstst) & mask) != mask)
+ ;
+
+ return 0;
+}
+
+static int dra7_reset_assert(struct reset_ctl *reset_ctl)
+{
+ struct dra7_reset_priv *priv = dev_get_priv(reset_ctl->dev);
+ int mask = 1 << reset_ctl->id;
+
+ if (reset_ctl->id < 0 || reset_ctl->id >= priv->nreset)
+ return -EINVAL;
+
+ dra7_reset_rmw(priv->rstctrl, mask, 0x0);
+
+ return 0;
+}
+
/* reset uclass operations for the DRA7 PRM reset registers */
struct reset_ops dra7_reset_ops = {
	.request = dra7_reset_request,
	.rfree = dra7_reset_free,
	.rst_assert = dra7_reset_assert,
	.rst_deassert = dra7_reset_deassert,
};

static const struct udevice_id dra7_reset_ids[] = {
	{ .compatible = "ti,dra7-reset" },
	{ }
};

/**
 * dra7_reset_probe() - read register layout from the device tree
 * @dev: reset controller device
 *
 * RSTCTRL comes from "reg"; RSTST is fixed at RSTCTRL + 4 per the PRM
 * register map. "ti,nresets" bounds the valid line indices (default 1).
 *
 * Return: always 0.
 */
static int dra7_reset_probe(struct udevice *dev)
{
	struct dra7_reset_priv *priv = dev_get_priv(dev);

	priv->rstctrl = dev_read_addr(dev);
	priv->rstst = priv->rstctrl + 0x4;
	priv->nreset = dev_read_u32_default(dev, "ti,nresets", 1);

	dev_info(dev, "dra7-reset successfully probed %s\n", dev->name);

	return 0;
}

U_BOOT_DRIVER(dra7_reset) = {
	.name = "dra7_reset",
	.id = UCLASS_RESET,
	.of_match = dra7_reset_ids,
	.probe = dra7_reset_probe,
	.ops = &dra7_reset_ops,
	.priv_auto = sizeof(struct dra7_reset_priv),
};
diff --git a/drivers/soc/soc_ti_k3.c b/drivers/soc/soc_ti_k3.c
index 9abed7d490..965728e818 100644
--- a/drivers/soc/soc_ti_k3.c
+++ b/drivers/soc/soc_ti_k3.c
@@ -14,9 +14,7 @@
#define J721E 0xbb64
#define J7200 0xbb6d
#define AM64X 0xbb38
-
-#define REV_SR1_0 0
-#define REV_SR2_0 1
+#define J721S2 0xbb75
#define JTAG_ID_VARIANT_SHIFT 28
#define JTAG_ID_VARIANT_MASK (0xf << 28)
@@ -48,6 +46,9 @@ static const char *get_family_string(u32 idreg)
case AM64X:
family = "AM64X";
break;
+ case J721S2:
+ family = "J721S2";
+ break;
default:
family = "Unknown Silicon";
};
@@ -55,25 +56,42 @@ static const char *get_family_string(u32 idreg)
return family;
}
/*
 * Silicon-revision strings indexed by the JTAG ID variant field.
 * Made fully const: the tables are read-only data returned through
 * get_rev_string()'s `const char *` return type.
 */
static const char *const j721e_rev_string_map[] = {
	"1.0", "1.1",
};

static const char *const am65x_rev_string_map[] = {
	"1.0", "2.0",
};
+
/**
 * get_rev_string() - map a JTAG ID register value to a revision string
 * @idreg: raw JTAG ID register value
 *
 * Decodes the variant (revision) and part-number fields and looks the
 * revision up in the per-SoC table where one exists.
 *
 * Fix vs. original: the bounds checks used `rev > ARRAY_SIZE(...)`,
 * which let rev == ARRAY_SIZE index one past the end of the table;
 * they must be `>=`.
 *
 * Return: revision string, or "Unknown Revision" when not recognized.
 */
static const char *get_rev_string(u32 idreg)
{
	u32 rev;
	u32 soc;

	rev = (idreg & JTAG_ID_VARIANT_MASK) >> JTAG_ID_VARIANT_SHIFT;
	soc = (idreg & JTAG_ID_PARTNO_MASK) >> JTAG_ID_PARTNO_SHIFT;

	switch (soc) {
	case J721E:
		if (rev >= ARRAY_SIZE(j721e_rev_string_map))
			goto bail;
		return j721e_rev_string_map[rev];

	case AM65X:
		if (rev >= ARRAY_SIZE(am65x_rev_string_map))
			goto bail;
		return am65x_rev_string_map[rev];

	case AM64X:
	case J7200:
	default:
		/* Other parts only report SR1.0 via variant 0 */
		if (!rev)
			return "1.0";
		break;
	}

bail:
	return "Unknown Revision";
}
static int soc_ti_k3_get_family(struct udevice *dev, char *buf, int size)